From 4c60248a6dccb674059641816161fb9b38ea1718 Mon Sep 17 00:00:00 2001 From: dvorst <87502756+dvorst@users.noreply.github.com> Date: Tue, 22 Aug 2023 23:14:12 +0200 Subject: [PATCH 0001/1718] DOC: add description of dtype b1 in arrays.dtypes.rst Closes #23366 --- doc/source/reference/arrays.dtypes.rst | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/doc/source/reference/arrays.dtypes.rst b/doc/source/reference/arrays.dtypes.rst index 3f2b6d9d0e4c..2f317cc01fc8 100644 --- a/doc/source/reference/arrays.dtypes.rst +++ b/doc/source/reference/arrays.dtypes.rst @@ -230,12 +230,12 @@ One-character strings Array-protocol type strings (see :ref:`arrays.interface`) The first character specifies the kind of data and the remaining characters specify the number of bytes per item, except for Unicode, - where it is interpreted as the number of characters. The item size - must correspond to an existing type, or an error will be raised. The - supported kinds are + where it is interpreted as the number of characters, and except ``b1`` + which represents boolean. The item size must correspond to an existing + type, or an error will be raised. The supported kinds are - ================ ======================== - ``'?'`` boolean + ================= ======================== + ``'?'``, ``'b1'`` boolean ``'b'`` (signed) byte ``'B'`` unsigned byte ``'i'`` (signed) integer @@ -248,7 +248,7 @@ Array-protocol type strings (see :ref:`arrays.interface`) ``'S'``, ``'a'`` zero-terminated bytes (not recommended) ``'U'`` Unicode string ``'V'`` raw data (:class:`void`) - ================ ======================== + ================= ======================== .. 
admonition:: Example From 8efcbdd8ed7a7753e43cb0ca8240aa52d905ffe3 Mon Sep 17 00:00:00 2001 From: fengluo Date: Sat, 26 Oct 2024 10:37:17 +0800 Subject: [PATCH 0002/1718] delete unused macro in string_buffer.h --- numpy/_core/src/umath/string_buffer.h | 3 --- 1 file changed, 3 deletions(-) diff --git a/numpy/_core/src/umath/string_buffer.h b/numpy/_core/src/umath/string_buffer.h index ae89ede46ddc..f281342b2277 100644 --- a/numpy/_core/src/umath/string_buffer.h +++ b/numpy/_core/src/umath/string_buffer.h @@ -14,9 +14,6 @@ #include "string_fastsearch.h" #include "gil_utils.h" -#define CHECK_OVERFLOW(index) if (buf + (index) >= after) return 0 -#define MSB(val) ((val) >> 7 & 1) - enum class ENCODING { ASCII, UTF32, UTF8 From 0d0709421a497d49e3c1cf0a6d332c5d432b4ef6 Mon Sep 17 00:00:00 2001 From: UV Date: Sat, 11 Jan 2025 08:57:00 +0530 Subject: [PATCH 0003/1718] DOC: Fix ambiguity in polyfit description --- numpy/lib/_polynomial_impl.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/lib/_polynomial_impl.py b/numpy/lib/_polynomial_impl.py index 420e357eb354..d919373ea1f4 100644 --- a/numpy/lib/_polynomial_impl.py +++ b/numpy/lib/_polynomial_impl.py @@ -457,7 +457,7 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): A summary of the differences can be found in the :doc:`transition guide `. - Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg` + Fit a polynomial ``f(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg` to points `(x, y)`. Returns a vector of coefficients `p` that minimises the squared error in the order `deg`, `deg-1`, ... `0`. From 1c577f4d5744fa49bc0b784e066860ae82cfb4de Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Sat, 19 Apr 2025 20:33:25 +0200 Subject: [PATCH 0004/1718] Fix NPY_FINLINE macro for C++ class methods Remove 'static' qualifier in C++ mode to allow NPY_FINLINE usage within class methods. 
--- numpy/_core/include/numpy/npy_common.h | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/numpy/_core/include/numpy/npy_common.h b/numpy/_core/include/numpy/npy_common.h index 79ad8ad78cb2..96b6ce67bf64 100644 --- a/numpy/_core/include/numpy/npy_common.h +++ b/numpy/_core/include/numpy/npy_common.h @@ -98,11 +98,23 @@ #endif #ifdef _MSC_VER - #define NPY_FINLINE static __forceinline + #ifdef __cplusplus + #define NPY_FINLINE __forceinline + #else + #define NPY_FINLINE static __forceinline + #endif #elif defined(__GNUC__) - #define NPY_FINLINE static inline __attribute__((always_inline)) + #ifdef __cplusplus + #define NPY_FINLINE inline __attribute__((always_inline)) + #else + #define NPY_FINLINE static inline __attribute__((always_inline)) + #endif #else - #define NPY_FINLINE static + #ifdef __cplusplus + #define NPY_FINLINE inline + #else + #define NPY_FINLINE static NPY_INLINE + #endif #endif #if defined(_MSC_VER) From c952b9a82a120934793efc4e30ffca3858d04397 Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Fri, 24 Nov 2023 11:01:14 +0200 Subject: [PATCH 0005/1718] ENH: Enable native half-precision scalar conversion operations on ARM --- numpy/_core/src/common/half.hpp | 81 ++++++--------------------- numpy/_core/src/npymath/halffloat.cpp | 28 ++------- 2 files changed, 20 insertions(+), 89 deletions(-) diff --git a/numpy/_core/src/common/half.hpp b/numpy/_core/src/common/half.hpp index 484750ad84cd..14dabbe79d7f 100644 --- a/numpy/_core/src/common/half.hpp +++ b/numpy/_core/src/common/half.hpp @@ -9,8 +9,6 @@ // TODO(@seiko2plus): // - covers half-precision operations that being supported by numpy/halffloat.h // - add support for arithmetic operations -// - enables __fp16 causes massive FP exceptions on aarch64, -// needs a deep investigation namespace np { @@ -19,42 +17,19 @@ namespace np { /// Provides a type that implements 16-bit floating point (half-precision). /// This type is ensured to be 16-bit size. 
-#if 1 // ndef __ARM_FP16_FORMAT_IEEE class Half final { public: - /// Whether `Half` has a full native HW support. - static constexpr bool kNative = false; - /// Whether `Half` has a native HW support for single/double conversion. - template - static constexpr bool kNativeConversion = ( - ( - std::is_same_v && - #if defined(NPY_HAVE_FP16) || defined(NPY_HAVE_VSX3) - true - #else - false - #endif - ) || ( - std::is_same_v && - #if defined(NPY_HAVE_AVX512FP16) || (defined(NPY_HAVE_VSX3) && defined(NPY_HAVE_VSX3_HALF_DOUBLE)) - true - #else - false - #endif - ) - ); - /// Default constructor. initialize nothing. Half() = default; /// Construct from float /// If there are no hardware optimization available, rounding will always /// be set to ties to even. - explicit Half(float f) + NPY_FINLINE explicit Half(float f) { #if defined(NPY_HAVE_FP16) __m128 mf = _mm_load_ss(&f); - bits_ = static_cast(_mm_cvtsi128_si32(_mm_cvtps_ph(mf, _MM_FROUND_TO_NEAREST_INT))); + bits_ = _mm_extract_epi16(_mm_cvtps_ph(mf, _MM_FROUND_TO_NEAREST_INT), 0); #elif defined(NPY_HAVE_VSX3) && defined(NPY_HAVE_VSX_ASM) __vector float vf32 = vec_splats(f); __vector unsigned short vf16; @@ -64,6 +39,9 @@ class Half final { #else bits_ = vec_extract(vf16, 0); #endif + #elif defined(__ARM_FP16_FORMAT_IEEE) + __fp16 f16 = __fp16(f); + bits_ = BitCast(f16); #else bits_ = half_private::FromFloatBits(BitCast(f)); #endif @@ -72,20 +50,23 @@ class Half final { /// Construct from double. /// If there are no hardware optimization available, rounding will always /// be set to ties to even. 
- explicit Half(double f) + NPY_FINLINE explicit Half(double f) { #if defined(NPY_HAVE_AVX512FP16) __m128d md = _mm_load_sd(&f); - bits_ = static_cast(_mm_cvtsi128_si32(_mm_castph_si128(_mm_cvtpd_ph(md)))); + bits_ = _mm_extract_epi16(_mm_castph_si128(_mm_cvtpd_ph(md)), 0); #elif defined(NPY_HAVE_VSX3) && defined(NPY_HAVE_VSX3_HALF_DOUBLE) __asm__ __volatile__ ("xscvdphp %x0,%x1" : "=wa" (bits_) : "wa" (f)); + #elif defined(__ARM_FP16_FORMAT_IEEE) + __fp16 f16 = __fp16(f); + bits_ = BitCast(f16); #else bits_ = half_private::FromDoubleBits(BitCast(f)); #endif } /// Cast to float - explicit operator float() const + NPY_FINLINE explicit operator float() const { #if defined(NPY_HAVE_FP16) float ret; @@ -99,13 +80,15 @@ class Half final { : "=wa"(vf32) : "wa"(vec_splats(bits_))); return vec_extract(vf32, 0); + #elif defined(__ARM_FP16_FORMAT_IEEE) + return float(BitCast<__fp16>(bits_)); #else return BitCast(half_private::ToFloatBits(bits_)); #endif } /// Cast to double - explicit operator double() const + NPY_FINLINE explicit operator double() const { #if defined(NPY_HAVE_AVX512FP16) double ret; @@ -117,6 +100,8 @@ class Half final { : "=wa"(f64) : "wa"(bits_)); return f64; + #elif defined(__ARM_FP16_FORMAT_IEEE) + return double(BitCast<__fp16>(bits_)); #else return BitCast(half_private::ToDoubleBits(bits_)); #endif @@ -223,40 +208,6 @@ class Half final { private: uint16_t bits_; }; -#else // __ARM_FP16_FORMAT_IEEE -class Half final { - public: - static constexpr bool kNative = true; - template - static constexpr bool kNativeConversion = ( - std::is_same_v || std::is_same_v - ); - Half() = default; - constexpr Half(__fp16 h) : half_(h) - {} - constexpr operator __fp16() const - { return half_; } - static Half FromBits(uint16_t bits) - { - Half h; - h.half_ = BitCast<__fp16>(bits); - return h; - } - uint16_t Bits() const - { return BitCast(half_); } - constexpr bool Less(Half r) const - { return half_ < r.half_; } - constexpr bool LessEqual(Half r) const - { return half_ 
<= r.half_; } - constexpr bool Equal(Half r) const - { return half_ == r.half_; } - constexpr bool IsNaN() const - { return half_ != half_; } - - private: - __fp16 half_; -}; -#endif // __ARM_FP16_FORMAT_IEEE /// @} cpp_core_types diff --git a/numpy/_core/src/npymath/halffloat.cpp b/numpy/_core/src/npymath/halffloat.cpp index aa582c1b9517..9289a659f5f5 100644 --- a/numpy/_core/src/npymath/halffloat.cpp +++ b/numpy/_core/src/npymath/halffloat.cpp @@ -198,41 +198,21 @@ npy_half npy_half_divmod(npy_half h1, npy_half h2, npy_half *modulus) npy_uint16 npy_floatbits_to_halfbits(npy_uint32 f) { - if constexpr (Half::kNativeConversion) { - return BitCast(Half(BitCast(f))); - } - else { - return half_private::FromFloatBits(f); - } + return BitCast(Half(BitCast(f))); } npy_uint16 npy_doublebits_to_halfbits(npy_uint64 d) { - if constexpr (Half::kNativeConversion) { - return BitCast(Half(BitCast(d))); - } - else { - return half_private::FromDoubleBits(d); - } + return BitCast(Half(BitCast(d))); } npy_uint32 npy_halfbits_to_floatbits(npy_uint16 h) { - if constexpr (Half::kNativeConversion) { - return BitCast(static_cast(Half::FromBits(h))); - } - else { - return half_private::ToFloatBits(h); - } + return BitCast(static_cast(Half::FromBits(h))); } npy_uint64 npy_halfbits_to_doublebits(npy_uint16 h) { - if constexpr (Half::kNativeConversion) { - return BitCast(static_cast(Half::FromBits(h))); - } - else { - return half_private::ToDoubleBits(h); - } + return BitCast(static_cast(Half::FromBits(h))); } From 94039f36c381a7ed214b1f95890c7a811b11eba2 Mon Sep 17 00:00:00 2001 From: Marco Barbosa Date: Thu, 15 May 2025 11:01:41 -0300 Subject: [PATCH 0006/1718] DOC: improves np.fromfile file description (#28840) --- numpy/_core/_add_newdocs.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index 65a29476a075..ea851e16e751 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -1528,7 
+1528,9 @@ Parameters ---------- file : file or str or Path - Open file object or filename. + An open file object, a string containing the filename, or a Path object. + When reading from a file object it must support random access + (i.e. it must have tell and seek methods). dtype : data-type Data type of the returned array. For binary files, it is used to determine the size and byte-order From db5535a0a565b6847a9054b40c8941dd46ec75de Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Fri, 16 May 2025 12:39:17 +0200 Subject: [PATCH 0007/1718] MNT: constant string arrays instead of pointers in C --- numpy/_core/src/common/npy_cpu_features.c | 2 +- numpy/_core/src/multiarray/compiled_base.c | 6 +++--- numpy/_core/src/multiarray/stringdtype/casts.cpp | 16 ++++++++-------- .../src/multiarray/stringdtype/static_string.c | 2 +- numpy/f2py/rules.py | 2 +- 5 files changed, 14 insertions(+), 14 deletions(-) diff --git a/numpy/_core/src/common/npy_cpu_features.c b/numpy/_core/src/common/npy_cpu_features.c index 8810182812e5..e1fd8404d3bf 100644 --- a/numpy/_core/src/common/npy_cpu_features.c +++ b/numpy/_core/src/common/npy_cpu_features.c @@ -277,7 +277,7 @@ npy__cpu_check_env(int disable, const char *env) { char *notsupp_cur = ¬supp[0]; //comma and space including (htab, vtab, CR, LF, FF) - const char *delim = ", \t\v\r\n\f"; + const char delim[] = ", \t\v\r\n\f"; char *feature = strtok(features, delim); while (feature) { if (npy__cpu_baseline_fid(feature) > 0){ diff --git a/numpy/_core/src/multiarray/compiled_base.c b/numpy/_core/src/multiarray/compiled_base.c index 26b898fa1479..86b60cf75944 100644 --- a/numpy/_core/src/multiarray/compiled_base.c +++ b/numpy/_core/src/multiarray/compiled_base.c @@ -920,11 +920,11 @@ arr_interp_complex(PyObject *NPY_UNUSED(self), PyObject *const *args, Py_ssize_t return NULL; } -static const char *EMPTY_SEQUENCE_ERR_MSG = "indices must be integral: the provided " \ +static const 
char EMPTY_SEQUENCE_ERR_MSG[] = "indices must be integral: the provided " \ "empty sequence was inferred as float. Wrap it with " \ "'np.array(indices, dtype=np.intp)'"; -static const char *NON_INTEGRAL_ERROR_MSG = "only int indices permitted"; +static const char NON_INTEGRAL_ERROR_MSG[] = "only int indices permitted"; /* Convert obj to an ndarray with integer dtype or fail */ static PyArrayObject * @@ -1465,7 +1465,7 @@ arr_add_docstring(PyObject *NPY_UNUSED(dummy), PyObject *const *args, Py_ssize_t PyObject *obj; PyObject *str; const char *docstr; - static char *msg = "already has a different docstring"; + static const char msg[] = "already has a different docstring"; /* Don't add docstrings */ #if PY_VERSION_HEX > 0x030b0000 diff --git a/numpy/_core/src/multiarray/stringdtype/casts.cpp b/numpy/_core/src/multiarray/stringdtype/casts.cpp index f66727501f97..c69818173e24 100644 --- a/numpy/_core/src/multiarray/stringdtype/casts.cpp +++ b/numpy/_core/src/multiarray/stringdtype/casts.cpp @@ -605,7 +605,7 @@ load_non_nullable_string(char *in, int has_null, const npy_static_string *defaul const npy_packed_static_string *ps = (npy_packed_static_string *)in; int isnull = NpyString_load(allocator, ps, string_to_load); if (isnull == -1) { - const char *msg = "Failed to load string for conversion to a non-nullable type"; + const char msg[] = "Failed to load string for conversion to a non-nullable type"; if (has_gil) { PyErr_SetString(PyExc_MemoryError, msg); @@ -617,7 +617,7 @@ load_non_nullable_string(char *in, int has_null, const npy_static_string *defaul } else if (isnull) { if (has_null) { - const char *msg = "Arrays with missing data cannot be converted to a non-nullable type"; + const char msg[] = "Arrays with missing data cannot be converted to a non-nullable type"; if (has_gil) { PyErr_SetString(PyExc_ValueError, msg); @@ -821,8 +821,8 @@ static PyType_Slot s2int_slots[] = { static const char * make_s2type_name(NPY_TYPES typenum) { - const char *prefix = 
"cast_StringDType_to_"; - size_t plen = strlen(prefix); + const char prefix[] = "cast_StringDType_to_"; + size_t plen = sizeof(prefix)/sizeof(char) - 1; const char *type_name = typenum_to_cstr(typenum); size_t nlen = strlen(type_name); @@ -842,14 +842,14 @@ make_s2type_name(NPY_TYPES typenum) { static const char * make_type2s_name(NPY_TYPES typenum) { - const char *prefix = "cast_"; - size_t plen = strlen(prefix); + const char prefix[] = "cast_"; + size_t plen = sizeof(prefix)/sizeof(char) - 1; const char *type_name = typenum_to_cstr(typenum); size_t nlen = strlen(type_name); - const char *suffix = "_to_StringDType"; - size_t slen = strlen(suffix); + const char suffix[] = "_to_StringDType"; + size_t slen = sizeof(prefix)/sizeof(char) - 1; char *buf = (char *)PyMem_RawCalloc(sizeof(char), plen + nlen + slen + 1); diff --git a/numpy/_core/src/multiarray/stringdtype/static_string.c b/numpy/_core/src/multiarray/stringdtype/static_string.c index 02ab7d246a7a..1c29bbb67f7e 100644 --- a/numpy/_core/src/multiarray/stringdtype/static_string.c +++ b/numpy/_core/src/multiarray/stringdtype/static_string.c @@ -404,7 +404,7 @@ NpyString_release_allocators(size_t length, npy_string_allocator *allocators[]) } } -static const char * const EMPTY_STRING = ""; +static const char EMPTY_STRING[] = ""; /*NUMPY_API * Extract the packed contents of *packed_string* into *unpacked_string*. 
diff --git a/numpy/f2py/rules.py b/numpy/f2py/rules.py index 4122f0a49f17..c10d2afdd097 100644 --- a/numpy/f2py/rules.py +++ b/numpy/f2py/rules.py @@ -1154,7 +1154,7 @@ 'frompyobj': [ ' #setdims#;', ' capi_#varname#_intent |= #intent#;', - (' const char * capi_errmess = "#modulename#.#pyname#:' + (' const char capi_errmess[] = "#modulename#.#pyname#:' ' failed to create array from the #nth# `#varname#`";'), {isintent_hide: ' capi_#varname#_as_array = ndarray_from_pyobj(' From a5401fd0599aa7cb7044c6983d0d7df17d4ed06f Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Fri, 16 May 2025 12:51:22 +0200 Subject: [PATCH 0008/1718] MNT: alternative handling of trailing null character Trying to avoid this compiler warning: ../numpy/_core/src/multiarray/stringdtype/casts.cpp:860:12: warning: 'char* strncat(char*, const char*, size_t)' specified bound 15 equals source length [-Wstringop-overflow=] 860 | strncat(buf, suffix, slen); | ~~~~~~~^~~~~~~~~~~~~~~~~~~ --- .../src/multiarray/stringdtype/casts.cpp | 23 +++++++++++-------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/numpy/_core/src/multiarray/stringdtype/casts.cpp b/numpy/_core/src/multiarray/stringdtype/casts.cpp index c69818173e24..3632e359c9a9 100644 --- a/numpy/_core/src/multiarray/stringdtype/casts.cpp +++ b/numpy/_core/src/multiarray/stringdtype/casts.cpp @@ -833,10 +833,12 @@ make_s2type_name(NPY_TYPES typenum) { return NULL; } - // memcpy instead of strcpy to avoid stringop-truncation warning, since - // we are not including the trailing null character - memcpy(buf, prefix, plen); - strncat(buf, type_name, nlen); + // memcpy instead of strcpy/strncat to avoid stringop-truncation warning, + // since we are not including the trailing null character + char *p = buf; + memcpy(p, prefix, plen); + p += plen; + memcpy(p, type_name, nlen); return buf; } @@ -853,11 +855,14 @@ make_type2s_name(NPY_TYPES typenum) { char *buf = (char 
*)PyMem_RawCalloc(sizeof(char), plen + nlen + slen + 1); - // memcpy instead of strcpy to avoid stringop-truncation warning, since - // we are not including the trailing null character - memcpy(buf, prefix, plen); - strncat(buf, type_name, nlen); - strncat(buf, suffix, slen); + // memcpy instead of strcpy/strncat to avoid stringop-truncation warning, + // since we are not including the trailing null character + char *p = buf; + memcpy(p, prefix, plen); + p += plen; + memcpy(p, type_name, nlen); + p += nlen; + memcpy(p, suffix, slen); return buf; } From 22132f1522f8ac3d278a3773fe071b699df3fc31 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Fri, 16 May 2025 13:23:05 +0200 Subject: [PATCH 0009/1718] MNT: more const'ness for arrays of string literals in C --- numpy/_core/src/common/npy_cpu_features.c | 2 +- numpy/_core/src/umath/string_ufuncs.cpp | 12 ++++++------ numpy/_core/src/umath/stringdtype_ufuncs.cpp | 10 +++++----- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/numpy/_core/src/common/npy_cpu_features.c b/numpy/_core/src/common/npy_cpu_features.c index e1fd8404d3bf..f15f636cdb1e 100644 --- a/numpy/_core/src/common/npy_cpu_features.c +++ b/numpy/_core/src/common/npy_cpu_features.c @@ -246,7 +246,7 @@ npy__cpu_validate_baseline(void) static int npy__cpu_check_env(int disable, const char *env) { - static const char *names[] = { + static const char *const names[] = { "enable", "disable", "NPY_ENABLE_CPU_FEATURES", "NPY_DISABLE_CPU_FEATURES", "During parsing environment variable: 'NPY_ENABLE_CPU_FEATURES':\n", diff --git a/numpy/_core/src/umath/string_ufuncs.cpp b/numpy/_core/src/umath/string_ufuncs.cpp index 5b4b67cda625..ffd757815364 100644 --- a/numpy/_core/src/umath/string_ufuncs.cpp +++ b/numpy/_core/src/umath/string_ufuncs.cpp @@ -1521,7 +1521,7 @@ init_string_ufuncs(PyObject *umath) dtypes[0] = NPY_OBJECT; dtypes[1] = NPY_BOOL; - const char *unary_buffer_method_names[] = { + 
const char *const unary_buffer_method_names[] = { "isalpha", "isalnum", "isdigit", "isspace", "islower", "isupper", "istitle", "isdecimal", "isnumeric", }; @@ -1635,7 +1635,7 @@ init_string_ufuncs(PyObject *umath) dtypes[2] = dtypes[3] = NPY_INT64; dtypes[4] = NPY_BOOL; - const char *startswith_endswith_names[] = { + const char *const startswith_endswith_names[] = { "startswith", "endswith" }; @@ -1664,7 +1664,7 @@ init_string_ufuncs(PyObject *umath) dtypes[0] = dtypes[1] = NPY_OBJECT; - const char *strip_whitespace_names[] = { + const char *const strip_whitespace_names[] = { "_lstrip_whitespace", "_rstrip_whitespace", "_strip_whitespace" }; @@ -1691,7 +1691,7 @@ init_string_ufuncs(PyObject *umath) dtypes[0] = dtypes[1] = dtypes[2] = NPY_OBJECT; - const char *strip_chars_names[] = { + const char *const strip_chars_names[] = { "_lstrip_chars", "_rstrip_chars", "_strip_chars" }; @@ -1750,7 +1750,7 @@ init_string_ufuncs(PyObject *umath) dtypes[1] = NPY_INT64; - const char *center_ljust_rjust_names[] = { + const char *const center_ljust_rjust_names[] = { "_center", "_ljust", "_rjust" }; @@ -1827,7 +1827,7 @@ init_string_ufuncs(PyObject *umath) dtypes[0] = dtypes[1] = dtypes[3] = dtypes[4] = dtypes[5] = NPY_OBJECT; dtypes[2] = NPY_INT64; - const char *partition_names[] = {"_partition_index", "_rpartition_index"}; + const char *const partition_names[] = {"_partition_index", "_rpartition_index"}; static STARTPOSITION partition_startpositions[] = { STARTPOSITION::FRONT, STARTPOSITION::BACK diff --git a/numpy/_core/src/umath/stringdtype_ufuncs.cpp b/numpy/_core/src/umath/stringdtype_ufuncs.cpp index 37ae0a39a349..adcbfd3b7480 100644 --- a/numpy/_core/src/umath/stringdtype_ufuncs.cpp +++ b/numpy/_core/src/umath/stringdtype_ufuncs.cpp @@ -2605,7 +2605,7 @@ add_object_and_unicode_promoters(PyObject *umath, const char* ufunc_name, NPY_NO_EXPORT int init_stringdtype_ufuncs(PyObject *umath) { - static const char *comparison_ufunc_names[6] = { + static const char *const 
comparison_ufunc_names[6] = { "equal", "not_equal", "less", "less_equal", "greater_equal", "greater", }; @@ -2654,7 +2654,7 @@ init_stringdtype_ufuncs(PyObject *umath) return -1; } - const char *unary_loop_names[] = { + const char *const unary_loop_names[] = { "isalpha", "isdecimal", "isdigit", "isnumeric", "isspace", "isalnum", "istitle", "isupper", "islower", }; @@ -2874,7 +2874,7 @@ init_stringdtype_ufuncs(PyObject *umath) &PyArray_StringDType, &PyArray_StringDType }; - const char *strip_whitespace_names[] = { + const char *const strip_whitespace_names[] = { "_lstrip_whitespace", "_rstrip_whitespace", "_strip_whitespace", }; @@ -2898,7 +2898,7 @@ init_stringdtype_ufuncs(PyObject *umath) &PyArray_StringDType, &PyArray_StringDType, &PyArray_StringDType }; - const char *strip_chars_names[] = { + const char *const strip_chars_names[] = { "_lstrip_chars", "_rstrip_chars", "_strip_chars", }; @@ -3082,7 +3082,7 @@ init_stringdtype_ufuncs(PyObject *umath) &PyArray_StringDType }; - const char *partition_names[] = {"_partition", "_rpartition"}; + const char *const partition_names[] = {"_partition", "_rpartition"}; static STARTPOSITION partition_startpositions[] = { STARTPOSITION::FRONT, STARTPOSITION::BACK From 92f2622e376e05620fd4e63fb453bc5d1e5b160a Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Tue, 1 Apr 2025 22:58:10 +0200 Subject: [PATCH 0010/1718] ENH, SIMD: Initial implementation of Highway wrapper A thin wrapper over Google's Highway SIMD library to simplify its interface. This commit provides the implementation of that wrapper, consisting of: - simd.hpp: Main header defining the SIMD namespaces and configuration - simd.inc.hpp: Template header included multiple times with different namespaces The wrapper eliminates Highway's class tags by: - Using lane types directly which can be deduced from arguments - Leveraging namespaces (np::simd and np::simd128) for different register widths A README is included to guide usage and document design decisions. 
--- numpy/_core/src/common/simd/README.md | 258 +++++++++++++++++++++++ numpy/_core/src/common/simd/simd.hpp | 80 +++++++ numpy/_core/src/common/simd/simd.inc.hpp | 102 +++++++++ 3 files changed, 440 insertions(+) create mode 100644 numpy/_core/src/common/simd/README.md create mode 100644 numpy/_core/src/common/simd/simd.hpp create mode 100644 numpy/_core/src/common/simd/simd.inc.hpp diff --git a/numpy/_core/src/common/simd/README.md b/numpy/_core/src/common/simd/README.md new file mode 100644 index 000000000000..ac58c4a6bd94 --- /dev/null +++ b/numpy/_core/src/common/simd/README.md @@ -0,0 +1,258 @@ +# NumPy SIMD Wrapper for Highway + +This directory contains a lightweight C++ wrapper over Google's [Highway](https://github.com/google/highway) SIMD library, designed specifically for NumPy's needs. + +> **Note**: This directory also contains the C interface of universal intrinsics (under `simd.h`) which is no longer supported. The Highway wrapper described in this document should be used instead for all new SIMD code. + +## Overview + +The wrapper simplifies Highway's SIMD interface by eliminating class tags and using lane types directly, which can be deduced from arguments in most cases. This design makes the SIMD code more intuitive and easier to maintain while still leveraging Highway generic intrinsics. + +## Architecture + +The wrapper consists of two main headers: + +1. `simd.hpp`: The main header that defines namespaces and includes configuration macros +2. `simd.inc.hpp`: Implementation details included by `simd.hpp` multiple times for different namespaces + +Additionally, this directory contains legacy C interface files for universal intrinsics (`simd.h` and related files) which are deprecated and should not be used for new code. All new SIMD code should use the Highway wrapper. + + +## Usage + +### Basic Usage + +```cpp +#include "simd/simd.hpp" + +// Use np::simd for maximum width SIMD operations +using namespace np::simd; +float *data = /* ... 
*/; +Vec v = LoadU(data); +v = Add(v, v); +StoreU(v, data); + +// Use np::simd128 for fixed 128-bit SIMD operations +using namespace np::simd128; +Vec v128 = LoadU(data); +v128 = Add(v128, v128); +StoreU(v128, data); +``` + +### Checking for SIMD Support + +```cpp +#include "simd/simd.hpp" + +// Check if SIMD is enabled +#if NPY_SIMDX + // SIMD code +#else + // Scalar fallback code +#endif + +// Check for float64 support +#if NPY_SIMDX_F64 + // Use float64 SIMD operations +#endif + +// Check for FMA support +#if NPY_SIMDX_FMA + // Use FMA operations +#endif +``` + +## Type Support and Constraints + +The wrapper provides type constraints to help with SFINAE (Substitution Failure Is Not An Error) and compile-time type checking: + +- `kSupportLane`: Determines whether the specified lane type is supported by the SIMD extension. + ```cpp + // Base template - always defined, even when SIMD is not enabled (for SFINAE) + template + constexpr bool kSupportLane = NPY_SIMDX != 0; + template <> + constexpr bool kSupportLane = NPY_SIMDX_F64 != 0; + ``` + + +```cpp +#include "simd/simd.hpp" + +// Check if float64 operations are supported +if constexpr (np::simd::kSupportLane) { + // Use float64 operations +} +``` + +These constraints allow for compile-time checking of which lane types are supported, which can be used in SFINAE contexts to enable or disable functions based on type support. 
+ +## Available Operations + +The wrapper provides the following common operations that are used in NumPy: + +- Vector creation operations: + - `Zero`: Returns a vector with all lanes set to zero + - `Set`: Returns a vector with all lanes set to the given value + - `Undefined`: Returns an uninitialized vector + +- Memory operations: + - `LoadU`: Unaligned load of a vector from memory + - `StoreU`: Unaligned store of a vector to memory + +- Vector information: + - `Lanes`: Returns the number of vector lanes based on the lane type + +- Type conversion: + - `BitCast`: Reinterprets a vector to a different type without modifying the underlying data + - `VecFromMask`: Converts a mask to a vector + +- Comparison operations: + - `Eq`: Element-wise equality comparison + - `Le`: Element-wise less than or equal comparison + - `Lt`: Element-wise less than comparison + - `Gt`: Element-wise greater than comparison + - `Ge`: Element-wise greater than or equal comparison + +- Arithmetic operations: + - `Add`: Element-wise addition + - `Sub`: Element-wise subtraction + - `Mul`: Element-wise multiplication + - `Div`: Element-wise division + - `Min`: Element-wise minimum + - `Max`: Element-wise maximum + - `Abs`: Element-wise absolute value + - `Sqrt`: Element-wise square root + +- Logical operations: + - `And`: Bitwise AND + - `Or`: Bitwise OR + - `Xor`: Bitwise XOR + - `AndNot`: Bitwise AND NOT (a & ~b) + +Additional Highway operations can be accessed via the `hn` namespace alias inside the `simd` or `simd128` namespaces. + +## Extending + +To add more operations from Highway: + +1. Import them in the `simd.inc.hpp` file using the `using` directive if they don't require a tag: + ```cpp + // For operations that don't require a tag + using hn::FunctionName; + ``` + +2. Define wrapper functions for intrinsics that require a class tag: + ```cpp + // For operations that require a tag + template + HWY_API ReturnType FunctionName(Args... 
args) { + return hn::FunctionName(_Tag(), args...); + } + ``` + +3. Add appropriate documentation and SFINAE constraints if needed + + +## Build Configuration + +The SIMD wrapper automatically disables SIMD operations when optimizations are disabled: + +- When `NPY_DISABLE_OPTIMIZATION` is defined, SIMD operations are disabled +- SIMD is enabled only when the Highway target is not scalar (`HWY_TARGET != HWY_SCALAR`) + +## Design Notes + +1. **Why avoid Highway scalar operations?** + - NumPy already provides kernels for scalar operations + - Compilers can better optimize standard library implementations + - Not all Highway intrinsics are fully supported in scalar mode + +2. **Legacy Universal Intrinsics** + - The older universal intrinsics C interface (in `simd.h` and accessible via `NPY_SIMD` macros) is deprecated + - All new SIMD code should use this Highway-based wrapper (accessible via `NPY_SIMDX` macros) + - The legacy code is maintained for compatibility but will eventually be removed + +3. **Feature Detection Constants vs. Highway Constants** + - NumPy-specific constants (`NPY_SIMDX_F16`, `NPY_SIMDX_F64`, `NPY_SIMDX_FMA`) provide additional safety beyond raw Highway constants + - Highway constants (e.g., `HWY_HAVE_FLOAT16`) only check hardware capabilities but don't consider NumPy's build configuration + - Our constants combine both checks: + ```cpp + #define NPY_SIMDX_F16 (NPY_SIMDX && HWY_HAVE_FLOAT16) + ``` + - This ensures SIMD features won't be used when: + - Hardware supports it but NumPy optimization is disabled via meson option: + ``` + option('disable-optimization', type: 'boolean', value: false, + description: 'Disable CPU optimized code (dispatch,simd,unroll...)') + ``` + - Highway target is scalar (`HWY_TARGET == HWY_SCALAR`) + - Using these constants ensures consistent behavior across different compilation settings + - Without this additional layer, code might incorrectly try to use SIMD paths in scalar mode + +4. 
**Namespace Design** + - `np::simd`: Maximum width SIMD operations (scalable) + - `np::simd128`: Fixed 128-bit SIMD operations + - `hn`: Highway namespace alias (available within the SIMD namespaces) + +5. **Why Namespaces and Why Not Just Use Highway Directly?** + - Highway's design uses class tag types as template parameters (e.g., `Vec>`) when defining vector types + - Many Highway functions require explicitly passing a tag instance as the first parameter + - This class tag-based approach increases verbosity and complexity in user code + - Our wrapper eliminates this by internally managing tags through namespaces, letting users directly use types e.g. `Vec` + - Simple example with raw Highway: + ```cpp + // Highway's approach + float *data = /* ... */; + + namespace hn = hwy::HWY_NAMESPACE; + using namespace hn; + + // Full-width operations + ScalableTag df; // Create a tag instance + Vec v = LoadU(df, data); // LoadU requires a tag instance + StoreU(v, df, data); // StoreU requires a tag instance + + // 128-bit operations + Full128 df128; // Create a 128-bit tag instance + Vec v128 = LoadU(df128, data); // LoadU requires a tag instance + StoreU(v128, df128, data); // StoreU requires a tag instance + ``` + + - Simple example with our wrapper: + ```cpp + // Our wrapper approach + float *data = /* ... */; + + // Full-width operations + using namespace np::simd; + Vec v = LoadU(data); // Full-width vector load + StoreU(v, data); + + // 128-bit operations + using namespace np::simd128; + Vec v128 = LoadU(data); // 128-bit vector load + StoreU(v128, data); + ``` + + - The namespaced approach simplifies code, reduces errors, and provides a more intuitive interface + - It preserves all Highway operations benefits while reducing cognitive overhead + +5. 
**Why Namespaces Are Essential for This Design?** + - Namespaces allow us to define different internal tag types (`hn::ScalableTag` in `np::simd` vs `hn::Full128` in `np::simd128`) + - This provides a consistent type-based interface (`Vec`) without requiring users to manually create tags + - Enables using the same function names (like `LoadU`) with different implementations based on SIMD width + - Without namespaces, we'd have to either reintroduce tags (defeating the purpose of the wrapper) or create different function names for each variant (e.g., `LoadU` vs `LoadU128`) + +6. **Template Type Parameters** + - `TLane`: The scalar type for each vector lane (e.g., uint8_t, float, double) + + +## Requirements + +- C++17 or later +- Google Highway library + +## License + +Same as NumPy's license diff --git a/numpy/_core/src/common/simd/simd.hpp b/numpy/_core/src/common/simd/simd.hpp new file mode 100644 index 000000000000..698da4adf865 --- /dev/null +++ b/numpy/_core/src/common/simd/simd.hpp @@ -0,0 +1,80 @@ +#ifndef NUMPY__CORE_SRC_COMMON_SIMD_SIMD_HPP_ +#define NUMPY__CORE_SRC_COMMON_SIMD_SIMD_HPP_ + +/** + * This header provides a thin wrapper over Google's Highway SIMD library. + * + * The wrapper aims to simplify the SIMD interface of Google's Highway by + * get ride of its class tags and use lane types directly which can be deduced + * from the args in most cases. + */ +/** + * Since `NPY_SIMD` is only limited to NumPy C universal intrinsics, + * `NPY_SIMDX` is defined to indicate the SIMD availability for Google's Highway + * C++ code. + * + * Highway SIMD is only available when optimization is enabled. + * When NPY_DISABLE_OPTIMIZATION is defined, SIMD operations are disabled + * and the code falls back to scalar implementations. + */ +#ifndef NPY_DISABLE_OPTIMIZATION +#include + +/** + * We avoid using Highway scalar operations for the following reasons: + * 1. 
We already provide kernels for scalar operations, so falling back to + * the NumPy implementation is more appropriate. Compilers can often + * optimize these better since they rely on standard libraries. + * 2. Not all Highway intrinsics are fully supported in scalar mode. + * + * Therefore, we only enable SIMD when the Highway target is not scalar. + */ +#define NPY_SIMDX (HWY_TARGET != HWY_SCALAR) + +// Indicates if the SIMD operations are available for float16. +#define NPY_SIMDX_F16 (NPY_SIMDX && HWY_HAVE_FLOAT16) +// Note: Highway requires SIMD extentions with native float32 support, so we don't need +// to check for it. + +// Indicates if the SIMD operations are available for float64. +#define NPY_SIMDX_F64 (NPY_SIMDX && HWY_HAVE_FLOAT64) + +// Indicates if the SIMD floating operations are natively supports fma. +#define NPY_SIMDX_FMA (NPY_SIMDX && HWY_NATIVE_FMA) + +#else +#define NPY_SIMDX 0 +#define NPY_SIMDX_F16 0 +#define NPY_SIMDX_F64 0 +#define NPY_SIMDX_FMA 0 +#endif + +namespace np { + +/// Represents the max SIMD width supported by the platform. +namespace simd { +#if NPY_SIMDX +/// The highway namespace alias. +/// We can not import all the symbols from the HWY_NAMESPACE because it will +/// conflict with the existing symbols in the numpy namespace. +namespace hn = hwy::HWY_NAMESPACE; +// internaly used by the template header +template +using _Tag = hn::ScalableTag; +#endif +#include "simd.inc.hpp" +} // namespace simd + +/// Represents the 128-bit SIMD width. 
+namespace simd128 { +#if NPY_SIMDX +namespace hn = hwy::HWY_NAMESPACE; +template +using _Tag = hn::Full128; +#endif +#include "simd.inc.hpp" +} // namespace simd128 + +} // namespace np + +#endif // NUMPY__CORE_SRC_COMMON_SIMD_SIMD_HPP_ diff --git a/numpy/_core/src/common/simd/simd.inc.hpp b/numpy/_core/src/common/simd/simd.inc.hpp new file mode 100644 index 000000000000..d052d829720a --- /dev/null +++ b/numpy/_core/src/common/simd/simd.inc.hpp @@ -0,0 +1,102 @@ +#ifndef NPY_SIMDX +#error "This is not a standalone header. Include simd.hpp instead." +#endif + +// NOTE: This file is included by simd.hpp multiple times with different namespaces +// so avoid including any headers here +// #define NPY_SIMDX 1 // uncomment to enable Highlighting + +/** + * Determines whether the specified lane type is supported by the SIMD extension. + * Always defined as false when SIMD is not enabled, so it can be used in SFINAE. + * + * @tparam TLane The lane type to check for support. + */ +template +constexpr bool kSupportLane = NPY_SIMDX != 0; + +#if NPY_SIMDX +// Define lane type support based on Highway capabilities +template <> +constexpr bool kSupportLane = HWY_HAVE_FLOAT16 != 0; +template <> +constexpr bool kSupportLane = HWY_HAVE_FLOAT64 != 0; + +/// Represents an N-lane vector based on the specified lane type. +/// @tparam TLane The scalar type for each vector lane +template +using Vec = hn::Vec<_Tag>; + +/// Represents a mask vector with boolean values or as a bitmask. +/// @tparam TLane The scalar type the mask corresponds to +template +using Mask = hn::Mask<_Tag>; + +/// Unaligned load of a vector from memory. +template +HWY_API Vec LoadU(const TLane* ptr) { + return hn::LoadU(_Tag(), ptr); +} + +/// Unaligned store of a vector to memory. +template +HWY_API void StoreU(const Vec& a, TLane* ptr) { + hn::StoreU(a, _Tag(), ptr); +} + +/// Returns the number of vector lanes based on the lane type. 
+template +HWY_API constexpr size_t Lanes(TLane tag = 0) { + return hn::Lanes(_Tag()); +} + +/// Returns an uninitialized N-lane vector. +template +HWY_API Vec Undefined(TLane tag = 0) { + return hn::Undefined(_Tag()); +} + +/// Returns N-lane vector with all lanes equal to zero. +template +HWY_API Vec Zero(TLane tag = 0) { + return hn::Zero(_Tag()); +} + +/// Returns N-lane vector with all lanes equal to the given value of type `TLane`. +template +HWY_API Vec Set(TLane val) { + return hn::Set(_Tag(), val); +} + +/// Converts a mask to a vector based on the specified lane type. +template +HWY_API Vec VecFromMask(const TMask &m) { + return hn::VecFromMask(_Tag(), m); +} + +/// Convert (Reinterpret) an N-lane vector to a different type without modifying the underlying data. +template +HWY_API Vec BitCast(const TVec &v) { + return hn::BitCast(_Tag(), v); +} + +// Import common Highway intrinsics +using hn::Eq; +using hn::Le; +using hn::Lt; +using hn::Gt; +using hn::Ge; +using hn::And; +using hn::Or; +using hn::Xor; +using hn::AndNot; +using hn::Sub; +using hn::Add; +using hn::Mul; +using hn::Div; +using hn::Min; +using hn::Max; +using hn::Abs; +using hn::Sqrt; + +#endif // NPY_SIMDX From 5d48ec32b7e4f17e9c5934f1e029c25f683dc840 Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Sat, 12 Apr 2025 19:25:27 +0200 Subject: [PATCH 0011/1718] SIMD: Update wrapper with improved docs and type support - Fix hardware/platform terminology in documentation for clarity - Add support for long double in template specializations - Add kMaxLanes constant to expose maximum vector width information - Follows clang formatting style for consistency with NumPy codebase. 
--- numpy/_core/src/common/simd/README.md | 9 ++- numpy/_core/src/common/simd/simd.inc.hpp | 70 ++++++++++++++++-------- 2 files changed, 54 insertions(+), 25 deletions(-) diff --git a/numpy/_core/src/common/simd/README.md b/numpy/_core/src/common/simd/README.md index ac58c4a6bd94..9a68d1aa1bfc 100644 --- a/numpy/_core/src/common/simd/README.md +++ b/numpy/_core/src/common/simd/README.md @@ -75,6 +75,11 @@ The wrapper provides type constraints to help with SFINAE (Substitution Failure constexpr bool kSupportLane = NPY_SIMDX_F64 != 0; ``` +- `kMaxLanes`: Maximum number of lanes supported by the SIMD extension for the specified lane type. + ```cpp + template + constexpr size_t kMaxLanes = HWY_MAX_LANES_D(_Tag); + ``` ```cpp #include "simd/simd.hpp" @@ -175,13 +180,13 @@ The SIMD wrapper automatically disables SIMD operations when optimizations are d 3. **Feature Detection Constants vs. Highway Constants** - NumPy-specific constants (`NPY_SIMDX_F16`, `NPY_SIMDX_F64`, `NPY_SIMDX_FMA`) provide additional safety beyond raw Highway constants - - Highway constants (e.g., `HWY_HAVE_FLOAT16`) only check hardware capabilities but don't consider NumPy's build configuration + - Highway constants (e.g., `HWY_HAVE_FLOAT16`) only check platform capabilities but don't consider NumPy's build configuration - Our constants combine both checks: ```cpp #define NPY_SIMDX_F16 (NPY_SIMDX && HWY_HAVE_FLOAT16) ``` - This ensures SIMD features won't be used when: - - Hardware supports it but NumPy optimization is disabled via meson option: + - Platform supports it but NumPy optimization is disabled via meson option: ``` option('disable-optimization', type: 'boolean', value: false, description: 'Disable CPU optimized code (dispatch,simd,unroll...)') diff --git a/numpy/_core/src/common/simd/simd.inc.hpp b/numpy/_core/src/common/simd/simd.inc.hpp index d052d829720a..6d12c9a3caeb 100644 --- a/numpy/_core/src/common/simd/simd.inc.hpp +++ b/numpy/_core/src/common/simd/simd.inc.hpp @@ -1,10 +1,10 @@ 
#ifndef NPY_SIMDX #error "This is not a standalone header. Include simd.hpp instead." +#define NPY_SIMDX 1 // Prevent editors from graying out the happy branch #endif // NOTE: This file is included by simd.hpp multiple times with different namespaces // so avoid including any headers here -// #define NPY_SIMDX 1 // uncomment to enable Highlighting /** * Determines whether the specified lane type is supported by the SIMD extension. @@ -21,6 +21,13 @@ template <> constexpr bool kSupportLane = HWY_HAVE_FLOAT16 != 0; template <> constexpr bool kSupportLane = HWY_HAVE_FLOAT64 != 0; +template <> +constexpr bool kSupportLane = + HWY_HAVE_FLOAT64 != 0 && sizeof(long double) == sizeof(double); + +/// Maximum number of lanes supported by the SIMD extension for the specified lane type. +template +constexpr size_t kMaxLanes = HWY_MAX_LANES_D(_Tag); /// Represents an N-lane vector based on the specified lane type. /// @tparam TLane The scalar type for each vector lane @@ -34,69 +41,86 @@ using Mask = hn::Mask<_Tag>; /// Unaligned load of a vector from memory. template -HWY_API Vec LoadU(const TLane* ptr) { +HWY_API Vec +LoadU(const TLane *ptr) +{ return hn::LoadU(_Tag(), ptr); } /// Unaligned store of a vector to memory. template -HWY_API void StoreU(const Vec& a, TLane* ptr) { +HWY_API void +StoreU(const Vec &a, TLane *ptr) +{ hn::StoreU(a, _Tag(), ptr); } /// Returns the number of vector lanes based on the lane type. template -HWY_API constexpr size_t Lanes(TLane tag = 0) { +HWY_API constexpr size_t +Lanes(TLane tag = 0) +{ return hn::Lanes(_Tag()); } /// Returns an uninitialized N-lane vector. template -HWY_API Vec Undefined(TLane tag = 0) { +HWY_API Vec +Undefined(TLane tag = 0) +{ return hn::Undefined(_Tag()); } /// Returns N-lane vector with all lanes equal to zero. template -HWY_API Vec Zero(TLane tag = 0) { +HWY_API Vec +Zero(TLane tag = 0) +{ return hn::Zero(_Tag()); } /// Returns N-lane vector with all lanes equal to the given value of type `TLane`. 
template -HWY_API Vec Set(TLane val) { +HWY_API Vec +Set(TLane val) +{ return hn::Set(_Tag(), val); } /// Converts a mask to a vector based on the specified lane type. template -HWY_API Vec VecFromMask(const TMask &m) { +HWY_API Vec +VecFromMask(const TMask &m) +{ return hn::VecFromMask(_Tag(), m); } -/// Convert (Reinterpret) an N-lane vector to a different type without modifying the underlying data. +/// Convert (Reinterpret) an N-lane vector to a different type without modifying the +/// underlying data. template -HWY_API Vec BitCast(const TVec &v) { +HWY_API Vec +BitCast(const TVec &v) +{ return hn::BitCast(_Tag(), v); } // Import common Highway intrinsics -using hn::Eq; -using hn::Le; -using hn::Lt; -using hn::Gt; -using hn::Ge; +using hn::Abs; +using hn::Add; using hn::And; -using hn::Or; -using hn::Xor; using hn::AndNot; -using hn::Sub; -using hn::Add; -using hn::Mul; using hn::Div; -using hn::Min; +using hn::Eq; +using hn::Ge; +using hn::Gt; +using hn::Le; +using hn::Lt; using hn::Max; -using hn::Abs; +using hn::Min; +using hn::Mul; +using hn::Or; using hn::Sqrt; +using hn::Sub; +using hn::Xor; -#endif // NPY_SIMDX +#endif // NPY_SIMDX From e099bba5cba528036593321e539c6565a5e69765 Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Thu, 17 Apr 2025 04:37:59 +0200 Subject: [PATCH 0012/1718] SIMD: Improve isolation and constexpr handling in wrapper - Add anonymous namespace around implementation to ensure each translation unit gets its own constants based on local flags - Use HWY_LANES_CONSTEXPR for Lanes function to ensure proper constexpr evaluation across platforms --- numpy/_core/src/common/simd/simd.inc.hpp | 8 +++++++- numpy/_core/src/umath/loops_trigonometric.dispatch.cpp | 2 ++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/numpy/_core/src/common/simd/simd.inc.hpp b/numpy/_core/src/common/simd/simd.inc.hpp index 6d12c9a3caeb..64d28bc47118 100644 --- a/numpy/_core/src/common/simd/simd.inc.hpp +++ b/numpy/_core/src/common/simd/simd.inc.hpp @@ 
-3,6 +3,10 @@ #define NPY_SIMDX 1 // Prevent editors from graying out the happy branch #endif +// Using anonymous namespace instead of inline to ensure each translation unit +// gets its own copy of constants based on local compilation flags +namespace { + // NOTE: This file is included by simd.hpp multiple times with different namespaces // so avoid including any headers here @@ -57,7 +61,7 @@ StoreU(const Vec &a, TLane *ptr) /// Returns the number of vector lanes based on the lane type. template -HWY_API constexpr size_t +HWY_API HWY_LANES_CONSTEXPR size_t Lanes(TLane tag = 0) { return hn::Lanes(_Tag()); @@ -124,3 +128,5 @@ using hn::Sub; using hn::Xor; #endif // NPY_SIMDX + +} // namespace anonymous diff --git a/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp b/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp index ae696db4cd4a..9ce2571a1528 100644 --- a/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp +++ b/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp @@ -3,7 +3,9 @@ #include "loops_utils.h" #include "simd/simd.h" +#include "simd/simd.hpp" #include + namespace hn = hwy::HWY_NAMESPACE; /* From 1fd2076a37e35b0d2f9cf84195117f65637c185d Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Thu, 17 Apr 2025 16:13:20 +0200 Subject: [PATCH 0013/1718] Update Highway submodule to latest master --- numpy/_core/src/highway | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/highway b/numpy/_core/src/highway index 0b696633f9ad..12b325bc1793 160000 --- a/numpy/_core/src/highway +++ b/numpy/_core/src/highway @@ -1 +1 @@ -Subproject commit 0b696633f9ad89497dd5532b55eaa01625ad71ca +Subproject commit 12b325bc1793dee68ab2157995a690db859fe9e0 From 80e6a30f3efc30d067d7aa28568ff854572f8a89 Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Thu, 17 Apr 2025 16:27:30 +0200 Subject: [PATCH 0014/1718] SIMD: Fix compile error by using MaxLanes instead of Lanes for array size Replace hn::Lanes(f64) with hn::MaxLanes(f64) when defining the 
index array size to fix error C2131: "expression did not evaluate to a constant". This error occurs because Lanes() isn't always constexpr compatible, especially with scalable vector extensions. MaxLanes() provides a compile-time constant value suitable for static array allocation and should be used with non-scalable SIMD extensions when defining fixed-size arrays. --- numpy/_core/src/umath/loops_hyperbolic.dispatch.cpp.src | 2 +- numpy/_core/src/umath/loops_trigonometric.dispatch.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/_core/src/umath/loops_hyperbolic.dispatch.cpp.src b/numpy/_core/src/umath/loops_hyperbolic.dispatch.cpp.src index 8c66229942ee..93d288fbdb2e 100755 --- a/numpy/_core/src/umath/loops_hyperbolic.dispatch.cpp.src +++ b/numpy/_core/src/umath/loops_hyperbolic.dispatch.cpp.src @@ -385,7 +385,7 @@ simd_tanh_f64(const double *src, npy_intp ssrc, double *dst, npy_intp sdst, npy_ vec_f64 b, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14, c15, c16; if constexpr(hn::MaxLanes(f64) == 2){ vec_f64 e0e1_0, e0e1_1; - uint64_t index[hn::Lanes(f64)]; + uint64_t index[hn::MaxLanes(f64)]; hn::StoreU(idx, u64, index); /**begin repeat diff --git a/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp b/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp index 9ce2571a1528..d298a8596cc4 100644 --- a/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp +++ b/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp @@ -186,7 +186,7 @@ simd_sincos_f32(const float *src, npy_intp ssrc, float *dst, npy_intp sdst, "larger than 256 bits."); simd_maski = ((uint8_t *)&simd_maski)[0]; #endif - float NPY_DECL_ALIGNED(NPY_SIMD_WIDTH) ip_fback[hn::Lanes(f32)]; + float NPY_DECL_ALIGNED(NPY_SIMD_WIDTH) ip_fback[hn::MaxLanes(f32)]; hn::Store(x_in, f32, ip_fback); // process elements using libc for large elements From 6519b288d66dcc7ed27cfeb664a1eb2d279d8492 Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Sat, 17 May 2025 10:18:39 
+0300 Subject: [PATCH 0015/1718] SIMD: Rename NPY_SIMDX to NPY_HWY MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Rename Highway wrapper macros for clarity: - NPY_SIMDX → NPY_HWY - NPY_SIMDX_F16 → NPY_HWY_F16 - NPY_SIMDX_F64 → NPY_HWY_F64 - NPY_SIMDX_FMA → NPY_HWY_FMA To avoids confusion with legacy SIMD macros. --- numpy/_core/src/common/simd/README.md | 16 ++++++++-------- numpy/_core/src/common/simd/simd.hpp | 22 +++++++++++----------- numpy/_core/src/common/simd/simd.inc.hpp | 12 ++++++------ 3 files changed, 25 insertions(+), 25 deletions(-) diff --git a/numpy/_core/src/common/simd/README.md b/numpy/_core/src/common/simd/README.md index 9a68d1aa1bfc..e69bab0e7ba5 100644 --- a/numpy/_core/src/common/simd/README.md +++ b/numpy/_core/src/common/simd/README.md @@ -45,19 +45,19 @@ StoreU(v128, data); #include "simd/simd.hpp" // Check if SIMD is enabled -#if NPY_SIMDX +#if NPY_HWY // SIMD code #else // Scalar fallback code #endif // Check for float64 support -#if NPY_SIMDX_F64 +#if NPY_HWY_F64 // Use float64 SIMD operations #endif // Check for FMA support -#if NPY_SIMDX_FMA +#if NPY_HWY_FMA // Use FMA operations #endif ``` @@ -70,9 +70,9 @@ The wrapper provides type constraints to help with SFINAE (Substitution Failure ```cpp // Base template - always defined, even when SIMD is not enabled (for SFINAE) template - constexpr bool kSupportLane = NPY_SIMDX != 0; + constexpr bool kSupportLane = NPY_HWY != 0; template <> - constexpr bool kSupportLane = NPY_SIMDX_F64 != 0; + constexpr bool kSupportLane = NPY_HWY_F64 != 0; ``` - `kMaxLanes`: Maximum number of lanes supported by the SIMD extension for the specified lane type. @@ -175,15 +175,15 @@ The SIMD wrapper automatically disables SIMD operations when optimizations are d 2. 
**Legacy Universal Intrinsics** - The older universal intrinsics C interface (in `simd.h` and accessible via `NPY_SIMD` macros) is deprecated - - All new SIMD code should use this Highway-based wrapper (accessible via `NPY_SIMDX` macros) + - All new SIMD code should use this Highway-based wrapper (accessible via `NPY_HWY` macros) - The legacy code is maintained for compatibility but will eventually be removed 3. **Feature Detection Constants vs. Highway Constants** - - NumPy-specific constants (`NPY_SIMDX_F16`, `NPY_SIMDX_F64`, `NPY_SIMDX_FMA`) provide additional safety beyond raw Highway constants + - NumPy-specific constants (`NPY_HWY_F16`, `NPY_HWY_F64`, `NPY_HWY_FMA`) provide additional safety beyond raw Highway constants - Highway constants (e.g., `HWY_HAVE_FLOAT16`) only check platform capabilities but don't consider NumPy's build configuration - Our constants combine both checks: ```cpp - #define NPY_SIMDX_F16 (NPY_SIMDX && HWY_HAVE_FLOAT16) + #define NPY_HWY_F16 (NPY_HWY && HWY_HAVE_FLOAT16) ``` - This ensures SIMD features won't be used when: - Platform supports it but NumPy optimization is disabled via meson option: diff --git a/numpy/_core/src/common/simd/simd.hpp b/numpy/_core/src/common/simd/simd.hpp index 698da4adf865..c4e572dc8d0f 100644 --- a/numpy/_core/src/common/simd/simd.hpp +++ b/numpy/_core/src/common/simd/simd.hpp @@ -10,7 +10,7 @@ */ /** * Since `NPY_SIMD` is only limited to NumPy C universal intrinsics, - * `NPY_SIMDX` is defined to indicate the SIMD availability for Google's Highway + * `NPY_HWY` is defined to indicate the SIMD availability for Google's Highway * C++ code. * * Highway SIMD is only available when optimization is enabled. @@ -29,31 +29,31 @@ * * Therefore, we only enable SIMD when the Highway target is not scalar. */ -#define NPY_SIMDX (HWY_TARGET != HWY_SCALAR) +#define NPY_HWY (HWY_TARGET != HWY_SCALAR) // Indicates if the SIMD operations are available for float16. 
-#define NPY_SIMDX_F16 (NPY_SIMDX && HWY_HAVE_FLOAT16) +#define NPY_HWY_F16 (NPY_HWY && HWY_HAVE_FLOAT16) // Note: Highway requires SIMD extentions with native float32 support, so we don't need // to check for it. // Indicates if the SIMD operations are available for float64. -#define NPY_SIMDX_F64 (NPY_SIMDX && HWY_HAVE_FLOAT64) +#define NPY_HWY_F64 (NPY_HWY && HWY_HAVE_FLOAT64) // Indicates if the SIMD floating operations are natively supports fma. -#define NPY_SIMDX_FMA (NPY_SIMDX && HWY_NATIVE_FMA) +#define NPY_HWY_FMA (NPY_HWY && HWY_NATIVE_FMA) #else -#define NPY_SIMDX 0 -#define NPY_SIMDX_F16 0 -#define NPY_SIMDX_F64 0 -#define NPY_SIMDX_FMA 0 +#define NPY_HWY 0 +#define NPY_HWY_F16 0 +#define NPY_HWY_F64 0 +#define NPY_HWY_FMA 0 #endif namespace np { /// Represents the max SIMD width supported by the platform. namespace simd { -#if NPY_SIMDX +#if NPY_HWY /// The highway namespace alias. /// We can not import all the symbols from the HWY_NAMESPACE because it will /// conflict with the existing symbols in the numpy namespace. @@ -67,7 +67,7 @@ using _Tag = hn::ScalableTag; /// Represents the 128-bit SIMD width. namespace simd128 { -#if NPY_SIMDX +#if NPY_HWY namespace hn = hwy::HWY_NAMESPACE; template using _Tag = hn::Full128; diff --git a/numpy/_core/src/common/simd/simd.inc.hpp b/numpy/_core/src/common/simd/simd.inc.hpp index 64d28bc47118..f4a2540927dd 100644 --- a/numpy/_core/src/common/simd/simd.inc.hpp +++ b/numpy/_core/src/common/simd/simd.inc.hpp @@ -1,6 +1,6 @@ -#ifndef NPY_SIMDX +#ifndef NPY_HWY #error "This is not a standalone header. Include simd.hpp instead." -#define NPY_SIMDX 1 // Prevent editors from graying out the happy branch +#define NPY_HWY 1 // Prevent editors from graying out the happy branch #endif // Using anonymous namespace instead of inline to ensure each translation unit @@ -17,9 +17,9 @@ namespace { * @tparam TLane The lane type to check for support. 
*/ template -constexpr bool kSupportLane = NPY_SIMDX != 0; +constexpr bool kSupportLane = NPY_HWY != 0; -#if NPY_SIMDX +#if NPY_HWY // Define lane type support based on Highway capabilities template <> constexpr bool kSupportLane = HWY_HAVE_FLOAT16 != 0; @@ -127,6 +127,6 @@ using hn::Sqrt; using hn::Sub; using hn::Xor; -#endif // NPY_SIMDX +#endif // NPY_HWY -} // namespace anonymous +} // namespace From 275e45ccc3c59759a77306031f3ca66601a70807 Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Mon, 19 May 2025 18:27:58 +0300 Subject: [PATCH 0016/1718] Disable Highway EMU128 scalar emulation Skip Highway's EMU128 in favor of NumPy's scalar implementations for due to strict IEEE 754 floating-point compliance requirements --- numpy/_core/src/common/simd/README.md | 3 +++ numpy/_core/src/common/simd/simd.hpp | 18 ++++++++++++------ 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/numpy/_core/src/common/simd/README.md b/numpy/_core/src/common/simd/README.md index e69bab0e7ba5..a13a0f75b6fc 100644 --- a/numpy/_core/src/common/simd/README.md +++ b/numpy/_core/src/common/simd/README.md @@ -165,6 +165,7 @@ The SIMD wrapper automatically disables SIMD operations when optimizations are d - When `NPY_DISABLE_OPTIMIZATION` is defined, SIMD operations are disabled - SIMD is enabled only when the Highway target is not scalar (`HWY_TARGET != HWY_SCALAR`) + and not EMU128 (`HWY_TARGET != HWY_EMU128`) ## Design Notes @@ -172,6 +173,8 @@ The SIMD wrapper automatically disables SIMD operations when optimizations are d - NumPy already provides kernels for scalar operations - Compilers can better optimize standard library implementations - Not all Highway intrinsics are fully supported in scalar mode + - For strict IEEE 754 floating-point compliance requirements, direct scalar + implementations offer more predictable behavior than EMU128 2. 
**Legacy Universal Intrinsics** - The older universal intrinsics C interface (in `simd.h` and accessible via `NPY_SIMD` macros) is deprecated diff --git a/numpy/_core/src/common/simd/simd.hpp b/numpy/_core/src/common/simd/simd.hpp index c4e572dc8d0f..40556a68c59d 100644 --- a/numpy/_core/src/common/simd/simd.hpp +++ b/numpy/_core/src/common/simd/simd.hpp @@ -22,14 +22,20 @@ /** * We avoid using Highway scalar operations for the following reasons: - * 1. We already provide kernels for scalar operations, so falling back to - * the NumPy implementation is more appropriate. Compilers can often - * optimize these better since they rely on standard libraries. - * 2. Not all Highway intrinsics are fully supported in scalar mode. * - * Therefore, we only enable SIMD when the Highway target is not scalar. + * 1. NumPy already provides optimized kernels for scalar operations. Using these + * existing implementations is more consistent with NumPy's architecture and + * allows for compiler optimizations specific to standard library calls. + * + * 2. Not all Highway intrinsics are fully supported in scalar mode, which could + * lead to compilation errors or unexpected behavior for certain operations. + * + * 3. For NumPy's strict IEEE 754 floating-point compliance requirements, direct scalar + * implementations offer more predictable behavior than EMU128. + * + * Therefore, we only enable Highway SIMD when targeting actual SIMD instruction sets. */ -#define NPY_HWY (HWY_TARGET != HWY_SCALAR) +#define NPY_HWY ((HWY_TARGET != HWY_SCALAR) && (HWY_TARGET != HWY_EMU128)) // Indicates if the SIMD operations are available for float16. 
#define NPY_HWY_F16 (NPY_HWY && HWY_HAVE_FLOAT16) From 530409d43dbfa27df91ad26752a2cc42d6cf7d37 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 19 May 2025 20:45:51 -0400 Subject: [PATCH 0017/1718] CI: update cibuildwheel to 3.0.0b1 and enable cp314 [wheel build] --- .github/workflows/wheels.yml | 7 +++++-- tools/wheels/cibw_test_command.sh | 4 ++++ 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index e96021775f3c..f502e6d64d1f 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -92,7 +92,7 @@ jobs: - [windows-2019, win_amd64, ""] - [windows-2019, win32, ""] - [windows-11-arm, win_arm64, ""] - python: ["cp311", "cp312", "cp313", "cp313t", "pp311"] + python: ["cp311", "cp312", "cp313", "cp313t", "cp314", "cp314t", "pp311"] exclude: # Don't build PyPy 32-bit windows - buildplat: [windows-2019, win32, ""] @@ -107,6 +107,8 @@ jobs: python: "pp311" - buildplat: [ macos13, macosx_x86_64, openblas ] python: "cp313t" + - buildplat: [ macos13, macosx_x86_64, openblas ] + python: "cp314t" env: IS_32_BIT: ${{ matrix.buildplat[1] == 'win32' }} @@ -175,9 +177,10 @@ jobs: fi - name: Build wheels - uses: pypa/cibuildwheel@faf86a6ed7efa889faf6996aa23820831055001a # v2.23.3 + uses: pypa/cibuildwheel@90a0ddeff0f23eebc21630e65d66d0f4955e9b94 # v3.0.0b1 env: CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} + CIBW_ENABLE: cpython-prerelease cpython-freethreading pypy - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 with: diff --git a/tools/wheels/cibw_test_command.sh b/tools/wheels/cibw_test_command.sh index 60e90ef5beb6..45dbc8a102cf 100644 --- a/tools/wheels/cibw_test_command.sh +++ b/tools/wheels/cibw_test_command.sh @@ -4,6 +4,10 @@ set -xe PROJECT_DIR="$1" +if [ -d tools ]; then + cd tools +fi + python -m pip install threadpoolctl python -c "import numpy; numpy.show_config()" From d68ba1ebfc356c4d3df8e4996d76035148573de5 Mon 
Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Tue, 20 May 2025 08:55:52 -0400 Subject: [PATCH 0018/1718] MNT: respond to code review [wheel build] --- .github/workflows/wheels.yml | 3 ++- pyproject.toml | 2 +- tools/wheels/cibw_test_command.sh | 4 +--- 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index f502e6d64d1f..bede99dd6e9a 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -180,7 +180,8 @@ jobs: uses: pypa/cibuildwheel@90a0ddeff0f23eebc21630e65d66d0f4955e9b94 # v3.0.0b1 env: CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} - CIBW_ENABLE: cpython-prerelease cpython-freethreading pypy + # delete when we switch 3.14(t)-dev to 3.14(t) + CIBW_ENABLE: cpython-prerelease - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 with: diff --git a/pyproject.toml b/pyproject.toml index b62d71cbba73..b1eba50044fe 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -148,7 +148,7 @@ before-build = "bash {project}/tools/wheels/cibw_before_build.sh {project}" config-settings = "setup-args=-Duse-ilp64=true setup-args=-Dallow-noblas=false build-dir=build" before-test = "pip install -r {project}/requirements/test_requirements.txt" test-command = "bash {project}/tools/wheels/cibw_test_command.sh {project}" -enable = ["cpython-freethreading", "pypy", "cpython-prerelease"] +enable = ["cpython-freethreading", "pypy"] [tool.cibuildwheel.linux] manylinux-x86_64-image = "manylinux_2_28" diff --git a/tools/wheels/cibw_test_command.sh b/tools/wheels/cibw_test_command.sh index 45dbc8a102cf..4763ad512b22 100644 --- a/tools/wheels/cibw_test_command.sh +++ b/tools/wheels/cibw_test_command.sh @@ -4,9 +4,7 @@ set -xe PROJECT_DIR="$1" -if [ -d tools ]; then - cd tools -fi +export PYTHONSAFEPATH=1 python -m pip install threadpoolctl python -c "import numpy; numpy.show_config()" From 198cc9aade98ef880d4d09315aef9c565ea69b52 Mon Sep 17 00:00:00 2001 
From: Nathan Goldbaum Date: Tue, 20 May 2025 09:08:21 -0400 Subject: [PATCH 0019/1718] MNT: move back to 'cd tools' hack [wheel build] --- tools/wheels/cibw_test_command.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/wheels/cibw_test_command.sh b/tools/wheels/cibw_test_command.sh index 4763ad512b22..2d39687a861b 100644 --- a/tools/wheels/cibw_test_command.sh +++ b/tools/wheels/cibw_test_command.sh @@ -4,7 +4,9 @@ set -xe PROJECT_DIR="$1" -export PYTHONSAFEPATH=1 +if [ -d tools ]; then + cd tools +fi python -m pip install threadpoolctl python -c "import numpy; numpy.show_config()" From 084f183a66cdaa888205ffeb599aff6c083178cc Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Tue, 20 May 2025 16:11:29 -0600 Subject: [PATCH 0020/1718] BEG, MAINT: Begin NumPy 2.4.0 development. - Create 2.4.0-notes.rst - Update release.rst - Update pavement.py - Update pyproject.toml - Update cversions.txt - Update numpyconfig.h - Delete release fragments --- doc/release/upcoming_changes/26018.change.rst | 7 ----- .../upcoming_changes/26018.performance.rst | 7 ----- .../upcoming_changes/26745.highlight.rst | 10 ------- .../upcoming_changes/27288.improvement.rst | 3 -- .../upcoming_changes/27789.new_function.rst | 5 ---- doc/release/upcoming_changes/27883.c_api.rst | 4 --- doc/release/upcoming_changes/27883.change.rst | 17 ----------- doc/release/upcoming_changes/27998.c_api.rst | 10 ------- doc/release/upcoming_changes/28080.c_api.rst | 1 - .../upcoming_changes/28080.improvement.rst | 2 -- doc/release/upcoming_changes/28102.change.rst | 6 ---- .../upcoming_changes/28105.improvement.rst | 2 -- .../upcoming_changes/28129.deprecation.rst | 4 --- .../upcoming_changes/28205.improvement.rst | 6 ---- .../upcoming_changes/28214.new_feature.rst | 23 --------------- .../upcoming_changes/28250.improvement.rst | 3 -- .../upcoming_changes/28254.expired.rst | 29 ------------------- doc/release/upcoming_changes/28343.change.rst | 1 - 
doc/release/upcoming_changes/28426.change.rst | 6 ---- doc/release/upcoming_changes/28436.change.rst | 10 ------- .../upcoming_changes/28442.improvement.rst | 1 - doc/release/upcoming_changes/28569.change.rst | 2 -- .../upcoming_changes/28576.new_feature.rst | 15 ---------- doc/release/upcoming_changes/28615.change.rst | 5 ---- .../upcoming_changes/28619.highlight.rst | 6 ---- .../upcoming_changes/28619.performance.rst | 7 ----- .../upcoming_changes/28669.new_feature.rst | 3 -- doc/release/upcoming_changes/28703.change.rst | 3 -- doc/release/upcoming_changes/28713.change.rst | 1 - doc/release/upcoming_changes/28741.change.rst | 1 - .../upcoming_changes/28769.performance.rst | 8 ----- .../upcoming_changes/28856.improvement.rst | 5 ---- .../upcoming_changes/28884.deprecation.rst | 28 ------------------ .../upcoming_changes/28940.new_feature.rst | 6 ---- .../upcoming_changes/28961.expired.rst | 1 - doc/source/release.rst | 1 + doc/source/release/2.4.0-notes.rst | 19 ++++++++++++ numpy/_core/code_generators/cversions.txt | 3 +- numpy/_core/include/numpy/numpyconfig.h | 2 +- pavement.py | 2 +- pyproject.toml | 2 +- 41 files changed, 25 insertions(+), 252 deletions(-) delete mode 100644 doc/release/upcoming_changes/26018.change.rst delete mode 100644 doc/release/upcoming_changes/26018.performance.rst delete mode 100644 doc/release/upcoming_changes/26745.highlight.rst delete mode 100644 doc/release/upcoming_changes/27288.improvement.rst delete mode 100644 doc/release/upcoming_changes/27789.new_function.rst delete mode 100644 doc/release/upcoming_changes/27883.c_api.rst delete mode 100644 doc/release/upcoming_changes/27883.change.rst delete mode 100644 doc/release/upcoming_changes/27998.c_api.rst delete mode 100644 doc/release/upcoming_changes/28080.c_api.rst delete mode 100644 doc/release/upcoming_changes/28080.improvement.rst delete mode 100644 doc/release/upcoming_changes/28102.change.rst delete mode 100644 doc/release/upcoming_changes/28105.improvement.rst delete mode 
100644 doc/release/upcoming_changes/28129.deprecation.rst delete mode 100644 doc/release/upcoming_changes/28205.improvement.rst delete mode 100644 doc/release/upcoming_changes/28214.new_feature.rst delete mode 100644 doc/release/upcoming_changes/28250.improvement.rst delete mode 100644 doc/release/upcoming_changes/28254.expired.rst delete mode 100644 doc/release/upcoming_changes/28343.change.rst delete mode 100644 doc/release/upcoming_changes/28426.change.rst delete mode 100644 doc/release/upcoming_changes/28436.change.rst delete mode 100644 doc/release/upcoming_changes/28442.improvement.rst delete mode 100644 doc/release/upcoming_changes/28569.change.rst delete mode 100644 doc/release/upcoming_changes/28576.new_feature.rst delete mode 100644 doc/release/upcoming_changes/28615.change.rst delete mode 100644 doc/release/upcoming_changes/28619.highlight.rst delete mode 100644 doc/release/upcoming_changes/28619.performance.rst delete mode 100644 doc/release/upcoming_changes/28669.new_feature.rst delete mode 100644 doc/release/upcoming_changes/28703.change.rst delete mode 100644 doc/release/upcoming_changes/28713.change.rst delete mode 100644 doc/release/upcoming_changes/28741.change.rst delete mode 100644 doc/release/upcoming_changes/28769.performance.rst delete mode 100644 doc/release/upcoming_changes/28856.improvement.rst delete mode 100644 doc/release/upcoming_changes/28884.deprecation.rst delete mode 100644 doc/release/upcoming_changes/28940.new_feature.rst delete mode 100644 doc/release/upcoming_changes/28961.expired.rst create mode 100644 doc/source/release/2.4.0-notes.rst diff --git a/doc/release/upcoming_changes/26018.change.rst b/doc/release/upcoming_changes/26018.change.rst deleted file mode 100644 index 9d7c139be183..000000000000 --- a/doc/release/upcoming_changes/26018.change.rst +++ /dev/null @@ -1,7 +0,0 @@ -``unique_values`` may return unsorted data ------------------------------------------- -The relatively new function (added in NumPy 2.0) 
``unique_values`` may now -return unsorted results. Just as ``unique_counts`` and ``unique_all`` -these never guaranteed a sorted result, however, the result -was sorted until now. In cases where these do return a sorted result, this -may change in future releases to improve performance. diff --git a/doc/release/upcoming_changes/26018.performance.rst b/doc/release/upcoming_changes/26018.performance.rst deleted file mode 100644 index ffeab51dbdf6..000000000000 --- a/doc/release/upcoming_changes/26018.performance.rst +++ /dev/null @@ -1,7 +0,0 @@ -Performance improvements to ``np.unique`` ------------------------------------------ -``np.unique`` now tries to use a hash table to find unique values instead of sorting -values before finding unique values. This is limited to certain dtypes for now, and -the function is now faster for those dtypes. The function now also exposes a ``sorted`` -parameter to allow returning unique values as they were found, instead of sorting them -afterwards. \ No newline at end of file diff --git a/doc/release/upcoming_changes/26745.highlight.rst b/doc/release/upcoming_changes/26745.highlight.rst deleted file mode 100644 index 5636f919c80d..000000000000 --- a/doc/release/upcoming_changes/26745.highlight.rst +++ /dev/null @@ -1,10 +0,0 @@ -Interactive examples in the NumPy documentation ------------------------------------------------ - -The NumPy documentation includes a number of examples that -can now be run interactively in your browser using WebAssembly -and Pyodide. - -Please note that the examples are currently experimental in -nature and may not work as expected for all methods in the -public API. 
diff --git a/doc/release/upcoming_changes/27288.improvement.rst b/doc/release/upcoming_changes/27288.improvement.rst deleted file mode 100644 index c7319554c63f..000000000000 --- a/doc/release/upcoming_changes/27288.improvement.rst +++ /dev/null @@ -1,3 +0,0 @@ -* Scalar comparisons between non-comparable dtypes such as - `np.array(1) == np.array('s')` now return a NumPy bool instead of - a Python bool. \ No newline at end of file diff --git a/doc/release/upcoming_changes/27789.new_function.rst b/doc/release/upcoming_changes/27789.new_function.rst deleted file mode 100644 index 734a0c3bc2b5..000000000000 --- a/doc/release/upcoming_changes/27789.new_function.rst +++ /dev/null @@ -1,5 +0,0 @@ -New function `numpy.strings.slice` ----------------------------------- -The new function `numpy.strings.slice` was added, which implements fast -native slicing of string arrays. It supports the full slicing API including -negative slice offsets and steps. \ No newline at end of file diff --git a/doc/release/upcoming_changes/27883.c_api.rst b/doc/release/upcoming_changes/27883.c_api.rst deleted file mode 100644 index 107e0036c5c2..000000000000 --- a/doc/release/upcoming_changes/27883.c_api.rst +++ /dev/null @@ -1,4 +0,0 @@ -* `NpyIter_GetTransferFlags` is now available to check if - the iterator needs the Python API or if casts may cause floating point - errors (FPE). FPEs can for example be set when casting ``float64(1e300)`` - to ``float32`` (overflow to infinity) or a NaN to an integer (invalid value). 
\ No newline at end of file diff --git a/doc/release/upcoming_changes/27883.change.rst b/doc/release/upcoming_changes/27883.change.rst deleted file mode 100644 index ea68771efba3..000000000000 --- a/doc/release/upcoming_changes/27883.change.rst +++ /dev/null @@ -1,17 +0,0 @@ -Changes to the main iterator and potential numerical changes ------------------------------------------------------------- -The main iterator, used in math functions and via ``np.nditer`` from Python -and ``NpyIter`` in C, now behaves differently for some buffered iterations. -This means that: - -* The buffer size used will often be smaller than the maximum buffer sized - allowed by the ``buffersize`` parameter. -* The "growinner" flag is now honored with buffered reductions when no operand - requires buffering. - -For ``np.sum()`` such changes in buffersize may slightly change numerical -results of floating point operations. -Users who use "growinner" for custom reductions could notice -changes in precision (for example, in NumPy we removed it from -``einsum`` to avoid most precision changes and improve precision -for some 64bit floating point inputs). diff --git a/doc/release/upcoming_changes/27998.c_api.rst b/doc/release/upcoming_changes/27998.c_api.rst deleted file mode 100644 index edc6371af1f9..000000000000 --- a/doc/release/upcoming_changes/27998.c_api.rst +++ /dev/null @@ -1,10 +0,0 @@ -New `NpyIter_GetTransferFlags` and ``NpyIter_IterationNeedsAPI`` change ------------------------------------------------------------------------ -NumPy now has the new `NpyIter_GetTransferFlags` function as a more precise -way checking of iterator/buffering needs. I.e. whether the Python API/GIL is -required or floating point errors may occur. -This function is also faster if you already know your needs without buffering. - -The ``NpyIter_IterationNeedsAPI`` function now performs all the checks that were -previously performed at setup time. 
While it was never necessary to call it -multiple times, doing so will now have a larger cost. diff --git a/doc/release/upcoming_changes/28080.c_api.rst b/doc/release/upcoming_changes/28080.c_api.rst deleted file mode 100644 index f72be7ef52fe..000000000000 --- a/doc/release/upcoming_changes/28080.c_api.rst +++ /dev/null @@ -1 +0,0 @@ -* ``NpyIter`` now has no limit on the number of operands it supports. diff --git a/doc/release/upcoming_changes/28080.improvement.rst b/doc/release/upcoming_changes/28080.improvement.rst deleted file mode 100644 index 19b85ae3c96a..000000000000 --- a/doc/release/upcoming_changes/28080.improvement.rst +++ /dev/null @@ -1,2 +0,0 @@ -* ``np.nditer`` now has no limit on the number of supported operands - (C-integer). diff --git a/doc/release/upcoming_changes/28102.change.rst b/doc/release/upcoming_changes/28102.change.rst deleted file mode 100644 index bd54378a652e..000000000000 --- a/doc/release/upcoming_changes/28102.change.rst +++ /dev/null @@ -1,6 +0,0 @@ -The minimum supported GCC version is now 9.3.0 ----------------------------------------------- -The minimum supported version was updated from 8.4.0 to 9.3.0, -primarily in order to reduce the chance of platform-specific bugs in old GCC -versions from causing issues. - diff --git a/doc/release/upcoming_changes/28105.improvement.rst b/doc/release/upcoming_changes/28105.improvement.rst deleted file mode 100644 index 537467575234..000000000000 --- a/doc/release/upcoming_changes/28105.improvement.rst +++ /dev/null @@ -1,2 +0,0 @@ -* No-copy pickling is now supported for any - array that can be transposed to a C-contiguous array. 
\ No newline at end of file diff --git a/doc/release/upcoming_changes/28129.deprecation.rst b/doc/release/upcoming_changes/28129.deprecation.rst deleted file mode 100644 index b1beb0c5cca3..000000000000 --- a/doc/release/upcoming_changes/28129.deprecation.rst +++ /dev/null @@ -1,4 +0,0 @@ -* The ``numpy.typing.mypy_plugin`` has been deprecated in favor of platform-agnostic - static type inference. Please remove ``numpy.typing.mypy_plugin`` from the ``plugins`` - section of your mypy configuration. If this change results in new errors being - reported, kindly open an issue. diff --git a/doc/release/upcoming_changes/28205.improvement.rst b/doc/release/upcoming_changes/28205.improvement.rst deleted file mode 100644 index 42eaaac98363..000000000000 --- a/doc/release/upcoming_changes/28205.improvement.rst +++ /dev/null @@ -1,6 +0,0 @@ -Added warnings to `np.isclose` ---------------------------------- -Added warning messages if at least one of atol or rtol are -either `np.nan` or `np.inf` within `np.isclose` - -* Warnings follow the user's `np.seterr` settings diff --git a/doc/release/upcoming_changes/28214.new_feature.rst b/doc/release/upcoming_changes/28214.new_feature.rst deleted file mode 100644 index eb95a0739e79..000000000000 --- a/doc/release/upcoming_changes/28214.new_feature.rst +++ /dev/null @@ -1,23 +0,0 @@ -NumPy now registers its pkg-config paths with the pkgconf_ PyPI package ------------------------------------------------------------------------ - -The pkgconf_ PyPI package provides an interface for projects like NumPy to -register their own paths to be added to the pkg-config search path. This means -that when using pkgconf_ from PyPI, NumPy will be discoverable without needing -for any custom environment configuration. - -.. attention:: Attention - - This only applies when using the pkgconf_ package from PyPI_, or put another - way, this only applies when installing pkgconf_ via a Python package - manager. 
- - If you are using ``pkg-config`` or ``pkgconf`` provided by your system, or - any other source that does not use the pkgconf-pypi_ project, the NumPy - pkg-config directory will not be automatically added to the search path. In - these situations, you might want to use ``numpy-config``. - - -.. _pkgconf: https://github.com/pypackaging-native/pkgconf-pypi -.. _PyPI: https://pypi.org/ -.. _pkgconf-pypi: https://github.com/pypackaging-native/pkgconf-pypi diff --git a/doc/release/upcoming_changes/28250.improvement.rst b/doc/release/upcoming_changes/28250.improvement.rst deleted file mode 100644 index 703a8bb0c2e1..000000000000 --- a/doc/release/upcoming_changes/28250.improvement.rst +++ /dev/null @@ -1,3 +0,0 @@ -* The ``__repr__`` for user-defined dtypes now prefers the ``__name__`` of the - custom dtype over a more generic name constructed from its ``kind`` and - ``itemsize``. diff --git a/doc/release/upcoming_changes/28254.expired.rst b/doc/release/upcoming_changes/28254.expired.rst deleted file mode 100644 index 5f391eb6cbe2..000000000000 --- a/doc/release/upcoming_changes/28254.expired.rst +++ /dev/null @@ -1,29 +0,0 @@ -* Remove deprecated macros like ``NPY_OWNDATA`` from cython interfaces in favor of ``NPY_ARRAY_OWNDATA`` (deprecated since 1.7) -* Remove ``numpy/npy_1_7_deprecated_api.h`` and C macros like ``NPY_OWNDATA`` in favor of ``NPY_ARRAY_OWNDATA`` (deprecated since 1.7) -* Remove alias ``generate_divbyzero_error`` to ``npy_set_floatstatus_divbyzero`` and ``generate_overflow_error`` to ``npy_set_floatstatus_overflow`` (deprecated since 1.10) -* Remove ``np.tostring`` (deprecated since 1.19) -* Raise on ``np.conjugate`` of non-numeric types (deprecated since 1.13) -* Raise when using ``np.bincount(...minlength=None)``, use 0 instead (deprecated since 1.14) -* Passing ``shape=None`` to functions with a non-optional shape argument errors, use ``()`` instead (deprecated since 1.20) -* Inexact matches for ``mode`` and ``searchside`` raise (deprecated since 
1.20) -* Setting ``__array_finalize__ = None`` errors (deprecated since 1.23) -* ``np.fromfile`` and ``np.fromstring`` error on bad data, previously they would guess (deprecated since 1.18) -* ``datetime64`` and ``timedelta64`` construction with a tuple no longer accepts an ``event`` value, either use a two-tuple of (unit, num) or a 4-tuple of (unit, num, den, 1) (deprecated since 1.14) -* When constructing a ``dtype`` from a class with a ``dtype`` attribute, that attribute must be a dtype-instance rather than a thing that can be parsed as a dtype instance (deprecated in 1.19). At some point the whole construct of using a dtype attribute will be deprecated (see #25306) -* Passing booleans as partition index errors (deprecated since 1.23) -* Out-of-bounds indexes error even on empty arrays (deprecated since 1.20) -* ``np.tostring`` has been removed, use ``tobytes`` instead (deprecated since 1.19) -* Disallow make a non-writeable array writeable for arrays with a base that do not own their data (deprecated since 1.17) -* ``concatenate()`` with ``axis=None`` uses ``same-kind`` casting by default, not ``unsafe`` (deprecated since 1.20) -* Unpickling a scalar with object dtype errors (deprecated since 1.20) -* The binary mode of ``fromstring`` now errors, use ``frombuffer`` instead (deprecated since 1.14) -* Converting ``np.inexact`` or ``np.floating`` to a dtype errors (deprecated since 1.19) -* Converting ``np.complex``, ``np.integer``, ``np.signedinteger``, ``np.unsignedinteger``, ``np.generic`` to a dtype errors (deprecated since 1.19) -* The Python built-in ``round`` errors for complex scalars. Use ``np.round`` or ``scalar.round`` instead (deprecated since 1.19) -* 'np.bool' scalars can no longer be interpreted as an index (deprecated since 1.19) -* Parsing an integer via a float string is no longer supported. (deprecated since 1.23) To avoid this error you can - * make sure the original data is stored as integers. - * use the ``converters=float`` keyword argument. 
- * Use ``np.loadtxt(...).astype(np.int64)`` -* The use of a length 1 tuple for the ufunc ``signature`` errors. Use ``dtype`` or fill the tuple with ``None`` (deprecated since 1.19) -* Special handling of matrix is in np.outer is removed. Convert to a ndarray via ``matrix.A`` (deprecated since 1.20) diff --git a/doc/release/upcoming_changes/28343.change.rst b/doc/release/upcoming_changes/28343.change.rst deleted file mode 100644 index 378ef775b62e..000000000000 --- a/doc/release/upcoming_changes/28343.change.rst +++ /dev/null @@ -1 +0,0 @@ -* The vector norm ``ord=inf`` and the matrix norms ``ord={1, 2, inf, 'nuc'}`` now always returns zero for empty arrays. Empty arrays have at least one axis of size zero. This affects `np.linalg.norm`, `np.linalg.vector_norm`, and `np.linalg.matrix_norm`. Previously, NumPy would raises errors or return zero depending on the shape of the array. diff --git a/doc/release/upcoming_changes/28426.change.rst b/doc/release/upcoming_changes/28426.change.rst deleted file mode 100644 index d1c48640eed0..000000000000 --- a/doc/release/upcoming_changes/28426.change.rst +++ /dev/null @@ -1,6 +0,0 @@ -Changes to automatic bin selection in numpy.histogram ------------------------------------------------------ -The automatic bin selection algorithm in ``numpy.histogram`` has been modified -to avoid out-of-memory errors for samples with low variation. -For full control over the selected bins the user can use set -the ``bin`` or ``range`` parameters of ``numpy.histogram``. 
diff --git a/doc/release/upcoming_changes/28436.change.rst b/doc/release/upcoming_changes/28436.change.rst deleted file mode 100644 index 60149e55a4d0..000000000000 --- a/doc/release/upcoming_changes/28436.change.rst +++ /dev/null @@ -1,10 +0,0 @@ -Build manylinux_2_28 wheels ---------------------------- - -Wheels for linux systems will use the ``manylinux_2_28`` tag (instead of the ``manylinux2014`` tag), which means -dropping support for redhat7/centos7, amazonlinux2, debian9, ubuntu18.04, and -other pre-glibc2.28 operating system versions, as per the `PEP 600 support -table`_. - -.. _`PEP 600 support table`: https://github.com/mayeut/pep600_compliance?tab=readme-ov-file#pep600-compliance-check - diff --git a/doc/release/upcoming_changes/28442.improvement.rst b/doc/release/upcoming_changes/28442.improvement.rst deleted file mode 100644 index 16d71bde19c5..000000000000 --- a/doc/release/upcoming_changes/28442.improvement.rst +++ /dev/null @@ -1 +0,0 @@ -* ``np.dot`` now reports floating point exceptions. diff --git a/doc/release/upcoming_changes/28569.change.rst b/doc/release/upcoming_changes/28569.change.rst deleted file mode 100644 index f9d26fda0484..000000000000 --- a/doc/release/upcoming_changes/28569.change.rst +++ /dev/null @@ -1,2 +0,0 @@ -* A spelling error in the error message returned when converting a string to a float with the - method ``np.format_float_positional`` has been fixed. diff --git a/doc/release/upcoming_changes/28576.new_feature.rst b/doc/release/upcoming_changes/28576.new_feature.rst deleted file mode 100644 index 2c50887a49f2..000000000000 --- a/doc/release/upcoming_changes/28576.new_feature.rst +++ /dev/null @@ -1,15 +0,0 @@ -Allow ``out=...`` in ufuncs to ensure array result --------------------------------------------------- -NumPy has the sometimes difficult behavior that it currently usually -returns scalars rather than 0-D arrays (even if the inputs were 0-D arrays). -This is especially problematic for non-numerical dtypes (e.g. 
``object``). - -For ufuncs (i.e. most simple math functions) it is now possible -to use ``out=...`` (literally `...`, e.g. ``out=Ellipsis``) which is identical in behavior to ``out`` not -being passed, but will ensure a non-scalar return. -This spelling is borrowed from ``arr1d[0, ...]`` where the ``...`` -also ensures a non-scalar return. - -Other functions with an ``out=`` kwarg should gain support eventually. -Downstream libraries that interoperate via ``__array_ufunc__`` or -``__array_function__`` may need to adapt to support this. diff --git a/doc/release/upcoming_changes/28615.change.rst b/doc/release/upcoming_changes/28615.change.rst deleted file mode 100644 index 58b751e40704..000000000000 --- a/doc/release/upcoming_changes/28615.change.rst +++ /dev/null @@ -1,5 +0,0 @@ -* NumPy's ``__array_api_version__`` was upgraded from ``2023.12`` to ``2024.12``. -* `numpy.count_nonzero` for ``axis=None`` (default) now returns a NumPy scalar - instead of a Python integer. -* The parameter ``axis`` in `numpy.take_along_axis` function has now a default - value of ``-1``. diff --git a/doc/release/upcoming_changes/28619.highlight.rst b/doc/release/upcoming_changes/28619.highlight.rst deleted file mode 100644 index 6c296b92899e..000000000000 --- a/doc/release/upcoming_changes/28619.highlight.rst +++ /dev/null @@ -1,6 +0,0 @@ -Building NumPy with OpenMP Parallelization -------------------------------------------- -NumPy now supports OpenMP parallel processing capabilities when built with the -``-Denable_openmp=true`` Meson build flag. This feature is disabled by default. -When enabled, ``np.sort`` and ``np.argsort`` functions can utilize OpenMP for -parallel thread execution, improving performance for these operations. 
diff --git a/doc/release/upcoming_changes/28619.performance.rst b/doc/release/upcoming_changes/28619.performance.rst deleted file mode 100644 index 904decbe0ba6..000000000000 --- a/doc/release/upcoming_changes/28619.performance.rst +++ /dev/null @@ -1,7 +0,0 @@ -Performance improvements to ``np.sort`` and ``np.argsort`` ----------------------------------------------------------- -``np.sort`` and ``np.argsort`` functions now can leverage OpenMP for parallel -thread execution, resulting in up to 3.5x speedups on x86 architectures with -AVX2 or AVX-512 instructions. This opt-in feature requires NumPy to be built -with the -Denable_openmp Meson flag. Users can control the number of threads -used by setting the OMP_NUM_THREADS environment variable. diff --git a/doc/release/upcoming_changes/28669.new_feature.rst b/doc/release/upcoming_changes/28669.new_feature.rst deleted file mode 100644 index 2953a5123ccc..000000000000 --- a/doc/release/upcoming_changes/28669.new_feature.rst +++ /dev/null @@ -1,3 +0,0 @@ -* The type parameter of ``np.dtype`` now defaults to ``typing.Any``. - This way, static type-checkers will infer ``dtype: np.dtype`` as - ``dtype: np.dtype[Any]``, without reporting an error. diff --git a/doc/release/upcoming_changes/28703.change.rst b/doc/release/upcoming_changes/28703.change.rst deleted file mode 100644 index 87bb431951f9..000000000000 --- a/doc/release/upcoming_changes/28703.change.rst +++ /dev/null @@ -1,3 +0,0 @@ -* Printing of ``np.float16`` and ``np.float32`` scalars and arrays have been improved by - adjusting the transition to scientific notation based on the floating point precision. - A new legacy ``np.printoptions`` mode ``'2.2'`` has been added for backwards compatibility. 
diff --git a/doc/release/upcoming_changes/28713.change.rst b/doc/release/upcoming_changes/28713.change.rst deleted file mode 100644 index 5e5c5adde88b..000000000000 --- a/doc/release/upcoming_changes/28713.change.rst +++ /dev/null @@ -1 +0,0 @@ -Remove use of -Wl,-ld_classic on macOS. This hack is no longer needed by Spack, and results in libraries that cannot link to other libraries built with ld (new). diff --git a/doc/release/upcoming_changes/28741.change.rst b/doc/release/upcoming_changes/28741.change.rst deleted file mode 100644 index ca9531f490d8..000000000000 --- a/doc/release/upcoming_changes/28741.change.rst +++ /dev/null @@ -1 +0,0 @@ -Re-enable overriding functions in the :mod:`numpy.strings` module. diff --git a/doc/release/upcoming_changes/28769.performance.rst b/doc/release/upcoming_changes/28769.performance.rst deleted file mode 100644 index 7fb8f02282f6..000000000000 --- a/doc/release/upcoming_changes/28769.performance.rst +++ /dev/null @@ -1,8 +0,0 @@ -Performance improvements for ``np.float16`` casts --------------------------------------------------- -Earlier, floating point casts to and from ``np.float16`` types -were emulated in software on all platforms. - -Now, on ARM devices that support Neon float16 intrinsics (such as -recent Apple Silicon), the native float16 path is used to achieve -the best performance. diff --git a/doc/release/upcoming_changes/28856.improvement.rst b/doc/release/upcoming_changes/28856.improvement.rst deleted file mode 100644 index 83911035f097..000000000000 --- a/doc/release/upcoming_changes/28856.improvement.rst +++ /dev/null @@ -1,5 +0,0 @@ -* ``np.dtypes.StringDType`` is now a - `generic type `_ which - accepts a type argument for ``na_object`` that defaults to ``typing.Never``. - For example, ``StringDType(na_object=None)`` returns a ``StringDType[None]``, - and ``StringDType()`` returns a ``StringDType[typing.Never]``. 
diff --git a/doc/release/upcoming_changes/28884.deprecation.rst b/doc/release/upcoming_changes/28884.deprecation.rst deleted file mode 100644 index c1be55fb0dd3..000000000000 --- a/doc/release/upcoming_changes/28884.deprecation.rst +++ /dev/null @@ -1,28 +0,0 @@ -``numpy.typing.NBitBase`` deprecation -------------------------------------- -The ``numpy.typing.NBitBase`` type has been deprecated and will be removed in a future version. - -This type was previously intended to be used as a generic upper bound for type-parameters, for example: - -.. code-block:: python - - import numpy as np - import numpy.typing as npt - - def f[NT: npt.NBitBase](x: np.complexfloating[NT]) -> np.floating[NT]: ... - -But in NumPy 2.2.0, ``float64`` and ``complex128`` were changed to concrete subtypes, causing static type-checkers to reject ``x: np.float64 = f(np.complex128(42j))``. - -So instead, the better approach is to use ``typing.overload``: - -.. code-block:: python - - import numpy as np - from typing import overload - - @overload - def f(x: np.complex64) -> np.float32: ... - @overload - def f(x: np.complex128) -> np.float64: ... - @overload - def f(x: np.clongdouble) -> np.longdouble: ... diff --git a/doc/release/upcoming_changes/28940.new_feature.rst b/doc/release/upcoming_changes/28940.new_feature.rst deleted file mode 100644 index e0d3dc8888c3..000000000000 --- a/doc/release/upcoming_changes/28940.new_feature.rst +++ /dev/null @@ -1,6 +0,0 @@ -* Static type-checkers now interpret: - - - ``_: np.ndarray`` as ``_: npt.NDArray[typing.Any]``. - - ``_: np.flatiter`` as ``_: np.flatiter[np.ndarray]``. - - This is because their type parameters now have default values. 
diff --git a/doc/release/upcoming_changes/28961.expired.rst b/doc/release/upcoming_changes/28961.expired.rst deleted file mode 100644 index 92031de35e62..000000000000 --- a/doc/release/upcoming_changes/28961.expired.rst +++ /dev/null @@ -1 +0,0 @@ -* Removed the ``np.compat`` package source code (removed in 2.0) diff --git a/doc/source/release.rst b/doc/source/release.rst index 36d5e6731f4f..6c6a853b06f5 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -5,6 +5,7 @@ Release notes .. toctree:: :maxdepth: 2 + 2.4.0 2.3.0 2.2.6 2.2.5 diff --git a/doc/source/release/2.4.0-notes.rst b/doc/source/release/2.4.0-notes.rst new file mode 100644 index 000000000000..29a7e5ce6073 --- /dev/null +++ b/doc/source/release/2.4.0-notes.rst @@ -0,0 +1,19 @@ +.. currentmodule:: numpy + +========================== +NumPy 2.4.0 Release Notes +========================== + + +Highlights +========== + +*We'll choose highlights for this release near the end of the release cycle.* + + +.. if release snippets have been incorporated already, uncomment the follow + line (leave the `.. include:: directive) + +.. **Content from release note snippets in doc/release/upcoming_changes:** + +.. 
include:: notes-towncrier.rst diff --git a/numpy/_core/code_generators/cversions.txt b/numpy/_core/code_generators/cversions.txt index d448df066a19..0d642d760b21 100644 --- a/numpy/_core/code_generators/cversions.txt +++ b/numpy/_core/code_generators/cversions.txt @@ -78,5 +78,6 @@ # Version 19 (NumPy 2.1.0) Only header additions # Version 19 (NumPy 2.2.0) No change 0x00000013 = 2b8f1f4da822491ff030b2b37dff07e3 -# Version 19 (NumPy 2.3.0) +# Version 20 (NumPy 2.3.0) +# Version 20 (NumPy 2.4.0) No change 0x00000014 = e56b74d32a934d085e7c3414cb9999b8, diff --git a/numpy/_core/include/numpy/numpyconfig.h b/numpy/_core/include/numpy/numpyconfig.h index ba44c28b9d0f..52d7e2b5d7d7 100644 --- a/numpy/_core/include/numpy/numpyconfig.h +++ b/numpy/_core/include/numpy/numpyconfig.h @@ -172,7 +172,7 @@ #define NPY_FEATURE_VERSION_STRING "2.0" #elif NPY_FEATURE_VERSION == NPY_2_1_API_VERSION #define NPY_FEATURE_VERSION_STRING "2.1" -#elif NPY_FEATURE_VERSION == NPY_2_3_API_VERSION +#elif NPY_FEATURE_VERSION == NPY_2_3_API_VERSION /* also 2.4 */ #define NPY_FEATURE_VERSION_STRING "2.3" #else #error "Missing version string define for new NumPy version." 
diff --git a/pavement.py b/pavement.py index e00b9647f5e3..369b8703b0ba 100644 --- a/pavement.py +++ b/pavement.py @@ -35,7 +35,7 @@ #----------------------------------- # Path to the release notes -RELEASE_NOTES = 'doc/source/release/2.3.0-notes.rst' +RELEASE_NOTES = 'doc/source/release/2.4.0-notes.rst' #------------------------------------------------------- diff --git a/pyproject.toml b/pyproject.toml index b62d71cbba73..5cf75b20a6b6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,7 +7,7 @@ requires = [ [project] name = "numpy" -version = "2.3.0.dev0" +version = "2.4.0.dev0" # TODO: add `license-files` once PEP 639 is accepted (see meson-python#88) license = {file = "LICENSE.txt"} From bae7d087c54f5a4ce25f4906b8fcefc92fed799c Mon Sep 17 00:00:00 2001 From: Stan Ulbrych <89152624+StanFromIreland@users.noreply.github.com> Date: Wed, 21 May 2025 20:50:31 +0100 Subject: [PATCH 0021/1718] Fix workflow error --- .github/workflows/windows.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index f61419df09cd..6c02563150da 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -35,7 +35,7 @@ jobs: persist-credentials: false - name: Setup Python - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 with: python-version: ${{ matrix.compiler-pyversion[1] }} From 54529e22743cffe6f997b725a7fcff650736395b Mon Sep 17 00:00:00 2001 From: Angus Gibson Date: Thu, 22 May 2025 11:42:18 +1000 Subject: [PATCH 0022/1718] BUG: Avoid compile errors in f2py modules Some of the casts from cfuncs pass PyObject* to PyArrayObject*, which causes compile errors due to incompatible pointer types on at least GCC 14. 
--- numpy/f2py/cfuncs.py | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/numpy/f2py/cfuncs.py b/numpy/f2py/cfuncs.py index a9a56b2455f2..6c48c1ef0175 100644 --- a/numpy/f2py/cfuncs.py +++ b/numpy/f2py/cfuncs.py @@ -1047,9 +1047,12 @@ def errmess(s: str) -> None: PyArray_ScalarAsCtype(obj, v); return 1; } - else if (PyArray_Check(obj) && PyArray_TYPE(obj) == NPY_LONGDOUBLE) { - (*v) = *((npy_longdouble *)PyArray_DATA(obj)); - return 1; + else if (PyArray_Check(obj)) { + PyArrayObject *arr = (PyArrayObject *)obj; + if (PyArray_TYPE(arr) == NPY_LONGDOUBLE) { + (*v) = *((npy_longdouble *)PyArray_DATA(arr)); + return 1; + } } } if (double_from_pyobj(&d, obj, errmess)) { @@ -1131,10 +1134,13 @@ def errmess(s: str) -> None: PyArray_ScalarAsCtype(obj, v); return 1; } - else if (PyArray_Check(obj) && PyArray_TYPE(obj)==NPY_CLONGDOUBLE) { - (*v).r = npy_creall(*(((npy_clongdouble *)PyArray_DATA(obj)))); - (*v).i = npy_cimagl(*(((npy_clongdouble *)PyArray_DATA(obj)))); - return 1; + else if (PyArray_Check(obj)) { + PyArrayObject *arr = (PyArrayObject *)obj; + if (PyArray_TYPE(arr)==NPY_CLONGDOUBLE) { + (*v).r = npy_creall(*(((npy_clongdouble *)PyArray_DATA(arr)))); + (*v).i = npy_cimagl(*(((npy_clongdouble *)PyArray_DATA(arr)))); + return 1; + } } } if (complex_double_from_pyobj(&cd,obj,errmess)) { From ce6a72c41efe545f68dcd831e6193fcdd450180b Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 22 May 2025 12:09:04 +0200 Subject: [PATCH 0023/1718] DOC: Expand/clean up extension module import error We get a lot of these reports and it is never us... But unfortunately, Python (currently) doesn't report *why* the module wasn't found. (I have opened an issue asking for that.) Until Python does, try to figure it out ourselves, i.e. list C modules (I guess its always one, but OK). If anything it'll give *us* an immediate thing to point out if an issue is reported... 
I also hid the "source import" thing to only occur if __config__ doesn't exist. Not sure that catches this fully, but I also feel like this isn't an actual problem anymore (i.e. we could just delete it also). Tested locally by renaming or deleting `_multiarray_umath`. --- numpy/__init__.py | 10 ++++++---- numpy/_core/__init__.py | 43 +++++++++++++++++++++++++++++++++-------- 2 files changed, 41 insertions(+), 12 deletions(-) diff --git a/numpy/__init__.py b/numpy/__init__.py index 8fb2e742dfc4..3a67bd221247 100644 --- a/numpy/__init__.py +++ b/numpy/__init__.py @@ -111,10 +111,12 @@ try: from numpy.__config__ import show_config except ImportError as e: - msg = """Error importing numpy: you should not try to import numpy from - its source directory; please exit the numpy source tree, and relaunch - your python interpreter from there.""" - raise ImportError(msg) from e + if isinstance(e, ModuleNotFoundError) and e.name == "numpy.__config__": + # The __config__ module itself was not found, so add this info: + msg = """Error importing numpy: you should not try to import numpy from + its source directory; please exit the numpy source tree, and relaunch + your python interpreter from there.""" + raise ImportError(msg) from e from . import _core from ._core import ( diff --git a/numpy/_core/__init__.py b/numpy/_core/__init__.py index d0da7e0ad9ed..f19557277756 100644 --- a/numpy/_core/__init__.py +++ b/numpy/_core/__init__.py @@ -22,29 +22,56 @@ from . import multiarray except ImportError as exc: import sys - msg = """ + + # Basically always, the problem should be that the C module is wrong/missing... 
+ if isinstance(exc, ModuleNotFoundError) and exc.name == "numpy._core._multiarray_umath": + import sys + candidates = [] + for path in __path__: + candidates.extend(f for f in os.listdir(path) if f.startswith("_multiarray_umath")) + if len(candidates) == 0: + bad_c_module_info = ( + "We found no compiled module, was NumPy build successfully?\n") + else: + candidate_str = '\n * '.join(candidates) + # cache_tag is documented to be possibly None, so just use name if it is + # this guesses at cache_tag being the same as the extension module scheme + tag = sys.implementation.cache_tag or sys.implementation.name + bad_c_module_info = ( + f"The following compiled module files exist, but seem incompatible\n" + f"with with either python '{tag}' or the " + f"platform '{sys.platform}':\n\n * {candidate_str}\n" + ) + else: + bad_c_module_info = "" + + major, minor, *_ = sys.version_info + msg = f""" IMPORTANT: PLEASE READ THIS FOR ADVICE ON HOW TO SOLVE THIS ISSUE! Importing the numpy C-extensions failed. This error can happen for many reasons, often due to issues with your setup or how NumPy was installed. - +{bad_c_module_info} We have compiled some common reasons and troubleshooting tips at: https://numpy.org/devdocs/user/troubleshooting-importerror.html Please note and check the following: - * The Python version is: Python%d.%d from "%s" - * The NumPy version is: "%s" + * The Python version is: Python {major}.{minor} from "{sys.executable}" + * The NumPy version is: "{__version__}" and make sure that they are the versions you expect. -Please carefully study the documentation linked above for further help. -Original error was: %s -""" % (sys.version_info[0], sys.version_info[1], sys.executable, - __version__, exc) +Please carefully study the information and documentation linked above. +This is unlikely to be a NumPy issue but will be caused by a bad install +or environment on your machine. 
+ +Original error was: {exc} +""" + raise ImportError(msg) from exc finally: for envkey in env_added: From 552ce2a6ddba62c874629d7d5c2dfb21af403d50 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 22 May 2025 12:15:36 +0200 Subject: [PATCH 0024/1718] appease linter --- numpy/_core/__init__.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/numpy/_core/__init__.py b/numpy/_core/__init__.py index f19557277756..268932649eac 100644 --- a/numpy/_core/__init__.py +++ b/numpy/_core/__init__.py @@ -24,11 +24,15 @@ import sys # Basically always, the problem should be that the C module is wrong/missing... - if isinstance(exc, ModuleNotFoundError) and exc.name == "numpy._core._multiarray_umath": + if ( + isinstance(exc, ModuleNotFoundError) + and exc.name == "numpy._core._multiarray_umath" + ): import sys candidates = [] for path in __path__: - candidates.extend(f for f in os.listdir(path) if f.startswith("_multiarray_umath")) + candidates.extend( + f for f in os.listdir(path) if f.startswith("_multiarray_umath")) if len(candidates) == 0: bad_c_module_info = ( "We found no compiled module, was NumPy build successfully?\n") From d63cc918f588d6eb92fd20b22dc509e345a728d8 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Thu, 22 May 2025 12:33:27 +0200 Subject: [PATCH 0025/1718] Revert enable/CIBW_ENABLE changes [wheel build] --- .github/workflows/wheels.yml | 2 -- pyproject.toml | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index bede99dd6e9a..fa2c1cb5ae97 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -180,8 +180,6 @@ jobs: uses: pypa/cibuildwheel@90a0ddeff0f23eebc21630e65d66d0f4955e9b94 # v3.0.0b1 env: CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} - # delete when we switch 3.14(t)-dev to 3.14(t) - CIBW_ENABLE: cpython-prerelease - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 with: 
diff --git a/pyproject.toml b/pyproject.toml index 2d9fbb1e2675..5cf75b20a6b6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -148,7 +148,7 @@ before-build = "bash {project}/tools/wheels/cibw_before_build.sh {project}" config-settings = "setup-args=-Duse-ilp64=true setup-args=-Dallow-noblas=false build-dir=build" before-test = "pip install -r {project}/requirements/test_requirements.txt" test-command = "bash {project}/tools/wheels/cibw_test_command.sh {project}" -enable = ["cpython-freethreading", "pypy"] +enable = ["cpython-freethreading", "pypy", "cpython-prerelease"] [tool.cibuildwheel.linux] manylinux-x86_64-image = "manylinux_2_28" From dd95e3ec53570fea721d6ff82537d7e657792787 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 22 May 2025 12:34:46 +0200 Subject: [PATCH 0026/1718] explicitly re-raise again (worked before because the next line raises...) --- numpy/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/numpy/__init__.py b/numpy/__init__.py index 3a67bd221247..aadc1fab3407 100644 --- a/numpy/__init__.py +++ b/numpy/__init__.py @@ -117,6 +117,7 @@ its source directory; please exit the numpy source tree, and relaunch your python interpreter from there.""" raise ImportError(msg) from e + raise from . 
import _core from ._core import ( From ae01519952d0cf4bd8ace320453340a3e8a42ee7 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Fri, 23 May 2025 13:50:56 +0200 Subject: [PATCH 0027/1718] Update numpy/_core/__init__.py MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Melissa Weber Mendonça --- numpy/_core/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/__init__.py b/numpy/_core/__init__.py index 268932649eac..7b19cefb2f93 100644 --- a/numpy/_core/__init__.py +++ b/numpy/_core/__init__.py @@ -35,7 +35,7 @@ f for f in os.listdir(path) if f.startswith("_multiarray_umath")) if len(candidates) == 0: bad_c_module_info = ( - "We found no compiled module, was NumPy build successfully?\n") + "We found no compiled module, did NumPy build successfully?\n") else: candidate_str = '\n * '.join(candidates) # cache_tag is documented to be possibly None, so just use name if it is From 3cbb8cd321f7676cf3ba387e081c42a5ff08637b Mon Sep 17 00:00:00 2001 From: Abdu Zoghbi Date: Fri, 23 May 2025 10:00:09 -0400 Subject: [PATCH 0028/1718] BUG: handle the case of modules with derived types --- numpy/f2py/auxfuncs.py | 16 +++++++++++++++- numpy/f2py/auxfuncs.pyi | 2 ++ numpy/f2py/f90mod_rules.py | 4 ++++ 3 files changed, 21 insertions(+), 1 deletion(-) diff --git a/numpy/f2py/auxfuncs.py b/numpy/f2py/auxfuncs.py index 6e58e6352224..a5af31d976ec 100644 --- a/numpy/f2py/auxfuncs.py +++ b/numpy/f2py/auxfuncs.py @@ -42,7 +42,7 @@ 'isunsigned_long_long', 'isunsigned_long_longarray', 'isunsigned_short', 'isunsigned_shortarray', 'l_and', 'l_not', 'l_or', 'outmess', 'replace', 'show', 'stripcomma', 'throw_error', 'isattr_value', 'getuseblocks', - 'process_f2cmap_dict', 'containscommon' + 'process_f2cmap_dict', 'containscommon', 'containsderivedtypes' ] @@ -569,6 +569,20 @@ def containscommon(rout): return 0 +def hasderivedtypes(rout): + return ('block' in rout) and rout['block'] == 'type' + + +def 
containsderivedtypes(rout): + if hasderivedtypes(rout): + return 1 + if hasbody(rout): + for b in rout['body']: + if hasderivedtypes(b): + return 1 + return 0 + + def containsmodule(block): if ismodule(block): return 1 diff --git a/numpy/f2py/auxfuncs.pyi b/numpy/f2py/auxfuncs.pyi index 2a0d4e106bcc..1212f229c660 100644 --- a/numpy/f2py/auxfuncs.pyi +++ b/numpy/f2py/auxfuncs.pyi @@ -200,11 +200,13 @@ def isintent_inplace(var: _Var) -> bool: ... def isintent_aux(var: _Var) -> bool: ... # +def containsderivedtypes(rout: _ROut) -> _Bool: ... def containscommon(rout: _ROut) -> _Bool: ... def hasexternals(rout: _ROut) -> bool: ... def hasresultnote(rout: _ROut) -> _Bool: ... def hasbody(rout: _ROut) -> _Bool: ... def hascommon(rout: _ROut) -> bool: ... +def hasderivedtypes(rout: _ROut) -> bool: ... def hascallstatement(rout: _ROut) -> bool: ... def isroutine(rout: _ROut) -> bool: ... def ismodule(rout: _ROut) -> bool: ... diff --git a/numpy/f2py/f90mod_rules.py b/numpy/f2py/f90mod_rules.py index 29adbe78a26f..d13a42a9d71f 100644 --- a/numpy/f2py/f90mod_rules.py +++ b/numpy/f2py/f90mod_rules.py @@ -120,6 +120,10 @@ def dadd(line, s=doc): if m['name'] in usenames and containscommon(m): outmess(f"\t\t\tSkipping {m['name']} since it is in 'use' and contains a common block...\n") continue + # skip modules with derived types + if m['name'] in usenames and containsderivedtypes(m): + outmess(f"\t\t\tSkipping {m['name']} since it is in 'use' and contains a derived type...\n") + continue if onlyvars: outmess(f"\t\t Variables: {' '.join(onlyvars)}\n") chooks = [''] From f84c105194013644a6f2e305a3ac6a6d3c946eb3 Mon Sep 17 00:00:00 2001 From: Abdu Zoghbi Date: Fri, 23 May 2025 10:01:41 -0400 Subject: [PATCH 0029/1718] TST: tests for modules with derived types --- .../src/regression/mod_derived_types.f90 | 23 +++++++++++++++++++ numpy/f2py/tests/test_regression.py | 10 ++++++++ 2 files changed, 33 insertions(+) create mode 100644 numpy/f2py/tests/src/regression/mod_derived_types.f90 
diff --git a/numpy/f2py/tests/src/regression/mod_derived_types.f90 b/numpy/f2py/tests/src/regression/mod_derived_types.f90 new file mode 100644 index 000000000000..7692c82cf42e --- /dev/null +++ b/numpy/f2py/tests/src/regression/mod_derived_types.f90 @@ -0,0 +1,23 @@ +module mtypes + implicit none + integer, parameter :: value1 = 100 + type :: master_data + integer :: idat = 200 + end type master_data + type(master_data) :: masterdata +end module mtypes + + +subroutine no_type_subroutine(ain, aout) + use mtypes, only: value1 + integer, intent(in) :: ain + integer, intent(out) :: aout + aout = ain + value1 +end subroutine no_type_subroutine + +subroutine type_subroutine(ain, aout) + use mtypes, only: masterdata + integer, intent(in) :: ain + integer, intent(out) :: aout + aout = ain + masterdata%idat +end subroutine type_subroutine \ No newline at end of file diff --git a/numpy/f2py/tests/test_regression.py b/numpy/f2py/tests/test_regression.py index 1931ad21a48b..5e3bac38b287 100644 --- a/numpy/f2py/tests/test_regression.py +++ b/numpy/f2py/tests/test_regression.py @@ -37,6 +37,16 @@ def test_mdat(self): assert self.module.simple_subroutine(5) == 1014 +class TestModuleWithDerivedType(util.F2PyTest): + # Check that modules with derived types work + sources = [util.getpath("tests", "src", "regression", "mod_derived_types.f90")] + + @pytest.mark.slow + def test_mtypes(self): + assert self.module.no_type_subroutine(10) == 110 + assert self.module.type_subroutine(10) == 210 + + class TestNegativeBounds(util.F2PyTest): # Check that negative bounds work correctly sources = [util.getpath("tests", "src", "negative_bounds", "issue_20853.f90")] From ac9a024a1b469ccf5a684b74048ea76b0e0d7434 Mon Sep 17 00:00:00 2001 From: Abdu Zoghbi Date: Fri, 23 May 2025 10:21:36 -0400 Subject: [PATCH 0030/1718] fix W291 --- numpy/f2py/tests/test_regression.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/f2py/tests/test_regression.py 
b/numpy/f2py/tests/test_regression.py index 5e3bac38b287..93eb29e8e723 100644 --- a/numpy/f2py/tests/test_regression.py +++ b/numpy/f2py/tests/test_regression.py @@ -44,7 +44,7 @@ class TestModuleWithDerivedType(util.F2PyTest): @pytest.mark.slow def test_mtypes(self): assert self.module.no_type_subroutine(10) == 110 - assert self.module.type_subroutine(10) == 210 + assert self.module.type_subroutine(10) == 210 class TestNegativeBounds(util.F2PyTest): From cbd2fef887aa32af84bc6efb3653370560e4bcec Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Fri, 23 May 2025 17:17:57 +0200 Subject: [PATCH 0031/1718] BUG: Fix cache use regression gh-29006 got the branching wrong leaving the cache undefined on most GCC/clang, which means we wouldn't use it. Also move it up so that we can just remove the unused globals entirely. --- numpy/_core/src/multiarray/alloc.c | 35 +++++++++++++++--------------- 1 file changed, 17 insertions(+), 18 deletions(-) diff --git a/numpy/_core/src/multiarray/alloc.c b/numpy/_core/src/multiarray/alloc.c index 280ca81a35a7..64c2b854167d 100644 --- a/numpy/_core/src/multiarray/alloc.c +++ b/numpy/_core/src/multiarray/alloc.c @@ -27,9 +27,22 @@ #endif #endif -#define NBUCKETS 1024 /* number of buckets for data*/ -#define NBUCKETS_DIM 16 /* number of buckets for dimensions/strides */ -#define NCACHE 7 /* number of cache entries per bucket */ +/* Do not enable the alloc cache if the GIL is disabled, or if ASAN or MSAN + * instrumentation is enabled. The cache makes ASAN use-after-free or MSAN + * use-of-uninitialized-memory warnings less useful. 
*/ +#define USE_ALLOC_CACHE 1 +#ifdef Py_GIL_DISABLED +# define USE_ALLOC_CACHE 0 +#elif defined(__has_feature) +# if __has_feature(address_sanitizer) || __has_feature(memory_sanitizer) +# define USE_ALLOC_CACHE 0 +# endif +#endif + +#if USE_ALLOC_CACHE +# define NBUCKETS 1024 /* number of buckets for data*/ +# define NBUCKETS_DIM 16 /* number of buckets for dimensions/strides */ +# define NCACHE 7 /* number of cache entries per bucket */ /* this structure fits neatly into a cacheline */ typedef struct { npy_uintp available; /* number of cached pointers */ @@ -37,7 +50,7 @@ typedef struct { } cache_bucket; static cache_bucket datacache[NBUCKETS]; static cache_bucket dimcache[NBUCKETS_DIM]; - +#endif /* USE_ALLOC_CACHE */ /* * This function tells whether NumPy attempts to call `madvise` with @@ -99,20 +112,6 @@ indicate_hugepages(void *p, size_t size) { } -/* Do not enable the alloc cache if the GIL is disabled, or if ASAN or MSAN - * instrumentation is enabled. The cache makes ASAN use-after-free or MSAN - * use-of-uninitialized-memory warnings less useful. 
*/ -#ifdef Py_GIL_DISABLED -#define USE_ALLOC_CACHE 0 -#elif defined(__has_feature) -# if __has_feature(address_sanitizer) || __has_feature(memory_sanitizer) -# define USE_ALLOC_CACHE 0 -# endif -#else -#define USE_ALLOC_CACHE 1 -#endif - - /* as the cache is managed in global variables verify the GIL is held */ /* From e57bb2b429fab1b8af27a85182d1ebebf3ea80aa Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Fri, 23 May 2025 17:31:42 +0200 Subject: [PATCH 0032/1718] ok, don't attempt to remove the statics (too ingrained) --- numpy/_core/src/multiarray/alloc.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/numpy/_core/src/multiarray/alloc.c b/numpy/_core/src/multiarray/alloc.c index 64c2b854167d..cc9c5762a196 100644 --- a/numpy/_core/src/multiarray/alloc.c +++ b/numpy/_core/src/multiarray/alloc.c @@ -39,7 +39,6 @@ # endif #endif -#if USE_ALLOC_CACHE # define NBUCKETS 1024 /* number of buckets for data*/ # define NBUCKETS_DIM 16 /* number of buckets for dimensions/strides */ # define NCACHE 7 /* number of cache entries per bucket */ @@ -50,7 +49,6 @@ typedef struct { } cache_bucket; static cache_bucket datacache[NBUCKETS]; static cache_bucket dimcache[NBUCKETS_DIM]; -#endif /* USE_ALLOC_CACHE */ /* * This function tells whether NumPy attempts to call `madvise` with From 06f4d5ed1b854869d4e9b1a3df7e9a7b9e60e77d Mon Sep 17 00:00:00 2001 From: lvllvl <24905907+lvllvl@users.noreply.github.com> Date: Fri, 23 May 2025 11:48:01 -0400 Subject: [PATCH 0033/1718] ENH: add __array_function__ protocol in polynomial (#28996) --- numpy/polynomial/polynomial.py | 9 ++++++++- numpy/polynomial/tests/test_polynomial.py | 22 ++++++++++++++++++++++ 2 files changed, 30 insertions(+), 1 deletion(-) diff --git a/numpy/polynomial/polynomial.py b/numpy/polynomial/polynomial.py index 32b53b757a1c..6ec0dc58a1de 100644 --- a/numpy/polynomial/polynomial.py +++ b/numpy/polynomial/polynomial.py @@ -82,6 +82,7 @@ import numpy as np import numpy.linalg as la +from numpy._core.overrides import 
array_function_dispatch from numpy.lib.array_utils import normalize_axis_index from . import polyutils as pu @@ -841,7 +842,13 @@ def polyvalfromroots(x, r, tensor=True): raise ValueError("x.ndim must be < r.ndim when tensor == False") return np.prod(x - r, axis=0) +def _polyval2d_dispatcher(x, y, c): + return (x, y, c) +def _polygrid2d_dispatcher(x, y, c): + return (x, y, c) + +@array_function_dispatch(_polyval2d_dispatcher) def polyval2d(x, y, c): """ Evaluate a 2-D polynomial at points (x, y). @@ -893,7 +900,7 @@ def polyval2d(x, y, c): """ return pu._valnd(polyval, c, x, y) - +@array_function_dispatch(_polygrid2d_dispatcher) def polygrid2d(x, y, c): """ Evaluate a 2-D polynomial on the Cartesian product of x and y. diff --git a/numpy/polynomial/tests/test_polynomial.py b/numpy/polynomial/tests/test_polynomial.py index 27513fd682e8..8bfa3c184cf7 100644 --- a/numpy/polynomial/tests/test_polynomial.py +++ b/numpy/polynomial/tests/test_polynomial.py @@ -667,3 +667,25 @@ def test_result_type(self): arr = np.polydiv(1, np.float32(1)) assert_equal(arr[0].dtype, np.float64) + +class ArrayFunctionInterceptor: + def __init__(self): + self.called = False + + def __array_function__(self, func, types, args, kwargs): + self.called = True + return "intercepted" + +def test_polyval2d_array_function_hook(): + x = ArrayFunctionInterceptor() + y = ArrayFunctionInterceptor() + c = ArrayFunctionInterceptor() + result = np.polynomial.polynomial.polyval2d(x, y, c) + assert result == "intercepted" + +def test_polygrid2d_array_function_hook(): + x = ArrayFunctionInterceptor() + y = ArrayFunctionInterceptor() + c = ArrayFunctionInterceptor() + result = np.polynomial.polynomial.polygrid2d(x, y, c) + assert result == "intercepted" From 4c1eb7076a7623937097544cdd6a4ae5fc1aee6d Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 25 May 2025 03:23:48 +0200 Subject: [PATCH 0034/1718] TYP: annotate strings.slice --- numpy/_core/strings.pyi | 15 +++++++++++++++ numpy/strings/__init__.pyi | 2 ++ 
numpy/typing/tests/data/reveal/strings.pyi | 4 ++++ 3 files changed, 21 insertions(+) diff --git a/numpy/_core/strings.pyi b/numpy/_core/strings.pyi index 52d244b36ccd..b187ce71d25c 100644 --- a/numpy/_core/strings.pyi +++ b/numpy/_core/strings.pyi @@ -54,6 +54,7 @@ __all__ = [ "translate", "upper", "zfill", + "slice", ] _StringDTypeArray: TypeAlias = np.ndarray[_AnyShape, np.dtypes.StringDType] @@ -494,3 +495,17 @@ def translate( table: str, deletechars: str | None = None, ) -> _StringDTypeOrUnicodeArray: ... + +# +@overload +def slice(a: U_co, start: i_co | None = None, stop: i_co | None = None, step: i_co | None = None, /) -> NDArray[np.str_]: ... # type: ignore[overload-overlap] +@overload +def slice(a: S_co, start: i_co | None = None, stop: i_co | None = None, step: i_co | None = None, /) -> NDArray[np.bytes_]: ... +@overload +def slice( + a: _StringDTypeSupportsArray, start: i_co | None = None, stop: i_co | None = None, step: i_co | None = None, / +) -> _StringDTypeArray: ... +@overload +def slice( + a: T_co, start: i_co | None = None, stop: i_co | None = None, step: i_co | None = None, / +) -> _StringDTypeOrUnicodeArray: ... 
diff --git a/numpy/strings/__init__.pyi b/numpy/strings/__init__.pyi index 2f924dca0b5d..b2fb363531d4 100644 --- a/numpy/strings/__init__.pyi +++ b/numpy/strings/__init__.pyi @@ -36,6 +36,7 @@ from numpy._core.strings import ( rjust, rpartition, rstrip, + slice, startswith, str_len, strip, @@ -92,4 +93,5 @@ __all__ = [ "decode", "encode", "translate", + "slice", ] diff --git a/numpy/typing/tests/data/reveal/strings.pyi b/numpy/typing/tests/data/reveal/strings.pyi index 746e804ce577..18bd252d5ff9 100644 --- a/numpy/typing/tests/data/reveal/strings.pyi +++ b/numpy/typing/tests/data/reveal/strings.pyi @@ -190,3 +190,7 @@ assert_type(np.strings.str_len(AR_T), npt.NDArray[np.int_]) assert_type(np.strings.translate(AR_U, ""), npt.NDArray[np.str_]) assert_type(np.strings.translate(AR_S, ""), npt.NDArray[np.bytes_]) assert_type(np.strings.translate(AR_T, ""), AR_T_alias) + +assert_type(np.strings.slice(AR_U, 1, 5, 2), npt.NDArray[np.str_]) +assert_type(np.strings.slice(AR_S, 1, 5, 2), npt.NDArray[np.bytes_]) +assert_type(np.strings.slice(AR_T, 1, 5, 2), AR_T_alias) From aaae32e45830a9b3d9c026d5e2d79390ccda50e6 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 25 May 2025 18:48:58 +0200 Subject: [PATCH 0035/1718] TYP: remove expired ``tostring`` methods --- numpy/lib/_user_array_impl.pyi | 4 +--- numpy/ma/core.pyi | 4 +--- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/numpy/lib/_user_array_impl.pyi b/numpy/lib/_user_array_impl.pyi index c1c72b2320f1..13c0a0163421 100644 --- a/numpy/lib/_user_array_impl.pyi +++ b/numpy/lib/_user_array_impl.pyi @@ -2,7 +2,7 @@ from types import EllipsisType from typing import Any, Generic, Self, SupportsIndex, TypeAlias, overload from _typeshed import Incomplete -from typing_extensions import TypeVar, deprecated, override +from typing_extensions import TypeVar, override import numpy as np import numpy.typing as npt @@ -220,8 +220,6 @@ class container(Generic[_ShapeT_co, _DTypeT_co]): # def copy(self, /) -> Self: ... 
- @deprecated("tostring() is deprecated. Use tobytes() instead.") - def tostring(self, /) -> bytes: ... def tobytes(self, /) -> bytes: ... def byteswap(self, /) -> Self: ... def astype(self, /, typecode: _DTypeLike[_ScalarT]) -> container[_ShapeT_co, np.dtype[_ScalarT]]: ... diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index f457f18d57bd..7e87611037b9 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -5,7 +5,7 @@ from collections.abc import Sequence from typing import Any, Literal, Self, SupportsIndex, TypeAlias, overload from _typeshed import Incomplete -from typing_extensions import TypeIs, TypeVar, deprecated +from typing_extensions import TypeIs, TypeVar import numpy as np from numpy import ( @@ -1057,8 +1057,6 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def toflex(self) -> Incomplete: ... def torecords(self) -> Incomplete: ... def tolist(self, fill_value: Incomplete | None = None) -> Incomplete: ... - @deprecated("tostring() is deprecated. Use tobytes() instead.") - def tostring(self, /, fill_value: Incomplete | None = None, order: _OrderKACF = "C") -> bytes: ... # type: ignore[override] def tobytes(self, /, fill_value: Incomplete | None = None, order: _OrderKACF = "C") -> bytes: ... # type: ignore[override] def tofile(self, /, fid: Incomplete, sep: str = "", format: str = "%s") -> Incomplete: ... 
From 8ba2c63d1f30fc2aece18fcad78597d2a8fa94b6 Mon Sep 17 00:00:00 2001 From: mattip Date: Sun, 25 May 2025 21:11:10 +0300 Subject: [PATCH 0036/1718] use pypy 3.11 nightly which has a fix for ctypeslib [skip azp][skip circleci] --- .github/workflows/linux.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index dc9ef34db71d..742ca5c34144 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -82,7 +82,7 @@ jobs: persist-credentials: false - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 with: - python-version: 'pypy3.11-v7.3.19' + python-version: 'pypy3.11-nightly' - name: Setup using scipy-openblas run: | python -m pip install -r requirements/ci_requirements.txt From 3d4629c9de79fe95d7f3d0724693c875d91d48f8 Mon Sep 17 00:00:00 2001 From: Zebreus Date: Mon, 26 May 2025 09:24:10 +0200 Subject: [PATCH 0037/1718] BLD: allow targeting webassembly without emscripten --- numpy/_core/include/numpy/npy_cpu.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/numpy/_core/include/numpy/npy_cpu.h b/numpy/_core/include/numpy/npy_cpu.h index 4fb3fb406869..72f7331a0267 100644 --- a/numpy/_core/include/numpy/npy_cpu.h +++ b/numpy/_core/include/numpy/npy_cpu.h @@ -111,8 +111,9 @@ #endif #elif defined(__loongarch_lp64) #define NPY_CPU_LOONGARCH64 -#elif defined(__EMSCRIPTEN__) +#elif defined(__EMSCRIPTEN__) || defined(__wasm__) /* __EMSCRIPTEN__ is defined by emscripten: an LLVM-to-Web compiler */ + /* __wasm__ is defined by clang when targeting wasm */ #define NPY_CPU_WASM #else #error Unknown CPU, please report this to numpy maintainers with \ From 8389ff9d1e6cbe7e922592e2f3d27913cb2e2e99 Mon Sep 17 00:00:00 2001 From: jorenham Date: Mon, 26 May 2025 22:19:08 +0200 Subject: [PATCH 0038/1718] TYP: fix invalid overload definition in ``_core.defchararray.add`` --- numpy/_core/defchararray.pyi | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/numpy/_core/defchararray.pyi b/numpy/_core/defchararray.pyi index 776962c53998..43005745bfab 100644 --- a/numpy/_core/defchararray.pyi +++ b/numpy/_core/defchararray.pyi @@ -555,7 +555,7 @@ def add(x1: S_co, x2: S_co) -> NDArray[np.bytes_]: ... @overload def add(x1: _StringDTypeSupportsArray, x2: _StringDTypeSupportsArray) -> _StringDTypeArray: ... @overload -def add(x1: T_co, T_co) -> _StringDTypeOrUnicodeArray: ... +def add(x1: T_co, x2: T_co) -> _StringDTypeOrUnicodeArray: ... @overload def multiply(a: U_co, i: i_co) -> NDArray[np.str_]: ... From 4cce8e4dbdf58ed8851d0ddbdf37bca14fe3d9f7 Mon Sep 17 00:00:00 2001 From: jorenham Date: Mon, 26 May 2025 22:27:15 +0200 Subject: [PATCH 0039/1718] TYP: annotate the ``*args`` and ``**kwargs`` of the ``ufunc`` methods --- numpy/_typing/_ufunc.pyi | 38 +++++++++++++++++++------------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/numpy/_typing/_ufunc.pyi b/numpy/_typing/_ufunc.pyi index 766cde1ad420..104307da89db 100644 --- a/numpy/_typing/_ufunc.pyi +++ b/numpy/_typing/_ufunc.pyi @@ -146,10 +146,10 @@ class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i /, ) -> None: ... - def reduce(self, *args, **kwargs) -> NoReturn: ... - def accumulate(self, *args, **kwargs) -> NoReturn: ... - def reduceat(self, *args, **kwargs) -> NoReturn: ... - def outer(self, *args, **kwargs) -> NoReturn: ... + def reduce(self, *args: object, **kwargs: object) -> NoReturn: ... + def accumulate(self, *args: object, **kwargs: object) -> NoReturn: ... + def reduceat(self, *args: object, **kwargs: object) -> NoReturn: ... + def outer(self, *args: object, **kwargs: object) -> NoReturn: ... @type_check_only class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] @@ -382,11 +382,11 @@ class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i signature: str | _3Tuple[str | None] = ..., ) -> _2Tuple[Any]: ... 
- def at(self, *args, **kwargs) -> NoReturn: ... - def reduce(self, *args, **kwargs) -> NoReturn: ... - def accumulate(self, *args, **kwargs) -> NoReturn: ... - def reduceat(self, *args, **kwargs) -> NoReturn: ... - def outer(self, *args, **kwargs) -> NoReturn: ... + def at(self, *args: object, **kwargs: object) -> NoReturn: ... + def reduce(self, *args: object, **kwargs: object) -> NoReturn: ... + def accumulate(self, *args: object, **kwargs: object) -> NoReturn: ... + def reduceat(self, *args: object, **kwargs: object) -> NoReturn: ... + def outer(self, *args: object, **kwargs: object) -> NoReturn: ... @type_check_only class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] @@ -439,11 +439,11 @@ class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i signature: str | _4Tuple[str | None] = ..., ) -> _2Tuple[NDArray[Any]]: ... - def at(self, *args, **kwargs) -> NoReturn: ... - def reduce(self, *args, **kwargs) -> NoReturn: ... - def accumulate(self, *args, **kwargs) -> NoReturn: ... - def reduceat(self, *args, **kwargs) -> NoReturn: ... - def outer(self, *args, **kwargs) -> NoReturn: ... + def at(self, *args: object, **kwargs: object) -> NoReturn: ... + def reduce(self, *args: object, **kwargs: object) -> NoReturn: ... + def accumulate(self, *args: object, **kwargs: object) -> NoReturn: ... + def reduceat(self, *args: object, **kwargs: object) -> NoReturn: ... + def outer(self, *args: object, **kwargs: object) -> NoReturn: ... @type_check_only class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType, _Signature]): # type: ignore[misc] @@ -494,11 +494,11 @@ class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType, _Signature] axes: list[_2Tuple[SupportsIndex]] = ..., ) -> NDArray[Any]: ... - def at(self, *args, **kwargs) -> NoReturn: ... - def reduce(self, *args, **kwargs) -> NoReturn: ... - def accumulate(self, *args, **kwargs) -> NoReturn: ... 
- def reduceat(self, *args, **kwargs) -> NoReturn: ... - def outer(self, *args, **kwargs) -> NoReturn: ... + def at(self, *args: object, **kwargs: object) -> NoReturn: ... + def reduce(self, *args: object, **kwargs: object) -> NoReturn: ... + def accumulate(self, *args: object, **kwargs: object) -> NoReturn: ... + def reduceat(self, *args: object, **kwargs: object) -> NoReturn: ... + def outer(self, *args: object, **kwargs: object) -> NoReturn: ... @type_check_only class _PyFunc_Kwargs_Nargs2(TypedDict, total=False): From 466f64f961b9bc0845a7d4e69e9186c13b2012c7 Mon Sep 17 00:00:00 2001 From: jorenham Date: Mon, 26 May 2025 22:27:54 +0200 Subject: [PATCH 0040/1718] TYP: annotate the return type of ``numpy.typing.__getattr__`` --- numpy/typing/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/typing/__init__.py b/numpy/typing/__init__.py index 173c094b40aa..163655bd7662 100644 --- a/numpy/typing/__init__.py +++ b/numpy/typing/__init__.py @@ -169,7 +169,7 @@ def __dir__() -> list[str]: return __DIR -def __getattr__(name: str): +def __getattr__(name: str) -> object: if name == "NBitBase": import warnings From 1707fa13355d32b487b1f628a8da9925be136581 Mon Sep 17 00:00:00 2001 From: jorenham Date: Mon, 26 May 2025 22:29:09 +0200 Subject: [PATCH 0041/1718] TYP: annotate ``numpy.lib._format_impl`` --- numpy/lib/_format_impl.pyi | 73 +++++++++++++++++++++++++++----------- 1 file changed, 52 insertions(+), 21 deletions(-) diff --git a/numpy/lib/_format_impl.pyi b/numpy/lib/_format_impl.pyi index f4898d9aefa4..870c2d761bb0 100644 --- a/numpy/lib/_format_impl.pyi +++ b/numpy/lib/_format_impl.pyi @@ -1,26 +1,57 @@ -from typing import Final, Literal +import os +from typing import Any, BinaryIO, Final, TypeAlias, TypeGuard -from numpy.lib._utils_impl import drop_metadata # noqa: F401 +from _typeshed import SupportsRead, SupportsWrite + +import numpy as np +import numpy.typing as npt +from numpy.lib._utils_impl import drop_metadata as 
drop_metadata __all__: list[str] = [] -EXPECTED_KEYS: Final[set[str]] -MAGIC_PREFIX: Final[bytes] -MAGIC_LEN: Literal[8] -ARRAY_ALIGN: Literal[64] -BUFFER_SIZE: Literal[262144] # 2**18 -GROWTH_AXIS_MAX_DIGITS: Literal[21] +_DTypeDescr: TypeAlias = list[tuple[str, str]] | list[tuple[str, str, tuple[int, ...]]] + +### + +EXPECTED_KEYS: Final[set[str]] = ... +MAGIC_PREFIX: Final = b"\x93NUMPY" +MAGIC_LEN: Final = 8 +ARRAY_ALIGN: Final = 64 +BUFFER_SIZE: Final = 262_144 # 1 << 18 +GROWTH_AXIS_MAX_DIGITS: Final = 21 +_MAX_HEADER_SIZE: Final = 10_000 -def magic(major, minor): ... -def read_magic(fp): ... -def dtype_to_descr(dtype): ... -def descr_to_dtype(descr): ... -def header_data_from_array_1_0(array): ... -def write_array_header_1_0(fp, d): ... -def write_array_header_2_0(fp, d): ... -def read_array_header_1_0(fp): ... -def read_array_header_2_0(fp): ... -def write_array(fp, array, version=..., allow_pickle=..., pickle_kwargs=...): ... -def read_array(fp, allow_pickle=..., pickle_kwargs=...): ... -def open_memmap(filename, mode=..., dtype=..., shape=..., fortran_order=..., version=...): ... -def isfileobj(f): ... +def magic(major: int, minor: int) -> bytes: ... +def read_magic(fp: SupportsRead[bytes]) -> tuple[int, int]: ... +def dtype_to_descr(dtype: np.dtype) -> _DTypeDescr: ... +def descr_to_dtype(descr: _DTypeDescr) -> np.dtype: ... +def header_data_from_array_1_0(array: np.ndarray) -> dict[str, Any]: ... +def write_array_header_1_0(fp: SupportsWrite[bytes], d: dict[str, Any]) -> None: ... +def write_array_header_2_0(fp: SupportsWrite[bytes], d: dict[str, Any]) -> None: ... +def read_array_header_1_0(fp: SupportsRead[bytes], max_header_size: int = 10_000) -> tuple[tuple[int, ...], bool, np.dtype]: ... +def read_array_header_2_0(fp: SupportsRead[bytes], max_header_size: int = 10_000) -> tuple[tuple[int, ...], bool, np.dtype]: ... 
+def write_array( + fp: SupportsWrite[bytes], + array: np.ndarray, + version: tuple[int, int] | None = None, + allow_pickle: bool = True, + pickle_kwargs: dict[str, Any] | None = None, +) -> None: ... +def read_array( + fp: SupportsRead[bytes], + allow_pickle: bool = False, + pickle_kwargs: dict[str, Any] | None = None, + *, + max_header_size: int = 10_000, +) -> np.ndarray: ... +def open_memmap( + filename: str | os.PathLike[Any], + mode: str = "r+", + dtype: npt.DTypeLike | None = None, + shape: tuple[int, ...] | None = None, + fortran_order: bool = False, + version: tuple[int, int] | None = None, + *, + max_header_size: int = 10_000, +) -> np.memmap: ... +def isfileobj(f: object) -> TypeGuard[BinaryIO]: ... # don't use `typing.TypeIs` From 436ec159fad4aafd99bba4543ae9aa925e64385f Mon Sep 17 00:00:00 2001 From: mattip Date: Tue, 27 May 2025 07:02:02 +0300 Subject: [PATCH 0042/1718] BLD: use sonoma image on Cirrus for [wheel build][skip actions][skip azp] --- tools/ci/cirrus_wheels.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/ci/cirrus_wheels.yml b/tools/ci/cirrus_wheels.yml index b531e953daee..6d02411df2e9 100644 --- a/tools/ci/cirrus_wheels.yml +++ b/tools/ci/cirrus_wheels.yml @@ -11,7 +11,7 @@ macosx_arm64_task: CIRRUS_CLONE_SUBMODULES: true macos_instance: matrix: - image: ghcr.io/cirruslabs/macos-monterey-xcode + image: ghcr.io/cirruslabs/macos-runner:sonoma matrix: - env: From fba2e60fb95891e8e044113afe0a321405f581ea Mon Sep 17 00:00:00 2001 From: Hamza Meel Date: Tue, 27 May 2025 11:07:56 +0200 Subject: [PATCH 0043/1718] DOC: fix typo in documentation of vecmat The body of the summary uses the symbol v to reference the `x1` parameter, however in the displayed math, b is used instead. This commit changes b to v in the displayed math for consistency.
--- numpy/_core/code_generators/ufunc_docstrings.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/code_generators/ufunc_docstrings.py b/numpy/_core/code_generators/ufunc_docstrings.py index 5d3ba73c92f0..aa9fe4acea05 100644 --- a/numpy/_core/code_generators/ufunc_docstrings.py +++ b/numpy/_core/code_generators/ufunc_docstrings.py @@ -3030,7 +3030,7 @@ def add_newdoc(place, name, doc): vector-matrix product is defined as: .. math:: - \\mathbf{b} \\cdot \\mathbf{A} = \\sum_{i=0}^{n-1} \\overline{v_i}A_{ij} + \\mathbf{v} \\cdot \\mathbf{A} = \\sum_{i=0}^{n-1} \\overline{v_i}A_{ij} where the sum is over the last dimension of ``x1`` and the one-but-last dimensions in ``x2`` (unless `axes` is specified) and where From 39a422230997bfb282503b74b39a26bc75be9a85 Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Tue, 27 May 2025 11:30:44 +0200 Subject: [PATCH 0044/1718] MAINT: Enforce ruff E501 --- benchmarks/benchmarks/bench_core.py | 3 ++- benchmarks/benchmarks/bench_function_base.py | 3 ++- benchmarks/benchmarks/bench_ufunc_strides.py | 5 +++-- numpy/_typing/_dtype_like.py | 2 +- numpy/tests/test_public_api.py | 2 +- ruff.toml | 9 ++++----- 6 files changed, 13 insertions(+), 11 deletions(-) diff --git a/benchmarks/benchmarks/bench_core.py b/benchmarks/benchmarks/bench_core.py index 5e174704f105..a9a6c88b87a0 100644 --- a/benchmarks/benchmarks/bench_core.py +++ b/benchmarks/benchmarks/bench_core.py @@ -151,7 +151,8 @@ class CountNonzero(Benchmark): params = [ [1, 2, 3], [100, 10000, 1000000], - [bool, np.int8, np.int16, np.int32, np.int64, np.float32, np.float64, str, object] + [bool, np.int8, np.int16, np.int32, np.int64, np.float32, + np.float64, str, object] ] def setup(self, numaxes, size, dtype): diff --git a/benchmarks/benchmarks/bench_function_base.py b/benchmarks/benchmarks/bench_function_base.py index 57499dc761f8..9b770aeb60bf 100644 --- a/benchmarks/benchmarks/bench_function_base.py +++ 
b/benchmarks/benchmarks/bench_function_base.py @@ -241,7 +241,8 @@ class Sort(Benchmark): def setup(self, kind, dtype, array_type): rnd = np.random.RandomState(507582308) array_class = array_type[0] - self.arr = getattr(SortGenerator, array_class)(self.ARRAY_SIZE, dtype, *array_type[1:], rnd) + generate_array_method = getattr(SortGenerator, array_class) + self.arr = generate_array_method(self.ARRAY_SIZE, dtype, *array_type[1:], rnd) def time_sort(self, kind, dtype, array_type): # Using np.sort(...) instead of arr.sort(...) because it makes a copy. diff --git a/benchmarks/benchmarks/bench_ufunc_strides.py b/benchmarks/benchmarks/bench_ufunc_strides.py index b86be87f9e68..95df16e2cb5e 100644 --- a/benchmarks/benchmarks/bench_ufunc_strides.py +++ b/benchmarks/benchmarks/bench_ufunc_strides.py @@ -208,8 +208,9 @@ def train(self, max_epoch): for epoch in range(max_epoch): z = np.matmul(self.X_train, self.W) A = 1 / (1 + np.exp(-z)) # sigmoid(z) - loss = -np.mean(self.Y_train * np.log(A) + (1 - self.Y_train) * np.log(1 - A)) - dz = A - self.Y_train + Y_train = self.Y_train + loss = -np.mean(Y_train * np.log(A) + (1 - Y_train) * np.log(1 - A)) + dz = A - Y_train dw = (1 / self.size) * np.matmul(self.X_train.T, dz) self.W = self.W - self.alpha * dw diff --git a/numpy/_typing/_dtype_like.py b/numpy/_typing/_dtype_like.py index c406b3098384..d341db5dc23a 100644 --- a/numpy/_typing/_dtype_like.py +++ b/numpy/_typing/_dtype_like.py @@ -55,7 +55,7 @@ def dtype(self) -> _DTypeT_co: ... # A subset of `npt.DTypeLike` that can be parametrized w.r.t. 
`np.generic` -_DTypeLike: TypeAlias = type[_ScalarT] | np.dtype[_ScalarT] | _SupportsDType[np.dtype[_ScalarT]] +_DTypeLike: TypeAlias = type[_ScalarT] | np.dtype[_ScalarT] | _SupportsDType[np.dtype[_ScalarT]] # noqa: E501 # Would create a dtype[np.void] diff --git a/numpy/tests/test_public_api.py b/numpy/tests/test_public_api.py index a56cd13296e3..6a36358c3a06 100644 --- a/numpy/tests/test_public_api.py +++ b/numpy/tests/test_public_api.py @@ -780,7 +780,7 @@ def test___qualname___and___module___attribute(): inspect.ismodule(member) and # it's a module "numpy" in member.__name__ and # inside NumPy not member_name.startswith("_") and # not private - member_name not in {"tests", "typing"} and # 2024-12: type names don't match + member_name not in {"tests", "typing"} and # type names don't match "numpy._core" not in member.__name__ and # outside _core member not in visited_modules # not visited yet ): diff --git a/ruff.toml b/ruff.toml index 6b05d8de69ee..c0008da43226 100644 --- a/ruff.toml +++ b/ruff.toml @@ -68,16 +68,15 @@ ignore = [ "_tempita.py" = ["B909"] "bench_*.py" = ["B015", "B018"] "test*.py" = ["B015", "B018", "E201", "E714"] -"benchmarks/*py" = ["E501"] +"benchmarks/benchmarks/bench_linalg.py" = ["E501"] "numpy/_core/tests/**" = ["E501"] "numpy/_core/_add_newdocs.py" = ["E501"] "numpy/_core/_add_newdocs_scalars.py" = ["E501"] "numpy/_core/code_generators/generate_umath.py" = ["E501"] -"numpy/_typing/*py" = ["E501"] +"numpy/_typing/_array_like.py" = ["E501"] "numpy/lib/tests/*py" = ["E501"] -"numpy/linalg/tests/*py" = ["E501"] -"numpy/ma/tests/*py" = ["E501"] -"numpy/tests/*py" = ["E501"] +"numpy/linalg/tests/test_linalg.py" = ["E501"] +"numpy/tests/test_configtool.py" = ["E501"] "numpy*pyi" = ["E501"] "numpy/f2py/*py" = ["E501"] "__init__.py" = ["F401", "F403", "F405"] From f8b37c123485032260319f8f79ede1d6ed7e7694 Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Tue, 27 May 2025 12:13:12 +0200 Subject: [PATCH 0045/1718] enforce more files --- 
numpy/lib/tests/test_format.py | 3 --- numpy/lib/tests/test_histograms.py | 8 ++++---- numpy/lib/tests/test_recfunctions.py | 5 ++--- ruff.toml | 5 ++++- 4 files changed, 10 insertions(+), 11 deletions(-) diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py index d805d3493ca4..2ab7026ccc7c 100644 --- a/numpy/lib/tests/test_format.py +++ b/numpy/lib/tests/test_format.py @@ -384,9 +384,6 @@ ('z', 'u1')] NbufferT = [ - # x Info color info y z - # value y2 Info2 name z2 Name Value - # name value y3 z3 ([3, 2], (6j, 6., ('nn', [6j, 4j], [6., 4.], [1, 2]), 'NN', True), 'cc', ('NN', 6j), [[6., 4.], [6., 4.]], 8), ([4, 3], (7j, 7., ('oo', [7j, 5j], [7., 5.], [2, 1]), 'OO', False), diff --git a/numpy/lib/tests/test_histograms.py b/numpy/lib/tests/test_histograms.py index b7752d1a8f1e..be5268d9813a 100644 --- a/numpy/lib/tests/test_histograms.py +++ b/numpy/lib/tests/test_histograms.py @@ -554,7 +554,7 @@ def test_outlier(self): assert_equal(len(a), numbins) def test_scott_vs_stone(self): - """Verify that Scott's rule and Stone's rule converges for normally distributed data""" + """Verify that Scott's rule and Stone's rule converges for normally distributed data""" # noqa: E501 def nbins_ratio(seed, size): rng = np.random.RandomState(seed) @@ -562,10 +562,10 @@ def nbins_ratio(seed, size): a, b = len(np.histogram(x, 'stone')[0]), len(np.histogram(x, 'scott')[0]) return a / (a + b) - ll = [[nbins_ratio(seed, size) for size in np.geomspace(start=10, stop=100, num=4).round().astype(int)] - for seed in range(10)] + geom_space = np.geomspace(start=10, stop=100, num=4).round().astype(int) + ll = [[nbins_ratio(seed, size) for size in geom_space] for seed in range(10)] - # the average difference between the two methods decreases as the dataset size increases. + # the average difference between the two methods decreases as the dataset size increases. 
# noqa: E501 avg = abs(np.mean(ll, axis=0) - 0.5) assert_almost_equal(avg, [0.15, 0.09, 0.08, 0.03], decimal=2) diff --git a/numpy/lib/tests/test_recfunctions.py b/numpy/lib/tests/test_recfunctions.py index eee1f47f834f..72377b8f7c35 100644 --- a/numpy/lib/tests/test_recfunctions.py +++ b/numpy/lib/tests/test_recfunctions.py @@ -524,9 +524,8 @@ def test_flatten_wflexible(self): assert_equal(test, control) test = merge_arrays((x, w), flatten=False) - controldtype = [('f0', int), - ('f1', [('a', int), - ('b', [('ba', float), ('bb', int), ('bc', [])])])] + f1_descr = [('a', int), ('b', [('ba', float), ('bb', int), ('bc', [])])] + controldtype = [('f0', int), ('f1', f1_descr)] control = np.array([(1., (1, (2, 3.0, ()))), (2, (4, (5, 6.0, ())))], dtype=controldtype) assert_equal(test, control) diff --git a/ruff.toml b/ruff.toml index c0008da43226..7ea90ee57b69 100644 --- a/ruff.toml +++ b/ruff.toml @@ -74,7 +74,10 @@ ignore = [ "numpy/_core/_add_newdocs_scalars.py" = ["E501"] "numpy/_core/code_generators/generate_umath.py" = ["E501"] "numpy/_typing/_array_like.py" = ["E501"] -"numpy/lib/tests/*py" = ["E501"] +"numpy/lib/tests/test_function_base.py" = ["E501"] +"numpy/lib/tests/test_format.py" = ["E501"] +"numpy/lib/tests/test_io.py" = ["E501"] +"numpy/lib/tests/test_polynomial.py" = ["E501"] "numpy/linalg/tests/test_linalg.py" = ["E501"] "numpy/tests/test_configtool.py" = ["E501"] "numpy*pyi" = ["E501"] From 9ea773dec671da1c08068250d64813098d55bb3b Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Tue, 27 May 2025 12:24:05 +0200 Subject: [PATCH 0046/1718] more enforcement --- numpy/_core/tests/test_cython.py | 3 ++- numpy/_core/tests/test_function_base.py | 3 ++- numpy/_core/tests/test_half.py | 2 +- numpy/_core/tests/test_indexing.py | 2 +- numpy/_core/tests/test_mem_overlap.py | 4 ++-- numpy/_core/tests/test_scalarbuffer.py | 4 ++-- numpy/_core/tests/test_unicode.py | 3 ++- ruff.toml | 19 ++++++++++++++++++- 8 files changed, 30 insertions(+), 10 deletions(-) diff 
--git a/numpy/_core/tests/test_cython.py b/numpy/_core/tests/test_cython.py index fb3839fd2685..2c7b40c5614c 100644 --- a/numpy/_core/tests/test_cython.py +++ b/numpy/_core/tests/test_cython.py @@ -345,7 +345,8 @@ def test_npystring_allocators_other_dtype(install_temp): assert checks.npystring_allocators_other_types(arr1, arr2) == 0 -@pytest.mark.skipif(sysconfig.get_platform() == 'win-arm64', reason='no checks module on win-arm64') +@pytest.mark.skipif(sysconfig.get_platform() == 'win-arm64', + reason='no checks module on win-arm64') def test_npy_uintp_type_enum(): import checks assert checks.check_npy_uintp_type_enum() diff --git a/numpy/_core/tests/test_function_base.py b/numpy/_core/tests/test_function_base.py index c925cf1f77e5..3a8552de2d36 100644 --- a/numpy/_core/tests/test_function_base.py +++ b/numpy/_core/tests/test_function_base.py @@ -32,7 +32,8 @@ def _is_armhf(): # Check if the current platform is ARMHF (32-bit ARM architecture) - return platform.machine().startswith('arm') and platform.architecture()[0] == '32bit' + architecture = platform.architecture() + return platform.machine().startswith('arm') and architecture[0] == '32bit' class PhysicalQuantity(float): def __new__(cls, value): diff --git a/numpy/_core/tests/test_half.py b/numpy/_core/tests/test_half.py index 68f17b2a5e14..e2d6e6796db4 100644 --- a/numpy/_core/tests/test_half.py +++ b/numpy/_core/tests/test_half.py @@ -531,7 +531,7 @@ def test_half_fpe(self): assert_raises_fpe('overflow', lambda a, b: a - b, float16(-65504), float16(17)) assert_raises_fpe('overflow', np.nextafter, float16(65504), float16(np.inf)) - assert_raises_fpe('overflow', np.nextafter, float16(-65504), float16(-np.inf)) + assert_raises_fpe('overflow', np.nextafter, float16(-65504), float16(-np.inf)) # noqa: E501 assert_raises_fpe('overflow', np.spacing, float16(65504)) # Invalid value errors diff --git a/numpy/_core/tests/test_indexing.py b/numpy/_core/tests/test_indexing.py index e722d0c1a9df..1d42cde48682 100644 --- 
a/numpy/_core/tests/test_indexing.py +++ b/numpy/_core/tests/test_indexing.py @@ -1352,7 +1352,7 @@ def test_boolean_indexing_fast_path(self): "size of axis is 3 but size of corresponding boolean axis is 1", lambda: a[idx1]) - # This used to incorrectly give a ValueError: operands could not be broadcast together + # This used to incorrectly give a ValueError: operands could not be broadcast together # noqa: E501 idx2 = np.array([[False] * 8 + [True]]) assert_raises_regex(IndexError, "boolean index did not match indexed array along axis 0; " diff --git a/numpy/_core/tests/test_mem_overlap.py b/numpy/_core/tests/test_mem_overlap.py index d1735670ad6b..b437a7e14298 100644 --- a/numpy/_core/tests/test_mem_overlap.py +++ b/numpy/_core/tests/test_mem_overlap.py @@ -166,7 +166,7 @@ def check_may_share_memory_exact(a, b): err_msg = "" if got != exact: err_msg = " " + "\n ".join([ - f"base_a - base_b = {a.__array_interface__['data'][0] - b.__array_interface__['data'][0]!r}", + f"base_a - base_b = {a.__array_interface__['data'][0] - b.__array_interface__['data'][0]!r}", # noqa: E501 f"shape_a = {a.shape!r}", f"shape_b = {b.shape!r}", f"strides_a = {a.strides!r}", @@ -402,7 +402,7 @@ def check(A, U, exists=None): exists = (X is not None) if X is not None: - assert_(sum(a * x for a, x in zip(A, X)) == sum(a * u // 2 for a, u in zip(A, U))) + assert_(sum(a * x for a, x in zip(A, X)) == sum(a * u // 2 for a, u in zip(A, U))) # noqa: E501 assert_(all(0 <= x <= u for x, u in zip(X, U))) assert_(any(x != u // 2 for x, u in zip(X, U))) diff --git a/numpy/_core/tests/test_scalarbuffer.py b/numpy/_core/tests/test_scalarbuffer.py index 4d6b5bdd73fc..c957aec4f9b2 100644 --- a/numpy/_core/tests/test_scalarbuffer.py +++ b/numpy/_core/tests/test_scalarbuffer.py @@ -128,8 +128,8 @@ def test_str_ucs4(self, s): s = np.str_(s) # only our subclass implements the buffer protocol # all the same, characters always encode as ucs4 - expected = {'strides': (), 'itemsize': 8, 'ndim': 0, 'shape': (), 
'format': '2w', - 'readonly': True} + expected = {'strides': (), 'itemsize': 8, 'ndim': 0, 'shape': (), + 'format': '2w', 'readonly': True} v = memoryview(s) assert self._as_dict(v) == expected diff --git a/numpy/_core/tests/test_unicode.py b/numpy/_core/tests/test_unicode.py index 9fdc55b0e322..6a86503a35ae 100644 --- a/numpy/_core/tests/test_unicode.py +++ b/numpy/_core/tests/test_unicode.py @@ -134,7 +134,8 @@ def test_valuesSD(self): def test_valuesMD(self): # Check creation of multi-dimensional objects with values - ua = np.array([[[self.ucs_value * self.ulen] * 2] * 3] * 4, dtype=f'U{self.ulen}') + data = [[[self.ucs_value * self.ulen] * 2] * 3] * 4 + ua = np.array(data, dtype=f'U{self.ulen}') self.content_check(ua, ua[0, 0, 0], 4 * self.ulen * 2 * 3 * 4) self.content_check(ua, ua[-1, -1, -1], 4 * self.ulen * 2 * 3 * 4) diff --git a/ruff.toml b/ruff.toml index 7ea90ee57b69..b4190fe41787 100644 --- a/ruff.toml +++ b/ruff.toml @@ -69,7 +69,24 @@ ignore = [ "bench_*.py" = ["B015", "B018"] "test*.py" = ["B015", "B018", "E201", "E714"] "benchmarks/benchmarks/bench_linalg.py" = ["E501"] -"numpy/_core/tests/**" = ["E501"] +"numpy/_core/tests/test_api.py" = ["E501"] +"numpy/_core/tests/test_arrayprint.py" = ["E501"] +"numpy/_core/tests/test_cpu_dispatcher.py" = ["E501"] +"numpy/_core/tests/test_cpu_features.py" = ["E501"] +"numpy/_core/tests/test_datetime.py" = ["E501"] +"numpy/_core/tests/test_dtype.py" = ["E501"] +"numpy/_core/tests/test_defchararray.py" = ["E501"] +"numpy/_core/tests/test_einsum.py" = ["E501"] +"numpy/_core/tests/test_multiarray.py" = ["E501"] +"numpy/_core/tests/test_multithreading.py" = ["E501"] +"numpy/_core/tests/test_nditer*py" = ["E501"] +"numpy/_core/tests/test_ufunc*py" = ["E501"] +"numpy/_core/tests/test_umath*py" = ["E501"] +"numpy/_core/tests/test_numeric*.py" = ["E501"] +"numpy/_core/tests/test_regression.py" = ["E501"] +"numpy/_core/tests/test_shape_base.py" = ["E501"] +"numpy/_core/tests/test_simd*.py" = ["E501"] 
+"numpy/_core/tests/test_strings.py" = ["E501"] "numpy/_core/_add_newdocs.py" = ["E501"] "numpy/_core/_add_newdocs_scalars.py" = ["E501"] "numpy/_core/code_generators/generate_umath.py" = ["E501"] From 6dd3021b5d5e66f47b156b375895b5c3a63933ad Mon Sep 17 00:00:00 2001 From: Marco Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Tue, 27 May 2025 09:34:03 +0100 Subject: [PATCH 0047/1718] TYP: Run `pyright_cov` in CI, enforce minimum type completeness of 80% --- .github/workflows/mypy.yml | 3 + requirements/test_requirements.txt | 1 + tools/pyright_cov.py | 90 ++++++++++++++++++++++++++++++ 3 files changed, 94 insertions(+) create mode 100644 tools/pyright_cov.py diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml index 36e89504def7..eac863cdeb45 100644 --- a/.github/workflows/mypy.yml +++ b/.github/workflows/mypy.yml @@ -72,3 +72,6 @@ jobs: - name: Run Mypy run: | spin mypy + - name: Check Pyright's type compelteness is above 80% + run: | + spin run python tools/pyright_cov.py --verifytypes numpy --ignoreexternal --fail-under 80 --exclude-like '*.tests.*' diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index a2a68f044a50..e37114ed48fd 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -14,6 +14,7 @@ pytest-timeout # - Mypy relies on C API features not present in PyPy # NOTE: Keep mypy in sync with environment.yml mypy==1.15.0; platform_python_implementation != "PyPy" +pyright typing_extensions>=4.5.0 # for optional f2py encoding detection charset-normalizer diff --git a/tools/pyright_cov.py b/tools/pyright_cov.py new file mode 100644 index 000000000000..eca60462584a --- /dev/null +++ b/tools/pyright_cov.py @@ -0,0 +1,90 @@ +""" +Run PyRight's `--verifytypes` and check that its reported type completeness is above +a minimum threshold. 
+ +Example usage: + + spin run python tools/pyright_cov.py --verifytypes numpy --ignoreexternal \ + --fail-under 80 --exclude-like '*.tests.*' + +We use `--ignoreexternal` to avoid "partially unknown" reports coming from the stdlib +`numbers` module, see https://github.com/microsoft/pyright/discussions/9911. + +It might be possible to replace this with `basedpyright` +https://github.com/DetachHead/basedpyright/issues/125 in the future. +""" +from __future__ import annotations + +import argparse +import fnmatch +import json +import subprocess +import sys +from collections.abc import Sequence + + +def main(argv: Sequence[str] | None = None) -> int: + parser = argparse.ArgumentParser() + parser.add_argument( + "--fail-under", + type=float, + default=100.0, + help="Fail if coverage is below this percentage", + ) + parser.add_argument( + "--exclude-like", + required=False, + type=str, + help="Exclude symbols whose names matches this glob pattern", + ) + args, unknownargs = parser.parse_known_args(argv) + pyright_args = list(unknownargs) + if "--outputjson" not in pyright_args: + pyright_args.append("--outputjson") + return run_pyright_with_coverage(pyright_args, args.fail_under, args.exclude_like) + + +def run_pyright_with_coverage( + pyright_args: list[str], + cov_fail_under: float, + exclude_like: str | None, +) -> int: + result = subprocess.run( + ["pyright", *pyright_args], capture_output=True, text=True + ) + + try: + data = json.loads(result.stdout) + except json.decoder.JSONDecodeError: + sys.stdout.write(result.stderr) + sys.stderr.write(result.stderr) + return 1 + + if exclude_like is not None: + symbols = data["typeCompleteness"]["symbols"] + matched_symbols = [ + x for x in symbols if not fnmatch.fnmatch(x["name"], exclude_like) + and x['isExported'] + ] + cov_percent = ( + sum(x["isTypeKnown"] for x in matched_symbols) / len(matched_symbols) * 100 + ) + else: + cov_percent = data["typeCompleteness"]["completenessScore"] * 100 + + 
sys.stderr.write(result.stderr) + if cov_percent < cov_fail_under: + sys.stdout.write( + f"Coverage {cov_percent:.1f}% is below minimum required " + f"{cov_fail_under:.1f}%" + ) + return 1 + sys.stdout.write( + f"Coverage {cov_percent:.1f}% is at or above minimum required " + f"{cov_fail_under:.1f}%" + ) + return 0 + + +if __name__ == "__main__": + sys.exit(main()) From cb2d621cb9cdfe6c304a3c1d70c5a6475133e4d1 Mon Sep 17 00:00:00 2001 From: Marco Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Tue, 27 May 2025 11:47:45 +0100 Subject: [PATCH 0048/1718] wip --- tools/pyright_cov.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/pyright_cov.py b/tools/pyright_cov.py index eca60462584a..78a12f79747a 100644 --- a/tools/pyright_cov.py +++ b/tools/pyright_cov.py @@ -56,7 +56,7 @@ def run_pyright_with_coverage( try: data = json.loads(result.stdout) except json.decoder.JSONDecodeError: - sys.stdout.write(result.stderr) + sys.stdout.write(result.stdout) sys.stderr.write(result.stderr) return 1 @@ -78,6 +78,7 @@ def run_pyright_with_coverage( f"Coverage {cov_percent:.1f}% is below minimum required " f"{cov_fail_under:.1f}%" ) + sys.stdout.write(result.stdout) return 1 sys.stdout.write( f"Coverage {cov_percent:.1f}% is at or above minimum required " From f84d9645584d85be1bdd688927f53792b424959d Mon Sep 17 00:00:00 2001 From: Hamza Meel Date: Tue, 27 May 2025 12:47:50 +0200 Subject: [PATCH 0049/1718] DOC: fix typo in documentation of matvec Same as for commit fba2e60fb9, the vector was referred to as v in the body of the summary but b in the displayed math. This commit fixes the inconsistency. 
--- numpy/_core/code_generators/ufunc_docstrings.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/code_generators/ufunc_docstrings.py b/numpy/_core/code_generators/ufunc_docstrings.py index aa9fe4acea05..ddae87bd6012 100644 --- a/numpy/_core/code_generators/ufunc_docstrings.py +++ b/numpy/_core/code_generators/ufunc_docstrings.py @@ -2963,7 +2963,7 @@ def add_newdoc(place, name, doc): matrix-vector product is defined as: .. math:: - \\mathbf{A} \\cdot \\mathbf{b} = \\sum_{j=0}^{n-1} A_{ij} v_j + \\mathbf{A} \\cdot \\mathbf{v} = \\sum_{j=0}^{n-1} A_{ij} v_j where the sum is over the last dimensions in ``x1`` and ``x2`` (unless ``axes`` is specified). (For a matrix-vector product with the From 78cc9222697e07807478181aeca58d7a4cc7386c Mon Sep 17 00:00:00 2001 From: Marco Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Tue, 27 May 2025 12:07:37 +0100 Subject: [PATCH 0050/1718] always print for debugging --- tools/pyright_cov.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/pyright_cov.py b/tools/pyright_cov.py index 78a12f79747a..0fab6908277d 100644 --- a/tools/pyright_cov.py +++ b/tools/pyright_cov.py @@ -78,12 +78,12 @@ def run_pyright_with_coverage( f"Coverage {cov_percent:.1f}% is below minimum required " f"{cov_fail_under:.1f}%" ) - sys.stdout.write(result.stdout) return 1 sys.stdout.write( f"Coverage {cov_percent:.1f}% is at or above minimum required " f"{cov_fail_under:.1f}%" ) + sys.stdout.write(result.stdout) return 0 From f121010ef959ee7c956c453ab604ddf00d35735e Mon Sep 17 00:00:00 2001 From: Marco Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Tue, 27 May 2025 12:07:48 +0100 Subject: [PATCH 0051/1718] fixup --- tools/pyright_cov.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/pyright_cov.py b/tools/pyright_cov.py index 0fab6908277d..c38d65fff7e8 100644 --- a/tools/pyright_cov.py +++ b/tools/pyright_cov.py @@ -73,6 +73,7 @@ def 
run_pyright_with_coverage( cov_percent = data["typeCompleteness"]["completenessScore"] * 100 sys.stderr.write(result.stderr) + sys.stdout.write(result.stdout) if cov_percent < cov_fail_under: sys.stdout.write( f"Coverage {cov_percent:.1f}% is below minimum required " @@ -83,7 +84,6 @@ def run_pyright_with_coverage( f"Coverage {cov_percent:.1f}% is at or above minimum required " f"{cov_fail_under:.1f}%" ) - sys.stdout.write(result.stdout) return 0 From b1acc3b67487bb4da132f2ad6e03644803f222a6 Mon Sep 17 00:00:00 2001 From: Marco Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Tue, 27 May 2025 14:31:42 +0100 Subject: [PATCH 0052/1718] only run type completeness on linux --- .github/workflows/mypy.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml index eac863cdeb45..8e0cfc8b1b56 100644 --- a/.github/workflows/mypy.yml +++ b/.github/workflows/mypy.yml @@ -73,5 +73,7 @@ jobs: run: | spin mypy - name: Check Pyright's type compelteness is above 80% + # Pyright reports different percentages on different platforms + if: runner.os == 'Linux' run: | spin run python tools/pyright_cov.py --verifytypes numpy --ignoreexternal --fail-under 80 --exclude-like '*.tests.*' From 7eacb9efdb67733081eb3308cbe149f24800f3dc Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli Date: Tue, 27 May 2025 16:38:13 +0100 Subject: [PATCH 0053/1718] Merge pull request #29063 from MarcoGorelli/align-maskedarray-with-ndarray --- numpy/typing/tests/data/fail/ma.pyi | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/typing/tests/data/fail/ma.pyi b/numpy/typing/tests/data/fail/ma.pyi index 41306b23fe78..5dc6706ebf81 100644 --- a/numpy/typing/tests/data/fail/ma.pyi +++ b/numpy/typing/tests/data/fail/ma.pyi @@ -2,10 +2,10 @@ from typing import TypeAlias, TypeVar import numpy as np import numpy.typing as npt -from numpy._typing import _Shape +from numpy._typing import _AnyShape _ScalarT = TypeVar("_ScalarT", 
bound=np.generic) -MaskedArray: TypeAlias = np.ma.MaskedArray[_Shape, np.dtype[_ScalarT]] +MaskedArray: TypeAlias = np.ma.MaskedArray[_AnyShape, np.dtype[_ScalarT]] MAR_1d_f8: np.ma.MaskedArray[tuple[int], np.dtype[np.float64]] MAR_b: MaskedArray[np.bool] From ba27d95903ba9d3d201c051ce9c534380b5b3f4d Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Tue, 27 May 2025 12:16:06 -0600 Subject: [PATCH 0054/1718] BUG: add bounds-checking to in-place string multiply (#29060) * BUG: add bounds-checking to in-place string multiply * MNT: check for overflow and raise OverflowError * MNT: respond to review suggestion * MNT: handle overflow in one more spot * MNT: make test behave the same on all architectures * MNT: reorder to avoid work in some cases --- doc/release/upcoming_changes/29060.change.rst | 3 ++ numpy/_core/src/umath/string_buffer.h | 12 +++++ numpy/_core/src/umath/string_ufuncs.cpp | 48 ++++++++++++++----- numpy/_core/src/umath/stringdtype_ufuncs.cpp | 12 ++--- numpy/_core/strings.py | 2 +- numpy/_core/tests/test_stringdtype.py | 4 +- numpy/_core/tests/test_strings.py | 13 ++++- numpy/typing/tests/data/pass/ma.py | 3 +- 8 files changed, 74 insertions(+), 23 deletions(-) create mode 100644 doc/release/upcoming_changes/29060.change.rst diff --git a/doc/release/upcoming_changes/29060.change.rst b/doc/release/upcoming_changes/29060.change.rst new file mode 100644 index 000000000000..1561da7bf94e --- /dev/null +++ b/doc/release/upcoming_changes/29060.change.rst @@ -0,0 +1,3 @@ +* Multiplication between a string and integer now raises OverflowError instead + of MemoryError if the result of the multiplication would create a string that + is too large to be represented. This follows Python's behavior. 
diff --git a/numpy/_core/src/umath/string_buffer.h b/numpy/_core/src/umath/string_buffer.h index 554f9ece5197..dafedcbc03ff 100644 --- a/numpy/_core/src/umath/string_buffer.h +++ b/numpy/_core/src/umath/string_buffer.h @@ -297,6 +297,18 @@ struct Buffer { return num_codepoints; } + inline size_t + buffer_width() + { + switch (enc) { + case ENCODING::ASCII: + case ENCODING::UTF8: + return after - buf; + case ENCODING::UTF32: + return (after - buf) / sizeof(npy_ucs4); + } + } + inline Buffer& operator+=(npy_int64 rhs) { diff --git a/numpy/_core/src/umath/string_ufuncs.cpp b/numpy/_core/src/umath/string_ufuncs.cpp index ffd757815364..95f30ccb109e 100644 --- a/numpy/_core/src/umath/string_ufuncs.cpp +++ b/numpy/_core/src/umath/string_ufuncs.cpp @@ -15,6 +15,7 @@ #include "dtypemeta.h" #include "convert_datatype.h" #include "gil_utils.h" +#include "templ_common.h" /* for npy_mul_size_with_overflow_size_t */ #include "string_ufuncs.h" #include "string_fastsearch.h" @@ -166,26 +167,44 @@ string_add(Buffer buf1, Buffer buf2, Buffer out) template -static inline void +static inline int string_multiply(Buffer buf1, npy_int64 reps, Buffer out) { size_t len1 = buf1.num_codepoints(); if (reps < 1 || len1 == 0) { out.buffer_fill_with_zeros_after_index(0); - return; + return 0; } if (len1 == 1) { out.buffer_memset(*buf1, reps); out.buffer_fill_with_zeros_after_index(reps); + return 0; } - else { - for (npy_int64 i = 0; i < reps; i++) { - buf1.buffer_memcpy(out, len1); - out += len1; - } - out.buffer_fill_with_zeros_after_index(0); + + size_t newlen; + if (NPY_UNLIKELY(npy_mul_with_overflow_size_t(&newlen, reps, len1) != 0) || newlen > PY_SSIZE_T_MAX) { + return -1; + } + + size_t pad = 0; + size_t width = out.buffer_width(); + if (width < newlen) { + reps = width / len1; + pad = width % len1; } + + for (npy_int64 i = 0; i < reps; i++) { + buf1.buffer_memcpy(out, len1); + out += len1; + } + + buf1.buffer_memcpy(out, pad); + out += pad; + + out.buffer_fill_with_zeros_after_index(0); 
+ + return 0; } @@ -238,7 +257,9 @@ string_multiply_strint_loop(PyArrayMethod_Context *context, while (N--) { Buffer buf(in1, elsize); Buffer outbuf(out, outsize); - string_multiply(buf, *(npy_int64 *)in2, outbuf); + if (NPY_UNLIKELY(string_multiply(buf, *(npy_int64 *)in2, outbuf) < 0)) { + npy_gil_error(PyExc_OverflowError, "Overflow detected in string multiply"); + } in1 += strides[0]; in2 += strides[1]; @@ -267,7 +288,9 @@ string_multiply_intstr_loop(PyArrayMethod_Context *context, while (N--) { Buffer buf(in2, elsize); Buffer outbuf(out, outsize); - string_multiply(buf, *(npy_int64 *)in1, outbuf); + if (NPY_UNLIKELY(string_multiply(buf, *(npy_int64 *)in1, outbuf) < 0)) { + npy_gil_error(PyExc_OverflowError, "Overflow detected in string multiply"); + } in1 += strides[0]; in2 += strides[1]; @@ -752,10 +775,11 @@ string_multiply_resolve_descriptors( if (given_descrs[2] == NULL) { PyErr_SetString( PyExc_TypeError, - "The 'out' kwarg is necessary. Use numpy.strings.multiply without it."); + "The 'out' kwarg is necessary when using the string multiply ufunc " + "directly. 
Use numpy.strings.multiply to multiply strings without " + "specifying 'out'."); return _NPY_ERROR_OCCURRED_IN_CAST; } - loop_descrs[0] = NPY_DT_CALL_ensure_canonical(given_descrs[0]); if (loop_descrs[0] == NULL) { return _NPY_ERROR_OCCURRED_IN_CAST; diff --git a/numpy/_core/src/umath/stringdtype_ufuncs.cpp b/numpy/_core/src/umath/stringdtype_ufuncs.cpp index adcbfd3b7480..b0181d4186c9 100644 --- a/numpy/_core/src/umath/stringdtype_ufuncs.cpp +++ b/numpy/_core/src/umath/stringdtype_ufuncs.cpp @@ -137,9 +137,9 @@ static int multiply_loop_core( size_t newsize; int overflowed = npy_mul_with_overflow_size_t( &newsize, cursize, factor); - if (overflowed) { - npy_gil_error(PyExc_MemoryError, - "Failed to allocate string in string multiply"); + if (overflowed || newsize > PY_SSIZE_T_MAX) { + npy_gil_error(PyExc_OverflowError, + "Overflow encountered in string multiply"); goto fail; } @@ -1748,9 +1748,9 @@ center_ljust_rjust_strided_loop(PyArrayMethod_Context *context, width - num_codepoints); newsize += s1.size; - if (overflowed) { - npy_gil_error(PyExc_MemoryError, - "Failed to allocate string in %s", ufunc_name); + if (overflowed || newsize > PY_SSIZE_T_MAX) { + npy_gil_error(PyExc_OverflowError, + "Overflow encountered in %s", ufunc_name); goto fail; } diff --git a/numpy/_core/strings.py b/numpy/_core/strings.py index fc0a2d0b4d1a..b4dc1656024f 100644 --- a/numpy/_core/strings.py +++ b/numpy/_core/strings.py @@ -218,7 +218,7 @@ def multiply(a, i): # Ensure we can do a_len * i without overflow. 
if np.any(a_len > sys.maxsize / np.maximum(i, 1)): - raise MemoryError("repeated string is too long") + raise OverflowError("Overflow encountered in string multiply") buffersizes = a_len * i out_dtype = f"{a.dtype.char}{buffersizes.max()}" diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index 1c15c4895eaf..9bab810d4421 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -128,8 +128,8 @@ def test_null_roundtripping(): def test_string_too_large_error(): arr = np.array(["a", "b", "c"], dtype=StringDType()) - with pytest.raises(MemoryError): - arr * (2**63 - 2) + with pytest.raises(OverflowError): + arr * (sys.maxsize + 1) @pytest.mark.parametrize( diff --git a/numpy/_core/tests/test_strings.py b/numpy/_core/tests/test_strings.py index f6de208d7951..56e928df4d7b 100644 --- a/numpy/_core/tests/test_strings.py +++ b/numpy/_core/tests/test_strings.py @@ -224,9 +224,20 @@ def test_multiply_raises(self, dt): with pytest.raises(TypeError, match="unsupported type"): np.strings.multiply(np.array("abc", dtype=dt), 3.14) - with pytest.raises(MemoryError): + with pytest.raises(OverflowError): np.strings.multiply(np.array("abc", dtype=dt), sys.maxsize) + def test_inplace_multiply(self, dt): + arr = np.array(['foo ', 'bar'], dtype=dt) + arr *= 2 + if dt != "T": + assert_array_equal(arr, np.array(['foo ', 'barb'], dtype=dt)) + else: + assert_array_equal(arr, ['foo foo ', 'barbar']) + + with pytest.raises(OverflowError): + arr *= sys.maxsize + @pytest.mark.parametrize("i_dt", [np.int8, np.int16, np.int32, np.int64, np.int_]) def test_multiply_integer_dtypes(self, i_dt, dt): diff --git a/numpy/typing/tests/data/pass/ma.py b/numpy/typing/tests/data/pass/ma.py index dc9474fe4069..e7915a583210 100644 --- a/numpy/typing/tests/data/pass/ma.py +++ b/numpy/typing/tests/data/pass/ma.py @@ -16,7 +16,8 @@ MAR_M_dt64: MaskedArray[np.datetime64] = np.ma.MaskedArray([np.datetime64(1, "D")]) MAR_S: 
MaskedArray[np.bytes_] = np.ma.MaskedArray([b'foo'], dtype=np.bytes_) MAR_U: MaskedArray[np.str_] = np.ma.MaskedArray(['foo'], dtype=np.str_) -MAR_T = cast(np.ma.MaskedArray[Any, np.dtypes.StringDType], np.ma.MaskedArray(["a"], "T")) +MAR_T = cast(np.ma.MaskedArray[Any, np.dtypes.StringDType], + np.ma.MaskedArray(["a"], dtype="T")) AR_b: npt.NDArray[np.bool] = np.array([True, False, True]) From 5a8775642acfa24907dd52684c4be26f1ad3e852 Mon Sep 17 00:00:00 2001 From: Adam Turner <9087854+AA-Turner@users.noreply.github.com> Date: Wed, 28 May 2025 12:01:46 +0100 Subject: [PATCH 0055/1718] MAINT: Convert umath_linalg to multi-phase init (PEP 489) --- .../upcoming_changes/29030.compatibility.rst | 6 ++ numpy/linalg/umath_linalg.cpp | 71 ++++++++++++------- numpy/tests/test_lazyloading.py | 36 +++++----- numpy/tests/test_reloading.py | 35 +++++---- 4 files changed, 91 insertions(+), 57 deletions(-) create mode 100644 doc/release/upcoming_changes/29030.compatibility.rst diff --git a/doc/release/upcoming_changes/29030.compatibility.rst b/doc/release/upcoming_changes/29030.compatibility.rst new file mode 100644 index 000000000000..cf08551e28ee --- /dev/null +++ b/doc/release/upcoming_changes/29030.compatibility.rst @@ -0,0 +1,6 @@ +* NumPy's C extension modules have begun to use multi-phase initialisation, + as defined by :pep:`489`. As part of this, a new explicit check has been added + that each such module is only imported once per Python process. This comes with + the side-effect that deleting ``numpy`` from ``sys.modules`` and re-importing + it will now fail with an ``ImportError``. This has always been unsafe, with + unexpected side-effects, though did not previously raise an error. 
diff --git a/numpy/linalg/umath_linalg.cpp b/numpy/linalg/umath_linalg.cpp index ead6d84a73a2..1b6850145bc8 100644 --- a/numpy/linalg/umath_linalg.cpp +++ b/numpy/linalg/umath_linalg.cpp @@ -4688,57 +4688,54 @@ static PyMethodDef UMath_LinAlgMethods[] = { {NULL, NULL, 0, NULL} /* Sentinel */ }; -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - UMATH_LINALG_MODULE_NAME, - NULL, - -1, - UMath_LinAlgMethods, - NULL, - NULL, - NULL, - NULL -}; +static int module_loaded = 0; -PyMODINIT_FUNC PyInit__umath_linalg(void) +static int +_umath_linalg_exec(PyObject *m) { - PyObject *m; PyObject *d; PyObject *version; - m = PyModule_Create(&moduledef); - if (m == NULL) { - return NULL; + // https://docs.python.org/3/howto/isolating-extensions.html#opt-out-limiting-to-one-module-object-per-process + if (module_loaded) { + PyErr_SetString(PyExc_ImportError, + "cannot load module more than once per process"); + return -1; } + module_loaded = 1; - import_array(); - import_ufunc(); + if (PyArray_ImportNumPyAPI() < 0) { + return -1; + } + if (PyUFunc_ImportUFuncAPI() < 0) { + return -1; + } d = PyModule_GetDict(m); if (d == NULL) { - return NULL; + return -1; } version = PyUnicode_FromString(umath_linalg_version_string); if (version == NULL) { - return NULL; + return -1; } int ret = PyDict_SetItemString(d, "__version__", version); Py_DECREF(version); if (ret < 0) { - return NULL; + return -1; } /* Load the ufunc operators into the module's namespace */ if (addUfuncs(d) < 0) { - return NULL; + return -1; } #if PY_VERSION_HEX < 0x30d00b3 && !HAVE_EXTERNAL_LAPACK lapack_lite_lock = PyThread_allocate_lock(); if (lapack_lite_lock == NULL) { PyErr_NoMemory(); - return NULL; + return -1; } #endif @@ -4748,10 +4745,30 @@ PyMODINIT_FUNC PyInit__umath_linalg(void) PyDict_SetItemString(d, "_ilp64", Py_False); #endif -#if Py_GIL_DISABLED - // signal this module supports running with the GIL disabled - PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); + return 0; +} + +static 
struct PyModuleDef_Slot _umath_linalg_slots[] = { + {Py_mod_exec, (void*)_umath_linalg_exec}, +#if PY_VERSION_HEX >= 0x030c00f0 // Python 3.12+ + {Py_mod_multiple_interpreters, Py_MOD_MULTIPLE_INTERPRETERS_NOT_SUPPORTED}, #endif +#if PY_VERSION_HEX >= 0x030d00f0 // Python 3.13+ + // signal that this module supports running without an active GIL + {Py_mod_gil, Py_MOD_GIL_NOT_USED}, +#endif + {0, NULL}, +}; + +static struct PyModuleDef moduledef = { + PyModuleDef_HEAD_INIT, /* m_base */ + "_umath_linalg", /* m_name */ + NULL, /* m_doc */ + 0, /* m_size */ + UMath_LinAlgMethods, /* m_methods */ + _umath_linalg_slots, /* m_slots */ +}; - return m; +PyMODINIT_FUNC PyInit__umath_linalg(void) { + return PyModuleDef_Init(&moduledef); } diff --git a/numpy/tests/test_lazyloading.py b/numpy/tests/test_lazyloading.py index 5f6233f1c5cb..7b0324802611 100644 --- a/numpy/tests/test_lazyloading.py +++ b/numpy/tests/test_lazyloading.py @@ -1,23 +1,23 @@ +import subprocess import sys -from importlib.util import LazyLoader, find_spec, module_from_spec +import textwrap import pytest +from numpy.testing import IS_WASM -# Warning raised by _reload_guard() in numpy/__init__.py -@pytest.mark.filterwarnings("ignore:The NumPy module was reloaded") + +@pytest.mark.skipif(IS_WASM, reason="can't start subprocess") def test_lazy_load(): # gh-22045. lazyload doesn't import submodule names into the namespace - # muck with sys.modules to test the importing system - old_numpy = sys.modules.pop("numpy") - numpy_modules = {} - for mod_name, mod in list(sys.modules.items()): - if mod_name[:6] == "numpy.": - numpy_modules[mod_name] = mod - sys.modules.pop(mod_name) + # Test within a new process, to ensure that we do not mess with the + # global state during the test run (could lead to cryptic test failures). + # This is generally unsafe, especially, since we also reload the C-modules. 
+ code = textwrap.dedent(r""" + import sys + from importlib.util import LazyLoader, find_spec, module_from_spec - try: # create lazy load of numpy as np spec = find_spec("numpy") module = module_from_spec(spec) @@ -31,8 +31,12 @@ def test_lazy_load(): # test triggering the import of the package np.ndarray - - finally: - if old_numpy: - sys.modules["numpy"] = old_numpy - sys.modules.update(numpy_modules) + """) + p = subprocess.run( + (sys.executable, '-c', code), + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + encoding='utf-8', + check=False, + ) + assert p.returncode == 0, p.stdout diff --git a/numpy/tests/test_reloading.py b/numpy/tests/test_reloading.py index c21dc007b232..3e6ded326941 100644 --- a/numpy/tests/test_reloading.py +++ b/numpy/tests/test_reloading.py @@ -48,27 +48,34 @@ def test_novalue(): @pytest.mark.skipif(IS_WASM, reason="can't start subprocess") def test_full_reimport(): - """At the time of writing this, it is *not* truly supported, but - apparently enough users rely on it, for it to be an annoying change - when it started failing previously. - """ + # Reimporting numpy like this is not safe due to use of global C state, + # and has unexpected side effects. Test that an ImportError is raised. + # When all extension modules are isolated, this should test that clearing + # sys.modules and reimporting numpy works without error. + # Test within a new process, to ensure that we do not mess with the # global state during the test run (could lead to cryptic test failures). # This is generally unsafe, especially, since we also reload the C-modules. 
code = textwrap.dedent(r""" import sys - from pytest import warns import numpy as np - for k in list(sys.modules.keys()): - if "numpy" in k: - del sys.modules[k] + for k in [k for k in sys.modules if k.startswith('numpy')]: + del sys.modules[k] - with warns(UserWarning): + try: import numpy as np + except ImportError as err: + if str(err) != "cannot load module more than once per process": + raise SystemExit(f"Unexpected ImportError: {err}") + else: + raise SystemExit("DID NOT RAISE ImportError") """) - p = subprocess.run([sys.executable, '-c', code], capture_output=True) - if p.returncode: - raise AssertionError( - f"Non-zero return code: {p.returncode!r}\n\n{p.stderr.decode()}" - ) + p = subprocess.run( + (sys.executable, '-c', code), + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + encoding='utf-8', + check=False, + ) + assert p.returncode == 0, p.stdout From 7ae80cd29f1ff734d7f42038ca89cb27d6a81a62 Mon Sep 17 00:00:00 2001 From: Adam Turner <9087854+AA-Turner@users.noreply.github.com> Date: Wed, 28 May 2025 14:42:26 +0100 Subject: [PATCH 0056/1718] MAINT: Convert dummymodule to multi-phase init (PEP 489) --- numpy/_core/src/dummymodule.c | 32 +++++++++++++++++--------------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/numpy/_core/src/dummymodule.c b/numpy/_core/src/dummymodule.c index 2f293d6c4cd6..e1ef80ab3af3 100644 --- a/numpy/_core/src/dummymodule.c +++ b/numpy/_core/src/dummymodule.c @@ -14,25 +14,27 @@ static struct PyMethodDef methods[] = { {NULL, NULL, 0, NULL} }; +static struct PyModuleDef_Slot dummy_slots[] = { +#if PY_VERSION_HEX >= 0x030c00f0 // Python 3.12+ + // signal that this module can be imported in isolated subinterpreters + {Py_mod_multiple_interpreters, Py_MOD_PER_INTERPRETER_GIL_SUPPORTED}, +#endif +#if PY_VERSION_HEX >= 0x030d00f0 // Python 3.13+ + // signal that this module supports running without an active GIL + {Py_mod_gil, Py_MOD_GIL_NOT_USED}, +#endif + {0, NULL}, +}; static struct PyModuleDef moduledef = { 
- PyModuleDef_HEAD_INIT, - "dummy", - NULL, - -1, - methods, - NULL, - NULL, - NULL, - NULL + .m_base = PyModuleDef_HEAD_INIT, + .m_name = "dummy", + .m_size = 0, + .m_methods = methods, + .m_slots = dummy_slots, }; /* Initialization function for the module */ PyMODINIT_FUNC PyInit__dummy(void) { - PyObject *m; - m = PyModule_Create(&moduledef); - if (!m) { - return NULL; - } - return m; + return PyModuleDef_Init(&moduledef); } From 0145d7c379e245bfd259bf1c519441da4bea089b Mon Sep 17 00:00:00 2001 From: Adam Turner <9087854+AA-Turner@users.noreply.github.com> Date: Wed, 28 May 2025 14:47:45 +0100 Subject: [PATCH 0057/1718] MAINT: Convert lapack_lite to multi-phase init (PEP 489) --- numpy/linalg/lapack_litemodule.c | 62 ++++++++++++++++++++------------ 1 file changed, 39 insertions(+), 23 deletions(-) diff --git a/numpy/linalg/lapack_litemodule.c b/numpy/linalg/lapack_litemodule.c index e5f3af05af22..cad5f3f92f09 100644 --- a/numpy/linalg/lapack_litemodule.c +++ b/numpy/linalg/lapack_litemodule.c @@ -377,28 +377,25 @@ static struct PyMethodDef lapack_lite_module_methods[] = { { NULL,NULL,0, NULL} }; +static int module_loaded = 0; -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "lapack_lite", - NULL, - -1, - lapack_lite_module_methods, - NULL, - NULL, - NULL, - NULL -}; - -/* Initialization function for the module */ -PyMODINIT_FUNC PyInit_lapack_lite(void) +static int +lapack_lite_exec(PyObject *m) { - PyObject *m,*d; - m = PyModule_Create(&moduledef); - if (m == NULL) { - return NULL; + PyObject *d; + + // https://docs.python.org/3/howto/isolating-extensions.html#opt-out-limiting-to-one-module-object-per-process + if (module_loaded) { + PyErr_SetString(PyExc_ImportError, + "cannot load module more than once per process"); + return -1; + } + module_loaded = 1; + + if (PyArray_ImportNumPyAPI() < 0) { + return -1; } - import_array(); + d = PyModule_GetDict(m); LapackError = PyErr_NewException("numpy.linalg.lapack_lite.LapackError", NULL, NULL); 
PyDict_SetItemString(d, "LapackError", LapackError); @@ -409,10 +406,29 @@ PyMODINIT_FUNC PyInit_lapack_lite(void) PyDict_SetItemString(d, "_ilp64", Py_False); #endif -#if Py_GIL_DISABLED - // signal this module supports running with the GIL disabled - PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); + return 0; +} + +static struct PyModuleDef_Slot lapack_lite_slots[] = { + {Py_mod_exec, lapack_lite_exec}, +#if PY_VERSION_HEX >= 0x030c00f0 // Python 3.12+ + {Py_mod_multiple_interpreters, Py_MOD_MULTIPLE_INTERPRETERS_NOT_SUPPORTED}, +#endif +#if PY_VERSION_HEX >= 0x030d00f0 // Python 3.13+ + // signal that this module supports running without an active GIL + {Py_mod_gil, Py_MOD_GIL_NOT_USED}, #endif + {0, NULL}, +}; + +static struct PyModuleDef moduledef = { + .m_base = PyModuleDef_HEAD_INIT, + .m_name = "lapack_lite", + .m_size = 0, + .m_methods = lapack_lite_module_methods, + .m_slots = lapack_lite_slots, +}; - return m; +PyMODINIT_FUNC PyInit_lapack_lite(void) { + return PyModuleDef_Init(&moduledef); } From 3363b38d99f87601869c2c9d68c2b16e54509675 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli Date: Wed, 28 May 2025 16:06:24 +0100 Subject: [PATCH 0058/1718] TYP: Type ``MaskedArray.__{add,radd,sub,rsub}__`` (#29012) --- numpy/__init__.pyi | 14 +- numpy/ma/core.pyi | 202 +++++++++++++++++++- numpy/typing/tests/data/reveal/ma.pyi | 255 ++++++++++++++++++++++++++ 3 files changed, 462 insertions(+), 9 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 0359e605a1c3..0e8b4625e7d4 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2809,6 +2809,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __rdivmod__(self: NDArray[timedelta64], lhs: _ArrayLike[timedelta64], /) -> tuple[NDArray[int64], NDArray[timedelta64]]: ... + # Keep in sync with `MaskedArray.__add__` @overload def __add__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... 
@overload @@ -2856,6 +2857,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __add__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + # Keep in sync with `MaskedArray.__radd__` @overload # signature equivalent to __add__ def __radd__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... @overload @@ -2903,6 +2905,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __radd__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + # Keep in sync with `MaskedArray.__sub__` @overload def __sub__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... @overload @@ -2940,6 +2943,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __sub__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + # Keep in sync with `MaskedArray.__rsub__` @overload def __rsub__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... @overload @@ -3359,6 +3363,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # object and its value is >= 0 # NOTE: Due to a mypy bug, overloading on e.g. `self: NDArray[SCT_floating]` won't # work, as this will lead to `false negatives` when using these inplace ops. + # Keep in sync with `MaskedArray.__iadd__` @overload def __iadd__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload @@ -3386,7 +3391,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __iadd__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... 
- # + # Keep in sync with `MaskedArray.__isub__` @overload def __isub__( self: NDArray[unsignedinteger], @@ -3404,7 +3409,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __isub__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - # + # Keep in sync with `MaskedArray.__imul__` @overload def __imul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload @@ -3426,6 +3431,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __imul__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + # Keep in sync with `MaskedArray.__ipow__` @overload def __ipow__( self: NDArray[unsignedinteger], @@ -3441,7 +3447,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __ipow__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - # + # Keep in sync with `MaskedArray.__itruediv__` @overload def __itruediv__(self: NDArray[floating | timedelta64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload @@ -3449,7 +3455,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __itruediv__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... 
- # keep in sync with `__imod__` + # keep in sync with `__imod__` and `MaskedArray.__ifloordiv__` @overload def __ifloordiv__( self: NDArray[unsignedinteger], diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 7e87611037b9..388619e1a654 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -2,7 +2,7 @@ # ruff: noqa: ANN001, ANN002, ANN003, ANN201, ANN202 ANN204, ANN401 from collections.abc import Sequence -from typing import Any, Literal, Self, SupportsIndex, TypeAlias, overload +from typing import Any, Literal, NoReturn, Self, SupportsIndex, TypeAlias, overload from _typeshed import Incomplete from typing_extensions import TypeIs, TypeVar @@ -19,17 +19,23 @@ from numpy import ( bool_, bytes_, character, + complex128, complexfloating, datetime64, dtype, dtypes, expand_dims, + float16, + float32, float64, floating, generic, + inexact, int_, + integer, intp, ndarray, + number, object_, signedinteger, str_, @@ -40,14 +46,21 @@ from numpy._globals import _NoValueType from numpy._typing import ( ArrayLike, NDArray, + _32Bit, + _64Bit, _AnyShape, _ArrayLike, _ArrayLikeBool_co, _ArrayLikeBytes_co, + _ArrayLikeComplex128_co, _ArrayLikeComplex_co, + _ArrayLikeDT64_co, + _ArrayLikeFloat64_co, _ArrayLikeFloat_co, _ArrayLikeInt, _ArrayLikeInt_co, + _ArrayLikeNumber_co, + _ArrayLikeObject_co, _ArrayLikeStr_co, _ArrayLikeString_co, _ArrayLikeTD64_co, @@ -247,8 +260,18 @@ _DTypeT_co = TypeVar("_DTypeT_co", bound=dtype, default=dtype, covariant=True) _ArrayT = TypeVar("_ArrayT", bound=ndarray[Any, Any]) _ScalarT = TypeVar("_ScalarT", bound=generic) _ScalarT_co = TypeVar("_ScalarT_co", bound=generic, covariant=True) +_NumberT = TypeVar("_NumberT", bound=number) # A subset of `MaskedArray` that can be parametrized w.r.t. 
`np.generic` _MaskedArray: TypeAlias = MaskedArray[_AnyShape, dtype[_ScalarT]] + +_MaskedArrayUInt_co: TypeAlias = _MaskedArray[unsignedinteger | np.bool] +_MaskedArrayInt_co: TypeAlias = _MaskedArray[integer | np.bool] +_MaskedArrayComplex_co: TypeAlias = _MaskedArray[inexact | integer | np.bool] +_MaskedArrayTD64_co: TypeAlias = _MaskedArray[timedelta64 | integer | np.bool] +_MaskedArrayFloat64_co: TypeAlias = _MaskedArray[floating[_64Bit] | float32 | float16 | integer | np.bool] +_MaskedArrayComplex128_co: TypeAlias = _MaskedArray[number[_64Bit] | number[_32Bit] | float16 | integer | np.bool] +_MaskedArrayFloat_co: TypeAlias = _MaskedArray[floating | integer | np.bool] + _Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] MaskType = bool_ @@ -463,10 +486,179 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def __gt__(self, other: ArrayLike, /) -> _MaskedArray[bool_]: ... # type: ignore[override] def __le__(self, other: ArrayLike, /) -> _MaskedArray[bool_]: ... # type: ignore[override] def __lt__(self, other: ArrayLike, /) -> _MaskedArray[bool_]: ... # type: ignore[override] - def __add__(self, other): ... - def __radd__(self, other): ... - def __sub__(self, other): ... - def __rsub__(self, other): ... + + # Keep in sync with `ndarray.__add__` + @overload + def __add__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __add__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... 
+ @overload + def __add__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __add__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... + @overload + def __add__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __add__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _MaskedArrayTD64_co, other: _ArrayLikeTD64_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __add__(self: _MaskedArrayTD64_co, other: _ArrayLikeDT64_co, /) -> _MaskedArray[datetime64]: ... + @overload + def __add__(self: _MaskedArray[datetime64], other: _ArrayLikeTD64_co, /) -> _MaskedArray[datetime64]: ... + @overload + def __add__(self: _MaskedArray[bytes_], other: _ArrayLikeBytes_co, /) -> _MaskedArray[bytes_]: ... + @overload + def __add__(self: _MaskedArray[str_], other: _ArrayLikeStr_co, /) -> _MaskedArray[str_]: ... + @overload + def __add__( + self: MaskedArray[Any, dtypes.StringDType], + other: _ArrayLikeStr_co | _ArrayLikeString_co, + /, + ) -> MaskedArray[_AnyShape, dtypes.StringDType]: ... + @overload + def __add__(self: _MaskedArray[object_], other: Any, /) -> Any: ... 
+ @overload + def __add__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `ndarray.__radd__` + @overload # signature equivalent to __add__ + def __radd__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __radd__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __radd__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __radd__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... + @overload + def __radd__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __radd__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... 
# type: ignore[overload-overlap] + @overload + def __radd__(self: _MaskedArrayTD64_co, other: _ArrayLikeTD64_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __radd__(self: _MaskedArrayTD64_co, other: _ArrayLikeDT64_co, /) -> _MaskedArray[datetime64]: ... + @overload + def __radd__(self: _MaskedArray[datetime64], other: _ArrayLikeTD64_co, /) -> _MaskedArray[datetime64]: ... + @overload + def __radd__(self: _MaskedArray[bytes_], other: _ArrayLikeBytes_co, /) -> _MaskedArray[bytes_]: ... + @overload + def __radd__(self: _MaskedArray[str_], other: _ArrayLikeStr_co, /) -> _MaskedArray[str_]: ... + @overload + def __radd__( + self: MaskedArray[Any, dtypes.StringDType], + other: _ArrayLikeStr_co | _ArrayLikeString_co, + /, + ) -> MaskedArray[_AnyShape, dtypes.StringDType]: ... + @overload + def __radd__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __radd__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `ndarray.__sub__` + @overload + def __sub__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __sub__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ... + @overload + def __sub__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __sub__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __sub__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... 
+ @overload + def __sub__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __sub__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: _MaskedArrayTD64_co, other: _ArrayLikeTD64_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __sub__(self: _MaskedArray[datetime64], other: _ArrayLikeTD64_co, /) -> _MaskedArray[datetime64]: ... + @overload + def __sub__(self: _MaskedArray[datetime64], other: _ArrayLikeDT64_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __sub__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __sub__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `ndarray.__rsub__` + @overload + def __rsub__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __rsub__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ... + @overload + def __rsub__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... 
# type: ignore[overload-overlap] + @overload + def __rsub__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __rsub__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __rsub__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... + @overload + def __rsub__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __rsub__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: _MaskedArrayTD64_co, other: _ArrayLikeTD64_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __rsub__(self: _MaskedArrayTD64_co, other: _ArrayLikeDT64_co, /) -> _MaskedArray[datetime64]: ... + @overload + def __rsub__(self: _MaskedArray[datetime64], other: _ArrayLikeDT64_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __rsub__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __rsub__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + def __mul__(self, other): ... def __rmul__(self, other): ... def __truediv__(self, other): ... 
diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index deda19c3d743..97f833b6a488 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -12,21 +12,35 @@ class MaskedArraySubclass(MaskedArray[np.complex128]): ... AR_b: NDArray[np.bool] AR_f4: NDArray[np.float32] +AR_u4: NDArray[np.uint32] AR_dt64: NDArray[np.datetime64] AR_td64: NDArray[np.timedelta64] AR_o: NDArray[np.timedelta64] +AR_LIKE_b: list[bool] +AR_LIKE_u: list[np.uint32] +AR_LIKE_i: list[int] +AR_LIKE_f: list[float] +AR_LIKE_c: list[complex] +AR_LIKE_td64: list[np.timedelta64] +AR_LIKE_dt64: list[np.datetime64] +AR_LIKE_o: list[np.object_] +AR_number: NDArray[np.number] + MAR_c16: MaskedArray[np.complex128] MAR_b: MaskedArray[np.bool] MAR_f4: MaskedArray[np.float32] MAR_f8: MaskedArray[np.float64] MAR_i8: MaskedArray[np.int64] +MAR_u4: MaskedArray[np.uint32] MAR_dt64: MaskedArray[np.datetime64] MAR_td64: MaskedArray[np.timedelta64] MAR_o: MaskedArray[np.object_] MAR_s: MaskedArray[np.str_] MAR_byte: MaskedArray[np.bytes_] MAR_V: MaskedArray[np.void] +MAR_floating: MaskedArray[np.floating] +MAR_number: MaskedArray[np.number] MAR_subclass: MaskedArraySubclass @@ -368,3 +382,244 @@ assert_type(MAR_2d_f4.T, np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32] assert_type(MAR_2d_f4.nonzero(), tuple[_Array1D[np.intp], *tuple[_Array1D[np.intp], ...]]) assert_type(MAR_2d_f4.nonzero()[0], _Array1D[np.intp]) + +# Masked Array addition + +assert_type(MAR_b + AR_LIKE_u, MaskedArray[np.uint32]) +assert_type(MAR_b + AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_b + AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_b + AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_b + AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_b + AR_LIKE_o, Any) + +assert_type(AR_LIKE_u + MAR_b, MaskedArray[np.uint32]) +assert_type(AR_LIKE_i + MAR_b, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f + MAR_b, 
MaskedArray[np.floating]) +assert_type(AR_LIKE_c + MAR_b, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_td64 + MAR_b, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 + MAR_b, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o + MAR_b, Any) + +assert_type(MAR_u4 + AR_LIKE_b, MaskedArray[np.uint32]) +assert_type(MAR_u4 + AR_LIKE_u, MaskedArray[np.unsignedinteger]) +assert_type(MAR_u4 + AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_u4 + AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_u4 + AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_u4 + AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_u4 + AR_LIKE_o, Any) + +assert_type(AR_LIKE_b + MAR_u4, MaskedArray[np.uint32]) +assert_type(AR_LIKE_u + MAR_u4, MaskedArray[np.unsignedinteger]) +assert_type(AR_LIKE_i + MAR_u4, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f + MAR_u4, MaskedArray[np.floating]) +assert_type(AR_LIKE_c + MAR_u4, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_td64 + MAR_u4, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 + MAR_u4, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o + MAR_u4, Any) + +assert_type(MAR_i8 + AR_LIKE_b, MaskedArray[np.int64]) +assert_type(MAR_i8 + AR_LIKE_u, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 + AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 + AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_i8 + AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_i8 + AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_i8 + AR_LIKE_o, Any) + +assert_type(AR_LIKE_b + MAR_i8, MaskedArray[np.int64]) +assert_type(AR_LIKE_u + MAR_i8, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_i + MAR_i8, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f + MAR_i8, MaskedArray[np.floating]) +assert_type(AR_LIKE_c + MAR_i8, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_td64 + MAR_i8, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 + MAR_i8, 
MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o + MAR_i8, Any) + +assert_type(MAR_f8 + AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_f8 + AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_f8 + AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_f8 + AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_f8 + AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_f8 + AR_LIKE_o, Any) + +assert_type(AR_LIKE_b + MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_u + MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_i + MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_f + MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_c + MAR_f8, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_o + MAR_f8, Any) + +assert_type(MAR_c16 + AR_LIKE_b, MaskedArray[np.complex128]) +assert_type(MAR_c16 + AR_LIKE_u, MaskedArray[np.complex128]) +assert_type(MAR_c16 + AR_LIKE_i, MaskedArray[np.complex128]) +assert_type(MAR_c16 + AR_LIKE_f, MaskedArray[np.complex128]) +assert_type(MAR_c16 + AR_LIKE_c, MaskedArray[np.complex128]) +assert_type(MAR_c16 + AR_LIKE_o, Any) + +assert_type(AR_LIKE_b + MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_u + MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_i + MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_f + MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_c + MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_o + MAR_c16, Any) + +assert_type(MAR_td64 + AR_LIKE_b, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 + AR_LIKE_u, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 + AR_LIKE_i, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 + AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 + AR_LIKE_o, Any) + +assert_type(AR_LIKE_b + MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_u + MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_i + MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_td64 + MAR_td64, 
MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 + MAR_td64, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o + MAR_td64, Any) + +assert_type(MAR_dt64 + AR_LIKE_b, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 + AR_LIKE_u, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 + AR_LIKE_i, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 + AR_LIKE_td64, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 + AR_LIKE_o, Any) + +assert_type(AR_LIKE_o + MAR_dt64, Any) + +assert_type(MAR_o + AR_LIKE_b, Any) +assert_type(MAR_o + AR_LIKE_u, Any) +assert_type(MAR_o + AR_LIKE_i, Any) +assert_type(MAR_o + AR_LIKE_f, Any) +assert_type(MAR_o + AR_LIKE_c, Any) +assert_type(MAR_o + AR_LIKE_td64, Any) +assert_type(MAR_o + AR_LIKE_dt64, Any) +assert_type(MAR_o + AR_LIKE_o, Any) + +assert_type(AR_LIKE_b + MAR_o, Any) +assert_type(AR_LIKE_u + MAR_o, Any) +assert_type(AR_LIKE_i + MAR_o, Any) +assert_type(AR_LIKE_f + MAR_o, Any) +assert_type(AR_LIKE_c + MAR_o, Any) +assert_type(AR_LIKE_td64 + MAR_o, Any) +assert_type(AR_LIKE_dt64 + MAR_o, Any) +assert_type(AR_LIKE_o + MAR_o, Any) + +# Masked Array subtraction +# Keep in sync with numpy/typing/tests/data/reveal/arithmetic.pyi + +assert_type(MAR_number - AR_number, MaskedArray[np.number]) + +assert_type(MAR_b - AR_LIKE_u, MaskedArray[np.uint32]) +assert_type(MAR_b - AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_b - AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_b - AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_b - AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_b - AR_LIKE_o, Any) + +assert_type(AR_LIKE_u - MAR_b, MaskedArray[np.uint32]) +assert_type(AR_LIKE_i - MAR_b, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f - MAR_b, MaskedArray[np.floating]) +assert_type(AR_LIKE_c - MAR_b, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_td64 - MAR_b, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 - MAR_b, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o - MAR_b, Any) + 
+assert_type(MAR_u4 - AR_LIKE_b, MaskedArray[np.uint32]) +assert_type(MAR_u4 - AR_LIKE_u, MaskedArray[np.unsignedinteger]) +assert_type(MAR_u4 - AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_u4 - AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_u4 - AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_u4 - AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_u4 - AR_LIKE_o, Any) + +assert_type(AR_LIKE_b - MAR_u4, MaskedArray[np.uint32]) +assert_type(AR_LIKE_u - MAR_u4, MaskedArray[np.unsignedinteger]) +assert_type(AR_LIKE_i - MAR_u4, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f - MAR_u4, MaskedArray[np.floating]) +assert_type(AR_LIKE_c - MAR_u4, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_td64 - MAR_u4, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 - MAR_u4, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o - MAR_u4, Any) + +assert_type(MAR_i8 - AR_LIKE_b, MaskedArray[np.int64]) +assert_type(MAR_i8 - AR_LIKE_u, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 - AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 - AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_i8 - AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_i8 - AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_i8 - AR_LIKE_o, Any) + +assert_type(AR_LIKE_b - MAR_i8, MaskedArray[np.int64]) +assert_type(AR_LIKE_u - MAR_i8, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_i - MAR_i8, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f - MAR_i8, MaskedArray[np.floating]) +assert_type(AR_LIKE_c - MAR_i8, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_td64 - MAR_i8, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 - MAR_i8, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o - MAR_i8, Any) + +assert_type(MAR_f8 - AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_f8 - AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_f8 - AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_f8 - AR_LIKE_f, 
MaskedArray[np.float64]) +assert_type(MAR_f8 - AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_f8 - AR_LIKE_o, Any) + +assert_type(AR_LIKE_b - MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_u - MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_i - MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_f - MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_c - MAR_f8, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_o - MAR_f8, Any) + +assert_type(MAR_c16 - AR_LIKE_b, MaskedArray[np.complex128]) +assert_type(MAR_c16 - AR_LIKE_u, MaskedArray[np.complex128]) +assert_type(MAR_c16 - AR_LIKE_i, MaskedArray[np.complex128]) +assert_type(MAR_c16 - AR_LIKE_f, MaskedArray[np.complex128]) +assert_type(MAR_c16 - AR_LIKE_c, MaskedArray[np.complex128]) +assert_type(MAR_c16 - AR_LIKE_o, Any) + +assert_type(AR_LIKE_b - MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_u - MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_i - MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_f - MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_c - MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_o - MAR_c16, Any) + +assert_type(MAR_td64 - AR_LIKE_b, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 - AR_LIKE_u, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 - AR_LIKE_i, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 - AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 - AR_LIKE_o, Any) + +assert_type(AR_LIKE_b - MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_u - MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_i - MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_td64 - MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 - MAR_td64, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o - MAR_td64, Any) + +assert_type(MAR_dt64 - AR_LIKE_b, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 - AR_LIKE_u, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 - AR_LIKE_i, 
MaskedArray[np.datetime64]) +assert_type(MAR_dt64 - AR_LIKE_td64, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 - AR_LIKE_dt64, MaskedArray[np.timedelta64]) +assert_type(MAR_dt64 - AR_LIKE_o, Any) + +assert_type(AR_LIKE_dt64 - MAR_dt64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_o - MAR_dt64, Any) + +assert_type(MAR_o - AR_LIKE_b, Any) +assert_type(MAR_o - AR_LIKE_u, Any) +assert_type(MAR_o - AR_LIKE_i, Any) +assert_type(MAR_o - AR_LIKE_f, Any) +assert_type(MAR_o - AR_LIKE_c, Any) +assert_type(MAR_o - AR_LIKE_td64, Any) +assert_type(MAR_o - AR_LIKE_dt64, Any) +assert_type(MAR_o - AR_LIKE_o, Any) + +assert_type(AR_LIKE_b - MAR_o, Any) +assert_type(AR_LIKE_u - MAR_o, Any) +assert_type(AR_LIKE_i - MAR_o, Any) +assert_type(AR_LIKE_f - MAR_o, Any) +assert_type(AR_LIKE_c - MAR_o, Any) +assert_type(AR_LIKE_td64 - MAR_o, Any) +assert_type(AR_LIKE_dt64 - MAR_o, Any) +assert_type(AR_LIKE_o - MAR_o, Any) From f9baafb4e0c529939e94218ec500ccc3ee5d5dd4 Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Wed, 28 May 2025 22:18:28 +0200 Subject: [PATCH 0059/1718] review comments --- numpy/_core/tests/test_indexing.py | 3 ++- numpy/_core/tests/test_mem_overlap.py | 7 +++++-- numpy/_typing/_dtype_like.py | 2 +- numpy/lib/tests/test_histograms.py | 6 ++++-- ruff.toml | 8 ++++++-- 5 files changed, 18 insertions(+), 8 deletions(-) diff --git a/numpy/_core/tests/test_indexing.py b/numpy/_core/tests/test_indexing.py index 1d42cde48682..757b8d72782f 100644 --- a/numpy/_core/tests/test_indexing.py +++ b/numpy/_core/tests/test_indexing.py @@ -1352,7 +1352,8 @@ def test_boolean_indexing_fast_path(self): "size of axis is 3 but size of corresponding boolean axis is 1", lambda: a[idx1]) - # This used to incorrectly give a ValueError: operands could not be broadcast together # noqa: E501 + # This used to incorrectly give a ValueError: operands could not be + # broadcast together idx2 = np.array([[False] * 8 + [True]]) assert_raises_regex(IndexError, "boolean index did not match 
indexed array along axis 0; " diff --git a/numpy/_core/tests/test_mem_overlap.py b/numpy/_core/tests/test_mem_overlap.py index b437a7e14298..78b943854679 100644 --- a/numpy/_core/tests/test_mem_overlap.py +++ b/numpy/_core/tests/test_mem_overlap.py @@ -165,8 +165,9 @@ def check_may_share_memory_exact(a, b): err_msg = "" if got != exact: + base_delta = a.__array_interface__['data'][0] - b.__array_interface__['data'][0] err_msg = " " + "\n ".join([ - f"base_a - base_b = {a.__array_interface__['data'][0] - b.__array_interface__['data'][0]!r}", # noqa: E501 + f"base_a - base_b = {base_delta!r}", f"shape_a = {a.shape!r}", f"shape_b = {b.shape!r}", f"strides_a = {a.strides!r}", @@ -402,7 +403,9 @@ def check(A, U, exists=None): exists = (X is not None) if X is not None: - assert_(sum(a * x for a, x in zip(A, X)) == sum(a * u // 2 for a, u in zip(A, U))) # noqa: E501 + sum_ax = sum(a * x for a, x in zip(A, X)) + sum_au_half = sum(a * u // 2 for a, u in zip(A, U)) + assert_(sum_ax == sum_au_half) assert_(all(0 <= x <= u for x, u in zip(X, U))) assert_(any(x != u // 2 for x, u in zip(X, U))) diff --git a/numpy/_typing/_dtype_like.py b/numpy/_typing/_dtype_like.py index d341db5dc23a..c406b3098384 100644 --- a/numpy/_typing/_dtype_like.py +++ b/numpy/_typing/_dtype_like.py @@ -55,7 +55,7 @@ def dtype(self) -> _DTypeT_co: ... # A subset of `npt.DTypeLike` that can be parametrized w.r.t. 
`np.generic` -_DTypeLike: TypeAlias = type[_ScalarT] | np.dtype[_ScalarT] | _SupportsDType[np.dtype[_ScalarT]] # noqa: E501 +_DTypeLike: TypeAlias = type[_ScalarT] | np.dtype[_ScalarT] | _SupportsDType[np.dtype[_ScalarT]] # Would create a dtype[np.void] diff --git a/numpy/lib/tests/test_histograms.py b/numpy/lib/tests/test_histograms.py index be5268d9813a..4ba953f462fc 100644 --- a/numpy/lib/tests/test_histograms.py +++ b/numpy/lib/tests/test_histograms.py @@ -554,7 +554,8 @@ def test_outlier(self): assert_equal(len(a), numbins) def test_scott_vs_stone(self): - """Verify that Scott's rule and Stone's rule converges for normally distributed data""" # noqa: E501 + # Verify that Scott's rule and Stone's rule converges for normally + # distributed data def nbins_ratio(seed, size): rng = np.random.RandomState(seed) @@ -565,7 +566,8 @@ def nbins_ratio(seed, size): geom_space = np.geomspace(start=10, stop=100, num=4).round().astype(int) ll = [[nbins_ratio(seed, size) for size in geom_space] for seed in range(10)] - # the average difference between the two methods decreases as the dataset size increases. # noqa: E501 + # the average difference between the two methods decreases as the dataset + # size increases. 
avg = abs(np.mean(ll, axis=0) - 0.5) assert_almost_equal(avg, [0.15, 0.09, 0.08, 0.03], decimal=2) diff --git a/ruff.toml b/ruff.toml index b4190fe41787..deb52e834df9 100644 --- a/ruff.toml +++ b/ruff.toml @@ -68,6 +68,7 @@ ignore = [ "_tempita.py" = ["B909"] "bench_*.py" = ["B015", "B018"] "test*.py" = ["B015", "B018", "E201", "E714"] + "benchmarks/benchmarks/bench_linalg.py" = ["E501"] "numpy/_core/tests/test_api.py" = ["E501"] "numpy/_core/tests/test_arrayprint.py" = ["E501"] @@ -90,15 +91,18 @@ ignore = [ "numpy/_core/_add_newdocs.py" = ["E501"] "numpy/_core/_add_newdocs_scalars.py" = ["E501"] "numpy/_core/code_generators/generate_umath.py" = ["E501"] -"numpy/_typing/_array_like.py" = ["E501"] "numpy/lib/tests/test_function_base.py" = ["E501"] "numpy/lib/tests/test_format.py" = ["E501"] "numpy/lib/tests/test_io.py" = ["E501"] "numpy/lib/tests/test_polynomial.py" = ["E501"] "numpy/linalg/tests/test_linalg.py" = ["E501"] "numpy/tests/test_configtool.py" = ["E501"] -"numpy*pyi" = ["E501"] "numpy/f2py/*py" = ["E501"] +# for typing related files we follow https://typing.python.org/en/latest/guides/writing_stubs.html#maximum-line-length +"numpy/_typing/_array_like.py" = ["E501"] +"numpy/_typing/_dtype_like.py" = ["E501"] +"numpy*pyi" = ["E501"] + "__init__.py" = ["F401", "F403", "F405"] "__init__.pyi" = ["F401"] "numpy/_core/defchararray.py" = ["F403", "F405"] From 5ab56f649972b77daf20f7f5aa3e796232c05a00 Mon Sep 17 00:00:00 2001 From: Adam Turner <9087854+AA-Turner@users.noreply.github.com> Date: Thu, 29 May 2025 08:41:39 +0100 Subject: [PATCH 0060/1718] Convert pocketfft_umath to multi-phase init (PEP 489) (#29028) --- numpy/fft/_pocketfft_umath.cpp | 64 +++++++++++++++++++++------------- 1 file changed, 40 insertions(+), 24 deletions(-) diff --git a/numpy/fft/_pocketfft_umath.cpp b/numpy/fft/_pocketfft_umath.cpp index 525b5e5a23da..ab8af5aa522e 100644 --- a/numpy/fft/_pocketfft_umath.cpp +++ b/numpy/fft/_pocketfft_umath.cpp @@ -388,41 +388,57 @@ 
add_gufuncs(PyObject *dictionary) { return 0; } -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "_multiarray_umath", - NULL, - -1, - NULL, - NULL, - NULL, - NULL, - NULL -}; +static int module_loaded = 0; -/* Initialization function for the module */ -PyMODINIT_FUNC PyInit__pocketfft_umath(void) +static int +_pocketfft_umath_exec(PyObject *m) { - PyObject *m = PyModule_Create(&moduledef); - if (m == NULL) { - return NULL; + // https://docs.python.org/3/howto/isolating-extensions.html#opt-out-limiting-to-one-module-object-per-process + if (module_loaded) { + PyErr_SetString(PyExc_ImportError, + "cannot load module more than once per process"); + return -1; } + module_loaded = 1; /* Import the array and ufunc objects */ - import_array(); - import_ufunc(); + if (PyArray_ImportNumPyAPI() < 0) { + return -1; + } + if (PyUFunc_ImportUFuncAPI() < 0) { + return -1; + } PyObject *d = PyModule_GetDict(m); if (add_gufuncs(d) < 0) { Py_DECREF(d); - Py_DECREF(m); - return NULL; + return -1; } -#if Py_GIL_DISABLED - // signal this module supports running with the GIL disabled - PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); + return 0; +} + +static struct PyModuleDef_Slot _pocketfft_umath_slots[] = { + {Py_mod_exec, (void*)_pocketfft_umath_exec}, +#if PY_VERSION_HEX >= 0x030c00f0 // Python 3.12+ + {Py_mod_multiple_interpreters, Py_MOD_MULTIPLE_INTERPRETERS_NOT_SUPPORTED}, +#endif +#if PY_VERSION_HEX >= 0x030d00f0 // Python 3.13+ + // signal that this module supports running without an active GIL + {Py_mod_gil, Py_MOD_GIL_NOT_USED}, #endif + {0, NULL}, +}; + +static struct PyModuleDef moduledef = { + PyModuleDef_HEAD_INIT, /* m_base */ + "_pocketfft_umath", /* m_name */ + NULL, /* m_doc */ + 0, /* m_size */ + NULL, /* m_methods */ + _pocketfft_umath_slots, /* m_slots */ +}; - return m; +PyMODINIT_FUNC PyInit__pocketfft_umath(void) { + return PyModuleDef_Init(&moduledef); } From e107f24f32254d063249fc84e34aa707d826b054 Mon Sep 17 00:00:00 2001 From: Adam 
Turner <9087854+AA-Turner@users.noreply.github.com> Date: Thu, 29 May 2025 13:04:38 +0100 Subject: [PATCH 0061/1718] MAINT: Convert multiarray to multi-phase init (PEP 489) (#29022) --- numpy/_core/__init__.py | 4 + .../src/multiarray/_multiarray_tests.c.src | 59 ++++--- numpy/_core/src/multiarray/multiarraymodule.c | 166 +++++++++--------- 3 files changed, 125 insertions(+), 104 deletions(-) diff --git a/numpy/_core/__init__.py b/numpy/_core/__init__.py index 7b19cefb2f93..b0be8d1cbab6 100644 --- a/numpy/_core/__init__.py +++ b/numpy/_core/__init__.py @@ -23,6 +23,10 @@ except ImportError as exc: import sys + # Bypass for the module re-initialization opt-out + if exc.msg == "cannot load module more than once per process": + raise + # Basically always, the problem should be that the C module is wrong/missing... if ( isinstance(exc, ModuleNotFoundError) diff --git a/numpy/_core/src/multiarray/_multiarray_tests.c.src b/numpy/_core/src/multiarray/_multiarray_tests.c.src index fc73a64b19a0..8012a32b070e 100644 --- a/numpy/_core/src/multiarray/_multiarray_tests.c.src +++ b/numpy/_core/src/multiarray/_multiarray_tests.c.src @@ -2413,41 +2413,56 @@ static PyMethodDef Multiarray_TestsMethods[] = { }; -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "_multiarray_tests", - NULL, - -1, - Multiarray_TestsMethods, - NULL, - NULL, - NULL, - NULL -}; +static int module_loaded = 0; -PyMODINIT_FUNC PyInit__multiarray_tests(void) +static int +_multiarray_tests_exec(PyObject *m) { - PyObject *m; + // https://docs.python.org/3/howto/isolating-extensions.html#opt-out-limiting-to-one-module-object-per-process + if (module_loaded) { + PyErr_SetString(PyExc_ImportError, + "cannot load module more than once per process"); + return -1; + } + module_loaded = 1; - m = PyModule_Create(&moduledef); - if (m == NULL) { - return m; + if (PyArray_ImportNumPyAPI() < 0) { + return -1; } - import_array(); if (init_argparse_mutex() < 0) { - return NULL; + return -1; } if 
(PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "cannot load _multiarray_tests module."); } -#if Py_GIL_DISABLED - // signal this module supports running with the GIL disabled - PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); + return 0; +} + +static struct PyModuleDef_Slot _multiarray_tests_slots[] = { + {Py_mod_exec, _multiarray_tests_exec}, +#if PY_VERSION_HEX >= 0x030c00f0 // Python 3.12+ + {Py_mod_multiple_interpreters, Py_MOD_MULTIPLE_INTERPRETERS_NOT_SUPPORTED}, +#endif +#if PY_VERSION_HEX >= 0x030d00f0 // Python 3.13+ + // signal that this module supports running without an active GIL + {Py_mod_gil, Py_MOD_GIL_NOT_USED}, #endif + {0, NULL}, +}; + +static struct PyModuleDef moduledef = { + .m_base = PyModuleDef_HEAD_INIT, + .m_name = "_multiarray_tests", + .m_size = 0, + .m_methods = Multiarray_TestsMethods, + .m_slots = _multiarray_tests_slots, +}; - return m; +PyMODINIT_FUNC PyInit__multiarray_tests(void) +{ + return PyModuleDef_Init(&moduledef); } NPY_NO_EXPORT int diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index 8ba38b555edb..022a54fe17da 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -4773,36 +4773,27 @@ initialize_thread_unsafe_state(void) { return 0; } -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "_multiarray_umath", - NULL, - -1, - array_module_methods, - NULL, - NULL, - NULL, - NULL -}; +static int module_loaded = 0; -/* Initialization function for the module */ -PyMODINIT_FUNC PyInit__multiarray_umath(void) { - PyObject *m, *d, *s; - PyObject *c_api; +static int +_multiarray_umath_exec(PyObject *m) { + PyObject *d, *s, *c_api; - /* Create the module and add the functions */ - m = PyModule_Create(&moduledef); - if (!m) { - return NULL; + // https://docs.python.org/3/howto/isolating-extensions.html#opt-out-limiting-to-one-module-object-per-process + if (module_loaded) { + 
PyErr_SetString(PyExc_ImportError, + "cannot load module more than once per process"); + return -1; } + module_loaded = 1; /* Initialize CPU features */ if (npy_cpu_init() < 0) { - goto err; + return -1; } /* Initialize CPU dispatch tracer */ if (npy_cpu_dispatch_tracer_init(m) < 0) { - goto err; + return -1; } #if defined(MS_WIN64) && defined(__GNUC__) @@ -4818,62 +4809,62 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { numpy_pydatetime_import(); if (PyErr_Occurred()) { - goto err; + return -1; } /* Add some symbolic constants to the module */ d = PyModule_GetDict(m); if (!d) { - goto err; + return -1; } if (intern_strings() < 0) { - goto err; + return -1; } if (initialize_static_globals() < 0) { - goto err; + return -1; } if (initialize_thread_unsafe_state() < 0) { - goto err; + return -1; } if (init_import_mutex() < 0) { - goto err; + return -1; } if (init_extobj() < 0) { - goto err; + return -1; } if (PyType_Ready(&PyUFunc_Type) < 0) { - goto err; + return -1; } PyArrayDTypeMeta_Type.tp_base = &PyType_Type; if (PyType_Ready(&PyArrayDTypeMeta_Type) < 0) { - goto err; + return -1; } PyArrayDescr_Type.tp_hash = PyArray_DescrHash; Py_SET_TYPE(&PyArrayDescr_Type, &PyArrayDTypeMeta_Type); if (PyType_Ready(&PyArrayDescr_Type) < 0) { - goto err; + return -1; } initialize_casting_tables(); initialize_numeric_types(); if (initscalarmath(m) < 0) { - goto err; + return -1; } if (PyType_Ready(&PyArray_Type) < 0) { - goto err; + return -1; } if (setup_scalartypes(d) < 0) { - goto err; + return -1; } PyArrayIter_Type.tp_iter = PyObject_SelfIter; @@ -4881,28 +4872,28 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { PyArrayMultiIter_Type.tp_iter = PyObject_SelfIter; PyArrayMultiIter_Type.tp_free = PyArray_free; if (PyType_Ready(&PyArrayIter_Type) < 0) { - goto err; + return -1; } if (PyType_Ready(&PyArrayMapIter_Type) < 0) { - goto err; + return -1; } if (PyType_Ready(&PyArrayMultiIter_Type) < 0) { - goto err; + return -1; } PyArrayNeighborhoodIter_Type.tp_new = 
PyType_GenericNew; if (PyType_Ready(&PyArrayNeighborhoodIter_Type) < 0) { - goto err; + return -1; } if (PyType_Ready(&NpyIter_Type) < 0) { - goto err; + return -1; } if (PyType_Ready(&PyArrayFlags_Type) < 0) { - goto err; + return -1; } NpyBusDayCalendar_Type.tp_new = PyType_GenericNew; if (PyType_Ready(&NpyBusDayCalendar_Type) < 0) { - goto err; + return -1; } /* @@ -4923,43 +4914,43 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { s = npy_cpu_features_dict(); if (s == NULL) { - goto err; + return -1; } if (PyDict_SetItemString(d, "__cpu_features__", s) < 0) { Py_DECREF(s); - goto err; + return -1; } Py_DECREF(s); s = npy_cpu_baseline_list(); if (s == NULL) { - goto err; + return -1; } if (PyDict_SetItemString(d, "__cpu_baseline__", s) < 0) { Py_DECREF(s); - goto err; + return -1; } Py_DECREF(s); s = npy_cpu_dispatch_list(); if (s == NULL) { - goto err; + return -1; } if (PyDict_SetItemString(d, "__cpu_dispatch__", s) < 0) { Py_DECREF(s); - goto err; + return -1; } Py_DECREF(s); s = PyCapsule_New((void *)_datetime_strings, NULL, NULL); if (s == NULL) { - goto err; + return -1; } PyDict_SetItemString(d, "DATETIMEUNITS", s); Py_DECREF(s); #define ADDCONST(NAME) \ - s = PyLong_FromLong(NPY_##NAME); \ + s = PyLong_FromLong(NPY_##NAME); \ PyDict_SetItemString(d, #NAME, s); \ Py_DECREF(s) @@ -4999,39 +4990,39 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { /* Finalize scalar types and expose them via namespace or typeinfo dict */ if (set_typeinfo(d) != 0) { - goto err; + return -1; } if (PyType_Ready(&PyArrayFunctionDispatcher_Type) < 0) { - goto err; + return -1; } PyDict_SetItemString( d, "_ArrayFunctionDispatcher", (PyObject *)&PyArrayFunctionDispatcher_Type); if (PyType_Ready(&PyArrayArrayConverter_Type) < 0) { - goto err; + return -1; } PyDict_SetItemString( d, "_array_converter", (PyObject *)&PyArrayArrayConverter_Type); if (PyType_Ready(&PyArrayMethod_Type) < 0) { - goto err; + return -1; } if (PyType_Ready(&PyBoundArrayMethod_Type) < 0) { - goto err; + 
return -1; } if (initialize_and_map_pytypes_to_dtypes() < 0) { - goto err; + return -1; } if (PyArray_InitializeCasts() < 0) { - goto err; + return -1; } if (init_string_dtype() < 0) { - goto err; + return -1; } /* @@ -5040,7 +5031,7 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { PyDataMem_DefaultHandler = PyCapsule_New( &default_handler, MEM_HANDLER_CAPSULE_NAME, NULL); if (PyDataMem_DefaultHandler == NULL) { - goto err; + return -1; } /* @@ -5049,32 +5040,32 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { */ current_handler = PyContextVar_New("current_allocator", PyDataMem_DefaultHandler); if (current_handler == NULL) { - goto err; + return -1; } if (initumath(m) != 0) { - goto err; + return -1; } if (set_matmul_flags(d) < 0) { - goto err; + return -1; } // initialize static references to ndarray.__array_*__ special methods npy_static_pydata.ndarray_array_finalize = PyObject_GetAttrString( (PyObject *)&PyArray_Type, "__array_finalize__"); if (npy_static_pydata.ndarray_array_finalize == NULL) { - goto err; + return -1; } npy_static_pydata.ndarray_array_ufunc = PyObject_GetAttrString( (PyObject *)&PyArray_Type, "__array_ufunc__"); if (npy_static_pydata.ndarray_array_ufunc == NULL) { - goto err; + return -1; } npy_static_pydata.ndarray_array_function = PyObject_GetAttrString( (PyObject *)&PyArray_Type, "__array_function__"); if (npy_static_pydata.ndarray_array_function == NULL) { - goto err; + return -1; } /* @@ -5090,13 +5081,13 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { if (npy_cache_import_runtime( "numpy.dtypes", "_add_dtype_helper", &npy_runtime_imports._add_dtype_helper) == -1) { - goto err; + return -1; } if (PyObject_CallFunction( npy_runtime_imports._add_dtype_helper, "Os", (PyObject *)&PyArray_StringDType, NULL) == NULL) { - goto err; + return -1; } PyDict_SetItemString(d, "StringDType", (PyObject *)&PyArray_StringDType); @@ -5104,13 +5095,13 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { npy_static_pydata.zero_pyint_like_arr = 
PyArray_ZEROS( 0, NULL, NPY_DEFAULT_INT, NPY_FALSE); if (npy_static_pydata.zero_pyint_like_arr == NULL) { - goto err; + return -1; } ((PyArrayObject_fields *)npy_static_pydata.zero_pyint_like_arr)->flags |= (NPY_ARRAY_WAS_PYTHON_INT|NPY_ARRAY_WAS_INT_AND_REPLACED); if (verify_static_structs_initialized() < 0) { - goto err; + return -1; } /* @@ -5120,33 +5111,44 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { /* The dtype API is not auto-filled/generated via Python scripts: */ _fill_dtype_api(PyArray_API); if (c_api == NULL) { - goto err; + return -1; } PyDict_SetItemString(d, "_ARRAY_API", c_api); Py_DECREF(c_api); c_api = PyCapsule_New((void *)PyUFunc_API, NULL, NULL); if (c_api == NULL) { - goto err; + return -1; } PyDict_SetItemString(d, "_UFUNC_API", c_api); Py_DECREF(c_api); if (PyErr_Occurred()) { - goto err; + return -1; } -#if Py_GIL_DISABLED - // signal this module supports running with the GIL disabled - PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); + return 0; +} + +static struct PyModuleDef_Slot _multiarray_umath_slots[] = { + {Py_mod_exec, _multiarray_umath_exec}, +#if PY_VERSION_HEX >= 0x030c00f0 // Python 3.12+ + {Py_mod_multiple_interpreters, Py_MOD_MULTIPLE_INTERPRETERS_NOT_SUPPORTED}, #endif +#if PY_VERSION_HEX >= 0x030d00f0 // Python 3.13+ + // signal that this module supports running without an active GIL + {Py_mod_gil, Py_MOD_GIL_NOT_USED}, +#endif + {0, NULL}, +}; - return m; +static struct PyModuleDef moduledef = { + .m_base = PyModuleDef_HEAD_INIT, + .m_name = "_multiarray_umath", + .m_size = 0, + .m_methods = array_module_methods, + .m_slots = _multiarray_umath_slots, +}; - err: - if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_RuntimeError, - "cannot load multiarray module."); - } - Py_DECREF(m); - return NULL; +PyMODINIT_FUNC PyInit__multiarray_umath(void) { + return PyModuleDef_Init(&moduledef); } From b37f7616879b8ce977deca81069c886a4096511d Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Thu, 29 May 2025 17:33:36 +0300 
Subject: [PATCH 0062/1718] BLD: bump OpenBLAS version, use OpenBLAS for win-arm64 (#29039) * BLD: bump OpenBLAS version, use OpenBLAS for win-arm64 [wheel build] * Update requirements/ci_requirements.txt Co-authored-by: Sebastian Berg * use pip to install anaconda-client on win-arm64 [wheel build] * allow noblas in win32 wheels, use scipy-openblas32 on win-arm64 [wheel build] * improve runner arch detection logic [wheel build] * remove win_arm64 cibuildwheel override * remove 'strip' before calling delvewheel [wheel build] * use openblas 0.3.29.265 only on win-arm64 [wheel build] * add comment about lack of win-arm64 openblas64 wheels [wheel build] --------- Co-authored-by: Sebastian Berg Co-authored-by: Joe Rickerby --- .github/workflows/wheels.yml | 13 ++++++++++++- pyproject.toml | 9 ++------- requirements/ci32_requirements.txt | 3 ++- requirements/ci_requirements.txt | 6 ++++-- tools/wheels/cibw_before_build.sh | 26 ++++++++++++++++++-------- tools/wheels/repair_windows.sh | 23 ----------------------- 6 files changed, 38 insertions(+), 42 deletions(-) diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index fa2c1cb5ae97..097efe8e7225 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -186,7 +186,8 @@ jobs: name: ${{ matrix.python }}-${{ matrix.buildplat[1] }}-${{ matrix.buildplat[2] }} path: ./wheelhouse/*.whl - - uses: mamba-org/setup-micromamba@0dea6379afdaffa5d528b3d1dabc45da37f443fc + - name: install micromamba + uses: mamba-org/setup-micromamba@0dea6379afdaffa5d528b3d1dabc45da37f443fc if: ${{ matrix.buildplat[1] != 'win_arm64' }} # unsupported platform at the moment with: # for installation of anaconda-client, required for upload to @@ -200,6 +201,16 @@ jobs: create-args: >- anaconda-client + - name: win-arm64 install anaconda client + if: ${{ matrix.buildplat[1] == 'win_arm64' }} + run: | + # Rust installation needed for rpds-py. 
+ Invoke-WebRequest https://static.rust-lang.org/rustup/dist/aarch64-pc-windows-msvc/rustup-init.exe -UseBasicParsing -Outfile rustup-init.exe + .\rustup-init.exe -y + $env:PATH="$env:PATH;$env:USERPROFILE\.cargo\bin" + pip install anaconda-client + + - name: Upload wheels if: success() && github.repository == 'numpy/numpy' shell: bash -el {0} diff --git a/pyproject.toml b/pyproject.toml index 5cf75b20a6b6..1e08544ced75 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -178,21 +178,16 @@ repair-wheel-command = [ ] [tool.cibuildwheel.windows] -# This does not work, use CIBW_ENVIRONMENT_WINDOWS -environment = {PKG_CONFIG_PATH="./.openblas"} config-settings = "setup-args=--vsenv setup-args=-Dallow-noblas=false build-dir=build" repair-wheel-command = "bash -el ./tools/wheels/repair_windows.sh {wheel} {dest_dir}" +# This does not work, use CIBW_ENVIRONMENT_WINDOWS +environment = {PKG_CONFIG_PATH="./.openblas"} [[tool.cibuildwheel.overrides]] select = "*-win32" config-settings = "setup-args=--vsenv setup-args=-Dallow-noblas=true build-dir=build" repair-wheel-command = "" -[[tool.cibuildwheel.overrides]] -select = "*-win_arm64" -config-settings = "setup-args=--vsenv setup-args=-Dallow-noblas=true build-dir=build" -repair-wheel-command = "" - [[tool.cibuildwheel.overrides]] select = "*pyodide*" before-test = "pip install -r {project}/requirements/emscripten_test_requirements.txt" diff --git a/requirements/ci32_requirements.txt b/requirements/ci32_requirements.txt index 5a7be719214a..74c9a51ec111 100644 --- a/requirements/ci32_requirements.txt +++ b/requirements/ci32_requirements.txt @@ -1,3 +1,4 @@ spin==0.13 # Keep this in sync with ci_requirements.txt -scipy-openblas32==0.3.29.0.0 +scipy-openblas32==0.3.29.0.0 ; sys_platform != 'win32' or platform_machine != 'ARM64' +scipy-openblas32==0.3.29.265.0 ; sys_platform == 'win32' and platform_machine == 'ARM64' diff --git a/requirements/ci_requirements.txt b/requirements/ci_requirements.txt index adf7d86558f0..b6ea06c812c8 
100644 --- a/requirements/ci_requirements.txt +++ b/requirements/ci_requirements.txt @@ -1,4 +1,6 @@ spin==0.13 # Keep this in sync with ci32_requirements.txt -scipy-openblas32==0.3.29.0.0 -scipy-openblas64==0.3.29.0.0 +scipy-openblas32==0.3.29.0.0 ; sys_platform != 'win32' or platform_machine != 'ARM64' +scipy-openblas32==0.3.29.265.0 ; sys_platform == 'win32' and platform_machine == 'ARM64' +# Note there is not yet a win-arm64 wheel, so we currently only exclude win-arm64 +scipy-openblas64==0.3.29.0.0 ; sys_platform != 'win32' or platform_machine != 'ARM64' diff --git a/tools/wheels/cibw_before_build.sh b/tools/wheels/cibw_before_build.sh index 3e1d4498fe7c..e41e5d37316b 100644 --- a/tools/wheels/cibw_before_build.sh +++ b/tools/wheels/cibw_before_build.sh @@ -22,9 +22,6 @@ fi if [[ $(python -c"import sys; print(sys.maxsize)") < $(python -c"import sys; print(2**33)") ]]; then echo "No BLAS used for 32-bit wheels" export INSTALL_OPENBLAS=false -elif [[ $(python -c"import sysconfig; print(sysconfig.get_platform())") == "win-arm64" ]]; then - echo "No BLAS used for ARM64 wheels" - export INSTALL_OPENBLAS=false elif [ -z $INSTALL_OPENBLAS ]; then # the macos_arm64 build might not set this variable export INSTALL_OPENBLAS=true @@ -32,20 +29,33 @@ fi # Install Openblas from scipy-openblas64 if [[ "$INSTALL_OPENBLAS" = "true" ]] ; then - echo PKG_CONFIG_PATH $PKG_CONFIG_PATH + # by default, use scipy-openblas64 + OPENBLAS=openblas64 + # Possible values for RUNNER_ARCH in github are + # X86, X64, ARM, or ARM64 + # TODO: should we detect a missing RUNNER_ARCH and use platform.machine() + # when wheel build is run outside github? 
+ # On 32-bit platforms, use scipy_openblas32 + # On win-arm64 use scipy_openblas32 + if [[ $RUNNER_ARCH == "X86" || $RUNNER_ARCH == "ARM" ]] ; then + OPENBLAS=openblas32 + elif [[ $RUNNER_ARCH == "ARM64" && $RUNNER_OS == "Windows" ]] ; then + OPENBLAS=openblas32 + fi + echo PKG_CONFIG_PATH is $PKG_CONFIG_PATH, OPENBLAS is ${OPENBLAS} PKG_CONFIG_PATH=$PROJECT_DIR/.openblas rm -rf $PKG_CONFIG_PATH mkdir -p $PKG_CONFIG_PATH python -m pip install -r requirements/ci_requirements.txt - python -c "import scipy_openblas64; print(scipy_openblas64.get_pkg_config())" > $PKG_CONFIG_PATH/scipy-openblas.pc + python -c "import scipy_${OPENBLAS}; print(scipy_${OPENBLAS}.get_pkg_config())" > $PKG_CONFIG_PATH/scipy-openblas.pc # Copy the shared objects to a path under $PKG_CONFIG_PATH, the build # will point $LD_LIBRARY_PATH there and then auditwheel/delocate-wheel will # pull these into the wheel. Use python to avoid windows/posix problems python < Date: Thu, 29 May 2025 01:10:35 -0400 Subject: [PATCH 0063/1718] CI: bump to cibuildwheel 3.0.0b4 [wheel build] This bumps to cibuildwheel 3.0.0b4, which contains CPython 3.14.0b2, and removes the directory changing workaround. 
Signed-off-by: Henry Schreiner --- .github/workflows/emscripten.yml | 2 +- .github/workflows/wheels.yml | 2 +- tools/wheels/cibw_test_command.sh | 4 ---- 3 files changed, 2 insertions(+), 6 deletions(-) diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index fea77068e128..86628f6882cd 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -49,7 +49,7 @@ jobs: fetch-tags: true persist-credentials: false - - uses: pypa/cibuildwheel@faf86a6ed7efa889faf6996aa23820831055001a # 2.23.3 + - uses: pypa/cibuildwheel@cf078b0954f3fd08b8445a7bf2c3fb83ab3bb971 # v3.0.0b4 env: CIBW_PLATFORM: pyodide diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 097efe8e7225..3736f28cbd8c 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -177,7 +177,7 @@ jobs: fi - name: Build wheels - uses: pypa/cibuildwheel@90a0ddeff0f23eebc21630e65d66d0f4955e9b94 # v3.0.0b1 + uses: pypa/cibuildwheel@cf078b0954f3fd08b8445a7bf2c3fb83ab3bb971 # v3.0.0b4 env: CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} diff --git a/tools/wheels/cibw_test_command.sh b/tools/wheels/cibw_test_command.sh index 2d39687a861b..60e90ef5beb6 100644 --- a/tools/wheels/cibw_test_command.sh +++ b/tools/wheels/cibw_test_command.sh @@ -4,10 +4,6 @@ set -xe PROJECT_DIR="$1" -if [ -d tools ]; then - cd tools -fi - python -m pip install threadpoolctl python -c "import numpy; numpy.show_config()" From bcf8a99e8ca8c068f9b51a7a086000b53632c3cd Mon Sep 17 00:00:00 2001 From: Henry Schreiner Date: Wed, 28 May 2025 13:02:47 -0400 Subject: [PATCH 0064/1718] CI: clean up cibuildwheel config a bit [wheel build] This simplifies the configuration a bit: * Combine pyodide blocks * Use tables/lists for config-settings and skip * Remove a few repeated lines * Use a list for select Signed-off-by: Henry Schreiner --- pyproject.toml | 30 +++++++++++++++++------------- 1 file changed, 17 insertions(+), 13 deletions(-) diff 
--git a/pyproject.toml b/pyproject.toml index 1e08544ced75..b0e58705ebd1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -142,14 +142,17 @@ tracker = "https://github.com/numpy/numpy/issues" # build wheels for in CI are controlled in `.github/workflows/wheels.yml` and # `tools/ci/cirrus_wheels.yml`. build-frontend = "build" -skip = "*_i686 *_ppc64le *_s390x *_universal2" +skip = ["*_i686", "*_ppc64le", "*_s390x", "*_universal2"] before-build = "bash {project}/tools/wheels/cibw_before_build.sh {project}" -# The build will use openblas64 everywhere, except on arm64 macOS >=14.0 (uses Accelerate) -config-settings = "setup-args=-Duse-ilp64=true setup-args=-Dallow-noblas=false build-dir=build" before-test = "pip install -r {project}/requirements/test_requirements.txt" test-command = "bash {project}/tools/wheels/cibw_test_command.sh {project}" enable = ["cpython-freethreading", "pypy", "cpython-prerelease"] +# The build will use openblas64 everywhere, except on arm64 macOS >=14.0 (uses Accelerate) +[tool.cibuildwheel.config-settings] +setup-args = ["-Duse-ilp64=true", "-Dallow-noblas=false"] +build-dir = "build" + [tool.cibuildwheel.linux] manylinux-x86_64-image = "manylinux_2_28" manylinux-aarch64-image = "manylinux_2_28" @@ -157,7 +160,14 @@ musllinux-x86_64-image = "musllinux_1_2" musllinux-aarch64-image = "musllinux_1_2" [tool.cibuildwheel.pyodide] -config-settings = "build-dir=build setup-args=--cross-file=$PWD/tools/ci/emscripten/emscripten.meson.cross setup-args=-Dblas=none setup-args=-Dlapack=none" +before-test = "pip install -r {project}/requirements/emscripten_test_requirements.txt" +# Pyodide ensures that the wheels are already repaired by auditwheel-emscripten +repair-wheel-command = "" +test-command = "python -m pytest --pyargs numpy -m 'not slow'" + +[tool.cibuildwheel.pyodide.config-settings] +build-dir = "build" +setup-args = ["--cross-file=$PWD/tools/ci/emscripten/emscripten.meson.cross", "-Dblas=none", "-Dlapack=none"] 
[tool.cibuildwheel.linux.environment] # RUNNER_OS is a GitHub Actions specific env var; define it here so it works on Cirrus CI too @@ -178,22 +188,16 @@ repair-wheel-command = [ ] [tool.cibuildwheel.windows] -config-settings = "setup-args=--vsenv setup-args=-Dallow-noblas=false build-dir=build" +config-settings = {setup-args = ["--vsenv", "-Dallow-noblas=false"], build-dir="build"} repair-wheel-command = "bash -el ./tools/wheels/repair_windows.sh {wheel} {dest_dir}" # This does not work, use CIBW_ENVIRONMENT_WINDOWS environment = {PKG_CONFIG_PATH="./.openblas"} [[tool.cibuildwheel.overrides]] -select = "*-win32" -config-settings = "setup-args=--vsenv setup-args=-Dallow-noblas=true build-dir=build" +select = ["*-win32"] +config-settings = {setup-args = ["--vsenv", "-Dallow-noblas=true"], build-dir="build"} repair-wheel-command = "" -[[tool.cibuildwheel.overrides]] -select = "*pyodide*" -before-test = "pip install -r {project}/requirements/emscripten_test_requirements.txt" -# Pyodide ensures that the wheels are already repaired by auditwheel-emscripten -repair-wheel-command = "" -test-command = "python -m pytest --pyargs numpy -m 'not slow'" [tool.meson-python] meson = 'vendored-meson/meson/meson.py' From 3318fbd3a137a44d8d54ab304a16fd359d80887b Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 29 May 2025 09:31:10 -0600 Subject: [PATCH 0065/1718] MNT: Avoid use of deprecated _PyDict_GetItemStringWithError in f2py --- numpy/f2py/src/fortranobject.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/numpy/f2py/src/fortranobject.c b/numpy/f2py/src/fortranobject.c index 4e2aa370b643..5c2b4bdf0931 100644 --- a/numpy/f2py/src/fortranobject.c +++ b/numpy/f2py/src/fortranobject.c @@ -363,6 +363,8 @@ fortran_getattr(PyFortranObject *fp, char *name) { int i, j, k, flag; if (fp->dict != NULL) { + // python 3.13 added PyDict_GetItemRef +#if PY_VERSION_HEX < 0x030D0000 PyObject *v = _PyDict_GetItemStringWithError(fp->dict, name); if (v == NULL && 
PyErr_Occurred()) { return NULL; @@ -371,6 +373,17 @@ fortran_getattr(PyFortranObject *fp, char *name) Py_INCREF(v); return v; } +#else + PyObject *v; + int result = PyDict_GetItemStringRef(fp->dict, name, &v); + if (result == -1) { + return NULL; + } + else if (result == 1) { + return v; + } +#endif + } for (i = 0, j = 1; i < fp->len && (j = strcmp(name, fp->defs[i].name)); i++) From 336d661e958065f8c95a559a24e143b5fc747c1d Mon Sep 17 00:00:00 2001 From: crusaderky Date: Thu, 29 May 2025 20:10:18 +0100 Subject: [PATCH 0066/1718] Ignore all build-* directories --- .gitignore | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index df7f084e3645..c4de68c1a9a7 100644 --- a/.gitignore +++ b/.gitignore @@ -64,7 +64,7 @@ GTAGS ################ # meson build/installation directories build -build-install +build-* # meson python output .mesonpy-native-file.ini # sphinx build directory From d27f6618923f6b65841d7eaebcd21062ecb5a8aa Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 30 May 2025 18:18:11 +0200 Subject: [PATCH 0067/1718] TYP: fix `NDArray[integer]` inplace operator mypy issue --- numpy/__init__.pyi | 94 +++------------------ numpy/ma/core.pyi | 57 +++---------- numpy/typing/tests/data/fail/arithmetic.pyi | 2 - 3 files changed, 23 insertions(+), 130 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 0e8b4625e7d4..df72ce3d877a 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -3367,13 +3367,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __iadd__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __iadd__( - self: NDArray[unsignedinteger], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __iadd__(self: NDArray[signedinteger], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... 
+ def __iadd__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload def __iadd__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload @@ -3393,13 +3387,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__isub__` @overload - def __isub__( - self: NDArray[unsignedinteger], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __isub__(self: NDArray[signedinteger], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __isub__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload def __isub__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload @@ -3414,15 +3402,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __imul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload def __imul__( - self: NDArray[unsignedinteger], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __imul__( - self: ndarray[Any, dtype[signedinteger | character] | dtypes.StringDType], - other: _ArrayLikeInt_co, - /, + self: ndarray[Any, dtype[integer | character] | dtypes.StringDType], other: _ArrayLikeInt_co, / ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload def __imul__(self: NDArray[floating | timedelta64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @@ -3433,13 +3413,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__ipow__` @overload - def __ipow__( - self: NDArray[unsignedinteger], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... 
- @overload - def __ipow__(self: NDArray[signedinteger], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __ipow__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload def __ipow__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload @@ -3457,13 +3431,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # keep in sync with `__imod__` and `MaskedArray.__ifloordiv__` @overload - def __ifloordiv__( - self: NDArray[unsignedinteger], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __ifloordiv__(self: NDArray[signedinteger], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __ifloordiv__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload def __ifloordiv__(self: NDArray[floating | timedelta64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload @@ -3471,13 +3439,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # keep in sync with `__ifloordiv__` @overload - def __imod__( - self: NDArray[unsignedinteger], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __imod__(self: NDArray[signedinteger], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __imod__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload def __imod__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload @@ -3491,25 +3453,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # keep in sync with `__irshift__` @overload - def __ilshift__( - self: NDArray[unsignedinteger], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... 
- @overload - def __ilshift__(self: NDArray[signedinteger], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __ilshift__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload def __ilshift__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... # keep in sync with `__ilshift__` @overload - def __irshift__( - self: NDArray[unsignedinteger], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __irshift__(self: NDArray[signedinteger], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __irshift__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload def __irshift__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @@ -3517,13 +3467,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __iand__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __iand__( - self: NDArray[unsignedinteger], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __iand__(self: NDArray[signedinteger], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __iand__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload def __iand__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @@ -3531,13 +3475,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __ixor__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __ixor__( - self: NDArray[unsignedinteger], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... 
- @overload - def __ixor__(self: NDArray[signedinteger], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __ixor__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload def __ixor__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @@ -3545,13 +3483,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __ior__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __ior__( - self: NDArray[unsignedinteger], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __ior__(self: NDArray[signedinteger], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __ior__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload def __ior__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @@ -3559,9 +3491,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __imatmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __imatmul__(self: NDArray[unsignedinteger], other: _ArrayLikeUInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __imatmul__(self: NDArray[signedinteger], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __imatmul__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload def __imatmul__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 388619e1a654..da4ad3b333db 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -668,20 +668,13 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def __pow__(self, other, mod: None = None, /): ... 
def __rpow__(self, other, mod: None = None, /): ... - # Keep in sync with `ndarray.__iadd__`, except that `_MaskedArray[unsignedinteger]` does not accept - # _IntLike_co for `other`. + # Keep in sync with `ndarray.__iadd__` @overload def __iadd__( self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, / ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... @overload - def __iadd__( - self: _MaskedArray[unsignedinteger], other: _ArrayLikeUInt_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __iadd__( - self: _MaskedArray[signedinteger], other: _ArrayLikeInt_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + def __iadd__(self: _MaskedArray[integer], other: _ArrayLikeInt_co, /) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... @overload def __iadd__( self: _MaskedArray[floating], other: _ArrayLikeFloat_co, / @@ -707,16 +700,9 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): self: _MaskedArray[object_], other: Any, / ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - # Keep in sync with `ndarray.__isub__`, except that `_MaskedArray[unsignedinteger]` does not accept - # _IntLike_co for `other`. - @overload - def __isub__( - self: _MaskedArray[unsignedinteger], other: _ArrayLikeUInt_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + # Keep in sync with `ndarray.__isub__` @overload - def __isub__( - self: _MaskedArray[signedinteger], other: _ArrayLikeInt_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + def __isub__(self: _MaskedArray[integer], other: _ArrayLikeInt_co, /) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... @overload def __isub__( self: _MaskedArray[floating], other: _ArrayLikeFloat_co, / @@ -734,20 +720,14 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): self: _MaskedArray[object_], other: Any, / ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - # Keep in sync with `ndarray.__imul__`, except that `_MaskedArray[unsignedinteger]` does not accept - # _IntLike_co for `other`. 
+ # Keep in sync with `ndarray.__imul__` @overload def __imul__( self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, / ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... @overload def __imul__( - self: _MaskedArray[unsignedinteger], other: _ArrayLikeUInt_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __imul__( - self: MaskedArray[Any, dtype[signedinteger] | dtype[character] | dtypes.StringDType], - other: _ArrayLikeInt_co, / + self: MaskedArray[Any, dtype[integer] | dtype[character] | dtypes.StringDType], other: _ArrayLikeInt_co, / ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... @overload def __imul__( @@ -762,16 +742,9 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): self: _MaskedArray[object_], other: Any, / ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - # Keep in sync with `ndarray.__ifloordiv__`, except that `_MaskedArray[unsignedinteger]` does not accept - # _IntLike_co for `other`. - @overload - def __ifloordiv__( - self: _MaskedArray[unsignedinteger], other: _ArrayLikeUInt_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + # Keep in sync with `ndarray.__ifloordiv__` @overload - def __ifloordiv__( - self: _MaskedArray[signedinteger], other: _ArrayLikeInt_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + def __ifloordiv__(self: _MaskedArray[integer], other: _ArrayLikeInt_co, /) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... @overload def __ifloordiv__( self: _MaskedArray[floating | timedelta64], other: _ArrayLikeFloat_co, / @@ -781,8 +754,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): self: _MaskedArray[object_], other: Any, / ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - # Keep in sync with `ndarray.__itruediv__`, except that `_MaskedArray[unsignedinteger]` does not accept - # _IntLike_co for `other`. 
+ # Keep in sync with `ndarray.__itruediv__` @overload def __itruediv__( self: _MaskedArray[floating | timedelta64], other: _ArrayLikeFloat_co, / @@ -798,16 +770,9 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): self: _MaskedArray[object_], other: Any, / ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - # Keep in sync with `ndarray.__ipow__`, except that `_MaskedArray[unsignedinteger]` does not accept - # _IntLike_co for `other`. + # Keep in sync with `ndarray.__ipow__` @overload - def __ipow__( - self: _MaskedArray[unsignedinteger], other: _ArrayLikeUInt_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __ipow__( - self: _MaskedArray[signedinteger], other: _ArrayLikeInt_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + def __ipow__(self: _MaskedArray[integer], other: _ArrayLikeInt_co, /) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... @overload def __ipow__( self: _MaskedArray[floating], other: _ArrayLikeFloat_co, / diff --git a/numpy/typing/tests/data/fail/arithmetic.pyi b/numpy/typing/tests/data/fail/arithmetic.pyi index e94861a3eba7..e696083b8614 100644 --- a/numpy/typing/tests/data/fail/arithmetic.pyi +++ b/numpy/typing/tests/data/fail/arithmetic.pyi @@ -85,7 +85,6 @@ AR_b *= AR_LIKE_f # type: ignore[arg-type] AR_b *= AR_LIKE_c # type: ignore[arg-type] AR_b *= AR_LIKE_m # type: ignore[arg-type] -AR_u *= AR_LIKE_i # type: ignore[arg-type] AR_u *= AR_LIKE_f # type: ignore[arg-type] AR_u *= AR_LIKE_c # type: ignore[arg-type] AR_u *= AR_LIKE_m # type: ignore[arg-type] @@ -105,7 +104,6 @@ AR_b **= AR_LIKE_i # type: ignore[misc] AR_b **= AR_LIKE_f # type: ignore[misc] AR_b **= AR_LIKE_c # type: ignore[misc] -AR_u **= AR_LIKE_i # type: ignore[arg-type] AR_u **= AR_LIKE_f # type: ignore[arg-type] AR_u **= AR_LIKE_c # type: ignore[arg-type] From 998f561a1af90083099e4410412a842c49b4f993 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 30 May 2025 18:32:14 +0200 Subject: [PATCH 0068/1718] TYP: regression tests for `NDArray[integer]` inplace 
ops --- numpy/typing/tests/data/pass/arithmetic.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/numpy/typing/tests/data/pass/arithmetic.py b/numpy/typing/tests/data/pass/arithmetic.py index b50d28e5fca5..3b2901cf2b51 100644 --- a/numpy/typing/tests/data/pass/arithmetic.py +++ b/numpy/typing/tests/data/pass/arithmetic.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import Any +from typing import Any, cast import numpy as np import numpy.typing as npt import pytest @@ -61,6 +61,7 @@ def __rpow__(self, value: Any) -> Object: AR_b: npt.NDArray[np.bool] = np.array([True]) AR_u: npt.NDArray[np.uint32] = np.array([1], dtype=np.uint32) AR_i: npt.NDArray[np.int64] = np.array([1]) +AR_integer: npt.NDArray[np.integer] = cast(npt.NDArray[np.integer], AR_i) AR_f: npt.NDArray[np.float64] = np.array([1.0]) AR_c: npt.NDArray[np.complex128] = np.array([1j]) AR_m: npt.NDArray[np.timedelta64] = np.array([np.timedelta64(1, "D")]) @@ -282,6 +283,10 @@ def __rpow__(self, value: Any) -> Object: AR_i *= AR_LIKE_u AR_i *= AR_LIKE_i +AR_integer *= AR_LIKE_b +AR_integer *= AR_LIKE_u +AR_integer *= AR_LIKE_i + AR_f *= AR_LIKE_b AR_f *= AR_LIKE_u AR_f *= AR_LIKE_i @@ -314,6 +319,10 @@ def __rpow__(self, value: Any) -> Object: AR_i **= AR_LIKE_u AR_i **= AR_LIKE_i +AR_integer **= AR_LIKE_b +AR_integer **= AR_LIKE_u +AR_integer **= AR_LIKE_i + AR_f **= AR_LIKE_b AR_f **= AR_LIKE_u AR_f **= AR_LIKE_i From 8c35aaad673e64c8806ec13141124667eb450469 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 30 May 2025 17:14:40 +0000 Subject: [PATCH 0069/1718] MAINT: Bump ossf/scorecard-action from 2.4.1 to 2.4.2 Bumps [ossf/scorecard-action](https://github.com/ossf/scorecard-action) from 2.4.1 to 2.4.2. 
- [Release notes](https://github.com/ossf/scorecard-action/releases) - [Changelog](https://github.com/ossf/scorecard-action/blob/main/RELEASE.md) - [Commits](https://github.com/ossf/scorecard-action/compare/f49aabe0b5af0936a0987cfb85d86b75731b0186...05b42c624433fc40578a4040d5cf5e36ddca8cde) --- updated-dependencies: - dependency-name: ossf/scorecard-action dependency-version: 2.4.2 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/scorecards.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 11a5be5f488a..f4e06677f804 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -30,7 +30,7 @@ jobs: persist-credentials: false - name: "Run analysis" - uses: ossf/scorecard-action@f49aabe0b5af0936a0987cfb85d86b75731b0186 # v2.4.1 + uses: ossf/scorecard-action@05b42c624433fc40578a4040d5cf5e36ddca8cde # v2.4.2 with: results_file: results.sarif results_format: sarif From 32cbf09a31fc859f98b8ed7178f5ea8d95310b39 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 30 May 2025 22:13:07 +0200 Subject: [PATCH 0070/1718] MAINT: bump `mypy` to `1.16.0` --- environment.yml | 2 +- requirements/test_requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/environment.yml b/environment.yml index 91585a8dcb13..d2964bf78368 100644 --- a/environment.yml +++ b/environment.yml @@ -25,7 +25,7 @@ dependencies: - hypothesis # For type annotations - typing_extensions>=4.5.0 - - mypy=1.15.0 + - mypy=1.16.0 - orjson # makes mypy faster # For building docs - sphinx>=4.5.0 diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index a2a68f044a50..4fb1d47bf50d 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -13,7 +13,7 @@ pytest-timeout # For testing types. 
Notes on the restrictions: # - Mypy relies on C API features not present in PyPy # NOTE: Keep mypy in sync with environment.yml -mypy==1.15.0; platform_python_implementation != "PyPy" +mypy==1.16.0; platform_python_implementation != "PyPy" typing_extensions>=4.5.0 # for optional f2py encoding detection charset-normalizer From 87e769a196336ca1427eb2215925bf987154b316 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 30 May 2025 23:22:31 +0200 Subject: [PATCH 0071/1718] TYP: run mypy in strict mode --- numpy/typing/tests/data/mypy.ini | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/numpy/typing/tests/data/mypy.ini b/numpy/typing/tests/data/mypy.ini index bca203260efa..4aa465ae087b 100644 --- a/numpy/typing/tests/data/mypy.ini +++ b/numpy/typing/tests/data/mypy.ini @@ -1,9 +1,8 @@ [mypy] +strict = True enable_error_code = deprecated, ignore-without-code, truthy-bool -strict_bytes = True -warn_unused_ignores = True -implicit_reexport = False disallow_any_unimported = True -disallow_any_generics = True +allow_redefinition_new = True +local_partial_types = True show_absolute_path = True pretty = True From 51f4ac8f6188a4cd7dbb81ee4fff0cf7aef34e3e Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 30 May 2025 23:23:22 +0200 Subject: [PATCH 0072/1718] TYP: disable mypy's `no-untyped-call` errors in the `MaskedArray` type-tests --- numpy/typing/tests/data/pass/ma.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/numpy/typing/tests/data/pass/ma.py b/numpy/typing/tests/data/pass/ma.py index e7915a583210..b9be2b2e4384 100644 --- a/numpy/typing/tests/data/pass/ma.py +++ b/numpy/typing/tests/data/pass/ma.py @@ -7,6 +7,8 @@ _ScalarT = TypeVar("_ScalarT", bound=np.generic) MaskedArray: TypeAlias = np.ma.MaskedArray[_Shape, np.dtype[_ScalarT]] +# mypy: disable-error-code=no-untyped-call + MAR_b: MaskedArray[np.bool] = np.ma.MaskedArray([True]) MAR_u: MaskedArray[np.uint32] = np.ma.MaskedArray([1], dtype=np.uint32) MAR_i: MaskedArray[np.int64] = 
np.ma.MaskedArray([1]) From 57296fbbcba96beaf923e5b7c45bfef547d1484a Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 30 May 2025 23:24:07 +0200 Subject: [PATCH 0073/1718] TYP: remove problematic runtime code from a `.pyi` test module --- numpy/typing/tests/data/reveal/nbit_base_example.pyi | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/numpy/typing/tests/data/reveal/nbit_base_example.pyi b/numpy/typing/tests/data/reveal/nbit_base_example.pyi index 33229660b6f8..66470b95bf15 100644 --- a/numpy/typing/tests/data/reveal/nbit_base_example.pyi +++ b/numpy/typing/tests/data/reveal/nbit_base_example.pyi @@ -7,8 +7,7 @@ from numpy._typing import _32Bit, _64Bit T1 = TypeVar("T1", bound=npt.NBitBase) # type: ignore[deprecated] # pyright: ignore[reportDeprecated] T2 = TypeVar("T2", bound=npt.NBitBase) # type: ignore[deprecated] # pyright: ignore[reportDeprecated] -def add(a: np.floating[T1], b: np.integer[T2]) -> np.floating[T1 | T2]: - return a + b +def add(a: np.floating[T1], b: np.integer[T2]) -> np.floating[T1 | T2]: ... 
i8: np.int64 i4: np.int32 From f925c0cefc05e4619bd7554eb6c301e7e5934685 Mon Sep 17 00:00:00 2001 From: Guido Imperiale Date: Sat, 31 May 2025 09:26:07 +0100 Subject: [PATCH 0074/1718] BUG: f2py: thread-safe forcomb (#29091) --- numpy/f2py/cfuncs.py | 39 ++++++++++++++++++++++----------------- numpy/f2py/rules.py | 5 +++-- 2 files changed, 25 insertions(+), 19 deletions(-) diff --git a/numpy/f2py/cfuncs.py b/numpy/f2py/cfuncs.py index 6c48c1ef0175..b2b1cad3d867 100644 --- a/numpy/f2py/cfuncs.py +++ b/numpy/f2py/cfuncs.py @@ -598,32 +598,37 @@ def errmess(s: str) -> None: return ii; }""" cfuncs['forcomb'] = """ -static struct { int nd;npy_intp *d;int *i,*i_tr,tr; } forcombcache; -static int initforcomb(npy_intp *dims,int nd,int tr) { +struct ForcombCache { int nd;npy_intp *d;int *i,*i_tr,tr; }; +static int initforcomb(struct ForcombCache *cache, npy_intp *dims,int nd,int tr) { int k; if (dims==NULL) return 0; if (nd<0) return 0; - forcombcache.nd = nd; - forcombcache.d = dims; - forcombcache.tr = tr; - if ((forcombcache.i = (int *)malloc(sizeof(int)*nd))==NULL) return 0; - if ((forcombcache.i_tr = (int *)malloc(sizeof(int)*nd))==NULL) return 0; + cache->nd = nd; + cache->d = dims; + cache->tr = tr; + + cache->i = (int *)malloc(sizeof(int)*nd); + if (cache->i==NULL) return 0; + cache->i_tr = (int *)malloc(sizeof(int)*nd); + if (cache->i_tr==NULL) {free(cache->i); return 0;}; + for (k=1;ki[k] = cache->i_tr[nd-k-1] = 0; } - forcombcache.i[0] = forcombcache.i_tr[nd-1] = -1; + cache->i[0] = cache->i_tr[nd-1] = -1; return 1; } -static int *nextforcomb(void) { +static int *nextforcomb(struct ForcombCache *cache) { + if (cache==NULL) return NULL; int j,*i,*i_tr,k; - int nd=forcombcache.nd; - if ((i=forcombcache.i) == NULL) return NULL; - if ((i_tr=forcombcache.i_tr) == NULL) return NULL; - if (forcombcache.d == NULL) return NULL; + int nd=cache->nd; + if ((i=cache->i) == NULL) return NULL; + if ((i_tr=cache->i_tr) == NULL) return NULL; + if (cache->d == NULL) return NULL; 
i[0]++; - if (i[0]==forcombcache.d[0]) { + if (i[0]==cache->d[0]) { j=1; - while ((jd[j]-1)) j++; if (j==nd) { free(i); free(i_tr); @@ -634,7 +639,7 @@ def errmess(s: str) -> None: i_tr[nd-j-1]++; } else i_tr[nd-1]++; - if (forcombcache.tr) return i_tr; + if (cache->tr) return i_tr; return i; }""" needs['try_pyarr_from_string'] = ['STRINGCOPYN', 'PRINTPYOBJERR', 'string'] diff --git a/numpy/f2py/rules.py b/numpy/f2py/rules.py index c10d2afdd097..667ef287f92b 100644 --- a/numpy/f2py/rules.py +++ b/numpy/f2py/rules.py @@ -1184,9 +1184,10 @@ """\ int *_i,capi_i=0; CFUNCSMESS(\"#name#: Initializing #varname#=#init#\\n\"); - if (initforcomb(PyArray_DIMS(capi_#varname#_as_array), + struct ForcombCache cache; + if (initforcomb(&cache, PyArray_DIMS(capi_#varname#_as_array), PyArray_NDIM(capi_#varname#_as_array),1)) { - while ((_i = nextforcomb())) + while ((_i = nextforcomb(&cache))) #varname#[capi_i++] = #init#; /* fortran way */ } else { PyObject *exc, *val, *tb; From ecb2f40970fb28b997a3e781dd2733500ebf9a6f Mon Sep 17 00:00:00 2001 From: DWesl <22566757+DWesl@users.noreply.github.com> Date: Sat, 31 May 2025 04:32:42 -0400 Subject: [PATCH 0075/1718] PERF: Use dict instead of list to make NpzFile member existence checks constant time (#29098) Use dict instead of list to convert the passed key to the name used in the archive. --- numpy/lib/_npyio_impl.py | 64 ++++++++++++++++++---------------------- 1 file changed, 29 insertions(+), 35 deletions(-) diff --git a/numpy/lib/_npyio_impl.py b/numpy/lib/_npyio_impl.py index e588b8454b44..f284eeb74834 100644 --- a/numpy/lib/_npyio_impl.py +++ b/numpy/lib/_npyio_impl.py @@ -195,16 +195,13 @@ def __init__(self, fid, own_fid=False, allow_pickle=False, # Import is postponed to here since zipfile depends on gzip, an # optional component of the so-called standard library. 
_zip = zipfile_factory(fid) - self._files = _zip.namelist() - self.files = [] + _files = _zip.namelist() + self.files = [name.removesuffix(".npy") for name in _files] + self._files = dict(zip(self.files, _files)) + self._files.update(zip(_files, _files)) self.allow_pickle = allow_pickle self.max_header_size = max_header_size self.pickle_kwargs = pickle_kwargs - for x in self._files: - if x.endswith('.npy'): - self.files.append(x[:-4]) - else: - self.files.append(x) self.zip = _zip self.f = BagObj(self) if own_fid: @@ -240,37 +237,34 @@ def __len__(self): return len(self.files) def __getitem__(self, key): - # FIXME: This seems like it will copy strings around - # more than is strictly necessary. The zipfile - # will read the string and then - # the format.read_array will copy the string - # to another place in memory. - # It would be better if the zipfile could read - # (or at least uncompress) the data - # directly into the array memory. - member = False - if key in self._files: - member = True - elif key in self.files: - member = True - key += '.npy' - if member: - bytes = self.zip.open(key) - magic = bytes.read(len(format.MAGIC_PREFIX)) - bytes.close() - if magic == format.MAGIC_PREFIX: - bytes = self.zip.open(key) - return format.read_array(bytes, - allow_pickle=self.allow_pickle, - pickle_kwargs=self.pickle_kwargs, - max_header_size=self.max_header_size) - else: - return self.zip.read(key) + try: + key = self._files[key] + except KeyError: + raise KeyError(f"{key} is not a file in the archive") from None else: - raise KeyError(f"{key} is not a file in the archive") + with self.zip.open(key) as bytes: + magic = bytes.read(len(format.MAGIC_PREFIX)) + bytes.seek(0) + if magic == format.MAGIC_PREFIX: + # FIXME: This seems like it will copy strings around + # more than is strictly necessary. The zipfile + # will read the string and then + # the format.read_array will copy the string + # to another place in memory. 
+ # It would be better if the zipfile could read + # (or at least uncompress) the data + # directly into the array memory. + return format.read_array( + bytes, + allow_pickle=self.allow_pickle, + pickle_kwargs=self.pickle_kwargs, + max_header_size=self.max_header_size + ) + else: + return bytes.read(key) def __contains__(self, key): - return (key in self._files or key in self.files) + return (key in self._files) def __repr__(self): # Get filename or default to `object` From 358a13c861d728e11f1efc18e6c04b915c682ccf Mon Sep 17 00:00:00 2001 From: Raghuveer Devulapalli Date: Sat, 31 May 2025 01:48:27 -0700 Subject: [PATCH 0076/1718] BENCH: Increase array sizes for ufunc and sort benchmarks (#29084) --- benchmarks/benchmarks/bench_function_base.py | 2 +- benchmarks/benchmarks/bench_ufunc_strides.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/benchmarks/benchmarks/bench_function_base.py b/benchmarks/benchmarks/bench_function_base.py index 9b770aeb60bf..f72d50eb74ce 100644 --- a/benchmarks/benchmarks/bench_function_base.py +++ b/benchmarks/benchmarks/bench_function_base.py @@ -236,7 +236,7 @@ class Sort(Benchmark): param_names = ['kind', 'dtype', 'array_type'] # The size of the benchmarked arrays. 
- ARRAY_SIZE = 10000 + ARRAY_SIZE = 1000000 def setup(self, kind, dtype, array_type): rnd = np.random.RandomState(507582308) diff --git a/benchmarks/benchmarks/bench_ufunc_strides.py b/benchmarks/benchmarks/bench_ufunc_strides.py index 95df16e2cb5e..0c80b1877b3a 100644 --- a/benchmarks/benchmarks/bench_ufunc_strides.py +++ b/benchmarks/benchmarks/bench_ufunc_strides.py @@ -10,7 +10,7 @@ class _AbstractBinary(Benchmark): params = [] param_names = ['ufunc', 'stride_in0', 'stride_in1', 'stride_out', 'dtype'] timeout = 10 - arrlen = 10000 + arrlen = 1000000 data_finite = True data_denormal = False data_zeros = False @@ -63,7 +63,7 @@ class _AbstractUnary(Benchmark): params = [] param_names = ['ufunc', 'stride_in', 'stride_out', 'dtype'] timeout = 10 - arrlen = 10000 + arrlen = 1000000 data_finite = True data_denormal = False data_zeros = False From 79ee0b208345696f77697c0fd6671e2763dfe51b Mon Sep 17 00:00:00 2001 From: Yuki Kobayashi Date: Sun, 1 Jun 2025 01:08:58 +0000 Subject: [PATCH 0077/1718] Fix some incorrect reST markups --- doc/changelog/1.21.5-changelog.rst | 2 +- doc/neps/nep-0021-advanced-indexing.rst | 6 +++--- doc/neps/nep-0030-duck-array-protocol.rst | 4 ++-- doc/source/dev/internals.code-explanations.rst | 2 +- doc/source/reference/c-api/types-and-structures.rst | 2 +- doc/source/reference/maskedarray.generic.rst | 4 ++-- doc/source/reference/simd/build-options.rst | 2 +- doc/source/release/1.11.0-notes.rst | 2 +- doc/source/release/1.13.0-notes.rst | 2 +- doc/source/release/1.14.0-notes.rst | 4 ++-- doc/source/release/1.15.0-notes.rst | 6 +++--- doc/source/release/1.18.0-notes.rst | 4 ++-- doc/source/release/1.21.5-notes.rst | 2 +- doc/source/user/c-info.ufunc-tutorial.rst | 12 ++++++------ doc/source/user/how-to-io.rst | 2 +- numpy/lib/_polynomial_impl.py | 4 ++-- numpy/lib/_version.py | 3 +-- 17 files changed, 31 insertions(+), 32 deletions(-) diff --git a/doc/changelog/1.21.5-changelog.rst b/doc/changelog/1.21.5-changelog.rst index 
acd3599d48ef..04ff638d42a3 100644 --- a/doc/changelog/1.21.5-changelog.rst +++ b/doc/changelog/1.21.5-changelog.rst @@ -22,7 +22,7 @@ A total of 11 pull requests were merged for this release. * `#20462 `__: BUG: Fix float16 einsum fastpaths using wrong tempvar * `#20463 `__: BUG, DIST: Print os error message when the executable not exist * `#20464 `__: BLD: Verify the ability to compile C++ sources before initiating... -* `#20465 `__: BUG: Force ``npymath` ` to respect ``npy_longdouble`` +* `#20465 `__: BUG: Force ``npymath`` to respect ``npy_longdouble`` * `#20466 `__: BUG: Fix failure to create aligned, empty structured dtype * `#20467 `__: ENH: provide a convenience function to replace npy_load_module * `#20495 `__: MAINT: update wheel to version that supports python3.10 diff --git a/doc/neps/nep-0021-advanced-indexing.rst b/doc/neps/nep-0021-advanced-indexing.rst index badd41875af2..11ef238d6179 100644 --- a/doc/neps/nep-0021-advanced-indexing.rst +++ b/doc/neps/nep-0021-advanced-indexing.rst @@ -123,7 +123,7 @@ with shape ``(1,)``, not a 2D sub-matrix with shape ``(1, 1)``. Mixed indexing seems so tricky that it is tempting to say that it never should be used. However, it is not easy to avoid, because NumPy implicitly adds full slices if there are fewer indices than the full dimensionality of the indexed -array. This means that indexing a 2D array like `x[[0, 1]]`` is equivalent to +array. This means that indexing a 2D array like ``x[[0, 1]]`` is equivalent to ``x[[0, 1], :]``. These cases are not surprising, but they constrain the behavior of mixed indexing. @@ -236,7 +236,7 @@ be deduced: For the beginning, this probably means cases where ``arr[ind]`` and ``arr.oindex[ind]`` return different results give deprecation warnings. This includes every use of vectorized indexing with multiple integer arrays. 
- Due to the transposing behaviour, this means that``arr[0, :, index_arr]`` + Due to the transposing behaviour, this means that ``arr[0, :, index_arr]`` will be deprecated, but ``arr[:, 0, index_arr]`` will not for the time being. 7. To ensure that existing subclasses of `ndarray` that override indexing @@ -285,7 +285,7 @@ Open Questions Copying always "fixes" this possible inconsistency. * The final state to morph plain indexing in is not fixed in this PEP. - It is for example possible that `arr[index]`` will be equivalent to + It is for example possible that ``arr[index]`` will be equivalent to ``arr.oindex`` at some point in the future. Since such a change will take years, it seems unnecessary to make specific decisions at this time. diff --git a/doc/neps/nep-0030-duck-array-protocol.rst b/doc/neps/nep-0030-duck-array-protocol.rst index 7fb8c9734900..4a3d268697a2 100644 --- a/doc/neps/nep-0030-duck-array-protocol.rst +++ b/doc/neps/nep-0030-duck-array-protocol.rst @@ -102,14 +102,14 @@ a complete implementation would look like the following: The implementation above exemplifies the simplest case, but the overall idea is that libraries will implement a ``__duckarray__`` method that returns the original object, and an ``__array__`` method that either creates and returns an -appropriate NumPy array, or raises a``TypeError`` to prevent unintentional use +appropriate NumPy array, or raises a ``TypeError`` to prevent unintentional use as an object in a NumPy array (if ``np.asarray`` is called on an arbitrary object that does not implement ``__array__``, it will create a NumPy array scalar). In case of existing libraries that don't already implement ``__array__`` but would like to use duck array typing, it is advised that they introduce -both ``__array__`` and``__duckarray__`` methods. +both ``__array__`` and ``__duckarray__`` methods. 
Usage ----- diff --git a/doc/source/dev/internals.code-explanations.rst b/doc/source/dev/internals.code-explanations.rst index b1ee9b114aa8..1bb8f60528c1 100644 --- a/doc/source/dev/internals.code-explanations.rst +++ b/doc/source/dev/internals.code-explanations.rst @@ -401,7 +401,7 @@ Iterators for the output arguments are then processed. Finally, the decision is made about how to execute the looping mechanism to ensure that all elements of the input arrays are combined to produce the output arrays of the correct type. The options for loop -execution are one-loop (for :term`contiguous`, aligned, and correct data +execution are one-loop (for :term:`contiguous`, aligned, and correct data type), strided-loop (for non-contiguous but still aligned and correct data type), and a buffered loop (for misaligned or incorrect data type situations). Depending on which execution method is called for, diff --git a/doc/source/reference/c-api/types-and-structures.rst b/doc/source/reference/c-api/types-and-structures.rst index 1790c4f4d04d..3f16b5f4dbc4 100644 --- a/doc/source/reference/c-api/types-and-structures.rst +++ b/doc/source/reference/c-api/types-and-structures.rst @@ -1618,7 +1618,7 @@ NumPy C-API and C complex When you use the NumPy C-API, you will have access to complex real declarations ``npy_cdouble`` and ``npy_cfloat``, which are declared in terms of the C standard types from ``complex.h``. 
Unfortunately, ``complex.h`` contains -`#define I ...`` (where the actual definition depends on the compiler), which +``#define I ...`` (where the actual definition depends on the compiler), which means that any downstream user that does ``#include `` could get ``I`` defined, and using something like declaring ``double I;`` in their code will result in an obscure compiler error like diff --git a/doc/source/reference/maskedarray.generic.rst b/doc/source/reference/maskedarray.generic.rst index 9c44ebcbc589..3fbe25d5b03c 100644 --- a/doc/source/reference/maskedarray.generic.rst +++ b/doc/source/reference/maskedarray.generic.rst @@ -66,7 +66,7 @@ attributes and methods are described in more details in the .. try_examples:: -The :mod:`numpy.ma` module can be used as an addition to :mod:`numpy`: + The :mod:`numpy.ma` module can be used as an addition to :mod:`numpy`: >>> import numpy as np >>> import numpy.ma as ma @@ -521,7 +521,7 @@ Numerical operations -------------------- Numerical operations can be easily performed without worrying about missing -values, dividing by zero, square roots of negative numbers, etc.:: +values, dividing by zero, square roots of negative numbers, etc.: .. try_examples:: diff --git a/doc/source/reference/simd/build-options.rst b/doc/source/reference/simd/build-options.rst index 524a1532ca57..2b7039136e75 100644 --- a/doc/source/reference/simd/build-options.rst +++ b/doc/source/reference/simd/build-options.rst @@ -234,7 +234,7 @@ The need to align certain CPU features that are assured to be supported by successive generations of the same architecture, some cases: - On ppc64le ``VSX(ISA 2.06)`` and ``VSX2(ISA 2.07)`` both imply one another since the - first generation that supports little-endian mode is Power-8`(ISA 2.07)` + first generation that supports little-endian mode is ``Power-8(ISA 2.07)`` - On AArch64 ``NEON NEON_FP16 NEON_VFPV4 ASIMD`` implies each other since they are part of the hardware baseline. 
diff --git a/doc/source/release/1.11.0-notes.rst b/doc/source/release/1.11.0-notes.rst index 36cd1d65a266..f6fe84a4b17f 100644 --- a/doc/source/release/1.11.0-notes.rst +++ b/doc/source/release/1.11.0-notes.rst @@ -205,7 +205,7 @@ New Features - ``np.int16``, ``np.uint16``, - ``np.int32``, ``np.uint32``, - ``np.int64``, ``np.uint64``, - - ``np.int_ ``, ``np.intp`` + - ``np.int_``, ``np.intp`` The specification is by precision rather than by C type. Hence, on some platforms ``np.int64`` may be a ``long`` instead of ``long long`` even if diff --git a/doc/source/release/1.13.0-notes.rst b/doc/source/release/1.13.0-notes.rst index 3bfaf1ea5169..400c9553fbd3 100644 --- a/doc/source/release/1.13.0-notes.rst +++ b/doc/source/release/1.13.0-notes.rst @@ -136,7 +136,7 @@ implement ``__*slice__`` on the derived class, as ``__*item__`` will intercept these calls correctly. Any code that did implement these will work exactly as before. Code that -invokes``ndarray.__getslice__`` (e.g. through ``super(...).__getslice__``) will +invokes ``ndarray.__getslice__`` (e.g. through ``super(...).__getslice__``) will now issue a DeprecationWarning - ``.__getitem__(slice(start, end))`` should be used instead. diff --git a/doc/source/release/1.14.0-notes.rst b/doc/source/release/1.14.0-notes.rst index 68040b470caa..055a933291b9 100644 --- a/doc/source/release/1.14.0-notes.rst +++ b/doc/source/release/1.14.0-notes.rst @@ -409,8 +409,8 @@ This new default changes the float output relative to numpy 1.13. The old behavior can be obtained in 1.13 "legacy" printing mode, see compatibility notes above. -``hermitian`` option added to``np.linalg.matrix_rank`` ------------------------------------------------------- +``hermitian`` option added to ``np.linalg.matrix_rank`` +------------------------------------------------------- The new ``hermitian`` option allows choosing between standard SVD based matrix rank calculation and the more efficient eigenvalue based method for symmetric/hermitian matrices. 
diff --git a/doc/source/release/1.15.0-notes.rst b/doc/source/release/1.15.0-notes.rst index e84386f0fa5d..7aa85d167d29 100644 --- a/doc/source/release/1.15.0-notes.rst +++ b/doc/source/release/1.15.0-notes.rst @@ -213,7 +213,7 @@ C API changes New functions ``npy_get_floatstatus_barrier`` and ``npy_clear_floatstatus_barrier`` ----------------------------------------------------------------------------------- Functions ``npy_get_floatstatus_barrier`` and ``npy_clear_floatstatus_barrier`` -have been added and should be used in place of the ``npy_get_floatstatus``and +have been added and should be used in place of the ``npy_get_floatstatus`` and ``npy_clear_status`` functions. Optimizing compilers like GCC 8.1 and Clang were rearranging the order of operations when the previous functions were used in the ufunc SIMD functions, resulting in the floatstatus flags being checked @@ -326,8 +326,8 @@ passed explicitly, and are not yet computed automatically. No longer does an IQR of 0 result in ``n_bins=1``, rather the number of bins chosen is related to the data size in this situation. -The edges returned by `histogram`` and ``histogramdd`` now match the data float type ------------------------------------------------------------------------------------- +The edges returned by ``histogram`` and ``histogramdd`` now match the data float type +------------------------------------------------------------------------------------- When passed ``np.float16``, ``np.float32``, or ``np.longdouble`` data, the returned edges are now of the same dtype. 
Previously, ``histogram`` would only return the same type if explicit bins were given, and ``histogram`` would diff --git a/doc/source/release/1.18.0-notes.rst b/doc/source/release/1.18.0-notes.rst index 15e0ad77f5d1..a90dbb7a67d9 100644 --- a/doc/source/release/1.18.0-notes.rst +++ b/doc/source/release/1.18.0-notes.rst @@ -202,9 +202,9 @@ exception will require adaptation, and code that mistakenly called Moved modules in ``numpy.random`` --------------------------------- As part of the API cleanup, the submodules in ``numpy.random`` -``bit_generator``, ``philox``, ``pcg64``, ``sfc64, ``common``, ``generator``, +``bit_generator``, ``philox``, ``pcg64``, ``sfc64``, ``common``, ``generator``, and ``bounded_integers`` were moved to ``_bit_generator``, ``_philox``, -``_pcg64``, ``_sfc64, ``_common``, ``_generator``, and ``_bounded_integers`` +``_pcg64``, ``_sfc64``, ``_common``, ``_generator``, and ``_bounded_integers`` respectively to indicate that they are not part of the public interface. (`gh-14608 `__) diff --git a/doc/source/release/1.21.5-notes.rst b/doc/source/release/1.21.5-notes.rst index c69d26771268..b3e810b51c06 100644 --- a/doc/source/release/1.21.5-notes.rst +++ b/doc/source/release/1.21.5-notes.rst @@ -33,7 +33,7 @@ A total of 11 pull requests were merged for this release. * `#20462 `__: BUG: Fix float16 einsum fastpaths using wrong tempvar * `#20463 `__: BUG, DIST: Print os error message when the executable not exist * `#20464 `__: BLD: Verify the ability to compile C++ sources before initiating... 
-* `#20465 `__: BUG: Force ``npymath` ` to respect ``npy_longdouble`` +* `#20465 `__: BUG: Force ``npymath`` to respect ``npy_longdouble`` * `#20466 `__: BUG: Fix failure to create aligned, empty structured dtype * `#20467 `__: ENH: provide a convenience function to replace npy_load_module * `#20495 `__: MAINT: update wheel to version that supports python3.10 diff --git a/doc/source/user/c-info.ufunc-tutorial.rst b/doc/source/user/c-info.ufunc-tutorial.rst index 6b1aca65ed00..76e8af63462f 100644 --- a/doc/source/user/c-info.ufunc-tutorial.rst +++ b/doc/source/user/c-info.ufunc-tutorial.rst @@ -157,7 +157,7 @@ the module. return m; } -To use the ``setup.py file``, place ``setup.py`` and ``spammodule.c`` +To use the ``setup.py`` file, place ``setup.py`` and ``spammodule.c`` in the same folder. Then ``python setup.py build`` will build the module to import, or ``python setup.py install`` will install the module to your site-packages directory. @@ -240,8 +240,8 @@ and then the ``setup.py`` file used to create the module containing the ufunc. The place in the code corresponding to the actual computations for -the ufunc are marked with ``/\* BEGIN main ufunc computation \*/`` and -``/\* END main ufunc computation \*/``. The code in between those lines is +the ufunc are marked with ``/* BEGIN main ufunc computation */`` and +``/* END main ufunc computation */``. The code in between those lines is the primary thing that must be changed to create your own ufunc. .. code-block:: c @@ -339,7 +339,7 @@ the primary thing that must be changed to create your own ufunc. return m; } -This is a ``setup.py file`` for the above code. As before, the module +This is a ``setup.py`` file for the above code. As before, the module can be build via calling ``python setup.py build`` at the command prompt, or installed to site-packages via ``python setup.py install``. The module can also be placed into a local folder e.g. 
``npufunc_directory`` below @@ -408,8 +408,8 @@ sections we first give the ``.c`` file and then the corresponding ``setup.py`` file. The places in the code corresponding to the actual computations for -the ufunc are marked with ``/\* BEGIN main ufunc computation \*/`` and -``/\* END main ufunc computation \*/``. The code in between those lines +the ufunc are marked with ``/* BEGIN main ufunc computation */`` and +``/* END main ufunc computation */``. The code in between those lines is the primary thing that must be changed to create your own ufunc. diff --git a/doc/source/user/how-to-io.rst b/doc/source/user/how-to-io.rst index a90fbecfdec4..81055d42b9ac 100644 --- a/doc/source/user/how-to-io.rst +++ b/doc/source/user/how-to-io.rst @@ -206,7 +206,7 @@ Human-readable :func:`numpy.save` and :func:`numpy.savez` create binary files. To **write a human-readable file**, use :func:`numpy.savetxt`. The array can only be 1- or -2-dimensional, and there's no ` savetxtz` for multiple files. +2-dimensional, and there's no ``savetxtz`` for multiple files. Large arrays ------------ diff --git a/numpy/lib/_polynomial_impl.py b/numpy/lib/_polynomial_impl.py index a58ca76ec2b0..de4c01ecb95c 100644 --- a/numpy/lib/_polynomial_impl.py +++ b/numpy/lib/_polynomial_impl.py @@ -520,9 +520,9 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): - residuals -- sum of squared residuals of the least squares fit - rank -- the effective rank of the scaled Vandermonde - coefficient matrix + coefficient matrix - singular_values -- singular values of the scaled Vandermonde - coefficient matrix + coefficient matrix - rcond -- value of `rcond`. For more details, see `numpy.linalg.lstsq`. diff --git a/numpy/lib/_version.py b/numpy/lib/_version.py index f7a353868fd2..d70a61040a40 100644 --- a/numpy/lib/_version.py +++ b/numpy/lib/_version.py @@ -22,8 +22,7 @@ class NumpyVersion: - Release candidates: '1.8.0rc1', '1.8.0rc2', etc. 
- Development versions: '1.8.0.dev-f1234afa' (git commit hash appended) - Development versions after a1: '1.8.0a1.dev-f1234afa', - '1.8.0b2.dev-f1234afa', - '1.8.1rc1.dev-f1234afa', etc. + '1.8.0b2.dev-f1234afa', '1.8.1rc1.dev-f1234afa', etc. - Development versions (no git hash available): '1.8.0.dev-Unknown' Comparing needs to be done against a valid version string or other From 5e3aa37b1af5207a7aee113cfc5153ace3c67238 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Mon, 2 Jun 2025 08:07:35 +0200 Subject: [PATCH 0078/1718] MAINT: fix SPDX license expressions for LAPACK, GCC runtime libs See https://github.com/scipy/scipy/issues/23061 for details. [skip ci] --- tools/wheels/LICENSE_linux.txt | 4 ++-- tools/wheels/LICENSE_osx.txt | 4 ++-- tools/wheels/LICENSE_win32.txt | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/tools/wheels/LICENSE_linux.txt b/tools/wheels/LICENSE_linux.txt index 9e2d9053b8a7..db488c6cff47 100644 --- a/tools/wheels/LICENSE_linux.txt +++ b/tools/wheels/LICENSE_linux.txt @@ -44,7 +44,7 @@ Name: LAPACK Files: numpy.libs/libscipy_openblas*.so Description: bundled in OpenBLAS Availability: https://github.com/OpenMathLib/OpenBLAS/ -License: BSD-3-Clause-Attribution +License: BSD-3-Clause-Open-MPI Copyright (c) 1992-2013 The University of Tennessee and The University of Tennessee Research Foundation. All rights reserved. @@ -99,7 +99,7 @@ Name: GCC runtime library Files: numpy.libs/libgfortran*.so Description: dynamically linked to files compiled with gcc Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libgfortran -License: GPL-3.0-with-GCC-exception +License: GPL-3.0-or-later WITH GCC-exception-3.1 Copyright (C) 2002-2017 Free Software Foundation, Inc. 
Libgfortran is free software; you can redistribute it and/or modify diff --git a/tools/wheels/LICENSE_osx.txt b/tools/wheels/LICENSE_osx.txt index 7ef2e381874e..5cea18441b35 100644 --- a/tools/wheels/LICENSE_osx.txt +++ b/tools/wheels/LICENSE_osx.txt @@ -44,7 +44,7 @@ Name: LAPACK Files: numpy/.dylibs/libscipy_openblas*.so Description: bundled in OpenBLAS Availability: https://github.com/OpenMathLib/OpenBLAS/ -License: BSD-3-Clause-Attribution +License: BSD-3-Clause-Open-MPI Copyright (c) 1992-2013 The University of Tennessee and The University of Tennessee Research Foundation. All rights reserved. @@ -99,7 +99,7 @@ Name: GCC runtime library Files: numpy/.dylibs/libgfortran*, numpy/.dylibs/libgcc* Description: dynamically linked to files compiled with gcc Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libgfortran -License: GPL-3.0-with-GCC-exception +License: GPL-3.0-or-later WITH GCC-exception-3.1 Copyright (C) 2002-2017 Free Software Foundation, Inc. Libgfortran is free software; you can redistribute it and/or modify diff --git a/tools/wheels/LICENSE_win32.txt b/tools/wheels/LICENSE_win32.txt index c8277e7710a2..aed96845583b 100644 --- a/tools/wheels/LICENSE_win32.txt +++ b/tools/wheels/LICENSE_win32.txt @@ -44,7 +44,7 @@ Name: LAPACK Files: numpy.libs\libscipy_openblas*.dll Description: bundled in OpenBLAS Availability: https://github.com/OpenMathLib/OpenBLAS/ -License: BSD-3-Clause-Attribution +License: BSD-3-Clause-Open-MPI Copyright (c) 1992-2013 The University of Tennessee and The University of Tennessee Research Foundation. All rights reserved. @@ -99,7 +99,7 @@ Name: GCC runtime library Files: numpy.libs\libscipy_openblas*.dll Description: statically linked to files compiled with gcc Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libgfortran -License: GPL-3.0-with-GCC-exception +License: GPL-3.0-or-later WITH GCC-exception-3.1 Copyright (C) 2002-2017 Free Software Foundation, Inc. 
Libgfortran is free software; you can redistribute it and/or modify From 3b465b051616c2ffa299fb0313a9ef609862cf38 Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Tue, 3 Jun 2025 14:04:23 +0300 Subject: [PATCH 0079/1718] MAINT: cleanup from finalized concatenate deprecation (#29115) --- numpy/_core/src/multiarray/multiarraymodule.c | 27 +++++-------------- 1 file changed, 6 insertions(+), 21 deletions(-) diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index 022a54fe17da..7724756ba351 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -530,8 +530,7 @@ PyArray_ConcatenateArrays(int narrays, PyArrayObject **arrays, int axis, NPY_NO_EXPORT PyArrayObject * PyArray_ConcatenateFlattenedArrays(int narrays, PyArrayObject **arrays, NPY_ORDER order, PyArrayObject *ret, - PyArray_Descr *dtype, NPY_CASTING casting, - npy_bool casting_not_passed) + PyArray_Descr *dtype, NPY_CASTING casting) { int iarrays; npy_intp shape = 0; @@ -647,12 +646,11 @@ PyArray_ConcatenateFlattenedArrays(int narrays, PyArrayObject **arrays, * @param ret output array to fill * @param dtype Forced output array dtype (cannot be combined with ret) * @param casting Casting mode used - * @param casting_not_passed Deprecation helper */ NPY_NO_EXPORT PyObject * PyArray_ConcatenateInto(PyObject *op, int axis, PyArrayObject *ret, PyArray_Descr *dtype, - NPY_CASTING casting, npy_bool casting_not_passed) + NPY_CASTING casting) { int iarrays, narrays; PyArrayObject **arrays; @@ -698,7 +696,7 @@ PyArray_ConcatenateInto(PyObject *op, if (axis == NPY_RAVEL_AXIS) { ret = PyArray_ConcatenateFlattenedArrays( narrays, arrays, NPY_CORDER, ret, dtype, - casting, casting_not_passed); + casting); } else { ret = PyArray_ConcatenateArrays( @@ -743,7 +741,7 @@ PyArray_Concatenate(PyObject *op, int axis) casting = NPY_SAME_KIND_CASTING; } return PyArray_ConcatenateInto( - op, axis, NULL, NULL, casting, 0); + 
op, axis, NULL, NULL, casting); } static int @@ -2489,7 +2487,6 @@ array_concatenate(PyObject *NPY_UNUSED(dummy), PyObject *out = NULL; PyArray_Descr *dtype = NULL; NPY_CASTING casting = NPY_SAME_KIND_CASTING; - PyObject *casting_obj = NULL; PyObject *res; int axis = 0; @@ -2499,22 +2496,10 @@ array_concatenate(PyObject *NPY_UNUSED(dummy), "|axis", &PyArray_AxisConverter, &axis, "|out", NULL, &out, "$dtype", &PyArray_DescrConverter2, &dtype, - "$casting", NULL, &casting_obj, + "$casting", &PyArray_CastingConverter, &casting, NULL, NULL, NULL) < 0) { return NULL; } - int casting_not_passed = 0; - if (casting_obj == NULL) { - /* - * Casting was not passed in, needed for deprecation only. - * This should be simplified once the deprecation is finished. - */ - casting_not_passed = 1; - } - else if (!PyArray_CastingConverter(casting_obj, &casting)) { - Py_XDECREF(dtype); - return NULL; - } if (out != NULL) { if (out == Py_None) { out = NULL; @@ -2526,7 +2511,7 @@ array_concatenate(PyObject *NPY_UNUSED(dummy), } } res = PyArray_ConcatenateInto(a0, axis, (PyArrayObject *)out, dtype, - casting, casting_not_passed); + casting); Py_XDECREF(dtype); return res; } From d52bccb9b1ccc80b5dbd06f1b4bfeaeba901affd Mon Sep 17 00:00:00 2001 From: Danis <96629796+DanisNone@users.noreply.github.com> Date: Tue, 3 Jun 2025 19:11:17 +0500 Subject: [PATCH 0080/1718] TYP: minor ufunc alias fixes in ``__init__.pyi`` (#29120) --- numpy/__init__.pyi | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index df72ce3d877a..41d7411dfdd8 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -4784,19 +4784,17 @@ arctan: _UFunc_Nin1_Nout1[L['arctan'], L[8], None] arctanh: _UFunc_Nin1_Nout1[L['arctanh'], L[8], None] bitwise_and: _UFunc_Nin2_Nout1[L['bitwise_and'], L[12], L[-1]] bitwise_count: _UFunc_Nin1_Nout1[L['bitwise_count'], L[11], None] -bitwise_not: _UFunc_Nin1_Nout1[L['invert'], L[12], None] bitwise_or: 
_UFunc_Nin2_Nout1[L['bitwise_or'], L[12], L[0]] bitwise_xor: _UFunc_Nin2_Nout1[L['bitwise_xor'], L[12], L[0]] cbrt: _UFunc_Nin1_Nout1[L['cbrt'], L[5], None] ceil: _UFunc_Nin1_Nout1[L['ceil'], L[7], None] -conj: _UFunc_Nin1_Nout1[L['conjugate'], L[18], None] conjugate: _UFunc_Nin1_Nout1[L['conjugate'], L[18], None] copysign: _UFunc_Nin2_Nout1[L['copysign'], L[4], None] cos: _UFunc_Nin1_Nout1[L['cos'], L[9], None] cosh: _UFunc_Nin1_Nout1[L['cosh'], L[8], None] deg2rad: _UFunc_Nin1_Nout1[L['deg2rad'], L[5], None] degrees: _UFunc_Nin1_Nout1[L['degrees'], L[5], None] -divide: _UFunc_Nin2_Nout1[L['true_divide'], L[11], None] +divide: _UFunc_Nin2_Nout1[L['divide'], L[11], None] divmod: _UFunc_Nin2_Nout2[L['divmod'], L[15], None] equal: _UFunc_Nin2_Nout1[L['equal'], L[23], None] exp2: _UFunc_Nin1_Nout1[L['exp2'], L[8], None] @@ -4839,7 +4837,6 @@ matmul: _GUFunc_Nin2_Nout1[L['matmul'], L[19], None, L["(n?,k),(k,m?)->(n?,m?)"] matvec: _GUFunc_Nin2_Nout1[L['matvec'], L[19], None, L["(m,n),(n)->(m)"]] maximum: _UFunc_Nin2_Nout1[L['maximum'], L[21], None] minimum: _UFunc_Nin2_Nout1[L['minimum'], L[21], None] -mod: _UFunc_Nin2_Nout1[L['remainder'], L[16], None] modf: _UFunc_Nin1_Nout2[L['modf'], L[4], None] multiply: _UFunc_Nin2_Nout1[L['multiply'], L[23], L[1]] negative: _UFunc_Nin1_Nout1[L['negative'], L[19], None] @@ -4863,7 +4860,6 @@ square: _UFunc_Nin1_Nout1[L['square'], L[18], None] subtract: _UFunc_Nin2_Nout1[L['subtract'], L[21], None] tan: _UFunc_Nin1_Nout1[L['tan'], L[8], None] tanh: _UFunc_Nin1_Nout1[L['tanh'], L[8], None] -true_divide: _UFunc_Nin2_Nout1[L['true_divide'], L[11], None] trunc: _UFunc_Nin1_Nout1[L['trunc'], L[7], None] vecdot: _GUFunc_Nin2_Nout1[L['vecdot'], L[19], None, L["(n),(n)->()"]] vecmat: _GUFunc_Nin2_Nout1[L['vecmat'], L[19], None, L["(n),(n,m)->(m)"]] @@ -4878,10 +4874,14 @@ atanh = arctanh atan2 = arctan2 concat = concatenate bitwise_left_shift = left_shift +bitwise_not = invert bitwise_invert = invert bitwise_right_shift = right_shift +conj 
= conjugate +mod = remainder permute_dims = transpose pow = power +true_divide = divide class errstate: def __init__( From 2306bced4d3234e7cc8be8631b2b72773eabf7f3 Mon Sep 17 00:00:00 2001 From: abhishek-fujitsu Date: Wed, 14 May 2025 15:17:51 +0530 Subject: [PATCH 0081/1718] update windows-2019 to windows-2022 and meson flag[wheel build] Co-authored-by: Charles Harris --- .github/workflows/wheels.yml | 6 +++--- .github/workflows/windows.yml | 4 ++-- .github/workflows/windows_arm64.yml | 2 +- azure-pipelines.yml | 2 +- meson_cpu/x86/meson.build | 2 ++ numpy/_core/tests/test_umath.py | 7 +++++++ 6 files changed, 16 insertions(+), 7 deletions(-) diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 3736f28cbd8c..7e034779fd0b 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -89,13 +89,13 @@ jobs: # targeting macos >= 14. Could probably build on macos-14, but it would be a cross-compile - [macos-13, macosx_x86_64, accelerate] - [macos-14, macosx_arm64, accelerate] # always use accelerate - - [windows-2019, win_amd64, ""] - - [windows-2019, win32, ""] + - [windows-2022, win_amd64, ""] + - [windows-2022, win32, ""] - [windows-11-arm, win_arm64, ""] python: ["cp311", "cp312", "cp313", "cp313t", "cp314", "cp314t", "pp311"] exclude: # Don't build PyPy 32-bit windows - - buildplat: [windows-2019, win32, ""] + - buildplat: [windows-2022, win32, ""] python: "pp311" # Don't build PyPy arm64 windows - buildplat: [windows-11-arm, win_arm64, ""] diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 6c02563150da..e760e37780a7 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -16,7 +16,7 @@ permissions: jobs: python64bit_openblas: name: x86-64, LP64 OpenBLAS - runs-on: windows-2019 + runs-on: windows-2022 # To enable this job on a fork, comment out: if: github.repository == 'numpy/numpy' strategy: @@ -92,7 +92,7 @@ jobs: fail-fast: false matrix: include: - - os: 
windows-2019 + - os: windows-2022 architecture: x86 - os: windows-11-arm architecture: arm64 diff --git a/.github/workflows/windows_arm64.yml b/.github/workflows/windows_arm64.yml index 0a691bff9b21..71fa9dd88d3b 100644 --- a/.github/workflows/windows_arm64.yml +++ b/.github/workflows/windows_arm64.yml @@ -15,7 +15,7 @@ permissions: jobs: windows_arm: - runs-on: windows-2019 + runs-on: windows-2022 # To enable this job on a fork, comment out: if: github.repository == 'numpy/numpy' diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 36362f6cacc7..af6e5cf52ac4 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -75,7 +75,7 @@ stages: - job: Windows timeoutInMinutes: 120 pool: - vmImage: 'windows-2019' + vmImage: 'windows-2022' strategy: maxParallel: 3 matrix: diff --git a/meson_cpu/x86/meson.build b/meson_cpu/x86/meson.build index 8c7a0fb59a57..1276e922ff2a 100644 --- a/meson_cpu/x86/meson.build +++ b/meson_cpu/x86/meson.build @@ -212,6 +212,8 @@ if compiler_id == 'msvc' endif endforeach FMA3.update(args: {'val': '/arch:AVX2', 'match': clear_arch}) + # Add floating-point contract flag to fixes transcendental function accuracy on Windows Server 2022 + FMA3.update(args: {'val': '/fp:contract'}) AVX2.update(args: {'val': '/arch:AVX2', 'match': clear_arch}) AVX512_SKX.update(args: {'val': '/arch:AVX512', 'match': clear_arch}) endif diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py index 4b698ce82bc6..001a7bffbcc8 100644 --- a/numpy/_core/tests/test_umath.py +++ b/numpy/_core/tests/test_umath.py @@ -1879,8 +1879,15 @@ def test_unary_spurious_fpexception(self, ufunc, dtype, data, escape): # FIXME: NAN raises FP invalid exception: # - ceil/float16 on MSVC:32-bit # - spacing/float16 on almost all platforms + # - spacing/float32,float64 on Windows MSVC with VS2022 if ufunc in (np.spacing, np.ceil) and dtype == 'e': return + # Skip spacing tests with NaN on Windows MSVC (all dtypes) + import platform + if (ufunc == np.spacing and 
+ platform.system() == 'Windows' and + any(np.isnan(d) if isinstance(d, (int, float)) else False for d in data)): + pytest.skip("spacing with NaN generates warnings on Windows/VS2022") array = np.array(data, dtype=dtype) with assert_no_warnings(): ufunc(array) From 9328c192239e8f288b54ecae98210440f11a7b60 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Jun 2025 17:13:25 +0000 Subject: [PATCH 0082/1718] MAINT: Bump github/codeql-action from 3.28.18 to 3.28.19 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.28.18 to 3.28.19. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/ff0a06e83cb2de871e5a09832bc6a81e7276941f...fca7ace96b7d713c7035871441bd52efbe39e27e) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.28.19 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index fb0dd766a1d8..e0318652d2af 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@ff0a06e83cb2de871e5a09832bc6a81e7276941f # v3.28.18 + uses: github/codeql-action/init@fca7ace96b7d713c7035871441bd52efbe39e27e # v3.28.19 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 
# If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@ff0a06e83cb2de871e5a09832bc6a81e7276941f # v3.28.18 + uses: github/codeql-action/autobuild@fca7ace96b7d713c7035871441bd52efbe39e27e # v3.28.19 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@ff0a06e83cb2de871e5a09832bc6a81e7276941f # v3.28.18 + uses: github/codeql-action/analyze@fca7ace96b7d713c7035871441bd52efbe39e27e # v3.28.19 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index f4e06677f804..9e21251f87c8 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@ff0a06e83cb2de871e5a09832bc6a81e7276941f # v2.1.27 + uses: github/codeql-action/upload-sarif@fca7ace96b7d713c7035871441bd52efbe39e27e # v2.1.27 with: sarif_file: results.sarif From 69948f351fcdf7a873fdb360eeaa4d84673ea90f Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Tue, 3 Jun 2025 19:28:34 +0200 Subject: [PATCH 0083/1718] DOC: remove very outdated info on ATLAS (#29119) * DOC: remove very outdated info on ATLAS ATLAS hasn't been developed for years, there is no reason to ever use it instead of OpenBLAS, BLIS, or MKL. So remove mentions of it. The troubleshooting instructions haven't been relevant in quite a while either. 
Addresses a comment on gh-29108 [skip cirrus] [skip github] [skip azp] * accept review suggestion [skip ci] Co-authored-by: Matti Picus --------- Co-authored-by: Matti Picus --- INSTALL.rst | 12 ++-------- .../user/troubleshooting-importerror.rst | 22 ------------------- 2 files changed, 2 insertions(+), 32 deletions(-) diff --git a/INSTALL.rst b/INSTALL.rst index 017e4de8c9d4..6e9d2cd242f5 100644 --- a/INSTALL.rst +++ b/INSTALL.rst @@ -135,12 +135,8 @@ For best performance, a development package providing BLAS and CBLAS should be installed. Some of the options available are: - ``libblas-dev``: reference BLAS (not very optimized) -- ``libatlas-base-dev``: generic tuned ATLAS, it is recommended to tune it to - the available hardware, see /usr/share/doc/libatlas3-base/README.Debian for - instructions -- ``libopenblas-base``: fast and runtime detected so no tuning required but a - very recent version is needed (>=0.2.15 is recommended). Older versions of - OpenBLAS suffered from correctness issues on some CPUs. +- ``libopenblas-base``: (recommended) OpenBLAS is performant, and used + in the NumPy wheels on PyPI except where Apple's Accelerate is tuned better for Apple hardware The package linked to when numpy is loaded can be chosen after installation via the alternatives mechanism:: @@ -148,10 +144,6 @@ the alternatives mechanism:: update-alternatives --config libblas.so.3 update-alternatives --config liblapack.so.3 -Or by preloading a specific BLAS library with:: - - LD_PRELOAD=/usr/lib/atlas-base/atlas/libblas.so.3 python ... - Build issues ============ diff --git a/doc/source/user/troubleshooting-importerror.rst b/doc/source/user/troubleshooting-importerror.rst index 6be8831d9c2a..da456dd17e36 100644 --- a/doc/source/user/troubleshooting-importerror.rst +++ b/doc/source/user/troubleshooting-importerror.rst @@ -83,28 +83,6 @@ on how to properly configure Eclipse/PyDev to use Anaconda Python with specific conda environments. 
-Raspberry Pi ------------- - -There are sometimes issues reported on Raspberry Pi setups when installing -using ``pip3 install`` (or ``pip`` install). These will typically mention:: - - libf77blas.so.3: cannot open shared object file: No such file or directory - - -The solution will be to either:: - - sudo apt-get install libatlas-base-dev - -to install the missing libraries expected by the self-compiled NumPy -(ATLAS is a possible provider of linear algebra). - -*Alternatively* use the NumPy provided by Raspbian. In which case run:: - - pip3 uninstall numpy # remove previously installed version - apt install python3-numpy - - Debug build on Windows ---------------------- From 8944b2b800f5bfcf48a6b0134a2588f3a5a54d66 Mon Sep 17 00:00:00 2001 From: Marc Redemske Date: Tue, 3 Jun 2025 23:21:09 +0200 Subject: [PATCH 0084/1718] DOC: fix typo in Numpy's module structure --- doc/source/reference/module_structure.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/reference/module_structure.rst b/doc/source/reference/module_structure.rst index 01a5bcff7fbc..83e697ac04fe 100644 --- a/doc/source/reference/module_structure.rst +++ b/doc/source/reference/module_structure.rst @@ -5,7 +5,7 @@ NumPy's module structure ************************ NumPy has a large number of submodules. Most regular usage of NumPy requires -only the main namespace and a smaller set of submodules. The rest either either +only the main namespace and a smaller set of submodules. The rest either has special-purpose or niche namespaces. 
Main namespaces From 6822a7b10f21c895ca0037a9b9060bb944fb7f96 Mon Sep 17 00:00:00 2001 From: Marc Redemske Date: Wed, 4 Jun 2025 07:16:33 +0200 Subject: [PATCH 0085/1718] update according to review --- doc/source/reference/module_structure.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/reference/module_structure.rst b/doc/source/reference/module_structure.rst index 83e697ac04fe..98e3dda54e7b 100644 --- a/doc/source/reference/module_structure.rst +++ b/doc/source/reference/module_structure.rst @@ -5,7 +5,7 @@ NumPy's module structure ************************ NumPy has a large number of submodules. Most regular usage of NumPy requires -only the main namespace and a smaller set of submodules. The rest either has +only the main namespace and a smaller set of submodules. The rest either have special-purpose or niche namespaces. Main namespaces From 6ab549012852157727097c52328bad56cfd85d46 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 4 Jun 2025 18:00:32 +0000 Subject: [PATCH 0086/1718] MAINT: Bump conda-incubator/setup-miniconda from 3.1.1 to 3.2.0 Bumps [conda-incubator/setup-miniconda](https://github.com/conda-incubator/setup-miniconda) from 3.1.1 to 3.2.0. - [Release notes](https://github.com/conda-incubator/setup-miniconda/releases) - [Changelog](https://github.com/conda-incubator/setup-miniconda/blob/main/CHANGELOG.md) - [Commits](https://github.com/conda-incubator/setup-miniconda/compare/505e6394dae86d6a5c7fbb6e3fb8938e3e863830...835234971496cad1653abb28a638a281cf32541f) --- updated-dependencies: - dependency-name: conda-incubator/setup-miniconda dependency-version: 3.2.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- .github/workflows/macos.yml | 2 +- .github/workflows/wheels.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 1388a756d216..418dc7d52fc1 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -55,7 +55,7 @@ jobs: ${{ github.workflow }}-${{ matrix.python-version }}-ccache-macos- - name: Setup Miniforge - uses: conda-incubator/setup-miniconda@505e6394dae86d6a5c7fbb6e3fb8938e3e863830 # v3.1.1 + uses: conda-incubator/setup-miniconda@835234971496cad1653abb28a638a281cf32541f # v3.2.0 with: python-version: ${{ matrix.python-version }} channels: conda-forge diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 7e034779fd0b..df0c779241f3 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -280,7 +280,7 @@ jobs: name: sdist path: ./dist/* - - uses: conda-incubator/setup-miniconda@505e6394dae86d6a5c7fbb6e3fb8938e3e863830 # v3.1.1 + - uses: conda-incubator/setup-miniconda@835234971496cad1653abb28a638a281cf32541f # v3.2.0 with: # for installation of anaconda-client, required for upload to # anaconda.org From a88d0149ae91b301696f507f350499207d11f442 Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Thu, 5 Jun 2025 11:00:59 +0200 Subject: [PATCH 0087/1718] MAINT: Bump ``scipy-doctest`` to 1.8 (#29085) * MAINT: check-docs: require scipy-doctest >= 1.8.0 See for the discussion https://discuss.scientific-python.org/t/scipy-doctest-select-only-doctests-or-both-doctests-and-unit-tests/1950 [docs only] * CI: bump scipy-doctest version on CI [skip azp] [skip actions] [skip cirrus] * MAINT: tweak doctests to not fail --- .github/workflows/linux.yml | 2 +- .spin/cmds.py | 3 +++ numpy/_core/_add_newdocs.py | 2 +- numpy/lib/introspect.py | 2 +- requirements/doc_requirements.txt | 2 +- 5 files changed, 7 insertions(+), 4 deletions(-) diff --git a/.github/workflows/linux.yml 
b/.github/workflows/linux.yml index 742ca5c34144..668c1191d055 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -271,7 +271,7 @@ jobs: # - name: Check docstests # shell: 'script -q -e -c "bash --noprofile --norc -eo pipefail {0}"' # run: | - # pip install scipy-doctest==1.6.0 hypothesis==6.104.1 matplotlib scipy pytz pandas + # pip install scipy-doctest>=1.8.0 hypothesis==6.104.1 matplotlib scipy pytz pandas # spin check-docs -v # spin check-tutorials -v diff --git a/.spin/cmds.py b/.spin/cmds.py index e5ae29d4a6a2..66885de630e0 100644 --- a/.spin/cmds.py +++ b/.spin/cmds.py @@ -196,6 +196,8 @@ def check_docs(*, parent_callback, pytest_args, **kwargs): import scipy_doctest # noqa: F401 except ModuleNotFoundError as e: raise ModuleNotFoundError("scipy-doctest not installed") from e + if scipy_doctest.__version__ < '1.8.0': + raise ModuleNotFoundError("please update scipy_doctests to >= 1.8.0") if (not pytest_args): pytest_args = ('--pyargs', 'numpy') @@ -203,6 +205,7 @@ def check_docs(*, parent_callback, pytest_args, **kwargs): # turn doctesting on: doctest_args = ( '--doctest-modules', + '--doctest-only-doctests=true', '--doctest-collect=api' ) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index 8f5de4b7bd89..597d5c6deaf3 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -5944,7 +5944,7 @@ >>> import numpy as np >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) >>> print(dt.fields) - {'name': (dtype('|S16'), 0), 'grades': (dtype(('float64',(2,))), 16)} + {'name': (dtype('>> import json - >>> print(json.dumps(dict, indent=2)) + >>> print(json.dumps(dict, indent=2)) # may vary (architecture) { "absolute": { "dd": { diff --git a/requirements/doc_requirements.txt b/requirements/doc_requirements.txt index 330f0f7ac8b9..23a0e6deb60f 100644 --- a/requirements/doc_requirements.txt +++ b/requirements/doc_requirements.txt @@ -19,7 +19,7 @@ toml # for doctests, also needs 
pytz which is in test_requirements -scipy-doctest==1.6.0 +scipy-doctest>=1.8.0 # interactive documentation utilities # see https://github.com/jupyterlite/pyodide-kernel#compatibility From 7fbc71c354c21c2264d79e3657829eb2608f4dbf Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 5 Jun 2025 17:05:30 +0000 Subject: [PATCH 0088/1718] MAINT: Bump mamba-org/setup-micromamba from 2.0.4 to 2.0.5 Bumps [mamba-org/setup-micromamba](https://github.com/mamba-org/setup-micromamba) from 2.0.4 to 2.0.5. - [Release notes](https://github.com/mamba-org/setup-micromamba/releases) - [Commits](https://github.com/mamba-org/setup-micromamba/compare/0dea6379afdaffa5d528b3d1dabc45da37f443fc...b09ef9b599704322748535812ca03efb2625677b) --- updated-dependencies: - dependency-name: mamba-org/setup-micromamba dependency-version: 2.0.5 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/wheels.yml | 2 +- .github/workflows/windows_arm64.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index df0c779241f3..f74be5f4a455 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -187,7 +187,7 @@ jobs: path: ./wheelhouse/*.whl - name: install micromamba - uses: mamba-org/setup-micromamba@0dea6379afdaffa5d528b3d1dabc45da37f443fc + uses: mamba-org/setup-micromamba@b09ef9b599704322748535812ca03efb2625677b if: ${{ matrix.buildplat[1] != 'win_arm64' }} # unsupported platform at the moment with: # for installation of anaconda-client, required for upload to diff --git a/.github/workflows/windows_arm64.yml b/.github/workflows/windows_arm64.yml index 71fa9dd88d3b..3eaf02eb062c 100644 --- a/.github/workflows/windows_arm64.yml +++ b/.github/workflows/windows_arm64.yml @@ -174,7 +174,7 @@ jobs: path: ./*.whl - name: Setup Mamba - uses: 
mamba-org/setup-micromamba@0dea6379afdaffa5d528b3d1dabc45da37f443fc + uses: mamba-org/setup-micromamba@b09ef9b599704322748535812ca03efb2625677b with: # for installation of anaconda-client, required for upload to # anaconda.org From e81abf7f0ab8c0eba1541e35d048469731c4c6a2 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sat, 7 Jun 2025 10:01:04 -0600 Subject: [PATCH 0089/1718] MAINT: Update main after 2.3.0 release. - Forwardport 2.3.0-changelog.rst - Forwardport 2.3.0-notes.rst - Forwardport .mailmap [skip azp] [skip cirrus] [skip actions] --- .mailmap | 39 +- doc/changelog/2.3.0-changelog.rst | 704 +++++++++++++++++++++++++++++ doc/source/release/2.3.0-notes.rst | 517 ++++++++++++++++++++- 3 files changed, 1254 insertions(+), 6 deletions(-) create mode 100644 doc/changelog/2.3.0-changelog.rst diff --git a/.mailmap b/.mailmap index f33dfddb6492..e3e3bb56ecdf 100644 --- a/.mailmap +++ b/.mailmap @@ -11,6 +11,7 @@ !Algorithmist-Girl <36552319+Algorithmist-Girl@users.noreply.github.com> !DWesl <22566757+DWesl@users.noreply.github.com> !Dreamge +!EarlMilktea <66886825+EarlMilktea@users.noreply.github.com> !Endolith !GalaxySnail !Illviljan <14371165+Illviljan@users.noreply.github.com> @@ -21,6 +22,7 @@ !Scian <65375075+hoony6134@users.noreply.github.com> !Searchingdays !amagicmuffin <2014wcheng@gmail.com> +!amotzop !bersbersbers <12128514+bersbersbers@users.noreply.github.com> !code-review-doctor !cook-1229 <70235336+cook-1229@users.noreply.github.com> @@ -34,6 +36,7 @@ !hutauf !jbCodeHub !juztamau5 +!karl3wm !legoffant <58195095+legoffant@users.noreply.github.com> !liang3zy22 <35164941+liang3zy22@users.noreply.github.com> !luzpaz @@ -51,6 +54,7 @@ !pmvz !pojaghi <36278217+pojaghi@users.noreply.github.com> !pratiklp00 +!samir539 !sfolje0 !spacescientist !stefan6419846 @@ -59,12 +63,15 @@ !tautaus !undermyumbrella1 !vahidmech +!wenlong2 !xoviat <49173759+xoviat@users.noreply.github.com> !xoviat <49173759+xoviat@users.noreply.github.com> !yan-wyb !yetanothercheer 
Aaron Baecker Adrin Jalali +Abhishek Kumar +Abhishek Kumar <142383124+abhishek-iitmadras@users.noreply.github.com> Abraham Medina Arun Kota Arun Kota Arun Kota @@ -140,6 +147,8 @@ Anton Prosekin Anže Starič Arfy Slowy Arnaud Ma +Arnaud Tremblay +Arnaud Tremblay <59627629+Msa360@users.noreply.github.com> Aron Ahmadia Arun Kota Arun Kota @@ -190,6 +199,8 @@ Carl Kleffner Carl Leake Carlos Henrique Hermanny Moreira da Silva Carlos Henrique Hermanny Moreira da Silva <121122527+carlosilva10260@users.noreply.github.com> +Carlos Martin +Carlos Martin Cédric Hannotier Charles Stern <62192187+cisaacstern@users.noreply.github.com> Chiara Marmo @@ -200,6 +211,8 @@ Chris Burns Chris Fu (傅立业) <17433201@qq.com> Chris Holland <41524756+ChrisAHolland@users.noreply.github.com> Chris Kerr +Chris Navarro +Chris Navarro <24905907+lvllvl@users.noreply.github.com> Chris Vavaliaris Christian Clauss Christopher Dahlin @@ -270,6 +283,7 @@ Eric Fode Eric Fode Eric Quintero Eric Xie <161030123+EngineerEricXie@users.noreply.github.com> Ernest N. Mamikonyan +Ernst Peng Eskild Eriksen Eskild Eriksen <42120229+iameskild@users.noreply.github.com> Eskild Eriksen @@ -300,8 +314,11 @@ Gregory R. Lee Gregory R. Lee Guo Ci guoci Guo Shuai +Gyeongjae Choi Habiba Hye Habiba Hye <145866308+HabibiHye@users.noreply.github.com> +Halle Loveday +Halle Loveday Hameer Abbasi Hannah Aizenman Han Genuit @@ -311,6 +328,10 @@ Helder Oliveira Hemil Desai Himanshu Hiroyuki V. 
Yamazaki +Daniel Hrisca +Daniel Hrisca +François de Coatpont +François de Coatpont <93073405+Chevali2004@users.noreply.github.com> Hugo van Kemenade Iantra Solari I-Shen Leong @@ -363,6 +384,7 @@ Jessé Pires Jessi J Zhao <35235453+jessijzhao@users.noreply.github.com> Jhong-Ken Chen (陳仲肯) Jhong-Ken Chen (陳仲肯) <37182101+kennychenfs@users.noreply.github.com> +Jiuding Tan (谭九鼎) <109224573@qq.com> Johann Faouzi Johann Rohwer Johann Rohwer jmrohwer @@ -447,10 +469,13 @@ Luke Zoltan Kelley Madhulika Jain Chambers <53166646+madhulikajc@users.noreply.github.com> Magdalena Proszewska Magdalena Proszewska <38814059+mproszewska@users.noreply.github.com> +Makima C. Yang Malik Idrees Hasan Khan <77000356+MalikIdreesHasanKhan@users.noreply.github.com>C Manoj Kumar Marcel Loose Marcin Podhajski <36967358+m-podhajski@users.noreply.github.com> +Marco Edward Gorelli +Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Margret Pax Margret Pax <13646646+paxcodes@users.noreply.github.com> Mark DePristo @@ -508,6 +533,7 @@ Michael Schnaitter Michael Seifert Michel Fruchart +Mike O'Brien Mike Toews Miki Watanabe (渡邉 美希) Miles Cranmer @@ -516,9 +542,12 @@ Milica Dančuk love-bees <33499899+love-bees@users.noreply.g Mircea Akos Bruma Mircea Akos Bruma Mitchell Faas <35742861+Mitchell-Faas@users.noreply.github.com> +Mohammed Abdul Rahman +Mohammed Abdul Rahman <130785777+that-ar-guy@users.noreply.github.com> Mohaned Qunaibit Muhammad Kasim Muhammed Muhsin +Mugundan Selvanayagam Mukulika Pahari Mukulika Pahari <60316606+Mukulikaa@users.noreply.github.com> Munira Alduraibi @@ -571,6 +600,7 @@ Peter J Cock Peter Kämpf Peyton Murray Phil Elson +Filipe Laíns Pierre GM Pierre GM pierregm Piotr Gaiński @@ -591,6 +621,8 @@ Rehas Sachdeva Richard Howe <45905457+rmhowe425@users.noreply.github.com> Ritta Narita Riya Sharma +Rob Timpe +Rob Timpe Robert Kern Robert LU Robert T. 
McGibbon @@ -660,6 +692,7 @@ Steve Stagg Steven J Kern Stuart Archibald Stuart Archibald +SUMIT SRIMANI <2301109104@ptuniv.edu.in SuryaChand P Sylvain Ferriol Takanori Hirano @@ -696,6 +729,8 @@ Vinith Kishore <85550536+vinith2@users.noreply.github Vrinda Narayan Vrinda Narayan Vrinda Narayan <48102157+vrindaaa@users.noreply.github.com> +Wang Yang (杨旺) +Wang Yang (杨旺) <1113177880@qq.com> Wansoo Kim Warrick Ball Warrick Ball @@ -711,11 +746,11 @@ Xiangyi Wang Yamada Fuyuka Yang Hau Yang Hau -Yang Wang Yash Pethe Yash Pethe <83630710+patient74@users.noreply.github.com> Yashasvi Misra Yashasvi Misra <54177363+yashasvimisra2798@users.noreply.github.com> +Yichi Zhang Yogesh Raisinghani <46864533+raisinghanii@users.noreply.github.com> Younes Sandi Younes Sandi <65843206+Unessam@users.noreply.github.com> @@ -723,6 +758,8 @@ Yu Feng Yuji Kanagawa Yuki K Yury Kirienko +Yuvraj Pradhan +Yuvraj Pradhan Zac Hatfield-Dodds Zach Brugh <111941670+zachbrugh@users.noreply.github.com> Zé Vinícius diff --git a/doc/changelog/2.3.0-changelog.rst b/doc/changelog/2.3.0-changelog.rst new file mode 100644 index 000000000000..7ca672ba8dbf --- /dev/null +++ b/doc/changelog/2.3.0-changelog.rst @@ -0,0 +1,704 @@ + +Contributors +============ + +A total of 134 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* !DWesl +* !EarlMilktea + +* !amotzop + +* !fengluoqiuwu +* !h-vetinari +* !karl3wm + +* !partev +* !samir539 + +* !wenlong2 + +* Aarni Koskela + +* Abdu Zoghbi + +* Abhishek Kumar +* Adam J. Stewart +* Aditi Juneja +* Adrin Jalali +* Agriya Khetarpal +* Alicia Boya García + +* Andrej Zhilenkov +* Andrew Nelson +* Angus Gibson + +* Antonio Rech Santos + +* Ari Gato + +* Arnaud Tremblay + +* Arvid Bessen + +* Baskar Gopinath + +* Carlos Martin + +* Charles Harris +* Chris Navarro + +* Chris Sidebottom +* Christian Lorentzen +* Christine P. 
Chai + +* Christopher Sidebottom +* Clément Robert +* Colin Gilgenbach + +* Craig Peters + +* Cédric Hannotier +* Daniel Hrisca +* Derek Homeier +* Diego Baldassar + +* Dimitri Papadopoulos Orfanos +* Eoghan O'Connell + +* Eric Larson +* Ernst Peng + +* Evgeni Burovski +* Filipe Laíns +* François Rozet + +* François de Coatpont + +* GUAN MING +* Giovanni Del Monte +* Guan Ming(Wesley) Chiu + +* Guido Imperiale + +* Gyeongjae Choi + +* Halle Loveday + +* Hannah Wheeler + +* Hao Chen + +* Harmen Stoppels + +* Hin-Tak Leung + +* Ian DesJardin + +* Ihar Hrachyshka + +* Ilhan Polat +* Inessa Pawson +* J. Steven Dodge + +* Jake VanderPlas +* Jiachen An + +* Jiuding Tan (谭九鼎) +* Joe Rickerby + +* John Kirkham +* John Stilley + +* Jonathan Albrecht + +* Joren Hammudoglu +* Kai Germaschewski + +* Krishna Bindumadhavan + +* Lucas Colley +* Luka Krmpotić + +* Lysandros Nikolaou +* Maanas Arora +* Makima C. Yang + +* Marco Barbosa + +* Marco Edward Gorelli + +* Mark Harfouche +* Marten van Kerkwijk +* Mateusz Sokół +* Matt Haberland +* Matthew Brett +* Matthew Goldsberry + +* Matthew Sterrett +* Matthias Diener +* Matthieu Darbois +* Matti Picus +* Melissa Weber Mendonça +* Michael Siebert +* Mike O'Brien + +* Mohammed Abdul Rahman + +* Mugundan Selvanayagam + +* Musharaf Aijaz Baba + +* Musharraffaijaz + +* Nathan Goldbaum +* Nicholas Christensen + +* Nitish Satyavolu + +* Omid Rajaei +* PTUsumit + +* Peter Hawkins +* Peyton Murray +* Pieter Eendebak +* Raghuveer Devulapalli +* Ralf Gommers +* Rob Timpe + +* Robert Kern +* Rohit Goswami +* Ross Barnowski +* Roy Smart +* Saransh Chopra +* Saraswathy Kalaiselvan + +* Sayed Adel +* Sebastian Berg +* Shantanu Jain + +* Shashwat Pandey + +* Shi Entong + +* Simon Altrogge +* Stan Ulbrych +* Thomas A Caswell +* Théotime Grohens + +* Tyler Reddy +* WANG Xuerui + +* WEN Hao + +* Wang Yang (杨旺) + +* Warren Weckesser +* Warrick Ball +* William Andrea +* Yakov Danishevsky + +* Yichi Zhang + +* Yuvraj Pradhan + +* dependabot[bot] +* 
hfloveday12 + + +Pull requests merged +==================== + +A total of 556 pull requests were merged for this release. + +* `#22718 `__: DOC: Add docs on using GitHub Codespaces for NumPy development +* `#25675 `__: ENH: add matvec and vecmat gufuncs +* `#25934 `__: ENH: Convert tanh from C universal intrinsics to C++ using Highway +* `#25991 `__: ENH: Optimize polyutils as_series +* `#26018 `__: ENH add hash based unique +* `#26745 `__: ENH, DOC: Add support for interactive examples for NumPy with... +* `#26958 `__: BUG: index overlap copy +* `#27288 `__: BUG: Scalar array comparison should return np.bool +* `#27300 `__: CI: pycodestyle → ruff +* `#27309 `__: MNT: Enforce ruff/Pyflakes rules (F) +* `#27324 `__: DOC: Removing module name from by-topic docs +* `#27343 `__: ENH: Add support for flat indexing on flat iterator +* `#27404 `__: DOC: document type promotion with Python types +* `#27522 `__: ENH: Cleanup npy_find_array_wrap +* `#27523 `__: ENH: Improve performance of np.count_nonzero for float arrays +* `#27648 `__: MAINT: Fix the code style to our C-Style-Guide +* `#27738 `__: DEP: testing: disable deprecated use of keywords x/y +* `#27784 `__: BUG: ``sinc``\ : fix underflow for float16 +* `#27789 `__: ENH: Implement np.strings.slice as a gufunc +* `#27819 `__: CI: add windows free-threaded CI +* `#27823 `__: BEG, MAINT: Begin NumPy 2.3.0 development. +* `#27824 `__: BUG: Fix mismatch in definition and declaration for a couple... +* `#27826 `__: CI: update circleci to python3.11.10, limit parallel builds. +* `#27827 `__: CI: skip ninja installation in linux_qemu workflows +* `#27830 `__: ENH: speedup evaluation of numpy.polynomial.legendre.legval. 
+* `#27839 `__: DOC: Correct version-added for mean arg for nanvar and nanstd +* `#27841 `__: BUG: Never negate strides in reductions (for now) +* `#27847 `__: MAINT: Bump pypa/cibuildwheel from 2.21.3 to 2.22.0 +* `#27848 `__: MAINT: Bump mamba-org/setup-micromamba from 2.0.1 to 2.0.2 +* `#27850 `__: DOC: Correct versionadded for vecmat and matvec. +* `#27854 `__: MAINT: Use mask_store instead of store for compiler bug workaround +* `#27856 `__: SIMD: add lsx optimization for loongarch, and add Qemu tests +* `#27858 `__: DOC: Fix typo +* `#27860 `__: MAINT: Add helper for static or heap allocated scratch space +* `#27862 `__: MAINT: Drop Python 3.10 support. +* `#27864 `__: ENH: stack custom multiarray import exception with the original... +* `#27868 `__: BUG: fix importing numpy in Python's optimized mode +* `#27869 `__: TYP: Fix ``np.interp`` signature for scalar types +* `#27875 `__: MAINT: Ensure correct handling for very large unicode strings +* `#27877 `__: ENH: Refactor ``__qualname__`` across API +* `#27878 `__: DOC: Fix double import in docs +* `#27879 `__: DEV: Add venv files to .gitignore +* `#27883 `__: MAINT,ENH: Reorganize buffered iteration setup +* `#27884 `__: ENH: Remove unnecessary list collection +* `#27886 `__: MAINT: Move uint aligned check to actual transfer function setup +* `#27887 `__: MAINT: A few other small nditer fixes +* `#27896 `__: PERF: improve multithreaded ufunc scaling +* `#27897 `__: MAINT: Bump github/codeql-action from 3.27.5 to 3.27.6 +* `#27898 `__: MAINT: Remove ``25675.new_feature.rst`` snippet. +* `#27899 `__: TST: add timeouts for github actions tests and wheel builds. +* `#27901 `__: MAINT: simplify power fast path logic +* `#27910 `__: MAINT: Make qualname tests more specific and fix code where needed +* `#27914 `__: DOC: Remove 27896-snippet. +* `#27915 `__: MAINT: Bump actions/cache from 4.1.2 to 4.2.0 +* `#27917 `__: CI: Use hashes in specifying some actions. 
+* `#27920 `__: DOC: Fix invalid URL in the index.rst file. +* `#27921 `__: MAINT: Bump actions/checkout from 4.1.1 to 4.2.2 +* `#27922 `__: MAINT: Move user pointers out of axisdata and simplify iternext +* `#27923 `__: ENH: Add cython wrappers for NpyString API +* `#27927 `__: DOC: Use internal/intersphinx links for neps. +* `#27930 `__: MAINT: Fix cirrus MacOs wheel builds [wheel build] +* `#27931 `__: CI: audit with zizmor +* `#27933 `__: BUG: fix building numpy on musl s390x +* `#27936 `__: MAINT: Update main after 2.2.0 release. +* `#27940 `__: BUG: Fix potential inconsistent behaviour for high-demnsional... +* `#27943 `__: TEST: cleanups +* `#27947 `__: BUG:fix compile error libatomic link test to meson.build +* `#27955 `__: BUG: fix use-after-free error in npy_hashtable.cpp +* `#27956 `__: BLD: add missing include to fix build with freethreading +* `#27962 `__: MAINT: Bump github/codeql-action from 3.27.6 to 3.27.7 +* `#27963 `__: MAINT: Bump mamba-org/setup-micromamba from 2.0.2 to 2.0.3 +* `#27967 `__: TYP: Fix falsely rejected value types in ``ndarray.__setitem__`` +* `#27973 `__: MAINT: Apply assorted ruff/flake8-pie rules (PIE) +* `#27974 `__: MAINT: Apply ruff/flake8-implicit-str-concat rules (ISC) +* `#27975 `__: MAINT: Apply ruff/flake8-comprehensions rules (C4) +* `#27976 `__: MAINT: Apply assorted ruff/flake8-pyi rules (PYI) +* `#27978 `__: MAINT: Apply assorted ruff/flake8-simplify rules (SIM) +* `#27981 `__: DOC: Document abi3 compat +* `#27992 `__: BUG: Fix segfault in stringdtype lexsort +* `#27996 `__: MAINT: Bump github/codeql-action from 3.27.7 to 3.27.9 +* `#27997 `__: MAINT: Remove unnecessary (and not safe in free-threaded) 1-D... +* `#27998 `__: API,MAINT: Make ``NpyIter_GetTransferFlags`` public and avoid... +* `#27999 `__: DOC, MAINT: Fix typos found by codespell +* `#28001 `__: DOC: Fix documentation for np.dtype.kind to include 'T' for StringDType +* `#28003 `__: MAINT: random: Tweak module code in mtrand.pyx to fix a Cython... 
+* `#28004 `__: DOC: fix several doctests in dtype method docstrings +* `#28005 `__: BUG: Cython API was missing NPY_UINTP. +* `#28008 `__: BUG: Fix handling of matrix class in np.unique. +* `#28009 `__: TST: lib: Test average with object dtype and no weights. +* `#28013 `__: DOC: Fixed typos in development_advanced_debugging.rst +* `#28015 `__: MAINT: run ruff from the repository root +* `#28020 `__: CI: pin scipy-doctest to 1.5.1 +* `#28022 `__: MAINT: Add all submodules to ruff exclusion list. +* `#28023 `__: DOC: update to scipy-doctest 1.6.0 and fix tests +* `#28029 `__: MAINT: Bump actions/upload-artifact from 4.4.3 to 4.5.0 +* `#28032 `__: BUG,MAINT: Fix size bug in new alloc helper and use it in one... +* `#28033 `__: MAINT: Use userpointers to avoid NPY_MAXARGS in iternext() +* `#28035 `__: MAINT: Move ``lib.format`` and ``ctypeslib`` to submodules/private... +* `#28036 `__: Replace Twitter with X +* `#28039 `__: TYP: allow ``None`` in operand sequence of nditer +* `#28043 `__: BUG: Ensure einsum uses chunking (now that nditer doesn't) +* `#28051 `__: MAINT: Update main after 2.2.1 release. 
+* `#28053 `__: TYP: fix ``void`` arrays not accepting ``str`` keys in ``__setitem__`` +* `#28056 `__: BUG: Fix f2py directives and ``--lower`` casing +* `#28058 `__: MAINT: Update ``spin lint`` command +* `#28060 `__: CI: fix check that GIL remains disabled for free-threaded wheels +* `#28065 `__: TYP: fix unnecessarily broad ``integer`` binop return types +* `#28069 `__: MAINT: update NEP 29 +* `#28073 `__: TYP: use mypy_primer to surface type checking regressions +* `#28074 `__: DOC: clarify np.gradient varargs requirement for axis parameter +* `#28075 `__: MAINT: Replace usage of fixed strides with inner strides in einsum +* `#28080 `__: ENH: Allow an arbitrary number of operands in nditer +* `#28081 `__: DOC: Add release snippets for iteration changes +* `#28083 `__: MAINT: Update LICENSE Copyright to 2025 +* `#28088 `__: BUG: update check for highway compiler support +* `#28089 `__: MAINT: bump ``mypy`` to ``1.14.1`` +* `#28090 `__: DOC:Fixed docstring with example use of np.select +* `#28091 `__: MAINT: Refactor stringdtype casts.c to use cpp templates +* `#28092 `__: MAINT: LoongArch: switch away from the __loongarch64 preprocessor... +* `#28094 `__: DOC: Fix documentation example for numpy.ma.masked +* `#28100 `__: DOC: Move linalg.outer from Decompositions to Matrix and vector... +* `#28101 `__: DOC: Fix sphinx markup in source/reference/random/extending.rst +* `#28102 `__: MAINT: update oldest supported GCC version from 8.4 to 9.3 +* `#28103 `__: MAINT: random: Call np.import_array() in _examples/cython/extending_distribution... +* `#28105 `__: ENH: support no-copy pickling for any array that can be transposed... +* `#28108 `__: TYP: Better ``ndarray`` binop return types for ``float64`` &... +* `#28109 `__: TYP: Fix the incorrect ``bool`` return type of ``issubdtype`` +* `#28110 `__: TYP: Always accept ``date[time]`` in the ``datetime64`` constructor +* `#28116 `__: MAINT: random: Explicitly cast RAND_INT_MAX to double to avoid... 
+* `#28118 `__: BUG: Fix auxdata initialization in ufunc slow path +* `#28121 `__: MAINT: Correct NumPy 2.3 C-API versioning and version information +* `#28123 `__: BUG: move reduction initialization to ufunc initialization +* `#28127 `__: DOC: Improve slice docstrings +* `#28128 `__: BUG: Don't use C99 construct in import_array +* `#28129 `__: DEP: Deprecate ``numpy.typing.mypy_plugin`` +* `#28130 `__: CI: Fix mypy_primer comment workflow +* `#28133 `__: BUG: call PyType_Ready in f2py to avoid data races +* `#28134 `__: DEP: Deprecate ``numpy.typing.mypy_plugin``\ : The sequel +* `#28141 `__: DOC: Add instructions to build NumPy on WoA +* `#28142 `__: ENH: inline UTF-8 byte counter and make it branchless +* `#28144 `__: BUG: remove unnecessary call to PyArray_UpdateFlags +* `#28148 `__: MAINT: Replace usage of outdated fixed strides with inner strides... +* `#28149 `__: BUG: Fix ``from_float_positional`` errors for huge pads +* `#28154 `__: BUG: Avoid data race in PyArray_CheckFromAny_int +* `#28161 `__: DOC: Clarify ``np.loadtxt`` encoding argument default value in... +* `#28163 `__: MAINT: Avoid a redundant copy on ``a[...] = b`` +* `#28167 `__: DOC: fix formatting typo in basics.copies.rst +* `#28168 `__: TYP: Fix overlapping overloads issue in "2 in, 1 out" ufuncs +* `#28169 `__: TYP: preserve shape-type in ``ndarray.astype()`` +* `#28170 `__: TYP: Fix missing and spurious top-level exports +* `#28172 `__: BUG: Include Python-including headers first +* `#28179 `__: DOC: Remove duplicate wishlist tab in NEPs. +* `#28180 `__: DOC: Update links in HOWTO_RELEASE.rst +* `#28181 `__: CI: replace quansight-labs/setup-python with astral-sh/setup-uv +* `#28183 `__: MAINT: testing: specify python executable to use in extbuild +* `#28186 `__: MAINT: Update main after 2.2.2 release. +* `#28189 `__: MAINT, DOC: Add sphinx extension to allow svg images in PDF docs... +* `#28202 `__: MAINT: Use VQSORT_COMPILER_COMPATIBLE to determine if we should... 
+* `#28203 `__: BUG: fix data race in ``np.repeat`` +* `#28206 `__: BUG: Remove unnecessary copying and casting from out array in... +* `#28210 `__: corrected the numpy logo visibility issues on darkmode with the... +* `#28211 `__: MAINT: Hide decorator from pytest traceback +* `#28214 `__: ENH: add pkg_config entrypoint +* `#28219 `__: DOC: Add versionadded directive for axis argument in trim_zeros... +* `#28221 `__: BUG: allclose does not warn for invalid value encountered in... +* `#28222 `__: MAINT: Update highway to latest +* `#28223 `__: MAINT: Add [[maybe_unused] to silence some warnings +* `#28226 `__: DOC: Clarify ``__array__`` protocol arguments +* `#28228 `__: BUG: handle case when StringDType na_object is nan in float to... +* `#28229 `__: DOC: Fix a typo in doc/source/dev/development_workflow.rst +* `#28230 `__: DOC: FIx a link in Roadmap +* `#28231 `__: DOC: Fix external links in the navbar of neps webpage +* `#28232 `__: BUG: Fix float128 FPE handling on ARM64 with Clang compiler +* `#28234 `__: BUG: Add cpp atomic support +* `#28235 `__: MAINT: Compile fix for clang-cl on WoA +* `#28241 `__: TYP: Avoid upcasting ``float64`` in the set-ops +* `#28242 `__: CI: Fix the github label for ``TYP:`` PR's and issues +* `#28246 `__: BLD: better fix for clang / ARM compiles +* `#28250 `__: dtype.__repr__: prefer __name__ for user-defined types. +* `#28252 `__: test_casting_unittests.py: remove tuple +* `#28254 `__: MAINT: expire deprecations +* `#28258 `__: DOC: Change the scientific page link in NumPy/MATLAB +* `#28259 `__: TYP: Fix ``timedelta64.__divmod__`` and ``timedelta64.__mod__``... +* `#28262 `__: TYP: expire deprecations +* `#28263 `__: ENH: Add ARM64 (aarch64) CI testing +* `#28264 `__: DOC: Remove an invalid link in f2py-examples.rst +* `#28270 `__: TYP: Fixed missing typing information of set_printoptions +* `#28273 `__: CI: update sanitizer CI to use python compiled with ASAN and... 
+* `#28276 `__: BUG: fix incorrect bytes to StringDType coercion +* `#28279 `__: TYP: Fix scalar constructors +* `#28280 `__: TYP: stub ``numpy.matlib`` +* `#28281 `__: TYP: stub the missing ``numpy.testing`` modules +* `#28288 `__: DOC: Correct a typo in Intel License URL +* `#28290 `__: BUG: fix race initializing legacy dtype casts +* `#28291 `__: BUG: Prevent class-bound attr mutation in ``lib._iotools.NameValidator`` +* `#28294 `__: MAINT: Enable building tanh on vector length agnostic architectures +* `#28295 `__: TYP: stub ``numpy._globals`` +* `#28296 `__: TYP: stub ``numpy._expired_attrs_2_0`` +* `#28297 `__: TYP: stub ``numpy._configtool`` and ``numpy._distributor_init`` +* `#28298 `__: TYP: stub ``numpy.lib._iotools`` +* `#28299 `__: TYP: stub ``lib.user_array`` and ``lib._user_array_impl`` +* `#28300 `__: TYP: stub ``lib.introspect`` +* `#28301 `__: TYP: stub ``lib.recfunctions`` +* `#28302 `__: TYP: fix and improve ``numpy._core.arrayprint`` +* `#28303 `__: TYP: stub ``lib._datasource`` and fix ``lib._npyio_impl`` +* `#28304 `__: DOC: Remove reference to python2 +* `#28307 `__: MAINT: bump ``mypy`` to ``1.15.0`` +* `#28312 `__: DOC: remove references to Python 2 +* `#28319 `__: BUG: numpy.loadtxt reads only 50000 lines when skip_rows >= max_rows +* `#28320 `__: MAINT: Update actions/cache and use hash. +* `#28323 `__: DOC: Correct a typo in Exception TooHardError +* `#28327 `__: TYP: fix positional- and keyword-only params in ``astype``\ ,... +* `#28328 `__: CI: Update FreeBSD base image in ``cirrus_arm.yml`` +* `#28330 `__: ENH: Ensure ``lib._format_impl.read_array`` handles file reading... +* `#28332 `__: BUG: avoid segfault in np._core.multiarray.scalar +* `#28335 `__: MAINT: Update main after 2.2.3 release. +* `#28336 `__: DOC: Update link to Anaconda Eclipse/PyDev documentation +* `#28338 `__: MAINT: use OpenBLAS 0.3.29 +* `#28339 `__: MAIN: Update c,c++ line length to 88 +* `#28343 `__: BUG: Fix ``linalg.norm`` to handle empty matrices correctly. 
+* `#28350 `__: DOC: fix typo +* `#28353 `__: DOC: Make numpy.fft a clickable link to module +* `#28355 `__: BUG: safer bincount casting +* `#28358 `__: MAINT: No need to check for check for FPEs in casts to/from object +* `#28359 `__: DOC: Make the first paragraph more concise in internals.rst +* `#28361 `__: BUG: Make np.nonzero threading safe +* `#28370 `__: DOC: Revise bullet point formatting in ``arrays.promotions.rst`` +* `#28382 `__: DOC: fix C API docs for ``PyArray_Size`` +* `#28383 `__: DOC: Added links to CTypes and CFFI in Numba +* `#28386 `__: MAINT: Extend the default ruff exclude files +* `#28387 `__: DOC: fix expected exception from StringDType without string coercion +* `#28390 `__: MAINT: speed up slow test under TSAN +* `#28391 `__: CI: use free-threaded build for ASAN tests +* `#28392 `__: CI: build Linux aarch64 wheels on GitHub Actions +* `#28393 `__: BUG: Fix building on s390x with clang +* `#28396 `__: BUG: Fix crackfortran parsing error when a division occurs within... +* `#28404 `__: MAINT: remove legacy ucsnarrow module +* `#28406 `__: BUG: Include Python.h first +* `#28407 `__: BUG: sanity check ``__array_interface__`` number of dimensions +* `#28408 `__: DOC: Update link to Nix in Cross Compilation +* `#28411 `__: CI: use QEMU 9.2.2 for Linux Qemu tests +* `#28413 `__: DOC: add scimath in np.lib submodules listing +* `#28414 `__: DOC: Add missing punctuation to the random sampling page +* `#28415 `__: BLD: update cibuildwheel and build PyPy 3.11 wheels [wheel build] +* `#28421 `__: BUG: skip legacy dtype multithreaded test on 32 bit runners +* `#28426 `__: BUG: Limit the maximal number of bins for automatic histogram... 
+* `#28427 `__: DOC: remove mention of Poly.nickname +* `#28431 `__: MAINT: PY_VERSION_HEX simplify +* `#28436 `__: BUILD: move to manylinux_2_28 wheel builds +* `#28437 `__: DOC: fix documentation for Flag checking functions and macros +* `#28442 `__: ENH: Check for floating point exceptions in dot +* `#28444 `__: DOC: fix URL redirects +* `#28447 `__: DOC: repositioned bitwise_count under bit-wise operations +* `#28451 `__: DOC: Add -avx512_spr to disable AVX512 in build options +* `#28452 `__: TYP: stub ``random._pickle`` +* `#28453 `__: BUG: Fix return type of NpyIter_GetIterNext in Cython declarations +* `#28455 `__: MAINT: Bump mamba-org/setup-micromamba from 2.0.3 to 2.0.4 +* `#28456 `__: MAINT: Bump actions/cache from 4.2.0 to 4.2.2 +* `#28458 `__: MAINT: Bump actions/upload-artifact from 4.5.0 to 4.6.1 +* `#28459 `__: MAINT: Bump github/codeql-action from 3.27.9 to 3.28.11 +* `#28460 `__: MAINT: Bump astral-sh/setup-uv from 5.2.1 to 5.3.1 +* `#28461 `__: MAINT: Update dependabot.yml file +* `#28462 `__: TYP: Add specializations to meshgrid stubs +* `#28464 `__: MAINT: Bump actions/setup-python from 5.3.0 to 5.4.0 +* `#28465 `__: MAINT: Bump ossf/scorecard-action from 2.4.0 to 2.4.1 +* `#28466 `__: MAINT: Bump actions/checkout from 4.1.1 to 4.2.2 +* `#28467 `__: MAINT: Bump conda-incubator/setup-miniconda from 3.1.0 to 3.1.1 +* `#28468 `__: MAINT: Bump actions/download-artifact from 4.1.8 to 4.1.9 +* `#28473 `__: DOC: add typing badge to README +* `#28475 `__: MAINT: skip slow_pypy tests on pypy +* `#28477 `__: MAINT: fix typo in normal distribution functions docstrings +* `#28480 `__: ENH: Convert logical from C universal intrinsics to C++ using... +* `#28483 `__: DOC: only change tp_name on CPython +* `#28485 `__: MAINT: Bump actions/setup-python from 5.3.0 to 5.4.0 +* `#28488 `__: fix aarch64 CI run +* `#28489 `__: MAINT: Enable building loop_logical on vector length agnostic... 
+* `#28491 `__: TYP: fix typing errors in ``_core.shape_base`` +* `#28492 `__: TYP: fix typing errors in ``_core.strings`` +* `#28494 `__: TYP: fix typing errors in ``_core.records`` +* `#28495 `__: DOC: let docstring mention that unique_values is now unsorted +* `#28497 `__: TYP: don't use literals in shape-types +* `#28498 `__: TYP: accept non-integer shapes in array constructor without a... +* `#28499 `__: TYP: remove unneseccary cast +* `#28500 `__: TYP: stub ``numpy.random._bounded_integers`` +* `#28502 `__: TYP: stub ``numpy.random._common`` +* `#28503 `__: API: Always allow ``sorted=False`` and make a note about it +* `#28505 `__: TYP: stub ``numpy._core.umath`` +* `#28506 `__: TYP: fix typing errors in ``numpy.lib._arrayterator_impl`` +* `#28507 `__: MAINT: remove ``ma.timer_comparison`` +* `#28508 `__: TYP: fix signatures of ``ndarray.put`` and ``ndarray.view`` +* `#28509 `__: TYP: annotate the missing ``ufunc.resolve_dtypes`` method +* `#28511 `__: TYP: fix stubtest errors in ``numpy._core`` +* `#28513 `__: TYP: stub ``numpy._core.overrides`` +* `#28514 `__: TYP: stub ``numpy._utils`` +* `#28515 `__: TYP: stub ``numpy._core._dtype[_ctypes]`` +* `#28517 `__: TYP: stub the remaining ``numpy._core.\*`` modules +* `#28518 `__: TYP: stub the missing submodules of ``numpy.linalg`` +* `#28519 `__: TYP: stub ``numpy._pyinstaller`` +* `#28520 `__: TYP: stub ``numpy.fft.helper`` (deprecated) +* `#28522 `__: TYP: stub ``numpy.f2py`` +* `#28523 `__: TYP: annotate the missing deprecated ``row_stack`` function +* `#28524 `__: CI, TST: Bump to cibuildwheel 2.23 (Pyodide 0.27.0) for WASM... 
+* `#28525 `__: TYP: fix stubtest errors in ``numpy.dtype`` and ``numpy.dtypes.\*`` +* `#28526 `__: TYP: fix stubtest errors in ``timedelta64`` and ``object_`` +* `#28527 `__: TYP: fix stubtest errors in ``numpy.lib._function_base_impl`` +* `#28528 `__: TYP: fix stubtest errors in ``numpy.lib._arraysetops_impl`` +* `#28529 `__: TYP: fix stubtest errors in ``numpy.lib._index_tricks_impl`` +* `#28530 `__: TYP: fix stubtest errors in ``numpy.lib._twodim_base_impl`` +* `#28531 `__: ENH: Add Cygwin extensions to list to copy to CWD in f2py meson... +* `#28532 `__: DOC: minor editorial change +* `#28535 `__: TYP: fix stubtest errors in ``numpy._core`` +* `#28536 `__: TYP: fix stubtest errors in ``numpy._globals`` +* `#28537 `__: TYP: fix stubtest errors in ``numpy.mat[rix]lib`` +* `#28538 `__: TYP: fix stubtest errors in ``numpy.random`` +* `#28539 `__: TYP: fix stubtest errors in ``numpy.testing`` +* `#28540 `__: TYP: fix typing errors in ``numpy.ndarray`` +* `#28541 `__: TYP: fix stubtest error in ``numpy.ma`` +* `#28546 `__: MAINT: Update main after NumPy 2.2.4 release. 
+* `#28547 `__: MAINT: Bump pypa/cibuildwheel from 2.23.0 to 2.23.1 +* `#28555 `__: MAINT: Bump actions/download-artifact from 4.1.9 to 4.2.0 +* `#28556 `__: NEP 54: Change status to Accepted +* `#28560 `__: MAINT: Bump actions/download-artifact from 4.2.0 to 4.2.1 +* `#28561 `__: MAINT: Bump github/codeql-action from 3.28.11 to 3.28.12 +* `#28562 `__: MAINT: Bump actions/upload-artifact from 4.6.1 to 4.6.2 +* `#28563 `__: MAINT: Bump actions/cache from 4.2.2 to 4.2.3 +* `#28568 `__: MAINT: Bump astral-sh/setup-uv from 5.3.1 to 5.4.0 +* `#28569 `__: Fixing various spelling errors +* `#28571 `__: BLD: use ``manylinux_2_28:2025.03.23-1`` [wheel build] +* `#28576 `__: API,ENH: Allow forcing an array result in ufuncs +* `#28577 `__: BUG: avoid deadlocks with C++ shared mutex in dispatch cache +* `#28581 `__: MAINT: Bump github/codeql-action from 3.28.12 to 3.28.13 +* `#28586 `__: MAINT: Bump pypa/cibuildwheel from 2.23.1 to 2.23.2 +* `#28587 `__: MAINT: Bump actions/setup-python from 5.4.0 to 5.5.0 +* `#28591 `__: TYP: Type masked array shape, dtype, __int__, and __float__ +* `#28593 `__: TYP: Type ``numpy.ma.min`` +* `#28600 `__: BUG: Set writeable flag for writeable dlpacks. +* `#28601 `__: MAINT: Bump scientific-python/upload-nightly-action from 0.6.1... 
+* `#28607 `__: CI: fix cirrus config [wheel build] +* `#28611 `__: MAINT: Bump astral-sh/setup-uv from 5.4.0 to 5.4.1 +* `#28612 `__: TYP: Type ``ma.max`` and ``ma.ptp`` +* `#28615 `__: ENH: Upgrade Array API version to 2024.12 +* `#28616 `__: TYP: Type ``ma.MaskedArray.min`` +* `#28617 `__: MAINT: Bump actions/dependency-review-action from 4.5.0 to 4.6.0 +* `#28618 `__: MAINT, CI: Update Ubuntu to 22.04 in azure-pipelines +* `#28619 `__: ENH: Use openmp on x86-simd-sort to speed up np.sort and np.argsort +* `#28621 `__: DOC: Fix typo in ``numpy/typing/__init__.py`` +* `#28623 `__: TYP: Type ``ma.MaskedArray.max`` and ``ma.MaskedArray.ptp`` +* `#28624 `__: BUG: fix ``np.vectorize`` for object dtype +* `#28626 `__: DOC: update array API standard version in compatibility page +* `#28627 `__: MAINT: replace string.format() with f-strings +* `#28635 `__: BLD: Update vendor-meson to fix module_feature conflicts arguments... +* `#28637 `__: TYP: Fix overload for ``ma.MaskedArray.{min,max,ptp}`` and ``ma.{min,max,ptp}`` ... +* `#28638 `__: TYP: Type ``MaskedArray.{argmin, argmax}`` and ``np.ma.{argmin,``... +* `#28643 `__: BUG: fix deepcopying StringDType arrays +* `#28644 `__: TYP: fix ``ndarray.tolist()`` and ``.item()`` for unknown dtype +* `#28645 `__: DOC: fixes classes decorated with set_module not showing its... 
+* `#28647 `__: DOC: Fix typos found by codespell +* `#28649 `__: ENH: Improve np.linalg.det performance +* `#28653 `__: CI: Replace QEMU armhf with native (32-bit compatibility mode) +* `#28657 `__: TYP: simplified type-aliases in ``numpy._typing`` +* `#28660 `__: TYP: Accept objects that ``write()`` to ``str`` in ``savetxt`` +* `#28662 `__: MAINT: Remove distutils CPU dispatcher compatibility code +* `#28664 `__: TYP: Type ``MaskedArray.sort`` +* `#28666 `__: MAINT: Bump github/codeql-action from 3.28.13 to 3.28.14 +* `#28667 `__: TYP: replace ``_ScalarType`` with ``_SCT`` +* `#28668 `__: TYP: replace ``_ArrayType`` with ``_ArrayT`` +* `#28669 `__: TYP: default to ``dtype[Any]`` +* `#28671 `__: SIMD: Fix Highway QSort symbol linking error on aarch32/ASIMD +* `#28672 `__: MAINT: Bump github/codeql-action from 3.28.14 to 3.28.15 +* `#28674 `__: TYP: add missing ``"b1"`` literals for ``dtype[bool]`` +* `#28675 `__: TYP: fix and improve ``numpy.lib._type_check_impl`` +* `#28676 `__: TYP: fix mypy test failures +* `#28677 `__: TYP: Type ``MaskedArray.partition`` and ``MaskedArray.argpartition`` +* `#28678 `__: DEP: Deprecate ``.T`` property for non-2dim arrays and scalars +* `#28680 `__: TYP: Type ``MaskedArray.take`` and ``np.ma.take`` +* `#28684 `__: TYP: replace ``_DType`` with ``_DTypeT`` +* `#28688 `__: TYP: rename ``_ShapeType`` TypeVar to ``_ShapeT`` +* `#28689 `__: TYP: Type ``MaskedArray.__{ge,gt,le,lt}__`` +* `#28690 `__: TYP: replace ``_SCT`` with ``_ScalarT`` +* `#28693 `__: BLD: fix meson_version warning +* `#28695 `__: DOC: linalg.matrix_transpose: add alias note +* `#28699 `__: TYP: Fix false rejection of ``NDArray[object_].__abs__()`` +* `#28702 `__: TYP: Fix inconsistent ``NDArray[float64].__[r]truediv__`` return... 
+* `#28703 `__: MAINT: Improve float16 and float32 printing +* `#28710 `__: ENH: Improve performance for np.result_type +* `#28712 `__: MAINT: ``%i`` → ``%d`` +* `#28715 `__: TYP: Type ``np.ma.{is_masked,ndim,size,ids,iscontiguous}`` +* `#28717 `__: TYP: fix string-like ``ndarray`` rich comparison operators +* `#28719 `__: MAINT: switching from ``%i`` to ``fstrings`` +* `#28720 `__: TYP: drop py310 support +* `#28724 `__: STY: Apply assorted ruff rules (RUF) +* `#28725 `__: STY: Enforce ruff/pycodestyle warnings (W) +* `#28726 `__: STY: Apply assorted ruff/refurb rules (FURB) +* `#28728 `__: STY: Apply assorted ruff/pyupgrade rules (UP) +* `#28731 `__: BUG: Prevent nanmax/nanmin from copying memmap arrays +* `#28733 `__: TYP: remove ``_typing._UnknownType`` and ``_ArrayLikeUnknown`` +* `#28735 `__: TYP: Type ``MaskedArray.count`` and ``np.ma.count`` +* `#28738 `__: TYP: fix incorrect ``random.Generator.integers`` return type +* `#28739 `__: MNT: get rid of references to Python 3.10 +* `#28740 `__: MAINT: Bump astral-sh/setup-uv from 5.4.1 to 5.4.2 +* `#28741 `__: BUG: Re-enable overriding functions in the ``np.strings`` module. 
+* `#28742 `__: TYP: Type ``MaskedArray.filled`` and ``np.ma.filled`` +* `#28743 `__: MNT: Enforce ruff/pygrep-hooks rules (PGH) +* `#28744 `__: STY: Apply more ruff rules (RUF) +* `#28745 `__: TYP: Type ``MaskedArray.put``\ , ``np.ma.put``\ , ``np.ma.putmask`` +* `#28746 `__: TYP: ``numpy.ma`` squiggly line cleanup +* `#28747 `__: TYP: some ``[arg]partition`` fixes +* `#28748 `__: ENH: Support Python 3.14 +* `#28750 `__: TYP: fix ``count_nonzero`` signature +* `#28751 `__: MNT: discard Python 2 leftover +* `#28752 `__: MNT: Apply ruff/Pylint rule PLW0129 (assertions that never fail) +* `#28754 `__: MNT: Enforce ruff/Pylint Error rules (PLE) +* `#28755 `__: MNT: Apply assorted ruff/Pylint Refactor rules (PLR) +* `#28756 `__: MNT: Apply assorted ruff/Pylint Warning rules (PLW) +* `#28757 `__: BUG: Fix AVX512_SPR dispatching for SVML half-precision operations +* `#28760 `__: STY: Apply ruff/pyupgrade rule UP032 +* `#28763 `__: STY: Use f-string instead of ``format`` call +* `#28764 `__: MNT: Enforce ruff rules: Flynt (FLY) and flake8-pie (PIE) +* `#28765 `__: MNT: Enforce ruff/flake8-bugbear rules (B) +* `#28766 `__: TYP: Type ``MaskedArray.compressed`` and ``np.ma.compressed`` +* `#28768 `__: MAINT: getting rid of old ``%`` and ``.format(...)`` strings... 
+* `#28769 `__: ENH: Improve Floating Point Cast Performance on ARM +* `#28770 `__: MNT: Enforce ruff/pyupgrade rules (UP) +* `#28771 `__: ENH: Include offset in error message when fallocate() fails +* `#28775 `__: STY: Partially apply ruff/pycodestyle rules (E) +* `#28779 `__: MAINT: Update main after Numpy 2.2.5 release +* `#28789 `__: BUG: Re-enable GCC function-specific optimization attributes +* `#28793 `__: TYP: Type ``np.ma.allclose`` and ``np.ma.allequal`` +* `#28798 `__: TST: skip test if spawning threads triggers a RuntimeError +* `#28803 `__: MAINT: Bump github/codeql-action from 3.28.15 to 3.28.16 +* `#28804 `__: BUG: fix heap buffer overflow in np.strings.find +* `#28806 `__: BUG: Fix `` __array__(None)`` to preserve dtype +* `#28807 `__: TYP: fix ``NDArray[floating] + float`` return type +* `#28808 `__: CI: Make clang_TSAN CI job use cpython_sanity docker image +* `#28809 `__: TYP: write ``dtype[Any]`` as ``dtype`` +* `#28810 `__: TYP: replace ``_Self`` type parameters with ``typing.Self`` +* `#28811 `__: TYP: remove unnecessary scalar-type ``Any`` type-args +* `#28816 `__: MAINT: Bump actions/setup-python from 5.5.0 to 5.6.0 +* `#28817 `__: MAINT: Bump astral-sh/setup-uv from 5.4.2 to 6.0.0 +* `#28818 `__: MAINT: Bump actions/download-artifact from 4.2.1 to 4.3.0 +* `#28819 `__: TYP: simplify redundant unions of builtin scalar types +* `#28820 `__: TYP: ``None`` at the end of a union +* `#28821 `__: BUG: Use unrotated companion matrix in polynomial.polyroots. 
+* `#28831 `__: TYP: Fix type annotations for ``np.ma.nomask`` and ``np.ma.MaskType`` +* `#28832 `__: TYP: Type ``np.ma.getmask`` +* `#28833 `__: TYP: Type ``np.ma.is_mask`` +* `#28836 `__: ENH: Provide Windows 11 ARM64 wheels (#22530) +* `#28841 `__: BUG: Fix Clang warning in loops_half.dispatch.c.src +* `#28845 `__: TYP: Type ``MaskedArray.nonzero`` +* `#28847 `__: TYP: Use _Array1D alias in ``numpy.ma.core.pyi`` +* `#28848 `__: TYP: Type ``MaskedArray.ravel`` +* `#28849 `__: TYP: Type ``MaskedArray.repeat``\ , improve overloads for ``NDArray.repeat``\... +* `#28850 `__: TYP: Type ``MaskedArray.swapaxes`` +* `#28854 `__: MAINT: Bump pypa/cibuildwheel from 2.23.2 to 2.23.3 +* `#28855 `__: TYP: add missing ``mod`` params to ``__[r]pow__`` +* `#28856 `__: TYP: generic ``StringDType`` +* `#28857 `__: TYP: implicit ``linalg`` private submodule re-exports +* `#28858 `__: TYP: fix the ``set_module`` signature +* `#28859 `__: DOC: Replace http:// with https:// +* `#28860 `__: BLD: update vendored Meson: v1.6.1 and iOS support +* `#28862 `__: BUG: fix stringdtype singleton thread safety +* `#28863 `__: TYP: Improve consistency of (masked) array typing aliases +* `#28867 `__: TYP: Type ``MaskedArray.{__setmask__,mask,harden_mask,soften_mask,hardmask,unsha``... 
+* `#28868 `__: TYP: Type ``MaskedArray.{imag, real, baseclass, mT}`` +* `#28869 `__: MAINT: Bump astral-sh/setup-uv from 6.0.0 to 6.0.1 +* `#28870 `__: MNT: retire old script for SVN repositories +* `#28871 `__: MNT: retire script superseded by ruff rule W605 +* `#28872 `__: DOC: consistent and updated LICENSE files for wheels +* `#28874 `__: DOC: ``numpy.i`` will not be included as part of SWIG +* `#28876 `__: MNT: discard unused function using os.system() +* `#28877 `__: DOC: update content of cross compilation build docs +* `#28878 `__: STY: Enforce more ruff rules +* `#28879 `__: STY: Apply assorted ruff/refurb rules (FURB) +* `#28880 `__: TYP: Type ``MaskedArray.all`` and ``MaskedArray.any`` +* `#28882 `__: MAINT: address warning in SWIG tests +* `#28883 `__: MAINT: from_dlpack thread safety fixes +* `#28884 `__: DEP: deprecate ``numpy.typing.NBitBase`` +* `#28887 `__: MAINT: Bump github/codeql-action from 3.28.16 to 3.28.17 +* `#28888 `__: DOC: math mode x to \times in docstring for numpy.linalg.multi_dot +* `#28892 `__: MAINT: Avoid dereferencing/strict aliasing warnings +* `#28893 `__: TYP: remove non-existent extended-precision scalar types +* `#28898 `__: BUG: Fix missing check for PyErr_Occurred() in _pyarray_correlate. +* `#28904 `__: BLD: update vendored Meson to include iOS fix +* `#28905 `__: TYP: Test ``MaskedArray.transpose`` and ``MaskedArray.T``\ ,... +* `#28906 `__: TYP: np.argmin and np.argmax overload changes +* `#28908 `__: TYP: Fix non-existent ``CanIndex`` annotation in ``ndarray.setfield`` +* `#28912 `__: TYP: add ``float64`` overloads to ``{lin,log,geom}space`` +* `#28918 `__: DOC: Fixes absent line numbers on link to classes decorated with... 
+* `#28923 `__: BUG: Use string conversion defined on dtype for .str +* `#28927 `__: MAINT: Remove outdated ``MaskedArray.__div__`` and ``MaskedArray.__idiv__`` +* `#28928 `__: MNT: add support for 3.14.0b1 +* `#28929 `__: MAINT: remove py2 ``__div__`` methods from ``poly1d`` and ``ABCPolyBase`` +* `#28930 `__: MAINT: remove py2 ``__div__`` remnants from the tests +* `#28931 `__: MAINT: remove py2 ``__div__`` methods from ``lib.user_array.container`` +* `#28932 `__: MAINT: remove references to 256-bits extended precision types +* `#28933 `__: MAINT: Use consistent naming for ``numpy/typing/tests/data/fail/ma.pyi`` +* `#28934 `__: TYP, TST: improved type-testing +* `#28935 `__: MAINT: Enable ruff E251 +* `#28936 `__: TST: Prevent import error when tests are not included in the... +* `#28937 `__: CI: fix TSAN CI by using a different docker image +* `#28938 `__: MNT: clean up free-threaded CI configuration +* `#28939 `__: MAINT: Bump actions/dependency-review-action from 4.6.0 to 4.7.0 +* `#28940 `__: TYP: optional type parameters for ``ndarray`` and ``flatiter`` +* `#28941 `__: DOC: Fix titles in ``development_ghcodespaces.rst`` +* `#28945 `__: MAINT: Enable linting with ruff E501 +* `#28952 `__: MAINT: Bump actions/dependency-review-action from 4.7.0 to 4.7.1 +* `#28954 `__: MAINT: Enable linting with ruff E501 for numpy._core +* `#28956 `__: DOC: Remove references to Python 2/3 +* `#28958 `__: TYP: reject complex scalar types in ``ndarray.__ifloordiv__`` +* `#28959 `__: TYP: remove redundant ``ndarray`` inplace operator overloads +* `#28960 `__: TYP: fix mypy & pyright errors in ``np.matrix`` +* `#28961 `__: DEP: finalize removal of ``numpy.compat`` +* `#28962 `__: TYP: type-testing without the mypy plugin +* `#28963 `__: MAINT: Update ruff to 0.11.9 in linting requirements +* `#28969 `__: MNT: Enforce ruff/isort rules (I) +* `#28971 `__: MAINT: Enable linting with ruff E501 +* `#28972 `__: MNT: Get rif of ``# pylint: `` pragma controls +* `#28974 `__: MNT: Get rid 
of ``version: $Id`` CVS tags +* `#28975 `__: MNT: import numpy as np +* `#28976 `__: MNT: Get rid of Pyflakes / flake8 +* `#28977 `__: MNT: Enforce ruff/flake8-implicit-str-concat rules (ISC) +* `#28978 `__: MNT: Enforce ruff/pandas-vet rules (PD) +* `#28981 `__: STY: reformat the ``_typing`` imports without trailing commas +* `#28982 `__: TYP: Gradual shape type defaults +* `#28984 `__: MNT: Use isinstance() instead of comparing type() +* `#28986 `__: TYP: Type ``MaskedArray.__{iadd,isub,imul,itruediv,ifloordiv,ipow}__`` +* `#28987 `__: MNT: Align ruff pin between ``requirements/linter_requirements.txt``... +* `#28988 `__: TYP: add missing ``ndarray.__{add,mul}__`` ``character`` type... +* `#28989 `__: MAINT: Bump github/codeql-action from 3.28.17 to 3.28.18 +* `#28990 `__: Revert "DEP: Deprecate ``.T`` property for non-2dim arrays and... +* `#28993 `__: MAINT: update NPY_FEATURE_VERSION after dropping python 3.10 +* `#28994 `__: TYP: allow inplace division of ``NDArray[timedelta64]`` by floats +* `#28995 `__: TYP: remove ``from __future__ import annotations`` +* `#28998 `__: MAINT: Update main after 2.2.6 release. +* `#29002 `__: MAINT: Update download-wheels for multiple pages +* `#29006 `__: ENH: Disable the alloc cache under address and memory sanitizers +* `#29008 `__: MNT: fix CI issues on main +* `#29018 `__: MAINT: Bump larsoner/circleci-artifacts-redirector-action from... +* `#29033 `__: BUG: Fix workflow error +* `#29042 `__: MNT: constant string arrays instead of pointers in C +* `#29043 `__: BUG: Avoid compile errors in f2py modules +* `#29044 `__: BUG: Fix f2py derived types in modules +* `#29046 `__: BUG: Fix cache use regression +* `#29047 `__: REL: Prepare for the NumPy 2.3.0rc1 release [wheel build] +* `#29070 `__: TYP: Various typing fixes. 
+* `#29072 `__: MAINT: use pypy 3.11 nightly which has a fix for ctypeslib +* `#29073 `__: BLD: use sonoma image on Cirrus for wheel build +* `#29074 `__: BUG: add bounds-checking to in-place string multiply +* `#29082 `__: BLD: bump OpenBLAS version, use OpenBLAS for win-arm64 [wheel... +* `#29089 `__: MNT: Avoid use of deprecated _PyDict_GetItemStringWithError in... +* `#29099 `__: BUG: f2py: thread-safe forcomb (#29091) +* `#29100 `__: TYP: fix NDArray[integer] inplace operator mypy issue +* `#29101 `__: PERF: Make NpzFile member existence constant time +* `#29116 `__: MAINT: Update to vs2022 in NumPy 2.3.x [wheel build] +* `#29118 `__: MAINT: fix SPDX license expressions for LAPACK, GCC runtime libs +* `#29132 `__: MAINT: Fix for segfaults with GCC 15 + diff --git a/doc/source/release/2.3.0-notes.rst b/doc/source/release/2.3.0-notes.rst index 74f11a0b4537..faad9ffcc8eb 100644 --- a/doc/source/release/2.3.0-notes.rst +++ b/doc/source/release/2.3.0-notes.rst @@ -4,16 +4,523 @@ NumPy 2.3.0 Release Notes ========================== +The NumPy 2.3.0 release continues the work to improve free threaded Python +support and annotations together with the usual set of bug fixes. It is unusual +in the number of expired deprecations, code modernizations, and style cleanups. +The latter may not be visible to users, but is important for code maintenance +over the long term. Note that we have also upgraded from manylinux2014 to +manylinux_2_28. + +Users running on a Mac having an M4 cpu might see various warnings about +invalid values and such. The warnings are a known problem with Accelerate. +They are annoying, but otherwise harmless. Apple promises to fix them. + +This release supports Python versions 3.11-3.13, Python 3.14 will be supported +when it is released. + Highlights ========== -*We'll choose highlights for this release near the end of the release cycle.* +* Interactive examples in the NumPy documentation. +* Building NumPy with OpenMP Parallelization. 
+* Preliminary support for Windows on ARM. +* Improved support for free threaded Python. +* Improved annotations. + + +New functions +============= + +New function ``numpy.strings.slice`` +------------------------------------ +The new function ``numpy.strings.slice`` was added, which implements fast +native slicing of string arrays. It supports the full slicing API including +negative slice offsets and steps. + +(`gh-27789 `__) + + +Deprecations +============ + +* The ``numpy.typing.mypy_plugin`` has been deprecated in favor of platform-agnostic + static type inference. Please remove ``numpy.typing.mypy_plugin`` from the ``plugins`` + section of your mypy configuration. If this change results in new errors being + reported, kindly open an issue. + + (`gh-28129 `__) + +* The ``numpy.typing.NBitBase`` type has been deprecated and will be removed in + a future version. + + This type was previously intended to be used as a generic upper bound for + type-parameters, for example: + + .. code-block:: python + + import numpy as np + import numpy.typing as npt + + def f[NT: npt.NBitBase](x: np.complexfloating[NT]) -> np.floating[NT]: ... + + But in NumPy 2.2.0, ``float64`` and ``complex128`` were changed to concrete + subtypes, causing static type-checkers to reject ``x: np.float64 = + f(np.complex128(42j))``. + + So instead, the better approach is to use ``typing.overload``: + + .. code-block:: python + + import numpy as np + from typing import overload + + @overload + def f(x: np.complex64) -> np.float32: ... + @overload + def f(x: np.complex128) -> np.float64: ... + @overload + def f(x: np.clongdouble) -> np.longdouble: ... 
+ + (`gh-28884 `__) + + +Expired deprecations +==================== + +* Remove deprecated macros like ``NPY_OWNDATA`` from Cython interfaces in favor + of ``NPY_ARRAY_OWNDATA`` (deprecated since 1.7) + + (`gh-28254 `__) + +* Remove ``numpy/npy_1_7_deprecated_api.h`` and C macros like ``NPY_OWNDATA`` + in favor of ``NPY_ARRAY_OWNDATA`` (deprecated since 1.7) + + (`gh-28254 `__) + +* Remove alias ``generate_divbyzero_error`` to + ``npy_set_floatstatus_divbyzero`` and ``generate_overflow_error`` to + ``npy_set_floatstatus_overflow`` (deprecated since 1.10) + + (`gh-28254 `__) + +* Remove ``np.tostring`` (deprecated since 1.19) + + (`gh-28254 `__) + +* Raise on ``np.conjugate`` of non-numeric types (deprecated since 1.13) + + (`gh-28254 `__) + +* Raise when using ``np.bincount(...minlength=None)``, use 0 instead + (deprecated since 1.14) + + (`gh-28254 `__) + +* Passing ``shape=None`` to functions with a non-optional shape argument + errors, use ``()`` instead (deprecated since 1.20) + + (`gh-28254 `__) + +* Inexact matches for ``mode`` and ``searchside`` raise (deprecated since 1.20) + + (`gh-28254 `__) + +* Setting ``__array_finalize__ = None`` errors (deprecated since 1.23) + + (`gh-28254 `__) + +* ``np.fromfile`` and ``np.fromstring`` error on bad data, previously they + would guess (deprecated since 1.18) + + (`gh-28254 `__) + +* ``datetime64`` and ``timedelta64`` construction with a tuple no longer + accepts an ``event`` value, either use a two-tuple of (unit, num) or a + 4-tuple of (unit, num, den, 1) (deprecated since 1.14) + + (`gh-28254 `__) + +* When constructing a ``dtype`` from a class with a ``dtype`` attribute, that + attribute must be a dtype-instance rather than a thing that can be parsed as + a dtype instance (deprecated in 1.19). 
At some point the whole construct of + using a dtype attribute will be deprecated (see #25306) + + (`gh-28254 `__) + +* Passing booleans as partition index errors (deprecated since 1.23) + + (`gh-28254 `__) + +* Out-of-bounds indexes error even on empty arrays (deprecated since 1.20) + + (`gh-28254 `__) + +* ``np.tostring`` has been removed, use ``tobytes`` instead (deprecated since 1.19) + + (`gh-28254 `__) + +* Disallow making a non-writeable array writeable for arrays with a base that does + not own its data (deprecated since 1.17) + + (`gh-28254 `__) + +* ``concatenate()`` with ``axis=None`` uses ``same-kind`` casting by default, + not ``unsafe`` (deprecated since 1.20) + + (`gh-28254 `__) + +* Unpickling a scalar with object dtype errors (deprecated since 1.20) + + (`gh-28254 `__) + +* The binary mode of ``fromstring`` now errors, use ``frombuffer`` instead + (deprecated since 1.14) + + (`gh-28254 `__) + +* Converting ``np.inexact`` or ``np.floating`` to a dtype errors (deprecated + since 1.19) + + (`gh-28254 `__) + +* Converting ``np.complex``, ``np.integer``, ``np.signedinteger``, + ``np.unsignedinteger``, ``np.generic`` to a dtype errors (deprecated since + 1.19) + + (`gh-28254 `__) + +* The Python built-in ``round`` errors for complex scalars. Use ``np.round`` or + ``scalar.round`` instead (deprecated since 1.19) + + (`gh-28254 `__) + +* 'np.bool' scalars can no longer be interpreted as an index (deprecated since 1.19) + + (`gh-28254 `__) + +* Parsing an integer via a float string is no longer supported. (deprecated + since 1.23) To avoid this error you can + * make sure the original data is stored as integers. + * use the ``converters=float`` keyword argument. + * Use ``np.loadtxt(...).astype(np.int64)`` + + (`gh-28254 `__) + +* The use of a length 1 tuple for the ufunc ``signature`` errors. Use ``dtype`` + or fill the tuple with ``None`` (deprecated since 1.19) + + (`gh-28254 `__) + +* Special handling of matrix in np.outer is removed.
Convert to an ndarray + via ``matrix.A`` (deprecated since 1.20) + + (`gh-28254 `__) + +* Removed the ``np.compat`` package source code (removed in 2.0) + + (`gh-28961 `__) + + +C API changes +============= + +* ``NpyIter_GetTransferFlags`` is now available to check if + the iterator needs the Python API or if casts may cause floating point + errors (FPE). FPEs can for example be set when casting ``float64(1e300)`` + to ``float32`` (overflow to infinity) or a NaN to an integer (invalid value). + + (`gh-27883 `__) + +* ``NpyIter`` now has no limit on the number of operands it supports. + + (`gh-28080 `__) + +New ``NpyIter_GetTransferFlags`` and ``NpyIter_IterationNeedsAPI`` change +------------------------------------------------------------------------- +NumPy now has the new ``NpyIter_GetTransferFlags`` function as a more precise +way of checking iterator/buffering needs, i.e. whether the Python API/GIL is +required or floating point errors may occur. +This function is also faster if you already know your needs without buffering. + +The ``NpyIter_IterationNeedsAPI`` function now performs all the checks that were +previously performed at setup time. While it was never necessary to call it +multiple times, doing so will now have a larger cost. + +(`gh-27998 `__) + + +New Features +============ + +* The type parameter of ``np.dtype`` now defaults to ``typing.Any``. + This way, static type-checkers will infer ``dtype: np.dtype`` as + ``dtype: np.dtype[Any]``, without reporting an error. + + (`gh-28669 `__) + +* Static type-checkers now interpret: + + - ``_: np.ndarray`` as ``_: npt.NDArray[typing.Any]``. + - ``_: np.flatiter`` as ``_: np.flatiter[np.ndarray]``. + + This is because their type parameters now have default values.
+ + (`gh-28940 `__) + +NumPy now registers its pkg-config paths with the pkgconf_ PyPI package +----------------------------------------------------------------------- +The pkgconf_ PyPI package provides an interface for projects like NumPy to +register their own paths to be added to the pkg-config search path. This means +that when using pkgconf_ from PyPI, NumPy will be discoverable without needing +for any custom environment configuration. + +.. attention:: Attention + + This only applies when using the pkgconf_ package from PyPI_, or put another + way, this only applies when installing pkgconf_ via a Python package + manager. + + If you are using ``pkg-config`` or ``pkgconf`` provided by your system, or + any other source that does not use the pkgconf-pypi_ project, the NumPy + pkg-config directory will not be automatically added to the search path. In + these situations, you might want to use ``numpy-config``. + + +.. _pkgconf: https://github.com/pypackaging-native/pkgconf-pypi +.. _PyPI: https://pypi.org/ +.. _pkgconf-pypi: https://github.com/pypackaging-native/pkgconf-pypi + +(`gh-28214 `__) + +Allow ``out=...`` in ufuncs to ensure array result +-------------------------------------------------- +NumPy has the sometimes difficult behavior that it currently usually +returns scalars rather than 0-D arrays (even if the inputs were 0-D arrays). +This is especially problematic for non-numerical dtypes (e.g. ``object``). + +For ufuncs (i.e. most simple math functions) it is now possible to use +``out=...`` (literally \`...\`, e.g. ``out=Ellipsis``) which is identical in +behavior to ``out`` not being passed, but will ensure a non-scalar return. +This spelling is borrowed from ``arr1d[0, ...]`` where the ``...`` also ensures +a non-scalar return. + +Other functions with an ``out=`` kwarg should gain support eventually. +Downstream libraries that interoperate via ``__array_ufunc__`` or +``__array_function__`` may need to adapt to support this. 
+ +(`gh-28576 `__) + +Building NumPy with OpenMP Parallelization +------------------------------------------ +NumPy now supports OpenMP parallel processing capabilities when built with the +``-Denable_openmp=true`` Meson build flag. This feature is disabled by default. +When enabled, ``np.sort`` and ``np.argsort`` functions can utilize OpenMP for +parallel thread execution, improving performance for these operations. + +(`gh-28619 `__) + +Interactive examples in the NumPy documentation +----------------------------------------------- +The NumPy documentation includes a number of examples that +can now be run interactively in your browser using WebAssembly +and Pyodide. + +Please note that the examples are currently experimental in +nature and may not work as expected for all methods in the +public API. + +(`gh-26745 `__) + + +Improvements +============ + +* Scalar comparisons between non-comparable dtypes such as + ``np.array(1) == np.array('s')`` now return a NumPy bool instead of + a Python bool. + + (`gh-27288 `__) + +* ``np.nditer`` now has no limit on the number of supported operands + (C-integer). + + (`gh-28080 `__) + +* No-copy pickling is now supported for any + array that can be transposed to a C-contiguous array. + + (`gh-28105 `__) + +* The ``__repr__`` for user-defined dtypes now prefers the ``__name__`` of the + custom dtype over a more generic name constructed from its ``kind`` and + ``itemsize``. + + (`gh-28250 `__) + +* ``np.dot`` now reports floating point exceptions. + + (`gh-28442 `__) + +* ``np.dtypes.StringDType`` is now a + `generic type `_ which + accepts a type argument for ``na_object`` that defaults to ``typing.Never``. + For example, ``StringDType(na_object=None)`` returns a ``StringDType[None]``, + and ``StringDType()`` returns a ``StringDType[typing.Never]``. 
+ + (`gh-28856 `__) + +Added warnings to ``np.isclose`` +-------------------------------- +Added warning messages if at least one of atol or rtol are either ``np.nan`` or +``np.inf`` within ``np.isclose``. + +* Warnings follow the user's ``np.seterr`` settings + +(`gh-28205 `__) + + +Performance improvements and changes +==================================== + +Performance improvements to ``np.unique`` +----------------------------------------- +``np.unique`` now tries to use a hash table to find unique values instead of +sorting values before finding unique values. This is limited to certain dtypes +for now, and the function is now faster for those dtypes. The function now also +exposes a ``sorted`` parameter to allow returning unique values as they were +found, instead of sorting them afterwards. + +(`gh-26018 `__) + +Performance improvements to ``np.sort`` and ``np.argsort`` +---------------------------------------------------------- +``np.sort`` and ``np.argsort`` functions now can leverage OpenMP for parallel +thread execution, resulting in up to 3.5x speedups on x86 architectures with +AVX2 or AVX-512 instructions. This opt-in feature requires NumPy to be built +with the -Denable_openmp Meson flag. Users can control the number of threads +used by setting the OMP_NUM_THREADS environment variable. + +(`gh-28619 `__) + +Performance improvements for ``np.float16`` casts +------------------------------------------------- +Earlier, floating point casts to and from ``np.float16`` types +were emulated in software on all platforms. + +Now, on ARM devices that support Neon float16 intrinsics (such as +recent Apple Silicon), the native float16 path is used to achieve +the best performance. + +(`gh-28769 `__) + + +Changes +======= + +* The vector norm ``ord=inf`` and the matrix norms ``ord={1, 2, inf, 'nuc'}`` + now always returns zero for empty arrays. Empty arrays have at least one axis + of size zero. 
This affects ``np.linalg.norm``, ``np.linalg.vector_norm``, and + ``np.linalg.matrix_norm``. Previously, NumPy would raise errors or return + zero depending on the shape of the array. + + (`gh-28343 `__) + +* A spelling error in the error message returned when converting a string to a + float with the method ``np.format_float_positional`` has been fixed. + + (`gh-28569 `__) + +* NumPy's ``__array_api_version__`` was upgraded from ``2023.12`` to ``2024.12``. + +* ``numpy.count_nonzero`` for ``axis=None`` (default) now returns a NumPy scalar + instead of a Python integer. + +* The parameter ``axis`` in the ``numpy.take_along_axis`` function now has a default + value of ``-1``. + + (`gh-28615 `__) + +* Printing of ``np.float16`` and ``np.float32`` scalars and arrays has been improved by + adjusting the transition to scientific notation based on the floating point precision. + A new legacy ``np.printoptions`` mode ``'2.2'`` has been added for backwards compatibility. + + (`gh-28703 `__) + +* Multiplication between a string and integer now raises OverflowError instead + of MemoryError if the result of the multiplication would create a string that + is too large to be represented. This follows Python's behavior. + + (`gh-29060 `__) + +``unique_values`` may return unsorted data +------------------------------------------ +The relatively new function (added in NumPy 2.0) ``unique_values`` may now +return unsorted results. Just as ``unique_counts`` and ``unique_all``, these +never guaranteed a sorted result; however, the result was sorted until now. In +cases where these do return a sorted result, this may change in future releases +to improve performance. + +(`gh-26018 `__) + +Changes to the main iterator and potential numerical changes +------------------------------------------------------------ +The main iterator, used in math functions and via ``np.nditer`` from Python and +``NpyIter`` in C, now behaves differently for some buffered iterations.
This +means that: + +* The buffer size used will often be smaller than the maximum buffer size + allowed by the ``buffersize`` parameter. + +* The "growinner" flag is now honored with buffered reductions when no operand + requires buffering. + +For ``np.sum()`` such changes in buffersize may slightly change numerical +results of floating point operations. Users who use "growinner" for custom +reductions could notice changes in precision (for example, in NumPy we removed +it from ``einsum`` to avoid most precision changes and improve precision for +some 64bit floating point inputs). + +(`gh-27883 `__) + +The minimum supported GCC version is now 9.3.0 +---------------------------------------------- +The minimum supported version was updated from 8.4.0 to 9.3.0, primarily in +order to reduce the chance of platform-specific bugs in old GCC versions +causing issues. + +(`gh-28102 `__) + +Changes to automatic bin selection in numpy.histogram +----------------------------------------------------- +The automatic bin selection algorithm in ``numpy.histogram`` has been modified +to avoid out-of-memory errors for samples with low variation. For full control +over the selected bins the user can set the ``bins`` or ``range`` parameters +of ``numpy.histogram``. + +(`gh-28426 `__) + +Build manylinux_2_28 wheels +--------------------------- +Wheels for linux systems will use the ``manylinux_2_28`` tag (instead of the +``manylinux2014`` tag), which means dropping support for redhat7/centos7, +amazonlinux2, debian9, ubuntu18.04, and other pre-glibc2.28 operating system +versions, as per the `PEP 600 support table`_. + +.. _`PEP 600 support table`: https://github.com/mayeut/pep600_compliance?tab=readme-ov-file#pep600-compliance-check + +(`gh-28436 `__) + +Remove use of -Wl,-ld_classic on macOS +-------------------------------------- +Remove use of -Wl,-ld_classic on macOS.
This hack is no longer needed by Spack, +and results in libraries that cannot link to other libraries built with ld +(new). -.. if release snippets have been incorporated already, uncomment the follow - line (leave the `.. include:: directive) +(`gh-28713 `__) -.. **Content from release note snippets in doc/release/upcoming_changes:** +Re-enable overriding functions in the ``numpy.strings`` +------------------------------------------------------- +Re-enable overriding functions in the ``numpy.strings`` module. -.. include:: notes-towncrier.rst +(`gh-28741 `__) From 3467b99ad3ff2d04e272acb2eb6cc62c78dcc7f7 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 7 Jun 2025 23:54:21 +0200 Subject: [PATCH 0090/1718] TYP: update typing stubs for ``_pyinstaller/hook-numpy.py`` --- numpy/_pyinstaller/hook-numpy.pyi | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/numpy/_pyinstaller/hook-numpy.pyi b/numpy/_pyinstaller/hook-numpy.pyi index 2642996dad7e..6da4914d7e5a 100644 --- a/numpy/_pyinstaller/hook-numpy.pyi +++ b/numpy/_pyinstaller/hook-numpy.pyi @@ -1,13 +1,6 @@ from typing import Final -# from `PyInstaller.compat` -is_conda: Final[bool] -is_pure_conda: Final[bool] +binaries: Final[list[tuple[str, str]]] = ... -# from `PyInstaller.utils.hooks` -def is_module_satisfies(requirements: str, version: None = None, version_attr: None = None) -> bool: ... - -binaries: Final[list[tuple[str, str]]] - -hiddenimports: Final[list[str]] -excludedimports: Final[list[str]] +hiddenimports: Final[list[str]] = ... +excludedimports: Final[list[str]] = ... From f8204f5e0f338ffad588e56704e596e6678dc4f4 Mon Sep 17 00:00:00 2001 From: Tim Hoffmann <2836374+timhoffm@users.noreply.github.com> Date: Sun, 8 Jun 2025 00:01:43 +0200 Subject: [PATCH 0091/1718] DOC: Document assertion comparison behavior between scalar and empty array Noted in #27457. The minimal thing we should do is document this behavior. While at it, I slightly homogenized the notes. 
--- numpy/testing/_private/utils.py | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index d7ceaeab72cc..9bc60d28b160 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -293,9 +293,10 @@ def assert_equal(actual, desired, err_msg='', verbose=True, *, strict=False): Notes ----- - By default, when one of `actual` and `desired` is a scalar and the other is - an array, the function checks that each element of the array is equal to - the scalar. This behaviour can be disabled by setting ``strict==True``. + When one of `actual` and `desired` is a scalar and the other is array_like, the + function checks that each element of the array_like is equal to the scalar. + Note that empty arrays are therefore considered equal to scalars. + This behaviour can be disabled by setting ``strict==True``. Examples -------- @@ -980,9 +981,10 @@ def assert_array_equal(actual, desired, err_msg='', verbose=True, *, Notes ----- - When one of `actual` and `desired` is a scalar and the other is array_like, - the function checks that each element of the array_like object is equal to - the scalar. This behaviour can be disabled with the `strict` parameter. + When one of `actual` and `desired` is a scalar and the other is array_like, the + function checks that each element of the array_like is equal to the scalar. + Note that empty arrays are therefore considered equal to scalars. + This behaviour can be disabled by setting ``strict==True``. Examples -------- @@ -1651,10 +1653,10 @@ def assert_allclose(actual, desired, rtol=1e-7, atol=0, equal_nan=True, Notes ----- - When one of `actual` and `desired` is a scalar and the other is - array_like, the function performs the comparison as if the scalar were - broadcasted to the shape of the array. - This behaviour can be disabled with the `strict` parameter. 
+ When one of `actual` and `desired` is a scalar and the other is array_like, the + function performs the comparison as if the scalar were broadcasted to the shape + of the array. Note that empty arrays are therefore considered equal to scalars. + This behaviour can be disabled by setting ``strict==True``. Examples -------- From 7bf85704f2e52589ce18b5124e29778227a36feb Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 8 Jun 2025 02:40:31 +0200 Subject: [PATCH 0092/1718] TYP: add missing ``numpy.lib`` exports --- numpy/lib/__init__.pyi | 34 +++++++++++++++++++++++++--------- 1 file changed, 25 insertions(+), 9 deletions(-) diff --git a/numpy/lib/__init__.pyi b/numpy/lib/__init__.pyi index 8532ef8d9fb9..6185a494d035 100644 --- a/numpy/lib/__init__.pyi +++ b/numpy/lib/__init__.pyi @@ -1,15 +1,30 @@ from numpy._core.function_base import add_newdoc from numpy._core.multiarray import add_docstring, tracemalloc_domain -from . import ( # noqa: F401 - array_utils, - format, - introspect, - mixins, - npyio, - scimath, - stride_tricks, -) +# all submodules of `lib` are accessible at runtime through `__getattr__`, +# so we implicitly re-export them here +from . import _array_utils_impl as _array_utils_impl +from . import _arraypad_impl as _arraypad_impl +from . import _arraysetops_impl as _arraysetops_impl +from . import _arrayterator_impl as _arrayterator_impl +from . import _datasource as _datasource +from . import _format_impl as _format_impl +from . import _function_base_impl as _function_base_impl +from . import _histograms_impl as _histograms_impl +from . import _index_tricks_impl as _index_tricks_impl +from . import _iotools as _iotools +from . import _nanfunctions_impl as _nanfunctions_impl +from . import _npyio_impl as _npyio_impl +from . import _polynomial_impl as _polynomial_impl +from . import _scimath_impl as _scimath_impl +from . import _shape_base_impl as _shape_base_impl +from . import _stride_tricks_impl as _stride_tricks_impl +from . 
import _twodim_base_impl as _twodim_base_impl +from . import _type_check_impl as _type_check_impl +from . import _ufunclike_impl as _ufunclike_impl +from . import _utils_impl as _utils_impl +from . import _version as _version +from . import array_utils, format, introspect, mixins, npyio, scimath, stride_tricks from ._arrayterator_impl import Arrayterator from ._version import NumpyVersion @@ -18,6 +33,7 @@ __all__ = [ "add_docstring", "add_newdoc", "array_utils", + "format", "introspect", "mixins", "NumpyVersion", From 128ab7f72ae83735dca2f27d2682503915e9d578 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 8 Jun 2025 03:35:16 +0200 Subject: [PATCH 0093/1718] TYP: add ``containsderivedtypes`` to ``f2py.auxfuncs.__all__`` --- numpy/f2py/auxfuncs.pyi | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/numpy/f2py/auxfuncs.pyi b/numpy/f2py/auxfuncs.pyi index 1212f229c660..f2ff09faf33b 100644 --- a/numpy/f2py/auxfuncs.pyi +++ b/numpy/f2py/auxfuncs.pyi @@ -10,6 +10,7 @@ from .cfuncs import errmess __all__ = [ "applyrules", "containscommon", + "containsderivedtypes", "debugcapi", "dictappend", "errmess", @@ -200,7 +201,7 @@ def isintent_inplace(var: _Var) -> bool: ... def isintent_aux(var: _Var) -> bool: ... # -def containsderivedtypes(rout: _ROut) -> _Bool: ... +def containsderivedtypes(rout: _ROut) -> L[0, 1]: ... def containscommon(rout: _ROut) -> _Bool: ... def hasexternals(rout: _ROut) -> bool: ... def hasresultnote(rout: _ROut) -> _Bool: ... 
From cd538067a4c6747aeddd68e5b0df9543da49a21c Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 8 Jun 2025 03:37:24 +0200 Subject: [PATCH 0094/1718] TYP: remove ``run_command`` annotations in ``f2py.diagnose`` --- numpy/f2py/diagnose.pyi | 3 --- 1 file changed, 3 deletions(-) diff --git a/numpy/f2py/diagnose.pyi b/numpy/f2py/diagnose.pyi index 29cc2b4988b3..b88194ac6bff 100644 --- a/numpy/f2py/diagnose.pyi +++ b/numpy/f2py/diagnose.pyi @@ -1,4 +1 @@ -from _typeshed import StrOrBytesPath - -def run_command(cmd: StrOrBytesPath) -> None: ... def run() -> None: ... From 61512bcef563f799dafff6266631985764386ffa Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 8 Jun 2025 04:10:59 +0200 Subject: [PATCH 0095/1718] BUG: Missing array-api ``capabilities()`` key --- numpy/_array_api_info.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/_array_api_info.py b/numpy/_array_api_info.py index 6ea9e13587f4..067e38798718 100644 --- a/numpy/_array_api_info.py +++ b/numpy/_array_api_info.py @@ -94,14 +94,14 @@ def capabilities(self): >>> info = np.__array_namespace_info__() >>> info.capabilities() {'boolean indexing': True, - 'data-dependent shapes': True} + 'data-dependent shapes': True, + 'max dimensions': 64} """ return { "boolean indexing": True, "data-dependent shapes": True, - # 'max rank' will be part of the 2024.12 standard - # "max rank": 64, + "max dimensions": 64, } def default_device(self): From 21c2e707dad89fb2498040c4badf231882f0404d Mon Sep 17 00:00:00 2001 From: V-R-S Date: Sun, 8 Jun 2025 17:02:03 +0000 Subject: [PATCH 0096/1718] TST: migrating from pytz to zoneinfo + tzdata (where needed) For migration from pytz to zoneinfo function get_tzoffset_from_pytzinfo from numpy/_core/src/multiarray/datetime.c is modified to use astimezone instead of fromutc. As the object ZoneInfo is not directly compatible to be used with datetime object. Hence, something like this would result in an exception. 
from datetime import datetime from zoneinfo import ZoneInfo a = datetime(2025, 6, 7, 10, 0, 0) zoneInfo = ZoneInfo("US/Central") b = zoneInfo.fromutc(a) ValueError: fromutc: dt.tzinfo is not self The function astimezone can be used with both pytz.timezone object and zoneinfo.ZoneInfo object But, if we want to use the datetime object consistently we cannot let it be a naive type i.e. without a timezone. As the default behaviour of astimezone would take the system timezone if the datetime object is not timezone aware. Hence, I had to also change the call to create datetime object to take UTC timezone. See #29064 --- environment.yml | 2 +- numpy/_core/src/multiarray/_datetime.h | 4 ++-- numpy/_core/src/multiarray/datetime.c | 10 +++++----- requirements/emscripten_test_requirements.txt | 2 +- requirements/test_requirements.txt | 3 ++- 5 files changed, 11 insertions(+), 10 deletions(-) diff --git a/environment.yml b/environment.yml index d2964bf78368..5f1ee5e81a5f 100644 --- a/environment.yml +++ b/environment.yml @@ -49,4 +49,4 @@ dependencies: - gitpython # Used in some tests - cffi - - pytz + - tzdata diff --git a/numpy/_core/src/multiarray/_datetime.h b/numpy/_core/src/multiarray/_datetime.h index dd25e1ffd6cc..112c57433094 100644 --- a/numpy/_core/src/multiarray/_datetime.h +++ b/numpy/_core/src/multiarray/_datetime.h @@ -174,8 +174,8 @@ convert_datetime_metadata_tuple_to_datetime_metadata(PyObject *tuple, npy_bool from_pickle); /* - * Gets a tzoffset in minutes by calling the fromutc() function on - * the Python datetime.tzinfo object. + * Gets a tzoffset in minutes by calling the astimezone() function on + * the Python datetime.datetime object. 
*/ NPY_NO_EXPORT int get_tzoffset_from_pytzinfo(PyObject *timezone, npy_datetimestruct *dts); diff --git a/numpy/_core/src/multiarray/datetime.c b/numpy/_core/src/multiarray/datetime.c index 9c024dbcd91c..d820474532ca 100644 --- a/numpy/_core/src/multiarray/datetime.c +++ b/numpy/_core/src/multiarray/datetime.c @@ -2245,8 +2245,8 @@ NpyDatetime_ConvertPyDateTimeToDatetimeStruct( } /* - * Gets a tzoffset in minutes by calling the fromutc() function on - * the Python datetime.tzinfo object. + * Gets a tzoffset in minutes by calling the astimezone() function on + * the Python datetime.datetime object. */ NPY_NO_EXPORT int get_tzoffset_from_pytzinfo(PyObject *timezone_obj, npy_datetimestruct *dts) @@ -2255,14 +2255,14 @@ get_tzoffset_from_pytzinfo(PyObject *timezone_obj, npy_datetimestruct *dts) npy_datetimestruct loc_dts; /* Create a Python datetime to give to the timezone object */ - dt = PyDateTime_FromDateAndTime((int)dts->year, dts->month, dts->day, - dts->hour, dts->min, 0, 0); + dt = PyDateTimeAPI->DateTime_FromDateAndTime((int)dts->year, dts->month, dts->day, + dts->hour, dts->min, 0, 0, PyDateTime_TimeZone_UTC, PyDateTimeAPI->DateTimeType); if (dt == NULL) { return -1; } /* Convert the datetime from UTC to local time */ - loc_dt = PyObject_CallMethod(timezone_obj, "fromutc", "O", dt); + loc_dt = PyObject_CallMethod(dt, "astimezone", "O", timezone_obj); Py_DECREF(dt); if (loc_dt == NULL) { return -1; diff --git a/requirements/emscripten_test_requirements.txt b/requirements/emscripten_test_requirements.txt index 18cfb219034d..019a69da687a 100644 --- a/requirements/emscripten_test_requirements.txt +++ b/requirements/emscripten_test_requirements.txt @@ -1,4 +1,4 @@ hypothesis==6.81.1 pytest==7.4.0 -pytz==2023.3.post1 +tzdata pytest-xdist diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index 4fb1d47bf50d..17260753db4a 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -4,7 +4,6 @@ 
setuptools==65.5.1 ; python_version < '3.12' setuptools ; python_version >= '3.12' hypothesis==6.104.1 pytest==7.4.0 -pytz==2023.3.post1 pytest-cov==4.1.0 meson ninja; sys_platform != "emscripten" @@ -17,3 +16,5 @@ mypy==1.16.0; platform_python_implementation != "PyPy" typing_extensions>=4.5.0 # for optional f2py encoding detection charset-normalizer +tzdata + From e48fdef5d7bf8ba2f90972c531cdfa07481ae535 Mon Sep 17 00:00:00 2001 From: David Seifert Date: Sun, 8 Jun 2025 19:24:04 +0200 Subject: [PATCH 0097/1718] BUG: remove `NPY_ALIGNMENT_REQUIRED` * This machinery requires strict-aliasing UB and isn't needed anymore with any GCC from the last 15 years. This might also fix #25004. Fixes: #28991 --- numpy/_core/include/numpy/npy_cpu.h | 12 ------------ numpy/_core/src/multiarray/common.h | 15 ++++----------- numpy/_core/src/multiarray/compiled_base.c | 14 +++++--------- numpy/_core/src/multiarray/item_selection.c | 7 +++++-- .../src/multiarray/lowlevel_strided_loops.c.src | 6 +----- 5 files changed, 15 insertions(+), 39 deletions(-) diff --git a/numpy/_core/include/numpy/npy_cpu.h b/numpy/_core/include/numpy/npy_cpu.h index 72f7331a0267..52e9d5996bd1 100644 --- a/numpy/_core/include/numpy/npy_cpu.h +++ b/numpy/_core/include/numpy/npy_cpu.h @@ -120,16 +120,4 @@ information about your platform (OS, CPU and compiler) #endif -/* - * Except for the following architectures, memory access is limited to the natural - * alignment of data types otherwise it may lead to bus error or performance regression. - * For more details about unaligned access, see https://www.kernel.org/doc/Documentation/unaligned-memory-access.txt. 
-*/ -#if defined(NPY_CPU_X86) || defined(NPY_CPU_AMD64) || defined(__aarch64__) || defined(__powerpc64__) - #define NPY_ALIGNMENT_REQUIRED 0 -#endif -#ifndef NPY_ALIGNMENT_REQUIRED - #define NPY_ALIGNMENT_REQUIRED 1 -#endif - #endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_CPU_H_ */ diff --git a/numpy/_core/src/multiarray/common.h b/numpy/_core/src/multiarray/common.h index e356b8251931..a18f74bda71a 100644 --- a/numpy/_core/src/multiarray/common.h +++ b/numpy/_core/src/multiarray/common.h @@ -11,6 +11,7 @@ #include "npy_static_data.h" #include "npy_import.h" #include +#include #ifdef __cplusplus extern "C" { @@ -230,15 +231,6 @@ npy_uint_alignment(int itemsize) * compared to memchr it returns one stride past end instead of NULL if needle * is not found. */ -#ifdef __clang__ - /* - * The code below currently makes use of !NPY_ALIGNMENT_REQUIRED, which - * should be OK but causes the clang sanitizer to warn. It may make - * sense to modify the code to avoid this "unaligned" access but - * it would be good to carefully check the performance changes. 
- */ - __attribute__((no_sanitize("alignment"))) -#endif static inline char * npy_memchr(char * haystack, char needle, npy_intp stride, npy_intp size, npy_intp * psubloopsize, int invert) @@ -259,11 +251,12 @@ npy_memchr(char * haystack, char needle, } else { /* usually find elements to skip path */ - if (!NPY_ALIGNMENT_REQUIRED && needle == 0 && stride == 1) { + if (needle == 0 && stride == 1) { /* iterate until last multiple of 4 */ char * block_end = haystack + size - (size % sizeof(unsigned int)); while (p < block_end) { - unsigned int v = *(unsigned int*)p; + unsigned int v; + memcpy(&v, p, sizeof(v)); if (v != 0) { break; } diff --git a/numpy/_core/src/multiarray/compiled_base.c b/numpy/_core/src/multiarray/compiled_base.c index 86b60cf75944..fee0d4a61a78 100644 --- a/numpy/_core/src/multiarray/compiled_base.c +++ b/numpy/_core/src/multiarray/compiled_base.c @@ -1620,19 +1620,15 @@ pack_inner(const char *inptr, bb[1] = npyv_tobits_b8(npyv_cmpneq_u8(v1, v_zero)); bb[2] = npyv_tobits_b8(npyv_cmpneq_u8(v2, v_zero)); bb[3] = npyv_tobits_b8(npyv_cmpneq_u8(v3, v_zero)); - if(out_stride == 1 && - (!NPY_ALIGNMENT_REQUIRED || isAligned)) { - npy_uint64 *ptr64 = (npy_uint64*)outptr; + if(out_stride == 1 && isAligned) { #if NPY_SIMD_WIDTH == 16 - npy_uint64 bcomp = bb[0] | (bb[1] << 16) | (bb[2] << 32) | (bb[3] << 48); - ptr64[0] = bcomp; + npy_uint64 arr[1] = {bb[0] | (bb[1] << 16) | (bb[2] << 32) | (bb[3] << 48)}; #elif NPY_SIMD_WIDTH == 32 - ptr64[0] = bb[0] | (bb[1] << 32); - ptr64[1] = bb[2] | (bb[3] << 32); + npy_uint64 arr[2] = {bb[0] | (bb[1] << 32), bb[2] | (bb[3] << 32)}; #else - ptr64[0] = bb[0]; ptr64[1] = bb[1]; - ptr64[2] = bb[2]; ptr64[3] = bb[3]; + npy_uint64 arr[4] = {bb[0], bb[1], bb[2], bb[3]}; #endif + memcpy(outptr, arr, sizeof(arr)); outptr += vstepx4; } else { for(int i = 0; i < 4; i++) { diff --git a/numpy/_core/src/multiarray/item_selection.c b/numpy/_core/src/multiarray/item_selection.c index d2db10633810..5c036b704774 100644 --- 
a/numpy/_core/src/multiarray/item_selection.c +++ b/numpy/_core/src/multiarray/item_selection.c @@ -4,6 +4,7 @@ #define PY_SSIZE_T_CLEAN #include #include +#include #include "numpy/arrayobject.h" #include "numpy/arrayscalars.h" @@ -2525,11 +2526,13 @@ count_nonzero_u8(const char *data, npy_intp bstride, npy_uintp len) len -= len_m; count = len_m - zcount; #else - if (!NPY_ALIGNMENT_REQUIRED || npy_is_aligned(data, sizeof(npy_uint64))) { + if (npy_is_aligned(data, sizeof(npy_uint64))) { int step = 6 * sizeof(npy_uint64); int left_bytes = len % step; for (const char *end = data + len; data < end - left_bytes; data += step) { - count += count_nonzero_bytes_384((const npy_uint64 *)data); + npy_uint64 arr[6]; + memcpy(arr, data, step); + count += count_nonzero_bytes_384(arr); } len = left_bytes; } diff --git a/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src b/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src index 01ffd225274f..0c4eb3dd9a8d 100644 --- a/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src +++ b/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src @@ -33,11 +33,7 @@ * instructions (16 byte). * So this flag can only be enabled if autovectorization is disabled. */ -#if NPY_ALIGNMENT_REQUIRED -# define NPY_USE_UNALIGNED_ACCESS 0 -#else -# define NPY_USE_UNALIGNED_ACCESS 0 -#endif +#define NPY_USE_UNALIGNED_ACCESS 0 #define _NPY_NOP1(x) (x) #define _NPY_NOP2(x) (x) From f0e082de06ee5ce1ad120a91a5d3053b1a6032d9 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sun, 8 Jun 2025 19:24:05 +0200 Subject: [PATCH 0098/1718] DOC: Document the removal of the NPY_ALIGNMENT_REQUIRED macro. 
--- doc/release/upcoming_changes/29094.compatibility.rst | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 doc/release/upcoming_changes/29094.compatibility.rst diff --git a/doc/release/upcoming_changes/29094.compatibility.rst b/doc/release/upcoming_changes/29094.compatibility.rst new file mode 100644 index 000000000000..961ee6504dae --- /dev/null +++ b/doc/release/upcoming_changes/29094.compatibility.rst @@ -0,0 +1,7 @@ +The Macro NPY_ALIGNMENT_REQUIRED has been removed +------------------------------------------------- +The macro was defined in the `npy_cpu.h` file, so might be regarded as +semipublic. As it turns out, with modern compilers and hardware it is almost +always the case that alignment is required, so numpy no longer uses the macro. +It is unlikely anyone uses it, but you might want to compile with the `-Wundef` +flag or equivalent to be sure. From da3c2b43453786194f04b7c1253794f19e54256e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Melissa=20Weber=20Mendon=C3=A7a?= Date: Mon, 9 Jun 2025 12:02:26 -0300 Subject: [PATCH 0099/1718] DOC: Remove version switcher colors [skip actions][skip azp][skip cirrus] --- doc/source/_static/numpy.css | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/doc/source/_static/numpy.css b/doc/source/_static/numpy.css index 9df2f6c546c5..d086215dd638 100644 --- a/doc/source/_static/numpy.css +++ b/doc/source/_static/numpy.css @@ -22,18 +22,6 @@ body { /* Version switcher colors from PyData Sphinx Theme */ -.version-switcher__button[data-active-version-name*="devdocs"] { - background-color: var(--pst-color-warning); - border-color: var(--pst-color-warning); - opacity: 0.9; -} - -.version-switcher__button:not([data-active-version-name*="stable"]):not([data-active-version-name*="dev"]):not([data-active-version-name*="pull"]) { - background-color: var(--pst-color-danger); - border-color: var(--pst-color-danger); - opacity: 0.9; -} - .version-switcher__menu a.list-group-item { font-size: small; } From 
a8d35009a6a3294423448b63233870f128b7002a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Melissa=20Weber=20Mendon=C3=A7a?= Date: Mon, 9 Jun 2025 12:14:18 -0300 Subject: [PATCH 0100/1718] MAINT: Update comment for clarity --- doc/source/_static/numpy.css | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/_static/numpy.css b/doc/source/_static/numpy.css index d086215dd638..1555dafb5539 100644 --- a/doc/source/_static/numpy.css +++ b/doc/source/_static/numpy.css @@ -20,7 +20,7 @@ body { width: 15%; } -/* Version switcher colors from PyData Sphinx Theme */ +/* Version switcher from PyData Sphinx Theme */ .version-switcher__menu a.list-group-item { font-size: small; From aed7a608f579511e60bd036119990e4035cbe916 Mon Sep 17 00:00:00 2001 From: jorenham Date: Mon, 9 Jun 2025 17:14:19 +0200 Subject: [PATCH 0101/1718] CI: Run mypy with Python 3.13 --- .github/workflows/mypy.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml index 36e89504def7..81fa57239b9b 100644 --- a/.github/workflows/mypy.yml +++ b/.github/workflows/mypy.yml @@ -46,9 +46,9 @@ jobs: fail-fast: false matrix: os_python: + - [macos-latest, '3.13'] - [ubuntu-latest, '3.12'] - [windows-latest, '3.11'] - - [macos-latest, '3.11'] steps: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 with: From 7d950ec5525fbdaf8b9752364bf52de3cd127436 Mon Sep 17 00:00:00 2001 From: mattip Date: Mon, 9 Jun 2025 22:18:29 +0300 Subject: [PATCH 0102/1718] DOC: tweak release walkthrough for numpy.org news blurb [skip actions][skip azp][skip cirrus] --- doc/RELEASE_WALKTHROUGH.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/RELEASE_WALKTHROUGH.rst b/doc/RELEASE_WALKTHROUGH.rst index 702803172477..6d2194b5c4e6 100644 --- a/doc/RELEASE_WALKTHROUGH.rst +++ b/doc/RELEASE_WALKTHROUGH.rst @@ -344,6 +344,8 @@ This assumes that you have forked ``_:: to the previous links for example. 
- For the ``*.0`` release in a cycle, add a new section at the top with a short description of the new features and point the news link to it. +- Edit the newsHeader and date fields at the top of news.md +- Also edit the butttonText on line 14 in content/en/config.yaml commit and push:: From 81b2a67fac4782498796d4012208593640faa5ec Mon Sep 17 00:00:00 2001 From: V-R-S Date: Tue, 10 Jun 2025 11:40:25 +0000 Subject: [PATCH 0103/1718] Adding refactored tests . . --- numpy/_core/tests/test_datetime.py | 27 ++++++++++---------------- numpy/_core/tests/test_deprecations.py | 6 ------ 2 files changed, 10 insertions(+), 23 deletions(-) diff --git a/numpy/_core/tests/test_datetime.py b/numpy/_core/tests/test_datetime.py index 1cbacb8a26a8..a452259ec5c9 100644 --- a/numpy/_core/tests/test_datetime.py +++ b/numpy/_core/tests/test_datetime.py @@ -1,5 +1,6 @@ import datetime import pickle +from zoneinfo import ZoneInfo import pytest @@ -16,13 +17,6 @@ suppress_warnings, ) -# Use pytz to test out various time zones if available -try: - from pytz import timezone as tz - _has_pytz = True -except ImportError: - _has_pytz = False - try: RecursionError except NameError: @@ -1886,7 +1880,6 @@ def test_datetime_as_string(self): np.datetime64('2032-01-01T00:00:00', 'us'), unit='auto'), '2032-01-01') - @pytest.mark.skipif(not _has_pytz, reason="The pytz module is not available.") def test_datetime_as_string_timezone(self): # timezone='local' vs 'UTC' a = np.datetime64('2010-03-15T06:30', 'm') @@ -1901,29 +1894,29 @@ def test_datetime_as_string_timezone(self): b = np.datetime64('2010-02-15T06:30', 'm') - assert_equal(np.datetime_as_string(a, timezone=tz('US/Central')), + assert_equal(np.datetime_as_string(a, timezone=ZoneInfo('US/Central')), '2010-03-15T01:30-0500') - assert_equal(np.datetime_as_string(a, timezone=tz('US/Eastern')), + assert_equal(np.datetime_as_string(a, timezone=ZoneInfo('US/Eastern')), '2010-03-15T02:30-0400') - assert_equal(np.datetime_as_string(a, 
timezone=tz('US/Pacific')), + assert_equal(np.datetime_as_string(a, timezone=ZoneInfo('US/Pacific')), '2010-03-14T23:30-0700') - assert_equal(np.datetime_as_string(b, timezone=tz('US/Central')), + assert_equal(np.datetime_as_string(b, timezone=ZoneInfo('US/Central')), '2010-02-15T00:30-0600') - assert_equal(np.datetime_as_string(b, timezone=tz('US/Eastern')), + assert_equal(np.datetime_as_string(b, timezone=ZoneInfo('US/Eastern')), '2010-02-15T01:30-0500') - assert_equal(np.datetime_as_string(b, timezone=tz('US/Pacific')), + assert_equal(np.datetime_as_string(b, timezone=ZoneInfo('US/Pacific')), '2010-02-14T22:30-0800') # Dates to strings with a timezone attached is disabled by default assert_raises(TypeError, np.datetime_as_string, a, unit='D', - timezone=tz('US/Pacific')) + timezone=ZoneInfo('US/Pacific')) # Check that we can print out the date in the specified time zone assert_equal(np.datetime_as_string(a, unit='D', - timezone=tz('US/Pacific'), casting='unsafe'), + timezone=ZoneInfo('US/Pacific'), casting='unsafe'), '2010-03-14') assert_equal(np.datetime_as_string(b, unit='D', - timezone=tz('US/Central'), casting='unsafe'), + timezone=ZoneInfo('US/Central'), casting='unsafe'), '2010-02-15') def test_datetime_arange(self): diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index d90c15565c22..cb552357fc96 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -13,12 +13,6 @@ import numpy as np from numpy.testing import assert_raises, temppath -try: - import pytz # noqa: F401 - _has_pytz = True -except ImportError: - _has_pytz = False - class _DeprecationTestCase: # Just as warning: warnings uses re.match, so the start of this message From 0f1a6f81d3e2684802f59cd0d3da5b4ceac770f3 Mon Sep 17 00:00:00 2001 From: V-R-S Date: Tue, 10 Jun 2025 14:44:48 +0000 Subject: [PATCH 0104/1718] Removing additional references to pytz . . 
--- .github/workflows/linux.yml | 2 +- numpy/_core/multiarray.py | 6 +++--- numpy/_core/tests/test_datetime.py | 8 +++++++- requirements/doc_requirements.txt | 2 -- 4 files changed, 11 insertions(+), 7 deletions(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 668c1191d055..a0e549d86775 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -271,7 +271,7 @@ jobs: # - name: Check docstests # shell: 'script -q -e -c "bash --noprofile --norc -eo pipefail {0}"' # run: | - # pip install scipy-doctest>=1.8.0 hypothesis==6.104.1 matplotlib scipy pytz pandas + # pip install scipy-doctest>=1.8.0 hypothesis==6.104.1 matplotlib scipy pandas # spin check-docs -v # spin check-tutorials -v diff --git a/numpy/_core/multiarray.py b/numpy/_core/multiarray.py index 236ca7e7c9aa..5599494720b6 100644 --- a/numpy/_core/multiarray.py +++ b/numpy/_core/multiarray.py @@ -1723,7 +1723,7 @@ def datetime_as_string(arr, unit=None, timezone=None, casting=None): Examples -------- >>> import numpy as np - >>> import pytz + >>> from zoneinfo import ZoneInfo >>> d = np.arange('2002-10-27T04:30', 4*60, 60, dtype='M8[m]') >>> d array(['2002-10-27T04:30', '2002-10-27T05:30', '2002-10-27T06:30', @@ -1736,9 +1736,9 @@ def datetime_as_string(arr, unit=None, timezone=None, casting=None): '2002-10-27T07:30Z'], dtype='>> np.datetime_as_string(d, timezone=pytz.timezone('US/Eastern')) + >>> np.datetime_as_string(d, timezone=ZoneInfo('US/Eastern')) array(['2002-10-27T00:30-0400', '2002-10-27T01:30-0400', '2002-10-27T01:30-0500', '2002-10-27T02:30-0500'], dtype='=1.8.0 # interactive documentation utilities From 29c85f376bc41095cdc0c7060bb38605e34e040b Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 11 Jun 2025 19:58:40 +0200 Subject: [PATCH 0105/1718] TYP: Accept dispatcher function with optional returns in ``_core.overrides`` (#29171) Co-authored-by: Sebastian Berg --- numpy/_core/overrides.pyi | 23 ++++++++++------------- 1 file changed, 10 
insertions(+), 13 deletions(-) diff --git a/numpy/_core/overrides.pyi b/numpy/_core/overrides.pyi index 05453190efd4..91d624203e81 100644 --- a/numpy/_core/overrides.pyi +++ b/numpy/_core/overrides.pyi @@ -1,11 +1,11 @@ from collections.abc import Callable, Iterable -from typing import Any, Final, NamedTuple, ParamSpec, TypeVar - -from numpy._typing import _SupportsArrayFunc +from typing import Any, Final, NamedTuple, ParamSpec, TypeAlias, TypeVar _T = TypeVar("_T") _Tss = ParamSpec("_Tss") -_FuncT = TypeVar("_FuncT", bound=Callable[..., object]) +_FuncLikeT = TypeVar("_FuncLikeT", bound=type | Callable[..., object]) + +_Dispatcher: TypeAlias = Callable[_Tss, Iterable[object]] ### @@ -18,14 +18,11 @@ class ArgSpec(NamedTuple): keywords: str | None defaults: tuple[Any, ...] -def get_array_function_like_doc(public_api: Callable[..., Any], docstring_template: str = "") -> str: ... -def finalize_array_function_like(public_api: _FuncT) -> _FuncT: ... +def get_array_function_like_doc(public_api: Callable[..., object], docstring_template: str = "") -> str: ... +def finalize_array_function_like(public_api: _FuncLikeT) -> _FuncLikeT: ... # -def verify_matching_signatures( - implementation: Callable[_Tss, object], - dispatcher: Callable[_Tss, Iterable[_SupportsArrayFunc]], -) -> None: ... +def verify_matching_signatures(implementation: Callable[_Tss, object], dispatcher: _Dispatcher[_Tss]) -> None: ... # NOTE: This actually returns a `_ArrayFunctionDispatcher` callable wrapper object, with # the original wrapped callable stored in the `._implementation` attribute. It checks @@ -33,11 +30,11 @@ def verify_matching_signatures( # specifies. Since the dispatcher only returns an iterable of passed array-like args, # this overridable behaviour is impossible to annotate. 
def array_function_dispatch( - dispatcher: Callable[_Tss, Iterable[_SupportsArrayFunc]] | None = None, + dispatcher: _Dispatcher[_Tss] | None = None, module: str | None = None, verify: bool = True, docs_from_dispatcher: bool = False, -) -> Callable[[_FuncT], _FuncT]: ... +) -> Callable[[_FuncLikeT], _FuncLikeT]: ... # def array_function_from_dispatcher( @@ -45,4 +42,4 @@ def array_function_from_dispatcher( module: str | None = None, verify: bool = True, docs_from_dispatcher: bool = True, -) -> Callable[[Callable[_Tss, Iterable[_SupportsArrayFunc]]], Callable[_Tss, _T]]: ... +) -> Callable[[_Dispatcher[_Tss]], Callable[_Tss, _T]]: ... From a17726a2d6ce58d137a68e13711b6fb3e98989bb Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 11 Jun 2025 20:23:19 +0200 Subject: [PATCH 0106/1718] TYP: ``lib._iotools`` annotation improvements (#29177) --- numpy/lib/_iotools.pyi | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/numpy/lib/_iotools.pyi b/numpy/lib/_iotools.pyi index 21cfc3b19503..82275940e137 100644 --- a/numpy/lib/_iotools.pyi +++ b/numpy/lib/_iotools.pyi @@ -13,11 +13,12 @@ from typing import ( import numpy as np import numpy.typing as npt +from numpy._typing._dtype_like import _DTypeLikeNested _T = TypeVar("_T") @type_check_only -class _ValidationKwargs(TypedDict, total=False): +class _NameValidatorKwargs(TypedDict, total=False): excludelist: Iterable[str] | None deletechars: Iterable[str] | None case_sensitive: Literal["upper", "lower"] | bool | None @@ -25,7 +26,7 @@ class _ValidationKwargs(TypedDict, total=False): ### -__docformat__: Final[str] = "restructuredtext en" +__docformat__: Final = "restructuredtext en" class ConverterError(Exception): ... class ConverterLockError(ConverterError): ... @@ -98,17 +99,18 @@ class StringConverter: @classmethod def upgrade_mapper(cls, func: Callable[[str], Any], default: object | None = None) -> None: ... +def _decode_line(line: str | bytes, encoding: str) -> str: ... 
+def _is_string_like(obj: object) -> bool: ... +def _is_bytes_like(obj: object) -> bool: ... +def has_nested_fields(ndtype: np.dtype[np.void]) -> bool: ... +def flatten_dtype(ndtype: np.dtype[np.void], flatten_base: bool = False) -> type[np.dtype]: ... @overload def str2bool(value: Literal["false", "False", "FALSE"]) -> Literal[False]: ... @overload def str2bool(value: Literal["true", "True", "TRUE"]) -> Literal[True]: ... - -# -def has_nested_fields(ndtype: np.dtype[np.void]) -> bool: ... -def flatten_dtype(ndtype: np.dtype[np.void], flatten_base: bool = False) -> type[np.dtype]: ... def easy_dtype( - ndtype: npt.DTypeLike, - names: Iterable[str] | None = None, + ndtype: str | Sequence[_DTypeLikeNested], + names: str | Sequence[str] | None = None, defaultfmt: str = "f%i", - **validationargs: Unpack[_ValidationKwargs], + **validationargs: Unpack[_NameValidatorKwargs], ) -> np.dtype[np.void]: ... From 9cbddda01babcc67b9aa72e707ea0320cf0ad22f Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 11 Jun 2025 20:27:11 +0200 Subject: [PATCH 0107/1718] TYP: ``any(None)`` and ``all(None)`` (#29176) --- numpy/_core/fromnumeric.pyi | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index 050eb9f75c40..f0f83093c3b1 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -819,9 +819,10 @@ def sum( where: _ArrayLikeBool_co = ..., ) -> _ArrayT: ... +# keep in sync with `any` @overload def all( - a: ArrayLike, + a: ArrayLike | None, axis: None = None, out: None = None, keepdims: Literal[False, 0] | _NoValueType = ..., @@ -830,7 +831,7 @@ def all( ) -> np.bool: ... @overload def all( - a: ArrayLike, + a: ArrayLike | None, axis: int | tuple[int, ...] | None = None, out: None = None, keepdims: _BoolLike_co | _NoValueType = ..., @@ -839,7 +840,7 @@ def all( ) -> Incomplete: ... @overload def all( - a: ArrayLike, + a: ArrayLike | None, axis: int | tuple[int, ...] 
| None, out: _ArrayT, keepdims: _BoolLike_co | _NoValueType = ..., @@ -848,7 +849,7 @@ def all( ) -> _ArrayT: ... @overload def all( - a: ArrayLike, + a: ArrayLike | None, axis: int | tuple[int, ...] | None = None, *, out: _ArrayT, @@ -856,9 +857,10 @@ def all( where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... +# keep in sync with `all` @overload def any( - a: ArrayLike, + a: ArrayLike | None, axis: None = None, out: None = None, keepdims: Literal[False, 0] | _NoValueType = ..., @@ -867,7 +869,7 @@ def any( ) -> np.bool: ... @overload def any( - a: ArrayLike, + a: ArrayLike | None, axis: int | tuple[int, ...] | None = None, out: None = None, keepdims: _BoolLike_co | _NoValueType = ..., @@ -876,7 +878,7 @@ def any( ) -> Incomplete: ... @overload def any( - a: ArrayLike, + a: ArrayLike | None, axis: int | tuple[int, ...] | None, out: _ArrayT, keepdims: _BoolLike_co | _NoValueType = ..., @@ -885,7 +887,7 @@ def any( ) -> _ArrayT: ... @overload def any( - a: ArrayLike, + a: ArrayLike | None, axis: int | tuple[int, ...] | None = None, *, out: _ArrayT, @@ -893,6 +895,7 @@ def any( where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... 
+# @overload def cumsum( a: _ArrayLike[_ScalarT], From 42b875f72a44beb6acf19b6b39f2628c0cf6599a Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 11 Jun 2025 20:28:32 +0200 Subject: [PATCH 0108/1718] TYP: Fix invalid inline annotations in ``lib._function_base_impl`` (#29175) * TYP: Fix invalid inline annotations in ``lib._function_base_impl`` * TYP: ``ruff check --fix`` * TYP: prevent inline annotation from causing a circular import error --- numpy/lib/_function_base_impl.py | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 63346088b6e2..f217af64fb4b 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -4711,14 +4711,14 @@ def _inverted_cdf(n, quantiles): def _quantile_ureduce_func( - a: np.array, - q: np.array, - weights: np.array, - axis: int | None = None, - out=None, - overwrite_input: bool = False, - method="linear", -) -> np.array: + a: np.ndarray, + q: np.ndarray, + weights: np.ndarray, + axis: int | None = None, + out: np.ndarray | None = None, + overwrite_input: bool = False, + method: str = "linear", +) -> np.ndarray: if q.ndim > 2: # The code below works fine for nd, but it might not have useful # semantics. For now, keep the supported dimensions the same as it was @@ -4784,13 +4784,13 @@ def _get_indexes(arr, virtual_indexes, valid_values_count): def _quantile( - arr: np.array, - quantiles: np.array, - axis: int = -1, - method="linear", - out=None, - weights=None, -): + arr: "np.typing.ArrayLike", + quantiles: np.ndarray, + axis: int = -1, + method: str = "linear", + out: np.ndarray | None = None, + weights: "np.typing.ArrayLike | None" = None, +) -> np.ndarray: """ Private function that doesn't support extended axis or keepdims. 
These methods are extended to this function using _ureduce From 1512865bea80182f360318eb471335326897b707 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 11 Jun 2025 20:31:11 +0200 Subject: [PATCH 0109/1718] TYP: ``numpy._NoValue`` (#29170) --- numpy/__init__.pyi | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 41d7411dfdd8..ae26ac2f19f8 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -432,6 +432,8 @@ from numpy._core.shape_base import ( ) from ._expired_attrs_2_0 import __expired_attributes__ as __expired_attributes__ +from ._globals import _CopyMode as _CopyMode +from ._globals import _NoValue as _NoValue from numpy.lib import ( scimath as emath, @@ -495,8 +497,6 @@ from numpy.lib._function_base_impl import ( quantile, ) -from numpy._globals import _CopyMode - from numpy.lib._histograms_impl import ( histogram_bin_edges, histogram, From 7d8b1d5d49b6be5bd47ce2a2016283ffc5b43f0d Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 11 Jun 2025 20:33:36 +0200 Subject: [PATCH 0110/1718] TYP: ``out=...`` in ufuncs (#29169) --- numpy/_typing/_ufunc.pyi | 179 ++++++++++++++++++++++----------------- 1 file changed, 99 insertions(+), 80 deletions(-) diff --git a/numpy/_typing/_ufunc.pyi b/numpy/_typing/_ufunc.pyi index 104307da89db..790149d9c7fb 100644 --- a/numpy/_typing/_ufunc.pyi +++ b/numpy/_typing/_ufunc.pyi @@ -4,8 +4,9 @@ The signatures of the ufuncs are too varied to reasonably type with a single class. So instead, `ufunc` has been expanded into four private subclasses, one for each combination of `~ufunc.nin` and `~ufunc.nout`. 
-""" +""" # noqa: PYI021 +from types import EllipsisType from typing import ( Any, Generic, @@ -102,8 +103,9 @@ class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i @overload def __call__( self, - __x1: _ScalarLike_co, - out: None = ..., + x1: _ScalarLike_co, + /, + out: EllipsisType | None = ..., *, where: _ArrayLikeBool_co | None = ..., casting: _CastingKind = ..., @@ -115,8 +117,9 @@ class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i @overload def __call__( self, - __x1: ArrayLike, - out: NDArray[Any] | tuple[NDArray[Any]] | None = ..., + x1: ArrayLike, + /, + out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = ..., *, where: _ArrayLikeBool_co | None = ..., casting: _CastingKind = ..., @@ -128,8 +131,9 @@ class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i @overload def __call__( self, - __x1: _SupportsArrayUFunc, - out: NDArray[Any] | tuple[NDArray[Any]] | None = ..., + x1: _SupportsArrayUFunc, + /, + out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = ..., *, where: _ArrayLikeBool_co | None = ..., casting: _CastingKind = ..., @@ -176,7 +180,7 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i x1: _ScalarLike_co, x2: _ScalarLike_co, /, - out: None = None, + out: EllipsisType | None = None, *, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], @@ -185,9 +189,9 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i def __call__( self, x1: ArrayLike, - x2: NDArray[np.generic], + x2: NDArray[Any], /, - out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = None, *, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], @@ -195,10 +199,10 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i @overload # (array, array-like) -> array def __call__( self, - x1: 
NDArray[np.generic], + x1: NDArray[Any], x2: ArrayLike, /, - out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = None, *, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], @@ -209,7 +213,7 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i x1: ArrayLike, x2: ArrayLike, /, - out: NDArray[np.generic] | tuple[NDArray[np.generic]], + out: NDArray[Any] | tuple[NDArray[Any]], *, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], @@ -220,7 +224,7 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i x1: ArrayLike, x2: ArrayLike, /, - out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = None, *, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], @@ -239,7 +243,7 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i array: ArrayLike, axis: _ShapeLike | None = ..., dtype: DTypeLike = ..., - out: NDArray[Any] | None = ..., + out: NDArray[Any] | EllipsisType | None = ..., keepdims: bool = ..., initial: Any = ..., where: _ArrayLikeBool_co = ..., @@ -250,7 +254,7 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i array: ArrayLike, axis: SupportsIndex = ..., dtype: DTypeLike = ..., - out: NDArray[Any] | None = ..., + out: NDArray[Any] | EllipsisType | None = ..., ) -> NDArray[Any]: ... def reduceat( @@ -259,7 +263,7 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i indices: _ArrayLikeInt_co, axis: SupportsIndex = ..., dtype: DTypeLike = ..., - out: NDArray[Any] | None = ..., + out: NDArray[Any] | EllipsisType | None = ..., ) -> NDArray[Any]: ... 
@overload # (scalar, scalar) -> scalar @@ -269,7 +273,7 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i B: _ScalarLike_co, /, *, - out: None = None, + out: EllipsisType | None = None, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], ) -> Any: ... @@ -277,21 +281,21 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i def outer( self, A: ArrayLike, - B: NDArray[np.generic], + B: NDArray[Any], /, *, - out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = None, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], ) -> NDArray[Any]: ... @overload # (array, array-like) -> array def outer( self, - A: NDArray[np.generic], + A: NDArray[Any], B: ArrayLike, /, *, - out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = None, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], ) -> NDArray[Any]: ... @@ -302,7 +306,7 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i B: ArrayLike, /, *, - out: NDArray[np.generic] | tuple[NDArray[np.generic]], + out: NDArray[Any] | tuple[NDArray[Any]], dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], ) -> NDArray[Any]: ... @@ -313,7 +317,7 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i B: ArrayLike, /, *, - out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = None, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], ) -> NDArray[Any] | Any: ... 
@@ -340,10 +344,12 @@ class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i @overload def __call__( self, - __x1: _ScalarLike_co, - __out1: None = ..., - __out2: None = ..., + x1: _ScalarLike_co, + out1: EllipsisType | None = ..., + out2: None = ..., + /, *, + out: EllipsisType | None = ..., where: _ArrayLikeBool_co | None = ..., casting: _CastingKind = ..., order: _OrderKACF = ..., @@ -354,11 +360,12 @@ class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i @overload def __call__( self, - __x1: ArrayLike, - __out1: NDArray[Any] | None = ..., - __out2: NDArray[Any] | None = ..., + x1: ArrayLike, + out1: NDArray[Any] | EllipsisType | None = ..., + out2: NDArray[Any] | None = ..., + /, *, - out: _2Tuple[NDArray[Any]] = ..., + out: _2Tuple[NDArray[Any]] | EllipsisType = ..., where: _ArrayLikeBool_co | None = ..., casting: _CastingKind = ..., order: _OrderKACF = ..., @@ -369,11 +376,12 @@ class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i @overload def __call__( self, - __x1: _SupportsArrayUFunc, - __out1: NDArray[Any] | None = ..., - __out2: NDArray[Any] | None = ..., + x1: _SupportsArrayUFunc, + out1: NDArray[Any] | EllipsisType | None = ..., + out2: NDArray[Any] | None = ..., + /, *, - out: _2Tuple[NDArray[Any]] = ..., + out: _2Tuple[NDArray[Any]] | EllipsisType = ..., where: _ArrayLikeBool_co | None = ..., casting: _CastingKind = ..., order: _OrderKACF = ..., @@ -410,11 +418,13 @@ class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i @overload def __call__( self, - __x1: _ScalarLike_co, - __x2: _ScalarLike_co, - __out1: None = ..., - __out2: None = ..., + x1: _ScalarLike_co, + x2: _ScalarLike_co, + out1: EllipsisType | None = ..., + out2: None = ..., + /, *, + out: EllipsisType | None = ..., where: _ArrayLikeBool_co | None = ..., casting: _CastingKind = ..., order: _OrderKACF = ..., @@ -425,12 +435,13 @@ class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, 
_NTypes, _IDType]): # type: i @overload def __call__( self, - __x1: ArrayLike, - __x2: ArrayLike, - __out1: NDArray[Any] | None = ..., - __out2: NDArray[Any] | None = ..., + x1: ArrayLike, + x2: ArrayLike, + out1: NDArray[Any] | EllipsisType | None = ..., + out2: NDArray[Any] | None = ..., + /, *, - out: _2Tuple[NDArray[Any]] = ..., + out: _2Tuple[NDArray[Any]] | EllipsisType = ..., where: _ArrayLikeBool_co | None = ..., casting: _CastingKind = ..., order: _OrderKACF = ..., @@ -468,9 +479,10 @@ class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType, _Signature] @overload def __call__( self, - __x1: ArrayLike, - __x2: ArrayLike, - out: None = ..., + x1: ArrayLike, + x2: ArrayLike, + /, + out: EllipsisType | None = ..., *, casting: _CastingKind = ..., order: _OrderKACF = ..., @@ -482,9 +494,10 @@ class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType, _Signature] @overload def __call__( self, - __x1: ArrayLike, - __x2: ArrayLike, - out: NDArray[Any] | tuple[NDArray[Any]], + x1: ArrayLike, + x2: ArrayLike, + /, + out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType, *, casting: _CastingKind = ..., order: _OrderKACF = ..., @@ -556,7 +569,7 @@ class _PyFunc_Nin1_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno self, x1: _ScalarLike_co, /, - out: None = ..., + out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs2], ) -> _ReturnType_co: ... @overload @@ -564,7 +577,7 @@ class _PyFunc_Nin1_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno self, x1: ArrayLike, /, - out: None = ..., + out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs2], ) -> _ReturnType_co | NDArray[np.object_]: ... 
@overload @@ -580,7 +593,7 @@ class _PyFunc_Nin1_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno self, x1: _SupportsArrayUFunc, /, - out: NDArray[Any] | tuple[NDArray[Any]] | None = ..., + out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs2], ) -> Any: ... @@ -611,7 +624,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno x1: _ScalarLike_co, x2: _ScalarLike_co, /, - out: None = ..., + out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], ) -> _ReturnType_co: ... @overload @@ -620,7 +633,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno x1: ArrayLike, x2: ArrayLike, /, - out: None = ..., + out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], ) -> _ReturnType_co | NDArray[np.object_]: ... @overload @@ -638,7 +651,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno x1: _SupportsArrayUFunc, x2: _SupportsArrayUFunc | ArrayLike, /, - out: NDArray[Any] | tuple[NDArray[Any]] | None = ..., + out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], ) -> Any: ... @overload @@ -647,7 +660,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno x1: ArrayLike, x2: _SupportsArrayUFunc, /, - out: NDArray[Any] | tuple[NDArray[Any]] | None = ..., + out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], ) -> Any: ... 
@@ -656,11 +669,11 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno @overload def reduce( self, + /, array: ArrayLike, axis: _ShapeLike | None, dtype: DTypeLike, out: _ArrayT, - /, keepdims: bool = ..., initial: _ScalarLike_co = ..., where: _ArrayLikeBool_co = ..., @@ -685,7 +698,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno array: ArrayLike, axis: _ShapeLike | None = ..., dtype: DTypeLike = ..., - out: None = ..., + out: EllipsisType | None = ..., *, keepdims: Literal[True], initial: _ScalarLike_co = ..., @@ -698,7 +711,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno array: ArrayLike, axis: _ShapeLike | None = ..., dtype: DTypeLike = ..., - out: None = ..., + out: EllipsisType | None = ..., keepdims: bool = ..., initial: _ScalarLike_co = ..., where: _ArrayLikeBool_co = ..., @@ -707,12 +720,12 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno @overload def reduceat( self, + /, array: ArrayLike, indices: _ArrayLikeInt_co, axis: SupportsIndex, dtype: DTypeLike, out: _ArrayT, - /, ) -> _ArrayT: ... @overload def reduceat( @@ -733,7 +746,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno indices: _ArrayLikeInt_co, axis: SupportsIndex = ..., dtype: DTypeLike = ..., - out: None = ..., + out: EllipsisType | None = ..., ) -> NDArray[np.object_]: ... @overload def reduceat( @@ -743,21 +756,22 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno indices: _ArrayLikeInt_co, axis: SupportsIndex = ..., dtype: DTypeLike = ..., - out: NDArray[Any] | tuple[NDArray[Any]] | None = ..., + out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = ..., ) -> Any: ... @overload def accumulate( self, + /, array: ArrayLike, axis: SupportsIndex, dtype: DTypeLike, out: _ArrayT, - /, ) -> _ArrayT: ... 
@overload def accumulate( self, + /, array: ArrayLike, axis: SupportsIndex = ..., dtype: DTypeLike = ..., @@ -771,7 +785,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno array: ArrayLike, axis: SupportsIndex = ..., dtype: DTypeLike = ..., - out: None = ..., + out: EllipsisType | None = ..., ) -> NDArray[np.object_]: ... @overload @@ -779,8 +793,9 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno self, A: _ScalarLike_co, B: _ScalarLike_co, - /, *, - out: None = ..., + /, + *, + out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], ) -> _ReturnType_co: ... @overload @@ -788,8 +803,9 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno self, A: ArrayLike, B: ArrayLike, - /, *, - out: None = ..., + /, + *, + out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], ) -> _ReturnType_co | NDArray[np.object_]: ... @overload @@ -797,7 +813,8 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno self, A: ArrayLike, B: ArrayLike, - /, *, + /, + *, out: _ArrayT, **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], ) -> _ArrayT: ... @@ -806,8 +823,9 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno self, A: _SupportsArrayUFunc, B: _SupportsArrayUFunc | ArrayLike, - /, *, - out: None = ..., + /, + *, + out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], ) -> Any: ... @overload @@ -815,8 +833,9 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno self, A: _ScalarLike_co, B: _SupportsArrayUFunc | ArrayLike, - /, *, - out: None = ..., + /, + *, + out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], ) -> Any: ... 
@@ -841,7 +860,7 @@ class _PyFunc_Nin3P_Nout1(ufunc, Generic[_ReturnType_co, _IDType, _NIn]): # typ x3: _ScalarLike_co, /, *xs: _ScalarLike_co, - out: None = ..., + out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs4P], ) -> _ReturnType_co: ... @overload @@ -852,7 +871,7 @@ class _PyFunc_Nin3P_Nout1(ufunc, Generic[_ReturnType_co, _IDType, _NIn]): # typ x3: ArrayLike, /, *xs: ArrayLike, - out: None = ..., + out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs4P], ) -> _ReturnType_co | NDArray[np.object_]: ... @overload @@ -874,7 +893,7 @@ class _PyFunc_Nin3P_Nout1(ufunc, Generic[_ReturnType_co, _IDType, _NIn]): # typ x3: _SupportsArrayUFunc | ArrayLike, /, *xs: _SupportsArrayUFunc | ArrayLike, - out: NDArray[Any] | tuple[NDArray[Any]] | None = ..., + out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs4P], ) -> Any: ... @@ -903,7 +922,7 @@ class _PyFunc_Nin1P_Nout2P(ufunc, Generic[_ReturnType_co, _IDType, _NIn, _NOut]) x1: _ScalarLike_co, /, *xs: _ScalarLike_co, - out: None = ..., + out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3P], ) -> _2PTuple[_ReturnType_co]: ... @overload @@ -912,7 +931,7 @@ class _PyFunc_Nin1P_Nout2P(ufunc, Generic[_ReturnType_co, _IDType, _NIn, _NOut]) x1: ArrayLike, /, *xs: ArrayLike, - out: None = ..., + out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3P], ) -> _2PTuple[_ReturnType_co | NDArray[np.object_]]: ... @overload @@ -930,7 +949,7 @@ class _PyFunc_Nin1P_Nout2P(ufunc, Generic[_ReturnType_co, _IDType, _NIn, _NOut]) x1: _SupportsArrayUFunc | ArrayLike, /, *xs: _SupportsArrayUFunc | ArrayLike, - out: _2PTuple[NDArray[Any]] | None = ..., + out: _2PTuple[NDArray[Any]] | EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3P], ) -> Any: ... 
From 7bb70e10fa25d61c6328f6fecff41be647de7cf3 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 11 Jun 2025 20:36:46 +0200 Subject: [PATCH 0111/1718] TYP: Simplified ``dtype.__new__`` overloads (#29168) --- numpy/__init__.pyi | 271 ++++++++++++----------- numpy/typing/tests/data/reveal/dtype.pyi | 19 +- 2 files changed, 151 insertions(+), 139 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index ae26ac2f19f8..0801e97b7061 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -866,8 +866,6 @@ _SignedIntegerCType: TypeAlias = type[ ] # fmt: skip _FloatingCType: TypeAlias = type[ct.c_float | ct.c_double | ct.c_longdouble] _IntegerCType: TypeAlias = _UnsignedIntegerCType | _SignedIntegerCType -_NumberCType: TypeAlias = _IntegerCType -_GenericCType: TypeAlias = _NumberCType | type[ct.c_bool | ct.c_char | ct.py_object[Any]] # some commonly used builtin types that are known to result in a # `dtype[object_]`, when their *type* is passed to the `dtype` constructor @@ -1067,10 +1065,6 @@ class _SupportsFileMethods(SupportsFlush, Protocol): @type_check_only class _SupportsFileMethodsRW(SupportsWrite[bytes], _SupportsFileMethods, Protocol): ... -@type_check_only -class _SupportsItem(Protocol[_T_co]): - def item(self, /) -> _T_co: ... - @type_check_only class _SupportsDLPack(Protocol[_T_contra]): def __dlpack__(self, /, *, stream: _T_contra | None = None) -> CapsuleType: ... @@ -1177,7 +1171,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): @overload def __new__( cls, - dtype: type[float64] | None, + dtype: type[float64 | ct.c_double] | _Float64Codes | _DoubleCodes | None, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ... 
@@ -1207,36 +1201,31 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): @overload def __new__( cls, - dtype: type[builtins.bool | np.bool], + dtype: type[builtins.bool | np.bool | ct.c_bool] | _BoolCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[str, Any] = ..., ) -> dtype[np.bool]: ... - # NOTE: `_: type[int]` also accepts `type[int | bool]` @overload def __new__( cls, - dtype: type[int | int_ | np.bool], + dtype: type[int], # also accepts `type[builtins.bool]` align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[str, Any] = ..., ) -> dtype[int_ | np.bool]: ... - # NOTE: `_: type[float]` also accepts `type[float | int | bool]` - # NOTE: `float64` inherits from `float` at runtime; but this isn't - # reflected in these stubs. So an explicit `float64` is required here. @overload def __new__( cls, - dtype: type[float | float64 | int_ | np.bool] | None, + dtype: type[float], # also accepts `type[int | bool]` align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[str, Any] = ..., ) -> dtype[float64 | int_ | np.bool]: ... 
- # NOTE: `_: type[complex]` also accepts `type[complex | float | int | bool]` @overload def __new__( cls, - dtype: type[complex | complex128 | float64 | int_ | np.bool], + dtype: type[complex], # also accepts `type[float | int | bool]` align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[str, Any] = ..., @@ -1244,7 +1233,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): @overload def __new__( cls, - dtype: type[bytes], # also includes `type[bytes_]` + dtype: type[bytes | ct.c_char] | _BytesCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[str, Any] = ..., @@ -1252,7 +1241,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): @overload def __new__( cls, - dtype: type[str], # also includes `type[str_]` + dtype: type[str] | _StrCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[str, Any] = ..., @@ -1266,7 +1255,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): @overload def __new__( cls, - dtype: type[memoryview | void], + dtype: type[void | memoryview] | _VoidDTypeLike | _VoidCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[str, Any] = ..., @@ -1276,127 +1265,182 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): @overload def __new__( cls, - dtype: type[_BuiltinObjectLike | object_], + dtype: type[object_ | _BuiltinObjectLike | ct.py_object[Any]] | _ObjectCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[str, Any] = ..., ) -> dtype[object_]: ... - # Unions of builtins. + # `unsignedinteger` string-based representations and ctypes @overload def __new__( cls, - dtype: type[bytes | str], + dtype: _UInt8Codes | _UByteCodes | type[ct.c_uint8 | ct.c_ubyte], align: builtins.bool = ..., copy: builtins.bool = ..., - metadata: dict[str, Any] = ..., - ) -> dtype[character]: ... + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[uint8]: ... 
@overload def __new__( cls, - dtype: type[bytes | str | memoryview], + dtype: _UInt16Codes | _UShortCodes | type[ct.c_uint16 | ct.c_ushort], align: builtins.bool = ..., copy: builtins.bool = ..., - metadata: dict[str, Any] = ..., - ) -> dtype[flexible]: ... + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[uint16]: ... @overload def __new__( cls, - dtype: type[complex | bytes | str | memoryview | _BuiltinObjectLike], + dtype: _UInt32Codes | _UIntCCodes | type[ct.c_uint32 | ct.c_uint], align: builtins.bool = ..., copy: builtins.bool = ..., - metadata: dict[str, Any] = ..., - ) -> dtype[np.bool | int_ | float64 | complex128 | flexible | object_]: ... - - # `unsignedinteger` string-based representations and ctypes - @overload - def __new__(cls, dtype: _UInt8Codes | type[ct.c_uint8], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uint8]: ... - @overload - def __new__(cls, dtype: _UInt16Codes | type[ct.c_uint16], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uint16]: ... - @overload - def __new__(cls, dtype: _UInt32Codes | type[ct.c_uint32], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uint32]: ... - @overload - def __new__(cls, dtype: _UInt64Codes | type[ct.c_uint64], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uint64]: ... - @overload - def __new__(cls, dtype: _UByteCodes | type[ct.c_ubyte], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[ubyte]: ... - @overload - def __new__(cls, dtype: _UShortCodes | type[ct.c_ushort], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[ushort]: ... 
- @overload - def __new__(cls, dtype: _UIntCCodes | type[ct.c_uint], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uintc]: ... - # NOTE: We're assuming here that `uint_ptr_t == size_t`, - # an assumption that does not hold in rare cases (same for `ssize_t`) + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[uint32]: ... @overload - def __new__(cls, dtype: _UIntPCodes | type[ct.c_void_p] | type[ct.c_size_t], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uintp]: ... + def __new__( + cls, + dtype: _UInt64Codes | _ULongLongCodes | type[ct.c_uint64 | ct.c_ulonglong], + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[uint64]: ... @overload - def __new__(cls, dtype: _ULongCodes | type[ct.c_ulong], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[ulong]: ... + def __new__( + cls, + dtype: _UIntPCodes | type[ct.c_void_p | ct.c_size_t], + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[uintp]: ... @overload - def __new__(cls, dtype: _ULongLongCodes | type[ct.c_ulonglong], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[ulonglong]: ... + def __new__( + cls, + dtype: _ULongCodes | type[ct.c_ulong], + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[ulong]: ... # `signedinteger` string-based representations and ctypes @overload - def __new__(cls, dtype: _Int8Codes | type[ct.c_int8], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[int8]: ... 
- @overload - def __new__(cls, dtype: _Int16Codes | type[ct.c_int16], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[int16]: ... - @overload - def __new__(cls, dtype: _Int32Codes | type[ct.c_int32], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[int32]: ... - @overload - def __new__(cls, dtype: _Int64Codes | type[ct.c_int64], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[int64]: ... - @overload - def __new__(cls, dtype: _ByteCodes | type[ct.c_byte], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[byte]: ... + def __new__( + cls, + dtype: _Int8Codes | _ByteCodes | type[ct.c_int8 | ct.c_byte], + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[int8]: ... @overload - def __new__(cls, dtype: _ShortCodes | type[ct.c_short], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[short]: ... + def __new__( + cls, + dtype: _Int16Codes | _ShortCodes | type[ct.c_int16 | ct.c_short], + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[int16]: ... @overload - def __new__(cls, dtype: _IntCCodes | type[ct.c_int], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[intc]: ... + def __new__( + cls, + dtype: _Int32Codes | _IntCCodes | type[ct.c_int32 | ct.c_int], + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[int32]: ... @overload - def __new__(cls, dtype: _IntPCodes | type[ct.c_ssize_t], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[intp]: ... 
+ def __new__( + cls, + dtype: _Int64Codes | _LongLongCodes | type[ct.c_int64 | ct.c_longlong], + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[int64]: ... @overload - def __new__(cls, dtype: _LongCodes | type[ct.c_long], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[long]: ... + def __new__( + cls, + dtype: _IntPCodes | type[intp | ct.c_ssize_t], + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[intp]: ... @overload - def __new__(cls, dtype: _LongLongCodes | type[ct.c_longlong], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[longlong]: ... + def __new__( + cls, + dtype: _LongCodes | type[ct.c_long], + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[long]: ... # `floating` string-based representations and ctypes @overload - def __new__(cls, dtype: _Float16Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[float16]: ... - @overload - def __new__(cls, dtype: _Float32Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[float32]: ... - @overload - def __new__(cls, dtype: _Float64Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[float64]: ... - @overload - def __new__(cls, dtype: _HalfCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[half]: ... - @overload - def __new__(cls, dtype: _SingleCodes | type[ct.c_float], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[single]: ... 
+ def __new__( + cls, + dtype: _Float16Codes | _HalfCodes, + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[float16]: ... @overload - def __new__(cls, dtype: _DoubleCodes | type[ct.c_double], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[double]: ... + def __new__( + cls, + dtype: _Float32Codes | _SingleCodes, + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[float32]: ... + # float64 codes are covered by overload 1 @overload - def __new__(cls, dtype: _LongDoubleCodes | type[ct.c_longdouble], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[longdouble]: ... + def __new__( + cls, + dtype: _LongDoubleCodes | type[ct.c_longdouble], + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[longdouble]: ... # `complexfloating` string-based representations @overload - def __new__(cls, dtype: _Complex64Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[complex64]: ... - @overload - def __new__(cls, dtype: _Complex128Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[complex128]: ... - @overload - def __new__(cls, dtype: _CSingleCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[csingle]: ... + def __new__( + cls, + dtype: _Complex64Codes | _CSingleCodes, + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[complex64]: ... @overload - def __new__(cls, dtype: _CDoubleCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[cdouble]: ... 
+ def __new__( + cls, + dtype: _Complex128Codes | _CDoubleCodes, + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[complex128]: ... @overload - def __new__(cls, dtype: _CLongDoubleCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[clongdouble]: ... + def __new__( + cls, + dtype: _CLongDoubleCodes, + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[clongdouble]: ... # Miscellaneous string-based representations and ctypes @overload - def __new__(cls, dtype: _BoolCodes | type[ct.c_bool], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[np.bool]: ... - @overload - def __new__(cls, dtype: _TD64Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[timedelta64]: ... - @overload - def __new__(cls, dtype: _DT64Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[datetime64]: ... - @overload - def __new__(cls, dtype: _StrCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[str_]: ... - @overload - def __new__(cls, dtype: _BytesCodes | type[ct.c_char], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[bytes_]: ... - @overload - def __new__(cls, dtype: _VoidCodes | _VoidDTypeLike, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[void]: ... + def __new__( + cls, + dtype: _TD64Codes, + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[timedelta64]: ... 
@overload - def __new__(cls, dtype: _ObjectCodes | type[ct.py_object[Any]], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[object_]: ... + def __new__( + cls, + dtype: _DT64Codes, + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[datetime64]: ... # `StringDType` requires special treatment because it has no scalar type @overload @@ -1460,35 +1504,11 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): @overload def __new__( cls, - dtype: _NumberCodes | _NumberCType, - align: builtins.bool = ..., - copy: builtins.bool = ..., - metadata: dict[builtins.str, Any] = ..., - ) -> dtype[number]: ... - @overload - def __new__( - cls, - dtype: _CharacterCodes | type[ct.c_char], + dtype: _CharacterCodes | type[bytes | builtins.str | ct.c_char], align: builtins.bool = ..., copy: builtins.bool = ..., - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[character]: ... - @overload - def __new__( - cls, - dtype: _FlexibleCodes | type[ct.c_char], - align: builtins.bool = ..., - copy: builtins.bool = ..., - metadata: dict[builtins.str, Any] = ..., - ) -> dtype[flexible]: ... - @overload - def __new__( - cls, - dtype: _GenericCodes | _GenericCType, - align: builtins.bool = ..., - copy: builtins.bool = ..., - metadata: dict[builtins.str, Any] = ..., - ) -> dtype[generic]: ... # Handle strings that can't be expressed as literals; i.e. "S1", "S2", ... @overload @@ -1501,10 +1521,11 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): ) -> dtype: ... # Catch-all overload for object-likes - # NOTE: `object_ | Any` is *not* equivalent to `Any` -- it describes some - # (static) type `T` s.t. `object_ <: T <: builtins.object` (`<:` denotes - # the subtyping relation, the (gradual) typing analogue of `issubclass()`). 
- # https://typing.readthedocs.io/en/latest/spec/concepts.html#union-types + # NOTE: `object_ | Any` is NOT equivalent to `Any`. It is specified to behave + # like a "sum type" (a.k.a. variant type, discriminated union, or tagged union). + # So the union of a type and `Any` is not the same "union type" that all other + # unions are (by definition). + # https://typing.python.org/en/latest/spec/concepts.html#union-types @overload def __new__( cls, diff --git a/numpy/typing/tests/data/reveal/dtype.pyi b/numpy/typing/tests/data/reveal/dtype.pyi index 721d2708737f..1794c944b3ae 100644 --- a/numpy/typing/tests/data/reveal/dtype.pyi +++ b/numpy/typing/tests/data/reveal/dtype.pyi @@ -14,12 +14,8 @@ dtype_U: np.dtype[np.str_] dtype_V: np.dtype[np.void] dtype_i8: np.dtype[np.int64] -py_int_co: type[int] -py_float_co: type[float] -py_complex_co: type[complex] py_object: type[_PyObjectLike] py_character: type[str | bytes] -py_flexible: type[str | bytes | memoryview] ct_floating: type[ct.c_float | ct.c_double | ct.c_longdouble] ct_number: type[ct.c_uint8 | ct.c_float] @@ -48,19 +44,16 @@ assert_type(np.dtype("str"), np.dtype[np.str_]) # Python types assert_type(np.dtype(bool), np.dtype[np.bool]) -assert_type(np.dtype(py_int_co), np.dtype[np.int_ | np.bool]) assert_type(np.dtype(int), np.dtype[np.int_ | np.bool]) -assert_type(np.dtype(py_float_co), np.dtype[np.float64 | np.int_ | np.bool]) assert_type(np.dtype(float), np.dtype[np.float64 | np.int_ | np.bool]) -assert_type(np.dtype(py_complex_co), np.dtype[np.complex128 | np.float64 | np.int_ | np.bool]) assert_type(np.dtype(complex), np.dtype[np.complex128 | np.float64 | np.int_ | np.bool]) assert_type(np.dtype(py_object), np.dtype[np.object_]) assert_type(np.dtype(str), np.dtype[np.str_]) assert_type(np.dtype(bytes), np.dtype[np.bytes_]) -assert_type(np.dtype(py_character), np.dtype[np.character]) assert_type(np.dtype(memoryview), np.dtype[np.void]) -assert_type(np.dtype(py_flexible), np.dtype[np.flexible]) 
+assert_type(np.dtype(py_character), np.dtype[np.character]) +# object types assert_type(np.dtype(list), np.dtype[np.object_]) assert_type(np.dtype(dt.datetime), np.dtype[np.object_]) assert_type(np.dtype(dt.timedelta), np.dtype[np.object_]) @@ -75,12 +68,9 @@ assert_type(np.dtype("l"), np.dtype[np.long]) assert_type(np.dtype("longlong"), np.dtype[np.longlong]) assert_type(np.dtype(">g"), np.dtype[np.longdouble]) assert_type(np.dtype(cs_integer), np.dtype[np.integer]) -assert_type(np.dtype(cs_number), np.dtype[np.number]) -assert_type(np.dtype(cs_flex), np.dtype[np.flexible]) -assert_type(np.dtype(cs_generic), np.dtype[np.generic]) # ctypes -assert_type(np.dtype(ct.c_double), np.dtype[np.double]) +assert_type(np.dtype(ct.c_double), np.dtype[np.float64]) # see numpy/numpy#29155 assert_type(np.dtype(ct.c_longlong), np.dtype[np.longlong]) assert_type(np.dtype(ct.c_uint32), np.dtype[np.uint32]) assert_type(np.dtype(ct.c_bool), np.dtype[np.bool]) @@ -90,7 +80,7 @@ assert_type(np.dtype(ct.py_object), np.dtype[np.object_]) # Special case for None assert_type(np.dtype(None), np.dtype[np.float64]) -# Dypes of dtypes +# dtypes of dtypes assert_type(np.dtype(np.dtype(np.float64)), np.dtype[np.float64]) assert_type(np.dtype(dt_inexact), np.dtype[np.inexact]) @@ -99,6 +89,7 @@ assert_type(np.dtype("S8"), np.dtype) # Void assert_type(np.dtype(("U", 10)), np.dtype[np.void]) +assert_type(np.dtype({"formats": (int, "u8"), "names": ("n", "B")}), np.dtype[np.void]) # StringDType assert_type(np.dtype(dt_string), StringDType) From ceb858fe26aa39ddc973539d0271886e336198f1 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 11 Jun 2025 20:44:30 +0200 Subject: [PATCH 0112/1718] TYP: fix and improve ``numpy.lib._utils_impl`` (#29181) --- numpy/lib/_utils_impl.pyi | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/numpy/lib/_utils_impl.pyi b/numpy/lib/_utils_impl.pyi index 00ed47c9fb67..6fbd26f88084 100644 --- a/numpy/lib/_utils_impl.pyi +++ 
b/numpy/lib/_utils_impl.pyi @@ -1,10 +1,17 @@ +from typing import LiteralString + from _typeshed import SupportsWrite +from typing_extensions import TypeVar -from numpy._typing import DTypeLike +import numpy as np __all__ = ["get_include", "info", "show_runtime"] -def get_include() -> str: ... +_DTypeT = TypeVar("_DTypeT", bound=np.dtype) + +def get_include() -> LiteralString: ... def show_runtime() -> None: ... -def info(object: object = ..., maxwidth: int = ..., output: SupportsWrite[str] | None = ..., toplevel: str = ...) -> None: ... -def drop_metadata(dtype: DTypeLike, /) -> DTypeLike: ... +def info( + object: object = None, maxwidth: int = 76, output: SupportsWrite[str] | None = None, toplevel: str = "numpy" +) -> None: ... +def drop_metadata(dtype: _DTypeT, /) -> _DTypeT: ... From ea91f1847bc82cd7379102571bdb02bb0cd396bc Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 11 Jun 2025 20:47:23 +0200 Subject: [PATCH 0113/1718] TYP: ``double = float64`` and ``cdouble = complex128`` (#29155) --- numpy/__init__.pyi | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 0801e97b7061..d71a76569ad7 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -4236,9 +4236,9 @@ class float64(floating[_64Bit], float): # type: ignore[misc] def __divmod__(self, other: _Float64_co, /) -> _2Tuple[float64]: ... # type: ignore[override] def __rdivmod__(self, other: _Float64_co, /) -> _2Tuple[float64]: ... # type: ignore[override] -half: TypeAlias = floating[_NBitHalf] -single: TypeAlias = floating[_NBitSingle] -double: TypeAlias = floating[_NBitDouble] +half: TypeAlias = float16 +single: TypeAlias = float32 +double: TypeAlias = float64 longdouble: TypeAlias = floating[_NBitLongDouble] # The main reason for `complexfloating` having two typevars is cosmetic. 
@@ -4336,7 +4336,7 @@ class complexfloating(inexact[_NBit1, complex], Generic[_NBit1, _NBit2]): self, other: number[_NBit], mod: None = None, / ) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... -complex64: TypeAlias = complexfloating[_32Bit, _32Bit] +complex64: TypeAlias = complexfloating[_32Bit] class complex128(complexfloating[_64Bit, _64Bit], complex): # type: ignore[misc] @overload @@ -4397,9 +4397,9 @@ class complex128(complexfloating[_64Bit, _64Bit], complex): # type: ignore[misc ) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... def __rpow__(self, other: _Complex128_co, mod: None = None, /) -> complex128: ... -csingle: TypeAlias = complexfloating[_NBitSingle, _NBitSingle] -cdouble: TypeAlias = complexfloating[_NBitDouble, _NBitDouble] -clongdouble: TypeAlias = complexfloating[_NBitLongDouble, _NBitLongDouble] +csingle: TypeAlias = complex64 +cdouble: TypeAlias = complex128 +clongdouble: TypeAlias = complexfloating[_NBitLongDouble] class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co]): @property From 18cbd6d1303b6c445ba451a707017be4723d28d0 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 11 Jun 2025 21:25:53 +0200 Subject: [PATCH 0114/1718] TYP: Fix missing ``_core.numeric`` (re-)exports (#29166) * TYP: Sync `_core/numerictypes.pyi` with NumType's `numpy-stubs` * TYP: Fix missing ``_core.numeric`` (re-)exports * TYP: appease ``ruff`` --- numpy/_core/numeric.pyi | 784 +++++++++++++++++++++++++++-------- numpy/_core/numerictypes.pyi | 87 ++-- 2 files changed, 661 insertions(+), 210 deletions(-) diff --git a/numpy/_core/numeric.pyi b/numpy/_core/numeric.pyi index 919fe1917197..b54fa856b007 100644 --- a/numpy/_core/numeric.pyi +++ b/numpy/_core/numeric.pyi @@ -1,3 +1,4 @@ +from builtins import bool as py_bool from collections.abc import Callable, Sequence from typing import ( Any, @@ -20,32 +21,12 @@ from numpy import ( True_, _OrderCF, _OrderKACF, - # re-exports bitwise_not, - broadcast, - 
complexfloating, - dtype, - flatiter, - float64, - floating, - from_dlpack, - # other - generic, inf, - int_, - intp, little_endian, - matmul, nan, - ndarray, - nditer, newaxis, - object_, - signedinteger, - timedelta64, ufunc, - unsignedinteger, - vecdot, ) from numpy._typing import ( ArrayLike, @@ -67,38 +48,113 @@ from numpy._typing import ( _SupportsArrayFunc, _SupportsDType, ) +from numpy.lib._array_utils_impl import normalize_axis_tuple as normalize_axis_tuple -from .fromnumeric import all as all -from .fromnumeric import any as any -from .fromnumeric import argpartition as argpartition -from .fromnumeric import matrix_transpose as matrix_transpose -from .fromnumeric import mean as mean +from ._asarray import require +from ._ufunc_config import ( + errstate, + getbufsize, + geterr, + geterrcall, + setbufsize, + seterr, + seterrcall, +) +from .arrayprint import ( + array2string, + array_repr, + array_str, + format_float_positional, + format_float_scientific, + get_printoptions, + printoptions, + set_printoptions, +) +from .fromnumeric import ( + all, + amax, + amin, + any, + argmax, + argmin, + argpartition, + argsort, + around, + choose, + clip, + compress, + cumprod, + cumsum, + cumulative_prod, + cumulative_sum, + diagonal, + matrix_transpose, + max, + mean, + min, + ndim, + nonzero, + partition, + prod, + ptp, + put, + ravel, + repeat, + reshape, + resize, + round, + searchsorted, + shape, + size, + sort, + squeeze, + std, + sum, + swapaxes, + take, + trace, + transpose, + var, +) +from .multiarray import ALLOW_THREADS as ALLOW_THREADS +from .multiarray import BUFSIZE as BUFSIZE +from .multiarray import CLIP as CLIP +from .multiarray import MAXDIMS as MAXDIMS +from .multiarray import MAY_SHARE_BOUNDS as MAY_SHARE_BOUNDS +from .multiarray import MAY_SHARE_EXACT as MAY_SHARE_EXACT +from .multiarray import RAISE as RAISE +from .multiarray import WRAP as WRAP from .multiarray import ( - # other _Array, _ConstructorEmpty, _KwargsEmpty, - # re-exports arange, 
array, asanyarray, asarray, ascontiguousarray, asfortranarray, + broadcast, can_cast, concatenate, copyto, dot, + dtype, empty, empty_like, + flatiter, + from_dlpack, frombuffer, fromfile, fromiter, fromstring, inner, lexsort, + matmul, may_share_memory, min_scalar_type, + ndarray, + nditer, nested_iters, promote_types, putmask, @@ -108,85 +164,473 @@ from .multiarray import ( where, zeros, ) +from .multiarray import normalize_axis_index as normalize_axis_index +from .numerictypes import ( + ScalarType, + bool, + bool_, + busday_count, + busday_offset, + busdaycalendar, + byte, + bytes_, + cdouble, + character, + clongdouble, + complex64, + complex128, + complex192, + complex256, + complexfloating, + csingle, + datetime64, + datetime_as_string, + datetime_data, + double, + flexible, + float16, + float32, + float64, + float96, + float128, + floating, + generic, + half, + inexact, + int8, + int16, + int32, + int64, + int_, + intc, + integer, + intp, + is_busday, + isdtype, + issubdtype, + long, + longdouble, + longlong, + number, + object_, + short, + signedinteger, + single, + str_, + timedelta64, + typecodes, + ubyte, + uint, + uint8, + uint16, + uint32, + uint64, + uintc, + uintp, + ulong, + ulonglong, + unsignedinteger, + ushort, + void, +) +from .umath import ( + absolute, + add, + arccos, + arccosh, + arcsin, + arcsinh, + arctan, + arctan2, + arctanh, + bitwise_and, + bitwise_count, + bitwise_or, + bitwise_xor, + cbrt, + ceil, + conj, + conjugate, + copysign, + cos, + cosh, + deg2rad, + degrees, + divide, + divmod, + e, + equal, + euler_gamma, + exp, + exp2, + expm1, + fabs, + float_power, + floor, + floor_divide, + fmax, + fmin, + fmod, + frexp, + frompyfunc, + gcd, + greater, + greater_equal, + heaviside, + hypot, + invert, + isfinite, + isinf, + isnan, + isnat, + lcm, + ldexp, + left_shift, + less, + less_equal, + log, + log1p, + log2, + log10, + logaddexp, + logaddexp2, + logical_and, + logical_not, + logical_or, + logical_xor, + matvec, + maximum, + 
minimum, + mod, + modf, + multiply, + negative, + nextafter, + not_equal, + pi, + positive, + power, + rad2deg, + radians, + reciprocal, + remainder, + right_shift, + rint, + sign, + signbit, + sin, + sinh, + spacing, + sqrt, + square, + subtract, + tan, + tanh, + true_divide, + trunc, + vecdot, + vecmat, +) __all__ = [ - "newaxis", - "ndarray", - "flatiter", - "nditer", - "nested_iters", - "ufunc", + "False_", + "ScalarType", + "True_", + "absolute", + "add", + "all", + "allclose", + "amax", + "amin", + "any", "arange", + "arccos", + "arccosh", + "arcsin", + "arcsinh", + "arctan", + "arctan2", + "arctanh", + "argmax", + "argmin", + "argpartition", + "argsort", + "argwhere", + "around", "array", - "asarray", + "array2string", + "array_equal", + "array_equiv", + "array_repr", + "array_str", "asanyarray", + "asarray", "ascontiguousarray", "asfortranarray", - "zeros", - "count_nonzero", - "empty", - "broadcast", - "dtype", - "fromstring", - "fromfile", - "frombuffer", - "from_dlpack", - "where", - "argwhere", - "copyto", - "concatenate", - "lexsort", "astype", + "base_repr", + "binary_repr", + "bitwise_and", + "bitwise_count", + "bitwise_not", + "bitwise_or", + "bitwise_xor", + "bool", + "bool_", + "broadcast", + "busday_count", + "busday_offset", + "busdaycalendar", + "byte", + "bytes_", "can_cast", - "promote_types", - "min_scalar_type", - "result_type", - "isfortran", - "empty_like", - "zeros_like", - "ones_like", - "correlate", + "cbrt", + "cdouble", + "ceil", + "character", + "choose", + "clip", + "clongdouble", + "complex64", + "complex128", + "complex192", + "complex256", + "complexfloating", + "compress", + "concatenate", + "conj", + "conjugate", "convolve", - "inner", - "dot", - "outer", - "vdot", - "roll", - "rollaxis", - "moveaxis", + "copysign", + "copyto", + "correlate", + "cos", + "cosh", + "count_nonzero", "cross", - "tensordot", - "little_endian", + "csingle", + "cumprod", + "cumsum", + "cumulative_prod", + "cumulative_sum", + "datetime64", + 
"datetime_as_string", + "datetime_data", + "deg2rad", + "degrees", + "diagonal", + "divide", + "divmod", + "dot", + "double", + "dtype", + "e", + "empty", + "empty_like", + "equal", + "errstate", + "euler_gamma", + "exp", + "exp2", + "expm1", + "fabs", + "flatiter", + "flatnonzero", + "flexible", + "float16", + "float32", + "float64", + "float96", + "float128", + "float_power", + "floating", + "floor", + "floor_divide", + "fmax", + "fmin", + "fmod", + "format_float_positional", + "format_float_scientific", + "frexp", + "from_dlpack", + "frombuffer", + "fromfile", + "fromfunction", "fromiter", - "array_equal", - "array_equiv", + "frompyfunc", + "fromstring", + "full", + "full_like", + "gcd", + "generic", + "get_printoptions", + "getbufsize", + "geterr", + "geterrcall", + "greater", + "greater_equal", + "half", + "heaviside", + "hypot", + "identity", "indices", - "fromfunction", + "inexact", + "inf", + "inner", + "int8", + "int16", + "int32", + "int64", + "int_", + "intc", + "integer", + "intp", + "invert", + "is_busday", "isclose", + "isdtype", + "isfinite", + "isfortran", + "isinf", + "isnan", + "isnat", "isscalar", - "binary_repr", - "base_repr", + "issubdtype", + "lcm", + "ldexp", + "left_shift", + "less", + "less_equal", + "lexsort", + "little_endian", + "log", + "log1p", + "log2", + "log10", + "logaddexp", + "logaddexp2", + "logical_and", + "logical_not", + "logical_or", + "logical_xor", + "long", + "longdouble", + "longlong", + "matmul", + "matrix_transpose", + "matvec", + "max", + "maximum", + "may_share_memory", + "mean", + "min", + "min_scalar_type", + "minimum", + "mod", + "modf", + "moveaxis", + "multiply", + "nan", + "ndarray", + "ndim", + "nditer", + "negative", + "nested_iters", + "newaxis", + "nextafter", + "nonzero", + "not_equal", + "number", + "object_", "ones", - "identity", - "allclose", + "ones_like", + "outer", + "partition", + "pi", + "positive", + "power", + "printoptions", + "prod", + "promote_types", + "ptp", + "put", "putmask", - 
"flatnonzero", - "inf", - "nan", - "False_", - "True_", - "bitwise_not", - "full", - "full_like", - "matmul", - "vecdot", + "rad2deg", + "radians", + "ravel", + "reciprocal", + "remainder", + "repeat", + "require", + "reshape", + "resize", + "result_type", + "right_shift", + "rint", + "roll", + "rollaxis", + "round", + "searchsorted", + "set_printoptions", + "setbufsize", + "seterr", + "seterrcall", + "shape", "shares_memory", - "may_share_memory", + "short", + "sign", + "signbit", + "signedinteger", + "sin", + "single", + "sinh", + "size", + "sort", + "spacing", + "sqrt", + "square", + "squeeze", + "std", + "str_", + "subtract", + "sum", + "swapaxes", + "take", + "tan", + "tanh", + "tensordot", + "timedelta64", + "trace", + "transpose", + "true_divide", + "trunc", + "typecodes", + "ubyte", + "ufunc", + "uint", + "uint8", + "uint16", + "uint32", + "uint64", + "uintc", + "uintp", + "ulong", + "ulonglong", + "unsignedinteger", + "ushort", + "var", + "vdot", + "vecdot", + "vecmat", + "void", + "where", + "zeros", + "zeros_like", ] _T = TypeVar("_T") _ScalarT = TypeVar("_ScalarT", bound=generic) -_DTypeT = TypeVar("_DTypeT", bound=np.dtype) +_DTypeT = TypeVar("_DTypeT", bound=dtype) _ArrayT = TypeVar("_ArrayT", bound=np.ndarray[Any, Any]) _ShapeT = TypeVar("_ShapeT", bound=_Shape) _AnyShapeT = TypeVar( @@ -201,88 +645,90 @@ _AnyShapeT = TypeVar( _CorrelateMode: TypeAlias = L["valid", "same", "full"] +# keep in sync with `ones_like` @overload def zeros_like( a: _ArrayT, - dtype: None = ..., - order: _OrderKACF = ..., - subok: L[True] = ..., - shape: None = ..., + dtype: None = None, + order: _OrderKACF = "K", + subok: L[True] = True, + shape: None = None, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> _ArrayT: ... 
@overload def zeros_like( a: _ArrayLike[_ScalarT], - dtype: None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - shape: _ShapeLike | None = ..., + dtype: None = None, + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> NDArray[_ScalarT]: ... @overload def zeros_like( - a: Any, + a: object, dtype: _DTypeLike[_ScalarT], - order: _OrderKACF = ..., - subok: bool = ..., - shape: _ShapeLike | None = ..., + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> NDArray[_ScalarT]: ... @overload def zeros_like( - a: Any, - dtype: DTypeLike | None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - shape: _ShapeLike | None = ..., + a: object, + dtype: DTypeLike | None = None, + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> NDArray[Any]: ... ones: Final[_ConstructorEmpty] +# keep in sync with `zeros_like` @overload def ones_like( a: _ArrayT, - dtype: None = ..., - order: _OrderKACF = ..., - subok: L[True] = ..., - shape: None = ..., + dtype: None = None, + order: _OrderKACF = "K", + subok: L[True] = True, + shape: None = None, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> _ArrayT: ... @overload def ones_like( a: _ArrayLike[_ScalarT], - dtype: None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - shape: _ShapeLike | None = ..., + dtype: None = None, + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> NDArray[_ScalarT]: ... 
@overload def ones_like( - a: Any, + a: object, dtype: _DTypeLike[_ScalarT], - order: _OrderKACF = ..., - subok: bool = ..., - shape: _ShapeLike | None = ..., + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> NDArray[_ScalarT]: ... @overload def ones_like( - a: Any, - dtype: DTypeLike | None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - shape: _ShapeLike | None = ..., + a: object, + dtype: DTypeLike | None = None, + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> NDArray[Any]: ... # TODO: Add overloads for bool, int, float, complex, str, bytes, and memoryview @@ -389,46 +835,46 @@ def full( @overload def full_like( a: _ArrayT, - fill_value: Any, - dtype: None = ..., - order: _OrderKACF = ..., - subok: L[True] = ..., - shape: None = ..., + fill_value: object, + dtype: None = None, + order: _OrderKACF = "K", + subok: L[True] = True, + shape: None = None, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> _ArrayT: ... @overload def full_like( a: _ArrayLike[_ScalarT], - fill_value: Any, - dtype: None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - shape: _ShapeLike | None = ..., + fill_value: object, + dtype: None = None, + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> NDArray[_ScalarT]: ... @overload def full_like( - a: Any, - fill_value: Any, + a: object, + fill_value: object, dtype: _DTypeLike[_ScalarT], - order: _OrderKACF = ..., - subok: bool = ..., - shape: _ShapeLike | None = ..., + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> NDArray[_ScalarT]: ... 
@overload def full_like( - a: Any, - fill_value: Any, - dtype: DTypeLike | None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - shape: _ShapeLike | None = ..., + a: object, + fill_value: object, + dtype: DTypeLike | None = None, + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> NDArray[Any]: ... # @@ -441,10 +887,10 @@ def count_nonzero( a: NDArray[Any] | _NestedSequence[ArrayLike], axis: _ShapeLike | None = None, *, keepdims: L[True] ) -> NDArray[np.intp]: ... @overload -def count_nonzero(a: ArrayLike, axis: _ShapeLike | None = None, *, keepdims: bool = False) -> Any: ... +def count_nonzero(a: ArrayLike, axis: _ShapeLike | None = None, *, keepdims: py_bool = False) -> Any: ... # -def isfortran(a: NDArray[Any] | generic) -> bool: ... +def isfortran(a: NDArray[Any] | generic) -> py_bool: ... def argwhere(a: ArrayLike) -> NDArray[intp]: ... @@ -836,31 +1282,31 @@ def identity( def allclose( a: ArrayLike, b: ArrayLike, - rtol: ArrayLike = ..., - atol: ArrayLike = ..., - equal_nan: bool = ..., -) -> bool: ... + rtol: ArrayLike = 1e-5, + atol: ArrayLike = 1e-8, + equal_nan: py_bool = False, +) -> py_bool: ... @overload def isclose( a: _ScalarLike_co, b: _ScalarLike_co, - rtol: ArrayLike = ..., - atol: ArrayLike = ..., - equal_nan: bool = ..., + rtol: ArrayLike = 1e-5, + atol: ArrayLike = 1e-8, + equal_nan: py_bool = False, ) -> np.bool: ... @overload def isclose( a: ArrayLike, b: ArrayLike, - rtol: ArrayLike = ..., - atol: ArrayLike = ..., - equal_nan: bool = ..., + rtol: ArrayLike = 1e-5, + atol: ArrayLike = 1e-8, + equal_nan: py_bool = False, ) -> NDArray[np.bool]: ... -def array_equal(a1: ArrayLike, a2: ArrayLike, equal_nan: bool = ...) -> bool: ... +def array_equal(a1: ArrayLike, a2: ArrayLike, equal_nan: py_bool = False) -> py_bool: ... -def array_equiv(a1: ArrayLike, a2: ArrayLike) -> bool: ... 
+def array_equiv(a1: ArrayLike, a2: ArrayLike) -> py_bool: ... @overload def astype( @@ -868,8 +1314,8 @@ def astype( dtype: _DTypeLike[_ScalarT], /, *, - copy: bool = ..., - device: L["cpu"] | None = ..., + copy: py_bool = True, + device: L["cpu"] | None = None, ) -> ndarray[_ShapeT, dtype[_ScalarT]]: ... @overload def astype( @@ -877,6 +1323,6 @@ def astype( dtype: DTypeLike, /, *, - copy: bool = ..., - device: L["cpu"] | None = ..., + copy: py_bool = True, + device: L["cpu"] | None = None, ) -> ndarray[_ShapeT, dtype]: ... diff --git a/numpy/_core/numerictypes.pyi b/numpy/_core/numerictypes.pyi index 753fe34800d5..b649b8f91cd1 100644 --- a/numpy/_core/numerictypes.pyi +++ b/numpy/_core/numerictypes.pyi @@ -1,5 +1,5 @@ -import builtins -from typing import Any, TypedDict, type_check_only +from builtins import bool as py_bool +from typing import Final, TypedDict, type_check_only from typing import Literal as L import numpy as np @@ -13,6 +13,8 @@ from numpy import ( clongdouble, complex64, complex128, + complex192, + complex256, complexfloating, csingle, datetime64, @@ -22,6 +24,8 @@ from numpy import ( float16, float32, float64, + float96, + float128, floating, generic, half, @@ -59,9 +63,8 @@ from numpy import ( void, ) from numpy._typing import DTypeLike -from numpy._typing._extended_precision import complex192, complex256, float96, float128 -from ._type_aliases import sctypeDict # noqa: F401 +from ._type_aliases import sctypeDict as sctypeDict from .multiarray import ( busday_count, busday_offset, @@ -152,41 +155,43 @@ class _TypeCodes(TypedDict): Datetime: L['Mm'] All: L['?bhilqnpBHILQNPefdgFDGSUVOMm'] -def isdtype(dtype: dtype | type[Any], kind: DTypeLike | tuple[DTypeLike, ...]) -> builtins.bool: ... +def isdtype(dtype: dtype | type, kind: DTypeLike | tuple[DTypeLike, ...]) -> py_bool: ... +def issubdtype(arg1: DTypeLike, arg2: DTypeLike) -> py_bool: ... -def issubdtype(arg1: DTypeLike, arg2: DTypeLike) -> builtins.bool: ... 
- -typecodes: _TypeCodes -ScalarType: tuple[ - type[int], - type[float], - type[complex], - type[builtins.bool], - type[bytes], - type[str], - type[memoryview], - type[np.bool], - type[csingle], - type[cdouble], - type[clongdouble], - type[half], - type[single], - type[double], - type[longdouble], - type[byte], - type[short], - type[intc], - type[long], - type[longlong], - type[timedelta64], - type[datetime64], - type[object_], - type[bytes_], - type[str_], - type[ubyte], - type[ushort], - type[uintc], - type[ulong], - type[ulonglong], - type[void], -] +typecodes: Final[_TypeCodes] = ... +ScalarType: Final[ + tuple[ + type[int], + type[float], + type[complex], + type[py_bool], + type[bytes], + type[str], + type[memoryview], + type[np.bool], + type[csingle], + type[cdouble], + type[clongdouble], + type[half], + type[single], + type[double], + type[longdouble], + type[byte], + type[short], + type[intc], + type[long], + type[longlong], + type[timedelta64], + type[datetime64], + type[object_], + type[bytes_], + type[str_], + type[ubyte], + type[ushort], + type[uintc], + type[ulong], + type[ulonglong], + type[void], + ] +] = ... +typeDict: Final = sctypeDict From 8ce860ffaeecf2a8d3939a6201994a44e58584e8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Jun 2025 13:56:41 -0600 Subject: [PATCH 0115/1718] MAINT: Bump pypa/cibuildwheel from 3.0.0b4 to 3.0.0 (#29180) Bumps [pypa/cibuildwheel](https://github.com/pypa/cibuildwheel) from 3.0.0b4 to 3.0.0. - [Release notes](https://github.com/pypa/cibuildwheel/releases) - [Changelog](https://github.com/pypa/cibuildwheel/blob/main/docs/changelog.md) - [Commits](https://github.com/pypa/cibuildwheel/compare/cf078b0954f3fd08b8445a7bf2c3fb83ab3bb971...5f22145df44122af0f5a201f93cf0207171beca7) --- updated-dependencies: - dependency-name: pypa/cibuildwheel dependency-version: 3.0.0 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/emscripten.yml | 2 +- .github/workflows/wheels.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index 86628f6882cd..453a67088adf 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -49,7 +49,7 @@ jobs: fetch-tags: true persist-credentials: false - - uses: pypa/cibuildwheel@cf078b0954f3fd08b8445a7bf2c3fb83ab3bb971 # v3.0.0b4 + - uses: pypa/cibuildwheel@5f22145df44122af0f5a201f93cf0207171beca7 # v3.0.0 env: CIBW_PLATFORM: pyodide diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index f74be5f4a455..68352eb1fc7c 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -177,7 +177,7 @@ jobs: fi - name: Build wheels - uses: pypa/cibuildwheel@cf078b0954f3fd08b8445a7bf2c3fb83ab3bb971 # v3.0.0b4 + uses: pypa/cibuildwheel@5f22145df44122af0f5a201f93cf0207171beca7 # v3.0.0 env: CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} From a3ec9af71069a88d511c71d56993b4cc24f22bdd Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 11 Jun 2025 22:29:37 +0200 Subject: [PATCH 0116/1718] CI: Bump `array-api-tests` to `v2025.05.23` (#29149) * CI: Bump `array-api-tests` to `v2025.05.23` * TST: xfail new array-api test failures * TST: increase ``--max-examples`` from 100 to 500 for ``array-api-tests`` * CI: apply review suggestions Co-authored-by: Evgeni Burovski --------- Co-authored-by: Evgeni Burovski --- .github/workflows/linux.yml | 10 +++++----- tools/ci/array-api-xfails.txt | 15 +++++++++++++++ 2 files changed, 20 insertions(+), 5 deletions(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index a0e549d86775..3452724841c3 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -156,7 +156,7 @@ jobs: # TODO: gcov env: 
PYTHONOPTIMIZE: 2 - + aarch64_test: needs: [smoke_test] @@ -204,7 +204,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - + - name: Creates new container run: | docker run --name the_container --interactive \ @@ -221,7 +221,7 @@ jobs: docker run --rm -e "TERM=xterm-256color" \ -v $(pwd):/numpy the_container \ /bin/script -e -q -c "/bin/linux32 /bin/bash --noprofile --norc -eo pipefail -c ' - cd /numpy && spin build + cd /numpy && spin build '" - name: Meson Log @@ -324,7 +324,7 @@ jobs: uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 with: repository: data-apis/array-api-tests - ref: 'c48410f96fc58e02eea844e6b7f6cc01680f77ce' # Latest commit as of 2025-04-01 + ref: '3c273cd34d51c64ed893737306d36adab23a94a1' # v2025.05.23 submodules: 'true' path: 'array-api-tests' persist-credentials: false @@ -346,7 +346,7 @@ jobs: PYTHONWARNINGS: 'ignore::UserWarning::,ignore::DeprecationWarning::,ignore::RuntimeWarning::' run: | cd ${GITHUB_WORKSPACE}/array-api-tests - pytest array_api_tests -v -c pytest.ini --ci --max-examples=100 --derandomize --disable-deadline --xfails-file ${GITHUB_WORKSPACE}/tools/ci/array-api-xfails.txt + pytest array_api_tests -v -c pytest.ini -n 4 --max-examples=1000 --derandomize --disable-deadline --xfails-file ${GITHUB_WORKSPACE}/tools/ci/array-api-xfails.txt custom_checks: needs: [smoke_test] diff --git a/tools/ci/array-api-xfails.txt b/tools/ci/array-api-xfails.txt index 98c3895ced06..8370099015c5 100644 --- a/tools/ci/array-api-xfails.txt +++ b/tools/ci/array-api-xfails.txt @@ -1,5 +1,20 @@ # finfo return type misalignment array_api_tests/test_data_type_functions.py::test_finfo[float32] +array_api_tests/test_data_type_functions.py::test_finfo[complex64] + +# finfo: data type not inexact +array_api_tests/test_data_type_functions.py::test_finfo[float64] +array_api_tests/test_data_type_functions.py::test_finfo[complex128] + +# iinfo: Invalid integer data type 'O' 
+array_api_tests/test_data_type_functions.py::test_iinfo[int8] +array_api_tests/test_data_type_functions.py::test_iinfo[uint8] +array_api_tests/test_data_type_functions.py::test_iinfo[int16] +array_api_tests/test_data_type_functions.py::test_iinfo[uint16] +array_api_tests/test_data_type_functions.py::test_iinfo[int32] +array_api_tests/test_data_type_functions.py::test_iinfo[uint32] +array_api_tests/test_data_type_functions.py::test_iinfo[int64] +array_api_tests/test_data_type_functions.py::test_iinfo[uint64] # 'shape' arg is present. 'newshape' is retained for backward compat. array_api_tests/test_signatures.py::test_func_signature[reshape] From 3d1cdb476d12402f28548a32db3f35c4212bf3e2 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 11 Jun 2025 22:53:25 +0200 Subject: [PATCH 0117/1718] TYP: fix ``ravel_multi_index`` false rejections (#29184) --- numpy/_core/multiarray.pyi | 38 ++++++++++++++++---------------------- 1 file changed, 16 insertions(+), 22 deletions(-) diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index e4f869b9beae..13a3f0077ce0 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -499,34 +499,28 @@ def array( like: _SupportsArrayFunc | None = ..., ) -> NDArray[Any]: ... +# @overload -def unravel_index( # type: ignore[misc] - indices: _IntLike_co, - shape: _ShapeLike, - order: _OrderCF = ..., -) -> tuple[intp, ...]: ... -@overload -def unravel_index( - indices: _ArrayLikeInt_co, - shape: _ShapeLike, - order: _OrderCF = ..., -) -> tuple[NDArray[intp], ...]: ... - -@overload -def ravel_multi_index( # type: ignore[misc] - multi_index: Sequence[_IntLike_co], - dims: Sequence[SupportsIndex], - mode: _ModeKind | tuple[_ModeKind, ...] = ..., - order: _OrderCF = ..., +def ravel_multi_index( + multi_index: SupportsLenAndGetItem[_IntLike_co], + dims: _ShapeLike, + mode: _ModeKind | tuple[_ModeKind, ...] = "raise", + order: _OrderCF = "C", ) -> intp: ... 
@overload def ravel_multi_index( - multi_index: Sequence[_ArrayLikeInt_co], - dims: Sequence[SupportsIndex], - mode: _ModeKind | tuple[_ModeKind, ...] = ..., - order: _OrderCF = ..., + multi_index: SupportsLenAndGetItem[_ArrayLikeInt_co], + dims: _ShapeLike, + mode: _ModeKind | tuple[_ModeKind, ...] = "raise", + order: _OrderCF = "C", ) -> NDArray[intp]: ... +# +@overload +def unravel_index(indices: _IntLike_co, shape: _ShapeLike, order: _OrderCF = "C") -> tuple[intp, ...]: ... +@overload +def unravel_index(indices: _ArrayLikeInt_co, shape: _ShapeLike, order: _OrderCF = "C") -> tuple[NDArray[intp], ...]: ... + # NOTE: Allow any sequence of array-like objects @overload def concatenate( # type: ignore[misc] From 2c190768902c4dcf6454bc41df1b0e378099d42c Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 11 Jun 2025 23:07:11 +0200 Subject: [PATCH 0118/1718] STY: ruff/isort config tweaks (#29183) * DEV: import-related ruff config tweaks * STY: run ``ruff check --fix`` to fix the new ``I001`` errors --- numpy/_core/_dtype.pyi | 1 - numpy/_core/_internal.pyi | 1 - numpy/_core/_ufunc_config.pyi | 3 +-- numpy/_core/arrayprint.pyi | 1 - numpy/_core/defchararray.pyi | 1 - numpy/_core/fromnumeric.pyi | 3 +-- numpy/_core/function_base.pyi | 3 +-- numpy/_core/multiarray.pyi | 3 +-- numpy/_core/records.pyi | 3 +-- numpy/_core/tests/test_api.py | 2 +- numpy/_core/tests/test_argparse.py | 4 ++-- numpy/_core/tests/test_array_coercion.py | 4 ++-- numpy/_core/tests/test_arraymethod.py | 2 +- numpy/_core/tests/test_casting_unittests.py | 2 +- numpy/_core/tests/test_conversion_utils.py | 2 +- numpy/_core/tests/test_cpu_dispatcher.py | 3 +-- numpy/_core/tests/test_cpu_features.py | 1 + numpy/_core/tests/test_custom_dtypes.py | 4 ++-- numpy/_core/tests/test_deprecations.py | 4 ++-- numpy/_core/tests/test_dtype.py | 4 ++-- numpy/_core/tests/test_extint128.py | 2 +- numpy/_core/tests/test_hashtable.py | 1 + numpy/_core/tests/test_indexing.py | 2 +- 
numpy/_core/tests/test_mem_overlap.py | 2 +- numpy/_core/tests/test_multiarray.py | 4 ++-- numpy/_core/tests/test_nditer.py | 2 +- numpy/_core/tests/test_numeric.py | 2 +- numpy/_core/tests/test_scalarbuffer.py | 4 ++-- numpy/_core/tests/test_scalarmath.py | 2 +- numpy/_core/tests/test_simd.py | 2 +- numpy/_core/tests/test_ufunc.py | 6 +++--- numpy/_core/tests/test_umath_accuracy.py | 2 +- numpy/_core/tests/test_umath_complex.py | 5 +++-- numpy/_typing/_nbit_base.pyi | 1 - numpy/_utils/__init__.pyi | 3 +-- numpy/_utils/_inspect.pyi | 3 +-- numpy/_utils/_pep440.pyi | 1 - numpy/ctypeslib/_ctypeslib.pyi | 3 +-- numpy/dtypes.pyi | 1 - numpy/f2py/_backends/_meson.pyi | 1 - numpy/f2py/_src_pyf.pyi | 3 +-- numpy/f2py/auxfuncs.pyi | 3 +-- numpy/f2py/crackfortran.pyi | 3 +-- numpy/f2py/f2py2e.pyi | 1 - numpy/f2py/rules.pyi | 1 - numpy/f2py/symbolic.pyi | 1 - numpy/fft/helper.pyi | 1 - numpy/lib/_arraysetops_impl.pyi | 1 - numpy/lib/_arrayterator_impl.pyi | 1 - numpy/lib/_datasource.pyi | 3 +-- numpy/lib/_format_impl.pyi | 3 +-- numpy/lib/_function_base_impl.pyi | 3 +-- numpy/lib/_index_tricks_impl.pyi | 3 +-- numpy/lib/_npyio_impl.pyi | 15 +++++++-------- numpy/lib/_shape_base_impl.pyi | 1 - numpy/lib/_type_check_impl.pyi | 3 +-- numpy/lib/_user_array_impl.pyi | 3 +-- numpy/lib/_utils_impl.pyi | 3 +-- numpy/lib/recfunctions.pyi | 3 +-- numpy/ma/core.pyi | 3 +-- numpy/polynomial/_polybase.pyi | 1 - numpy/random/bit_generator.pyi | 3 +-- numpy/testing/_private/utils.pyi | 5 ++--- numpy/testing/overrides.pyi | 1 - numpy/testing/print_coercion_tables.pyi | 1 - ruff.toml | 15 +++++++++++++++ 66 files changed, 80 insertions(+), 104 deletions(-) diff --git a/numpy/_core/_dtype.pyi b/numpy/_core/_dtype.pyi index 6cdd77b22e07..adb38583783f 100644 --- a/numpy/_core/_dtype.pyi +++ b/numpy/_core/_dtype.pyi @@ -1,6 +1,5 @@ from typing import Final, TypeAlias, TypedDict, overload, type_check_only from typing import Literal as L - from typing_extensions import ReadOnly, TypeVar import 
numpy as np diff --git a/numpy/_core/_internal.pyi b/numpy/_core/_internal.pyi index 3038297b6328..04c26ca284db 100644 --- a/numpy/_core/_internal.pyi +++ b/numpy/_core/_internal.pyi @@ -2,7 +2,6 @@ import ctypes as ct import re from collections.abc import Callable, Iterable from typing import Any, Final, Generic, Self, overload - from typing_extensions import TypeVar, deprecated import numpy as np diff --git a/numpy/_core/_ufunc_config.pyi b/numpy/_core/_ufunc_config.pyi index 1a6613154072..86df9827d652 100644 --- a/numpy/_core/_ufunc_config.pyi +++ b/numpy/_core/_ufunc_config.pyi @@ -1,8 +1,7 @@ +from _typeshed import SupportsWrite from collections.abc import Callable from typing import Any, Literal, TypeAlias, TypedDict, type_check_only -from _typeshed import SupportsWrite - from numpy import errstate as errstate _ErrKind: TypeAlias = Literal["ignore", "warn", "raise", "call", "print", "log"] diff --git a/numpy/_core/arrayprint.pyi b/numpy/_core/arrayprint.pyi index fec03a6f265c..967cc09e6a25 100644 --- a/numpy/_core/arrayprint.pyi +++ b/numpy/_core/arrayprint.pyi @@ -13,7 +13,6 @@ from typing import ( overload, type_check_only, ) - from typing_extensions import deprecated import numpy as np diff --git a/numpy/_core/defchararray.pyi b/numpy/_core/defchararray.pyi index 43005745bfab..26a5af432824 100644 --- a/numpy/_core/defchararray.pyi +++ b/numpy/_core/defchararray.pyi @@ -1,6 +1,5 @@ from typing import Any, Self, SupportsIndex, SupportsInt, TypeAlias, overload from typing import Literal as L - from typing_extensions import TypeVar import numpy as np diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index f0f83093c3b1..2aedc727e6dc 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -1,4 +1,5 @@ # ruff: noqa: ANN401 +from _typeshed import Incomplete from collections.abc import Sequence from typing import ( Any, @@ -11,8 +12,6 @@ from typing import ( overload, type_check_only, ) - -from _typeshed import Incomplete 
from typing_extensions import deprecated import numpy as np diff --git a/numpy/_core/function_base.pyi b/numpy/_core/function_base.pyi index 44d1311f5b44..600265b1fd0a 100644 --- a/numpy/_core/function_base.pyi +++ b/numpy/_core/function_base.pyi @@ -1,8 +1,7 @@ +from _typeshed import Incomplete from typing import Literal as L from typing import SupportsIndex, TypeAlias, TypeVar, overload -from _typeshed import Incomplete - import numpy as np from numpy._typing import ( DTypeLike, diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index 13a3f0077ce0..91ff666688bb 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -1,5 +1,6 @@ # TODO: Sort out any and all missing functions in this namespace import datetime as dt +from _typeshed import StrOrBytesPath, SupportsLenAndGetItem from collections.abc import Callable, Iterable, Sequence from typing import ( Any, @@ -18,8 +19,6 @@ from typing import ( from typing import ( Literal as L, ) - -from _typeshed import StrOrBytesPath, SupportsLenAndGetItem from typing_extensions import CapsuleType import numpy as np diff --git a/numpy/_core/records.pyi b/numpy/_core/records.pyi index 93177b2d3f75..ead165918478 100644 --- a/numpy/_core/records.pyi +++ b/numpy/_core/records.pyi @@ -1,5 +1,6 @@ # ruff: noqa: ANN401 # pyright: reportSelfClsParameterName=false +from _typeshed import StrOrBytesPath from collections.abc import Iterable, Sequence from typing import ( Any, @@ -11,8 +12,6 @@ from typing import ( overload, type_check_only, ) - -from _typeshed import StrOrBytesPath from typing_extensions import TypeVar import numpy as np diff --git a/numpy/_core/tests/test_api.py b/numpy/_core/tests/test_api.py index 25990536809b..d427ac0399a2 100644 --- a/numpy/_core/tests/test_api.py +++ b/numpy/_core/tests/test_api.py @@ -1,10 +1,10 @@ import sys import pytest -from numpy._core._rational_tests import rational import numpy as np import numpy._core.umath as ncu +from numpy._core._rational_tests import 
rational from numpy.testing import ( HAS_REFCOUNT, assert_, diff --git a/numpy/_core/tests/test_argparse.py b/numpy/_core/tests/test_argparse.py index 7f949c1059eb..0c49ec00277e 100644 --- a/numpy/_core/tests/test_argparse.py +++ b/numpy/_core/tests/test_argparse.py @@ -14,14 +14,14 @@ def func(arg1, /, arg2, *, arg3): import threading import pytest + +import numpy as np from numpy._core._multiarray_tests import ( argparse_example_function as func, ) from numpy._core._multiarray_tests import ( threaded_argparse_example_function as thread_func, ) - -import numpy as np from numpy.testing import IS_WASM diff --git a/numpy/_core/tests/test_array_coercion.py b/numpy/_core/tests/test_array_coercion.py index 883aee63ac3a..a3939daa8904 100644 --- a/numpy/_core/tests/test_array_coercion.py +++ b/numpy/_core/tests/test_array_coercion.py @@ -6,12 +6,12 @@ from itertools import permutations, product -import numpy._core._multiarray_umath as ncu import pytest -from numpy._core._rational_tests import rational from pytest import param import numpy as np +import numpy._core._multiarray_umath as ncu +from numpy._core._rational_tests import rational from numpy.testing import IS_64BIT, IS_PYPY, assert_array_equal diff --git a/numpy/_core/tests/test_arraymethod.py b/numpy/_core/tests/test_arraymethod.py index d8baef7e7fbf..5b3d51585718 100644 --- a/numpy/_core/tests/test_arraymethod.py +++ b/numpy/_core/tests/test_arraymethod.py @@ -7,9 +7,9 @@ from typing import Any import pytest -from numpy._core._multiarray_umath import _get_castingimpl as get_castingimpl import numpy as np +from numpy._core._multiarray_umath import _get_castingimpl as get_castingimpl class TestResolveDescriptors: diff --git a/numpy/_core/tests/test_casting_unittests.py b/numpy/_core/tests/test_casting_unittests.py index f8441ea9d0d7..91ecc0dc75b0 100644 --- a/numpy/_core/tests/test_casting_unittests.py +++ b/numpy/_core/tests/test_casting_unittests.py @@ -12,9 +12,9 @@ import textwrap import pytest -from 
numpy._core._multiarray_umath import _get_castingimpl as get_castingimpl import numpy as np +from numpy._core._multiarray_umath import _get_castingimpl as get_castingimpl from numpy.lib.stride_tricks import as_strided from numpy.testing import assert_array_equal diff --git a/numpy/_core/tests/test_conversion_utils.py b/numpy/_core/tests/test_conversion_utils.py index 03ba33957821..d63ca9e58df5 100644 --- a/numpy/_core/tests/test_conversion_utils.py +++ b/numpy/_core/tests/test_conversion_utils.py @@ -3,9 +3,9 @@ """ import re -import numpy._core._multiarray_tests as mt import pytest +import numpy._core._multiarray_tests as mt from numpy._core.multiarray import CLIP, RAISE, WRAP from numpy.testing import assert_raises diff --git a/numpy/_core/tests/test_cpu_dispatcher.py b/numpy/_core/tests/test_cpu_dispatcher.py index 0a47685d0397..fc9d5e3147e0 100644 --- a/numpy/_core/tests/test_cpu_dispatcher.py +++ b/numpy/_core/tests/test_cpu_dispatcher.py @@ -1,10 +1,9 @@ +from numpy._core import _umath_tests from numpy._core._multiarray_umath import ( __cpu_baseline__, __cpu_dispatch__, __cpu_features__, ) - -from numpy._core import _umath_tests from numpy.testing import assert_equal diff --git a/numpy/_core/tests/test_cpu_features.py b/numpy/_core/tests/test_cpu_features.py index d1e3dc610d49..ecc806e9c0e5 100644 --- a/numpy/_core/tests/test_cpu_features.py +++ b/numpy/_core/tests/test_cpu_features.py @@ -6,6 +6,7 @@ import sys import pytest + from numpy._core._multiarray_umath import ( __cpu_baseline__, __cpu_dispatch__, diff --git a/numpy/_core/tests/test_custom_dtypes.py b/numpy/_core/tests/test_custom_dtypes.py index 66e6de35b427..3336286d8c98 100644 --- a/numpy/_core/tests/test_custom_dtypes.py +++ b/numpy/_core/tests/test_custom_dtypes.py @@ -1,12 +1,12 @@ from tempfile import NamedTemporaryFile import pytest + +import numpy as np from numpy._core._multiarray_umath import ( _discover_array_parameters as discover_array_params, ) from numpy._core._multiarray_umath import 
_get_sfloat_dtype - -import numpy as np from numpy.testing import assert_array_equal SF = _get_sfloat_dtype() diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index cb552357fc96..c4acbf9d2d69 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -6,11 +6,11 @@ import contextlib import warnings -import numpy._core._struct_ufunc_tests as struct_ufunc import pytest -from numpy._core._multiarray_tests import fromstring_null_term_c_api # noqa: F401 import numpy as np +import numpy._core._struct_ufunc_tests as struct_ufunc +from numpy._core._multiarray_tests import fromstring_null_term_c_api # noqa: F401 from numpy.testing import assert_raises, temppath diff --git a/numpy/_core/tests/test_dtype.py b/numpy/_core/tests/test_dtype.py index 684672a9b71f..d9bd17c48434 100644 --- a/numpy/_core/tests/test_dtype.py +++ b/numpy/_core/tests/test_dtype.py @@ -11,11 +11,11 @@ import hypothesis import pytest from hypothesis.extra import numpy as hynp -from numpy._core._multiarray_tests import create_custom_field_dtype -from numpy._core._rational_tests import rational import numpy as np import numpy.dtypes +from numpy._core._multiarray_tests import create_custom_field_dtype +from numpy._core._rational_tests import rational from numpy.testing import ( HAS_REFCOUNT, IS_PYSTON, diff --git a/numpy/_core/tests/test_extint128.py b/numpy/_core/tests/test_extint128.py index 1a05151ac6be..6e4d74b81d39 100644 --- a/numpy/_core/tests/test_extint128.py +++ b/numpy/_core/tests/test_extint128.py @@ -2,10 +2,10 @@ import itertools import operator -import numpy._core._multiarray_tests as mt import pytest import numpy as np +import numpy._core._multiarray_tests as mt from numpy.testing import assert_equal, assert_raises INT64_MAX = np.iinfo(np.int64).max diff --git a/numpy/_core/tests/test_hashtable.py b/numpy/_core/tests/test_hashtable.py index 74be5219a287..25a7158aaf6f 100644 --- 
a/numpy/_core/tests/test_hashtable.py +++ b/numpy/_core/tests/test_hashtable.py @@ -1,6 +1,7 @@ import random import pytest + from numpy._core._multiarray_tests import identityhash_tester diff --git a/numpy/_core/tests/test_indexing.py b/numpy/_core/tests/test_indexing.py index 757b8d72782f..81ba85ea4648 100644 --- a/numpy/_core/tests/test_indexing.py +++ b/numpy/_core/tests/test_indexing.py @@ -5,9 +5,9 @@ from itertools import product import pytest -from numpy._core._multiarray_tests import array_indexing import numpy as np +from numpy._core._multiarray_tests import array_indexing from numpy.exceptions import ComplexWarning, VisibleDeprecationWarning from numpy.testing import ( HAS_REFCOUNT, diff --git a/numpy/_core/tests/test_mem_overlap.py b/numpy/_core/tests/test_mem_overlap.py index 78b943854679..240ea62850ee 100644 --- a/numpy/_core/tests/test_mem_overlap.py +++ b/numpy/_core/tests/test_mem_overlap.py @@ -1,10 +1,10 @@ import itertools import pytest -from numpy._core._multiarray_tests import internal_overlap, solve_diophantine import numpy as np from numpy._core import _umath_tests +from numpy._core._multiarray_tests import internal_overlap, solve_diophantine from numpy.lib.stride_tricks import as_strided from numpy.testing import assert_, assert_array_equal, assert_equal, assert_raises diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 7603449ba28e..146431c7a7c0 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -21,11 +21,11 @@ from datetime import datetime, timedelta from decimal import Decimal -import numpy._core._multiarray_tests as _multiarray_tests import pytest -from numpy._core._rational_tests import rational import numpy as np +import numpy._core._multiarray_tests as _multiarray_tests +from numpy._core._rational_tests import rational from numpy._core.multiarray import _get_ndarray_c_version, dot from numpy._core.tests._locales import CommaDecimalPointLocale 
from numpy.exceptions import AxisError, ComplexWarning diff --git a/numpy/_core/tests/test_nditer.py b/numpy/_core/tests/test_nditer.py index ec28e48c5046..a29a49bfb71a 100644 --- a/numpy/_core/tests/test_nditer.py +++ b/numpy/_core/tests/test_nditer.py @@ -2,10 +2,10 @@ import sys import textwrap -import numpy._core._multiarray_tests as _multiarray_tests import pytest import numpy as np +import numpy._core._multiarray_tests as _multiarray_tests import numpy._core.umath as ncu from numpy import all, arange, array, nditer from numpy.testing import ( diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py index 8e786bf13d9e..8a72e4bfa65d 100644 --- a/numpy/_core/tests/test_numeric.py +++ b/numpy/_core/tests/test_numeric.py @@ -9,11 +9,11 @@ from hypothesis import given from hypothesis import strategies as st from hypothesis.extra import numpy as hynp -from numpy._core._rational_tests import rational import numpy as np from numpy import ma from numpy._core import sctypes +from numpy._core._rational_tests import rational from numpy._core.numerictypes import obj2sctype from numpy.exceptions import AxisError from numpy.random import rand, randint, randn diff --git a/numpy/_core/tests/test_scalarbuffer.py b/numpy/_core/tests/test_scalarbuffer.py index c957aec4f9b2..4d2744b85e53 100644 --- a/numpy/_core/tests/test_scalarbuffer.py +++ b/numpy/_core/tests/test_scalarbuffer.py @@ -2,10 +2,10 @@ Test scalar buffer interface adheres to PEP 3118 """ import pytest -from numpy._core._multiarray_tests import get_buffer_info -from numpy._core._rational_tests import rational import numpy as np +from numpy._core._multiarray_tests import get_buffer_info +from numpy._core._rational_tests import rational from numpy.testing import assert_, assert_equal, assert_raises # PEP3118 format strings for native (standard alignment and byteorder) types diff --git a/numpy/_core/tests/test_scalarmath.py b/numpy/_core/tests/test_scalarmath.py index fc37897bb7f7..746b410f79d2 
100644 --- a/numpy/_core/tests/test_scalarmath.py +++ b/numpy/_core/tests/test_scalarmath.py @@ -9,9 +9,9 @@ from hypothesis import given, settings from hypothesis.extra import numpy as hynp from hypothesis.strategies import sampled_from -from numpy._core._rational_tests import rational import numpy as np +from numpy._core._rational_tests import rational from numpy._utils import _pep440 from numpy.exceptions import ComplexWarning from numpy.testing import ( diff --git a/numpy/_core/tests/test_simd.py b/numpy/_core/tests/test_simd.py index 697d89bcc26c..acea4315e679 100644 --- a/numpy/_core/tests/test_simd.py +++ b/numpy/_core/tests/test_simd.py @@ -6,8 +6,8 @@ import re import pytest -from numpy._core._multiarray_umath import __cpu_baseline__ +from numpy._core._multiarray_umath import __cpu_baseline__ from numpy._core._simd import clear_floatstatus, get_floatstatus, targets diff --git a/numpy/_core/tests/test_ufunc.py b/numpy/_core/tests/test_ufunc.py index f2b3f5a35a37..21ebc02c2625 100644 --- a/numpy/_core/tests/test_ufunc.py +++ b/numpy/_core/tests/test_ufunc.py @@ -4,13 +4,13 @@ import sys import warnings -import numpy._core._operand_flag_tests as opflag_tests -import numpy._core._rational_tests as _rational_tests -import numpy._core._umath_tests as umt import pytest from pytest import param import numpy as np +import numpy._core._operand_flag_tests as opflag_tests +import numpy._core._rational_tests as _rational_tests +import numpy._core._umath_tests as umt import numpy._core.umath as ncu import numpy.linalg._umath_linalg as uml from numpy.exceptions import AxisError diff --git a/numpy/_core/tests/test_umath_accuracy.py b/numpy/_core/tests/test_umath_accuracy.py index 5707e9279d5b..da9419d63a8a 100644 --- a/numpy/_core/tests/test_umath_accuracy.py +++ b/numpy/_core/tests/test_umath_accuracy.py @@ -4,9 +4,9 @@ from os import path import pytest -from numpy._core._multiarray_umath import __cpu_features__ import numpy as np +from numpy._core._multiarray_umath 
import __cpu_features__ from numpy.testing import assert_array_max_ulp from numpy.testing._private.utils import _glibc_older_than diff --git a/numpy/_core/tests/test_umath_complex.py b/numpy/_core/tests/test_umath_complex.py index a97af475def4..8f6f5c682a91 100644 --- a/numpy/_core/tests/test_umath_complex.py +++ b/numpy/_core/tests/test_umath_complex.py @@ -1,11 +1,12 @@ import platform import sys -# import the c-extension module directly since _arg is not exported via umath -import numpy._core._multiarray_umath as ncu import pytest import numpy as np + +# import the c-extension module directly since _arg is not exported via umath +import numpy._core._multiarray_umath as ncu from numpy.testing import ( assert_almost_equal, assert_array_equal, diff --git a/numpy/_typing/_nbit_base.pyi b/numpy/_typing/_nbit_base.pyi index ccf8f5ceac45..d88c9f4d9fd9 100644 --- a/numpy/_typing/_nbit_base.pyi +++ b/numpy/_typing/_nbit_base.pyi @@ -3,7 +3,6 @@ # mypy: disable-error-code=misc from typing import final - from typing_extensions import deprecated # Deprecated in NumPy 2.3, 2025-05-01 diff --git a/numpy/_utils/__init__.pyi b/numpy/_utils/__init__.pyi index f3472df9a554..2ed4e88b3e32 100644 --- a/numpy/_utils/__init__.pyi +++ b/numpy/_utils/__init__.pyi @@ -1,8 +1,7 @@ +from _typeshed import IdentityFunction from collections.abc import Callable, Iterable from typing import Protocol, TypeVar, overload, type_check_only -from _typeshed import IdentityFunction - from ._convertions import asbytes as asbytes from ._convertions import asunicode as asunicode diff --git a/numpy/_utils/_inspect.pyi b/numpy/_utils/_inspect.pyi index d53c3c40fcf5..40546d2f4497 100644 --- a/numpy/_utils/_inspect.pyi +++ b/numpy/_utils/_inspect.pyi @@ -1,8 +1,7 @@ import types +from _typeshed import SupportsLenAndGetItem from collections.abc import Callable, Mapping from typing import Any, Final, TypeAlias, TypeVar, overload - -from _typeshed import SupportsLenAndGetItem from typing_extensions import TypeIs 
__all__ = ["formatargspec", "getargspec"] diff --git a/numpy/_utils/_pep440.pyi b/numpy/_utils/_pep440.pyi index 29dd4c912aa9..2c338d4e5b14 100644 --- a/numpy/_utils/_pep440.pyi +++ b/numpy/_utils/_pep440.pyi @@ -13,7 +13,6 @@ from typing import ( from typing import ( Literal as L, ) - from typing_extensions import TypeIs __all__ = ["VERSION_PATTERN", "InvalidVersion", "LegacyVersion", "Version", "parse"] diff --git a/numpy/ctypeslib/_ctypeslib.pyi b/numpy/ctypeslib/_ctypeslib.pyi index e26d6052eaae..aecb3899bdf5 100644 --- a/numpy/ctypeslib/_ctypeslib.pyi +++ b/numpy/ctypeslib/_ctypeslib.pyi @@ -1,6 +1,7 @@ # NOTE: Numpy's mypy plugin is used for importing the correct # platform-specific `ctypes._SimpleCData[int]` sub-type import ctypes +from _typeshed import StrOrBytesPath from collections.abc import Iterable, Sequence from ctypes import c_int64 as _c_intp from typing import ( @@ -13,8 +14,6 @@ from typing import ( ) from typing import Literal as L -from _typeshed import StrOrBytesPath - import numpy as np from numpy import ( byte, diff --git a/numpy/dtypes.pyi b/numpy/dtypes.pyi index 007dc643c0e3..f76b08fc28dc 100644 --- a/numpy/dtypes.pyi +++ b/numpy/dtypes.pyi @@ -12,7 +12,6 @@ from typing import ( type_check_only, ) from typing import Literal as L - from typing_extensions import TypeVar import numpy as np diff --git a/numpy/f2py/_backends/_meson.pyi b/numpy/f2py/_backends/_meson.pyi index b9f959537214..67baf9b76845 100644 --- a/numpy/f2py/_backends/_meson.pyi +++ b/numpy/f2py/_backends/_meson.pyi @@ -2,7 +2,6 @@ from collections.abc import Callable from pathlib import Path from typing import Final from typing import Literal as L - from typing_extensions import override from ._backend import Backend diff --git a/numpy/f2py/_src_pyf.pyi b/numpy/f2py/_src_pyf.pyi index f5aecbf1decd..50ddd07bf638 100644 --- a/numpy/f2py/_src_pyf.pyi +++ b/numpy/f2py/_src_pyf.pyi @@ -1,9 +1,8 @@ import re +from _typeshed import StrOrBytesPath from collections.abc import Mapping 
from typing import Final -from _typeshed import StrOrBytesPath - routine_start_re: Final[re.Pattern[str]] = ... routine_end_re: Final[re.Pattern[str]] = ... function_start_re: Final[re.Pattern[str]] = ... diff --git a/numpy/f2py/auxfuncs.pyi b/numpy/f2py/auxfuncs.pyi index f2ff09faf33b..dfbae5c7d94d 100644 --- a/numpy/f2py/auxfuncs.pyi +++ b/numpy/f2py/auxfuncs.pyi @@ -1,10 +1,9 @@ +from _typeshed import FileDescriptorOrPath from collections.abc import Callable, Mapping from pprint import pprint as show from typing import Any, Final, Never, TypeAlias, TypeVar, overload from typing import Literal as L -from _typeshed import FileDescriptorOrPath - from .cfuncs import errmess __all__ = [ diff --git a/numpy/f2py/crackfortran.pyi b/numpy/f2py/crackfortran.pyi index 6b08f8784f01..c5f4fd7585ba 100644 --- a/numpy/f2py/crackfortran.pyi +++ b/numpy/f2py/crackfortran.pyi @@ -1,10 +1,9 @@ import re +from _typeshed import StrOrBytesPath, StrPath from collections.abc import Callable, Iterable, Mapping from typing import IO, Any, Concatenate, Final, Never, ParamSpec, TypeAlias, overload from typing import Literal as L -from _typeshed import StrOrBytesPath, StrPath - from .__version__ import version from .auxfuncs import isintent_dict as isintent_dict diff --git a/numpy/f2py/f2py2e.pyi b/numpy/f2py/f2py2e.pyi index dd1d0c39e8a5..03aeffc5dcdd 100644 --- a/numpy/f2py/f2py2e.pyi +++ b/numpy/f2py/f2py2e.pyi @@ -3,7 +3,6 @@ import pprint from collections.abc import Hashable, Iterable, Mapping, MutableMapping, Sequence from types import ModuleType from typing import Any, Final, NotRequired, TypedDict, type_check_only - from typing_extensions import TypeVar, override from .__version__ import version diff --git a/numpy/f2py/rules.pyi b/numpy/f2py/rules.pyi index aa91e942698a..58614060ba87 100644 --- a/numpy/f2py/rules.pyi +++ b/numpy/f2py/rules.pyi @@ -1,7 +1,6 @@ from collections.abc import Callable, Iterable, Mapping from typing import Any, Final, TypeAlias from typing import Literal as 
L - from typing_extensions import TypeVar from .__version__ import version diff --git a/numpy/f2py/symbolic.pyi b/numpy/f2py/symbolic.pyi index 74e7a48ab327..e7b14f751dc3 100644 --- a/numpy/f2py/symbolic.pyi +++ b/numpy/f2py/symbolic.pyi @@ -2,7 +2,6 @@ from collections.abc import Callable, Mapping from enum import Enum from typing import Any, Generic, ParamSpec, Self, TypeAlias, overload from typing import Literal as L - from typing_extensions import TypeVar __all__ = ["Expr"] diff --git a/numpy/fft/helper.pyi b/numpy/fft/helper.pyi index 887cbe7e27c9..7cf391a12e1d 100644 --- a/numpy/fft/helper.pyi +++ b/numpy/fft/helper.pyi @@ -1,6 +1,5 @@ from typing import Any from typing import Literal as L - from typing_extensions import deprecated import numpy as np diff --git a/numpy/lib/_arraysetops_impl.pyi b/numpy/lib/_arraysetops_impl.pyi index a7ad5b9d91e7..4279b809f78e 100644 --- a/numpy/lib/_arraysetops_impl.pyi +++ b/numpy/lib/_arraysetops_impl.pyi @@ -1,6 +1,5 @@ from typing import Any, Generic, NamedTuple, SupportsIndex, TypeAlias, overload from typing import Literal as L - from typing_extensions import TypeVar, deprecated import numpy as np diff --git a/numpy/lib/_arrayterator_impl.pyi b/numpy/lib/_arrayterator_impl.pyi index e1a9e056a6e1..5fd589a3ac36 100644 --- a/numpy/lib/_arrayterator_impl.pyi +++ b/numpy/lib/_arrayterator_impl.pyi @@ -3,7 +3,6 @@ from collections.abc import Generator from types import EllipsisType from typing import Any, Final, TypeAlias, overload - from typing_extensions import TypeVar import numpy as np diff --git a/numpy/lib/_datasource.pyi b/numpy/lib/_datasource.pyi index 9f91fdf893a0..ad52b7f67af0 100644 --- a/numpy/lib/_datasource.pyi +++ b/numpy/lib/_datasource.pyi @@ -1,8 +1,7 @@ +from _typeshed import OpenBinaryMode, OpenTextMode from pathlib import Path from typing import IO, Any, TypeAlias -from _typeshed import OpenBinaryMode, OpenTextMode - _Mode: TypeAlias = OpenBinaryMode | OpenTextMode ### diff --git 
a/numpy/lib/_format_impl.pyi b/numpy/lib/_format_impl.pyi index 870c2d761bb0..b45df02796d7 100644 --- a/numpy/lib/_format_impl.pyi +++ b/numpy/lib/_format_impl.pyi @@ -1,7 +1,6 @@ import os -from typing import Any, BinaryIO, Final, TypeAlias, TypeGuard - from _typeshed import SupportsRead, SupportsWrite +from typing import Any, BinaryIO, Final, TypeAlias, TypeGuard import numpy as np import numpy.typing as npt diff --git a/numpy/lib/_function_base_impl.pyi b/numpy/lib/_function_base_impl.pyi index 090fb233dde1..cb6e18b53fa4 100644 --- a/numpy/lib/_function_base_impl.pyi +++ b/numpy/lib/_function_base_impl.pyi @@ -1,4 +1,5 @@ # ruff: noqa: ANN401 +from _typeshed import Incomplete from collections.abc import Callable, Iterable, Sequence from typing import ( Any, @@ -13,8 +14,6 @@ from typing import ( type_check_only, ) from typing import Literal as L - -from _typeshed import Incomplete from typing_extensions import TypeIs, deprecated import numpy as np diff --git a/numpy/lib/_index_tricks_impl.pyi b/numpy/lib/_index_tricks_impl.pyi index 7ac2b3a093e0..c4509d9aa3ad 100644 --- a/numpy/lib/_index_tricks_impl.pyi +++ b/numpy/lib/_index_tricks_impl.pyi @@ -1,8 +1,7 @@ +from _typeshed import Incomplete from collections.abc import Sequence from typing import Any, ClassVar, Final, Generic, Self, SupportsIndex, final, overload from typing import Literal as L - -from _typeshed import Incomplete from typing_extensions import TypeVar, deprecated import numpy as np diff --git a/numpy/lib/_npyio_impl.pyi b/numpy/lib/_npyio_impl.pyi index 40369c55f63d..94f014ccd52d 100644 --- a/numpy/lib/_npyio_impl.pyi +++ b/numpy/lib/_npyio_impl.pyi @@ -1,5 +1,12 @@ import types import zipfile +from _typeshed import ( + StrOrBytesPath, + StrPath, + SupportsKeysAndGetItem, + SupportsRead, + SupportsWrite, +) from collections.abc import Callable, Collection, Iterable, Iterator, Mapping, Sequence from re import Pattern from typing import ( @@ -14,14 +21,6 @@ from typing import ( type_check_only, ) 
from typing import Literal as L - -from _typeshed import ( - StrOrBytesPath, - StrPath, - SupportsKeysAndGetItem, - SupportsRead, - SupportsWrite, -) from typing_extensions import TypeVar, deprecated, override import numpy as np diff --git a/numpy/lib/_shape_base_impl.pyi b/numpy/lib/_shape_base_impl.pyi index a50d372bb97e..0206d95109fa 100644 --- a/numpy/lib/_shape_base_impl.pyi +++ b/numpy/lib/_shape_base_impl.pyi @@ -9,7 +9,6 @@ from typing import ( overload, type_check_only, ) - from typing_extensions import deprecated import numpy as np diff --git a/numpy/lib/_type_check_impl.pyi b/numpy/lib/_type_check_impl.pyi index 944015e423bb..b9ab2a02f5f5 100644 --- a/numpy/lib/_type_check_impl.pyi +++ b/numpy/lib/_type_check_impl.pyi @@ -1,8 +1,7 @@ +from _typeshed import Incomplete from collections.abc import Container, Iterable from typing import Any, Protocol, TypeAlias, overload, type_check_only from typing import Literal as L - -from _typeshed import Incomplete from typing_extensions import TypeVar import numpy as np diff --git a/numpy/lib/_user_array_impl.pyi b/numpy/lib/_user_array_impl.pyi index 13c0a0163421..0aeec42129af 100644 --- a/numpy/lib/_user_array_impl.pyi +++ b/numpy/lib/_user_array_impl.pyi @@ -1,7 +1,6 @@ +from _typeshed import Incomplete from types import EllipsisType from typing import Any, Generic, Self, SupportsIndex, TypeAlias, overload - -from _typeshed import Incomplete from typing_extensions import TypeVar, override import numpy as np diff --git a/numpy/lib/_utils_impl.pyi b/numpy/lib/_utils_impl.pyi index 6fbd26f88084..7a34f273c423 100644 --- a/numpy/lib/_utils_impl.pyi +++ b/numpy/lib/_utils_impl.pyi @@ -1,6 +1,5 @@ -from typing import LiteralString - from _typeshed import SupportsWrite +from typing import LiteralString from typing_extensions import TypeVar import numpy as np diff --git a/numpy/lib/recfunctions.pyi b/numpy/lib/recfunctions.pyi index 073642918af3..a0b49ba1df00 100644 --- a/numpy/lib/recfunctions.pyi +++ 
b/numpy/lib/recfunctions.pyi @@ -1,7 +1,6 @@ +from _typeshed import Incomplete from collections.abc import Callable, Iterable, Mapping, Sequence from typing import Any, Literal, TypeAlias, overload - -from _typeshed import Incomplete from typing_extensions import TypeVar import numpy as np diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index da4ad3b333db..de6db7873faa 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -1,10 +1,9 @@ # pyright: reportIncompatibleMethodOverride=false # ruff: noqa: ANN001, ANN002, ANN003, ANN201, ANN202 ANN204, ANN401 +from _typeshed import Incomplete from collections.abc import Sequence from typing import Any, Literal, NoReturn, Self, SupportsIndex, TypeAlias, overload - -from _typeshed import Incomplete from typing_extensions import TypeIs, TypeVar import numpy as np diff --git a/numpy/polynomial/_polybase.pyi b/numpy/polynomial/_polybase.pyi index 6d71a8cb8d2c..30c906fa3b4b 100644 --- a/numpy/polynomial/_polybase.pyi +++ b/numpy/polynomial/_polybase.pyi @@ -13,7 +13,6 @@ from typing import ( TypeAlias, overload, ) - from typing_extensions import TypeIs, TypeVar import numpy as np diff --git a/numpy/random/bit_generator.pyi b/numpy/random/bit_generator.pyi index 6ce4f4b9d6a1..ee4499dee1f3 100644 --- a/numpy/random/bit_generator.pyi +++ b/numpy/random/bit_generator.pyi @@ -1,4 +1,5 @@ import abc +from _typeshed import Incomplete from collections.abc import Callable, Mapping, Sequence from threading import Lock from typing import ( @@ -12,8 +13,6 @@ from typing import ( overload, type_check_only, ) - -from _typeshed import Incomplete from typing_extensions import CapsuleType import numpy as np diff --git a/numpy/testing/_private/utils.pyi b/numpy/testing/_private/utils.pyi index 4e3b60a0ef70..59a7539b69f1 100644 --- a/numpy/testing/_private/utils.pyi +++ b/numpy/testing/_private/utils.pyi @@ -3,6 +3,7 @@ import sys import types import unittest import warnings +from _typeshed import ConvertibleToFloat, GenericPath, 
StrOrBytesPath, StrPath from collections.abc import Callable, Iterable, Sequence from contextlib import _GeneratorContextManager from pathlib import Path @@ -23,10 +24,8 @@ from typing import ( type_check_only, ) from typing import Literal as L -from unittest.case import SkipTest - -from _typeshed import ConvertibleToFloat, GenericPath, StrOrBytesPath, StrPath from typing_extensions import TypeVar +from unittest.case import SkipTest import numpy as np from numpy._typing import ( diff --git a/numpy/testing/overrides.pyi b/numpy/testing/overrides.pyi index 3fefc3f350da..916154c155b1 100644 --- a/numpy/testing/overrides.pyi +++ b/numpy/testing/overrides.pyi @@ -1,6 +1,5 @@ from collections.abc import Callable, Hashable from typing import Any - from typing_extensions import TypeIs import numpy as np diff --git a/numpy/testing/print_coercion_tables.pyi b/numpy/testing/print_coercion_tables.pyi index c859305f2350..f463a18c05e4 100644 --- a/numpy/testing/print_coercion_tables.pyi +++ b/numpy/testing/print_coercion_tables.pyi @@ -1,6 +1,5 @@ from collections.abc import Iterable from typing import ClassVar, Generic, Self - from typing_extensions import TypeVar import numpy as np diff --git a/ruff.toml b/ruff.toml index deb52e834df9..7454c6c05e5b 100644 --- a/ruff.toml +++ b/ruff.toml @@ -16,6 +16,9 @@ extend-exclude = [ line-length = 88 +[format] +line-ending = "lf" + [lint] preview = true extend-select = [ @@ -115,3 +118,15 @@ ignore = [ "numpy/ma/core.pyi" = ["F403", "F405"] "numpy/matlib.py" = ["F405"] "numpy/matlib.pyi" = ["F811"] + +[lint.flake8-builtins] +builtins-allowed-modules = ["random", "typing"] + +[lint.flake8-import-conventions.extend-aliases] +"numpy" = "np" +"numpy.typing" = "npt" + +[lint.isort] +# these are treated as stdlib within .pyi stubs +extra-standard-library = ["_typeshed", "typing_extensions"] +known-first-party = ["numpy"] From 1ea01c57b9dfe7948eb05c13d9a379459e0896b4 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Thu, 12 Jun 2025 
02:25:09 +0200 Subject: [PATCH 0119/1718] MAINT: bump ``ruff`` to ``0.11.13`` (#29186) --- environment.yml | 2 +- requirements/linter_requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/environment.yml b/environment.yml index 5f1ee5e81a5f..770a83218133 100644 --- a/environment.yml +++ b/environment.yml @@ -45,7 +45,7 @@ dependencies: # NOTE: breathe 4.33.0 collides with sphinx.ext.graphviz - breathe>4.33.0 # For linting - - ruff=0.11.9 + - ruff=0.11.13 - gitpython # Used in some tests - cffi diff --git a/requirements/linter_requirements.txt b/requirements/linter_requirements.txt index 0716b235ec9c..45319571b561 100644 --- a/requirements/linter_requirements.txt +++ b/requirements/linter_requirements.txt @@ -1,3 +1,3 @@ # keep in sync with `environment.yml` -ruff==0.11.9 +ruff==0.11.13 GitPython>=3.1.30 From 303095ed53a46110b14ccb36e0f683cb961a1746 Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Thu, 12 Jun 2025 09:21:18 +0300 Subject: [PATCH 0120/1718] BUG: fix matmul with transposed out arg (#29179) * BUG: fix matmul with transposed out arg * DOC: add release note * fixes from review --- doc/release/upcoming_changes/29179.change.rst | 4 ++++ doc/source/release/2.3.0-notes.rst | 6 ++++++ numpy/_core/src/umath/matmul.c.src | 2 +- numpy/_core/tests/test_multiarray.py | 6 ++++++ 4 files changed, 17 insertions(+), 1 deletion(-) create mode 100644 doc/release/upcoming_changes/29179.change.rst diff --git a/doc/release/upcoming_changes/29179.change.rst b/doc/release/upcoming_changes/29179.change.rst new file mode 100644 index 000000000000..12eb6804d3dd --- /dev/null +++ b/doc/release/upcoming_changes/29179.change.rst @@ -0,0 +1,4 @@ +Fix bug in ``matmul`` for non-contiguous out kwarg parameter +------------------------------------------------------------ +In some cases, if ``out`` was non-contiguous, ``np.matmul`` would cause +memory corruption or a c-level assert. This was new to v2.3.0 and fixed in v2.3.1. 
diff --git a/doc/source/release/2.3.0-notes.rst b/doc/source/release/2.3.0-notes.rst index faad9ffcc8eb..4c3c923b3b5e 100644 --- a/doc/source/release/2.3.0-notes.rst +++ b/doc/source/release/2.3.0-notes.rst @@ -414,6 +414,12 @@ the best performance. (`gh-28769 `__) +Performance improvements for ``np.matmul`` +------------------------------------------ +Enable using BLAS for ``matmul`` even when operands are non-contiguous by copying +if needed. + +(`gh-23752 `__) Changes ======= diff --git a/numpy/_core/src/umath/matmul.c.src b/numpy/_core/src/umath/matmul.c.src index d9be7b1d6826..02c4fde56bf2 100644 --- a/numpy/_core/src/umath/matmul.c.src +++ b/numpy/_core/src/umath/matmul.c.src @@ -596,7 +596,7 @@ NPY_NO_EXPORT void * Use transpose equivalence: * matmul(a, b, o) == matmul(b.T, a.T, o.T) */ - if (o_f_blasable) { + if (o_transpose) { @TYPE@_matmul_matrixmatrix( ip2_, is2_p_, is2_n_, ip1_, is1_n_, is1_m_, diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 146431c7a7c0..11bb80ac1985 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -7317,6 +7317,12 @@ def test_dot_equivalent(self, args): r3 = np.matmul(args[0].copy(), args[1].copy()) assert_equal(r1, r3) + # matrix matrix, issue 29164 + if [len(args[0].shape), len(args[1].shape)] == [2, 2]: + out_f = np.zeros((r2.shape[0] * 2, r2.shape[1] * 2), order='F') + r4 = np.matmul(*args, out=out_f[::2, ::2]) + assert_equal(r2, r4) + def test_matmul_object(self): import fractions From a811c05394a2a052a5b95b130f415bea4ccd542e Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Thu, 12 Jun 2025 14:36:00 +0200 Subject: [PATCH 0121/1718] TYP: add ``__all__`` in ``numpy._core.__init__`` (#29187) Ported from numpy/numtype#156 --- numpy/_core/__init__.pyi | 668 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 666 insertions(+), 2 deletions(-) diff --git a/numpy/_core/__init__.pyi b/numpy/_core/__init__.pyi index 
40d9c411b97c..a8884917f34a 100644 --- a/numpy/_core/__init__.pyi +++ b/numpy/_core/__init__.pyi @@ -1,2 +1,666 @@ -# NOTE: The `np._core` namespace is deliberately kept empty due to it -# being private +# keep in sync with https://github.com/numpy/numtype/blob/main/src/numpy-stubs/_core/__init__.pyi + +from ._asarray import require +from ._ufunc_config import ( + errstate, + getbufsize, + geterr, + geterrcall, + setbufsize, + seterr, + seterrcall, +) +from .arrayprint import ( + array2string, + array_repr, + array_str, + format_float_positional, + format_float_scientific, + get_printoptions, + printoptions, + set_printoptions, +) +from .einsumfunc import einsum, einsum_path +from .fromnumeric import ( + all, + amax, + amin, + any, + argmax, + argmin, + argpartition, + argsort, + around, + choose, + clip, + compress, + cumprod, + cumsum, + cumulative_prod, + cumulative_sum, + diagonal, + matrix_transpose, + max, + mean, + min, + ndim, + nonzero, + partition, + prod, + ptp, + put, + ravel, + repeat, + reshape, + resize, + round, + searchsorted, + shape, + size, + sort, + squeeze, + std, + sum, + swapaxes, + take, + trace, + transpose, + var, +) +from .fromnumeric import transpose as permute_dims +from .function_base import geomspace, linspace, logspace +from .getlimits import finfo, iinfo +from .memmap import memmap +from .numeric import ( + False_, + True_, + allclose, + arange, + argwhere, + array, + array_equal, + array_equiv, + asanyarray, + asarray, + ascontiguousarray, + asfortranarray, + astype, + base_repr, + binary_repr, + bitwise_not, + broadcast, + can_cast, + concatenate, + convolve, + copyto, + correlate, + count_nonzero, + cross, + dot, + dtype, + empty, + empty_like, + flatiter, + flatnonzero, + from_dlpack, + frombuffer, + fromfile, + fromfunction, + fromiter, + fromstring, + full, + full_like, + identity, + indices, + inf, + inner, + isclose, + isfortran, + isscalar, + lexsort, + little_endian, + matmul, + may_share_memory, + min_scalar_type, + 
moveaxis, + nan, + ndarray, + nditer, + nested_iters, + newaxis, + ones, + ones_like, + outer, + promote_types, + putmask, + result_type, + roll, + rollaxis, + shares_memory, + tensordot, + ufunc, + vdot, + vecdot, + where, + zeros, + zeros_like, +) +from .numeric import concatenate as concat +from .numerictypes import ( + ScalarType, + bool, + bool_, + busday_count, + busday_offset, + busdaycalendar, + byte, + bytes_, + cdouble, + character, + clongdouble, + complex64, + complex128, + complex192, + complex256, + complexfloating, + csingle, + datetime64, + datetime_as_string, + datetime_data, + double, + flexible, + float16, + float32, + float64, + float96, + float128, + floating, + generic, + half, + inexact, + int8, + int16, + int32, + int64, + int_, + intc, + integer, + intp, + is_busday, + isdtype, + issubdtype, + long, + longdouble, + longlong, + number, + object_, + sctypeDict, + short, + signedinteger, + single, + str_, + timedelta64, + typecodes, + ubyte, + uint, + uint8, + uint16, + uint32, + uint64, + uintc, + uintp, + ulong, + ulonglong, + unsignedinteger, + ushort, + void, +) +from .records import recarray, record +from .shape_base import ( + atleast_1d, + atleast_2d, + atleast_3d, + block, + hstack, + stack, + unstack, + vstack, +) +from .umath import ( + absolute, + add, + arccos, + arccosh, + arcsin, + arcsinh, + arctan, + arctan2, + arctanh, + bitwise_and, + bitwise_count, + bitwise_or, + bitwise_xor, + cbrt, + ceil, + conj, + conjugate, + copysign, + cos, + cosh, + deg2rad, + degrees, + divide, + divmod, + e, + equal, + euler_gamma, + exp, + exp2, + expm1, + fabs, + float_power, + floor, + floor_divide, + fmax, + fmin, + fmod, + frexp, + frompyfunc, + gcd, + greater, + greater_equal, + heaviside, + hypot, + invert, + isfinite, + isinf, + isnan, + isnat, + lcm, + ldexp, + left_shift, + less, + less_equal, + log, + log1p, + log2, + log10, + logaddexp, + logaddexp2, + logical_and, + logical_not, + logical_or, + logical_xor, + matvec, + maximum, + 
minimum, + mod, + modf, + multiply, + negative, + nextafter, + not_equal, + pi, + positive, + power, + rad2deg, + radians, + reciprocal, + remainder, + right_shift, + rint, + sign, + signbit, + sin, + sinh, + spacing, + sqrt, + square, + subtract, + tan, + tanh, + true_divide, + trunc, + vecmat, +) +from .umath import absolute as abs +from .umath import arccos as acos +from .umath import arccosh as acosh +from .umath import arcsin as asin +from .umath import arcsinh as asinh +from .umath import arctan as atan +from .umath import arctan2 as atan2 +from .umath import arctanh as atanh +from .umath import invert as bitwise_invert +from .umath import left_shift as bitwise_left_shift +from .umath import power as pow +from .umath import right_shift as bitwise_right_shift + +__all__ = [ + "False_", + "ScalarType", + "True_", + "abs", + "absolute", + "acos", + "acosh", + "add", + "all", + "allclose", + "amax", + "amin", + "any", + "arange", + "arccos", + "arccosh", + "arcsin", + "arcsinh", + "arctan", + "arctan2", + "arctanh", + "argmax", + "argmin", + "argpartition", + "argsort", + "argwhere", + "around", + "array", + "array2string", + "array_equal", + "array_equiv", + "array_repr", + "array_str", + "asanyarray", + "asarray", + "ascontiguousarray", + "asfortranarray", + "asin", + "asinh", + "astype", + "atan", + "atan2", + "atanh", + "atleast_1d", + "atleast_2d", + "atleast_3d", + "base_repr", + "binary_repr", + "bitwise_and", + "bitwise_count", + "bitwise_invert", + "bitwise_left_shift", + "bitwise_not", + "bitwise_or", + "bitwise_right_shift", + "bitwise_xor", + "block", + "bool", + "bool_", + "broadcast", + "busday_count", + "busday_offset", + "busdaycalendar", + "byte", + "bytes_", + "can_cast", + "cbrt", + "cdouble", + "ceil", + "character", + "choose", + "clip", + "clongdouble", + "complex64", + "complex128", + "complex192", + "complex256", + "complexfloating", + "compress", + "concat", + "concatenate", + "conj", + "conjugate", + "convolve", + "copysign", + "copyto", 
+ "correlate", + "cos", + "cosh", + "count_nonzero", + "cross", + "csingle", + "cumprod", + "cumsum", + "cumulative_prod", + "cumulative_sum", + "datetime64", + "datetime_as_string", + "datetime_data", + "deg2rad", + "degrees", + "diagonal", + "divide", + "divmod", + "dot", + "double", + "dtype", + "e", + "einsum", + "einsum_path", + "empty", + "empty_like", + "equal", + "errstate", + "euler_gamma", + "exp", + "exp2", + "expm1", + "fabs", + "finfo", + "flatiter", + "flatnonzero", + "flexible", + "float16", + "float32", + "float64", + "float96", + "float128", + "float_power", + "floating", + "floor", + "floor_divide", + "fmax", + "fmin", + "fmod", + "format_float_positional", + "format_float_scientific", + "frexp", + "from_dlpack", + "frombuffer", + "fromfile", + "fromfunction", + "fromiter", + "frompyfunc", + "fromstring", + "full", + "full_like", + "gcd", + "generic", + "geomspace", + "get_printoptions", + "getbufsize", + "geterr", + "geterrcall", + "greater", + "greater_equal", + "half", + "heaviside", + "hstack", + "hypot", + "identity", + "iinfo", + "indices", + "inexact", + "inf", + "inner", + "int8", + "int16", + "int32", + "int64", + "int_", + "intc", + "integer", + "intp", + "invert", + "is_busday", + "isclose", + "isdtype", + "isfinite", + "isfortran", + "isinf", + "isnan", + "isnat", + "isscalar", + "issubdtype", + "lcm", + "ldexp", + "left_shift", + "less", + "less_equal", + "lexsort", + "linspace", + "little_endian", + "log", + "log1p", + "log2", + "log10", + "logaddexp", + "logaddexp2", + "logical_and", + "logical_not", + "logical_or", + "logical_xor", + "logspace", + "long", + "longdouble", + "longlong", + "matmul", + "matrix_transpose", + "matvec", + "max", + "maximum", + "may_share_memory", + "mean", + "memmap", + "min", + "min_scalar_type", + "minimum", + "mod", + "modf", + "moveaxis", + "multiply", + "nan", + "ndarray", + "ndim", + "nditer", + "negative", + "nested_iters", + "newaxis", + "nextafter", + "nonzero", + "not_equal", + "number", + 
"object_", + "ones", + "ones_like", + "outer", + "partition", + "permute_dims", + "pi", + "positive", + "pow", + "power", + "printoptions", + "prod", + "promote_types", + "ptp", + "put", + "putmask", + "rad2deg", + "radians", + "ravel", + "recarray", + "reciprocal", + "record", + "remainder", + "repeat", + "require", + "reshape", + "resize", + "result_type", + "right_shift", + "rint", + "roll", + "rollaxis", + "round", + "sctypeDict", + "searchsorted", + "set_printoptions", + "setbufsize", + "seterr", + "seterrcall", + "shape", + "shares_memory", + "short", + "sign", + "signbit", + "signedinteger", + "sin", + "single", + "sinh", + "size", + "sort", + "spacing", + "sqrt", + "square", + "squeeze", + "stack", + "std", + "str_", + "subtract", + "sum", + "swapaxes", + "take", + "tan", + "tanh", + "tensordot", + "timedelta64", + "trace", + "transpose", + "true_divide", + "trunc", + "typecodes", + "ubyte", + "ufunc", + "uint", + "uint8", + "uint16", + "uint32", + "uint64", + "uintc", + "uintp", + "ulong", + "ulonglong", + "unsignedinteger", + "unstack", + "ushort", + "var", + "vdot", + "vecdot", + "vecmat", + "void", + "vstack", + "where", + "zeros", + "zeros_like", +] From b88c248f92442a0df7ecd24ad887053501af979c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 12 Jun 2025 17:36:23 +0000 Subject: [PATCH 0122/1718] MAINT: Bump github/codeql-action from 3.28.19 to 3.29.0 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.28.19 to 3.29.0. 
- [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/fca7ace96b7d713c7035871441bd52efbe39e27e...ce28f5bb42b7a9f2c824e633a3f6ee835bab6858) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.29.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index e0318652d2af..0342fd92c924 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@fca7ace96b7d713c7035871441bd52efbe39e27e # v3.28.19 + uses: github/codeql-action/init@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3.29.0 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@fca7ace96b7d713c7035871441bd52efbe39e27e # v3.28.19 + uses: github/codeql-action/autobuild@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3.29.0 # ℹ️ Command-line programs to run using the OS shell. 
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@fca7ace96b7d713c7035871441bd52efbe39e27e # v3.28.19 + uses: github/codeql-action/analyze@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3.29.0 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 9e21251f87c8..e64789006e2c 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@fca7ace96b7d713c7035871441bd52efbe39e27e # v2.1.27 + uses: github/codeql-action/upload-sarif@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v2.1.27 with: sarif_file: results.sarif From 8c2dec1f824ecd366f88e423ef86a6507b1a329f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fran=C3=A7ois=20Rozet?= Date: Fri, 13 Jun 2025 11:44:29 +0200 Subject: [PATCH 0123/1718] BUG: Revert `np.vectorize` casting to legacy behavior (#29196) Revert use of no `dtype=object` to ensure correct cast behavior when the output dtype is discovered. Co-authored-by: Sebastian Berg --- numpy/lib/_function_base_impl.py | 4 +++- numpy/lib/tests/test_function_base.py | 9 +++++++++ 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index f217af64fb4b..096043e6316f 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -2573,6 +2573,7 @@ def _get_ufunc_and_otypes(self, func, args): # the subsequent call when the ufunc is evaluated. 
# Assumes that ufunc first evaluates the 0th elements in the input # arrays (the input values are not checked to ensure this) + args = [asarray(a) for a in args] if builtins.any(arg.size == 0 for arg in args): raise ValueError('cannot call `vectorize` on size 0 inputs ' 'unless `otypes` is set') @@ -2618,8 +2619,9 @@ def _vectorize_call(self, func, args): elif not args: res = func() else: - args = [asanyarray(a, dtype=object) for a in args] ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args) + # gh-29196: `dtype=object` should eventually be removed + args = [asanyarray(a, dtype=object) for a in args] outputs = ufunc(*args, out=...) if ufunc.nout == 1: diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 50c61e6e04fa..f2dba193c849 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -1732,6 +1732,15 @@ def test_string_ticket_1892(self): s = '0123456789' * 10 assert_equal(s, f(s)) + def test_dtype_promotion_gh_29189(self): + # dtype should not be silently promoted (int32 -> int64) + dtypes = [np.int16, np.int32, np.int64, np.float16, np.float32, np.float64] + + for dtype in dtypes: + x = np.asarray([1, 2, 3], dtype=dtype) + y = np.vectorize(lambda x: x + x)(x) + assert x.dtype == y.dtype + def test_cache(self): # Ensure that vectorized func called exactly once per argument. _calls = [0] From 79deb8f21ebe83c0d5e0f4bf2fb53887a31ad5cf Mon Sep 17 00:00:00 2001 From: Ben Woodruff Date: Fri, 13 Jun 2025 08:07:53 -0600 Subject: [PATCH 0124/1718] DOC: Suppress distutils doc build warnings for python 3.12+ (#29160) Enables an error free build of the docs for python 3.12+. Excludes related files, supresses all warnings for excluded files, and ignores case-by-case individual references. Closes #29131. 
[skip azp][skip cirrus][skip actions] --- doc/source/conf.py | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/doc/source/conf.py b/doc/source/conf.py index e3146bf768c9..eba0bd014fb0 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -150,8 +150,26 @@ class PyTypeObject(ctypes.Structure): exclude_dirs = [] exclude_patterns = [] +suppress_warnings = [] +nitpick_ignore = [] + if sys.version_info[:2] >= (3, 12): - exclude_patterns += ["reference/distutils.rst"] + exclude_patterns += [ + "reference/distutils.rst", + "reference/distutils/misc_util.rst", + ] + suppress_warnings += [ + 'toc.excluded', # Suppress warnings about excluded toctree entries + ] + nitpicky = True + nitpick_ignore += [ + ('ref', 'numpy-distutils-refguide'), + # The first ignore is not catpured without nitpicky = True. + # These three ignores are required once nitpicky = True is set. + ('py:mod', 'numpy.distutils'), + ('py:class', 'Extension'), + ('py:class', 'numpy.distutils.misc_util.Configuration'), + ] # If true, '()' will be appended to :func: etc. cross-reference text. 
add_function_parentheses = False @@ -612,7 +630,7 @@ class NumPyLexer(CLexer): breathe_default_members = ("members", "undoc-members", "protected-members") # See https://github.com/breathe-doc/breathe/issues/696 -nitpick_ignore = [ +nitpick_ignore += [ ('c:identifier', 'FILE'), ('c:identifier', 'size_t'), ('c:identifier', 'PyHeapTypeObject'), From 0b74c624e2791d5ee9ae3bec59e5b101d7e2f864 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Fri, 13 Jun 2025 21:04:39 +0200 Subject: [PATCH 0125/1718] TYP: fix ``ndarray.__array__`` annotation for ``copy`` (#29204) --- numpy/__init__.pyi | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index d71a76569ad7..272e52f88e83 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2069,13 +2069,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __class_getitem__(cls, item: Any, /) -> GenericAlias: ... @overload - def __array__( - self, dtype: None = ..., /, *, copy: bool | None = ... - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __array__(self, dtype: None = None, /, *, copy: builtins.bool | None = None) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __array__( - self, dtype: _DTypeT, /, *, copy: bool | None = ... - ) -> ndarray[_ShapeT_co, _DTypeT]: ... + def __array__(self, dtype: _DTypeT, /, *, copy: builtins.bool | None = None) -> ndarray[_ShapeT_co, _DTypeT]: ... 
def __array_ufunc__( self, From 470943bf78824b12081df5b8c097c3511d42175f Mon Sep 17 00:00:00 2001 From: Michael Date: Fri, 13 Jun 2025 21:11:29 +0200 Subject: [PATCH 0126/1718] TST: additional tests for matmul with non-contiguous input and output (#29197) --- numpy/_core/tests/test_multiarray.py | 32 +++++++++++++++++++++++----- 1 file changed, 27 insertions(+), 5 deletions(-) diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 11bb80ac1985..f66109a3d8b5 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -7317,11 +7317,33 @@ def test_dot_equivalent(self, args): r3 = np.matmul(args[0].copy(), args[1].copy()) assert_equal(r1, r3) - # matrix matrix, issue 29164 - if [len(args[0].shape), len(args[1].shape)] == [2, 2]: - out_f = np.zeros((r2.shape[0] * 2, r2.shape[1] * 2), order='F') - r4 = np.matmul(*args, out=out_f[::2, ::2]) - assert_equal(r2, r4) + # issue 29164 with extra checks + @pytest.mark.parametrize('dtype', ( + np.float32, np.float64, np.complex64, np.complex128 + )) + def test_dot_equivalent_matrix_matrix_blastypes(self, dtype): + modes = list(itertools.product(['C', 'F'], [True, False])) + + def apply_mode(m, mode): + order, is_contiguous = mode + if is_contiguous: + return m.copy() if order == 'C' else m.T.copy().T + + retval = np.zeros( + (m.shape[0] * 2, m.shape[1] * 2), dtype=m.dtype, order=order + )[::2, ::2] + retval[...] 
= m + return retval + + is_complex = np.issubdtype(dtype, np.complexfloating) + m1 = self.m1.astype(dtype) + (1j if is_complex else 0) + m2 = self.m2.astype(dtype) + (1j if is_complex else 0) + dot_res = np.dot(m1, m2) + mo = np.zeros_like(dot_res) + + for mode in itertools.product(*[modes]*3): + m1_, m2_, mo_ = [apply_mode(*x) for x in zip([m1, m2, mo], mode)] + assert_equal(np.matmul(m1_, m2_, out=mo_), dot_res) def test_matmul_object(self): import fractions From 2f4ace73ee323932f2e5b9e32da696b47e7bda51 Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Sun, 15 Jun 2025 15:21:03 +0300 Subject: [PATCH 0127/1718] BUG: fix linting (#29210) --- numpy/_core/tests/test_multiarray.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index f66109a3d8b5..b164f1dada3b 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -7333,7 +7333,7 @@ def apply_mode(m, mode): (m.shape[0] * 2, m.shape[1] * 2), dtype=m.dtype, order=order )[::2, ::2] retval[...] 
= m - return retval + return retval is_complex = np.issubdtype(dtype, np.complexfloating) m1 = self.m1.astype(dtype) + (1j if is_complex else 0) @@ -7341,7 +7341,7 @@ def apply_mode(m, mode): dot_res = np.dot(m1, m2) mo = np.zeros_like(dot_res) - for mode in itertools.product(*[modes]*3): + for mode in itertools.product(*[modes] * 3): m1_, m2_, mo_ = [apply_mode(*x) for x in zip([m1, m2, mo], mode)] assert_equal(np.matmul(m1_, m2_, out=mo_), dot_res) From aea8869549c110c46afbe75ed2e162b702d3d106 Mon Sep 17 00:00:00 2001 From: Mugundanmcw Date: Tue, 17 Jun 2025 16:09:16 +0530 Subject: [PATCH 0128/1718] CI: Add WoA validation setup to windows.yml --- .github/workflows/windows.yml | 74 +++++++++++++++++++++++++++++++++++ 1 file changed, 74 insertions(+) diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index e760e37780a7..18d02081fd67 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -85,6 +85,80 @@ jobs: run: | spin test -- --timeout=600 --durations=10 + python64bit_openblas_winarm64: + name: arm64, LPARM64 OpenBLAS + runs-on: windows-11-arm + # To enable this job on a fork, comment out: + if: github.repository == 'numpy/numpy' + strategy: + fail-fast: false + matrix: + compiler-pyversion: + - ["MSVC", "3.11"] + - ["Clang-cl", "3.14t-dev"] + + steps: + - name: Checkout + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: recursive + fetch-tags: true + persist-credentials: false + + - name: Setup Python + uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + with: + python-version: ${{ matrix.compiler-pyversion[1] }} + architecture: arm64 + + - name: Setup MSVC + if: matrix.compiler-pyversion[0] == 'MSVC' + uses: bus1/cabuild/action/msdevshell@e22aba57d6e74891d059d66501b6b5aed8123c4d # v1 + with: + architecture: arm64 + + - name: Install build dependencies from PyPI + run: | + pip install -r requirements/build_requirements.txt + + - name: 
Install pkg-config + run: | + choco install -y --stoponfirstfailure --checksum 6004DF17818F5A6DBF19CB335CC92702 pkgconfiglite + echo "PKG_CONFIG_PATH=${{ github.workspace }}/.openblas" >> $env:GITHUB_ENV + + - name: Install Clang-cl + if: matrix.compiler-pyversion[0] == 'Clang-cl' + run: | + uses: ./.github/windows_arm64_steps + + - name: Install NumPy (MSVC) + if: matrix.compiler-pyversion[0] == 'MSVC' + run: | + pip install -r requirements/ci_requirements.txt + spin build --with-scipy-openblas=32 -j2 -- --vsenv + + - name: Install NumPy (Clang-cl) + if: matrix.compiler-pyversion[0] == 'Clang-cl' + run: | + "[binaries]","c = 'clang-cl'","cpp = 'clang-cl'","ar = 'llvm-lib'","c_ld = 'lld-link'","cpp_ld = 'lld-link'" | Out-File $PWD/clang-cl-arm64.ini -Encoding ascii + pip install -r requirements/ci_requirements.txt + spin build --with-scipy-openblas=32 -j2 -- --vsenv --native-file=$PWD/clang-cl-arm64.ini + + - name: Meson Log + shell: bash + if: ${{ failure() }} + run: | + cat build/meson-logs/meson-log.txt + + - name: Install test dependencies + run: | + python -m pip install -r requirements/test_requirements.txt + python -m pip install threadpoolctl + + - name: Run test suite + run: | + spin test -- --timeout=600 --durations=10 + msvc_python_no_openblas: name: MSVC, ${{ matrix.architecture }} Python , no BLAS runs-on: ${{ matrix.os }} From d8c4f2ba8604fbb0e7f22a5df8ea3a355a53385e Mon Sep 17 00:00:00 2001 From: Mugundanmcw Date: Tue, 17 Jun 2025 16:12:41 +0530 Subject: [PATCH 0129/1718] CI: Create action.yml for LLVM Win-ARM64 as reusable blocks --- .github/windows_arm64_steps /action.yml | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100644 .github/windows_arm64_steps /action.yml diff --git a/.github/windows_arm64_steps /action.yml b/.github/windows_arm64_steps /action.yml new file mode 100644 index 000000000000..86517b6246e7 --- /dev/null +++ b/.github/windows_arm64_steps /action.yml @@ -0,0 +1,16 @@ +name: Build Dependencies(Win-ARM64) 
+description: "Setup LLVM for Win-ARM64 builds" + +runs: + using: "composite" + steps: + - name: Install LLVM + shell: pwsh + run: | + Invoke-WebRequest https://github.com/llvm/llvm-project/releases/download/llvmorg-20.1.6/LLVM-20.1.6-woa64.exe -UseBasicParsing -OutFile LLVM-woa64.exe + Start-Process -FilePath ".\LLVM-woa64.exe" -ArgumentList "/S" -Wait + echo "C:\Program Files\LLVM\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append + echo "CC=clang-cl" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append + echo "CXX=clang-cl" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append + echo "FC=flang-new" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append + From f02bb58cb634286739a1c5eb9dc35bc0e086efa7 Mon Sep 17 00:00:00 2001 From: Mugundanmcw Date: Tue, 17 Jun 2025 16:16:56 +0530 Subject: [PATCH 0130/1718] CI: Modify wheel.yml to use clang-cl for Win-ARM64 --- .github/workflows/wheels.yml | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 68352eb1fc7c..ab6bbb618899 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -127,11 +127,9 @@ jobs: with: architecture: 'x86' - - name: Setup MSVC arm64 + - name: Setup LLVM for Windows ARM64 if: ${{ matrix.buildplat[1] == 'win_arm64' }} - uses: bus1/cabuild/action/msdevshell@e22aba57d6e74891d059d66501b6b5aed8123c4d # v1 - with: - architecture: 'arm64' + uses: ./.github/windows_arm64_steps - name: pkg-config-for-win run: | From fb0dff6061addfa3b7d2cba9ecc4dfedf5dad954 Mon Sep 17 00:00:00 2001 From: Mugundanmcw Date: Tue, 17 Jun 2025 16:32:03 +0530 Subject: [PATCH 0131/1718] CI: fix action.yml naming --- .github/{windows_arm64_steps => windows_arm64_steps}/action.yml | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename .github/{windows_arm64_steps => windows_arm64_steps}/action.yml (100%) diff --git a/.github/windows_arm64_steps /action.yml 
b/.github/windows_arm64_steps/action.yml similarity index 100% rename from .github/windows_arm64_steps /action.yml rename to .github/windows_arm64_steps/action.yml From f36c0daa423244ca6887ab426e3c6149a13a2667 Mon Sep 17 00:00:00 2001 From: Mugundanmcw Date: Tue, 17 Jun 2025 16:40:39 +0530 Subject: [PATCH 0132/1718] CI: Fix reusable LLVM block --- .github/workflows/windows.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 18d02081fd67..e723b787a5de 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -128,8 +128,7 @@ jobs: - name: Install Clang-cl if: matrix.compiler-pyversion[0] == 'Clang-cl' - run: | - uses: ./.github/windows_arm64_steps + uses: ./.github/windows_arm64_steps - name: Install NumPy (MSVC) if: matrix.compiler-pyversion[0] == 'MSVC' From 03d57303675386998ad005d28c7c3ca7177694d1 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Tue, 17 Jun 2025 17:49:15 +0200 Subject: [PATCH 0133/1718] MAINT: Fix some undef warnings (#29216) As noted by Chuck in gh-29138, there are some undef warnings that seem not nice this should fix them. The fact that `NPY_LONG`, etc. are not defined at macro expansion time is a bit of a trap, maybe it would be nice to have CI fail for this... 
--- numpy/_core/src/_simd/_simd.c | 2 +- numpy/_core/src/multiarray/descriptor.c | 2 +- numpy/_core/src/umath/_operand_flag_tests.c | 2 +- numpy/_core/src/umath/_rational_tests.c | 2 +- numpy/_core/src/umath/_struct_ufunc_tests.c | 2 +- numpy/_core/src/umath/_umath_tests.c.src | 2 +- .../_core/tests/examples/limited_api/limited_api_latest.c | 8 ++++---- numpy/f2py/rules.py | 2 +- numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c | 2 +- 9 files changed, 12 insertions(+), 12 deletions(-) diff --git a/numpy/_core/src/_simd/_simd.c b/numpy/_core/src/_simd/_simd.c index 2f0a5df6375c..d25d7bbf1c38 100644 --- a/numpy/_core/src/_simd/_simd.c +++ b/numpy/_core/src/_simd/_simd.c @@ -88,7 +88,7 @@ PyMODINIT_FUNC PyInit__simd(void) NPY_MTARGETS_CONF_DISPATCH(NPY_CPU_HAVE, ATTACH_MODULE, MAKE_MSVC_HAPPY) NPY_MTARGETS_CONF_BASELINE(ATTACH_BASELINE_MODULE, MAKE_MSVC_HAPPY) -#if Py_GIL_DISABLED +#ifdef Py_GIL_DISABLED // signal this module supports running with the GIL disabled PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); #endif diff --git a/numpy/_core/src/multiarray/descriptor.c b/numpy/_core/src/multiarray/descriptor.c index 5708e5c6ecb7..f520e3c4bceb 100644 --- a/numpy/_core/src/multiarray/descriptor.c +++ b/numpy/_core/src/multiarray/descriptor.c @@ -2095,7 +2095,7 @@ static PyMemberDef arraydescr_members[] = { {"alignment", T_PYSSIZET, offsetof(PyArray_Descr, alignment), READONLY, NULL}, {"flags", -#if NPY_ULONGLONG == NPY_UINT64 +#if NPY_SIZEOF_LONGLONG == 8 T_ULONGLONG, offsetof(PyArray_Descr, flags), READONLY, NULL}, #else #error Assuming long long is 64bit, if not replace with getter function. 
diff --git a/numpy/_core/src/umath/_operand_flag_tests.c b/numpy/_core/src/umath/_operand_flag_tests.c index 9747b7946512..5cdff6220280 100644 --- a/numpy/_core/src/umath/_operand_flag_tests.c +++ b/numpy/_core/src/umath/_operand_flag_tests.c @@ -77,7 +77,7 @@ PyMODINIT_FUNC PyInit__operand_flag_tests(void) ((PyUFuncObject*)ufunc)->iter_flags = NPY_ITER_REDUCE_OK; PyModule_AddObject(m, "inplace_add", (PyObject*)ufunc); -#if Py_GIL_DISABLED +#ifdef Py_GIL_DISABLED // signal this module supports running with the GIL disabled PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); #endif diff --git a/numpy/_core/src/umath/_rational_tests.c b/numpy/_core/src/umath/_rational_tests.c index a95c89b373df..d257bc22d051 100644 --- a/numpy/_core/src/umath/_rational_tests.c +++ b/numpy/_core/src/umath/_rational_tests.c @@ -1355,7 +1355,7 @@ PyMODINIT_FUNC PyInit__rational_tests(void) { GCD_LCM_UFUNC(gcd,NPY_INT64,"greatest common denominator of two integers"); GCD_LCM_UFUNC(lcm,NPY_INT64,"least common multiple of two integers"); -#if Py_GIL_DISABLED +#ifdef Py_GIL_DISABLED // signal this module supports running with the GIL disabled PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); #endif diff --git a/numpy/_core/src/umath/_struct_ufunc_tests.c b/numpy/_core/src/umath/_struct_ufunc_tests.c index 8edbdc00b6f3..56c4be117e44 100644 --- a/numpy/_core/src/umath/_struct_ufunc_tests.c +++ b/numpy/_core/src/umath/_struct_ufunc_tests.c @@ -157,7 +157,7 @@ PyMODINIT_FUNC PyInit__struct_ufunc_tests(void) PyDict_SetItemString(d, "add_triplet", add_triplet); Py_DECREF(add_triplet); -#if Py_GIL_DISABLED +#ifdef Py_GIL_DISABLED // signal this module supports running with the GIL disabled PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); #endif diff --git a/numpy/_core/src/umath/_umath_tests.c.src b/numpy/_core/src/umath/_umath_tests.c.src index 9f2818d14526..845f51ebc94f 100644 --- a/numpy/_core/src/umath/_umath_tests.c.src +++ b/numpy/_core/src/umath/_umath_tests.c.src @@ -944,7 +944,7 @@ 
PyMODINIT_FUNC PyInit__umath_tests(void) { // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -#if Py_GIL_DISABLED +#ifdef Py_GIL_DISABLED // signal this module supports running with the GIL disabled PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); #endif diff --git a/numpy/_core/tests/examples/limited_api/limited_api_latest.c b/numpy/_core/tests/examples/limited_api/limited_api_latest.c index 13668f2f0ebf..92d83ea977a1 100644 --- a/numpy/_core/tests/examples/limited_api/limited_api_latest.c +++ b/numpy/_core/tests/examples/limited_api/limited_api_latest.c @@ -1,11 +1,11 @@ -#if Py_LIMITED_API != PY_VERSION_HEX & 0xffff0000 - # error "Py_LIMITED_API not defined to Python major+minor version" -#endif - #include #include #include +#if Py_LIMITED_API != PY_VERSION_HEX & 0xffff0000 + # error "Py_LIMITED_API not defined to Python major+minor version" +#endif + static PyModuleDef moduledef = { .m_base = PyModuleDef_HEAD_INIT, .m_name = "limited_api_latest" diff --git a/numpy/f2py/rules.py b/numpy/f2py/rules.py index 667ef287f92b..68c49e60028e 100644 --- a/numpy/f2py/rules.py +++ b/numpy/f2py/rules.py @@ -286,7 +286,7 @@ #initcommonhooks# #interface_usercode# -#if Py_GIL_DISABLED +#ifdef Py_GIL_DISABLED // signal whether this module supports running with the GIL disabled PyUnstable_Module_SetGIL(m , #gil_used#); #endif diff --git a/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c b/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c index b66672a43e21..25866f1a40ec 100644 --- a/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c +++ b/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c @@ -223,7 +223,7 @@ PyMODINIT_FUNC PyInit_test_array_from_pyobj_ext(void) { on_exit(f2py_report_on_exit,(void*)"array_from_pyobj.wrap.call"); #endif -#if Py_GIL_DISABLED +#ifdef Py_GIL_DISABLED // signal whether this module supports running with the GIL disabled PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); #endif From 3f99204758206967cce4af0f460de4946443f62d Mon Sep 
17 00:00:00 2001 From: Joren Hammudoglu Date: Tue, 17 Jun 2025 19:30:32 +0200 Subject: [PATCH 0134/1718] MAINT: bump `mypy` to `1.16.1` (#29219) --- environment.yml | 2 +- requirements/test_requirements.txt | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/environment.yml b/environment.yml index 770a83218133..75c6626abaf8 100644 --- a/environment.yml +++ b/environment.yml @@ -25,7 +25,7 @@ dependencies: - hypothesis # For type annotations - typing_extensions>=4.5.0 - - mypy=1.16.0 + - mypy=1.16.1 - orjson # makes mypy faster # For building docs - sphinx>=4.5.0 diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index 17260753db4a..e50919ef4f7b 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -12,9 +12,8 @@ pytest-timeout # For testing types. Notes on the restrictions: # - Mypy relies on C API features not present in PyPy # NOTE: Keep mypy in sync with environment.yml -mypy==1.16.0; platform_python_implementation != "PyPy" +mypy==1.16.1; platform_python_implementation != "PyPy" typing_extensions>=4.5.0 # for optional f2py encoding detection charset-normalizer tzdata - From f6a17f099ba3ae2acabc288bf2297fd821725ffc Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Tue, 17 Jun 2025 19:31:38 +0200 Subject: [PATCH 0135/1718] TYP: Workaround for a mypy issue in ``ndarray.__iter__`` (#29218) --- numpy/__init__.pyi | 12 ++++++++---- numpy/typing/tests/data/reveal/ndarray_misc.pyi | 10 ++++++++++ 2 files changed, 18 insertions(+), 4 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 272e52f88e83..ce879e208e49 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -807,6 +807,7 @@ _RealNumberT = TypeVar("_RealNumberT", bound=floating | integer) _FloatingT_co = TypeVar("_FloatingT_co", bound=floating, default=floating, covariant=True) _IntegerT = TypeVar("_IntegerT", bound=integer) _IntegerT_co = TypeVar("_IntegerT_co", bound=integer, default=integer, 
covariant=True) +_NonObjectScalarT = TypeVar("_NonObjectScalarT", bound=np.bool | number | flexible | datetime64 | timedelta64) _NBit = TypeVar("_NBit", bound=NBitBase, default=Any) # pyright: ignore[reportDeprecated] _NBit1 = TypeVar("_NBit1", bound=NBitBase, default=Any) # pyright: ignore[reportDeprecated] @@ -2572,10 +2573,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __len__(self) -> int: ... def __contains__(self, value: object, /) -> builtins.bool: ... - @overload # == 1-d & object_ - def __iter__(self: ndarray[tuple[int], dtype[object_]], /) -> Iterator[Any]: ... - @overload # == 1-d - def __iter__(self: ndarray[tuple[int], dtype[_ScalarT]], /) -> Iterator[_ScalarT]: ... + # NOTE: This weird `Never` tuple works around a strange mypy issue where it assigns + # `tuple[int]` to `tuple[Never]` or `tuple[int, int]` to `tuple[Never, Never]`. + # This way the bug only occurs for 9-D arrays, which are probably not very common. + @overload + def __iter__(self: ndarray[tuple[Never, Never, Never, Never, Never, Never, Never, Never, Never]], /) -> Iterator[Any]: ... + @overload # == 1-d & dtype[T \ object_] + def __iter__(self: ndarray[tuple[int], dtype[_NonObjectScalarT]], /) -> Iterator[_NonObjectScalarT]: ... @overload # >= 2-d def __iter__(self: ndarray[tuple[int, int, *tuple[int, ...]], dtype[_ScalarT]], /) -> Iterator[NDArray[_ScalarT]]: ... @overload # ?-d diff --git a/numpy/typing/tests/data/reveal/ndarray_misc.pyi b/numpy/typing/tests/data/reveal/ndarray_misc.pyi index 465ce7679b49..4cbb90621ca9 100644 --- a/numpy/typing/tests/data/reveal/ndarray_misc.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_misc.pyi @@ -6,6 +6,7 @@ function-based counterpart in `../from_numeric.py`. 
""" +from collections.abc import Iterator import ctypes as ct import operator from types import ModuleType @@ -29,6 +30,10 @@ AR_m: npt.NDArray[np.timedelta64] AR_U: npt.NDArray[np.str_] AR_V: npt.NDArray[np.void] +AR_f8_1d: np.ndarray[tuple[int], np.dtype[np.float64]] +AR_f8_2d: np.ndarray[tuple[int, int], np.dtype[np.float64]] +AR_f8_3d: np.ndarray[tuple[int, int, int], np.dtype[np.float64]] + ctypes_obj = AR_f8.ctypes assert_type(AR_f8.__dlpack__(), CapsuleType) @@ -235,3 +240,8 @@ assert_type(AR_m.to_device("cpu"), npt.NDArray[np.timedelta64]) assert_type(f8.__array_namespace__(), ModuleType) assert_type(AR_f8.__array_namespace__(), ModuleType) + +assert_type(iter(AR_f8), Iterator[Any]) # any-D +assert_type(iter(AR_f8_1d), Iterator[np.float64]) # 1-D +assert_type(iter(AR_f8_2d), Iterator[npt.NDArray[np.float64]]) # 2-D +assert_type(iter(AR_f8_3d), Iterator[npt.NDArray[np.float64]]) # 3-D From 32f4afaad7cb035e1b46987285d34c27c57dbe57 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Moritz=20Gro=C3=9F?= Date: Tue, 17 Jun 2025 19:34:26 +0200 Subject: [PATCH 0136/1718] ENH: improve Timsort with powersort merge-policy (#29208) Implement the improved merge policy for Timsort, as developed by Munro and Wild. Benchmarks show a significant improvement in performance. --- numpy/_core/src/npysort/timsort.cpp | 110 +++++++++++++--------------- 1 file changed, 51 insertions(+), 59 deletions(-) diff --git a/numpy/_core/src/npysort/timsort.cpp b/numpy/_core/src/npysort/timsort.cpp index 0f0f5721e7cf..9ecf88c0aeb9 100644 --- a/numpy/_core/src/npysort/timsort.cpp +++ b/numpy/_core/src/npysort/timsort.cpp @@ -39,8 +39,9 @@ #include #include -/* enough for 32 * 1.618 ** 128 elements */ -#define TIMSORT_STACK_SIZE 128 +/* enough for 32 * 1.618 ** 128 elements. 
+ If powersort was used in all cases, 90 would suffice, as 32 * 2 ** 90 >= 32 * 1.618 ** 128 */ +#define RUN_STACK_SIZE 128 static npy_intp compute_min_run(npy_intp num) @@ -58,6 +59,7 @@ compute_min_run(npy_intp num) typedef struct { npy_intp s; /* start pointer */ npy_intp l; /* length */ + int power; /* node "level" for powersort merge strategy */ } run; /* buffer for argsort. Declared here to avoid multiple declarations. */ @@ -383,60 +385,51 @@ merge_at_(type *arr, const run *stack, const npy_intp at, buffer_ *buffer) return 0; } -template +/* See https://github.com/python/cpython/blob/ea23c897cd25702e72a04e06664f6864f07a7c5d/Objects/listsort.txt +* for a detailed explanation. +* In CPython, *num* is called *n*, but we changed it for consistency with the NumPy implementation. +*/ static int -try_collapse_(type *arr, run *stack, npy_intp *stack_ptr, buffer_ *buffer) +powerloop(npy_intp s1, npy_intp n1, npy_intp n2, npy_intp num) { - int ret; - npy_intp A, B, C, top; - top = *stack_ptr; - - while (1 < top) { - B = stack[top - 2].l; - C = stack[top - 1].l; - - if ((2 < top && stack[top - 3].l <= B + C) || - (3 < top && stack[top - 4].l <= stack[top - 3].l + B)) { - A = stack[top - 3].l; - - if (A <= C) { - ret = merge_at_(arr, stack, top - 3, buffer); - - if (NPY_UNLIKELY(ret < 0)) { - return ret; - } - - stack[top - 3].l += B; - stack[top - 2] = stack[top - 1]; - --top; - } - else { - ret = merge_at_(arr, stack, top - 2, buffer); - - if (NPY_UNLIKELY(ret < 0)) { - return ret; - } - - stack[top - 2].l += C; - --top; - } + int result = 0; + npy_intp a = 2 * s1 + n1; /* 2*a */ + npy_intp b = a + n1 + n2; /* 2*b */ + for (;;) { + ++result; + if (a >= num) { /* both quotient bits are 1 */ + a -= num; + b -= num; } - else if (1 < top && B <= C) { - ret = merge_at_(arr, stack, top - 2, buffer); + else if (b >= num) { /* a/num bit is 0, b/num bit is 1 */ + break; + } + a <<= 1; + b <<= 1; + } + return result; +} +template +static int +found_new_run_(type *arr, run 
*stack, npy_intp *stack_ptr, npy_intp n2, + npy_intp num, buffer_ *buffer) +{ + int ret; + if (*stack_ptr > 0) { + npy_intp s1 = stack[*stack_ptr - 1].s; + npy_intp n1 = stack[*stack_ptr - 1].l; + int power = powerloop(s1, n1, n2, num); + while (*stack_ptr > 1 && stack[*stack_ptr - 2].power > power) { + ret = merge_at_(arr, stack, *stack_ptr - 2, buffer); if (NPY_UNLIKELY(ret < 0)) { return ret; } - - stack[top - 2].l += C; - --top; - } - else { - break; + stack[*stack_ptr - 2].l += stack[*stack_ptr - 1].l; + --(*stack_ptr); } + stack[*stack_ptr - 1].power = power; } - - *stack_ptr = top; return 0; } @@ -491,7 +484,7 @@ timsort_(void *start, npy_intp num) int ret; npy_intp l, n, stack_ptr, minrun; buffer_ buffer; - run stack[TIMSORT_STACK_SIZE]; + run stack[RUN_STACK_SIZE]; buffer.pw = NULL; buffer.size = 0; stack_ptr = 0; @@ -499,15 +492,14 @@ timsort_(void *start, npy_intp num) for (l = 0; l < num;) { n = count_run_((type *)start, l, num, minrun); + ret = found_new_run_((type *)start, stack, &stack_ptr, n, num, &buffer); + if (NPY_UNLIKELY(ret < 0)) + goto cleanup; + + // Push the new run onto the stack. 
stack[stack_ptr].s = l; stack[stack_ptr].l = n; ++stack_ptr; - ret = try_collapse_((type *)start, stack, &stack_ptr, &buffer); - - if (NPY_UNLIKELY(ret < 0)) { - goto cleanup; - } - l += n; } @@ -897,7 +889,7 @@ atimsort_(void *v, npy_intp *tosort, npy_intp num) int ret; npy_intp l, n, stack_ptr, minrun; buffer_intp buffer; - run stack[TIMSORT_STACK_SIZE]; + run stack[RUN_STACK_SIZE]; buffer.pw = NULL; buffer.size = 0; stack_ptr = 0; @@ -1371,7 +1363,7 @@ string_timsort_(void *start, npy_intp num, void *varr) size_t len = elsize / sizeof(type); int ret; npy_intp l, n, stack_ptr, minrun; - run stack[TIMSORT_STACK_SIZE]; + run stack[RUN_STACK_SIZE]; string_buffer_ buffer; /* Items that have zero size don't make sense to sort */ @@ -1800,7 +1792,7 @@ string_atimsort_(void *start, npy_intp *tosort, npy_intp num, void *varr) size_t len = elsize / sizeof(type); int ret; npy_intp l, n, stack_ptr, minrun; - run stack[TIMSORT_STACK_SIZE]; + run stack[RUN_STACK_SIZE]; buffer_intp buffer; /* Items that have zero size don't make sense to sort */ @@ -2253,7 +2245,7 @@ npy_timsort(void *start, npy_intp num, void *varr) PyArray_CompareFunc *cmp = PyDataType_GetArrFuncs(PyArray_DESCR(arr))->compare; int ret; npy_intp l, n, stack_ptr, minrun; - run stack[TIMSORT_STACK_SIZE]; + run stack[RUN_STACK_SIZE]; buffer_char buffer; /* Items that have zero size don't make sense to sort */ @@ -2689,7 +2681,7 @@ npy_atimsort(void *start, npy_intp *tosort, npy_intp num, void *varr) PyArray_CompareFunc *cmp = PyDataType_GetArrFuncs(PyArray_DESCR(arr))->compare; int ret; npy_intp l, n, stack_ptr, minrun; - run stack[TIMSORT_STACK_SIZE]; + run stack[RUN_STACK_SIZE]; buffer_intp buffer; /* Items that have zero size don't make sense to sort */ From d1c67573569885087a253120c1a9f2caf3ccf084 Mon Sep 17 00:00:00 2001 From: Brad Smith Date: Tue, 17 Jun 2025 14:56:21 -0400 Subject: [PATCH 0137/1718] ENH: Detect CPU features on OpenBSD ARM and PowerPC64 Also while looking at this I noticed due to a 
compiler warning that npy__cpu_init_features_linux() was not enabled on FreeBSD with the original commit c47e9621ebf76f8085ff5ec8b01c07921d14f6a7 thus the code was doing nothing on FreeBSD ARM. --- numpy/_core/src/common/npy_cpu_features.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/numpy/_core/src/common/npy_cpu_features.c b/numpy/_core/src/common/npy_cpu_features.c index f15f636cdb1e..617225ec9eff 100644 --- a/numpy/_core/src/common/npy_cpu_features.c +++ b/numpy/_core/src/common/npy_cpu_features.c @@ -562,7 +562,7 @@ npy__cpu_init_features(void) #elif defined(NPY_CPU_PPC64) || defined(NPY_CPU_PPC64LE) -#if defined(__linux__) || defined(__FreeBSD__) +#if defined(__linux__) || defined(__FreeBSD__) || defined(__OpenBSD__) #ifdef __FreeBSD__ #include // defines PPC_FEATURE_HAS_VSX #endif @@ -585,7 +585,7 @@ static void npy__cpu_init_features(void) { memset(npy__cpu_have, 0, sizeof(npy__cpu_have[0]) * NPY_CPU_FEATURE_MAX); -#if defined(__linux__) || defined(__FreeBSD__) +#if defined(__linux__) || defined(__FreeBSD__) || defined(__OpenBSD__) #ifdef __linux__ unsigned int hwcap = getauxval(AT_HWCAP); if ((hwcap & PPC_FEATURE_HAS_VSX) == 0) @@ -612,7 +612,7 @@ npy__cpu_init_features(void) npy__cpu_have[NPY_CPU_FEATURE_VSX2] = (hwcap & PPC_FEATURE2_ARCH_2_07) != 0; npy__cpu_have[NPY_CPU_FEATURE_VSX3] = (hwcap & PPC_FEATURE2_ARCH_3_00) != 0; npy__cpu_have[NPY_CPU_FEATURE_VSX4] = (hwcap & PPC_FEATURE2_ARCH_3_1) != 0; -// TODO: AIX, OpenBSD +// TODO: AIX #else npy__cpu_have[NPY_CPU_FEATURE_VSX] = 1; #if defined(NPY_CPU_PPC64LE) || defined(NPY_HAVE_VSX2) @@ -698,7 +698,7 @@ npy__cpu_init_features_arm8(void) npy__cpu_have[NPY_CPU_FEATURE_ASIMD] = 1; } -#if defined(__linux__) || defined(__FreeBSD__) +#if defined(__linux__) || defined(__FreeBSD__) || defined(__OpenBSD__) /* * we aren't sure of what kind kernel or clib we deal with * so we play it safe @@ -709,7 +709,7 @@ npy__cpu_init_features_arm8(void) #if defined(__linux__) 
__attribute__((weak)) unsigned long getauxval(unsigned long); // linker should handle it #endif -#ifdef __FreeBSD__ +#if defined(__FreeBSD__) || defined(__OpenBSD__) __attribute__((weak)) int elf_aux_info(int, void *, int); // linker should handle it static unsigned long getauxval(unsigned long k) @@ -807,7 +807,7 @@ static void npy__cpu_init_features(void) { memset(npy__cpu_have, 0, sizeof(npy__cpu_have[0]) * NPY_CPU_FEATURE_MAX); -#ifdef __linux__ +#if defined(__linux__) || defined(__FreeBSD__) || defined(__OpenBSD__) if (npy__cpu_init_features_linux()) return; #endif From b6a6740e106277f4b1081210bbd158fed3e0d20c Mon Sep 17 00:00:00 2001 From: Mugundanmcw Date: Wed, 18 Jun 2025 18:16:30 +0530 Subject: [PATCH 0138/1718] CI: Add conditions to check hash of LLVM package --- .github/windows_arm64_steps/action.yml | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/.github/windows_arm64_steps/action.yml b/.github/windows_arm64_steps/action.yml index 86517b6246e7..8ecb3b8a0cdd 100644 --- a/.github/windows_arm64_steps/action.yml +++ b/.github/windows_arm64_steps/action.yml @@ -4,10 +4,16 @@ description: "Setup LLVM for Win-ARM64 builds" runs: using: "composite" steps: - - name: Install LLVM + - name: Install LLVM with checksum verification shell: pwsh run: | Invoke-WebRequest https://github.com/llvm/llvm-project/releases/download/llvmorg-20.1.6/LLVM-20.1.6-woa64.exe -UseBasicParsing -OutFile LLVM-woa64.exe + $expectedHash = "92f69a1134e32e54b07d51c6e24d9594852f6476f32c3d70471ae00fffc2d462" + $fileHash = (Get-FileHash -Path "LLVM-woa64.exe" -Algorithm SHA256).Hash + if ($fileHash -ne $expectedHash) { + Write-Error "Checksum verification failed. The downloaded file may be corrupted or tampered with." 
+ exit 1 + } Start-Process -FilePath ".\LLVM-woa64.exe" -ArgumentList "/S" -Wait echo "C:\Program Files\LLVM\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append echo "CC=clang-cl" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append From d52b36ee0b8bf3e7ca9eec19d8c4b41ff2c04f59 Mon Sep 17 00:00:00 2001 From: Michael Date: Wed, 18 Jun 2025 19:32:32 +0200 Subject: [PATCH 0139/1718] strides comparison performance fix, compare discussion #29179 (#29188) --- numpy/_core/src/umath/matmul.c.src | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/numpy/_core/src/umath/matmul.c.src b/numpy/_core/src/umath/matmul.c.src index 02c4fde56bf2..6f54aeb4d968 100644 --- a/numpy/_core/src/umath/matmul.c.src +++ b/numpy/_core/src/umath/matmul.c.src @@ -27,6 +27,8 @@ ***************************************************************************** */ +#define ABS(x) ((x) < 0 ? -(x) : (x)) + #if defined(HAVE_CBLAS) /* * -1 to be conservative, in case blas internally uses a for loop with an @@ -554,9 +556,9 @@ NPY_NO_EXPORT void } else { /* matrix @ matrix * copy if not blasable, see gh-12365 & gh-23588 */ - npy_bool i1_transpose = is1_m < is1_n, - i2_transpose = is2_n < is2_p, - o_transpose = os_m < os_p; + npy_bool i1_transpose = ABS(is1_m) < ABS(is1_n), + i2_transpose = ABS(is2_n) < ABS(is2_p), + o_transpose = ABS(os_m) < ABS(os_p); npy_intp tmp_is1_m = i1_transpose ? sz : sz*dn, tmp_is1_n = i1_transpose ? 
sz*dm : sz, From 2b0eda38f099203bb72572a8b640b37165fc7dc5 Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 17 Jun 2025 17:49:17 +0200 Subject: [PATCH 0140/1718] TYP: Support iteration of ``StringDType`` arrays --- numpy/__init__.pyi | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index ce879e208e49..bcbe95accee6 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2577,11 +2577,15 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # `tuple[int]` to `tuple[Never]` or `tuple[int, int]` to `tuple[Never, Never]`. # This way the bug only occurs for 9-D arrays, which are probably not very common. @overload - def __iter__(self: ndarray[tuple[Never, Never, Never, Never, Never, Never, Never, Never, Never]], /) -> Iterator[Any]: ... + def __iter__( + self: ndarray[tuple[Never, Never, Never, Never, Never, Never, Never, Never, Never], Any], / + ) -> Iterator[Any]: ... @overload # == 1-d & dtype[T \ object_] def __iter__(self: ndarray[tuple[int], dtype[_NonObjectScalarT]], /) -> Iterator[_NonObjectScalarT]: ... + @overload # == 1-d & StringDType + def __iter__(self: ndarray[tuple[int], dtypes.StringDType], /) -> Iterator[str]: ... @overload # >= 2-d - def __iter__(self: ndarray[tuple[int, int, *tuple[int, ...]], dtype[_ScalarT]], /) -> Iterator[NDArray[_ScalarT]]: ... + def __iter__(self: ndarray[tuple[int, int, *tuple[int, ...]], _DTypeT], /) -> Iterator[ndarray[_AnyShape, _DTypeT]]: ... @overload # ?-d def __iter__(self, /) -> Iterator[Any]: ... 
From f15a116ee12ec539c7ed1fafb3a0abfb82005e0f Mon Sep 17 00:00:00 2001 From: Developer-Ecosystem-Engineering <65677710+Developer-Ecosystem-Engineering@users.noreply.github.com> Date: Wed, 18 Jun 2025 13:00:27 -0700 Subject: [PATCH 0141/1718] BUG: Address interaction between SME and FPSR (#29223) * BUG: Address interaction between SME and FPSR This is intended to resolve https://github.com/numpy/numpy/issues/28687 The root cause is an interaction between Arm Scalable Matrix Extension (SME) and the floating point status register (FPSR). As noted in Arm docs for FPSR, "On entry to or exit from Streaming SVE mode, FPSR.{IOC, DZC, OFC, UFC, IXC, IDC, QC} are set to 1 and the remaining bits are set to 0". This means that floating point status flags are all raised when SME is used, regardless of values or operations performed. These are manifesting now because Apple Silicon M4 supports SME and macOS 15.4 enables SME codepaths for Accelerate BLAS / LAPACK. However, SME / FPSR behavior is not specific to Apple Silicon M4 and will occur on non-Apple chips using SME as well. Changes add compile and runtime checks to determine whether BLAS / LAPACK might use SME (macOS / Accelerate only at the moment). If so, special handling of floating-point error (FPE) is added, which includes: - clearing FPE after some BLAS calls - short-circuiting FPE read after some BLAS calls All tests pass Performance is similar Another approach would have been to wrap all BLAS / LAPACK calls with save / restore FPE. However, it added a lot of overhead for the inner loops that utilize BLAS / LAPACK. Some benchmarks were 8x slower. 
* add blas_supports_fpe and ifdef check Address the linker & linter failures --- numpy/_core/meson.build | 1 + numpy/_core/src/common/blas_utils.c | 134 ++++++++++++++++++ numpy/_core/src/common/blas_utils.h | 30 ++++ numpy/_core/src/common/cblasfuncs.c | 3 +- numpy/_core/src/multiarray/multiarraymodule.c | 5 + numpy/_core/src/umath/matmul.c.src | 36 ++++- numpy/_core/tests/test_multiarray.py | 6 + numpy/testing/_private/utils.py | 10 ++ 8 files changed, 217 insertions(+), 8 deletions(-) create mode 100644 numpy/_core/src/common/blas_utils.c create mode 100644 numpy/_core/src/common/blas_utils.h diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index a4d2050122c6..cd46a20b0246 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -1117,6 +1117,7 @@ src_multiarray_umath_common = [ ] if have_blas src_multiarray_umath_common += [ + 'src/common/blas_utils.c', 'src/common/cblasfuncs.c', 'src/common/python_xerbla.c', ] diff --git a/numpy/_core/src/common/blas_utils.c b/numpy/_core/src/common/blas_utils.c new file mode 100644 index 000000000000..aaf976ed70e4 --- /dev/null +++ b/numpy/_core/src/common/blas_utils.c @@ -0,0 +1,134 @@ +#include +#include +#include + +#ifdef __APPLE__ +#include +#endif + +#include "numpy/numpyconfig.h" // NPY_VISIBILITY_HIDDEN +#include "numpy/npy_math.h" // npy_get_floatstatus_barrier +#include "blas_utils.h" + +#if NPY_BLAS_CHECK_FPE_SUPPORT + +/* Return whether we're running on macOS 15.4 or later + */ +static inline bool +is_macOS_version_15_4_or_later(void){ +#if !defined(__APPLE__) + return false; +#else + char *osProductVersion = NULL; + size_t size = 0; + bool ret = false; + + // Query how large OS version string should be + if(-1 == sysctlbyname("kern.osproductversion", NULL, &size, NULL, 0)){ + goto cleanup; + } + + osProductVersion = malloc(size + 1); + + // Get the OS version string + if(-1 == sysctlbyname("kern.osproductversion", osProductVersion, &size, NULL, 0)){ + goto cleanup; + } + + 
osProductVersion[size] = '\0'; + + // Parse the version string + int major = 0, minor = 0; + if(2 > sscanf(osProductVersion, "%d.%d", &major, &minor)) { + goto cleanup; + } + + if(major >= 15 && minor >= 4){ + ret = true; + } + +cleanup: + if(osProductVersion){ + free(osProductVersion); + } + + return ret; +#endif +} + +/* ARM Scalable Matrix Extension (SME) raises all floating-point error flags + * when it's used regardless of values or operations. As a consequence, + * when SME is used, all FPE state is lost and special handling is needed. + * + * For NumPy, SME is not currently used directly, but can be used via + * BLAS / LAPACK libraries. This function does a runtime check for whether + * BLAS / LAPACK can use SME and special handling around FPE is required. + */ +static inline bool +BLAS_can_use_ARM_SME(void) +{ +#if defined(__APPLE__) && defined(__aarch64__) && defined(ACCELERATE_NEW_LAPACK) + // ARM SME can be used by Apple's Accelerate framework for BLAS / LAPACK + // - macOS 15.4+ + // - Apple silicon M4+ + + // Does OS / Accelerate support ARM SME? + if(!is_macOS_version_15_4_or_later()){ + return false; + } + + // Does hardware support SME? + int has_SME = 0; + size_t size = sizeof(has_SME); + if(-1 == sysctlbyname("hw.optional.arm.FEAT_SME", &has_SME, &size, NULL, 0)){ + return false; + } + + if(has_SME){ + return true; + } +#endif + + // default assume SME is not used + return false; +} + +/* Static variable to cache runtime check of BLAS FPE support. 
+ */ +static bool blas_supports_fpe = true; + +#endif // NPY_BLAS_CHECK_FPE_SUPPORT + + +NPY_VISIBILITY_HIDDEN bool +npy_blas_supports_fpe(void) +{ +#if NPY_BLAS_CHECK_FPE_SUPPORT + return blas_supports_fpe; +#else + return true; +#endif +} + +NPY_VISIBILITY_HIDDEN void +npy_blas_init(void) +{ +#if NPY_BLAS_CHECK_FPE_SUPPORT + blas_supports_fpe = !BLAS_can_use_ARM_SME(); +#endif +} + +NPY_VISIBILITY_HIDDEN int +npy_get_floatstatus_after_blas(void) +{ +#if NPY_BLAS_CHECK_FPE_SUPPORT + if(!blas_supports_fpe){ + // BLAS does not support FPE and we need to return FPE state. + // Instead of clearing and then grabbing state, just return + // that no flags are set. + return 0; + } +#endif + char *param = NULL; + return npy_get_floatstatus_barrier(param); +} diff --git a/numpy/_core/src/common/blas_utils.h b/numpy/_core/src/common/blas_utils.h new file mode 100644 index 000000000000..8c1437f88899 --- /dev/null +++ b/numpy/_core/src/common/blas_utils.h @@ -0,0 +1,30 @@ +#include + +#include "numpy/numpyconfig.h" // for NPY_VISIBILITY_HIDDEN + +/* NPY_BLAS_CHECK_FPE_SUPPORT controls whether we need a runtime check + * for floating-point error (FPE) support in BLAS. + */ +#if defined(__APPLE__) && defined(__aarch64__) && defined(ACCELERATE_NEW_LAPACK) +#define NPY_BLAS_CHECK_FPE_SUPPORT 1 +#else +#define NPY_BLAS_CHECK_FPE_SUPPORT 0 +#endif + +/* Initialize BLAS environment, if needed + */ +NPY_VISIBILITY_HIDDEN void +npy_blas_init(void); + +/* Runtime check if BLAS supports floating-point errors. + * true - BLAS supports FPE and one can rely on them to indicate errors + * false - BLAS does not support FPE. Special handling needed for FPE state + */ +NPY_VISIBILITY_HIDDEN bool +npy_blas_supports_fpe(void); + +/* If BLAS supports FPE, exactly the same as npy_get_floatstatus_barrier(). + * Otherwise, we can't rely on FPE state and need special handling. 
+ */ +NPY_VISIBILITY_HIDDEN int +npy_get_floatstatus_after_blas(void); diff --git a/numpy/_core/src/common/cblasfuncs.c b/numpy/_core/src/common/cblasfuncs.c index f9d683d812d4..66a215dfeb64 100644 --- a/numpy/_core/src/common/cblasfuncs.c +++ b/numpy/_core/src/common/cblasfuncs.c @@ -12,6 +12,7 @@ #include "numpy/arrayobject.h" #include "numpy/npy_math.h" #include "numpy/ufuncobject.h" +#include "blas_utils.h" #include "npy_cblas.h" #include "arraytypes.h" #include "common.h" @@ -693,7 +694,7 @@ cblas_matrixproduct(int typenum, PyArrayObject *ap1, PyArrayObject *ap2, NPY_END_ALLOW_THREADS; } - int fpes = npy_get_floatstatus_barrier((char *) result); + int fpes = npy_get_floatstatus_after_blas(); if (fpes && PyUFunc_GiveFloatingpointErrors("dot", fpes) < 0) { goto fail; } diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index 7724756ba351..dcfb1226a0ab 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -43,6 +43,7 @@ NPY_NO_EXPORT int NPY_NUMUSERTYPES = 0; #include "arraytypes.h" #include "arrayobject.h" #include "array_converter.h" +#include "blas_utils.h" #include "hashdescr.h" #include "descriptor.h" #include "dragon4.h" @@ -4781,6 +4782,10 @@ _multiarray_umath_exec(PyObject *m) { return -1; } +#if NPY_BLAS_CHECK_FPE_SUPPORT + npy_blas_init(); +#endif + #if defined(MS_WIN64) && defined(__GNUC__) PyErr_WarnEx(PyExc_Warning, "Numpy built with MINGW-W64 on Windows 64 bits is experimental, " \ diff --git a/numpy/_core/src/umath/matmul.c.src b/numpy/_core/src/umath/matmul.c.src index 6f54aeb4d968..11e014acec7f 100644 --- a/numpy/_core/src/umath/matmul.c.src +++ b/numpy/_core/src/umath/matmul.c.src @@ -16,6 +16,7 @@ +#include "blas_utils.h" #include "npy_cblas.h" #include "arraytypes.h" /* For TYPE_dot functions */ @@ -122,7 +123,7 @@ static inline void } } -NPY_NO_EXPORT void +static void @name@_gemv(void *ip1, npy_intp is1_m, npy_intp is1_n, void 
*ip2, npy_intp is2_n, void *op, npy_intp op_m, @@ -158,7 +159,7 @@ NPY_NO_EXPORT void is2_n / sizeof(@typ@), @step0@, op, op_m / sizeof(@typ@)); } -NPY_NO_EXPORT void +static void @name@_matmul_matrixmatrix(void *ip1, npy_intp is1_m, npy_intp is1_n, void *ip2, npy_intp is2_n, npy_intp is2_p, void *op, npy_intp os_m, npy_intp os_p, @@ -262,7 +263,7 @@ NPY_NO_EXPORT void * #IS_HALF = 0, 0, 0, 1, 0*13# */ -NPY_NO_EXPORT void +static void @TYPE@_matmul_inner_noblas(void *_ip1, npy_intp is1_m, npy_intp is1_n, void *_ip2, npy_intp is2_n, npy_intp is2_p, void *_op, npy_intp os_m, npy_intp os_p, @@ -320,7 +321,7 @@ NPY_NO_EXPORT void } /**end repeat**/ -NPY_NO_EXPORT void +static void BOOL_matmul_inner_noblas(void *_ip1, npy_intp is1_m, npy_intp is1_n, void *_ip2, npy_intp is2_n, npy_intp is2_p, void *_op, npy_intp os_m, npy_intp os_p, @@ -359,7 +360,7 @@ BOOL_matmul_inner_noblas(void *_ip1, npy_intp is1_m, npy_intp is1_n, } } -NPY_NO_EXPORT void +static void OBJECT_matmul_inner_noblas(void *_ip1, npy_intp is1_m, npy_intp is1_n, void *_ip2, npy_intp is2_n, npy_intp is2_p, void *_op, npy_intp os_m, npy_intp os_p, @@ -631,6 +632,11 @@ NPY_NO_EXPORT void #endif } #if @USEBLAS@ && defined(HAVE_CBLAS) +#if NPY_BLAS_CHECK_FPE_SUPPORT + if (!npy_blas_supports_fpe()) { + npy_clear_floatstatus_barrier((char*)args); + } +#endif if (allocate_buffer) free(tmp_ip12op); #endif } @@ -655,7 +661,7 @@ NPY_NO_EXPORT void * #prefix = c, z, 0# * #USE_BLAS = 1, 1, 0# */ -NPY_NO_EXPORT void +static void @name@_dotc(char *ip1, npy_intp is1, char *ip2, npy_intp is2, char *op, npy_intp n, void *NPY_UNUSED(ignore)) { @@ -751,6 +757,7 @@ OBJECT_dotc(char *ip1, npy_intp is1, char *ip2, npy_intp is2, char *op, npy_intp * CFLOAT, CDOUBLE, CLONGDOUBLE, OBJECT# * #DOT = dot*15, dotc*4# * #CHECK_PYERR = 0*18, 1# + * #CHECK_BLAS = 1*2, 0*13, 1*2, 0*2# */ NPY_NO_EXPORT void @TYPE@_vecdot(char **args, npy_intp const *dimensions, npy_intp const *steps, @@ -774,6 +781,11 @@ NPY_NO_EXPORT void } #endif } +#if 
@CHECK_BLAS@ && NPY_BLAS_CHECK_FPE_SUPPORT + if (!npy_blas_supports_fpe()) { + npy_clear_floatstatus_barrier((char*)args); + } +#endif } /**end repeat**/ @@ -789,7 +801,7 @@ NPY_NO_EXPORT void * #step1 = &oneF, &oneD# * #step0 = &zeroF, &zeroD# */ -NPY_NO_EXPORT void +static void @name@_vecmat_via_gemm(void *ip1, npy_intp is1_n, void *ip2, npy_intp is2_n, npy_intp is2_m, void *op, npy_intp os_m, @@ -880,6 +892,11 @@ NPY_NO_EXPORT void #endif } } +#if @USEBLAS@ && NPY_BLAS_CHECK_FPE_SUPPORT + if (!npy_blas_supports_fpe()) { + npy_clear_floatstatus_barrier((char*)args); + } +#endif } /**end repeat**/ @@ -945,5 +962,10 @@ NPY_NO_EXPORT void #endif } } +#if @USEBLAS@ && NPY_BLAS_CHECK_FPE_SUPPORT + if (!npy_blas_supports_fpe()) { + npy_clear_floatstatus_barrier((char*)args); + } +#endif } /**end repeat**/ diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index b164f1dada3b..81e55deb3daf 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -31,6 +31,7 @@ from numpy.exceptions import AxisError, ComplexWarning from numpy.lib.recfunctions import repack_fields from numpy.testing import ( + BLAS_SUPPORTS_FPE, HAS_REFCOUNT, IS_64BIT, IS_PYPY, @@ -3363,6 +3364,11 @@ def test_dot(self): @pytest.mark.parametrize("dtype", [np.half, np.double, np.longdouble]) @pytest.mark.skipif(IS_WASM, reason="no wasm fp exception support") def test_dot_errstate(self, dtype): + # Some dtypes use BLAS for 'dot' operation and + # not all BLAS support floating-point errors. 
+ if not BLAS_SUPPORTS_FPE and dtype == np.double: + pytest.skip("BLAS does not support FPE") + a = np.array([1, 1], dtype=dtype) b = np.array([-np.inf, np.inf], dtype=dtype) diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index d7ceaeab72cc..65f4059f98fd 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -42,6 +42,7 @@ 'assert_no_gc_cycles', 'break_cycles', 'HAS_LAPACK64', 'IS_PYSTON', 'IS_MUSL', 'check_support_sve', 'NOGIL_BUILD', 'IS_EDITABLE', 'IS_INSTALLED', 'NUMPY_ROOT', 'run_threaded', 'IS_64BIT', + 'BLAS_SUPPORTS_FPE', ] @@ -89,6 +90,15 @@ class KnownFailureException(Exception): IS_PYPY = sys.implementation.name == 'pypy' IS_PYSTON = hasattr(sys, "pyston_version_info") HAS_REFCOUNT = getattr(sys, 'getrefcount', None) is not None and not IS_PYSTON +BLAS_SUPPORTS_FPE = True +if platform.system() == 'Darwin' or platform.machine() == 'arm64': + try: + blas = np.__config__.CONFIG['Build Dependencies']['blas'] + if blas['name'] == 'accelerate': + BLAS_SUPPORTS_FPE = False + except KeyError: + pass + HAS_LAPACK64 = numpy.linalg._umath_linalg._ilp64 IS_MUSL = False From 5cbc0bcebbe031140b63d2ffba06b73e471b43b2 Mon Sep 17 00:00:00 2001 From: Brad Smith Date: Tue, 17 Jun 2025 19:57:18 -0400 Subject: [PATCH 0142/1718] ENH: Detect CPU features on FreeBSD / OpenBSD RISC-V64. 
--- numpy/_core/src/common/npy_cpu_features.c | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/numpy/_core/src/common/npy_cpu_features.c b/numpy/_core/src/common/npy_cpu_features.c index f15f636cdb1e..06c82fe41e27 100644 --- a/numpy/_core/src/common/npy_cpu_features.c +++ b/numpy/_core/src/common/npy_cpu_features.c @@ -846,22 +846,30 @@ npy__cpu_init_features(void) #elif defined(__riscv) && __riscv_xlen == 64 -#include +#if defined(__linux__) || defined(__FreeBSD__) || defined(__OpenBSD__) + #include -#ifndef HWCAP_RVV - // https://github.com/torvalds/linux/blob/v6.8/arch/riscv/include/uapi/asm/hwcap.h#L24 - #define COMPAT_HWCAP_ISA_V (1 << ('V' - 'A')) + #ifndef HWCAP_RVV + // https://github.com/torvalds/linux/blob/v6.8/arch/riscv/include/uapi/asm/hwcap.h#L24 + #define COMPAT_HWCAP_ISA_V (1 << ('V' - 'A')) + #endif #endif static void npy__cpu_init_features(void) { memset(npy__cpu_have, 0, sizeof(npy__cpu_have[0]) * NPY_CPU_FEATURE_MAX); - +#if defined(__linux__) || defined(__FreeBSD__) || defined(__OpenBSD__) +#ifdef __linux__ unsigned int hwcap = getauxval(AT_HWCAP); +#else + unsigned long hwcap; + elf_aux_info(AT_HWCAP, &hwcap, sizeof(hwcap)); +#endif if (hwcap & COMPAT_HWCAP_ISA_V) { npy__cpu_have[NPY_CPU_FEATURE_RVV] = 1; } +#endif } /*********** Unsupported ARCH ***********/ From 7a74e12558789ecf0252dad56a77560db7411d13 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 19 Jun 2025 01:18:27 -0600 Subject: [PATCH 0143/1718] BUG: avoid negating unsigned integers in resize implementation (#29230) The negation of an unsigned int underflows and creates a large positive repeats, which leads to allocations failures and/or swapping. 
--- numpy/_core/fromnumeric.py | 3 ++- numpy/_core/tests/test_numeric.py | 7 +++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/numpy/_core/fromnumeric.py b/numpy/_core/fromnumeric.py index 73dcd1ddc11d..e20d774d014d 100644 --- a/numpy/_core/fromnumeric.py +++ b/numpy/_core/fromnumeric.py @@ -1607,7 +1607,8 @@ def resize(a, new_shape): # First case must zero fill. The second would have repeats == 0. return np.zeros_like(a, shape=new_shape) - repeats = -(-new_size // a.size) # ceil division + # ceiling division without negating new_size + repeats = (new_size + a.size - 1) // a.size a = concatenate((a,) * repeats)[:new_size] return reshape(a, new_shape) diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py index 8a72e4bfa65d..65da65ddc9f9 100644 --- a/numpy/_core/tests/test_numeric.py +++ b/numpy/_core/tests/test_numeric.py @@ -79,6 +79,13 @@ def test_negative_resize(self): with pytest.raises(ValueError, match=r"negative"): np.resize(A, new_shape=new_shape) + def test_unsigned_resize(self): + # ensure unsigned integer sizes don't lead to underflows + for dt_pair in [(np.int32, np.uint32), (np.int64, np.uint64)]: + arr = np.array([[23, 95], [66, 37]]) + assert_array_equal(np.resize(arr, dt_pair[0](1)), + np.resize(arr, dt_pair[1](1))) + def test_subclass(self): class MyArray(np.ndarray): __array_priority__ = 1. From 35079afa808ea0fc49bb13c878150ef20c4f64fa Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 19 Jun 2025 14:21:05 +0200 Subject: [PATCH 0144/1718] TST: Fix test that uses uninitialized memory (#29232) Tests should avoid this generally, this one is worse, since it can even fail due to warnings. 
--- numpy/_core/tests/test_ufunc.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/_core/tests/test_ufunc.py b/numpy/_core/tests/test_ufunc.py index 21ebc02c2625..a1cd63aec523 100644 --- a/numpy/_core/tests/test_ufunc.py +++ b/numpy/_core/tests/test_ufunc.py @@ -2123,9 +2123,9 @@ class ArrayPriorityMinus1000b(ArrayPriorityBase): class ArrayPriorityMinus2000(ArrayPriorityBase): __array_priority__ = -2000 - x = ArrayPriorityMinus1000(2) - xb = ArrayPriorityMinus1000b(2) - y = ArrayPriorityMinus2000(2) + x = np.ones(2).view(ArrayPriorityMinus1000) + xb = np.ones(2).view(ArrayPriorityMinus1000b) + y = np.ones(2).view(ArrayPriorityMinus2000) assert np.add(x, y) is ArrayPriorityMinus1000 assert np.add(y, x) is ArrayPriorityMinus1000 From 4575abf725d9f14af73ee0442b05c01d0d46a7c1 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Thu, 19 Jun 2025 16:43:25 +0200 Subject: [PATCH 0145/1718] MAINT: bump ``ruff`` to ``0.12.0`` (#29220) --- environment.yml | 2 +- requirements/linter_requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/environment.yml b/environment.yml index 75c6626abaf8..17de8d3eeb5e 100644 --- a/environment.yml +++ b/environment.yml @@ -45,7 +45,7 @@ dependencies: # NOTE: breathe 4.33.0 collides with sphinx.ext.graphviz - breathe>4.33.0 # For linting - - ruff=0.11.13 + - ruff=0.12.0 - gitpython # Used in some tests - cffi diff --git a/requirements/linter_requirements.txt b/requirements/linter_requirements.txt index 45319571b561..05319a9bdb8a 100644 --- a/requirements/linter_requirements.txt +++ b/requirements/linter_requirements.txt @@ -1,3 +1,3 @@ # keep in sync with `environment.yml` -ruff==0.11.13 +ruff==0.12.0 GitPython>=3.1.30 From 7b30ce7432d0b42bb805fbf5575b0d0ba5be98ab Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 19 Jun 2025 16:47:49 +0200 Subject: [PATCH 0146/1718] BUG: Enforce integer limitation in concatenate (#29231) * BUG: Enforce integer limitation in concatenate 
Concatenate internals only deal with integer many arrays, that should be fine in practice, but a SystemError (or in principle maybe also a harder crash?) is not really. * skip 32bit systems --- numpy/_core/src/multiarray/multiarraymodule.c | 11 +++++++++-- numpy/_core/tests/test_shape_base.py | 14 ++++++++++++++ 2 files changed, 23 insertions(+), 2 deletions(-) diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index dcfb1226a0ab..d4766b5af7b4 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -669,10 +669,17 @@ PyArray_ConcatenateInto(PyObject *op, } /* Convert the input list into arrays */ - narrays = PySequence_Size(op); - if (narrays < 0) { + Py_ssize_t narrays_true = PySequence_Size(op); + if (narrays_true < 0) { return NULL; } + else if (narrays_true > NPY_MAX_INT) { + PyErr_Format(PyExc_ValueError, + "concatenate() only supports up to %d arrays but got %zd.", + NPY_MAX_INT, narrays_true); + return NULL; + } + narrays = (int)narrays_true; arrays = PyArray_malloc(narrays * sizeof(arrays[0])); if (arrays == NULL) { PyErr_NoMemory(); diff --git a/numpy/_core/tests/test_shape_base.py b/numpy/_core/tests/test_shape_base.py index f7b944be08b7..8de24278fc5d 100644 --- a/numpy/_core/tests/test_shape_base.py +++ b/numpy/_core/tests/test_shape_base.py @@ -1,3 +1,5 @@ +import sys + import pytest import numpy as np @@ -29,6 +31,7 @@ assert_raises, assert_raises_regex, ) +from numpy.testing._private.utils import requires_memory class TestAtleast1d: @@ -290,6 +293,17 @@ def test_exceptions(self): # No arrays to concatenate raises ValueError assert_raises(ValueError, concatenate, ()) + @pytest.mark.slow + @pytest.mark.skipif(sys.maxsize < 2**32, reason="only problematic on 64bit platforms") + @requires_memory(2 * np.iinfo(np.intc).max) + def test_huge_list_error(self): + a = np.array([1]) + max_int = np.iinfo(np.intc).max + arrs = (a,) * (max_int + 1) + msg 
= fr"concatenate\(\) only supports up to {max_int} arrays but got {max_int + 1}." + with pytest.raises(ValueError, match=msg): + np.concatenate(arrs) + def test_concatenate_axis_None(self): a = np.arange(4, dtype=np.float64).reshape((2, 2)) b = list(range(3)) From 1da34ea0daaedb94d2b673c70a47f4a57883a713 Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Fri, 20 Jun 2025 07:43:13 +0200 Subject: [PATCH 0147/1718] DEP: Deprecate setting the strides attribute of a numpy array (#28925) Deprecate setting strides (mutating) on an array. --------- Co-authored-by: Charles Harris Co-authored-by: Sebastian Berg Co-authored-by: Joren Hammudoglu --- .../upcoming_changes/28925.deprecation.rst | 9 +++++ numpy/__init__.pyi | 3 +- numpy/_core/src/multiarray/getset.c | 22 ++++++++---- numpy/_core/tests/test_api.py | 3 +- numpy/_core/tests/test_deprecations.py | 7 ++++ numpy/_core/tests/test_half.py | 6 ++-- numpy/_core/tests/test_multiarray.py | 36 ++++++++++++------- numpy/_core/tests/test_nditer.py | 4 +-- numpy/_core/tests/test_regression.py | 23 ++++++------ numpy/lib/_npyio_impl.py | 2 +- 10 files changed, 75 insertions(+), 40 deletions(-) create mode 100644 doc/release/upcoming_changes/28925.deprecation.rst diff --git a/doc/release/upcoming_changes/28925.deprecation.rst b/doc/release/upcoming_changes/28925.deprecation.rst new file mode 100644 index 000000000000..a421839394fa --- /dev/null +++ b/doc/release/upcoming_changes/28925.deprecation.rst @@ -0,0 +1,9 @@ +Setting the ``strides`` attribute is deprecated +----------------------------------------------- +Setting the strides attribute is now deprecated since mutating +an array is unsafe if an array is shared, especially by multiple +threads. As an alternative, you can create a new view (no copy) via: +* `np.lib.stride_tricks.strided_window_view` if applicable, +* `np.lib.stride_tricks.as_strided` for the general case, +* or the `np.ndarray` constructor (``buffer`` is the original array) for a light-weight version. 
+ diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index bcbe95accee6..2e9dea06ce6a 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -217,7 +217,7 @@ from typing import ( # library include `typing_extensions` stubs: # https://github.com/python/typeshed/blob/main/stdlib/typing_extensions.pyi from _typeshed import Incomplete, StrOrBytesPath, SupportsFlush, SupportsLenAndGetItem, SupportsWrite -from typing_extensions import CapsuleType, TypeVar +from typing_extensions import CapsuleType, TypeVar, deprecated from numpy import ( char, @@ -2169,6 +2169,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def shape(self, value: _ShapeLike) -> None: ... @property def strides(self) -> _Shape: ... + @deprecated("Setting the strides on a NumPy array has been deprecated in NumPy 2.4") @strides.setter def strides(self, value: _ShapeLike) -> None: ... def byteswap(self, inplace: builtins.bool = ...) -> Self: ... diff --git a/numpy/_core/src/multiarray/getset.c b/numpy/_core/src/multiarray/getset.c index 8482b6006e3e..48da52dd3178 100644 --- a/numpy/_core/src/multiarray/getset.c +++ b/numpy/_core/src/multiarray/getset.c @@ -85,7 +85,7 @@ array_shape_set(PyArrayObject *self, PyObject *val, void* NPY_UNUSED(ignored)) /* Free old dimensions and strides */ npy_free_cache_dim_array(self); ((PyArrayObject_fields *)self)->nd = nd; - ((PyArrayObject_fields *)self)->dimensions = _dimensions; + ((PyArrayObject_fields *)self)->dimensions = _dimensions; ((PyArrayObject_fields *)self)->strides = _dimensions + nd; if (nd) { @@ -95,7 +95,7 @@ array_shape_set(PyArrayObject *self, PyObject *val, void* NPY_UNUSED(ignored)) } else { /* Free old dimensions and strides */ - npy_free_cache_dim_array(self); + npy_free_cache_dim_array(self); ((PyArrayObject_fields *)self)->nd = 0; ((PyArrayObject_fields *)self)->dimensions = NULL; ((PyArrayObject_fields *)self)->strides = NULL; @@ -116,6 +116,19 @@ array_strides_get(PyArrayObject *self, void *NPY_UNUSED(ignored)) 
static int array_strides_set(PyArrayObject *self, PyObject *obj, void *NPY_UNUSED(ignored)) { + if (obj == NULL) { + PyErr_SetString(PyExc_AttributeError, + "Cannot delete array strides"); + return -1; + } + + /* Deprecated NumPy 2.4, 2025-05-11 */ + if (DEPRECATE("Setting the strides on a NumPy array has been deprecated in NumPy 2.4.\n" + "As an alternative, you can create a new view using np.lib.stride_tricks.as_strided." + ) < 0 ) { + return -1; + } + PyArray_Dims newstrides = {NULL, -1}; PyArrayObject *new; npy_intp numbytes = 0; @@ -124,11 +137,6 @@ array_strides_set(PyArrayObject *self, PyObject *obj, void *NPY_UNUSED(ignored)) npy_intp upper_offset = 0; Py_buffer view; - if (obj == NULL) { - PyErr_SetString(PyExc_AttributeError, - "Cannot delete array strides"); - return -1; - } if (!PyArray_OptionalIntpConverter(obj, &newstrides) || newstrides.len == -1) { PyErr_SetString(PyExc_TypeError, "invalid strides"); diff --git a/numpy/_core/tests/test_api.py b/numpy/_core/tests/test_api.py index d427ac0399a2..bb21d79c472d 100644 --- a/numpy/_core/tests/test_api.py +++ b/numpy/_core/tests/test_api.py @@ -5,6 +5,7 @@ import numpy as np import numpy._core.umath as ncu from numpy._core._rational_tests import rational +from numpy.lib import stride_tricks from numpy.testing import ( HAS_REFCOUNT, assert_, @@ -558,7 +559,7 @@ def check_copy_result(x, y, ccontig, fcontig, strides=False): def test_contiguous_flags(): a = np.ones((4, 4, 1))[::2, :, :] - a.strides = a.strides[:2] + (-123,) + a = stride_tricks.as_strided(a, strides=a.strides[:2] + (-123,)) b = np.ones((2, 2, 1, 2, 2)).swapaxes(3, 4) def check_contig(a, ccontig, fcontig): diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index c4acbf9d2d69..7d4875d6d149 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -406,6 +406,13 @@ def __array_wrap__(self, arr): self.assert_deprecated(lambda: np.negative(test2)) assert test2.called 
+class TestDeprecatedArrayAttributeSetting(_DeprecationTestCase): + message = "Setting the .*on a NumPy array has been deprecated.*" + + def test_deprecated_strides_set(self): + x = np.eye(2) + self.assert_deprecated(setattr, args=(x, 'strides', x.strides)) + class TestDeprecatedDTypeParenthesizedRepeatCount(_DeprecationTestCase): message = "Passing in a parenthesized single number" diff --git a/numpy/_core/tests/test_half.py b/numpy/_core/tests/test_half.py index e2d6e6796db4..711c13655b7a 100644 --- a/numpy/_core/tests/test_half.py +++ b/numpy/_core/tests/test_half.py @@ -21,7 +21,7 @@ class TestHalf: def setup_method(self): # An array of all possible float16 values self.all_f16 = np.arange(0x10000, dtype=uint16) - self.all_f16.dtype = float16 + self.all_f16 = self.all_f16.view(float16) # NaN value can cause an invalid FP exception if HW is being used with np.errstate(invalid='ignore'): @@ -32,7 +32,7 @@ def setup_method(self): self.nonan_f16 = np.concatenate( (np.arange(0xfc00, 0x7fff, -1, dtype=uint16), np.arange(0x0000, 0x7c01, 1, dtype=uint16))) - self.nonan_f16.dtype = float16 + self.nonan_f16 = self.nonan_f16.view(float16) self.nonan_f32 = np.array(self.nonan_f16, dtype=float32) self.nonan_f64 = np.array(self.nonan_f16, dtype=float64) @@ -218,7 +218,7 @@ def test_half_values(self): 0x0001, 0x8001, 0x0000, 0x8000, 0x7c00, 0xfc00], dtype=uint16) - b.dtype = float16 + b = b.view(dtype=float16) assert_equal(a, b) def test_half_rounding(self): diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 81e55deb3daf..0faf35c64b98 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -29,6 +29,7 @@ from numpy._core.multiarray import _get_ndarray_c_version, dot from numpy._core.tests._locales import CommaDecimalPointLocale from numpy.exceptions import AxisError, ComplexWarning +from numpy.lib import stride_tricks from numpy.lib.recfunctions import repack_fields from numpy.testing import ( 
BLAS_SUPPORTS_FPE, @@ -382,7 +383,8 @@ def make_array(size, offset, strides): offset=offset * x.itemsize) except Exception as e: raise RuntimeError(e) - r.strides = strides = strides * x.itemsize + with pytest.warns(DeprecationWarning): + r.strides = strides * x.itemsize return r assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1])) @@ -392,24 +394,28 @@ def make_array(size, offset, strides): assert_raises(RuntimeError, make_array, 8, 3, 1) # Check that the true extent of the array is used. # Test relies on as_strided base not exposing a buffer. - x = np.lib.stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0)) + x = stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0)) def set_strides(arr, strides): - arr.strides = strides + with pytest.warns(DeprecationWarning): + arr.strides = strides assert_raises(ValueError, set_strides, x, (10 * x.itemsize, x.itemsize)) # Test for offset calculations: - x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1], + x = stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1], shape=(10,), strides=(-1,)) assert_raises(ValueError, set_strides, x[::-1], -1) a = x[::-1] - a.strides = 1 - a[::2].strides = 2 + with pytest.warns(DeprecationWarning): + a.strides = 1 + with pytest.warns(DeprecationWarning): + a[::2].strides = 2 # test 0d arr_0d = np.array(0) - arr_0d.strides = () + with pytest.warns(DeprecationWarning): + arr_0d.strides = () assert_raises(TypeError, set_strides, arr_0d, None) def test_fill(self): @@ -3635,7 +3641,7 @@ def test_ravel(self): a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2) strides = list(a.strides) strides[1] = 123 - a.strides = strides + a = stride_tricks.as_strided(a, strides=strides) assert_(a.ravel(order='K').flags.owndata) assert_equal(a.ravel('K'), np.arange(0, 15, 2)) @@ -3644,7 +3650,7 @@ def test_ravel(self): a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2) strides = list(a.strides) strides[1] = 123 - a.strides = strides + a = stride_tricks.as_strided(a, strides=strides) 
assert_(np.may_share_memory(a.ravel(order='K'), a)) assert_equal(a.ravel(order='K'), np.arange(2**3)) @@ -3657,7 +3663,7 @@ def test_ravel(self): # 1-element tidy strides test: a = np.array([[1]]) - a.strides = (123, 432) + a = stride_tricks.as_strided(a, strides=(123, 432)) if np.ones(1).strides == (8,): assert_(np.may_share_memory(a.ravel('K'), a)) assert_equal(a.ravel('K').strides, (a.dtype.itemsize,)) @@ -4546,7 +4552,8 @@ def test_datetime64_byteorder(self): original = np.array([['2015-02-24T00:00:00.000000000']], dtype='datetime64[ns]') original_byte_reversed = original.copy(order='K') - original_byte_reversed.dtype = original_byte_reversed.dtype.newbyteorder('S') + new_dtype = original_byte_reversed.dtype.newbyteorder('S') + original_byte_reversed = original_byte_reversed.view(dtype=new_dtype) original_byte_reversed.byteswap(inplace=True) new = pickle.loads(pickle.dumps(original_byte_reversed)) @@ -8366,10 +8373,13 @@ def test_padded_struct_array(self): self._check_roundtrip(x3) @pytest.mark.valgrind_error(reason="leaks buffer info cache temporarily.") - def test_relaxed_strides(self, c=np.ones((1, 10, 10), dtype='i8')): # noqa: B008 + def test_relaxed_strides(self, c=stride_tricks.as_strided( # noqa: B008 + np.ones((1, 10, 10), dtype='i8'), # noqa: B008 + strides=(-1, 80, 8) + ) + ): # Note: c defined as parameter so that it is persistent and leak # checks will notice gh-16934 (buffer info cache leak). 
- c.strides = (-1, 80, 8) # strides need to be fixed at export assert_(memoryview(c).strides == (800, 80, 8)) diff --git a/numpy/_core/tests/test_nditer.py b/numpy/_core/tests/test_nditer.py index a29a49bfb71a..f71130f16331 100644 --- a/numpy/_core/tests/test_nditer.py +++ b/numpy/_core/tests/test_nditer.py @@ -858,7 +858,7 @@ def test_iter_nbo_align_contig(): # Unaligned input a = np.zeros((6 * 4 + 1,), dtype='i1')[1:] - a.dtype = 'f4' + a = a.view('f4') a[:] = np.arange(6, dtype='f4') assert_(not a.flags.aligned) # Without 'aligned', shouldn't copy @@ -1803,7 +1803,7 @@ def test_iter_buffering(): arrays.append(np.arange(10, dtype='f4')) # Unaligned array a = np.zeros((4 * 16 + 1,), dtype='i1')[1:] - a.dtype = 'i4' + a = a.view('i4') a[:] = np.arange(16, dtype='i4') arrays.append(a) # 4-D F-order array diff --git a/numpy/_core/tests/test_regression.py b/numpy/_core/tests/test_regression.py index fbfa9311a1dc..3d44728aafaa 100644 --- a/numpy/_core/tests/test_regression.py +++ b/numpy/_core/tests/test_regression.py @@ -12,6 +12,7 @@ import numpy as np from numpy._utils import asbytes, asunicode from numpy.exceptions import AxisError, ComplexWarning +from numpy.lib.stride_tricks import as_strided from numpy.testing import ( HAS_REFCOUNT, IS_64BIT, @@ -208,7 +209,7 @@ def test_mem_dot(self): # Dummy array to detect bad memory access: _z = np.ones(10) _dummy = np.empty((0, 10)) - z = np.lib.stride_tricks.as_strided(_z, _dummy.shape, _dummy.strides) + z = as_strided(_z, _dummy.shape, _dummy.strides) np.dot(x, np.transpose(y), out=z) assert_equal(_z, np.ones(10)) # Do the same for the built-in dot: @@ -438,19 +439,16 @@ def test_lexsort_zerolen_custom_strides(self): xs = np.array([], dtype='i8') assert np.lexsort((xs,)).shape[0] == 0 # Works - xs.strides = (16,) + xs = as_strided(xs, strides=(16,)) assert np.lexsort((xs,)).shape[0] == 0 # Was: MemoryError def test_lexsort_zerolen_custom_strides_2d(self): xs = np.array([], dtype='i8') + xt = as_strided(xs, shape=(0, 2), 
strides=(16, 16)) + assert np.lexsort((xt,), axis=0).shape[0] == 0 - xs.shape = (0, 2) - xs.strides = (16, 16) - assert np.lexsort((xs,), axis=0).shape[0] == 0 - - xs.shape = (2, 0) - xs.strides = (16, 16) - assert np.lexsort((xs,), axis=0).shape[0] == 2 + xt = as_strided(xs, shape=(2, 0), strides=(16, 16)) + assert np.lexsort((xt,), axis=0).shape[0] == 2 def test_lexsort_invalid_axis(self): assert_raises(AxisError, np.lexsort, (np.arange(1),), axis=2) @@ -644,7 +642,7 @@ def test_reshape_order(self): def test_reshape_zero_strides(self): # Issue #380, test reshaping of zero strided arrays a = np.ones(1) - a = np.lib.stride_tricks.as_strided(a, shape=(5,), strides=(0,)) + a = as_strided(a, shape=(5,), strides=(0,)) assert_(a.reshape(5, 1).strides[0] == 0) def test_reshape_zero_size(self): @@ -1654,7 +1652,7 @@ def test_eq_string_and_object_array(self): def test_nonzero_byteswap(self): a = np.array([0x80000000, 0x00000080, 0], dtype=np.uint32) - a.dtype = np.float32 + a = a.view(np.float32) assert_equal(a.nonzero()[0], [1]) a = a.byteswap() a = a.view(a.dtype.newbyteorder()) @@ -1878,7 +1876,8 @@ def test_alignment_update(self): # Check that alignment flag is updated on stride setting a = np.arange(10) assert_(a.flags.aligned) - a.strides = 3 + with pytest.warns(DeprecationWarning): + a.strides = 3 assert_(not a.flags.aligned) def test_ticket_1770(self): diff --git a/numpy/lib/_npyio_impl.py b/numpy/lib/_npyio_impl.py index f284eeb74834..36ead97a1aae 100644 --- a/numpy/lib/_npyio_impl.py +++ b/numpy/lib/_npyio_impl.py @@ -1730,7 +1730,7 @@ def fromregex(file, regexp, dtype, encoding=None): # re-interpret as a single-field structured array. 
newdtype = np.dtype(dtype[dtype.names[0]]) output = np.array(seq, dtype=newdtype) - output.dtype = dtype + output = output.view(dtype) else: output = np.array(seq, dtype=dtype) From 20d034fff6931b97780e9f22172309d81a5e8322 Mon Sep 17 00:00:00 2001 From: Koki Watanabe <56009584+math-hiyoko@users.noreply.github.com> Date: Fri, 20 Jun 2025 22:56:31 +0900 Subject: [PATCH 0148/1718] ENH: np.unique: support hash based unique for string dtype (#28767) * Support NPY_STRING, NPY_UNICODE * unique for NPY_STRING and NPY_UNICODE * fix construct array * remove unneccessary include * refactor * refactoring * comment * feature: unique for NPY_VSTRING * refactoring * remove unneccessary include * add test * add error message * linter * linter * reserve bucket * remove emoji from testcase * fix testcase * remove error * fix testcase * fix testcase name * use basic_string * fix testcase * add ValueError * fix testcase * fix memory error * remove multibyte char * refactoring * add multibyte char * refactoring * fix memory error * fix GIL * fix strlen * remove PyArray_GETPTR1 * refactoring * refactoring * use optional * refactoring * refactoring * refactoring * refactoring * fix comment * linter * add doc * DOC: fix * DOC: fix format * MNT: refactoring * MNT: refactoring * ENH: Store pointers to strings in the set instead of the strings themselves. 
* FIX: length in memcmp * ENH: refactoring * DOC: 49sec -> 34sec * Update numpy/lib/_arraysetops_impl.py Co-authored-by: Nathan Goldbaum * DOC: Mention that hash-based np.unique returns unsorted strings * ENH: support medium and long vstrings * FIX: comment * ENH: use RAII wrapper * FIX: error handling of string packing * FIX: error handling of string packing * FIX: change default bucket size * FIX: include * FIX: cast * ENH: support equal_nan=False * FIX: function equal * FIX: check the case if pack_status douesn't return NULL * FIX: check the case if pack_status douesn't return NULL * FIX: stderr * ENH: METH_VARARGS -> METH_FASTCALL * FIX: log * FIX: release allocator * FIX: comment * FIX: delete log * ENH: implemented FNV-1a as hash function * bool -> npy_bool * FIX: cast * 34sec -> 35.1sec * fix: lint * fix: cast using const void * * fix: fix fnv1a hash * fix: lint * 35.1sec -> 33.5sec * enh: define macro HASH_TABLE_INITIAL_BUCKETS * enh: error handling of NpyString_load * enh: delete comments on GIL * fix: PyErr_SetString when NpyString_load failed * fix: PyErr_SetString -> npy_gil_error --------- Co-authored-by: Nathan Goldbaum --- doc/release/upcoming_changes/28767.change.rst | 10 + .../upcoming_changes/28767.performance.rst | 10 + numpy/_core/meson.build | 1 + numpy/_core/src/multiarray/fnv.c | 85 ++++ numpy/_core/src/multiarray/fnv.h | 26 ++ numpy/_core/src/multiarray/multiarraymodule.c | 2 +- numpy/_core/src/multiarray/unique.cpp | 368 ++++++++++++++---- numpy/_core/src/multiarray/unique.h | 3 +- numpy/lib/_arraysetops_impl.py | 3 +- numpy/lib/tests/test_arraysetops.py | 186 ++++++++- 10 files changed, 605 insertions(+), 89 deletions(-) create mode 100644 doc/release/upcoming_changes/28767.change.rst create mode 100644 doc/release/upcoming_changes/28767.performance.rst create mode 100644 numpy/_core/src/multiarray/fnv.c create mode 100644 numpy/_core/src/multiarray/fnv.h diff --git a/doc/release/upcoming_changes/28767.change.rst 
b/doc/release/upcoming_changes/28767.change.rst new file mode 100644 index 000000000000..ec173c3672b0 --- /dev/null +++ b/doc/release/upcoming_changes/28767.change.rst @@ -0,0 +1,10 @@ +``unique_values`` for string dtypes may return unsorted data +------------------------------------------------------------ +np.unique now supports hash‐based duplicate removal for string dtypes. +This enhancement extends the hash-table algorithm to byte strings ('S'), +Unicode strings ('U'), and the experimental string dtype ('T', StringDType). +As a result, calling np.unique() on an array of strings will use +the faster hash-based method to obtain unique values. +Note that this hash-based method does not guarantee that the returned unique values will be sorted. +This also works for StringDType arrays containing None (missing values) +when using equal_nan=True (treating missing values as equal). diff --git a/doc/release/upcoming_changes/28767.performance.rst b/doc/release/upcoming_changes/28767.performance.rst new file mode 100644 index 000000000000..ef8ac1c3a45d --- /dev/null +++ b/doc/release/upcoming_changes/28767.performance.rst @@ -0,0 +1,10 @@ +Performance improvements to ``np.unique`` for string dtypes +----------------------------------------------------------- +The hash-based algorithm for unique extraction provides +an order-of-magnitude speedup on large string arrays. +In an internal benchmark with about 1 billion string elements, +the hash-based np.unique completed in roughly 33.5 seconds, +compared to 498 seconds with the sort-based method +– about 15× faster for unsorted unique operations on strings. +This improvement greatly reduces the time to find unique values +in very large string datasets. 
diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index cd46a20b0246..6098986618e4 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -1207,6 +1207,7 @@ src_multiarray = multiarray_gen_headers + [ # Remove this `arm64_exports.c` file once scipy macos arm64 build correctly # links to the arm64 npymath library, see gh-22673 'src/npymath/arm64_exports.c', + 'src/multiarray/fnv.c', ] src_umath = umath_gen_headers + [ diff --git a/numpy/_core/src/multiarray/fnv.c b/numpy/_core/src/multiarray/fnv.c new file mode 100644 index 000000000000..2b7848519e61 --- /dev/null +++ b/numpy/_core/src/multiarray/fnv.c @@ -0,0 +1,85 @@ +/* + FNV-1a hash algorithm implementation + Based on the implementation from: + https://github.com/lcn2/fnv +*/ + +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE + +#include +#include "numpy/npy_common.h" +#include "fnv.h" + + +#define FNV1A_32_INIT ((npy_uint32)0x811c9dc5) +#define FNV1A_64_INIT ((npy_uint64)0xcbf29ce484222325ULL) + +/* + Compute a 32-bit FNV-1a hash of buffer + original implementation from: + https://github.com/lcn2/fnv/blob/b7fcbee95538ee6a15744e756e7e7f1c02862cb0/hash_32a.c +*/ +npy_uint32 +npy_fnv1a_32(const void *buf, size_t len, npy_uint32 hval) +{ + const unsigned char *bp = (const unsigned char *)buf; /* start of buffer */ + const unsigned char *be = bp + len; /* beyond end of buffer */ + + /* + FNV-1a hash each octet in the buffer + */ + while (bp < be) { + + /* xor the bottom with the current octet */ + hval ^= (npy_uint32)*bp++; + + /* multiply by the 32 bit FNV magic prime */ + /* hval *= 0x01000193; */ + hval += (hval<<1) + (hval<<4) + (hval<<7) + (hval<<8) + (hval<<24); + } + + return hval; +} + +/* + Compute a 64-bit FNV-1a hash of the given data + original implementation from: + https://github.com/lcn2/fnv/blob/b7fcbee95538ee6a15744e756e7e7f1c02862cb0/hash_64a.c +*/ +npy_uint64 +npy_fnv1a_64(const void *buf, size_t len, npy_uint64 hval) +{ + const unsigned char 
*bp = (const unsigned char *)buf; /* start of buffer */ + const unsigned char *be = bp + len; /* beyond end of buffer */ + + /* + FNV-1a hash each octet in the buffer + */ + while (bp < be) { + + /* xor the bottom with the current octet */ + hval ^= (npy_uint64)*bp++; + + /* multiply by the 64 bit FNV magic prime */ + /* hval *= 0x100000001b3ULL; */ + hval += (hval << 1) + (hval << 4) + (hval << 5) + + (hval << 7) + (hval << 8) + (hval << 40); + } + + return hval; +} + +/* + * Compute a size_t FNV-1a hash of the given data + * This will use 32-bit or 64-bit hash depending on the size of size_t + */ +size_t +npy_fnv1a(const void *buf, size_t len) +{ +#if NPY_SIZEOF_SIZE_T == 8 + return (size_t)npy_fnv1a_64(buf, len, FNV1A_64_INIT); +#else /* NPY_SIZEOF_SIZE_T == 4 */ + return (size_t)npy_fnv1a_32(buf, len, FNV1A_32_INIT); +#endif +} diff --git a/numpy/_core/src/multiarray/fnv.h b/numpy/_core/src/multiarray/fnv.h new file mode 100644 index 000000000000..c76f54a645b9 --- /dev/null +++ b/numpy/_core/src/multiarray/fnv.h @@ -0,0 +1,26 @@ +/* + FNV-1a hash algorithm implementation + Based on the implementation from: + https://github.com/lcn2/fnv +*/ + +#ifndef NUMPY_CORE_INCLUDE_NUMPY_MULTIARRAY_FNV_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_MULTIARRAY_FNV_H_ + + +/* + Compute a size_t FNV-1a hash of the given data + This will use 32-bit or 64-bit hash depending on the size of size_t + + Parameters: + ----------- + buf - pointer to the data to be hashed + len - length of the data in bytes + + Returns: + ----------- + size_t hash value +*/ +size_t npy_fnv1a(const void *buf, size_t len); + +#endif // NUMPY_CORE_INCLUDE_NUMPY_MULTIARRAY_FNV_H_ diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index d4766b5af7b4..e80c6c0cd45c 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -4579,7 +4579,7 @@ static struct PyMethodDef array_module_methods[] = { {"from_dlpack", 
(PyCFunction)from_dlpack, METH_FASTCALL | METH_KEYWORDS, NULL}, {"_unique_hash", (PyCFunction)array__unique_hash, - METH_O, "Collect unique values via a hash map."}, + METH_FASTCALL | METH_KEYWORDS, "Collect unique values via a hash map."}, {NULL, NULL, 0, NULL} /* sentinel */ }; diff --git a/numpy/_core/src/multiarray/unique.cpp b/numpy/_core/src/multiarray/unique.cpp index f36acfdef49a..636f1ef0137c 100644 --- a/numpy/_core/src/multiarray/unique.cpp +++ b/numpy/_core/src/multiarray/unique.cpp @@ -1,13 +1,21 @@ #define NPY_NO_DEPRECATED_API NPY_API_VERSION #define _MULTIARRAYMODULE +#define HASH_TABLE_INITIAL_BUCKETS 1024 #include -#include +#include +#include #include +#include #include #include "numpy/arrayobject.h" +#include "gil_utils.h" +extern "C" { + #include "fnv.h" + #include "npy_argparse.h" +} // This is to use RAII pattern to handle cpp exceptions while avoiding memory leaks. // Adapted from https://stackoverflow.com/a/25510879/2536294 @@ -18,77 +26,128 @@ struct FinalAction { private: F clean_; }; - template FinalAction finally(F f) { return FinalAction(f); } -template +template static PyObject* -unique(PyArrayObject *self) +unique_integer(PyArrayObject *self, npy_bool equal_nan) { - /* This function takes a numpy array and returns a numpy array containing - the unique values. - - It assumes the numpy array includes data that can be viewed as unsigned integers - of a certain size (sizeof(T)). - - It doesn't need to know the actual type, since it needs to find unique values - among binary representations of the input data. This means it won't apply to - custom or complicated dtypes or string values. + /* + * Returns a new NumPy array containing the unique values of the input array of integer. + * This function uses hashing to identify uniqueness efficiently. 
*/ NPY_ALLOW_C_API_DEF; - std::unordered_set hashset; - - NpyIter *iter = NpyIter_New(self, NPY_ITER_READONLY | - NPY_ITER_EXTERNAL_LOOP | - NPY_ITER_REFS_OK | - NPY_ITER_ZEROSIZE_OK | - NPY_ITER_GROWINNER, - NPY_KEEPORDER, NPY_NO_CASTING, - NULL); - // Making sure the iterator is deallocated when the function returns, with - // or w/o an exception - auto iter_dealloc = finally([&]() { NpyIter_Deallocate(iter); }); - if (iter == NULL) { - return NULL; + NPY_ALLOW_C_API; + PyArray_Descr *descr = PyArray_DESCR(self); + Py_INCREF(descr); + NPY_DISABLE_C_API; + + PyThreadState *_save1 = PyEval_SaveThread(); + + // number of elements in the input array + npy_intp isize = PyArray_SIZE(self); + + // Reserve hashset capacity in advance to minimize reallocations and collisions. + // We use min(isize, HASH_TABLE_INITIAL_BUCKETS) as the initial bucket count: + // - Reserving for all elements (isize) may over-allocate when there are few unique values. + // - Using a moderate upper bound HASH_TABLE_INITIAL_BUCKETS(1024) keeps memory usage reasonable (4 KiB for pointers). + // See discussion: https://github.com/numpy/numpy/pull/28767#discussion_r2064267631 + std::unordered_set hashset(std::min(isize, (npy_intp)HASH_TABLE_INITIAL_BUCKETS)); + + // Input array is one-dimensional, enabling efficient iteration using strides. + char *idata = PyArray_BYTES(self); + npy_intp istride = PyArray_STRIDES(self)[0]; + for (npy_intp i = 0; i < isize; i++, idata += istride) { + hashset.insert(*(T *)idata); } - NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(iter, NULL); - if (iternext == NULL) { + npy_intp length = hashset.size(); + + PyEval_RestoreThread(_save1); + NPY_ALLOW_C_API; + PyObject *res_obj = PyArray_NewFromDescr( + &PyArray_Type, + descr, + 1, // ndim + &length, // shape + NULL, // strides + NULL, // data + // This flag is needed to be able to call .sort on it. 
+ NPY_ARRAY_WRITEABLE, // flags + NULL // obj + ); + + if (res_obj == NULL) { return NULL; } - char **dataptr = NpyIter_GetDataPtrArray(iter); - npy_intp *strideptr = NpyIter_GetInnerStrideArray(iter); - npy_intp *innersizeptr = NpyIter_GetInnerLoopSizePtr(iter); - - // release the GIL - PyThreadState *_save; - _save = PyEval_SaveThread(); - // Making sure the GIL is re-acquired when the function returns, with - // or w/o an exception - auto grab_gil = finally([&]() { PyEval_RestoreThread(_save); }); - // first we put the data in a hash map - - if (NpyIter_GetIterSize(iter) > 0) { - do { - char* data = *dataptr; - npy_intp stride = *strideptr; - npy_intp count = *innersizeptr; - - while (count--) { - hashset.insert(*((T *) data)); - data += stride; - } - } while (iternext(iter)); + NPY_DISABLE_C_API; + PyThreadState *_save2 = PyEval_SaveThread(); + auto save2_dealloc = finally([&]() { + PyEval_RestoreThread(_save2); + }); + + char *odata = PyArray_BYTES((PyArrayObject *)res_obj); + npy_intp ostride = PyArray_STRIDES((PyArrayObject *)res_obj)[0]; + // Output array is one-dimensional, enabling efficient iteration using strides. + for (auto it = hashset.begin(); it != hashset.end(); it++, odata += ostride) { + *(T *)odata = *it; } - npy_intp length = hashset.size(); + return res_obj; +} +template +static PyObject* +unique_string(PyArrayObject *self, npy_bool equal_nan) +{ + /* + * Returns a new NumPy array containing the unique values of the input array of fixed size strings. + * This function uses hashing to identify uniqueness efficiently. 
+ */ + NPY_ALLOW_C_API_DEF; NPY_ALLOW_C_API; PyArray_Descr *descr = PyArray_DESCR(self); Py_INCREF(descr); + NPY_DISABLE_C_API; + + PyThreadState *_save1 = PyEval_SaveThread(); + + // number of elements in the input array + npy_intp isize = PyArray_SIZE(self); + + // variables for the string + npy_intp itemsize = descr->elsize; + npy_intp num_chars = itemsize / sizeof(T); + auto hash = [num_chars](const T *value) -> size_t { + return npy_fnv1a(value, num_chars * sizeof(T)); + }; + auto equal = [itemsize](const T *lhs, const T *rhs) -> bool { + return std::memcmp(lhs, rhs, itemsize) == 0; + }; + + // Reserve hashset capacity in advance to minimize reallocations and collisions. + // We use min(isize, HASH_TABLE_INITIAL_BUCKETS) as the initial bucket count: + // - Reserving for all elements (isize) may over-allocate when there are few unique values. + // - Using a moderate upper bound HASH_TABLE_INITIAL_BUCKETS(1024) keeps memory usage reasonable (4 KiB for pointers). + // See discussion: https://github.com/numpy/numpy/pull/28767#discussion_r2064267631 + std::unordered_set hashset( + std::min(isize, (npy_intp)HASH_TABLE_INITIAL_BUCKETS), hash, equal + ); + + // Input array is one-dimensional, enabling efficient iteration using strides. 
+ char *idata = PyArray_BYTES(self); + npy_intp istride = PyArray_STRIDES(self)[0]; + for (npy_intp i = 0; i < isize; i++, idata += istride) { + hashset.insert((T *)idata); + } + + npy_intp length = hashset.size(); + + PyEval_RestoreThread(_save1); + NPY_ALLOW_C_API; PyObject *res_obj = PyArray_NewFromDescr( &PyArray_Type, descr, @@ -100,18 +159,147 @@ unique(PyArrayObject *self) NPY_ARRAY_WRITEABLE, // flags NULL // obj ); + + if (res_obj == NULL) { + return NULL; + } NPY_DISABLE_C_API; + PyThreadState *_save2 = PyEval_SaveThread(); + auto save2_dealloc = finally([&]() { + PyEval_RestoreThread(_save2); + }); + + char *odata = PyArray_BYTES((PyArrayObject *)res_obj); + npy_intp ostride = PyArray_STRIDES((PyArrayObject *)res_obj)[0]; + // Output array is one-dimensional, enabling efficient iteration using strides. + for (auto it = hashset.begin(); it != hashset.end(); it++, odata += ostride) { + std::memcpy(odata, *it, itemsize); + } + + return res_obj; +} + +static PyObject* +unique_vstring(PyArrayObject *self, npy_bool equal_nan) +{ + /* + * Returns a new NumPy array containing the unique values of the input array. + * This function uses hashing to identify uniqueness efficiently. 
+ */ + NPY_ALLOW_C_API_DEF; + NPY_ALLOW_C_API; + PyArray_Descr *descr = PyArray_DESCR(self); + Py_INCREF(descr); + NPY_DISABLE_C_API; + + PyThreadState *_save1 = PyEval_SaveThread(); + + // number of elements in the input array + npy_intp isize = PyArray_SIZE(self); + + // variables for the vstring + npy_string_allocator *in_allocator = NpyString_acquire_allocator((PyArray_StringDTypeObject *)descr); + auto hash = [equal_nan](const npy_static_string *value) -> size_t { + if (value->buf == NULL) { + if (equal_nan) { + return 0; + } else { + return std::hash{}(value); + } + } + return npy_fnv1a(value->buf, value->size * sizeof(char)); + }; + auto equal = [equal_nan](const npy_static_string *lhs, const npy_static_string *rhs) -> bool { + if (lhs->buf == NULL && rhs->buf == NULL) { + if (equal_nan) { + return true; + } else { + return lhs == rhs; + } + } + if (lhs->buf == NULL || rhs->buf == NULL) { + return false; + } + if (lhs->size != rhs->size) { + return false; + } + return std::memcmp(lhs->buf, rhs->buf, lhs->size) == 0; + }; + // Reserve hashset capacity in advance to minimize reallocations and collisions. + // We use min(isize, HASH_TABLE_INITIAL_BUCKETS) as the initial bucket count: + // - Reserving for all elements (isize) may over-allocate when there are few unique values. + // - Using a moderate upper bound HASH_TABLE_INITIAL_BUCKETS(1024) keeps memory usage reasonable (4 KiB for pointers). + // See discussion: https://github.com/numpy/numpy/pull/28767#discussion_r2064267631 + std::unordered_set hashset( + std::min(isize, (npy_intp)HASH_TABLE_INITIAL_BUCKETS), hash, equal + ); + + // Input array is one-dimensional, enabling efficient iteration using strides. + char *idata = PyArray_BYTES(self); + npy_intp istride = PyArray_STRIDES(self)[0]; + // unpacked_strings need to be allocated outside of the loop because of the lifetime problem. 
+ std::vector unpacked_strings(isize, {0, NULL}); + for (npy_intp i = 0; i < isize; i++, idata += istride) { + npy_packed_static_string *packed_string = (npy_packed_static_string *)idata; + int is_null = NpyString_load(in_allocator, packed_string, &unpacked_strings[i]); + if (is_null == -1) { + npy_gil_error(PyExc_RuntimeError, + "Failed to load string from packed static string. "); + return NULL; + } + hashset.insert(&unpacked_strings[i]); + } + + NpyString_release_allocator(in_allocator); + + npy_intp length = hashset.size(); + + PyEval_RestoreThread(_save1); + NPY_ALLOW_C_API; + PyObject *res_obj = PyArray_NewFromDescr( + &PyArray_Type, + descr, + 1, // ndim + &length, // shape + NULL, // strides + NULL, // data + // This flag is needed to be able to call .sort on it. + NPY_ARRAY_WRITEABLE, // flags + NULL // obj + ); if (res_obj == NULL) { return NULL; } + PyArray_Descr *res_descr = PyArray_DESCR((PyArrayObject *)res_obj); + Py_INCREF(res_descr); + NPY_DISABLE_C_API; + + PyThreadState *_save2 = PyEval_SaveThread(); + auto save2_dealloc = finally([&]() { + PyEval_RestoreThread(_save2); + }); + + npy_string_allocator *out_allocator = NpyString_acquire_allocator((PyArray_StringDTypeObject *)res_descr); + auto out_allocator_dealloc = finally([&]() { + NpyString_release_allocator(out_allocator); + }); - // then we iterate through the map's keys to get the unique values - T* data = (T *)PyArray_DATA((PyArrayObject *)res_obj); - auto it = hashset.begin(); - size_t i = 0; - for (; it != hashset.end(); it++, i++) { - data[i] = *it; + char *odata = PyArray_BYTES((PyArrayObject *)res_obj); + npy_intp ostride = PyArray_STRIDES((PyArrayObject *)res_obj)[0]; + // Output array is one-dimensional, enabling efficient iteration using strides. 
+ for (auto it = hashset.begin(); it != hashset.end(); it++, odata += ostride) { + npy_packed_static_string *packed_string = (npy_packed_static_string *)odata; + int pack_status = 0; + if ((*it)->buf == NULL) { + pack_status = NpyString_pack_null(out_allocator, packed_string); + } else { + pack_status = NpyString_pack(out_allocator, packed_string, (*it)->buf, (*it)->size); + } + if (pack_status == -1) { + // string packing failed + return NULL; + } } return res_obj; @@ -119,27 +307,30 @@ unique(PyArrayObject *self) // this map contains the functions used for each item size. -typedef std::function function_type; +typedef std::function function_type; std::unordered_map unique_funcs = { - {NPY_BYTE, unique}, - {NPY_UBYTE, unique}, - {NPY_SHORT, unique}, - {NPY_USHORT, unique}, - {NPY_INT, unique}, - {NPY_UINT, unique}, - {NPY_LONG, unique}, - {NPY_ULONG, unique}, - {NPY_LONGLONG, unique}, - {NPY_ULONGLONG, unique}, - {NPY_INT8, unique}, - {NPY_INT16, unique}, - {NPY_INT32, unique}, - {NPY_INT64, unique}, - {NPY_UINT8, unique}, - {NPY_UINT16, unique}, - {NPY_UINT32, unique}, - {NPY_UINT64, unique}, - {NPY_DATETIME, unique}, + {NPY_BYTE, unique_integer}, + {NPY_UBYTE, unique_integer}, + {NPY_SHORT, unique_integer}, + {NPY_USHORT, unique_integer}, + {NPY_INT, unique_integer}, + {NPY_UINT, unique_integer}, + {NPY_LONG, unique_integer}, + {NPY_ULONG, unique_integer}, + {NPY_LONGLONG, unique_integer}, + {NPY_ULONGLONG, unique_integer}, + {NPY_INT8, unique_integer}, + {NPY_INT16, unique_integer}, + {NPY_INT32, unique_integer}, + {NPY_INT64, unique_integer}, + {NPY_UINT8, unique_integer}, + {NPY_UINT16, unique_integer}, + {NPY_UINT32, unique_integer}, + {NPY_UINT64, unique_integer}, + {NPY_DATETIME, unique_integer}, + {NPY_STRING, unique_string}, + {NPY_UNICODE, unique_string}, + {NPY_VSTRING, unique_vstring}, }; @@ -154,14 +345,21 @@ std::unordered_map unique_funcs = { * type is unsupported or `NULL` with an error set. 
*/ extern "C" NPY_NO_EXPORT PyObject * -array__unique_hash(PyObject *NPY_UNUSED(module), PyObject *arr_obj) +array__unique_hash(PyObject *NPY_UNUSED(module), + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { - if (!PyArray_Check(arr_obj)) { - PyErr_SetString(PyExc_TypeError, - "_unique_hash() requires a NumPy array input."); + PyArrayObject *arr = NULL; + npy_bool equal_nan = NPY_TRUE; // default to True + + NPY_PREPARE_ARGPARSER; + if (npy_parse_arguments("_unique_hash", args, len_args, kwnames, + "arr", &PyArray_Converter, &arr, + "|equal_nan", &PyArray_BoolConverter, &equal_nan, + NULL, NULL, NULL + ) < 0 + ) { return NULL; } - PyArrayObject *arr = (PyArrayObject *)arr_obj; try { auto type = PyArray_TYPE(arr); @@ -170,7 +368,7 @@ array__unique_hash(PyObject *NPY_UNUSED(module), PyObject *arr_obj) Py_RETURN_NOTIMPLEMENTED; } - return unique_funcs[type](arr); + return unique_funcs[type](arr, equal_nan); } catch (const std::bad_alloc &e) { PyErr_NoMemory(); diff --git a/numpy/_core/src/multiarray/unique.h b/numpy/_core/src/multiarray/unique.h index 3e258405e8f4..7b3fb143ada4 100644 --- a/numpy/_core/src/multiarray/unique.h +++ b/numpy/_core/src/multiarray/unique.h @@ -5,7 +5,8 @@ extern "C" { #endif -PyObject* array__unique_hash(PyObject *NPY_UNUSED(dummy), PyObject *args); +PyObject* array__unique_hash(PyObject *NPY_UNUSED(dummy), + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames); #ifdef __cplusplus } diff --git a/numpy/lib/_arraysetops_impl.py b/numpy/lib/_arraysetops_impl.py index ef0739ba486f..c4788385b924 100644 --- a/numpy/lib/_arraysetops_impl.py +++ b/numpy/lib/_arraysetops_impl.py @@ -368,7 +368,8 @@ def _unique1d(ar, return_index=False, return_inverse=False, conv = _array_converter(ar) ar_, = conv - if (hash_unique := _unique_hash(ar_)) is not NotImplemented: + if (hash_unique := _unique_hash(ar_, equal_nan=equal_nan)) \ + is not NotImplemented: if sorted: hash_unique.sort() # We wrap the result back in case it was a 
subclass of numpy.ndarray. diff --git a/numpy/lib/tests/test_arraysetops.py b/numpy/lib/tests/test_arraysetops.py index 7865e1b16ee9..b3e2bfa279b0 100644 --- a/numpy/lib/tests/test_arraysetops.py +++ b/numpy/lib/tests/test_arraysetops.py @@ -5,6 +5,7 @@ import numpy as np from numpy import ediff1d, intersect1d, isin, setdiff1d, setxor1d, union1d, unique +from numpy.dtypes import StringDType from numpy.exceptions import AxisError from numpy.testing import ( assert_array_equal, @@ -813,7 +814,9 @@ def test_unique_1d(self): def test_unique_zero_sized(self): # test for zero-sized arrays - for dt in self.get_types(): + types = self.get_types() + types.extend('SU') + for dt in types: a = np.array([], dt) b = np.array([], dt) i1 = np.array([], np.int64) @@ -838,6 +841,187 @@ class Subclass(np.ndarray): bb = Subclass(b.shape, dtype=dt, buffer=b) self.check_all(aa, bb, i1, i2, c, dt) + def test_unique_byte_string_hash_based(self): + # test for byte string arrays + arr = ['apple', 'banana', 'apple', 'cherry', 'date', 'banana', 'fig', 'grape'] + unq_sorted = ['apple', 'banana', 'cherry', 'date', 'fig', 'grape'] + + a1 = unique(arr, sorted=False) + # the result varies depending on the impl of std::unordered_set, + # so we check them by sorting + assert_array_equal(sorted(a1.tolist()), unq_sorted) + + def test_unique_unicode_string_hash_based(self): + # test for unicode string arrays + arr = [ + 'café', 'cafe', 'café', 'naïve', 'naive', + 'résumé', 'naïve', 'resume', 'résumé', + ] + unq_sorted = ['cafe', 'café', 'naive', 'naïve', 'resume', 'résumé'] + + a1 = unique(arr, sorted=False) + # the result varies depending on the impl of std::unordered_set, + # so we check them by sorting + assert_array_equal(sorted(a1.tolist()), unq_sorted) + + def test_unique_vstring_hash_based_equal_nan(self): + # test for unicode and nullable string arrays (equal_nan=True) + a = np.array([ + # short strings + 'straße', + None, + 'strasse', + 'straße', + None, + 'niño', + 'nino', + 'élève', + 
'eleve', + 'niño', + 'élève', + # medium strings + 'b' * 20, + 'ß' * 30, + None, + 'é' * 30, + 'e' * 20, + 'ß' * 30, + 'n' * 30, + 'ñ' * 20, + None, + 'e' * 20, + 'ñ' * 20, + # long strings + 'b' * 300, + 'ß' * 400, + None, + 'é' * 400, + 'e' * 300, + 'ß' * 400, + 'n' * 400, + 'ñ' * 300, + None, + 'e' * 300, + 'ñ' * 300, + ], + dtype=StringDType(na_object=None) + ) + unq_sorted_wo_none = [ + 'b' * 20, + 'b' * 300, + 'e' * 20, + 'e' * 300, + 'eleve', + 'nino', + 'niño', + 'n' * 30, + 'n' * 400, + 'strasse', + 'straße', + 'ß' * 30, + 'ß' * 400, + 'élève', + 'é' * 30, + 'é' * 400, + 'ñ' * 20, + 'ñ' * 300, + ] + + a1 = unique(a, sorted=False, equal_nan=True) + # the result varies depending on the impl of std::unordered_set, + # so we check them by sorting + + # a1 should have exactly one None + count_none = sum(x is None for x in a1) + assert_equal(count_none, 1) + + a1_wo_none = sorted(x for x in a1 if x is not None) + assert_array_equal(a1_wo_none, unq_sorted_wo_none) + + def test_unique_vstring_hash_based_not_equal_nan(self): + # test for unicode and nullable string arrays (equal_nan=False) + a = np.array([ + # short strings + 'straße', + None, + 'strasse', + 'straße', + None, + 'niño', + 'nino', + 'élève', + 'eleve', + 'niño', + 'élève', + # medium strings + 'b' * 20, + 'ß' * 30, + None, + 'é' * 30, + 'e' * 20, + 'ß' * 30, + 'n' * 30, + 'ñ' * 20, + None, + 'e' * 20, + 'ñ' * 20, + # long strings + 'b' * 300, + 'ß' * 400, + None, + 'é' * 400, + 'e' * 300, + 'ß' * 400, + 'n' * 400, + 'ñ' * 300, + None, + 'e' * 300, + 'ñ' * 300, + ], + dtype=StringDType(na_object=None) + ) + unq_sorted_wo_none = [ + 'b' * 20, + 'b' * 300, + 'e' * 20, + 'e' * 300, + 'eleve', + 'nino', + 'niño', + 'n' * 30, + 'n' * 400, + 'strasse', + 'straße', + 'ß' * 30, + 'ß' * 400, + 'élève', + 'é' * 30, + 'é' * 400, + 'ñ' * 20, + 'ñ' * 300, + ] + + a1 = unique(a, sorted=False, equal_nan=False) + # the result varies depending on the impl of std::unordered_set, + # so we check them by sorting + + # a1 
should have exactly one None + count_none = sum(x is None for x in a1) + assert_equal(count_none, 6) + + a1_wo_none = sorted(x for x in a1 if x is not None) + assert_array_equal(a1_wo_none, unq_sorted_wo_none) + + def test_unique_vstring_errors(self): + a = np.array( + [ + 'apple', 'banana', 'apple', None, 'cherry', + 'date', 'banana', 'fig', None, 'grape', + ] * 2, + dtype=StringDType(na_object=None) + ) + assert_raises(ValueError, unique, a, equal_nan=False) + @pytest.mark.parametrize("arg", ["return_index", "return_inverse", "return_counts"]) def test_unsupported_hash_based(self, arg): """These currently never use the hash-based solution. However, From 7c91551b85650c5465d21956acf66a47d9d0d02b Mon Sep 17 00:00:00 2001 From: specsy Date: Fri, 20 Jun 2025 20:29:10 +0530 Subject: [PATCH 0149/1718] DOC: Update CONTRIBUTING.rst (#28158) * Update CONTRIBUTING.rst fixes #19778 Updating the contibution section so that contributors avoid doing mkstakes while asking questions, instead they focus on doing contribution and work on project right after. * Update CONTRIBUTING.rst Shortened the length of the sentence. * Update CONTRIBUTING.rst --- CONTRIBUTING.rst | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 6e019983a0a2..0919790c65d1 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -7,8 +7,9 @@ Whether you're new to open source or experienced, your contributions help us grow. Pull requests (PRs) are always welcome, but making a PR is just the -start. Please respond to comments and requests for changes to help -move the process forward. Please follow our +start. Please respond to comments and requests for changes to help move the process forward. +Skip asking for an issue to be assigned to you on GitHub—send in your PR, explain what you did and ask for a review. It makes collaboration and support much easier. 
+Please follow our `Code of Conduct `__, which applies to all interactions, including issues and PRs. From e3e875ab478a855d2c4198df652f1dc34fc09f17 Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Fri, 20 Jun 2025 21:28:38 +0200 Subject: [PATCH 0150/1718] MAIN: Enable linting with E501 --- numpy/_core/tests/test_api.py | 9 ++++--- numpy/_core/tests/test_shape_base.py | 9 ++++--- numpy/lib/tests/test_function_base.py | 36 ++++++++++++++++----------- ruff.toml | 4 +-- 4 files changed, 33 insertions(+), 25 deletions(-) diff --git a/numpy/_core/tests/test_api.py b/numpy/_core/tests/test_api.py index bb21d79c472d..da4a8f423bc5 100644 --- a/numpy/_core/tests/test_api.py +++ b/numpy/_core/tests/test_api.py @@ -591,11 +591,12 @@ def check_contig(a, ccontig, fcontig): def test_broadcast_arrays(): # Test user defined dtypes - a = np.array([(1, 2, 3)], dtype='u4,u4,u4') - b = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4') + dtype = 'u4,u4,u4' + a = np.array([(1, 2, 3)], dtype=dtype) + b = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype=dtype) result = np.broadcast_arrays(a, b) - assert_equal(result[0], np.array([(1, 2, 3), (1, 2, 3), (1, 2, 3)], dtype='u4,u4,u4')) - assert_equal(result[1], np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4')) + assert_equal(result[0], np.array([(1, 2, 3), (1, 2, 3), (1, 2, 3)], dtype=dtype)) + assert_equal(result[1], np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype=dtype)) @pytest.mark.parametrize(["shape", "fill_value", "expected_output"], [((2, 2), [5.0, 6.0], np.array([[5.0, 6.0], [5.0, 6.0]])), diff --git a/numpy/_core/tests/test_shape_base.py b/numpy/_core/tests/test_shape_base.py index 8de24278fc5d..9a6febaf51f0 100644 --- a/numpy/_core/tests/test_shape_base.py +++ b/numpy/_core/tests/test_shape_base.py @@ -294,13 +294,15 @@ def test_exceptions(self): assert_raises(ValueError, concatenate, ()) @pytest.mark.slow - @pytest.mark.skipif(sys.maxsize < 2**32, reason="only problematic on 64bit platforms") + 
@pytest.mark.skipif(sys.maxsize < 2**32, + reason="only problematic on 64bit platforms") @requires_memory(2 * np.iinfo(np.intc).max) def test_huge_list_error(self): a = np.array([1]) max_int = np.iinfo(np.intc).max arrs = (a,) * (max_int + 1) - msg = fr"concatenate\(\) only supports up to {max_int} arrays but got {max_int + 1}." + msg = (fr"concatenate\(\) only supports up to {max_int} arrays" + f" but got {max_int + 1}.") with pytest.raises(ValueError, match=msg): np.concatenate(arrs) @@ -379,7 +381,8 @@ def test_concatenate(self): assert_(out is rout) assert_equal(res, rout) - @pytest.mark.skipif(IS_PYPY, reason="PYPY handles sq_concat, nb_add differently than cpython") + @pytest.mark.skipif(IS_PYPY, + reason="PYPY handles sq_concat, nb_add differently than cpython") def test_operator_concat(self): import operator a = array([1, 2]) diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index f2dba193c849..8f79e3d88b8c 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -65,6 +65,7 @@ suppress_warnings, ) +np_floats = [np.half, np.single, np.double, np.longdouble] def get_mat(n): data = np.arange(n) @@ -309,7 +310,7 @@ def test_basic(self): def test_order(self): # It turns out that people rely on np.copy() preserving order by # default; changing this broke scikit-learn: - # github.com/scikit-learn/scikit-learn/commit/7842748cf777412c506a8c0ed28090711d3a3783 + # github.com/scikit-learn/scikit-learn/commit/7842748cf777412c506a8c0ed28090711d3a3783 # noqa: E501 a = np.array([[1, 2], [3, 4]]) assert_(a.flags.c_contiguous) assert_(not a.flags.f_contiguous) @@ -2520,7 +2521,7 @@ def test_extreme(self): assert_array_almost_equal(c, np.array([[1., -1.], [-1., 1.]])) assert_(np.all(np.abs(c) <= 1.0)) - @pytest.mark.parametrize("test_type", [np.half, np.single, np.double, np.longdouble]) + @pytest.mark.parametrize("test_type", np_floats) def test_corrcoef_dtype(self, test_type): cast_A = 
self.A.astype(test_type) res = corrcoef(cast_A, dtype=test_type) @@ -2626,7 +2627,7 @@ def test_unit_fweights_and_aweights(self): aweights=self.unit_weights), self.res1) - @pytest.mark.parametrize("test_type", [np.half, np.single, np.double, np.longdouble]) + @pytest.mark.parametrize("test_type", np_floats) def test_cov_dtype(self, test_type): cast_x1 = self.x1.astype(test_type) res = cov(cast_x1, dtype=test_type) @@ -2648,7 +2649,8 @@ def test_simple(self): # need at least one test above 8, as the implementation is piecewise A = np.array([0.49842636, 0.6969809, 0.22011976, 0.0155549, 10.0]) - expected = np.array([1.06307822, 1.12518299, 1.01214991, 1.00006049, 2815.71662847]) + expected = np.array([1.06307822, 1.12518299, 1.01214991, + 1.00006049, 2815.71662847]) assert_almost_equal(i0(A), expected) assert_almost_equal(i0(-A), expected) @@ -3141,23 +3143,27 @@ def test_non_finite_any_nan(self, sc): def test_non_finite_inf(self, sc): """ Test that interp between opposite infs gives nan """ - assert_equal(np.interp(0.5, [-np.inf, +np.inf], sc([ 0, 10])), sc(np.nan)) - assert_equal(np.interp(0.5, [ 0, 1], sc([-np.inf, +np.inf])), sc(np.nan)) - assert_equal(np.interp(0.5, [ 0, 1], sc([+np.inf, -np.inf])), sc(np.nan)) + inf = np.inf + nan = np.nan + assert_equal(np.interp(0.5, [-inf, +inf], sc([ 0, 10])), sc(nan)) + assert_equal(np.interp(0.5, [ 0, 1], sc([-inf, +inf])), sc(nan)) + assert_equal(np.interp(0.5, [ 0, 1], sc([+inf, -inf])), sc(nan)) # unless the y values are equal assert_equal(np.interp(0.5, [-np.inf, +np.inf], sc([ 10, 10])), sc(10)) def test_non_finite_half_inf_xf(self, sc): """ Test that interp where both axes have a bound at inf gives nan """ - assert_equal(np.interp(0.5, [-np.inf, 1], sc([-np.inf, 10])), sc(np.nan)) - assert_equal(np.interp(0.5, [-np.inf, 1], sc([+np.inf, 10])), sc(np.nan)) - assert_equal(np.interp(0.5, [-np.inf, 1], sc([ 0, -np.inf])), sc(np.nan)) - assert_equal(np.interp(0.5, [-np.inf, 1], sc([ 0, +np.inf])), sc(np.nan)) - 
assert_equal(np.interp(0.5, [ 0, +np.inf], sc([-np.inf, 10])), sc(np.nan)) - assert_equal(np.interp(0.5, [ 0, +np.inf], sc([+np.inf, 10])), sc(np.nan)) - assert_equal(np.interp(0.5, [ 0, +np.inf], sc([ 0, -np.inf])), sc(np.nan)) - assert_equal(np.interp(0.5, [ 0, +np.inf], sc([ 0, +np.inf])), sc(np.nan)) + inf = np.inf + nan = np.nan + assert_equal(np.interp(0.5, [-inf, 1], sc([-inf, 10])), sc(nan)) + assert_equal(np.interp(0.5, [-inf, 1], sc([+inf, 10])), sc(nan)) + assert_equal(np.interp(0.5, [-inf, 1], sc([ 0, -inf])), sc(nan)) + assert_equal(np.interp(0.5, [-inf, 1], sc([ 0, +inf])), sc(nan)) + assert_equal(np.interp(0.5, [ 0, +inf], sc([-inf, 10])), sc(nan)) + assert_equal(np.interp(0.5, [ 0, +inf], sc([+inf, 10])), sc(nan)) + assert_equal(np.interp(0.5, [ 0, +inf], sc([ 0, -inf])), sc(nan)) + assert_equal(np.interp(0.5, [ 0, +inf], sc([ 0, +inf])), sc(nan)) def test_non_finite_half_inf_x(self, sc): """ Test interp where the x axis has a bound at inf """ diff --git a/ruff.toml b/ruff.toml index 7454c6c05e5b..70bef8fb095d 100644 --- a/ruff.toml +++ b/ruff.toml @@ -73,7 +73,6 @@ ignore = [ "test*.py" = ["B015", "B018", "E201", "E714"] "benchmarks/benchmarks/bench_linalg.py" = ["E501"] -"numpy/_core/tests/test_api.py" = ["E501"] "numpy/_core/tests/test_arrayprint.py" = ["E501"] "numpy/_core/tests/test_cpu_dispatcher.py" = ["E501"] "numpy/_core/tests/test_cpu_features.py" = ["E501"] @@ -88,13 +87,12 @@ ignore = [ "numpy/_core/tests/test_umath*py" = ["E501"] "numpy/_core/tests/test_numeric*.py" = ["E501"] "numpy/_core/tests/test_regression.py" = ["E501"] -"numpy/_core/tests/test_shape_base.py" = ["E501"] "numpy/_core/tests/test_simd*.py" = ["E501"] "numpy/_core/tests/test_strings.py" = ["E501"] "numpy/_core/_add_newdocs.py" = ["E501"] "numpy/_core/_add_newdocs_scalars.py" = ["E501"] "numpy/_core/code_generators/generate_umath.py" = ["E501"] -"numpy/lib/tests/test_function_base.py" = ["E501"] +#"numpy/lib/tests/test_function_base.py" = ["E501"] 
"numpy/lib/tests/test_format.py" = ["E501"] "numpy/lib/tests/test_io.py" = ["E501"] "numpy/lib/tests/test_polynomial.py" = ["E501"] From dea4451c945f878f20402ebf2bc8261b9c000e59 Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Fri, 20 Jun 2025 21:30:38 +0200 Subject: [PATCH 0151/1718] MAIN: Enable linting with E501 --- numpy/lib/tests/test_io.py | 9 ++++++--- ruff.toml | 3 +-- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py index 79fca0dd690b..48f579d7ddc7 100644 --- a/numpy/lib/tests/test_io.py +++ b/numpy/lib/tests/test_io.py @@ -645,7 +645,8 @@ def check_large_zip(memoryerror_raised): raise MemoryError("Child process raised a MemoryError exception") # -9 indicates a SIGKILL, probably an OOM. if p.exitcode == -9: - pytest.xfail("subprocess got a SIGKILL, apparently free memory was not sufficient") + msg = "subprocess got a SIGKILL, apparently free memory was not sufficient" + pytest.xfail(msg) assert p.exitcode == 0 class LoadTxtBase: @@ -1673,7 +1674,8 @@ def test_dtype_with_converters_and_usecols(self): conv = {0: int, 1: int, 2: int, 3: lambda r: dmap[r.decode()]} test = recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',', names=None, converters=conv, encoding="bytes") - control = np.rec.array([(1, 5, -1, 0), (2, 8, -1, 1), (3, 3, -2, 3)], dtype=dtyp) + control = np.rec.array([(1, 5, -1, 0), (2, 8, -1, 1), (3, 3, -2, 3)], + dtype=dtyp) assert_equal(test, control) dtyp = [('e1', 'i4'), ('e2', 'i4'), ('n', 'i1')] test = recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',', @@ -1886,7 +1888,8 @@ def test_user_missing_values(self): # basekwargs['dtype'] = mdtype test = np.genfromtxt(TextIO(data), - missing_values={0: -9, 1: -99, 2: -999j}, usemask=True, **basekwargs) + missing_values={0: -9, 1: -99, 2: -999j}, + usemask=True, **basekwargs) control = ma.array([(0, 0.0, 0j), (1, -999, 1j), (-9, 2.2, -999j), (3, -99, 3j)], mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)], diff --git a/ruff.toml 
b/ruff.toml index 70bef8fb095d..75a2f54a6734 100644 --- a/ruff.toml +++ b/ruff.toml @@ -92,9 +92,8 @@ ignore = [ "numpy/_core/_add_newdocs.py" = ["E501"] "numpy/_core/_add_newdocs_scalars.py" = ["E501"] "numpy/_core/code_generators/generate_umath.py" = ["E501"] -#"numpy/lib/tests/test_function_base.py" = ["E501"] "numpy/lib/tests/test_format.py" = ["E501"] -"numpy/lib/tests/test_io.py" = ["E501"] +#"numpy/lib/tests/test_io.py" = ["E501"] "numpy/lib/tests/test_polynomial.py" = ["E501"] "numpy/linalg/tests/test_linalg.py" = ["E501"] "numpy/tests/test_configtool.py" = ["E501"] From 09eb6b78d6fbe017702d66c08057a5304dd38c9f Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Fri, 20 Jun 2025 21:33:43 +0200 Subject: [PATCH 0152/1718] MAIN: Enable linting with E501 --- numpy/lib/tests/test_polynomial.py | 15 ++++++++++----- ruff.toml | 2 -- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/numpy/lib/tests/test_polynomial.py b/numpy/lib/tests/test_polynomial.py index c173ac321d74..65ad5c854639 100644 --- a/numpy/lib/tests/test_polynomial.py +++ b/numpy/lib/tests/test_polynomial.py @@ -61,7 +61,8 @@ def test_poly1d_math(self): assert_equal(p * q, np.poly1d([3., 8., 14., 8., 3.])) assert_equal(p + q, np.poly1d([4., 4., 4.])) assert_equal(p - q, np.poly1d([-2., 0., 2.])) - assert_equal(p ** 4, np.poly1d([1., 8., 36., 104., 214., 312., 324., 216., 81.])) + assert_equal(p ** 4, np.poly1d([1., 8., 36., 104., 214., + 312., 324., 216., 81.])) assert_equal(p(q), np.poly1d([9., 12., 16., 8., 6.])) assert_equal(q(p), np.poly1d([3., 12., 32., 40., 34.])) assert_equal(p.deriv(), np.poly1d([2., 2.])) @@ -131,12 +132,16 @@ def test_roots(self): for i in np.logspace(10, 25, num=1000, base=10): tgt = np.array([-1, 1, i]) res = np.sort(np.roots(poly.polyfromroots(tgt)[::-1])) - assert_almost_equal(res, tgt, 14 - int(np.log10(i))) # Adapting the expected precision according to the root value, to take into account numerical calculation error + # Adapting the expected precision 
according to the root value, + # to take into account numerical calculation error + assert_almost_equal(res, tgt, 14 - int(np.log10(i))) for i in np.logspace(10, 25, num=1000, base=10): tgt = np.array([-1, 1.01, i]) res = np.sort(np.roots(poly.polyfromroots(tgt)[::-1])) - assert_almost_equal(res, tgt, 14 - int(np.log10(i))) # Adapting the expected precision according to the root value, to take into account numerical calculation error + # Adapting the expected precision according to the root value, + # to take into account numerical calculation error + assert_almost_equal(res, tgt, 14 - int(np.log10(i))) def test_str_leading_zeros(self): p = np.poly1d([4, 3, 2, 1]) @@ -249,8 +254,8 @@ def test_complex(self): def test_integ_coeffs(self): p = np.poly1d([3, 2, 1]) p2 = p.integ(3, k=[9, 7, 6]) - assert_( - (p2.coeffs == [1 / 4. / 5., 1 / 3. / 4., 1 / 2. / 3., 9 / 1. / 2., 7, 6]).all()) + expected = [1 / 4. / 5., 1 / 3. / 4., 1 / 2. / 3., 9 / 1. / 2., 7, 6] + assert_( (p2.coeffs == expected).all()) def test_zero_dims(self): try: diff --git a/ruff.toml b/ruff.toml index 75a2f54a6734..a50900937073 100644 --- a/ruff.toml +++ b/ruff.toml @@ -93,8 +93,6 @@ ignore = [ "numpy/_core/_add_newdocs_scalars.py" = ["E501"] "numpy/_core/code_generators/generate_umath.py" = ["E501"] "numpy/lib/tests/test_format.py" = ["E501"] -#"numpy/lib/tests/test_io.py" = ["E501"] -"numpy/lib/tests/test_polynomial.py" = ["E501"] "numpy/linalg/tests/test_linalg.py" = ["E501"] "numpy/tests/test_configtool.py" = ["E501"] "numpy/f2py/*py" = ["E501"] From 1b3a0d9385aa1ded43885d966bb8733da839762f Mon Sep 17 00:00:00 2001 From: Yuki Kobayashi Date: Sat, 21 Jun 2025 00:32:00 +0000 Subject: [PATCH 0153/1718] DOC: Fix some markup errors --- doc/source/f2py/buildtools/distutils-to-meson.rst | 2 +- doc/source/reference/random/multithreading.rst | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/f2py/buildtools/distutils-to-meson.rst 
b/doc/source/f2py/buildtools/distutils-to-meson.rst index bf5da973e9fa..b24638e62239 100644 --- a/doc/source/f2py/buildtools/distutils-to-meson.rst +++ b/doc/source/f2py/buildtools/distutils-to-meson.rst @@ -117,7 +117,7 @@ sample is included below. +------------------------------------+-------------------------------+ | LDFLAGS | Linker options | +------------------------------------+-------------------------------+ - | LD\ :sub:`LIBRARY`\ \ :sub:`PATH`\ | Library file locations (Unix) | + | LD\_LIBRARY\_PATH | Library file locations (Unix) | +------------------------------------+-------------------------------+ | LIBS | Libraries to link against | +------------------------------------+-------------------------------+ diff --git a/doc/source/reference/random/multithreading.rst b/doc/source/reference/random/multithreading.rst index 17c6a515cdbc..73d2fc9ee5ad 100644 --- a/doc/source/reference/random/multithreading.rst +++ b/doc/source/reference/random/multithreading.rst @@ -9,7 +9,7 @@ well-behaved (writable and aligned). Under normal circumstances, arrays created using the common constructors such as :meth:`numpy.empty` will satisfy these requirements. -This example makes use of:mod:`concurrent.futures` to fill an array using +This example makes use of :mod:`concurrent.futures` to fill an array using multiple threads. Threads are long-lived so that repeated calls do not require any additional overheads from thread creation. From 124ac8d41f2d913cbac1a05452cdae3d80d7e789 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sat, 21 Jun 2025 07:40:15 -0600 Subject: [PATCH 0154/1718] MAINT: Update main after 2.1.0 release. 
- Add 2.3.1-notes.rst - Add 2.3.1-changelog.rst - Update release.rst [skip cirrus] [skip azp] [skip actions] --- doc/changelog/2.3.1-changelog.rst | 34 +++++++++++++++++++ doc/source/release.rst | 1 + doc/source/release/2.3.1-notes.rst | 53 ++++++++++++++++++++++++++++++ 3 files changed, 88 insertions(+) create mode 100644 doc/changelog/2.3.1-changelog.rst create mode 100644 doc/source/release/2.3.1-notes.rst diff --git a/doc/changelog/2.3.1-changelog.rst b/doc/changelog/2.3.1-changelog.rst new file mode 100644 index 000000000000..a1c840f8beda --- /dev/null +++ b/doc/changelog/2.3.1-changelog.rst @@ -0,0 +1,34 @@ + +Contributors +============ + +A total of 9 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Brad Smith + +* Charles Harris +* Developer-Ecosystem-Engineering +* François Rozet +* Joren Hammudoglu +* Matti Picus +* Mugundan Selvanayagam +* Nathan Goldbaum +* Sebastian Berg + +Pull requests merged +==================== + +A total of 12 pull requests were merged for this release. + +* `#29140 `__: MAINT: Prepare 2.3.x for further development +* `#29191 `__: BUG: fix matmul with transposed out arg (#29179) +* `#29192 `__: TYP: Backport typing fixes and improvements. +* `#29205 `__: BUG: Revert ``np.vectorize`` casting to legacy behavior (#29196) +* `#29222 `__: TYP: Backport typing fixes +* `#29233 `__: BUG: avoid negating unsigned integers in resize implementation... +* `#29234 `__: TST: Fix test that uses unininitialized memory (#29232) +* `#29235 `__: BUG: Address interaction between SME and FPSR (#29223) +* `#29237 `__: BUG: Enforce integer limitation in concatenate (#29231) +* `#29238 `__: CI: Add support for building NumPy with LLVM for Win-ARM64 +* `#29241 `__: ENH: Detect CPU features on OpenBSD ARM and PowerPC64 +* `#29242 `__: ENH: Detect CPU features on FreeBSD / OpenBSD RISC-V64. 
diff --git a/doc/source/release.rst b/doc/source/release.rst index 6c6a853b06f5..59e6dd07b002 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -6,6 +6,7 @@ Release notes :maxdepth: 2 2.4.0 + 2.3.1 2.3.0 2.2.6 2.2.5 diff --git a/doc/source/release/2.3.1-notes.rst b/doc/source/release/2.3.1-notes.rst new file mode 100644 index 000000000000..d8193f07671c --- /dev/null +++ b/doc/source/release/2.3.1-notes.rst @@ -0,0 +1,53 @@ +.. currentmodule:: numpy + +========================= +NumPy 2.3.1 Release Notes +========================= + +The NumPy 2.3.1 release is a patch release with several bug fixes, annotation +improvements, and better support for OpenBSD. Highlights are: + +- Fix bug in ``matmul`` for non-contiguous out kwarg parameter +- Fix for Accelerate runtime warnings on M4 hardware +- Fix new in NumPy 2.3.0 ``np.vectorize`` casting errors +- Improved support of cpu features for FreeBSD and OpenBSD + +This release supports Python versions 3.11-3.13, Python 3.14 will be supported +when it is released. + + +Contributors +============ + +A total of 9 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Brad Smith + +* Charles Harris +* Developer-Ecosystem-Engineering +* François Rozet +* Joren Hammudoglu +* Matti Picus +* Mugundan Selvanayagam +* Nathan Goldbaum +* Sebastian Berg + + +Pull requests merged +==================== + +A total of 12 pull requests were merged for this release. + +* `#29140 `__: MAINT: Prepare 2.3.x for further development +* `#29191 `__: BUG: fix matmul with transposed out arg (#29179) +* `#29192 `__: TYP: Backport typing fixes and improvements. +* `#29205 `__: BUG: Revert ``np.vectorize`` casting to legacy behavior (#29196) +* `#29222 `__: TYP: Backport typing fixes +* `#29233 `__: BUG: avoid negating unsigned integers in resize implementation... 
+* `#29234 `__: TST: Fix test that uses uninitialized memory (#29232) +* `#29235 `__: BUG: Address interaction between SME and FPSR (#29223) +* `#29237 `__: BUG: Enforce integer limitation in concatenate (#29231) +* `#29238 `__: CI: Add support for building NumPy with LLVM for Win-ARM64 +* `#29241 `__: ENH: Detect CPU features on OpenBSD ARM and PowerPC64 +* `#29242 `__: ENH: Detect CPU features on FreeBSD / OpenBSD RISC-V64. + From 1fefc5c6b767e86c2642641e7ba7a22ab0cf554b Mon Sep 17 00:00:00 2001 From: Mohammed Abdul Rahman <130785777+that-ar-guy@users.noreply.github.com> Date: Mon, 23 Jun 2025 23:26:14 +0530 Subject: [PATCH 0155/1718] DOC: Clarify dtype argument for __array__ in custom container guide (#29254) * DOC: Clarify dtype argument for __array__ in custom container guide --- doc/source/user/basics.dispatch.rst | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/doc/source/user/basics.dispatch.rst b/doc/source/user/basics.dispatch.rst index ae53995a3917..117d60f85467 100644 --- a/doc/source/user/basics.dispatch.rst +++ b/doc/source/user/basics.dispatch.rst @@ -46,6 +46,21 @@ array([[1., 0., 0., 0., 0.], [0., 0., 0., 1., 0.], [0., 0., 0., 0., 1.]]) +The ``__array__`` method can optionally accept a `dtype` argument. If provided, +this argument specifies the desired data type for the resulting NumPy array. +Your implementation should attempt to convert the data to this `dtype` +if possible. If the conversion is not supported, it's generally best +to fall back to a default type or raise a `TypeError` or `ValueError`. + +Here's an example demonstrating its use with `dtype` specification: + +>>> np.asarray(arr, dtype=np.float32) +array([[1., 0., 0., 0., 0.], + [0., 1., 0., 0., 0.], + [0., 0., 1., 0., 0.], + [0., 0., 0., 1., 0.], + [0., 0., 0., 0., 1.]], dtype=float32) + If we operate on ``arr`` with a numpy function, numpy will again use the ``__array__`` interface to convert it to an array and then apply the function in the usual way. 
From 32b43214b4622c336258899744a9d5692c30c689 Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Mon, 23 Jun 2025 22:23:50 +0200 Subject: [PATCH 0156/1718] Apply suggestions from code review Co-authored-by: Joren Hammudoglu --- numpy/_core/tests/test_shape_base.py | 12 ++++++++---- numpy/lib/tests/test_function_base.py | 24 ++++++++++++------------ numpy/lib/tests/test_polynomial.py | 4 ++-- 3 files changed, 22 insertions(+), 18 deletions(-) diff --git a/numpy/_core/tests/test_shape_base.py b/numpy/_core/tests/test_shape_base.py index 9a6febaf51f0..1b9728e5c006 100644 --- a/numpy/_core/tests/test_shape_base.py +++ b/numpy/_core/tests/test_shape_base.py @@ -294,8 +294,10 @@ def test_exceptions(self): assert_raises(ValueError, concatenate, ()) @pytest.mark.slow - @pytest.mark.skipif(sys.maxsize < 2**32, - reason="only problematic on 64bit platforms") + @pytest.mark.skipif( + sys.maxsize < 2**32, + reason="only problematic on 64bit platforms" + ) @requires_memory(2 * np.iinfo(np.intc).max) def test_huge_list_error(self): a = np.array([1]) @@ -381,8 +383,10 @@ def test_concatenate(self): assert_(out is rout) assert_equal(res, rout) - @pytest.mark.skipif(IS_PYPY, - reason="PYPY handles sq_concat, nb_add differently than cpython") + @pytest.mark.skipif( + IS_PYPY, + reason="PYPY handles sq_concat, nb_add differently than cpython" + ) def test_operator_concat(self): import operator a = array([1, 2]) diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 8f79e3d88b8c..eccf4bcfb019 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -310,7 +310,7 @@ def test_basic(self): def test_order(self): # It turns out that people rely on np.copy() preserving order by # default; changing this broke scikit-learn: - # github.com/scikit-learn/scikit-learn/commit/7842748cf777412c506a8c0ed28090711d3a3783 # noqa: E501 + # github.com/scikit-learn/scikit-learn/commit/7842748 a = np.array([[1, 2], [3, 4]]) 
assert_(a.flags.c_contiguous) assert_(not a.flags.f_contiguous) @@ -3145,9 +3145,9 @@ def test_non_finite_inf(self, sc): """ Test that interp between opposite infs gives nan """ inf = np.inf nan = np.nan - assert_equal(np.interp(0.5, [-inf, +inf], sc([ 0, 10])), sc(nan)) - assert_equal(np.interp(0.5, [ 0, 1], sc([-inf, +inf])), sc(nan)) - assert_equal(np.interp(0.5, [ 0, 1], sc([+inf, -inf])), sc(nan)) + assert_equal(np.interp(0.5, [-inf, +inf], sc([ 0, 10])), sc(nan)) + assert_equal(np.interp(0.5, [ 0, 1], sc([-inf, +inf])), sc(nan)) + assert_equal(np.interp(0.5, [ 0, 1], sc([+inf, -inf])), sc(nan)) # unless the y values are equal assert_equal(np.interp(0.5, [-np.inf, +np.inf], sc([ 10, 10])), sc(10)) @@ -3156,14 +3156,14 @@ def test_non_finite_half_inf_xf(self, sc): """ Test that interp where both axes have a bound at inf gives nan """ inf = np.inf nan = np.nan - assert_equal(np.interp(0.5, [-inf, 1], sc([-inf, 10])), sc(nan)) - assert_equal(np.interp(0.5, [-inf, 1], sc([+inf, 10])), sc(nan)) - assert_equal(np.interp(0.5, [-inf, 1], sc([ 0, -inf])), sc(nan)) - assert_equal(np.interp(0.5, [-inf, 1], sc([ 0, +inf])), sc(nan)) - assert_equal(np.interp(0.5, [ 0, +inf], sc([-inf, 10])), sc(nan)) - assert_equal(np.interp(0.5, [ 0, +inf], sc([+inf, 10])), sc(nan)) - assert_equal(np.interp(0.5, [ 0, +inf], sc([ 0, -inf])), sc(nan)) - assert_equal(np.interp(0.5, [ 0, +inf], sc([ 0, +inf])), sc(nan)) + assert_equal(np.interp(0.5, [-inf, 1], sc([-inf, 10])), sc(nan)) + assert_equal(np.interp(0.5, [-inf, 1], sc([+inf, 10])), sc(nan)) + assert_equal(np.interp(0.5, [-inf, 1], sc([ 0, -inf])), sc(nan)) + assert_equal(np.interp(0.5, [-inf, 1], sc([ 0, +inf])), sc(nan)) + assert_equal(np.interp(0.5, [ 0, +inf], sc([-inf, 10])), sc(nan)) + assert_equal(np.interp(0.5, [ 0, +inf], sc([+inf, 10])), sc(nan)) + assert_equal(np.interp(0.5, [ 0, +inf], sc([ 0, -inf])), sc(nan)) + assert_equal(np.interp(0.5, [ 0, +inf], sc([ 0, +inf])), sc(nan)) def test_non_finite_half_inf_x(self, sc): 
""" Test interp where the x axis has a bound at inf """ diff --git a/numpy/lib/tests/test_polynomial.py b/numpy/lib/tests/test_polynomial.py index 65ad5c854639..32547f8e6c18 100644 --- a/numpy/lib/tests/test_polynomial.py +++ b/numpy/lib/tests/test_polynomial.py @@ -254,8 +254,8 @@ def test_complex(self): def test_integ_coeffs(self): p = np.poly1d([3, 2, 1]) p2 = p.integ(3, k=[9, 7, 6]) - expected = [1 / 4. / 5., 1 / 3. / 4., 1 / 2. / 3., 9 / 1. / 2., 7, 6] - assert_( (p2.coeffs == expected).all()) + expected = [1 / 4 / 5, 1 / 3 / 4, 1 / 2 / 3, 9 / 1 / 2, 7, 6] + assert_((p2.coeffs == expected).all()) def test_zero_dims(self): try: From 2d6a4d76062e20e6220401a1609345d433bd6c8b Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Wed, 25 Jun 2025 14:35:57 +0100 Subject: [PATCH 0157/1718] TYP: Type ``MaskedArray.__{mul,rmul}__`` (#29265) --- numpy/__init__.pyi | 2 + numpy/ma/core.pyi | 86 +++++++++++++++++++++++- numpy/typing/tests/data/reveal/ma.pyi | 97 +++++++++++++++++++++++++++ 3 files changed, 183 insertions(+), 2 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 2e9dea06ce6a..9fae513bdf39 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -3007,6 +3007,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __rsub__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + # Keep in sync with `MaskedArray.__mul__` @overload def __mul__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... @overload @@ -3048,6 +3049,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __mul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + # Keep in sync with `MaskedArray.__rmul__` @overload # signature equivalent to __mul__ def __rmul__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... 
@overload diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index de6db7873faa..6c0de39594dd 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -658,8 +658,90 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def __rsub__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... - def __mul__(self, other): ... - def __rmul__(self, other): ... + # Keep in sync with `ndarray.__mul__` + @overload + def __mul__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __mul__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __mul__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __mul__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... + @overload + def __mul__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __mul__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... 
# type: ignore[overload-overlap] + @overload + def __mul__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... + @overload + def __mul__(self: _MaskedArray[timedelta64], other: _ArrayLikeFloat_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __mul__(self: _MaskedArrayFloat_co, other: _ArrayLike[timedelta64], /) -> _MaskedArray[timedelta64]: ... + @overload + def __mul__( + self: MaskedArray[Any, dtype[character] | dtypes.StringDType], + other: _ArrayLikeInt, + /, + ) -> MaskedArray[tuple[Any, ...], _DTypeT_co]: ... + @overload + def __mul__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __mul__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `ndarray.__rmul__` + @overload # signature equivalent to __mul__ + def __rmul__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __rmul__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __rmul__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __rmul__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... 
+ @overload + def __rmul__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __rmul__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... + @overload + def __rmul__(self: _MaskedArray[timedelta64], other: _ArrayLikeFloat_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __rmul__(self: _MaskedArrayFloat_co, other: _ArrayLike[timedelta64], /) -> _MaskedArray[timedelta64]: ... + @overload + def __rmul__( + self: MaskedArray[Any, dtype[character] | dtypes.StringDType], + other: _ArrayLikeInt, + /, + ) -> MaskedArray[tuple[Any, ...], _DTypeT_co]: ... + @overload + def __rmul__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __rmul__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + def __truediv__(self, other): ... def __rtruediv__(self, other): ... def __floordiv__(self, other): ... 
diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index 97f833b6a488..60ee04218489 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -623,3 +623,100 @@ assert_type(AR_LIKE_c - MAR_o, Any) assert_type(AR_LIKE_td64 - MAR_o, Any) assert_type(AR_LIKE_dt64 - MAR_o, Any) assert_type(AR_LIKE_o - MAR_o, Any) + +# Masked Array multiplication + +assert_type(MAR_b * AR_LIKE_u, MaskedArray[np.uint32]) +assert_type(MAR_b * AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_b * AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_b * AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_b * AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_b * AR_LIKE_o, Any) + +# Ignore due to https://github.com/python/mypy/issues/19341 +assert_type(AR_LIKE_u * MAR_b, MaskedArray[np.uint32]) # type: ignore[assert-type] +assert_type(AR_LIKE_i * MAR_b, MaskedArray[np.signedinteger]) # type: ignore[assert-type] +assert_type(AR_LIKE_f * MAR_b, MaskedArray[np.floating]) # type: ignore[assert-type] +assert_type(AR_LIKE_c * MAR_b, MaskedArray[np.complexfloating]) # type: ignore[assert-type] +assert_type(AR_LIKE_td64 * MAR_b, MaskedArray[np.timedelta64]) # type: ignore[assert-type] +assert_type(AR_LIKE_o * MAR_b, Any) # type: ignore[assert-type] + +assert_type(MAR_u4 * AR_LIKE_b, MaskedArray[np.uint32]) +assert_type(MAR_u4 * AR_LIKE_u, MaskedArray[np.unsignedinteger]) +assert_type(MAR_u4 * AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_u4 * AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_u4 * AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_u4 * AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_u4 * AR_LIKE_o, Any) + +assert_type(MAR_i8 * AR_LIKE_b, MaskedArray[np.int64]) +assert_type(MAR_i8 * AR_LIKE_u, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 * AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 * AR_LIKE_f, 
MaskedArray[np.floating]) +assert_type(MAR_i8 * AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_i8 * AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_i8 * AR_LIKE_o, Any) + +assert_type(MAR_f8 * AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_f8 * AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_f8 * AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_f8 * AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_f8 * AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_f8 * AR_LIKE_o, Any) + +# Ignore due to https://github.com/python/mypy/issues/19341 +assert_type(AR_LIKE_b * MAR_f8, MaskedArray[np.float64]) # type: ignore[assert-type] +assert_type(AR_LIKE_u * MAR_f8, MaskedArray[np.float64]) # type: ignore[assert-type] +assert_type(AR_LIKE_i * MAR_f8, MaskedArray[np.float64]) # type: ignore[assert-type] +assert_type(AR_LIKE_f * MAR_f8, MaskedArray[np.float64]) # type: ignore[assert-type] +assert_type(AR_LIKE_c * MAR_f8, MaskedArray[np.complexfloating]) # type: ignore[assert-type] +assert_type(AR_LIKE_o * MAR_f8, Any) # type: ignore[assert-type] + +assert_type(MAR_c16 * AR_LIKE_b, MaskedArray[np.complex128]) +assert_type(MAR_c16 * AR_LIKE_u, MaskedArray[np.complex128]) +assert_type(MAR_c16 * AR_LIKE_i, MaskedArray[np.complex128]) +assert_type(MAR_c16 * AR_LIKE_f, MaskedArray[np.complex128]) +assert_type(MAR_c16 * AR_LIKE_c, MaskedArray[np.complex128]) +assert_type(MAR_c16 * AR_LIKE_o, Any) + +# Ignore due to https://github.com/python/mypy/issues/19341 +assert_type(AR_LIKE_b * MAR_c16, MaskedArray[np.complex128]) # type: ignore[assert-type] +assert_type(AR_LIKE_u * MAR_c16, MaskedArray[np.complex128]) # type: ignore[assert-type] +assert_type(AR_LIKE_i * MAR_c16, MaskedArray[np.complex128]) # type: ignore[assert-type] +assert_type(AR_LIKE_f * MAR_c16, MaskedArray[np.complex128]) # type: ignore[assert-type] +assert_type(AR_LIKE_c * MAR_c16, MaskedArray[np.complex128]) # type: ignore[assert-type] +assert_type(AR_LIKE_o * MAR_c16, 
Any) # type: ignore[assert-type] + +assert_type(MAR_td64 * AR_LIKE_b, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 * AR_LIKE_u, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 * AR_LIKE_i, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 * AR_LIKE_o, Any) + +# Ignore due to https://github.com/python/mypy/issues/19341 +assert_type(AR_LIKE_b * MAR_td64, MaskedArray[np.timedelta64]) # type: ignore[assert-type] +assert_type(AR_LIKE_u * MAR_td64, MaskedArray[np.timedelta64]) # type: ignore[assert-type] +assert_type(AR_LIKE_i * MAR_td64, MaskedArray[np.timedelta64]) # type: ignore[assert-type] +assert_type(AR_LIKE_td64 * MAR_td64, MaskedArray[np.timedelta64]) # type: ignore[assert-type] +assert_type(AR_LIKE_dt64 * MAR_td64, MaskedArray[np.datetime64]) # type: ignore[assert-type] +assert_type(AR_LIKE_o * MAR_td64, Any) # type: ignore[assert-type] + +assert_type(AR_LIKE_o * MAR_dt64, Any) # type: ignore[assert-type] + +assert_type(MAR_o * AR_LIKE_b, Any) +assert_type(MAR_o * AR_LIKE_u, Any) +assert_type(MAR_o * AR_LIKE_i, Any) +assert_type(MAR_o * AR_LIKE_f, Any) +assert_type(MAR_o * AR_LIKE_c, Any) +assert_type(MAR_o * AR_LIKE_td64, Any) +assert_type(MAR_o * AR_LIKE_dt64, Any) +assert_type(MAR_o * AR_LIKE_o, Any) + +# Ignore due to https://github.com/python/mypy/issues/19341 +assert_type(AR_LIKE_b * MAR_o, Any) # type: ignore[assert-type] +assert_type(AR_LIKE_u * MAR_o, Any) # type: ignore[assert-type] +assert_type(AR_LIKE_i * MAR_o, Any) # type: ignore[assert-type] +assert_type(AR_LIKE_f * MAR_o, Any) # type: ignore[assert-type] +assert_type(AR_LIKE_c * MAR_o, Any) # type: ignore[assert-type] +assert_type(AR_LIKE_td64 * MAR_o, Any) # type: ignore[assert-type] +assert_type(AR_LIKE_dt64 * MAR_o, Any) # type: ignore[assert-type] +assert_type(AR_LIKE_o * MAR_o, Any) # type: ignore[assert-type] From f8281b92e527564c390f834e3d467651026b27b9 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Wed, 25 Jun 2025 11:39:05 -0600 Subject: [PATCH 0158/1718] BUG: fix 
fencepost error in StringDType internals (#29269) This makes all the comparisons with NPY_MEDIUM_STRING_MAX_SIZE use <= consistently: --- .../src/multiarray/stringdtype/static_string.c | 2 +- numpy/_core/tests/test_stringdtype.py | 18 ++++++++++++++++++ 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/numpy/_core/src/multiarray/stringdtype/static_string.c b/numpy/_core/src/multiarray/stringdtype/static_string.c index 1c29bbb67f7e..89b53bcb8538 100644 --- a/numpy/_core/src/multiarray/stringdtype/static_string.c +++ b/numpy/_core/src/multiarray/stringdtype/static_string.c @@ -478,7 +478,7 @@ heap_or_arena_allocate(npy_string_allocator *allocator, if (*flags == 0) { // string isn't previously allocated, so add to existing arena allocation char *ret = arena_malloc(arena, allocator->realloc, sizeof(char) * size); - if (size < NPY_MEDIUM_STRING_MAX_SIZE) { + if (size <= NPY_MEDIUM_STRING_MAX_SIZE) { *flags = NPY_STRING_INITIALIZED; } else { diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index 9bab810d4421..1c2b606dedf3 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -1193,6 +1193,24 @@ def test_growing_strings(dtype): assert_array_equal(arr, uarr) +def test_assign_medium_strings(): + # see gh-29261 + N = 9 + src = np.array( + ( + ['0' * 256] * 3 + ['0' * 255] + ['0' * 256] + ['0' * 255] + + ['0' * 256] * 2 + ['0' * 255] + ), dtype='T') + dst = np.array( + ( + ['0' * 255] + ['0' * 256] * 2 + ['0' * 255] + ['0' * 256] + + ['0' * 255] + [''] * 5 + ), dtype='T') + + dst[1:N + 1] = src + assert_array_equal(dst[1:N + 1], src) + + UFUNC_TEST_DATA = [ "hello" * 10, "Ae¢☃€ 😊" * 20, From b89320a514ed121f8b21959307fc778165e53323 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 25 Jun 2025 20:03:33 +0200 Subject: [PATCH 0159/1718] STY: ruff/isort config tweaks - episode 2 (#29185) * DEV: enable ruff/isort ``combine-as-imports`` and ``split-on-trailing-comma`` * STY: 
run ``ruff check --fix`` to fix the new ``I001`` errors --- numpy/__config__.pyi | 10 +- numpy/__init__.py | 3 +- numpy/_core/_add_newdocs_scalars.py | 3 +- numpy/_core/_asarray.py | 6 +- numpy/_core/_dtype.pyi | 3 +- numpy/_core/_methods.py | 4 +- numpy/_core/_type_aliases.pyi | 3 +- numpy/_core/defchararray.py | 14 +- numpy/_core/defchararray.pyi | 31 ++- numpy/_core/fromnumeric.py | 5 +- numpy/_core/function_base.pyi | 3 +- numpy/_core/getlimits.py | 3 +- numpy/_core/multiarray.pyi | 8 +- numpy/_core/numeric.py | 3 +- numpy/_core/numeric.pyi | 20 +- numpy/_core/numerictypes.pyi | 3 +- numpy/_core/records.py | 3 +- numpy/_core/shape_base.py | 4 +- numpy/_core/strings.py | 28 +- numpy/_core/strings.pyi | 16 +- numpy/_core/tests/test_argparse.py | 2 - numpy/_core/tests/test_custom_dtypes.py | 2 +- numpy/_core/tests/test_indexerrors.py | 5 +- numpy/_core/tests/test_numeric.py | 3 +- numpy/_core/tests/test_scalar_ctors.py | 6 +- numpy/_core/tests/test_stringdtype.py | 3 +- numpy/_core/tests/test_umath.py | 3 +- numpy/_typing/__init__.py | 262 +++++++++--------- numpy/_typing/_callable.pyi | 6 +- numpy/_typing/_dtype_like.py | 9 +- numpy/_utils/__init__.pyi | 3 +- numpy/_utils/_pep440.pyi | 4 +- numpy/ctypeslib/__init__.pyi | 18 -- numpy/ctypeslib/_ctypeslib.pyi | 10 +- numpy/dtypes.pyi | 2 +- numpy/f2py/__init__.pyi | 3 +- numpy/f2py/_backends/_meson.pyi | 3 +- numpy/f2py/auxfuncs.pyi | 3 +- numpy/f2py/crackfortran.pyi | 13 +- numpy/f2py/f2py2e.pyi | 3 +- numpy/f2py/rules.pyi | 3 +- numpy/f2py/symbolic.pyi | 3 +- numpy/f2py/tests/test_kind.py | 2 - numpy/fft/__init__.pyi | 7 +- numpy/fft/_helper.pyi | 3 +- numpy/fft/_pocketfft.pyi | 3 +- numpy/fft/helper.pyi | 3 +- numpy/lib/__init__.pyi | 52 ++-- numpy/lib/_arraypad_impl.pyi | 11 +- numpy/lib/_arraysetops_impl.pyi | 11 +- numpy/lib/_function_base_impl.py | 11 +- numpy/lib/_function_base_impl.pyi | 2 +- numpy/lib/_histograms_impl.pyi | 14 +- numpy/lib/_index_tricks_impl.pyi | 13 +- numpy/lib/_nanfunctions_impl.pyi | 
6 +- numpy/lib/_npyio_impl.pyi | 2 +- numpy/lib/_polynomial_impl.pyi | 4 +- numpy/lib/_twodim_base_impl.pyi | 10 +- numpy/lib/_type_check_impl.pyi | 3 +- numpy/lib/array_utils.pyi | 6 - numpy/lib/format.pyi | 42 --- numpy/lib/introspect.py | 3 +- numpy/lib/mixins.pyi | 3 +- numpy/lib/npyio.pyi | 4 - numpy/lib/scimath.pyi | 18 -- numpy/lib/stride_tricks.pyi | 2 - numpy/lib/tests/test__iotools.py | 7 +- numpy/linalg/__init__.pyi | 4 +- numpy/linalg/_linalg.py | 30 +- numpy/linalg/_linalg.pyi | 2 +- numpy/linalg/_umath_linalg.pyi | 3 +- numpy/ma/core.py | 2 +- numpy/ma/extras.py | 3 +- numpy/ma/tests/test_mrecords.py | 14 +- numpy/ma/tests/test_old_ma.py | 6 +- numpy/matrixlib/tests/test_matrix_linalg.py | 2 +- numpy/polynomial/chebyshev.pyi | 3 +- numpy/polynomial/hermite.pyi | 3 +- numpy/polynomial/hermite_e.pyi | 3 +- numpy/polynomial/laguerre.pyi | 3 +- numpy/polynomial/legendre.pyi | 3 +- numpy/polynomial/polynomial.pyi | 3 +- numpy/polynomial/polyutils.pyi | 9 +- numpy/polynomial/tests/test_chebyshev.py | 7 +- numpy/polynomial/tests/test_classes.py | 7 +- numpy/polynomial/tests/test_hermite.py | 7 +- numpy/polynomial/tests/test_hermite_e.py | 7 +- numpy/polynomial/tests/test_laguerre.py | 7 +- numpy/polynomial/tests/test_legendre.py | 7 +- numpy/polynomial/tests/test_polyutils.py | 7 +- .../tests/test_randomstate_regression.py | 6 +- numpy/random/tests/test_regression.py | 6 +- numpy/testing/_private/utils.pyi | 2 +- numpy/tests/test_reloading.py | 8 +- numpy/tests/test_scripts.py | 3 +- ruff.toml | 2 + 96 files changed, 352 insertions(+), 601 deletions(-) diff --git a/numpy/__config__.pyi b/numpy/__config__.pyi index b59bdcd252b6..21e8b01fdd96 100644 --- a/numpy/__config__.pyi +++ b/numpy/__config__.pyi @@ -1,7 +1,13 @@ from enum import Enum from types import ModuleType -from typing import Final, NotRequired, TypedDict, overload, type_check_only -from typing import Literal as L +from typing import ( + Final, + Literal as L, + NotRequired, + TypedDict, + 
overload, + type_check_only, +) _CompilerConfigDictValue = TypedDict( "_CompilerConfigDictValue", diff --git a/numpy/__init__.py b/numpy/__init__.py index aadc1fab3407..d6702bba4622 100644 --- a/numpy/__init__.py +++ b/numpy/__init__.py @@ -454,8 +454,7 @@ pass del ta - from . import lib - from . import matrixlib as _mat + from . import lib, matrixlib as _mat from .lib import scimath as emath from .lib._arraypad_impl import pad from .lib._arraysetops_impl import ( diff --git a/numpy/_core/_add_newdocs_scalars.py b/numpy/_core/_add_newdocs_scalars.py index 96170d80c7c9..9d4cb48825e0 100644 --- a/numpy/_core/_add_newdocs_scalars.py +++ b/numpy/_core/_add_newdocs_scalars.py @@ -6,8 +6,7 @@ import os import sys -from numpy._core import dtype -from numpy._core import numerictypes as _numerictypes +from numpy._core import dtype, numerictypes as _numerictypes from numpy._core.function_base import add_newdoc ############################################################################## diff --git a/numpy/_core/_asarray.py b/numpy/_core/_asarray.py index 613c5cf57060..edaff5222f69 100644 --- a/numpy/_core/_asarray.py +++ b/numpy/_core/_asarray.py @@ -4,11 +4,7 @@ `require` fits this category despite its name not matching this pattern. 
""" from .multiarray import array, asanyarray -from .overrides import ( - array_function_dispatch, - finalize_array_function_like, - set_module, -) +from .overrides import array_function_dispatch, finalize_array_function_like, set_module __all__ = ["require"] diff --git a/numpy/_core/_dtype.pyi b/numpy/_core/_dtype.pyi index adb38583783f..28adecf4ad2f 100644 --- a/numpy/_core/_dtype.pyi +++ b/numpy/_core/_dtype.pyi @@ -1,5 +1,4 @@ -from typing import Final, TypeAlias, TypedDict, overload, type_check_only -from typing import Literal as L +from typing import Final, Literal as L, TypeAlias, TypedDict, overload, type_check_only from typing_extensions import ReadOnly, TypeVar import numpy as np diff --git a/numpy/_core/_methods.py b/numpy/_core/_methods.py index 21ad7900016b..a3d5b02a2a14 100644 --- a/numpy/_core/_methods.py +++ b/numpy/_core/_methods.py @@ -9,9 +9,7 @@ from contextlib import nullcontext import numpy as np -from numpy._core import multiarray as mu -from numpy._core import numerictypes as nt -from numpy._core import umath as um +from numpy._core import multiarray as mu, numerictypes as nt, umath as um from numpy._core.multiarray import asanyarray from numpy._globals import _NoValue diff --git a/numpy/_core/_type_aliases.pyi b/numpy/_core/_type_aliases.pyi index 3c9dac7a1202..3d57e8135378 100644 --- a/numpy/_core/_type_aliases.pyi +++ b/numpy/_core/_type_aliases.pyi @@ -1,6 +1,5 @@ from collections.abc import Collection -from typing import Final, TypeAlias, TypedDict, type_check_only -from typing import Literal as L +from typing import Final, Literal as L, TypeAlias, TypedDict, type_check_only import numpy as np diff --git a/numpy/_core/defchararray.py b/numpy/_core/defchararray.py index bde8921f5504..0378e976254d 100644 --- a/numpy/_core/defchararray.py +++ b/numpy/_core/defchararray.py @@ -22,31 +22,19 @@ from numpy._core.multiarray import compare_chararrays from numpy._core.strings import ( _join as join, -) -from numpy._core.strings import ( _rsplit 
as rsplit, -) -from numpy._core.strings import ( _split as split, -) -from numpy._core.strings import ( _splitlines as splitlines, ) from numpy._utils import set_module from numpy.strings import * from numpy.strings import ( multiply as strings_multiply, -) -from numpy.strings import ( partition as strings_partition, -) -from numpy.strings import ( rpartition as strings_rpartition, ) -from .numeric import array as narray -from .numeric import asarray as asnarray -from .numeric import ndarray +from .numeric import array as narray, asarray as asnarray, ndarray from .numerictypes import bytes_, character, str_ __all__ = [ diff --git a/numpy/_core/defchararray.pyi b/numpy/_core/defchararray.pyi index 26a5af432824..25754965c46a 100644 --- a/numpy/_core/defchararray.pyi +++ b/numpy/_core/defchararray.pyi @@ -1,5 +1,12 @@ -from typing import Any, Self, SupportsIndex, SupportsInt, TypeAlias, overload -from typing import Literal as L +from typing import ( + Any, + Literal as L, + Self, + SupportsIndex, + SupportsInt, + TypeAlias, + overload, +) from typing_extensions import TypeVar import numpy as np @@ -14,13 +21,19 @@ from numpy import ( str_, ) from numpy._core.multiarray import compare_chararrays -from numpy._typing import NDArray, _AnyShape, _Shape, _ShapeLike, _SupportsArray -from numpy._typing import _ArrayLikeAnyString_co as UST_co -from numpy._typing import _ArrayLikeBool_co as b_co -from numpy._typing import _ArrayLikeBytes_co as S_co -from numpy._typing import _ArrayLikeInt_co as i_co -from numpy._typing import _ArrayLikeStr_co as U_co -from numpy._typing import _ArrayLikeString_co as T_co +from numpy._typing import ( + NDArray, + _AnyShape, + _ArrayLikeAnyString_co as UST_co, + _ArrayLikeBool_co as b_co, + _ArrayLikeBytes_co as S_co, + _ArrayLikeInt_co as i_co, + _ArrayLikeStr_co as U_co, + _ArrayLikeString_co as T_co, + _Shape, + _ShapeLike, + _SupportsArray, +) __all__ = [ "equal", diff --git a/numpy/_core/fromnumeric.py b/numpy/_core/fromnumeric.py index 
e20d774d014d..34fe1798f45e 100644 --- a/numpy/_core/fromnumeric.py +++ b/numpy/_core/fromnumeric.py @@ -8,10 +8,7 @@ import numpy as np from numpy._utils import set_module -from . import _methods, overrides -from . import multiarray as mu -from . import numerictypes as nt -from . import umath as um +from . import _methods, multiarray as mu, numerictypes as nt, overrides, umath as um from ._multiarray_umath import _array_converter from .multiarray import asanyarray, asarray, concatenate diff --git a/numpy/_core/function_base.pyi b/numpy/_core/function_base.pyi index 600265b1fd0a..19e1238c4e15 100644 --- a/numpy/_core/function_base.pyi +++ b/numpy/_core/function_base.pyi @@ -1,6 +1,5 @@ from _typeshed import Incomplete -from typing import Literal as L -from typing import SupportsIndex, TypeAlias, TypeVar, overload +from typing import Literal as L, SupportsIndex, TypeAlias, TypeVar, overload import numpy as np from numpy._typing import ( diff --git a/numpy/_core/getlimits.py b/numpy/_core/getlimits.py index afa2ccebcfd2..a3d0086974b1 100644 --- a/numpy/_core/getlimits.py +++ b/numpy/_core/getlimits.py @@ -8,8 +8,7 @@ from numpy._utils import set_module -from . import numeric -from . import numerictypes as ntypes +from . 
import numeric, numerictypes as ntypes from ._machar import MachAr from .numeric import array, inf, nan from .umath import exp2, isnan, log10, nextafter diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index 91ff666688bb..aa9e796da10b 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -6,6 +6,7 @@ from typing import ( Any, ClassVar, Final, + Literal as L, Protocol, SupportsIndex, TypeAlias, @@ -16,9 +17,6 @@ from typing import ( overload, type_check_only, ) -from typing import ( - Literal as L, -) from typing_extensions import CapsuleType import numpy as np @@ -41,6 +39,7 @@ from numpy import ( # type: ignore[attr-defined] count_nonzero, datetime64, dtype, + einsum as c_einsum, flatiter, float64, floating, @@ -61,9 +60,6 @@ from numpy import ( # type: ignore[attr-defined] unsignedinteger, vecdot, ) -from numpy import ( - einsum as c_einsum, -) from numpy._typing import ( ArrayLike, # DTypes diff --git a/numpy/_core/numeric.py b/numpy/_core/numeric.py index 964447fa0d8a..4886468f0864 100644 --- a/numpy/_core/numeric.py +++ b/numpy/_core/numeric.py @@ -10,8 +10,7 @@ import numpy as np from numpy.exceptions import AxisError -from . import multiarray, numerictypes, overrides, shape_base, umath -from . import numerictypes as nt +from . 
import multiarray, numerictypes, numerictypes as nt, overrides, shape_base, umath from ._ufunc_config import errstate from .multiarray import ( # noqa: F401 ALLOW_THREADS, diff --git a/numpy/_core/numeric.pyi b/numpy/_core/numeric.pyi index b54fa856b007..c73a2694df3c 100644 --- a/numpy/_core/numeric.pyi +++ b/numpy/_core/numeric.pyi @@ -3,6 +3,7 @@ from collections.abc import Callable, Sequence from typing import ( Any, Final, + Literal as L, Never, NoReturn, SupportsAbs, @@ -13,7 +14,6 @@ from typing import ( Unpack, overload, ) -from typing import Literal as L import numpy as np from numpy import ( @@ -116,15 +116,15 @@ from .fromnumeric import ( transpose, var, ) -from .multiarray import ALLOW_THREADS as ALLOW_THREADS -from .multiarray import BUFSIZE as BUFSIZE -from .multiarray import CLIP as CLIP -from .multiarray import MAXDIMS as MAXDIMS -from .multiarray import MAY_SHARE_BOUNDS as MAY_SHARE_BOUNDS -from .multiarray import MAY_SHARE_EXACT as MAY_SHARE_EXACT -from .multiarray import RAISE as RAISE -from .multiarray import WRAP as WRAP from .multiarray import ( + ALLOW_THREADS as ALLOW_THREADS, + BUFSIZE as BUFSIZE, + CLIP as CLIP, + MAXDIMS as MAXDIMS, + MAY_SHARE_BOUNDS as MAY_SHARE_BOUNDS, + MAY_SHARE_EXACT as MAY_SHARE_EXACT, + RAISE as RAISE, + WRAP as WRAP, _Array, _ConstructorEmpty, _KwargsEmpty, @@ -156,6 +156,7 @@ from .multiarray import ( ndarray, nditer, nested_iters, + normalize_axis_index as normalize_axis_index, promote_types, putmask, result_type, @@ -164,7 +165,6 @@ from .multiarray import ( where, zeros, ) -from .multiarray import normalize_axis_index as normalize_axis_index from .numerictypes import ( ScalarType, bool, diff --git a/numpy/_core/numerictypes.pyi b/numpy/_core/numerictypes.pyi index b649b8f91cd1..4f810da6904b 100644 --- a/numpy/_core/numerictypes.pyi +++ b/numpy/_core/numerictypes.pyi @@ -1,6 +1,5 @@ from builtins import bool as py_bool -from typing import Final, TypedDict, type_check_only -from typing import Literal as L +from 
typing import Final, Literal as L, TypedDict, type_check_only import numpy as np from numpy import ( diff --git a/numpy/_core/records.py b/numpy/_core/records.py index 39bcf4ba6294..9a6af16e3b23 100644 --- a/numpy/_core/records.py +++ b/numpy/_core/records.py @@ -8,8 +8,7 @@ from numpy._utils import set_module -from . import numeric as sb -from . import numerictypes as nt +from . import numeric as sb, numerictypes as nt from .arrayprint import _get_legacy_print_mode # All of the functions allow formats to be a dtype diff --git a/numpy/_core/shape_base.py b/numpy/_core/shape_base.py index c2a0f0dae789..39de8739db0e 100644 --- a/numpy/_core/shape_base.py +++ b/numpy/_core/shape_base.py @@ -5,9 +5,7 @@ import itertools import operator -from . import fromnumeric as _from_nx -from . import numeric as _nx -from . import overrides +from . import fromnumeric as _from_nx, numeric as _nx, overrides from .multiarray import array, asanyarray, normalize_axis_index array_function_dispatch = functools.partial( diff --git a/numpy/_core/strings.py b/numpy/_core/strings.py index b4dc1656024f..4d56f1e0c779 100644 --- a/numpy/_core/strings.py +++ b/numpy/_core/strings.py @@ -14,10 +14,8 @@ greater_equal, less, less_equal, - not_equal, -) -from numpy import ( multiply as _multiply_ufunc, + not_equal, ) from numpy._core.multiarray import _vec_string from numpy._core.overrides import array_function_dispatch, set_module @@ -40,6 +38,10 @@ _strip_chars, _strip_whitespace, _zfill, + count as _count_ufunc, + endswith as _endswith_ufunc, + find as _find_ufunc, + index as _index_ufunc, isalnum, isalpha, isdecimal, @@ -49,28 +51,10 @@ isspace, istitle, isupper, - str_len, -) -from numpy._core.umath import ( - count as _count_ufunc, -) -from numpy._core.umath import ( - endswith as _endswith_ufunc, -) -from numpy._core.umath import ( - find as _find_ufunc, -) -from numpy._core.umath import ( - index as _index_ufunc, -) -from numpy._core.umath import ( rfind as _rfind_ufunc, -) -from 
numpy._core.umath import ( rindex as _rindex_ufunc, -) -from numpy._core.umath import ( startswith as _startswith_ufunc, + str_len, ) diff --git a/numpy/_core/strings.pyi b/numpy/_core/strings.pyi index b187ce71d25c..1b8bfd84cc3d 100644 --- a/numpy/_core/strings.pyi +++ b/numpy/_core/strings.pyi @@ -1,12 +1,16 @@ from typing import TypeAlias, overload import numpy as np -from numpy._typing import NDArray, _AnyShape, _SupportsArray -from numpy._typing import _ArrayLikeAnyString_co as UST_co -from numpy._typing import _ArrayLikeBytes_co as S_co -from numpy._typing import _ArrayLikeInt_co as i_co -from numpy._typing import _ArrayLikeStr_co as U_co -from numpy._typing import _ArrayLikeString_co as T_co +from numpy._typing import ( + NDArray, + _AnyShape, + _ArrayLikeAnyString_co as UST_co, + _ArrayLikeBytes_co as S_co, + _ArrayLikeInt_co as i_co, + _ArrayLikeStr_co as U_co, + _ArrayLikeString_co as T_co, + _SupportsArray, +) __all__ = [ "add", diff --git a/numpy/_core/tests/test_argparse.py b/numpy/_core/tests/test_argparse.py index 0c49ec00277e..ededced3b9fe 100644 --- a/numpy/_core/tests/test_argparse.py +++ b/numpy/_core/tests/test_argparse.py @@ -18,8 +18,6 @@ def func(arg1, /, arg2, *, arg3): import numpy as np from numpy._core._multiarray_tests import ( argparse_example_function as func, -) -from numpy._core._multiarray_tests import ( threaded_argparse_example_function as thread_func, ) from numpy.testing import IS_WASM diff --git a/numpy/_core/tests/test_custom_dtypes.py b/numpy/_core/tests/test_custom_dtypes.py index 3336286d8c98..4d2082a949b7 100644 --- a/numpy/_core/tests/test_custom_dtypes.py +++ b/numpy/_core/tests/test_custom_dtypes.py @@ -5,8 +5,8 @@ import numpy as np from numpy._core._multiarray_umath import ( _discover_array_parameters as discover_array_params, + _get_sfloat_dtype, ) -from numpy._core._multiarray_umath import _get_sfloat_dtype from numpy.testing import assert_array_equal SF = _get_sfloat_dtype() diff --git 
a/numpy/_core/tests/test_indexerrors.py b/numpy/_core/tests/test_indexerrors.py index 02110c28356a..70e97dd6428e 100644 --- a/numpy/_core/tests/test_indexerrors.py +++ b/numpy/_core/tests/test_indexerrors.py @@ -1,8 +1,5 @@ import numpy as np -from numpy.testing import ( - assert_raises, - assert_raises_regex, -) +from numpy.testing import assert_raises, assert_raises_regex class TestIndexErrors: diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py index 65da65ddc9f9..4ead2fc7ec6f 100644 --- a/numpy/_core/tests/test_numeric.py +++ b/numpy/_core/tests/test_numeric.py @@ -6,8 +6,7 @@ from decimal import Decimal import pytest -from hypothesis import given -from hypothesis import strategies as st +from hypothesis import given, strategies as st from hypothesis.extra import numpy as hynp import numpy as np diff --git a/numpy/_core/tests/test_scalar_ctors.py b/numpy/_core/tests/test_scalar_ctors.py index be3ef0459c82..ea78cd9d9f51 100644 --- a/numpy/_core/tests/test_scalar_ctors.py +++ b/numpy/_core/tests/test_scalar_ctors.py @@ -4,11 +4,7 @@ import pytest import numpy as np -from numpy.testing import ( - assert_almost_equal, - assert_equal, - assert_warns, -) +from numpy.testing import assert_almost_equal, assert_equal, assert_warns class TestFromString: diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index 1c2b606dedf3..8197822cd0e3 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -8,8 +8,7 @@ import pytest import numpy as np -from numpy._core.tests._natype import get_stringdtype_dtype as get_dtype -from numpy._core.tests._natype import pd_NA +from numpy._core.tests._natype import get_stringdtype_dtype as get_dtype, pd_NA from numpy.dtypes import StringDType from numpy.testing import IS_PYPY, assert_array_equal diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py index 001a7bffbcc8..c54b2ac86bc2 100644 --- 
a/numpy/_core/tests/test_umath.py +++ b/numpy/_core/tests/test_umath.py @@ -12,8 +12,7 @@ import numpy as np import numpy._core.umath as ncu -from numpy._core import _umath_tests as ncu_tests -from numpy._core import sctypes +from numpy._core import _umath_tests as ncu_tests, sctypes from numpy.testing import ( HAS_REFCOUNT, IS_MUSL, diff --git a/numpy/_typing/__init__.py b/numpy/_typing/__init__.py index 16a7eee66ebd..0044749e3dce 100644 --- a/numpy/_typing/__init__.py +++ b/numpy/_typing/__init__.py @@ -1,148 +1,158 @@ """Private counterpart of ``numpy.typing``.""" -from ._array_like import ArrayLike as ArrayLike -from ._array_like import NDArray as NDArray -from ._array_like import _ArrayLike as _ArrayLike -from ._array_like import _ArrayLikeAnyString_co as _ArrayLikeAnyString_co -from ._array_like import _ArrayLikeBool_co as _ArrayLikeBool_co -from ._array_like import _ArrayLikeBytes_co as _ArrayLikeBytes_co -from ._array_like import _ArrayLikeComplex128_co as _ArrayLikeComplex128_co -from ._array_like import _ArrayLikeComplex_co as _ArrayLikeComplex_co -from ._array_like import _ArrayLikeDT64_co as _ArrayLikeDT64_co -from ._array_like import _ArrayLikeFloat64_co as _ArrayLikeFloat64_co -from ._array_like import _ArrayLikeFloat_co as _ArrayLikeFloat_co -from ._array_like import _ArrayLikeInt as _ArrayLikeInt -from ._array_like import _ArrayLikeInt_co as _ArrayLikeInt_co -from ._array_like import _ArrayLikeNumber_co as _ArrayLikeNumber_co -from ._array_like import _ArrayLikeObject_co as _ArrayLikeObject_co -from ._array_like import _ArrayLikeStr_co as _ArrayLikeStr_co -from ._array_like import _ArrayLikeString_co as _ArrayLikeString_co -from ._array_like import _ArrayLikeTD64_co as _ArrayLikeTD64_co -from ._array_like import _ArrayLikeUInt_co as _ArrayLikeUInt_co -from ._array_like import _ArrayLikeVoid_co as _ArrayLikeVoid_co -from ._array_like import _FiniteNestedSequence as _FiniteNestedSequence -from ._array_like import _SupportsArray as _SupportsArray -from 
._array_like import _SupportsArrayFunc as _SupportsArrayFunc +from ._array_like import ( + ArrayLike as ArrayLike, + NDArray as NDArray, + _ArrayLike as _ArrayLike, + _ArrayLikeAnyString_co as _ArrayLikeAnyString_co, + _ArrayLikeBool_co as _ArrayLikeBool_co, + _ArrayLikeBytes_co as _ArrayLikeBytes_co, + _ArrayLikeComplex128_co as _ArrayLikeComplex128_co, + _ArrayLikeComplex_co as _ArrayLikeComplex_co, + _ArrayLikeDT64_co as _ArrayLikeDT64_co, + _ArrayLikeFloat64_co as _ArrayLikeFloat64_co, + _ArrayLikeFloat_co as _ArrayLikeFloat_co, + _ArrayLikeInt as _ArrayLikeInt, + _ArrayLikeInt_co as _ArrayLikeInt_co, + _ArrayLikeNumber_co as _ArrayLikeNumber_co, + _ArrayLikeObject_co as _ArrayLikeObject_co, + _ArrayLikeStr_co as _ArrayLikeStr_co, + _ArrayLikeString_co as _ArrayLikeString_co, + _ArrayLikeTD64_co as _ArrayLikeTD64_co, + _ArrayLikeUInt_co as _ArrayLikeUInt_co, + _ArrayLikeVoid_co as _ArrayLikeVoid_co, + _FiniteNestedSequence as _FiniteNestedSequence, + _SupportsArray as _SupportsArray, + _SupportsArrayFunc as _SupportsArrayFunc, +) # -from ._char_codes import _BoolCodes as _BoolCodes -from ._char_codes import _ByteCodes as _ByteCodes -from ._char_codes import _BytesCodes as _BytesCodes -from ._char_codes import _CDoubleCodes as _CDoubleCodes -from ._char_codes import _CharacterCodes as _CharacterCodes -from ._char_codes import _CLongDoubleCodes as _CLongDoubleCodes -from ._char_codes import _Complex64Codes as _Complex64Codes -from ._char_codes import _Complex128Codes as _Complex128Codes -from ._char_codes import _ComplexFloatingCodes as _ComplexFloatingCodes -from ._char_codes import _CSingleCodes as _CSingleCodes -from ._char_codes import _DoubleCodes as _DoubleCodes -from ._char_codes import _DT64Codes as _DT64Codes -from ._char_codes import _FlexibleCodes as _FlexibleCodes -from ._char_codes import _Float16Codes as _Float16Codes -from ._char_codes import _Float32Codes as _Float32Codes -from ._char_codes import _Float64Codes as _Float64Codes -from ._char_codes 
import _FloatingCodes as _FloatingCodes -from ._char_codes import _GenericCodes as _GenericCodes -from ._char_codes import _HalfCodes as _HalfCodes -from ._char_codes import _InexactCodes as _InexactCodes -from ._char_codes import _Int8Codes as _Int8Codes -from ._char_codes import _Int16Codes as _Int16Codes -from ._char_codes import _Int32Codes as _Int32Codes -from ._char_codes import _Int64Codes as _Int64Codes -from ._char_codes import _IntCCodes as _IntCCodes -from ._char_codes import _IntCodes as _IntCodes -from ._char_codes import _IntegerCodes as _IntegerCodes -from ._char_codes import _IntPCodes as _IntPCodes -from ._char_codes import _LongCodes as _LongCodes -from ._char_codes import _LongDoubleCodes as _LongDoubleCodes -from ._char_codes import _LongLongCodes as _LongLongCodes -from ._char_codes import _NumberCodes as _NumberCodes -from ._char_codes import _ObjectCodes as _ObjectCodes -from ._char_codes import _ShortCodes as _ShortCodes -from ._char_codes import _SignedIntegerCodes as _SignedIntegerCodes -from ._char_codes import _SingleCodes as _SingleCodes -from ._char_codes import _StrCodes as _StrCodes -from ._char_codes import _StringCodes as _StringCodes -from ._char_codes import _TD64Codes as _TD64Codes -from ._char_codes import _UByteCodes as _UByteCodes -from ._char_codes import _UInt8Codes as _UInt8Codes -from ._char_codes import _UInt16Codes as _UInt16Codes -from ._char_codes import _UInt32Codes as _UInt32Codes -from ._char_codes import _UInt64Codes as _UInt64Codes -from ._char_codes import _UIntCCodes as _UIntCCodes -from ._char_codes import _UIntCodes as _UIntCodes -from ._char_codes import _UIntPCodes as _UIntPCodes -from ._char_codes import _ULongCodes as _ULongCodes -from ._char_codes import _ULongLongCodes as _ULongLongCodes -from ._char_codes import _UnsignedIntegerCodes as _UnsignedIntegerCodes -from ._char_codes import _UShortCodes as _UShortCodes -from ._char_codes import _VoidCodes as _VoidCodes +from ._char_codes import ( + _BoolCodes 
as _BoolCodes, + _ByteCodes as _ByteCodes, + _BytesCodes as _BytesCodes, + _CDoubleCodes as _CDoubleCodes, + _CharacterCodes as _CharacterCodes, + _CLongDoubleCodes as _CLongDoubleCodes, + _Complex64Codes as _Complex64Codes, + _Complex128Codes as _Complex128Codes, + _ComplexFloatingCodes as _ComplexFloatingCodes, + _CSingleCodes as _CSingleCodes, + _DoubleCodes as _DoubleCodes, + _DT64Codes as _DT64Codes, + _FlexibleCodes as _FlexibleCodes, + _Float16Codes as _Float16Codes, + _Float32Codes as _Float32Codes, + _Float64Codes as _Float64Codes, + _FloatingCodes as _FloatingCodes, + _GenericCodes as _GenericCodes, + _HalfCodes as _HalfCodes, + _InexactCodes as _InexactCodes, + _Int8Codes as _Int8Codes, + _Int16Codes as _Int16Codes, + _Int32Codes as _Int32Codes, + _Int64Codes as _Int64Codes, + _IntCCodes as _IntCCodes, + _IntCodes as _IntCodes, + _IntegerCodes as _IntegerCodes, + _IntPCodes as _IntPCodes, + _LongCodes as _LongCodes, + _LongDoubleCodes as _LongDoubleCodes, + _LongLongCodes as _LongLongCodes, + _NumberCodes as _NumberCodes, + _ObjectCodes as _ObjectCodes, + _ShortCodes as _ShortCodes, + _SignedIntegerCodes as _SignedIntegerCodes, + _SingleCodes as _SingleCodes, + _StrCodes as _StrCodes, + _StringCodes as _StringCodes, + _TD64Codes as _TD64Codes, + _UByteCodes as _UByteCodes, + _UInt8Codes as _UInt8Codes, + _UInt16Codes as _UInt16Codes, + _UInt32Codes as _UInt32Codes, + _UInt64Codes as _UInt64Codes, + _UIntCCodes as _UIntCCodes, + _UIntCodes as _UIntCodes, + _UIntPCodes as _UIntPCodes, + _ULongCodes as _ULongCodes, + _ULongLongCodes as _ULongLongCodes, + _UnsignedIntegerCodes as _UnsignedIntegerCodes, + _UShortCodes as _UShortCodes, + _VoidCodes as _VoidCodes, +) # -from ._dtype_like import DTypeLike as DTypeLike -from ._dtype_like import _DTypeLike as _DTypeLike -from ._dtype_like import _DTypeLikeBool as _DTypeLikeBool -from ._dtype_like import _DTypeLikeBytes as _DTypeLikeBytes -from ._dtype_like import _DTypeLikeComplex as _DTypeLikeComplex -from 
._dtype_like import _DTypeLikeComplex_co as _DTypeLikeComplex_co -from ._dtype_like import _DTypeLikeDT64 as _DTypeLikeDT64 -from ._dtype_like import _DTypeLikeFloat as _DTypeLikeFloat -from ._dtype_like import _DTypeLikeInt as _DTypeLikeInt -from ._dtype_like import _DTypeLikeObject as _DTypeLikeObject -from ._dtype_like import _DTypeLikeStr as _DTypeLikeStr -from ._dtype_like import _DTypeLikeTD64 as _DTypeLikeTD64 -from ._dtype_like import _DTypeLikeUInt as _DTypeLikeUInt -from ._dtype_like import _DTypeLikeVoid as _DTypeLikeVoid -from ._dtype_like import _SupportsDType as _SupportsDType -from ._dtype_like import _VoidDTypeLike as _VoidDTypeLike +from ._dtype_like import ( + DTypeLike as DTypeLike, + _DTypeLike as _DTypeLike, + _DTypeLikeBool as _DTypeLikeBool, + _DTypeLikeBytes as _DTypeLikeBytes, + _DTypeLikeComplex as _DTypeLikeComplex, + _DTypeLikeComplex_co as _DTypeLikeComplex_co, + _DTypeLikeDT64 as _DTypeLikeDT64, + _DTypeLikeFloat as _DTypeLikeFloat, + _DTypeLikeInt as _DTypeLikeInt, + _DTypeLikeObject as _DTypeLikeObject, + _DTypeLikeStr as _DTypeLikeStr, + _DTypeLikeTD64 as _DTypeLikeTD64, + _DTypeLikeUInt as _DTypeLikeUInt, + _DTypeLikeVoid as _DTypeLikeVoid, + _SupportsDType as _SupportsDType, + _VoidDTypeLike as _VoidDTypeLike, +) # -from ._nbit import _NBitByte as _NBitByte -from ._nbit import _NBitDouble as _NBitDouble -from ._nbit import _NBitHalf as _NBitHalf -from ._nbit import _NBitInt as _NBitInt -from ._nbit import _NBitIntC as _NBitIntC -from ._nbit import _NBitIntP as _NBitIntP -from ._nbit import _NBitLong as _NBitLong -from ._nbit import _NBitLongDouble as _NBitLongDouble -from ._nbit import _NBitLongLong as _NBitLongLong -from ._nbit import _NBitShort as _NBitShort -from ._nbit import _NBitSingle as _NBitSingle +from ._nbit import ( + _NBitByte as _NBitByte, + _NBitDouble as _NBitDouble, + _NBitHalf as _NBitHalf, + _NBitInt as _NBitInt, + _NBitIntC as _NBitIntC, + _NBitIntP as _NBitIntP, + _NBitLong as _NBitLong, + _NBitLongDouble as 
_NBitLongDouble, + _NBitLongLong as _NBitLongLong, + _NBitShort as _NBitShort, + _NBitSingle as _NBitSingle, +) # from ._nbit_base import ( NBitBase as NBitBase, # type: ignore[deprecated] # pyright: ignore[reportDeprecated] + _8Bit as _8Bit, + _16Bit as _16Bit, + _32Bit as _32Bit, + _64Bit as _64Bit, + _96Bit as _96Bit, + _128Bit as _128Bit, ) -from ._nbit_base import _8Bit as _8Bit -from ._nbit_base import _16Bit as _16Bit -from ._nbit_base import _32Bit as _32Bit -from ._nbit_base import _64Bit as _64Bit -from ._nbit_base import _96Bit as _96Bit -from ._nbit_base import _128Bit as _128Bit # from ._nested_sequence import _NestedSequence as _NestedSequence # -from ._scalars import _BoolLike_co as _BoolLike_co -from ._scalars import _CharLike_co as _CharLike_co -from ._scalars import _ComplexLike_co as _ComplexLike_co -from ._scalars import _FloatLike_co as _FloatLike_co -from ._scalars import _IntLike_co as _IntLike_co -from ._scalars import _NumberLike_co as _NumberLike_co -from ._scalars import _ScalarLike_co as _ScalarLike_co -from ._scalars import _TD64Like_co as _TD64Like_co -from ._scalars import _UIntLike_co as _UIntLike_co -from ._scalars import _VoidLike_co as _VoidLike_co +from ._scalars import ( + _BoolLike_co as _BoolLike_co, + _CharLike_co as _CharLike_co, + _ComplexLike_co as _ComplexLike_co, + _FloatLike_co as _FloatLike_co, + _IntLike_co as _IntLike_co, + _NumberLike_co as _NumberLike_co, + _ScalarLike_co as _ScalarLike_co, + _TD64Like_co as _TD64Like_co, + _UIntLike_co as _UIntLike_co, + _VoidLike_co as _VoidLike_co, +) # -from ._shape import _AnyShape as _AnyShape -from ._shape import _Shape as _Shape -from ._shape import _ShapeLike as _ShapeLike +from ._shape import _AnyShape as _AnyShape, _Shape as _Shape, _ShapeLike as _ShapeLike # -from ._ufunc import _GUFunc_Nin2_Nout1 as _GUFunc_Nin2_Nout1 -from ._ufunc import _UFunc_Nin1_Nout1 as _UFunc_Nin1_Nout1 -from ._ufunc import _UFunc_Nin1_Nout2 as _UFunc_Nin1_Nout2 -from ._ufunc import 
_UFunc_Nin2_Nout1 as _UFunc_Nin2_Nout1 -from ._ufunc import _UFunc_Nin2_Nout2 as _UFunc_Nin2_Nout2 +from ._ufunc import ( + _GUFunc_Nin2_Nout1 as _GUFunc_Nin2_Nout1, + _UFunc_Nin1_Nout1 as _UFunc_Nin1_Nout1, + _UFunc_Nin1_Nout2 as _UFunc_Nin1_Nout2, + _UFunc_Nin2_Nout1 as _UFunc_Nin2_Nout1, + _UFunc_Nin2_Nout2 as _UFunc_Nin2_Nout2, +) diff --git a/numpy/_typing/_callable.pyi b/numpy/_typing/_callable.pyi index 21df1d983fe6..1ce7de5bb423 100644 --- a/numpy/_typing/_callable.pyi +++ b/numpy/_typing/_callable.pyi @@ -38,11 +38,7 @@ from . import NBitBase from ._array_like import NDArray from ._nbit import _NBitInt from ._nested_sequence import _NestedSequence -from ._scalars import ( - _BoolLike_co, - _IntLike_co, - _NumberLike_co, -) +from ._scalars import _BoolLike_co, _IntLike_co, _NumberLike_co _T1 = TypeVar("_T1") _T2 = TypeVar("_T2") diff --git a/numpy/_typing/_dtype_like.py b/numpy/_typing/_dtype_like.py index c406b3098384..526fb86dd322 100644 --- a/numpy/_typing/_dtype_like.py +++ b/numpy/_typing/_dtype_like.py @@ -1,12 +1,5 @@ from collections.abc import Sequence # noqa: F811 -from typing import ( - Any, - Protocol, - TypeAlias, - TypedDict, - TypeVar, - runtime_checkable, -) +from typing import Any, Protocol, TypeAlias, TypedDict, TypeVar, runtime_checkable import numpy as np diff --git a/numpy/_utils/__init__.pyi b/numpy/_utils/__init__.pyi index 2ed4e88b3e32..b630777ced99 100644 --- a/numpy/_utils/__init__.pyi +++ b/numpy/_utils/__init__.pyi @@ -2,8 +2,7 @@ from _typeshed import IdentityFunction from collections.abc import Callable, Iterable from typing import Protocol, TypeVar, overload, type_check_only -from ._convertions import asbytes as asbytes -from ._convertions import asunicode as asunicode +from ._convertions import asbytes as asbytes, asunicode as asunicode ### diff --git a/numpy/_utils/_pep440.pyi b/numpy/_utils/_pep440.pyi index 2c338d4e5b14..11ae02e57a59 100644 --- a/numpy/_utils/_pep440.pyi +++ b/numpy/_utils/_pep440.pyi @@ -5,14 +5,12 @@ 
from typing import ( ClassVar, Final, Generic, + Literal as L, NamedTuple, TypeVar, final, type_check_only, ) -from typing import ( - Literal as L, -) from typing_extensions import TypeIs __all__ = ["VERSION_PATTERN", "InvalidVersion", "LegacyVersion", "Version", "parse"] diff --git a/numpy/ctypeslib/__init__.pyi b/numpy/ctypeslib/__init__.pyi index adc51da2696c..f088d0281d33 100644 --- a/numpy/ctypeslib/__init__.pyi +++ b/numpy/ctypeslib/__init__.pyi @@ -3,31 +3,13 @@ from ctypes import c_int64 as _c_intp from ._ctypeslib import ( __all__ as __all__, -) -from ._ctypeslib import ( __doc__ as __doc__, -) -from ._ctypeslib import ( _concrete_ndptr as _concrete_ndptr, -) -from ._ctypeslib import ( _ndptr as _ndptr, -) -from ._ctypeslib import ( as_array as as_array, -) -from ._ctypeslib import ( as_ctypes as as_ctypes, -) -from ._ctypeslib import ( as_ctypes_type as as_ctypes_type, -) -from ._ctypeslib import ( c_intp as c_intp, -) -from ._ctypeslib import ( load_library as load_library, -) -from ._ctypeslib import ( ndpointer as ndpointer, ) diff --git a/numpy/ctypeslib/_ctypeslib.pyi b/numpy/ctypeslib/_ctypeslib.pyi index aecb3899bdf5..996b2c0be388 100644 --- a/numpy/ctypeslib/_ctypeslib.pyi +++ b/numpy/ctypeslib/_ctypeslib.pyi @@ -4,15 +4,7 @@ import ctypes from _typeshed import StrOrBytesPath from collections.abc import Iterable, Sequence from ctypes import c_int64 as _c_intp -from typing import ( - Any, - ClassVar, - Generic, - TypeAlias, - TypeVar, - overload, -) -from typing import Literal as L +from typing import Any, ClassVar, Generic, Literal as L, TypeAlias, TypeVar, overload import numpy as np from numpy import ( diff --git a/numpy/dtypes.pyi b/numpy/dtypes.pyi index f76b08fc28dc..36844a90d31f 100644 --- a/numpy/dtypes.pyi +++ b/numpy/dtypes.pyi @@ -2,6 +2,7 @@ from typing import ( Any, Generic, + Literal as L, LiteralString, Never, NoReturn, @@ -11,7 +12,6 @@ from typing import ( overload, type_check_only, ) -from typing import Literal as L from 
typing_extensions import TypeVar import numpy as np diff --git a/numpy/f2py/__init__.pyi b/numpy/f2py/__init__.pyi index d12f47e80a7d..aa7d5918f7d2 100644 --- a/numpy/f2py/__init__.pyi +++ b/numpy/f2py/__init__.pyi @@ -1,5 +1,4 @@ -from .f2py2e import main as main -from .f2py2e import run_main +from .f2py2e import main as main, run_main __all__ = ["get_include", "run_main"] diff --git a/numpy/f2py/_backends/_meson.pyi b/numpy/f2py/_backends/_meson.pyi index 67baf9b76845..bcb2b0304401 100644 --- a/numpy/f2py/_backends/_meson.pyi +++ b/numpy/f2py/_backends/_meson.pyi @@ -1,7 +1,6 @@ from collections.abc import Callable from pathlib import Path -from typing import Final -from typing import Literal as L +from typing import Final, Literal as L from typing_extensions import override from ._backend import Backend diff --git a/numpy/f2py/auxfuncs.pyi b/numpy/f2py/auxfuncs.pyi index dfbae5c7d94d..32e381cf9a1c 100644 --- a/numpy/f2py/auxfuncs.pyi +++ b/numpy/f2py/auxfuncs.pyi @@ -1,8 +1,7 @@ from _typeshed import FileDescriptorOrPath from collections.abc import Callable, Mapping from pprint import pprint as show -from typing import Any, Final, Never, TypeAlias, TypeVar, overload -from typing import Literal as L +from typing import Any, Final, Literal as L, Never, TypeAlias, TypeVar, overload from .cfuncs import errmess diff --git a/numpy/f2py/crackfortran.pyi b/numpy/f2py/crackfortran.pyi index c5f4fd7585ba..742d358916a2 100644 --- a/numpy/f2py/crackfortran.pyi +++ b/numpy/f2py/crackfortran.pyi @@ -1,8 +1,17 @@ import re from _typeshed import StrOrBytesPath, StrPath from collections.abc import Callable, Iterable, Mapping -from typing import IO, Any, Concatenate, Final, Never, ParamSpec, TypeAlias, overload -from typing import Literal as L +from typing import ( + IO, + Any, + Concatenate, + Final, + Literal as L, + Never, + ParamSpec, + TypeAlias, + overload, +) from .__version__ import version from .auxfuncs import isintent_dict as isintent_dict diff --git 
a/numpy/f2py/f2py2e.pyi b/numpy/f2py/f2py2e.pyi index 03aeffc5dcdd..46794e552b41 100644 --- a/numpy/f2py/f2py2e.pyi +++ b/numpy/f2py/f2py2e.pyi @@ -6,8 +6,7 @@ from typing import Any, Final, NotRequired, TypedDict, type_check_only from typing_extensions import TypeVar, override from .__version__ import version -from .auxfuncs import _Bool -from .auxfuncs import outmess as outmess +from .auxfuncs import _Bool, outmess as outmess ### diff --git a/numpy/f2py/rules.pyi b/numpy/f2py/rules.pyi index 58614060ba87..30439f6b8351 100644 --- a/numpy/f2py/rules.pyi +++ b/numpy/f2py/rules.pyi @@ -1,6 +1,5 @@ from collections.abc import Callable, Iterable, Mapping -from typing import Any, Final, TypeAlias -from typing import Literal as L +from typing import Any, Final, Literal as L, TypeAlias from typing_extensions import TypeVar from .__version__ import version diff --git a/numpy/f2py/symbolic.pyi b/numpy/f2py/symbolic.pyi index e7b14f751dc3..06be2bb16044 100644 --- a/numpy/f2py/symbolic.pyi +++ b/numpy/f2py/symbolic.pyi @@ -1,7 +1,6 @@ from collections.abc import Callable, Mapping from enum import Enum -from typing import Any, Generic, ParamSpec, Self, TypeAlias, overload -from typing import Literal as L +from typing import Any, Generic, Literal as L, ParamSpec, Self, TypeAlias, overload from typing_extensions import TypeVar __all__ = ["Expr"] diff --git a/numpy/f2py/tests/test_kind.py b/numpy/f2py/tests/test_kind.py index ce223a555456..ecf884fb4999 100644 --- a/numpy/f2py/tests/test_kind.py +++ b/numpy/f2py/tests/test_kind.py @@ -5,8 +5,6 @@ from numpy.f2py.crackfortran import ( _selected_int_kind_func as selected_int_kind, -) -from numpy.f2py.crackfortran import ( _selected_real_kind_func as selected_real_kind, ) diff --git a/numpy/fft/__init__.pyi b/numpy/fft/__init__.pyi index 54d0ea8c79b6..893a697f1398 100644 --- a/numpy/fft/__init__.pyi +++ b/numpy/fft/__init__.pyi @@ -1,9 +1,4 @@ -from ._helper import ( - fftfreq, - fftshift, - ifftshift, - rfftfreq, -) +from ._helper 
import fftfreq, fftshift, ifftshift, rfftfreq from ._pocketfft import ( fft, fft2, diff --git a/numpy/fft/_helper.pyi b/numpy/fft/_helper.pyi index d06bda7ad9a9..1ea451ec2eb1 100644 --- a/numpy/fft/_helper.pyi +++ b/numpy/fft/_helper.pyi @@ -1,5 +1,4 @@ -from typing import Any, Final, TypeVar, overload -from typing import Literal as L +from typing import Any, Final, Literal as L, TypeVar, overload from numpy import complexfloating, floating, generic, integer from numpy._typing import ( diff --git a/numpy/fft/_pocketfft.pyi b/numpy/fft/_pocketfft.pyi index 215cf14d1395..4f5e5c944b4c 100644 --- a/numpy/fft/_pocketfft.pyi +++ b/numpy/fft/_pocketfft.pyi @@ -1,6 +1,5 @@ from collections.abc import Sequence -from typing import Literal as L -from typing import TypeAlias +from typing import Literal as L, TypeAlias from numpy import complex128, float64 from numpy._typing import ArrayLike, NDArray, _ArrayLikeNumber_co diff --git a/numpy/fft/helper.pyi b/numpy/fft/helper.pyi index 7cf391a12e1d..5147652172a6 100644 --- a/numpy/fft/helper.pyi +++ b/numpy/fft/helper.pyi @@ -1,5 +1,4 @@ -from typing import Any -from typing import Literal as L +from typing import Any, Literal as L from typing_extensions import deprecated import numpy as np diff --git a/numpy/lib/__init__.pyi b/numpy/lib/__init__.pyi index 6185a494d035..5a85743e4d0f 100644 --- a/numpy/lib/__init__.pyi +++ b/numpy/lib/__init__.pyi @@ -3,28 +3,36 @@ from numpy._core.multiarray import add_docstring, tracemalloc_domain # all submodules of `lib` are accessible at runtime through `__getattr__`, # so we implicitly re-export them here -from . import _array_utils_impl as _array_utils_impl -from . import _arraypad_impl as _arraypad_impl -from . import _arraysetops_impl as _arraysetops_impl -from . import _arrayterator_impl as _arrayterator_impl -from . import _datasource as _datasource -from . import _format_impl as _format_impl -from . import _function_base_impl as _function_base_impl -from . 
import _histograms_impl as _histograms_impl -from . import _index_tricks_impl as _index_tricks_impl -from . import _iotools as _iotools -from . import _nanfunctions_impl as _nanfunctions_impl -from . import _npyio_impl as _npyio_impl -from . import _polynomial_impl as _polynomial_impl -from . import _scimath_impl as _scimath_impl -from . import _shape_base_impl as _shape_base_impl -from . import _stride_tricks_impl as _stride_tricks_impl -from . import _twodim_base_impl as _twodim_base_impl -from . import _type_check_impl as _type_check_impl -from . import _ufunclike_impl as _ufunclike_impl -from . import _utils_impl as _utils_impl -from . import _version as _version -from . import array_utils, format, introspect, mixins, npyio, scimath, stride_tricks +from . import ( + _array_utils_impl as _array_utils_impl, + _arraypad_impl as _arraypad_impl, + _arraysetops_impl as _arraysetops_impl, + _arrayterator_impl as _arrayterator_impl, + _datasource as _datasource, + _format_impl as _format_impl, + _function_base_impl as _function_base_impl, + _histograms_impl as _histograms_impl, + _index_tricks_impl as _index_tricks_impl, + _iotools as _iotools, + _nanfunctions_impl as _nanfunctions_impl, + _npyio_impl as _npyio_impl, + _polynomial_impl as _polynomial_impl, + _scimath_impl as _scimath_impl, + _shape_base_impl as _shape_base_impl, + _stride_tricks_impl as _stride_tricks_impl, + _twodim_base_impl as _twodim_base_impl, + _type_check_impl as _type_check_impl, + _ufunclike_impl as _ufunclike_impl, + _utils_impl as _utils_impl, + _version as _version, + array_utils, + format, + introspect, + mixins, + npyio, + scimath, + stride_tricks, +) from ._arrayterator_impl import Arrayterator from ._version import NumpyVersion diff --git a/numpy/lib/_arraypad_impl.pyi b/numpy/lib/_arraypad_impl.pyi index 46b43762b87f..6158f299efe2 100644 --- a/numpy/lib/_arraypad_impl.pyi +++ b/numpy/lib/_arraypad_impl.pyi @@ -1,22 +1,15 @@ from typing import ( Any, + Literal as L, Protocol, TypeAlias, 
TypeVar, overload, type_check_only, ) -from typing import ( - Literal as L, -) from numpy import generic -from numpy._typing import ( - ArrayLike, - NDArray, - _ArrayLike, - _ArrayLikeInt, -) +from numpy._typing import ArrayLike, NDArray, _ArrayLike, _ArrayLikeInt __all__ = ["pad"] diff --git a/numpy/lib/_arraysetops_impl.pyi b/numpy/lib/_arraysetops_impl.pyi index 4279b809f78e..c0291680a8ec 100644 --- a/numpy/lib/_arraysetops_impl.pyi +++ b/numpy/lib/_arraysetops_impl.pyi @@ -1,5 +1,12 @@ -from typing import Any, Generic, NamedTuple, SupportsIndex, TypeAlias, overload -from typing import Literal as L +from typing import ( + Any, + Generic, + Literal as L, + NamedTuple, + SupportsIndex, + TypeAlias, + overload, +) from typing_extensions import TypeVar, deprecated import numpy as np diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 096043e6316f..f5690a829fc6 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -10,9 +10,14 @@ from numpy._core import overrides, transpose from numpy._core._multiarray_umath import _array_converter from numpy._core.fromnumeric import any, mean, nonzero, partition, ravel, sum -from numpy._core.multiarray import _monotonicity, _place, bincount, normalize_axis_index -from numpy._core.multiarray import interp as compiled_interp -from numpy._core.multiarray import interp_complex as compiled_interp_complex +from numpy._core.multiarray import ( + _monotonicity, + _place, + bincount, + interp as compiled_interp, + interp_complex as compiled_interp_complex, + normalize_axis_index, +) from numpy._core.numeric import ( absolute, arange, diff --git a/numpy/lib/_function_base_impl.pyi b/numpy/lib/_function_base_impl.pyi index cb6e18b53fa4..78947b1b3b46 100644 --- a/numpy/lib/_function_base_impl.pyi +++ b/numpy/lib/_function_base_impl.pyi @@ -4,6 +4,7 @@ from collections.abc import Callable, Iterable, Sequence from typing import ( Any, Concatenate, + Literal as L, ParamSpec, 
Protocol, SupportsIndex, @@ -13,7 +14,6 @@ from typing import ( overload, type_check_only, ) -from typing import Literal as L from typing_extensions import TypeIs, deprecated import numpy as np diff --git a/numpy/lib/_histograms_impl.pyi b/numpy/lib/_histograms_impl.pyi index 5e7afb5e397b..06987dd71e6b 100644 --- a/numpy/lib/_histograms_impl.pyi +++ b/numpy/lib/_histograms_impl.pyi @@ -1,17 +1,7 @@ from collections.abc import Sequence -from typing import ( - Any, - SupportsIndex, - TypeAlias, -) -from typing import ( - Literal as L, -) +from typing import Any, Literal as L, SupportsIndex, TypeAlias -from numpy._typing import ( - ArrayLike, - NDArray, -) +from numpy._typing import ArrayLike, NDArray __all__ = ["histogram", "histogramdd", "histogram_bin_edges"] diff --git a/numpy/lib/_index_tricks_impl.pyi b/numpy/lib/_index_tricks_impl.pyi index c4509d9aa3ad..208a8e868b48 100644 --- a/numpy/lib/_index_tricks_impl.pyi +++ b/numpy/lib/_index_tricks_impl.pyi @@ -1,7 +1,16 @@ from _typeshed import Incomplete from collections.abc import Sequence -from typing import Any, ClassVar, Final, Generic, Self, SupportsIndex, final, overload -from typing import Literal as L +from typing import ( + Any, + ClassVar, + Final, + Generic, + Literal as L, + Self, + SupportsIndex, + final, + overload, +) from typing_extensions import TypeVar, deprecated import numpy as np diff --git a/numpy/lib/_nanfunctions_impl.pyi b/numpy/lib/_nanfunctions_impl.pyi index f39800d58d07..fd5d277cbd7d 100644 --- a/numpy/lib/_nanfunctions_impl.pyi +++ b/numpy/lib/_nanfunctions_impl.pyi @@ -11,11 +11,7 @@ from numpy._core.fromnumeric import ( sum, var, ) -from numpy.lib._function_base_impl import ( - median, - percentile, - quantile, -) +from numpy.lib._function_base_impl import median, percentile, quantile __all__ = [ "nansum", diff --git a/numpy/lib/_npyio_impl.pyi b/numpy/lib/_npyio_impl.pyi index 94f014ccd52d..253fcb0fdf9e 100644 --- a/numpy/lib/_npyio_impl.pyi +++ b/numpy/lib/_npyio_impl.pyi @@ -14,13 
+14,13 @@ from typing import ( Any, ClassVar, Generic, + Literal as L, Protocol, Self, TypeAlias, overload, type_check_only, ) -from typing import Literal as L from typing_extensions import TypeVar, deprecated, override import numpy as np diff --git a/numpy/lib/_polynomial_impl.pyi b/numpy/lib/_polynomial_impl.pyi index faf2f01e6a22..3da1a2af60c9 100644 --- a/numpy/lib/_polynomial_impl.pyi +++ b/numpy/lib/_polynomial_impl.pyi @@ -1,5 +1,6 @@ from typing import ( Any, + Literal as L, NoReturn, SupportsIndex, SupportsInt, @@ -7,9 +8,6 @@ from typing import ( TypeVar, overload, ) -from typing import ( - Literal as L, -) import numpy as np from numpy import ( diff --git a/numpy/lib/_twodim_base_impl.pyi b/numpy/lib/_twodim_base_impl.pyi index 43df38ed5b06..9e70d0a617f6 100644 --- a/numpy/lib/_twodim_base_impl.pyi +++ b/numpy/lib/_twodim_base_impl.pyi @@ -1,13 +1,5 @@ from collections.abc import Callable, Sequence -from typing import ( - Any, - TypeAlias, - TypeVar, - overload, -) -from typing import ( - Literal as L, -) +from typing import Any, Literal as L, TypeAlias, TypeVar, overload import numpy as np from numpy import ( diff --git a/numpy/lib/_type_check_impl.pyi b/numpy/lib/_type_check_impl.pyi index b9ab2a02f5f5..5e98ad22ca8b 100644 --- a/numpy/lib/_type_check_impl.pyi +++ b/numpy/lib/_type_check_impl.pyi @@ -1,7 +1,6 @@ from _typeshed import Incomplete from collections.abc import Container, Iterable -from typing import Any, Protocol, TypeAlias, overload, type_check_only -from typing import Literal as L +from typing import Any, Literal as L, Protocol, TypeAlias, overload, type_check_only from typing_extensions import TypeVar import numpy as np diff --git a/numpy/lib/array_utils.pyi b/numpy/lib/array_utils.pyi index 8adc3c5b22a6..4b9ebe334a1f 100644 --- a/numpy/lib/array_utils.pyi +++ b/numpy/lib/array_utils.pyi @@ -1,12 +1,6 @@ from ._array_utils_impl import ( __all__ as __all__, -) -from ._array_utils_impl import ( byte_bounds as byte_bounds, -) -from 
._array_utils_impl import ( normalize_axis_index as normalize_axis_index, -) -from ._array_utils_impl import ( normalize_axis_tuple as normalize_axis_tuple, ) diff --git a/numpy/lib/format.pyi b/numpy/lib/format.pyi index dd9470e1e6a3..c29e18fe0581 100644 --- a/numpy/lib/format.pyi +++ b/numpy/lib/format.pyi @@ -1,66 +1,24 @@ from ._format_impl import ( ARRAY_ALIGN as ARRAY_ALIGN, -) -from ._format_impl import ( BUFFER_SIZE as BUFFER_SIZE, -) -from ._format_impl import ( EXPECTED_KEYS as EXPECTED_KEYS, -) -from ._format_impl import ( GROWTH_AXIS_MAX_DIGITS as GROWTH_AXIS_MAX_DIGITS, -) -from ._format_impl import ( MAGIC_LEN as MAGIC_LEN, -) -from ._format_impl import ( MAGIC_PREFIX as MAGIC_PREFIX, -) -from ._format_impl import ( __all__ as __all__, -) -from ._format_impl import ( __doc__ as __doc__, -) -from ._format_impl import ( descr_to_dtype as descr_to_dtype, -) -from ._format_impl import ( drop_metadata as drop_metadata, -) -from ._format_impl import ( dtype_to_descr as dtype_to_descr, -) -from ._format_impl import ( header_data_from_array_1_0 as header_data_from_array_1_0, -) -from ._format_impl import ( isfileobj as isfileobj, -) -from ._format_impl import ( magic as magic, -) -from ._format_impl import ( open_memmap as open_memmap, -) -from ._format_impl import ( read_array as read_array, -) -from ._format_impl import ( read_array_header_1_0 as read_array_header_1_0, -) -from ._format_impl import ( read_array_header_2_0 as read_array_header_2_0, -) -from ._format_impl import ( read_magic as read_magic, -) -from ._format_impl import ( write_array as write_array, -) -from ._format_impl import ( write_array_header_1_0 as write_array_header_1_0, -) -from ._format_impl import ( write_array_header_2_0 as write_array_header_2_0, ) diff --git a/numpy/lib/introspect.py b/numpy/lib/introspect.py index a7e4c93932c6..5526a332fead 100644 --- a/numpy/lib/introspect.py +++ b/numpy/lib/introspect.py @@ -65,8 +65,7 @@ def opt_func_info(func_name=None, signature=None): """ 
import re - from numpy._core._multiarray_umath import __cpu_targets_info__ as targets - from numpy._core._multiarray_umath import dtype + from numpy._core._multiarray_umath import __cpu_targets_info__ as targets, dtype if func_name is not None: func_pattern = re.compile(func_name) diff --git a/numpy/lib/mixins.pyi b/numpy/lib/mixins.pyi index 4f4801feac8f..f23c58fa6586 100644 --- a/numpy/lib/mixins.pyi +++ b/numpy/lib/mixins.pyi @@ -1,6 +1,5 @@ from abc import ABC, abstractmethod -from typing import Any -from typing import Literal as L +from typing import Any, Literal as L from numpy import ufunc diff --git a/numpy/lib/npyio.pyi b/numpy/lib/npyio.pyi index 49fb4d1fc736..fd3ae8f5a287 100644 --- a/numpy/lib/npyio.pyi +++ b/numpy/lib/npyio.pyi @@ -1,9 +1,5 @@ from numpy.lib._npyio_impl import ( DataSource as DataSource, -) -from numpy.lib._npyio_impl import ( NpzFile as NpzFile, -) -from numpy.lib._npyio_impl import ( __doc__ as __doc__, ) diff --git a/numpy/lib/scimath.pyi b/numpy/lib/scimath.pyi index 253235dfc576..ef2772a33a47 100644 --- a/numpy/lib/scimath.pyi +++ b/numpy/lib/scimath.pyi @@ -1,30 +1,12 @@ from ._scimath_impl import ( __all__ as __all__, -) -from ._scimath_impl import ( arccos as arccos, -) -from ._scimath_impl import ( arcsin as arcsin, -) -from ._scimath_impl import ( arctanh as arctanh, -) -from ._scimath_impl import ( log as log, -) -from ._scimath_impl import ( log2 as log2, -) -from ._scimath_impl import ( log10 as log10, -) -from ._scimath_impl import ( logn as logn, -) -from ._scimath_impl import ( power as power, -) -from ._scimath_impl import ( sqrt as sqrt, ) diff --git a/numpy/lib/stride_tricks.pyi b/numpy/lib/stride_tricks.pyi index 42d8fe9ef43b..eb46f28ae5f4 100644 --- a/numpy/lib/stride_tricks.pyi +++ b/numpy/lib/stride_tricks.pyi @@ -1,6 +1,4 @@ from numpy.lib._stride_tricks_impl import ( as_strided as as_strided, -) -from numpy.lib._stride_tricks_impl import ( sliding_window_view as sliding_window_view, ) diff --git 
a/numpy/lib/tests/test__iotools.py b/numpy/lib/tests/test__iotools.py index 1581ffbe95fd..548a3db2dc07 100644 --- a/numpy/lib/tests/test__iotools.py +++ b/numpy/lib/tests/test__iotools.py @@ -10,12 +10,7 @@ flatten_dtype, has_nested_fields, ) -from numpy.testing import ( - assert_, - assert_allclose, - assert_equal, - assert_raises, -) +from numpy.testing import assert_, assert_allclose, assert_equal, assert_raises class TestLineSplitter: diff --git a/numpy/linalg/__init__.pyi b/numpy/linalg/__init__.pyi index 16c8048c1a11..53c115f7bd65 100644 --- a/numpy/linalg/__init__.pyi +++ b/numpy/linalg/__init__.pyi @@ -1,6 +1,4 @@ -from . import _linalg as _linalg -from . import _umath_linalg as _umath_linalg -from . import linalg as linalg +from . import _linalg as _linalg, _umath_linalg as _umath_linalg, linalg as linalg from ._linalg import ( cholesky, cond, diff --git a/numpy/linalg/_linalg.py b/numpy/linalg/_linalg.py index d7850c4a0204..52a2ffb8f50b 100644 --- a/numpy/linalg/_linalg.py +++ b/numpy/linalg/_linalg.py @@ -35,7 +35,9 @@ cdouble, complexfloating, count_nonzero, + cross as _core_cross, csingle, + diagonal as _core_diagonal, divide, dot, double, @@ -49,10 +51,13 @@ intp, isfinite, isnan, + matmul as _core_matmul, + matrix_transpose as _core_matrix_transpose, moveaxis, multiply, newaxis, object_, + outer as _core_outer, overrides, prod, reciprocal, @@ -62,34 +67,11 @@ sqrt, sum, swapaxes, - zeros, -) -from numpy._core import ( - cross as _core_cross, -) -from numpy._core import ( - diagonal as _core_diagonal, -) -from numpy._core import ( - matmul as _core_matmul, -) -from numpy._core import ( - matrix_transpose as _core_matrix_transpose, -) -from numpy._core import ( - outer as _core_outer, -) -from numpy._core import ( tensordot as _core_tensordot, -) -from numpy._core import ( trace as _core_trace, -) -from numpy._core import ( transpose as _core_transpose, -) -from numpy._core import ( vecdot as _core_vecdot, + zeros, ) from numpy._globals import _NoValue 
from numpy._typing import NDArray diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index 3f318a892da5..68bd6d933921 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -1,6 +1,7 @@ from collections.abc import Iterable from typing import ( Any, + Literal as L, NamedTuple, Never, SupportsIndex, @@ -9,7 +10,6 @@ from typing import ( TypeVar, overload, ) -from typing import Literal as L import numpy as np from numpy import ( diff --git a/numpy/linalg/_umath_linalg.pyi b/numpy/linalg/_umath_linalg.pyi index cd07acdb1f9e..f90706a7b159 100644 --- a/numpy/linalg/_umath_linalg.pyi +++ b/numpy/linalg/_umath_linalg.pyi @@ -1,5 +1,4 @@ -from typing import Final -from typing import Literal as L +from typing import Final, Literal as L import numpy as np from numpy._typing._ufunc import _GUFunc_Nin2_Nout1 diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 05ea373a6a12..5e231fbbadb7 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -35,6 +35,7 @@ amax, amin, angle, + array as narray, # noqa: F401 bool_, expand_dims, finfo, # noqa: F401 @@ -42,7 +43,6 @@ iscomplexobj, ndarray, ) -from numpy import array as narray # noqa: F401 from numpy._core import multiarray as mu from numpy._core.numeric import normalize_axis_tuple from numpy._utils import set_module diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py index 094c1e26b191..381cf75f00c3 100644 --- a/numpy/ma/extras.py +++ b/numpy/ma/extras.py @@ -23,8 +23,7 @@ import warnings import numpy as np -from numpy import array as nxarray -from numpy import ndarray +from numpy import array as nxarray, ndarray from numpy.lib._function_base_impl import _ureduce from numpy.lib._index_tricks_impl import AxisConcatenator from numpy.lib.array_utils import normalize_axis_index, normalize_axis_tuple diff --git a/numpy/ma/tests/test_mrecords.py b/numpy/ma/tests/test_mrecords.py index 0da915101511..e0b0db24904c 100644 --- a/numpy/ma/tests/test_mrecords.py +++ b/numpy/ma/tests/test_mrecords.py @@ -8,9 
+8,11 @@ import numpy as np import numpy.ma as ma -from numpy._core.records import fromarrays as recfromarrays -from numpy._core.records import fromrecords as recfromrecords -from numpy._core.records import recarray +from numpy._core.records import ( + fromarrays as recfromarrays, + fromrecords as recfromrecords, + recarray, +) from numpy.ma import masked, nomask from numpy.ma.mrecords import ( MaskedRecords, @@ -20,11 +22,7 @@ fromtextfile, mrecarray, ) -from numpy.ma.testutils import ( - assert_, - assert_equal, - assert_equal_records, -) +from numpy.ma.testutils import assert_, assert_equal, assert_equal_records from numpy.testing import temppath diff --git a/numpy/ma/tests/test_old_ma.py b/numpy/ma/tests/test_old_ma.py index 30c3311798fc..f5d6d3ec27b9 100644 --- a/numpy/ma/tests/test_old_ma.py +++ b/numpy/ma/tests/test_old_ma.py @@ -83,11 +83,7 @@ where, zeros, ) -from numpy.testing import ( - assert_, - assert_equal, - assert_raises, -) +from numpy.testing import assert_, assert_equal, assert_raises pi = np.pi diff --git a/numpy/matrixlib/tests/test_matrix_linalg.py b/numpy/matrixlib/tests/test_matrix_linalg.py index 4e639653bda4..6ce03b36abde 100644 --- a/numpy/matrixlib/tests/test_matrix_linalg.py +++ b/numpy/matrixlib/tests/test_matrix_linalg.py @@ -12,13 +12,13 @@ PinvCases, SolveCases, SVDCases, + TestQR as _TestQR, _TestNorm2D, _TestNormDoubleBase, _TestNormInt64Base, _TestNormSingleBase, apply_tag, ) -from numpy.linalg.tests.test_linalg import TestQR as _TestQR CASES = [] diff --git a/numpy/polynomial/chebyshev.pyi b/numpy/polynomial/chebyshev.pyi index ec342df0f9d1..85c92816a261 100644 --- a/numpy/polynomial/chebyshev.pyi +++ b/numpy/polynomial/chebyshev.pyi @@ -1,6 +1,5 @@ from collections.abc import Callable, Iterable -from typing import Any, Concatenate, Final, Self, TypeVar, overload -from typing import Literal as L +from typing import Any, Concatenate, Final, Literal as L, Self, TypeVar, overload import numpy as np import numpy.typing as npt diff 
--git a/numpy/polynomial/hermite.pyi b/numpy/polynomial/hermite.pyi index f7d907c1b39d..07db43d0c000 100644 --- a/numpy/polynomial/hermite.pyi +++ b/numpy/polynomial/hermite.pyi @@ -1,5 +1,4 @@ -from typing import Any, Final, TypeVar -from typing import Literal as L +from typing import Any, Final, Literal as L, TypeVar import numpy as np diff --git a/numpy/polynomial/hermite_e.pyi b/numpy/polynomial/hermite_e.pyi index e8013e66b62f..94ad7248f268 100644 --- a/numpy/polynomial/hermite_e.pyi +++ b/numpy/polynomial/hermite_e.pyi @@ -1,5 +1,4 @@ -from typing import Any, Final, TypeVar -from typing import Literal as L +from typing import Any, Final, Literal as L, TypeVar import numpy as np diff --git a/numpy/polynomial/laguerre.pyi b/numpy/polynomial/laguerre.pyi index 6f67257a607c..a2b84f72bab7 100644 --- a/numpy/polynomial/laguerre.pyi +++ b/numpy/polynomial/laguerre.pyi @@ -1,5 +1,4 @@ -from typing import Final -from typing import Literal as L +from typing import Final, Literal as L import numpy as np diff --git a/numpy/polynomial/legendre.pyi b/numpy/polynomial/legendre.pyi index 35ea2ffd2bf2..d81f3e6f54a4 100644 --- a/numpy/polynomial/legendre.pyi +++ b/numpy/polynomial/legendre.pyi @@ -1,5 +1,4 @@ -from typing import Final -from typing import Literal as L +from typing import Final, Literal as L import numpy as np diff --git a/numpy/polynomial/polynomial.pyi b/numpy/polynomial/polynomial.pyi index b4c784492b50..2942adf2afa8 100644 --- a/numpy/polynomial/polynomial.pyi +++ b/numpy/polynomial/polynomial.pyi @@ -1,5 +1,4 @@ -from typing import Final -from typing import Literal as L +from typing import Final, Literal as L import numpy as np diff --git a/numpy/polynomial/polyutils.pyi b/numpy/polynomial/polyutils.pyi index c627e16dca1d..65ae4e5503b2 100644 --- a/numpy/polynomial/polyutils.pyi +++ b/numpy/polynomial/polyutils.pyi @@ -1,12 +1,5 @@ from collections.abc import Callable, Iterable, Sequence -from typing import ( - Final, - Literal, - SupportsIndex, - 
TypeAlias, - TypeVar, - overload, -) +from typing import Final, Literal, SupportsIndex, TypeAlias, TypeVar, overload import numpy as np import numpy.typing as npt diff --git a/numpy/polynomial/tests/test_chebyshev.py b/numpy/polynomial/tests/test_chebyshev.py index 2cead454631c..14777ac60375 100644 --- a/numpy/polynomial/tests/test_chebyshev.py +++ b/numpy/polynomial/tests/test_chebyshev.py @@ -6,12 +6,7 @@ import numpy as np import numpy.polynomial.chebyshev as cheb from numpy.polynomial.polynomial import polyval -from numpy.testing import ( - assert_, - assert_almost_equal, - assert_equal, - assert_raises, -) +from numpy.testing import assert_, assert_almost_equal, assert_equal, assert_raises def trim(x): diff --git a/numpy/polynomial/tests/test_classes.py b/numpy/polynomial/tests/test_classes.py index d10aafbda866..156dccf6ea88 100644 --- a/numpy/polynomial/tests/test_classes.py +++ b/numpy/polynomial/tests/test_classes.py @@ -18,12 +18,7 @@ Legendre, Polynomial, ) -from numpy.testing import ( - assert_, - assert_almost_equal, - assert_equal, - assert_raises, -) +from numpy.testing import assert_, assert_almost_equal, assert_equal, assert_raises # # fixtures diff --git a/numpy/polynomial/tests/test_hermite.py b/numpy/polynomial/tests/test_hermite.py index 8bd3951f4241..a289ba0b50cc 100644 --- a/numpy/polynomial/tests/test_hermite.py +++ b/numpy/polynomial/tests/test_hermite.py @@ -6,12 +6,7 @@ import numpy as np import numpy.polynomial.hermite as herm from numpy.polynomial.polynomial import polyval -from numpy.testing import ( - assert_, - assert_almost_equal, - assert_equal, - assert_raises, -) +from numpy.testing import assert_, assert_almost_equal, assert_equal, assert_raises H0 = np.array([1]) H1 = np.array([0, 2]) diff --git a/numpy/polynomial/tests/test_hermite_e.py b/numpy/polynomial/tests/test_hermite_e.py index 29f34f66380e..233dfb28254a 100644 --- a/numpy/polynomial/tests/test_hermite_e.py +++ b/numpy/polynomial/tests/test_hermite_e.py @@ -6,12 +6,7 @@ 
import numpy as np import numpy.polynomial.hermite_e as herme from numpy.polynomial.polynomial import polyval -from numpy.testing import ( - assert_, - assert_almost_equal, - assert_equal, - assert_raises, -) +from numpy.testing import assert_, assert_almost_equal, assert_equal, assert_raises He0 = np.array([1]) He1 = np.array([0, 1]) diff --git a/numpy/polynomial/tests/test_laguerre.py b/numpy/polynomial/tests/test_laguerre.py index 6793b780416d..884f15a9fe8f 100644 --- a/numpy/polynomial/tests/test_laguerre.py +++ b/numpy/polynomial/tests/test_laguerre.py @@ -6,12 +6,7 @@ import numpy as np import numpy.polynomial.laguerre as lag from numpy.polynomial.polynomial import polyval -from numpy.testing import ( - assert_, - assert_almost_equal, - assert_equal, - assert_raises, -) +from numpy.testing import assert_, assert_almost_equal, assert_equal, assert_raises L0 = np.array([1]) / 1 L1 = np.array([1, -1]) / 1 diff --git a/numpy/polynomial/tests/test_legendre.py b/numpy/polynomial/tests/test_legendre.py index d0ed7060cbe7..6c87f44ee707 100644 --- a/numpy/polynomial/tests/test_legendre.py +++ b/numpy/polynomial/tests/test_legendre.py @@ -6,12 +6,7 @@ import numpy as np import numpy.polynomial.legendre as leg from numpy.polynomial.polynomial import polyval -from numpy.testing import ( - assert_, - assert_almost_equal, - assert_equal, - assert_raises, -) +from numpy.testing import assert_, assert_almost_equal, assert_equal, assert_raises L0 = np.array([1]) L1 = np.array([0, 1]) diff --git a/numpy/polynomial/tests/test_polyutils.py b/numpy/polynomial/tests/test_polyutils.py index 96e88b9de1fa..a6f5e3990b6b 100644 --- a/numpy/polynomial/tests/test_polyutils.py +++ b/numpy/polynomial/tests/test_polyutils.py @@ -3,12 +3,7 @@ """ import numpy as np import numpy.polynomial.polyutils as pu -from numpy.testing import ( - assert_, - assert_almost_equal, - assert_equal, - assert_raises, -) +from numpy.testing import assert_, assert_almost_equal, assert_equal, assert_raises class 
TestMisc: diff --git a/numpy/random/tests/test_randomstate_regression.py b/numpy/random/tests/test_randomstate_regression.py index 6ccc6180657c..e71be8acd981 100644 --- a/numpy/random/tests/test_randomstate_regression.py +++ b/numpy/random/tests/test_randomstate_regression.py @@ -4,11 +4,7 @@ import numpy as np from numpy import random -from numpy.testing import ( - assert_, - assert_array_equal, - assert_raises, -) +from numpy.testing import assert_, assert_array_equal, assert_raises class TestRegression: diff --git a/numpy/random/tests/test_regression.py b/numpy/random/tests/test_regression.py index 39b7d8c719ac..de52582c2b56 100644 --- a/numpy/random/tests/test_regression.py +++ b/numpy/random/tests/test_regression.py @@ -2,11 +2,7 @@ import numpy as np from numpy import random -from numpy.testing import ( - assert_, - assert_array_equal, - assert_raises, -) +from numpy.testing import assert_, assert_array_equal, assert_raises class TestRegression: diff --git a/numpy/testing/_private/utils.pyi b/numpy/testing/_private/utils.pyi index 59a7539b69f1..d5584e511796 100644 --- a/numpy/testing/_private/utils.pyi +++ b/numpy/testing/_private/utils.pyi @@ -14,6 +14,7 @@ from typing import ( ClassVar, Final, Generic, + Literal as L, NoReturn, ParamSpec, Self, @@ -23,7 +24,6 @@ from typing import ( overload, type_check_only, ) -from typing import Literal as L from typing_extensions import TypeVar from unittest.case import SkipTest diff --git a/numpy/tests/test_reloading.py b/numpy/tests/test_reloading.py index 3e6ded326941..9787bcbbc101 100644 --- a/numpy/tests/test_reloading.py +++ b/numpy/tests/test_reloading.py @@ -7,13 +7,7 @@ import pytest import numpy.exceptions as ex -from numpy.testing import ( - IS_WASM, - assert_, - assert_equal, - assert_raises, - assert_warns, -) +from numpy.testing import IS_WASM, assert_, assert_equal, assert_raises, assert_warns def test_numpy_reloading(): diff --git a/numpy/tests/test_scripts.py b/numpy/tests/test_scripts.py index 
d8ce95887bce..01b743941cf2 100644 --- a/numpy/tests/test_scripts.py +++ b/numpy/tests/test_scripts.py @@ -5,8 +5,7 @@ import os import subprocess import sys -from os.path import dirname, isfile -from os.path import join as pathjoin +from os.path import dirname, isfile, join as pathjoin import pytest diff --git a/ruff.toml b/ruff.toml index 7454c6c05e5b..61e849af1d69 100644 --- a/ruff.toml +++ b/ruff.toml @@ -130,3 +130,5 @@ builtins-allowed-modules = ["random", "typing"] # these are treated as stdlib within .pyi stubs extra-standard-library = ["_typeshed", "typing_extensions"] known-first-party = ["numpy"] +combine-as-imports = true +split-on-trailing-comma = false From 7311f0a55bdf6f363024467247bef4464df08e25 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Thu, 26 Jun 2025 03:08:15 +0200 Subject: [PATCH 0160/1718] MAINT: Fix ``I001`` ruff error on main (#29272) --- numpy/_core/__init__.pyi | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/numpy/_core/__init__.pyi b/numpy/_core/__init__.pyi index a8884917f34a..ce5427bbfcd9 100644 --- a/numpy/_core/__init__.pyi +++ b/numpy/_core/__init__.pyi @@ -65,9 +65,9 @@ from .fromnumeric import ( take, trace, transpose, + transpose as permute_dims, var, ) -from .fromnumeric import transpose as permute_dims from .function_base import geomspace, linspace, logspace from .getlimits import finfo, iinfo from .memmap import memmap @@ -91,6 +91,7 @@ from .numeric import ( broadcast, can_cast, concatenate, + concatenate as concat, convolve, copyto, correlate, @@ -145,7 +146,6 @@ from .numeric import ( zeros, zeros_like, ) -from .numeric import concatenate as concat from .numerictypes import ( ScalarType, bool, @@ -228,14 +228,22 @@ from .shape_base import ( ) from .umath import ( absolute, + absolute as abs, add, arccos, + arccos as acos, arccosh, + arccosh as acosh, arcsin, + arcsin as asin, arcsinh, + arcsinh as asinh, arctan, + arctan as atan, arctan2, + arctan2 as atan2, arctanh, 
+ arctanh as atanh, bitwise_and, bitwise_count, bitwise_or, @@ -272,6 +280,7 @@ from .umath import ( heaviside, hypot, invert, + invert as bitwise_invert, isfinite, isinf, isnan, @@ -279,6 +288,7 @@ from .umath import ( lcm, ldexp, left_shift, + left_shift as bitwise_left_shift, less, less_equal, log, @@ -303,11 +313,13 @@ from .umath import ( pi, positive, power, + power as pow, rad2deg, radians, reciprocal, remainder, right_shift, + right_shift as bitwise_right_shift, rint, sign, signbit, @@ -323,18 +335,6 @@ from .umath import ( trunc, vecmat, ) -from .umath import absolute as abs -from .umath import arccos as acos -from .umath import arccosh as acosh -from .umath import arcsin as asin -from .umath import arcsinh as asinh -from .umath import arctan as atan -from .umath import arctan2 as atan2 -from .umath import arctanh as atanh -from .umath import invert as bitwise_invert -from .umath import left_shift as bitwise_left_shift -from .umath import power as pow -from .umath import right_shift as bitwise_right_shift __all__ = [ "False_", From 004458d21696a2867ed8142b172ada4bb4607b03 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Thu, 26 Jun 2025 05:41:55 +0200 Subject: [PATCH 0161/1718] TYP: Work around a mypy issue with bool arrays (#29248) --- numpy/__init__.pyi | 6 +++--- numpy/typing/tests/data/reveal/array_constructors.pyi | 2 ++ numpy/typing/tests/data/reveal/ma.pyi | 3 +-- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 9fae513bdf39..d85c506ae659 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -3848,14 +3848,14 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @property def imag(self) -> np.bool[L[False]]: ... - @overload - def __init__(self: np.bool[L[False]], /) -> None: ... + @overload # mypy bug workaround: https://github.com/numpy/numpy/issues/29245 + def __init__(self: np.bool[builtins.bool], value: Never, /) -> None: ... 
@overload def __init__(self: np.bool[L[False]], value: _Falsy = ..., /) -> None: ... @overload def __init__(self: np.bool[L[True]], value: _Truthy, /) -> None: ... @overload - def __init__(self, value: object, /) -> None: ... + def __init__(self: np.bool[builtins.bool], value: object, /) -> None: ... def __bool__(self, /) -> _BoolItemT_co: ... @overload diff --git a/numpy/typing/tests/data/reveal/array_constructors.pyi b/numpy/typing/tests/data/reveal/array_constructors.pyi index 7b27d57bfe23..45cc986d33a6 100644 --- a/numpy/typing/tests/data/reveal/array_constructors.pyi +++ b/numpy/typing/tests/data/reveal/array_constructors.pyi @@ -39,6 +39,8 @@ assert_type(np.array(B, subok=True), SubClass[np.float64]) assert_type(np.array(B, subok=True, ndmin=0), SubClass[np.float64]) assert_type(np.array(B, subok=True, ndmin=1), SubClass[np.float64]) assert_type(np.array(D), npt.NDArray[np.float64 | np.int64]) +# https://github.com/numpy/numpy/issues/29245 +assert_type(np.array([], dtype=np.bool), npt.NDArray[np.bool]) assert_type(np.zeros([1, 5, 6]), npt.NDArray[np.float64]) assert_type(np.zeros([1, 5, 6], dtype=np.int64), npt.NDArray[np.int64]) diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index 60ee04218489..0fa657240f0b 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -357,8 +357,7 @@ assert_type(MAR_b.swapaxes(0, 1), MaskedArray[np.bool]) assert_type(MAR_2d_f4.swapaxes(1, 0), MaskedArray[np.float32]) assert_type(np.ma.nomask, np.bool[Literal[False]]) -# https://github.com/python/mypy/issues/18974 -assert_type(np.ma.MaskType, type[np.bool]) # type: ignore[assert-type] +assert_type(np.ma.MaskType, type[np.bool]) assert_type(MAR_1d.__setmask__([True, False]), None) assert_type(MAR_1d.__setmask__(np.False_), None) From 8f5ce9cbe49a8f4191455c70adca28f61cf774cf Mon Sep 17 00:00:00 2001 From: carlosgmartin Date: Thu, 26 Jun 2025 08:25:12 -0400 Subject: [PATCH 0162/1718] ENH: Extend 
numpy.pad to handle pad_width dictionary argument. --- .../upcoming_changes/29273.new_feature.rst | 1 + numpy/lib/_arraypad_impl.py | 38 ++++++++++++++++++- numpy/lib/_arraypad_impl.pyi | 14 +++++-- numpy/lib/tests/test_arraypad.py | 12 ++++++ numpy/typing/tests/data/reveal/arraypad.pyi | 5 +++ 5 files changed, 65 insertions(+), 5 deletions(-) create mode 100644 doc/release/upcoming_changes/29273.new_feature.rst diff --git a/doc/release/upcoming_changes/29273.new_feature.rst b/doc/release/upcoming_changes/29273.new_feature.rst new file mode 100644 index 000000000000..3e380ca0dbe6 --- /dev/null +++ b/doc/release/upcoming_changes/29273.new_feature.rst @@ -0,0 +1 @@ +Extend ``numpy.pad`` to accept a dictionary for the ``pad_width`` argument. diff --git a/numpy/lib/_arraypad_impl.py b/numpy/lib/_arraypad_impl.py index 507a0ab51b52..681b92fc8a72 100644 --- a/numpy/lib/_arraypad_impl.py +++ b/numpy/lib/_arraypad_impl.py @@ -3,6 +3,8 @@ of an n-dimensional array. """ +import typing + import numpy as np from numpy._core.overrides import array_function_dispatch from numpy.lib._index_tricks_impl import ndindex @@ -550,7 +552,7 @@ def pad(array, pad_width, mode='constant', **kwargs): ---------- array : array_like of rank N The array to pad. - pad_width : {sequence, array_like, int} + pad_width : {sequence, array_like, int, dict} Number of values padded to the edges of each axis. ``((before_1, after_1), ... (before_N, after_N))`` unique pad widths for each axis. @@ -558,6 +560,9 @@ def pad(array, pad_width, mode='constant', **kwargs): and after pad for each axis. ``(pad,)`` or ``int`` is a shortcut for before = after = pad width for all axes. + If a ``dict``, each key is an axis and its corresponding value is an ``int`` or + ``int`` pair describing the padding ``(before, after)`` or ``pad`` width for + that axis. mode : str or function, optional One of the following string values or a user supplied function. 
@@ -745,8 +750,39 @@ def pad(array, pad_width, mode='constant', **kwargs): [100, 100, 3, 4, 5, 100, 100], [100, 100, 100, 100, 100, 100, 100], [100, 100, 100, 100, 100, 100, 100]]) + + >>> a = np.arange(1, 7).reshape(2, 3) + >>> np.pad(a, {1: (1, 2)}) + array([[0, 1, 2, 3, 0, 0], + [0, 4, 5, 6, 0, 0]]) + >>> np.pad(a, {-1: 2}) + array([[0, 0, 1, 2, 3, 0, 0], + [0, 0, 4, 5, 6, 0, 0]]) + >>> np.pad(a, {0: (3, 0)}) + array([[0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [1, 2, 3], + [4, 5, 6]]) + >>> np.pad(a, {0: (3, 0), 1: 2}) + array([[0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 2, 3, 0, 0], + [0, 0, 4, 5, 6, 0, 0]]) """ array = np.asarray(array) + if isinstance(pad_width, dict): + seq = [(0, 0)] * array.ndim + for axis, width in pad_width.items(): + match width: + case int(both): + seq[axis] = both, both + case tuple((int(before), int(after))): + seq[axis] = before, after + case _ as invalid: + typing.assert_never(invalid) + pad_width = seq pad_width = np.asarray(pad_width) if not pad_width.dtype.kind == 'i': diff --git a/numpy/lib/_arraypad_impl.pyi b/numpy/lib/_arraypad_impl.pyi index 6158f299efe2..cfabdecf669e 100644 --- a/numpy/lib/_arraypad_impl.pyi +++ b/numpy/lib/_arraypad_impl.pyi @@ -43,11 +43,17 @@ _ModeKind: TypeAlias = L[ # TODO: In practice each keyword argument is exclusive to one or more # specific modes. Consider adding more overloads to express this in the future. 
+_PadWidth: TypeAlias = ( + _ArrayLikeInt + | dict[int, int] + | dict[int, tuple[int, int]] + | dict[int, int | tuple[int, int]] +) # Expand `**kwargs` into explicit keyword-only arguments @overload def pad( array: _ArrayLike[_ScalarT], - pad_width: _ArrayLikeInt, + pad_width: _PadWidth, mode: _ModeKind = ..., *, stat_length: _ArrayLikeInt | None = ..., @@ -58,7 +64,7 @@ def pad( @overload def pad( array: ArrayLike, - pad_width: _ArrayLikeInt, + pad_width: _PadWidth, mode: _ModeKind = ..., *, stat_length: _ArrayLikeInt | None = ..., @@ -69,14 +75,14 @@ def pad( @overload def pad( array: _ArrayLike[_ScalarT], - pad_width: _ArrayLikeInt, + pad_width: _PadWidth, mode: _ModeFunc, **kwargs: Any, ) -> NDArray[_ScalarT]: ... @overload def pad( array: ArrayLike, - pad_width: _ArrayLikeInt, + pad_width: _PadWidth, mode: _ModeFunc, **kwargs: Any, ) -> NDArray[Any]: ... diff --git a/numpy/lib/tests/test_arraypad.py b/numpy/lib/tests/test_arraypad.py index 6efbe348ca81..14383e743e47 100644 --- a/numpy/lib/tests/test_arraypad.py +++ b/numpy/lib/tests/test_arraypad.py @@ -1413,3 +1413,15 @@ def test_dtype_persistence(dtype, mode): arr = np.zeros((3, 2, 1), dtype=dtype) result = np.pad(arr, 1, mode=mode) assert result.dtype == dtype + + +@pytest.mark.parametrize("input_shape, pad_width, expected_shape", [ + ((3, 4, 5), {-2: (1, 3)}, (3, 4 + 1 + 3, 5)), + ((3, 4, 5), {0: (5, 2)}, (3 + 5 + 2, 4, 5)), + ((3, 4, 5), {0: (5, 2), -1: (3, 4)}, (3 + 5 + 2, 4, 5 + 3 + 4)), + ((3, 4, 5), {1: 5}, (3, 4 + 2 * 5, 5)), +]) +def test_pad_dict_pad_width(input_shape, pad_width, expected_shape): + a = np.zeros(input_shape) + result = np.pad(a, pad_width) + assert result.shape == expected_shape diff --git a/numpy/typing/tests/data/reveal/arraypad.pyi b/numpy/typing/tests/data/reveal/arraypad.pyi index c5a443d93fe3..3d53d913a770 100644 --- a/numpy/typing/tests/data/reveal/arraypad.pyi +++ b/numpy/typing/tests/data/reveal/arraypad.pyi @@ -20,3 +20,8 @@ assert_type(np.pad(AR_LIKE, (2, 3), "constant"), 
npt.NDArray[Any]) assert_type(np.pad(AR_f8, (2, 3), mode_func), npt.NDArray[np.float64]) assert_type(np.pad(AR_f8, (2, 3), mode_func, a=1, b=2), npt.NDArray[np.float64]) + +assert_type(np.pad(AR_i8, {-1: (2, 3)}), npt.NDArray[np.int64]) +assert_type(np.pad(AR_i8, {-2: 4}), npt.NDArray[np.int64]) +pad_width: dict[int, int | tuple[int, int]] = {-1: (2, 3), -2: 4} +assert_type(np.pad(AR_i8, pad_width), npt.NDArray[np.int64]) From bd683804a22657972c3b63987ed2fcace5fbf1b8 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Thu, 26 Jun 2025 16:58:22 +0100 Subject: [PATCH 0163/1718] TYP: Add overloads for `MaskedArray.__{div,rdiv,floordiv,rfloordiv}__` (#29271) --- numpy/__init__.pyi | 4 + numpy/ma/core.pyi | 134 +++++++++++++++++++-- numpy/typing/tests/data/reveal/ma.pyi | 165 ++++++++++++++++++++++++++ 3 files changed, 296 insertions(+), 7 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index d85c506ae659..e6a104d6b269 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -3091,6 +3091,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __rmul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + # Keep in sync with `MaskedArray.__truediv__` @overload def __truediv__(self: _ArrayInt_co | NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -3122,6 +3123,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __truediv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + # Keep in sync with `MaskedArray.__rtruediv__` @overload def __rtruediv__(self: _ArrayInt_co | NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -3151,6 +3153,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __rtruediv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... 
+ # Keep in sync with `MaskedArray.__floordiv__` @overload def __floordiv__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ... @overload @@ -3180,6 +3183,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __floordiv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + # Keep in sync with `MaskedArray.__rfloordiv__` @overload def __rfloordiv__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ... @overload diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 6c0de39594dd..caea284bb566 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -30,6 +30,8 @@ from numpy import ( floating, generic, inexact, + int8, + int64, int_, integer, intp, @@ -260,16 +262,18 @@ _ArrayT = TypeVar("_ArrayT", bound=ndarray[Any, Any]) _ScalarT = TypeVar("_ScalarT", bound=generic) _ScalarT_co = TypeVar("_ScalarT_co", bound=generic, covariant=True) _NumberT = TypeVar("_NumberT", bound=number) +_RealNumberT = TypeVar("_RealNumberT", bound=floating | integer) # A subset of `MaskedArray` that can be parametrized w.r.t. 
`np.generic` _MaskedArray: TypeAlias = MaskedArray[_AnyShape, dtype[_ScalarT]] _MaskedArrayUInt_co: TypeAlias = _MaskedArray[unsignedinteger | np.bool] _MaskedArrayInt_co: TypeAlias = _MaskedArray[integer | np.bool] -_MaskedArrayComplex_co: TypeAlias = _MaskedArray[inexact | integer | np.bool] -_MaskedArrayTD64_co: TypeAlias = _MaskedArray[timedelta64 | integer | np.bool] _MaskedArrayFloat64_co: TypeAlias = _MaskedArray[floating[_64Bit] | float32 | float16 | integer | np.bool] -_MaskedArrayComplex128_co: TypeAlias = _MaskedArray[number[_64Bit] | number[_32Bit] | float16 | integer | np.bool] _MaskedArrayFloat_co: TypeAlias = _MaskedArray[floating | integer | np.bool] +_MaskedArrayComplex128_co: TypeAlias = _MaskedArray[number[_64Bit] | number[_32Bit] | float16 | integer | np.bool] +_MaskedArrayComplex_co: TypeAlias = _MaskedArray[inexact | integer | np.bool] +_MaskedArrayNumber_co: TypeAlias = _MaskedArray[number | np.bool] +_MaskedArrayTD64_co: TypeAlias = _MaskedArray[timedelta64 | integer | np.bool] _Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] @@ -742,10 +746,126 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def __rmul__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... - def __truediv__(self, other): ... - def __rtruediv__(self, other): ... - def __floordiv__(self, other): ... - def __rfloordiv__(self, other): ... + # Keep in sync with `ndarray.__truediv__` + @overload + def __truediv__(self: _MaskedArrayInt_co | _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __truediv__(self: _MaskedArrayFloat64_co, other: _ArrayLikeInt_co | _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __truediv__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... 
+ @overload + def __truediv__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __truediv__(self: _MaskedArray[floating], other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... + @overload + def __truediv__(self: _MaskedArrayFloat_co, other: _ArrayLike[floating], /) -> _MaskedArray[floating]: ... + @overload + def __truediv__(self: _MaskedArray[complexfloating], other: _ArrayLikeNumber_co, /) -> _MaskedArray[complexfloating]: ... + @overload + def __truediv__(self: _MaskedArrayNumber_co, other: _ArrayLike[complexfloating], /) -> _MaskedArray[complexfloating]: ... + @overload + def __truediv__(self: _MaskedArray[inexact], other: _ArrayLikeNumber_co, /) -> _MaskedArray[inexact]: ... + @overload + def __truediv__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... + @overload + def __truediv__(self: _MaskedArray[timedelta64], other: _ArrayLike[timedelta64], /) -> _MaskedArray[float64]: ... + @overload + def __truediv__(self: _MaskedArray[timedelta64], other: _ArrayLikeBool_co, /) -> NoReturn: ... + @overload + def __truediv__(self: _MaskedArray[timedelta64], other: _ArrayLikeFloat_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __truediv__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __truediv__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `ndarray.__rtruediv__` + @overload + def __rtruediv__(self: _MaskedArrayInt_co | _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __rtruediv__(self: _MaskedArrayFloat64_co, other: _ArrayLikeInt_co | _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __rtruediv__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... 
+ @overload + def __rtruediv__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __rtruediv__(self: _MaskedArray[floating], other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... + @overload + def __rtruediv__(self: _MaskedArrayFloat_co, other: _ArrayLike[floating], /) -> _MaskedArray[floating]: ... + @overload + def __rtruediv__(self: _MaskedArray[complexfloating], other: _ArrayLikeNumber_co, /) -> _MaskedArray[complexfloating]: ... + @overload + def __rtruediv__(self: _MaskedArrayNumber_co, other: _ArrayLike[complexfloating], /) -> _MaskedArray[complexfloating]: ... + @overload + def __rtruediv__(self: _MaskedArray[inexact], other: _ArrayLikeNumber_co, /) -> _MaskedArray[inexact]: ... + @overload + def __rtruediv__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... + @overload + def __rtruediv__(self: _MaskedArray[timedelta64], other: _ArrayLike[timedelta64], /) -> _MaskedArray[float64]: ... + @overload + def __rtruediv__(self: _MaskedArray[integer | floating], other: _ArrayLike[timedelta64], /) -> _MaskedArray[timedelta64]: ... + @overload + def __rtruediv__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __rtruediv__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `ndarray.__floordiv__` + @overload + def __floordiv__(self: _MaskedArray[_RealNumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_RealNumberT]]: ... + @overload + def __floordiv__(self: _MaskedArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_RealNumberT]: ... # type: ignore[overload-overlap] + @overload + def __floordiv__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[int8]: ... # type: ignore[overload-overlap] + @overload + def __floordiv__(self: _MaskedArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> _MaskedArray[_RealNumberT]: ... 
# type: ignore[overload-overlap] + @overload + def __floordiv__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __floordiv__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __floordiv__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __floordiv__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __floordiv__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... + @overload + def __floordiv__(self: _MaskedArray[timedelta64], other: _ArrayLike[timedelta64], /) -> _MaskedArray[int64]: ... + @overload + def __floordiv__(self: _MaskedArray[timedelta64], other: _ArrayLikeBool_co, /) -> NoReturn: ... + @overload + def __floordiv__(self: _MaskedArray[timedelta64], other: _ArrayLikeFloat_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __floordiv__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __floordiv__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `ndarray.__rfloordiv__` + @overload + def __rfloordiv__(self: _MaskedArray[_RealNumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_RealNumberT]]: ... + @overload + def __rfloordiv__(self: _MaskedArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_RealNumberT]: ... # type: ignore[overload-overlap] + @overload + def __rfloordiv__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[int8]: ... # type: ignore[overload-overlap] + @overload + def __rfloordiv__(self: _MaskedArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> _MaskedArray[_RealNumberT]: ... 
# type: ignore[overload-overlap] + @overload + def __rfloordiv__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __rfloordiv__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __rfloordiv__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rfloordiv__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rfloordiv__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __rfloordiv__(self: _MaskedArray[timedelta64], other: _ArrayLike[timedelta64], /) -> _MaskedArray[int64]: ... + @overload + def __rfloordiv__(self: _MaskedArray[floating | integer], other: _ArrayLike[timedelta64], /) -> _MaskedArray[timedelta64]: ... + @overload + def __rfloordiv__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __rfloordiv__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + def __pow__(self, other, mod: None = None, /): ... def __rpow__(self, other, mod: None = None, /): ... 
diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index 0fa657240f0b..a101ba0d48b0 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -50,6 +50,7 @@ MAR_2d_f4: np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]] b: np.bool f4: np.float32 f: float +i: int assert_type(MAR_1d.shape, tuple[int]) @@ -719,3 +720,167 @@ assert_type(AR_LIKE_c * MAR_o, Any) # type: ignore[assert-type] assert_type(AR_LIKE_td64 * MAR_o, Any) # type: ignore[assert-type] assert_type(AR_LIKE_dt64 * MAR_o, Any) # type: ignore[assert-type] assert_type(AR_LIKE_o * MAR_o, Any) # type: ignore[assert-type] + +# MaskedArray "true" division + +assert_type(MAR_f8 / b, MaskedArray[np.float64]) +assert_type(MAR_f8 / i, MaskedArray[np.float64]) +assert_type(MAR_f8 / f, MaskedArray[np.float64]) + +assert_type(b / MAR_f8, MaskedArray[np.float64]) +assert_type(i / MAR_f8, MaskedArray[np.float64]) +assert_type(f / MAR_f8, MaskedArray[np.float64]) + +assert_type(MAR_b / AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_b / AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_b / AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_b / AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_b / AR_LIKE_o, Any) + +assert_type(AR_LIKE_b / MAR_b, MaskedArray[np.float64]) +assert_type(AR_LIKE_u / MAR_b, MaskedArray[np.float64]) +assert_type(AR_LIKE_i / MAR_b, MaskedArray[np.float64]) +assert_type(AR_LIKE_f / MAR_b, MaskedArray[np.float64]) +assert_type(AR_LIKE_o / MAR_b, Any) + +assert_type(MAR_u4 / AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_u4 / AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_u4 / AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_u4 / AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_u4 / AR_LIKE_o, Any) + +assert_type(AR_LIKE_b / MAR_u4, MaskedArray[np.float64]) +assert_type(AR_LIKE_u / MAR_u4, MaskedArray[np.float64]) +assert_type(AR_LIKE_i / MAR_u4, MaskedArray[np.float64]) 
+assert_type(AR_LIKE_f / MAR_u4, MaskedArray[np.float64]) +assert_type(AR_LIKE_td64 / MAR_u4, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_o / MAR_u4, Any) + +assert_type(MAR_i8 / AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_i8 / AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_i8 / AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_i8 / AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_i8 / AR_LIKE_o, Any) + +assert_type(AR_LIKE_b / MAR_i8, MaskedArray[np.float64]) +assert_type(AR_LIKE_u / MAR_i8, MaskedArray[np.float64]) +assert_type(AR_LIKE_i / MAR_i8, MaskedArray[np.float64]) +assert_type(AR_LIKE_f / MAR_i8, MaskedArray[np.float64]) +assert_type(AR_LIKE_td64 / MAR_i8, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_o / MAR_i8, Any) + +assert_type(MAR_f8 / AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_f8 / AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_f8 / AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_f8 / AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_f8 / AR_LIKE_o, Any) + +assert_type(AR_LIKE_b / MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_u / MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_i / MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_f / MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_td64 / MAR_f8, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_o / MAR_f8, Any) + +assert_type(MAR_td64 / AR_LIKE_u, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 / AR_LIKE_i, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 / AR_LIKE_f, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 / AR_LIKE_td64, MaskedArray[np.float64]) +assert_type(MAR_td64 / AR_LIKE_o, Any) + +assert_type(AR_LIKE_td64 / MAR_td64, MaskedArray[np.float64]) +assert_type(AR_LIKE_o / MAR_td64, Any) + +assert_type(MAR_o / AR_LIKE_b, Any) +assert_type(MAR_o / AR_LIKE_u, Any) +assert_type(MAR_o / AR_LIKE_i, Any) +assert_type(MAR_o / AR_LIKE_f, Any) +assert_type(MAR_o / AR_LIKE_td64, Any) +assert_type(MAR_o / 
AR_LIKE_dt64, Any) +assert_type(MAR_o / AR_LIKE_o, Any) + +assert_type(AR_LIKE_b / MAR_o, Any) +assert_type(AR_LIKE_u / MAR_o, Any) +assert_type(AR_LIKE_i / MAR_o, Any) +assert_type(AR_LIKE_f / MAR_o, Any) +assert_type(AR_LIKE_td64 / MAR_o, Any) +assert_type(AR_LIKE_dt64 / MAR_o, Any) +assert_type(AR_LIKE_o / MAR_o, Any) + +# MaskedArray floor division + +assert_type(MAR_b // AR_LIKE_b, MaskedArray[np.int8]) +assert_type(MAR_b // AR_LIKE_u, MaskedArray[np.uint32]) +assert_type(MAR_b // AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_b // AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_b // AR_LIKE_o, Any) + +assert_type(AR_LIKE_b // MAR_b, MaskedArray[np.int8]) +assert_type(AR_LIKE_u // MAR_b, MaskedArray[np.uint32]) +assert_type(AR_LIKE_i // MAR_b, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f // MAR_b, MaskedArray[np.floating]) +assert_type(AR_LIKE_o // MAR_b, Any) + +assert_type(MAR_u4 // AR_LIKE_b, MaskedArray[np.uint32]) +assert_type(MAR_u4 // AR_LIKE_u, MaskedArray[np.unsignedinteger]) +assert_type(MAR_u4 // AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_u4 // AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_u4 // AR_LIKE_o, Any) + +assert_type(AR_LIKE_b // MAR_u4, MaskedArray[np.uint32]) +assert_type(AR_LIKE_u // MAR_u4, MaskedArray[np.unsignedinteger]) +assert_type(AR_LIKE_i // MAR_u4, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f // MAR_u4, MaskedArray[np.floating]) +assert_type(AR_LIKE_td64 // MAR_u4, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_o // MAR_u4, Any) + +assert_type(MAR_i8 // AR_LIKE_b, MaskedArray[np.int64]) +assert_type(MAR_i8 // AR_LIKE_u, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 // AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 // AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_i8 // AR_LIKE_o, Any) + +assert_type(AR_LIKE_b // MAR_i8, MaskedArray[np.int64]) +assert_type(AR_LIKE_u // MAR_i8, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_i // MAR_i8, 
MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f // MAR_i8, MaskedArray[np.floating]) +assert_type(AR_LIKE_td64 // MAR_i8, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_o // MAR_i8, Any) + +assert_type(MAR_f8 // AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_f8 // AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_f8 // AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_f8 // AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_f8 // AR_LIKE_o, Any) + +assert_type(AR_LIKE_b // MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_u // MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_i // MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_f // MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_td64 // MAR_f8, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_o // MAR_f8, Any) + +assert_type(MAR_td64 // AR_LIKE_u, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 // AR_LIKE_i, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 // AR_LIKE_f, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 // AR_LIKE_td64, MaskedArray[np.int64]) +assert_type(MAR_td64 // AR_LIKE_o, Any) + +assert_type(AR_LIKE_td64 // MAR_td64, MaskedArray[np.int64]) +assert_type(AR_LIKE_o // MAR_td64, Any) + +assert_type(MAR_o // AR_LIKE_b, Any) +assert_type(MAR_o // AR_LIKE_u, Any) +assert_type(MAR_o // AR_LIKE_i, Any) +assert_type(MAR_o // AR_LIKE_f, Any) +assert_type(MAR_o // AR_LIKE_td64, Any) +assert_type(MAR_o // AR_LIKE_dt64, Any) +assert_type(MAR_o // AR_LIKE_o, Any) + +assert_type(AR_LIKE_b // MAR_o, Any) +assert_type(AR_LIKE_u // MAR_o, Any) +assert_type(AR_LIKE_i // MAR_o, Any) +assert_type(AR_LIKE_f // MAR_o, Any) +assert_type(AR_LIKE_td64 // MAR_o, Any) +assert_type(AR_LIKE_dt64 // MAR_o, Any) +assert_type(AR_LIKE_o // MAR_o, Any) From 6eeaae25ddfacf586ee02ddc600618127fa79cfd Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Thu, 26 Jun 2025 19:42:09 +0100 Subject: [PATCH 0164/1718] TYP: fix overloads 
where `out: _ArrayT` was typed as being the default (#29278) --- numpy/__init__.pyi | 34 +++++++++- numpy/_core/multiarray.pyi | 62 +++++++++++++++++-- .../tests/data/reveal/array_constructors.pyi | 2 +- 3 files changed, 88 insertions(+), 10 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index e6a104d6b269..d12e63a8bed9 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2401,7 +2401,17 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): axis1: SupportsIndex = ..., axis2: SupportsIndex = ..., dtype: DTypeLike = ..., - out: _ArrayT = ..., + *, + out: _ArrayT, + ) -> _ArrayT: ... + @overload + def trace( + self, # >= 2D array + offset: SupportsIndex, + axis1: SupportsIndex, + axis2: SupportsIndex, + dtype: DTypeLike, + out: _ArrayT, ) -> _ArrayT: ... @overload @@ -2425,7 +2435,16 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): self, indices: _ArrayLikeInt_co, axis: SupportsIndex | None = ..., - out: _ArrayT = ..., + *, + out: _ArrayT, + mode: _ModeKind = ..., + ) -> _ArrayT: ... + @overload + def take( + self, + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None, + out: _ArrayT, mode: _ModeKind = ..., ) -> _ArrayT: ... @@ -3655,7 +3674,16 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): self, indices: _ArrayLikeInt_co, axis: SupportsIndex | None = ..., - out: _ArrayT = ..., + *, + out: _ArrayT, + mode: _ModeKind = ..., + ) -> _ArrayT: ... + @overload + def take( + self, + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None, + out: _ArrayT, mode: _ModeKind = ..., ) -> _ArrayT: ... diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index aa9e796da10b..560822d68466 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -528,7 +528,6 @@ def concatenate( # type: ignore[misc] casting: _CastingKind | None = ... ) -> NDArray[_ScalarT]: ... 
@overload -@overload def concatenate( # type: ignore[misc] arrays: SupportsLenAndGetItem[ArrayLike], /, @@ -553,7 +552,17 @@ def concatenate( arrays: SupportsLenAndGetItem[ArrayLike], /, axis: SupportsIndex | None = ..., - out: _ArrayT = ..., + *, + out: _ArrayT, + dtype: DTypeLike = ..., + casting: _CastingKind | None = ... +) -> _ArrayT: ... +@overload +def concatenate( + arrays: SupportsLenAndGetItem[ArrayLike], + /, + axis: SupportsIndex | None, + out: _ArrayT, *, dtype: DTypeLike = ..., casting: _CastingKind | None = ... @@ -1094,7 +1103,17 @@ def busday_count( weekmask: ArrayLike = ..., holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., busdaycal: busdaycalendar | None = ..., - out: _ArrayT = ..., + *, + out: _ArrayT, +) -> _ArrayT: ... +@overload +def busday_count( + begindates: ArrayLike | dt.date | _NestedSequence[dt.date], + enddates: ArrayLike | dt.date | _NestedSequence[dt.date], + weekmask: ArrayLike, + holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None, + busdaycal: busdaycalendar | None, + out: _ArrayT, ) -> _ArrayT: ... # `roll="raise"` is (more or less?) equivalent to `casting="safe"` @@ -1126,7 +1145,18 @@ def busday_offset( # type: ignore[misc] weekmask: ArrayLike = ..., holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., busdaycal: busdaycalendar | None = ..., - out: _ArrayT = ..., + *, + out: _ArrayT, +) -> _ArrayT: ... +@overload +def busday_offset( # type: ignore[misc] + dates: _ArrayLike[datetime64] | dt.date | _NestedSequence[dt.date], + offsets: _ArrayLikeTD64_co | dt.timedelta | _NestedSequence[dt.timedelta], + roll: L["raise"], + weekmask: ArrayLike, + holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None, + busdaycal: busdaycalendar | None, + out: _ArrayT, ) -> _ArrayT: ... 
@overload def busday_offset( # type: ignore[misc] @@ -1156,7 +1186,18 @@ def busday_offset( weekmask: ArrayLike = ..., holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., busdaycal: busdaycalendar | None = ..., - out: _ArrayT = ..., + *, + out: _ArrayT, +) -> _ArrayT: ... +@overload +def busday_offset( + dates: ArrayLike | dt.date | _NestedSequence[dt.date], + offsets: ArrayLike | dt.timedelta | _NestedSequence[dt.timedelta], + roll: _RollKind, + weekmask: ArrayLike, + holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None, + busdaycal: busdaycalendar | None, + out: _ArrayT, ) -> _ArrayT: ... @overload @@ -1181,7 +1222,16 @@ def is_busday( weekmask: ArrayLike = ..., holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., busdaycal: busdaycalendar | None = ..., - out: _ArrayT = ..., + *, + out: _ArrayT, +) -> _ArrayT: ... +@overload +def is_busday( + dates: ArrayLike | _NestedSequence[dt.date], + weekmask: ArrayLike, + holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None, + busdaycal: busdaycalendar | None, + out: _ArrayT, ) -> _ArrayT: ... 
@overload diff --git a/numpy/typing/tests/data/reveal/array_constructors.pyi b/numpy/typing/tests/data/reveal/array_constructors.pyi index 45cc986d33a6..766547e54b60 100644 --- a/numpy/typing/tests/data/reveal/array_constructors.pyi +++ b/numpy/typing/tests/data/reveal/array_constructors.pyi @@ -53,7 +53,7 @@ assert_type(np.empty([1, 5, 6], dtype='c16'), npt.NDArray[Any]) assert_type(np.empty(mixed_shape), npt.NDArray[np.float64]) assert_type(np.concatenate(A), npt.NDArray[np.float64]) -assert_type(np.concatenate([A, A]), Any) # pyright correctly infers this as NDArray[float64] +assert_type(np.concatenate([A, A]), npt.NDArray[Any]) # pyright correctly infers this as NDArray[float64] assert_type(np.concatenate([[1], A]), npt.NDArray[Any]) assert_type(np.concatenate([[1], [1]]), npt.NDArray[Any]) assert_type(np.concatenate((A, A)), npt.NDArray[np.float64]) From d4054614bdbd5593f249e1cff90dc132ea5b0986 Mon Sep 17 00:00:00 2001 From: Dan Raviv Date: Fri, 25 Apr 2025 12:44:47 -0700 Subject: [PATCH 0165/1718] DOC: Add flat examples to argmax and armgin --- numpy/_core/fromnumeric.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/numpy/_core/fromnumeric.py b/numpy/_core/fromnumeric.py index 34fe1798f45e..8103a8733265 100644 --- a/numpy/_core/fromnumeric.py +++ b/numpy/_core/fromnumeric.py @@ -1303,6 +1303,8 @@ def argmax(a, axis=None, out=None, *, keepdims=np._NoValue): Indexes of the maximal elements of a N-dimensional array: + >>> a.flat[np.argmax(a)] + 15 >>> ind = np.unravel_index(np.argmax(a, axis=None), a.shape) >>> ind (1, 2) @@ -1401,6 +1403,8 @@ def argmin(a, axis=None, out=None, *, keepdims=np._NoValue): Indices of the minimum elements of a N-dimensional array: + >>> a.flat[np.argmin(a)] + 10 >>> ind = np.unravel_index(np.argmin(a, axis=None), a.shape) >>> ind (0, 0) From 202df739922a994227771c31371669f50b5b1e27 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 27 Jun 2025 11:36:33 -0600 
Subject: [PATCH 0166/1718] MAINT: Bump github/codeql-action from 3.29.0 to 3.29.1 (#29285) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.29.0 to 3.29.1. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/ce28f5bb42b7a9f2c824e633a3f6ee835bab6858...39edc492dbe16b1465b0cafca41432d857bdb31a) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.29.1 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 0342fd92c924..1b8f958b5a21 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3.29.0 + uses: github/codeql-action/init@39edc492dbe16b1465b0cafca41432d857bdb31a # v3.29.1 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3.29.0 + uses: github/codeql-action/autobuild@39edc492dbe16b1465b0cafca41432d857bdb31a # v3.29.1 # ℹ️ Command-line programs to run using the OS shell. 
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3.29.0 + uses: github/codeql-action/analyze@39edc492dbe16b1465b0cafca41432d857bdb31a # v3.29.1 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index e64789006e2c..c23baa2f2b42 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v2.1.27 + uses: github/codeql-action/upload-sarif@39edc492dbe16b1465b0cafca41432d857bdb31a # v2.1.27 with: sarif_file: results.sarif From 0f18e7c42e4136cdb1eaa1e1c1eff331f2019950 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 27 Jun 2025 14:38:45 -0600 Subject: [PATCH 0167/1718] BUG: handle case in mapiter where descriptors might get replaced (#29286) Need to use the actual dtype, not the one passed to the array creation (because it can get replaced). Fixes #29279. 
--- numpy/_core/src/multiarray/mapping.c | 2 ++ numpy/_core/tests/test_stringdtype.py | 12 ++++++++++++ 2 files changed, 14 insertions(+) diff --git a/numpy/_core/src/multiarray/mapping.c b/numpy/_core/src/multiarray/mapping.c index 7953e32fcbf0..2ce7dcdb234a 100644 --- a/numpy/_core/src/multiarray/mapping.c +++ b/numpy/_core/src/multiarray/mapping.c @@ -3013,6 +3013,8 @@ PyArray_MapIterNew(npy_index_info *indices , int index_num, int index_type, if (extra_op == NULL) { goto fail; } + // extra_op_dtype might have been replaced, so get a new reference + extra_op_dtype = PyArray_DESCR(extra_op); } /* diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index 8197822cd0e3..52a225619ccf 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -517,6 +517,18 @@ def test_fancy_indexing(string_list): assert_array_equal(a, b) assert a[0] == 'd' * 25 + # see gh-29279 + data = [ + ["AAAAAAAAAAAAAAAAA"], + ["BBBBBBBBBBBBBBBBBBBBBBBBBBBBB"], + ["CCCCCCCCCCCCCCCCC"], + ["DDDDDDDDDDDDDDDDD"], + ] + sarr = np.array(data, dtype=np.dtypes.StringDType()) + uarr = np.array(data, dtype="U30") + for ind in [[0], [1], [2], [3], [[0, 0]], [[1, 1, 3]], [[1, 1]]]: + assert_array_equal(sarr[ind], uarr[ind]) + def test_creation_functions(): assert_array_equal(np.zeros(3, dtype="T"), ["", "", ""]) From fb7135f9a512d6e35544aa331a0e38f3d60863ac Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sat, 28 Jun 2025 07:58:33 -0600 Subject: [PATCH 0168/1718] BUG: Fix macro redefinition (#29289) Also update .clang-format to indent preprocessor conditional blocks. 
--- .clang-format | 1 + numpy/_core/src/multiarray/alloc.c | 13 ++++++++----- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/.clang-format b/.clang-format index 034478ae2466..7e94a6fdb47c 100644 --- a/.clang-format +++ b/.clang-format @@ -27,6 +27,7 @@ IncludeCategories: Priority: 1 - Regex: '^<[[:alnum:]_.]+"' Priority: 2 +IndentPPDirectives: AfterHash Language: Cpp PointerAlignment: Right ReflowComments: true diff --git a/numpy/_core/src/multiarray/alloc.c b/numpy/_core/src/multiarray/alloc.c index cc9c5762a196..8061feed24e5 100644 --- a/numpy/_core/src/multiarray/alloc.c +++ b/numpy/_core/src/multiarray/alloc.c @@ -30,14 +30,17 @@ /* Do not enable the alloc cache if the GIL is disabled, or if ASAN or MSAN * instrumentation is enabled. The cache makes ASAN use-after-free or MSAN * use-of-uninitialized-memory warnings less useful. */ -#define USE_ALLOC_CACHE 1 #ifdef Py_GIL_DISABLED -# define USE_ALLOC_CACHE 0 +# define USE_ALLOC_CACHE 0 #elif defined(__has_feature) -# if __has_feature(address_sanitizer) || __has_feature(memory_sanitizer) -# define USE_ALLOC_CACHE 0 -# endif +# if __has_feature(address_sanitizer) || __has_feature(memory_sanitizer) +# define USE_ALLOC_CACHE 0 +# endif #endif +#ifndef USE_ALLOC_CACHE +# define USE_ALLOC_CACHE 1 +#endif + # define NBUCKETS 1024 /* number of buckets for data*/ # define NBUCKETS_DIM 16 /* number of buckets for dimensions/strides */ From 2bb4005d189667b71a6088db358f8848802f36e9 Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Sat, 28 Jun 2025 17:49:51 +0300 Subject: [PATCH 0169/1718] DOC: avoid searching some directories for doxygen-commented source code (#29275) * DOC: avoid searching some directories for doxygen-commented source code * lint [skip azp][skip actions][skip cirrus] * use known paths [skip azp][skip actions][skip cirrus] --- doc/preprocess.py | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/doc/preprocess.py b/doc/preprocess.py index 
b2e64ab6393a..bc43e89764f8 100755 --- a/doc/preprocess.py +++ b/doc/preprocess.py @@ -4,7 +4,7 @@ def main(): - doxy_gen(os.path.abspath(os.path.join('..'))) + doxy_gen(os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) def doxy_gen(root_path): """ @@ -24,6 +24,7 @@ def doxy_gen(root_path): class DoxyTpl(Template): delimiter = '@' + def doxy_config(root_path): """ Fetch all Doxygen sub-config files and gather it with the main config file. @@ -35,13 +36,14 @@ def doxy_config(root_path): conf = DoxyTpl(fd.read()) confs.append(conf.substitute(CUR_DIR=dsrc_path, **sub)) - for dpath, _, files in os.walk(root_path): - if ".doxyfile" not in files: - continue - conf_path = os.path.join(dpath, ".doxyfile") - with open(conf_path) as fd: - conf = DoxyTpl(fd.read()) - confs.append(conf.substitute(CUR_DIR=dpath, **sub)) + for subdir in ["doc", "numpy"]: + for dpath, _, files in os.walk(os.path.join(root_path, subdir)): + if ".doxyfile" not in files: + continue + conf_path = os.path.join(dpath, ".doxyfile") + with open(conf_path) as fd: + conf = DoxyTpl(fd.read()) + confs.append(conf.substitute(CUR_DIR=dpath, **sub)) return confs From 907b73ce9befb95deb5d0c2097bf3f307fd64cac Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Sun, 29 Jun 2025 21:22:53 +0200 Subject: [PATCH 0170/1718] MAIN: Enforce ruff E501 rule --- numpy/_core/tests/test_multithreading.py | 13 ++++++++----- ruff.toml | 1 - 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/numpy/_core/tests/test_multithreading.py b/numpy/_core/tests/test_multithreading.py index 09f907561ae5..5f8a2f11ea1f 100644 --- a/numpy/_core/tests/test_multithreading.py +++ b/numpy/_core/tests/test_multithreading.py @@ -272,12 +272,15 @@ def closure(b): def test_nonzero(dtype): # See: gh-28361 # - # np.nonzero uses np.count_nonzero to determine the size of the output array - # In a second pass the indices of the non-zero elements are determined, but they can have changed + # np.nonzero uses np.count_nonzero to 
determine the size of the output. + # array. In a second pass the indices of the non-zero elements are + # determined, but they can have changed # - # This test triggers a data race which is suppressed in the TSAN CI. The test is to ensure - # np.nonzero does not generate a segmentation fault + # This test triggers a data race which is suppressed in the TSAN CI. + # The test is to ensure np.nonzero does not generate a segmentation fault x = np.random.randint(4, size=100).astype(dtype) + expected_warning = ('number of non-zero array elements changed' + ' during function execution') def func(index): for _ in range(10): @@ -287,6 +290,6 @@ def func(index): try: _ = np.nonzero(x) except RuntimeError as ex: - assert 'number of non-zero array elements changed during function execution' in str(ex) + assert expected_warning in str(ex) run_threaded(func, max_workers=10, pass_count=True, outer_iterations=5) diff --git a/ruff.toml b/ruff.toml index 8db6df67377a..323f8cc5b3b9 100644 --- a/ruff.toml +++ b/ruff.toml @@ -81,7 +81,6 @@ ignore = [ "numpy/_core/tests/test_defchararray.py" = ["E501"] "numpy/_core/tests/test_einsum.py" = ["E501"] "numpy/_core/tests/test_multiarray.py" = ["E501"] -"numpy/_core/tests/test_multithreading.py" = ["E501"] "numpy/_core/tests/test_nditer*py" = ["E501"] "numpy/_core/tests/test_ufunc*py" = ["E501"] "numpy/_core/tests/test_umath*py" = ["E501"] From 6aeb7c6e5d7a40bc49ed2cb5980d5a706583bc26 Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Sun, 29 Jun 2025 22:50:06 +0200 Subject: [PATCH 0171/1718] More linting --- numpy/_core/tests/test_datetime.py | 6 ++-- numpy/_core/tests/test_simd_module.py | 6 ++-- numpy/_core/tests/test_strings.py | 42 ++++++++++++++++++--------- ruff.toml | 4 +-- 4 files changed, 37 insertions(+), 21 deletions(-) diff --git a/numpy/_core/tests/test_datetime.py b/numpy/_core/tests/test_datetime.py index 81ac9778971b..7ccb080a75c1 100644 --- a/numpy/_core/tests/test_datetime.py +++ b/numpy/_core/tests/test_datetime.py @@ 
-874,9 +874,11 @@ def test_pickle(self): def test_setstate(self): "Verify that datetime dtype __setstate__ can handle bad arguments" dt = np.dtype('>M8[us]') - assert_raises(ValueError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, 1)) + assert_raises(ValueError, dt.__setstate__, + (4, '>', None, None, None, -1, -1, 0, 1)) assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2]) - assert_raises(TypeError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, ({}, 'xxx'))) + assert_raises(TypeError, dt.__setstate__, + (4, '>', None, None, None, -1, -1, 0, ({}, 'xxx'))) assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2]) def test_dtype_promotion(self): diff --git a/numpy/_core/tests/test_simd_module.py b/numpy/_core/tests/test_simd_module.py index dca83fd427b6..3de1596aa10a 100644 --- a/numpy/_core/tests/test_simd_module.py +++ b/numpy/_core/tests/test_simd_module.py @@ -23,7 +23,8 @@ int_sfx = unsigned_sfx + signed_sfx all_sfx = unsigned_sfx + int_sfx -@pytest.mark.skipif(not npyv, reason="could not find any SIMD extension with NPYV support") +@pytest.mark.skipif(not npyv, + reason="could not find any SIMD extension with NPYV support") class Test_SIMD_MODULE: @pytest.mark.parametrize('sfx', all_sfx) @@ -47,7 +48,8 @@ def test_raises(self): pytest.raises(TypeError, vcb("setall"), [1]) pytest.raises(TypeError, vcb("load"), 1) pytest.raises(ValueError, vcb("load"), [1]) - pytest.raises(ValueError, vcb("store"), [1], getattr(npyv, f"reinterpret_{sfx}_u32")(a)) + value = getattr(npyv, f"reinterpret_{sfx}_u32")(a) + pytest.raises(ValueError, vcb("store"), [1], value) @pytest.mark.skipif(not npyv2, reason=( "could not find a second SIMD extension with NPYV support" diff --git a/numpy/_core/tests/test_strings.py b/numpy/_core/tests/test_strings.py index 56e928df4d7b..1b77a535eee6 100644 --- a/numpy/_core/tests/test_strings.py +++ b/numpy/_core/tests/test_strings.py @@ -989,22 +989,36 @@ def test_slice_unsupported(self, dt): with 
pytest.raises(TypeError, match="did not contain a loop"): np.strings.slice(np.array([1, 2, 3]), 4) - with pytest.raises(TypeError, match=r"Cannot cast ufunc '_slice' input .* from .* to dtype\('int(64|32)'\)"): - np.strings.slice(np.array(['foo', 'bar'], dtype=dt), np.array(['foo', 'bar'], dtype=dt)) - - @pytest.mark.parametrize("int_dt", [np.int8, np.int16, np.int32, np.int64, - np.uint8, np.uint16, np.uint32, np.uint64]) + regexp = (r"Cannot cast ufunc '_slice' input .* " + r"from .* to dtype\('int(64|32)'\)") + with pytest.raises(TypeError, match=regexp): + np.strings.slice(np.array(['foo', 'bar'], dtype=dt), + np.array(['foo', 'bar'], dtype=dt)) + + @pytest.mark.parametrize("int_dt", [np.int8, np.int16, np.int32, + np.int64, np.uint8, np.uint16, + np.uint32, np.uint64]) def test_slice_int_type_promotion(self, int_dt, dt): buf = np.array(["hello", "world"], dtype=dt) - - assert_array_equal(np.strings.slice(buf, int_dt(4)), np.array(["hell", "worl"], dtype=dt)) - assert_array_equal(np.strings.slice(buf, np.array([4, 4], dtype=int_dt)), np.array(["hell", "worl"], dtype=dt)) - - assert_array_equal(np.strings.slice(buf, int_dt(2), int_dt(4)), np.array(["ll", "rl"], dtype=dt)) - assert_array_equal(np.strings.slice(buf, np.array([2, 2], dtype=int_dt), np.array([4, 4], dtype=int_dt)), np.array(["ll", "rl"], dtype=dt)) - - assert_array_equal(np.strings.slice(buf, int_dt(0), int_dt(4), int_dt(2)), np.array(["hl", "wr"], dtype=dt)) - assert_array_equal(np.strings.slice(buf, np.array([0, 0], dtype=int_dt), np.array([4, 4], dtype=int_dt), np.array([2, 2], dtype=int_dt)), np.array(["hl", "wr"], dtype=dt)) + np_slice = np.strings.slice + assert_array_equal(np_slice(buf, int_dt(4)), + np.array(["hell", "worl"], dtype=dt)) + assert_array_equal(np_slice(buf, np.array([4, 4], dtype=int_dt)), + np.array(["hell", "worl"], dtype=dt)) + + assert_array_equal(np_slice(buf, int_dt(2), int_dt(4)), + np.array(["ll", "rl"], dtype=dt)) + assert_array_equal(np_slice(buf, np.array([2, 2], 
dtype=int_dt), + np.array([4, 4], dtype=int_dt)), + np.array(["ll", "rl"], dtype=dt)) + + assert_array_equal(np_slice(buf, int_dt(0), int_dt(4), int_dt(2)), + np.array(["hl", "wr"], dtype=dt)) + assert_array_equal(np_slice(buf, + np.array([0, 0], dtype=int_dt), + np.array([4, 4], dtype=int_dt), + np.array([2, 2], dtype=int_dt)), + np.array(["hl", "wr"], dtype=dt)) @pytest.mark.parametrize("dt", ["U", "T"]) class TestMethodsWithUnicode: diff --git a/ruff.toml b/ruff.toml index 323f8cc5b3b9..9916097420b6 100644 --- a/ruff.toml +++ b/ruff.toml @@ -76,7 +76,6 @@ ignore = [ "numpy/_core/tests/test_arrayprint.py" = ["E501"] "numpy/_core/tests/test_cpu_dispatcher.py" = ["E501"] "numpy/_core/tests/test_cpu_features.py" = ["E501"] -"numpy/_core/tests/test_datetime.py" = ["E501"] "numpy/_core/tests/test_dtype.py" = ["E501"] "numpy/_core/tests/test_defchararray.py" = ["E501"] "numpy/_core/tests/test_einsum.py" = ["E501"] @@ -86,8 +85,7 @@ ignore = [ "numpy/_core/tests/test_umath*py" = ["E501"] "numpy/_core/tests/test_numeric*.py" = ["E501"] "numpy/_core/tests/test_regression.py" = ["E501"] -"numpy/_core/tests/test_simd*.py" = ["E501"] -"numpy/_core/tests/test_strings.py" = ["E501"] +"numpy/_core/tests/test_simd.py" = ["E501"] "numpy/_core/_add_newdocs.py" = ["E501"] "numpy/_core/_add_newdocs_scalars.py" = ["E501"] "numpy/_core/code_generators/generate_umath.py" = ["E501"] From 7bdf6b4b9ed4976c0e6df512d95f09ea87aaa7f0 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sun, 29 Jun 2025 14:52:03 -0600 Subject: [PATCH 0172/1718] BUG: Fix version check in blas_utils.c Co-Authored-By: Daniel Bertalan --- numpy/_core/src/common/blas_utils.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/common/blas_utils.c b/numpy/_core/src/common/blas_utils.c index aaf976ed70e4..155963891071 100644 --- a/numpy/_core/src/common/blas_utils.c +++ b/numpy/_core/src/common/blas_utils.c @@ -43,7 +43,7 @@ is_macOS_version_15_4_or_later(void){ goto cleanup; } - if(major 
>= 15 && minor >= 4){ + if (major > 15 || (major == 15 && minor >= 4)) { ret = true; } From c96fcc8f7a5030098f8e0c304ce20864c3509021 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 30 Jun 2025 21:07:46 -0600 Subject: [PATCH 0173/1718] MAINT: Bump github/codeql-action from 3.29.1 to 3.29.2 (#29296) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.29.1 to 3.29.2. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/39edc492dbe16b1465b0cafca41432d857bdb31a...181d5eefc20863364f96762470ba6f862bdef56b) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.29.2 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 1b8f958b5a21..cbee65f0b713 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@39edc492dbe16b1465b0cafca41432d857bdb31a # v3.29.1 + uses: github/codeql-action/init@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 
# If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@39edc492dbe16b1465b0cafca41432d857bdb31a # v3.29.1 + uses: github/codeql-action/autobuild@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@39edc492dbe16b1465b0cafca41432d857bdb31a # v3.29.1 + uses: github/codeql-action/analyze@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index c23baa2f2b42..aa3bf1aec7cc 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@39edc492dbe16b1465b0cafca41432d857bdb31a # v2.1.27 + uses: github/codeql-action/upload-sarif@181d5eefc20863364f96762470ba6f862bdef56b # v2.1.27 with: sarif_file: results.sarif From c0c972ddf58b891d6b9851348413a29fd8e45624 Mon Sep 17 00:00:00 2001 From: Marco Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Tue, 1 Jul 2025 11:40:06 +0100 Subject: [PATCH 0174/1718] DOCS: Remove incorrect "Returns" section from `MaskedArray.sort` --- numpy/ma/core.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 5e231fbbadb7..2860bec848aa 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -5812,11 +5812,6 @@ def sort(self, axis=-1, kind=None, order=None, endwith=True, stable : bool, optional Only for compatibility with ``np.sort``. Ignored. 
- Returns - ------- - sorted_array : ndarray - Array of the same type and shape as `a`. - See Also -------- numpy.ndarray.sort : Method to sort an array in-place. From 82ae5ab13162e7c1e0c49cad6106d0d8cfa90fd1 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Tue, 1 Jul 2025 12:24:22 +0200 Subject: [PATCH 0175/1718] DEP: Give a visible warning when `align=` to dtype is a non-bool This seems generally confusing. I would like to make it keyword only, but this already gives a warning when loading _very_ old pickles, meaning I am not quite sure we should change away from a warning quickly. We should fix things around pickling and start pickling in a way that makes it easier to move to keyword only arguments. (I suppose one could detect the case of `np.dtype(obj, False, True)` and assume it is via unpickling, but... I am assuming that it is OK to (eventually) break unpickling these 10+ year old files, but I am not in a rush to actually do so and go through with the deprecation. Signed-off-by: Sebastian Berg --- .../upcoming_changes/29301.deprecation.rst | 7 ++++ numpy/_core/src/common/npy_import.h | 1 + numpy/_core/src/multiarray/conversion_utils.c | 20 +++------ numpy/_core/src/multiarray/descriptor.c | 41 +++++++++++++------ numpy/_core/tests/test_datetime.py | 26 ++++++------ numpy/_core/tests/test_deprecations.py | 20 +++++++++ numpy/_core/tests/test_multiarray.py | 14 +++++++ numpy/_core/tests/test_regression.py | 12 +++++- numpy/lib/tests/test_format.py | 17 ++++---- 9 files changed, 111 insertions(+), 47 deletions(-) create mode 100644 doc/release/upcoming_changes/29301.deprecation.rst diff --git a/doc/release/upcoming_changes/29301.deprecation.rst b/doc/release/upcoming_changes/29301.deprecation.rst new file mode 100644 index 000000000000..e520b692458d --- /dev/null +++ b/doc/release/upcoming_changes/29301.deprecation.rst @@ -0,0 +1,7 @@ +``align=`` must be passed as boolean to ``np.dtype()`` +------------------------------------------------------ +When 
creating a new ``dtype`` a ``VisibleDeprecationWarning`` will be +given if ``align=`` is not a boolean. +This is mainly to prevent accidentally passing a subarray align flag where it +has no effect, such as ``np.dtype("f8", 3)`` instead of ``np.dtype(("f8", 3))``. +We strongly suggest to always pass ``align=`` as a keyword argument. diff --git a/numpy/_core/src/common/npy_import.h b/numpy/_core/src/common/npy_import.h index 970efa8f549e..87944989e95d 100644 --- a/numpy/_core/src/common/npy_import.h +++ b/numpy/_core/src/common/npy_import.h @@ -41,6 +41,7 @@ typedef struct npy_runtime_imports_struct { PyObject *_std; PyObject *_sum; PyObject *_ufunc_doc_signature_formatter; + PyObject *_usefields; PyObject *_var; PyObject *_view_is_safe; PyObject *_void_scalar_to_string; diff --git a/numpy/_core/src/multiarray/conversion_utils.c b/numpy/_core/src/multiarray/conversion_utils.c index 5ada3e6e4faf..1994dd0ee8f7 100644 --- a/numpy/_core/src/multiarray/conversion_utils.c +++ b/numpy/_core/src/multiarray/conversion_utils.c @@ -438,15 +438,11 @@ PyArray_ConvertMultiAxis(PyObject *axis_in, int ndim, npy_bool *out_axis_flags) NPY_NO_EXPORT int PyArray_BoolConverter(PyObject *object, npy_bool *val) { - if (PyObject_IsTrue(object)) { - *val = NPY_TRUE; - } - else { - *val = NPY_FALSE; - } - if (PyErr_Occurred()) { + int bool_val = PyObject_IsTrue(object); + if (bool_val == -1) { return NPY_FAIL; } + *val = (npy_bool)bool_val; return NPY_SUCCEED; } @@ -460,15 +456,11 @@ PyArray_OptionalBoolConverter(PyObject *object, int *val) if (object == Py_None) { return NPY_SUCCEED; } - if (PyObject_IsTrue(object)) { - *val = 1; - } - else { - *val = 0; - } - if (PyErr_Occurred()) { + int bool_val = PyObject_IsTrue(object); + if (bool_val == -1) { return NPY_FAIL; } + *val = (npy_bool)bool_val; return NPY_SUCCEED; } diff --git a/numpy/_core/src/multiarray/descriptor.c b/numpy/_core/src/multiarray/descriptor.c index f520e3c4bceb..c32b0e5e3f9f 100644 --- 
a/numpy/_core/src/multiarray/descriptor.c +++ b/numpy/_core/src/multiarray/descriptor.c @@ -1029,17 +1029,13 @@ _validate_object_field_overlap(_PyArray_LegacyDescr *dtype) static PyArray_Descr * _convert_from_field_dict(PyObject *obj, int align) { - PyObject *_numpy_internal; - PyArray_Descr *res; - - _numpy_internal = PyImport_ImportModule("numpy._core._internal"); - if (_numpy_internal == NULL) { + if (npy_cache_import_runtime( + "numpy._core._internal", "_usefields", &npy_runtime_imports._usefields) < 0) { return NULL; } - res = (PyArray_Descr *)PyObject_CallMethod(_numpy_internal, - "_usefields", "Oi", obj, align); - Py_DECREF(_numpy_internal); - return res; + + return (PyArray_Descr *)PyObject_CallFunctionObjArgs( + npy_runtime_imports._usefields, obj, align ? Py_True : Py_False, NULL); } /* @@ -2554,7 +2550,9 @@ arraydescr_new(PyTypeObject *subtype, return NULL; } - PyObject *odescr, *metadata=NULL; + PyObject *odescr; + PyObject *oalign = NULL; + PyObject *metadata = NULL; PyArray_Descr *conv; npy_bool align = NPY_FALSE; npy_bool copy = NPY_FALSE; @@ -2562,14 +2560,33 @@ arraydescr_new(PyTypeObject *subtype, static char *kwlist[] = {"dtype", "align", "copy", "metadata", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O&O&O!:dtype", kwlist, + if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|OO&O!:dtype", kwlist, &odescr, - PyArray_BoolConverter, &align, + &oalign, PyArray_BoolConverter, ©, &PyDict_Type, &metadata)) { return NULL; } + if (oalign != NULL) { + /* + * In the future, reject non Python (or NumPy) boolean, including integers to avoid any + * possibility of thinking that an integer alignment makes sense here. + */ + if (!PyBool_Check(oalign) && !PyArray_IsScalar(oalign, Bool)) { + /* Deprecated 2025-07-01: NumPy 2.4 */ + if (PyErr_WarnFormat(npy_static_pydata.VisibleDeprecationWarning, 1, + "dtype(): align should be passed as Python or NumPy boolean but got `align=%.100R`. " + "Did you mean to pass a tuple to create a subarray type? 
(Deprecated NumPy 2.4)", + oalign) < 0) { + return NULL; + } + } + if (!PyArray_BoolConverter(oalign, &align)) { + return NULL; + } + } + conv = _convert_from_any(odescr, align); if (conv == NULL) { return NULL; diff --git a/numpy/_core/tests/test_datetime.py b/numpy/_core/tests/test_datetime.py index 7ccb080a75c1..c5d2ca459e6a 100644 --- a/numpy/_core/tests/test_datetime.py +++ b/numpy/_core/tests/test_datetime.py @@ -858,18 +858,20 @@ def test_pickle(self): delta) # Check that loading pickles from 1.6 works - pkl = b"cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n"\ - b"(I4\nS'<'\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'D'\np6\n"\ - b"I7\nI1\nI1\ntp7\ntp8\ntp9\nb." - assert_equal(pickle.loads(pkl), np.dtype(''\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'us'\np6\n"\ - b"I1\nI1\nI1\ntp7\ntp8\ntp9\nb." - assert_equal(pickle.loads(pkl), np.dtype('>M8[us]')) + with pytest.warns(np.exceptions.VisibleDeprecationWarning, + match=r".*align should be passed"): + pkl = b"cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n"\ + b"(I4\nS'<'\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'D'\np6\n"\ + b"I7\nI1\nI1\ntp7\ntp8\ntp9\nb." + assert_equal(pickle.loads(pkl), np.dtype(''\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'us'\np6\n"\ + b"I1\nI1\nI1\ntp7\ntp8\ntp9\nb." + assert_equal(pickle.loads(pkl), np.dtype('>M8[us]')) def test_setstate(self): "Verify that datetime dtype __setstate__ can handle bad arguments" diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index 7d4875d6d149..df27a1f9d076 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -453,3 +453,23 @@ def test_deprecated(self): struct_ufunc.add_triplet, "new docs" ) ) + + +class TestDTypeAlignBool(_VisibleDeprecationTestCase): + # Deprecated in Numpy 2.4, 2025-07 + # NOTE: As you can see, finalizing this deprecation breaks some (very) old + # pickle files. This may be fine, but sneeds to be done with some care since + # it breaks all of them and not just some. 
+ # (Maybe it should be a 3.0 or only after warning more explicitly around pickles.) + message = r"dtype\(\): align should be passed as Python or NumPy boolean but got " + + def test_deprecated(self): + # in particular integers should be rejected because one may think they mean + # alignment, or pass them accidentally as a subarray shape (meaning to pass + # a tuple). + self.assert_deprecated(lambda: np.dtype("f8", align=3)) + + @pytest.mark.parametrize("align", [True, False, np.True_, np.False_]) + def test_not_deprecated(self, align): + # if the user passes a bool, it is accepted. + self.assert_not_deprecated(lambda: np.dtype("f8", align=align)) diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 0faf35c64b98..4ee021d18f6f 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -4505,18 +4505,24 @@ def _loads(self, obj): # version 0 pickles, using protocol=2 to pickle # version 0 doesn't have a version field + @pytest.mark.filterwarnings( + "ignore:.*align should be passed:numpy.exceptions.VisibleDeprecationWarning") def test_version0_int8(self): s = b"\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb." 
a = np.array([1, 2, 3, 4], dtype=np.int8) p = self._loads(s) assert_equal(a, p) + @pytest.mark.filterwarnings( + "ignore:.*align should be passed:numpy.exceptions.VisibleDeprecationWarning") def test_version0_float32(self): s = b"\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01 Date: Tue, 1 Jul 2025 17:34:13 +0200 Subject: [PATCH 0176/1718] MAINT: Enable linting with ruff E501 (#29300) * MAINT: Enforce ruff E501 * fix merge --- benchmarks/benchmarks/bench_linalg.py | 13 ++++--- numpy/_core/tests/test_simd.py | 12 ++++--- numpy/_core/tests/test_ufunc.py | 13 ++++--- numpy/_core/tests/test_umath_accuracy.py | 12 +++++-- numpy/_core/tests/test_umath_complex.py | 44 +++++++++++++----------- numpy/tests/test_configtool.py | 12 ++++--- ruff.toml | 9 ++--- 7 files changed, 68 insertions(+), 47 deletions(-) diff --git a/benchmarks/benchmarks/bench_linalg.py b/benchmarks/benchmarks/bench_linalg.py index 03e2fd77f4f2..49a7ae84fde6 100644 --- a/benchmarks/benchmarks/bench_linalg.py +++ b/benchmarks/benchmarks/bench_linalg.py @@ -148,7 +148,9 @@ def setup(self, dtype): self.non_contiguous_dim1_small = np.arange(1, 80, 2, dtype=dtype) self.non_contiguous_dim1 = np.arange(1, 4000, 2, dtype=dtype) self.non_contiguous_dim2 = np.arange(1, 2400, 2, dtype=dtype).reshape(30, 40) - self.non_contiguous_dim3 = np.arange(1, 48000, 2, dtype=dtype).reshape(20, 30, 40) + + non_contiguous_dim3 = np.arange(1, 48000, 2, dtype=dtype) + self.non_contiguous_dim3 = non_contiguous_dim3.reshape(20, 30, 40) # outer(a,b): trigger sum_of_products_contig_stride0_outcontig_two def time_einsum_outer(self, dtype): @@ -180,11 +182,13 @@ def time_einsum_contig_outstride0(self, dtype): # outer(a,b): non_contiguous arrays def time_einsum_noncon_outer(self, dtype): - np.einsum("i,j", self.non_contiguous_dim1, self.non_contiguous_dim1, optimize=True) + np.einsum("i,j", self.non_contiguous_dim1, + 
self.non_contiguous_dim1, optimize=True) # multiply(a, b):non_contiguous arrays def time_einsum_noncon_multiply(self, dtype): - np.einsum("..., ...", self.non_contiguous_dim2, self.non_contiguous_dim3, optimize=True) + np.einsum("..., ...", self.non_contiguous_dim2, + self.non_contiguous_dim3, optimize=True) # sum and multiply:non_contiguous arrays def time_einsum_noncon_sum_mul(self, dtype): @@ -200,7 +204,8 @@ def time_einsum_noncon_mul(self, dtype): # contig_contig_outstride0_two: non_contiguous arrays def time_einsum_noncon_contig_contig(self, dtype): - np.einsum("ji,i->", self.non_contiguous_dim2, self.non_contiguous_dim1_small, optimize=True) + np.einsum("ji,i->", self.non_contiguous_dim2, + self.non_contiguous_dim1_small, optimize=True) # sum_of_products_contig_outstride0_one: non_contiguous arrays def time_einsum_noncon_contig_outstride0(self, dtype): diff --git a/numpy/_core/tests/test_simd.py b/numpy/_core/tests/test_simd.py index acea4315e679..335abc98c84e 100644 --- a/numpy/_core/tests/test_simd.py +++ b/numpy/_core/tests/test_simd.py @@ -271,7 +271,8 @@ def test_operators_shift(self): shr = self.shr(vdata_a, count) assert shr == data_shr_a - # shift by zero or max or out-range immediate constant is not applicable and illogical + # shift by zero or max or out-range immediate constant is not + # applicable and illogical for count in range(1, self._scalar_size()): # load to cast data_shl_a = self.load([a << count for a in data_a]) @@ -419,7 +420,8 @@ def test_sqrt(self): sqrt = self.sqrt(self.setall(case)) assert sqrt == pytest.approx(data_sqrt, nan_ok=True) - data_sqrt = self.load([math.sqrt(x) for x in data]) # load to truncate precision + # load to truncate precision + data_sqrt = self.load([math.sqrt(x) for x in data]) sqrt = self.sqrt(vdata) assert sqrt == data_sqrt @@ -1334,8 +1336,10 @@ def test_mask_conditional(self): for sfx in sfxes: skip_m = skip_sfx.get(sfx, skip) inhr = (cls,) - attr = {"npyv": targets[target_name], "sfx": sfx, "target_name": 
target_name} - tcls = type(f"Test{cls.__name__}_{simd_width}_{target_name}_{sfx}", inhr, attr) + attr = {"npyv": targets[target_name], "sfx": sfx, + "target_name": target_name} + type_name = f"Test{cls.__name__}_{simd_width}_{target_name}_{sfx}" + tcls = type(type_name, inhr, attr) if skip_m: pytest.mark.skip(reason=skip_m)(tcls) globals()[tcls.__name__] = tcls diff --git a/numpy/_core/tests/test_ufunc.py b/numpy/_core/tests/test_ufunc.py index a1cd63aec523..836be1245399 100644 --- a/numpy/_core/tests/test_ufunc.py +++ b/numpy/_core/tests/test_ufunc.py @@ -1100,17 +1100,15 @@ def test_output_ellipsis_errors(self): match=r"out=\.\.\. is only allowed as a keyword argument."): np.add.reduce(1, (), None, ...) - with pytest.raises(TypeError, - match=r"must use `\.\.\.` as `out=\.\.\.` and not per-operand/in a tuple"): + type_error = r"must use `\.\.\.` as `out=\.\.\.` and not per-operand/in a tuple" + with pytest.raises(TypeError, match=type_error): np.negative(1, out=(...,)) - with pytest.raises(TypeError, - match=r"must use `\.\.\.` as `out=\.\.\.` and not per-operand/in a tuple"): + with pytest.raises(TypeError, match=type_error): # We only allow out=... 
not individual args for now np.divmod(1, 2, out=(np.empty(()), ...)) - with pytest.raises(TypeError, - match=r"must use `\.\.\.` as `out=\.\.\.` and not per-operand/in a tuple"): + with pytest.raises(TypeError, match=type_error): np.add.reduce(1, out=(...,)) def test_axes_argument(self): @@ -1556,7 +1554,8 @@ def __eq__(self, other): arr1d = np.array([HasComparisons()]) assert_equal(arr1d == arr1d, np.array([True])) - assert_equal(np.equal(arr1d, arr1d), np.array([True])) # normal behavior is a cast + # normal behavior is a cast + assert_equal(np.equal(arr1d, arr1d), np.array([True])) assert_equal(np.equal(arr1d, arr1d, dtype=object), np.array(['=='])) def test_object_array_reduction(self): diff --git a/numpy/_core/tests/test_umath_accuracy.py b/numpy/_core/tests/test_umath_accuracy.py index da9419d63a8a..3ca2f508672e 100644 --- a/numpy/_core/tests/test_umath_accuracy.py +++ b/numpy/_core/tests/test_umath_accuracy.py @@ -68,8 +68,16 @@ def test_validate_transcendentals(self): npfunc = getattr(np, npname) for datatype in np.unique(data['type']): data_subset = data[data['type'] == datatype] - inval = np.array(str_to_float(data_subset['input'].astype(str), data_subset['type'].astype(str)), dtype=eval(datatype)) - outval = np.array(str_to_float(data_subset['output'].astype(str), data_subset['type'].astype(str)), dtype=eval(datatype)) + data_input_str = data_subset['input'].astype(str) + data_output_str = data_subset['output'].astype(str) + data_type_str = data_subset['type'].astype(str) + + inval = np.array(str_to_float(data_input_str, + data_type_str), + dtype=eval(datatype)) + outval = np.array(str_to_float(data_output_str, + data_type_str), + dtype=eval(datatype)) perm = np.random.permutation(len(inval)) inval = inval[perm] outval = outval[perm] diff --git a/numpy/_core/tests/test_umath_complex.py b/numpy/_core/tests/test_umath_complex.py index 8f6f5c682a91..7012e7e357fe 100644 --- a/numpy/_core/tests/test_umath_complex.py +++ 
b/numpy/_core/tests/test_umath_complex.py @@ -562,31 +562,35 @@ class TestSpecialComplexAVX: @pytest.mark.parametrize("stride", [-4, -2, -1, 1, 2, 4]) @pytest.mark.parametrize("astype", [np.complex64, np.complex128]) def test_array(self, stride, astype): - arr = np.array([complex(np.nan, np.nan), - complex(np.nan, np.inf), - complex(np.inf, np.nan), - complex(np.inf, np.inf), - complex(0., np.inf), - complex(np.inf, 0.), - complex(0., 0.), - complex(0., np.nan), - complex(np.nan, 0.)], dtype=astype) - abs_true = np.array([np.nan, np.inf, np.inf, np.inf, np.inf, np.inf, 0., np.nan, np.nan], dtype=arr.real.dtype) - sq_true = np.array([complex(np.nan, np.nan), - complex(np.nan, np.nan), - complex(np.nan, np.nan), - complex(np.nan, np.inf), - complex(-np.inf, np.nan), - complex(np.inf, np.nan), - complex(0., 0.), - complex(np.nan, np.nan), - complex(np.nan, np.nan)], dtype=astype) + nan = np.nan + inf = np.inf + arr = np.array([complex(nan, nan), + complex(nan, inf), + complex(inf, nan), + complex(inf, inf), + complex(0., inf), + complex(inf, 0.), + complex(0., 0.), + complex(0., nan), + complex(nan, 0.)], dtype=astype) + abs_true = np.array([nan, inf, inf, inf, inf, inf, 0., nan, nan], + dtype=arr.real.dtype) + sq_true = np.array([complex(nan, nan), + complex(nan, nan), + complex(nan, nan), + complex(nan, inf), + complex(-inf, nan), + complex(inf, nan), + complex(0., 0.), + complex(nan, nan), + complex(nan, nan)], dtype=astype) with np.errstate(invalid='ignore'): assert_equal(np.abs(arr[::stride]), abs_true[::stride]) assert_equal(np.square(arr[::stride]), sq_true[::stride]) class TestComplexAbsoluteAVX: - @pytest.mark.parametrize("arraysize", [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 18, 19]) + @pytest.mark.parametrize("arraysize", + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 18, 19]) @pytest.mark.parametrize("stride", [-4, -3, -2, -1, 1, 2, 3, 4]) @pytest.mark.parametrize("astype", [np.complex64, np.complex128]) # test to ensure masking and strides work 
as intended in the AVX implementation diff --git a/numpy/tests/test_configtool.py b/numpy/tests/test_configtool.py index e0b9bb1b7aff..a9c23b5cc007 100644 --- a/numpy/tests/test_configtool.py +++ b/numpy/tests/test_configtool.py @@ -15,8 +15,10 @@ PKG_CONFIG_DIR = NUMPY_ROOT / '_core' / 'lib' / 'pkgconfig' -@pytest.mark.skipif(not IS_INSTALLED, reason="`numpy-config` not expected to be installed") -@pytest.mark.skipif(IS_WASM, reason="wasm interpreter cannot start subprocess") +@pytest.mark.skipif(not IS_INSTALLED, + reason="`numpy-config` not expected to be installed") +@pytest.mark.skipif(IS_WASM, + reason="wasm interpreter cannot start subprocess") class TestNumpyConfig: def check_numpyconfig(self, arg): p = subprocess.run(['numpy-config', arg], capture_output=True, text=True) @@ -36,13 +38,15 @@ def test_configtool_pkgconfigdir(self): assert pathlib.Path(stdout) == PKG_CONFIG_DIR -@pytest.mark.skipif(not IS_INSTALLED, reason="numpy must be installed to check its entrypoints") +@pytest.mark.skipif(not IS_INSTALLED, + reason="numpy must be installed to check its entrypoints") def test_pkg_config_entrypoint(): (entrypoint,) = importlib.metadata.entry_points(group='pkg_config', name='numpy') assert entrypoint.value == numpy._core.lib.pkgconfig.__name__ -@pytest.mark.skipif(not IS_INSTALLED, reason="numpy.pc is only available when numpy is installed") +@pytest.mark.skipif(not IS_INSTALLED, + reason="numpy.pc is only available when numpy is installed") @pytest.mark.skipif(IS_EDITABLE, reason="editable installs don't have a numpy.pc") def test_pkg_config_config_exists(): assert PKG_CONFIG_DIR.joinpath('numpy.pc').is_file() diff --git a/ruff.toml b/ruff.toml index 9916097420b6..ed6026ed0665 100644 --- a/ruff.toml +++ b/ruff.toml @@ -72,7 +72,6 @@ ignore = [ "bench_*.py" = ["B015", "B018"] "test*.py" = ["B015", "B018", "E201", "E714"] -"benchmarks/benchmarks/bench_linalg.py" = ["E501"] "numpy/_core/tests/test_arrayprint.py" = ["E501"] 
"numpy/_core/tests/test_cpu_dispatcher.py" = ["E501"] "numpy/_core/tests/test_cpu_features.py" = ["E501"] @@ -81,17 +80,15 @@ ignore = [ "numpy/_core/tests/test_einsum.py" = ["E501"] "numpy/_core/tests/test_multiarray.py" = ["E501"] "numpy/_core/tests/test_nditer*py" = ["E501"] -"numpy/_core/tests/test_ufunc*py" = ["E501"] -"numpy/_core/tests/test_umath*py" = ["E501"] -"numpy/_core/tests/test_numeric*.py" = ["E501"] +"numpy/_core/tests/test_umath.py" = ["E501"] +"numpy/_core/tests/test_numeric.py" = ["E501"] +"numpy/_core/tests/test_numerictypes.py" = ["E501"] "numpy/_core/tests/test_regression.py" = ["E501"] -"numpy/_core/tests/test_simd.py" = ["E501"] "numpy/_core/_add_newdocs.py" = ["E501"] "numpy/_core/_add_newdocs_scalars.py" = ["E501"] "numpy/_core/code_generators/generate_umath.py" = ["E501"] "numpy/lib/tests/test_format.py" = ["E501"] "numpy/linalg/tests/test_linalg.py" = ["E501"] -"numpy/tests/test_configtool.py" = ["E501"] "numpy/f2py/*py" = ["E501"] # for typing related files we follow https://typing.python.org/en/latest/guides/writing_stubs.html#maximum-line-length "numpy/_typing/_array_like.py" = ["E501"] From 3727d5b23e33c3c495b8025ad8c7413d72c9db0d Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Tue, 1 Jul 2025 18:39:39 +0100 Subject: [PATCH 0177/1718] TYP: Add shape typing to return values of `np.nonzero` and `ndarray.nonzero`, simplify `MaskedArray.nonzero` return type (#29303) --- numpy/__init__.pyi | 2 +- numpy/_core/fromnumeric.pyi | 2 +- numpy/ma/core.pyi | 2 +- numpy/typing/tests/data/reveal/fromnumeric.pyi | 8 ++++---- numpy/typing/tests/data/reveal/ma.pyi | 2 +- numpy/typing/tests/data/reveal/ndarray_misc.pyi | 2 +- 6 files changed, 9 insertions(+), 9 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index d12e63a8bed9..d071f3e6b92d 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2355,7 +2355,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, 
_DTypeT_co]): def dot(self, b: ArrayLike, out: _ArrayT) -> _ArrayT: ... # `nonzero()` is deprecated for 0d arrays/generics - def nonzero(self) -> tuple[NDArray[intp], ...]: ... + def nonzero(self) -> tuple[ndarray[tuple[int], np.dtype[intp]], ...]: ... # `put` is technically available to `generic`, # but is pointless as `generic`s are immutable diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index 2aedc727e6dc..ba6669936571 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -576,7 +576,7 @@ def ravel( @overload def ravel(a: ArrayLike, order: _OrderKACF = "C") -> np.ndarray[tuple[int], np.dtype]: ... -def nonzero(a: _ArrayLike[Any]) -> tuple[NDArray[intp], ...]: ... +def nonzero(a: _ArrayLike[Any]) -> tuple[np.ndarray[tuple[int], np.dtype[intp]], ...]: ... # this prevents `Any` from being returned with Pyright @overload diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index caea284bb566..3ca5d9be0b97 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -1102,7 +1102,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): keepdims: bool | _NoValueType = ..., ) -> _ArrayT: ... - def nonzero(self) -> tuple[_Array1D[intp], *tuple[_Array1D[intp], ...]]: ... + def nonzero(self) -> tuple[_Array1D[intp], ...]: ... def trace(self, offset=..., axis1=..., axis2=..., dtype=..., out=...): ... def dot(self, b, out=..., strict=...): ... def sum(self, axis=..., dtype=..., out=..., keepdims=...): ... 
diff --git a/numpy/typing/tests/data/reveal/fromnumeric.pyi b/numpy/typing/tests/data/reveal/fromnumeric.pyi index 5438e001a13f..62bc926c765b 100644 --- a/numpy/typing/tests/data/reveal/fromnumeric.pyi +++ b/numpy/typing/tests/data/reveal/fromnumeric.pyi @@ -131,10 +131,10 @@ assert_type(np.ravel(f), np.ndarray[tuple[int], np.dtype[np.float64 | np.int_ | assert_type(np.ravel(AR_b), np.ndarray[tuple[int], np.dtype[np.bool]]) assert_type(np.ravel(AR_f4), np.ndarray[tuple[int], np.dtype[np.float32]]) -assert_type(np.nonzero(AR_b), tuple[npt.NDArray[np.intp], ...]) -assert_type(np.nonzero(AR_f4), tuple[npt.NDArray[np.intp], ...]) -assert_type(np.nonzero(AR_1d), tuple[npt.NDArray[np.intp], ...]) -assert_type(np.nonzero(AR_nd), tuple[npt.NDArray[np.intp], ...]) +assert_type(np.nonzero(AR_b), tuple[np.ndarray[tuple[int], np.dtype[np.intp]], ...]) +assert_type(np.nonzero(AR_f4), tuple[np.ndarray[tuple[int], np.dtype[np.intp]], ...]) +assert_type(np.nonzero(AR_1d), tuple[np.ndarray[tuple[int], np.dtype[np.intp]], ...]) +assert_type(np.nonzero(AR_nd), tuple[np.ndarray[tuple[int], np.dtype[np.intp]], ...]) assert_type(np.shape(b), tuple[()]) assert_type(np.shape(f), tuple[()]) diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index a101ba0d48b0..48311a7e87fa 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -380,7 +380,7 @@ assert_type(MAR_2d_f4.transpose((1, 0)), np.ma.MaskedArray[tuple[int, int], np.d assert_type(MAR_b.T, MaskedArray[np.bool]) assert_type(MAR_2d_f4.T, np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) -assert_type(MAR_2d_f4.nonzero(), tuple[_Array1D[np.intp], *tuple[_Array1D[np.intp], ...]]) +assert_type(MAR_2d_f4.nonzero(), tuple[_Array1D[np.intp], ...]) assert_type(MAR_2d_f4.nonzero()[0], _Array1D[np.intp]) # Masked Array addition diff --git a/numpy/typing/tests/data/reveal/ndarray_misc.pyi b/numpy/typing/tests/data/reveal/ndarray_misc.pyi index 
4cbb90621ca9..7f0a214f8f52 100644 --- a/numpy/typing/tests/data/reveal/ndarray_misc.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_misc.pyi @@ -170,7 +170,7 @@ assert_type(AR_f8.dot(1), npt.NDArray[Any]) assert_type(AR_f8.dot([1]), Any) assert_type(AR_f8.dot(1, out=B), SubClass) -assert_type(AR_f8.nonzero(), tuple[npt.NDArray[np.intp], ...]) +assert_type(AR_f8.nonzero(), tuple[np.ndarray[tuple[int], np.dtype[np.intp]], ...]) assert_type(AR_f8.searchsorted(1), np.intp) assert_type(AR_f8.searchsorted([1]), npt.NDArray[np.intp]) From ec34c6d3f6ff7969bdd82f9b2347070d11d8b7de Mon Sep 17 00:00:00 2001 From: ianlv <168640168+ianlv@users.noreply.github.com> Date: Wed, 2 Jul 2025 16:38:32 +0800 Subject: [PATCH 0178/1718] chore: remove redundant words in comment (#29306) --- doc/source/reference/c-api/array.rst | 2 +- numpy/_core/src/common/dlpack/dlpack.h | 2 +- numpy/_core/src/multiarray/alloc.h | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst index 02db78ebb2b1..dc043b77f187 100644 --- a/doc/source/reference/c-api/array.rst +++ b/doc/source/reference/c-api/array.rst @@ -3814,7 +3814,7 @@ In this case, the helper C files typically do not have a canonical place where ``PyArray_ImportNumPyAPI`` should be called (although it is OK and fast to call it often). -To solve this, NumPy provides the following pattern that the the main +To solve this, NumPy provides the following pattern that the main file is modified to define ``PY_ARRAY_UNIQUE_SYMBOL`` before the include: .. 
code-block:: c diff --git a/numpy/_core/src/common/dlpack/dlpack.h b/numpy/_core/src/common/dlpack/dlpack.h index 19ecc27761f8..4dc164fe9c1b 100644 --- a/numpy/_core/src/common/dlpack/dlpack.h +++ b/numpy/_core/src/common/dlpack/dlpack.h @@ -270,7 +270,7 @@ typedef struct DLManagedTensor { void (*deleter)(struct DLManagedTensor * self); } DLManagedTensor; -// bit masks used in in the DLManagedTensorVersioned +// bit masks used in the DLManagedTensorVersioned /*! \brief bit mask to indicate that the tensor is read only. */ #define DLPACK_FLAG_BITMASK_READ_ONLY (1UL << 0UL) diff --git a/numpy/_core/src/multiarray/alloc.h b/numpy/_core/src/multiarray/alloc.h index f5600c99aaa5..8cd763f971ed 100644 --- a/numpy/_core/src/multiarray/alloc.h +++ b/numpy/_core/src/multiarray/alloc.h @@ -75,7 +75,7 @@ _npy_init_workspace( /* * Helper definition macro for a small work/scratchspace. - * The `NAME` is the C array to to be defined of with the type `TYPE`. + * The `NAME` is the C array to be defined of with the type `TYPE`. * * The usage pattern for this is: * From 888c4ff4be6adbada62bbb4a35523c05f9cff663 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Wed, 2 Jul 2025 10:00:14 +0100 Subject: [PATCH 0179/1718] TYP: Add type annotations for `MaskedArray.__{pow,rpow}__` (#29277) --- numpy/__init__.pyi | 2 + numpy/ma/core.pyi | 69 +++++++++++++++++++++- numpy/typing/tests/data/reveal/ma.pyi | 84 +++++++++++++++++++++++++++ 3 files changed, 153 insertions(+), 2 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index d071f3e6b92d..6f52d1e37ce6 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -3230,6 +3230,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __rfloordiv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... 
+ # Keep in sync with `MaskedArray.__pow__` @overload def __pow__(self: NDArray[_NumberT], other: int | np.bool, mod: None = None, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... @overload @@ -3263,6 +3264,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __pow__(self: NDArray[Any], other: _ArrayLikeObject_co, mod: None = None, /) -> Any: ... + # Keep in sync with `MaskedArray.__rpow__` @overload def __rpow__(self: NDArray[_NumberT], other: int | np.bool, mod: None = None, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... @overload diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 3ca5d9be0b97..5d29577b308b 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -866,8 +866,73 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def __rfloordiv__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... - def __pow__(self, other, mod: None = None, /): ... - def __rpow__(self, other, mod: None = None, /): ... + # Keep in sync with `ndarray.__pow__` + @overload + def __pow__(self: _MaskedArray[_NumberT], other: int | np.bool, mod: None = None, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __pow__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, mod: None = None, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __pow__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, mod: None = None, /) -> _MaskedArray[int8]: ... # type: ignore[overload-overlap] + @overload + def __pow__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], mod: None = None, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __pow__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, mod: None = None, /) -> _MaskedArray[float64]: ... + @overload + def __pow__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], mod: None = None, /) -> _MaskedArray[float64]: ... 
+ @overload + def __pow__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, mod: None = None, /) -> _MaskedArray[complex128]: ... + @overload + def __pow__( + self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], mod: None = None, / + ) -> _MaskedArray[complex128]: ... + @overload + def __pow__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, mod: None = None, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __pow__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, mod: None = None, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __pow__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, mod: None = None, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __pow__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, mod: None = None, /) -> _MaskedArray[complexfloating]: ... + @overload + def __pow__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, mod: None = None, /) -> _MaskedArray[number]: ... + @overload + def __pow__(self: _MaskedArray[object_], other: Any, mod: None = None, /) -> Any: ... + @overload + def __pow__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, mod: None = None, /) -> Any: ... + + # Keep in sync with `ndarray.__rpow__` + @overload + def __rpow__(self: _MaskedArray[_NumberT], other: int | np.bool, mod: None = None, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __rpow__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, mod: None = None, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __rpow__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, mod: None = None, /) -> _MaskedArray[int8]: ... # type: ignore[overload-overlap] + @overload + def __rpow__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], mod: None = None, /) -> _MaskedArray[_NumberT]: ... 
# type: ignore[overload-overlap] + @overload + def __rpow__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, mod: None = None, /) -> _MaskedArray[float64]: ... + @overload + def __rpow__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], mod: None = None, /) -> _MaskedArray[float64]: ... + @overload + def __rpow__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, mod: None = None, /) -> _MaskedArray[complex128]: ... + @overload + def __rpow__( + self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], mod: None = None, / + ) -> _MaskedArray[complex128]: ... + @overload + def __rpow__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, mod: None = None, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rpow__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, mod: None = None, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rpow__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, mod: None = None, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __rpow__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, mod: None = None, /) -> _MaskedArray[complexfloating]: ... + @overload + def __rpow__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, mod: None = None, /) -> _MaskedArray[number]: ... + @overload + def __rpow__(self: _MaskedArray[object_], other: Any, mod: None = None, /) -> Any: ... + @overload + def __rpow__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, mod: None = None, /) -> Any: ... 
# Keep in sync with `ndarray.__iadd__` @overload diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index 48311a7e87fa..f2e9b136259a 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -884,3 +884,87 @@ assert_type(AR_LIKE_f // MAR_o, Any) assert_type(AR_LIKE_td64 // MAR_o, Any) assert_type(AR_LIKE_dt64 // MAR_o, Any) assert_type(AR_LIKE_o // MAR_o, Any) + +# Masked Array power + +assert_type(MAR_b ** AR_LIKE_u, MaskedArray[np.uint32]) +assert_type(MAR_b ** AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_b ** AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_b ** AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_b ** AR_LIKE_o, Any) + +assert_type(AR_LIKE_u ** MAR_b, MaskedArray[np.uint32]) +assert_type(AR_LIKE_i ** MAR_b, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f ** MAR_b, MaskedArray[np.floating]) +assert_type(AR_LIKE_c ** MAR_b, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_o ** MAR_b, Any) + +assert_type(MAR_u4 ** AR_LIKE_b, MaskedArray[np.uint32]) +assert_type(MAR_u4 ** AR_LIKE_u, MaskedArray[np.unsignedinteger]) +assert_type(MAR_u4 ** AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_u4 ** AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_u4 ** AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_u4 ** AR_LIKE_o, Any) + +assert_type(AR_LIKE_b ** MAR_u4 , MaskedArray[np.uint32]) +assert_type(AR_LIKE_u ** MAR_u4 , MaskedArray[np.unsignedinteger]) +assert_type(AR_LIKE_i ** MAR_u4 , MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f ** MAR_u4 , MaskedArray[np.floating]) +assert_type(AR_LIKE_c ** MAR_u4 , MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_o ** MAR_u4 , Any) + +assert_type(MAR_i8 ** AR_LIKE_b, MaskedArray[np.int64]) +assert_type(MAR_i8 ** AR_LIKE_u, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 ** AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 ** AR_LIKE_f, 
MaskedArray[np.floating]) +assert_type(MAR_i8 ** AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_i8 ** AR_LIKE_o, Any) +assert_type(MAR_i8 ** AR_LIKE_b, MaskedArray[np.int64]) + +assert_type(AR_LIKE_u ** MAR_i8 , MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_i ** MAR_i8 , MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f ** MAR_i8 , MaskedArray[np.floating]) +assert_type(AR_LIKE_c ** MAR_i8 , MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_o ** MAR_i8 , Any) + +assert_type(MAR_f8 ** AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_f8 ** AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_f8 ** AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_f8 ** AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_f8 ** AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_f8 ** AR_LIKE_o, Any) + +assert_type(AR_LIKE_b ** MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_u ** MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_i ** MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_f ** MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_c ** MAR_f8, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_o ** MAR_f8, Any) + +assert_type(MAR_c16 ** AR_LIKE_b, MaskedArray[np.complex128]) +assert_type(MAR_c16 ** AR_LIKE_u, MaskedArray[np.complex128]) +assert_type(MAR_c16 ** AR_LIKE_i, MaskedArray[np.complex128]) +assert_type(MAR_c16 ** AR_LIKE_f, MaskedArray[np.complex128]) +assert_type(MAR_c16 ** AR_LIKE_c, MaskedArray[np.complex128]) +assert_type(MAR_c16 ** AR_LIKE_o, Any) + +assert_type(AR_LIKE_b ** MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_u ** MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_i ** MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_f ** MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_c ** MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_o ** MAR_c16, Any) + +assert_type(MAR_o ** AR_LIKE_b, Any) +assert_type(MAR_o ** AR_LIKE_u, Any) +assert_type(MAR_o ** 
AR_LIKE_i, Any) +assert_type(MAR_o ** AR_LIKE_f, Any) +assert_type(MAR_o ** AR_LIKE_c, Any) +assert_type(MAR_o ** AR_LIKE_o, Any) + +assert_type(AR_LIKE_b ** MAR_o, Any) +assert_type(AR_LIKE_u ** MAR_o, Any) +assert_type(AR_LIKE_i ** MAR_o, Any) +assert_type(AR_LIKE_f ** MAR_o, Any) +assert_type(AR_LIKE_c ** MAR_o, Any) +assert_type(AR_LIKE_o ** MAR_o, Any) From 34d12a963351a0ec1a58e42f37592e49d4f7eed7 Mon Sep 17 00:00:00 2001 From: Shyok Mutsuddi Date: Wed, 2 Jul 2025 11:05:30 +0000 Subject: [PATCH 0180/1718] test: add regression test for grammar in ufunc TypeError message --- numpy/_core/tests/test_umath.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py index c54b2ac86bc2..b5145e9d642b 100644 --- a/numpy/_core/tests/test_umath.py +++ b/numpy/_core/tests/test_umath.py @@ -4917,3 +4917,12 @@ def test_ufunc_arg(self): @pytest.mark.filterwarnings("ignore:_add_newdoc_ufunc:DeprecationWarning") def test_string_arg(self): assert_raises(TypeError, ncu._add_newdoc_ufunc, np.add, 3) + +class TestHypotErrorMessages: + def test_hypot_error_message_single_arg(self): + with pytest.raises(TypeError, match="hypot\\(\\) takes .* but 1 was given"): + np.hypot(5) + + def test_hypot_error_message_multiple_args(self): + with pytest.raises(TypeError, match="hypot\\(\\) takes .* but 4 were given"): + np.hypot(1, 2, 3, 4) From 99f894a76ebf91638d05c8611d4e0bc6f1ec9b78 Mon Sep 17 00:00:00 2001 From: Shyok Mutsuddi Date: Wed, 2 Jul 2025 11:06:34 +0000 Subject: [PATCH 0181/1718] fix: correct singular/plural grammar in ufunc TypeError message --- numpy/_core/src/common/npy_argparse.c | 10 ++++++---- numpy/_core/src/umath/ufunc_object.c | 7 ++++--- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/numpy/_core/src/common/npy_argparse.c b/numpy/_core/src/common/npy_argparse.c index 6766b17043ac..aa011be9c585 100644 --- a/numpy/_core/src/common/npy_argparse.c +++ b/numpy/_core/src/common/npy_argparse.c @@ 
-243,16 +243,18 @@ static int raise_incorrect_number_of_positional_args(const char *funcname, const _NpyArgParserCache *cache, Py_ssize_t len_args) { + const char *verb = (len_args == 1) ? "was" : "were"; if (cache->npositional == cache->nrequired) { PyErr_Format(PyExc_TypeError, - "%s() takes %d positional arguments but %zd were given", - funcname, cache->npositional, len_args); + "%s() takes %d positional arguments but %zd %s given", + funcname, cache->npositional, len_args, verb); } else { PyErr_Format(PyExc_TypeError, "%s() takes from %d to %d positional arguments but " - "%zd were given", - funcname, cache->nrequired, cache->npositional, len_args); + "%zd %s given", + funcname, cache->nrequired, cache->npositional, + len_args, verb); } return -1; } diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index 4cdde8d3d77d..485364af1ff2 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -4319,10 +4319,11 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, /* Check number of arguments */ if (NPY_UNLIKELY((len_args < nin) || (len_args > nop))) { + const char *verb = (len_args == 1) ? 
"was" : "were"; PyErr_Format(PyExc_TypeError, - "%s() takes from %d to %d positional arguments but " - "%zd were given", - ufunc_get_name_cstr(ufunc) , nin, nop, len_args); + "%s() takes from %d to %d positional arguments but " + "%zd %s given", + ufunc_get_name_cstr(ufunc), nin, nop, len_args, verb); goto fail; } From a04953939d91490103d308491479217ef4a9802b Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 2 Jul 2025 13:10:01 +0200 Subject: [PATCH 0182/1718] Update numpy/_core/tests/test_deprecations.py Co-authored-by: Matti Picus --- numpy/_core/tests/test_deprecations.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index df27a1f9d076..b83a2ac610ee 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -458,7 +458,7 @@ def test_deprecated(self): class TestDTypeAlignBool(_VisibleDeprecationTestCase): # Deprecated in Numpy 2.4, 2025-07 # NOTE: As you can see, finalizing this deprecation breaks some (very) old - # pickle files. This may be fine, but sneeds to be done with some care since + # pickle files. This may be fine, but needs to be done with some care since # it breaks all of them and not just some. # (Maybe it should be a 3.0 or only after warning more explicitly around pickles.) 
message = r"dtype\(\): align should be passed as Python or NumPy boolean but got " From 0578bdfe74c914b602ba17fef833dd5c5b2ac546 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Wed, 2 Jul 2025 15:19:28 +0100 Subject: [PATCH 0183/1718] TYP: Type ``MaskedArray.{trace,round,cumsum,cumprod}`` (#29307) --- numpy/__init__.pyi | 4 ++ numpy/ma/core.pyi | 63 +++++++++++++++++++++++++-- numpy/typing/tests/data/reveal/ma.pyi | 12 +++++ 3 files changed, 75 insertions(+), 4 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 6f52d1e37ce6..6bcbe5b68662 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1741,6 +1741,7 @@ class _ArrayOrScalarCommon: @overload def argmin(self, /, axis: SupportsIndex | None = None, *, out: _BoolOrIntArrayT, keepdims: builtins.bool = False) -> _BoolOrIntArrayT: ... + # Keep in sync with `MaskedArray.round` @overload # out=None (default) def round(self, /, decimals: SupportsIndex = 0, out: None = None) -> Self: ... @overload # out=ndarray @@ -1776,6 +1777,7 @@ class _ArrayOrScalarCommon: @overload def compress(self, /, condition: _ArrayLikeInt_co, axis: SupportsIndex | None = None, *, out: _ArrayT) -> _ArrayT: ... + # Keep in sync with `MaskedArray.cumprod` @overload # out: None (default) def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> NDArray[Any]: ... @overload # out: ndarray @@ -1783,6 +1785,7 @@ class _ArrayOrScalarCommon: @overload def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... + # Keep in sync with `MaskedArray.cumsum` @overload # out: None (default) def cumsum(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> NDArray[Any]: ... 
@overload # out: ndarray @@ -2385,6 +2388,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): stable: bool | None = ..., ) -> None: ... + # Keep in sync with `MaskedArray.trace` @overload def trace( self, # >= 2D array diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 5d29577b308b..3fad41a1434e 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -46,6 +46,7 @@ from numpy import ( from numpy._globals import _NoValueType from numpy._typing import ( ArrayLike, + DTypeLike, NDArray, _32Bit, _64Bit, @@ -1168,18 +1169,72 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): ) -> _ArrayT: ... def nonzero(self) -> tuple[_Array1D[intp], ...]: ... - def trace(self, offset=..., axis1=..., axis2=..., dtype=..., out=...): ... + + # Keep in sync with `ndarray.trace` + @overload + def trace( + self, # >= 2D MaskedArray + offset: SupportsIndex = ..., + axis1: SupportsIndex = ..., + axis2: SupportsIndex = ..., + dtype: DTypeLike = ..., + out: None = ..., + ) -> Any: ... + @overload + def trace( + self, # >= 2D MaskedArray + offset: SupportsIndex = ..., + axis1: SupportsIndex = ..., + axis2: SupportsIndex = ..., + dtype: DTypeLike = ..., + *, + out: _ArrayT, + ) -> _ArrayT: ... + @overload + def trace( + self, # >= 2D MaskedArray + offset: SupportsIndex, + axis1: SupportsIndex, + axis2: SupportsIndex, + dtype: DTypeLike, + out: _ArrayT, + ) -> _ArrayT: ... + def dot(self, b, out=..., strict=...): ... def sum(self, axis=..., dtype=..., out=..., keepdims=...): ... - def cumsum(self, axis=..., dtype=..., out=...): ... + + # Keep in sync with `ndarray.cumsum` + @overload # out: None (default) + def cumsum(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> _MaskedArray[Any]: ... + @overload # out: ndarray + def cumsum(self, /, axis: SupportsIndex | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... 
+ @overload + def cumsum(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... + def prod(self, axis=..., dtype=..., out=..., keepdims=...): ... product: Any - def cumprod(self, axis=..., dtype=..., out=...): ... + + # Keep in sync with `ndarray.cumprod` + @overload # out: None (default) + def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> _MaskedArray[Any]: ... + @overload # out: ndarray + def cumprod(self, /, axis: SupportsIndex | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... + @overload + def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... + def mean(self, axis=..., dtype=..., out=..., keepdims=...): ... def anom(self, axis=..., dtype=...): ... def var(self, axis=..., dtype=..., out=..., ddof=..., keepdims=...): ... def std(self, axis=..., dtype=..., out=..., ddof=..., keepdims=...): ... - def round(self, decimals=..., out=...): ... + + # Keep in sync with `ndarray.round` + @overload # out=None (default) + def round(self, /, decimals: SupportsIndex = 0, out: None = None) -> Self: ... + @overload # out=ndarray + def round(self, /, decimals: SupportsIndex, out: _ArrayT) -> _ArrayT: ... + @overload + def round(self, /, decimals: SupportsIndex = 0, *, out: _ArrayT) -> _ArrayT: ... + def argsort(self, axis=..., kind=..., order=..., endwith=..., fill_value=..., *, stable=...): ... 
# Keep in-sync with np.ma.argmin diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index f2e9b136259a..9e8ab98ec4ce 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -383,6 +383,18 @@ assert_type(MAR_2d_f4.T, np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32] assert_type(MAR_2d_f4.nonzero(), tuple[_Array1D[np.intp], ...]) assert_type(MAR_2d_f4.nonzero()[0], _Array1D[np.intp]) +assert_type(MAR_f8.trace(), Any) +assert_type(MAR_f8.trace(out=MAR_subclass), MaskedArraySubclass) + +assert_type(MAR_f8.round(), MaskedArray[np.float64]) +assert_type(MAR_f8.round(out=MAR_subclass), MaskedArraySubclass) + +assert_type(MAR_f8.cumprod(), MaskedArray[Any]) +assert_type(MAR_f8.cumprod(out=MAR_subclass), MaskedArraySubclass) + +assert_type(MAR_f8.cumsum(), MaskedArray[Any]) +assert_type(MAR_f8.cumsum(out=MAR_subclass), MaskedArraySubclass) + # Masked Array addition assert_type(MAR_b + AR_LIKE_u, MaskedArray[np.uint32]) From 1cc6113ca186b24d08cb7be94e2834bec4768c07 Mon Sep 17 00:00:00 2001 From: Sachin Shah <39803835+inventshah@users.noreply.github.com> Date: Wed, 2 Jul 2025 11:25:35 -0400 Subject: [PATCH 0184/1718] TYP: add explicit types for np.quantile (#29305) --- numpy/lib/_function_base_impl.pyi | 161 +++++++++++++++++++++++++++++- 1 file changed, 158 insertions(+), 3 deletions(-) diff --git a/numpy/lib/_function_base_impl.pyi b/numpy/lib/_function_base_impl.pyi index 78947b1b3b46..14e48f1cc3fd 100644 --- a/numpy/lib/_function_base_impl.pyi +++ b/numpy/lib/_function_base_impl.pyi @@ -615,6 +615,7 @@ _MethodKind = L[ "nearest", ] +# NOTE: keep in sync with `quantile` @overload def percentile( a: _ArrayLikeFloat_co, @@ -772,9 +773,163 @@ def percentile( weights: _ArrayLikeFloat_co | None = ..., ) -> _ArrayT: ... 
-# NOTE: Not an alias, but they do have identical signatures -# (that we can reuse) -quantile = percentile +# NOTE: keep in sync with `percentile` +@overload +def quantile( + a: _ArrayLikeFloat_co, + q: _FloatLike_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., + *, + weights: _ArrayLikeFloat_co | None = ..., +) -> floating: ... +@overload +def quantile( + a: _ArrayLikeComplex_co, + q: _FloatLike_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., + *, + weights: _ArrayLikeFloat_co | None = ..., +) -> complexfloating: ... +@overload +def quantile( + a: _ArrayLikeTD64_co, + q: _FloatLike_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., + *, + weights: _ArrayLikeFloat_co | None = ..., +) -> timedelta64: ... +@overload +def quantile( + a: _ArrayLikeDT64_co, + q: _FloatLike_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., + *, + weights: _ArrayLikeFloat_co | None = ..., +) -> datetime64: ... +@overload +def quantile( + a: _ArrayLikeObject_co, + q: _FloatLike_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., + *, + weights: _ArrayLikeFloat_co | None = ..., +) -> Any: ... +@overload +def quantile( + a: _ArrayLikeFloat_co, + q: _ArrayLikeFloat_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., + *, + weights: _ArrayLikeFloat_co | None = ..., +) -> NDArray[floating]: ... 
+@overload +def quantile( + a: _ArrayLikeComplex_co, + q: _ArrayLikeFloat_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., + *, + weights: _ArrayLikeFloat_co | None = ..., +) -> NDArray[complexfloating]: ... +@overload +def quantile( + a: _ArrayLikeTD64_co, + q: _ArrayLikeFloat_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., + *, + weights: _ArrayLikeFloat_co | None = ..., +) -> NDArray[timedelta64]: ... +@overload +def quantile( + a: _ArrayLikeDT64_co, + q: _ArrayLikeFloat_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., + *, + weights: _ArrayLikeFloat_co | None = ..., +) -> NDArray[datetime64]: ... +@overload +def quantile( + a: _ArrayLikeObject_co, + q: _ArrayLikeFloat_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., + *, + weights: _ArrayLikeFloat_co | None = ..., +) -> NDArray[object_]: ... +@overload +def quantile( + a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co, + q: _ArrayLikeFloat_co, + axis: _ShapeLike | None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: bool = ..., + *, + weights: _ArrayLikeFloat_co | None = ..., +) -> Any: ... +@overload +def quantile( + a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co, + q: _ArrayLikeFloat_co, + axis: _ShapeLike | None, + out: _ArrayT, + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: bool = ..., + *, + weights: _ArrayLikeFloat_co | None = ..., +) -> _ArrayT: ... 
+@overload +def quantile( + a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co, + q: _ArrayLikeFloat_co, + axis: _ShapeLike | None = ..., + *, + out: _ArrayT, + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: bool = ..., + weights: _ArrayLikeFloat_co | None = ..., +) -> _ArrayT: ... _ScalarT_fm = TypeVar( "_ScalarT_fm", From 75e86ba27ae9f99ee67660204d7ca1e16cb323da Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Wed, 2 Jul 2025 18:38:59 +0100 Subject: [PATCH 0185/1718] DOCS: Fix rendering of ``MaskedArray.anom`` ``dtype`` (#29311) --- numpy/ma/core.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 2860bec848aa..e7d8a20f6c6c 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -5454,8 +5454,8 @@ def anom(self, axis=None, dtype=None): The default is to use the mean of the flattened array as reference. dtype : dtype, optional Type to use in computing the variance. For arrays of integer type - the default is float32; for arrays of float types it is the same as - the array type. + the default is float32; for arrays of float types it is the same as + the array type. 
See Also -------- From 84234016c4676ffe0215f300e5b3f24df62f8efd Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Wed, 2 Jul 2025 22:50:59 +0300 Subject: [PATCH 0186/1718] BLD: remove unused github workflow (#29312) --- .github/workflows/windows_arm64.yml | 208 ---------------------------- 1 file changed, 208 deletions(-) delete mode 100644 .github/workflows/windows_arm64.yml diff --git a/.github/workflows/windows_arm64.yml b/.github/workflows/windows_arm64.yml deleted file mode 100644 index 3eaf02eb062c..000000000000 --- a/.github/workflows/windows_arm64.yml +++ /dev/null @@ -1,208 +0,0 @@ -name: Windows Arm64 - -on: - workflow_dispatch: - -env: - python_version: 3.12 - -concurrency: - group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} - cancel-in-progress: true - -permissions: - contents: read # to fetch code (actions/checkout) - -jobs: - windows_arm: - runs-on: windows-2022 - - # To enable this job on a fork, comment out: - if: github.repository == 'numpy/numpy' - steps: - - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - with: - submodules: recursive - fetch-tags: true - persist-credentials: false - - - name: Setup Python - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 - with: - python-version: ${{env.python_version}} - architecture: x64 - - - name: Install build dependencies from PyPI - run: | - python -m pip install -r requirements/build_requirements.txt - - - name: Prepare python - shell: powershell - run: | - $ErrorActionPreference = "Stop" - - #Detecting python location and version - $PythonDir = (Split-Path -Parent (get-command python).Path) - $PythonVersionParts = ( -split (python -V)) - $PythonVersion = $PythonVersionParts[1] - - #Downloading the package for appropriate python version from nuget - $PythonARM64NugetLink = "https://www.nuget.org/api/v2/package/pythonarm64/$PythonVersion" - $PythonARM64NugetZip = "nuget_python.zip" - $PythonARM64NugetDir = 
"temp_nuget" - Invoke-WebRequest $PythonARM64NugetLink -OutFile $PythonARM64NugetZip - - #Changing the libs folder to enable python libraries to be linked for arm64 - Expand-Archive $PythonARM64NugetZip $PythonARM64NugetDir - Copy-Item $PythonARM64NugetDir\tools\libs\* $PythonDir\libs - Remove-Item -Force -Recurse $PythonARM64NugetDir - Remove-Item -Force $PythonARM64NugetZip - - if ((Test-Path -LiteralPath variable:\LASTEXITCODE)) { exit $LASTEXITCODE } - - - name: Prepare Licence - shell: powershell - run: | - $ErrorActionPreference = "Stop" - - $CurrentDir = (get-location).Path - $LicenseFile = "$CurrentDir\LICENSE.txt" - Set-Content $LicenseFile ([Environment]::NewLine) - Add-Content $LicenseFile "----" - Add-Content $LicenseFile ([Environment]::NewLine) - Add-Content $LicenseFile (Get-Content "$CurrentDir\LICENSES_bundled.txt") - Add-Content $LicenseFile (Get-Content "$CurrentDir\tools\wheels\LICENSE_win32.txt") - - if ((Test-Path -LiteralPath variable:\LASTEXITCODE)) { exit $LASTEXITCODE } - - - name: Wheel build - shell: powershell - run: | - $ErrorActionPreference = "Stop" - - #Creating cross compile script for messon subsystem - $CurrentDir = (get-location) - $CrossScript = "$CurrentDir\arm64_w64.txt" - $CrossScriptContent = - { - [host_machine] - system = 'windows' - subsystem = 'windows' - kernel = 'nt' - cpu_family = 'aarch64' - cpu = 'aarch64' - endian = 'little' - - [binaries] - c='cl.exe' - cpp = 'cl.exe' - - [properties] - sizeof_short = 2 - sizeof_int = 4 - sizeof_long = 4 - sizeof_long_long = 8 - sizeof_float = 4 - sizeof_double = 8 - sizeof_long_double = 8 - sizeof_size_t = 8 - sizeof_wchar_t = 2 - sizeof_off_t = 4 - sizeof_Py_intptr_t = 8 - sizeof_PY_LONG_LONG = 8 - longdouble_format = 'IEEE_DOUBLE_LE' - } - Set-Content $CrossScript $CrossScriptContent.ToString() - - #Setting up cross compilers from MSVC - $Products = 'Community', 'Professional', 'Enterprise', 'BuildTools' | % { "Microsoft.VisualStudio.Product.$_" } - $VsInstallPath = (vswhere 
-products $Products -latest -format json | ConvertFrom-Json).installationPath - $VSVars = (Get-ChildItem -Path $VsInstallPath -Recurse -Filter "vcvarsamd64_arm64.bat").FullName - $ScriptingObj = New-Object -ComObject Scripting.FileSystemObject - $VSVarsShort = $ScriptingObj.GetFile($VSVars).ShortPath - cmd /c "$VSVarsShort && set" | - ForEach-Object { - if ($_ -match "=") { - $Var = $_.split("=") - set-item -force -path "ENV:\$($Var[0])" -value "$($Var[1])" - } - } - - #Building the wheel - pip wheel . --config-settings=setup-args="--cross-file=$CrossScript" - - if ((Test-Path -LiteralPath variable:\LASTEXITCODE)) { exit $LASTEXITCODE } - - - name: Fix wheel - shell: powershell - run: | - $ErrorActionPreference = "Stop" - - #Finding whl file - $CurrentDir = (get-location) - $WhlName = ((Get-ChildItem -Filter "*.whl").FullName) - $ZipWhlName = "$CurrentDir\ZipWhlName.zip" - $UnzippedWhl = "$CurrentDir\unzipedWhl" - - #Expanding whl file - Rename-Item -Path $WhlName $ZipWhlName - if (Test-Path $UnzippedWhl) { - Remove-Item -Force -Recurse $UnzippedWhl - } - Expand-Archive -Force -Path $ZipWhlName $UnzippedWhl - - #Renaming all files to show that their arch is arm64 - Get-ChildItem -Recurse -Path $UnzippedWhl *win_amd64* | Rename-Item -NewName { $_.Name -replace 'win_amd64', 'win_arm64' } - $DIST_DIR = (Get-ChildItem -Recurse -Path $UnzippedWhl *dist-info).FullName - - #Changing amd64 references from metafiles - (GET-Content $DIST_DIR/RECORD) -replace 'win_amd64', 'win_arm64' | Set-Content $DIST_DIR/RECORD - (GET-Content $DIST_DIR/WHEEL) -replace 'win_amd64', 'win_arm64' | Set-Content $DIST_DIR/WHEEL - - #Packing whl file - Compress-Archive -Path $UnzippedWhl\* -DestinationPath $ZipWhlName -Force - $WhlName = $WhlName.Replace("win_amd64", "win_arm64") - Rename-Item -Path $ZipWhlName $WhlName - - if ((Test-Path -LiteralPath variable:\LASTEXITCODE)) { exit $LASTEXITCODE } - - - name: Upload Artifacts - uses: 
actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 - with: - name: ${{ env.python_version }}-win_arm64 - path: ./*.whl - - - name: Setup Mamba - uses: mamba-org/setup-micromamba@b09ef9b599704322748535812ca03efb2625677b - with: - # for installation of anaconda-client, required for upload to - # anaconda.org - # Note that this step is *after* specific pythons have been used to - # build and test the wheel - # for installation of anaconda-client, for upload to anaconda.org - # environment will be activated after creation, and in future bash steps - init-shell: bash - environment-name: upload-env - create-args: >- - anaconda-client - - # - name: Upload wheels - # if: success() - # shell: bash -el {0} - # # see https://github.com/marketplace/actions/setup-miniconda for why - # # `-el {0}` is required. - # env: - # NUMPY_STAGING_UPLOAD_TOKEN: ${{ secrets.NUMPY_STAGING_UPLOAD_TOKEN }} - # NUMPY_NIGHTLY_UPLOAD_TOKEN: ${{ secrets.NUMPY_NIGHTLY_UPLOAD_TOKEN }} - # run: | - # source tools/wheels/upload_wheels.sh - # set_upload_vars - # # trigger an upload to - # # https://anaconda.org/scientific-python-nightly-wheels/numpy - # # for cron jobs or "Run workflow" (restricted to main branch). 
- # # Tags will upload to - # # https://anaconda.org/multibuild-wheels-staging/numpy - # # The tokens were originally generated at anaconda.org - # upload_wheels - From ec8edceb1a8b94883a1e6e474d615d9be0b6b9e7 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Thu, 3 Jul 2025 11:47:22 +0100 Subject: [PATCH 0187/1718] TYP: rename `_T` to `_ScalarT` in `matlib.pyi` for consistency (#29310) --- numpy/matlib.pyi | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/numpy/matlib.pyi b/numpy/matlib.pyi index baeadc078028..93bee1975df3 100644 --- a/numpy/matlib.pyi +++ b/numpy/matlib.pyi @@ -507,8 +507,8 @@ __all__ += np.__all__ ### -_T = TypeVar("_T", bound=np.generic) -_Matrix: TypeAlias = np.matrix[tuple[int, int], np.dtype[_T]] +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +_Matrix: TypeAlias = np.matrix[tuple[int, int], np.dtype[_ScalarT]] _Order: TypeAlias = Literal["C", "F"] ### @@ -517,7 +517,7 @@ _Order: TypeAlias = Literal["C", "F"] @overload def empty(shape: int | tuple[int, int], dtype: None = None, order: _Order = "C") -> _Matrix[np.float64]: ... @overload -def empty(shape: int | tuple[int, int], dtype: _DTypeLike[_T], order: _Order = "C") -> _Matrix[_T]: ... +def empty(shape: int | tuple[int, int], dtype: _DTypeLike[_ScalarT], order: _Order = "C") -> _Matrix[_ScalarT]: ... @overload def empty(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C") -> _Matrix[Any]: ... @@ -525,7 +525,7 @@ def empty(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C @overload def ones(shape: int | tuple[int, int], dtype: None = None, order: _Order = "C") -> _Matrix[np.float64]: ... @overload -def ones(shape: int | tuple[int, int], dtype: _DTypeLike[_T], order: _Order = "C") -> _Matrix[_T]: ... +def ones(shape: int | tuple[int, int], dtype: _DTypeLike[_ScalarT], order: _Order = "C") -> _Matrix[_ScalarT]: ... 
@overload def ones(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C") -> _Matrix[Any]: ... @@ -533,7 +533,7 @@ def ones(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C" @overload def zeros(shape: int | tuple[int, int], dtype: None = None, order: _Order = "C") -> _Matrix[np.float64]: ... @overload -def zeros(shape: int | tuple[int, int], dtype: _DTypeLike[_T], order: _Order = "C") -> _Matrix[_T]: ... +def zeros(shape: int | tuple[int, int], dtype: _DTypeLike[_ScalarT], order: _Order = "C") -> _Matrix[_ScalarT]: ... @overload def zeros(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C") -> _Matrix[Any]: ... @@ -541,7 +541,7 @@ def zeros(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C @overload def identity(n: int, dtype: None = None) -> _Matrix[np.float64]: ... @overload -def identity(n: int, dtype: _DTypeLike[_T]) -> _Matrix[_T]: ... +def identity(n: int, dtype: _DTypeLike[_ScalarT]) -> _Matrix[_ScalarT]: ... @overload def identity(n: int, dtype: npt.DTypeLike | None = None) -> _Matrix[Any]: ... @@ -555,9 +555,9 @@ def eye( order: _Order = "C", ) -> _Matrix[np.float64]: ... @overload -def eye(n: int, M: int | None, k: int, dtype: _DTypeLike[_T], order: _Order = "C") -> _Matrix[_T]: ... +def eye(n: int, M: int | None, k: int, dtype: _DTypeLike[_ScalarT], order: _Order = "C") -> _Matrix[_ScalarT]: ... @overload -def eye(n: int, M: int | None = None, k: int = 0, *, dtype: _DTypeLike[_T], order: _Order = "C") -> _Matrix[_T]: ... +def eye(n: int, M: int | None = None, k: int = 0, *, dtype: _DTypeLike[_ScalarT], order: _Order = "C") -> _Matrix[_ScalarT]: ... @overload def eye(n: int, M: int | None = None, k: int = 0, dtype: npt.DTypeLike = ..., order: _Order = "C") -> _Matrix[Any]: ... @@ -575,8 +575,8 @@ def randn(arg: int, /, *args: int) -> _Matrix[np.float64]: ... # @overload -def repmat(a: _Matrix[_T], m: int, n: int) -> _Matrix[_T]: ... 
+def repmat(a: _Matrix[_ScalarT], m: int, n: int) -> _Matrix[_ScalarT]: ... @overload -def repmat(a: _ArrayLike[_T], m: int, n: int) -> npt.NDArray[_T]: ... +def repmat(a: _ArrayLike[_ScalarT], m: int, n: int) -> npt.NDArray[_ScalarT]: ... @overload def repmat(a: npt.ArrayLike, m: int, n: int) -> npt.NDArray[Any]: ... From 85708b3c0bd3d2fdbcb00cb08d0a7b323ce3af2d Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Thu, 3 Jul 2025 12:05:06 +0100 Subject: [PATCH 0188/1718] TYP: Type ``MaskedArray.dot`` and ``MaskedArray.anom`` (#29309) --- numpy/ma/core.pyi | 16 ++++++++++++++-- numpy/typing/tests/data/reveal/ma.pyi | 9 +++++++++ 2 files changed, 23 insertions(+), 2 deletions(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 3fad41a1434e..ca9826c7d083 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -1200,7 +1200,12 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): out: _ArrayT, ) -> _ArrayT: ... - def dot(self, b, out=..., strict=...): ... + # This differs from `ndarray.dot`, in that 1D dot 1D returns a 0D array. + @overload + def dot(self, b: ArrayLike, out: None = ..., strict: bool = ...) -> _MaskedArray[Any]: ... + @overload + def dot(self, b: ArrayLike, out: _ArrayT, strict: bool = ...) -> _ArrayT: ... + def sum(self, axis=..., dtype=..., out=..., keepdims=...): ... # Keep in sync with `ndarray.cumsum` @@ -1223,7 +1228,14 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... def mean(self, axis=..., dtype=..., out=..., keepdims=...): ... - def anom(self, axis=..., dtype=...): ... + + @overload + def anom(self, axis: SupportsIndex | None = None, dtype: None = None) -> Self: ... + @overload + def anom(self, axis: SupportsIndex | None = None, *, dtype: DTypeLike) -> MaskedArray[_ShapeT_co, dtype]: ... 
+ @overload + def anom(self, axis: SupportsIndex | None, dtype: DTypeLike) -> MaskedArray[_ShapeT_co, dtype]: ... + def var(self, axis=..., dtype=..., out=..., ddof=..., keepdims=...): ... def std(self, axis=..., dtype=..., out=..., ddof=..., keepdims=...): ... diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index 9e8ab98ec4ce..c26dca0ed9ec 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -373,6 +373,11 @@ assert_type(MAR_b.shrink_mask(), MaskedArray[np.bool_]) assert_type(MAR_i8.hardmask, bool) assert_type(MAR_i8.sharedmask, bool) +assert_type(MAR_2d_f4.anom(), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) +assert_type(MAR_2d_f4.anom(axis=0, dtype=np.float16), np.ma.MaskedArray[tuple[int, int], np.dtype]) +assert_type(MAR_2d_f4.anom(0, np.float16), np.ma.MaskedArray[tuple[int, int], np.dtype]) +assert_type(MAR_2d_f4.anom(0, 'float16'), np.ma.MaskedArray[tuple[int, int], np.dtype]) + assert_type(MAR_b.transpose(), MaskedArray[np.bool]) assert_type(MAR_2d_f4.transpose(), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) assert_type(MAR_2d_f4.transpose(1, 0), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) @@ -380,6 +385,10 @@ assert_type(MAR_2d_f4.transpose((1, 0)), np.ma.MaskedArray[tuple[int, int], np.d assert_type(MAR_b.T, MaskedArray[np.bool]) assert_type(MAR_2d_f4.T, np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) +assert_type(MAR_2d_f4.dot(1), MaskedArray[Any]) +assert_type(MAR_2d_f4.dot([1]), MaskedArray[Any]) +assert_type(MAR_2d_f4.dot(1, out=MAR_subclass), MaskedArraySubclass) + assert_type(MAR_2d_f4.nonzero(), tuple[_Array1D[np.intp], ...]) assert_type(MAR_2d_f4.nonzero()[0], _Array1D[np.intp]) From 1dac511d00980b2a30e7fff23312bf84922dc0c2 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 3 Jul 2025 15:37:03 -0600 Subject: [PATCH 0189/1718] MAINT: remove out-of-date comment --- numpy/_core/src/multiarray/mapping.c | 2 
-- 1 file changed, 2 deletions(-) diff --git a/numpy/_core/src/multiarray/mapping.c b/numpy/_core/src/multiarray/mapping.c index 2ce7dcdb234a..7483448e632b 100644 --- a/numpy/_core/src/multiarray/mapping.c +++ b/numpy/_core/src/multiarray/mapping.c @@ -2110,8 +2110,6 @@ array_assign_subscript(PyArrayObject *self, PyObject *ind, PyObject *op) /* May need a generic copy function (only for refs and odd sizes) */ NPY_ARRAYMETHOD_FLAGS transfer_flags; npy_intp itemsize = PyArray_ITEMSIZE(self); - // TODO: the heuristic used here to determine the src_dtype might be subtly wrong - // for non-REFCHK user DTypes. See gh-27057 for the prior discussion about this. if (PyArray_GetDTypeTransferFunction( 1, itemsize, itemsize, descr, PyArray_DESCR(self), From 2273cd8d1e8be9a9ec237f6115cf59d4c071ebd2 Mon Sep 17 00:00:00 2001 From: Dan Raviv Date: Fri, 4 Jul 2025 13:14:00 -0700 Subject: [PATCH 0190/1718] DOC: Fix spelling (#29320) --- doc/source/reference/arrays.classes.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/reference/arrays.classes.rst b/doc/source/reference/arrays.classes.rst index 8a2e804eb36b..80821c2c08fa 100644 --- a/doc/source/reference/arrays.classes.rst +++ b/doc/source/reference/arrays.classes.rst @@ -32,7 +32,7 @@ Note that :func:`asarray` always returns the base-class ndarray. If you are confident that your use of the array object can handle any subclass of an ndarray, then :func:`asanyarray` can be used to allow subclasses to propagate more cleanly through your subroutine. In -principal a subclass could redefine any aspect of the array and +principle, a subclass could redefine any aspect of the array and therefore, under strict guidelines, :func:`asanyarray` would rarely be useful. 
However, most subclasses of the array object will not redefine certain aspects of the array object such as the buffer From c21df31bb298384430c0f99765a55926ff69e39f Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sat, 5 Jul 2025 16:14:09 -0600 Subject: [PATCH 0191/1718] MAINT: Rename nep-0049.rst. Rename to nep-0049-data-allocation-strategies.rst. Closes #29323. --- .../{nep-0049.rst => nep-0049-data-allocation-strategies.rst} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename doc/neps/{nep-0049.rst => nep-0049-data-allocation-strategies.rst} (100%) diff --git a/doc/neps/nep-0049.rst b/doc/neps/nep-0049-data-allocation-strategies.rst similarity index 100% rename from doc/neps/nep-0049.rst rename to doc/neps/nep-0049-data-allocation-strategies.rst From 2a445f43a7574ab1d80ac6a7981b62829a863064 Mon Sep 17 00:00:00 2001 From: Charlie Lin Date: Sun, 6 Jul 2025 20:32:45 -0400 Subject: [PATCH 0192/1718] BLD: update `highway` submodule to latest master Fixes #29130 --- numpy/_core/src/highway | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/highway b/numpy/_core/src/highway index 12b325bc1793..37c08e5528f6 160000 --- a/numpy/_core/src/highway +++ b/numpy/_core/src/highway @@ -1 +1 @@ -Subproject commit 12b325bc1793dee68ab2157995a690db859fe9e0 +Subproject commit 37c08e5528f63ead9c7e4fd99ba454c1b1a3e3f7 From 48bc85af1e8aae77f8325de0babb8bbb24cb2fab Mon Sep 17 00:00:00 2001 From: Dan Raviv Date: Mon, 7 Jul 2025 00:26:33 -0700 Subject: [PATCH 0193/1718] DOC: Clarify assert_allclose differences vs. 
allclose (#29325) Resolves #9988 --- numpy/testing/_private/utils.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index 5f8dfc209283..2d45fa30b8a6 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -1625,9 +1625,10 @@ def assert_allclose(actual, desired, rtol=1e-7, atol=0, equal_nan=True, contrast to the standard usage in numpy, NaNs are compared like numbers, no assertion is raised if both objects have NaNs in the same positions. - The test is equivalent to ``allclose(actual, desired, rtol, atol)`` (note - that ``allclose`` has different default values). It compares the difference - between `actual` and `desired` to ``atol + rtol * abs(desired)``. + The test is equivalent to ``allclose(actual, desired, rtol, atol)``, + except that it is stricter: it doesn't broadcast its operands, and has + tighter default tolerance values. It compares the difference between + `actual` and `desired` to ``atol + rtol * abs(desired)``. Parameters ---------- From 6a6b171ef8123c236f7afc59ff9ce3e011895ed9 Mon Sep 17 00:00:00 2001 From: DWesl <22566757+DWesl@users.noreply.github.com> Date: Mon, 7 Jul 2025 03:29:31 -0400 Subject: [PATCH 0194/1718] TST: Add test for non-npy files in npz and different names (#29313) * TST: Add test for non-npy files in npz and different names Retrieving `a` from an npz file will first check for a file `a`, then for `a.npy`; this checks both. * BUG: Argument to read should be int not str Old behavior is to read everything, which will happen if I pass no argument. 
--------- Co-authored-by: Sebastian Berg --- numpy/lib/_npyio_impl.py | 2 +- numpy/lib/tests/test_io.py | 17 +++++++++++++++++ 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/numpy/lib/_npyio_impl.py b/numpy/lib/_npyio_impl.py index 36ead97a1aae..e9a8509fc685 100644 --- a/numpy/lib/_npyio_impl.py +++ b/numpy/lib/_npyio_impl.py @@ -261,7 +261,7 @@ def __getitem__(self, key): max_header_size=self.max_header_size ) else: - return bytes.read(key) + return bytes.read() def __contains__(self, key): return (key in self._files) diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py index 48f579d7ddc7..05ef0be92211 100644 --- a/numpy/lib/tests/test_io.py +++ b/numpy/lib/tests/test_io.py @@ -7,6 +7,7 @@ import threading import time import warnings +import zipfile from ctypes import c_bool from datetime import datetime from io import BytesIO, StringIO @@ -217,6 +218,22 @@ def roundtrip(self, *args, **kwargs): self.arr_reloaded.fid.close() os.remove(self.arr_reloaded.fid.name) + def test_load_non_npy(self): + """Test loading non-.npy files and name mapping in .npz.""" + with temppath(prefix="numpy_test_npz_load_non_npy_", suffix=".npz") as tmp: + with zipfile.ZipFile(tmp, "w") as npz: + with npz.open("test1.npy", "w") as out_file: + np.save(out_file, np.arange(10)) + with npz.open("test2", "w") as out_file: + np.save(out_file, np.arange(10)) + with npz.open("metadata", "w") as out_file: + out_file.write(b"Name: Test") + with np.load(tmp) as npz: + assert len(npz["test1"]) == 10 + assert len(npz["test1.npy"]) == 10 + assert len(npz["test2"]) == 10 + assert npz["metadata"] == b"Name: Test" + @pytest.mark.skipif(IS_PYPY, reason="Hangs on PyPy") @pytest.mark.skipif(not IS_64BIT, reason="Needs 64bit platform") @pytest.mark.slow From d844b79201d966ae3668f12386d0a585ea766630 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Mon, 7 Jul 2025 12:42:13 +0100 Subject: [PATCH 0195/1718] TYP: `svd` overload 
incorrectly noted `Literal[False]` to be the default for `compute_uv` (#29331) --- numpy/linalg/_linalg.pyi | 20 ++++++++++++++++++-- numpy/typing/tests/data/reveal/linalg.pyi | 2 ++ 2 files changed, 20 insertions(+), 2 deletions(-) diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index 68bd6d933921..e9b4cc0eb0e4 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -277,14 +277,30 @@ def svd( def svd( a: _ArrayLikeInt_co, full_matrices: bool = ..., - compute_uv: L[False] = ..., + *, + compute_uv: L[False], + hermitian: bool = ..., +) -> NDArray[float64]: ... +@overload +def svd( + a: _ArrayLikeInt_co, + full_matrices: bool, + compute_uv: L[False], hermitian: bool = ..., ) -> NDArray[float64]: ... @overload def svd( a: _ArrayLikeComplex_co, full_matrices: bool = ..., - compute_uv: L[False] = ..., + *, + compute_uv: L[False], + hermitian: bool = ..., +) -> NDArray[floating]: ... +@overload +def svd( + a: _ArrayLikeComplex_co, + full_matrices: bool, + compute_uv: L[False], hermitian: bool = ..., ) -> NDArray[floating]: ... 
diff --git a/numpy/typing/tests/data/reveal/linalg.pyi b/numpy/typing/tests/data/reveal/linalg.pyi index 417fb0d8c558..663fb888f012 100644 --- a/numpy/typing/tests/data/reveal/linalg.pyi +++ b/numpy/typing/tests/data/reveal/linalg.pyi @@ -74,8 +74,10 @@ assert_type(np.linalg.svd(AR_i8), SVDResult) assert_type(np.linalg.svd(AR_f8), SVDResult) assert_type(np.linalg.svd(AR_c16), SVDResult) assert_type(np.linalg.svd(AR_i8, compute_uv=False), npt.NDArray[np.float64]) +assert_type(np.linalg.svd(AR_i8, True, False), npt.NDArray[np.float64]) assert_type(np.linalg.svd(AR_f8, compute_uv=False), npt.NDArray[np.floating]) assert_type(np.linalg.svd(AR_c16, compute_uv=False), npt.NDArray[np.floating]) +assert_type(np.linalg.svd(AR_c16, True, False), npt.NDArray[np.floating]) assert_type(np.linalg.cond(AR_i8), Any) assert_type(np.linalg.cond(AR_f8), Any) From 85cb82c79c4923f1e3289f80498064f1b0c70f25 Mon Sep 17 00:00:00 2001 From: polaris-3 <214435818+polaris-3@users.noreply.github.com> Date: Mon, 7 Jul 2025 16:41:49 +0200 Subject: [PATCH 0196/1718] ENH: Improve error message in numpy.testing.assert_array_compare (#29112) Improve the error message in numpy.testing.assert_array_compare to show the position of differing elements. Currently, if there is an assertion error in functions like numpy.testing.assert_allclose, both arrays are printed. Typically the arrays are so large that they are truncated before being printed, making it hard to spot the differences. I suggest to print the indices of the differing elements as well as the corresponding values. To avoid excessive output, the number of printed out differences is limited to five. 
* MAINT: Different error messages for different numbers of mismatches --- .../upcoming_changes/29112.improvement.rst | 5 ++ numpy/testing/_private/utils.py | 33 ++++++++ numpy/testing/tests/test_utils.py | 83 +++++++++++++++++++ 3 files changed, 121 insertions(+) create mode 100644 doc/release/upcoming_changes/29112.improvement.rst diff --git a/doc/release/upcoming_changes/29112.improvement.rst b/doc/release/upcoming_changes/29112.improvement.rst new file mode 100644 index 000000000000..01baa668b9fe --- /dev/null +++ b/doc/release/upcoming_changes/29112.improvement.rst @@ -0,0 +1,5 @@ +Improved error message for `assert_array_compare` +------------------------------------------------- +The error message generated by `assert_array_compare` which is used by functions +like `assert_allclose`, `assert_array_less` etc. now also includes information +about the indices at which the assertion fails. \ No newline at end of file diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index 2d45fa30b8a6..78f9e9a004b1 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -574,6 +574,8 @@ def assert_almost_equal(actual, desired, decimal=7, err_msg='', verbose=True): Arrays are not almost equal to 9 decimals Mismatched elements: 1 / 2 (50%) + Mismatch at index: + [1]: 2.3333333333333 (ACTUAL), 2.33333334 (DESIRED) Max absolute difference among violations: 6.66669964e-09 Max relative difference among violations: 2.85715698e-09 ACTUAL: array([1. 
, 2.333333333]) @@ -875,6 +877,31 @@ def func_assert_same_pos(x, y, func=isnan, hasval='nan'): percent_mismatch = 100 * n_mismatch / n_elements remarks = [f'Mismatched elements: {n_mismatch} / {n_elements} ' f'({percent_mismatch:.3g}%)'] + if invalids.ndim != 0: + if flagged.ndim > 0: + positions = np.argwhere(np.asarray(~flagged))[invalids] + else: + positions = np.argwhere(np.asarray(invalids)) + s = "\n".join( + [ + f" {p.tolist()}: {ox if ox.ndim == 0 else ox[tuple(p)]} " + f"({names[0]}), {oy if oy.ndim == 0 else oy[tuple(p)]} " + f"({names[1]})" + for p in positions[:5] + ] + ) + if len(positions) == 1: + remarks.append( + f"Mismatch at index:\n{s}" + ) + elif len(positions) <= 5: + remarks.append( + f"Mismatch at indices:\n{s}" + ) + else: + remarks.append( + f"First 5 mismatches are at indices:\n{s}" + ) with errstate(all='ignore'): # ignore errors for non-numeric types @@ -1013,6 +1040,8 @@ def assert_array_equal(actual, desired, err_msg='', verbose=True, *, Arrays are not equal Mismatched elements: 1 / 3 (33.3%) + Mismatch at index: + [1]: 3.141592653589793 (ACTUAL), 3.1415926535897927 (DESIRED) Max absolute difference among violations: 4.4408921e-16 Max relative difference among violations: 1.41357986e-16 ACTUAL: array([1. , 3.141593, nan]) @@ -1126,6 +1155,8 @@ def assert_array_almost_equal(actual, desired, decimal=6, err_msg='', Arrays are not almost equal to 5 decimals Mismatched elements: 1 / 3 (33.3%) + Mismatch at index: + [1]: 2.33333 (ACTUAL), 2.33339 (DESIRED) Max absolute difference among violations: 6.e-05 Max relative difference among violations: 2.57136612e-05 ACTUAL: array([1. , 2.33333, nan]) @@ -1247,6 +1278,8 @@ def assert_array_less(x, y, err_msg='', verbose=True, *, strict=False): Arrays are not strictly ordered `x < y` Mismatched elements: 1 / 3 (33.3%) + Mismatch at index: + [0]: 1.0 (x), 1.0 (y) Max absolute difference among violations: 0. Max relative difference among violations: 0. 
x: array([ 1., 1., nan]) diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py index fcf20091ca8e..b09df821680b 100644 --- a/numpy/testing/tests/test_utils.py +++ b/numpy/testing/tests/test_utils.py @@ -265,6 +265,8 @@ def test_array_vs_array_not_equal(self): b = np.array([34986, 545676, 439655, 0]) expected_msg = ('Mismatched elements: 1 / 4 (25%)\n' + 'Mismatch at index:\n' + ' [3]: 563766 (ACTUAL), 0 (DESIRED)\n' 'Max absolute difference among violations: 563766\n' 'Max relative difference among violations: inf') with pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -272,6 +274,9 @@ def test_array_vs_array_not_equal(self): a = np.array([34986, 545676, 439655.2, 563766]) expected_msg = ('Mismatched elements: 2 / 4 (50%)\n' + 'Mismatch at indices:\n' + ' [2]: 439655.2 (ACTUAL), 439655 (DESIRED)\n' + ' [3]: 563766.0 (ACTUAL), 0 (DESIRED)\n' 'Max absolute difference among violations: ' '563766.\n' 'Max relative difference among violations: ' @@ -466,6 +471,8 @@ def test_closeness(self): self._assert_func([1.499999], [0.0], decimal=0) expected_msg = ('Mismatched elements: 1 / 1 (100%)\n' + 'Mismatch at index:\n' + ' [0]: 1.5 (ACTUAL), 0.0 (DESIRED)\n' 'Max absolute difference among violations: 1.5\n' 'Max relative difference among violations: inf') with pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -474,12 +481,16 @@ def test_closeness(self): a = [1.4999999, 0.00003] b = [1.49999991, 0] expected_msg = ('Mismatched elements: 1 / 2 (50%)\n' + 'Mismatch at index:\n' + ' [1]: 3e-05 (ACTUAL), 0.0 (DESIRED)\n' 'Max absolute difference among violations: 3.e-05\n' 'Max relative difference among violations: inf') with pytest.raises(AssertionError, match=re.escape(expected_msg)): self._assert_func(a, b, decimal=7) expected_msg = ('Mismatched elements: 1 / 2 (50%)\n' + 'Mismatch at index:\n' + ' [1]: 0.0 (ACTUAL), 3e-05 (DESIRED)\n' 'Max absolute difference among violations: 3.e-05\n' 'Max relative difference among 
violations: 1.') with pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -493,6 +504,8 @@ def test_simple(self): self._assert_func(x, y, decimal=4) expected_msg = ('Mismatched elements: 1 / 1 (100%)\n' + 'Mismatch at index:\n' + ' [0]: 1234.2222 (ACTUAL), 1234.2223 (DESIRED)\n' 'Max absolute difference among violations: ' '1.e-04\n' 'Max relative difference among violations: ' @@ -504,6 +517,9 @@ def test_array_vs_scalar(self): a = [5498.42354, 849.54345, 0.00] b = 5498.42354 expected_msg = ('Mismatched elements: 2 / 3 (66.7%)\n' + 'Mismatch at indices:\n' + ' [1]: 849.54345 (ACTUAL), 5498.42354 (DESIRED)\n' + ' [2]: 0.0 (ACTUAL), 5498.42354 (DESIRED)\n' 'Max absolute difference among violations: ' '5498.42354\n' 'Max relative difference among violations: 1.') @@ -511,6 +527,9 @@ def test_array_vs_scalar(self): self._assert_func(a, b, decimal=9) expected_msg = ('Mismatched elements: 2 / 3 (66.7%)\n' + 'Mismatch at indices:\n' + ' [1]: 5498.42354 (ACTUAL), 849.54345 (DESIRED)\n' + ' [2]: 5498.42354 (ACTUAL), 0.0 (DESIRED)\n' 'Max absolute difference among violations: ' '5498.42354\n' 'Max relative difference among violations: 5.4722099') @@ -519,6 +538,8 @@ def test_array_vs_scalar(self): a = [5498.42354, 0.00] expected_msg = ('Mismatched elements: 1 / 2 (50%)\n' + 'Mismatch at index:\n' + ' [1]: 5498.42354 (ACTUAL), 0.0 (DESIRED)\n' 'Max absolute difference among violations: ' '5498.42354\n' 'Max relative difference among violations: inf') @@ -527,6 +548,8 @@ def test_array_vs_scalar(self): b = 0 expected_msg = ('Mismatched elements: 1 / 2 (50%)\n' + 'Mismatch at index:\n' + ' [0]: 5498.42354 (ACTUAL), 0 (DESIRED)\n' 'Max absolute difference among violations: ' '5498.42354\n' 'Max relative difference among violations: inf') @@ -603,6 +626,8 @@ def all(self, *args, **kwargs): all(z) b = np.array([1., 202]).view(MyArray) expected_msg = ('Mismatched elements: 1 / 2 (50%)\n' + 'Mismatch at index:\n' + ' [1]: 2.0 (ACTUAL), 202.0 (DESIRED)\n' 'Max absolute 
difference among violations: 200.\n' 'Max relative difference among violations: 0.99009') with pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -693,6 +718,10 @@ def test_error_message(self): # Test with a different amount of decimal digits expected_msg = ('Mismatched elements: 3 / 3 (100%)\n' + 'Mismatch at indices:\n' + ' [0]: 1.00000000001 (ACTUAL), 1.00000000002 (DESIRED)\n' + ' [1]: 2.00000000002 (ACTUAL), 2.00000000003 (DESIRED)\n' + ' [2]: 3.00003 (ACTUAL), 3.00004 (DESIRED)\n' 'Max absolute difference among violations: 1.e-05\n' 'Max relative difference among violations: ' '3.33328889e-06\n' @@ -708,6 +737,8 @@ def test_error_message(self): # differs. Note that we only check for the formatting of the arrays # themselves. expected_msg = ('Mismatched elements: 1 / 3 (33.3%)\n' + 'Mismatch at index:\n' + ' [2]: 3.00003 (ACTUAL), 3.00004 (DESIRED)\n' 'Max absolute difference among violations: 1.e-05\n' 'Max relative difference among violations: ' '3.33328889e-06\n' @@ -720,6 +751,8 @@ def test_error_message(self): x = np.array([np.inf, 0]) y = np.array([np.inf, 1]) expected_msg = ('Mismatched elements: 1 / 2 (50%)\n' + 'Mismatch at index:\n' + ' [1]: 0.0 (ACTUAL), 1.0 (DESIRED)\n' 'Max absolute difference among violations: 1.\n' 'Max relative difference among violations: 1.\n' ' ACTUAL: array([inf, 0.])\n' @@ -731,6 +764,9 @@ def test_error_message(self): x = np.array([1, 2]) y = np.array([0, 0]) expected_msg = ('Mismatched elements: 2 / 2 (100%)\n' + 'Mismatch at indices:\n' + ' [0]: 1 (ACTUAL), 0 (DESIRED)\n' + ' [1]: 2 (ACTUAL), 0 (DESIRED)\n' 'Max absolute difference among violations: 2\n' 'Max relative difference among violations: inf') with pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -742,6 +778,12 @@ def test_error_message_2(self): x = 2 y = np.ones(20) expected_msg = ('Mismatched elements: 20 / 20 (100%)\n' + 'First 5 mismatches are at indices:\n' + ' [0]: 2 (ACTUAL), 1.0 (DESIRED)\n' + ' [1]: 2 (ACTUAL), 1.0 
(DESIRED)\n' + ' [2]: 2 (ACTUAL), 1.0 (DESIRED)\n' + ' [3]: 2 (ACTUAL), 1.0 (DESIRED)\n' + ' [4]: 2 (ACTUAL), 1.0 (DESIRED)\n' 'Max absolute difference among violations: 1.\n' 'Max relative difference among violations: 1.') with pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -750,6 +792,12 @@ def test_error_message_2(self): y = 2 x = np.ones(20) expected_msg = ('Mismatched elements: 20 / 20 (100%)\n' + 'First 5 mismatches are at indices:\n' + ' [0]: 1.0 (ACTUAL), 2 (DESIRED)\n' + ' [1]: 1.0 (ACTUAL), 2 (DESIRED)\n' + ' [2]: 1.0 (ACTUAL), 2 (DESIRED)\n' + ' [3]: 1.0 (ACTUAL), 2 (DESIRED)\n' + ' [4]: 1.0 (ACTUAL), 2 (DESIRED)\n' 'Max absolute difference among violations: 1.\n' 'Max relative difference among violations: 0.5') with pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -838,6 +886,9 @@ def test_simple_arrays(self): b = np.array([2, 4, 6, 8]) expected_msg = ('Mismatched elements: 2 / 4 (50%)\n' + 'Mismatch at indices:\n' + ' [2]: 6 (x), 6 (y)\n' + ' [3]: 20 (x), 8 (y)\n' 'Max absolute difference among violations: 12\n' 'Max relative difference among violations: 1.5') with pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -849,6 +900,11 @@ def test_rank2(self): self._assert_func(x, y) expected_msg = ('Mismatched elements: 4 / 4 (100%)\n' + 'Mismatch at indices:\n' + ' [0, 0]: 1.2 (x), 1.1 (y)\n' + ' [0, 1]: 2.3 (x), 2.2 (y)\n' + ' [1, 0]: 3.4 (x), 3.3 (y)\n' + ' [1, 1]: 4.5 (x), 4.4 (y)\n' 'Max absolute difference among violations: 0.1\n' 'Max relative difference among violations: 0.09090909') with pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -867,6 +923,8 @@ def test_rank3(self): y[0, 0, 0] = 0 expected_msg = ('Mismatched elements: 1 / 8 (12.5%)\n' + 'Mismatch at index:\n' + ' [0, 0, 0]: 1.0 (x), 0.0 (y)\n' 'Max absolute difference among violations: 1.\n' 'Max relative difference among violations: inf') with pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -910,12 +968,20 @@ 
def test_simple_items_and_array(self): y = 999090.54 expected_msg = ('Mismatched elements: 1 / 12 (8.33%)\n' + 'Mismatch at index:\n' + ' [1, 1]: 999090.54 (x), 999090.54 (y)\n' 'Max absolute difference among violations: 0.\n' 'Max relative difference among violations: 0.') with pytest.raises(AssertionError, match=re.escape(expected_msg)): self._assert_func(x, y) expected_msg = ('Mismatched elements: 12 / 12 (100%)\n' + 'First 5 mismatches are at indices:\n' + ' [0, 0]: 999090.54 (x), 3.4536 (y)\n' + ' [0, 1]: 999090.54 (x), 2390.5436 (y)\n' + ' [0, 2]: 999090.54 (x), 435.54657 (y)\n' + ' [0, 3]: 999090.54 (x), 324525.4535 (y)\n' + ' [1, 0]: 999090.54 (x), 5449.54 (y)\n' 'Max absolute difference among violations: ' '999087.0864\n' 'Max relative difference among violations: ' @@ -928,12 +994,17 @@ def test_zeroes(self): y = np.array(87654.) expected_msg = ('Mismatched elements: 1 / 3 (33.3%)\n' + 'Mismatch at index:\n' + ' [0]: 546456.0 (x), 87654.0 (y)\n' 'Max absolute difference among violations: 458802.\n' 'Max relative difference among violations: 5.23423917') with pytest.raises(AssertionError, match=re.escape(expected_msg)): self._assert_func(x, y) expected_msg = ('Mismatched elements: 2 / 3 (66.7%)\n' + 'Mismatch at indices:\n' + ' [1]: 87654.0 (x), 0.0 (y)\n' + ' [2]: 87654.0 (x), 15.455 (y)\n' 'Max absolute difference among violations: 87654.\n' 'Max relative difference among violations: ' '5670.5626011') @@ -943,12 +1014,18 @@ def test_zeroes(self): y = 0 expected_msg = ('Mismatched elements: 3 / 3 (100%)\n' + 'Mismatch at indices:\n' + ' [0]: 546456.0 (x), 0 (y)\n' + ' [1]: 0.0 (x), 0 (y)\n' + ' [2]: 15.455 (x), 0 (y)\n' 'Max absolute difference among violations: 546456.\n' 'Max relative difference among violations: inf') with pytest.raises(AssertionError, match=re.escape(expected_msg)): self._assert_func(x, y) expected_msg = ('Mismatched elements: 1 / 3 (33.3%)\n' + 'Mismatch at index:\n' + ' [1]: 0 (x), 0.0 (y)\n' 'Max absolute difference among 
violations: 0.\n' 'Max relative difference among violations: inf') with pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -1134,12 +1211,16 @@ def test_simple(self): b = np.array([x, y, x, x]) c = np.array([x, y, x, z]) expected_msg = ('Mismatched elements: 1 / 4 (25%)\n' + 'Mismatch at index:\n' + ' [3]: 0.001 (ACTUAL), 0.0 (DESIRED)\n' 'Max absolute difference among violations: 0.001\n' 'Max relative difference among violations: inf') with pytest.raises(AssertionError, match=re.escape(expected_msg)): assert_allclose(b, c) expected_msg = ('Mismatched elements: 1 / 4 (25%)\n' + 'Mismatch at index:\n' + ' [3]: 0.0 (ACTUAL), 0.001 (DESIRED)\n' 'Max absolute difference among violations: 0.001\n' 'Max relative difference among violations: 1.') with pytest.raises(AssertionError, match=re.escape(expected_msg)): @@ -1155,6 +1236,8 @@ def test_report_fail_percentage(self): b = np.array([1, 1, 1, 2]) expected_msg = ('Mismatched elements: 1 / 4 (25%)\n' + 'Mismatch at index:\n' + ' [3]: 1 (ACTUAL), 2 (DESIRED)\n' 'Max absolute difference among violations: 1\n' 'Max relative difference among violations: 0.5') with pytest.raises(AssertionError, match=re.escape(expected_msg)): From a63bed470e4e0e2785b1299155e8d4a34e302282 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Mon, 7 Jul 2025 15:48:03 +0100 Subject: [PATCH 0197/1718] TYP: Allow passing `dtype=None` to `trace` (#29332) --- numpy/__init__.pyi | 6 +++--- numpy/_core/fromnumeric.pyi | 6 +++--- numpy/linalg/_linalg.pyi | 2 +- numpy/ma/core.pyi | 6 +++--- numpy/typing/tests/data/reveal/fromnumeric.pyi | 1 + numpy/typing/tests/data/reveal/ma.pyi | 1 + 6 files changed, 12 insertions(+), 10 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 6bcbe5b68662..d8c57c87cbbe 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2395,7 +2395,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): offset: SupportsIndex 
= ..., axis1: SupportsIndex = ..., axis2: SupportsIndex = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., out: None = ..., ) -> Any: ... @overload @@ -2404,7 +2404,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): offset: SupportsIndex = ..., axis1: SupportsIndex = ..., axis2: SupportsIndex = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., *, out: _ArrayT, ) -> _ArrayT: ... @@ -2414,7 +2414,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): offset: SupportsIndex, axis1: SupportsIndex, axis2: SupportsIndex, - dtype: DTypeLike, + dtype: DTypeLike | None, out: _ArrayT, ) -> _ArrayT: ... diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index ba6669936571..34849c2cc800 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -531,7 +531,7 @@ def trace( offset: SupportsIndex = ..., axis1: SupportsIndex = ..., axis2: SupportsIndex = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., out: None = ..., ) -> Any: ... @overload @@ -540,7 +540,7 @@ def trace( offset: SupportsIndex, axis1: SupportsIndex, axis2: SupportsIndex, - dtype: DTypeLike, + dtype: DTypeLike | None, out: _ArrayT, ) -> _ArrayT: ... @overload @@ -549,7 +549,7 @@ def trace( offset: SupportsIndex = ..., axis1: SupportsIndex = ..., axis2: SupportsIndex = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., *, out: _ArrayT, ) -> _ArrayT: ... diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index e9b4cc0eb0e4..51844817e2dc 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -440,7 +440,7 @@ def trace( /, *, offset: SupportsIndex = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., ) -> Any: ... 
@overload diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index ca9826c7d083..63dea396de66 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -1177,7 +1177,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): offset: SupportsIndex = ..., axis1: SupportsIndex = ..., axis2: SupportsIndex = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., out: None = ..., ) -> Any: ... @overload @@ -1186,7 +1186,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): offset: SupportsIndex = ..., axis1: SupportsIndex = ..., axis2: SupportsIndex = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., *, out: _ArrayT, ) -> _ArrayT: ... @@ -1196,7 +1196,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): offset: SupportsIndex, axis1: SupportsIndex, axis2: SupportsIndex, - dtype: DTypeLike, + dtype: DTypeLike | None, out: _ArrayT, ) -> _ArrayT: ... diff --git a/numpy/typing/tests/data/reveal/fromnumeric.pyi b/numpy/typing/tests/data/reveal/fromnumeric.pyi index 62bc926c765b..a2ba83e6c4c8 100644 --- a/numpy/typing/tests/data/reveal/fromnumeric.pyi +++ b/numpy/typing/tests/data/reveal/fromnumeric.pyi @@ -124,6 +124,7 @@ assert_type(np.diagonal(AR_f4), npt.NDArray[np.float32]) assert_type(np.trace(AR_b), Any) assert_type(np.trace(AR_f4), Any) assert_type(np.trace(AR_f4, out=AR_subclass), NDArraySubclass) +assert_type(np.trace(AR_f4, out=AR_subclass, dtype=None), NDArraySubclass) assert_type(np.ravel(b), np.ndarray[tuple[int], np.dtype[np.bool]]) assert_type(np.ravel(f4), np.ndarray[tuple[int], np.dtype[np.float32]]) diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index c26dca0ed9ec..7c2cb9d5f05e 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -394,6 +394,7 @@ assert_type(MAR_2d_f4.nonzero()[0], _Array1D[np.intp]) assert_type(MAR_f8.trace(), Any) assert_type(MAR_f8.trace(out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f8.trace(out=MAR_subclass, 
dtype=None), MaskedArraySubclass) assert_type(MAR_f8.round(), MaskedArray[np.float64]) assert_type(MAR_f8.round(out=MAR_subclass), MaskedArraySubclass) From fdf5f1b5f2d5268e4908cbe068891bcbbc3b47bc Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 7 Jul 2025 11:25:41 -0600 Subject: [PATCH 0198/1718] MAINT: remove internal uses of assert_warns and suppress_warnings (#29322) * MAINT: remove internal uses of assert_warns and suppress_warnings * MAINT: adjust check for ignore filters in warnings tests * MAINT: simplify filtering per review comments * MAINT: fix warnings test * MAINT: appease linter --- numpy/_core/tests/test_api.py | 3 +- numpy/_core/tests/test_arrayprint.py | 3 +- numpy/_core/tests/test_datetime.py | 21 +-- numpy/_core/tests/test_einsum.py | 6 +- numpy/_core/tests/test_indexing.py | 5 +- numpy/_core/tests/test_mem_policy.py | 4 +- numpy/_core/tests/test_memmap.py | 7 +- numpy/_core/tests/test_multiarray.py | 51 +++---- numpy/_core/tests/test_nditer.py | 13 +- numpy/_core/tests/test_regression.py | 9 +- numpy/_core/tests/test_scalar_ctors.py | 6 +- numpy/_core/tests/test_scalarmath.py | 28 ++-- numpy/_core/tests/test_ufunc.py | 9 +- numpy/_core/tests/test_umath.py | 29 ++-- numpy/distutils/tests/test_exec_command.py | 12 +- numpy/lib/_nanfunctions_impl.py | 1 - numpy/lib/_scimath_impl.py | 6 +- numpy/lib/tests/test_format.py | 5 +- numpy/lib/tests/test_function_base.py | 16 +-- numpy/lib/tests/test_histograms.py | 16 +-- numpy/lib/tests/test_io.py | 23 ++-- numpy/lib/tests/test_nanfunctions.py | 59 ++++---- numpy/lib/tests/test_stride_tricks.py | 5 +- numpy/linalg/tests/test_deprecations.py | 11 +- numpy/linalg/tests/test_linalg.py | 12 +- numpy/ma/tests/test_core.py | 137 +++++++++---------- numpy/ma/tests/test_deprecations.py | 7 +- numpy/ma/tests/test_extras.py | 68 ++++----- numpy/ma/tests/test_regression.py | 14 +- numpy/polynomial/tests/test_polynomial.py | 5 +- numpy/random/tests/test_generator_mt19937.py | 12 +- 
numpy/random/tests/test_random.py | 17 +-- numpy/random/tests/test_randomstate.py | 45 +++--- numpy/tests/test_reloading.py | 6 +- numpy/tests/test_warnings.py | 9 +- 35 files changed, 318 insertions(+), 362 deletions(-) diff --git a/numpy/_core/tests/test_api.py b/numpy/_core/tests/test_api.py index da4a8f423bc5..c2e5bf8909f9 100644 --- a/numpy/_core/tests/test_api.py +++ b/numpy/_core/tests/test_api.py @@ -12,7 +12,6 @@ assert_array_equal, assert_equal, assert_raises, - assert_warns, ) @@ -315,7 +314,7 @@ def test_object_array_astype_to_void(): def test_array_astype_warning(t): # test ComplexWarning when casting from complex to float or int a = np.array(10, dtype=np.complex128) - assert_warns(np.exceptions.ComplexWarning, a.astype, t) + pytest.warns(np.exceptions.ComplexWarning, a.astype, t) @pytest.mark.parametrize(["dtype", "out_dtype"], [(np.bytes_, np.bool), diff --git a/numpy/_core/tests/test_arrayprint.py b/numpy/_core/tests/test_arrayprint.py index 1fd4ac2fddb7..a054f408e954 100644 --- a/numpy/_core/tests/test_arrayprint.py +++ b/numpy/_core/tests/test_arrayprint.py @@ -15,7 +15,6 @@ assert_equal, assert_raises, assert_raises_regex, - assert_warns, ) from numpy.testing._private.utils import run_threaded @@ -736,7 +735,7 @@ def test_0d_arrays(self): assert_equal(str(x), "1") # check `style` arg raises - assert_warns(DeprecationWarning, np.array2string, + pytest.warns(DeprecationWarning, np.array2string, np.array(1.), style=repr) # but not in legacy mode np.array2string(np.array(1.), style=repr, legacy='1.13') diff --git a/numpy/_core/tests/test_datetime.py b/numpy/_core/tests/test_datetime.py index c5d2ca459e6a..a10ca15bc373 100644 --- a/numpy/_core/tests/test_datetime.py +++ b/numpy/_core/tests/test_datetime.py @@ -1,5 +1,6 @@ import datetime import pickle +import warnings from zoneinfo import ZoneInfo, ZoneInfoNotFoundError import pytest @@ -13,8 +14,6 @@ assert_equal, assert_raises, assert_raises_regex, - assert_warns, - suppress_warnings, ) try: @@ 
-1282,8 +1281,9 @@ def test_datetime_multiply(self): assert_raises(TypeError, np.multiply, 1.5, dta) # NaTs - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, "invalid value encountered in multiply") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "invalid value encountered in multiply", RuntimeWarning) nat = np.timedelta64('NaT') def check(a, b, res): @@ -1344,7 +1344,7 @@ def test_timedelta_floor_divide(self, op1, op2, exp): np.timedelta64(-1)), ]) def test_timedelta_floor_div_warnings(self, op1, op2): - with assert_warns(RuntimeWarning): + with pytest.warns(RuntimeWarning): actual = op1 // op2 assert_equal(actual, 0) assert_equal(actual.dtype, np.int64) @@ -1428,9 +1428,9 @@ def test_timedelta_divmod_typeerror(self, op1, op2): np.timedelta64(-1)), ]) def test_timedelta_divmod_warnings(self, op1, op2): - with assert_warns(RuntimeWarning): + with pytest.warns(RuntimeWarning): expected = (op1 // op2, op1 % op2) - with assert_warns(RuntimeWarning): + with pytest.warns(RuntimeWarning): actual = divmod(op1, op2) assert_equal(actual, expected) @@ -1482,8 +1482,9 @@ def test_datetime_divide(self): assert_raises(TypeError, np.divide, 1.5, dta) # NaTs - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, r".*encountered in divide") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', r".*encountered in divide", RuntimeWarning) nat = np.timedelta64('NaT') for tp in (int, float): assert_equal(np.timedelta64(1) / tp(0), nat) @@ -2045,7 +2046,7 @@ def test_timedelta_modulus_error(self, val1, val2): @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") def test_timedelta_modulus_div_by_zero(self): - with assert_warns(RuntimeWarning): + with pytest.warns(RuntimeWarning): actual = np.timedelta64(10, 's') % np.timedelta64(0, 's') assert_equal(actual, np.timedelta64('NaT')) diff --git a/numpy/_core/tests/test_einsum.py b/numpy/_core/tests/test_einsum.py index 0bd180b5e41f..84d4af1707b6 100644 --- 
a/numpy/_core/tests/test_einsum.py +++ b/numpy/_core/tests/test_einsum.py @@ -1,4 +1,5 @@ import itertools +import warnings import pytest @@ -11,7 +12,6 @@ assert_equal, assert_raises, assert_raises_regex, - suppress_warnings, ) # Setup for optimize einsum @@ -455,8 +455,8 @@ def check_einsum_sums(self, dtype, do_opt=False): np.outer(a, b)) # Suppress the complex warnings for the 'as f8' tests - with suppress_warnings() as sup: - sup.filter(np.exceptions.ComplexWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', np.exceptions.ComplexWarning) # matvec(a,b) / a.dot(b) where a is matrix, b is vector for n in range(1, 17): diff --git a/numpy/_core/tests/test_indexing.py b/numpy/_core/tests/test_indexing.py index 81ba85ea4648..2a8a669d0787 100644 --- a/numpy/_core/tests/test_indexing.py +++ b/numpy/_core/tests/test_indexing.py @@ -16,7 +16,6 @@ assert_equal, assert_raises, assert_raises_regex, - assert_warns, ) @@ -784,12 +783,12 @@ def test_boolean_index_cast_assign(self): assert_equal(zero_array[0, 1], 1) # Fancy indexing works, although we get a cast warning. - assert_warns(ComplexWarning, + pytest.warns(ComplexWarning, zero_array.__setitem__, ([0], [1]), np.array([2 + 1j])) assert_equal(zero_array[0, 1], 2) # No complex part # Cast complex to float, throwing away the imaginary portion. 
- assert_warns(ComplexWarning, + pytest.warns(ComplexWarning, zero_array.__setitem__, bool_index, np.array([1j])) assert_equal(zero_array[0, 1], 0) diff --git a/numpy/_core/tests/test_mem_policy.py b/numpy/_core/tests/test_mem_policy.py index b9f971e73249..70befb8bd324 100644 --- a/numpy/_core/tests/test_mem_policy.py +++ b/numpy/_core/tests/test_mem_policy.py @@ -9,7 +9,7 @@ import numpy as np from numpy._core.multiarray import get_handler_name -from numpy.testing import IS_EDITABLE, IS_WASM, assert_warns, extbuild +from numpy.testing import IS_EDITABLE, IS_WASM, extbuild @pytest.fixture @@ -432,7 +432,7 @@ def test_switch_owner(get_module, policy): # The policy should be NULL, so we have to assume we can call # "free". A warning is given if the policy == "1" if policy: - with assert_warns(RuntimeWarning) as w: + with pytest.warns(RuntimeWarning) as w: del a gc.collect() else: diff --git a/numpy/_core/tests/test_memmap.py b/numpy/_core/tests/test_memmap.py index cbd825205844..49931e1680e8 100644 --- a/numpy/_core/tests/test_memmap.py +++ b/numpy/_core/tests/test_memmap.py @@ -1,6 +1,7 @@ import mmap import os import sys +import warnings from pathlib import Path from tempfile import NamedTemporaryFile, TemporaryFile @@ -26,7 +27,6 @@ assert_array_equal, assert_equal, break_cycles, - suppress_warnings, ) @@ -167,8 +167,9 @@ def test_ufunc_return_ndarray(self): fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape) fp[:] = self.data - with suppress_warnings() as sup: - sup.filter(FutureWarning, "np.average currently does not preserve") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "np.average currently does not preserve", FutureWarning) for unary_op in [sum, average, prod]: result = unary_op(fp) assert_(isscalar(result)) diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 4ee021d18f6f..930c736c6076 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ 
-48,11 +48,9 @@ assert_equal, assert_raises, assert_raises_regex, - assert_warns, break_cycles, check_support_sve, runstring, - suppress_warnings, temppath, ) from numpy.testing._private.utils import _no_tracing, requires_memory @@ -3805,11 +3803,11 @@ def test__complex__(self): ap = complex(a) assert_equal(ap, a, msg) - with assert_warns(DeprecationWarning): + with pytest.warns(DeprecationWarning): bp = complex(b) assert_equal(bp, b, msg) - with assert_warns(DeprecationWarning): + with pytest.warns(DeprecationWarning): cp = complex(c) assert_equal(cp, c, msg) @@ -3833,7 +3831,7 @@ def test__complex__should_not_work(self): assert_raises(TypeError, complex, d) e = np.array(['1+1j'], 'U') - with assert_warns(DeprecationWarning): + with pytest.warns(DeprecationWarning): assert_raises(TypeError, complex, e) class TestCequenceMethods: @@ -4932,9 +4930,11 @@ class TestArgmax: @pytest.mark.parametrize('data', nan_arr) def test_combinations(self, data): arr, pos = data - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, - "invalid value encountered in reduce") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', + "invalid value encountered in reduce", + RuntimeWarning) val = np.max(arr) assert_equal(np.argmax(arr), pos, err_msg=f"{arr!r}") @@ -5074,9 +5074,11 @@ class TestArgmin: @pytest.mark.parametrize('data', nan_arr) def test_combinations(self, data): arr, pos = data - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, - "invalid value encountered in reduce") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', + "invalid value encountered in reduce", + RuntimeWarning) min_val = np.min(arr) assert_equal(np.argmin(arr), pos, err_msg=f"{arr!r}") @@ -7268,8 +7270,8 @@ def test_out_arg(self): out = np.zeros((5, 2), dtype=np.complex128) c = self.matmul(a, b, out=out) assert_(c is out) - with suppress_warnings() as sup: - sup.filter(ComplexWarning, '') + with warnings.catch_warnings(): + 
warnings.simplefilter('ignore', ComplexWarning) c = c.astype(tgt.dtype) assert_array_equal(c, tgt) @@ -8816,8 +8818,9 @@ def test_multiarray_writable_attributes_deletion(self): # ticket #2046, should not seqfault, raise AttributeError a = np.ones(2) attr = ['shape', 'strides', 'data', 'dtype', 'real', 'imag', 'flat'] - with suppress_warnings() as sup: - sup.filter(DeprecationWarning, "Assigning the 'data' attribute") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "Assigning the 'data' attribute", DeprecationWarning) for s in attr: assert_raises(AttributeError, delattr, a, s) @@ -9071,9 +9074,9 @@ def test_to_int_scalar(self): int_funcs = (int, lambda x: x.__int__()) for int_func in int_funcs: assert_equal(int_func(np.array(0)), 0) - with assert_warns(DeprecationWarning): + with pytest.warns(DeprecationWarning): assert_equal(int_func(np.array([1])), 1) - with assert_warns(DeprecationWarning): + with pytest.warns(DeprecationWarning): assert_equal(int_func(np.array([[42]])), 42) assert_raises(TypeError, int_func, np.array([1, 2])) @@ -9087,7 +9090,7 @@ def __int__(self): raise NotImplementedError assert_raises(NotImplementedError, int_func, np.array(NotConvertible())) - with assert_warns(DeprecationWarning): + with pytest.warns(DeprecationWarning): assert_raises(NotImplementedError, int_func, np.array([NotConvertible()])) @@ -9677,12 +9680,10 @@ def test_view_assign(self): @pytest.mark.leaks_references( reason="increments self in dealloc; ignore since deprecated path.") def test_dealloc_warning(self): - with suppress_warnings() as sup: - sup.record(RuntimeWarning) - arr = np.arange(9).reshape(3, 3) - v = arr.T + arr = np.arange(9).reshape(3, 3) + v = arr.T + with pytest.warns(RuntimeWarning): _multiarray_tests.npy_abuse_writebackifcopy(v) - assert len(sup.log) == 1 def test_view_discard_refcount(self): from numpy._core._multiarray_tests import ( @@ -10202,8 +10203,8 @@ def test_strided_loop_alignments(self): xf128 = _aligned_zeros(3, 
np.longdouble, align=align) # test casting, both to and from misaligned - with suppress_warnings() as sup: - sup.filter(ComplexWarning, "Casting complex values") + with warnings.catch_warnings(): + warnings.filterwarnings('ignore', "Casting complex values", ComplexWarning) xc64.astype('f8') xf64.astype(np.complex64) test = xc64 + xf64 diff --git a/numpy/_core/tests/test_nditer.py b/numpy/_core/tests/test_nditer.py index f71130f16331..f0e10333808c 100644 --- a/numpy/_core/tests/test_nditer.py +++ b/numpy/_core/tests/test_nditer.py @@ -1,6 +1,7 @@ import subprocess import sys import textwrap +import warnings import pytest @@ -15,7 +16,6 @@ assert_array_equal, assert_equal, assert_raises, - suppress_warnings, ) from numpy.testing._private.utils import requires_memory @@ -1899,8 +1899,8 @@ def test_iter_buffered_cast_byteswapped(): assert_equal(a, 2 * np.arange(10, dtype='f4')) - with suppress_warnings() as sup: - sup.filter(np.exceptions.ComplexWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', np.exceptions.ComplexWarning) a = np.arange(10, dtype='f8') a = a.view(a.dtype.newbyteorder()).byteswap() @@ -3310,13 +3310,10 @@ def test_warn_noclose(): a = np.arange(6, dtype='f4') au = a.byteswap() au = au.view(au.dtype.newbyteorder()) - with suppress_warnings() as sup: - sup.record(RuntimeWarning) + with pytest.warns(RuntimeWarning): it = np.nditer(au, [], [['readwrite', 'updateifcopy']], - casting='equiv', op_dtypes=[np.dtype('f4')]) + casting='equiv', op_dtypes=[np.dtype('f4')]) del it - assert len(sup.log) == 1 - @pytest.mark.parametrize(["in_dtype", "buf_dtype"], [("i", "O"), ("O", "i"), # most simple cases diff --git a/numpy/_core/tests/test_regression.py b/numpy/_core/tests/test_regression.py index d9a3a529c9ea..2aeb29a1320d 100644 --- a/numpy/_core/tests/test_regression.py +++ b/numpy/_core/tests/test_regression.py @@ -3,6 +3,7 @@ import pickle import sys import tempfile +import warnings from io import BytesIO from itertools import chain 
from os import path @@ -27,8 +28,6 @@ assert_equal, assert_raises, assert_raises_regex, - assert_warns, - suppress_warnings, ) from numpy.testing._private.utils import _no_tracing, requires_memory @@ -1619,9 +1618,9 @@ def test_fromfile_tofile_seeks(self): def test_complex_scalar_warning(self): for tp in [np.csingle, np.cdouble, np.clongdouble]: x = tp(1 + 2j) - assert_warns(ComplexWarning, float, x) - with suppress_warnings() as sup: - sup.filter(ComplexWarning) + pytest.warns(ComplexWarning, float, x) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', ComplexWarning) assert_equal(float(x), float(x.real)) def test_complex_scalar_complex_cast(self): diff --git a/numpy/_core/tests/test_scalar_ctors.py b/numpy/_core/tests/test_scalar_ctors.py index ea78cd9d9f51..05ede01b5973 100644 --- a/numpy/_core/tests/test_scalar_ctors.py +++ b/numpy/_core/tests/test_scalar_ctors.py @@ -4,7 +4,7 @@ import pytest import numpy as np -from numpy.testing import assert_almost_equal, assert_equal, assert_warns +from numpy.testing import assert_almost_equal, assert_equal class TestFromString: @@ -25,7 +25,7 @@ def test_floating_overflow(self): assert_equal(fsingle, np.inf) fdouble = np.double('1e10000') assert_equal(fdouble, np.inf) - flongdouble = assert_warns(RuntimeWarning, np.longdouble, '1e10000') + flongdouble = pytest.warns(RuntimeWarning, np.longdouble, '1e10000') assert_equal(flongdouble, np.inf) fhalf = np.half('-1e10000') @@ -34,7 +34,7 @@ def test_floating_overflow(self): assert_equal(fsingle, -np.inf) fdouble = np.double('-1e10000') assert_equal(fdouble, -np.inf) - flongdouble = assert_warns(RuntimeWarning, np.longdouble, '-1e10000') + flongdouble = pytest.warns(RuntimeWarning, np.longdouble, '-1e10000') assert_equal(flongdouble, -np.inf) diff --git a/numpy/_core/tests/test_scalarmath.py b/numpy/_core/tests/test_scalarmath.py index 746b410f79d2..8c24b4cfdc88 100644 --- a/numpy/_core/tests/test_scalarmath.py +++ b/numpy/_core/tests/test_scalarmath.py @@ 
-23,7 +23,6 @@ assert_equal, assert_raises, check_support_sve, - suppress_warnings, ) types = [np.bool, np.byte, np.ubyte, np.short, np.ushort, np.intc, np.uintc, @@ -369,12 +368,7 @@ def test_float_modulus_corner_cases(self): assert_(rem >= -b, f'dt: {dt}') # Check nans, inf - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, "invalid value encountered in remainder") - sup.filter(RuntimeWarning, "divide by zero encountered in remainder") - sup.filter(RuntimeWarning, "divide by zero encountered in floor_divide") - sup.filter(RuntimeWarning, "divide by zero encountered in divmod") - sup.filter(RuntimeWarning, "invalid value encountered in divmod") + with warnings.catch_warnings(), np.errstate(all='ignore'): for dt in np.typecodes['Float']: fone = np.array(1.0, dtype=dt) fzer = np.array(0.0, dtype=dt) @@ -522,21 +516,17 @@ def test_int_from_infinite_longdouble(self): # gh-627 x = np.longdouble(np.inf) assert_raises(OverflowError, int, x) - with suppress_warnings() as sup: - sup.record(ComplexWarning) + with pytest.warns(ComplexWarning): x = np.clongdouble(np.inf) assert_raises(OverflowError, int, x) - assert_equal(len(sup.log), 1) @pytest.mark.skipif(not IS_PYPY, reason="Test is PyPy only (gh-9972)") def test_int_from_infinite_longdouble___int__(self): x = np.longdouble(np.inf) assert_raises(OverflowError, x.__int__) - with suppress_warnings() as sup: - sup.record(ComplexWarning) + with pytest.warns(ComplexWarning): x = np.clongdouble(np.inf) assert_raises(OverflowError, x.__int__) - assert_equal(len(sup.log), 1) @pytest.mark.skipif(np.finfo(np.double) == np.finfo(np.longdouble), reason="long double is same as double") @@ -731,8 +721,8 @@ def test_exceptions(self): def test_result(self): types = np.typecodes['AllInteger'] + np.typecodes['AllFloat'] - with suppress_warnings() as sup: - sup.filter(RuntimeWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', RuntimeWarning) for dt in types: a = np.ones((), dtype=dt)[()] if dt in 
np.typecodes['UnsignedInteger']: @@ -749,8 +739,8 @@ def test_exceptions(self): def test_result(self): types = np.typecodes['AllInteger'] + np.typecodes['AllFloat'] - with suppress_warnings() as sup: - sup.filter(RuntimeWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', RuntimeWarning) for dt in types: a = np.ones((), dtype=dt)[()] assert_equal(operator.sub(a, a), 0) @@ -771,8 +761,8 @@ def _test_abs_func(self, absfunc, test_dtype): x = test_dtype(np.finfo(test_dtype).max) assert_equal(absfunc(x), x.real) - with suppress_warnings() as sup: - sup.filter(UserWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', UserWarning) x = test_dtype(np.finfo(test_dtype).tiny) assert_equal(absfunc(x), x.real) diff --git a/numpy/_core/tests/test_ufunc.py b/numpy/_core/tests/test_ufunc.py index 836be1245399..b86e22917734 100644 --- a/numpy/_core/tests/test_ufunc.py +++ b/numpy/_core/tests/test_ufunc.py @@ -26,7 +26,6 @@ assert_equal, assert_no_warnings, assert_raises, - suppress_warnings, ) from numpy.testing._private.utils import requires_memory @@ -686,8 +685,8 @@ def test_true_divide(self): tgt = float(x) / float(y) rtol = max(np.finfo(dtout).resolution, 1e-15) # The value of tiny for double double is NaN - with suppress_warnings() as sup: - sup.filter(UserWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', UserWarning) if not np.isnan(np.finfo(dtout).tiny): atol = max(np.finfo(dtout).tiny, 3e-308) else: @@ -706,8 +705,8 @@ def test_true_divide(self): tgt = complex(x) / complex(y) rtol = max(np.finfo(dtout).resolution, 1e-15) # The value of tiny for double double is NaN - with suppress_warnings() as sup: - sup.filter(UserWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', UserWarning) if not np.isnan(np.finfo(dtout).tiny): atol = max(np.finfo(dtout).tiny, 3e-308) else: diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py index b5145e9d642b..d8ed56c31b93 
100644 --- a/numpy/_core/tests/test_umath.py +++ b/numpy/_core/tests/test_umath.py @@ -30,7 +30,6 @@ assert_no_warnings, assert_raises, assert_raises_regex, - suppress_warnings, ) from numpy.testing._private.utils import _glibc_older_than @@ -703,8 +702,8 @@ def test_floor_division_corner_cases(self, dtype): fone = np.array(1.0, dtype=dtype) fzer = np.array(0.0, dtype=dtype) finf = np.array(np.inf, dtype=dtype) - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, "invalid value encountered in floor_divide") + with warnings.catch_warnings(): + warnings.filterwarnings('ignore', "invalid value encountered in floor_divide", RuntimeWarning) div = np.floor_divide(fnan, fone) assert np.isnan(div), f"div: {div}" div = np.floor_divide(fone, fnan) @@ -859,9 +858,9 @@ def test_float_divmod_corner_cases(self): fone = np.array(1.0, dtype=dt) fzer = np.array(0.0, dtype=dt) finf = np.array(np.inf, dtype=dt) - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, "invalid value encountered in divmod") - sup.filter(RuntimeWarning, "divide by zero encountered in divmod") + with warnings.catch_warnings(): + warnings.filterwarnings('ignore', "invalid value encountered in divmod", RuntimeWarning) + warnings.filterwarnings('ignore', "divide by zero encountered in divmod", RuntimeWarning) div, rem = np.divmod(fone, fzer) assert np.isinf(div), f'dt: {dt}, div: {rem}' assert np.isnan(rem), f'dt: {dt}, rem: {rem}' @@ -898,9 +897,9 @@ def test_float_remainder_corner_cases(self): assert_(rem >= -b, f'dt: {dt}') # Check nans, inf - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, "invalid value encountered in remainder") - sup.filter(RuntimeWarning, "invalid value encountered in fmod") + with warnings.catch_warnings(): + warnings.filterwarnings('ignore', "invalid value encountered in remainder", RuntimeWarning) + warnings.filterwarnings('ignore', "invalid value encountered in fmod", RuntimeWarning) for dt in np.typecodes['Float']: fone = np.array(1.0, dtype=dt) 
fzer = np.array(0.0, dtype=dt) @@ -2957,9 +2956,11 @@ def test_minmax_blocked(self): inp[:] = np.arange(inp.size, dtype=dt) inp[i] = np.nan emsg = lambda: f'{inp!r}\n{msg}' - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, - "invalid value encountered in reduce") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', + "invalid value encountered in reduce", + RuntimeWarning) assert_(np.isnan(inp.max()), msg=emsg) assert_(np.isnan(inp.min()), msg=emsg) @@ -4603,8 +4604,8 @@ def test_nextafter_0(): for t, direction in itertools.product(np._core.sctypes['float'], (1, -1)): # The value of tiny for double double is NaN, so we need to pass the # assert - with suppress_warnings() as sup: - sup.filter(UserWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', UserWarning) if not np.isnan(np.finfo(t).tiny): tiny = np.finfo(t).tiny assert_( diff --git a/numpy/distutils/tests/test_exec_command.py b/numpy/distutils/tests/test_exec_command.py index d1a20056a5a2..749523528e63 100644 --- a/numpy/distutils/tests/test_exec_command.py +++ b/numpy/distutils/tests/test_exec_command.py @@ -5,7 +5,7 @@ from numpy.distutils import exec_command from numpy.distutils.exec_command import get_pythonexe -from numpy.testing import tempdir, assert_, assert_warns, IS_WASM +from numpy.testing import tempdir, assert_, IS_WASM # In python 3 stdout, stderr are text (unicode compliant) devices, so to @@ -68,7 +68,7 @@ def test_exec_command_stdout(): # Test posix version: with redirect_stdout(StringIO()): with redirect_stderr(TemporaryFile()): - with assert_warns(DeprecationWarning): + with pytest.warns(DeprecationWarning): exec_command.exec_command("cd '.'") if os.name == 'posix': @@ -76,14 +76,14 @@ def test_exec_command_stdout(): with emulate_nonposix(): with redirect_stdout(StringIO()): with redirect_stderr(TemporaryFile()): - with assert_warns(DeprecationWarning): + with pytest.warns(DeprecationWarning): exec_command.exec_command("cd '.'") def 
test_exec_command_stderr(): # Test posix version: with redirect_stdout(TemporaryFile(mode='w+')): with redirect_stderr(StringIO()): - with assert_warns(DeprecationWarning): + with pytest.warns(DeprecationWarning): exec_command.exec_command("cd '.'") if os.name == 'posix': @@ -91,7 +91,7 @@ def test_exec_command_stderr(): with emulate_nonposix(): with redirect_stdout(TemporaryFile()): with redirect_stderr(StringIO()): - with assert_warns(DeprecationWarning): + with pytest.warns(DeprecationWarning): exec_command.exec_command("cd '.'") @@ -206,7 +206,7 @@ def check_execute_in(self, **kws): def test_basic(self): with redirect_stdout(StringIO()): with redirect_stderr(StringIO()): - with assert_warns(DeprecationWarning): + with pytest.warns(DeprecationWarning): if os.name == "posix": self.check_posix(use_tee=0) self.check_posix(use_tee=1) diff --git a/numpy/lib/_nanfunctions_impl.py b/numpy/lib/_nanfunctions_impl.py index 4a01490301c8..aec60d484ba4 100644 --- a/numpy/lib/_nanfunctions_impl.py +++ b/numpy/lib/_nanfunctions_impl.py @@ -717,7 +717,6 @@ def nansum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, inf >>> np.nansum([1, np.nan, -np.inf]) -inf - >>> from numpy.testing import suppress_warnings >>> with np.errstate(invalid="ignore"): ... np.nansum([1, np.nan, np.inf, -np.inf]) # both +/- infinity present np.float64(nan) diff --git a/numpy/lib/_scimath_impl.py b/numpy/lib/_scimath_impl.py index 8136a7d54515..b33f42b3d10d 100644 --- a/numpy/lib/_scimath_impl.py +++ b/numpy/lib/_scimath_impl.py @@ -628,9 +628,9 @@ def arctanh(x): >>> np.emath.arctanh(0.5) 0.5493061443340549 - >>> from numpy.testing import suppress_warnings - >>> with suppress_warnings() as sup: - ... sup.filter(RuntimeWarning) + >>> import warnings + >>> with warnings.catch_warnings(): + ... warnings.simplefilter('ignore', RuntimeWarning) ... 
np.emath.arctanh(np.eye(2)) array([[inf, 0.], [ 0., inf]]) diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py index cf11dd25dbda..a6de6238d269 100644 --- a/numpy/lib/tests/test_format.py +++ b/numpy/lib/tests/test_format.py @@ -291,7 +291,6 @@ assert_array_equal, assert_raises, assert_raises_regex, - assert_warns, ) from numpy.testing._private.utils import requires_memory @@ -1008,7 +1007,7 @@ def test_unicode_field_names(tmpdir): # notifies the user that 3.0 is selected with open(fname, 'wb') as f: - with assert_warns(UserWarning): + with pytest.warns(UserWarning): format.write_array(f, arr, version=None) def test_header_growth_axis(): @@ -1041,7 +1040,7 @@ def test_metadata_dtype(dt): # gh-14142 arr = np.ones(10, dtype=dt) buf = BytesIO() - with assert_warns(UserWarning): + with pytest.warns(UserWarning): np.save(buf, arr) buf.seek(0) diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index eccf4bcfb019..37f8bbadc31a 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -61,8 +61,6 @@ assert_equal, assert_raises, assert_raises_regex, - assert_warns, - suppress_warnings, ) np_floats = [np.half, np.single, np.double, np.longdouble] @@ -2473,10 +2471,10 @@ def test_simple(self): def test_ddof(self): # ddof raises DeprecationWarning - with suppress_warnings() as sup: + with warnings.catch_warnings(): warnings.simplefilter("always") - assert_warns(DeprecationWarning, corrcoef, self.A, ddof=-1) - sup.filter(DeprecationWarning) + pytest.warns(DeprecationWarning, corrcoef, self.A, ddof=-1) + warnings.simplefilter('ignore', DeprecationWarning) # ddof has no or negligible effect on the function assert_almost_equal(corrcoef(self.A, ddof=-1), self.res1) assert_almost_equal(corrcoef(self.A, self.B, ddof=-1), self.res2) @@ -2485,11 +2483,11 @@ def test_ddof(self): def test_bias(self): # bias raises DeprecationWarning - with suppress_warnings() as sup: + with 
warnings.catch_warnings(): warnings.simplefilter("always") - assert_warns(DeprecationWarning, corrcoef, self.A, self.B, 1, 0) - assert_warns(DeprecationWarning, corrcoef, self.A, bias=0) - sup.filter(DeprecationWarning) + pytest.warns(DeprecationWarning, corrcoef, self.A, self.B, 1, 0) + pytest.warns(DeprecationWarning, corrcoef, self.A, bias=0) + warnings.simplefilter('ignore', DeprecationWarning) # bias has no or negligible effect on the function assert_almost_equal(corrcoef(self.A, bias=1), self.res1) diff --git a/numpy/lib/tests/test_histograms.py b/numpy/lib/tests/test_histograms.py index 4ba953f462fc..fec8828d242e 100644 --- a/numpy/lib/tests/test_histograms.py +++ b/numpy/lib/tests/test_histograms.py @@ -1,3 +1,5 @@ +import warnings + import pytest import numpy as np @@ -12,7 +14,6 @@ assert_equal, assert_raises, assert_raises_regex, - suppress_warnings, ) @@ -138,11 +139,9 @@ def test_bool_conversion(self): # Should raise an warning on booleans # Ensure that the histograms are equivalent, need to suppress # the warnings to get the actual outputs - with suppress_warnings() as sup: - rec = sup.record(RuntimeWarning, 'Converting input from .*') + with pytest.warns(RuntimeWarning, match='Converting input from .*'): hist, edges = np.histogram([True, True, False]) # A warning should be issued - assert_equal(len(rec), 1) assert_array_equal(hist, int_hist) assert_array_equal(edges, int_edges) @@ -284,9 +283,8 @@ def test_some_nan_values(self): all_nan = np.array([np.nan, np.nan]) # the internal comparisons with NaN give warnings - sup = suppress_warnings() - sup.filter(RuntimeWarning) - with sup: + with warnings.catch_warnings(): + warnings.simplefilter('ignore', RuntimeWarning) # can't infer range with nan assert_raises(ValueError, histogram, one_nan, bins='auto') assert_raises(ValueError, histogram, all_nan, bins='auto') @@ -618,9 +616,9 @@ def test_integer(self, bins): """ Test that bin width for integer data is at least 1. 
""" - with suppress_warnings() as sup: + with warnings.catch_warnings(): if bins == 'stone': - sup.filter(RuntimeWarning) + warnings.simplefilter('ignore', RuntimeWarning) assert_equal( np.histogram_bin_edges(np.tile(np.arange(9), 1000), bins), np.arange(9)) diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py index 05ef0be92211..a1698f240bff 100644 --- a/numpy/lib/tests/test_io.py +++ b/numpy/lib/tests/test_io.py @@ -36,9 +36,7 @@ assert_no_warnings, assert_raises, assert_raises_regex, - assert_warns, break_cycles, - suppress_warnings, tempdir, temppath, ) @@ -336,8 +334,9 @@ def test_closing_fid(self): # goes to zero. Python running in debug mode raises a # ResourceWarning when file closing is left to the garbage # collector, so we catch the warnings. - with suppress_warnings() as sup: - sup.filter(ResourceWarning) # TODO: specify exact message + with warnings.catch_warnings(): + # TODO: specify exact message + warnings.simplefilter('ignore', ResourceWarning) for i in range(1, 1025): try: np.load(tmp)["data"] @@ -1446,8 +1445,8 @@ def test_skip_footer(self): assert_equal(test, ctrl) def test_skip_footer_with_invalid(self): - with suppress_warnings() as sup: - sup.filter(ConversionWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', ConversionWarning) basestr = '1 1\n2 2\n3 3\n4 4\n5 \n6 \n7 \n' # Footer too small to get rid of all invalid values assert_raises(ValueError, np.genfromtxt, @@ -1845,8 +1844,8 @@ def test_usecols_with_named_columns(self): def test_empty_file(self): # Test that an empty file raises the proper warning. 
- with suppress_warnings() as sup: - sup.filter(message="genfromtxt: Empty input file:") + with warnings.catch_warnings(): + warnings.filterwarnings('ignore', message="genfromtxt: Empty input file:") data = TextIO() test = np.genfromtxt(data) assert_equal(test, np.array([])) @@ -1991,7 +1990,7 @@ def test_invalid_raise(self): def f(): return np.genfromtxt(mdata, invalid_raise=False, **kwargs) - mtest = assert_warns(ConversionWarning, f) + mtest = pytest.warns(ConversionWarning, f) assert_equal(len(mtest), 45) assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'abcde'])) # @@ -2012,7 +2011,7 @@ def test_invalid_raise_with_usecols(self): def f(): return np.genfromtxt(mdata, usecols=(0, 4), **kwargs) - mtest = assert_warns(ConversionWarning, f) + mtest = pytest.warns(ConversionWarning, f) assert_equal(len(mtest), 45) assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'ae'])) # @@ -2424,8 +2423,8 @@ def test_max_rows(self): assert_raises(ValueError, np.genfromtxt, TextIO(data), max_rows=4) # Test with invalid not raise - with suppress_warnings() as sup: - sup.filter(ConversionWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', ConversionWarning) test = np.genfromtxt(TextIO(data), max_rows=4, invalid_raise=False) control = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]) diff --git a/numpy/lib/tests/test_nanfunctions.py b/numpy/lib/tests/test_nanfunctions.py index 89a6d1f95fed..447b84db3edc 100644 --- a/numpy/lib/tests/test_nanfunctions.py +++ b/numpy/lib/tests/test_nanfunctions.py @@ -15,7 +15,6 @@ assert_equal, assert_raises, assert_raises_regex, - suppress_warnings, ) # Test data @@ -281,8 +280,9 @@ def test_mutation(self): def test_result_values(self): for f, fcmp in zip(self.nanfuncs, [np.greater, np.less]): for row in _ndat: - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, "invalid value encountered in") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "invalid value encountered in", 
RuntimeWarning) ind = f(row) val = row[ind] # comparing with NaN is tricky as the result @@ -491,10 +491,10 @@ def test_dtype_from_dtype(self): codes = 'efdgFDG' for nf, rf in zip(self.nanfuncs, self.stdfuncs): for c in codes: - with suppress_warnings() as sup: + with warnings.catch_warnings(): if nf in {np.nanstd, np.nanvar} and c in 'FDG': # Giving the warning is a small bug, see gh-8000 - sup.filter(ComplexWarning) + warnings.simplefilter('ignore', ComplexWarning) tgt = rf(mat, dtype=np.dtype(c), axis=1).dtype.type res = nf(mat, dtype=np.dtype(c), axis=1).dtype.type assert_(res is tgt) @@ -508,10 +508,10 @@ def test_dtype_from_char(self): codes = 'efdgFDG' for nf, rf in zip(self.nanfuncs, self.stdfuncs): for c in codes: - with suppress_warnings() as sup: + with warnings.catch_warnings(): if nf in {np.nanstd, np.nanvar} and c in 'FDG': # Giving the warning is a small bug, see gh-8000 - sup.filter(ComplexWarning) + warnings.simplefilter('ignore', ComplexWarning) tgt = rf(mat, dtype=c, axis=1).dtype.type res = nf(mat, dtype=c, axis=1).dtype.type assert_(res is tgt) @@ -723,22 +723,22 @@ def test_ddof(self): res = nf(_ndat, axis=1, ddof=ddof) assert_almost_equal(res, tgt) - def test_ddof_too_big(self): + def test_ddof_too_big(self, recwarn): nanfuncs = [np.nanvar, np.nanstd] stdfuncs = [np.var, np.std] dsize = [len(d) for d in _rdat] for nf, rf in zip(nanfuncs, stdfuncs): for ddof in range(5): - with suppress_warnings() as sup: - sup.record(RuntimeWarning) - sup.filter(ComplexWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', ComplexWarning) tgt = [ddof >= d for d in dsize] res = nf(_ndat, axis=1, ddof=ddof) assert_equal(np.isnan(res), tgt) - if any(tgt): - assert_(len(sup.log) == 1) - else: - assert_(len(sup.log) == 0) + if any(tgt): + assert_(len(recwarn) == 1) + recwarn.pop(RuntimeWarning) + else: + assert_(len(recwarn) == 0) @pytest.mark.parametrize("axis", [None, 0, 1]) @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) @@ 
-860,8 +860,8 @@ def test_keepdims(self): w = np.random.random((4, 200)) * np.array(d.shape)[:, None] w = w.astype(np.intp) d[tuple(w)] = np.nan - with suppress_warnings() as sup: - sup.filter(RuntimeWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', RuntimeWarning) res = np.nanmedian(d, axis=None, keepdims=True) assert_equal(res.shape, (1, 1, 1, 1)) res = np.nanmedian(d, axis=(0, 1), keepdims=True) @@ -946,17 +946,15 @@ def test_result_values(self): @pytest.mark.parametrize("dtype", _TYPE_CODES) def test_allnans(self, dtype, axis): mat = np.full((3, 3), np.nan).astype(dtype) - with suppress_warnings() as sup: - sup.record(RuntimeWarning) - + with pytest.warns(RuntimeWarning) as r: output = np.nanmedian(mat, axis=axis) assert output.dtype == mat.dtype assert np.isnan(output).all() if axis is None: - assert_(len(sup.log) == 1) + assert_(len(r) == 1) else: - assert_(len(sup.log) == 3) + assert_(len(r) == 3) # Check scalar scalar = np.array(np.nan).astype(dtype)[()] @@ -965,9 +963,9 @@ def test_allnans(self, dtype, axis): assert np.isnan(output_scalar) if axis is None: - assert_(len(sup.log) == 2) + assert_(len(r) == 2) else: - assert_(len(sup.log) == 4) + assert_(len(r) == 4) def test_empty(self): mat = np.zeros((0, 3)) @@ -995,8 +993,8 @@ def test_extended_axis_invalid(self): assert_raises(ValueError, np.nanmedian, d, axis=(1, 1)) def test_float_special(self): - with suppress_warnings() as sup: - sup.filter(RuntimeWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', RuntimeWarning) for inf in [np.inf, -np.inf]: a = np.array([[inf, np.nan], [np.nan, np.nan]]) assert_equal(np.nanmedian(a, axis=0), [inf, np.nan]) @@ -1063,8 +1061,8 @@ def test_keepdims(self): w = np.random.random((4, 200)) * np.array(d.shape)[:, None] w = w.astype(np.intp) d[tuple(w)] = np.nan - with suppress_warnings() as sup: - sup.filter(RuntimeWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', RuntimeWarning) res = 
np.nanpercentile(d, 90, axis=None, keepdims=True) assert_equal(res.shape, (1, 1, 1, 1)) res = np.nanpercentile(d, 90, axis=(0, 1), keepdims=True) @@ -1233,8 +1231,9 @@ def test_multiple_percentiles(self): large_mat[:, :, 3:] *= 2 for axis in [None, 0, 1]: for keepdim in [False, True]: - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, "All-NaN slice encountered") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "All-NaN slice encountered", RuntimeWarning) val = np.percentile(mat, perc, axis=axis, keepdims=keepdim) nan_val = np.nanpercentile(nan_mat, perc, axis=axis, keepdims=keepdim) diff --git a/numpy/lib/tests/test_stride_tricks.py b/numpy/lib/tests/test_stride_tricks.py index fe40c953a147..fb654b4cfb85 100644 --- a/numpy/lib/tests/test_stride_tricks.py +++ b/numpy/lib/tests/test_stride_tricks.py @@ -16,7 +16,6 @@ assert_equal, assert_raises, assert_raises_regex, - assert_warns, ) @@ -593,9 +592,9 @@ def test_writeable(): for array_is_broadcast, result in zip(is_broadcast, results): # This will change to False in a future version if array_is_broadcast: - with assert_warns(FutureWarning): + with pytest.warns(FutureWarning): assert_equal(result.flags.writeable, True) - with assert_warns(DeprecationWarning): + with pytest.warns(DeprecationWarning): result[:] = 0 # Warning not emitted, writing to the array resets it assert_equal(result.flags.writeable, True) diff --git a/numpy/linalg/tests/test_deprecations.py b/numpy/linalg/tests/test_deprecations.py index cd4c10832e7e..7fb5008f1ff8 100644 --- a/numpy/linalg/tests/test_deprecations.py +++ b/numpy/linalg/tests/test_deprecations.py @@ -1,8 +1,9 @@ """Test deprecation and future warnings. 
""" +import pytest + import numpy as np -from numpy.testing import assert_warns def test_qr_mode_full_future_warning(): @@ -14,7 +15,7 @@ def test_qr_mode_full_future_warning(): """ a = np.eye(2) - assert_warns(DeprecationWarning, np.linalg.qr, a, mode='full') - assert_warns(DeprecationWarning, np.linalg.qr, a, mode='f') - assert_warns(DeprecationWarning, np.linalg.qr, a, mode='economic') - assert_warns(DeprecationWarning, np.linalg.qr, a, mode='e') + pytest.warns(DeprecationWarning, np.linalg.qr, a, mode='full') + pytest.warns(DeprecationWarning, np.linalg.qr, a, mode='f') + pytest.warns(DeprecationWarning, np.linalg.qr, a, mode='economic') + pytest.warns(DeprecationWarning, np.linalg.qr, a, mode='e') diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py index cbf7dd63be5e..8ad1c3ed6d16 100644 --- a/numpy/linalg/tests/test_linalg.py +++ b/numpy/linalg/tests/test_linalg.py @@ -8,6 +8,7 @@ import textwrap import threading import traceback +import warnings import pytest @@ -42,7 +43,6 @@ assert_equal, assert_raises, assert_raises_regex, - suppress_warnings, ) try: @@ -1318,8 +1318,9 @@ def test_vector_return_type(self): self.check_dtype(at, an) assert_almost_equal(an, 0.0) - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, "divide by zero encountered") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "divide by zero encountered", RuntimeWarning) an = norm(at, -1) self.check_dtype(at, an) assert_almost_equal(an, 0.0) @@ -1481,8 +1482,9 @@ def test_matrix_return_type(self): self.check_dtype(at, an) assert_almost_equal(an, 2.0) - with suppress_warnings() as sup: - sup.filter(RuntimeWarning, "divide by zero encountered") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "divide by zero encountered", RuntimeWarning) an = norm(at, -1) self.check_dtype(at, an) assert_almost_equal(an, 1.0) diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index 
091ba6c99fff..2704857308a3 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -139,32 +139,24 @@ assert_not_equal, fail_if_equal, ) -from numpy.testing import ( - IS_WASM, - assert_raises, - assert_warns, - suppress_warnings, - temppath, -) +from numpy.testing import IS_WASM, assert_raises, temppath from numpy.testing._private.utils import requires_memory pi = np.pi -suppress_copy_mask_on_assignment = suppress_warnings() -suppress_copy_mask_on_assignment.filter( - numpy.ma.core.MaskedArrayFutureWarning, - "setting an item on a masked array which has a shared mask will not copy") - - # For parametrized numeric testing num_dts = [np.dtype(dt_) for dt_ in '?bhilqBHILQefdgFD'] num_ids = [dt_.char for dt_ in num_dts] +WARNING_MESSAGE = ("setting an item on a masked array which has a shared " + "mask will not copy") +WARNING_MARK_SPEC = f"ignore:.*{WARNING_MESSAGE}:numpy.ma.core.MaskedArrayFutureWarning" class TestMaskedArray: # Base test class for MaskedArrays. + # message for warning filters def setup_method(self): # Base data definition. x = np.array([1., 1., 1., -2., pi / 2.0, 4., 5., -10., 10., 1., 2., 3.]) @@ -492,7 +484,7 @@ def test_setitem_no_warning(self): x[...] = value x[[0, 1, 2]] = value - @suppress_copy_mask_on_assignment + @pytest.mark.filterwarnings(WARNING_MARK_SPEC) def test_copy(self): # Tests of some subtle points of copying and sizing. n = [0, 0, 1, 0, 0] @@ -511,9 +503,9 @@ def test_copy(self): y1a = array(y1) # Default for masked array is not to copy; see gh-10318. 
assert_(y1a._data.__array_interface__ == - y1._data.__array_interface__) + y1._data.__array_interface__) assert_(y1a._mask.__array_interface__ == - y1._mask.__array_interface__) + y1._mask.__array_interface__) y2 = array(x1, mask=m3) assert_(y2._data.__array_interface__ == x1.__array_interface__) @@ -612,7 +604,7 @@ def test_format(self): # assert_equal(format(masked, " <5"), "-- ") # Expect a FutureWarning for using format_spec with MaskedElement - with assert_warns(FutureWarning): + with pytest.warns(FutureWarning): with_format_string = format(masked, " >5") assert_equal(with_format_string, "--") @@ -793,8 +785,9 @@ def test_topython(self): assert_equal(1.0, float(array([[1]]))) assert_raises(TypeError, float, array([1, 1])) - with suppress_warnings() as sup: - sup.filter(UserWarning, 'Warning: converting a masked element') + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', 'Warning: converting a masked element', UserWarning) assert_(np.isnan(float(array([1], mask=[1])))) a = array([1, 2, 3], mask=[1, 0, 0]) @@ -847,14 +840,17 @@ def test_oddfeatures_2(self): assert_(z[1] is not masked) assert_(z[2] is masked) - @suppress_copy_mask_on_assignment def test_oddfeatures_3(self): - # Tests some generic features - atest = array([10], mask=True) - btest = array([20]) - idx = atest.mask - atest[idx] = btest[idx] - assert_equal(atest, [20]) + msg = "setting an item on a masked array which has a shared mask will not copy" + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', msg, numpy.ma.core.MaskedArrayFutureWarning) + # Tests some generic features + atest = array([10], mask=True) + btest = array([20]) + idx = atest.mask + atest[idx] = btest[idx] + assert_equal(atest, [20]) def test_filled_with_object_dtype(self): a = np.ma.masked_all(1, dtype='O') @@ -2004,6 +2000,7 @@ def test_comparisons_strings(self, op, fill): ma2 = masked_array(["cde", "b", "a"], mask=[0, 1, 0], fill_value=fill) assert_equal(op(ma1, ma2)._data, op(ma1._data, 
ma2._data)) + @pytest.mark.filterwarnings("ignore:.*Comparison to `None`.*:FutureWarning") def test_eq_with_None(self): # Really, comparisons with None should not be done, but check them # anyway. Note that pep8 will flag these tests. @@ -2011,23 +2008,21 @@ def test_eq_with_None(self): # test will fail (and have to be changed accordingly). # With partial mask - with suppress_warnings() as sup: - sup.filter(FutureWarning, "Comparison to `None`") - a = array([None, 1], mask=[0, 1]) - assert_equal(a == None, array([True, False], mask=[0, 1])) # noqa: E711 - assert_equal(a.data == None, [True, False]) # noqa: E711 - assert_equal(a != None, array([False, True], mask=[0, 1])) # noqa: E711 - # With nomask - a = array([None, 1], mask=False) - assert_equal(a == None, [True, False]) # noqa: E711 - assert_equal(a != None, [False, True]) # noqa: E711 - # With complete mask - a = array([None, 2], mask=True) - assert_equal(a == None, array([False, True], mask=True)) # noqa: E711 - assert_equal(a != None, array([True, False], mask=True)) # noqa: E711 - # Fully masked, even comparison to None should return "masked" - a = masked - assert_equal(a == None, masked) # noqa: E711 + a = array([None, 1], mask=[0, 1]) + assert_equal(a == None, array([True, False], mask=[0, 1])) # noqa: E711 + assert_equal(a.data == None, [True, False]) # noqa: E711 + assert_equal(a != None, array([False, True], mask=[0, 1])) # noqa: E711 + # With nomask + a = array([None, 1], mask=False) + assert_equal(a == None, [True, False]) # noqa: E711 + assert_equal(a != None, [False, True]) # noqa: E711 + # With complete mask + a = array([None, 2], mask=True) + assert_equal(a == None, array([False, True], mask=True)) # noqa: E711 + assert_equal(a != None, array([True, False], mask=True)) # noqa: E711 + # Fully masked, even comparison to None should return "masked" + a = masked + assert_equal(a == None, masked) # noqa: E711 def test_eq_with_scalar(self): a = array(1) @@ -2383,16 +2378,15 @@ def test_fillvalue(self): 
assert_equal(x.fill_value, 999.) assert_equal(x._fill_value, np.array(999.)) + @pytest.mark.filterwarnings("ignore:.*Numpy has detected.*:FutureWarning") def test_subarray_fillvalue(self): # gh-10483 test multi-field index fill value fields = array([(1, 1, 1)], dtype=[('i', int), ('s', '|S8'), ('f', float)]) - with suppress_warnings() as sup: - sup.filter(FutureWarning, "Numpy has detected") - subfields = fields[['i', 'f']] - assert_equal(tuple(subfields.fill_value), (999999, 1.e+20)) - # test comparison does not raise: - subfields[1:] == subfields[:-1] + subfields = fields[['i', 'f']] + assert_equal(tuple(subfields.fill_value), (999999, 1.e+20)) + # test comparison does not raise: + subfields[1:] == subfields[:-1] def test_fillvalue_exotic_dtype(self): # Tests yet more exotic flexible dtypes @@ -3138,14 +3132,14 @@ def test_inplace_floor_division_array_type(self): def test_inplace_division_scalar_type(self): # Test of inplace division - for t in self.othertypes: - with suppress_warnings() as sup: - sup.record(UserWarning) - + with warnings.catch_warnings(): + warnings.simplefilter('error', DeprecationWarning) + for t in self.othertypes: (x, y, xm) = (_.astype(t) for _ in self.uint8data) x = arange(10, dtype=t) * t(2) xm = arange(10, dtype=t) * t(2) xm[2] = masked + nwarns = 0 # May get a DeprecationWarning or a TypeError. 
# @@ -3159,28 +3153,29 @@ def test_inplace_division_scalar_type(self): try: x /= t(2) assert_equal(x, y) - except (DeprecationWarning, TypeError) as e: - warnings.warn(str(e), stacklevel=1) + except (DeprecationWarning, TypeError): + nwarns += 1 try: xm /= t(2) assert_equal(xm, y) - except (DeprecationWarning, TypeError) as e: - warnings.warn(str(e), stacklevel=1) + except (DeprecationWarning, TypeError): + nwarns += 1 if issubclass(t, np.integer): - assert_equal(len(sup.log), 2, f'Failed on type={t}.') + assert_equal(nwarns, 2, f'Failed on type={t}.') else: - assert_equal(len(sup.log), 0, f'Failed on type={t}.') + assert_equal(nwarns, 0, f'Failed on type={t}.') def test_inplace_division_array_type(self): # Test of inplace division - for t in self.othertypes: - with suppress_warnings() as sup: - sup.record(UserWarning) + with warnings.catch_warnings(): + warnings.simplefilter('error', DeprecationWarning) + for t in self.othertypes: (x, y, xm) = (_.astype(t) for _ in self.uint8data) m = xm.mask a = arange(10, dtype=t) a[-1] = masked + nwarns = 0 # May get a DeprecationWarning or a TypeError. 
# @@ -3194,8 +3189,8 @@ def test_inplace_division_array_type(self): try: x /= a assert_equal(x, y / a) - except (DeprecationWarning, TypeError) as e: - warnings.warn(str(e), stacklevel=1) + except (DeprecationWarning, TypeError): + nwarns += 1 try: xm /= a assert_equal(xm, y / a) @@ -3203,13 +3198,13 @@ def test_inplace_division_array_type(self): xm.mask, mask_or(mask_or(m, a.mask), (a == t(0))) ) - except (DeprecationWarning, TypeError) as e: - warnings.warn(str(e), stacklevel=1) + except (DeprecationWarning, TypeError): + nwarns += 1 if issubclass(t, np.integer): - assert_equal(len(sup.log), 2, f'Failed on type={t}.') + assert_equal(nwarns, 2, f'Failed on type={t}.') else: - assert_equal(len(sup.log), 0, f'Failed on type={t}.') + assert_equal(nwarns, 0, f'Failed on type={t}.') def test_inplace_pow_type(self): # Test keeping data w/ (inplace) power @@ -3508,7 +3503,7 @@ def test_ones(self): b = a.view() assert_(np.may_share_memory(a.mask, b.mask)) - @suppress_copy_mask_on_assignment + @pytest.mark.filterwarnings(WARNING_MARK_SPEC) def test_put(self): # Tests put. 
d = arange(5) @@ -4226,7 +4221,7 @@ def test_varstd(self): assert_almost_equal(np.sqrt(mXvar0[k]), mX[:, k].compressed().std()) - @suppress_copy_mask_on_assignment + @pytest.mark.filterwarnings(WARNING_MARK_SPEC) def test_varstd_specialcases(self): # Test a special case for var nout = np.array(-1, dtype=float) @@ -5582,7 +5577,7 @@ def test_coercion_int(self): def test_coercion_float(self): a_f = np.zeros((), float) - assert_warns(UserWarning, operator.setitem, a_f, (), np.ma.masked) + pytest.warns(UserWarning, operator.setitem, a_f, (), np.ma.masked) assert_(np.isnan(a_f[()])) @pytest.mark.xfail(reason="See gh-9750") diff --git a/numpy/ma/tests/test_deprecations.py b/numpy/ma/tests/test_deprecations.py index 8cc8b9c72bb9..a2c98d0c229d 100644 --- a/numpy/ma/tests/test_deprecations.py +++ b/numpy/ma/tests/test_deprecations.py @@ -9,7 +9,6 @@ import numpy as np from numpy.ma.core import MaskedArrayFutureWarning from numpy.ma.testutils import assert_equal -from numpy.testing import assert_warns class TestArgsort: @@ -23,7 +22,7 @@ def _test_base(self, argsort, cls): # argsort has a bad default for >1d arrays arr_2d = np.array([[1, 2], [3, 4]]).view(cls) - result = assert_warns( + result = pytest.warns( np.ma.core.MaskedArrayFutureWarning, argsort, arr_2d) assert_equal(result, argsort(arr_2d, axis=None)) @@ -53,10 +52,10 @@ def test_axis_default(self): ma_max = np.ma.maximum.reduce # check that the default axis is still None, but warns on 2d arrays - result = assert_warns(MaskedArrayFutureWarning, ma_max, data2d) + result = pytest.warns(MaskedArrayFutureWarning, ma_max, data2d) assert_equal(result, ma_max(data2d, axis=None)) - result = assert_warns(MaskedArrayFutureWarning, ma_min, data2d) + result = pytest.warns(MaskedArrayFutureWarning, ma_min, data2d) assert_equal(result, ma_min(data2d, axis=None)) # no warnings on 1d, as both new and old defaults are equivalent diff --git a/numpy/ma/tests/test_extras.py b/numpy/ma/tests/test_extras.py index 
3d10e839cbc9..34a032087536 100644 --- a/numpy/ma/tests/test_extras.py +++ b/numpy/ma/tests/test_extras.py @@ -68,7 +68,6 @@ assert_array_equal, assert_equal, ) -from numpy.testing import assert_warns, suppress_warnings class TestGeneric: @@ -746,7 +745,7 @@ def test_mask_row_cols_axis_deprecation(self, axis, func, rowcols_axis): x = array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]]) - with assert_warns(DeprecationWarning): + with pytest.warns(DeprecationWarning): res = func(x, axis=axis) assert_equal(res, mask_rowcols(x, rowcols_axis)) @@ -1287,19 +1286,14 @@ def test_special(self): def test_empty(self): # empty arrays a = np.ma.masked_array(np.array([], dtype=float)) - with suppress_warnings() as w: - w.record(RuntimeWarning) + with pytest.warns(RuntimeWarning): assert_array_equal(np.ma.median(a), np.nan) - assert_(w.log[0].category is RuntimeWarning) # multiple dimensions a = np.ma.masked_array(np.array([], dtype=float, ndmin=3)) # no axis - with suppress_warnings() as w: - w.record(RuntimeWarning) - warnings.filterwarnings('always', '', RuntimeWarning) + with pytest.warns(RuntimeWarning): assert_array_equal(np.ma.median(a), np.nan) - assert_(w.log[0].category is RuntimeWarning) # axis 0 and 1 b = np.ma.masked_array(np.array([], dtype=float, ndmin=2)) @@ -1308,10 +1302,8 @@ def test_empty(self): # axis 2 b = np.ma.masked_array(np.array(np.nan, dtype=float, ndmin=2)) - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', RuntimeWarning) + with pytest.warns(RuntimeWarning): assert_equal(np.ma.median(a, axis=2), b) - assert_(w[0].category is RuntimeWarning) def test_object(self): o = np.ma.masked_array(np.arange(7.)) @@ -1418,10 +1410,13 @@ def test_ddof(self): x, y = self.data, self.data2 expected = np.corrcoef(x) expected2 = np.corrcoef(x, y) - with suppress_warnings() as sup: - warnings.simplefilter("always") - assert_warns(DeprecationWarning, corrcoef, x, ddof=-1) - sup.filter(DeprecationWarning, "bias 
and ddof have no effect") + with pytest.warns(DeprecationWarning): + corrcoef(x, ddof=-1) + + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "bias and ddof have no effect", DeprecationWarning) + # ddof has no or negligible effect on the function assert_almost_equal(np.corrcoef(x, ddof=0), corrcoef(x, ddof=0)) assert_almost_equal(corrcoef(x, ddof=-1), expected) @@ -1433,12 +1428,16 @@ def test_bias(self): x, y = self.data, self.data2 expected = np.corrcoef(x) # bias raises DeprecationWarning - with suppress_warnings() as sup: - warnings.simplefilter("always") - assert_warns(DeprecationWarning, corrcoef, x, y, True, False) - assert_warns(DeprecationWarning, corrcoef, x, y, True, True) - assert_warns(DeprecationWarning, corrcoef, x, bias=False) - sup.filter(DeprecationWarning, "bias and ddof have no effect") + with pytest.warns(DeprecationWarning): + corrcoef(x, y, True, False) + with pytest.warns(DeprecationWarning): + corrcoef(x, y, True, True) + with pytest.warns(DeprecationWarning): + corrcoef(x, y, bias=False) + + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "bias and ddof have no effect", DeprecationWarning) # bias has no or negligible effect on the function assert_almost_equal(corrcoef(x, bias=1), expected) @@ -1448,8 +1447,9 @@ def test_1d_without_missing(self): assert_almost_equal(np.corrcoef(x), corrcoef(x)) assert_almost_equal(np.corrcoef(x, rowvar=False), corrcoef(x, rowvar=False)) - with suppress_warnings() as sup: - sup.filter(DeprecationWarning, "bias and ddof have no effect") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "bias and ddof have no effect", DeprecationWarning) assert_almost_equal(np.corrcoef(x, rowvar=False, bias=True), corrcoef(x, rowvar=False, bias=True)) @@ -1459,8 +1459,9 @@ def test_2d_without_missing(self): assert_almost_equal(np.corrcoef(x), corrcoef(x)) assert_almost_equal(np.corrcoef(x, rowvar=False), corrcoef(x, rowvar=False)) - with suppress_warnings() 
as sup: - sup.filter(DeprecationWarning, "bias and ddof have no effect") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "bias and ddof have no effect", DeprecationWarning) assert_almost_equal(np.corrcoef(x, rowvar=False, bias=True), corrcoef(x, rowvar=False, bias=True)) @@ -1473,8 +1474,9 @@ def test_1d_with_missing(self): assert_almost_equal(np.corrcoef(nx), corrcoef(x)) assert_almost_equal(np.corrcoef(nx, rowvar=False), corrcoef(x, rowvar=False)) - with suppress_warnings() as sup: - sup.filter(DeprecationWarning, "bias and ddof have no effect") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "bias and ddof have no effect", DeprecationWarning) assert_almost_equal(np.corrcoef(nx, rowvar=False, bias=True), corrcoef(x, rowvar=False, bias=True)) try: @@ -1486,8 +1488,9 @@ def test_1d_with_missing(self): assert_almost_equal(np.corrcoef(nx, nx[::-1]), corrcoef(x, x[::-1])) assert_almost_equal(np.corrcoef(nx, nx[::-1], rowvar=False), corrcoef(x, x[::-1], rowvar=False)) - with suppress_warnings() as sup: - sup.filter(DeprecationWarning, "bias and ddof have no effect") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "bias and ddof have no effect", DeprecationWarning) # ddof and bias have no or negligible effect on the function assert_almost_equal(np.corrcoef(nx, nx[::-1]), corrcoef(x, x[::-1], bias=1)) @@ -1503,8 +1506,9 @@ def test_2d_with_missing(self): test = corrcoef(x) control = np.corrcoef(x) assert_almost_equal(test[:-1, :-1], control[:-1, :-1]) - with suppress_warnings() as sup: - sup.filter(DeprecationWarning, "bias and ddof have no effect") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "bias and ddof have no effect", DeprecationWarning) # ddof and bias have no or negligible effect on the function assert_almost_equal(corrcoef(x, ddof=-2)[:-1, :-1], control[:-1, :-1]) diff --git a/numpy/ma/tests/test_regression.py b/numpy/ma/tests/test_regression.py index 
025387ba454c..2a08234cba61 100644 --- a/numpy/ma/tests/test_regression.py +++ b/numpy/ma/tests/test_regression.py @@ -1,10 +1,7 @@ +import warnings + import numpy as np -from numpy.testing import ( - assert_, - assert_allclose, - assert_array_equal, - suppress_warnings, -) +from numpy.testing import assert_, assert_allclose, assert_array_equal class TestRegression: @@ -67,8 +64,9 @@ def test_ddof_corrcoef(self): x = np.ma.masked_equal([1, 2, 3, 4, 5], 4) y = np.array([2, 2.5, 3.1, 3, 5]) # this test can be removed after deprecation. - with suppress_warnings() as sup: - sup.filter(DeprecationWarning, "bias and ddof have no effect") + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "bias and ddof have no effect", DeprecationWarning) r0 = np.ma.corrcoef(x, y, ddof=0) r1 = np.ma.corrcoef(x, y, ddof=1) # ddof should not have an effect (it gets cancelled out) diff --git a/numpy/polynomial/tests/test_polynomial.py b/numpy/polynomial/tests/test_polynomial.py index 8bfa3c184cf7..5c8e85c7f860 100644 --- a/numpy/polynomial/tests/test_polynomial.py +++ b/numpy/polynomial/tests/test_polynomial.py @@ -6,6 +6,8 @@ from fractions import Fraction from functools import reduce +import pytest + import numpy as np import numpy.polynomial.polynomial as poly import numpy.polynomial.polyutils as pu @@ -16,7 +18,6 @@ assert_equal, assert_raises, assert_raises_regex, - assert_warns, ) @@ -656,7 +657,7 @@ def test_fit_degenerate_domain(self): assert_equal(p.coef, [2.]) p = poly.Polynomial.fit([1, 1], [2, 2.1], deg=0) assert_almost_equal(p.coef, [2.05]) - with assert_warns(pu.RankWarning): + with pytest.warns(pu.RankWarning): p = poly.Polynomial.fit([1, 1], [2, 2.1], deg=1) def test_result_type(self): diff --git a/numpy/random/tests/test_generator_mt19937.py b/numpy/random/tests/test_generator_mt19937.py index d09cbba4ec39..50c232d4a8e7 100644 --- a/numpy/random/tests/test_generator_mt19937.py +++ b/numpy/random/tests/test_generator_mt19937.py @@ -1,6 +1,7 @@ import 
hashlib import os.path import sys +import warnings import pytest @@ -17,8 +18,6 @@ assert_equal, assert_no_warnings, assert_raises, - assert_warns, - suppress_warnings, ) random = Generator(MT19937()) @@ -1463,8 +1462,8 @@ def test_multivariate_normal(self, method): # Check that non positive-semidefinite covariance warns with # RuntimeWarning cov = [[1, 2], [2, 1]] - assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov) - assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov, + pytest.warns(RuntimeWarning, random.multivariate_normal, mean, cov) + pytest.warns(RuntimeWarning, random.multivariate_normal, mean, cov, method='eigh') assert_raises(LinAlgError, random.multivariate_normal, mean, cov, method='cholesky') @@ -1491,10 +1490,9 @@ def test_multivariate_normal(self, method): method='cholesky') cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32) - with suppress_warnings() as sup: + with warnings.catch_warnings(): + warnings.simplefilter("error") random.multivariate_normal(mean, cov, method=method) - w = sup.record(RuntimeWarning) - assert len(w) == 0 mu = np.zeros(2) cov = np.eye(2) diff --git a/numpy/random/tests/test_random.py b/numpy/random/tests/test_random.py index d5981906f6ef..4eb455eb77be 100644 --- a/numpy/random/tests/test_random.py +++ b/numpy/random/tests/test_random.py @@ -13,8 +13,6 @@ assert_equal, assert_no_warnings, assert_raises, - assert_warns, - suppress_warnings, ) @@ -331,10 +329,8 @@ def test_randint(self): def test_random_integers(self): np.random.seed(self.seed) - with suppress_warnings() as sup: - w = sup.record(DeprecationWarning) + with pytest.warns(DeprecationWarning): actual = np.random.random_integers(-99, 99, size=(3, 2)) - assert_(len(w) == 1) desired = np.array([[31, 3], [-52, 41], [-48, -66]]) @@ -346,11 +342,9 @@ def test_random_integers_max_int(self): # into a C long. Previous implementations of this # method have thrown an OverflowError when attempting # to generate this integer. 
- with suppress_warnings() as sup: - w = sup.record(DeprecationWarning) + with pytest.warns(DeprecationWarning): actual = np.random.random_integers(np.iinfo('l').max, np.iinfo('l').max) - assert_(len(w) == 1) desired = np.iinfo('l').max assert_equal(actual, desired) @@ -795,7 +789,7 @@ def test_multivariate_normal(self): # RuntimeWarning mean = [0, 0] cov = [[1, 2], [2, 1]] - assert_warns(RuntimeWarning, np.random.multivariate_normal, mean, cov) + pytest.warns(RuntimeWarning, np.random.multivariate_normal, mean, cov) # and that it doesn't warn with RuntimeWarning check_valid='ignore' assert_no_warnings(np.random.multivariate_normal, mean, cov, @@ -806,10 +800,9 @@ def test_multivariate_normal(self): check_valid='raise') cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32) - with suppress_warnings() as sup: + with warnings.catch_warnings(): + warnings.simplefilter('error') np.random.multivariate_normal(mean, cov) - w = sup.record(RuntimeWarning) - assert len(w) == 0 def test_negative_binomial(self): np.random.seed(self.seed) diff --git a/numpy/random/tests/test_randomstate.py b/numpy/random/tests/test_randomstate.py index cf4488543c12..c56d3c0f186c 100644 --- a/numpy/random/tests/test_randomstate.py +++ b/numpy/random/tests/test_randomstate.py @@ -16,8 +16,6 @@ assert_equal, assert_no_warnings, assert_raises, - assert_warns, - suppress_warnings, ) INT_FUNCS = {'binomial': (100.0, 0.6), @@ -242,12 +240,10 @@ def test_negative_binomial(self): def test_get_state_warning(self): rs = random.RandomState(PCG64()) - with suppress_warnings() as sup: - w = sup.record(RuntimeWarning) + with pytest.warns(RuntimeWarning): state = rs.get_state() - assert_(len(w) == 1) - assert isinstance(state, dict) - assert state['bit_generator'] == 'PCG64' + assert isinstance(state, dict) + assert state['bit_generator'] == 'PCG64' def test_invalid_legacy_state_setting(self): state = self.random_state.get_state() @@ -487,20 +483,16 @@ def test_randint(self): def test_random_integers(self): 
random.seed(self.seed) - with suppress_warnings() as sup: - w = sup.record(DeprecationWarning) + with pytest.warns(DeprecationWarning): actual = random.random_integers(-99, 99, size=(3, 2)) - assert_(len(w) == 1) desired = np.array([[31, 3], [-52, 41], [-48, -66]]) assert_array_equal(actual, desired) random.seed(self.seed) - with suppress_warnings() as sup: - w = sup.record(DeprecationWarning) + with pytest.warns(DeprecationWarning): actual = random.random_integers(198, size=(3, 2)) - assert_(len(w) == 1) assert_array_equal(actual, desired + 100) def test_tomaxint(self): @@ -529,20 +521,16 @@ def test_random_integers_max_int(self): # into a C long. Previous implementations of this # method have thrown an OverflowError when attempting # to generate this integer. - with suppress_warnings() as sup: - w = sup.record(DeprecationWarning) + with pytest.warns(DeprecationWarning): actual = random.random_integers(np.iinfo('l').max, np.iinfo('l').max) - assert_(len(w) == 1) desired = np.iinfo('l').max assert_equal(actual, desired) - with suppress_warnings() as sup: - w = sup.record(DeprecationWarning) + with pytest.warns(DeprecationWarning): typer = np.dtype('l').type actual = random.random_integers(typer(np.iinfo('l').max), typer(np.iinfo('l').max)) - assert_(len(w) == 1) assert_equal(actual, desired) def test_random_integers_deprecated(self): @@ -879,8 +867,8 @@ def test_geometric_exceptions(self): assert_raises(ValueError, random.geometric, [1.1] * 10) assert_raises(ValueError, random.geometric, -0.1) assert_raises(ValueError, random.geometric, [-0.1] * 10) - with suppress_warnings() as sup: - sup.record(RuntimeWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', RuntimeWarning) assert_raises(ValueError, random.geometric, np.nan) assert_raises(ValueError, random.geometric, [np.nan] * 10) @@ -1012,7 +1000,7 @@ def test_multivariate_normal(self): # RuntimeWarning mean = [0, 0] cov = [[1, 2], [2, 1]] - assert_warns(RuntimeWarning, 
random.multivariate_normal, mean, cov) + pytest.warns(RuntimeWarning, random.multivariate_normal, mean, cov) # and that it doesn't warn with RuntimeWarning check_valid='ignore' assert_no_warnings(random.multivariate_normal, mean, cov, @@ -1023,10 +1011,9 @@ def test_multivariate_normal(self): check_valid='raise') cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32) - with suppress_warnings() as sup: + with warnings.catch_warnings(): + warnings.simplefilter('error', RuntimeWarning) random.multivariate_normal(mean, cov) - w = sup.record(RuntimeWarning) - assert len(w) == 0 mu = np.zeros(2) cov = np.eye(2) @@ -1048,8 +1035,8 @@ def test_negative_binomial(self): assert_array_equal(actual, desired) def test_negative_binomial_exceptions(self): - with suppress_warnings() as sup: - sup.record(RuntimeWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', RuntimeWarning) assert_raises(ValueError, random.negative_binomial, 100, np.nan) assert_raises(ValueError, random.negative_binomial, 100, [np.nan] * 10) @@ -1131,8 +1118,8 @@ def test_poisson_exceptions(self): assert_raises(ValueError, random.poisson, [lamneg] * 10) assert_raises(ValueError, random.poisson, lambig) assert_raises(ValueError, random.poisson, [lambig] * 10) - with suppress_warnings() as sup: - sup.record(RuntimeWarning) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', RuntimeWarning) assert_raises(ValueError, random.poisson, np.nan) assert_raises(ValueError, random.poisson, [np.nan] * 10) diff --git a/numpy/tests/test_reloading.py b/numpy/tests/test_reloading.py index 9787bcbbc101..b70a715237a5 100644 --- a/numpy/tests/test_reloading.py +++ b/numpy/tests/test_reloading.py @@ -7,7 +7,7 @@ import pytest import numpy.exceptions as ex -from numpy.testing import IS_WASM, assert_, assert_equal, assert_raises, assert_warns +from numpy.testing import IS_WASM, assert_, assert_equal, assert_raises def test_numpy_reloading(): @@ -19,14 +19,14 @@ def test_numpy_reloading(): 
VisibleDeprecationWarning = ex.VisibleDeprecationWarning ModuleDeprecationWarning = ex.ModuleDeprecationWarning - with assert_warns(UserWarning): + with pytest.warns(UserWarning): reload(np) assert_(_NoValue is np._NoValue) assert_(ModuleDeprecationWarning is ex.ModuleDeprecationWarning) assert_(VisibleDeprecationWarning is ex.VisibleDeprecationWarning) assert_raises(RuntimeError, reload, numpy._globals) - with assert_warns(UserWarning): + with pytest.warns(UserWarning): reload(np) assert_(_NoValue is np._NoValue) assert_(ModuleDeprecationWarning is ex.ModuleDeprecationWarning) diff --git a/numpy/tests/test_warnings.py b/numpy/tests/test_warnings.py index 560ee6143265..7efa2a1d1896 100644 --- a/numpy/tests/test_warnings.py +++ b/numpy/tests/test_warnings.py @@ -34,10 +34,11 @@ def visit_Call(self, node): ast.NodeVisitor.generic_visit(self, node) if p.ls[-1] == 'simplefilter' or p.ls[-1] == 'filterwarnings': - if node.args[0].value == "ignore": - raise AssertionError( - "warnings should have an appropriate stacklevel; " - f"found in {self.__filename} on line {node.lineno}") + if getattr(node.args[0], "value", None) == "ignore": + if not self.__filename.name.startswith("test_"): + raise AssertionError( + "ignore filters should only be used in tests; " + f"found in {self.__filename} on line {node.lineno}") if p.ls[-1] == 'warn' and ( len(p.ls) == 1 or p.ls[-2] == 'warnings'): From 67e5848521b6a857205639be66bb1b29823aa7e6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 7 Jul 2025 17:35:39 -0600 Subject: [PATCH 0199/1718] MAINT: Bump pypa/cibuildwheel from 3.0.0 to 3.0.1 (#29334) Bumps [pypa/cibuildwheel](https://github.com/pypa/cibuildwheel) from 3.0.0 to 3.0.1. 
- [Release notes](https://github.com/pypa/cibuildwheel/releases) - [Changelog](https://github.com/pypa/cibuildwheel/blob/main/docs/changelog.md) - [Commits](https://github.com/pypa/cibuildwheel/compare/5f22145df44122af0f5a201f93cf0207171beca7...95d2f3a92fbf80abe066b09418bbf128a8923df2) --- updated-dependencies: - dependency-name: pypa/cibuildwheel dependency-version: 3.0.1 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/emscripten.yml | 2 +- .github/workflows/wheels.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index 453a67088adf..ce0c2e803143 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -49,7 +49,7 @@ jobs: fetch-tags: true persist-credentials: false - - uses: pypa/cibuildwheel@5f22145df44122af0f5a201f93cf0207171beca7 # v3.0.0 + - uses: pypa/cibuildwheel@95d2f3a92fbf80abe066b09418bbf128a8923df2 # v3.0.1 env: CIBW_PLATFORM: pyodide diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index ab6bbb618899..223ec38898cf 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -175,7 +175,7 @@ jobs: fi - name: Build wheels - uses: pypa/cibuildwheel@5f22145df44122af0f5a201f93cf0207171beca7 # v3.0.0 + uses: pypa/cibuildwheel@95d2f3a92fbf80abe066b09418bbf128a8923df2 # v3.0.1 env: CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} From 2028c7b0c53868dab9d6efaf07b796640f311970 Mon Sep 17 00:00:00 2001 From: Dan Raviv Date: Mon, 7 Jul 2025 16:36:27 -0700 Subject: [PATCH 0200/1718] DOC: vectorize with signature doesn't pre-call function --- numpy/lib/_function_base_impl.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 
f5690a829fc6..0379c3c60a1d 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -2332,7 +2332,7 @@ class vectorize: cache : bool, optional If `True`, then cache the first function call that determines the number - of outputs if `otypes` is not provided. + of outputs if `otypes` and `signature` are not provided. signature : string, optional Generalized universal function signature, e.g., ``(m,n),(n)->(m)`` for @@ -2356,7 +2356,7 @@ class vectorize: The `vectorize` function is provided primarily for convenience, not for performance. The implementation is essentially a for loop. - If `otypes` is not specified, then a call to the function with the + If `otypes` and `signature` are not specified, then a call to the function with the first argument will be used to determine the number of outputs. The results of this call will be cached if `cache` is `True` to prevent calling the function twice. However, to implement the cache, the From d4c09dfde305cefe328cbc8c952dd4c41836a41c Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Tue, 8 Jul 2025 16:23:42 +0200 Subject: [PATCH 0201/1718] TST: Avoid uninitialized values in test (#29341) I think I missed this one before once... can cause occasional warnings if uninitialized. 
Signed-off-by: Sebastian Berg --- numpy/_core/tests/test_ufunc.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/numpy/_core/tests/test_ufunc.py b/numpy/_core/tests/test_ufunc.py index b86e22917734..9c5489a614e8 100644 --- a/numpy/_core/tests/test_ufunc.py +++ b/numpy/_core/tests/test_ufunc.py @@ -2129,7 +2129,8 @@ class ArrayPriorityMinus2000(ArrayPriorityBase): assert np.add(y, x) is ArrayPriorityMinus1000 assert np.add(x, xb) is ArrayPriorityMinus1000 assert np.add(xb, x) is ArrayPriorityMinus1000b - assert np.add(np.zeros(2), ArrayPriorityMinus0(2)) is ArrayPriorityMinus0 + y_minus0 = np.zeros(2).view(ArrayPriorityMinus0) + assert np.add(np.zeros(2), y_minus0) is ArrayPriorityMinus0 assert type(np.add(xb, x, np.zeros(2))) is np.ndarray @pytest.mark.parametrize("a", ( From 46c0bffa8b137762dee19cbe1541790bd1108fb0 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Tue, 8 Jul 2025 19:19:24 +0100 Subject: [PATCH 0202/1718] TYP: correct default value of `unicode` in `chararray.__new__` (#29340) --- numpy/_core/defchararray.pyi | 14 +++++++++++++- numpy/lib/_polynomial_impl.pyi | 26 ++++++++++++++++++++++++-- 2 files changed, 37 insertions(+), 3 deletions(-) diff --git a/numpy/_core/defchararray.pyi b/numpy/_core/defchararray.pyi index 25754965c46a..680875240096 100644 --- a/numpy/_core/defchararray.pyi +++ b/numpy/_core/defchararray.pyi @@ -114,11 +114,23 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): order: _OrderKACF = ..., ) -> _CharArray[bytes_]: ... @overload + def __new__( + subtype, + shape: _ShapeLike, + itemsize: SupportsIndex | SupportsInt, + unicode: L[True], + buffer: _SupportsBuffer = ..., + offset: SupportsIndex = ..., + strides: _ShapeLike = ..., + order: _OrderKACF = ..., + ) -> _CharArray[str_]: ... 
+ @overload def __new__( subtype, shape: _ShapeLike, itemsize: SupportsIndex | SupportsInt = ..., - unicode: L[True] = ..., + *, + unicode: L[True], buffer: _SupportsBuffer = ..., offset: SupportsIndex = ..., strides: _ShapeLike = ..., diff --git a/numpy/lib/_polynomial_impl.pyi b/numpy/lib/_polynomial_impl.pyi index 3da1a2af60c9..3b0eade1399e 100644 --- a/numpy/lib/_polynomial_impl.pyi +++ b/numpy/lib/_polynomial_impl.pyi @@ -151,22 +151,44 @@ def polyfit( cov: L[True, "unscaled"] = ..., ) -> _2Tup[NDArray[complex128]]: ... @overload +def polyfit( + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, + deg: SupportsIndex | SupportsInt, + rcond: float | None, + full: L[True], + w: _ArrayLikeFloat_co | None = ..., + cov: bool | L["unscaled"] = ..., +) -> _5Tup[NDArray[float64]]: ... +@overload def polyfit( x: _ArrayLikeFloat_co, y: _ArrayLikeFloat_co, deg: SupportsIndex | SupportsInt, rcond: float | None = ..., - full: L[True] = ..., + *, + full: L[True], w: _ArrayLikeFloat_co | None = ..., cov: bool | L["unscaled"] = ..., ) -> _5Tup[NDArray[float64]]: ... @overload +def polyfit( + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + deg: SupportsIndex | SupportsInt, + rcond: float | None, + full: L[True], + w: _ArrayLikeFloat_co | None = ..., + cov: bool | L["unscaled"] = ..., +) -> _5Tup[NDArray[complex128]]: ... +@overload def polyfit( x: _ArrayLikeComplex_co, y: _ArrayLikeComplex_co, deg: SupportsIndex | SupportsInt, rcond: float | None = ..., - full: L[True] = ..., + *, + full: L[True], w: _ArrayLikeFloat_co | None = ..., cov: bool | L["unscaled"] = ..., ) -> _5Tup[NDArray[complex128]]: ... 
From 2e93c4639f46403b995c910f01e2db33000e854a Mon Sep 17 00:00:00 2001 From: Dan Raviv Date: Wed, 9 Jul 2025 01:08:36 -0700 Subject: [PATCH 0203/1718] DOC: Add missing `self` in `__array_ufunc__` signature (#29343) --- doc/source/user/basics.subclassing.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/user/basics.subclassing.rst b/doc/source/user/basics.subclassing.rst index 7b1e8fd34512..a937521a7abb 100644 --- a/doc/source/user/basics.subclassing.rst +++ b/doc/source/user/basics.subclassing.rst @@ -469,7 +469,7 @@ implemented. The signature of ``__array_ufunc__`` is:: - def __array_ufunc__(ufunc, method, *inputs, **kwargs): + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): - *ufunc* is the ufunc object that was called. - *method* is a string indicating how the Ufunc was called, either From 34776f4c86c3399670144b6b035cb5eaf8a4ec33 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 9 Jul 2025 10:33:27 +0200 Subject: [PATCH 0204/1718] API,BUG: Fix scalar handling in array-interface allowing NULL pointers (#29338) This fixes the scalar handling in the array-interface and also a bug in the shape handling error path. In theory it breaks code that used NULL to trigger the scalar path, but this path was: 1. Completely undocumented, and 2. correctly triggered by *ommitting* the data field instead. I didn't remove/deprecate the scalar path in this PR, maybe we should. But, I do think we can ignore that theoretical use-case since it is nonsensical. 
Signed-off-by: Sebastian Berg --- doc/release/upcoming_changes/29338.change.rst | 9 ++++ doc/source/reference/arrays.interface.rst | 18 +++++--- numpy/_core/src/multiarray/ctors.c | 28 ++++++++++--- numpy/_core/tests/test_multiarray.py | 41 ++++++++++++++++++- 4 files changed, 84 insertions(+), 12 deletions(-) create mode 100644 doc/release/upcoming_changes/29338.change.rst diff --git a/doc/release/upcoming_changes/29338.change.rst b/doc/release/upcoming_changes/29338.change.rst new file mode 100644 index 000000000000..64bf188009c8 --- /dev/null +++ b/doc/release/upcoming_changes/29338.change.rst @@ -0,0 +1,9 @@ +``__array_interface__`` with NULL pointer changed +------------------------------------------------- +The array interface now accepts NULL pointers (NumPy will do +its own dummy allocation, though). +Previously, these incorrectly triggered an undocumented +scalar path. +In the unlikely event that the scalar path was actually desired, +you can (for now) achieve the previous behavior via the correct +scalar path by not providing a ``data`` field at all. diff --git a/doc/source/reference/arrays.interface.rst b/doc/source/reference/arrays.interface.rst index ebe3f6b68918..75c17060c8fc 100644 --- a/doc/source/reference/arrays.interface.rst +++ b/doc/source/reference/arrays.interface.rst @@ -120,7 +120,7 @@ This approach to the interface consists of the object having an **Default**: ``[('', typestr)]`` - **data** (optional) + **data** A 2-tuple whose first argument is a :doc:`Python integer ` that points to the data-area storing the array contents. @@ -136,15 +136,23 @@ This approach to the interface consists of the object having an This attribute can also be an object exposing the :ref:`buffer interface ` which - will be used to share the data. If this key is not present (or - returns None), then memory sharing will be done - through the buffer interface of the object itself. In this + will be used to share the data. 
If this key is ``None``, then memory sharing + will be done through the buffer interface of the object itself. In this case, the offset key can be used to indicate the start of the buffer. A reference to the object exposing the array interface must be stored by the new object if the memory area is to be secured. - **Default**: ``None`` + .. note:: + Not specifying this field uses a "scalar" path that we may remove in the future + as we are not aware of any users. In this case, NumPy assigns the original object + as a scalar into the array. + + .. versionchanged:: 2.4 + Prior to NumPy 2.4 a ``NULL`` pointer used the undocumented "scalar" path + and was thus usually not accepted (and triggered crashes on some paths). + After NumPy 2.4, ``NULL`` is accepted, although NumPy will create a 1-byte sized + new allocation for the array. **strides** (optional) Either ``None`` to indicate a C-style contiguous array or diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index f7efe5041ab3..ff30e581cd91 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -2141,6 +2141,7 @@ PyArray_FromInterface(PyObject *origin) Py_ssize_t i, n; npy_intp dims[NPY_MAXDIMS], strides[NPY_MAXDIMS]; int dataflags = NPY_ARRAY_BEHAVED; + int use_scalar_assign = 0; if (PyArray_LookupSpecial_OnInstance( origin, npy_interned_str.array_interface, &iface) < 0) { @@ -2229,12 +2230,10 @@ PyArray_FromInterface(PyObject *origin) /* Shape must be specified when 'data' is specified */ int result = PyDict_ContainsString(iface, "data"); if (result < 0) { - Py_DECREF(attr); return NULL; } else if (result == 1) { Py_DECREF(iface); - Py_DECREF(attr); PyErr_SetString(PyExc_ValueError, "Missing __array_interface__ shape"); return NULL; @@ -2277,7 +2276,10 @@ PyArray_FromInterface(PyObject *origin) } /* Case for data access through pointer */ - if (attr && PyTuple_Check(attr)) { + if (attr == NULL) { + use_scalar_assign = 1; + } + else if 
(PyTuple_Check(attr)) { PyObject *dataptr; if (PyTuple_GET_SIZE(attr) != 2) { PyErr_SetString(PyExc_TypeError, @@ -2309,7 +2311,7 @@ PyArray_FromInterface(PyObject *origin) } /* Case for data access through buffer */ - else if (attr) { + else { if (attr != Py_None) { base = attr; } @@ -2366,18 +2368,32 @@ PyArray_FromInterface(PyObject *origin) if (ret == NULL) { goto fail; } - if (data == NULL) { + if (use_scalar_assign) { + /* + * NOTE(seberg): I honestly doubt anyone is using this scalar path and we + * could probably just deprecate (or just remove it in a 3.0 version). + */ if (PyArray_SIZE(ret) > 1) { PyErr_SetString(PyExc_ValueError, "cannot coerce scalar to array with size > 1"); Py_DECREF(ret); goto fail; } - if (PyArray_SETITEM(ret, PyArray_DATA(ret), origin) < 0) { + if (PyArray_Pack(PyArray_DESCR(ret), PyArray_DATA(ret), origin) < 0) { Py_DECREF(ret); goto fail; } } + else if (data == NULL && PyArray_NBYTES(ret) != 0) { + /* Caller should ensure this, but <2.4 used the above scalar coerction path */ + PyErr_SetString(PyExc_ValueError, + "data is NULL but array contains data, in older versions of NumPy " + "this may have used the scalar path. 
To get the scalar path " + "you must leave the data field undefined."); + Py_DECREF(ret); + goto fail; + } + result = PyDict_GetItemStringRef(iface, "strides", &attr); if (result == -1){ return NULL; diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 930c736c6076..04222025883e 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -8873,6 +8873,8 @@ def __array_interface__(self): (f, {'strides': ()}, 0.5), (f, {'strides': (2,)}, ValueError), (f, {'strides': 16}, TypeError), + # This fails due to going into the buffer protocol path + (f, {'data': None, 'shape': ()}, TypeError), ]) def test_scalar_interface(self, val, iface, expected): # Test scalar coercion within the array interface @@ -8891,13 +8893,50 @@ def test_scalar_interface(self, val, iface, expected): post_cnt = sys.getrefcount(np.dtype('f8')) assert_equal(pre_cnt, post_cnt) -def test_interface_no_shape(): + +def test_interface_empty_shape(): class ArrayLike: array = np.array(1) __array_interface__ = array.__array_interface__ assert_equal(np.array(ArrayLike()), 1) +def test_interface_no_shape_error(): + class ArrayLike: + __array_interface__ = {"data": None, "typestr": "f8"} + + with pytest.raises(ValueError, match="Missing __array_interface__ shape"): + np.array(ArrayLike()) + + +@pytest.mark.parametrize("iface", [ + {"typestr": "f8", "shape": (0, 1)}, + {"typestr": "(0,)f8,", "shape": (1, 3)}, +]) +def test_interface_nullptr(iface): + iface.update({"data": (0, True)}) + + class ArrayLike: + __array_interface__ = iface + + arr = np.asarray(ArrayLike()) + # Note, we currently set the base anyway, but we do an allocation + # (because NumPy doesn't like NULL data pointers everywhere). 
+ assert arr.shape == iface["shape"] + assert arr.dtype == np.dtype(iface["typestr"]) + assert arr.base is not None + assert arr.flags.owndata + + +def test_interface_nullptr_size_check(): + # Note that prior to NumPy 2.4 the below took the scalar path (if shape had size 1) + class ArrayLike: + __array_interface__ = {"data": (0, True), "typestr": "f8", "shape": ()} + + with pytest.raises(ValueError, match="data is NULL but array contains data"): + np.array(ArrayLike()) + + def test_array_interface_itemsize(): # See gh-6361 my_dtype = np.dtype({'names': ['A', 'B'], 'formats': ['f4', 'f4'], From 4ff1cc12479355ade530ae3add7a19e5db69f2ec Mon Sep 17 00:00:00 2001 From: Toshaksha <147024929+Toshaksha@users.noreply.github.com> Date: Wed, 9 Jul 2025 17:17:04 +0530 Subject: [PATCH 0205/1718] DOC: Fix Resolution Links in NEPs 38 and 49 (#29347) * Update nep-0049-data-allocation-strategies.rst * Update nep-0038-SIMD-optimizations.rst --- doc/neps/nep-0038-SIMD-optimizations.rst | 2 +- doc/neps/nep-0049-data-allocation-strategies.rst | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/doc/neps/nep-0038-SIMD-optimizations.rst b/doc/neps/nep-0038-SIMD-optimizations.rst index eb1157342948..445c008a76c3 100644 --- a/doc/neps/nep-0038-SIMD-optimizations.rst +++ b/doc/neps/nep-0038-SIMD-optimizations.rst @@ -8,7 +8,7 @@ NEP 38 — Using SIMD optimization instructions for performance :Status: Final :Type: Standards :Created: 2019-11-25 -:Resolution: https://mail.python.org/archives/list/numpy-discussion@python.org/thread/PVWJ74UVBRZ5ZWF6MDU7EUSJXVNILAQB/#PVWJ74UVBRZ5ZWF6MDU7EUSJXVNILAQB +:Resolution: `NumPy Discussion `_ Abstract diff --git a/doc/neps/nep-0049-data-allocation-strategies.rst b/doc/neps/nep-0049-data-allocation-strategies.rst index 180cfea17156..ec18f7a315d9 100644 --- a/doc/neps/nep-0049-data-allocation-strategies.rst +++ b/doc/neps/nep-0049-data-allocation-strategies.rst @@ -8,8 +8,7 @@ NEP 49 — Data allocation strategies :Status: Final :Type: 
Standards Track :Created: 2021-04-18 -:Resolution: https://mail.python.org/archives/list/numpy-discussion@python.org/thread/YZ3PNTXZUT27B6ITFAD3WRSM3T3SRVK4/#PKYXCTG4R5Q6LIRZC4SEWLNBM6GLRF26 - +:Resolution: `NumPy Discussion `_ Abstract -------- From 7f8e3fb168a833e81b8ae5315424106e8f1667cc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20G=C3=B3rny?= Date: Wed, 9 Jul 2025 16:28:47 +0200 Subject: [PATCH 0206/1718] BLD: print long double format used MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Print the long double format that is used for the build. This can be helpful when you're about to cross-compile, but can also run Meson first on the native host to check the right value. Signed-off-by: Michał Górny --- numpy/_core/meson.build | 1 + 1 file changed, 1 insertion(+) diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index 6098986618e4..1b79aa39781c 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -522,6 +522,7 @@ endif if longdouble_format == 'UNKNOWN' or longdouble_format == 'UNDEFINED' error('Unknown long double format of size: ' + cc.sizeof('long double').to_string()) endif +message(f'Long double format: @longdouble_format@') cdata.set10('HAVE_LDOUBLE_' + longdouble_format, true) if cc.has_header('endian.h') From 8cf1f7dbf583dd7f55906e799a1108d485f54709 Mon Sep 17 00:00:00 2001 From: Carlos Martin Date: Thu, 10 Jul 2025 03:18:44 -0400 Subject: [PATCH 0207/1718] ENH: Let numpy.size accept multiple axes. (#29240) * Let numpy.size accept multiple axes. 
* Apply suggestions from code review --------- Co-authored-by: Sebastian Berg --- .../upcoming_changes/29240.new_feature.rst | 1 + numpy/_core/fromnumeric.py | 22 ++++++++++++------- numpy/_core/fromnumeric.pyi | 2 +- numpy/_core/tests/test_numeric.py | 4 ++++ 4 files changed, 20 insertions(+), 9 deletions(-) create mode 100644 doc/release/upcoming_changes/29240.new_feature.rst diff --git a/doc/release/upcoming_changes/29240.new_feature.rst b/doc/release/upcoming_changes/29240.new_feature.rst new file mode 100644 index 000000000000..02d43364b200 --- /dev/null +++ b/doc/release/upcoming_changes/29240.new_feature.rst @@ -0,0 +1 @@ +* Let ``np.size`` accept multiple axes. diff --git a/numpy/_core/fromnumeric.py b/numpy/_core/fromnumeric.py index 34fe1798f45e..9d01fca8aa32 100644 --- a/numpy/_core/fromnumeric.py +++ b/numpy/_core/fromnumeric.py @@ -2,6 +2,7 @@ """ import functools +import math import types import warnings @@ -3569,10 +3570,13 @@ def size(a, axis=None): ---------- a : array_like Input data. - axis : int, optional - Axis along which the elements are counted. By default, give + axis : None or int or tuple of ints, optional + Axis or axes along which the elements are counted. By default, give the total number of elements. + .. versionchanged:: 2.4 + Extended to accept multiple axes. 
+ Returns ------- element_count : int @@ -3590,10 +3594,12 @@ def size(a, axis=None): >>> a = np.array([[1,2,3],[4,5,6]]) >>> np.size(a) 6 - >>> np.size(a,1) + >>> np.size(a,axis=1) 3 - >>> np.size(a,0) + >>> np.size(a,axis=0) 2 + >>> np.size(a,axis=(0,1)) + 6 """ if axis is None: @@ -3602,10 +3608,10 @@ def size(a, axis=None): except AttributeError: return asarray(a).size else: - try: - return a.shape[axis] - except AttributeError: - return asarray(a).shape[axis] + _shape = shape(a) + from .numeric import normalize_axis_tuple + axis = normalize_axis_tuple(axis, len(_shape), allow_duplicate=False) + return math.prod(_shape[ax] for ax in axis) def _round_dispatcher(a, decimals=None, out=None): diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index 34849c2cc800..95fe7f7d8484 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -1397,7 +1397,7 @@ def cumulative_prod( def ndim(a: ArrayLike) -> int: ... -def size(a: ArrayLike, axis: int | None = ...) -> int: ... +def size(a: ArrayLike, axis: int | tuple[int, ...] | None = ...) -> int: ... @overload def around( diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py index 4ead2fc7ec6f..560d7cf71543 100644 --- a/numpy/_core/tests/test_numeric.py +++ b/numpy/_core/tests/test_numeric.py @@ -291,6 +291,10 @@ def test_size(self): assert_(np.size(A) == 6) assert_(np.size(A, 0) == 2) assert_(np.size(A, 1) == 3) + assert_(np.size(A, ()) == 1) + assert_(np.size(A, (0,)) == 2) + assert_(np.size(A, (1,)) == 3) + assert_(np.size(A, (0, 1)) == 6) def test_squeeze(self): A = [[[1, 1, 1], [2, 2, 2], [3, 3, 3]]] From f68522879dd5cd3f6eb45fd3ec971561e76915c6 Mon Sep 17 00:00:00 2001 From: Mark Ryan Date: Thu, 10 Jul 2025 09:52:26 +0000 Subject: [PATCH 0208/1718] BUG: fix test_npy_uintp_type_enum test_npy_uintp_type_enum will fail if it is run before any of the other tests in test_cython.py. 
The reason is that although it imports the checks module, it does not contain the install_temp parameter that ensures that the checks module is compiled. Running test_npy_uintp_type_enum before any of the other tests in test_cython.py will result in a No module named 'checks' error. If the test is run after one of the other tests in test_cython.py that do contain the install_temp parameter, test_npy_uintp_type_enum will run fine. Here we fix the issue by adding the install_temp parameter to test_npy_uintp_type_enum. Closes #29354 --- numpy/_core/tests/test_cython.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/tests/test_cython.py b/numpy/_core/tests/test_cython.py index 2c7b40c5614c..c405a59e535e 100644 --- a/numpy/_core/tests/test_cython.py +++ b/numpy/_core/tests/test_cython.py @@ -347,6 +347,6 @@ def test_npystring_allocators_other_dtype(install_temp): @pytest.mark.skipif(sysconfig.get_platform() == 'win-arm64', reason='no checks module on win-arm64') -def test_npy_uintp_type_enum(): +def test_npy_uintp_type_enum(install_temp): import checks assert checks.check_npy_uintp_type_enum() From 82bd6ef31ce57859280be7591c3a20f562e69d34 Mon Sep 17 00:00:00 2001 From: Maanas Arora Date: Fri, 11 Jul 2025 10:16:59 -0400 Subject: [PATCH 0209/1718] BUG: Fix reference leakage for output arrays in reduction functions (#29358) Add back missing DECREF for `out=` in ufunc.reduce (and etc.) when `out` is passed. 
* BUG: add Py_XDECREF to `out` for failure case in generic reduction * TST: add reference count checks for `out` in reduction leak tests --- numpy/_core/src/umath/ufunc_object.c | 4 +++ numpy/_core/tests/test_ufunc.py | 39 ++++++++++++++++++++++++++++ 2 files changed, 43 insertions(+) diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index 485364af1ff2..fd9ae9f1e41d 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -3727,6 +3727,8 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, if (ret == NULL) { goto fail; } + + Py_XDECREF(out); Py_DECREF(signature[0]); Py_DECREF(signature[1]); @@ -3753,6 +3755,8 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, return wrapped_result; fail: + Py_XDECREF(out); + Py_XDECREF(signature[0]); Py_XDECREF(signature[1]); Py_XDECREF(signature[2]); diff --git a/numpy/_core/tests/test_ufunc.py b/numpy/_core/tests/test_ufunc.py index 9c5489a614e8..84e31bd1bc3c 100644 --- a/numpy/_core/tests/test_ufunc.py +++ b/numpy/_core/tests/test_ufunc.py @@ -3003,6 +3003,45 @@ def test_reduce_casterrors(offset): assert out[()] < value * offset +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +def test_reduction_no_reference_leak(): + # Test that the generic reduction does not leak references. 
+ # gh-29358 + arr = np.array([1, 2, 3], dtype=np.int32) + count = sys.getrefcount(arr) + + np.add.reduce(arr, dtype=np.int32, initial=0) + assert count == sys.getrefcount(arr) + + np.add.accumulate(arr, dtype=np.int32) + assert count == sys.getrefcount(arr) + + np.add.reduceat(arr, [0, 1], dtype=np.int32) + assert count == sys.getrefcount(arr) + + # with `out=` the reference count is not changed + out = np.empty((), dtype=np.int32) + out_count = sys.getrefcount(out) + + np.add.reduce(arr, dtype=np.int32, out=out, initial=0) + assert count == sys.getrefcount(arr) + assert out_count == sys.getrefcount(out) + + out = np.empty(arr.shape, dtype=np.int32) + out_count = sys.getrefcount(out) + + np.add.accumulate(arr, dtype=np.int32, out=out) + assert count == sys.getrefcount(arr) + assert out_count == sys.getrefcount(out) + + out = np.empty((2,), dtype=np.int32) + out_count = sys.getrefcount(out) + + np.add.reduceat(arr, [0, 1], dtype=np.int32, out=out) + assert count == sys.getrefcount(arr) + assert out_count == sys.getrefcount(out) + + def test_object_reduce_cleanup_on_failure(): # Test cleanup, including of the initial value (manually provided or not) with pytest.raises(TypeError): From 6b595af8e1d53a7da5de5c0cfd2462afebbe62a0 Mon Sep 17 00:00:00 2001 From: Dillon Niederhut Date: Sat, 12 Jul 2025 13:29:25 -0700 Subject: [PATCH 0210/1718] DOC: add typing test to dev env page Adds a small entry to the development environment page on testing to show the spin command for invoking the typing tests. 
[skip azp] [skip cirrus] [skip actions] Closes https://github.com/numpy/numpy/issues/29365 --- doc/source/dev/development_environment.rst | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/doc/source/dev/development_environment.rst b/doc/source/dev/development_environment.rst index 7a6dc36b680d..da8135def475 100644 --- a/doc/source/dev/development_environment.rst +++ b/doc/source/dev/development_environment.rst @@ -185,6 +185,16 @@ For more extensive information, see :ref:`testing-guidelines`. Note: do not run the tests from the root directory of your numpy git repo without ``spin``, that will result in strange test errors. +Running type checks +------------------- +Changes that involve static type declarations are also executed using ``spin``. +The invocation will look like the following: + + $ spin mypy + +This will look in the ``typing/tests`` directory for sets of operations to +test for type incompatibility. + Running linting --------------- Lint checks can be performed on newly added lines of Python code. 
From 031377b7bd547c3e07702cf273cf9fecd8cb46fa Mon Sep 17 00:00:00 2001 From: Dillon Niederhut Date: Sun, 13 Jul 2025 06:56:44 -0500 Subject: [PATCH 0211/1718] TST: refactor typing check for @ (#29364) Co-authored-by: Joren Hammudoglu --- numpy/typing/tests/data/pass/simple.py | 4 +++- numpy/typing/tests/data/pass/simple_py3.py | 6 ------ 2 files changed, 3 insertions(+), 7 deletions(-) delete mode 100644 numpy/typing/tests/data/pass/simple_py3.py diff --git a/numpy/typing/tests/data/pass/simple.py b/numpy/typing/tests/data/pass/simple.py index 8f44e6e76f83..408d2482b0ef 100644 --- a/numpy/typing/tests/data/pass/simple.py +++ b/numpy/typing/tests/data/pass/simple.py @@ -165,4 +165,6 @@ def iterable_func(x: Iterable[object]) -> Iterable[object]: ~array # Other methods -np.array([1, 2]).transpose() +array.transpose() + +array @ array diff --git a/numpy/typing/tests/data/pass/simple_py3.py b/numpy/typing/tests/data/pass/simple_py3.py deleted file mode 100644 index c05a1ce612ac..000000000000 --- a/numpy/typing/tests/data/pass/simple_py3.py +++ /dev/null @@ -1,6 +0,0 @@ -import numpy as np - -array = np.array([1, 2]) - -# The @ operator is not in python 2 -array @ array From 3ccbc6c29bb56795d32ee685bccdc1e9ca8ae47f Mon Sep 17 00:00:00 2001 From: Iason Krommydas Date: Sun, 13 Jul 2025 08:28:40 -0700 Subject: [PATCH 0212/1718] DOC: specify that `numpy.nan_to_num` supports array like arguments (#29362) * DOC: specify that numpy.nan_to_num supports array_like arguments * DOC: split examples into multiple lines * DOC: more consistent type specification --- numpy/lib/_type_check_impl.py | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/numpy/lib/_type_check_impl.py b/numpy/lib/_type_check_impl.py index 977609caa299..584088cdc21d 100644 --- a/numpy/lib/_type_check_impl.py +++ b/numpy/lib/_type_check_impl.py @@ -398,15 +398,15 @@ def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None): in-place (False). 
The in-place operation only occurs if casting to an array does not require a copy. Default is True. - nan : int, float, optional - Value to be used to fill NaN values. If no value is passed + nan : int, float, or bool or array_like of int, float, or bool, optional + Values to be used to fill NaN values. If no values are passed then NaN values will be replaced with 0.0. - posinf : int, float, optional - Value to be used to fill positive infinity values. If no value is + posinf : int, float, or bool or array_like of int, float, or bool, optional + Values to be used to fill positive infinity values. If no values are passed then positive infinity values will be replaced with a very large number. - neginf : int, float, optional - Value to be used to fill negative infinity values. If no value is + neginf : int, float, or bool or array_like of int, float, or bool, optional + Values to be used to fill negative infinity values. If no values are passed then negative infinity values will be replaced with a very small (or negative) number. @@ -445,6 +445,12 @@ def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None): >>> np.nan_to_num(x, nan=-9999, posinf=33333333, neginf=33333333) array([ 3.3333333e+07, 3.3333333e+07, -9.9990000e+03, -1.2800000e+02, 1.2800000e+02]) + >>> nan = np.array([11, 12, -9999, 13, 14]) + >>> posinf = np.array([33333333, 11, 12, 13, 14]) + >>> neginf = np.array([11, 33333333, 12, 13, 14]) + >>> np.nan_to_num(x, nan=nan, posinf=posinf, neginf=neginf) + array([ 3.3333333e+07, 3.3333333e+07, -9.9990000e+03, -1.2800000e+02, + 1.2800000e+02]) >>> y = np.array([complex(np.inf, np.nan), np.nan, complex(np.nan, np.inf)]) array([ 1.79769313e+308, -1.79769313e+308, 0.00000000e+000, # may vary -1.28000000e+002, 1.28000000e+002]) @@ -454,6 +460,11 @@ def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None): 0.00000000e+000 +1.79769313e+308j]) >>> np.nan_to_num(y, nan=111111, posinf=222222) array([222222.+111111.j, 111111. 
+0.j, 111111.+222222.j]) + >>> nan = np.array([11, 12, 13]) + >>> posinf = np.array([21, 22, 23]) + >>> neginf = np.array([31, 32, 33]) + >>> np.nan_to_num(y, nan=nan, posinf=posinf, neginf=neginf) + array([21.+11.j, 12. +0.j, 13.+23.j]) """ x = _nx.array(x, subok=True, copy=copy) xtype = x.dtype.type From 08916dc1f34de32ccce6b1018613de98c3cc68ff Mon Sep 17 00:00:00 2001 From: Dan Raviv Date: Sun, 13 Jul 2025 11:35:49 -0700 Subject: [PATCH 0213/1718] BUG: Fix np.testing utils failing for masked scalar vs. scalar (#29317) (#29318) * TST: Add failing test TestArrayEqual.test_masked_scalar The two added test cases fail with: E AssertionError: E Arrays are not equal E E nan location mismatch: E ACTUAL: MaskedArray(3.) E DESIRED: array(3.) and E AssertionError: E Arrays are not equal E E nan location mismatch: E ACTUAL: MaskedArray(3.) E DESIRED: array(nan) * BUG: Fix np.testing utils failing for masked scalar vs. scalar (#29317) TestArrayEqual.test_masked_scalar now passes. This case regressed since 73151451437 (merged in #12119) due to: - ` == ` returning np.ma.masked (not a 0-dim masked bool array), followed by - `np.bool(np.ma.masked)` unintentionally converting it to np._False Note on the modified comment: Confusingly, "isinstance(..., bool) checks" in the previous wording actually incorrectly referred to the ones towards the end of the function, which are not actually related to __eq__'s behavior but to the possibility of `func` returning a bool. * MNT: Improve comments on assert_array_compare nan/inf handling logic - Use same language as elsewhere below to explain `!= True` used to handle np.ma.masked - Clarify committed to support standard MaskedArrays - Restore note lost in 73151451437 comment changes about how the np.bool casts towards the end of the function handle np.ma.masked, and expand further. 
* TST: Expand TestArrayEqual.test_masked_scalar --- numpy/testing/_private/utils.py | 19 +++++++++++------ numpy/testing/tests/test_utils.py | 34 +++++++++++++++++++++++++++++++ 2 files changed, 47 insertions(+), 6 deletions(-) diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index 78f9e9a004b1..e868239e62ab 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -771,16 +771,20 @@ def func_assert_same_pos(x, y, func=isnan, hasval='nan'): y_id = func(y) # We include work-arounds here to handle three types of slightly # pathological ndarray subclasses: - # (1) all() on `masked` array scalars can return masked arrays, so we - # use != True + # (1) all() on fully masked arrays returns np.ma.masked, so we use != True + # (np.ma.masked != True evaluates as np.ma.masked, which is falsy). # (2) __eq__ on some ndarray subclasses returns Python booleans - # instead of element-wise comparisons, so we cast to np.bool() and - # use isinstance(..., bool) checks + # instead of element-wise comparisons, so we cast to np.bool() in + # that case (or in case __eq__ returns some other value with no + # all() method). # (3) subclasses with bare-bones __array_function__ implementations may # not implement np.all(), so favor using the .all() method - # We are not committed to supporting such subclasses, but it's nice to + # We are not committed to supporting cases (2) and (3), but it's nice to # support them if possible. - if np.bool(x_id == y_id).all() != True: + result = x_id == y_id + if not hasattr(result, "all") or not callable(result.all): + result = np.bool(result) + if result.all() != True: msg = build_err_msg( [x, y], err_msg + '\n%s location mismatch:' @@ -790,6 +794,9 @@ def func_assert_same_pos(x, y, func=isnan, hasval='nan'): raise AssertionError(msg) # If there is a scalar, then here we know the array has the same # flag as it everywhere, so we should return the scalar flag. 
+ # np.ma.masked is also handled and converted to np.False_ (even if the other + # array has nans/infs etc.; that's OK given the handling later of fully-masked + # results). if isinstance(x_id, bool) or x_id.ndim == 0: return np.bool(x_id) elif isinstance(y_id, bool) or y_id.ndim == 0: diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py index b09df821680b..243b8d420936 100644 --- a/numpy/testing/tests/test_utils.py +++ b/numpy/testing/tests/test_utils.py @@ -197,6 +197,40 @@ def test_masked_nan_inf(self): self._test_equal(a, b) self._test_equal(b, a) + # Also provides test cases for gh-11121 + def test_masked_scalar(self): + # Test masked scalar vs. plain/masked scalar + for a_val, b_val, b_masked in itertools.product( + [3., np.nan, np.inf], + [3., 4., np.nan, np.inf, -np.inf], + [False, True], + ): + a = np.ma.MaskedArray(a_val, mask=True) + b = np.ma.MaskedArray(b_val, mask=True) if b_masked else np.array(b_val) + self._test_equal(a, b) + self._test_equal(b, a) + + # Test masked scalar vs. plain array + for a_val, b_val in itertools.product( + [3., np.nan, -np.inf], + itertools.product([3., 4., np.nan, np.inf, -np.inf], repeat=2), + ): + a = np.ma.MaskedArray(a_val, mask=True) + b = np.array(b_val) + self._test_equal(a, b) + self._test_equal(b, a) + + # Test masked scalar vs. 
masked array + for a_val, b_val, b_mask in itertools.product( + [3., np.nan, np.inf], + itertools.product([3., 4., np.nan, np.inf, -np.inf], repeat=2), + itertools.product([False, True], repeat=2), + ): + a = np.ma.MaskedArray(a_val, mask=True) + b = np.ma.MaskedArray(b_val, mask=b_mask) + self._test_equal(a, b) + self._test_equal(b, a) + def test_subclass_that_overrides_eq(self): # While we cannot guarantee testing functions will always work for # subclasses, the tests should ideally rely only on subclasses having From 81be692aacdae64962b0c847ba25c6db6ca7111b Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Mon, 14 Jul 2025 11:14:24 +0300 Subject: [PATCH 0214/1718] BLD: use github to build macos-arm64 wheels with OpenBLAS and update to 0.3.30 --- .cirrus.star | 18 ----- .github/workflows/wheels.yml | 5 +- requirements/ci32_requirements.txt | 3 +- requirements/ci_requirements.txt | 6 +- tools/ci/cirrus_wheels.yml | 118 ----------------------------- 5 files changed, 5 insertions(+), 145 deletions(-) delete mode 100644 tools/ci/cirrus_wheels.yml diff --git a/.cirrus.star b/.cirrus.star index c503f25720a7..3de5ce97b0e8 100644 --- a/.cirrus.star +++ b/.cirrus.star @@ -9,17 +9,12 @@ load("cirrus", "env", "fs", "http") def main(ctx): ###################################################################### - # Should wheels be built? # Only test on the numpy/numpy repository ###################################################################### if env.get("CIRRUS_REPO_FULL_NAME") != "numpy/numpy": return [] - # only run the wheels entry on a cron job - if env.get("CIRRUS_CRON", "") == "nightly": - return fs.read("tools/ci/cirrus_wheels.yml") - # Obtain commit message for the event. Unfortunately CIRRUS_CHANGE_MESSAGE # only contains the actual commit message on a non-PR trigger event. # For a PR event it contains the PR title and description. 
@@ -31,23 +26,10 @@ def main(ctx): if "[skip cirrus]" in commit_msg or "[skip ci]" in commit_msg: return [] - wheel = False labels = env.get("CIRRUS_PR_LABELS", "") pr_number = env.get("CIRRUS_PR", "-1") tag = env.get("CIRRUS_TAG", "") - if "[wheel build]" in commit_msg: - wheel = True - - # if int(pr_number) > 0 and ("14 - Release" in labels or "36 - Build" in labels): - # wheel = True - - if tag.startswith("v") and "dev0" not in tag: - wheel = True - - if wheel: - return fs.read("tools/ci/cirrus_wheels.yml") - if int(pr_number) < 0: return [] diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 223ec38898cf..fd2047283a1f 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -85,10 +85,9 @@ jobs: - [ubuntu-22.04-arm, manylinux_aarch64, ""] - [ubuntu-22.04-arm, musllinux_aarch64, ""] - [macos-13, macosx_x86_64, openblas] - - # targeting macos >= 14. Could probably build on macos-14, but it would be a cross-compile - [macos-13, macosx_x86_64, accelerate] - - [macos-14, macosx_arm64, accelerate] # always use accelerate + - [macos-14, macosx_arm64, openblas] + - [macos-14, macosx_arm64, accelerate] - [windows-2022, win_amd64, ""] - [windows-2022, win32, ""] - [windows-11-arm, win_arm64, ""] diff --git a/requirements/ci32_requirements.txt b/requirements/ci32_requirements.txt index 74c9a51ec111..87831586e01e 100644 --- a/requirements/ci32_requirements.txt +++ b/requirements/ci32_requirements.txt @@ -1,4 +1,3 @@ spin==0.13 # Keep this in sync with ci_requirements.txt -scipy-openblas32==0.3.29.0.0 ; sys_platform != 'win32' or platform_machine != 'ARM64' -scipy-openblas32==0.3.29.265.0 ; sys_platform == 'win32' and platform_machine == 'ARM64' +scipy-openblas32==0.3.30.0.1 diff --git a/requirements/ci_requirements.txt b/requirements/ci_requirements.txt index b6ea06c812c8..dd16787923e3 100644 --- a/requirements/ci_requirements.txt +++ b/requirements/ci_requirements.txt @@ -1,6 +1,4 @@ spin==0.13 # Keep this in sync with 
ci32_requirements.txt -scipy-openblas32==0.3.29.0.0 ; sys_platform != 'win32' or platform_machine != 'ARM64' -scipy-openblas32==0.3.29.265.0 ; sys_platform == 'win32' and platform_machine == 'ARM64' -# Note there is not yet a win-arm64 wheel, so we currently only exclude win-arm64 -scipy-openblas64==0.3.29.0.0 ; sys_platform != 'win32' or platform_machine != 'ARM64' +scipy-openblas32==0.3.30.0.1 +scipy-openblas64==0.3.30.0.1 diff --git a/tools/ci/cirrus_wheels.yml b/tools/ci/cirrus_wheels.yml deleted file mode 100644 index 6d02411df2e9..000000000000 --- a/tools/ci/cirrus_wheels.yml +++ /dev/null @@ -1,118 +0,0 @@ -###################################################################### -# Build macosx_arm64 natively -# -# macosx_arm64 for macos >= 14 used to be built here, but are now -# built on GHA. -###################################################################### - -macosx_arm64_task: - use_compute_credits: $CIRRUS_USER_COLLABORATOR == 'true' - env: - CIRRUS_CLONE_SUBMODULES: true - macos_instance: - matrix: - image: ghcr.io/cirruslabs/macos-runner:sonoma - - matrix: - - env: - CIBW_BUILD: cp311-* cp312* cp313* - env: - PATH: /usr/local/lib:/usr/local/include:$PATH - CIBW_ARCHS: arm64 - - build_script: | - brew install micromamba gfortran - micromamba shell init -s bash --root-prefix ~/micromamba - source ~/.bash_profile - - micromamba create -n numpydev - micromamba activate numpydev - micromamba install -y -c conda-forge python=3.11 2>/dev/null - - # Use scipy-openblas wheels - export INSTALL_OPENBLAS=true - export CIBW_ENVIRONMENT_MACOS="MACOSX_DEPLOYMENT_TARGET='11.0' INSTALL_OPENBLAS=true RUNNER_OS=macOS PKG_CONFIG_PATH=$PWD/.openblas" - - # needed for submodules - git submodule update --init - # need to obtain all the tags so setup.py can determine FULLVERSION - git fetch origin - uname -m - python -c "import platform;print(platform.python_version());print(platform.system());print(platform.machine())" - clang --version - - python -m pip install 
cibuildwheel - cibuildwheel - - wheels_artifacts: - path: "wheelhouse/*" - -###################################################################### -# Upload all wheels -###################################################################### - -wheels_upload_task: - use_compute_credits: $CIRRUS_USER_COLLABORATOR == 'true' - # Artifacts don't seem to be persistent from task to task. - # Rather than upload wheels at the end of each cibuildwheel run we do a - # final upload here. This is because a run may be on different OS for - # which bash, etc, may not be present. - depends_on: - - macosx_arm64 - compute_engine_instance: - image_project: cirrus-images - image: family/docker-builder - platform: linux - cpu: 1 - - env: - NUMPY_STAGING_UPLOAD_TOKEN: ENCRYPTED[!5a69522ae0c2af9edb2bc1cdfeaca6292fb3666d9ecd82dca0615921834a6ce3b702352835d8bde4ea2a9ed5ef8424ac!] - NUMPY_NIGHTLY_UPLOAD_TOKEN: ENCRYPTED[4376691390321cd5e76613ec21de8456cc0af0164971dd9542f985a017dc30ccb4d40e60f59184618e2d55afd63e93b7] - - upload_script: | - apt-get update - apt-get install -y curl wget - export IS_SCHEDULE_DISPATCH="false" - export IS_PUSH="false" - - # cron job - if [[ "$CIRRUS_CRON" == "nightly" ]]; then - export IS_SCHEDULE_DISPATCH="true" - fi - - # a manual build was started - if [[ "$CIRRUS_BUILD_SOURCE" == "api" && "$CIRRUS_COMMIT_MESSAGE" == "API build for null" ]]; then - export IS_SCHEDULE_DISPATCH="true" - fi - - # only upload wheels to staging if it's a tag beginning with 'v' and you're - # on a maintenance branch - if [[ "$CIRRUS_TAG" == v* ]] && [[ $CIRRUS_TAG != *"dev0"* ]]; then - export IS_PUSH="true" - fi - - if [[ $IS_PUSH == "true" ]] || [[ $IS_SCHEDULE_DISPATCH == "true" ]]; then - # install miniconda in the home directory. 
For some reason HOME isn't set by Cirrus - export HOME=$PWD - - # install miniconda for uploading to anaconda - wget -q https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh - bash miniconda.sh -b -p $HOME/miniconda3 - $HOME/miniconda3/bin/conda init bash - source $HOME/miniconda3/bin/activate - conda install -y anaconda-client - - # The name of the zip file is derived from the `wheels_artifact` line. - # If you change the artifact line to `myfile_artifact` then it would be - # called myfile.zip - - curl https://api.cirrus-ci.com/v1/artifact/build/$CIRRUS_BUILD_ID/wheels.zip --output wheels.zip - unzip wheels.zip - - source ./tools/wheels/upload_wheels.sh - # IS_PUSH takes precedence over IS_SCHEDULE_DISPATCH - set_upload_vars - - # Will be skipped if not a push/tag/scheduled build - upload_wheels - fi From 8b43d86afcecf74b51d2a96425a64363b207e4a1 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 14 Jul 2025 01:17:55 -0700 Subject: [PATCH 0215/1718] BUG: fix casting issue in center, ljust, rjust, and zfill (#29369) --- numpy/_core/src/umath/stringdtype_ufuncs.cpp | 6 +++--- numpy/_core/tests/test_strings.py | 4 ++++ 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/numpy/_core/src/umath/stringdtype_ufuncs.cpp b/numpy/_core/src/umath/stringdtype_ufuncs.cpp index b0181d4186c9..ca574f605c1a 100644 --- a/numpy/_core/src/umath/stringdtype_ufuncs.cpp +++ b/numpy/_core/src/umath/stringdtype_ufuncs.cpp @@ -1736,7 +1736,7 @@ center_ljust_rjust_strided_loop(PyArrayMethod_Context *context, size_t num_codepoints = inbuf.num_codepoints(); npy_intp width = (npy_intp)*(npy_int64*)in2; - if (num_codepoints > (size_t)width) { + if ((npy_intp)num_codepoints > width) { width = num_codepoints; } @@ -1866,8 +1866,8 @@ zfill_strided_loop(PyArrayMethod_Context *context, { Buffer inbuf((char *)is.buf, is.size); size_t in_codepoints = inbuf.num_codepoints(); - size_t width = (size_t)*(npy_int64 *)in2; - if (in_codepoints > width) { + 
npy_intp width = (npy_intp)*(npy_int64*)in2; + if ((npy_intp)in_codepoints > width) { width = in_codepoints; } // number of leading one-byte characters plus the size of the diff --git a/numpy/_core/tests/test_strings.py b/numpy/_core/tests/test_strings.py index 1b77a535eee6..756c6e1bb549 100644 --- a/numpy/_core/tests/test_strings.py +++ b/numpy/_core/tests/test_strings.py @@ -846,6 +846,7 @@ def test_rjust_raises_multiple_character_fill(self, dt): ('abc', 6, ' ', ' abc '), ('abc', 3, ' ', 'abc'), ('abc', 2, ' ', 'abc'), + ('abc', -2, ' ', 'abc'), ('abc', 10, '*', '***abc****'), ]) def test_center(self, buf, width, fillchar, res, dt): @@ -859,6 +860,7 @@ def test_center(self, buf, width, fillchar, res, dt): ('abc', 6, ' ', 'abc '), ('abc', 3, ' ', 'abc'), ('abc', 2, ' ', 'abc'), + ('abc', -2, ' ', 'abc'), ('abc', 10, '*', 'abc*******'), ]) def test_ljust(self, buf, width, fillchar, res, dt): @@ -872,6 +874,7 @@ def test_ljust(self, buf, width, fillchar, res, dt): ('abc', 6, ' ', ' abc'), ('abc', 3, ' ', 'abc'), ('abc', 2, ' ', 'abc'), + ('abc', -2, ' ', 'abc'), ('abc', 10, '*', '*******abc'), ]) def test_rjust(self, buf, width, fillchar, res, dt): @@ -893,6 +896,7 @@ def test_rjust(self, buf, width, fillchar, res, dt): ('-0123', 5, '-0123'), ('000', 3, '000'), ('34', 1, '34'), + ('34', -1, '34'), ('0034', 4, '0034'), ]) def test_zfill(self, buf, width, res, dt): From 9173e548de3541077a9b50b0f4dff7eceb9354fd Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Mon, 14 Jul 2025 12:46:38 +0200 Subject: [PATCH 0216/1718] MNT: Options to catch more issues reported by pytest (#28983) Apply Repo-Review suggestions PP306, PP307, PP308. Skip PP305 for now. 
--- pytest.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pytest.ini b/pytest.ini index 132af0bb78ab..b8a1da2b4ec6 100644 --- a/pytest.ini +++ b/pytest.ini @@ -1,5 +1,5 @@ [pytest] -addopts = -l +addopts = -l -ra --strict-markers --strict-config norecursedirs = doc tools numpy/linalg/lapack_lite numpy/_core/code_generators numpy/_core/src/common/pythoncapi-compat doctest_optionflags = NORMALIZE_WHITESPACE ELLIPSIS ALLOW_UNICODE ALLOW_BYTES junit_family=xunit2 From 0c203ed4d6ccfb44efef41e87fe5819dbeb7d822 Mon Sep 17 00:00:00 2001 From: Lucas Colley Date: Mon, 14 Jul 2025 18:14:31 +0100 Subject: [PATCH 0217/1718] DEV: remove "packages" from `.gitignore` not necessary now (and can interfere with niche build circumstances https://github.com/prefix-dev/pixi-build-backends/issues/243) --- .gitignore | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/.gitignore b/.gitignore index c4de68c1a9a7..b54de4091bf3 100644 --- a/.gitignore +++ b/.gitignore @@ -43,23 +43,6 @@ GTAGS *.so *.mod -# Packages # -############ -# it's better to unpack these files and commit the raw source -# git has its own built in compression methods -*.7z -*.bz2 -*.bzip2 -*.dmg -*.gz -*.iso -*.jar -*.rar -*.tar -*.tbz2 -*.tgz -*.zip - # Python files # ################ # meson build/installation directories From 7bb8b9f3df8661d9fbea05b55f00717fde10b8c4 Mon Sep 17 00:00:00 2001 From: Sarang Joshi Date: Mon, 14 Jul 2025 12:18:12 -0500 Subject: [PATCH 0218/1718] Fix typo in npy_cpu_dispatch.c --- numpy/_core/src/common/npy_cpu_dispatch.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/common/npy_cpu_dispatch.c b/numpy/_core/src/common/npy_cpu_dispatch.c index ff22f234a7c6..47b8d2b6f4c2 100644 --- a/numpy/_core/src/common/npy_cpu_dispatch.c +++ b/numpy/_core/src/common/npy_cpu_dispatch.c @@ -9,7 +9,7 @@ NPY_VISIBILITY_HIDDEN int npy_cpu_dispatch_tracer_init(PyObject *mod) { if (npy_static_pydata.cpu_dispatch_registry != NULL) { - 
PyErr_Format(PyExc_RuntimeError, "CPU dispatcher tracer already initlized"); + PyErr_Format(PyExc_RuntimeError, "CPU dispatcher tracer already initialized"); return -1; } PyObject *mod_dict = PyModule_GetDict(mod); From 9a0086b8d22822e62db08d04add2d2c568346137 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Mon, 14 Jul 2025 23:52:44 +0100 Subject: [PATCH 0219/1718] TYP: Fix overloads in ``np.char.array`` and ``np.char.asarray`` for ``unicode`` argument (#29377) --- numpy/_core/defchararray.pyi | 70 +++++++++++++++++++++---- numpy/typing/tests/data/reveal/char.pyi | 6 +++ 2 files changed, 66 insertions(+), 10 deletions(-) diff --git a/numpy/_core/defchararray.pyi b/numpy/_core/defchararray.pyi index 680875240096..d3e01c2c820a 100644 --- a/numpy/_core/defchararray.pyi +++ b/numpy/_core/defchararray.pyi @@ -1044,14 +1044,15 @@ def startswith( def str_len(A: UST_co) -> NDArray[int_]: ... # Overload 1 and 2: str- or bytes-based array-likes -# overload 3: arbitrary object with unicode=False (-> bytes_) -# overload 4: arbitrary object with unicode=True (-> str_) +# overload 3 and 4: arbitrary object with unicode=False (-> bytes_) +# overload 5 and 6: arbitrary object with unicode=True (-> str_) +# overload 7: arbitrary object with unicode=None (default) (-> str_ | bytes_) @overload def array( obj: U_co, itemsize: int | None = ..., copy: bool = ..., - unicode: L[False] = ..., + unicode: L[True] | None = ..., order: _OrderKACF = ..., ) -> _CharArray[str_]: ... @overload @@ -1059,7 +1060,15 @@ def array( obj: S_co, itemsize: int | None = ..., copy: bool = ..., - unicode: L[False] = ..., + unicode: L[False] | None = ..., + order: _OrderKACF = ..., +) -> _CharArray[bytes_]: ... +@overload +def array( + obj: object, + itemsize: int | None, + copy: bool, + unicode: L[False], order: _OrderKACF = ..., ) -> _CharArray[bytes_]: ... 
@overload @@ -1067,43 +1076,84 @@ def array( obj: object, itemsize: int | None = ..., copy: bool = ..., - unicode: L[False] = ..., + *, + unicode: L[False], order: _OrderKACF = ..., ) -> _CharArray[bytes_]: ... @overload +def array( + obj: object, + itemsize: int | None, + copy: bool, + unicode: L[True], + order: _OrderKACF = ..., +) -> _CharArray[str_]: ... +@overload def array( obj: object, itemsize: int | None = ..., copy: bool = ..., - unicode: L[True] = ..., + *, + unicode: L[True], order: _OrderKACF = ..., ) -> _CharArray[str_]: ... +@overload +def array( + obj: object, + itemsize: int | None = ..., + copy: bool = ..., + unicode: bool | None = ..., + order: _OrderKACF = ..., +) -> _CharArray[str_] | _CharArray[bytes_]: ... @overload def asarray( obj: U_co, itemsize: int | None = ..., - unicode: L[False] = ..., + unicode: L[True] | None = ..., order: _OrderKACF = ..., ) -> _CharArray[str_]: ... @overload def asarray( obj: S_co, itemsize: int | None = ..., - unicode: L[False] = ..., + unicode: L[False] | None = ..., + order: _OrderKACF = ..., +) -> _CharArray[bytes_]: ... +@overload +def asarray( + obj: object, + itemsize: int | None, + unicode: L[False], order: _OrderKACF = ..., ) -> _CharArray[bytes_]: ... @overload def asarray( obj: object, itemsize: int | None = ..., - unicode: L[False] = ..., + *, + unicode: L[False], order: _OrderKACF = ..., ) -> _CharArray[bytes_]: ... @overload +def asarray( + obj: object, + itemsize: int | None, + unicode: L[True], + order: _OrderKACF = ..., +) -> _CharArray[str_]: ... +@overload def asarray( obj: object, itemsize: int | None = ..., - unicode: L[True] = ..., + *, + unicode: L[True], order: _OrderKACF = ..., ) -> _CharArray[str_]: ... +@overload +def asarray( + obj: object, + itemsize: int | None = ..., + unicode: bool | None = ..., + order: _OrderKACF = ..., +) -> _CharArray[str_] | _CharArray[bytes_]: ... 
diff --git a/numpy/typing/tests/data/reveal/char.pyi b/numpy/typing/tests/data/reveal/char.pyi index 9fdc9f61e893..5c6af73888d0 100644 --- a/numpy/typing/tests/data/reveal/char.pyi +++ b/numpy/typing/tests/data/reveal/char.pyi @@ -209,6 +209,9 @@ assert_type(np.char.array("bob", copy=True), np.char.chararray[np_t._AnyShape, n assert_type(np.char.array(b"bob", itemsize=5), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) assert_type(np.char.array(1, unicode=False), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) assert_type(np.char.array(1, unicode=True), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) +assert_type(np.char.array(1), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]] | np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.array(AR_U, unicode=False), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.array(AR_S, unicode=True), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) assert_type(np.char.asarray(AR_U), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) assert_type(np.char.asarray(AR_S, order="K"), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) @@ -216,3 +219,6 @@ assert_type(np.char.asarray("bob"), np.char.chararray[np_t._AnyShape, np.dtype[n assert_type(np.char.asarray(b"bob", itemsize=5), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) assert_type(np.char.asarray(1, unicode=False), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) assert_type(np.char.asarray(1, unicode=True), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) +assert_type(np.char.asarray(1), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]] | np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.asarray(AR_U, unicode=False), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.asarray(AR_S, unicode=True), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) From d32370539ea5aa93100842a98cf21d55667b6041 Mon Sep 17 00:00:00 
2001 From: Ben Woodruff Date: Tue, 15 Jul 2025 11:42:04 -0700 Subject: [PATCH 0220/1718] BUG: Fix repeatability issues in test suite (#29380) Resolves test failures caused by persistent global state when running numpy.test() twice in the same Python session. - core/tests: Skip TestAddNewdocUfunc on repeated runs. This avoids C-level mutation errors. - f2py/tests: Reset f77modulename at end of callcrackfortran. This change alters the code base. - lib/tests: Wrap StringIO parameters in lambdas. - f2py/tests/test_docs: Reset ftype.data.a at end of test_ftype. These changes ensure that the full test suite can be run multiple times in the same interpreter without failure. Closes #26718. --- numpy/_core/tests/test_deprecations.py | 7 +++++++ numpy/f2py/f2py2e.py | 2 ++ numpy/f2py/tests/test_docs.py | 2 ++ numpy/lib/tests/test_io.py | 8 ++++++-- 4 files changed, 17 insertions(+), 2 deletions(-) diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index b83a2ac610ee..d4f2e9984266 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -448,6 +448,13 @@ def test_deprecated(self): class TestAddNewdocUFunc(_DeprecationTestCase): # Deprecated in Numpy 2.2, 2024-11 def test_deprecated(self): + doc = struct_ufunc.add_triplet.__doc__ + # gh-26718 + # This test mutates the C-level docstring pointer for add_triplet, + # which is permanent once set. Skip when re-running tests. 
+ if doc is not None and "new docs" in doc: + pytest.skip("Cannot retest deprecation, otherwise ValueError: " + "Cannot change docstring of ufunc with non-NULL docstring") self.assert_deprecated( lambda: np._core.umath._add_newdoc_ufunc( struct_ufunc.add_triplet, "new docs" diff --git a/numpy/f2py/f2py2e.py b/numpy/f2py/f2py2e.py index 459299f8e127..84a5aa3c20a6 100644 --- a/numpy/f2py/f2py2e.py +++ b/numpy/f2py/f2py2e.py @@ -378,6 +378,8 @@ def callcrackfortran(files, options): mod['gil_used'] = 'Py_MOD_GIL_USED' else: mod['gil_used'] = 'Py_MOD_GIL_NOT_USED' + # gh-26718 Reset global + crackfortran.f77modulename = '' return postlist diff --git a/numpy/f2py/tests/test_docs.py b/numpy/f2py/tests/test_docs.py index 5d9aaac9f15b..7015af3b2627 100644 --- a/numpy/f2py/tests/test_docs.py +++ b/numpy/f2py/tests/test_docs.py @@ -60,5 +60,7 @@ def test_ftype(self): ftype.data.x[1] = 45 assert_array_equal(ftype.data.x, np.array([1, 45, 3], dtype=np.float32)) + # gh-26718 Cleanup for repeated test runs + ftype.data.a = 0 # TODO: implement test methods for other example Fortran codes diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py index a1698f240bff..b8c8e6cfcac1 100644 --- a/numpy/lib/tests/test_io.py +++ b/numpy/lib/tests/test_io.py @@ -1276,12 +1276,16 @@ def test_max_rows_larger(self): (1, ["ignored\n", "1,2\n", "\n", "3,4\n"]), # "Bad" lines that do not end in newlines: (1, ["ignored", "1,2", "", "3,4"]), - (1, StringIO("ignored\n1,2\n\n3,4")), + (1, lambda: StringIO("ignored\n1,2\n\n3,4")), # Same as above, but do not skip any lines: (0, ["-1,0\n", "1,2\n", "\n", "3,4\n"]), (0, ["-1,0", "1,2", "", "3,4"]), - (0, StringIO("-1,0\n1,2\n\n3,4"))]) + (0, lambda: StringIO("-1,0\n1,2\n\n3,4"))]) def test_max_rows_empty_lines(self, skip, data): + # gh-26718 re-instantiate StringIO objects each time + if callable(data): + data = data() + with pytest.warns(UserWarning, match=f"Input line 3.*max_rows={3 - skip}"): res = np.loadtxt(data, dtype=int, 
skiprows=skip, delimiter=",", From f37ffca5c734e6c4ae29109a1889429f1f59805b Mon Sep 17 00:00:00 2001 From: Verney7 Date: Wed, 16 Jul 2025 14:33:20 +0800 Subject: [PATCH 0221/1718] BLD: Add sw_64 support --- numpy/_core/include/numpy/npy_cpu.h | 3 +++ numpy/_core/include/numpy/npy_endian.h | 1 + 2 files changed, 4 insertions(+) diff --git a/numpy/_core/include/numpy/npy_cpu.h b/numpy/_core/include/numpy/npy_cpu.h index 52e9d5996bd1..d3a29da57f36 100644 --- a/numpy/_core/include/numpy/npy_cpu.h +++ b/numpy/_core/include/numpy/npy_cpu.h @@ -20,6 +20,7 @@ * NPY_CPU_RISCV64 * NPY_CPU_RISCV32 * NPY_CPU_LOONGARCH + * NPY_CPU_SW_64 * NPY_CPU_WASM */ #ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_CPU_H_ @@ -111,6 +112,8 @@ #endif #elif defined(__loongarch_lp64) #define NPY_CPU_LOONGARCH64 +#elif defined(__sw_64__) + #define NPY_CPU_SW_64 #elif defined(__EMSCRIPTEN__) || defined(__wasm__) /* __EMSCRIPTEN__ is defined by emscripten: an LLVM-to-Web compiler */ /* __wasm__ is defined by clang when targeting wasm */ diff --git a/numpy/_core/include/numpy/npy_endian.h b/numpy/_core/include/numpy/npy_endian.h index 09262120bf82..ecb4b000763d 100644 --- a/numpy/_core/include/numpy/npy_endian.h +++ b/numpy/_core/include/numpy/npy_endian.h @@ -51,6 +51,7 @@ || defined(NPY_CPU_RISCV64) \ || defined(NPY_CPU_RISCV32) \ || defined(NPY_CPU_LOONGARCH) \ + || defined(NPY_CPU_SW_64) \ || defined(NPY_CPU_WASM) #define NPY_BYTE_ORDER NPY_LITTLE_ENDIAN From 685918f57bb2591f1ad14dedc7f2920cc533e4f8 Mon Sep 17 00:00:00 2001 From: Yuki Kobayashi Date: Wed, 16 Jul 2025 21:52:57 +0900 Subject: [PATCH 0222/1718] DOC: Fix `PyArrayMapIterObject` document (#29386) The structure of `PyArrayMapIterObject` defined in `multiarray/mapping.h`, not in `arrayobject.h`. 
--- doc/source/reference/c-api/types-and-structures.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/reference/c-api/types-and-structures.rst b/doc/source/reference/c-api/types-and-structures.rst index 3f16b5f4dbc4..fb8efb9c8766 100644 --- a/doc/source/reference/c-api/types-and-structures.rst +++ b/doc/source/reference/c-api/types-and-structures.rst @@ -1608,7 +1608,7 @@ for completeness and assistance in understanding the code. The C-structure associated with :c:var:`PyArrayMapIter_Type`. This structure is useful if you are trying to understand the advanced-index mapping code. It is defined in the - ``arrayobject.h`` header. This type is not exposed to Python and + ``multiarray/mapping.h`` header. This type is not exposed to Python and could be replaced with a C-structure. As a Python type it takes advantage of reference- counted memory management. From 0fee7d82b176fde51be69fe06fec638768f834f5 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Wed, 16 Jul 2025 16:33:12 +0100 Subject: [PATCH 0223/1718] TYP: Type `MaskedArray.{sum,std,var,mean,prod}` (#29381) --- numpy/ma/core.pyi | 152 ++++++++++++++++++- numpy/typing/tests/data/pass/ndarray_misc.py | 4 +- numpy/typing/tests/data/reveal/ma.pyi | 25 +++ 3 files changed, 174 insertions(+), 7 deletions(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 63dea396de66..ee3b90f2891d 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -1206,7 +1206,34 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def dot(self, b: ArrayLike, out: _ArrayT, strict: bool = ...) -> _ArrayT: ... - def sum(self, axis=..., dtype=..., out=..., keepdims=...): ... + @overload + def sum( + self, + /, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + ) -> Any: ... 
+ @overload + def sum( + self, + /, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + @overload + def sum( + self, + /, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... # Keep in sync with `ndarray.cumsum` @overload # out: None (default) @@ -1216,7 +1243,35 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def cumsum(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... - def prod(self, axis=..., dtype=..., out=..., keepdims=...): ... + @overload + def prod( + self, + /, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + ) -> Any: ... + @overload + def prod( + self, + /, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + @overload + def prod( + self, + /, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + product: Any # Keep in sync with `ndarray.cumprod` @@ -1227,7 +1282,33 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... - def mean(self, axis=..., dtype=..., out=..., keepdims=...): ... + @overload + def mean( + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + ) -> Any: ... + @overload + def mean( + self, + /, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... 
+ @overload + def mean( + self, + /, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... @overload def anom(self, axis: SupportsIndex | None = None, dtype: None = None) -> Self: ... @@ -1236,8 +1317,69 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def anom(self, axis: SupportsIndex | None, dtype: DTypeLike) -> MaskedArray[_ShapeT_co, dtype]: ... - def var(self, axis=..., dtype=..., out=..., ddof=..., keepdims=...): ... - def std(self, axis=..., dtype=..., out=..., ddof=..., keepdims=...): ... + @overload + def var( + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + ) -> Any: ... + @overload + def var( + self, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + ) -> _ArrayT: ... + @overload + def var( + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + ) -> _ArrayT: ... + + @overload + def std( + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + ) -> Any: ... + @overload + def std( + self, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + ) -> _ArrayT: ... 
+ @overload + def std( + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + ) -> _ArrayT: ... # Keep in sync with `ndarray.round` @overload # out=None (default) diff --git a/numpy/typing/tests/data/pass/ndarray_misc.py b/numpy/typing/tests/data/pass/ndarray_misc.py index bb290cdf12f7..b3428f6b48c1 100644 --- a/numpy/typing/tests/data/pass/ndarray_misc.py +++ b/numpy/typing/tests/data/pass/ndarray_misc.py @@ -119,7 +119,7 @@ class IntSubClass(npt.NDArray[np.intp]): ... i4.std() A.std() A.std(axis=0) -A.std(keepdims=True) +A.std(keepdims=True, mean=0.) A.std(out=B0.astype(np.float64)) i4.sum() @@ -137,7 +137,7 @@ class IntSubClass(npt.NDArray[np.intp]): ... i4.var() A.var() A.var(axis=0) -A.var(keepdims=True) +A.var(keepdims=True, mean=0.) A.var(out=B0) A.argpartition([0]) diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index 7c2cb9d5f05e..b5a292330208 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -743,6 +743,31 @@ assert_type(AR_LIKE_td64 * MAR_o, Any) # type: ignore[assert-type] assert_type(AR_LIKE_dt64 * MAR_o, Any) # type: ignore[assert-type] assert_type(AR_LIKE_o * MAR_o, Any) # type: ignore[assert-type] +assert_type(MAR_f8.sum(), Any) +assert_type(MAR_f8.sum(axis=0), Any) +assert_type(MAR_f8.sum(keepdims=True), Any) +assert_type(MAR_f8.sum(out=MAR_subclass), MaskedArraySubclass) + +assert_type(MAR_f8.std(), Any) +assert_type(MAR_f8.std(axis=0), Any) +assert_type(MAR_f8.std(keepdims=True, mean=0.), Any) +assert_type(MAR_f8.std(out=MAR_subclass), MaskedArraySubclass) + +assert_type(MAR_f8.var(), Any) +assert_type(MAR_f8.var(axis=0), Any) +assert_type(MAR_f8.var(keepdims=True, mean=0.), Any) +assert_type(MAR_f8.var(out=MAR_subclass), MaskedArraySubclass) + +assert_type(MAR_f8.mean(), Any) 
+assert_type(MAR_f8.mean(axis=0), Any) +assert_type(MAR_f8.mean(keepdims=True), Any) +assert_type(MAR_f8.mean(out=MAR_subclass), MaskedArraySubclass) + +assert_type(MAR_f8.prod(), Any) +assert_type(MAR_f8.prod(axis=0), Any) +assert_type(MAR_f8.prod(keepdims=True), Any) +assert_type(MAR_f8.prod(out=MAR_subclass), MaskedArraySubclass) + # MaskedArray "true" division assert_type(MAR_f8 / b, MaskedArray[np.float64]) From 6e48cff51b4ea3c002ca86ca4835a723338dbf54 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Wed, 16 Jul 2025 17:50:07 +0100 Subject: [PATCH 0224/1718] DOC: document `mean` parameter in ``ndarray.std`` and ``ndarray.var`` (#29387) --- numpy/_core/_add_newdocs.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index 597d5c6deaf3..90d33d4b810a 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -4240,7 +4240,7 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('std', """ - a.std(axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True) + a.std(axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True, mean=np._NoValue) Returns the standard deviation of the array elements along given axis. @@ -4518,7 +4518,7 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('var', """ - a.var(axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True) + a.var(axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True, mean=np._NoValue) Returns the variance of the array elements, along given axis. 
From b5d6eb4300f782f67dde893dc72f2dca94945b6c Mon Sep 17 00:00:00 2001 From: lvllvl <24905907+lvllvl@users.noreply.github.com> Date: Wed, 16 Jul 2025 16:37:09 -0400 Subject: [PATCH 0225/1718] MNT: add linter for thread-unsafe C API uses (#29371) * MNT: add extra lint - thread-unsafe C API lint * MNT: integrate ruff-lint with C-API-lint --- numpy/_core/include/numpy/npy_3kcompat.h | 2 +- numpy/_core/src/common/npy_cpu_dispatch.c | 2 +- numpy/_core/src/common/ufunc_override.c | 2 +- .../src/multiarray/_multiarray_tests.c.src | 8 +- numpy/_core/src/multiarray/array_coercion.c | 2 +- .../src/multiarray/arrayfunction_override.c | 4 +- numpy/_core/src/multiarray/arraytypes.c.src | 12 +-- numpy/_core/src/multiarray/buffer.c | 2 +- numpy/_core/src/multiarray/compiled_base.c | 2 +- numpy/_core/src/multiarray/conversion_utils.c | 4 +- numpy/_core/src/multiarray/convert_datatype.c | 10 +- numpy/_core/src/multiarray/ctors.c | 2 +- numpy/_core/src/multiarray/descriptor.c | 44 ++++----- numpy/_core/src/multiarray/dtype_transfer.c | 10 +- numpy/_core/src/multiarray/dtype_traversal.c | 2 +- numpy/_core/src/multiarray/dtypemeta.c | 2 +- numpy/_core/src/multiarray/hashdescr.c | 2 +- numpy/_core/src/multiarray/iterators.c | 2 +- .../multiarray/legacy_dtype_implementation.c | 6 +- numpy/_core/src/multiarray/mapping.c | 2 +- numpy/_core/src/multiarray/methods.c | 10 +- numpy/_core/src/multiarray/multiarraymodule.c | 2 +- numpy/_core/src/multiarray/nditer_pywrap.c | 2 +- numpy/_core/src/multiarray/refcount.c | 6 +- numpy/_core/src/multiarray/textreading/rows.c | 2 +- numpy/_core/src/multiarray/usertypes.c | 2 +- numpy/_core/src/umath/ufunc_object.c | 8 +- numpy/_core/src/umath/ufunc_type_resolution.c | 6 +- numpy/_core/src/umath/umathmodule.c | 6 +- numpy/f2py/src/fortranobject.c | 8 +- tools/ci/check_c_api_usage.sh | 91 +++++++++++++++++++ tools/linter.py | 28 +++++- 32 files changed, 204 insertions(+), 89 deletions(-) create mode 100644 tools/ci/check_c_api_usage.sh diff --git 
a/numpy/_core/include/numpy/npy_3kcompat.h b/numpy/_core/include/numpy/npy_3kcompat.h index c2bf74faf09d..cd91f66268c7 100644 --- a/numpy/_core/include/numpy/npy_3kcompat.h +++ b/numpy/_core/include/numpy/npy_3kcompat.h @@ -242,7 +242,7 @@ static inline PyObject* npy_PyFile_OpenFile(PyObject *filename, const char *mode) { PyObject *open; - open = PyDict_GetItemString(PyEval_GetBuiltins(), "open"); + open = PyDict_GetItemString(PyEval_GetBuiltins(), "open"); // noqa: borrowed-ref OK if (open == NULL) { return NULL; } diff --git a/numpy/_core/src/common/npy_cpu_dispatch.c b/numpy/_core/src/common/npy_cpu_dispatch.c index 47b8d2b6f4c2..2cb3cd817d2a 100644 --- a/numpy/_core/src/common/npy_cpu_dispatch.c +++ b/numpy/_core/src/common/npy_cpu_dispatch.c @@ -33,7 +33,7 @@ NPY_VISIBILITY_HIDDEN void npy_cpu_dispatch_trace(const char *fname, const char *signature, const char **dispatch_info) { - PyObject *func_dict = PyDict_GetItemString(npy_static_pydata.cpu_dispatch_registry, fname); + PyObject *func_dict = PyDict_GetItemString(npy_static_pydata.cpu_dispatch_registry, fname); // noqa: borrowed-ref OK if (func_dict == NULL) { func_dict = PyDict_New(); if (func_dict == NULL) { diff --git a/numpy/_core/src/common/ufunc_override.c b/numpy/_core/src/common/ufunc_override.c index e98315f14a94..0bcbea5baa30 100644 --- a/numpy/_core/src/common/ufunc_override.c +++ b/numpy/_core/src/common/ufunc_override.c @@ -108,7 +108,7 @@ PyUFuncOverride_GetOutObjects(PyObject *kwds, PyObject **out_kwd_obj, PyObject * * PySequence_Fast* functions. 
This is required for PyPy */ PyObject *seq; - seq = PySequence_Fast(*out_kwd_obj, + seq = PySequence_Fast(*out_kwd_obj, // noqa: borrowed-ref OK "Could not convert object to sequence"); if (seq == NULL) { Py_CLEAR(*out_kwd_obj); diff --git a/numpy/_core/src/multiarray/_multiarray_tests.c.src b/numpy/_core/src/multiarray/_multiarray_tests.c.src index 8012a32b070e..068fabc7fee8 100644 --- a/numpy/_core/src/multiarray/_multiarray_tests.c.src +++ b/numpy/_core/src/multiarray/_multiarray_tests.c.src @@ -644,7 +644,7 @@ incref_elide_l(PyObject *dummy, PyObject *args) } /* get item without increasing refcount, item may still be on the python * stack but above the inaccessible top */ - r = PyList_GetItem(arg, 4); + r = PyList_GetItem(arg, 4); // noqa: borrowed-ref OK res = PyNumber_Add(r, r); return res; @@ -863,7 +863,7 @@ get_all_cast_information(PyObject *NPY_UNUSED(mod), PyObject *NPY_UNUSED(args)) if (classes == NULL) { goto fail; } - Py_SETREF(classes, PySequence_Fast(classes, NULL)); + Py_SETREF(classes, PySequence_Fast(classes, NULL)); // noqa: borrowed-ref OK if (classes == NULL) { goto fail; } @@ -883,7 +883,7 @@ get_all_cast_information(PyObject *NPY_UNUSED(mod), PyObject *NPY_UNUSED(args)) PyObject *to_dtype, *cast_obj; Py_ssize_t pos = 0; - while (PyDict_Next(NPY_DT_SLOTS(from_dtype)->castingimpls, + while (PyDict_Next(NPY_DT_SLOTS(from_dtype)->castingimpls, // noqa: borrowed-ref OK &pos, &to_dtype, &cast_obj)) { if (cast_obj == Py_None) { continue; @@ -965,7 +965,7 @@ identityhash_tester(PyObject *NPY_UNUSED(mod), } /* Replace the sequence with a guaranteed fast-sequence */ - sequence = PySequence_Fast(sequence, "converting sequence."); + sequence = PySequence_Fast(sequence, "converting sequence."); // noqa: borrowed-ref OK if (sequence == NULL) { goto finish; } diff --git a/numpy/_core/src/multiarray/array_coercion.c b/numpy/_core/src/multiarray/array_coercion.c index ff7d98bd9c64..8271fb6812d1 100644 --- a/numpy/_core/src/multiarray/array_coercion.c +++ 
b/numpy/_core/src/multiarray/array_coercion.c @@ -1148,7 +1148,7 @@ PyArray_DiscoverDTypeAndShape_Recursive( force_sequence_due_to_char_dtype: /* Ensure we have a sequence (required for PyPy) */ - seq = PySequence_Fast(obj, "Could not convert object to sequence"); + seq = PySequence_Fast(obj, "Could not convert object to sequence"); // noqa: borrowed-ref - manual fix needed if (seq == NULL) { /* * Specifically do not fail on things that look like a dictionary, diff --git a/numpy/_core/src/multiarray/arrayfunction_override.c b/numpy/_core/src/multiarray/arrayfunction_override.c index 9834ab138cf6..72211e2a6d62 100644 --- a/numpy/_core/src/multiarray/arrayfunction_override.c +++ b/numpy/_core/src/multiarray/arrayfunction_override.c @@ -371,7 +371,7 @@ array__get_implementing_args( return NULL; } - relevant_args = PySequence_Fast( + relevant_args = PySequence_Fast( // noqa: borrowed-ref OK relevant_args, "dispatcher for __array_function__ did not return an iterable"); if (relevant_args == NULL) { @@ -518,7 +518,7 @@ dispatcher_vectorcall(PyArray_ArrayFunctionDispatcherObject *self, fix_name_if_typeerror(self); return NULL; } - Py_SETREF(relevant_args, PySequence_Fast(relevant_args, + Py_SETREF(relevant_args, PySequence_Fast(relevant_args, // noqa: borrowed-ref OK "dispatcher for __array_function__ did not return an iterable")); if (relevant_args == NULL) { return NULL; diff --git a/numpy/_core/src/multiarray/arraytypes.c.src b/numpy/_core/src/multiarray/arraytypes.c.src index 9e5588f98a83..52c9bdfb6bcc 100644 --- a/numpy/_core/src/multiarray/arraytypes.c.src +++ b/numpy/_core/src/multiarray/arraytypes.c.src @@ -881,7 +881,7 @@ VOID_getitem(void *input, void *vap) npy_intp offset; PyArray_Descr *new; key = PyTuple_GET_ITEM(names, i); - tup = PyDict_GetItem(descr->fields, key); + tup = PyDict_GetItem(descr->fields, key); // noqa: borrowed-ref OK if (_unpack_field(tup, &new, &offset) < 0) { Py_DECREF(ret); return NULL; @@ -973,7 +973,7 @@ _setup_field(int i, 
_PyArray_LegacyDescr *descr, PyArrayObject *arr, npy_intp offset; key = PyTuple_GET_ITEM(descr->names, i); - tup = PyDict_GetItem(descr->fields, key); + tup = PyDict_GetItem(descr->fields, key); // noqa: borrowed-ref OK if (_unpack_field(tup, &new, &offset) < 0) { return -1; } @@ -2274,7 +2274,7 @@ VOID_copyswapn (char *dst, npy_intp dstride, char *src, npy_intp sstride, PyArrayObject_fields dummy_fields = get_dummy_stack_array(arr); PyArrayObject *dummy_arr = (PyArrayObject *)&dummy_fields; - while (PyDict_Next(descr->fields, &pos, &key, &value)) { + while (PyDict_Next(descr->fields, &pos, &key, &value)) { // noqa: borrowed-ref OK npy_intp offset; PyArray_Descr *new; if (NPY_TITLE_KEY(key, value)) { @@ -2359,7 +2359,7 @@ VOID_copyswap (char *dst, char *src, int swap, PyArrayObject *arr) PyArrayObject_fields dummy_fields = get_dummy_stack_array(arr); PyArrayObject *dummy_arr = (PyArrayObject *)&dummy_fields; - while (PyDict_Next(descr->fields, &pos, &key, &value)) { + while (PyDict_Next(descr->fields, &pos, &key, &value)) { // noqa: borrowed-ref OK npy_intp offset; PyArray_Descr * new; @@ -2679,7 +2679,7 @@ VOID_nonzero (char *ip, PyArrayObject *ap) PyArrayObject *dummy_arr = (PyArrayObject *)&dummy_fields; _PyArray_LegacyDescr *descr = (_PyArray_LegacyDescr *)PyArray_DESCR(ap); - while (PyDict_Next(descr->fields, &pos, &key, &value)) { + while (PyDict_Next(descr->fields, &pos, &key, &value)) { // noqa: borrowed-ref OK PyArray_Descr * new; npy_intp offset; if (NPY_TITLE_KEY(key, value)) { @@ -3041,7 +3041,7 @@ VOID_compare(char *ip1, char *ip2, PyArrayObject *ap) PyArray_Descr *new; npy_intp offset; key = PyTuple_GET_ITEM(names, i); - tup = PyDict_GetItem(PyDataType_FIELDS(descr), key); + tup = PyDict_GetItem(PyDataType_FIELDS(descr), key); // noqa: borrowed-ref OK if (_unpack_field(tup, &new, &offset) < 0) { goto finish; } diff --git a/numpy/_core/src/multiarray/buffer.c b/numpy/_core/src/multiarray/buffer.c index fcff3ad6ca74..a6c683f26a8b 100644 --- 
a/numpy/_core/src/multiarray/buffer.c +++ b/numpy/_core/src/multiarray/buffer.c @@ -268,7 +268,7 @@ _buffer_format_string(PyArray_Descr *descr, _tmp_string_t *str, int ret; name = PyTuple_GET_ITEM(ldescr->names, k); - item = PyDict_GetItem(ldescr->fields, name); + item = PyDict_GetItem(ldescr->fields, name); // noqa: borrowed-ref OK child = (PyArray_Descr*)PyTuple_GetItem(item, 0); offset_obj = PyTuple_GetItem(item, 1); diff --git a/numpy/_core/src/multiarray/compiled_base.c b/numpy/_core/src/multiarray/compiled_base.c index fee0d4a61a78..179c8e404322 100644 --- a/numpy/_core/src/multiarray/compiled_base.c +++ b/numpy/_core/src/multiarray/compiled_base.c @@ -1522,7 +1522,7 @@ arr_add_docstring(PyObject *NPY_UNUSED(dummy), PyObject *const *args, Py_ssize_t PyTypeObject *new = (PyTypeObject *)obj; _ADDDOC(new->tp_doc, new->tp_name); if (new->tp_dict != NULL && PyDict_CheckExact(new->tp_dict) && - PyDict_GetItemString(new->tp_dict, "__doc__") == Py_None) { + PyDict_GetItemString(new->tp_dict, "__doc__") == Py_None) { // noqa: borrowed-ref - manual fix needed /* Warning: Modifying `tp_dict` is not generally safe! */ if (PyDict_SetItemString(new->tp_dict, "__doc__", str) < 0) { return NULL; diff --git a/numpy/_core/src/multiarray/conversion_utils.c b/numpy/_core/src/multiarray/conversion_utils.c index 1994dd0ee8f7..d487aa16727d 100644 --- a/numpy/_core/src/multiarray/conversion_utils.c +++ b/numpy/_core/src/multiarray/conversion_utils.c @@ -130,7 +130,7 @@ PyArray_IntpConverter(PyObject *obj, PyArray_Dims *seq) * dimension_from_scalar as soon as possible. */ if (!PyLong_CheckExact(obj) && PySequence_Check(obj)) { - seq_obj = PySequence_Fast(obj, + seq_obj = PySequence_Fast(obj, // noqa: borrowed-ref - manual fix needed "expected a sequence of integers or a single integer."); if (seq_obj == NULL) { /* continue attempting to parse as a single integer. 
*/ @@ -1135,7 +1135,7 @@ PyArray_IntpFromSequence(PyObject *seq, npy_intp *vals, int maxvals) { PyObject *seq_obj = NULL; if (!PyLong_CheckExact(seq) && PySequence_Check(seq)) { - seq_obj = PySequence_Fast(seq, + seq_obj = PySequence_Fast(seq, // noqa: borrowed-ref - manual fix needed "expected a sequence of integers or a single integer"); if (seq_obj == NULL) { /* continue attempting to parse as a single integer. */ diff --git a/numpy/_core/src/multiarray/convert_datatype.c b/numpy/_core/src/multiarray/convert_datatype.c index 59b6298b5815..d34d852a706b 100644 --- a/numpy/_core/src/multiarray/convert_datatype.c +++ b/numpy/_core/src/multiarray/convert_datatype.c @@ -344,7 +344,7 @@ PyArray_GetCastFunc(PyArray_Descr *descr, int type_num) PyObject *cobj; key = PyLong_FromLong(type_num); - cobj = PyDict_GetItem(obj, key); + cobj = PyDict_GetItem(obj, key); // noqa: borrowed-ref OK Py_DECREF(key); if (cobj && PyCapsule_CheckExact(cobj)) { castfunc = PyCapsule_GetPointer(cobj, NULL); @@ -2749,7 +2749,7 @@ nonstructured_to_structured_resolve_descriptors( Py_ssize_t pos = 0; PyObject *key, *tuple; - while (PyDict_Next(to_descr->fields, &pos, &key, &tuple)) { + while (PyDict_Next(to_descr->fields, &pos, &key, &tuple)) { // noqa: borrowed-ref OK PyArray_Descr *field_descr = (PyArray_Descr *)PyTuple_GET_ITEM(tuple, 0); npy_intp field_view_off = NPY_MIN_INTP; NPY_CASTING field_casting = PyArray_GetCastInfo( @@ -2898,7 +2898,7 @@ structured_to_nonstructured_resolve_descriptors( return -1; } PyObject *key = PyTuple_GetItem(PyDataType_NAMES(given_descrs[0]), 0); - PyObject *base_tup = PyDict_GetItem(PyDataType_FIELDS(given_descrs[0]), key); + PyObject *base_tup = PyDict_GetItem(PyDataType_FIELDS(given_descrs[0]), key); // noqa: borrowed-ref OK base_descr = (PyArray_Descr *)PyTuple_GET_ITEM(base_tup, 0); struct_view_offset = PyLong_AsSsize_t(PyTuple_GET_ITEM(base_tup, 1)); if (error_converting(struct_view_offset)) { @@ -3033,7 +3033,7 @@ can_cast_fields_safety( for (Py_ssize_t i 
= 0; i < field_count; i++) { npy_intp field_view_off = NPY_MIN_INTP; PyObject *from_key = PyTuple_GET_ITEM(PyDataType_NAMES(from), i); - PyObject *from_tup = PyDict_GetItemWithError(PyDataType_FIELDS(from), from_key); + PyObject *from_tup = PyDict_GetItemWithError(PyDataType_FIELDS(from), from_key); // noqa: borrowed-ref OK if (from_tup == NULL) { return give_bad_field_error(from_key); } @@ -3041,7 +3041,7 @@ can_cast_fields_safety( /* Check whether the field names match */ PyObject *to_key = PyTuple_GET_ITEM(PyDataType_NAMES(to), i); - PyObject *to_tup = PyDict_GetItem(PyDataType_FIELDS(to), to_key); + PyObject *to_tup = PyDict_GetItem(PyDataType_FIELDS(to), to_key); // noqa: borrowed-ref OK if (to_tup == NULL) { return give_bad_field_error(from_key); } diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index ff30e581cd91..4f466677c57c 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -2114,7 +2114,7 @@ _is_default_descr(PyObject *descr, PyObject *typestr) { if (!PyList_Check(descr) || PyList_GET_SIZE(descr) != 1) { return 0; } - PyObject *tuple = PyList_GET_ITEM(descr, 0); + PyObject *tuple = PyList_GET_ITEM(descr, 0); // noqa: borrowed-ref - manual fix needed if (!(PyTuple_Check(tuple) && PyTuple_GET_SIZE(tuple) == 2)) { return 0; } diff --git a/numpy/_core/src/multiarray/descriptor.c b/numpy/_core/src/multiarray/descriptor.c index c32b0e5e3f9f..135deee83c97 100644 --- a/numpy/_core/src/multiarray/descriptor.c +++ b/numpy/_core/src/multiarray/descriptor.c @@ -424,7 +424,7 @@ _convert_from_array_descr(PyObject *obj, int align) return NULL; } for (int i = 0; i < n; i++) { - PyObject *item = PyList_GET_ITEM(obj, i); + PyObject *item = PyList_GET_ITEM(obj, i); // noqa: borrowed-ref - manual fix needed if (!PyTuple_Check(item) || (PyTuple_GET_SIZE(item) < 2)) { PyErr_Format(PyExc_TypeError, "Field elements must be 2- or 3-tuples, got '%R'", @@ -507,10 +507,10 @@ 
_convert_from_array_descr(PyObject *obj, int align) "StringDType is not currently supported for structured dtype fields."); goto fail; } - if ((PyDict_GetItemWithError(fields, name) != NULL) + if ((PyDict_GetItemWithError(fields, name) != NULL) // noqa: borrowed-ref OK || (title && PyUnicode_Check(title) - && (PyDict_GetItemWithError(fields, title) != NULL))) { + && (PyDict_GetItemWithError(fields, title) != NULL))) { // noqa: borrowed-ref OK PyErr_Format(PyExc_ValueError, "field %R occurs more than once", name); Py_DECREF(conv); @@ -548,7 +548,7 @@ _convert_from_array_descr(PyObject *obj, int align) goto fail; } if (PyUnicode_Check(title)) { - PyObject *existing = PyDict_GetItemWithError(fields, title); + PyObject *existing = PyDict_GetItemWithError(fields, title); // noqa: borrowed-ref OK if (existing == NULL && PyErr_Occurred()) { goto fail; } @@ -613,7 +613,7 @@ _convert_from_list(PyObject *obj, int align) * Ignore any empty string at end which _internal._commastring * can produce */ - PyObject *last_item = PyList_GET_ITEM(obj, n-1); + PyObject *last_item = PyList_GET_ITEM(obj, n-1); // noqa: borrowed-ref OK if (PyUnicode_Check(last_item)) { Py_ssize_t s = PySequence_Size(last_item); if (s < 0) { @@ -643,7 +643,7 @@ _convert_from_list(PyObject *obj, int align) int totalsize = 0; for (int i = 0; i < n; i++) { PyArray_Descr *conv = _convert_from_any( - PyList_GET_ITEM(obj, i), align); + PyList_GET_ITEM(obj, i), align); // noqa: borrowed-ref OK if (conv == NULL) { goto fail; } @@ -794,7 +794,7 @@ _validate_union_object_dtype(_PyArray_LegacyDescr *new, _PyArray_LegacyDescr *co if (name == NULL) { return -1; } - tup = PyDict_GetItemWithError(conv->fields, name); + tup = PyDict_GetItemWithError(conv->fields, name); // noqa: borrowed-ref OK if (tup == NULL) { if (!PyErr_Occurred()) { /* fields was missing the name it claimed to contain */ @@ -940,7 +940,7 @@ _validate_object_field_overlap(_PyArray_LegacyDescr *dtype) if (key == NULL) { return -1; } - tup = 
PyDict_GetItemWithError(fields, key); + tup = PyDict_GetItemWithError(fields, key); // noqa: borrowed-ref OK if (tup == NULL) { if (!PyErr_Occurred()) { /* fields was missing the name it claimed to contain */ @@ -960,7 +960,7 @@ _validate_object_field_overlap(_PyArray_LegacyDescr *dtype) if (key == NULL) { return -1; } - tup = PyDict_GetItemWithError(fields, key); + tup = PyDict_GetItemWithError(fields, key); // noqa: borrowed-ref OK if (tup == NULL) { if (!PyErr_Occurred()) { /* fields was missing the name it claimed to contain */ @@ -1213,7 +1213,7 @@ _convert_from_dict(PyObject *obj, int align) } /* Insert into dictionary */ - if (PyDict_GetItemWithError(fields, name) != NULL) { + if (PyDict_GetItemWithError(fields, name) != NULL) { // noqa: borrowed-ref OK PyErr_SetString(PyExc_ValueError, "name already used as a name or title"); Py_DECREF(tup); @@ -1232,7 +1232,7 @@ _convert_from_dict(PyObject *obj, int align) } if (len == 3) { if (PyUnicode_Check(title)) { - if (PyDict_GetItemWithError(fields, title) != NULL) { + if (PyDict_GetItemWithError(fields, title) != NULL) { // noqa: borrowed-ref OK PyErr_SetString(PyExc_ValueError, "title already used as a name or title."); Py_DECREF(tup); @@ -1895,7 +1895,7 @@ _convert_from_str(PyObject *obj, int align) if (typeDict == NULL) { goto fail; } - PyObject *item = PyDict_GetItemWithError(typeDict, obj); + PyObject *item = PyDict_GetItemWithError(typeDict, obj); // noqa: borrowed-ref - manual fix needed if (item == NULL) { if (PyErr_Occurred()) { return NULL; @@ -2276,7 +2276,7 @@ _arraydescr_isnative(PyArray_Descr *self) PyArray_Descr *new; int offset; Py_ssize_t pos = 0; - while (PyDict_Next(PyDataType_FIELDS(self), &pos, &key, &value)) { + while (PyDict_Next(PyDataType_FIELDS(self), &pos, &key, &value)) { // noqa: borrowed-ref OK if (NPY_TITLE_KEY(key, value)) { continue; } @@ -2422,7 +2422,7 @@ arraydescr_names_set( int ret; key = PyTuple_GET_ITEM(self->names, i); /* Borrowed references to item and new_key */ - item = 
PyDict_GetItemWithError(self->fields, key); + item = PyDict_GetItemWithError(self->fields, key); // noqa: borrowed-ref OK if (item == NULL) { if (!PyErr_Occurred()) { /* fields was missing the name it claimed to contain */ @@ -2848,7 +2848,7 @@ _descr_find_object(PyArray_Descr *self) int offset; Py_ssize_t pos = 0; - while (PyDict_Next(PyDataType_FIELDS(self), &pos, &key, &value)) { + while (PyDict_Next(PyDataType_FIELDS(self), &pos, &key, &value)) { // noqa: borrowed-ref OK if (NPY_TITLE_KEY(key, value)) { continue; } @@ -2964,7 +2964,7 @@ arraydescr_setstate(_PyArray_LegacyDescr *self, PyObject *args) if (fields != Py_None) { PyObject *key, *list; key = PyLong_FromLong(-1); - list = PyDict_GetItemWithError(fields, key); + list = PyDict_GetItemWithError(fields, key); // noqa: borrowed-ref OK if (!list) { if (!PyErr_Occurred()) { /* fields was missing the name it claimed to contain */ @@ -3140,7 +3140,7 @@ arraydescr_setstate(_PyArray_LegacyDescr *self, PyObject *args) for (i = 0; i < PyTuple_GET_SIZE(names); ++i) { name = PyTuple_GET_ITEM(names, i); - field = PyDict_GetItemWithError(fields, name); + field = PyDict_GetItemWithError(fields, name); // noqa: borrowed-ref OK if (!field) { if (!PyErr_Occurred()) { /* fields was missing the name it claimed to contain */ @@ -3344,7 +3344,7 @@ PyArray_DescrNewByteorder(PyArray_Descr *oself, char newendian) return NULL; } /* make new dictionary with replaced PyArray_Descr Objects */ - while (PyDict_Next(self->fields, &pos, &key, &value)) { + while (PyDict_Next(self->fields, &pos, &key, &value)) { // noqa: borrowed-ref OK if (NPY_TITLE_KEY(key, value)) { continue; } @@ -3470,7 +3470,7 @@ is_dtype_struct_simple_unaligned_layout(PyArray_Descr *dtype) if (key == NULL) { return 0; } - tup = PyDict_GetItem(fields, key); + tup = PyDict_GetItem(fields, key); // noqa: borrowed-ref OK if (tup == NULL) { return 0; } @@ -3635,7 +3635,7 @@ _check_has_fields(PyArray_Descr *self) static PyObject * _subscript_by_name(_PyArray_LegacyDescr 
*self, PyObject *op) { - PyObject *obj = PyDict_GetItemWithError(self->fields, op); + PyObject *obj = PyDict_GetItemWithError(self->fields, op); // noqa: borrowed-ref OK if (obj == NULL) { if (!PyErr_Occurred()) { PyErr_Format(PyExc_KeyError, @@ -3672,7 +3672,7 @@ _is_list_of_strings(PyObject *obj) } seqlen = PyList_GET_SIZE(obj); for (i = 0; i < seqlen; i++) { - PyObject *item = PyList_GET_ITEM(obj, i); + PyObject *item = PyList_GET_ITEM(obj, i); // noqa: borrowed-ref - manual fix needed if (!PyUnicode_Check(item)) { return NPY_FALSE; } @@ -3716,7 +3716,7 @@ arraydescr_field_subset_view(_PyArray_LegacyDescr *self, PyObject *ind) */ PyTuple_SET_ITEM(names, i, name); - tup = PyDict_GetItemWithError(self->fields, name); + tup = PyDict_GetItemWithError(self->fields, name); // noqa: borrowed-ref OK if (tup == NULL) { if (!PyErr_Occurred()) { PyErr_SetObject(PyExc_KeyError, name); diff --git a/numpy/_core/src/multiarray/dtype_transfer.c b/numpy/_core/src/multiarray/dtype_transfer.c index 188a55a4b5f5..64d5bfa89e8e 100644 --- a/numpy/_core/src/multiarray/dtype_transfer.c +++ b/numpy/_core/src/multiarray/dtype_transfer.c @@ -2319,7 +2319,7 @@ get_fields_transfer_function(int NPY_UNUSED(aligned), *out_flags = PyArrayMethod_MINIMAL_FLAGS; for (i = 0; i < field_count; ++i) { key = PyTuple_GET_ITEM(PyDataType_NAMES(dst_dtype), i); - tup = PyDict_GetItem(PyDataType_FIELDS(dst_dtype), key); + tup = PyDict_GetItem(PyDataType_FIELDS(dst_dtype), key); // noqa: borrowed-ref OK if (!PyArg_ParseTuple(tup, "Oi|O", &dst_fld_dtype, &dst_offset, &title)) { PyMem_Free(data); @@ -2383,7 +2383,7 @@ get_fields_transfer_function(int NPY_UNUSED(aligned), NPY_traverse_info_init(&data->decref_src); key = PyTuple_GET_ITEM(PyDataType_NAMES(src_dtype), 0); - tup = PyDict_GetItem(PyDataType_FIELDS(src_dtype), key); + tup = PyDict_GetItem(PyDataType_FIELDS(src_dtype), key); // noqa: borrowed-ref OK if (!PyArg_ParseTuple(tup, "Oi|O", &src_fld_dtype, &src_offset, &title)) { PyMem_Free(data); @@ 
-2435,14 +2435,14 @@ get_fields_transfer_function(int NPY_UNUSED(aligned), /* set up the transfer function for each field */ for (i = 0; i < field_count; ++i) { key = PyTuple_GET_ITEM(PyDataType_NAMES(dst_dtype), i); - tup = PyDict_GetItem(PyDataType_FIELDS(dst_dtype), key); + tup = PyDict_GetItem(PyDataType_FIELDS(dst_dtype), key); // noqa: borrowed-ref OK if (!PyArg_ParseTuple(tup, "Oi|O", &dst_fld_dtype, &dst_offset, &title)) { NPY_AUXDATA_FREE((NpyAuxData *)data); return NPY_FAIL; } key = PyTuple_GET_ITEM(PyDataType_NAMES(src_dtype), i); - tup = PyDict_GetItem(PyDataType_FIELDS(src_dtype), key); + tup = PyDict_GetItem(PyDataType_FIELDS(src_dtype), key); // noqa: borrowed-ref OK if (!PyArg_ParseTuple(tup, "Oi|O", &src_fld_dtype, &src_offset, &title)) { NPY_AUXDATA_FREE((NpyAuxData *)data); @@ -3831,4 +3831,4 @@ PyArray_PrepareThreeRawArrayIter(int ndim, npy_intp const *shape, *out_dataC = dataC; *out_ndim = ndim; return 0; -} +} \ No newline at end of file diff --git a/numpy/_core/src/multiarray/dtype_traversal.c b/numpy/_core/src/multiarray/dtype_traversal.c index 91b1889b7d1f..e86aab7411d4 100644 --- a/numpy/_core/src/multiarray/dtype_traversal.c +++ b/numpy/_core/src/multiarray/dtype_traversal.c @@ -346,7 +346,7 @@ get_fields_traverse_function( int offset; key = PyTuple_GET_ITEM(names, i); - tup = PyDict_GetItem(dtype->fields, key); + tup = PyDict_GetItem(dtype->fields, key); // noqa: borrowed-ref OK if (!PyArg_ParseTuple(tup, "Oi|O", &fld_dtype, &offset, &title)) { NPY_AUXDATA_FREE((NpyAuxData *)data); return -1; diff --git a/numpy/_core/src/multiarray/dtypemeta.c b/numpy/_core/src/multiarray/dtypemeta.c index 0b1b0fb39192..5ac7ef3b2320 100644 --- a/numpy/_core/src/multiarray/dtypemeta.c +++ b/numpy/_core/src/multiarray/dtypemeta.c @@ -693,7 +693,7 @@ void_ensure_canonical(_PyArray_LegacyDescr *self) int maxalign = 1; for (Py_ssize_t i = 0; i < field_num; i++) { PyObject *name = PyTuple_GET_ITEM(self->names, i); - PyObject *tuple = 
PyDict_GetItem(self->fields, name); + PyObject *tuple = PyDict_GetItem(self->fields, name); // noqa: borrowed-ref OK PyObject *new_tuple = PyTuple_New(PyTuple_GET_SIZE(tuple)); PyArray_Descr *field_descr = NPY_DT_CALL_ensure_canonical( (PyArray_Descr *)PyTuple_GET_ITEM(tuple, 0)); diff --git a/numpy/_core/src/multiarray/hashdescr.c b/numpy/_core/src/multiarray/hashdescr.c index f570caf1588f..d5359d86390f 100644 --- a/numpy/_core/src/multiarray/hashdescr.c +++ b/numpy/_core/src/multiarray/hashdescr.c @@ -127,7 +127,7 @@ static int _array_descr_walk_fields(PyObject *names, PyObject* fields, PyObject* * For each field, add the key + descr + offset to l */ key = PyTuple_GET_ITEM(names, pos); - value = PyDict_GetItem(fields, key); + value = PyDict_GetItem(fields, key); // noqa: borrowed-ref OK /* XXX: are those checks necessary ? */ if (value == NULL) { PyErr_SetString(PyExc_SystemError, diff --git a/numpy/_core/src/multiarray/iterators.c b/numpy/_core/src/multiarray/iterators.c index 422c690882ab..d83f0d7a3ac3 100644 --- a/numpy/_core/src/multiarray/iterators.c +++ b/numpy/_core/src/multiarray/iterators.c @@ -1489,7 +1489,7 @@ arraymultiter_new(PyTypeObject *NPY_UNUSED(subtype), PyObject *args, return NULL; } - fast_seq = PySequence_Fast(args, ""); // needed for pypy + fast_seq = PySequence_Fast(args, ""); // needed for pypy // noqa: borrowed-ref - manual fix needed if (fast_seq == NULL) { return NULL; } diff --git a/numpy/_core/src/multiarray/legacy_dtype_implementation.c b/numpy/_core/src/multiarray/legacy_dtype_implementation.c index 70b4fa1e49db..abfc1bd0e3cd 100644 --- a/numpy/_core/src/multiarray/legacy_dtype_implementation.c +++ b/numpy/_core/src/multiarray/legacy_dtype_implementation.c @@ -320,8 +320,8 @@ can_cast_fields(PyObject *field1, PyObject *field2, NPY_CASTING casting) /* Iterate over all the fields and compare for castability */ ppos = 0; - while (PyDict_Next(field1, &ppos, &key, &tuple1)) { - if ((tuple2 = PyDict_GetItem(field2, key)) == NULL) { + 
while (PyDict_Next(field1, &ppos, &key, &tuple1)) { // noqa: borrowed-ref OK + if ((tuple2 = PyDict_GetItem(field2, key)) == NULL) { // noqa: borrowed-ref OK return 0; } /* Compare the dtype of the field for castability */ @@ -372,7 +372,7 @@ PyArray_LegacyCanCastTypeTo(PyArray_Descr *from, PyArray_Descr *to, Py_ssize_t ppos = 0; PyObject *tuple; PyArray_Descr *field; - PyDict_Next(lfrom->fields, &ppos, NULL, &tuple); + PyDict_Next(lfrom->fields, &ppos, NULL, &tuple); // noqa: borrowed-ref OK field = (PyArray_Descr *)PyTuple_GET_ITEM(tuple, 0); /* * For a subarray, we need to get the underlying type; diff --git a/numpy/_core/src/multiarray/mapping.c b/numpy/_core/src/multiarray/mapping.c index 7483448e632b..12f25534f1d0 100644 --- a/numpy/_core/src/multiarray/mapping.c +++ b/numpy/_core/src/multiarray/mapping.c @@ -1351,7 +1351,7 @@ _get_field_view(PyArrayObject *arr, PyObject *ind, PyArrayObject **view) npy_intp offset; /* get the field offset and dtype */ - tup = PyDict_GetItemWithError(PyDataType_FIELDS(PyArray_DESCR(arr)), ind); + tup = PyDict_GetItemWithError(PyDataType_FIELDS(PyArray_DESCR(arr)), ind); // noqa: borrowed-ref OK if (tup == NULL && PyErr_Occurred()) { return 0; } diff --git a/numpy/_core/src/multiarray/methods.c b/numpy/_core/src/multiarray/methods.c index 58a554dc40be..50f7f5f3c73b 100644 --- a/numpy/_core/src/multiarray/methods.c +++ b/numpy/_core/src/multiarray/methods.c @@ -1005,7 +1005,7 @@ any_array_ufunc_overrides(PyObject *args, PyObject *kwds) if (nin < 0) { return -1; } - fast = PySequence_Fast(args, "Could not convert object to sequence"); + fast = PySequence_Fast(args, "Could not convert object to sequence"); // noqa: borrowed-ref - manual fix needed if (fast == NULL) { return -1; } @@ -1033,7 +1033,7 @@ any_array_ufunc_overrides(PyObject *args, PyObject *kwds) } Py_DECREF(out_kwd_obj); /* check where if it exists */ - where_obj = PyDict_GetItemWithError(kwds, npy_interned_str.where); + where_obj = PyDict_GetItemWithError(kwds, 
npy_interned_str.where); // noqa: borrowed-ref OK if (where_obj == NULL) { if (PyErr_Occurred()) { return -1; @@ -1115,7 +1115,7 @@ array_function(PyArrayObject *NPY_UNUSED(self), PyObject *c_args, PyObject *c_kw PyErr_SetString(PyExc_TypeError, "kwargs must be a dict."); return NULL; } - types = PySequence_Fast( + types = PySequence_Fast( // noqa: borrowed-ref - manual fix needed types, "types argument to ndarray.__array_function__ must be iterable"); if (types == NULL) { @@ -1566,7 +1566,7 @@ _deepcopy_call(char *iptr, char *optr, PyArray_Descr *dtype, PyArray_Descr *new; int offset, res; Py_ssize_t pos = 0; - while (PyDict_Next(PyDataType_FIELDS(dtype), &pos, &key, &value)) { + while (PyDict_Next(PyDataType_FIELDS(dtype), &pos, &key, &value)) { // noqa: borrowed-ref OK if (NPY_TITLE_KEY(key, value)) { continue; } @@ -1733,7 +1733,7 @@ _setlist_pkl(PyArrayObject *self, PyObject *list) return -1; } while(iter->index < iter->size) { - theobject = PyList_GET_ITEM(list, iter->index); + theobject = PyList_GET_ITEM(list, iter->index); // noqa: borrowed-ref OK setitem(theobject, iter->dataptr, self); PyArray_ITER_NEXT(iter); } diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index e80c6c0cd45c..b8ade23b6d76 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -2764,7 +2764,7 @@ einsum_list_to_subscripts(PyObject *obj, char *subscripts, int subsize) npy_intp i, size; PyObject *item; - obj = PySequence_Fast(obj, "the subscripts for each operand must " + obj = PySequence_Fast(obj, "the subscripts for each operand must " // noqa: borrowed-ref - manual fix needed "be a list or a tuple"); if (obj == NULL) { return -1; diff --git a/numpy/_core/src/multiarray/nditer_pywrap.c b/numpy/_core/src/multiarray/nditer_pywrap.c index 27c392db8720..0af2ee408b4b 100644 --- a/numpy/_core/src/multiarray/nditer_pywrap.c +++ b/numpy/_core/src/multiarray/nditer_pywrap.c @@ 
-588,7 +588,7 @@ npyiter_prepare_ops(PyObject *op_in, PyObject **out_owner, PyObject ***out_objs) { /* Take ownership of op_in (either a tuple/list or single element): */ if (PyTuple_Check(op_in) || PyList_Check(op_in)) { - PyObject *seq = PySequence_Fast(op_in, "failed accessing item list"); + PyObject *seq = PySequence_Fast(op_in, "failed accessing item list"); // noqa: borrowed-ref - manual fix needed if (op_in == NULL) { Py_DECREF(op_in); return -1; diff --git a/numpy/_core/src/multiarray/refcount.c b/numpy/_core/src/multiarray/refcount.c index 571b50372684..ac70f38f39a5 100644 --- a/numpy/_core/src/multiarray/refcount.c +++ b/numpy/_core/src/multiarray/refcount.c @@ -184,7 +184,7 @@ PyArray_Item_INCREF(char *data, PyArray_Descr *descr) int offset; Py_ssize_t pos = 0; - while (PyDict_Next(PyDataType_FIELDS(descr), &pos, &key, &value)) { + while (PyDict_Next(PyDataType_FIELDS(descr), &pos, &key, &value)) { // noqa: borrowed-ref OK if (NPY_TITLE_KEY(key, value)) { continue; } @@ -246,7 +246,7 @@ PyArray_Item_XDECREF(char *data, PyArray_Descr *descr) int offset; Py_ssize_t pos = 0; - while (PyDict_Next(PyDataType_FIELDS(descr), &pos, &key, &value)) { + while (PyDict_Next(PyDataType_FIELDS(descr), &pos, &key, &value)) { // noqa: borrowed-ref OK if (NPY_TITLE_KEY(key, value)) { continue; } @@ -480,7 +480,7 @@ _fill_with_none(char *optr, PyArray_Descr *dtype) int offset; Py_ssize_t pos = 0; - while (PyDict_Next(PyDataType_FIELDS(dtype), &pos, &key, &value)) { + while (PyDict_Next(PyDataType_FIELDS(dtype), &pos, &key, &value)) { // noqa: borrowed-ref OK if (NPY_TITLE_KEY(key, value)) { continue; } diff --git a/numpy/_core/src/multiarray/textreading/rows.c b/numpy/_core/src/multiarray/textreading/rows.c index c459fa826e53..e5a294ecb01d 100644 --- a/numpy/_core/src/multiarray/textreading/rows.c +++ b/numpy/_core/src/multiarray/textreading/rows.c @@ -61,7 +61,7 @@ create_conv_funcs( Py_ssize_t pos = 0; int error = 0; Py_BEGIN_CRITICAL_SECTION(converters); - while 
(PyDict_Next(converters, &pos, &key, &value)) { + while (PyDict_Next(converters, &pos, &key, &value)) { // noqa: borrowed-ref OK Py_ssize_t column = PyNumber_AsSsize_t(key, PyExc_IndexError); if (column == -1 && PyErr_Occurred()) { PyErr_Format(PyExc_TypeError, diff --git a/numpy/_core/src/multiarray/usertypes.c b/numpy/_core/src/multiarray/usertypes.c index 445f7ad7fe67..9d026f32044f 100644 --- a/numpy/_core/src/multiarray/usertypes.c +++ b/numpy/_core/src/multiarray/usertypes.c @@ -344,7 +344,7 @@ static int _warn_if_cast_exists_already( if (to_DType == NULL) { return -1; } - PyObject *cast_impl = PyDict_GetItemWithError( + PyObject *cast_impl = PyDict_GetItemWithError( // noqa: borrowed-ref OK NPY_DT_SLOTS(NPY_DTYPE(descr))->castingimpls, (PyObject *)to_DType); Py_DECREF(to_DType); if (cast_impl == NULL) { diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index fd9ae9f1e41d..1d2c3edbd3b9 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -1368,7 +1368,7 @@ _parse_axes_arg(PyUFuncObject *ufunc, int op_core_num_dims[], PyObject *axes, * Get axes tuple for operand. If not a tuple already, make it one if * there is only one axis (its content is checked later). */ - op_axes_tuple = PyList_GET_ITEM(axes, iop); + op_axes_tuple = PyList_GET_ITEM(axes, iop); // noqa: borrowed-ref - manual fix needed if (PyTuple_Check(op_axes_tuple)) { if (PyTuple_Size(op_axes_tuple) != op_ncore) { /* must have been a tuple with too many entries. 
*/ @@ -4944,7 +4944,7 @@ PyUFunc_RegisterLoopForDescr(PyUFuncObject *ufunc, function, arg_typenums, data); if (result == 0) { - cobj = PyDict_GetItemWithError(ufunc->userloops, key); + cobj = PyDict_GetItemWithError(ufunc->userloops, key); // noqa: borrowed-ref OK if (cobj == NULL && PyErr_Occurred()) { result = -1; } @@ -5075,7 +5075,7 @@ PyUFunc_RegisterLoopForType(PyUFuncObject *ufunc, */ int add_new_loop = 1; for (Py_ssize_t j = 0; j < PyList_GET_SIZE(ufunc->_loops); j++) { - PyObject *item = PyList_GET_ITEM(ufunc->_loops, j); + PyObject *item = PyList_GET_ITEM(ufunc->_loops, j); // noqa: borrowed-ref OK PyObject *existing_tuple = PyTuple_GET_ITEM(item, 0); int cmp = PyObject_RichCompareBool(existing_tuple, signature_tuple, Py_EQ); @@ -5117,7 +5117,7 @@ PyUFunc_RegisterLoopForType(PyUFuncObject *ufunc, funcdata->nargs = 0; /* Get entry for this user-defined type*/ - cobj = PyDict_GetItemWithError(ufunc->userloops, key); + cobj = PyDict_GetItemWithError(ufunc->userloops, key); // noqa: borrowed-ref OK if (cobj == NULL && PyErr_Occurred()) { goto fail; } diff --git a/numpy/_core/src/umath/ufunc_type_resolution.c b/numpy/_core/src/umath/ufunc_type_resolution.c index 95670efb936f..8d617f3ddc6c 100644 --- a/numpy/_core/src/umath/ufunc_type_resolution.c +++ b/numpy/_core/src/umath/ufunc_type_resolution.c @@ -1455,7 +1455,7 @@ find_userloop(PyUFuncObject *ufunc, if (key == NULL) { return -1; } - obj = PyDict_GetItemWithError(ufunc->userloops, key); + obj = PyDict_GetItemWithError(ufunc->userloops, key); // noqa: borrowed-ref - manual fix needed Py_DECREF(key); if (obj == NULL && PyErr_Occurred()){ return -1; @@ -1742,7 +1742,7 @@ linear_search_userloop_type_resolver(PyUFuncObject *self, if (key == NULL) { return -1; } - obj = PyDict_GetItemWithError(self->userloops, key); + obj = PyDict_GetItemWithError(self->userloops, key); // noqa: borrowed-ref - manual fix needed Py_DECREF(key); if (obj == NULL && PyErr_Occurred()){ return -1; @@ -1813,7 +1813,7 @@ 
type_tuple_userloop_type_resolver(PyUFuncObject *self, if (key == NULL) { return -1; } - obj = PyDict_GetItemWithError(self->userloops, key); + obj = PyDict_GetItemWithError(self->userloops, key); // noqa: borrowed-ref - manual fix needed Py_DECREF(key); if (obj == NULL && PyErr_Occurred()){ return -1; diff --git a/numpy/_core/src/umath/umathmodule.c b/numpy/_core/src/umath/umathmodule.c index e5cf2cf8acb3..3efb02bd4a49 100644 --- a/numpy/_core/src/umath/umathmodule.c +++ b/numpy/_core/src/umath/umathmodule.c @@ -267,11 +267,11 @@ int initumath(PyObject *m) PyModule_AddObject(m, "NZERO", PyFloat_FromDouble(NPY_NZERO)); PyModule_AddObject(m, "NAN", PyFloat_FromDouble(NPY_NAN)); - s = PyDict_GetItemString(d, "divide"); + s = PyDict_GetItemString(d, "divide"); // noqa: borrowed-ref OK PyDict_SetItemString(d, "true_divide", s); - s = PyDict_GetItemString(d, "conjugate"); - s2 = PyDict_GetItemString(d, "remainder"); + s = PyDict_GetItemString(d, "conjugate"); // noqa: borrowed-ref OK + s2 = PyDict_GetItemString(d, "remainder"); // noqa: borrowed-ref OK /* Setup the array object's numerical structures with appropriate ufuncs in d*/ diff --git a/numpy/f2py/src/fortranobject.c b/numpy/f2py/src/fortranobject.c index 5c2b4bdf0931..d6664d6bdfb7 100644 --- a/numpy/f2py/src/fortranobject.c +++ b/numpy/f2py/src/fortranobject.c @@ -47,7 +47,7 @@ F2PySwapThreadLocalCallbackPtr(char *key, void *ptr) "failed"); } - value = PyDict_GetItemString(local_dict, key); + value = PyDict_GetItemString(local_dict, key); // noqa: borrowed-ref OK if (value != NULL) { prev = PyLong_AsVoidPtr(value); if (PyErr_Occurred()) { @@ -87,7 +87,7 @@ F2PyGetThreadLocalCallbackPtr(char *key) "F2PyGetThreadLocalCallbackPtr: PyThreadState_GetDict failed"); } - value = PyDict_GetItemString(local_dict, key); + value = PyDict_GetItemString(local_dict, key); // noqa: borrowed-ref OK if (value != NULL) { prev = PyLong_AsVoidPtr(value); if (PyErr_Occurred()) { @@ -365,7 +365,7 @@ fortran_getattr(PyFortranObject 
*fp, char *name) if (fp->dict != NULL) { // python 3.13 added PyDict_GetItemRef #if PY_VERSION_HEX < 0x030D0000 - PyObject *v = _PyDict_GetItemStringWithError(fp->dict, name); + PyObject *v = _PyDict_GetItemStringWithError(fp->dict, name); // noqa: borrowed-ref OK if (v == NULL && PyErr_Occurred()) { return NULL; } @@ -822,7 +822,7 @@ get_elsize(PyObject *obj) { } else if (PyUnicode_Check(obj)) { return PyUnicode_GET_LENGTH(obj); } else if (PySequence_Check(obj)) { - PyObject* fast = PySequence_Fast(obj, "f2py:fortranobject.c:get_elsize"); + PyObject* fast = PySequence_Fast(obj, "f2py:fortranobject.c:get_elsize"); // noqa: borrowed-ref OK if (fast != NULL) { Py_ssize_t i, n = PySequence_Fast_GET_SIZE(fast); int sz, elsize = 0; diff --git a/tools/ci/check_c_api_usage.sh b/tools/ci/check_c_api_usage.sh new file mode 100644 index 000000000000..9f4203284823 --- /dev/null +++ b/tools/ci/check_c_api_usage.sh @@ -0,0 +1,91 @@ +#!/usr/bin/env bash +set -e + +# List of suspicious function calls: +SUSPICIOUS_FUNCS=( + "PyList_GetItem" + "PyDict_GetItem" + "PyDict_GetItemWithError" + "PyDict_GetItemString" + "PyDict_SetDefault" + "PyDict_Next" + "PyWeakref_GetObject" + "PyWeakref_GET_OBJECT" + "PyList_GET_ITEM" + "_PyDict_GetItemStringWithError" + "PySequence_Fast" +) + +# Find all C/C++ source files in the repo +ALL_FILES=$(find numpy -type f \( -name "*.c" -o -name "*.h" -o -name "*.c.src" -o -name "*.cpp" \) ! -path "*/pythoncapi-compat/*") + +# For debugging: print out file count +echo "Scanning $(echo "$ALL_FILES" | wc -l) C/C++ source files..." 
+ +# Prepare a result file +mkdir -p .tmp +OUTPUT=$(mktemp .tmp/c_api_usage_report.XXXXXX.txt) +echo -e "Running Suspicious C API usage report workflow...\n" > $OUTPUT + +FAIL=0 + +# Scan each changed file +for file in $ALL_FILES; do + + for func in "${SUSPICIOUS_FUNCS[@]}"; do + # -n : show line number + # -P : perl-style boundaries + # (?> "$OUTPUT" + echo " -> $line" >> "$OUTPUT" + echo "Recommendation:" >> "$OUTPUT" + echo "If this use is intentional and safe, add '// noqa: borrowed-ref OK' on the same line to silence this warning." >> "$OUTPUT" + echo "Otherwise, consider replacing $func with a thread-safe API function." >> "$OUTPUT" + echo "" >> "$OUTPUT" + FAIL=1 + done <<< "$matches" + fi + done +done + +if [[ $FAIL -eq 1 ]]; then + echo "C API borrow-ref linter found issues." +else + echo "C API borrow-ref linter found no issues." > $OUTPUT +fi + +cat "$OUTPUT" +exit "$FAIL" diff --git a/tools/linter.py b/tools/linter.py index 1ce9ca763343..4b5115c97a2b 100644 --- a/tools/linter.py +++ b/tools/linter.py @@ -18,6 +18,7 @@ def run_ruff(self, fix: bool) -> tuple[int, str]: Unlike pycodestyle, ruff by itself is not capable of limiting its output to the given diff. 
""" + print("Running Ruff Check...") command = ["ruff", "check"] if fix: command.append("--fix") @@ -31,12 +32,35 @@ def run_ruff(self, fix: bool) -> tuple[int, str]: return res.returncode, res.stdout def run_lint(self, fix: bool) -> None: - retcode, errors = self.run_ruff(fix) - errors and print(errors) + # Ruff Linter + retcode, ruff_errors = self.run_ruff(fix) + ruff_errors and print(ruff_errors) + + if retcode: + sys.exit(retcode) + + # C API Borrowed-ref Linter + retcode, c_API_errors = self.run_check_c_api() + c_API_errors and print(c_API_errors) sys.exit(retcode) + def run_check_c_api(self) -> tuple[int, str]: + # Running borrowed ref checker + print("Running C API borrow-reference linter...") + borrowed_ref_script = os.path.join(self.repository_root, "tools", "ci", + "check_c_api_usage.sh") + borrowed_res = subprocess.run( + ["bash", borrowed_ref_script], + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + encoding="utf-8", + ) + + # Exit with non-zero if C API Check fails + return borrowed_res.returncode, borrowed_res.stdout + if __name__ == "__main__": parser = ArgumentParser() From d02611ad99488637b48f4be203f297ea7b29c95d Mon Sep 17 00:00:00 2001 From: hannah Date: Thu, 17 Jul 2025 05:43:20 -0400 Subject: [PATCH 0226/1718] Doc: use different data in stacked arrays (#29390) Improve docstring examples for dstack and column_stack --- numpy/lib/_shape_base_impl.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/numpy/lib/_shape_base_impl.py b/numpy/lib/_shape_base_impl.py index 89b86c80964d..c44d603611ee 100644 --- a/numpy/lib/_shape_base_impl.py +++ b/numpy/lib/_shape_base_impl.py @@ -653,11 +653,11 @@ def column_stack(tup): -------- >>> import numpy as np >>> a = np.array((1,2,3)) - >>> b = np.array((2,3,4)) + >>> b = np.array((4,5,6)) >>> np.column_stack((a,b)) - array([[1, 2], - [2, 3], - [3, 4]]) + array([[1, 4], + [2, 5], + [3, 6]]) """ arrays = [] @@ -713,18 +713,18 @@ def dstack(tup): -------- >>> 
import numpy as np >>> a = np.array((1,2,3)) - >>> b = np.array((2,3,4)) + >>> b = np.array((4,5,6)) >>> np.dstack((a,b)) - array([[[1, 2], - [2, 3], - [3, 4]]]) + array([[[1, 4], + [2, 5], + [3, 6]]]) >>> a = np.array([[1],[2],[3]]) - >>> b = np.array([[2],[3],[4]]) + >>> b = np.array([[4],[5],[6]]) >>> np.dstack((a,b)) - array([[[1, 2]], - [[2, 3]], - [[3, 4]]]) + array([[[1, 4]], + [[2, 5]], + [[3, 6]]]) """ arrs = atleast_3d(*tup) From bd5cb2a293c02b8a50864cec1a751afc54f2ff47 Mon Sep 17 00:00:00 2001 From: Dan Raviv Date: Sat, 19 Jul 2025 23:55:14 -0700 Subject: [PATCH 0227/1718] MNT: Cleanup infs handling in np.testing assertion utilities (#29321) * TST: Add failing test showing shape mismatch issue Recently, #29112 added showing first mismatches indices, but assert_array_almost_equal.compare trips it up by returning a shape different from its input, causing an IndexError: ``` <...> if invalids.ndim != 0: if flagged.ndim > 0: > positions = np.argwhere(np.asarray(~flagged))[invalids] E IndexError: boolean index did not match indexed array along axis 0; size of axis is 3 but size of corresponding boolean axis is 2 ``` (traceback shown using pytest --full-trace) * MNT: Remove assert_array_almost_equal old infs logic, but tests fail A nice cleanup (newer, similar inf handling already exists now in assert_array_compare), and resolves the shape mismatch issue. However, the removed logic was handling complex infs while the new one isn't, causing the new test and an existing one (TestInterp::test_complex_interp) to now fail with RuntimeWarnings attempting to subtract the complex infs. * MNT: assert_array_compare handles all inf values assert_array_compare now tests all inf values for matching position and value, including complex infs. Fixes the failing tests. * TST: Test array_allclose behavior for inf values The behavior for real infs is the same as before. 
For complex infs, demonstrates that the behavior for mismatching values is now cleaner, showing a concise error message vs. previously displaying nan max errors. For complex infs with matching values, the behavior is the same as before, accepting them as equal (although internally they would now be filtered ahead of being passed to isclose, like real infs already had been). * TST: assert_allclose behavior with nans that are also infs * MNT: Extract robust_any_difference() helper to DRY --- numpy/testing/_private/utils.py | 84 ++++++++++++++++++------------- numpy/testing/tests/test_utils.py | 49 ++++++++++++++++++ 2 files changed, 98 insertions(+), 35 deletions(-) diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index e868239e62ab..592940f3e328 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -25,7 +25,7 @@ import numpy as np import numpy.linalg._umath_linalg -from numpy import isfinite, isinf, isnan +from numpy import isfinite, isnan from numpy._core import arange, array, array_repr, empty, float32, intp, isnat, ndarray __all__ = [ @@ -758,17 +758,7 @@ def istime(x): def isvstring(x): return x.dtype.char == "T" - def func_assert_same_pos(x, y, func=isnan, hasval='nan'): - """Handling nan/inf. - - Combine results of running func on x and y, checking that they are True - at the same locations. - - """ - __tracebackhide__ = True # Hide traceback for py.test - - x_id = func(x) - y_id = func(y) + def robust_any_difference(x, y): # We include work-arounds here to handle three types of slightly # pathological ndarray subclasses: # (1) all() on fully masked arrays returns np.ma.masked, so we use != True @@ -781,10 +771,23 @@ def func_assert_same_pos(x, y, func=isnan, hasval='nan'): # not implement np.all(), so favor using the .all() method # We are not committed to supporting cases (2) and (3), but it's nice to # support them if possible. 
- result = x_id == y_id + result = x == y if not hasattr(result, "all") or not callable(result.all): result = np.bool(result) - if result.all() != True: + return result.all() != True + + def func_assert_same_pos(x, y, func=isnan, hasval='nan'): + """Handling nan/inf. + + Combine results of running func on x and y, checking that they are True + at the same locations. + + """ + __tracebackhide__ = True # Hide traceback for py.test + + x_id = func(x) + y_id = func(y) + if robust_any_difference(x_id, y_id): msg = build_err_msg( [x, y], err_msg + '\n%s location mismatch:' @@ -804,6 +807,29 @@ def func_assert_same_pos(x, y, func=isnan, hasval='nan'): else: return y_id + def assert_same_inf_values(x, y, infs_mask): + """ + Verify all inf values match in the two arrays + """ + __tracebackhide__ = True # Hide traceback for py.test + + if not infs_mask.any(): + return + if x.ndim > 0 and y.ndim > 0: + x = x[infs_mask] + y = y[infs_mask] + else: + assert infs_mask.all() + + if robust_any_difference(x, y): + msg = build_err_msg( + [x, y], + err_msg + '\ninf values mismatch:', + verbose=verbose, header=header, + names=names, + precision=precision) + raise AssertionError(msg) + try: if strict: cond = x.shape == y.shape and x.dtype == y.dtype @@ -828,12 +854,15 @@ def func_assert_same_pos(x, y, func=isnan, hasval='nan'): flagged = func_assert_same_pos(x, y, func=isnan, hasval='nan') if equal_inf: - flagged |= func_assert_same_pos(x, y, - func=lambda xy: xy == +inf, - hasval='+inf') - flagged |= func_assert_same_pos(x, y, - func=lambda xy: xy == -inf, - hasval='-inf') + # If equal_nan=True, skip comparing nans below for equality if they are + # also infs (e.g. inf+nanj) since that would always fail. 
+ isinf_func = lambda xy: np.logical_and(np.isinf(xy), np.invert(flagged)) + infs_mask = func_assert_same_pos( + x, y, + func=isinf_func, + hasval='inf') + assert_same_inf_values(x, y, infs_mask) + flagged |= infs_mask elif istime(x) and istime(y): # If one is datetime64 and the other timedelta64 there is no point @@ -1183,24 +1212,9 @@ def assert_array_almost_equal(actual, desired, decimal=6, err_msg='', """ __tracebackhide__ = True # Hide traceback for py.test from numpy._core import number, result_type - from numpy._core.fromnumeric import any as npany from numpy._core.numerictypes import issubdtype def compare(x, y): - try: - if npany(isinf(x)) or npany(isinf(y)): - xinfid = isinf(x) - yinfid = isinf(y) - if not (xinfid == yinfid).all(): - return False - # if one item, x and y is +- inf - if x.size == y.size == 1: - return x == y - x = x[~xinfid] - y = y[~yinfid] - except (TypeError, NotImplementedError): - pass - # make sure y is an inexact type to avoid abs(MIN_INT); will cause # casting of x later. dtype = result_type(y, 1.) diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py index 243b8d420936..07f2c9da3005 100644 --- a/numpy/testing/tests/test_utils.py +++ b/numpy/testing/tests/test_utils.py @@ -612,6 +612,18 @@ def test_inf(self): assert_raises(AssertionError, lambda: self._assert_func(a, b)) + def test_complex_inf(self): + a = np.array([np.inf + 1.j, 2. + 1.j, 3. + 1.j]) + b = a.copy() + self._assert_func(a, b) + b[1] = 3. 
+ 1.j + expected_msg = ('Mismatched elements: 1 / 3 (33.3%)\n' + 'Mismatch at index:\n' + ' [1]: (2+1j) (ACTUAL), (3+1j) (DESIRED)\n' + 'Max absolute difference among violations: 1.\n') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(a, b) + def test_subclass(self): a = np.array([[1., 2.], [3., 4.]]) b = np.ma.masked_array([[1., 2.], [0., 4.]], @@ -1283,11 +1295,21 @@ def test_equal_nan(self): # Should not raise: assert_allclose(a, b, equal_nan=True) + a = np.array([complex(np.nan, np.inf)]) + b = np.array([complex(np.nan, np.inf)]) + assert_allclose(a, b, equal_nan=True) + b = np.array([complex(np.nan, -np.inf)]) + assert_allclose(a, b, equal_nan=True) + def test_not_equal_nan(self): a = np.array([np.nan]) b = np.array([np.nan]) assert_raises(AssertionError, assert_allclose, a, b, equal_nan=False) + a = np.array([complex(np.nan, np.inf)]) + b = np.array([complex(np.nan, np.inf)]) + assert_raises(AssertionError, assert_allclose, a, b, equal_nan=False) + def test_equal_nan_default(self): # Make sure equal_nan default behavior remains unchanged. (All # of these functions use assert_array_compare under the hood.) 
@@ -1336,6 +1358,33 @@ def test_strict(self): with pytest.raises(AssertionError): assert_allclose(x, x.astype(np.float32), strict=True) + def test_infs(self): + a = np.array([np.inf]) + b = np.array([np.inf]) + assert_allclose(a, b) + + b = np.array([3.]) + expected_msg = 'inf location mismatch:' + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + assert_allclose(a, b) + + b = np.array([-np.inf]) + expected_msg = 'inf values mismatch:' + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + assert_allclose(a, b) + b = np.array([complex(np.inf, 1.)]) + expected_msg = 'inf values mismatch:' + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + assert_allclose(a, b) + + a = np.array([complex(np.inf, 1.)]) + b = np.array([complex(np.inf, 1.)]) + assert_allclose(a, b) + + b = np.array([complex(np.inf, 2.)]) + expected_msg = 'inf values mismatch:' + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + assert_allclose(a, b) class TestArrayAlmostEqualNulp: From 96cf781e226900ba88592f3a20145d6af63e863a Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Thu, 15 May 2025 11:59:22 +0200 Subject: [PATCH 0228/1718] STY: Apply ruff/Perflint rule PERF102 PERF102 When using only the values of a dict use the `values()` method PERF102 When using only the keys of a dict use the `keys()` method --- benchmarks/benchmarks/bench_ufunc.py | 4 ++-- numpy/f2py/func2subr.py | 2 +- numpy/ma/testutils.py | 8 ++++---- numpy/testing/_private/utils.py | 6 +++--- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/benchmarks/benchmarks/bench_ufunc.py b/benchmarks/benchmarks/bench_ufunc.py index 7dc321ac2980..155e7d4f7421 100644 --- a/benchmarks/benchmarks/bench_ufunc.py +++ b/benchmarks/benchmarks/bench_ufunc.py @@ -53,7 +53,7 @@ def setup(self, ufuncname): except AttributeError: raise NotImplementedError self.args = [] - for _, aarg in get_squares_().items(): 
+ for aarg in get_squares_().values(): arg = (aarg,) * 1 # no nin try: self.afdn(*arg) @@ -100,7 +100,7 @@ def setup(self, ufuncname): except AttributeError: raise NotImplementedError self.args = [] - for _, aarg in get_squares_().items(): + for aarg in get_squares_().values(): arg = (aarg,) * self.ufn.nin try: self.ufn(*arg) diff --git a/numpy/f2py/func2subr.py b/numpy/f2py/func2subr.py index 0a875006ed75..09b67f7c3085 100644 --- a/numpy/f2py/func2subr.py +++ b/numpy/f2py/func2subr.py @@ -77,7 +77,7 @@ def var2fixfortran(vars, a, fa=None, f90mode=None): def useiso_c_binding(rout): useisoc = False - for key, value in rout['vars'].items(): + for value in rout['vars'].values(): kind_value = value.get('kindselector', {}).get('kind') if kind_value in isoc_kindmap: return True diff --git a/numpy/ma/testutils.py b/numpy/ma/testutils.py index bffcc34b759c..4840b758663c 100644 --- a/numpy/ma/testutils.py +++ b/numpy/ma/testutils.py @@ -124,7 +124,7 @@ def assert_equal(actual, desired, err_msg=''): for k, i in desired.items(): if k not in actual: raise AssertionError(f"{k} not in {actual}") - assert_equal(actual[k], desired[k], f'key={k!r}\n{err_msg}') + assert_equal(actual[k], i, f'key={k!r}\n{err_msg}') return # Case #2: lists ..... 
if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)): @@ -162,12 +162,12 @@ def fail_if_equal(actual, desired, err_msg='',): for k, i in desired.items(): if k not in actual: raise AssertionError(repr(k)) - fail_if_equal(actual[k], desired[k], f'key={k!r}\n{err_msg}') + fail_if_equal(actual[k], i, f'key={k!r}\n{err_msg}') return if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)): fail_if_equal(len(actual), len(desired), err_msg) - for k in range(len(desired)): - fail_if_equal(actual[k], desired[k], f'item={k!r}\n{err_msg}') + for k, i in enumerate(desired): + fail_if_equal(actual[k], i, f'item={k!r}\n{err_msg}') return if isinstance(actual, np.ndarray) or isinstance(desired, np.ndarray): return fail_if_array_equal(actual, desired, err_msg) diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index 592940f3e328..e7bc76c584d6 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -367,13 +367,13 @@ def assert_equal(actual, desired, err_msg='', verbose=True, *, strict=False): for k, i in desired.items(): if k not in actual: raise AssertionError(repr(k)) - assert_equal(actual[k], desired[k], f'key={k!r}\n{err_msg}', + assert_equal(actual[k], i, f'key={k!r}\n{err_msg}', verbose) return if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)): assert_equal(len(actual), len(desired), err_msg, verbose) - for k in range(len(desired)): - assert_equal(actual[k], desired[k], f'item={k!r}\n{err_msg}', + for k, i in enumerate(desired): + assert_equal(actual[k], i, f'item={k!r}\n{err_msg}', verbose) return from numpy import imag, iscomplexobj, real From e8d5153caceed246c378de72e5ebde15b1fc0996 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Thu, 15 May 2025 12:00:41 +0200 Subject: [PATCH 0229/1718] STY: Apply ruff/Perflint rule PERF403 PERF403 Use `dict.update` instead of a for-loop PERF403 Use 
a dictionary comprehension instead of a for-loop --- numpy/_core/code_generators/genapi.py | 3 +-- numpy/lib/introspect.py | 9 +++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/numpy/_core/code_generators/genapi.py b/numpy/_core/code_generators/genapi.py index caeaf7a08532..b0036def7300 100644 --- a/numpy/_core/code_generators/genapi.py +++ b/numpy/_core/code_generators/genapi.py @@ -466,8 +466,7 @@ def _key(x): def merge_api_dicts(dicts): ret = {} for d in dicts: - for k, v in d.items(): - ret[k] = v + ret.update(dict(d.items())) return ret diff --git a/numpy/lib/introspect.py b/numpy/lib/introspect.py index 5526a332fead..7ea621b823c8 100644 --- a/numpy/lib/introspect.py +++ b/numpy/lib/introspect.py @@ -80,13 +80,14 @@ def opt_func_info(func_name=None, signature=None): sig_pattern = re.compile(signature) matching_sigs = {} for k, v in matching_funcs.items(): - matching_chars = {} - for chars, targets in v.items(): + matching_chars = { + chars: targets + for chars, targets in v.items() if any( sig_pattern.search(c) or sig_pattern.search(dtype(c).name) for c in chars - ): - matching_chars[chars] = targets + ) + } if matching_chars: matching_sigs[k] = matching_chars else: From 4eeace478850ade16d74b9fb0db96b5342f73090 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Thu, 15 May 2025 12:03:27 +0200 Subject: [PATCH 0230/1718] MNT: Enforce ruff/Perflint rules (PERF) --- ruff.toml | 54 ++++++++++++++++++++++++++++-------------------------- 1 file changed, 28 insertions(+), 26 deletions(-) diff --git a/ruff.toml b/ruff.toml index ed6026ed0665..98b6b0c7244d 100644 --- a/ruff.toml +++ b/ruff.toml @@ -32,6 +32,7 @@ extend-select = [ "FLY", "I", "PD", + "PERF", "E", "W", "PGH", @@ -39,32 +40,33 @@ extend-select = [ "UP", ] ignore = [ - "B006", # Do not use mutable data structures for argument defaults - "B007", # Loop control variable not used within loop body - "B011", # Do not `assert 
False` (`python -O` removes these calls), raise `AssertionError()` - "B023", # Function definition does not bind loop variable - "B028", # No explicit `stacklevel` keyword argument found - "B904", # Within an `except` clause distinguish raised exceptions from errors in exception handling - "B905", #`zip()` without an explicit `strict=` parameter - "C408", # Unnecessary `dict()` call (rewrite as a literal) - "ISC002", # Implicitly concatenated string literals over multiple lines - "PIE790", # Unnecessary `pass` statement - "PD901", # Avoid using the generic variable name `df` for DataFrames - "E241", # Multiple spaces after comma - "E265", # Block comment should start with `# ` - "E266", # Too many leading `#` before block comment - "E302", # TODO: Expected 2 blank lines, found 1 - "E402", # Module level import not at top of file - "E712", # Avoid equality comparisons to `True` or `False` - "E721", # TODO: Use `is` and `is not` for type comparisons, or `isinstance()` for isinstance check - "E731", # Do not assign a `lambda` expression, use a `def` - "E741", # Ambiguous variable name - "F403", # `from ... 
import *` used; unable to detect undefined names - "F405", # may be undefined, or defined from star imports - "F821", # Undefined name - "F841", # Local variable is assigned to but never used - "UP015", # Unnecessary mode argument - "UP031", # TODO: Use format specifiers instead of percent format + "B006", # Do not use mutable data structures for argument defaults + "B007", # Loop control variable not used within loop body + "B011", # Do not `assert False` (`python -O` removes these calls), raise `AssertionError()` + "B023", # Function definition does not bind loop variable + "B028", # No explicit `stacklevel` keyword argument found + "B904", # Within an `except` clause distinguish raised exceptions from errors in exception handling + "B905", #`zip()` without an explicit `strict=` parameter + "C408", # Unnecessary `dict()` call (rewrite as a literal) + "ISC002", # Implicitly concatenated string literals over multiple lines + "PIE790", # Unnecessary `pass` statement + "PD901", # Avoid using the generic variable name `df` for DataFrames + "PERF401", # PERF401 Use a list comprehension to create a transformed list + "E241", # Multiple spaces after comma + "E265", # Block comment should start with `# ` + "E266", # Too many leading `#` before block comment + "E302", # TODO: Expected 2 blank lines, found 1 + "E402", # Module level import not at top of file + "E712", # Avoid equality comparisons to `True` or `False` + "E721", # TODO: Use `is` and `is not` for type comparisons, or `isinstance()` for isinstance check + "E731", # Do not assign a `lambda` expression, use a `def` + "E741", # Ambiguous variable name + "F403", # `from ... 
import *` used; unable to detect undefined names + "F405", # may be undefined, or defined from star imports + "F821", # Undefined name + "F841", # Local variable is assigned to but never used + "UP015" , # Unnecessary mode argument + "UP031", # TODO: Use format specifiers instead of percent format ] [lint.per-file-ignores] From adedeb093f0254c27e46da84c1139c1f702f6bab Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Thu, 15 May 2025 23:55:18 +0200 Subject: [PATCH 0231/1718] STY: better solution for PERF102 Co-authored-by: Pieter Eendebak --- numpy/ma/testutils.py | 12 ++++++------ numpy/testing/_private/utils.py | 8 ++++---- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/numpy/ma/testutils.py b/numpy/ma/testutils.py index 4840b758663c..0df3b1757fd6 100644 --- a/numpy/ma/testutils.py +++ b/numpy/ma/testutils.py @@ -121,10 +121,10 @@ def assert_equal(actual, desired, err_msg=''): if not isinstance(actual, dict): raise AssertionError(repr(type(actual))) assert_equal(len(actual), len(desired), err_msg) - for k, i in desired.items(): + for k in desired: if k not in actual: raise AssertionError(f"{k} not in {actual}") - assert_equal(actual[k], i, f'key={k!r}\n{err_msg}') + assert_equal(actual[k], desired[k], f'key={k!r}\n{err_msg}') return # Case #2: lists ..... 
if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)): @@ -159,15 +159,15 @@ def fail_if_equal(actual, desired, err_msg='',): if not isinstance(actual, dict): raise AssertionError(repr(type(actual))) fail_if_equal(len(actual), len(desired), err_msg) - for k, i in desired.items(): + for k in desired: if k not in actual: raise AssertionError(repr(k)) - fail_if_equal(actual[k], i, f'key={k!r}\n{err_msg}') + fail_if_equal(actual[k], desired[k], f'key={k!r}\n{err_msg}') return if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)): fail_if_equal(len(actual), len(desired), err_msg) - for k, i in enumerate(desired): - fail_if_equal(actual[k], i, f'item={k!r}\n{err_msg}') + for k in range(len(desired)): + fail_if_equal(actual[k], desired[k], f'item={k!r}\n{err_msg}') return if isinstance(actual, np.ndarray) or isinstance(desired, np.ndarray): return fail_if_array_equal(actual, desired, err_msg) diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index e7bc76c584d6..e4a74e69afb0 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -364,16 +364,16 @@ def assert_equal(actual, desired, err_msg='', verbose=True, *, strict=False): if not isinstance(actual, dict): raise AssertionError(repr(type(actual))) assert_equal(len(actual), len(desired), err_msg, verbose) - for k, i in desired.items(): + for k in desired: if k not in actual: raise AssertionError(repr(k)) - assert_equal(actual[k], i, f'key={k!r}\n{err_msg}', + assert_equal(actual[k], desired[k], f'key={k!r}\n{err_msg}', verbose) return if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)): assert_equal(len(actual), len(desired), err_msg, verbose) - for k, i in enumerate(desired): - assert_equal(actual[k], i, f'item={k!r}\n{err_msg}', + for k in range(len(desired)): + assert_equal(actual[k], desired[k], f'item={k!r}\n{err_msg}', verbose) return from numpy import imag, iscomplexobj, real From 
c56f86d6a1c54267164cf67407db1a95b0307b20 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Thu, 29 May 2025 00:27:18 +0200 Subject: [PATCH 0232/1718] STY: taken into account reviewer's comments Co-authored-by: Joren Hammudoglu --- numpy/_core/code_generators/genapi.py | 2 +- numpy/lib/introspect.py | 9 ++++----- ruff.toml | 2 +- 3 files changed, 6 insertions(+), 7 deletions(-) diff --git a/numpy/_core/code_generators/genapi.py b/numpy/_core/code_generators/genapi.py index b0036def7300..6a370a7dc3cd 100644 --- a/numpy/_core/code_generators/genapi.py +++ b/numpy/_core/code_generators/genapi.py @@ -466,7 +466,7 @@ def _key(x): def merge_api_dicts(dicts): ret = {} for d in dicts: - ret.update(dict(d.items())) + ret.update(d) return ret diff --git a/numpy/lib/introspect.py b/numpy/lib/introspect.py index 7ea621b823c8..816c79a669b9 100644 --- a/numpy/lib/introspect.py +++ b/numpy/lib/introspect.py @@ -80,14 +80,13 @@ def opt_func_info(func_name=None, signature=None): sig_pattern = re.compile(signature) matching_sigs = {} for k, v in matching_funcs.items(): - matching_chars = { - chars: targets - for chars, targets in v.items() + matching_chars = {} + for chars, targets in v.items(): if any( sig_pattern.search(c) or sig_pattern.search(dtype(c).name) for c in chars - ) - } + ): + matching_chars[chars] = targets # noqa: PERF403 if matching_chars: matching_sigs[k] = matching_chars else: diff --git a/ruff.toml b/ruff.toml index 98b6b0c7244d..4666efdf39bc 100644 --- a/ruff.toml +++ b/ruff.toml @@ -51,7 +51,7 @@ ignore = [ "ISC002", # Implicitly concatenated string literals over multiple lines "PIE790", # Unnecessary `pass` statement "PD901", # Avoid using the generic variable name `df` for DataFrames - "PERF401", # PERF401 Use a list comprehension to create a transformed list + "PERF401", # Use a list comprehension to create a transformed list "E241", # Multiple spaces after comma "E265", # Block comment 
should start with `# ` "E266", # Too many leading `#` before block comment From 5a031d9b8822551711973e6ae220cd00817c9523 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 21 Jul 2025 10:44:47 -0600 Subject: [PATCH 0233/1718] ENH: avoid thread safety issues around uses of `PySequence_Fast` (#29394) * BUG: prevent mutating multiter arguments while processing them * MAINT: also add locking for array creation from array-likes * MAINT: add lock nditer argument with a critical section * MAINT: give NO_BRACKETS variants a label argument * MAINT: more critical sections * MAINT: apply suggestions from Sam * MAINT: fix linter and GIL-enabled build compilation * MAINT: simplify error paths * Update numpy/_core/src/multiarray/iterators.c Co-authored-by: Sebastian Berg --------- Co-authored-by: Sebastian Berg --- numpy/_core/src/common/npy_pycompat.h | 52 ++++++++++++++++ numpy/_core/src/multiarray/ctors.c | 59 ++++++++++--------- numpy/_core/src/multiarray/iterators.c | 13 ++-- numpy/_core/src/multiarray/methods.c | 25 ++++---- numpy/_core/src/multiarray/multiarraymodule.c | 31 +++++----- numpy/_core/src/multiarray/nditer_pywrap.c | 38 +++++++++--- numpy/_core/tests/test_multithreading.py | 47 +++++++++++++++ 7 files changed, 200 insertions(+), 65 deletions(-) diff --git a/numpy/_core/src/common/npy_pycompat.h b/numpy/_core/src/common/npy_pycompat.h index 769b90215f2b..f751af666524 100644 --- a/numpy/_core/src/common/npy_pycompat.h +++ b/numpy/_core/src/common/npy_pycompat.h @@ -6,4 +6,56 @@ #define Npy_HashDouble _Py_HashDouble +#ifdef Py_GIL_DISABLED +// Specialized version of critical section locking to safely use +// PySequence_Fast APIs without the GIL. For performance, the argument *to* +// PySequence_Fast() is provided to the macro, not the *result* of +// PySequence_Fast(), which would require an extra test to determine if the +// lock must be acquired. 
+// +// These are tweaked versions of macros defined in CPython in +// pycore_critical_section.h, originally added in CPython commit baf347d91643. +// They're defined in terms of the NPY_*_CRITICAL_SECTION_NO_BRACKETS to avoid +// repetition and should behave identically to the versions in CPython. Once the +// macros are expanded, the only difference relative to those versions is the +// use of public C API symbols that are equivalent to the ones used in the +// corresponding CPython definitions. +#define NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(original) \ + { \ + NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST_NO_BRACKETS( \ + original, npy_cs_fast) +#define NPY_END_CRITICAL_SECTION_SEQUENCE_FAST() \ + NPY_END_CRITICAL_SECTION_SEQUENCE_FAST_NO_BRACKETS(npy_cs_fast) \ + } + +// These macros are more flexible than the versions in the public CPython C API, +// but that comes at a cost. Here are some differences and limitations: +// +// * cs_name is a named label for the critical section. If you must nest +// critical sections, do *not* use the same name for multiple nesting +// critical sections. +// * The beginning and ending macros must happen within the same scope +// and the compiler won't necessarily enforce that. +// * The macros ending critical sections accept a named label. The label +// must match the opening critical section.
+#define NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST_NO_BRACKETS(original, cs_name) \ + PyObject *_##cs_name##_orig_seq = (PyObject *)(original); \ + const int _##cs_name##_should_lock_cs = \ + PyList_CheckExact(_##cs_name##_orig_seq); \ + PyCriticalSection _##cs_name; \ + if (_##cs_name##_should_lock_cs) { \ + PyCriticalSection_Begin(&_##cs_name, _##cs_name##_orig_seq); \ + } +#define NPY_END_CRITICAL_SECTION_SEQUENCE_FAST_NO_BRACKETS(cs_name) \ + if (_##cs_name##_should_lock_cs) { \ + PyCriticalSection_End(&_##cs_name); \ + } +#else +#define NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST_NO_BRACKETS(original, cs_name) +#define NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(original) { +#define NPY_END_CRITICAL_SECTION_SEQUENCE_FAST_NO_BRACKETS(cs_name) +#define NPY_END_CRITICAL_SECTION_SEQUENCE_FAST() } +#endif + + #endif /* NUMPY_CORE_SRC_COMMON_NPY_PYCOMPAT_H_ */ diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index 4f466677c57c..498fa78118b3 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -1539,7 +1539,7 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, * This is the main code to make a NumPy array from a Python * Object. It is called from many different places. 
*/ - PyArrayObject *arr = NULL, *ret; + PyArrayObject *arr = NULL, *ret = NULL; PyArray_Descr *dtype = NULL; coercion_cache_obj *cache = NULL; int ndim = 0; @@ -1560,12 +1560,14 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, copy = 1; } + Py_BEGIN_CRITICAL_SECTION(op); + ndim = PyArray_DiscoverDTypeAndShape( op, NPY_MAXDIMS, dims, &cache, in_DType, in_descr, &dtype, copy, &was_copied_by__array__); if (ndim < 0) { - return NULL; + goto cleanup; } /* If the cache is NULL, then the object is considered a scalar */ @@ -1578,16 +1580,14 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, if (min_depth != 0 && ndim < min_depth) { PyErr_SetString(PyExc_ValueError, "object of too small depth for desired array"); - Py_DECREF(dtype); npy_free_coercion_cache(cache); - return NULL; + goto cleanup; } if (max_depth != 0 && ndim > max_depth) { PyErr_SetString(PyExc_ValueError, "object too deep for desired array"); - Py_DECREF(dtype); npy_free_coercion_cache(cache); - return NULL; + goto cleanup; } /* Got the correct parameters, but the cache may already hold the result */ @@ -1602,9 +1602,11 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, if (was_copied_by__array__ == 1) { flags = flags & ~NPY_ARRAY_ENSURECOPY; } - PyObject *res = PyArray_FromArray(arr, dtype, flags); + // PyArray_FromArray steals a reference to the dtype + Py_INCREF(dtype); + ret = (PyArrayObject *)PyArray_FromArray(arr, dtype, flags); npy_unlink_coercion_cache(cache); - return res; + goto cleanup; } else if (cache == NULL && PyArray_IsScalar(op, Void) && !(((PyVoidScalarObject *)op)->flags & NPY_ARRAY_OWNDATA) && @@ -1619,13 +1621,15 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, * provide a dtype (newtype is NULL). 
*/ assert(ndim == 0); - - return PyArray_NewFromDescrAndBase( + // PyArray_NewFromDescrAndBase steals a reference to the dtype + Py_INCREF(dtype); + ret = (PyArrayObject *)PyArray_NewFromDescrAndBase( &PyArray_Type, dtype, 0, NULL, NULL, ((PyVoidScalarObject *)op)->obval, ((PyVoidScalarObject *)op)->flags, NULL, op); + goto cleanup; } /* * If we got this far, we definitely have to create a copy, since we are @@ -1633,9 +1637,8 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, */ if (flags & NPY_ARRAY_ENSURENOCOPY) { PyErr_SetString(PyExc_ValueError, npy_no_copy_err_msg); - Py_DECREF(dtype); npy_free_coercion_cache(cache); - return NULL; + goto cleanup; } if (cache == NULL && in_descr != NULL && @@ -1662,16 +1665,18 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, * have a better solution at some point): * https://github.com/pandas-dev/pandas/issues/35481 */ - return PyArray_FromScalar(op, dtype); + // PyArray_FromScalar steals a reference to dtype + Py_INCREF(dtype); + ret = (PyArrayObject *)PyArray_FromScalar(op, dtype); + goto cleanup; } /* There was no array (or array-like) passed in directly. 
*/ if (flags & NPY_ARRAY_WRITEBACKIFCOPY) { PyErr_SetString(PyExc_TypeError, "WRITEBACKIFCOPY used for non-array input."); - Py_DECREF(dtype); npy_free_coercion_cache(cache); - return NULL; + goto cleanup; } /* Create a new array and copy the data */ @@ -1681,8 +1686,7 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, flags&NPY_ARRAY_F_CONTIGUOUS, NULL); if (ret == NULL) { npy_free_coercion_cache(cache); - Py_DECREF(dtype); - return NULL; + goto cleanup; } if (ndim == PyArray_NDIM(ret)) { /* @@ -1699,12 +1703,10 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, assert(ndim == 0); if (PyArray_Pack(dtype, PyArray_BYTES(ret), op) < 0) { - Py_DECREF(dtype); - Py_DECREF(ret); - return NULL; + Py_CLEAR(ret); + goto cleanup; } - Py_DECREF(dtype); - return (PyObject *)ret; + goto cleanup; } assert(ndim != 0); assert(op == cache->converted_obj); @@ -1717,15 +1719,18 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, ((PyArrayObject_fields *)ret)->descr = dtype; } - int success = PyArray_AssignFromCache(ret, cache); + int succeed = PyArray_AssignFromCache(ret, cache); ((PyArrayObject_fields *)ret)->nd = out_ndim; ((PyArrayObject_fields *)ret)->descr = out_descr; - Py_DECREF(dtype); - if (success < 0) { - Py_DECREF(ret); - return NULL; + if (succeed < 0) { + Py_CLEAR(ret); } + +cleanup:; + + Py_XDECREF(dtype); + Py_END_CRITICAL_SECTION(); return (PyObject *)ret; } diff --git a/numpy/_core/src/multiarray/iterators.c b/numpy/_core/src/multiarray/iterators.c index d83f0d7a3ac3..f45328a2bb75 100644 --- a/numpy/_core/src/multiarray/iterators.c +++ b/numpy/_core/src/multiarray/iterators.c @@ -23,6 +23,7 @@ #include "item_selection.h" #include "lowlevel_strided_loops.h" #include "array_assign.h" +#include "npy_pycompat.h" #define NEWAXIS_INDEX -1 #define ELLIPSIS_INDEX -2 @@ -1476,6 +1477,7 @@ PyArray_MultiIterNew(int n, ...) 
return multiiter_new_impl(n, args_impl); } + static PyObject* arraymultiter_new(PyTypeObject *NPY_UNUSED(subtype), PyObject *args, PyObject *kwds) @@ -1488,18 +1490,19 @@ arraymultiter_new(PyTypeObject *NPY_UNUSED(subtype), PyObject *args, "keyword arguments not accepted."); return NULL; } - - fast_seq = PySequence_Fast(args, ""); // needed for pypy // noqa: borrowed-ref - manual fix needed + fast_seq = PySequence_Fast(args, ""); // noqa: borrowed-ref OK if (fast_seq == NULL) { return NULL; } + NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(fast_seq) n = PySequence_Fast_GET_SIZE(fast_seq); if (n > NPY_MAXARGS) { - Py_DECREF(fast_seq); - return multiiter_wrong_number_of_args(); + ret = multiiter_wrong_number_of_args(); + } else { + ret = multiiter_new_impl(n, PySequence_Fast_ITEMS(fast_seq)); } - ret = multiiter_new_impl(n, PySequence_Fast_ITEMS(fast_seq)); Py_DECREF(fast_seq); + NPY_END_CRITICAL_SECTION_SEQUENCE_FAST() return ret; } diff --git a/numpy/_core/src/multiarray/methods.c b/numpy/_core/src/multiarray/methods.c index 50f7f5f3c73b..7a9f5d83a57c 100644 --- a/numpy/_core/src/multiarray/methods.c +++ b/numpy/_core/src/multiarray/methods.c @@ -997,26 +997,26 @@ any_array_ufunc_overrides(PyObject *args, PyObject *kwds) int i; int nin, nout; PyObject *out_kwd_obj; - PyObject *fast; - PyObject **in_objs, **out_objs, *where_obj; + PyObject **out_objs, *where_obj; /* check inputs */ nin = PyTuple_Size(args); if (nin < 0) { return -1; } - fast = PySequence_Fast(args, "Could not convert object to sequence"); // noqa: borrowed-ref - manual fix needed - if (fast == NULL) { - return -1; - } - in_objs = PySequence_Fast_ITEMS(fast); for (i = 0; i < nin; ++i) { - if (PyUFunc_HasOverride(in_objs[i])) { - Py_DECREF(fast); +#if defined(PYPY_VERSION) || defined(Py_LIMITED_API) + PyObject *obj = PyTuple_GetItem(args, i); + if (obj == NULL) { + return -1; + } +#else + PyObject *obj = PyTuple_GET_ITEM(args, i); +#endif + if (PyUFunc_HasOverride(obj)) { return 1; } } - Py_DECREF(fast); 
if (kwds == NULL) { return 0; } @@ -1115,14 +1115,15 @@ array_function(PyArrayObject *NPY_UNUSED(self), PyObject *c_args, PyObject *c_kw PyErr_SetString(PyExc_TypeError, "kwargs must be a dict."); return NULL; } - types = PySequence_Fast( // noqa: borrowed-ref - manual fix needed + types = PySequence_Fast( // noqa: borrowed-ref OK types, "types argument to ndarray.__array_function__ must be iterable"); if (types == NULL) { return NULL; } - + NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(types); result = array_function_method_impl(func, types, args, kwargs); + NPY_END_CRITICAL_SECTION_SEQUENCE_FAST(); Py_DECREF(types); return result; } diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index b8ade23b6d76..732919c347b0 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -2760,15 +2760,18 @@ einsum_sub_op_from_str( static int einsum_list_to_subscripts(PyObject *obj, char *subscripts, int subsize) { - int ellipsis = 0, subindex = 0; + int ellipsis = 0, subindex = 0, ret = -1; npy_intp i, size; PyObject *item; - obj = PySequence_Fast(obj, "the subscripts for each operand must " // noqa: borrowed-ref - manual fix needed + obj = PySequence_Fast(obj, "the subscripts for each operand must " // noqa: borrowed-ref OK "be a list or a tuple"); if (obj == NULL) { return -1; } + + NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(obj); + size = PySequence_Size(obj); for (i = 0; i < size; ++i) { @@ -2778,14 +2781,12 @@ einsum_list_to_subscripts(PyObject *obj, char *subscripts, int subsize) if (ellipsis) { PyErr_SetString(PyExc_ValueError, "each subscripts list may have only one ellipsis"); - Py_DECREF(obj); - return -1; + goto cleanup; } if (subindex + 3 >= subsize) { PyErr_SetString(PyExc_ValueError, "subscripts list is too long"); - Py_DECREF(obj); - return -1; + goto cleanup; } subscripts[subindex++] = '.'; subscripts[subindex++] = '.'; @@ -2800,16 +2801,14 @@ 
einsum_list_to_subscripts(PyObject *obj, char *subscripts, int subsize) PyErr_SetString(PyExc_TypeError, "each subscript must be either an integer " "or an ellipsis"); - Py_DECREF(obj); - return -1; + goto cleanup; } npy_bool bad_input = 0; if (subindex + 1 >= subsize) { PyErr_SetString(PyExc_ValueError, "subscripts list is too long"); - Py_DECREF(obj); - return -1; + goto cleanup; } if (s < 0) { @@ -2828,16 +2827,20 @@ einsum_list_to_subscripts(PyObject *obj, char *subscripts, int subsize) if (bad_input) { PyErr_SetString(PyExc_ValueError, "subscript is not within the valid range [0, 52)"); - Py_DECREF(obj); - return -1; + goto cleanup; } } - } + ret = subindex; + + cleanup:; + + NPY_END_CRITICAL_SECTION_SEQUENCE_FAST(); Py_DECREF(obj); - return subindex; + return ret; + } /* diff --git a/numpy/_core/src/multiarray/nditer_pywrap.c b/numpy/_core/src/multiarray/nditer_pywrap.c index 0af2ee408b4b..86198e9cd9f4 100644 --- a/numpy/_core/src/multiarray/nditer_pywrap.c +++ b/numpy/_core/src/multiarray/nditer_pywrap.c @@ -20,6 +20,7 @@ #include "common.h" #include "conversion_utils.h" #include "ctors.h" +#include "npy_pycompat.h" /* Functions not part of the public NumPy C API */ npy_bool npyiter_has_writeback(NpyIter *iter); @@ -588,7 +589,7 @@ npyiter_prepare_ops(PyObject *op_in, PyObject **out_owner, PyObject ***out_objs) { /* Take ownership of op_in (either a tuple/list or single element): */ if (PyTuple_Check(op_in) || PyList_Check(op_in)) { - PyObject *seq = PySequence_Fast(op_in, "failed accessing item list"); // noqa: borrowed-ref - manual fix needed + PyObject *seq = PySequence_Fast(op_in, "failed accessing item list"); // noqa: borrowed-ref OK if (op_in == NULL) { Py_DECREF(op_in); return -1; @@ -719,19 +720,28 @@ npyiter_init(NewNpyArrayIterObject *self, PyObject *args, PyObject *kwds) /* Need nop to set up workspaces */ PyObject **op_objs = NULL; - PyObject *op_in_owned = NULL; /* Sequence/object owning op_objs. 
*/ - int nop = npyiter_prepare_ops(op_in, &op_in_owned, &op_objs); + PyObject *op_in_owned = NULL; /* Sequence/object owning op_objs. */ + PyArray_Descr **op_request_dtypes = NULL; + int pre_alloc_fail = 0; + int post_alloc_fail = 0; + int nop; + + NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST_NO_BRACKETS(op_in, nditer_cs); + + nop = npyiter_prepare_ops(op_in, &op_in_owned, &op_objs); if (nop < 0) { - goto pre_alloc_fail; + pre_alloc_fail = 1; + goto cleanup; } /* allocate workspace for Python objects (operands and dtypes) */ NPY_ALLOC_WORKSPACE(op, PyArrayObject *, 2 * 8, 2 * nop); if (op == NULL) { - goto pre_alloc_fail; + pre_alloc_fail = 1; + goto cleanup; } memset(op, 0, sizeof(PyObject *) * 2 * nop); - PyArray_Descr **op_request_dtypes = (PyArray_Descr **)(op + nop); + op_request_dtypes = (PyArray_Descr **)(op + nop); /* And other workspaces (that do not need to clean up their content) */ NPY_ALLOC_WORKSPACE(op_flags, npy_uint32, 8, nop); @@ -743,11 +753,25 @@ npyiter_init(NewNpyArrayIterObject *self, PyObject *args, PyObject *kwds) * (NPY_ALLOC_WORKSPACE has to be done before a goto fail currently.) 
*/ if (op_flags == NULL || op_axes_storage == NULL || op_axes == NULL) { - goto finish; + post_alloc_fail = 1; + goto cleanup; } /* op and op_flags */ if (npyiter_convert_ops(nop, op_objs, op_flags_in, op, op_flags) != 1) { + post_alloc_fail = 1; + goto cleanup; + } + +cleanup: + + NPY_END_CRITICAL_SECTION_SEQUENCE_FAST_NO_BRACKETS(nditer_cs); + + if (pre_alloc_fail) { + goto pre_alloc_fail; + } + + if (post_alloc_fail) { goto finish; } diff --git a/numpy/_core/tests/test_multithreading.py b/numpy/_core/tests/test_multithreading.py index 5f8a2f11ea1f..b81c989ae5c7 100644 --- a/numpy/_core/tests/test_multithreading.py +++ b/numpy/_core/tests/test_multithreading.py @@ -293,3 +293,50 @@ def func(index): assert expected_warning in str(ex) run_threaded(func, max_workers=10, pass_count=True, outer_iterations=5) + + +# These are all implemented using PySequence_Fast, which needs locking to be safe +def np_broadcast(arrs): + for i in range(100): + np.broadcast(arrs) + +def create_array(arrs): + for i in range(100): + np.array(arrs) + +def create_nditer(arrs): + for i in range(1000): + np.nditer(arrs) + +@pytest.mark.parametrize("kernel", (np_broadcast, create_array, create_nditer)) +def test_arg_locking(kernel): + # should complete without failing or generating an error about an array size + # changing + + b = threading.Barrier(5) + done = 0 + arrs = [] + + def read_arrs(): + nonlocal done + b.wait() + try: + kernel(arrs) + finally: + done += 1 + + def mutate_list(): + b.wait() + while done < 4: + if len(arrs) > 10: + arrs.pop(0) + elif len(arrs) <= 10: + arrs.extend([np.array([1, 2, 3]) for _ in range(1000)]) + + arrs = [np.array([1, 2, 3]) for _ in range(1000)] + + tasks = [threading.Thread(target=read_arrs) for _ in range(4)] + tasks.append(threading.Thread(target=mutate_list)) + + [t.start() for t in tasks] + [t.join() for t in tasks] From ec3ac3d31e244384856c3f1c6cc65a37ee3906f5 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli 
<33491632+MarcoGorelli@users.noreply.github.com> Date: Mon, 21 Jul 2025 19:01:42 +0100 Subject: [PATCH 0234/1718] TYP: Type ``MaskedArray.resize``, wrap ``NoReturn`` tests in functions (#29401) Co-authored-by: Joren Hammudoglu --- numpy/ma/core.pyi | 13 ++++- .../tests/data/reveal/lib_polynomial.pyi | 5 +- numpy/typing/tests/data/reveal/ma.pyi | 5 +- numpy/typing/tests/data/reveal/ufuncs.pyi | 57 ++++++++++++------- 4 files changed, 57 insertions(+), 23 deletions(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index ee3b90f2891d..a3d05556f7ce 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -3,7 +3,16 @@ from _typeshed import Incomplete from collections.abc import Sequence -from typing import Any, Literal, NoReturn, Self, SupportsIndex, TypeAlias, overload +from typing import ( + Any, + Literal, + Never, + NoReturn, + Self, + SupportsIndex, + TypeAlias, + overload, +) from typing_extensions import TypeIs, TypeVar import numpy as np @@ -1073,7 +1082,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def ravel(self, order: _OrderKACF = "C") -> MaskedArray[tuple[int], _DTypeT_co]: ... def reshape(self, *s, **kwargs): ... - def resize(self, newshape, refcheck=..., order=...): ... + def resize(self, newshape: Never, refcheck: bool = True, order: bool = False) -> NoReturn: ... def put(self, indices: _ArrayLikeInt_co, values: ArrayLike, mode: _ModeKind = "raise") -> None: ... def ids(self) -> tuple[int, int]: ... def iscontiguous(self) -> bool: ... 
diff --git a/numpy/typing/tests/data/reveal/lib_polynomial.pyi b/numpy/typing/tests/data/reveal/lib_polynomial.pyi index 8b0a9f3d22e7..b7cbadefc610 100644 --- a/numpy/typing/tests/data/reveal/lib_polynomial.pyi +++ b/numpy/typing/tests/data/reveal/lib_polynomial.pyi @@ -118,7 +118,10 @@ assert_type(np.polyadd(AR_O, AR_O), npt.NDArray[np.object_]) assert_type(np.polysub(poly_obj, AR_i8), np.poly1d) assert_type(np.polysub(AR_f8, poly_obj), np.poly1d) -assert_type(np.polysub(AR_b, AR_b), NoReturn) + +def test_invalid_polysub() -> None: + assert_type(np.polysub(AR_b, AR_b), NoReturn) + assert_type(np.polysub(AR_u4, AR_b), npt.NDArray[np.unsignedinteger]) assert_type(np.polysub(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) assert_type(np.polysub(AR_f8, AR_i8), npt.NDArray[np.floating]) diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index b5a292330208..1d44c3f8ba3c 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -1,4 +1,4 @@ -from typing import Any, Literal, TypeAlias, TypeVar, assert_type +from typing import Any, Literal, TypeAlias, TypeVar, assert_type, NoReturn import numpy as np from numpy import dtype, generic @@ -405,6 +405,9 @@ assert_type(MAR_f8.cumprod(out=MAR_subclass), MaskedArraySubclass) assert_type(MAR_f8.cumsum(), MaskedArray[Any]) assert_type(MAR_f8.cumsum(out=MAR_subclass), MaskedArraySubclass) +def invalid_resize() -> None: + assert_type(MAR_f8.resize((1,1)), NoReturn) # type: ignore[arg-type] + # Masked Array addition assert_type(MAR_b + AR_LIKE_u, MaskedArray[np.uint32]) diff --git a/numpy/typing/tests/data/reveal/ufuncs.pyi b/numpy/typing/tests/data/reveal/ufuncs.pyi index 93a8bfb15d06..0bfe4df9ad8d 100644 --- a/numpy/typing/tests/data/reveal/ufuncs.pyi +++ b/numpy/typing/tests/data/reveal/ufuncs.pyi @@ -98,26 +98,45 @@ assert_type(np.bitwise_count.identity, None) assert_type(np.bitwise_count(i8), Any) assert_type(np.bitwise_count(AR_i8), npt.NDArray[Any]) 
-assert_type(np.absolute.outer(), NoReturn) -assert_type(np.frexp.outer(), NoReturn) -assert_type(np.divmod.outer(), NoReturn) -assert_type(np.matmul.outer(), NoReturn) +def test_absolute_outer_invalid() -> None: + assert_type(np.absolute.outer(), NoReturn) +def test_frexp_outer_invalid() -> None: + assert_type(np.frexp.outer(), NoReturn) +def test_divmod_outer_invalid() -> None: + assert_type(np.divmod.outer(), NoReturn) +def test_matmul_outer_invalid() -> None: + assert_type(np.matmul.outer(), NoReturn) -assert_type(np.absolute.reduceat(), NoReturn) -assert_type(np.frexp.reduceat(), NoReturn) -assert_type(np.divmod.reduceat(), NoReturn) -assert_type(np.matmul.reduceat(), NoReturn) +def test_absolute_reduceat_invalid() -> None: + assert_type(np.absolute.reduceat(), NoReturn) +def test_frexp_reduceat_invalid() -> None: + assert_type(np.frexp.reduceat(), NoReturn) +def test_divmod_reduceat_invalid() -> None: + assert_type(np.divmod.reduceat(), NoReturn) +def test_matmul_reduceat_invalid() -> None: + assert_type(np.matmul.reduceat(), NoReturn) -assert_type(np.absolute.reduce(), NoReturn) -assert_type(np.frexp.reduce(), NoReturn) -assert_type(np.divmod.reduce(), NoReturn) -assert_type(np.matmul.reduce(), NoReturn) +def test_absolute_reduce_invalid() -> None: + assert_type(np.absolute.reduce(), NoReturn) +def test_frexp_reduce_invalid() -> None: + assert_type(np.frexp.reduce(), NoReturn) +def test_divmod_reduce_invalid() -> None: + assert_type(np.divmod.reduce(), NoReturn) +def test_matmul_reduce_invalid() -> None: + assert_type(np.matmul.reduce(), NoReturn) -assert_type(np.absolute.accumulate(), NoReturn) -assert_type(np.frexp.accumulate(), NoReturn) -assert_type(np.divmod.accumulate(), NoReturn) -assert_type(np.matmul.accumulate(), NoReturn) +def test_absolute_accumulate_invalid() -> None: + assert_type(np.absolute.accumulate(), NoReturn) +def test_frexp_accumulate_invalid() -> None: + assert_type(np.frexp.accumulate(), NoReturn) +def test_divmod_accumulate_invalid() 
-> None: + assert_type(np.divmod.accumulate(), NoReturn) +def test_matmul_accumulate_invalid() -> None: + assert_type(np.matmul.accumulate(), NoReturn) -assert_type(np.frexp.at(), NoReturn) -assert_type(np.divmod.at(), NoReturn) -assert_type(np.matmul.at(), NoReturn) +def test_frexp_at_invalid() -> None: + assert_type(np.frexp.at(), NoReturn) +def test_divmod_at_invalid() -> None: + assert_type(np.divmod.at(), NoReturn) +def test_matmul_at_invalid() -> None: + assert_type(np.matmul.at(), NoReturn) From c7a93c19765317464e3e1e301468a5c2ccd35737 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 21 Jul 2025 19:13:28 +0000 Subject: [PATCH 0235/1718] MAINT: Bump github/codeql-action from 3.29.2 to 3.29.3 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.29.2 to 3.29.3. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/181d5eefc20863364f96762470ba6f862bdef56b...d6bbdef45e766d081b84a2def353b0055f728d3e) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.29.3 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index cbee65f0b713..71abe29f9d09 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. 
- name: Initialize CodeQL - uses: github/codeql-action/init@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2 + uses: github/codeql-action/init@d6bbdef45e766d081b84a2def353b0055f728d3e # v3.29.3 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2 + uses: github/codeql-action/autobuild@d6bbdef45e766d081b84a2def353b0055f728d3e # v3.29.3 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2 + uses: github/codeql-action/analyze@d6bbdef45e766d081b84a2def353b0055f728d3e # v3.29.3 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index aa3bf1aec7cc..de3d582f4750 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
- name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@181d5eefc20863364f96762470ba6f862bdef56b # v2.1.27 + uses: github/codeql-action/upload-sarif@d6bbdef45e766d081b84a2def353b0055f728d3e # v2.1.27 with: sarif_file: results.sarif From fcb82dfe9137890326ff18920442c75bd6dc488b Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Tue, 22 Jul 2025 08:00:54 +1000 Subject: [PATCH 0236/1718] use a stable pypy release in CI --- .github/workflows/linux.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 3452724841c3..7ac5ea673d61 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -82,7 +82,7 @@ jobs: persist-credentials: false - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 with: - python-version: 'pypy3.11-nightly' + python-version: 'pypy3.11-v7.3.20' - name: Setup using scipy-openblas run: | python -m pip install -r requirements/ci_requirements.txt From d2c0c106eab6bc79193964b2eab40298c7295094 Mon Sep 17 00:00:00 2001 From: riku-sakamoto <46015196+riku-sakamoto@users.noreply.github.com> Date: Tue, 22 Jul 2025 07:51:15 +0900 Subject: [PATCH 0237/1718] ENH: Show unit information in repr for datetime64("NaT") (#29396) --- .../upcoming_changes/29396.improvement.rst | 5 +++++ numpy/_core/src/multiarray/scalartypes.c.src | 21 ++++++++++++++----- numpy/_core/tests/test_datetime.py | 4 +++- 3 files changed, 24 insertions(+), 6 deletions(-) create mode 100644 doc/release/upcoming_changes/29396.improvement.rst diff --git a/doc/release/upcoming_changes/29396.improvement.rst b/doc/release/upcoming_changes/29396.improvement.rst new file mode 100644 index 000000000000..2cd3d81ad9d8 --- /dev/null +++ b/doc/release/upcoming_changes/29396.improvement.rst @@ -0,0 +1,5 @@ +Show unit information in ``__repr__`` for ``datetime64("NaT")`` +------------------------------------------------------------------ +When a `datetime64` object is 
"Not a Time" (NaT), its ``__repr__`` method now +includes the time unit of the datetime64 type. This makes it consistent with +the behavior of a `timedelta64` object. diff --git a/numpy/_core/src/multiarray/scalartypes.c.src b/numpy/_core/src/multiarray/scalartypes.c.src index 03165b10337e..5724afda7e47 100644 --- a/numpy/_core/src/multiarray/scalartypes.c.src +++ b/numpy/_core/src/multiarray/scalartypes.c.src @@ -939,12 +939,23 @@ datetimetype_repr(PyObject *self) if (legacy_print_mode == -1) { return NULL; } - if (legacy_print_mode > 125) { - ret = PyUnicode_FromFormat("np.datetime64('%s')", iso); - } - else { - ret = PyUnicode_FromFormat("numpy.datetime64('%s')", iso); + + PyObject *meta = metastr_to_unicode(&scal->obmeta, 1); + if((scal->obval == NPY_DATETIME_NAT) && (meta != NULL)){ + if (legacy_print_mode > 125) { + ret = PyUnicode_FromFormat("np.datetime64('%s','%S')", iso, meta); + } else { + ret = PyUnicode_FromFormat("numpy.datetime64('%s','%S')", iso, meta); + } + } else { + if (legacy_print_mode > 125) { + ret = PyUnicode_FromFormat("np.datetime64('%s')", iso); + } + else { + ret = PyUnicode_FromFormat("numpy.datetime64('%s')", iso); + } } + } else { PyObject *meta = metastr_to_unicode(&scal->obmeta, 1); diff --git a/numpy/_core/tests/test_datetime.py b/numpy/_core/tests/test_datetime.py index a10ca15bc373..888bb7db293b 100644 --- a/numpy/_core/tests/test_datetime.py +++ b/numpy/_core/tests/test_datetime.py @@ -264,10 +264,12 @@ def test_datetime_scalar_construction(self): # Some basic strings and repr assert_equal(str(np.datetime64('NaT')), 'NaT') assert_equal(repr(np.datetime64('NaT')), - "np.datetime64('NaT')") + "np.datetime64('NaT','generic')") assert_equal(str(np.datetime64('2011-02')), '2011-02') assert_equal(repr(np.datetime64('2011-02')), "np.datetime64('2011-02')") + assert_equal(repr(np.datetime64('NaT').astype(np.dtype("datetime64[ns]"))), + "np.datetime64('NaT','ns')") # None gets constructed as NaT assert_equal(np.datetime64(None), 
np.datetime64('NaT')) From f13cf6e5748c8282a161a990cc7abd0859faaf75 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Tue, 22 Jul 2025 02:17:53 -0600 Subject: [PATCH 0238/1718] MAINT/BUG: Followups for PySequence_Fast locking (#29405) * MAINT: break up NPY_ALLOC_WORKSPACE into sub-macros that define and then init * MAINT: remove now-unneeded NO_BRACKETS critical section variants * BUG: always pass the argument to PySequence_Fast to BEGIN_CRITICAL_SECTION_SEQUENCE_FAST --- numpy/_core/src/common/npy_pycompat.h | 43 ++++++------------- numpy/_core/src/multiarray/alloc.h | 9 +++- numpy/_core/src/multiarray/iterators.c | 2 +- numpy/_core/src/multiarray/multiarraymodule.c | 13 +++--- numpy/_core/src/multiarray/nditer_pywrap.c | 18 +++++--- 5 files changed, 37 insertions(+), 48 deletions(-) diff --git a/numpy/_core/src/common/npy_pycompat.h b/numpy/_core/src/common/npy_pycompat.h index f751af666524..605833a511b7 100644 --- a/numpy/_core/src/common/npy_pycompat.h +++ b/numpy/_core/src/common/npy_pycompat.h @@ -15,45 +15,26 @@ // // These are tweaked versions of macros defined in CPython in // pycore_critical_section.h, originally added in CPython commit baf347d91643. -// They're defined in terms of the NPY_*_CRITICAL_SECTION_NO_BRACKETS to avoid -// repition and should behave identically to the versions in CPython. Once the -// macros are expanded, The only difference relative to those versions is the +// They should behave identically to the versions in CPython. Once the +// macros are expanded, the only difference relative to those versions is the // use of public C API symbols that are equivalent to the ones used in the // corresponding CPython definitions. 
#define NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(original) \ { \ - NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST_NO_BRACKETS( \ - original, npy_cs_fast) + PyObject *_orig_seq = (PyObject *)(original); \ + const int _should_lock_cs = \ + PyList_CheckExact(_orig_seq); \ + PyCriticalSection _cs_fast; \ + if (_should_lock_cs) { \ + PyCriticalSection_Begin(&_cs_fast, _orig_seq); \ + } #define NPY_END_CRITICAL_SECTION_SEQUENCE_FAST() \ - NPY_END_CRITICAL_SECTION_SEQUENCE_FAST_NO_BRACKETS(npy_cs_fast) \ - } - -// These macros are more flexible than the versions in the public CPython C API, -// but that comes at a cost. Here are some differences and limitations: -// -// * cs_name is a named label for the critical section. If you must nest -// critical sections, do *not* use the same name for multiple nesting -// critical sections. -// * The beginning and ending macros must happen within the same scope -// and the compiler won't necessarily enforce that. -// * The macros ending critical sections accept a named label. The label -// must match the opening critical section. 
-#define NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST_NO_BRACKETS(original, cs_name) \ - PyObject *_##cs_name##_orig_seq = (PyObject *)(original); \ - const int _##cs_name##_should_lock_cs = \ - PyList_CheckExact(_##cs_name##_orig_seq); \ - PyCriticalSection _##cs_name; \ - if (_##cs_name##_should_lock_cs) { \ - PyCriticalSection_Begin(&_##cs_name, _##cs_name##_orig_seq); \ - } -#define NPY_END_CRITICAL_SECTION_SEQUENCE_FAST_NO_BRACKETS(cs_name) \ - if (_##cs_name##_should_lock_cs) { \ - PyCriticalSection_End(&_##cs_name); \ + if (_should_lock_cs) { \ + PyCriticalSection_End(&_cs_fast); \ + } \ } #else -#define NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST_NO_BRACKETS(original, cs_name) #define NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(original) { -#define NPY_END_CRITICAL_SECTION_SEQUENCE_FAST_NO_BRACKETS(cs_name) #define NPY_END_CRITICAL_SECTION_SEQUENCE_FAST() } #endif diff --git a/numpy/_core/src/multiarray/alloc.h b/numpy/_core/src/multiarray/alloc.h index 8cd763f971ed..bef6407a28a3 100644 --- a/numpy/_core/src/multiarray/alloc.h +++ b/numpy/_core/src/multiarray/alloc.h @@ -93,11 +93,16 @@ _npy_init_workspace( * With some caches, it may be possible to malloc/calloc very quickly in which * case we should not hesitate to replace this pattern. 
*/ -#define NPY_ALLOC_WORKSPACE(NAME, TYPE, fixed_size, size) \ +#define NPY_DEFINE_WORKSPACE(NAME, TYPE, fixed_size) \ TYPE NAME##_static[fixed_size]; \ - TYPE *NAME; \ + TYPE *NAME; +#define NPY_INIT_WORKSPACE(NAME, TYPE, fixed_size, size) \ _npy_init_workspace((void **)&NAME, NAME##_static, (fixed_size), sizeof(TYPE), (size)) +#define NPY_ALLOC_WORKSPACE(NAME, TYPE, fixed_size, size) \ + NPY_DEFINE_WORKSPACE(NAME, TYPE, fixed_size) \ + NPY_INIT_WORKSPACE(NAME, TYPE, fixed_size, size) + static inline void _npy_free_workspace(void *buf, void *static_buf) diff --git a/numpy/_core/src/multiarray/iterators.c b/numpy/_core/src/multiarray/iterators.c index f45328a2bb75..7557662ad56c 100644 --- a/numpy/_core/src/multiarray/iterators.c +++ b/numpy/_core/src/multiarray/iterators.c @@ -1494,7 +1494,7 @@ arraymultiter_new(PyTypeObject *NPY_UNUSED(subtype), PyObject *args, if (fast_seq == NULL) { return NULL; } - NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(fast_seq) + NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(args) n = PySequence_Fast_GET_SIZE(fast_seq); if (n > NPY_MAXARGS) { ret = multiiter_wrong_number_of_args(); diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index 732919c347b0..3d82e6c7f448 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -2762,20 +2762,20 @@ einsum_list_to_subscripts(PyObject *obj, char *subscripts, int subsize) { int ellipsis = 0, subindex = 0, ret = -1; npy_intp i, size; - PyObject *item; + PyObject *item, *seq; - obj = PySequence_Fast(obj, "the subscripts for each operand must " // noqa: borrowed-ref OK + seq = PySequence_Fast(obj, "the subscripts for each operand must " // noqa: borrowed-ref OK "be a list or a tuple"); - if (obj == NULL) { + if (seq == NULL) { return -1; } NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(obj); - size = PySequence_Size(obj); + size = PySequence_Size(seq); for (i = 0; i < size; ++i) { - item = 
PySequence_Fast_GET_ITEM(obj, i); + item = PySequence_Fast_GET_ITEM(seq, i); /* Ellipsis */ if (item == Py_Ellipsis) { if (ellipsis) { @@ -2837,10 +2837,9 @@ einsum_list_to_subscripts(PyObject *obj, char *subscripts, int subsize) cleanup:; NPY_END_CRITICAL_SECTION_SEQUENCE_FAST(); - Py_DECREF(obj); + Py_DECREF(seq); return ret; - } /* diff --git a/numpy/_core/src/multiarray/nditer_pywrap.c b/numpy/_core/src/multiarray/nditer_pywrap.c index 86198e9cd9f4..b68f7ad9708d 100644 --- a/numpy/_core/src/multiarray/nditer_pywrap.c +++ b/numpy/_core/src/multiarray/nditer_pywrap.c @@ -725,8 +725,12 @@ npyiter_init(NewNpyArrayIterObject *self, PyObject *args, PyObject *kwds) int pre_alloc_fail = 0; int post_alloc_fail = 0; int nop; + NPY_DEFINE_WORKSPACE(op, PyArrayObject *, 2 * 8); + NPY_DEFINE_WORKSPACE(op_flags, npy_uint32, 8); + NPY_DEFINE_WORKSPACE(op_axes_storage, int, 8 * NPY_MAXDIMS); + NPY_DEFINE_WORKSPACE(op_axes, int *, 8); - NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST_NO_BRACKETS(op_in, nditer_cs); + NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(op_in); nop = npyiter_prepare_ops(op_in, &op_in_owned, &op_objs); if (nop < 0) { @@ -735,7 +739,7 @@ npyiter_init(NewNpyArrayIterObject *self, PyObject *args, PyObject *kwds) } /* allocate workspace for Python objects (operands and dtypes) */ - NPY_ALLOC_WORKSPACE(op, PyArrayObject *, 2 * 8, 2 * nop); + NPY_INIT_WORKSPACE(op, PyArrayObject *, 2 * 8, 2 * nop); if (op == NULL) { pre_alloc_fail = 1; goto cleanup; @@ -744,9 +748,9 @@ npyiter_init(NewNpyArrayIterObject *self, PyObject *args, PyObject *kwds) op_request_dtypes = (PyArray_Descr **)(op + nop); /* And other workspaces (that do not need to clean up their content) */ - NPY_ALLOC_WORKSPACE(op_flags, npy_uint32, 8, nop); - NPY_ALLOC_WORKSPACE(op_axes_storage, int, 8 * NPY_MAXDIMS, nop * NPY_MAXDIMS); - NPY_ALLOC_WORKSPACE(op_axes, int *, 8, nop); + NPY_INIT_WORKSPACE(op_flags, npy_uint32, 8, nop); + NPY_INIT_WORKSPACE(op_axes_storage, int, 8 * NPY_MAXDIMS, nop * NPY_MAXDIMS); + 
NPY_INIT_WORKSPACE(op_axes, int *, 8, nop); /* * Trying to allocate should be OK if one failed, check for error now * that we can use `goto finish` to clean up everything. @@ -763,9 +767,9 @@ npyiter_init(NewNpyArrayIterObject *self, PyObject *args, PyObject *kwds) goto cleanup; } -cleanup: +cleanup:; - NPY_END_CRITICAL_SECTION_SEQUENCE_FAST_NO_BRACKETS(nditer_cs); + NPY_END_CRITICAL_SECTION_SEQUENCE_FAST(); if (pre_alloc_fail) { goto pre_alloc_fail; From 8f68377c0e90f8ff9a996e801ad0ba2c5971ec3b Mon Sep 17 00:00:00 2001 From: Maanas Arora Date: Tue, 22 Jul 2025 06:31:01 -0400 Subject: [PATCH 0239/1718] BUG: Any dtype should call `square` on `arr ** 2` (#29392) * BUG: update fast_scalar_power to handle special-case squaring for any array type except object arrays * BUG: fix missing declaration * TST: add test to ensure `arr**2` calls square for structured dtypes * STY: remove whitespace * BUG: replace new variable `is_square` with direct op comparison in `fast_scalar_power` function --- numpy/_core/src/multiarray/number.c | 8 +++++++- numpy/_core/tests/test_multiarray.py | 7 +++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/numpy/_core/src/multiarray/number.c b/numpy/_core/src/multiarray/number.c index b801d7e041e2..de4012641684 100644 --- a/numpy/_core/src/multiarray/number.c +++ b/numpy/_core/src/multiarray/number.c @@ -332,6 +332,7 @@ static int fast_scalar_power(PyObject *o1, PyObject *o2, int inplace, PyObject **result) { PyObject *fastop = NULL; + if (PyLong_CheckExact(o2)) { int overflow = 0; long exp = PyLong_AsLongAndOverflow(o2, &overflow); @@ -363,7 +364,12 @@ fast_scalar_power(PyObject *o1, PyObject *o2, int inplace, PyObject **result) } PyArrayObject *a1 = (PyArrayObject *)o1; - if (!(PyArray_ISFLOAT(a1) || PyArray_ISCOMPLEX(a1))) { + if (PyArray_ISOBJECT(a1)) { + return 1; + } + if (fastop != n_ops.square && !PyArray_ISFLOAT(a1) && !PyArray_ISCOMPLEX(a1)) { + // we special-case squaring for any array type + // gh-29388 return 1; } 
diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 04222025883e..e7647d2e23fe 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -4215,6 +4215,13 @@ def pow_for(exp, arr): assert_equal(obj_arr ** -1, pow_for(-1, obj_arr)) assert_equal(obj_arr ** 2, pow_for(2, obj_arr)) + def test_pow_calls_square_structured_dtype(self): + # gh-29388 + dt = np.dtype([('a', 'i4'), ('b', 'i4')]) + a = np.array([(1, 2), (3, 4)], dtype=dt) + with pytest.raises(TypeError, match="ufunc 'square' not supported"): + a ** 2 + def test_pos_array_ufunc_override(self): class A(np.ndarray): def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): From 1016f8a9abf715b5cd84440e2d27c36faf442046 Mon Sep 17 00:00:00 2001 From: Dan Raviv Date: Tue, 22 Jul 2025 12:53:49 -0700 Subject: [PATCH 0240/1718] DOC: Fix index name in notes for np.take --- numpy/_core/fromnumeric.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/fromnumeric.py b/numpy/_core/fromnumeric.py index 9d01fca8aa32..1eeae1f33a01 100644 --- a/numpy/_core/fromnumeric.py +++ b/numpy/_core/fromnumeric.py @@ -170,7 +170,7 @@ def take(a, indices, axis=None, out=None, mode='raise'): Ni, Nk = a.shape[:axis], a.shape[axis+1:] for ii in ndindex(Ni): - for kk in ndindex(Nj): + for kk in ndindex(Nk): out[ii + s_[...,] + kk] = a[ii + s_[:,] + kk][indices] For this reason, it is equivalent to (but faster than) the following use From 436662ee6cd73e959d5eff375f3e8eec5e0be8a2 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Wed, 23 Jul 2025 13:57:43 +0100 Subject: [PATCH 0241/1718] TYP: Type ``MaskedArray.__deepcopy__`` and ``MaskedArray.argsort`` (#29418) --- numpy/__init__.pyi | 2 +- numpy/ma/core.py | 2 +- numpy/ma/core.pyi | 13 +++++++++++-- numpy/typing/tests/data/fail/ma.pyi | 2 ++ numpy/typing/tests/data/reveal/ma.pyi | 6 ++++++ 
numpy/typing/tests/data/reveal/ndarray_misc.pyi | 4 ++-- 6 files changed, 23 insertions(+), 6 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index d8c57c87cbbe..a196c000b6bc 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1721,7 +1721,7 @@ class _ArrayOrScalarCommon: order: str | Sequence[str] | None = ..., *, stable: bool | None = ..., - ) -> NDArray[Any]: ... + ) -> NDArray[intp]: ... @overload # axis=None (default), out=None (default), keepdims=False (default) def argmax(self, /, axis: None = None, out: None = None, *, keepdims: L[False] = False) -> intp: ... diff --git a/numpy/ma/core.py b/numpy/ma/core.py index e7d8a20f6c6c..84e029439725 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -5628,7 +5628,7 @@ def argsort(self, axis=np._NoValue, kind=None, order=None, endwith=True, is used. kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional The sorting algorithm used. - order : list, optional + order : str or list of str, optional When `a` is an array with fields defined, this argument specifies which fields to compare first, second, etc. Not all fields need be specified. diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index a3d05556f7ce..a4bfeb3ffab7 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -1398,7 +1398,16 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def round(self, /, decimals: SupportsIndex = 0, *, out: _ArrayT) -> _ArrayT: ... - def argsort(self, axis=..., kind=..., order=..., endwith=..., fill_value=..., *, stable=...): ... + def argsort( + self, + axis: SupportsIndex | _NoValueType = ..., + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, + endwith: bool = True, + fill_value: _ScalarLike_co | None = None, + *, + stable: bool = False, + ) -> _MaskedArray[intp]: ... # Keep in-sync with np.ma.argmin @overload # type: ignore[override] @@ -1703,7 +1712,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # def __reduce__(self): ... 
- def __deepcopy__(self, memo=...): ... + def __deepcopy__(self, memo: dict[int, Any] | None = None) -> Self: ... # Keep `dtype` at the bottom to avoid name conflicts with `np.dtype` @property diff --git a/numpy/typing/tests/data/fail/ma.pyi b/numpy/typing/tests/data/fail/ma.pyi index 5dc6706ebf81..160d30f885e3 100644 --- a/numpy/typing/tests/data/fail/ma.pyi +++ b/numpy/typing/tests/data/fail/ma.pyi @@ -141,3 +141,5 @@ MAR_c //= 2 # type: ignore[misc] MAR_td64 **= 2 # type: ignore[misc] MAR_1d_f8.swapaxes(axis1=1, axis2=0) # type: ignore[call-arg] + +MAR_1d_f8.argsort(axis=(1,0)) # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index 1d44c3f8ba3c..9aae83eda366 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -405,6 +405,12 @@ assert_type(MAR_f8.cumprod(out=MAR_subclass), MaskedArraySubclass) assert_type(MAR_f8.cumsum(), MaskedArray[Any]) assert_type(MAR_f8.cumsum(out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f8.__deepcopy__(), MaskedArray[np.float64]) + +assert_type(MAR_f8.argsort(), MaskedArray[np.intp]) +assert_type(MAR_f8.argsort(axis=0, kind='heap', order=('x', 'y')), MaskedArray[np.intp]) +assert_type(MAR_f8.argsort(endwith=True, fill_value=1.5, stable=False), MaskedArray[np.intp]) + def invalid_resize() -> None: assert_type(MAR_f8.resize((1,1)), NoReturn) # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/reveal/ndarray_misc.pyi b/numpy/typing/tests/data/reveal/ndarray_misc.pyi index 7f0a214f8f52..b219f4e5bec2 100644 --- a/numpy/typing/tests/data/reveal/ndarray_misc.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_misc.pyi @@ -70,8 +70,8 @@ assert_type(AR_f8.argmin(), np.intp) assert_type(AR_f8.argmin(axis=0), Any) assert_type(AR_f8.argmin(out=AR_i8), npt.NDArray[np.intp]) -assert_type(f8.argsort(), npt.NDArray[Any]) -assert_type(AR_f8.argsort(), npt.NDArray[Any]) +assert_type(f8.argsort(), npt.NDArray[np.intp]) 
+assert_type(AR_f8.argsort(), npt.NDArray[np.intp]) assert_type(f8.astype(np.int64).choose([()]), npt.NDArray[Any]) assert_type(AR_f8.choose([0]), npt.NDArray[Any]) From e766381f73c29c7ef4c0d7f8d58012c78914c15c Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Wed, 23 Jul 2025 16:31:36 +0100 Subject: [PATCH 0242/1718] TYP: Type ``MaskedArray.view`` (#29383) --- numpy/__init__.pyi | 4 +-- numpy/ma/core.pyi | 47 ++++++++++++++++++++++++++- numpy/typing/tests/data/reveal/ma.pyi | 12 +++++++ 3 files changed, 60 insertions(+), 3 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index a196c000b6bc..934fe7ff0287 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2575,14 +2575,14 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload # (dtype: T) def view(self, /, dtype: _DTypeT | _HasDType[_DTypeT]) -> ndarray[_ShapeT_co, _DTypeT]: ... @overload # (dtype: dtype[T]) - def view(self, /, dtype: _DTypeLike[_ScalarT]) -> NDArray[_ScalarT]: ... + def view(self, /, dtype: _DTypeLike[_ScalarT]) -> ndarray[_ShapeT_co, dtype[_ScalarT]]: ... @overload # (type: T) def view(self, /, *, type: type[_ArrayT]) -> _ArrayT: ... @overload # (_: T) def view(self, /, dtype: type[_ArrayT]) -> _ArrayT: ... @overload # (dtype: ?) def view(self, /, dtype: DTypeLike) -> ndarray[_ShapeT_co, dtype]: ... - @overload # (dtype: ?, type: type[T]) + @overload # (dtype: ?, type: T) def view(self, /, dtype: DTypeLike, type: type[_ArrayT]) -> _ArrayT: ... def setfield(self, /, val: ArrayLike, dtype: DTypeLike, offset: SupportsIndex = 0) -> None: ... 
diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index a4bfeb3ffab7..205dc83fad67 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -17,6 +17,7 @@ from typing_extensions import TypeIs, TypeVar import numpy as np from numpy import ( + _HasDType, _HasDTypeWithRealAndImag, _ModeKind, _OrderKACF, @@ -76,12 +77,14 @@ from numpy._typing import ( _ArrayLikeString_co, _ArrayLikeTD64_co, _ArrayLikeUInt_co, + _DTypeLike, _DTypeLikeBool, _IntLike_co, _ScalarLike_co, _Shape, _ShapeLike, ) +from numpy._typing._dtype_like import _VoidDTypeLike __all__ = [ "MAError", @@ -453,7 +456,49 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def __new__(cls, data=..., mask=..., dtype=..., copy=..., subok=..., ndmin=..., fill_value=..., keep_mask=..., hard_mask=..., shrink=..., order=...): ... def __array_finalize__(self, obj): ... def __array_wrap__(self, obj, context=..., return_scalar=...): ... - def view(self, dtype=..., type=..., fill_value=...): ... + + @overload # () + def view(self, /, dtype: None = None, type: None = None, fill_value: _ScalarLike_co | None = None) -> Self: ... + @overload # (dtype: DTypeT) + def view( + self, + /, + dtype: _DTypeT | _HasDType[_DTypeT], + type: None = None, + fill_value: _ScalarLike_co | None = None + ) -> MaskedArray[_ShapeT_co, _DTypeT]: ... + @overload # (dtype: dtype[ScalarT]) + def view( + self, + /, + dtype: _DTypeLike[_ScalarT], + type: None = None, + fill_value: _ScalarLike_co | None = None + ) -> MaskedArray[_ShapeT_co, dtype[_ScalarT]]: ... + @overload # ([dtype: _, ]*, type: ArrayT) + def view( + self, + /, + dtype: DTypeLike | None = None, + *, + type: type[_ArrayT], + fill_value: _ScalarLike_co | None = None + ) -> _ArrayT: ... + @overload # (dtype: _, type: ArrayT) + def view(self, /, dtype: DTypeLike | None, type: type[_ArrayT], fill_value: _ScalarLike_co | None = None) -> _ArrayT: ... 
+ @overload # (dtype: ArrayT, /) + def view(self, /, dtype: type[_ArrayT], type: None = None, fill_value: _ScalarLike_co | None = None) -> _ArrayT: ... + @overload # (dtype: ?) + def view( + self, + /, + # `_VoidDTypeLike | str | None` is like `DTypeLike` but without `_DTypeLike[Any]` to avoid + # overlaps with previous overloads. + dtype: _VoidDTypeLike | str | None, + type: None = None, + fill_value: _ScalarLike_co | None = None + ) -> MaskedArray[_ShapeT_co, dtype]: ... + def __getitem__(self, indx): ... def __setitem__(self, indx, value): ... @property diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index 9aae83eda366..6cad138b9dc5 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -405,6 +405,18 @@ assert_type(MAR_f8.cumprod(out=MAR_subclass), MaskedArraySubclass) assert_type(MAR_f8.cumsum(), MaskedArray[Any]) assert_type(MAR_f8.cumsum(out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f8.view(), MaskedArray[np.float64]) +assert_type(MAR_f8.view(dtype=np.float32), MaskedArray[np.float32]) +assert_type(MAR_f8.view(dtype=np.dtype(np.float32)), MaskedArray[np.float32]) +assert_type(MAR_f8.view(dtype=np.float32, fill_value=0), MaskedArray[np.float32]) +assert_type(MAR_f8.view(type=np.ndarray), np.ndarray[Any, Any]) +assert_type(MAR_f8.view(None, np.ndarray), np.ndarray[Any, Any]) +assert_type(MAR_f8.view(dtype=np.ndarray), np.ndarray[Any, Any]) +assert_type(MAR_f8.view(dtype='float32'), MaskedArray[Any]) +assert_type(MAR_f8.view(dtype='float32', type=np.ndarray), np.ndarray[Any, Any]) +assert_type(MAR_2d_f4.view(dtype=np.float16), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float16]]) +assert_type(MAR_2d_f4.view(dtype=np.dtype(np.float16)), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float16]]) + assert_type(MAR_f8.__deepcopy__(), MaskedArray[np.float64]) assert_type(MAR_f8.argsort(), MaskedArray[np.intp]) From 7845ef8a42eb8a87830b704b434f4939be0e7591 Mon Sep 17 
00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 23 Jul 2025 17:18:56 +0000 Subject: [PATCH 0243/1718] MAINT: Bump github/codeql-action from 3.29.3 to 3.29.4 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.29.3 to 3.29.4. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/d6bbdef45e766d081b84a2def353b0055f728d3e...4e828ff8d448a8a6e532957b1811f387a63867e8) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.29.4 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 71abe29f9d09..1aea33f531f4 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@d6bbdef45e766d081b84a2def353b0055f728d3e # v3.29.3 + uses: github/codeql-action/init@4e828ff8d448a8a6e532957b1811f387a63867e8 # v3.29.4 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@d6bbdef45e766d081b84a2def353b0055f728d3e # v3.29.3 + uses: github/codeql-action/autobuild@4e828ff8d448a8a6e532957b1811f387a63867e8 # v3.29.4 # ℹ️ Command-line programs to run using the OS shell. 
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@d6bbdef45e766d081b84a2def353b0055f728d3e # v3.29.3 + uses: github/codeql-action/analyze@4e828ff8d448a8a6e532957b1811f387a63867e8 # v3.29.4 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index de3d582f4750..84fbe1f03cb1 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@d6bbdef45e766d081b84a2def353b0055f728d3e # v2.1.27 + uses: github/codeql-action/upload-sarif@4e828ff8d448a8a6e532957b1811f387a63867e8 # v2.1.27 with: sarif_file: results.sarif From a0bb9edfb91c77b241294a5dba9e26158fb3ec1f Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 23 Jul 2025 19:22:38 +0200 Subject: [PATCH 0244/1718] DOC: Remove outdated `numpy.exceptions` compatibility note. The exceptions were removed from the global `numpy` namespace in 2.0 (#24316). --- numpy/exceptions.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/numpy/exceptions.py b/numpy/exceptions.py index 0e8688ae9eba..cf70b4a4ce3b 100644 --- a/numpy/exceptions.py +++ b/numpy/exceptions.py @@ -7,8 +7,7 @@ .. versionadded:: NumPy 1.25 - The exceptions module is new in NumPy 1.25. Older exceptions remain - available through the main NumPy namespace for compatibility. + The exceptions module is new in NumPy 1.25. .. 
currentmodule:: numpy.exceptions From 8b2321d03c1abce671a15f9d08a97249bd2d161d Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Wed, 23 Jul 2025 18:31:05 +0100 Subject: [PATCH 0245/1718] TYP: Type ``MaskedArray.reshape`` (#29404) Co-authored-by: Joren Hammudoglu --- numpy/__init__.pyi | 1 + numpy/ma/core.pyi | 87 ++++++++++++++++++++++++++- numpy/typing/tests/data/reveal/ma.pyi | 8 +++ 3 files changed, 95 insertions(+), 1 deletion(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 934fe7ff0287..302f0cdf15fc 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2468,6 +2468,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def flatten(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], _DTypeT_co]: ... def ravel(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], _DTypeT_co]: ... + # Keep in sync with `MaskedArray.reshape` # NOTE: reshape also accepts negative integers, so we can't use integer literals @overload # (None) def reshape(self, shape: None, /, *, order: _OrderACF = "C", copy: builtins.bool | None = None) -> Self: ... diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 205dc83fad67..0a039831a7d4 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -17,9 +17,11 @@ from typing_extensions import TypeIs, TypeVar import numpy as np from numpy import ( + _AnyShapeT, _HasDType, _HasDTypeWithRealAndImag, _ModeKind, + _OrderACF, _OrderKACF, _PartitionKind, _SortKind, @@ -1126,7 +1128,90 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def count(self, axis: _ShapeLike | None, keepdims: Literal[True]) -> NDArray[int_]: ... def ravel(self, order: _OrderKACF = "C") -> MaskedArray[tuple[int], _DTypeT_co]: ... - def reshape(self, *s, **kwargs): ... 
+ + # Keep in sync with `ndarray.reshape` + # NOTE: reshape also accepts negative integers, so we can't use integer literals + @overload # (None) + def reshape(self, shape: None, /, *, order: _OrderACF = "C", copy: bool | None = None) -> Self: ... + @overload # (empty_sequence) + def reshape( # type: ignore[overload-overlap] # mypy false positive + self, + shape: Sequence[Never], + /, + *, + order: _OrderACF = "C", + copy: bool | None = None, + ) -> MaskedArray[tuple[()], _DTypeT_co]: ... + @overload # (() | (int) | (int, int) | ....) # up to 8-d + def reshape( + self, + shape: _AnyShapeT, + /, + *, + order: _OrderACF = "C", + copy: bool | None = None, + ) -> MaskedArray[_AnyShapeT, _DTypeT_co]: ... + @overload # (index) + def reshape( + self, + size1: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: bool | None = None, + ) -> MaskedArray[tuple[int], _DTypeT_co]: ... + @overload # (index, index) + def reshape( + self, + size1: SupportsIndex, + size2: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: bool | None = None, + ) -> MaskedArray[tuple[int, int], _DTypeT_co]: ... + @overload # (index, index, index) + def reshape( + self, + size1: SupportsIndex, + size2: SupportsIndex, + size3: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: bool | None = None, + ) -> MaskedArray[tuple[int, int, int], _DTypeT_co]: ... + @overload # (index, index, index, index) + def reshape( + self, + size1: SupportsIndex, + size2: SupportsIndex, + size3: SupportsIndex, + size4: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: bool | None = None, + ) -> MaskedArray[tuple[int, int, int, int], _DTypeT_co]: ... + @overload # (int, *(index, ...)) + def reshape( + self, + size0: SupportsIndex, + /, + *shape: SupportsIndex, + order: _OrderACF = "C", + copy: bool | None = None, + ) -> MaskedArray[_AnyShape, _DTypeT_co]: ... 
+ @overload # (sequence[index]) + def reshape( + self, + shape: Sequence[SupportsIndex], + /, + *, + order: _OrderACF = "C", + copy: bool | None = None, + ) -> MaskedArray[_AnyShape, _DTypeT_co]: ... + def resize(self, newshape: Never, refcheck: bool = True, order: bool = False) -> NoReturn: ... def put(self, indices: _ArrayLikeInt_co, values: ArrayLike, mode: _ModeKind = "raise") -> None: ... def ids(self) -> tuple[int, int]: ... diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index 6cad138b9dc5..f2b7aa215c7a 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -27,6 +27,7 @@ AR_LIKE_dt64: list[np.datetime64] AR_LIKE_o: list[np.object_] AR_number: NDArray[np.number] +MAR_c8: MaskedArray[np.complex64] MAR_c16: MaskedArray[np.complex128] MAR_b: MaskedArray[np.bool] MAR_f4: MaskedArray[np.float32] @@ -399,6 +400,13 @@ assert_type(MAR_f8.trace(out=MAR_subclass, dtype=None), MaskedArraySubclass) assert_type(MAR_f8.round(), MaskedArray[np.float64]) assert_type(MAR_f8.round(out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_i8.reshape(None), MaskedArray[np.int64]) +assert_type(MAR_f8.reshape(-1), np.ma.MaskedArray[tuple[int], np.dtype[np.float64]]) +assert_type(MAR_c8.reshape(2, 3, 4, 5), np.ma.MaskedArray[tuple[int, int, int, int], np.dtype[np.complex64]]) +assert_type(MAR_td64.reshape(()), np.ma.MaskedArray[tuple[()], np.dtype[np.timedelta64]]) +assert_type(MAR_s.reshape([]), np.ma.MaskedArray[tuple[()], np.dtype[np.str_]]) +assert_type(MAR_V.reshape((480, 720, 4)), np.ma.MaskedArray[tuple[int, int, int], np.dtype[np.void]]) + assert_type(MAR_f8.cumprod(), MaskedArray[Any]) assert_type(MAR_f8.cumprod(out=MAR_subclass), MaskedArraySubclass) From eecfbc544dcaafa49003a8a2cd113ea46d763be4 Mon Sep 17 00:00:00 2001 From: ixgbe <1113177880@qq.com> Date: Thu, 24 Jul 2025 16:22:40 +0800 Subject: [PATCH 0246/1718] ENH: Enable RVV acceleration for auto-vectorization in RISC-V 
Signed-off-by: Wang Yang --- numpy/_core/meson.build | 1 + 1 file changed, 1 insertion(+) diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index 1b79aa39781c..ba482d0cf5a0 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -1068,6 +1068,7 @@ foreach gen_mtargets : [ VSX2, VX, LSX, + RVV, ] ], [ From 66ffb4c45f366b57b43457d531eed6e256f32086 Mon Sep 17 00:00:00 2001 From: ixgbe <1113177880@qq.com> Date: Thu, 24 Jul 2025 17:02:52 +0800 Subject: [PATCH 0247/1718] ci: rerun workflow due to mirror sync issue --- numpy/_core/meson.build | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index ba482d0cf5a0..b4c769810ad8 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -1068,7 +1068,7 @@ foreach gen_mtargets : [ VSX2, VX, LSX, - RVV, + RVV ] ], [ From 3702c9ab93c2b732b200dd19ac0670c68fb0702c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 24 Jul 2025 13:17:03 +0000 Subject: [PATCH 0248/1718] MAINT: Bump larsoner/circleci-artifacts-redirector-action Bumps [larsoner/circleci-artifacts-redirector-action](https://github.com/larsoner/circleci-artifacts-redirector-action) from 1.1.0 to 1.2.0. - [Release notes](https://github.com/larsoner/circleci-artifacts-redirector-action/releases) - [Commits](https://github.com/larsoner/circleci-artifacts-redirector-action/compare/7eafdb60666f57706a5525a2f5eb76224dc8779b...839631420e45a08af893032e5a5e8843bf47e8ff) --- updated-dependencies: - dependency-name: larsoner/circleci-artifacts-redirector-action dependency-version: 1.2.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- .github/workflows/circleci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/circleci.yml b/.github/workflows/circleci.yml index 3c84ce3c6890..12c51735bf81 100644 --- a/.github/workflows/circleci.yml +++ b/.github/workflows/circleci.yml @@ -17,7 +17,7 @@ jobs: statuses: write steps: - name: GitHub Action step - uses: larsoner/circleci-artifacts-redirector-action@7eafdb60666f57706a5525a2f5eb76224dc8779b # master + uses: larsoner/circleci-artifacts-redirector-action@839631420e45a08af893032e5a5e8843bf47e8ff # master with: repo-token: ${{ secrets.GITHUB_TOKEN }} api-token: ${{ secrets.CIRCLE_TOKEN }} From f093acd2ef1804787b9976bd6d92bf75e8169e70 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 24 Jul 2025 13:17:08 +0000 Subject: [PATCH 0249/1718] MAINT: Bump pypa/cibuildwheel from 3.0.1 to 3.1.0 Bumps [pypa/cibuildwheel](https://github.com/pypa/cibuildwheel) from 3.0.1 to 3.1.0. - [Release notes](https://github.com/pypa/cibuildwheel/releases) - [Changelog](https://github.com/pypa/cibuildwheel/blob/main/docs/changelog.md) - [Commits](https://github.com/pypa/cibuildwheel/compare/95d2f3a92fbf80abe066b09418bbf128a8923df2...ffd835cef18fa11522f608fc0fa973b89f5ddc87) --- updated-dependencies: - dependency-name: pypa/cibuildwheel dependency-version: 3.1.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- .github/workflows/emscripten.yml | 2 +- .github/workflows/wheels.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index ce0c2e803143..dbfb0ad48960 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -49,7 +49,7 @@ jobs: fetch-tags: true persist-credentials: false - - uses: pypa/cibuildwheel@95d2f3a92fbf80abe066b09418bbf128a8923df2 # v3.0.1 + - uses: pypa/cibuildwheel@ffd835cef18fa11522f608fc0fa973b89f5ddc87 # v3.1.0 env: CIBW_PLATFORM: pyodide diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index fd2047283a1f..ca41042e1ad3 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -174,7 +174,7 @@ jobs: fi - name: Build wheels - uses: pypa/cibuildwheel@95d2f3a92fbf80abe066b09418bbf128a8923df2 # v3.0.1 + uses: pypa/cibuildwheel@ffd835cef18fa11522f608fc0fa973b89f5ddc87 # v3.1.0 env: CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} From 30310834a888aa10863dc57de44b547757dd4466 Mon Sep 17 00:00:00 2001 From: "Aleksandr A. Voyt" Date: Thu, 24 Jul 2025 15:18:57 +0300 Subject: [PATCH 0250/1718] BUG: Fix test_configtool_pkgconfigdir to resolve PKG_CONFIG_DIR symlink (#29434) Ensure `TestNumpyConfig.test_configtool_pkgconfigdir` resolves `PKG_CONFIG_DIR` using `.resolve()` to match the resolved path from `_configtool.py`. 
--- numpy/tests/test_configtool.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/tests/test_configtool.py b/numpy/tests/test_configtool.py index a9c23b5cc007..c4e9a9551c0c 100644 --- a/numpy/tests/test_configtool.py +++ b/numpy/tests/test_configtool.py @@ -35,7 +35,7 @@ def test_configtool_cflags(self): def test_configtool_pkgconfigdir(self): stdout = self.check_numpyconfig('--pkgconfigdir') - assert pathlib.Path(stdout) == PKG_CONFIG_DIR + assert pathlib.Path(stdout) == PKG_CONFIG_DIR.resolve() @pytest.mark.skipif(not IS_INSTALLED, From 439f7632ab2a828d70a30d16d3104f8c9f5c1f13 Mon Sep 17 00:00:00 2001 From: kostayScr <11485271+kostayScr@users.noreply.github.com> Date: Thu, 24 Jul 2025 17:59:28 +0300 Subject: [PATCH 0251/1718] BUG: fix datetime/timedelta hash memory leak (#29411) * BUG: fix datetime/timedelta hash memory leak * get metadata directly from scalar object Co-authored-by: Nathan Goldbaum * BUG: remove unnecessary Py_DECREF Co-authored-by: Nathan Goldbaum * BUG: remove dtype variable declaration that is not used anymore --------- Co-authored-by: Nathan Goldbaum --- numpy/_core/src/multiarray/scalartypes.c.src | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/numpy/_core/src/multiarray/scalartypes.c.src b/numpy/_core/src/multiarray/scalartypes.c.src index 5724afda7e47..5e3a3ba71d3e 100644 --- a/numpy/_core/src/multiarray/scalartypes.c.src +++ b/numpy/_core/src/multiarray/scalartypes.c.src @@ -3911,7 +3911,6 @@ static npy_hash_t @lname@_arrtype_hash(PyObject *obj) { PyArray_DatetimeMetaData *meta; - PyArray_Descr *dtype; npy_@lname@ val = PyArrayScalar_VAL(obj, @name@); if (val == NPY_DATETIME_NAT) { @@ -3919,10 +3918,10 @@ static npy_hash_t return PyBaseObject_Type.tp_hash(obj); } - dtype = PyArray_DescrFromScalar(obj); - meta = get_datetime_metadata_from_dtype(dtype); + meta = &((PyDatetimeScalarObject *)obj)->obmeta; - return @lname@_hash(meta, val); + npy_hash_t res = @lname@_hash(meta, val); + return 
res; } /**end repeat**/ From a4906a744754260e2ad166c14ca1b9ff8926cb6c Mon Sep 17 00:00:00 2001 From: Zebreus Date: Thu, 24 Jul 2025 17:32:17 +0200 Subject: [PATCH 0252/1718] BLD: allow targeting webassembly without emscripten --- numpy/_core/src/multiarray/lowlevel_strided_loops.c.src | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src b/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src index 0c4eb3dd9a8d..0a69a08e678e 100644 --- a/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src +++ b/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src @@ -850,7 +850,7 @@ NPY_NO_EXPORT PyArrayMethod_StridedLoop * // Enable auto-vectorization for floating point casts with clang #if @is_native_half1@ || @is_float1@ || @is_double1@ #if @is_native_half2@ || @is_float2@ || @is_double2@ - #if defined(__clang__) && !defined(__EMSCRIPTEN__) + #if defined(__clang__) && !defined(__EMSCRIPTEN__) && !defined(__wasm__) #if __clang_major__ >= 12 _Pragma("clang fp exceptions(ignore)") #endif @@ -965,7 +965,7 @@ static GCC_CAST_OPT_LEVEL int #if @is_native_half1@ || @is_float1@ || @is_double1@ #if @is_native_half2@ || @is_float2@ || @is_double2@ - #if defined(__clang__) && !defined(__EMSCRIPTEN__) + #if defined(__clang__) && !defined(__EMSCRIPTEN__) && !defined(__wasm__) #if __clang_major__ >= 12 _Pragma("clang fp exceptions(strict)") #endif From 9812a3f701abc1030f93037a1165b2c0a9c8d101 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Thu, 24 Jul 2025 22:30:57 +0100 Subject: [PATCH 0253/1718] DOC: Correct more ndarray defaults (#29402) --- numpy/_core/_add_newdocs.py | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index 90d33d4b810a..ed8cf50ee360 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -3080,7 +3080,7 @@ 
add_newdoc('numpy._core.multiarray', 'ndarray', ('all', """ - a.all(axis=None, out=None, keepdims=False, *, where=True) + a.all(axis=None, out=None, keepdims=np._NoValue, *, where=np._NoValue) Returns True if all elements evaluate to True. @@ -3095,7 +3095,7 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('any', """ - a.any(axis=None, out=None, keepdims=False, *, where=True) + a.any(axis=None, out=None, keepdims=np._NoValue, *, where=np._NoValue) Returns True if any of the elements of `a` evaluate to True. @@ -3303,7 +3303,7 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('clip', """ - a.clip(min=None, max=None, out=None, **kwargs) + a.clip(min=np._NoValue, max=np._NoValue, out=None, **kwargs) Return an array whose values are limited to ``[min, max]``. One of max or min must be given. @@ -3708,7 +3708,7 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('max', """ - a.max(axis=None, out=None, keepdims=False, initial=, where=True) + a.max(axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, where=np._NoValue) Return the maximum along a given axis. @@ -3723,7 +3723,7 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('mean', """ - a.mean(axis=None, dtype=None, out=None, keepdims=False, *, where=True) + a.mean(axis=None, dtype=None, out=None, keepdims=np._NoValue, *, where=np._NoValue) Returns the average of the array elements along given axis. @@ -3738,7 +3738,7 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('min', """ - a.min(axis=None, out=None, keepdims=False, initial=, where=True) + a.min(axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, where=np._NoValue) Return the minimum along a given axis. 
@@ -3768,8 +3768,8 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('prod', """ - a.prod(axis=None, dtype=None, out=None, keepdims=False, - initial=1, where=True) + a.prod(axis=None, dtype=None, out=None, keepdims=np._NoValue, + initial=np._NoValue, where=np._NoValue) Return the product of the array elements over the given axis @@ -4240,7 +4240,7 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('std', """ - a.std(axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True, mean=np._NoValue) + a.std(axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *, where=np._NoValue, mean=np._NoValue) Returns the standard deviation of the array elements along given axis. @@ -4255,7 +4255,7 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('sum', """ - a.sum(axis=None, dtype=None, out=None, keepdims=False, initial=0, where=True) + a.sum(axis=None, dtype=None, out=None, keepdims=np._NoValue, initial=np._NoValue, where=np._NoValue) Return the sum of the array elements over the given axis. @@ -4518,7 +4518,7 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('var', """ - a.var(axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True, mean=np._NoValue) + a.var(axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *, where=np._NoValue, mean=np._NoValue) Returns the variance of the array elements, along given axis. From ec816336bcc042794418693508e678164afd3b25 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Thu, 24 Jul 2025 16:34:43 -0600 Subject: [PATCH 0254/1718] MAINT: Update main after 2.3.2 release. 
- Forward port 2.3.2-changelog.rst, - Forward port 2.3.2-notes.rst, - Update release.rst [skip azp] [skip cirrus] [skip actions] --- doc/changelog/2.3.2-changelog.rst | 38 ++++++++++++++++++++ doc/source/release.rst | 1 + doc/source/release/2.3.2-notes.rst | 56 ++++++++++++++++++++++++++++++ 3 files changed, 95 insertions(+) create mode 100644 doc/changelog/2.3.2-changelog.rst create mode 100644 doc/source/release/2.3.2-notes.rst diff --git a/doc/changelog/2.3.2-changelog.rst b/doc/changelog/2.3.2-changelog.rst new file mode 100644 index 000000000000..5c893a510ae7 --- /dev/null +++ b/doc/changelog/2.3.2-changelog.rst @@ -0,0 +1,38 @@ + +Contributors +============ + +A total of 9 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* !DWesl +* Charles Harris +* Joren Hammudoglu +* Maanas Arora +* Marco Edward Gorelli +* Matti Picus +* Nathan Goldbaum +* Sebastian Berg +* kostayScr + + +Pull requests merged +==================== + +A total of 16 pull requests were merged for this release. + +* `#29256 `__: MAINT: Prepare 2.3.x for further development +* `#29283 `__: TYP: Work around a mypy issue with bool arrays (#29248) +* `#29284 `__: BUG: fix fencepost error in StringDType internals +* `#29287 `__: BUG: handle case in mapiter where descriptors might get replaced... +* `#29350 `__: BUG: Fix shape error path in array-interface +* `#29412 `__: BUG: Allow reading non-npy files in npz and add test +* `#29413 `__: TST: Avoid uninitialized values in test (#29341) +* `#29414 `__: BUG: Fix reference leakage for output arrays in reduction functions +* `#29415 `__: BUG: fix casting issue in center, ljust, rjust, and zfill (#29369) +* `#29416 `__: TYP: Fix overloads in ``np.char.array`` and ``np.char.asarray``... 
+* `#29417 `__: BUG: Any dtype should call ``square`` on ``arr \*\* 2`` (#29392) +* `#29424 `__: MAINT: use a stable pypy release in CI +* `#29425 `__: MAINT: Support python 314rc1 +* `#29429 `__: MAINT: Update highway to match main. +* `#29430 `__: BLD: use github to build macos-arm64 wheels with OpenBLAS and... +* `#29437 `__: BUG: fix datetime/timedelta hash memory leak (#29411) diff --git a/doc/source/release.rst b/doc/source/release.rst index 59e6dd07b002..3af644a57562 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -6,6 +6,7 @@ Release notes :maxdepth: 2 2.4.0 + 2.3.2 2.3.1 2.3.0 2.2.6 diff --git a/doc/source/release/2.3.2-notes.rst b/doc/source/release/2.3.2-notes.rst new file mode 100644 index 000000000000..2acc400c89fe --- /dev/null +++ b/doc/source/release/2.3.2-notes.rst @@ -0,0 +1,56 @@ +.. currentmodule:: numpy + +========================= +NumPy 2.3.2 Release Notes +========================= + +The NumPy 2.3.2 release is a patch release with a number of bug fixes and +maintenance updates. The highlights are: + +- Wheels for Python 3.14.0rc1 +- PyPy updated to the latest stable release +- OpenBLAS updated to 0.3.30 + +This release supports Python versions 3.11-3.14 + + +Contributors +============ + +A total of 9 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* !DWesl +* Charles Harris +* Joren Hammudoglu +* Maanas Arora +* Marco Edward Gorelli +* Matti Picus +* Nathan Goldbaum +* Sebastian Berg +* kostayScr + + + +Pull requests merged +==================== + +A total of 16 pull requests were merged for this release. + +* `#29256 `__: MAINT: Prepare 2.3.x for further development +* `#29283 `__: TYP: Work around a mypy issue with bool arrays (#29248) +* `#29284 `__: BUG: fix fencepost error in StringDType internals +* `#29287 `__: BUG: handle case in mapiter where descriptors might get replaced... 
+* `#29350 `__: BUG: Fix shape error path in array-interface +* `#29412 `__: BUG: Allow reading non-npy files in npz and add test +* `#29413 `__: TST: Avoid uninitialized values in test (#29341) +* `#29414 `__: BUG: Fix reference leakage for output arrays in reduction functions +* `#29415 `__: BUG: fix casting issue in center, ljust, rjust, and zfill (#29369) +* `#29416 `__: TYP: Fix overloads in ``np.char.array`` and ``np.char.asarray``... +* `#29417 `__: BUG: Any dtype should call ``square`` on ``arr \*\* 2`` (#29392) +* `#29424 `__: MAINT: use a stable pypy release in CI +* `#29425 `__: MAINT: Support python 314rc1 +* `#29429 `__: MAINT: Update highway to match main. +* `#29430 `__: BLD: use github to build macos-arm64 wheels with OpenBLAS and... +* `#29437 `__: BUG: fix datetime/timedelta hash memory leak (#29411) + + From 7c00e3ea28a556a9d6fcc69baac67d7896fbc0e1 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Fri, 25 Jul 2025 08:02:18 -0600 Subject: [PATCH 0255/1718] MAINT: Add Python 3.14 to classifier. [skip azp] [skip cirrus] [skip actions] --- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index b0e58705ebd1..f9cfeadee599 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -29,6 +29,7 @@ classifiers = [ 'Programming Language :: Python :: 3.11', 'Programming Language :: Python :: 3.12', 'Programming Language :: Python :: 3.13', + 'Programming Language :: Python :: 3.14', 'Programming Language :: Python :: 3 :: Only', 'Programming Language :: Python :: Implementation :: CPython', 'Topic :: Software Development', From c1bd73c9066db667d7b9138e1d3cb0c8db413559 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Fri, 25 Jul 2025 09:09:12 -0600 Subject: [PATCH 0256/1718] DOC: Update RELEASE_WALKTHROUGH.rst Update the release documentation to reflect current practice. 
[skip azp] [skip cirrus] [skip actions] --- doc/RELEASE_WALKTHROUGH.rst | 67 +++++++++++++++++-------------------- 1 file changed, 31 insertions(+), 36 deletions(-) diff --git a/doc/RELEASE_WALKTHROUGH.rst b/doc/RELEASE_WALKTHROUGH.rst index 6d2194b5c4e6..c80c7d0463eb 100644 --- a/doc/RELEASE_WALKTHROUGH.rst +++ b/doc/RELEASE_WALKTHROUGH.rst @@ -26,18 +26,16 @@ Prior to release Add/drop Python versions ------------------------ -When adding or dropping Python versions, three files need to be edited: +When adding or dropping Python versions, two files need to be edited: - .github/workflows/wheels.yml # for github cibuildwheel -- tools/ci/cirrus_wheels.yml # for cibuildwheel aarch64/arm64 builds - pyproject.toml # for classifier and minimum version check. Make these changes in an ordinary PR against main and backport if necessary. Add ``[wheel build]`` at the end of the title line of the commit summary so that wheel builds will be run to test the changes. We currently release wheels for new Python versions after the first Python rc once manylinux and -cibuildwheel support it. For Python 3.11 we were able to release within a week -of the rc1 announcement. +cibuildwheel support it. Backport pull requests @@ -46,6 +44,7 @@ Backport pull requests Changes that have been marked for this release must be backported to the maintenance/2.1.x branch. + Update 2.1.0 milestones ----------------------- @@ -79,7 +78,8 @@ commit message might be something like:: Set the release version ----------------------- -Check the ``pyproject.toml`` file and set the release version if needed:: +Check the ``pyproject.toml`` file and set the release version and update the +classifier if needed:: $ gvim pyproject.toml @@ -89,7 +89,7 @@ Check the ``pavement.py`` and ``doc/source/release.rst`` files Check that the ``pavement.py`` file points to the correct release notes. It should have been updated after the last release, but if not, fix it now. 
Also make -sure that the notes have an entry in the ``release.rst`` file:: +sure that the release notes have an entry in the ``release.rst`` file:: $ gvim pavement.py doc/source/release.rst @@ -102,13 +102,12 @@ The changelog is generated using the changelog tool:: $ spin changelog $GITHUB v2.0.0..maintenance/2.1.x > doc/changelog/2.1.0-changelog.rst where ``GITHUB`` contains your GitHub access token. The text will need to be -checked for non-standard contributor names and dependabot entries removed. It -is also a good idea to remove any links that may be present in the PR titles -as they don't translate well to markdown, replace them with monospaced text. The -non-standard contributor names should be fixed by updating the ``.mailmap`` -file, which is a lot of work. It is best to make several trial runs before -reaching this point and ping the malefactors using a GitHub issue to get the -needed information. +checked for non-standard contributor names. It is also a good idea to remove +any links that may be present in the PR titles as they don't translate well to +markdown, replace them with monospaced text. The non-standard contributor names +should be fixed by updating the ``.mailmap`` file, which is a lot of work. It +is best to make several trial runs before reaching this point and ping the +malefactors using a GitHub issue to get the needed information. Finish the release notes @@ -170,26 +169,23 @@ If you need to delete the tag due to error:: --------------- Tagging the build at the beginning of this process will trigger a wheel build -via cibuildwheel and upload wheels and an sdist to the staging repo. The CI run -on github actions (for all x86-based and macOS arm64 wheels) takes about 1 1/4 -hours. The CI runs on cirrus (for aarch64 and M1) take less time. You can check -for uploaded files at the `staging repository`_, but note that it is not -closely synched with what you see of the running jobs. 
+via cibuildwheel and upload wheels and an sdist to the staging repo. All wheels +are currently built on GitHub actions and take about 1 1/4 hours to build. If you wish to manually trigger a wheel build, you can do so: -- On github actions -> `Wheel builder`_ there is a "Run workflow" button, click +- On GitHub actions -> `Wheel builder`_ there is a "Run workflow" button, click on it and choose the tag to build -- On Cirrus we don't currently have an easy way to manually trigger builds and - uploads. -If a wheel build fails for unrelated reasons, you can rerun it individually: +If some wheel builds fail for unrelated reasons, you can re-run them: + +- On GitHub actions select `Wheel builder`_ click on the task that contains + the build you want to re-run, it will have the tag as the branch. On the + upper right will be a re-run button, hit it and select "re-run failed" -- On github actions select `Wheel builder`_ click on the commit that contains - the build you want to rerun. On the left there is a list of wheel builds, - select the one you want to rerun and on the resulting page hit the - counterclockwise arrows button. -- On cirrus, log into cirrusci, look for the v2.1.0 tag and rerun the failed jobs. +If some wheels fail to upload to anaconda, you can select those builds in the +`Wheel builder`_ and manually download the build artifact. This is a temporary +workaround, but sometimes the quickest way to get a release out. .. _`staging repository`: https://anaconda.org/multibuild-wheels-staging/numpy/files .. _`Wheel builder`: https://github.com/numpy/numpy/actions/workflows/wheels.yml @@ -218,20 +214,19 @@ file is updated for continued development:: 5. Upload to PyPI ----------------- -Upload to PyPI using ``twine``. A recent version of ``twine`` of is needed -after recent PyPI changes, version ``3.4.1`` was used here:: +Upload to PyPI using ``twine``:: $ cd ../numpy $ twine upload release/installers/*.whl $ twine upload release/installers/*.gz # Upload last. 
-If one of the commands breaks in the middle, you may need to selectively upload -the remaining files because PyPI does not allow the same file to be uploaded -twice. The source file should be uploaded last to avoid synchronization -problems that might occur if pip users access the files while this is in -process, causing pip to build from source rather than downloading a binary -wheel. PyPI only allows a single source distribution, here we have -chosen the zip archive. +The source file should be uploaded last to avoid synchronization problems that +might occur if pip users access the files while this is in process, causing pip +to build from source rather than downloading a binary wheel. PyPI only allows a +single source distribution, here we have chosen the gz version. If the +uploading breaks because of network related reasons, you can try re-running the +commands, possibly after a fix. Twine will now handle the error generated by +PyPI when the same file is uploaded twice. 6. Upload files to GitHub From 3203001d84ad387253697f1ba4af805093dc338e Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sun, 27 Jul 2025 15:39:25 -0600 Subject: [PATCH 0257/1718] MAINT: Replace pavement.py Replace `pavement.py` functionality with a `write_release.py` script. This gets rid of a paver dependency and simplifies the release process. 
- Add tools/write_release.py - Remove pavement.py [skip azp] [skip cirrus] [skip actions] --- doc/RELEASE_WALKTHROUGH.rst | 12 +- pavement.py | 184 -------------------------- requirements/release_requirements.txt | 3 - tools/write_release.py | 121 +++++++++++++++++ 4 files changed, 126 insertions(+), 194 deletions(-) delete mode 100644 pavement.py create mode 100644 tools/write_release.py diff --git a/doc/RELEASE_WALKTHROUGH.rst b/doc/RELEASE_WALKTHROUGH.rst index c80c7d0463eb..a3787ae95ee5 100644 --- a/doc/RELEASE_WALKTHROUGH.rst +++ b/doc/RELEASE_WALKTHROUGH.rst @@ -84,14 +84,12 @@ classifier if needed:: $ gvim pyproject.toml -Check the ``pavement.py`` and ``doc/source/release.rst`` files --------------------------------------------------------------- +Check the ``doc/source/release.rst`` file +----------------------------------------- -Check that the ``pavement.py`` file points to the correct release notes. It should -have been updated after the last release, but if not, fix it now. Also make -sure that the release notes have an entry in the ``release.rst`` file:: +make sure that the release notes have an entry in the ``release.rst`` file:: - $ gvim pavement.py doc/source/release.rst + $ gvim doc/source/release.rst Generate the changelog @@ -208,7 +206,7 @@ Anaconda staging directory using the ``tools/download-wheels.py`` script:: This needs to be done after all installers are downloaded, but before the pavement file is updated for continued development:: - $ paver write_release + $ python write_release 2.1.0 5. Upload to PyPI diff --git a/pavement.py b/pavement.py deleted file mode 100644 index 369b8703b0ba..000000000000 --- a/pavement.py +++ /dev/null @@ -1,184 +0,0 @@ -r""" -This paver file is intended to help with the release process as much as -possible. It relies on virtualenv to generate 'bootstrap' environments as -independent from the user system as possible (e.g. to make sure the sphinx doc -is built against the built numpy, not an installed one). 
- -Building changelog + notes -========================== - -Assumes you have git and the binaries/tarballs in installers/:: - - paver write_release - paver write_note - -This automatically put the checksum into README.rst, and writes the Changelog. - -TODO -==== - - the script is messy, lots of global variables - - make it more easily customizable (through command line args) - - missing targets: install & test, sdist test, debian packaging - - fix bdist_mpkg: we build the same source twice -> how to make sure we use - the same underlying python for egg install in venv and for bdist_mpkg -""" -import hashlib -import os -import textwrap - -# The paver package needs to be installed to run tasks -import paver -from paver.easy import Bunch, options, sh, task - -#----------------------------------- -# Things to be changed for a release -#----------------------------------- - -# Path to the release notes -RELEASE_NOTES = 'doc/source/release/2.4.0-notes.rst' - - -#------------------------------------------------------- -# Hardcoded build/install dirs, virtualenv options, etc. -#------------------------------------------------------- - -# Where to put the release installers -options(installers=Bunch(releasedir="release", - installersdir=os.path.join("release", "installers")),) - - -#------------- -# README stuff -#------------- - -def _compute_hash(idirs, hashfunc): - """Hash files using given hashfunc. - - Parameters - ---------- - idirs : directory path - Directory containing files to be hashed. - hashfunc : hash function - Function to be used to hash the files. - - """ - released = paver.path.path(idirs).listdir() - checksums = [] - for fpath in sorted(released): - with open(fpath, 'rb') as fin: - fhash = hashfunc(fin.read()) - checksums.append( - f'{fhash.hexdigest()} {os.path.basename(fpath)}') - return checksums - - -def compute_md5(idirs): - """Compute md5 hash of files in idirs. 
- - Parameters - ---------- - idirs : directory path - Directory containing files to be hashed. - - """ - return _compute_hash(idirs, hashlib.md5) - - -def compute_sha256(idirs): - """Compute sha256 hash of files in idirs. - - Parameters - ---------- - idirs : directory path - Directory containing files to be hashed. - - """ - # better checksum so gpg signed README.rst containing the sums can be used - # to verify the binaries instead of signing all binaries - return _compute_hash(idirs, hashlib.sha256) - - -def write_release_task(options, filename='README'): - """Append hashes of release files to release notes. - - This appends file hashes to the release notes and creates - four README files of the result in various formats: - - - README.rst - - README.rst.gpg - - README.md - - README.md.gpg - - The md file are created using `pandoc` so that the links are - properly updated. The gpg files are kept separate, so that - the unsigned files may be edited before signing if needed. - - Parameters - ---------- - options : - Set by ``task`` decorator. - filename : str - Filename of the modified notes. The file is written - in the release directory. 
- - """ - idirs = options.installers.installersdir - notes = paver.path.path(RELEASE_NOTES) - rst_readme = paver.path.path(filename + '.rst') - md_readme = paver.path.path(filename + '.md') - - # append hashes - with open(rst_readme, 'w') as freadme: - with open(notes) as fnotes: - freadme.write(fnotes.read()) - - freadme.writelines(textwrap.dedent( - """ - Checksums - ========= - - MD5 - --- - :: - - """)) - freadme.writelines([f' {c}\n' for c in compute_md5(idirs)]) - - freadme.writelines(textwrap.dedent( - """ - SHA256 - ------ - :: - - """)) - freadme.writelines([f' {c}\n' for c in compute_sha256(idirs)]) - - # generate md file using pandoc before signing - sh(f"pandoc -s -o {md_readme} {rst_readme}") - - # Sign files - if hasattr(options, 'gpg_key'): - cmd = f'gpg --clearsign --armor --default_key {options.gpg_key}' - else: - cmd = 'gpg --clearsign --armor' - - sh(cmd + f' --output {rst_readme}.gpg {rst_readme}') - sh(cmd + f' --output {md_readme}.gpg {md_readme}') - - -@task -def write_release(options): - """Write the README files. - - Two README files are generated from the release notes, one in ``rst`` - markup for the general release, the other in ``md`` markup for the github - release notes. - - Parameters - ---------- - options : - Set by ``task`` decorator. 
- - """ - rdir = options.installers.releasedir - write_release_task(options, os.path.join(rdir, 'README')) diff --git a/requirements/release_requirements.txt b/requirements/release_requirements.txt index d23e69fa1fa8..eaa092560d2d 100644 --- a/requirements/release_requirements.txt +++ b/requirements/release_requirements.txt @@ -12,8 +12,5 @@ gitpython>=3.1.30 # uploading wheels twine -# building and notes -Paver - # uploading release documentation packaging diff --git a/tools/write_release.py b/tools/write_release.py new file mode 100644 index 000000000000..f86113acde8e --- /dev/null +++ b/tools/write_release.py @@ -0,0 +1,121 @@ +""" +Standalone script for writing release doc:: + + python tools/write_release + +Example:: + + python tools/write_release.py 1.7.0 + +Needs to be run from the root of the repository and assumes +that the output is in `release` and wheels and sdist in +`release/installers`. + +Translation from rst to md markdown requires Pandoc, you +will need to rely on your distribution to provide that. + +""" +import os +import subprocess +import textwrap +import argparse +from hashlib import md5 +from hashlib import sha256 +from pathlib import Path + +# Name of the notes directory +NOTES_DIR = "doc/source/release" +# Name of the output directory +OUTPUT_DIR = "release" +# Output base name, `.rst` or `.md` will be appended +OUTPUT_FILE = "README" + +def compute_hash(wheel_dir, hash_func): + """ + Compute hashes of files in wheel_dir. + + Parameters + ---------- + wheel_dir: str + Path to wheel directory from repo root. + hash_func: function + Hash function, i.e., md5, sha256, etc. + + Returns + ------- + list_of_strings: list + List of of strings. Each string is the hash + followed by the file basename. 
+ + """ + released = os.listdir(wheel_dir) + checksums = [] + for fn in sorted(released): + fn_path = Path(f"{wheel_dir}/{fn}") + with open(fn_path, 'rb') as f: + m = hash_func(f.read()) + checksums.append(f"{m.hexdigest()} {fn}") + return checksums + + +def write_release(version): + """ + Copy the -notes.rst file to the OUTPUT_DIR, append + the md5 and sha256 hashes of the wheels and sdist, and produce + README.rst and README.md files. + + Parameters + ---------- + version: str + Release version, e.g., '2.3.2', etc. + + Returns + ------- + None. + + """ + notes = Path(f"{NOTES_DIR}/{version}-notes.rst") + wheel_dir = Path(f"{OUTPUT_DIR}/installers") + target_md = Path(f"{OUTPUT_DIR}/{OUTPUT_FILE}.md") + target_rst = Path(f"{OUTPUT_DIR}/{OUTPUT_FILE}.rst") + + os.system(f"cp {notes} {target_rst}") + + with open(str(target_rst), 'a') as f: + f.writelines(textwrap.dedent( + """ + Checksums + ========= + + MD5 + --- + :: + + """)) + f.writelines([f' {c}\n' for c in compute_hash(wheel_dir, md5)]) + + f.writelines(textwrap.dedent( + """ + SHA256 + ------ + :: + + """)) + f.writelines([f' {c}\n' for c in compute_hash(wheel_dir, sha256)]) + + # translate README.rst to md for posting on GitHub + rst_to_md = subprocess.Popen( + ["pandoc", "-s", "-o", f"{target_md}", f"{target_rst}", "--wrap=preserve"], + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + output, error = rst_to_md.communicate() + if not rst_to_md.returncode == 0: + raise RuntimeError(f"{error} failed") + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument( + "version", + help="NumPy version of the release, e.g. 
2.3.2, etc.") + + args = parser.parse_args() + write_release(args.version) From d9acbceec552ecf6e737821b3c2385cfef590b77 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Mon, 28 Jul 2025 11:33:55 -0600 Subject: [PATCH 0258/1718] MAINT: Update tools/write_release.py Co-authored-by: Joren Hammudoglu --- tools/write_release.py | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/tools/write_release.py b/tools/write_release.py index f86113acde8e..5e976fe564f4 100644 --- a/tools/write_release.py +++ b/tools/write_release.py @@ -52,8 +52,7 @@ def compute_hash(wheel_dir, hash_func): checksums = [] for fn in sorted(released): fn_path = Path(f"{wheel_dir}/{fn}") - with open(fn_path, 'rb') as f: - m = hash_func(f.read()) + m = hash_func(fn_path.read_bytes()) checksums.append(f"{m.hexdigest()} {fn}") return checksums @@ -74,10 +73,10 @@ def write_release(version): None. """ - notes = Path(f"{NOTES_DIR}/{version}-notes.rst") - wheel_dir = Path(f"{OUTPUT_DIR}/installers") - target_md = Path(f"{OUTPUT_DIR}/{OUTPUT_FILE}.md") - target_rst = Path(f"{OUTPUT_DIR}/{OUTPUT_FILE}.rst") + notes = Path(NOTES_DIR) / f"{version}-notes.rst" + wheel_dir = Path(OUTPUT_DIR) / "installers" + target_md = Path(OUTPUT_DIR) / f"{OUTPUT_FILE}.md" + target_rst = Path(OUTPUT_DIR) / f"{OUTPUT_FILE}.rst" os.system(f"cp {notes} {target_rst}") @@ -104,12 +103,10 @@ def write_release(version): f.writelines([f' {c}\n' for c in compute_hash(wheel_dir, sha256)]) # translate README.rst to md for posting on GitHub - rst_to_md = subprocess.Popen( - ["pandoc", "-s", "-o", f"{target_md}", f"{target_rst}", "--wrap=preserve"], - stdout=subprocess.PIPE, stderr=subprocess.PIPE) - output, error = rst_to_md.communicate() - if not rst_to_md.returncode == 0: - raise RuntimeError(f"{error} failed") + subprocess.run( + ["pandoc", "-s", "-o", str(target_md), str(target_rst), "--wrap=preserve"], + check=True, + ) if __name__ == '__main__': parser = argparse.ArgumentParser() From 
6bab2a5606094b47f96d9047caa67565793561ab Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Mon, 28 Jul 2025 11:57:44 -0600 Subject: [PATCH 0259/1718] MAINT: Fix linting errors. --- tools/write_release.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/tools/write_release.py b/tools/write_release.py index 5e976fe564f4..7662eb7b1288 100644 --- a/tools/write_release.py +++ b/tools/write_release.py @@ -11,16 +11,15 @@ that the output is in `release` and wheels and sdist in `release/installers`. -Translation from rst to md markdown requires Pandoc, you +Translation from rst to md markdown requires Pandoc, you will need to rely on your distribution to provide that. """ +import argparse import os import subprocess import textwrap -import argparse -from hashlib import md5 -from hashlib import sha256 +from hashlib import md5, sha256 from pathlib import Path # Name of the notes directory @@ -51,7 +50,7 @@ def compute_hash(wheel_dir, hash_func): released = os.listdir(wheel_dir) checksums = [] for fn in sorted(released): - fn_path = Path(f"{wheel_dir}/{fn}") + fn_path = Path(f"{wheel_dir}/{fn}") m = hash_func(fn_path.read_bytes()) checksums.append(f"{m.hexdigest()} {fn}") return checksums @@ -77,7 +76,7 @@ def write_release(version): wheel_dir = Path(OUTPUT_DIR) / "installers" target_md = Path(OUTPUT_DIR) / f"{OUTPUT_FILE}.md" target_rst = Path(OUTPUT_DIR) / f"{OUTPUT_FILE}.rst" - + os.system(f"cp {notes} {target_rst}") with open(str(target_rst), 'a') as f: @@ -107,7 +106,8 @@ def write_release(version): ["pandoc", "-s", "-o", str(target_md), str(target_rst), "--wrap=preserve"], check=True, ) - + + if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument( From 5591f1109b45a482bdaea3d3e44a80212a188edd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20G=C3=B3rny?= Date: Mon, 28 Jul 2025 21:33:18 +0200 Subject: [PATCH 0260/1718] BLD: provide explicit control over cpu-baseline detection MIME-Version: 1.0 
Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a new `cpu-baseline-detect` feature flag that can be used to more precisely control the use of CPU baseline detection. This can be used by packages to more precisely control used SIMD code independently of compiler flags specified. The option follows typical feature semantics -- with `auto` preserving the current behavior of enabling when relevant compiler flags are found, `enabled` forcing it on based on the implicit compiler defaults, and `disabled` forcing it off. Signed-off-by: Michał Górny --- meson.options | 2 ++ meson_cpu/meson.build | 22 ++++++++++++---------- 2 files changed, 14 insertions(+), 10 deletions(-) diff --git a/meson.options b/meson.options index b09992fe9b91..f17f9901664a 100644 --- a/meson.options +++ b/meson.options @@ -28,6 +28,8 @@ option('disable-optimization', type: 'boolean', value: false, description: 'Disable CPU optimized code (dispatch,simd,unroll...)') option('cpu-baseline', type: 'string', value: 'min', description: 'Minimal set of required CPU features') +option('cpu-baseline-detect', type: 'feature', value: 'auto', + description: 'Detect CPU baseline from the compiler flags') option('cpu-dispatch', type: 'string', value: 'max -xop -fma4', description: 'Dispatched set of additional CPU features') option('test-simd', type: 'array', diff --git a/meson_cpu/meson.build b/meson_cpu/meson.build index e5b6d0fbe7be..1c4c6eecb308 100644 --- a/meson_cpu/meson.build +++ b/meson_cpu/meson.build @@ -46,20 +46,22 @@ if get_option('disable-optimization') CPU_CONF_BASELINE = 'none' CPU_CONF_DISPATCH = 'none' else - baseline_detect = false + baseline_detect = get_option('cpu-baseline-detect').enabled() c_args = get_option('c_args') - foreach arg : c_args - foreach carch : ['-march', '-mcpu', '-xhost', '/QxHost'] - if arg.contains(carch) - message('Appending option "detect" to "cpu-baseline" due to detecting global architecture c_arg "' + arg + '"') - baseline_detect = true 
+ if get_option('cpu-baseline-detect').auto() + foreach arg : c_args + foreach carch : ['-march', '-mcpu', '-xhost', '/QxHost'] + if arg.contains(carch) + message('Appending option "detect" to "cpu-baseline" due to detecting global architecture c_arg "' + arg + '"') + baseline_detect = true + break + endif + endforeach + if baseline_detect break endif endforeach - if baseline_detect - break - endif - endforeach + endif # The required minimal set of required CPU features. CPU_CONF_BASELINE = get_option('cpu-baseline') if baseline_detect From bb7dd1ba604ba6c1766c6ca72fc21d378609f485 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Tue, 29 Jul 2025 11:57:28 +0100 Subject: [PATCH 0261/1718] TYP: Add test which hits `np.array` constructor overload with `object: _SupportsArray[_ArrayT]` (#29428) Co-authored-by: Joren Hammudoglu --- numpy/typing/tests/data/reveal/array_constructors.pyi | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/numpy/typing/tests/data/reveal/array_constructors.pyi b/numpy/typing/tests/data/reveal/array_constructors.pyi index 766547e54b60..99073ac4543a 100644 --- a/numpy/typing/tests/data/reveal/array_constructors.pyi +++ b/numpy/typing/tests/data/reveal/array_constructors.pyi @@ -1,7 +1,7 @@ import sys from collections import deque from pathlib import Path -from typing import Any, TypeVar, assert_type +from typing import Any, Generic, TypeVar, assert_type import numpy as np import numpy.typing as npt @@ -10,12 +10,16 @@ _ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, covariant=True) class SubClass(npt.NDArray[_ScalarT_co]): ... +class IntoSubClass(Generic[_ScalarT_co]): + def __array__(self) -> SubClass[_ScalarT_co]: ... 
+ i8: np.int64 A: npt.NDArray[np.float64] B: SubClass[np.float64] C: list[int] D: SubClass[np.float64 | np.int64] +E: IntoSubClass[np.float64 | np.int64] mixed_shape: tuple[int, np.int64] @@ -39,6 +43,7 @@ assert_type(np.array(B, subok=True), SubClass[np.float64]) assert_type(np.array(B, subok=True, ndmin=0), SubClass[np.float64]) assert_type(np.array(B, subok=True, ndmin=1), SubClass[np.float64]) assert_type(np.array(D), npt.NDArray[np.float64 | np.int64]) +assert_type(np.array(E, subok=True), SubClass[np.float64 | np.int64]) # https://github.com/numpy/numpy/issues/29245 assert_type(np.array([], dtype=np.bool), npt.NDArray[np.bool]) From c6471c59bbf1cb95b50eae91773ccc2f5fe1436e Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Tue, 29 Jul 2025 15:44:00 +0100 Subject: [PATCH 0262/1718] TYP: Type ``MaskedArray.flat`` (#29466) Co-authored-by: Joren Hammudoglu --- numpy/ma/core.pyi | 4 ++-- numpy/typing/tests/data/pass/ma.py | 2 ++ numpy/typing/tests/data/reveal/ma.pyi | 2 ++ 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 0a039831a7d4..09edffc889a5 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -528,9 +528,9 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def baseclass(self) -> type[NDArray[Any]]: ... data: Any @property - def flat(self): ... + def flat(self) -> MaskedIterator: ... @flat.setter - def flat(self, value): ... + def flat(self, value: ArrayLike, /) -> None: ... @property def fill_value(self): ... 
@fill_value.setter diff --git a/numpy/typing/tests/data/pass/ma.py b/numpy/typing/tests/data/pass/ma.py index b9be2b2e4384..793f7097ee81 100644 --- a/numpy/typing/tests/data/pass/ma.py +++ b/numpy/typing/tests/data/pass/ma.py @@ -34,6 +34,8 @@ MAR_f.mask = AR_b MAR_f.mask = np.False_ +MAR_f.flat = [9] + # Inplace addition MAR_b += AR_LIKE_b diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index f2b7aa215c7a..4f9d971759e6 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -431,6 +431,8 @@ assert_type(MAR_f8.argsort(), MaskedArray[np.intp]) assert_type(MAR_f8.argsort(axis=0, kind='heap', order=('x', 'y')), MaskedArray[np.intp]) assert_type(MAR_f8.argsort(endwith=True, fill_value=1.5, stable=False), MaskedArray[np.intp]) +assert_type(MAR_f8.flat, np.ma.core.MaskedIterator) + def invalid_resize() -> None: assert_type(MAR_f8.resize((1,1)), NoReturn) # type: ignore[arg-type] From bb31564192e4789b94bd7cc4c703b2beb8ce0806 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 29 Jul 2025 12:24:57 -0600 Subject: [PATCH 0263/1718] MAINT: Bump pypa/cibuildwheel from 3.1.0 to 3.1.2 (#29471) Bumps [pypa/cibuildwheel](https://github.com/pypa/cibuildwheel) from 3.1.0 to 3.1.2. - [Release notes](https://github.com/pypa/cibuildwheel/releases) - [Changelog](https://github.com/pypa/cibuildwheel/blob/main/docs/changelog.md) - [Commits](https://github.com/pypa/cibuildwheel/compare/ffd835cef18fa11522f608fc0fa973b89f5ddc87...9e4e50bd76b3190f55304387e333f6234823ea9b) --- updated-dependencies: - dependency-name: pypa/cibuildwheel dependency-version: 3.1.2 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/emscripten.yml | 2 +- .github/workflows/wheels.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index dbfb0ad48960..5c1b35653d68 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -49,7 +49,7 @@ jobs: fetch-tags: true persist-credentials: false - - uses: pypa/cibuildwheel@ffd835cef18fa11522f608fc0fa973b89f5ddc87 # v3.1.0 + - uses: pypa/cibuildwheel@9e4e50bd76b3190f55304387e333f6234823ea9b # v3.1.2 env: CIBW_PLATFORM: pyodide diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index ca41042e1ad3..3fd37bb60ee5 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -174,7 +174,7 @@ jobs: fi - name: Build wheels - uses: pypa/cibuildwheel@ffd835cef18fa11522f608fc0fa973b89f5ddc87 # v3.1.0 + uses: pypa/cibuildwheel@9e4e50bd76b3190f55304387e333f6234823ea9b # v3.1.2 env: CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} From d621a3162e7a14e986e2dee29bbf9ff05f7b728d Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Wed, 30 Jul 2025 00:49:32 +0100 Subject: [PATCH 0264/1718] TYP: Type ``MaskedArray.recordmask`` (#29467) --- numpy/ma/core.pyi | 6 +++--- numpy/typing/tests/data/fail/ma.pyi | 5 +++++ numpy/typing/tests/data/reveal/ma.pyi | 3 +++ 3 files changed, 11 insertions(+), 3 deletions(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 09edffc889a5..764138048e1b 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -509,13 +509,13 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def shape(self: MaskedArray[_ShapeT, Any], shape: _ShapeT, /) -> None: ... def __setmask__(self, mask: _ArrayLikeBool_co, copy: bool = False) -> None: ... 
@property - def mask(self) -> NDArray[MaskType] | MaskType: ... + def mask(self) -> np.ndarray[_ShapeT_co, dtype[MaskType]] | MaskType: ... @mask.setter def mask(self, value: _ArrayLikeBool_co, /) -> None: ... @property - def recordmask(self): ... + def recordmask(self) -> np.ndarray[_ShapeT_co, dtype[MaskType]] | MaskType: ... @recordmask.setter - def recordmask(self, mask): ... + def recordmask(self, mask: Never, /) -> NoReturn: ... def harden_mask(self) -> Self: ... def soften_mask(self) -> Self: ... @property diff --git a/numpy/typing/tests/data/fail/ma.pyi b/numpy/typing/tests/data/fail/ma.pyi index 160d30f885e3..9354084fead0 100644 --- a/numpy/typing/tests/data/fail/ma.pyi +++ b/numpy/typing/tests/data/fail/ma.pyi @@ -17,6 +17,11 @@ AR_b: npt.NDArray[np.bool] MAR_1d_f8.shape = (3, 1) # type: ignore[assignment] MAR_1d_f8.dtype = np.bool # type: ignore[assignment] +def invalid_recordmask_setter() -> None: + # We make an inner function for this one to avoid the + # `NoReturn` causing an early exit for type checkers. 
+ MAR_1d_f8.recordmask = [True] # type: ignore[assignment] + np.ma.min(MAR_1d_f8, axis=1.0) # type: ignore[call-overload] np.ma.min(MAR_1d_f8, keepdims=1.0) # type: ignore[call-overload] np.ma.min(MAR_1d_f8, out=1.0) # type: ignore[call-overload] diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index 4f9d971759e6..d708eeed45e2 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -374,6 +374,9 @@ assert_type(MAR_b.shrink_mask(), MaskedArray[np.bool_]) assert_type(MAR_i8.hardmask, bool) assert_type(MAR_i8.sharedmask, bool) +assert_type(MAR_i8.recordmask, np.ma.MaskType | NDArray[np.ma.MaskType]) +assert_type(MAR_2d_f4.recordmask, np.ma.MaskType | np.ndarray[tuple[int, int], np.dtype[np.ma.MaskType]]) + assert_type(MAR_2d_f4.anom(), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) assert_type(MAR_2d_f4.anom(axis=0, dtype=np.float16), np.ma.MaskedArray[tuple[int, int], np.dtype]) assert_type(MAR_2d_f4.anom(0, np.float16), np.ma.MaskedArray[tuple[int, int], np.dtype]) From e4aee500a2bf497457efd825b7d578257c0e04f5 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Wed, 30 Jul 2025 13:30:15 +0100 Subject: [PATCH 0265/1718] TYP: Type ``MaskedArray.__new__`` (#29457) Co-authored-by: Joren Hammudoglu --- numpy/ma/core.pyi | 63 +++++++++++++- numpy/typing/tests/data/fail/ma.pyi | 5 ++ numpy/typing/tests/data/reveal/ma.pyi | 114 +++++++++++++++----------- 3 files changed, 132 insertions(+), 50 deletions(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 764138048e1b..824ceef18822 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -455,7 +455,68 @@ class MaskedIterator: class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): __array_priority__: Any - def __new__(cls, data=..., mask=..., dtype=..., copy=..., subok=..., ndmin=..., fill_value=..., keep_mask=..., hard_mask=..., shrink=..., order=...): ... 
+ @overload + def __new__( + cls, + data: _ArrayLike[_ScalarT], + mask: _ArrayLikeBool_co = nomask, + dtype: None = None, + copy: bool = False, + subok: bool = True, + ndmin: int = 0, + fill_value: _ScalarLike_co | None = None, + keep_mask: bool = True, + hard_mask: bool | None = None, + shrink: bool = True, + order: _OrderKACF | None = None, + ) -> _MaskedArray[_ScalarT]: ... + @overload + def __new__( + cls, + data: object, + mask: _ArrayLikeBool_co, + dtype: _DTypeLike[_ScalarT], + copy: bool = False, + subok: bool = True, + ndmin: int = 0, + fill_value: _ScalarLike_co | None = None, + keep_mask: bool = True, + hard_mask: bool | None = None, + shrink: bool = True, + order: _OrderKACF | None = None, + ) -> _MaskedArray[_ScalarT]: ... + @overload + def __new__( + cls, + data: object, + mask: _ArrayLikeBool_co = nomask, + *, + dtype: _DTypeLike[_ScalarT], + copy: bool = False, + subok: bool = True, + ndmin: int = 0, + fill_value: _ScalarLike_co | None = None, + keep_mask: bool = True, + hard_mask: bool | None = None, + shrink: bool = True, + order: _OrderKACF | None = None, + ) -> _MaskedArray[_ScalarT]: ... + @overload + def __new__( + cls, + data: object, + mask: _ArrayLikeBool_co = nomask, + dtype: DTypeLike | None = None, + copy: bool = False, + subok: bool = True, + ndmin: int = 0, + fill_value: _ScalarLike_co | None = None, + keep_mask: bool = True, + hard_mask: bool | None = None, + shrink: bool = True, + order: _OrderKACF | None = None, + ) -> _MaskedArray[Any]: ... + def __array_finalize__(self, obj): ... def __array_wrap__(self, obj, context=..., return_scalar=...): ... 
diff --git a/numpy/typing/tests/data/fail/ma.pyi b/numpy/typing/tests/data/fail/ma.pyi index 9354084fead0..ed973462e2d4 100644 --- a/numpy/typing/tests/data/fail/ma.pyi +++ b/numpy/typing/tests/data/fail/ma.pyi @@ -148,3 +148,8 @@ MAR_td64 **= 2 # type: ignore[misc] MAR_1d_f8.swapaxes(axis1=1, axis2=0) # type: ignore[call-arg] MAR_1d_f8.argsort(axis=(1,0)) # type: ignore[arg-type] + +np.ma.MaskedArray(np.array([1,2,3]), keep_mask='yes') # type: ignore[call-overload] +np.ma.MaskedArray(np.array([1,2,3]), subok=None) # type: ignore[call-overload] +np.ma.MaskedArray(np.array([1,2,3]), ndim=None) # type: ignore[call-overload] +np.ma.MaskedArray(np.array([1,2,3]), order='Corinthian') # type: ignore[call-overload] diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index d708eeed45e2..3edd300c0b5a 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -1,14 +1,20 @@ -from typing import Any, Literal, TypeAlias, TypeVar, assert_type, NoReturn +from typing import Any, Generic, Literal, NoReturn, TypeAlias, TypeVar, assert_type import numpy as np from numpy import dtype, generic from numpy._typing import NDArray, _AnyShape _ScalarT = TypeVar("_ScalarT", bound=generic) +_ScalarT_co = TypeVar("_ScalarT_co", bound=generic, covariant=True) MaskedArray: TypeAlias = np.ma.MaskedArray[_AnyShape, dtype[_ScalarT]] _Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] -class MaskedArraySubclass(MaskedArray[np.complex128]): ... +class MaskedArraySubclass(MaskedArray[_ScalarT_co]): ... + +class IntoMaskedArraySubClass(Generic[_ScalarT_co]): + def __array__(self) -> MaskedArraySubclass[_ScalarT_co]: ... 
+ +MaskedArraySubclassC: TypeAlias = MaskedArraySubclass[np.complex128] AR_b: NDArray[np.bool] AR_f4: NDArray[np.float32] @@ -43,7 +49,8 @@ MAR_V: MaskedArray[np.void] MAR_floating: MaskedArray[np.floating] MAR_number: MaskedArray[np.number] -MAR_subclass: MaskedArraySubclass +MAR_subclass: MaskedArraySubclassC +MAR_into_subclass: IntoMaskedArraySubClass[np.float32] MAR_1d: np.ma.MaskedArray[tuple[int], np.dtype] MAR_2d_f4: np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]] @@ -66,9 +73,9 @@ assert_type(np.ma.min(MAR_b, axis=0), Any) assert_type(np.ma.min(MAR_f4, axis=0), Any) assert_type(np.ma.min(MAR_b, keepdims=True), Any) assert_type(np.ma.min(MAR_f4, keepdims=True), Any) -assert_type(np.ma.min(MAR_f4, out=MAR_subclass), MaskedArraySubclass) -assert_type(np.ma.min(MAR_f4, 0, MAR_subclass), MaskedArraySubclass) -assert_type(np.ma.min(MAR_f4, None, MAR_subclass), MaskedArraySubclass) +assert_type(np.ma.min(MAR_f4, out=MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.min(MAR_f4, 0, MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.min(MAR_f4, None, MAR_subclass), MaskedArraySubclassC) assert_type(MAR_b.min(), np.bool) assert_type(MAR_f4.min(), np.float32) @@ -76,9 +83,9 @@ assert_type(MAR_b.min(axis=0), Any) assert_type(MAR_f4.min(axis=0), Any) assert_type(MAR_b.min(keepdims=True), Any) assert_type(MAR_f4.min(keepdims=True), Any) -assert_type(MAR_f4.min(out=MAR_subclass), MaskedArraySubclass) -assert_type(MAR_f4.min(0, MAR_subclass), MaskedArraySubclass) -assert_type(MAR_f4.min(None, MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f4.min(out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.min(0, MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.min(None, MAR_subclass), MaskedArraySubclassC) assert_type(np.ma.max(MAR_b), np.bool) assert_type(np.ma.max(MAR_f4), np.float32) @@ -86,9 +93,9 @@ assert_type(np.ma.max(MAR_b, axis=0), Any) assert_type(np.ma.max(MAR_f4, axis=0), Any) assert_type(np.ma.max(MAR_b, keepdims=True), Any) 
assert_type(np.ma.max(MAR_f4, keepdims=True), Any) -assert_type(np.ma.max(MAR_f4, out=MAR_subclass), MaskedArraySubclass) -assert_type(np.ma.max(MAR_f4, 0, MAR_subclass), MaskedArraySubclass) -assert_type(np.ma.max(MAR_f4, None, MAR_subclass), MaskedArraySubclass) +assert_type(np.ma.max(MAR_f4, out=MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.max(MAR_f4, 0, MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.max(MAR_f4, None, MAR_subclass), MaskedArraySubclassC) assert_type(MAR_b.max(), np.bool) assert_type(MAR_f4.max(), np.float32) @@ -96,9 +103,9 @@ assert_type(MAR_b.max(axis=0), Any) assert_type(MAR_f4.max(axis=0), Any) assert_type(MAR_b.max(keepdims=True), Any) assert_type(MAR_f4.max(keepdims=True), Any) -assert_type(MAR_f4.max(out=MAR_subclass), MaskedArraySubclass) -assert_type(MAR_f4.max(0, MAR_subclass), MaskedArraySubclass) -assert_type(MAR_f4.max(None, MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f4.max(out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.max(0, MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.max(None, MAR_subclass), MaskedArraySubclassC) assert_type(np.ma.ptp(MAR_b), np.bool) assert_type(np.ma.ptp(MAR_f4), np.float32) @@ -106,9 +113,9 @@ assert_type(np.ma.ptp(MAR_b, axis=0), Any) assert_type(np.ma.ptp(MAR_f4, axis=0), Any) assert_type(np.ma.ptp(MAR_b, keepdims=True), Any) assert_type(np.ma.ptp(MAR_f4, keepdims=True), Any) -assert_type(np.ma.ptp(MAR_f4, out=MAR_subclass), MaskedArraySubclass) -assert_type(np.ma.ptp(MAR_f4, 0, MAR_subclass), MaskedArraySubclass) -assert_type(np.ma.ptp(MAR_f4, None, MAR_subclass), MaskedArraySubclass) +assert_type(np.ma.ptp(MAR_f4, out=MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.ptp(MAR_f4, 0, MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.ptp(MAR_f4, None, MAR_subclass), MaskedArraySubclassC) assert_type(MAR_b.ptp(), np.bool) assert_type(MAR_f4.ptp(), np.float32) @@ -116,9 +123,9 @@ assert_type(MAR_b.ptp(axis=0), Any) 
assert_type(MAR_f4.ptp(axis=0), Any) assert_type(MAR_b.ptp(keepdims=True), Any) assert_type(MAR_f4.ptp(keepdims=True), Any) -assert_type(MAR_f4.ptp(out=MAR_subclass), MaskedArraySubclass) -assert_type(MAR_f4.ptp(0, MAR_subclass), MaskedArraySubclass) -assert_type(MAR_f4.ptp(None, MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f4.ptp(out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.ptp(0, MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.ptp(None, MAR_subclass), MaskedArraySubclassC) assert_type(MAR_b.argmin(), np.intp) assert_type(MAR_f4.argmin(), np.intp) @@ -126,8 +133,8 @@ assert_type(MAR_f4.argmax(fill_value=6.28318, keepdims=False), np.intp) assert_type(MAR_b.argmin(axis=0), Any) assert_type(MAR_f4.argmin(axis=0), Any) assert_type(MAR_b.argmin(keepdims=True), Any) -assert_type(MAR_f4.argmin(out=MAR_subclass), MaskedArraySubclass) -assert_type(MAR_f4.argmin(None, None, out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f4.argmin(out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.argmin(None, None, out=MAR_subclass), MaskedArraySubclassC) assert_type(np.ma.argmin(MAR_b), np.intp) assert_type(np.ma.argmin(MAR_f4), np.intp) @@ -135,8 +142,8 @@ assert_type(np.ma.argmin(MAR_f4, fill_value=6.28318, keepdims=False), np.intp) assert_type(np.ma.argmin(MAR_b, axis=0), Any) assert_type(np.ma.argmin(MAR_f4, axis=0), Any) assert_type(np.ma.argmin(MAR_b, keepdims=True), Any) -assert_type(np.ma.argmin(MAR_f4, out=MAR_subclass), MaskedArraySubclass) -assert_type(np.ma.argmin(MAR_f4, None, None, out=MAR_subclass), MaskedArraySubclass) +assert_type(np.ma.argmin(MAR_f4, out=MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.argmin(MAR_f4, None, None, out=MAR_subclass), MaskedArraySubclassC) assert_type(MAR_b.argmax(), np.intp) assert_type(MAR_f4.argmax(), np.intp) @@ -144,8 +151,8 @@ assert_type(MAR_f4.argmax(fill_value=6.28318, keepdims=False), np.intp) assert_type(MAR_b.argmax(axis=0), Any) assert_type(MAR_f4.argmax(axis=0), Any) 
assert_type(MAR_b.argmax(keepdims=True), Any) -assert_type(MAR_f4.argmax(out=MAR_subclass), MaskedArraySubclass) -assert_type(MAR_f4.argmax(None, None, out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f4.argmax(out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.argmax(None, None, out=MAR_subclass), MaskedArraySubclassC) assert_type(np.ma.argmax(MAR_b), np.intp) assert_type(np.ma.argmax(MAR_f4), np.intp) @@ -153,8 +160,8 @@ assert_type(np.ma.argmax(MAR_f4, fill_value=6.28318, keepdims=False), np.intp) assert_type(np.ma.argmax(MAR_b, axis=0), Any) assert_type(np.ma.argmax(MAR_f4, axis=0), Any) assert_type(np.ma.argmax(MAR_b, keepdims=True), Any) -assert_type(np.ma.argmax(MAR_f4, out=MAR_subclass), MaskedArraySubclass) -assert_type(np.ma.argmax(MAR_f4, None, None, out=MAR_subclass), MaskedArraySubclass) +assert_type(np.ma.argmax(MAR_f4, out=MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.argmax(MAR_f4, None, None, out=MAR_subclass), MaskedArraySubclassC) assert_type(MAR_b.all(), np.bool) assert_type(MAR_f4.all(), np.bool) @@ -164,8 +171,8 @@ assert_type(MAR_b.all(axis=0, keepdims=True), MaskedArray[np.bool]) assert_type(MAR_b.all(0, None, True), MaskedArray[np.bool]) assert_type(MAR_f4.all(axis=0), np.bool | MaskedArray[np.bool]) assert_type(MAR_b.all(keepdims=True), MaskedArray[np.bool]) -assert_type(MAR_f4.all(out=MAR_subclass), MaskedArraySubclass) -assert_type(MAR_f4.all(None, out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f4.all(out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.all(None, out=MAR_subclass), MaskedArraySubclassC) assert_type(MAR_b.any(), np.bool) assert_type(MAR_f4.any(), np.bool) @@ -175,22 +182,22 @@ assert_type(MAR_b.any(axis=0, keepdims=True), MaskedArray[np.bool]) assert_type(MAR_b.any(0, None, True), MaskedArray[np.bool]) assert_type(MAR_f4.any(axis=0), np.bool | MaskedArray[np.bool]) assert_type(MAR_b.any(keepdims=True), MaskedArray[np.bool]) -assert_type(MAR_f4.any(out=MAR_subclass), 
MaskedArraySubclass) -assert_type(MAR_f4.any(None, out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f4.any(out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.any(None, out=MAR_subclass), MaskedArraySubclassC) assert_type(MAR_f4.sort(), None) assert_type(MAR_f4.sort(axis=0, kind='quicksort', order='K', endwith=False, fill_value=42., stable=False), None) assert_type(np.ma.sort(MAR_f4), MaskedArray[np.float32]) -assert_type(np.ma.sort(MAR_subclass), MaskedArraySubclass) +assert_type(np.ma.sort(MAR_subclass), MaskedArraySubclassC) assert_type(np.ma.sort([[0, 1], [2, 3]]), NDArray[Any]) assert_type(np.ma.sort(AR_f4), NDArray[np.float32]) assert_type(MAR_f8.take(0), np.float64) assert_type(MAR_1d.take(0), Any) assert_type(MAR_f8.take([0]), MaskedArray[np.float64]) -assert_type(MAR_f8.take(0, out=MAR_subclass), MaskedArraySubclass) -assert_type(MAR_f8.take([0], out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f8.take(0, out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f8.take([0], out=MAR_subclass), MaskedArraySubclassC) assert_type(np.ma.take(f, 0), Any) assert_type(np.ma.take(f4, 0), np.float32) @@ -199,8 +206,8 @@ assert_type(np.ma.take(AR_f4, 0), np.float32) assert_type(np.ma.take(MAR_1d, 0), Any) assert_type(np.ma.take(MAR_f8, [0]), MaskedArray[np.float64]) assert_type(np.ma.take(AR_f4, [0]), MaskedArray[np.float32]) -assert_type(np.ma.take(MAR_f8, 0, out=MAR_subclass), MaskedArraySubclass) -assert_type(np.ma.take(MAR_f8, [0], out=MAR_subclass), MaskedArraySubclass) +assert_type(np.ma.take(MAR_f8, 0, out=MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.take(MAR_f8, [0], out=MAR_subclass), MaskedArraySubclassC) assert_type(np.ma.take([1], [0]), MaskedArray[Any]) assert_type(np.ma.take(np.eye(2), 1, axis=0), MaskedArray[np.float64]) @@ -391,17 +398,17 @@ assert_type(MAR_2d_f4.T, np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32] assert_type(MAR_2d_f4.dot(1), MaskedArray[Any]) assert_type(MAR_2d_f4.dot([1]), 
MaskedArray[Any]) -assert_type(MAR_2d_f4.dot(1, out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_2d_f4.dot(1, out=MAR_subclass), MaskedArraySubclassC) assert_type(MAR_2d_f4.nonzero(), tuple[_Array1D[np.intp], ...]) assert_type(MAR_2d_f4.nonzero()[0], _Array1D[np.intp]) assert_type(MAR_f8.trace(), Any) -assert_type(MAR_f8.trace(out=MAR_subclass), MaskedArraySubclass) -assert_type(MAR_f8.trace(out=MAR_subclass, dtype=None), MaskedArraySubclass) +assert_type(MAR_f8.trace(out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f8.trace(out=MAR_subclass, dtype=None), MaskedArraySubclassC) assert_type(MAR_f8.round(), MaskedArray[np.float64]) -assert_type(MAR_f8.round(out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f8.round(out=MAR_subclass), MaskedArraySubclassC) assert_type(MAR_i8.reshape(None), MaskedArray[np.int64]) assert_type(MAR_f8.reshape(-1), np.ma.MaskedArray[tuple[int], np.dtype[np.float64]]) @@ -411,10 +418,10 @@ assert_type(MAR_s.reshape([]), np.ma.MaskedArray[tuple[()], np.dtype[np.str_]]) assert_type(MAR_V.reshape((480, 720, 4)), np.ma.MaskedArray[tuple[int, int, int], np.dtype[np.void]]) assert_type(MAR_f8.cumprod(), MaskedArray[Any]) -assert_type(MAR_f8.cumprod(out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f8.cumprod(out=MAR_subclass), MaskedArraySubclassC) assert_type(MAR_f8.cumsum(), MaskedArray[Any]) -assert_type(MAR_f8.cumsum(out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f8.cumsum(out=MAR_subclass), MaskedArraySubclassC) assert_type(MAR_f8.view(), MaskedArray[np.float64]) assert_type(MAR_f8.view(dtype=np.float32), MaskedArray[np.float32]) @@ -439,6 +446,15 @@ assert_type(MAR_f8.flat, np.ma.core.MaskedIterator) def invalid_resize() -> None: assert_type(MAR_f8.resize((1,1)), NoReturn) # type: ignore[arg-type] +assert_type(np.ma.MaskedArray(AR_f4), MaskedArray[np.float32]) +assert_type(np.ma.MaskedArray(np.array([1,2,3]), [True, True, False], np.float16), MaskedArray[np.float16]) 
+assert_type(np.ma.MaskedArray(np.array([1,2,3]), dtype=np.float16), MaskedArray[np.float16]) +assert_type(np.ma.MaskedArray(np.array([1,2,3]), copy=True), MaskedArray[Any]) +# TODO: This one could be made more precise, the return type could be `MaskedArraySubclassC` +assert_type(np.ma.MaskedArray(MAR_subclass), MaskedArray[np.complex128]) +# TODO: This one could be made more precise, the return type could be `MaskedArraySubclass[np.float32]` +assert_type(np.ma.MaskedArray(MAR_into_subclass), MaskedArray[np.float32]) + # Masked Array addition assert_type(MAR_b + AR_LIKE_u, MaskedArray[np.uint32]) @@ -780,27 +796,27 @@ assert_type(AR_LIKE_o * MAR_o, Any) # type: ignore[assert-type] assert_type(MAR_f8.sum(), Any) assert_type(MAR_f8.sum(axis=0), Any) assert_type(MAR_f8.sum(keepdims=True), Any) -assert_type(MAR_f8.sum(out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f8.sum(out=MAR_subclass), MaskedArraySubclassC) assert_type(MAR_f8.std(), Any) assert_type(MAR_f8.std(axis=0), Any) assert_type(MAR_f8.std(keepdims=True, mean=0.), Any) -assert_type(MAR_f8.std(out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f8.std(out=MAR_subclass), MaskedArraySubclassC) assert_type(MAR_f8.var(), Any) assert_type(MAR_f8.var(axis=0), Any) assert_type(MAR_f8.var(keepdims=True, mean=0.), Any) -assert_type(MAR_f8.var(out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f8.var(out=MAR_subclass), MaskedArraySubclassC) assert_type(MAR_f8.mean(), Any) assert_type(MAR_f8.mean(axis=0), Any) assert_type(MAR_f8.mean(keepdims=True), Any) -assert_type(MAR_f8.mean(out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f8.mean(out=MAR_subclass), MaskedArraySubclassC) assert_type(MAR_f8.prod(), Any) assert_type(MAR_f8.prod(axis=0), Any) assert_type(MAR_f8.prod(keepdims=True), Any) -assert_type(MAR_f8.prod(out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f8.prod(out=MAR_subclass), MaskedArraySubclassC) # MaskedArray "true" division From 75169a9cba27bd28bb19c36e8d0cff6a49f35bf0 
Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Wed, 30 Jul 2025 13:34:37 +0100 Subject: [PATCH 0266/1718] TYP: Remove ``MaskedArray.__reduce__``, and punt on ``MaskedArray.__{eq,ne}__`` (#29470) --- numpy/ma/core.pyi | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 824ceef18822..79503caa7821 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -601,8 +601,12 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def filled(self, /, fill_value: _ScalarLike_co | None = None) -> ndarray[_ShapeT_co, _DTypeT_co]: ... def compressed(self) -> ndarray[tuple[int], _DTypeT_co]: ... def compress(self, condition, axis=..., out=...): ... - def __eq__(self, other): ... - def __ne__(self, other): ... + + # TODO: How to deal with the non-commutative nature of `==` and `!=`? + # xref numpy/numpy#17368 + def __eq__(self, other: Incomplete, /) -> Incomplete: ... + def __ne__(self, other: Incomplete, /) -> Incomplete: ... + def __ge__(self, other: ArrayLike, /) -> _MaskedArray[bool_]: ... # type: ignore[override] def __gt__(self, other: ArrayLike, /) -> _MaskedArray[bool_]: ... # type: ignore[override] def __le__(self, other: ArrayLike, /) -> _MaskedArray[bool_]: ... # type: ignore[override] @@ -1902,7 +1906,6 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def tofile(self, /, fid: Incomplete, sep: str = "", format: str = "%s") -> Incomplete: ... # - def __reduce__(self): ... def __deepcopy__(self, memo: dict[int, Any] | None = None) -> Self: ... 
# Keep `dtype` at the bottom to avoid name conflicts with `np.dtype` From 00bbfefed97140b3c493b9154d4db62dfe2cf787 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Wed, 30 Jul 2025 13:36:48 +0100 Subject: [PATCH 0267/1718] TYP: Type ``MaskedArray.__getitem__`` (#29472) --- numpy/__init__.pyi | 1 + numpy/ma/core.pyi | 16 +++++++++++++++- numpy/typing/tests/data/reveal/ma.pyi | 12 ++++++++++++ 3 files changed, 28 insertions(+), 1 deletion(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 302f0cdf15fc..643327f29d1b 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2106,6 +2106,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): /, ) -> ndarray[_ShapeT, _DTypeT]: ... + # Keep in sync with `MaskedArray.__getitem__` @overload def __getitem__(self, key: _ArrayInt_co | tuple[_ArrayInt_co, ...], /) -> ndarray[_AnyShape, _DTypeT_co]: ... @overload diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 79503caa7821..51038178ee6f 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -25,6 +25,7 @@ from numpy import ( _OrderKACF, _PartitionKind, _SortKind, + _ToIndices, amax, amin, bool_, @@ -54,6 +55,7 @@ from numpy import ( str_, timedelta64, unsignedinteger, + void, ) from numpy._globals import _NoValueType from numpy._typing import ( @@ -290,6 +292,7 @@ _MaskedArrayComplex_co: TypeAlias = _MaskedArray[inexact | integer | np.bool] _MaskedArrayNumber_co: TypeAlias = _MaskedArray[number | np.bool] _MaskedArrayTD64_co: TypeAlias = _MaskedArray[timedelta64 | integer | np.bool] +_ArrayInt_co: TypeAlias = NDArray[integer | bool_] _Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] MaskType = bool_ @@ -562,7 +565,18 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): fill_value: _ScalarLike_co | None = None ) -> MaskedArray[_ShapeT_co, dtype]: ... - def __getitem__(self, indx): ... 
+ # Keep in sync with `ndarray.__getitem__` + @overload + def __getitem__(self, key: _ArrayInt_co | tuple[_ArrayInt_co, ...], /) -> MaskedArray[_AnyShape, _DTypeT_co]: ... + @overload + def __getitem__(self, key: SupportsIndex | tuple[SupportsIndex, ...], /) -> Any: ... + @overload + def __getitem__(self, key: _ToIndices, /) -> MaskedArray[_AnyShape, _DTypeT_co]: ... + @overload + def __getitem__(self: _MaskedArray[void], indx: str, /) -> MaskedArray[_ShapeT_co, dtype]: ... + @overload + def __getitem__(self: _MaskedArray[void], indx: list[str], /) -> MaskedArray[_ShapeT_co, dtype[void]]: ... + def __setitem__(self, indx, value): ... @property def shape(self) -> _ShapeT_co: ... diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index 3edd300c0b5a..344a9ef70160 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -18,6 +18,7 @@ MaskedArraySubclassC: TypeAlias = MaskedArraySubclass[np.complex128] AR_b: NDArray[np.bool] AR_f4: NDArray[np.float32] +AR_i8: NDArray[np.int64] AR_u4: NDArray[np.uint32] AR_dt64: NDArray[np.datetime64] AR_td64: NDArray[np.timedelta64] @@ -54,6 +55,7 @@ MAR_into_subclass: IntoMaskedArraySubClass[np.float32] MAR_1d: np.ma.MaskedArray[tuple[int], np.dtype] MAR_2d_f4: np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]] +MAR_2d_V: np.ma.MaskedArray[tuple[int, int], np.dtype[np.void]] b: np.bool f4: np.float32 @@ -365,6 +367,16 @@ assert_type(MAR_2d_f4.baseclass, type[NDArray[Any]]) assert_type(MAR_b.swapaxes(0, 1), MaskedArray[np.bool]) assert_type(MAR_2d_f4.swapaxes(1, 0), MaskedArray[np.float32]) +assert_type(MAR_2d_f4[AR_i8], MaskedArray[np.float32]) +assert_type(MAR_2d_f4[[1, 2, 3]], MaskedArray[np.float32]) +assert_type(MAR_2d_f4[1:], MaskedArray[np.float32]) +assert_type(MAR_2d_f4[:], MaskedArray[np.float32]) +assert_type(MAR_2d_f4[0, 0], Any) +assert_type(MAR_2d_f4[:, np.newaxis], MaskedArray[np.float32]) +assert_type(MAR_2d_f4[..., -1], 
MaskedArray[np.float32]) +assert_type(MAR_2d_V['field_0'], np.ma.MaskedArray[tuple[int, int], np.dtype]) +assert_type(MAR_2d_V[['field_0', 'field_1']], np.ma.MaskedArray[tuple[int, int], np.dtype[np.void]]) + assert_type(np.ma.nomask, np.bool[Literal[False]]) assert_type(np.ma.MaskType, type[np.bool]) From d53fe4aa45f44f52835e4b1946434b1a4acc76c8 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Wed, 30 Jul 2025 15:44:04 +0100 Subject: [PATCH 0268/1718] TYP: Type ``MaskedArray.fill_value`` (#29468) --- numpy/ma/core.pyi | 4 ++-- numpy/typing/tests/data/pass/ma.py | 2 ++ numpy/typing/tests/data/reveal/ma.pyi | 2 ++ 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 51038178ee6f..d79b4695bb32 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -607,9 +607,9 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @flat.setter def flat(self, value: ArrayLike, /) -> None: ... @property - def fill_value(self): ... + def fill_value(self: _MaskedArray[_ScalarT]) -> _ScalarT: ... @fill_value.setter - def fill_value(self, value=...): ... + def fill_value(self, value: _ScalarLike_co | None = None) -> None: ... get_fill_value: Any set_fill_value: Any def filled(self, /, fill_value: _ScalarLike_co | None = None) -> ndarray[_ShapeT_co, _DTypeT_co]: ... 
diff --git a/numpy/typing/tests/data/pass/ma.py b/numpy/typing/tests/data/pass/ma.py index 793f7097ee81..02655d7f88c7 100644 --- a/numpy/typing/tests/data/pass/ma.py +++ b/numpy/typing/tests/data/pass/ma.py @@ -34,6 +34,8 @@ MAR_f.mask = AR_b MAR_f.mask = np.False_ +MAR_i.fill_value = 0 + MAR_f.flat = [9] # Inplace addition diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index 344a9ef70160..52c0dfa6ab75 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -401,6 +401,8 @@ assert_type(MAR_2d_f4.anom(axis=0, dtype=np.float16), np.ma.MaskedArray[tuple[in assert_type(MAR_2d_f4.anom(0, np.float16), np.ma.MaskedArray[tuple[int, int], np.dtype]) assert_type(MAR_2d_f4.anom(0, 'float16'), np.ma.MaskedArray[tuple[int, int], np.dtype]) +assert_type(MAR_i8.fill_value, np.int64) + assert_type(MAR_b.transpose(), MaskedArray[np.bool]) assert_type(MAR_2d_f4.transpose(), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) assert_type(MAR_2d_f4.transpose(1, 0), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) From 97c6b8602791e266b0e073914e25121df0bb4936 Mon Sep 17 00:00:00 2001 From: Raghuveer Devulapalli Date: Wed, 30 Jul 2025 08:44:48 -0700 Subject: [PATCH 0269/1718] Add .file entry to all .s SVML files --- numpy/_core/src/umath/svml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/umath/svml b/numpy/_core/src/umath/svml index 32bf2a984207..3a713b130183 160000 --- a/numpy/_core/src/umath/svml +++ b/numpy/_core/src/umath/svml @@ -1 +1 @@ -Subproject commit 32bf2a98420762a63ab418aaa0a7d6e17eb9627a +Subproject commit 3a713b13018325451c1b939d3914ceff5ec68e19 From 19574af8decf1285cd4a5ca10af8fe81fd30f6bc Mon Sep 17 00:00:00 2001 From: Bernard Roesler Date: Wed, 30 Jul 2025 12:17:09 -0400 Subject: [PATCH 0270/1718] BUG: Always return a real dtype from linalg.cond (gh-18304) (#29333) * BUG: Always return a real dtype from linalg.cond. 
Addresses gh-18304. The condition number of a matrix is the product of two norms, which are always non-negative and real-valued, so the condition number itself should be non-negative and real-valued. This commit returns the proper real dtype from `linalg.cond`, and includes tests for the condition number of a complex matrix in various norms. * ENH: Change type of complex results only. This commit addresses a reviewer comment on the blanket application of `abs(r)`. It specifically ensures the return type of complex-valued matrices will be the corresponding real type. --- numpy/linalg/_linalg.py | 1 + numpy/linalg/tests/test_linalg.py | 15 ++++++++++++++- 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/numpy/linalg/_linalg.py b/numpy/linalg/_linalg.py index 52a2ffb8f50b..7471db7e1fe2 100644 --- a/numpy/linalg/_linalg.py +++ b/numpy/linalg/_linalg.py @@ -2011,6 +2011,7 @@ def cond(x, p=None): # contain nans in the entries where inversion failed. _assert_stacked_square(x) t, result_t = _commonType(x) + result_t = _realType(result_t) # condition number is always real signature = 'D->D' if isComplexType(t) else 'd->d' with errstate(all='ignore'): invx = _umath_linalg.inv(x, signature=signature) diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py index 8ad1c3ed6d16..4cc9ac7a5496 100644 --- a/numpy/linalg/tests/test_linalg.py +++ b/numpy/linalg/tests/test_linalg.py @@ -793,15 +793,28 @@ def do(self, a, b, tags): class TestCond(CondCases): - def test_basic_nonsvd(self): + @pytest.mark.parametrize('is_complex', [False, True]) + def test_basic_nonsvd(self, is_complex): # Smoketest the non-svd norms A = array([[1., 0, 1], [0, -2., 0], [0, 0, 3.]]) + if is_complex: + # Since A is linearly scaled, the condition number should not change + A = A * (1 + 1j) assert_almost_equal(linalg.cond(A, inf), 4) assert_almost_equal(linalg.cond(A, -inf), 2 / 3) assert_almost_equal(linalg.cond(A, 1), 4) assert_almost_equal(linalg.cond(A, -1), 0.5) 
assert_almost_equal(linalg.cond(A, 'fro'), np.sqrt(265 / 12)) + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + @pytest.mark.parametrize('norm_ord', [1, -1, 2, -2, 'fro', np.inf, -np.inf]) + def test_cond_dtypes(self, dtype, norm_ord): + # Check that the condition number is computed in the same dtype + # as the input matrix + A = array([[1., 0, 1], [0, -2., 0], [0, 0, 3.]], dtype=dtype) + out_type = get_real_dtype(dtype) + assert_equal(linalg.cond(A, p=norm_ord).dtype, out_type) + def test_singular(self): # Singular matrices have infinite condition number for # positive norms, and negative norms shouldn't raise From b4586aea61e94589e3c62e1ac6d2adc05e5a40f6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 30 Jul 2025 18:02:32 +0000 Subject: [PATCH 0271/1718] MAINT: Bump github/codeql-action from 3.29.4 to 3.29.5 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.29.4 to 3.29.5. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/4e828ff8d448a8a6e532957b1811f387a63867e8...51f77329afa6477de8c49fc9c7046c15b9a4e79d) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.29.5 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 1aea33f531f4..2d5fdb08a9d4 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. 
- name: Initialize CodeQL - uses: github/codeql-action/init@4e828ff8d448a8a6e532957b1811f387a63867e8 # v3.29.4 + uses: github/codeql-action/init@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3.29.5 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@4e828ff8d448a8a6e532957b1811f387a63867e8 # v3.29.4 + uses: github/codeql-action/autobuild@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3.29.5 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@4e828ff8d448a8a6e532957b1811f387a63867e8 # v3.29.4 + uses: github/codeql-action/analyze@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3.29.5 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 84fbe1f03cb1..079813c3aea3 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
- name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@4e828ff8d448a8a6e532957b1811f387a63867e8 # v2.1.27 + uses: github/codeql-action/upload-sarif@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v2.1.27 with: sarif_file: results.sarif From 46268caa719db95ab2198bcd619f0e3120d7c52e Mon Sep 17 00:00:00 2001 From: riku-sakamoto <46015196+riku-sakamoto@users.noreply.github.com> Date: Thu, 31 Jul 2025 05:09:16 +0900 Subject: [PATCH 0272/1718] =?UTF-8?q?DOC:=20Clarify=20that=20`numpy.printo?= =?UTF-8?q?ptions`=20applies=20only=20to=20`ndarray`,=20not=E2=80=A6=20(#2?= =?UTF-8?q?9450)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * DOC: Clarify that `numpy.printoptions` applies only to `ndarray`, not scalars As discussed in #29409, numpy.printoptions applies only to `numpy.ndarray`, not to scalars. A Note describing this behavior has been added to the docstring. * Update numpy/_core/arrayprint.py [skip cirrus] [skip actions] [skip azp] --------- Co-authored-by: Nathan Goldbaum --- numpy/_core/arrayprint.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/numpy/_core/arrayprint.py b/numpy/_core/arrayprint.py index 2a684280610b..9eda8db40dfc 100644 --- a/numpy/_core/arrayprint.py +++ b/numpy/_core/arrayprint.py @@ -250,9 +250,10 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None, Notes ----- - `formatter` is always reset with a call to `set_printoptions`. - Use `printoptions` as a context manager to set the values temporarily. + * ``formatter`` is always reset with a call to `set_printoptions`. + * Use `printoptions` as a context manager to set the values temporarily. + * These print options apply only to NumPy ndarrays, not to scalars. Examples -------- @@ -352,6 +353,10 @@ def get_printoptions(): For a full description of these options, see `set_printoptions`. + Notes + ----- + These print options apply only to NumPy ndarrays, not to scalars. 
+ See Also -------- set_printoptions, printoptions @@ -410,6 +415,10 @@ def printoptions(*args, **kwargs): -------- set_printoptions, get_printoptions + Notes + ----- + These print options apply only to NumPy ndarrays, not to scalars. + """ token = _set_printoptions(*args, **kwargs) From 4126291bc7062cbe636edf810b006653b693f78d Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Thu, 31 Jul 2025 09:59:40 +0100 Subject: [PATCH 0273/1718] MAINT: remove unnecessary `kwargs` update in `MaskedArray.reshape` (#29403) --- numpy/ma/core.py | 1 - 1 file changed, 1 deletion(-) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 84e029439725..9b705460dd85 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -4818,7 +4818,6 @@ def reshape(self, *s, **kwargs): fill_value=999999) """ - kwargs.update(order=kwargs.get('order', 'C')) result = self._data.reshape(*s, **kwargs).view(type(self)) result._update_from(self) mask = self._mask From a80542541130a2dc47eab2a6972019453cc0c518 Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Thu, 31 Jul 2025 11:03:20 +0200 Subject: [PATCH 0274/1718] MAINT: Replace setting of array shape by reshape operation (#29314) This PR we replace some explicit setting of the shape of an array in favor of using reshape. Some cases are left out (e.g. masked arrays, tests to check the .shape setting works). This is because explicit shape setting is generally unsafe mutation that we would like to avoid/get rid of. 
--- numpy/_core/numeric.py | 5 +- numpy/_core/tests/test_einsum.py | 70 ++++++++++++--------------- numpy/_core/tests/test_multiarray.py | 22 ++++----- numpy/lib/_format_impl.py | 4 +- numpy/lib/_function_base_impl.py | 5 +- numpy/lib/tests/test_arrayterator.py | 3 +- numpy/linalg/_linalg.py | 9 ++-- numpy/linalg/tests/test_linalg.py | 9 ++-- numpy/linalg/tests/test_regression.py | 2 +- numpy/ma/tests/test_core.py | 23 +++++---- numpy/ma/tests/test_extras.py | 2 +- tools/swig/test/testFlat.py | 6 +-- 12 files changed, 74 insertions(+), 86 deletions(-) diff --git a/numpy/_core/numeric.py b/numpy/_core/numeric.py index 4886468f0864..ec2cbf6dd7fc 100644 --- a/numpy/_core/numeric.py +++ b/numpy/_core/numeric.py @@ -1106,10 +1106,9 @@ def tensordot(a, b, axes=2): An extended example taking advantage of the overloading of + and \\*: - >>> a = np.array(range(1, 9)) - >>> a.shape = (2, 2, 2) + >>> a = np.array(range(1, 9)).reshape((2, 2, 2)) >>> A = np.array(('a', 'b', 'c', 'd'), dtype=object) - >>> A.shape = (2, 2) + >>> A = A.reshape((2, 2)) >>> a; A array([[[1, 2], [3, 4]], diff --git a/numpy/_core/tests/test_einsum.py b/numpy/_core/tests/test_einsum.py index 84d4af1707b6..82789eae0679 100644 --- a/numpy/_core/tests/test_einsum.py +++ b/numpy/_core/tests/test_einsum.py @@ -231,21 +231,20 @@ def __rmul__(self, other): def test_einsum_views(self): # pass-through for do_opt in [True, False]: - a = np.arange(6) - a.shape = (2, 3) + a = np.arange(6).reshape((2, 3)) b = np.einsum("...", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) b = np.einsum(a, [Ellipsis], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) b = np.einsum("ij", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, a) b = np.einsum(a, [0, 1], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, a) # output is writeable whenever input is writeable @@ -256,115 +255,110 @@ def test_einsum_views(self): 
assert_(not b.flags['WRITEABLE']) # transpose - a = np.arange(6) - a.shape = (2, 3) + a = np.arange(6).reshape((2, 3)) b = np.einsum("ji", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, a.T) b = np.einsum(a, [1, 0], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, a.T) # diagonal - a = np.arange(9) - a.shape = (3, 3) + a = np.arange(9).reshape((3, 3)) b = np.einsum("ii->i", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a[i, i] for i in range(3)]) b = np.einsum(a, [0, 0], [0], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a[i, i] for i in range(3)]) # diagonal with various ways of broadcasting an additional dimension - a = np.arange(27) - a.shape = (3, 3, 3) + a = np.arange(27).reshape((3, 3, 3)) b = np.einsum("...ii->...i", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [[x[i, i] for i in range(3)] for x in a]) b = np.einsum(a, [Ellipsis, 0, 0], [Ellipsis, 0], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [[x[i, i] for i in range(3)] for x in a]) b = np.einsum("ii...->...i", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [[x[i, i] for i in range(3)] for x in a.transpose(2, 0, 1)]) b = np.einsum(a, [0, 0, Ellipsis], [Ellipsis, 0], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [[x[i, i] for i in range(3)] for x in a.transpose(2, 0, 1)]) b = np.einsum("...ii->i...", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a[:, i, i] for i in range(3)]) b = np.einsum(a, [Ellipsis, 0, 0], [0, Ellipsis], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a[:, i, i] for i in range(3)]) b = np.einsum("jii->ij", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a[:, 
i, i] for i in range(3)]) b = np.einsum(a, [1, 0, 0], [0, 1], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a[:, i, i] for i in range(3)]) b = np.einsum("ii...->i...", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in range(3)]) b = np.einsum(a, [0, 0, Ellipsis], [0, Ellipsis], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in range(3)]) b = np.einsum("i...i->i...", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)]) b = np.einsum(a, [0, Ellipsis, 0], [0, Ellipsis], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)]) b = np.einsum("i...i->...i", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [[x[i, i] for i in range(3)] for x in a.transpose(1, 0, 2)]) b = np.einsum(a, [0, Ellipsis, 0], [Ellipsis, 0], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [[x[i, i] for i in range(3)] for x in a.transpose(1, 0, 2)]) # triple diagonal - a = np.arange(27) - a.shape = (3, 3, 3) + a = np.arange(27).reshape((3, 3, 3)) b = np.einsum("iii->i", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a[i, i, i] for i in range(3)]) b = np.einsum(a, [0, 0, 0], [0], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, [a[i, i, i] for i in range(3)]) # swap axes - a = np.arange(24) - a.shape = (2, 3, 4) + a = np.arange(24).reshape((2, 3, 4)) b = np.einsum("ijk->jik", a, optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, a.swapaxes(0, 1)) b = np.einsum(a, [0, 1, 2], [1, 0, 2], optimize=do_opt) - assert_(b.base is a) + assert_(b.base is a.base) assert_equal(b, 
a.swapaxes(0, 1)) def check_einsum_sums(self, dtype, do_opt=False): diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index e7647d2e23fe..cf2f899b7991 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -2279,8 +2279,7 @@ def test_sort_axis(self): def test_sort_size_0(self): # check axis handling for multidimensional empty arrays - a = np.array([]) - a.shape = (3, 2, 1, 0) + a = np.array([]).reshape((3, 2, 1, 0)) for axis in range(-a.ndim, a.ndim): msg = f'test empty array sort with axis={axis}' assert_equal(np.sort(a, axis=axis), a, msg) @@ -2541,8 +2540,7 @@ def test_argsort(self): assert_equal(a.copy().argsort(), c) # check axis handling for multidimensional empty arrays - a = np.array([]) - a.shape = (3, 2, 1, 0) + a = np.array([]).reshape((3, 2, 1, 0)) for axis in range(-a.ndim, a.ndim): msg = f'test empty array argsort with axis={axis}' assert_equal(np.argsort(a, axis=axis), @@ -2859,8 +2857,7 @@ def test_partition_integer(self): def test_partition_empty_array(self, kth_dtype): # check axis handling for multidimensional empty arrays kth = np.array(0, dtype=kth_dtype)[()] - a = np.array([]) - a.shape = (3, 2, 1, 0) + a = np.array([]).reshape((3, 2, 1, 0)) for axis in range(-a.ndim, a.ndim): msg = f'test empty array partition with axis={axis}' assert_equal(np.partition(a, kth, axis=axis), a, msg) @@ -2871,8 +2868,7 @@ def test_partition_empty_array(self, kth_dtype): def test_argpartition_empty_array(self, kth_dtype): # check axis handling for multidimensional empty arrays kth = np.array(0, dtype=kth_dtype)[()] - a = np.array([]) - a.shape = (3, 2, 1, 0) + a = np.array([]).reshape((3, 2, 1, 0)) for axis in range(-a.ndim, a.ndim): msg = f'test empty array argpartition with axis={axis}' assert_equal(np.partition(a, kth, axis=axis), @@ -5367,7 +5363,7 @@ def test_ip_types(self): unchecked_types = [bytes, str, np.void] x = np.random.random(24) * 100 - x.shape = 2, 3, 4 + x = 
x.reshape((2, 3, 4)) for types in np._core.sctypes.values(): for T in types: if T not in unchecked_types: @@ -5378,20 +5374,20 @@ def test_ip_types(self): def test_raise(self): x = np.random.random(24) * 100 - x.shape = 2, 3, 4 + x = x.reshape((2, 3, 4)) assert_raises(IndexError, x.take, [0, 1, 2], axis=0) assert_raises(IndexError, x.take, [-3], axis=0) assert_array_equal(x.take([-1], axis=0)[0], x[1]) def test_clip(self): x = np.random.random(24) * 100 - x.shape = 2, 3, 4 + x = x.reshape((2, 3, 4)) assert_array_equal(x.take([-1], axis=0, mode='clip')[0], x[0]) assert_array_equal(x.take([2], axis=0, mode='clip')[0], x[1]) def test_wrap(self): x = np.random.random(24) * 100 - x.shape = 2, 3, 4 + x = x.reshape((2, 3, 4)) assert_array_equal(x.take([-1], axis=0, mode='wrap')[0], x[1]) assert_array_equal(x.take([2], axis=0, mode='wrap')[0], x[0]) assert_array_equal(x.take([3], axis=0, mode='wrap')[0], x[1]) @@ -5971,7 +5967,7 @@ class TestFlat: def setup_method(self): a0 = np.arange(20.0) a = a0.reshape(4, 5) - a0.shape = (4, 5) + a0 = a0.reshape((4, 5)) a.flags.writeable = False self.a = a self.b = a[::2, ::2] diff --git a/numpy/lib/_format_impl.py b/numpy/lib/_format_impl.py index 7378ba554810..bfda7fec73b6 100644 --- a/numpy/lib/_format_impl.py +++ b/numpy/lib/_format_impl.py @@ -879,10 +879,10 @@ def read_array(fp, allow_pickle=False, pickle_kwargs=None, *, ) if fortran_order: - array.shape = shape[::-1] + array = array.reshape(shape[::-1]) array = array.transpose() else: - array.shape = shape + array = array.reshape(shape) return array diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index f5690a829fc6..05823ca3741f 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -1317,7 +1317,10 @@ def gradient(f, *varargs, axis=None, edge_order=1): # fix the shape for broadcasting shape = np.ones(N, dtype=int) shape[axis] = -1 - a.shape = b.shape = c.shape = shape + + a = a.reshape(shape) + b = 
b.reshape(shape) + c = c.reshape(shape) # 1D equivalent -- out[1:-1] = a * f[:-2] + b * f[1:-1] + c * f[2:] out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] \ + c * f[tuple(slice4)] diff --git a/numpy/lib/tests/test_arrayterator.py b/numpy/lib/tests/test_arrayterator.py index 800c9a2a5f77..42a85e58ff62 100644 --- a/numpy/lib/tests/test_arrayterator.py +++ b/numpy/lib/tests/test_arrayterator.py @@ -14,8 +14,7 @@ def test(): ndims = randint(5) + 1 shape = tuple(randint(10) + 1 for dim in range(ndims)) els = reduce(mul, shape) - a = np.arange(els) - a.shape = shape + a = np.arange(els).reshape(shape) buf_size = randint(2 * els) b = Arrayterator(a, buf_size) diff --git a/numpy/linalg/_linalg.py b/numpy/linalg/_linalg.py index 7471db7e1fe2..7e2f1ebf531b 100644 --- a/numpy/linalg/_linalg.py +++ b/numpy/linalg/_linalg.py @@ -317,8 +317,7 @@ def tensorsolve(a, b, axes=None): Examples -------- >>> import numpy as np - >>> a = np.eye(2*3*4) - >>> a.shape = (2*3, 4, 2, 3, 4) + >>> a = np.eye(2*3*4).reshape((2*3, 4, 2, 3, 4)) >>> rng = np.random.default_rng() >>> b = rng.normal(size=(2*3, 4)) >>> x = np.linalg.tensorsolve(a, b) @@ -495,8 +494,7 @@ def tensorinv(a, ind=2): Examples -------- >>> import numpy as np - >>> a = np.eye(4*6) - >>> a.shape = (4, 6, 8, 3) + >>> a = np.eye(4*6).reshape((4, 6, 8, 3)) >>> ainv = np.linalg.tensorinv(a, ind=2) >>> ainv.shape (8, 3, 4, 6) @@ -505,8 +503,7 @@ def tensorinv(a, ind=2): >>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b)) True - >>> a = np.eye(4*6) - >>> a.shape = (24, 8, 3) + >>> a = np.eye(4*6).reshape((24, 8, 3)) >>> ainv = np.linalg.tensorinv(a, ind=1) >>> ainv.shape (8, 3, 24) diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py index 4cc9ac7a5496..b3744024fd88 100644 --- a/numpy/linalg/tests/test_linalg.py +++ b/numpy/linalg/tests/test_linalg.py @@ -2218,8 +2218,7 @@ def test_non_square_handling(self, arr, ind): ((24, 8, 3), 1), ]) def test_tensorinv_shape(self, 
shape, ind): - a = np.eye(24) - a.shape = shape + a = np.eye(24).reshape(shape) ainv = linalg.tensorinv(a=a, ind=ind) expected = a.shape[ind:] + a.shape[:ind] actual = ainv.shape @@ -2229,15 +2228,13 @@ def test_tensorinv_shape(self, shape, ind): 0, -2, ]) def test_tensorinv_ind_limit(self, ind): - a = np.eye(24) - a.shape = (4, 6, 8, 3) + a = np.eye(24).reshape((4, 6, 8, 3)) with assert_raises(ValueError): linalg.tensorinv(a=a, ind=ind) def test_tensorinv_result(self): # mimic a docstring example - a = np.eye(24) - a.shape = (24, 8, 3) + a = np.eye(24).reshape((24, 8, 3)) ainv = linalg.tensorinv(a, ind=1) b = np.ones(24) assert_allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b)) diff --git a/numpy/linalg/tests/test_regression.py b/numpy/linalg/tests/test_regression.py index c46f83adb0af..e02e955cfa40 100644 --- a/numpy/linalg/tests/test_regression.py +++ b/numpy/linalg/tests/test_regression.py @@ -33,7 +33,7 @@ def test_eig_build(self): 1.51971555e-15 + 0.j, -1.51308713e-15 + 0.j]) a = arange(13 * 13, dtype=float64) - a.shape = (13, 13) + a = a.reshape((13, 13)) a = a % 17 va, ve = linalg.eig(a) va.sort() diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index 2704857308a3..5327963999e0 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -216,11 +216,11 @@ def test_basic2d(self): # Test of basic array creation and properties in 2 dimensions. 
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d for s in [(4, 3), (6, 2)]: - x.shape = s - y.shape = s - xm.shape = s - ym.shape = s - xf.shape = s + x = x.reshape(s) + y = y.reshape(s) + xm = xm.reshape(s) + ym = ym.reshape(s) + xf = xf.reshape(s) assert_(not isMaskedArray(x)) assert_(isMaskedArray(xm)) @@ -246,7 +246,12 @@ def test_concatenate_alongaxis(self): (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d # Concatenation along an axis s = (3, 4) - x.shape = y.shape = xm.shape = ym.shape = s + x = x.reshape(s) + y = y.reshape(s) + xm = xm.reshape(s) + ym = ym.reshape(s) + xf = xf.reshape(s) + assert_equal(xm.mask, np.reshape(m1, s)) assert_equal(ym.mask, np.reshape(m2, s)) xmym = concatenate((xm, ym), 1) @@ -762,8 +767,7 @@ def test_pickling_wstructured(self): def test_pickling_keepalignment(self): # Tests pickling w/ F_CONTIGUOUS arrays - a = arange(10) - a.shape = (-1, 2) + a = arange(10).reshape( (-1, 2)) b = a.T for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): test = pickle.loads(pickle.dumps(b, protocol=proto)) @@ -1176,8 +1180,7 @@ def test_basic_arithmetic(self): assert_equal(np.divide(x, y), divide(xm, ym)) def test_divide_on_different_shapes(self): - x = arange(6, dtype=float) - x.shape = (2, 3) + x = arange(6, dtype=float).reshape((2, 3)) y = arange(3, dtype=float) z = x / y diff --git a/numpy/ma/tests/test_extras.py b/numpy/ma/tests/test_extras.py index 34a032087536..f9deb33fc2c5 100644 --- a/numpy/ma/tests/test_extras.py +++ b/numpy/ma/tests/test_extras.py @@ -1077,7 +1077,7 @@ def test_3d(self): x = np.ma.arange(24).reshape(3, 4, 2) x[x % 3 == 0] = masked assert_equal(median(x, 0), [[12, 9], [6, 15], [12, 9], [18, 15]]) - x.shape = (4, 3, 2) + x = x.reshape((4, 3, 2)) assert_equal(median(x, 0), [[99, 10], [11, 99], [13, 14]]) x = np.ma.arange(24).reshape(4, 3, 2) x[x % 5 == 0] = masked diff --git a/tools/swig/test/testFlat.py b/tools/swig/test/testFlat.py index ce6f74819e86..543ee0c41d9f 100755 --- a/tools/swig/test/testFlat.py +++ 
b/tools/swig/test/testFlat.py @@ -43,7 +43,7 @@ def testProcess3D(self): for i in range(24): pack_output += struct.pack(self.typeCode, i) x = np.frombuffer(pack_output, dtype=self.typeCode) - x.shape = (2, 3, 4) + x = x.reshape((2, 3, 4)) y = x.copy() process(y) self.assertEqual(np.all((x + 1) == y), True) @@ -56,7 +56,7 @@ def testProcess3DTranspose(self): for i in range(24): pack_output += struct.pack(self.typeCode, i) x = np.frombuffer(pack_output, dtype=self.typeCode) - x.shape = (2, 3, 4) + x = x.reshape((2, 3, 4)) y = x.copy() process(y.T) self.assertEqual(np.all((x.T + 1) == y.T), True) @@ -69,7 +69,7 @@ def testProcessNoncontiguous(self): for i in range(24): pack_output += struct.pack(self.typeCode, i) x = np.frombuffer(pack_output, dtype=self.typeCode) - x.shape = (2, 3, 4) + x = x.reshape((2, 3, 4)) self.assertRaises(TypeError, process, x[:, :, 0]) From bac0a8e844a6cf18335177e6203e0e8a92cddeca Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Thu, 31 Jul 2025 09:52:27 -0400 Subject: [PATCH 0275/1718] DOC: Add 'See Also' refs for sign, copysign and signbit. (#29489) [skip azp] [skip cirrus] [skip actions] --- numpy/_core/code_generators/ufunc_docstrings.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/numpy/_core/code_generators/ufunc_docstrings.py b/numpy/_core/code_generators/ufunc_docstrings.py index ddae87bd6012..e976be723287 100644 --- a/numpy/_core/code_generators/ufunc_docstrings.py +++ b/numpy/_core/code_generators/ufunc_docstrings.py @@ -3792,6 +3792,11 @@ def add_newdoc(place, name, doc): The sign of `x`. $OUT_SCALAR_1 + See Also + -------- + signbit + copysign + Notes ----- There is more than one definition of sign in common use for complex @@ -3828,6 +3833,11 @@ def add_newdoc(place, name, doc): Output array, or reference to `out` if that was supplied. 
$OUT_SCALAR_1 + See Also + -------- + sign + copysign + Examples -------- >>> import numpy as np @@ -3859,6 +3869,11 @@ def add_newdoc(place, name, doc): The values of `x1` with the sign of `x2`. $OUT_SCALAR_2 + See Also + -------- + sign + signbit + Examples -------- >>> import numpy as np From 4ab0c51c5203ef58dfae19ecb90ef3f6ae79126a Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Thu, 31 Jul 2025 16:23:11 +0100 Subject: [PATCH 0276/1718] MAINT: Do not exclude `typing/tests/data` from ruff (#29479) --- numpy/typing/tests/data/fail/arithmetic.pyi | 2 +- numpy/typing/tests/data/fail/bitwise_ops.pyi | 2 +- numpy/typing/tests/data/fail/chararray.pyi | 1 + numpy/typing/tests/data/fail/comparisons.pyi | 8 ++-- numpy/typing/tests/data/fail/datasource.pyi | 1 + numpy/typing/tests/data/fail/fromnumeric.pyi | 6 +-- numpy/typing/tests/data/fail/ma.pyi | 34 +++++++------- .../tests/data/fail/nested_sequence.pyi | 1 + numpy/typing/tests/data/fail/npyio.pyi | 2 +- numpy/typing/tests/data/fail/scalars.pyi | 1 - numpy/typing/tests/data/fail/shape.pyi | 1 + numpy/typing/tests/data/fail/twodim_base.pyi | 2 +- numpy/typing/tests/data/fail/type_check.pyi | 1 - .../tests/data/misc/extended_precision.pyi | 4 +- numpy/typing/tests/data/pass/arithmetic.py | 6 ++- .../tests/data/pass/array_constructors.py | 1 + numpy/typing/tests/data/pass/array_like.py | 2 +- numpy/typing/tests/data/pass/arrayterator.py | 1 + numpy/typing/tests/data/pass/bitwise_ops.py | 2 +- numpy/typing/tests/data/pass/comparisons.py | 9 ++-- numpy/typing/tests/data/pass/index_tricks.py | 2 + numpy/typing/tests/data/pass/literal.py | 3 +- numpy/typing/tests/data/pass/mod.py | 2 +- numpy/typing/tests/data/pass/ndarray_misc.py | 4 +- numpy/typing/tests/data/pass/numeric.py | 2 + numpy/typing/tests/data/pass/random.py | 1 + numpy/typing/tests/data/pass/recfunctions.py | 18 ++++---- numpy/typing/tests/data/pass/scalars.py | 1 + 
numpy/typing/tests/data/pass/shape.py | 2 +- numpy/typing/tests/data/pass/simple.py | 2 +- numpy/typing/tests/data/pass/ufunclike.py | 2 + numpy/typing/tests/data/reveal/arithmetic.pyi | 3 +- numpy/typing/tests/data/reveal/arrayprint.pyi | 2 +- .../typing/tests/data/reveal/bitwise_ops.pyi | 3 +- .../typing/tests/data/reveal/comparisons.pyi | 8 ++-- .../typing/tests/data/reveal/fromnumeric.pyi | 1 - numpy/typing/tests/data/reveal/ma.pyi | 44 +++++++++---------- numpy/typing/tests/data/reveal/mod.pyi | 3 +- .../tests/data/reveal/ndarray_conversion.pyi | 1 - .../typing/tests/data/reveal/ndarray_misc.pyi | 3 +- .../tests/data/reveal/polynomial_polybase.pyi | 3 +- .../data/reveal/polynomial_polyutils.pyi | 3 +- .../tests/data/reveal/polynomial_series.pyi | 2 +- .../typing/tests/data/reveal/ufunc_config.pyi | 3 +- numpy/typing/tests/data/reveal/ufunclike.pyi | 2 +- ruff.toml | 3 +- 46 files changed, 110 insertions(+), 100 deletions(-) diff --git a/numpy/typing/tests/data/fail/arithmetic.pyi b/numpy/typing/tests/data/fail/arithmetic.pyi index e696083b8614..d62906ae87d9 100644 --- a/numpy/typing/tests/data/fail/arithmetic.pyi +++ b/numpy/typing/tests/data/fail/arithmetic.pyi @@ -30,7 +30,7 @@ AR_LIKE_M: list[np.datetime64] # NOTE: mypys `NoReturn` errors are, unfortunately, not that great _1 = AR_b - AR_LIKE_b # type: ignore[var-annotated] _2 = AR_LIKE_b - AR_b # type: ignore[var-annotated] -AR_i - bytes() # type: ignore[operator] +AR_i - b"" # type: ignore[operator] AR_f - AR_LIKE_m # type: ignore[operator] AR_f - AR_LIKE_M # type: ignore[operator] diff --git a/numpy/typing/tests/data/fail/bitwise_ops.pyi b/numpy/typing/tests/data/fail/bitwise_ops.pyi index 3538ec7d64c7..29dfe79287ad 100644 --- a/numpy/typing/tests/data/fail/bitwise_ops.pyi +++ b/numpy/typing/tests/data/fail/bitwise_ops.pyi @@ -4,7 +4,7 @@ i8 = np.int64() i4 = np.int32() u8 = np.uint64() b_ = np.bool() -i = int() +i = 0 f8 = np.float64() diff --git a/numpy/typing/tests/data/fail/chararray.pyi 
b/numpy/typing/tests/data/fail/chararray.pyi index fb52f7349dd1..f1b7009439d1 100644 --- a/numpy/typing/tests/data/fail/chararray.pyi +++ b/numpy/typing/tests/data/fail/chararray.pyi @@ -1,4 +1,5 @@ from typing import Any + import numpy as np AR_U: np.char.chararray[tuple[Any, ...], np.dtype[np.str_]] diff --git a/numpy/typing/tests/data/fail/comparisons.pyi b/numpy/typing/tests/data/fail/comparisons.pyi index 3c8a94bff240..d2965b5c1a91 100644 --- a/numpy/typing/tests/data/fail/comparisons.pyi +++ b/numpy/typing/tests/data/fail/comparisons.pyi @@ -21,7 +21,7 @@ AR_M > AR_i # type: ignore[operator] AR_M > AR_f # type: ignore[operator] AR_M > AR_m # type: ignore[operator] -AR_i > str() # type: ignore[operator] -AR_i > bytes() # type: ignore[operator] -str() > AR_M # type: ignore[operator] -bytes() > AR_M # type: ignore[operator] +AR_i > "" # type: ignore[operator] +AR_i > b"" # type: ignore[operator] +"" > AR_M # type: ignore[operator] +b"" > AR_M # type: ignore[operator] diff --git a/numpy/typing/tests/data/fail/datasource.pyi b/numpy/typing/tests/data/fail/datasource.pyi index 267b672baea7..4c603cf693a1 100644 --- a/numpy/typing/tests/data/fail/datasource.pyi +++ b/numpy/typing/tests/data/fail/datasource.pyi @@ -1,4 +1,5 @@ from pathlib import Path + import numpy as np path: Path diff --git a/numpy/typing/tests/data/fail/fromnumeric.pyi b/numpy/typing/tests/data/fail/fromnumeric.pyi index 51ef26810e21..d0a81f0bfbfe 100644 --- a/numpy/typing/tests/data/fail/fromnumeric.pyi +++ b/numpy/typing/tests/data/fail/fromnumeric.pyi @@ -32,18 +32,18 @@ np.swapaxes(A, 1, [0]) # type: ignore[call-overload] np.transpose(A, axes=1.0) # type: ignore[call-overload] np.partition(a, None) # type: ignore[call-overload] -np.partition(a, 0, axis="bob") # type: ignore[call-overload] +np.partition(a, 0, axis="bob") # type: ignore[call-overload] np.partition(A, 0, kind="bob") # type: ignore[call-overload] np.partition(A, 0, order=range(5)) # type: ignore[arg-type] np.argpartition(a, None) 
# type: ignore[arg-type] np.argpartition(a, 0, axis="bob") # type: ignore[arg-type] -np.argpartition(A, 0, kind="bob") # type: ignore[arg-type] +np.argpartition(A, 0, kind="bob") # type: ignore[arg-type] np.argpartition(A, 0, order=range(5)) # type: ignore[arg-type] np.sort(A, axis="bob") # type: ignore[call-overload] np.sort(A, kind="bob") # type: ignore[call-overload] -np.sort(A, order=range(5)) # type: ignore[arg-type] +np.sort(A, order=range(5)) # type: ignore[arg-type] np.argsort(A, axis="bob") # type: ignore[arg-type] np.argsort(A, kind="bob") # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/fail/ma.pyi b/numpy/typing/tests/data/fail/ma.pyi index ed973462e2d4..b5921660d617 100644 --- a/numpy/typing/tests/data/fail/ma.pyi +++ b/numpy/typing/tests/data/fail/ma.pyi @@ -82,7 +82,7 @@ MAR_1d_f8.any(axis=1.0) # type: ignore[call-overload] MAR_1d_f8.any(keepdims=1.0) # type: ignore[call-overload] MAR_1d_f8.any(out=1.0) # type: ignore[call-overload] -MAR_1d_f8.sort(axis=(0,1)) # type: ignore[arg-type] +MAR_1d_f8.sort(axis=(0, 1)) # type: ignore[arg-type] MAR_1d_f8.sort(axis=None) # type: ignore[arg-type] MAR_1d_f8.sort(kind='cabbage') # type: ignore[arg-type] MAR_1d_f8.sort(order=lambda: 'cabbage') # type: ignore[arg-type] @@ -101,13 +101,13 @@ np.ma.take(out=1) # type: ignore[call-overload] np.ma.take(mode="bob") # type: ignore[call-overload] MAR_1d_f8.partition(['cabbage']) # type: ignore[arg-type] -MAR_1d_f8.partition(axis=(0,1)) # type: ignore[arg-type, call-arg] +MAR_1d_f8.partition(axis=(0, 1)) # type: ignore[arg-type, call-arg] MAR_1d_f8.partition(kind='cabbage') # type: ignore[arg-type, call-arg] MAR_1d_f8.partition(order=lambda: 'cabbage') # type: ignore[arg-type, call-arg] MAR_1d_f8.partition(AR_b) # type: ignore[arg-type] MAR_1d_f8.argpartition(['cabbage']) # type: ignore[arg-type] -MAR_1d_f8.argpartition(axis=(0,1)) # type: ignore[arg-type, call-arg] +MAR_1d_f8.argpartition(axis=(0, 1)) # type: ignore[arg-type, call-arg] 
MAR_1d_f8.argpartition(kind='cabbage') # type: ignore[arg-type, call-arg] MAR_1d_f8.argpartition(order=lambda: 'cabbage') # type: ignore[arg-type, call-arg] MAR_1d_f8.argpartition(AR_b) # type: ignore[arg-type] @@ -116,10 +116,10 @@ np.ma.ndim(lambda: 'lambda') # type: ignore[arg-type] np.ma.size(AR_b, axis='0') # type: ignore[arg-type] -MAR_1d_f8 >= (lambda x: 'mango') # type: ignore[operator] -MAR_1d_f8 > (lambda x: 'mango') # type: ignore[operator] -MAR_1d_f8 <= (lambda x: 'mango') # type: ignore[operator] -MAR_1d_f8 < (lambda x: 'mango') # type: ignore[operator] +MAR_1d_f8 >= (lambda x: 'mango') # type: ignore[operator] +MAR_1d_f8 > (lambda x: 'mango') # type: ignore[operator] +MAR_1d_f8 <= (lambda x: 'mango') # type: ignore[operator] +MAR_1d_f8 < (lambda x: 'mango') # type: ignore[operator] MAR_1d_f8.count(axis=0.) # type: ignore[call-overload] @@ -129,15 +129,15 @@ MAR_1d_f8.put(4, 999, mode='flip') # type: ignore[arg-type] np.ma.put(MAR_1d_f8, 4, 999, mode='flip') # type: ignore[arg-type] -np.ma.put([1,1,3], 0, 999) # type: ignore[arg-type] +np.ma.put([1, 1, 3], 0, 999) # type: ignore[arg-type] np.ma.compressed(lambda: 'compress me') # type: ignore[call-overload] -np.ma.allequal(MAR_1d_f8, [1,2,3], fill_value=1.5) # type: ignore[arg-type] +np.ma.allequal(MAR_1d_f8, [1, 2, 3], fill_value=1.5) # type: ignore[arg-type] -np.ma.allclose(MAR_1d_f8, [1,2,3], masked_equal=4.5) # type: ignore[arg-type] -np.ma.allclose(MAR_1d_f8, [1,2,3], rtol='.4') # type: ignore[arg-type] -np.ma.allclose(MAR_1d_f8, [1,2,3], atol='.5') # type: ignore[arg-type] +np.ma.allclose(MAR_1d_f8, [1, 2, 3], masked_equal=4.5) # type: ignore[arg-type] +np.ma.allclose(MAR_1d_f8, [1, 2, 3], rtol='.4') # type: ignore[arg-type] +np.ma.allclose(MAR_1d_f8, [1, 2, 3], atol='.5') # type: ignore[arg-type] MAR_1d_f8.__setmask__('mask') # type: ignore[arg-type] @@ -147,9 +147,9 @@ MAR_td64 **= 2 # type: ignore[misc] MAR_1d_f8.swapaxes(axis1=1, axis2=0) # type: ignore[call-arg] 
-MAR_1d_f8.argsort(axis=(1,0)) # type: ignore[arg-type] +MAR_1d_f8.argsort(axis=(1, 0)) # type: ignore[arg-type] -np.ma.MaskedArray(np.array([1,2,3]), keep_mask='yes') # type: ignore[call-overload] -np.ma.MaskedArray(np.array([1,2,3]), subok=None) # type: ignore[call-overload] -np.ma.MaskedArray(np.array([1,2,3]), ndim=None) # type: ignore[call-overload] -np.ma.MaskedArray(np.array([1,2,3]), order='Corinthian') # type: ignore[call-overload] +np.ma.MaskedArray(np.array([1, 2, 3]), keep_mask='yes') # type: ignore[call-overload] +np.ma.MaskedArray(np.array([1, 2, 3]), subok=None) # type: ignore[call-overload] +np.ma.MaskedArray(np.array([1, 2, 3]), ndim=None) # type: ignore[call-overload] +np.ma.MaskedArray(np.array([1, 2, 3]), order='Corinthian') # type: ignore[call-overload] diff --git a/numpy/typing/tests/data/fail/nested_sequence.pyi b/numpy/typing/tests/data/fail/nested_sequence.pyi index a28d3df3c749..1004a36accc7 100644 --- a/numpy/typing/tests/data/fail/nested_sequence.pyi +++ b/numpy/typing/tests/data/fail/nested_sequence.pyi @@ -1,4 +1,5 @@ from collections.abc import Sequence + from numpy._typing import _NestedSequence a: Sequence[float] diff --git a/numpy/typing/tests/data/fail/npyio.pyi b/numpy/typing/tests/data/fail/npyio.pyi index e204566a5877..d57ebe3a2e3e 100644 --- a/numpy/typing/tests/data/fail/npyio.pyi +++ b/numpy/typing/tests/data/fail/npyio.pyi @@ -1,8 +1,8 @@ import pathlib from typing import IO -import numpy.typing as npt import numpy as np +import numpy.typing as npt str_path: str bytes_path: bytes diff --git a/numpy/typing/tests/data/fail/scalars.pyi b/numpy/typing/tests/data/fail/scalars.pyi index bfbe9125e529..0560a855a80f 100644 --- a/numpy/typing/tests/data/fail/scalars.pyi +++ b/numpy/typing/tests/data/fail/scalars.pyi @@ -1,4 +1,3 @@ -import sys import numpy as np f2: np.float16 diff --git a/numpy/typing/tests/data/fail/shape.pyi b/numpy/typing/tests/data/fail/shape.pyi index fea055583073..a024d7bda273 100644 --- 
a/numpy/typing/tests/data/fail/shape.pyi +++ b/numpy/typing/tests/data/fail/shape.pyi @@ -1,4 +1,5 @@ from typing import Any + import numpy as np # test bounds of _ShapeT_co diff --git a/numpy/typing/tests/data/fail/twodim_base.pyi b/numpy/typing/tests/data/fail/twodim_base.pyi index d0f2b7ad8322..e146d68c7418 100644 --- a/numpy/typing/tests/data/fail/twodim_base.pyi +++ b/numpy/typing/tests/data/fail/twodim_base.pyi @@ -1,4 +1,4 @@ -from typing import Any, TypeVar +from typing import Any import numpy as np import numpy.typing as npt diff --git a/numpy/typing/tests/data/fail/type_check.pyi b/numpy/typing/tests/data/fail/type_check.pyi index 94b6ee425af5..8b68e996304c 100644 --- a/numpy/typing/tests/data/fail/type_check.pyi +++ b/numpy/typing/tests/data/fail/type_check.pyi @@ -1,5 +1,4 @@ import numpy as np -import numpy.typing as npt DTYPE_i8: np.dtype[np.int64] diff --git a/numpy/typing/tests/data/misc/extended_precision.pyi b/numpy/typing/tests/data/misc/extended_precision.pyi index 84b5f516bdde..7978faf4d5bd 100644 --- a/numpy/typing/tests/data/misc/extended_precision.pyi +++ b/numpy/typing/tests/data/misc/extended_precision.pyi @@ -1,8 +1,8 @@ +from typing import assert_type + import numpy as np from numpy._typing import _96Bit, _128Bit -from typing import assert_type - assert_type(np.float96(), np.floating[_96Bit]) assert_type(np.float128(), np.floating[_128Bit]) assert_type(np.complex192(), np.complexfloating[_96Bit, _96Bit]) diff --git a/numpy/typing/tests/data/pass/arithmetic.py b/numpy/typing/tests/data/pass/arithmetic.py index 3b2901cf2b51..62c978a2fc51 100644 --- a/numpy/typing/tests/data/pass/arithmetic.py +++ b/numpy/typing/tests/data/pass/arithmetic.py @@ -1,9 +1,11 @@ from __future__ import annotations from typing import Any, cast + +import pytest + import numpy as np import numpy.typing as npt -import pytest c16 = np.complex128(1) f8 = np.float64(1) @@ -23,7 +25,7 @@ b = bool(1) c = complex(1) f = float(1) -i = int(1) +i = 1 class Object: diff --git 
a/numpy/typing/tests/data/pass/array_constructors.py b/numpy/typing/tests/data/pass/array_constructors.py index 17b6fab93ad8..d91d257cb17c 100644 --- a/numpy/typing/tests/data/pass/array_constructors.py +++ b/numpy/typing/tests/data/pass/array_constructors.py @@ -3,6 +3,7 @@ import numpy as np import numpy.typing as npt + class Index: def __index__(self) -> int: return 0 diff --git a/numpy/typing/tests/data/pass/array_like.py b/numpy/typing/tests/data/pass/array_like.py index 264ec55da053..f922beae34ce 100644 --- a/numpy/typing/tests/data/pass/array_like.py +++ b/numpy/typing/tests/data/pass/array_like.py @@ -5,7 +5,7 @@ import numpy as np if TYPE_CHECKING: - from numpy._typing import NDArray, ArrayLike, _SupportsArray + from numpy._typing import ArrayLike, NDArray, _SupportsArray x1: ArrayLike = True x2: ArrayLike = 5 diff --git a/numpy/typing/tests/data/pass/arrayterator.py b/numpy/typing/tests/data/pass/arrayterator.py index 572be5e2fe29..a99c09a25231 100644 --- a/numpy/typing/tests/data/pass/arrayterator.py +++ b/numpy/typing/tests/data/pass/arrayterator.py @@ -2,6 +2,7 @@ from __future__ import annotations from typing import Any + import numpy as np AR_i8: np.ndarray[Any, np.dtype[np.int_]] = np.arange(10) diff --git a/numpy/typing/tests/data/pass/bitwise_ops.py b/numpy/typing/tests/data/pass/bitwise_ops.py index 22a245d21809..2d4815b0d940 100644 --- a/numpy/typing/tests/data/pass/bitwise_ops.py +++ b/numpy/typing/tests/data/pass/bitwise_ops.py @@ -9,7 +9,7 @@ b_ = np.bool(1) b = bool(1) -i = int(1) +i = 1 AR = np.array([0, 1, 2], dtype=np.int32) AR.setflags(write=False) diff --git a/numpy/typing/tests/data/pass/comparisons.py b/numpy/typing/tests/data/pass/comparisons.py index a461d8b660da..b2e52762c7a8 100644 --- a/numpy/typing/tests/data/pass/comparisons.py +++ b/numpy/typing/tests/data/pass/comparisons.py @@ -1,6 +1,7 @@ from __future__ import annotations -from typing import cast, Any +from typing import Any, cast + import numpy as np c16 = np.complex128() 
@@ -18,10 +19,10 @@ b_ = np.bool() -b = bool() +b = False c = complex() -f = float() -i = int() +f = 0.0 +i = 0 SEQ = (0, 1, 2, 3, 4) diff --git a/numpy/typing/tests/data/pass/index_tricks.py b/numpy/typing/tests/data/pass/index_tricks.py index dfc4ff2f314a..ea98156a8f2e 100644 --- a/numpy/typing/tests/data/pass/index_tricks.py +++ b/numpy/typing/tests/data/pass/index_tricks.py @@ -1,5 +1,7 @@ from __future__ import annotations + from typing import Any + import numpy as np AR_LIKE_b = [[True, True], [True, True]] diff --git a/numpy/typing/tests/data/pass/literal.py b/numpy/typing/tests/data/pass/literal.py index c8fa476210e3..f1e0cb2a69d3 100644 --- a/numpy/typing/tests/data/pass/literal.py +++ b/numpy/typing/tests/data/pass/literal.py @@ -1,9 +1,10 @@ from __future__ import annotations -from typing import Any, TYPE_CHECKING from functools import partial +from typing import TYPE_CHECKING, Any import pytest + import numpy as np if TYPE_CHECKING: diff --git a/numpy/typing/tests/data/pass/mod.py b/numpy/typing/tests/data/pass/mod.py index 2b7e6cd85c73..464326486fa2 100644 --- a/numpy/typing/tests/data/pass/mod.py +++ b/numpy/typing/tests/data/pass/mod.py @@ -13,7 +13,7 @@ b = bool(1) f = float(1) -i = int(1) +i = 1 AR = np.array([1], dtype=np.bool) AR.setflags(write=False) diff --git a/numpy/typing/tests/data/pass/ndarray_misc.py b/numpy/typing/tests/data/pass/ndarray_misc.py index b3428f6b48c1..30ad270edd6e 100644 --- a/numpy/typing/tests/data/pass/ndarray_misc.py +++ b/numpy/typing/tests/data/pass/ndarray_misc.py @@ -9,14 +9,16 @@ from __future__ import annotations import operator -from typing import cast, Any +from typing import Any, cast import numpy as np import numpy.typing as npt + class SubClass(npt.NDArray[np.float64]): ... class IntSubClass(npt.NDArray[np.intp]): ... 
+ i4 = np.int32(1) A: np.ndarray[Any, np.dtype[np.int32]] = np.array([[1]], dtype=np.int32) B0 = np.empty((), dtype=np.int32).view(SubClass) diff --git a/numpy/typing/tests/data/pass/numeric.py b/numpy/typing/tests/data/pass/numeric.py index 1eb14cf3a2a2..4c1ffa0b2776 100644 --- a/numpy/typing/tests/data/pass/numeric.py +++ b/numpy/typing/tests/data/pass/numeric.py @@ -6,11 +6,13 @@ """ from __future__ import annotations + from typing import cast import numpy as np import numpy.typing as npt + class SubClass(npt.NDArray[np.float64]): ... diff --git a/numpy/typing/tests/data/pass/random.py b/numpy/typing/tests/data/pass/random.py index bce204a7378e..fd07e378e553 100644 --- a/numpy/typing/tests/data/pass/random.py +++ b/numpy/typing/tests/data/pass/random.py @@ -1,6 +1,7 @@ from __future__ import annotations from typing import Any + import numpy as np SEED_NONE = None diff --git a/numpy/typing/tests/data/pass/recfunctions.py b/numpy/typing/tests/data/pass/recfunctions.py index 52a3d78a7622..cca0c3988708 100644 --- a/numpy/typing/tests/data/pass/recfunctions.py +++ b/numpy/typing/tests/data/pass/recfunctions.py @@ -12,7 +12,7 @@ def test_recursive_fill_fields() -> None: [(1, 10.0), (2, 20.0)], dtype=[("A", np.int64), ("B", np.float64)], ) - b = np.zeros((int(3),), dtype=a.dtype) + b = np.zeros((3,), dtype=a.dtype) out = rfn.recursive_fill_fields(a, b) assert_type(out, np.ndarray[tuple[int], np.dtype[np.void]]) @@ -51,8 +51,8 @@ def test_get_fieldstructure() -> None: def test_merge_arrays() -> None: assert_type( rfn.merge_arrays(( - np.ones((int(2),), np.int_), - np.ones((int(3),), np.float64), + np.ones((2,), np.int_), + np.ones((3,), np.float64), )), np.recarray[tuple[int], np.dtype[np.void]], ) @@ -60,7 +60,7 @@ def test_merge_arrays() -> None: def test_drop_fields() -> None: ndtype = [("a", np.int64), ("b", [("b_a", np.double), ("b_b", np.int64)])] - a = np.ones((int(3),), dtype=ndtype) + a = np.ones((3,), dtype=ndtype) assert_type( rfn.drop_fields(a, "a"), @@ 
-78,7 +78,7 @@ def test_drop_fields() -> None: def test_rename_fields() -> None: ndtype = [("a", np.int64), ("b", [("b_a", np.double), ("b_b", np.int64)])] - a = np.ones((int(3),), dtype=ndtype) + a = np.ones((3,), dtype=ndtype) assert_type( rfn.rename_fields(a, {"a": "A", "b_b": "B_B"}), @@ -92,7 +92,7 @@ def test_repack_fields() -> None: assert_type(rfn.repack_fields(dt), np.dtype[np.void]) assert_type(rfn.repack_fields(dt.type(0)), np.void) assert_type( - rfn.repack_fields(np.ones((int(3),), dtype=dt)), + rfn.repack_fields(np.ones((3,), dtype=dt)), np.ndarray[tuple[int], np.dtype[np.void]], ) @@ -133,14 +133,14 @@ def test_require_fields() -> None: def test_stack_arrays() -> None: - x = np.zeros((int(2),), np.int32) + x = np.zeros((2,), np.int32) assert_type( rfn.stack_arrays(x), np.ndarray[tuple[int], np.dtype[np.int32]], ) - z = np.ones((int(2),), [("A", "|S3"), ("B", float)]) - zz = np.ones((int(2),), [("A", "|S3"), ("B", np.float64), ("C", np.float64)]) + z = np.ones((2,), [("A", "|S3"), ("B", float)]) + zz = np.ones((2,), [("A", "|S3"), ("B", np.float64), ("C", np.float64)]) assert_type( rfn.stack_arrays((z, zz)), np.ma.MaskedArray[tuple[Any, ...], np.dtype[np.void]], diff --git a/numpy/typing/tests/data/pass/scalars.py b/numpy/typing/tests/data/pass/scalars.py index 655903a50bce..eeb707b255e1 100644 --- a/numpy/typing/tests/data/pass/scalars.py +++ b/numpy/typing/tests/data/pass/scalars.py @@ -1,6 +1,7 @@ import datetime as dt import pytest + import numpy as np b = np.bool() diff --git a/numpy/typing/tests/data/pass/shape.py b/numpy/typing/tests/data/pass/shape.py index 286c8a81dacf..e3b497bc0310 100644 --- a/numpy/typing/tests/data/pass/shape.py +++ b/numpy/typing/tests/data/pass/shape.py @@ -1,4 +1,4 @@ -from typing import Any, NamedTuple, cast +from typing import Any, NamedTuple import numpy as np diff --git a/numpy/typing/tests/data/pass/simple.py b/numpy/typing/tests/data/pass/simple.py index 408d2482b0ef..003e9ee58bb1 100644 --- 
a/numpy/typing/tests/data/pass/simple.py +++ b/numpy/typing/tests/data/pass/simple.py @@ -1,9 +1,9 @@ """Simple expression that should pass with mypy.""" import operator +from collections.abc import Iterable import numpy as np import numpy.typing as npt -from collections.abc import Iterable # Basic checks array = np.array([1, 2]) diff --git a/numpy/typing/tests/data/pass/ufunclike.py b/numpy/typing/tests/data/pass/ufunclike.py index f993939ddba1..c02a68cc062c 100644 --- a/numpy/typing/tests/data/pass/ufunclike.py +++ b/numpy/typing/tests/data/pass/ufunclike.py @@ -1,5 +1,7 @@ from __future__ import annotations + from typing import Any + import numpy as np diff --git a/numpy/typing/tests/data/reveal/arithmetic.pyi b/numpy/typing/tests/data/reveal/arithmetic.pyi index 5dd78a197b8f..81a98a9d96d2 100644 --- a/numpy/typing/tests/data/reveal/arithmetic.pyi +++ b/numpy/typing/tests/data/reveal/arithmetic.pyi @@ -64,7 +64,6 @@ AR_LIKE_m: list[np.timedelta64] AR_LIKE_M: list[np.datetime64] AR_LIKE_O: list[np.object_] - # Array subtraction assert_type(AR_number - AR_number, npt.NDArray[np.number]) @@ -556,7 +555,7 @@ assert_type(f16 + f8, np.floating[_128Bit] | np.float64) assert_type(f8 + f8, np.float64) assert_type(i8 + f8, np.float64) assert_type(f4 + f8, np.float32 | np.float64) -assert_type(i4 + f8,np.float64) +assert_type(i4 + f8, np.float64) assert_type(b_ + f8, np.float64) assert_type(b + f8, np.float64) assert_type(c + f8, np.complex128 | np.float64) diff --git a/numpy/typing/tests/data/reveal/arrayprint.pyi b/numpy/typing/tests/data/reveal/arrayprint.pyi index 3b339edced32..de4077654bea 100644 --- a/numpy/typing/tests/data/reveal/arrayprint.pyi +++ b/numpy/typing/tests/data/reveal/arrayprint.pyi @@ -1,6 +1,6 @@ import contextlib from collections.abc import Callable -from typing import Any, assert_type +from typing import assert_type import numpy as np import numpy.typing as npt diff --git a/numpy/typing/tests/data/reveal/bitwise_ops.pyi 
b/numpy/typing/tests/data/reveal/bitwise_ops.pyi index 6c6b56197546..bef7c6739605 100644 --- a/numpy/typing/tests/data/reveal/bitwise_ops.pyi +++ b/numpy/typing/tests/data/reveal/bitwise_ops.pyi @@ -1,5 +1,4 @@ -from typing import Any, TypeAlias, assert_type -from typing import Literal as L +from typing import Literal as L, TypeAlias, assert_type import numpy as np import numpy.typing as npt diff --git a/numpy/typing/tests/data/reveal/comparisons.pyi b/numpy/typing/tests/data/reveal/comparisons.pyi index 2165d17fce34..6df5a3d94314 100644 --- a/numpy/typing/tests/data/reveal/comparisons.pyi +++ b/numpy/typing/tests/data/reveal/comparisons.pyi @@ -1,6 +1,6 @@ import decimal import fractions -from typing import Any, assert_type +from typing import assert_type import numpy as np import numpy.typing as npt @@ -20,10 +20,10 @@ td = np.timedelta64(0, "D") b_ = np.bool() -b = bool() +b = False c = complex() -f = float() -i = int() +f = 0.0 +i = 0 AR = np.array([0], dtype=np.int64) AR.setflags(write=False) diff --git a/numpy/typing/tests/data/reveal/fromnumeric.pyi b/numpy/typing/tests/data/reveal/fromnumeric.pyi index a2ba83e6c4c8..f6cc9b9fe0d7 100644 --- a/numpy/typing/tests/data/reveal/fromnumeric.pyi +++ b/numpy/typing/tests/data/reveal/fromnumeric.pyi @@ -1,7 +1,6 @@ """Tests for :mod:`_core.fromnumeric`.""" from typing import Any, assert_type -from typing import Literal as L import numpy as np import numpy.typing as npt diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index 52c0dfa6ab75..40caad61f24c 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -286,7 +286,7 @@ assert_type(MAR_byte <= MAR_byte, MaskedArray[np.bool]) assert_type(MAR_byte.count(), int) assert_type(MAR_f4.count(axis=None), int) assert_type(MAR_f4.count(axis=0), NDArray[np.int_]) -assert_type(MAR_b.count(axis=(0,1)), NDArray[np.int_]) +assert_type(MAR_b.count(axis=(0, 1)), NDArray[np.int_]) 
assert_type(MAR_o.count(keepdims=True), NDArray[np.int_]) assert_type(MAR_o.count(axis=None, keepdims=True), NDArray[np.int_]) assert_type(MAR_o.count(None, True), NDArray[np.int_]) @@ -294,7 +294,7 @@ assert_type(MAR_o.count(None, True), NDArray[np.int_]) assert_type(np.ma.count(MAR_byte), int) assert_type(np.ma.count(MAR_byte, axis=None), int) assert_type(np.ma.count(MAR_f4, axis=0), NDArray[np.int_]) -assert_type(np.ma.count(MAR_b, axis=(0,1)), NDArray[np.int_]) +assert_type(np.ma.count(MAR_b, axis=(0, 1)), NDArray[np.int_]) assert_type(np.ma.count(MAR_o, keepdims=True), NDArray[np.int_]) assert_type(np.ma.count(MAR_o, axis=None, keepdims=True), NDArray[np.int_]) assert_type(np.ma.count(MAR_o, None, True), NDArray[np.int_]) @@ -302,13 +302,13 @@ assert_type(np.ma.count(MAR_o, None, True), NDArray[np.int_]) assert_type(MAR_f4.compressed(), np.ndarray[tuple[int], np.dtype[np.float32]]) assert_type(np.ma.compressed(MAR_i8), np.ndarray[tuple[int], np.dtype[np.int64]]) -assert_type(np.ma.compressed([[1,2,3]]), np.ndarray[tuple[int], np.dtype]) +assert_type(np.ma.compressed([[1, 2, 3]]), np.ndarray[tuple[int], np.dtype]) -assert_type(MAR_f4.put([0,4,8], [10,20,30]), None) +assert_type(MAR_f4.put([0, 4, 8], [10, 20, 30]), None) assert_type(MAR_f4.put(4, 999), None) assert_type(MAR_f4.put(4, 999, mode='clip'), None) -assert_type(np.ma.put(MAR_f4, [0,4,8], [10,20,30]), None) +assert_type(np.ma.put(MAR_f4, [0, 4, 8], [10, 20, 30]), None) assert_type(np.ma.put(MAR_f4, 4, 999), None) assert_type(np.ma.put(MAR_f4, 4, 999, mode='clip'), None) @@ -320,7 +320,7 @@ assert_type(MAR_i8.filled(), NDArray[np.int64]) assert_type(MAR_1d.filled(), np.ndarray[tuple[int], np.dtype]) assert_type(np.ma.filled(MAR_f4, float('nan')), NDArray[np.float32]) -assert_type(np.ma.filled([[1,2,3]]), NDArray[Any]) +assert_type(np.ma.filled([[1, 2, 3]]), NDArray[Any]) # PyRight detects this one correctly, but mypy doesn't. 
# https://github.com/numpy/numpy/pull/28742#discussion_r2048968375 assert_type(np.ma.filled(MAR_1d), np.ndarray[tuple[int], np.dtype]) # type: ignore[assert-type] @@ -345,7 +345,7 @@ assert_type(np.ma.getmask(MAR_f4), NDArray[np.bool] | np.bool) # `Revealed type is "Union[numpy.ndarray[Any, Any], numpy.bool[Any]]"` assert_type(np.ma.getmask(MAR_1d), np.ndarray[tuple[int], np.dtype[np.bool]] | np.bool) # type: ignore[assert-type] assert_type(np.ma.getmask(MAR_2d_f4), np.ndarray[tuple[int, int], np.dtype[np.bool]] | np.bool) -assert_type(np.ma.getmask([1,2]), NDArray[np.bool] | np.bool) +assert_type(np.ma.getmask([1, 2]), NDArray[np.bool] | np.bool) assert_type(np.ma.getmask(np.int64(1)), np.bool) assert_type(np.ma.is_mask(MAR_1d), bool) @@ -458,12 +458,12 @@ assert_type(MAR_f8.argsort(endwith=True, fill_value=1.5, stable=False), MaskedAr assert_type(MAR_f8.flat, np.ma.core.MaskedIterator) def invalid_resize() -> None: - assert_type(MAR_f8.resize((1,1)), NoReturn) # type: ignore[arg-type] + assert_type(MAR_f8.resize((1, 1)), NoReturn) # type: ignore[arg-type] assert_type(np.ma.MaskedArray(AR_f4), MaskedArray[np.float32]) -assert_type(np.ma.MaskedArray(np.array([1,2,3]), [True, True, False], np.float16), MaskedArray[np.float16]) -assert_type(np.ma.MaskedArray(np.array([1,2,3]), dtype=np.float16), MaskedArray[np.float16]) -assert_type(np.ma.MaskedArray(np.array([1,2,3]), copy=True), MaskedArray[Any]) +assert_type(np.ma.MaskedArray(np.array([1, 2, 3]), [True, True, False], np.float16), MaskedArray[np.float16]) +assert_type(np.ma.MaskedArray(np.array([1, 2, 3]), dtype=np.float16), MaskedArray[np.float16]) +assert_type(np.ma.MaskedArray(np.array([1, 2, 3]), copy=True), MaskedArray[Any]) # TODO: This one could be made more precise, the return type could be `MaskedArraySubclassC` assert_type(np.ma.MaskedArray(MAR_subclass), MaskedArray[np.complex128]) # TODO: This one could be made more precise, the return type could be `MaskedArraySubclass[np.float32]` @@ -1017,12 +1017,12 
@@ assert_type(MAR_u4 ** AR_LIKE_f, MaskedArray[np.floating]) assert_type(MAR_u4 ** AR_LIKE_c, MaskedArray[np.complexfloating]) assert_type(MAR_u4 ** AR_LIKE_o, Any) -assert_type(AR_LIKE_b ** MAR_u4 , MaskedArray[np.uint32]) -assert_type(AR_LIKE_u ** MAR_u4 , MaskedArray[np.unsignedinteger]) -assert_type(AR_LIKE_i ** MAR_u4 , MaskedArray[np.signedinteger]) -assert_type(AR_LIKE_f ** MAR_u4 , MaskedArray[np.floating]) -assert_type(AR_LIKE_c ** MAR_u4 , MaskedArray[np.complexfloating]) -assert_type(AR_LIKE_o ** MAR_u4 , Any) +assert_type(AR_LIKE_b ** MAR_u4, MaskedArray[np.uint32]) +assert_type(AR_LIKE_u ** MAR_u4, MaskedArray[np.unsignedinteger]) +assert_type(AR_LIKE_i ** MAR_u4, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f ** MAR_u4, MaskedArray[np.floating]) +assert_type(AR_LIKE_c ** MAR_u4, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_o ** MAR_u4, Any) assert_type(MAR_i8 ** AR_LIKE_b, MaskedArray[np.int64]) assert_type(MAR_i8 ** AR_LIKE_u, MaskedArray[np.signedinteger]) @@ -1032,11 +1032,11 @@ assert_type(MAR_i8 ** AR_LIKE_c, MaskedArray[np.complexfloating]) assert_type(MAR_i8 ** AR_LIKE_o, Any) assert_type(MAR_i8 ** AR_LIKE_b, MaskedArray[np.int64]) -assert_type(AR_LIKE_u ** MAR_i8 , MaskedArray[np.signedinteger]) -assert_type(AR_LIKE_i ** MAR_i8 , MaskedArray[np.signedinteger]) -assert_type(AR_LIKE_f ** MAR_i8 , MaskedArray[np.floating]) -assert_type(AR_LIKE_c ** MAR_i8 , MaskedArray[np.complexfloating]) -assert_type(AR_LIKE_o ** MAR_i8 , Any) +assert_type(AR_LIKE_u ** MAR_i8, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_i ** MAR_i8, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f ** MAR_i8, MaskedArray[np.floating]) +assert_type(AR_LIKE_c ** MAR_i8, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_o ** MAR_i8, Any) assert_type(MAR_f8 ** AR_LIKE_b, MaskedArray[np.float64]) assert_type(MAR_f8 ** AR_LIKE_u, MaskedArray[np.float64]) diff --git a/numpy/typing/tests/data/reveal/mod.pyi b/numpy/typing/tests/data/reveal/mod.pyi index 
59a6a1016479..9bbbd5c52d7f 100644 --- a/numpy/typing/tests/data/reveal/mod.pyi +++ b/numpy/typing/tests/data/reveal/mod.pyi @@ -1,6 +1,5 @@ import datetime as dt -from typing import Literal as L -from typing import assert_type +from typing import Literal as L, assert_type import numpy as np import numpy.typing as npt diff --git a/numpy/typing/tests/data/reveal/ndarray_conversion.pyi b/numpy/typing/tests/data/reveal/ndarray_conversion.pyi index bbd42573a774..5cb9b98029dd 100644 --- a/numpy/typing/tests/data/reveal/ndarray_conversion.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_conversion.pyi @@ -37,7 +37,6 @@ any_sctype: np.ndarray[Any, Any] assert_type(any_dtype.tolist(), Any) assert_type(any_sctype.tolist(), Any) - # itemset does not return a value # tobytes is pretty simple # tofile does not return a value diff --git a/numpy/typing/tests/data/reveal/ndarray_misc.pyi b/numpy/typing/tests/data/reveal/ndarray_misc.pyi index b219f4e5bec2..2972a58c328f 100644 --- a/numpy/typing/tests/data/reveal/ndarray_misc.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_misc.pyi @@ -6,12 +6,11 @@ function-based counterpart in `../from_numeric.py`. 
""" -from collections.abc import Iterator import ctypes as ct import operator +from collections.abc import Iterator from types import ModuleType from typing import Any, Literal, assert_type - from typing_extensions import CapsuleType import numpy as np diff --git a/numpy/typing/tests/data/reveal/polynomial_polybase.pyi b/numpy/typing/tests/data/reveal/polynomial_polybase.pyi index bb927035e40c..11265b92ff67 100644 --- a/numpy/typing/tests/data/reveal/polynomial_polybase.pyi +++ b/numpy/typing/tests/data/reveal/polynomial_polybase.pyi @@ -1,8 +1,7 @@ from collections.abc import Sequence from decimal import Decimal from fractions import Fraction -from typing import Any, LiteralString, TypeAlias, TypeVar, assert_type -from typing import Literal as L +from typing import Any, Literal as L, LiteralString, TypeAlias, TypeVar, assert_type import numpy as np import numpy.polynomial as npp diff --git a/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi b/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi index 45522e72102f..07d6c9d1af65 100644 --- a/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi +++ b/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi @@ -1,8 +1,7 @@ from collections.abc import Sequence from decimal import Decimal from fractions import Fraction -from typing import Any, TypeAlias, assert_type -from typing import Literal as L +from typing import Literal as L, TypeAlias, assert_type import numpy as np import numpy.polynomial.polyutils as pu diff --git a/numpy/typing/tests/data/reveal/polynomial_series.pyi b/numpy/typing/tests/data/reveal/polynomial_series.pyi index 93f0799c818d..0f4a9e09f2e7 100644 --- a/numpy/typing/tests/data/reveal/polynomial_series.pyi +++ b/numpy/typing/tests/data/reveal/polynomial_series.pyi @@ -1,5 +1,5 @@ from collections.abc import Sequence -from typing import Any, TypeAlias, assert_type +from typing import TypeAlias, assert_type import numpy as np import numpy.polynomial as npp diff --git 
a/numpy/typing/tests/data/reveal/ufunc_config.pyi b/numpy/typing/tests/data/reveal/ufunc_config.pyi index 748507530aa1..77c27eb3b4ca 100644 --- a/numpy/typing/tests/data/reveal/ufunc_config.pyi +++ b/numpy/typing/tests/data/reveal/ufunc_config.pyi @@ -1,10 +1,9 @@ """Typing tests for `_core._ufunc_config`.""" +from _typeshed import SupportsWrite from collections.abc import Callable from typing import Any, assert_type -from _typeshed import SupportsWrite - import numpy as np def func(a: str, b: int) -> None: ... diff --git a/numpy/typing/tests/data/reveal/ufunclike.pyi b/numpy/typing/tests/data/reveal/ufunclike.pyi index a0ede60e0158..aaae5e80e470 100644 --- a/numpy/typing/tests/data/reveal/ufunclike.pyi +++ b/numpy/typing/tests/data/reveal/ufunclike.pyi @@ -1,4 +1,4 @@ -from typing import Any, assert_type +from typing import assert_type import numpy as np import numpy.typing as npt diff --git a/ruff.toml b/ruff.toml index 4666efdf39bc..b3cfea15b190 100644 --- a/ruff.toml +++ b/ruff.toml @@ -2,7 +2,6 @@ extend-exclude = [ "numpy/__config__.py", "numpy/distutils", "numpy/typing/_char_codes.py", - "numpy/typing/tests/data", "spin/cmds.py", # Submodules. 
"doc/source/_static/scipy-mathjax", @@ -96,6 +95,8 @@ ignore = [ "numpy/_typing/_array_like.py" = ["E501"] "numpy/_typing/_dtype_like.py" = ["E501"] "numpy*pyi" = ["E501"] +# "useless assignments" aren't so useless when you're testing that they don't make type checkers scream +"numpy/typing/tests/data/*" = ["B015", "B018", "E501"] "__init__.py" = ["F401", "F403", "F405"] "__init__.pyi" = ["F401"] From 3231ee85e69f222dad7c39b3e662d87d486a069c Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Thu, 31 Jul 2025 16:24:56 +0100 Subject: [PATCH 0277/1718] TYP: Type ``MaskedArray.compress`` (#29480) --- numpy/ma/core.pyi | 31 ++++++++++++++++++++++++++- numpy/typing/tests/data/reveal/ma.pyi | 5 +++++ 2 files changed, 35 insertions(+), 1 deletion(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index d79b4695bb32..70088a44a000 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -614,7 +614,36 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): set_fill_value: Any def filled(self, /, fill_value: _ScalarLike_co | None = None) -> ndarray[_ShapeT_co, _DTypeT_co]: ... def compressed(self) -> ndarray[tuple[int], _DTypeT_co]: ... - def compress(self, condition, axis=..., out=...): ... + + @overload + def compress( + self, + condition: _ArrayLikeBool_co, + axis: _ShapeLike | None, + out: _ArrayT + ) -> _ArrayT: ... + @overload + def compress( + self, + condition: _ArrayLikeBool_co, + axis: _ShapeLike | None = None, + *, + out: _ArrayT + ) -> _ArrayT: ... + @overload + def compress( + self, + condition: _ArrayLikeBool_co, + axis: None = None, + out: None = None + ) -> MaskedArray[tuple[int], _DTypeT_co]: ... + @overload + def compress( + self, + condition: _ArrayLikeBool_co, + axis: _ShapeLike | None = None, + out: None = None + ) -> MaskedArray[_AnyShape, _DTypeT_co]: ... # TODO: How to deal with the non-commutative nature of `==` and `!=`? 
# xref numpy/numpy#17368 diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index 40caad61f24c..441d39296ffa 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -301,6 +301,11 @@ assert_type(np.ma.count(MAR_o, None, True), NDArray[np.int_]) assert_type(MAR_f4.compressed(), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(MAR_f4.compress([True, False]), np.ma.MaskedArray[tuple[int], np.dtype[np.float32]]) +assert_type(MAR_f4.compress([True, False], axis=0), MaskedArray[np.float32]) +assert_type(MAR_f4.compress([True, False], axis=0, out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.compress([True, False], 0, MAR_subclass), MaskedArraySubclassC) + assert_type(np.ma.compressed(MAR_i8), np.ndarray[tuple[int], np.dtype[np.int64]]) assert_type(np.ma.compressed([[1, 2, 3]]), np.ndarray[tuple[int], np.dtype]) From 0fbd49aad6d941008ccbb32c919186770b153483 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Thu, 31 Jul 2025 16:25:36 +0100 Subject: [PATCH 0278/1718] TYP: Type ``MaskedArray.__setitem__`` (#29478) --- numpy/ma/core.pyi | 1 - numpy/typing/tests/data/pass/ma.py | 32 +++++++++++++++++++++--------- 2 files changed, 23 insertions(+), 10 deletions(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 70088a44a000..c1e98a61b4de 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -577,7 +577,6 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def __getitem__(self: _MaskedArray[void], indx: list[str], /) -> MaskedArray[_ShapeT_co, dtype[void]]: ... - def __setitem__(self, indx, value): ... @property def shape(self) -> _ShapeT_co: ... 
@shape.setter diff --git a/numpy/typing/tests/data/pass/ma.py b/numpy/typing/tests/data/pass/ma.py index 02655d7f88c7..2e2246fb88d2 100644 --- a/numpy/typing/tests/data/pass/ma.py +++ b/numpy/typing/tests/data/pass/ma.py @@ -1,3 +1,4 @@ +import datetime as dt from typing import Any, TypeAlias, TypeVar, cast import numpy as np @@ -15,11 +16,16 @@ MAR_f: MaskedArray[np.float64] = np.ma.MaskedArray([1.0]) MAR_c: MaskedArray[np.complex128] = np.ma.MaskedArray([1j]) MAR_td64: MaskedArray[np.timedelta64] = np.ma.MaskedArray([np.timedelta64(1, "D")]) -MAR_M_dt64: MaskedArray[np.datetime64] = np.ma.MaskedArray([np.datetime64(1, "D")]) +MAR_dt64: MaskedArray[np.datetime64] = np.ma.MaskedArray([np.datetime64(1, "D")]) MAR_S: MaskedArray[np.bytes_] = np.ma.MaskedArray([b'foo'], dtype=np.bytes_) MAR_U: MaskedArray[np.str_] = np.ma.MaskedArray(['foo'], dtype=np.str_) MAR_T = cast(np.ma.MaskedArray[Any, np.dtypes.StringDType], np.ma.MaskedArray(["a"], dtype="T")) +MAR_V: MaskedArray[np.void] = np.ma.MaskedArray( + [(1, 1)], + mask=[(False, False)], + dtype=[('a', int), ('b', int)] +) AR_b: npt.NDArray[np.bool] = np.array([True, False, True]) @@ -38,6 +44,14 @@ MAR_f.flat = [9] +MAR_b[MAR_i > 0] = False +MAR_i[:] = 1 +MAR_f[[0]] = AR_LIKE_f +MAR_c[[0, 0]] = [3, 4 + 3j] +MAR_td64[0] = dt.timedelta(1) +MAR_dt64[0] = dt.datetime(2020, 1, 1) +MAR_V['a'] = [2] + # Inplace addition MAR_b += AR_LIKE_b @@ -65,10 +79,10 @@ MAR_td64 += AR_LIKE_u MAR_td64 += AR_LIKE_i MAR_td64 += AR_LIKE_m -MAR_M_dt64 += AR_LIKE_b -MAR_M_dt64 += AR_LIKE_u -MAR_M_dt64 += AR_LIKE_i -MAR_M_dt64 += AR_LIKE_m +MAR_dt64 += AR_LIKE_b +MAR_dt64 += AR_LIKE_u +MAR_dt64 += AR_LIKE_i +MAR_dt64 += AR_LIKE_m MAR_S += b'snakes' MAR_U += 'snakes' @@ -97,10 +111,10 @@ MAR_td64 -= AR_LIKE_u MAR_td64 -= AR_LIKE_i MAR_td64 -= AR_LIKE_m -MAR_M_dt64 -= AR_LIKE_b -MAR_M_dt64 -= AR_LIKE_u -MAR_M_dt64 -= AR_LIKE_i -MAR_M_dt64 -= AR_LIKE_m +MAR_dt64 -= AR_LIKE_b +MAR_dt64 -= AR_LIKE_u +MAR_dt64 -= AR_LIKE_i +MAR_dt64 -= AR_LIKE_m # 
Inplace floor division From 9da497f5c88a81669c954c3a9e328a15ce0a7220 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Fri, 1 Aug 2025 01:57:37 +0200 Subject: [PATCH 0279/1718] MAINT: bump `mypy` to `1.17.1` (#29493) --- environment.yml | 2 +- requirements/test_requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/environment.yml b/environment.yml index 17de8d3eeb5e..dd7fa1a83e5b 100644 --- a/environment.yml +++ b/environment.yml @@ -25,7 +25,7 @@ dependencies: - hypothesis # For type annotations - typing_extensions>=4.5.0 - - mypy=1.16.1 + - mypy=1.17.1 - orjson # makes mypy faster # For building docs - sphinx>=4.5.0 diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index e50919ef4f7b..b26a5f27bb05 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -12,7 +12,7 @@ pytest-timeout # For testing types. Notes on the restrictions: # - Mypy relies on C API features not present in PyPy # NOTE: Keep mypy in sync with environment.yml -mypy==1.16.1; platform_python_implementation != "PyPy" +mypy==1.17.1; platform_python_implementation != "PyPy" typing_extensions>=4.5.0 # for optional f2py encoding detection charset-normalizer From 60ad38e990073ee7bb6ca657139e67ea2bc8ad18 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 1 Aug 2025 17:13:36 +0000 Subject: [PATCH 0280/1718] MAINT: Bump pypa/cibuildwheel from 3.1.2 to 3.1.3 Bumps [pypa/cibuildwheel](https://github.com/pypa/cibuildwheel) from 3.1.2 to 3.1.3. 
- [Release notes](https://github.com/pypa/cibuildwheel/releases) - [Changelog](https://github.com/pypa/cibuildwheel/blob/main/docs/changelog.md) - [Commits](https://github.com/pypa/cibuildwheel/compare/9e4e50bd76b3190f55304387e333f6234823ea9b...352e01339f0a173aa2a3eb57f01492e341e83865) --- updated-dependencies: - dependency-name: pypa/cibuildwheel dependency-version: 3.1.3 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/emscripten.yml | 2 +- .github/workflows/wheels.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index 5c1b35653d68..2b2ab586ba18 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -49,7 +49,7 @@ jobs: fetch-tags: true persist-credentials: false - - uses: pypa/cibuildwheel@9e4e50bd76b3190f55304387e333f6234823ea9b # v3.1.2 + - uses: pypa/cibuildwheel@352e01339f0a173aa2a3eb57f01492e341e83865 # v3.1.3 env: CIBW_PLATFORM: pyodide diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 3fd37bb60ee5..4fcd8a58f53a 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -174,7 +174,7 @@ jobs: fi - name: Build wheels - uses: pypa/cibuildwheel@9e4e50bd76b3190f55304387e333f6234823ea9b # v3.1.2 + uses: pypa/cibuildwheel@352e01339f0a173aa2a3eb57f01492e341e83865 # v3.1.3 env: CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} From f5a6af86acab7fcd1644fad76b5fbe466a0a98dd Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Sun, 3 Aug 2025 03:56:15 +0100 Subject: [PATCH 0281/1718] TYP: Type ``MaskedArray.__array_finalize__`` and ``MaskedArray.__array_wrap__`` (#29483) --- numpy/ma/core.pyi | 9 +++++++-- numpy/typing/tests/data/reveal/ma.pyi | 2 ++ 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi 
index c1e98a61b4de..e99e0a527773 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -54,6 +54,7 @@ from numpy import ( signedinteger, str_, timedelta64, + ufunc, unsignedinteger, void, ) @@ -520,8 +521,12 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): order: _OrderKACF | None = None, ) -> _MaskedArray[Any]: ... - def __array_finalize__(self, obj): ... - def __array_wrap__(self, obj, context=..., return_scalar=...): ... + def __array_wrap__( + self, + obj: ndarray[_ShapeT, _DTypeT], + context: tuple[ufunc, tuple[Any, ...], int] | None = None, + return_scalar: bool = False, + ) -> MaskedArray[_ShapeT, _DTypeT]: ... @overload # () def view(self, /, dtype: None = None, type: None = None, fill_value: _ScalarLike_co | None = None) -> Self: ... diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index 441d39296ffa..a09fb2d75997 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -313,6 +313,8 @@ assert_type(MAR_f4.put([0, 4, 8], [10, 20, 30]), None) assert_type(MAR_f4.put(4, 999), None) assert_type(MAR_f4.put(4, 999, mode='clip'), None) +assert_type(MAR_c8.__array_wrap__(AR_b), MaskedArray[np.bool]) + assert_type(np.ma.put(MAR_f4, [0, 4, 8], [10, 20, 30]), None) assert_type(np.ma.put(MAR_f4, 4, 999), None) assert_type(np.ma.put(MAR_f4, 4, 999, mode='clip'), None) From 4bb30632369548e1e91f37ecfd7eab4d9815b24f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20G=C3=B3rny?= Date: Sun, 3 Aug 2025 05:07:01 +0200 Subject: [PATCH 0282/1718] Document `cpu-baseline-detect` --- doc/source/reference/simd/build-options.rst | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/doc/source/reference/simd/build-options.rst b/doc/source/reference/simd/build-options.rst index 2b7039136e75..1f105e27077d 100644 --- a/doc/source/reference/simd/build-options.rst +++ b/doc/source/reference/simd/build-options.rst @@ -17,6 +17,11 @@ that target certain CPU features: During the
runtime, NumPy modules will fail to load if any of specified features are not supported by the target CPU (raises Python runtime error). +- ``cpu-baseline-detect``: controls detection of CPU baseline based on compiler + flags. Default value is ``auto`` that enables detection if ``-march=`` + or a similar compiler flag is used. The other possible values are ``enabled`` + and ``disabled`` to respectively enable or disable it unconditionally. + - ``cpu-dispatch``: dispatched set of additional CPU features. Default value is ``max -xop -fma4`` which enables all CPU features, except for AMD legacy features (in case of X86). @@ -182,7 +187,7 @@ Behaviors - ``cpu-baseline`` will be treated as "native" if compiler native flag ``-march=native`` or ``-xHost`` or ``/QxHost`` is enabled through environment variable - ``CFLAGS``:: + ``CFLAGS`` and ``cpu-baseline-detect`` is not ``disabled``:: export CFLAGS="-march=native" pip install . From ae772111208d2e0e536dac94830d13b13198b99e Mon Sep 17 00:00:00 2001 From: ixgbe <1113177880@qq.com> Date: Tue, 5 Aug 2025 00:17:45 +0800 Subject: [PATCH 0283/1718] ENH: Use extern C in arraytypes.h.src file for cpp files (#29504) Signed-off-by: Wang Yang --- numpy/_core/src/multiarray/arraytypes.h.src | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/numpy/_core/src/multiarray/arraytypes.h.src b/numpy/_core/src/multiarray/arraytypes.h.src index a5613aa8dad6..ca8dbeaa67eb 100644 --- a/numpy/_core/src/multiarray/arraytypes.h.src +++ b/numpy/_core/src/multiarray/arraytypes.h.src @@ -1,6 +1,10 @@ #ifndef NUMPY_CORE_SRC_MULTIARRAY_ARRAYTYPES_H_ #define NUMPY_CORE_SRC_MULTIARRAY_ARRAYTYPES_H_ +#ifdef __cplusplus +extern "C" { +#endif + #include "common.h" NPY_NO_EXPORT int @@ -165,4 +169,8 @@ NPY_CPU_DISPATCH_DECLARE(NPY_NO_EXPORT int BOOL_argmax, NPY_NO_EXPORT npy_intp count_nonzero_trivial_dispatcher(npy_intp count, const char* data, npy_intp stride, int dtype_num); +#ifdef __cplusplus +} +#endif + #endif /*
NUMPY_CORE_SRC_MULTIARRAY_ARRAYTYPES_H_ */ From 02d0964feeec4af27787c02242eff26961200705 Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Mon, 4 Aug 2025 14:22:59 -0400 Subject: [PATCH 0284/1718] BUG: Casting from one timedelta64 to another didn't handle NAT. Closes gh-29497. --- numpy/_core/src/multiarray/datetime.c | 15 +++++++++------ numpy/_core/tests/test_datetime.py | 15 +++++++++++++++ 2 files changed, 24 insertions(+), 6 deletions(-) diff --git a/numpy/_core/src/multiarray/datetime.c b/numpy/_core/src/multiarray/datetime.c index d820474532ca..0dac36a0903b 100644 --- a/numpy/_core/src/multiarray/datetime.c +++ b/numpy/_core/src/multiarray/datetime.c @@ -3118,15 +3118,18 @@ cast_datetime_to_datetime(PyArray_DatetimeMetaData *src_meta, */ NPY_NO_EXPORT int cast_timedelta_to_timedelta(PyArray_DatetimeMetaData *src_meta, - PyArray_DatetimeMetaData *dst_meta, - npy_timedelta src_dt, - npy_timedelta *dst_dt) + PyArray_DatetimeMetaData *dst_meta, + npy_timedelta src_dt, + npy_timedelta *dst_dt) { npy_int64 num = 0, denom = 0; - /* If the metadata is the same, short-circuit the conversion */ - if (src_meta->base == dst_meta->base && - src_meta->num == dst_meta->num) { + /* + * If the metadata is the same or if src_dt is NAT, short-circuit + * the conversion. 
+ */ + if ((src_meta->base == dst_meta->base && src_meta->num == dst_meta->num) + || src_dt == NPY_DATETIME_NAT) { *dst_dt = src_dt; return 0; } diff --git a/numpy/_core/tests/test_datetime.py b/numpy/_core/tests/test_datetime.py index 888bb7db293b..70b55c500f3d 100644 --- a/numpy/_core/tests/test_datetime.py +++ b/numpy/_core/tests/test_datetime.py @@ -844,6 +844,21 @@ def test_timedelta_array_str(self): a = np.array([-1, 'NaT', 1234567], dtype=' Date: Tue, 5 Aug 2025 13:27:04 +0200 Subject: [PATCH 0285/1718] BLD: update vendored Meson to 1.8.3 (#29509) --- vendored-meson/meson | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vendored-meson/meson b/vendored-meson/meson index f754c4258805..e72c717199fa 160000 --- a/vendored-meson/meson +++ b/vendored-meson/meson @@ -1 +1 @@ -Subproject commit f754c4258805056ed7be09830d96af45215d341b +Subproject commit e72c717199fa18d34020c7c97f9de3f388c5e055 From 287cf8f7edadc99999da64f8be053f55b9c5d63e Mon Sep 17 00:00:00 2001 From: riku-sakamoto <46015196+riku-sakamoto@users.noreply.github.com> Date: Tue, 5 Aug 2025 22:05:55 +0900 Subject: [PATCH 0286/1718] DOC: Add narrative documentation for printing NumPy arrays (#29502) * DOC: Add narrative documentation for printing NumPy arrays Resolves #29476. This commit adds a narrative guide on how to print NumPy arrays to the "how-to" section. [skip cirrus] [skip actions] [skip azp] * DOC: apply suggested changes to narrative documentation [skip cirrus] [skip actions] [skip azp] --- doc/source/user/how-to-print.rst | 112 +++++++++++++++++++++++++++++++ doc/source/user/howtos_index.rst | 1 + 2 files changed, 113 insertions(+) create mode 100644 doc/source/user/how-to-print.rst diff --git a/doc/source/user/how-to-print.rst b/doc/source/user/how-to-print.rst new file mode 100644 index 000000000000..6195b6ed4c70 --- /dev/null +++ b/doc/source/user/how-to-print.rst @@ -0,0 +1,112 @@ +.. 
_how-to-print: + +======================= + Printing NumPy Arrays +======================= + + +This page explains how to control the formatting of printed NumPy arrays. +Note that these printing options apply only to arrays, not to scalars. + +Defining printing options +========================= + +Applying settings globally +-------------------------- + +Use :func:`numpy.set_printoptions` to change printing options for the entire runtime session. To inspect current print settings, use :func:`numpy.get_printoptions`: + + >>> np.set_printoptions(precision=2) + >>> np.get_printoptions() + {'edgeitems': 3, 'threshold': 1000, 'floatmode': 'maxprec', 'precision': 2, 'suppress': False, 'linewidth': 75, 'nanstr': 'nan', 'infstr': 'inf', 'sign': '-', 'formatter': None, 'legacy': False, 'override_repr': None} + +To restore the default settings, use: + + >>> np.set_printoptions(edgeitems=3, infstr='inf', + ... linewidth=75, nanstr='nan', precision=8, + ... suppress=False, threshold=1000, formatter=None) + + +Applying settings temporarily +----------------------------- + +Use :func:`numpy.printoptions` as a context manager to temporarily override print settings within a specific scope: + + + >>> arr = np.array([0.155, 0.184, 0.173]) + >>> with np.printoptions(precision=2): + ... print(arr) + [0.15 0.18 0.17] + + +All keywords that apply to :func:`numpy.set_printoptions` also apply to :func:`numpy.printoptions`. + + +Changing the number of digits of precision +========================================== + +The default number of fractional digits displayed is 8. You can change this number using ``precision`` keyword. + + >>> arr = np.array([0.1, 0.184, 0.17322]) + >>> with np.printoptions(precision=2): + ... print(arr) + [0.1 0.18 0.17] + + +The ``floatmode`` option determines how the ``precision`` setting is interpreted. 
+By default, ``floatmode=maxprec_equal`` displays values with the minimal number of digits needed to uniquely represent them, +using the same number of digits across all elements. +If you want to show exactly the same number of digits specified by ``precision``, use ``floatmode=fixed``: + + >>> arr = np.array([0.1, 0.184, 0.173], dtype=np.float32) + >>> with np.printoptions(precision=2, floatmode="fixed"): + ... print(arr) + [0.10 0.18 0.17] + + +Changing how `nan` and `inf` are displayed +========================================== + +By default, `numpy.nan` is displayed as `nan` and `numpy.inf` is displayed as `inf`. +You can override these representations using the ``nanstr`` and ``infstr`` options: + + >>> arr = np.array([np.inf, np.nan, 0]) + >>> with np.printoptions(nanstr="NAN", infstr="INF"): + ... print(arr) + [INF NAN 0.] + + +Controlling scientific notations +================================ + +By default, NumPy uses scientific notation when: + +- The absolute value of the smallest number is less than ``1e-4``, or +- The ratio of the largest to the smallest absolute value is greater than ``1e3`` + + >>> arr = np.array([0.00002, 210000.0, 3.14]) + >>> print(arr) + [2.00e-05 2.10e+05 3.14e+00] + +To suppress scientific notation and always use fixed-point notation, set ``suppress=True``: + + >>> arr = np.array([0.00002, 210000.0, 3.14]) + >>> with np.printoptions(suppress=True): + ... print(arr) + [ 0.00002 210000. 3.14 ] + + + +Applying custom formatting functions +==================================== + +You can apply custom formatting functions to specific or all data types using ``formatter`` keyword. +See :func:`numpy.set_printoptions` for more details on supported format keys. + +For example, to format `datetime64` values with a custom function: + + >>> arr = np.array([np.datetime64("2025-01-01"), np.datetime64("2024-01-01")]) + >>> with np.printoptions(formatter={"datetime":lambda x: f"(Year: {x.item().year}, Month: {x.item().month})"}): + ... 
print(arr) + [(Year: 2025, Month: 1) (Year: 2024, Month: 1)] + diff --git a/doc/source/user/howtos_index.rst b/doc/source/user/howtos_index.rst index ca30f7e9115d..a8a8229dd7dd 100644 --- a/doc/source/user/howtos_index.rst +++ b/doc/source/user/howtos_index.rst @@ -16,3 +16,4 @@ the package, see the :ref:`API reference `. how-to-index how-to-verify-bug how-to-partition + how-to-print From 6ad69259282d007bdfd15f12ea638b50b7e95c76 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Tue, 5 Aug 2025 10:44:27 -0600 Subject: [PATCH 0287/1718] TST: don't explicitly specify -j in TSAN build [skip cirrus] [skip azp] [skip circleci] --- .github/workflows/compiler_sanitizers.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/compiler_sanitizers.yml b/.github/workflows/compiler_sanitizers.yml index 0581f7fc591b..a02f6f0c1609 100644 --- a/.github/workflows/compiler_sanitizers.yml +++ b/.github/workflows/compiler_sanitizers.yml @@ -89,7 +89,7 @@ jobs: run: pip uninstall -y pytest-xdist - name: Build NumPy with ThreadSanitizer - run: python -m spin build -j2 -- -Db_sanitize=thread + run: python -m spin build -- -Db_sanitize=thread - name: Run tests under prebuilt TSAN container run: | From d1a2ccc53b8c4ac13f1d10c6c1db4d5be1eecadd Mon Sep 17 00:00:00 2001 From: Britney Whittington <103079612+bwhitt7@users.noreply.github.com> Date: Tue, 5 Aug 2025 14:52:40 -0400 Subject: [PATCH 0288/1718] MAINT: Bump hypothesis to 6.137.1 (#29513) --- requirements/test_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index b26a5f27bb05..44f73a844591 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -2,7 +2,7 @@ Cython wheel==0.38.1 setuptools==65.5.1 ; python_version < '3.12' setuptools ; python_version >= '3.12' -hypothesis==6.104.1 +hypothesis==6.137.1 pytest==7.4.0 pytest-cov==4.1.0 meson From 
72574772c1cefe1ccdabc254994a4b45602f977f Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Wed, 6 Aug 2025 13:20:28 -0400 Subject: [PATCH 0289/1718] BUG: random: Fix handling of very small p in Generator.binomial. Use `log1p` to compute an expression of `1 - p` with more precision. To not change the legacy interface, a copy of the unaltered function `random_binomial_inversion` is used in the legacy distributions code. --- .../random/src/distributions/distributions.c | 2 +- .../random/src/legacy/legacy-distributions.c | 42 ++++++++++++++++++- numpy/random/tests/test_generator_mt19937.py | 18 ++++++++ 3 files changed, 59 insertions(+), 3 deletions(-) diff --git a/numpy/random/src/distributions/distributions.c b/numpy/random/src/distributions/distributions.c index aa81a4a173d4..5ff694b2cde9 100644 --- a/numpy/random/src/distributions/distributions.c +++ b/numpy/random/src/distributions/distributions.c @@ -770,7 +770,7 @@ RAND_INT_TYPE random_binomial_inversion(bitgen_t *bitgen_state, RAND_INT_TYPE n, binomial->psave = p; binomial->has_binomial = 1; binomial->q = q = 1.0 - p; - binomial->r = qn = exp(n * log(q)); + binomial->r = qn = exp(n * log1p(-p)); binomial->c = np = n * p; binomial->m = bound = (RAND_INT_TYPE)MIN(n, np + 10.0 * sqrt(np * q + 1)); } else { diff --git a/numpy/random/src/legacy/legacy-distributions.c b/numpy/random/src/legacy/legacy-distributions.c index 14d9ce25f255..385c4c239a57 100644 --- a/numpy/random/src/legacy/legacy-distributions.c +++ b/numpy/random/src/legacy/legacy-distributions.c @@ -228,6 +228,44 @@ double legacy_exponential(aug_bitgen_t *aug_state, double scale) { return scale * legacy_standard_exponential(aug_state); } +static RAND_INT_TYPE legacy_random_binomial_inversion( + bitgen_t *bitgen_state, RAND_INT_TYPE n, double p, binomial_t *binomial +) +{ + double q, qn, np, px, U; + RAND_INT_TYPE X, bound; + + if (!(binomial->has_binomial) || (binomial->nsave != n) || + (binomial->psave != p)) { + binomial->nsave = n; + 
binomial->psave = p; + binomial->has_binomial = 1; + binomial->q = q = 1.0 - p; + binomial->r = qn = exp(n * log(q)); + binomial->c = np = n * p; + binomial->m = bound = (RAND_INT_TYPE)MIN(n, np + 10.0 * sqrt(np * q + 1)); + } else { + q = binomial->q; + qn = binomial->r; + np = binomial->c; + bound = binomial->m; + } + X = 0; + px = qn; + U = next_double(bitgen_state); + while (U > px) { + X++; + if (X > bound) { + X = 0; + px = qn; + U = next_double(bitgen_state); + } else { + U -= px; + px = ((n - X + 1) * p * px) / (X * q); + } + } + return X; +} static RAND_INT_TYPE legacy_random_binomial_original(bitgen_t *bitgen_state, double p, @@ -237,14 +275,14 @@ static RAND_INT_TYPE legacy_random_binomial_original(bitgen_t *bitgen_state, if (p <= 0.5) { if (p * n <= 30.0) { - return random_binomial_inversion(bitgen_state, n, p, binomial); + return legacy_random_binomial_inversion(bitgen_state, n, p, binomial); } else { return random_binomial_btpe(bitgen_state, n, p, binomial); } } else { q = 1.0 - p; if (q * n <= 30.0) { - return n - random_binomial_inversion(bitgen_state, n, q, binomial); + return n - legacy_random_binomial_inversion(bitgen_state, n, q, binomial); } else { return n - random_binomial_btpe(bitgen_state, n, q, binomial); } diff --git a/numpy/random/tests/test_generator_mt19937.py b/numpy/random/tests/test_generator_mt19937.py index 50c232d4a8e7..c32612218fcf 100644 --- a/numpy/random/tests/test_generator_mt19937.py +++ b/numpy/random/tests/test_generator_mt19937.py @@ -99,6 +99,24 @@ def test_p_is_nan(self): # Issue #4571. assert_raises(ValueError, random.binomial, 1, np.nan) + def test_p_extremely_small(self): + n = 50000000000 + p = 5e-17 + sample_size = 20000000 + x = random.binomial(n, p, size=sample_size) + sample_mean = x.mean() + expected_mean = n*p + sigma = np.sqrt(n*p*(1 - p)/sample_size) + # Note: the parameters were chosen so that expected_mean - 6*sigma + # is a positive value. 
The first `assert` below validates that + # assumption (in case someone edits the parameters in the future). + # The second `assert` is the actual test. + low_bound = expected_mean - 6*sigma + assert low_bound > 0, "bad test params: 6-sigma lower bound is negative" + test_msg = (f"sample mean {sample_mean} deviates from the expected mean " + f"{expected_mean} by more than 6*sigma") + assert abs(expected_mean - sample_mean) < 6*sigma, test_msg + class TestMultinomial: def test_basic(self): From 3f7f06fd9a22bbe1c1450dc993fdb3fd57d91dbf Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 6 Aug 2025 17:34:59 +0000 Subject: [PATCH 0290/1718] MAINT: Bump actions/download-artifact from 4.3.0 to 5.0.0 Bumps [actions/download-artifact](https://github.com/actions/download-artifact) from 4.3.0 to 5.0.0. - [Release notes](https://github.com/actions/download-artifact/releases) - [Commits](https://github.com/actions/download-artifact/compare/d3f86a106a0bac45b974a628896c90dbdf5c8093...634f93cb2916e3fdff6788551b99b062d0335ce0) --- updated-dependencies: - dependency-name: actions/download-artifact dependency-version: 5.0.0 dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] --- .github/workflows/emscripten.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index 2b2ab586ba18..8269a8dcd705 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -73,7 +73,7 @@ jobs: (github.event_name == 'schedule') steps: - name: Download wheel artifact(s) - uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0 + uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 with: path: wheelhouse/ merge-multiple: true From 8a153faba14b7e65583682bf6af7ecf12f173c70 Mon Sep 17 00:00:00 2001 From: Maryanne Wachter Date: Wed, 6 Aug 2025 10:58:42 -0700 Subject: [PATCH 0291/1718] CI: Add UBSAN CI jobs for macOS arm64 and Linux x86-64 (#29487) --- .github/workflows/compiler_sanitizers.yml | 51 ++++++++++++++++++++--- tools/ci/ubsan_suppressions_arm64.txt | 51 +++++++++++++++++++++++ tools/ci/ubsan_suppressions_x86_64.txt | 31 ++++++++++++++ 3 files changed, 128 insertions(+), 5 deletions(-) create mode 100644 tools/ci/ubsan_suppressions_arm64.txt create mode 100644 tools/ci/ubsan_suppressions_x86_64.txt diff --git a/.github/workflows/compiler_sanitizers.yml b/.github/workflows/compiler_sanitizers.yml index a02f6f0c1609..dc65449ba752 100644 --- a/.github/workflows/compiler_sanitizers.yml +++ b/.github/workflows/compiler_sanitizers.yml @@ -21,7 +21,7 @@ permissions: contents: read # to fetch code (actions/checkout) jobs: - clang_ASAN: + clang_ASAN_UBSAN: # To enable this workflow on a fork, comment out: if: github.repository == 'numpy/numpy' runs-on: macos-latest @@ -64,12 +64,14 @@ jobs: pip uninstall -y pytest-xdist - name: Build run: - python -m spin build -j2 -- -Db_sanitize=address + python -m spin build -- -Db_sanitize=address,undefined -Db_lundef=false - name: Test run: | # pass -s to pytest to see ASAN errors and warnings, otherwise pytest captures them + # 
Ignore test_casting_floatingpoint_errors on macOS for now - causes crash inside UBSAN ASAN_OPTIONS=detect_leaks=0:symbolize=1:strict_init_order=true:allocator_may_return_null=1 \ - python -m spin test -- -v -s --timeout=600 --durations=10 + UBSAN_OPTIONS=halt_on_error=1:suppressions=${GITHUB_WORKSPACE}/tools/ci/ubsan_suppressions_arm64.txt \ + python -m spin test -- -k "not test_casting_floatingpoint_errors" -v -s --timeout=600 --durations=10 clang_TSAN: # To enable this workflow on a fork, comment out: @@ -78,9 +80,9 @@ jobs: container: image: ghcr.io/nascheme/numpy-tsan:3.14t options: --shm-size=2g # increase memory for large matrix ops - + steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Trust working directory and initialize submodules run: | git config --global --add safe.directory /__w/numpy/numpy @@ -98,3 +100,42 @@ jobs: python -m spin test \ `find numpy -name "test*.py" | xargs grep -l "import threading" | tr '\n' ' '` \ -- -v -s --timeout=600 --durations=10 + + ubuntu_UBSAN: + # To enable this workflow on a fork, comment out: + if: github.repository == 'numpy/numpy' + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: recursive + fetch-tags: true + persist-credentials: false + - name: Set up pyenv + run: | + git clone https://github.com/pyenv/pyenv.git "$HOME/.pyenv" + PYENV_ROOT="$HOME/.pyenv" + PYENV_BIN="$PYENV_ROOT/bin" + PYENV_SHIMS="$PYENV_ROOT/shims" + echo "$PYENV_BIN" >> $GITHUB_PATH + echo "$PYENV_SHIMS" >> $GITHUB_PATH + echo "PYENV_ROOT=$PYENV_ROOT" >> $GITHUB_ENV + - name: Check pyenv is working + run: pyenv --version + - name: Build python with address sanitizer + run: | + CONFIGURE_OPTS="--with-address-sanitizer" pyenv install 3.14t + pyenv global 3.14t + - name: Install dependencies + run: | + pip install -r requirements/build_requirements.txt + pip install -r 
requirements/ci_requirements.txt + pip install -r requirements/test_requirements.txt + - name: Build numpy with UndefinedBehaviorSanitizer + run: python -m spin build -- -Db_sanitize=address,undefined -Db_lundef=false + - name: Test + run: | + # pass -s to pytest to see UBSAN errors and warnings, otherwise pytest captures them + ASAN_OPTIONS=detect_leaks=0:symbolize=1:strict_init_order=true:allocator_may_return_null=1 \ + UBSAN_OPTIONS=halt_on_error=1:suppressions=${GITHUB_WORKSPACE}/tools/ci/ubsan_suppressions_x86_64.txt \ + spin test -- -v -s --timeout=600 --durations=10 diff --git a/tools/ci/ubsan_suppressions_arm64.txt b/tools/ci/ubsan_suppressions_arm64.txt new file mode 100644 index 000000000000..69de4a4c425f --- /dev/null +++ b/tools/ci/ubsan_suppressions_arm64.txt @@ -0,0 +1,51 @@ +# This file contains suppressions for the default (with GIL) build to prevent runtime errors +# when numpy is built with -Db_sanitize=undefined for arm64 architecture +# +# reference: https://clang.llvm.org/docs/UndefinedBehaviorSanitizer.html#available-checks + +# Per this prior discussion, integer overflow is not a concern +# https://github.com/numpy/numpy/issues/24209#issuecomment-2160154181 +signed-integer-overflow:* + +# all alignment runtime errors ignored in favor of this being tracked via TypeSanitizer +# otherwise ubsan may detect system file alignment errors outside numpy +alignment:* + +# suggested fix for runtime error: use INT_MIN constant +shift-base:_core/src/umath/_rational_tests.c +# suggested fix for runtime error: check for overflow if signed +shift-base:_core/src/npymath/npy_math_internal.h + +# suggested fix for runtime error: null check before loop +pointer-overflow:_core/src/common/simd/neon/memory.h +pointer-overflow:_core/src/multiarray/datetime_busdaycal.c +pointer-overflow:_core/src/multiarray/nditer_templ.c +pointer-overflow:_core/src/multiarray/nditer_constr.c +pointer-overflow:_core/src/umath/loops_arithm_fp.dispatch.c.src 
+pointer-overflow:_core/src/umath/loops_unary.dispatch.c.src +pointer-overflow:_core/src/umath/loops_unary_complex.dispatch.c.src +pointer-overflow:_core/src/umath/loops_unary_fp_le.dispatch.c.src +pointer-overflow:_core/src/umath/string_buffer.h +pointer-overflow:linalg/umath_linalg.cpp +pointer-overflow:numpy/random/bit_generator.pyx.c + +float-cast-overflow:_core/src/multiarray/lowlevel_strided_loops.c.src + +# flagged in CI - call to function through pointer to incorrect function type +# Many functions in the modules/files listed below cause undefined behavior in CI +# general disable this check until further investigation, but keep the specific files +# as a starting point for resolving the checks later +function:_core/src/* +function:numpy/random/* +# function:_core/src/common/cblasfunc.c +# function:_core/src/common/npy_argparse.c +# function:_core/src/multiarray/number.c +# function:_core/src/multiarray/ctors.c +# function:_core/src/multiarray/convert_datatype.c +# function:_core/src/multiarray/dtype_transfer.c +# function:_core/src/multiarray/dtype_traversal.c +# function:_core/src/multiarray/getset.c +# function:_core/src/multiarray/scalarapi.c +# function:_core/src/multiarray/scalartypes.c.src +# function:_core/src/umath/* +# function:numpy/random/* diff --git a/tools/ci/ubsan_suppressions_x86_64.txt b/tools/ci/ubsan_suppressions_x86_64.txt new file mode 100644 index 000000000000..d9872a691a81 --- /dev/null +++ b/tools/ci/ubsan_suppressions_x86_64.txt @@ -0,0 +1,31 @@ +# This file contains suppressions for the default (with GIL) build to prevent runtime errors +# when numpy is built with -Db_sanitize=undefined for x86_64 architecture +# +# reference: https://clang.llvm.org/docs/UndefinedBehaviorSanitizer.html#available-checks + +# Per this prior discussion, integer overflow is not a concern +# https://github.com/numpy/numpy/issues/24209#issuecomment-2160154181 +signed-integer-overflow:* + +# all alignment runtime errors ignored in favor of this being 
tracked via TypeSanitizer +# otherwise ubsan may detect system file alignment errors outside numpy +alignment:* + +# suggested fix for runtime error: replace left bit shift with LLONG_MIN constant +shift-base:_core/src/common/simd/sse/arithmetic.h +shift-base:_core/src/common/simd/avx2/arithmetic.h +# suggested fix for runtime error: use INT_MIN constant +shift-base:_core/src/umath/_rational_tests.c +# suggested fix for runtime error: check for overflow if signed +shift-base:_core/src/npymath/npy_math_internal.h + + +# suggested fix for runtime error: check that pointer is not null before calling function +nonnull-attribute:_core/src/multiarray/array_coercion.c +nonnull-attribute:_core/src/multiarray/ctors.c +nonnull-attribute:_core/src/multiarray/datetime_busdaycal.c +nonnull-attribute:_core/src/multiarray/scalarapi.c +nonnull-attribute:_core/src/multiarray/calculation.c + +# suggested fix for runtime error: null check before loop +pointer-overflow:_core/src/multiarray/nditer_templ.c From 7ffe59ff1585de25b5f4e4da92fd7efc93aca2c8 Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Wed, 6 Aug 2025 17:00:15 -0400 Subject: [PATCH 0292/1718] STY: Add whitespace to appease ruff (rule E226). --- numpy/random/tests/test_generator_mt19937.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/numpy/random/tests/test_generator_mt19937.py b/numpy/random/tests/test_generator_mt19937.py index c32612218fcf..a06212efce0d 100644 --- a/numpy/random/tests/test_generator_mt19937.py +++ b/numpy/random/tests/test_generator_mt19937.py @@ -105,17 +105,17 @@ def test_p_extremely_small(self): sample_size = 20000000 x = random.binomial(n, p, size=sample_size) sample_mean = x.mean() - expected_mean = n*p - sigma = np.sqrt(n*p*(1 - p)/sample_size) + expected_mean = n * p + sigma = np.sqrt(n * p * (1 - p) / sample_size) # Note: the parameters were chosen so that expected_mean - 6*sigma # is a positive value. 
The first `assert` below validates that # assumption (in case someone edits the parameters in the future). # The second `assert` is the actual test. - low_bound = expected_mean - 6*sigma + low_bound = expected_mean - 6 * sigma assert low_bound > 0, "bad test params: 6-sigma lower bound is negative" test_msg = (f"sample mean {sample_mean} deviates from the expected mean " f"{expected_mean} by more than 6*sigma") - assert abs(expected_mean - sample_mean) < 6*sigma, test_msg + assert abs(expected_mean - sample_mean) < 6 * sigma, test_msg class TestMultinomial: From 0a7162cbe2bee464cf6a3ab4efc4b013f7467691 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 7 Aug 2025 17:51:24 +0000 Subject: [PATCH 0293/1718] MAINT: Bump actions/cache from 4.2.3 to 4.2.4 Bumps [actions/cache](https://github.com/actions/cache) from 4.2.3 to 4.2.4. - [Release notes](https://github.com/actions/cache/releases) - [Changelog](https://github.com/actions/cache/blob/main/RELEASES.md) - [Commits](https://github.com/actions/cache/compare/5a3ec84eff668545956fd18022155c47e93e2684...0400d5f644dc74513175e3cd8d07132dd4860809) --- updated-dependencies: - dependency-name: actions/cache dependency-version: 4.2.4 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- .github/workflows/linux_qemu.yml | 4 ++-- .github/workflows/macos.yml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/linux_qemu.yml b/.github/workflows/linux_qemu.yml index 1293e9c37c2f..84e221439aa6 100644 --- a/.github/workflows/linux_qemu.yml +++ b/.github/workflows/linux_qemu.yml @@ -108,7 +108,7 @@ jobs: sudo apt install -y ninja-build gcc-${TOOLCHAIN_NAME} g++-${TOOLCHAIN_NAME} gfortran-${TOOLCHAIN_NAME} - name: Cache docker container - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 id: container-cache with: path: ~/docker_${{ matrix.BUILD_PROP[1] }} @@ -213,7 +213,7 @@ jobs: sudo apt install -y ninja-build gcc-14-${TOOLCHAIN_NAME} g++-14-${TOOLCHAIN_NAME} gfortran-14-${TOOLCHAIN_NAME} - name: Cache docker container - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 id: container-cache with: path: ~/docker_${{ matrix.BUILD_PROP[1] }} diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 418dc7d52fc1..8931b76c5dee 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -46,7 +46,7 @@ jobs: echo "today=$(/bin/date -u '+%Y%m%d')" >> $GITHUB_OUTPUT - name: Setup compiler cache - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 id: cache-ccache with: path: ${{ steps.prep-ccache.outputs.dir }} @@ -70,7 +70,7 @@ jobs: # ensure we re-solve once a day (since we don't lock versions). Could be # replaced by a conda-lock based approach in the future. 
- name: Cache conda environment - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 env: # Increase this value to reset cache if environment.yml has not changed CACHE_NUMBER: 1 From bd6507407a335486e1bee38b6c0039d98fe3115c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 7 Aug 2025 17:51:37 +0000 Subject: [PATCH 0294/1718] MAINT: Bump github/codeql-action from 3.29.5 to 3.29.6 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.29.5 to 3.29.6. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/51f77329afa6477de8c49fc9c7046c15b9a4e79d...a4e1a019f5e24960714ff6296aee04b736cbc3cf) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.29.6 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 2d5fdb08a9d4..59489ea79e65 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3.29.5 + uses: github/codeql-action/init@a4e1a019f5e24960714ff6296aee04b736cbc3cf # v3.29.6 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 
# If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3.29.5 + uses: github/codeql-action/autobuild@a4e1a019f5e24960714ff6296aee04b736cbc3cf # v3.29.6 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3.29.5 + uses: github/codeql-action/analyze@a4e1a019f5e24960714ff6296aee04b736cbc3cf # v3.29.6 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 079813c3aea3..e2508a09bba7 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
- name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v2.1.27 + uses: github/codeql-action/upload-sarif@a4e1a019f5e24960714ff6296aee04b736cbc3cf # v2.1.27 with: sarif_file: results.sarif From ed46cdeb9bc788c8fdd8c6c318e9af933b7e0edb Mon Sep 17 00:00:00 2001 From: Amelia Thurdekoos Date: Thu, 7 Aug 2025 16:14:24 -0700 Subject: [PATCH 0295/1718] DOC:clarify build compatibility to dev depending page Review page to make distinction clearer per GH discussion Standardized the use of C-API vs C API Closes #27265 --- doc/source/dev/depending_on_numpy.rst | 33 ++++++++++++++------------- 1 file changed, 17 insertions(+), 16 deletions(-) diff --git a/doc/source/dev/depending_on_numpy.rst b/doc/source/dev/depending_on_numpy.rst index 70476a3cc1b3..fd5c13885a74 100644 --- a/doc/source/dev/depending_on_numpy.rst +++ b/doc/source/dev/depending_on_numpy.rst @@ -26,12 +26,13 @@ well known scientific Python projects, does **not** use semantic versioning. Instead, backwards incompatible API changes require deprecation warnings for at least two releases. For more details, see :ref:`NEP23`. -NumPy has both a Python API and a C API. The C API can be used directly or via -Cython, f2py, or other such tools. If your package uses the C API, then ABI -(application binary interface) stability of NumPy is important. NumPy's ABI is -forward but not backward compatible. This means: binaries compiled against a -given target version of NumPy's C API will still run correctly with newer NumPy -versions, but not with older versions. +NumPy provides both a Python API and a C-API. The C-API can be accessed +directly or through tools like Cython or f2py. If your package uses the +C-API, it's important to understand NumPy's application binary interface +(ABI) compatibility: NumPy's ABI is forward compatible but not backward +compatible. 
This means that binaries compiled against an older version of +NumPy will still work with newer versions, but binaries compiled against a +newer version will not necessarily work with older ones. Modules can also be safely built against NumPy 2.0 or later in :ref:`CPython's abi3 mode `, which allows @@ -87,16 +88,16 @@ Build-time dependency `__. -If a package either uses the NumPy C API directly or it uses some other tool +If a package either uses the NumPy C-API directly or it uses some other tool that depends on it like Cython or Pythran, NumPy is a *build-time* dependency of the package. -By default, NumPy will expose an API that is backwards compatible with the -oldest NumPy version that supports the currently oldest compatible Python -version. NumPy 1.25.0 supports Python 3.9 and higher and NumPy 1.19 is the -first version to support Python 3.9. Thus, we guarantee that, when using -defaults, NumPy 1.25 will expose a C-API compatible with NumPy 1.19. -(the exact version is set within NumPy-internal header files). +By default, NumPy exposes a API that is backward compatible with the earliest +NumPy version that supports the oldest Python version currently supported by +NumPy. For example, NumPy 1.25.0 supports Python 3.9 and above; and the +earliest NumPy version to support Python 3.9 was 1.19. Therefore we guarantee +NumPy 1.25 will, when using defaults, expose a C API compatible with NumPy +1.19. (the exact version is set within NumPy-internal header files). NumPy is also forward compatible for all minor releases, but a major release will require recompilation (see NumPy 2.0-specific advice further down). @@ -157,7 +158,7 @@ frequently, (b) use a large part of NumPy's API surface, and (c) is worried that changes in NumPy may break your code, you can set an upper bound of ``=2.0`` (or go. We'll focus on the "keep compatibility with 1.xx and 2.x" now, which is a little more involved. -*Example for a package using the NumPy C API (via C/Cython/etc.) 
which wants to support +*Example for a package using the NumPy C-API (via C/Cython/etc.) which wants to support NumPy 1.23.5 and up*: .. code:: ini From 4640d9317f76dcef11f1d9f92d35218ec3b32fd7 Mon Sep 17 00:00:00 2001 From: Amelia Thurdekoos Date: Thu, 7 Aug 2025 16:45:46 -0700 Subject: [PATCH 0296/1718] Update depending_on_numpy.rst --- doc/source/dev/depending_on_numpy.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/dev/depending_on_numpy.rst b/doc/source/dev/depending_on_numpy.rst index fd5c13885a74..e3c03b0fea65 100644 --- a/doc/source/dev/depending_on_numpy.rst +++ b/doc/source/dev/depending_on_numpy.rst @@ -96,7 +96,7 @@ By default, NumPy exposes a API that is backward compatible with the earliest NumPy version that supports the oldest Python version currently supported by NumPy. For example, NumPy 1.25.0 supports Python 3.9 and above; and the earliest NumPy version to support Python 3.9 was 1.19. Therefore we guarantee -NumPy 1.25 will, when using defaults, expose a C API compatible with NumPy +NumPy 1.25 will, when using defaults, expose a C-API compatible with NumPy 1.19. (the exact version is set within NumPy-internal header files). 
NumPy is also forward compatible for all minor releases, but a major release From a0515ad027cc61100987bbd8c783d1c121fbd803 Mon Sep 17 00:00:00 2001 From: Lysandros Nikolaou Date: Fri, 8 Aug 2025 17:57:02 +0300 Subject: [PATCH 0297/1718] ENH: Use array indexing preparation routines for flatiter objects (#28590) * [ENH] Use array indexing preparation routines for flatiter objects * Fix assign subscript and add tests for it * Fix regression test * Remove unnecessary dims array * Add release note * Remove unnecessary branch from iter_subscript * Add more benchmarks * Add special cases to fix performance regression * Address some feedback - Add test with int16 index - Cast integer index array to intp before using it - Remove unused functions - Change error message back to array - Move docstring to prepare_index_noarray * Address more feedback * Address more feedback; raise error on non-array index * Fix linting errors * Update changelog item * Fix tests * Fix typing tests * Address review feedback * Fix linter errors * Address feedback * Remove wrong comments * Fix linting errors * Fix test_deprecations * Apply suggestions from code review * Update numpy/_core/tests/test_indexing.py --------- Co-authored-by: Sebastian Berg --- benchmarks/benchmarks/bench_indexing.py | 19 + .../upcoming_changes/28590.improvement.rst | 33 ++ numpy/_core/src/multiarray/iterators.c | 524 +++++++----------- numpy/_core/src/multiarray/mapping.c | 133 +++-- numpy/_core/src/multiarray/mapping.h | 22 + numpy/_core/tests/test_deprecations.py | 36 ++ numpy/_core/tests/test_indexing.py | 245 +++++++- numpy/_core/tests/test_regression.py | 4 +- numpy/lib/_shape_base_impl.py | 13 +- numpy/matrixlib/tests/test_masked_matrix.py | 2 +- numpy/typing/tests/data/fail/flatiter.pyi | 2 + numpy/typing/tests/data/pass/flatiter.py | 1 - 12 files changed, 624 insertions(+), 410 deletions(-) create mode 100644 doc/release/upcoming_changes/28590.improvement.rst diff --git a/benchmarks/benchmarks/bench_indexing.py 
b/benchmarks/benchmarks/bench_indexing.py index 6ac124cac88d..81c812e81e19 100644 --- a/benchmarks/benchmarks/bench_indexing.py +++ b/benchmarks/benchmarks/bench_indexing.py @@ -134,6 +134,7 @@ def setup(self): self.m_half = np.copy(self.m_all) self.m_half[::2] = False self.m_none = np.repeat(False, 200 * 50000) + self.m_index_2d = np.arange(200 * 50000).reshape((100, 100000)) def time_flat_bool_index_none(self): self.a.flat[self.m_none] @@ -143,3 +144,21 @@ def time_flat_bool_index_half(self): def time_flat_bool_index_all(self): self.a.flat[self.m_all] + + def time_flat_fancy_index_2d(self): + self.a.flat[self.m_index_2d] + + def time_flat_empty_tuple_index(self): + self.a.flat[()] + + def time_flat_ellipsis_index(self): + self.a.flat[...] + + def time_flat_bool_index_0d(self): + self.a.flat[True] + + def time_flat_int_index(self): + self.a.flat[1_000_000] + + def time_flat_slice_index(self): + self.a.flat[1_000_000:2_000_000] diff --git a/doc/release/upcoming_changes/28590.improvement.rst b/doc/release/upcoming_changes/28590.improvement.rst new file mode 100644 index 000000000000..35f5cb3c2ad2 --- /dev/null +++ b/doc/release/upcoming_changes/28590.improvement.rst @@ -0,0 +1,33 @@ +Fix ``flatiter`` indexing edge cases +------------------------------------ + +The ``flatiter`` object now shares the same index preparation logic as +``ndarray``, ensuring consistent behavior and fixing several issues where +invalid indices were previously accepted or misinterpreted. + +Key fixes and improvements: + +* Stricter index validation + + - Boolean non-array indices like ``arr.flat[[True, True]]`` were + incorrectly treated as ``arr.flat[np.array([1, 1], dtype=int)]``. + They now raise an index error. Note that indices that match the + iterator's shape are expected to not raise in the future and be + handled as regular boolean indices. Use ``np.asarray()`` if + you want to match that behavior. 
+ - Float non-array indices were also cast to integer and incorrectly + treated as ``arr.flat[np.array([1.0, 1.0], dtype=int)]``. This is now + deprecated and will be removed in a future version. + - 0-dimensional boolean indices like ``arr.flat[True]`` are also + deprecated and will be removed in a future version. + +* Consistent error types: + + Certain invalid `flatiter` indices that previously raised `ValueError` + now correctly raise `IndexError`, aligning with `ndarray` behavior. + +* Improved error messages: + + The error message for unsupported index operations now provides more + specific details, including explicitly listing the valid index types, + instead of the generic ``IndexError: unsupported index operation``. diff --git a/numpy/_core/src/multiarray/iterators.c b/numpy/_core/src/multiarray/iterators.c index 7557662ad56c..704fd4738589 100644 --- a/numpy/_core/src/multiarray/iterators.c +++ b/numpy/_core/src/multiarray/iterators.c @@ -29,78 +29,6 @@ #define ELLIPSIS_INDEX -2 #define SINGLE_INDEX -3 -/* - * Tries to convert 'o' into an npy_intp interpreted as an - * index. Returns 1 if it was successful, 0 otherwise. Does - * not set an exception. - */ -static int -coerce_index(PyObject *o, npy_intp *v) -{ - *v = PyArray_PyIntAsIntp(o); - - if ((*v) == -1 && PyErr_Occurred()) { - PyErr_Clear(); - return 0; - } - return 1; -} - -/* - * This function converts one element of the indexing tuple - * into a step size and a number of steps, returning the - * starting index. Non-slices are signalled in 'n_steps', - * as NEWAXIS_INDEX, ELLIPSIS_INDEX, or SINGLE_INDEX. 
- */ -NPY_NO_EXPORT npy_intp -parse_index_entry(PyObject *op, npy_intp *step_size, - npy_intp *n_steps, npy_intp max, - int axis, int check_index) -{ - npy_intp i; - - if (op == Py_None) { - *n_steps = NEWAXIS_INDEX; - i = 0; - } - else if (op == Py_Ellipsis) { - *n_steps = ELLIPSIS_INDEX; - i = 0; - } - else if (PySlice_Check(op)) { - npy_intp stop; - if (PySlice_GetIndicesEx(op, max, &i, &stop, step_size, n_steps) < 0) { - goto fail; - } - if (*n_steps <= 0) { - *n_steps = 0; - *step_size = 1; - i = 0; - } - } - else if (coerce_index(op, &i)) { - *n_steps = SINGLE_INDEX; - *step_size = 0; - if (check_index) { - if (check_and_adjust_index(&i, max, axis, NULL) < 0) { - goto fail; - } - } - } - else { - PyErr_SetString(PyExc_IndexError, - "each index entry must be either a " - "slice, an integer, Ellipsis, or " - "newaxis"); - goto fail; - } - return i; - - fail: - return -1; -} - - /*********************** Element-wise Array Iterator ***********************/ /* Aided by Peter J. Verveer's nd_image package and numpy's arraymap ****/ /* and Python's array iterator ***/ @@ -427,7 +355,7 @@ iter_length(PyArrayIterObject *self) } -static PyArrayObject * +static PyObject * iter_subscript_Bool(PyArrayIterObject *self, PyArrayObject *ind, NPY_cast_info *cast_info) { @@ -484,7 +412,7 @@ iter_subscript_Bool(PyArrayIterObject *self, PyArrayObject *ind, } PyArray_ITER_RESET(self); } - return ret; + return (PyObject *) ret; } static PyObject * @@ -562,195 +490,154 @@ iter_subscript_int(PyArrayIterObject *self, PyArrayObject *ind, NPY_NO_EXPORT PyObject * iter_subscript(PyArrayIterObject *self, PyObject *ind) { - PyArray_Descr *indtype = NULL; - PyArray_Descr *dtype; - npy_intp start, step_size; - npy_intp n_steps; - PyArrayObject *ret; - char *dptr; - int size; - PyObject *obj = NULL; - PyObject *new; + PyObject *ret = NULL; + + int index_type; + int index_num = -1; + int ndim, fancy_ndim; + npy_intp start, stop, step, n_steps; + npy_index_info indices[NPY_MAXDIMS * 2 + 1]; + + 
PyArray_Descr *dtype = PyArray_DESCR(self->ao); + npy_intp dtype_size = dtype->elsize; NPY_cast_info cast_info = {.func = NULL}; - if (ind == Py_Ellipsis) { - ind = PySlice_New(NULL, NULL, NULL); - obj = iter_subscript(self, ind); - Py_DECREF(ind); - return obj; + /* Prepare the indices */ + index_type = prepare_index_noarray(1, &self->size, ind, indices, &index_num, + &ndim, &fancy_ndim, 1, 1); + + if (index_type < 0) { + return NULL; } - if (PyTuple_Check(ind)) { - int len; - len = PyTuple_GET_SIZE(ind); - if (len > 1) { - goto fail; - } - if (len == 0) { - Py_INCREF(self->ao); - return (PyObject *)self->ao; - } - ind = PyTuple_GET_ITEM(ind, 0); + + else if (indices[0].type == HAS_NEWAXIS) { + PyErr_SetString(PyExc_IndexError, + "only integers, slices (`:`), ellipsis (`...`) and integer or boolean " + "arrays are valid indices" + ); + goto finish; } - /* - * Tuples >1d not accepted --- i.e. no newaxis - * Could implement this with adjusted strides and dimensions in iterator - * Check for Boolean -- this is first because Bool is a subclass of Int - */ - PyArray_ITER_RESET(self); + // Single ellipsis index + else if (index_type == HAS_ELLIPSIS) { + ind = PySlice_New(NULL, NULL, NULL); + if (ind == NULL) { + goto finish; + } - if (PyBool_Check(ind)) { - int istrue = PyObject_IsTrue(ind); - if (istrue == -1) { - goto fail; + ret = iter_subscript(self, ind); + Py_DECREF(ind); + goto finish; + } + + // Single boolean index + else if (indices[0].type == HAS_0D_BOOL) { + /* Deprecated 2025-07, NumPy 2.4 */ + if (DEPRECATE("Indexing flat iterators with a 0-dimensional boolean index is deprecated " + "and may be removed in a future version. 
(Deprecated NumPy 2.4)") < 0) { + goto finish; } - if (istrue) { - return PyArray_ToScalar(self->dataptr, self->ao); + if (indices[0].value) { + ret = PyArray_ToScalar(self->dataptr, self->ao); + goto finish; } else { /* empty array */ npy_intp ii = 0; - dtype = PyArray_DESCR(self->ao); Py_INCREF(dtype); - ret = (PyArrayObject *)PyArray_NewFromDescr(Py_TYPE(self->ao), - dtype, - 1, &ii, - NULL, NULL, 0, - (PyObject *)self->ao); - return (PyObject *)ret; + ret = PyArray_NewFromDescr(Py_TYPE(self->ao), + dtype, + 1, &ii, + NULL, NULL, 0, + (PyObject *)self->ao); + goto finish; } } - dtype = PyArray_DESCR(self->ao); - size = dtype->elsize; + PyArray_ITER_RESET(self); - /* set up a cast to handle item copying */ + if (index_type == HAS_INTEGER) { + if (check_and_adjust_index(&indices[0].value, self->size, -1, NULL) < 0) { + goto finish; + } + PyArray_ITER_GOTO1D(self, indices[0].value); + ret = PyArray_ToScalar(self->dataptr, self->ao); + PyArray_ITER_RESET(self); + goto finish; + } + /* set up a cast to handle item copying */ NPY_ARRAYMETHOD_FLAGS transfer_flags = 0; npy_intp one = 1; + /* We can assume the newly allocated output array is aligned */ int is_aligned = IsUintAligned(self->ao); if (PyArray_GetDTypeTransferFunction( - is_aligned, size, size, dtype, dtype, 0, &cast_info, + is_aligned, dtype_size, dtype_size, dtype, dtype, 0, &cast_info, &transfer_flags) < 0) { - goto fail; + goto finish; } - /* Check for Integer or Slice */ - if (PyLong_Check(ind) || PySlice_Check(ind)) { - start = parse_index_entry(ind, &step_size, &n_steps, - self->size, 0, 1); - if (start == -1) { - goto fail; - } - if (n_steps == ELLIPSIS_INDEX || n_steps == NEWAXIS_INDEX) { - PyErr_SetString(PyExc_IndexError, - "cannot use Ellipsis or newaxes here"); - goto fail; + if (index_type == HAS_SLICE) { + if (PySlice_GetIndicesEx(indices[0].object, + self->size, + &start, &stop, &step, &n_steps) < 0) { + goto finish; } + PyArray_ITER_GOTO1D(self, start); - if (n_steps == SINGLE_INDEX) { /* 
Integer */ - PyObject *tmp; - tmp = PyArray_ToScalar(self->dataptr, self->ao); - PyArray_ITER_RESET(self); - NPY_cast_info_xfree(&cast_info); - return tmp; - } Py_INCREF(dtype); - ret = (PyArrayObject *)PyArray_NewFromDescr(Py_TYPE(self->ao), + ret = PyArray_NewFromDescr(Py_TYPE(self->ao), dtype, 1, &n_steps, NULL, NULL, 0, (PyObject *)self->ao); if (ret == NULL) { - goto fail; + goto finish; } - dptr = PyArray_DATA(ret); + + char *dptr = PyArray_DATA((PyArrayObject *) ret); while (n_steps--) { char *args[2] = {self->dataptr, dptr}; - npy_intp transfer_strides[2] = {size, size}; + npy_intp transfer_strides[2] = {dtype_size, dtype_size}; if (cast_info.func(&cast_info.context, args, &one, transfer_strides, cast_info.auxdata) < 0) { - goto fail; + goto finish; } - start += step_size; + start += step; PyArray_ITER_GOTO1D(self, start); - dptr += size; + dptr += dtype_size; } PyArray_ITER_RESET(self); - NPY_cast_info_xfree(&cast_info); - return (PyObject *)ret; + goto finish; } - /* convert to INTP array if Integer array scalar or List */ - indtype = PyArray_DescrFromType(NPY_INTP); - if (PyArray_IsScalar(ind, Integer) || PyList_Check(ind)) { - Py_INCREF(indtype); - obj = PyArray_FromAny(ind, indtype, 0, 0, NPY_ARRAY_FORCECAST, NULL); - if (obj == NULL) { - goto fail; - } - } - else { - Py_INCREF(ind); - obj = ind; + if (index_type == HAS_BOOL) { + ret = iter_subscript_Bool(self, (PyArrayObject *) indices[0].object, &cast_info); + goto finish; } - if (!PyArray_Check(obj)) { - PyArrayObject *tmp_arr = (PyArrayObject *) PyArray_FROM_O(obj); - if (tmp_arr == NULL) { - goto fail; - } - - if (PyArray_SIZE(tmp_arr) == 0) { - PyArray_Descr *indtype = PyArray_DescrFromType(NPY_INTP); - Py_SETREF(obj, PyArray_FromArray(tmp_arr, indtype, NPY_ARRAY_FORCECAST)); - Py_DECREF(tmp_arr); - if (obj == NULL) { - goto fail; - } - } - else { - Py_SETREF(obj, (PyObject *) tmp_arr); + if (index_type == HAS_FANCY) { + PyArray_Descr *indtype = PyArray_DescrFromType(NPY_INTP); + PyArrayObject 
*cast_array = (PyArrayObject *) + PyArray_FromArray((PyArrayObject *) indices[0].object, indtype, NPY_ARRAY_FORCECAST); + if (cast_array == NULL) { + goto finish; } - } - - /* Check for Boolean array */ - if (PyArray_TYPE((PyArrayObject *)obj) == NPY_BOOL) { - ret = iter_subscript_Bool(self, (PyArrayObject *)obj, &cast_info); + ret = iter_subscript_int(self, cast_array, &cast_info); + Py_DECREF(cast_array); goto finish; } - /* Only integer arrays left */ - if (!PyArray_ISINTEGER((PyArrayObject *)obj)) { - goto fail; - } - - Py_INCREF(indtype); - new = PyArray_FromAny(obj, indtype, 0, 0, - NPY_ARRAY_FORCECAST | NPY_ARRAY_ALIGNED, NULL); - if (new == NULL) { - goto fail; - } - ret = (PyArrayObject *)iter_subscript_int(self, (PyArrayObject *)new, - &cast_info); - Py_DECREF(new); - - finish: - Py_DECREF(indtype); - Py_DECREF(obj); + PyErr_SetString(PyExc_IndexError, + "only integers, slices (`:`), ellipsis (`...`) and integer or boolean " + "arrays are valid indices" + ); +finish: NPY_cast_info_xfree(&cast_info); - return (PyObject *)ret; - - fail: - if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_IndexError, "unsupported iterator index"); + for (int i = 0; i < index_num; i++) { + Py_XDECREF(indices[i].object); } - Py_XDECREF(indtype); - Py_XDECREF(obj); - NPY_cast_info_xfree(&cast_info); - - return NULL; - + return ret; } @@ -858,140 +745,132 @@ iter_ass_sub_int(PyArrayIterObject *self, PyArrayObject *ind, NPY_NO_EXPORT int iter_ass_subscript(PyArrayIterObject *self, PyObject *ind, PyObject *val) { - PyArrayObject *arrval = NULL; - PyArrayIterObject *val_it = NULL; - PyArray_Descr *type; - PyArray_Descr *indtype = NULL; - int retval = -1; - npy_intp start, step_size; - npy_intp n_steps; - PyObject *obj = NULL; - NPY_cast_info cast_info = {.func = NULL}; - if (val == NULL) { PyErr_SetString(PyExc_TypeError, "Cannot delete iterator elements"); return -1; } - if (PyArray_FailUnlessWriteable(self->ao, "underlying array") < 0) + if (PyArray_FailUnlessWriteable(self->ao, 
"underlying array") < 0) { return -1; + } - if (ind == Py_Ellipsis) { - ind = PySlice_New(NULL, NULL, NULL); - retval = iter_ass_subscript(self, ind, val); - Py_DECREF(ind); - return retval; + int ret = -1; + + int index_type; + int index_num = -1; + int ndim, fancy_ndim; + npy_intp start, stop, step, n_steps; + npy_index_info indices[NPY_MAXDIMS * 2 + 1]; + + PyArray_Descr *dtype = PyArray_DESCR(self->ao); + npy_intp dtype_size = dtype->elsize; + NPY_cast_info cast_info = {.func = NULL}; + + /* Prepare the indices */ + index_type = prepare_index_noarray(1, &self->size, ind, indices, &index_num, + &ndim, &fancy_ndim, 1, 1); + + if (index_type < 0) { + goto finish; + } + else if (indices[0].type == HAS_NEWAXIS) { + PyErr_SetString(PyExc_IndexError, + "only integers, slices (`:`), ellipsis (`...`) and integer or boolean " + "arrays are valid indices" + ); + goto finish; } - if (PyTuple_Check(ind)) { - int len; - len = PyTuple_GET_SIZE(ind); - if (len > 1) { + // Single ellipsis index + else if (index_type == HAS_ELLIPSIS) { + if (PyTuple_Check(ind)) { + PyErr_SetString(PyExc_IndexError, "Assigning to a flat iterator with a 0-D index is not supported"); + goto finish; + } + + ind = PySlice_New(NULL, NULL, NULL); + if (ind == NULL) { goto finish; } - ind = PyTuple_GET_ITEM(ind, 0); - } - type = PyArray_DESCR(self->ao); + ret = iter_ass_subscript(self, ind, val); + Py_DECREF(ind); + goto finish; + } - /* - * Check for Boolean -- this is first because - * Bool is a subclass of Int - */ - - if (PyBool_Check(ind)) { - retval = 0; - int istrue = PyObject_IsTrue(ind); - if (istrue == -1) { - return -1; + // Single boolean index + else if (indices[0].type == HAS_0D_BOOL) { + /* Deprecated 2025-07, NumPy 2.4 */ + if (DEPRECATE("Indexing flat iterators with a 0-dimensional boolean index is deprecated " + "and may be removed in a future version. 
(Deprecated NumPy 2.4)") < 0) { + goto finish; } - if (istrue) { - retval = PyArray_Pack( - PyArray_DESCR(self->ao), self->dataptr, val); + ret = 0; + if (indices[0].value) { + ret = PyArray_Pack(PyArray_DESCR(self->ao), self->dataptr, val); } goto finish; } - if (PySequence_Check(ind) || PySlice_Check(ind)) { - goto skip; - } - start = PyArray_PyIntAsIntp(ind); - if (error_converting(start)) { - PyErr_Clear(); - } - else { - if (check_and_adjust_index(&start, self->size, -1, NULL) < 0) { + PyArray_ITER_RESET(self); + + if (index_type == HAS_INTEGER) { + if (check_and_adjust_index(&indices[0].value, self->size, -1, NULL) < 0) { goto finish; } - PyArray_ITER_GOTO1D(self, start); - retval = PyArray_Pack(PyArray_DESCR(self->ao), self->dataptr, val); + PyArray_ITER_GOTO1D(self, indices[0].value); + ret = PyArray_Pack(PyArray_DESCR(self->ao), self->dataptr, val); PyArray_ITER_RESET(self); - if (retval < 0) { + if (ret < 0) { PyErr_SetString(PyExc_ValueError, "Error setting single item of array."); } goto finish; } - skip: - Py_INCREF(type); - arrval = (PyArrayObject *)PyArray_FromAny(val, type, 0, 0, + Py_INCREF(dtype); + PyArrayObject *arrval = (PyArrayObject *)PyArray_FromAny(val, dtype, 0, 0, NPY_ARRAY_FORCECAST, NULL); if (arrval == NULL) { - return -1; + goto finish; } - val_it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)arrval); + PyArrayIterObject *val_it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)arrval); if (val_it == NULL) { goto finish; } if (val_it->size == 0) { - retval = 0; + ret = 0; goto finish; } /* set up cast to handle single-element copies into arrval */ NPY_ARRAYMETHOD_FLAGS transfer_flags = 0; npy_intp one = 1; - int itemsize = type->elsize; /* We can assume the newly allocated array is aligned */ int is_aligned = IsUintAligned(self->ao); if (PyArray_GetDTypeTransferFunction( - is_aligned, itemsize, itemsize, type, type, 0, + is_aligned, dtype_size, dtype_size, dtype, dtype, 0, &cast_info, &transfer_flags) < 0) { goto finish; } - /* 
Check Slice */ - if (PySlice_Check(ind)) { - start = parse_index_entry(ind, &step_size, &n_steps, self->size, 0, 0); - if (start == -1) { - goto finish; - } - if (n_steps == ELLIPSIS_INDEX || n_steps == NEWAXIS_INDEX) { - PyErr_SetString(PyExc_IndexError, - "cannot use Ellipsis or newaxes here"); + if (index_type == HAS_SLICE) { + if (PySlice_GetIndicesEx(indices[0].object, + self->size, + &start, &stop, &step, &n_steps) < 0) { goto finish; } + PyArray_ITER_GOTO1D(self, start); - npy_intp transfer_strides[2] = {itemsize, itemsize}; - if (n_steps == SINGLE_INDEX) { - char *args[2] = {PyArray_DATA(arrval), self->dataptr}; - if (cast_info.func(&cast_info.context, args, &one, - transfer_strides, cast_info.auxdata) < 0) { - goto finish; - } - PyArray_ITER_RESET(self); - retval = 0; - goto finish; - } + npy_intp transfer_strides[2] = {dtype_size, dtype_size}; while (n_steps--) { char *args[2] = {val_it->dataptr, self->dataptr}; if (cast_info.func(&cast_info.context, args, &one, transfer_strides, cast_info.auxdata) < 0) { goto finish; } - start += step_size; + start += step; PyArray_ITER_GOTO1D(self, start); PyArray_ITER_NEXT(val_it); if (val_it->index == val_it->size) { @@ -999,60 +878,37 @@ iter_ass_subscript(PyArrayIterObject *self, PyObject *ind, PyObject *val) } } PyArray_ITER_RESET(self); - retval = 0; + ret = 0; goto finish; } - /* convert to INTP array if Integer array scalar or List */ - indtype = PyArray_DescrFromType(NPY_INTP); - if (PyList_Check(ind)) { - Py_INCREF(indtype); - obj = PyArray_FromAny(ind, indtype, 0, 0, NPY_ARRAY_FORCECAST, NULL); - } - else { - Py_INCREF(ind); - obj = ind; + if (index_type == HAS_BOOL) { + ret = iter_ass_sub_Bool(self, (PyArrayObject *) indices[0].object, val_it, &cast_info); + goto finish; } - if (obj != NULL && PyArray_Check(obj)) { - /* Check for Boolean object */ - if (PyArray_TYPE((PyArrayObject *)obj)==NPY_BOOL) { - if (iter_ass_sub_Bool(self, (PyArrayObject *)obj, - val_it, &cast_info) < 0) { - goto finish; - } - 
retval=0; - } - /* Check for integer array */ - else if (PyArray_ISINTEGER((PyArrayObject *)obj)) { - PyObject *new; - Py_INCREF(indtype); - new = PyArray_CheckFromAny(obj, indtype, 0, 0, - NPY_ARRAY_FORCECAST | NPY_ARRAY_BEHAVED_NS, NULL); - Py_DECREF(obj); - obj = new; - if (new == NULL) { - goto finish; - } - if (iter_ass_sub_int(self, (PyArrayObject *)obj, - val_it, &cast_info) < 0) { - goto finish; - } - retval = 0; + if (index_type == HAS_FANCY) { + PyArray_Descr *indtype = PyArray_DescrFromType(NPY_INTP); + PyArrayObject *cast_array = (PyArrayObject *) + PyArray_FromArray((PyArrayObject *) indices[0].object, indtype, NPY_ARRAY_FORCECAST); + if (cast_array == NULL) { + goto finish; } + ret = iter_ass_sub_int(self, cast_array, val_it, &cast_info); + Py_DECREF(cast_array); + goto finish; } - finish: - if (!PyErr_Occurred() && retval < 0) { - PyErr_SetString(PyExc_IndexError, "unsupported iterator index"); - } - Py_XDECREF(indtype); - Py_XDECREF(obj); - Py_XDECREF(val_it); - Py_XDECREF(arrval); + PyErr_SetString(PyExc_IndexError, + "only integers, slices (`:`), ellipsis (`...`) and integer or boolean " + "arrays are valid indices" + ); +finish: NPY_cast_info_xfree(&cast_info); - return retval; - + for (int i = 0; i < index_num; i++) { + Py_XDECREF(indices[i].object); + } + return ret; } diff --git a/numpy/_core/src/multiarray/mapping.c b/numpy/_core/src/multiarray/mapping.c index 12f25534f1d0..28d6a5a26938 100644 --- a/numpy/_core/src/multiarray/mapping.c +++ b/numpy/_core/src/multiarray/mapping.c @@ -33,23 +33,6 @@ #include "umathmodule.h" -#define HAS_INTEGER 1 -#define HAS_NEWAXIS 2 -#define HAS_SLICE 4 -#define HAS_ELLIPSIS 8 -/* HAS_FANCY can be mixed with HAS_0D_BOOL, be careful when to use & or == */ -#define HAS_FANCY 16 -#define HAS_BOOL 32 -/* NOTE: Only set if it is neither fancy nor purely integer index! */ -#define HAS_SCALAR_ARRAY 64 -/* - * Indicate that this is a fancy index that comes from a 0d boolean. 
- * This means that the index does not operate along a real axis. The - * corresponding index type is just HAS_FANCY. - */ -#define HAS_0D_BOOL (HAS_FANCY | 128) - - static int _nonzero_indices(PyObject *myBool, PyArrayObject **arrays); @@ -263,20 +246,22 @@ unpack_indices(PyObject *index, PyObject **result, npy_intp result_n) * * Checks everything but the bounds. * - * @param self the array being indexed + * @param array_ndims The number of dimensions of the array being indexed (1 for iterators) + * @param array_dims The dimensions of the array being indexed (self->size for iterators) * @param index the index object * @param indices index info struct being filled (size of NPY_MAXDIMS * 2 + 1) * @param num number of indices found * @param ndim dimension of the indexing result * @param out_fancy_ndim dimension of the fancy/advanced indices part * @param allow_boolean whether to allow the boolean special case + * @param is_flatiter_object Whether the object indexed is an iterator * * @returns the index_type or -1 on failure and fills the number of indices. */ NPY_NO_EXPORT int -prepare_index(PyArrayObject *self, PyObject *index, - npy_index_info *indices, - int *num, int *ndim, int *out_fancy_ndim, int allow_boolean) +prepare_index_noarray(int array_ndims, npy_intp *array_dims, PyObject *index, + npy_index_info *indices, int *num, int *ndim, int *out_fancy_ndim, + int allow_boolean, int is_flatiter_object) { int new_ndim, fancy_ndim, used_ndim, index_ndim; int curr_idx, get_idx; @@ -314,8 +299,8 @@ prepare_index(PyArrayObject *self, PyObject *index, while (get_idx < index_ndim) { if (curr_idx > NPY_MAXDIMS * 2) { - PyErr_SetString(PyExc_IndexError, - "too many indices for array"); + PyErr_Format(PyExc_IndexError, + "too many indices for %s", is_flatiter_object ? 
"flat iterator" : "array"); goto failed_building_indices; } @@ -377,7 +362,7 @@ prepare_index(PyArrayObject *self, PyObject *index, * Since this is always an error if it was not a boolean, we can * allow the 0-d special case before the rest. */ - else if (PyArray_NDIM(self) != 0) { + else if (array_ndims != 0) { /* * Single integer index, there are two cases here. * It could be an array, a 0-d array is handled @@ -418,17 +403,55 @@ prepare_index(PyArrayObject *self, PyObject *index, goto failed_building_indices; } + // We raise here because we changed the behavior for boolean + // indices for flat iterators from being handled as integers + // to being regular boolean indices. + // TODO: This should go away fairly soon and lists of booleans + // should be handled as regular boolean indices. + if (is_flatiter_object && PyArray_ISBOOL(tmp_arr) && !PyBool_Check(index)) { + Py_DECREF(tmp_arr); + PyErr_Format(PyExc_IndexError, + "boolean indices for iterators are not supported because " + "of previous behavior that was confusing (valid boolean " + "indices are expected to work in the future)" + ); + goto failed_building_indices; + } + /* * For example an empty list can be cast to an integer array, * however it will default to a float one. */ - if (PyArray_SIZE(tmp_arr) == 0) { - PyArray_Descr *indtype = PyArray_DescrFromType(NPY_INTP); + if (PyArray_SIZE(tmp_arr) == 0 + || (is_flatiter_object && !PyArray_ISINTEGER(tmp_arr) && !PyArray_ISBOOL(tmp_arr))) { + PyArray_Descr *indtype = PyArray_DescrFromType(NPY_INTP); arr = (PyArrayObject *)PyArray_FromArray(tmp_arr, indtype, NPY_ARRAY_FORCECAST); + + // If the cast succeeded (which means that the previous flat iterator + // indexing routine would have succeeded as well), we need to issue a + // deprecation warning. 
+ if (arr + && is_flatiter_object + && PyArray_SIZE(tmp_arr) != 0 + && !PyArray_ISINTEGER(tmp_arr) + && !PyArray_ISBOOL(tmp_arr) + && DEPRECATE("Invalid non-array indices for iterator objects are deprecated and will be " + "removed in a future version. (Deprecated NumPy 2.4)") < 0) { + Py_DECREF(tmp_arr); + goto failed_building_indices; + } Py_DECREF(tmp_arr); if (arr == NULL) { + // Raise a helpful error if this was a ValueError (i.e. could not cast) + if (PyErr_ExceptionMatches(PyExc_ValueError)) { + PyErr_Format(PyExc_IndexError, + "only integers, slices (`:`), ellipsis (`...`)%s and integer or boolean " + "arrays are valid indices", + is_flatiter_object ? "" : ", numpy.newaxis (`None`)" + ); + } goto failed_building_indices; } } @@ -458,9 +481,9 @@ prepare_index(PyArrayObject *self, PyObject *index, * this is always an error. The check ensures that these errors are raised * and match those of the generic path. */ - if ((PyArray_NDIM(arr) == PyArray_NDIM(self)) + if ((PyArray_NDIM(arr) == array_ndims) && PyArray_CompareLists(PyArray_DIMS(arr), - PyArray_DIMS(self), + array_dims, PyArray_NDIM(arr))) { index_type = HAS_BOOL; @@ -468,8 +491,8 @@ prepare_index(PyArrayObject *self, PyObject *index, indices[curr_idx].object = (PyObject *)arr; /* keep track anyway, just to be complete */ - used_ndim = PyArray_NDIM(self); - fancy_ndim = PyArray_NDIM(self); + used_ndim = array_ndims; + fancy_ndim = array_ndims; curr_idx += 1; break; } @@ -524,8 +547,8 @@ prepare_index(PyArrayObject *self, PyObject *index, /* Check that we will not run out of indices to store new ones */ if (curr_idx + n >= NPY_MAXDIMS * 2) { - PyErr_SetString(PyExc_IndexError, - "too many indices for array"); + PyErr_Format(PyExc_IndexError, + "too many indices for %s", is_flatiter_object ? 
"flat iterator" : "array"); for (i=0; i < n; i++) { Py_DECREF(nonzero_result[i]); } @@ -603,10 +626,11 @@ prepare_index(PyArrayObject *self, PyObject *index, } else { /* The input was not an array, so give a general error message */ - PyErr_SetString(PyExc_IndexError, - "only integers, slices (`:`), ellipsis (`...`), " - "numpy.newaxis (`None`) and integer or boolean " - "arrays are valid indices"); + PyErr_Format(PyExc_IndexError, + "only integers, slices (`:`), ellipsis (`...`)%s and integer or boolean " + "arrays are valid indices", + is_flatiter_object ? "" : ", numpy.newaxis (`None`)" + ); } Py_DECREF(arr); goto failed_building_indices; @@ -616,10 +640,10 @@ prepare_index(PyArrayObject *self, PyObject *index, * Compare dimension of the index to the real ndim. this is * to find the ellipsis value or append an ellipsis if necessary. */ - if (used_ndim < PyArray_NDIM(self)) { + if (used_ndim < array_ndims) { if (index_type & HAS_ELLIPSIS) { - indices[ellipsis_pos].value = PyArray_NDIM(self) - used_ndim; - used_ndim = PyArray_NDIM(self); + indices[ellipsis_pos].value = array_ndims - used_ndim; + used_ndim = array_ndims; new_ndim += indices[ellipsis_pos].value; } else { @@ -630,19 +654,21 @@ prepare_index(PyArrayObject *self, PyObject *index, index_type |= HAS_ELLIPSIS; indices[curr_idx].object = NULL; indices[curr_idx].type = HAS_ELLIPSIS; - indices[curr_idx].value = PyArray_NDIM(self) - used_ndim; + indices[curr_idx].value = array_ndims - used_ndim; ellipsis_pos = curr_idx; - used_ndim = PyArray_NDIM(self); + used_ndim = array_ndims; new_ndim += indices[curr_idx].value; curr_idx += 1; } } - else if (used_ndim > PyArray_NDIM(self)) { + else if (used_ndim > array_ndims) { PyErr_Format(PyExc_IndexError, - "too many indices for array: " - "array is %d-dimensional, but %d were indexed", - PyArray_NDIM(self), + "too many indices for %s: " + "%s is %d-dimensional, but %d were indexed", + is_flatiter_object ? "flat iterator" : "array", + is_flatiter_object ? 
"flat iterator" : "array", + array_ndims, used_ndim); goto failed_building_indices; } @@ -697,14 +723,15 @@ prepare_index(PyArrayObject *self, PyObject *index, used_ndim = 0; for (i = 0; i < curr_idx; i++) { if ((indices[i].type == HAS_FANCY) && indices[i].value > 0) { - if (indices[i].value != PyArray_DIM(self, used_ndim)) { + if (indices[i].value != array_dims[used_ndim]) { char err_msg[174]; PyOS_snprintf(err_msg, sizeof(err_msg), - "boolean index did not match indexed array along " + "boolean index did not match indexed %s along " "axis %d; size of axis is %" NPY_INTP_FMT " but size of corresponding boolean axis is %" NPY_INTP_FMT, - used_ndim, PyArray_DIM(self, used_ndim), + is_flatiter_object ? "flat iterator" : "array", + used_ndim, array_dims[used_ndim], indices[i].value); PyErr_SetString(PyExc_IndexError, err_msg); goto failed_building_indices; @@ -740,6 +767,16 @@ prepare_index(PyArrayObject *self, PyObject *index, return -1; } +NPY_NO_EXPORT int +prepare_index(PyArrayObject *self, PyObject *index, + npy_index_info *indices, + int *num, int *ndim, int *out_fancy_ndim, int allow_boolean) +{ + return prepare_index_noarray(PyArray_NDIM(self), PyArray_DIMS(self), + index, indices, num, ndim, out_fancy_ndim, + allow_boolean, 0); +} + /** * Check if self has memory overlap with one of the index arrays, or with extra_op. diff --git a/numpy/_core/src/multiarray/mapping.h b/numpy/_core/src/multiarray/mapping.h index 528cb6604892..d4577c78fdbb 100644 --- a/numpy/_core/src/multiarray/mapping.h +++ b/numpy/_core/src/multiarray/mapping.h @@ -3,6 +3,23 @@ extern NPY_NO_EXPORT PyMappingMethods array_as_mapping; +/* Indexing types */ +#define HAS_INTEGER 1 +#define HAS_NEWAXIS 2 +#define HAS_SLICE 4 +#define HAS_ELLIPSIS 8 +/* HAS_FANCY can be mixed with HAS_0D_BOOL, be careful when to use & or == */ +#define HAS_FANCY 16 +#define HAS_BOOL 32 +/* NOTE: Only set if it is neither fancy nor purely integer index! 
*/ +#define HAS_SCALAR_ARRAY 64 +/* + * Indicate that this is a fancy index that comes from a 0d boolean. + * This means that the index does not operate along a real axis. The + * corresponding index type is just HAS_FANCY. + */ +#define HAS_0D_BOOL (HAS_FANCY | 128) + /* * Object to store information needed for advanced (also fancy) indexing. @@ -113,6 +130,11 @@ typedef struct { } npy_index_info; +NPY_NO_EXPORT int +prepare_index_noarray(int array_ndims, npy_intp *array_dims, PyObject *index, + npy_index_info *indices, int *num, int *ndim, int *out_fancy_ndim, + int allow_boolean, int is_flatiter_object); + NPY_NO_EXPORT Py_ssize_t array_length(PyArrayObject *self); diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index d4f2e9984266..75f092a51808 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -480,3 +480,39 @@ def test_deprecated(self): def test_not_deprecated(self, align): # if the user passes a bool, it is accepted. 
self.assert_not_deprecated(lambda: np.dtype("f8", align=align)) + + +class TestFlatiterIndexing0dBoolIndex(_DeprecationTestCase): + # Deprecated in Numpy 2.4, 2025-07 + message = r"Indexing flat iterators with a 0-dimensional boolean index" + + def test_0d_boolean_index_deprecated(self): + arr = np.arange(3) + # 0d boolean indices on flat iterators are deprecated + self.assert_deprecated(lambda: arr.flat[True]) + + def test_0d_boolean_assign_index_deprecated(self): + arr = np.arange(3) + + def assign_to_index(): + arr.flat[True] = 10 + + self.assert_deprecated(assign_to_index) + + +class TestFlatiterIndexingFloatIndex(_DeprecationTestCase): + # Deprecated in NumPy 2.4, 2025-07 + message = r"Invalid non-array indices for iterator objects" + + def test_float_index_deprecated(self): + arr = np.arange(3) + # float indices on flat iterators are deprecated + self.assert_deprecated(lambda: arr.flat[[1.]]) + + def test_float_assign_index_deprecated(self): + arr = np.arange(3) + + def assign_to_index(): + arr.flat[[1.]] = 10 + + self.assert_deprecated(assign_to_index) diff --git a/numpy/_core/tests/test_indexing.py b/numpy/_core/tests/test_indexing.py index 2a8a669d0787..c65468ebd24a 100644 --- a/numpy/_core/tests/test_indexing.py +++ b/numpy/_core/tests/test_indexing.py @@ -614,22 +614,6 @@ def test_nontuple_ndindex(self): assert_equal(a[[0, 1], [0, 1]], np.array([0, 6])) assert_raises(IndexError, a.__getitem__, [slice(None)]) - def test_flat_index_on_flatiter(self): - a = np.arange(9).reshape((3, 3)) - b = np.array([0, 5, 6]) - assert_equal(a.flat[b.flat], np.array([0, 5, 6])) - - def test_empty_string_flat_index_on_flatiter(self): - a = np.arange(9).reshape((3, 3)) - b = np.array([], dtype="S") - assert_equal(a.flat[b.flat], np.array([])) - - def test_nonempty_string_flat_index_on_flatiter(self): - a = np.arange(9).reshape((3, 3)) - b = np.array(["a"], dtype="S") - with pytest.raises(IndexError, match="unsupported iterator index"): - a.flat[b.flat] - class 
TestFieldIndexing: def test_scalar_return_type(self): @@ -1453,3 +1437,232 @@ def test_setitem(self): a = a.reshape(5, 2) assign(a, 4, 10) assert_array_equal(a[-1], [10, 10]) + + +class TestFlatiterIndexing: + def test_flatiter_indexing_single_integer(self): + a = np.arange(9).reshape((3, 3)) + assert_array_equal(a.flat[0], 0) + assert_array_equal(a.flat[4], 4) + assert_array_equal(a.flat[-1], 8) + + with pytest.raises(IndexError, match="index 9 is out of bounds"): + a.flat[9] + + def test_flatiter_indexing_slice(self): + a = np.arange(9).reshape((3, 3)) + assert_array_equal(a.flat[:], np.arange(9)) + assert_array_equal(a.flat[:5], np.arange(5)) + assert_array_equal(a.flat[5:10], np.arange(5, 9)) + assert_array_equal(a.flat[::2], np.arange(0, 9, 2)) + assert_array_equal(a.flat[::-1], np.arange(8, -1, -1)) + assert_array_equal(a.flat[10:5], np.array([])) + + assert_array_equal(a.flat[()], np.arange(9)) + assert_array_equal(a.flat[...], np.arange(9)) + + def test_flatiter_indexing_boolean(self): + a = np.arange(9).reshape((3, 3)) + + with pytest.warns(DeprecationWarning, match="0-dimensional boolean index"): + assert_array_equal(a.flat[True], 0) + with pytest.warns(DeprecationWarning, match="0-dimensional boolean index"): + assert_array_equal(a.flat[False], np.array([])) + + mask = np.zeros(len(a.flat), dtype=bool) + mask[::2] = True + assert_array_equal(a.flat[mask], np.arange(0, 9, 2)) + + wrong_mask = np.zeros(len(a.flat) + 1, dtype=bool) + with pytest.raises(IndexError, + match="boolean index did not match indexed flat iterator"): + a.flat[wrong_mask] + + def test_flatiter_indexing_fancy(self): + a = np.arange(9).reshape((3, 3)) + + indices = np.array([1, 3, 5]) + assert_array_equal(a.flat[indices], indices) + + assert_array_equal(a.flat[[-1, -2]], np.array([8, 7])) + + indices_2d = np.array([[1, 2], [3, 4]]) + assert_array_equal(a.flat[indices_2d], indices_2d) + + assert_array_equal(a.flat[[True, 1]], np.array([1, 1])) + + assert_array_equal(a.flat[[]], 
np.array([], dtype=a.dtype)) + + with pytest.raises(IndexError, + match="boolean indices for iterators are not supported"): + a.flat[[True, True]] + + a = np.arange(3) + with pytest.raises(IndexError, + match="boolean indices for iterators are not supported"): + a.flat[[True, False, True]] + assert_array_equal(a.flat[np.asarray([True, False, True])], np.array([0, 2])) + + def test_flatiter_indexing_not_supported_newaxis_mutlidimensional_float(self): + a = np.arange(9).reshape((3, 3)) + with pytest.raises(IndexError, + match=r"only integers, slices \(`:`\), " + r"ellipsis \(`\.\.\.`\) and " + r"integer or boolean arrays are valid indices"): + a.flat[None] + + with pytest.raises(IndexError, + match=r"too many indices for flat iterator: flat iterator " + r"is 1-dimensional, but 2 were indexed"): + a.flat[1, 2] + + with pytest.warns(DeprecationWarning, + match="Invalid non-array indices for iterator objects are " + "deprecated"): + assert_array_equal(a.flat[[1.0, 2.0]], np.array([1, 2])) + + def test_flatiter_assign_single_integer(self): + a = np.arange(9).reshape((3, 3)) + + a.flat[0] = 10 + assert_array_equal(a, np.array([[10, 1, 2], [3, 4, 5], [6, 7, 8]])) + + a.flat[4] = 20 + assert_array_equal(a, np.array([[10, 1, 2], [3, 20, 5], [6, 7, 8]])) + + a.flat[-1] = 30 + assert_array_equal(a, np.array([[10, 1, 2], [3, 20, 5], [6, 7, 30]])) + + with pytest.raises(IndexError, match="index 9 is out of bounds"): + a.flat[9] = 40 + + def test_flatiter_indexing_slice_assign(self): + a = np.arange(9).reshape((3, 3)) + a.flat[:] = 10 + assert_array_equal(a, np.full((3, 3), 10)) + + a = np.arange(9).reshape((3, 3)) + a.flat[:5] = 20 + assert_array_equal(a, np.array([[20, 20, 20], [20, 20, 5], [6, 7, 8]])) + + a = np.arange(9).reshape((3, 3)) + a.flat[5:10] = 30 + assert_array_equal(a, np.array([[0, 1, 2], [3, 4, 30], [30, 30, 30]])) + + a = np.arange(9).reshape((3, 3)) + a.flat[::2] = 40 + assert_array_equal(a, np.array([[40, 1, 40], [3, 40, 5], [40, 7, 40]])) + + a = 
np.arange(9).reshape((3, 3)) + a.flat[::-1] = 50 + assert_array_equal(a, np.full((3, 3), 50)) + + a = np.arange(9).reshape((3, 3)) + a.flat[10:5] = 60 + assert_array_equal(a, np.arange(9).reshape((3, 3))) + + a = np.arange(9).reshape((3, 3)) + with pytest.raises(IndexError, + match="Assigning to a flat iterator with a 0-D index"): + a.flat[()] = 70 + + a = np.arange(9).reshape((3, 3)) + a.flat[...] = 80 + assert_array_equal(a, np.full((3, 3), 80)) + + def test_flatiter_indexing_boolean_assign(self): + a = np.arange(9).reshape((3, 3)) + with pytest.warns(DeprecationWarning, match="0-dimensional boolean index"): + a.flat[True] = 10 + assert_array_equal(a, np.array([[10, 1, 2], [3, 4, 5], [6, 7, 8]])) + + a = np.arange(9).reshape((3, 3)) + with pytest.warns(DeprecationWarning, match="0-dimensional boolean index"): + a.flat[False] = 20 + assert_array_equal(a, np.arange(9).reshape((3, 3))) + + a = np.arange(9).reshape((3, 3)) + mask = np.zeros(len(a.flat), dtype=bool) + mask[::2] = True + a.flat[mask] = 30 + assert_array_equal(a, np.array([[30, 1, 30], [3, 30, 5], [30, 7, 30]])) + + wrong_mask = np.zeros(len(a.flat) + 1, dtype=bool) + with pytest.raises(IndexError, + match="boolean index did not match indexed flat iterator"): + a.flat[wrong_mask] = 40 + + def test_flatiter_indexing_fancy_assign(self): + a = np.arange(9).reshape((3, 3)) + indices = np.array([1, 3, 5]) + a.flat[indices] = 10 + assert_array_equal(a, np.array([[0, 10, 2], [10, 4, 10], [6, 7, 8]])) + + a.flat[[-1, -2]] = 20 + assert_array_equal(a, np.array([[0, 10, 2], [10, 4, 10], [6, 20, 20]])) + + a = np.arange(9).reshape((3, 3)) + indices_2d = np.array([[1, 2], [3, 4]]) + a.flat[indices_2d] = 30 + assert_array_equal(a, np.array([[0, 30, 30], [30, 30, 5], [6, 7, 8]])) + + a.flat[[True, 1]] = 40 + assert_array_equal(a, np.array([[0, 40, 30], [30, 30, 5], [6, 7, 8]])) + + with pytest.raises(IndexError, + match="boolean indices for iterators are not supported"): + a.flat[[True, True]] = 50 + + a = 
np.arange(3) + with pytest.raises(IndexError, + match="boolean indices for iterators are not supported"): + a.flat[[True, False, True]] = 20 + a.flat[np.asarray([True, False, True])] = 20 + assert_array_equal(a, np.array([20, 1, 20])) + + def test_flatiter_indexing_fancy_int16_dtype(self): + a = np.arange(9).reshape((3, 3)) + indices = np.array([1, 3, 5], dtype=np.int16) + assert_array_equal(a.flat[indices], np.array([1, 3, 5])) + + a.flat[indices] = 10 + assert_array_equal(a, np.array([[0, 10, 2], [10, 4, 10], [6, 7, 8]])) + + def test_flatiter_indexing_not_supported_newaxis_mutlid_float_assign(self): + a = np.arange(9).reshape((3, 3)) + with pytest.raises(IndexError, + match=r"only integers, slices \(`:`\), " + r"ellipsis \(`\.\.\.`\) and " + r"integer or boolean arrays are valid indices"): + a.flat[None] = 10 + + a.flat[[1, 2]] = 10 + assert_array_equal(a, np.array([[0, 10, 10], [3, 4, 5], [6, 7, 8]])) + + with pytest.warns(DeprecationWarning, + match="Invalid non-array indices for iterator objects are " + "deprecated"): + a.flat[[1.0, 2.0]] = 20 + assert_array_equal(a, np.array([[0, 20, 20], [3, 4, 5], [6, 7, 8]])) + + def test_flat_index_on_flatiter(self): + a = np.arange(9).reshape((3, 3)) + b = np.array([0, 5, 6]) + assert_equal(a.flat[b.flat], np.array([0, 5, 6])) + + def test_empty_string_flat_index_on_flatiter(self): + a = np.arange(9).reshape((3, 3)) + b = np.array([], dtype="S") + # This is arguably incorrect, and should be removed (ideally with + # deprecation). But it matches the array path and comes from not + # distinguishing `arr[np.array([]).flat]` and `arr[[]]` and the latter + # must pass. 
+ assert_equal(a.flat[b.flat], np.array([])) + + def test_nonempty_string_flat_index_on_flatiter(self): + a = np.arange(9).reshape((3, 3)) + b = np.array(["a"], dtype="S") + with pytest.raises(IndexError, + match=r"only integers, slices \(`:`\), ellipsis \(`\.\.\.`\) " + r"and integer or boolean arrays are valid indices"): + a.flat[b.flat] diff --git a/numpy/_core/tests/test_regression.py b/numpy/_core/tests/test_regression.py index 2aeb29a1320d..8ad9a26fcc9a 100644 --- a/numpy/_core/tests/test_regression.py +++ b/numpy/_core/tests/test_regression.py @@ -851,8 +851,8 @@ def ia(x, s, v): assert_raises(IndexError, ia, x, s, np.zeros(11, dtype=float)) # Old special case (different code path): - assert_raises(ValueError, ia, x.flat, s, np.zeros(9, dtype=float)) - assert_raises(ValueError, ia, x.flat, s, np.zeros(11, dtype=float)) + assert_raises(IndexError, ia, x.flat, s, np.zeros(9, dtype=float)) + assert_raises(IndexError, ia, x.flat, s, np.zeros(11, dtype=float)) def test_mem_scalar_indexing(self): # Ticket #603 diff --git a/numpy/lib/_shape_base_impl.py b/numpy/lib/_shape_base_impl.py index c44d603611ee..8b200cd8daa4 100644 --- a/numpy/lib/_shape_base_impl.py +++ b/numpy/lib/_shape_base_impl.py @@ -1,6 +1,7 @@ import functools import warnings +import numpy as np import numpy._core.numeric as _nx from numpy._core import atleast_3d, overrides, vstack from numpy._core._multiarray_umath import _array_converter @@ -171,15 +172,13 @@ def take_along_axis(arr, indices, axis=-1): if indices.ndim != 1: raise ValueError( 'when axis=None, `indices` must have a single dimension.') - arr = arr.flat - arr_shape = (len(arr),) # flatiter has no .shape + arr = np.array(arr.flat) axis = 0 else: axis = normalize_axis_index(axis, arr.ndim) - arr_shape = arr.shape # use the fancy index - return arr[_make_along_axis_idx(arr_shape, indices, axis)] + return arr[_make_along_axis_idx(arr.shape, indices, axis)] def _put_along_axis_dispatcher(arr, indices, values, axis): @@ -263,15 +262,13 @@ 
def put_along_axis(arr, indices, values, axis): if indices.ndim != 1: raise ValueError( 'when axis=None, `indices` must have a single dimension.') - arr = arr.flat + arr = np.array(arr.flat) axis = 0 - arr_shape = (len(arr),) # flatiter has no .shape else: axis = normalize_axis_index(axis, arr.ndim) - arr_shape = arr.shape # use the fancy index - arr[_make_along_axis_idx(arr_shape, indices, axis)] = values + arr[_make_along_axis_idx(arr.shape, indices, axis)] = values def _apply_along_axis_dispatcher(func1d, axis, arr, *args, **kwargs): diff --git a/numpy/matrixlib/tests/test_masked_matrix.py b/numpy/matrixlib/tests/test_masked_matrix.py index e6df047ee6ca..3f9414ff7d30 100644 --- a/numpy/matrixlib/tests/test_masked_matrix.py +++ b/numpy/matrixlib/tests/test_masked_matrix.py @@ -116,7 +116,7 @@ def test_flat(self): # Test setting test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) testflat = test.flat - testflat[:] = testflat[[2, 1, 0]] + testflat[:] = testflat[np.array([2, 1, 0])] assert_equal(test, control) testflat[0] = 9 # test that matrices keep the correct shape (#4615) diff --git a/numpy/typing/tests/data/fail/flatiter.pyi b/numpy/typing/tests/data/fail/flatiter.pyi index 06e23fed9e3f..cdf2a8b78b7b 100644 --- a/numpy/typing/tests/data/fail/flatiter.pyi +++ b/numpy/typing/tests/data/fail/flatiter.pyi @@ -18,3 +18,5 @@ a.copy(order='C') # type: ignore[call-arg] a[np.bool()] # type: ignore[index] a[Index()] # type: ignore[call-overload] a[supports_array] # type: ignore[index] + +a[[0, 1, 2]] diff --git a/numpy/typing/tests/data/pass/flatiter.py b/numpy/typing/tests/data/pass/flatiter.py index e64e4261b8e7..cc7c6069a89a 100644 --- a/numpy/typing/tests/data/pass/flatiter.py +++ b/numpy/typing/tests/data/pass/flatiter.py @@ -9,7 +9,6 @@ iter(a) next(a) a[0] -a[[0, 1, 2]] a[...] 
a[:] a.__array__() From c7aae66faf994facdf74f648cd94554f11740a5c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 8 Aug 2025 17:34:04 +0000 Subject: [PATCH 0298/1718] MAINT: Bump github/codeql-action from 3.29.6 to 3.29.8 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.29.6 to 3.29.8. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/a4e1a019f5e24960714ff6296aee04b736cbc3cf...76621b61decf072c1cee8dd1ce2d2a82d33c17ed) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.29.8 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 59489ea79e65..cbf79b02142b 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@a4e1a019f5e24960714ff6296aee04b736cbc3cf # v3.29.6 + uses: github/codeql-action/init@76621b61decf072c1cee8dd1ce2d2a82d33c17ed # v3.29.8 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 
# If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@a4e1a019f5e24960714ff6296aee04b736cbc3cf # v3.29.6 + uses: github/codeql-action/autobuild@76621b61decf072c1cee8dd1ce2d2a82d33c17ed # v3.29.8 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@a4e1a019f5e24960714ff6296aee04b736cbc3cf # v3.29.6 + uses: github/codeql-action/analyze@76621b61decf072c1cee8dd1ce2d2a82d33c17ed # v3.29.8 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index e2508a09bba7..cf637aa947a0 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
- name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@a4e1a019f5e24960714ff6296aee04b736cbc3cf # v2.1.27 + uses: github/codeql-action/upload-sarif@76621b61decf072c1cee8dd1ce2d2a82d33c17ed # v2.1.27 with: sarif_file: results.sarif From e11fea7c7b0567b9b0e5558d295a1cbca4877be7 Mon Sep 17 00:00:00 2001 From: Zhi Li Date: Fri, 8 Aug 2025 16:35:54 -0400 Subject: [PATCH 0299/1718] DOC: Add 'today' string to datetime64 documentation (#29514) --- doc/source/reference/arrays.datetime.rst | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/doc/source/reference/arrays.datetime.rst b/doc/source/reference/arrays.datetime.rst index 8dbff88c918e..2095dc3aea49 100644 --- a/doc/source/reference/arrays.datetime.rst +++ b/doc/source/reference/arrays.datetime.rst @@ -53,7 +53,8 @@ months ('M'), weeks ('W'), and days ('D'), while the time units are hours ('h'), minutes ('m'), seconds ('s'), milliseconds ('ms'), and some additional SI-prefix seconds-based units. The `datetime64` data type also accepts the string "NAT", in any combination of lowercase/uppercase -letters, for a "Not A Time" value. +letters, for a "Not A Time" value. The string "today" is also supported and +returns the current UTC date with day precision. .. admonition:: Example @@ -91,6 +92,11 @@ letters, for a "Not A Time" value. >>> np.datetime64('nat') np.datetime64('NaT') + The current date: + + >>> np.datetime64('today') + np.datetime64('2025-08-05') # result will depend on the current date + When creating an array of datetimes from a string, it is still possible to automatically select the unit from the inputs, by using the datetime type with generic units. 
From 4d7161dc035a2666eb005dbe4256534d8747912e Mon Sep 17 00:00:00 2001 From: Zhi Li Date: Fri, 8 Aug 2025 22:17:44 -0400 Subject: [PATCH 0300/1718] DOC: Add 'now' string to datetime64 documentation [skip azp] [skip actions] [skip cirrus] --- doc/source/reference/arrays.datetime.rst | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/doc/source/reference/arrays.datetime.rst b/doc/source/reference/arrays.datetime.rst index 2095dc3aea49..ac402c973fd8 100644 --- a/doc/source/reference/arrays.datetime.rst +++ b/doc/source/reference/arrays.datetime.rst @@ -53,8 +53,13 @@ months ('M'), weeks ('W'), and days ('D'), while the time units are hours ('h'), minutes ('m'), seconds ('s'), milliseconds ('ms'), and some additional SI-prefix seconds-based units. The `datetime64` data type also accepts the string "NAT", in any combination of lowercase/uppercase -letters, for a "Not A Time" value. The string "today" is also supported and -returns the current UTC date with day precision. +letters, for a "Not A Time" value. The string "now" is also supported and +returns the current UTC time. By default, it uses second ('s') precision, but +you can specify a different unit (e.g., 'M', 'D', 'h') to truncate the result +to that precision. Units finer than seconds (such as 'ms' or 'ns') are +supported but will show fractional parts as zeros, effectively truncating to +whole seconds. The string "today" is also supported and returns the current UTC +date with day precision. .. admonition:: Example @@ -92,6 +97,17 @@ returns the current UTC date with day precision. 
>>> np.datetime64('nat') np.datetime64('NaT') + The current time (UTC, default second precision): + + >>> np.datetime64('now') + np.datetime64('2025-08-05T02:22:14') # result will depend on the current time + + >>> np.datetime64('now', 'D') + np.datetime64('2025-08-05') + + >>> np.datetime64('now', 'ms') + np.datetime64('2025-08-05T02:22:14.000') + The current date: >>> np.datetime64('today') From 0f39fd27ff149b25f74fc58d589c92b4fd881226 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Tue, 29 Oct 2024 20:43:03 +0100 Subject: [PATCH 0301/1718] BLD: update licensing metadata to use PEP 639 --- LICENSES_bundled.txt | 36 ------------- meson.build | 9 ++-- .../_core/src/multiarray/dragon4_LICENSE.txt | 27 ++++++++++ pyproject.toml | 52 +++++++++++++++++-- tools/wheels/check_license.py | 2 +- tools/wheels/cibw_before_build.sh | 1 - 6 files changed, 78 insertions(+), 49 deletions(-) delete mode 100644 LICENSES_bundled.txt create mode 100644 numpy/_core/src/multiarray/dragon4_LICENSE.txt diff --git a/LICENSES_bundled.txt b/LICENSES_bundled.txt deleted file mode 100644 index b3d8aa8bed06..000000000000 --- a/LICENSES_bundled.txt +++ /dev/null @@ -1,36 +0,0 @@ -The NumPy repository and source distributions bundle several libraries that are -compatibly licensed. We list these here. 
- -Name: lapack-lite -Files: numpy/linalg/lapack_lite/* -License: BSD-3-Clause - For details, see numpy/linalg/lapack_lite/LICENSE.txt - -Name: dragon4 -Files: numpy/_core/src/multiarray/dragon4.c -License: MIT - For license text, see numpy/_core/src/multiarray/dragon4.c - -Name: libdivide -Files: numpy/_core/include/numpy/libdivide/* -License: Zlib - For license text, see numpy/_core/include/numpy/libdivide/LICENSE.txt - - -Note that the following files are vendored in the repository and sdist but not -installed in built numpy packages: - -Name: Meson -Files: vendored-meson/meson/* -License: Apache 2.0 - For license text, see vendored-meson/meson/COPYING - -Name: spin -Files: .spin/cmds.py -License: BSD-3 - For license text, see .spin/LICENSE - -Name: tempita -Files: numpy/_build_utils/tempita/* -License: MIT - For details, see numpy/_build_utils/tempita/LICENCE.txt diff --git a/meson.build b/meson.build index 0d436352cbbd..2cb7ce987ad5 100644 --- a/meson.build +++ b/meson.build @@ -1,12 +1,9 @@ project( 'NumPy', 'c', 'cpp', 'cython', - version: run_command( - # This should become `numpy/_version.py` in NumPy 2.0 - ['numpy/_build_utils/gitversion.py'], - check: true).stdout().strip(), - license: 'BSD-3', - meson_version: '>=1.5.2', # version in vendored-meson is 1.5.2 + version: run_command(['numpy/_build_utils/gitversion.py'], check: true).stdout().strip(), + license: 'BSD-3-Clause AND 0BSD AND MIT AND Zlib AND CC0-1.0', + meson_version: '>=1.8.3', # version in vendored-meson default_options: [ 'buildtype=debugoptimized', 'b_ndebug=if-release', diff --git a/numpy/_core/src/multiarray/dragon4_LICENSE.txt b/numpy/_core/src/multiarray/dragon4_LICENSE.txt new file mode 100644 index 000000000000..7bd49e7074a8 --- /dev/null +++ b/numpy/_core/src/multiarray/dragon4_LICENSE.txt @@ -0,0 +1,27 @@ +Copyright (c) 2014 Ryan Juckett + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the 
"Software"), to +deal in the Software without restriction, including without limitation the +rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +IN THE SOFTWARE. + +dragon4.c|h h contains a modified version of Ryan Juckett's Dragon4 +implementation, obtained from https://www.ryanjuckett.com, +which has been ported from C++ to C and which has +modifications specific to printing floats in numpy. + +Ryan Juckett's original code was under the Zlib license; he gave numpy +permission to include it under the MIT license instead. diff --git a/pyproject.toml b/pyproject.toml index f9cfeadee599..b678be83a486 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,16 +1,13 @@ [build-system] build-backend = "mesonpy" requires = [ - "meson-python>=0.15.0", + "meson-python>=0.18.0", "Cython>=3.0.6", # keep in sync with version check in meson.build ] [project] name = "numpy" version = "2.4.0.dev0" -# TODO: add `license-files` once PEP 639 is accepted (see meson-python#88) -license = {file = "LICENSE.txt"} - description = "Fundamental package for array computing in Python" authors = [{name = "Travis E. 
Oliphant et al."}] maintainers = [ @@ -22,7 +19,6 @@ classifiers = [ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Science/Research', 'Intended Audience :: Developers', - 'License :: OSI Approved :: BSD License', 'Programming Language :: C', 'Programming Language :: Python', 'Programming Language :: Python :: 3', @@ -40,6 +36,52 @@ classifiers = [ 'Operating System :: Unix', 'Operating System :: MacOS', ] +# License info: +# - The main NumPy project license is BSD-3-Clause. +# - The SPDX license expression below reflects installed numpy packages when +# built from source (e.g., with `python -m build --wheel`), with no vendoring. +# - That SPDX expression is therefore incomplete for: +# (a) sdists - see the comment below `license-files` for other licenses +# included in the sdist +# (b) wheels on PyPI - most wheels include vendored libraries with additional licenses: +# - libopenblas : BSD-3-Clause AND BSD-3-Clause-Attribution (all except arm64 macOS>=14) +# - libgfortran : GPL-3.0-with-GCC-exception (all except arm64 macOS>=14) +# - libquadmath : LGPL-2.1-or-later (all except arm64 macOS>=14 and Windows) +# The licenses for these vendored components are dynamically included +# in the build process for PyPI wheels. 
+license = 'BSD-3-Clause AND 0BSD AND MIT AND Zlib AND CC0-1.0' +license-files = [ + 'LICENSE.txt', # BSD-3-Clause + 'numpy/_core/include/numpy/libdivide/LICENSE.txt', # Zlib + 'numpy/_core/src/common/pythoncapi-compat/COPYING', # 0BSD + 'numpy/_core/src/highway/LICENSE-BSD3', # BSD-3-Clause + 'numpy/_core/src/multiarray/dragon4_LICENSE.txt', # MIT + 'numpy/_core/src/npysort/x86-simd-sort/LICENSE.md', # BSD-3-Clause + 'numpy/_core/src/umath/svml/LICENSE', # BSD-3-Clause + 'numpy/fft/pocketfft/LICENSE.md', # BSD-3-Clause + 'numpy/ma/LICENSE', # BSD-3-Clause + 'numpy/linalg/lapack_lite/LICENSE.txt', # BSD-3-Clause + 'numpy/random/LICENSE.md', # BSD-3-Clause + 'numpy/random/src/distributions/LICENSE.md', # BSD-3-Clause AND MIT + 'numpy/random/src/mt19937/LICENSE.md', # BSD-3-Clause AND MIT + 'numpy/random/src/pcg64/LICENSE.md', # MIT + 'numpy/random/src/philox/LICENSE.md', # BSD-3-Clause + 'numpy/random/src/sfc64/LICENSE.md', # MIT + 'numpy/random/src/splitmix64/LICENSE.md', # CC0-1.0 +] +# The license files below apply only to files in the repo and sdist, not to +# installed `numpy` packages or wheels (build/doc tools don't affect the +# license of the installed package). We have to make a choice whether to add +# those to the SPDX expression above since PEP 639 is unclear on the +# differences; we choose to make the SPDX expression reflect *a wheel built +# from the sources*. +# '.spin/LICENSE', # BSD-3-Clause +# 'doc/source/_static/scipy-mathjax/LICENSE', # Apache-2.0 +# 'numpy/_build_utils/tempita/LICENSE.txt', # BSD-3-Clause +# 'vendored-meson/meson/COPYING', # Apache-2.0 +# Note that the commented out license files are still included in the sdist, +# just not in Core Metadata and in the .dist-info directory. 
+ [project.scripts] f2py = 'numpy.f2py.f2py2e:main' diff --git a/tools/wheels/check_license.py b/tools/wheels/check_license.py index 572295b4ca2f..9aa50015d00b 100644 --- a/tools/wheels/check_license.py +++ b/tools/wheels/check_license.py @@ -38,7 +38,7 @@ def main(): distinfo_path = next(iter(sitepkgs.glob("numpy-*.dist-info"))) # Check license text - license_txt = distinfo_path / "LICENSE.txt" + license_txt = distinfo_path / "licenses" / "LICENSE.txt" with open(license_txt, encoding="utf-8") as f: text = f.read() diff --git a/tools/wheels/cibw_before_build.sh b/tools/wheels/cibw_before_build.sh index e41e5d37316b..ed2640471fed 100644 --- a/tools/wheels/cibw_before_build.sh +++ b/tools/wheels/cibw_before_build.sh @@ -10,7 +10,6 @@ rm -rf build echo "" >> $PROJECT_DIR/LICENSE.txt echo "----" >> $PROJECT_DIR/LICENSE.txt echo "" >> $PROJECT_DIR/LICENSE.txt -cat $PROJECT_DIR/LICENSES_bundled.txt >> $PROJECT_DIR/LICENSE.txt if [[ $RUNNER_OS == "Linux" ]] ; then cat $PROJECT_DIR/tools/wheels/LICENSE_linux.txt >> $PROJECT_DIR/LICENSE.txt elif [[ $RUNNER_OS == "macOS" ]]; then From 7c34da750a1f46de91ac66d34e5feed62de2d018 Mon Sep 17 00:00:00 2001 From: Maryanne Wachter Date: Mon, 11 Aug 2025 13:03:15 -0700 Subject: [PATCH 0302/1718] BUG: left bit shift undefined behavior (#29539) --- numpy/_core/src/common/simd/avx2/arithmetic.h | 6 +++--- numpy/_core/src/common/simd/sse/arithmetic.h | 6 +++--- tools/ci/ubsan_suppressions_x86_64.txt | 3 --- 3 files changed, 6 insertions(+), 9 deletions(-) diff --git a/numpy/_core/src/common/simd/avx2/arithmetic.h b/numpy/_core/src/common/simd/avx2/arithmetic.h index 58d842a6d3a4..15b9be85dc51 100644 --- a/numpy/_core/src/common/simd/avx2/arithmetic.h +++ b/numpy/_core/src/common/simd/avx2/arithmetic.h @@ -215,9 +215,9 @@ NPY_FINLINE npyv_s64 npyv_divc_s64(npyv_s64 a, const npyv_s64x3 divisor) // q = (a + mulhi) >> sh __m256i q = _mm256_add_epi64(a, mulhi); // emulate arithmetic right shift - const __m256i sigb = npyv_setall_s64(1LL << 
63); - q = _mm256_srl_epi64(_mm256_add_epi64(q, sigb), shf1); - q = _mm256_sub_epi64(q, _mm256_srl_epi64(sigb, shf1)); + const __m256i sbit = npyv_setall_s64(0x8000000000000000); + q = _mm256_srl_epi64(_mm256_add_epi64(q, sbit), shf1); + q = _mm256_sub_epi64(q, _mm256_srl_epi64(sbit, shf1)); // q = q - XSIGN(a) // trunc(a/d) = (q ^ dsign) - dsign q = _mm256_sub_epi64(q, asign); diff --git a/numpy/_core/src/common/simd/sse/arithmetic.h b/numpy/_core/src/common/simd/sse/arithmetic.h index 357b136d25cd..b50942ab75ad 100644 --- a/numpy/_core/src/common/simd/sse/arithmetic.h +++ b/numpy/_core/src/common/simd/sse/arithmetic.h @@ -251,9 +251,9 @@ NPY_FINLINE npyv_s64 npyv_divc_s64(npyv_s64 a, const npyv_s64x3 divisor) // q = (a + mulhi) >> sh __m128i q = _mm_add_epi64(a, mulhi); // emulate arithmetic right shift - const __m128i sigb = npyv_setall_s64(1LL << 63); - q = _mm_srl_epi64(_mm_add_epi64(q, sigb), divisor.val[1]); - q = _mm_sub_epi64(q, _mm_srl_epi64(sigb, divisor.val[1])); + const __m128i sbit = npyv_setall_s64(0x8000000000000000); + q = _mm_srl_epi64(_mm_add_epi64(q, sbit), divisor.val[1]); + q = _mm_sub_epi64(q, _mm_srl_epi64(sbit, divisor.val[1])); // q = q - XSIGN(a) // trunc(a/d) = (q ^ dsign) - dsign q = _mm_sub_epi64(q, asign); diff --git a/tools/ci/ubsan_suppressions_x86_64.txt b/tools/ci/ubsan_suppressions_x86_64.txt index d9872a691a81..5e4316ce3715 100644 --- a/tools/ci/ubsan_suppressions_x86_64.txt +++ b/tools/ci/ubsan_suppressions_x86_64.txt @@ -11,9 +11,6 @@ signed-integer-overflow:* # otherwise ubsan may detect system file alignment errors outside numpy alignment:* -# suggested fix for runtime error: replace left bit shift with LLONG_MIN constant -shift-base:_core/src/common/simd/sse/arithmetic.h -shift-base:_core/src/common/simd/avx2/arithmetic.h # suggested fix for runtime error: use INT_MIN constant shift-base:_core/src/umath/_rational_tests.c # suggested fix for runtime error: check for overflow if signed From 
a2f142b6039413dfb43f79aee16dfa3b19c37d1f Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Mon, 11 Aug 2025 21:29:06 +0100 Subject: [PATCH 0303/1718] TYP: Type ``MaskedIterator`` (#29526) --- numpy/ma/core.pyi | 85 ++++++++++++++++++++++++--- numpy/typing/tests/data/pass/ma.py | 7 ++- numpy/typing/tests/data/reveal/ma.pyi | 10 +++- 3 files changed, 91 insertions(+), 11 deletions(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index e99e0a527773..55e5867247bb 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -1,15 +1,20 @@ # pyright: reportIncompatibleMethodOverride=false # ruff: noqa: ANN001, ANN002, ANN003, ANN201, ANN202 ANN204, ANN401 +import datetime as dt from _typeshed import Incomplete -from collections.abc import Sequence +from collections.abc import Iterator, Sequence from typing import ( Any, + Generic, Literal, Never, NoReturn, Self, + SupportsComplex, + SupportsFloat, SupportsIndex, + SupportsInt, TypeAlias, overload, ) @@ -37,6 +42,7 @@ from numpy import ( dtype, dtypes, expand_dims, + flexible, float16, float32, float64, @@ -82,9 +88,11 @@ from numpy._typing import ( _ArrayLikeString_co, _ArrayLikeTD64_co, _ArrayLikeUInt_co, + _CharLike_co, _DTypeLike, _DTypeLikeBool, _IntLike_co, + _NestedSequence, _ScalarLike_co, _Shape, _ShapeLike, @@ -296,6 +304,12 @@ _MaskedArrayTD64_co: TypeAlias = _MaskedArray[timedelta64 | integer | np.bool] _ArrayInt_co: TypeAlias = NDArray[integer | bool_] _Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] +_ConvertibleToInt: TypeAlias = SupportsInt | SupportsIndex | _CharLike_co +_ConvertibleToFloat: TypeAlias = SupportsFloat | SupportsIndex | _CharLike_co +_ConvertibleToComplex: TypeAlias = SupportsComplex | SupportsFloat | SupportsIndex | _CharLike_co +_ConvertibleToTD64: TypeAlias = dt.timedelta | int | _CharLike_co | character | number | timedelta64 | np.bool | None +_ConvertibleToDT64: TypeAlias = dt.date | int | _CharLike_co | 
character | number | datetime64 | np.bool | None + MaskType = bool_ nomask: bool_[Literal[False]] @@ -447,15 +461,68 @@ masked_print_option: _MaskedPrintOption def flatten_structured_array(a): ... -class MaskedIterator: - ma: Any +class MaskedIterator(Generic[_ShapeT_co, _DTypeT_co]): + ma: MaskedArray[_ShapeT_co, _DTypeT_co] dataiter: Any maskiter: Any - def __init__(self, ma): ... - def __iter__(self): ... - def __getitem__(self, indx): ... - def __setitem__(self, index, value): ... - def __next__(self): ... + def __init__(self, ma: MaskedArray[_ShapeT_co, _DTypeT_co]) -> None: ... + def __iter__(self) -> Iterator[Any]: ... + + # Similar to `MaskedArray.__getitem__` but without the `void` case. + @overload + def __getitem__(self, indx: _ArrayInt_co | tuple[_ArrayInt_co, ...], /) -> MaskedArray[_AnyShape, _DTypeT_co]: ... + @overload + def __getitem__(self, indx: SupportsIndex | tuple[SupportsIndex, ...], /) -> Any: ... + @overload + def __getitem__(self, indx: _ToIndices, /) -> MaskedArray[_AnyShape, _DTypeT_co]: ... + + # Similar to `ndarray.__setitem__` but without the `void` case. + @overload # flexible | object_ | bool + def __setitem__( + self: MaskedIterator[Any, dtype[flexible | object_ | np.bool] | dtypes.StringDType], + index: _ToIndices, + value: object, + /, + ) -> None: ... + @overload # integer + def __setitem__( + self: MaskedIterator[Any, dtype[integer]], + index: _ToIndices, + value: _ConvertibleToInt | _NestedSequence[_ConvertibleToInt] | _ArrayLikeInt_co, + /, + ) -> None: ... + @overload # floating + def __setitem__( + self: MaskedIterator[Any, dtype[floating]], + index: _ToIndices, + value: _ConvertibleToFloat | _NestedSequence[_ConvertibleToFloat | None] | _ArrayLikeFloat_co | None, + /, + ) -> None: ... 
+ @overload # complexfloating + def __setitem__( + self: MaskedIterator[Any, dtype[complexfloating]], + index: _ToIndices, + value: _ConvertibleToComplex | _NestedSequence[_ConvertibleToComplex | None] | _ArrayLikeNumber_co | None, + /, + ) -> None: ... + @overload # timedelta64 + def __setitem__( + self: MaskedIterator[Any, dtype[timedelta64]], + index: _ToIndices, + value: _ConvertibleToTD64 | _NestedSequence[_ConvertibleToTD64], + /, + ) -> None: ... + @overload # datetime64 + def __setitem__( + self: MaskedIterator[Any, dtype[datetime64]], + index: _ToIndices, + value: _ConvertibleToDT64 | _NestedSequence[_ConvertibleToDT64], + /, + ) -> None: ... + @overload # catch-all + def __setitem__(self, index: _ToIndices, value: ArrayLike, /) -> None: ... + + def __next__(self) -> Any: ... class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): __array_priority__: Any @@ -607,7 +674,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def baseclass(self) -> type[NDArray[Any]]: ... data: Any @property - def flat(self) -> MaskedIterator: ... + def flat(self) -> MaskedIterator[_ShapeT_co, _DTypeT_co]: ... @flat.setter def flat(self, value: ArrayLike, /) -> None: ... 
@property diff --git a/numpy/typing/tests/data/pass/ma.py b/numpy/typing/tests/data/pass/ma.py index 2e2246fb88d2..3ccea66861eb 100644 --- a/numpy/typing/tests/data/pass/ma.py +++ b/numpy/typing/tests/data/pass/ma.py @@ -42,7 +42,12 @@ MAR_i.fill_value = 0 -MAR_f.flat = [9] +MAR_b.flat[MAR_i > 0] = False +MAR_i.flat[:] = 1 +MAR_f.flat[[0]] = AR_LIKE_f +MAR_c.flat[[0, 0]] = [3, 4 + 3j] +MAR_td64.flat[0] = dt.timedelta(1) +MAR_dt64.flat[0] = dt.datetime(2020, 1, 1) MAR_b[MAR_i > 0] = False MAR_i[:] = 1 diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index a09fb2d75997..bd4e4e98373a 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -462,7 +462,15 @@ assert_type(MAR_f8.argsort(), MaskedArray[np.intp]) assert_type(MAR_f8.argsort(axis=0, kind='heap', order=('x', 'y')), MaskedArray[np.intp]) assert_type(MAR_f8.argsort(endwith=True, fill_value=1.5, stable=False), MaskedArray[np.intp]) -assert_type(MAR_f8.flat, np.ma.core.MaskedIterator) +assert_type(MAR_2d_f4.flat, np.ma.core.MaskedIterator[tuple[int, int], np.dtype[np.float32]]) +assert_type(MAR_2d_f4.flat.ma, np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) +assert_type(MAR_2d_f4.flat[AR_i8], MaskedArray[np.float32]) +assert_type(MAR_2d_f4.flat[[1, 2, 3]], MaskedArray[np.float32]) +assert_type(MAR_2d_f4.flat[1:], MaskedArray[np.float32]) +assert_type(MAR_2d_f4.flat[:], MaskedArray[np.float32]) +assert_type(MAR_2d_f4.flat[0, 0], Any) +assert_type(MAR_2d_f4.flat[:, np.newaxis], MaskedArray[np.float32]) +assert_type(MAR_2d_f4.flat[..., -1], MaskedArray[np.float32]) def invalid_resize() -> None: assert_type(MAR_f8.resize((1, 1)), NoReturn) # type: ignore[arg-type] From dbae155b78e2dd47dbf3697d40732ddc75bac1ea Mon Sep 17 00:00:00 2001 From: MyUserNameWasTakenLinux Date: Mon, 11 Aug 2025 15:36:40 -0600 Subject: [PATCH 0304/1718] STY: fix typo in dtypemeta.c [skip azp][skip actions] --- numpy/_core/src/multiarray/dtypemeta.c | 2 
+- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/multiarray/dtypemeta.c b/numpy/_core/src/multiarray/dtypemeta.c index 5ac7ef3b2320..293d7dfee1b3 100644 --- a/numpy/_core/src/multiarray/dtypemeta.c +++ b/numpy/_core/src/multiarray/dtypemeta.c @@ -1398,7 +1398,7 @@ PyArray_DTypeMeta *_Void_dtype = NULL; /*NUMPY_API - * Fetch the ArrFuncs struct which new lives on the DType and not the + * Fetch the ArrFuncs struct which now lives on the DType and not the * descriptor. Use of this struct should be avoided but remains necessary * for certain functionality. * From 05dbc23b5245b4f0e6fec7ef2329cffb85d3c62e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 12 Aug 2025 01:44:01 +0000 Subject: [PATCH 0305/1718] MAINT: Bump actions/checkout from 4.2.2 to 5.0.0 Bumps [actions/checkout](https://github.com/actions/checkout) from 4.2.2 to 5.0.0. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/11bd71901bbe5b1630ceea73d27597364c9af683...08c6903cd8c0fde910a37f88322edcfb5dd907a8) --- updated-dependencies: - dependency-name: actions/checkout dependency-version: 5.0.0 dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 2 +- .github/workflows/compiler_sanitizers.yml | 6 +++--- .github/workflows/cygwin.yml | 2 +- .github/workflows/dependency-review.yml | 2 +- .github/workflows/emscripten.yml | 2 +- .github/workflows/linux.yml | 24 +++++++++++------------ .github/workflows/linux_blas.yml | 18 ++++++++--------- .github/workflows/linux_qemu.yml | 4 ++-- .github/workflows/linux_simd.yml | 12 ++++++------ .github/workflows/macos.yml | 4 ++-- .github/workflows/mypy.yml | 2 +- .github/workflows/mypy_primer.yml | 2 +- .github/workflows/scorecards.yml | 2 +- .github/workflows/wheels.yml | 6 +++--- .github/workflows/windows.yml | 6 +++--- 15 files changed, 47 insertions(+), 47 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index cbf79b02142b..267d4c5b768f 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -41,7 +41,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: persist-credentials: false diff --git a/.github/workflows/compiler_sanitizers.yml b/.github/workflows/compiler_sanitizers.yml index dc65449ba752..e91468b85068 100644 --- a/.github/workflows/compiler_sanitizers.yml +++ b/.github/workflows/compiler_sanitizers.yml @@ -26,7 +26,7 @@ jobs: if: github.repository == 'numpy/numpy' runs-on: macos-latest steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -82,7 +82,7 @@ jobs: options: --shm-size=2g # increase memory for large matrix ops steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - name: Trust working directory and initialize submodules run: 
| git config --global --add safe.directory /__w/numpy/numpy @@ -106,7 +106,7 @@ jobs: if: github.repository == 'numpy/numpy' runs-on: ubuntu-latest steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/cygwin.yml b/.github/workflows/cygwin.yml index 174d04efb567..9d95da102fee 100644 --- a/.github/workflows/cygwin.yml +++ b/.github/workflows/cygwin.yml @@ -18,7 +18,7 @@ jobs: # To enable this workflow on a fork, comment out: if: github.repository == 'numpy/numpy' steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml index 5036a94ce399..158a826825f1 100644 --- a/.github/workflows/dependency-review.yml +++ b/.github/workflows/dependency-review.yml @@ -15,7 +15,7 @@ jobs: runs-on: ubuntu-latest steps: - name: 'Checkout Repository' - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: persist-credentials: false - name: 'Dependency Review' diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index 8269a8dcd705..a1d5b399988e 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -43,7 +43,7 @@ jobs: if: github.repository == 'numpy/numpy' steps: - name: Checkout NumPy - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 7ac5ea673d61..40e372cbf3e4 100644 --- 
a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -33,7 +33,7 @@ jobs: runs-on: ubuntu-latest continue-on-error: true steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-depth: 0 @@ -60,7 +60,7 @@ jobs: matrix: version: ["3.11", "3.12", "3.13", "3.14-dev", "3.14t-dev"] steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -75,7 +75,7 @@ jobs: runs-on: ubuntu-latest if: github.event_name != 'push' steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -94,7 +94,7 @@ jobs: runs-on: ubuntu-24.04 if: github.event_name != 'push' steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -124,7 +124,7 @@ jobs: needs: [smoke_test] runs-on: ubuntu-22.04 steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -163,7 +163,7 @@ jobs: if: github.repository == 'numpy/numpy' runs-on: ubuntu-22.04-arm steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -199,7 +199,7 @@ jobs: if: github.repository == 'numpy/numpy' runs-on: ubuntu-22.04-arm steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: 
actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -241,7 +241,7 @@ jobs: runs-on: ubuntu-latest if: github.event_name != 'push' steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -280,7 +280,7 @@ jobs: runs-on: ubuntu-latest if: github.event_name != 'push' steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -315,13 +315,13 @@ jobs: if: github.event_name != 'push' steps: - name: Checkout NumPy - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true persist-credentials: false - name: Checkout array-api-tests - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: repository: data-apis/array-api-tests ref: '3c273cd34d51c64ed893737306d36adab23a94a1' # v2025.05.23 @@ -353,7 +353,7 @@ jobs: runs-on: ubuntu-latest if: github.event_name != 'push' steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/linux_blas.yml b/.github/workflows/linux_blas.yml index 54d217cc12fb..633de1fbb84c 100644 --- a/.github/workflows/linux_blas.yml +++ b/.github/workflows/linux_blas.yml @@ -65,7 +65,7 @@ jobs: USE_NIGHTLY_OPENBLAS: ${{ matrix.USE_NIGHTLY_OPENBLAS }} name: "Test Linux (${{ matrix.USE_NIGHTLY_OPENBLAS && 'nightly' || 'stable' }} OpenBLAS)" steps: - - uses: 
actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -127,7 +127,7 @@ jobs: run: | dnf install git gcc-gfortran g++ python3-devel openblas-devel -y - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -163,7 +163,7 @@ jobs: run: | dnf install git gcc-gfortran g++ python3-devel flexiblas-devel -y - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -194,7 +194,7 @@ jobs: runs-on: ubuntu-latest name: "OpenBLAS with CMake" steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -223,7 +223,7 @@ jobs: runs-on: ubuntu-latest name: "Debian libblas/liblapack" steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -260,7 +260,7 @@ jobs: # If it is needed in the future, use install name `pkgconf-pkg-config` zypper install -y git gcc-c++ python3-pip python3-devel blas cblas lapack - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -285,7 +285,7 @@ jobs: runs-on: ubuntu-latest name: "MKL (LP64, ILP64, SDL)" steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive 
fetch-tags: true @@ -349,7 +349,7 @@ jobs: runs-on: ubuntu-latest name: "BLIS" steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -386,7 +386,7 @@ jobs: runs-on: ubuntu-latest name: "ATLAS" steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/linux_qemu.yml b/.github/workflows/linux_qemu.yml index 84e221439aa6..53780ae097a3 100644 --- a/.github/workflows/linux_qemu.yml +++ b/.github/workflows/linux_qemu.yml @@ -91,7 +91,7 @@ jobs: name: "${{ matrix.BUILD_PROP[0] }}" steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -198,7 +198,7 @@ jobs: name: "${{ matrix.BUILD_PROP[0] }}" steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/linux_simd.yml b/.github/workflows/linux_simd.yml index a9f065e25cc0..dcc689739d20 100644 --- a/.github/workflows/linux_simd.yml +++ b/.github/workflows/linux_simd.yml @@ -58,7 +58,7 @@ jobs: env: MESON_ARGS: "-Dallow-noblas=true -Dcpu-dispatch=none" steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -76,7 +76,7 @@ jobs: env: MESON_ARGS: "-Dallow-noblas=true" steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 
with: submodules: recursive fetch-tags: true @@ -123,7 +123,7 @@ jobs: args: "-Dallow-noblas=true -Dcpu-baseline=native -Dcpu-dispatch=none" name: "ARM64 SIMD - ${{ matrix.config.name }}" steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -176,7 +176,7 @@ jobs: name: "${{ matrix.BUILD_PROP[0] }}" steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -191,7 +191,7 @@ jobs: needs: [baseline_only] runs-on: ubuntu-24.04 steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -242,7 +242,7 @@ jobs: needs: [baseline_only] runs-on: ubuntu-24.04 steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 8931b76c5dee..da0972678b15 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -29,7 +29,7 @@ jobs: python-version: ["3.12"] steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -118,7 +118,7 @@ jobs: version: ["3.11", "3.14t-dev"] steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml index 
81fa57239b9b..e362a29f628a 100644 --- a/.github/workflows/mypy.yml +++ b/.github/workflows/mypy.yml @@ -50,7 +50,7 @@ jobs: - [ubuntu-latest, '3.12'] - [windows-latest, '3.11'] steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/mypy_primer.yml b/.github/workflows/mypy_primer.yml index bfbf34fa7817..544c568a522d 100644 --- a/.github/workflows/mypy_primer.yml +++ b/.github/workflows/mypy_primer.yml @@ -24,7 +24,7 @@ jobs: shard-index: [0] # e.g. change this to [0, 1, 2] and --num-shards below to 3 fail-fast: false steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: path: numpy_to_test fetch-depth: 0 diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index cf637aa947a0..50fb7a12f432 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -25,7 +25,7 @@ jobs: steps: - name: "Checkout code" - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v3.1.0 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v3.1.0 with: persist-credentials: false diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 4fcd8a58f53a..e87af415d3c5 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -49,7 +49,7 @@ jobs: message: ${{ steps.commit_message.outputs.message }} steps: - name: Checkout numpy - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 # Gets the correct commit message for pull request with: ref: ${{ github.event.pull_request.head.sha }} @@ -115,7 +115,7 @@ jobs: IS_SCHEDULE_DISPATCH: ${{ github.event_name == 'schedule' || github.event_name == 
'workflow_dispatch' }} steps: - name: Checkout numpy - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: true persist-credentials: false @@ -245,7 +245,7 @@ jobs: # IS_SCHEDULE_DISPATCH: ${{ github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' }} steps: - name: Checkout numpy - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: true persist-credentials: false diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index e723b787a5de..dba4f74496b2 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -28,7 +28,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -99,7 +99,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true @@ -173,7 +173,7 @@ jobs: if: github.repository == 'numpy/numpy' steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true From 8276cb4cb3f3031bad510a776b6be8b3ffc1695f Mon Sep 17 00:00:00 2001 From: koki watanabe Date: Tue, 12 Aug 2025 19:18:07 +0900 Subject: [PATCH 0306/1718] fix: File exists error --- tools/ci/check_c_api_usage.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/ci/check_c_api_usage.sh b/tools/ci/check_c_api_usage.sh index 9f4203284823..b04410d7667d 100644 --- a/tools/ci/check_c_api_usage.sh +++ 
b/tools/ci/check_c_api_usage.sh @@ -24,7 +24,7 @@ echo "Scanning $(echo "$ALL_FILES" | wc -l) C/C++ source files..." # Prepare a result file mkdir -p .tmp -OUTPUT=$(mktemp .tmp/c_api_usage_report.XXXXXX.txt) +OUTPUT=$(mktemp .tmp/c_api_usage_report.XXXXXX) echo -e "Running Suspicious C API usage report workflow...\n" > $OUTPUT FAIL=0 From bb198ee93eb15afd80b2706d1b9b483edaf4bf55 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 12 Aug 2025 19:59:18 +0000 Subject: [PATCH 0307/1718] MAINT: Bump github/codeql-action from 3.29.8 to 3.29.9 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.29.8 to 3.29.9. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/76621b61decf072c1cee8dd1ce2d2a82d33c17ed...df559355d593797519d70b90fc8edd5db049e7a2) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.29.9 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index cbf79b02142b..61d9c6ec2248 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@76621b61decf072c1cee8dd1ce2d2a82d33c17ed # v3.29.8 + uses: github/codeql-action/init@df559355d593797519d70b90fc8edd5db049e7a2 # v3.29.9 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 
# If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@76621b61decf072c1cee8dd1ce2d2a82d33c17ed # v3.29.8 + uses: github/codeql-action/autobuild@df559355d593797519d70b90fc8edd5db049e7a2 # v3.29.9 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@76621b61decf072c1cee8dd1ce2d2a82d33c17ed # v3.29.8 + uses: github/codeql-action/analyze@df559355d593797519d70b90fc8edd5db049e7a2 # v3.29.9 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index cf637aa947a0..50871457563e 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@76621b61decf072c1cee8dd1ce2d2a82d33c17ed # v2.1.27 + uses: github/codeql-action/upload-sarif@df559355d593797519d70b90fc8edd5db049e7a2 # v2.1.27 with: sarif_file: results.sarif From 7683d859f6e81735d66289bd1a8a3d3b84186d67 Mon Sep 17 00:00:00 2001 From: Leonardo Paredes <49594379+phdparedes@users.noreply.github.com> Date: Wed, 13 Aug 2025 07:25:51 -0700 Subject: [PATCH 0308/1718] BUG: allow `MaskedArray.fill_value` be a string when `dtype=StringDType` (#29423) * BUG: allow ma.MaskedArray.fill_value be a string when dtype=StringDType This fix adds the StringDType data type character code 'T' to the list of accepted data types that can use a 'fill_value' as string. Currently just accepts None. "See #29421" Issue. 
* TST: add tests for StringDType MaskedArray use * TST: add tests for fill_value in StringDType MaskedArray * TST: fix format in the test functions added * Add default fill value 'N/A' for StringDType * TST: Add tests for StringDType masked array default fill and slicing * DOC: add release note for StringDType fill_value support (gh-29423) --------- Co-authored-by: Nathan Goldbaum --- .../upcoming_changes/29423.new_feature.rst | 7 ++ numpy/ma/core.py | 26 ++++--- numpy/ma/tests/test_core.py | 77 +++++++++++++++++-- 3 files changed, 93 insertions(+), 17 deletions(-) create mode 100644 doc/release/upcoming_changes/29423.new_feature.rst diff --git a/doc/release/upcoming_changes/29423.new_feature.rst b/doc/release/upcoming_changes/29423.new_feature.rst new file mode 100644 index 000000000000..7e83604b0049 --- /dev/null +++ b/doc/release/upcoming_changes/29423.new_feature.rst @@ -0,0 +1,7 @@ +``StringDType`` fill_value support in `numpy.ma.MaskedArray` +------------------------------------------------------------ +Masked arrays now accept and preserve a Python ``str`` as their ``fill_value`` when +using the variable‑width ``StringDType`` (kind ``'T'``), including through slicing +and views. The default is ``'N/A'`` and may be overridden by any valid string. +This fixes issue `gh‑29421 `__ and was +implemented in pull request `gh‑29423 `__. diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 9b705460dd85..621dbd94640b 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -181,7 +181,8 @@ class MaskError(MAError): 'S': b'N/A', 'u': 999999, 'V': b'???', - 'U': 'N/A' + 'U': 'N/A', + 'T': 'N/A' } # Add datetime64 and timedelta64 types @@ -264,16 +265,17 @@ def default_fill_value(obj): The default filling value depends on the datatype of the input array or the type of the input scalar: - ======== ======== - datatype default - ======== ======== - bool True - int 999999 - float 1.e20 - complex 1.e20+0j - object '?' 
- string 'N/A' - ======== ======== + =========== ======== + datatype default + =========== ======== + bool True + int 999999 + float 1.e20 + complex 1.e20+0j + object '?' + string 'N/A' + StringDType 'N/A' + =========== ======== For structured types, a structured scalar is returned, with each field the default fill value for its type. @@ -498,7 +500,7 @@ def _check_fill_value(fill_value, ndtype): fill_value = np.asarray(fill_value, dtype=object) fill_value = np.array(_recursive_set_fill_value(fill_value, ndtype), dtype=ndtype) - elif isinstance(fill_value, str) and (ndtype.char not in 'OSVU'): + elif isinstance(fill_value, str) and (ndtype.char not in 'OSTVU'): # Note this check doesn't work if fill_value is not a scalar err_msg = "Cannot set fill value of string with array of dtype %s" raise TypeError(err_msg % ndtype) diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index 5327963999e0..bdef61dac3ba 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -1810,7 +1810,7 @@ def test_eq_ne_structured_extra(self): el_by_el = [m1[name] != m2[name] for name in dt.names] assert_equal(array(el_by_el, dtype=bool).any(), ne_expected) - @pytest.mark.parametrize('dt', ['S', 'U']) + @pytest.mark.parametrize('dt', ['S', 'U', 'T']) @pytest.mark.parametrize('fill', [None, 'A']) def test_eq_for_strings(self, dt, fill): # Test the equality of structured arrays @@ -1842,7 +1842,7 @@ def test_eq_for_strings(self, dt, fill): assert_equal(test.mask, [True, False]) assert_(test.fill_value == True) - @pytest.mark.parametrize('dt', ['S', 'U']) + @pytest.mark.parametrize('dt', ['S', 'U', 'T']) @pytest.mark.parametrize('fill', [None, 'A']) def test_ne_for_strings(self, dt, fill): # Test the equality of structured arrays @@ -1992,15 +1992,23 @@ def test_comparisons_for_numeric(self, op, dt1, dt2, fill): assert_equal(test.mask, [True, False]) assert_(test.fill_value == True) + @pytest.mark.parametrize('dt', ['S', 'U', 'T']) 
@pytest.mark.parametrize('op', [operator.le, operator.lt, operator.ge, operator.gt]) @pytest.mark.parametrize('fill', [None, "N/A"]) - def test_comparisons_strings(self, op, fill): + def test_comparisons_strings(self, dt, op, fill): # See gh-21770, mask propagation is broken for strings (and some other # cases) so we explicitly test strings here. # In principle only == and != may need special handling... - ma1 = masked_array(["a", "b", "cde"], mask=[0, 1, 0], fill_value=fill) - ma2 = masked_array(["cde", "b", "a"], mask=[0, 1, 0], fill_value=fill) + ma1 = masked_array(["a", "b", "cde"], mask=[0, 1, 0], fill_value=fill, dtype=dt) + ma2 = masked_array(["cde", "b", "a"], mask=[0, 1, 0], fill_value=fill, dtype=dt) + assert_equal(op(ma1, ma2)._data, op(ma1._data, ma2._data)) + + if isinstance(fill, str): + fill = np.array(fill, dtype=dt) + + ma1 = masked_array(["a", "b", "cde"], mask=[0, 1, 0], fill_value=fill, dtype=dt) + ma2 = masked_array(["cde", "b", "a"], mask=[0, 1, 0], fill_value=fill, dtype=dt) assert_equal(op(ma1, ma2)._data, op(ma1._data, ma2._data)) @pytest.mark.filterwarnings("ignore:.*Comparison to `None`.*:FutureWarning") @@ -5692,6 +5700,65 @@ def test_default_fill_value_complex(): assert_(default_fill_value(1 + 1j) == 1.e20 + 0.0j) +def test_string_dtype_fill_value_on_construction(): + # Regression test for gh-29421: allow string fill_value on StringDType masked arrays + dt = np.dtypes.StringDType() + data = np.array(["A", "test", "variable", ""], dtype=dt) + mask = [True, False, True, True] + # Prior to the fix, this would TypeError; now it should succeed + arr = np.ma.MaskedArray(data, mask=mask, fill_value="FILL", dtype=dt) + assert isinstance(arr.fill_value, str) + assert arr.fill_value == "FILL" + filled = arr.filled() + # Masked positions should be replaced by 'FILL' + assert filled.tolist() == ["FILL", "test", "FILL", "FILL"] + + +def test_string_dtype_default_fill_value(): + # Regression test for gh-29421: default fill_value for StringDType is 
'N/A' + dt = np.dtypes.StringDType() + data = np.array(['x', 'y', 'z'], dtype=dt) + # no fill_value passed → uses default_fill_value internally + arr = np.ma.MaskedArray(data, mask=[True, False, True], dtype=dt) + # ensure it’s stored as a Python str and equals the expected default + assert isinstance(arr.fill_value, str) + assert arr.fill_value == 'N/A' + # masked slots should be replaced by that default + assert arr.filled().tolist() == ['N/A', 'y', 'N/A'] + + +def test_string_dtype_fill_value_persists_through_slice(): + # Regression test for gh-29421: .fill_value survives slicing/viewing + dt = np.dtypes.StringDType() + arr = np.ma.MaskedArray( + ['a', 'b', 'c'], + mask=[True, False, True], + dtype=dt + ) + arr.fill_value = 'Z' + # slice triggers __array_finalize__ + sub = arr[1:] + # the slice should carry the same fill_value and behavior + assert isinstance(sub.fill_value, str) + assert sub.fill_value == 'Z' + assert sub.filled().tolist() == ['b', 'Z'] + + +def test_setting_fill_value_attribute(): + # Regression test for gh-29421: setting .fill_value post-construction works too + dt = np.dtypes.StringDType() + arr = np.ma.MaskedArray( + ["x", "longstring", "mid"], mask=[False, True, False], dtype=dt + ) + # Setting the attribute should not raise + arr.fill_value = "Z" + assert arr.fill_value == "Z" + # And filled() should use the new fill_value + assert arr.filled()[0] == "x" + assert arr.filled()[1] == "Z" + assert arr.filled()[2] == "mid" + + def test_ufunc_with_output(): # check that giving an output argument always returns that output. # Regression test for gh-8416. 
From 87e208c900ef0ac46bb3ea861c811f94f478698f Mon Sep 17 00:00:00 2001 From: Maanas Arora Date: Thu, 14 Aug 2025 09:20:12 -0400 Subject: [PATCH 0309/1718] BUG: Fix metadata not roundtripping when pickling datetime (#29555) * BUG: fix metadata not roundtripping for datetime pickles * STYLE: Remove unnecessary newline * REF: Simplify metadata handling in arraydescr_setstate function * TST: add test to ensure datetime dtype metadata round-trips on pickling * REF: clearer operation order * TST: add new regression test (gh-29555) * BUG: don't steal reference to Py_None * REF: move metadata variable declarations below for clarity --- numpy/_core/src/multiarray/descriptor.c | 44 ++++++++++++------------- numpy/_core/tests/test_datetime.py | 9 +++++ 2 files changed, 31 insertions(+), 22 deletions(-) diff --git a/numpy/_core/src/multiarray/descriptor.c b/numpy/_core/src/multiarray/descriptor.c index 135deee83c97..852a2c768948 100644 --- a/numpy/_core/src/multiarray/descriptor.c +++ b/numpy/_core/src/multiarray/descriptor.c @@ -2671,8 +2671,10 @@ _get_pickleabletype_from_datetime_metadata(PyArray_Descr *dtype) if (dtype->metadata != NULL) { Py_INCREF(dtype->metadata); PyTuple_SET_ITEM(ret, 0, dtype->metadata); - } else { - PyTuple_SET_ITEM(ret, 0, PyDict_New()); + } + else { + PyTuple_SET_ITEM(ret, 0, Py_None); + Py_INCREF(Py_None); } /* Convert the datetime metadata into a tuple */ @@ -3197,16 +3199,8 @@ arraydescr_setstate(_PyArray_LegacyDescr *self, PyObject *args) self->flags = _descr_find_object((PyArray_Descr *)self); } - /* - * We have a borrowed reference to metadata so no need - * to alter reference count when throwing away Py_None. - */ - if (metadata == Py_None) { - metadata = NULL; - } - - if (PyDataType_ISDATETIME(self) && (metadata != NULL)) { - PyObject *old_metadata; + PyObject *old_metadata, *new_metadata; + if (PyDataType_ISDATETIME(self)) { PyArray_DatetimeMetaData temp_dt_data; if ((! 
PyTuple_Check(metadata)) || (PyTuple_Size(metadata) != 2)) { @@ -3223,20 +3217,26 @@ arraydescr_setstate(_PyArray_LegacyDescr *self, PyObject *args) return NULL; } - old_metadata = self->metadata; - self->metadata = PyTuple_GET_ITEM(metadata, 0); + new_metadata = PyTuple_GET_ITEM(metadata, 0); memcpy((char *) &((PyArray_DatetimeDTypeMetaData *)self->c_metadata)->meta, - (char *) &temp_dt_data, - sizeof(PyArray_DatetimeMetaData)); - Py_XINCREF(self->metadata); - Py_XDECREF(old_metadata); + (char *) &temp_dt_data, + sizeof(PyArray_DatetimeMetaData)); } else { - PyObject *old_metadata = self->metadata; - self->metadata = metadata; - Py_XINCREF(self->metadata); - Py_XDECREF(old_metadata); + new_metadata = metadata; + } + + old_metadata = self->metadata; + /* + * We have a borrowed reference to metadata so no need + * to alter reference count when throwing away Py_None. + */ + if (new_metadata == Py_None) { + new_metadata = NULL; } + self->metadata = new_metadata; + Py_XINCREF(new_metadata); + Py_XDECREF(old_metadata); Py_RETURN_NONE; } diff --git a/numpy/_core/tests/test_datetime.py b/numpy/_core/tests/test_datetime.py index 70b55c500f3d..21151ed11ba0 100644 --- a/numpy/_core/tests/test_datetime.py +++ b/numpy/_core/tests/test_datetime.py @@ -889,6 +889,15 @@ def test_pickle(self): b"I1\nI1\nI1\ntp7\ntp8\ntp9\nb." 
assert_equal(pickle.loads(pkl), np.dtype('>M8[us]')) + def test_gh_29555(self): + # check that dtype metadata round-trips when none + dt = np.dtype('>M8[us]') + assert dt.metadata is None + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + res = pickle.loads(pickle.dumps(dt, protocol=proto)) + assert_equal(res, dt) + assert res.metadata is None + def test_setstate(self): "Verify that datetime dtype __setstate__ can handle bad arguments" dt = np.dtype('>M8[us]') From 8b89d0f127eb76c8f84c14b4814a18814f3bdec7 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Thu, 14 Aug 2025 23:07:19 +0100 Subject: [PATCH 0310/1718] maint: Use double quotes in .pyi files (#29548) --- numpy/__init__.pyi | 180 +++++++++--------- numpy/_array_api_info.pyi | 2 +- numpy/_core/numerictypes.pyi | 18 +- numpy/ctypeslib/_ctypeslib.pyi | 12 +- numpy/dtypes.pyi | 66 +++---- numpy/lib/_function_base_impl.pyi | 2 +- numpy/lib/_type_check_impl.pyi | 44 ++--- numpy/ma/core.pyi | 22 +-- .../tests/data/fail/array_constructors.pyi | 10 +- numpy/typing/tests/data/fail/flatiter.pyi | 2 +- numpy/typing/tests/data/fail/fromnumeric.pyi | 4 +- numpy/typing/tests/data/fail/ma.pyi | 50 ++--- numpy/typing/tests/data/fail/scalars.pyi | 10 +- .../tests/data/reveal/array_constructors.pyi | 18 +- numpy/typing/tests/data/reveal/arrayprint.pyi | 2 +- numpy/typing/tests/data/reveal/getlimits.pyi | 4 +- .../tests/data/reveal/lib_function_base.pyi | 4 +- numpy/typing/tests/data/reveal/ma.pyi | 28 +-- .../typing/tests/data/reveal/numerictypes.pyi | 2 +- .../tests/data/reveal/polynomial_polybase.pyi | 14 +- numpy/typing/tests/data/reveal/scalars.pyi | 2 +- numpy/typing/tests/data/reveal/testing.pyi | 2 +- numpy/version.pyi | 12 +- ruff.toml | 3 + 24 files changed, 258 insertions(+), 255 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 643327f29d1b..bf9ed8c9b802 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -4841,96 +4841,96 
@@ class ufunc: ) -> tuple[dtype, ...]: ... # Parameters: `__name__`, `ntypes` and `identity` -absolute: _UFunc_Nin1_Nout1[L['absolute'], L[20], None] -add: _UFunc_Nin2_Nout1[L['add'], L[22], L[0]] -arccos: _UFunc_Nin1_Nout1[L['arccos'], L[8], None] -arccosh: _UFunc_Nin1_Nout1[L['arccosh'], L[8], None] -arcsin: _UFunc_Nin1_Nout1[L['arcsin'], L[8], None] -arcsinh: _UFunc_Nin1_Nout1[L['arcsinh'], L[8], None] -arctan2: _UFunc_Nin2_Nout1[L['arctan2'], L[5], None] -arctan: _UFunc_Nin1_Nout1[L['arctan'], L[8], None] -arctanh: _UFunc_Nin1_Nout1[L['arctanh'], L[8], None] -bitwise_and: _UFunc_Nin2_Nout1[L['bitwise_and'], L[12], L[-1]] -bitwise_count: _UFunc_Nin1_Nout1[L['bitwise_count'], L[11], None] -bitwise_or: _UFunc_Nin2_Nout1[L['bitwise_or'], L[12], L[0]] -bitwise_xor: _UFunc_Nin2_Nout1[L['bitwise_xor'], L[12], L[0]] -cbrt: _UFunc_Nin1_Nout1[L['cbrt'], L[5], None] -ceil: _UFunc_Nin1_Nout1[L['ceil'], L[7], None] -conjugate: _UFunc_Nin1_Nout1[L['conjugate'], L[18], None] -copysign: _UFunc_Nin2_Nout1[L['copysign'], L[4], None] -cos: _UFunc_Nin1_Nout1[L['cos'], L[9], None] -cosh: _UFunc_Nin1_Nout1[L['cosh'], L[8], None] -deg2rad: _UFunc_Nin1_Nout1[L['deg2rad'], L[5], None] -degrees: _UFunc_Nin1_Nout1[L['degrees'], L[5], None] -divide: _UFunc_Nin2_Nout1[L['divide'], L[11], None] -divmod: _UFunc_Nin2_Nout2[L['divmod'], L[15], None] -equal: _UFunc_Nin2_Nout1[L['equal'], L[23], None] -exp2: _UFunc_Nin1_Nout1[L['exp2'], L[8], None] -exp: _UFunc_Nin1_Nout1[L['exp'], L[10], None] -expm1: _UFunc_Nin1_Nout1[L['expm1'], L[8], None] -fabs: _UFunc_Nin1_Nout1[L['fabs'], L[5], None] -float_power: _UFunc_Nin2_Nout1[L['float_power'], L[4], None] -floor: _UFunc_Nin1_Nout1[L['floor'], L[7], None] -floor_divide: _UFunc_Nin2_Nout1[L['floor_divide'], L[21], None] -fmax: _UFunc_Nin2_Nout1[L['fmax'], L[21], None] -fmin: _UFunc_Nin2_Nout1[L['fmin'], L[21], None] -fmod: _UFunc_Nin2_Nout1[L['fmod'], L[15], None] -frexp: _UFunc_Nin1_Nout2[L['frexp'], L[4], None] -gcd: _UFunc_Nin2_Nout1[L['gcd'], 
L[11], L[0]] -greater: _UFunc_Nin2_Nout1[L['greater'], L[23], None] -greater_equal: _UFunc_Nin2_Nout1[L['greater_equal'], L[23], None] -heaviside: _UFunc_Nin2_Nout1[L['heaviside'], L[4], None] -hypot: _UFunc_Nin2_Nout1[L['hypot'], L[5], L[0]] -invert: _UFunc_Nin1_Nout1[L['invert'], L[12], None] -isfinite: _UFunc_Nin1_Nout1[L['isfinite'], L[20], None] -isinf: _UFunc_Nin1_Nout1[L['isinf'], L[20], None] -isnan: _UFunc_Nin1_Nout1[L['isnan'], L[20], None] -isnat: _UFunc_Nin1_Nout1[L['isnat'], L[2], None] -lcm: _UFunc_Nin2_Nout1[L['lcm'], L[11], None] -ldexp: _UFunc_Nin2_Nout1[L['ldexp'], L[8], None] -left_shift: _UFunc_Nin2_Nout1[L['left_shift'], L[11], None] -less: _UFunc_Nin2_Nout1[L['less'], L[23], None] -less_equal: _UFunc_Nin2_Nout1[L['less_equal'], L[23], None] -log10: _UFunc_Nin1_Nout1[L['log10'], L[8], None] -log1p: _UFunc_Nin1_Nout1[L['log1p'], L[8], None] -log2: _UFunc_Nin1_Nout1[L['log2'], L[8], None] -log: _UFunc_Nin1_Nout1[L['log'], L[10], None] -logaddexp2: _UFunc_Nin2_Nout1[L['logaddexp2'], L[4], float] -logaddexp: _UFunc_Nin2_Nout1[L['logaddexp'], L[4], float] -logical_and: _UFunc_Nin2_Nout1[L['logical_and'], L[20], L[True]] -logical_not: _UFunc_Nin1_Nout1[L['logical_not'], L[20], None] -logical_or: _UFunc_Nin2_Nout1[L['logical_or'], L[20], L[False]] -logical_xor: _UFunc_Nin2_Nout1[L['logical_xor'], L[19], L[False]] -matmul: _GUFunc_Nin2_Nout1[L['matmul'], L[19], None, L["(n?,k),(k,m?)->(n?,m?)"]] -matvec: _GUFunc_Nin2_Nout1[L['matvec'], L[19], None, L["(m,n),(n)->(m)"]] -maximum: _UFunc_Nin2_Nout1[L['maximum'], L[21], None] -minimum: _UFunc_Nin2_Nout1[L['minimum'], L[21], None] -modf: _UFunc_Nin1_Nout2[L['modf'], L[4], None] -multiply: _UFunc_Nin2_Nout1[L['multiply'], L[23], L[1]] -negative: _UFunc_Nin1_Nout1[L['negative'], L[19], None] -nextafter: _UFunc_Nin2_Nout1[L['nextafter'], L[4], None] -not_equal: _UFunc_Nin2_Nout1[L['not_equal'], L[23], None] -positive: _UFunc_Nin1_Nout1[L['positive'], L[19], None] -power: _UFunc_Nin2_Nout1[L['power'], L[18], 
None] -rad2deg: _UFunc_Nin1_Nout1[L['rad2deg'], L[5], None] -radians: _UFunc_Nin1_Nout1[L['radians'], L[5], None] -reciprocal: _UFunc_Nin1_Nout1[L['reciprocal'], L[18], None] -remainder: _UFunc_Nin2_Nout1[L['remainder'], L[16], None] -right_shift: _UFunc_Nin2_Nout1[L['right_shift'], L[11], None] -rint: _UFunc_Nin1_Nout1[L['rint'], L[10], None] -sign: _UFunc_Nin1_Nout1[L['sign'], L[19], None] -signbit: _UFunc_Nin1_Nout1[L['signbit'], L[4], None] -sin: _UFunc_Nin1_Nout1[L['sin'], L[9], None] -sinh: _UFunc_Nin1_Nout1[L['sinh'], L[8], None] -spacing: _UFunc_Nin1_Nout1[L['spacing'], L[4], None] -sqrt: _UFunc_Nin1_Nout1[L['sqrt'], L[10], None] -square: _UFunc_Nin1_Nout1[L['square'], L[18], None] -subtract: _UFunc_Nin2_Nout1[L['subtract'], L[21], None] -tan: _UFunc_Nin1_Nout1[L['tan'], L[8], None] -tanh: _UFunc_Nin1_Nout1[L['tanh'], L[8], None] -trunc: _UFunc_Nin1_Nout1[L['trunc'], L[7], None] -vecdot: _GUFunc_Nin2_Nout1[L['vecdot'], L[19], None, L["(n),(n)->()"]] -vecmat: _GUFunc_Nin2_Nout1[L['vecmat'], L[19], None, L["(n),(n,m)->(m)"]] +absolute: _UFunc_Nin1_Nout1[L["absolute"], L[20], None] +add: _UFunc_Nin2_Nout1[L["add"], L[22], L[0]] +arccos: _UFunc_Nin1_Nout1[L["arccos"], L[8], None] +arccosh: _UFunc_Nin1_Nout1[L["arccosh"], L[8], None] +arcsin: _UFunc_Nin1_Nout1[L["arcsin"], L[8], None] +arcsinh: _UFunc_Nin1_Nout1[L["arcsinh"], L[8], None] +arctan2: _UFunc_Nin2_Nout1[L["arctan2"], L[5], None] +arctan: _UFunc_Nin1_Nout1[L["arctan"], L[8], None] +arctanh: _UFunc_Nin1_Nout1[L["arctanh"], L[8], None] +bitwise_and: _UFunc_Nin2_Nout1[L["bitwise_and"], L[12], L[-1]] +bitwise_count: _UFunc_Nin1_Nout1[L["bitwise_count"], L[11], None] +bitwise_or: _UFunc_Nin2_Nout1[L["bitwise_or"], L[12], L[0]] +bitwise_xor: _UFunc_Nin2_Nout1[L["bitwise_xor"], L[12], L[0]] +cbrt: _UFunc_Nin1_Nout1[L["cbrt"], L[5], None] +ceil: _UFunc_Nin1_Nout1[L["ceil"], L[7], None] +conjugate: _UFunc_Nin1_Nout1[L["conjugate"], L[18], None] +copysign: _UFunc_Nin2_Nout1[L["copysign"], L[4], None] +cos: 
_UFunc_Nin1_Nout1[L["cos"], L[9], None] +cosh: _UFunc_Nin1_Nout1[L["cosh"], L[8], None] +deg2rad: _UFunc_Nin1_Nout1[L["deg2rad"], L[5], None] +degrees: _UFunc_Nin1_Nout1[L["degrees"], L[5], None] +divide: _UFunc_Nin2_Nout1[L["divide"], L[11], None] +divmod: _UFunc_Nin2_Nout2[L["divmod"], L[15], None] +equal: _UFunc_Nin2_Nout1[L["equal"], L[23], None] +exp2: _UFunc_Nin1_Nout1[L["exp2"], L[8], None] +exp: _UFunc_Nin1_Nout1[L["exp"], L[10], None] +expm1: _UFunc_Nin1_Nout1[L["expm1"], L[8], None] +fabs: _UFunc_Nin1_Nout1[L["fabs"], L[5], None] +float_power: _UFunc_Nin2_Nout1[L["float_power"], L[4], None] +floor: _UFunc_Nin1_Nout1[L["floor"], L[7], None] +floor_divide: _UFunc_Nin2_Nout1[L["floor_divide"], L[21], None] +fmax: _UFunc_Nin2_Nout1[L["fmax"], L[21], None] +fmin: _UFunc_Nin2_Nout1[L["fmin"], L[21], None] +fmod: _UFunc_Nin2_Nout1[L["fmod"], L[15], None] +frexp: _UFunc_Nin1_Nout2[L["frexp"], L[4], None] +gcd: _UFunc_Nin2_Nout1[L["gcd"], L[11], L[0]] +greater: _UFunc_Nin2_Nout1[L["greater"], L[23], None] +greater_equal: _UFunc_Nin2_Nout1[L["greater_equal"], L[23], None] +heaviside: _UFunc_Nin2_Nout1[L["heaviside"], L[4], None] +hypot: _UFunc_Nin2_Nout1[L["hypot"], L[5], L[0]] +invert: _UFunc_Nin1_Nout1[L["invert"], L[12], None] +isfinite: _UFunc_Nin1_Nout1[L["isfinite"], L[20], None] +isinf: _UFunc_Nin1_Nout1[L["isinf"], L[20], None] +isnan: _UFunc_Nin1_Nout1[L["isnan"], L[20], None] +isnat: _UFunc_Nin1_Nout1[L["isnat"], L[2], None] +lcm: _UFunc_Nin2_Nout1[L["lcm"], L[11], None] +ldexp: _UFunc_Nin2_Nout1[L["ldexp"], L[8], None] +left_shift: _UFunc_Nin2_Nout1[L["left_shift"], L[11], None] +less: _UFunc_Nin2_Nout1[L["less"], L[23], None] +less_equal: _UFunc_Nin2_Nout1[L["less_equal"], L[23], None] +log10: _UFunc_Nin1_Nout1[L["log10"], L[8], None] +log1p: _UFunc_Nin1_Nout1[L["log1p"], L[8], None] +log2: _UFunc_Nin1_Nout1[L["log2"], L[8], None] +log: _UFunc_Nin1_Nout1[L["log"], L[10], None] +logaddexp2: _UFunc_Nin2_Nout1[L["logaddexp2"], L[4], float] +logaddexp: 
_UFunc_Nin2_Nout1[L["logaddexp"], L[4], float] +logical_and: _UFunc_Nin2_Nout1[L["logical_and"], L[20], L[True]] +logical_not: _UFunc_Nin1_Nout1[L["logical_not"], L[20], None] +logical_or: _UFunc_Nin2_Nout1[L["logical_or"], L[20], L[False]] +logical_xor: _UFunc_Nin2_Nout1[L["logical_xor"], L[19], L[False]] +matmul: _GUFunc_Nin2_Nout1[L["matmul"], L[19], None, L["(n?,k),(k,m?)->(n?,m?)"]] +matvec: _GUFunc_Nin2_Nout1[L["matvec"], L[19], None, L["(m,n),(n)->(m)"]] +maximum: _UFunc_Nin2_Nout1[L["maximum"], L[21], None] +minimum: _UFunc_Nin2_Nout1[L["minimum"], L[21], None] +modf: _UFunc_Nin1_Nout2[L["modf"], L[4], None] +multiply: _UFunc_Nin2_Nout1[L["multiply"], L[23], L[1]] +negative: _UFunc_Nin1_Nout1[L["negative"], L[19], None] +nextafter: _UFunc_Nin2_Nout1[L["nextafter"], L[4], None] +not_equal: _UFunc_Nin2_Nout1[L["not_equal"], L[23], None] +positive: _UFunc_Nin1_Nout1[L["positive"], L[19], None] +power: _UFunc_Nin2_Nout1[L["power"], L[18], None] +rad2deg: _UFunc_Nin1_Nout1[L["rad2deg"], L[5], None] +radians: _UFunc_Nin1_Nout1[L["radians"], L[5], None] +reciprocal: _UFunc_Nin1_Nout1[L["reciprocal"], L[18], None] +remainder: _UFunc_Nin2_Nout1[L["remainder"], L[16], None] +right_shift: _UFunc_Nin2_Nout1[L["right_shift"], L[11], None] +rint: _UFunc_Nin1_Nout1[L["rint"], L[10], None] +sign: _UFunc_Nin1_Nout1[L["sign"], L[19], None] +signbit: _UFunc_Nin1_Nout1[L["signbit"], L[4], None] +sin: _UFunc_Nin1_Nout1[L["sin"], L[9], None] +sinh: _UFunc_Nin1_Nout1[L["sinh"], L[8], None] +spacing: _UFunc_Nin1_Nout1[L["spacing"], L[4], None] +sqrt: _UFunc_Nin1_Nout1[L["sqrt"], L[10], None] +square: _UFunc_Nin1_Nout1[L["square"], L[18], None] +subtract: _UFunc_Nin2_Nout1[L["subtract"], L[21], None] +tan: _UFunc_Nin1_Nout1[L["tan"], L[8], None] +tanh: _UFunc_Nin1_Nout1[L["tanh"], L[8], None] +trunc: _UFunc_Nin1_Nout1[L["trunc"], L[7], None] +vecdot: _GUFunc_Nin2_Nout1[L["vecdot"], L[19], None, L["(n),(n)->()"]] +vecmat: _GUFunc_Nin2_Nout1[L["vecmat"], L[19], None, 
L["(n),(n,m)->(m)"]] abs = absolute acos = arccos diff --git a/numpy/_array_api_info.pyi b/numpy/_array_api_info.pyi index ee9f8a5660c3..b4592ba2c2ee 100644 --- a/numpy/_array_api_info.pyi +++ b/numpy/_array_api_info.pyi @@ -118,7 +118,7 @@ _EmptyDict: TypeAlias = dict[Never, Never] @final class __array_namespace_info__: - __module__: ClassVar[Literal['numpy']] + __module__: ClassVar[Literal["numpy"]] def capabilities(self) -> _Capabilities: ... def default_device(self) -> _Device: ... diff --git a/numpy/_core/numerictypes.pyi b/numpy/_core/numerictypes.pyi index 4f810da6904b..26ed4b9d9524 100644 --- a/numpy/_core/numerictypes.pyi +++ b/numpy/_core/numerictypes.pyi @@ -144,15 +144,15 @@ __all__ = [ @type_check_only class _TypeCodes(TypedDict): - Character: L['c'] - Integer: L['bhilqnp'] - UnsignedInteger: L['BHILQNP'] - Float: L['efdg'] - Complex: L['FDG'] - AllInteger: L['bBhHiIlLqQnNpP'] - AllFloat: L['efdgFDG'] - Datetime: L['Mm'] - All: L['?bhilqnpBHILQNPefdgFDGSUVOMm'] + Character: L["c"] + Integer: L["bhilqnp"] + UnsignedInteger: L["BHILQNP"] + Float: L["efdg"] + Complex: L["FDG"] + AllInteger: L["bBhHiIlLqQnNpP"] + AllFloat: L["efdgFDG"] + Datetime: L["Mm"] + All: L["?bhilqnpBHILQNPefdgFDGSUVOMm"] def isdtype(dtype: dtype | type, kind: DTypeLike | tuple[DTypeLike, ...]) -> py_bool: ... def issubdtype(arg1: DTypeLike, arg2: DTypeLike) -> py_bool: ... 
diff --git a/numpy/ctypeslib/_ctypeslib.pyi b/numpy/ctypeslib/_ctypeslib.pyi index 996b2c0be388..e10e9fb51ab7 100644 --- a/numpy/ctypeslib/_ctypeslib.pyi +++ b/numpy/ctypeslib/_ctypeslib.pyi @@ -60,12 +60,12 @@ _DTypeOptionalT = TypeVar("_DTypeOptionalT", bound=dtype | None) _ScalarT = TypeVar("_ScalarT", bound=generic) _FlagsKind: TypeAlias = L[ - 'C_CONTIGUOUS', 'CONTIGUOUS', 'C', - 'F_CONTIGUOUS', 'FORTRAN', 'F', - 'ALIGNED', 'A', - 'WRITEABLE', 'W', - 'OWNDATA', 'O', - 'WRITEBACKIFCOPY', 'X', + "C_CONTIGUOUS", "CONTIGUOUS", "C", + "F_CONTIGUOUS", "FORTRAN", "F", + "ALIGNED", "A", + "WRITEABLE", "W", + "OWNDATA", "O", + "WRITEBACKIFCOPY", "X", ] # TODO: Add a shape typevar once we have variadic typevars (PEP 646) diff --git a/numpy/dtypes.pyi b/numpy/dtypes.pyi index 36844a90d31f..3e34113edd4f 100644 --- a/numpy/dtypes.pyi +++ b/numpy/dtypes.pyi @@ -17,39 +17,39 @@ from typing_extensions import TypeVar import numpy as np __all__ = [ # noqa: RUF022 - 'BoolDType', - 'Int8DType', - 'ByteDType', - 'UInt8DType', - 'UByteDType', - 'Int16DType', - 'ShortDType', - 'UInt16DType', - 'UShortDType', - 'Int32DType', - 'IntDType', - 'UInt32DType', - 'UIntDType', - 'Int64DType', - 'LongDType', - 'UInt64DType', - 'ULongDType', - 'LongLongDType', - 'ULongLongDType', - 'Float16DType', - 'Float32DType', - 'Float64DType', - 'LongDoubleDType', - 'Complex64DType', - 'Complex128DType', - 'CLongDoubleDType', - 'ObjectDType', - 'BytesDType', - 'StrDType', - 'VoidDType', - 'DateTime64DType', - 'TimeDelta64DType', - 'StringDType', + "BoolDType", + "Int8DType", + "ByteDType", + "UInt8DType", + "UByteDType", + "Int16DType", + "ShortDType", + "UInt16DType", + "UShortDType", + "Int32DType", + "IntDType", + "UInt32DType", + "UIntDType", + "Int64DType", + "LongDType", + "UInt64DType", + "ULongDType", + "LongLongDType", + "ULongLongDType", + "Float16DType", + "Float32DType", + "Float64DType", + "LongDoubleDType", + "Complex64DType", + "Complex128DType", + "CLongDoubleDType", + "ObjectDType", + 
"BytesDType", + "StrDType", + "VoidDType", + "DateTime64DType", + "TimeDelta64DType", + "StringDType", ] # Helper base classes (typing-only) diff --git a/numpy/lib/_function_base_impl.pyi b/numpy/lib/_function_base_impl.pyi index 14e48f1cc3fd..41d6204cd684 100644 --- a/numpy/lib/_function_base_impl.pyi +++ b/numpy/lib/_function_base_impl.pyi @@ -107,7 +107,7 @@ _ScalarT2 = TypeVar("_ScalarT2", bound=generic) _ArrayT = TypeVar("_ArrayT", bound=np.ndarray) _2Tuple: TypeAlias = tuple[_T, _T] -_MeshgridIdx: TypeAlias = L['ij', 'xy'] +_MeshgridIdx: TypeAlias = L["ij", "xy"] @type_check_only class _TrimZerosSequence(Protocol[_T_co]): diff --git a/numpy/lib/_type_check_impl.pyi b/numpy/lib/_type_check_impl.pyi index 5e98ad22ca8b..8b665cd9a400 100644 --- a/numpy/lib/_type_check_impl.pyi +++ b/numpy/lib/_type_check_impl.pyi @@ -154,49 +154,49 @@ def real_if_close(a: ArrayLike, tol: float = 100) -> NDArray[Any]: ... # @overload -def typename(char: L['S1']) -> L['character']: ... +def typename(char: L["S1"]) -> L["character"]: ... @overload -def typename(char: L['?']) -> L['bool']: ... +def typename(char: L["?"]) -> L["bool"]: ... @overload -def typename(char: L['b']) -> L['signed char']: ... +def typename(char: L["b"]) -> L["signed char"]: ... @overload -def typename(char: L['B']) -> L['unsigned char']: ... +def typename(char: L["B"]) -> L["unsigned char"]: ... @overload -def typename(char: L['h']) -> L['short']: ... +def typename(char: L["h"]) -> L["short"]: ... @overload -def typename(char: L['H']) -> L['unsigned short']: ... +def typename(char: L["H"]) -> L["unsigned short"]: ... @overload -def typename(char: L['i']) -> L['integer']: ... +def typename(char: L["i"]) -> L["integer"]: ... @overload -def typename(char: L['I']) -> L['unsigned integer']: ... +def typename(char: L["I"]) -> L["unsigned integer"]: ... @overload -def typename(char: L['l']) -> L['long integer']: ... +def typename(char: L["l"]) -> L["long integer"]: ... 
@overload -def typename(char: L['L']) -> L['unsigned long integer']: ... +def typename(char: L["L"]) -> L["unsigned long integer"]: ... @overload -def typename(char: L['q']) -> L['long long integer']: ... +def typename(char: L["q"]) -> L["long long integer"]: ... @overload -def typename(char: L['Q']) -> L['unsigned long long integer']: ... +def typename(char: L["Q"]) -> L["unsigned long long integer"]: ... @overload -def typename(char: L['f']) -> L['single precision']: ... +def typename(char: L["f"]) -> L["single precision"]: ... @overload -def typename(char: L['d']) -> L['double precision']: ... +def typename(char: L["d"]) -> L["double precision"]: ... @overload -def typename(char: L['g']) -> L['long precision']: ... +def typename(char: L["g"]) -> L["long precision"]: ... @overload -def typename(char: L['F']) -> L['complex single precision']: ... +def typename(char: L["F"]) -> L["complex single precision"]: ... @overload -def typename(char: L['D']) -> L['complex double precision']: ... +def typename(char: L["D"]) -> L["complex double precision"]: ... @overload -def typename(char: L['G']) -> L['complex long double precision']: ... +def typename(char: L["G"]) -> L["complex long double precision"]: ... @overload -def typename(char: L['S']) -> L['string']: ... +def typename(char: L["S"]) -> L["string"]: ... @overload -def typename(char: L['U']) -> L['unicode']: ... +def typename(char: L["U"]) -> L["unicode"]: ... @overload -def typename(char: L['V']) -> L['void']: ... +def typename(char: L["V"]) -> L["void"]: ... @overload -def typename(char: L['O']) -> L['object']: ... +def typename(char: L["O"]) -> L["object"]: ... 
# NOTE: The [overload-overlap] mypy errors are false positives @overload diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 55e5867247bb..a033e735d3f5 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -1958,7 +1958,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): indices: _IntLike_co, axis: None = None, out: None = None, - mode: _ModeKind = 'raise' + mode: _ModeKind = "raise" ) -> _ScalarT: ... @overload def take( @@ -1966,7 +1966,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): indices: _ArrayLikeInt_co, axis: SupportsIndex | None = None, out: None = None, - mode: _ModeKind = 'raise', + mode: _ModeKind = "raise", ) -> _MaskedArray[_ScalarT]: ... @overload def take( @@ -1974,7 +1974,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): indices: _ArrayLikeInt_co, axis: SupportsIndex | None, out: _ArrayT, - mode: _ModeKind = 'raise', + mode: _ModeKind = "raise", ) -> _ArrayT: ... @overload def take( @@ -1983,7 +1983,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): axis: SupportsIndex | None = None, *, out: _ArrayT, - mode: _ModeKind = 'raise', + mode: _ModeKind = "raise", ) -> _ArrayT: ... copy: Any @@ -2326,7 +2326,7 @@ def take( indices: _IntLike_co, axis: None = None, out: None = None, - mode: _ModeKind = 'raise' + mode: _ModeKind = "raise" ) -> _ScalarT: ... @overload def take( @@ -2334,7 +2334,7 @@ def take( indices: _ArrayLikeInt_co, axis: SupportsIndex | None = None, out: None = None, - mode: _ModeKind = 'raise', + mode: _ModeKind = "raise", ) -> _MaskedArray[_ScalarT]: ... @overload def take( @@ -2342,7 +2342,7 @@ def take( indices: _IntLike_co, axis: SupportsIndex | None = None, out: None = None, - mode: _ModeKind = 'raise', + mode: _ModeKind = "raise", ) -> Any: ... @overload def take( @@ -2350,7 +2350,7 @@ def take( indices: _ArrayLikeInt_co, axis: SupportsIndex | None = None, out: None = None, - mode: _ModeKind = 'raise', + mode: _ModeKind = "raise", ) -> _MaskedArray[Any]: ... 
@overload def take( @@ -2358,7 +2358,7 @@ def take( indices: _ArrayLikeInt_co, axis: SupportsIndex | None, out: _ArrayT, - mode: _ModeKind = 'raise', + mode: _ModeKind = "raise", ) -> _ArrayT: ... @overload def take( @@ -2367,7 +2367,7 @@ def take( axis: SupportsIndex | None = None, *, out: _ArrayT, - mode: _ModeKind = 'raise', + mode: _ModeKind = "raise", ) -> _ArrayT: ... def power(a, b, third=...): ... @@ -2402,7 +2402,7 @@ def concatenate(arrays, axis=...): ... def diag(v, k=...): ... def left_shift(a, n): ... def right_shift(a, n): ... -def put(a: NDArray[Any], indices: _ArrayLikeInt_co, values: ArrayLike, mode: _ModeKind = 'raise') -> None: ... +def put(a: NDArray[Any], indices: _ArrayLikeInt_co, values: ArrayLike, mode: _ModeKind = "raise") -> None: ... def putmask(a: NDArray[Any], mask: _ArrayLikeBool_co, values: ArrayLike) -> None: ... def transpose(a, axes=...): ... def reshape(a, new_shape, order=...): ... diff --git a/numpy/typing/tests/data/fail/array_constructors.pyi b/numpy/typing/tests/data/fail/array_constructors.pyi index cadc2ae595e7..6ed619958c1c 100644 --- a/numpy/typing/tests/data/fail/array_constructors.pyi +++ b/numpy/typing/tests/data/fail/array_constructors.pyi @@ -15,17 +15,17 @@ np.ones() # type: ignore[call-overload] np.array(0, float, True) # type: ignore[call-overload] -np.linspace(None, 'bob') # type: ignore[call-overload] +np.linspace(None, "bob") # type: ignore[call-overload] np.linspace(0, 2, num=10.0) # type: ignore[call-overload] -np.linspace(0, 2, endpoint='True') # type: ignore[call-overload] -np.linspace(0, 2, retstep=b'False') # type: ignore[call-overload] +np.linspace(0, 2, endpoint="True") # type: ignore[call-overload] +np.linspace(0, 2, retstep=b"False") # type: ignore[call-overload] np.linspace(0, 2, dtype=0) # type: ignore[call-overload] np.linspace(0, 2, axis=None) # type: ignore[call-overload] -np.logspace(None, 'bob') # type: ignore[call-overload] +np.logspace(None, "bob") # type: ignore[call-overload] np.logspace(0, 
2, base=None) # type: ignore[call-overload] -np.geomspace(None, 'bob') # type: ignore[call-overload] +np.geomspace(None, "bob") # type: ignore[call-overload] np.stack(generator) # type: ignore[call-overload] np.hstack({1, 2}) # type: ignore[call-overload] diff --git a/numpy/typing/tests/data/fail/flatiter.pyi b/numpy/typing/tests/data/fail/flatiter.pyi index cdf2a8b78b7b..be63a082535d 100644 --- a/numpy/typing/tests/data/fail/flatiter.pyi +++ b/numpy/typing/tests/data/fail/flatiter.pyi @@ -10,7 +10,7 @@ supports_array: npt._SupportsArray[np.dtype[np.float64]] a.base = object() # type: ignore[assignment, misc] a.coords = object() # type: ignore[assignment, misc] a.index = object() # type: ignore[assignment, misc] -a.copy(order='C') # type: ignore[call-arg] +a.copy(order="C") # type: ignore[call-arg] # NOTE: Contrary to `ndarray.__getitem__` its counterpart in `flatiter` # does not accept objects with the `__array__` or `__index__` protocols; diff --git a/numpy/typing/tests/data/fail/fromnumeric.pyi b/numpy/typing/tests/data/fail/fromnumeric.pyi index d0a81f0bfbfe..92b0cb366207 100644 --- a/numpy/typing/tests/data/fail/fromnumeric.pyi +++ b/numpy/typing/tests/data/fail/fromnumeric.pyi @@ -137,12 +137,12 @@ np.mean(AR_M) # type: ignore[arg-type] np.std(a, axis=1.0) # type: ignore[call-overload] np.std(a, out=False) # type: ignore[call-overload] -np.std(a, ddof='test') # type: ignore[call-overload] +np.std(a, ddof="test") # type: ignore[call-overload] np.std(a, keepdims=1.0) # type: ignore[call-overload] np.std(AR_U) # type: ignore[arg-type] np.var(a, axis=1.0) # type: ignore[call-overload] np.var(a, out=False) # type: ignore[call-overload] -np.var(a, ddof='test') # type: ignore[call-overload] +np.var(a, ddof="test") # type: ignore[call-overload] np.var(a, keepdims=1.0) # type: ignore[call-overload] np.var(AR_U) # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/fail/ma.pyi b/numpy/typing/tests/data/fail/ma.pyi index b5921660d617..084ae971bdd0 100644 --- 
a/numpy/typing/tests/data/fail/ma.pyi +++ b/numpy/typing/tests/data/fail/ma.pyi @@ -84,11 +84,11 @@ MAR_1d_f8.any(out=1.0) # type: ignore[call-overload] MAR_1d_f8.sort(axis=(0, 1)) # type: ignore[arg-type] MAR_1d_f8.sort(axis=None) # type: ignore[arg-type] -MAR_1d_f8.sort(kind='cabbage') # type: ignore[arg-type] -MAR_1d_f8.sort(order=lambda: 'cabbage') # type: ignore[arg-type] -MAR_1d_f8.sort(endwith='cabbage') # type: ignore[arg-type] -MAR_1d_f8.sort(fill_value=lambda: 'cabbage') # type: ignore[arg-type] -MAR_1d_f8.sort(stable='cabbage') # type: ignore[arg-type] +MAR_1d_f8.sort(kind="cabbage") # type: ignore[arg-type] +MAR_1d_f8.sort(order=lambda: "cabbage") # type: ignore[arg-type] +MAR_1d_f8.sort(endwith="cabbage") # type: ignore[arg-type] +MAR_1d_f8.sort(fill_value=lambda: "cabbage") # type: ignore[arg-type] +MAR_1d_f8.sort(stable="cabbage") # type: ignore[arg-type] MAR_1d_f8.sort(stable=True) # type: ignore[arg-type] MAR_1d_f8.take(axis=1.0) # type: ignore[call-overload] @@ -100,46 +100,46 @@ np.ma.take(axis=1.0) # type: ignore[call-overload] np.ma.take(out=1) # type: ignore[call-overload] np.ma.take(mode="bob") # type: ignore[call-overload] -MAR_1d_f8.partition(['cabbage']) # type: ignore[arg-type] +MAR_1d_f8.partition(["cabbage"]) # type: ignore[arg-type] MAR_1d_f8.partition(axis=(0, 1)) # type: ignore[arg-type, call-arg] -MAR_1d_f8.partition(kind='cabbage') # type: ignore[arg-type, call-arg] -MAR_1d_f8.partition(order=lambda: 'cabbage') # type: ignore[arg-type, call-arg] +MAR_1d_f8.partition(kind="cabbage") # type: ignore[arg-type, call-arg] +MAR_1d_f8.partition(order=lambda: "cabbage") # type: ignore[arg-type, call-arg] MAR_1d_f8.partition(AR_b) # type: ignore[arg-type] -MAR_1d_f8.argpartition(['cabbage']) # type: ignore[arg-type] +MAR_1d_f8.argpartition(["cabbage"]) # type: ignore[arg-type] MAR_1d_f8.argpartition(axis=(0, 1)) # type: ignore[arg-type, call-arg] -MAR_1d_f8.argpartition(kind='cabbage') # type: ignore[arg-type, call-arg] 
-MAR_1d_f8.argpartition(order=lambda: 'cabbage') # type: ignore[arg-type, call-arg] +MAR_1d_f8.argpartition(kind="cabbage") # type: ignore[arg-type, call-arg] +MAR_1d_f8.argpartition(order=lambda: "cabbage") # type: ignore[arg-type, call-arg] MAR_1d_f8.argpartition(AR_b) # type: ignore[arg-type] -np.ma.ndim(lambda: 'lambda') # type: ignore[arg-type] +np.ma.ndim(lambda: "lambda") # type: ignore[arg-type] -np.ma.size(AR_b, axis='0') # type: ignore[arg-type] +np.ma.size(AR_b, axis="0") # type: ignore[arg-type] -MAR_1d_f8 >= (lambda x: 'mango') # type: ignore[operator] -MAR_1d_f8 > (lambda x: 'mango') # type: ignore[operator] -MAR_1d_f8 <= (lambda x: 'mango') # type: ignore[operator] -MAR_1d_f8 < (lambda x: 'mango') # type: ignore[operator] +MAR_1d_f8 >= (lambda x: "mango") # type: ignore[operator] +MAR_1d_f8 > (lambda x: "mango") # type: ignore[operator] +MAR_1d_f8 <= (lambda x: "mango") # type: ignore[operator] +MAR_1d_f8 < (lambda x: "mango") # type: ignore[operator] MAR_1d_f8.count(axis=0.) # type: ignore[call-overload] np.ma.count(MAR_1d_f8, axis=0.) 
# type: ignore[call-overload] -MAR_1d_f8.put(4, 999, mode='flip') # type: ignore[arg-type] +MAR_1d_f8.put(4, 999, mode="flip") # type: ignore[arg-type] -np.ma.put(MAR_1d_f8, 4, 999, mode='flip') # type: ignore[arg-type] +np.ma.put(MAR_1d_f8, 4, 999, mode="flip") # type: ignore[arg-type] np.ma.put([1, 1, 3], 0, 999) # type: ignore[arg-type] -np.ma.compressed(lambda: 'compress me') # type: ignore[call-overload] +np.ma.compressed(lambda: "compress me") # type: ignore[call-overload] np.ma.allequal(MAR_1d_f8, [1, 2, 3], fill_value=1.5) # type: ignore[arg-type] np.ma.allclose(MAR_1d_f8, [1, 2, 3], masked_equal=4.5) # type: ignore[arg-type] -np.ma.allclose(MAR_1d_f8, [1, 2, 3], rtol='.4') # type: ignore[arg-type] -np.ma.allclose(MAR_1d_f8, [1, 2, 3], atol='.5') # type: ignore[arg-type] +np.ma.allclose(MAR_1d_f8, [1, 2, 3], rtol=".4") # type: ignore[arg-type] +np.ma.allclose(MAR_1d_f8, [1, 2, 3], atol=".5") # type: ignore[arg-type] -MAR_1d_f8.__setmask__('mask') # type: ignore[arg-type] +MAR_1d_f8.__setmask__("mask") # type: ignore[arg-type] MAR_b *= 2 # type: ignore[arg-type] MAR_c //= 2 # type: ignore[misc] @@ -149,7 +149,7 @@ MAR_1d_f8.swapaxes(axis1=1, axis2=0) # type: ignore[call-arg] MAR_1d_f8.argsort(axis=(1, 0)) # type: ignore[arg-type] -np.ma.MaskedArray(np.array([1, 2, 3]), keep_mask='yes') # type: ignore[call-overload] +np.ma.MaskedArray(np.array([1, 2, 3]), keep_mask="yes") # type: ignore[call-overload] np.ma.MaskedArray(np.array([1, 2, 3]), subok=None) # type: ignore[call-overload] np.ma.MaskedArray(np.array([1, 2, 3]), ndim=None) # type: ignore[call-overload] -np.ma.MaskedArray(np.array([1, 2, 3]), order='Corinthian') # type: ignore[call-overload] +np.ma.MaskedArray(np.array([1, 2, 3]), order="Corinthian") # type: ignore[call-overload] diff --git a/numpy/typing/tests/data/fail/scalars.pyi b/numpy/typing/tests/data/fail/scalars.pyi index 0560a855a80f..018a88e652ae 100644 --- a/numpy/typing/tests/data/fail/scalars.pyi +++ 
b/numpy/typing/tests/data/fail/scalars.pyi @@ -57,15 +57,15 @@ np.float64(value=0.0) # type: ignore[call-arg] np.int64(value=0) # type: ignore[call-arg] np.uint64(value=0) # type: ignore[call-arg] np.complex128(value=0.0j) # type: ignore[call-overload] -np.str_(value='bob') # type: ignore[call-overload] -np.bytes_(value=b'test') # type: ignore[call-overload] -np.void(value=b'test') # type: ignore[call-overload] +np.str_(value="bob") # type: ignore[call-overload] +np.bytes_(value=b"test") # type: ignore[call-overload] +np.void(value=b"test") # type: ignore[call-overload] np.bool(value=True) # type: ignore[call-overload] np.datetime64(value="2019") # type: ignore[call-overload] np.timedelta64(value=0) # type: ignore[call-overload] -np.bytes_(b"hello", encoding='utf-8') # type: ignore[call-overload] -np.str_("hello", encoding='utf-8') # type: ignore[call-overload] +np.bytes_(b"hello", encoding="utf-8") # type: ignore[call-overload] +np.str_("hello", encoding="utf-8") # type: ignore[call-overload] f8.item(1) # type: ignore[call-overload] f8.item((0, 1)) # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/reveal/array_constructors.pyi b/numpy/typing/tests/data/reveal/array_constructors.pyi index 99073ac4543a..e95e85093f6a 100644 --- a/numpy/typing/tests/data/reveal/array_constructors.pyi +++ b/numpy/typing/tests/data/reveal/array_constructors.pyi @@ -29,14 +29,14 @@ assert_type(np.empty_like(A), npt.NDArray[np.float64]) assert_type(np.empty_like(B), SubClass[np.float64]) assert_type(np.empty_like([1, 1.0]), npt.NDArray[Any]) assert_type(np.empty_like(A, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.empty_like(A, dtype='c16'), npt.NDArray[Any]) +assert_type(np.empty_like(A, dtype="c16"), npt.NDArray[Any]) assert_type(np.array(A), npt.NDArray[np.float64]) assert_type(np.array(B), npt.NDArray[np.float64]) assert_type(np.array([1, 1.0]), npt.NDArray[Any]) assert_type(np.array(deque([1, 2, 3])), npt.NDArray[Any]) assert_type(np.array(A, dtype=np.int64), 
npt.NDArray[np.int64]) -assert_type(np.array(A, dtype='c16'), npt.NDArray[Any]) +assert_type(np.array(A, dtype="c16"), npt.NDArray[Any]) assert_type(np.array(A, like=A), npt.NDArray[np.float64]) assert_type(np.array(A, subok=True), npt.NDArray[np.float64]) assert_type(np.array(B, subok=True), SubClass[np.float64]) @@ -49,12 +49,12 @@ assert_type(np.array([], dtype=np.bool), npt.NDArray[np.bool]) assert_type(np.zeros([1, 5, 6]), npt.NDArray[np.float64]) assert_type(np.zeros([1, 5, 6], dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.zeros([1, 5, 6], dtype='c16'), npt.NDArray[Any]) +assert_type(np.zeros([1, 5, 6], dtype="c16"), npt.NDArray[Any]) assert_type(np.zeros(mixed_shape), npt.NDArray[np.float64]) assert_type(np.empty([1, 5, 6]), npt.NDArray[np.float64]) assert_type(np.empty([1, 5, 6], dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.empty([1, 5, 6], dtype='c16'), npt.NDArray[Any]) +assert_type(np.empty([1, 5, 6], dtype="c16"), npt.NDArray[Any]) assert_type(np.empty(mixed_shape), npt.NDArray[np.float64]) assert_type(np.concatenate(A), npt.NDArray[np.float64]) @@ -65,32 +65,32 @@ assert_type(np.concatenate((A, A)), npt.NDArray[np.float64]) assert_type(np.concatenate(([1], [1])), npt.NDArray[Any]) assert_type(np.concatenate([1, 1.0]), npt.NDArray[Any]) assert_type(np.concatenate(A, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.concatenate(A, dtype='c16'), npt.NDArray[Any]) +assert_type(np.concatenate(A, dtype="c16"), npt.NDArray[Any]) assert_type(np.concatenate([1, 1.0], out=A), npt.NDArray[np.float64]) assert_type(np.asarray(A), npt.NDArray[np.float64]) assert_type(np.asarray(B), npt.NDArray[np.float64]) assert_type(np.asarray([1, 1.0]), npt.NDArray[Any]) assert_type(np.asarray(A, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.asarray(A, dtype='c16'), npt.NDArray[Any]) +assert_type(np.asarray(A, dtype="c16"), npt.NDArray[Any]) assert_type(np.asanyarray(A), npt.NDArray[np.float64]) assert_type(np.asanyarray(B), 
SubClass[np.float64]) assert_type(np.asanyarray([1, 1.0]), npt.NDArray[Any]) assert_type(np.asanyarray(A, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.asanyarray(A, dtype='c16'), npt.NDArray[Any]) +assert_type(np.asanyarray(A, dtype="c16"), npt.NDArray[Any]) assert_type(np.ascontiguousarray(A), npt.NDArray[np.float64]) assert_type(np.ascontiguousarray(B), npt.NDArray[np.float64]) assert_type(np.ascontiguousarray([1, 1.0]), npt.NDArray[Any]) assert_type(np.ascontiguousarray(A, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.ascontiguousarray(A, dtype='c16'), npt.NDArray[Any]) +assert_type(np.ascontiguousarray(A, dtype="c16"), npt.NDArray[Any]) assert_type(np.asfortranarray(A), npt.NDArray[np.float64]) assert_type(np.asfortranarray(B), npt.NDArray[np.float64]) assert_type(np.asfortranarray([1, 1.0]), npt.NDArray[Any]) assert_type(np.asfortranarray(A, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.asfortranarray(A, dtype='c16'), npt.NDArray[Any]) +assert_type(np.asfortranarray(A, dtype="c16"), npt.NDArray[Any]) assert_type(np.fromstring("1 1 1", sep=" "), npt.NDArray[np.float64]) assert_type(np.fromstring(b"1 1 1", sep=" "), npt.NDArray[np.float64]) diff --git a/numpy/typing/tests/data/reveal/arrayprint.pyi b/numpy/typing/tests/data/reveal/arrayprint.pyi index de4077654bea..17e175edc2b7 100644 --- a/numpy/typing/tests/data/reveal/arrayprint.pyi +++ b/numpy/typing/tests/data/reveal/arrayprint.pyi @@ -12,7 +12,7 @@ func_int: Callable[[np.integer], str] assert_type(np.get_printoptions(), _FormatOptions) assert_type( - np.array2string(AR, formatter={'float_kind': func_float, 'int_kind': func_int}), + np.array2string(AR, formatter={"float_kind": func_float, "int_kind": func_int}), str, ) assert_type(np.format_float_scientific(1.0), str) diff --git a/numpy/typing/tests/data/reveal/getlimits.pyi b/numpy/typing/tests/data/reveal/getlimits.pyi index 825daba43064..3a1157121750 100644 --- a/numpy/typing/tests/data/reveal/getlimits.pyi +++ 
b/numpy/typing/tests/data/reveal/getlimits.pyi @@ -17,7 +17,7 @@ iinfo_i8: np.iinfo[np.int64] assert_type(np.finfo(f), np.finfo[np.float64]) assert_type(np.finfo(f8), np.finfo[np.floating[_64Bit]]) assert_type(np.finfo(c8), np.finfo[np.float32]) -assert_type(np.finfo('f2'), np.finfo[np.floating]) +assert_type(np.finfo("f2"), np.finfo[np.floating]) assert_type(finfo_f8.dtype, np.dtype[np.float64]) assert_type(finfo_f8.bits, int) @@ -41,7 +41,7 @@ assert_type(finfo_f8.smallest_subnormal, np.float64) assert_type(np.iinfo(i), np.iinfo[np.int_]) assert_type(np.iinfo(i8), np.iinfo[np.int64]) assert_type(np.iinfo(u4), np.iinfo[np.uint32]) -assert_type(np.iinfo('i2'), np.iinfo[Any]) +assert_type(np.iinfo("i2"), np.iinfo[Any]) assert_type(iinfo_i8.dtype, np.dtype[np.int64]) assert_type(iinfo_i8.kind, LiteralString) diff --git a/numpy/typing/tests/data/reveal/lib_function_base.pyi b/numpy/typing/tests/data/reveal/lib_function_base.pyi index 3ce8d375201b..06096f5a7749 100644 --- a/numpy/typing/tests/data/reveal/lib_function_base.pyi +++ b/numpy/typing/tests/data/reveal/lib_function_base.pyi @@ -73,8 +73,8 @@ assert_type(np.asarray_chkfinite(AR_f8, dtype=float), npt.NDArray[Any]) assert_type(np.piecewise(AR_f8, AR_b, [func]), npt.NDArray[np.float64]) assert_type(np.piecewise(AR_f8, AR_b_list, [func]), npt.NDArray[np.float64]) -assert_type(np.piecewise(AR_f8, AR_b_list, [func], True, -1, kwarg=''), npt.NDArray[np.float64]) -assert_type(np.piecewise(AR_f8, AR_b_list, [func], True, arg=-1, kwarg=''), npt.NDArray[np.float64]) +assert_type(np.piecewise(AR_f8, AR_b_list, [func], True, -1, kwarg=""), npt.NDArray[np.float64]) +assert_type(np.piecewise(AR_f8, AR_b_list, [func], True, arg=-1, kwarg=""), npt.NDArray[np.float64]) assert_type(np.piecewise(AR_LIKE_f8, AR_b_list, [func]), npt.NDArray[Any]) assert_type(np.select([AR_f8], [AR_f8]), npt.NDArray[Any]) diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index bd4e4e98373a..a6857ef0a3dd 
100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -188,7 +188,7 @@ assert_type(MAR_f4.any(out=MAR_subclass), MaskedArraySubclassC) assert_type(MAR_f4.any(None, out=MAR_subclass), MaskedArraySubclassC) assert_type(MAR_f4.sort(), None) -assert_type(MAR_f4.sort(axis=0, kind='quicksort', order='K', endwith=False, fill_value=42., stable=False), None) +assert_type(MAR_f4.sort(axis=0, kind="quicksort", order="K", endwith=False, fill_value=42., stable=False), None) assert_type(np.ma.sort(MAR_f4), MaskedArray[np.float32]) assert_type(np.ma.sort(MAR_subclass), MaskedArraySubclassC) @@ -214,10 +214,10 @@ assert_type(np.ma.take([1], [0]), MaskedArray[Any]) assert_type(np.ma.take(np.eye(2), 1, axis=0), MaskedArray[np.float64]) assert_type(MAR_f4.partition(1), None) -assert_type(MAR_V.partition(1, axis=0, kind='introselect', order='K'), None) +assert_type(MAR_V.partition(1, axis=0, kind="introselect", order="K"), None) assert_type(MAR_f4.argpartition(1), MaskedArray[np.intp]) -assert_type(MAR_1d.argpartition(1, axis=0, kind='introselect', order='K'), MaskedArray[np.intp]) +assert_type(MAR_1d.argpartition(1, axis=0, kind="introselect", order="K"), MaskedArray[np.intp]) assert_type(np.ma.ndim(f4), int) assert_type(np.ma.ndim(MAR_b), int) @@ -311,22 +311,22 @@ assert_type(np.ma.compressed([[1, 2, 3]]), np.ndarray[tuple[int], np.dtype]) assert_type(MAR_f4.put([0, 4, 8], [10, 20, 30]), None) assert_type(MAR_f4.put(4, 999), None) -assert_type(MAR_f4.put(4, 999, mode='clip'), None) +assert_type(MAR_f4.put(4, 999, mode="clip"), None) assert_type(MAR_c8.__array_wrap__(AR_b), MaskedArray[np.bool]) assert_type(np.ma.put(MAR_f4, [0, 4, 8], [10, 20, 30]), None) assert_type(np.ma.put(MAR_f4, 4, 999), None) -assert_type(np.ma.put(MAR_f4, 4, 999, mode='clip'), None) +assert_type(np.ma.put(MAR_f4, 4, 999, mode="clip"), None) assert_type(np.ma.putmask(MAR_f4, [True, False], [0, 1]), None) assert_type(np.ma.putmask(MAR_f4, np.False_, [0, 1]), None) 
-assert_type(MAR_f4.filled(float('nan')), NDArray[np.float32]) +assert_type(MAR_f4.filled(float("nan")), NDArray[np.float32]) assert_type(MAR_i8.filled(), NDArray[np.int64]) assert_type(MAR_1d.filled(), np.ndarray[tuple[int], np.dtype]) -assert_type(np.ma.filled(MAR_f4, float('nan')), NDArray[np.float32]) +assert_type(np.ma.filled(MAR_f4, float("nan")), NDArray[np.float32]) assert_type(np.ma.filled([[1, 2, 3]]), NDArray[Any]) # PyRight detects this one correctly, but mypy doesn't. # https://github.com/numpy/numpy/pull/28742#discussion_r2048968375 @@ -345,7 +345,7 @@ assert_type(np.ma.allclose(AR_f4, MAR_f4, masked_equal=False), bool) assert_type(np.ma.allclose(AR_f4, MAR_f4, rtol=.4, atol=.3), bool) assert_type(MAR_2d_f4.ravel(), np.ma.MaskedArray[tuple[int], np.dtype[np.float32]]) -assert_type(MAR_1d.ravel(order='A'), np.ma.MaskedArray[tuple[int], np.dtype[Any]]) +assert_type(MAR_1d.ravel(order="A"), np.ma.MaskedArray[tuple[int], np.dtype[Any]]) assert_type(np.ma.getmask(MAR_f4), NDArray[np.bool] | np.bool) # PyRight detects this one correctly, but mypy doesn't: @@ -381,8 +381,8 @@ assert_type(MAR_2d_f4[:], MaskedArray[np.float32]) assert_type(MAR_2d_f4[0, 0], Any) assert_type(MAR_2d_f4[:, np.newaxis], MaskedArray[np.float32]) assert_type(MAR_2d_f4[..., -1], MaskedArray[np.float32]) -assert_type(MAR_2d_V['field_0'], np.ma.MaskedArray[tuple[int, int], np.dtype]) -assert_type(MAR_2d_V[['field_0', 'field_1']], np.ma.MaskedArray[tuple[int, int], np.dtype[np.void]]) +assert_type(MAR_2d_V["field_0"], np.ma.MaskedArray[tuple[int, int], np.dtype]) +assert_type(MAR_2d_V[["field_0", "field_1"]], np.ma.MaskedArray[tuple[int, int], np.dtype[np.void]]) assert_type(np.ma.nomask, np.bool[Literal[False]]) assert_type(np.ma.MaskType, type[np.bool]) @@ -406,7 +406,7 @@ assert_type(MAR_2d_f4.recordmask, np.ma.MaskType | np.ndarray[tuple[int, int], n assert_type(MAR_2d_f4.anom(), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) assert_type(MAR_2d_f4.anom(axis=0, 
dtype=np.float16), np.ma.MaskedArray[tuple[int, int], np.dtype]) assert_type(MAR_2d_f4.anom(0, np.float16), np.ma.MaskedArray[tuple[int, int], np.dtype]) -assert_type(MAR_2d_f4.anom(0, 'float16'), np.ma.MaskedArray[tuple[int, int], np.dtype]) +assert_type(MAR_2d_f4.anom(0, "float16"), np.ma.MaskedArray[tuple[int, int], np.dtype]) assert_type(MAR_i8.fill_value, np.int64) @@ -451,15 +451,15 @@ assert_type(MAR_f8.view(dtype=np.float32, fill_value=0), MaskedArray[np.float32] assert_type(MAR_f8.view(type=np.ndarray), np.ndarray[Any, Any]) assert_type(MAR_f8.view(None, np.ndarray), np.ndarray[Any, Any]) assert_type(MAR_f8.view(dtype=np.ndarray), np.ndarray[Any, Any]) -assert_type(MAR_f8.view(dtype='float32'), MaskedArray[Any]) -assert_type(MAR_f8.view(dtype='float32', type=np.ndarray), np.ndarray[Any, Any]) +assert_type(MAR_f8.view(dtype="float32"), MaskedArray[Any]) +assert_type(MAR_f8.view(dtype="float32", type=np.ndarray), np.ndarray[Any, Any]) assert_type(MAR_2d_f4.view(dtype=np.float16), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float16]]) assert_type(MAR_2d_f4.view(dtype=np.dtype(np.float16)), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float16]]) assert_type(MAR_f8.__deepcopy__(), MaskedArray[np.float64]) assert_type(MAR_f8.argsort(), MaskedArray[np.intp]) -assert_type(MAR_f8.argsort(axis=0, kind='heap', order=('x', 'y')), MaskedArray[np.intp]) +assert_type(MAR_f8.argsort(axis=0, kind="heap", order=("x", "y")), MaskedArray[np.intp]) assert_type(MAR_f8.argsort(endwith=True, fill_value=1.5, stable=False), MaskedArray[np.intp]) assert_type(MAR_2d_f4.flat, np.ma.core.MaskedIterator[tuple[int, int], np.dtype[np.float32]]) diff --git a/numpy/typing/tests/data/reveal/numerictypes.pyi b/numpy/typing/tests/data/reveal/numerictypes.pyi index 4a3e02c9afa6..784167467532 100644 --- a/numpy/typing/tests/data/reveal/numerictypes.pyi +++ b/numpy/typing/tests/data/reveal/numerictypes.pyi @@ -48,4 +48,4 @@ assert_type(np.typecodes["Character"], Literal["c"]) 
assert_type(np.typecodes["Complex"], Literal["FDG"]) assert_type(np.typecodes["All"], Literal["?bhilqnpBHILQNPefdgFDGSUVOMm"]) -assert_type(np.sctypeDict['uint8'], type[np.generic]) +assert_type(np.sctypeDict["uint8"], type[np.generic]) diff --git a/numpy/typing/tests/data/reveal/polynomial_polybase.pyi b/numpy/typing/tests/data/reveal/polynomial_polybase.pyi index 11265b92ff67..2870d50310a2 100644 --- a/numpy/typing/tests/data/reveal/polynomial_polybase.pyi +++ b/numpy/typing/tests/data/reveal/polynomial_polybase.pyi @@ -66,11 +66,11 @@ PS_all: ( # static- and classmethods assert_type(type(PS_poly).basis_name, None) -assert_type(type(PS_cheb).basis_name, L['T']) -assert_type(type(PS_herm).basis_name, L['H']) -assert_type(type(PS_herme).basis_name, L['He']) -assert_type(type(PS_lag).basis_name, L['L']) -assert_type(type(PS_leg).basis_name, L['P']) +assert_type(type(PS_cheb).basis_name, L["T"]) +assert_type(type(PS_herm).basis_name, L["H"]) +assert_type(type(PS_herme).basis_name, L["He"]) +assert_type(type(PS_lag).basis_name, L["L"]) +assert_type(type(PS_leg).basis_name, L["P"]) assert_type(type(PS_all).__hash__, None) assert_type(type(PS_all).__array_ufunc__, None) @@ -90,10 +90,10 @@ assert_type(type(PS_leg).fromroots(SQ_O), npp.Legendre) assert_type(type(PS_leg).fromroots(AR_O_co), npp.Legendre) assert_type(type(PS_poly).identity(), npp.Polynomial) -assert_type(type(PS_cheb).identity(symbol='z'), npp.Chebyshev) +assert_type(type(PS_cheb).identity(symbol="z"), npp.Chebyshev) assert_type(type(PS_lag).basis(SC_i), npp.Laguerre) -assert_type(type(PS_leg).basis(32, symbol='u'), npp.Legendre) +assert_type(type(PS_leg).basis(32, symbol="u"), npp.Legendre) assert_type(type(PS_herm).cast(PS_poly), npp.Hermite) assert_type(type(PS_herme).cast(PS_leg), npp.HermiteE) diff --git a/numpy/typing/tests/data/reveal/scalars.pyi b/numpy/typing/tests/data/reveal/scalars.pyi index d7b277735c7c..67444e33dfc3 100644 --- a/numpy/typing/tests/data/reveal/scalars.pyi +++ 
b/numpy/typing/tests/data/reveal/scalars.pyi @@ -42,7 +42,7 @@ assert_type(c8.dtype, np.dtype[np.complex64]) assert_type(c8.real, np.float32) assert_type(c16.imag, np.float64) -assert_type(np.str_('foo'), np.str_) +assert_type(np.str_("foo"), np.str_) assert_type(V[0], Any) assert_type(V["field1"], Any) diff --git a/numpy/typing/tests/data/reveal/testing.pyi b/numpy/typing/tests/data/reveal/testing.pyi index d70bc971c15f..52d9ef6b3a5d 100644 --- a/numpy/typing/tests/data/reveal/testing.pyi +++ b/numpy/typing/tests/data/reveal/testing.pyi @@ -90,7 +90,7 @@ assert_type(np.testing.assert_equal({1}, {1}), None) assert_type(np.testing.assert_equal([1, 2, 3], [1, 2, 3], err_msg="fail"), None) assert_type(np.testing.assert_equal(1, 1.0, verbose=True), None) -assert_type(np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 1]), None) +assert_type(np.testing.print_assert_equal("Test XYZ of func xyz", [0, 1], [0, 1]), None) assert_type(np.testing.assert_almost_equal(1.0, 1.1), None) assert_type(np.testing.assert_almost_equal([1, 2, 3], [1, 2, 3], err_msg="fail"), None) diff --git a/numpy/version.pyi b/numpy/version.pyi index 113cde3f5621..b3284d7608b0 100644 --- a/numpy/version.pyi +++ b/numpy/version.pyi @@ -1,12 +1,12 @@ from typing import Final, LiteralString __all__ = ( - '__version__', - 'full_version', - 'git_revision', - 'release', - 'short_version', - 'version', + "__version__", + "full_version", + "git_revision", + "release", + "short_version", + "version", ) version: Final[LiteralString] diff --git a/ruff.toml b/ruff.toml index b3cfea15b190..124f343bc061 100644 --- a/ruff.toml +++ b/ruff.toml @@ -37,6 +37,7 @@ extend-select = [ "PGH", "PLE", "UP", + "Q", ] ignore = [ "B006", # Do not use mutable data structures for argument defaults @@ -97,6 +98,8 @@ ignore = [ "numpy*pyi" = ["E501"] # "useless assignments" aren't so useless when you're testing that they don't make type checkers scream "numpy/typing/tests/data/*" = ["B015", "B018", "E501"] +# too 
disruptive to enable all at once +"**/*.py" = ["Q"] "__init__.py" = ["F401", "F403", "F405"] "__init__.pyi" = ["F401"] From 7545bc7fdf90548a296900b89de2b14e2d19250c Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 15 Aug 2025 01:03:51 +0200 Subject: [PATCH 0311/1718] STY: ruff rule name comments --- ruff.toml | 43 ++++++++++++++++++++++++++----------------- 1 file changed, 26 insertions(+), 17 deletions(-) diff --git a/ruff.toml b/ruff.toml index 124f343bc061..e0f0dd98872e 100644 --- a/ruff.toml +++ b/ruff.toml @@ -21,25 +21,26 @@ line-ending = "lf" [lint] preview = true extend-select = [ - "B", - "C4", - "ISC", - "LOG", - "G", - "PIE", - "TID", - "FLY", - "I", - "PD", - "PERF", - "E", - "W", - "PGH", - "PLE", - "UP", - "Q", + "B", # flake8-bugbear + "C4", # flake8-comprehensions + "ISC", # flake8-implicit-str-concat + "LOG", # flake8-logging + "G", # flake8-logging-format + "PIE", # flake8-pie + "Q", # flake8-quotes + "TID", # flake8-tidy-imports + "FLY", # flynt + "I", # isort + "PD", # pandas-vet + "PERF", # perflint + "E", # pycodestyle/error + "W", # pycodestyle/warning + "PGH", # pygrep-hooks + "PLE", # pylint/error + "UP", # pyupgrade ] ignore = [ + # flake8-bugbear "B006", # Do not use mutable data structures for argument defaults "B007", # Loop control variable not used within loop body "B011", # Do not `assert False` (`python -O` removes these calls), raise `AssertionError()` @@ -47,11 +48,17 @@ ignore = [ "B028", # No explicit `stacklevel` keyword argument found "B904", # Within an `except` clause distinguish raised exceptions from errors in exception handling "B905", #`zip()` without an explicit `strict=` parameter + # flake8-comprehensions "C408", # Unnecessary `dict()` call (rewrite as a literal) + # flake8-implicit-str-concat "ISC002", # Implicitly concatenated string literals over multiple lines + # flake8-pie "PIE790", # Unnecessary `pass` statement + # pandas-vet "PD901", # Avoid using the generic variable name `df` for DataFrames + # perflint 
"PERF401", # Use a list comprehension to create a transformed list + # pycodestyle/error "E241", # Multiple spaces after comma "E265", # Block comment should start with `# ` "E266", # Too many leading `#` before block comment @@ -61,10 +68,12 @@ ignore = [ "E721", # TODO: Use `is` and `is not` for type comparisons, or `isinstance()` for isinstance check "E731", # Do not assign a `lambda` expression, use a `def` "E741", # Ambiguous variable name + # pyflakes "F403", # `from ... import *` used; unable to detect undefined names "F405", # may be undefined, or defined from star imports "F821", # Undefined name "F841", # Local variable is assigned to but never used + # pyupgrade "UP015" , # Unnecessary mode argument "UP031", # TODO: Use format specifiers instead of percent format ] From 4e00e4df15528323f6d76bbd40157b582861af86 Mon Sep 17 00:00:00 2001 From: kibitzing Date: Sat, 16 Aug 2025 21:20:52 +0900 Subject: [PATCH 0312/1718] ENH: Add ndmax parameter to np.array to control recursion depth (#29569) * ENH: add ndmax parameter to np.array * ENH: validate ndmax argument is positive * TST: add ndmax tests for array creation * ENH: allow np.array with ndmax=0 to create 0-D array * MNT: simplify ndmax validation and error message * MNT: improve consistency in error message formatting * DOC: add comment explaining legacy behavior of max_depth=0 * TST: update tests to reflect new ndmax validation and error message * DOC: add documentation and examples for np.array ndmax parameter * DOC: add release note for numpy.array ndmax parameter --- .../upcoming_changes/29569.new_feature.rst | 27 +++++++ numpy/_core/_add_newdocs.py | 26 ++++++- numpy/_core/src/multiarray/array_converter.c | 2 +- numpy/_core/src/multiarray/ctors.c | 19 ++++- numpy/_core/src/multiarray/multiarraymodule.c | 28 ++++--- numpy/_core/src/multiarray/scalartypes.c.src | 2 +- numpy/_core/tests/test_multiarray.py | 73 +++++++++++++++++++ 7 files changed, 162 insertions(+), 15 deletions(-) create mode 100644 
doc/release/upcoming_changes/29569.new_feature.rst diff --git a/doc/release/upcoming_changes/29569.new_feature.rst b/doc/release/upcoming_changes/29569.new_feature.rst new file mode 100644 index 000000000000..ac014c07c7a0 --- /dev/null +++ b/doc/release/upcoming_changes/29569.new_feature.rst @@ -0,0 +1,27 @@ +``ndmax`` option for `numpy.array` +---------------------------------------------------- +The ``ndmax`` option is now available for `numpy.array`. +It explicitly limits the maximum number of dimensions created from nested sequences. + +This is particularly useful when creating arrays of list-like objects with ``dtype=object``. +By default, NumPy recurses through all nesting levels to create the highest possible +dimensional array, but this behavior may not be desired when the intent is to preserve +nested structures as objects. The ``ndmax`` parameter provides explicit control over +this recursion depth. + +.. code-block:: python + + # Default behavior: Creates a 2D array + >>> a = np.array([[1, 2], [3, 4]], dtype=object) + >>> a + array([[1, 2], + [3, 4]], dtype=object) + >>> a.shape + (2, 2) + + # With ndmax=1: Creates a 1D array + >>> b = np.array([[1, 2], [3, 4]], dtype=object, ndmax=1) + >>> b + array([list([1, 2]), list([3, 4])], dtype=object) + >>> b.shape + (2,) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index ed8cf50ee360..e3009a490bd3 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -806,7 +806,7 @@ add_newdoc('numpy._core.multiarray', 'array', """ array(object, dtype=None, *, copy=True, order='K', subok=False, ndmin=0, - like=None) + ndmax=None, like=None) Create an array. @@ -855,6 +855,15 @@ Specifies the minimum number of dimensions that the resulting array should have. Ones will be prepended to the shape as needed to meet this requirement. + ndmax : int, optional + Specifies the maximum number of dimensions to create when inferring + shape from nested sequences. 
By default, NumPy recurses through all + nesting levels (up to the compile-time constant ``NPY_MAXDIMS``). + Setting ``ndmax`` stops recursion at the specified depth, preserving + deeper nested structures as objects instead of promoting them to + higher-dimensional arrays. In this case, ``dtype=object`` is required. + + .. versionadded:: 2.4.0 ${ARRAY_FUNCTION_LIKE} .. versionadded:: 1.20.0 @@ -926,6 +935,21 @@ matrix([[1, 2], [3, 4]]) + Limiting the maximum dimensions with ``ndmax``: + + >>> a = np.array([[1, 2], [3, 4]], dtype=object, ndmax=2) + >>> a + array([[1, 2], + [3, 4]], dtype=object) + >>> a.shape + (2, 2) + + >>> b = np.array([[1, 2], [3, 4]], dtype=object, ndmax=1) + >>> b + array([list([1, 2]), list([3, 4])], dtype=object) + >>> b.shape + (2,) + """) add_newdoc('numpy._core.multiarray', 'asarray', diff --git a/numpy/_core/src/multiarray/array_converter.c b/numpy/_core/src/multiarray/array_converter.c index 496173038954..10dc83ac657f 100644 --- a/numpy/_core/src/multiarray/array_converter.c +++ b/numpy/_core/src/multiarray/array_converter.c @@ -83,7 +83,7 @@ array_converter_new( } else { item->array = (PyArrayObject *)PyArray_FromAny_int( - item->object, NULL, NULL, 0, 0, 0, NULL, + item->object, NULL, NULL, 0, NPY_MAXDIMS, 0, NULL, &item->scalar_input); if (item->array == NULL) { goto fail; diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index 498fa78118b3..38da6f314848 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -1508,6 +1508,16 @@ PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth, return NULL; } + /* + * The internal implementation treats 0 as actually wanting a zero-dimensional + * array, but the API for this function has typically treated it as + * "anything is fine", so convert here. + * TODO: should we use another value as a placeholder instead? 
+ */ + if (max_depth == 0 || max_depth > NPY_MAXDIMS) { + max_depth = NPY_MAXDIMS; + } + int was_scalar; PyObject* ret = PyArray_FromAny_int( op, dt_info.descr, dt_info.dtype, @@ -1563,7 +1573,7 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, Py_BEGIN_CRITICAL_SECTION(op); ndim = PyArray_DiscoverDTypeAndShape( - op, NPY_MAXDIMS, dims, &cache, in_DType, in_descr, &dtype, + op, max_depth, dims, &cache, in_DType, in_descr, &dtype, copy, &was_copied_by__array__); if (ndim < 0) { @@ -1583,7 +1593,7 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, npy_free_coercion_cache(cache); goto cleanup; } - if (max_depth != 0 && ndim > max_depth) { + if (ndim > max_depth && (in_DType == NULL || in_DType->type_num != NPY_OBJECT)) { PyErr_SetString(PyExc_ValueError, "object too deep for desired array"); npy_free_coercion_cache(cache); @@ -1798,6 +1808,11 @@ PyArray_CheckFromAny(PyObject *op, PyArray_Descr *descr, int min_depth, return NULL; } + /* See comment in PyArray_FromAny for rationale */ + if (max_depth == 0 || max_depth > NPY_MAXDIMS) { + max_depth = NPY_MAXDIMS; + } + PyObject* ret = PyArray_CheckFromAny_int( op, dt_info.descr, dt_info.dtype, min_depth, max_depth, requires, context); diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index 3d82e6c7f448..a7fdf3efba17 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -1560,7 +1560,7 @@ _prepend_ones(PyArrayObject *arr, int nd, int ndmin, NPY_ORDER order) static inline PyObject * _array_fromobject_generic( PyObject *op, PyArray_Descr *in_descr, PyArray_DTypeMeta *in_DType, - NPY_COPYMODE copy, NPY_ORDER order, npy_bool subok, int ndmin) + NPY_COPYMODE copy, NPY_ORDER order, npy_bool subok, int ndmin, int ndmax) { PyArrayObject *oparr = NULL, *ret = NULL; PyArray_Descr *oldtype = NULL; @@ -1570,10 +1570,9 @@ _array_fromobject_generic( Py_XINCREF(in_descr); PyArray_Descr *dtype = 
in_descr; - if (ndmin > NPY_MAXDIMS) { + if (ndmin > ndmax) { PyErr_Format(PyExc_ValueError, - "ndmin bigger than allowable number of dimensions " - "NPY_MAXDIMS (=%d)", NPY_MAXDIMS); + "ndmin must be <= ndmax (%d)", ndmax); goto finish; } /* fast exit if simple call */ @@ -1682,7 +1681,7 @@ _array_fromobject_generic( flags |= NPY_ARRAY_FORCECAST; ret = (PyArrayObject *)PyArray_CheckFromAny_int( - op, dtype, in_DType, 0, 0, flags, NULL); + op, dtype, in_DType, 0, ndmax, flags, NULL); finish: Py_XDECREF(dtype); @@ -1713,6 +1712,7 @@ array_array(PyObject *NPY_UNUSED(ignored), npy_bool subok = NPY_FALSE; NPY_COPYMODE copy = NPY_COPY_ALWAYS; int ndmin = 0; + int ndmax = NPY_MAXDIMS; npy_dtype_info dt_info = {NULL, NULL}; NPY_ORDER order = NPY_KEEPORDER; PyObject *like = Py_None; @@ -1726,6 +1726,7 @@ array_array(PyObject *NPY_UNUSED(ignored), "$order", &PyArray_OrderConverter, &order, "$subok", &PyArray_BoolConverter, &subok, "$ndmin", &PyArray_PythonPyIntFromInt, &ndmin, + "$ndmax", &PyArray_PythonPyIntFromInt, &ndmax, "$like", NULL, &like, NULL, NULL, NULL) < 0) { Py_XDECREF(dt_info.descr); @@ -1747,8 +1748,15 @@ array_array(PyObject *NPY_UNUSED(ignored), op = args[0]; } + if (ndmax > NPY_MAXDIMS || ndmax < 0) { + PyErr_Format(PyExc_ValueError, "ndmax must be in the range [0, NPY_MAXDIMS (%d)] ", NPY_MAXDIMS); + Py_XDECREF(dt_info.descr); + Py_XDECREF(dt_info.dtype); + return NULL; + } + PyObject *res = _array_fromobject_generic( - op, dt_info.descr, dt_info.dtype, copy, order, subok, ndmin); + op, dt_info.descr, dt_info.dtype, copy, order, subok, ndmin, ndmax); Py_XDECREF(dt_info.descr); Py_XDECREF(dt_info.dtype); return res; @@ -1794,7 +1802,7 @@ array_asarray(PyObject *NPY_UNUSED(ignored), } PyObject *res = _array_fromobject_generic( - op, dt_info.descr, dt_info.dtype, copy, order, NPY_FALSE, 0); + op, dt_info.descr, dt_info.dtype, copy, order, NPY_FALSE, 0, NPY_MAXDIMS); Py_XDECREF(dt_info.descr); Py_XDECREF(dt_info.dtype); return res; @@ -1840,7 +1848,7 @@ 
array_asanyarray(PyObject *NPY_UNUSED(ignored), } PyObject *res = _array_fromobject_generic( - op, dt_info.descr, dt_info.dtype, copy, order, NPY_TRUE, 0); + op, dt_info.descr, dt_info.dtype, copy, order, NPY_TRUE, 0, NPY_MAXDIMS); Py_XDECREF(dt_info.descr); Py_XDECREF(dt_info.dtype); return res; @@ -1882,7 +1890,7 @@ array_ascontiguousarray(PyObject *NPY_UNUSED(ignored), PyObject *res = _array_fromobject_generic( op, dt_info.descr, dt_info.dtype, NPY_COPY_IF_NEEDED, NPY_CORDER, NPY_FALSE, - 1); + 1, NPY_MAXDIMS); Py_XDECREF(dt_info.descr); Py_XDECREF(dt_info.dtype); return res; @@ -1924,7 +1932,7 @@ array_asfortranarray(PyObject *NPY_UNUSED(ignored), PyObject *res = _array_fromobject_generic( op, dt_info.descr, dt_info.dtype, NPY_COPY_IF_NEEDED, NPY_FORTRANORDER, - NPY_FALSE, 1); + NPY_FALSE, 1, NPY_MAXDIMS); Py_XDECREF(dt_info.descr); Py_XDECREF(dt_info.dtype); return res; diff --git a/numpy/_core/src/multiarray/scalartypes.c.src b/numpy/_core/src/multiarray/scalartypes.c.src index 5e3a3ba71d3e..a6170936a5f3 100644 --- a/numpy/_core/src/multiarray/scalartypes.c.src +++ b/numpy/_core/src/multiarray/scalartypes.c.src @@ -226,7 +226,7 @@ find_binary_operation_path( */ int was_scalar; PyArrayObject *arr = (PyArrayObject *)PyArray_FromAny_int( - other, NULL, NULL, 0, 0, 0, NULL, &was_scalar); + other, NULL, NULL, 0, NPY_MAXDIMS, 0, NULL, &was_scalar); if (arr == NULL) { return -1; } diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index cf2f899b7991..da4eeb91cfc2 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -1279,6 +1279,79 @@ def test_creation_from_dtypemeta(self, func): assert_array_equal(arr1, arr2) assert arr2.dtype == dtype + def test_ndmax_less_than_actual_dims_dtype_object(self): + data = [[1, 2, 3], [4, 5, 6]] + arr = np.array(data, ndmax=1, dtype=object) + assert arr.ndim == 1 + assert arr.shape == (2,) + assert arr.dtype == object + + data = [[1, 2, 3], [4, 5]] + arr = 
np.array(data, ndmax=1, dtype=object) + assert arr.ndim == 1 + assert arr.shape == (2,) + assert arr.dtype == object + + data = [[[1], [2]], [[3], [4]]] + arr = np.array(data, ndmax=2, dtype=object) + assert arr.ndim == 2 + assert arr.shape == (2, 2) + assert arr.dtype == object + + def test_ndmax_equal_to_actual_dims(self): + data = [[1, 2], [3, 4]] + arr = np.array(data, ndmax=2) + assert arr.ndim == 2 + assert_array_equal(arr, np.array(data)) + + def test_ndmax_greater_than_actual_dims(self): + data = [[1, 2], [3, 4]] + arr = np.array(data, ndmax=3) + assert arr.ndim == 2 + assert_array_equal(arr, np.array(data)) + + def test_ndmax_less_than_actual_dims(self): + data = [[[1], [2]], [[3], [4]]] + with pytest.raises(ValueError, + match="setting an array element with a sequence. " + "The requested array would exceed the maximum number of dimension of 2."): + np.array(data, ndmax=2) + + def test_ndmax_is_zero(self): + data = [1, 2, 3] + arr = np.array(data, ndmax=0, dtype=object) + assert arr.ndim == 0 + assert arr.shape == () + assert arr.dtype == object + + data = [[1, 2, 3], [4, 5, 6]] + arr = np.array(data, ndmax=0, dtype=object) + assert arr.ndim == 0 + assert arr.shape == () + assert arr.dtype == object + + data = [[1, 2, 3], [4, 5]] + arr = np.array(data, ndmax=0, dtype=object) + assert arr.ndim == 0 + assert arr.shape == () + assert arr.dtype == object + + def test_ndmax_less_than_ndmin(self): + data = [[[1], [2]], [[3], [4]]] + with pytest.raises(ValueError, match="ndmin must be <= ndmax"): + np.array(data, ndmax=1, ndmin=2) + + def test_ndmax_is_negative(self): + data = [1, 2, 3] + with pytest.raises(ValueError, match="ndmax must be in the range"): + np.array(data, ndmax=-1) + + def test_ndmax_greather_than_NPY_MAXDIMS(self): + data = [1, 2, 3] + # current NPY_MAXDIMS is 64 + with pytest.raises(ValueError, match="ndmax must be in the range"): + np.array(data, ndmax=65) + class TestStructured: def test_subarray_field_access(self): From 
b9dcaa3ab616920be14ad931a228a0ee0378057e Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 16 Aug 2025 20:51:10 +0200 Subject: [PATCH 0313/1718] TYP: add ``ndmax`` parameter to ``np..array`` This follows #29569, and also fills in the missing parameter defaults, towards #28428. --- numpy/_core/multiarray.pyi | 59 +++++++++++++++++++++----------------- 1 file changed, 32 insertions(+), 27 deletions(-) diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index 560822d68466..5a3fa46bee20 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -441,57 +441,62 @@ def empty_like( @overload def array( object: _ArrayT, - dtype: None = ..., + dtype: None = None, *, - copy: bool | _CopyMode | None = ..., - order: _OrderKACF = ..., + copy: bool | _CopyMode | None = True, + order: _OrderKACF = "K", subok: L[True], - ndmin: int = ..., - like: _SupportsArrayFunc | None = ..., + ndmin: int = 0, + ndmax: int | None = None, + like: _SupportsArrayFunc | None = None, ) -> _ArrayT: ... @overload def array( object: _SupportsArray[_ArrayT], - dtype: None = ..., + dtype: None = None, *, - copy: bool | _CopyMode | None = ..., - order: _OrderKACF = ..., + copy: bool | _CopyMode | None = True, + order: _OrderKACF = "K", subok: L[True], - ndmin: L[0] = ..., - like: _SupportsArrayFunc | None = ..., + ndmin: L[0] = 0, + ndmax: int | None = None, + like: _SupportsArrayFunc | None = None, ) -> _ArrayT: ... @overload def array( object: _ArrayLike[_ScalarT], - dtype: None = ..., + dtype: None = None, *, - copy: bool | _CopyMode | None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - ndmin: int = ..., - like: _SupportsArrayFunc | None = ..., + copy: bool | _CopyMode | None = True, + order: _OrderKACF = "K", + subok: bool = False, + ndmin: int = 0, + ndmax: int | None = None, + like: _SupportsArrayFunc | None = None, ) -> NDArray[_ScalarT]: ... 
@overload def array( object: Any, dtype: _DTypeLike[_ScalarT], *, - copy: bool | _CopyMode | None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - ndmin: int = ..., - like: _SupportsArrayFunc | None = ..., + copy: bool | _CopyMode | None = True, + order: _OrderKACF = "K", + subok: bool = False, + ndmin: int = 0, + ndmax: int | None = None, + like: _SupportsArrayFunc | None = None, ) -> NDArray[_ScalarT]: ... @overload def array( object: Any, - dtype: DTypeLike | None = ..., + dtype: DTypeLike | None = None, *, - copy: bool | _CopyMode | None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - ndmin: int = ..., - like: _SupportsArrayFunc | None = ..., + copy: bool | _CopyMode | None = True, + order: _OrderKACF = "K", + subok: bool = False, + ndmin: int = 0, + ndmax: int | None = None, + like: _SupportsArrayFunc | None = None, ) -> NDArray[Any]: ... # From 0d5bec96a0a826064b3fc3099ac9d3903f7af659 Mon Sep 17 00:00:00 2001 From: Riku Sakamoto Date: Sun, 13 Jul 2025 12:47:45 -0700 Subject: [PATCH 0314/1718] ENH: Allow subscript access for `np.bool` by adding `__class_getitem__` Fixes #29247. Implements `booleantype_class_getitem_abc` and adds a `__class_getitem__` method to `np.bool` to enable subscript access (e.g., `np.bool[int]`). --- numpy/_core/src/multiarray/scalartypes.c.src | 28 ++++++++++++++++++++ numpy/_core/tests/test_scalar_methods.py | 8 ++++-- 2 files changed, 34 insertions(+), 2 deletions(-) diff --git a/numpy/_core/src/multiarray/scalartypes.c.src b/numpy/_core/src/multiarray/scalartypes.c.src index 5e3a3ba71d3e..80b9f8e337c4 100644 --- a/numpy/_core/src/multiarray/scalartypes.c.src +++ b/numpy/_core/src/multiarray/scalartypes.c.src @@ -2447,6 +2447,23 @@ numbertype_class_getitem_abc(PyObject *cls, PyObject *args) return Py_GenericAlias(cls, args); } + +static PyObject * +booleantype_class_getitem_abc(PyObject *cls, PyObject *args) +{ + const Py_ssize_t args_len = PyTuple_Check(args) ? 
PyTuple_Size(args) : 1; + int args_len_expected = 1; + + if ((args_len > args_len_expected) || (args_len == 0)) { + return PyErr_Format(PyExc_TypeError, + "Too %s arguments for %s", + args_len > args_len_expected ? "many" : "few", + ((PyTypeObject *)cls)->tp_name); + } + return Py_GenericAlias(cls, args); +} + + /* * Use for concrete np.number subclasses, making them act as if they * were subtyped from e.g. np.signedinteger[object], thus lacking any @@ -2853,6 +2870,16 @@ static PyMethodDef numbertype_methods[] = { {NULL, NULL, 0, NULL} /* sentinel */ }; + +static PyMethodDef booleantype_methods[] = { + /* for typing */ + {"__class_getitem__", + (PyCFunction)booleantype_class_getitem_abc, + METH_CLASS | METH_O, NULL}, + {NULL, NULL, 0, NULL} /* sentinel */ +}; + + /**begin repeat * #name = cfloat,clongdouble# */ @@ -4571,6 +4598,7 @@ initialize_numeric_types(void) PyBoolArrType_Type.tp_str = genbool_type_str; PyBoolArrType_Type.tp_repr = genbool_type_repr; + PyBoolArrType_Type.tp_methods = booleantype_methods; /**begin repeat diff --git a/numpy/_core/tests/test_scalar_methods.py b/numpy/_core/tests/test_scalar_methods.py index 2d508a08bb4d..e2943be4f660 100644 --- a/numpy/_core/tests/test_scalar_methods.py +++ b/numpy/_core/tests/test_scalar_methods.py @@ -171,8 +171,12 @@ def test_abc_non_numeric(self, cls: type[np.generic]) -> None: @pytest.mark.parametrize("code", np.typecodes["All"]) def test_concrete(self, code: str) -> None: cls = np.dtype(code).type - with pytest.raises(TypeError): - cls[Any] + if cls == np.bool: + # np.bool allows subscript + assert cls[Any] + else: + with pytest.raises(TypeError): + cls[Any] @pytest.mark.parametrize("arg_len", range(4)) def test_subscript_tuple(self, arg_len: int) -> None: From 003281b94b9c906bc11ceb516e9c1ed1a9cd722f Mon Sep 17 00:00:00 2001 From: Riku Sakamoto Date: Sun, 17 Aug 2025 14:42:04 +0900 Subject: [PATCH 0315/1718] ENH: Delegate checking of type argument count for `np.bool` to static type checkers --- 
numpy/_core/src/multiarray/scalartypes.c.src | 27 +++----------------- numpy/_core/tests/test_scalar_methods.py | 6 ++++- 2 files changed, 8 insertions(+), 25 deletions(-) diff --git a/numpy/_core/src/multiarray/scalartypes.c.src b/numpy/_core/src/multiarray/scalartypes.c.src index 80b9f8e337c4..dc8d047917ae 100644 --- a/numpy/_core/src/multiarray/scalartypes.c.src +++ b/numpy/_core/src/multiarray/scalartypes.c.src @@ -2447,23 +2447,6 @@ numbertype_class_getitem_abc(PyObject *cls, PyObject *args) return Py_GenericAlias(cls, args); } - -static PyObject * -booleantype_class_getitem_abc(PyObject *cls, PyObject *args) -{ - const Py_ssize_t args_len = PyTuple_Check(args) ? PyTuple_Size(args) : 1; - int args_len_expected = 1; - - if ((args_len > args_len_expected) || (args_len == 0)) { - return PyErr_Format(PyExc_TypeError, - "Too %s arguments for %s", - args_len > args_len_expected ? "many" : "few", - ((PyTypeObject *)cls)->tp_name); - } - return Py_GenericAlias(cls, args); -} - - /* * Use for concrete np.number subclasses, making them act as if they * were subtyped from e.g. 
np.signedinteger[object], thus lacking any @@ -2870,16 +2853,12 @@ static PyMethodDef numbertype_methods[] = { {NULL, NULL, 0, NULL} /* sentinel */ }; - static PyMethodDef booleantype_methods[] = { - /* for typing */ - {"__class_getitem__", - (PyCFunction)booleantype_class_getitem_abc, - METH_CLASS | METH_O, NULL}, - {NULL, NULL, 0, NULL} /* sentinel */ + /* for typing */ + {"__class_getitem__", Py_GenericAlias, METH_CLASS | METH_O, NULL}, + {NULL, NULL, 0, NULL} /* sentinel */ }; - /**begin repeat * #name = cfloat,clongdouble# */ diff --git a/numpy/_core/tests/test_scalar_methods.py b/numpy/_core/tests/test_scalar_methods.py index e2943be4f660..26dad71794e3 100644 --- a/numpy/_core/tests/test_scalar_methods.py +++ b/numpy/_core/tests/test_scalar_methods.py @@ -4,7 +4,7 @@ import fractions import platform import types -from typing import Any +from typing import Any, Literal import pytest @@ -190,6 +190,10 @@ def test_subscript_tuple(self, arg_len: int) -> None: def test_subscript_scalar(self) -> None: assert np.number[Any] + @pytest.mark.parametrize("subscript", [Literal[True], Literal[False]]) + def test_subscript_bool(self, subscript: Literal[True, False]) -> None: + assert isinstance(np.bool[subscript], types.GenericAlias) + class TestBitCount: # derived in part from the cpython test "test_bit_count" From 4136d1c20d73e8e1bdc7f1a6a24a138cb44cd0dc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Melissa=20Weber=20Mendon=C3=A7a?= Date: Sun, 17 Aug 2025 16:07:11 -0300 Subject: [PATCH 0316/1718] DOC: Add link to homepage in doc landing page [skip azp][skip cirrus][skip actions] --- doc/source/index.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/index.rst b/doc/source/index.rst index 02f3a8dc12b0..00d1bb62e6b3 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -21,6 +21,7 @@ NumPy documentation `Historical versions of documentation `_ **Useful links**: +`Home `_ | `Installation `_ | `Source Repository `_ | `Issue Tracker `_ | From 
e7d3a29e1497bb65f8a6aad93689df0ae2cd0b6a Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Mon, 18 Aug 2025 12:33:37 +0100 Subject: [PATCH 0317/1718] TYP: Type default values in stubs in `numpy/ma` (#29531) --- numpy/ma/core.pyi | 114 +++++++++++++++++++++--------------------- numpy/ma/extras.pyi | 38 +++++++------- numpy/ma/mrecords.pyi | 48 +++++++++--------- 3 files changed, 100 insertions(+), 100 deletions(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index a033e735d3f5..9adab779776a 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -328,10 +328,10 @@ def filled(a: ndarray[_ShapeT_co, _DTypeT_co], fill_value: _ScalarLike_co | None def filled(a: _ArrayLike[_ScalarT_co], fill_value: _ScalarLike_co | None = None) -> NDArray[_ScalarT_co]: ... @overload def filled(a: ArrayLike, fill_value: _ScalarLike_co | None = None) -> NDArray[Any]: ... -def getdata(a, subok=...): ... +def getdata(a, subok=True): ... get_data = getdata -def fix_invalid(a, mask=..., copy=..., fill_value=...): ... +def fix_invalid(a, mask=..., copy=True, fill_value=None): ... class _MaskedUFunc: f: Any @@ -350,9 +350,9 @@ class _MaskedBinaryOperation(_MaskedUFunc): filly: Any def __init__(self, mbfunc, fillx=..., filly=...): ... def __call__(self, a, b, *args, **kwargs): ... - def reduce(self, target, axis=..., dtype=...): ... + def reduce(self, target, axis=0, dtype=None): ... def outer(self, a, b): ... - def accumulate(self, target, axis=...): ... + def accumulate(self, target, axis=0): ... class _DomainedBinaryOperation(_MaskedUFunc): domain: Any @@ -433,29 +433,29 @@ def getmaskarray(arr): ... # which isn't necessarily a ndarray. Please open an issue if this causes issues. def is_mask(m: object) -> TypeIs[NDArray[bool_]]: ... -def make_mask(m, copy=..., shrink=..., dtype=...): ... -def make_mask_none(newshape, dtype=...): ... -def mask_or(m1, m2, copy=..., shrink=...): ... 
+def make_mask(m, copy=False, shrink=True, dtype=...): ... +def make_mask_none(newshape, dtype=None): ... +def mask_or(m1, m2, copy=False, shrink=True): ... def flatten_mask(mask): ... -def masked_where(condition, a, copy=...): ... -def masked_greater(x, value, copy=...): ... -def masked_greater_equal(x, value, copy=...): ... -def masked_less(x, value, copy=...): ... -def masked_less_equal(x, value, copy=...): ... -def masked_not_equal(x, value, copy=...): ... -def masked_equal(x, value, copy=...): ... -def masked_inside(x, v1, v2, copy=...): ... -def masked_outside(x, v1, v2, copy=...): ... -def masked_object(x, value, copy=..., shrink=...): ... -def masked_values(x, value, rtol=..., atol=..., copy=..., shrink=...): ... -def masked_invalid(a, copy=...): ... +def masked_where(condition, a, copy=True): ... +def masked_greater(x, value, copy=True): ... +def masked_greater_equal(x, value, copy=True): ... +def masked_less(x, value, copy=True): ... +def masked_less_equal(x, value, copy=True): ... +def masked_not_equal(x, value, copy=True): ... +def masked_equal(x, value, copy=True): ... +def masked_inside(x, v1, v2, copy=True): ... +def masked_outside(x, v1, v2, copy=True): ... +def masked_object(x, value, copy=True, shrink=True): ... +def masked_values(x, value, rtol=1e-5, atol=1e-8, copy=True, shrink=True): ... +def masked_invalid(a, copy=True): ... class _MaskedPrintOption: def __init__(self, display): ... def display(self): ... def set_display(self, s): ... def enabled(self): ... - def enable(self, shrink=...): ... + def enable(self, shrink=1): ... masked_print_option: _MaskedPrintOption @@ -1302,7 +1302,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def count(self, axis: _ShapeLike, keepdims: bool | _NoValueType = ...) -> NDArray[int_]: ... @overload - def count(self, axis: _ShapeLike | None = ..., *, keepdims: Literal[True]) -> NDArray[int_]: ... + def count(self, axis: _ShapeLike | None = None, *, keepdims: Literal[True]) -> NDArray[int_]: ... 
@overload def count(self, axis: _ShapeLike | None, keepdims: Literal[True]) -> NDArray[int_]: ... @@ -1492,19 +1492,19 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def trace( self, # >= 2D MaskedArray - offset: SupportsIndex = ..., - axis1: SupportsIndex = ..., - axis2: SupportsIndex = ..., - dtype: DTypeLike | None = ..., - out: None = ..., + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + dtype: DTypeLike | None = None, + out: None = None, ) -> Any: ... @overload def trace( self, # >= 2D MaskedArray - offset: SupportsIndex = ..., - axis1: SupportsIndex = ..., - axis2: SupportsIndex = ..., - dtype: DTypeLike | None = ..., + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + dtype: DTypeLike | None = None, *, out: _ArrayT, ) -> _ArrayT: ... @@ -1520,9 +1520,9 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # This differs from `ndarray.dot`, in that 1D dot 1D returns a 0D array. @overload - def dot(self, b: ArrayLike, out: None = ..., strict: bool = ...) -> _MaskedArray[Any]: ... + def dot(self, b: ArrayLike, out: None = None, strict: bool = False) -> _MaskedArray[Any]: ... @overload - def dot(self, b: ArrayLike, out: _ArrayT, strict: bool = ...) -> _ArrayT: ... + def dot(self, b: ArrayLike, out: _ArrayT, strict: bool = False) -> _ArrayT: ... @overload def sum( @@ -2043,7 +2043,7 @@ class mvoid(MaskedArray[_ShapeT_co, _DTypeT_co]): def __setitem__(self, indx, value): ... def __iter__(self): ... def __len__(self): ... - def filled(self, fill_value=...): ... + def filled(self, fill_value=None): ... def tolist(self): ... def isMaskedArray(x): ... 
@@ -2076,16 +2076,16 @@ masked_array = MaskedArray def array( data, - dtype=..., - copy=..., - order=..., + dtype=None, + copy=False, + order=None, mask=..., - fill_value=..., - keep_mask=..., - hard_mask=..., - shrink=..., - subok=..., - ndmin=..., + fill_value=None, + keep_mask=True, + hard_mask=False, + shrink=True, + subok=True, + ndmin=0, ): ... def is_masked(x: object) -> bool: ... @@ -2370,8 +2370,8 @@ def take( mode: _ModeKind = "raise", ) -> _ArrayT: ... -def power(a, b, third=...): ... -def argsort(a, axis=..., kind=..., order=..., endwith=..., fill_value=..., *, stable=...): ... +def power(a, b, third=None): ... +def argsort(a, axis=..., kind=None, order=None, endwith=True, fill_value=None, *, stable=None): ... @overload def sort( a: _ArrayT, @@ -2398,22 +2398,22 @@ def sort( def compressed(x: _ArrayLike[_ScalarT_co]) -> _Array1D[_ScalarT_co]: ... @overload def compressed(x: ArrayLike) -> _Array1D[Any]: ... -def concatenate(arrays, axis=...): ... -def diag(v, k=...): ... +def concatenate(arrays, axis=0): ... +def diag(v, k=0): ... def left_shift(a, n): ... def right_shift(a, n): ... def put(a: NDArray[Any], indices: _ArrayLikeInt_co, values: ArrayLike, mode: _ModeKind = "raise") -> None: ... def putmask(a: NDArray[Any], mask: _ArrayLikeBool_co, values: ArrayLike) -> None: ... -def transpose(a, axes=...): ... -def reshape(a, new_shape, order=...): ... +def transpose(a, axes=None): ... +def reshape(a, new_shape, order="C"): ... def resize(x, new_shape): ... def ndim(obj: ArrayLike) -> int: ... def shape(obj): ... def size(obj: ArrayLike, axis: SupportsIndex | None = None) -> int: ... -def diff(a, /, n=..., axis=..., prepend=..., append=...): ... +def diff(a, /, n=1, axis=-1, prepend=..., append=...): ... def where(condition, x=..., y=...): ... -def choose(indices, choices, out=..., mode=...): ... -def round_(a, decimals=..., out=...): ... +def choose(indices, choices, out=None, mode="raise"): ... +def round_(a, decimals=0, out=None): ... 
round = round_ def inner(a, b): ... @@ -2422,15 +2422,15 @@ innerproduct = inner def outer(a, b): ... outerproduct = outer -def correlate(a, v, mode=..., propagate_mask=...): ... -def convolve(a, v, mode=..., propagate_mask=...): ... +def correlate(a, v, mode="valid", propagate_mask=True): ... +def convolve(a, v, mode="full", propagate_mask=True): ... def allequal(a: ArrayLike, b: ArrayLike, fill_value: bool = True) -> bool: ... def allclose(a: ArrayLike, b: ArrayLike, masked_equal: bool = True, rtol: float = 1e-5, atol: float = 1e-8) -> bool: ... -def asarray(a, dtype=..., order=...): ... -def asanyarray(a, dtype=...): ... +def asarray(a, dtype=None, order=None): ... +def asanyarray(a, dtype=None): ... def fromflex(fxarray): ... class _convert2ma: @@ -2452,6 +2452,6 @@ squeeze: _convert2ma zeros: _convert2ma zeros_like: _convert2ma -def append(a, b, axis=...): ... -def dot(a, b, strict=..., out=...): ... +def append(a, b, axis=None): ... +def dot(a, b, strict=False, out=None): ... def mask_rowcols(a, axis=...): ... diff --git a/numpy/ma/extras.pyi b/numpy/ma/extras.pyi index c3f9fcde4a0a..5a7b2b399ea8 100644 --- a/numpy/ma/extras.pyi +++ b/numpy/ma/extras.pyi @@ -55,8 +55,8 @@ __all__ = [ "vstack", ] -def count_masked(arr, axis=...): ... -def masked_all(shape, dtype=...): ... +def count_masked(arr, axis=None): ... +def masked_all(shape, dtype=float): ... # noqa: PYI014 def masked_all_like(arr): ... class _fromnxfunction: @@ -91,23 +91,23 @@ diagflat: _fromnxfunction_single def apply_along_axis(func1d, axis, arr, *args, **kwargs): ... def apply_over_axes(func, a, axes): ... -def median(a, axis=..., out=..., overwrite_input=..., keepdims=...): ... -def compress_nd(x, axis=...): ... -def compress_rowcols(x, axis=...): ... +def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): ... +def compress_nd(x, axis=None): ... +def compress_rowcols(x, axis=None): ... def compress_rows(a): ... def compress_cols(a): ... def mask_rows(a, axis=...): ... 
def mask_cols(a, axis=...): ... -def ediff1d(arr, to_end=..., to_begin=...): ... -def unique(ar1, return_index=..., return_inverse=...): ... -def intersect1d(ar1, ar2, assume_unique=...): ... -def setxor1d(ar1, ar2, assume_unique=...): ... -def in1d(ar1, ar2, assume_unique=..., invert=...): ... -def isin(element, test_elements, assume_unique=..., invert=...): ... +def ediff1d(arr, to_end=None, to_begin=None): ... +def unique(ar1, return_index=False, return_inverse=False): ... +def intersect1d(ar1, ar2, assume_unique=False): ... +def setxor1d(ar1, ar2, assume_unique=False): ... +def in1d(ar1, ar2, assume_unique=False, invert=False): ... +def isin(element, test_elements, assume_unique=False, invert=False): ... def union1d(ar1, ar2): ... -def setdiff1d(ar1, ar2, assume_unique=...): ... -def cov(x, y=..., rowvar=..., bias=..., allow_masked=..., ddof=...): ... -def corrcoef(x, y=..., rowvar=..., bias=..., allow_masked=..., ddof=...): ... +def setdiff1d(ar1, ar2, assume_unique=False): ... +def cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None): ... +def corrcoef(x, y=None, rowvar=True, bias=..., allow_masked=True, ddof=...): ... class MAxisConcatenator(AxisConcatenator): @staticmethod @@ -120,15 +120,15 @@ class mr_class(MAxisConcatenator): mr_: mr_class -def ndenumerate(a, compressed=...): ... +def ndenumerate(a, compressed=True): ... def flatnotmasked_edges(a): ... -def notmasked_edges(a, axis=...): ... +def notmasked_edges(a, axis=None): ... def flatnotmasked_contiguous(a): ... -def notmasked_contiguous(a, axis=...): ... +def notmasked_contiguous(a, axis=None): ... def clump_unmasked(a): ... def clump_masked(a): ... -def vander(x, n=...): ... -def polyfit(x, y, deg, rcond=..., full=..., w=..., cov=...): ... +def vander(x, n=None): ... +def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): ... # def mask_rowcols(a: Incomplete, axis: Incomplete | None = None) -> MaskedArray[Incomplete, np.dtype[Incomplete]]: ... 
diff --git a/numpy/ma/mrecords.pyi b/numpy/ma/mrecords.pyi index cae687aa7d1a..c1f3592b0dd6 100644 --- a/numpy/ma/mrecords.pyi +++ b/numpy/ma/mrecords.pyi @@ -48,49 +48,49 @@ class MaskedRecords(MaskedArray[_ShapeT_co, _DTypeT_co]): def __setattr__(self, attr, val): ... def __getitem__(self, indx): ... def __setitem__(self, indx, value): ... - def view(self, dtype=..., type=...): ... + def view(self, dtype=None, type=None): ... def harden_mask(self): ... def soften_mask(self): ... def copy(self): ... - def tolist(self, fill_value=...): ... + def tolist(self, fill_value=None): ... def __reduce__(self): ... mrecarray = MaskedRecords def fromarrays( arraylist, - dtype=..., - shape=..., - formats=..., - names=..., - titles=..., - aligned=..., - byteorder=..., - fill_value=..., + dtype=None, + shape=None, + formats=None, + names=None, + titles=None, + aligned=False, + byteorder=None, + fill_value=None, ): ... def fromrecords( reclist, - dtype=..., - shape=..., - formats=..., - names=..., - titles=..., - aligned=..., - byteorder=..., - fill_value=..., + dtype=None, + shape=None, + formats=None, + names=None, + titles=None, + aligned=False, + byteorder=None, + fill_value=None, mask=..., ): ... def fromtextfile( fname, - delimiter=..., - commentchar=..., - missingchar=..., - varnames=..., - vartypes=..., + delimiter=None, + commentchar="#", + missingchar="", + varnames=None, + vartypes=None, # NOTE: deprecated: NumPy 1.22.0, 2021-09-23 # delimitor=..., ): ... -def addfield(mrecord, newfield, newfieldname=...): ... +def addfield(mrecord, newfield, newfieldname=None): ... 
From 71a1b5ca6cfc573acbbda1ebe67c28a9f0eddf4d Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 18 Aug 2025 09:41:38 -0600 Subject: [PATCH 0318/1718] DEP: Deprecate NumPy warning control utilities (#29550) * DEP: Deprecate NumPy warning control utilities * TST: fix mypy tests * DEP: un-deprecate assert_no_warnings, which is thread-safe * DEP: un-deprecate clear_and_catch_warnings * TYP: update type stubs * STY: appease ruff * TST: update doctest deprecation filters * TST: fix warning test * DOC: add release note * Update numpy/tests/test_warnings.py * MNT: use stacklevel instead * TST: use pytest.deprecated_call() * MNT: fix deprecated version * Apply suggestions from code review Co-authored-by: Sebastian Berg --------- Co-authored-by: Sebastian Berg --- .../upcoming_changes/29550.deprecation.rst | 6 ++++ numpy/_core/tests/test_deprecations.py | 19 +++++++++++ numpy/conftest.py | 4 ++- numpy/testing/_private/utils.py | 25 ++++++++++++-- numpy/testing/_private/utils.pyi | 5 ++- numpy/testing/tests/test_utils.py | 34 +++++++++++++++---- numpy/typing/tests/data/pass/ndarray_misc.py | 10 +++--- numpy/typing/tests/data/reveal/testing.pyi | 10 +++--- 8 files changed, 94 insertions(+), 19 deletions(-) create mode 100644 doc/release/upcoming_changes/29550.deprecation.rst diff --git a/doc/release/upcoming_changes/29550.deprecation.rst b/doc/release/upcoming_changes/29550.deprecation.rst new file mode 100644 index 000000000000..ce35477c5010 --- /dev/null +++ b/doc/release/upcoming_changes/29550.deprecation.rst @@ -0,0 +1,6 @@ +Assertion and warning control utilities are deprecated +------------------------------------------------------ + +`np.testing.assert_warns` and `np.testing.suppress_warnings` are deprecated. +Use `warnings.catch_warnings`, `warnings.filterwarnings`, ``pytest.warns``, or +``pytest.filterwarnings`` instead. 
diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index 75f092a51808..9be253f7c95c 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -516,3 +516,22 @@ def assign_to_index(): arr.flat[[1.]] = 10 self.assert_deprecated(assign_to_index) + + +class TestWarningUtilityDeprecations(_DeprecationTestCase): + # Deprecation in NumPy 2.4, 2025-08 + message = r"NumPy warning suppression and assertion utilities are deprecated." + + def test_assert_warns_deprecated(self): + def use_assert_warns(): + with np.testing.assert_warns(RuntimeWarning): + warnings.warn("foo", RuntimeWarning, stacklevel=1) + + self.assert_deprecated(use_assert_warns) + + def test_suppress_warnings_deprecated(self): + def use_suppress_warnings(): + with np.testing.suppress_warnings() as sup: + sup.filter(RuntimeWarning, 'invalid value encountered in divide') + + self.assert_deprecated(use_suppress_warnings) diff --git a/numpy/conftest.py b/numpy/conftest.py index fde4defc926d..1454b4af59de 100644 --- a/numpy/conftest.py +++ b/numpy/conftest.py @@ -170,7 +170,9 @@ def warnings_errors_and_rng(test=None): "This function is deprecated.", # random_integers "Data type alias 'a'", # numpy.rec.fromfile "Arrays of 2-dimensional vectors", # matlib.cross - "`in1d` is deprecated", ] + "`in1d` is deprecated", + "NumPy warning suppression and assertion utilities are deprecated." 
+ ] msg = "|".join(msgs) msgs_r = [ diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index e4a74e69afb0..9be98f9d2fbe 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -1985,7 +1985,7 @@ def integer_repr(x): @contextlib.contextmanager def _assert_warns_context(warning_class, name=None): __tracebackhide__ = True # Hide traceback for py.test - with suppress_warnings() as sup: + with suppress_warnings(_warn=False) as sup: l = sup.record(warning_class) yield if not len(l) > 0: @@ -2009,6 +2009,11 @@ def assert_warns(warning_class, *args, **kwargs): The ability to be used as a context manager is new in NumPy v1.11.0. + .. deprecated:: 2.4 + + This is deprecated. Use `warnings.catch_warnings` or + ``pytest.warns`` instead. + Parameters ---------- warning_class : class @@ -2036,6 +2041,11 @@ def assert_warns(warning_class, *args, **kwargs): >>> ret = np.testing.assert_warns(DeprecationWarning, deprecated_func, 4) >>> assert ret == 16 """ + warnings.warn( + "NumPy warning suppression and assertion utilities are deprecated. " + "Use warnings.catch_warnings, warnings.filterwarnings, pytest.warns, " + "or pytest.filterwarnings instead. (Deprecated NumPy 2.4)", + DeprecationWarning, stacklevel=2) if not args and not kwargs: return _assert_warns_context(warning_class) elif len(args) < 1: @@ -2288,6 +2298,11 @@ class suppress_warnings: tests might need to see the warning. Additionally it allows easier specificity for testing warnings and can be nested. + .. deprecated:: 2.4 + + This is deprecated. Use `warnings.filterwarnings` or + ``pytest.filterwarnings`` instead. 
+ Parameters ---------- forwarding_rule : str, optional @@ -2348,7 +2363,13 @@ def some_function(): # do something which causes a warning in np.ma.core pass """ - def __init__(self, forwarding_rule="always"): + def __init__(self, forwarding_rule="always", _warn=True): + if _warn: + warnings.warn( + "NumPy warning suppression and assertion utilities are deprecated. " + "Use warnings.catch_warnings, warnings.filterwarnings, pytest.warns, " + "or pytest.filterwarnings instead. (Deprecated NumPy 2.4)", + DeprecationWarning, stacklevel=2) self._entered = False # Suppressions are either instance or defined inside one with block: diff --git a/numpy/testing/_private/utils.pyi b/numpy/testing/_private/utils.pyi index d5584e511796..76455df87eb0 100644 --- a/numpy/testing/_private/utils.pyi +++ b/numpy/testing/_private/utils.pyi @@ -24,7 +24,7 @@ from typing import ( overload, type_check_only, ) -from typing_extensions import TypeVar +from typing_extensions import TypeVar, deprecated from unittest.case import SkipTest import numpy as np @@ -148,6 +148,7 @@ class clear_and_catch_warnings(warnings.catch_warnings[_W_co], Generic[_W_co]): @overload # record; bool def __init__(self, /, record: bool, modules: _ToModules = ()) -> None: ... +@deprecated("Please use warnings.filterwarnings or pytest.mark.filterwarnings instead") class suppress_warnings: log: Final[_WarnLog] def __init__(self, /, forwarding_rule: L["always", "module", "once", "location"] = "always") -> None: ... @@ -358,8 +359,10 @@ def assert_array_max_ulp( ) -> NDArray[Any]: ... # +@deprecated("Please use warnings.catch_warnings or pytest.warns instead") @overload def assert_warns(warning_class: _WarningSpec) -> _GeneratorContextManager[None]: ... +@deprecated("Please use warnings.catch_warnings or pytest.warns instead") @overload def assert_warns(warning_class: _WarningSpec, func: Callable[_Tss, _T], *args: _Tss.args, **kwargs: _Tss.kwargs) -> _T: ... 
diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py index 07f2c9da3005..5e1c625955bb 100644 --- a/numpy/testing/tests/test_utils.py +++ b/numpy/testing/tests/test_utils.py @@ -1140,7 +1140,9 @@ def test_strict(self): with pytest.raises(AssertionError): self._assert_func(x, y.astype(np.float32), strict=True) - +@pytest.mark.filterwarnings( + "ignore:.*NumPy warning suppression and assertion utilities are deprecated" + ".*:DeprecationWarning") class TestWarns: def test_warn(self): @@ -1787,6 +1789,9 @@ def test_clear_and_catch_warnings(): assert_warn_len_equal(my_mod, 0) +@pytest.mark.filterwarnings( + "ignore:.*NumPy warning suppression and assertion utilities are deprecated" + ".*:DeprecationWarning") def test_suppress_warnings_module(): # Initial state of module, no warnings my_mod = _get_fresh_mod() @@ -1833,6 +1838,9 @@ def warn(arr): assert_warn_len_equal(my_mod, 0) +@pytest.mark.filterwarnings( + "ignore:.*NumPy warning suppression and assertion utilities are deprecated" + ".*:DeprecationWarning") def test_suppress_warnings_type(): # Initial state of module, no warnings my_mod = _get_fresh_mod() @@ -1861,6 +1869,9 @@ def test_suppress_warnings_type(): assert_warn_len_equal(my_mod, 0) +@pytest.mark.filterwarnings( + "ignore:.*NumPy warning suppression and assertion utilities are deprecated" + ".*:DeprecationWarning") def test_suppress_warnings_decorate_no_record(): sup = suppress_warnings() sup.filter(UserWarning) @@ -1876,6 +1887,9 @@ def warn(category): assert_equal(len(w), 1) +@pytest.mark.filterwarnings( + "ignore:.*NumPy warning suppression and assertion utilities are deprecated" + ".*:DeprecationWarning") def test_suppress_warnings_record(): sup = suppress_warnings() log1 = sup.record() @@ -1913,9 +1927,13 @@ def test_suppress_warnings_record(): warnings.warn('Some warning') warnings.warn('Some other warning') assert_equal(len(sup2.log), 1) - assert_equal(len(sup.log), 1) + # includes a DeprecationWarning for 
suppress_warnings + assert_equal(len(sup.log), 2) +@pytest.mark.filterwarnings( + "ignore:.*NumPy warning suppression and assertion utilities are deprecated" + ".*:DeprecationWarning") def test_suppress_warnings_forwarding(): def warn_other_module(): # Apply along axis is implemented in python; stacklevel=2 means @@ -1931,7 +1949,8 @@ def warn(arr): for i in range(2): warnings.warn("Some warning") - assert_equal(len(sup.log), 2) + # includes a DeprecationWarning for suppress_warnings + assert_equal(len(sup.log), 3) with suppress_warnings() as sup: sup.record() @@ -1940,7 +1959,8 @@ def warn(arr): warnings.warn("Some warning") warnings.warn("Some warning") - assert_equal(len(sup.log), 2) + # includes a DeprecationWarning for suppress_warnings + assert_equal(len(sup.log), 3) with suppress_warnings() as sup: sup.record() @@ -1950,7 +1970,8 @@ def warn(arr): warnings.warn("Some warning") warn_other_module() - assert_equal(len(sup.log), 2) + # includes a DeprecationWarning for suppress_warnings + assert_equal(len(sup.log), 3) with suppress_warnings() as sup: sup.record() @@ -1960,7 +1981,8 @@ def warn(arr): warnings.warn("Some other warning") warn_other_module() - assert_equal(len(sup.log), 2) + # includes a DeprecationWarning for suppress_warnings + assert_equal(len(sup.log), 3) def test_tempdir(): diff --git a/numpy/typing/tests/data/pass/ndarray_misc.py b/numpy/typing/tests/data/pass/ndarray_misc.py index 30ad270edd6e..4d4e1f872763 100644 --- a/numpy/typing/tests/data/pass/ndarray_misc.py +++ b/numpy/typing/tests/data/pass/ndarray_misc.py @@ -11,6 +11,8 @@ import operator from typing import Any, cast +import pytest + import numpy as np import numpy.typing as npt @@ -190,11 +192,11 @@ class IntSubClass(npt.NDArray[np.intp]): ... 
# deprecated -with np.testing.assert_warns(DeprecationWarning): +with pytest.deprecated_call(): ctypes_obj.get_data() # type: ignore[deprecated] # pyright: ignore[reportDeprecated] -with np.testing.assert_warns(DeprecationWarning): +with pytest.deprecated_call(): ctypes_obj.get_shape() # type: ignore[deprecated] # pyright: ignore[reportDeprecated] -with np.testing.assert_warns(DeprecationWarning): +with pytest.deprecated_call(): ctypes_obj.get_strides() # type: ignore[deprecated] # pyright: ignore[reportDeprecated] -with np.testing.assert_warns(DeprecationWarning): +with pytest.deprecated_call(): ctypes_obj.get_as_parameter() # type: ignore[deprecated] # pyright: ignore[reportDeprecated] diff --git a/numpy/typing/tests/data/reveal/testing.pyi b/numpy/typing/tests/data/reveal/testing.pyi index 52d9ef6b3a5d..1533cd27955b 100644 --- a/numpy/typing/tests/data/reveal/testing.pyi +++ b/numpy/typing/tests/data/reveal/testing.pyi @@ -15,7 +15,7 @@ AR_f8: npt.NDArray[np.float64] AR_i8: npt.NDArray[np.int64] bool_obj: bool -suppress_obj: np.testing.suppress_warnings +suppress_obj: np.testing.suppress_warnings # type: ignore[deprecated] # pyright: ignore[reportDeprecated] FT = TypeVar("FT", bound=Callable[..., Any]) def func() -> int: ... 
@@ -58,12 +58,12 @@ with np.testing.clear_and_catch_warnings(True) as c1: with np.testing.clear_and_catch_warnings() as c2: assert_type(c2, None) -assert_type(np.testing.suppress_warnings("once"), np.testing.suppress_warnings) -assert_type(np.testing.suppress_warnings()(func), Callable[[], int]) +assert_type(np.testing.suppress_warnings("once"), np.testing.suppress_warnings) # type: ignore[deprecated] # pyright: ignore[reportDeprecated] +assert_type(np.testing.suppress_warnings()(func), Callable[[], int]) # type: ignore[deprecated] # pyright: ignore[reportDeprecated] assert_type(suppress_obj.filter(RuntimeWarning), None) assert_type(suppress_obj.record(RuntimeWarning), list[warnings.WarningMessage]) with suppress_obj as c3: - assert_type(c3, np.testing.suppress_warnings) + assert_type(c3, np.testing.suppress_warnings) # type: ignore[deprecated] # pyright: ignore[reportDeprecated] assert_type(np.testing.verbose, int) assert_type(np.testing.IS_PYPY, bool) @@ -172,7 +172,7 @@ assert_type(np.testing.assert_array_almost_equal_nulp(AR_i8, AR_f8, nulp=2), Non assert_type(np.testing.assert_array_max_ulp(AR_i8, AR_f8, maxulp=2), npt.NDArray[Any]) assert_type(np.testing.assert_array_max_ulp(AR_i8, AR_f8, dtype=np.float32), npt.NDArray[Any]) -assert_type(np.testing.assert_warns(RuntimeWarning), contextlib._GeneratorContextManager[None]) +assert_type(np.testing.assert_warns(RuntimeWarning), contextlib._GeneratorContextManager[None]) # type: ignore[deprecated] # pyright: ignore[reportDeprecated] assert_type(np.testing.assert_warns(RuntimeWarning, func3, 5), bool) def func4(a: int, b: str) -> bool: ... 
From 4b0a702a02f5090701980b7e0ac78c37e10b06a6 Mon Sep 17 00:00:00 2001 From: mattip Date: Mon, 18 Aug 2025 21:00:51 +0300 Subject: [PATCH 0319/1718] TST: update link and version for Intel SDE download --- .github/workflows/linux_simd.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/linux_simd.yml b/.github/workflows/linux_simd.yml index a9f065e25cc0..9f12215eb30e 100644 --- a/.github/workflows/linux_simd.yml +++ b/.github/workflows/linux_simd.yml @@ -202,7 +202,7 @@ jobs: - name: Install Intel SDE run: | - curl -o /tmp/sde.tar.xz https://downloadmirror.intel.com/788820/sde-external-9.27.0-2023-09-13-lin.tar.xz + curl -o /tmp/sde.tar.xz https://downloadmirror.intel.com/859732/sde-external-9.58.0-2025-06-16-lin.tar.xz mkdir /tmp/sde && tar -xvf /tmp/sde.tar.xz -C /tmp/sde/ sudo mv /tmp/sde/* /opt/sde && sudo ln -s /opt/sde/sde64 /usr/bin/sde @@ -253,7 +253,7 @@ jobs: - name: Install Intel SDE run: | - curl -o /tmp/sde.tar.xz https://downloadmirror.intel.com/788820/sde-external-9.27.0-2023-09-13-lin.tar.xz + curl -o /tmp/sde.tar.xz https://downloadmirror.intel.com/859732/sde-external-9.58.0-2025-06-16-lin.tar.xz mkdir /tmp/sde && tar -xvf /tmp/sde.tar.xz -C /tmp/sde/ sudo mv /tmp/sde/* /opt/sde && sudo ln -s /opt/sde/sde64 /usr/bin/sde From 111fe76ebf9e3dc528a2d808626808bd9601938d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20G=C3=B3rny?= Date: Mon, 18 Aug 2025 20:55:31 +0200 Subject: [PATCH 0320/1718] BLD: wire up `ASIMDDP` feature to `ARM_FEATURES` MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add the `ASIMDDP` feature to the `ARM_FEATURES` dictionary, to fix an error when it is explicitly listed in `cpu-baseline`. 
Fixes #29570 Signed-off-by: Michał Górny --- meson_cpu/arm/meson.build | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/meson_cpu/arm/meson.build b/meson_cpu/arm/meson.build index 7ffa3ef58ed0..5478e52cdcea 100644 --- a/meson_cpu/arm/meson.build +++ b/meson_cpu/arm/meson.build @@ -60,6 +60,6 @@ SVE = mod_features.new( # TODO: Add support for MSVC ARM_FEATURES = { 'NEON': NEON, 'NEON_FP16': NEON_FP16, 'NEON_VFPV4': NEON_VFPV4, - 'ASIMD': ASIMD, 'ASIMDHP': ASIMDHP, 'ASIMDFHM': ASIMDFHM, + 'ASIMD': ASIMD, 'ASIMDHP': ASIMDHP, 'ASIMDDP': ASIMDDP, 'ASIMDFHM': ASIMDFHM, 'SVE': SVE } From e868634f6110aaa65bc6568e4d1967cb0757c1b4 Mon Sep 17 00:00:00 2001 From: jorenham Date: Mon, 18 Aug 2025 23:47:32 +0200 Subject: [PATCH 0321/1718] TYP: add ``sorted`` kwarg to ``unique`` --- numpy/lib/_arraysetops_impl.pyi | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/numpy/lib/_arraysetops_impl.pyi b/numpy/lib/_arraysetops_impl.pyi index c0291680a8ec..6ca3ed8282b6 100644 --- a/numpy/lib/_arraysetops_impl.pyi +++ b/numpy/lib/_arraysetops_impl.pyi @@ -108,6 +108,7 @@ def unique( axis: SupportsIndex | None = None, *, equal_nan: bool = True, + sorted: bool = True, ) -> NDArray[_ScalarT]: ... @overload # unknown scalar-type, FFF def unique( @@ -118,6 +119,7 @@ def unique( axis: SupportsIndex | None = None, *, equal_nan: bool = True, + sorted: bool = True, ) -> _AnyArray: ... @overload # known scalar-type, TFF def unique( @@ -128,6 +130,7 @@ def unique( axis: SupportsIndex | None = None, *, equal_nan: bool = True, + sorted: bool = True, ) -> tuple[NDArray[_ScalarT], _IntArray]: ... @overload # unknown scalar-type, TFF def unique( @@ -138,6 +141,7 @@ def unique( axis: SupportsIndex | None = None, *, equal_nan: bool = True, + sorted: bool = True, ) -> tuple[_AnyArray, _IntArray]: ... 
@overload # known scalar-type, FTF (positional) def unique( @@ -148,6 +152,7 @@ def unique( axis: SupportsIndex | None = None, *, equal_nan: bool = True, + sorted: bool = True, ) -> tuple[NDArray[_ScalarT], _IntArray]: ... @overload # known scalar-type, FTF (keyword) def unique( @@ -158,6 +163,7 @@ def unique( return_counts: L[False] = False, axis: SupportsIndex | None = None, equal_nan: bool = True, + sorted: bool = True, ) -> tuple[NDArray[_ScalarT], _IntArray]: ... @overload # unknown scalar-type, FTF (positional) def unique( @@ -168,6 +174,7 @@ def unique( axis: SupportsIndex | None = None, *, equal_nan: bool = True, + sorted: bool = True, ) -> tuple[_AnyArray, _IntArray]: ... @overload # unknown scalar-type, FTF (keyword) def unique( @@ -178,6 +185,7 @@ def unique( return_counts: L[False] = False, axis: SupportsIndex | None = None, equal_nan: bool = True, + sorted: bool = True, ) -> tuple[_AnyArray, _IntArray]: ... @overload # known scalar-type, FFT (positional) def unique( @@ -188,6 +196,7 @@ def unique( axis: SupportsIndex | None = None, *, equal_nan: bool = True, + sorted: bool = True, ) -> tuple[NDArray[_ScalarT], _IntArray]: ... @overload # known scalar-type, FFT (keyword) def unique( @@ -198,6 +207,7 @@ def unique( return_counts: L[True], axis: SupportsIndex | None = None, equal_nan: bool = True, + sorted: bool = True, ) -> tuple[NDArray[_ScalarT], _IntArray]: ... @overload # unknown scalar-type, FFT (positional) def unique( @@ -208,6 +218,7 @@ def unique( axis: SupportsIndex | None = None, *, equal_nan: bool = True, + sorted: bool = True, ) -> tuple[_AnyArray, _IntArray]: ... @overload # unknown scalar-type, FFT (keyword) def unique( @@ -218,6 +229,7 @@ def unique( return_counts: L[True], axis: SupportsIndex | None = None, equal_nan: bool = True, + sorted: bool = True, ) -> tuple[_AnyArray, _IntArray]: ... 
@overload # known scalar-type, TTF def unique( @@ -228,6 +240,7 @@ def unique( axis: SupportsIndex | None = None, *, equal_nan: bool = True, + sorted: bool = True, ) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray]: ... @overload # unknown scalar-type, TTF def unique( @@ -238,6 +251,7 @@ def unique( axis: SupportsIndex | None = None, *, equal_nan: bool = True, + sorted: bool = True, ) -> tuple[_AnyArray, _IntArray, _IntArray]: ... @overload # known scalar-type, TFT (positional) def unique( @@ -248,6 +262,7 @@ def unique( axis: SupportsIndex | None = None, *, equal_nan: bool = True, + sorted: bool = True, ) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray]: ... @overload # known scalar-type, TFT (keyword) def unique( @@ -258,6 +273,7 @@ def unique( return_counts: L[True], axis: SupportsIndex | None = None, equal_nan: bool = True, + sorted: bool = True, ) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray]: ... @overload # unknown scalar-type, TFT (positional) def unique( @@ -268,6 +284,7 @@ def unique( axis: SupportsIndex | None = None, *, equal_nan: bool = True, + sorted: bool = True, ) -> tuple[_AnyArray, _IntArray, _IntArray]: ... @overload # unknown scalar-type, TFT (keyword) def unique( @@ -278,6 +295,7 @@ def unique( return_counts: L[True], axis: SupportsIndex | None = None, equal_nan: bool = True, + sorted: bool = True, ) -> tuple[_AnyArray, _IntArray, _IntArray]: ... @overload # known scalar-type, FTT (positional) def unique( @@ -288,6 +306,7 @@ def unique( axis: SupportsIndex | None = None, *, equal_nan: bool = True, + sorted: bool = True, ) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray]: ... @overload # known scalar-type, FTT (keyword) def unique( @@ -298,6 +317,7 @@ def unique( return_counts: L[True], axis: SupportsIndex | None = None, equal_nan: bool = True, + sorted: bool = True, ) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray]: ... 
@overload # unknown scalar-type, FTT (positional) def unique( @@ -308,6 +328,7 @@ def unique( axis: SupportsIndex | None = None, *, equal_nan: bool = True, + sorted: bool = True, ) -> tuple[_AnyArray, _IntArray, _IntArray]: ... @overload # unknown scalar-type, FTT (keyword) def unique( @@ -318,6 +339,7 @@ def unique( return_counts: L[True], axis: SupportsIndex | None = None, equal_nan: bool = True, + sorted: bool = True, ) -> tuple[_AnyArray, _IntArray, _IntArray]: ... @overload # known scalar-type, TTT def unique( @@ -328,6 +350,7 @@ def unique( axis: SupportsIndex | None = None, *, equal_nan: bool = True, + sorted: bool = True, ) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray, _IntArray]: ... @overload # unknown scalar-type, TTT def unique( @@ -338,6 +361,7 @@ def unique( axis: SupportsIndex | None = None, *, equal_nan: bool = True, + sorted: bool = True, ) -> tuple[_AnyArray, _IntArray, _IntArray, _IntArray]: ... # From 720d35b53f92d4975821d58e391f019524b4d721 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 19 Aug 2025 03:09:29 +0000 Subject: [PATCH 0322/1718] MAINT: Bump actions/dependency-review-action from 4.7.1 to 4.7.2 Bumps [actions/dependency-review-action](https://github.com/actions/dependency-review-action) from 4.7.1 to 4.7.2. - [Release notes](https://github.com/actions/dependency-review-action/releases) - [Commits](https://github.com/actions/dependency-review-action/compare/da24556b548a50705dd671f47852072ea4c105d9...bc41886e18ea39df68b1b1245f4184881938e050) --- updated-dependencies: - dependency-name: actions/dependency-review-action dependency-version: 4.7.2 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- .github/workflows/dependency-review.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml index 158a826825f1..248b69a0d939 100644 --- a/.github/workflows/dependency-review.yml +++ b/.github/workflows/dependency-review.yml @@ -19,6 +19,6 @@ jobs: with: persist-credentials: false - name: 'Dependency Review' - uses: actions/dependency-review-action@da24556b548a50705dd671f47852072ea4c105d9 # v4.7.1 + uses: actions/dependency-review-action@bc41886e18ea39df68b1b1245f4184881938e050 # v4.7.2 with: allow-ghsas: GHSA-cx63-2mw6-8hw5 From 5015557f7da13fea17e7cbbad321937cb56e1078 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 19 Aug 2025 03:29:30 +0000 Subject: [PATCH 0323/1718] MAINT: Bump github/codeql-action from 3.29.9 to 3.29.10 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.29.9 to 3.29.10. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/df559355d593797519d70b90fc8edd5db049e7a2...96f518a34f7a870018057716cc4d7a5c014bd61c) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.29.10 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 062bc56f5e3a..dce656f39b37 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. 
- name: Initialize CodeQL - uses: github/codeql-action/init@df559355d593797519d70b90fc8edd5db049e7a2 # v3.29.9 + uses: github/codeql-action/init@96f518a34f7a870018057716cc4d7a5c014bd61c # v3.29.10 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@df559355d593797519d70b90fc8edd5db049e7a2 # v3.29.9 + uses: github/codeql-action/autobuild@96f518a34f7a870018057716cc4d7a5c014bd61c # v3.29.10 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@df559355d593797519d70b90fc8edd5db049e7a2 # v3.29.9 + uses: github/codeql-action/analyze@96f518a34f7a870018057716cc4d7a5c014bd61c # v3.29.10 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index f52a41787c77..cecb8d3eef4a 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
- name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@df559355d593797519d70b90fc8edd5db049e7a2 # v2.1.27 + uses: github/codeql-action/upload-sarif@96f518a34f7a870018057716cc4d7a5c014bd61c # v2.1.27 with: sarif_file: results.sarif From 5ba8c96e5853fe538bbe5d1a15d64a24e20b54a9 Mon Sep 17 00:00:00 2001 From: lvllvl <24905907+lvllvl@users.noreply.github.com> Date: Wed, 20 Aug 2025 04:38:15 -0400 Subject: [PATCH 0324/1718] DEP: show warning when np.maximum receives more than 2 inputs (#29052) Deprecate not using `out=` for np.minimum and maximum due to it being a common confusion. --- .../upcoming_changes/29052.deprecation.rst | 10 ++++++++++ numpy/_core/src/umath/ufunc_object.c | 16 ++++++++++++++++ numpy/_core/tests/test_deprecations.py | 9 +++++++++ 3 files changed, 35 insertions(+) create mode 100644 doc/release/upcoming_changes/29052.deprecation.rst diff --git a/doc/release/upcoming_changes/29052.deprecation.rst b/doc/release/upcoming_changes/29052.deprecation.rst new file mode 100644 index 000000000000..e302907abfba --- /dev/null +++ b/doc/release/upcoming_changes/29052.deprecation.rst @@ -0,0 +1,10 @@ +Positional ``out`` argument to `np.maximum`, `np.minimum` is deprecated +----------------------------------------------------------------------- +Passing the output array ``out`` positionally to `numpy.maximum` and +`numpy.minimum` is deprecated. For example, ``np.maximum(a, b, c)`` will +emit a deprecation warning, since ``c`` is treated as the output buffer +rather than a third input. + +Always pass the output with the keyword form, e.g. +``np.maximum(a, b, out=c)``. This makes intent clear and simplifies +type annotations. 
diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index 1d2c3edbd3b9..6b8cc1789f94 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -65,6 +65,7 @@ #include "mapping.h" #include "npy_static_data.h" #include "multiarraymodule.h" +#include "number.h" /********** PRINTF DEBUG TRACING **************/ #define NPY_UF_DBG_TRACING 0 @@ -4368,6 +4369,21 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, Py_INCREF(tmp); PyTuple_SET_ITEM(full_args.out, i-nin, tmp); } + + /* Extra positional args but no keywords */ + /* DEPRECATED NumPy 2.4, 2025-08 */ + if ((PyObject *)ufunc == n_ops.maximum || (PyObject *)ufunc == n_ops.minimum) { + + if (DEPRECATE( + "Passing more than 2 positional arguments to np.maximum and np.minimum " + "is deprecated. If you meant to use the third argument as an output, " + "use the `out` keyword argument instead. If you hoped to work with " + "more than 2 inputs, combine them into a single array and get the extrema " + "for the relevant axis.") < 0) { + return NULL; + } + } + if (all_none) { Py_SETREF(full_args.out, NULL); } diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index 9be253f7c95c..252189ef321a 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -535,3 +535,12 @@ def use_suppress_warnings(): sup.filter(RuntimeWarning, 'invalid value encountered in divide') self.assert_deprecated(use_suppress_warnings) + + +class TestTooManyArgsExtremum(_DeprecationTestCase): + # Deprecated in Numpy 2.4, 2025-08, gh-27639 + message = "Passing more than 2 positional arguments to np.maximum and np.minimum " + + @pytest.mark.parametrize("ufunc", [np.minimum, np.maximum]) + def test_extremem_3_args(self, ufunc): + self.assert_deprecated(ufunc, args=(np.ones(1), np.zeros(1), np.empty(1))) From a1d1dadcddfa635912ae446a1a8ffd0fd99d2da3 Mon Sep 17 00:00:00 2001 From: Shirong_Wang Date: Wed, 20 
Aug 2025 23:13:53 +0800 Subject: [PATCH 0325/1718] DOC: fix for f2py migrating-to-meson (#29601) --- doc/source/f2py/buildtools/distutils-to-meson.rst | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/doc/source/f2py/buildtools/distutils-to-meson.rst b/doc/source/f2py/buildtools/distutils-to-meson.rst index b24638e62239..32f2b599f6e5 100644 --- a/doc/source/f2py/buildtools/distutils-to-meson.rst +++ b/doc/source/f2py/buildtools/distutils-to-meson.rst @@ -117,14 +117,12 @@ sample is included below. +------------------------------------+-------------------------------+ | LDFLAGS | Linker options | +------------------------------------+-------------------------------+ - | LD\_LIBRARY\_PATH | Library file locations (Unix) | + | LD_LIBRARY_PATH | Library file locations (Unix) | +------------------------------------+-------------------------------+ | LIBS | Libraries to link against | +------------------------------------+-------------------------------+ | PATH | Search path for executables | +------------------------------------+-------------------------------+ - | LDFLAGS | Linker flags | - +------------------------------------+-------------------------------+ | CXX | C++ compiler | +------------------------------------+-------------------------------+ | CXXFLAGS | C++ compiler options | From 85ccc9741e10322c7cf1092d56f812ea2cb32191 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Wed, 20 Aug 2025 17:50:26 +0100 Subject: [PATCH 0326/1718] TYP: Add defaults to ``numpy/core`` and ``numpy/__init__.py`` (#29594) Co-authored-by: Joren Hammudoglu --- numpy/__init__.pyi | 6 +- numpy/_core/_asarray.pyi | 18 +- numpy/_core/_ufunc_config.pyi | 10 +- numpy/_core/arrayprint.pyi | 54 +- numpy/_core/defchararray.pyi | 384 ++++++------ numpy/_core/einsumfunc.pyi | 40 +- numpy/_core/fromnumeric.pyi | 610 +++++++++---------- numpy/_core/numeric.pyi | 186 +++--- numpy/_core/shape_base.pyi | 46 +- 
numpy/_core/strings.pyi | 100 +-- numpy/typing/tests/data/fail/char.pyi | 2 - numpy/typing/tests/data/fail/chararray.pyi | 2 - numpy/typing/tests/data/reveal/char.pyi | 1 + numpy/typing/tests/data/reveal/chararray.pyi | 1 + 14 files changed, 729 insertions(+), 731 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index bf9ed8c9b802..1f47f2aae0d7 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -5259,11 +5259,11 @@ class poly1d: def __getitem__(self, val: int, /) -> Any: ... def __setitem__(self, key: int, val: Any, /) -> None: ... def __iter__(self) -> Iterator[Any]: ... - def deriv(self, m: SupportsInt | SupportsIndex = ...) -> poly1d: ... + def deriv(self, m: SupportsInt | SupportsIndex = 1) -> poly1d: ... def integ( self, - m: SupportsInt | SupportsIndex = ..., - k: _ArrayLikeComplex_co | _ArrayLikeObject_co | None = ..., + m: SupportsInt | SupportsIndex = 1, + k: _ArrayLikeComplex_co | _ArrayLikeObject_co | None = 0, ) -> poly1d: ... class matrix(ndarray[_2DShapeT_co, _DTypeT_co]): diff --git a/numpy/_core/_asarray.pyi b/numpy/_core/_asarray.pyi index a4bee00489fb..da5884d49aba 100644 --- a/numpy/_core/_asarray.pyi +++ b/numpy/_core/_asarray.pyi @@ -18,24 +18,24 @@ _RequirementsWithE: TypeAlias = _Requirements | _E @overload def require( a: _ArrayT, - dtype: None = ..., - requirements: _Requirements | Iterable[_Requirements] | None = ..., + dtype: None = None, + requirements: _Requirements | Iterable[_Requirements] | None = None, *, - like: _SupportsArrayFunc = ... + like: _SupportsArrayFunc | None = None ) -> _ArrayT: ... @overload def require( a: object, - dtype: DTypeLike = ..., - requirements: _E | Iterable[_RequirementsWithE] = ..., + dtype: DTypeLike = None, + requirements: _E | Iterable[_RequirementsWithE] | None = None, *, - like: _SupportsArrayFunc = ... + like: _SupportsArrayFunc | None = None ) -> NDArray[Any]: ... 
@overload def require( a: object, - dtype: DTypeLike = ..., - requirements: _Requirements | Iterable[_Requirements] | None = ..., + dtype: DTypeLike = None, + requirements: _Requirements | Iterable[_Requirements] | None = None, *, - like: _SupportsArrayFunc = ... + like: _SupportsArrayFunc | None = None ) -> NDArray[Any]: ... diff --git a/numpy/_core/_ufunc_config.pyi b/numpy/_core/_ufunc_config.pyi index 86df9827d652..1cc3595d5ba0 100644 --- a/numpy/_core/_ufunc_config.pyi +++ b/numpy/_core/_ufunc_config.pyi @@ -16,11 +16,11 @@ class _ErrDict(TypedDict): invalid: _ErrKind def seterr( - all: _ErrKind | None = ..., - divide: _ErrKind | None = ..., - over: _ErrKind | None = ..., - under: _ErrKind | None = ..., - invalid: _ErrKind | None = ..., + all: _ErrKind | None = None, + divide: _ErrKind | None = None, + over: _ErrKind | None = None, + under: _ErrKind | None = None, + invalid: _ErrKind | None = None, ) -> _ErrDict: ... def geterr() -> _ErrDict: ... def setbufsize(size: int) -> int: ... 
diff --git a/numpy/_core/arrayprint.pyi b/numpy/_core/arrayprint.pyi index 967cc09e6a25..57e2e1248c5e 100644 --- a/numpy/_core/arrayprint.pyi +++ b/numpy/_core/arrayprint.pyi @@ -77,14 +77,14 @@ class _FormatOptions(TypedDict): __docformat__: Final = "restructuredtext" # undocumented def set_printoptions( - precision: SupportsIndex | None = ..., - threshold: int | None = ..., - edgeitems: int | None = ..., - linewidth: int | None = ..., - suppress: bool | None = ..., - nanstr: str | None = ..., - infstr: str | None = ..., - formatter: _FormatDict | None = ..., + precision: SupportsIndex | None = None, + threshold: int | None = None, + edgeitems: int | None = None, + linewidth: int | None = None, + suppress: bool | None = None, + nanstr: str | None = None, + infstr: str | None = None, + formatter: _FormatDict | None = None, sign: _Sign | None = None, floatmode: _FloatMode | None = None, *, @@ -189,36 +189,36 @@ def array2string( def format_float_scientific( x: _FloatLike_co, - precision: int | None = ..., - unique: bool = ..., + precision: int | None = None, + unique: bool = True, trim: _Trim = "k", - sign: bool = ..., - pad_left: int | None = ..., - exp_digits: int | None = ..., - min_digits: int | None = ..., + sign: bool = False, + pad_left: int | None = None, + exp_digits: int | None = None, + min_digits: int | None = None, ) -> str: ... def format_float_positional( x: _FloatLike_co, - precision: int | None = ..., - unique: bool = ..., - fractional: bool = ..., + precision: int | None = None, + unique: bool = True, + fractional: bool = True, trim: _Trim = "k", - sign: bool = ..., - pad_left: int | None = ..., - pad_right: int | None = ..., - min_digits: int | None = ..., + sign: bool = False, + pad_left: int | None = None, + pad_right: int | None = None, + min_digits: int | None = None, ) -> str: ... 
def array_repr( arr: NDArray[Any], - max_line_width: int | None = ..., - precision: SupportsIndex | None = ..., - suppress_small: bool | None = ..., + max_line_width: int | None = None, + precision: SupportsIndex | None = None, + suppress_small: bool | None = None, ) -> str: ... def array_str( a: NDArray[Any], - max_line_width: int | None = ..., - precision: SupportsIndex | None = ..., - suppress_small: bool | None = ..., + max_line_width: int | None = None, + precision: SupportsIndex | None = None, + suppress_small: bool | None = None, ) -> str: ... def printoptions( precision: SupportsIndex | None = ..., diff --git a/numpy/_core/defchararray.pyi b/numpy/_core/defchararray.pyi index d3e01c2c820a..9b86c2a33e35 100644 --- a/numpy/_core/defchararray.pyi +++ b/numpy/_core/defchararray.pyi @@ -234,90 +234,90 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): def center( self: _CharArray[str_], width: i_co, - fillchar: U_co = ..., + fillchar: U_co = " ", ) -> _CharArray[str_]: ... @overload def center( self: _CharArray[bytes_], width: i_co, - fillchar: S_co = ..., + fillchar: str | S_co = " ", ) -> _CharArray[bytes_]: ... @overload def count( self: _CharArray[str_], sub: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[int_]: ... @overload def count( self: _CharArray[bytes_], sub: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[int_]: ... def decode( self: _CharArray[bytes_], - encoding: str | None = ..., - errors: str | None = ..., + encoding: str | None = None, + errors: str | None = None, ) -> _CharArray[str_]: ... def encode( self: _CharArray[str_], - encoding: str | None = ..., - errors: str | None = ..., + encoding: str | None = None, + errors: str | None = None, ) -> _CharArray[bytes_]: ... 
@overload def endswith( self: _CharArray[str_], suffix: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.bool]: ... @overload def endswith( self: _CharArray[bytes_], suffix: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.bool]: ... def expandtabs( self, - tabsize: i_co = ..., + tabsize: i_co = 8, ) -> Self: ... @overload def find( self: _CharArray[str_], sub: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[int_]: ... @overload def find( self: _CharArray[bytes_], sub: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[int_]: ... @overload def index( self: _CharArray[str_], sub: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[int_]: ... @overload def index( self: _CharArray[bytes_], sub: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[int_]: ... @overload @@ -335,24 +335,24 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): def ljust( self: _CharArray[str_], width: i_co, - fillchar: U_co = ..., + fillchar: U_co = " ", ) -> _CharArray[str_]: ... @overload def ljust( self: _CharArray[bytes_], width: i_co, - fillchar: S_co = ..., + fillchar: str | S_co = " ", ) -> _CharArray[bytes_]: ... @overload def lstrip( self: _CharArray[str_], - chars: U_co | None = ..., + chars: U_co | None = None, ) -> _CharArray[str_]: ... @overload def lstrip( self: _CharArray[bytes_], - chars: S_co | None = ..., + chars: S_co | None = None, ) -> _CharArray[bytes_]: ... @overload @@ -371,57 +371,57 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): self: _CharArray[str_], old: U_co, new: U_co, - count: i_co | None = ..., + count: i_co | None = None, ) -> _CharArray[str_]: ... 
@overload def replace( self: _CharArray[bytes_], old: S_co, new: S_co, - count: i_co | None = ..., + count: i_co | None = None, ) -> _CharArray[bytes_]: ... @overload def rfind( self: _CharArray[str_], sub: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[int_]: ... @overload def rfind( self: _CharArray[bytes_], sub: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[int_]: ... @overload def rindex( self: _CharArray[str_], sub: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[int_]: ... @overload def rindex( self: _CharArray[bytes_], sub: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[int_]: ... @overload def rjust( self: _CharArray[str_], width: i_co, - fillchar: U_co = ..., + fillchar: U_co = " ", ) -> _CharArray[str_]: ... @overload def rjust( self: _CharArray[bytes_], width: i_co, - fillchar: S_co = ..., + fillchar: str | S_co = " ", ) -> _CharArray[bytes_]: ... @overload @@ -438,79 +438,79 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): @overload def rsplit( self: _CharArray[str_], - sep: U_co | None = ..., - maxsplit: i_co | None = ..., + sep: U_co | None = None, + maxsplit: i_co | None = None, ) -> NDArray[object_]: ... @overload def rsplit( self: _CharArray[bytes_], - sep: S_co | None = ..., - maxsplit: i_co | None = ..., + sep: S_co | None = None, + maxsplit: i_co | None = None, ) -> NDArray[object_]: ... @overload def rstrip( self: _CharArray[str_], - chars: U_co | None = ..., + chars: U_co | None = None, ) -> _CharArray[str_]: ... @overload def rstrip( self: _CharArray[bytes_], - chars: S_co | None = ..., + chars: S_co | None = None, ) -> _CharArray[bytes_]: ... 
@overload def split( self: _CharArray[str_], - sep: U_co | None = ..., - maxsplit: i_co | None = ..., + sep: U_co | None = None, + maxsplit: i_co | None = None, ) -> NDArray[object_]: ... @overload def split( self: _CharArray[bytes_], - sep: S_co | None = ..., - maxsplit: i_co | None = ..., + sep: S_co | None = None, + maxsplit: i_co | None = None, ) -> NDArray[object_]: ... - def splitlines(self, keepends: b_co | None = ...) -> NDArray[object_]: ... + def splitlines(self, keepends: b_co | None = None) -> NDArray[object_]: ... @overload def startswith( self: _CharArray[str_], prefix: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.bool]: ... @overload def startswith( self: _CharArray[bytes_], prefix: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.bool]: ... @overload def strip( self: _CharArray[str_], - chars: U_co | None = ..., + chars: U_co | None = None, ) -> _CharArray[str_]: ... @overload def strip( self: _CharArray[bytes_], - chars: S_co | None = ..., + chars: S_co | None = None, ) -> _CharArray[bytes_]: ... @overload def translate( self: _CharArray[str_], table: U_co, - deletechars: U_co | None = ..., + deletechars: U_co | None = None, ) -> _CharArray[str_]: ... @overload def translate( self: _CharArray[bytes_], table: S_co, - deletechars: S_co | None = ..., + deletechars: S_co | None = None, ) -> _CharArray[bytes_]: ... def zfill(self, width: i_co) -> Self: ... @@ -609,33 +609,33 @@ def capitalize(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ... def capitalize(a: T_co) -> _StringDTypeOrUnicodeArray: ... @overload -def center(a: U_co, width: i_co, fillchar: U_co = ...) -> NDArray[str_]: ... +def center(a: U_co, width: i_co, fillchar: U_co = " ") -> NDArray[str_]: ... @overload -def center(a: S_co, width: i_co, fillchar: S_co = ...) -> NDArray[bytes_]: ... 
+def center(a: S_co, width: i_co, fillchar: str | S_co = " ") -> NDArray[bytes_]: ... @overload -def center(a: _StringDTypeSupportsArray, width: i_co, fillchar: _StringDTypeSupportsArray = ...) -> _StringDTypeArray: ... +def center(a: _StringDTypeSupportsArray, width: i_co, fillchar: str | _StringDTypeSupportsArray = " ") -> _StringDTypeArray: ... @overload -def center(a: T_co, width: i_co, fillchar: T_co = ...) -> _StringDTypeOrUnicodeArray: ... +def center(a: T_co, width: i_co, fillchar: T_co = " ") -> _StringDTypeOrUnicodeArray: ... def decode( a: S_co, - encoding: str | None = ..., - errors: str | None = ..., + encoding: str | None = None, + errors: str | None = None, ) -> NDArray[str_]: ... def encode( a: U_co | T_co, - encoding: str | None = ..., - errors: str | None = ..., + encoding: str | None = None, + errors: str | None = None, ) -> NDArray[bytes_]: ... @overload -def expandtabs(a: U_co, tabsize: i_co = ...) -> NDArray[str_]: ... +def expandtabs(a: U_co, tabsize: i_co = 8) -> NDArray[str_]: ... @overload -def expandtabs(a: S_co, tabsize: i_co = ...) -> NDArray[bytes_]: ... +def expandtabs(a: S_co, tabsize: i_co = 8) -> NDArray[bytes_]: ... @overload -def expandtabs(a: _StringDTypeSupportsArray, tabsize: i_co = ...) -> _StringDTypeArray: ... +def expandtabs(a: _StringDTypeSupportsArray, tabsize: i_co = 8) -> _StringDTypeArray: ... @overload -def expandtabs(a: T_co, tabsize: i_co = ...) -> _StringDTypeOrUnicodeArray: ... +def expandtabs(a: T_co, tabsize: i_co = 8) -> _StringDTypeOrUnicodeArray: ... @overload def join(sep: U_co, seq: U_co) -> NDArray[str_]: ... @@ -647,13 +647,13 @@ def join(sep: _StringDTypeSupportsArray, seq: _StringDTypeSupportsArray) -> _Str def join(sep: T_co, seq: T_co) -> _StringDTypeOrUnicodeArray: ... @overload -def ljust(a: U_co, width: i_co, fillchar: U_co = ...) -> NDArray[str_]: ... +def ljust(a: U_co, width: i_co, fillchar: U_co = " ") -> NDArray[str_]: ... @overload -def ljust(a: S_co, width: i_co, fillchar: S_co = ...) 
-> NDArray[bytes_]: ... +def ljust(a: S_co, width: i_co, fillchar: str | S_co = " ") -> NDArray[bytes_]: ... @overload -def ljust(a: _StringDTypeSupportsArray, width: i_co, fillchar: _StringDTypeSupportsArray = ...) -> _StringDTypeArray: ... +def ljust(a: _StringDTypeSupportsArray, width: i_co, fillchar: str | _StringDTypeSupportsArray = " ") -> _StringDTypeArray: ... @overload -def ljust(a: T_co, width: i_co, fillchar: T_co = ...) -> _StringDTypeOrUnicodeArray: ... +def ljust(a: T_co, width: i_co, fillchar: T_co = " ") -> _StringDTypeOrUnicodeArray: ... @overload def lower(a: U_co) -> NDArray[str_]: ... @@ -665,13 +665,13 @@ def lower(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ... def lower(a: T_co) -> _StringDTypeOrUnicodeArray: ... @overload -def lstrip(a: U_co, chars: U_co | None = ...) -> NDArray[str_]: ... +def lstrip(a: U_co, chars: U_co | None = None) -> NDArray[str_]: ... @overload -def lstrip(a: S_co, chars: S_co | None = ...) -> NDArray[bytes_]: ... +def lstrip(a: S_co, chars: S_co | None = None) -> NDArray[bytes_]: ... @overload -def lstrip(a: _StringDTypeSupportsArray, chars: _StringDTypeSupportsArray | None = ...) -> _StringDTypeArray: ... +def lstrip(a: _StringDTypeSupportsArray, chars: _StringDTypeSupportsArray | None = None) -> _StringDTypeArray: ... @overload -def lstrip(a: T_co, chars: T_co | None = ...) -> _StringDTypeOrUnicodeArray: ... +def lstrip(a: T_co, chars: T_co | None = None) -> _StringDTypeOrUnicodeArray: ... @overload def partition(a: U_co, sep: U_co) -> NDArray[str_]: ... @@ -687,53 +687,53 @@ def replace( a: U_co, old: U_co, new: U_co, - count: i_co | None = ..., + count: i_co | None = -1, ) -> NDArray[str_]: ... @overload def replace( a: S_co, old: S_co, new: S_co, - count: i_co | None = ..., + count: i_co | None = -1, ) -> NDArray[bytes_]: ... 
@overload def replace( a: _StringDTypeSupportsArray, old: _StringDTypeSupportsArray, new: _StringDTypeSupportsArray, - count: i_co = ..., + count: i_co = -1, ) -> _StringDTypeArray: ... @overload def replace( a: T_co, old: T_co, new: T_co, - count: i_co = ..., + count: i_co = -1, ) -> _StringDTypeOrUnicodeArray: ... @overload def rjust( a: U_co, width: i_co, - fillchar: U_co = ..., + fillchar: U_co = " ", ) -> NDArray[str_]: ... @overload def rjust( a: S_co, width: i_co, - fillchar: S_co = ..., + fillchar: str | S_co = " ", ) -> NDArray[bytes_]: ... @overload def rjust( a: _StringDTypeSupportsArray, width: i_co, - fillchar: _StringDTypeSupportsArray = ..., + fillchar: str | _StringDTypeSupportsArray = " ", ) -> _StringDTypeArray: ... @overload def rjust( a: T_co, width: i_co, - fillchar: T_co = ..., + fillchar: T_co = " ", ) -> _StringDTypeOrUnicodeArray: ... @overload @@ -748,72 +748,72 @@ def rpartition(a: T_co, sep: T_co) -> _StringDTypeOrUnicodeArray: ... @overload def rsplit( a: U_co, - sep: U_co | None = ..., - maxsplit: i_co | None = ..., + sep: U_co | None = None, + maxsplit: i_co | None = None, ) -> NDArray[object_]: ... @overload def rsplit( a: S_co, - sep: S_co | None = ..., - maxsplit: i_co | None = ..., + sep: S_co | None = None, + maxsplit: i_co | None = None, ) -> NDArray[object_]: ... @overload def rsplit( a: _StringDTypeSupportsArray, - sep: _StringDTypeSupportsArray | None = ..., - maxsplit: i_co | None = ..., + sep: _StringDTypeSupportsArray | None = None, + maxsplit: i_co | None = None, ) -> NDArray[object_]: ... @overload def rsplit( a: T_co, - sep: T_co | None = ..., - maxsplit: i_co | None = ..., + sep: T_co | None = None, + maxsplit: i_co | None = None, ) -> NDArray[object_]: ... @overload -def rstrip(a: U_co, chars: U_co | None = ...) -> NDArray[str_]: ... +def rstrip(a: U_co, chars: U_co | None = None) -> NDArray[str_]: ... @overload -def rstrip(a: S_co, chars: S_co | None = ...) -> NDArray[bytes_]: ... 
+def rstrip(a: S_co, chars: S_co | None = None) -> NDArray[bytes_]: ... @overload -def rstrip(a: _StringDTypeSupportsArray, chars: _StringDTypeSupportsArray | None = ...) -> _StringDTypeArray: ... +def rstrip(a: _StringDTypeSupportsArray, chars: _StringDTypeSupportsArray | None = None) -> _StringDTypeArray: ... @overload -def rstrip(a: T_co, chars: T_co | None = ...) -> _StringDTypeOrUnicodeArray: ... +def rstrip(a: T_co, chars: T_co | None = None) -> _StringDTypeOrUnicodeArray: ... @overload def split( a: U_co, - sep: U_co | None = ..., - maxsplit: i_co | None = ..., + sep: U_co | None = None, + maxsplit: i_co | None = None, ) -> NDArray[object_]: ... @overload def split( a: S_co, - sep: S_co | None = ..., - maxsplit: i_co | None = ..., + sep: S_co | None = None, + maxsplit: i_co | None = None, ) -> NDArray[object_]: ... @overload def split( a: _StringDTypeSupportsArray, - sep: _StringDTypeSupportsArray | None = ..., - maxsplit: i_co | None = ..., + sep: _StringDTypeSupportsArray | None = None, + maxsplit: i_co | None = None, ) -> NDArray[object_]: ... @overload def split( a: T_co, - sep: T_co | None = ..., - maxsplit: i_co | None = ..., + sep: T_co | None = None, + maxsplit: i_co | None = None, ) -> NDArray[object_]: ... -def splitlines(a: UST_co, keepends: b_co | None = ...) -> NDArray[np.object_]: ... +def splitlines(a: UST_co, keepends: b_co | None = None) -> NDArray[np.object_]: ... @overload -def strip(a: U_co, chars: U_co | None = ...) -> NDArray[str_]: ... +def strip(a: U_co, chars: U_co | None = None) -> NDArray[str_]: ... @overload -def strip(a: S_co, chars: S_co | None = ...) -> NDArray[bytes_]: ... +def strip(a: S_co, chars: S_co | None = None) -> NDArray[bytes_]: ... @overload -def strip(a: _StringDTypeSupportsArray, chars: _StringDTypeSupportsArray | None = ...) -> _StringDTypeArray: ... +def strip(a: _StringDTypeSupportsArray, chars: _StringDTypeSupportsArray | None = None) -> _StringDTypeArray: ... 
@overload -def strip(a: T_co, chars: T_co | None = ...) -> _StringDTypeOrUnicodeArray: ... +def strip(a: T_co, chars: T_co | None = None) -> _StringDTypeOrUnicodeArray: ... @overload def swapcase(a: U_co) -> NDArray[str_]: ... @@ -837,25 +837,25 @@ def title(a: T_co) -> _StringDTypeOrUnicodeArray: ... def translate( a: U_co, table: str, - deletechars: str | None = ..., + deletechars: str | None = None, ) -> NDArray[str_]: ... @overload def translate( a: S_co, table: str, - deletechars: str | None = ..., + deletechars: str | None = None, ) -> NDArray[bytes_]: ... @overload def translate( a: _StringDTypeSupportsArray, table: str, - deletechars: str | None = ..., + deletechars: str | None = None, ) -> _StringDTypeArray: ... @overload def translate( a: T_co, table: str, - deletechars: str | None = ..., + deletechars: str | None = None, ) -> _StringDTypeOrUnicodeArray: ... @overload @@ -881,88 +881,88 @@ def zfill(a: T_co, width: i_co) -> _StringDTypeOrUnicodeArray: ... def count( a: U_co, sub: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[int_]: ... @overload def count( a: S_co, sub: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[int_]: ... @overload def count( a: T_co, sub: T_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def endswith( a: U_co, suffix: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.bool]: ... @overload def endswith( a: S_co, suffix: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.bool]: ... @overload def endswith( a: T_co, suffix: T_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.bool]: ... 
@overload def find( a: U_co, sub: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[int_]: ... @overload def find( a: S_co, sub: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[int_]: ... @overload def find( a: T_co, sub: T_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def index( a: U_co, sub: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[int_]: ... @overload def index( a: S_co, sub: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[int_]: ... @overload def index( a: T_co, sub: T_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... def isalpha(a: UST_co) -> NDArray[np.bool]: ... @@ -979,66 +979,66 @@ def isupper(a: UST_co) -> NDArray[np.bool]: ... def rfind( a: U_co, sub: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[int_]: ... @overload def rfind( a: S_co, sub: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[int_]: ... @overload def rfind( a: T_co, sub: T_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def rindex( a: U_co, sub: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[int_]: ... @overload def rindex( a: S_co, sub: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[int_]: ... 
@overload def rindex( a: T_co, sub: T_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def startswith( a: U_co, prefix: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.bool]: ... @overload def startswith( a: S_co, prefix: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.bool]: ... @overload def startswith( a: T_co, suffix: T_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.bool]: ... def str_len(A: UST_co) -> NDArray[int_]: ... @@ -1050,18 +1050,18 @@ def str_len(A: UST_co) -> NDArray[int_]: ... @overload def array( obj: U_co, - itemsize: int | None = ..., - copy: bool = ..., - unicode: L[True] | None = ..., - order: _OrderKACF = ..., + itemsize: int | None = None, + copy: bool = True, + unicode: L[True] | None = None, + order: _OrderKACF = None, ) -> _CharArray[str_]: ... @overload def array( obj: S_co, - itemsize: int | None = ..., - copy: bool = ..., - unicode: L[False] | None = ..., - order: _OrderKACF = ..., + itemsize: int | None = None, + copy: bool = True, + unicode: L[False] | None = None, + order: _OrderKACF = None, ) -> _CharArray[bytes_]: ... @overload def array( @@ -1069,16 +1069,16 @@ def array( itemsize: int | None, copy: bool, unicode: L[False], - order: _OrderKACF = ..., + order: _OrderKACF = None, ) -> _CharArray[bytes_]: ... @overload def array( obj: object, - itemsize: int | None = ..., - copy: bool = ..., + itemsize: int | None = None, + copy: bool = True, *, unicode: L[False], - order: _OrderKACF = ..., + order: _OrderKACF = None, ) -> _CharArray[bytes_]: ... @overload def array( @@ -1086,74 +1086,74 @@ def array( itemsize: int | None, copy: bool, unicode: L[True], - order: _OrderKACF = ..., + order: _OrderKACF = None, ) -> _CharArray[str_]: ... 
@overload def array( obj: object, - itemsize: int | None = ..., - copy: bool = ..., + itemsize: int | None = None, + copy: bool = True, *, unicode: L[True], - order: _OrderKACF = ..., + order: _OrderKACF = None, ) -> _CharArray[str_]: ... @overload def array( obj: object, - itemsize: int | None = ..., - copy: bool = ..., - unicode: bool | None = ..., - order: _OrderKACF = ..., + itemsize: int | None = None, + copy: bool = True, + unicode: bool | None = None, + order: _OrderKACF = None, ) -> _CharArray[str_] | _CharArray[bytes_]: ... @overload def asarray( obj: U_co, - itemsize: int | None = ..., - unicode: L[True] | None = ..., - order: _OrderKACF = ..., + itemsize: int | None = None, + unicode: L[True] | None = None, + order: _OrderKACF = None, ) -> _CharArray[str_]: ... @overload def asarray( obj: S_co, - itemsize: int | None = ..., - unicode: L[False] | None = ..., - order: _OrderKACF = ..., + itemsize: int | None = None, + unicode: L[False] | None = None, + order: _OrderKACF = None, ) -> _CharArray[bytes_]: ... @overload def asarray( obj: object, itemsize: int | None, unicode: L[False], - order: _OrderKACF = ..., + order: _OrderKACF = None, ) -> _CharArray[bytes_]: ... @overload def asarray( obj: object, - itemsize: int | None = ..., + itemsize: int | None = None, *, unicode: L[False], - order: _OrderKACF = ..., + order: _OrderKACF = None, ) -> _CharArray[bytes_]: ... @overload def asarray( obj: object, itemsize: int | None, unicode: L[True], - order: _OrderKACF = ..., + order: _OrderKACF = None, ) -> _CharArray[str_]: ... @overload def asarray( obj: object, - itemsize: int | None = ..., + itemsize: int | None = None, *, unicode: L[True], - order: _OrderKACF = ..., + order: _OrderKACF = None, ) -> _CharArray[str_]: ... 
@overload def asarray( obj: object, - itemsize: int | None = ..., - unicode: bool | None = ..., - order: _OrderKACF = ..., + itemsize: int | None = None, + unicode: bool | None = None, + order: _OrderKACF = None, ) -> _CharArray[str_] | _CharArray[bytes_]: ... diff --git a/numpy/_core/einsumfunc.pyi b/numpy/_core/einsumfunc.pyi index 9653a26dcd78..6d34883e6625 100644 --- a/numpy/_core/einsumfunc.pyi +++ b/numpy/_core/einsumfunc.pyi @@ -42,55 +42,55 @@ def einsum( subscripts: str | _ArrayLikeInt_co, /, *operands: _ArrayLikeBool_co, - out: None = ..., + out: None = None, dtype: _DTypeLikeBool | None = ..., order: _OrderKACF = ..., casting: _CastingSafe = ..., - optimize: _OptimizeKind = ..., + optimize: _OptimizeKind = False, ) -> Any: ... @overload def einsum( subscripts: str | _ArrayLikeInt_co, /, *operands: _ArrayLikeUInt_co, - out: None = ..., + out: None = None, dtype: _DTypeLikeUInt | None = ..., order: _OrderKACF = ..., casting: _CastingSafe = ..., - optimize: _OptimizeKind = ..., + optimize: _OptimizeKind = False, ) -> Any: ... @overload def einsum( subscripts: str | _ArrayLikeInt_co, /, *operands: _ArrayLikeInt_co, - out: None = ..., + out: None = None, dtype: _DTypeLikeInt | None = ..., order: _OrderKACF = ..., casting: _CastingSafe = ..., - optimize: _OptimizeKind = ..., + optimize: _OptimizeKind = False, ) -> Any: ... @overload def einsum( subscripts: str | _ArrayLikeInt_co, /, *operands: _ArrayLikeFloat_co, - out: None = ..., + out: None = None, dtype: _DTypeLikeFloat | None = ..., order: _OrderKACF = ..., casting: _CastingSafe = ..., - optimize: _OptimizeKind = ..., + optimize: _OptimizeKind = False, ) -> Any: ... @overload def einsum( subscripts: str | _ArrayLikeInt_co, /, *operands: _ArrayLikeComplex_co, - out: None = ..., + out: None = None, dtype: _DTypeLikeComplex | None = ..., order: _OrderKACF = ..., casting: _CastingSafe = ..., - optimize: _OptimizeKind = ..., + optimize: _OptimizeKind = False, ) -> Any: ... 
@overload def einsum( @@ -99,9 +99,9 @@ def einsum( *operands: Any, casting: _CastingUnsafe, dtype: _DTypeLikeComplex_co | None = ..., - out: None = ..., + out: None = None, order: _OrderKACF = ..., - optimize: _OptimizeKind = ..., + optimize: _OptimizeKind = False, ) -> Any: ... @overload def einsum( @@ -112,7 +112,7 @@ def einsum( dtype: _DTypeLikeComplex_co | None = ..., order: _OrderKACF = ..., casting: _CastingSafe = ..., - optimize: _OptimizeKind = ..., + optimize: _OptimizeKind = False, ) -> _ArrayT: ... @overload def einsum( @@ -123,7 +123,7 @@ def einsum( casting: _CastingUnsafe, dtype: _DTypeLikeComplex_co | None = ..., order: _OrderKACF = ..., - optimize: _OptimizeKind = ..., + optimize: _OptimizeKind = False, ) -> _ArrayT: ... @overload @@ -131,11 +131,11 @@ def einsum( subscripts: str | _ArrayLikeInt_co, /, *operands: _ArrayLikeObject_co, - out: None = ..., + out: None = None, dtype: _DTypeLikeObject | None = ..., order: _OrderKACF = ..., casting: _CastingSafe = ..., - optimize: _OptimizeKind = ..., + optimize: _OptimizeKind = False, ) -> Any: ... @overload def einsum( @@ -144,9 +144,9 @@ def einsum( *operands: Any, casting: _CastingUnsafe, dtype: _DTypeLikeObject | None = ..., - out: None = ..., + out: None = None, order: _OrderKACF = ..., - optimize: _OptimizeKind = ..., + optimize: _OptimizeKind = False, ) -> Any: ... @overload def einsum( @@ -157,7 +157,7 @@ def einsum( dtype: _DTypeLikeObject | None = ..., order: _OrderKACF = ..., casting: _CastingSafe = ..., - optimize: _OptimizeKind = ..., + optimize: _OptimizeKind = False, ) -> _ArrayT: ... @overload def einsum( @@ -168,7 +168,7 @@ def einsum( casting: _CastingUnsafe, dtype: _DTypeLikeObject | None = ..., order: _OrderKACF = ..., - optimize: _OptimizeKind = ..., + optimize: _OptimizeKind = False, ) -> _ArrayT: ... # NOTE: `einsum_call` is a hidden kwarg unavailable for public use. 
diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index 95fe7f7d8484..0c96ac8c0aa4 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -129,33 +129,33 @@ _PyScalar: TypeAlias = complex | bytes | str def take( a: _ArrayLike[_ScalarT], indices: _IntLike_co, - axis: None = ..., - out: None = ..., - mode: _ModeKind = ..., + axis: None = None, + out: None = None, + mode: _ModeKind = "raise", ) -> _ScalarT: ... @overload def take( a: ArrayLike, indices: _IntLike_co, - axis: SupportsIndex | None = ..., - out: None = ..., - mode: _ModeKind = ..., + axis: SupportsIndex | None = None, + out: None = None, + mode: _ModeKind = "raise", ) -> Any: ... @overload def take( a: _ArrayLike[_ScalarT], indices: _ArrayLikeInt_co, - axis: SupportsIndex | None = ..., - out: None = ..., - mode: _ModeKind = ..., + axis: SupportsIndex | None = None, + out: None = None, + mode: _ModeKind = "raise", ) -> NDArray[_ScalarT]: ... @overload def take( a: ArrayLike, indices: _ArrayLikeInt_co, - axis: SupportsIndex | None = ..., - out: None = ..., - mode: _ModeKind = ..., + axis: SupportsIndex | None = None, + out: None = None, + mode: _ModeKind = "raise", ) -> NDArray[Any]: ... @overload def take( @@ -163,16 +163,16 @@ def take( indices: _ArrayLikeInt_co, axis: SupportsIndex | None, out: _ArrayT, - mode: _ModeKind = ..., + mode: _ModeKind = "raise", ) -> _ArrayT: ... @overload def take( a: ArrayLike, indices: _ArrayLikeInt_co, - axis: SupportsIndex | None = ..., + axis: SupportsIndex | None = None, *, out: _ArrayT, - mode: _ModeKind = ..., + mode: _ModeKind = "raise", ) -> _ArrayT: ... @overload @@ -249,29 +249,29 @@ def reshape( def choose( a: _IntLike_co, choices: ArrayLike, - out: None = ..., - mode: _ModeKind = ..., + out: None = None, + mode: _ModeKind = "raise", ) -> Any: ... 
@overload def choose( a: _ArrayLikeInt_co, choices: _ArrayLike[_ScalarT], - out: None = ..., - mode: _ModeKind = ..., + out: None = None, + mode: _ModeKind = "raise", ) -> NDArray[_ScalarT]: ... @overload def choose( a: _ArrayLikeInt_co, choices: ArrayLike, - out: None = ..., - mode: _ModeKind = ..., + out: None = None, + mode: _ModeKind = "raise", ) -> NDArray[Any]: ... @overload def choose( a: _ArrayLikeInt_co, choices: ArrayLike, out: _ArrayT, - mode: _ModeKind = ..., + mode: _ModeKind = "raise", ) -> _ArrayT: ... @overload @@ -303,7 +303,7 @@ def put( a: NDArray[Any], ind: _ArrayLikeInt_co, v: ArrayLike, - mode: _ModeKind = ..., + mode: _ModeKind = "raise", ) -> None: ... @overload @@ -322,12 +322,12 @@ def swapaxes( @overload def transpose( a: _ArrayLike[_ScalarT], - axes: _ShapeLike | None = ... + axes: _ShapeLike | None = None, ) -> NDArray[_ScalarT]: ... @overload def transpose( a: ArrayLike, - axes: _ShapeLike | None = ... + axes: _ShapeLike | None = None, ) -> NDArray[Any]: ... @overload @@ -374,44 +374,44 @@ def argpartition( @overload def sort( a: _ArrayLike[_ScalarT], - axis: SupportsIndex | None = ..., - kind: _SortKind | None = ..., - order: str | Sequence[str] | None = ..., + axis: SupportsIndex | None = -1, + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, *, - stable: bool | None = ..., + stable: bool | None = None, ) -> NDArray[_ScalarT]: ... @overload def sort( a: ArrayLike, - axis: SupportsIndex | None = ..., - kind: _SortKind | None = ..., - order: str | Sequence[str] | None = ..., + axis: SupportsIndex | None = -1, + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, *, - stable: bool | None = ..., + stable: bool | None = None, ) -> NDArray[Any]: ... 
def argsort( a: ArrayLike, - axis: SupportsIndex | None = ..., - kind: _SortKind | None = ..., - order: str | Sequence[str] | None = ..., + axis: SupportsIndex | None = -1, + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, *, - stable: bool | None = ..., + stable: bool | None = None, ) -> NDArray[intp]: ... @overload def argmax( a: ArrayLike, - axis: None = ..., - out: None = ..., + axis: None = None, + out: None = None, *, keepdims: Literal[False] = ..., ) -> intp: ... @overload def argmax( a: ArrayLike, - axis: SupportsIndex | None = ..., - out: None = ..., + axis: SupportsIndex | None = None, + out: None = None, *, keepdims: bool = ..., ) -> Any: ... @@ -426,7 +426,7 @@ def argmax( @overload def argmax( a: ArrayLike, - axis: SupportsIndex | None = ..., + axis: SupportsIndex | None = None, *, out: _BoolOrIntArrayT, keepdims: bool = ..., @@ -435,16 +435,16 @@ def argmax( @overload def argmin( a: ArrayLike, - axis: None = ..., - out: None = ..., + axis: None = None, + out: None = None, *, keepdims: Literal[False] = ..., ) -> intp: ... @overload def argmin( a: ArrayLike, - axis: SupportsIndex | None = ..., - out: None = ..., + axis: SupportsIndex | None = None, + out: None = None, *, keepdims: bool = ..., ) -> Any: ... @@ -459,7 +459,7 @@ def argmin( @overload def argmin( a: ArrayLike, - axis: SupportsIndex | None = ..., + axis: SupportsIndex | None = None, *, out: _BoolOrIntArrayT, keepdims: bool = ..., @@ -469,15 +469,15 @@ def argmin( def searchsorted( a: ArrayLike, v: _ScalarLike_co, - side: _SortSide = ..., - sorter: _ArrayLikeInt_co | None = ..., # 1D int array + side: _SortSide = "left", + sorter: _ArrayLikeInt_co | None = None, # 1D int array ) -> intp: ... @overload def searchsorted( a: ArrayLike, v: ArrayLike, - side: _SortSide = ..., - sorter: _ArrayLikeInt_co | None = ..., # 1D int array + side: _SortSide = "left", + sorter: _ArrayLikeInt_co | None = None, # 1D int array ) -> NDArray[intp]: ... 
# @@ -497,42 +497,42 @@ def resize(a: ArrayLike, new_shape: _ShapeLike) -> NDArray[Any]: ... @overload def squeeze( a: _ScalarT, - axis: _ShapeLike | None = ..., + axis: _ShapeLike | None = None, ) -> _ScalarT: ... @overload def squeeze( a: _ArrayLike[_ScalarT], - axis: _ShapeLike | None = ..., + axis: _ShapeLike | None = None, ) -> NDArray[_ScalarT]: ... @overload def squeeze( a: ArrayLike, - axis: _ShapeLike | None = ..., + axis: _ShapeLike | None = None, ) -> NDArray[Any]: ... @overload def diagonal( a: _ArrayLike[_ScalarT], - offset: SupportsIndex = ..., - axis1: SupportsIndex = ..., - axis2: SupportsIndex = ..., # >= 2D array + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, # >= 2D array ) -> NDArray[_ScalarT]: ... @overload def diagonal( a: ArrayLike, - offset: SupportsIndex = ..., - axis1: SupportsIndex = ..., - axis2: SupportsIndex = ..., # >= 2D array + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, # >= 2D array ) -> NDArray[Any]: ... @overload def trace( a: ArrayLike, # >= 2D array - offset: SupportsIndex = ..., - axis1: SupportsIndex = ..., - axis2: SupportsIndex = ..., - dtype: DTypeLike | None = ..., - out: None = ..., + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + dtype: DTypeLike | None = None, + out: None = None, ) -> Any: ... @overload def trace( @@ -546,10 +546,10 @@ def trace( @overload def trace( a: ArrayLike, # >= 2D array - offset: SupportsIndex = ..., - axis1: SupportsIndex = ..., - axis2: SupportsIndex = ..., - dtype: DTypeLike | None = ..., + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + dtype: DTypeLike | None = None, *, out: _ArrayT, ) -> _ArrayT: ... @@ -601,15 +601,15 @@ def shape(a: ArrayLike) -> _AnyShape: ... 
def compress( condition: _ArrayLikeBool_co, # 1D bool array a: _ArrayLike[_ScalarT], - axis: SupportsIndex | None = ..., - out: None = ..., + axis: SupportsIndex | None = None, + out: None = None, ) -> NDArray[_ScalarT]: ... @overload def compress( condition: _ArrayLikeBool_co, # 1D bool array a: ArrayLike, - axis: SupportsIndex | None = ..., - out: None = ..., + axis: SupportsIndex | None = None, + out: None = None, ) -> NDArray[Any]: ... @overload def compress( @@ -622,7 +622,7 @@ def compress( def compress( condition: _ArrayLikeBool_co, # 1D bool array a: ArrayLike, - axis: SupportsIndex | None = ..., + axis: SupportsIndex | None = None, *, out: _ArrayT, ) -> _ArrayT: ... @@ -632,7 +632,7 @@ def clip( a: _ScalarT, a_min: ArrayLike | None, a_max: ArrayLike | None, - out: None = ..., + out: None = None, *, min: ArrayLike | None = ..., max: ArrayLike | None = ..., @@ -648,7 +648,7 @@ def clip( a: _ScalarLike_co, a_min: ArrayLike | None, a_max: ArrayLike | None, - out: None = ..., + out: None = None, *, min: ArrayLike | None = ..., max: ArrayLike | None = ..., @@ -664,7 +664,7 @@ def clip( a: _ArrayLike[_ScalarT], a_min: ArrayLike | None, a_max: ArrayLike | None, - out: None = ..., + out: None = None, *, min: ArrayLike | None = ..., max: ArrayLike | None = ..., @@ -680,7 +680,7 @@ def clip( a: ArrayLike, a_min: ArrayLike | None, a_max: ArrayLike | None, - out: None = ..., + out: None = None, *, min: ArrayLike | None = ..., max: ArrayLike | None = ..., @@ -712,7 +712,7 @@ def clip( a: ArrayLike, a_min: ArrayLike | None, a_max: ArrayLike | None, - out: ArrayLike = ..., + out: ArrayLike = None, *, min: ArrayLike | None = ..., max: ArrayLike | None = ..., @@ -727,9 +727,9 @@ def clip( @overload def sum( a: _ArrayLike[_ScalarT], - axis: None = ..., - dtype: None = ..., - out: None = ..., + axis: None = None, + dtype: None = None, + out: None = None, keepdims: Literal[False] = ..., initial: _NumberLike_co = ..., where: _ArrayLikeBool_co = ..., @@ -737,9 +737,9 @@ def sum( 
@overload def sum( a: _ArrayLike[_ScalarT], - axis: None = ..., - dtype: None = ..., - out: None = ..., + axis: None = None, + dtype: None = None, + out: None = None, keepdims: bool = ..., initial: _NumberLike_co = ..., where: _ArrayLikeBool_co = ..., @@ -749,7 +749,7 @@ def sum( a: ArrayLike, axis: None, dtype: _DTypeLike[_ScalarT], - out: None = ..., + out: None = None, keepdims: Literal[False] = ..., initial: _NumberLike_co = ..., where: _ArrayLikeBool_co = ..., @@ -757,10 +757,10 @@ def sum( @overload def sum( a: ArrayLike, - axis: None = ..., + axis: None = None, *, dtype: _DTypeLike[_ScalarT], - out: None = ..., + out: None = None, keepdims: Literal[False] = ..., initial: _NumberLike_co = ..., where: _ArrayLikeBool_co = ..., @@ -770,7 +770,7 @@ def sum( a: ArrayLike, axis: _ShapeLike | None, dtype: _DTypeLike[_ScalarT], - out: None = ..., + out: None = None, keepdims: bool = ..., initial: _NumberLike_co = ..., where: _ArrayLikeBool_co = ..., @@ -778,10 +778,10 @@ def sum( @overload def sum( a: ArrayLike, - axis: _ShapeLike | None = ..., + axis: _ShapeLike | None = None, *, dtype: _DTypeLike[_ScalarT], - out: None = ..., + out: None = None, keepdims: bool = ..., initial: _NumberLike_co = ..., where: _ArrayLikeBool_co = ..., @@ -789,9 +789,9 @@ def sum( @overload def sum( a: ArrayLike, - axis: _ShapeLike | None = ..., - dtype: DTypeLike = ..., - out: None = ..., + axis: _ShapeLike | None = None, + dtype: DTypeLike = None, + out: None = None, keepdims: bool = ..., initial: _NumberLike_co = ..., where: _ArrayLikeBool_co = ..., @@ -809,8 +809,8 @@ def sum( @overload def sum( a: ArrayLike, - axis: _ShapeLike | None = ..., - dtype: DTypeLike = ..., + axis: _ShapeLike | None = None, + dtype: DTypeLike = None, *, out: _ArrayT, keepdims: bool = ..., @@ -898,38 +898,38 @@ def any( @overload def cumsum( a: _ArrayLike[_ScalarT], - axis: SupportsIndex | None = ..., - dtype: None = ..., - out: None = ..., + axis: SupportsIndex | None = None, + dtype: None = None, + out: 
None = None, ) -> NDArray[_ScalarT]: ... @overload def cumsum( a: ArrayLike, - axis: SupportsIndex | None = ..., - dtype: None = ..., - out: None = ..., + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, ) -> NDArray[Any]: ... @overload def cumsum( a: ArrayLike, axis: SupportsIndex | None, dtype: _DTypeLike[_ScalarT], - out: None = ..., + out: None = None, ) -> NDArray[_ScalarT]: ... @overload def cumsum( a: ArrayLike, - axis: SupportsIndex | None = ..., + axis: SupportsIndex | None = None, *, dtype: _DTypeLike[_ScalarT], - out: None = ..., + out: None = None, ) -> NDArray[_ScalarT]: ... @overload def cumsum( a: ArrayLike, - axis: SupportsIndex | None = ..., - dtype: DTypeLike = ..., - out: None = ..., + axis: SupportsIndex | None = None, + dtype: DTypeLike = None, + out: None = None, ) -> NDArray[Any]: ... @overload def cumsum( @@ -941,8 +941,8 @@ def cumsum( @overload def cumsum( a: ArrayLike, - axis: SupportsIndex | None = ..., - dtype: DTypeLike = ..., + axis: SupportsIndex | None = None, + dtype: DTypeLike = None, *, out: _ArrayT, ) -> _ArrayT: ... @@ -952,64 +952,64 @@ def cumulative_sum( x: _ArrayLike[_ScalarT], /, *, - axis: SupportsIndex | None = ..., - dtype: None = ..., - out: None = ..., - include_initial: bool = ..., + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, + include_initial: bool = False, ) -> NDArray[_ScalarT]: ... @overload def cumulative_sum( x: ArrayLike, /, *, - axis: SupportsIndex | None = ..., - dtype: None = ..., - out: None = ..., - include_initial: bool = ..., + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, + include_initial: bool = False, ) -> NDArray[Any]: ... @overload def cumulative_sum( x: ArrayLike, /, *, - axis: SupportsIndex | None = ..., + axis: SupportsIndex | None = None, dtype: _DTypeLike[_ScalarT], - out: None = ..., - include_initial: bool = ..., + out: None = None, + include_initial: bool = False, ) -> NDArray[_ScalarT]: ... 
@overload def cumulative_sum( x: ArrayLike, /, *, - axis: SupportsIndex | None = ..., - dtype: DTypeLike = ..., - out: None = ..., - include_initial: bool = ..., + axis: SupportsIndex | None = None, + dtype: DTypeLike = None, + out: None = None, + include_initial: bool = False, ) -> NDArray[Any]: ... @overload def cumulative_sum( x: ArrayLike, /, *, - axis: SupportsIndex | None = ..., - dtype: DTypeLike = ..., + axis: SupportsIndex | None = None, + dtype: DTypeLike = None, out: _ArrayT, - include_initial: bool = ..., + include_initial: bool = False, ) -> _ArrayT: ... @overload def ptp( a: _ArrayLike[_ScalarT], - axis: None = ..., - out: None = ..., + axis: None = None, + out: None = None, keepdims: Literal[False] = ..., ) -> _ScalarT: ... @overload def ptp( a: ArrayLike, - axis: _ShapeLike | None = ..., - out: None = ..., + axis: _ShapeLike | None = None, + out: None = None, keepdims: bool = ..., ) -> Any: ... @overload @@ -1022,7 +1022,7 @@ def ptp( @overload def ptp( a: ArrayLike, - axis: _ShapeLike | None = ..., + axis: _ShapeLike | None = None, *, out: _ArrayT, keepdims: bool = ..., @@ -1031,8 +1031,8 @@ def ptp( @overload def amax( a: _ArrayLike[_ScalarT], - axis: None = ..., - out: None = ..., + axis: None = None, + out: None = None, keepdims: Literal[False] = ..., initial: _NumberLike_co = ..., where: _ArrayLikeBool_co = ..., @@ -1040,8 +1040,8 @@ def amax( @overload def amax( a: ArrayLike, - axis: _ShapeLike | None = ..., - out: None = ..., + axis: _ShapeLike | None = None, + out: None = None, keepdims: bool = ..., initial: _NumberLike_co = ..., where: _ArrayLikeBool_co = ..., @@ -1058,7 +1058,7 @@ def amax( @overload def amax( a: ArrayLike, - axis: _ShapeLike | None = ..., + axis: _ShapeLike | None = None, *, out: _ArrayT, keepdims: bool = ..., @@ -1069,8 +1069,8 @@ def amax( @overload def amin( a: _ArrayLike[_ScalarT], - axis: None = ..., - out: None = ..., + axis: None = None, + out: None = None, keepdims: Literal[False] = ..., initial: _NumberLike_co = 
..., where: _ArrayLikeBool_co = ..., @@ -1078,8 +1078,8 @@ def amin( @overload def amin( a: ArrayLike, - axis: _ShapeLike | None = ..., - out: None = ..., + axis: _ShapeLike | None = None, + out: None = None, keepdims: bool = ..., initial: _NumberLike_co = ..., where: _ArrayLikeBool_co = ..., @@ -1096,7 +1096,7 @@ def amin( @overload def amin( a: ArrayLike, - axis: _ShapeLike | None = ..., + axis: _ShapeLike | None = None, *, out: _ArrayT, keepdims: bool = ..., @@ -1114,9 +1114,9 @@ def amin( @overload def prod( a: _ArrayLikeBool_co, - axis: None = ..., - dtype: None = ..., - out: None = ..., + axis: None = None, + dtype: None = None, + out: None = None, keepdims: Literal[False] = ..., initial: _NumberLike_co = ..., where: _ArrayLikeBool_co = ..., @@ -1124,9 +1124,9 @@ def prod( @overload def prod( a: _ArrayLikeUInt_co, - axis: None = ..., - dtype: None = ..., - out: None = ..., + axis: None = None, + dtype: None = None, + out: None = None, keepdims: Literal[False] = ..., initial: _NumberLike_co = ..., where: _ArrayLikeBool_co = ..., @@ -1134,9 +1134,9 @@ def prod( @overload def prod( a: _ArrayLikeInt_co, - axis: None = ..., - dtype: None = ..., - out: None = ..., + axis: None = None, + dtype: None = None, + out: None = None, keepdims: Literal[False] = ..., initial: _NumberLike_co = ..., where: _ArrayLikeBool_co = ..., @@ -1144,9 +1144,9 @@ def prod( @overload def prod( a: _ArrayLikeFloat_co, - axis: None = ..., - dtype: None = ..., - out: None = ..., + axis: None = None, + dtype: None = None, + out: None = None, keepdims: Literal[False] = ..., initial: _NumberLike_co = ..., where: _ArrayLikeBool_co = ..., @@ -1154,9 +1154,9 @@ def prod( @overload def prod( a: _ArrayLikeComplex_co, - axis: None = ..., - dtype: None = ..., - out: None = ..., + axis: None = None, + dtype: None = None, + out: None = None, keepdims: Literal[False] = ..., initial: _NumberLike_co = ..., where: _ArrayLikeBool_co = ..., @@ -1164,9 +1164,9 @@ def prod( @overload def prod( a: 
_ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = ..., - dtype: None = ..., - out: None = ..., + axis: _ShapeLike | None = None, + dtype: None = None, + out: None = None, keepdims: bool = ..., initial: _NumberLike_co = ..., where: _ArrayLikeBool_co = ..., @@ -1176,7 +1176,7 @@ def prod( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: None, dtype: _DTypeLike[_ScalarT], - out: None = ..., + out: None = None, keepdims: Literal[False] = ..., initial: _NumberLike_co = ..., where: _ArrayLikeBool_co = ..., @@ -1184,10 +1184,10 @@ def prod( @overload def prod( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None = ..., + axis: None = None, *, dtype: _DTypeLike[_ScalarT], - out: None = ..., + out: None = None, keepdims: Literal[False] = ..., initial: _NumberLike_co = ..., where: _ArrayLikeBool_co = ..., @@ -1195,9 +1195,9 @@ def prod( @overload def prod( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = ..., - dtype: DTypeLike | None = ..., - out: None = ..., + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, keepdims: bool = ..., initial: _NumberLike_co = ..., where: _ArrayLikeBool_co = ..., @@ -1215,8 +1215,8 @@ def prod( @overload def prod( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = ..., - dtype: DTypeLike | None = ..., + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, *, out: _ArrayT, keepdims: bool = ..., @@ -1227,66 +1227,66 @@ def prod( @overload def cumprod( a: _ArrayLikeBool_co, - axis: SupportsIndex | None = ..., - dtype: None = ..., - out: None = ..., + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, ) -> NDArray[int_]: ... @overload def cumprod( a: _ArrayLikeUInt_co, - axis: SupportsIndex | None = ..., - dtype: None = ..., - out: None = ..., + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, ) -> NDArray[uint64]: ... 
@overload def cumprod( a: _ArrayLikeInt_co, - axis: SupportsIndex | None = ..., - dtype: None = ..., - out: None = ..., + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, ) -> NDArray[int64]: ... @overload def cumprod( a: _ArrayLikeFloat_co, - axis: SupportsIndex | None = ..., - dtype: None = ..., - out: None = ..., + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, ) -> NDArray[floating]: ... @overload def cumprod( a: _ArrayLikeComplex_co, - axis: SupportsIndex | None = ..., - dtype: None = ..., - out: None = ..., + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, ) -> NDArray[complexfloating]: ... @overload def cumprod( a: _ArrayLikeObject_co, - axis: SupportsIndex | None = ..., - dtype: None = ..., - out: None = ..., + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, ) -> NDArray[object_]: ... @overload def cumprod( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: SupportsIndex | None, dtype: _DTypeLike[_ScalarT], - out: None = ..., + out: None = None, ) -> NDArray[_ScalarT]: ... @overload def cumprod( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: SupportsIndex | None = ..., + axis: SupportsIndex | None = None, *, dtype: _DTypeLike[_ScalarT], - out: None = ..., + out: None = None, ) -> NDArray[_ScalarT]: ... @overload def cumprod( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: SupportsIndex | None = ..., - dtype: DTypeLike = ..., - out: None = ..., + axis: SupportsIndex | None = None, + dtype: DTypeLike = None, + out: None = None, ) -> NDArray[Any]: ... @overload def cumprod( @@ -1298,8 +1298,8 @@ def cumprod( @overload def cumprod( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: SupportsIndex | None = ..., - dtype: DTypeLike = ..., + axis: SupportsIndex | None = None, + dtype: DTypeLike = None, *, out: _ArrayT, ) -> _ArrayT: ... 
@@ -1309,131 +1309,131 @@ def cumulative_prod( x: _ArrayLikeBool_co, /, *, - axis: SupportsIndex | None = ..., - dtype: None = ..., - out: None = ..., - include_initial: bool = ..., + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, + include_initial: bool = False, ) -> NDArray[int_]: ... @overload def cumulative_prod( x: _ArrayLikeUInt_co, /, *, - axis: SupportsIndex | None = ..., - dtype: None = ..., - out: None = ..., - include_initial: bool = ..., + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, + include_initial: bool = False, ) -> NDArray[uint64]: ... @overload def cumulative_prod( x: _ArrayLikeInt_co, /, *, - axis: SupportsIndex | None = ..., - dtype: None = ..., - out: None = ..., - include_initial: bool = ..., + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, + include_initial: bool = False, ) -> NDArray[int64]: ... @overload def cumulative_prod( x: _ArrayLikeFloat_co, /, *, - axis: SupportsIndex | None = ..., - dtype: None = ..., - out: None = ..., - include_initial: bool = ..., + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, + include_initial: bool = False, ) -> NDArray[floating]: ... @overload def cumulative_prod( x: _ArrayLikeComplex_co, /, *, - axis: SupportsIndex | None = ..., - dtype: None = ..., - out: None = ..., - include_initial: bool = ..., + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, + include_initial: bool = False, ) -> NDArray[complexfloating]: ... @overload def cumulative_prod( x: _ArrayLikeObject_co, /, *, - axis: SupportsIndex | None = ..., - dtype: None = ..., - out: None = ..., - include_initial: bool = ..., + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, + include_initial: bool = False, ) -> NDArray[object_]: ... 
@overload def cumulative_prod( x: _ArrayLikeComplex_co | _ArrayLikeObject_co, /, *, - axis: SupportsIndex | None = ..., + axis: SupportsIndex | None = None, dtype: _DTypeLike[_ScalarT], - out: None = ..., - include_initial: bool = ..., + out: None = None, + include_initial: bool = False, ) -> NDArray[_ScalarT]: ... @overload def cumulative_prod( x: _ArrayLikeComplex_co | _ArrayLikeObject_co, /, *, - axis: SupportsIndex | None = ..., - dtype: DTypeLike = ..., - out: None = ..., - include_initial: bool = ..., + axis: SupportsIndex | None = None, + dtype: DTypeLike = None, + out: None = None, + include_initial: bool = False, ) -> NDArray[Any]: ... @overload def cumulative_prod( x: _ArrayLikeComplex_co | _ArrayLikeObject_co, /, *, - axis: SupportsIndex | None = ..., - dtype: DTypeLike = ..., + axis: SupportsIndex | None = None, + dtype: DTypeLike = None, out: _ArrayT, - include_initial: bool = ..., + include_initial: bool = False, ) -> _ArrayT: ... def ndim(a: ArrayLike) -> int: ... -def size(a: ArrayLike, axis: int | tuple[int, ...] | None = ...) -> int: ... +def size(a: ArrayLike, axis: int | tuple[int, ...] | None = None) -> int: ... @overload def around( a: _BoolLike_co, - decimals: SupportsIndex = ..., - out: None = ..., + decimals: SupportsIndex = 0, + out: None = None, ) -> float16: ... @overload def around( a: _NumberOrObjectT, - decimals: SupportsIndex = ..., - out: None = ..., + decimals: SupportsIndex = 0, + out: None = None, ) -> _NumberOrObjectT: ... @overload def around( a: _ComplexLike_co | object_, - decimals: SupportsIndex = ..., - out: None = ..., + decimals: SupportsIndex = 0, + out: None = None, ) -> Any: ... @overload def around( a: _ArrayLikeBool_co, - decimals: SupportsIndex = ..., - out: None = ..., + decimals: SupportsIndex = 0, + out: None = None, ) -> NDArray[float16]: ... 
@overload def around( a: _ArrayLike[_NumberOrObjectT], - decimals: SupportsIndex = ..., - out: None = ..., + decimals: SupportsIndex = 0, + out: None = None, ) -> NDArray[_NumberOrObjectT]: ... @overload def around( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - decimals: SupportsIndex = ..., - out: None = ..., + decimals: SupportsIndex = 0, + out: None = None, ) -> NDArray[Any]: ... @overload def around( @@ -1444,7 +1444,7 @@ def around( @overload def around( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - decimals: SupportsIndex = ..., + decimals: SupportsIndex = 0, *, out: _ArrayT, ) -> _ArrayT: ... @@ -1452,9 +1452,9 @@ def around( @overload def mean( a: _ArrayLikeFloat_co, - axis: None = ..., - dtype: None = ..., - out: None = ..., + axis: None = None, + dtype: None = None, + out: None = None, keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., @@ -1462,9 +1462,9 @@ def mean( @overload def mean( a: _ArrayLikeComplex_co, - axis: None = ..., - dtype: None = ..., - out: None = ..., + axis: None = None, + dtype: None = None, + out: None = None, keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., @@ -1472,9 +1472,9 @@ def mean( @overload def mean( a: _ArrayLike[np.timedelta64], - axis: None = ..., - dtype: None = ..., - out: None = ..., + axis: None = None, + dtype: None = None, + out: None = None, keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., @@ -1492,8 +1492,8 @@ def mean( @overload def mean( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = ..., - dtype: DTypeLike | None = ..., + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, *, out: _ArrayT, keepdims: bool | _NoValueType = ..., @@ -1504,7 +1504,7 @@ def mean( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: None, dtype: _DTypeLike[_ScalarT], - out: None = ..., + out: None = None, keepdims: Literal[False] | _NoValueType = 
..., *, where: _ArrayLikeBool_co | _NoValueType = ..., @@ -1512,10 +1512,10 @@ def mean( @overload def mean( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None = ..., + axis: None = None, *, dtype: _DTypeLike[_ScalarT], - out: None = ..., + out: None = None, keepdims: Literal[False] | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ScalarT: ... @@ -1534,7 +1534,7 @@ def mean( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: _ShapeLike | None, dtype: _DTypeLike[_ScalarT], - out: None = ..., + out: None = None, *, keepdims: bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., @@ -1542,19 +1542,19 @@ def mean( @overload def mean( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = ..., + axis: _ShapeLike | None = None, *, dtype: _DTypeLike[_ScalarT], - out: None = ..., + out: None = None, keepdims: bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ScalarT | NDArray[_ScalarT]: ... 
@overload def mean( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = ..., - dtype: DTypeLike | None = ..., - out: None = ..., + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, keepdims: bool | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., @@ -1563,10 +1563,10 @@ def mean( @overload def std( a: _ArrayLikeComplex_co, - axis: None = ..., - dtype: None = ..., - out: None = ..., - ddof: float = ..., + axis: None = None, + dtype: None = None, + out: None = None, + ddof: float = 0, keepdims: Literal[False] = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., @@ -1576,10 +1576,10 @@ def std( @overload def std( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = ..., - dtype: None = ..., - out: None = ..., - ddof: float = ..., + axis: _ShapeLike | None = None, + dtype: None = None, + out: None = None, + ddof: float = 0, keepdims: bool = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., @@ -1591,8 +1591,8 @@ def std( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: None, dtype: _DTypeLike[_ScalarT], - out: None = ..., - ddof: float = ..., + out: None = None, + ddof: float = 0, keepdims: Literal[False] = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., @@ -1602,11 +1602,11 @@ def std( @overload def std( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None = ..., + axis: None = None, *, dtype: _DTypeLike[_ScalarT], - out: None = ..., - ddof: float = ..., + out: None = None, + ddof: float = 0, keepdims: Literal[False] = ..., where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., @@ -1615,10 +1615,10 @@ def std( @overload def std( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = ..., - dtype: DTypeLike = ..., - out: None = ..., - ddof: float = ..., + axis: _ShapeLike | None = None, + dtype: DTypeLike = None, + out: None = None, + ddof: float = 0, keepdims: 
bool = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., @@ -1631,7 +1631,7 @@ def std( axis: _ShapeLike | None, dtype: DTypeLike, out: _ArrayT, - ddof: float = ..., + ddof: float = 0, keepdims: bool = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., @@ -1641,11 +1641,11 @@ def std( @overload def std( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = ..., - dtype: DTypeLike = ..., + axis: _ShapeLike | None = None, + dtype: DTypeLike = None, *, out: _ArrayT, - ddof: float = ..., + ddof: float = 0, keepdims: bool = ..., where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., @@ -1655,10 +1655,10 @@ def std( @overload def var( a: _ArrayLikeComplex_co, - axis: None = ..., - dtype: None = ..., - out: None = ..., - ddof: float = ..., + axis: None = None, + dtype: None = None, + out: None = None, + ddof: float = 0, keepdims: Literal[False] = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., @@ -1668,10 +1668,10 @@ def var( @overload def var( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = ..., - dtype: None = ..., - out: None = ..., - ddof: float = ..., + axis: _ShapeLike | None = None, + dtype: None = None, + out: None = None, + ddof: float = 0, keepdims: bool = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., @@ -1683,8 +1683,8 @@ def var( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: None, dtype: _DTypeLike[_ScalarT], - out: None = ..., - ddof: float = ..., + out: None = None, + ddof: float = 0, keepdims: Literal[False] = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., @@ -1694,11 +1694,11 @@ def var( @overload def var( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None = ..., + axis: None = None, *, dtype: _DTypeLike[_ScalarT], - out: None = ..., - ddof: float = ..., + out: None = None, + ddof: float = 0, keepdims: Literal[False] = ..., where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | 
_ArrayLikeObject_co | _NoValueType = ..., @@ -1707,10 +1707,10 @@ def var( @overload def var( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = ..., - dtype: DTypeLike = ..., - out: None = ..., - ddof: float = ..., + axis: _ShapeLike | None = None, + dtype: DTypeLike = None, + out: None = None, + ddof: float = 0, keepdims: bool = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., @@ -1723,7 +1723,7 @@ def var( axis: _ShapeLike | None, dtype: DTypeLike, out: _ArrayT, - ddof: float = ..., + ddof: float = 0, keepdims: bool = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., @@ -1733,11 +1733,11 @@ def var( @overload def var( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = ..., - dtype: DTypeLike = ..., + axis: _ShapeLike | None = None, + dtype: DTypeLike = None, *, out: _ArrayT, - ddof: float = ..., + ddof: float = 0, keepdims: bool = ..., where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., diff --git a/numpy/_core/numeric.pyi b/numpy/_core/numeric.pyi index c73a2694df3c..d08eb540743d 100644 --- a/numpy/_core/numeric.pyi +++ b/numpy/_core/numeric.pyi @@ -737,8 +737,8 @@ def ones_like( def full( shape: SupportsIndex, fill_value: _ScalarT, - dtype: None = ..., - order: _OrderCF = ..., + dtype: None = None, + order: _OrderCF = "C", **kwargs: Unpack[_KwargsEmpty], ) -> _Array[tuple[int], _ScalarT]: ... @overload @@ -746,7 +746,7 @@ def full( shape: SupportsIndex, fill_value: Any, dtype: _DTypeT | _SupportsDType[_DTypeT], - order: _OrderCF = ..., + order: _OrderCF = "C", **kwargs: Unpack[_KwargsEmpty], ) -> np.ndarray[tuple[int], _DTypeT]: ... @overload @@ -754,15 +754,15 @@ def full( shape: SupportsIndex, fill_value: Any, dtype: type[_ScalarT], - order: _OrderCF = ..., + order: _OrderCF = "C", **kwargs: Unpack[_KwargsEmpty], ) -> _Array[tuple[int], _ScalarT]: ... 
@overload def full( shape: SupportsIndex, fill_value: Any, - dtype: DTypeLike | None = ..., - order: _OrderCF = ..., + dtype: DTypeLike | None = None, + order: _OrderCF = "C", **kwargs: Unpack[_KwargsEmpty], ) -> _Array[tuple[int], Any]: ... # known shape @@ -770,8 +770,8 @@ def full( def full( shape: _AnyShapeT, fill_value: _ScalarT, - dtype: None = ..., - order: _OrderCF = ..., + dtype: None = None, + order: _OrderCF = "C", **kwargs: Unpack[_KwargsEmpty], ) -> _Array[_AnyShapeT, _ScalarT]: ... @overload @@ -779,7 +779,7 @@ def full( shape: _AnyShapeT, fill_value: Any, dtype: _DTypeT | _SupportsDType[_DTypeT], - order: _OrderCF = ..., + order: _OrderCF = "C", **kwargs: Unpack[_KwargsEmpty], ) -> np.ndarray[_AnyShapeT, _DTypeT]: ... @overload @@ -787,15 +787,15 @@ def full( shape: _AnyShapeT, fill_value: Any, dtype: type[_ScalarT], - order: _OrderCF = ..., + order: _OrderCF = "C", **kwargs: Unpack[_KwargsEmpty], ) -> _Array[_AnyShapeT, _ScalarT]: ... @overload def full( shape: _AnyShapeT, fill_value: Any, - dtype: DTypeLike | None = ..., - order: _OrderCF = ..., + dtype: DTypeLike | None = None, + order: _OrderCF = "C", **kwargs: Unpack[_KwargsEmpty], ) -> _Array[_AnyShapeT, Any]: ... # unknown shape @@ -803,8 +803,8 @@ def full( def full( shape: _ShapeLike, fill_value: _ScalarT, - dtype: None = ..., - order: _OrderCF = ..., + dtype: None = None, + order: _OrderCF = "C", **kwargs: Unpack[_KwargsEmpty], ) -> NDArray[_ScalarT]: ... @overload @@ -812,7 +812,7 @@ def full( shape: _ShapeLike, fill_value: Any, dtype: _DTypeT | _SupportsDType[_DTypeT], - order: _OrderCF = ..., + order: _OrderCF = "C", **kwargs: Unpack[_KwargsEmpty], ) -> np.ndarray[Any, _DTypeT]: ... @overload @@ -820,15 +820,15 @@ def full( shape: _ShapeLike, fill_value: Any, dtype: type[_ScalarT], - order: _OrderCF = ..., + order: _OrderCF = "C", **kwargs: Unpack[_KwargsEmpty], ) -> NDArray[_ScalarT]: ... 
@overload def full( shape: _ShapeLike, fill_value: Any, - dtype: DTypeLike | None = ..., - order: _OrderCF = ..., + dtype: DTypeLike | None = None, + order: _OrderCF = "C", **kwargs: Unpack[_KwargsEmpty], ) -> NDArray[Any]: ... @@ -900,147 +900,147 @@ def flatnonzero(a: ArrayLike) -> NDArray[intp]: ... def correlate( a: _ArrayLike[Never], v: _ArrayLike[Never], - mode: _CorrelateMode = ..., + mode: _CorrelateMode = "valid", ) -> NDArray[Any]: ... @overload def correlate( a: _ArrayLikeBool_co, v: _ArrayLikeBool_co, - mode: _CorrelateMode = ..., + mode: _CorrelateMode = "valid", ) -> NDArray[np.bool]: ... @overload def correlate( a: _ArrayLikeUInt_co, v: _ArrayLikeUInt_co, - mode: _CorrelateMode = ..., + mode: _CorrelateMode = "valid", ) -> NDArray[unsignedinteger]: ... @overload def correlate( a: _ArrayLikeInt_co, v: _ArrayLikeInt_co, - mode: _CorrelateMode = ..., + mode: _CorrelateMode = "valid", ) -> NDArray[signedinteger]: ... @overload def correlate( a: _ArrayLikeFloat_co, v: _ArrayLikeFloat_co, - mode: _CorrelateMode = ..., + mode: _CorrelateMode = "valid", ) -> NDArray[floating]: ... @overload def correlate( a: _ArrayLikeComplex_co, v: _ArrayLikeComplex_co, - mode: _CorrelateMode = ..., + mode: _CorrelateMode = "valid", ) -> NDArray[complexfloating]: ... @overload def correlate( a: _ArrayLikeTD64_co, v: _ArrayLikeTD64_co, - mode: _CorrelateMode = ..., + mode: _CorrelateMode = "valid", ) -> NDArray[timedelta64]: ... @overload def correlate( a: _ArrayLikeObject_co, v: _ArrayLikeObject_co, - mode: _CorrelateMode = ..., + mode: _CorrelateMode = "valid", ) -> NDArray[object_]: ... @overload def convolve( a: _ArrayLike[Never], v: _ArrayLike[Never], - mode: _CorrelateMode = ..., + mode: _CorrelateMode = "full", ) -> NDArray[Any]: ... @overload def convolve( a: _ArrayLikeBool_co, v: _ArrayLikeBool_co, - mode: _CorrelateMode = ..., + mode: _CorrelateMode = "full", ) -> NDArray[np.bool]: ... 
@overload def convolve( a: _ArrayLikeUInt_co, v: _ArrayLikeUInt_co, - mode: _CorrelateMode = ..., + mode: _CorrelateMode = "full", ) -> NDArray[unsignedinteger]: ... @overload def convolve( a: _ArrayLikeInt_co, v: _ArrayLikeInt_co, - mode: _CorrelateMode = ..., + mode: _CorrelateMode = "full", ) -> NDArray[signedinteger]: ... @overload def convolve( a: _ArrayLikeFloat_co, v: _ArrayLikeFloat_co, - mode: _CorrelateMode = ..., + mode: _CorrelateMode = "full", ) -> NDArray[floating]: ... @overload def convolve( a: _ArrayLikeComplex_co, v: _ArrayLikeComplex_co, - mode: _CorrelateMode = ..., + mode: _CorrelateMode = "full", ) -> NDArray[complexfloating]: ... @overload def convolve( a: _ArrayLikeTD64_co, v: _ArrayLikeTD64_co, - mode: _CorrelateMode = ..., + mode: _CorrelateMode = "full", ) -> NDArray[timedelta64]: ... @overload def convolve( a: _ArrayLikeObject_co, v: _ArrayLikeObject_co, - mode: _CorrelateMode = ..., + mode: _CorrelateMode = "full", ) -> NDArray[object_]: ... @overload def outer( a: _ArrayLike[Never], b: _ArrayLike[Never], - out: None = ..., + out: None = None, ) -> NDArray[Any]: ... @overload def outer( a: _ArrayLikeBool_co, b: _ArrayLikeBool_co, - out: None = ..., + out: None = None, ) -> NDArray[np.bool]: ... @overload def outer( a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co, - out: None = ..., + out: None = None, ) -> NDArray[unsignedinteger]: ... @overload def outer( a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, - out: None = ..., + out: None = None, ) -> NDArray[signedinteger]: ... @overload def outer( a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, - out: None = ..., + out: None = None, ) -> NDArray[floating]: ... @overload def outer( a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, - out: None = ..., + out: None = None, ) -> NDArray[complexfloating]: ... @overload def outer( a: _ArrayLikeTD64_co, b: _ArrayLikeTD64_co, - out: None = ..., + out: None = None, ) -> NDArray[timedelta64]: ... 
@overload def outer( a: _ArrayLikeObject_co, b: _ArrayLikeObject_co, - out: None = ..., + out: None = None, ) -> NDArray[object_]: ... @overload def outer( @@ -1053,68 +1053,68 @@ def outer( def tensordot( a: _ArrayLike[Never], b: _ArrayLike[Never], - axes: int | tuple[_ShapeLike, _ShapeLike] = ..., + axes: int | tuple[_ShapeLike, _ShapeLike] = 2, ) -> NDArray[Any]: ... @overload def tensordot( a: _ArrayLikeBool_co, b: _ArrayLikeBool_co, - axes: int | tuple[_ShapeLike, _ShapeLike] = ..., + axes: int | tuple[_ShapeLike, _ShapeLike] = 2, ) -> NDArray[np.bool]: ... @overload def tensordot( a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co, - axes: int | tuple[_ShapeLike, _ShapeLike] = ..., + axes: int | tuple[_ShapeLike, _ShapeLike] = 2, ) -> NDArray[unsignedinteger]: ... @overload def tensordot( a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, - axes: int | tuple[_ShapeLike, _ShapeLike] = ..., + axes: int | tuple[_ShapeLike, _ShapeLike] = 2, ) -> NDArray[signedinteger]: ... @overload def tensordot( a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, - axes: int | tuple[_ShapeLike, _ShapeLike] = ..., + axes: int | tuple[_ShapeLike, _ShapeLike] = 2, ) -> NDArray[floating]: ... @overload def tensordot( a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, - axes: int | tuple[_ShapeLike, _ShapeLike] = ..., + axes: int | tuple[_ShapeLike, _ShapeLike] = 2, ) -> NDArray[complexfloating]: ... @overload def tensordot( a: _ArrayLikeTD64_co, b: _ArrayLikeTD64_co, - axes: int | tuple[_ShapeLike, _ShapeLike] = ..., + axes: int | tuple[_ShapeLike, _ShapeLike] = 2, ) -> NDArray[timedelta64]: ... @overload def tensordot( a: _ArrayLikeObject_co, b: _ArrayLikeObject_co, - axes: int | tuple[_ShapeLike, _ShapeLike] = ..., + axes: int | tuple[_ShapeLike, _ShapeLike] = 2, ) -> NDArray[object_]: ... @overload def roll( a: _ArrayLike[_ScalarT], shift: _ShapeLike, - axis: _ShapeLike | None = ..., + axis: _ShapeLike | None = None, ) -> NDArray[_ScalarT]: ... 
@overload def roll( a: ArrayLike, shift: _ShapeLike, - axis: _ShapeLike | None = ..., + axis: _ShapeLike | None = None, ) -> NDArray[Any]: ... def rollaxis( a: NDArray[_ScalarT], axis: int, - start: int = ..., + start: int = 0, ) -> NDArray[_ScalarT]: ... def moveaxis( @@ -1127,71 +1127,71 @@ def moveaxis( def cross( a: _ArrayLike[Never], b: _ArrayLike[Never], - axisa: int = ..., - axisb: int = ..., - axisc: int = ..., - axis: int | None = ..., + axisa: int = -1, + axisb: int = -1, + axisc: int = -1, + axis: int | None = None, ) -> NDArray[Any]: ... @overload def cross( a: _ArrayLikeBool_co, b: _ArrayLikeBool_co, - axisa: int = ..., - axisb: int = ..., - axisc: int = ..., - axis: int | None = ..., + axisa: int = -1, + axisb: int = -1, + axisc: int = -1, + axis: int | None = None, ) -> NoReturn: ... @overload def cross( a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co, - axisa: int = ..., - axisb: int = ..., - axisc: int = ..., - axis: int | None = ..., + axisa: int = -1, + axisb: int = -1, + axisc: int = -1, + axis: int | None = None, ) -> NDArray[unsignedinteger]: ... @overload def cross( a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, - axisa: int = ..., - axisb: int = ..., - axisc: int = ..., - axis: int | None = ..., + axisa: int = -1, + axisb: int = -1, + axisc: int = -1, + axis: int | None = None, ) -> NDArray[signedinteger]: ... @overload def cross( a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, - axisa: int = ..., - axisb: int = ..., - axisc: int = ..., - axis: int | None = ..., + axisa: int = -1, + axisb: int = -1, + axisc: int = -1, + axis: int | None = None, ) -> NDArray[floating]: ... @overload def cross( a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, - axisa: int = ..., - axisb: int = ..., - axisc: int = ..., - axis: int | None = ..., + axisa: int = -1, + axisb: int = -1, + axisc: int = -1, + axis: int | None = None, ) -> NDArray[complexfloating]: ... 
@overload def cross( a: _ArrayLikeObject_co, b: _ArrayLikeObject_co, - axisa: int = ..., - axisb: int = ..., - axisc: int = ..., - axis: int | None = ..., + axisa: int = -1, + axisb: int = -1, + axisc: int = -1, + axis: int | None = None, ) -> NDArray[object_]: ... @overload def indices( dimensions: Sequence[int], dtype: type[int] = ..., - sparse: L[False] = ..., + sparse: L[False] = False, ) -> NDArray[int_]: ... @overload def indices( @@ -1210,7 +1210,7 @@ def indices( def indices( dimensions: Sequence[int], dtype: _DTypeLike[_ScalarT], - sparse: L[False] = ..., + sparse: L[False] = False, ) -> NDArray[_ScalarT]: ... @overload def indices( @@ -1222,7 +1222,7 @@ def indices( def indices( dimensions: Sequence[int], dtype: DTypeLike = ..., - sparse: L[False] = ..., + sparse: L[False] = False, ) -> NDArray[Any]: ... @overload def indices( @@ -1243,40 +1243,40 @@ def fromfunction( shape: Sequence[int], *, dtype: DTypeLike = ..., - like: _SupportsArrayFunc | None = ..., + like: _SupportsArrayFunc | None = None, **kwargs: Any, ) -> _T: ... def isscalar(element: object) -> TypeGuard[generic | complex | str | bytes | memoryview]: ... -def binary_repr(num: SupportsIndex, width: int | None = ...) -> str: ... +def binary_repr(num: SupportsIndex, width: int | None = None) -> str: ... def base_repr( number: SupportsAbs[float], - base: float = ..., - padding: SupportsIndex | None = ..., + base: float = 2, + padding: SupportsIndex | None = 0, ) -> str: ... @overload def identity( n: int, - dtype: None = ..., + dtype: None = None, *, - like: _SupportsArrayFunc | None = ..., + like: _SupportsArrayFunc | None = None, ) -> NDArray[float64]: ... @overload def identity( n: int, dtype: _DTypeLike[_ScalarT], *, - like: _SupportsArrayFunc | None = ..., + like: _SupportsArrayFunc | None = None, ) -> NDArray[_ScalarT]: ... 
@overload def identity( n: int, - dtype: DTypeLike | None = ..., + dtype: DTypeLike | None = None, *, - like: _SupportsArrayFunc | None = ..., + like: _SupportsArrayFunc | None = None, ) -> NDArray[Any]: ... def allclose( diff --git a/numpy/_core/shape_base.pyi b/numpy/_core/shape_base.pyi index c2c9c961e55b..9a5f8c3b9d60 100644 --- a/numpy/_core/shape_base.pyi +++ b/numpy/_core/shape_base.pyi @@ -68,72 +68,72 @@ def atleast_3d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[NDArray def vstack( tup: Sequence[_ArrayLike[_ScalarT]], *, - dtype: None = ..., - casting: _CastingKind = ... + dtype: None = None, + casting: _CastingKind = "same_kind" ) -> NDArray[_ScalarT]: ... @overload def vstack( tup: Sequence[ArrayLike], *, dtype: _DTypeLike[_ScalarT], - casting: _CastingKind = ... + casting: _CastingKind = "same_kind" ) -> NDArray[_ScalarT]: ... @overload def vstack( tup: Sequence[ArrayLike], *, - dtype: DTypeLike = ..., - casting: _CastingKind = ... + dtype: DTypeLike = None, + casting: _CastingKind = "same_kind" ) -> NDArray[Any]: ... @overload def hstack( tup: Sequence[_ArrayLike[_ScalarT]], *, - dtype: None = ..., - casting: _CastingKind = ... + dtype: None = None, + casting: _CastingKind = "same_kind" ) -> NDArray[_ScalarT]: ... @overload def hstack( tup: Sequence[ArrayLike], *, dtype: _DTypeLike[_ScalarT], - casting: _CastingKind = ... + casting: _CastingKind = "same_kind" ) -> NDArray[_ScalarT]: ... @overload def hstack( tup: Sequence[ArrayLike], *, - dtype: DTypeLike = ..., - casting: _CastingKind = ... + dtype: DTypeLike = None, + casting: _CastingKind = "same_kind" ) -> NDArray[Any]: ... @overload def stack( arrays: Sequence[_ArrayLike[_ScalarT]], - axis: SupportsIndex = ..., - out: None = ..., + axis: SupportsIndex = 0, + out: None = None, *, - dtype: None = ..., - casting: _CastingKind = ... + dtype: None = None, + casting: _CastingKind = "same_kind" ) -> NDArray[_ScalarT]: ... 
@overload def stack( arrays: Sequence[ArrayLike], - axis: SupportsIndex = ..., - out: None = ..., + axis: SupportsIndex = 0, + out: None = None, *, dtype: _DTypeLike[_ScalarT], - casting: _CastingKind = ... + casting: _CastingKind = "same_kind" ) -> NDArray[_ScalarT]: ... @overload def stack( arrays: Sequence[ArrayLike], - axis: SupportsIndex = ..., - out: None = ..., + axis: SupportsIndex = 0, + out: None = None, *, - dtype: DTypeLike = ..., - casting: _CastingKind = ... + dtype: DTypeLike = None, + casting: _CastingKind = "same_kind" ) -> NDArray[Any]: ... @overload def stack( @@ -159,14 +159,14 @@ def unstack( array: _ArrayLike[_ScalarT], /, *, - axis: int = ..., + axis: int = 0, ) -> tuple[NDArray[_ScalarT], ...]: ... @overload def unstack( array: ArrayLike, /, *, - axis: int = ..., + axis: int = 0, ) -> tuple[NDArray[Any], ...]: ... @overload diff --git a/numpy/_core/strings.pyi b/numpy/_core/strings.pyi index 1b8bfd84cc3d..2f630ce8556a 100644 --- a/numpy/_core/strings.pyi +++ b/numpy/_core/strings.pyi @@ -150,154 +150,154 @@ def str_len(x: UST_co) -> NDArray[np.int_]: ... def find( a: U_co, sub: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def find( a: S_co, sub: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def find( a: T_co, sub: T_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def rfind( a: U_co, sub: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def rfind( a: S_co, sub: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... 
@overload def rfind( a: T_co, sub: T_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def index( a: U_co, sub: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def index( a: S_co, sub: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def index( a: T_co, sub: T_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def rindex( a: U_co, sub: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def rindex( a: S_co, sub: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def rindex( a: T_co, sub: T_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def count( a: U_co, sub: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def count( a: S_co, sub: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def count( a: T_co, sub: T_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.int_]: ... @overload def startswith( a: U_co, prefix: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.bool]: ... @overload def startswith( a: S_co, prefix: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.bool]: ... 
@overload def startswith( a: T_co, prefix: T_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.bool]: ... @overload def endswith( a: U_co, suffix: U_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.bool]: ... @overload def endswith( a: S_co, suffix: S_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.bool]: ... @overload def endswith( a: T_co, suffix: T_co, - start: i_co = ..., - end: i_co | None = ..., + start: i_co = 0, + end: i_co | None = None, ) -> NDArray[np.bool]: ... def decode( @@ -312,13 +312,13 @@ def encode( ) -> NDArray[np.bytes_]: ... @overload -def expandtabs(a: U_co, tabsize: i_co = ...) -> NDArray[np.str_]: ... +def expandtabs(a: U_co, tabsize: i_co = 8) -> NDArray[np.str_]: ... @overload -def expandtabs(a: S_co, tabsize: i_co = ...) -> NDArray[np.bytes_]: ... +def expandtabs(a: S_co, tabsize: i_co = 8) -> NDArray[np.bytes_]: ... @overload -def expandtabs(a: _StringDTypeSupportsArray, tabsize: i_co = ...) -> _StringDTypeArray: ... +def expandtabs(a: _StringDTypeSupportsArray, tabsize: i_co = 8) -> _StringDTypeArray: ... @overload -def expandtabs(a: T_co, tabsize: i_co = ...) -> _StringDTypeOrUnicodeArray: ... +def expandtabs(a: T_co, tabsize: i_co = 8) -> _StringDTypeOrUnicodeArray: ... @overload def center(a: U_co, width: i_co, fillchar: UST_co = " ") -> NDArray[np.str_]: ... @@ -433,28 +433,28 @@ def replace( a: U_co, old: U_co, new: U_co, - count: i_co = ..., + count: i_co = -1, ) -> NDArray[np.str_]: ... @overload def replace( a: S_co, old: S_co, new: S_co, - count: i_co = ..., + count: i_co = -1, ) -> NDArray[np.bytes_]: ... @overload def replace( a: _StringDTypeSupportsArray, old: _StringDTypeSupportsArray, new: _StringDTypeSupportsArray, - count: i_co = ..., + count: i_co = -1, ) -> _StringDTypeArray: ... 
@overload def replace( a: T_co, old: T_co, new: T_co, - count: i_co = ..., + count: i_co = -1, ) -> _StringDTypeOrUnicodeArray: ... @overload diff --git a/numpy/typing/tests/data/fail/char.pyi b/numpy/typing/tests/data/fail/char.pyi index 62c4475c29be..3dbe5eda296e 100644 --- a/numpy/typing/tests/data/fail/char.pyi +++ b/numpy/typing/tests/data/fail/char.pyi @@ -19,9 +19,7 @@ np.char.join(AR_U, b"_") # type: ignore[arg-type] np.char.join(AR_S, "_") # type: ignore[arg-type] np.char.ljust(AR_U, 5, fillchar=b"a") # type: ignore[arg-type] -np.char.ljust(AR_S, 5, fillchar="a") # type: ignore[arg-type] np.char.rjust(AR_U, 5, fillchar=b"a") # type: ignore[arg-type] -np.char.rjust(AR_S, 5, fillchar="a") # type: ignore[arg-type] np.char.lstrip(AR_U, chars=b"a") # type: ignore[arg-type] np.char.lstrip(AR_S, chars="a") # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/fail/chararray.pyi b/numpy/typing/tests/data/fail/chararray.pyi index f1b7009439d1..589895510227 100644 --- a/numpy/typing/tests/data/fail/chararray.pyi +++ b/numpy/typing/tests/data/fail/chararray.pyi @@ -12,9 +12,7 @@ AR_U.join(b"_") # type: ignore[arg-type] AR_S.join("_") # type: ignore[arg-type] AR_U.ljust(5, fillchar=b"a") # type: ignore[arg-type] -AR_S.ljust(5, fillchar="a") # type: ignore[arg-type] AR_U.rjust(5, fillchar=b"a") # type: ignore[arg-type] -AR_S.rjust(5, fillchar="a") # type: ignore[arg-type] AR_U.lstrip(chars=b"a") # type: ignore[arg-type] AR_S.lstrip(chars="a") # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/reveal/char.pyi b/numpy/typing/tests/data/reveal/char.pyi index 5c6af73888d0..2fba2feae385 100644 --- a/numpy/typing/tests/data/reveal/char.pyi +++ b/numpy/typing/tests/data/reveal/char.pyi @@ -65,6 +65,7 @@ assert_type(np.char.join(AR_T, "_"), AR_TU_alias) assert_type(np.char.ljust(AR_U, 5), npt.NDArray[np.str_]) assert_type(np.char.ljust(AR_S, [4, 3, 1], fillchar=[b"a", b"b", b"c"]), npt.NDArray[np.bytes_]) +assert_type(np.char.ljust(AR_S, [4, 3, 1], 
fillchar="a"), npt.NDArray[np.bytes_]) assert_type(np.char.ljust(AR_T, 5), AR_T_alias) assert_type(np.char.ljust(AR_T, [4, 2, 1], fillchar=["a", "b", "c"]), AR_TU_alias) diff --git a/numpy/typing/tests/data/reveal/chararray.pyi b/numpy/typing/tests/data/reveal/chararray.pyi index b5f4392b75c8..5c3dc85038db 100644 --- a/numpy/typing/tests/data/reveal/chararray.pyi +++ b/numpy/typing/tests/data/reveal/chararray.pyi @@ -50,6 +50,7 @@ assert_type(AR_S.join([b"_", b""]), _BytesCharArray) assert_type(AR_U.ljust(5), _StrCharArray) assert_type(AR_S.ljust([4, 3, 1], fillchar=[b"a", b"b", b"c"]), _BytesCharArray) +assert_type(AR_S.ljust([4, 3, 1], fillchar="a"), _BytesCharArray) assert_type(AR_U.rjust(5), _StrCharArray) assert_type(AR_S.rjust([4, 3, 1], fillchar=[b"a", b"b", b"c"]), _BytesCharArray) From 9927e365c1f5d0d3a138a94ab2d1215cdcf51c57 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 20 Aug 2025 17:32:04 +0000 Subject: [PATCH 0327/1718] MAINT: Bump pypa/cibuildwheel from 3.1.3 to 3.1.4 Bumps [pypa/cibuildwheel](https://github.com/pypa/cibuildwheel) from 3.1.3 to 3.1.4. - [Release notes](https://github.com/pypa/cibuildwheel/releases) - [Changelog](https://github.com/pypa/cibuildwheel/blob/main/docs/changelog.md) - [Commits](https://github.com/pypa/cibuildwheel/compare/352e01339f0a173aa2a3eb57f01492e341e83865...c923d83ad9c1bc00211c5041d0c3f73294ff88f6) --- updated-dependencies: - dependency-name: pypa/cibuildwheel dependency-version: 3.1.4 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- .github/workflows/emscripten.yml | 2 +- .github/workflows/wheels.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index a1d5b399988e..e7503871e3f5 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -49,7 +49,7 @@ jobs: fetch-tags: true persist-credentials: false - - uses: pypa/cibuildwheel@352e01339f0a173aa2a3eb57f01492e341e83865 # v3.1.3 + - uses: pypa/cibuildwheel@c923d83ad9c1bc00211c5041d0c3f73294ff88f6 # v3.1.4 env: CIBW_PLATFORM: pyodide diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index e87af415d3c5..6d041831c22d 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -174,7 +174,7 @@ jobs: fi - name: Build wheels - uses: pypa/cibuildwheel@352e01339f0a173aa2a3eb57f01492e341e83865 # v3.1.3 + uses: pypa/cibuildwheel@c923d83ad9c1bc00211c5041d0c3f73294ff88f6 # v3.1.4 env: CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} From a409870243fee1683b057d52dc40b70d0364b185 Mon Sep 17 00:00:00 2001 From: Richard Smythe <82367391+richardsmythe@users.noreply.github.com> Date: Wed, 20 Aug 2025 20:44:49 +0100 Subject: [PATCH 0328/1718] DOC: Fix typo in tril_indices and triu_indices docstrings - (#29604) correspdonding -> corresponding --- numpy/lib/_twodim_base_impl.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/lib/_twodim_base_impl.py b/numpy/lib/_twodim_base_impl.py index dc6a55886fdb..bad797a970c9 100644 --- a/numpy/lib/_twodim_base_impl.py +++ b/numpy/lib/_twodim_base_impl.py @@ -926,7 +926,7 @@ def tril_indices(n, k=0, m=None): ------- inds : tuple of arrays The row and column indices, respectively. The row indices are sorted - in non-decreasing order, and the correspdonding column indices are + in non-decreasing order, and the corresponding column indices are strictly increasing for each row. 
See also @@ -1073,7 +1073,7 @@ def triu_indices(n, k=0, m=None): ------- inds : tuple, shape(2) of ndarrays, shape(`n`) The row and column indices, respectively. The row indices are sorted - in non-decreasing order, and the correspdonding column indices are + in non-decreasing order, and the corresponding column indices are strictly increasing for each row. See also From 3545479aad0388fea7bde3c143c56b6134231654 Mon Sep 17 00:00:00 2001 From: Britney Whittington <103079612+bwhitt7@users.noreply.github.com> Date: Wed, 20 Aug 2025 16:19:11 -0400 Subject: [PATCH 0329/1718] TST: Replace xunit setup with methods (#29596) --- numpy/_core/tests/test_defchararray.py | 414 +++++++++++++------------ 1 file changed, 221 insertions(+), 193 deletions(-) diff --git a/numpy/_core/tests/test_defchararray.py b/numpy/_core/tests/test_defchararray.py index 2607953a940a..04518bdbd671 100644 --- a/numpy/_core/tests/test_defchararray.py +++ b/numpy/_core/tests/test_defchararray.py @@ -134,63 +134,67 @@ def fail(): assert_raises(ValueError, fail) - class TestWhitespace: - def setup_method(self): - self.A = np.array([['abc ', '123 '], - ['789 ', 'xyz ']]).view(np.char.chararray) - self.B = np.array([['abc', '123'], - ['789', 'xyz']]).view(np.char.chararray) - def test1(self): - assert_(np.all(self.A == self.B)) - assert_(np.all(self.A >= self.B)) - assert_(np.all(self.A <= self.B)) - assert_(not np.any(self.A > self.B)) - assert_(not np.any(self.A < self.B)) - assert_(not np.any(self.A != self.B)) + A = np.array([['abc ', '123 '], + ['789 ', 'xyz ']]).view(np.char.chararray) + B = np.array([['abc', '123'], + ['789', 'xyz']]).view(np.char.chararray) + assert_(np.all(A == B)) + assert_(np.all(A >= B)) + assert_(np.all(A <= B)) + assert_(not np.any(A > B)) + assert_(not np.any(A < B)) + assert_(not np.any(A != B)) class TestChar: - def setup_method(self): - self.A = np.array('abc1', dtype='c').view(np.char.chararray) - def test_it(self): - assert_equal(self.A.shape, (4,)) - 
assert_equal(self.A.upper()[:2].tobytes(), b'AB') + A = np.array('abc1', dtype='c').view(np.char.chararray) + assert_equal(A.shape, (4,)) + assert_equal(A.upper()[:2].tobytes(), b'AB') class TestComparisons: - def setup_method(self): - self.A = np.array([['abc', 'abcc', '123'], - ['789', 'abc', 'xyz']]).view(np.char.chararray) - self.B = np.array([['efg', 'efg', '123 '], - ['051', 'efgg', 'tuv']]).view(np.char.chararray) + def A(self): + return np.array([['abc', 'abcc', '123'], + ['789', 'abc', 'xyz']]).view(np.char.chararray) + + def B(self): + return np.array([['efg', 'efg', '123 '], + ['051', 'efgg', 'tuv']]).view(np.char.chararray) def test_not_equal(self): - assert_array_equal((self.A != self.B), + A, B = self.A(), self.B() + assert_array_equal((A != B), [[True, True, False], [True, True, True]]) def test_equal(self): - assert_array_equal((self.A == self.B), + A, B = self.A(), self.B() + assert_array_equal((A == B), [[False, False, True], [False, False, False]]) def test_greater_equal(self): - assert_array_equal((self.A >= self.B), + A, B = self.A(), self.B() + assert_array_equal((A >= B), [[False, False, True], [True, False, True]]) def test_less_equal(self): - assert_array_equal((self.A <= self.B), + A, B = self.A(), self.B() + assert_array_equal((A <= B), [[True, True, True], [False, True, False]]) def test_greater(self): - assert_array_equal((self.A > self.B), + A, B = self.A(), self.B() + assert_array_equal((A > B), [[False, False, False], [True, False, True]]) def test_less(self): - assert_array_equal((self.A < self.B), + A, B = self.A(), self.B() + assert_array_equal((A < B), [[True, True, False], [False, True, False]]) def test_type(self): - out1 = np.char.equal(self.A, self.B) + A, B = self.A(), self.B() + out1 = np.char.equal(A, B) out2 = np.char.equal('a', 'a') assert_(isinstance(out1, np.ndarray)) assert_(isinstance(out2, np.ndarray)) @@ -198,59 +202,56 @@ def test_type(self): class TestComparisonsMixed1(TestComparisons): """Ticket #1276""" - def 
setup_method(self): - TestComparisons.setup_method(self) - self.B = np.array( + def B(self): + return np.array( [['efg', 'efg', '123 '], ['051', 'efgg', 'tuv']], np.str_).view(np.char.chararray) class TestComparisonsMixed2(TestComparisons): """Ticket #1276""" - def setup_method(self): - TestComparisons.setup_method(self) - self.A = np.array( + def A(self): + return np.array( [['abc', 'abcc', '123'], ['789', 'abc', 'xyz']], np.str_).view(np.char.chararray) class TestInformation: - def setup_method(self): - self.A = np.array([[' abc ', ''], - ['12345', 'MixedCase'], - ['123 \t 345 \0 ', 'UPPER']]) \ - .view(np.char.chararray) - self.B = np.array([[' \u03a3 ', ''], - ['12345', 'MixedCase'], - ['123 \t 345 \0 ', 'UPPER']]) \ - .view(np.char.chararray) - # Array with longer strings, > MEMCHR_CUT_OFF in code. - self.C = (np.array(['ABCDEFGHIJKLMNOPQRSTUVWXYZ', - '01234567890123456789012345']) - .view(np.char.chararray)) + def A(self): + return np.array([[' abc ', ''], + ['12345', 'MixedCase'], + ['123 \t 345 \0 ', 'UPPER']]) \ + .view(np.char.chararray) + + def B(self): + return np.array([[' \u03a3 ', ''], + ['12345', 'MixedCase'], + ['123 \t 345 \0 ', 'UPPER']]) \ + .view(np.char.chararray) def test_len(self): - assert_(issubclass(np.char.str_len(self.A).dtype.type, np.integer)) - assert_array_equal(np.char.str_len(self.A), [[5, 0], [5, 9], [12, 5]]) - assert_array_equal(np.char.str_len(self.B), [[3, 0], [5, 9], [12, 5]]) + A, B = self.A(), self.B() + assert_(issubclass(np.char.str_len(A).dtype.type, np.integer)) + assert_array_equal(np.char.str_len(A), [[5, 0], [5, 9], [12, 5]]) + assert_array_equal(np.char.str_len(B), [[3, 0], [5, 9], [12, 5]]) def test_count(self): - assert_(issubclass(self.A.count('').dtype.type, np.integer)) - assert_array_equal(self.A.count('a'), [[1, 0], [0, 1], [0, 0]]) - assert_array_equal(self.A.count('123'), [[0, 0], [1, 0], [1, 0]]) + A, B = self.A(), self.B() + assert_(issubclass(A.count('').dtype.type, np.integer)) + 
assert_array_equal(A.count('a'), [[1, 0], [0, 1], [0, 0]]) + assert_array_equal(A.count('123'), [[0, 0], [1, 0], [1, 0]]) # Python doesn't seem to like counting NULL characters - # assert_array_equal(self.A.count('\0'), [[0, 0], [0, 0], [1, 0]]) - assert_array_equal(self.A.count('a', 0, 2), [[1, 0], [0, 0], [0, 0]]) - assert_array_equal(self.B.count('a'), [[0, 0], [0, 1], [0, 0]]) - assert_array_equal(self.B.count('123'), [[0, 0], [1, 0], [1, 0]]) - # assert_array_equal(self.B.count('\0'), [[0, 0], [0, 0], [1, 0]]) + assert_array_equal(A.count('a', 0, 2), [[1, 0], [0, 0], [0, 0]]) + assert_array_equal(B.count('a'), [[0, 0], [0, 1], [0, 0]]) + assert_array_equal(B.count('123'), [[0, 0], [1, 0], [1, 0]]) def test_endswith(self): - assert_(issubclass(self.A.endswith('').dtype.type, np.bool)) - assert_array_equal(self.A.endswith(' '), [[1, 0], [0, 0], [1, 0]]) - assert_array_equal(self.A.endswith('3', 0, 3), [[0, 0], [1, 0], [1, 0]]) + A = self.A() + assert_(issubclass(A.endswith('').dtype.type, np.bool)) + assert_array_equal(A.endswith(' '), [[1, 0], [0, 0], [1, 0]]) + assert_array_equal(A.endswith('3', 0, 3), [[0, 0], [1, 0], [1, 0]]) def fail(): - self.A.endswith('3', 'fdjk') + A.endswith('3', 'fdjk') assert_raises(TypeError, fail) @@ -260,7 +261,7 @@ def fail(): ("S", lambda x: x.encode('ascii')), ]) def test_find(self, dtype, encode): - A = self.A.astype(dtype) + A = self.A().astype(dtype) assert_(issubclass(A.find(encode('a')).dtype.type, np.integer)) assert_array_equal(A.find(encode('a')), [[1, -1], [-1, 6], [-1, -1]]) @@ -270,103 +271,119 @@ def test_find(self, dtype, encode): [[1, -1], [-1, -1], [-1, -1]]) assert_array_equal(A.find([encode('1'), encode('P')]), [[-1, -1], [0, -1], [0, 1]]) - C = self.C.astype(dtype) + C = (np.array(['ABCDEFGHIJKLMNOPQRSTUVWXYZ', + '01234567890123456789012345']) + .view(np.char.chararray)).astype(dtype) assert_array_equal(C.find(encode('M')), [12, -1]) def test_index(self): + A = self.A() def fail(): - self.A.index('a') + 
A.index('a') assert_raises(ValueError, fail) assert_(np.char.index('abcba', 'b') == 1) assert_(issubclass(np.char.index('abcba', 'b').dtype.type, np.integer)) def test_isalnum(self): - assert_(issubclass(self.A.isalnum().dtype.type, np.bool)) - assert_array_equal(self.A.isalnum(), [[False, False], [True, True], [False, True]]) + A = self.A() + assert_(issubclass(A.isalnum().dtype.type, np.bool)) + assert_array_equal(A.isalnum(), [[False, False], [True, True], [False, True]]) def test_isalpha(self): - assert_(issubclass(self.A.isalpha().dtype.type, np.bool)) - assert_array_equal(self.A.isalpha(), [[False, False], [False, True], [False, True]]) + A = self.A() + assert_(issubclass(A.isalpha().dtype.type, np.bool)) + assert_array_equal(A.isalpha(), [[False, False], [False, True], [False, True]]) def test_isdigit(self): - assert_(issubclass(self.A.isdigit().dtype.type, np.bool)) - assert_array_equal(self.A.isdigit(), [[False, False], [True, False], [False, False]]) + A = self.A() + assert_(issubclass(A.isdigit().dtype.type, np.bool)) + assert_array_equal(A.isdigit(), [[False, False], [True, False], [False, False]]) def test_islower(self): - assert_(issubclass(self.A.islower().dtype.type, np.bool)) - assert_array_equal(self.A.islower(), [[True, False], [False, False], [False, False]]) + A = self.A() + assert_(issubclass(A.islower().dtype.type, np.bool)) + assert_array_equal(A.islower(), [[True, False], [False, False], [False, False]]) def test_isspace(self): - assert_(issubclass(self.A.isspace().dtype.type, np.bool)) - assert_array_equal(self.A.isspace(), [[False, False], [False, False], [False, False]]) + A = self.A() + assert_(issubclass(A.isspace().dtype.type, np.bool)) + assert_array_equal(A.isspace(), [[False, False], [False, False], [False, False]]) def test_istitle(self): - assert_(issubclass(self.A.istitle().dtype.type, np.bool)) - assert_array_equal(self.A.istitle(), [[False, False], [False, False], [False, False]]) + A = self.A() + 
assert_(issubclass(A.istitle().dtype.type, np.bool)) + assert_array_equal(A.istitle(), [[False, False], [False, False], [False, False]]) def test_isupper(self): - assert_(issubclass(self.A.isupper().dtype.type, np.bool)) - assert_array_equal(self.A.isupper(), [[False, False], [False, False], [False, True]]) + A = self.A() + assert_(issubclass(A.isupper().dtype.type, np.bool)) + assert_array_equal(A.isupper(), [[False, False], [False, False], [False, True]]) def test_rfind(self): - assert_(issubclass(self.A.rfind('a').dtype.type, np.integer)) - assert_array_equal(self.A.rfind('a'), [[1, -1], [-1, 6], [-1, -1]]) - assert_array_equal(self.A.rfind('3'), [[-1, -1], [2, -1], [6, -1]]) - assert_array_equal(self.A.rfind('a', 0, 2), [[1, -1], [-1, -1], [-1, -1]]) - assert_array_equal(self.A.rfind(['1', 'P']), [[-1, -1], [0, -1], [0, 2]]) + A = self.A() + assert_(issubclass(A.rfind('a').dtype.type, np.integer)) + assert_array_equal(A.rfind('a'), [[1, -1], [-1, 6], [-1, -1]]) + assert_array_equal(A.rfind('3'), [[-1, -1], [2, -1], [6, -1]]) + assert_array_equal(A.rfind('a', 0, 2), [[1, -1], [-1, -1], [-1, -1]]) + assert_array_equal(A.rfind(['1', 'P']), [[-1, -1], [0, -1], [0, 2]]) def test_rindex(self): + A = self.A() def fail(): - self.A.rindex('a') + A.rindex('a') assert_raises(ValueError, fail) assert_(np.char.rindex('abcba', 'b') == 3) assert_(issubclass(np.char.rindex('abcba', 'b').dtype.type, np.integer)) def test_startswith(self): - assert_(issubclass(self.A.startswith('').dtype.type, np.bool)) - assert_array_equal(self.A.startswith(' '), [[1, 0], [0, 0], [0, 0]]) - assert_array_equal(self.A.startswith('1', 0, 3), [[0, 0], [1, 0], [1, 0]]) + A = self.A() + assert_(issubclass(A.startswith('').dtype.type, np.bool)) + assert_array_equal(A.startswith(' '), [[1, 0], [0, 0], [0, 0]]) + assert_array_equal(A.startswith('1', 0, 3), [[0, 0], [1, 0], [1, 0]]) def fail(): - self.A.startswith('3', 'fdjk') + A.startswith('3', 'fdjk') assert_raises(TypeError, fail) - class 
TestMethods: - def setup_method(self): - self.A = np.array([[' abc ', ''], - ['12345', 'MixedCase'], - ['123 \t 345 \0 ', 'UPPER']], - dtype='S').view(np.char.chararray) - self.B = np.array([[' \u03a3 ', ''], - ['12345', 'MixedCase'], - ['123 \t 345 \0 ', 'UPPER']]).view( - np.char.chararray) + def A(self): + return np.array([[' abc ', ''], + ['12345', 'MixedCase'], + ['123 \t 345 \0 ', 'UPPER']], + dtype='S').view(np.char.chararray) + + def B(self): + return np.array([[' \u03a3 ', ''], + ['12345', 'MixedCase'], + ['123 \t 345 \0 ', 'UPPER']]) \ + .view(np.char.chararray) def test_capitalize(self): + A, B = self.A(), self.B() tgt = [[b' abc ', b''], [b'12345', b'Mixedcase'], [b'123 \t 345 \0 ', b'Upper']] - assert_(issubclass(self.A.capitalize().dtype.type, np.bytes_)) - assert_array_equal(self.A.capitalize(), tgt) + assert_(issubclass(A.capitalize().dtype.type, np.bytes_)) + assert_array_equal(A.capitalize(), tgt) tgt = [[' \u03c3 ', ''], ['12345', 'Mixedcase'], ['123 \t 345 \0 ', 'Upper']] - assert_(issubclass(self.B.capitalize().dtype.type, np.str_)) - assert_array_equal(self.B.capitalize(), tgt) + assert_(issubclass(B.capitalize().dtype.type, np.str_)) + assert_array_equal(B.capitalize(), tgt) def test_center(self): - assert_(issubclass(self.A.center(10).dtype.type, np.bytes_)) - C = self.A.center([10, 20]) + A = self.A() + assert_(issubclass(A.center(10).dtype.type, np.bytes_)) + C = A.center([10, 20]) assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]]) - C = self.A.center(20, b'#') + C = A.center(20, b'#') assert_(np.all(C.startswith(b'#'))) assert_(np.all(C.endswith(b'#'))) @@ -381,17 +398,17 @@ def test_decode(self): assert_(A.decode('unicode-escape')[0] == '\u03a3') def test_encode(self): - B = self.B.encode('unicode_escape') + B = self.B().encode('unicode_escape') assert_(B[0][0] == ' \\u03a3 '.encode('latin1')) def test_expandtabs(self): - T = self.A.expandtabs() + T = self.A().expandtabs() assert_(T[2, 0] == b'123 345 \0') def 
test_join(self): # NOTE: list(b'123') == [49, 50, 51] # so that b','.join(b'123') results to an error on Py3 - A0 = self.A.decode('ascii') + A0 = self.A().decode('ascii') A = np.char.join([',', '#'], A0) assert_(issubclass(A.dtype.type, np.str_)) @@ -401,12 +418,13 @@ def test_join(self): assert_array_equal(np.char.join([',', '#'], A0), tgt) def test_ljust(self): - assert_(issubclass(self.A.ljust(10).dtype.type, np.bytes_)) + A = self.A() + assert_(issubclass(A.ljust(10).dtype.type, np.bytes_)) - C = self.A.ljust([10, 20]) + C = A.ljust([10, 20]) assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]]) - C = self.A.ljust(20, b'#') + C = A.ljust(20, b'#') assert_array_equal(C.startswith(b'#'), [ [False, True], [False, False], [False, False]]) assert_(np.all(C.endswith(b'#'))) @@ -418,38 +436,41 @@ def test_ljust(self): assert_array_equal(C, tgt) def test_lower(self): + A, B = self.A(), self.B() tgt = [[b' abc ', b''], [b'12345', b'mixedcase'], [b'123 \t 345 \0 ', b'upper']] - assert_(issubclass(self.A.lower().dtype.type, np.bytes_)) - assert_array_equal(self.A.lower(), tgt) + assert_(issubclass(A.lower().dtype.type, np.bytes_)) + assert_array_equal(A.lower(), tgt) tgt = [[' \u03c3 ', ''], ['12345', 'mixedcase'], ['123 \t 345 \0 ', 'upper']] - assert_(issubclass(self.B.lower().dtype.type, np.str_)) - assert_array_equal(self.B.lower(), tgt) + assert_(issubclass(B.lower().dtype.type, np.str_)) + assert_array_equal(B.lower(), tgt) def test_lstrip(self): + A, B = self.A(), self.B() tgt = [[b'abc ', b''], [b'12345', b'MixedCase'], [b'123 \t 345 \0 ', b'UPPER']] - assert_(issubclass(self.A.lstrip().dtype.type, np.bytes_)) - assert_array_equal(self.A.lstrip(), tgt) + assert_(issubclass(A.lstrip().dtype.type, np.bytes_)) + assert_array_equal(A.lstrip(), tgt) tgt = [[b' abc', b''], [b'2345', b'ixedCase'], [b'23 \t 345 \x00', b'UPPER']] - assert_array_equal(self.A.lstrip([b'1', b'M']), tgt) + assert_array_equal(A.lstrip([b'1', b'M']), tgt) tgt = [['\u03a3 ', ''], 
['12345', 'MixedCase'], ['123 \t 345 \0 ', 'UPPER']] - assert_(issubclass(self.B.lstrip().dtype.type, np.str_)) - assert_array_equal(self.B.lstrip(), tgt) + assert_(issubclass(B.lstrip().dtype.type, np.str_)) + assert_array_equal(B.lstrip(), tgt) def test_partition(self): - P = self.A.partition([b'3', b'M']) + A = self.A() + P = A.partition([b'3', b'M']) tgt = [[(b' abc ', b'', b''), (b'', b'', b'')], [(b'12', b'3', b'45'), (b'', b'M', b'ixedCase')], [(b'12', b'3', b' \t 345 \0 '), (b'UPPER', b'', b'')]] @@ -457,7 +478,8 @@ def test_partition(self): assert_array_equal(P, tgt) def test_replace(self): - R = self.A.replace([b'3', b'a'], + A = self.A() + R = A.replace([b'3', b'a'], [b'##########', b'@']) tgt = [[b' abc ', b''], [b'12##########45', b'MixedC@se'], @@ -466,14 +488,14 @@ def test_replace(self): assert_array_equal(R, tgt) # Test special cases that should just return the input array, # since replacements are not possible or do nothing. - S1 = self.A.replace(b'A very long byte string, longer than A', b'') - assert_array_equal(S1, self.A) - S2 = self.A.replace(b'', b'') - assert_array_equal(S2, self.A) - S3 = self.A.replace(b'3', b'3') - assert_array_equal(S3, self.A) - S4 = self.A.replace(b'3', b'', count=0) - assert_array_equal(S4, self.A) + S1 = A.replace(b'A very long byte string, longer than A', b'') + assert_array_equal(S1, A) + S2 = A.replace(b'', b'') + assert_array_equal(S2, A) + S3 = A.replace(b'3', b'3') + assert_array_equal(S3, A) + S4 = A.replace(b'3', b'', count=0) + assert_array_equal(S4, A) def test_replace_count_and_size(self): a = np.array(['0123456789' * i for i in range(4)] @@ -510,12 +532,13 @@ def test_replace_broadcasting(self): assert_array_equal(r3, np.array(['X,X,X', 'X,0', 'X'])) def test_rjust(self): - assert_(issubclass(self.A.rjust(10).dtype.type, np.bytes_)) + A = self.A() + assert_(issubclass(A.rjust(10).dtype.type, np.bytes_)) - C = self.A.rjust([10, 20]) + C = A.rjust([10, 20]) assert_array_equal(np.char.str_len(C), [[10, 20], 
[10, 20], [12, 20]]) - C = self.A.rjust(20, b'#') + C = A.rjust(20, b'#') assert_(np.all(C.startswith(b'#'))) assert_array_equal(C.endswith(b'#'), [[False, True], [False, False], [False, False]]) @@ -527,7 +550,8 @@ def test_rjust(self): assert_array_equal(C, tgt) def test_rpartition(self): - P = self.A.rpartition([b'3', b'M']) + A = self.A() + P = A.rpartition([b'3', b'M']) tgt = [[(b'', b'', b' abc '), (b'', b'', b'')], [(b'12', b'3', b'45'), (b'', b'M', b'ixedCase')], [(b'123 \t ', b'3', b'45 \0 '), (b'', b'', b'UPPER')]] @@ -535,7 +559,7 @@ def test_rpartition(self): assert_array_equal(P, tgt) def test_rsplit(self): - A = self.A.rsplit(b'3') + A = self.A().rsplit(b'3') tgt = [[[b' abc '], [b'']], [[b'12', b'45'], [b'MixedCase']], [[b'12', b' \t ', b'45 \x00 '], [b'UPPER']]] @@ -543,45 +567,47 @@ def test_rsplit(self): assert_equal(A.tolist(), tgt) def test_rstrip(self): - assert_(issubclass(self.A.rstrip().dtype.type, np.bytes_)) + A, B = self.A(), self.B() + assert_(issubclass(A.rstrip().dtype.type, np.bytes_)) tgt = [[b' abc', b''], [b'12345', b'MixedCase'], [b'123 \t 345', b'UPPER']] - assert_array_equal(self.A.rstrip(), tgt) + assert_array_equal(A.rstrip(), tgt) tgt = [[b' abc ', b''], [b'1234', b'MixedCase'], [b'123 \t 345 \x00', b'UPP'] ] - assert_array_equal(self.A.rstrip([b'5', b'ER']), tgt) + assert_array_equal(A.rstrip([b'5', b'ER']), tgt) tgt = [[' \u03a3', ''], ['12345', 'MixedCase'], ['123 \t 345', 'UPPER']] - assert_(issubclass(self.B.rstrip().dtype.type, np.str_)) - assert_array_equal(self.B.rstrip(), tgt) + assert_(issubclass(B.rstrip().dtype.type, np.str_)) + assert_array_equal(B.rstrip(), tgt) def test_strip(self): + A, B = self.A(), self.B() tgt = [[b'abc', b''], [b'12345', b'MixedCase'], [b'123 \t 345', b'UPPER']] - assert_(issubclass(self.A.strip().dtype.type, np.bytes_)) - assert_array_equal(self.A.strip(), tgt) + assert_(issubclass(A.strip().dtype.type, np.bytes_)) + assert_array_equal(A.strip(), tgt) tgt = [[b' abc ', b''], [b'234', 
b'ixedCas'], [b'23 \t 345 \x00', b'UPP']] - assert_array_equal(self.A.strip([b'15', b'EReM']), tgt) + assert_array_equal(A.strip([b'15', b'EReM']), tgt) tgt = [['\u03a3', ''], ['12345', 'MixedCase'], ['123 \t 345', 'UPPER']] - assert_(issubclass(self.B.strip().dtype.type, np.str_)) - assert_array_equal(self.B.strip(), tgt) + assert_(issubclass(B.strip().dtype.type, np.str_)) + assert_array_equal(B.strip(), tgt) def test_split(self): - A = self.A.split(b'3') + A = self.A().split(b'3') tgt = [ [[b' abc '], [b'']], [[b'12', b'45'], [b'MixedCase']], @@ -596,90 +622,98 @@ def test_splitlines(self): assert_(len(A[0]) == 3) def test_swapcase(self): + A, B = self.A(), self.B() tgt = [[b' ABC ', b''], [b'12345', b'mIXEDcASE'], [b'123 \t 345 \0 ', b'upper']] - assert_(issubclass(self.A.swapcase().dtype.type, np.bytes_)) - assert_array_equal(self.A.swapcase(), tgt) + assert_(issubclass(A.swapcase().dtype.type, np.bytes_)) + assert_array_equal(A.swapcase(), tgt) tgt = [[' \u03c3 ', ''], ['12345', 'mIXEDcASE'], ['123 \t 345 \0 ', 'upper']] - assert_(issubclass(self.B.swapcase().dtype.type, np.str_)) - assert_array_equal(self.B.swapcase(), tgt) + assert_(issubclass(B.swapcase().dtype.type, np.str_)) + assert_array_equal(B.swapcase(), tgt) def test_title(self): + A, B = self.A(), self.B() tgt = [[b' Abc ', b''], [b'12345', b'Mixedcase'], [b'123 \t 345 \0 ', b'Upper']] - assert_(issubclass(self.A.title().dtype.type, np.bytes_)) - assert_array_equal(self.A.title(), tgt) + assert_(issubclass(A.title().dtype.type, np.bytes_)) + assert_array_equal(A.title(), tgt) tgt = [[' \u03a3 ', ''], ['12345', 'Mixedcase'], ['123 \t 345 \0 ', 'Upper']] - assert_(issubclass(self.B.title().dtype.type, np.str_)) - assert_array_equal(self.B.title(), tgt) + assert_(issubclass(B.title().dtype.type, np.str_)) + assert_array_equal(B.title(), tgt) def test_upper(self): + A, B = self.A(), self.B() tgt = [[b' ABC ', b''], [b'12345', b'MIXEDCASE'], [b'123 \t 345 \0 ', b'UPPER']] - 
assert_(issubclass(self.A.upper().dtype.type, np.bytes_)) - assert_array_equal(self.A.upper(), tgt) + assert_(issubclass(A.upper().dtype.type, np.bytes_)) + assert_array_equal(A.upper(), tgt) tgt = [[' \u03a3 ', ''], ['12345', 'MIXEDCASE'], ['123 \t 345 \0 ', 'UPPER']] - assert_(issubclass(self.B.upper().dtype.type, np.str_)) - assert_array_equal(self.B.upper(), tgt) + assert_(issubclass(B.upper().dtype.type, np.str_)) + assert_array_equal(B.upper(), tgt) def test_isnumeric(self): + A, B = self.A(), self.B() def fail(): - self.A.isnumeric() + A.isnumeric() assert_raises(TypeError, fail) - assert_(issubclass(self.B.isnumeric().dtype.type, np.bool)) - assert_array_equal(self.B.isnumeric(), [ + assert_(issubclass(B.isnumeric().dtype.type, np.bool)) + assert_array_equal(B.isnumeric(), [ [False, False], [True, False], [False, False]]) def test_isdecimal(self): + A, B = self.A(), self.B() def fail(): - self.A.isdecimal() + A.isdecimal() assert_raises(TypeError, fail) - assert_(issubclass(self.B.isdecimal().dtype.type, np.bool)) - assert_array_equal(self.B.isdecimal(), [ + assert_(issubclass(B.isdecimal().dtype.type, np.bool)) + assert_array_equal(B.isdecimal(), [ [False, False], [True, False], [False, False]]) - class TestOperations: - def setup_method(self): - self.A = np.array([['abc', '123'], - ['789', 'xyz']]).view(np.char.chararray) - self.B = np.array([['efg', '456'], - ['051', 'tuv']]).view(np.char.chararray) + def A(self): + return np.array([['abc', '123'], + ['789', 'xyz']]).view(np.char.chararray) + + def B(self): + return np.array([['efg', '456'], + ['051', 'tuv']]).view(np.char.chararray) def test_add(self): + A, B = self.A(), self.B() AB = np.array([['abcefg', '123456'], ['789051', 'xyztuv']]).view(np.char.chararray) - assert_array_equal(AB, (self.A + self.B)) - assert_(len((self.A + self.B)[0][0]) == 6) + assert_array_equal(AB, (A + B)) + assert_(len((A + B)[0][0]) == 6) def test_radd(self): + A = self.A() QA = np.array([['qabc', 'q123'], ['q789', 
'qxyz']]).view(np.char.chararray) - assert_array_equal(QA, ('q' + self.A)) + assert_array_equal(QA, ('q' + A)) def test_mul(self): - A = self.A + A = self.A() for r in (2, 3, 5, 7, 197): Ar = np.array([[A[0, 0] * r, A[0, 1] * r], [A[1, 0] * r, A[1, 1] * r]]).view(np.char.chararray) - assert_array_equal(Ar, (self.A * r)) + assert_array_equal(Ar, (A * r)) for ob in [object(), 'qrs']: with assert_raises_regex(ValueError, @@ -687,11 +721,11 @@ def test_mul(self): A * ob def test_rmul(self): - A = self.A + A = self.A() for r in (2, 3, 5, 7, 197): Ar = np.array([[A[0, 0] * r, A[0, 1] * r], [A[1, 0] * r, A[1, 1] * r]]).view(np.char.chararray) - assert_array_equal(Ar, (r * self.A)) + assert_array_equal(Ar, (r * A)) for ob in [object(), 'qrs']: with assert_raises_regex(ValueError, @@ -716,13 +750,14 @@ def test_mod(self): assert_array_equal(A2, (A % [[1, 2], [3, 4]])) def test_rmod(self): - assert_(f"{self.A}" == str(self.A)) - assert_(f"{self.A!r}" == repr(self.A)) + A = self.A() + assert_(f"{A}" == str(A)) + assert_(f"{A!r}" == repr(A)) for ob in [42, object()]: with assert_raises_regex( TypeError, "unsupported operand type.* and 'chararray'"): - ob % self.A + ob % A def test_slice(self): """Regression test for https://github.com/numpy/numpy/issues/5982""" @@ -751,27 +786,21 @@ def test_getitem_length_zero_item(self, data): # or does not have length 0. 
assert_equal(a[1], a.dtype.type()) - class TestMethodsEmptyArray: - def setup_method(self): - self.U = np.array([], dtype='U') - self.S = np.array([], dtype='S') - def test_encode(self): - res = np.char.encode(self.U) + res = np.char.encode(np.array([], dtype='U')) assert_array_equal(res, []) assert_(res.dtype.char == 'S') def test_decode(self): - res = np.char.decode(self.S) + res = np.char.decode(np.array([], dtype='S')) assert_array_equal(res, []) assert_(res.dtype.char == 'U') def test_decode_with_reshape(self): - res = np.char.decode(self.S.reshape((1, 0, 1))) + res = np.char.decode(np.array([], dtype='S').reshape((1, 0, 1))) assert_(res.shape == (1, 0, 1)) - class TestMethodsScalarValues: def test_mod(self): A = np.array([[' abc ', ''], @@ -816,7 +845,6 @@ def test_replace(self): assert_equal(np.char.replace('Python is good', 'good', 'great'), 'Python is great') - def test_empty_indexing(): """Regression test for ticket 1948.""" # Check that indexing a chararray with an empty list/array returns an From f9102115ee4e97a07d8421f7344c688a4ca676c4 Mon Sep 17 00:00:00 2001 From: ixgbe <1113177880@qq.com> Date: Thu, 21 Aug 2025 09:47:03 +0800 Subject: [PATCH 0330/1718] ENH: Enable unit tests for RISC-V CPU dispatcher utilities Signed-off-by: Wang Yang --- numpy/_core/meson.build | 2 +- numpy/_core/tests/test_cpu_dispatcher.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index b4c769810ad8..61a9d53a42d0 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -777,7 +777,7 @@ _umath_tests_mtargets = mod_features.multi_targets( AVX2, SSE41, SSE2, ASIMDHP, ASIMD, NEON, VSX3, VSX2, VSX, - VXE, VX, + VXE, VX, RVV, ], baseline: CPU_BASELINE, prefix: 'NPY_', diff --git a/numpy/_core/tests/test_cpu_dispatcher.py b/numpy/_core/tests/test_cpu_dispatcher.py index fc9d5e3147e0..f665fa6464ef 100644 --- a/numpy/_core/tests/test_cpu_dispatcher.py +++ b/numpy/_core/tests/test_cpu_dispatcher.py @@ 
-15,7 +15,7 @@ def test_dispatcher(): "SSE2", "SSE41", "AVX2", "VSX", "VSX2", "VSX3", "NEON", "ASIMD", "ASIMDHP", - "VX", "VXE", "LSX" + "VX", "VXE", "LSX", "RVV" ) highest_sfx = "" # no suffix for the baseline all_sfx = [] From 8efb7b76aad6179c9dc052038cb24024be280fa3 Mon Sep 17 00:00:00 2001 From: Hunter Hogan Date: Wed, 20 Aug 2025 22:01:05 -0500 Subject: [PATCH 0331/1718] TYP: ndarray.fill() takes no keyword arguments ```python (.venv) C:\apps\mapFolding>py Python 3.13.7 (tags/v3.13.7:bcee1c3, Aug 14 2025, 14:15:11) [MSC v.1944 64 bit (AMD64)] on win32 Type "help", "copyright", "credits" or "license" for more information. >>> import numpy >>> aa = numpy.zeros((4,3)) >>> aa array([[0., 0., 0.], [0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]) >>> aa.fill(value=9) Traceback (most recent call last): File "", line 1, in aa.fill(value=9) ~~~~~~~^^^^^^^^^ TypeError: ndarray.fill() takes no keyword arguments >>> aa array([[0., 0., 0.], [0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]) >>> aa.fill(9) >>> aa array([[9., 9., 9.], [9., 9., 9.], [9., 9., 9.], [9., 9., 9.]]) >>> ``` --- numpy/__init__.pyi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 1f47f2aae0d7..0982a502f018 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2177,7 +2177,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @strides.setter def strides(self, value: _ShapeLike) -> None: ... def byteswap(self, inplace: builtins.bool = ...) -> Self: ... - def fill(self, value: Any) -> None: ... + def fill(self, value: Any, /) -> None: ... @property def flat(self) -> flatiter[Self]: ... 
From cd86711619fa2bf6aa44d9f3bd053c3134e8bc25 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 21 Aug 2025 19:57:58 +0200 Subject: [PATCH 0332/1718] CI: more specific mypy_primer ``on:`` paths --- .github/workflows/mypy_primer.yml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/.github/workflows/mypy_primer.yml b/.github/workflows/mypy_primer.yml index 544c568a522d..2bfc4197e9b8 100644 --- a/.github/workflows/mypy_primer.yml +++ b/.github/workflows/mypy_primer.yml @@ -4,9 +4,13 @@ on: # Only run on PR, since we diff against main pull_request: paths: - - "**/*.pyi" - ".github/workflows/mypy_primer.yml" - ".github/workflows/mypy_primer_comment.yml" + - "numpy/**/*.pyi" + - "numpy/_typing/*.py" + - "numpy/typing/*.py" + - "!numpy/typing/tests/**" + - "numpy/py.typed" concurrency: group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} From f2e65e7c9c69f8aa21094d2b897d680c208fb221 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 21 Aug 2025 20:07:55 +0200 Subject: [PATCH 0333/1718] CI: replace comment-hider acction in mypy_primer workflow --- .github/workflows/mypy_primer_comment.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/mypy_primer_comment.yml b/.github/workflows/mypy_primer_comment.yml index be0dda7f7dec..338a53da6a13 100644 --- a/.github/workflows/mypy_primer_comment.yml +++ b/.github/workflows/mypy_primer_comment.yml @@ -49,10 +49,10 @@ jobs: return parseInt(fs.readFileSync("pr_number.txt", { encoding: "utf8" })) - name: Hide old comments - uses: kanga333/comment-hider@c12bb20b48aeb8fc098e35967de8d4f8018fffdf # v0.4.0 + uses: int128/hide-comment-action@a30d551065e4231e6d7a671bb5ce884f9ee6417b # v1.43.0 with: - github_token: ${{ secrets.GITHUB_TOKEN }} - issue_number: ${{ steps.get-pr-number.outputs.result }} + token: ${{ secrets.GITHUB_TOKEN }} + issue-number: ${{ steps.get-pr-number.outputs.result }} - run: cat diff_*.txt | tee fulldiff.txt From 
6e7641976458d2310b151ad8e6ba2be5ee55a53b Mon Sep 17 00:00:00 2001 From: Mohit Deoli Date: Wed, 13 Aug 2025 22:34:10 +0530 Subject: [PATCH 0334/1718] DOC: document datetime/timedelta to PyObject conversion --- doc/source/reference/arrays.datetime.rst | 18 ++++++++++++++++++ numpy/_core/src/multiarray/_datetime.h | 1 + 2 files changed, 19 insertions(+) diff --git a/doc/source/reference/arrays.datetime.rst b/doc/source/reference/arrays.datetime.rst index 2095dc3aea49..9c53d1bbdb1e 100644 --- a/doc/source/reference/arrays.datetime.rst +++ b/doc/source/reference/arrays.datetime.rst @@ -309,6 +309,24 @@ us / μs microsecond +/- 2.9e5 years [290301 BC, 294241 AD] as attosecond +/- 9.2 seconds [ 1969 AD, 1970 AD] ======== ================ ======================= ========================== +Converting datetime and timedelta to Python Object +================================================== + +NumPy follows a strict protocol when converting datetime and timedelta to Python Objects (e.g., tuple, list, datetime.datetime). + +For conversion of datetime to a Python Object: + +- For days or coarser, returns a datetime.date. +- For days or coarser, returns a datetime.date. +- For microseconds or coarser, returns a datetime.datetime. +- For units finer than microseconds, returns an integer. + +For conversion of timedelta to Python Object + +- Not-a-time is returned as the string "NaT". +- For microseconds or coarser, returns a datetime.timedelta. +- For units finer than microseconds, returns an integer. + Business day functionality ========================== diff --git a/numpy/_core/src/multiarray/_datetime.h b/numpy/_core/src/multiarray/_datetime.h index 112c57433094..64284f550af4 100644 --- a/numpy/_core/src/multiarray/_datetime.h +++ b/numpy/_core/src/multiarray/_datetime.h @@ -243,6 +243,7 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj, * Converts a datetime into a PyObject *. * * For days or coarser, returns a datetime.date. 
+ * For days or coarser, returns a datetime.date. * For microseconds or coarser, returns a datetime.datetime. * For units finer than microseconds, returns an integer. */ From 23de624336554b7783175185699714f0ba3faf04 Mon Sep 17 00:00:00 2001 From: Mohit Deoli Date: Wed, 13 Aug 2025 22:51:35 +0530 Subject: [PATCH 0335/1718] DOC: document datetime/timedelta to PyObject conversion --- doc/source/reference/arrays.datetime.rst | 2 +- numpy/_core/src/multiarray/_datetime.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/reference/arrays.datetime.rst b/doc/source/reference/arrays.datetime.rst index 9c53d1bbdb1e..d14d99c39b44 100644 --- a/doc/source/reference/arrays.datetime.rst +++ b/doc/source/reference/arrays.datetime.rst @@ -316,7 +316,7 @@ NumPy follows a strict protocol when converting datetime and timedelta to Python For conversion of datetime to a Python Object: -- For days or coarser, returns a datetime.date. +- Not-a-time is returned as the string "NaT". - For days or coarser, returns a datetime.date. - For microseconds or coarser, returns a datetime.datetime. - For units finer than microseconds, returns an integer. diff --git a/numpy/_core/src/multiarray/_datetime.h b/numpy/_core/src/multiarray/_datetime.h index 64284f550af4..49745197dfab 100644 --- a/numpy/_core/src/multiarray/_datetime.h +++ b/numpy/_core/src/multiarray/_datetime.h @@ -242,7 +242,7 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj, /* * Converts a datetime into a PyObject *. * - * For days or coarser, returns a datetime.date. + * Not-a-time is returned as the string "NaT". * For days or coarser, returns a datetime.date. * For microseconds or coarser, returns a datetime.datetime. * For units finer than microseconds, returns an integer. 
From 19119dc08ea99f038c9490238b185df4b75d6bd5 Mon Sep 17 00:00:00 2001 From: Mohit Deoli Date: Thu, 14 Aug 2025 00:53:50 +0530 Subject: [PATCH 0336/1718] DOC: document datetime/timedelta to PyObject conversion --- doc/source/reference/arrays.datetime.rst | 8 +++++--- numpy/_core/src/multiarray/_datetime.h | 6 +++--- numpy/_core/src/multiarray/datetime.c | 6 +++--- 3 files changed, 11 insertions(+), 9 deletions(-) diff --git a/doc/source/reference/arrays.datetime.rst b/doc/source/reference/arrays.datetime.rst index d14d99c39b44..d2d68a26be3c 100644 --- a/doc/source/reference/arrays.datetime.rst +++ b/doc/source/reference/arrays.datetime.rst @@ -309,6 +309,7 @@ us / μs microsecond +/- 2.9e5 years [290301 BC, 294241 AD] as attosecond +/- 9.2 seconds [ 1969 AD, 1970 AD] ======== ================ ======================= ========================== + Converting datetime and timedelta to Python Object ================================================== @@ -316,16 +317,17 @@ NumPy follows a strict protocol when converting datetime and timedelta to Python For conversion of datetime to a Python Object: -- Not-a-time is returned as the string "NaT". +- Not-a-time is returned as None. - For days or coarser, returns a datetime.date. - For microseconds or coarser, returns a datetime.datetime. - For units finer than microseconds, returns an integer. For conversion of timedelta to Python Object -- Not-a-time is returned as the string "NaT". +- Not-a-time is returned as None. - For microseconds or coarser, returns a datetime.timedelta. -- For units finer than microseconds, returns an integer. +- For Y/M/B (nonlinear units), generic units and units finer than microseconds, returns an integer. 
+ Business day functionality ========================== diff --git a/numpy/_core/src/multiarray/_datetime.h b/numpy/_core/src/multiarray/_datetime.h index 49745197dfab..7ef2c39023a1 100644 --- a/numpy/_core/src/multiarray/_datetime.h +++ b/numpy/_core/src/multiarray/_datetime.h @@ -242,7 +242,7 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj, /* * Converts a datetime into a PyObject *. * - * Not-a-time is returned as the string "NaT". + * Not-a-time is returned as None. * For days or coarser, returns a datetime.date. * For microseconds or coarser, returns a datetime.datetime. * For units finer than microseconds, returns an integer. @@ -253,9 +253,9 @@ convert_datetime_to_pyobject(npy_datetime dt, PyArray_DatetimeMetaData *meta); /* * Converts a timedelta into a PyObject *. * - * Not-a-time is returned as the string "NaT". + * Not-a-time is returned as None. * For microseconds or coarser, returns a datetime.timedelta. - * For units finer than microseconds, returns an integer. + * For Y/M/B (nonlinear units), generic units and units finer than microseconds, returns an integer. */ NPY_NO_EXPORT PyObject * convert_timedelta_to_pyobject(npy_timedelta td, PyArray_DatetimeMetaData *meta); diff --git a/numpy/_core/src/multiarray/datetime.c b/numpy/_core/src/multiarray/datetime.c index 0dac36a0903b..f2f9e6405f9a 100644 --- a/numpy/_core/src/multiarray/datetime.c +++ b/numpy/_core/src/multiarray/datetime.c @@ -2761,7 +2761,7 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj, /* * Converts a datetime into a PyObject *. * - * Not-a-time is returned as the string "NaT". + * Not-a-time is returned as None. * For days or coarser, returns a datetime.date. * For microseconds or coarser, returns a datetime.datetime. * For units finer than microseconds, returns an integer. @@ -2945,9 +2945,9 @@ convert_timedelta_to_timedeltastruct(PyArray_DatetimeMetaData *meta, /* * Converts a timedelta into a PyObject *. 
* - * Not-a-time is returned as the string "NaT". + * Not-a-time is returned as None. * For microseconds or coarser, returns a datetime.timedelta. - * For units finer than microseconds, returns an integer. + * For Y/M/B (nonlinear units), generic units and units finer than microseconds, returns an integer. */ NPY_NO_EXPORT PyObject * convert_timedelta_to_pyobject(npy_timedelta td, PyArray_DatetimeMetaData *meta) From 3cfb1d21c81425c8d2e0a8e7ec27878f1c8f5b09 Mon Sep 17 00:00:00 2001 From: Mohit Deoli Date: Thu, 21 Aug 2025 12:07:03 +0530 Subject: [PATCH 0337/1718] DOC: Update under title 'Converting datetime and timedelta to Python Object' --- doc/source/reference/arrays.datetime.rst | 40 ++++++++++++++++++------ 1 file changed, 30 insertions(+), 10 deletions(-) diff --git a/doc/source/reference/arrays.datetime.rst b/doc/source/reference/arrays.datetime.rst index d2d68a26be3c..43ff7bf650e3 100644 --- a/doc/source/reference/arrays.datetime.rst +++ b/doc/source/reference/arrays.datetime.rst @@ -313,20 +313,40 @@ us / μs microsecond +/- 2.9e5 years [290301 BC, 294241 AD] Converting datetime and timedelta to Python Object ================================================== -NumPy follows a strict protocol when converting datetime and timedelta to Python Objects (e.g., tuple, list, datetime.datetime). +NumPy follows a strict protocol when converting datetime64 and/or timedelta64 to Python Objects (e.g., tuple, list, datetime.datetime). -For conversion of datetime to a Python Object: +The protocol is described in the following table: -- Not-a-time is returned as None. -- For days or coarser, returns a datetime.date. -- For microseconds or coarser, returns a datetime.datetime. -- For units finer than microseconds, returns an integer. 
+================================ ================================= ================================== + Input Type for datetime64 for timedelta64 +================================ ================================= ================================== + NaT None None + Finer than Microseconds int int + Microseconds or Coarser datetime.datetime datetime.timedelta + Days or Coarser datetime.date datetime.timedelta +Non-linear(Y/M) and genric units datetime.date int +================================ ================================= ================================== -For conversion of timedelta to Python Object +.. admonition:: Example + + .. try_examples:: + + >>> import numpy as np + + >>> type(np.datetime64('NaT').astype(object)) + + + >>> type(np.timedelta64('NaT').astype(object)) + + + >>> type(np.timedelta64(123, 'ns').astype(object)) + + + >>> type(np.datetime64('2025-01-01T12:00:00.123456').astype(object)) + -- Not-a-time is returned as None. -- For microseconds or coarser, returns a datetime.timedelta. -- For Y/M/B (nonlinear units), generic units and units finer than microseconds, returns an integer. + >>> type(np.timedelta64(10, 'D').astype(object)) + Business day functionality From eac897a2aac3ca0f3ad64b2936a70ccef70beb69 Mon Sep 17 00:00:00 2001 From: Mohit Deoli Date: Thu, 21 Aug 2025 12:11:16 +0530 Subject: [PATCH 0338/1718] DOC: Fix ambiguous comments under datetime/timedelta to PyObject conversion --- numpy/_core/src/multiarray/_datetime.h | 6 +++--- numpy/_core/src/multiarray/datetime.c | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/numpy/_core/src/multiarray/_datetime.h b/numpy/_core/src/multiarray/_datetime.h index 7ef2c39023a1..829cb7972065 100644 --- a/numpy/_core/src/multiarray/_datetime.h +++ b/numpy/_core/src/multiarray/_datetime.h @@ -242,7 +242,7 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj, /* * Converts a datetime into a PyObject *. * - * Not-a-time is returned as None. 
+ * NaT (Not-a-time) is returned as None. * For days or coarser, returns a datetime.date. * For microseconds or coarser, returns a datetime.datetime. * For units finer than microseconds, returns an integer. @@ -253,9 +253,9 @@ convert_datetime_to_pyobject(npy_datetime dt, PyArray_DatetimeMetaData *meta); /* * Converts a timedelta into a PyObject *. * - * Not-a-time is returned as None. + * NaT (Not-a-time) is returned as None. * For microseconds or coarser, returns a datetime.timedelta. - * For Y/M/B (nonlinear units), generic units and units finer than microseconds, returns an integer. + * For Y/M (nonlinear units), generic units and units finer than microseconds, returns an integer. */ NPY_NO_EXPORT PyObject * convert_timedelta_to_pyobject(npy_timedelta td, PyArray_DatetimeMetaData *meta); diff --git a/numpy/_core/src/multiarray/datetime.c b/numpy/_core/src/multiarray/datetime.c index f2f9e6405f9a..cd70559dd90a 100644 --- a/numpy/_core/src/multiarray/datetime.c +++ b/numpy/_core/src/multiarray/datetime.c @@ -2945,9 +2945,9 @@ convert_timedelta_to_timedeltastruct(PyArray_DatetimeMetaData *meta, /* * Converts a timedelta into a PyObject *. * - * Not-a-time is returned as None. + * NaT (Not-a-time) is returned as None. * For microseconds or coarser, returns a datetime.timedelta. - * For Y/M/B (nonlinear units), generic units and units finer than microseconds, returns an integer. + * For Y/M (nonlinear units), generic units and units finer than microseconds, returns an integer. 
*/ NPY_NO_EXPORT PyObject * convert_timedelta_to_pyobject(npy_timedelta td, PyArray_DatetimeMetaData *meta) @@ -2963,7 +2963,7 @@ convert_timedelta_to_pyobject(npy_timedelta td, PyArray_DatetimeMetaData *meta) /* * If the type's precision is greater than microseconds, is - * Y/M/B (nonlinear units), or is generic units, return an int + * Y/M (nonlinear units), or is generic units, return an int */ if (meta->base > NPY_FR_us || meta->base == NPY_FR_Y || From ebaab572235781fad5fc012068ebeea55efeb6e7 Mon Sep 17 00:00:00 2001 From: Mohit Deoli Date: Thu, 21 Aug 2025 22:35:19 +0530 Subject: [PATCH 0339/1718] DOC: Update doc/source/reference/arrays.datetime.rst --- doc/source/reference/arrays.datetime.rst | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/doc/source/reference/arrays.datetime.rst b/doc/source/reference/arrays.datetime.rst index 43ff7bf650e3..93ec2d00f5b8 100644 --- a/doc/source/reference/arrays.datetime.rst +++ b/doc/source/reference/arrays.datetime.rst @@ -313,18 +313,18 @@ us / μs microsecond +/- 2.9e5 years [290301 BC, 294241 AD] Converting datetime and timedelta to Python Object ================================================== -NumPy follows a strict protocol when converting datetime64 and/or timedelta64 to Python Objects (e.g., tuple, list, datetime.datetime). +NumPy follows a strict protocol when converting ``np.datetime64`` and/or ``np.timedelta64`` to Python Objects (e.g., ``tuple``, ``list``, ``datetime.datetime``). 
The protocol is described in the following table: ================================ ================================= ================================== - Input Type for datetime64 for timedelta64 + Input Type for ``datetime64`` for ``timedelta64`` ================================ ================================= ================================== - NaT None None - Finer than Microseconds int int - Microseconds or Coarser datetime.datetime datetime.timedelta - Days or Coarser datetime.date datetime.timedelta -Non-linear(Y/M) and genric units datetime.date int + ``NaT`` ``None`` ``None`` + Finer than Microseconds ``int`` ``int`` + Microseconds or Coarser ``datetime.datetime`` ``datetime.timedelta`` + Days or Coarser ``datetime.date`` ``datetime.timedelta`` +Non-linear(Y/M) and genric units ``datetime.date`` ``int`` ================================ ================================= ================================== .. admonition:: Example From 62667abf4f8964b1388aba5beb56a9a7310e17a7 Mon Sep 17 00:00:00 2001 From: Mohit Deoli Date: Fri, 22 Aug 2025 01:11:00 +0530 Subject: [PATCH 0340/1718] DOC: Update doc/source/reference/arrays.datetime.rst --- doc/source/reference/arrays.datetime.rst | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/doc/source/reference/arrays.datetime.rst b/doc/source/reference/arrays.datetime.rst index 93ec2d00f5b8..9735ccbfa2a4 100644 --- a/doc/source/reference/arrays.datetime.rst +++ b/doc/source/reference/arrays.datetime.rst @@ -313,18 +313,18 @@ us / μs microsecond +/- 2.9e5 years [290301 BC, 294241 AD] Converting datetime and timedelta to Python Object ================================================== -NumPy follows a strict protocol when converting ``np.datetime64`` and/or ``np.timedelta64`` to Python Objects (e.g., ``tuple``, ``list``, ``datetime.datetime``). +NumPy follows a strict protocol when converting `datetime64` and/or `timedelta64` to Python Objects (e.g., `tuple`, `list`, `datetime.datetime`). 
The protocol is described in the following table: ================================ ================================= ================================== - Input Type for ``datetime64`` for ``timedelta64`` + Input Type for `datetime64` for `timedelta64` ================================ ================================= ================================== - ``NaT`` ``None`` ``None`` - Finer than Microseconds ``int`` ``int`` - Microseconds or Coarser ``datetime.datetime`` ``datetime.timedelta`` - Days or Coarser ``datetime.date`` ``datetime.timedelta`` -Non-linear(Y/M) and genric units ``datetime.date`` ``int`` + `NaT` `None` `None` + Finer than Microseconds `int` `int` + Microseconds or Coarser `datetime.datetime` `datetime.timedelta` + Days or Coarser `datetime.date` `datetime.timedelta` +Non-linear(Y/M) and genric units `datetime.date` `int` ================================ ================================= ================================== .. admonition:: Example From cb016cd1adcd6007943680f1b4165dfebf3d23dd Mon Sep 17 00:00:00 2001 From: Mohit Deoli Date: Fri, 22 Aug 2025 01:31:38 +0530 Subject: [PATCH 0341/1718] DOC: Update doc/source/reference/arrays.datetime.rst --- doc/source/reference/arrays.datetime.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/source/reference/arrays.datetime.rst b/doc/source/reference/arrays.datetime.rst index 9735ccbfa2a4..a62ec26e00c8 100644 --- a/doc/source/reference/arrays.datetime.rst +++ b/doc/source/reference/arrays.datetime.rst @@ -313,18 +313,18 @@ us / μs microsecond +/- 2.9e5 years [290301 BC, 294241 AD] Converting datetime and timedelta to Python Object ================================================== -NumPy follows a strict protocol when converting `datetime64` and/or `timedelta64` to Python Objects (e.g., `tuple`, `list`, `datetime.datetime`). 
+NumPy follows a strict protocol when converting `datetime64` and/or `timedelta64` to Python Objects (e.g., ``tuple``, ``list``, `datetime.datetime`). The protocol is described in the following table: ================================ ================================= ================================== Input Type for `datetime64` for `timedelta64` ================================ ================================= ================================== - `NaT` `None` `None` - Finer than Microseconds `int` `int` + ``NaT`` ``None`` ``None`` + Finer than Microseconds ``int`` ``int`` Microseconds or Coarser `datetime.datetime` `datetime.timedelta` Days or Coarser `datetime.date` `datetime.timedelta` -Non-linear(Y/M) and genric units `datetime.date` `int` +Non-linear(Y/M) and genric units `datetime.date` ``int`` ================================ ================================= ================================== .. admonition:: Example From 835e56c322ac5bcfdf1397f5905222d1b1d22484 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 14 Aug 2025 21:35:59 +0200 Subject: [PATCH 0342/1718] TYP: move a `# type: ignore` so that it actually ignores the error --- numpy/_typing/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/_typing/__init__.py b/numpy/_typing/__init__.py index 0044749e3dce..0594ae5759b4 100644 --- a/numpy/_typing/__init__.py +++ b/numpy/_typing/__init__.py @@ -118,8 +118,8 @@ ) # -from ._nbit_base import ( - NBitBase as NBitBase, # type: ignore[deprecated] # pyright: ignore[reportDeprecated] +from ._nbit_base import ( # type: ignore[deprecated] + NBitBase as NBitBase, # pyright: ignore[reportDeprecated] _8Bit as _8Bit, _16Bit as _16Bit, _32Bit as _32Bit, From 7216e70e17fe2dc60fb91be9aa8bc78a3e533b2a Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 14 Aug 2025 22:27:30 +0200 Subject: [PATCH 0343/1718] TYP: use ``TypeAliasType`` for ``ArrayLike`` and ``DTypeLike`` on py312+ --- numpy/_typing/__init__.py | 18 ++++++++++++++++-- 
numpy/typing/tests/test_runtime.py | 21 +++++++++++++++++---- 2 files changed, 33 insertions(+), 6 deletions(-) diff --git a/numpy/_typing/__init__.py b/numpy/_typing/__init__.py index 0594ae5759b4..11ed6e62f1be 100644 --- a/numpy/_typing/__init__.py +++ b/numpy/_typing/__init__.py @@ -1,7 +1,8 @@ """Private counterpart of ``numpy.typing``.""" +import sys + from ._array_like import ( - ArrayLike as ArrayLike, NDArray as NDArray, _ArrayLike as _ArrayLike, _ArrayLikeAnyString_co as _ArrayLikeAnyString_co, @@ -84,7 +85,6 @@ # from ._dtype_like import ( - DTypeLike as DTypeLike, _DTypeLike as _DTypeLike, _DTypeLikeBool as _DTypeLikeBool, _DTypeLikeBytes as _DTypeLikeBytes, @@ -156,3 +156,17 @@ _UFunc_Nin2_Nout1 as _UFunc_Nin2_Nout1, _UFunc_Nin2_Nout2 as _UFunc_Nin2_Nout2, ) + +# wrapping the public aliases in `TypeAliasType` helps with introspection readability +if sys.version_info >= (3, 12): + from typing import TypeAliasType + + from ._array_like import ArrayLike as _ArrayLikeAlias + from ._dtype_like import DTypeLike as _DTypeLikeAlias + + ArrayLike = TypeAliasType("ArrayLike", _ArrayLikeAlias) + DTypeLike = TypeAliasType("DTypeLike", _DTypeLikeAlias) + +else: + from ._array_like import ArrayLike as ArrayLike + from ._dtype_like import DTypeLike as DTypeLike diff --git a/numpy/typing/tests/test_runtime.py b/numpy/typing/tests/test_runtime.py index 236952101126..8e49fda8185c 100644 --- a/numpy/typing/tests/test_runtime.py +++ b/numpy/typing/tests/test_runtime.py @@ -22,12 +22,25 @@ class TypeTup(NamedTuple): origin: type | None +def _flatten_type_alias(t: Any) -> Any: + # "flattens" a TypeAliasType to its underlying type alias + return getattr(t, "__value__", t) + + NDArrayTup = TypeTup(npt.NDArray, npt.NDArray.__args__, np.ndarray) TYPES = { - "ArrayLike": TypeTup(npt.ArrayLike, npt.ArrayLike.__args__, Union), - "DTypeLike": TypeTup(npt.DTypeLike, npt.DTypeLike.__args__, Union), - "NBitBase": TypeTup(npt.NBitBase, (), None), + "ArrayLike": TypeTup( + 
_flatten_type_alias(npt.ArrayLike), + _flatten_type_alias(npt.ArrayLike).__args__, + Union, + ), + "DTypeLike": TypeTup( + _flatten_type_alias(npt.DTypeLike), + _flatten_type_alias(npt.DTypeLike).__args__, + Union, + ), + "NBitBase": TypeTup(npt.NBitBase, (), None), # type: ignore[deprecated] # pyright: ignore[reportDeprecated] "NDArray": NDArrayTup, } @@ -68,7 +81,7 @@ def test_get_type_hints_str(name: type, tup: TypeTup) -> None: def func(a: typ_str) -> None: pass out = get_type_hints(func) - ref = {"a": typ, "return": type(None)} + ref = {"a": getattr(npt, str(name)), "return": type(None)} assert out == ref From 6d77e46292b4c1bafebc76a6abbb96448a87ed4a Mon Sep 17 00:00:00 2001 From: Andrew Nelson Date: Fri, 22 Aug 2025 13:17:16 +1000 Subject: [PATCH 0344/1718] CI: only test pyodide --- .github/workflows/emscripten.yml | 53 ++------------------------------ 1 file changed, 2 insertions(+), 51 deletions(-) diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index e7503871e3f5..c5f770c9d785 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -5,25 +5,6 @@ on: branches: - main - maintenance/** - # Note: this workflow gets triggered on the same schedule as the - # wheels.yml workflow to upload WASM wheels to Anaconda.org. - schedule: - # ┌───────────── minute (0 - 59) - # │ ┌───────────── hour (0 - 23) - # │ │ ┌───────────── day of the month (1 - 31) - # │ │ │ ┌───────────── month (1 - 12 or JAN-DEC) - # │ │ │ │ ┌───────────── day of the week (0 - 6 or SUN-SAT) - # │ │ │ │ │ - - cron: "42 2 * * SUN,WED" - workflow_dispatch: - inputs: - push_wheels: - # Can be 'true' or 'false'. Default is 'false'. - # Warning: this will overwrite existing wheels. 
- description: > - Push wheels to Anaconda.org if the build succeeds - required: false - default: 'false' env: FORCE_COLOR: 3 @@ -37,7 +18,7 @@ jobs: build-wasm-emscripten: permissions: contents: read # to fetch code (actions/checkout) - name: Build NumPy distribution for Pyodide + name: Pyodide test runs-on: ubuntu-22.04 # To enable this workflow on a fork, comment out: if: github.repository == 'numpy/numpy' @@ -52,34 +33,4 @@ jobs: - uses: pypa/cibuildwheel@c923d83ad9c1bc00211c5041d0c3f73294ff88f6 # v3.1.4 env: CIBW_PLATFORM: pyodide - - - name: Upload wheel artifact(s) - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 - with: - name: cp312-pyodide_wasm32 - path: ./wheelhouse/*.whl - if-no-files-found: error - - # Push to https://anaconda.org/scientific-python-nightly-wheels/numpy - # WARNING: this job will overwrite any existing WASM wheels. - upload-wheels: - name: Upload NumPy WASM wheels to Anaconda.org - runs-on: ubuntu-22.04 - permissions: {} - needs: [build-wasm-emscripten] - if: >- - (github.repository == 'numpy/numpy') && - (github.event_name == 'workflow_dispatch' && github.event.inputs.push_wheels == 'true') || - (github.event_name == 'schedule') - steps: - - name: Download wheel artifact(s) - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 - with: - path: wheelhouse/ - merge-multiple: true - - - name: Push to Anaconda PyPI index - uses: scientific-python/upload-nightly-action@b36e8c0c10dbcfd2e05bf95f17ef8c14fd708dbf # v0.6.2 - with: - artifacts_path: wheelhouse/ - anaconda_nightly_upload_token: ${{ secrets.NUMPY_NIGHTLY_UPLOAD_TOKEN }} + CIBW_BUILD: cp312-* From 89dd7c593d6e21b66b70991fe5136fed292277d0 Mon Sep 17 00:00:00 2001 From: Mohit Deoli Date: Fri, 22 Aug 2025 12:15:30 +0530 Subject: [PATCH 0345/1718] DOC: Update doc/source/reference/arrays.datetime.rst --- doc/source/reference/arrays.datetime.rst | 40 ++++++++++++++++++------ 1 file changed, 30 insertions(+), 10 deletions(-) 
diff --git a/doc/source/reference/arrays.datetime.rst b/doc/source/reference/arrays.datetime.rst index a62ec26e00c8..59e11fa91f5a 100644 --- a/doc/source/reference/arrays.datetime.rst +++ b/doc/source/reference/arrays.datetime.rst @@ -320,11 +320,12 @@ The protocol is described in the following table: ================================ ================================= ================================== Input Type for `datetime64` for `timedelta64` ================================ ================================= ================================== - ``NaT`` ``None`` ``None`` - Finer than Microseconds ``int`` ``int`` - Microseconds or Coarser `datetime.datetime` `datetime.timedelta` - Days or Coarser `datetime.date` `datetime.timedelta` -Non-linear(Y/M) and genric units `datetime.date` ``int`` + ``NaT`` ``None`` ``None`` + ns/ps/fs/as ``int`` ``int`` + μs/ms/s/m/h `datetime.datetime` `datetime.timedelta` + D/W (Linear units) `datetime.date` `datetime.timedelta` + Y/M (Non-linear units) `datetime.date` ``int`` + Generic units `datetime.date` ``int`` ================================ ================================= ================================== .. 
admonition:: Example @@ -333,22 +334,41 @@ Non-linear(Y/M) and genric units `datetime.date` >>> import numpy as np - >>> type(np.datetime64('NaT').astype(object)) + >>> type(np.datetime64('NaT').item()) - >>> type(np.timedelta64('NaT').astype(object)) + >>> type(np.timedelta64('NaT').item()) - >>> type(np.timedelta64(123, 'ns').astype(object)) + >>> type(np.timedelta64(123, 'ns').item()) - >>> type(np.datetime64('2025-01-01T12:00:00.123456').astype(object)) + >>> type(np.datetime64('2025-01-01T12:00:00.123456').item()) - >>> type(np.timedelta64(10, 'D').astype(object)) + >>> type(np.timedelta64(10, 'D').item()) +In the case where conversion of `datetime64` and/or `timedelta64` is done against Python types like ``int``, ``float``, and ``str``, the corresponding return types will be ``np.str_``, ``np.int64`` and ``np.float64``. + + +.. admonition:: Example + + .. try_examples:: + + >>> import numpy as np + + >>> type(np.timedelta64(1, 'D').astype(int)) + + + >>> type(np.datetime64('2025-01-01T12:00:00.123456').astype(float)) + + + >>> type(np.timedelta64(123, 'ns').astype(str)) + + + Business day functionality ========================== From 0bb2bf80da0a32a4969b1b4300711fecb6adc65a Mon Sep 17 00:00:00 2001 From: Mohit Deoli Date: Fri, 22 Aug 2025 12:15:57 +0530 Subject: [PATCH 0346/1718] DOC: numpy/_core/src/multiarray/_datetime.h --- numpy/_core/src/multiarray/_datetime.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/numpy/_core/src/multiarray/_datetime.h b/numpy/_core/src/multiarray/_datetime.h index 829cb7972065..5261e8232a08 100644 --- a/numpy/_core/src/multiarray/_datetime.h +++ b/numpy/_core/src/multiarray/_datetime.h @@ -243,9 +243,9 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj, * Converts a datetime into a PyObject *. * * NaT (Not-a-time) is returned as None. - * For days or coarser, returns a datetime.date. - * For microseconds or coarser, returns a datetime.datetime. 
- * For units finer than microseconds, returns an integer. + * For D/W/Y/M (days or coarser), returns a datetime.date. + * For μs/ms/s/m/h/D/W (microseconds or coarser), returns a datetime.datetime. + * For ns/ps/fs/as (units shorter than microseconds), returns an integer. */ NPY_NO_EXPORT PyObject * convert_datetime_to_pyobject(npy_datetime dt, PyArray_DatetimeMetaData *meta); @@ -254,8 +254,8 @@ convert_datetime_to_pyobject(npy_datetime dt, PyArray_DatetimeMetaData *meta); * Converts a timedelta into a PyObject *. * * NaT (Not-a-time) is returned as None. - * For microseconds or coarser, returns a datetime.timedelta. - * For Y/M (nonlinear units), generic units and units finer than microseconds, returns an integer. + * For μs/ms/s/m/h/D/W (microseconds or coarser), returns a datetime.timedelta. + * For Y/M (non-linear units), generic units and ns/ps/fs/as (units shorter than microseconds), returns an integer. */ NPY_NO_EXPORT PyObject * convert_timedelta_to_pyobject(npy_timedelta td, PyArray_DatetimeMetaData *meta); From da71accb4afcd80ed2eecf07839716764808c002 Mon Sep 17 00:00:00 2001 From: Mohit Deoli Date: Fri, 22 Aug 2025 12:16:21 +0530 Subject: [PATCH 0347/1718] DOC: numpy/_core/src/multiarray/datetime.c --- numpy/_core/src/multiarray/datetime.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/numpy/_core/src/multiarray/datetime.c b/numpy/_core/src/multiarray/datetime.c index cd70559dd90a..3c163a34fbdf 100644 --- a/numpy/_core/src/multiarray/datetime.c +++ b/numpy/_core/src/multiarray/datetime.c @@ -2761,10 +2761,10 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj, /* * Converts a datetime into a PyObject *. * - * Not-a-time is returned as None. - * For days or coarser, returns a datetime.date. - * For microseconds or coarser, returns a datetime.datetime. - * For units finer than microseconds, returns an integer. + * NaT (Not-a-time) is returned as None. 
+ * For D/W/Y/M (days or coarser), returns a datetime.date. + * For μs/ms/s/m/h/D/W (microseconds or coarser), returns a datetime.datetime. + * For ns/ps/fs/as (units shorter than microseconds), returns an integer. */ NPY_NO_EXPORT PyObject * convert_datetime_to_pyobject(npy_datetime dt, PyArray_DatetimeMetaData *meta) @@ -2946,8 +2946,8 @@ convert_timedelta_to_timedeltastruct(PyArray_DatetimeMetaData *meta, * Converts a timedelta into a PyObject *. * * NaT (Not-a-time) is returned as None. - * For microseconds or coarser, returns a datetime.timedelta. - * For Y/M (nonlinear units), generic units and units finer than microseconds, returns an integer. + * For μs/ms/s/m/h/D/W (microseconds or coarser), returns a datetime.timedelta. + * For Y/M (non-linear units), generic units and ns/ps/fs/as (units shorter than microseconds), returns an integer. */ NPY_NO_EXPORT PyObject * convert_timedelta_to_pyobject(npy_timedelta td, PyArray_DatetimeMetaData *meta) From 398e38e6bea79df7276749228fead40f29f5a092 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Fri, 22 Aug 2025 12:50:02 +0100 Subject: [PATCH 0348/1718] Merge pull request #29603 from MarcoGorelli/typ-lib-defaults --- numpy/lib/_array_utils_impl.pyi | 6 +- numpy/lib/_arraypad_impl.pyi | 20 +- numpy/lib/_function_base_impl.pyi | 582 +++++++++++++++--------------- numpy/lib/_histograms_impl.pyi | 22 +- numpy/lib/_index_tricks_impl.pyi | 4 +- numpy/lib/_npyio_impl.pyi | 132 +++---- numpy/lib/_polynomial_impl.pyi | 76 ++-- numpy/lib/_shape_base_impl.pyi | 10 +- numpy/lib/_stride_tricks_impl.pyi | 34 +- numpy/lib/_twodim_base_impl.pyi | 198 +++++----- numpy/lib/_ufunclike_impl.pyi | 14 +- 11 files changed, 549 insertions(+), 549 deletions(-) diff --git a/numpy/lib/_array_utils_impl.pyi b/numpy/lib/_array_utils_impl.pyi index d3e0714773f2..db9ef852ba57 100644 --- a/numpy/lib/_array_utils_impl.pyi +++ b/numpy/lib/_array_utils_impl.pyi @@ -14,9 +14,9 @@ def 
byte_bounds(a: generic | NDArray[Any]) -> tuple[int, int]: ... def normalize_axis_tuple( axis: int | Iterable[int], - ndim: int = ..., - argname: str | None = ..., - allow_duplicate: bool | None = ..., + ndim: int, + argname: str | None = None, + allow_duplicate: bool | None = False, ) -> tuple[int, int]: ... def normalize_axis_index( diff --git a/numpy/lib/_arraypad_impl.pyi b/numpy/lib/_arraypad_impl.pyi index cfabdecf669e..e7aacea43254 100644 --- a/numpy/lib/_arraypad_impl.pyi +++ b/numpy/lib/_arraypad_impl.pyi @@ -54,23 +54,23 @@ _PadWidth: TypeAlias = ( def pad( array: _ArrayLike[_ScalarT], pad_width: _PadWidth, - mode: _ModeKind = ..., + mode: _ModeKind = "constant", *, - stat_length: _ArrayLikeInt | None = ..., - constant_values: ArrayLike = ..., - end_values: ArrayLike = ..., - reflect_type: L["odd", "even"] = ..., + stat_length: _ArrayLikeInt | None = None, + constant_values: ArrayLike = 0, + end_values: ArrayLike = 0, + reflect_type: L["odd", "even"] = "even", ) -> NDArray[_ScalarT]: ... @overload def pad( array: ArrayLike, pad_width: _PadWidth, - mode: _ModeKind = ..., + mode: _ModeKind = "constant", *, - stat_length: _ArrayLikeInt | None = ..., - constant_values: ArrayLike = ..., - end_values: ArrayLike = ..., - reflect_type: L["odd", "even"] = ..., + stat_length: _ArrayLikeInt | None = None, + constant_values: ArrayLike = 0, + end_values: ArrayLike = 0, + reflect_type: L["odd", "even"] = "even", ) -> NDArray[Any]: ... @overload def pad( diff --git a/numpy/lib/_function_base_impl.pyi b/numpy/lib/_function_base_impl.pyi index 41d6204cd684..bee5da4c58ae 100644 --- a/numpy/lib/_function_base_impl.pyi +++ b/numpy/lib/_function_base_impl.pyi @@ -122,24 +122,24 @@ class _TrimZerosSequence(Protocol[_T_co]): @overload def rot90( m: _ArrayLike[_ScalarT], - k: int = ..., + k: int = 1, axes: tuple[int, int] = ..., ) -> NDArray[_ScalarT]: ... @overload def rot90( m: ArrayLike, - k: int = ..., + k: int = 1, axes: tuple[int, int] = ..., ) -> NDArray[Any]: ... 
@overload -def flip(m: _ScalarT, axis: None = ...) -> _ScalarT: ... +def flip(m: _ScalarT, axis: None = None) -> _ScalarT: ... @overload -def flip(m: _ScalarLike_co, axis: None = ...) -> Any: ... +def flip(m: _ScalarLike_co, axis: None = None) -> Any: ... @overload -def flip(m: _ArrayLike[_ScalarT], axis: _ShapeLike | None = ...) -> NDArray[_ScalarT]: ... +def flip(m: _ArrayLike[_ScalarT], axis: _ShapeLike | None = None) -> NDArray[_ScalarT]: ... @overload -def flip(m: ArrayLike, axis: _ShapeLike | None = ...) -> NDArray[Any]: ... +def flip(m: ArrayLike, axis: _ShapeLike | None = None) -> NDArray[Any]: ... def iterable(y: object) -> TypeIs[Iterable[Any]]: ... @@ -201,26 +201,26 @@ def average( @overload def asarray_chkfinite( a: _ArrayLike[_ScalarT], - dtype: None = ..., - order: _OrderKACF = ..., + dtype: None = None, + order: _OrderKACF = None, ) -> NDArray[_ScalarT]: ... @overload def asarray_chkfinite( a: object, - dtype: None = ..., - order: _OrderKACF = ..., + dtype: None = None, + order: _OrderKACF = None, ) -> NDArray[Any]: ... @overload def asarray_chkfinite( a: Any, dtype: _DTypeLike[_ScalarT], - order: _OrderKACF = ..., + order: _OrderKACF = None, ) -> NDArray[_ScalarT]: ... @overload def asarray_chkfinite( a: Any, dtype: DTypeLike, - order: _OrderKACF = ..., + order: _OrderKACF = None, ) -> NDArray[Any]: ... @overload @@ -251,7 +251,7 @@ def piecewise( def select( condlist: Sequence[ArrayLike], choicelist: Sequence[ArrayLike], - default: ArrayLike = ..., + default: ArrayLike = 0, ) -> NDArray[Any]: ... @overload @@ -263,43 +263,43 @@ def copy( @overload def copy( a: _ArrayT, - order: _OrderKACF = ..., + order: _OrderKACF = "K", *, subok: L[True], ) -> _ArrayT: ... @overload def copy( a: _ArrayLike[_ScalarT], - order: _OrderKACF = ..., - subok: L[False] = ..., + order: _OrderKACF = "K", + subok: L[False] = False, ) -> NDArray[_ScalarT]: ... 
@overload def copy( a: ArrayLike, - order: _OrderKACF = ..., - subok: L[False] = ..., + order: _OrderKACF = "K", + subok: L[False] = False, ) -> NDArray[Any]: ... def gradient( f: ArrayLike, *varargs: ArrayLike, - axis: _ShapeLike | None = ..., - edge_order: L[1, 2] = ..., + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, ) -> Any: ... @overload def diff( a: _T, n: L[0], - axis: SupportsIndex = ..., + axis: SupportsIndex = -1, prepend: ArrayLike = ..., append: ArrayLike = ..., ) -> _T: ... @overload def diff( a: ArrayLike, - n: int = ..., - axis: SupportsIndex = ..., + n: int = 1, + axis: SupportsIndex = -1, prepend: ArrayLike = ..., append: ArrayLike = ..., ) -> NDArray[Any]: ... @@ -387,27 +387,27 @@ def interp( ) -> NDArray[complex128 | float64] | complex128 | float64: ... @overload -def angle(z: _ComplexLike_co, deg: bool = ...) -> floating: ... +def angle(z: _ComplexLike_co, deg: bool = False) -> floating: ... @overload -def angle(z: object_, deg: bool = ...) -> Any: ... +def angle(z: object_, deg: bool = False) -> Any: ... @overload -def angle(z: _ArrayLikeComplex_co, deg: bool = ...) -> NDArray[floating]: ... +def angle(z: _ArrayLikeComplex_co, deg: bool = False) -> NDArray[floating]: ... @overload -def angle(z: _ArrayLikeObject_co, deg: bool = ...) -> NDArray[object_]: ... +def angle(z: _ArrayLikeObject_co, deg: bool = False) -> NDArray[object_]: ... @overload def unwrap( p: _ArrayLikeFloat_co, - discont: float | None = ..., - axis: int = ..., + discont: float | None = None, + axis: int = -1, *, period: float = ..., ) -> NDArray[floating]: ... @overload def unwrap( p: _ArrayLikeObject_co, - discont: float | None = ..., - axis: int = ..., + discont: float | None = None, + axis: int = -1, *, period: float = ..., ) -> NDArray[object_]: ... @@ -416,7 +416,7 @@ def sort_complex(a: ArrayLike) -> NDArray[complexfloating]: ... 
def trim_zeros( filt: _TrimZerosSequence[_T], - trim: L["f", "b", "fb", "bf"] = ..., + trim: L["f", "b", "fb", "bf"] = "fb", ) -> _T: ... @overload @@ -429,48 +429,48 @@ def place(arr: NDArray[Any], mask: ArrayLike, vals: Any) -> None: ... @overload def cov( m: _ArrayLikeFloat_co, - y: _ArrayLikeFloat_co | None = ..., - rowvar: bool = ..., - bias: bool = ..., - ddof: SupportsIndex | SupportsInt | None = ..., - fweights: ArrayLike | None = ..., - aweights: ArrayLike | None = ..., + y: _ArrayLikeFloat_co | None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: ArrayLike | None = None, + aweights: ArrayLike | None = None, *, - dtype: None = ..., + dtype: None = None, ) -> NDArray[floating]: ... @overload def cov( m: _ArrayLikeComplex_co, - y: _ArrayLikeComplex_co | None = ..., - rowvar: bool = ..., - bias: bool = ..., - ddof: SupportsIndex | SupportsInt | None = ..., - fweights: ArrayLike | None = ..., - aweights: ArrayLike | None = ..., + y: _ArrayLikeComplex_co | None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: ArrayLike | None = None, + aweights: ArrayLike | None = None, *, - dtype: None = ..., + dtype: None = None, ) -> NDArray[complexfloating]: ... @overload def cov( m: _ArrayLikeComplex_co, - y: _ArrayLikeComplex_co | None = ..., - rowvar: bool = ..., - bias: bool = ..., - ddof: SupportsIndex | SupportsInt | None = ..., - fweights: ArrayLike | None = ..., - aweights: ArrayLike | None = ..., + y: _ArrayLikeComplex_co | None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: ArrayLike | None = None, + aweights: ArrayLike | None = None, *, dtype: _DTypeLike[_ScalarT], ) -> NDArray[_ScalarT]: ... 
@overload def cov( m: _ArrayLikeComplex_co, - y: _ArrayLikeComplex_co | None = ..., - rowvar: bool = ..., - bias: bool = ..., - ddof: SupportsIndex | SupportsInt | None = ..., - fweights: ArrayLike | None = ..., - aweights: ArrayLike | None = ..., + y: _ArrayLikeComplex_co | None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: ArrayLike | None = None, + aweights: ArrayLike | None = None, *, dtype: DTypeLike, ) -> NDArray[Any]: ... @@ -544,59 +544,59 @@ def sinc(x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... @overload def median( a: _ArrayLikeFloat_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - keepdims: L[False] = ..., + axis: None = None, + out: None = None, + overwrite_input: bool = False, + keepdims: L[False] = False, ) -> floating: ... @overload def median( a: _ArrayLikeComplex_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - keepdims: L[False] = ..., + axis: None = None, + out: None = None, + overwrite_input: bool = False, + keepdims: L[False] = False, ) -> complexfloating: ... @overload def median( a: _ArrayLikeTD64_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - keepdims: L[False] = ..., + axis: None = None, + out: None = None, + overwrite_input: bool = False, + keepdims: L[False] = False, ) -> timedelta64: ... @overload def median( a: _ArrayLikeObject_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - keepdims: L[False] = ..., + axis: None = None, + out: None = None, + overwrite_input: bool = False, + keepdims: L[False] = False, ) -> Any: ... 
@overload def median( a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = ..., - out: None = ..., - overwrite_input: bool = ..., - keepdims: bool = ..., + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + keepdims: bool = False, ) -> Any: ... @overload def median( a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, axis: _ShapeLike | None, out: _ArrayT, - overwrite_input: bool = ..., - keepdims: bool = ..., + overwrite_input: bool = False, + keepdims: bool = False, ) -> _ArrayT: ... @overload def median( a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = ..., + axis: _ShapeLike | None = None, *, out: _ArrayT, - overwrite_input: bool = ..., - keepdims: bool = ..., + overwrite_input: bool = False, + keepdims: bool = False, ) -> _ArrayT: ... _MethodKind = L[ @@ -620,133 +620,133 @@ _MethodKind = L[ def percentile( a: _ArrayLikeFloat_co, q: _FloatLike_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: L[False] = ..., + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: L[False] = False, *, - weights: _ArrayLikeFloat_co | None = ..., + weights: _ArrayLikeFloat_co | None = None, ) -> floating: ... @overload def percentile( a: _ArrayLikeComplex_co, q: _FloatLike_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: L[False] = ..., + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: L[False] = False, *, - weights: _ArrayLikeFloat_co | None = ..., + weights: _ArrayLikeFloat_co | None = None, ) -> complexfloating: ... 
@overload def percentile( a: _ArrayLikeTD64_co, q: _FloatLike_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: L[False] = ..., + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: L[False] = False, *, - weights: _ArrayLikeFloat_co | None = ..., + weights: _ArrayLikeFloat_co | None = None, ) -> timedelta64: ... @overload def percentile( a: _ArrayLikeDT64_co, q: _FloatLike_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: L[False] = ..., + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: L[False] = False, *, - weights: _ArrayLikeFloat_co | None = ..., + weights: _ArrayLikeFloat_co | None = None, ) -> datetime64: ... @overload def percentile( a: _ArrayLikeObject_co, q: _FloatLike_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: L[False] = ..., + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: L[False] = False, *, - weights: _ArrayLikeFloat_co | None = ..., + weights: _ArrayLikeFloat_co | None = None, ) -> Any: ... @overload def percentile( a: _ArrayLikeFloat_co, q: _ArrayLikeFloat_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: L[False] = ..., + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: L[False] = False, *, - weights: _ArrayLikeFloat_co | None = ..., + weights: _ArrayLikeFloat_co | None = None, ) -> NDArray[floating]: ... 
@overload def percentile( a: _ArrayLikeComplex_co, q: _ArrayLikeFloat_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: L[False] = ..., + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: L[False] = False, *, - weights: _ArrayLikeFloat_co | None = ..., + weights: _ArrayLikeFloat_co | None = None, ) -> NDArray[complexfloating]: ... @overload def percentile( a: _ArrayLikeTD64_co, q: _ArrayLikeFloat_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: L[False] = ..., + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: L[False] = False, *, - weights: _ArrayLikeFloat_co | None = ..., + weights: _ArrayLikeFloat_co | None = None, ) -> NDArray[timedelta64]: ... @overload def percentile( a: _ArrayLikeDT64_co, q: _ArrayLikeFloat_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: L[False] = ..., + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: L[False] = False, *, - weights: _ArrayLikeFloat_co | None = ..., + weights: _ArrayLikeFloat_co | None = None, ) -> NDArray[datetime64]: ... @overload def percentile( a: _ArrayLikeObject_co, q: _ArrayLikeFloat_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: L[False] = ..., + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: L[False] = False, *, - weights: _ArrayLikeFloat_co | None = ..., + weights: _ArrayLikeFloat_co | None = None, ) -> NDArray[object_]: ... 
@overload def percentile( a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co, q: _ArrayLikeFloat_co, - axis: _ShapeLike | None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: bool = ..., + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: bool = False, *, - weights: _ArrayLikeFloat_co | None = ..., + weights: _ArrayLikeFloat_co | None = None, ) -> Any: ... @overload def percentile( @@ -754,23 +754,23 @@ def percentile( q: _ArrayLikeFloat_co, axis: _ShapeLike | None, out: _ArrayT, - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: bool = ..., + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: bool = False, *, - weights: _ArrayLikeFloat_co | None = ..., + weights: _ArrayLikeFloat_co | None = None, ) -> _ArrayT: ... @overload def percentile( a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co, q: _ArrayLikeFloat_co, - axis: _ShapeLike | None = ..., + axis: _ShapeLike | None = None, *, out: _ArrayT, - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: bool = ..., - weights: _ArrayLikeFloat_co | None = ..., + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: bool = False, + weights: _ArrayLikeFloat_co | None = None, ) -> _ArrayT: ... # NOTE: keep in sync with `percentile` @@ -778,133 +778,133 @@ def percentile( def quantile( a: _ArrayLikeFloat_co, q: _FloatLike_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: L[False] = ..., + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: L[False] = False, *, - weights: _ArrayLikeFloat_co | None = ..., + weights: _ArrayLikeFloat_co | None = None, ) -> floating: ... 
@overload def quantile( a: _ArrayLikeComplex_co, q: _FloatLike_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: L[False] = ..., + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: L[False] = False, *, - weights: _ArrayLikeFloat_co | None = ..., + weights: _ArrayLikeFloat_co | None = None, ) -> complexfloating: ... @overload def quantile( a: _ArrayLikeTD64_co, q: _FloatLike_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: L[False] = ..., + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: L[False] = False, *, - weights: _ArrayLikeFloat_co | None = ..., + weights: _ArrayLikeFloat_co | None = None, ) -> timedelta64: ... @overload def quantile( a: _ArrayLikeDT64_co, q: _FloatLike_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: L[False] = ..., + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: L[False] = False, *, - weights: _ArrayLikeFloat_co | None = ..., + weights: _ArrayLikeFloat_co | None = None, ) -> datetime64: ... @overload def quantile( a: _ArrayLikeObject_co, q: _FloatLike_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: L[False] = ..., + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: L[False] = False, *, - weights: _ArrayLikeFloat_co | None = ..., + weights: _ArrayLikeFloat_co | None = None, ) -> Any: ... 
@overload def quantile( a: _ArrayLikeFloat_co, q: _ArrayLikeFloat_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: L[False] = ..., + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: L[False] = False, *, - weights: _ArrayLikeFloat_co | None = ..., + weights: _ArrayLikeFloat_co | None = None, ) -> NDArray[floating]: ... @overload def quantile( a: _ArrayLikeComplex_co, q: _ArrayLikeFloat_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: L[False] = ..., + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: L[False] = False, *, - weights: _ArrayLikeFloat_co | None = ..., + weights: _ArrayLikeFloat_co | None = None, ) -> NDArray[complexfloating]: ... @overload def quantile( a: _ArrayLikeTD64_co, q: _ArrayLikeFloat_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: L[False] = ..., + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: L[False] = False, *, - weights: _ArrayLikeFloat_co | None = ..., + weights: _ArrayLikeFloat_co | None = None, ) -> NDArray[timedelta64]: ... @overload def quantile( a: _ArrayLikeDT64_co, q: _ArrayLikeFloat_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: L[False] = ..., + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: L[False] = False, *, - weights: _ArrayLikeFloat_co | None = ..., + weights: _ArrayLikeFloat_co | None = None, ) -> NDArray[datetime64]: ... 
@overload def quantile( a: _ArrayLikeObject_co, q: _ArrayLikeFloat_co, - axis: None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: L[False] = ..., + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: L[False] = False, *, - weights: _ArrayLikeFloat_co | None = ..., + weights: _ArrayLikeFloat_co | None = None, ) -> NDArray[object_]: ... @overload def quantile( a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co, q: _ArrayLikeFloat_co, - axis: _ShapeLike | None = ..., - out: None = ..., - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: bool = ..., + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: bool = False, *, - weights: _ArrayLikeFloat_co | None = ..., + weights: _ArrayLikeFloat_co | None = None, ) -> Any: ... @overload def quantile( @@ -912,23 +912,23 @@ def quantile( q: _ArrayLikeFloat_co, axis: _ShapeLike | None, out: _ArrayT, - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: bool = ..., + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: bool = False, *, - weights: _ArrayLikeFloat_co | None = ..., + weights: _ArrayLikeFloat_co | None = None, ) -> _ArrayT: ... @overload def quantile( a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co, q: _ArrayLikeFloat_co, - axis: _ShapeLike | None = ..., + axis: _ShapeLike | None = None, *, out: _ArrayT, - overwrite_input: bool = ..., - method: _MethodKind = ..., - keepdims: bool = ..., - weights: _ArrayLikeFloat_co | None = ..., + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: bool = False, + weights: _ArrayLikeFloat_co | None = None, ) -> _ArrayT: ... 
_ScalarT_fm = TypeVar( @@ -942,51 +942,51 @@ class _SupportsRMulFloat(Protocol[_T_co]): @overload def trapezoid( # type: ignore[overload-overlap] y: Sequence[_FloatLike_co], - x: Sequence[_FloatLike_co] | None = ..., - dx: float = ..., - axis: SupportsIndex = ..., + x: Sequence[_FloatLike_co] | None = None, + dx: float = 1.0, + axis: SupportsIndex = -1, ) -> float64: ... @overload def trapezoid( y: Sequence[_ComplexLike_co], - x: Sequence[_ComplexLike_co] | None = ..., - dx: float = ..., - axis: SupportsIndex = ..., + x: Sequence[_ComplexLike_co] | None = None, + dx: float = 1.0, + axis: SupportsIndex = -1, ) -> complex128: ... @overload def trapezoid( y: _ArrayLike[bool_ | integer], - x: _ArrayLike[bool_ | integer] | None = ..., - dx: float = ..., - axis: SupportsIndex = ..., + x: _ArrayLike[bool_ | integer] | None = None, + dx: float = 1.0, + axis: SupportsIndex = -1, ) -> float64 | NDArray[float64]: ... @overload def trapezoid( # type: ignore[overload-overlap] y: _ArrayLikeObject_co, - x: _ArrayLikeFloat_co | _ArrayLikeObject_co | None = ..., - dx: float = ..., - axis: SupportsIndex = ..., + x: _ArrayLikeFloat_co | _ArrayLikeObject_co | None = None, + dx: float = 1.0, + axis: SupportsIndex = -1, ) -> float | NDArray[object_]: ... @overload def trapezoid( y: _ArrayLike[_ScalarT_fm], - x: _ArrayLike[_ScalarT_fm] | _ArrayLikeInt_co | None = ..., - dx: float = ..., - axis: SupportsIndex = ..., + x: _ArrayLike[_ScalarT_fm] | _ArrayLikeInt_co | None = None, + dx: float = 1.0, + axis: SupportsIndex = -1, ) -> _ScalarT_fm | NDArray[_ScalarT_fm]: ... @overload def trapezoid( y: Sequence[_SupportsRMulFloat[_T]], - x: Sequence[_SupportsRMulFloat[_T] | _T] | None = ..., - dx: float = ..., - axis: SupportsIndex = ..., + x: Sequence[_SupportsRMulFloat[_T] | _T] | None = None, + dx: float = 1.0, + axis: SupportsIndex = -1, ) -> _T: ... 
@overload def trapezoid( y: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, - x: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co | None = ..., - dx: float = ..., - axis: SupportsIndex = ..., + x: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co | None = None, + dx: float = 1.0, + axis: SupportsIndex = -1, ) -> ( floating | complexfloating | timedelta64 | NDArray[floating | complexfloating | timedelta64 | object_] @@ -998,27 +998,27 @@ def trapz(y: ArrayLike, x: ArrayLike | None = None, dx: float = 1.0, axis: int = @overload def meshgrid( *, - copy: bool = ..., - sparse: bool = ..., - indexing: _MeshgridIdx = ..., + copy: bool = True, + sparse: bool = False, + indexing: _MeshgridIdx = "xy", ) -> tuple[()]: ... @overload def meshgrid( x1: _ArrayLike[_ScalarT], /, *, - copy: bool = ..., - sparse: bool = ..., - indexing: _MeshgridIdx = ..., + copy: bool = True, + sparse: bool = False, + indexing: _MeshgridIdx = "xy", ) -> tuple[NDArray[_ScalarT]]: ... @overload def meshgrid( x1: ArrayLike, /, *, - copy: bool = ..., - sparse: bool = ..., - indexing: _MeshgridIdx = ..., + copy: bool = True, + sparse: bool = False, + indexing: _MeshgridIdx = "xy", ) -> tuple[NDArray[Any]]: ... @overload def meshgrid( @@ -1026,9 +1026,9 @@ def meshgrid( x2: _ArrayLike[_ScalarT2], /, *, - copy: bool = ..., - sparse: bool = ..., - indexing: _MeshgridIdx = ..., + copy: bool = True, + sparse: bool = False, + indexing: _MeshgridIdx = "xy", ) -> tuple[NDArray[_ScalarT1], NDArray[_ScalarT2]]: ... @overload def meshgrid( @@ -1036,9 +1036,9 @@ def meshgrid( x2: _ArrayLike[_ScalarT], /, *, - copy: bool = ..., - sparse: bool = ..., - indexing: _MeshgridIdx = ..., + copy: bool = True, + sparse: bool = False, + indexing: _MeshgridIdx = "xy", ) -> tuple[NDArray[Any], NDArray[_ScalarT]]: ... 
@overload def meshgrid( @@ -1046,9 +1046,9 @@ def meshgrid( x2: ArrayLike, /, *, - copy: bool = ..., - sparse: bool = ..., - indexing: _MeshgridIdx = ..., + copy: bool = True, + sparse: bool = False, + indexing: _MeshgridIdx = "xy", ) -> tuple[NDArray[_ScalarT], NDArray[Any]]: ... @overload def meshgrid( @@ -1056,9 +1056,9 @@ def meshgrid( x2: ArrayLike, /, *, - copy: bool = ..., - sparse: bool = ..., - indexing: _MeshgridIdx = ..., + copy: bool = True, + sparse: bool = False, + indexing: _MeshgridIdx = "xy", ) -> tuple[NDArray[Any], NDArray[Any]]: ... @overload def meshgrid( @@ -1067,9 +1067,9 @@ def meshgrid( x3: ArrayLike, /, *, - copy: bool = ..., - sparse: bool = ..., - indexing: _MeshgridIdx = ..., + copy: bool = True, + sparse: bool = False, + indexing: _MeshgridIdx = "xy", ) -> tuple[NDArray[Any], NDArray[Any], NDArray[Any]]: ... @overload def meshgrid( @@ -1079,29 +1079,29 @@ def meshgrid( x4: ArrayLike, /, *, - copy: bool = ..., - sparse: bool = ..., - indexing: _MeshgridIdx = ..., + copy: bool = True, + sparse: bool = False, + indexing: _MeshgridIdx = "xy", ) -> tuple[NDArray[Any], NDArray[Any], NDArray[Any], NDArray[Any]]: ... @overload def meshgrid( *xi: ArrayLike, - copy: bool = ..., - sparse: bool = ..., - indexing: _MeshgridIdx = ..., + copy: bool = True, + sparse: bool = False, + indexing: _MeshgridIdx = "xy", ) -> tuple[NDArray[Any], ...]: ... @overload def delete( arr: _ArrayLike[_ScalarT], obj: slice | _ArrayLikeInt_co, - axis: SupportsIndex | None = ..., + axis: SupportsIndex | None = None, ) -> NDArray[_ScalarT]: ... @overload def delete( arr: ArrayLike, obj: slice | _ArrayLikeInt_co, - axis: SupportsIndex | None = ..., + axis: SupportsIndex | None = None, ) -> NDArray[Any]: ... @overload @@ -1109,31 +1109,31 @@ def insert( arr: _ArrayLike[_ScalarT], obj: slice | _ArrayLikeInt_co, values: ArrayLike, - axis: SupportsIndex | None = ..., + axis: SupportsIndex | None = None, ) -> NDArray[_ScalarT]: ... 
@overload def insert( arr: ArrayLike, obj: slice | _ArrayLikeInt_co, values: ArrayLike, - axis: SupportsIndex | None = ..., + axis: SupportsIndex | None = None, ) -> NDArray[Any]: ... def append( arr: ArrayLike, values: ArrayLike, - axis: SupportsIndex | None = ..., + axis: SupportsIndex | None = None, ) -> NDArray[Any]: ... @overload def digitize( x: _FloatLike_co, bins: _ArrayLikeFloat_co, - right: bool = ..., + right: bool = False, ) -> intp: ... @overload def digitize( x: _ArrayLikeFloat_co, bins: _ArrayLikeFloat_co, - right: bool = ..., + right: bool = False, ) -> NDArray[intp]: ... diff --git a/numpy/lib/_histograms_impl.pyi b/numpy/lib/_histograms_impl.pyi index 06987dd71e6b..7cb70ef21508 100644 --- a/numpy/lib/_histograms_impl.pyi +++ b/numpy/lib/_histograms_impl.pyi @@ -18,23 +18,23 @@ _BinKind: TypeAlias = L[ def histogram_bin_edges( a: ArrayLike, - bins: _BinKind | SupportsIndex | ArrayLike = ..., - range: tuple[float, float] | None = ..., - weights: ArrayLike | None = ..., + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: tuple[float, float] | None = None, + weights: ArrayLike | None = None, ) -> NDArray[Any]: ... def histogram( a: ArrayLike, - bins: _BinKind | SupportsIndex | ArrayLike = ..., - range: tuple[float, float] | None = ..., - density: bool = ..., - weights: ArrayLike | None = ..., + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: tuple[float, float] | None = None, + density: bool = None, + weights: ArrayLike | None = None, ) -> tuple[NDArray[Any], NDArray[Any]]: ... def histogramdd( sample: ArrayLike, - bins: SupportsIndex | ArrayLike = ..., - range: Sequence[tuple[float, float]] = ..., - density: bool | None = ..., - weights: ArrayLike | None = ..., + bins: SupportsIndex | ArrayLike = 10, + range: Sequence[tuple[float, float]] = None, + density: bool | None = None, + weights: ArrayLike | None = None, ) -> tuple[NDArray[Any], tuple[NDArray[Any], ...]]: ... 
diff --git a/numpy/lib/_index_tricks_impl.pyi b/numpy/lib/_index_tricks_impl.pyi index 208a8e868b48..c9ee8a5b0bb7 100644 --- a/numpy/lib/_index_tricks_impl.pyi +++ b/numpy/lib/_index_tricks_impl.pyi @@ -187,10 +187,10 @@ def ix_(*args: float | _NestedSequence[float]) -> tuple[NDArray[np.float64], ... def ix_(*args: complex | _NestedSequence[complex]) -> tuple[NDArray[np.complex128], ...]: ... # -def fill_diagonal(a: NDArray[Any], val: object, wrap: bool = ...) -> None: ... +def fill_diagonal(a: NDArray[Any], val: object, wrap: bool = False) -> None: ... # -def diag_indices(n: int, ndim: int = ...) -> tuple[NDArray[np.intp], ...]: ... +def diag_indices(n: int, ndim: int = 2) -> tuple[NDArray[np.intp], ...]: ... def diag_indices_from(arr: ArrayLike) -> tuple[NDArray[np.intp], ...]: ... # diff --git a/numpy/lib/_npyio_impl.pyi b/numpy/lib/_npyio_impl.pyi index 253fcb0fdf9e..18fce91382ab 100644 --- a/numpy/lib/_npyio_impl.pyi +++ b/numpy/lib/_npyio_impl.pyi @@ -205,88 +205,88 @@ def fromregex( def genfromtxt( fname: _FName, dtype: None = None, - comments: str = ..., - delimiter: str | int | Iterable[int] | None = ..., - skip_header: int = ..., - skip_footer: int = ..., - converters: Mapping[int | str, Callable[[str], Any]] | None = ..., - missing_values: Any = ..., - filling_values: Any = ..., - usecols: Sequence[int] | None = ..., - names: L[True] | str | Collection[str] | None = ..., - excludelist: Sequence[str] | None = ..., + comments: str = "#", + delimiter: str | int | Iterable[int] | None = None, + skip_header: int = 0, + skip_footer: int = 0, + converters: Mapping[int | str, Callable[[str], Any]] | None = None, + missing_values: Any = None, + filling_values: Any = None, + usecols: Sequence[int] | None = None, + names: L[True] | str | Collection[str] | None = None, + excludelist: Sequence[str] | None = None, deletechars: str = ..., - replace_space: str = ..., - autostrip: bool = ..., - case_sensitive: bool | L["upper", "lower"] = ..., - defaultfmt: str = ..., - 
unpack: bool | None = ..., - usemask: bool = ..., - loose: bool = ..., - invalid_raise: bool = ..., - max_rows: int | None = ..., - encoding: str = ..., + replace_space: str = "_", + autostrip: bool = False, + case_sensitive: bool | L["upper", "lower"] = True, + defaultfmt: str = "f%i", + unpack: bool | None = None, + usemask: bool = False, + loose: bool = True, + invalid_raise: bool = True, + max_rows: int | None = None, + encoding: str | None = None, *, - ndmin: L[0, 1, 2] = ..., - like: _SupportsArrayFunc | None = ..., + ndmin: L[0, 1, 2] = 0, + like: _SupportsArrayFunc | None = None, ) -> NDArray[Any]: ... @overload def genfromtxt( fname: _FName, dtype: _DTypeLike[_ScalarT], - comments: str = ..., - delimiter: str | int | Iterable[int] | None = ..., - skip_header: int = ..., - skip_footer: int = ..., - converters: Mapping[int | str, Callable[[str], Any]] | None = ..., - missing_values: Any = ..., - filling_values: Any = ..., - usecols: Sequence[int] | None = ..., - names: L[True] | str | Collection[str] | None = ..., - excludelist: Sequence[str] | None = ..., + comments: str = "#", + delimiter: str | int | Iterable[int] | None = None, + skip_header: int = 0, + skip_footer: int = 0, + converters: Mapping[int | str, Callable[[str], Any]] | None = None, + missing_values: Any = None, + filling_values: Any = None, + usecols: Sequence[int] | None = None, + names: L[True] | str | Collection[str] | None = None, + excludelist: Sequence[str] | None = None, deletechars: str = ..., - replace_space: str = ..., - autostrip: bool = ..., - case_sensitive: bool | L["upper", "lower"] = ..., - defaultfmt: str = ..., - unpack: bool | None = ..., - usemask: bool = ..., - loose: bool = ..., - invalid_raise: bool = ..., - max_rows: int | None = ..., - encoding: str = ..., + replace_space: str = "_", + autostrip: bool = False, + case_sensitive: bool | L["upper", "lower"] = True, + defaultfmt: str = "f%i", + unpack: bool | None = None, + usemask: bool = False, + loose: bool = True, + 
invalid_raise: bool = True, + max_rows: int | None = None, + encoding: str | None = None, *, - ndmin: L[0, 1, 2] = ..., - like: _SupportsArrayFunc | None = ..., + ndmin: L[0, 1, 2] = 0, + like: _SupportsArrayFunc | None = None, ) -> NDArray[_ScalarT]: ... @overload def genfromtxt( fname: _FName, dtype: DTypeLike, - comments: str = ..., - delimiter: str | int | Iterable[int] | None = ..., - skip_header: int = ..., - skip_footer: int = ..., - converters: Mapping[int | str, Callable[[str], Any]] | None = ..., - missing_values: Any = ..., - filling_values: Any = ..., - usecols: Sequence[int] | None = ..., - names: L[True] | str | Collection[str] | None = ..., - excludelist: Sequence[str] | None = ..., + comments: str = "#", + delimiter: str | int | Iterable[int] | None = None, + skip_header: int = 0, + skip_footer: int = 0, + converters: Mapping[int | str, Callable[[str], Any]] | None = None, + missing_values: Any = None, + filling_values: Any = None, + usecols: Sequence[int] | None = None, + names: L[True] | str | Collection[str] | None = None, + excludelist: Sequence[str] | None = None, deletechars: str = ..., - replace_space: str = ..., - autostrip: bool = ..., - case_sensitive: bool | L["upper", "lower"] = ..., - defaultfmt: str = ..., - unpack: bool | None = ..., - usemask: bool = ..., - loose: bool = ..., - invalid_raise: bool = ..., - max_rows: int | None = ..., - encoding: str = ..., + replace_space: str = "_", + autostrip: bool = False, + case_sensitive: bool | L["upper", "lower"] = True, + defaultfmt: str = "f%i", + unpack: bool | None = None, + usemask: bool = False, + loose: bool = True, + invalid_raise: bool = True, + max_rows: int | None = None, + encoding: str | None = None, *, - ndmin: L[0, 1, 2] = ..., - like: _SupportsArrayFunc | None = ..., + ndmin: L[0, 1, 2] = 0, + like: _SupportsArrayFunc | None = None, ) -> NDArray[Any]: ... 
@overload diff --git a/numpy/lib/_polynomial_impl.pyi b/numpy/lib/_polynomial_impl.pyi index 3b0eade1399e..0cdab5f11b5c 100644 --- a/numpy/lib/_polynomial_impl.pyi +++ b/numpy/lib/_polynomial_impl.pyi @@ -67,47 +67,47 @@ def roots(p: ArrayLike) -> NDArray[complexfloating] | NDArray[floating]: ... @overload def polyint( p: poly1d, - m: SupportsInt | SupportsIndex = ..., - k: _ArrayLikeComplex_co | _ArrayLikeObject_co | None = ..., + m: SupportsInt | SupportsIndex = 1, + k: _ArrayLikeComplex_co | _ArrayLikeObject_co | None = None, ) -> poly1d: ... @overload def polyint( p: _ArrayLikeFloat_co, - m: SupportsInt | SupportsIndex = ..., - k: _ArrayLikeFloat_co | None = ..., + m: SupportsInt | SupportsIndex = 1, + k: _ArrayLikeFloat_co | None = None, ) -> NDArray[floating]: ... @overload def polyint( p: _ArrayLikeComplex_co, - m: SupportsInt | SupportsIndex = ..., - k: _ArrayLikeComplex_co | None = ..., + m: SupportsInt | SupportsIndex = 1, + k: _ArrayLikeComplex_co | None = None, ) -> NDArray[complexfloating]: ... @overload def polyint( p: _ArrayLikeObject_co, - m: SupportsInt | SupportsIndex = ..., - k: _ArrayLikeObject_co | None = ..., + m: SupportsInt | SupportsIndex = 1, + k: _ArrayLikeObject_co | None = None, ) -> NDArray[object_]: ... @overload def polyder( p: poly1d, - m: SupportsInt | SupportsIndex = ..., + m: SupportsInt | SupportsIndex = 1, ) -> poly1d: ... @overload def polyder( p: _ArrayLikeFloat_co, - m: SupportsInt | SupportsIndex = ..., + m: SupportsInt | SupportsIndex = 1, ) -> NDArray[floating]: ... @overload def polyder( p: _ArrayLikeComplex_co, - m: SupportsInt | SupportsIndex = ..., + m: SupportsInt | SupportsIndex = 1, ) -> NDArray[complexfloating]: ... @overload def polyder( p: _ArrayLikeObject_co, - m: SupportsInt | SupportsIndex = ..., + m: SupportsInt | SupportsIndex = 1, ) -> NDArray[object_]: ... 
@overload @@ -115,40 +115,40 @@ def polyfit( x: _ArrayLikeFloat_co, y: _ArrayLikeFloat_co, deg: SupportsIndex | SupportsInt, - rcond: float | None = ..., - full: L[False] = ..., - w: _ArrayLikeFloat_co | None = ..., - cov: L[False] = ..., + rcond: float | None = None, + full: L[False] = False, + w: _ArrayLikeFloat_co | None = None, + cov: L[False] = False, ) -> NDArray[float64]: ... @overload def polyfit( x: _ArrayLikeComplex_co, y: _ArrayLikeComplex_co, deg: SupportsIndex | SupportsInt, - rcond: float | None = ..., - full: L[False] = ..., - w: _ArrayLikeFloat_co | None = ..., - cov: L[False] = ..., + rcond: float | None = None, + full: L[False] = False, + w: _ArrayLikeFloat_co | None = None, + cov: L[False] = False, ) -> NDArray[complex128]: ... @overload def polyfit( x: _ArrayLikeFloat_co, y: _ArrayLikeFloat_co, deg: SupportsIndex | SupportsInt, - rcond: float | None = ..., - full: L[False] = ..., - w: _ArrayLikeFloat_co | None = ..., - cov: L[True, "unscaled"] = ..., + rcond: float | None = None, + full: L[False] = False, + w: _ArrayLikeFloat_co | None = None, + cov: L[True, "unscaled"] = False, ) -> _2Tup[NDArray[float64]]: ... @overload def polyfit( x: _ArrayLikeComplex_co, y: _ArrayLikeComplex_co, deg: SupportsIndex | SupportsInt, - rcond: float | None = ..., - full: L[False] = ..., - w: _ArrayLikeFloat_co | None = ..., - cov: L[True, "unscaled"] = ..., + rcond: float | None = None, + full: L[False] = False, + w: _ArrayLikeFloat_co | None = None, + cov: L[True, "unscaled"] = False, ) -> _2Tup[NDArray[complex128]]: ... @overload def polyfit( @@ -157,19 +157,19 @@ def polyfit( deg: SupportsIndex | SupportsInt, rcond: float | None, full: L[True], - w: _ArrayLikeFloat_co | None = ..., - cov: bool | L["unscaled"] = ..., + w: _ArrayLikeFloat_co | None = None, + cov: bool | L["unscaled"] = False, ) -> _5Tup[NDArray[float64]]: ... 
@overload def polyfit( x: _ArrayLikeFloat_co, y: _ArrayLikeFloat_co, deg: SupportsIndex | SupportsInt, - rcond: float | None = ..., + rcond: float | None = None, *, full: L[True], - w: _ArrayLikeFloat_co | None = ..., - cov: bool | L["unscaled"] = ..., + w: _ArrayLikeFloat_co | None = None, + cov: bool | L["unscaled"] = False, ) -> _5Tup[NDArray[float64]]: ... @overload def polyfit( @@ -178,19 +178,19 @@ def polyfit( deg: SupportsIndex | SupportsInt, rcond: float | None, full: L[True], - w: _ArrayLikeFloat_co | None = ..., - cov: bool | L["unscaled"] = ..., + w: _ArrayLikeFloat_co | None = None, + cov: bool | L["unscaled"] = False, ) -> _5Tup[NDArray[complex128]]: ... @overload def polyfit( x: _ArrayLikeComplex_co, y: _ArrayLikeComplex_co, deg: SupportsIndex | SupportsInt, - rcond: float | None = ..., + rcond: float | None = None, *, full: L[True], - w: _ArrayLikeFloat_co | None = ..., - cov: bool | L["unscaled"] = ..., + w: _ArrayLikeFloat_co | None = None, + cov: bool | L["unscaled"] = False, ) -> _5Tup[NDArray[complex128]]: ... @overload diff --git a/numpy/lib/_shape_base_impl.pyi b/numpy/lib/_shape_base_impl.pyi index 0206d95109fa..f8ba31d0f774 100644 --- a/numpy/lib/_shape_base_impl.pyi +++ b/numpy/lib/_shape_base_impl.pyi @@ -79,7 +79,7 @@ class _SupportsArrayWrap(Protocol): def take_along_axis( arr: _ScalarT | NDArray[_ScalarT], indices: NDArray[integer], - axis: int | None = ..., + axis: int | None = -1, ) -> NDArray[_ScalarT]: ... def put_along_axis( @@ -147,26 +147,26 @@ def dstack(tup: Sequence[ArrayLike]) -> NDArray[Any]: ... def array_split( ary: _ArrayLike[_ScalarT], indices_or_sections: _ShapeLike, - axis: SupportsIndex = ..., + axis: SupportsIndex = 0, ) -> list[NDArray[_ScalarT]]: ... @overload def array_split( ary: ArrayLike, indices_or_sections: _ShapeLike, - axis: SupportsIndex = ..., + axis: SupportsIndex = 0, ) -> list[NDArray[Any]]: ... 
@overload def split( ary: _ArrayLike[_ScalarT], indices_or_sections: _ShapeLike, - axis: SupportsIndex = ..., + axis: SupportsIndex = 0, ) -> list[NDArray[_ScalarT]]: ... @overload def split( ary: ArrayLike, indices_or_sections: _ShapeLike, - axis: SupportsIndex = ..., + axis: SupportsIndex = 0, ) -> list[NDArray[Any]]: ... @overload diff --git a/numpy/lib/_stride_tricks_impl.pyi b/numpy/lib/_stride_tricks_impl.pyi index a7005d702d96..51d377e06a18 100644 --- a/numpy/lib/_stride_tricks_impl.pyi +++ b/numpy/lib/_stride_tricks_impl.pyi @@ -20,55 +20,55 @@ class DummyArray: @overload def as_strided( x: _ArrayLike[_ScalarT], - shape: Iterable[int] | None = ..., - strides: Iterable[int] | None = ..., - subok: bool = ..., - writeable: bool = ..., + shape: Iterable[int] | None = None, + strides: Iterable[int] | None = None, + subok: bool = False, + writeable: bool = True, ) -> NDArray[_ScalarT]: ... @overload def as_strided( x: ArrayLike, - shape: Iterable[int] | None = ..., - strides: Iterable[int] | None = ..., - subok: bool = ..., - writeable: bool = ..., + shape: Iterable[int] | None = None, + strides: Iterable[int] | None = None, + subok: bool = False, + writeable: bool = True, ) -> NDArray[Any]: ... @overload def sliding_window_view( x: _ArrayLike[_ScalarT], window_shape: int | Iterable[int], - axis: SupportsIndex | None = ..., + axis: SupportsIndex | None = None, *, - subok: bool = ..., - writeable: bool = ..., + subok: bool = False, + writeable: bool = False, ) -> NDArray[_ScalarT]: ... @overload def sliding_window_view( x: ArrayLike, window_shape: int | Iterable[int], - axis: SupportsIndex | None = ..., + axis: SupportsIndex | None = None, *, - subok: bool = ..., - writeable: bool = ..., + subok: bool = False, + writeable: bool = False, ) -> NDArray[Any]: ... @overload def broadcast_to( array: _ArrayLike[_ScalarT], shape: int | Iterable[int], - subok: bool = ..., + subok: bool = False, ) -> NDArray[_ScalarT]: ... 
@overload def broadcast_to( array: ArrayLike, shape: int | Iterable[int], - subok: bool = ..., + subok: bool = False, ) -> NDArray[Any]: ... def broadcast_shapes(*args: _ShapeLike) -> _AnyShape: ... def broadcast_arrays( *args: ArrayLike, - subok: bool = ..., + subok: bool = False, ) -> tuple[NDArray[Any], ...]: ... diff --git a/numpy/lib/_twodim_base_impl.pyi b/numpy/lib/_twodim_base_impl.pyi index 9e70d0a617f6..2c4de938444f 100644 --- a/numpy/lib/_twodim_base_impl.pyi +++ b/numpy/lib/_twodim_base_impl.pyi @@ -84,13 +84,13 @@ def flipud(m: ArrayLike) -> NDArray[Any]: ... @overload def eye( N: int, - M: int | None = ..., - k: int = ..., - dtype: None = ..., - order: _OrderCF = ..., + M: int | None = None, + k: int = 0, + dtype: None = ..., # = float + order: _OrderCF = "C", *, - device: L["cpu"] | None = ..., - like: _SupportsArrayFunc | None = ..., + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, ) -> NDArray[float64]: ... @overload def eye( @@ -98,52 +98,52 @@ def eye( M: int | None, k: int, dtype: _DTypeLike[_ScalarT], - order: _OrderCF = ..., + order: _OrderCF = "C", *, - device: L["cpu"] | None = ..., - like: _SupportsArrayFunc | None = ..., + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, ) -> NDArray[_ScalarT]: ... @overload def eye( N: int, - M: int | None = ..., - k: int = ..., + M: int | None = None, + k: int = 0, *, dtype: _DTypeLike[_ScalarT], - order: _OrderCF = ..., - device: L["cpu"] | None = ..., - like: _SupportsArrayFunc | None = ..., + order: _OrderCF = "C", + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, ) -> NDArray[_ScalarT]: ... 
@overload def eye( N: int, - M: int | None = ..., - k: int = ..., - dtype: DTypeLike = ..., - order: _OrderCF = ..., + M: int | None = None, + k: int = 0, + dtype: DTypeLike = ..., # = float + order: _OrderCF = "C", *, - device: L["cpu"] | None = ..., - like: _SupportsArrayFunc | None = ..., + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, ) -> NDArray[Any]: ... @overload -def diag(v: _ArrayLike[_ScalarT], k: int = ...) -> NDArray[_ScalarT]: ... +def diag(v: _ArrayLike[_ScalarT], k: int = 0) -> NDArray[_ScalarT]: ... @overload -def diag(v: ArrayLike, k: int = ...) -> NDArray[Any]: ... +def diag(v: ArrayLike, k: int = 0) -> NDArray[Any]: ... @overload -def diagflat(v: _ArrayLike[_ScalarT], k: int = ...) -> NDArray[_ScalarT]: ... +def diagflat(v: _ArrayLike[_ScalarT], k: int = 0) -> NDArray[_ScalarT]: ... @overload -def diagflat(v: ArrayLike, k: int = ...) -> NDArray[Any]: ... +def diagflat(v: ArrayLike, k: int = 0) -> NDArray[Any]: ... @overload def tri( N: int, - M: int | None = ..., - k: int = ..., - dtype: None = ..., + M: int | None = None, + k: int = 0, + dtype: None = ..., # = float *, - like: _SupportsArrayFunc | None = ... + like: _SupportsArrayFunc | None = None ) -> NDArray[float64]: ... @overload def tri( @@ -152,25 +152,25 @@ def tri( k: int, dtype: _DTypeLike[_ScalarT], *, - like: _SupportsArrayFunc | None = ... + like: _SupportsArrayFunc | None = None ) -> NDArray[_ScalarT]: ... @overload def tri( N: int, - M: int | None = ..., - k: int = ..., + M: int | None = None, + k: int = 0, *, dtype: _DTypeLike[_ScalarT], - like: _SupportsArrayFunc | None = ... + like: _SupportsArrayFunc | None = None ) -> NDArray[_ScalarT]: ... @overload def tri( N: int, - M: int | None = ..., - k: int = ..., - dtype: DTypeLike = ..., + M: int | None = None, + k: int = 0, + dtype: DTypeLike = ..., # = float *, - like: _SupportsArrayFunc | None = ... + like: _SupportsArrayFunc | None = None ) -> NDArray[Any]: ... 
@overload @@ -186,36 +186,36 @@ def triu(m: ArrayLike, k: int = 0) -> NDArray[Any]: ... @overload def vander( # type: ignore[misc] x: _ArrayLikeInt_co, - N: int | None = ..., - increasing: bool = ..., + N: int | None = None, + increasing: bool = False, ) -> NDArray[signedinteger]: ... @overload def vander( # type: ignore[misc] x: _ArrayLikeFloat_co, - N: int | None = ..., - increasing: bool = ..., + N: int | None = None, + increasing: bool = False, ) -> NDArray[floating]: ... @overload def vander( x: _ArrayLikeComplex_co, - N: int | None = ..., - increasing: bool = ..., + N: int | None = None, + increasing: bool = False, ) -> NDArray[complexfloating]: ... @overload def vander( x: _ArrayLikeObject_co, - N: int | None = ..., - increasing: bool = ..., + N: int | None = None, + increasing: bool = False, ) -> NDArray[object_]: ... @overload def histogram2d( x: _ArrayLike1D[_ComplexFloatingT], y: _ArrayLike1D[_ComplexFloatingT | _Float_co], - bins: int | Sequence[int] = ..., - range: _ArrayLike2DFloat_co | None = ..., - density: bool | None = ..., - weights: _ArrayLike1DFloat_co | None = ..., + bins: int | Sequence[int] = 10, + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, ) -> tuple[ NDArray[float64], NDArray[_ComplexFloatingT], @@ -225,10 +225,10 @@ def histogram2d( def histogram2d( x: _ArrayLike1D[_ComplexFloatingT | _Float_co], y: _ArrayLike1D[_ComplexFloatingT], - bins: int | Sequence[int] = ..., - range: _ArrayLike2DFloat_co | None = ..., - density: bool | None = ..., - weights: _ArrayLike1DFloat_co | None = ..., + bins: int | Sequence[int] = 10, + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, ) -> tuple[ NDArray[float64], NDArray[_ComplexFloatingT], @@ -238,10 +238,10 @@ def histogram2d( def histogram2d( x: _ArrayLike1D[_InexactT], y: _ArrayLike1D[_InexactT | _Int_co], - bins: int | Sequence[int] = ..., - range: 
_ArrayLike2DFloat_co | None = ..., - density: bool | None = ..., - weights: _ArrayLike1DFloat_co | None = ..., + bins: int | Sequence[int] = 10, + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, ) -> tuple[ NDArray[float64], NDArray[_InexactT], @@ -251,10 +251,10 @@ def histogram2d( def histogram2d( x: _ArrayLike1D[_InexactT | _Int_co], y: _ArrayLike1D[_InexactT], - bins: int | Sequence[int] = ..., - range: _ArrayLike2DFloat_co | None = ..., - density: bool | None = ..., - weights: _ArrayLike1DFloat_co | None = ..., + bins: int | Sequence[int] = 10, + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, ) -> tuple[ NDArray[float64], NDArray[_InexactT], @@ -264,10 +264,10 @@ def histogram2d( def histogram2d( x: _ArrayLike1DInt_co | Sequence[float], y: _ArrayLike1DInt_co | Sequence[float], - bins: int | Sequence[int] = ..., - range: _ArrayLike2DFloat_co | None = ..., - density: bool | None = ..., - weights: _ArrayLike1DFloat_co | None = ..., + bins: int | Sequence[int] = 10, + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, ) -> tuple[ NDArray[float64], NDArray[float64], @@ -277,10 +277,10 @@ def histogram2d( def histogram2d( x: Sequence[complex], y: Sequence[complex], - bins: int | Sequence[int] = ..., - range: _ArrayLike2DFloat_co | None = ..., - density: bool | None = ..., - weights: _ArrayLike1DFloat_co | None = ..., + bins: int | Sequence[int] = 10, + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, ) -> tuple[ NDArray[float64], NDArray[complex128 | float64], @@ -291,9 +291,9 @@ def histogram2d( x: _ArrayLike1DNumber_co, y: _ArrayLike1DNumber_co, bins: _ArrayLike1D[_NumberCoT] | Sequence[_ArrayLike1D[_NumberCoT]], - range: _ArrayLike2DFloat_co | None = ..., - density: bool | None = 
..., - weights: _ArrayLike1DFloat_co | None = ..., + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, ) -> tuple[ NDArray[float64], NDArray[_NumberCoT], @@ -304,9 +304,9 @@ def histogram2d( x: _ArrayLike1D[_InexactT], y: _ArrayLike1D[_InexactT], bins: Sequence[_ArrayLike1D[_NumberCoT] | int], - range: _ArrayLike2DFloat_co | None = ..., - density: bool | None = ..., - weights: _ArrayLike1DFloat_co | None = ..., + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, ) -> tuple[ NDArray[float64], NDArray[_NumberCoT | _InexactT], @@ -317,9 +317,9 @@ def histogram2d( x: _ArrayLike1DInt_co | Sequence[float], y: _ArrayLike1DInt_co | Sequence[float], bins: Sequence[_ArrayLike1D[_NumberCoT] | int], - range: _ArrayLike2DFloat_co | None = ..., - density: bool | None = ..., - weights: _ArrayLike1DFloat_co | None = ..., + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, ) -> tuple[ NDArray[float64], NDArray[_NumberCoT | float64], @@ -330,9 +330,9 @@ def histogram2d( x: Sequence[complex], y: Sequence[complex], bins: Sequence[_ArrayLike1D[_NumberCoT] | int], - range: _ArrayLike2DFloat_co | None = ..., - density: bool | None = ..., - weights: _ArrayLike1DFloat_co | None = ..., + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, ) -> tuple[ NDArray[float64], NDArray[_NumberCoT | complex128 | float64], @@ -343,9 +343,9 @@ def histogram2d( x: _ArrayLike1DNumber_co, y: _ArrayLike1DNumber_co, bins: Sequence[Sequence[bool]], - range: _ArrayLike2DFloat_co | None = ..., - density: bool | None = ..., - weights: _ArrayLike1DFloat_co | None = ..., + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, ) -> tuple[ NDArray[float64], 
NDArray[np.bool], @@ -356,9 +356,9 @@ def histogram2d( x: _ArrayLike1DNumber_co, y: _ArrayLike1DNumber_co, bins: Sequence[Sequence[int]], - range: _ArrayLike2DFloat_co | None = ..., - density: bool | None = ..., - weights: _ArrayLike1DFloat_co | None = ..., + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, ) -> tuple[ NDArray[float64], NDArray[np.int_ | np.bool], @@ -369,9 +369,9 @@ def histogram2d( x: _ArrayLike1DNumber_co, y: _ArrayLike1DNumber_co, bins: Sequence[Sequence[float]], - range: _ArrayLike2DFloat_co | None = ..., - density: bool | None = ..., - weights: _ArrayLike1DFloat_co | None = ..., + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, ) -> tuple[ NDArray[float64], NDArray[np.float64 | np.int_ | np.bool], @@ -382,9 +382,9 @@ def histogram2d( x: _ArrayLike1DNumber_co, y: _ArrayLike1DNumber_co, bins: Sequence[Sequence[complex]], - range: _ArrayLike2DFloat_co | None = ..., - density: bool | None = ..., - weights: _ArrayLike1DFloat_co | None = ..., + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, ) -> tuple[ NDArray[float64], NDArray[np.complex128 | np.float64 | np.int_ | np.bool], @@ -398,7 +398,7 @@ def histogram2d( def mask_indices( n: int, mask_func: _MaskFunc[int], - k: int = ..., + k: int = 0, ) -> tuple[NDArray[intp], NDArray[intp]]: ... @overload def mask_indices( @@ -409,22 +409,22 @@ def mask_indices( def tril_indices( n: int, - k: int = ..., - m: int | None = ..., + k: int = 0, + m: int | None = None, ) -> tuple[NDArray[int_], NDArray[int_]]: ... def tril_indices_from( arr: NDArray[Any], - k: int = ..., + k: int = 0, ) -> tuple[NDArray[int_], NDArray[int_]]: ... def triu_indices( n: int, - k: int = ..., - m: int | None = ..., + k: int = 0, + m: int | None = None, ) -> tuple[NDArray[int_], NDArray[int_]]: ... 
def triu_indices_from( arr: NDArray[Any], - k: int = ..., + k: int = 0, ) -> tuple[NDArray[int_], NDArray[int_]]: ... diff --git a/numpy/lib/_ufunclike_impl.pyi b/numpy/lib/_ufunclike_impl.pyi index a673f05c010d..d5c3151fb892 100644 --- a/numpy/lib/_ufunclike_impl.pyi +++ b/numpy/lib/_ufunclike_impl.pyi @@ -16,17 +16,17 @@ _ArrayT = TypeVar("_ArrayT", bound=NDArray[Any]) @overload def fix( # type: ignore[misc] x: _FloatLike_co, - out: None = ..., + out: None = None, ) -> floating: ... @overload def fix( x: _ArrayLikeFloat_co, - out: None = ..., + out: None = None, ) -> NDArray[floating]: ... @overload def fix( x: _ArrayLikeObject_co, - out: None = ..., + out: None = None, ) -> NDArray[object_]: ... @overload def fix( @@ -37,12 +37,12 @@ def fix( @overload def isposinf( # type: ignore[misc] x: _FloatLike_co, - out: None = ..., + out: None = None, ) -> np.bool: ... @overload def isposinf( x: _ArrayLikeFloat_co, - out: None = ..., + out: None = None, ) -> NDArray[np.bool]: ... @overload def isposinf( @@ -53,12 +53,12 @@ def isposinf( @overload def isneginf( # type: ignore[misc] x: _FloatLike_co, - out: None = ..., + out: None = None, ) -> np.bool: ... @overload def isneginf( x: _ArrayLikeFloat_co, - out: None = ..., + out: None = None, ) -> NDArray[np.bool]: ... @overload def isneginf( From 71eebaf8513dfd6ed40b0c689702d81e3f6f2527 Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Fri, 22 Aug 2025 15:49:05 +0200 Subject: [PATCH 0349/1718] BUG: make round consistently return a copy (#29137) * BUG: make round consistently return a copy Otherwise, `round` returns a view for integer arguments and a copy otherwise. All other "rounding" functions (ceil, floor, trunc, rint), always return copies. Thus, make `round` consistent with them. 
* DOC: add a release note snippet for the `round` change * TST: add a test for round preserving the order * MAINT: address review comments * BUG: round: NPY_KEEPORDER for complex arrays, too --- .../upcoming_changes/29137.compatibility.rst | 3 +++ numpy/_core/src/multiarray/calculation.c | 5 ++--- numpy/_core/tests/test_multiarray.py | 15 +++++++++++++++ 3 files changed, 20 insertions(+), 3 deletions(-) create mode 100644 doc/release/upcoming_changes/29137.compatibility.rst diff --git a/doc/release/upcoming_changes/29137.compatibility.rst b/doc/release/upcoming_changes/29137.compatibility.rst new file mode 100644 index 000000000000..3ac9da2a4c48 --- /dev/null +++ b/doc/release/upcoming_changes/29137.compatibility.rst @@ -0,0 +1,3 @@ +* `numpy.round` now always returns a copy. Previously, it returned a view + for integer inputs for ``decimals >= 0`` and a copy in all other cases. + This change brings ``round`` in line with ``ceil``, ``floor`` and ``trunc``. diff --git a/numpy/_core/src/multiarray/calculation.c b/numpy/_core/src/multiarray/calculation.c index 87f03a94fa5f..0d855281d57a 100644 --- a/numpy/_core/src/multiarray/calculation.c +++ b/numpy/_core/src/multiarray/calculation.c @@ -576,7 +576,7 @@ PyArray_Round(PyArrayObject *a, int decimals, PyArrayObject *out) Py_INCREF(arr); } else { - arr = PyArray_Copy(a); + arr = PyArray_NewCopy(a, NPY_KEEPORDER); if (arr == NULL) { return NULL; } @@ -637,8 +637,7 @@ PyArray_Round(PyArrayObject *a, int decimals, PyArrayObject *out) return (PyObject *)out; } else { - Py_INCREF(a); - return (PyObject *)a; + return PyArray_NewCopy(a, NPY_KEEPORDER); } } if (decimals == 0) { diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index da4eeb91cfc2..d5aac78c4a4d 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -2153,6 +2153,7 @@ def check_round(arr, expected, *round_args): assert_equal(out, expected) assert out is res + check_round(np.array([1, 
2, 3]), [1, 2, 3]) check_round(np.array([1.2, 1.5]), [1, 2]) check_round(np.array(1.5), 2) check_round(np.array([12.2, 15.5]), [10, 20], -1) @@ -2161,6 +2162,20 @@ def check_round(arr, expected, *round_args): check_round(np.array([4.5 + 1.5j]), [4 + 2j]) check_round(np.array([12.5 + 15.5j]), [10 + 20j], -1) + @pytest.mark.parametrize('dt', ['uint8', int, float, complex]) + def test_round_copies(self, dt): + a = np.arange(3, dtype=dt) + assert not np.shares_memory(a.round(), a) + assert not np.shares_memory(a.round(decimals=2), a) + + out = np.empty(3, dtype=dt) + assert not np.shares_memory(a.round(out=out), a) + + a = np.arange(12).astype(dt).reshape(3, 4).T + + assert a.flags.f_contiguous + assert np.round(a).flags.f_contiguous + def test_squeeze(self): a = np.array([[[1], [2], [3]]]) assert_equal(a.squeeze(), [1, 2, 3]) From 0034f453d126bd7e29ac0eb055013cead6ebb537 Mon Sep 17 00:00:00 2001 From: Mohit Deoli <141480197+devmt04@users.noreply.github.com> Date: Fri, 22 Aug 2025 20:39:33 +0530 Subject: [PATCH 0350/1718] Update doc/source/reference/arrays.datetime.rst Co-authored-by: Joren Hammudoglu --- doc/source/reference/arrays.datetime.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/reference/arrays.datetime.rst b/doc/source/reference/arrays.datetime.rst index 59e11fa91f5a..895d725c1f28 100644 --- a/doc/source/reference/arrays.datetime.rst +++ b/doc/source/reference/arrays.datetime.rst @@ -350,7 +350,7 @@ The protocol is described in the following table: -In the casea where conversion of `datetime64` and/or `timedelta64` is done against Python types like ``int``, ``float``, and ``str`` the corresponding return types will be ``np.str_``, ``np.int64`` and ``np.float64``. +In the case where conversion of `datetime64` and/or `timedelta64` is done against Python types like ``int``, ``float``, and ``str`` the corresponding return types will be ``np.str_``, ``np.int64`` and ``np.float64``. .. 
admonition:: Example From f5234ec0f62850b0944ef1429e7ef93e3f557c01 Mon Sep 17 00:00:00 2001 From: Mohit Deoli Date: Fri, 22 Aug 2025 21:10:25 +0530 Subject: [PATCH 0351/1718] DOC: Update doc/source/reference/arrays.datetime.rst --- doc/source/reference/arrays.datetime.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/reference/arrays.datetime.rst b/doc/source/reference/arrays.datetime.rst index 895d725c1f28..2235f92e0bab 100644 --- a/doc/source/reference/arrays.datetime.rst +++ b/doc/source/reference/arrays.datetime.rst @@ -324,8 +324,8 @@ The protocol is described in the following table: ns/ps/fs/as ``int`` ``int`` μs/ms/s/m/h `datetime.datetime` `datetime.timedelta` D/W (Linear units) `datetime.date` `datetime.timedelta` - Y/M (Non-linear units) `datetime.date` ``int`` - Generic units `datetime.date` ``int`` + Y/M (Non-linear units) `datetime.date` ``int`` + Generic units `datetime.date` ``int`` ================================ ================================= ================================== .. 
admonition:: Example From 1aa051052ca0d8b4d3350d19a0d99a8091454ec3 Mon Sep 17 00:00:00 2001 From: Britney Whittington <103079612+bwhitt7@users.noreply.github.com> Date: Fri, 22 Aug 2025 12:05:07 -0400 Subject: [PATCH 0352/1718] TST: Replace xunit setup with methods (#29605) --- numpy/_core/tests/test_half.py | 98 ++++++++++++++------------ numpy/_core/tests/test_indexing.py | 54 +++++++------- numpy/_core/tests/test_numerictypes.py | 9 ++- numpy/_core/tests/test_records.py | 18 ++--- 4 files changed, 97 insertions(+), 82 deletions(-) diff --git a/numpy/_core/tests/test_half.py b/numpy/_core/tests/test_half.py index 711c13655b7a..ef13c5fbcde6 100644 --- a/numpy/_core/tests/test_half.py +++ b/numpy/_core/tests/test_half.py @@ -18,56 +18,63 @@ def assert_raises_fpe(strmatch, callable, *args, **kwargs): f"Did not raise floating point {strmatch} error") class TestHalf: - def setup_method(self): + def _create_arrays_all(self): # An array of all possible float16 values - self.all_f16 = np.arange(0x10000, dtype=uint16) - self.all_f16 = self.all_f16.view(float16) + all_f16 = np.arange(0x10000, dtype=uint16) + all_f16 = all_f16.view(float16) # NaN value can cause an invalid FP exception if HW is being used with np.errstate(invalid='ignore'): - self.all_f32 = np.array(self.all_f16, dtype=float32) - self.all_f64 = np.array(self.all_f16, dtype=float64) + all_f32 = np.array(all_f16, dtype=float32) + all_f64 = np.array(all_f16, dtype=float64) + return all_f16, all_f32, all_f64 + def _create_arrays_nonan(self): # An array of all non-NaN float16 values, in sorted order - self.nonan_f16 = np.concatenate( + nonan_f16 = np.concatenate( (np.arange(0xfc00, 0x7fff, -1, dtype=uint16), np.arange(0x0000, 0x7c01, 1, dtype=uint16))) - self.nonan_f16 = self.nonan_f16.view(float16) - self.nonan_f32 = np.array(self.nonan_f16, dtype=float32) - self.nonan_f64 = np.array(self.nonan_f16, dtype=float64) - - # An array of all finite float16 values, in sorted order - self.finite_f16 = 
self.nonan_f16[1:-1] - self.finite_f32 = self.nonan_f32[1:-1] - self.finite_f64 = self.nonan_f64[1:-1] + nonan_f16 = nonan_f16.view(float16) + nonan_f32 = np.array(nonan_f16, dtype=float32) + nonan_f64 = np.array(nonan_f16, dtype=float64) + return nonan_f16, nonan_f32, nonan_f64 + + def _create_arrays_finite(self): + nonan_f16, nonan_f32, nonan_f64 = self._create_arrays_nonan() + finite_f16 = nonan_f16[1:-1] + finite_f32 = nonan_f32[1:-1] + finite_f64 = nonan_f64[1:-1] + return finite_f16, finite_f32, finite_f64 def test_half_conversions(self): """Checks that all 16-bit values survive conversion to/from 32-bit and 64-bit float""" # Because the underlying routines preserve the NaN bits, every # value is preserved when converting to/from other floats. + all_f16, all_f32, all_f64 = self._create_arrays_all() + nonan_f16, _, _ = self._create_arrays_nonan() # Convert from float32 back to float16 with np.errstate(invalid='ignore'): - b = np.array(self.all_f32, dtype=float16) + b = np.array(all_f32, dtype=float16) # avoid testing NaNs due to differing bit patterns in Q/S NaNs b_nn = b == b - assert_equal(self.all_f16[b_nn].view(dtype=uint16), + assert_equal(all_f16[b_nn].view(dtype=uint16), b[b_nn].view(dtype=uint16)) # Convert from float64 back to float16 with np.errstate(invalid='ignore'): - b = np.array(self.all_f64, dtype=float16) + b = np.array(all_f64, dtype=float16) b_nn = b == b - assert_equal(self.all_f16[b_nn].view(dtype=uint16), + assert_equal(all_f16[b_nn].view(dtype=uint16), b[b_nn].view(dtype=uint16)) # Convert float16 to longdouble and back # This doesn't necessarily preserve the extra NaN bits, # so exclude NaNs. 
- a_ld = np.array(self.nonan_f16, dtype=np.longdouble) + a_ld = np.array(nonan_f16, dtype=np.longdouble) b = np.array(a_ld, dtype=float16) - assert_equal(self.nonan_f16.view(dtype=uint16), + assert_equal(nonan_f16.view(dtype=uint16), b.view(dtype=uint16)) # Check the range for which all integers can be represented @@ -171,34 +178,35 @@ def test_half_conversion_denormal_round_even(self, float_t, uint_t, bits): assert larger_value.astype(np.float16) == smallest_value def test_nans_infs(self): + all_f16, all_f32, _ = self._create_arrays_all() with np.errstate(all='ignore'): # Check some of the ufuncs - assert_equal(np.isnan(self.all_f16), np.isnan(self.all_f32)) - assert_equal(np.isinf(self.all_f16), np.isinf(self.all_f32)) - assert_equal(np.isfinite(self.all_f16), np.isfinite(self.all_f32)) - assert_equal(np.signbit(self.all_f16), np.signbit(self.all_f32)) + assert_equal(np.isnan(all_f16), np.isnan(all_f32)) + assert_equal(np.isinf(all_f16), np.isinf(all_f32)) + assert_equal(np.isfinite(all_f16), np.isfinite(all_f32)) + assert_equal(np.signbit(all_f16), np.signbit(all_f32)) assert_equal(np.spacing(float16(65504)), np.inf) # Check comparisons of all values with NaN nan = float16(np.nan) - assert_(not (self.all_f16 == nan).any()) - assert_(not (nan == self.all_f16).any()) + assert_(not (all_f16 == nan).any()) + assert_(not (nan == all_f16).any()) - assert_((self.all_f16 != nan).all()) - assert_((nan != self.all_f16).all()) + assert_((all_f16 != nan).all()) + assert_((nan != all_f16).all()) - assert_(not (self.all_f16 < nan).any()) - assert_(not (nan < self.all_f16).any()) + assert_(not (all_f16 < nan).any()) + assert_(not (nan < all_f16).any()) - assert_(not (self.all_f16 <= nan).any()) - assert_(not (nan <= self.all_f16).any()) + assert_(not (all_f16 <= nan).any()) + assert_(not (nan <= all_f16).any()) - assert_(not (self.all_f16 > nan).any()) - assert_(not (nan > self.all_f16).any()) + assert_(not (all_f16 > nan).any()) + assert_(not (nan > all_f16).any()) - 
assert_(not (self.all_f16 >= nan).any()) - assert_(not (nan >= self.all_f16).any()) + assert_(not (all_f16 >= nan).any()) + assert_(not (nan >= all_f16).any()) def test_half_values(self): """Confirms a small number of known half values""" @@ -255,9 +263,10 @@ def test_half_rounding(self): def test_half_correctness(self): """Take every finite float16, and check the casting functions with a manual conversion.""" + finite_f16, finite_f32, finite_f64 = self._create_arrays_finite() # Create an array of all finite float16s - a_bits = self.finite_f16.view(dtype=uint16) + a_bits = finite_f16.view(dtype=uint16) # Convert to 64-bit float manually a_sgn = (-1.0)**((a_bits & 0x8000) >> 15) @@ -270,29 +279,30 @@ def test_half_correctness(self): a_manual = a_sgn * a_man * 2.0**a_exp - a32_fail = np.nonzero(self.finite_f32 != a_manual)[0] + a32_fail = np.nonzero(finite_f32 != a_manual)[0] if len(a32_fail) != 0: bad_index = a32_fail[0] - assert_equal(self.finite_f32, a_manual, + assert_equal(finite_f32, a_manual, "First non-equal is half value 0x%x -> %g != %g" % (a_bits[bad_index], - self.finite_f32[bad_index], + finite_f32[bad_index], a_manual[bad_index])) - a64_fail = np.nonzero(self.finite_f64 != a_manual)[0] + a64_fail = np.nonzero(finite_f64 != a_manual)[0] if len(a64_fail) != 0: bad_index = a64_fail[0] - assert_equal(self.finite_f64, a_manual, + assert_equal(finite_f64, a_manual, "First non-equal is half value 0x%x -> %g != %g" % (a_bits[bad_index], - self.finite_f64[bad_index], + finite_f64[bad_index], a_manual[bad_index])) def test_half_ordering(self): """Make sure comparisons are working right""" + nonan_f16, _, _ = self._create_arrays_nonan() # All non-NaN float16 values in reverse order - a = self.nonan_f16[::-1].copy() + a = nonan_f16[::-1].copy() # 32-bit float copy b = np.array(a, dtype=float32) diff --git a/numpy/_core/tests/test_indexing.py b/numpy/_core/tests/test_indexing.py index c65468ebd24a..b4bb53fa71e1 100644 --- a/numpy/_core/tests/test_indexing.py +++ 
b/numpy/_core/tests/test_indexing.py @@ -776,6 +776,7 @@ def test_boolean_index_cast_assign(self): zero_array.__setitem__, bool_index, np.array([1j])) assert_equal(zero_array[0, 1], 0) + class TestFancyIndexingEquivalence: def test_object_assign(self): # Check that the field and object special case using copyto is active. @@ -846,10 +847,11 @@ class TestMultiIndexingAutomated: """ - def setup_method(self): - self.a = np.arange(np.prod([3, 1, 5, 6])).reshape(3, 1, 5, 6) - self.b = np.empty((3, 0, 5, 6)) - self.complex_indices = ['skip', Ellipsis, + def _create_array(self): + return np.arange(np.prod([3, 1, 5, 6])).reshape(3, 1, 5, 6) + + def _create_complex_indices(self): + return ['skip', Ellipsis, 0, # Boolean indices, up to 3-d for some special cases of eating up # dimensions, also need to test all False @@ -869,11 +871,6 @@ def setup_method(self): np.array([2, -1], dtype=np.int8), np.zeros([1] * 31, dtype=int), # trigger too large array. np.array([0., 1.])] # invalid datatype - # Some simpler indices that still cover a bit more - self.simple_indices = [Ellipsis, None, -1, [1], np.array([True]), - 'skip'] - # Very simple ones to fill the rest: - self.fill_indices = [slice(None, None), 0] def _get_multi_index(self, arr, indices): """Mimic multi dimensional indexing. @@ -1206,16 +1203,23 @@ def test_boolean(self): # it is aligned to the left. 
This is probably correct for # consistency with arr[boolean_array,] also no broadcasting # is done at all + a = self._create_array() self._check_multi_index( - self.a, (np.zeros_like(self.a, dtype=bool),)) + a, (np.zeros_like(a, dtype=bool),)) self._check_multi_index( - self.a, (np.zeros_like(self.a, dtype=bool)[..., 0],)) + a, (np.zeros_like(a, dtype=bool)[..., 0],)) self._check_multi_index( - self.a, (np.zeros_like(self.a, dtype=bool)[None, ...],)) + a, (np.zeros_like(a, dtype=bool)[None, ...],)) def test_multidim(self): # Automatically test combinations with complex indexes on 2nd (or 1st) # spot and the simple ones in one other spot. + a = self._create_array() + b = np.empty((3, 0, 5, 6)) + complex_indices = self._create_complex_indices() + simple_indices = [Ellipsis, None, -1, [1], np.array([True]), 'skip'] + fill_indices = [slice(None, None), 0] + with warnings.catch_warnings(): # This is so that np.array(True) is not accepted in a full integer # index, when running the file separately. 
@@ -1226,28 +1230,30 @@ def isskip(idx): return isinstance(idx, str) and idx == "skip" for simple_pos in [0, 2, 3]: - tocheck = [self.fill_indices, self.complex_indices, - self.fill_indices, self.fill_indices] - tocheck[simple_pos] = self.simple_indices + tocheck = [fill_indices, complex_indices, + fill_indices, fill_indices] + tocheck[simple_pos] = simple_indices for index in product(*tocheck): index = tuple(i for i in index if not isskip(i)) - self._check_multi_index(self.a, index) - self._check_multi_index(self.b, index) + self._check_multi_index(a, index) + self._check_multi_index(b, index) # Check very simple item getting: - self._check_multi_index(self.a, (0, 0, 0, 0)) - self._check_multi_index(self.b, (0, 0, 0, 0)) + self._check_multi_index(a, (0, 0, 0, 0)) + self._check_multi_index(b, (0, 0, 0, 0)) # Also check (simple cases of) too many indices: - assert_raises(IndexError, self.a.__getitem__, (0, 0, 0, 0, 0)) - assert_raises(IndexError, self.a.__setitem__, (0, 0, 0, 0, 0), 0) - assert_raises(IndexError, self.a.__getitem__, (0, 0, [1], 0, 0)) - assert_raises(IndexError, self.a.__setitem__, (0, 0, [1], 0, 0), 0) + assert_raises(IndexError, a.__getitem__, (0, 0, 0, 0, 0)) + assert_raises(IndexError, a.__setitem__, (0, 0, 0, 0, 0), 0) + assert_raises(IndexError, a.__getitem__, (0, 0, [1], 0, 0)) + assert_raises(IndexError, a.__setitem__, (0, 0, [1], 0, 0), 0) def test_1d(self): a = np.arange(10) - for index in self.complex_indices: + complex_indices = self._create_complex_indices() + for index in complex_indices: self._check_single_index(a, index) + class TestFloatNonIntegerArgument: """ These test that ``TypeError`` is raised when you try to use diff --git a/numpy/_core/tests/test_numerictypes.py b/numpy/_core/tests/test_numerictypes.py index c9a2ac06472c..c6ecd5327850 100644 --- a/numpy/_core/tests/test_numerictypes.py +++ b/numpy/_core/tests/test_numerictypes.py @@ -347,17 +347,16 @@ def test_assign(self): class TestMultipleFields: - def setup_method(self): 
- self.ary = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], dtype='i4,f4,i2,c8') - def _bad_call(self): - return self.ary['f0', 'f1'] + ary = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], dtype='i4,f4,i2,c8') + return ary['f0', 'f1'] def test_no_tuple(self): assert_raises(IndexError, self._bad_call) def test_return(self): - res = self.ary[['f0', 'f2']].tolist() + ary = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], dtype='i4,f4,i2,c8') + res = ary[['f0', 'f2']].tolist() assert_(res == [(1, 3), (5, 7)]) diff --git a/numpy/_core/tests/test_records.py b/numpy/_core/tests/test_records.py index b4b93aee4026..7ed6ea7687ff 100644 --- a/numpy/_core/tests/test_records.py +++ b/numpy/_core/tests/test_records.py @@ -359,26 +359,26 @@ def test_tofile_fromfile(self): class TestRecord: - def setup_method(self): - self.data = np.rec.fromrecords([(1, 2, 3), (4, 5, 6)], + def _create_data(self): + return np.rec.fromrecords([(1, 2, 3), (4, 5, 6)], dtype=[("col1", " Date: Fri, 22 Aug 2025 17:23:32 +0000 Subject: [PATCH 0353/1718] MAINT: Bump github/codeql-action from 3.29.10 to 3.29.11 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.29.10 to 3.29.11. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/96f518a34f7a870018057716cc4d7a5c014bd61c...3c3833e0f8c1c83d449a7478aa59c036a9165498) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.29.11 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index dce656f39b37..68aead6651e6 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@96f518a34f7a870018057716cc4d7a5c014bd61c # v3.29.10 + uses: github/codeql-action/init@3c3833e0f8c1c83d449a7478aa59c036a9165498 # v3.29.11 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@96f518a34f7a870018057716cc4d7a5c014bd61c # v3.29.10 + uses: github/codeql-action/autobuild@3c3833e0f8c1c83d449a7478aa59c036a9165498 # v3.29.11 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@96f518a34f7a870018057716cc4d7a5c014bd61c # v3.29.10 + uses: github/codeql-action/analyze@3c3833e0f8c1c83d449a7478aa59c036a9165498 # v3.29.11 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index cecb8d3eef4a..d61c6f1dd815 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
- name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@96f518a34f7a870018057716cc4d7a5c014bd61c # v2.1.27 + uses: github/codeql-action/upload-sarif@3c3833e0f8c1c83d449a7478aa59c036a9165498 # v2.1.27 with: sarif_file: results.sarif From 0caeebfbb9b9195da81d8afa90f9881662c2dccf Mon Sep 17 00:00:00 2001 From: "Christine P. Chai" Date: Fri, 22 Aug 2025 21:10:49 -0700 Subject: [PATCH 0354/1718] DOC: Bold font and backquote don't mix together --- doc/source/building/index.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/building/index.rst b/doc/source/building/index.rst index d7baeaee9324..d027ecb0ee8f 100644 --- a/doc/source/building/index.rst +++ b/doc/source/building/index.rst @@ -473,7 +473,7 @@ interface is self-documenting, so please see ``spin --help`` and install"). Editable installs are supported. It is important to understand that **you - may use either an editable install or ``spin`` in a given repository clone, + may use either an editable install or** ``spin`` **in a given repository clone, but not both**. If you use editable installs, you have to use ``pytest`` and other development tools directly instead of using ``spin``. From 0eda83e198d61517b2e07df29d4e55e1c5048fb9 Mon Sep 17 00:00:00 2001 From: "Christine P. Chai" Date: Fri, 22 Aug 2025 21:13:36 -0700 Subject: [PATCH 0355/1718] DOC: Correct typos in symbol formatting --- doc/source/user/numpy-for-matlab-users.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/source/user/numpy-for-matlab-users.rst b/doc/source/user/numpy-for-matlab-users.rst index 9e8093b20f02..e05e123e224c 100644 --- a/doc/source/user/numpy-for-matlab-users.rst +++ b/doc/source/user/numpy-for-matlab-users.rst @@ -674,10 +674,10 @@ are only a handful of key differences between the two. 
- Operators ``*`` and ``@``, functions ``dot()``, and ``multiply()``: - - For ``array``, **``*`` means element-wise multiplication**, while - **``@`` means matrix multiplication**; they have associated functions + - For ``array``, ``*`` **means element-wise multiplication**, while + ``@`` **means matrix multiplication**; they have associated functions ``multiply()`` and ``dot()``. - - For ``matrix``, **``*`` means matrix multiplication**, and for + - For ``matrix``, ``*`` **means matrix multiplication**, and for element-wise multiplication one has to use the ``multiply()`` function. - Handling of vectors (one-dimensional arrays) From 6f7297004e29ec119d2b642a76018e069bc115c4 Mon Sep 17 00:00:00 2001 From: "Christine P. Chai" Date: Fri, 22 Aug 2025 21:15:58 -0700 Subject: [PATCH 0356/1718] DOC: Correct a bold font typo [skip azp] [skip cirrus] [skip actions] --- doc/source/f2py/buildtools/distutils-to-meson.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/f2py/buildtools/distutils-to-meson.rst b/doc/source/f2py/buildtools/distutils-to-meson.rst index 32f2b599f6e5..585bfba57246 100644 --- a/doc/source/f2py/buildtools/distutils-to-meson.rst +++ b/doc/source/f2py/buildtools/distutils-to-meson.rst @@ -10,7 +10,7 @@ collects common workflows in both formats. .. note:: - This is a ****living**** document, `pull requests `_ are very welcome! + This is a **living** document, `pull requests `_ are very welcome! 
1.1 Baseline ~~~~~~~~~~~~ From ddb94ee4897166e244ef098aafcbade20858c9a2 Mon Sep 17 00:00:00 2001 From: Richard Smythe Date: Sat, 23 Aug 2025 09:22:02 +0100 Subject: [PATCH 0357/1718] MAINT: fix typo in cmds.py --- .spin/cmds.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.spin/cmds.py b/.spin/cmds.py index 66885de630e0..9490aca297d9 100644 --- a/.spin/cmds.py +++ b/.spin/cmds.py @@ -455,7 +455,7 @@ def bench(ctx, tests, compare, verbose, quick, commits, build_dir): ] + bench_args _run_asv(cmd) else: - # Ensure that we don't have uncommited changes + # Ensure that we don't have uncommitted changes commit_a, commit_b = [_commit_to_sha(c) for c in commits] if commit_b == 'HEAD' and _dirty_git_working_dir(): From 7f9339b1bcb216b4797c98f802cc8ff05d5640e8 Mon Sep 17 00:00:00 2001 From: zuhu2195 <56065368+zuhu2195@users.noreply.github.com> Date: Sat, 23 Aug 2025 16:45:45 +0530 Subject: [PATCH 0358/1718] MAINT: Eliminate ambiguous order of evaluation in the ratios of random variates. (#29598) In C, in an expression such as f1()/f2(), the order of evaluation of f1() and f2() is unspecified. When such a ratio was used in the C functions random_f and random_standard_cauchy, it meant the streams of random variates for the f and standard_cauchy distributions depended on how the compiler chose to order the evaluation. This could result in different streams of variates when numpy is compiled with different compilers. By evaluating the numerator and denominator in separate statements, the ambiguity is eliminated. 
Co-authored-by: Joren Hammudoglu --- numpy/random/src/distributions/distributions.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/numpy/random/src/distributions/distributions.c b/numpy/random/src/distributions/distributions.c index 5ff694b2cde9..2b7c69481064 100644 --- a/numpy/random/src/distributions/distributions.c +++ b/numpy/random/src/distributions/distributions.c @@ -468,12 +468,15 @@ double random_chisquare(bitgen_t *bitgen_state, double df) { } double random_f(bitgen_t *bitgen_state, double dfnum, double dfden) { - return ((random_chisquare(bitgen_state, dfnum) * dfden) / - (random_chisquare(bitgen_state, dfden) * dfnum)); + double subexpr1 = random_chisquare(bitgen_state, dfnum) * dfden; + double subexpr2 = random_chisquare(bitgen_state, dfden) * dfnum; + return subexpr1 / subexpr2; } double random_standard_cauchy(bitgen_t *bitgen_state) { - return random_standard_normal(bitgen_state) / random_standard_normal(bitgen_state); + double subexpr1 = random_standard_normal(bitgen_state); + double subexpr2 = random_standard_normal(bitgen_state); + return subexpr1 / subexpr2; } double random_pareto(bitgen_t *bitgen_state, double a) { From d5692ba8660ef664f51067b1f541ee7c14a58cff Mon Sep 17 00:00:00 2001 From: koki watanabe Date: Mon, 25 Aug 2025 02:43:56 +0900 Subject: [PATCH 0359/1718] enh: extend coverage --- benchmarks/benchmarks/bench_lib.py | 45 +++++++++++++++++++++++------- 1 file changed, 35 insertions(+), 10 deletions(-) diff --git a/benchmarks/benchmarks/bench_lib.py b/benchmarks/benchmarks/bench_lib.py index 0e60468308bb..738be22c54d6 100644 --- a/benchmarks/benchmarks/bench_lib.py +++ b/benchmarks/benchmarks/bench_lib.py @@ -1,5 +1,6 @@ """Benchmarks for `numpy.lib`.""" +import string import numpy as np @@ -119,37 +120,61 @@ def time_nanpercentile(self, array_size, percent_nans): class Unique(Benchmark): """Benchmark for np.unique with np.nan values.""" - param_names = ["array_size", "percent_nans"] + param_names = 
["array_size", "percent_nans", "percent_unique_values", "dtype"] params = [ # sizes of the 1D arrays - [200, int(2e5)], + [200, int(2e5), int(1e9)], # percent of np.nan in arrays [0, 0.1, 2., 50., 90.], + # percent of unique values in arrays + [2., 50., 90.], + # dtypes of the arrays + [np.float64, np.complex128, np.dtypes.StringDType(na_object=np.nan)], ] - def setup(self, array_size, percent_nans): - np.random.seed(123) + def setup(self, array_size, percent_nans, percent_unique_values, dtype): + rng = np.random.default_rng(123) # produce a randomly shuffled array with the # approximate desired percentage np.nan content - base_array = np.random.uniform(size=array_size) + match dtype: + case np.float64: + base_array = rng.uniform(size=array_size, dtype=dtype) + case np.complex128: + base_array = np.array( + [ + complex(*rng.uniform(size=2, dtype=np.float64)) + for _ in range(array_size) + ], + dtype=dtype, + ) + case np.dtypes.StringDType: + chars = string.ascii_letters + string.digits + base_array = np.array( + [ + ''.join(rng.choice(list(chars), size=10)) + for _ in range(array_size) + ], + dtype=dtype, + ) + n_nan = int(percent_nans * array_size) nan_indices = np.random.choice(np.arange(array_size), size=n_nan) base_array[nan_indices] = np.nan self.arr = base_array - def time_unique_values(self, array_size, percent_nans): + def time_unique_values(self, array_size, percent_nans, dtype): np.unique(self.arr, return_index=False, return_inverse=False, return_counts=False) - def time_unique_counts(self, array_size, percent_nans): + def time_unique_counts(self, array_size, percent_nans, dtype): np.unique(self.arr, return_index=False, - return_inverse=False, return_counts=True) + return_inverse=False, return_counts=True,) - def time_unique_inverse(self, array_size, percent_nans): + def time_unique_inverse(self, array_size, percent_nans, dtype): np.unique(self.arr, return_index=False, return_inverse=True, return_counts=False) - def time_unique_all(self, array_size, 
percent_nans): + def time_unique_all(self, array_size, percent_nans, dtype): np.unique(self.arr, return_index=True, return_inverse=True, return_counts=True) From 723e2c6224ec4fd6705049b317047f516861cb40 Mon Sep 17 00:00:00 2001 From: koki watanabe Date: Mon, 25 Aug 2025 13:37:06 +0900 Subject: [PATCH 0360/1718] enh: coverage --- benchmarks/benchmarks/bench_lib.py | 40 +++++++++++++++++------------- 1 file changed, 23 insertions(+), 17 deletions(-) diff --git a/benchmarks/benchmarks/bench_lib.py b/benchmarks/benchmarks/bench_lib.py index 738be22c54d6..9d075b3fb017 100644 --- a/benchmarks/benchmarks/bench_lib.py +++ b/benchmarks/benchmarks/bench_lib.py @@ -123,11 +123,11 @@ class Unique(Benchmark): param_names = ["array_size", "percent_nans", "percent_unique_values", "dtype"] params = [ # sizes of the 1D arrays - [200, int(2e5), int(1e9)], + [200, int(2e5), int(2e8)], # percent of np.nan in arrays [0, 0.1, 2., 50., 90.], # percent of unique values in arrays - [2., 50., 90.], + [0.002, 0.2, 20.], # dtypes of the arrays [np.float64, np.complex128, np.dtypes.StringDType(na_object=np.nan)], ] @@ -136,45 +136,51 @@ def setup(self, array_size, percent_nans, percent_unique_values, dtype): rng = np.random.default_rng(123) # produce a randomly shuffled array with the # approximate desired percentage np.nan content + unique_values_size = max(int(percent_unique_values / 100. 
* array_size), 2) match dtype: case np.float64: - base_array = rng.uniform(size=array_size, dtype=dtype) + unique_array = rng.uniform(size=unique_values_size) case np.complex128: - base_array = np.array( + unique_array = np.array( [ - complex(*rng.uniform(size=2, dtype=np.float64)) - for _ in range(array_size) + complex(*rng.uniform(size=2)) + for _ in range(unique_values_size) ], dtype=dtype, ) - case np.dtypes.StringDType: + case np.dtypes.StringDType(): chars = string.ascii_letters + string.digits - base_array = np.array( + unique_array = np.array( [ - ''.join(rng.choice(list(chars), size=10)) - for _ in range(array_size) + ''.join(rng.choice(list(chars), size=rng.integers(5, 20))) + for _ in range(unique_values_size) ], dtype=dtype, ) - - n_nan = int(percent_nans * array_size) - nan_indices = np.random.choice(np.arange(array_size), size=n_nan) + case _: + raise ValueError(f"Unsupported dtype {dtype}") + + base_array = np.resize(unique_array, array_size) + rng.shuffle(base_array) + # insert nans in random places + n_nan = int(percent_nans / 100. 
* array_size) + nan_indices = rng.choice(np.arange(array_size), size=n_nan, replace=False) base_array[nan_indices] = np.nan self.arr = base_array - def time_unique_values(self, array_size, percent_nans, dtype): + def time_unique_values(self, array_size, percent_nans, percent_unique_values, dtype): np.unique(self.arr, return_index=False, return_inverse=False, return_counts=False) - def time_unique_counts(self, array_size, percent_nans, dtype): + def time_unique_counts(self, array_size, percent_nans, percent_unique_values, dtype): np.unique(self.arr, return_index=False, return_inverse=False, return_counts=True,) - def time_unique_inverse(self, array_size, percent_nans, dtype): + def time_unique_inverse(self, array_size, percent_nans, percent_unique_values, dtype): np.unique(self.arr, return_index=False, return_inverse=True, return_counts=False) - def time_unique_all(self, array_size, percent_nans, dtype): + def time_unique_all(self, array_size, percent_nans, percent_unique_values, dtype): np.unique(self.arr, return_index=True, return_inverse=True, return_counts=True) From 45c9779b0dc1f3304435454ec65e5508b8f6005d Mon Sep 17 00:00:00 2001 From: koki watanabe Date: Mon, 25 Aug 2025 13:42:24 +0900 Subject: [PATCH 0361/1718] fix: lint --- benchmarks/benchmarks/bench_lib.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/benchmarks/benchmarks/bench_lib.py b/benchmarks/benchmarks/bench_lib.py index 9d075b3fb017..7a9cdb1c1d2c 100644 --- a/benchmarks/benchmarks/bench_lib.py +++ b/benchmarks/benchmarks/bench_lib.py @@ -168,19 +168,23 @@ def setup(self, array_size, percent_nans, percent_unique_values, dtype): base_array[nan_indices] = np.nan self.arr = base_array - def time_unique_values(self, array_size, percent_nans, percent_unique_values, dtype): + def time_unique_values(self, array_size, percent_nans, + percent_unique_values, dtype): np.unique(self.arr, return_index=False, return_inverse=False, return_counts=False) - def 
time_unique_counts(self, array_size, percent_nans, percent_unique_values, dtype): + def time_unique_counts(self, array_size, percent_nans, + percent_unique_values, dtype): np.unique(self.arr, return_index=False, return_inverse=False, return_counts=True,) - def time_unique_inverse(self, array_size, percent_nans, percent_unique_values, dtype): + def time_unique_inverse(self, array_size, percent_nans, + percent_unique_values, dtype): np.unique(self.arr, return_index=False, return_inverse=True, return_counts=False) - def time_unique_all(self, array_size, percent_nans, percent_unique_values, dtype): + def time_unique_all(self, array_size, percent_nans, + percent_unique_values, dtype): np.unique(self.arr, return_index=True, return_inverse=True, return_counts=True) From eba1760b125e2607555c9aa4943046572bd3a071 Mon Sep 17 00:00:00 2001 From: koki watanabe Date: Mon, 25 Aug 2025 14:48:20 +0900 Subject: [PATCH 0362/1718] fix: change array_size --- benchmarks/benchmarks/bench_lib.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmarks/benchmarks/bench_lib.py b/benchmarks/benchmarks/bench_lib.py index 7a9cdb1c1d2c..0b755f61adfd 100644 --- a/benchmarks/benchmarks/bench_lib.py +++ b/benchmarks/benchmarks/bench_lib.py @@ -123,7 +123,7 @@ class Unique(Benchmark): param_names = ["array_size", "percent_nans", "percent_unique_values", "dtype"] params = [ # sizes of the 1D arrays - [200, int(2e5), int(2e8)], + [200, int(2e5), int(2e7)], # percent of np.nan in arrays [0, 0.1, 2., 50., 90.], # percent of unique values in arrays From 24459e7b4a9997e542c130b09b52720e4989ee68 Mon Sep 17 00:00:00 2001 From: koki watanabe Date: Mon, 25 Aug 2025 15:46:01 +0900 Subject: [PATCH 0363/1718] fix: change length of string --- benchmarks/benchmarks/bench_lib.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmarks/benchmarks/bench_lib.py b/benchmarks/benchmarks/bench_lib.py index 0b755f61adfd..c2c4bc05b610 100644 --- a/benchmarks/benchmarks/bench_lib.py 
+++ b/benchmarks/benchmarks/bench_lib.py @@ -152,7 +152,7 @@ def setup(self, array_size, percent_nans, percent_unique_values, dtype): chars = string.ascii_letters + string.digits unique_array = np.array( [ - ''.join(rng.choice(list(chars), size=rng.integers(5, 20))) + ''.join(rng.choice(list(chars), size=rng.integers(5, 10))) for _ in range(unique_values_size) ], dtype=dtype, From 85a9c102894a800244ed912b7e52f48284c4ba73 Mon Sep 17 00:00:00 2001 From: mattip Date: Mon, 25 Aug 2025 10:44:41 +0300 Subject: [PATCH 0364/1718] DOC: more accurately describe 'cache' [skip actions][skip azp][skip cirrus] --- numpy/lib/_function_base_impl.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 0379c3c60a1d..47136b93e86d 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -2331,8 +2331,8 @@ class vectorize: passed directly to `pyfunc` unmodified. cache : bool, optional - If `True`, then cache the first function call that determines the number - of outputs if `otypes` and `signature` are not provided. + If neither `otypes` nor `signature` are provided, and `cache` is ``True``, then + cache the number of outputs. signature : string, optional Generalized universal function signature, e.g., ``(m,n),(n)->(m)`` for @@ -2356,12 +2356,12 @@ class vectorize: The `vectorize` function is provided primarily for convenience, not for performance. The implementation is essentially a for loop. - If `otypes` and `signature` are not specified, then a call to the function with the - first argument will be used to determine the number of outputs. The - results of this call will be cached if `cache` is `True` to prevent - calling the function twice. However, to implement the cache, the - original function must be wrapped which will slow down subsequent - calls, so only do this if your function is expensive. 
+ If neither `otypes` nor `signature` are specified, then a call to the function with + the first argument will be used to determine the number of outputs. The results of + this call will be cached if `cache` is `True` to prevent calling the function + twice. However, to implement the cache, the original function must be wrapped + which will slow down subsequent calls, so only do this if your function is + expensive. The new keyword argument interface and `excluded` argument support further degrades performance. From c9d6050f5dd12d7bd61a3345334e080689fdd891 Mon Sep 17 00:00:00 2001 From: koki watanabe Date: Mon, 25 Aug 2025 17:19:39 +0900 Subject: [PATCH 0365/1718] fix: change length of string --- benchmarks/benchmarks/bench_lib.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/benchmarks/benchmarks/bench_lib.py b/benchmarks/benchmarks/bench_lib.py index c2c4bc05b610..af7d65da542c 100644 --- a/benchmarks/benchmarks/bench_lib.py +++ b/benchmarks/benchmarks/bench_lib.py @@ -127,7 +127,7 @@ class Unique(Benchmark): # percent of np.nan in arrays [0, 0.1, 2., 50., 90.], # percent of unique values in arrays - [0.002, 0.2, 20.], + [0.001, 0.1, 10.], # dtypes of the arrays [np.float64, np.complex128, np.dtypes.StringDType(na_object=np.nan)], ] @@ -152,7 +152,7 @@ def setup(self, array_size, percent_nans, percent_unique_values, dtype): chars = string.ascii_letters + string.digits unique_array = np.array( [ - ''.join(rng.choice(list(chars), size=rng.integers(5, 10))) + ''.join(rng.choice(list(chars), size=rng.integers(4, 8))) for _ in range(unique_values_size) ], dtype=dtype, From bbb244233a7d6f6cd8e4afc370d437cef67b6cfe Mon Sep 17 00:00:00 2001 From: koki watanabe Date: Mon, 25 Aug 2025 18:13:00 +0900 Subject: [PATCH 0366/1718] fix: change parameter --- benchmarks/benchmarks/bench_lib.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/benchmarks/benchmarks/bench_lib.py b/benchmarks/benchmarks/bench_lib.py index 
af7d65da542c..f7c21d6b1b3e 100644 --- a/benchmarks/benchmarks/bench_lib.py +++ b/benchmarks/benchmarks/bench_lib.py @@ -123,11 +123,11 @@ class Unique(Benchmark): param_names = ["array_size", "percent_nans", "percent_unique_values", "dtype"] params = [ # sizes of the 1D arrays - [200, int(2e5), int(2e7)], + [500, int(5e4), int(5e6)], # percent of np.nan in arrays [0, 0.1, 2., 50., 90.], # percent of unique values in arrays - [0.001, 0.1, 10.], + [0.02, 0.2, 20.], # dtypes of the arrays [np.float64, np.complex128, np.dtypes.StringDType(na_object=np.nan)], ] @@ -139,7 +139,7 @@ def setup(self, array_size, percent_nans, percent_unique_values, dtype): unique_values_size = max(int(percent_unique_values / 100. * array_size), 2) match dtype: case np.float64: - unique_array = rng.uniform(size=unique_values_size) + unique_array = rng.uniform(size=unique_values_size).astype(dtype) case np.complex128: unique_array = np.array( [ From 3036cc749c74973987880353e4e9b35de9598f33 Mon Sep 17 00:00:00 2001 From: koki watanabe Date: Mon, 25 Aug 2025 18:57:28 +0900 Subject: [PATCH 0367/1718] fix: change parameter --- benchmarks/benchmarks/bench_lib.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmarks/benchmarks/bench_lib.py b/benchmarks/benchmarks/bench_lib.py index f7c21d6b1b3e..11076b3de134 100644 --- a/benchmarks/benchmarks/bench_lib.py +++ b/benchmarks/benchmarks/bench_lib.py @@ -123,7 +123,7 @@ class Unique(Benchmark): param_names = ["array_size", "percent_nans", "percent_unique_values", "dtype"] params = [ # sizes of the 1D arrays - [500, int(5e4), int(5e6)], + [200, int(5e4), int(1e7)], # percent of np.nan in arrays [0, 0.1, 2., 50., 90.], # percent of unique values in arrays From 4754c87b9b2ae004797ace59c97c70bd1c73d5cc Mon Sep 17 00:00:00 2001 From: koki watanabe Date: Mon, 25 Aug 2025 20:16:47 +0900 Subject: [PATCH 0368/1718] fix: change parameter --- benchmarks/benchmarks/bench_lib.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/benchmarks/benchmarks/bench_lib.py b/benchmarks/benchmarks/bench_lib.py index 11076b3de134..e816017496a4 100644 --- a/benchmarks/benchmarks/bench_lib.py +++ b/benchmarks/benchmarks/bench_lib.py @@ -127,7 +127,7 @@ class Unique(Benchmark): # percent of np.nan in arrays [0, 0.1, 2., 50., 90.], # percent of unique values in arrays - [0.02, 0.2, 20.], + [0.02, 0.5, 10.], # dtypes of the arrays [np.float64, np.complex128, np.dtypes.StringDType(na_object=np.nan)], ] From 47430c795c406154eda575bcad706875b595acff Mon Sep 17 00:00:00 2001 From: Britney Whittington <103079612+bwhitt7@users.noreply.github.com> Date: Mon, 25 Aug 2025 17:05:19 -0400 Subject: [PATCH 0369/1718] TST: Replace xunit setup with methods (#29616) * TST: Replace xunit setup with methods * STY: Method rename, code cleanup --- numpy/_core/tests/test_numeric.py | 379 ++++++++++++++-------------- numpy/_core/tests/test_overrides.py | 59 +++-- 2 files changed, 218 insertions(+), 220 deletions(-) diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py index 560d7cf71543..db5269c507d5 100644 --- a/numpy/_core/tests/test_numeric.py +++ b/numpy/_core/tests/test_numeric.py @@ -747,27 +747,29 @@ def test_bitwise_xor(self): class TestBoolArray: - def setup_method(self): + def _create_bool_arrays(self): # offset for simd tests - self.t = np.array([True] * 41, dtype=bool)[1::] - self.f = np.array([False] * 41, dtype=bool)[1::] - self.o = np.array([False] * 42, dtype=bool)[2::] - self.nm = self.f.copy() - self.im = self.t.copy() - self.nm[3] = True - self.nm[-2] = True - self.im[3] = False - self.im[-2] = False + t = np.array([True] * 41, dtype=bool)[1::] + f = np.array([False] * 41, dtype=bool)[1::] + o = np.array([False] * 42, dtype=bool)[2::] + nm = f.copy() + im = t.copy() + nm[3] = True + nm[-2] = True + im[3] = False + im[-2] = False + return t, f, o, nm, im def test_all_any(self): - assert_(self.t.all()) - assert_(self.t.any()) - assert_(not self.f.all()) - assert_(not self.f.any()) 
- assert_(self.nm.any()) - assert_(self.im.any()) - assert_(not self.nm.all()) - assert_(not self.im.all()) + t, f, _, nm, im = self._create_bool_arrays() + assert_(t.all()) + assert_(t.any()) + assert_(not f.all()) + assert_(not f.any()) + assert_(nm.any()) + assert_(im.any()) + assert_(not nm.all()) + assert_(not im.all()) # check bad element in all positions for i in range(256 - 7): d = np.array([False] * 256, dtype=bool)[7::] @@ -787,118 +789,103 @@ def test_all_any(self): assert_(not np.all(e), msg=f"{i!r}") def test_logical_not_abs(self): - assert_array_equal(~self.t, self.f) - assert_array_equal(np.abs(~self.t), self.f) - assert_array_equal(np.abs(~self.f), self.t) - assert_array_equal(np.abs(self.f), self.f) - assert_array_equal(~np.abs(self.f), self.t) - assert_array_equal(~np.abs(self.t), self.f) - assert_array_equal(np.abs(~self.nm), self.im) - np.logical_not(self.t, out=self.o) - assert_array_equal(self.o, self.f) - np.abs(self.t, out=self.o) - assert_array_equal(self.o, self.t) + t, f, o, nm, im = self._create_bool_arrays() + assert_array_equal(~t, f) + assert_array_equal(np.abs(~t), f) + assert_array_equal(np.abs(~f), t) + assert_array_equal(np.abs(f), f) + assert_array_equal(~np.abs(f), t) + assert_array_equal(~np.abs(t), f) + assert_array_equal(np.abs(~nm), im) + np.logical_not(t, out=o) + assert_array_equal(o, f) + np.abs(t, out=o) + assert_array_equal(o, t) def test_logical_and_or_xor(self): - assert_array_equal(self.t | self.t, self.t) - assert_array_equal(self.f | self.f, self.f) - assert_array_equal(self.t | self.f, self.t) - assert_array_equal(self.f | self.t, self.t) - np.logical_or(self.t, self.t, out=self.o) - assert_array_equal(self.o, self.t) - assert_array_equal(self.t & self.t, self.t) - assert_array_equal(self.f & self.f, self.f) - assert_array_equal(self.t & self.f, self.f) - assert_array_equal(self.f & self.t, self.f) - np.logical_and(self.t, self.t, out=self.o) - assert_array_equal(self.o, self.t) - assert_array_equal(self.t ^ 
self.t, self.f) - assert_array_equal(self.f ^ self.f, self.f) - assert_array_equal(self.t ^ self.f, self.t) - assert_array_equal(self.f ^ self.t, self.t) - np.logical_xor(self.t, self.t, out=self.o) - assert_array_equal(self.o, self.f) - - assert_array_equal(self.nm & self.t, self.nm) - assert_array_equal(self.im & self.f, False) - assert_array_equal(self.nm & True, self.nm) - assert_array_equal(self.im & False, self.f) - assert_array_equal(self.nm | self.t, self.t) - assert_array_equal(self.im | self.f, self.im) - assert_array_equal(self.nm | True, self.t) - assert_array_equal(self.im | False, self.im) - assert_array_equal(self.nm ^ self.t, self.im) - assert_array_equal(self.im ^ self.f, self.im) - assert_array_equal(self.nm ^ True, self.im) - assert_array_equal(self.im ^ False, self.im) + t, f, o, nm, im = self._create_bool_arrays() + assert_array_equal(t | t, t) + assert_array_equal(f | f, f) + assert_array_equal(t | f, t) + assert_array_equal(f | t, t) + np.logical_or(t, t, out=o) + assert_array_equal(o, t) + assert_array_equal(t & t, t) + assert_array_equal(f & f, f) + assert_array_equal(t & f, f) + assert_array_equal(f & t, f) + np.logical_and(t, t, out=o) + assert_array_equal(o, t) + assert_array_equal(t ^ t, f) + assert_array_equal(f ^ f, f) + assert_array_equal(t ^ f, t) + assert_array_equal(f ^ t, t) + np.logical_xor(t, t, out=o) + assert_array_equal(o, f) + + assert_array_equal(nm & t, nm) + assert_array_equal(im & f, False) + assert_array_equal(nm & True, nm) + assert_array_equal(im & False, f) + assert_array_equal(nm | t, t) + assert_array_equal(im | f, im) + assert_array_equal(nm | True, t) + assert_array_equal(im | False, im) + assert_array_equal(nm ^ t, im) + assert_array_equal(im ^ f, im) + assert_array_equal(nm ^ True, im) + assert_array_equal(im ^ False, im) class TestBoolCmp: - def setup_method(self): - self.f = np.ones(256, dtype=np.float32) - self.ef = np.ones(self.f.size, dtype=bool) - self.d = np.ones(128, dtype=np.float64) - self.ed = 
np.ones(self.d.size, dtype=bool) + def _create_data(self, dtype, size): + # generate data using given dtype and num for size of array + a = np.ones(size, dtype=dtype) + e = np.ones(a.size, dtype=bool) # generate values for all permutation of 256bit simd vectors s = 0 - for i in range(32): - self.f[s:s + 8] = [i & 2**x for x in range(8)] - self.ef[s:s + 8] = [(i & 2**x) != 0 for x in range(8)] - s += 8 - s = 0 - for i in range(16): - self.d[s:s + 4] = [i & 2**x for x in range(4)] - self.ed[s:s + 4] = [(i & 2**x) != 0 for x in range(4)] - s += 4 - - self.nf = self.f.copy() - self.nd = self.d.copy() - self.nf[self.ef] = np.nan - self.nd[self.ed] = np.nan - - self.inff = self.f.copy() - self.infd = self.d.copy() - self.inff[::3][self.ef[::3]] = np.inf - self.infd[::3][self.ed[::3]] = np.inf - self.inff[1::3][self.ef[1::3]] = -np.inf - self.infd[1::3][self.ed[1::3]] = -np.inf - self.inff[2::3][self.ef[2::3]] = np.nan - self.infd[2::3][self.ed[2::3]] = np.nan - self.efnonan = self.ef.copy() - self.efnonan[2::3] = False - self.ednonan = self.ed.copy() - self.ednonan[2::3] = False - - self.signf = self.f.copy() - self.signd = self.d.copy() - self.signf[self.ef] *= -1. - self.signd[self.ed] *= -1. - self.signf[1::6][self.ef[1::6]] = -np.inf - self.signd[1::6][self.ed[1::6]] = -np.inf + r = int(size / 32) + for i in range(int(size / 8)): + a[s:s + r] = [i & 2**x for x in range(r)] + e[s:s + r] = [(i & 2**x) != 0 for x in range(r)] + s += r + n = a.copy() + n[e] = np.nan + + inf = a.copy() + inf[::3][e[::3]] = np.inf + inf[1::3][e[1::3]] = -np.inf + inf[2::3][e[2::3]] = np.nan + enonan = e.copy() + enonan[2::3] = False + + sign = a.copy() + sign[e] *= -1. + sign[1::6][e[1::6]] = -np.inf # On RISC-V, many operations that produce NaNs, such as converting # a -NaN from f64 to f32, return a canonical NaN. The canonical # NaNs are always positive. See section 11.3 NaN Generation and # Propagation of the RISC-V Unprivileged ISA for more details. 
# We disable the float32 sign test on riscv64 for -np.nan as the sign # of the NaN will be lost when it's converted to a float32. - if platform.machine() != 'riscv64': - self.signf[3::6][self.ef[3::6]] = -np.nan - self.signd[3::6][self.ed[3::6]] = -np.nan - self.signf[4::6][self.ef[4::6]] = -0. - self.signd[4::6][self.ed[4::6]] = -0. + if not (dtype == np.float32 and platform.machine() == 'riscv64'): + sign[3::6][e[3::6]] = -np.nan + sign[4::6][e[4::6]] = -0. + return a, e, n, inf, enonan, sign def test_float(self): # offset for alignment test + f, ef, nf, inff, efnonan, signf = self._create_data(np.float32, 256) for i in range(4): - assert_array_equal(self.f[i:] > 0, self.ef[i:]) - assert_array_equal(self.f[i:] - 1 >= 0, self.ef[i:]) - assert_array_equal(self.f[i:] == 0, ~self.ef[i:]) - assert_array_equal(-self.f[i:] < 0, self.ef[i:]) - assert_array_equal(-self.f[i:] + 1 <= 0, self.ef[i:]) - r = self.f[i:] != 0 - assert_array_equal(r, self.ef[i:]) - r2 = self.f[i:] != np.zeros_like(self.f[i:]) - r3 = 0 != self.f[i:] + assert_array_equal(f[i:] > 0, ef[i:]) + assert_array_equal(f[i:] - 1 >= 0, ef[i:]) + assert_array_equal(f[i:] == 0, ~ef[i:]) + assert_array_equal(-f[i:] < 0, ef[i:]) + assert_array_equal(-f[i:] + 1 <= 0, ef[i:]) + r = f[i:] != 0 + assert_array_equal(r, ef[i:]) + r2 = f[i:] != np.zeros_like(f[i:]) + r3 = 0 != f[i:] assert_array_equal(r, r2) assert_array_equal(r, r3) # check bool == 0x1 @@ -907,24 +894,25 @@ def test_float(self): assert_array_equal(r3.view(np.int8), r3.astype(np.int8)) # isnan on amd64 takes the same code path - assert_array_equal(np.isnan(self.nf[i:]), self.ef[i:]) - assert_array_equal(np.isfinite(self.nf[i:]), ~self.ef[i:]) - assert_array_equal(np.isfinite(self.inff[i:]), ~self.ef[i:]) - assert_array_equal(np.isinf(self.inff[i:]), self.efnonan[i:]) - assert_array_equal(np.signbit(self.signf[i:]), self.ef[i:]) + assert_array_equal(np.isnan(nf[i:]), ef[i:]) + assert_array_equal(np.isfinite(nf[i:]), ~ef[i:]) + 
assert_array_equal(np.isfinite(inff[i:]), ~ef[i:]) + assert_array_equal(np.isinf(inff[i:]), efnonan[i:]) + assert_array_equal(np.signbit(signf[i:]), ef[i:]) def test_double(self): # offset for alignment test + d, ed, nd, infd, ednonan, signd = self._create_data(np.float64, 128) for i in range(2): - assert_array_equal(self.d[i:] > 0, self.ed[i:]) - assert_array_equal(self.d[i:] - 1 >= 0, self.ed[i:]) - assert_array_equal(self.d[i:] == 0, ~self.ed[i:]) - assert_array_equal(-self.d[i:] < 0, self.ed[i:]) - assert_array_equal(-self.d[i:] + 1 <= 0, self.ed[i:]) - r = self.d[i:] != 0 - assert_array_equal(r, self.ed[i:]) - r2 = self.d[i:] != np.zeros_like(self.d[i:]) - r3 = 0 != self.d[i:] + assert_array_equal(d[i:] > 0, ed[i:]) + assert_array_equal(d[i:] - 1 >= 0, ed[i:]) + assert_array_equal(d[i:] == 0, ~ed[i:]) + assert_array_equal(-d[i:] < 0, ed[i:]) + assert_array_equal(-d[i:] + 1 <= 0, ed[i:]) + r = d[i:] != 0 + assert_array_equal(r, ed[i:]) + r2 = d[i:] != np.zeros_like(d[i:]) + r3 = 0 != d[i:] assert_array_equal(r, r2) assert_array_equal(r, r3) # check bool == 0x1 @@ -933,11 +921,11 @@ def test_double(self): assert_array_equal(r3.view(np.int8), r3.astype(np.int8)) # isnan on amd64 takes the same code path - assert_array_equal(np.isnan(self.nd[i:]), self.ed[i:]) - assert_array_equal(np.isfinite(self.nd[i:]), ~self.ed[i:]) - assert_array_equal(np.isfinite(self.infd[i:]), ~self.ed[i:]) - assert_array_equal(np.isinf(self.infd[i:]), self.ednonan[i:]) - assert_array_equal(np.signbit(self.signd[i:]), self.ed[i:]) + assert_array_equal(np.isnan(nd[i:]), ed[i:]) + assert_array_equal(np.isfinite(nd[i:]), ~ed[i:]) + assert_array_equal(np.isfinite(infd[i:]), ~ed[i:]) + assert_array_equal(np.isinf(infd[i:]), ednonan[i:]) + assert_array_equal(np.signbit(signd[i:]), ed[i:]) class TestSeterr: @@ -1646,6 +1634,7 @@ def test_failed_itemsetting(self): with pytest.raises(ValueError): np.fromiter(iterable, dtype=np.dtype((int, 2))) + class TestNonzero: def test_nonzero_trivial(self): 
assert_equal(np.count_nonzero(np.array([])), 0) @@ -2315,9 +2304,8 @@ def assert_array_strict_equal(x, y): class TestClip: - def setup_method(self): - self.nr = 5 - self.nc = 3 + nr = 5 + nc = 3 def fastclip(self, a, m, M, out=None, **kwargs): return a.clip(m, M, out=out, **kwargs) @@ -2950,6 +2938,7 @@ def test_out_of_bound_pyints(self, dtype, min, max): if max is not None: assert (c <= max).all() + class TestAllclose: rtol = 1e-5 atol = 1e-8 @@ -3243,45 +3232,50 @@ def test_tol_warnings(self): class TestStdVar: - def setup_method(self): - self.A = np.array([1, -1, 1, -1]) - self.real_var = 1 + def _create_data(self): + A = np.array([1, -1, 1, -1]) + real_var = 1 + return A, real_var def test_basic(self): - assert_almost_equal(np.var(self.A), self.real_var) - assert_almost_equal(np.std(self.A)**2, self.real_var) + A, real_var = self._create_data() + assert_almost_equal(np.var(A), real_var) + assert_almost_equal(np.std(A)**2, real_var) def test_scalars(self): assert_equal(np.var(1), 0) assert_equal(np.std(1), 0) def test_ddof1(self): - assert_almost_equal(np.var(self.A, ddof=1), - self.real_var * len(self.A) / (len(self.A) - 1)) - assert_almost_equal(np.std(self.A, ddof=1)**2, - self.real_var * len(self.A) / (len(self.A) - 1)) + A, real_var = self._create_data() + assert_almost_equal(np.var(A, ddof=1), + real_var * len(A) / (len(A) - 1)) + assert_almost_equal(np.std(A, ddof=1)**2, + real_var * len(A) / (len(A) - 1)) def test_ddof2(self): - assert_almost_equal(np.var(self.A, ddof=2), - self.real_var * len(self.A) / (len(self.A) - 2)) - assert_almost_equal(np.std(self.A, ddof=2)**2, - self.real_var * len(self.A) / (len(self.A) - 2)) + A, real_var = self._create_data() + assert_almost_equal(np.var(A, ddof=2), + real_var * len(A) / (len(A) - 2)) + assert_almost_equal(np.std(A, ddof=2)**2, + real_var * len(A) / (len(A) - 2)) def test_correction(self): + A, _ = self._create_data() assert_almost_equal( - np.var(self.A, correction=1), np.var(self.A, ddof=1) + np.var(A, 
correction=1), np.var(A, ddof=1) ) assert_almost_equal( - np.std(self.A, correction=1), np.std(self.A, ddof=1) + np.std(A, correction=1), np.std(A, ddof=1) ) err_msg = "ddof and correction can't be provided simultaneously." with assert_raises_regex(ValueError, err_msg): - np.var(self.A, ddof=1, correction=0) + np.var(A, ddof=1, correction=0) with assert_raises_regex(ValueError, err_msg): - np.std(self.A, ddof=1, correction=1) + np.std(A, ddof=1, correction=1) def test_out_scalar(self): d = np.arange(10) @@ -3310,26 +3304,22 @@ def test_scalars(self): class TestCreationFuncs: - # Test ones, zeros, empty and full. - - def setup_method(self): - dtypes = {np.dtype(tp) for tp in itertools.chain(*sctypes.values())} - # void, bytes, str - variable_sized = {tp for tp in dtypes if tp.str.endswith('0')} + def check_function(self, func, fill_value=None): + dtypes_info = {np.dtype(tp) for tp in itertools.chain(*sctypes.values())} keyfunc = lambda dtype: dtype.str - self.dtypes = sorted(dtypes - variable_sized | + variable_sized = {tp for tp in dtypes_info if tp.str.endswith('0')} + dtypes = sorted(dtypes_info - variable_sized | {np.dtype(tp.str.replace("0", str(i))) for tp in variable_sized for i in range(1, 10)}, key=keyfunc) - self.dtypes += [type(dt) for dt in sorted(dtypes, key=keyfunc)] - self.orders = {'C': 'c_contiguous', 'F': 'f_contiguous'} - self.ndims = 10 + dtypes += [type(dt) for dt in sorted(dtypes_info, key=keyfunc)] + orders = {'C': 'c_contiguous', 'F': 'f_contiguous'} + ndims = 10 - def check_function(self, func, fill_value=None): par = ((0, 1, 2), - range(self.ndims), - self.orders, - self.dtypes) + range(ndims), + orders, + dtypes) fill_kwarg = {} if fill_value is not None: fill_kwarg = {'fill_value': fill_value} @@ -3355,7 +3345,7 @@ def check_function(self, func, fill_value=None): assert_equal(arr.dtype, np.dtype(dtype_str)) else: assert_equal(arr.dtype, np.dtype(dtype.type)) - assert_(getattr(arr.flags, self.orders[order])) + assert_(getattr(arr.flags, 
orders[order])) if fill_value is not None: if arr.dtype.str.startswith('|S'): @@ -3395,32 +3385,6 @@ def test_for_reference_leak(self): class TestLikeFuncs: '''Test ones_like, zeros_like, empty_like and full_like''' - def setup_method(self): - self.data = [ - # Array scalars - (np.array(3.), None), - (np.array(3), 'f8'), - # 1D arrays - (np.arange(6, dtype='f4'), None), - (np.arange(6), 'c16'), - # 2D C-layout arrays - (np.arange(6).reshape(2, 3), None), - (np.arange(6).reshape(3, 2), 'i1'), - # 2D F-layout arrays - (np.arange(6).reshape((2, 3), order='F'), None), - (np.arange(6).reshape((3, 2), order='F'), 'i1'), - # 3D C-layout arrays - (np.arange(24).reshape(2, 3, 4), None), - (np.arange(24).reshape(4, 3, 2), 'f4'), - # 3D F-layout arrays - (np.arange(24).reshape((2, 3, 4), order='F'), None), - (np.arange(24).reshape((4, 3, 2), order='F'), 'f4'), - # 3D non-C/F-layout arrays - (np.arange(24).reshape(2, 3, 4).swapaxes(0, 1), None), - (np.arange(24).reshape(4, 3, 2).swapaxes(0, 1), '?'), - ] - self.shapes = [(), (5,), (5, 6,), (5, 6, 7,)] - def compare_array_value(self, dz, value, fill_value): if value is not None: if fill_value: @@ -3433,11 +3397,36 @@ def compare_array_value(self, dz, value, fill_value): assert_(np.all(dz == value)) def check_like_function(self, like_function, value, fill_value=False): + data = [ + # Array scalars + (np.array(3.), None), + (np.array(3), 'f8'), + # 1D arrays + (np.arange(6, dtype='f4'), None), + (np.arange(6), 'c16'), + # 2D C-layout arrays + (np.arange(6).reshape(2, 3), None), + (np.arange(6).reshape(3, 2), 'i1'), + # 2D F-layout arrays + (np.arange(6).reshape((2, 3), order='F'), None), + (np.arange(6).reshape((3, 2), order='F'), 'i1'), + # 3D C-layout arrays + (np.arange(24).reshape(2, 3, 4), None), + (np.arange(24).reshape(4, 3, 2), 'f4'), + # 3D F-layout arrays + (np.arange(24).reshape((2, 3, 4), order='F'), None), + (np.arange(24).reshape((4, 3, 2), order='F'), 'f4'), + # 3D non-C/F-layout arrays + (np.arange(24).reshape(2, 
3, 4).swapaxes(0, 1), None), + (np.arange(24).reshape(4, 3, 2).swapaxes(0, 1), '?'), + ] + shapes = [(), (5,), (5, 6,), (5, 6, 7,)] + if fill_value: fill_kwarg = {'fill_value': value} else: fill_kwarg = {} - for d, dtype in self.data: + for d, dtype in data: # default (K) order, dtype dz = like_function(d, dtype=dtype, **fill_kwarg) assert_equal(dz.shape, d.shape) @@ -3485,7 +3474,7 @@ def check_like_function(self, like_function, value, fill_value=False): self.compare_array_value(dz, value, fill_value) # Test the 'shape' parameter - for s in self.shapes: + for s in shapes: for o in 'CFA': sz = like_function(d, dtype=dtype, shape=s, order=o, **fill_kwarg) diff --git a/numpy/_core/tests/test_overrides.py b/numpy/_core/tests/test_overrides.py index b0d73375ed10..ebcf2f0ce112 100644 --- a/numpy/_core/tests/test_overrides.py +++ b/numpy/_core/tests/test_overrides.py @@ -550,7 +550,7 @@ def __array_function__(self, func, types, args, kwargs): class TestArrayLike: - def setup_method(self): + def _create_MyArray(self): class MyArray: def __init__(self, function=None): self.function = function @@ -563,20 +563,22 @@ def __array_function__(self, func, types, args, kwargs): return NotImplemented return my_func(*args, **kwargs) - self.MyArray = MyArray + return MyArray + def _create_MyNoArrayFunctionArray(self): class MyNoArrayFunctionArray: def __init__(self, function=None): self.function = function - self.MyNoArrayFunctionArray = MyNoArrayFunctionArray + return MyNoArrayFunctionArray + def _create_MySubclass(self): class MySubclass(np.ndarray): def __array_function__(self, func, types, args, kwargs): result = super().__array_function__(func, types, args, kwargs) return result.view(self.__class__) - self.MySubclass = MySubclass + return MySubclass def add_method(self, name, arr_class, enable_value_error=False): def _definition(*args, **kwargs): @@ -593,9 +595,10 @@ def func_args(*args, **kwargs): return args, kwargs def test_array_like_not_implemented(self): - 
self.add_method('array', self.MyArray) + MyArray = self._create_MyArray() + self.add_method('array', MyArray) - ref = self.MyArray.array() + ref = MyArray.array() with assert_raises_regex(TypeError, 'no implementation found'): array_like = np.asarray(1, like=ref) @@ -646,15 +649,16 @@ def test_nep35_functions_as_array_functions(self,): @pytest.mark.parametrize('function, args, kwargs', _array_tests) @pytest.mark.parametrize('numpy_ref', [True, False]) def test_array_like(self, function, args, kwargs, numpy_ref): - self.add_method('array', self.MyArray) - self.add_method(function, self.MyArray) + MyArray = self._create_MyArray() + self.add_method('array', MyArray) + self.add_method(function, MyArray) np_func = getattr(np, function) - my_func = getattr(self.MyArray, function) + my_func = getattr(MyArray, function) if numpy_ref is True: ref = np.array(1) else: - ref = self.MyArray.array() + ref = MyArray.array() like_args = tuple(a() if callable(a) else a for a in args) array_like = np_func(*like_args, **kwargs, like=ref) @@ -672,19 +676,20 @@ def test_array_like(self, function, args, kwargs, numpy_ref): assert_equal(array_like, np_arr) else: - assert type(array_like) is self.MyArray + assert type(array_like) is MyArray assert array_like.function is my_func @pytest.mark.parametrize('function, args, kwargs', _array_tests) @pytest.mark.parametrize('ref', [1, [1], "MyNoArrayFunctionArray"]) def test_no_array_function_like(self, function, args, kwargs, ref): - self.add_method('array', self.MyNoArrayFunctionArray) - self.add_method(function, self.MyNoArrayFunctionArray) + MyNoArrayFunctionArray = self._create_MyNoArrayFunctionArray() + self.add_method('array', MyNoArrayFunctionArray) + self.add_method(function, MyNoArrayFunctionArray) np_func = getattr(np, function) # Instantiate ref if it's the MyNoArrayFunctionArray class if ref == "MyNoArrayFunctionArray": - ref = self.MyNoArrayFunctionArray.array() + ref = MyNoArrayFunctionArray.array() like_args = tuple(a() if 
callable(a) else a for a in args) @@ -694,11 +699,12 @@ def test_no_array_function_like(self, function, args, kwargs, ref): @pytest.mark.parametrize('function, args, kwargs', _array_tests) def test_subclass(self, function, args, kwargs): - ref = np.array(1).view(self.MySubclass) + MySubclass = self._create_MySubclass() + ref = np.array(1).view(MySubclass) np_func = getattr(np, function) like_args = tuple(a() if callable(a) else a for a in args) array_like = np_func(*like_args, **kwargs, like=ref) - assert type(array_like) is self.MySubclass + assert type(array_like) is MySubclass if np_func is np.empty: return np_args = tuple(a() if callable(a) else a for a in args) @@ -707,13 +713,14 @@ def test_subclass(self, function, args, kwargs): @pytest.mark.parametrize('numpy_ref', [True, False]) def test_array_like_fromfile(self, numpy_ref): - self.add_method('array', self.MyArray) - self.add_method("fromfile", self.MyArray) + MyArray = self._create_MyArray() + self.add_method('array', MyArray) + self.add_method("fromfile", MyArray) if numpy_ref is True: ref = np.array(1) else: - ref = self.MyArray.array() + ref = MyArray.array() data = np.random.random(5) @@ -728,13 +735,14 @@ def test_array_like_fromfile(self, numpy_ref): assert_equal(np_res, data) assert_equal(array_like, np_res) else: - assert type(array_like) is self.MyArray - assert array_like.function is self.MyArray.fromfile + assert type(array_like) is MyArray + assert array_like.function is MyArray.fromfile def test_exception_handling(self): - self.add_method('array', self.MyArray, enable_value_error=True) + MyArray = self._create_MyArray() + self.add_method('array', MyArray, enable_value_error=True) - ref = self.MyArray.array() + ref = MyArray.array() with assert_raises(TypeError): # Raises the error about `value_error` being invalid first @@ -742,8 +750,9 @@ def test_exception_handling(self): @pytest.mark.parametrize('function, args, kwargs', _array_tests) def test_like_as_none(self, function, args, kwargs): - 
self.add_method('array', self.MyArray) - self.add_method(function, self.MyArray) + MyArray = self._create_MyArray() + self.add_method('array', MyArray) + self.add_method(function, MyArray) np_func = getattr(np, function) like_args = tuple(a() if callable(a) else a for a in args) From 3cbaf3f37ec08843c43e5170a80aca6e103bf32d Mon Sep 17 00:00:00 2001 From: koki watanabe Date: Tue, 26 Aug 2025 23:40:00 +0900 Subject: [PATCH 0370/1718] fix: change parameter --- benchmarks/benchmarks/bench_lib.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmarks/benchmarks/bench_lib.py b/benchmarks/benchmarks/bench_lib.py index e816017496a4..e2c9c49944d0 100644 --- a/benchmarks/benchmarks/bench_lib.py +++ b/benchmarks/benchmarks/bench_lib.py @@ -125,7 +125,7 @@ class Unique(Benchmark): # sizes of the 1D arrays [200, int(5e4), int(1e7)], # percent of np.nan in arrays - [0, 0.1, 2., 50., 90.], + [0, 10., 90.], # percent of unique values in arrays [0.02, 0.5, 10.], # dtypes of the arrays From 9566c7787c923e64e398ba515e375974d7546dc3 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 27 Aug 2025 01:59:17 +0200 Subject: [PATCH 0371/1718] TYP: replace scalar type ``__init__`` with ``__new__`` ported from numpy/numtype#692 --- numpy/__init__.pyi | 121 +++++++++-------------- numpy/typing/tests/data/fail/scalars.pyi | 12 +-- 2 files changed, 55 insertions(+), 78 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 0982a502f018..00c1a9494931 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -3582,7 +3582,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # See https://github.com/numpy/numpy-stubs/pull/80 for more details. class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): @abstractmethod - def __init__(self, *args: Any, **kwargs: Any) -> None: ... + def __new__(self) -> None: ... def __hash__(self) -> int: ... @overload def __array__(self, dtype: None = None, /) -> ndarray[tuple[()], dtype[Self]]: ... 
@@ -3853,7 +3853,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): class number(generic[_NumberItemT_co], Generic[_NBit, _NumberItemT_co]): @abstractmethod - def __init__(self, value: _NumberItemT_co, /) -> None: ... + def __new__(cls) -> Self: ... def __class_getitem__(cls, item: Any, /) -> GenericAlias: ... def __neg__(self) -> Self: ... @@ -3889,13 +3889,13 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): def imag(self) -> np.bool[L[False]]: ... @overload # mypy bug workaround: https://github.com/numpy/numpy/issues/29245 - def __init__(self: np.bool[builtins.bool], value: Never, /) -> None: ... + def __new__(cls, value: Never, /) -> np.bool[builtins.bool]: ... @overload - def __init__(self: np.bool[L[False]], value: _Falsy = ..., /) -> None: ... + def __new__(cls, value: _Falsy = ..., /) -> np.bool[L[False]]: ... @overload - def __init__(self: np.bool[L[True]], value: _Truthy, /) -> None: ... + def __new__(cls, value: _Truthy, /) -> np.bool[L[True]]: ... @overload - def __init__(self: np.bool[builtins.bool], value: object, /) -> None: ... + def __new__(cls, value: object, /) -> np.bool[builtins.bool]: ... def __bool__(self, /) -> _BoolItemT_co: ... @overload @@ -4004,7 +4004,7 @@ class object_(_RealMixin, generic): def __new__(cls, value: _T, /) -> _T: ... # type: ignore[misc] @overload # catch-all def __new__(cls, value: Any = ..., /) -> object | NDArray[Self]: ... # type: ignore[misc] - def __init__(self, value: object = ..., /) -> None: ... + def __hash__(self, /) -> int: ... def __abs__(self, /) -> object_: ... # this affects NDArray[object_].__abs__ def __call__(self, /, *args: object, **kwargs: object) -> Any: ... @@ -4014,7 +4014,7 @@ class object_(_RealMixin, generic): class integer(_IntegralMixin, _RoundMixin, number[_NBit, int]): @abstractmethod - def __init__(self, value: _ConvertibleToInt = ..., /) -> None: ... + def __new__(cls) -> Self: ... 
# NOTE: `bit_count` and `__index__` are technically defined in the concrete subtypes def bit_count(self, /) -> int: ... @@ -4038,7 +4038,7 @@ class integer(_IntegralMixin, _RoundMixin, number[_NBit, int]): def __rxor__(self, other: _IntLike_co, /) -> integer: ... class signedinteger(integer[_NBit1]): - def __init__(self, value: _ConvertibleToInt = ..., /) -> None: ... + def __new__(cls, value: _ConvertibleToInt = 0, /) -> Self: ... __add__: _SignedIntOp[_NBit1] __radd__: _SignedIntOp[_NBit1] @@ -4079,9 +4079,9 @@ long = signedinteger[_NBitLong] longlong = signedinteger[_NBitLongLong] class unsignedinteger(integer[_NBit1]): - # NOTE: `uint64 + signedinteger -> float64` - def __init__(self, value: _ConvertibleToInt = ..., /) -> None: ... + def __new__(cls, value: _ConvertibleToInt = 0, /) -> Self: ... + # NOTE: `uint64 + signedinteger -> float64` __add__: _UnsignedIntOp[_NBit1] __radd__: _UnsignedIntOp[_NBit1] __sub__: _UnsignedIntOp[_NBit1] @@ -4122,10 +4122,10 @@ ulonglong: TypeAlias = unsignedinteger[_NBitLongLong] class inexact(number[_NBit, _InexactItemT_co], Generic[_NBit, _InexactItemT_co]): @abstractmethod - def __init__(self, value: _InexactItemT_co | None = ..., /) -> None: ... + def __new__(cls) -> Self: ... class floating(_RealMixin, _RoundMixin, inexact[_NBit1, float]): - def __init__(self, value: _ConvertibleToFloat | None = ..., /) -> None: ... + def __new__(cls, value: _ConvertibleToFloat | None = 0, /) -> Self: ... __add__: _FloatOp[_NBit1] __radd__: _FloatOp[_NBit1] @@ -4153,9 +4153,6 @@ float32: TypeAlias = floating[_32Bit] # either a C `double`, `float`, or `longdouble` class float64(floating[_64Bit], float): # type: ignore[misc] - def __new__(cls, x: _ConvertibleToFloat | None = ..., /) -> Self: ... - - # @property def itemsize(self) -> L[8]: ... 
@property @@ -4294,19 +4291,19 @@ longdouble: TypeAlias = floating[_NBitLongDouble] class complexfloating(inexact[_NBit1, complex], Generic[_NBit1, _NBit2]): @overload - def __init__( - self, - real: complex | SupportsComplex | SupportsFloat | SupportsIndex = ..., - imag: complex | SupportsFloat | SupportsIndex = ..., + def __new__( + cls, + real: complex | SupportsComplex | SupportsFloat | SupportsIndex = 0, + imag: complex | SupportsFloat | SupportsIndex = 0, /, - ) -> None: ... + ) -> Self: ... @overload - def __init__(self, real: _ConvertibleToComplex | None = ..., /) -> None: ... + def __new__(cls, real: _ConvertibleToComplex | None = 0, /) -> Self: ... @property - def real(self) -> floating[_NBit1]: ... # type: ignore[override] + def real(self) -> floating[_NBit1]: ... @property - def imag(self) -> floating[_NBit2]: ... # type: ignore[override] + def imag(self) -> floating[_NBit2]: ... # NOTE: `__complex__` is technically defined in the concrete subtypes def __complex__(self, /) -> complex: ... @@ -4385,18 +4382,7 @@ class complexfloating(inexact[_NBit1, complex], Generic[_NBit1, _NBit2]): complex64: TypeAlias = complexfloating[_32Bit] -class complex128(complexfloating[_64Bit, _64Bit], complex): # type: ignore[misc] - @overload - def __new__( - cls, - real: complex | SupportsComplex | SupportsFloat | SupportsIndex = ..., - imag: complex | SupportsFloat | SupportsIndex = ..., - /, - ) -> Self: ... - @overload - def __new__(cls, real: _ConvertibleToComplex | None = ..., /) -> Self: ... - - # +class complex128(complexfloating[_64Bit, _64Bit], complex): @property def itemsize(self) -> L[16]: ... @property @@ -4455,26 +4441,26 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] def nbytes(self) -> L[8]: ... @overload - def __init__(self, value: _TD64ItemT_co | timedelta64[_TD64ItemT_co], /) -> None: ... + def __new__(cls, value: _TD64ItemT_co | timedelta64[_TD64ItemT_co], /) -> Self: ... 
@overload - def __init__(self: timedelta64[L[0]], /) -> None: ... + def __new__(cls, /) -> timedelta64[L[0]]: ... @overload - def __init__(self: timedelta64[None], value: _NaTValue | None, format: _TimeUnitSpec, /) -> None: ... + def __new__(cls, value: _NaTValue | None, format: _TimeUnitSpec, /) -> timedelta64[None]: ... @overload - def __init__(self: timedelta64[L[0]], value: L[0], format: _TimeUnitSpec[_IntTD64Unit] = ..., /) -> None: ... + def __new__(cls, value: L[0], format: _TimeUnitSpec[_IntTD64Unit] = ..., /) -> timedelta64[L[0]]: ... @overload - def __init__(self: timedelta64[int], value: _IntLike_co, format: _TimeUnitSpec[_IntTD64Unit] = ..., /) -> None: ... + def __new__(cls, value: _IntLike_co, format: _TimeUnitSpec[_IntTD64Unit] = ..., /) -> timedelta64[int]: ... @overload - def __init__(self: timedelta64[int], value: dt.timedelta, format: _TimeUnitSpec[_IntTimeUnit], /) -> None: ... + def __new__(cls, value: dt.timedelta, format: _TimeUnitSpec[_IntTimeUnit], /) -> timedelta64[int]: ... @overload - def __init__( - self: timedelta64[dt.timedelta], + def __new__( + cls, value: dt.timedelta | _IntLike_co, format: _TimeUnitSpec[_NativeTD64Unit] = ..., /, - ) -> None: ... + ) -> timedelta64[dt.timedelta]: ... @overload - def __init__(self, value: _ConvertibleToTD64, format: _TimeUnitSpec = ..., /) -> None: ... + def __new__(cls, value: _ConvertibleToTD64, format: _TimeUnitSpec = ..., /) -> Self: ... # inherited at runtime from `signedinteger` def __class_getitem__(cls, type_arg: type | object, /) -> GenericAlias: ... @@ -4639,25 +4625,25 @@ class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]): def nbytes(self) -> L[8]: ... @overload - def __init__(self, value: datetime64[_DT64ItemT_co], /) -> None: ... + def __new__(cls, value: datetime64[_DT64ItemT_co], /) -> Self: ... @overload - def __init__(self: datetime64[_AnyDT64Arg], value: _AnyDT64Arg, /) -> None: ... + def __new__(cls, value: _AnyDT64Arg, /) -> datetime64[_AnyDT64Arg]: ... 
@overload - def __init__(self: datetime64[None], value: _NaTValue | None = ..., format: _TimeUnitSpec = ..., /) -> None: ... + def __new__(cls, value: _NaTValue | None = ..., format: _TimeUnitSpec = ..., /) -> datetime64[None]: ... @overload - def __init__(self: datetime64[dt.datetime], value: _DT64Now, format: _TimeUnitSpec[_NativeTimeUnit] = ..., /) -> None: ... + def __new__(cls, value: _DT64Now, format: _TimeUnitSpec[_NativeTimeUnit] = ..., /) -> datetime64[dt.datetime]: ... @overload - def __init__(self: datetime64[dt.date], value: _DT64Date, format: _TimeUnitSpec[_DateUnit] = ..., /) -> None: ... + def __new__(cls, value: _DT64Date, format: _TimeUnitSpec[_DateUnit] = ..., /) -> datetime64[dt.date]: ... @overload - def __init__(self: datetime64[int], value: int | bytes | str | dt.date, format: _TimeUnitSpec[_IntTimeUnit], /) -> None: ... + def __new__(cls, value: int | bytes | str | dt.date, format: _TimeUnitSpec[_IntTimeUnit], /) -> datetime64[int]: ... @overload - def __init__( - self: datetime64[dt.datetime], value: int | bytes | str | dt.date, format: _TimeUnitSpec[_NativeTimeUnit], / - ) -> None: ... + def __new__( + cls, value: int | bytes | str | dt.date, format: _TimeUnitSpec[_NativeTimeUnit], / + ) -> datetime64[dt.datetime]: ... @overload - def __init__(self: datetime64[dt.date], value: int | bytes | str | dt.date, format: _TimeUnitSpec[_DateUnit], /) -> None: ... + def __new__(cls, value: int | bytes | str | dt.date, format: _TimeUnitSpec[_DateUnit], /) -> datetime64[dt.date]: ... @overload - def __init__(self, value: bytes | str | dt.date | None, format: _TimeUnitSpec = ..., /) -> None: ... + def __new__(cls, value: bytes | str | dt.date | None, format: _TimeUnitSpec = ..., /) -> Self: ... @overload def __add__(self: datetime64[_AnyDT64Item], x: int | integer | np.bool, /) -> datetime64[_AnyDT64Item]: ... 
@@ -4732,13 +4718,15 @@ class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]): __gt__: _ComparisonOpGT[datetime64, _ArrayLikeDT64_co] __ge__: _ComparisonOpGE[datetime64, _ArrayLikeDT64_co] -class flexible(_RealMixin, generic[_FlexibleItemT_co], Generic[_FlexibleItemT_co]): ... +class flexible(_RealMixin, generic[_FlexibleItemT_co], Generic[_FlexibleItemT_co]): + @abstractmethod + def __new__(cls) -> Self: ... class void(flexible[bytes | tuple[Any, ...]]): @overload - def __init__(self, value: _IntLike_co | bytes, /, dtype: None = None) -> None: ... + def __new__(cls, value: _IntLike_co | bytes, /, dtype: None = None) -> Self: ... @overload - def __init__(self, value: Any, /, dtype: _DTypeLikeVoid) -> None: ... + def __new__(cls, value: Any, /, dtype: _DTypeLikeVoid) -> Self: ... @overload def __getitem__(self, key: str | SupportsIndex, /) -> Any: ... @@ -4750,7 +4738,7 @@ class void(flexible[bytes | tuple[Any, ...]]): class character(flexible[_CharacterItemT_co], Generic[_CharacterItemT_co]): @abstractmethod - def __init__(self, value: _CharacterItemT_co = ..., /) -> None: ... + def __new__(cls) -> Self: ... # NOTE: Most `np.bytes_` / `np.str_` methods return their builtin `bytes` / `str` counterpart @@ -4760,12 +4748,6 @@ class bytes_(character[bytes], bytes): @overload def __new__(cls, s: str, /, encoding: str, errors: str = ...) -> Self: ... - # - @overload - def __init__(self, o: object = ..., /) -> None: ... - @overload - def __init__(self, s: str, /, encoding: str, errors: str = ...) -> None: ... - # def __bytes__(self, /) -> bytes: ... @@ -4775,11 +4757,6 @@ class str_(character[str], str): @overload def __new__(cls, value: bytes, /, encoding: str = ..., errors: str = ...) -> Self: ... - # - @overload - def __init__(self, value: object = ..., /) -> None: ... - @overload - def __init__(self, value: bytes, /, encoding: str = ..., errors: str = ...) -> None: ... 
# See `numpy._typing._ufunc` for more concrete nin-/nout-specific stubs @final diff --git a/numpy/typing/tests/data/fail/scalars.pyi b/numpy/typing/tests/data/fail/scalars.pyi index 018a88e652ae..02043e06e8fe 100644 --- a/numpy/typing/tests/data/fail/scalars.pyi +++ b/numpy/typing/tests/data/fail/scalars.pyi @@ -46,12 +46,12 @@ np.uint64(A()) # type: ignore[arg-type] np.void("test") # type: ignore[call-overload] np.void("test", dtype=None) # type: ignore[call-overload] -np.generic(1) # type: ignore[abstract] -np.number(1) # type: ignore[abstract] -np.integer(1) # type: ignore[abstract] -np.inexact(1) # type: ignore[abstract] -np.character("test") # type: ignore[abstract] -np.flexible(b"test") # type: ignore[abstract] +np.generic(1) # type: ignore[abstract, call-arg] +np.number(1) # type: ignore[abstract, call-arg] +np.integer(1) # type: ignore[abstract, call-arg] +np.inexact(1) # type: ignore[abstract, call-arg] +np.character("test") # type: ignore[abstract, call-arg] +np.flexible(b"test") # type: ignore[abstract, call-arg] np.float64(value=0.0) # type: ignore[call-arg] np.int64(value=0) # type: ignore[call-arg] From 2f35e84809586e233d1f8fd86c143e1c88dd4850 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 27 Aug 2025 02:00:55 +0200 Subject: [PATCH 0372/1718] TYP: appease ruff --- numpy/__init__.pyi | 1 - 1 file changed, 1 deletion(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 00c1a9494931..8dc1daf7f797 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -4757,7 +4757,6 @@ class str_(character[str], str): @overload def __new__(cls, value: bytes, /, encoding: str = ..., errors: str = ...) -> Self: ... 
- # See `numpy._typing._ufunc` for more concrete nin-/nout-specific stubs @final class ufunc: From 9c29c3226eaa3c4879bf73ca4d9267285d7c2589 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 27 Aug 2025 02:10:59 +0200 Subject: [PATCH 0373/1718] TYP: fix slightly incorrect ``memoryview`` type argument in ``ScalarType`` Ported from numpy/numtype#693 --- numpy/_core/numerictypes.pyi | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/_core/numerictypes.pyi b/numpy/_core/numerictypes.pyi index 26ed4b9d9524..0a6ac988163d 100644 --- a/numpy/_core/numerictypes.pyi +++ b/numpy/_core/numerictypes.pyi @@ -1,5 +1,5 @@ from builtins import bool as py_bool -from typing import Final, Literal as L, TypedDict, type_check_only +from typing import Any, Final, Literal as L, TypedDict, type_check_only import numpy as np from numpy import ( @@ -166,7 +166,7 @@ ScalarType: Final[ type[py_bool], type[bytes], type[str], - type[memoryview], + type[memoryview[Any]], type[np.bool], type[csingle], type[cdouble], From 6089ecbec5b25f1c59993755813d59d40e26af5f Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 27 Aug 2025 02:11:46 +0200 Subject: [PATCH 0374/1718] TYP: remove trivial ``ScalarType`` test case --- .../typing/tests/data/reveal/numerictypes.pyi | 36 ------------------- 1 file changed, 36 deletions(-) diff --git a/numpy/typing/tests/data/reveal/numerictypes.pyi b/numpy/typing/tests/data/reveal/numerictypes.pyi index 784167467532..aa5cf10410d4 100644 --- a/numpy/typing/tests/data/reveal/numerictypes.pyi +++ b/numpy/typing/tests/data/reveal/numerictypes.pyi @@ -2,42 +2,6 @@ from typing import Literal, assert_type import numpy as np -assert_type( - np.ScalarType, - tuple[ - type[int], - type[float], - type[complex], - type[bool], - type[bytes], - type[str], - type[memoryview], - type[np.bool], - type[np.csingle], - type[np.cdouble], - type[np.clongdouble], - type[np.half], - type[np.single], - type[np.double], - type[np.longdouble], - type[np.byte], - 
type[np.short], - type[np.intc], - type[np.long], - type[np.longlong], - type[np.timedelta64], - type[np.datetime64], - type[np.object_], - type[np.bytes_], - type[np.str_], - type[np.ubyte], - type[np.ushort], - type[np.uintc], - type[np.ulong], - type[np.ulonglong], - type[np.void], - ], -) assert_type(np.ScalarType[0], type[int]) assert_type(np.ScalarType[3], type[bool]) assert_type(np.ScalarType[8], type[np.csingle]) From ef9edae407263110258d974202acf35c4362b91c Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 27 Aug 2025 03:04:39 +0200 Subject: [PATCH 0375/1718] TYP: Make ``datetime64`` a generic type at runtime --- numpy/_core/src/multiarray/scalartypes.c.src | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/numpy/_core/src/multiarray/scalartypes.c.src b/numpy/_core/src/multiarray/scalartypes.c.src index f38a16300cfd..f84c120ad409 100644 --- a/numpy/_core/src/multiarray/scalartypes.c.src +++ b/numpy/_core/src/multiarray/scalartypes.c.src @@ -2853,11 +2853,15 @@ static PyMethodDef numbertype_methods[] = { {NULL, NULL, 0, NULL} /* sentinel */ }; -static PyMethodDef booleantype_methods[] = { - /* for typing */ - {"__class_getitem__", Py_GenericAlias, METH_CLASS | METH_O, NULL}, - {NULL, NULL, 0, NULL} /* sentinel */ +/**begin repeat + * #name = boolean,datetime# + */ +static PyMethodDef @name@type_methods[] = { + /* for typing */ + {"__class_getitem__", Py_GenericAlias, METH_CLASS | METH_O, NULL}, + {NULL, NULL, 0, NULL} /* sentinel */ }; +/**end repeat**/ /**begin repeat * #name = cfloat,clongdouble# @@ -4564,6 +4568,7 @@ initialize_numeric_types(void) /**end repeat**/ + PyDatetimeArrType_Type.tp_methods = datetimetype_methods; /**begin repeat * #Type = Byte, UByte, Short, UShort, Int, UInt, Long, From 99f26d7a5824a57cf015dc90dcfd1a3474a5a103 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 27 Aug 2025 03:14:03 +0200 Subject: [PATCH 0376/1718] TST: expect the ``datetime64`` type to be subscriptable --- 
numpy/_core/tests/test_scalar_methods.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/_core/tests/test_scalar_methods.py b/numpy/_core/tests/test_scalar_methods.py index 26dad71794e3..6fd4846006d0 100644 --- a/numpy/_core/tests/test_scalar_methods.py +++ b/numpy/_core/tests/test_scalar_methods.py @@ -171,8 +171,8 @@ def test_abc_non_numeric(self, cls: type[np.generic]) -> None: @pytest.mark.parametrize("code", np.typecodes["All"]) def test_concrete(self, code: str) -> None: cls = np.dtype(code).type - if cls == np.bool: - # np.bool allows subscript + if cls in {np.bool, np.datetime64}: + # these are intentionally subscriptable assert cls[Any] else: with pytest.raises(TypeError): From aa5360d109af681cdc7c82627bf7fe7cc4889a76 Mon Sep 17 00:00:00 2001 From: koki watanabe Date: Wed, 27 Aug 2025 22:47:48 +0900 Subject: [PATCH 0377/1718] fix: change parameter --- benchmarks/benchmarks/bench_lib.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/benchmarks/benchmarks/bench_lib.py b/benchmarks/benchmarks/bench_lib.py index e2c9c49944d0..37f599f075b3 100644 --- a/benchmarks/benchmarks/bench_lib.py +++ b/benchmarks/benchmarks/bench_lib.py @@ -123,11 +123,11 @@ class Unique(Benchmark): param_names = ["array_size", "percent_nans", "percent_unique_values", "dtype"] params = [ # sizes of the 1D arrays - [200, int(5e4), int(1e7)], + [int(2e3), int(2e6)], # percent of np.nan in arrays - [0, 10., 90.], + [10., 90.], # percent of unique values in arrays - [0.02, 0.5, 10.], + [0.2, 20.], # dtypes of the arrays [np.float64, np.complex128, np.dtypes.StringDType(na_object=np.nan)], ] From ad6302aef8d280e801628b72f8f6dd0e8105ffb2 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 27 Aug 2025 17:46:23 +0200 Subject: [PATCH 0378/1718] TYP: add missing ``_NoValue`` annotations in ``_core.fromnumeric`` --- numpy/_core/fromnumeric.pyi | 348 ++++++++++++++++++------------------ 1 file changed, 177 insertions(+), 171 deletions(-) diff --git 
a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index 0c96ac8c0aa4..05816088f041 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -8,7 +8,9 @@ from typing import ( Protocol, SupportsIndex, TypeAlias, + TypedDict, TypeVar, + Unpack, overload, type_check_only, ) @@ -119,12 +121,21 @@ class _SupportsShape(Protocol[_ShapeT_co]): @property def shape(self, /) -> _ShapeT_co: ... +@type_check_only +class _UFuncKwargs(TypedDict, total=False): + where: _ArrayLikeBool_co | None + order: _OrderKACF + subok: bool + signature: str | tuple[str | None, ...] + casting: _CastingKind + # a "sequence" that isn't a string, bytes, bytearray, or memoryview _T = TypeVar("_T") _PyArray: TypeAlias = list[_T] | tuple[_T, ...] # `int` also covers `bool` _PyScalar: TypeAlias = complex | bytes | str +# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload def take( a: _ArrayLike[_ScalarT], @@ -405,7 +416,7 @@ def argmax( axis: None = None, out: None = None, *, - keepdims: Literal[False] = ..., + keepdims: Literal[False] | _NoValueType = ..., ) -> intp: ... @overload def argmax( @@ -413,7 +424,7 @@ def argmax( axis: SupportsIndex | None = None, out: None = None, *, - keepdims: bool = ..., + keepdims: bool | _NoValueType = ..., ) -> Any: ... @overload def argmax( @@ -421,7 +432,7 @@ def argmax( axis: SupportsIndex | None, out: _BoolOrIntArrayT, *, - keepdims: bool = ..., + keepdims: bool | _NoValueType = ..., ) -> _BoolOrIntArrayT: ... @overload def argmax( @@ -429,7 +440,7 @@ def argmax( axis: SupportsIndex | None = None, *, out: _BoolOrIntArrayT, - keepdims: bool = ..., + keepdims: bool | _NoValueType = ..., ) -> _BoolOrIntArrayT: ... @overload @@ -438,7 +449,7 @@ def argmin( axis: None = None, out: None = None, *, - keepdims: Literal[False] = ..., + keepdims: Literal[False] | _NoValueType = ..., ) -> intp: ... 
@overload def argmin( @@ -446,7 +457,7 @@ def argmin( axis: SupportsIndex | None = None, out: None = None, *, - keepdims: bool = ..., + keepdims: bool | _NoValueType = ..., ) -> Any: ... @overload def argmin( @@ -454,7 +465,7 @@ def argmin( axis: SupportsIndex | None, out: _BoolOrIntArrayT, *, - keepdims: bool = ..., + keepdims: bool | _NoValueType = ..., ) -> _BoolOrIntArrayT: ... @overload def argmin( @@ -462,9 +473,10 @@ def argmin( axis: SupportsIndex | None = None, *, out: _BoolOrIntArrayT, - keepdims: bool = ..., + keepdims: bool | _NoValueType = ..., ) -> _BoolOrIntArrayT: ... +# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload def searchsorted( a: ArrayLike, @@ -480,7 +492,7 @@ def searchsorted( sorter: _ArrayLikeInt_co | None = None, # 1D int array ) -> NDArray[intp]: ... -# +# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload def resize(a: _ArrayLike[_ScalarT], new_shape: SupportsIndex | tuple[SupportsIndex]) -> np.ndarray[tuple[int], np.dtype[_ScalarT]]: ... @overload @@ -494,6 +506,7 @@ def resize(a: ArrayLike, new_shape: _AnyShapeT) -> np.ndarray[_AnyShapeT, np.dty @overload def resize(a: ArrayLike, new_shape: _ShapeLike) -> NDArray[Any]: ... +# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload def squeeze( a: _ScalarT, @@ -627,69 +640,54 @@ def compress( out: _ArrayT, ) -> _ArrayT: ... +# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload def clip( a: _ScalarT, - a_min: ArrayLike | None, - a_max: ArrayLike | None, + a_min: ArrayLike | _NoValueType | None = ..., + a_max: ArrayLike | _NoValueType | None = ..., out: None = None, *, - min: ArrayLike | None = ..., - max: ArrayLike | None = ..., - dtype: None = ..., - where: _ArrayLikeBool_co | None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - signature: str | tuple[str | None, ...] 
= ..., - casting: _CastingKind = ..., + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + dtype: None = None, + **kwargs: Unpack[_UFuncKwargs], ) -> _ScalarT: ... @overload def clip( a: _ScalarLike_co, - a_min: ArrayLike | None, - a_max: ArrayLike | None, + a_min: ArrayLike | _NoValueType | None = ..., + a_max: ArrayLike | _NoValueType | None = ..., out: None = None, *, - min: ArrayLike | None = ..., - max: ArrayLike | None = ..., - dtype: None = ..., - where: _ArrayLikeBool_co | None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - signature: str | tuple[str | None, ...] = ..., - casting: _CastingKind = ..., + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + dtype: None = None, + **kwargs: Unpack[_UFuncKwargs], ) -> Any: ... @overload def clip( a: _ArrayLike[_ScalarT], - a_min: ArrayLike | None, - a_max: ArrayLike | None, + a_min: ArrayLike | _NoValueType | None = ..., + a_max: ArrayLike | _NoValueType | None = ..., out: None = None, *, - min: ArrayLike | None = ..., - max: ArrayLike | None = ..., - dtype: None = ..., - where: _ArrayLikeBool_co | None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - signature: str | tuple[str | None, ...] = ..., - casting: _CastingKind = ..., + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + dtype: None = None, + **kwargs: Unpack[_UFuncKwargs], ) -> NDArray[_ScalarT]: ... @overload def clip( a: ArrayLike, - a_min: ArrayLike | None, - a_max: ArrayLike | None, + a_min: ArrayLike | _NoValueType | None = ..., + a_max: ArrayLike | _NoValueType | None = ..., out: None = None, *, - min: ArrayLike | None = ..., - max: ArrayLike | None = ..., - dtype: None = ..., - where: _ArrayLikeBool_co | None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - signature: str | tuple[str | None, ...] 
= ..., - casting: _CastingKind = ..., + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + dtype: None = None, + **kwargs: Unpack[_UFuncKwargs], ) -> NDArray[Any]: ... @overload def clip( @@ -698,30 +696,34 @@ def clip( a_max: ArrayLike | None, out: _ArrayT, *, - min: ArrayLike | None = ..., - max: ArrayLike | None = ..., - dtype: DTypeLike = ..., - where: _ArrayLikeBool_co | None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - signature: str | tuple[str | None, ...] = ..., - casting: _CastingKind = ..., + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + dtype: DTypeLike | None = None, + **kwargs: Unpack[_UFuncKwargs], ) -> _ArrayT: ... @overload def clip( a: ArrayLike, - a_min: ArrayLike | None, - a_max: ArrayLike | None, - out: ArrayLike = None, + a_min: ArrayLike | _NoValueType | None = ..., + a_max: ArrayLike | _NoValueType | None = ..., *, - min: ArrayLike | None = ..., - max: ArrayLike | None = ..., - dtype: DTypeLike, - where: _ArrayLikeBool_co | None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - signature: str | tuple[str | None, ...] = ..., - casting: _CastingKind = ..., + out: _ArrayT, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + dtype: DTypeLike | None = None, + **kwargs: Unpack[_UFuncKwargs], +) -> _ArrayT: ... +@overload +def clip( + a: ArrayLike, + a_min: ArrayLike | _NoValueType | None = ..., + a_max: ArrayLike | _NoValueType | None = ..., + out: None = None, + *, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + dtype: DTypeLike | None = None, + **kwargs: Unpack[_UFuncKwargs], ) -> Any: ... 
@overload @@ -730,9 +732,9 @@ def sum( axis: None = None, dtype: None = None, out: None = None, - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ScalarT: ... @overload def sum( @@ -740,9 +742,9 @@ def sum( axis: None = None, dtype: None = None, out: None = None, - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ScalarT | NDArray[_ScalarT]: ... @overload def sum( @@ -750,9 +752,9 @@ def sum( axis: None, dtype: _DTypeLike[_ScalarT], out: None = None, - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ScalarT: ... @overload def sum( @@ -761,9 +763,9 @@ def sum( *, dtype: _DTypeLike[_ScalarT], out: None = None, - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ScalarT: ... @overload def sum( @@ -771,9 +773,9 @@ def sum( axis: _ShapeLike | None, dtype: _DTypeLike[_ScalarT], out: None = None, - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ScalarT | NDArray[_ScalarT]: ... 
@overload def sum( @@ -782,9 +784,9 @@ def sum( *, dtype: _DTypeLike[_ScalarT], out: None = None, - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ScalarT | NDArray[_ScalarT]: ... @overload def sum( @@ -792,9 +794,9 @@ def sum( axis: _ShapeLike | None = None, dtype: DTypeLike = None, out: None = None, - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload def sum( @@ -802,9 +804,9 @@ def sum( axis: _ShapeLike | None, dtype: DTypeLike, out: _ArrayT, - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... @overload def sum( @@ -813,9 +815,9 @@ def sum( dtype: DTypeLike = None, *, out: _ArrayT, - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... # keep in sync with `any` @@ -1003,21 +1005,21 @@ def ptp( a: _ArrayLike[_ScalarT], axis: None = None, out: None = None, - keepdims: Literal[False] = ..., + keepdims: Literal[False] | _NoValueType = ..., ) -> _ScalarT: ... @overload def ptp( a: ArrayLike, axis: _ShapeLike | None = None, out: None = None, - keepdims: bool = ..., + keepdims: bool | _NoValueType = ..., ) -> Any: ... @overload def ptp( a: ArrayLike, axis: _ShapeLike | None, out: _ArrayT, - keepdims: bool = ..., + keepdims: bool | _NoValueType = ..., ) -> _ArrayT: ... 
@overload def ptp( @@ -1025,7 +1027,7 @@ def ptp( axis: _ShapeLike | None = None, *, out: _ArrayT, - keepdims: bool = ..., + keepdims: bool | _NoValueType = ..., ) -> _ArrayT: ... @overload @@ -1033,27 +1035,27 @@ def amax( a: _ArrayLike[_ScalarT], axis: None = None, out: None = None, - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ScalarT: ... @overload def amax( a: ArrayLike, axis: _ShapeLike | None = None, out: None = None, - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload def amax( a: ArrayLike, axis: _ShapeLike | None, out: _ArrayT, - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... @overload def amax( @@ -1061,9 +1063,9 @@ def amax( axis: _ShapeLike | None = None, *, out: _ArrayT, - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... @overload @@ -1071,27 +1073,27 @@ def amin( a: _ArrayLike[_ScalarT], axis: None = None, out: None = None, - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ScalarT: ... 
@overload def amin( a: ArrayLike, axis: _ShapeLike | None = None, out: None = None, - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload def amin( a: ArrayLike, axis: _ShapeLike | None, out: _ArrayT, - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... @overload def amin( @@ -1099,27 +1101,27 @@ def amin( axis: _ShapeLike | None = None, *, out: _ArrayT, - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... # TODO: `np.prod()``: For object arrays `initial` does not necessarily # have to be a numerical scalar. # The only requirement is that it is compatible # with the `.__mul__()` method(s) of the passed array's elements. - # Note that the same situation holds for all wrappers around # `np.ufunc.reduce`, e.g. `np.sum()` (`.__add__()`). +# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload def prod( a: _ArrayLikeBool_co, axis: None = None, dtype: None = None, out: None = None, - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> int_: ... 
@overload def prod( @@ -1127,9 +1129,9 @@ def prod( axis: None = None, dtype: None = None, out: None = None, - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> uint64: ... @overload def prod( @@ -1137,9 +1139,9 @@ def prod( axis: None = None, dtype: None = None, out: None = None, - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> int64: ... @overload def prod( @@ -1147,9 +1149,9 @@ def prod( axis: None = None, dtype: None = None, out: None = None, - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> floating: ... @overload def prod( @@ -1157,9 +1159,9 @@ def prod( axis: None = None, dtype: None = None, out: None = None, - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> complexfloating: ... @overload def prod( @@ -1167,9 +1169,9 @@ def prod( axis: _ShapeLike | None = None, dtype: None = None, out: None = None, - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... 
@overload def prod( @@ -1177,9 +1179,9 @@ def prod( axis: None, dtype: _DTypeLike[_ScalarT], out: None = None, - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ScalarT: ... @overload def prod( @@ -1188,9 +1190,9 @@ def prod( *, dtype: _DTypeLike[_ScalarT], out: None = None, - keepdims: Literal[False] = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ScalarT: ... @overload def prod( @@ -1198,9 +1200,9 @@ def prod( axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, out: None = None, - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload def prod( @@ -1208,9 +1210,9 @@ def prod( axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT, - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... @overload def prod( @@ -1219,11 +1221,12 @@ def prod( dtype: DTypeLike | None = None, *, out: _ArrayT, - keepdims: bool = ..., - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... 
+# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload def cumprod( a: _ArrayLikeBool_co, @@ -1304,6 +1307,7 @@ def cumprod( out: _ArrayT, ) -> _ArrayT: ... +# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload def cumulative_prod( x: _ArrayLikeBool_co, @@ -1399,6 +1403,7 @@ def ndim(a: ArrayLike) -> int: ... def size(a: ArrayLike, axis: int | tuple[int, ...] | None = None) -> int: ... +# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload def around( a: _BoolLike_co, @@ -1449,6 +1454,7 @@ def around( out: _ArrayT, ) -> _ArrayT: ... +# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload def mean( a: _ArrayLikeFloat_co, @@ -1567,7 +1573,7 @@ def std( dtype: None = None, out: None = None, ddof: float = 0, - keepdims: Literal[False] = ..., + keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _NoValueType = ..., @@ -1580,7 +1586,7 @@ def std( dtype: None = None, out: None = None, ddof: float = 0, - keepdims: bool = ..., + keepdims: bool | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., @@ -1593,7 +1599,7 @@ def std( dtype: _DTypeLike[_ScalarT], out: None = None, ddof: float = 0, - keepdims: Literal[False] = ..., + keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., @@ -1607,7 +1613,7 @@ def std( dtype: _DTypeLike[_ScalarT], out: None = None, ddof: float = 0, - keepdims: Literal[False] = ..., + keepdims: Literal[False] | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., correction: float | _NoValueType = ..., @@ -1619,7 +1625,7 @@ def std( dtype: DTypeLike = None, 
out: None = None, ddof: float = 0, - keepdims: bool = ..., + keepdims: bool | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., @@ -1632,7 +1638,7 @@ def std( dtype: DTypeLike, out: _ArrayT, ddof: float = 0, - keepdims: bool = ..., + keepdims: bool | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., @@ -1646,7 +1652,7 @@ def std( *, out: _ArrayT, ddof: float = 0, - keepdims: bool = ..., + keepdims: bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., correction: float | _NoValueType = ..., @@ -1659,7 +1665,7 @@ def var( dtype: None = None, out: None = None, ddof: float = 0, - keepdims: Literal[False] = ..., + keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _NoValueType = ..., @@ -1672,7 +1678,7 @@ def var( dtype: None = None, out: None = None, ddof: float = 0, - keepdims: bool = ..., + keepdims: bool | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., @@ -1685,7 +1691,7 @@ def var( dtype: _DTypeLike[_ScalarT], out: None = None, ddof: float = 0, - keepdims: Literal[False] = ..., + keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., @@ -1699,7 +1705,7 @@ def var( dtype: _DTypeLike[_ScalarT], out: None = None, ddof: float = 0, - keepdims: Literal[False] = ..., + keepdims: Literal[False] | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., correction: float | _NoValueType = ..., @@ -1711,7 +1717,7 @@ def var( dtype: DTypeLike = None, 
out: None = None, ddof: float = 0, - keepdims: bool = ..., + keepdims: bool | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., @@ -1724,7 +1730,7 @@ def var( dtype: DTypeLike, out: _ArrayT, ddof: float = 0, - keepdims: bool = ..., + keepdims: bool | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., @@ -1738,7 +1744,7 @@ def var( *, out: _ArrayT, ddof: float = 0, - keepdims: bool = ..., + keepdims: bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., correction: float | _NoValueType = ..., From bde097d548a1dddcd21084400a8fdddd6c0683d2 Mon Sep 17 00:00:00 2001 From: Britney Whittington <103079612+bwhitt7@users.noreply.github.com> Date: Wed, 27 Aug 2025 14:17:41 -0400 Subject: [PATCH 0379/1718] TST: Replace xunit setup with methods (#29628) * TST: Replace xunit setup with methods * TST: Remove markers, err_status fixture to autouse * TST: err_status fixture scope, moved vars --- numpy/lib/tests/test_recfunctions.py | 84 ++++------ numpy/ma/tests/test_core.py | 235 ++++++++++++++------------- 2 files changed, 159 insertions(+), 160 deletions(-) diff --git a/numpy/lib/tests/test_recfunctions.py b/numpy/lib/tests/test_recfunctions.py index 72377b8f7c35..3991f92b16a3 100644 --- a/numpy/lib/tests/test_recfunctions.py +++ b/numpy/lib/tests/test_recfunctions.py @@ -1,4 +1,3 @@ - import numpy as np import numpy.ma as ma from numpy.lib.recfunctions import ( @@ -31,19 +30,14 @@ class TestRecFunctions: # Misc tests - - def setup_method(self): + def test_zip_descr(self): + # Test zip_descr x = np.array([1, 2, ]) y = np.array([10, 20, 30]) z = np.array([('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)]) w = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) 
- self.data = (w, x, y, z) - - def test_zip_descr(self): - # Test zip_descr - (w, x, y, z) = self.data # Std array test = zip_descr((x, x), flatten=True) @@ -448,7 +442,7 @@ def test_masked_flexible(self): class TestMergeArrays: # Test merge_arrays - def setup_method(self): + def _create_arrays(self): x = np.array([1, 2, ]) y = np.array([10, 20, 30]) z = np.array( @@ -456,11 +450,11 @@ def setup_method(self): w = np.array( [(1, (2, 3.0, ())), (4, (5, 6.0, ()))], dtype=[('a', int), ('b', [('ba', float), ('bb', int), ('bc', [])])]) - self.data = (w, x, y, z) + return w, x, y, z def test_solo(self): # Test merge_arrays on a single array. - (_, x, _, z) = self.data + _, x, _, z = self._create_arrays() test = merge_arrays(x) control = np.array([(1,), (2,)], dtype=[('f0', int)]) @@ -475,7 +469,7 @@ def test_solo(self): def test_solo_w_flatten(self): # Test merge_arrays on a single array w & w/o flattening - w = self.data[0] + w = self._create_arrays()[0] test = merge_arrays(w, flatten=False) assert_equal(test, w) @@ -487,7 +481,7 @@ def test_solo_w_flatten(self): def test_standard(self): # Test standard & standard # Test merge arrays - (_, x, y, _) = self.data + _, x, y, _ = self._create_arrays() test = merge_arrays((x, y), usemask=False) control = np.array([(1, 10), (2, 20), (-1, 30)], dtype=[('f0', int), ('f1', int)]) @@ -502,7 +496,7 @@ def test_standard(self): def test_flatten(self): # Test standard & flexible - (_, x, _, z) = self.data + _, x, _, z = self._create_arrays() test = merge_arrays((x, z), flatten=True) control = np.array([(1, 'A', 1.), (2, 'B', 2.)], dtype=[('f0', int), ('A', '|S3'), ('B', float)]) @@ -516,7 +510,7 @@ def test_flatten(self): def test_flatten_wflexible(self): # Test flatten standard & nested - (w, x, _, _) = self.data + w, x, _, _ = self._create_arrays() test = merge_arrays((x, w), flatten=True) control = np.array([(1, 1, 2, 3.0), (2, 4, 5, 6.0)], dtype=[('f0', int), @@ -532,7 +526,7 @@ def test_flatten_wflexible(self): def 
test_wmasked_arrays(self): # Test merge_arrays masked arrays - (_, x, _, _) = self.data + x = self._create_arrays()[1] mx = ma.array([1, 2, 3], mask=[1, 0, 0]) test = merge_arrays((x, mx), usemask=True) control = ma.array([(1, 1), (2, 2), (-1, 3)], @@ -554,7 +548,7 @@ def test_w_singlefield(self): def test_w_shorter_flex(self): # Test merge_arrays w/ a shorter flexndarray. - z = self.data[-1] + z = self._create_arrays()[-1] # Fixme, this test looks incomplete and broken #test = merge_arrays((z, np.array([10, 20, 30]).view([('C', int)]))) @@ -567,7 +561,7 @@ def test_w_shorter_flex(self): dtype=[('A', '|S3'), ('B', float), ('C', int)]) def test_singlerecord(self): - (_, x, y, z) = self.data + _, x, y, z = self._create_arrays() test = merge_arrays((x[0], y[0], z[0]), usemask=False) control = np.array([(1, 10, ('A', 1))], dtype=[('f0', int), @@ -579,18 +573,18 @@ def test_singlerecord(self): class TestAppendFields: # Test append_fields - def setup_method(self): + def _create_arrays(self): x = np.array([1, 2, ]) y = np.array([10, 20, 30]) z = np.array( [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)]) w = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) - self.data = (w, x, y, z) + return w, x, y, z def test_append_single(self): # Test simple case - (_, x, _, _) = self.data + x = self._create_arrays()[1] test = append_fields(x, 'A', data=[10, 20, 30]) control = ma.array([(1, 10), (2, 20), (-1, 30)], mask=[(0, 0), (0, 0), (1, 0)], @@ -599,7 +593,7 @@ def test_append_single(self): def test_append_double(self): # Test simple case - (_, x, _, _) = self.data + x = self._create_arrays()[1] test = append_fields(x, ('A', 'B'), data=[[10, 20, 30], [100, 200]]) control = ma.array([(1, 10, 100), (2, 20, 200), (-1, 30, -1)], mask=[(0, 0, 0), (0, 0, 0), (1, 0, 1)], @@ -608,7 +602,7 @@ def test_append_double(self): def test_append_on_flex(self): # Test append_fields on flexible type arrays - z = self.data[-1] + z = 
self._create_arrays()[-1] test = append_fields(z, 'C', data=[10, 20, 30]) control = ma.array([('A', 1., 10), ('B', 2., 20), (-1, -1., 30)], mask=[(0, 0, 0), (0, 0, 0), (1, 1, 0)], @@ -617,7 +611,7 @@ def test_append_on_flex(self): def test_append_on_nested(self): # Test append_fields on nested fields - w = self.data[0] + w = self._create_arrays()[0] test = append_fields(w, 'C', data=[10, 20, 30]) control = ma.array([(1, (2, 3.0), 10), (4, (5, 6.0), 20), @@ -632,18 +626,18 @@ def test_append_on_nested(self): class TestStackArrays: # Test stack_arrays - def setup_method(self): + def _create_arrays(self): x = np.array([1, 2, ]) y = np.array([10, 20, 30]) z = np.array( [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)]) w = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) - self.data = (w, x, y, z) + return w, x, y, z def test_solo(self): # Test stack_arrays on single arrays - (_, x, _, _) = self.data + x = self._create_arrays()[1] test = stack_arrays((x,)) assert_equal(test, x) assert_(test is x) @@ -654,7 +648,7 @@ def test_solo(self): def test_unnamed_fields(self): # Tests combinations of arrays w/o named fields - (_, x, y, _) = self.data + _, x, y, _ = self._create_arrays() test = stack_arrays((x, x), usemask=False) control = np.array([1, 2, 1, 2]) @@ -670,7 +664,7 @@ def test_unnamed_fields(self): def test_unnamed_and_named_fields(self): # Test combination of arrays w/ & w/o named fields - (_, x, _, z) = self.data + _, x, _, z = self._create_arrays() test = stack_arrays((x, z)) control = ma.array([(1, -1, -1), (2, -1, -1), @@ -702,7 +696,7 @@ def test_unnamed_and_named_fields(self): def test_matching_named_fields(self): # Test combination of arrays w/ matching field names - (_, x, _, z) = self.data + _, x, _, z = self._create_arrays() zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], dtype=[('A', '|S3'), ('B', float), ('C', float)]) test = stack_arrays((z, zz)) @@ -730,7 +724,7 @@ def 
test_matching_named_fields(self): def test_defaults(self): # Test defaults: no exception raised if keys of defaults are not fields. - (_, _, _, z) = self.data + z = self._create_arrays()[-1] zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], dtype=[('A', '|S3'), ('B', float), ('C', float)]) defaults = {'A': '???', 'B': -999., 'C': -9999., 'D': -99999.} @@ -802,18 +796,18 @@ def test_subdtype(self): class TestJoinBy: - def setup_method(self): - self.a = np.array(list(zip(np.arange(10), np.arange(50, 60), + def _create_arrays(self): + a = np.array(list(zip(np.arange(10), np.arange(50, 60), np.arange(100, 110))), dtype=[('a', int), ('b', int), ('c', int)]) - self.b = np.array(list(zip(np.arange(5, 15), np.arange(65, 75), + b = np.array(list(zip(np.arange(5, 15), np.arange(65, 75), np.arange(100, 110))), dtype=[('a', int), ('b', int), ('d', int)]) + return a, b def test_inner_join(self): # Basic test of join_by - a, b = self.a, self.b - + a, b = self._create_arrays() test = join_by('a', a, b, jointype='inner') control = np.array([(5, 55, 65, 105, 100), (6, 56, 66, 106, 101), (7, 57, 67, 107, 102), (8, 58, 68, 108, 103), @@ -823,8 +817,7 @@ def test_inner_join(self): assert_equal(test, control) def test_join(self): - a, b = self.a, self.b - + a, b = self._create_arrays() # Fixme, this test is broken #test = join_by(('a', 'b'), a, b) #control = np.array([(5, 55, 105, 100), (6, 56, 106, 101), @@ -833,7 +826,6 @@ def test_join(self): # dtype=[('a', int), ('b', int), # ('c', int), ('d', int)]) #assert_equal(test, control) - join_by(('a', 'b'), a, b) np.array([(5, 55, 105, 100), (6, 56, 106, 101), (7, 57, 107, 102), (8, 58, 108, 103), @@ -851,8 +843,7 @@ def test_join_subdtype(self): assert_equal(res, bar.view(ma.MaskedArray)) def test_outer_join(self): - a, b = self.a, self.b - + a, b = self._create_arrays() test = join_by(('a', 'b'), a, b, 'outer') control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1), (2, 52, 102, -1), (3, 53, 103, -1), @@ -879,8 
+870,7 @@ def test_outer_join(self): assert_equal(test, control) def test_leftouter_join(self): - a, b = self.a, self.b - + a, b = self._create_arrays() test = join_by(('a', 'b'), a, b, 'leftouter') control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1), (2, 52, 102, -1), (3, 53, 103, -1), @@ -1029,19 +1019,17 @@ def test_two_keys_two_vars(self): assert_equal(test.dtype, control.dtype) assert_equal(test, control) + class TestAppendFieldsObj: """ Test append_fields with arrays containing objects """ # https://github.com/numpy/numpy/issues/2346 - def setup_method(self): - from datetime import date - self.data = {'obj': date(2000, 1, 1)} - def test_append_to_objects(self): "Test append_fields when the base array contains objects" - obj = self.data['obj'] + from datetime import date + obj = date(2000, 1, 1) x = np.array([(obj, 1.), (obj, 2.)], dtype=[('A', object), ('B', float)]) y = np.array([10, 20], dtype=int) diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index bdef61dac3ba..fbebd9fccc37 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -152,12 +152,11 @@ WARNING_MESSAGE = ("setting an item on a masked array which has a shared " "mask will not copy") WARNING_MARK_SPEC = f"ignore:.*{WARNING_MESSAGE}:numpy.ma.core.MaskedArrayFutureWarning" - class TestMaskedArray: # Base test class for MaskedArrays. # message for warning filters - def setup_method(self): + def _create_data(self): # Base data definition. x = np.array([1., 1., 1., -2., pi / 2.0, 4., 5., -10., 10., 1., 2., 3.]) y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) @@ -170,7 +169,7 @@ def setup_method(self): zm = masked_array(z, mask=[0, 1, 0, 0]) xf = np.where(m1, 1e+20, x) xm.set_fill_value(1e+20) - self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf) + return x, y, a10, m1, m2, xm, ym, z, zm, xf def test_basicattributes(self): # Tests some basic array attributes. 
@@ -196,7 +195,7 @@ def test_basic0d(self): def test_basic1d(self): # Test of basic array creation and properties in 1 dimension. - (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + x, _, _, m1, _, xm, ym, z, zm, xf = self._create_data() assert_(not isMaskedArray(x)) assert_(isMaskedArray(xm)) assert_((xm - ym).filled(0).any()) @@ -214,7 +213,7 @@ def test_basic1d(self): def test_basic2d(self): # Test of basic array creation and properties in 2 dimensions. - (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + x, y, _, m1, _, xm, ym, _, _, xf = self._create_data() for s in [(4, 3), (6, 2)]: x = x.reshape(s) y = y.reshape(s) @@ -234,7 +233,7 @@ def test_basic2d(self): def test_concatenate_basic(self): # Tests concatenations. - (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + x, y, _, _, _, xm, ym, _, _, _ = self._create_data() # basic concatenation assert_equal(np.concatenate((x, y)), concatenate((xm, ym))) assert_equal(np.concatenate((x, y)), concatenate((x, y))) @@ -243,7 +242,7 @@ def test_concatenate_basic(self): def test_concatenate_alongaxis(self): # Tests concatenations. - (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + x, y, _, m1, m2, xm, ym, z, _, xf = self._create_data() # Concatenation along an axis s = (3, 4) x = x.reshape(s) @@ -367,7 +366,7 @@ def test_unknown_keyword_parameter(self): MaskedArray([1, 2, 3], maks=[0, 1, 0]) # `mask` is misspelled. def test_asarray(self): - (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + xm = self._create_data()[5] xm.fill_value = -9999 xm._hardmask = True xmm = asarray(xm) @@ -1123,8 +1122,7 @@ def test_maskedarray_tofile_raises_notimplementederror(self): class TestMaskedArrayArithmetic: # Base test class for MaskedArrays. - - def setup_method(self): + def _create_data(self): # Base data definition. 
x = np.array([1., 1., 1., -2., pi / 2.0, 4., 5., -10., 10., 1., 2., 3.]) y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) @@ -1137,16 +1135,18 @@ def setup_method(self): zm = masked_array(z, mask=[0, 1, 0, 0]) xf = np.where(m1, 1e+20, x) xm.set_fill_value(1e+20) - self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf) - self.err_status = np.geterr() - np.seterr(divide='ignore', invalid='ignore') + return x, y, a10, m1, m2, xm, ym, z, zm, xf - def teardown_method(self): - np.seterr(**self.err_status) + @pytest.fixture(autouse=True, scope="class") + def err_status(self): + err = np.geterr() + np.seterr(divide='ignore', invalid='ignore') + yield err + np.seterr(**err) def test_basic_arithmetic(self): # Test of basic arithmetic. - (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + x, y, a10, _, _, xm, ym, _, _, xf = self._create_data() a2d = array([[1, 2], [0, 4]]) a2dm = masked_array(a2d, [[0, 0], [1, 0]]) assert_equal(a2d * a2d, a2d * a2dm) @@ -1256,7 +1256,7 @@ def test_scalar_arithmetic(self): def test_basic_ufuncs(self): # Test various functions such as sin, cos. - (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + x, y, _, _, _, xm, ym, z, zm, _ = self._create_data() assert_equal(np.cos(x), cos(xm)) assert_equal(np.cosh(x), cosh(xm)) assert_equal(np.sin(x), sin(xm)) @@ -1318,7 +1318,7 @@ def test_count_on_python_builtins(self): def test_minmax_func(self): # Tests minimum and maximum. - (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + x, y, _, _, _, xm, _, _, _, _ = self._create_data() # max doesn't work if shaped xr = np.ravel(x) xmr = ravel(xm) @@ -1390,7 +1390,7 @@ def test_minmax_funcs_with_output(self): def test_minmax_methods(self): # Additional tests on max/min - (_, _, _, _, _, xm, _, _, _, _) = self.d + xm = self._create_data()[5] xm.shape = (xm.size,) assert_equal(xm.max(), 10) assert_(xm[0].max() is masked) @@ -1491,7 +1491,7 @@ def minmax_with_mask(arr, mask): def test_addsumprod(self): # Tests add, sum, product. 
- (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + x, y, _, _, _, xm, ym, _, _, _ = self._create_data() assert_equal(np.add.reduce(x), add.reduce(x)) assert_equal(np.add.accumulate(x), add.accumulate(x)) assert_equal(4, sum(array(4), axis=0)) @@ -1612,7 +1612,7 @@ def test_noshink_on_creation(self): def test_mod(self): # Tests mod - (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + x, y, _, _, _, xm, ym, _, _, _ = self._create_data() assert_equal(mod(x, y), mod(xm, ym)) test = mod(ym, xm) assert_equal(test, np.mod(ym, xm)) @@ -2590,16 +2590,17 @@ def test_fillvalue_bytes_or_str(self): class TestUfuncs: # Test class for the application of ufuncs on MaskedArrays. - - def setup_method(self): + def _create_data(self): # Base data definition. - self.d = (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6), + return (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6), array([1.0, 0, -1, pi / 2] * 2, mask=[1, 0] + [0] * 6),) - self.err_status = np.geterr() - np.seterr(divide='ignore', invalid='ignore') - def teardown_method(self): - np.seterr(**self.err_status) + @pytest.fixture(autouse=True, scope="class") + def err_status(self): + err = np.geterr() + np.seterr(divide='ignore', invalid='ignore') + yield err + np.seterr(**err) def test_testUfuncRegression(self): # Tests new ufuncs on MaskedArrays. @@ -2625,7 +2626,7 @@ def test_testUfuncRegression(self): except AttributeError: uf = getattr(fromnumeric, f) mf = getattr(numpy.ma.core, f) - args = self.d[:uf.nin] + args = self._create_data()[:uf.nin] ur = uf(*args) mr = mf(*args) assert_equal(ur.filled(0), mr.filled(0), f) @@ -2633,7 +2634,7 @@ def test_testUfuncRegression(self): def test_reduce(self): # Tests reduce on MaskedArrays. 
- a = self.d[0] + a = self._create_data()[0] assert_(not alltrue(a, axis=0)) assert_(sometrue(a, axis=0)) assert_equal(sum(a[:3], axis=0), 0) @@ -2737,34 +2738,41 @@ def test_masked_array_underflow(self): X2 = X / 2.0 np.testing.assert_array_equal(X2, x / 2) + class TestMaskedArrayInPlaceArithmetic: # Test MaskedArray Arithmetic - - def setup_method(self): + def _create_intdata(self): x = arange(10) y = arange(10) xm = arange(10) xm[2] = masked - self.intdata = (x, y, xm) - self.floatdata = (x.astype(float), y.astype(float), xm.astype(float)) - self.othertypes = np.typecodes['AllInteger'] + np.typecodes['AllFloat'] - self.othertypes = [np.dtype(_).type for _ in self.othertypes] - self.uint8data = ( + return x, y, xm + + def _create_floatdata(self): + x, y, xm = self._create_intdata() + return x.astype(float), y.astype(float), xm.astype(float) + + def _create_otherdata(self): + o = np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + othertypes = [np.dtype(_).type for _ in o] + x, y, xm = self._create_intdata() + uint8data = ( x.astype(np.uint8), y.astype(np.uint8), xm.astype(np.uint8) ) + return othertypes, uint8data def test_inplace_addition_scalar(self): # Test of inplace additions - (x, y, xm) = self.intdata + x, y, xm = self._create_intdata() xm[2] = masked x += 1 assert_equal(x, y + 1) xm += 1 assert_equal(xm, y + 1) - (x, _, xm) = self.floatdata + x, _, xm = self._create_floatdata() id1 = x.data.ctypes.data x += 1. 
assert_(id1 == x.data.ctypes.data) @@ -2772,7 +2780,7 @@ def test_inplace_addition_scalar(self): def test_inplace_addition_array(self): # Test of inplace additions - (x, y, xm) = self.intdata + x, y, xm = self._create_intdata() m = xm.mask a = arange(10, dtype=np.int16) a[-1] = masked @@ -2784,7 +2792,7 @@ def test_inplace_addition_array(self): def test_inplace_subtraction_scalar(self): # Test of inplace subtractions - (x, y, xm) = self.intdata + x, y, xm = self._create_intdata() x -= 1 assert_equal(x, y - 1) xm -= 1 @@ -2792,7 +2800,7 @@ def test_inplace_subtraction_scalar(self): def test_inplace_subtraction_array(self): # Test of inplace subtractions - (x, y, xm) = self.floatdata + x, y, xm = self._create_floatdata() m = xm.mask a = arange(10, dtype=float) a[-1] = masked @@ -2804,7 +2812,7 @@ def test_inplace_subtraction_array(self): def test_inplace_multiplication_scalar(self): # Test of inplace multiplication - (x, y, xm) = self.floatdata + x, y, xm = self._create_floatdata() x *= 2.0 assert_equal(x, y * 2) xm *= 2.0 @@ -2812,7 +2820,7 @@ def test_inplace_multiplication_scalar(self): def test_inplace_multiplication_array(self): # Test of inplace multiplication - (x, y, xm) = self.floatdata + x, y, xm = self._create_floatdata() m = xm.mask a = arange(10, dtype=float) a[-1] = masked @@ -2824,7 +2832,7 @@ def test_inplace_multiplication_array(self): def test_inplace_division_scalar_int(self): # Test of inplace division - (x, y, xm) = self.intdata + x, y, xm = self._create_intdata() x = arange(10) * 2 xm = arange(10) * 2 xm[2] = masked @@ -2835,7 +2843,7 @@ def test_inplace_division_scalar_int(self): def test_inplace_division_scalar_float(self): # Test of inplace division - (x, y, xm) = self.floatdata + x, y, xm = self._create_floatdata() x /= 2.0 assert_equal(x, y / 2.0) xm /= arange(10) @@ -2843,7 +2851,7 @@ def test_inplace_division_scalar_float(self): def test_inplace_division_array_float(self): # Test of inplace division - (x, y, xm) = self.floatdata + x, y, 
xm = self._create_floatdata() m = xm.mask a = arange(10, dtype=float) a[-1] = masked @@ -3020,10 +3028,11 @@ def test_datafriendly_mul_arrays(self): def test_inplace_addition_scalar_type(self): # Test of inplace additions - for t in self.othertypes: + othertypes, uint8data = self._create_otherdata() + for t in othertypes: with warnings.catch_warnings(): warnings.filterwarnings("error") - (x, y, xm) = (_.astype(t) for _ in self.uint8data) + x, y, xm = (_.astype(t) for _ in uint8data) xm[2] = masked x += t(1) assert_equal(x, y + t(1)) @@ -3032,10 +3041,11 @@ def test_inplace_addition_scalar_type(self): def test_inplace_addition_array_type(self): # Test of inplace additions - for t in self.othertypes: + othertypes, uint8data = self._create_otherdata() + for t in othertypes: with warnings.catch_warnings(): warnings.filterwarnings("error") - (x, y, xm) = (_.astype(t) for _ in self.uint8data) + x, y, xm = (_.astype(t) for _ in uint8data) m = xm.mask a = arange(10, dtype=t) a[-1] = masked @@ -3047,10 +3057,11 @@ def test_inplace_addition_array_type(self): def test_inplace_subtraction_scalar_type(self): # Test of inplace subtractions - for t in self.othertypes: + othertypes, uint8data = self._create_otherdata() + for t in othertypes: with warnings.catch_warnings(): warnings.filterwarnings("error") - (x, y, xm) = (_.astype(t) for _ in self.uint8data) + x, y, xm = (_.astype(t) for _ in uint8data) x -= t(1) assert_equal(x, y - t(1)) xm -= t(1) @@ -3058,10 +3069,11 @@ def test_inplace_subtraction_scalar_type(self): def test_inplace_subtraction_array_type(self): # Test of inplace subtractions - for t in self.othertypes: + othertypes, uint8data = self._create_otherdata() + for t in othertypes: with warnings.catch_warnings(): warnings.filterwarnings("error") - (x, y, xm) = (_.astype(t) for _ in self.uint8data) + x, y, xm = (_.astype(t) for _ in uint8data) m = xm.mask a = arange(10, dtype=t) a[-1] = masked @@ -3073,10 +3085,11 @@ def test_inplace_subtraction_array_type(self): def 
test_inplace_multiplication_scalar_type(self): # Test of inplace multiplication - for t in self.othertypes: + othertypes, uint8data = self._create_otherdata() + for t in othertypes: with warnings.catch_warnings(): warnings.filterwarnings("error") - (x, y, xm) = (_.astype(t) for _ in self.uint8data) + x, y, xm = (_.astype(t) for _ in uint8data) x *= t(2) assert_equal(x, y * t(2)) xm *= t(2) @@ -3084,10 +3097,11 @@ def test_inplace_multiplication_scalar_type(self): def test_inplace_multiplication_array_type(self): # Test of inplace multiplication - for t in self.othertypes: + othertypes, uint8data = self._create_otherdata() + for t in othertypes: with warnings.catch_warnings(): warnings.filterwarnings("error") - (x, y, xm) = (_.astype(t) for _ in self.uint8data) + x, y, xm = (_.astype(t) for _ in uint8data) m = xm.mask a = arange(10, dtype=t) a[-1] = masked @@ -3100,11 +3114,12 @@ def test_inplace_multiplication_array_type(self): def test_inplace_floor_division_scalar_type(self): # Test of inplace division # Check for TypeError in case of unsupported types + othertypes, uint8data = self._create_otherdata() unsupported = {np.dtype(t).type for t in np.typecodes["Complex"]} - for t in self.othertypes: + for t in othertypes: with warnings.catch_warnings(): warnings.filterwarnings("error") - (x, y, xm) = (_.astype(t) for _ in self.uint8data) + x, y, xm = (_.astype(t) for _ in uint8data) x = arange(10, dtype=t) * t(2) xm = arange(10, dtype=t) * t(2) xm[2] = masked @@ -3120,11 +3135,12 @@ def test_inplace_floor_division_scalar_type(self): def test_inplace_floor_division_array_type(self): # Test of inplace division # Check for TypeError in case of unsupported types + othertypes, uint8data = self._create_otherdata() unsupported = {np.dtype(t).type for t in np.typecodes["Complex"]} - for t in self.othertypes: + for t in othertypes: with warnings.catch_warnings(): warnings.filterwarnings("error") - (x, y, xm) = (_.astype(t) for _ in self.uint8data) + x, y, xm = (_.astype(t) for 
_ in uint8data) m = xm.mask a = arange(10, dtype=t) a[-1] = masked @@ -3143,10 +3159,11 @@ def test_inplace_floor_division_array_type(self): def test_inplace_division_scalar_type(self): # Test of inplace division + othertypes, uint8data = self._create_otherdata() with warnings.catch_warnings(): warnings.simplefilter('error', DeprecationWarning) - for t in self.othertypes: - (x, y, xm) = (_.astype(t) for _ in self.uint8data) + for t in othertypes: + x, y, xm = (_.astype(t) for _ in uint8data) x = arange(10, dtype=t) * t(2) xm = arange(10, dtype=t) * t(2) xm[2] = masked @@ -3179,10 +3196,11 @@ def test_inplace_division_scalar_type(self): def test_inplace_division_array_type(self): # Test of inplace division + othertypes, uint8data = self._create_otherdata() with warnings.catch_warnings(): warnings.simplefilter('error', DeprecationWarning) - for t in self.othertypes: - (x, y, xm) = (_.astype(t) for _ in self.uint8data) + for t in othertypes: + x, y, xm = (_.astype(t) for _ in uint8data) m = xm.mask a = arange(10, dtype=t) a[-1] = masked @@ -3219,7 +3237,8 @@ def test_inplace_division_array_type(self): def test_inplace_pow_type(self): # Test keeping data w/ (inplace) power - for t in self.othertypes: + othertypes = self._create_otherdata()[0] + for t in othertypes: with warnings.catch_warnings(): warnings.filterwarnings("error") # Test pow on scalar @@ -3236,7 +3255,7 @@ def test_inplace_pow_type(self): class TestMaskedArrayMethods: # Test class for miscellaneous MaskedArrays methods. - def setup_method(self): + def _create_data(self): # Base data definition. 
x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, @@ -3266,7 +3285,7 @@ def setup_method(self): m2x = array(data=x, mask=m2) m2X = array(data=X, mask=m2.reshape(X.shape)) m2XX = array(data=XX, mask=m2.reshape(XX.shape)) - self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) + return x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX def test_generic_methods(self): # Tests some MaskedArray methods. @@ -3363,7 +3382,7 @@ def test_allany_oddities(self): def test_argmax_argmin(self): # Tests argmin & argmax on MaskedArrays. - (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + _, _, _, _, mx, mX, _, m2x, m2X, _ = self._create_data() assert_equal(mx.argmin(), 35) assert_equal(mX.argmin(), 35) @@ -4015,8 +4034,7 @@ def test_diagonal_view(self): class TestMaskedArrayMathMethods: - - def setup_method(self): + def _create_data(self): # Base data definition. x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, @@ -4046,11 +4064,11 @@ def setup_method(self): m2x = array(data=x, mask=m2) m2X = array(data=X, mask=m2.reshape(X.shape)) m2XX = array(data=XX, mask=m2.reshape(XX.shape)) - self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) + return x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX def test_cumsumprod(self): # Tests cumsum & cumprod on MaskedArrays. - (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + mX = self._create_data()[5] mXcp = mX.cumsum(0) assert_equal(mXcp._data, mX.filled(0).cumsum(0)) mXcp = mX.cumsum(1) @@ -4084,7 +4102,7 @@ def test_cumsumprod_with_output(self): def test_ptp(self): # Tests ptp on MaskedArrays. - (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + _, X, _, m, mx, mX, _, _, _, _ = self._create_data() (n, m) = X.shape assert_equal(mx.ptp(), np.ptp(mx.compressed())) rows = np.zeros(n, float) @@ -4148,7 +4166,7 @@ def test_anom(self): def test_trace(self): # Tests trace on MaskedArrays. 
- (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + _, X, _, _, _, mX, _, _, _, _ = self._create_data() mXdiag = mX.diagonal() assert_equal(mX.trace(), mX.diagonal().compressed().sum()) assert_almost_equal(mX.trace(), @@ -4163,7 +4181,7 @@ def test_trace(self): def test_dot(self): # Tests dot on MaskedArrays. - (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + _, _, _, _, mx, mX, mXX, _, _, _ = self._create_data() fx = mx.filled(0) r = mx.dot(mx) assert_almost_equal(r.filled(0), fx.dot(fx)) @@ -4212,7 +4230,7 @@ def test_varmean_nomask(self): def test_varstd(self): # Tests var & std on MaskedArrays. - (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + _, X, XX, _, _, mX, mXX, _, _, _ = self._create_data() assert_almost_equal(mX.var(axis=None), mX.compressed().var()) assert_almost_equal(mX.std(axis=None), mX.compressed().std()) assert_almost_equal(mX.std(axis=None, ddof=1), @@ -4363,7 +4381,7 @@ def test_diff_with_n_0(self): class TestMaskedArrayMathMethodsComplex: # Test class for miscellaneous MaskedArrays methods. - def setup_method(self): + def _create_data(self): # Base data definition. x = np.array([8.375j, 7.545j, 8.828j, 8.5j, 1.757j, 5.928, 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, @@ -4393,11 +4411,11 @@ def setup_method(self): m2x = array(data=x, mask=m2) m2X = array(data=X, mask=m2.reshape(X.shape)) m2XX = array(data=XX, mask=m2.reshape(XX.shape)) - self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) + return x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX def test_varstd(self): # Tests var & std on MaskedArrays. - (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + _, X, XX, _, _, mX, mXX, _, _, _ = self._create_data() assert_almost_equal(mX.var(axis=None), mX.compressed().var()) assert_almost_equal(mX.std(axis=None), mX.compressed().std()) assert_equal(mXX.var(axis=3).shape, XX.var(axis=3).shape) @@ -4416,17 +4434,6 @@ def test_varstd(self): class TestMaskedArrayFunctions: # Test class for miscellaneous functions. 
- - def setup_method(self): - x = np.array([1., 1., 1., -2., pi / 2.0, 4., 5., -10., 10., 1., 2., 3.]) - y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) - m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] - m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] - xm = masked_array(x, mask=m1) - ym = masked_array(y, mask=m2) - xm.set_fill_value(1e+20) - self.info = (xm, ym) - def test_masked_where_bool(self): x = [1, 2] y = masked_where(False, x) @@ -5131,8 +5138,7 @@ def test_convolve(self): class TestMaskedFields: - - def setup_method(self): + def _create_data(self): ilist = [1, 2, 3, 4, 5] flist = [1.1, 2.2, 3.3, 4.4, 5.5] slist = ['one', 'two', 'three', 'four', 'five'] @@ -5140,11 +5146,12 @@ def setup_method(self): mdtype = [('a', bool), ('b', bool), ('c', bool)] mask = [0, 1, 0, 0, 1] base = array(list(zip(ilist, flist, slist)), mask=mask, dtype=ddtype) - self.data = {"base": base, "mask": mask, "ddtype": ddtype, "mdtype": mdtype} + return {"base": base, "mask": mask, "ddtype": ddtype, "mdtype": mdtype} def test_set_records_masks(self): - base = self.data['base'] - mdtype = self.data['mdtype'] + data = self._create_data() + base = data['base'] + mdtype = data['mdtype'] # Set w/ nomask or masked base.mask = nomask assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype)) @@ -5163,7 +5170,7 @@ def test_set_records_masks(self): def test_set_record_element(self): # Check setting an element of a record) - base = self.data['base'] + base = self._create_data()['base'] (base_a, base_b, base_c) = (base['a'], base['b'], base['c']) base[0] = (pi, pi, 'pi') @@ -5178,7 +5185,7 @@ def test_set_record_element(self): [b'pi', b'two', b'three', b'four', b'five']) def test_set_record_slice(self): - base = self.data['base'] + base = self._create_data()['base'] (base_a, base_b, base_c) = (base['a'], base['b'], base['c']) base[:3] = (pi, pi, 'pi') @@ -5194,7 +5201,7 @@ def test_set_record_slice(self): def test_mask_element(self): "Check record access" - base = 
self.data['base'] + base = self._create_data()['base'] base[0] = masked for n in ('a', 'b', 'c'): @@ -5287,9 +5294,10 @@ def test_setitem_scalar(self): assert_array_equal(arr.mask, [True, False, False]) def test_element_len(self): + data = self._create_data() # check that len() works for mvoid (Github issue #576) - for rec in self.data['base']: - assert_equal(len(rec), len(self.data['ddtype'])) + for rec in data['base']: + assert_equal(len(rec), len(data['ddtype'])) class TestMaskedObjectArray: @@ -5341,31 +5349,30 @@ def test_nested_ma(self): class TestMaskedView: - - def setup_method(self): + def _create_data(self): iterator = list(zip(np.arange(10), np.random.rand(10))) data = np.array(iterator) a = array(iterator, dtype=[('a', float), ('b', float)]) a.mask[0] = (1, 0) controlmask = np.array([1] + 19 * [0], dtype=bool) - self.data = (data, a, controlmask) + return data, a, controlmask def test_view_to_nothing(self): - (data, a, controlmask) = self.data + a = self._create_data()[1] test = a.view() assert_(isinstance(test, MaskedArray)) assert_equal(test._data, a._data) assert_equal(test._mask, a._mask) def test_view_to_type(self): - (data, a, controlmask) = self.data + data, a, _ = self._create_data() test = a.view(np.ndarray) assert_(not isinstance(test, MaskedArray)) assert_equal(test, a._data) assert_equal_records(test, data.view(a.dtype).squeeze()) def test_view_to_simple_dtype(self): - (data, a, controlmask) = self.data + data, a, controlmask = self._create_data() # View globally test = a.view(float) assert_(isinstance(test, MaskedArray)) @@ -5373,7 +5380,7 @@ def test_view_to_simple_dtype(self): assert_equal(test.mask, controlmask) def test_view_to_flexible_dtype(self): - (data, a, controlmask) = self.data + a = self._create_data()[1] test = a.view([('A', float), ('B', float)]) assert_equal(test.mask.dtype.names, ('A', 'B')) @@ -5393,7 +5400,7 @@ def test_view_to_flexible_dtype(self): assert_equal(test['B'], a['b'][-1]) def test_view_to_subdtype(self): - 
(data, a, controlmask) = self.data + data, a, controlmask = self._create_data() # View globally test = a.view((float, 2)) assert_(isinstance(test, MaskedArray)) @@ -5410,7 +5417,7 @@ def test_view_to_subdtype(self): assert_equal(test, data[-1]) def test_view_to_dtype_and_type(self): - (data, a, controlmask) = self.data + data, a, _ = self._create_data() test = a.view((float, 2), np.recarray) assert_equal(test, data) @@ -5643,6 +5650,7 @@ def test_masked_array(): a = np.ma.array([0, 1, 2, 3], mask=[0, 0, 1, 0]) assert_equal(np.argwhere(a), [[1], [3]]) + def test_masked_array_no_copy(): # check nomask array is updated in place a = np.ma.array([1, 2, 3, 4]) @@ -5657,6 +5665,7 @@ def test_masked_array_no_copy(): _ = np.ma.masked_invalid(a, copy=False) assert_array_equal(a.mask, [True, False, False, False, False]) + def test_append_masked_array(): a = np.ma.masked_equal([1, 2, 3], value=2) b = np.ma.masked_equal([4, 3, 2], value=2) @@ -5695,6 +5704,7 @@ def test_append_masked_array_along_axis(): assert_array_equal(result.data, expected.data) assert_array_equal(result.mask, expected.mask) + def test_default_fill_value_complex(): # regression test for Python 3, where 'unicode' was not defined assert_(default_fill_value(1 + 1j) == 1.e20 + 0.0j) @@ -5864,6 +5874,7 @@ def test_mask_shape_assignment_does_not_break_masked(): b.shape = (1,) assert_equal(a.mask.shape, ()) + @pytest.mark.skipif(sys.flags.optimize > 1, reason="no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1") # noqa: E501 def test_doc_note(): From a1098a8279fb960f53c8f3cfdaa3b0fba5203a6b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 27 Aug 2025 23:14:41 +0000 Subject: [PATCH 0380/1718] MAINT: Bump actions/dependency-review-action from 4.7.2 to 4.7.3 Bumps [actions/dependency-review-action](https://github.com/actions/dependency-review-action) from 4.7.2 to 4.7.3. 
- [Release notes](https://github.com/actions/dependency-review-action/releases) - [Commits](https://github.com/actions/dependency-review-action/compare/bc41886e18ea39df68b1b1245f4184881938e050...595b5aeba73380359d98a5e087f648dbb0edce1b) --- updated-dependencies: - dependency-name: actions/dependency-review-action dependency-version: 4.7.3 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/dependency-review.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml index 248b69a0d939..be1c6f7c4bb1 100644 --- a/.github/workflows/dependency-review.yml +++ b/.github/workflows/dependency-review.yml @@ -19,6 +19,6 @@ jobs: with: persist-credentials: false - name: 'Dependency Review' - uses: actions/dependency-review-action@bc41886e18ea39df68b1b1245f4184881938e050 # v4.7.2 + uses: actions/dependency-review-action@595b5aeba73380359d98a5e087f648dbb0edce1b # v4.7.3 with: allow-ghsas: GHSA-cx63-2mw6-8hw5 From 447a903b95f885760cf8833f2787f016a5dd1b30 Mon Sep 17 00:00:00 2001 From: AnkitAhlawat Date: Thu, 28 Aug 2025 22:34:16 +0530 Subject: [PATCH 0381/1718] BUG: Fix np.unique with axis=0 and 1D input not collapsing NaNs with equal_nan=True #29336 (#29372) * BUG: Fix np.unique with axis=0 and 1D input not collapsing NaNs with equal_nan=True * Update test_arraysetops.py corrected white space * MAINT: Fix lint issue (W293) in test_arraysetops.py Review comment implementation * Fix:found failure with review comments so revert back the changes * Review commets implemented * doc test update * Apply suggestions from code review * Apply suggestions from code review --------- Co-authored-by: Sebastian Berg --- numpy/lib/_arraysetops_impl.py | 5 ++++- numpy/lib/tests/test_arraysetops.py | 18 ++++++++++++++++++ 2 files changed, 22 insertions(+), 1 deletion(-) diff --git a/numpy/lib/_arraysetops_impl.py 
b/numpy/lib/_arraysetops_impl.py index c4788385b924..f85a3d55aae9 100644 --- a/numpy/lib/_arraysetops_impl.py +++ b/numpy/lib/_arraysetops_impl.py @@ -21,6 +21,7 @@ import numpy as np from numpy._core import overrides from numpy._core._multiarray_umath import _array_converter, _unique_hash +from numpy.lib.array_utils import normalize_axis_index array_function_dispatch = functools.partial( overrides.array_function_dispatch, module='numpy') @@ -290,7 +291,9 @@ def unique(ar, return_index=False, return_inverse=False, """ ar = np.asanyarray(ar) - if axis is None: + if axis is None or ar.ndim == 1: + if axis is not None: + normalize_axis_index(axis, ar.ndim) ret = _unique1d(ar, return_index, return_inverse, return_counts, equal_nan=equal_nan, inverse_shape=ar.shape, axis=None, sorted=sorted) diff --git a/numpy/lib/tests/test_arraysetops.py b/numpy/lib/tests/test_arraysetops.py index b3e2bfa279b0..86d93a569d98 100644 --- a/numpy/lib/tests/test_arraysetops.py +++ b/numpy/lib/tests/test_arraysetops.py @@ -1256,3 +1256,21 @@ def test_unique_with_matrix(self, data, transpose, dtype): u = np.unique(mat) expected = np.unique(np.asarray(mat)) assert_array_equal(u, expected, strict=True) + + def test_unique_axis0_equal_nan_on_1d_array(self): + # Test Issue #29336 + arr1d = np.array([np.nan, 0, 0, np.nan]) + expected = np.array([0., np.nan]) + result = np.unique(arr1d, axis=0, equal_nan=True) + assert_array_equal(result, expected) + + def test_unique_axis_minus1_eq_on_1d_array(self): + arr1d = np.array([np.nan, 0, 0, np.nan]) + expected = np.array([0., np.nan]) + result = np.unique(arr1d, axis=-1, equal_nan=True) + assert_array_equal(result, expected) + + def test_unique_axis_float_raises_typeerror(self): + arr1d = np.array([np.nan, 0, 0, np.nan]) + with pytest.raises(TypeError, match="integer argument expected"): + np.unique(arr1d, axis=0.0, equal_nan=False) From 2a04da3a996fe389127342f3b37a13b669e6c7ff Mon Sep 17 00:00:00 2001 From: Christian Bourjau Date: Sun, 31 Aug 2025 
22:16:51 +0200 Subject: [PATCH 0382/1718] Update basics.strings.rst --- doc/source/user/basics.strings.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/user/basics.strings.rst b/doc/source/user/basics.strings.rst index 460bc1fe589f..cbbaa8f6e3b3 100644 --- a/doc/source/user/basics.strings.rst +++ b/doc/source/user/basics.strings.rst @@ -109,7 +109,7 @@ that empty strings are used to populate empty arrays: >>> np.empty(3, dtype=StringDType()) array(['', '', ''], dtype=StringDType()) -Optionally, you can pass create an instance of ``StringDType`` with +Optionally, you can create an instance of ``StringDType`` with support for missing values by passing ``na_object`` as a keyword argument for the initializer: From 17632fa4472e51e24037a19650a6508452667183 Mon Sep 17 00:00:00 2001 From: Britney Whittington <103079612+bwhitt7@users.noreply.github.com> Date: Mon, 1 Sep 2025 10:36:30 -0400 Subject: [PATCH 0383/1718] TST: Replace xunit setup with methods (#29641) * TST: Replace xunit setup with methods * TST: Replace xunit setup with methods * STY: Reverted spacing --- numpy/ma/tests/test_extras.py | 35 +++++++-------- numpy/ma/tests/test_mrecords.py | 12 +++--- numpy/ma/tests/test_old_ma.py | 47 +++++++++++---------- numpy/ma/tests/test_subclassing.py | 20 ++++----- numpy/matrixlib/tests/test_masked_matrix.py | 12 +++--- 5 files changed, 64 insertions(+), 62 deletions(-) diff --git a/numpy/ma/tests/test_extras.py b/numpy/ma/tests/test_extras.py index f9deb33fc2c5..230c9d211f19 100644 --- a/numpy/ma/tests/test_extras.py +++ b/numpy/ma/tests/test_extras.py @@ -1314,11 +1314,11 @@ def test_object(self): class TestCov: - def setup_method(self): - self.data = array(np.random.rand(12)) + def _create_data(self): + return array(np.random.rand(12)) def test_covhelper(self): - x = self.data + x = self._create_data() # Test not mask output type is a float. 
assert_(_covhelper(x, rowvar=True)[1].dtype, np.float32) assert_(_covhelper(x, y=x, rowvar=False)[1].dtype, np.float32) @@ -1339,7 +1339,7 @@ def test_covhelper(self): def test_1d_without_missing(self): # Test cov on 1D variable w/o missing values - x = self.data + x = self._create_data() assert_almost_equal(np.cov(x), cov(x)) assert_almost_equal(np.cov(x, rowvar=False), cov(x, rowvar=False)) assert_almost_equal(np.cov(x, rowvar=False, bias=True), @@ -1347,7 +1347,7 @@ def test_1d_without_missing(self): def test_2d_without_missing(self): # Test cov on 1 2D variable w/o missing values - x = self.data.reshape(3, 4) + x = self._create_data().reshape(3, 4) assert_almost_equal(np.cov(x), cov(x)) assert_almost_equal(np.cov(x, rowvar=False), cov(x, rowvar=False)) assert_almost_equal(np.cov(x, rowvar=False, bias=True), @@ -1355,7 +1355,7 @@ def test_2d_without_missing(self): def test_1d_with_missing(self): # Test cov 1 1D variable w/missing values - x = self.data + x = self._create_data() x[-1] = masked x -= x.mean() nx = x.compressed() @@ -1379,7 +1379,7 @@ def test_1d_with_missing(self): def test_2d_with_missing(self): # Test cov on 2D variable w/ missing value - x = self.data + x = self._create_data() x[-1] = masked x = x.reshape(3, 4) valid = np.logical_not(getmaskarray(x)).astype(int) @@ -1401,13 +1401,14 @@ def test_2d_with_missing(self): class TestCorrcoef: - def setup_method(self): - self.data = array(np.random.rand(12)) - self.data2 = array(np.random.rand(12)) + def _create_data(self): + data = array(np.random.rand(12)) + data2 = array(np.random.rand(12)) + return data, data2 def test_ddof(self): # ddof raises DeprecationWarning - x, y = self.data, self.data2 + x, y = self._create_data() expected = np.corrcoef(x) expected2 = np.corrcoef(x, y) with pytest.warns(DeprecationWarning): @@ -1425,7 +1426,7 @@ def test_ddof(self): assert_almost_equal(corrcoef(x, y, ddof=3), expected2) def test_bias(self): - x, y = self.data, self.data2 + x, y = self._create_data() 
expected = np.corrcoef(x) # bias raises DeprecationWarning with pytest.warns(DeprecationWarning): @@ -1443,7 +1444,7 @@ def test_bias(self): def test_1d_without_missing(self): # Test cov on 1D variable w/o missing values - x = self.data + x = self._create_data()[0] assert_almost_equal(np.corrcoef(x), corrcoef(x)) assert_almost_equal(np.corrcoef(x, rowvar=False), corrcoef(x, rowvar=False)) @@ -1455,7 +1456,7 @@ def test_1d_without_missing(self): def test_2d_without_missing(self): # Test corrcoef on 1 2D variable w/o missing values - x = self.data.reshape(3, 4) + x = self._create_data()[0].reshape(3, 4) assert_almost_equal(np.corrcoef(x), corrcoef(x)) assert_almost_equal(np.corrcoef(x, rowvar=False), corrcoef(x, rowvar=False)) @@ -1467,7 +1468,7 @@ def test_2d_without_missing(self): def test_1d_with_missing(self): # Test corrcoef 1 1D variable w/missing values - x = self.data + x = self._create_data()[0] x[-1] = masked x -= x.mean() nx = x.compressed() @@ -1499,7 +1500,7 @@ def test_1d_with_missing(self): def test_2d_with_missing(self): # Test corrcoef on 2D variable w/ missing value - x = self.data + x = self._create_data()[0] x[-1] = masked x = x.reshape(3, 4) @@ -1519,7 +1520,7 @@ def test_2d_with_missing(self): class TestPolynomial: - # + def test_polyfit(self): # Tests polyfit # On ndarrays diff --git a/numpy/ma/tests/test_mrecords.py b/numpy/ma/tests/test_mrecords.py index e0b0db24904c..b4070df0f9a3 100644 --- a/numpy/ma/tests/test_mrecords.py +++ b/numpy/ma/tests/test_mrecords.py @@ -350,24 +350,24 @@ def test_exotic_formats(self): class TestView: - def setup_method(self): - (a, b) = (np.arange(10), np.random.rand(10)) + def _create_data(self): + a, b = (np.arange(10), np.random.rand(10)) ndtype = [('a', float), ('b', float)] arr = np.array(list(zip(a, b)), dtype=ndtype) mrec = fromarrays([a, b], dtype=ndtype, fill_value=(-9., -99.)) mrec.mask[3] = (False, True) - self.data = (mrec, a, b, arr) + return mrec, a, b, arr def test_view_by_itself(self): - (mrec, a, 
b, arr) = self.data + mrec = self._create_data()[0] test = mrec.view() assert_(isinstance(test, MaskedRecords)) assert_equal_records(test, mrec) assert_equal_records(test._mask, mrec._mask) def test_view_simple_dtype(self): - (mrec, a, b, arr) = self.data + mrec, a, b, _ = self._create_data() ntype = (float, 2) test = mrec.view(ntype) assert_(isinstance(test, ma.MaskedArray)) @@ -375,7 +375,7 @@ def test_view_simple_dtype(self): assert_(test[3, 1] is ma.masked) def test_view_flexible_type(self): - (mrec, a, b, arr) = self.data + mrec, _, _, arr = self._create_data() alttype = [('A', float), ('B', float)] test = mrec.view(alttype) assert_(isinstance(test, MaskedRecords)) diff --git a/numpy/ma/tests/test_old_ma.py b/numpy/ma/tests/test_old_ma.py index f5d6d3ec27b9..fcf02fa2dccb 100644 --- a/numpy/ma/tests/test_old_ma.py +++ b/numpy/ma/tests/test_old_ma.py @@ -97,7 +97,7 @@ def eq(v, w, msg=''): class TestMa: - def setup_method(self): + def _create_data(self): x = np.array([1., 1., 1., -2., pi / 2.0, 4., 5., -10., 10., 1., 2., 3.]) y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) a10 = 10. @@ -110,11 +110,11 @@ def setup_method(self): xf = np.where(m1, 1e+20, x) s = x.shape xm.set_fill_value(1e+20) - self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) + return x, y, a10, m1, m2, xm, ym, z, zm, xf, s def test_testBasic1d(self): # Test of basic array creation and properties in 1 dimension. - (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d + x, _, _, m1, _, xm, _, _, _, xf, s = self._create_data() assert_(not isMaskedArray(x)) assert_(isMaskedArray(xm)) assert_equal(shape(xm), s) @@ -129,7 +129,7 @@ def test_testBasic1d(self): @pytest.mark.parametrize("s", [(4, 3), (6, 2)]) def test_testBasic2d(self, s): # Test of basic array creation and properties in 2 dimensions. 
- (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d + x, y, _, m1, _, xm, ym, _, _, xf, s = self._create_data() x.shape = s y.shape = s xm.shape = s @@ -148,7 +148,7 @@ def test_testBasic2d(self, s): def test_testArithmetic(self): # Test of basic arithmetic. - (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d + x, y, a10, _, _, xm, ym, _, _, xf, s = self._create_data() a2d = array([[1, 2], [0, 4]]) a2dm = masked_array(a2d, [[0, 0], [1, 0]]) assert_(eq(a2d * a2d, a2d * a2dm)) @@ -192,7 +192,7 @@ def test_testMixedArithmetic(self): def test_testUfuncs1(self): # Test various functions such as sin, cos. - (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d + x, y, _, _, _, xm, ym, z, zm, _, _ = self._create_data() assert_(eq(np.cos(x), cos(xm))) assert_(eq(np.cosh(x), cosh(xm))) assert_(eq(np.sin(x), sin(xm))) @@ -238,7 +238,7 @@ def test_xtestCount(self): def test_testMinMax(self): # Test minimum and maximum. - (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d + x, _, _, _, _, xm, _, _, _, _, _ = self._create_data() xr = np.ravel(x) # max doesn't work if shaped xmr = ravel(xm) @@ -248,7 +248,7 @@ def test_testMinMax(self): def test_testAddSumProd(self): # Test add, sum, product. 
- (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d + x, y, _, _, _, xm, ym, _, _, _, s = self._create_data() assert_(eq(np.add.reduce(x), add.reduce(x))) assert_(eq(np.add.accumulate(x), add.accumulate(x))) assert_(eq(4, sum(array(4), axis=0))) @@ -417,7 +417,7 @@ def test_testPut2(self): assert_(eq(x, [0, 1, 10, 40, 4])) def test_testMaPut(self): - (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d + _, _, _, _, _, _, ym, _, zm, _, _ = self._create_data() m = [1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1] i = np.nonzero(m)[0] put(ym, i, zm) @@ -777,8 +777,9 @@ def test_assignment_by_condition_2(self): class TestUfuncs: - def setup_method(self): - self.d = (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6), + + def _create_data(self): + return (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6), array([1.0, 0, -1, pi / 2] * 2, mask=[1, 0] + [0] * 6),) def test_testUfuncRegression(self): @@ -807,7 +808,7 @@ def test_testUfuncRegression(self): except AttributeError: uf = getattr(fromnumeric, f) mf = getattr(np.ma, f) - args = self.d[:uf.nin] + args = self._create_data()[:uf.nin] with np.errstate(): if f in f_invalid_ignore: np.seterr(invalid='ignore') @@ -819,7 +820,7 @@ def test_testUfuncRegression(self): assert_(eqmask(ur.mask, mr.mask)) def test_reduce(self): - a = self.d[0] + a = self._create_data()[0] assert_(not alltrue(a, axis=0)) assert_(sometrue(a, axis=0)) assert_equal(sum(a[:3], axis=0), 0) @@ -843,7 +844,7 @@ def test_nonzero(self): class TestArrayMethods: - def setup_method(self): + def _create_data(self): x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, @@ -863,10 +864,10 @@ def setup_method(self): mX = array(data=X, mask=m.reshape(X.shape)) mXX = array(data=XX, mask=m.reshape(XX.shape)) - self.d = (x, X, XX, m, mx, mX, mXX) + return x, X, XX, m, mx, mX, mXX def test_trace(self): - (x, X, XX, m, mx, mX, mXX,) = self.d + _, X, _, _, _, mX, _ = self._create_data() 
mXdiag = mX.diagonal() assert_equal(mX.trace(), mX.diagonal().compressed().sum()) assert_(eq(mX.trace(), @@ -874,15 +875,15 @@ def test_trace(self): axis=0))) def test_clip(self): - (x, X, XX, m, mx, mX, mXX,) = self.d + x, _, _, _, mx, _, _ = self._create_data() clipped = mx.clip(2, 8) assert_(eq(clipped.mask, mx.mask)) assert_(eq(clipped._data, x.clip(2, 8))) assert_(eq(clipped._data, mx._data.clip(2, 8))) def test_ptp(self): - (x, X, XX, m, mx, mX, mXX,) = self.d - (n, m) = X.shape + _, X, _, m, mx, mX, _ = self._create_data() + n, m = X.shape # print(type(mx), mx.compressed()) # raise Exception() assert_equal(mx.ptp(), np.ptp(mx.compressed())) @@ -896,28 +897,28 @@ def test_ptp(self): assert_(eq(mX.ptp(1), rows)) def test_swapaxes(self): - (x, X, XX, m, mx, mX, mXX,) = self.d + _, _, _, _, _, mX, mXX = self._create_data() mXswapped = mX.swapaxes(0, 1) assert_(eq(mXswapped[-1], mX[:, -1])) mXXswapped = mXX.swapaxes(0, 2) assert_equal(mXXswapped.shape, (2, 2, 3, 3)) def test_cumprod(self): - (x, X, XX, m, mx, mX, mXX,) = self.d + mX = self._create_data()[5] mXcp = mX.cumprod(0) assert_(eq(mXcp._data, mX.filled(1).cumprod(0))) mXcp = mX.cumprod(1) assert_(eq(mXcp._data, mX.filled(1).cumprod(1))) def test_cumsum(self): - (x, X, XX, m, mx, mX, mXX,) = self.d + mX = self._create_data()[5] mXcp = mX.cumsum(0) assert_(eq(mXcp._data, mX.filled(0).cumsum(0))) mXcp = mX.cumsum(1) assert_(eq(mXcp._data, mX.filled(0).cumsum(1))) def test_varstd(self): - (x, X, XX, m, mx, mX, mXX,) = self.d + _, X, XX, _, _, mX, mXX = self._create_data() assert_(eq(mX.var(axis=None), mX.compressed().var())) assert_(eq(mX.std(axis=None), mX.compressed().std())) assert_(eq(mXX.var(axis=3).shape, XX.var(axis=3).shape)) diff --git a/numpy/ma/tests/test_subclassing.py b/numpy/ma/tests/test_subclassing.py index 3364e563097e..22bece987cb7 100644 --- a/numpy/ma/tests/test_subclassing.py +++ b/numpy/ma/tests/test_subclassing.py @@ -188,10 +188,10 @@ def __array_ufunc__(self, ufunc, method, *inputs, 
**kwargs): class TestSubclassing: # Test suite for masked subclasses of ndarray. - def setup_method(self): + def _create_data(self): x = np.arange(5, dtype='float') mx = msubarray(x, mask=[0, 1, 0, 0, 0]) - self.data = (x, mx) + return x, mx def test_data_subclassing(self): # Tests whether the subclass is kept. @@ -205,19 +205,19 @@ def test_data_subclassing(self): def test_maskedarray_subclassing(self): # Tests subclassing MaskedArray - (x, mx) = self.data + mx = self._create_data()[1] assert_(isinstance(mx._data, subarray)) def test_masked_unary_operations(self): # Tests masked_unary_operation - (x, mx) = self.data + x, mx = self._create_data() with np.errstate(divide='ignore'): assert_(isinstance(log(mx), msubarray)) assert_equal(log(x), np.log(x)) def test_masked_binary_operations(self): # Tests masked_binary_operation - (x, mx) = self.data + x, mx = self._create_data() # Result should be a msubarray assert_(isinstance(add(mx, mx), msubarray)) assert_(isinstance(add(mx, x), msubarray)) @@ -230,7 +230,7 @@ def test_masked_binary_operations(self): def test_masked_binary_operations2(self): # Tests domained_masked_binary_operation - (x, mx) = self.data + x, mx = self._create_data() xmx = masked_array(mx.data.__array__(), mask=mx.mask) assert_(isinstance(divide(mx, mx), msubarray)) assert_(isinstance(divide(mx, x), msubarray)) @@ -427,20 +427,20 @@ def test_array_no_inheritance(): class TestClassWrapping: # Test suite for classes that wrap MaskedArrays - def setup_method(self): + def _create_data(self): m = np.ma.masked_array([1, 3, 5], mask=[False, True, False]) wm = WrappedArray(m) - self.data = (m, wm) + return m, wm def test_masked_unary_operations(self): # Tests masked_unary_operation - (m, wm) = self.data + wm = self._create_data()[1] with np.errstate(divide='ignore'): assert_(isinstance(np.log(wm), WrappedArray)) def test_masked_binary_operations(self): # Tests masked_binary_operation - (m, wm) = self.data + m, wm = self._create_data() # Result should be a 
WrappedArray assert_(isinstance(np.add(wm, wm), WrappedArray)) assert_(isinstance(np.add(m, wm), WrappedArray)) diff --git a/numpy/matrixlib/tests/test_masked_matrix.py b/numpy/matrixlib/tests/test_masked_matrix.py index 3f9414ff7d30..ee3dc96b9ac5 100644 --- a/numpy/matrixlib/tests/test_masked_matrix.py +++ b/numpy/matrixlib/tests/test_masked_matrix.py @@ -182,26 +182,26 @@ def test_view(self): class TestSubclassing: # Test suite for masked subclasses of ndarray. - def setup_method(self): + def _create_data(self): x = np.arange(5, dtype='float') mx = MMatrix(x, mask=[0, 1, 0, 0, 0]) - self.data = (x, mx) + return x, mx def test_maskedarray_subclassing(self): # Tests subclassing MaskedArray - (x, mx) = self.data + mx = self._create_data()[1] assert_(isinstance(mx._data, np.matrix)) def test_masked_unary_operations(self): # Tests masked_unary_operation - (x, mx) = self.data + x, mx = self._create_data() with np.errstate(divide='ignore'): assert_(isinstance(log(mx), MMatrix)) assert_equal(log(x), np.log(x)) def test_masked_binary_operations(self): # Tests masked_binary_operation - (x, mx) = self.data + x, mx = self._create_data() # Result should be a MMatrix assert_(isinstance(add(mx, mx), MMatrix)) assert_(isinstance(add(mx, x), MMatrix)) @@ -215,7 +215,7 @@ def test_masked_binary_operations(self): def test_masked_binary_operations2(self): # Tests domained_masked_binary_operation - (x, mx) = self.data + x, mx = self._create_data() xmx = masked_array(mx.data.__array__(), mask=mx.mask) assert_(isinstance(divide(mx, mx), MMatrix)) assert_(isinstance(divide(mx, x), MMatrix)) From 263bb871bffec2b99a6f4ecc2b03743d797ad3e2 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 1 Sep 2025 11:33:20 -0600 Subject: [PATCH 0384/1718] BUG: avoid thread-unsafe refcount check in temp elision --- numpy/_core/src/common/pythoncapi-compat | 2 +- numpy/_core/src/multiarray/temp_elide.c | 13 +++++++++---- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git 
a/numpy/_core/src/common/pythoncapi-compat b/numpy/_core/src/common/pythoncapi-compat index 0f1d42a10a3f..90c06a4cae55 160000 --- a/numpy/_core/src/common/pythoncapi-compat +++ b/numpy/_core/src/common/pythoncapi-compat @@ -1 +1 @@ -Subproject commit 0f1d42a10a3f594ad48894912396df31b2c2d55d +Subproject commit 90c06a4cae557bdbfa4f231a781d2b5c1a8f6d1c diff --git a/numpy/_core/src/multiarray/temp_elide.c b/numpy/_core/src/multiarray/temp_elide.c index 9236476c4213..dbd469b05654 100644 --- a/numpy/_core/src/multiarray/temp_elide.c +++ b/numpy/_core/src/multiarray/temp_elide.c @@ -5,6 +5,7 @@ #include #include "npy_config.h" +#include "npy_pycompat.h" #include "numpy/arrayobject.h" #define NPY_NUMBER_MAX(a, b) ((a) > (b) ? (a) : (b)) @@ -115,10 +116,14 @@ check_unique_temporary(PyObject *lhs) #if PY_VERSION_HEX == 0x030E00A7 && !defined(PYPY_VERSION) #error "NumPy is broken on CPython 3.14.0a7, please update to a newer version" #elif PY_VERSION_HEX >= 0x030E00B1 && !defined(PYPY_VERSION) + // Python 3.14 changed the semantics for reference counting temporaries // see https://github.com/python/cpython/issues/133164 return PyUnstable_Object_IsUniqueReferencedTemporary(lhs); #else - return 1; + // equivalent to Py_REFCNT(lhs) == 1 except on 3.13t + // we need to use the backport on 3.13t because + // this function was first exposed in 3.14 + return PyUnstable_Object_IsUniquelyReferenced(lhs); #endif } @@ -303,13 +308,13 @@ can_elide_temp(PyObject *olhs, PyObject *orhs, int *cannot) * array of a basic type, own its data and size larger than threshold */ PyArrayObject *alhs = (PyArrayObject *)olhs; - if (Py_REFCNT(olhs) != 1 || !PyArray_CheckExact(olhs) || + if (!check_unique_temporary(olhs) || + !PyArray_CheckExact(olhs) || !PyArray_ISNUMBER(alhs) || !PyArray_CHKFLAGS(alhs, NPY_ARRAY_OWNDATA) || !PyArray_ISWRITEABLE(alhs) || PyArray_CHKFLAGS(alhs, NPY_ARRAY_WRITEBACKIFCOPY) || - PyArray_NBYTES(alhs) < NPY_MIN_ELIDE_BYTES || - !check_unique_temporary(olhs)) { + 
PyArray_NBYTES(alhs) < NPY_MIN_ELIDE_BYTES) { return 0; } if (PyArray_CheckExact(orhs) || From a4eebb1c2b3711ea0a06687ab2222117545adb6f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 2 Sep 2025 07:49:33 +0000 Subject: [PATCH 0385/1718] MAINT: Bump github/codeql-action from 3.29.11 to 3.30.0 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.29.11 to 3.30.0. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/3c3833e0f8c1c83d449a7478aa59c036a9165498...2d92b76c45b91eb80fc44c74ce3fce0ee94e8f9d) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.30.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 68aead6651e6..ecdf8cfeb85e 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@3c3833e0f8c1c83d449a7478aa59c036a9165498 # v3.29.11 + uses: github/codeql-action/init@2d92b76c45b91eb80fc44c74ce3fce0ee94e8f9d # v3.30.0 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 
# If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@3c3833e0f8c1c83d449a7478aa59c036a9165498 # v3.29.11 + uses: github/codeql-action/autobuild@2d92b76c45b91eb80fc44c74ce3fce0ee94e8f9d # v3.30.0 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@3c3833e0f8c1c83d449a7478aa59c036a9165498 # v3.29.11 + uses: github/codeql-action/analyze@2d92b76c45b91eb80fc44c74ce3fce0ee94e8f9d # v3.30.0 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index d61c6f1dd815..19acbd619437 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
- name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@3c3833e0f8c1c83d449a7478aa59c036a9165498 # v2.1.27 + uses: github/codeql-action/upload-sarif@2d92b76c45b91eb80fc44c74ce3fce0ee94e8f9d # v2.1.27 with: sarif_file: results.sarif From b0403816009ee50eecbb91a66acf75a446623b8f Mon Sep 17 00:00:00 2001 From: Jonathan Reimer <41432658+jonathimer@users.noreply.github.com> Date: Tue, 2 Sep 2025 14:48:34 +0200 Subject: [PATCH 0386/1718] Add Linux Foundation Health Badge to README --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index b2d3cffc8978..7bf1e13346ce 100644 --- a/README.md +++ b/README.md @@ -13,6 +13,7 @@ https://anaconda.org/conda-forge/numpy) https://stackoverflow.com/questions/tagged/numpy) [![Nature Paper](https://img.shields.io/badge/DOI-10.1038%2Fs41586--020--2649--2-blue)]( https://doi.org/10.1038/s41586-020-2649-2) +[![LFX Health Score](https://insights.linuxfoundation.org/api/badge/health-score?project=numpy)](https://insights.linuxfoundation.org/project/numpy) [![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/numpy/numpy/badge)](https://securityscorecards.dev/viewer/?uri=github.com/numpy/numpy) [![Typing](https://img.shields.io/pypi/types/numpy)](https://pypi.org/project/numpy/) From 29caecb9d4761938aa80f9b1f01fe3b2e77a6044 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Tue, 2 Sep 2025 11:40:43 -0600 Subject: [PATCH 0387/1718] TST: delete global env_setup fixture (#29648) delete global env_setup fixture that previously set `PYTHONHASHSEED` presumably for historical reasons (when dict order was random). 
--- numpy/conftest.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/numpy/conftest.py b/numpy/conftest.py index 1454b4af59de..f411588748e0 100644 --- a/numpy/conftest.py +++ b/numpy/conftest.py @@ -147,10 +147,6 @@ def check_fpu_mode(request): def add_np(doctest_namespace): doctest_namespace['np'] = numpy -@pytest.fixture(autouse=True) -def env_setup(monkeypatch): - monkeypatch.setenv('PYTHONHASHSEED', '0') - if HAVE_SCPDT: From 9f42dca359c51cb421e24dcd211f6678b1ff52b0 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Fri, 29 Aug 2025 12:05:40 -0600 Subject: [PATCH 0388/1718] ENH: Add extended sorting APIs This PR adds new sorting APIs that can be used for extending our current sort types. The new C-APIs do not have the *which* argument of the legacy C-APIs, instead they have *flags* request flag API. In this transition, heapsort can no longer be called directly, introsort (quicksort) is called instead. The sort/argsort methods get new keyword arguments used to set the flags when *kind* is not given, so *flags* is invisible at the Python level. The new keywords are: - stable -- whether the sort should stable, default is False, - descending -- whether to sort in descending order, default is False, - nanfirst -- whether NaNs sort to the last or first, default is False. The main difference here is that the keywords select by functionality, whereas *kind* selects by algorithm. Note that descending and nanfirst are not yet implemented, so an error is raised if either is specified. The added C-API functions are: - (API)PyArray_SortEx, - (API)PyArray_ArgSortEx, Those are the two functions that need changes depending on how we want to implement adding new sort algorithms. They should be pretty easy to adapt to whatever we choose to do. The legacy C-API functions, PyArray_Sort and PyArray_ArgSort, have been modified to call through the new C-API functions. 
In order to do so the *kind* function has been encoded in the *flags* like so: - "quicksort" - default (0) - "heapsort" - default (0) - "mergesort" - stable (1) I note that the partitioning/selection functions have not been modified, we may want to do that a some point. --- doc/release/upcoming_changes/29642.c_api.rst | 14 ++ doc/release/upcoming_changes/29642.change.rst | 4 + doc/source/reference/c-api/array.rst | 82 +++++-- numpy/_core/include/numpy/ndarraytypes.h | 41 +++- .../src/multiarray/_multiarray_tests.c.src | 16 +- numpy/_core/src/multiarray/item_selection.c | 231 ++++++++++-------- numpy/_core/src/multiarray/methods.c | 98 +++++--- numpy/_core/tests/test_multiarray.py | 4 +- 8 files changed, 325 insertions(+), 165 deletions(-) create mode 100644 doc/release/upcoming_changes/29642.c_api.rst create mode 100644 doc/release/upcoming_changes/29642.change.rst diff --git a/doc/release/upcoming_changes/29642.c_api.rst b/doc/release/upcoming_changes/29642.c_api.rst new file mode 100644 index 000000000000..0cc6b1bf4bf4 --- /dev/null +++ b/doc/release/upcoming_changes/29642.c_api.rst @@ -0,0 +1,14 @@ +The NPY_SORTKIND enum has been enhanced with new variables +---------------------------------------------------------- +This is of interest if you are using ``PyArray_Sort`` or ``PyArray_ArgSort``. +We have changed the semantics of the old names in the NPY_SORTKIND enum and +added new ones. The changes are backward compatible, and no recompilation is +needed. The new names of interest are: + +* NPY_SORT_DEFAULT -- default sort (same value as NPY_QUICKSORT) +* NPY_SORT_STABLE -- the sort must be stable (same value as NPY_MERGESORT) +* NPY_SORT_DESCENDING -- the sort must be descending +* NPY_SORT_NANFIRST -- NaNs sort to the beginning + +The semantic change is that NPY_HEAPSORT is mapped to NPY_QUICKSORT when used. 
+ diff --git a/doc/release/upcoming_changes/29642.change.rst b/doc/release/upcoming_changes/29642.change.rst new file mode 100644 index 000000000000..6a7f656740fd --- /dev/null +++ b/doc/release/upcoming_changes/29642.change.rst @@ -0,0 +1,4 @@ +Sorting ``kind='heapsort'`` now maps to ``kind='quicksort'`` +------------------------------------------------------------ +It is unlikely that this change will be noticed, but if you do see a change in +execution time or unstable argsort order, this is likely the cause. diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst index dc043b77f187..e09b61974128 100644 --- a/doc/source/reference/c-api/array.rst +++ b/doc/source/reference/c-api/array.rst @@ -2303,21 +2303,29 @@ Item selection and manipulation .. c:function:: PyObject* PyArray_Sort(PyArrayObject* self, int axis, NPY_SORTKIND kind) - Equivalent to :meth:`ndarray.sort` (*self*, *axis*, *kind*). - Return an array with the items of *self* sorted along *axis*. The array - is sorted using the algorithm denoted by *kind*, which is an integer/enum pointing - to the type of sorting algorithms used. - -.. c:function:: PyObject* PyArray_ArgSort(PyArrayObject* self, int axis) - - Equivalent to :meth:`ndarray.argsort` (*self*, *axis*). - Return an array of indices such that selection of these indices - along the given ``axis`` would return a sorted version of *self*. If *self* ->descr - is a data-type with fields defined, then self->descr->names is used - to determine the sort order. A comparison where the first field is equal - will use the second field and so on. To alter the sort order of a - structured array, create a new data-type with a different order of names - and construct a view of the array with that new data-type. + Equivalent to :meth:`ndarray.sort` (*self*, *axis*, + *kind*). Return an array with the items of *self* sorted along *axis*. 
The + array is sorted using an algorithm whose properties are specified by + *kind*, an integer/enum specifying the reguirements of the sorting + algorithm used. If *self* ->descr is a data-type with fields defined, then + self->descr->names is used to determine the sort order. A comparison where + the first field is equal will use the second field and so on. To alter the + sort order of a structured array, create a new data-type with a different + order of names and construct a view of the array with that new data-type. + + +.. c:function:: PyObject* PyArray_ArgSort(PyArrayObject* self, int axis, NPY_SORTKIND kind) + + Equivalent to :meth:`ndarray.argsort` (*self*, + *axis*, *kind*). Return an array of indices such that selection of these + indices along the given ``axis`` would return a sorted version of *self*. + The array is sorted using an algorithm whose properties are specified by + *kind*, an integer/enum specifying the reguirements of the sorting + algorithm used. If *self* ->descr is a data-type with fields defined, then + self->descr->names is used to determine the sort order. A comparison where + the first field is equal will use the second field and so on. To alter the + sort order of a structured array, create a new data-type with a different + order of names and construct a view of the array with that new data-type. .. c:function:: PyObject* PyArray_LexSort(PyObject* sort_keys, int axis) @@ -4321,7 +4329,11 @@ Enumerated Types .. c:enum:: NPY_SORTKIND A special variable-type which can take on different values to indicate - the sorting algorithm being used. + the sorting algorithm being used. These algorithm types have not been + treated strictly for some time, but rather treated as stable/not stable. + In NumPy 2.4 they are replaced by requirements (see below), but done in a + backwards compatible way. These values will continue to work, except that + that NPY_HEAPSORT will do the same thing as NPY_QUICKSORT. .. 
c:enumerator:: NPY_QUICKSORT @@ -4335,11 +4347,41 @@ Enumerated Types .. c:enumerator:: NPY_NSORTS - Defined to be the number of sorts. It is fixed at three by the need for - backwards compatibility, and consequently :c:data:`NPY_MERGESORT` and - :c:data:`NPY_STABLESORT` are aliased to each other and may refer to one - of several stable sorting algorithms depending on the data type. + Defined to be the number of sorts. It is fixed at three by the need for + backwards compatibility, and consequently :c:data:`NPY_MERGESORT` and + :c:data:`NPY_STABLESORT` are aliased to each other and may refer to one + of several stable sorting algorithms depending on the data type. + + In NumPy 2.4 the algorithm names are replaced by requirements. You can still use + the old values, a recompile is not needed, but they are reinterpreted such that + + * NPY_QUICKSORT and NPY_HEAPSORT -> NPY_SORT_DEFAULT + * NPY_MERGESORT and NPY_STABLE -> NPY_SORT_STABLE + + .. c:enumerator:: NPY_SORT_DEFAULT + + The default sort for the type. For the NumPy builtin types it may be + stable or not, but will be ascending and sort NaN types to the end. It + is usually chosen for speed and/or low memory. + + .. c:enumerator:: NPY_SORT_STABLE + + (Requirement) Specifies that the sort must be stable. + + .. c:enumerator:: NPY_SORT_DESCENDING + + (Requirement) Specifies that the sort must be in descending order. + This functionality is not yet implemented for any of the NumPy types + and cannot yet be set from the Python interface. + + .. c:enumerator:: NPY_SORT_NANFIRST + (Requirement) Specifies that the sort must sort NaNs to + the beginning, e.g. NaNs compare less than any other value for the + type. This is only relevant if the type has NaN equivalents. + This functionality is not yet implemented for any of the NumPy types + and cannot yet be set from the Python interface. It may change in the + future. .. 
c:enum:: NPY_SCALARKIND diff --git a/numpy/_core/include/numpy/ndarraytypes.h b/numpy/_core/include/numpy/ndarraytypes.h index baa42406ac88..3df9a4dbfcf9 100644 --- a/numpy/_core/include/numpy/ndarraytypes.h +++ b/numpy/_core/include/numpy/ndarraytypes.h @@ -162,18 +162,39 @@ enum NPY_TYPECHAR { }; /* - * Changing this may break Numpy API compatibility - * due to changing offsets in PyArray_ArrFuncs, so be - * careful. Here we have reused the mergesort slot for - * any kind of stable sort, the actual implementation will - * depend on the data type. + * Changing this may break Numpy API compatibility due to changing offsets in + * PyArray_ArrFuncs, so be careful. Here we have reused the mergesort slot for + * any kind of stable sort, the actual implementation will depend on the data + * type. + * + * Updated in NumPy 2.4 + * + * Updated with new names denoting requirements rather than the algorithm. All + * the previous values are reused in a way that should be downstream + * compatible, but the actual algorithms used may be different than before. The + * new approach should be more flexible and easier to update. The idea is that + * NPY_SORT_STABLE | NPY_SORT_DESCENDING | NPY_SORT_NANFIRST should provide an + * index. + * + * Names with a leading underscore are private, and should only be used + * internally by NumPy. + * + * NPY_NSORTS remains the same for backwards compatibility, it should not be + * changed. 
*/ + typedef enum { - _NPY_SORT_UNDEFINED=-1, - NPY_QUICKSORT=0, - NPY_HEAPSORT=1, - NPY_MERGESORT=2, - NPY_STABLESORT=2, + _NPY_SORT_UNDEFINED = -1, + NPY_QUICKSORT = 0, + NPY_HEAPSORT = 1, + NPY_MERGESORT = 2, + NPY_STABLESORT = 2, + // new style names + _NPY_SORT_HEAPSORT = 1, + NPY_SORT_DEFAULT = 0, + NPY_SORT_STABLE = 2, + NPY_SORT_DESCENDING = 4, + NPY_SORT_NANFIRST = 8, } NPY_SORTKIND; #define NPY_NSORTS (NPY_STABLESORT + 1) diff --git a/numpy/_core/src/multiarray/_multiarray_tests.c.src b/numpy/_core/src/multiarray/_multiarray_tests.c.src index 068fabc7fee8..4b58b2789e65 100644 --- a/numpy/_core/src/multiarray/_multiarray_tests.c.src +++ b/numpy/_core/src/multiarray/_multiarray_tests.c.src @@ -2089,10 +2089,18 @@ run_sortkind_converter(PyObject* NPY_UNUSED(self), PyObject *args) return NULL; } switch (kind) { - case _NPY_SORT_UNDEFINED: return PyUnicode_FromString("_NPY_SORT_UNDEFINED"); - case NPY_QUICKSORT: return PyUnicode_FromString("NPY_QUICKSORT"); - case NPY_HEAPSORT: return PyUnicode_FromString("NPY_HEAPSORT"); - case NPY_STABLESORT: return PyUnicode_FromString("NPY_STABLESORT"); + case _NPY_SORT_UNDEFINED: + return PyUnicode_FromString("_NPY_SORT_UNDEFINED"); + case NPY_QUICKSORT: + return PyUnicode_FromString("NPY_QUICKSORT"); + case NPY_HEAPSORT: + return PyUnicode_FromString("NPY_HEAPSORT"); + case NPY_STABLESORT: + return PyUnicode_FromString("NPY_STABLESORT"); + default: + // the other possible values in NPY_SORTKIND can only + // be set with keywords. 
+ break; } return PyLong_FromLong(kind); } diff --git a/numpy/_core/src/multiarray/item_selection.c b/numpy/_core/src/multiarray/item_selection.c index 5c036b704774..9ec70acde6dd 100644 --- a/numpy/_core/src/multiarray/item_selection.c +++ b/numpy/_core/src/multiarray/item_selection.c @@ -1549,56 +1549,6 @@ _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort, } -/*NUMPY_API - * Sort an array in-place - */ -NPY_NO_EXPORT int -PyArray_Sort(PyArrayObject *op, int axis, NPY_SORTKIND which) -{ - PyArray_SortFunc *sort = NULL; - int n = PyArray_NDIM(op); - - if (check_and_adjust_axis(&axis, n) < 0) { - return -1; - } - - if (PyArray_FailUnlessWriteable(op, "sort array") < 0) { - return -1; - } - - if (which < 0 || which >= NPY_NSORTS) { - PyErr_SetString(PyExc_ValueError, "not a valid sort kind"); - return -1; - } - - sort = PyDataType_GetArrFuncs(PyArray_DESCR(op))->sort[which]; - - if (sort == NULL) { - if (PyDataType_GetArrFuncs(PyArray_DESCR(op))->compare) { - switch (which) { - default: - case NPY_QUICKSORT: - sort = npy_quicksort; - break; - case NPY_HEAPSORT: - sort = npy_heapsort; - break; - case NPY_STABLESORT: - sort = npy_timsort; - break; - } - } - else { - PyErr_SetString(PyExc_TypeError, - "type does not have compare function"); - return -1; - } - } - - return _new_sortlike(op, axis, sort, NULL, NULL, 0); -} - - /* * make kth array positive, ravel and sort it */ @@ -1712,52 +1662,6 @@ PyArray_Partition(PyArrayObject *op, PyArrayObject * ktharray, int axis, } -/*NUMPY_API - * ArgSort an array - */ -NPY_NO_EXPORT PyObject * -PyArray_ArgSort(PyArrayObject *op, int axis, NPY_SORTKIND which) -{ - PyArrayObject *op2; - PyArray_ArgSortFunc *argsort = NULL; - PyObject *ret; - - argsort = PyDataType_GetArrFuncs(PyArray_DESCR(op))->argsort[which]; - - if (argsort == NULL) { - if (PyDataType_GetArrFuncs(PyArray_DESCR(op))->compare) { - switch (which) { - default: - case NPY_QUICKSORT: - argsort = npy_aquicksort; - break; - case NPY_HEAPSORT: - 
argsort = npy_aheapsort; - break; - case NPY_STABLESORT: - argsort = npy_atimsort; - break; - } - } - else { - PyErr_SetString(PyExc_TypeError, - "type does not have compare function"); - return NULL; - } - } - - op2 = (PyArrayObject *)PyArray_CheckAxis(op, &axis, 0); - if (op2 == NULL) { - return NULL; - } - - ret = _new_argsortlike(op2, axis, argsort, NULL, NULL, 0); - - Py_DECREF(op2); - return ret; -} - - /*NUMPY_API * ArgPartition an array */ @@ -3154,3 +3058,138 @@ PyArray_MultiIndexSetItem(PyArrayObject *self, const npy_intp *multi_index, return PyArray_Pack(PyArray_DESCR(self), data, obj); } + + +/* Table of generic sort functions for use in PyArray_SortEx*/ +static PyArray_SortFunc* const generic_sort_table[] = {npy_quicksort, + npy_heapsort, + npy_timsort}; + +/*NUMPY_API + * Sort an array in-place with extended parameters + */ +NPY_NO_EXPORT int +PyArray_Sort(PyArrayObject *op, int axis, NPY_SORTKIND flags) +{ + PyArray_SortFunc **sort_table = NULL; + PyArray_SortFunc *sort = NULL; + + if (check_and_adjust_axis(&axis, PyArray_NDIM(op)) < 0) { + return -1; + } + + if (PyArray_FailUnlessWriteable(op, "sort array") < 0) { + return -1; + } + + // Zero the NPY_HEAPSORT bit, maps NPY_HEAPSORT to NPY_QUICKSORT + flags &= ~_NPY_SORT_HEAPSORT; + + sort_table = PyDataType_GetArrFuncs(PyArray_DESCR(op))->sort; + switch (flags) { + case NPY_SORT_DEFAULT: + sort = sort_table[NPY_QUICKSORT]; + break; + case NPY_SORT_STABLE: + sort = sort_table[NPY_STABLESORT]; + break; + default: + break; + } + + // Look for appropriate generic function if no type specific version + if (sort == NULL) { + if (!PyDataType_GetArrFuncs(PyArray_DESCR(op))->compare) { + PyErr_SetString(PyExc_TypeError, + "type does not have compare function"); + return -1; + } + switch (flags) { + case NPY_SORT_DEFAULT: + sort = generic_sort_table[NPY_QUICKSORT]; + break; + case NPY_SORT_STABLE: + sort = generic_sort_table[NPY_STABLESORT]; + break; + default: + break; + } + } + + if (sort == NULL) { + 
PyErr_SetString(PyExc_TypeError, + "no current sort function meets the requirements"); + return -1; + } + return _new_sortlike(op, axis, sort, NULL, NULL, 0); +} + +/* Table of generic argsort function for use by PyArray_ArgSortEx */ +static PyArray_ArgSortFunc* const generic_argsort_table[] = {npy_aquicksort, + npy_aheapsort, + npy_atimsort}; + +/*NUMPY_API + * ArgSort an array with extended parameters + */ +NPY_NO_EXPORT PyObject * +PyArray_ArgSort(PyArrayObject *op, int axis, NPY_SORTKIND flags) +{ + PyArrayObject *op2; + PyObject *ret; + PyArray_ArgSortFunc **argsort_table = NULL; + PyArray_ArgSortFunc *argsort = NULL; + + // Zero the NPY_HEAPSORT bit, maps NPY_HEAPSORT to NPY_QUICKSORT + flags &= ~_NPY_SORT_HEAPSORT; + + // Look for type specific functions + argsort_table = PyDataType_GetArrFuncs(PyArray_DESCR(op))->argsort; + switch (flags) { + case NPY_SORT_DEFAULT: + argsort = argsort_table[NPY_QUICKSORT]; + break; + case NPY_SORT_STABLE: + argsort = argsort_table[NPY_STABLESORT]; + break; + default: + break; + } + + // Look for generic function if no type specific version + if (argsort == NULL) { + if (!PyDataType_GetArrFuncs(PyArray_DESCR(op))->compare) { + PyErr_SetString(PyExc_TypeError, + "type does not have compare function"); + return NULL; + } + switch (flags) { + case NPY_SORT_DEFAULT: + argsort = generic_argsort_table[NPY_QUICKSORT]; + break; + case NPY_SORT_STABLE: + argsort = generic_argsort_table[NPY_STABLESORT]; + break; + default: + break; + } + } + + if (argsort == NULL) { + PyErr_SetString(PyExc_TypeError, + "no current argsort function meets the requirements"); + return NULL; + } + + op2 = (PyArrayObject *)PyArray_CheckAxis(op, &axis, 0); + if (op2 == NULL) { + return NULL; + } + + ret = _new_argsortlike(op2, axis, argsort, NULL, NULL, 0); + + Py_DECREF(op2); + return ret; +} + + diff --git a/numpy/_core/src/multiarray/methods.c b/numpy/_core/src/multiarray/methods.c index 7a9f5d83a57c..84f749acce3f 100644 --- 
a/numpy/_core/src/multiarray/methods.c +++ b/numpy/_core/src/multiarray/methods.c @@ -1154,7 +1154,6 @@ array_copy_keeporder(PyArrayObject *self, PyObject *args) return PyArray_NewCopy(self, NPY_KEEPORDER); } -#include static PyObject * array_resize(PyArrayObject *self, PyObject *args, PyObject *kwds) { @@ -1250,11 +1249,13 @@ array_sort(PyArrayObject *self, { int axis = -1; int val; - NPY_SORTKIND sortkind = _NPY_SORT_UNDEFINED; PyObject *order = NULL; PyArray_Descr *saved = NULL; PyArray_Descr *newd; + NPY_SORTKIND sortkind = _NPY_SORT_UNDEFINED; int stable = -1; + int descending = -1; + int nanfirst = -1; NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("sort", args, len_args, kwnames, @@ -1262,18 +1263,42 @@ array_sort(PyArrayObject *self, "|kind", &PyArray_SortkindConverter, &sortkind, "|order", NULL, &order, "$stable", &PyArray_OptionalBoolConverter, &stable, +// "$descending", &PyArray_OptionalBoolConverter, &descending, +// "$nanfirst", &PyArray_OptionalBoolConverter, &nanfirst, NULL, NULL, NULL) < 0) { return NULL; } - if (order == Py_None) { - order = NULL; + + if (sortkind == _NPY_SORT_UNDEFINED) { + // keywords only if sortkind not passed + sortkind = 0; + sortkind |= (stable > 0)? NPY_SORT_STABLE: 0; + sortkind |= (descending > 0)? NPY_SORT_DESCENDING: 0; + sortkind |= (nanfirst > 0)? NPY_SORT_NANFIRST: 0; } + else { + // Check that no keywords are used + int keywords_used = 0; + keywords_used |= (stable != -1); + keywords_used |= (descending != -1); + keywords_used |= (nanfirst != -1); + if (keywords_used) { + PyErr_SetString(PyExc_ValueError, + "`kind` and keyword parameters can't be provided at " + "the same time. Use only one of them."); + return NULL; + } + } + + order = (order != Py_None)? order: NULL; + // Reorder field names if required. 
if (order != NULL) { PyObject *new_name; PyObject *_numpy_internal; saved = PyArray_DESCR(self); if (!PyDataType_HASFIELDS(saved)) { - PyErr_SetString(PyExc_ValueError, "Cannot specify " \ + PyErr_SetString(PyExc_ValueError, + "Cannot specify " "order when the array has no fields."); return NULL; } @@ -1296,20 +1321,9 @@ array_sort(PyArrayObject *self, ((_PyArray_LegacyDescr *)newd)->names = new_name; ((PyArrayObject_fields *)self)->descr = newd; } - if (sortkind != _NPY_SORT_UNDEFINED && stable != -1) { - PyErr_SetString(PyExc_ValueError, - "`kind` and `stable` parameters can't be provided at " - "the same time. Use only one of them."); - return NULL; - } - else if ((sortkind == _NPY_SORT_UNDEFINED && stable == -1) || (stable == 0)) { - sortkind = NPY_QUICKSORT; - } - else if (stable == 1) { - sortkind = NPY_STABLESORT; - } val = PyArray_Sort(self, axis, sortkind); + if (order != NULL) { Py_XDECREF(PyArray_DESCR(self)); ((PyArrayObject_fields *)self)->descr = saved; @@ -1320,6 +1334,7 @@ array_sort(PyArrayObject *self, Py_RETURN_NONE; } + static PyObject * array_partition(PyArrayObject *self, PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) @@ -1393,15 +1408,20 @@ array_partition(PyArrayObject *self, Py_RETURN_NONE; } + static PyObject * array_argsort(PyArrayObject *self, PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { int axis = -1; + PyObject *res; + PyObject *order = NULL; + PyArray_Descr *saved = NULL; + PyArray_Descr *newd; NPY_SORTKIND sortkind = _NPY_SORT_UNDEFINED; - PyObject *order = NULL, *res; - PyArray_Descr *newd, *saved=NULL; int stable = -1; + int descending = -1; + int nanfirst = -1; NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("argsort", args, len_args, kwnames, @@ -1409,12 +1429,35 @@ array_argsort(PyArrayObject *self, "|kind", &PyArray_SortkindConverter, &sortkind, "|order", NULL, &order, "$stable", &PyArray_OptionalBoolConverter, &stable, +// "$descending", &PyArray_OptionalBoolConverter, &descending, +// 
"$nanfirst", &PyArray_OptionalBoolConverter, &nanfirst, NULL, NULL, NULL) < 0) { return NULL; } - if (order == Py_None) { - order = NULL; + + if (sortkind == _NPY_SORT_UNDEFINED) { + // keywords only if sortkind not passed + sortkind = 0; + sortkind |= (stable > 0)? NPY_SORT_STABLE: 0; + sortkind |= (descending > 0)? NPY_SORT_DESCENDING: 0; + sortkind |= (nanfirst > 0)? NPY_SORT_NANFIRST: 0; + } + else { + // Check that no keywords are used + int keywords_used = 0; + keywords_used |= (stable != -1); + keywords_used |= (descending != -1); + keywords_used |= (nanfirst != -1); + if (keywords_used) { + PyErr_SetString(PyExc_ValueError, + "`kind` and keyword parameters can't be provided at " + "the same time. Use only one of them."); + return NULL; + } } + + // Reorder field names if required. + order = (order != Py_None)? order: NULL; if (order != NULL) { PyObject *new_name; PyObject *_numpy_internal; @@ -1443,20 +1486,9 @@ array_argsort(PyArrayObject *self, ((_PyArray_LegacyDescr *)newd)->names = new_name; ((PyArrayObject_fields *)self)->descr = newd; } - if (sortkind != _NPY_SORT_UNDEFINED && stable != -1) { - PyErr_SetString(PyExc_ValueError, - "`kind` and `stable` parameters can't be provided at " - "the same time. 
Use only one of them."); - return NULL; - } - else if ((sortkind == _NPY_SORT_UNDEFINED && stable == -1) || (stable == 0)) { - sortkind = NPY_QUICKSORT; - } - else if (stable == 1) { - sortkind = NPY_STABLESORT; - } res = PyArray_ArgSort(self, axis, sortkind); + if (order != NULL) { Py_XDECREF(PyArray_DESCR(self)); ((PyArrayObject_fields *)self)->descr = saved; diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index d5aac78c4a4d..8e50e71c3741 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -2211,7 +2211,7 @@ def test_sort(self): with assert_raises_regex( ValueError, - "kind` and `stable` parameters can't be provided at the same time" + "`kind` and keyword parameters can't be provided at the same time" ): np.sort(a, kind="stable", stable=True) @@ -2654,7 +2654,7 @@ def test_argsort(self): with assert_raises_regex( ValueError, - "kind` and `stable` parameters can't be provided at the same time" + "`kind` and keyword parameters can't be provided at the same time" ): np.argsort(a, kind="stable", stable=True) From 22be8c5db4d5dec8f3ab336d729695ebcee39601 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Tue, 2 Sep 2025 16:16:39 -0600 Subject: [PATCH 0389/1718] DOC: Documentation improvements from review. --- doc/release/upcoming_changes/29642.c_api.rst | 2 +- doc/release/upcoming_changes/29642.change.rst | 5 +- doc/source/reference/c-api/array.rst | 47 +++++++++++-------- numpy/_core/include/numpy/ndarraytypes.h | 11 ++--- 4 files changed, 37 insertions(+), 28 deletions(-) diff --git a/doc/release/upcoming_changes/29642.c_api.rst b/doc/release/upcoming_changes/29642.c_api.rst index 0cc6b1bf4bf4..0a28e32e3530 100644 --- a/doc/release/upcoming_changes/29642.c_api.rst +++ b/doc/release/upcoming_changes/29642.c_api.rst @@ -11,4 +11,4 @@ needed. 
The new names of interest are: * NPY_SORT_NANFIRST -- NaNs sort to the beginning The semantic change is that NPY_HEAPSORT is mapped to NPY_QUICKSORT when used. - +Note that NPY_SORT_DESCENDING and NPY_SORT_NANFIRST have yet to be implemented. diff --git a/doc/release/upcoming_changes/29642.change.rst b/doc/release/upcoming_changes/29642.change.rst index 6a7f656740fd..4a1706e00bab 100644 --- a/doc/release/upcoming_changes/29642.change.rst +++ b/doc/release/upcoming_changes/29642.change.rst @@ -1,4 +1,7 @@ Sorting ``kind='heapsort'`` now maps to ``kind='quicksort'`` ------------------------------------------------------------ It is unlikely that this change will be noticed, but if you do see a change in -execution time or unstable argsort order, this is likely the cause. +execution time or unstable argsort order, that is likely the cause. Please let +us know if there is a performance regression. Congratulate us if it is +improved :) + diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst index e09b61974128..b5c1a08e2abf 100644 --- a/doc/source/reference/c-api/array.rst +++ b/doc/source/reference/c-api/array.rst @@ -2303,29 +2303,36 @@ Item selection and manipulation .. c:function:: PyObject* PyArray_Sort(PyArrayObject* self, int axis, NPY_SORTKIND kind) - Equivalent to :meth:`ndarray.sort` (*self*, *axis*, - *kind*). Return an array with the items of *self* sorted along *axis*. The - array is sorted using an algorithm whose properties are specified by - *kind*, an integer/enum specifying the reguirements of the sorting - algorithm used. If *self* ->descr is a data-type with fields defined, then - self->descr->names is used to determine the sort order. A comparison where - the first field is equal will use the second field and so on. To alter the - sort order of a structured array, create a new data-type with a different - order of names and construct a view of the array with that new data-type. 
- + Return an array with the items of ``self`` sorted along ``axis``. The array + is sorted using an algorithm whose properties are specified by the value of + ``kind``, an integer/enum specifying the reguirements of the sorting + algorithm used. If ``self* ->descr`` is a data-type with fields defined, + then ``self->descr->names`` is used to determine the sort order. A comparison + where the first field is equal will use the second field and so on. To + alter the sort order of a structured array, create a new data-type with a + different order of names and construct a view of the array with that new + data-type. + + This is the C level function called by the ndarray method + :meth:`ndarray.sort`, though with a different meaning + of ``kind`` -- see ``NPY_SORTKIND`` below. .. c:function:: PyObject* PyArray_ArgSort(PyArrayObject* self, int axis, NPY_SORTKIND kind) - Equivalent to :meth:`ndarray.argsort` (*self*, - *axis*, *kind*). Return an array of indices such that selection of these - indices along the given ``axis`` would return a sorted version of *self*. - The array is sorted using an algorithm whose properties are specified by - *kind*, an integer/enum specifying the reguirements of the sorting - algorithm used. If *self* ->descr is a data-type with fields defined, then - self->descr->names is used to determine the sort order. A comparison where - the first field is equal will use the second field and so on. To alter the - sort order of a structured array, create a new data-type with a different - order of names and construct a view of the array with that new data-type. + Return an array of indices such that selection of these indices along the + given ``axis`` would return a sorted version of ``self``. The array is + sorted using an algorithm whose properties are specified by ``kind``, an + integer/enum specifying the reguirements of the sorting algorithm used. 
If + ``self->descr`` is a data-type with fields defined, then + ``self->descr->names`` is used to determine the sort order. A comparison + where the first field is equal will use the second field and so on. To + alter the sort order of a structured array, create a new data-type with a + different order of names and construct a view of the array with that new + data-type. + + This is the C level function called by the ndarray method + :meth:`ndarray.argsort`, though with a different + meaning of ``kind`` -- see ``NPY_SORTKIND`` below. .. c:function:: PyObject* PyArray_LexSort(PyObject* sort_keys, int axis) diff --git a/numpy/_core/include/numpy/ndarraytypes.h b/numpy/_core/include/numpy/ndarraytypes.h index 3df9a4dbfcf9..01fe231927f4 100644 --- a/numpy/_core/include/numpy/ndarraytypes.h +++ b/numpy/_core/include/numpy/ndarraytypes.h @@ -169,12 +169,11 @@ enum NPY_TYPECHAR { * * Updated in NumPy 2.4 * - * Updated with new names denoting requirements rather than the algorithm. All - * the previous values are reused in a way that should be downstream - * compatible, but the actual algorithms used may be different than before. The - * new approach should be more flexible and easier to update. The idea is that - * NPY_SORT_STABLE | NPY_SORT_DESCENDING | NPY_SORT_NANFIRST should provide an - * index. + * Updated with new names denoting requirements rather than specifying a + * particular algorithm. All the previous values are reused in a way that + * should be downstream compatible, but the actual algorithms used may be + * different than before. The new approach should be more flexible and easier + * to update. * * Names with a leading underscore are private, and should only be used * internally by NumPy. 
From a1fa902f818af4bdd0cdbd4fce85c072cc210760 Mon Sep 17 00:00:00 2001 From: ixgbe <1113177880@qq.com> Date: Wed, 3 Sep 2025 09:34:33 +0800 Subject: [PATCH 0390/1718] ENH, SIMD: Optimize the logical implementation based on Highway Wrapper --- .../src/umath/loops_logical.dispatch.cpp | 231 +++++++++--------- 1 file changed, 118 insertions(+), 113 deletions(-) diff --git a/numpy/_core/src/umath/loops_logical.dispatch.cpp b/numpy/_core/src/umath/loops_logical.dispatch.cpp index ec17f90154c8..a6f174218bd1 100644 --- a/numpy/_core/src/umath/loops_logical.dispatch.cpp +++ b/numpy/_core/src/umath/loops_logical.dispatch.cpp @@ -4,17 +4,16 @@ #include "lowlevel_strided_loops.h" #include "fast_loop_macros.h" #include - +#include "simd/simd.hpp" #include -namespace hn = hwy::HWY_NAMESPACE; struct logical_and_t {}; struct logical_or_t {}; struct absolute_t {}; struct logical_not_t {}; -const hn::ScalableTag u8; -using vec_u8 = hn::Vec; +namespace { +using namespace np::simd; /******************************************************************************* ** Defining the SIMD kernels @@ -24,86 +23,84 @@ using vec_u8 = hn::Vec; * consistent, should not be required if bool is used correctly everywhere but * you never know */ - -HWY_INLINE HWY_ATTR vec_u8 byte_to_true(vec_u8 v) +#if NPY_HWY +HWY_INLINE HWY_ATTR Vec byte_to_true(Vec v) { - return hn::IfThenZeroElse(hn::Eq(v, hn::Zero(u8)), hn::Set(u8, 1)); + return hn::IfThenZeroElse(hn::Eq(v, Zero()), Set(uint8_t(1))); } + /* * convert mask vector (0xff/0x00) to boolean true. similar to byte_to_true(), * but we've already got a mask and can skip negation. */ -HWY_INLINE HWY_ATTR vec_u8 mask_to_true(vec_u8 v) +HWY_INLINE HWY_ATTR Vec mask_to_true(Vec v) { - const vec_u8 truemask = hn::Set(u8, 1 == 1); - return hn::And(truemask, v); + return hn::IfThenElseZero(hn::Ne(v, Zero()), Set(uint8_t(1))); } + /* * For logical_and, we have to be careful to handle non-bool inputs where * bits of each operand might not overlap. 
Example: a = 0x01, b = 0x80 * Both evaluate to boolean true, however, a & b is false. Return value * should be consistent with byte_to_true(). */ -HWY_INLINE HWY_ATTR vec_u8 simd_logical_and_u8(vec_u8 a, vec_u8 b) +HWY_INLINE HWY_ATTR Vec simd_logical_and_u8(Vec a, Vec b) { return hn::IfThenZeroElse( - hn::Eq(hn::Zero(u8), hn::Min(a, b)), - hn::Set(u8, 1) + hn::Eq(Zero(), hn::Min(a, b)), + Set(uint8_t(1)) ); } /* * We don't really need the following, but it simplifies the templating code * below since it is paired with simd_logical_and_u8() above. */ -HWY_INLINE HWY_ATTR vec_u8 simd_logical_or_u8(vec_u8 a, vec_u8 b) +HWY_INLINE HWY_ATTR Vec simd_logical_or_u8(Vec a, Vec b) { - vec_u8 r = hn::Or(a, b); + auto r = hn::Or(a, b); return byte_to_true(r); } -HWY_INLINE HWY_ATTR npy_bool simd_any_u8(vec_u8 v) +HWY_INLINE HWY_ATTR npy_bool simd_any_u8(Vec v) { - return hn::ReduceMax(u8, v) != 0; + return hn::ReduceMax(_Tag(), v) != 0; } -HWY_INLINE HWY_ATTR npy_bool simd_all_u8(vec_u8 v) +HWY_INLINE HWY_ATTR npy_bool simd_all_u8(Vec v) { - return hn::ReduceMin(u8, v) != 0; + return hn::ReduceMin(_Tag(), v) != 0; } +#endif template struct BinaryLogicalTraits; template<> struct BinaryLogicalTraits { - static constexpr bool is_and = false; - static constexpr auto scalar_op = std::logical_or{}; + static constexpr bool is_and = false; + static constexpr auto scalar_op = std::logical_or{}; static constexpr auto scalar_cmp = std::not_equal_to{}; +#if NPY_HWY static constexpr auto anyall = simd_any_u8; - HWY_INLINE HWY_ATTR vec_u8 simd_op(vec_u8 a, vec_u8 b) { - return simd_logical_or_u8(a, b); - } - - HWY_INLINE HWY_ATTR vec_u8 reduce(vec_u8 a, vec_u8 b) { + static HWY_INLINE HWY_ATTR Vec simd_op(Vec a, Vec b) { return simd_logical_or_u8(a, b); } +#endif }; template<> struct BinaryLogicalTraits { - static constexpr bool is_and = true; - static constexpr auto scalar_op = std::logical_and{}; + static constexpr bool is_and = true; + static constexpr auto scalar_op = 
std::logical_and{}; static constexpr auto scalar_cmp = std::equal_to{}; +#if NPY_HWY static constexpr auto anyall = simd_all_u8; - HWY_INLINE HWY_ATTR vec_u8 simd_op(vec_u8 a, vec_u8 b) { - return simd_logical_and_u8(a, b); - } - - HWY_INLINE HWY_ATTR vec_u8 reduce(vec_u8 a, vec_u8 b) { + static HWY_INLINE HWY_ATTR Vec simd_op(Vec a, Vec b) { return simd_logical_and_u8(a, b); } +#endif }; template @@ -111,52 +108,54 @@ struct UnaryLogicalTraits; template<> struct UnaryLogicalTraits { - static constexpr bool is_not = true; + static constexpr bool is_not = true; static constexpr auto scalar_op = std::equal_to{}; - HWY_INLINE HWY_ATTR vec_u8 simd_op(vec_u8 v) { - const vec_u8 zero = hn::Zero(u8); - return mask_to_true(hn::VecFromMask(u8, hn::Eq(v, zero))); +#if NPY_HWY + static HWY_INLINE HWY_ATTR Vec simd_op(Vec v) { + const auto zero = Zero(); + return mask_to_true(hn::VecFromMask(_Tag(), hn::Eq(v, zero))); } +#endif }; template<> struct UnaryLogicalTraits { - static constexpr bool is_not = false; + static constexpr bool is_not = false; static constexpr auto scalar_op = std::not_equal_to{}; - HWY_INLINE HWY_ATTR vec_u8 simd_op(vec_u8 v) { +#if NPY_HWY + static HWY_INLINE HWY_ATTR Vec simd_op(Vec v) { return byte_to_true(v); } +#endif }; - +#if NPY_HWY template HWY_ATTR SIMD_MSVC_NOINLINE static void simd_binary_logical_BOOL(npy_bool* op, npy_bool* ip1, npy_bool* ip2, npy_intp len) { using Traits = BinaryLogicalTraits; - Traits traits; constexpr int UNROLL = 16; - const int vstep = hn::Lanes(u8); + HWY_LANES_CONSTEXPR int vstep = Lanes(); const int wstep = vstep * UNROLL; // Unrolled vectors loop for (; len >= wstep; len -= wstep, ip1 += wstep, ip2 += wstep, op += wstep) { - for(int i = 0; i < UNROLL; i++) { - vec_u8 a = hn::LoadU(u8, ip1 + vstep * i); - vec_u8 b = hn::LoadU(u8, ip2 + vstep * i); - vec_u8 r = traits.simd_op(a, b); - hn::StoreU(r, u8, op + vstep * i); + auto a = LoadU(ip1 + vstep * i); + auto b = LoadU(ip2 + vstep * i); + auto r = Traits::simd_op(a, 
b); + StoreU(r, op + vstep * i); } } // Single vectors loop for (; len >= vstep; len -= vstep, ip1 += vstep, ip2 += vstep, op += vstep) { - vec_u8 a = hn::LoadU(u8, ip1); - vec_u8 b = hn::LoadU(u8, ip2); - vec_u8 r = traits.simd_op(a, b); - hn::StoreU(r, u8, op); + auto a = LoadU(ip1); + auto b = LoadU(ip2); + auto r = Traits::simd_op(a, b); + StoreU(r, op); } // Scalar loop to finish off @@ -169,9 +168,8 @@ template HWY_ATTR SIMD_MSVC_NOINLINE static void simd_reduce_logical_BOOL(npy_bool* op, npy_bool* ip, npy_intp len) { using Traits = BinaryLogicalTraits; - Traits traits; constexpr int UNROLL = 8; - const int vstep = hn::Lanes(u8); + HWY_LANES_CONSTEXPR int vstep = Lanes(); const int wstep = vstep * UNROLL; // Unrolled vectors loop @@ -179,24 +177,24 @@ static void simd_reduce_logical_BOOL(npy_bool* op, npy_bool* ip, npy_intp len) { #if defined(NPY_HAVE_SSE2) NPY_PREFETCH(reinterpret_cast(ip + wstep), 0, 3); #endif - vec_u8 v0 = hn::LoadU(u8, ip); - vec_u8 v1 = hn::LoadU(u8, ip + vstep); - vec_u8 v2 = hn::LoadU(u8, ip + vstep * 2); - vec_u8 v3 = hn::LoadU(u8, ip + vstep * 3); - vec_u8 v4 = hn::LoadU(u8, ip + vstep * 4); - vec_u8 v5 = hn::LoadU(u8, ip + vstep * 5); - vec_u8 v6 = hn::LoadU(u8, ip + vstep * 6); - vec_u8 v7 = hn::LoadU(u8, ip + vstep * 7); + auto v0 = LoadU(ip); + auto v1 = LoadU(ip + vstep); + auto v2 = LoadU(ip + vstep * 2); + auto v3 = LoadU(ip + vstep * 3); + auto v4 = LoadU(ip + vstep * 4); + auto v5 = LoadU(ip + vstep * 5); + auto v6 = LoadU(ip + vstep * 6); + auto v7 = LoadU(ip + vstep * 7); - vec_u8 m01 = traits.reduce(v0, v1); - vec_u8 m23 = traits.reduce(v2, v3); - vec_u8 m45 = traits.reduce(v4, v5); - vec_u8 m67 = traits.reduce(v6, v7); + auto m01 = Traits::simd_op(v0, v1); + auto m23 = Traits::simd_op(v2, v3); + auto m45 = Traits::simd_op(v4, v5); + auto m67 = Traits::simd_op(v6, v7); - vec_u8 m0123 = traits.reduce(m01, m23); - vec_u8 m4567 = traits.reduce(m45, m67); + auto m0123 = Traits::simd_op(m01, m23); + auto m4567 = 
Traits::simd_op(m45, m67); - vec_u8 mv = traits.reduce(m0123, m4567); + auto mv = Traits::simd_op(m0123, m4567); if(Traits::anyall(mv) == !Traits::is_and) { *op = !Traits::is_and; @@ -206,7 +204,7 @@ static void simd_reduce_logical_BOOL(npy_bool* op, npy_bool* ip, npy_intp len) { // Single vectors loop for (; len >= vstep; len -= vstep, ip += vstep) { - vec_u8 v = hn::LoadU(u8, ip); + auto v = LoadU(ip); if(Traits::anyall(v) == !Traits::is_and) { *op = !Traits::is_and; return; @@ -226,25 +224,24 @@ template HWY_ATTR SIMD_MSVC_NOINLINE static void simd_unary_logical_BOOL(npy_bool* op, npy_bool* ip, npy_intp len) { using Traits = UnaryLogicalTraits; - Traits traits; constexpr int UNROLL = 16; - const int vstep = hn::Lanes(u8); + HWY_LANES_CONSTEXPR int vstep = Lanes(); const int wstep = vstep * UNROLL; // Unrolled vectors loop for (; len >= wstep; len -= wstep, ip += wstep, op += wstep) { for(int i = 0; i < UNROLL; i++) { - vec_u8 v = hn::LoadU(u8, ip + vstep * i); - vec_u8 r = traits.simd_op(v); - hn::StoreU(r, u8, op + vstep * i); + auto v = LoadU(ip + vstep * i); + auto r = Traits::simd_op(v); + StoreU(r, op + vstep * i); } } // Single vectors loop for (; len >= vstep; len -= vstep, ip += vstep, op += vstep) { - vec_u8 v = hn::LoadU(u8, ip); - vec_u8 r = traits.simd_op(v); - hn::StoreU(r, u8, op); + auto v = LoadU(ip); + auto r = Traits::simd_op(v); + StoreU(r, op); } // Scalar loop to finish off @@ -253,6 +250,9 @@ static void simd_unary_logical_BOOL(npy_bool* op, npy_bool* ip, npy_intp len) { } } +#endif //NPY_HWY +} // namespace anonymous + /******************************************************************************* ** Defining ufunc inner functions ******************************************************************************/ @@ -260,12 +260,9 @@ template static NPY_INLINE int run_binary_simd_logical_BOOL( char** args, npy_intp const* dimensions, npy_intp const* steps) { -#if NPY_SIMD - if (sizeof(npy_bool) == 1 && - IS_BLOCKABLE_BINARY(sizeof(npy_bool), 
NPY_SIMD_WIDTH)) { - simd_binary_logical_BOOL((npy_bool*)args[2], (npy_bool*)args[0], - (npy_bool*)args[1], dimensions[0] - ); +#if NPY_HWY + if (sizeof(npy_bool) == 1 && IS_BLOCKABLE_BINARY(sizeof(npy_bool), kMaxLanes)) { + simd_binary_logical_BOOL((npy_bool*)args[2], (npy_bool*)args[0], (npy_bool*)args[1], dimensions[0]); return 1; } #endif @@ -276,12 +273,9 @@ template static NPY_INLINE int run_reduce_simd_logical_BOOL( char** args, npy_intp const* dimensions, npy_intp const* steps) { -#if NPY_SIMD - if (sizeof(npy_bool) == 1 && - IS_BLOCKABLE_REDUCE(sizeof(npy_bool), NPY_SIMD_WIDTH)) { - simd_reduce_logical_BOOL((npy_bool*)args[0], (npy_bool*)args[1], - dimensions[0] - ); +#if NPY_HWY + if (sizeof(npy_bool) == 1 && IS_BLOCKABLE_REDUCE(sizeof(npy_bool), kMaxLanes)) { + simd_reduce_logical_BOOL((npy_bool*)args[0], (npy_bool*)args[1], dimensions[0]); return 1; } #endif @@ -292,9 +286,8 @@ template static NPY_INLINE int run_unary_simd_logical_BOOL( char** args, npy_intp const* dimensions, npy_intp const* steps) { -#if NPY_SIMD - if (sizeof(npy_bool) == 1 && - IS_BLOCKABLE_UNARY(sizeof(npy_bool), NPY_SIMD_WIDTH)) { +#if NPY_HWY + if (sizeof(npy_bool) == 1 && IS_BLOCKABLE_UNARY(sizeof(npy_bool), kMaxLanes)) { simd_unary_logical_BOOL((npy_bool*)args[1], (npy_bool*)args[0], dimensions[0]); return 1; } @@ -304,24 +297,34 @@ static NPY_INLINE int run_unary_simd_logical_BOOL( template void BOOL_binary_func_wrapper(char** args, npy_intp const* dimensions, npy_intp const* steps) { + char *ip1 = args[0], *ip2 = args[1], *op1 = args[2]; + npy_intp is1 = steps[0], is2 = steps[1], os1 = steps[2]; + npy_intp n = dimensions[0]; using Traits = BinaryLogicalTraits; - + +#if NPY_HWY if (run_binary_simd_logical_BOOL(args, dimensions, steps)) { return; } - else { - BINARY_LOOP { - const npy_bool in1 = *(npy_bool*)ip1; - const npy_bool in2 = *(npy_bool*)ip2; - *((npy_bool*)op1) = Traits::scalar_op(in1, in2); - } +#endif + + for(npy_intp i = 0; i < n; i++, ip1 += is1, ip2 += is2, op1 += 
os1) { + const npy_bool in1 = *(npy_bool*)ip1; + const npy_bool in2 = *(npy_bool*)ip2; + *((npy_bool*)op1) = Traits::scalar_op(in1, in2); } } template void BOOL_binary_reduce_wrapper(char** args, npy_intp const* dimensions, npy_intp const* steps) { + char *iop1 = args[0]; + npy_bool io1 = *(npy_bool *)iop1; + char *ip2 = args[1]; + npy_intp is2 = steps[1]; + npy_intp n = dimensions[0]; + npy_intp i; using Traits = BinaryLogicalTraits; -#if NPY_SIMD +#if NPY_HWY if (run_reduce_simd_logical_BOOL(args, dimensions, steps)) { return; } @@ -343,7 +346,6 @@ void BOOL_binary_reduce_wrapper(char** args, npy_intp const* dimensions, npy_int * with glibc >= 2.12 and memchr can only check for equal 1 */ static const npy_bool zero[4096]={0}; /* zero by C standard */ - npy_uintp i, n = dimensions[0]; for (i = 0; !*op && i < n - (n % sizeof(zero)); i += sizeof(zero)) { *op = memcmp(&args[1][i], zero, sizeof(zero)) != 0; @@ -355,14 +357,14 @@ void BOOL_binary_reduce_wrapper(char** args, npy_intp const* dimensions, npy_int return; } #endif - else { - BINARY_REDUCE_LOOP(npy_bool) { - const npy_bool in2 = *(npy_bool*)ip2; - io1 = Traits::scalar_op(io1, in2); - if ((Traits::is_and && !io1) || (!Traits::is_and && io1)) break; - } - *((npy_bool*)iop1) = io1; + + for(i = 0; i < n; i++, ip2 += is2) { + const npy_bool in2 = *(npy_bool*)ip2; + io1 = Traits::scalar_op(io1, in2); + if ((Traits::is_and && !io1) || (!Traits::is_and && io1)) + break; } + *((npy_bool*)iop1) = io1; } template @@ -390,15 +392,18 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(BOOL_logical_or)( template void BOOL_func_wrapper(char** args, npy_intp const* dimensions, npy_intp const* steps) { + char *ip1 = args[0], *op1 = args[1]; + npy_intp is1 = steps[0], os1 = steps[1]; + npy_intp n = dimensions[0]; using Traits = UnaryLogicalTraits; + if (run_unary_simd_logical_BOOL(args, dimensions, steps)) { return; } - else { - UNARY_LOOP { - npy_bool in1 = *(npy_bool*)ip1; - *((npy_bool*)op1) = Traits::scalar_op(in1, 0); - } + + 
for(npy_intp i = 0; i < n; i++, ip1 += is1, op1 += os1) { + npy_bool in1 = *(npy_bool*)ip1; + *((npy_bool*)op1) = Traits::scalar_op(in1, 0); } } From df835f4a25e2ef6dba689198c62f3f4747e2f016 Mon Sep 17 00:00:00 2001 From: Tobias Markus Date: Wed, 3 Sep 2025 15:34:02 +0200 Subject: [PATCH 0391/1718] BLD: Add missing include (#29662) Fixes homebrew builds, see https://github.com/orgs/Homebrew/discussions/6386#discussioncomment-14293523 --- numpy/_core/src/multiarray/unique.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/numpy/_core/src/multiarray/unique.cpp b/numpy/_core/src/multiarray/unique.cpp index 636f1ef0137c..d31fed6f6908 100644 --- a/numpy/_core/src/multiarray/unique.cpp +++ b/numpy/_core/src/multiarray/unique.cpp @@ -7,6 +7,7 @@ #include #include #include +#include #include #include From 1b00896c55ad5f2ce9dcab3f6c8af39691c87bdf Mon Sep 17 00:00:00 2001 From: Britney Whittington <103079612+bwhitt7@users.noreply.github.com> Date: Wed, 3 Sep 2025 15:21:31 -0400 Subject: [PATCH 0392/1718] TST: Replace xunit setup with methods (#29657) * TST: Replace xunit setup with methods * STY: Change var declarations --- numpy/lib/tests/test_function_base.py | 43 +++--- numpy/random/tests/test_generator_mt19937.py | 99 +++++++------ .../test_generator_mt19937_regressions.py | 64 +++++---- numpy/random/tests/test_random.py | 111 +++++++------- numpy/random/tests/test_randomstate.py | 136 +++++++++--------- 5 files changed, 241 insertions(+), 212 deletions(-) diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 37f8bbadc31a..ce293ec2f256 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -974,18 +974,20 @@ def test_append(self): class TestDelete: - def setup_method(self): - self.a = np.arange(5) - self.nd_a = np.arange(5).repeat(2).reshape(1, 5, 2) + def _create_arrays(self): + a = np.arange(5) + nd_a = np.arange(5).repeat(2).reshape(1, 5, 2) + return a, nd_a def 
_check_inverse_of_slicing(self, indices): - a_del = delete(self.a, indices) - nd_a_del = delete(self.nd_a, indices, axis=1) + a, nd_a = self._create_arrays() + a_del = delete(a, indices) + nd_a_del = delete(nd_a, indices, axis=1) msg = f'Delete failed for obj: {indices!r}' - assert_array_equal(setxor1d(a_del, self.a[indices, ]), self.a, + assert_array_equal(setxor1d(a_del, a[indices, ]), a, err_msg=msg) - xor = setxor1d(nd_a_del[0, :, 0], self.nd_a[0, indices, 0]) - assert_array_equal(xor, self.nd_a[0, :, 0], err_msg=msg) + xor = setxor1d(nd_a_del[0, :, 0], nd_a[0, indices, 0]) + assert_array_equal(xor, nd_a[0, :, 0], err_msg=msg) def test_slices(self): lims = [-6, -2, 0, 1, 2, 4, 5] @@ -997,11 +999,12 @@ def test_slices(self): self._check_inverse_of_slicing(s) def test_fancy(self): + a, _ = self._create_arrays() self._check_inverse_of_slicing(np.array([[0, 1], [2, 1]])) with pytest.raises(IndexError): - delete(self.a, [100]) + delete(a, [100]) with pytest.raises(IndexError): - delete(self.a, [-100]) + delete(a, [-100]) self._check_inverse_of_slicing([0, -1, 2, 2]) @@ -1009,13 +1012,13 @@ def test_fancy(self): # not legal, indexing with these would change the dimension with pytest.raises(ValueError): - delete(self.a, True) + delete(a, True) with pytest.raises(ValueError): - delete(self.a, False) + delete(a, False) # not enough items with pytest.raises(ValueError): - delete(self.a, [False] * 4) + delete(a, [False] * 4) def test_single(self): self._check_inverse_of_slicing(0) @@ -1031,7 +1034,9 @@ def test_0d(self): def test_subclass(self): class SubClass(np.ndarray): pass - a = self.a.view(SubClass) + + a_orig, _ = self._create_arrays() + a = a_orig.view(SubClass) assert_(isinstance(delete(a, 0), SubClass)) assert_(isinstance(delete(a, []), SubClass)) assert_(isinstance(delete(a, [0, 1]), SubClass)) @@ -1056,12 +1061,13 @@ def test_index_floats(self): @pytest.mark.parametrize("indexer", [np.array([1]), [1]]) def test_single_item_array(self, indexer): - a_del_int = 
delete(self.a, 1) - a_del = delete(self.a, indexer) + a, nd_a = self._create_arrays() + a_del_int = delete(a, 1) + a_del = delete(a, indexer) assert_equal(a_del_int, a_del) - nd_a_del_int = delete(self.nd_a, 1, axis=1) - nd_a_del = delete(self.nd_a, np.array([1]), axis=1) + nd_a_del_int = delete(nd_a, 1, axis=1) + nd_a_del = delete(nd_a, np.array([1]), axis=1) assert_equal(nd_a_del_int, nd_a_del) def test_single_item_array_non_int(self): @@ -2406,6 +2412,7 @@ def test_float16_underflow(self): # resulting in nan assert_array_equal(sinc(x), np.asarray(1.0)) + class TestUnique: def test_simple(self): diff --git a/numpy/random/tests/test_generator_mt19937.py b/numpy/random/tests/test_generator_mt19937.py index a06212efce0d..6594f6008c8e 100644 --- a/numpy/random/tests/test_generator_mt19937.py +++ b/numpy/random/tests/test_generator_mt19937.py @@ -175,8 +175,7 @@ def test_multinomial_pvals_float32(self): class TestMultivariateHypergeometric: - def setup_method(self): - self.seed = 8675309 + seed = 8675309 def test_argument_validation(self): # Error cases... @@ -308,37 +307,40 @@ def test_repeatability3(self): class TestSetState: - def setup_method(self): - self.seed = 1234567890 - self.rg = Generator(MT19937(self.seed)) - self.bit_generator = self.rg.bit_generator - self.state = self.bit_generator.state - self.legacy_state = (self.state['bit_generator'], - self.state['state']['key'], - self.state['state']['pos']) + def _create_rng(self): + seed = 1234567890 + rg = Generator(MT19937(seed)) + bit_generator = rg.bit_generator + state = bit_generator.state + legacy_state = (state['bit_generator'], + state['state']['key'], + state['state']['pos']) + return rg, bit_generator, state def test_gaussian_reset(self): # Make sure the cached every-other-Gaussian is reset. 
- old = self.rg.standard_normal(size=3) - self.bit_generator.state = self.state - new = self.rg.standard_normal(size=3) + rg, bit_generator, state = self._create_rng() + old = rg.standard_normal(size=3) + bit_generator.state = state + new = rg.standard_normal(size=3) assert_(np.all(old == new)) def test_gaussian_reset_in_media_res(self): # When the state is saved with a cached Gaussian, make sure the # cached Gaussian is restored. - - self.rg.standard_normal() - state = self.bit_generator.state - old = self.rg.standard_normal(size=3) - self.bit_generator.state = state - new = self.rg.standard_normal(size=3) + rg, bit_generator, state = self._create_rng() + rg.standard_normal() + state = bit_generator.state + old = rg.standard_normal(size=3) + bit_generator.state = state + new = rg.standard_normal(size=3) assert_(np.all(old == new)) def test_negative_binomial(self): # Ensure that the negative binomial results take floating point # arguments without truncation. - self.rg.negative_binomial(0.5, 0.5) + rg, _, _ = self._create_rng() + rg.negative_binomial(0.5, 0.5) class TestIntegers: @@ -736,9 +738,7 @@ def test_integers_small_dtype_chisquared(self, sample_size, high, class TestRandomDist: # Make sure the random distribution returns the correct value for a # given seed - - def setup_method(self): - self.seed = 1234567890 + seed = 1234567890 def test_integers(self): random = Generator(MT19937(self.seed)) @@ -1899,8 +1899,7 @@ def test_zipf(self): class TestBroadcast: # tests that functions that broadcast behave # correctly when presented with non-scalar arguments - def setup_method(self): - self.seed = 123456789 + seed = 123456789 def test_uniform(self): random = Generator(MT19937(self.seed)) @@ -2512,8 +2511,7 @@ def test_empty_outputs(self): @pytest.mark.skipif(IS_WASM, reason="can't start thread") class TestThread: # make sure each state produces the same sequence even in threads - def setup_method(self): - self.seeds = range(4) + seeds = range(4) def 
check_function(self, function, sz): from threading import Thread @@ -2558,13 +2556,11 @@ def gen_random(state, out): # See Issue #4263 class TestSingleEltArrayInput: - def setup_method(self): - self.argOne = np.array([2]) - self.argTwo = np.array([3]) - self.argThree = np.array([4]) - self.tgtShape = (1,) + def _create_arrays(self): + return np.array([2]), np.array([3]), np.array([4]), (1,) def test_one_arg_funcs(self): + argOne, _, _, tgtShape = self._create_arrays() funcs = (random.exponential, random.standard_gamma, random.chisquare, random.standard_t, random.pareto, random.weibull, @@ -2579,11 +2575,12 @@ def test_one_arg_funcs(self): out = func(np.array([0.5])) else: - out = func(self.argOne) + out = func(argOne) - assert_equal(out.shape, self.tgtShape) + assert_equal(out.shape, tgtShape) def test_two_arg_funcs(self): + argOne, argTwo, _, tgtShape = self._create_arrays() funcs = (random.uniform, random.normal, random.beta, random.gamma, random.f, random.noncentral_chisquare, @@ -2599,18 +2596,19 @@ def test_two_arg_funcs(self): argTwo = np.array([0.5]) else: - argTwo = self.argTwo + argTwo = argTwo - out = func(self.argOne, argTwo) - assert_equal(out.shape, self.tgtShape) + out = func(argOne, argTwo) + assert_equal(out.shape, tgtShape) - out = func(self.argOne[0], argTwo) - assert_equal(out.shape, self.tgtShape) + out = func(argOne[0], argTwo) + assert_equal(out.shape, tgtShape) - out = func(self.argOne, argTwo[0]) - assert_equal(out.shape, self.tgtShape) + out = func(argOne, argTwo[0]) + assert_equal(out.shape, tgtShape) def test_integers(self, endpoint): + _, _, _, tgtShape = self._create_arrays() itype = [np.bool, np.int8, np.uint8, np.int16, np.uint16, np.int32, np.uint32, np.int64, np.uint64] func = random.integers @@ -2619,27 +2617,28 @@ def test_integers(self, endpoint): for dt in itype: out = func(low, high, endpoint=endpoint, dtype=dt) - assert_equal(out.shape, self.tgtShape) + assert_equal(out.shape, tgtShape) out = func(low[0], high, 
endpoint=endpoint, dtype=dt) - assert_equal(out.shape, self.tgtShape) + assert_equal(out.shape, tgtShape) out = func(low, high[0], endpoint=endpoint, dtype=dt) - assert_equal(out.shape, self.tgtShape) + assert_equal(out.shape, tgtShape) def test_three_arg_funcs(self): + argOne, argTwo, argThree, tgtShape = self._create_arrays() funcs = [random.noncentral_f, random.triangular, random.hypergeometric] for func in funcs: - out = func(self.argOne, self.argTwo, self.argThree) - assert_equal(out.shape, self.tgtShape) + out = func(argOne, argTwo, argThree) + assert_equal(out.shape, tgtShape) - out = func(self.argOne[0], self.argTwo, self.argThree) - assert_equal(out.shape, self.tgtShape) + out = func(argOne[0], argTwo, argThree) + assert_equal(out.shape, tgtShape) - out = func(self.argOne, self.argTwo[0], self.argThree) - assert_equal(out.shape, self.tgtShape) + out = func(argOne, argTwo[0], argThree) + assert_equal(out.shape, tgtShape) @pytest.mark.parametrize("config", JUMP_TEST_DATA) diff --git a/numpy/random/tests/test_generator_mt19937_regressions.py b/numpy/random/tests/test_generator_mt19937_regressions.py index abfacb87dbc5..21093ef73eb6 100644 --- a/numpy/random/tests/test_generator_mt19937_regressions.py +++ b/numpy/random/tests/test_generator_mt19937_regressions.py @@ -6,30 +6,32 @@ class TestRegression: - - def setup_method(self): - self.mt19937 = Generator(MT19937(121263137472525314065)) + def _create_generator(self): + return Generator(MT19937(121263137472525314065)) def test_vonmises_range(self): # Make sure generated random variables are in [-pi, pi]. # Regression test for ticket #986. 
+ mt19937 = self._create_generator() for mu in np.linspace(-7., 7., 5): - r = self.mt19937.vonmises(mu, 1, 50) + r = mt19937.vonmises(mu, 1, 50) assert_(np.all(r > -np.pi) and np.all(r <= np.pi)) def test_hypergeometric_range(self): # Test for ticket #921 - assert_(np.all(self.mt19937.hypergeometric(3, 18, 11, size=10) < 4)) - assert_(np.all(self.mt19937.hypergeometric(18, 3, 11, size=10) > 0)) + mt19937 = self._create_generator() + assert_(np.all(mt19937.hypergeometric(3, 18, 11, size=10) < 4)) + assert_(np.all(mt19937.hypergeometric(18, 3, 11, size=10) > 0)) # Test for ticket #5623 args = (2**20 - 2, 2**20 - 2, 2**20 - 2) # Check for 32-bit systems - assert_(self.mt19937.hypergeometric(*args) > 0) + assert_(mt19937.hypergeometric(*args) > 0) def test_logseries_convergence(self): # Test for ticket #923 + mt19937 = self._create_generator() N = 1000 - rvsn = self.mt19937.logseries(0.8, size=N) + rvsn = mt19937.logseries(0.8, size=N) # these two frequency counts should be close to theoretical # numbers with this large sample # theoretical large N result is 0.49706795 @@ -66,34 +68,39 @@ def test_multivariate_normal_size_types(self): # Test for multivariate_normal issue with 'size' argument. # Check that the multivariate_normal size argument can be a # numpy integer. 
- self.mt19937.multivariate_normal([0], [[0]], size=1) - self.mt19937.multivariate_normal([0], [[0]], size=np.int_(1)) - self.mt19937.multivariate_normal([0], [[0]], size=np.int64(1)) + mt19937 = self._create_generator() + mt19937.multivariate_normal([0], [[0]], size=1) + mt19937.multivariate_normal([0], [[0]], size=np.int_(1)) + mt19937.multivariate_normal([0], [[0]], size=np.int64(1)) def test_beta_small_parameters(self): # Test that beta with small a and b parameters does not produce # NaNs due to roundoff errors causing 0 / 0, gh-5851 - x = self.mt19937.beta(0.0001, 0.0001, size=100) + mt19937 = self._create_generator() + x = mt19937.beta(0.0001, 0.0001, size=100) assert_(not np.any(np.isnan(x)), 'Nans in mt19937.beta') def test_beta_very_small_parameters(self): # gh-24203: beta would hang with very small parameters. - self.mt19937.beta(1e-49, 1e-40) + mt19937 = self._create_generator() + mt19937.beta(1e-49, 1e-40) def test_beta_ridiculously_small_parameters(self): # gh-24266: beta would generate nan when the parameters # were subnormal or a small multiple of the smallest normal. + mt19937 = self._create_generator() tiny = np.finfo(1.0).tiny - x = self.mt19937.beta(tiny / 32, tiny / 40, size=50) + x = mt19937.beta(tiny / 32, tiny / 40, size=50) assert not np.any(np.isnan(x)) def test_beta_expected_zero_frequency(self): # gh-24475: For small a and b (e.g. a=0.0025, b=0.0025), beta # would generate too many zeros. + mt19937 = self._create_generator() a = 0.0025 b = 0.0025 n = 1000000 - x = self.mt19937.beta(a, b, size=n) + x = mt19937.beta(a, b, size=n) nzeros = np.count_nonzero(x == 0) # beta CDF at x = np.finfo(np.double).smallest_subnormal/2 # is p = 0.0776169083131899, e.g, @@ -114,24 +121,26 @@ def test_choice_sum_of_probs_tolerance(self): # The sum of probs should be 1.0 with some tolerance. # For low precision dtypes the tolerance was too tight. # See numpy github issue 6123. 
+ mt19937 = self._create_generator() a = [1, 2, 3] counts = [4, 4, 2] for dt in np.float16, np.float32, np.float64: probs = np.array(counts, dtype=dt) / sum(counts) - c = self.mt19937.choice(a, p=probs) + c = mt19937.choice(a, p=probs) assert_(c in a) with pytest.raises(ValueError): - self.mt19937.choice(a, p=probs * 0.9) + mt19937.choice(a, p=probs * 0.9) def test_shuffle_of_array_of_different_length_strings(self): # Test that permuting an array of different length strings # will not cause a segfault on garbage collection # Tests gh-7710 + mt19937 = self._create_generator() a = np.array(['a', 'a' * 1000]) for _ in range(100): - self.mt19937.shuffle(a) + mt19937.shuffle(a) # Force Garbage Collection - should not segfault. import gc @@ -141,10 +150,11 @@ def test_shuffle_of_array_of_objects(self): # Test that permuting an array of objects will not cause # a segfault on garbage collection. # See gh-7719 + mt19937 = self._create_generator() a = np.array([np.arange(1), np.arange(4)], dtype=object) for _ in range(1000): - self.mt19937.shuffle(a) + mt19937.shuffle(a) # Force Garbage Collection - should not segfault. import gc @@ -174,10 +184,11 @@ def __array__(self, dtype=None, copy=None): assert_array_equal(m.__array__(), np.arange(5)) def test_gamma_0(self): - assert self.mt19937.standard_gamma(0.0) == 0.0 - assert_array_equal(self.mt19937.standard_gamma([0.0]), 0.0) + mt19937 = self._create_generator() + assert mt19937.standard_gamma(0.0) == 0.0 + assert_array_equal(mt19937.standard_gamma([0.0]), 0.0) - actual = self.mt19937.standard_gamma([0.0], dtype='float') + actual = mt19937.standard_gamma([0.0], dtype='float') expected = np.array([0.], dtype=np.float32) assert_array_equal(actual, expected) @@ -185,21 +196,24 @@ def test_geometric_tiny_prob(self): # Regression test for gh-17007. # When p = 1e-30, the probability that a sample will exceed 2**63-1 # is 0.9999999999907766, so we expect the result to be all 2**63-1. 
- assert_array_equal(self.mt19937.geometric(p=1e-30, size=3), + mt19937 = self._create_generator() + assert_array_equal(mt19937.geometric(p=1e-30, size=3), np.iinfo(np.int64).max) def test_zipf_large_parameter(self): # Regression test for part of gh-9829: a call such as rng.zipf(10000) # would hang. + mt19937 = self._create_generator() n = 8 - sample = self.mt19937.zipf(10000, size=n) + sample = mt19937.zipf(10000, size=n) assert_array_equal(sample, np.ones(n, dtype=np.int64)) def test_zipf_a_near_1(self): # Regression test for gh-9829: a call such as rng.zipf(1.0000000000001) # would hang. + mt19937 = self._create_generator() n = 100000 - sample = self.mt19937.zipf(1.0000000000001, size=n) + sample = mt19937.zipf(1.0000000000001, size=n) # Not much of a test, but let's do something more than verify that # it doesn't hang. Certainly for a monotonically decreasing # discrete distribution truncated to signed 64 bit integers, more diff --git a/numpy/random/tests/test_random.py b/numpy/random/tests/test_random.py index 4eb455eb77be..b3e69b41956b 100644 --- a/numpy/random/tests/test_random.py +++ b/numpy/random/tests/test_random.py @@ -105,56 +105,62 @@ def test_multidimensional_pvals(self): class TestSetState: - def setup_method(self): - self.seed = 1234567890 - self.prng = random.RandomState(self.seed) - self.state = self.prng.get_state() + def _create_rng(self): + seed = 1234567890 + prng = random.RandomState(seed) + state = prng.get_state() + return prng, state def test_basic(self): - old = self.prng.tomaxint(16) - self.prng.set_state(self.state) - new = self.prng.tomaxint(16) + prng, state = self._create_rng() + old = prng.tomaxint(16) + prng.set_state(state) + new = prng.tomaxint(16) assert_(np.all(old == new)) def test_gaussian_reset(self): # Make sure the cached every-other-Gaussian is reset. 
- old = self.prng.standard_normal(size=3) - self.prng.set_state(self.state) - new = self.prng.standard_normal(size=3) + prng, state = self._create_rng() + old = prng.standard_normal(size=3) + prng.set_state(state) + new = prng.standard_normal(size=3) assert_(np.all(old == new)) def test_gaussian_reset_in_media_res(self): # When the state is saved with a cached Gaussian, make sure the # cached Gaussian is restored. - - self.prng.standard_normal() - state = self.prng.get_state() - old = self.prng.standard_normal(size=3) - self.prng.set_state(state) - new = self.prng.standard_normal(size=3) + prng, state = self._create_rng() + prng.standard_normal() + state = prng.get_state() + old = prng.standard_normal(size=3) + prng.set_state(state) + new = prng.standard_normal(size=3) assert_(np.all(old == new)) def test_backwards_compatibility(self): # Make sure we can accept old state tuples that do not have the # cached Gaussian value. - old_state = self.state[:-2] - x1 = self.prng.standard_normal(size=16) - self.prng.set_state(old_state) - x2 = self.prng.standard_normal(size=16) - self.prng.set_state(self.state) - x3 = self.prng.standard_normal(size=16) + prng, state = self._create_rng() + old_state = state[:-2] + x1 = prng.standard_normal(size=16) + prng.set_state(old_state) + x2 = prng.standard_normal(size=16) + prng.set_state(state) + x3 = prng.standard_normal(size=16) assert_(np.all(x1 == x2)) assert_(np.all(x1 == x3)) def test_negative_binomial(self): # Ensure that the negative binomial results take floating point # arguments without truncation. 
- self.prng.negative_binomial(0.5, 0.5) + prng, _ = self._create_rng() + prng.negative_binomial(0.5, 0.5) def test_set_invalid_state(self): # gh-25402 + prng, _ = self._create_rng() with pytest.raises(IndexError): - self.prng.set_state(()) + prng.set_state(()) class TestRandint: @@ -299,9 +305,7 @@ def test_respect_dtype_singleton(self): class TestRandomDist: # Make sure the random distribution returns the correct value for a # given seed - - def setup_method(self): - self.seed = 1234567890 + seed = 1234567890 def test_rand(self): np.random.seed(self.seed) @@ -1053,8 +1057,7 @@ def test_zipf(self): class TestBroadcast: # tests that functions that broadcast behave # correctly when presented with non-scalar arguments - def setup_method(self): - self.seed = 123456789 + seed = 123456789 def setSeed(self): np.random.seed(self.seed) @@ -1623,8 +1626,7 @@ def test_logseries(self): @pytest.mark.skipif(IS_WASM, reason="can't start thread") class TestThread: # make sure each state produces the same sequence even in threads - def setup_method(self): - self.seeds = range(4) + seeds = range(4) def check_function(self, function, sz): from threading import Thread @@ -1666,13 +1668,11 @@ def gen_random(state, out): # See Issue #4263 class TestSingleEltArrayInput: - def setup_method(self): - self.argOne = np.array([2]) - self.argTwo = np.array([3]) - self.argThree = np.array([4]) - self.tgtShape = (1,) + def _create_arrays(self): + return np.array([2]), np.array([3]), np.array([4]), (1,) def test_one_arg_funcs(self): + argOne, _, _, tgtShape = self._create_arrays() funcs = (np.random.exponential, np.random.standard_gamma, np.random.chisquare, np.random.standard_t, np.random.pareto, np.random.weibull, @@ -1687,11 +1687,12 @@ def test_one_arg_funcs(self): out = func(np.array([0.5])) else: - out = func(self.argOne) + out = func(argOne) - assert_equal(out.shape, self.tgtShape) + assert_equal(out.shape, tgtShape) def test_two_arg_funcs(self): + argOne, argTwo, _, tgtShape = 
self._create_arrays() funcs = (np.random.uniform, np.random.normal, np.random.beta, np.random.gamma, np.random.f, np.random.noncentral_chisquare, @@ -1707,18 +1708,19 @@ def test_two_arg_funcs(self): argTwo = np.array([0.5]) else: - argTwo = self.argTwo + argTwo = argTwo - out = func(self.argOne, argTwo) - assert_equal(out.shape, self.tgtShape) + out = func(argOne, argTwo) + assert_equal(out.shape, tgtShape) - out = func(self.argOne[0], argTwo) - assert_equal(out.shape, self.tgtShape) + out = func(argOne[0], argTwo) + assert_equal(out.shape, tgtShape) - out = func(self.argOne, argTwo[0]) - assert_equal(out.shape, self.tgtShape) + out = func(argOne, argTwo[0]) + assert_equal(out.shape, tgtShape) def test_randint(self): + _, _, _, tgtShape = self._create_arrays() itype = [bool, np.int8, np.uint8, np.int16, np.uint16, np.int32, np.uint32, np.int64, np.uint64] func = np.random.randint @@ -1727,24 +1729,25 @@ def test_randint(self): for dt in itype: out = func(low, high, dtype=dt) - assert_equal(out.shape, self.tgtShape) + assert_equal(out.shape, tgtShape) out = func(low[0], high, dtype=dt) - assert_equal(out.shape, self.tgtShape) + assert_equal(out.shape, tgtShape) out = func(low, high[0], dtype=dt) - assert_equal(out.shape, self.tgtShape) + assert_equal(out.shape, tgtShape) def test_three_arg_funcs(self): + argOne, argTwo, argThree, tgtShape = self._create_arrays() funcs = [np.random.noncentral_f, np.random.triangular, np.random.hypergeometric] for func in funcs: - out = func(self.argOne, self.argTwo, self.argThree) - assert_equal(out.shape, self.tgtShape) + out = func(argOne, argTwo, argThree) + assert_equal(out.shape, tgtShape) - out = func(self.argOne[0], self.argTwo, self.argThree) - assert_equal(out.shape, self.tgtShape) + out = func(argOne[0], argTwo, argThree) + assert_equal(out.shape, tgtShape) - out = func(self.argOne, self.argTwo[0], self.argThree) - assert_equal(out.shape, self.tgtShape) + out = func(argOne, argTwo[0], argThree) + assert_equal(out.shape, 
tgtShape) diff --git a/numpy/random/tests/test_randomstate.py b/numpy/random/tests/test_randomstate.py index c56d3c0f186c..ff7daf0496bb 100644 --- a/numpy/random/tests/test_randomstate.py +++ b/numpy/random/tests/test_randomstate.py @@ -191,52 +191,58 @@ def test_multinomial_n_float(self): # Non-index integer types should gracefully truncate floats random.multinomial(100.5, [0.2, 0.8]) + class TestSetState: - def setup_method(self): - self.seed = 1234567890 - self.random_state = random.RandomState(self.seed) - self.state = self.random_state.get_state() + def _create_state(self): + seed = 1234567890 + random_state = random.RandomState(seed) + state = random_state.get_state() + return random_state, state def test_basic(self): - old = self.random_state.tomaxint(16) - self.random_state.set_state(self.state) - new = self.random_state.tomaxint(16) + random_state, state = self._create_state() + old = random_state.tomaxint(16) + random_state.set_state(state) + new = random_state.tomaxint(16) assert_(np.all(old == new)) def test_gaussian_reset(self): # Make sure the cached every-other-Gaussian is reset. - old = self.random_state.standard_normal(size=3) - self.random_state.set_state(self.state) - new = self.random_state.standard_normal(size=3) + random_state, state = self._create_state() + old = random_state.standard_normal(size=3) + random_state.set_state(state) + new = random_state.standard_normal(size=3) assert_(np.all(old == new)) def test_gaussian_reset_in_media_res(self): # When the state is saved with a cached Gaussian, make sure the # cached Gaussian is restored. 
- - self.random_state.standard_normal() - state = self.random_state.get_state() - old = self.random_state.standard_normal(size=3) - self.random_state.set_state(state) - new = self.random_state.standard_normal(size=3) + random_state, state = self._create_state() + random_state.standard_normal() + state = random_state.get_state() + old = random_state.standard_normal(size=3) + random_state.set_state(state) + new = random_state.standard_normal(size=3) assert_(np.all(old == new)) def test_backwards_compatibility(self): # Make sure we can accept old state tuples that do not have the # cached Gaussian value. - old_state = self.state[:-2] - x1 = self.random_state.standard_normal(size=16) - self.random_state.set_state(old_state) - x2 = self.random_state.standard_normal(size=16) - self.random_state.set_state(self.state) - x3 = self.random_state.standard_normal(size=16) + random_state, state = self._create_state() + old_state = state[:-2] + x1 = random_state.standard_normal(size=16) + random_state.set_state(old_state) + x2 = random_state.standard_normal(size=16) + random_state.set_state(state) + x3 = random_state.standard_normal(size=16) assert_(np.all(x1 == x2)) assert_(np.all(x1 == x3)) def test_negative_binomial(self): # Ensure that the negative binomial results take floating point # arguments without truncation. 
- self.random_state.negative_binomial(0.5, 0.5) + random_state, _ = self._create_state() + random_state.negative_binomial(0.5, 0.5) def test_get_state_warning(self): rs = random.RandomState(PCG64()) @@ -246,34 +252,38 @@ def test_get_state_warning(self): assert state['bit_generator'] == 'PCG64' def test_invalid_legacy_state_setting(self): - state = self.random_state.get_state() + random_state, state = self._create_state() + state = random_state.get_state() new_state = ('Unknown', ) + state[1:] - assert_raises(ValueError, self.random_state.set_state, new_state) - assert_raises(TypeError, self.random_state.set_state, + assert_raises(ValueError, random_state.set_state, new_state) + assert_raises(TypeError, random_state.set_state, np.array(new_state, dtype=object)) - state = self.random_state.get_state(legacy=False) + state = random_state.get_state(legacy=False) del state['bit_generator'] - assert_raises(ValueError, self.random_state.set_state, state) + assert_raises(ValueError, random_state.set_state, state) def test_pickle(self): - self.random_state.seed(0) - self.random_state.random_sample(100) - self.random_state.standard_normal() - pickled = self.random_state.get_state(legacy=False) + random_state, _ = self._create_state() + random_state.seed(0) + random_state.random_sample(100) + random_state.standard_normal() + pickled = random_state.get_state(legacy=False) assert_equal(pickled['has_gauss'], 1) - rs_unpick = pickle.loads(pickle.dumps(self.random_state)) + rs_unpick = pickle.loads(pickle.dumps(random_state)) unpickled = rs_unpick.get_state(legacy=False) assert_mt19937_state_equal(pickled, unpickled) def test_state_setting(self): - attr_state = self.random_state.__getstate__() - self.random_state.standard_normal() - self.random_state.__setstate__(attr_state) - state = self.random_state.get_state(legacy=False) + random_state, state = self._create_state() + attr_state = random_state.__getstate__() + random_state.standard_normal() + 
random_state.__setstate__(attr_state) + state = random_state.get_state(legacy=False) assert_mt19937_state_equal(attr_state, state) def test_repr(self): - assert repr(self.random_state).startswith('RandomState(MT19937)') + random_state, _ = self._create_state() + assert repr(random_state).startswith('RandomState(MT19937)') class TestRandint: @@ -443,9 +453,7 @@ def test_respect_dtype_singleton(self): class TestRandomDist: # Make sure the random distribution returns the correct value for a # given seed - - def setup_method(self): - self.seed = 1234567890 + seed = 1234567890 def test_rand(self): random.seed(self.seed) @@ -1309,8 +1317,7 @@ def test_zipf(self): class TestBroadcast: # tests that functions that broadcast behave # correctly when presented with non-scalar arguments - def setup_method(self): - self.seed = 123456789 + seed = 123456789 def set_seed(self): random.seed(self.seed) @@ -1896,8 +1903,7 @@ def test_logseries(self): @pytest.mark.skipif(IS_WASM, reason="can't start thread") class TestThread: # make sure each state produces the same sequence even in threads - def setup_method(self): - self.seeds = range(4) + seeds = range(4) def check_function(self, function, sz): from threading import Thread @@ -1942,13 +1948,11 @@ def gen_random(state, out): # See Issue #4263 class TestSingleEltArrayInput: - def setup_method(self): - self.argOne = np.array([2]) - self.argTwo = np.array([3]) - self.argThree = np.array([4]) - self.tgtShape = (1,) + def _create_arrays(self): + return np.array([2]), np.array([3]), np.array([4]), (1,) def test_one_arg_funcs(self): + argOne, _, _, tgtShape = self._create_arrays() funcs = (random.exponential, random.standard_gamma, random.chisquare, random.standard_t, random.pareto, random.weibull, @@ -1963,11 +1967,12 @@ def test_one_arg_funcs(self): out = func(np.array([0.5])) else: - out = func(self.argOne) + out = func(argOne) - assert_equal(out.shape, self.tgtShape) + assert_equal(out.shape, tgtShape) def test_two_arg_funcs(self): + 
argOne, argTwo, _, tgtShape = self._create_arrays() funcs = (random.uniform, random.normal, random.beta, random.gamma, random.f, random.noncentral_chisquare, @@ -1983,30 +1988,31 @@ def test_two_arg_funcs(self): argTwo = np.array([0.5]) else: - argTwo = self.argTwo + argTwo = argTwo - out = func(self.argOne, argTwo) - assert_equal(out.shape, self.tgtShape) + out = func(argOne, argTwo) + assert_equal(out.shape, tgtShape) - out = func(self.argOne[0], argTwo) - assert_equal(out.shape, self.tgtShape) + out = func(argOne[0], argTwo) + assert_equal(out.shape, tgtShape) - out = func(self.argOne, argTwo[0]) - assert_equal(out.shape, self.tgtShape) + out = func(argOne, argTwo[0]) + assert_equal(out.shape, tgtShape) def test_three_arg_funcs(self): + argOne, argTwo, argThree, tgtShape = self._create_arrays() funcs = [random.noncentral_f, random.triangular, random.hypergeometric] for func in funcs: - out = func(self.argOne, self.argTwo, self.argThree) - assert_equal(out.shape, self.tgtShape) + out = func(argOne, argTwo, argThree) + assert_equal(out.shape, tgtShape) - out = func(self.argOne[0], self.argTwo, self.argThree) - assert_equal(out.shape, self.tgtShape) + out = func(argOne[0], argTwo, argThree) + assert_equal(out.shape, tgtShape) - out = func(self.argOne, self.argTwo[0], self.argThree) - assert_equal(out.shape, self.tgtShape) + out = func(argOne, argTwo[0], argThree) + assert_equal(out.shape, tgtShape) # Ensure returned array dtype is correct for platform From 61e488926c4b8ca54cec36f657172f2726c93180 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Wed, 3 Sep 2025 13:15:29 -0600 Subject: [PATCH 0393/1718] BUG: use correct input dtype in flatiter assignment --- numpy/_core/src/multiarray/iterators.c | 2 +- numpy/_core/tests/test_stringdtype.py | 7 +++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/numpy/_core/src/multiarray/iterators.c b/numpy/_core/src/multiarray/iterators.c index 704fd4738589..1b4ed59fbfe0 100644 --- 
a/numpy/_core/src/multiarray/iterators.c +++ b/numpy/_core/src/multiarray/iterators.c @@ -850,7 +850,7 @@ iter_ass_subscript(PyArrayIterObject *self, PyObject *ind, PyObject *val) /* We can assume the newly allocated array is aligned */ int is_aligned = IsUintAligned(self->ao); if (PyArray_GetDTypeTransferFunction( - is_aligned, dtype_size, dtype_size, dtype, dtype, 0, + is_aligned, dtype_size, dtype_size, PyArray_DESCR(arrval), dtype, 0, &cast_info, &transfer_flags) < 0) { goto finish; } diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index 52a225619ccf..5d2364bf8cfc 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -530,6 +530,13 @@ def test_fancy_indexing(string_list): assert_array_equal(sarr[ind], uarr[ind]) +def test_flatiter_indexing(): + # see gh-29659 + arr = np.array(['hello', 'world'], dtype='T') + arr.flat[:] = 9223372036854775 + assert_array_equal(arr, np.array([9223372036854775] * 2, dtype='T')) + + def test_creation_functions(): assert_array_equal(np.zeros(3, dtype="T"), ["", "", ""]) assert_array_equal(np.empty(3, dtype="T"), ["", "", ""]) From c2eeeba0c34200b1a6c4377de662b73b9289c510 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Wed, 3 Sep 2025 13:34:34 -0600 Subject: [PATCH 0394/1718] MAINT: Remove NPY_SORT_NANFIRST from docs and code. --- doc/release/upcoming_changes/29642.c_api.rst | 3 +-- doc/source/reference/c-api/array.rst | 9 --------- numpy/_core/include/numpy/ndarraytypes.h | 1 - numpy/_core/src/multiarray/methods.c | 8 -------- 4 files changed, 1 insertion(+), 20 deletions(-) diff --git a/doc/release/upcoming_changes/29642.c_api.rst b/doc/release/upcoming_changes/29642.c_api.rst index 0a28e32e3530..65c804ef829b 100644 --- a/doc/release/upcoming_changes/29642.c_api.rst +++ b/doc/release/upcoming_changes/29642.c_api.rst @@ -8,7 +8,6 @@ needed. 
The new names of interest are: * NPY_SORT_DEFAULT -- default sort (same value as NPY_QUICKSORT) * NPY_SORT_STABLE -- the sort must be stable (same value as NPY_MERGESORT) * NPY_SORT_DESCENDING -- the sort must be descending -* NPY_SORT_NANFIRST -- NaNs sort to the beginning The semantic change is that NPY_HEAPSORT is mapped to NPY_QUICKSORT when used. -Note that NPY_SORT_DESCENDING and NPY_SORT_NANFIRST have yet to be implemented. +Note that NPY_SORT_DESCENDING is not yet implemented. diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst index b5c1a08e2abf..667b9948a3d9 100644 --- a/doc/source/reference/c-api/array.rst +++ b/doc/source/reference/c-api/array.rst @@ -4381,15 +4381,6 @@ Enumerated Types This functionality is not yet implemented for any of the NumPy types and cannot yet be set from the Python interface. - .. c:enumerator:: NPY_SORT_NANFIRST - - (Requirement) Specifies that the sort must sort NaNs to - the beginning, e.g. NaNs compare less than any other value for the - type. This is only relevant if the type has NaN equivalents. - This functionality is not yet implemented for any of the NumPy types - and cannot yet be set from the Python interface. It may change in the - future. - .. 
c:enum:: NPY_SCALARKIND A special variable type indicating the number of "kinds" of diff --git a/numpy/_core/include/numpy/ndarraytypes.h b/numpy/_core/include/numpy/ndarraytypes.h index 01fe231927f4..ecd5d9724528 100644 --- a/numpy/_core/include/numpy/ndarraytypes.h +++ b/numpy/_core/include/numpy/ndarraytypes.h @@ -193,7 +193,6 @@ typedef enum { NPY_SORT_DEFAULT = 0, NPY_SORT_STABLE = 2, NPY_SORT_DESCENDING = 4, - NPY_SORT_NANFIRST = 8, } NPY_SORTKIND; #define NPY_NSORTS (NPY_STABLESORT + 1) diff --git a/numpy/_core/src/multiarray/methods.c b/numpy/_core/src/multiarray/methods.c index 84f749acce3f..f2150d63c496 100644 --- a/numpy/_core/src/multiarray/methods.c +++ b/numpy/_core/src/multiarray/methods.c @@ -1255,7 +1255,6 @@ array_sort(PyArrayObject *self, NPY_SORTKIND sortkind = _NPY_SORT_UNDEFINED; int stable = -1; int descending = -1; - int nanfirst = -1; NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("sort", args, len_args, kwnames, @@ -1264,7 +1263,6 @@ array_sort(PyArrayObject *self, "|order", NULL, &order, "$stable", &PyArray_OptionalBoolConverter, &stable, // "$descending", &PyArray_OptionalBoolConverter, &descending, -// "$nanfirst", &PyArray_OptionalBoolConverter, &nanfirst, NULL, NULL, NULL) < 0) { return NULL; } @@ -1274,14 +1272,12 @@ array_sort(PyArrayObject *self, sortkind = 0; sortkind |= (stable > 0)? NPY_SORT_STABLE: 0; sortkind |= (descending > 0)? NPY_SORT_DESCENDING: 0; - sortkind |= (nanfirst > 0)? 
NPY_SORT_NANFIRST: 0; } else { // Check that no keywords are used int keywords_used = 0; keywords_used |= (stable != -1); keywords_used |= (descending != -1); - keywords_used |= (nanfirst != -1); if (keywords_used) { PyErr_SetString(PyExc_ValueError, "`kind` and keyword parameters can't be provided at " @@ -1421,7 +1417,6 @@ array_argsort(PyArrayObject *self, NPY_SORTKIND sortkind = _NPY_SORT_UNDEFINED; int stable = -1; int descending = -1; - int nanfirst = -1; NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("argsort", args, len_args, kwnames, @@ -1430,7 +1425,6 @@ array_argsort(PyArrayObject *self, "|order", NULL, &order, "$stable", &PyArray_OptionalBoolConverter, &stable, // "$descending", &PyArray_OptionalBoolConverter, &descending, -// "$nanfirst", &PyArray_OptionalBoolConverter, &nanfirst, NULL, NULL, NULL) < 0) { return NULL; } @@ -1440,14 +1434,12 @@ array_argsort(PyArrayObject *self, sortkind = 0; sortkind |= (stable > 0)? NPY_SORT_STABLE: 0; sortkind |= (descending > 0)? NPY_SORT_DESCENDING: 0; - sortkind |= (nanfirst > 0)? NPY_SORT_NANFIRST: 0; } else { // Check that no keywords are used int keywords_used = 0; keywords_used |= (stable != -1); keywords_used |= (descending != -1); - keywords_used |= (nanfirst != -1); if (keywords_used) { PyErr_SetString(PyExc_ValueError, "`kind` and keyword parameters can't be provided at " From 750d653b118962dd6c2c0c5dbaa0caeede972ace Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 4 Sep 2025 04:05:25 +0000 Subject: [PATCH 0395/1718] MAINT: Bump mamba-org/setup-micromamba from 2.0.5 to 2.0.6 Bumps [mamba-org/setup-micromamba](https://github.com/mamba-org/setup-micromamba) from 2.0.5 to 2.0.6. 
- [Release notes](https://github.com/mamba-org/setup-micromamba/releases) - [Commits](https://github.com/mamba-org/setup-micromamba/compare/b09ef9b599704322748535812ca03efb2625677b...7f29b8b80078b1b601dfa018b0f7425c587c63bb) --- updated-dependencies: - dependency-name: mamba-org/setup-micromamba dependency-version: 2.0.6 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/wheels.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 6d041831c22d..1604bd3046f5 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -184,7 +184,7 @@ jobs: path: ./wheelhouse/*.whl - name: install micromamba - uses: mamba-org/setup-micromamba@b09ef9b599704322748535812ca03efb2625677b + uses: mamba-org/setup-micromamba@7f29b8b80078b1b601dfa018b0f7425c587c63bb if: ${{ matrix.buildplat[1] != 'win_arm64' }} # unsupported platform at the moment with: # for installation of anaconda-client, required for upload to From 2235ce10e5efa15d4a8b0455d36022dd72655446 Mon Sep 17 00:00:00 2001 From: koki watanabe Date: Thu, 4 Sep 2025 23:26:08 +0900 Subject: [PATCH 0396/1718] fix: change parameter --- benchmarks/benchmarks/bench_lib.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmarks/benchmarks/bench_lib.py b/benchmarks/benchmarks/bench_lib.py index 37f599f075b3..32626b986817 100644 --- a/benchmarks/benchmarks/bench_lib.py +++ b/benchmarks/benchmarks/bench_lib.py @@ -123,7 +123,7 @@ class Unique(Benchmark): param_names = ["array_size", "percent_nans", "percent_unique_values", "dtype"] params = [ # sizes of the 1D arrays - [int(2e3), int(2e6)], + [int(1e3), int(1e6)], # percent of np.nan in arrays [10., 90.], # percent of unique values in arrays From 282cc5939519bb22e9cc4b684edeb7ddae30b9dd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: 
Thu, 4 Sep 2025 17:05:44 +0000 Subject: [PATCH 0397/1718] MAINT: Bump actions/github-script from 7.0.1 to 8.0.0 Bumps [actions/github-script](https://github.com/actions/github-script) from 7.0.1 to 8.0.0. - [Release notes](https://github.com/actions/github-script/releases) - [Commits](https://github.com/actions/github-script/compare/60a0d83039c74a4aee543508d2ffcb1c3799cdea...ed597411d8f924073f98dfc5c65a23a2325f34cd) --- updated-dependencies: - dependency-name: actions/github-script dependency-version: 8.0.0 dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- .github/workflows/mypy_primer_comment.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/mypy_primer_comment.yml b/.github/workflows/mypy_primer_comment.yml index 338a53da6a13..e62f26edd85f 100644 --- a/.github/workflows/mypy_primer_comment.yml +++ b/.github/workflows/mypy_primer_comment.yml @@ -18,7 +18,7 @@ jobs: if: ${{ github.event.workflow_run.conclusion == 'success' }} steps: - name: Download diffs - uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1 + uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 with: script: | const fs = require('fs'); @@ -42,7 +42,7 @@ jobs: - name: Get PR number id: get-pr-number - uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1 + uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 with: script: | const fs = require('fs'); @@ -58,7 +58,7 @@ jobs: - name: Post comment id: post-comment - uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1 + uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 with: github-token: ${{ secrets.GITHUB_TOKEN }} script: | From 5de7753badd4de682943864c012eaee98c2e927b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 4 Sep 2025 
17:05:59 +0000 Subject: [PATCH 0398/1718] MAINT: Bump actions/setup-python from 5.6.0 to 6.0.0 Bumps [actions/setup-python](https://github.com/actions/setup-python) from 5.6.0 to 6.0.0. - [Release notes](https://github.com/actions/setup-python/releases) - [Commits](https://github.com/actions/setup-python/compare/a26af69be951a213d495a4c3e4e4022e16d87065...e797f83bcb11b83ae66e0230d6156d7c80228e7c) --- updated-dependencies: - dependency-name: actions/setup-python dependency-version: 6.0.0 dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- .github/workflows/linux.yml | 18 +++++++++--------- .github/workflows/linux_blas.yml | 12 ++++++------ .github/workflows/linux_simd.yml | 12 ++++++------ .github/workflows/macos.yml | 2 +- .github/workflows/mypy.yml | 2 +- .github/workflows/mypy_primer.yml | 2 +- .github/workflows/wheels.yml | 4 ++-- .github/workflows/windows.yml | 6 +++--- 8 files changed, 29 insertions(+), 29 deletions(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 40e372cbf3e4..b55f3dedb67e 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -38,7 +38,7 @@ jobs: submodules: recursive fetch-depth: 0 persist-credentials: false - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: '3.11' - name: Install linter requirements @@ -65,7 +65,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: ${{ matrix.version }} - uses: ./.github/meson_actions @@ -80,7 +80,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - 
uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: 'pypy3.11-v7.3.20' - name: Setup using scipy-openblas @@ -129,7 +129,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: '3.11' - name: Install build and test dependencies from PyPI @@ -169,7 +169,7 @@ jobs: fetch-tags: true persist-credentials: false - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: '3.11' @@ -246,7 +246,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: '3.11' - name: Install build and benchmarking dependencies @@ -285,7 +285,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: '3.11' - name: Install gfortran and setup OpenBLAS (sdist build) @@ -329,7 +329,7 @@ jobs: path: 'array-api-tests' persist-credentials: false - name: Set up Python - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: '3.11' - name: Install build and test dependencies from PyPI @@ -358,7 +358,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: 
actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: '3.11' - name: Install build and test dependencies from PyPI diff --git a/.github/workflows/linux_blas.yml b/.github/workflows/linux_blas.yml index 633de1fbb84c..570e90437e1a 100644 --- a/.github/workflows/linux_blas.yml +++ b/.github/workflows/linux_blas.yml @@ -70,7 +70,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: '3.11' @@ -199,7 +199,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: '3.11' @@ -228,7 +228,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: '3.11' @@ -290,7 +290,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: '3.11' @@ -354,7 +354,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: '3.11' @@ -391,7 +391,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: 
actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: '3.11' diff --git a/.github/workflows/linux_simd.yml b/.github/workflows/linux_simd.yml index 511e1d2ba125..5bd1eab7f797 100644 --- a/.github/workflows/linux_simd.yml +++ b/.github/workflows/linux_simd.yml @@ -63,7 +63,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: '3.11' - uses: ./.github/meson_actions @@ -81,7 +81,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: '3.11' @@ -127,7 +127,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: '3.11' - name: Install dependencies @@ -181,7 +181,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: "${{ matrix.BUILD_PROP[2] }}" - uses: ./.github/meson_actions @@ -196,7 +196,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: '3.11' @@ -247,7 +247,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: 
actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: '3.11' diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index da0972678b15..97ef55fb0228 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -124,7 +124,7 @@ jobs: fetch-tags: true persist-credentials: false - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: ${{ matrix.version }} diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml index e362a29f628a..e2dcdccf868b 100644 --- a/.github/workflows/mypy.yml +++ b/.github/workflows/mypy.yml @@ -56,7 +56,7 @@ jobs: fetch-tags: true persist-credentials: false - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: ${{ matrix.os_python[1] }} - name: Install dependencies diff --git a/.github/workflows/mypy_primer.yml b/.github/workflows/mypy_primer.yml index 2bfc4197e9b8..8dfeac05d4ba 100644 --- a/.github/workflows/mypy_primer.yml +++ b/.github/workflows/mypy_primer.yml @@ -32,7 +32,7 @@ jobs: with: path: numpy_to_test fetch-depth: 0 - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: "3.12" - name: Install dependencies diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 1604bd3046f5..9cf98d481fe3 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -143,7 +143,7 @@ jobs: if: runner.os == 'windows' # Used to push the built wheels - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: "3.x" @@ -250,7 
+250,7 @@ jobs: submodules: true persist-credentials: false # Used to push the built wheels - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: # Build sdist on lowest supported Python python-version: "3.11" diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index dba4f74496b2..66f544b51d7d 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -35,7 +35,7 @@ jobs: persist-credentials: false - name: Setup Python - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: ${{ matrix.compiler-pyversion[1] }} @@ -106,7 +106,7 @@ jobs: persist-credentials: false - name: Setup Python - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: ${{ matrix.compiler-pyversion[1] }} architecture: arm64 @@ -180,7 +180,7 @@ jobs: persist-credentials: false - name: Setup Python - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: '3.11' architecture: ${{ matrix.architecture }} From 398917323d3ee430161d3001384c8b5071f207e0 Mon Sep 17 00:00:00 2001 From: "Christine P. 
Chai" Date: Fri, 5 Sep 2025 05:36:41 -0700 Subject: [PATCH 0399/1718] DOC: Make the image credit author link clickable (#29588) * DOC: Make the image credit author link clickable * DOC: Remove an extra blank [skip azp] [skip cirrus] [skip actions] --- doc/source/user/absolute_beginners.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/doc/source/user/absolute_beginners.rst b/doc/source/user/absolute_beginners.rst index d0d7e70fa284..3ba12b84052e 100644 --- a/doc/source/user/absolute_beginners.rst +++ b/doc/source/user/absolute_beginners.rst @@ -1740,4 +1740,5 @@ For directions regarding installing Matplotlib, see the official ------------------------------------------------------- -*Image credits: Jay Alammar https://jalammar.github.io/* +*Image credits: Jay Alammar* +`https://jalammar.github.io/ `_ From 5f0b57a109af8baec31265e191971b46ebeaed46 Mon Sep 17 00:00:00 2001 From: Sanjay Kumar Sakamuri Kamalakar Date: Fri, 5 Sep 2025 20:12:59 +0530 Subject: [PATCH 0400/1718] Update regex patterns for VSX3 and VSX4 features --- meson_cpu/ppc64/meson.build | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/meson_cpu/ppc64/meson.build b/meson_cpu/ppc64/meson.build index bad95257ca95..57fe47140429 100644 --- a/meson_cpu/ppc64/meson.build +++ b/meson_cpu/ppc64/meson.build @@ -23,7 +23,7 @@ if host_machine.endian() == 'little' VSX.update(implies: VSX2) endif VSX3 = mod_features.new( - 'VSX3', 3, implies: VSX2, args: {'val': '-mcpu=power9', 'match': '.*[mcpu=|vsx].*'}, + 'VSX3', 3, implies: VSX2, args: {'val': '-mcpu=power9', 'match': '.*(?:mcpu=|vsx).*'}, detect: {'val': 'VSX3', 'match': 'VSX.*'}, test_code: files(source_root + '/numpy/distutils/checks/cpu_vsx3.c')[0], extra_tests: { @@ -31,7 +31,7 @@ VSX3 = mod_features.new( } ) VSX4 = mod_features.new( - 'VSX4', 4, implies: VSX3, args: {'val': '-mcpu=power10', 'match': '.*[mcpu=|vsx].*'}, + 'VSX4', 4, implies: VSX3, args: {'val': '-mcpu=power10', 'match': '.*(?:mcpu=|vsx).*'}, detect: 
{'val': 'VSX4', 'match': 'VSX.*'}, test_code: files(source_root + '/numpy/distutils/checks/cpu_vsx4.c')[0], extra_tests: { From 598f2df11abe2ed592db911f199a06ddbff9b657 Mon Sep 17 00:00:00 2001 From: koki watanabe Date: Sat, 6 Sep 2025 00:12:29 +0900 Subject: [PATCH 0401/1718] fix: change parameter --- benchmarks/benchmarks/bench_lib.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmarks/benchmarks/bench_lib.py b/benchmarks/benchmarks/bench_lib.py index 32626b986817..3f7988a2ab7f 100644 --- a/benchmarks/benchmarks/bench_lib.py +++ b/benchmarks/benchmarks/bench_lib.py @@ -123,7 +123,7 @@ class Unique(Benchmark): param_names = ["array_size", "percent_nans", "percent_unique_values", "dtype"] params = [ # sizes of the 1D arrays - [int(1e3), int(1e6)], + [200, int(2e5)], # percent of np.nan in arrays [10., 90.], # percent of unique values in arrays From 52989856645d1db14f1e12ad464c0566c91417c1 Mon Sep 17 00:00:00 2001 From: Britney Whittington <103079612+bwhitt7@users.noreply.github.com> Date: Fri, 5 Sep 2025 11:19:01 -0400 Subject: [PATCH 0402/1718] TST: Replace xunit setup with methods (#29666) * TST: Replace xunit setup with methods * TST: Change random to RandomState * TST: Move vars into test method --- numpy/_core/tests/test_multiarray.py | 360 +++++++++++++++------------ numpy/testing/tests/test_utils.py | 27 +- 2 files changed, 213 insertions(+), 174 deletions(-) diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 8e50e71c3741..2d8587ad5ac1 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -105,16 +105,14 @@ def _aligned_zeros(shape, dtype=float, order="C", align=None): class TestFlags: - def setup_method(self): - self.a = np.arange(10) - def test_writeable(self): + arr = np.arange(10) mydict = locals() - self.a.flags.writeable = False - assert_raises(ValueError, runstring, 'self.a[0] = 3', mydict) - self.a.flags.writeable = True - self.a[0] = 5 - 
self.a[0] = 0 + arr.flags.writeable = False + assert_raises(ValueError, runstring, 'arr[0] = 3', mydict) + arr.flags.writeable = True + arr[0] = 5 + arr[0] = 0 def test_writeable_any_base(self): # Ensure that any base being writeable is sufficient to change flag; @@ -252,18 +250,19 @@ class MyArr: assert np.asarray(MyArr()).flags.writeable is writeable def test_otherflags(self): - assert_equal(self.a.flags.carray, True) - assert_equal(self.a.flags['C'], True) - assert_equal(self.a.flags.farray, False) - assert_equal(self.a.flags.behaved, True) - assert_equal(self.a.flags.fnc, False) - assert_equal(self.a.flags.forc, True) - assert_equal(self.a.flags.owndata, True) - assert_equal(self.a.flags.writeable, True) - assert_equal(self.a.flags.aligned, True) - assert_equal(self.a.flags.writebackifcopy, False) - assert_equal(self.a.flags['X'], False) - assert_equal(self.a.flags['WRITEBACKIFCOPY'], False) + arr = np.arange(10) + assert_equal(arr.flags.carray, True) + assert_equal(arr.flags['C'], True) + assert_equal(arr.flags.farray, False) + assert_equal(arr.flags.behaved, True) + assert_equal(arr.flags.fnc, False) + assert_equal(arr.flags.forc, True) + assert_equal(arr.flags.owndata, True) + assert_equal(arr.flags.writeable, True) + assert_equal(arr.flags.aligned, True) + assert_equal(arr.flags.writebackifcopy, False) + assert_equal(arr.flags['X'], False) + assert_equal(arr.flags['WRITEBACKIFCOPY'], False) def test_string_align(self): a = np.zeros(4, dtype=np.dtype('|S4')) @@ -311,41 +310,44 @@ def test_int(self): class TestAttributes: - def setup_method(self): - self.one = np.arange(10) - self.two = np.arange(20).reshape(4, 5) - self.three = np.arange(60, dtype=np.float64).reshape(2, 5, 6) + def _create_arrays(self): + one = np.arange(10) + two = np.arange(20).reshape(4, 5) + three = np.arange(60, dtype=np.float64).reshape(2, 5, 6) + return one, two, three def test_attributes(self): - assert_equal(self.one.shape, (10,)) - assert_equal(self.two.shape, (4, 5)) - 
assert_equal(self.three.shape, (2, 5, 6)) - self.three.shape = (10, 3, 2) - assert_equal(self.three.shape, (10, 3, 2)) - self.three.shape = (2, 5, 6) - assert_equal(self.one.strides, (self.one.itemsize,)) - num = self.two.itemsize - assert_equal(self.two.strides, (5 * num, num)) - num = self.three.itemsize - assert_equal(self.three.strides, (30 * num, 6 * num, num)) - assert_equal(self.one.ndim, 1) - assert_equal(self.two.ndim, 2) - assert_equal(self.three.ndim, 3) - num = self.two.itemsize - assert_equal(self.two.size, 20) - assert_equal(self.two.nbytes, 20 * num) - assert_equal(self.two.itemsize, self.two.dtype.itemsize) - assert_equal(self.two.base, np.arange(20)) + one, two, three = self._create_arrays() + assert_equal(one.shape, (10,)) + assert_equal(two.shape, (4, 5)) + assert_equal(three.shape, (2, 5, 6)) + three.shape = (10, 3, 2) + assert_equal(three.shape, (10, 3, 2)) + three.shape = (2, 5, 6) + assert_equal(one.strides, (one.itemsize,)) + num = two.itemsize + assert_equal(two.strides, (5 * num, num)) + num = three.itemsize + assert_equal(three.strides, (30 * num, 6 * num, num)) + assert_equal(one.ndim, 1) + assert_equal(two.ndim, 2) + assert_equal(three.ndim, 3) + num = two.itemsize + assert_equal(two.size, 20) + assert_equal(two.nbytes, 20 * num) + assert_equal(two.itemsize, two.dtype.itemsize) + assert_equal(two.base, np.arange(20)) def test_dtypeattr(self): - assert_equal(self.one.dtype, np.dtype(np.int_)) - assert_equal(self.three.dtype, np.dtype(np.float64)) - assert_equal(self.one.dtype.char, np.dtype(int).char) - assert self.one.dtype.char in "lq" - assert_equal(self.three.dtype.char, 'd') - assert_(self.three.dtype.str[0] in '<>') - assert_equal(self.one.dtype.str[1], 'i') - assert_equal(self.three.dtype.str[1], 'f') + one, _, three = self._create_arrays() + assert_equal(one.dtype, np.dtype(np.int_)) + assert_equal(three.dtype, np.dtype(np.float64)) + assert_equal(one.dtype.char, np.dtype(int).char) + assert one.dtype.char in "lq" + 
assert_equal(three.dtype.char, 'd') + assert_(three.dtype.str[0] in '<>') + assert_equal(one.dtype.str[1], 'i') + assert_equal(three.dtype.str[1], 'f') def test_int_subclassing(self): # Regression test for https://github.com/numpy/numpy/pull/3526 @@ -356,7 +358,7 @@ def test_int_subclassing(self): assert_(not isinstance(numpy_int, int)) def test_stridesattr(self): - x = self.one + x, _, _ = self._create_arrays() def make_array(size, offset, strides): return np.ndarray(size, buffer=x, dtype=int, @@ -373,7 +375,7 @@ def make_array(size, offset, strides): make_array(0, 0, 10) def test_set_stridesattr(self): - x = self.one + x, _, _ = self._create_arrays() def make_array(size, offset, strides): try: @@ -732,46 +734,46 @@ def test_structured_non_void(self): class TestZeroRank: - def setup_method(self): - self.d = np.array(0), np.array('x', object) + def _create_arrays(self): + return np.array(0), np.array('x', object) def test_ellipsis_subscript(self): - a, b = self.d + a, b = self._create_arrays() assert_equal(a[...], 0) assert_equal(b[...], 'x') assert_(a[...].base is a) # `a[...] is a` in numpy <1.9. assert_(b[...].base is b) # `b[...] is b` in numpy <1.9. def test_empty_subscript(self): - a, b = self.d + a, b = self._create_arrays() assert_equal(a[()], 0) assert_equal(b[()], 'x') assert_(type(a[()]) is a.dtype.type) assert_(type(b[()]) is str) def test_invalid_subscript(self): - a, b = self.d + a, b = self._create_arrays() assert_raises(IndexError, lambda x: x[0], a) assert_raises(IndexError, lambda x: x[0], b) assert_raises(IndexError, lambda x: x[np.array([], int)], a) assert_raises(IndexError, lambda x: x[np.array([], int)], b) def test_ellipsis_subscript_assignment(self): - a, b = self.d + a, b = self._create_arrays() a[...] = 42 assert_equal(a, 42) b[...] 
= '' assert_equal(b.item(), '') def test_empty_subscript_assignment(self): - a, b = self.d + a, b = self._create_arrays() a[()] = 42 assert_equal(a, 42) b[()] = '' assert_equal(b.item(), '') def test_invalid_subscript_assignment(self): - a, b = self.d + a, b = self._create_arrays() def assign(x, i, v): x[i] = v @@ -781,7 +783,7 @@ def assign(x, i, v): assert_raises(ValueError, assign, a, (), '') def test_newaxis(self): - a, b = self.d + a, _ = self._create_arrays() assert_equal(a[np.newaxis].shape, (1,)) assert_equal(a[..., np.newaxis].shape, (1,)) assert_equal(a[np.newaxis, ...].shape, (1,)) @@ -792,7 +794,7 @@ def test_newaxis(self): assert_equal(a[(np.newaxis,) * 10].shape, (1,) * 10) def test_invalid_newaxis(self): - a, b = self.d + a, _ = self._create_arrays() def subscript(x, i): x[i] @@ -836,26 +838,26 @@ def test_real_imag(self): class TestScalarIndexing: - def setup_method(self): - self.d = np.array([0, 1])[0] + def _create_array(self): + return np.array([0, 1])[0] def test_ellipsis_subscript(self): - a = self.d + a = self._create_array() assert_equal(a[...], 0) assert_equal(a[...].shape, ()) def test_empty_subscript(self): - a = self.d + a = self._create_array() assert_equal(a[()], 0) assert_equal(a[()].shape, ()) def test_invalid_subscript(self): - a = self.d + a = self._create_array() assert_raises(IndexError, lambda x: x[0], a) assert_raises(IndexError, lambda x: x[np.array([], int)], a) def test_invalid_subscript_assignment(self): - a = self.d + a = self._create_array() def assign(x, i, v): x[i] = v @@ -863,7 +865,7 @@ def assign(x, i, v): assert_raises(TypeError, assign, a, 0, 42) def test_newaxis(self): - a = self.d + a = self._create_array() assert_equal(a[np.newaxis].shape, (1,)) assert_equal(a[..., np.newaxis].shape, (1,)) assert_equal(a[np.newaxis, ...].shape, (1,)) @@ -874,7 +876,7 @@ def test_newaxis(self): assert_equal(a[(np.newaxis,) * 10].shape, (1,) * 10) def test_invalid_newaxis(self): - a = self.d + a = self._create_array() def 
subscript(x, i): x[i] @@ -5572,8 +5574,7 @@ def test_invalid_axis(self): # gh-7528 class TestIO: """Test tofile, fromfile, tobytes, and fromstring""" - @pytest.fixture() - def x(self): + def _create_data(self): shape = (2, 4, 3) rand = np.random.random x = rand(shape) + rand(shape).astype(complex) * 1j @@ -5631,7 +5632,8 @@ def test_empty_files_binary(self, tmp_filename): y = np.fromfile(tmp_filename, sep=" ") assert_(y.size == 0, "Array not empty") - def test_roundtrip_file(self, x, tmp_filename): + def test_roundtrip_file(self, tmp_filename): + x = self._create_data() with open(tmp_filename, 'wb') as f: x.tofile(f) # NB. doesn't work with flush+seek, due to use of C stdio @@ -5639,18 +5641,21 @@ def test_roundtrip_file(self, x, tmp_filename): y = np.fromfile(f, dtype=x.dtype) assert_array_equal(y, x.flat) - def test_roundtrip(self, x, tmp_filename): + def test_roundtrip(self, tmp_filename): + x = self._create_data() x.tofile(tmp_filename) y = np.fromfile(tmp_filename, dtype=x.dtype) assert_array_equal(y, x.flat) - def test_roundtrip_dump_pathlib(self, x, tmp_filename): + def test_roundtrip_dump_pathlib(self, tmp_filename): + x = self._create_data() p = pathlib.Path(tmp_filename) x.dump(p) y = np.load(p, allow_pickle=True) assert_array_equal(y, x) - def test_roundtrip_binary_str(self, x): + def test_roundtrip_binary_str(self): + x = self._create_data() s = x.tobytes() y = np.frombuffer(s, dtype=x.dtype) assert_array_equal(y, x.flat) @@ -5659,7 +5664,8 @@ def test_roundtrip_binary_str(self, x): y = np.frombuffer(s, dtype=x.dtype) assert_array_equal(y, x.flatten('F')) - def test_roundtrip_str(self, x): + def test_roundtrip_str(self): + x = self._create_data() x = x.real.ravel() s = "@".join(map(str, x)) y = np.fromstring(s, sep="@") @@ -5667,14 +5673,16 @@ def test_roundtrip_str(self, x): assert_array_equal(x[nan_mask], y[nan_mask]) assert_array_equal(x[~nan_mask], y[~nan_mask]) - def test_roundtrip_repr(self, x): + def test_roundtrip_repr(self): + x = 
self._create_data() x = x.real.ravel() s = "@".join(repr(x)[11:-1] for x in x) y = np.fromstring(s, sep="@") assert_array_equal(x, y) - def test_unseekable_fromfile(self, x, tmp_filename): + def test_unseekable_fromfile(self, tmp_filename): # gh-6246 + x = self._create_data() x.tofile(tmp_filename) def fail(*args, **kwargs): @@ -5685,8 +5693,9 @@ def fail(*args, **kwargs): f.tell = fail assert_raises(OSError, np.fromfile, f, dtype=x.dtype) - def test_io_open_unbuffered_fromfile(self, x, tmp_filename): + def test_io_open_unbuffered_fromfile(self, tmp_filename): # gh-6632 + x = self._create_data() x.tofile(tmp_filename) with open(tmp_filename, 'rb', buffering=0) as f: y = np.fromfile(f, dtype=x.dtype) @@ -5712,8 +5721,9 @@ def test_largish_file(self, tmp_filename): d.tofile(f) assert_equal(os.path.getsize(tmp_filename), d.nbytes * 2) - def test_io_open_buffered_fromfile(self, x, tmp_filename): + def test_io_open_buffered_fromfile(self, tmp_filename): # gh-6632 + x = self._create_data() x.tofile(tmp_filename) with open(tmp_filename, 'rb', buffering=-1) as f: y = np.fromfile(f, dtype=x.dtype) @@ -5777,7 +5787,8 @@ def test_load_object_array_fromfile(self, tmp_filename): assert_raises_regex(ValueError, "Cannot read into object array", np.fromfile, tmp_filename, dtype=object) - def test_fromfile_offset(self, x, tmp_filename): + def test_fromfile_offset(self, tmp_filename): + x = self._create_data() with open(tmp_filename, 'wb') as f: x.tofile(f) @@ -5812,13 +5823,14 @@ def test_fromfile_offset(self, x, tmp_filename): sep=",", offset=1) @pytest.mark.skipif(IS_PYPY, reason="bug in PyPy's PyNumber_AsSsize_t") - def test_fromfile_bad_dup(self, x, tmp_filename): + def test_fromfile_bad_dup(self, tmp_filename): def dup_str(fd): return 'abc' def dup_bigint(fd): return 2**68 + x = self._create_data() old_dup = os.dup try: with open(tmp_filename, 'wb') as f: @@ -6052,39 +6064,43 @@ def test_mmap_close(self): mm.close() class TestFlat: - def setup_method(self): - a0 = 
np.arange(20.0) - a = a0.reshape(4, 5) - a0 = a0.reshape((4, 5)) + def _create_arrays(self): + a = np.arange(20.0).reshape(4, 5) a.flags.writeable = False - self.a = a - self.b = a[::2, ::2] - self.a0 = a0 - self.b0 = a0[::2, ::2] + b = a[::2, ::2] + return a, b def test_contiguous(self): testpassed = False + a, _ = self._create_arrays() try: - self.a.flat[12] = 100.0 + a.flat[12] = 100.0 except ValueError: testpassed = True assert_(testpassed) - assert_(self.a.flat[12] == 12.0) + assert_(a.flat[12] == 12.0) def test_discontiguous(self): testpassed = False + _, b = self._create_arrays() try: - self.b.flat[4] = 100.0 + b.flat[4] = 100.0 except ValueError: testpassed = True assert_(testpassed) - assert_(self.b.flat[4] == 12.0) + assert_(b.flat[4] == 12.0) def test___array__(self): - c = self.a.flat.__array__() - d = self.b.flat.__array__() - e = self.a0.flat.__array__() - f = self.b0.flat.__array__() + a0 = np.arange(20.0) + a = a0.reshape(4, 5) + a0 = a0.reshape((4, 5)) + a.flags.writeable = False + b = a[::2, ::2] + b0 = a0[::2, ::2] + c = a.flat.__array__() + d = b.flat.__array__() + e = a0.flat.__array__() + f = b0.flat.__array__() assert_(c.flags.writeable is False) assert_(d.flags.writeable is False) @@ -6098,14 +6114,15 @@ def test___array__(self): @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") def test_refcount(self): # includes regression test for reference count error gh-13165 - inds = [np.intp(0), np.array([True] * self.a.size), np.array([0]), None] + a, _ = self._create_arrays() + inds = [np.intp(0), np.array([True] * a.size), np.array([0]), None] indtype = np.dtype(np.intp) rc_indtype = sys.getrefcount(indtype) for ind in inds: rc_ind = sys.getrefcount(ind) for _ in range(100): try: - self.a.flat[ind] + a.flat[ind] except IndexError: pass assert_(abs(sys.getrefcount(ind) - rc_ind) < 50) @@ -6381,12 +6398,12 @@ class TestStats: funcs = [_mean, _var, _std] - def setup_method(self): - np.random.seed(range(3)) - self.rmat = 
np.random.random((4, 5)) - self.cmat = self.rmat + 1j * self.rmat - self.omat = np.array([Decimal(str(r)) for r in self.rmat.flat]) - self.omat = self.omat.reshape(4, 5) + def _create_data(self): + rng = np.random.default_rng(range(3)) + rmat = rng.random((4, 5)) + cmat = rmat + 1j * rmat + omat = np.array([Decimal(str(r)) for r in rmat.flat]).reshape(4, 5) + return rmat, cmat, omat def test_python_type(self): for x in (np.float16(1.), 1, 1., 1 + 0j): @@ -6495,26 +6512,28 @@ def test_dtype_from_dtype(self): assert_(res is tgt) def test_ddof(self): + rmat, _, _ = self._create_data() for f in [_var]: for ddof in range(3): - dim = self.rmat.shape[1] - tgt = f(self.rmat, axis=1) * dim - res = f(self.rmat, axis=1, ddof=ddof) * (dim - ddof) + dim = rmat.shape[1] + tgt = f(rmat, axis=1) * dim + res = f(rmat, axis=1, ddof=ddof) * (dim - ddof) for f in [_std]: for ddof in range(3): - dim = self.rmat.shape[1] - tgt = f(self.rmat, axis=1) * np.sqrt(dim) - res = f(self.rmat, axis=1, ddof=ddof) * np.sqrt(dim - ddof) + dim = rmat.shape[1] + tgt = f(rmat, axis=1) * np.sqrt(dim) + res = f(rmat, axis=1, ddof=ddof) * np.sqrt(dim - ddof) assert_almost_equal(res, tgt) assert_almost_equal(res, tgt) def test_ddof_too_big(self): - dim = self.rmat.shape[1] + rmat, _, _ = self._create_data() + dim = rmat.shape[1] for f in [_var, _std]: for ddof in range(dim, dim + 2): with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') - res = f(self.rmat, axis=1, ddof=ddof) + res = f(rmat, axis=1, ddof=ddof) assert_(not (res < 0).any()) assert_(len(w) > 0) assert_(issubclass(w[0].category, RuntimeWarning)) @@ -6534,7 +6553,8 @@ def test_empty(self): assert_equal(f(A, axis=axis), np.zeros([])) def test_mean_values(self): - for mat in [self.rmat, self.cmat, self.omat]: + rmat, cmat, omat = self._create_data() + for mat in [rmat, cmat, omat]: for axis in [0, 1]: tgt = mat.sum(axis=axis) res = _mean(mat, axis=axis) * mat.shape[axis] @@ -6592,7 +6612,8 @@ def test_mean_where(self): 
assert_equal(np.mean(a, where=False), np.nan) def test_var_values(self): - for mat in [self.rmat, self.cmat, self.omat]: + rmat, cmat, omat = self._create_data() + for mat in [rmat, cmat, omat]: for axis in [0, 1, None]: msqr = _mean(mat * mat.conj(), axis=axis) mean = _mean(mat, axis=axis) @@ -6606,9 +6627,10 @@ def test_var_values(self): ('clongdouble', 7), )) def test_var_complex_values(self, complex_dtype, ndec): + _, cmat, _ = self._create_data() # Test fast-paths for every builtin complex type for axis in [0, 1, None]: - mat = self.cmat.copy().astype(complex_dtype) + mat = cmat.copy().astype(complex_dtype) msqr = _mean(mat * mat.conj(), axis=axis) mean = _mean(mat, axis=axis) tgt = msqr - mean * mean.conjugate() @@ -6618,7 +6640,8 @@ def test_var_complex_values(self, complex_dtype, ndec): def test_var_dimensions(self): # _var paths for complex number introduce additions on views that # increase dimensions. Ensure this generalizes to higher dims - mat = np.stack([self.cmat] * 3) + _, cmat, _ = self._create_data() + mat = np.stack([cmat] * 3) for axis in [0, 1, 2, -1, None]: msqr = _mean(mat * mat.conj(), axis=axis) mean = _mean(mat, axis=axis) @@ -6629,7 +6652,8 @@ def test_var_dimensions(self): def test_var_complex_byteorder(self): # Test that var fast-path does not cause failures for complex arrays # with non-native byteorder - cmat = self.cmat.copy().astype('complex128') + _, cmat, _ = self._create_data() + cmat = cmat.copy().astype('complex128') cmat_swapped = cmat.astype(cmat.dtype.newbyteorder()) assert_almost_equal(cmat.var(), cmat_swapped.var()) @@ -6677,7 +6701,8 @@ def test_var_where(self): assert_equal(np.var(a, where=False), np.nan) def test_std_values(self): - for mat in [self.rmat, self.cmat, self.omat]: + rmat, cmat, omat = self._create_data() + for mat in [rmat, cmat, omat]: for axis in [0, 1, None]: tgt = np.sqrt(_var(mat, axis=axis)) res = _std(mat, axis=axis) @@ -6808,61 +6833,63 @@ def test_vdot_uncontiguous(self): class TestDot: - def 
setup_method(self): - np.random.seed(128) - self.A = np.random.rand(4, 2) - self.b1 = np.random.rand(2, 1) - self.b2 = np.random.rand(2) - self.b3 = np.random.rand(1, 2) - self.b4 = np.random.rand(4) - self.N = 7 + N = 7 + + def _create_data(self): + rng = np.random.RandomState(128) + A = rng.random((4, 2)) + b1 = rng.random((2, 1)) + b2 = rng.random(2) + b3 = rng.random((1, 2)) + b4 = rng.random(4) + return A, b1, b2, b3, b4 def test_dotmatmat(self): - A = self.A + A, _, _, _, _ = self._create_data() res = np.dot(A.transpose(), A) tgt = np.array([[1.45046013, 0.86323640], [0.86323640, 0.84934569]]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotmatvec(self): - A, b1 = self.A, self.b1 + A, b1, _, _, _ = self._create_data() res = np.dot(A, b1) tgt = np.array([[0.32114320], [0.04889721], [0.15696029], [0.33612621]]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotmatvec2(self): - A, b2 = self.A, self.b2 + A, _, b2, _, _ = self._create_data() res = np.dot(A, b2) tgt = np.array([0.29677940, 0.04518649, 0.14468333, 0.31039293]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotvecmat(self): - A, b4 = self.A, self.b4 + A, _, _, _, b4 = self._create_data() res = np.dot(b4, A) tgt = np.array([1.23495091, 1.12222648]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotvecmat2(self): - b3, A = self.b3, self.A + A, _, _, b3, _ = self._create_data() res = np.dot(b3, A.transpose()) tgt = np.array([[0.58793804, 0.08957460, 0.30605758, 0.62716383]]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotvecmat3(self): - A, b4 = self.A, self.b4 + A, _, _, _, b4 = self._create_data() res = np.dot(A.transpose(), b4) tgt = np.array([1.23495091, 1.12222648]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotvecvecouter(self): - b1, b3 = self.b1, self.b3 + _, b1, _, b3, _ = self._create_data() res = np.dot(b1, b3) tgt = np.array([[0.20128610, 0.08400440], [0.07190947, 0.03001058]]) assert_almost_equal(res, tgt, decimal=self.N) def 
test_dotvecvecinner(self): - b1, b3 = self.b1, self.b3 + _, b1, _, b3, _ = self._create_data() res = np.dot(b3, b1) tgt = np.array([[0.23129668]]) assert_almost_equal(res, tgt, decimal=self.N) @@ -6882,17 +6909,17 @@ def test_dotcolumnvect2(self): assert_almost_equal(res, tgt, decimal=self.N) def test_dotvecscalar(self): - np.random.seed(100) - b1 = np.random.rand(1, 1) - b2 = np.random.rand(1, 4) + rng = np.random.RandomState(100) + b1 = rng.random((1, 1)) + b2 = rng.random((1, 4)) res = np.dot(b1, b2) tgt = np.array([[0.15126730, 0.23068496, 0.45905553, 0.00256425]]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotvecscalar2(self): - np.random.seed(100) - b1 = np.random.rand(4, 1) - b2 = np.random.rand(1, 1) + rng = np.random.RandomState(100) + b1 = rng.random((4, 1)) + b2 = rng.random((1, 1)) res = np.dot(b1, b2) tgt = np.array([[0.00256425], [0.00131359], [0.00200324], [0.00398638]]) assert_almost_equal(res, tgt, decimal=self.N) @@ -7709,23 +7736,27 @@ def test_3d_tensor(self): class TestChoose: - def setup_method(self): - self.x = 2 * np.ones((3,), dtype=int) - self.y = 3 * np.ones((3,), dtype=int) - self.x2 = 2 * np.ones((2, 3), dtype=int) - self.y2 = 3 * np.ones((2, 3), dtype=int) - self.ind = [0, 0, 1] + def _create_data(self): + x = 2 * np.ones((3,), dtype=int) + y = 3 * np.ones((3,), dtype=int) + x2 = 2 * np.ones((2, 3), dtype=int) + y2 = 3 * np.ones((2, 3), dtype=int) + ind = [0, 0, 1] + return x, y, x2, y2, ind def test_basic(self): - A = np.choose(self.ind, (self.x, self.y)) + x, y, _, _, ind = self._create_data() + A = np.choose(ind, (x, y)) assert_equal(A, [2, 2, 3]) def test_broadcast1(self): - A = np.choose(self.ind, (self.x2, self.y2)) + _, _, x2, y2, ind = self._create_data() + A = np.choose(ind, (x2, y2)) assert_equal(A, [[2, 2, 3], [2, 2, 3]]) def test_broadcast2(self): - A = np.choose(self.ind, (self.x, self.y2)) + x, _, _, y2, ind = self._create_data() + A = np.choose(ind, (x, y2)) assert_equal(A, [[2, 2, 3], [2, 2, 3]]) 
@pytest.mark.parametrize("ops", @@ -7755,38 +7786,43 @@ def test_dimension_and_args_limit(self): class TestRepeat: - def setup_method(self): - self.m = np.array([1, 2, 3, 4, 5, 6]) - self.m_rect = self.m.reshape((2, 3)) + def _create_data(self): + m = np.array([1, 2, 3, 4, 5, 6]) + m_rect = m.reshape((2, 3)) + return m, m_rect def test_basic(self): - A = np.repeat(self.m, [1, 3, 2, 1, 1, 2]) + m, _ = self._create_data() + A = np.repeat(m, [1, 3, 2, 1, 1, 2]) assert_equal(A, [1, 2, 2, 2, 3, 3, 4, 5, 6, 6]) def test_broadcast1(self): - A = np.repeat(self.m, 2) + m, _ = self._create_data() + A = np.repeat(m, 2) assert_equal(A, [1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6]) def test_axis_spec(self): - A = np.repeat(self.m_rect, [2, 1], axis=0) + _, m_rect = self._create_data() + A = np.repeat(m_rect, [2, 1], axis=0) assert_equal(A, [[1, 2, 3], [1, 2, 3], [4, 5, 6]]) - A = np.repeat(self.m_rect, [1, 3, 2], axis=1) + A = np.repeat(m_rect, [1, 3, 2], axis=1) assert_equal(A, [[1, 2, 2, 2, 3, 3], [4, 5, 5, 5, 6, 6]]) def test_broadcast2(self): - A = np.repeat(self.m_rect, 2, axis=0) + _, m_rect = self._create_data() + A = np.repeat(m_rect, 2, axis=0) assert_equal(A, [[1, 2, 3], [1, 2, 3], [4, 5, 6], [4, 5, 6]]) - A = np.repeat(self.m_rect, 2, axis=1) + A = np.repeat(m_rect, 2, axis=1) assert_equal(A, [[1, 1, 2, 2, 3, 3], [4, 4, 5, 5, 6, 6]]) diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py index 5e1c625955bb..815f7ac2930a 100644 --- a/numpy/testing/tests/test_utils.py +++ b/numpy/testing/tests/test_utils.py @@ -36,6 +36,9 @@ class _GenericTest: + def _assert_func(self, *args, **kwargs): + pass + def _test_equal(self, a, b): self._assert_func(a, b) @@ -82,8 +85,8 @@ def test_array_likes(self): class TestArrayEqual(_GenericTest): - def setup_method(self): - self._assert_func = assert_array_equal + def _assert_func(self, *args, **kwargs): + assert_array_equal(*args, **kwargs) def test_generic_rank1(self): """Test rank 1 array for all dtypes.""" @@ 
-389,8 +392,8 @@ def test_build_err_msg_custom_precision(self): class TestEqual(TestArrayEqual): - def setup_method(self): - self._assert_func = assert_equal + def _assert_func(self, *args, **kwargs): + assert_equal(*args, **kwargs) def test_nan_items(self): self._assert_func(np.nan, np.nan) @@ -484,8 +487,8 @@ def test_object(self): class TestArrayAlmostEqual(_GenericTest): - def setup_method(self): - self._assert_func = assert_array_almost_equal + def _assert_func(self, *args, **kwargs): + assert_array_almost_equal(*args, **kwargs) def test_closeness(self): # Note that in the course of time we ended up with @@ -700,8 +703,8 @@ def all(self, *args, **kwargs): class TestAlmostEqual(_GenericTest): - def setup_method(self): - self._assert_func = assert_almost_equal + def _assert_func(self, *args, **kwargs): + assert_almost_equal(*args, **kwargs) def test_closeness(self): # Note that in the course of time we ended up with @@ -870,8 +873,8 @@ def all(self, *args, **kwargs): class TestApproxEqual: - def setup_method(self): - self._assert_func = assert_approx_equal + def _assert_func(self, *args, **kwargs): + assert_approx_equal(*args, **kwargs) def test_simple_0d_arrays(self): x = np.array(1234.22) @@ -913,8 +916,8 @@ def test_nan_items(self): class TestArrayAssertLess: - def setup_method(self): - self._assert_func = assert_array_less + def _assert_func(self, *args, **kwargs): + assert_array_less(*args, **kwargs) def test_simple_arrays(self): x = np.array([1.1, 2.2]) From 57f1cf9ebdf3541aff57c9e982dd4d333fc96911 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 5 Sep 2025 17:06:15 +0000 Subject: [PATCH 0403/1718] MAINT: Bump github/codeql-action from 3.30.0 to 3.30.1 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.30.0 to 3.30.1. 
- [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/2d92b76c45b91eb80fc44c74ce3fce0ee94e8f9d...f1f6e5f6af878fb37288ce1c627459e94dbf7d01) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.30.1 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index ecdf8cfeb85e..363ad25f2e50 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@2d92b76c45b91eb80fc44c74ce3fce0ee94e8f9d # v3.30.0 + uses: github/codeql-action/init@f1f6e5f6af878fb37288ce1c627459e94dbf7d01 # v3.30.1 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@2d92b76c45b91eb80fc44c74ce3fce0ee94e8f9d # v3.30.0 + uses: github/codeql-action/autobuild@f1f6e5f6af878fb37288ce1c627459e94dbf7d01 # v3.30.1 # ℹ️ Command-line programs to run using the OS shell. 
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@2d92b76c45b91eb80fc44c74ce3fce0ee94e8f9d # v3.30.0 + uses: github/codeql-action/analyze@f1f6e5f6af878fb37288ce1c627459e94dbf7d01 # v3.30.1 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 19acbd619437..98cab6b712f6 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@2d92b76c45b91eb80fc44c74ce3fce0ee94e8f9d # v2.1.27 + uses: github/codeql-action/upload-sarif@f1f6e5f6af878fb37288ce1c627459e94dbf7d01 # v2.1.27 with: sarif_file: results.sarif From 1a9d7316cac3390e657718f3c2ffa0af0b35eef8 Mon Sep 17 00:00:00 2001 From: Samruddhi Baviskar <79337465+samruddhibaviskar11@users.noreply.github.com> Date: Sat, 6 Sep 2025 02:26:45 +0530 Subject: [PATCH 0404/1718] ENH: Use itertools.product for ndindex to improve performance (#29165) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * PERF: Rewrite ndindex using itertools.product for speed and memory efficiency * Refactor ndindex using itertools.product and add ASV benchmarks and expanded tests * Fix linting issues flagged by Ruff across ndindex implementation, tests, and benchmarks * Add release note and finalize ndindex improvements * BUG: Raise ValueError for negative dimensions in ndindex * MAINT: Expose ndindex in numpy.lib for public usage and benchmarks * MAINT: Fix ndindex __module__ attribute for public API test * Remove numpy.distutils tests after dropping distutils in NumPy 2.0 * Format imports in numpy.lib.__init__.py with Ruff * Set ndindex.__module__ = 
'numpy.lib' to match test expectation * BUG: Raise ValueError for negative dimensions in ndindex; MAINT: Remove ndindex from numpy.lib public API; TEST: Update negative dimension test * Revert distutils test deletions as requested * Revert numpy/lib/__init__.py to upstream/main state as requested * lib: ndindex—handle empty shape and fix implementation bugs - Use min(..., default=0) to handle empty shapes safely. - Fix cascade issues in ndindex implementation. - Add regression tests to cover empty shapes and previous bugs. * TST: remove imports to fix lint for test_index_tricks.py * STYLE: Remove ruff formatting and lint fixes * REF: ndindex test cases — added functional tests and applied Ruff fixes * Add benchmark comparing ndindex vs itertools.product * MAINT: remove unused in ndindex * DOC: clarify release note for ndindex performance improvement * trigger CI rebuild --- benchmarks/benchmarks/bench_ndindex.py | 54 ++++++++ .../upcoming_changes/29165.performance.rst | 7 + numpy/lib/_index_tricks_impl.py | 12 +- numpy/lib/tests/test_index_tricks.py | 120 ++++++++++++++++++ 4 files changed, 186 insertions(+), 7 deletions(-) create mode 100644 benchmarks/benchmarks/bench_ndindex.py create mode 100644 doc/release/upcoming_changes/29165.performance.rst diff --git a/benchmarks/benchmarks/bench_ndindex.py b/benchmarks/benchmarks/bench_ndindex.py new file mode 100644 index 000000000000..132d4eeed472 --- /dev/null +++ b/benchmarks/benchmarks/bench_ndindex.py @@ -0,0 +1,54 @@ +from itertools import product + +import numpy as np + +from .common import Benchmark + + +class NdindexBenchmark(Benchmark): + """ + Benchmark comparing numpy.ndindex() and itertools.product() + for different multi-dimensional shapes. 
+ """ + + # Fix: Define each dimension separately, not as tuples + # ASV will pass each parameter list element to setup() + params = [ + [(10, 10), (20, 20), (50, 50), (10, 10, 10), (20, 30, 40), (50, 60, 90)] + ] + param_names = ["shape"] + + def setup(self, shape): + """Setup method called before each benchmark run.""" + # Access ndindex through NumPy's main namespace + self.ndindex = np.ndindex + + def time_ndindex(self, shape): + """ + Measure time taken by np.ndindex. + It creates an iterator that goes over each index. + """ + for _ in self.ndindex(*shape): + pass # Just loop through, no work inside + + def time_itertools_product(self, shape): + """ + Measure time taken by itertools.product. + Same goal: iterate over all index positions. + """ + for _ in product(*(range(s) for s in shape)): + pass + + def peakmem_ndindex(self, shape): + """ + Measure peak memory used when fully consuming + np.ndindex iterator by converting it to a list. + """ + return list(self.ndindex(*shape)) + + def peakmem_itertools_product(self, shape): + """ + Measure peak memory used when fully consuming + itertools.product iterator by converting it to a list. + """ + return list(product(*(range(s) for s in shape))) diff --git a/doc/release/upcoming_changes/29165.performance.rst b/doc/release/upcoming_changes/29165.performance.rst new file mode 100644 index 000000000000..4e1a9a4ecdbc --- /dev/null +++ b/doc/release/upcoming_changes/29165.performance.rst @@ -0,0 +1,7 @@ +Rewrite of `np.ndindex` using `itertools.product` +-------------------------------------------------- +The `numpy.ndindex` function now uses `itertools.product` internally, +providing significant improvements in performance for large iteration spaces, +while maintaining the original behavior and interface. +For example, for an array of shape (50, 60, 90) the NumPy `ndindex` +benchmark improves performance by a factor 5.2. 
\ No newline at end of file diff --git a/numpy/lib/_index_tricks_impl.py b/numpy/lib/_index_tricks_impl.py index 131bbae5d098..a0d04ad3285f 100644 --- a/numpy/lib/_index_tricks_impl.py +++ b/numpy/lib/_index_tricks_impl.py @@ -2,6 +2,7 @@ import math import sys import warnings +from itertools import product import numpy as np import numpy._core.numeric as _nx @@ -12,7 +13,6 @@ from numpy._core.numerictypes import issubdtype from numpy._utils import set_module from numpy.lib._function_base_impl import diff -from numpy.lib.stride_tricks import as_strided array_function_dispatch = functools.partial( overrides.array_function_dispatch, module='numpy') @@ -688,10 +688,9 @@ class ndindex: def __init__(self, *shape): if len(shape) == 1 and isinstance(shape[0], tuple): shape = shape[0] - x = as_strided(_nx.zeros(1), shape=shape, - strides=_nx.zeros_like(shape)) - self._it = _nx.nditer(x, flags=['multi_index', 'zerosize_ok'], - order='C') + if min(shape, default=0) < 0: + raise ValueError("negative dimensions are not allowed") + self._iter = product(*map(range, shape)) def __iter__(self): return self @@ -724,8 +723,7 @@ def __next__(self): iteration. 
""" - next(self._it) - return self._it.multi_index + return next(self._iter) # You can do all this with slice() plus a few special objects, diff --git a/numpy/lib/tests/test_index_tricks.py b/numpy/lib/tests/test_index_tricks.py index ed8709db5238..387fdfec28f1 100644 --- a/numpy/lib/tests/test_index_tricks.py +++ b/numpy/lib/tests/test_index_tricks.py @@ -566,3 +566,123 @@ def test_ndindex(): # Make sure 0-sized ndindex works correctly x = list(ndindex(*[0])) assert_equal(x, []) + + +def test_ndindex_zero_dimensions_explicit(): + """Test ndindex produces empty iterators for explicit + zero-length dimensions.""" + assert list(np.ndindex(0, 3)) == [] + assert list(np.ndindex(3, 0, 2)) == [] + assert list(np.ndindex(0)) == [] + + +@pytest.mark.parametrize("bad_shape", [2.5, "2", [2, 3], (2.0, 3)]) +def test_ndindex_non_integer_dimensions(bad_shape): + """Test that non-integer dimensions raise TypeError.""" + with pytest.raises(TypeError): + # Passing invalid_shape_arg directly to ndindex. It will try to use it + # as a dimension and should trigger a TypeError. 
+ list(np.ndindex(bad_shape)) + + +def test_ndindex_stop_iteration_behavior(): + """Test that StopIteration is raised properly after exhaustion.""" + it = np.ndindex(2, 2) + # Exhaust the iterator + list(it) + # Should raise StopIteration on subsequent calls + with pytest.raises(StopIteration): + next(it) + + +def test_ndindex_iterator_independence(): + """Test that each ndindex instance creates independent iterators.""" + shape = (2, 3) + iter1 = np.ndindex(*shape) + iter2 = np.ndindex(*shape) + + next(iter1) + next(iter1) + + assert_equal(next(iter2), (0, 0)) + assert_equal(next(iter1), (0, 2)) + + +def test_ndindex_tuple_vs_args_consistency(): + """Test that ndindex(shape) and ndindex(*shape) produce same results.""" + # Single dimension + assert_equal(list(np.ndindex(5)), list(np.ndindex((5,)))) + + # Multiple dimensions + assert_equal(list(np.ndindex(2, 3)), list(np.ndindex((2, 3)))) + + # Complex shape + shape = (2, 1, 4) + assert_equal(list(np.ndindex(*shape)), list(np.ndindex(shape))) + + +def test_ndindex_against_ndenumerate_compatibility(): + """Test ndindex produces same indices as ndenumerate.""" + for shape in [(1, 2, 3), (3,), (2, 2), ()]: + ndindex_result = list(np.ndindex(shape)) + ndenumerate_indices = [ix for ix, _ in np.ndenumerate(np.zeros(shape))] + assert_array_equal(ndindex_result, ndenumerate_indices) + + +def test_ndindex_multidimensional_correctness(): + """Test ndindex produces correct indices for multidimensional arrays.""" + shape = (2, 1, 3) + result = list(np.ndindex(*shape)) + expected = [ + (0, 0, 0), + (0, 0, 1), + (0, 0, 2), + (1, 0, 0), + (1, 0, 1), + (1, 0, 2), + ] + assert_equal(result, expected) + + +def test_ndindex_large_dimensions_behavior(): + """Test ndindex behaves correctly when initialized with large dimensions.""" + large_shape = (1000, 1000) + iter_obj = np.ndindex(*large_shape) + first_element = next(iter_obj) + assert_equal(first_element, (0, 0)) + + +def test_ndindex_empty_iterator_behavior(): + """Test detailed 
behavior of empty iterators.""" + empty_iter = np.ndindex(0, 5) + assert_equal(list(empty_iter), []) + + empty_iter2 = np.ndindex(3, 0, 2) + with pytest.raises(StopIteration): + next(empty_iter2) + + +@pytest.mark.parametrize( + "negative_shape_arg", + [ + (-1,), # Single negative dimension + (2, -3, 4), # Negative dimension in the middle + (5, 0, -2), # Mix of valid (0) and invalid (negative) dimensions + ], +) +def test_ndindex_negative_dimensions(negative_shape_arg): + """Test that negative dimensions raise ValueError.""" + with pytest.raises(ValueError): + ndindex(negative_shape_arg) + + +def test_ndindex_empty_shape(): + import numpy as np + # ndindex() and ndindex(()) should return a single empty tuple + assert list(np.ndindex()) == [()] + assert list(np.ndindex(())) == [()] + +def test_ndindex_negative_dim_raises(): + # ndindex(-1) should raise a ValueError + with pytest.raises(ValueError): + list(np.ndindex(-1)) From facd503f42b96e40f6d97989174db5ceec56f6d7 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 5 Sep 2025 23:18:57 +0200 Subject: [PATCH 0405/1718] TYP: fix ``np.bool`` method declarations --- numpy/__init__.pyi | 216 ++++++++++++++++--- numpy/_typing/_callable.pyi | 77 +------ numpy/typing/tests/data/fail/arithmetic.pyi | 2 +- numpy/typing/tests/data/fail/bitwise_ops.pyi | 2 +- 4 files changed, 192 insertions(+), 105 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 8dc1daf7f797..65a60f2b2b9c 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -134,12 +134,6 @@ from numpy._typing import ( ) from numpy._typing._callable import ( - _BoolOp, - _BoolBitOp, - _BoolSub, - _BoolTrueDiv, - _BoolMod, - _BoolDivMod, _IntTrueDiv, _UnsignedIntOp, _UnsignedIntBitOp, @@ -803,6 +797,7 @@ _1NShapeT = TypeVar("_1NShapeT", bound=tuple[L[1], *tuple[L[1], ...]]) # (1,) | _ScalarT = TypeVar("_ScalarT", bound=generic) _ScalarT_co = TypeVar("_ScalarT_co", bound=generic, default=Any, covariant=True) _NumberT = TypeVar("_NumberT", 
bound=number) +_InexactT = TypeVar("_InexactT", bound=inexact) _RealNumberT = TypeVar("_RealNumberT", bound=floating | integer) _FloatingT_co = TypeVar("_FloatingT_co", bound=floating, default=floating, covariant=True) _IntegerT = TypeVar("_IntegerT", bound=integer) @@ -3898,12 +3893,14 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): def __new__(cls, value: object, /) -> np.bool[builtins.bool]: ... def __bool__(self, /) -> _BoolItemT_co: ... + @overload def __int__(self: np.bool[L[False]], /) -> L[0]: ... @overload def __int__(self: np.bool[L[True]], /) -> L[1]: ... @overload def __int__(self, /) -> L[0, 1]: ... + def __abs__(self) -> Self: ... @overload @@ -3913,23 +3910,191 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @overload def __invert__(self, /) -> np.bool: ... - __add__: _BoolOp[np.bool] - __radd__: _BoolOp[np.bool] - __sub__: _BoolSub - __rsub__: _BoolSub - __mul__: _BoolOp[np.bool] - __rmul__: _BoolOp[np.bool] - __truediv__: _BoolTrueDiv - __rtruediv__: _BoolTrueDiv - __floordiv__: _BoolOp[int8] - __rfloordiv__: _BoolOp[int8] - __pow__: _BoolOp[int8] - __rpow__: _BoolOp[int8] - - __lshift__: _BoolBitOp[int8] - __rlshift__: _BoolBitOp[int8] - __rshift__: _BoolBitOp[int8] - __rrshift__: _BoolBitOp[int8] + @overload + def __add__(self, other: _NumberT, /) -> _NumberT: ... + @overload + def __add__(self, other: builtins.bool | bool_, /) -> bool_: ... + @overload + def __add__(self, other: int, /) -> int_: ... + @overload + def __add__(self, other: float, /) -> float64: ... + @overload + def __add__(self, other: complex, /) -> complex128: ... + + @overload + def __radd__(self, other: _NumberT, /) -> _NumberT: ... + @overload + def __radd__(self, other: builtins.bool, /) -> bool_: ... + @overload + def __radd__(self, other: int, /) -> int_: ... + @overload + def __radd__(self, other: float, /) -> float64: ... + @overload + def __radd__(self, other: complex, /) -> complex128: ... 
+ + @overload + def __sub__(self, other: _NumberT, /) -> _NumberT: ... + @overload + def __sub__(self, other: int, /) -> int_: ... + @overload + def __sub__(self, other: float, /) -> float64: ... + @overload + def __sub__(self, other: complex, /) -> complex128: ... + + @overload + def __rsub__(self, other: _NumberT, /) -> _NumberT: ... + @overload + def __rsub__(self, other: int, /) -> int_: ... + @overload + def __rsub__(self, other: float, /) -> float64: ... + @overload + def __rsub__(self, other: complex, /) -> complex128: ... + + @overload + def __mul__(self, other: _NumberT, /) -> _NumberT: ... + @overload + def __mul__(self, other: builtins.bool | bool_, /) -> bool_: ... + @overload + def __mul__(self, other: int, /) -> int_: ... + @overload + def __mul__(self, other: float, /) -> float64: ... + @overload + def __mul__(self, other: complex, /) -> complex128: ... + + @overload + def __rmul__(self, other: _NumberT, /) -> _NumberT: ... + @overload + def __rmul__(self, other: builtins.bool, /) -> bool_: ... + @overload + def __rmul__(self, other: int, /) -> int_: ... + @overload + def __rmul__(self, other: float, /) -> float64: ... + @overload + def __rmul__(self, other: complex, /) -> complex128: ... + + @overload + def __pow__(self, other: _NumberT, mod: None = None, /) -> _NumberT: ... + @overload + def __pow__(self, other: builtins.bool | bool_, mod: None = None, /) -> int8: ... + @overload + def __pow__(self, other: int, mod: None = None, /) -> int_: ... + @overload + def __pow__(self, other: float, mod: None = None, /) -> float64: ... + @overload + def __pow__(self, other: complex, mod: None = None, /) -> complex128: ... + + @overload + def __rpow__(self, other: _NumberT, mod: None = None, /) -> _NumberT: ... + @overload + def __rpow__(self, other: builtins.bool, mod: None = None, /) -> int8: ... + @overload + def __rpow__(self, other: int, mod: None = None, /) -> int_: ... + @overload + def __rpow__(self, other: float, mod: None = None, /) -> float64: ... 
+ @overload + def __rpow__(self, other: complex, mod: None = None, /) -> complex128: ... + + @overload + def __truediv__(self, other: _InexactT, /) -> _InexactT: ... + @overload + def __truediv__(self, other: float | integer | bool_, /) -> float64: ... + @overload + def __truediv__(self, other: complex, /) -> complex128: ... + + @overload + def __rtruediv__(self, other: _InexactT, /) -> _InexactT: ... + @overload + def __rtruediv__(self, other: float | integer, /) -> float64: ... + @overload + def __rtruediv__(self, other: complex, /) -> complex128: ... + + @overload + def __floordiv__(self, other: _RealNumberT, /) -> _RealNumberT: ... + @overload + def __floordiv__(self, other: builtins.bool | bool_, /) -> int8: ... + @overload + def __floordiv__(self, other: int, /) -> int_: ... + @overload + def __floordiv__(self, other: float, /) -> float64: ... + + @overload + def __rfloordiv__(self, other: _RealNumberT, /) -> _RealNumberT: ... + @overload + def __rfloordiv__(self, other: builtins.bool, /) -> int8: ... + @overload + def __rfloordiv__(self, other: int, /) -> int_: ... + @overload + def __rfloordiv__(self, other: float, /) -> float64: ... + + # keep in sync with __floordiv__ + @overload + def __mod__(self, other: _RealNumberT, /) -> _RealNumberT: ... + @overload + def __mod__(self, other: builtins.bool | bool_, /) -> int8: ... + @overload + def __mod__(self, other: int, /) -> int_: ... + @overload + def __mod__(self, other: float, /) -> float64: ... + + # keep in sync with __rfloordiv__ + @overload + def __rmod__(self, other: _RealNumberT, /) -> _RealNumberT: ... + @overload + def __rmod__(self, other: builtins.bool, /) -> int8: ... + @overload + def __rmod__(self, other: int, /) -> int_: ... + @overload + def __rmod__(self, other: float, /) -> float64: ... + + # keep in sync with __mod__ + @overload + def __divmod__(self, other: _RealNumberT, /) -> _2Tuple[_RealNumberT]: ... 
+ @overload + def __divmod__(self, other: builtins.bool | bool_, /) -> _2Tuple[int8]: ... + @overload + def __divmod__(self, other: int, /) -> _2Tuple[int_]: ... + @overload + def __divmod__(self, other: float, /) -> _2Tuple[float64]: ... + + # keep in sync with __rmod__ + @overload + def __rdivmod__(self, other: _RealNumberT, /) -> _2Tuple[_RealNumberT]: ... + @overload + def __rdivmod__(self, other: builtins.bool, /) -> _2Tuple[int8]: ... + @overload + def __rdivmod__(self, other: int, /) -> _2Tuple[int_]: ... + @overload + def __rdivmod__(self, other: float, /) -> _2Tuple[float64]: ... + + @overload + def __lshift__(self, other: _IntegerT, /) -> _IntegerT: ... + @overload + def __lshift__(self, other: builtins.bool | bool_, /) -> int8: ... + @overload + def __lshift__(self, other: int, /) -> int_: ... + + @overload + def __rlshift__(self, other: _IntegerT, /) -> _IntegerT: ... + @overload + def __rlshift__(self, other: builtins.bool, /) -> int8: ... + @overload + def __rlshift__(self, other: int, /) -> int_: ... + + # keep in sync with __lshift__ + @overload + def __rshift__(self, other: _IntegerT, /) -> _IntegerT: ... + @overload + def __rshift__(self, other: builtins.bool | bool_, /) -> int8: ... + @overload + def __rshift__(self, other: int, /) -> int_: ... + + # keep in sync with __rlshift__ + @overload + def __rrshift__(self, other: _IntegerT, /) -> _IntegerT: ... + @overload + def __rrshift__(self, other: builtins.bool, /) -> int8: ... + @overload + def __rrshift__(self, other: int, /) -> int_: ... @overload def __and__(self: np.bool[L[False]], other: builtins.bool | np.bool, /) -> np.bool[L[False]]: ... @@ -3973,11 +4138,6 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): def __or__(self, other: int, /) -> np.bool | intp: ... 
__ror__ = __or__ - __mod__: _BoolMod - __rmod__: _BoolMod - __divmod__: _BoolDivMod - __rdivmod__: _BoolDivMod - __lt__: _ComparisonOpLT[_NumberLike_co, _ArrayLikeNumber_co] __le__: _ComparisonOpLE[_NumberLike_co, _ArrayLikeNumber_co] __gt__: _ComparisonOpGT[_NumberLike_co, _ArrayLikeNumber_co] diff --git a/numpy/_typing/_callable.pyi b/numpy/_typing/_callable.pyi index 1ce7de5bb423..5bb8beb81871 100644 --- a/numpy/_typing/_callable.pyi +++ b/numpy/_typing/_callable.pyi @@ -40,12 +40,11 @@ from ._nbit import _NBitInt from ._nested_sequence import _NestedSequence from ._scalars import _BoolLike_co, _IntLike_co, _NumberLike_co -_T1 = TypeVar("_T1") -_T2 = TypeVar("_T2") +_T = TypeVar("_T") _T1_contra = TypeVar("_T1_contra", contravariant=True) _T2_contra = TypeVar("_T2_contra", contravariant=True) -_2Tuple: TypeAlias = tuple[_T1, _T1] +_2Tuple: TypeAlias = tuple[_T, _T] _NBit1 = TypeVar("_NBit1", bound=NBitBase) _NBit2 = TypeVar("_NBit2", bound=NBitBase) @@ -53,80 +52,8 @@ _NBit2 = TypeVar("_NBit2", bound=NBitBase) _IntType = TypeVar("_IntType", bound=integer) _FloatType = TypeVar("_FloatType", bound=floating) _NumberType = TypeVar("_NumberType", bound=number) -_NumberType_co = TypeVar("_NumberType_co", covariant=True, bound=number) _GenericType_co = TypeVar("_GenericType_co", covariant=True, bound=generic) -@type_check_only -class _BoolOp(Protocol[_GenericType_co]): - @overload - def __call__(self, other: _BoolLike_co, /) -> _GenericType_co: ... - @overload # platform dependent - def __call__(self, other: int, /) -> int_: ... - @overload - def __call__(self, other: float, /) -> float64: ... - @overload - def __call__(self, other: complex, /) -> complex128: ... - @overload - def __call__(self, other: _NumberType, /) -> _NumberType: ... - -@type_check_only -class _BoolBitOp(Protocol[_GenericType_co]): - @overload - def __call__(self, other: _BoolLike_co, /) -> _GenericType_co: ... - @overload # platform dependent - def __call__(self, other: int, /) -> int_: ... 
- @overload - def __call__(self, other: _IntType, /) -> _IntType: ... - -@type_check_only -class _BoolSub(Protocol): - # Note that `other: bool` is absent here - @overload - def __call__(self, other: bool, /) -> NoReturn: ... - @overload # platform dependent - def __call__(self, other: int, /) -> int_: ... - @overload - def __call__(self, other: float, /) -> float64: ... - @overload - def __call__(self, other: complex, /) -> complex128: ... - @overload - def __call__(self, other: _NumberType, /) -> _NumberType: ... - -@type_check_only -class _BoolTrueDiv(Protocol): - @overload - def __call__(self, other: float | _IntLike_co, /) -> float64: ... - @overload - def __call__(self, other: complex, /) -> complex128: ... - @overload - def __call__(self, other: _NumberType, /) -> _NumberType: ... - -@type_check_only -class _BoolMod(Protocol): - @overload - def __call__(self, other: _BoolLike_co, /) -> int8: ... - @overload # platform dependent - def __call__(self, other: int, /) -> int_: ... - @overload - def __call__(self, other: float, /) -> float64: ... - @overload - def __call__(self, other: _IntType, /) -> _IntType: ... - @overload - def __call__(self, other: _FloatType, /) -> _FloatType: ... - -@type_check_only -class _BoolDivMod(Protocol): - @overload - def __call__(self, other: _BoolLike_co, /) -> _2Tuple[int8]: ... - @overload # platform dependent - def __call__(self, other: int, /) -> _2Tuple[int_]: ... - @overload - def __call__(self, other: float, /) -> _2Tuple[np.float64]: ... - @overload - def __call__(self, other: _IntType, /) -> _2Tuple[_IntType]: ... - @overload - def __call__(self, other: _FloatType, /) -> _2Tuple[_FloatType]: ... 
- @type_check_only class _IntTrueDiv(Protocol[_NBit1]): @overload diff --git a/numpy/typing/tests/data/fail/arithmetic.pyi b/numpy/typing/tests/data/fail/arithmetic.pyi index d62906ae87d9..a68df2ea53c3 100644 --- a/numpy/typing/tests/data/fail/arithmetic.pyi +++ b/numpy/typing/tests/data/fail/arithmetic.pyi @@ -114,7 +114,7 @@ AR_f **= AR_LIKE_c # type: ignore[arg-type] # Scalars -b_ - b_ # type: ignore[call-overload] +b_ - b_ # type: ignore[operator] dt + dt # type: ignore[operator] td - dt # type: ignore[operator] diff --git a/numpy/typing/tests/data/fail/bitwise_ops.pyi b/numpy/typing/tests/data/fail/bitwise_ops.pyi index 29dfe79287ad..fe51f15adc52 100644 --- a/numpy/typing/tests/data/fail/bitwise_ops.pyi +++ b/numpy/typing/tests/data/fail/bitwise_ops.pyi @@ -8,7 +8,7 @@ i = 0 f8 = np.float64() -b_ >> f8 # type: ignore[call-overload] +b_ >> f8 # type: ignore[operator] i8 << f8 # type: ignore[call-overload] i | f8 # type: ignore[operator] i8 ^ f8 # type: ignore[call-overload] From 79080f1419f3dc4ce154ff5cc4acaee5d6b39dc0 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 6 Sep 2025 06:18:46 +0200 Subject: [PATCH 0406/1718] TYP: appease ruff --- numpy/__init__.pyi | 14 +++++++------- numpy/_typing/_callable.pyi | 14 ++------------ 2 files changed, 9 insertions(+), 19 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 65a60f2b2b9c..1cb5790f3dfb 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -3893,7 +3893,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): def __new__(cls, value: object, /) -> np.bool[builtins.bool]: ... def __bool__(self, /) -> _BoolItemT_co: ... - + @overload def __int__(self: np.bool[L[False]], /) -> L[0]: ... @overload @@ -3924,7 +3924,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @overload def __radd__(self, other: _NumberT, /) -> _NumberT: ... @overload - def __radd__(self, other: builtins.bool, /) -> bool_: ... + def __radd__(self, other: builtins.bool, /) -> bool_: ... 
@overload def __radd__(self, other: int, /) -> int_: ... @overload @@ -3934,7 +3934,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @overload def __sub__(self, other: _NumberT, /) -> _NumberT: ... - @overload + @overload def __sub__(self, other: int, /) -> int_: ... @overload def __sub__(self, other: float, /) -> float64: ... @@ -3943,7 +3943,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @overload def __rsub__(self, other: _NumberT, /) -> _NumberT: ... - @overload + @overload def __rsub__(self, other: int, /) -> int_: ... @overload def __rsub__(self, other: float, /) -> float64: ... @@ -3964,7 +3964,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @overload def __rmul__(self, other: _NumberT, /) -> _NumberT: ... @overload - def __rmul__(self, other: builtins.bool, /) -> bool_: ... + def __rmul__(self, other: builtins.bool, /) -> bool_: ... @overload def __rmul__(self, other: int, /) -> int_: ... @overload @@ -3986,7 +3986,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @overload def __rpow__(self, other: _NumberT, mod: None = None, /) -> _NumberT: ... @overload - def __rpow__(self, other: builtins.bool, mod: None = None, /) -> int8: ... + def __rpow__(self, other: builtins.bool, mod: None = None, /) -> int8: ... @overload def __rpow__(self, other: int, mod: None = None, /) -> int_: ... @overload @@ -4020,7 +4020,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @overload def __rfloordiv__(self, other: _RealNumberT, /) -> _RealNumberT: ... @overload - def __rfloordiv__(self, other: builtins.bool, /) -> int8: ... + def __rfloordiv__(self, other: builtins.bool, /) -> int8: ... @overload def __rfloordiv__(self, other: int, /) -> int_: ... 
@overload diff --git a/numpy/_typing/_callable.pyi b/numpy/_typing/_callable.pyi index 5bb8beb81871..647d1a5afb56 100644 --- a/numpy/_typing/_callable.pyi +++ b/numpy/_typing/_callable.pyi @@ -8,16 +8,7 @@ See the `Mypy documentation`_ on protocols for more details. """ -from typing import ( - Any, - NoReturn, - Protocol, - TypeAlias, - TypeVar, - final, - overload, - type_check_only, -) +from typing import Any, Protocol, TypeAlias, TypeVar, final, overload, type_check_only import numpy as np from numpy import ( @@ -26,7 +17,6 @@ from numpy import ( float64, floating, generic, - int8, int_, integer, number, @@ -38,7 +28,7 @@ from . import NBitBase from ._array_like import NDArray from ._nbit import _NBitInt from ._nested_sequence import _NestedSequence -from ._scalars import _BoolLike_co, _IntLike_co, _NumberLike_co +from ._scalars import _NumberLike_co _T = TypeVar("_T") _T1_contra = TypeVar("_T1_contra", contravariant=True) From 1bcc509d5224cc729ea6c3229d1924b94cc0f2b0 Mon Sep 17 00:00:00 2001 From: Sanjay Kumar Sakamuri Kamalakar Date: Sat, 6 Sep 2025 17:16:34 +0530 Subject: [PATCH 0407/1718] Update VXE and VXE2 detection regex patterns --- meson_cpu/s390x/meson.build | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/meson_cpu/s390x/meson.build b/meson_cpu/s390x/meson.build index a69252d1607c..b7a420c27f0d 100644 --- a/meson_cpu/s390x/meson.build +++ b/meson_cpu/s390x/meson.build @@ -7,12 +7,12 @@ VX = mod_features.new( ) VXE = mod_features.new( 'VXE', 2, implies: VX, args: {'val': '-march=arch12', 'match': '-march=.*'}, - detect: {'val': 'VXE', 'match': 'VX'}, + detect: {'val': 'VXE', 'match': '\\bvxe\\b'}, test_code: files(source_root + '/numpy/distutils/checks/cpu_vxe.c')[0], ) VXE2 = mod_features.new( 'VXE2', 3, implies: VXE, args: {'val': '-march=arch13', 'match': '-march=.*'}, - detect: {'val': 'VXE2', 'match': 'VX.*'}, + detect: {'val': 'VXE2', 'match': '\\bvxe2\\b'}, test_code: files(source_root + 
'/numpy/distutils/checks/cpu_vxe2.c')[0], ) S390X_FEATURES = {'VX': VX, 'VXE': VXE, 'VXE2': VXE2} From 3e396702d1718b374ee8cddd39d8a56fcf04a3b3 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 7 Sep 2025 01:42:36 +0200 Subject: [PATCH 0408/1718] TYP: fix ``np.number`` method declarations --- numpy/__init__.pyi | 61 +++++++++++++++++++++++++++++++--------------- 1 file changed, 42 insertions(+), 19 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 1cb5790f3dfb..9ae8664da0f1 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -134,7 +134,6 @@ from numpy._typing import ( ) from numpy._typing._callable import ( - _IntTrueDiv, _UnsignedIntOp, _UnsignedIntBitOp, _UnsignedIntMod, @@ -146,11 +145,14 @@ from numpy._typing._callable import ( _FloatOp, _FloatMod, _FloatDivMod, - _NumberOp, _ComparisonOpLT, _ComparisonOpLE, _ComparisonOpGT, _ComparisonOpGE, + _SupportsLT, + _SupportsLE, + _SupportsGT, + _SupportsGE, ) # NOTE: Numpy's mypy plugin is used for removing the types unavailable to the specific platform @@ -211,7 +213,7 @@ from typing import ( # library include `typing_extensions` stubs: # https://github.com/python/typeshed/blob/main/stdlib/typing_extensions.pyi from _typeshed import Incomplete, StrOrBytesPath, SupportsFlush, SupportsLenAndGetItem, SupportsWrite -from typing_extensions import CapsuleType, TypeVar, deprecated +from typing_extensions import CapsuleType, TypeVar, deprecated, override from numpy import ( char, @@ -3855,23 +3857,44 @@ class number(generic[_NumberItemT_co], Generic[_NBit, _NumberItemT_co]): def __pos__(self) -> Self: ... def __abs__(self) -> Self: ... - __add__: _NumberOp - __radd__: _NumberOp - __sub__: _NumberOp - __rsub__: _NumberOp - __mul__: _NumberOp - __rmul__: _NumberOp - __floordiv__: _NumberOp - __rfloordiv__: _NumberOp - __pow__: _NumberOp - __rpow__: _NumberOp - __truediv__: _NumberOp - __rtruediv__: _NumberOp + def __add__(self, other: _NumberLike_co, /) -> Incomplete: ... 
+ def __radd__(self, other: _NumberLike_co, /) -> Incomplete: ... + def __sub__(self, other: _NumberLike_co, /) -> Incomplete: ... + def __rsub__(self, other: _NumberLike_co, /) -> Incomplete: ... + def __mul__(self, other: _NumberLike_co, /) -> Incomplete: ... + def __rmul__(self, other: _NumberLike_co, /) -> Incomplete: ... + def __pow__(self, other: _NumberLike_co, /) -> Incomplete: ... + def __rpow__(self, other: _NumberLike_co, /) -> Incomplete: ... + def __truediv__(self, other: _NumberLike_co, /) -> Incomplete: ... + def __rtruediv__(self, other: _NumberLike_co, /) -> Incomplete: ... - __lt__: _ComparisonOpLT[_NumberLike_co, _ArrayLikeNumber_co] - __le__: _ComparisonOpLE[_NumberLike_co, _ArrayLikeNumber_co] - __gt__: _ComparisonOpGT[_NumberLike_co, _ArrayLikeNumber_co] - __ge__: _ComparisonOpGE[_NumberLike_co, _ArrayLikeNumber_co] + @overload + def __lt__(self, other: _NumberLike_co, /) -> bool_: ... + @overload + def __lt__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsGT], /) -> NDArray[bool_]: ... + @overload + def __lt__(self, other: _SupportsGT, /) -> bool_: ... + + @overload + def __le__(self, other: _NumberLike_co, /) -> bool_: ... + @overload + def __le__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsGE], /) -> NDArray[bool_]: ... + @overload + def __le__(self, other: _SupportsGE, /) -> bool_: ... + + @overload + def __gt__(self, other: _NumberLike_co, /) -> bool_: ... + @overload + def __gt__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsLT], /) -> NDArray[bool_]: ... + @overload + def __gt__(self, other: _SupportsLT, /) -> bool_: ... + + @overload + def __ge__(self, other: _NumberLike_co, /) -> bool_: ... + @overload + def __ge__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsLE], /) -> NDArray[bool_]: ... + @overload + def __ge__(self, other: _SupportsLE, /) -> bool_: ... 
class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @property From 5d7e9cc522e2379d7b66f8b9593adb0abc2c16a6 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 7 Sep 2025 01:43:11 +0200 Subject: [PATCH 0409/1718] TYP: fix ``np.integer`` method declarations --- numpy/__init__.pyi | 16 ++++++++++++++-- numpy/_typing/_callable.pyi | 29 ----------------------------- 2 files changed, 14 insertions(+), 31 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 9ae8664da0f1..f0470e94e7e8 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -4204,8 +4204,20 @@ class integer(_IntegralMixin, _RoundMixin, number[_NBit, int]): def __index__(self, /) -> int: ... def __invert__(self, /) -> Self: ... - __truediv__: _IntTrueDiv[_NBit] - __rtruediv__: _IntTrueDiv[_NBit] + @override # type: ignore[override] + @overload + def __truediv__(self, other: float | integer, /) -> float64: ... + @overload + def __truediv__(self, other: complex, /) -> complex128: ... + + @override # type: ignore[override] + @overload + def __rtruediv__(self, other: float | integer, /) -> float64: ... + @overload + def __rtruediv__(self, other: complex, /) -> complex128: ... + + def __floordiv__(self, value: _IntLike_co, /) -> integer: ... + def __rfloordiv__(self, value: _IntLike_co, /) -> integer: ... def __mod__(self, value: _IntLike_co, /) -> integer: ... def __rmod__(self, value: _IntLike_co, /) -> integer: ... # Ensure that objects annotated as `integer` support bit-wise operations diff --git a/numpy/_typing/_callable.pyi b/numpy/_typing/_callable.pyi index 647d1a5afb56..b67ee18ba919 100644 --- a/numpy/_typing/_callable.pyi +++ b/numpy/_typing/_callable.pyi @@ -16,10 +16,8 @@ from numpy import ( complexfloating, float64, floating, - generic, int_, integer, - number, signedinteger, unsignedinteger, ) @@ -28,7 +26,6 @@ from . 
import NBitBase from ._array_like import NDArray from ._nbit import _NBitInt from ._nested_sequence import _NestedSequence -from ._scalars import _NumberLike_co _T = TypeVar("_T") _T1_contra = TypeVar("_T1_contra", contravariant=True) @@ -39,28 +36,6 @@ _2Tuple: TypeAlias = tuple[_T, _T] _NBit1 = TypeVar("_NBit1", bound=NBitBase) _NBit2 = TypeVar("_NBit2", bound=NBitBase) -_IntType = TypeVar("_IntType", bound=integer) -_FloatType = TypeVar("_FloatType", bound=floating) -_NumberType = TypeVar("_NumberType", bound=number) -_GenericType_co = TypeVar("_GenericType_co", covariant=True, bound=generic) - -@type_check_only -class _IntTrueDiv(Protocol[_NBit1]): - @overload - def __call__(self, other: bool, /) -> floating[_NBit1]: ... - @overload - def __call__(self, other: int, /) -> floating[_NBit1] | floating[_NBitInt]: ... - @overload - def __call__(self, other: float, /) -> floating[_NBit1] | float64: ... - @overload - def __call__( - self, other: complex, / - ) -> complexfloating[_NBit1, _NBit1] | complex128: ... - @overload - def __call__( - self, other: integer[_NBit2], / - ) -> floating[_NBit1] | floating[_NBit2]: ... - @type_check_only class _UnsignedIntOp(Protocol[_NBit1]): # NOTE: `uint64 + signedinteger -> float64` @@ -206,10 +181,6 @@ class _FloatDivMod(Protocol[_NBit1]): self, other: integer[_NBit2] | floating[_NBit2], / ) -> _2Tuple[floating[_NBit1]] | _2Tuple[floating[_NBit2]]: ... -@type_check_only -class _NumberOp(Protocol): - def __call__(self, other: _NumberLike_co, /) -> Any: ... 
- @final @type_check_only class _SupportsLT(Protocol): From cad7c071f2072ec34cc77e008f1defc7a65ce735 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 7 Sep 2025 01:43:41 +0200 Subject: [PATCH 0410/1718] TYP: fix ``np.bool`` comparison method declarations --- numpy/__init__.pyi | 31 +++++++++++++++++++++++++++---- 1 file changed, 27 insertions(+), 4 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index f0470e94e7e8..e539a16827c2 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -4161,10 +4161,33 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): def __or__(self, other: int, /) -> np.bool | intp: ... __ror__ = __or__ - __lt__: _ComparisonOpLT[_NumberLike_co, _ArrayLikeNumber_co] - __le__: _ComparisonOpLE[_NumberLike_co, _ArrayLikeNumber_co] - __gt__: _ComparisonOpGT[_NumberLike_co, _ArrayLikeNumber_co] - __ge__: _ComparisonOpGE[_NumberLike_co, _ArrayLikeNumber_co] + @overload + def __lt__(self, other: _NumberLike_co, /) -> bool_: ... + @overload + def __lt__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsGT], /) -> NDArray[bool_]: ... + @overload + def __lt__(self, other: _SupportsGT, /) -> bool_: ... + + @overload + def __le__(self, other: _NumberLike_co, /) -> bool_: ... + @overload + def __le__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsGE], /) -> NDArray[bool_]: ... + @overload + def __le__(self, other: _SupportsGE, /) -> bool_: ... + + @overload + def __gt__(self, other: _NumberLike_co, /) -> bool_: ... + @overload + def __gt__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsLT], /) -> NDArray[bool_]: ... + @overload + def __gt__(self, other: _SupportsLT, /) -> bool_: ... + + @overload + def __ge__(self, other: _NumberLike_co, /) -> bool_: ... + @overload + def __ge__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsLE], /) -> NDArray[bool_]: ... + @overload + def __ge__(self, other: _SupportsLE, /) -> bool_: ... 
# NOTE: This should _not_ be `Final` or a `TypeAlias` bool_ = bool From eee1d753849aee953bd782a3aeddba365f9c4375 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 7 Sep 2025 03:42:33 +0200 Subject: [PATCH 0411/1718] TYP: fix ``np.signedinteger`` method declarations --- numpy/__init__.pyi | 224 +++++++++++++++--- numpy/_typing/_callable.pyi | 49 ---- numpy/typing/tests/data/fail/bitwise_ops.pyi | 4 +- numpy/typing/tests/data/reveal/arithmetic.pyi | 18 +- .../typing/tests/data/reveal/bitwise_ops.pyi | 11 +- numpy/typing/tests/data/reveal/mod.pyi | 22 +- 6 files changed, 223 insertions(+), 105 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index e539a16827c2..7ccb52751da5 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -138,10 +138,6 @@ from numpy._typing._callable import ( _UnsignedIntBitOp, _UnsignedIntMod, _UnsignedIntDivMod, - _SignedIntOp, - _SignedIntBitOp, - _SignedIntMod, - _SignedIntDivMod, _FloatOp, _FloatMod, _FloatDivMod, @@ -4243,6 +4239,9 @@ class integer(_IntegralMixin, _RoundMixin, number[_NBit, int]): def __rfloordiv__(self, value: _IntLike_co, /) -> integer: ... def __mod__(self, value: _IntLike_co, /) -> integer: ... def __rmod__(self, value: _IntLike_co, /) -> integer: ... + def __divmod__(self, value: _IntLike_co, /) -> _2Tuple[integer]: ... + def __rdivmod__(self, value: _IntLike_co, /) -> _2Tuple[integer]: ... + # Ensure that objects annotated as `integer` support bit-wise operations def __lshift__(self, other: _IntLike_co, /) -> integer: ... def __rlshift__(self, other: _IntLike_co, /) -> integer: ... @@ -4258,30 +4257,199 @@ class integer(_IntegralMixin, _RoundMixin, number[_NBit, int]): class signedinteger(integer[_NBit1]): def __new__(cls, value: _ConvertibleToInt = 0, /) -> Self: ... 
- __add__: _SignedIntOp[_NBit1] - __radd__: _SignedIntOp[_NBit1] - __sub__: _SignedIntOp[_NBit1] - __rsub__: _SignedIntOp[_NBit1] - __mul__: _SignedIntOp[_NBit1] - __rmul__: _SignedIntOp[_NBit1] - __floordiv__: _SignedIntOp[_NBit1] - __rfloordiv__: _SignedIntOp[_NBit1] - __pow__: _SignedIntOp[_NBit1] - __rpow__: _SignedIntOp[_NBit1] - __lshift__: _SignedIntBitOp[_NBit1] - __rlshift__: _SignedIntBitOp[_NBit1] - __rshift__: _SignedIntBitOp[_NBit1] - __rrshift__: _SignedIntBitOp[_NBit1] - __and__: _SignedIntBitOp[_NBit1] - __rand__: _SignedIntBitOp[_NBit1] - __xor__: _SignedIntBitOp[_NBit1] - __rxor__: _SignedIntBitOp[_NBit1] - __or__: _SignedIntBitOp[_NBit1] - __ror__: _SignedIntBitOp[_NBit1] - __mod__: _SignedIntMod[_NBit1] - __rmod__: _SignedIntMod[_NBit1] - __divmod__: _SignedIntDivMod[_NBit1] - __rdivmod__: _SignedIntDivMod[_NBit1] + # arithmetic ops + + @override # type: ignore[override] + @overload + def __add__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __add__(self, other: float, /) -> float64: ... + @overload + def __add__(self, other: complex, /) -> complex128: ... + @overload + def __add__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __radd__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __radd__(self, other: float, /) -> float64: ... + @overload + def __radd__(self, other: complex, /) -> complex128: ... + @overload + def __radd__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __sub__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __sub__(self, other: float, /) -> float64: ... + @overload + def __sub__(self, other: complex, /) -> complex128: ... + @overload + def __sub__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rsub__(self, other: int | int8 | bool_, /) -> Self: ... 
+ @overload + def __rsub__(self, other: float, /) -> float64: ... + @overload + def __rsub__(self, other: complex, /) -> complex128: ... + @overload + def __rsub__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __mul__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __mul__(self, other: float, /) -> float64: ... + @overload + def __mul__(self, other: complex, /) -> complex128: ... + @overload + def __mul__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rmul__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rmul__(self, other: float, /) -> float64: ... + @overload + def __rmul__(self, other: complex, /) -> complex128: ... + @overload + def __rmul__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __pow__(self, other: int | int8 | bool_ | Self, mod: None = None, /) -> Self: ... + @overload + def __pow__(self, other: float, mod: None = None, /) -> float64: ... + @overload + def __pow__(self, other: complex, mod: None = None, /) -> complex128: ... + @overload + def __pow__(self, other: integer, mod: None = None, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rpow__(self, other: int | int8 | bool_, mod: None = None, /) -> Self: ... + @overload + def __rpow__(self, other: float, mod: None = None, /) -> float64: ... + @overload + def __rpow__(self, other: complex, mod: None = None, /) -> complex128: ... + @overload + def __rpow__(self, other: integer, mod: None = None, /) -> signedinteger: ... + + # integral ops (should be kept in sync) + + @override # type: ignore[override] + @overload + def __floordiv__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __floordiv__(self, other: float, /) -> float64: ... + @overload + def __floordiv__(self, other: integer, /) -> signedinteger: ... 
+ + @override # type: ignore[override] + @overload + def __rfloordiv__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rfloordiv__(self, other: float, /) -> float64: ... + @overload + def __rfloordiv__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __mod__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __mod__(self, other: float, /) -> float64: ... + @overload + def __mod__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rmod__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rmod__(self, other: float, /) -> float64: ... + @overload + def __rmod__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __divmod__(self, other: int | int8 | bool_ | Self, /) -> _2Tuple[Self]: ... + @overload + def __divmod__(self, other: float, /) -> _2Tuple[float64]: ... + @overload + def __divmod__(self, other: integer, /) -> _2Tuple[signedinteger]: ... + + @override # type: ignore[override] + @overload + def __rdivmod__(self, other: int | int8 | bool_, /) -> _2Tuple[Self]: ... + @overload + def __rdivmod__(self, other: float, /) -> _2Tuple[float64]: ... + @overload + def __rdivmod__(self, other: integer, /) -> _2Tuple[signedinteger]: ... + + # bitwise ops + + @override # type: ignore[override] + @overload + def __lshift__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __lshift__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rlshift__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rlshift__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rshift__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __rshift__(self, other: integer, /) -> signedinteger: ... 
+ + @override # type: ignore[override] + @overload + def __rrshift__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rrshift__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __and__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __and__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rand__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rand__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __xor__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __xor__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rxor__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rxor__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __or__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __or__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __ror__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __ror__(self, other: integer, /) -> signedinteger: ... int8 = signedinteger[_8Bit] int16 = signedinteger[_16Bit] diff --git a/numpy/_typing/_callable.pyi b/numpy/_typing/_callable.pyi index b67ee18ba919..d1fd8243b427 100644 --- a/numpy/_typing/_callable.pyi +++ b/numpy/_typing/_callable.pyi @@ -16,7 +16,6 @@ from numpy import ( complexfloating, float64, floating, - int_, integer, signedinteger, unsignedinteger, @@ -89,54 +88,6 @@ class _UnsignedIntDivMod(Protocol[_NBit1]): self, other: unsignedinteger[_NBit2], / ) -> _2Tuple[unsignedinteger[_NBit1]] | _2Tuple[unsignedinteger[_NBit2]]: ... 
-@type_check_only -class _SignedIntOp(Protocol[_NBit1]): - @overload - def __call__(self, other: int, /) -> signedinteger[_NBit1]: ... - @overload - def __call__(self, other: float, /) -> float64: ... - @overload - def __call__(self, other: complex, /) -> complex128: ... - @overload - def __call__(self, other: signedinteger[_NBit2], /) -> signedinteger[_NBit1] | signedinteger[_NBit2]: ... - -@type_check_only -class _SignedIntBitOp(Protocol[_NBit1]): - @overload - def __call__(self, other: bool, /) -> signedinteger[_NBit1]: ... - @overload - def __call__(self, other: int, /) -> signedinteger[_NBit1] | int_: ... - @overload - def __call__( - self, other: signedinteger[_NBit2], / - ) -> signedinteger[_NBit1] | signedinteger[_NBit2]: ... - -@type_check_only -class _SignedIntMod(Protocol[_NBit1]): - @overload - def __call__(self, other: bool, /) -> signedinteger[_NBit1]: ... - @overload - def __call__(self, other: int, /) -> signedinteger[_NBit1] | int_: ... - @overload - def __call__(self, other: float, /) -> floating[_NBit1] | float64: ... - @overload - def __call__( - self, other: signedinteger[_NBit2], / - ) -> signedinteger[_NBit1] | signedinteger[_NBit2]: ... - -@type_check_only -class _SignedIntDivMod(Protocol[_NBit1]): - @overload - def __call__(self, other: bool, /) -> _2Tuple[signedinteger[_NBit1]]: ... - @overload - def __call__(self, other: int, /) -> _2Tuple[signedinteger[_NBit1]] | _2Tuple[int_]: ... - @overload - def __call__(self, other: float, /) -> _2Tuple[floating[_NBit1]] | _2Tuple[float64]: ... - @overload - def __call__( - self, other: signedinteger[_NBit2], / - ) -> _2Tuple[signedinteger[_NBit1]] | _2Tuple[signedinteger[_NBit2]]: ... 
- @type_check_only class _FloatOp(Protocol[_NBit1]): @overload diff --git a/numpy/typing/tests/data/fail/bitwise_ops.pyi b/numpy/typing/tests/data/fail/bitwise_ops.pyi index fe51f15adc52..6876393c9392 100644 --- a/numpy/typing/tests/data/fail/bitwise_ops.pyi +++ b/numpy/typing/tests/data/fail/bitwise_ops.pyi @@ -9,9 +9,9 @@ i = 0 f8 = np.float64() b_ >> f8 # type: ignore[operator] -i8 << f8 # type: ignore[call-overload] +i8 << f8 # type: ignore[operator] i | f8 # type: ignore[operator] -i8 ^ f8 # type: ignore[call-overload] +i8 ^ f8 # type: ignore[operator] u8 & f8 # type: ignore[call-overload] ~f8 # type: ignore[operator] # TODO: Certain mixes like i4 << u8 go to float and thus should fail diff --git a/numpy/typing/tests/data/reveal/arithmetic.pyi b/numpy/typing/tests/data/reveal/arithmetic.pyi index 81a98a9d96d2..4b17ca2366b2 100644 --- a/numpy/typing/tests/data/reveal/arithmetic.pyi +++ b/numpy/typing/tests/data/reveal/arithmetic.pyi @@ -587,9 +587,9 @@ assert_type(AR_f + f4, npt.NDArray[np.float64]) # Int assert_type(i8 + i8, np.int64) -assert_type(i8 + u8, Any) -assert_type(i8 + i4, np.signedinteger[_32Bit] | np.signedinteger[_64Bit]) -assert_type(i8 + u4, Any) +assert_type(i8 + u8, np.signedinteger) +assert_type(i8 + i4, np.signedinteger) +assert_type(i8 + u4, np.signedinteger) assert_type(i8 + b_, np.int64) assert_type(i8 + b, np.int64) assert_type(i8 + c, np.complex128) @@ -607,7 +607,7 @@ assert_type(u8 + AR_f, npt.NDArray[np.float64]) assert_type(i8 + i8, np.int64) assert_type(u8 + i8, Any) -assert_type(i4 + i8, np.signedinteger[_32Bit] | np.signedinteger[_64Bit]) +assert_type(i4 + i8, np.signedinteger) assert_type(u4 + i8, Any) assert_type(b_ + i8, np.int64) assert_type(b + i8, np.int64) @@ -616,7 +616,7 @@ assert_type(f + i8, np.float64) assert_type(AR_f + i8, npt.NDArray[np.float64]) assert_type(u8 + u8, np.uint64) -assert_type(i4 + u8, Any) +assert_type(i4 + u8, np.signedinteger) assert_type(u4 + u8, np.unsignedinteger[_32Bit] | 
np.unsignedinteger[_64Bit]) assert_type(b_ + u8, np.uint64) assert_type(b + u8, np.uint64) @@ -624,7 +624,7 @@ assert_type(c + u8, np.complex128) assert_type(f + u8, np.float64) assert_type(AR_f + u8, npt.NDArray[np.float64]) -assert_type(i4 + i8, np.signedinteger[_32Bit] | np.signedinteger[_64Bit]) +assert_type(i4 + i8, np.signedinteger) assert_type(i4 + i4, np.int32) assert_type(i4 + b_, np.int32) assert_type(i4 + b, np.int32) @@ -638,14 +638,14 @@ assert_type(u4 + b_, np.uint32) assert_type(u4 + b, np.uint32) assert_type(u4 + AR_f, npt.NDArray[np.float64]) -assert_type(i8 + i4, np.signedinteger[_32Bit] | np.signedinteger[_64Bit]) +assert_type(i8 + i4, np.signedinteger) assert_type(i4 + i4, np.int32) assert_type(b_ + i4, np.int32) assert_type(b + i4, np.int32) assert_type(AR_f + i4, npt.NDArray[np.float64]) -assert_type(i8 + u4, Any) -assert_type(i4 + u4, Any) +assert_type(i8 + u4, np.signedinteger) +assert_type(i4 + u4, np.signedinteger) assert_type(u8 + u4, np.unsignedinteger[_32Bit] | np.unsignedinteger[_64Bit]) assert_type(u4 + u4, np.uint32) assert_type(b_ + u4, np.uint32) diff --git a/numpy/typing/tests/data/reveal/bitwise_ops.pyi b/numpy/typing/tests/data/reveal/bitwise_ops.pyi index bef7c6739605..66adcf3dfb30 100644 --- a/numpy/typing/tests/data/reveal/bitwise_ops.pyi +++ b/numpy/typing/tests/data/reveal/bitwise_ops.pyi @@ -2,7 +2,6 @@ from typing import Literal as L, TypeAlias, assert_type import numpy as np import numpy.typing as npt -from numpy._typing import _32Bit, _64Bit FalseType: TypeAlias = L[False] TrueType: TypeAlias = L[True] @@ -43,11 +42,11 @@ assert_type(i4 | i4, np.int32) assert_type(i4 ^ i4, np.int32) assert_type(i4 & i4, np.int32) -assert_type(i8 << i4, np.signedinteger[_32Bit] | np.signedinteger[_64Bit]) -assert_type(i8 >> i4, np.signedinteger[_32Bit] | np.signedinteger[_64Bit]) -assert_type(i8 | i4, np.signedinteger[_32Bit] | np.signedinteger[_64Bit]) -assert_type(i8 ^ i4, np.signedinteger[_32Bit] | np.signedinteger[_64Bit]) 
-assert_type(i8 & i4, np.signedinteger[_32Bit] | np.signedinteger[_64Bit]) +assert_type(i8 << i4, np.signedinteger) +assert_type(i8 >> i4, np.signedinteger) +assert_type(i8 | i4, np.signedinteger) +assert_type(i8 ^ i4, np.signedinteger) +assert_type(i8 & i4, np.signedinteger) assert_type(i8 << b_, np.int64) assert_type(i8 >> b_, np.int64) diff --git a/numpy/typing/tests/data/reveal/mod.pyi b/numpy/typing/tests/data/reveal/mod.pyi index 9bbbd5c52d7f..338a9a1ff729 100644 --- a/numpy/typing/tests/data/reveal/mod.pyi +++ b/numpy/typing/tests/data/reveal/mod.pyi @@ -108,40 +108,40 @@ assert_type(divmod(AR_b, b_), tuple[npt.NDArray[np.int8], npt.NDArray[np.int8]]) assert_type(i8 % b, np.int64) assert_type(i8 % i8, np.int64) -assert_type(i8 % f, np.float64 | np.floating[_64Bit]) -assert_type(i8 % f8, np.float64 | np.floating[_64Bit]) -assert_type(i4 % i8, np.int64 | np.int32) -assert_type(i4 % f8, np.float64 | np.float32) +assert_type(i8 % f, np.float64) +assert_type(i8 % f8, np.float64) +assert_type(i4 % i8, np.signedinteger) +assert_type(i4 % f8, np.float64) assert_type(i4 % i4, np.int32) assert_type(i4 % f4, np.float32) assert_type(i8 % AR_b, npt.NDArray[np.int64]) assert_type(divmod(i8, b), tuple[np.int64, np.int64]) -assert_type(divmod(i8, i4), tuple[np.int64, np.int64] | tuple[np.int32, np.int32]) +assert_type(divmod(i8, i4), tuple[np.signedinteger, np.signedinteger]) assert_type(divmod(i8, i8), tuple[np.int64, np.int64]) # workarounds for https://github.com/microsoft/pyright/issues/9663 -assert_type(i8.__divmod__(f), tuple[np.floating[_64Bit], np.floating[_64Bit]] | tuple[np.float64, np.float64]) -assert_type(i8.__divmod__(f8), tuple[np.floating[_64Bit], np.floating[_64Bit]] | tuple[np.float64, np.float64]) +assert_type(i8.__divmod__(f), tuple[np.float64, np.float64]) +assert_type(i8.__divmod__(f8), tuple[np.float64, np.float64]) assert_type(divmod(i8, f4), tuple[np.floating[_64Bit], np.floating[_64Bit]] | tuple[np.float32, np.float32]) assert_type(divmod(i4, i4), 
tuple[np.int32, np.int32]) assert_type(divmod(i4, f4), tuple[np.float32, np.float32]) assert_type(divmod(i8, AR_b), tuple[npt.NDArray[np.int64], npt.NDArray[np.int64]]) assert_type(b % i8, np.int64) -assert_type(f % i8, np.float64 | np.floating[_64Bit]) +assert_type(f % i8, np.float64) assert_type(i8 % i8, np.int64) assert_type(f8 % i8, np.float64) -assert_type(i8 % i4, np.int64 | np.int32) +assert_type(i8 % i4, np.signedinteger) assert_type(f8 % i4, np.float64) assert_type(i4 % i4, np.int32) assert_type(f4 % i4, np.float32) assert_type(AR_b % i8, npt.NDArray[np.int64]) assert_type(divmod(b, i8), tuple[np.int64, np.int64]) -assert_type(divmod(f, i8), tuple[np.floating[_64Bit], np.floating[_64Bit]] | tuple[np.float64, np.float64]) +assert_type(divmod(f, i8), tuple[np.float64, np.float64]) assert_type(divmod(i8, i8), tuple[np.int64, np.int64]) assert_type(divmod(f8, i8), tuple[np.float64, np.float64]) -assert_type(divmod(i4, i8), tuple[np.int64, np.int64] | tuple[np.int32, np.int32]) +assert_type(divmod(i4, i8), tuple[np.signedinteger, np.signedinteger]) assert_type(divmod(i4, i4), tuple[np.int32, np.int32]) # workarounds for https://github.com/microsoft/pyright/issues/9663 assert_type(f4.__divmod__(i8), tuple[np.floating[_64Bit], np.floating[_64Bit]] | tuple[np.float32, np.float32]) From 20575c2cfdb1e22e9b4951ef0c34486c68e36e05 Mon Sep 17 00:00:00 2001 From: Shirong_Wang Date: Sat, 16 Aug 2025 20:35:43 +0800 Subject: [PATCH 0412/1718] ENH: MAINT: add object copy for f2py meson backend --- numpy/f2py/_backends/_meson.py | 13 +++++++++++++ numpy/f2py/_backends/meson.build.template | 3 +++ 2 files changed, 16 insertions(+) diff --git a/numpy/f2py/_backends/_meson.py b/numpy/f2py/_backends/_meson.py index cbd9b0e32729..4c498bab2f25 100644 --- a/numpy/f2py/_backends/_meson.py +++ b/numpy/f2py/_backends/_meson.py @@ -50,6 +50,7 @@ def __init__( self.pipeline = [ self.initialize_template, self.sources_substitution, + self.objects_substitution, self.deps_substitution, 
self.include_substitution, self.libraries_substitution, @@ -79,6 +80,11 @@ def sources_substitution(self) -> None: [f"{self.indent}'''{source}'''," for source in self.sources] ) + def objects_substitution(self) -> None: + self.substitutions["obj_list"] = ",\n".join( + [f"{self.indent}'''{obj}'''," for obj in self.objects] + ) + def deps_substitution(self) -> None: self.substitutions["dep_list"] = f",\n{self.indent}".join( [f"{self.indent}dependency('{dep}')," for dep in self.deps] @@ -186,6 +192,7 @@ def run_meson(self, build_dir: Path): def compile(self) -> None: self.sources = _prepare_sources(self.modulename, self.sources, self.build_dir) + _prepare_objects(self.modulename, self.extra_objects, self.build_dir) self.write_meson_build(self.build_dir) self.run_meson(self.build_dir) self._move_exec_to_root(self.build_dir) @@ -216,6 +223,12 @@ def _prepare_sources(mname, sources, bdir): ] return extended_sources +def _prepare_objects(mname, objects, bdir): + Path(bdir).mkdir(parents=True, exist_ok=True) + # Copy objects + for obj in objects: + if Path(obj).exists() and Path(obj).is_file(): + shutil.copy(obj, bdir) def _get_flags(fc_flags): flag_values = [] diff --git a/numpy/f2py/_backends/meson.build.template b/numpy/f2py/_backends/meson.build.template index fdcc1b17ce21..58c6758cc503 100644 --- a/numpy/f2py/_backends/meson.build.template +++ b/numpy/f2py/_backends/meson.build.template @@ -43,6 +43,9 @@ ${source_list}, include_directories: [ inc_np, ${inc_list} + ], + objects: [ +${obj_list} ], dependencies : [ py_dep, From b78c703b3a62ae0d10056803b07a922d33768b6d Mon Sep 17 00:00:00 2001 From: Shirong_Wang Date: Wed, 20 Aug 2025 19:02:02 +0800 Subject: [PATCH 0413/1718] TST: add test for f2py meson with object file --- numpy/f2py/tests/test_f2py2e.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/numpy/f2py/tests/test_f2py2e.py b/numpy/f2py/tests/test_f2py2e.py index 2f91eb77c4bd..959e1527c482 100644 --- a/numpy/f2py/tests/test_f2py2e.py +++ 
b/numpy/f2py/tests/test_f2py2e.py @@ -673,6 +673,25 @@ def test_inclheader(capfd, hello_world_f90, monkeypatch): assert "#include " in ocmr assert "#include " in ocmr +@pytest.mark.skipif((platform.system() != 'Linux'), reason='Compiler required') +def test_cli_obj(capfd, hello_world_f90, monkeypatch): + """Ensures that the extra object can be specified when using meson backend + """ + ipath = Path(hello_world_f90) + mname = "blah" + odir = "tttmp" + obj = "extra.o" + monkeypatch.setattr(sys, "argv", + f'f2py --backend meson --build-dir {odir} -m {mname} -c {obj} {ipath}'.split()) + + with util.switchdir(ipath.parent): + Path(obj).touch() + compiler_check_f2pycli() + with Path(f"{odir}/meson.build").open() as mesonbuild: + mbld = mesonbuild.read() + assert "objects:" in mbld + assert f"'''{obj}'''" in mbld + def test_inclpath(): """Add to the include directories From 627fc3ae57713baf11e3a850c8021f99802ba3fb Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 7 Sep 2025 20:12:43 +0200 Subject: [PATCH 0414/1718] TYP: fix ``np.unsignedinteger`` method declarations --- numpy/__init__.pyi | 330 +++++++++++++++--- numpy/_typing/_callable.pyi | 63 +--- numpy/typing/tests/data/fail/bitwise_ops.pyi | 2 +- numpy/typing/tests/data/reveal/arithmetic.pyi | 18 +- .../typing/tests/data/reveal/bitwise_ops.pyi | 10 +- 5 files changed, 301 insertions(+), 122 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 7ccb52751da5..ed8fa9a93eda 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -134,10 +134,6 @@ from numpy._typing import ( ) from numpy._typing._callable import ( - _UnsignedIntOp, - _UnsignedIntBitOp, - _UnsignedIntMod, - _UnsignedIntDivMod, _FloatOp, _FloatMod, _FloatDivMod, @@ -4254,7 +4250,7 @@ class integer(_IntegralMixin, _RoundMixin, number[_NBit, int]): def __xor__(self, other: _IntLike_co, /) -> integer: ... def __rxor__(self, other: _IntLike_co, /) -> integer: ... 
-class signedinteger(integer[_NBit1]): +class signedinteger(integer[_NBit]): def __new__(cls, value: _ConvertibleToInt = 0, /) -> Self: ... # arithmetic ops @@ -4267,7 +4263,9 @@ class signedinteger(integer[_NBit1]): @overload def __add__(self, other: complex, /) -> complex128: ... @overload - def __add__(self, other: integer, /) -> signedinteger: ... + def __add__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __add__(self, other: integer, /) -> Incomplete: ... @override # type: ignore[override] @overload @@ -4277,7 +4275,9 @@ class signedinteger(integer[_NBit1]): @overload def __radd__(self, other: complex, /) -> complex128: ... @overload - def __radd__(self, other: integer, /) -> signedinteger: ... + def __radd__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __radd__(self, other: integer, /) -> Incomplete: ... @override # type: ignore[override] @overload @@ -4287,7 +4287,9 @@ class signedinteger(integer[_NBit1]): @overload def __sub__(self, other: complex, /) -> complex128: ... @overload - def __sub__(self, other: integer, /) -> signedinteger: ... + def __sub__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __sub__(self, other: integer, /) -> Incomplete: ... @override # type: ignore[override] @overload @@ -4297,7 +4299,9 @@ class signedinteger(integer[_NBit1]): @overload def __rsub__(self, other: complex, /) -> complex128: ... @overload - def __rsub__(self, other: integer, /) -> signedinteger: ... + def __rsub__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __rsub__(self, other: integer, /) -> Incomplete: ... @override # type: ignore[override] @overload @@ -4307,7 +4311,9 @@ class signedinteger(integer[_NBit1]): @overload def __mul__(self, other: complex, /) -> complex128: ... @overload - def __mul__(self, other: integer, /) -> signedinteger: ... + def __mul__(self, other: signedinteger, /) -> signedinteger: ... 
+ @overload + def __mul__(self, other: integer, /) -> Incomplete: ... @override # type: ignore[override] @overload @@ -4317,7 +4323,9 @@ class signedinteger(integer[_NBit1]): @overload def __rmul__(self, other: complex, /) -> complex128: ... @overload - def __rmul__(self, other: integer, /) -> signedinteger: ... + def __rmul__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __rmul__(self, other: integer, /) -> Incomplete: ... @override # type: ignore[override] @overload @@ -4327,7 +4335,9 @@ class signedinteger(integer[_NBit1]): @overload def __pow__(self, other: complex, mod: None = None, /) -> complex128: ... @overload - def __pow__(self, other: integer, mod: None = None, /) -> signedinteger: ... + def __pow__(self, other: signedinteger, mod: None = None, /) -> signedinteger: ... + @overload + def __pow__(self, other: integer, mod: None = None, /) -> Incomplete: ... @override # type: ignore[override] @overload @@ -4337,9 +4347,11 @@ class signedinteger(integer[_NBit1]): @overload def __rpow__(self, other: complex, mod: None = None, /) -> complex128: ... @overload - def __rpow__(self, other: integer, mod: None = None, /) -> signedinteger: ... + def __rpow__(self, other: signedinteger, mod: None = None, /) -> signedinteger: ... + @overload + def __rpow__(self, other: integer, mod: None = None, /) -> Incomplete: ... - # integral ops (should be kept in sync) + # modular division ops @override # type: ignore[override] @overload @@ -4347,7 +4359,9 @@ class signedinteger(integer[_NBit1]): @overload def __floordiv__(self, other: float, /) -> float64: ... @overload - def __floordiv__(self, other: integer, /) -> signedinteger: ... + def __floordiv__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __floordiv__(self, other: integer, /) -> Incomplete: ... @override # type: ignore[override] @overload @@ -4355,7 +4369,9 @@ class signedinteger(integer[_NBit1]): @overload def __rfloordiv__(self, other: float, /) -> float64: ... 
@overload - def __rfloordiv__(self, other: integer, /) -> signedinteger: ... + def __rfloordiv__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __rfloordiv__(self, other: integer, /) -> Incomplete: ... @override # type: ignore[override] @overload @@ -4363,7 +4379,9 @@ class signedinteger(integer[_NBit1]): @overload def __mod__(self, other: float, /) -> float64: ... @overload - def __mod__(self, other: integer, /) -> signedinteger: ... + def __mod__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __mod__(self, other: integer, /) -> Incomplete: ... @override # type: ignore[override] @overload @@ -4371,7 +4389,9 @@ class signedinteger(integer[_NBit1]): @overload def __rmod__(self, other: float, /) -> float64: ... @overload - def __rmod__(self, other: integer, /) -> signedinteger: ... + def __rmod__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __rmod__(self, other: integer, /) -> Incomplete: ... @override # type: ignore[override] @overload @@ -4379,7 +4399,9 @@ class signedinteger(integer[_NBit1]): @overload def __divmod__(self, other: float, /) -> _2Tuple[float64]: ... @overload - def __divmod__(self, other: integer, /) -> _2Tuple[signedinteger]: ... + def __divmod__(self, other: signedinteger, /) -> _2Tuple[signedinteger]: ... + @overload + def __divmod__(self, other: integer, /) -> _2Tuple[Incomplete]: ... @override # type: ignore[override] @overload @@ -4387,7 +4409,9 @@ class signedinteger(integer[_NBit1]): @overload def __rdivmod__(self, other: float, /) -> _2Tuple[float64]: ... @overload - def __rdivmod__(self, other: integer, /) -> _2Tuple[signedinteger]: ... + def __rdivmod__(self, other: signedinteger, /) -> _2Tuple[signedinteger]: ... + @overload + def __rdivmod__(self, other: integer, /) -> _2Tuple[Incomplete]: ... 
# bitwise ops @@ -4467,31 +4491,247 @@ longlong = signedinteger[_NBitLongLong] class unsignedinteger(integer[_NBit1]): def __new__(cls, value: _ConvertibleToInt = 0, /) -> Self: ... - # NOTE: `uint64 + signedinteger -> float64` - __add__: _UnsignedIntOp[_NBit1] - __radd__: _UnsignedIntOp[_NBit1] - __sub__: _UnsignedIntOp[_NBit1] - __rsub__: _UnsignedIntOp[_NBit1] - __mul__: _UnsignedIntOp[_NBit1] - __rmul__: _UnsignedIntOp[_NBit1] - __floordiv__: _UnsignedIntOp[_NBit1] - __rfloordiv__: _UnsignedIntOp[_NBit1] - __pow__: _UnsignedIntOp[_NBit1] - __rpow__: _UnsignedIntOp[_NBit1] - __lshift__: _UnsignedIntBitOp[_NBit1] - __rlshift__: _UnsignedIntBitOp[_NBit1] - __rshift__: _UnsignedIntBitOp[_NBit1] - __rrshift__: _UnsignedIntBitOp[_NBit1] - __and__: _UnsignedIntBitOp[_NBit1] - __rand__: _UnsignedIntBitOp[_NBit1] - __xor__: _UnsignedIntBitOp[_NBit1] - __rxor__: _UnsignedIntBitOp[_NBit1] - __or__: _UnsignedIntBitOp[_NBit1] - __ror__: _UnsignedIntBitOp[_NBit1] - __mod__: _UnsignedIntMod[_NBit1] - __rmod__: _UnsignedIntMod[_NBit1] - __divmod__: _UnsignedIntDivMod[_NBit1] - __rdivmod__: _UnsignedIntDivMod[_NBit1] + # arithmetic ops + + @override # type: ignore[override] + @overload + def __add__(self, other: int | uint8 | bool_ | Self, /) -> Self: ... + @overload + def __add__(self, other: float, /) -> float64: ... + @overload + def __add__(self, other: complex, /) -> complex128: ... + @overload + def __add__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __add__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __radd__(self, other: int | uint8 | bool_, /) -> Self: ... + @overload + def __radd__(self, other: float, /) -> float64: ... + @overload + def __radd__(self, other: complex, /) -> complex128: ... + @overload + def __radd__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __radd__(self, other: integer, /) -> Incomplete: ... 
+ + @override # type: ignore[override] + @overload + def __sub__(self, other: int | uint8 | bool_ | Self, /) -> Self: ... + @overload + def __sub__(self, other: float, /) -> float64: ... + @overload + def __sub__(self, other: complex, /) -> complex128: ... + @overload + def __sub__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __sub__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rsub__(self, other: int | uint8 | bool_, /) -> Self: ... + @overload + def __rsub__(self, other: float, /) -> float64: ... + @overload + def __rsub__(self, other: complex, /) -> complex128: ... + @overload + def __rsub__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rsub__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __mul__(self, other: int | uint8 | bool_ | Self, /) -> Self: ... + @overload + def __mul__(self, other: float, /) -> float64: ... + @overload + def __mul__(self, other: complex, /) -> complex128: ... + @overload + def __mul__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __mul__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rmul__(self, other: int | uint8 | bool_, /) -> Self: ... + @overload + def __rmul__(self, other: float, /) -> float64: ... + @overload + def __rmul__(self, other: complex, /) -> complex128: ... + @overload + def __rmul__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rmul__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __pow__(self, other: int | uint8 | bool_ | Self, mod: None = None, /) -> Self: ... + @overload + def __pow__(self, other: float, mod: None = None, /) -> float64: ... + @overload + def __pow__(self, other: complex, mod: None = None, /) -> complex128: ... 
+ @overload + def __pow__(self, other: unsignedinteger, mod: None = None, /) -> unsignedinteger: ... + @overload + def __pow__(self, other: integer, mod: None = None, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rpow__(self, other: int | uint8 | bool_, mod: None = None, /) -> Self: ... + @overload + def __rpow__(self, other: float, mod: None = None, /) -> float64: ... + @overload + def __rpow__(self, other: complex, mod: None = None, /) -> complex128: ... + @overload + def __rpow__(self, other: unsignedinteger, mod: None = None, /) -> unsignedinteger: ... + @overload + def __rpow__(self, other: integer, mod: None = None, /) -> Incomplete: ... + + # modular division ops + + @override # type: ignore[override] + @overload + def __floordiv__(self, other: int | uint8 | bool_ | Self, /) -> Self: ... + @overload + def __floordiv__(self, other: float, /) -> float64: ... + @overload + def __floordiv__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __floordiv__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rfloordiv__(self, other: int | uint8 | bool_, /) -> Self: ... + @overload + def __rfloordiv__(self, other: float, /) -> float64: ... + @overload + def __rfloordiv__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rfloordiv__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __mod__(self, other: int | uint8 | bool_ | Self, /) -> Self: ... + @overload + def __mod__(self, other: float, /) -> float64: ... + @overload + def __mod__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __mod__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rmod__(self, other: int | uint8 | bool_, /) -> Self: ... + @overload + def __rmod__(self, other: float, /) -> float64: ... 
+ @overload + def __rmod__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rmod__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __divmod__(self, other: int | uint8 | bool_ | Self, /) -> _2Tuple[Self]: ... + @overload + def __divmod__(self, other: float, /) -> _2Tuple[float64]: ... + @overload + def __divmod__(self, other: unsignedinteger, /) -> _2Tuple[unsignedinteger]: ... + @overload + def __divmod__(self, other: integer, /) -> _2Tuple[Incomplete]: ... + + @override # type: ignore[override] + @overload + def __rdivmod__(self, other: int | uint8 | bool_, /) -> _2Tuple[Self]: ... + @overload + def __rdivmod__(self, other: float, /) -> _2Tuple[float64]: ... + @overload + def __rdivmod__(self, other: unsignedinteger, /) -> _2Tuple[unsignedinteger]: ... + @overload + def __rdivmod__(self, other: integer, /) -> _2Tuple[Incomplete]: ... + + # bitwise ops + + @override # type: ignore[override] + @overload + def __lshift__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __lshift__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __lshift__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rlshift__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rlshift__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rlshift__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rshift__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __rshift__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rshift__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rrshift__(self, other: int | int8 | bool_, /) -> Self: ... 
+ @overload + def __rrshift__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rrshift__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __and__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __and__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __and__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rand__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rand__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rand__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __xor__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __xor__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __xor__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rxor__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rxor__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rxor__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __or__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __or__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __or__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __ror__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __ror__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __ror__(self, other: signedinteger, /) -> signedinteger: ... 
uint8: TypeAlias = unsignedinteger[_8Bit] uint16: TypeAlias = unsignedinteger[_16Bit] diff --git a/numpy/_typing/_callable.pyi b/numpy/_typing/_callable.pyi index d1fd8243b427..96fa4000889f 100644 --- a/numpy/_typing/_callable.pyi +++ b/numpy/_typing/_callable.pyi @@ -11,15 +11,7 @@ See the `Mypy documentation`_ on protocols for more details. from typing import Any, Protocol, TypeAlias, TypeVar, final, overload, type_check_only import numpy as np -from numpy import ( - complex128, - complexfloating, - float64, - floating, - integer, - signedinteger, - unsignedinteger, -) +from numpy import complex128, complexfloating, float64, floating, integer from . import NBitBase from ._array_like import NDArray @@ -35,59 +27,6 @@ _2Tuple: TypeAlias = tuple[_T, _T] _NBit1 = TypeVar("_NBit1", bound=NBitBase) _NBit2 = TypeVar("_NBit2", bound=NBitBase) -@type_check_only -class _UnsignedIntOp(Protocol[_NBit1]): - # NOTE: `uint64 + signedinteger -> float64` - @overload - def __call__(self, other: int, /) -> unsignedinteger[_NBit1]: ... - @overload - def __call__(self, other: float, /) -> float64: ... - @overload - def __call__(self, other: complex, /) -> complex128: ... - @overload - def __call__(self, other: unsignedinteger[_NBit2], /) -> unsignedinteger[_NBit1] | unsignedinteger[_NBit2]: ... - @overload - def __call__(self, other: signedinteger, /) -> Any: ... - -@type_check_only -class _UnsignedIntBitOp(Protocol[_NBit1]): - @overload - def __call__(self, other: bool, /) -> unsignedinteger[_NBit1]: ... - @overload - def __call__(self, other: int, /) -> signedinteger: ... - @overload - def __call__(self, other: signedinteger, /) -> signedinteger: ... - @overload - def __call__( - self, other: unsignedinteger[_NBit2], / - ) -> unsignedinteger[_NBit1] | unsignedinteger[_NBit2]: ... - -@type_check_only -class _UnsignedIntMod(Protocol[_NBit1]): - @overload - def __call__(self, other: bool, /) -> unsignedinteger[_NBit1]: ... 
- @overload - def __call__(self, other: int | signedinteger, /) -> Any: ... - @overload - def __call__(self, other: float, /) -> floating[_NBit1] | float64: ... - @overload - def __call__( - self, other: unsignedinteger[_NBit2], / - ) -> unsignedinteger[_NBit1] | unsignedinteger[_NBit2]: ... - -@type_check_only -class _UnsignedIntDivMod(Protocol[_NBit1]): - @overload - def __call__(self, other: bool, /) -> _2Tuple[signedinteger[_NBit1]]: ... - @overload - def __call__(self, other: int | signedinteger, /) -> _2Tuple[Any]: ... - @overload - def __call__(self, other: float, /) -> _2Tuple[floating[_NBit1]] | _2Tuple[float64]: ... - @overload - def __call__( - self, other: unsignedinteger[_NBit2], / - ) -> _2Tuple[unsignedinteger[_NBit1]] | _2Tuple[unsignedinteger[_NBit2]]: ... - @type_check_only class _FloatOp(Protocol[_NBit1]): @overload diff --git a/numpy/typing/tests/data/fail/bitwise_ops.pyi b/numpy/typing/tests/data/fail/bitwise_ops.pyi index 6876393c9392..f4de2928ff54 100644 --- a/numpy/typing/tests/data/fail/bitwise_ops.pyi +++ b/numpy/typing/tests/data/fail/bitwise_ops.pyi @@ -12,6 +12,6 @@ b_ >> f8 # type: ignore[operator] i8 << f8 # type: ignore[operator] i | f8 # type: ignore[operator] i8 ^ f8 # type: ignore[operator] -u8 & f8 # type: ignore[call-overload] +u8 & f8 # type: ignore[operator] ~f8 # type: ignore[operator] # TODO: Certain mixes like i4 << u8 go to float and thus should fail diff --git a/numpy/typing/tests/data/reveal/arithmetic.pyi b/numpy/typing/tests/data/reveal/arithmetic.pyi index 4b17ca2366b2..1c564df5de70 100644 --- a/numpy/typing/tests/data/reveal/arithmetic.pyi +++ b/numpy/typing/tests/data/reveal/arithmetic.pyi @@ -587,9 +587,9 @@ assert_type(AR_f + f4, npt.NDArray[np.float64]) # Int assert_type(i8 + i8, np.int64) -assert_type(i8 + u8, np.signedinteger) +assert_type(i8 + u8, Any) assert_type(i8 + i4, np.signedinteger) -assert_type(i8 + u4, np.signedinteger) +assert_type(i8 + u4, Any) assert_type(i8 + b_, np.int64) assert_type(i8 + b, 
np.int64) assert_type(i8 + c, np.complex128) @@ -598,7 +598,7 @@ assert_type(i8 + AR_f, npt.NDArray[np.float64]) assert_type(u8 + u8, np.uint64) assert_type(u8 + i4, Any) -assert_type(u8 + u4, np.unsignedinteger[_32Bit] | np.unsignedinteger[_64Bit]) +assert_type(u8 + u4, np.unsignedinteger) assert_type(u8 + b_, np.uint64) assert_type(u8 + b, np.uint64) assert_type(u8 + c, np.complex128) @@ -616,8 +616,8 @@ assert_type(f + i8, np.float64) assert_type(AR_f + i8, npt.NDArray[np.float64]) assert_type(u8 + u8, np.uint64) -assert_type(i4 + u8, np.signedinteger) -assert_type(u4 + u8, np.unsignedinteger[_32Bit] | np.unsignedinteger[_64Bit]) +assert_type(i4 + u8, Any) +assert_type(u4 + u8, np.unsignedinteger) assert_type(b_ + u8, np.uint64) assert_type(b + u8, np.uint64) assert_type(c + u8, np.complex128) @@ -632,7 +632,7 @@ assert_type(i4 + AR_f, npt.NDArray[np.float64]) assert_type(u4 + i8, Any) assert_type(u4 + i4, Any) -assert_type(u4 + u8, np.unsignedinteger[_32Bit] | np.unsignedinteger[_64Bit]) +assert_type(u4 + u8, np.unsignedinteger) assert_type(u4 + u4, np.uint32) assert_type(u4 + b_, np.uint32) assert_type(u4 + b, np.uint32) @@ -644,9 +644,9 @@ assert_type(b_ + i4, np.int32) assert_type(b + i4, np.int32) assert_type(AR_f + i4, npt.NDArray[np.float64]) -assert_type(i8 + u4, np.signedinteger) -assert_type(i4 + u4, np.signedinteger) -assert_type(u8 + u4, np.unsignedinteger[_32Bit] | np.unsignedinteger[_64Bit]) +assert_type(i8 + u4, Any) +assert_type(i4 + u4, Any) +assert_type(u8 + u4, np.unsignedinteger) assert_type(u4 + u4, np.uint32) assert_type(b_ + u4, np.uint32) assert_type(b + u4, np.uint32) diff --git a/numpy/typing/tests/data/reveal/bitwise_ops.pyi b/numpy/typing/tests/data/reveal/bitwise_ops.pyi index 66adcf3dfb30..49986bd5d12c 100644 --- a/numpy/typing/tests/data/reveal/bitwise_ops.pyi +++ b/numpy/typing/tests/data/reveal/bitwise_ops.pyi @@ -84,11 +84,11 @@ assert_type(u4 | i4, np.signedinteger) assert_type(u4 ^ i4, np.signedinteger) assert_type(u4 & i4, 
np.signedinteger) -assert_type(u4 << i, np.signedinteger) -assert_type(u4 >> i, np.signedinteger) -assert_type(u4 | i, np.signedinteger) -assert_type(u4 ^ i, np.signedinteger) -assert_type(u4 & i, np.signedinteger) +assert_type(u4 << i, np.uint32) +assert_type(u4 >> i, np.uint32) +assert_type(u4 | i, np.uint32) +assert_type(u4 ^ i, np.uint32) +assert_type(u4 & i, np.uint32) assert_type(u8 << b_, np.uint64) assert_type(u8 >> b_, np.uint64) From e1a3ad7b0c42d4ba716f9c923ba57c3afbcc521f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Moritz=20Gro=C3=9F?= Date: Mon, 8 Sep 2025 01:40:32 +0200 Subject: [PATCH 0415/1718] replace atry_collapse() with afound_new_run(). change atimsort_() --- numpy/_core/src/npysort/timsort.cpp | 68 +++++++---------------------- 1 file changed, 15 insertions(+), 53 deletions(-) diff --git a/numpy/_core/src/npysort/timsort.cpp b/numpy/_core/src/npysort/timsort.cpp index 9ecf88c0aeb9..0dfb4d32f64a 100644 --- a/numpy/_core/src/npysort/timsort.cpp +++ b/numpy/_core/src/npysort/timsort.cpp @@ -782,59 +782,24 @@ amerge_at_(type *arr, npy_intp *tosort, const run *stack, const npy_intp at, template static int -atry_collapse_(type *arr, npy_intp *tosort, run *stack, npy_intp *stack_ptr, - buffer_intp *buffer) +afound_new_run_(type *arr, npy_intp *tosort, run *stack, npy_intp *stack_ptr, npy_intp n2, + npy_intp num, buffer_intp *buffer) { int ret; - npy_intp A, B, C, top; - top = *stack_ptr; - - while (1 < top) { - B = stack[top - 2].l; - C = stack[top - 1].l; - - if ((2 < top && stack[top - 3].l <= B + C) || - (3 < top && stack[top - 4].l <= stack[top - 3].l + B)) { - A = stack[top - 3].l; - - if (A <= C) { - ret = amerge_at_(arr, tosort, stack, top - 3, buffer); - - if (NPY_UNLIKELY(ret < 0)) { - return ret; - } - - stack[top - 3].l += B; - stack[top - 2] = stack[top - 1]; - --top; - } - else { - ret = amerge_at_(arr, tosort, stack, top - 2, buffer); - - if (NPY_UNLIKELY(ret < 0)) { - return ret; - } - - stack[top - 2].l += C; - --top; - } - } - else if (1 
< top && B <= C) { - ret = amerge_at_(arr, tosort, stack, top - 2, buffer); - + if (*stack_ptr > 0) { + npy_intp s1 = stack[*stack_ptr - 1].s; + npy_intp n1 = stack[*stack_ptr - 1].l; + int power = powerloop(s1, n1, n2, num); + while (*stack_ptr > 1 && stack[*stack_ptr - 2].power > power) { + ret = amerge_at_(arr, tosort, stack, *stack_ptr - 2, buffer); if (NPY_UNLIKELY(ret < 0)) { return ret; } - - stack[top - 2].l += C; - --top; - } - else { - break; + stack[*stack_ptr - 2].l += stack[*stack_ptr - 1].l; + --(*stack_ptr); } + stack[*stack_ptr - 1].power = power; } - - *stack_ptr = top; return 0; } @@ -897,16 +862,13 @@ atimsort_(void *v, npy_intp *tosort, npy_intp num) for (l = 0; l < num;) { n = acount_run_((type *)v, tosort, l, num, minrun); - stack[stack_ptr].s = l; - stack[stack_ptr].l = n; - ++stack_ptr; - ret = atry_collapse_((type *)v, tosort, stack, &stack_ptr, - &buffer); - + ret = afound_new_run_((type*)v, tosort, stack, &stack_ptr, n, num, &buffer); if (NPY_UNLIKELY(ret < 0)) { goto cleanup; } - + stack[stack_ptr].s = l; + stack[stack_ptr].l = n; + ++stack_ptr; l += n; } From eeab643fbdd39f3eb2c1613ab91d4a91b91c0987 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 8 Sep 2025 09:54:37 -0600 Subject: [PATCH 0416/1718] MAINT: update spin to 0.14 in requirements files --- requirements/build_requirements.txt | 2 +- requirements/ci32_requirements.txt | 2 +- requirements/ci_requirements.txt | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/requirements/build_requirements.txt b/requirements/build_requirements.txt index a51143a780e7..8bd2521933d7 100644 --- a/requirements/build_requirements.txt +++ b/requirements/build_requirements.txt @@ -1,5 +1,5 @@ meson-python>=0.13.1 Cython>=3.0.6 ninja -spin==0.13 +spin==0.14 build diff --git a/requirements/ci32_requirements.txt b/requirements/ci32_requirements.txt index 87831586e01e..10ac5f7ecc9f 100644 --- a/requirements/ci32_requirements.txt +++ b/requirements/ci32_requirements.txt @@ -1,3 
+1,3 @@ -spin==0.13 +spin==0.14 # Keep this in sync with ci_requirements.txt scipy-openblas32==0.3.30.0.1 diff --git a/requirements/ci_requirements.txt b/requirements/ci_requirements.txt index dd16787923e3..bd2f75ab8c1d 100644 --- a/requirements/ci_requirements.txt +++ b/requirements/ci_requirements.txt @@ -1,4 +1,4 @@ -spin==0.13 +spin==0.14 # Keep this in sync with ci32_requirements.txt scipy-openblas32==0.3.30.0.1 scipy-openblas64==0.3.30.0.1 From ba128dd0f051be8355721096c3f62301daaf5c96 Mon Sep 17 00:00:00 2001 From: Kelvin Li Date: Mon, 8 Sep 2025 15:58:13 -0400 Subject: [PATCH 0417/1718] TST: update test_regression::test_gph25784 (#29714) - restrict Windows only - use options that are supported on all compilers --- numpy/f2py/tests/test_regression.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/f2py/tests/test_regression.py b/numpy/f2py/tests/test_regression.py index 93eb29e8e723..c4636a764914 100644 --- a/numpy/f2py/tests/test_regression.py +++ b/numpy/f2py/tests/test_regression.py @@ -158,7 +158,7 @@ def test_gh26623(): @pytest.mark.slow -@pytest.mark.skipif(platform.system() not in ['Linux', 'Darwin'], reason='Unsupported on this platform for now') +@pytest.mark.skipif(platform.system() == "Windows", reason='Unsupported on this platform for now') def test_gh25784(): # Compile dubious file using passed flags try: @@ -167,7 +167,7 @@ def test_gh25784(): options=[ # Meson will collect and dedup these to pass to fortran_args: "--f77flags='-ffixed-form -O2'", - "--f90flags=\"-ffixed-form -Og\"", + "--f90flags=\"-ffixed-form -g\"", ], module_name="Blah", ) From e10df5274b9209ff329ecf1403f4bfe491dfb6be Mon Sep 17 00:00:00 2001 From: Britney Whittington <103079612+bwhitt7@users.noreply.github.com> Date: Mon, 8 Sep 2025 16:00:47 -0400 Subject: [PATCH 0418/1718] TST: Replace test_deprecations setup/teardown with context manager (#29692) * WIP: Replace xunit setup with methods * WIP: Revert "WIP: Replace xunit setup with methods" This 
reverts commit 87c374fd36d668d0ff2e14c6cea9340667fcb4cd. * TST: Change setup to context manager * TST: Remove thread_unsafe marker --- numpy/_core/tests/test_deprecations.py | 56 +++++++++++--------------- 1 file changed, 23 insertions(+), 33 deletions(-) diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index 252189ef321a..3c6ed420dbfa 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -20,22 +20,20 @@ class _DeprecationTestCase: message = '' warning_cls = DeprecationWarning - def setup_method(self): - self.warn_ctx = warnings.catch_warnings(record=True) - self.log = self.warn_ctx.__enter__() - - # Do *not* ignore other DeprecationWarnings. Ignoring warnings - # can give very confusing results because of - # https://bugs.python.org/issue4180 and it is probably simplest to - # try to keep the tests cleanly giving only the right warning type. - # (While checking them set to "error" those are ignored anyway) - # We still have them show up, because otherwise they would be raised - warnings.filterwarnings("always", category=self.warning_cls) - warnings.filterwarnings("always", message=self.message, - category=self.warning_cls) - - def teardown_method(self): - self.warn_ctx.__exit__() + @contextlib.contextmanager + def filter_warnings(self): + with warnings.catch_warnings(record=True) as w: + # Do *not* ignore other DeprecationWarnings. Ignoring warnings + # can give very confusing results because of + # https://bugs.python.org/issue4180 and it is probably simplest to + # try to keep the tests cleanly giving only the right warning type. 
+ # (While checking them set to "error" those are ignored anyway) + # We still have them show up, because otherwise they would be raised + warnings.filterwarnings("always", category=self.warning_cls) + warnings.filterwarnings("always", message=self.message, + category=self.warning_cls) + yield w + return def assert_deprecated(self, function, num=1, ignore_others=False, function_fails=False, @@ -72,9 +70,6 @@ def assert_deprecated(self, function, num=1, ignore_others=False, """ __tracebackhide__ = True # Hide traceback for py.test - # reset the log - self.log[:] = [] - if exceptions is np._NoValue: exceptions = (self.warning_cls,) @@ -83,11 +78,12 @@ def assert_deprecated(self, function, num=1, ignore_others=False, else: context_manager = contextlib.nullcontext() with context_manager: - function(*args, **kwargs) + with self.filter_warnings() as w_context: + function(*args, **kwargs) # just in case, clear the registry num_found = 0 - for warning in self.log: + for warning in w_context: if warning.category is self.warning_cls: num_found += 1 elif not ignore_others: @@ -95,8 +91,8 @@ def assert_deprecated(self, function, num=1, ignore_others=False, "expected %s but got: %s" % (self.warning_cls.__name__, warning.category)) if num is not None and num_found != num: - msg = f"{len(self.log)} warnings found but {num} expected." - lst = [str(w) for w in self.log] + msg = f"{len(w_context)} warnings found but {num} expected." 
+ lst = [str(w) for w in w_context] raise AssertionError("\n".join([msg] + lst)) with warnings.catch_warnings(): @@ -131,7 +127,6 @@ class _VisibleDeprecationTestCase(_DeprecationTestCase): class TestTestDeprecated: def test_assert_deprecated(self): test_case_instance = _DeprecationTestCase() - test_case_instance.setup_method() assert_raises(AssertionError, test_case_instance.assert_deprecated, lambda: None) @@ -140,7 +135,6 @@ def foo(): warnings.warn("foo", category=DeprecationWarning, stacklevel=2) test_case_instance.assert_deprecated(foo) - test_case_instance.teardown_method() class TestBincount(_DeprecationTestCase): @@ -242,9 +236,7 @@ def test_deprecated(self, func): @pytest.mark.parametrize("func", [np.percentile, np.quantile, np.nanpercentile, np.nanquantile]) def test_both_passed(self, func): - with warnings.catch_warnings(): - # catch the DeprecationWarning so that it does not raise: - warnings.simplefilter("always", DeprecationWarning) + with pytest.warns(DeprecationWarning): with pytest.raises(TypeError): func([0., 1.], 0., interpolation="nearest", method="nearest") @@ -365,12 +357,10 @@ def test_lib_functions_deprecation_call(self): class TestDeprecatedDTypeAliases(_DeprecationTestCase): def _check_for_warning(self, func): - with warnings.catch_warnings(record=True) as caught_warnings: + with pytest.warns(DeprecationWarning, + match="alias 'a' was deprecated in NumPy 2.0") as w: func() - assert len(caught_warnings) == 1 - w = caught_warnings[0] - assert w.category is DeprecationWarning - assert "alias 'a' was deprecated in NumPy 2.0" in str(w.message) + assert len(w) == 1 def test_a_dtype_alias(self): for dtype in ["a", "a10"]: From c26d2ef099d527106aa3ee413a7200e2ece43f86 Mon Sep 17 00:00:00 2001 From: Timmy Date: Mon, 8 Sep 2025 21:26:44 +0100 Subject: [PATCH 0419/1718] fix BUG: standardize 'Mean of empty slice' inconsistent message --- numpy/_core/_methods.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/_methods.py 
b/numpy/_core/_methods.py index a3d5b02a2a14..b013fd952beb 100644 --- a/numpy/_core/_methods.py +++ b/numpy/_core/_methods.py @@ -119,7 +119,7 @@ def _mean(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True): rcount = _count_reduce_items(arr, axis, keepdims=keepdims, where=where) if rcount == 0 if where is True else umr_any(rcount == 0, axis=None): - warnings.warn("Mean of empty slice.", RuntimeWarning, stacklevel=2) + warnings.warn("Mean of empty slice", RuntimeWarning, stacklevel=2) # Cast bool, unsigned int, and int to float64 by default if dtype is None: From 3ed58a635dd9bcb6ab367a1e22f15f384b743703 Mon Sep 17 00:00:00 2001 From: Nicholas Bidler Date: Mon, 8 Sep 2025 13:47:25 -0700 Subject: [PATCH 0420/1718] DOC: clarify numpy.asarray, numpy.asanyarray, numpy.asarray_chkfinite parameter 'order' (#29655) * DOC: clarified numpy.asarray parameter 'order' Changed description to make explicit that setting 'A' requires user to set 'copy' to 'True' to get the described behavior. Made explicit that 'A' has two cases where it acts as equivalent to setting 'F'. See 28247. * DOC: clarified numpy.asarray parameter 'order' See 28247. Changed description of 'order' to make the behavior of 'A' more explicit and how to cause it to preserve non-contiguous Fortran-order. Changed description of 'C' and 'F' to clarify that output will be contiguous. * DOC: clarified numpy.asarray() parameter 'order' See 28247. To differentiate between 'A' and 'K', made explicit that 'A' will not ensure the output is contiguous. 'K' has more complex behavior related to flags and stride detection, but will try to keep the output as close as possible to the input. * DOC: clarify numpy.asarray parameter 'order' See 28247. Made explicit that 'C' and 'F' can copy input to output, 'A' does not ensure contiguous output. * DOC numpy.asarray parameter 'order' description added line break * DOC: Additional request to change identical text blocks. 
* DOC: fixed linebreak position --- numpy/_core/_add_newdocs.py | 27 +++++++++++++++------------ numpy/lib/_function_base_impl.py | 15 +++++++++------ 2 files changed, 24 insertions(+), 18 deletions(-) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index e3009a490bd3..ad7029deaa0f 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -967,12 +967,13 @@ dtype : data-type, optional By default, the data-type is inferred from the input data. order : {'C', 'F', 'A', 'K'}, optional - Memory layout. 'A' and 'K' depend on the order of input array a. - 'C' row-major (C-style), - 'F' column-major (Fortran-style) memory representation. - 'A' (any) means 'F' if `a` is Fortran contiguous, 'C' otherwise - 'K' (keep) preserve input order - Defaults to 'K'. + The memory layout of the output. + 'C' gives a row-major layout (C-style), + 'F' gives a column-major layout (Fortran-style). + 'C' and 'F' will copy if needed to ensure the output format. + 'A' (any) is equivalent to 'F' if input a is non-contiguous or Fortran-contiguous, otherwise, it is equivalent to 'C'. + Unlike 'C' or 'F', 'A' does not ensure that the result is contiguous. + 'K' (keep) is the default and preserves the input order for the output. device : str, optional The device on which to place the created array. Default: ``None``. For Array-API interoperability only, so must be ``"cpu"`` if passed. @@ -1059,12 +1060,14 @@ dtype : data-type, optional By default, the data-type is inferred from the input data. order : {'C', 'F', 'A', 'K'}, optional - Memory layout. 'A' and 'K' depend on the order of input array a. - 'C' row-major (C-style), - 'F' column-major (Fortran-style) memory representation. - 'A' (any) means 'F' if `a` is Fortran contiguous, 'C' otherwise - 'K' (keep) preserve input order - Defaults to 'C'. + The memory layout of the output. + 'C' gives a row-major layout (C-style), + 'F' gives a column-major layout (Fortran-style). 
+ 'C' and 'F' will copy if needed to ensure the output format. + 'A' (any) is equivalent to 'F' if input a is non-contiguous or Fortran-contiguous, otherwise, it is equivalent to 'C'. + Unlike 'C' or 'F', 'A' does not ensure that the result is contiguous. + 'K' (keep) preserves the input order for the output. + 'C' is the default. device : str, optional The device on which to place the created array. Default: ``None``. For Array-API interoperability only, so must be ``"cpu"`` if passed. diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 416b18f387c3..421dee5578ab 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -613,12 +613,15 @@ def asarray_chkfinite(a, dtype=None, order=None): dtype : data-type, optional By default, the data-type is inferred from the input data. order : {'C', 'F', 'A', 'K'}, optional - Memory layout. 'A' and 'K' depend on the order of input array a. - 'C' row-major (C-style), - 'F' column-major (Fortran-style) memory representation. - 'A' (any) means 'F' if `a` is Fortran contiguous, 'C' otherwise - 'K' (keep) preserve input order - Defaults to 'C'. + The memory layout of the output. + 'C' gives a row-major layout (C-style), + 'F' gives a column-major layout (Fortran-style). + 'C' and 'F' will copy if needed to ensure the output format. + 'A' (any) is equivalent to 'F' if input a is non-contiguous or + Fortran-contiguous, otherwise, it is equivalent to 'C'. + Unlike 'C' or 'F', 'A' does not ensure that the result is contiguous. + 'K' (keep) preserves the input order for the output. + 'C' is the default. 
Returns ------- From 6fb592f15e13f0d1c2853d3ce453508db81b1831 Mon Sep 17 00:00:00 2001 From: Kelvin Li Date: Mon, 8 Sep 2025 17:59:09 -0400 Subject: [PATCH 0421/1718] TST: not to include the LONGDOUBLE test on AIX --- numpy/f2py/tests/test_array_from_pyobj.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/f2py/tests/test_array_from_pyobj.py b/numpy/f2py/tests/test_array_from_pyobj.py index a8f952752cf4..15383e9431cc 100644 --- a/numpy/f2py/tests/test_array_from_pyobj.py +++ b/numpy/f2py/tests/test_array_from_pyobj.py @@ -147,9 +147,9 @@ def is_intent_exact(self, *names): # and several tests fail as the alignment flag can be randomly true or false # when numpy gains an aligned allocator the tests could be enabled again # -# Furthermore, on macOS ARM64, LONGDOUBLE is an alias for DOUBLE. +# Furthermore, on macOS ARM64 and AIX, LONGDOUBLE is an alias for DOUBLE. if ((np.intp().dtype.itemsize != 4 or np.clongdouble().dtype.alignment <= 8) - and sys.platform != "win32" + and sys.platform not in ["win32", "aix"] and (platform.system(), platform.processor()) != ("Darwin", "arm")): _type_names.extend(["LONGDOUBLE", "CDOUBLE", "CLONGDOUBLE"]) _cast_dict["LONGDOUBLE"] = _cast_dict["LONG"] + [ From c4734a679e338feb59dc5ab074f138e14cbad682 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Mon, 11 Aug 2025 11:40:48 +0200 Subject: [PATCH 0422/1718] CI: run some wheel build jobs by default, remove the rest This accounts for the actual release builds moving to the separate `numpy-release` repo. We want to keep a decent subset of wheel builds in the main repo and always run them, just like other CI jobs that test some config that matters. 
--- .github/workflows/wheels.yml | 215 +------ tools/wheels/LICENSE_linux.txt | 902 ------------------------------ tools/wheels/LICENSE_osx.txt | 902 ------------------------------ tools/wheels/LICENSE_win32.txt | 881 ----------------------------- tools/wheels/check_license.py | 58 -- tools/wheels/cibw_before_build.sh | 13 - tools/wheels/cibw_test_command.sh | 3 +- tools/wheels/upload_wheels.sh | 54 -- 8 files changed, 9 insertions(+), 3019 deletions(-) delete mode 100644 tools/wheels/LICENSE_linux.txt delete mode 100644 tools/wheels/LICENSE_osx.txt delete mode 100644 tools/wheels/LICENSE_win32.txt delete mode 100644 tools/wheels/check_license.py delete mode 100644 tools/wheels/upload_wheels.sh diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 9cf98d481fe3..1885577f7a12 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -1,34 +1,14 @@ -# Workflow to build and test wheels. -# To work on the wheel building infrastructure on a fork, comment out: +# Workflow to build and test wheels, similarly to numpy/numpy-release. +# To work on these jobs in a fork, comment out: # -# if: github.repository == 'numpy/numpy' -# -# in the get_commit_message job. Be sure to include [wheel build] in your commit -# message to trigger the build. All files related to wheel building are located -# at tools/wheels/ -# Alternatively, you can add labels to the pull request in order to trigger wheel -# builds. 
-# The labels that trigger builds are: -# 36 - Build(for changes to the building process, -# 14 - Release(ensure wheels build before release) +# if: github.repository == 'numpy/numpy' name: Wheel builder on: - schedule: - # ┌───────────── minute (0 - 59) - # │ ┌───────────── hour (0 - 23) - # │ │ ┌───────────── day of the month (1 - 31) - # │ │ │ ┌───────────── month (1 - 12 or JAN-DEC) - # │ │ │ │ ┌───────────── day of the week (0 - 6 or SUN-SAT) - # │ │ │ │ │ - - cron: "42 2 * * SUN,WED" pull_request: branches: - main - maintenance/** - push: - tags: - - v* workflow_dispatch: concurrency: @@ -39,42 +19,12 @@ permissions: contents: read # to fetch code (actions/checkout) jobs: - get_commit_message: - name: Get commit message - runs-on: ubuntu-latest - # Only workflow_dispatch is enabled on forks. - # To enable this job and subsequent jobs on a fork for other events, comment out: - if: github.repository == 'numpy/numpy' || github.event_name == 'workflow_dispatch' - outputs: - message: ${{ steps.commit_message.outputs.message }} - steps: - - name: Checkout numpy - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - # Gets the correct commit message for pull request - with: - ref: ${{ github.event.pull_request.head.sha }} - persist-credentials: false - - name: Get commit message - id: commit_message - env: - HEAD: ${{ github.ref }} - run: | - set -xe - COMMIT_MSG=$(git log --no-merges -1 --oneline) - echo "message=$COMMIT_MSG" >> $GITHUB_OUTPUT - echo github.ref "$HEAD" - build_wheels: name: Build wheel ${{ matrix.python }}-${{ matrix.buildplat[1] }}-${{ matrix.buildplat[2] }} - needs: get_commit_message - if: >- - contains(needs.get_commit_message.outputs.message, '[wheel build]') || - github.event_name == 'schedule' || - github.event_name == 'workflow_dispatch' || - (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v') && ( ! 
endsWith(github.ref, 'dev0'))) + # To enable this job on a fork, comment out: + if: github.repository == 'numpy/numpy' runs-on: ${{ matrix.buildplat[0] }} strategy: - # Ensure that a wheel builder finishes even if another fails fail-fast: false matrix: # Github Actions doesn't support pairing matrix values together, let's improvise @@ -85,34 +35,13 @@ jobs: - [ubuntu-22.04-arm, manylinux_aarch64, ""] - [ubuntu-22.04-arm, musllinux_aarch64, ""] - [macos-13, macosx_x86_64, openblas] - - [macos-13, macosx_x86_64, accelerate] - [macos-14, macosx_arm64, openblas] - - [macos-14, macosx_arm64, accelerate] - [windows-2022, win_amd64, ""] - - [windows-2022, win32, ""] - [windows-11-arm, win_arm64, ""] - python: ["cp311", "cp312", "cp313", "cp313t", "cp314", "cp314t", "pp311"] - exclude: - # Don't build PyPy 32-bit windows - - buildplat: [windows-2022, win32, ""] - python: "pp311" - # Don't build PyPy arm64 windows - - buildplat: [windows-11-arm, win_arm64, ""] - python: "pp311" - # No PyPy on musllinux images - - buildplat: [ ubuntu-22.04, musllinux_x86_64, "" ] - python: "pp311" - - buildplat: [ ubuntu-22.04-arm, musllinux_aarch64, "" ] - python: "pp311" - - buildplat: [ macos13, macosx_x86_64, openblas ] - python: "cp313t" - - buildplat: [ macos13, macosx_x86_64, openblas ] - python: "cp314t" + python: ["cp311"] env: - IS_32_BIT: ${{ matrix.buildplat[1] == 'win32' }} - IS_PUSH: ${{ github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v') }} - IS_SCHEDULE_DISPATCH: ${{ github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' }} + IS_32_BIT: ${{ matrix.buildplat[1] == 'win32' }} # used in cibw_test_command.sh steps: - name: Checkout numpy uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 @@ -131,6 +60,7 @@ jobs: uses: ./.github/windows_arm64_steps - name: pkg-config-for-win + if: runner.os == 'windows' run: | choco install -y --no-progress --stoponfirstfailure --checksum 6004DF17818F5A6DBF19CB335CC92702 pkgconfiglite 
$CIBW = "${{ github.workspace }}/.openblas" @@ -140,12 +70,6 @@ jobs: # passed through, so convert it to '/' $CIBW = $CIBW.replace("\","/") echo "CIBW_ENVIRONMENT_WINDOWS=PKG_CONFIG_PATH=$CIBW" >> $env:GITHUB_ENV - if: runner.os == 'windows' - - # Used to push the built wheels - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 - with: - python-version: "3.x" - name: Setup macOS if: matrix.buildplat[0] == 'macos-13' || matrix.buildplat[0] == 'macos-14' @@ -182,126 +106,3 @@ jobs: with: name: ${{ matrix.python }}-${{ matrix.buildplat[1] }}-${{ matrix.buildplat[2] }} path: ./wheelhouse/*.whl - - - name: install micromamba - uses: mamba-org/setup-micromamba@7f29b8b80078b1b601dfa018b0f7425c587c63bb - if: ${{ matrix.buildplat[1] != 'win_arm64' }} # unsupported platform at the moment - with: - # for installation of anaconda-client, required for upload to - # anaconda.org - # Note that this step is *after* specific pythons have been used to - # build and test the wheel - # for installation of anaconda-client, for upload to anaconda.org - # environment will be activated after creation, and in future bash steps - init-shell: bash - environment-name: upload-env - create-args: >- - anaconda-client - - - name: win-arm64 install anaconda client - if: ${{ matrix.buildplat[1] == 'win_arm64' }} - run: | - # Rust installation needed for rpds-py. - Invoke-WebRequest https://static.rust-lang.org/rustup/dist/aarch64-pc-windows-msvc/rustup-init.exe -UseBasicParsing -Outfile rustup-init.exe - .\rustup-init.exe -y - $env:PATH="$env:PATH;$env:USERPROFILE\.cargo\bin" - pip install anaconda-client - - - - name: Upload wheels - if: success() && github.repository == 'numpy/numpy' - shell: bash -el {0} - # see https://github.com/marketplace/actions/setup-miniconda for why - # `-el {0}` is required. 
- env: - NUMPY_STAGING_UPLOAD_TOKEN: ${{ secrets.NUMPY_STAGING_UPLOAD_TOKEN }} - NUMPY_NIGHTLY_UPLOAD_TOKEN: ${{ secrets.NUMPY_NIGHTLY_UPLOAD_TOKEN }} - run: | - source tools/wheels/upload_wheels.sh - set_upload_vars - # trigger an upload to - # https://anaconda.org/scientific-python-nightly-wheels/numpy - # for cron jobs or "Run workflow" (restricted to main branch). - # Tags will upload to - # https://anaconda.org/multibuild-wheels-staging/numpy - # The tokens were originally generated at anaconda.org - upload_wheels - - build_sdist: - name: Build sdist - needs: get_commit_message - if: >- - contains(needs.get_commit_message.outputs.message, '[wheel build]') || - github.event_name == 'schedule' || - github.event_name == 'workflow_dispatch' || - (github.event_name == 'pull_request' && - (contains(github.event.pull_request.labels.*.name, '36 - Build') || - contains(github.event.pull_request.labels.*.name, '14 - Release'))) || - (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v') && ( ! 
endsWith(github.ref, 'dev0'))) - runs-on: ubuntu-latest - env: - IS_PUSH: ${{ github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v') }} - # commented out so the sdist doesn't upload to nightly - # IS_SCHEDULE_DISPATCH: ${{ github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' }} - steps: - - name: Checkout numpy - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - with: - submodules: true - persist-credentials: false - # Used to push the built wheels - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 - with: - # Build sdist on lowest supported Python - python-version: "3.11" - - name: Build sdist - run: | - python -m pip install -U pip build - python -m build --sdist -Csetup-args=-Dallow-noblas=true - - name: Test the sdist - run: | - # TODO: Don't run test suite, and instead build wheels from sdist - # Depends on pypa/cibuildwheel#1020 - python -m pip install dist/*.gz -Csetup-args=-Dallow-noblas=true - pip install -r requirements/test_requirements.txt - cd .. 
# Can't import numpy within numpy src directory - python -c "import numpy, sys; print(numpy.__version__); sys.exit(numpy.test() is False)" - - - name: Check README rendering for PyPI - run: | - python -mpip install twine - twine check dist/* - - - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 - with: - name: sdist - path: ./dist/* - - - uses: conda-incubator/setup-miniconda@835234971496cad1653abb28a638a281cf32541f # v3.2.0 - with: - # for installation of anaconda-client, required for upload to - # anaconda.org - # default (and activated) environment name is test - # Note that this step is *after* specific pythons have been used to - # build and test - auto-update-conda: true - python-version: "3.11" - - - name: Upload sdist - if: success() && github.repository == 'numpy/numpy' - shell: bash -el {0} - env: - NUMPY_STAGING_UPLOAD_TOKEN: ${{ secrets.NUMPY_STAGING_UPLOAD_TOKEN }} - # commented out so the sdist doesn't upload to nightly - # NUMPY_NIGHTLY_UPLOAD_TOKEN: ${{ secrets.NUMPY_NIGHTLY_UPLOAD_TOKEN }} - run: | - conda install -y anaconda-client - source tools/wheels/upload_wheels.sh - set_upload_vars - # trigger an upload to - # https://anaconda.org/scientific-python-nightly-wheels/numpy - # for cron jobs or "Run workflow" (restricted to main branch). 
- # Tags will upload to - # https://anaconda.org/multibuild-wheels-staging/numpy - # The tokens were originally generated at anaconda.org - upload_wheels diff --git a/tools/wheels/LICENSE_linux.txt b/tools/wheels/LICENSE_linux.txt deleted file mode 100644 index db488c6cff47..000000000000 --- a/tools/wheels/LICENSE_linux.txt +++ /dev/null @@ -1,902 +0,0 @@ - ----- - -This binary distribution of NumPy also bundles the following software: - - -Name: OpenBLAS -Files: numpy.libs/libscipy_openblas*.so -Description: bundled as a dynamically linked library -Availability: https://github.com/OpenMathLib/OpenBLAS/ -License: BSD-3-Clause - Copyright (c) 2011-2014, The OpenBLAS Project - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - 3. Neither the name of the OpenBLAS project nor the names of - its contributors may be used to endorse or promote products - derived from this software without specific prior written - permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, - OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE - USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -Name: LAPACK -Files: numpy.libs/libscipy_openblas*.so -Description: bundled in OpenBLAS -Availability: https://github.com/OpenMathLib/OpenBLAS/ -License: BSD-3-Clause-Open-MPI - Copyright (c) 1992-2013 The University of Tennessee and The University - of Tennessee Research Foundation. All rights - reserved. - Copyright (c) 2000-2013 The University of California Berkeley. All - rights reserved. - Copyright (c) 2006-2013 The University of Colorado Denver. All rights - reserved. - - $COPYRIGHT$ - - Additional copyrights may follow - - $HEADER$ - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer listed - in this license in the documentation and/or other materials - provided with the distribution. - - - Neither the name of the copyright holders nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - The copyright holders provide no reassurances that the source code - provided does not infringe any patent, copyright, or any other - intellectual property rights of third parties. 
The copyright holders - disclaim any liability to any recipient for claims brought against - recipient by any third party for infringement of that parties - intellectual property rights. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -Name: GCC runtime library -Files: numpy.libs/libgfortran*.so -Description: dynamically linked to files compiled with gcc -Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libgfortran -License: GPL-3.0-or-later WITH GCC-exception-3.1 - Copyright (C) 2002-2017 Free Software Foundation, Inc. - - Libgfortran is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 3, or (at your option) - any later version. - - Libgfortran is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - Under Section 7 of GPL version 3, you are granted additional - permissions described in the GCC Runtime Library Exception, version - 3.1, as published by the Free Software Foundation. 
- - You should have received a copy of the GNU General Public License and - a copy of the GCC Runtime Library Exception along with this program; - see the files COPYING3 and COPYING.RUNTIME respectively. If not, see - . - ----- - -Full text of license texts referred to above follows (that they are -listed below does not necessarily imply the conditions apply to the -present binary release): - ----- - -GCC RUNTIME LIBRARY EXCEPTION - -Version 3.1, 31 March 2009 - -Copyright (C) 2009 Free Software Foundation, Inc. - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. - -This GCC Runtime Library Exception ("Exception") is an additional -permission under section 7 of the GNU General Public License, version -3 ("GPLv3"). It applies to a given file (the "Runtime Library") that -bears a notice placed by the copyright holder of the file stating that -the file is governed by GPLv3 along with this Exception. - -When you use GCC to compile a program, GCC may combine portions of -certain GCC header files and runtime libraries with the compiled -program. The purpose of this Exception is to allow compilation of -non-GPL (including proprietary) programs to use, in this way, the -header files and runtime libraries covered by this Exception. - -0. Definitions. - -A file is an "Independent Module" if it either requires the Runtime -Library for execution after a Compilation Process, or makes use of an -interface provided by the Runtime Library, but is not otherwise based -on the Runtime Library. - -"GCC" means a version of the GNU Compiler Collection, with or without -modifications, governed by version 3 (or a specified later version) of -the GNU General Public License (GPL) with the option of using any -subsequent versions published by the FSF. - -"GPL-compatible Software" is software whose conditions of propagation, -modification and use would permit combination with GCC in accord with -the license of GCC. 
- -"Target Code" refers to output from any compiler for a real or virtual -target processor architecture, in executable form or suitable for -input to an assembler, loader, linker and/or execution -phase. Notwithstanding that, Target Code does not include data in any -format that is used as a compiler intermediate representation, or used -for producing a compiler intermediate representation. - -The "Compilation Process" transforms code entirely represented in -non-intermediate languages designed for human-written code, and/or in -Java Virtual Machine byte code, into Target Code. Thus, for example, -use of source code generators and preprocessors need not be considered -part of the Compilation Process, since the Compilation Process can be -understood as starting with the output of the generators or -preprocessors. - -A Compilation Process is "Eligible" if it is done using GCC, alone or -with other GPL-compatible software, or if it is done without using any -work based on GCC. For example, using non-GPL-compatible Software to -optimize any GCC intermediate representations would not qualify as an -Eligible Compilation Process. - -1. Grant of Additional Permission. - -You have permission to propagate a work of Target Code formed by -combining the Runtime Library with Independent Modules, even if such -propagation would otherwise violate the terms of GPLv3, provided that -all Target Code was generated by Eligible Compilation Processes. You -may then convey such a combination under terms of your choice, -consistent with the licensing of the Independent Modules. - -2. No Weakening of GCC Copyleft. - -The availability of this Exception does not imply any general -presumption that third-party software is unaffected by the copyleft -requirements of the license of GCC. - ----- - - GNU GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. 
- Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The GNU General Public License is a free, copyleft license for -software and other kinds of works. - - The licenses for most software and other practical works are designed -to take away your freedom to share and change the works. By contrast, -the GNU General Public License is intended to guarantee your freedom to -share and change all versions of a program--to make sure it remains free -software for all its users. We, the Free Software Foundation, use the -GNU General Public License for most of our software; it applies also to -any other work released this way by its authors. You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -them if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs, and that you know you can do these things. - - To protect your rights, we need to prevent others from denying you -these rights or asking you to surrender the rights. Therefore, you have -certain responsibilities if you distribute copies of the software, or if -you modify it: responsibilities to respect the freedom of others. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must pass on to the recipients the same -freedoms that you received. You must make sure that they, too, receive -or can get the source code. And you must show them these terms so they -know their rights. - - Developers that use the GNU GPL protect your rights with two steps: -(1) assert copyright on the software, and (2) offer you this License -giving you legal permission to copy, distribute and/or modify it. 
- - For the developers' and authors' protection, the GPL clearly explains -that there is no warranty for this free software. For both users' and -authors' sake, the GPL requires that modified versions be marked as -changed, so that their problems will not be attributed erroneously to -authors of previous versions. - - Some devices are designed to deny users access to install or run -modified versions of the software inside them, although the manufacturer -can do so. This is fundamentally incompatible with the aim of -protecting users' freedom to change the software. The systematic -pattern of such abuse occurs in the area of products for individuals to -use, which is precisely where it is most unacceptable. Therefore, we -have designed this version of the GPL to prohibit the practice for those -products. If such problems arise substantially in other domains, we -stand ready to extend this provision to those domains in future versions -of the GPL, as needed to protect the freedom of users. - - Finally, every program is threatened constantly by software patents. -States should not allow patents to restrict development and use of -software on general-purpose computers, but in those that do, we wish to -avoid the special danger that patents applied to a free program could -make it effectively proprietary. To prevent this, the GPL assures that -patents cannot be used to render the program non-free. - - The precise terms and conditions for copying, distribution and -modification follow. - - TERMS AND CONDITIONS - - 0. Definitions. - - "This License" refers to version 3 of the GNU General Public License. - - "Copyright" also means copyright-like laws that apply to other kinds of -works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this -License. Each licensee is addressed as "you". "Licensees" and -"recipients" may be individuals or organizations. 
- - To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of an -exact copy. The resulting work is called a "modified version" of the -earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based -on the Program. - - To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. - - To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. Mere interaction with a user through -a computer network, with no transfer of a copy, is not conveying. - - An interactive user interface displays "Appropriate Legal Notices" -to the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - - 1. Source Code. - - The "source code" for a work means the preferred form of the work -for making modifications to it. "Object code" means any non-source -form of a work. - - A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. 
- - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. - - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. - - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. Basic Permissions. - - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. This License explicitly affirms your unlimited -permission to run the unmodified Program. 
The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. - - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. - - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. - - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. 
- - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. - - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. - - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. - - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. 
- - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. - - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. - - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. 
This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. You need not require recipients to copy the - Corresponding Source along with the object code. If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. - - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. - - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. - - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. 
For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. - - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. - - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. 
Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. - - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. - - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. -Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. - - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. (Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. 
- - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. 
- - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. - - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). - - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. - - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. - - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. 
Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. 
- - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". - - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. - - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. - - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. 
- - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. 
You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. - - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot convey a -covered work so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you may -not convey it at all. For example, if you agree to terms that obligate you -to collect a royalty for further conveying from those to whom you convey -the Program, the only way you could satisfy both those terms and this -License would be to refrain entirely from conveying the Program. - - 13. Use with the GNU Affero General Public License. - - Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU Affero General Public License into a single -combined work, and to convey the resulting work. 
The terms of this -License will continue to apply to the part which is the covered work, -but the special requirements of the GNU Affero General Public License, -section 13, concerning interaction through a network will apply to the -combination as such. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of -the GNU General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - - Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU General Public License, you may choose any version ever published -by the Free Software Foundation. - - If the Program specifies that a proxy can decide which future -versions of the GNU General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - - Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - - 15. Disclaimer of Warranty. - - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY -OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM -IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. Limitation of Liability. - - IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS -THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE -USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF -DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD -PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), -EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF -SUCH DAMAGES. - - 17. Interpretation of Sections 15 and 16. - - If the disclaimer of warranty and limitation of liability provided -above cannot be given local legal effect according to their terms, -reviewing courts shall apply local law that most closely approximates -an absolute waiver of all civil liability in connection with the -Program, unless a warranty or assumption of liability accompanies a -copy of the Program in return for a fee. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -state the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. 
- - - Copyright (C) - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see . - -Also add information on how to contact you by electronic and paper mail. - - If the program does terminal interaction, make it output a short -notice like this when it starts in an interactive mode: - - Copyright (C) - This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, your program's commands -might be different; for a GUI interface, you would use an "about box". - - You should also get your employer (if you work as a programmer) or school, -if any, to sign a "copyright disclaimer" for the program, if necessary. -For more information on this, and how to apply and follow the GNU GPL, see -. - - The GNU General Public License does not permit incorporating your program -into proprietary programs. If your program is a subroutine library, you -may consider it more useful to permit linking proprietary applications with -the library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. But first, please read -. 
- -Name: libquadmath -Files: numpy.libs/libquadmath*.so -Description: dynamically linked to files compiled with gcc -Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libquadmath -License: LGPL-2.1-or-later - - GCC Quad-Precision Math Library - Copyright (C) 2010-2019 Free Software Foundation, Inc. - Written by Francois-Xavier Coudert - - This file is part of the libquadmath library. - Libquadmath is free software; you can redistribute it and/or - modify it under the terms of the GNU Library General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - Libquadmath is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.html diff --git a/tools/wheels/LICENSE_osx.txt b/tools/wheels/LICENSE_osx.txt deleted file mode 100644 index 5cea18441b35..000000000000 --- a/tools/wheels/LICENSE_osx.txt +++ /dev/null @@ -1,902 +0,0 @@ - ----- - -This binary distribution of NumPy also bundles the following software: - - -Name: OpenBLAS -Files: numpy/.dylibs/libscipy_openblas*.so -Description: bundled as a dynamically linked library -Availability: https://github.com/OpenMathLib/OpenBLAS/ -License: BSD-3-Clause - Copyright (c) 2011-2014, The OpenBLAS Project - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - 2. 
Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - 3. Neither the name of the OpenBLAS project nor the names of - its contributors may be used to endorse or promote products - derived from this software without specific prior written - permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, - OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE - USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -Name: LAPACK -Files: numpy/.dylibs/libscipy_openblas*.so -Description: bundled in OpenBLAS -Availability: https://github.com/OpenMathLib/OpenBLAS/ -License: BSD-3-Clause-Open-MPI - Copyright (c) 1992-2013 The University of Tennessee and The University - of Tennessee Research Foundation. All rights - reserved. - Copyright (c) 2000-2013 The University of California Berkeley. All - rights reserved. - Copyright (c) 2006-2013 The University of Colorado Denver. All rights - reserved. 
- - $COPYRIGHT$ - - Additional copyrights may follow - - $HEADER$ - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer listed - in this license in the documentation and/or other materials - provided with the distribution. - - - Neither the name of the copyright holders nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - The copyright holders provide no reassurances that the source code - provided does not infringe any patent, copyright, or any other - intellectual property rights of third parties. The copyright holders - disclaim any liability to any recipient for claims brought against - recipient by any third party for infringement of that parties - intellectual property rights. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- - -Name: GCC runtime library -Files: numpy/.dylibs/libgfortran*, numpy/.dylibs/libgcc* -Description: dynamically linked to files compiled with gcc -Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libgfortran -License: GPL-3.0-or-later WITH GCC-exception-3.1 - Copyright (C) 2002-2017 Free Software Foundation, Inc. - - Libgfortran is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 3, or (at your option) - any later version. - - Libgfortran is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - Under Section 7 of GPL version 3, you are granted additional - permissions described in the GCC Runtime Library Exception, version - 3.1, as published by the Free Software Foundation. - - You should have received a copy of the GNU General Public License and - a copy of the GCC Runtime Library Exception along with this program; - see the files COPYING3 and COPYING.RUNTIME respectively. If not, see - . - ----- - -Full text of license texts referred to above follows (that they are -listed below does not necessarily imply the conditions apply to the -present binary release): - ----- - -GCC RUNTIME LIBRARY EXCEPTION - -Version 3.1, 31 March 2009 - -Copyright (C) 2009 Free Software Foundation, Inc. - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. - -This GCC Runtime Library Exception ("Exception") is an additional -permission under section 7 of the GNU General Public License, version -3 ("GPLv3"). It applies to a given file (the "Runtime Library") that -bears a notice placed by the copyright holder of the file stating that -the file is governed by GPLv3 along with this Exception. 
- -When you use GCC to compile a program, GCC may combine portions of -certain GCC header files and runtime libraries with the compiled -program. The purpose of this Exception is to allow compilation of -non-GPL (including proprietary) programs to use, in this way, the -header files and runtime libraries covered by this Exception. - -0. Definitions. - -A file is an "Independent Module" if it either requires the Runtime -Library for execution after a Compilation Process, or makes use of an -interface provided by the Runtime Library, but is not otherwise based -on the Runtime Library. - -"GCC" means a version of the GNU Compiler Collection, with or without -modifications, governed by version 3 (or a specified later version) of -the GNU General Public License (GPL) with the option of using any -subsequent versions published by the FSF. - -"GPL-compatible Software" is software whose conditions of propagation, -modification and use would permit combination with GCC in accord with -the license of GCC. - -"Target Code" refers to output from any compiler for a real or virtual -target processor architecture, in executable form or suitable for -input to an assembler, loader, linker and/or execution -phase. Notwithstanding that, Target Code does not include data in any -format that is used as a compiler intermediate representation, or used -for producing a compiler intermediate representation. - -The "Compilation Process" transforms code entirely represented in -non-intermediate languages designed for human-written code, and/or in -Java Virtual Machine byte code, into Target Code. Thus, for example, -use of source code generators and preprocessors need not be considered -part of the Compilation Process, since the Compilation Process can be -understood as starting with the output of the generators or -preprocessors. 
- -A Compilation Process is "Eligible" if it is done using GCC, alone or -with other GPL-compatible software, or if it is done without using any -work based on GCC. For example, using non-GPL-compatible Software to -optimize any GCC intermediate representations would not qualify as an -Eligible Compilation Process. - -1. Grant of Additional Permission. - -You have permission to propagate a work of Target Code formed by -combining the Runtime Library with Independent Modules, even if such -propagation would otherwise violate the terms of GPLv3, provided that -all Target Code was generated by Eligible Compilation Processes. You -may then convey such a combination under terms of your choice, -consistent with the licensing of the Independent Modules. - -2. No Weakening of GCC Copyleft. - -The availability of this Exception does not imply any general -presumption that third-party software is unaffected by the copyleft -requirements of the license of GCC. - ----- - - GNU GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The GNU General Public License is a free, copyleft license for -software and other kinds of works. - - The licenses for most software and other practical works are designed -to take away your freedom to share and change the works. By contrast, -the GNU General Public License is intended to guarantee your freedom to -share and change all versions of a program--to make sure it remains free -software for all its users. We, the Free Software Foundation, use the -GNU General Public License for most of our software; it applies also to -any other work released this way by its authors. You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. 
Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -them if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs, and that you know you can do these things. - - To protect your rights, we need to prevent others from denying you -these rights or asking you to surrender the rights. Therefore, you have -certain responsibilities if you distribute copies of the software, or if -you modify it: responsibilities to respect the freedom of others. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must pass on to the recipients the same -freedoms that you received. You must make sure that they, too, receive -or can get the source code. And you must show them these terms so they -know their rights. - - Developers that use the GNU GPL protect your rights with two steps: -(1) assert copyright on the software, and (2) offer you this License -giving you legal permission to copy, distribute and/or modify it. - - For the developers' and authors' protection, the GPL clearly explains -that there is no warranty for this free software. For both users' and -authors' sake, the GPL requires that modified versions be marked as -changed, so that their problems will not be attributed erroneously to -authors of previous versions. - - Some devices are designed to deny users access to install or run -modified versions of the software inside them, although the manufacturer -can do so. This is fundamentally incompatible with the aim of -protecting users' freedom to change the software. The systematic -pattern of such abuse occurs in the area of products for individuals to -use, which is precisely where it is most unacceptable. Therefore, we -have designed this version of the GPL to prohibit the practice for those -products. 
If such problems arise substantially in other domains, we -stand ready to extend this provision to those domains in future versions -of the GPL, as needed to protect the freedom of users. - - Finally, every program is threatened constantly by software patents. -States should not allow patents to restrict development and use of -software on general-purpose computers, but in those that do, we wish to -avoid the special danger that patents applied to a free program could -make it effectively proprietary. To prevent this, the GPL assures that -patents cannot be used to render the program non-free. - - The precise terms and conditions for copying, distribution and -modification follow. - - TERMS AND CONDITIONS - - 0. Definitions. - - "This License" refers to version 3 of the GNU General Public License. - - "Copyright" also means copyright-like laws that apply to other kinds of -works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this -License. Each licensee is addressed as "you". "Licensees" and -"recipients" may be individuals or organizations. - - To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of an -exact copy. The resulting work is called a "modified version" of the -earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based -on the Program. - - To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. - - To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. 
Mere interaction with a user through -a computer network, with no transfer of a copy, is not conveying. - - An interactive user interface displays "Appropriate Legal Notices" -to the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - - 1. Source Code. - - The "source code" for a work means the preferred form of the work -for making modifications to it. "Object code" means any non-source -form of a work. - - A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. - - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. 
- - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. - - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. Basic Permissions. - - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. This License explicitly affirms your unlimited -permission to run the unmodified Program. The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. - - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. 
Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. - - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. - - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. - - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. 
- - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. - - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. - - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. - - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. 
- - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. - - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. You need not require recipients to copy the - Corresponding Source along with the object code. 
If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. - - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. - - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. - - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. 
- - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. - - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. - - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. 
- - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. -Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. - - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. (Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. - - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the 
material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. - - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. - - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). - - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. 
- - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. - - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. 
If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. - - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". - - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. - - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. 
- - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. - - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. 
You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. - - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot convey a -covered work so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you may -not convey it at all. For example, if you agree to terms that obligate you -to collect a royalty for further conveying from those to whom you convey -the Program, the only way you could satisfy both those terms and this -License would be to refrain entirely from conveying the Program. - - 13. Use with the GNU Affero General Public License. - - Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU Affero General Public License into a single -combined work, and to convey the resulting work. 
The terms of this -License will continue to apply to the part which is the covered work, -but the special requirements of the GNU Affero General Public License, -section 13, concerning interaction through a network will apply to the -combination as such. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of -the GNU General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - - Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU General Public License, you may choose any version ever published -by the Free Software Foundation. - - If the Program specifies that a proxy can decide which future -versions of the GNU General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - - Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - - 15. Disclaimer of Warranty. - - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY -OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM -IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. Limitation of Liability. - - IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS -THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE -USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF -DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD -PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), -EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF -SUCH DAMAGES. - - 17. Interpretation of Sections 15 and 16. - - If the disclaimer of warranty and limitation of liability provided -above cannot be given local legal effect according to their terms, -reviewing courts shall apply local law that most closely approximates -an absolute waiver of all civil liability in connection with the -Program, unless a warranty or assumption of liability accompanies a -copy of the Program in return for a fee. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -state the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. 
- - - Copyright (C) - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see . - -Also add information on how to contact you by electronic and paper mail. - - If the program does terminal interaction, make it output a short -notice like this when it starts in an interactive mode: - - Copyright (C) - This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, your program's commands -might be different; for a GUI interface, you would use an "about box". - - You should also get your employer (if you work as a programmer) or school, -if any, to sign a "copyright disclaimer" for the program, if necessary. -For more information on this, and how to apply and follow the GNU GPL, see -. - - The GNU General Public License does not permit incorporating your program -into proprietary programs. If your program is a subroutine library, you -may consider it more useful to permit linking proprietary applications with -the library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. But first, please read -. 
- -Name: libquadmath -Files: numpy/.dylibs/libquadmath*.so -Description: dynamically linked to files compiled with gcc -Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libquadmath -License: LGPL-2.1-or-later - - GCC Quad-Precision Math Library - Copyright (C) 2010-2019 Free Software Foundation, Inc. - Written by Francois-Xavier Coudert - - This file is part of the libquadmath library. - Libquadmath is free software; you can redistribute it and/or - modify it under the terms of the GNU Library General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - Libquadmath is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.html diff --git a/tools/wheels/LICENSE_win32.txt b/tools/wheels/LICENSE_win32.txt deleted file mode 100644 index aed96845583b..000000000000 --- a/tools/wheels/LICENSE_win32.txt +++ /dev/null @@ -1,881 +0,0 @@ - ----- - -This binary distribution of NumPy also bundles the following software: - - -Name: OpenBLAS -Files: numpy.libs\libscipy_openblas*.dll -Description: bundled as a dynamically linked library -Availability: https://github.com/OpenMathLib/OpenBLAS/ -License: BSD-3-Clause - Copyright (c) 2011-2014, The OpenBLAS Project - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - 2. 
Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - 3. Neither the name of the OpenBLAS project nor the names of - its contributors may be used to endorse or promote products - derived from this software without specific prior written - permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, - OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE - USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -Name: LAPACK -Files: numpy.libs\libscipy_openblas*.dll -Description: bundled in OpenBLAS -Availability: https://github.com/OpenMathLib/OpenBLAS/ -License: BSD-3-Clause-Open-MPI - Copyright (c) 1992-2013 The University of Tennessee and The University - of Tennessee Research Foundation. All rights - reserved. - Copyright (c) 2000-2013 The University of California Berkeley. All - rights reserved. - Copyright (c) 2006-2013 The University of Colorado Denver. All rights - reserved. 
- - $COPYRIGHT$ - - Additional copyrights may follow - - $HEADER$ - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer listed - in this license in the documentation and/or other materials - provided with the distribution. - - - Neither the name of the copyright holders nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - The copyright holders provide no reassurances that the source code - provided does not infringe any patent, copyright, or any other - intellectual property rights of third parties. The copyright holders - disclaim any liability to any recipient for claims brought against - recipient by any third party for infringement of that parties - intellectual property rights. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- - -Name: GCC runtime library -Files: numpy.libs\libscipy_openblas*.dll -Description: statically linked to files compiled with gcc -Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libgfortran -License: GPL-3.0-or-later WITH GCC-exception-3.1 - Copyright (C) 2002-2017 Free Software Foundation, Inc. - - Libgfortran is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 3, or (at your option) - any later version. - - Libgfortran is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - Under Section 7 of GPL version 3, you are granted additional - permissions described in the GCC Runtime Library Exception, version - 3.1, as published by the Free Software Foundation. - - You should have received a copy of the GNU General Public License and - a copy of the GCC Runtime Library Exception along with this program; - see the files COPYING3 and COPYING.RUNTIME respectively. If not, see - . - ----- - -Full text of license texts referred to above follows (that they are -listed below does not necessarily imply the conditions apply to the -present binary release): - ----- - -GCC RUNTIME LIBRARY EXCEPTION - -Version 3.1, 31 March 2009 - -Copyright (C) 2009 Free Software Foundation, Inc. - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. - -This GCC Runtime Library Exception ("Exception") is an additional -permission under section 7 of the GNU General Public License, version -3 ("GPLv3"). It applies to a given file (the "Runtime Library") that -bears a notice placed by the copyright holder of the file stating that -the file is governed by GPLv3 along with this Exception. 
- -When you use GCC to compile a program, GCC may combine portions of -certain GCC header files and runtime libraries with the compiled -program. The purpose of this Exception is to allow compilation of -non-GPL (including proprietary) programs to use, in this way, the -header files and runtime libraries covered by this Exception. - -0. Definitions. - -A file is an "Independent Module" if it either requires the Runtime -Library for execution after a Compilation Process, or makes use of an -interface provided by the Runtime Library, but is not otherwise based -on the Runtime Library. - -"GCC" means a version of the GNU Compiler Collection, with or without -modifications, governed by version 3 (or a specified later version) of -the GNU General Public License (GPL) with the option of using any -subsequent versions published by the FSF. - -"GPL-compatible Software" is software whose conditions of propagation, -modification and use would permit combination with GCC in accord with -the license of GCC. - -"Target Code" refers to output from any compiler for a real or virtual -target processor architecture, in executable form or suitable for -input to an assembler, loader, linker and/or execution -phase. Notwithstanding that, Target Code does not include data in any -format that is used as a compiler intermediate representation, or used -for producing a compiler intermediate representation. - -The "Compilation Process" transforms code entirely represented in -non-intermediate languages designed for human-written code, and/or in -Java Virtual Machine byte code, into Target Code. Thus, for example, -use of source code generators and preprocessors need not be considered -part of the Compilation Process, since the Compilation Process can be -understood as starting with the output of the generators or -preprocessors. 
- -A Compilation Process is "Eligible" if it is done using GCC, alone or -with other GPL-compatible software, or if it is done without using any -work based on GCC. For example, using non-GPL-compatible Software to -optimize any GCC intermediate representations would not qualify as an -Eligible Compilation Process. - -1. Grant of Additional Permission. - -You have permission to propagate a work of Target Code formed by -combining the Runtime Library with Independent Modules, even if such -propagation would otherwise violate the terms of GPLv3, provided that -all Target Code was generated by Eligible Compilation Processes. You -may then convey such a combination under terms of your choice, -consistent with the licensing of the Independent Modules. - -2. No Weakening of GCC Copyleft. - -The availability of this Exception does not imply any general -presumption that third-party software is unaffected by the copyleft -requirements of the license of GCC. - ----- - - GNU GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The GNU General Public License is a free, copyleft license for -software and other kinds of works. - - The licenses for most software and other practical works are designed -to take away your freedom to share and change the works. By contrast, -the GNU General Public License is intended to guarantee your freedom to -share and change all versions of a program--to make sure it remains free -software for all its users. We, the Free Software Foundation, use the -GNU General Public License for most of our software; it applies also to -any other work released this way by its authors. You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. 
Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -them if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs, and that you know you can do these things. - - To protect your rights, we need to prevent others from denying you -these rights or asking you to surrender the rights. Therefore, you have -certain responsibilities if you distribute copies of the software, or if -you modify it: responsibilities to respect the freedom of others. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must pass on to the recipients the same -freedoms that you received. You must make sure that they, too, receive -or can get the source code. And you must show them these terms so they -know their rights. - - Developers that use the GNU GPL protect your rights with two steps: -(1) assert copyright on the software, and (2) offer you this License -giving you legal permission to copy, distribute and/or modify it. - - For the developers' and authors' protection, the GPL clearly explains -that there is no warranty for this free software. For both users' and -authors' sake, the GPL requires that modified versions be marked as -changed, so that their problems will not be attributed erroneously to -authors of previous versions. - - Some devices are designed to deny users access to install or run -modified versions of the software inside them, although the manufacturer -can do so. This is fundamentally incompatible with the aim of -protecting users' freedom to change the software. The systematic -pattern of such abuse occurs in the area of products for individuals to -use, which is precisely where it is most unacceptable. Therefore, we -have designed this version of the GPL to prohibit the practice for those -products. 
If such problems arise substantially in other domains, we -stand ready to extend this provision to those domains in future versions -of the GPL, as needed to protect the freedom of users. - - Finally, every program is threatened constantly by software patents. -States should not allow patents to restrict development and use of -software on general-purpose computers, but in those that do, we wish to -avoid the special danger that patents applied to a free program could -make it effectively proprietary. To prevent this, the GPL assures that -patents cannot be used to render the program non-free. - - The precise terms and conditions for copying, distribution and -modification follow. - - TERMS AND CONDITIONS - - 0. Definitions. - - "This License" refers to version 3 of the GNU General Public License. - - "Copyright" also means copyright-like laws that apply to other kinds of -works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this -License. Each licensee is addressed as "you". "Licensees" and -"recipients" may be individuals or organizations. - - To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of an -exact copy. The resulting work is called a "modified version" of the -earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based -on the Program. - - To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. - - To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. 
Mere interaction with a user through -a computer network, with no transfer of a copy, is not conveying. - - An interactive user interface displays "Appropriate Legal Notices" -to the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - - 1. Source Code. - - The "source code" for a work means the preferred form of the work -for making modifications to it. "Object code" means any non-source -form of a work. - - A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. - - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. 
- - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. - - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. Basic Permissions. - - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. This License explicitly affirms your unlimited -permission to run the unmodified Program. The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. - - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. 
Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. - - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. - - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. - - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. 
- - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. - - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. - - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. - - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. 
- - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. - - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. You need not require recipients to copy the - Corresponding Source along with the object code. 
If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. - - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. - - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. - - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. 
- - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. - - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. - - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. 
- - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. -Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. - - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. (Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. - - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the 
material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. - - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. - - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). - - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. 
- - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. - - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. 
If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. - - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". - - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. - - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. 
- - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. - - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. 
You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. - - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot convey a -covered work so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you may -not convey it at all. For example, if you agree to terms that obligate you -to collect a royalty for further conveying from those to whom you convey -the Program, the only way you could satisfy both those terms and this -License would be to refrain entirely from conveying the Program. - - 13. Use with the GNU Affero General Public License. - - Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU Affero General Public License into a single -combined work, and to convey the resulting work. 
The terms of this -License will continue to apply to the part which is the covered work, -but the special requirements of the GNU Affero General Public License, -section 13, concerning interaction through a network will apply to the -combination as such. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of -the GNU General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - - Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU General Public License, you may choose any version ever published -by the Free Software Foundation. - - If the Program specifies that a proxy can decide which future -versions of the GNU General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - - Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - - 15. Disclaimer of Warranty. - - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY -OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM -IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. Limitation of Liability. - - IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS -THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE -USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF -DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD -PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), -EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF -SUCH DAMAGES. - - 17. Interpretation of Sections 15 and 16. - - If the disclaimer of warranty and limitation of liability provided -above cannot be given local legal effect according to their terms, -reviewing courts shall apply local law that most closely approximates -an absolute waiver of all civil liability in connection with the -Program, unless a warranty or assumption of liability accompanies a -copy of the Program in return for a fee. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -state the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. 
- - - Copyright (C) - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see . - -Also add information on how to contact you by electronic and paper mail. - - If the program does terminal interaction, make it output a short -notice like this when it starts in an interactive mode: - - Copyright (C) - This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, your program's commands -might be different; for a GUI interface, you would use an "about box". - - You should also get your employer (if you work as a programmer) or school, -if any, to sign a "copyright disclaimer" for the program, if necessary. -For more information on this, and how to apply and follow the GNU GPL, see -. - - The GNU General Public License does not permit incorporating your program -into proprietary programs. If your program is a subroutine library, you -may consider it more useful to permit linking proprietary applications with -the library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. But first, please read -. 
- diff --git a/tools/wheels/check_license.py b/tools/wheels/check_license.py deleted file mode 100644 index 9aa50015d00b..000000000000 --- a/tools/wheels/check_license.py +++ /dev/null @@ -1,58 +0,0 @@ -#!/usr/bin/env python -""" -check_license.py [MODULE] - -Check the presence of a LICENSE.txt in the installed module directory, -and that it appears to contain text prevalent for a NumPy binary -distribution. - -""" -import argparse -import pathlib -import re -import sys - - -def check_text(text): - ok = "Copyright (c)" in text and re.search( - r"This binary distribution of \w+ also bundles the following software", - text, - ) - return ok - - -def main(): - p = argparse.ArgumentParser(usage=__doc__.rstrip()) - p.add_argument("module", nargs="?", default="numpy") - args = p.parse_args() - - # Drop '' from sys.path - sys.path.pop(0) - - # Find module path - __import__(args.module) - mod = sys.modules[args.module] - - # LICENSE.txt is installed in the .dist-info directory, so find it there - sitepkgs = pathlib.Path(mod.__file__).parent.parent - distinfo_path = next(iter(sitepkgs.glob("numpy-*.dist-info"))) - - # Check license text - license_txt = distinfo_path / "licenses" / "LICENSE.txt" - with open(license_txt, encoding="utf-8") as f: - text = f.read() - - ok = check_text(text) - if not ok: - print( - f"ERROR: License text {license_txt} does not contain expected " - "text fragments\n" - ) - print(text) - sys.exit(1) - - sys.exit(0) - - -if __name__ == "__main__": - main() diff --git a/tools/wheels/cibw_before_build.sh b/tools/wheels/cibw_before_build.sh index ed2640471fed..f5da8968258a 100644 --- a/tools/wheels/cibw_before_build.sh +++ b/tools/wheels/cibw_before_build.sh @@ -2,22 +2,9 @@ set -xe PROJECT_DIR="${1:-$PWD}" - # remove any cruft from a previous run rm -rf build -# Update license -echo "" >> $PROJECT_DIR/LICENSE.txt -echo "----" >> $PROJECT_DIR/LICENSE.txt -echo "" >> $PROJECT_DIR/LICENSE.txt -if [[ $RUNNER_OS == "Linux" ]] ; then - cat 
$PROJECT_DIR/tools/wheels/LICENSE_linux.txt >> $PROJECT_DIR/LICENSE.txt -elif [[ $RUNNER_OS == "macOS" ]]; then - cat $PROJECT_DIR/tools/wheels/LICENSE_osx.txt >> $PROJECT_DIR/LICENSE.txt -elif [[ $RUNNER_OS == "Windows" ]]; then - cat $PROJECT_DIR/tools/wheels/LICENSE_win32.txt >> $PROJECT_DIR/LICENSE.txt -fi - if [[ $(python -c"import sys; print(sys.maxsize)") < $(python -c"import sys; print(2**33)") ]]; then echo "No BLAS used for 32-bit wheels" export INSTALL_OPENBLAS=false diff --git a/tools/wheels/cibw_test_command.sh b/tools/wheels/cibw_test_command.sh index 60e90ef5beb6..57f97f5ed706 100644 --- a/tools/wheels/cibw_test_command.sh +++ b/tools/wheels/cibw_test_command.sh @@ -1,5 +1,5 @@ # This script is used by .github/workflows/wheels.yml to run the full test -# suite, checks for license inclusion and that the openblas version is correct. +# suite, and that the openblas version is correct. set -xe PROJECT_DIR="$1" @@ -45,4 +45,3 @@ fi # durations for the 10 slowests tests to help with debugging slow or hanging # tests python -c "import sys; import numpy; sys.exit(not numpy.test(label='full', extra_argv=['-n=auto', '--timeout=1800', '--durations=10']))" -python $PROJECT_DIR/tools/wheels/check_license.py diff --git a/tools/wheels/upload_wheels.sh b/tools/wheels/upload_wheels.sh deleted file mode 100644 index ccd713c907a2..000000000000 --- a/tools/wheels/upload_wheels.sh +++ /dev/null @@ -1,54 +0,0 @@ -set_travis_vars() { - # Set env vars - echo "TRAVIS_EVENT_TYPE is $TRAVIS_EVENT_TYPE" - echo "TRAVIS_TAG is $TRAVIS_TAG" - if [[ "$TRAVIS_EVENT_TYPE" == "push" && "$TRAVIS_TAG" == v* ]]; then - IS_PUSH="true" - else - IS_PUSH="false" - fi - if [[ "$TRAVIS_EVENT_TYPE" == "cron" ]]; then - IS_SCHEDULE_DISPATCH="true" - else - IS_SCHEDULE_DISPATCH="false" - fi -} -set_upload_vars() { - echo "IS_PUSH is $IS_PUSH" - echo "IS_SCHEDULE_DISPATCH is $IS_SCHEDULE_DISPATCH" - if [[ "$IS_PUSH" == "true" ]]; then - echo push and tag event - export 
ANACONDA_ORG="multibuild-wheels-staging" - export TOKEN="$NUMPY_STAGING_UPLOAD_TOKEN" - export ANACONDA_UPLOAD="true" - elif [[ "$IS_SCHEDULE_DISPATCH" == "true" ]]; then - echo scheduled or dispatched event - export ANACONDA_ORG="scientific-python-nightly-wheels" - export TOKEN="$NUMPY_NIGHTLY_UPLOAD_TOKEN" - export ANACONDA_UPLOAD="true" - else - echo non-dispatch event - export ANACONDA_UPLOAD="false" - fi -} -upload_wheels() { - echo ${PWD} - if [[ ${ANACONDA_UPLOAD} == true ]]; then - if [[ -z ${TOKEN} ]]; then - echo no token set, not uploading - else - # sdists are located under dist folder when built through setup.py - if compgen -G "./dist/*.gz"; then - echo "Found sdist" - anaconda -q -t ${TOKEN} upload --force -u ${ANACONDA_ORG} ./dist/*.gz - elif compgen -G "./wheelhouse/*.whl"; then - echo "Found wheel" - anaconda -q -t ${TOKEN} upload --force -u ${ANACONDA_ORG} ./wheelhouse/*.whl - else - echo "Files do not exist" - return 1 - fi - echo "PyPI-style index: https://pypi.anaconda.org/$ANACONDA_ORG/simple" - fi - fi -} From 6a16e90eb42e9d584436ece5322ca04c8f03c0dc Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Mon, 11 Aug 2025 21:39:46 +0200 Subject: [PATCH 0423/1718] CI: remove redundant Windows, musllinux jobs These configs are covered by `wheels.yml`, which is now part of the regular test suite rather than off-by-default on PRs. 
--- .github/workflows/linux.yml | 35 ----------- .github/workflows/linux_musl.yml | 69 --------------------- .github/workflows/windows.yml | 102 +------------------------------ 3 files changed, 3 insertions(+), 203 deletions(-) delete mode 100644 .github/workflows/linux_musl.yml diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index b55f3dedb67e..5a453521f2dd 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -157,41 +157,6 @@ jobs: env: PYTHONOPTIMIZE: 2 - - aarch64_test: - needs: [smoke_test] - if: github.repository == 'numpy/numpy' - runs-on: ubuntu-22.04-arm - steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - with: - submodules: recursive - fetch-tags: true - persist-credentials: false - - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 - with: - python-version: '3.11' - - - name: Install Python dependencies - run: | - python -m pip install -r requirements/build_requirements.txt - python -m pip install -r requirements/test_requirements.txt - python -m pip install -r requirements/ci32_requirements.txt - mkdir -p ./.openblas - python -c"import scipy_openblas32 as ob32; print(ob32.get_pkg_config())" > ./.openblas/scipy-openblas.pc - - - name: Build - env: - PKG_CONFIG_PATH: ${{ github.workspace }}/.openblas - run: | - spin build - - - name: Test - run: | - spin test -j2 -m full -- --timeout=600 --durations=10 - - armhf_test: # Tests NumPy on 32-bit ARM hard-float (armhf) via compatibility mode # running on aarch64 (ARM 64-bit) GitHub runners. 
diff --git a/.github/workflows/linux_musl.yml b/.github/workflows/linux_musl.yml deleted file mode 100644 index 547c031bc84b..000000000000 --- a/.github/workflows/linux_musl.yml +++ /dev/null @@ -1,69 +0,0 @@ -name: Test musllinux_x86_64 - -on: - pull_request: - branches: - - main - - maintenance/** - - -concurrency: - group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} - cancel-in-progress: true - - -permissions: - contents: read # to fetch code (actions/checkout) - - -jobs: - musllinux_x86_64: - runs-on: ubuntu-latest - # To enable this workflow on a fork, comment out: - if: github.repository == 'numpy/numpy' - container: - # Use container used for building musllinux wheels - # it has git installed, all the pythons, etc - image: quay.io/pypa/musllinux_1_2_x86_64 - - steps: - - name: setup - run: | - apk update --quiet - - # using git commands to clone because versioneer doesn't work when - # actions/checkout is used for the clone step in a container - - git config --global --add safe.directory $PWD - - if [ $GITHUB_EVENT_NAME != pull_request ]; then - git clone --recursive --branch=$GITHUB_REF_NAME https://github.com/${GITHUB_REPOSITORY}.git $GITHUB_WORKSPACE - git reset --hard $GITHUB_SHA - else - git clone --recursive https://github.com/${GITHUB_REPOSITORY}.git $GITHUB_WORKSPACE - git fetch origin $GITHUB_REF:my_ref_name - git checkout $GITHUB_BASE_REF - git -c user.email="you@example.com" merge --no-commit my_ref_name - fi - git submodule update --init - - ln -s /usr/local/bin/python3.11 /usr/local/bin/python - - - name: test-musllinux_x86_64 - env: - PKG_CONFIG_PATH: ${{ github.workspace }}/.openblas - run: | - python -m venv test_env - source test_env/bin/activate - - pip install -r requirements/ci_requirements.txt - pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt - - # use meson to build and test - spin build --with-scipy-openblas=64 - spin test -j auto -- --timeout=600 --durations=10 - - - name: 
Meson Log - shell: bash - run: | - cat build/meson-logs/meson-log.txt diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 66f544b51d7d..2cfb98e67aba 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -14,18 +14,11 @@ permissions: contents: read # to fetch code (actions/checkout) jobs: - python64bit_openblas: + clangcl_python64bit_openblas32: name: x86-64, LP64 OpenBLAS runs-on: windows-2022 # To enable this job on a fork, comment out: if: github.repository == 'numpy/numpy' - strategy: - fail-fast: false - matrix: - compiler-pyversion: - - ["MSVC", "3.11"] - - ["Clang-cl", "3.14t-dev"] - steps: - name: Checkout uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 @@ -37,7 +30,7 @@ jobs: - name: Setup Python uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: - python-version: ${{ matrix.compiler-pyversion[1] }} + python-version: "3.14t-dev" - name: Install build dependencies from PyPI run: | @@ -48,23 +41,7 @@ jobs: choco install -y --stoponfirstfailure --checksum 6004DF17818F5A6DBF19CB335CC92702 pkgconfiglite echo "PKG_CONFIG_PATH=${{ github.workspace }}/.openblas" >> $env:GITHUB_ENV - - - name: Install Clang-cl - if: matrix.compiler-pyversion[0] == 'Clang-cl' - run: | - # llvm is preinstalled, but leave - # this here in case we need to pin the - # version at some point. 
- #choco install llvm -y - - - name: Install NumPy (MSVC) - if: matrix.compiler-pyversion[0] == 'MSVC' - run: | - pip install -r requirements/ci_requirements.txt - spin build --with-scipy-openblas=32 -j2 -- --vsenv - - name: Install NumPy (Clang-cl) - if: matrix.compiler-pyversion[0] == 'Clang-cl' run: | "[binaries]","c = 'clang-cl'","cpp = 'clang-cl'","ar = 'llvm-lib'","c_ld = 'lld-link'","cpp_ld = 'lld-link'" | Out-File $PWD/clang-cl-build.ini -Encoding ascii pip install -r requirements/ci_requirements.txt @@ -85,80 +62,7 @@ jobs: run: | spin test -- --timeout=600 --durations=10 - python64bit_openblas_winarm64: - name: arm64, LPARM64 OpenBLAS - runs-on: windows-11-arm - # To enable this job on a fork, comment out: - if: github.repository == 'numpy/numpy' - strategy: - fail-fast: false - matrix: - compiler-pyversion: - - ["MSVC", "3.11"] - - ["Clang-cl", "3.14t-dev"] - - steps: - - name: Checkout - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - with: - submodules: recursive - fetch-tags: true - persist-credentials: false - - - name: Setup Python - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 - with: - python-version: ${{ matrix.compiler-pyversion[1] }} - architecture: arm64 - - - name: Setup MSVC - if: matrix.compiler-pyversion[0] == 'MSVC' - uses: bus1/cabuild/action/msdevshell@e22aba57d6e74891d059d66501b6b5aed8123c4d # v1 - with: - architecture: arm64 - - - name: Install build dependencies from PyPI - run: | - pip install -r requirements/build_requirements.txt - - - name: Install pkg-config - run: | - choco install -y --stoponfirstfailure --checksum 6004DF17818F5A6DBF19CB335CC92702 pkgconfiglite - echo "PKG_CONFIG_PATH=${{ github.workspace }}/.openblas" >> $env:GITHUB_ENV - - - name: Install Clang-cl - if: matrix.compiler-pyversion[0] == 'Clang-cl' - uses: ./.github/windows_arm64_steps - - - name: Install NumPy (MSVC) - if: matrix.compiler-pyversion[0] == 'MSVC' - run: | - pip install -r 
requirements/ci_requirements.txt - spin build --with-scipy-openblas=32 -j2 -- --vsenv - - - name: Install NumPy (Clang-cl) - if: matrix.compiler-pyversion[0] == 'Clang-cl' - run: | - "[binaries]","c = 'clang-cl'","cpp = 'clang-cl'","ar = 'llvm-lib'","c_ld = 'lld-link'","cpp_ld = 'lld-link'" | Out-File $PWD/clang-cl-arm64.ini -Encoding ascii - pip install -r requirements/ci_requirements.txt - spin build --with-scipy-openblas=32 -j2 -- --vsenv --native-file=$PWD/clang-cl-arm64.ini - - - name: Meson Log - shell: bash - if: ${{ failure() }} - run: | - cat build/meson-logs/meson-log.txt - - - name: Install test dependencies - run: | - python -m pip install -r requirements/test_requirements.txt - python -m pip install threadpoolctl - - - name: Run test suite - run: | - spin test -- --timeout=600 --durations=10 - - msvc_python_no_openblas: + msvc_python32bit_no_openblas: name: MSVC, ${{ matrix.architecture }} Python , no BLAS runs-on: ${{ matrix.os }} strategy: From c9cae4d6d5f7d550146cbd850d776f830cb9d088 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 9 Sep 2025 17:08:13 +0000 Subject: [PATCH 0424/1718] MAINT: Bump github/codeql-action from 3.30.1 to 3.30.2 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.30.1 to 3.30.2. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/f1f6e5f6af878fb37288ce1c627459e94dbf7d01...d3678e237b9c32a6c9bffb3315c335f976f3549f) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.30.2 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 363ad25f2e50..13619ca762c3 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@f1f6e5f6af878fb37288ce1c627459e94dbf7d01 # v3.30.1 + uses: github/codeql-action/init@d3678e237b9c32a6c9bffb3315c335f976f3549f # v3.30.2 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@f1f6e5f6af878fb37288ce1c627459e94dbf7d01 # v3.30.1 + uses: github/codeql-action/autobuild@d3678e237b9c32a6c9bffb3315c335f976f3549f # v3.30.2 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@f1f6e5f6af878fb37288ce1c627459e94dbf7d01 # v3.30.1 + uses: github/codeql-action/analyze@d3678e237b9c32a6c9bffb3315c335f976f3549f # v3.30.2 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 98cab6b712f6..9eb76bdc0525 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
- name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@f1f6e5f6af878fb37288ce1c627459e94dbf7d01 # v2.1.27 + uses: github/codeql-action/upload-sarif@d3678e237b9c32a6c9bffb3315c335f976f3549f # v2.1.27 with: sarif_file: results.sarif From 788889237029e2f3403d5b21b513be99dafa163e Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Tue, 9 Sep 2025 16:17:53 +0200 Subject: [PATCH 0425/1718] CI: sync wheel build cleanups back from `numpy-release` repo --- pyproject.toml | 43 ++++++++++------------------- tools/wheels/cibw_before_build.sh | 46 +++++++++++++++---------------- tools/wheels/cibw_test_command.sh | 28 ++++--------------- 3 files changed, 44 insertions(+), 73 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index b678be83a486..5ffbd9ea0247 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -182,9 +182,8 @@ tracker = "https://github.com/numpy/numpy/issues" [tool.cibuildwheel] # Note: the below skip command doesn't do much currently, the platforms to -# build wheels for in CI are controlled in `.github/workflows/wheels.yml` and -# `tools/ci/cirrus_wheels.yml`. -build-frontend = "build" +# build wheels for in CI are controlled in `.github/workflows/wheels.yml`. 
+# universal2 wheels are not supported (see gh-21233), use `delocate-fuse` if you need them skip = ["*_i686", "*_ppc64le", "*_s390x", "*_universal2"] before-build = "bash {project}/tools/wheels/cibw_before_build.sh {project}" before-test = "pip install -r {project}/requirements/test_requirements.txt" @@ -202,45 +201,33 @@ manylinux-aarch64-image = "manylinux_2_28" musllinux-x86_64-image = "musllinux_1_2" musllinux-aarch64-image = "musllinux_1_2" -[tool.cibuildwheel.pyodide] -before-test = "pip install -r {project}/requirements/emscripten_test_requirements.txt" -# Pyodide ensures that the wheels are already repaired by auditwheel-emscripten -repair-wheel-command = "" -test-command = "python -m pytest --pyargs numpy -m 'not slow'" - -[tool.cibuildwheel.pyodide.config-settings] -build-dir = "build" -setup-args = ["--cross-file=$PWD/tools/ci/emscripten/emscripten.meson.cross", "-Dblas=none", "-Dlapack=none"] - [tool.cibuildwheel.linux.environment] -# RUNNER_OS is a GitHub Actions specific env var; define it here so it works on Cirrus CI too +# RUNNER_OS is a GitHub Actions specific env var; define it here so it's +# defined when running cibuildwheel locally RUNNER_OS="Linux" # /project will be the $PWD equivalent inside the docker used to build the wheel PKG_CONFIG_PATH="/project/.openblas" -LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/project/.openblas/lib" - -[tool.cibuildwheel.macos] -# universal2 wheels are not supported (see gh-21233), use `delocate-fuse` if you need them -# note that universal2 wheels are not built, they're listed in the tool.cibuildwheel.skip -# section -# Not clear why the DYLD_LIBRARY_PATH is not passed through from the environment -repair-wheel-command = [ - "export DYLD_LIBRARY_PATH=$PWD/.openblas/lib", - "echo DYLD_LIBRARY_PATH $DYLD_LIBRARY_PATH", - "delocate-wheel --require-archs {delocate_archs} -w {dest_dir} -v {wheel}", -] [tool.cibuildwheel.windows] config-settings = {setup-args = ["--vsenv", "-Dallow-noblas=false"], build-dir="build"} 
repair-wheel-command = "bash -el ./tools/wheels/repair_windows.sh {wheel} {dest_dir}" -# This does not work, use CIBW_ENVIRONMENT_WINDOWS -environment = {PKG_CONFIG_PATH="./.openblas"} [[tool.cibuildwheel.overrides]] select = ["*-win32"] config-settings = {setup-args = ["--vsenv", "-Dallow-noblas=true"], build-dir="build"} repair-wheel-command = "" +[tool.cibuildwheel.pyodide] +before-test = "pip install -r {project}/requirements/emscripten_test_requirements.txt" +# Pyodide ensures that the wheels are already repaired by auditwheel-emscripten +repair-wheel-command = "" +test-command = "python -m pytest --pyargs numpy -m 'not slow'" + +[tool.cibuildwheel.pyodide.config-settings] +build-dir = "build" +setup-args = ["--cross-file=$PWD/tools/ci/emscripten/emscripten.meson.cross", "-Dblas=none", "-Dlapack=none"] + + [tool.meson-python] meson = 'vendored-meson/meson/meson.py' diff --git a/tools/wheels/cibw_before_build.sh b/tools/wheels/cibw_before_build.sh index f5da8968258a..381c329a5372 100644 --- a/tools/wheels/cibw_before_build.sh +++ b/tools/wheels/cibw_before_build.sh @@ -13,40 +13,40 @@ elif [ -z $INSTALL_OPENBLAS ]; then export INSTALL_OPENBLAS=true fi -# Install Openblas from scipy-openblas64 +# Install OpenBLAS from scipy-openblas32|64 if [[ "$INSTALL_OPENBLAS" = "true" ]] ; then - # by default, use scipy-openblas64 + # By default, use scipy-openblas64 + # On 32-bit platforms and on win-arm64, use scipy-openblas32 OPENBLAS=openblas64 - # Possible values for RUNNER_ARCH in github are - # X86, X64, ARM, or ARM64 - # TODO: should we detect a missing RUNNER_ARCH and use platform.machine() - # when wheel build is run outside github? 
- # On 32-bit platforms, use scipy_openblas32 - # On win-arm64 use scipy_openblas32 + # Possible values for RUNNER_ARCH in GitHub Actions are: X86, X64, ARM, or ARM64 if [[ $RUNNER_ARCH == "X86" || $RUNNER_ARCH == "ARM" ]] ; then OPENBLAS=openblas32 elif [[ $RUNNER_ARCH == "ARM64" && $RUNNER_OS == "Windows" ]] ; then OPENBLAS=openblas32 fi - echo PKG_CONFIG_PATH is $PKG_CONFIG_PATH, OPENBLAS is ${OPENBLAS} - PKG_CONFIG_PATH=$PROJECT_DIR/.openblas - rm -rf $PKG_CONFIG_PATH - mkdir -p $PKG_CONFIG_PATH - python -m pip install -r requirements/ci_requirements.txt - python -c "import scipy_${OPENBLAS}; print(scipy_${OPENBLAS}.get_pkg_config())" > $PKG_CONFIG_PATH/scipy-openblas.pc - # Copy the shared objects to a path under $PKG_CONFIG_PATH, the build - # will point $LD_LIBRARY_PATH there and then auditwheel/delocate-wheel will - # pull these into the wheel. Use python to avoid windows/posix problems - python < $pkgconf_path/scipy-openblas.pc + + # Copy scipy-openblas DLL's to a fixed location so we can point delvewheel + # at it in `repair_windows.sh` (needed only on Windows because of the lack + # of RPATH support). + if [[ $RUNNER_OS == "Windows" ]]; then + python < Date: Tue, 9 Sep 2025 14:21:46 -0400 Subject: [PATCH 0426/1718] TST: xfail test_kind::test_quad_precision on AIX/PPC (#29693) * TST: xfail test_kind::test_quad_precision on AIX/PPC * Change to use platform.system() instead --- numpy/f2py/tests/test_kind.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/numpy/f2py/tests/test_kind.py b/numpy/f2py/tests/test_kind.py index ecf884fb4999..c219cc8bfd09 100644 --- a/numpy/f2py/tests/test_kind.py +++ b/numpy/f2py/tests/test_kind.py @@ -10,6 +10,7 @@ from . 
import util +IS_PPC_OR_AIX = platform.machine().lower().startswith("ppc") or platform.system() == 'AIX' class TestKind(util.F2PyTest): sources = [util.getpath("tests", "src", "kind", "foo.f90")] @@ -37,7 +38,7 @@ def test_real(self): i ), f"selectedrealkind({i}): expected {selected_real_kind(i)!r} but got {selectedrealkind(i)!r}" - @pytest.mark.xfail(platform.machine().lower().startswith("ppc"), + @pytest.mark.xfail(IS_PPC_OR_AIX, reason="Some PowerPC may not support full IEEE 754 precision") def test_quad_precision(self): """ From ed275f33ce4b6a4d207e0226bc724816e9ae59ef Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Tue, 9 Sep 2025 12:32:15 -0600 Subject: [PATCH 0427/1718] MAINT: Update main after 2.3.3 release. - Forward port 2.3.3-notes.rst - Forward port 2.3.3-changelog.rst - Update release.rst --- doc/changelog/2.3.3-changelog.rst | 50 +++++++++++++++++++++++++ doc/source/release.rst | 1 + doc/source/release/2.3.3-notes.rst | 59 ++++++++++++++++++++++++++++++ 3 files changed, 110 insertions(+) create mode 100644 doc/changelog/2.3.3-changelog.rst create mode 100644 doc/source/release/2.3.3-notes.rst diff --git a/doc/changelog/2.3.3-changelog.rst b/doc/changelog/2.3.3-changelog.rst new file mode 100644 index 000000000000..0398b30072af --- /dev/null +++ b/doc/changelog/2.3.3-changelog.rst @@ -0,0 +1,50 @@ + +Contributors +============ + +A total of 13 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Aleksandr A. Voyt + +* Bernard Roesler + +* Charles Harris +* Hunter Hogan + +* Joren Hammudoglu +* Maanas Arora +* Matti Picus +* Nathan Goldbaum +* Raghuveer Devulapalli +* Sanjay Kumar Sakamuri Kamalakar + +* Tobias Markus + +* Warren Weckesser +* Zebreus + + +Pull requests merged +==================== + +A total of 23 pull requests were merged for this release. + +* `#29440 `__: MAINT: Prepare 2.3.x for further development. 
+* `#29446 `__: BUG: Fix test_configtool_pkgconfigdir to resolve PKG_CONFIG_DIR... +* `#29447 `__: BLD: allow targeting webassembly without emscripten +* `#29460 `__: MAINT: Backport write_release.py +* `#29473 `__: MAINT: Bump pypa/cibuildwheel from 3.1.0 to 3.1.2 +* `#29500 `__: BUG: Always return a real dtype from linalg.cond (gh-18304) (#29333) +* `#29501 `__: MAINT: Add .file entry to all .s SVML files +* `#29556 `__: BUG: Casting from one timedelta64 to another didn't handle NAT. +* `#29562 `__: BLD: update vendored Meson to 1.8.3 [wheel build] +* `#29563 `__: BUG: Fix metadata not roundtripping when pickling datetime (#29555) +* `#29587 `__: TST: update link and version for Intel SDE download +* `#29593 `__: TYP: add ``sorted`` kwarg to ``unique`` +* `#29672 `__: MAINT: Update pythoncapi-compat from main. +* `#29673 `__: MAINT: Update cibuildwheel. +* `#29674 `__: MAINT: Fix typo in wheels.yml +* `#29683 `__: BUG, BLD: Correct regex for ppc64 VSX3/VSX4 feature detection +* `#29684 `__: TYP: ndarray.fill() takes no keyword arguments +* `#29685 `__: BUG: avoid thread-unsafe refcount check in temp elision +* `#29687 `__: CI: replace comment-hider action in mypy_primer workflow +* `#29689 `__: BLD: Add missing include +* `#29691 `__: BUG: use correct input dtype in flatiter assignment +* `#29700 `__: TYP: fix np.bool method declarations +* `#29701 `__: BUG: Correct ambiguous logic for s390x CPU feature detection + diff --git a/doc/source/release.rst b/doc/source/release.rst index 3af644a57562..72ddab818a77 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -6,6 +6,7 @@ Release notes :maxdepth: 2 2.4.0 + 2.3.3 2.3.2 2.3.1 2.3.0 diff --git a/doc/source/release/2.3.3-notes.rst b/doc/source/release/2.3.3-notes.rst new file mode 100644 index 000000000000..3c293c3db322 --- /dev/null +++ b/doc/source/release/2.3.3-notes.rst @@ -0,0 +1,59 @@ +.. 
currentmodule:: numpy + +========================= +NumPy 2.3.3 Release Notes +========================= + +The NumPy 2.3.3 release is a patch release split between a number of maintenance +updates and bug fixes. This release supports Python versions 3.11-3.14. Note +that the 3.14.0 final is currently expected in Oct, 2025. This release is based +on 3.14.0rc2. + +Contributors +============ + +A total of 13 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Aleksandr A. Voyt + +* Bernard Roesler + +* Charles Harris +* Hunter Hogan + +* Joren Hammudoglu +* Maanas Arora +* Matti Picus +* Nathan Goldbaum +* Raghuveer Devulapalli +* Sanjay Kumar Sakamuri Kamalakar + +* Tobias Markus + +* Warren Weckesser +* Zebreus + + +Pull requests merged +==================== + +A total of 23 pull requests were merged for this release. + +* `#29440 `__: MAINT: Prepare 2.3.x for further development. +* `#29446 `__: BUG: Fix test_configtool_pkgconfigdir to resolve PKG_CONFIG_DIR... +* `#29447 `__: BLD: allow targeting webassembly without emscripten +* `#29460 `__: MAINT: Backport write_release.py +* `#29473 `__: MAINT: Bump pypa/cibuildwheel from 3.1.0 to 3.1.2 +* `#29500 `__: BUG: Always return a real dtype from linalg.cond (gh-18304) (#29333) +* `#29501 `__: MAINT: Add .file entry to all .s SVML files +* `#29556 `__: BUG: Casting from one timedelta64 to another didn't handle NAT. +* `#29562 `__: BLD: update vendored Meson to 1.8.3 [wheel build] +* `#29563 `__: BUG: Fix metadata not roundtripping when pickling datetime (#29555) +* `#29587 `__: TST: update link and version for Intel SDE download +* `#29593 `__: TYP: add ``sorted`` kwarg to ``unique`` +* `#29672 `__: MAINT: Update pythoncapi-compat from main. +* `#29673 `__: MAINT: Update cibuildwheel. 
+* `#29674 `__: MAINT: Fix typo in wheels.yml +* `#29683 `__: BUG, BLD: Correct regex for ppc64 VSX3/VSX4 feature detection +* `#29684 `__: TYP: ndarray.fill() takes no keyword arguments +* `#29685 `__: BUG: avoid thread-unsafe refcount check in temp elision +* `#29687 `__: CI: replace comment-hider action in mypy_primer workflow +* `#29689 `__: BLD: Add missing include +* `#29691 `__: BUG: use correct input dtype in flatiter assignment +* `#29700 `__: TYP: fix np.bool method declarations +* `#29701 `__: BUG: Correct ambiguous logic for s390x CPU feature detection From 83402f7a43363e8041b8f297fdd4f76a7fa9fea1 Mon Sep 17 00:00:00 2001 From: Sandeep Gupta Date: Wed, 10 Sep 2025 01:09:26 +0530 Subject: [PATCH 0428/1718] CI: Add native ``ppc64le`` CI job using GitHub Actions (#29212) --- .github/workflows/linux-ppc64le.yml | 52 +++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) create mode 100644 .github/workflows/linux-ppc64le.yml diff --git a/.github/workflows/linux-ppc64le.yml b/.github/workflows/linux-ppc64le.yml new file mode 100644 index 000000000000..223da69a7b8d --- /dev/null +++ b/.github/workflows/linux-ppc64le.yml @@ -0,0 +1,52 @@ +name: Native ppc64le Linux Test + +on: + pull_request: + branches: + - main + - maintenance/** + workflow_dispatch: + +permissions: + contents: read + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +jobs: + native_ppc64le: + # This job runs only in the main NumPy repository. + # It requires a native ppc64le GHA runner, which is not available on forks. 
+ # For more details, see: https://github.com/numpy/numpy/issues/29125 + if: github.repository == 'numpy/numpy' + runs-on: ubuntu-24.04-ppc64le + name: "Native PPC64LE" + steps: + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: recursive + fetch-tags: true + persist-credentials: false + + - name: Install dependencies + run: | + sudo apt update + sudo apt install -y python3 python3-pip python3-dev ninja-build gfortran \ + build-essential libopenblas-dev liblapack-dev pkg-config + pip install --upgrade pip + pip install -r requirements/build_requirements.txt + pip install pytest pytest-xdist hypothesis typing_extensions pytest-timeout spin + echo "/home/runner/.local/bin" >> $GITHUB_PATH + + - name: Meson Build + run: | + spin build -- -Dallow-noblas=false + + - name: Meson Log + if: always() + run: cat build/meson-logs/meson-log.txt + + - name: Run Tests + run: | + spin test -- --timeout=60 --durations=10 \ No newline at end of file From dc3e1e776e68cdd9445237ce19b14379bd49b2e4 Mon Sep 17 00:00:00 2001 From: Raghuveer Date: Tue, 9 Sep 2025 21:51:20 -0700 Subject: [PATCH 0429/1718] BUG: enable x86-simd-sort to build on KNL with -mavx512f --- numpy/_core/src/npysort/x86-simd-sort | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/npysort/x86-simd-sort b/numpy/_core/src/npysort/x86-simd-sort index c306ac581a59..fe2b5bf62275 160000 --- a/numpy/_core/src/npysort/x86-simd-sort +++ b/numpy/_core/src/npysort/x86-simd-sort @@ -1 +1 @@ -Subproject commit c306ac581a59f89585d778254c4ed7197e64ba2d +Subproject commit fe2b5bf62275ba6173c53b6c5b747c362b641d5e From 65343f756c29cadef77ad9ffb2180eca9dd8f7b8 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Wed, 10 Sep 2025 13:27:51 +0200 Subject: [PATCH 0430/1718] DOC: update `doc/BRANCH_WALKTHROUGH.rst` for release process changes `pavement.py` was recently removed, and the replacement no longer needs a manual change. 
The git version no longer depends on `.dev0` tags, so remove that note. However, keep adding those tags, because why not - it's useful to navigate to the start of the development for a release series. --- doc/BRANCH_WALKTHROUGH.rst | 39 ++++++++++++++++---------------------- 1 file changed, 16 insertions(+), 23 deletions(-) diff --git a/doc/BRANCH_WALKTHROUGH.rst b/doc/BRANCH_WALKTHROUGH.rst index 5767fb6e6a10..3f9db71a0282 100644 --- a/doc/BRANCH_WALKTHROUGH.rst +++ b/doc/BRANCH_WALKTHROUGH.rst @@ -1,6 +1,6 @@ -This guide contains a walkthrough of branching NumPy 1.21.x on Linux. The -commands can be copied into the command line, but be sure to replace 1.21 and -1.22 by the correct versions. It is good practice to make ``.mailmap`` as +This guide contains a walkthrough of branching NumPy 2.3.x on Linux. The +commands can be copied into the command line, but be sure to replace 2.3 and +2.4 by the correct versions. It is good practice to make ``.mailmap`` as current as possible before making the branch, that may take several weeks. This should be read together with the @@ -12,14 +12,13 @@ Branching Make the branch --------------- -This is only needed when starting a new maintenance branch. Because -NumPy now depends on tags to determine the version, the start of a new -development cycle in the main branch needs an annotated tag. That is done +This is only needed when starting a new maintenance branch. The start of a new +development cycle in the main branch should get an annotated tag. That is done as follows:: $ git checkout main $ git pull upstream main - $ git commit --allow-empty -m'REL: Begin NumPy 1.22.0 development' + $ git commit --allow-empty -m'REL: Begin NumPy 2.4.0 development' $ git push upstream HEAD If the push fails because new PRs have been merged, do:: @@ -28,20 +27,20 @@ If the push fails because new PRs have been merged, do:: and repeat the push. 
Once the push succeeds, tag it:: - $ git tag -a -s v1.22.0.dev0 -m'Begin NumPy 1.22.0 development' - $ git push upstream v1.22.0.dev0 + $ git tag -a -s v2.4.0.dev0 -m'Begin NumPy 2.4.0 development' + $ git push upstream v2.4.0.dev0 then make the new branch and push it:: - $ git branch maintenance/1.21.x HEAD^ - $ git push upstream maintenance/1.21.x + $ git branch maintenance/2.3.x HEAD^ + $ git push upstream maintenance/2.3.x Prepare the main branch for further development ----------------------------------------------- -Make a PR branch to prepare main for further development:: +Make a PR branch to prepare ``main`` for further development:: - $ git checkout -b 'prepare-main-for-1.22.0-development' v1.22.0.dev0 + $ git checkout -b 'prepare-main-for-2.4.0-development' v2.4.0.dev0 Delete the release note fragments:: @@ -49,18 +48,12 @@ Delete the release note fragments:: Create the new release notes skeleton and add to index:: - $ cp doc/source/release/template.rst doc/source/release/1.22.0-notes.rst - $ gvim doc/source/release/1.22.0-notes.rst # put the correct version - $ git add doc/source/release/1.22.0-notes.rst + $ cp doc/source/release/template.rst doc/source/release/2.4.0-notes.rst + $ gvim doc/source/release/2.4.0-notes.rst # put the correct version + $ git add doc/source/release/2.4.0-notes.rst $ gvim doc/source/release.rst # add new notes to notes index $ git add doc/source/release.rst -Update ``pavement.py`` and update the ``RELEASE_NOTES`` variable to point to -the new notes:: - - $ gvim pavement.py - $ git add pavement.py - Update ``cversions.txt`` to add current release. There should be no new hash to worry about at this early point, just add a comment following previous practice:: @@ -71,7 +64,7 @@ practice:: Check your work, commit it, and push:: $ git status # check work - $ git commit -m'REL: Prepare main for NumPy 1.22.0 development' + $ git commit -m'REL: Prepare main for NumPy 2.4.0 development' $ git push origin HEAD Now make a pull request. 
From ed2eab6f408009cbf4282842711a376c435ccd77 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Wed, 10 Sep 2025 14:24:49 +0200 Subject: [PATCH 0431/1718] DOC: update `doc/HOWTO_RELEASE.rst` for release process changes --- doc/HOWTO_RELEASE.rst | 188 ++++++++++++++++-------------------------- 1 file changed, 70 insertions(+), 118 deletions(-) diff --git a/doc/HOWTO_RELEASE.rst b/doc/HOWTO_RELEASE.rst index 53c3904703a4..d756a75a6bce 100644 --- a/doc/HOWTO_RELEASE.rst +++ b/doc/HOWTO_RELEASE.rst @@ -4,142 +4,113 @@ releases for NumPy. Current build and release info ============================== -Useful info can be found in the following locations: +Useful info can be found in `building-from-source` in the docs as well as in +these three files: -* **Source tree** - - - `INSTALL.rst `_ - - `pavement.py `_ - -* **NumPy docs** - - - `HOWTO_RELEASE.rst `_ - - `RELEASE_WALKTHROUGH.rst `_ - - `BRANCH_WALKTHROUGH.rst `_ +- `HOWTO_RELEASE.rst `_ +- `RELEASE_WALKTHROUGH.rst `_ +- `BRANCH_WALKTHROUGH.rst `_ Supported platforms and versions ================================ -:ref:`NEP 29 ` outlines which Python versions -are supported; For the first half of 2020, this will be Python >= 3.6. We test -NumPy against all these versions every time we merge code to main. Binary -installers may be available for a subset of these versions (see below). +:ref:`NEP 29 ` outlines which Python versions are supported *at a +minimum*. We usually decide to keep support for a given Python version slightly +longer than that minimum, to avoid giving other projects issues - this is at +the discretion of the release manager. -* **OS X** +* **macOS** - OS X versions >= 10.9 are supported, for Python version support see - :ref:`NEP 29 `. We build binary wheels for OSX that are compatible with - Python.org Python, system Python, homebrew and macports - see this - `OSX wheel building summary `_ - for details. 
+ We aim to support the same set of macOS versions as are supported by + Python.org and `cibuildwheel`_ for any given Python version. + We build binary wheels for macOS that are compatible with common Python + installation methods, e.g., from python.org, ``python-build-standalone`` (the + ones ``uv`` installs), system Python, conda-forge, Homebrew and MacPorts. * **Windows** We build 32- and 64-bit wheels on Windows. Windows 7, 8 and 10 are supported. - We build NumPy using the `mingw-w64 toolchain`_, `cibuildwheels`_ and GitHub - actions. + We build NumPy using the most convenient compilers, which are (as of Aug + 2025) MSVC for x86/x86-64 and Clang-cl for arm64, `cibuildwheel`_ and GitHub + Actions. -.. _cibuildwheels: https://cibuildwheel.readthedocs.io/en/stable/ +.. _cibuildwheel: https://cibuildwheel.readthedocs.io/en/stable/ * **Linux** - We build and ship `manylinux_2_28 `_ - wheels for NumPy. Many Linux distributions include their own binary builds - of NumPy. + We build and ship ``manylinux`` and ``musllinux`` wheels for x86-64 and + aarch64 platforms on PyPI. Wheels for 32-bit platforms are not currently + provided. We aim to support the lowest non-EOL versions, and upgrade roughly + in sync with `cibuildwheel`_. See + `pypa/manylinux `__ and + `this distro compatibility table `__ + for more details. -* **BSD / Solaris** +* **BSD / Solaris / AIX** - No binaries are provided, but successful builds on Solaris and BSD have been - reported. + No binary wheels are provided on PyPI, however we expect building from source + on these platforms to work fine. -Tool chain +Toolchains ========== -We build all our wheels on cloud infrastructure - so this list of compilers is -for information and debugging builds locally. See the ``.travis.yml`` script -in the `numpy wheels`_ repo for an outdated source of the build recipes using -multibuild. - -.. 
_numpy wheels : https://github.com/MacPython/numpy-wheels - -Compilers ---------- -The same gcc version is used as the one with which Python itself is built on -each platform. At the moment this means: - -- OS X builds on travis currently use `clang`. It appears that binary wheels - for OSX >= 10.6 can be safely built from the travis-ci OSX 10.9 VMs - when building against the Python from the Python.org installers; -- Windows builds use the `mingw-w64 toolchain`_; -- Manylinux2014 wheels use the gcc provided on the Manylinux docker images. - -You will need Cython for building the binaries. Cython compiles the ``.pyx`` -files in the NumPy distribution to ``.c`` files. - -.. _mingw-w64 toolchain : https://mingwpy.github.io +For building wheels, we use the following toolchains: + +- Linux: we use the default compilers in the ``manylinux``/``musllinux`` Docker + images, which is usually a relatively recent GCC version. +- macOS: we use the Apple Clang compilers and XCode version installed on the + GitHub Actions runner image. +- Windows: for x86 and x86-64 we use the default MSVC and Visual Studio + toolchain installed on the relevant GitHub actions runner image. Note that in + the past it has sometimes been necessary to use an older toolchain to avoid + causing problems through the static ``libnpymath`` library for SciPy - please + inspect the `numpy/numpy-release `__ + code and CI logs in case the exact version numbers need to be determined. + +For building from source, minimum compiler versions are tracked in the top-level +``meson.build`` file. OpenBLAS -------- -All the wheels link to a version of OpenBLAS_ supplied via the openblas-libs_ repo. -The shared object (or DLL) is shipped with in the wheel, renamed to prevent name +Most wheels link to a version of OpenBLAS_ supplied via the openblas-libs_ repo. +The shared object (or DLL) is shipped within the wheel, renamed to prevent name collisions with other OpenBLAS shared objects that may exist in the filesystem. 
-.. _OpenBLAS: https://github.com/xianyi/OpenBLAS +.. _OpenBLAS: https://github.com/OpenMathLib/OpenBLAS .. _openblas-libs: https://github.com/MacPython/openblas-libs - -Building source archives and wheels ------------------------------------ -The NumPy wheels and sdist are now built using cibuildwheel with -github actions. - - Building docs ------------- -We are no longer building ``PDF`` files. All that will be needed is - -- virtualenv (pip). - -The other requirements will be filled automatically during the documentation -build process. - +We are no longer building ``pdf`` files. The requirements for building the +``html`` docs are no different than for regular development. See the README of +the `numpy/doc `__ repository and the step by +step instructions in ``doc/RELEASE_WALKTHROUGH.rst`` for more details. Uploading to PyPI ----------------- -The only application needed for uploading is - -- twine (pip). - -You will also need a PyPI token, which is best kept on a keyring. See the -twine keyring_ documentation for how to do that. - -.. _keyring: https://twine.readthedocs.io/en/stable/#keyring-support - +Creating a release on PyPI and uploading wheels and sdist is automated in CI +and uses `PyPI's trusted publishing `__. +See the README in the `numpy/numpy-release `__ +repository and the step by step instructions in ``doc/RELEASE_WALKTHROUGH.rst`` +for more details. Generating author/PR lists -------------------------- You will need a personal access token ``_ -so that scripts can access the github NumPy repository. - -- gitpython (pip) -- pygithub (pip) +so that scripts can access the GitHub NumPy repository. With that token, the +author/PR changelog content can be generated by running ``spin changelog``. It +may require a few extra packages, like ``gitpython`` and ``pygithub``. What is released ================ -* **Wheels** - We currently support Python 3.10-3.13 on Windows, OSX, and Linux. 
- - * Windows: 32-bit and 64-bit wheels built using Github actions; - * OSX: x64_86 and arm64 OSX wheels built using Github actions; - * Linux: x64_86 and aarch64 Manylinux2014 wheels built using Github actions. - -* **Other** - Release notes and changelog +On PyPI we release wheels for a number of platforms (as discussed higher up), +and an sdist. -* **Source distribution** - We build source releases in the .tar.gz format. +On GitHub Releases we release the same sdist (because the source archives which +are autogenerated by GitHub itself aren't complete), as well as the release +notes and changelog. Release process @@ -147,30 +118,11 @@ Release process Agree on a release schedule --------------------------- -A typical release schedule is one beta, two release candidates and a final -release. It's best to discuss the timing on the mailing list first, in order -for people to get their commits in on time, get doc wiki edits merged, etc. -After a date is set, create a new maintenance/x.y.z branch, add new empty -release notes for the next version in the main branch and update the Trac -Milestones. - - -Make sure current branch builds a package correctly ---------------------------------------------------- -The CI builds wheels when a PR header begins with ``REL``. Your last -PR before releasing should be so marked and all the tests should pass. -You can also do:: - - git clean -fxdq - python setup.py bdist_wheel - python setup.py sdist - -For details of the build process itself, it is best to read the -Step-by-Step Directions below. - -.. note:: The following steps are repeated for the beta(s), release - candidates(s) and the final release. - +A typical release schedule for a feature release is two release candidates and +a final release. It's best to discuss the timing on the mailing list first, in +order for people to get their commits in on time. 
After a date is set, create a +new ``maintenance/x.y.z`` branch, add new empty release notes for the next version +in the main branch and update the Milestones on the issue tracker. Check deprecations ------------------ From 60f8c87482518e5facab0dee3499ce5456a8e3bd Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Wed, 10 Sep 2025 15:44:44 +0200 Subject: [PATCH 0432/1718] DOC: update `doc/RELEASE_WALKTHROUGH.rst` for release process changes --- doc/RELEASE_WALKTHROUGH.rst | 228 +++++++++++++++--------------------- 1 file changed, 95 insertions(+), 133 deletions(-) diff --git a/doc/RELEASE_WALKTHROUGH.rst b/doc/RELEASE_WALKTHROUGH.rst index a3787ae95ee5..82f8d2c9e9f0 100644 --- a/doc/RELEASE_WALKTHROUGH.rst +++ b/doc/RELEASE_WALKTHROUGH.rst @@ -1,8 +1,9 @@ -This is a walkthrough of the NumPy 2.1.0 release on Linux, modified for -building with GitHub Actions and cibuildwheels and uploading to the -`anaconda.org staging repository for NumPy `_. -The commands can be copied into the command line, but be sure to replace 2.1.0 -by the correct version. This should be read together with the +This is a walkthrough of the NumPy 2.4.0 release on Linux, which will be the +first feature release using the `numpy/numpy-release +`__ repository. + +The commands can be copied into the command line, but be sure to replace 2.4.0 +with the correct version. This should be read together with the :ref:`general release guide `. Facility preparation @@ -26,29 +27,24 @@ Prior to release Add/drop Python versions ------------------------ -When adding or dropping Python versions, two files need to be edited: - -- .github/workflows/wheels.yml # for github cibuildwheel -- pyproject.toml # for classifier and minimum version check. - +When adding or dropping Python versions, multiple config and CI files need to +be edited in addition to changing the minimum version in ``pyproject.toml``. Make these changes in an ordinary PR against main and backport if necessary. 
-Add ``[wheel build]`` at the end of the title line of the commit summary so -that wheel builds will be run to test the changes. We currently release wheels -for new Python versions after the first Python rc once manylinux and -cibuildwheel support it. +We currently release wheels for new Python versions after the first Python RC +once manylinux and cibuildwheel support that new Python version. Backport pull requests ---------------------- Changes that have been marked for this release must be backported to the -maintenance/2.1.x branch. +maintenance/2.4.x branch. -Update 2.1.0 milestones +Update 2.4.0 milestones ----------------------- -Look at the issues/prs with 2.1.0 milestones and either push them off to a +Look at the issues/prs with 2.4.0 milestones and either push them off to a later version, or maybe remove the milestone. You may need to add a milestone. @@ -63,14 +59,13 @@ Four documents usually need to be updated or created for the release PR: - The ``pyproject.toml`` file These changes should be made in an ordinary PR against the maintenance branch. -The commit heading should contain a ``[wheel build]`` directive to test if the -wheels build. Other small, miscellaneous fixes may be part of this PR. The -commit message might be something like:: +Other small, miscellaneous fixes may be part of this PR. The commit message +might be something like:: - REL: Prepare for the NumPy 2.1.0 release [wheel build] + REL: Prepare for the NumPy 2.4.0 release - - Create 2.1.0-changelog.rst. - - Update 2.1.0-notes.rst. + - Create 2.4.0-changelog.rst. + - Update 2.4.0-notes.rst. - Update .mailmap. - Update pyproject.toml @@ -97,12 +92,12 @@ Generate the changelog The changelog is generated using the changelog tool:: - $ spin changelog $GITHUB v2.0.0..maintenance/2.1.x > doc/changelog/2.1.0-changelog.rst + $ spin changelog $GITHUB v2.3.0..maintenance/2.4.x > doc/changelog/2.4.0-changelog.rst where ``GITHUB`` contains your GitHub access token. 
The text will need to be checked for non-standard contributor names. It is also a good idea to remove any links that may be present in the PR titles as they don't translate well to -markdown, replace them with monospaced text. The non-standard contributor names +Markdown, replace them with monospaced text. The non-standard contributor names should be fixed by updating the ``.mailmap`` file, which is a lot of work. It is best to make several trial runs before reaching this point and ping the malefactors using a GitHub issue to get the needed information. @@ -116,7 +111,7 @@ run ``spin notes``, which will incorporate the snippets into the ``doc/source/release/notes-towncrier.rst`` file and delete the snippets:: $ spin notes - $ gvim doc/source/release/notes-towncrier.rst doc/source/release/2.1.0-notes.rst + $ gvim doc/source/release/notes-towncrier.rst doc/source/release/2.4.0-notes.rst Once the ``notes-towncrier`` contents has been incorporated into release note the ``.. include:: notes-towncrier.rst`` directive can be removed. The notes @@ -142,8 +137,8 @@ isn't already present. Checkout the branch for the release, make sure it is up to date, and clean the repository:: - $ git checkout maintenance/2.1.x - $ git pull upstream maintenance/2.1.x + $ git checkout maintenance/2.4.x + $ git pull upstream maintenance/2.4.x $ git submodule update $ git clean -xdfq @@ -154,100 +149,71 @@ Sanity check:: Tag the release and push the tag. This requires write permission for the numpy repository:: - $ git tag -a -s v2.1.0 -m"NumPy 2.1.0 release" - $ git push upstream v2.1.0 + $ git tag -a -s v2.4.0 -m"NumPy 2.4.0 release" + $ git push upstream v2.4.0 If you need to delete the tag due to error:: - $ git tag -d v2.1.0 - $ git push --delete upstream v2.1.0 - - -2. Build wheels ---------------- - -Tagging the build at the beginning of this process will trigger a wheel build -via cibuildwheel and upload wheels and an sdist to the staging repo. 
All wheels -are currently built on GitHub actions and take about 1 1/4 hours to build. - -If you wish to manually trigger a wheel build, you can do so: - -- On GitHub actions -> `Wheel builder`_ there is a "Run workflow" button, click - on it and choose the tag to build - -If some wheel builds fail for unrelated reasons, you can re-run them: - -- On GitHub actions select `Wheel builder`_ click on the task that contains - the build you want to re-run, it will have the tag as the branch. On the - upper right will be a re-run button, hit it and select "re-run failed" - -If some wheels fail to upload to anaconda, you can select those builds in the -`Wheel builder`_ and manually download the build artifact. This is a temporary -workaround, but sometimes the quickest way to get a release out. - -.. _`staging repository`: https://anaconda.org/multibuild-wheels-staging/numpy/files -.. _`Wheel builder`: https://github.com/numpy/numpy/actions/workflows/wheels.yml - - -3. Download wheels ------------------- + $ git tag -d v2.4.0 + $ git push --delete upstream v2.4.0 -When the wheels have all been successfully built and staged, download them from the -Anaconda staging directory using the ``tools/download-wheels.py`` script:: - $ cd ../numpy - $ mkdir -p release/installers - $ python3 tools/download-wheels.py 2.1.0 - - -4. Generate the README files ----------------------------- - -This needs to be done after all installers are downloaded, but before the pavement -file is updated for continued development:: - - $ python write_release 2.1.0 +2. Build wheels and sdist +------------------------- +Create a ``maintenance/2.4.x`` branch in the ``numpy-release`` repository, +and open a PR changing the ``SOURCE_REF_TO_BUILD`` identifier at the top of +``.github/workflows/wheels.yml`` to ``v2.4.0``. That will do a full set of +wheel builds on the PR, if everything looks good merge the PR. -5. 
Upload to PyPI ------------------ +All wheels are currently built in that repository on GitHub Actions, they take +about 1 hour to build. -Upload to PyPI using ``twine``:: +If you wish to manually trigger a wheel build, you can do so: in your browser, +go to `numpy-release/actions/workflows/wheels.yml `__ +and click on the "Run workflow" button, then choose the tag to build. If some +wheel builds fail for unrelated reasons, you can re-run them as normal +in the GitHub Actions UI with "re-run failed". - $ cd ../numpy - $ twine upload release/installers/*.whl - $ twine upload release/installers/*.gz # Upload last. +Once you are ready to publish a release to PyPI, use that same "Run workflow" +button and choose ``pypi`` in the *environment* dropdown. All wheels and the +sdist will build and be ready to release to PyPI after manual inspection that +everything passed. E.g., the number of artifacts is correct, and the wheel +filenames and sizes look as expected. If desired, you can also download an +artifact for local unzipping and inspection. You will get an email notification +as well with a "Review pending deployments" link. Once you're ready, press the +button to start the uploads to PyPI, which will complete the release. -The source file should be uploaded last to avoid synchronization problems that -might occur if pip users access the files while this is in process, causing pip -to build from source rather than downloading a binary wheel. PyPI only allows a -single source distribution, here we have chosen the gz version. If the -uploading breaks because of network related reasons, you can try re-running the -commands, possibly after a fix. Twine will now handle the error generated by -PyPI when the same file is uploaded twice. +3. Upload files to GitHub Releases +---------------------------------- -6. 
Upload files to GitHub -------------------------- - -Go to ``_, there should be a ``v2.1.0 +Go to ``_, there should be a ``v2.4.0 tag``, click on it and hit the edit button for that tag and update the title to -'v2.1.0 (). There are two ways to add files, using an editable text -window and as binary uploads. Start by editing the ``release/README.md`` that -is translated from the rst version using pandoc. Things that will need fixing: -PR lines from the changelog, if included, are wrapped and need unwrapping, -links should be changed to monospaced text. Then copy the contents to the -clipboard and paste them into the text window. It may take several tries to get -it look right. Then - -- Upload ``release/installers/numpy-2.1.0.tar.gz`` as a binary file. +"v2.4.0 ()". There are two ways to add files, using an editable text +window and as binary uploads. + +Start by running ``spin notes 2.4.0`` and then edit the ``release/README.md`` +that is translated from the rst version using pandoc. Things that will need +fixing: PR lines from the changelog, if included, are wrapped and need +unwrapping, links should be changed to monospaced text. Then copy the contents +to the clipboard and paste them into the text window. It may take several tries +to get it look right. Then + +- Download the sdist (``numpy-2.4.0.tar.gz``) from PyPI upload it to GitHub as + a binary file. - Upload ``release/README.rst`` as a binary file. -- Upload ``doc/changelog/2.1.0-changelog.rst`` as a binary file. +- Upload ``doc/changelog/2.4.0-changelog.rst`` as a binary file. - Check the pre-release button if this is a pre-releases. -- Hit the ``{Publish,Update} release`` button at the bottom. +- Hit the ``Publish release`` button at the bottom. + +.. note:: + Please ensure that all 3 files are uploaded are present and the + release text is complete. Releases are configured to be immutable, so + mistakes can't (easily) be fixed anymore. -7. Upload documents to numpy.org (skip for prereleases) +4. 
Upload documents to numpy.org (skip for prereleases) ------------------------------------------------------- .. note:: You will need a GitHub personal access token to push the update. @@ -257,7 +223,7 @@ and most patch releases. ``make merge-doc`` clones the ``numpy/doc`` repo into ``doc/build/merge`` and updates it with the new documentation:: $ git clean -xdfq - $ git co v2.1.0 + $ git co v2.4.0 $ rm -rf doc/build # want version to be current $ python -m spin docs merge-doc --build $ pushd doc/build/merge @@ -284,45 +250,41 @@ from ``numpy.org``:: Update the stable link and update:: - $ ln -sfn 2.1 stable + $ ln -sfn 2.4 stable $ ls -l # check the link Once everything seems satisfactory, update, commit and upload the changes:: - $ git commit -a -m"Add documentation for v2.1.0" + $ git commit -a -m"Add documentation for v2.4.0" $ git push git@github.com:numpy/doc $ popd -8. Reset the maintenance branch into a development state (skip for prereleases) +5. Reset the maintenance branch into a development state (skip for prereleases) ------------------------------------------------------------------------------- Create release notes for next release and edit them to set the version. These notes will be a skeleton and have little content:: - $ git checkout -b begin-2.1.1 maintenance/2.1.x - $ cp doc/source/release/template.rst doc/source/release/2.1.1-notes.rst - $ gvim doc/source/release/2.1.1-notes.rst - $ git add doc/source/release/2.1.1-notes.rst - -Add new release notes to the documentation release list and update the -``RELEASE_NOTES`` variable in ``pavement.py``:: + $ git checkout -b begin-2.4.1 maintenance/2.4.x + $ cp doc/source/release/template.rst doc/source/release/2.4.1-notes.rst + $ gvim doc/source/release/2.4.1-notes.rst + $ git add doc/source/release/2.4.1-notes.rst - $ gvim doc/source/release.rst pavement.py - -Update the ``version`` in ``pyproject.toml``:: +Add new release notes to the documentation release list. 
Then update the +``version`` in ``pyproject.toml``:: $ gvim pyproject.toml Commit the result:: - $ git commit -a -m"MAINT: Prepare 2.1.x for further development" + $ git commit -a -m"MAINT: Prepare 2.4.x for further development" $ git push origin HEAD Go to GitHub and make a PR. It should be merged quickly. -9. Announce the release on numpy.org (skip for prereleases) +6. Announce the release on numpy.org (skip for prereleases) ----------------------------------------------------------- This assumes that you have forked ``_:: @@ -330,7 +292,7 @@ This assumes that you have forked ``_:: $ cd ../numpy.org $ git checkout main $ git pull upstream main - $ git checkout -b announce-numpy-2.1.0 + $ git checkout -b announce-numpy-2.4.0 $ gvim content/en/news.md - For all releases, go to the bottom of the page and add a one line link. Look @@ -342,35 +304,35 @@ This assumes that you have forked ``_:: commit and push:: - $ git commit -a -m"announce the NumPy 2.1.0 release" + $ git commit -a -m"announce the NumPy 2.4.0 release" $ git push origin HEAD Go to GitHub and make a PR. -10. Announce to mailing lists ------------------------------ +7. Announce to mailing lists +---------------------------- -The release should be announced on the numpy-discussion, scipy-devel, and +The release should be announced on the numpy-discussion and python-announce-list mailing lists. Look at previous announcements for the basic template. The contributor and PR lists are the same as generated for the release notes above. If you crosspost, make sure that python-announce-list is BCC so that replies will not be sent to that list. -11. Post-release update main (skip for prereleases) ---------------------------------------------------- +8. Post-release update main (skip for prereleases) +-------------------------------------------------- Checkout main and forward port the documentation changes. 
You may also want to update these notes if procedures have changed or improved:: - $ git checkout -b post-2.1.0-release-update main - $ git checkout maintenance/2.1.x doc/source/release/2.1.0-notes.rst - $ git checkout maintenance/2.1.x doc/changelog/2.1.0-changelog.rst - $ git checkout maintenance/2.1.x .mailmap # only if updated for release. + $ git checkout -b post-2.4.0-release-update main + $ git checkout maintenance/2.4.x doc/source/release/2.4.0-notes.rst + $ git checkout maintenance/2.4.x doc/changelog/2.4.0-changelog.rst + $ git checkout maintenance/2.4.x .mailmap # only if updated for release. $ gvim doc/source/release.rst # Add link to new notes $ git status # check status before commit - $ git commit -a -m"MAINT: Update main after 2.1.0 release." + $ git commit -a -m"MAINT: Update main after 2.4.0 release." $ git push origin HEAD Go to GitHub and make a PR. From 8b5500e5ee9ff31cd3960b0c4c72875017ff0bee Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 10 Sep 2025 18:04:03 +0200 Subject: [PATCH 0433/1718] TYP: fix ``np.floating`` method declarations --- numpy/__init__.pyi | 164 ++++++++++++++++-- numpy/_typing/_callable.pyi | 55 +----- numpy/typing/tests/data/reveal/arithmetic.pyi | 38 ++-- numpy/typing/tests/data/reveal/mod.pyi | 15 +- 4 files changed, 172 insertions(+), 100 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index ed8fa9a93eda..be21a68a4892 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -134,9 +134,6 @@ from numpy._typing import ( ) from numpy._typing._callable import ( - _FloatOp, - _FloatMod, - _FloatDivMod, _ComparisonOpLT, _ComparisonOpLE, _ComparisonOpGT, @@ -4753,22 +4750,151 @@ class inexact(number[_NBit, _InexactItemT_co], Generic[_NBit, _InexactItemT_co]) class floating(_RealMixin, _RoundMixin, inexact[_NBit1, float]): def __new__(cls, value: _ConvertibleToFloat | None = 0, /) -> Self: ... 
- __add__: _FloatOp[_NBit1] - __radd__: _FloatOp[_NBit1] - __sub__: _FloatOp[_NBit1] - __rsub__: _FloatOp[_NBit1] - __mul__: _FloatOp[_NBit1] - __rmul__: _FloatOp[_NBit1] - __truediv__: _FloatOp[_NBit1] - __rtruediv__: _FloatOp[_NBit1] - __floordiv__: _FloatOp[_NBit1] - __rfloordiv__: _FloatOp[_NBit1] - __pow__: _FloatOp[_NBit1] - __rpow__: _FloatOp[_NBit1] - __mod__: _FloatMod[_NBit1] - __rmod__: _FloatMod[_NBit1] - __divmod__: _FloatDivMod[_NBit1] - __rdivmod__: _FloatDivMod[_NBit1] + # arithmetic ops + + @override # type: ignore[override] + @overload + def __add__(self, other: int | float16 | uint8 | int8 | bool_ | Self, /) -> Self: ... + @overload + def __add__(self, other: integer | floating, /) -> floating: ... + @overload + def __add__(self, other: float, /) -> Self: ... + @overload + def __add__(self, other: complex, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __radd__(self, other: int | float16 | uint8 | int8 | bool_, /) -> Self: ... + @overload + def __radd__(self, other: integer | floating, /) -> floating: ... + @overload + def __radd__(self, other: float, /) -> Self: ... + @overload + def __radd__(self, other: complex, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __sub__(self, other: int | float16 | uint8 | int8 | bool_ | Self, /) -> Self: ... + @overload + def __sub__(self, other: integer | floating, /) -> floating: ... + @overload + def __sub__(self, other: float, /) -> Self: ... + @overload + def __sub__(self, other: complex, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __rsub__(self, other: int | float16 | uint8 | int8 | bool_, /) -> Self: ... + @overload + def __rsub__(self, other: integer | floating, /) -> floating: ... + @overload + def __rsub__(self, other: float, /) -> Self: ... + @overload + def __rsub__(self, other: complex, /) -> complexfloating: ... 
+ + @override # type: ignore[override] + @overload + def __mul__(self, other: int | float16 | uint8 | int8 | bool_ | Self, /) -> Self: ... + @overload + def __mul__(self, other: integer | floating, /) -> floating: ... + @overload + def __mul__(self, other: float, /) -> Self: ... + @overload + def __mul__(self, other: complex, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __rmul__(self, other: int | float16 | uint8 | int8 | bool_, /) -> Self: ... + @overload + def __rmul__(self, other: integer | floating, /) -> floating: ... + @overload + def __rmul__(self, other: float, /) -> Self: ... + @overload + def __rmul__(self, other: complex, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __pow__(self, other: int | float16 | uint8 | int8 | bool_ | Self, mod: None = None, /) -> Self: ... + @overload + def __pow__(self, other: integer | floating, mod: None = None, /) -> floating: ... + @overload + def __pow__(self, other: float, mod: None = None, /) -> Self: ... + @overload + def __pow__(self, other: complex, mod: None = None, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __rpow__(self, other: int | float16 | uint8 | int8 | bool_, mod: None = None, /) -> Self: ... + @overload + def __rpow__(self, other: integer | floating, mod: None = None, /) -> floating: ... + @overload + def __rpow__(self, other: float, mod: None = None, /) -> Self: ... + @overload + def __rpow__(self, other: complex, mod: None = None, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __truediv__(self, other: int | float16 | uint8 | int8 | bool_ | Self, /) -> Self: ... + @overload + def __truediv__(self, other: integer | floating, /) -> floating: ... + @overload + def __truediv__(self, other: float, /) -> Self: ... + @overload + def __truediv__(self, other: complex, /) -> complexfloating: ... 
+ + @override # type: ignore[override] + @overload + def __rtruediv__(self, other: int | float16 | uint8 | int8 | bool_, /) -> Self: ... + @overload + def __rtruediv__(self, other: integer | floating, /) -> floating: ... + @overload + def __rtruediv__(self, other: float, /) -> Self: ... + @overload + def __rtruediv__(self, other: complex, /) -> complexfloating: ... + + # modular division ops + + @overload + def __floordiv__(self, other: int | float16 | uint8 | int8 | bool_ | Self, /) -> Self: ... + @overload + def __floordiv__(self, other: integer | floating, /) -> floating: ... + @overload + def __floordiv__(self, other: float, /) -> Self: ... + + @overload + def __rfloordiv__(self, other: int | float16 | uint8 | int8 | bool_, /) -> Self: ... + @overload + def __rfloordiv__(self, other: integer | floating, /) -> floating: ... + @overload + def __rfloordiv__(self, other: float, /) -> Self: ... + + @overload + def __mod__(self, other: int | float16 | uint8 | int8 | bool_ | Self, /) -> Self: ... + @overload + def __mod__(self, other: integer | floating, /) -> floating: ... + @overload + def __mod__(self, other: float, /) -> Self: ... + + @overload + def __rmod__(self, other: int | float16 | uint8 | int8 | bool_, /) -> Self: ... + @overload + def __rmod__(self, other: integer | floating, /) -> floating: ... + @overload + def __rmod__(self, other: float, /) -> Self: ... + + @overload + def __divmod__(self, other: int | float16 | uint8 | int8 | bool_ | Self, /) -> _2Tuple[Self]: ... + @overload + def __divmod__(self, other: integer | floating, /) -> _2Tuple[floating]: ... + @overload + def __divmod__(self, other: float, /) -> _2Tuple[Self]: ... + + @overload + def __rdivmod__(self, other: int | float16 | uint8 | int8 | bool_, /) -> _2Tuple[Self]: ... + @overload + def __rdivmod__(self, other: integer | floating, /) -> _2Tuple[floating]: ... + @overload + def __rdivmod__(self, other: float, /) -> _2Tuple[Self]: ... 
# NOTE: `is_integer` and `as_integer_ratio` are technically defined in the concrete subtypes def is_integer(self, /) -> builtins.bool: ... diff --git a/numpy/_typing/_callable.pyi b/numpy/_typing/_callable.pyi index 96fa4000889f..a4d4f3452a12 100644 --- a/numpy/_typing/_callable.pyi +++ b/numpy/_typing/_callable.pyi @@ -8,69 +8,16 @@ See the `Mypy documentation`_ on protocols for more details. """ -from typing import Any, Protocol, TypeAlias, TypeVar, final, overload, type_check_only +from typing import Any, Protocol, TypeVar, final, overload, type_check_only import numpy as np -from numpy import complex128, complexfloating, float64, floating, integer -from . import NBitBase from ._array_like import NDArray -from ._nbit import _NBitInt from ._nested_sequence import _NestedSequence -_T = TypeVar("_T") _T1_contra = TypeVar("_T1_contra", contravariant=True) _T2_contra = TypeVar("_T2_contra", contravariant=True) -_2Tuple: TypeAlias = tuple[_T, _T] - -_NBit1 = TypeVar("_NBit1", bound=NBitBase) -_NBit2 = TypeVar("_NBit2", bound=NBitBase) - -@type_check_only -class _FloatOp(Protocol[_NBit1]): - @overload - def __call__(self, other: int, /) -> floating[_NBit1]: ... - @overload - def __call__(self, other: float, /) -> floating[_NBit1] | float64: ... - @overload - def __call__( - self, other: complex, / - ) -> complexfloating[_NBit1, _NBit1] | complex128: ... - @overload - def __call__( - self, other: integer[_NBit2] | floating[_NBit2], / - ) -> floating[_NBit1] | floating[_NBit2]: ... - -@type_check_only -class _FloatMod(Protocol[_NBit1]): - @overload - def __call__(self, other: bool, /) -> floating[_NBit1]: ... - @overload - def __call__(self, other: int, /) -> floating[_NBit1] | floating[_NBitInt]: ... - @overload - def __call__(self, other: float, /) -> floating[_NBit1] | float64: ... - @overload - def __call__( - self, other: integer[_NBit2] | floating[_NBit2], / - ) -> floating[_NBit1] | floating[_NBit2]: ... 
- -class _FloatDivMod(Protocol[_NBit1]): - @overload - def __call__(self, other: bool, /) -> _2Tuple[floating[_NBit1]]: ... - @overload - def __call__( - self, other: int, / - ) -> _2Tuple[floating[_NBit1]] | _2Tuple[floating[_NBitInt]]: ... - @overload - def __call__( - self, other: float, / - ) -> _2Tuple[floating[_NBit1]] | _2Tuple[float64]: ... - @overload - def __call__( - self, other: integer[_NBit2] | floating[_NBit2], / - ) -> _2Tuple[floating[_NBit1]] | _2Tuple[floating[_NBit2]]: ... - @final @type_check_only class _SupportsLT(Protocol): diff --git a/numpy/typing/tests/data/reveal/arithmetic.pyi b/numpy/typing/tests/data/reveal/arithmetic.pyi index 1c564df5de70..ac4114abadd4 100644 --- a/numpy/typing/tests/data/reveal/arithmetic.pyi +++ b/numpy/typing/tests/data/reveal/arithmetic.pyi @@ -486,7 +486,7 @@ assert_type(c8 / b_, np.complex64) # Complex -assert_type(c16 + f16, np.complex128 | np.complexfloating[_128Bit, _128Bit]) +assert_type(c16 + f16, np.complexfloating) assert_type(c16 + c16, np.complex128) assert_type(c16 + f8, np.complex128) assert_type(c16 + i8, np.complex128) @@ -499,12 +499,12 @@ assert_type(c16 + c, np.complex128) assert_type(c16 + f, np.complex128) assert_type(c16 + AR_f, npt.NDArray[np.complex128]) -assert_type(f16 + c16, np.complex128 | np.complexfloating[_128Bit, _128Bit]) +assert_type(f16 + c16, np.complexfloating) assert_type(c16 + c16, np.complex128) assert_type(f8 + c16, np.complex128) assert_type(i8 + c16, np.complex128) assert_type(c8 + c16, np.complex128 | np.complex64) -assert_type(f4 + c16, np.complex128 | np.complex64) +assert_type(f4 + c16, np.complexfloating) assert_type(i4 + c16, np.complex128) assert_type(b_ + c16, np.complex128) assert_type(b + c16, np.complex128) @@ -512,10 +512,10 @@ assert_type(c + c16, np.complex128) assert_type(f + c16, np.complex128) assert_type(AR_f + c16, npt.NDArray[np.complex128]) -assert_type(c8 + f16, np.complexfloating[_32Bit, _32Bit] | np.complexfloating[_128Bit, _128Bit]) 
+assert_type(c8 + f16, np.complex64 | np.complexfloating[_128Bit, _128Bit]) assert_type(c8 + c16, np.complex64 | np.complex128) assert_type(c8 + f8, np.complex64 | np.complex128) -assert_type(c8 + i8, np.complexfloating[_32Bit, _32Bit] | np.complexfloating[_64Bit, _64Bit]) +assert_type(c8 + i8, np.complex64 | np.complexfloating[_64Bit, _64Bit]) assert_type(c8 + c8, np.complex64) assert_type(c8 + f4, np.complex64) assert_type(c8 + i4, np.complex64) @@ -540,7 +540,7 @@ assert_type(AR_f + c8, npt.NDArray[np.complexfloating]) # Float -assert_type(f8 + f16, np.float64 | np.floating[_128Bit]) +assert_type(f8 + f16, np.floating) assert_type(f8 + f8, np.float64) assert_type(f8 + i8, np.float64) assert_type(f8 + f4, np.float64) @@ -551,10 +551,10 @@ assert_type(f8 + c, np.float64 | np.complex128) assert_type(f8 + f, np.float64) assert_type(f8 + AR_f, npt.NDArray[np.float64]) -assert_type(f16 + f8, np.floating[_128Bit] | np.float64) +assert_type(f16 + f8, np.floating) assert_type(f8 + f8, np.float64) assert_type(i8 + f8, np.float64) -assert_type(f4 + f8, np.float32 | np.float64) +assert_type(f4 + f8, np.floating) assert_type(i4 + f8, np.float64) assert_type(b_ + f8, np.float64) assert_type(b + f8, np.float64) @@ -562,26 +562,26 @@ assert_type(c + f8, np.complex128 | np.float64) assert_type(f + f8, np.float64) assert_type(AR_f + f8, npt.NDArray[np.float64]) -assert_type(f4 + f16, np.float32 | np.floating[_128Bit]) -assert_type(f4 + f8, np.float32 | np.float64) -assert_type(f4 + i8, np.float32 | np.floating[_64Bit]) +assert_type(f4 + f16, np.floating) +assert_type(f4 + f8, np.floating) +assert_type(f4 + i8, np.floating) assert_type(f4 + f4, np.float32) -assert_type(f4 + i4, np.float32) +assert_type(f4 + i4, np.floating) assert_type(f4 + b_, np.float32) assert_type(f4 + b, np.float32) -assert_type(f4 + c, np.complex64 | np.complex128) -assert_type(f4 + f, np.float32 | np.float64) +assert_type(f4 + c, np.complexfloating) +assert_type(f4 + f, np.float32) assert_type(f4 + AR_f, 
npt.NDArray[np.float64]) -assert_type(f16 + f4, np.floating[_128Bit] | np.float32) +assert_type(f16 + f4, np.floating) assert_type(f8 + f4, np.float64) -assert_type(i8 + f4, np.floating[_32Bit] | np.floating[_64Bit]) +assert_type(i8 + f4, np.floating) assert_type(f4 + f4, np.float32) -assert_type(i4 + f4, np.float32) +assert_type(i4 + f4, np.floating) assert_type(b_ + f4, np.float32) assert_type(b + f4, np.float32) -assert_type(c + f4, np.complex64 | np.complex128) -assert_type(f + f4, np.float64 | np.float32) +assert_type(c + f4, np.complexfloating) +assert_type(f + f4, np.float32) assert_type(AR_f + f4, npt.NDArray[np.float64]) # Int diff --git a/numpy/typing/tests/data/reveal/mod.pyi b/numpy/typing/tests/data/reveal/mod.pyi index 338a9a1ff729..ef07dc0c8c8a 100644 --- a/numpy/typing/tests/data/reveal/mod.pyi +++ b/numpy/typing/tests/data/reveal/mod.pyi @@ -3,7 +3,6 @@ from typing import Literal as L, assert_type import numpy as np import numpy.typing as npt -from numpy._typing import _64Bit f8: np.float64 i8: np.int64 @@ -113,7 +112,7 @@ assert_type(i8 % f8, np.float64) assert_type(i4 % i8, np.signedinteger) assert_type(i4 % f8, np.float64) assert_type(i4 % i4, np.int32) -assert_type(i4 % f4, np.float32) +assert_type(i4 % f4, np.floating) assert_type(i8 % AR_b, npt.NDArray[np.int64]) assert_type(divmod(i8, b), tuple[np.int64, np.int64]) @@ -122,9 +121,9 @@ assert_type(divmod(i8, i8), tuple[np.int64, np.int64]) # workarounds for https://github.com/microsoft/pyright/issues/9663 assert_type(i8.__divmod__(f), tuple[np.float64, np.float64]) assert_type(i8.__divmod__(f8), tuple[np.float64, np.float64]) -assert_type(divmod(i8, f4), tuple[np.floating[_64Bit], np.floating[_64Bit]] | tuple[np.float32, np.float32]) +assert_type(divmod(i8, f4), tuple[np.floating, np.floating]) assert_type(divmod(i4, i4), tuple[np.int32, np.int32]) -assert_type(divmod(i4, f4), tuple[np.float32, np.float32]) +assert_type(divmod(i4, f4), tuple[np.floating, np.floating]) assert_type(divmod(i8, 
AR_b), tuple[npt.NDArray[np.int64], npt.NDArray[np.int64]]) assert_type(b % i8, np.int64) @@ -134,7 +133,7 @@ assert_type(f8 % i8, np.float64) assert_type(i8 % i4, np.signedinteger) assert_type(f8 % i4, np.float64) assert_type(i4 % i4, np.int32) -assert_type(f4 % i4, np.float32) +assert_type(f4 % i4, np.floating) assert_type(AR_b % i8, npt.NDArray[np.int64]) assert_type(divmod(b, i8), tuple[np.int64, np.int64]) @@ -144,15 +143,15 @@ assert_type(divmod(f8, i8), tuple[np.float64, np.float64]) assert_type(divmod(i4, i8), tuple[np.signedinteger, np.signedinteger]) assert_type(divmod(i4, i4), tuple[np.int32, np.int32]) # workarounds for https://github.com/microsoft/pyright/issues/9663 -assert_type(f4.__divmod__(i8), tuple[np.floating[_64Bit], np.floating[_64Bit]] | tuple[np.float32, np.float32]) -assert_type(f4.__divmod__(i4), tuple[np.float32, np.float32]) +assert_type(f4.__divmod__(i8), tuple[np.floating, np.floating]) +assert_type(f4.__divmod__(i4), tuple[np.floating, np.floating]) assert_type(AR_b.__divmod__(i8), tuple[npt.NDArray[np.int64], npt.NDArray[np.int64]]) # float assert_type(f8 % b, np.float64) assert_type(f8 % f, np.float64) -assert_type(i8 % f4, np.floating[_64Bit] | np.float32) +assert_type(i8 % f4, np.floating) assert_type(f4 % f4, np.float32) assert_type(f8 % AR_b, npt.NDArray[np.float64]) From 44c7433a5ae5f1091edb18b2f74bccece9347bea Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 10 Sep 2025 18:52:31 +0200 Subject: [PATCH 0434/1718] TYP: fix ``timedelta64`` and ``datetime64`` method declarations --- numpy/__init__.pyi | 95 +++++++++++++++---- numpy/_typing/_callable.pyi | 87 ----------------- numpy/typing/tests/data/reveal/arithmetic.pyi | 2 +- 3 files changed, 77 insertions(+), 107 deletions(-) delete mode 100644 numpy/_typing/_callable.pyi diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index be21a68a4892..a19cd8475c10 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -133,17 +133,6 @@ from numpy._typing import ( 
_GUFunc_Nin2_Nout1, ) -from numpy._typing._callable import ( - _ComparisonOpLT, - _ComparisonOpLE, - _ComparisonOpGT, - _ComparisonOpGE, - _SupportsLT, - _SupportsLE, - _SupportsGT, - _SupportsGE, -) - # NOTE: Numpy's mypy plugin is used for removing the types unavailable to the specific platform from numpy._typing._extended_precision import ( float96, @@ -1042,6 +1031,26 @@ class _FormerAttrsDict(TypedDict): ### Protocols (for internal use only) +@final +@type_check_only +class _SupportsLT(Protocol): + def __lt__(self, other: Any, /) -> Any: ... + +@final +@type_check_only +class _SupportsLE(Protocol): + def __le__(self, other: Any, /) -> Any: ... + +@final +@type_check_only +class _SupportsGT(Protocol): + def __gt__(self, other: Any, /) -> Any: ... + +@final +@type_check_only +class _SupportsGE(Protocol): + def __ge__(self, other: Any, /) -> Any: ... + @type_check_only class _SupportsFileMethods(SupportsFlush, Protocol): # Protocol for representing file-like-objects accepted by `ndarray.tofile` and `fromfile` @@ -5365,10 +5374,35 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] @overload def __rfloordiv__(self, a: timedelta64, /) -> int64: ... - __lt__: _ComparisonOpLT[_TD64Like_co, _ArrayLikeTD64_co] - __le__: _ComparisonOpLE[_TD64Like_co, _ArrayLikeTD64_co] - __gt__: _ComparisonOpGT[_TD64Like_co, _ArrayLikeTD64_co] - __ge__: _ComparisonOpGE[_TD64Like_co, _ArrayLikeTD64_co] + # comparison ops + + @overload + def __lt__(self, other: _TD64Like_co, /) -> bool_: ... + @overload + def __lt__(self, other: _ArrayLikeTD64_co | _NestedSequence[_SupportsGT], /) -> NDArray[bool_]: ... + @overload + def __lt__(self, other: _SupportsGT, /) -> bool_: ... + + @overload + def __le__(self, other: _TD64Like_co, /) -> bool_: ... + @overload + def __le__(self, other: _ArrayLikeTD64_co | _NestedSequence[_SupportsGE], /) -> NDArray[bool_]: ... + @overload + def __le__(self, other: _SupportsGT, /) -> bool_: ... 
+ + @overload + def __gt__(self, other: _TD64Like_co, /) -> bool_: ... + @overload + def __gt__(self, other: _ArrayLikeTD64_co | _NestedSequence[_SupportsLT], /) -> NDArray[bool_]: ... + @overload + def __gt__(self, other: _SupportsGT, /) -> bool_: ... + + @overload + def __ge__(self, other: _TD64Like_co, /) -> bool_: ... + @overload + def __ge__(self, other: _ArrayLikeTD64_co | _NestedSequence[_SupportsLE], /) -> NDArray[bool_]: ... + @overload + def __ge__(self, other: _SupportsGT, /) -> bool_: ... class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]): @property @@ -5465,10 +5499,33 @@ class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]): @overload def __rsub__(self, x: datetime64, /) -> timedelta64: ... - __lt__: _ComparisonOpLT[datetime64, _ArrayLikeDT64_co] - __le__: _ComparisonOpLE[datetime64, _ArrayLikeDT64_co] - __gt__: _ComparisonOpGT[datetime64, _ArrayLikeDT64_co] - __ge__: _ComparisonOpGE[datetime64, _ArrayLikeDT64_co] + @overload + def __lt__(self, other: datetime64, /) -> bool_: ... + @overload + def __lt__(self, other: _ArrayLikeDT64_co | _NestedSequence[_SupportsGT], /) -> NDArray[bool_]: ... + @overload + def __lt__(self, other: _SupportsGT, /) -> bool_: ... + + @overload + def __le__(self, other: datetime64, /) -> bool_: ... + @overload + def __le__(self, other: _ArrayLikeDT64_co | _NestedSequence[_SupportsGE], /) -> NDArray[bool_]: ... + @overload + def __le__(self, other: _SupportsGT, /) -> bool_: ... + + @overload + def __gt__(self, other: datetime64, /) -> bool_: ... + @overload + def __gt__(self, other: _ArrayLikeDT64_co | _NestedSequence[_SupportsLT], /) -> NDArray[bool_]: ... + @overload + def __gt__(self, other: _SupportsGT, /) -> bool_: ... + + @overload + def __ge__(self, other: datetime64, /) -> bool_: ... + @overload + def __ge__(self, other: _ArrayLikeDT64_co | _NestedSequence[_SupportsLE], /) -> NDArray[bool_]: ... + @overload + def __ge__(self, other: _SupportsGT, /) -> bool_: ... 
class flexible(_RealMixin, generic[_FlexibleItemT_co], Generic[_FlexibleItemT_co]): @abstractmethod diff --git a/numpy/_typing/_callable.pyi b/numpy/_typing/_callable.pyi deleted file mode 100644 index a4d4f3452a12..000000000000 --- a/numpy/_typing/_callable.pyi +++ /dev/null @@ -1,87 +0,0 @@ -""" -A module with various ``typing.Protocol`` subclasses that implement -the ``__call__`` magic method. - -See the `Mypy documentation`_ on protocols for more details. - -.. _`Mypy documentation`: https://mypy.readthedocs.io/en/stable/protocols.html#callback-protocols - -""" - -from typing import Any, Protocol, TypeVar, final, overload, type_check_only - -import numpy as np - -from ._array_like import NDArray -from ._nested_sequence import _NestedSequence - -_T1_contra = TypeVar("_T1_contra", contravariant=True) -_T2_contra = TypeVar("_T2_contra", contravariant=True) - -@final -@type_check_only -class _SupportsLT(Protocol): - def __lt__(self, other: Any, /) -> Any: ... - -@final -@type_check_only -class _SupportsLE(Protocol): - def __le__(self, other: Any, /) -> Any: ... - -@final -@type_check_only -class _SupportsGT(Protocol): - def __gt__(self, other: Any, /) -> Any: ... - -@final -@type_check_only -class _SupportsGE(Protocol): - def __ge__(self, other: Any, /) -> Any: ... - -@final -@type_check_only -class _ComparisonOpLT(Protocol[_T1_contra, _T2_contra]): - @overload - def __call__(self, other: _T1_contra, /) -> np.bool: ... - @overload - def __call__(self, other: _T2_contra, /) -> NDArray[np.bool]: ... - @overload - def __call__(self, other: _NestedSequence[_SupportsGT], /) -> NDArray[np.bool]: ... - @overload - def __call__(self, other: _SupportsGT, /) -> np.bool: ... - -@final -@type_check_only -class _ComparisonOpLE(Protocol[_T1_contra, _T2_contra]): - @overload - def __call__(self, other: _T1_contra, /) -> np.bool: ... - @overload - def __call__(self, other: _T2_contra, /) -> NDArray[np.bool]: ... 
- @overload - def __call__(self, other: _NestedSequence[_SupportsGE], /) -> NDArray[np.bool]: ... - @overload - def __call__(self, other: _SupportsGE, /) -> np.bool: ... - -@final -@type_check_only -class _ComparisonOpGT(Protocol[_T1_contra, _T2_contra]): - @overload - def __call__(self, other: _T1_contra, /) -> np.bool: ... - @overload - def __call__(self, other: _T2_contra, /) -> NDArray[np.bool]: ... - @overload - def __call__(self, other: _NestedSequence[_SupportsLT], /) -> NDArray[np.bool]: ... - @overload - def __call__(self, other: _SupportsLT, /) -> np.bool: ... - -@final -@type_check_only -class _ComparisonOpGE(Protocol[_T1_contra, _T2_contra]): - @overload - def __call__(self, other: _T1_contra, /) -> np.bool: ... - @overload - def __call__(self, other: _T2_contra, /) -> NDArray[np.bool]: ... - @overload - def __call__(self, other: _NestedSequence[_SupportsGT], /) -> NDArray[np.bool]: ... - @overload - def __call__(self, other: _SupportsGT, /) -> np.bool: ... diff --git a/numpy/typing/tests/data/reveal/arithmetic.pyi b/numpy/typing/tests/data/reveal/arithmetic.pyi index ac4114abadd4..491bce43fdae 100644 --- a/numpy/typing/tests/data/reveal/arithmetic.pyi +++ b/numpy/typing/tests/data/reveal/arithmetic.pyi @@ -3,7 +3,7 @@ from typing import Any, assert_type import numpy as np import numpy.typing as npt -from numpy._typing import _32Bit, _64Bit, _128Bit +from numpy._typing import _64Bit, _128Bit b: bool c: complex From 31a84e11810fb8b4b7fea3e512e8e23e8e9d5fca Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 10 Sep 2025 19:01:02 +0200 Subject: [PATCH 0435/1718] TYP: fix ``ndarray.strides`` decorator order --- numpy/__init__.pyi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index ed8fa9a93eda..21693f4fd662 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2162,8 +2162,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def shape(self, value: _ShapeLike) -> None: 
... @property def strides(self) -> _Shape: ... - @deprecated("Setting the strides on a NumPy array has been deprecated in NumPy 2.4") @strides.setter + @deprecated("Setting the strides on a NumPy array has been deprecated in NumPy 2.4") def strides(self, value: _ShapeLike) -> None: ... def byteswap(self, inplace: builtins.bool = ...) -> Self: ... def fill(self, value: Any, /) -> None: ... From dcb8abd45dab5c166ab7d9c5113e3d2f55ef5043 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 10 Sep 2025 17:08:55 +0000 Subject: [PATCH 0436/1718] MAINT: Bump actions/checkout from 4.2.2 to 5.0.0 Bumps [actions/checkout](https://github.com/actions/checkout) from 4.2.2 to 5.0.0. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/v4.2.2...08c6903cd8c0fde910a37f88322edcfb5dd907a8) --- updated-dependencies: - dependency-name: actions/checkout dependency-version: 5.0.0 dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] --- .github/workflows/linux-ppc64le.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/linux-ppc64le.yml b/.github/workflows/linux-ppc64le.yml index 223da69a7b8d..c561c3be4611 100644 --- a/.github/workflows/linux-ppc64le.yml +++ b/.github/workflows/linux-ppc64le.yml @@ -23,7 +23,7 @@ jobs: runs-on: ubuntu-24.04-ppc64le name: "Native PPC64LE" steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: recursive fetch-tags: true From ccbfe25034310d4d48598ae5b0d2e2f89c8e14bd Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 10 Sep 2025 19:11:21 +0200 Subject: [PATCH 0437/1718] TYP: remove unused ``# type: ignore`` comments in ``__init__.pyi`` --- numpy/__init__.pyi | 58 +++++++++++++++++++++++----------------------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index ed8fa9a93eda..f1194e0855ff 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2343,7 +2343,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def dot(self, b: _ScalarLike_co, out: None = ...) -> NDArray[Any]: ... @overload - def dot(self, b: ArrayLike, out: None = ...) -> Any: ... # type: ignore[misc] + def dot(self, b: ArrayLike, out: None = ...) -> Any: ... @overload def dot(self, b: ArrayLike, out: _ArrayT) -> _ArrayT: ... @@ -2355,7 +2355,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def put(self, /, indices: _ArrayLikeInt_co, values: ArrayLike, mode: _ModeKind = "raise") -> None: ... @overload - def searchsorted( # type: ignore[misc] + def searchsorted( self, # >= 1D array v: _ScalarLike_co, # 0D array-like side: _SortSide = ..., @@ -2409,7 +2409,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): ) -> _ArrayT: ... 
@overload - def take( # type: ignore[misc] + def take( self: NDArray[_ScalarT], indices: _IntLike_co, axis: SupportsIndex | None = ..., @@ -2417,7 +2417,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): mode: _ModeKind = ..., ) -> _ScalarT: ... @overload - def take( # type: ignore[misc] + def take( self, indices: _ArrayLikeInt_co, axis: SupportsIndex | None = ..., @@ -3045,7 +3045,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __mul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] @overload - def __mul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... # type: ignore[overload-overlap] + def __mul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... @overload def __mul__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... @overload @@ -3087,7 +3087,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __rmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] @overload - def __rmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... # type: ignore[overload-overlap] + def __rmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... @overload def __rmul__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... @overload @@ -3215,7 +3215,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __rfloordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] @overload - def __rfloordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... 
# type: ignore[overload-overlap] + def __rfloordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload def __rfloordiv__(self: NDArray[timedelta64], other: _ArrayLike[timedelta64], /) -> NDArray[int64]: ... @overload @@ -3294,9 +3294,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __rpow__(self: NDArray[Any], other: _ArrayLikeObject_co, mod: None = None, /) -> Any: ... @overload - def __lshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc] + def __lshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload - def __lshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[misc] + def __lshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload def __lshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload @@ -3305,9 +3305,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __lshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __rlshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc] + def __rlshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload - def __rlshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[misc] + def __rlshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload def __rlshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload @@ -3316,9 +3316,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __rlshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... 
@overload - def __rshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc] + def __rshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload - def __rshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[misc] + def __rshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload def __rshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload @@ -3327,9 +3327,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __rshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __rrshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc] + def __rrshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload - def __rrshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[misc] + def __rrshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload def __rrshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload @@ -3338,9 +3338,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __rrshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __and__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] + def __and__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... @overload - def __and__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[misc] + def __and__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... 
# type: ignore[overload-overlap] @overload def __and__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload @@ -3349,9 +3349,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __and__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __rand__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] + def __rand__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... @overload - def __rand__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[misc] + def __rand__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload def __rand__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload @@ -3360,9 +3360,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __rand__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __xor__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] + def __xor__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... @overload - def __xor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[misc] + def __xor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload def __xor__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload @@ -3371,9 +3371,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __xor__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __rxor__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... 
# type: ignore[misc] + def __rxor__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... @overload - def __rxor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[misc] + def __rxor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload def __rxor__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload @@ -3382,9 +3382,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __rxor__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __or__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] + def __or__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... @overload - def __or__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[misc] + def __or__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload def __or__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload @@ -3393,9 +3393,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __or__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __ror__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] + def __ror__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... @overload - def __ror__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[misc] + def __ror__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload def __ror__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... 
@overload @@ -3651,7 +3651,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): ) -> Any: ... @overload - def take( # type: ignore[misc] + def take( self, indices: _IntLike_co, axis: SupportsIndex | None = ..., @@ -3659,7 +3659,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): mode: _ModeKind = ..., ) -> Self: ... @overload - def take( # type: ignore[misc] + def take( self, indices: _ArrayLikeInt_co, axis: SupportsIndex | None = ..., From 84a88828a34581857ce5fb7fb0844fa4604ff348 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 10 Sep 2025 19:15:38 +0200 Subject: [PATCH 0438/1718] TYP: ignore ``deprecated`` mypy errors in ``__init__.pyi`` --- numpy/__init__.pyi | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index f1194e0855ff..700798001863 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -16,7 +16,7 @@ from numpy.__config__ import show as show_config from numpy._pytesttester import PytestTester from numpy._core._internal import _ctypes -from numpy._typing import ( +from numpy._typing import ( # type: ignore[deprecated] # Arrays ArrayLike, NDArray, @@ -431,7 +431,7 @@ from numpy.lib._arraypad_impl import ( pad, ) -from numpy.lib._arraysetops_impl import ( +from numpy.lib._arraysetops_impl import ( # type: ignore[deprecated] ediff1d, in1d, intersect1d, @@ -446,7 +446,7 @@ from numpy.lib._arraysetops_impl import ( unique_values, ) -from numpy.lib._function_base_impl import ( +from numpy.lib._function_base_impl import ( # type: ignore[deprecated] select, piecewise, trim_zeros, @@ -549,7 +549,7 @@ from numpy.lib._polynomial_impl import ( polyfit, ) -from numpy.lib._shape_base_impl import ( +from numpy.lib._shape_base_impl import ( # type: ignore[deprecated] column_stack, row_stack, dstack, From 7017d097a6785876934e163b42ed6e95d5f24029 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 10 Sep 2025 19:18:57 +0200 Subject: [PATCH 0439/1718] TYP: ignore LSP errors for 
``flatiter.__hash__ = None`` --- numpy/__init__.pyi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 700798001863..c04f12d86db7 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1605,7 +1605,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): @final class flatiter(Generic[_ArrayT_co]): - __hash__: ClassVar[None] + __hash__: ClassVar[None] # type: ignore[assignment] # pyright: ignore[reportIncompatibleMethodOverride] @property def base(self) -> _ArrayT_co: ... @property From b3a680080099158e48eea0e35d09bcfbfe6d0da8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 11 Sep 2025 17:07:21 +0000 Subject: [PATCH 0440/1718] MAINT: Bump github/codeql-action from 3.30.2 to 3.30.3 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.30.2 to 3.30.3. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/d3678e237b9c32a6c9bffb3315c335f976f3549f...192325c86100d080feab897ff886c34abd4c83a3) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.30.3 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 13619ca762c3..6ea330ea2637 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. 
- name: Initialize CodeQL - uses: github/codeql-action/init@d3678e237b9c32a6c9bffb3315c335f976f3549f # v3.30.2 + uses: github/codeql-action/init@192325c86100d080feab897ff886c34abd4c83a3 # v3.30.3 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@d3678e237b9c32a6c9bffb3315c335f976f3549f # v3.30.2 + uses: github/codeql-action/autobuild@192325c86100d080feab897ff886c34abd4c83a3 # v3.30.3 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@d3678e237b9c32a6c9bffb3315c335f976f3549f # v3.30.2 + uses: github/codeql-action/analyze@192325c86100d080feab897ff886c34abd4c83a3 # v3.30.3 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 9eb76bdc0525..5d8f1b91cbae 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
- name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@d3678e237b9c32a6c9bffb3315c335f976f3549f # v2.1.27 + uses: github/codeql-action/upload-sarif@192325c86100d080feab897ff886c34abd4c83a3 # v2.1.27 with: sarif_file: results.sarif From 9f7642d6ac96800366cbb62dff11ef02c069fc08 Mon Sep 17 00:00:00 2001 From: Raghuveer Date: Thu, 11 Sep 2025 10:40:41 -0700 Subject: [PATCH 0441/1718] Mark some functions [[maybe_unused]] --- numpy/_core/src/npysort/x86-simd-sort | 2 +- numpy/_core/src/npysort/x86_simd_qsort_16bit.dispatch.cpp | 8 -------- 2 files changed, 1 insertion(+), 9 deletions(-) diff --git a/numpy/_core/src/npysort/x86-simd-sort b/numpy/_core/src/npysort/x86-simd-sort index fe2b5bf62275..8f80b71c1326 160000 --- a/numpy/_core/src/npysort/x86-simd-sort +++ b/numpy/_core/src/npysort/x86-simd-sort @@ -1 +1 @@ -Subproject commit fe2b5bf62275ba6173c53b6c5b747c362b641d5e +Subproject commit 8f80b71c1326b76ce72faf9d86b399e2b81f5bd8 diff --git a/numpy/_core/src/npysort/x86_simd_qsort_16bit.dispatch.cpp b/numpy/_core/src/npysort/x86_simd_qsort_16bit.dispatch.cpp index 063e713c5256..11509e79c68b 100644 --- a/numpy/_core/src/npysort/x86_simd_qsort_16bit.dispatch.cpp +++ b/numpy/_core/src/npysort/x86_simd_qsort_16bit.dispatch.cpp @@ -17,11 +17,7 @@ namespace np { namespace qsort_simd { */ template<> void NPY_CPU_DISPATCH_CURFX(QSelect)(Half *arr, npy_intp num, npy_intp kth) { -#if defined(NPY_HAVE_AVX512_SPR) x86simdsortStatic::qselect(reinterpret_cast<_Float16*>(arr), kth, num, true); -#else - avx512_qselect_fp16(reinterpret_cast(arr), kth, num, true, false); -#endif } template<> void NPY_CPU_DISPATCH_CURFX(QSelect)(uint16_t *arr, npy_intp num, npy_intp kth) @@ -39,11 +35,7 @@ template<> void NPY_CPU_DISPATCH_CURFX(QSelect)(int16_t *arr, npy_intp num, npy_ */ template<> void NPY_CPU_DISPATCH_CURFX(QSort)(Half *arr, npy_intp size) { -#if defined(NPY_HAVE_AVX512_SPR) x86simdsortStatic::qsort(reinterpret_cast<_Float16*>(arr), size, true); -#else - 
avx512_qsort_fp16(reinterpret_cast(arr), size, true, false); -#endif } template<> void NPY_CPU_DISPATCH_CURFX(QSort)(uint16_t *arr, npy_intp size) { From cb207265fd03b32ffa5e004d00cb2358d6504113 Mon Sep 17 00:00:00 2001 From: Raghuveer Date: Thu, 11 Sep 2025 12:13:35 -0700 Subject: [PATCH 0442/1718] Revert "Mark some functions [[maybe_unused]]" This reverts commit 9f7642d6ac96800366cbb62dff11ef02c069fc08. --- numpy/_core/src/npysort/x86-simd-sort | 2 +- numpy/_core/src/npysort/x86_simd_qsort_16bit.dispatch.cpp | 8 ++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/numpy/_core/src/npysort/x86-simd-sort b/numpy/_core/src/npysort/x86-simd-sort index 8f80b71c1326..fe2b5bf62275 160000 --- a/numpy/_core/src/npysort/x86-simd-sort +++ b/numpy/_core/src/npysort/x86-simd-sort @@ -1 +1 @@ -Subproject commit 8f80b71c1326b76ce72faf9d86b399e2b81f5bd8 +Subproject commit fe2b5bf62275ba6173c53b6c5b747c362b641d5e diff --git a/numpy/_core/src/npysort/x86_simd_qsort_16bit.dispatch.cpp b/numpy/_core/src/npysort/x86_simd_qsort_16bit.dispatch.cpp index 11509e79c68b..063e713c5256 100644 --- a/numpy/_core/src/npysort/x86_simd_qsort_16bit.dispatch.cpp +++ b/numpy/_core/src/npysort/x86_simd_qsort_16bit.dispatch.cpp @@ -17,7 +17,11 @@ namespace np { namespace qsort_simd { */ template<> void NPY_CPU_DISPATCH_CURFX(QSelect)(Half *arr, npy_intp num, npy_intp kth) { +#if defined(NPY_HAVE_AVX512_SPR) x86simdsortStatic::qselect(reinterpret_cast<_Float16*>(arr), kth, num, true); +#else + avx512_qselect_fp16(reinterpret_cast(arr), kth, num, true, false); +#endif } template<> void NPY_CPU_DISPATCH_CURFX(QSelect)(uint16_t *arr, npy_intp num, npy_intp kth) @@ -35,7 +39,11 @@ template<> void NPY_CPU_DISPATCH_CURFX(QSelect)(int16_t *arr, npy_intp num, npy_ */ template<> void NPY_CPU_DISPATCH_CURFX(QSort)(Half *arr, npy_intp size) { +#if defined(NPY_HAVE_AVX512_SPR) x86simdsortStatic::qsort(reinterpret_cast<_Float16*>(arr), size, true); +#else + avx512_qsort_fp16(reinterpret_cast(arr), 
size, true, false); +#endif } template<> void NPY_CPU_DISPATCH_CURFX(QSort)(uint16_t *arr, npy_intp size) { From f9583306b74a11b09fc7cb016b5cb9429692a43a Mon Sep 17 00:00:00 2001 From: Raghuveer Date: Thu, 11 Sep 2025 12:14:28 -0700 Subject: [PATCH 0443/1718] x86-simd-sort: Mark some functions [[maybe_unused]] --- numpy/_core/src/npysort/x86-simd-sort | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/npysort/x86-simd-sort b/numpy/_core/src/npysort/x86-simd-sort index fe2b5bf62275..6a7a01da4b0d 160000 --- a/numpy/_core/src/npysort/x86-simd-sort +++ b/numpy/_core/src/npysort/x86-simd-sort @@ -1 +1 @@ -Subproject commit fe2b5bf62275ba6173c53b6c5b747c362b641d5e +Subproject commit 6a7a01da4b0dfde108aa626a2364c954e2c50fe1 From b84208a68567d38f8605dfe396f25ba5a72a2039 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 11 Sep 2025 13:23:44 -0600 Subject: [PATCH 0444/1718] TST: Simplify fixture that is equivalent to a function call --- numpy/_core/tests/string_testing.py | 10 ++++++++++ numpy/_core/tests/test_multithreading.py | 14 +++++--------- numpy/_core/tests/test_stringdtype.py | 8 +++++--- numpy/conftest.py | 8 -------- 4 files changed, 20 insertions(+), 20 deletions(-) create mode 100644 numpy/_core/tests/string_testing.py diff --git a/numpy/_core/tests/string_testing.py b/numpy/_core/tests/string_testing.py new file mode 100644 index 000000000000..9022b33bc41a --- /dev/null +++ b/numpy/_core/tests/string_testing.py @@ -0,0 +1,10 @@ +import string +import numpy as np + +def random_string_list(): + chars = list(string.ascii_letters + string.digits) + chars = np.array(chars, dtype="U1") + ret = np.random.choice(chars, size=100 * 10, replace=True) + return ret.view("U100") + + diff --git a/numpy/_core/tests/test_multithreading.py b/numpy/_core/tests/test_multithreading.py index b81c989ae5c7..17199ec75f6e 100644 --- a/numpy/_core/tests/test_multithreading.py +++ b/numpy/_core/tests/test_multithreading.py @@ -1,11 +1,11 @@ import 
concurrent.futures -import string import threading import pytest import numpy as np from numpy._core import _rational_tests +from numpy._core.tests.string_testing import random_string_list from numpy.testing import IS_64BIT, IS_WASM from numpy.testing._private.utils import run_threaded @@ -218,16 +218,12 @@ def func(arr): assert arr.dtype is dt -def test_stringdtype_multithreaded_access_and_mutation( - dtype, random_string_list): +def test_stringdtype_multithreaded_access_and_mutation(dtype): # this test uses an RNG and may crash or cause deadlocks if there is a # threading bug rng = np.random.default_rng(0x4D3D3D3) - chars = list(string.ascii_letters + string.digits) - chars = np.array(chars, dtype="U1") - ret = rng.choice(chars, size=100 * 10, replace=True) - random_string_list = ret.view("U100") + string_list = random_string_list() def func(arr): rnd = rng.random() @@ -247,10 +243,10 @@ def func(arr): else: np.multiply(arr, np.int64(2), out=arr) else: - arr[:] = random_string_list + arr[:] = string_list with concurrent.futures.ThreadPoolExecutor(max_workers=8) as tpe: - arr = np.array(random_string_list, dtype=dtype) + arr = np.array(string_list, dtype=dtype) futures = [tpe.submit(func, arr) for _ in range(500)] for f in futures: diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index 5d2364bf8cfc..817c7d5846b6 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -9,6 +9,7 @@ import numpy as np from numpy._core.tests._natype import get_stringdtype_dtype as get_dtype, pd_NA +from numpy._core.tests.string_testing import random_string_list from numpy.dtypes import StringDType from numpy.testing import IS_PYPY, assert_array_equal @@ -266,12 +267,13 @@ def test_bytes_casts(self, dtype, strings): sarr.astype("S20") -def test_additional_unicode_cast(random_string_list, dtype): - arr = np.array(random_string_list, dtype=dtype) +def test_additional_unicode_cast(dtype): + string_list = 
random_string_list() + arr = np.array(string_list, dtype=dtype) # test that this short-circuits correctly assert_array_equal(arr, arr.astype(arr.dtype)) # tests the casts via the comparison promoter - assert_array_equal(arr, arr.astype(random_string_list.dtype)) + assert_array_equal(arr, arr.astype(string_list.dtype)) def test_insert_scalar(dtype, string_list): diff --git a/numpy/conftest.py b/numpy/conftest.py index f411588748e0..5df9ac73baf4 100644 --- a/numpy/conftest.py +++ b/numpy/conftest.py @@ -230,14 +230,6 @@ def warnings_errors_and_rng(test=None): ] -@pytest.fixture -def random_string_list(): - chars = list(string.ascii_letters + string.digits) - chars = np.array(chars, dtype="U1") - ret = np.random.choice(chars, size=100 * 10, replace=True) - return ret.view("U100") - - @pytest.fixture(params=[True, False]) def coerce(request): return request.param From c416b8ee4219b481eadd40b76c9c5915c4c5c849 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 11 Sep 2025 13:27:21 -0600 Subject: [PATCH 0445/1718] MAINT: delete unused variables in unary logical dispatch --- numpy/_core/src/umath/loops_logical.dispatch.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/numpy/_core/src/umath/loops_logical.dispatch.cpp b/numpy/_core/src/umath/loops_logical.dispatch.cpp index a6f174218bd1..c05584f467aa 100644 --- a/numpy/_core/src/umath/loops_logical.dispatch.cpp +++ b/numpy/_core/src/umath/loops_logical.dispatch.cpp @@ -108,7 +108,6 @@ struct UnaryLogicalTraits; template<> struct UnaryLogicalTraits { - static constexpr bool is_not = true; static constexpr auto scalar_op = std::equal_to{}; #if NPY_HWY @@ -121,7 +120,6 @@ struct UnaryLogicalTraits { template<> struct UnaryLogicalTraits { - static constexpr bool is_not = false; static constexpr auto scalar_op = std::not_equal_to{}; #if NPY_HWY From 965a8ccb9b14e74822cb2f21c846743c4d7cef33 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 11 Sep 2025 15:07:47 -0600 Subject: [PATCH 0446/1718] TST: move fixtures 
used by stringdtype tests to test_stringdtype.py --- numpy/_core/tests/_natype.py | 8 ---- numpy/_core/tests/string_testing.py | 10 ----- numpy/_core/tests/test_multithreading.py | 8 ++-- numpy/_core/tests/test_stringdtype.py | 48 ++++++++++++++++++++++-- numpy/conftest.py | 21 ----------- 5 files changed, 48 insertions(+), 47 deletions(-) delete mode 100644 numpy/_core/tests/string_testing.py diff --git a/numpy/_core/tests/_natype.py b/numpy/_core/tests/_natype.py index 1c2175b35933..767d4d3832ab 100644 --- a/numpy/_core/tests/_natype.py +++ b/numpy/_core/tests/_natype.py @@ -195,11 +195,3 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): pd_NA = NAType() - - -def get_stringdtype_dtype(na_object, coerce=True): - # explicit is check for pd_NA because != with pd_NA returns pd_NA - if na_object is pd_NA or na_object != "unset": - return np.dtypes.StringDType(na_object=na_object, coerce=coerce) - else: - return np.dtypes.StringDType(coerce=coerce) diff --git a/numpy/_core/tests/string_testing.py b/numpy/_core/tests/string_testing.py deleted file mode 100644 index 9022b33bc41a..000000000000 --- a/numpy/_core/tests/string_testing.py +++ /dev/null @@ -1,10 +0,0 @@ -import string -import numpy as np - -def random_string_list(): - chars = list(string.ascii_letters + string.digits) - chars = np.array(chars, dtype="U1") - ret = np.random.choice(chars, size=100 * 10, replace=True) - return ret.view("U100") - - diff --git a/numpy/_core/tests/test_multithreading.py b/numpy/_core/tests/test_multithreading.py index 17199ec75f6e..6563abde8ff4 100644 --- a/numpy/_core/tests/test_multithreading.py +++ b/numpy/_core/tests/test_multithreading.py @@ -5,7 +5,7 @@ import numpy as np from numpy._core import _rational_tests -from numpy._core.tests.string_testing import random_string_list +from numpy._core.tests.test_stringdtype import random_unicode_string_list from numpy.testing import IS_64BIT, IS_WASM from numpy.testing._private.utils import run_threaded @@ -218,12 +218,12 
@@ def func(arr): assert arr.dtype is dt -def test_stringdtype_multithreaded_access_and_mutation(dtype): +def test_stringdtype_multithreaded_access_and_mutation(): # this test uses an RNG and may crash or cause deadlocks if there is a # threading bug rng = np.random.default_rng(0x4D3D3D3) - string_list = random_string_list() + string_list = random_unicode_string_list() def func(arr): rnd = rng.random() @@ -246,7 +246,7 @@ def func(arr): arr[:] = string_list with concurrent.futures.ThreadPoolExecutor(max_workers=8) as tpe: - arr = np.array(string_list, dtype=dtype) + arr = np.array(string_list, dtype="T") futures = [tpe.submit(func, arr) for _ in range(500)] for f in futures: diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index 817c7d5846b6..196931f0ef10 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -3,25 +3,63 @@ import os import pickle import sys +import string import tempfile import pytest import numpy as np -from numpy._core.tests._natype import get_stringdtype_dtype as get_dtype, pd_NA -from numpy._core.tests.string_testing import random_string_list +from numpy._core.tests._natype import pd_NA from numpy.dtypes import StringDType from numpy.testing import IS_PYPY, assert_array_equal +def random_unicode_string_list(): + """Returns an array of 10 100-character strings containing random text""" + chars = list(string.ascii_letters + string.digits) + chars = np.array(chars, dtype="U1") + ret = np.random.choice(chars, size=100 * 10, replace=True) + return ret.view("U100") + + +def get_dtype(na_object, coerce=True): + """Helper to work around pd_NA boolean behavior""" + # explicit is check for pd_NA because != with pd_NA returns pd_NA + if na_object is pd_NA or na_object != "unset": + return np.dtypes.StringDType(na_object=na_object, coerce=coerce) + else: + return np.dtypes.StringDType(coerce=coerce) + + +@pytest.fixture(params=[True, False]) +def coerce(request): + 
"""Coerce input to strings or raise an error for non-string input""" + return request.param + + +@pytest.fixture( + params=["unset", None, pd_NA, np.nan, float("nan"), "__nan__"], + ids=["unset", "None", "pandas.NA", "np.nan", "float('nan')", "string nan"], +) +def na_object(request): + """Possible values for the missing data sentinel""" + return request.param + + +@pytest.fixture() +def dtype(na_object, coerce): + """Cartesian project of missing data sentinel and string coercion options""" + return get_dtype(na_object, coerce) + @pytest.fixture def string_list(): + """Mix of short and long strings, some with unicode, some without""" return ["abc", "def", "ghi" * 10, "A¢☃€ 😊" * 100, "Abc" * 1000, "DEF"] -# second copy for cast tests to do a cartesian product over dtypes @pytest.fixture(params=[True, False]) def coerce2(request): + """Second copy of the coerce fixture for tests that need two instances""" return request.param @@ -30,11 +68,13 @@ def coerce2(request): ids=["unset", "None", "pandas.NA", "np.nan", "float('nan')", "string nan"], ) def na_object2(request): + """Second copy of the na_object fixture for tests that need two instances""" return request.param @pytest.fixture() def dtype2(na_object2, coerce2): + """Second copy of the dtype fixture for tests that need two instances""" # explicit is check for pd_NA because != with pd_NA returns pd_NA if na_object2 is pd_NA or na_object2 != "unset": return StringDType(na_object=na_object2, coerce=coerce2) @@ -268,7 +308,7 @@ def test_bytes_casts(self, dtype, strings): def test_additional_unicode_cast(dtype): - string_list = random_string_list() + string_list = random_unicode_string_list() arr = np.array(string_list, dtype=dtype) # test that this short-circuits correctly assert_array_equal(arr, arr.astype(arr.dtype)) diff --git a/numpy/conftest.py b/numpy/conftest.py index 5df9ac73baf4..71ece01cc3b5 100644 --- a/numpy/conftest.py +++ b/numpy/conftest.py @@ -2,7 +2,6 @@ Pytest configuration and fixtures for the 
Numpy test suite. """ import os -import string import sys import tempfile import warnings @@ -12,9 +11,7 @@ import pytest import numpy -import numpy as np from numpy._core._multiarray_tests import get_fpu_mode -from numpy._core.tests._natype import get_stringdtype_dtype, pd_NA from numpy.testing._private.utils import NOGIL_BUILD try: @@ -228,21 +225,3 @@ def warnings_errors_and_rng(test=None): 'numpy/random/_examples', 'numpy/f2py/_backends/_distutils.py', ] - - -@pytest.fixture(params=[True, False]) -def coerce(request): - return request.param - - -@pytest.fixture( - params=["unset", None, pd_NA, np.nan, float("nan"), "__nan__"], - ids=["unset", "None", "pandas.NA", "np.nan", "float('nan')", "string nan"], -) -def na_object(request): - return request.param - - -@pytest.fixture() -def dtype(na_object, coerce): - return get_stringdtype_dtype(na_object, coerce) From 7445147c273deaff9d82746ab413c05b90b10552 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 11 Sep 2025 15:13:57 -0600 Subject: [PATCH 0447/1718] TST: simplify NAType code adapted from Pandas --- numpy/_core/tests/_natype.py | 55 +----------------------------------- 1 file changed, 1 insertion(+), 54 deletions(-) diff --git a/numpy/_core/tests/_natype.py b/numpy/_core/tests/_natype.py index 767d4d3832ab..539dfd2b36e1 100644 --- a/numpy/_core/tests/_natype.py +++ b/numpy/_core/tests/_natype.py @@ -74,8 +74,7 @@ def __bool__(self): raise TypeError("boolean value of NA is ambiguous") def __hash__(self): - exponent = 31 if is_32bit else 61 - return 2**exponent - 1 + return 2**61 - 1 def __reduce__(self): return "pd_NA" @@ -114,33 +113,6 @@ def __reduce__(self): __abs__ = _create_unary_propagating_op("__abs__") __invert__ = _create_unary_propagating_op("__invert__") - # pow has special - def __pow__(self, other): - if other is pd_NA: - return pd_NA - elif isinstance(other, (numbers.Number, np.bool)): - if other == 0: - # returning positive is correct for +/- 0. 
- return type(other)(1) - else: - return pd_NA - elif util.is_array(other): - return np.where(other == 0, other.dtype.type(1), pd_NA) - - return NotImplemented - - def __rpow__(self, other): - if other is pd_NA: - return pd_NA - elif isinstance(other, (numbers.Number, np.bool)): - if other == 1: - return other - else: - return pd_NA - elif util.is_array(other): - return np.where(other == 1, other, pd_NA) - return NotImplemented - # Logical ops using Kleene logic def __and__(self, other): @@ -168,30 +140,5 @@ def __xor__(self, other): __rxor__ = __xor__ - __array_priority__ = 1000 - _HANDLED_TYPES = (np.ndarray, numbers.Number, str, np.bool) - - def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): - types = self._HANDLED_TYPES + (NAType,) - for x in inputs: - if not isinstance(x, types): - return NotImplemented - - if method != "__call__": - raise ValueError(f"ufunc method '{method}' not supported for NA") - result = maybe_dispatch_ufunc_to_dunder_op( - self, ufunc, method, *inputs, **kwargs - ) - if result is NotImplemented: - # For a NumPy ufunc that's not a binop, like np.logaddexp - index = next(i for i, x in enumerate(inputs) if x is pd_NA) - result = np.broadcast_arrays(*inputs)[index] - if result.ndim == 0: - result = result.item() - if ufunc.nout > 1: - result = (pd_NA,) * ufunc.nout - - return result - pd_NA = NAType() From bb914390981a524fa2485c33d56e2b69d28ceae8 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 11 Sep 2025 15:14:57 -0600 Subject: [PATCH 0448/1718] MAINT: run linter --- numpy/_core/tests/test_stringdtype.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index 196931f0ef10..7a253c981c4d 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -2,8 +2,8 @@ import itertools import os import pickle -import sys import string +import sys import tempfile import pytest From 
ef9fcc952905ba0c7636295ff413ee118bf5a0ee Mon Sep 17 00:00:00 2001 From: kibitzing Date: Fri, 12 Sep 2025 12:06:13 +0900 Subject: [PATCH 0449/1718] BUG: remove object dtype check from max_depth validation in PyArray_FromAny_int --- numpy/_core/src/multiarray/ctors.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index 38da6f314848..5bcf1cdf7ba3 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -1593,7 +1593,7 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, npy_free_coercion_cache(cache); goto cleanup; } - if (ndim > max_depth && (in_DType == NULL || in_DType->type_num != NPY_OBJECT)) { + if (ndim > max_depth) { PyErr_SetString(PyExc_ValueError, "object too deep for desired array"); npy_free_coercion_cache(cache); From eef7de6705144bbd271c4afb2455478433a97734 Mon Sep 17 00:00:00 2001 From: kibitzing Date: Fri, 12 Sep 2025 12:12:56 +0900 Subject: [PATCH 0450/1718] TST: add test for "too deep" error when ndmax < ndim --- numpy/_core/tests/test_multiarray.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 2d8587ad5ac1..943e9642ec3c 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -1354,6 +1354,12 @@ def test_ndmax_greather_than_NPY_MAXDIMS(self): with pytest.raises(ValueError, match="ndmax must be in the range"): np.array(data, ndmax=65) + def test_ndmax_less_than_ndim(self): + # np.array input bypasses recursive inference, allowing ndim > ndmax validation + data = np.array([[1, 2, 3], [4, 5, 6]]) + with pytest.raises(ValueError, match="object too deep for desired array"): + np.array(data, ndmax=1, dtype=object) + class TestStructured: def test_subarray_field_access(self): From d82354a9cdb941163cc0320c913e66288d8d4720 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 13 Sep 2025 02:09:00 
+0200 Subject: [PATCH 0451/1718] MAINT: bump `mypy` to `1.18.1` --- environment.yml | 2 +- requirements/test_requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/environment.yml b/environment.yml index dd7fa1a83e5b..61eed13bfd67 100644 --- a/environment.yml +++ b/environment.yml @@ -25,7 +25,7 @@ dependencies: - hypothesis # For type annotations - typing_extensions>=4.5.0 - - mypy=1.17.1 + - mypy=1.18.1 - orjson # makes mypy faster # For building docs - sphinx>=4.5.0 diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index 44f73a844591..95396afaf5a0 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -12,7 +12,7 @@ pytest-timeout # For testing types. Notes on the restrictions: # - Mypy relies on C API features not present in PyPy # NOTE: Keep mypy in sync with environment.yml -mypy==1.17.1; platform_python_implementation != "PyPy" +mypy==1.18.1; platform_python_implementation != "PyPy" typing_extensions>=4.5.0 # for optional f2py encoding detection charset-normalizer From 1ccd4805b8502ee52cc3c94fc9567b0cc7418909 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 13 Sep 2025 02:12:50 +0200 Subject: [PATCH 0452/1718] TYP: fix mypy errors --- numpy/typing/tests/data/reveal/array_constructors.pyi | 8 ++++---- numpy/typing/tests/data/reveal/testing.pyi | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/numpy/typing/tests/data/reveal/array_constructors.pyi b/numpy/typing/tests/data/reveal/array_constructors.pyi index e95e85093f6a..e86d96c884c0 100644 --- a/numpy/typing/tests/data/reveal/array_constructors.pyi +++ b/numpy/typing/tests/data/reveal/array_constructors.pyi @@ -227,19 +227,19 @@ assert_type(np.atleast_3d(A), npt.NDArray[np.float64]) assert_type(np.atleast_3d(A, A), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) assert_type(np.atleast_3d(A, A, A), tuple[npt.NDArray[np.float64], ...]) -assert_type(np.vstack([A, A]), 
np.ndarray[Any, Any]) # pyright correctly infers this as NDArray[float64] +assert_type(np.vstack([A, A]), npt.NDArray[np.float64]) assert_type(np.vstack([A, A], dtype=np.float32), npt.NDArray[np.float32]) assert_type(np.vstack([A, C]), npt.NDArray[Any]) assert_type(np.vstack([C, C]), npt.NDArray[Any]) -assert_type(np.hstack([A, A]), np.ndarray[Any, Any]) # pyright correctly infers this as NDArray[float64] +assert_type(np.hstack([A, A]), npt.NDArray[np.float64]) assert_type(np.hstack([A, A], dtype=np.float32), npt.NDArray[np.float32]) -assert_type(np.stack([A, A]), np.ndarray[Any, Any]) # pyright correctly infers this as NDArray[float64] +assert_type(np.stack([A, A]), npt.NDArray[np.float64]) assert_type(np.stack([A, A], dtype=np.float32), npt.NDArray[np.float32]) assert_type(np.stack([A, C]), npt.NDArray[Any]) assert_type(np.stack([C, C]), npt.NDArray[Any]) -assert_type(np.stack([A, A], axis=0), np.ndarray[Any, Any]) # pyright correctly infers this as NDArray[float64] +assert_type(np.stack([A, A], axis=0), npt.NDArray[np.float64]) assert_type(np.stack([A, A], out=B), SubClass[np.float64]) assert_type(np.block([[A, A], [A, A]]), npt.NDArray[Any]) # pyright correctly infers this as NDArray[float64] diff --git a/numpy/typing/tests/data/reveal/testing.pyi b/numpy/typing/tests/data/reveal/testing.pyi index 1533cd27955b..34fbc5feeb41 100644 --- a/numpy/typing/tests/data/reveal/testing.pyi +++ b/numpy/typing/tests/data/reveal/testing.pyi @@ -173,7 +173,7 @@ assert_type(np.testing.assert_array_max_ulp(AR_i8, AR_f8, maxulp=2), npt.NDArray assert_type(np.testing.assert_array_max_ulp(AR_i8, AR_f8, dtype=np.float32), npt.NDArray[Any]) assert_type(np.testing.assert_warns(RuntimeWarning), contextlib._GeneratorContextManager[None]) # type: ignore[deprecated] # pyright: ignore[reportDeprecated] -assert_type(np.testing.assert_warns(RuntimeWarning, func3, 5), bool) +assert_type(np.testing.assert_warns(RuntimeWarning, func3, 5), bool) # type: ignore[deprecated] # pyright: 
ignore[reportDeprecated] def func4(a: int, b: str) -> bool: ... From eb737242ead791065d400f124d60ad1d67bdcacb Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 13 Sep 2025 02:19:32 +0200 Subject: [PATCH 0453/1718] STY: obey `ruff`-sama --- numpy/typing/tests/data/reveal/array_constructors.pyi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/typing/tests/data/reveal/array_constructors.pyi b/numpy/typing/tests/data/reveal/array_constructors.pyi index e86d96c884c0..2d8b5fa3fd17 100644 --- a/numpy/typing/tests/data/reveal/array_constructors.pyi +++ b/numpy/typing/tests/data/reveal/array_constructors.pyi @@ -235,7 +235,7 @@ assert_type(np.vstack([C, C]), npt.NDArray[Any]) assert_type(np.hstack([A, A]), npt.NDArray[np.float64]) assert_type(np.hstack([A, A], dtype=np.float32), npt.NDArray[np.float32]) -assert_type(np.stack([A, A]), npt.NDArray[np.float64]) +assert_type(np.stack([A, A]), npt.NDArray[np.float64]) assert_type(np.stack([A, A], dtype=np.float32), npt.NDArray[np.float32]) assert_type(np.stack([A, C]), npt.NDArray[Any]) assert_type(np.stack([C, C]), npt.NDArray[Any]) From 822665c9c3633e97fa599311b9b4dcc50bae2527 Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Mon, 12 May 2025 14:38:03 +0300 Subject: [PATCH 0454/1718] ENH: Modulate dispatched x86 CPU features **IMPORTANT NOTE**: The default setting for `cpu-baseline`` on x86 has been raised to `x86-64-v2` microarchitecture. This can be changed to `cpu-baseline=none` during build time to support older CPUs, though manual SIMD optimizations for pre-2009 processors are no longer supported. This patch reorganizes CPU build options by replacing individual x86 features with microarchitecture levels. This change aligns with the Google Highway requirements and common Linux distribution practices. 
This patch: - Removes all individual x86 features and replaces them with three microarchitecture levels (`X86_V2`, `X86_V3`, `X86_V4`) commonly used by Linux distributions - Raises the baseline to microarchitecture level 2 (replacing `SSE3`) since all known x86 CPUs since 2009 support it. This improves performance and reduces binary size - Updates documentation to reflect these changes and to fit the current meson build system. - Corrects the behavior of the `-` operator, which now excludes successor features that imply the excluded feature - Adds redirection via meson for removed feature names to avoid breaking builds - Removes compiler compatibility workarounds, so features like AVX512 without full mask operations will be considered unsupported rather than providing fallbacks Detailed CPU features changes: - Removes individual features (`SSE`, `SSE2`, `SSE3`, `SSSE3`, `SSE4_1`, `SSE4_2`, `POPCNT`) which are now part of the new group `X86_V2` - Removes AMD legacy features (`XOP`, `FMA4`) - Removes Xeon Phi support (`AVX512_KNL`, `AVX512_KNM`) which Intel has discontinued - Removes individual features (`AVX`, `AVX2`, `FMA3`, `F16C`) which are now part of the new group `X86_V3` - Removes individual features `AVX512F`, `AVX512CD` as a result of dropping Xeon Phi support - Renames group `AVX512_SKX` to `x86_v4` to align with microarchitecture level naming - Removes groups `AVX512_CLX` and `AVX512_CNL` (features available via `AVX512_ICL`) - Updates `AVX512_ICL` to include features (`VAES`, `GFNI`, `VPCLMULQDQ`) for alignment with Highway New Feature Group Hierarchy: ``` Name | Implies | Includes --------------|-------------|----------------------------------------------------------- X86_V2 | | SSE SSE2 SSE3 SSSE3 SSE4_1 SSE4_2 POPCNT CX16 LAHF X86_V3 | X86_V2 | AVX AVX2 FMA3 BMI BMI2 LZCNT F16C MOVBE X86_V4 | X86_V3 | AVX512F AVX512CD AVX512VL AVX512BW AVX512DQ AVX512_ICL | X86_V4 | AVX512VBMI AVX512VBMI2 AVX512VNNI AVX512BITALG | | AVX512VPOPCNTDQ AVX512IFMA VAES 
GFNI VPCLMULQDQ AVX512_SPR | AVX512_ICL | AVX512FP16 ``` These groups correspond to CPU generations: - `X86_V2`: x86-64-v2 microarchitectures (CPUs since 2009) - `X86_V3`: x86-64-v3 microarchitectures (CPUs since 2015) - `X86_V4`: x86-64-v4 microarchitectures (AVX-512 capable CPUs) - `AVX512_ICL`: Intel Ice Lake and similar CPUs - `AVX512_SPR`: Intel Sapphire Rapids and newer CPUs Note: On 32-bit x86, `cx16` is excluded from `X86_V2`. --- .github/workflows/linux_simd.yml | 6 +- doc/release/upcoming_changes/28896.change.rst | 56 ++ doc/source/reference/simd/build-options.rst | 602 ++++++++++-------- doc/source/reference/simd/gen_features.py | 196 ------ .../simd/generated_tables/compilers-diff.inc | 35 - .../simd/generated_tables/cpu_features.inc | 109 ---- doc/source/reference/simd/log_example.txt | 141 ++-- meson.options | 7 +- meson_cpu/meson.build | 50 +- meson_cpu/x86/meson.build | 262 ++------ meson_cpu/x86/test_x86_v2.c | 69 ++ meson_cpu/x86/test_x86_v3.c | 66 ++ meson_cpu/x86/test_x86_v4.c | 88 +++ numpy/_core/meson.build | 40 +- numpy/_core/src/common/npy_cpu_features.c | 169 +++-- numpy/_core/src/common/npy_cpu_features.h | 21 +- numpy/_core/tests/test_cpu_dispatcher.py | 2 +- numpy/_core/tests/test_cpu_features.py | 41 +- 18 files changed, 977 insertions(+), 983 deletions(-) create mode 100644 doc/release/upcoming_changes/28896.change.rst delete mode 100644 doc/source/reference/simd/gen_features.py delete mode 100644 doc/source/reference/simd/generated_tables/compilers-diff.inc delete mode 100644 doc/source/reference/simd/generated_tables/cpu_features.inc create mode 100644 meson_cpu/x86/test_x86_v2.c create mode 100644 meson_cpu/x86/test_x86_v3.c create mode 100644 meson_cpu/x86/test_x86_v4.c diff --git a/.github/workflows/linux_simd.yml b/.github/workflows/linux_simd.yml index 5bd1eab7f797..c35093d63a14 100644 --- a/.github/workflows/linux_simd.yml +++ b/.github/workflows/linux_simd.yml @@ -162,12 +162,12 @@ jobs: ] - [ "without avx512", - 
"-Dallow-noblas=true -Dcpu-dispatch=SSSE3,SSE41,POPCNT,SSE42,AVX,F16C,AVX2,FMA3", + "-Dallow-noblas=true -Dcpu-dispatch=max-x86_v4", "3.11" ] - [ "without avx512/avx2/fma3", - "-Dallow-noblas=true -Dcpu-dispatch=SSSE3,SSE41,POPCNT,SSE42,AVX,F16C", + "-Dallow-noblas=true -Dcpu-dispatch=max-x86_v3", "3.11" ] @@ -212,7 +212,7 @@ jobs: python -m pip install pytest pytest-xdist hypothesis typing_extensions - name: Build - run: CC=gcc-13 CXX=g++-13 spin build -- -Denable-openmp=true -Dallow-noblas=true -Dcpu-baseline=avx512_skx -Dtest-simd='BASELINE,AVX512_KNL,AVX512_KNM,AVX512_SKX,AVX512_CLX,AVX512_CNL,AVX512_ICL,AVX512_SPR' + run: CC=gcc-13 CXX=g++-13 spin build -- -Denable-openmp=true -Dallow-noblas=true -Dcpu-baseline=X86_V4 -Dtest-simd='BASELINE,AVX512_ICL,AVX512_SPR' - name: Meson Log if: always() diff --git a/doc/release/upcoming_changes/28896.change.rst b/doc/release/upcoming_changes/28896.change.rst new file mode 100644 index 000000000000..47538b7b22b2 --- /dev/null +++ b/doc/release/upcoming_changes/28896.change.rst @@ -0,0 +1,56 @@ +Modulate dispatched x86 CPU features +------------------------------------ + +**IMPORTANT**: The default setting for `cpu-baseline`` on x86 has been raised to `x86-64-v2` microarchitecture. +This can be changed to none during build time to support older CPUs, +though SIMD optimizations for pre-2009 processors are no longer maintained. + +NumPy has reorganized x86 CPU features into microarchitecture-based groups instead of individual features, +aligning with Linux distribution standards and Google Highway requirements. 
+ +Key changes: +* Replaced individual x86 features with microarchitecture levels: ``X86_V2``, ``X86_V3``, and ``X86_V4`` +* Raised the baseline to ``X86_V2`` +* Improved ``-`` operator behavior to properly exclude successor features that imply the excluded feature +* Added meson redirections for removed feature names to maintain backward compatibility +* Removed compiler compatibility workarounds for partial feature support (e.g., AVX512 without mask operations) +* Removed legacy AMD features (XOP, FMA4) and discontinued Intel Xeon Phi support + +New Feature Group Hierarchy: + +.. list-table:: + :header-rows: 1 + :align: left + + * - Name + - Implies + - Includes + * - ``X86_V2`` + - + - ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE4_1`` ``SSE4_2`` ``POPCNT`` ``CX16`` ``LAHF`` + * - ``X86_V3`` + - ``X86_V2`` + - ``AVX`` ``AVX2`` ``FMA3`` ``BMI`` ``BMI2`` ``LZCNT`` ``F16C`` ``MOVBE`` + * - ``X86_V4`` + - ``X86_V3`` + - ``AVX512F`` ``AVX512CD`` ``AVX512VL`` ``AVX512BW`` ``AVX512DQ`` + * - ``AVX512_ICL`` + - ``X86_V4`` + - ``AVX512VBMI`` ``AVX512VBMI2`` ``AVX512VNNI`` ``AVX512BITALG`` ``AVX512VPOPCNTDQ`` ``AVX512IFMA`` ``VAES`` ``GFNI`` ``VPCLMULQDQ`` + * - ``AVX512_SPR`` + - ``AVX512_ICL`` + - ``AVX512FP16`` + + +These groups correspond to CPU generations: + +- ``X86_V2``: x86-64-v2 microarchitectures (CPUs since 2009) +- ``X86_V3``: x86-64-v3 microarchitectures (CPUs since 2015) +- ``X86_V4``: x86-64-v4 microarchitectures (AVX-512 capable CPUs) +- ``AVX512_ICL``: Intel Ice Lake and similar CPUs +- ``AVX512_SPR``: Intel Sapphire Rapids and newer CPUs + +.. note:: + On 32-bit x86, ``cx16`` is excluded from ``X86_V2``. + +Documentation has been updated with details on using these new feature groups with the current meson build system. 
diff --git a/doc/source/reference/simd/build-options.rst b/doc/source/reference/simd/build-options.rst index 1f105e27077d..4bb925b05532 100644 --- a/doc/source/reference/simd/build-options.rst +++ b/doc/source/reference/simd/build-options.rst @@ -1,227 +1,387 @@ ***************** -CPU build options +CPU Build Options ***************** -Description ------------ +Overview +-------- -The following options are mainly used to change the default behavior of optimizations -that target certain CPU features: +NumPy provides configuration options to optimize performance based on CPU capabilities. +These options allow you to specify which CPU features to support, balancing performance, compatibility, and binary size. +This document explains how to use these options effectively across various CPU architectures. -- ``cpu-baseline``: minimal set of required CPU features. - Default value is ``min`` which provides the minimum CPU features that can - safely run on a wide range of platforms within the processor family. +Key Configuration Options +------------------------- - .. note:: +NumPy uses several build options to control CPU optimizations: - During the runtime, NumPy modules will fail to load if any of specified features - are not supported by the target CPU (raises Python runtime error). +- ``cpu-baseline``: The minimum set of CPU features required to run the compiled NumPy. + + * Default: ``min`` (provides compatibility across a wide range of platforms) + * If your target CPU doesn't support all specified baseline features, NumPy will fail to load with a Python runtime error - ``cpu-baseline-detect``: controls detection of CPU baseline based on compiler flags. Default value is ``auto`` that enables detection if ``-march=`` or a similar compiler flag is used. The other possible values are ``enabled`` and ``disabled`` to respective enable or disable it unconditionally. -- ``cpu-dispatch``: dispatched set of additional CPU features. 
- Default value is ``max -xop -fma4`` which enables all CPU - features, except for AMD legacy features (in case of X86). +- ``cpu-dispatch``: Additional CPU features for which optimized code paths will be generated. + + * Default: ``max`` (enables all available optimizations) + * At runtime, NumPy will automatically select the fastest available code path based on your CPU's capabilities - .. note:: +- ``disable-optimization``: Completely disables all CPU optimizations. + + * Default: ``false`` (optimizations are enabled) + * When set to ``true``, disables all CPU optimized code including dispatch, SIMD, and loop unrolling + * Useful for debugging, testing, or in environments where optimization causes issues - During the runtime, NumPy modules will skip any specified features - that are not available in the target CPU. +These options are specified at build time via meson-python arguments:: -These options are accessible at build time by passing setup arguments to meson-python -via the build frontend (e.g., ``pip`` or ``build``). -They accept a set of :ref:`CPU features ` -or groups of features that gather several features or -:ref:`special options ` that -perform a series of procedures. + pip install . -Csetup-args=-Dcpu-baseline="min" -Csetup-args=-Dcpu-dispatch="max" + # or through spin + spin build -- -Dcpu-baseline="min" -Dcpu-dispatch="max" -To customize CPU/build options:: +``cpu-baseline`` and ``cpu-dispatch`` can be set to specific :ref:`CPU groups, features`, or :ref:`special options ` +that perform specific actions. The following sections describe these options in detail. - pip install . -Csetup-args=-Dcpu-baseline="avx2 fma3" -Csetup-args=-Dcpu-dispatch="max" +Common Usage Scenarios +---------------------- -Quick start ------------ +Building for Local Use Only +~~~~~~~~~~~~~~~~~~~~~~~~~~~ -In general, the default settings tend to not impose certain CPU features that -may not be available on some older processors. 
Raising the ceiling of the -baseline features will often improve performance and may also reduce -binary size. +When building for your machine only and not planning to distribute:: + python -m build --wheel -Csetup-args=-Dcpu-baseline="native" -Csetup-args=-Dcpu-dispatch="none" -The following are the most common scenarios that may require changing -the default settings: +This automatically detects and uses all CPU features available on your machine. +.. note:: + A fatal error will be raised if ``native`` isn't supported by the host platform. -I am building NumPy for my local use -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Excluding Specific Features +~~~~~~~~~~~~~~~~~~~~~~~~~~~ -And I do not intend to export the build to other users or target a -different CPU than what the host has. +You may want to exclude certain CPU features from the dispatched features:: -Set ``native`` for baseline, or manually specify the CPU features in case of option -``native`` isn't supported by your platform:: + # For x86-64: exclude all AVX-512 features + python -m build --wheel -Csetup-args=-Dcpu-dispatch="max -X86_V4" - python -m build --wheel -Csetup-args=-Dcpu-baseline="native" + # For ARM64: exclude SVE + python -m build --wheel -Csetup-args=-Dcpu-dispatch="max -SVE" -Building NumPy with extra CPU features isn't necessary for this case, -since all supported features are already defined within the baseline features:: +.. note:: + Excluding a feature will also exclude any successor features that are + implied by the excluded feature. For example, excluding ``X86_V4`` will + exclude ``AVX512_ICL`` and ``AVX512_SPR`` as well. - python -m build --wheel -Csetup-args=-Dcpu-baseline="native" \ - -Csetup-args=-Dcpu-dispatch="none" +Targeting Older CPUs +~~~~~~~~~~~~~~~~~~~~ -.. note:: +On ``x86-64``, by default the baseline is set to ``min`` which maps to ``X86_V2``. +This unsuitable for older CPUs (before 2009) or old virtual machines. 
+To address this, set the baseline to ``none``:: - A fatal error will be raised if ``native`` isn't supported by the host platform. + python -m build --wheel -Csetup-args=-Dcpu-baseline="none" -I do not want to support the old processors of the x86 architecture -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +This will create a build that is compatible with all x86 CPUs, but +without any manual optimizations or SIMD code paths for the baseline. +The build will rely only on dispatched code paths for optimization. -Since most of the CPUs nowadays support at least ``AVX``, ``F16C`` features, you can use:: +Targeting Newer CPUs +~~~~~~~~~~~~~~~~~~~~ - python -m build --wheel -Csetup-args=-Dcpu-baseline="avx f16c" +Raising the baseline improves performance for two main reasons: + +1. Dispatched kernels don't cover all code paths +2. A higher baseline leads to smaller binary size as the compiler won't generate code paths for excluded dispatched features + +For CPUs from 2015 and newer, setting the baseline to ``X86_V3`` may be suitable:: + + python -m build --wheel -Csetup-args=-Dcpu-baseline="min+X86_V3" + +.. _opt-supported-features: + +Supported CPU Features By Architecture +-------------------------------------- + +NumPy supports optimized code paths for multiple CPU architectures. Below are the supported feature groups for each architecture. +The name of the feature group can be used in the build options ``cpu-baseline`` and ``cpu-dispatch``. + +X86 +~~~ +.. 
list-table:: + :header-rows: 1 + :align: left + + * - Name + - Implies + - Includes + * - ``X86_V2`` + - + - ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE4_1`` ``SSE4_2`` ``POPCNT`` ``CX16`` ``LAHF`` + * - ``X86_V3`` + - ``X86_V2`` + - ``AVX`` ``AVX2`` ``FMA3`` ``BMI`` ``BMI2`` ``LZCNT`` ``F16C`` ``MOVBE`` + * - ``X86_V4`` + - ``X86_V3`` + - ``AVX512F`` ``AVX512CD`` ``AVX512VL`` ``AVX512BW`` ``AVX512DQ`` + * - ``AVX512_ICL`` + - ``X86_V4`` + - ``AVX512VBMI`` ``AVX512VBMI2`` ``AVX512VNNI`` ``AVX512BITALG`` ``AVX512VPOPCNTDQ`` ``AVX512IFMA`` ``VAES`` ``GFNI`` ``VPCLMULQDQ`` + * - ``AVX512_SPR`` + - ``AVX512_ICL`` + - ``AVX512FP16`` + +These groups correspond to CPU generations: + +- ``X86_V2``: x86-64-v2 microarchitectures (CPUs since 2009) +- ``X86_V3``: x86-64-v3 microarchitectures (CPUs since 2015) +- ``X86_V4``: x86-64-v4 microarchitectures (AVX-512 capable CPUs) +- ``AVX512_ICL``: Intel Ice Lake and similar CPUs +- ``AVX512_SPR``: Intel Sapphire Rapids and newer CPUs .. note:: + On 32-bit x86, ``cx16`` is excluded from ``X86_V2``. + +On IBM/POWER big-endian +~~~~~~~~~~~~~~~~~~~~~~~ +.. list-table:: + :header-rows: 1 + :align: left + + * - Name + - Implies + * - ``VSX`` + - + * - ``VSX2`` + - ``VSX`` + * - ``VSX3`` + - ``VSX`` ``VSX2`` + * - ``VSX4`` + - ``VSX`` ``VSX2`` ``VSX3`` + +On IBM/POWER little-endian +~~~~~~~~~~~~~~~~~~~~~~~~~~ +.. list-table:: + :header-rows: 1 + :align: left + + * - Name + - Implies + * - ``VSX`` + - ``VSX2`` + * - ``VSX2`` + - ``VSX`` + * - ``VSX3`` + - ``VSX`` ``VSX2`` + * - ``VSX4`` + - ``VSX`` ``VSX2`` ``VSX3`` + +On ARMv7/A32 +~~~~~~~~~~~~ +.. 
list-table:: + :header-rows: 1 + :align: left + + * - Name + - Implies + * - ``NEON`` + - + * - ``NEON_FP16`` + - ``NEON`` + * - ``NEON_VFPV4`` + - ``NEON`` ``NEON_FP16`` + * - ``ASIMD`` + - ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` + * - ``ASIMDHP`` + - ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD`` + * - ``ASIMDDP`` + - ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD`` + * - ``ASIMDFHM`` + - ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD`` ``ASIMDHP`` + +On ARMv8/A64 +~~~~~~~~~~~~ +.. list-table:: + :header-rows: 1 + :align: left + + * - Name + - Implies + * - ``NEON`` + - ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD`` + * - ``NEON_FP16`` + - ``NEON`` ``NEON_VFPV4`` ``ASIMD`` + * - ``NEON_VFPV4`` + - ``NEON`` ``NEON_FP16`` ``ASIMD`` + * - ``ASIMD`` + - ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` + * - ``ASIMDHP`` + - ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD`` + * - ``ASIMDDP`` + - ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD`` + * - ``ASIMDFHM`` + - ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD`` ``ASIMDHP`` + +On IBM/ZSYSTEM(S390X) +~~~~~~~~~~~~~~~~~~~~~ +.. list-table:: + :header-rows: 1 + :align: left + + * - Name + - Implies + * - ``VX`` + - + * - ``VXE`` + - ``VX`` + * - ``VXE2`` + - ``VX`` ``VXE`` - ``cpu-baseline`` force combine all implied features, so there's no need - to add SSE features. +.. _opt-special-options: +Special Options +--------------- -I'm facing the same case above but with ppc64 architecture -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Beyond specific feature names, you can use these special values: -Then raise the ceiling of the baseline features to Power8:: +``NONE`` +~~~~~~~~ - python -m build --wheel -Csetup-args=-Dcpu-baseline="vsx2" +Enables no features (equivalent to an empty string). -Having issues with AVX512 features? 
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +``NATIVE`` +~~~~~~~~~~ -You may have some reservations about including of ``AVX512`` or -any other CPU feature and you want to exclude from the dispatched features:: +Enables all features supported by the host CPU. - python -m build --wheel -Csetup-args=-Dcpu-dispatch="max -avx512f -avx512cd \ - -avx512_knl -avx512_knm -avx512_skx -avx512_clx -avx512_cnl -avx512_icl -avx512_spr" +``DETECT`` +~~~~~~~~~~ -.. _opt-supported-features: +Detects the features enabled by the compiler. This option is appended by default +to ``cpu-baseline`` if ``-march``, ``-mcpu``, ``-xhost``, or ``/QxHost`` is set in +the environment variable ``CFLAGS``. -Supported features ------------------- +``MIN`` +~~~~~~~ -The names of the features can express one feature or a group of features, -as shown in the following tables supported depend on the lowest interest: +Enables the minimum CPU features for each architecture: -.. note:: +.. list-table:: + :header-rows: 1 + :align: left - The following features may not be supported by all compilers, - also some compilers may produce different set of implied features - when it comes to features like ``AVX512``, ``AVX2``, and ``FMA3``. - See :ref:`opt-platform-differences` for more details. + * - For Arch + - Implies + * - x86 (32-bit) + - ``X86_V2`` + * - x86-64 + - ``X86_V2`` + * - IBM/POWER (big-endian) + - ``NONE`` + * - IBM/POWER (little-endian) + - ``VSX`` ``VSX2`` + * - ARMv7/ARMHF + - ``NONE`` + * - ARMv8/AArch64 + - ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD`` + * - IBM/ZSYSTEM(S390X) + - ``NONE`` -.. include:: generated_tables/cpu_features.inc -.. _opt-special-options: +``MAX`` +~~~~~~~ -Special options ---------------- +Enables all features supported by the compiler and platform. -- ``NONE``: enable no features. 
+Operators (``-``/``+``) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- ``NATIVE``: Enables all CPU features that supported by the host CPU, - this operation is based on the compiler flags (``-march=native``, ``-xHost``, ``/QxHost``) +Remove or add specific features, useful with ``MAX``, ``MIN``, and ``NATIVE``: -- ``MIN``: Enables the minimum CPU features that can safely run on a wide range of platforms: +- Adding a feature (``+``) includes all implied features +- Removing a feature (``-``) excludes all successor features that imply the removed feature - .. table:: - :align: left +Examples:: - ====================================== ======================================= - For Arch Implies - ====================================== ======================================= - x86 (32-bit mode) ``SSE`` ``SSE2`` - x86_64 ``SSE`` ``SSE2`` ``SSE3`` - IBM/POWER (big-endian mode) ``NONE`` - IBM/POWER (little-endian mode) ``VSX`` ``VSX2`` - ARMHF ``NONE`` - ARM64 A.K. AARCH64 ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` - ``ASIMD`` - IBM/ZSYSTEM(S390X) ``NONE`` - ====================================== ======================================= + python -m build --wheel -Csetup-args=-Dcpu-dispatch="max-X86_V4" + python -m build --wheel -Csetup-args=-Dcpu-baseline="min+X86_V4" -- ``MAX``: Enables all supported CPU features by the compiler and platform. +Usage And Behaviors +------------------- -- ``Operators-/+``: remove or add features, useful with options ``MAX``, ``MIN`` and ``NATIVE``. 
+Case Insensitivity +~~~~~~~~~~~~~~~~~~ -Behaviors ---------- +CPU features and options are case-insensitive:: -- CPU features and other options are case-insensitive, for example:: + python -m build --wheel -Csetup-args=-Dcpu-dispatch="X86_v4" - python -m build --wheel -Csetup-args=-Dcpu-dispatch="SSE41 avx2 FMA3" +Mixing Features across Architectures +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- The order of the requested optimizations doesn't matter:: +You can mix features from different architectures:: - python -m build --wheel -Csetup-args=-Dcpu-dispatch="SSE41 AVX2 FMA3" - # equivalent to - python -m build --wheel -Csetup-args=-Dcpu-dispatch="FMA3 AVX2 SSE41" + python -m build --wheel -Csetup-args=-Dcpu-baseline="X86_V4 VSX4 SVE" -- Either commas or spaces or '+' can be used as a separator, - for example:: +Order Independence +~~~~~~~~~~~~~~~~~~ - python -m build --wheel -Csetup-args=-Dcpu-dispatch="avx2 avx512f" - # or - python -m build --wheel -Csetup-args=-Dcpu-dispatch=avx2,avx512f - # or - python -m build --wheel -Csetup-args=-Dcpu-dispatch="avx2+avx512f" +The order of specified features doesn't matter:: - all works but arguments should be enclosed in quotes or escaped - by backslash if any spaces are used. 
+ python -m build --wheel -Csetup-args=-Dcpu-dispatch="SVE X86_V4 x86_v3" -- ``cpu-baseline`` combines all implied CPU features, for example:: +Separators +~~~~~~~~~~ - python -m build --wheel -Csetup-args=-Dcpu-baseline=sse42 - # equivalent to - python -m build --wheel -Csetup-args=-Dcpu-baseline="sse sse2 sse3 ssse3 sse41 popcnt sse42" +You can use spaces or commas as separators:: -- ``cpu-baseline`` will be treated as "native" if compiler native flag - ``-march=native`` or ``-xHost`` or ``/QxHost`` is enabled through environment variable - ``CFLAGS`` and ``cpu-baseline-detect`` is not ``disabled``:: + # All of these are equivalent + python -m build --wheel -Csetup-args=-Dcpu-dispatch="X86_V2 X86_V4" + python -m build --wheel -Csetup-args=-Dcpu-dispatch=X86_V2,X86_V4 + +Feature Combination +~~~~~~~~~~~~~~~~~~~ + +Features specified in options are automatically combined with all implied features:: + + python -m build --wheel -Csetup-args=-Dcpu-baseline=X86_V4 - export CFLAGS="-march=native" - pip install . - # is equivalent to - pip install . -Csetup-args=-Dcpu-baseline=native +Equivalent to:: -- ``cpu-baseline`` escapes any specified features that aren't supported - by the target platform or compiler rather than raising fatal errors. + python -m build --wheel -Csetup-args=-Dcpu-baseline="X86_V2 X86_V3 X86_V4" - .. note:: +Baseline Overlapping +~~~~~~~~~~~~~~~~~~~~ - Since ``cpu-baseline`` combines all implied features, the maximum - supported of implied features will be enabled rather than escape all of them. - For example:: +Features specified in ``cpu-baseline`` will be excluded from the ``cpu-dispatch`` features, +along with their implied features, but without excluding successor features that imply them. 
- # Requesting `AVX2,FMA3` but the compiler only support **SSE** features - python -m build --wheel -Csetup-args=-Dcpu-baseline="avx2 fma3" - # is equivalent to - python -m build --wheel -Csetup-args=-Dcpu-baseline="sse sse2 sse3 ssse3 sse41 popcnt sse42" +For instance, if you specify ``cpu-baseline="X86_V4"``, it will exclude ``X86_V4`` and its +implied features ``X86_V2`` and ``X86_V3`` from the ``cpu-dispatch`` features. -- ``cpu-dispatch`` does not combine any of implied CPU features, - so you must add them unless you want to disable one or all of them:: +Compile-time Detection +~~~~~~~~~~~~~~~~~~~~~~ - # Only dispatches AVX2 and FMA3 - python -m build --wheel -Csetup-args=-Dcpu-dispatch=avx2,fma3 - # Dispatches AVX and SSE features - python -m build --wheel -Csetup-args=-Dcpu-dispatch=ssse3,sse41,sse42,avx,avx2,fma3 +Specifying features to ``cpu-dispatch`` or ``cpu-baseline`` doesn't explicitly enable them. +Features are detected at compile time, and the maximum available features based on your +specified options will be enabled according to toolchain and platform support. -- ``cpu-dispatch`` escapes any specified baseline features and also escapes - any features not supported by the target platform or compiler without raising - fatal errors. +This detection occurs by testing feature availability in the compiler through compile-time +source files containing common intrinsics for the specified features. If both the compiler +and assembler support the feature, it will be enabled. + +For example, if you specify ``cpu-dispatch="AVX512_ICL"`` but your compiler doesn't support it, +the feature will be excluded from the build. However, any implied features will still be +enabled if they're supported. -Eventually, you should always check the final report through the build log -to verify the enabled features. See :ref:`opt-build-report` for more details. .. 
_opt-platform-differences: @@ -256,43 +416,6 @@ For example:: Please take a deep look at :ref:`opt-supported-features`, in order to determine the features that imply one another. -**Compilation compatibility** - -Some compilers don't provide independent support for all CPU features. For instance -**Intel**'s compiler doesn't provide separated flags for ``AVX2`` and ``FMA3``, -it makes sense since all Intel CPUs that comes with ``AVX2`` also support ``FMA3``, -but this approach is incompatible with other **x86** CPUs from **AMD** or **VIA**. - -For example:: - - # Specify AVX2 will force enables FMA3 on Intel compilers - python -m build --wheel -Csetup-args=-Dcpu-baseline=avx2 - # which is equivalent to - python -m build --wheel -Csetup-args=-Dcpu-baseline="avx2 fma3" - - -The following tables only show the differences imposed by some compilers from the -general context that been shown in the :ref:`opt-supported-features` tables: - -.. note:: - - Features names with strikeout represent the unsupported CPU features. - -.. raw:: html - - - -.. role:: enabled - :class: enabled-feature - -.. role:: disabled - :class: disabled-feature - -.. include:: generated_tables/compilers-diff.inc - .. _opt-build-report: Build report @@ -305,7 +428,7 @@ expected CPU features by the compiler. So we strongly recommend checking the final report log, to be aware of what kind of CPU features are enabled and what are not. -You can find the final report of CPU optimizations at the end of the build log, +You can find the final report of CPU optimizations by tracing meson build log, and here is how it looks on x86_64/gcc: .. raw:: html @@ -315,94 +438,63 @@ and here is how it looks on x86_64/gcc: .. 
literalinclude:: log_example.txt :language: bash -There is a separate report for each of ``build_ext`` and ``build_clib`` -that includes several sections, and each section has several values, representing the following: - -**Platform**: - -- :enabled:`Architecture`: The architecture name of target CPU. It should be one of - ``x86``, ``x64``, ``ppc64``, ``ppc64le``, ``armhf``, ``aarch64``, ``s390x`` or ``unknown``. - -- :enabled:`Compiler`: The compiler name. It should be one of - gcc, clang, msvc, icc, iccw or unix-like. - -**CPU baseline**: - -- :enabled:`Requested`: The specific features and options to ``cpu-baseline`` as-is. -- :enabled:`Enabled`: The final set of enabled CPU features. -- :enabled:`Flags`: The compiler flags that were used to all NumPy C/C++ sources - during the compilation except for temporary sources that have been used for generating - the binary objects of dispatched features. -- :enabled:`Extra checks`: list of internal checks that activate certain functionality - or intrinsics related to the enabled features, useful for debugging when it comes - to developing SIMD kernels. - -**CPU dispatch**: - -- :enabled:`Requested`: The specific features and options to ``cpu-dispatch`` as-is. -- :enabled:`Enabled`: The final set of enabled CPU features. -- :enabled:`Generated`: At the beginning of the next row of this property, - the features for which optimizations have been generated are shown in the - form of several sections with similar properties explained as follows: - - - :enabled:`One or multiple dispatched feature`: The implied CPU features. - - :enabled:`Flags`: The compiler flags that been used for these features. - - :enabled:`Extra checks`: Similar to the baseline but for these dispatched features. - - :enabled:`Detect`: Set of CPU features that need be detected in runtime in order to - execute the generated optimizations. 
- - The lines that come after the above property and end with a ':' on a separate line, - represent the paths of c/c++ sources that define the generated optimizations. .. _runtime-simd-dispatch: -Runtime dispatch +Runtime Dispatch ---------------- + Importing NumPy triggers a scan of the available CPU features from the set -of dispatchable features. This can be further restricted by setting the +of dispatchable features. You can restrict this scan by setting the environment variable ``NPY_DISABLE_CPU_FEATURES`` to a comma-, tab-, or -space-separated list of features to disable. This will raise an error if -parsing fails or if the feature was not enabled. For instance, on ``x86_64`` -this will disable ``AVX2`` and ``FMA3``:: +space-separated list of features to disable. + +For instance, on ``x86_64`` this will disable ``X86_V4``:: - NPY_DISABLE_CPU_FEATURES="AVX2,FMA3" + NPY_DISABLE_CPU_FEATURES="X86_V4" -If the feature is not available, a warning will be emitted. +This will raise an error if parsing fails or if the feature was not enabled through the ``cpu-dispatch`` build option. +If the feature is supported by the build but not available on the current CPU, a warning will be emitted instead. -Tracking dispatched functions +Tracking Dispatched Functions ----------------------------- -Discovering which CPU targets are enabled for different optimized functions is achievable -through the Python function ``numpy.lib.introspect.opt_func_info``. -This function offers the flexibility of applying filters using two optional arguments: -one for refining function names and the other for specifying data types in the signatures. + +You can discover which CPU targets are enabled for different optimized functions using +the Python function ``numpy.lib.introspect.opt_func_info``. + +This function offers two optional arguments for filtering results: + +1. ``func_name`` - For refining function names +2. 
``signature`` - For specifying data types in the signatures For example:: >> func_info = numpy.lib.introspect.opt_func_info(func_name='add|abs', signature='float64|complex64') >> print(json.dumps(func_info, indent=2)) { - "absolute": { - "dd": { - "current": "SSE41", - "available": "SSE41 baseline(SSE SSE2 SSE3)" - }, - "Ff": { - "current": "FMA3__AVX2", - "available": "AVX512F FMA3__AVX2 baseline(SSE SSE2 SSE3)" - }, - "Dd": { - "current": "FMA3__AVX2", - "available": "AVX512F FMA3__AVX2 baseline(SSE SSE2 SSE3)" - } - }, - "add": { - "ddd": { - "current": "FMA3__AVX2", - "available": "FMA3__AVX2 baseline(SSE SSE2 SSE3)" - }, - "FFF": { - "current": "FMA3__AVX2", - "available": "FMA3__AVX2 baseline(SSE SSE2 SSE3)" - } + "absolute": { + "dd": { + "current": "baseline(X86_V2)", + "available": "baseline(X86_V2)" + }, + "Ff": { + "current": "X86_V3", + "available": "X86_V3 baseline(X86_V2)" + }, + "Dd": { + "current": "X86_V3", + "available": "X86_V3 baseline(X86_V2)" + } + }, + "add": { + "ddd": { + "current": "X86_V3", + "available": "X86_V3 baseline(X86_V2)" + }, + "FFF": { + "current": "X86_V3", + "available": "X86_V3 baseline(X86_V2)" + } } } diff --git a/doc/source/reference/simd/gen_features.py b/doc/source/reference/simd/gen_features.py deleted file mode 100644 index 3394f67f23ef..000000000000 --- a/doc/source/reference/simd/gen_features.py +++ /dev/null @@ -1,196 +0,0 @@ -""" -Generate CPU features tables from CCompilerOpt -""" -from os import path - -from numpy.distutils.ccompiler_opt import CCompilerOpt - - -class FakeCCompilerOpt(CCompilerOpt): - # disable caching no need for it - conf_nocache = True - - def __init__(self, arch, cc, *args, **kwargs): - self.fake_info = (arch, cc, '') - CCompilerOpt.__init__(self, None, **kwargs) - - def dist_compile(self, sources, flags, **kwargs): - return sources - - def dist_info(self): - return self.fake_info - - @staticmethod - def dist_log(*args, stderr=False): - # avoid printing - pass - - def feature_test(self, 
name, force_flags=None, macros=[]): - # To speed up - return True - -class Features: - def __init__(self, arch, cc): - self.copt = FakeCCompilerOpt(arch, cc, cpu_baseline="max") - - def names(self): - return self.copt.cpu_baseline_names() - - def serialize(self, features_names): - result = [] - for f in self.copt.feature_sorted(features_names): - gather = self.copt.feature_supported.get(f, {}).get("group", []) - implies = self.copt.feature_sorted(self.copt.feature_implies(f)) - result.append((f, implies, gather)) - return result - - def table(self, **kwargs): - return self.gen_table(self.serialize(self.names()), **kwargs) - - def table_diff(self, vs, **kwargs): - fnames = set(self.names()) - fnames_vs = set(vs.names()) - common = fnames.intersection(fnames_vs) - extra = fnames.difference(fnames_vs) - notavl = fnames_vs.difference(fnames) - iextra = {} - inotavl = {} - idiff = set() - for f in common: - implies = self.copt.feature_implies(f) - implies_vs = vs.copt.feature_implies(f) - e = implies.difference(implies_vs) - i = implies_vs.difference(implies) - if not i and not e: - continue - if e: - iextra[f] = e - if i: - inotavl[f] = e - idiff.add(f) - - def fbold(f): - if f in extra: - return f':enabled:`{f}`' - if f in notavl: - return f':disabled:`{f}`' - return f - - def fbold_implies(f, i): - if i in iextra.get(f, {}): - return f':enabled:`{i}`' - if f in notavl or i in inotavl.get(f, {}): - return f':disabled:`{i}`' - return i - - diff_all = self.serialize(idiff.union(extra)) - diff_all += vs.serialize(notavl) - content = self.gen_table( - diff_all, fstyle=fbold, fstyle_implies=fbold_implies, **kwargs - ) - return content - - def gen_table(self, serialized_features, fstyle=None, fstyle_implies=None, - **kwargs): - - if fstyle is None: - fstyle = lambda ft: f'``{ft}``' - if fstyle_implies is None: - fstyle_implies = lambda origin, ft: fstyle(ft) - - rows = [] - have_gather = False - for f, implies, gather in serialized_features: - if gather: - have_gather = 
True - name = fstyle(f) - implies = ' '.join([fstyle_implies(f, i) for i in implies]) - gather = ' '.join([fstyle_implies(f, i) for i in gather]) - rows.append((name, implies, gather)) - if not rows: - return '' - fields = ["Name", "Implies", "Gathers"] - if not have_gather: - del fields[2] - rows = [(name, implies) for name, implies, _ in rows] - return self.gen_rst_table(fields, rows, **kwargs) - - def gen_rst_table(self, field_names, rows, tab_size=4): - assert not rows or len(field_names) == len(rows[0]) - rows.append(field_names) - fld_len = len(field_names) - cls_len = [max(len(c[i]) for c in rows) for i in range(fld_len)] - del rows[-1] - cformat = ' '.join('{:<%d}' % i for i in cls_len) - border = cformat.format(*['=' * i for i in cls_len]) - - rows = [cformat.format(*row) for row in rows] - # header - rows = [border, cformat.format(*field_names), border] + rows - # footer - rows += [border] - # add left margin - rows = [(' ' * tab_size) + r for r in rows] - return '\n'.join(rows) - -def wrapper_section(title, content, tab_size=4): - tab = ' ' * tab_size - if content: - return ( - f"{title}\n{'~' * len(title)}" - f"\n.. table::\n{tab}:align: left\n\n" - f"{content}\n\n" - ) - return '' - -def wrapper_tab(title, table, tab_size=4): - tab = ' ' * tab_size - if table: - ('\n' + tab).join(( - '.. tab:: ' + title, - tab + '.. table::', - tab + 'align: left', - table + '\n\n' - )) - return '' - - -if __name__ == '__main__': - - pretty_names = { - "PPC64": "IBM/POWER big-endian", - "PPC64LE": "IBM/POWER little-endian", - "S390X": "IBM/ZSYSTEM(S390X)", - "ARMHF": "ARMv7/A32", - "AARCH64": "ARMv8/A64", - "ICC": "Intel Compiler", - # "ICCW": "Intel Compiler msvc-like", - "MSVC": "Microsoft Visual C/C++" - } - gen_path = path.join( - path.dirname(path.realpath(__file__)), "generated_tables" - ) - with open(path.join(gen_path, 'cpu_features.inc'), 'w') as fd: - fd.write(f'.. 
generated via {__file__}\n\n') - for arch in ( - ("x86", "PPC64", "PPC64LE", "ARMHF", "AARCH64", "S390X") - ): - title = "On " + pretty_names.get(arch, arch) - table = Features(arch, 'gcc').table() - fd.write(wrapper_section(title, table)) - - with open(path.join(gen_path, 'compilers-diff.inc'), 'w') as fd: - fd.write(f'.. generated via {__file__}\n\n') - for arch, cc_names in ( - ("x86", ("clang", "ICC", "MSVC")), - ("PPC64", ("clang",)), - ("PPC64LE", ("clang",)), - ("ARMHF", ("clang",)), - ("AARCH64", ("clang",)), - ("S390X", ("clang",)) - ): - arch_pname = pretty_names.get(arch, arch) - for cc in cc_names: - title = f"On {arch_pname}::{pretty_names.get(cc, cc)}" - table = Features(arch, cc).table_diff(Features(arch, "gcc")) - fd.write(wrapper_section(title, table)) diff --git a/doc/source/reference/simd/generated_tables/compilers-diff.inc b/doc/source/reference/simd/generated_tables/compilers-diff.inc deleted file mode 100644 index d5a87da3c617..000000000000 --- a/doc/source/reference/simd/generated_tables/compilers-diff.inc +++ /dev/null @@ -1,35 +0,0 @@ -.. generated via /numpy/numpy/./doc/source/reference/simd/gen_features.py - -On x86::Intel Compiler -~~~~~~~~~~~~~~~~~~~~~~ -.. 
table:: - :align: left - - ====================== ================================================================================================================================================================================================================================================================================================================================== ====================== - Name Implies Gathers - ====================== ================================================================================================================================================================================================================================================================================================================================== ====================== - FMA3 SSE SSE2 SSE3 SSSE3 SSE41 POPCNT SSE42 AVX F16C :enabled:`AVX2` - AVX2 SSE SSE2 SSE3 SSSE3 SSE41 POPCNT SSE42 AVX F16C :enabled:`FMA3` - AVX512F SSE SSE2 SSE3 SSSE3 SSE41 POPCNT SSE42 AVX F16C FMA3 AVX2 :enabled:`AVX512CD` - :disabled:`XOP` :disabled:`SSE` :disabled:`SSE2` :disabled:`SSE3` :disabled:`SSSE3` :disabled:`SSE41` :disabled:`POPCNT` :disabled:`SSE42` :disabled:`AVX` - :disabled:`FMA4` :disabled:`SSE` :disabled:`SSE2` :disabled:`SSE3` :disabled:`SSSE3` :disabled:`SSE41` :disabled:`POPCNT` :disabled:`SSE42` :disabled:`AVX` - :disabled:`AVX512_SPR` :disabled:`SSE` :disabled:`SSE2` :disabled:`SSE3` :disabled:`SSSE3` :disabled:`SSE41` :disabled:`POPCNT` :disabled:`SSE42` :disabled:`AVX` :disabled:`F16C` :disabled:`FMA3` :disabled:`AVX2` :disabled:`AVX512F` :disabled:`AVX512CD` :disabled:`AVX512_SKX` :disabled:`AVX512_CLX` :disabled:`AVX512_CNL` :disabled:`AVX512_ICL` :disabled:`AVX512FP16` - ====================== 
================================================================================================================================================================================================================================================================================================================================== ====================== - -On x86::Microsoft Visual C/C++ -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. table:: - :align: left - - ====================== ================================================================================================================================================================================================================================================================================================================================== ============================================================================= - Name Implies Gathers - ====================== ================================================================================================================================================================================================================================================================================================================================== ============================================================================= - FMA3 SSE SSE2 SSE3 SSSE3 SSE41 POPCNT SSE42 AVX F16C :enabled:`AVX2` - AVX2 SSE SSE2 SSE3 SSSE3 SSE41 POPCNT SSE42 AVX F16C :enabled:`FMA3` - AVX512F SSE SSE2 SSE3 SSSE3 SSE41 POPCNT SSE42 AVX F16C FMA3 AVX2 :enabled:`AVX512CD` :enabled:`AVX512_SKX` - AVX512CD SSE SSE2 SSE3 SSSE3 SSE41 POPCNT SSE42 AVX F16C FMA3 AVX2 AVX512F :enabled:`AVX512_SKX` - :disabled:`AVX512_KNL` :disabled:`SSE` :disabled:`SSE2` :disabled:`SSE3` :disabled:`SSSE3` :disabled:`SSE41` :disabled:`POPCNT` :disabled:`SSE42` :disabled:`AVX` :disabled:`F16C` :disabled:`FMA3` :disabled:`AVX2` :disabled:`AVX512F` :disabled:`AVX512CD` :disabled:`AVX512ER` :disabled:`AVX512PF` - :disabled:`AVX512_KNM` 
:disabled:`SSE` :disabled:`SSE2` :disabled:`SSE3` :disabled:`SSSE3` :disabled:`SSE41` :disabled:`POPCNT` :disabled:`SSE42` :disabled:`AVX` :disabled:`F16C` :disabled:`FMA3` :disabled:`AVX2` :disabled:`AVX512F` :disabled:`AVX512CD` :disabled:`AVX512_KNL` :disabled:`AVX5124FMAPS` :disabled:`AVX5124VNNIW` :disabled:`AVX512VPOPCNTDQ` - :disabled:`AVX512_SPR` :disabled:`SSE` :disabled:`SSE2` :disabled:`SSE3` :disabled:`SSSE3` :disabled:`SSE41` :disabled:`POPCNT` :disabled:`SSE42` :disabled:`AVX` :disabled:`F16C` :disabled:`FMA3` :disabled:`AVX2` :disabled:`AVX512F` :disabled:`AVX512CD` :disabled:`AVX512_SKX` :disabled:`AVX512_CLX` :disabled:`AVX512_CNL` :disabled:`AVX512_ICL` :disabled:`AVX512FP16` - ====================== ================================================================================================================================================================================================================================================================================================================================== ============================================================================= - diff --git a/doc/source/reference/simd/generated_tables/cpu_features.inc b/doc/source/reference/simd/generated_tables/cpu_features.inc deleted file mode 100644 index 603370e21545..000000000000 --- a/doc/source/reference/simd/generated_tables/cpu_features.inc +++ /dev/null @@ -1,109 +0,0 @@ -.. generated via /numpy/numpy/./doc/source/reference/simd/gen_features.py - -On x86 -~~~~~~ -.. 
table:: - :align: left - - ============== ========================================================================================================================================================================================== ===================================================== - Name Implies Gathers - ============== ========================================================================================================================================================================================== ===================================================== - ``SSE`` ``SSE2`` - ``SSE2`` ``SSE`` - ``SSE3`` ``SSE`` ``SSE2`` - ``SSSE3`` ``SSE`` ``SSE2`` ``SSE3`` - ``SSE41`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` - ``POPCNT`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` - ``SSE42`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` - ``AVX`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` ``SSE42`` - ``XOP`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` ``SSE42`` ``AVX`` - ``FMA4`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` ``SSE42`` ``AVX`` - ``F16C`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` ``SSE42`` ``AVX`` - ``FMA3`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` ``SSE42`` ``AVX`` ``F16C`` - ``AVX2`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` ``SSE42`` ``AVX`` ``F16C`` - ``AVX512F`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` ``SSE42`` ``AVX`` ``F16C`` ``FMA3`` ``AVX2`` - ``AVX512CD`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` ``SSE42`` ``AVX`` ``F16C`` ``FMA3`` ``AVX2`` ``AVX512F`` - ``AVX512_KNL`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` ``SSE42`` ``AVX`` ``F16C`` ``FMA3`` ``AVX2`` ``AVX512F`` ``AVX512CD`` ``AVX512ER`` ``AVX512PF`` - ``AVX512_KNM`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` ``SSE42`` ``AVX`` ``F16C`` ``FMA3`` ``AVX2`` ``AVX512F`` ``AVX512CD`` ``AVX512_KNL`` ``AVX5124FMAPS`` ``AVX5124VNNIW`` ``AVX512VPOPCNTDQ`` - 
``AVX512_SKX`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` ``SSE42`` ``AVX`` ``F16C`` ``FMA3`` ``AVX2`` ``AVX512F`` ``AVX512CD`` ``AVX512VL`` ``AVX512BW`` ``AVX512DQ`` - ``AVX512_CLX`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` ``SSE42`` ``AVX`` ``F16C`` ``FMA3`` ``AVX2`` ``AVX512F`` ``AVX512CD`` ``AVX512_SKX`` ``AVX512VNNI`` - ``AVX512_CNL`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` ``SSE42`` ``AVX`` ``F16C`` ``FMA3`` ``AVX2`` ``AVX512F`` ``AVX512CD`` ``AVX512_SKX`` ``AVX512IFMA`` ``AVX512VBMI`` - ``AVX512_ICL`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` ``SSE42`` ``AVX`` ``F16C`` ``FMA3`` ``AVX2`` ``AVX512F`` ``AVX512CD`` ``AVX512_SKX`` ``AVX512_CLX`` ``AVX512_CNL`` ``AVX512VBMI2`` ``AVX512BITALG`` ``AVX512VPOPCNTDQ`` - ``AVX512_SPR`` ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE41`` ``POPCNT`` ``SSE42`` ``AVX`` ``F16C`` ``FMA3`` ``AVX2`` ``AVX512F`` ``AVX512CD`` ``AVX512_SKX`` ``AVX512_CLX`` ``AVX512_CNL`` ``AVX512_ICL`` ``AVX512FP16`` - ============== ========================================================================================================================================================================================== ===================================================== - -On IBM/POWER big-endian -~~~~~~~~~~~~~~~~~~~~~~~ -.. table:: - :align: left - - ======== ========================= - Name Implies - ======== ========================= - ``VSX`` - ``VSX2`` ``VSX`` - ``VSX3`` ``VSX`` ``VSX2`` - ``VSX4`` ``VSX`` ``VSX2`` ``VSX3`` - ======== ========================= - -On IBM/POWER little-endian -~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. table:: - :align: left - - ======== ========================= - Name Implies - ======== ========================= - ``VSX`` ``VSX2`` - ``VSX2`` ``VSX`` - ``VSX3`` ``VSX`` ``VSX2`` - ``VSX4`` ``VSX`` ``VSX2`` ``VSX3`` - ======== ========================= - -On ARMv7/A32 -~~~~~~~~~~~~ -.. 
table:: - :align: left - - ============== =========================================================== - Name Implies - ============== =========================================================== - ``NEON`` - ``NEON_FP16`` ``NEON`` - ``NEON_VFPV4`` ``NEON`` ``NEON_FP16`` - ``ASIMD`` ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` - ``ASIMDHP`` ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD`` - ``ASIMDDP`` ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD`` - ``ASIMDFHM`` ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD`` ``ASIMDHP`` - ============== =========================================================== - -On ARMv8/A64 -~~~~~~~~~~~~ -.. table:: - :align: left - - ============== =========================================================== - Name Implies - ============== =========================================================== - ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD`` - ``NEON_FP16`` ``NEON`` ``NEON_VFPV4`` ``ASIMD`` - ``NEON_VFPV4`` ``NEON`` ``NEON_FP16`` ``ASIMD`` - ``ASIMD`` ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` - ``ASIMDHP`` ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD`` - ``ASIMDDP`` ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD`` - ``ASIMDFHM`` ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD`` ``ASIMDHP`` - ============== =========================================================== - -On IBM/ZSYSTEM(S390X) -~~~~~~~~~~~~~~~~~~~~~ -.. 
table:: - :align: left - - ======== ============== - Name Implies - ======== ============== - ``VX`` - ``VXE`` ``VX`` - ``VXE2`` ``VX`` ``VXE`` - ======== ============== - diff --git a/doc/source/reference/simd/log_example.txt b/doc/source/reference/simd/log_example.txt index 79c5c6c253ca..c71306d42aae 100644 --- a/doc/source/reference/simd/log_example.txt +++ b/doc/source/reference/simd/log_example.txt @@ -1,79 +1,64 @@ -########### EXT COMPILER OPTIMIZATION ########### -Platform : - Architecture: x64 - Compiler : gcc +Test features "X86_V2" : Supported +Test features "X86_V3" : Supported +Test features "X86_V4" : Supported +Test features "AVX512_ICL" : Supported +Test features "AVX512_SPR" : Supported +Configuring npy_cpu_dispatch_config.h using configuration +Message: +CPU Optimization Options + baseline: + Requested : min + Enabled : X86_V2 + dispatch: + Requested : max + Enabled : X86_V3 X86_V4 AVX512_ICL AVX512_SPR -CPU baseline : - Requested : 'min' - Enabled : SSE SSE2 SSE3 - Flags : -msse -msse2 -msse3 - Extra checks: none - -CPU dispatch : - Requested : 'max -xop -fma4' - Enabled : SSSE3 SSE41 POPCNT SSE42 AVX F16C FMA3 AVX2 AVX512F AVX512CD AVX512_KNL AVX512_KNM AVX512_SKX AVX512_CLX AVX512_CNL AVX512_ICL - Generated : - : - SSE41 : SSE SSE2 SSE3 SSSE3 - Flags : -msse -msse2 -msse3 -mssse3 -msse4.1 - Extra checks: none - Detect : SSE SSE2 SSE3 SSSE3 SSE41 - : build/src.linux-x86_64-3.9/numpy/_core/src/umath/loops_arithmetic.dispatch.c - : numpy/_core/src/umath/_umath_tests.dispatch.c - : - SSE42 : SSE SSE2 SSE3 SSSE3 SSE41 POPCNT - Flags : -msse -msse2 -msse3 -mssse3 -msse4.1 -mpopcnt -msse4.2 - Extra checks: none - Detect : SSE SSE2 SSE3 SSSE3 SSE41 POPCNT SSE42 - : build/src.linux-x86_64-3.9/numpy/_core/src/_simd/_simd.dispatch.c - : - AVX2 : SSE SSE2 SSE3 SSSE3 SSE41 POPCNT SSE42 AVX F16C - Flags : -msse -msse2 -msse3 -mssse3 -msse4.1 -mpopcnt -msse4.2 -mavx -mf16c -mavx2 - Extra checks: none - Detect : AVX F16C AVX2 - : 
build/src.linux-x86_64-3.9/numpy/_core/src/umath/loops_arithm_fp.dispatch.c - : build/src.linux-x86_64-3.9/numpy/_core/src/umath/loops_arithmetic.dispatch.c - : numpy/_core/src/umath/_umath_tests.dispatch.c - : - (FMA3 AVX2) : SSE SSE2 SSE3 SSSE3 SSE41 POPCNT SSE42 AVX F16C - Flags : -msse -msse2 -msse3 -mssse3 -msse4.1 -mpopcnt -msse4.2 -mavx -mf16c -mfma -mavx2 - Extra checks: none - Detect : AVX F16C FMA3 AVX2 - : build/src.linux-x86_64-3.9/numpy/_core/src/_simd/_simd.dispatch.c - : build/src.linux-x86_64-3.9/numpy/_core/src/umath/loops_exponent_log.dispatch.c - : build/src.linux-x86_64-3.9/numpy/_core/src/umath/loops_trigonometric.dispatch.c - : - AVX512F : SSE SSE2 SSE3 SSSE3 SSE41 POPCNT SSE42 AVX F16C FMA3 AVX2 - Flags : -msse -msse2 -msse3 -mssse3 -msse4.1 -mpopcnt -msse4.2 -mavx -mf16c -mfma -mavx2 -mavx512f - Extra checks: AVX512F_REDUCE - Detect : AVX512F - : build/src.linux-x86_64-3.9/numpy/_core/src/_simd/_simd.dispatch.c - : build/src.linux-x86_64-3.9/numpy/_core/src/umath/loops_arithm_fp.dispatch.c - : build/src.linux-x86_64-3.9/numpy/_core/src/umath/loops_arithmetic.dispatch.c - : build/src.linux-x86_64-3.9/numpy/_core/src/umath/loops_exponent_log.dispatch.c - : build/src.linux-x86_64-3.9/numpy/_core/src/umath/loops_trigonometric.dispatch.c - : - AVX512_SKX : SSE SSE2 SSE3 SSSE3 SSE41 POPCNT SSE42 AVX F16C FMA3 AVX2 AVX512F AVX512CD - Flags : -msse -msse2 -msse3 -mssse3 -msse4.1 -mpopcnt -msse4.2 -mavx -mf16c -mfma -mavx2 -mavx512f -mavx512cd -mavx512vl -mavx512bw -mavx512dq - Extra checks: AVX512BW_MASK AVX512DQ_MASK - Detect : AVX512_SKX - : build/src.linux-x86_64-3.9/numpy/_core/src/_simd/_simd.dispatch.c - : build/src.linux-x86_64-3.9/numpy/_core/src/umath/loops_arithmetic.dispatch.c - : build/src.linux-x86_64-3.9/numpy/_core/src/umath/loops_exponent_log.dispatch.c -CCompilerOpt.cache_flush[804] : write cache to path -> /home/seiko/work/repos/numpy/build/temp.linux-x86_64-3.9/ccompiler_opt_cache_ext.py - -########### CLIB COMPILER OPTIMIZATION 
########### -Platform : - Architecture: x64 - Compiler : gcc - -CPU baseline : - Requested : 'min' - Enabled : SSE SSE2 SSE3 - Flags : -msse -msse2 -msse3 - Extra checks: none - -CPU dispatch : - Requested : 'max -xop -fma4' - Enabled : SSSE3 SSE41 POPCNT SSE42 AVX F16C FMA3 AVX2 AVX512F AVX512CD AVX512_KNL AVX512_KNM AVX512_SKX AVX512_CLX AVX512_CNL AVX512_ICL - Generated : none +Generating multi-targets for "_umath_tests.dispatch.h" + Enabled targets: X86_V3, baseline +Generating multi-targets for "argfunc.dispatch.h" + Enabled targets: X86_V4, X86_V3, baseline +Generating multi-targets for "x86_simd_argsort.dispatch.h" + Enabled targets: X86_V4, X86_V3 +Generating multi-targets for "x86_simd_qsort.dispatch.h" + Enabled targets: X86_V4, X86_V3 +Generating multi-targets for "x86_simd_qsort_16bit.dispatch.h" + Enabled targets: AVX512_SPR, AVX512_ICL +Generating multi-targets for "highway_qsort.dispatch.h" + Enabled targets: +Generating multi-targets for "highway_qsort_16bit.dispatch.h" + Enabled targets: +Generating multi-targets for "loops_arithm_fp.dispatch.h" + Enabled targets: X86_V3, baseline +Generating multi-targets for "loops_arithmetic.dispatch.h" + Enabled targets: X86_V4, X86_V3, baseline +Generating multi-targets for "loops_comparison.dispatch.h" + Enabled targets: X86_V4, X86_V3, baseline +Generating multi-targets for "loops_exponent_log.dispatch.h" + Enabled targets: X86_V4, X86_V3, baseline +Generating multi-targets for "loops_hyperbolic.dispatch.h" + Enabled targets: X86_V4, X86_V3, baseline +Generating multi-targets for "loops_logical.dispatch.h" + Enabled targets: X86_V4, X86_V3, baseline +Generating multi-targets for "loops_minmax.dispatch.h" + Enabled targets: X86_V4, X86_V3, baseline +Generating multi-targets for "loops_modulo.dispatch.h" + Enabled targets: baseline +Generating multi-targets for "loops_trigonometric.dispatch.h" + Enabled targets: X86_V4, X86_V3, baseline +Generating multi-targets for "loops_umath_fp.dispatch.h" + Enabled 
targets: X86_V4, baseline +Generating multi-targets for "loops_unary.dispatch.h" + Enabled targets: X86_V4, baseline +Generating multi-targets for "loops_unary_fp.dispatch.h" + Enabled targets: baseline +Generating multi-targets for "loops_unary_fp_le.dispatch.h" + Enabled targets: baseline +Generating multi-targets for "loops_unary_complex.dispatch.h" + Enabled targets: X86_V3, baseline +Generating multi-targets for "loops_autovec.dispatch.h" + Enabled targets: X86_V3, baseline +Generating multi-targets for "loops_half.dispatch.h" + Enabled targets: AVX512_SPR, X86_V4, baseline +WARNING: Project targets '>=1.5.2' but uses feature deprecated since '1.3.0': Source file src/umath/svml/linux/avx512/svml_z0_acos_d_la.s in the 'objects' kwarg is not an object.. +Generating multi-targets for "_simd.dispatch.h" + Enabled targets: X86_V3, X86_V4, baseline diff --git a/meson.options b/meson.options index f17f9901664a..236f44af6d6c 100644 --- a/meson.options +++ b/meson.options @@ -28,14 +28,17 @@ option('disable-optimization', type: 'boolean', value: false, description: 'Disable CPU optimized code (dispatch,simd,unroll...)') option('cpu-baseline', type: 'string', value: 'min', description: 'Minimal set of required CPU features') +<<<<<<< HEAD option('cpu-baseline-detect', type: 'feature', value: 'auto', description: 'Detect CPU baseline from the compiler flags') option('cpu-dispatch', type: 'string', value: 'max -xop -fma4', +======= +option('cpu-dispatch', type: 'string', value: 'max', +>>>>>>> f68e178f88 (ENH: Modulate dispatched x86 CPU features) description: 'Dispatched set of additional CPU features') option('test-simd', type: 'array', value: [ - 'BASELINE', 'SSE2', 'SSE42', 'XOP', 'FMA4', - 'AVX2', 'FMA3', 'AVX2,FMA3', 'AVX512F', 'AVX512_SKX', + 'BASELINE', 'X86_V2', 'X86_V3', 'X86_V4', 'VSX', 'VSX2', 'VSX3', 'VSX4', 'NEON', 'ASIMD', 'VX', 'VXE', 'VXE2', diff --git a/meson_cpu/meson.build b/meson_cpu/meson.build index 1c4c6eecb308..02bbe5f7618e 100644 --- 
a/meson_cpu/meson.build +++ b/meson_cpu/meson.build @@ -88,13 +88,16 @@ CPU_FEATURES += S390X_FEATURES CPU_FEATURES += RV64_FEATURES CPU_FEATURES += LOONGARCH64_FEATURES +CPU_FEATURES_REDIRECT = {} +CPU_FEATURES_REDIRECT += X86_REDIRECT + # Parse the requested baseline (CPU_CONF_BASELINE) and dispatch features # (CPU_CONF_DISPATCH). cpu_family = host_machine.cpu_family() # Used by build option 'min' min_features = { - 'x86': [SSE2], - 'x86_64': [SSE3], + 'x86': [X86_V2], + 'x86_64': [X86_V2], 'ppc64': [], 's390x': [], 'arm': [], @@ -191,15 +194,31 @@ foreach opt_name, conf : parse_options accumulate = min_features elif tok == 'MAX' accumulate = max_features - elif tok in CPU_FEATURES - tokobj = CPU_FEATURES[tok] - if tokobj not in max_features - ignored += tok - continue - endif - accumulate = [tokobj] else - error('Invalid token "'+tok+'" within option --'+opt_name) + if tok in CPU_FEATURES_REDIRECT + ntok = CPU_FEATURES_REDIRECT[tok] + if ntok == '' + warning('Ignoring CPU feature "@0@" in --@1@ option - feature is no longer supported.'.format(tok, opt_name)) + else + warning('CPU Feature "@0@" is no longer explicitly supported, redirecting to "@1@".'.format(tok, ntok)) + endif + warning('Please check the latest documentation for build options.') + if ntok == '' or not append # redirected features not safe to be execluded + continue + endif + tok = ntok + endif + if tok not in CPU_FEATURES + error('Invalid token "'+tok+'" within option --'+opt_name) + endif + if tok in CPU_FEATURES + tokobj = CPU_FEATURES[tok] + if tokobj not in max_features + ignored += tok + continue + endif + accumulate = [tokobj] + endif endif if append foreach fet : accumulate @@ -209,8 +228,17 @@ foreach opt_name, conf : parse_options endforeach else filterd = [] + # filter out the features that are in the accumulate list + # including any successor features foreach fet : result - if fet not in accumulate + escape = false + foreach fet2 : accumulate + if fet2 in mod_features.implicit_c(fet) 
+ escape = true + break + endif + endforeach + if not escape filterd += fet endif endforeach diff --git a/meson_cpu/x86/meson.build b/meson_cpu/x86/meson.build index 1276e922ff2a..bd26c615624c 100644 --- a/meson_cpu/x86/meson.build +++ b/meson_cpu/x86/meson.build @@ -1,230 +1,98 @@ source_root = meson.project_source_root() +current_dir = meson.current_source_dir() +cpu_family = host_machine.cpu_family() mod_features = import('features') -SSE = mod_features.new( - 'SSE', 1, args: '-msse', - test_code: files(source_root + '/numpy/distutils/checks/cpu_sse.c')[0] -) -SSE2 = mod_features.new( - 'SSE2', 2, implies: SSE, - args: '-msse2', - test_code: files(source_root + '/numpy/distutils/checks/cpu_sse2.c')[0] -) -# enabling SSE without SSE2 is useless also it's non-optional for x86_64 -SSE.update(implies: SSE2) -SSE3 = mod_features.new( - 'SSE3', 3, implies: SSE2, - args: '-msse3', - test_code: files(source_root + '/numpy/distutils/checks/cpu_sse3.c')[0] -) -SSSE3 = mod_features.new( - 'SSSE3', 4, implies: SSE3, - args: '-mssse3', - test_code: files(source_root + '/numpy/distutils/checks/cpu_ssse3.c')[0] -) -SSE41 = mod_features.new( - 'SSE41', 5, implies: SSSE3, - args: '-msse4.1', - test_code: files(source_root + '/numpy/distutils/checks/cpu_sse41.c')[0] -) -POPCNT = mod_features.new( - 'POPCNT', 6, implies: SSE41, - args: '-mpopcnt', - test_code: files(source_root + '/numpy/distutils/checks/cpu_popcnt.c')[0] -) -SSE42 = mod_features.new( - 'SSE42', 7, implies: POPCNT, args: '-msse4.2', - test_code: files(source_root + '/numpy/distutils/checks/cpu_sse42.c')[0] -) -# 7-20 left as margin for any extra features -AVX = mod_features.new( - 'AVX', 20, implies: SSE42, args: '-mavx', - detect: {'val': 'AVX', 'match': '.*SSE.*'}, - test_code: files(source_root + '/numpy/distutils/checks/cpu_avx.c')[0] -) -XOP = mod_features.new( - 'XOP', 21, implies: AVX, args: '-mxop', - test_code: files(source_root + '/numpy/distutils/checks/cpu_xop.c')[0] -) -FMA4 = mod_features.new( - 
'FMA4', 22, implies: AVX, args: '-mfma4', - test_code: files(source_root + '/numpy/distutils/checks/cpu_fma4.c')[0] -) -# x86 half-precision -F16C = mod_features.new( - 'F16C', 23, implies: AVX, args: '-mf16c', - test_code: files(source_root + '/numpy/distutils/checks/cpu_f16c.c')[0] -) -FMA3 = mod_features.new( - 'FMA3', 24, implies: F16C, args: '-mfma', - test_code: files(source_root + '/numpy/distutils/checks/cpu_fma3.c')[0] -) -# match this to HWY_AVX2 -AVX2 = mod_features.new( - 'AVX2', 25, implies: FMA3, args: ['-mavx2', '-maes', '-mpclmul', '-mbmi', '-mbmi2'], - test_code: files(source_root + '/numpy/distutils/checks/cpu_avx2.c')[0] -) -# 25-40 left as margin for any extra features -AVX512F = mod_features.new( - 'AVX512F', 40, implies: [AVX2], - # Disables mmx because of stack corruption that may happen during mask - # conversions. - # TODO (seiko2plus): provide more clarification - args: ['-mno-mmx', '-mavx512f'], - detect: {'val': 'AVX512F', 'match': '.*'}, - test_code: files(source_root + '/numpy/distutils/checks/cpu_avx512f.c')[0], - extra_tests: { - 'AVX512F_REDUCE': files(source_root + '/numpy/distutils/checks/extra_avx512f_reduce.c')[0] - } -) -AVX512CD = mod_features.new( - 'AVX512CD', 41, implies: AVX512F, args: '-mavx512cd', - test_code: files(source_root + '/numpy/distutils/checks/cpu_avx512cd.c')[0] -) -AVX512_KNL = mod_features.new( - 'AVX512_KNL', 42, implies: AVX512CD, args: ['-mavx512er', '-mavx512pf'], - group: ['AVX512ER', 'AVX512PF'], - test_code: files(source_root + '/numpy/distutils/checks/cpu_avx512_knl.c')[0] -) -AVX512_KNM = mod_features.new( - 'AVX512_KNM', 43, implies: AVX512_KNL, - args: ['-mavx5124fmaps', '-mavx5124vnniw', '-mavx512vpopcntdq'], - group: ['AVX5124FMAPS', 'AVX5124VNNIW', 'AVX512VPOPCNTDQ'], - test_code: files(source_root + '/numpy/distutils/checks/cpu_avx512_knm.c')[0] -) -AVX512_SKX = mod_features.new( - 'AVX512_SKX', 50, implies: AVX512CD, - args: ['-mavx512vl', '-mavx512bw', '-mavx512dq'], - group: ['AVX512VL', 
'AVX512BW', 'AVX512DQ'], - test_code: files(source_root + '/numpy/distutils/checks/cpu_avx512_skx.c')[0], - extra_tests: { - 'AVX512BW_MASK': files(source_root + '/numpy/distutils/checks/extra_avx512bw_mask.c')[0], - 'AVX512DQ_MASK': files(source_root + '/numpy/distutils/checks/extra_avx512dq_mask.c')[0] - } -) -AVX512_CLX = mod_features.new( - 'AVX512_CLX', 51, implies: AVX512_SKX, args: '-mavx512vnni', - group: ['AVX512VNNI'], - test_code: files(source_root + '/numpy/distutils/checks/cpu_avx512_clx.c')[0] -) -AVX512_CNL = mod_features.new( - 'AVX512_CNL', 52, implies: AVX512_SKX, - args: ['-mavx512ifma', '-mavx512vbmi'], - group: ['AVX512IFMA', 'AVX512VBMI'], - test_code: files(source_root + '/numpy/distutils/checks/cpu_avx512_cnl.c')[0] -) +HWY_SSE4_FLAGS = ['-DHWY_WANT_SSE4', '-DHWY_DISABLE_PCLMUL_AES'] +X86_64_V2_FLAGS = cpu_family == 'x86'? [] : ['-mcx16'] +X86_64_V2_NAMES = cpu_family == 'x86'? [] : ['CX16'] +X86_V2 = mod_features.new( + 'X86_V2', 1, args: ['-msse', '-msse2', '-msse3', '-mssse3', '-msse4.1', '-msse4.2', + '-mpopcnt', '-msahf'] + X86_64_V2_FLAGS + HWY_SSE4_FLAGS, + # Adds compiler definitions `NPY_HAVE_SSE*` + group: ['SSE', 'SSE2', 'SSE3', 'SSSE3', 'SSE41', 'SSE42', 'POPCNT', 'LAHF'] + X86_64_V2_NAMES, + detect: 'X86_V2', + test_code: files(current_dir + '/test_x86_v2.c')[0], +) +X86_V3 = mod_features.new( + 'X86_V3', 10, implies: X86_V2, + args: ['-mavx', '-mavx2', '-mfma', '-mbmi', '-mbmi2', '-mlzcnt', '-mf16c', '-mmovbe'], + group: ['AVX', 'AVX2', 'FMA3', 'BMI', 'BMI2', 'LZCNT', 'F16C', 'MOVBE'], + detect: 'X86_V3', + test_code: files(current_dir + '/test_x86_v3.c')[0], +) +X86_V4 = mod_features.new( + 'X86_V4', 20, implies: X86_V3, + args: ['-mavx512f', '-mavx512cd', '-mavx512vl', '-mavx512bw', '-mavx512dq'], + group: ['AVX512F', 'AVX512CD', 'AVX512VL', 'AVX512BW', 'AVX512DQ', 'AVX512_SKX', + 'AVX512F_REDUCE', 'AVX512BW_MASK', 'AVX512DQ_MASK'], + detect: 'X86_V4', + test_code: files(current_dir + '/test_x86_v4.c')[0], +) +if cpu_family 
== 'x86' + X86_V4.update(disable: 'not supported on x86-32') +endif AVX512_ICL = mod_features.new( - 'AVX512_ICL', 53, implies: [AVX512_CLX, AVX512_CNL], - args: ['-mavx512vbmi2', '-mavx512bitalg', '-mavx512vpopcntdq'], - group: ['AVX512VBMI2', 'AVX512BITALG', 'AVX512VPOPCNTDQ'], + 'AVX512_ICL', 30, implies: X86_V4, + args: ['-mavx512vbmi', '-mavx512vbmi2', '-mavx512vnni', '-mavx512bitalg', + '-mavx512vpopcntdq', '-mavx512ifma', '-mvaes', '-mgfni', '-mvpclmulqdq'], + group: ['AVX512VBMI', 'AVX512VBMI2', 'AVX512VNNI', 'AVX512BITALG', 'AVX512VPOPCNTDQ', + 'AVX512IFMA', 'VAES', 'GFNI', 'VPCLMULQDQ'], + detect: 'AVX512_ICL', test_code: files(source_root + '/numpy/distutils/checks/cpu_avx512_icl.c')[0] ) -# TODO add support for zen4 AVX512_SPR = mod_features.new( - 'AVX512_SPR', 55, implies: AVX512_ICL, + 'AVX512_SPR', 35, implies: AVX512_ICL, args: ['-mavx512fp16'], group: ['AVX512FP16'], + detect: 'AVX512_SPR', test_code: files(source_root + '/numpy/distutils/checks/cpu_avx512_spr.c')[0] ) # Specializations for non unix-like compilers # ------------------------------------------- -cpu_family = host_machine.cpu_family() -compiler_id = meson.get_compiler('c').get_id() +cc = meson.get_compiler('c') +compiler_id = cc.get_id() if compiler_id not in ['gcc', 'clang'] AVX512_SPR.update(disable: compiler_id + ' compiler does not support it') endif -# Common specializations between both Intel compilers (unix-like and msvc-like) -if compiler_id in ['intel', 'intel-cl'] - # POPCNT, and F16C don't own private FLAGS however the compiler still - # provides ISA capability for them. 
- POPCNT.update(args: '') - F16C.update(args: '') - # Intel compilers don't support the following features independently - FMA3.update(implies: [F16C, AVX2]) - AVX2.update(implies: [F16C, FMA3]) - AVX512F.update(implies: [AVX2, AVX512CD]) - AVX512CD.update(implies: [AVX512F]) - XOP.update(disable: 'Intel Compiler does not support it') - FMA4.update(disable: 'Intel Compiler does not support it') -endif - if compiler_id == 'intel-cl' - foreach fet : [SSE, SSE2, SSE3, SSSE3, AVX] - fet.update(args: {'val': '/arch:' + fet.get('name'), 'match': '/arch:.*'}) - endforeach - SSE41.update(args: {'val': '/arch:SSE4.1', 'match': '/arch:.*'}) - SSE42.update(args: {'val': '/arch:SSE4.2', 'match': '/arch:.*'}) - FMA3.update(args: {'val': '/arch:CORE-AVX2', 'match': '/arch:.*'}) - AVX2.update(args: {'val': '/arch:CORE-AVX2', 'match': '/arch:.*'}) - AVX512F.update(args: {'val': '/Qx:COMMON-AVX512', 'match': '/arch:.*'}) - AVX512CD.update(args: {'val': '/Qx:COMMON-AVX512', 'match': '/arch:.*'}) - AVX512_KNL.update(args: {'val': '/Qx:KNL', 'match': '/[arch|Qx]:.*'}) - AVX512_KNM.update(args: {'val': '/Qx:KNM', 'match': '/[arch|Qx]:.*'}) - AVX512_SKX.update(args: {'val': '/Qx:SKYLAKE-AVX512', 'match': '/[arch|Qx]:.*'}) - AVX512_CLX.update(args: {'val': '/Qx:CASCADELAKE', 'match': '/[arch|Qx]:.*'}) - AVX512_CNL.update(args: {'val': '/Qx:CANNONLAKE', 'match': '/[arch|Qx]:.*'}) + X86_V2.update(args: [{'val': '/arch:SSE4.2', 'match': '/arch:.*'}] + HWY_SSE4_FLAGS) + X86_V3.update(args: {'val': '/arch:CORE-AVX2', 'match': '/arch:.*'}) + X86_V4.update(args: {'val': '/Qx:SKYLAKE-AVX512', 'match': '/[arch|Qx]:.*'}) AVX512_ICL.update(args: {'val': '/Qx:ICELAKE-CLIENT', 'match': '/[arch|Qx]:.*'}) endif if compiler_id == 'intel' - clear_m = '^(-mcpu=|-march=)' clear_any = '^(-mcpu=|-march=|-x[A-Z0-9\-])' - FMA3.update(args: {'val': '-xCORE-AVX2', 'match': clear_m}) - AVX2.update(args: {'val': '-xCORE-AVX2', 'match': clear_m}) - AVX512F.update(args: {'val': '-xCOMMON-AVX512', 'match': clear_m}) 
- AVX512CD.update(args: {'val': '-xCOMMON-AVX512', 'match': clear_m}) - AVX512_KNL.update(args: {'val': '-xKNL', 'match': clear_any}) - AVX512_KNM.update(args: {'val': '-xKNM', 'match': clear_any}) - AVX512_SKX.update(args: {'val': '-xSKYLAKE-AVX512', 'match': clear_any}) - AVX512_CLX.update(args: {'val': '-xCASCADELAKE', 'match': clear_any}) - AVX512_CNL.update(args: {'val': '-xCANNONLAKE', 'match': clear_any}) + X86_V2.update(args: [{'val': '-xSSE4.2', 'match': clear_any}] + HWY_SSE4_FLAGS) + X86_V3.update(args: {'val': '-xCORE-AVX2', 'match': clear_any}) + X86_V4.update(args: {'val': '-xSKYLAKE-AVX512', 'match': clear_any}) AVX512_ICL.update(args: {'val': '-xICELAKE-CLIENT', 'match': clear_any}) endif if compiler_id == 'msvc' - # MSVC compiler doesn't support the following features - foreach fet : [AVX512_KNL, AVX512_KNM] - fet.update(disable: compiler_id + ' compiler does not support it') - endforeach - # The following features don't own private FLAGS, however the compiler still - # provides ISA capability for them. - foreach fet : [ - SSE3, SSSE3, SSE41, POPCNT, SSE42, AVX, F16C, XOP, FMA4, - AVX512F, AVX512CD, AVX512_CLX, AVX512_CNL, - AVX512_ICL - ] - fet.update(args: '') - endforeach - # MSVC compiler doesn't support the following features independently - FMA3.update(implies: [F16C, AVX2]) - AVX2.update(implies: [F16C, FMA3]) - AVX512F.update(implies: [AVX2, AVX512CD, AVX512_SKX]) - AVX512CD.update(implies: [AVX512F, AVX512_SKX]) + MSVC_SSE4 = cpu_family == 'x86' ? ['/arch:SSE2'] : [] + MSVC_SSE4 = cc.version().version_compare('>=19.40') ? ['/arch:SSE4.2'] : MSVC_SSE4 + X86_V2.update(args: MSVC_SSE4 + HWY_SSE4_FLAGS + ['/fp:contract']) clear_arch = '/arch:.*' - # only available on 32-bit. 
Its enabled by default on 64-bit mode - foreach fet : [SSE, SSE2] - if cpu_family == 'x86' - fet.update(args: {'val': '/arch:' + fet.get('name'), 'match': clear_arch}) - else - fet.update(args: '') - endif - endforeach - FMA3.update(args: {'val': '/arch:AVX2', 'match': clear_arch}) - # Add floating-point contract flag to fixes transcendental function accuracy on Windows Server 2022 - FMA3.update(args: {'val': '/fp:contract'}) - AVX2.update(args: {'val': '/arch:AVX2', 'match': clear_arch}) - AVX512_SKX.update(args: {'val': '/arch:AVX512', 'match': clear_arch}) + X86_V3.update(args: {'val': '/arch:AVX2', 'match': clear_arch}) + X86_V4.update(args: {'val': '/arch:AVX512', 'match': clear_arch}) + AVX512_ICL.update(args: '') endif +# legacy CPU features +X86_REDIRECT = { + 'SSE': 'X86_V2', 'SSE2': 'X86_V2', 'SSE3': 'X86_V2', 'SSSE3': 'X86_V2', + 'SSE41': 'X86_V2', 'SSE42': 'X86_V2', 'XOP': 'X86_V2', 'FMA4': 'X86_V2', + 'FMA3': 'X86_V3', 'AVX': 'X86_V3', 'F16C': 'X86_V3', + 'AVX512F': 'X86_V3', 'AVX512CD': 'X86_V3', + 'AVX512_KNL': 'X86_V3', 'AVX512_KNM': 'X86_V3', + 'AVX512_SKX': 'X86_V4', 'AVX512_CLX': 'X86_V4', 'AVX512_CNL': 'X86_V4', +} + X86_FEATURES = { - 'SSE': SSE, 'SSE2': SSE2, 'SSE3': SSE3, 'SSSE3': SSSE3, - 'SSE41': SSE41, 'POPCNT': POPCNT, 'SSE42': SSE42, 'AVX': AVX, - 'XOP': XOP, 'FMA4': FMA4, 'F16C': F16C, 'FMA3': FMA3, - 'AVX2': AVX2, 'AVX512F': AVX512F, 'AVX512CD': AVX512CD, - 'AVX512_KNL': AVX512_KNL, 'AVX512_KNM': AVX512_KNM, - 'AVX512_SKX': AVX512_SKX, 'AVX512_CLX': AVX512_CLX, - 'AVX512_CNL': AVX512_CNL, 'AVX512_ICL': AVX512_ICL, - 'AVX512_SPR': AVX512_SPR + 'X86_V2': X86_V2, 'X86_V3': X86_V3, 'X86_V4': X86_V4, + 'AVX512_ICL': AVX512_ICL, 'AVX512_SPR': AVX512_SPR } diff --git a/meson_cpu/x86/test_x86_v2.c b/meson_cpu/x86/test_x86_v2.c new file mode 100644 index 000000000000..f897957224d5 --- /dev/null +++ b/meson_cpu/x86/test_x86_v2.c @@ -0,0 +1,69 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel 
Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. + */ + #if !defined(__SSE__) || !defined(__SSE2__) || !defined(__SSE3__) || \ + !defined(__SSSE3__) || !defined(__SSE4_1__) || !defined(__SSE4_2__) || !defined(__POPCNT__) + #error HOST/ARCH does not support x86_v2 + #endif +#endif + +#include // SSE +#include // SSE2 +#include // SSE3 +#include // SSSE3 +#include // SSE4.1 +#ifdef _MSC_VER + #include // SSE4.2 and POPCNT for MSVC +#else + #include // SSE4.2 + #include // POPCNT +#endif + +int main(int argc, char **argv) +{ + // to prevent optimization + int seed = (int)argv[argc-1][0]; + volatile int result = 0; + + // SSE test + __m128 a = _mm_set1_ps((float)seed); + __m128 b = _mm_set1_ps(2.0f); + __m128 c = _mm_add_ps(a, b); + result += (int)_mm_cvtss_f32(c); + + // SSE2 test + __m128i ai = _mm_set1_epi32(seed); + __m128i bi = _mm_set1_epi32(2); + __m128i ci = _mm_add_epi32(ai, bi); + result += _mm_cvtsi128_si32(ci); + + // SSE3 test + __m128 d = _mm_movehdup_ps(a); + result += (int)_mm_cvtss_f32(d); + + // SSSE3 test + __m128i di = _mm_abs_epi16(_mm_set1_epi16((short)seed)); + result += _mm_cvtsi128_si32(di); + + // SSE4.1 test + __m128i ei = _mm_max_epi32(ai, bi); + result += _mm_cvtsi128_si32(ei); + + // SSE4.2 test + __m128i str1 = _mm_set1_epi8((char)seed); + __m128i str2 = _mm_set1_epi8((char)(seed + 1)); + int res4_2 = _mm_cmpestra(str1, 4, str2, 4, 0); + result += res4_2; + + // POPCNT test + unsigned int test_val = (unsigned int)seed | 0x01234567; + int pcnt = _mm_popcnt_u32(test_val); + result += pcnt; + + return result; +} diff --git a/meson_cpu/x86/test_x86_v3.c b/meson_cpu/x86/test_x86_v3.c new file mode 100644 index 000000000000..0bc496a93ad0 --- /dev/null +++ 
b/meson_cpu/x86/test_x86_v3.c @@ -0,0 +1,66 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. + */ + #if !defined(__AVX__) || !defined(__AVX2__) || !defined(__FMA__) || \ + !defined(__BMI__) || !defined(__BMI2__) || !defined(__LZCNT__) || !defined(__F16C__) + #error HOST/ARCH does not support x86_v3 + #endif +#endif + +#include + +int main(int argc, char **argv) +{ + // to prevent optimization + int seed = (int)argv[argc-1][0]; + volatile int result = 0; + + // AVX test + __m256 avx_a = _mm256_set1_ps((float)seed); + __m256 avx_b = _mm256_set1_ps(2.0f); + __m256 avx_c = _mm256_add_ps(avx_a, avx_b); + float avx_result = _mm256_cvtss_f32(avx_c); + result += (int)avx_result; + + // AVX2 test + __m256i avx2_a = _mm256_set1_epi32(seed); + __m256i avx2_b = _mm256_set1_epi32(2); + __m256i avx2_c = _mm256_add_epi32(avx2_a, avx2_b); + result += _mm256_extract_epi32(avx2_c, 0); + + // FMA test + __m256 fma_a = _mm256_set1_ps((float)seed); + __m256 fma_b = _mm256_set1_ps(2.0f); + __m256 fma_c = _mm256_set1_ps(3.0f); + __m256 fma_result = _mm256_fmadd_ps(fma_a, fma_b, fma_c); + result += (int)_mm256_cvtss_f32(fma_result); + + // BMI1 tests + unsigned int bmi1_src = (unsigned int)seed; + unsigned int tzcnt_result = _tzcnt_u32(bmi1_src); + result += tzcnt_result; + + // BMI2 tests + unsigned int bzhi_result = _bzhi_u32(bmi1_src, 17); + result += (int)bzhi_result; + + unsigned int pdep_result = _pdep_u32(bmi1_src, 0x10101010); + result += pdep_result; + + // LZCNT test + unsigned int lzcnt_result = _lzcnt_u32(bmi1_src); + result += lzcnt_result; + + // F16C tests + __m128 f16c_src = 
_mm_set1_ps((float)seed); + __m128i f16c_half = _mm_cvtps_ph(f16c_src, 0); + __m128 f16c_restored = _mm_cvtph_ps(f16c_half); + result += (int)_mm_cvtss_f32(f16c_restored); + + return result; +} diff --git a/meson_cpu/x86/test_x86_v4.c b/meson_cpu/x86/test_x86_v4.c new file mode 100644 index 000000000000..d49c3a78e3b3 --- /dev/null +++ b/meson_cpu/x86/test_x86_v4.c @@ -0,0 +1,88 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. + */ + #if !defined(__AVX512F__) || !defined(__AVX512CD__) || !defined(__AVX512VL__) || \ + !defined(__AVX512BW__) || !defined(__AVX512DQ__) + #error HOST/ARCH does not support x86_v4 + #endif +#endif + +#include + +int main(int argc, char **argv) +{ + // to prevent optimization + int seed = (int)argv[argc-1][0]; + volatile int result = 0; + + // AVX512F tests (Foundation) + __m512 avx512f_a = _mm512_set1_ps((float)seed); + __m512 avx512f_b = _mm512_set1_ps(2.0f); + __m512 avx512f_c = _mm512_add_ps(avx512f_a, avx512f_b); + float avx512f_result = _mm512_cvtss_f32(avx512f_c); + result += (int)avx512f_result; + + // Test AVX512F mask operations + __mmask16 k1 = _mm512_cmpeq_ps_mask(avx512f_a, avx512f_b); + __m512 masked_result = _mm512_mask_add_ps(avx512f_a, k1, avx512f_b, avx512f_c); + result += _mm512_mask2int(k1); + + // AVX512CD tests (Conflict Detection) + __m512i avx512cd_a = _mm512_set1_epi32(seed); + __m512i avx512cd_b = _mm512_conflict_epi32(avx512cd_a); + result += _mm_cvtsi128_si32(_mm512_extracti32x4_epi32(avx512cd_b, 0)); + + __m512i avx512cd_lzcnt = _mm512_lzcnt_epi32(avx512cd_a); + result += 
_mm_cvtsi128_si32(_mm512_extracti32x4_epi32(avx512cd_lzcnt, 0)); + + // AVX512VL tests (Vector Length Extensions - 128/256-bit vectors with AVX512 features) + __m256 avx512vl_a = _mm256_set1_ps((float)seed); + __m256 avx512vl_b = _mm256_set1_ps(2.0f); + __mmask8 k2 = _mm256_cmp_ps_mask(avx512vl_a, avx512vl_b, _CMP_EQ_OQ); + __m256 avx512vl_c = _mm256_mask_add_ps(avx512vl_a, k2, avx512vl_a, avx512vl_b); + result += (int)_mm256_cvtss_f32(avx512vl_c); + + __m128 avx512vl_sm_a = _mm_set1_ps((float)seed); + __m128 avx512vl_sm_b = _mm_set1_ps(2.0f); + __mmask8 k3 = _mm_cmp_ps_mask(avx512vl_sm_a, avx512vl_sm_b, _CMP_EQ_OQ); + __m128 avx512vl_sm_c = _mm_mask_add_ps(avx512vl_sm_a, k3, avx512vl_sm_a, avx512vl_sm_b); + result += (int)_mm_cvtss_f32(avx512vl_sm_c); + + // AVX512BW tests (Byte and Word) + __m512i avx512bw_a = _mm512_set1_epi16((short)seed); + __m512i avx512bw_b = _mm512_set1_epi16(2); + __mmask32 k4 = _mm512_cmpeq_epi16_mask(avx512bw_a, avx512bw_b); + __m512i avx512bw_c = _mm512_mask_add_epi16(avx512bw_a, k4, avx512bw_a, avx512bw_b); + result += _mm_cvtsi128_si32(_mm512_extracti32x4_epi32(avx512bw_c, 0)); + + // Test byte operations + __m512i avx512bw_bytes_a = _mm512_set1_epi8((char)seed); + __m512i avx512bw_bytes_b = _mm512_set1_epi8(2); + __mmask64 k5 = _mm512_cmpeq_epi8_mask(avx512bw_bytes_a, avx512bw_bytes_b); + result += (k5 & 1); + + // AVX512DQ tests (Doubleword and Quadword) + __m512d avx512dq_a = _mm512_set1_pd((double)seed); + __m512d avx512dq_b = _mm512_set1_pd(2.0); + __mmask8 k6 = _mm512_cmpeq_pd_mask(avx512dq_a, avx512dq_b); + __m512d avx512dq_c = _mm512_mask_add_pd(avx512dq_a, k6, avx512dq_a, avx512dq_b); + double avx512dq_result = _mm512_cvtsd_f64(avx512dq_c); + result += (int)avx512dq_result; + + // Test integer to/from floating point conversion + __m512i avx512dq_back = _mm512_cvtps_epi32(masked_result); + result += _mm_cvtsi128_si32(_mm512_extracti32x4_epi32(avx512dq_back, 0)); + + // Test 64-bit integer operations + __m512i avx512dq_i64_a = 
_mm512_set1_epi64(seed); + __m512i avx512dq_i64_b = _mm512_set1_epi64(2); + __m512i avx512dq_i64_c = _mm512_add_epi64(avx512dq_i64_a, avx512dq_i64_b); + result += _mm_cvtsi128_si32(_mm512_extracti32x4_epi32(avx512dq_i64_c, 0)); + + return result; +} diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index 61a9d53a42d0..2d239d0f5852 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -89,7 +89,7 @@ cpu_family = host_machine.cpu_family() use_svml = ( host_machine.system() == 'linux' and cpu_family == 'x86_64' and - ('AVX512_SKX' in CPU_DISPATCH_NAMES or 'AVX512_SKX' in CPU_BASELINE_NAMES) and + ('X86_V4' in CPU_DISPATCH_NAMES or 'X86_V4' in CPU_BASELINE_NAMES) and not get_option('disable-svml') ) if use_svml @@ -774,7 +774,7 @@ _umath_tests_mtargets = mod_features.multi_targets( '_umath_tests.dispatch.h', 'src/umath/_umath_tests.dispatch.c', dispatch: [ - AVX2, SSE41, SSE2, + X86_V3, X86_V2, ASIMDHP, ASIMD, NEON, VSX3, VSX2, VSX, VXE, VX, RVV, @@ -819,7 +819,7 @@ foreach gen_mtargets : [ 'argfunc.dispatch.h', src_file.process('src/multiarray/argfunc.dispatch.c.src'), [ - AVX512_SKX, AVX2, XOP, SSE42, SSE2, + X86_V4, X86_V3, X86_V2, VSX2, ASIMD, NEON, VXE, VX @@ -856,12 +856,12 @@ foreach gen_mtargets : [ [ 'x86_simd_argsort.dispatch.h', 'src/npysort/x86_simd_argsort.dispatch.cpp', - use_intel_sort ? [AVX512_SKX, AVX2] : [] + use_intel_sort ? [X86_V4, X86_V3] : [] ], [ 'x86_simd_qsort.dispatch.h', 'src/npysort/x86_simd_qsort.dispatch.cpp', - use_intel_sort ? [AVX512_SKX, AVX2] : [] + use_intel_sort ? 
[X86_V4, X86_V3] : [] ], [ 'x86_simd_qsort_16bit.dispatch.h', @@ -923,7 +923,7 @@ foreach gen_mtargets : [ 'loops_arithm_fp.dispatch.h', src_file.process('src/umath/loops_arithm_fp.dispatch.c.src'), [ - [AVX2, FMA3], SSE2, + X86_V3, X86_V2, ASIMD, NEON, VSX3, VSX2, VXE, VX, @@ -934,7 +934,7 @@ foreach gen_mtargets : [ 'loops_arithmetic.dispatch.h', src_file.process('src/umath/loops_arithmetic.dispatch.c.src'), [ - AVX512_SKX, AVX512F, AVX2, SSE41, SSE2, + X86_V4, X86_V3, X86_V2, NEON, VSX4, VSX2, VX, @@ -945,7 +945,7 @@ foreach gen_mtargets : [ 'loops_comparison.dispatch.h', src_file.process('src/umath/loops_comparison.dispatch.c.src'), [ - AVX512_SKX, AVX512F, AVX2, SSE42, SSE2, + X86_V4, X86_V3, X86_V2, VSX3, VSX2, NEON, VXE, VX, @@ -956,14 +956,14 @@ foreach gen_mtargets : [ 'loops_exponent_log.dispatch.h', src_file.process('src/umath/loops_exponent_log.dispatch.c.src'), [ - AVX512_SKX, AVX512F, [AVX2, FMA3] + X86_V4, X86_V3, ] ], [ 'loops_hyperbolic.dispatch.h', src_file.process('src/umath/loops_hyperbolic.dispatch.cpp.src'), [ - AVX512_SKX, [AVX2, FMA3], + X86_V4, X86_V3, VSX4, VSX2, NEON_VFPV4, VXE, @@ -975,7 +975,7 @@ foreach gen_mtargets : [ 'src/umath/loops_logical.dispatch.cpp', [ ASIMD, NEON, - AVX512_SKX, AVX2, SSE2, + X86_V4, X86_V3, X86_V2, VSX2, VX, LSX, @@ -987,7 +987,7 @@ foreach gen_mtargets : [ src_file.process('src/umath/loops_minmax.dispatch.c.src'), [ ASIMD, NEON, - AVX512_SKX, AVX2, SSE2, + X86_V4, X86_V3, X86_V2, VSX2, VXE, VX, LSX, @@ -1004,7 +1004,7 @@ foreach gen_mtargets : [ 'loops_trigonometric.dispatch.h', 'src/umath/loops_trigonometric.dispatch.cpp', [ - AVX512_SKX, [AVX2, FMA3], + X86_V4, X86_V3, VSX4, VSX3, VSX2, NEON_VFPV4, VXE2, VXE, @@ -1014,14 +1014,14 @@ foreach gen_mtargets : [ [ 'loops_umath_fp.dispatch.h', src_file.process('src/umath/loops_umath_fp.dispatch.c.src'), - [AVX512_SKX] + [X86_V4] ], [ 'loops_unary.dispatch.h', src_file.process('src/umath/loops_unary.dispatch.c.src'), [ ASIMD, NEON, - AVX512_SKX, AVX2, SSE2, + 
X86_V4, X86_V3, X86_V2, VSX2, VXE, VX, LSX, @@ -1031,7 +1031,7 @@ foreach gen_mtargets : [ 'loops_unary_fp.dispatch.h', src_file.process('src/umath/loops_unary_fp.dispatch.c.src'), [ - SSE41, SSE2, + X86_V2, VSX2, ASIMD, NEON, VXE, VX, @@ -1042,7 +1042,7 @@ foreach gen_mtargets : [ 'loops_unary_fp_le.dispatch.h', src_file.process('src/umath/loops_unary_fp_le.dispatch.c.src'), [ - SSE41, SSE2, + X86_V2, VSX2, ASIMD, NEON, LSX, @@ -1052,7 +1052,7 @@ foreach gen_mtargets : [ 'loops_unary_complex.dispatch.h', src_file.process('src/umath/loops_unary_complex.dispatch.c.src'), [ - AVX512F, [AVX2, FMA3], SSE2, + X86_V3, X86_V2, ASIMD, NEON, VSX3, VSX2, VXE, VX, @@ -1063,7 +1063,7 @@ foreach gen_mtargets : [ 'loops_autovec.dispatch.h', src_file.process('src/umath/loops_autovec.dispatch.c.src'), [ - AVX2, SSE2, + X86_V3, X86_V2, NEON, VSX2, VX, @@ -1074,7 +1074,7 @@ foreach gen_mtargets : [ [ 'loops_half.dispatch.h', src_file.process('src/umath/loops_half.dispatch.c.src'), - [AVX512_SPR, AVX512_SKX] + [AVX512_SPR, X86_V4] ], ] mtargets = mod_features.multi_targets( diff --git a/numpy/_core/src/common/npy_cpu_features.c b/numpy/_core/src/common/npy_cpu_features.c index e9239a257181..5c53d85ad2ea 100644 --- a/numpy/_core/src/common/npy_cpu_features.c +++ b/numpy/_core/src/common/npy_cpu_features.c @@ -80,12 +80,23 @@ static struct { {NPY_CPU_FEATURE_SSE41, "SSE41"}, {NPY_CPU_FEATURE_POPCNT, "POPCNT"}, {NPY_CPU_FEATURE_SSE42, "SSE42"}, + {NPY_CPU_FEATURE_X86_V2, "X86_V2"}, {NPY_CPU_FEATURE_AVX, "AVX"}, {NPY_CPU_FEATURE_F16C, "F16C"}, {NPY_CPU_FEATURE_XOP, "XOP"}, {NPY_CPU_FEATURE_FMA4, "FMA4"}, {NPY_CPU_FEATURE_FMA3, "FMA3"}, {NPY_CPU_FEATURE_AVX2, "AVX2"}, + {NPY_CPU_FEATURE_LAHF, "LAHF"}, + {NPY_CPU_FEATURE_CX16, "CX16"}, + {NPY_CPU_FEATURE_MOVBE, "MOVBE"}, + {NPY_CPU_FEATURE_BMI, "BMI"}, + {NPY_CPU_FEATURE_BMI2, "BMI2"}, + {NPY_CPU_FEATURE_LZCNT, "LZCNT"}, + {NPY_CPU_FEATURE_GFNI, "GFNI"}, + {NPY_CPU_FEATURE_VPCLMULQDQ, "VPCLMULQDQ"}, + {NPY_CPU_FEATURE_VAES, "VAES"}, + 
{NPY_CPU_FEATURE_X86_V3, "X86_V3"}, {NPY_CPU_FEATURE_AVX512F, "AVX512F"}, {NPY_CPU_FEATURE_AVX512CD, "AVX512CD"}, {NPY_CPU_FEATURE_AVX512ER, "AVX512ER"}, @@ -105,6 +116,7 @@ static struct { {NPY_CPU_FEATURE_AVX512_KNL, "AVX512_KNL"}, {NPY_CPU_FEATURE_AVX512_KNM, "AVX512_KNM"}, {NPY_CPU_FEATURE_AVX512_SKX, "AVX512_SKX"}, + {NPY_CPU_FEATURE_X86_V4, "X86_V4"}, {NPY_CPU_FEATURE_AVX512_CLX, "AVX512_CLX"}, {NPY_CPU_FEATURE_AVX512_CNL, "AVX512_CNL"}, {NPY_CPU_FEATURE_AVX512_ICL, "AVX512_ICL"}, @@ -441,7 +453,13 @@ npy__cpu_init_features(void) #ifdef NPY_CPU_AMD64 npy__cpu_have[NPY_CPU_FEATURE_SSE3] = 1; #endif - return; + // For unsupported compilers, we default to NPY_CPU_X86_V2 availability + // as this is the minimum baseline required to bypass initial capability checks. + // However, we deliberately don't set any additional CPU feature flags, + // allowing us to detect this fallback behavior later via the Python + // __cpu_features__ dictionary. + npy__cpu_have[NPY_CPU_FEATURE_X86_V2] = 1; + return; } npy__cpu_cpuid(reg, 1); @@ -453,36 +471,42 @@ npy__cpu_init_features(void) npy__cpu_have[NPY_CPU_FEATURE_SSE41] = (reg[2] & (1 << 19)) != 0; npy__cpu_have[NPY_CPU_FEATURE_POPCNT] = (reg[2] & (1 << 23)) != 0; npy__cpu_have[NPY_CPU_FEATURE_SSE42] = (reg[2] & (1 << 20)) != 0; + npy__cpu_have[NPY_CPU_FEATURE_CX16] = (reg[2] & (1 << 13)) != 0; npy__cpu_have[NPY_CPU_FEATURE_F16C] = (reg[2] & (1 << 29)) != 0; + npy__cpu_have[NPY_CPU_FEATURE_MOVBE] = (reg[2] & (1 << 22)) != 0; - // check OSXSAVE - if ((reg[2] & (1 << 27)) == 0) - return; - // check AVX OS support - int xcr = npy__cpu_getxcr0(); - if ((xcr & 6) != 6) - return; - npy__cpu_have[NPY_CPU_FEATURE_AVX] = (reg[2] & (1 << 28)) != 0; - if (!npy__cpu_have[NPY_CPU_FEATURE_AVX]) - return; + int osxsave = (reg[2] & (1 << 27)) != 0; + int xcr = 0; + if (osxsave) { + xcr = npy__cpu_getxcr0(); + } + int avx_os = (xcr & 6) == 6; + npy__cpu_have[NPY_CPU_FEATURE_AVX] = (reg[2] & (1 << 28)) != 0 && avx_os; 
npy__cpu_have[NPY_CPU_FEATURE_FMA3] = (reg[2] & (1 << 12)) != 0; // second call to the cpuid to get extended AMD feature bits npy__cpu_cpuid(reg, 0x80000001); - npy__cpu_have[NPY_CPU_FEATURE_XOP] = (reg[2] & (1 << 11)) != 0; - npy__cpu_have[NPY_CPU_FEATURE_FMA4] = (reg[2] & (1 << 16)) != 0; +#ifdef NPY_CPU_AMD64 + // long mode only + npy__cpu_have[NPY_CPU_FEATURE_LAHF] = (reg[2] & (1 << 0)) != 0; +#else + // alawys available + npy__cpu_have[NPY_CPU_FEATURE_LAHF] = 1; +#endif + npy__cpu_have[NPY_CPU_FEATURE_LZCNT] = (reg[2] & (1 << 5)) != 0; + npy__cpu_have[NPY_CPU_FEATURE_POPCNT] |= npy__cpu_have[NPY_CPU_FEATURE_LZCNT]; + npy__cpu_have[NPY_CPU_FEATURE_XOP] = (reg[2] & (1 << 11)) != 0 && npy__cpu_have[NPY_CPU_FEATURE_AVX]; + npy__cpu_have[NPY_CPU_FEATURE_FMA4] = (reg[2] & (1 << 16)) != 0 && npy__cpu_have[NPY_CPU_FEATURE_AVX]; // third call to the cpuid to get extended AVX2 & AVX512 feature bits npy__cpu_cpuid(reg, 7); - npy__cpu_have[NPY_CPU_FEATURE_AVX2] = (reg[1] & (1 << 5)) != 0; - npy__cpu_have[NPY_CPU_FEATURE_AVX2] = npy__cpu_have[NPY_CPU_FEATURE_AVX2] && - npy__cpu_have[NPY_CPU_FEATURE_FMA3]; - if (!npy__cpu_have[NPY_CPU_FEATURE_AVX2]) - return; - // detect AVX2 & FMA3 - npy__cpu_have[NPY_CPU_FEATURE_FMA] = npy__cpu_have[NPY_CPU_FEATURE_FMA3]; + npy__cpu_have[NPY_CPU_FEATURE_AVX2] = (reg[1] & (1 << 5)) != 0 && npy__cpu_have[NPY_CPU_FEATURE_AVX]; + npy__cpu_have[NPY_CPU_FEATURE_BMI] = (reg[1] & (1 << 3)) != 0; + npy__cpu_have[NPY_CPU_FEATURE_BMI2] = (reg[1] & (1 << 8)) != 0 && npy__cpu_have[NPY_CPU_FEATURE_BMI]; + npy__cpu_have[NPY_CPU_FEATURE_GFNI] = (reg[2] & (1 << 8)) != 0; + npy__cpu_have[NPY_CPU_FEATURE_VAES] = (reg[2] & (1 << 9)) != 0; + npy__cpu_have[NPY_CPU_FEATURE_VPCLMULQDQ] = (reg[2] & (1 << 10)) != 0; - // check AVX512 OS support int avx512_os = (xcr & 0xe6) == 0xe6; #if defined(__APPLE__) && defined(__x86_64__) /** @@ -494,7 +518,7 @@ npy__cpu_init_features(void) * - https://github.com/golang/go/issues/43089 * - 
https://github.com/numpy/numpy/issues/19319 */ - if (!avx512_os) { + if (!avx512_os && avx_os) { npy_uintp commpage64_addr = 0x00007fffffe00000ULL; npy_uint16 commpage64_ver = *((npy_uint16*)(commpage64_addr + 0x01E)); // cpu_capabilities64 undefined in versions < 13 @@ -504,58 +528,97 @@ npy__cpu_init_features(void) } } #endif - if (!avx512_os) { - return; - } - npy__cpu_have[NPY_CPU_FEATURE_AVX512F] = (reg[1] & (1 << 16)) != 0; - npy__cpu_have[NPY_CPU_FEATURE_AVX512CD] = (reg[1] & (1 << 28)) != 0; - if (npy__cpu_have[NPY_CPU_FEATURE_AVX512F] && npy__cpu_have[NPY_CPU_FEATURE_AVX512CD]) { + npy__cpu_have[NPY_CPU_FEATURE_AVX512F] = (reg[1] & (1 << 16)) != 0 && avx512_os; + if (npy__cpu_have[NPY_CPU_FEATURE_AVX512F]) { + npy__cpu_have[NPY_CPU_FEATURE_AVX512CD] = (reg[1] & (1 << 28)) != 0; // Knights Landing npy__cpu_have[NPY_CPU_FEATURE_AVX512PF] = (reg[1] & (1 << 26)) != 0; npy__cpu_have[NPY_CPU_FEATURE_AVX512ER] = (reg[1] & (1 << 27)) != 0; - npy__cpu_have[NPY_CPU_FEATURE_AVX512_KNL] = npy__cpu_have[NPY_CPU_FEATURE_AVX512ER] && - npy__cpu_have[NPY_CPU_FEATURE_AVX512PF]; // Knights Mill npy__cpu_have[NPY_CPU_FEATURE_AVX512VPOPCNTDQ] = (reg[2] & (1 << 14)) != 0; npy__cpu_have[NPY_CPU_FEATURE_AVX5124VNNIW] = (reg[3] & (1 << 2)) != 0; npy__cpu_have[NPY_CPU_FEATURE_AVX5124FMAPS] = (reg[3] & (1 << 3)) != 0; - npy__cpu_have[NPY_CPU_FEATURE_AVX512_KNM] = npy__cpu_have[NPY_CPU_FEATURE_AVX512_KNL] && - npy__cpu_have[NPY_CPU_FEATURE_AVX5124FMAPS] && - npy__cpu_have[NPY_CPU_FEATURE_AVX5124VNNIW] && - npy__cpu_have[NPY_CPU_FEATURE_AVX512VPOPCNTDQ]; - // Skylake-X npy__cpu_have[NPY_CPU_FEATURE_AVX512DQ] = (reg[1] & (1 << 17)) != 0; npy__cpu_have[NPY_CPU_FEATURE_AVX512BW] = (reg[1] & (1 << 30)) != 0; npy__cpu_have[NPY_CPU_FEATURE_AVX512VL] = (reg[1] & (1 << 31)) != 0; - npy__cpu_have[NPY_CPU_FEATURE_AVX512_SKX] = npy__cpu_have[NPY_CPU_FEATURE_AVX512BW] && - npy__cpu_have[NPY_CPU_FEATURE_AVX512DQ] && - npy__cpu_have[NPY_CPU_FEATURE_AVX512VL]; // Cascade Lake 
npy__cpu_have[NPY_CPU_FEATURE_AVX512VNNI] = (reg[2] & (1 << 11)) != 0; - npy__cpu_have[NPY_CPU_FEATURE_AVX512_CLX] = npy__cpu_have[NPY_CPU_FEATURE_AVX512_SKX] && - npy__cpu_have[NPY_CPU_FEATURE_AVX512VNNI]; - // Cannon Lake npy__cpu_have[NPY_CPU_FEATURE_AVX512IFMA] = (reg[1] & (1 << 21)) != 0; npy__cpu_have[NPY_CPU_FEATURE_AVX512VBMI] = (reg[2] & (1 << 1)) != 0; - npy__cpu_have[NPY_CPU_FEATURE_AVX512_CNL] = npy__cpu_have[NPY_CPU_FEATURE_AVX512_SKX] && - npy__cpu_have[NPY_CPU_FEATURE_AVX512IFMA] && - npy__cpu_have[NPY_CPU_FEATURE_AVX512VBMI]; // Ice Lake npy__cpu_have[NPY_CPU_FEATURE_AVX512VBMI2] = (reg[2] & (1 << 6)) != 0; npy__cpu_have[NPY_CPU_FEATURE_AVX512BITALG] = (reg[2] & (1 << 12)) != 0; - npy__cpu_have[NPY_CPU_FEATURE_AVX512_ICL] = npy__cpu_have[NPY_CPU_FEATURE_AVX512_CLX] && - npy__cpu_have[NPY_CPU_FEATURE_AVX512_CNL] && - npy__cpu_have[NPY_CPU_FEATURE_AVX512VBMI2] && - npy__cpu_have[NPY_CPU_FEATURE_AVX512BITALG] && - npy__cpu_have[NPY_CPU_FEATURE_AVX512VPOPCNTDQ]; // Sapphire Rapids - npy__cpu_have[NPY_CPU_FEATURE_AVX512FP16] = (reg[3] & (1 << 23)) != 0; - npy__cpu_have[NPY_CPU_FEATURE_AVX512_SPR] = npy__cpu_have[NPY_CPU_FEATURE_AVX512_ICL] && - npy__cpu_have[NPY_CPU_FEATURE_AVX512FP16]; - + npy__cpu_have[NPY_CPU_FEATURE_AVX512FP16] = (reg[3] & (1 << 23)) != 0; } + + // Groups + npy__cpu_have[NPY_CPU_FEATURE_X86_V2] = npy__cpu_have[NPY_CPU_FEATURE_SSE] && + npy__cpu_have[NPY_CPU_FEATURE_SSE2] && + npy__cpu_have[NPY_CPU_FEATURE_SSE3] && + npy__cpu_have[NPY_CPU_FEATURE_SSSE3] && + npy__cpu_have[NPY_CPU_FEATURE_SSE41] && + npy__cpu_have[NPY_CPU_FEATURE_SSE42] && + npy__cpu_have[NPY_CPU_FEATURE_POPCNT] && + #ifdef NPY_CPU_AMD64 + npy__cpu_have[NPY_CPU_FEATURE_CX16] && + #endif + npy__cpu_have[NPY_CPU_FEATURE_LAHF]; + + npy__cpu_have[NPY_CPU_FEATURE_X86_V3] = npy__cpu_have[NPY_CPU_FEATURE_X86_V2] && + npy__cpu_have[NPY_CPU_FEATURE_AVX] && + npy__cpu_have[NPY_CPU_FEATURE_AVX2] && + npy__cpu_have[NPY_CPU_FEATURE_F16C] && + npy__cpu_have[NPY_CPU_FEATURE_FMA3] && 
+ npy__cpu_have[NPY_CPU_FEATURE_BMI] && + npy__cpu_have[NPY_CPU_FEATURE_BMI2] && + npy__cpu_have[NPY_CPU_FEATURE_LZCNT] && + npy__cpu_have[NPY_CPU_FEATURE_MOVBE]; + + + npy__cpu_have[NPY_CPU_FEATURE_X86_V4] = npy__cpu_have[NPY_CPU_FEATURE_X86_V3] && + npy__cpu_have[NPY_CPU_FEATURE_AVX512F] && + npy__cpu_have[NPY_CPU_FEATURE_AVX512CD] && + npy__cpu_have[NPY_CPU_FEATURE_AVX512BW] && + npy__cpu_have[NPY_CPU_FEATURE_AVX512DQ] && + npy__cpu_have[NPY_CPU_FEATURE_AVX512VL]; + + + npy__cpu_have[NPY_CPU_FEATURE_AVX512_ICL] = npy__cpu_have[NPY_CPU_FEATURE_X86_V4] && + npy__cpu_have[NPY_CPU_FEATURE_AVX512VNNI] && + npy__cpu_have[NPY_CPU_FEATURE_AVX512IFMA] && + npy__cpu_have[NPY_CPU_FEATURE_AVX512VBMI] && + npy__cpu_have[NPY_CPU_FEATURE_AVX512VBMI2] && + npy__cpu_have[NPY_CPU_FEATURE_AVX512BITALG] && + npy__cpu_have[NPY_CPU_FEATURE_AVX512VPOPCNTDQ] && + npy__cpu_have[NPY_CPU_FEATURE_GFNI] && + npy__cpu_have[NPY_CPU_FEATURE_VAES] && + npy__cpu_have[NPY_CPU_FEATURE_VPCLMULQDQ]; + + npy__cpu_have[NPY_CPU_FEATURE_AVX512_SPR] = npy__cpu_have[NPY_CPU_FEATURE_AVX512_ICL] && + npy__cpu_have[NPY_CPU_FEATURE_AVX512FP16]; + + + // Legacy groups + npy__cpu_have[NPY_CPU_FEATURE_AVX512_KNL] = npy__cpu_have[NPY_CPU_FEATURE_AVX512F] && + npy__cpu_have[NPY_CPU_FEATURE_AVX512CD] && + npy__cpu_have[NPY_CPU_FEATURE_AVX512ER] && + npy__cpu_have[NPY_CPU_FEATURE_AVX512PF]; + + npy__cpu_have[NPY_CPU_FEATURE_AVX512_KNM] = npy__cpu_have[NPY_CPU_FEATURE_AVX512_KNL] && + npy__cpu_have[NPY_CPU_FEATURE_AVX5124FMAPS] && + npy__cpu_have[NPY_CPU_FEATURE_AVX5124VNNIW] && + npy__cpu_have[NPY_CPU_FEATURE_AVX512VPOPCNTDQ]; + + npy__cpu_have[NPY_CPU_FEATURE_AVX512_CLX] = npy__cpu_have[NPY_CPU_FEATURE_X86_V4] && + npy__cpu_have[NPY_CPU_FEATURE_AVX512VNNI]; + + npy__cpu_have[NPY_CPU_FEATURE_AVX512_CNL] = npy__cpu_have[NPY_CPU_FEATURE_X86_V4] && + npy__cpu_have[NPY_CPU_FEATURE_AVX512IFMA] && + npy__cpu_have[NPY_CPU_FEATURE_AVX512VBMI]; + } /***************** POWER ******************/ diff --git 
a/numpy/_core/src/common/npy_cpu_features.h b/numpy/_core/src/common/npy_cpu_features.h index 7d6a406f8789..fa3c3c809015 100644 --- a/numpy/_core/src/common/npy_cpu_features.h +++ b/numpy/_core/src/common/npy_cpu_features.h @@ -26,8 +26,15 @@ enum npy_cpu_features NPY_CPU_FEATURE_FMA4 = 12, NPY_CPU_FEATURE_FMA3 = 13, NPY_CPU_FEATURE_AVX2 = 14, - NPY_CPU_FEATURE_FMA = 15, // AVX2 & FMA3, provides backward compatibility - + NPY_CPU_FEATURE_LAHF = 15, + NPY_CPU_FEATURE_CX16 = 16, + NPY_CPU_FEATURE_MOVBE = 17, + NPY_CPU_FEATURE_BMI = 18, + NPY_CPU_FEATURE_BMI2 = 19, + NPY_CPU_FEATURE_LZCNT = 20, + NPY_CPU_FEATURE_GFNI = 21, + NPY_CPU_FEATURE_VAES = 22, + NPY_CPU_FEATURE_VPCLMULQDQ = 23, NPY_CPU_FEATURE_AVX512F = 30, NPY_CPU_FEATURE_AVX512CD = 31, NPY_CPU_FEATURE_AVX512ER = 32, @@ -45,6 +52,7 @@ enum npy_cpu_features NPY_CPU_FEATURE_AVX512BITALG = 44, NPY_CPU_FEATURE_AVX512FP16 = 45, + // X86 CPU Groups // Knights Landing (F,CD,ER,PF) NPY_CPU_FEATURE_AVX512_KNL = 101, @@ -56,10 +64,17 @@ enum npy_cpu_features NPY_CPU_FEATURE_AVX512_CLX = 104, // Cannon Lake (F,CD,BW,DQ,VL,IFMA,VBMI) NPY_CPU_FEATURE_AVX512_CNL = 105, - // Ice Lake (F,CD,BW,DQ,VL,IFMA,VBMI,VNNI,VBMI2,BITALG,VPOPCNTDQ) + // Ice Lake (F,CD,BW,DQ,VL,IFMA,VBMI,VNNI,VBMI2,BITALG,VPOPCNTDQ,GFNI,VPCLMULDQ,VAES) NPY_CPU_FEATURE_AVX512_ICL = 106, // Sapphire Rapids (Ice Lake, AVX512FP16) NPY_CPU_FEATURE_AVX512_SPR = 107, + // x86-64-v2 microarchitectures (SSE[1-4.*], POPCNT, LAHF, CX16) + // On 32-bit, cx16 is not available so it is not included + NPY_CPU_FEATURE_X86_V2 = 108, + // x86-64-v3 microarchitectures (X86_V2, AVX, AVX2, FMA3, BMI, BMI2, LZCNT, F16C, MOVBE) + NPY_CPU_FEATURE_X86_V3 = 109, + // x86-64-v4 microarchitectures (X86_V3, AVX512F, AVX512CD, AVX512VL, AVX512BW, AVX512DQ) + NPY_CPU_FEATURE_X86_V4 = NPY_CPU_FEATURE_AVX512_SKX, // IBM/POWER VSX // POWER7 diff --git a/numpy/_core/tests/test_cpu_dispatcher.py b/numpy/_core/tests/test_cpu_dispatcher.py index f665fa6464ef..01fd53a3dbd0 100644 --- 
a/numpy/_core/tests/test_cpu_dispatcher.py +++ b/numpy/_core/tests/test_cpu_dispatcher.py @@ -12,7 +12,7 @@ def test_dispatcher(): Testing the utilities of the CPU dispatcher """ targets = ( - "SSE2", "SSE41", "AVX2", + "X86_V2", "X86_V3", "VSX", "VSX2", "VSX3", "NEON", "ASIMD", "ASIMDHP", "VX", "VXE", "LSX", "RVV" diff --git a/numpy/_core/tests/test_cpu_features.py b/numpy/_core/tests/test_cpu_features.py index ecc806e9c0e5..60e60e3f7c9d 100644 --- a/numpy/_core/tests/test_cpu_features.py +++ b/numpy/_core/tests/test_cpu_features.py @@ -338,30 +338,31 @@ def test_impossible_feature_enable(self): not (is_linux or is_cygwin) or not is_x86, reason="Only for Linux and x86" ) class Test_X86_Features(AbstractTest): - features = [ - "MMX", "SSE", "SSE2", "SSE3", "SSSE3", "SSE41", "POPCNT", "SSE42", - "AVX", "F16C", "XOP", "FMA4", "FMA3", "AVX2", "AVX512F", "AVX512CD", - "AVX512ER", "AVX512PF", "AVX5124FMAPS", "AVX5124VNNIW", "AVX512VPOPCNTDQ", - "AVX512VL", "AVX512BW", "AVX512DQ", "AVX512VNNI", "AVX512IFMA", - "AVX512VBMI", "AVX512VBMI2", "AVX512BITALG", "AVX512FP16", - ] + features = [] + features_groups = { - "AVX512_KNL": ["AVX512F", "AVX512CD", "AVX512ER", "AVX512PF"], - "AVX512_KNM": ["AVX512F", "AVX512CD", "AVX512ER", "AVX512PF", "AVX5124FMAPS", - "AVX5124VNNIW", "AVX512VPOPCNTDQ"], - "AVX512_SKX": ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL"], - "AVX512_CLX": ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512VNNI"], - "AVX512_CNL": ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512IFMA", - "AVX512VBMI"], - "AVX512_ICL": ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512IFMA", - "AVX512VBMI", "AVX512VNNI", "AVX512VBMI2", "AVX512BITALG", "AVX512VPOPCNTDQ"], - "AVX512_SPR": ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", - "AVX512VL", "AVX512IFMA", "AVX512VBMI", "AVX512VNNI", - "AVX512VBMI2", "AVX512BITALG", "AVX512VPOPCNTDQ", - "AVX512FP16"], + "X86_V2": [ + "SSE", "SSE2", "SSE3", "SSSE3", "SSE41", 
"SSE42", + "POPCNT", "LAHF", "CX16" + ], } + features_groups["X86_V3"] = features_groups["X86_V2"] + [ + "AVX", "AVX2", "FMA3", "BMI", "BMI2", + "LZCNT", "F16C", "MOVBE" + ] + features_groups["X86_V4"] = features_groups["X86_V3"] + [ + "AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL" + ] + features_groups["AVX512_ICL"] = features_groups["X86_V4"] + [ + "AVX512IFMA", "AVX512VBMI", "AVX512VNNI", + "AVX512VBMI2", "AVX512BITALG", "AVX512VPOPCNTDQ", + "VAES", "VPCLMULQDQ", "GFNI" + ] + features_groups["AVX512_SPR"] = features_groups["AVX512_ICL"] + ["AVX512FP16"] + features_map = { "SSE3": "PNI", "SSE41": "SSE4_1", "SSE42": "SSE4_2", "FMA3": "FMA", + "BMI": "BMI1", "LZCNT": "ABM", "LAHF": "LAHF_LM", "AVX512VNNI": "AVX512_VNNI", "AVX512BITALG": "AVX512_BITALG", "AVX512VBMI2": "AVX512_VBMI2", "AVX5124FMAPS": "AVX512_4FMAPS", "AVX5124VNNIW": "AVX512_4VNNIW", "AVX512VPOPCNTDQ": "AVX512_VPOPCNTDQ", From 077c00962aeffe83e023ea826f7f479a0e51613f Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Wed, 14 May 2025 19:24:33 +0300 Subject: [PATCH 0455/1718] Fix: Clear FP status to prevent invalid left_shift warnings with `x86-64-v2` baseline Prevents "invalid value encountered in left_shift" warnings on clang-cl when testing bit shifts with long types under `x86-64-v2` baseline. 
--- numpy/_core/src/umath/loops_autovec.dispatch.c.src | 2 ++ 1 file changed, 2 insertions(+) diff --git a/numpy/_core/src/umath/loops_autovec.dispatch.c.src b/numpy/_core/src/umath/loops_autovec.dispatch.c.src index 983fa1b5eb80..7449308a8a2a 100644 --- a/numpy/_core/src/umath/loops_autovec.dispatch.c.src +++ b/numpy/_core/src/umath/loops_autovec.dispatch.c.src @@ -24,6 +24,8 @@ */ #define INT_left_shift_needs_clear_floatstatus #define UINT_left_shift_needs_clear_floatstatus +#define LONG_left_shift_needs_clear_floatstatus +#define ULONG_left_shift_needs_clear_floatstatus /**begin repeat * #TYPE = BYTE, UBYTE, SHORT, USHORT, INT, UINT, From f2ad1f1f64cd930adc304334c48e244eb3609f5b Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Wed, 14 May 2025 19:30:11 +0300 Subject: [PATCH 0456/1718] Set `-mfpmath=sse` on `x86` for gcc/clang numeric consistency Force SSE-based floating-point on 32-bit x86 systems to fix inconsistent results between einsum and other math functions. Prevents test failures with int16 operations by avoiding the x87 FPU's extended precision. --- meson_cpu/x86/meson.build | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/meson_cpu/x86/meson.build b/meson_cpu/x86/meson.build index bd26c615624c..609d53a2d7c1 100644 --- a/meson_cpu/x86/meson.build +++ b/meson_cpu/x86/meson.build @@ -4,7 +4,11 @@ cpu_family = host_machine.cpu_family() mod_features = import('features') HWY_SSE4_FLAGS = ['-DHWY_WANT_SSE4', '-DHWY_DISABLE_PCLMUL_AES'] -X86_64_V2_FLAGS = cpu_family == 'x86'? [] : ['-mcx16'] +# Use SSE for floating-point on x86-32 to ensure numeric consistency. +# The x87 FPU's 80-bit internal precision causes unpredictable rounding +# and overflow behavior when converting to smaller types. SSE maintains +# strict 32/64-bit precision throughout all calculations. +X86_64_V2_FLAGS = cpu_family == 'x86'? ['-mfpmath=sse'] : ['-mcx16'] X86_64_V2_NAMES = cpu_family == 'x86'? 
[] : ['CX16'] X86_V2 = mod_features.new( 'X86_V2', 1, args: ['-msse', '-msse2', '-msse3', '-mssse3', '-msse4.1', '-msse4.2', From 6a9644fbf06a117e143175a6458861cfbad3b4fe Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Mon, 19 May 2025 12:48:23 +0300 Subject: [PATCH 0457/1718] Address review feedback on CPU features and CI workflows - Disable X86_V4 (AVX-512) for MSVC builds due to Highway incompatibility - Add FIXME comment for future MSVC compatibility investigation - Update SIMD CI workflow to reflect `x86-64-v2` baseline - Remove redundant test configurations - Add missing X86_V4 support to unary complex loops --- .github/workflows/linux_simd.yml | 11 +-------- doc/source/reference/simd/build-options.rst | 8 +----- meson.options | 4 --- meson_cpu/x86/meson.build | 18 +++++++++----- numpy/_core/meson.build | 2 +- numpy/_core/src/common/npy_cpu_features.c | 27 +++++++++++++++++---- numpy/_core/src/common/npy_cpu_features.h | 3 ++- numpy/_core/tests/test_cpu_features.py | 4 +-- 8 files changed, 41 insertions(+), 36 deletions(-) diff --git a/.github/workflows/linux_simd.yml b/.github/workflows/linux_simd.yml index c35093d63a14..8002e9f3c749 100644 --- a/.github/workflows/linux_simd.yml +++ b/.github/workflows/linux_simd.yml @@ -7,7 +7,7 @@ name: Linux SIMD tests # # - baseline_only: # Focuses on completing as quickly as possible and acts as a filter for other, more resource-intensive jobs. -# Utilizes only the default baseline targets (e.g., SSE3 on X86_64) without enabling any runtime dispatched features. +# Utilizes only the default baseline targets (e.g., X86_V2 on X86_64) without enabling any runtime dispatched features. # # - old_gcc: # Tests the oldest supported GCC version with default CPU/baseline/dispatch settings. @@ -19,10 +19,6 @@ name: Linux SIMD tests # Tests against the host CPU features set as the baseline without enabling any runtime dispatched features. 
# Intended to assess the entire NumPy codebase against host flags, even for code sections lacking handwritten SIMD intrinsics. # -# - without_avx512/avx2/fma3: -# Uses runtime SIMD dispatching but disables AVX2, FMA3, and AVX512. -# Intended to evaluate 128-bit SIMD extensions without FMA support. -# # - without_avx512: # Uses runtime SIMD dispatching but disables AVX512. # Intended to evaluate 128-bit/256-bit SIMD extensions. @@ -165,11 +161,6 @@ jobs: "-Dallow-noblas=true -Dcpu-dispatch=max-x86_v4", "3.11" ] - - [ - "without avx512/avx2/fma3", - "-Dallow-noblas=true -Dcpu-dispatch=max-x86_v3", - "3.11" - ] env: MESON_ARGS: ${{ matrix.BUILD_PROP[1] }} diff --git a/doc/source/reference/simd/build-options.rst b/doc/source/reference/simd/build-options.rst index 4bb925b05532..6cf27510103c 100644 --- a/doc/source/reference/simd/build-options.rst +++ b/doc/source/reference/simd/build-options.rst @@ -261,7 +261,7 @@ Enables all features supported by the host CPU. Detects the features enabled by the compiler. This option is appended by default to ``cpu-baseline`` if ``-march``, ``-mcpu``, ``-xhost``, or ``/QxHost`` is set in -the environment variable ``CFLAGS``. +the environment variable ``CFLAGS`` unless ``cpu-baseline-detect`` is ``disabled``. 
``MIN`` ~~~~~~~ @@ -303,13 +303,7 @@ Remove or add specific features, useful with ``MAX``, ``MIN``, and ``NATIVE``: - Adding a feature (``+``) includes all implied features - Removing a feature (``-``) excludes all successor features that imply the removed feature -<<<<<<< HEAD -- ``cpu-baseline`` will be treated as "native" if compiler native flag - ``-march=native`` or ``-xHost`` or ``/QxHost`` is enabled through environment variable - ``CFLAGS`` and ``cpu-baseline-detect`` is not ``disabled``:: -======= Examples:: ->>>>>>> f68e178f88 (ENH: Modulate dispatched x86 CPU features) python -m build --wheel -Csetup-args=-Dcpu-dispatch="max-X86_V4" python -m build --wheel -Csetup-args=-Dcpu-baseline="min+X86_V4" diff --git a/meson.options b/meson.options index 236f44af6d6c..e7011a3b2f2e 100644 --- a/meson.options +++ b/meson.options @@ -28,13 +28,9 @@ option('disable-optimization', type: 'boolean', value: false, description: 'Disable CPU optimized code (dispatch,simd,unroll...)') option('cpu-baseline', type: 'string', value: 'min', description: 'Minimal set of required CPU features') -<<<<<<< HEAD option('cpu-baseline-detect', type: 'feature', value: 'auto', description: 'Detect CPU baseline from the compiler flags') -option('cpu-dispatch', type: 'string', value: 'max -xop -fma4', -======= option('cpu-dispatch', type: 'string', value: 'max', ->>>>>>> f68e178f88 (ENH: Modulate dispatched x86 CPU features) description: 'Dispatched set of additional CPU features') option('test-simd', type: 'array', value: [ diff --git a/meson_cpu/x86/meson.build b/meson_cpu/x86/meson.build index 609d53a2d7c1..8035883215e9 100644 --- a/meson_cpu/x86/meson.build +++ b/meson_cpu/x86/meson.build @@ -47,8 +47,8 @@ AVX512_ICL = mod_features.new( ) AVX512_SPR = mod_features.new( 'AVX512_SPR', 35, implies: AVX512_ICL, - args: ['-mavx512fp16'], - group: ['AVX512FP16'], + args: ['-mavx512fp16', '-mavx512bf16'], + group: ['AVX512FP16', 'AVX512BF16'], detect: 'AVX512_SPR', test_code: files(source_root 
+ '/numpy/distutils/checks/cpu_avx512_spr.c')[0] ) @@ -77,13 +77,19 @@ if compiler_id == 'intel' endif if compiler_id == 'msvc' + cc_ver = cc.version() MSVC_SSE4 = cpu_family == 'x86' ? ['/arch:SSE2'] : [] - MSVC_SSE4 = cc.version().version_compare('>=19.40') ? ['/arch:SSE4.2'] : MSVC_SSE4 - X86_V2.update(args: MSVC_SSE4 + HWY_SSE4_FLAGS + ['/fp:contract']) + MSVC_SSE4 = cc_ver.version_compare('>=19.40') ? ['/arch:SSE4.2'] : MSVC_SSE4 + MSVC_SSE4 = cc_ver.version_compare('>=19.30') ? MSVC_SSE4 + ['/fp:contract'] : MSVC_SSE4 + X86_V2.update(args: MSVC_SSE4) clear_arch = '/arch:.*' X86_V3.update(args: {'val': '/arch:AVX2', 'match': clear_arch}) - X86_V4.update(args: {'val': '/arch:AVX512', 'match': clear_arch}) - AVX512_ICL.update(args: '') + # FIXME: After completing transition from universal intrinsics to Highway, + # investigate which MSVC versions are incompatible with Highway's AVX-512 implementation. + X86_V4.update(disable: 'Considered broken by Highway on MSVC') + # To force enable AVX-512, use: + # X86_V4.update(args: [{'val': '/arch:AVX512', 'match': clear_arch}, '-DHWY_BROKEN_MSVC=0']) + AVX512_ICL.update(disable: 'unsupported by Highway on MSVC') endif # legacy CPU features diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index 2d239d0f5852..e6871140492d 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -1052,7 +1052,7 @@ foreach gen_mtargets : [ 'loops_unary_complex.dispatch.h', src_file.process('src/umath/loops_unary_complex.dispatch.c.src'), [ - X86_V3, X86_V2, + X86_V4, X86_V3, X86_V2, ASIMD, NEON, VSX3, VSX2, VXE, VX, diff --git a/numpy/_core/src/common/npy_cpu_features.c b/numpy/_core/src/common/npy_cpu_features.c index 5c53d85ad2ea..46ac4a9d8362 100644 --- a/numpy/_core/src/common/npy_cpu_features.c +++ b/numpy/_core/src/common/npy_cpu_features.c @@ -113,6 +113,7 @@ static struct { {NPY_CPU_FEATURE_AVX512VBMI2, "AVX512VBMI2"}, {NPY_CPU_FEATURE_AVX512BITALG, "AVX512BITALG"}, {NPY_CPU_FEATURE_AVX512FP16 , 
"AVX512FP16"}, + {NPY_CPU_FEATURE_AVX512BF16 , "AVX512BF16"}, {NPY_CPU_FEATURE_AVX512_KNL, "AVX512_KNL"}, {NPY_CPU_FEATURE_AVX512_KNM, "AVX512_KNM"}, {NPY_CPU_FEATURE_AVX512_SKX, "AVX512_SKX"}, @@ -410,12 +411,18 @@ npy__cpu_getxcr0(void) } static void -npy__cpu_cpuid(int reg[4], int func_id) +npy__cpu_cpuid_count(int reg[4], int func_id, int count) { #if defined(_MSC_VER) - __cpuidex(reg, func_id, 0); + __cpuidex(reg, func_id, count); #elif defined(__INTEL_COMPILER) __cpuid(reg, func_id); + // classic Intel compilers do not support count + if (count != 0) { + for (int i = 0; i < 4; i++) { + reg[i] = 0; + } + } #elif defined(__GNUC__) || defined(__clang__) #if defined(NPY_CPU_X86) && defined(__PIC__) // %ebx may be the PIC register @@ -424,13 +431,13 @@ npy__cpu_cpuid(int reg[4], int func_id) "xchg{l}\t{%%}ebx, %1\n\t" : "=a" (reg[0]), "=r" (reg[1]), "=c" (reg[2]), "=d" (reg[3]) - : "a" (func_id), "c" (0) + : "a" (func_id), "c" (count) ); #else __asm__("cpuid\n\t" : "=a" (reg[0]), "=b" (reg[1]), "=c" (reg[2]), "=d" (reg[3]) - : "a" (func_id), "c" (0) + : "a" (func_id), "c" (count) ); #endif #else @@ -438,6 +445,12 @@ npy__cpu_cpuid(int reg[4], int func_id) #endif } +static void +npy__cpu_cpuid(int reg[4], int func_id) +{ + return npy__cpu_cpuid_count(reg, func_id, 0); +} + static void npy__cpu_init_features(void) { @@ -552,6 +565,8 @@ npy__cpu_init_features(void) npy__cpu_have[NPY_CPU_FEATURE_AVX512BITALG] = (reg[2] & (1 << 12)) != 0; // Sapphire Rapids npy__cpu_have[NPY_CPU_FEATURE_AVX512FP16] = (reg[3] & (1 << 23)) != 0; + npy__cpu_cpuid_count(reg, 7, 1); + npy__cpu_have[NPY_CPU_FEATURE_AVX512BF16] = (reg[0] & (1 << 5)) != 0; } // Groups @@ -598,7 +613,9 @@ npy__cpu_init_features(void) npy__cpu_have[NPY_CPU_FEATURE_VPCLMULQDQ]; npy__cpu_have[NPY_CPU_FEATURE_AVX512_SPR] = npy__cpu_have[NPY_CPU_FEATURE_AVX512_ICL] && - npy__cpu_have[NPY_CPU_FEATURE_AVX512FP16]; + npy__cpu_have[NPY_CPU_FEATURE_AVX512FP16] && + npy__cpu_have[NPY_CPU_FEATURE_AVX512BF16]; + // Legacy 
groups diff --git a/numpy/_core/src/common/npy_cpu_features.h b/numpy/_core/src/common/npy_cpu_features.h index fa3c3c809015..de05a17afdb8 100644 --- a/numpy/_core/src/common/npy_cpu_features.h +++ b/numpy/_core/src/common/npy_cpu_features.h @@ -51,6 +51,7 @@ enum npy_cpu_features NPY_CPU_FEATURE_AVX512VBMI2 = 43, NPY_CPU_FEATURE_AVX512BITALG = 44, NPY_CPU_FEATURE_AVX512FP16 = 45, + NPY_CPU_FEATURE_AVX512BF16 = 46, // X86 CPU Groups @@ -66,7 +67,7 @@ enum npy_cpu_features NPY_CPU_FEATURE_AVX512_CNL = 105, // Ice Lake (F,CD,BW,DQ,VL,IFMA,VBMI,VNNI,VBMI2,BITALG,VPOPCNTDQ,GFNI,VPCLMULDQ,VAES) NPY_CPU_FEATURE_AVX512_ICL = 106, - // Sapphire Rapids (Ice Lake, AVX512FP16) + // Sapphire Rapids (Ice Lake, AVX512FP16, AVX512BF16) NPY_CPU_FEATURE_AVX512_SPR = 107, // x86-64-v2 microarchitectures (SSE[1-4.*], POPCNT, LAHF, CX16) // On 32-bit, cx16 is not available so it is not included diff --git a/numpy/_core/tests/test_cpu_features.py b/numpy/_core/tests/test_cpu_features.py index 60e60e3f7c9d..ebf5b49fc357 100644 --- a/numpy/_core/tests/test_cpu_features.py +++ b/numpy/_core/tests/test_cpu_features.py @@ -358,7 +358,7 @@ class Test_X86_Features(AbstractTest): "AVX512VBMI2", "AVX512BITALG", "AVX512VPOPCNTDQ", "VAES", "VPCLMULQDQ", "GFNI" ] - features_groups["AVX512_SPR"] = features_groups["AVX512_ICL"] + ["AVX512FP16"] + features_groups["AVX512_SPR"] = features_groups["AVX512_ICL"] + ["AVX512FP16", "AVX512BF16"] features_map = { "SSE3": "PNI", "SSE41": "SSE4_1", "SSE42": "SSE4_2", "FMA3": "FMA", @@ -366,7 +366,7 @@ class Test_X86_Features(AbstractTest): "AVX512VNNI": "AVX512_VNNI", "AVX512BITALG": "AVX512_BITALG", "AVX512VBMI2": "AVX512_VBMI2", "AVX5124FMAPS": "AVX512_4FMAPS", "AVX5124VNNIW": "AVX512_4VNNIW", "AVX512VPOPCNTDQ": "AVX512_VPOPCNTDQ", - "AVX512FP16": "AVX512_FP16", + "AVX512FP16": "AVX512_FP16", "AVX512BF16": "AVX512_BF16" } def load_flags(self): From 37966c6e179d6432f79ee69e2d02300bddc8abe5 Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Sat, 13 Sep 2025 
09:51:01 +0300 Subject: [PATCH 0458/1718] fix up msvc --- meson_cpu/x86/meson.build | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/meson_cpu/x86/meson.build b/meson_cpu/x86/meson.build index 8035883215e9..add073376d98 100644 --- a/meson_cpu/x86/meson.build +++ b/meson_cpu/x86/meson.build @@ -78,10 +78,11 @@ endif if compiler_id == 'msvc' cc_ver = cc.version() - MSVC_SSE4 = cpu_family == 'x86' ? ['/arch:SSE2'] : [] - MSVC_SSE4 = cc_ver.version_compare('>=19.40') ? ['/arch:SSE4.2'] : MSVC_SSE4 + MSVC_SSE4 = cc_ver.version_compare('>=19.40') ? ['/arch:SSE4.2'] : [] + # 32-bit MSVC does not support /arch:SSE4.2 + MSVC_SSE4 = cpu_family == 'x86' ? ['/arch:SSE2'] : MSVC_SSE4 MSVC_SSE4 = cc_ver.version_compare('>=19.30') ? MSVC_SSE4 + ['/fp:contract'] : MSVC_SSE4 - X86_V2.update(args: MSVC_SSE4) + X86_V2.update(args: MSVC_SSE4 + HWY_SSE4_FLAGS) clear_arch = '/arch:.*' X86_V3.update(args: {'val': '/arch:AVX2', 'match': clear_arch}) # FIXME: After completing transition from universal intrinsics to Highway, From df9770aa302f025d365fa077a42a55797395820b Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Fri, 12 Sep 2025 17:46:31 +0200 Subject: [PATCH 0459/1718] BLD: change file extension for libnpymath on win-arm64 from .a to .lib Closes gh-29577. --- doc/release/upcoming_changes/29750.change.rst | 5 +++++ numpy/meson.build | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) create mode 100644 doc/release/upcoming_changes/29750.change.rst diff --git a/doc/release/upcoming_changes/29750.change.rst b/doc/release/upcoming_changes/29750.change.rst new file mode 100644 index 000000000000..5c72ef13db0f --- /dev/null +++ b/doc/release/upcoming_changes/29750.change.rst @@ -0,0 +1,5 @@ +The ``npymath`` and ``npyrandom`` libraries now have a ``.lib`` rather than a +``.a`` file extension, for compatibility for building with MSVC and +``setuptools``. 
Please note that using these static libraries is discouraged +and for existing projects using it, it's best to use it with a matching +compiler toolchain, which is ``clang-cl`` on Windows on Arm. diff --git a/numpy/meson.build b/numpy/meson.build index 67e4861d7ad6..45d5a2b52eb8 100644 --- a/numpy/meson.build +++ b/numpy/meson.build @@ -32,7 +32,7 @@ endif # than a `.a` file extension in order not to break including them in a # distutils-based build (see gh-23981 and # https://mesonbuild.com/FAQ.html#why-does-building-my-project-with-msvc-output-static-libraries-called-libfooa) -if is_windows and cc.get_id() == 'msvc' +if is_windows and cc.get_id() in ['msvc', 'clang-cl'] name_prefix_staticlib = '' name_suffix_staticlib = 'lib' else From 8cfbad023a871ebc18b1871fa331dd750474730e Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Sat, 13 Sep 2025 16:12:11 +0100 Subject: [PATCH 0460/1718] TYP: Add missing defaults to stubs (#29634) --- numpy/_array_api_info.pyi | 24 +++--- numpy/ctypeslib/_ctypeslib.pyi | 32 ++++---- numpy/fft/_pocketfft.pyi | 112 +++++++++++++------------- numpy/linalg/_linalg.pyi | 132 ++++++++++++++++--------------- numpy/matrixlib/defmatrix.pyi | 6 +- numpy/polynomial/_polybase.pyi | 74 ++++++++--------- numpy/polynomial/chebyshev.pyi | 4 +- numpy/polynomial/polyutils.pyi | 56 ++++++------- numpy/testing/_private/utils.pyi | 8 +- 9 files changed, 228 insertions(+), 220 deletions(-) diff --git a/numpy/_array_api_info.pyi b/numpy/_array_api_info.pyi index b4592ba2c2ee..069ef478de92 100644 --- a/numpy/_array_api_info.pyi +++ b/numpy/_array_api_info.pyi @@ -125,7 +125,7 @@ class __array_namespace_info__: def default_dtypes( self, *, - device: _DeviceLike = ..., + device: _DeviceLike = None, ) -> _DefaultDTypes: ... def devices(self) -> list[_Device]: ... 
@@ -133,49 +133,49 @@ class __array_namespace_info__: def dtypes( self, *, - device: _DeviceLike = ..., - kind: None = ..., + device: _DeviceLike = None, + kind: None = None, ) -> _DTypes: ... @overload def dtypes( self, *, - device: _DeviceLike = ..., + device: _DeviceLike = None, kind: _Permute1[_KindBool], ) -> _DTypesBool: ... @overload def dtypes( self, *, - device: _DeviceLike = ..., + device: _DeviceLike = None, kind: _Permute1[_KindInt], ) -> _DTypesInt: ... @overload def dtypes( self, *, - device: _DeviceLike = ..., + device: _DeviceLike = None, kind: _Permute1[_KindUInt], ) -> _DTypesUInt: ... @overload def dtypes( self, *, - device: _DeviceLike = ..., + device: _DeviceLike = None, kind: _Permute1[_KindFloat], ) -> _DTypesFloat: ... @overload def dtypes( self, *, - device: _DeviceLike = ..., + device: _DeviceLike = None, kind: _Permute1[_KindComplex], ) -> _DTypesComplex: ... @overload def dtypes( self, *, - device: _DeviceLike = ..., + device: _DeviceLike = None, kind: ( _Permute1[_KindInteger] | _Permute2[_KindInt, _KindUInt] @@ -185,7 +185,7 @@ class __array_namespace_info__: def dtypes( self, *, - device: _DeviceLike = ..., + device: _DeviceLike = None, kind: ( _Permute1[_KindNumber] | _Permute3[_KindInteger, _KindFloat, _KindComplex] @@ -195,13 +195,13 @@ class __array_namespace_info__: def dtypes( self, *, - device: _DeviceLike = ..., + device: _DeviceLike = None, kind: tuple[()], ) -> _EmptyDict: ... @overload def dtypes( self, *, - device: _DeviceLike = ..., + device: _DeviceLike = None, kind: tuple[_Kind, ...], ) -> _DTypesUnion: ... 
diff --git a/numpy/ctypeslib/_ctypeslib.pyi b/numpy/ctypeslib/_ctypeslib.pyi index e10e9fb51ab7..ebe9dbf91f04 100644 --- a/numpy/ctypeslib/_ctypeslib.pyi +++ b/numpy/ctypeslib/_ctypeslib.pyi @@ -96,40 +96,40 @@ c_intp = _c_intp @overload def ndpointer( - dtype: None = ..., - ndim: int = ..., - shape: _ShapeLike | None = ..., - flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = ..., + dtype: None = None, + ndim: int | None = None, + shape: _ShapeLike | None = None, + flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = None, ) -> type[_ndptr[None]]: ... @overload def ndpointer( dtype: _DTypeLike[_ScalarT], - ndim: int = ..., + ndim: int | None = None, *, shape: _ShapeLike, - flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = ..., + flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = None, ) -> type[_concrete_ndptr[dtype[_ScalarT]]]: ... @overload def ndpointer( dtype: DTypeLike, - ndim: int = ..., + ndim: int | None = None, *, shape: _ShapeLike, - flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = ..., + flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = None, ) -> type[_concrete_ndptr[dtype]]: ... @overload def ndpointer( dtype: _DTypeLike[_ScalarT], - ndim: int = ..., - shape: None = ..., - flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = ..., + ndim: int | None = None, + shape: None = None, + flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = None, ) -> type[_ndptr[dtype[_ScalarT]]]: ... @overload def ndpointer( dtype: DTypeLike, - ndim: int = ..., - shape: None = ..., - flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = ..., + ndim: int | None = None, + shape: None = None, + flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = None, ) -> type[_ndptr[dtype]]: ... @overload @@ -170,9 +170,9 @@ def as_ctypes_type(dtype: str) -> type[Any]: ... 
@overload def as_array(obj: ctypes._PointerLike, shape: Sequence[int]) -> NDArray[Any]: ... @overload -def as_array(obj: _ArrayLike[_ScalarT], shape: _ShapeLike | None = ...) -> NDArray[_ScalarT]: ... +def as_array(obj: _ArrayLike[_ScalarT], shape: _ShapeLike | None = None) -> NDArray[_ScalarT]: ... @overload -def as_array(obj: object, shape: _ShapeLike | None = ...) -> NDArray[Any]: ... +def as_array(obj: object, shape: _ShapeLike | None = None) -> NDArray[Any]: ... @overload def as_ctypes(obj: np.bool) -> ctypes.c_bool: ... diff --git a/numpy/fft/_pocketfft.pyi b/numpy/fft/_pocketfft.pyi index 4f5e5c944b4c..81064bb174fe 100644 --- a/numpy/fft/_pocketfft.pyi +++ b/numpy/fft/_pocketfft.pyi @@ -25,113 +25,113 @@ _NormKind: TypeAlias = L["backward", "ortho", "forward"] | None def fft( a: ArrayLike, - n: int | None = ..., - axis: int = ..., - norm: _NormKind = ..., - out: NDArray[complex128] | None = ..., + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: NDArray[complex128] | None = None, ) -> NDArray[complex128]: ... def ifft( a: ArrayLike, - n: int | None = ..., - axis: int = ..., - norm: _NormKind = ..., - out: NDArray[complex128] | None = ..., + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: NDArray[complex128] | None = None, ) -> NDArray[complex128]: ... def rfft( a: ArrayLike, - n: int | None = ..., - axis: int = ..., - norm: _NormKind = ..., - out: NDArray[complex128] | None = ..., + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: NDArray[complex128] | None = None, ) -> NDArray[complex128]: ... def irfft( a: ArrayLike, - n: int | None = ..., - axis: int = ..., - norm: _NormKind = ..., - out: NDArray[float64] | None = ..., + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: NDArray[float64] | None = None, ) -> NDArray[float64]: ... 
# Input array must be compatible with `np.conjugate` def hfft( a: _ArrayLikeNumber_co, - n: int | None = ..., - axis: int = ..., - norm: _NormKind = ..., - out: NDArray[float64] | None = ..., + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: NDArray[float64] | None = None, ) -> NDArray[float64]: ... def ihfft( a: ArrayLike, - n: int | None = ..., - axis: int = ..., - norm: _NormKind = ..., - out: NDArray[complex128] | None = ..., + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: NDArray[complex128] | None = None, ) -> NDArray[complex128]: ... def fftn( a: ArrayLike, - s: Sequence[int] | None = ..., - axes: Sequence[int] | None = ..., - norm: _NormKind = ..., - out: NDArray[complex128] | None = ..., + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: NDArray[complex128] | None = None, ) -> NDArray[complex128]: ... def ifftn( a: ArrayLike, - s: Sequence[int] | None = ..., - axes: Sequence[int] | None = ..., - norm: _NormKind = ..., - out: NDArray[complex128] | None = ..., + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: NDArray[complex128] | None = None, ) -> NDArray[complex128]: ... def rfftn( a: ArrayLike, - s: Sequence[int] | None = ..., - axes: Sequence[int] | None = ..., - norm: _NormKind = ..., - out: NDArray[complex128] | None = ..., + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: NDArray[complex128] | None = None, ) -> NDArray[complex128]: ... def irfftn( a: ArrayLike, - s: Sequence[int] | None = ..., - axes: Sequence[int] | None = ..., - norm: _NormKind = ..., - out: NDArray[float64] | None = ..., + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: NDArray[float64] | None = None, ) -> NDArray[float64]: ... 
def fft2( a: ArrayLike, - s: Sequence[int] | None = ..., - axes: Sequence[int] | None = ..., - norm: _NormKind = ..., - out: NDArray[complex128] | None = ..., + s: Sequence[int] | None = None, + axes: Sequence[int] | None = ..., # = (-2, -1) + norm: _NormKind = None, + out: NDArray[complex128] | None = None, ) -> NDArray[complex128]: ... def ifft2( a: ArrayLike, - s: Sequence[int] | None = ..., - axes: Sequence[int] | None = ..., - norm: _NormKind = ..., - out: NDArray[complex128] | None = ..., + s: Sequence[int] | None = None, + axes: Sequence[int] | None = ..., # = (-2, -1) + norm: _NormKind = None, + out: NDArray[complex128] | None = None, ) -> NDArray[complex128]: ... def rfft2( a: ArrayLike, - s: Sequence[int] | None = ..., - axes: Sequence[int] | None = ..., - norm: _NormKind = ..., - out: NDArray[complex128] | None = ..., + s: Sequence[int] | None = None, + axes: Sequence[int] | None = ..., # = (-2, -1) + norm: _NormKind = None, + out: NDArray[complex128] | None = None, ) -> NDArray[complex128]: ... def irfft2( a: ArrayLike, - s: Sequence[int] | None = ..., - axes: Sequence[int] | None = ..., - norm: _NormKind = ..., - out: NDArray[float64] | None = ..., + s: Sequence[int] | None = None, + axes: Sequence[int] | None = ..., # = (-2, -1) + norm: _NormKind = None, + out: NDArray[float64] | None = None, ) -> NDArray[float64]: ... diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index 51844817e2dc..a63492013ade 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -113,19 +113,19 @@ class SVDResult(NamedTuple): def tensorsolve( a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, - axes: Iterable[int] | None = ..., + axes: Iterable[int] | None = None, ) -> NDArray[float64]: ... @overload def tensorsolve( a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, - axes: Iterable[int] | None = ..., + axes: Iterable[int] | None = None, ) -> NDArray[floating]: ... 
@overload def tensorsolve( a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, - axes: Iterable[int] | None = ..., + axes: Iterable[int] | None = None, ) -> NDArray[complexfloating]: ... @overload @@ -147,17 +147,17 @@ def solve( @overload def tensorinv( a: _ArrayLikeInt_co, - ind: int = ..., + ind: int = 2, ) -> NDArray[float64]: ... @overload def tensorinv( a: _ArrayLikeFloat_co, - ind: int = ..., + ind: int = 2, ) -> NDArray[floating]: ... @overload def tensorinv( a: _ArrayLikeComplex_co, - ind: int = ..., + ind: int = 2, ) -> NDArray[complexfloating]: ... @overload @@ -211,11 +211,11 @@ def outer( ) -> _ArrayT: ... @overload -def qr(a: _ArrayLikeInt_co, mode: _ModeKind = ...) -> QRResult: ... +def qr(a: _ArrayLikeInt_co, mode: _ModeKind = "reduced") -> QRResult: ... @overload -def qr(a: _ArrayLikeFloat_co, mode: _ModeKind = ...) -> QRResult: ... +def qr(a: _ArrayLikeFloat_co, mode: _ModeKind = "reduced") -> QRResult: ... @overload -def qr(a: _ArrayLikeComplex_co, mode: _ModeKind = ...) -> QRResult: ... +def qr(a: _ArrayLikeComplex_co, mode: _ModeKind = "reduced") -> QRResult: ... @overload def eigvals(a: _ArrayLikeInt_co) -> NDArray[float64] | NDArray[complex128]: ... @@ -225,9 +225,9 @@ def eigvals(a: _ArrayLikeFloat_co) -> NDArray[floating] | NDArray[complexfloatin def eigvals(a: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... @overload -def eigvalsh(a: _ArrayLikeInt_co, UPLO: L["L", "U", "l", "u"] = ...) -> NDArray[float64]: ... +def eigvalsh(a: _ArrayLikeInt_co, UPLO: L["L", "U", "l", "u"] = "L") -> NDArray[float64]: ... @overload -def eigvalsh(a: _ArrayLikeComplex_co, UPLO: L["L", "U", "l", "u"] = ...) -> NDArray[floating]: ... +def eigvalsh(a: _ArrayLikeComplex_co, UPLO: L["L", "U", "l", "u"] = "L") -> NDArray[floating]: ... @overload def eig(a: _ArrayLikeInt_co) -> EigResult: ... @@ -239,69 +239,69 @@ def eig(a: _ArrayLikeComplex_co) -> EigResult: ... 
@overload def eigh( a: _ArrayLikeInt_co, - UPLO: L["L", "U", "l", "u"] = ..., + UPLO: L["L", "U", "l", "u"] = "L", ) -> EighResult: ... @overload def eigh( a: _ArrayLikeFloat_co, - UPLO: L["L", "U", "l", "u"] = ..., + UPLO: L["L", "U", "l", "u"] = "L", ) -> EighResult: ... @overload def eigh( a: _ArrayLikeComplex_co, - UPLO: L["L", "U", "l", "u"] = ..., + UPLO: L["L", "U", "l", "u"] = "L", ) -> EighResult: ... @overload def svd( a: _ArrayLikeInt_co, - full_matrices: bool = ..., - compute_uv: L[True] = ..., - hermitian: bool = ..., + full_matrices: bool = True, + compute_uv: L[True] = True, + hermitian: bool = False, ) -> SVDResult: ... @overload def svd( a: _ArrayLikeFloat_co, - full_matrices: bool = ..., - compute_uv: L[True] = ..., - hermitian: bool = ..., + full_matrices: bool = True, + compute_uv: L[True] = True, + hermitian: bool = False, ) -> SVDResult: ... @overload def svd( a: _ArrayLikeComplex_co, - full_matrices: bool = ..., - compute_uv: L[True] = ..., - hermitian: bool = ..., + full_matrices: bool = True, + compute_uv: L[True] = True, + hermitian: bool = False, ) -> SVDResult: ... @overload def svd( a: _ArrayLikeInt_co, - full_matrices: bool = ..., + full_matrices: bool = True, *, compute_uv: L[False], - hermitian: bool = ..., + hermitian: bool = False, ) -> NDArray[float64]: ... @overload def svd( a: _ArrayLikeInt_co, full_matrices: bool, compute_uv: L[False], - hermitian: bool = ..., + hermitian: bool = False, ) -> NDArray[float64]: ... @overload def svd( a: _ArrayLikeComplex_co, - full_matrices: bool = ..., + full_matrices: bool = True, *, compute_uv: L[False], - hermitian: bool = ..., + hermitian: bool = False, ) -> NDArray[floating]: ... @overload def svd( a: _ArrayLikeComplex_co, full_matrices: bool, compute_uv: L[False], - hermitian: bool = ..., + hermitian: bool = False, ) -> NDArray[floating]: ... 
def svdvals( @@ -310,34 +310,34 @@ def svdvals( # TODO: Returns a scalar for 2D arrays and # a `(x.ndim - 2)`` dimensionl array otherwise -def cond(x: _ArrayLikeComplex_co, p: float | L["fro", "nuc"] | None = ...) -> Any: ... +def cond(x: _ArrayLikeComplex_co, p: float | L["fro", "nuc"] | None = None) -> Any: ... # TODO: Returns `int` for <2D arrays and `intp` otherwise def matrix_rank( A: _ArrayLikeComplex_co, - tol: _ArrayLikeFloat_co | None = ..., - hermitian: bool = ..., + tol: _ArrayLikeFloat_co | None = None, + hermitian: bool = False, *, - rtol: _ArrayLikeFloat_co | None = ..., + rtol: _ArrayLikeFloat_co | None = None, ) -> Any: ... @overload def pinv( a: _ArrayLikeInt_co, - rcond: _ArrayLikeFloat_co = ..., - hermitian: bool = ..., + rcond: _ArrayLikeFloat_co = None, + hermitian: bool = False, ) -> NDArray[float64]: ... @overload def pinv( a: _ArrayLikeFloat_co, - rcond: _ArrayLikeFloat_co = ..., - hermitian: bool = ..., + rcond: _ArrayLikeFloat_co = None, + hermitian: bool = False, ) -> NDArray[floating]: ... @overload def pinv( a: _ArrayLikeComplex_co, - rcond: _ArrayLikeFloat_co = ..., - hermitian: bool = ..., + rcond: _ArrayLikeFloat_co = None, + hermitian: bool = False, ) -> NDArray[complexfloating]: ... # TODO: Returns a 2-tuple of scalars for 2D arrays and @@ -349,21 +349,21 @@ def slogdet(a: _ArrayLikeComplex_co) -> SlogdetResult: ... def det(a: _ArrayLikeComplex_co) -> Any: ... @overload -def lstsq(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, rcond: float | None = ...) -> tuple[ +def lstsq(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, rcond: float | None = None) -> tuple[ NDArray[float64], NDArray[float64], int32, NDArray[float64], ]: ... @overload -def lstsq(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, rcond: float | None = ...) -> tuple[ +def lstsq(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, rcond: float | None = None) -> tuple[ NDArray[floating], NDArray[floating], int32, NDArray[floating], ]: ... 
@overload -def lstsq(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, rcond: float | None = ...) -> tuple[ +def lstsq(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, rcond: float | None = None) -> tuple[ NDArray[complexfloating], NDArray[floating], int32, @@ -373,16 +373,24 @@ def lstsq(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, rcond: float | None @overload def norm( x: ArrayLike, - ord: float | L["fro", "nuc"] | None = ..., - axis: None = ..., - keepdims: bool = ..., + ord: float | L["fro", "nuc"] | None = None, + axis: None = None, + keepdims: L[False] = False, ) -> floating: ... @overload def norm( x: ArrayLike, - ord: float | L["fro", "nuc"] | None = ..., - axis: SupportsInt | SupportsIndex | tuple[int, ...] = ..., - keepdims: bool = ..., + ord: float | L["fro", "nuc"] | None, + axis: SupportsInt | SupportsIndex | tuple[int, ...] | None, + keepdims: bool = False, +) -> Any: ... +@overload +def norm( + x: ArrayLike, + ord: float | L["fro", "nuc"] | None = None, + *, + axis: SupportsInt | SupportsIndex | tuple[int, ...] | None, + keepdims: bool = False, ) -> Any: ... @overload @@ -390,16 +398,16 @@ def matrix_norm( x: ArrayLike, /, *, - ord: float | L["fro", "nuc"] | None = ..., - keepdims: bool = ..., + ord: float | L["fro", "nuc"] | None = "fro", + keepdims: L[False] = False, ) -> floating: ... @overload def matrix_norm( x: ArrayLike, /, *, - ord: float | L["fro", "nuc"] | None = ..., - keepdims: bool = ..., + ord: float | L["fro", "nuc"] | None = "fro", + keepdims: bool = False, ) -> Any: ... @overload @@ -407,40 +415,40 @@ def vector_norm( x: ArrayLike, /, *, - axis: None = ..., - ord: float | None = ..., - keepdims: bool = ..., + axis: None = None, + ord: float | None = 2, + keepdims: L[False] = False, ) -> floating: ... @overload def vector_norm( x: ArrayLike, /, *, - axis: SupportsInt | SupportsIndex | tuple[int, ...] 
= ..., - ord: float | None = ..., - keepdims: bool = ..., + axis: SupportsInt | SupportsIndex | tuple[int, ...], + ord: float | None = 2, + keepdims: bool = False, ) -> Any: ... # TODO: Returns a scalar or array def multi_dot( arrays: Iterable[_ArrayLikeComplex_co | _ArrayLikeObject_co | _ArrayLikeTD64_co], *, - out: NDArray[Any] | None = ..., + out: NDArray[Any] | None = None, ) -> Any: ... def diagonal( x: ArrayLike, # >= 2D array /, *, - offset: SupportsIndex = ..., + offset: SupportsIndex = 0, ) -> NDArray[Any]: ... def trace( x: ArrayLike, # >= 2D array /, *, - offset: SupportsIndex = ..., - dtype: DTypeLike | None = ..., + offset: SupportsIndex = 0, + dtype: DTypeLike | None = None, ) -> Any: ... @overload diff --git a/numpy/matrixlib/defmatrix.pyi b/numpy/matrixlib/defmatrix.pyi index ee8f83746998..05e4c77303c0 100644 --- a/numpy/matrixlib/defmatrix.pyi +++ b/numpy/matrixlib/defmatrix.pyi @@ -8,10 +8,10 @@ __all__ = ["asmatrix", "bmat", "matrix"] def bmat( obj: str | Sequence[ArrayLike] | NDArray[Any], - ldict: Mapping[str, Any] | None = ..., - gdict: Mapping[str, Any] | None = ..., + ldict: Mapping[str, Any] | None = None, + gdict: Mapping[str, Any] | None = None, ) -> matrix[tuple[int, int], Any]: ... def asmatrix( - data: ArrayLike, dtype: DTypeLike = ... + data: ArrayLike, dtype: DTypeLike = None ) -> matrix[tuple[int, int], Any]: ... diff --git a/numpy/polynomial/_polybase.pyi b/numpy/polynomial/_polybase.pyi index 30c906fa3b4b..e82e441e4c64 100644 --- a/numpy/polynomial/_polybase.pyi +++ b/numpy/polynomial/_polybase.pyi @@ -148,7 +148,7 @@ class ABCPolyBase(Generic[_NameCo], abc.ABC): def copy(self, /) -> Self: ... def degree(self, /) -> int: ... def cutdeg(self, /) -> Self: ... - def trim(self, /, tol: _FloatLike_co = ...) -> Self: ... + def trim(self, /, tol: _FloatLike_co = 0) -> Self: ... def truncate(self, /, size: _AnyInt) -> Self: ... 
@overload @@ -157,24 +157,24 @@ class ABCPolyBase(Generic[_NameCo], abc.ABC): /, domain: _SeriesLikeCoef_co | None, kind: type[_Other], - window: _SeriesLikeCoef_co | None = ..., + window: _SeriesLikeCoef_co | None = None, ) -> _Other: ... @overload def convert( self, /, - domain: _SeriesLikeCoef_co | None = ..., + domain: _SeriesLikeCoef_co | None = None, *, kind: type[_Other], - window: _SeriesLikeCoef_co | None = ..., + window: _SeriesLikeCoef_co | None = None, ) -> _Other: ... @overload def convert( self, /, - domain: _SeriesLikeCoef_co | None = ..., + domain: _SeriesLikeCoef_co | None = None, kind: None = None, - window: _SeriesLikeCoef_co | None = ..., + window: _SeriesLikeCoef_co | None = None, ) -> Self: ... def mapparms(self, /) -> _Tuple2[Any]: ... @@ -182,20 +182,20 @@ class ABCPolyBase(Generic[_NameCo], abc.ABC): def integ( self, /, - m: SupportsIndex = ..., - k: _CoefLike_co | _SeriesLikeCoef_co = ..., - lbnd: _CoefLike_co | None = ..., + m: SupportsIndex = 1, + k: _CoefLike_co | _SeriesLikeCoef_co = ..., # = [] + lbnd: _CoefLike_co | None = None, ) -> Self: ... - def deriv(self, /, m: SupportsIndex = ...) -> Self: ... + def deriv(self, /, m: SupportsIndex = 1) -> Self: ... def roots(self, /) -> _CoefSeries: ... def linspace( self, /, - n: SupportsIndex = ..., - domain: _SeriesLikeCoef_co | None = ..., + n: SupportsIndex = 100, + domain: _SeriesLikeCoef_co | None = None, ) -> _Tuple2[_Series[np.float64 | np.complex128]]: ... 
@overload @@ -205,12 +205,12 @@ class ABCPolyBase(Generic[_NameCo], abc.ABC): x: _SeriesLikeCoef_co, y: _SeriesLikeCoef_co, deg: int | _SeriesLikeInt_co, - domain: _SeriesLikeCoef_co | None = ..., - rcond: _FloatLike_co = ..., - full: Literal[False] = ..., - w: _SeriesLikeCoef_co | None = ..., - window: _SeriesLikeCoef_co | None = ..., - symbol: str = ..., + domain: _SeriesLikeCoef_co | None = None, + rcond: _FloatLike_co | None = None, + full: Literal[False] = False, + w: _SeriesLikeCoef_co | None = None, + window: _SeriesLikeCoef_co | None = None, + symbol: str = "x", ) -> Self: ... @overload @classmethod @@ -219,13 +219,13 @@ class ABCPolyBase(Generic[_NameCo], abc.ABC): x: _SeriesLikeCoef_co, y: _SeriesLikeCoef_co, deg: int | _SeriesLikeInt_co, - domain: _SeriesLikeCoef_co | None = ..., - rcond: _FloatLike_co = ..., + domain: _SeriesLikeCoef_co | None = None, + rcond: _FloatLike_co | None = None, *, full: Literal[True], - w: _SeriesLikeCoef_co | None = ..., - window: _SeriesLikeCoef_co | None = ..., - symbol: str = ..., + w: _SeriesLikeCoef_co | None = None, + window: _SeriesLikeCoef_co | None = None, + symbol: str = "x", ) -> tuple[Self, Sequence[np.inexact | np.int32]]: ... @overload @classmethod @@ -237,43 +237,43 @@ class ABCPolyBase(Generic[_NameCo], abc.ABC): domain: _SeriesLikeCoef_co | None, rcond: _FloatLike_co, full: Literal[True], /, - w: _SeriesLikeCoef_co | None = ..., - window: _SeriesLikeCoef_co | None = ..., - symbol: str = ..., + w: _SeriesLikeCoef_co | None = None, + window: _SeriesLikeCoef_co | None = None, + symbol: str = "x", ) -> tuple[Self, Sequence[np.inexact | np.int32]]: ... @classmethod def fromroots( cls, roots: _ArrayLikeCoef_co, - domain: _SeriesLikeCoef_co | None = ..., - window: _SeriesLikeCoef_co | None = ..., - symbol: str = ..., + domain: _SeriesLikeCoef_co | None = ..., # = [] + window: _SeriesLikeCoef_co | None = None, + symbol: str = "x", ) -> Self: ... 
@classmethod def identity( cls, - domain: _SeriesLikeCoef_co | None = ..., - window: _SeriesLikeCoef_co | None = ..., - symbol: str = ..., + domain: _SeriesLikeCoef_co | None = None, + window: _SeriesLikeCoef_co | None = None, + symbol: str = "x", ) -> Self: ... @classmethod def basis( cls, deg: _AnyInt, - domain: _SeriesLikeCoef_co | None = ..., - window: _SeriesLikeCoef_co | None = ..., - symbol: str = ..., + domain: _SeriesLikeCoef_co | None = None, + window: _SeriesLikeCoef_co | None = None, + symbol: str = "x", ) -> Self: ... @classmethod def cast( cls, series: ABCPolyBase, - domain: _SeriesLikeCoef_co | None = ..., - window: _SeriesLikeCoef_co | None = ..., + domain: _SeriesLikeCoef_co | None = None, + window: _SeriesLikeCoef_co | None = None, ) -> Self: ... @classmethod diff --git a/numpy/polynomial/chebyshev.pyi b/numpy/polynomial/chebyshev.pyi index 85c92816a261..30db7dfb9e6b 100644 --- a/numpy/polynomial/chebyshev.pyi +++ b/numpy/polynomial/chebyshev.pyi @@ -150,7 +150,7 @@ class Chebyshev(ABCPolyBase[L["T"]]): cls, func: Callable[[npt.NDArray[np.float64]], _CoefSeries], deg: _IntLike_co, - domain: _SeriesLikeCoef_co | None = ..., + domain: _SeriesLikeCoef_co | None = None, args: tuple[()] = ..., ) -> Self: ... @overload @@ -162,7 +162,7 @@ class Chebyshev(ABCPolyBase[L["T"]]): _CoefSeries, ], deg: _IntLike_co, - domain: _SeriesLikeCoef_co | None = ..., + domain: _SeriesLikeCoef_co | None = None, *, args: Iterable[Any], ) -> Self: ... diff --git a/numpy/polynomial/polyutils.pyi b/numpy/polynomial/polyutils.pyi index 65ae4e5503b2..8938c3cc8259 100644 --- a/numpy/polynomial/polyutils.pyi +++ b/numpy/polynomial/polyutils.pyi @@ -59,47 +59,47 @@ _AnyVanderF: TypeAlias = Callable[ @overload def as_series( alist: npt.NDArray[np.integer] | _FloatArray, - trim: bool = ..., + trim: bool = True, ) -> list[_FloatSeries]: ... @overload def as_series( alist: _ComplexArray, - trim: bool = ..., + trim: bool = True, ) -> list[_ComplexSeries]: ... 
@overload def as_series( alist: _ObjectArray, - trim: bool = ..., + trim: bool = True, ) -> list[_ObjectSeries]: ... @overload def as_series( # type: ignore[overload-overlap] alist: Iterable[_FloatArray | npt.NDArray[np.integer]], - trim: bool = ..., + trim: bool = True, ) -> list[_FloatSeries]: ... @overload def as_series( alist: Iterable[_ComplexArray], - trim: bool = ..., + trim: bool = True, ) -> list[_ComplexSeries]: ... @overload def as_series( alist: Iterable[_ObjectArray], - trim: bool = ..., + trim: bool = True, ) -> list[_ObjectSeries]: ... @overload def as_series( # type: ignore[overload-overlap] alist: Iterable[_SeriesLikeFloat_co | float], - trim: bool = ..., + trim: bool = True, ) -> list[_FloatSeries]: ... @overload def as_series( alist: Iterable[_SeriesLikeComplex_co | complex], - trim: bool = ..., + trim: bool = True, ) -> list[_ComplexSeries]: ... @overload def as_series( alist: Iterable[_SeriesLikeCoef_co | object], - trim: bool = ..., + trim: bool = True, ) -> list[_ObjectSeries]: ... _T_seq = TypeVar("_T_seq", bound=_CoefArray | Sequence[_CoefLike_co]) @@ -108,32 +108,32 @@ def trimseq(seq: _T_seq) -> _T_seq: ... @overload def trimcoef( # type: ignore[overload-overlap] c: npt.NDArray[np.integer] | _FloatArray, - tol: _FloatLike_co = ..., + tol: _FloatLike_co = 0, ) -> _FloatSeries: ... @overload def trimcoef( c: _ComplexArray, - tol: _FloatLike_co = ..., + tol: _FloatLike_co = 0, ) -> _ComplexSeries: ... @overload def trimcoef( c: _ObjectArray, - tol: _FloatLike_co = ..., + tol: _FloatLike_co = 0, ) -> _ObjectSeries: ... @overload def trimcoef( # type: ignore[overload-overlap] c: _SeriesLikeFloat_co | float, - tol: _FloatLike_co = ..., + tol: _FloatLike_co = 0, ) -> _FloatSeries: ... @overload def trimcoef( c: _SeriesLikeComplex_co | complex, - tol: _FloatLike_co = ..., + tol: _FloatLike_co = 0, ) -> _ComplexSeries: ... 
@overload def trimcoef( c: _SeriesLikeCoef_co | object, - tol: _FloatLike_co = ..., + tol: _FloatLike_co = 0, ) -> _ObjectSeries: ... @overload @@ -361,9 +361,9 @@ def _fit( # type: ignore[overload-overlap] y: _ArrayLikeFloat_co, deg: _SeriesLikeInt_co, domain: _SeriesLikeFloat_co | None = ..., - rcond: _FloatLike_co | None = ..., - full: Literal[False] = ..., - w: _SeriesLikeFloat_co | None = ..., + rcond: _FloatLike_co | None = None, + full: Literal[False] = False, + w: _SeriesLikeFloat_co | None = None, ) -> _FloatArray: ... @overload def _fit( @@ -372,9 +372,9 @@ def _fit( y: _ArrayLikeComplex_co, deg: _SeriesLikeInt_co, domain: _SeriesLikeComplex_co | None = ..., - rcond: _FloatLike_co | None = ..., - full: Literal[False] = ..., - w: _SeriesLikeComplex_co | None = ..., + rcond: _FloatLike_co | None = None, + full: Literal[False] = False, + w: _SeriesLikeComplex_co | None = None, ) -> _ComplexArray: ... @overload def _fit( @@ -383,9 +383,9 @@ def _fit( y: _ArrayLikeCoef_co, deg: _SeriesLikeInt_co, domain: _SeriesLikeCoef_co | None = ..., - rcond: _FloatLike_co | None = ..., - full: Literal[False] = ..., - w: _SeriesLikeCoef_co | None = ..., + rcond: _FloatLike_co | None = None, + full: Literal[False] = False, + w: _SeriesLikeCoef_co | None = None, ) -> _CoefArray: ... @overload def _fit( @@ -397,7 +397,7 @@ def _fit( rcond: _FloatLike_co | None, full: Literal[True], /, - w: _SeriesLikeCoef_co | None = ..., + w: _SeriesLikeCoef_co | None = None, ) -> tuple[_CoefSeries, Sequence[np.inexact | np.int32]]: ... @overload def _fit( @@ -406,11 +406,11 @@ def _fit( y: _SeriesLikeCoef_co, deg: _SeriesLikeInt_co, domain: _SeriesLikeCoef_co | None = ..., - rcond: _FloatLike_co | None = ..., + rcond: _FloatLike_co | None = None, *, full: Literal[True], - w: _SeriesLikeCoef_co | None = ..., + w: _SeriesLikeCoef_co | None = None, ) -> tuple[_CoefSeries, Sequence[np.inexact | np.int32]]: ... def _as_int(x: SupportsIndex, desc: str) -> int: ... 
-def format_float(x: _FloatLike_co, parens: bool = ...) -> str: ... +def format_float(x: _FloatLike_co, parens: bool = False) -> str: ... diff --git a/numpy/testing/_private/utils.pyi b/numpy/testing/_private/utils.pyi index 76455df87eb0..a9b773cf2247 100644 --- a/numpy/testing/_private/utils.pyi +++ b/numpy/testing/_private/utils.pyi @@ -178,10 +178,10 @@ else: def build_err_msg( arrays: Iterable[object], err_msg: object, - header: str = ..., - verbose: bool = ..., - names: Sequence[str] = ..., - precision: SupportsIndex | None = ..., + header: str = "Items are not equal:", + verbose: bool = True, + names: Sequence[str] = ..., # = ('ACTUAL', 'DESIRED') + precision: SupportsIndex | None = 8, ) -> str: ... # From beb8c9cf993faddc58811eda47c141e110aecde2 Mon Sep 17 00:00:00 2001 From: Britney Whittington <103079612+bwhitt7@users.noreply.github.com> Date: Sat, 13 Sep 2025 14:16:21 -0400 Subject: [PATCH 0461/1718] TST: Replace test_smoke xunit setup with methods (#29671) --- numpy/random/tests/test_smoke.py | 537 +++++++++++++++++-------------- 1 file changed, 300 insertions(+), 237 deletions(-) diff --git a/numpy/random/tests/test_smoke.py b/numpy/random/tests/test_smoke.py index 6f07443f79a9..5353a72a1174 100644 --- a/numpy/random/tests/test_smoke.py +++ b/numpy/random/tests/test_smoke.py @@ -1,4 +1,5 @@ import pickle +from dataclasses import dataclass from functools import partial import pytest @@ -7,12 +8,8 @@ from numpy.random import MT19937, PCG64, PCG64DXSM, SFC64, Generator, Philox from numpy.testing import assert_, assert_array_equal, assert_equal - -@pytest.fixture(scope='module', - params=(np.bool, np.int8, np.int16, np.int32, np.int64, - np.uint8, np.uint16, np.uint32, np.uint64)) -def dtype(request): - return request.param +DTYPES_BOOL_INT_UINT = (np.bool, np.int8, np.int16, np.int32, np.int64, + np.uint8, np.uint16, np.uint32, np.uint64) def params_0(f): @@ -92,403 +89,459 @@ def warmup(rg, n=None): rg.random(n, dtype=np.float32) +@dataclass +class 
RNGData: + bit_generator: type[np.random.BitGenerator] + advance: int + seed: list[int] + rg: Generator + seed_vector_bits: int + + class RNG: @classmethod - def setup_class(cls): + def _create_rng(cls): # Overridden in test classes. Place holder to silence IDE noise - cls.bit_generator = PCG64 - cls.advance = None - cls.seed = [12345] - cls.rg = Generator(cls.bit_generator(*cls.seed)) - cls.initial_state = cls.rg.bit_generator.state - cls.seed_vector_bits = 64 - cls._extra_setup() - - @classmethod - def _extra_setup(cls): - cls.vec_1d = np.arange(2.0, 102.0) - cls.vec_2d = np.arange(2.0, 102.0)[None, :] - cls.mat = np.arange(2.0, 102.0, 0.01).reshape((100, 100)) - cls.seed_error = TypeError - - def _reset_state(self): - self.rg.bit_generator.state = self.initial_state + bit_generator = PCG64 + advance = None + seed = [12345] + rg = Generator(bit_generator(*seed)) + seed_vector_bits = 64 + return RNGData(bit_generator, advance, seed, rg, seed_vector_bits) def test_init(self): - rg = Generator(self.bit_generator()) - state = rg.bit_generator.state - rg.standard_normal(1) - rg.standard_normal(1) - rg.bit_generator.state = state - new_state = rg.bit_generator.state + data = self._create_rng() + data.rg = Generator(data.bit_generator()) + state = data.rg.bit_generator.state + data.rg.standard_normal(1) + data.rg.standard_normal(1) + data.rg.bit_generator.state = state + new_state = data.rg.bit_generator.state assert_(comp_state(state, new_state)) def test_advance(self): - state = self.rg.bit_generator.state - if hasattr(self.rg.bit_generator, 'advance'): - self.rg.bit_generator.advance(self.advance) - assert_(not comp_state(state, self.rg.bit_generator.state)) + data = self._create_rng() + state = data.rg.bit_generator.state + if hasattr(data.rg.bit_generator, 'advance'): + data.rg.bit_generator.advance(data.advance) + assert_(not comp_state(state, data.rg.bit_generator.state)) else: - bitgen_name = self.rg.bit_generator.__class__.__name__ + bitgen_name = 
data.rg.bit_generator.__class__.__name__ pytest.skip(f'Advance is not supported by {bitgen_name}') def test_jump(self): - state = self.rg.bit_generator.state - if hasattr(self.rg.bit_generator, 'jumped'): - bit_gen2 = self.rg.bit_generator.jumped() + rg = self._create_rng().rg + state = rg.bit_generator.state + if hasattr(rg.bit_generator, 'jumped'): + bit_gen2 = rg.bit_generator.jumped() jumped_state = bit_gen2.state assert_(not comp_state(state, jumped_state)) - self.rg.random(2 * 3 * 5 * 7 * 11 * 13 * 17) - self.rg.bit_generator.state = state - bit_gen3 = self.rg.bit_generator.jumped() + rg.random(2 * 3 * 5 * 7 * 11 * 13 * 17) + rg.bit_generator.state = state + bit_gen3 = rg.bit_generator.jumped() rejumped_state = bit_gen3.state assert_(comp_state(jumped_state, rejumped_state)) else: - bitgen_name = self.rg.bit_generator.__class__.__name__ + bitgen_name = rg.bit_generator.__class__.__name__ if bitgen_name not in ('SFC64',): raise AttributeError(f'no "jumped" in {bitgen_name}') pytest.skip(f'Jump is not supported by {bitgen_name}') def test_uniform(self): - r = self.rg.uniform(-1.0, 0.0, size=10) + rg = self._create_rng().rg + r = rg.uniform(-1.0, 0.0, size=10) assert_(len(r) == 10) assert_((r > -1).all()) assert_((r <= 0).all()) def test_uniform_array(self): - r = self.rg.uniform(np.array([-1.0] * 10), 0.0, size=10) + rg = self._create_rng().rg + r = rg.uniform(np.array([-1.0] * 10), 0.0, size=10) assert_(len(r) == 10) assert_((r > -1).all()) assert_((r <= 0).all()) - r = self.rg.uniform(np.array([-1.0] * 10), + r = rg.uniform(np.array([-1.0] * 10), np.array([0.0] * 10), size=10) assert_(len(r) == 10) assert_((r > -1).all()) assert_((r <= 0).all()) - r = self.rg.uniform(-1.0, np.array([0.0] * 10), size=10) + r = rg.uniform(-1.0, np.array([0.0] * 10), size=10) assert_(len(r) == 10) assert_((r > -1).all()) assert_((r <= 0).all()) def test_random(self): - assert_(len(self.rg.random(10)) == 10) - params_0(self.rg.random) + rg = self._create_rng().rg + 
assert_(len(rg.random(10)) == 10) + params_0(rg.random) def test_standard_normal_zig(self): - assert_(len(self.rg.standard_normal(10)) == 10) + rg = self._create_rng().rg + assert_(len(rg.standard_normal(10)) == 10) def test_standard_normal(self): - assert_(len(self.rg.standard_normal(10)) == 10) - params_0(self.rg.standard_normal) + rg = self._create_rng().rg + assert_(len(rg.standard_normal(10)) == 10) + params_0(rg.standard_normal) def test_standard_gamma(self): - assert_(len(self.rg.standard_gamma(10, 10)) == 10) - assert_(len(self.rg.standard_gamma(np.array([10] * 10), 10)) == 10) - params_1(self.rg.standard_gamma) + rg = self._create_rng().rg + assert_(len(rg.standard_gamma(10, 10)) == 10) + assert_(len(rg.standard_gamma(np.array([10] * 10), 10)) == 10) + params_1(rg.standard_gamma) def test_standard_exponential(self): - assert_(len(self.rg.standard_exponential(10)) == 10) - params_0(self.rg.standard_exponential) + rg = self._create_rng().rg + assert_(len(rg.standard_exponential(10)) == 10) + params_0(rg.standard_exponential) def test_standard_exponential_float(self): - randoms = self.rg.standard_exponential(10, dtype='float32') + rg = self._create_rng().rg + randoms = rg.standard_exponential(10, dtype='float32') assert_(len(randoms) == 10) assert randoms.dtype == np.float32 - params_0(partial(self.rg.standard_exponential, dtype='float32')) + params_0(partial(rg.standard_exponential, dtype='float32')) def test_standard_exponential_float_log(self): - randoms = self.rg.standard_exponential(10, dtype='float32', + rg = self._create_rng().rg + randoms = rg.standard_exponential(10, dtype='float32', method='inv') assert_(len(randoms) == 10) assert randoms.dtype == np.float32 - params_0(partial(self.rg.standard_exponential, dtype='float32', + params_0(partial(rg.standard_exponential, dtype='float32', method='inv')) def test_standard_cauchy(self): - assert_(len(self.rg.standard_cauchy(10)) == 10) - params_0(self.rg.standard_cauchy) + rg = self._create_rng().rg + 
assert_(len(rg.standard_cauchy(10)) == 10) + params_0(rg.standard_cauchy) def test_standard_t(self): - assert_(len(self.rg.standard_t(10, 10)) == 10) - params_1(self.rg.standard_t) + rg = self._create_rng().rg + assert_(len(rg.standard_t(10, 10)) == 10) + params_1(rg.standard_t) def test_binomial(self): - assert_(self.rg.binomial(10, .5) >= 0) - assert_(self.rg.binomial(1000, .5) >= 0) + rg = self._create_rng().rg + assert_(rg.binomial(10, .5) >= 0) + assert_(rg.binomial(1000, .5) >= 0) def test_reset_state(self): - state = self.rg.bit_generator.state - int_1 = self.rg.integers(2**31) - self.rg.bit_generator.state = state - int_2 = self.rg.integers(2**31) + rg = self._create_rng().rg + state = rg.bit_generator.state + int_1 = rg.integers(2**31) + rg.bit_generator.state = state + int_2 = rg.integers(2**31) assert_(int_1 == int_2) def test_entropy_init(self): - rg = Generator(self.bit_generator()) - rg2 = Generator(self.bit_generator()) + bit_generator = self._create_rng().bit_generator + rg = Generator(bit_generator()) + rg2 = Generator(bit_generator()) assert_(not comp_state(rg.bit_generator.state, rg2.bit_generator.state)) def test_seed(self): - rg = Generator(self.bit_generator(*self.seed)) - rg2 = Generator(self.bit_generator(*self.seed)) + data = self._create_rng() + rg = Generator(data.bit_generator(*data.seed)) + rg2 = Generator(data.bit_generator(*data.seed)) rg.random() rg2.random() assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state)) def test_reset_state_gauss(self): - rg = Generator(self.bit_generator(*self.seed)) + data = self._create_rng() + rg = Generator(data.bit_generator(*data.seed)) rg.standard_normal() state = rg.bit_generator.state n1 = rg.standard_normal(size=10) - rg2 = Generator(self.bit_generator()) + rg2 = Generator(data.bit_generator()) rg2.bit_generator.state = state n2 = rg2.standard_normal(size=10) assert_array_equal(n1, n2) def test_reset_state_uint32(self): - rg = Generator(self.bit_generator(*self.seed)) + data = 
self._create_rng() + rg = Generator(data.bit_generator(*data.seed)) rg.integers(0, 2 ** 24, 120, dtype=np.uint32) state = rg.bit_generator.state n1 = rg.integers(0, 2 ** 24, 10, dtype=np.uint32) - rg2 = Generator(self.bit_generator()) + rg2 = Generator(data.bit_generator()) rg2.bit_generator.state = state n2 = rg2.integers(0, 2 ** 24, 10, dtype=np.uint32) assert_array_equal(n1, n2) def test_reset_state_float(self): - rg = Generator(self.bit_generator(*self.seed)) + data = self._create_rng() + rg = Generator(data.bit_generator(*data.seed)) rg.random(dtype='float32') state = rg.bit_generator.state n1 = rg.random(size=10, dtype='float32') - rg2 = Generator(self.bit_generator()) + rg2 = Generator(data.bit_generator()) rg2.bit_generator.state = state n2 = rg2.random(size=10, dtype='float32') assert_((n1 == n2).all()) def test_shuffle(self): + rg = self._create_rng().rg original = np.arange(200, 0, -1) - permuted = self.rg.permutation(original) + permuted = rg.permutation(original) assert_((original != permuted).any()) def test_permutation(self): + rg = self._create_rng().rg original = np.arange(200, 0, -1) - permuted = self.rg.permutation(original) + permuted = rg.permutation(original) assert_((original != permuted).any()) def test_beta(self): - vals = self.rg.beta(2.0, 2.0, 10) + rg = self._create_rng().rg + vals = rg.beta(2.0, 2.0, 10) assert_(len(vals) == 10) - vals = self.rg.beta(np.array([2.0] * 10), 2.0) + vals = rg.beta(np.array([2.0] * 10), 2.0) assert_(len(vals) == 10) - vals = self.rg.beta(2.0, np.array([2.0] * 10)) + vals = rg.beta(2.0, np.array([2.0] * 10)) assert_(len(vals) == 10) - vals = self.rg.beta(np.array([2.0] * 10), np.array([2.0] * 10)) + vals = rg.beta(np.array([2.0] * 10), np.array([2.0] * 10)) assert_(len(vals) == 10) - vals = self.rg.beta(np.array([2.0] * 10), np.array([[2.0]] * 10)) + vals = rg.beta(np.array([2.0] * 10), np.array([[2.0]] * 10)) assert_(vals.shape == (10, 10)) def test_bytes(self): - vals = self.rg.bytes(10) + rg = 
self._create_rng().rg + vals = rg.bytes(10) assert_(len(vals) == 10) def test_chisquare(self): - vals = self.rg.chisquare(2.0, 10) + rg = self._create_rng().rg + vals = rg.chisquare(2.0, 10) assert_(len(vals) == 10) - params_1(self.rg.chisquare) + params_1(rg.chisquare) def test_exponential(self): - vals = self.rg.exponential(2.0, 10) + rg = self._create_rng().rg + vals = rg.exponential(2.0, 10) assert_(len(vals) == 10) - params_1(self.rg.exponential) + params_1(rg.exponential) def test_f(self): - vals = self.rg.f(3, 1000, 10) + rg = self._create_rng().rg + vals = rg.f(3, 1000, 10) assert_(len(vals) == 10) def test_gamma(self): - vals = self.rg.gamma(3, 2, 10) + rg = self._create_rng().rg + vals = rg.gamma(3, 2, 10) assert_(len(vals) == 10) def test_geometric(self): - vals = self.rg.geometric(0.5, 10) + rg = self._create_rng().rg + vals = rg.geometric(0.5, 10) assert_(len(vals) == 10) - params_1(self.rg.exponential, bounded=True) + params_1(rg.exponential, bounded=True) def test_gumbel(self): - vals = self.rg.gumbel(2.0, 2.0, 10) + rg = self._create_rng().rg + vals = rg.gumbel(2.0, 2.0, 10) assert_(len(vals) == 10) def test_laplace(self): - vals = self.rg.laplace(2.0, 2.0, 10) + rg = self._create_rng().rg + vals = rg.laplace(2.0, 2.0, 10) assert_(len(vals) == 10) def test_logitic(self): - vals = self.rg.logistic(2.0, 2.0, 10) + rg = self._create_rng().rg + vals = rg.logistic(2.0, 2.0, 10) assert_(len(vals) == 10) def test_logseries(self): - vals = self.rg.logseries(0.5, 10) + rg = self._create_rng().rg + vals = rg.logseries(0.5, 10) assert_(len(vals) == 10) def test_negative_binomial(self): - vals = self.rg.negative_binomial(10, 0.2, 10) + rg = self._create_rng().rg + vals = rg.negative_binomial(10, 0.2, 10) assert_(len(vals) == 10) def test_noncentral_chisquare(self): - vals = self.rg.noncentral_chisquare(10, 2, 10) + rg = self._create_rng().rg + vals = rg.noncentral_chisquare(10, 2, 10) assert_(len(vals) == 10) def test_noncentral_f(self): - vals = 
self.rg.noncentral_f(3, 1000, 2, 10) + rg = self._create_rng().rg + vals = rg.noncentral_f(3, 1000, 2, 10) assert_(len(vals) == 10) - vals = self.rg.noncentral_f(np.array([3] * 10), 1000, 2) + vals = rg.noncentral_f(np.array([3] * 10), 1000, 2) assert_(len(vals) == 10) - vals = self.rg.noncentral_f(3, np.array([1000] * 10), 2) + vals = rg.noncentral_f(3, np.array([1000] * 10), 2) assert_(len(vals) == 10) - vals = self.rg.noncentral_f(3, 1000, np.array([2] * 10)) + vals = rg.noncentral_f(3, 1000, np.array([2] * 10)) assert_(len(vals) == 10) def test_normal(self): - vals = self.rg.normal(10, 0.2, 10) + rg = self._create_rng().rg + vals = rg.normal(10, 0.2, 10) assert_(len(vals) == 10) def test_pareto(self): - vals = self.rg.pareto(3.0, 10) + rg = self._create_rng().rg + vals = rg.pareto(3.0, 10) assert_(len(vals) == 10) def test_poisson(self): - vals = self.rg.poisson(10, 10) + rg = self._create_rng().rg + vals = rg.poisson(10, 10) assert_(len(vals) == 10) - vals = self.rg.poisson(np.array([10] * 10)) + vals = rg.poisson(np.array([10] * 10)) assert_(len(vals) == 10) - params_1(self.rg.poisson) + params_1(rg.poisson) def test_power(self): - vals = self.rg.power(0.2, 10) + rg = self._create_rng().rg + vals = rg.power(0.2, 10) assert_(len(vals) == 10) def test_integers(self): - vals = self.rg.integers(10, 20, 10) + rg = self._create_rng().rg + vals = rg.integers(10, 20, 10) assert_(len(vals) == 10) def test_rayleigh(self): - vals = self.rg.rayleigh(0.2, 10) + rg = self._create_rng().rg + vals = rg.rayleigh(0.2, 10) assert_(len(vals) == 10) - params_1(self.rg.rayleigh, bounded=True) + params_1(rg.rayleigh, bounded=True) def test_vonmises(self): - vals = self.rg.vonmises(10, 0.2, 10) + rg = self._create_rng().rg + vals = rg.vonmises(10, 0.2, 10) assert_(len(vals) == 10) def test_wald(self): - vals = self.rg.wald(1.0, 1.0, 10) + rg = self._create_rng().rg + vals = rg.wald(1.0, 1.0, 10) assert_(len(vals) == 10) def test_weibull(self): - vals = self.rg.weibull(1.0, 10) + rg 
= self._create_rng().rg + vals = rg.weibull(1.0, 10) assert_(len(vals) == 10) def test_zipf(self): - vals = self.rg.zipf(10, 10) + rg = self._create_rng().rg + vec_1d = np.arange(2.0, 102.0) + vec_2d = np.arange(2.0, 102.0)[None, :] + mat = np.arange(2.0, 102.0, 0.01).reshape((100, 100)) + vals = rg.zipf(10, 10) assert_(len(vals) == 10) - vals = self.rg.zipf(self.vec_1d) + vals = rg.zipf(vec_1d) assert_(len(vals) == 100) - vals = self.rg.zipf(self.vec_2d) + vals = rg.zipf(vec_2d) assert_(vals.shape == (1, 100)) - vals = self.rg.zipf(self.mat) + vals = rg.zipf(mat) assert_(vals.shape == (100, 100)) def test_hypergeometric(self): - vals = self.rg.hypergeometric(25, 25, 20) + rg = self._create_rng().rg + vals = rg.hypergeometric(25, 25, 20) assert_(np.isscalar(vals)) - vals = self.rg.hypergeometric(np.array([25] * 10), 25, 20) + vals = rg.hypergeometric(np.array([25] * 10), 25, 20) assert_(vals.shape == (10,)) def test_triangular(self): - vals = self.rg.triangular(-5, 0, 5) + rg = self._create_rng().rg + vals = rg.triangular(-5, 0, 5) assert_(np.isscalar(vals)) - vals = self.rg.triangular(-5, np.array([0] * 10), 5) + vals = rg.triangular(-5, np.array([0] * 10), 5) assert_(vals.shape == (10,)) def test_multivariate_normal(self): + rg = self._create_rng().rg mean = [0, 0] cov = [[1, 0], [0, 100]] # diagonal covariance - x = self.rg.multivariate_normal(mean, cov, 5000) + x = rg.multivariate_normal(mean, cov, 5000) assert_(x.shape == (5000, 2)) - x_zig = self.rg.multivariate_normal(mean, cov, 5000) + x_zig = rg.multivariate_normal(mean, cov, 5000) assert_(x.shape == (5000, 2)) - x_inv = self.rg.multivariate_normal(mean, cov, 5000) + x_inv = rg.multivariate_normal(mean, cov, 5000) assert_(x.shape == (5000, 2)) assert_((x_zig != x_inv).any()) def test_multinomial(self): - vals = self.rg.multinomial(100, [1.0 / 3, 2.0 / 3]) + rg = self._create_rng().rg + vals = rg.multinomial(100, [1.0 / 3, 2.0 / 3]) assert_(vals.shape == (2,)) - vals = self.rg.multinomial(100, [1.0 / 3, 2.0 
/ 3], size=10) + vals = rg.multinomial(100, [1.0 / 3, 2.0 / 3], size=10) assert_(vals.shape == (10, 2)) def test_dirichlet(self): - s = self.rg.dirichlet((10, 5, 3), 20) + rg = self._create_rng().rg + s = rg.dirichlet((10, 5, 3), 20) assert_(s.shape == (20, 3)) def test_pickle(self): - pick = pickle.dumps(self.rg) + rg = self._create_rng().rg + pick = pickle.dumps(rg) unpick = pickle.loads(pick) - assert_(type(self.rg) == type(unpick)) - assert_(comp_state(self.rg.bit_generator.state, + assert_(type(rg) == type(unpick)) + assert_(comp_state(rg.bit_generator.state, unpick.bit_generator.state)) - pick = pickle.dumps(self.rg) + pick = pickle.dumps(rg) unpick = pickle.loads(pick) - assert_(type(self.rg) == type(unpick)) - assert_(comp_state(self.rg.bit_generator.state, + assert_(type(rg) == type(unpick)) + assert_(comp_state(rg.bit_generator.state, unpick.bit_generator.state)) def test_seed_array(self): - if self.seed_vector_bits is None: - bitgen_name = self.bit_generator.__name__ + data = self._create_rng() + if data.seed_vector_bits is None: + bitgen_name = data.bit_generator.__name__ pytest.skip(f'Vector seeding is not supported by {bitgen_name}') - if self.seed_vector_bits == 32: + if data.seed_vector_bits == 32: dtype = np.uint32 else: dtype = np.uint64 seed = np.array([1], dtype=dtype) - bg = self.bit_generator(seed) + bg = data.bit_generator(seed) state1 = bg.state - bg = self.bit_generator(1) + bg = data.bit_generator(1) state2 = bg.state assert_(comp_state(state1, state2)) seed = np.arange(4, dtype=dtype) - bg = self.bit_generator(seed) + bg = data.bit_generator(seed) state1 = bg.state - bg = self.bit_generator(seed[0]) + bg = data.bit_generator(seed[0]) state2 = bg.state assert_(not comp_state(state1, state2)) seed = np.arange(1500, dtype=dtype) - bg = self.bit_generator(seed) + bg = data.bit_generator(seed) state1 = bg.state - bg = self.bit_generator(seed[0]) + bg = data.bit_generator(seed[0]) state2 = bg.state assert_(not comp_state(state1, state2)) seed = 
2 ** np.mod(np.arange(1500, dtype=dtype), - self.seed_vector_bits - 1) + 1 - bg = self.bit_generator(seed) + data.seed_vector_bits - 1) + 1 + bg = data.bit_generator(seed) state1 = bg.state - bg = self.bit_generator(seed[0]) + bg = data.bit_generator(seed[0]) state2 = bg.state assert_(not comp_state(state1, state2)) def test_uniform_float(self): - rg = Generator(self.bit_generator(12345)) + bit_generator = self._create_rng().bit_generator + rg = Generator(bit_generator(12345)) warmup(rg) state = rg.bit_generator.state r1 = rg.random(11, dtype=np.float32) - rg2 = Generator(self.bit_generator()) + rg2 = Generator(bit_generator()) warmup(rg2) rg2.bit_generator.state = state r2 = rg2.random(11, dtype=np.float32) @@ -497,11 +550,12 @@ def test_uniform_float(self): assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state)) def test_gamma_floats(self): - rg = Generator(self.bit_generator()) + bit_generator = self._create_rng().bit_generator + rg = Generator(bit_generator()) warmup(rg) state = rg.bit_generator.state r1 = rg.standard_gamma(4.0, 11, dtype=np.float32) - rg2 = Generator(self.bit_generator()) + rg2 = Generator(bit_generator()) warmup(rg2) rg2.bit_generator.state = state r2 = rg2.standard_gamma(4.0, 11, dtype=np.float32) @@ -510,11 +564,12 @@ def test_gamma_floats(self): assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state)) def test_normal_floats(self): - rg = Generator(self.bit_generator()) + bit_generator = self._create_rng().bit_generator + rg = Generator(bit_generator()) warmup(rg) state = rg.bit_generator.state r1 = rg.standard_normal(11, dtype=np.float32) - rg2 = Generator(self.bit_generator()) + rg2 = Generator(bit_generator()) warmup(rg2) rg2.bit_generator.state = state r2 = rg2.standard_normal(11, dtype=np.float32) @@ -523,11 +578,12 @@ def test_normal_floats(self): assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state)) def test_normal_zig_floats(self): - rg = Generator(self.bit_generator()) + bit_generator = 
self._create_rng().bit_generator + rg = Generator(bit_generator()) warmup(rg) state = rg.bit_generator.state r1 = rg.standard_normal(11, dtype=np.float32) - rg2 = Generator(self.bit_generator()) + rg2 = Generator(bit_generator()) warmup(rg2) rg2.bit_generator.state = state r2 = rg2.standard_normal(11, dtype=np.float32) @@ -536,7 +592,7 @@ def test_normal_zig_floats(self): assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state)) def test_output_fill(self): - rg = self.rg + rg = self._create_rng().rg state = rg.bit_generator.state size = (31, 7, 97) existing = np.empty(size) @@ -558,7 +614,7 @@ def test_output_fill(self): assert_equal(direct, existing) def test_output_filling_uniform(self): - rg = self.rg + rg = self._create_rng().rg state = rg.bit_generator.state size = (31, 7, 97) existing = np.empty(size) @@ -576,7 +632,7 @@ def test_output_filling_uniform(self): assert_equal(direct, existing) def test_output_filling_exponential(self): - rg = self.rg + rg = self._create_rng().rg state = rg.bit_generator.state size = (31, 7, 97) existing = np.empty(size) @@ -594,7 +650,7 @@ def test_output_filling_exponential(self): assert_equal(direct, existing) def test_output_filling_gamma(self): - rg = self.rg + rg = self._create_rng().rg state = rg.bit_generator.state size = (31, 7, 97) existing = np.zeros(size) @@ -612,7 +668,7 @@ def test_output_filling_gamma(self): assert_equal(direct, existing) def test_output_filling_gamma_broadcast(self): - rg = self.rg + rg = self._create_rng().rg state = rg.bit_generator.state size = (31, 7, 97) mu = np.arange(97.0) + 1.0 @@ -631,7 +687,7 @@ def test_output_filling_gamma_broadcast(self): assert_equal(direct, existing) def test_output_fill_error(self): - rg = self.rg + rg = self._create_rng().rg size = (31, 7, 97) existing = np.empty(size) with pytest.raises(TypeError): @@ -653,7 +709,14 @@ def test_output_fill_error(self): with pytest.raises(ValueError): rg.standard_gamma(1.0, out=existing[::3]) + 
@pytest.mark.parametrize("dtype", DTYPES_BOOL_INT_UINT) def test_integers_broadcast(self, dtype): + rg = self._create_rng().rg + initial_state = rg.bit_generator.state + + def reset_state(rng): + rng.bit_generator.state = initial_state + if dtype == np.bool: upper = 2 lower = 0 @@ -661,45 +724,50 @@ def test_integers_broadcast(self, dtype): info = np.iinfo(dtype) upper = int(info.max) + 1 lower = info.min - self._reset_state() - a = self.rg.integers(lower, [upper] * 10, dtype=dtype) - self._reset_state() - b = self.rg.integers([lower] * 10, upper, dtype=dtype) + reset_state(rg) + rg.bit_generator.state = initial_state + a = rg.integers(lower, [upper] * 10, dtype=dtype) + reset_state(rg) + b = rg.integers([lower] * 10, upper, dtype=dtype) assert_equal(a, b) - self._reset_state() - c = self.rg.integers(lower, upper, size=10, dtype=dtype) + reset_state(rg) + c = rg.integers(lower, upper, size=10, dtype=dtype) assert_equal(a, c) - self._reset_state() - d = self.rg.integers(np.array( + reset_state(rg) + d = rg.integers(np.array( [lower] * 10), np.array([upper], dtype=object), size=10, dtype=dtype) assert_equal(a, d) - self._reset_state() - e = self.rg.integers( + reset_state(rg) + e = rg.integers( np.array([lower] * 10), np.array([upper] * 10), size=10, dtype=dtype) assert_equal(a, e) - self._reset_state() - a = self.rg.integers(0, upper, size=10, dtype=dtype) - self._reset_state() - b = self.rg.integers([upper] * 10, dtype=dtype) + reset_state(rg) + a = rg.integers(0, upper, size=10, dtype=dtype) + reset_state(rg) + b = rg.integers([upper] * 10, dtype=dtype) assert_equal(a, b) + @pytest.mark.parametrize("dtype", DTYPES_BOOL_INT_UINT) def test_integers_numpy(self, dtype): + rg = self._create_rng().rg high = np.array([1]) low = np.array([0]) - out = self.rg.integers(low, high, dtype=dtype) + out = rg.integers(low, high, dtype=dtype) assert out.shape == (1,) - out = self.rg.integers(low[0], high, dtype=dtype) + out = rg.integers(low[0], high, dtype=dtype) assert out.shape 
== (1,) - out = self.rg.integers(low, high[0], dtype=dtype) + out = rg.integers(low, high[0], dtype=dtype) assert out.shape == (1,) + @pytest.mark.parametrize("dtype", DTYPES_BOOL_INT_UINT) def test_integers_broadcast_errors(self, dtype): + rg = self._create_rng().rg if dtype == np.bool: upper = 2 lower = 0 @@ -708,102 +776,97 @@ def test_integers_broadcast_errors(self, dtype): upper = int(info.max) + 1 lower = info.min with pytest.raises(ValueError): - self.rg.integers(lower, [upper + 1] * 10, dtype=dtype) + rg.integers(lower, [upper + 1] * 10, dtype=dtype) with pytest.raises(ValueError): - self.rg.integers(lower - 1, [upper] * 10, dtype=dtype) + rg.integers(lower - 1, [upper] * 10, dtype=dtype) with pytest.raises(ValueError): - self.rg.integers([lower - 1], [upper] * 10, dtype=dtype) + rg.integers([lower - 1], [upper] * 10, dtype=dtype) with pytest.raises(ValueError): - self.rg.integers([0], [0], dtype=dtype) + rg.integers([0], [0], dtype=dtype) class TestMT19937(RNG): @classmethod - def setup_class(cls): - cls.bit_generator = MT19937 - cls.advance = None - cls.seed = [2 ** 21 + 2 ** 16 + 2 ** 5 + 1] - cls.rg = Generator(cls.bit_generator(*cls.seed)) - cls.initial_state = cls.rg.bit_generator.state - cls.seed_vector_bits = 32 - cls._extra_setup() - cls.seed_error = ValueError + def _create_rng(cls): + bit_generator = MT19937 + advance = None + seed = [2 ** 21 + 2 ** 16 + 2 ** 5 + 1] + rg = Generator(bit_generator(*seed)) + seed_vector_bits = 32 + return RNGData(bit_generator, advance, seed, rg, seed_vector_bits) def test_numpy_state(self): + rg = self._create_rng().rg nprg = np.random.RandomState() nprg.standard_normal(99) state = nprg.get_state() - self.rg.bit_generator.state = state - state2 = self.rg.bit_generator.state + rg.bit_generator.state = state + state2 = rg.bit_generator.state assert_((state[1] == state2['state']['key']).all()) assert_(state[2] == state2['state']['pos']) class TestPhilox(RNG): @classmethod - def setup_class(cls): - cls.bit_generator = 
Philox - cls.advance = 2**63 + 2**31 + 2**15 + 1 - cls.seed = [12345] - cls.rg = Generator(cls.bit_generator(*cls.seed)) - cls.initial_state = cls.rg.bit_generator.state - cls.seed_vector_bits = 64 - cls._extra_setup() + def _create_rng(cls): + bit_generator = Philox + advance = 2**63 + 2**31 + 2**15 + 1 + seed = [12345] + rg = Generator(bit_generator(*seed)) + seed_vector_bits = 64 + return RNGData(bit_generator, advance, seed, rg, seed_vector_bits) class TestSFC64(RNG): @classmethod - def setup_class(cls): - cls.bit_generator = SFC64 - cls.advance = None - cls.seed = [12345] - cls.rg = Generator(cls.bit_generator(*cls.seed)) - cls.initial_state = cls.rg.bit_generator.state - cls.seed_vector_bits = 192 - cls._extra_setup() + def _create_rng(cls): + bit_generator = SFC64 + advance = None + seed = [12345] + rg = Generator(bit_generator(*seed)) + seed_vector_bits = 192 + return RNGData(bit_generator, advance, seed, rg, seed_vector_bits) class TestPCG64(RNG): @classmethod - def setup_class(cls): - cls.bit_generator = PCG64 - cls.advance = 2**63 + 2**31 + 2**15 + 1 - cls.seed = [12345] - cls.rg = Generator(cls.bit_generator(*cls.seed)) - cls.initial_state = cls.rg.bit_generator.state - cls.seed_vector_bits = 64 - cls._extra_setup() + def _create_rng(cls): + bit_generator = PCG64 + advance = 2**63 + 2**31 + 2**15 + 1 + seed = [12345] + rg = Generator(bit_generator(*seed)) + seed_vector_bits = 64 + return RNGData(bit_generator, advance, seed, rg, seed_vector_bits) class TestPCG64DXSM(RNG): @classmethod - def setup_class(cls): - cls.bit_generator = PCG64DXSM - cls.advance = 2**63 + 2**31 + 2**15 + 1 - cls.seed = [12345] - cls.rg = Generator(cls.bit_generator(*cls.seed)) - cls.initial_state = cls.rg.bit_generator.state - cls.seed_vector_bits = 64 - cls._extra_setup() + def _create_rng(cls): + bit_generator = PCG64DXSM + advance = 2**63 + 2**31 + 2**15 + 1 + seed = [12345] + rg = Generator(bit_generator(*seed)) + seed_vector_bits = 64 + return RNGData(bit_generator, 
advance, seed, rg, seed_vector_bits) class TestDefaultRNG(RNG): @classmethod - def setup_class(cls): + def _create_rng(cls): # This will duplicate some tests that directly instantiate a fresh # Generator(), but that's okay. - cls.bit_generator = PCG64 - cls.advance = 2**63 + 2**31 + 2**15 + 1 - cls.seed = [12345] - cls.rg = np.random.default_rng(*cls.seed) - cls.initial_state = cls.rg.bit_generator.state - cls.seed_vector_bits = 64 - cls._extra_setup() + bit_generator = PCG64 + advance = 2**63 + 2**31 + 2**15 + 1 + seed = [12345] + rg = np.random.default_rng(*seed) + seed_vector_bits = 64 + return RNGData(bit_generator, advance, seed, rg, seed_vector_bits) def test_default_is_pcg64(self): # In order to change the default BitGenerator, we'll go through # a deprecation cycle to move to a different function. - assert_(isinstance(self.rg.bit_generator, PCG64)) + rg = self._create_rng().rg + assert_(isinstance(rg.bit_generator, PCG64)) def test_seed(self): np.random.default_rng() From 6bf63e712010ba059f7a37b9a3d1628361c330ca Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sun, 14 Sep 2025 14:57:57 +0200 Subject: [PATCH 0462/1718] apply suggested edit [skip ci] Co-authored-by: Matti Picus --- doc/release/upcoming_changes/29750.change.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/release/upcoming_changes/29750.change.rst b/doc/release/upcoming_changes/29750.change.rst index 5c72ef13db0f..2759c08d8349 100644 --- a/doc/release/upcoming_changes/29750.change.rst +++ b/doc/release/upcoming_changes/29750.change.rst @@ -1,5 +1,5 @@ The ``npymath`` and ``npyrandom`` libraries now have a ``.lib`` rather than a -``.a`` file extension, for compatibility for building with MSVC and +``.a`` file extension on win-arm64, for compatibility for building with MSVC and ``setuptools``. 
Please note that using these static libraries is discouraged and for existing projects using it, it's best to use it with a matching compiler toolchain, which is ``clang-cl`` on Windows on Arm. From 9fab0d3a065b44bc358d749b1185b9eed4beb79a Mon Sep 17 00:00:00 2001 From: mattip Date: Sun, 14 Sep 2025 18:18:47 +0300 Subject: [PATCH 0463/1718] pin asv<0.6.5 [skip azp][skip cirrus][skip circleci] --- .github/workflows/linux.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 5a453521f2dd..b33213449561 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -218,7 +218,7 @@ jobs: run: | sudo apt-get update sudo apt-get install libopenblas-dev ninja-build - pip install asv virtualenv packaging -r requirements/build_requirements.txt + pip install "asv<0.6.5" virtualenv packaging -r requirements/build_requirements.txt - name: Install NumPy run: | spin build -- -Dcpu-dispatch=none From 76ec13aff566e94379c34045d7e152e0d8c6b918 Mon Sep 17 00:00:00 2001 From: Vineet Kumar Date: Mon, 15 Sep 2025 00:56:35 +0530 Subject: [PATCH 0464/1718] DOC: Clarify description of diagonal covariance in multivariate_normal function --- numpy/random/_generator.pyx | 3 ++- numpy/random/mtrand.pyx | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index c067a0821563..c1b78ce5b97e 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -3791,7 +3791,8 @@ cdef class Generator: >>> mean = [0, 0] >>> cov = [[1, 0], [0, 100]] # diagonal covariance - Diagonal covariance means that points are oriented along x or y-axis: + Diagonal covariance means that the variables are independent, and the + probability density contours have their axes aligned with the coordinate axes: >>> import matplotlib.pyplot as plt >>> rng = np.random.default_rng() diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx 
index beaf96c06921..719d1d860d3c 100644 --- a/numpy/random/mtrand.pyx +++ b/numpy/random/mtrand.pyx @@ -4147,7 +4147,8 @@ cdef class RandomState: >>> mean = [0, 0] >>> cov = [[1, 0], [0, 100]] # diagonal covariance - Diagonal covariance means that points are oriented along x or y-axis: + Diagonal covariance means that the variables are independent, and the + probability density contours have their axes aligned with the coordinate axes: >>> import matplotlib.pyplot as plt >>> x, y = np.random.multivariate_normal(mean, cov, 5000).T From dca33b3df733c485fc228e89c9459cec10aebe63 Mon Sep 17 00:00:00 2001 From: Phoenix studio <59125767+phoenixstudiodz@users.noreply.github.com> Date: Mon, 15 Sep 2025 16:10:07 +0100 Subject: [PATCH 0465/1718] DOC: Fix typo in absolute_beginners.rst (#29753) Co-authored-by: phx --- doc/source/user/absolute_beginners.rst | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/doc/source/user/absolute_beginners.rst b/doc/source/user/absolute_beginners.rst index 3ba12b84052e..5f620fa36cef 100644 --- a/doc/source/user/absolute_beginners.rst +++ b/doc/source/user/absolute_beginners.rst @@ -863,12 +863,13 @@ NumPy also performs aggregation functions. In addition to ``min``, ``max``, and result of multiplying the elements together, ``std`` to get the standard deviation, and more. :: + >>> data = np.array([1, 2, 3]) >>> data.max() - 2.0 + 3 >>> data.min() - 1.0 + 1 >>> data.sum() - 3.0 + 6 .. image:: images/np_aggregation.png From 5f8296630d0102029175164a76fc1eae6da3fd20 Mon Sep 17 00:00:00 2001 From: Britney Whittington <103079612+bwhitt7@users.noreply.github.com> Date: Mon, 15 Sep 2025 11:52:42 -0400 Subject: [PATCH 0466/1718] TST: Fix np.random thread test failures (#29729) * TST: Fix np.random thread failures * TST: Use RandomState to fix thread safety issues. 
* TST: Remove rfunc from TestRandint * TST: Remove setSeed in TestBroadcast --- numpy/lib/tests/test_function_base.py | 4 +- numpy/random/tests/test_random.py | 681 ++++++++------- numpy/random/tests/test_randomstate.py | 783 +++++++++--------- .../tests/test_randomstate_regression.py | 24 +- numpy/random/tests/test_regression.py | 12 +- 5 files changed, 723 insertions(+), 781 deletions(-) diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index ce293ec2f256..6ca7892c6cbe 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -1186,8 +1186,8 @@ def test_second_order_accurate(self): assert_(np.all(num_error < 0.03) == True) # test with unevenly spaced - np.random.seed(0) - x = np.sort(np.random.random(10)) + rng = np.random.default_rng(0) + x = np.sort(rng.random(10)) y = 2 * x ** 3 + 4 * x ** 2 + 2 * x analytical = 6 * x ** 2 + 8 * x + 2 num_error = np.abs((np.gradient(y, x, edge_order=2) / analytical) - 1) diff --git a/numpy/random/tests/test_random.py b/numpy/random/tests/test_random.py index b3e69b41956b..f110aa892b31 100644 --- a/numpy/random/tests/test_random.py +++ b/numpy/random/tests/test_random.py @@ -165,47 +165,49 @@ def test_set_invalid_state(self): class TestRandint: - rfunc = np.random.randint - # valid integer/boolean types itype = [np.bool, np.int8, np.uint8, np.int16, np.uint16, np.int32, np.uint32, np.int64, np.uint64] def test_unsupported_type(self): - assert_raises(TypeError, self.rfunc, 1, dtype=float) + rng = random.RandomState() + assert_raises(TypeError, rng.randint, 1, dtype=float) def test_bounds_checking(self): + rng = random.RandomState() for dt in self.itype: lbnd = 0 if dt is np.bool else np.iinfo(dt).min ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1 - assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd, dtype=dt) - assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1, dtype=dt) - assert_raises(ValueError, self.rfunc, ubnd, lbnd, dtype=dt) - 
assert_raises(ValueError, self.rfunc, 1, 0, dtype=dt) + assert_raises(ValueError, rng.randint, lbnd - 1, ubnd, dtype=dt) + assert_raises(ValueError, rng.randint, lbnd, ubnd + 1, dtype=dt) + assert_raises(ValueError, rng.randint, ubnd, lbnd, dtype=dt) + assert_raises(ValueError, rng.randint, 1, 0, dtype=dt) def test_rng_zero_and_extremes(self): + rng = random.RandomState() for dt in self.itype: lbnd = 0 if dt is np.bool else np.iinfo(dt).min ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1 tgt = ubnd - 1 - assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt) + assert_equal(rng.randint(tgt, tgt + 1, size=1000, dtype=dt), tgt) tgt = lbnd - assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt) + assert_equal(rng.randint(tgt, tgt + 1, size=1000, dtype=dt), tgt) tgt = (lbnd + ubnd) // 2 - assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt) + assert_equal(rng.randint(tgt, tgt + 1, size=1000, dtype=dt), tgt) def test_full_range(self): # Test for ticket #1690 + rng = random.RandomState() for dt in self.itype: lbnd = 0 if dt is np.bool else np.iinfo(dt).min ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1 try: - self.rfunc(lbnd, ubnd, dtype=dt) + rng.randint(lbnd, ubnd, dtype=dt) except Exception as e: raise AssertionError("No error should have been raised, " "but one was with the following " @@ -213,15 +215,15 @@ def test_full_range(self): def test_in_bounds_fuzz(self): # Don't use fixed seed - np.random.seed() + rng = random.RandomState() for dt in self.itype[1:]: for ubnd in [4, 8, 16]: - vals = self.rfunc(2, ubnd, size=2**16, dtype=dt) + vals = rng.randint(2, ubnd, size=2**16, dtype=dt) assert_(vals.max() < ubnd) assert_(vals.min() >= 2) - vals = self.rfunc(0, 2, size=2**16, dtype=np.bool) + vals = rng.randint(0, 2, size=2**16, dtype=np.bool) assert_(vals.max() < 2) assert_(vals.min() >= 0) @@ -242,20 +244,20 @@ def test_repeatability(self): 'uint8': '001aac3a5acb935a9b186cbe14a1ca064b8bb2dd0b045d48abeacf74d0203404'} # noqa: E501 
for dt in self.itype[1:]: - np.random.seed(1234) + rng = random.RandomState(1234) # view as little endian for hash if sys.byteorder == 'little': - val = self.rfunc(0, 6, size=1000, dtype=dt) + val = rng.randint(0, 6, size=1000, dtype=dt) else: - val = self.rfunc(0, 6, size=1000, dtype=dt).byteswap() + val = rng.randint(0, 6, size=1000, dtype=dt).byteswap() res = hashlib.sha256(val.view(np.int8)).hexdigest() assert_(tgt[np.dtype(dt).name] == res) # bools do not depend on endianness - np.random.seed(1234) - val = self.rfunc(0, 2, size=1000, dtype=bool).view(np.int8) + rng = random.RandomState(1234) + val = rng.randint(0, 2, size=1000, dtype=bool).view(np.int8) res = hashlib.sha256(val).hexdigest() assert_(tgt[np.dtype(bool).name] == res) @@ -284,11 +286,12 @@ def test_int64_uint64_corner_case(self): def test_respect_dtype_singleton(self): # See gh-7203 + rng = random.RandomState() for dt in self.itype: lbnd = 0 if dt is np.bool else np.iinfo(dt).min ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1 - sample = self.rfunc(lbnd, ubnd, dtype=dt) + sample = rng.randint(lbnd, ubnd, dtype=dt) assert_equal(sample.dtype, np.dtype(dt)) for dt in (bool, int): @@ -297,7 +300,7 @@ def test_respect_dtype_singleton(self): ubnd = 2 if dt is bool else np.iinfo("long").max + 1 # gh-7284: Ensure that we get Python data types - sample = self.rfunc(lbnd, ubnd, dtype=dt) + sample = rng.randint(lbnd, ubnd, dtype=dt) assert_(not hasattr(sample, 'dtype')) assert_equal(type(sample), dt) @@ -308,33 +311,33 @@ class TestRandomDist: seed = 1234567890 def test_rand(self): - np.random.seed(self.seed) - actual = np.random.rand(3, 2) + rng = random.RandomState(self.seed) + actual = rng.rand(3, 2) desired = np.array([[0.61879477158567997, 0.59162362775974664], [0.88868358904449662, 0.89165480011560816], [0.4575674820298663, 0.7781880808593471]]) assert_array_almost_equal(actual, desired, decimal=15) def test_randn(self): - np.random.seed(self.seed) - actual = np.random.randn(3, 2) + rng = 
random.RandomState(self.seed) + actual = rng.randn(3, 2) desired = np.array([[1.34016345771863121, 1.73759122771936081], [1.498988344300628, -0.2286433324536169], [2.031033998682787, 2.17032494605655257]]) assert_array_almost_equal(actual, desired, decimal=15) def test_randint(self): - np.random.seed(self.seed) - actual = np.random.randint(-99, 99, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.randint(-99, 99, size=(3, 2)) desired = np.array([[31, 3], [-52, 41], [-48, -66]]) assert_array_equal(actual, desired) def test_random_integers(self): - np.random.seed(self.seed) + rng = random.RandomState(self.seed) with pytest.warns(DeprecationWarning): - actual = np.random.random_integers(-99, 99, size=(3, 2)) + actual = rng.random_integers(-99, 99, size=(3, 2)) desired = np.array([[31, 3], [-52, 41], [-48, -66]]) @@ -368,41 +371,41 @@ def test_random_integers_deprecated(self): np.iinfo('l').max, np.iinfo('l').max) def test_random(self): - np.random.seed(self.seed) - actual = np.random.random((3, 2)) + rng = random.RandomState(self.seed) + actual = rng.random((3, 2)) desired = np.array([[0.61879477158567997, 0.59162362775974664], [0.88868358904449662, 0.89165480011560816], [0.4575674820298663, 0.7781880808593471]]) assert_array_almost_equal(actual, desired, decimal=15) def test_choice_uniform_replace(self): - np.random.seed(self.seed) - actual = np.random.choice(4, 4) + rng = random.RandomState(self.seed) + actual = rng.choice(4, 4) desired = np.array([2, 3, 2, 3]) assert_array_equal(actual, desired) def test_choice_nonuniform_replace(self): - np.random.seed(self.seed) - actual = np.random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1]) + rng = random.RandomState(self.seed) + actual = rng.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1]) desired = np.array([1, 1, 2, 2]) assert_array_equal(actual, desired) def test_choice_uniform_noreplace(self): - np.random.seed(self.seed) - actual = np.random.choice(4, 3, replace=False) + rng = random.RandomState(self.seed) + actual = 
rng.choice(4, 3, replace=False) desired = np.array([0, 1, 3]) assert_array_equal(actual, desired) def test_choice_nonuniform_noreplace(self): - np.random.seed(self.seed) - actual = np.random.choice(4, 3, replace=False, + rng = random.RandomState(self.seed) + actual = rng.choice(4, 3, replace=False, p=[0.1, 0.3, 0.5, 0.1]) desired = np.array([2, 3, 1]) assert_array_equal(actual, desired) def test_choice_noninteger(self): - np.random.seed(self.seed) - actual = np.random.choice(['a', 'b', 'c', 'd'], 4) + rng = random.RandomState(self.seed) + actual = rng.choice(['a', 'b', 'c', 'd'], 4) desired = np.array(['c', 'd', 'c', 'd']) assert_array_equal(actual, desired) @@ -477,8 +480,8 @@ def test_choice_nan_probabilities(self): assert_raises(ValueError, np.random.choice, a, p=p) def test_bytes(self): - np.random.seed(self.seed) - actual = np.random.bytes(10) + rng = random.RandomState(self.seed) + actual = rng.bytes(10) desired = b'\x82Ui\x9e\xff\x97+Wf\xa5' assert_equal(actual, desired) @@ -501,9 +504,9 @@ def test_shuffle(self): # gh-4270 lambda x: np.asarray([(i, i) for i in x], [("a", object), ("b", np.int32)])]: - np.random.seed(self.seed) + rng = random.RandomState(self.seed) alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]) - np.random.shuffle(alist) + rng.shuffle(alist) actual = alist desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3]) assert_array_equal(actual, desired) @@ -563,11 +566,11 @@ def test_shuffle_memoryview(self): # gh-18273 # allow graceful handling of memoryviews # (treat the same as arrays) - np.random.seed(self.seed) + rng = random.RandomState(self.seed) a = np.arange(5).data - np.random.shuffle(a) + rng.shuffle(a) assert_equal(np.asarray(a), [0, 1, 4, 3, 2]) - rng = np.random.RandomState(self.seed) + rng = random.RandomState(self.seed) rng.shuffle(a) assert_equal(np.asarray(a), [0, 1, 2, 3, 4]) rng = np.random.default_rng(self.seed) @@ -581,8 +584,8 @@ def test_shuffle_not_writeable(self): np.random.shuffle(a) def test_beta(self): - np.random.seed(self.seed) 
- actual = np.random.beta(.1, .9, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.beta(.1, .9, size=(3, 2)) desired = np.array( [[1.45341850513746058e-02, 5.31297615662868145e-04], [1.85366619058432324e-06, 4.19214516800110563e-03], @@ -590,25 +593,25 @@ def test_beta(self): assert_array_almost_equal(actual, desired, decimal=15) def test_binomial(self): - np.random.seed(self.seed) - actual = np.random.binomial(100, .456, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.binomial(100, .456, size=(3, 2)) desired = np.array([[37, 43], [42, 48], [46, 45]]) assert_array_equal(actual, desired) def test_chisquare(self): - np.random.seed(self.seed) - actual = np.random.chisquare(50, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.chisquare(50, size=(3, 2)) desired = np.array([[63.87858175501090585, 68.68407748911370447], [65.77116116901505904, 47.09686762438974483], [72.3828403199695174, 74.18408615260374006]]) assert_array_almost_equal(actual, desired, decimal=13) def test_dirichlet(self): - np.random.seed(self.seed) + rng = random.RandomState(self.seed) alpha = np.array([51.72840233779265162, 39.74494232180943953]) - actual = np.random.mtrand.dirichlet(alpha, size=(3, 2)) + actual = rng.dirichlet(alpha, size=(3, 2)) desired = np.array([[[0.54539444573611562, 0.45460555426388438], [0.62345816822039413, 0.37654183177960598]], [[0.55206000085785778, 0.44793999914214233], @@ -641,8 +644,8 @@ def test_dirichlet_bad_alpha(self): assert_raises(ValueError, random.dirichlet, np.array([[5, 1], [1, 5]])) def test_exponential(self): - np.random.seed(self.seed) - actual = np.random.exponential(1.1234, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.exponential(1.1234, size=(3, 2)) desired = np.array([[1.08342649775011624, 1.00607889924557314], [2.46628830085216721, 2.49668106809923884], [0.68717433461363442, 1.69175666993575979]]) @@ -653,16 +656,16 @@ def test_exponential_0(self): assert_raises(ValueError, 
np.random.exponential, scale=-0.) def test_f(self): - np.random.seed(self.seed) - actual = np.random.f(12, 77, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.f(12, 77, size=(3, 2)) desired = np.array([[1.21975394418575878, 1.75135759791559775], [1.44803115017146489, 1.22108959480396262], [1.02176975757740629, 1.34431827623300415]]) assert_array_almost_equal(actual, desired, decimal=15) def test_gamma(self): - np.random.seed(self.seed) - actual = np.random.gamma(5, 3, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.gamma(5, 3, size=(3, 2)) desired = np.array([[24.60509188649287182, 28.54993563207210627], [26.13476110204064184, 12.56988482927716078], [31.71863275789960568, 33.30143302795922011]]) @@ -673,16 +676,16 @@ def test_gamma_0(self): assert_raises(ValueError, np.random.gamma, shape=-0., scale=-0.) def test_geometric(self): - np.random.seed(self.seed) - actual = np.random.geometric(.123456789, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.geometric(.123456789, size=(3, 2)) desired = np.array([[8, 7], [17, 17], [5, 12]]) assert_array_equal(actual, desired) def test_gumbel(self): - np.random.seed(self.seed) - actual = np.random.gumbel(loc=.123456789, scale=2.0, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.gumbel(loc=.123456789, scale=2.0, size=(3, 2)) desired = np.array([[0.19591898743416816, 0.34405539668096674], [-1.4492522252274278, -1.47374816298446865], [1.10651090478803416, -0.69535848626236174]]) @@ -693,34 +696,34 @@ def test_gumbel_0(self): assert_raises(ValueError, np.random.gumbel, scale=-0.) 
def test_hypergeometric(self): - np.random.seed(self.seed) - actual = np.random.hypergeometric(10, 5, 14, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.hypergeometric(10, 5, 14, size=(3, 2)) desired = np.array([[10, 10], [10, 10], [9, 9]]) assert_array_equal(actual, desired) # Test nbad = 0 - actual = np.random.hypergeometric(5, 0, 3, size=4) + actual = rng.hypergeometric(5, 0, 3, size=4) desired = np.array([3, 3, 3, 3]) assert_array_equal(actual, desired) - actual = np.random.hypergeometric(15, 0, 12, size=4) + actual = rng.hypergeometric(15, 0, 12, size=4) desired = np.array([12, 12, 12, 12]) assert_array_equal(actual, desired) # Test ngood = 0 - actual = np.random.hypergeometric(0, 5, 3, size=4) + actual = rng.hypergeometric(0, 5, 3, size=4) desired = np.array([0, 0, 0, 0]) assert_array_equal(actual, desired) - actual = np.random.hypergeometric(0, 15, 12, size=4) + actual = rng.hypergeometric(0, 15, 12, size=4) desired = np.array([0, 0, 0, 0]) assert_array_equal(actual, desired) def test_laplace(self): - np.random.seed(self.seed) - actual = np.random.laplace(loc=.123456789, scale=2.0, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.laplace(loc=.123456789, scale=2.0, size=(3, 2)) desired = np.array([[0.66599721112760157, 0.52829452552221945], [3.12791959514407125, 3.18202813572992005], [-0.05391065675859356, 1.74901336242837324]]) @@ -731,16 +734,16 @@ def test_laplace_0(self): assert_raises(ValueError, np.random.laplace, scale=-0.) 
def test_logistic(self): - np.random.seed(self.seed) - actual = np.random.logistic(loc=.123456789, scale=2.0, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.logistic(loc=.123456789, scale=2.0, size=(3, 2)) desired = np.array([[1.09232835305011444, 0.8648196662399954], [4.27818590694950185, 4.33897006346929714], [-0.21682183359214885, 2.63373365386060332]]) assert_array_almost_equal(actual, desired, decimal=15) def test_lognormal(self): - np.random.seed(self.seed) - actual = np.random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.lognormal(mean=.123456789, sigma=2.0, size=(3, 2)) desired = np.array([[16.50698631688883822, 36.54846706092654784], [22.67886599981281748, 0.71617561058995771], [65.72798501792723869, 86.84341601437161273]]) @@ -751,16 +754,16 @@ def test_lognormal_0(self): assert_raises(ValueError, np.random.lognormal, sigma=-0.) def test_logseries(self): - np.random.seed(self.seed) - actual = np.random.logseries(p=.923456789, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.logseries(p=.923456789, size=(3, 2)) desired = np.array([[2, 2], [6, 17], [3, 6]]) assert_array_equal(actual, desired) def test_multinomial(self): - np.random.seed(self.seed) - actual = np.random.multinomial(20, [1 / 6.] * 6, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.multinomial(20, [1 / 6.] 
* 6, size=(3, 2)) desired = np.array([[[4, 3, 5, 4, 2, 2], [5, 2, 8, 2, 2, 1]], [[3, 4, 3, 6, 0, 4], @@ -770,11 +773,11 @@ def test_multinomial(self): assert_array_equal(actual, desired) def test_multivariate_normal(self): - np.random.seed(self.seed) + rng = random.RandomState(self.seed) mean = (.123456789, 10) cov = [[1, 0], [0, 1]] size = (3, 2) - actual = np.random.multivariate_normal(mean, cov, size) + actual = rng.multivariate_normal(mean, cov, size) desired = np.array([[[1.463620246718631, 11.73759122771936], [1.622445133300628, 9.771356667546383]], [[2.154490787682787, 12.170324946056553], @@ -785,7 +788,7 @@ def test_multivariate_normal(self): assert_array_almost_equal(actual, desired, decimal=15) # Check for default size, was raising deprecation warning - actual = np.random.multivariate_normal(mean, cov) + actual = rng.multivariate_normal(mean, cov) desired = np.array([0.895289569463708, 9.17180864067987]) assert_array_almost_equal(actual, desired, decimal=15) @@ -793,53 +796,53 @@ def test_multivariate_normal(self): # RuntimeWarning mean = [0, 0] cov = [[1, 2], [2, 1]] - pytest.warns(RuntimeWarning, np.random.multivariate_normal, mean, cov) + pytest.warns(RuntimeWarning, rng.multivariate_normal, mean, cov) # and that it doesn't warn with RuntimeWarning check_valid='ignore' - assert_no_warnings(np.random.multivariate_normal, mean, cov, + assert_no_warnings(rng.multivariate_normal, mean, cov, check_valid='ignore') # and that it raises with RuntimeWarning check_valid='raises' - assert_raises(ValueError, np.random.multivariate_normal, mean, cov, + assert_raises(ValueError, rng.multivariate_normal, mean, cov, check_valid='raise') cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32) with warnings.catch_warnings(): warnings.simplefilter('error') - np.random.multivariate_normal(mean, cov) + rng.multivariate_normal(mean, cov) def test_negative_binomial(self): - np.random.seed(self.seed) - actual = np.random.negative_binomial(n=100, p=.12345, size=(3, 2)) + rng = 
random.RandomState(self.seed) + actual = rng.negative_binomial(n=100, p=.12345, size=(3, 2)) desired = np.array([[848, 841], [892, 611], [779, 647]]) assert_array_equal(actual, desired) def test_noncentral_chisquare(self): - np.random.seed(self.seed) - actual = np.random.noncentral_chisquare(df=5, nonc=5, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.noncentral_chisquare(df=5, nonc=5, size=(3, 2)) desired = np.array([[23.91905354498517511, 13.35324692733826346], [31.22452661329736401, 16.60047399466177254], [5.03461598262724586, 17.94973089023519464]]) assert_array_almost_equal(actual, desired, decimal=14) - actual = np.random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2)) + actual = rng.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2)) desired = np.array([[1.47145377828516666, 0.15052899268012659], [0.00943803056963588, 1.02647251615666169], [0.332334982684171, 0.15451287602753125]]) assert_array_almost_equal(actual, desired, decimal=14) - np.random.seed(self.seed) - actual = np.random.noncentral_chisquare(df=5, nonc=0, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.noncentral_chisquare(df=5, nonc=0, size=(3, 2)) desired = np.array([[9.597154162763948, 11.725484450296079], [10.413711048138335, 3.694475922923986], [13.484222138963087, 14.377255424602957]]) assert_array_almost_equal(actual, desired, decimal=14) def test_noncentral_f(self): - np.random.seed(self.seed) - actual = np.random.noncentral_f(dfnum=5, dfden=2, nonc=1, + rng = random.RandomState(self.seed) + actual = rng.noncentral_f(dfnum=5, dfden=2, nonc=1, size=(3, 2)) desired = np.array([[1.40598099674926669, 0.34207973179285761], [3.57715069265772545, 7.92632662577829805], @@ -847,8 +850,8 @@ def test_noncentral_f(self): assert_array_almost_equal(actual, desired, decimal=14) def test_normal(self): - np.random.seed(self.seed) - actual = np.random.normal(loc=.123456789, scale=2.0, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = 
rng.normal(loc=.123456789, scale=2.0, size=(3, 2)) desired = np.array([[2.80378370443726244, 3.59863924443872163], [3.121433477601256, -0.33382987590723379], [4.18552478636557357, 4.46410668111310471]]) @@ -859,8 +862,8 @@ def test_normal_0(self): assert_raises(ValueError, np.random.normal, scale=-0.) def test_pareto(self): - np.random.seed(self.seed) - actual = np.random.pareto(a=.123456789, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.pareto(a=.123456789, size=(3, 2)) desired = np.array( [[2.46852460439034849e+03, 1.41286880810518346e+03], [5.28287797029485181e+07, 6.57720981047328785e+07], @@ -874,8 +877,8 @@ def test_pareto(self): np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30) def test_poisson(self): - np.random.seed(self.seed) - actual = np.random.poisson(lam=.123456789, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.poisson(lam=.123456789, size=(3, 2)) desired = np.array([[0, 0], [1, 0], [0, 0]]) @@ -890,16 +893,16 @@ def test_poisson_exceptions(self): assert_raises(ValueError, np.random.poisson, [lambig] * 10) def test_power(self): - np.random.seed(self.seed) - actual = np.random.power(a=.123456789, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.power(a=.123456789, size=(3, 2)) desired = np.array([[0.02048932883240791, 0.01424192241128213], [0.38446073748535298, 0.39499689943484395], [0.00177699707563439, 0.13115505880863756]]) assert_array_almost_equal(actual, desired, decimal=15) def test_rayleigh(self): - np.random.seed(self.seed) - actual = np.random.rayleigh(scale=10, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.rayleigh(scale=10, size=(3, 2)) desired = np.array([[13.8882496494248393, 13.383318339044731], [20.95413364294492098, 21.08285015800712614], [11.06066537006854311, 17.35468505778271009]]) @@ -910,24 +913,24 @@ def test_rayleigh_0(self): assert_raises(ValueError, np.random.rayleigh, scale=-0.) 
def test_standard_cauchy(self): - np.random.seed(self.seed) - actual = np.random.standard_cauchy(size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.standard_cauchy(size=(3, 2)) desired = np.array([[0.77127660196445336, -6.55601161955910605], [0.93582023391158309, -2.07479293013759447], [-4.74601644297011926, 0.18338989290760804]]) assert_array_almost_equal(actual, desired, decimal=15) def test_standard_exponential(self): - np.random.seed(self.seed) - actual = np.random.standard_exponential(size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.standard_exponential(size=(3, 2)) desired = np.array([[0.96441739162374596, 0.89556604882105506], [2.1953785836319808, 2.22243285392490542], [0.6116915921431676, 1.50592546727413201]]) assert_array_almost_equal(actual, desired, decimal=15) def test_standard_gamma(self): - np.random.seed(self.seed) - actual = np.random.standard_gamma(shape=3, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.standard_gamma(shape=3, size=(3, 2)) desired = np.array([[5.50841531318455058, 6.62953470301903103], [5.93988484943779227, 2.31044849402133989], [7.54838614231317084, 8.012756093271868]]) @@ -938,24 +941,24 @@ def test_standard_gamma_0(self): assert_raises(ValueError, np.random.standard_gamma, shape=-0.) 
def test_standard_normal(self): - np.random.seed(self.seed) - actual = np.random.standard_normal(size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.standard_normal(size=(3, 2)) desired = np.array([[1.34016345771863121, 1.73759122771936081], [1.498988344300628, -0.2286433324536169], [2.031033998682787, 2.17032494605655257]]) assert_array_almost_equal(actual, desired, decimal=15) def test_standard_t(self): - np.random.seed(self.seed) - actual = np.random.standard_t(df=10, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.standard_t(df=10, size=(3, 2)) desired = np.array([[0.97140611862659965, -0.08830486548450577], [1.36311143689505321, -0.55317463909867071], [-0.18473749069684214, 0.61181537341755321]]) assert_array_almost_equal(actual, desired, decimal=15) def test_triangular(self): - np.random.seed(self.seed) - actual = np.random.triangular(left=5.12, mode=10.23, right=20.34, + rng = random.RandomState(self.seed) + actual = rng.triangular(left=5.12, mode=10.23, right=20.34, size=(3, 2)) desired = np.array([[12.68117178949215784, 12.4129206149193152], [16.20131377335158263, 16.25692138747600524], @@ -963,8 +966,8 @@ def test_triangular(self): assert_array_almost_equal(actual, desired, decimal=14) def test_uniform(self): - np.random.seed(self.seed) - actual = np.random.uniform(low=1.23, high=10.54, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.uniform(low=1.23, high=10.54, size=(3, 2)) desired = np.array([[6.99097932346268003, 6.73801597444323974], [9.50364421400426274, 9.53130618907631089], [5.48995325769805476, 8.47493103280052118]]) @@ -1011,8 +1014,8 @@ def __int__(self): assert_raises(TypeError, np.random.hypergeometric, throwing_int, 1, 1) def test_vonmises(self): - np.random.seed(self.seed) - actual = np.random.vonmises(mu=1.23, kappa=1.54, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.vonmises(mu=1.23, kappa=1.54, size=(3, 2)) desired = np.array([[2.28567572673902042, 
2.89163838442285037], [0.38198375564286025, 2.57638023113890746], [1.19153771588353052, 1.83509849681825354]]) @@ -1025,16 +1028,16 @@ def test_vonmises_small(self): np.testing.assert_(np.isfinite(r).all()) def test_wald(self): - np.random.seed(self.seed) - actual = np.random.wald(mean=1.23, scale=1.54, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.wald(mean=1.23, scale=1.54, size=(3, 2)) desired = np.array([[3.82935265715889983, 5.13125249184285526], [0.35045403618358717, 1.50832396872003538], [0.24124319895843183, 0.22031101461955038]]) assert_array_almost_equal(actual, desired, decimal=14) def test_weibull(self): - np.random.seed(self.seed) - actual = np.random.weibull(a=1.23, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.weibull(a=1.23, size=(3, 2)) desired = np.array([[0.97097342648766727, 0.91422896443565516], [1.89517770034962929, 1.91414357960479564], [0.67057783752390987, 1.39494046635066793]]) @@ -1046,8 +1049,8 @@ def test_weibull_0(self): assert_raises(ValueError, np.random.weibull, a=-0.) 
def test_zipf(self): - np.random.seed(self.seed) - actual = np.random.zipf(a=1.23, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.zipf(a=1.23, size=(3, 2)) desired = np.array([[66, 29], [1, 1], [3, 13]]) @@ -1059,138 +1062,128 @@ class TestBroadcast: # correctly when presented with non-scalar arguments seed = 123456789 - def setSeed(self): - np.random.seed(self.seed) - # TODO: Include test for randint once it can broadcast # Can steal the test written in PR #6938 def test_uniform(self): low = [0] high = [1] - uniform = np.random.uniform desired = np.array([0.53283302478975902, 0.53413660089041659, 0.50955303552646702]) - self.setSeed() - actual = uniform(low * 3, high) + rng = random.RandomState(self.seed) + actual = rng.uniform(low * 3, high) assert_array_almost_equal(actual, desired, decimal=14) - self.setSeed() - actual = uniform(low, high * 3) + rng = random.RandomState(self.seed) + actual = rng.uniform(low, high * 3) assert_array_almost_equal(actual, desired, decimal=14) def test_normal(self): loc = [0] scale = [1] bad_scale = [-1] - normal = np.random.normal desired = np.array([2.2129019979039612, 2.1283977976520019, 1.8417114045748335]) - self.setSeed() - actual = normal(loc * 3, scale) + rng = random.RandomState(self.seed) + actual = rng.normal(loc * 3, scale) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, normal, loc * 3, bad_scale) + assert_raises(ValueError, rng.normal, loc * 3, bad_scale) - self.setSeed() - actual = normal(loc, scale * 3) + rng = random.RandomState(self.seed) + actual = rng.normal(loc, scale * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, normal, loc, bad_scale * 3) + assert_raises(ValueError, rng.normal, loc, bad_scale * 3) def test_beta(self): a = [1] b = [2] bad_a = [-1] bad_b = [-2] - beta = np.random.beta desired = np.array([0.19843558305989056, 0.075230336409423643, 0.24976865978980844]) - self.setSeed() - actual = beta(a * 3, b) + rng = 
random.RandomState(self.seed) + actual = rng.beta(a * 3, b) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, beta, bad_a * 3, b) - assert_raises(ValueError, beta, a * 3, bad_b) + assert_raises(ValueError, rng.beta, bad_a * 3, b) + assert_raises(ValueError, rng.beta, a * 3, bad_b) - self.setSeed() - actual = beta(a, b * 3) + rng = random.RandomState(self.seed) + actual = rng.beta(a, b * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, beta, bad_a, b * 3) - assert_raises(ValueError, beta, a, bad_b * 3) + assert_raises(ValueError, rng.beta, bad_a, b * 3) + assert_raises(ValueError, rng.beta, a, bad_b * 3) def test_exponential(self): scale = [1] bad_scale = [-1] - exponential = np.random.exponential desired = np.array([0.76106853658845242, 0.76386282278691653, 0.71243813125891797]) - self.setSeed() - actual = exponential(scale * 3) + rng = random.RandomState(self.seed) + actual = rng.exponential(scale * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, exponential, bad_scale * 3) + assert_raises(ValueError, rng.exponential, bad_scale * 3) def test_standard_gamma(self): shape = [1] bad_shape = [-1] - std_gamma = np.random.standard_gamma desired = np.array([0.76106853658845242, 0.76386282278691653, 0.71243813125891797]) - self.setSeed() - actual = std_gamma(shape * 3) + rng = random.RandomState(self.seed) + actual = rng.standard_gamma(shape * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, std_gamma, bad_shape * 3) + assert_raises(ValueError, rng.standard_gamma, bad_shape * 3) def test_gamma(self): shape = [1] scale = [2] bad_shape = [-1] bad_scale = [-2] - gamma = np.random.gamma desired = np.array([1.5221370731769048, 1.5277256455738331, 1.4248762625178359]) - self.setSeed() - actual = gamma(shape * 3, scale) + rng = random.RandomState(self.seed) + actual = rng.gamma(shape * 3, scale) assert_array_almost_equal(actual, desired, 
decimal=14) - assert_raises(ValueError, gamma, bad_shape * 3, scale) - assert_raises(ValueError, gamma, shape * 3, bad_scale) + assert_raises(ValueError, rng.gamma, bad_shape * 3, scale) + assert_raises(ValueError, rng.gamma, shape * 3, bad_scale) - self.setSeed() - actual = gamma(shape, scale * 3) + rng = random.RandomState(self.seed) + actual = rng.gamma(shape, scale * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, gamma, bad_shape, scale * 3) - assert_raises(ValueError, gamma, shape, bad_scale * 3) + assert_raises(ValueError, rng.gamma, bad_shape, scale * 3) + assert_raises(ValueError, rng.gamma, shape, bad_scale * 3) def test_f(self): dfnum = [1] dfden = [2] bad_dfnum = [-1] bad_dfden = [-2] - f = np.random.f desired = np.array([0.80038951638264799, 0.86768719635363512, 2.7251095168386801]) - self.setSeed() - actual = f(dfnum * 3, dfden) + rng = random.RandomState(self.seed) + actual = rng.f(dfnum * 3, dfden) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, f, bad_dfnum * 3, dfden) - assert_raises(ValueError, f, dfnum * 3, bad_dfden) + assert_raises(ValueError, rng.f, bad_dfnum * 3, dfden) + assert_raises(ValueError, rng.f, dfnum * 3, bad_dfden) - self.setSeed() - actual = f(dfnum, dfden * 3) + rng = random.RandomState(self.seed) + actual = rng.f(dfnum, dfden * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, f, bad_dfnum, dfden * 3) - assert_raises(ValueError, f, dfnum, bad_dfden * 3) + assert_raises(ValueError, rng.f, bad_dfnum, dfden * 3) + assert_raises(ValueError, rng.f, dfnum, bad_dfden * 3) def test_noncentral_f(self): dfnum = [2] @@ -1199,256 +1192,242 @@ def test_noncentral_f(self): bad_dfnum = [0] bad_dfden = [-1] bad_nonc = [-2] - nonc_f = np.random.noncentral_f desired = np.array([9.1393943263705211, 13.025456344595602, 8.8018098359100545]) - self.setSeed() - actual = nonc_f(dfnum * 3, dfden, nonc) + rng = random.RandomState(self.seed) + 
actual = rng.noncentral_f(dfnum * 3, dfden, nonc) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc) - assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc) - assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc) + assert_raises(ValueError, rng.noncentral_f, bad_dfnum * 3, dfden, nonc) + assert_raises(ValueError, rng.noncentral_f, dfnum * 3, bad_dfden, nonc) + assert_raises(ValueError, rng.noncentral_f, dfnum * 3, dfden, bad_nonc) - self.setSeed() - actual = nonc_f(dfnum, dfden * 3, nonc) + rng = random.RandomState(self.seed) + actual = rng.noncentral_f(dfnum, dfden * 3, nonc) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc) - assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc) - assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc) + assert_raises(ValueError, rng.noncentral_f, bad_dfnum, dfden * 3, nonc) + assert_raises(ValueError, rng.noncentral_f, dfnum, bad_dfden * 3, nonc) + assert_raises(ValueError, rng.noncentral_f, dfnum, dfden * 3, bad_nonc) - self.setSeed() - actual = nonc_f(dfnum, dfden, nonc * 3) + rng = random.RandomState(self.seed) + actual = rng.noncentral_f(dfnum, dfden, nonc * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3) - assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3) - assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3) + assert_raises(ValueError, rng.noncentral_f, bad_dfnum, dfden, nonc * 3) + assert_raises(ValueError, rng.noncentral_f, dfnum, bad_dfden, nonc * 3) + assert_raises(ValueError, rng.noncentral_f, dfnum, dfden, bad_nonc * 3) def test_noncentral_f_small_df(self): - self.setSeed() + rng = random.RandomState(self.seed) desired = np.array([6.869638627492048, 0.785880199263955]) - actual = np.random.noncentral_f(0.9, 0.9, 2, size=2) + actual = rng.noncentral_f(0.9, 0.9, 
2, size=2) assert_array_almost_equal(actual, desired, decimal=14) def test_chisquare(self): df = [1] bad_df = [-1] - chisquare = np.random.chisquare desired = np.array([0.57022801133088286, 0.51947702108840776, 0.1320969254923558]) - self.setSeed() - actual = chisquare(df * 3) + rng = random.RandomState(self.seed) + actual = rng.chisquare(df * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, chisquare, bad_df * 3) + assert_raises(ValueError, rng.chisquare, bad_df * 3) def test_noncentral_chisquare(self): df = [1] nonc = [2] bad_df = [-1] bad_nonc = [-2] - nonc_chi = np.random.noncentral_chisquare desired = np.array([9.0015599467913763, 4.5804135049718742, 6.0872302432834564]) - self.setSeed() - actual = nonc_chi(df * 3, nonc) + rng = random.RandomState(self.seed) + actual = rng.noncentral_chisquare(df * 3, nonc) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, nonc_chi, bad_df * 3, nonc) - assert_raises(ValueError, nonc_chi, df * 3, bad_nonc) + assert_raises(ValueError, rng.noncentral_chisquare, bad_df * 3, nonc) + assert_raises(ValueError, rng.noncentral_chisquare, df * 3, bad_nonc) - self.setSeed() - actual = nonc_chi(df, nonc * 3) + rng = random.RandomState(self.seed) + actual = rng.noncentral_chisquare(df, nonc * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, nonc_chi, bad_df, nonc * 3) - assert_raises(ValueError, nonc_chi, df, bad_nonc * 3) + assert_raises(ValueError, rng.noncentral_chisquare, bad_df, nonc * 3) + assert_raises(ValueError, rng.noncentral_chisquare, df, bad_nonc * 3) def test_standard_t(self): df = [1] bad_df = [-1] - t = np.random.standard_t desired = np.array([3.0702872575217643, 5.8560725167361607, 1.0274791436474273]) - self.setSeed() - actual = t(df * 3) + rng = random.RandomState(self.seed) + actual = rng.standard_t(df * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, t, bad_df * 3) + 
assert_raises(ValueError, rng.standard_t, bad_df * 3) def test_vonmises(self): mu = [2] kappa = [1] bad_kappa = [-1] - vonmises = np.random.vonmises desired = np.array([2.9883443664201312, -2.7064099483995943, -1.8672476700665914]) - self.setSeed() - actual = vonmises(mu * 3, kappa) + rng = random.RandomState(self.seed) + actual = rng.vonmises(mu * 3, kappa) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, vonmises, mu * 3, bad_kappa) + assert_raises(ValueError, rng.vonmises, mu * 3, bad_kappa) - self.setSeed() - actual = vonmises(mu, kappa * 3) + rng = random.RandomState(self.seed) + actual = rng.vonmises(mu, kappa * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, vonmises, mu, bad_kappa * 3) + assert_raises(ValueError, rng.vonmises, mu, bad_kappa * 3) def test_pareto(self): a = [1] bad_a = [-1] - pareto = np.random.pareto desired = np.array([1.1405622680198362, 1.1465519762044529, 1.0389564467453547]) - self.setSeed() - actual = pareto(a * 3) + rng = random.RandomState(self.seed) + actual = rng.pareto(a * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, pareto, bad_a * 3) + assert_raises(ValueError, rng.pareto, bad_a * 3) def test_weibull(self): a = [1] bad_a = [-1] - weibull = np.random.weibull desired = np.array([0.76106853658845242, 0.76386282278691653, 0.71243813125891797]) - self.setSeed() - actual = weibull(a * 3) + rng = random.RandomState(self.seed) + actual = rng.weibull(a * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, weibull, bad_a * 3) + assert_raises(ValueError, rng.weibull, bad_a * 3) def test_power(self): a = [1] bad_a = [-1] - power = np.random.power desired = np.array([0.53283302478975902, 0.53413660089041659, 0.50955303552646702]) - self.setSeed() - actual = power(a * 3) + rng = random.RandomState(self.seed) + actual = rng.power(a * 3) assert_array_almost_equal(actual, desired, decimal=14) - 
assert_raises(ValueError, power, bad_a * 3) + assert_raises(ValueError, rng.power, bad_a * 3) def test_laplace(self): loc = [0] scale = [1] bad_scale = [-1] - laplace = np.random.laplace desired = np.array([0.067921356028507157, 0.070715642226971326, 0.019290950698972624]) - self.setSeed() - actual = laplace(loc * 3, scale) + rng = random.RandomState(self.seed) + actual = rng.laplace(loc * 3, scale) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, laplace, loc * 3, bad_scale) + assert_raises(ValueError, rng.laplace, loc * 3, bad_scale) - self.setSeed() - actual = laplace(loc, scale * 3) + rng = random.RandomState(self.seed) + actual = rng.laplace(loc, scale * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, laplace, loc, bad_scale * 3) + assert_raises(ValueError, rng.laplace, loc, bad_scale * 3) def test_gumbel(self): loc = [0] scale = [1] bad_scale = [-1] - gumbel = np.random.gumbel desired = np.array([0.2730318639556768, 0.26936705726291116, 0.33906220393037939]) - self.setSeed() - actual = gumbel(loc * 3, scale) + rng = random.RandomState(self.seed) + actual = rng.gumbel(loc * 3, scale) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, gumbel, loc * 3, bad_scale) + assert_raises(ValueError, rng.gumbel, loc * 3, bad_scale) - self.setSeed() - actual = gumbel(loc, scale * 3) + rng = random.RandomState(self.seed) + actual = rng.gumbel(loc, scale * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, gumbel, loc, bad_scale * 3) + assert_raises(ValueError, rng.gumbel, loc, bad_scale * 3) def test_logistic(self): loc = [0] scale = [1] bad_scale = [-1] - logistic = np.random.logistic desired = np.array([0.13152135837586171, 0.13675915696285773, 0.038216792802833396]) - self.setSeed() - actual = logistic(loc * 3, scale) + rng = random.RandomState(self.seed) + actual = rng.logistic(loc * 3, scale) assert_array_almost_equal(actual, 
desired, decimal=14) - assert_raises(ValueError, logistic, loc * 3, bad_scale) + assert_raises(ValueError, rng.logistic, loc * 3, bad_scale) - self.setSeed() - actual = logistic(loc, scale * 3) + rng = random.RandomState(self.seed) + actual = rng.logistic(loc, scale * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, logistic, loc, bad_scale * 3) + assert_raises(ValueError, rng.logistic, loc, bad_scale * 3) def test_lognormal(self): mean = [0] sigma = [1] bad_sigma = [-1] - lognormal = np.random.lognormal desired = np.array([9.1422086044848427, 8.4013952870126261, 6.3073234116578671]) - self.setSeed() - actual = lognormal(mean * 3, sigma) + rng = random.RandomState(self.seed) + actual = rng.lognormal(mean * 3, sigma) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, lognormal, mean * 3, bad_sigma) + assert_raises(ValueError, rng.lognormal, mean * 3, bad_sigma) - self.setSeed() - actual = lognormal(mean, sigma * 3) + rng = random.RandomState(self.seed) + actual = rng.lognormal(mean, sigma * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, lognormal, mean, bad_sigma * 3) + assert_raises(ValueError, rng.lognormal, mean, bad_sigma * 3) def test_rayleigh(self): scale = [1] bad_scale = [-1] - rayleigh = np.random.rayleigh desired = np.array([1.2337491937897689, 1.2360119924878694, 1.1936818095781789]) - self.setSeed() - actual = rayleigh(scale * 3) + rng = random.RandomState(self.seed) + actual = rng.rayleigh(scale * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, rayleigh, bad_scale * 3) + assert_raises(ValueError, rng.rayleigh, bad_scale * 3) def test_wald(self): mean = [0.5] scale = [1] bad_mean = [0] bad_scale = [-2] - wald = np.random.wald desired = np.array([0.11873681120271318, 0.12450084820795027, 0.9096122728408238]) - self.setSeed() - actual = wald(mean * 3, scale) + rng = random.RandomState(self.seed) + actual = 
rng.wald(mean * 3, scale) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, wald, bad_mean * 3, scale) - assert_raises(ValueError, wald, mean * 3, bad_scale) + assert_raises(ValueError, rng.wald, bad_mean * 3, scale) + assert_raises(ValueError, rng.wald, mean * 3, bad_scale) - self.setSeed() - actual = wald(mean, scale * 3) + rng = random.RandomState(self.seed) + actual = rng.wald(mean, scale * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, wald, bad_mean, scale * 3) - assert_raises(ValueError, wald, mean, bad_scale * 3) - assert_raises(ValueError, wald, 0.0, 1) - assert_raises(ValueError, wald, 0.5, 0.0) + assert_raises(ValueError, rng.wald, bad_mean, scale * 3) + assert_raises(ValueError, rng.wald, mean, bad_scale * 3) + assert_raises(ValueError, rng.wald, 0.0, 1) + assert_raises(ValueError, rng.wald, 0.5, 0.0) def test_triangular(self): left = [1] @@ -1457,33 +1436,32 @@ def test_triangular(self): bad_left_one = [3] bad_mode_one = [4] bad_left_two, bad_mode_two = right * 2 - triangular = np.random.triangular desired = np.array([2.03339048710429, 2.0347400359389356, 2.0095991069536208]) - self.setSeed() - actual = triangular(left * 3, mode, right) + rng = random.RandomState(self.seed) + actual = rng.triangular(left * 3, mode, right) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, triangular, bad_left_one * 3, mode, right) - assert_raises(ValueError, triangular, left * 3, bad_mode_one, right) - assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two, + assert_raises(ValueError, rng.triangular, bad_left_one * 3, mode, right) + assert_raises(ValueError, rng.triangular, left * 3, bad_mode_one, right) + assert_raises(ValueError, rng.triangular, bad_left_two * 3, bad_mode_two, right) - self.setSeed() - actual = triangular(left, mode * 3, right) + rng = random.RandomState(self.seed) + actual = rng.triangular(left, mode * 3, right) 
assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, triangular, bad_left_one, mode * 3, right) - assert_raises(ValueError, triangular, left, bad_mode_one * 3, right) - assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3, + assert_raises(ValueError, rng.triangular, bad_left_one, mode * 3, right) + assert_raises(ValueError, rng.triangular, left, bad_mode_one * 3, right) + assert_raises(ValueError, rng.triangular, bad_left_two, bad_mode_two * 3, right) - self.setSeed() - actual = triangular(left, mode, right * 3) + rng = random.RandomState(self.seed) + actual = rng.triangular(left, mode, right * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, triangular, bad_left_one, mode, right * 3) - assert_raises(ValueError, triangular, left, bad_mode_one, right * 3) - assert_raises(ValueError, triangular, bad_left_two, bad_mode_two, + assert_raises(ValueError, rng.triangular, bad_left_one, mode, right * 3) + assert_raises(ValueError, rng.triangular, left, bad_mode_one, right * 3) + assert_raises(ValueError, rng.triangular, bad_left_two, bad_mode_two, right * 3) def test_binomial(self): @@ -1492,22 +1470,21 @@ def test_binomial(self): bad_n = [-1] bad_p_one = [-1] bad_p_two = [1.5] - binom = np.random.binomial desired = np.array([1, 1, 1]) - self.setSeed() - actual = binom(n * 3, p) + rng = random.RandomState(self.seed) + actual = rng.binomial(n * 3, p) assert_array_equal(actual, desired) - assert_raises(ValueError, binom, bad_n * 3, p) - assert_raises(ValueError, binom, n * 3, bad_p_one) - assert_raises(ValueError, binom, n * 3, bad_p_two) + assert_raises(ValueError, rng.binomial, bad_n * 3, p) + assert_raises(ValueError, rng.binomial, n * 3, bad_p_one) + assert_raises(ValueError, rng.binomial, n * 3, bad_p_two) - self.setSeed() - actual = binom(n, p * 3) + rng = random.RandomState(self.seed) + actual = rng.binomial(n, p * 3) assert_array_equal(actual, desired) - assert_raises(ValueError, binom, 
bad_n, p * 3) - assert_raises(ValueError, binom, n, bad_p_one * 3) - assert_raises(ValueError, binom, n, bad_p_two * 3) + assert_raises(ValueError, rng.binomial, bad_n, p * 3) + assert_raises(ValueError, rng.binomial, n, bad_p_one * 3) + assert_raises(ValueError, rng.binomial, n, bad_p_two * 3) def test_negative_binomial(self): n = [1] @@ -1515,22 +1492,21 @@ def test_negative_binomial(self): bad_n = [-1] bad_p_one = [-1] bad_p_two = [1.5] - neg_binom = np.random.negative_binomial desired = np.array([1, 0, 1]) - self.setSeed() - actual = neg_binom(n * 3, p) + rng = random.RandomState(self.seed) + actual = rng.negative_binomial(n * 3, p) assert_array_equal(actual, desired) - assert_raises(ValueError, neg_binom, bad_n * 3, p) - assert_raises(ValueError, neg_binom, n * 3, bad_p_one) - assert_raises(ValueError, neg_binom, n * 3, bad_p_two) + assert_raises(ValueError, rng.negative_binomial, bad_n * 3, p) + assert_raises(ValueError, rng.negative_binomial, n * 3, bad_p_one) + assert_raises(ValueError, rng.negative_binomial, n * 3, bad_p_two) - self.setSeed() - actual = neg_binom(n, p * 3) + rng = random.RandomState(self.seed) + actual = rng.negative_binomial(n, p * 3) assert_array_equal(actual, desired) - assert_raises(ValueError, neg_binom, bad_n, p * 3) - assert_raises(ValueError, neg_binom, n, bad_p_one * 3) - assert_raises(ValueError, neg_binom, n, bad_p_two * 3) + assert_raises(ValueError, rng.negative_binomial, bad_n, p * 3) + assert_raises(ValueError, rng.negative_binomial, n, bad_p_one * 3) + assert_raises(ValueError, rng.negative_binomial, n, bad_p_two * 3) def test_poisson(self): max_lam = np.random.RandomState()._poisson_lam_max @@ -1538,41 +1514,38 @@ def test_poisson(self): lam = [1] bad_lam_one = [-1] bad_lam_two = [max_lam * 2] - poisson = np.random.poisson desired = np.array([1, 1, 0]) - self.setSeed() - actual = poisson(lam * 3) + rng = random.RandomState(self.seed) + actual = rng.poisson(lam * 3) assert_array_equal(actual, desired) - 
assert_raises(ValueError, poisson, bad_lam_one * 3) - assert_raises(ValueError, poisson, bad_lam_two * 3) + assert_raises(ValueError, rng.poisson, bad_lam_one * 3) + assert_raises(ValueError, rng.poisson, bad_lam_two * 3) def test_zipf(self): a = [2] bad_a = [0] - zipf = np.random.zipf desired = np.array([2, 2, 1]) - self.setSeed() - actual = zipf(a * 3) + rng = random.RandomState(self.seed) + actual = rng.zipf(a * 3) assert_array_equal(actual, desired) - assert_raises(ValueError, zipf, bad_a * 3) + assert_raises(ValueError, rng.zipf, bad_a * 3) with np.errstate(invalid='ignore'): - assert_raises(ValueError, zipf, np.nan) - assert_raises(ValueError, zipf, [0, 0, np.nan]) + assert_raises(ValueError, rng.zipf, np.nan) + assert_raises(ValueError, rng.zipf, [0, 0, np.nan]) def test_geometric(self): p = [0.5] bad_p_one = [-1] bad_p_two = [1.5] - geom = np.random.geometric desired = np.array([2, 2, 2]) - self.setSeed() - actual = geom(p * 3) + rng = random.RandomState(self.seed) + actual = rng.geometric(p * 3) assert_array_equal(actual, desired) - assert_raises(ValueError, geom, bad_p_one * 3) - assert_raises(ValueError, geom, bad_p_two * 3) + assert_raises(ValueError, rng.geometric, bad_p_one * 3) + assert_raises(ValueError, rng.geometric, bad_p_two * 3) def test_hypergeometric(self): ngood = [1] @@ -1582,45 +1555,43 @@ def test_hypergeometric(self): bad_nbad = [-2] bad_nsample_one = [0] bad_nsample_two = [4] - hypergeom = np.random.hypergeometric desired = np.array([1, 1, 1]) - self.setSeed() - actual = hypergeom(ngood * 3, nbad, nsample) + rng = random.RandomState(self.seed) + actual = rng.hypergeometric(ngood * 3, nbad, nsample) assert_array_equal(actual, desired) - assert_raises(ValueError, hypergeom, bad_ngood * 3, nbad, nsample) - assert_raises(ValueError, hypergeom, ngood * 3, bad_nbad, nsample) - assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_one) - assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_two) + 
assert_raises(ValueError, rng.hypergeometric, bad_ngood * 3, nbad, nsample) + assert_raises(ValueError, rng.hypergeometric, ngood * 3, bad_nbad, nsample) + assert_raises(ValueError, rng.hypergeometric, ngood * 3, nbad, bad_nsample_one) + assert_raises(ValueError, rng.hypergeometric, ngood * 3, nbad, bad_nsample_two) - self.setSeed() - actual = hypergeom(ngood, nbad * 3, nsample) + rng = random.RandomState(self.seed) + actual = rng.hypergeometric(ngood, nbad * 3, nsample) assert_array_equal(actual, desired) - assert_raises(ValueError, hypergeom, bad_ngood, nbad * 3, nsample) - assert_raises(ValueError, hypergeom, ngood, bad_nbad * 3, nsample) - assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_one) - assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_two) + assert_raises(ValueError, rng.hypergeometric, bad_ngood, nbad * 3, nsample) + assert_raises(ValueError, rng.hypergeometric, ngood, bad_nbad * 3, nsample) + assert_raises(ValueError, rng.hypergeometric, ngood, nbad * 3, bad_nsample_one) + assert_raises(ValueError, rng.hypergeometric, ngood, nbad * 3, bad_nsample_two) - self.setSeed() - actual = hypergeom(ngood, nbad, nsample * 3) + rng = random.RandomState(self.seed) + actual = rng.hypergeometric(ngood, nbad, nsample * 3) assert_array_equal(actual, desired) - assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3) - assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3) - assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3) - assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3) + assert_raises(ValueError, rng.hypergeometric, bad_ngood, nbad, nsample * 3) + assert_raises(ValueError, rng.hypergeometric, ngood, bad_nbad, nsample * 3) + assert_raises(ValueError, rng.hypergeometric, ngood, nbad, bad_nsample_one * 3) + assert_raises(ValueError, rng.hypergeometric, ngood, nbad, bad_nsample_two * 3) def test_logseries(self): p = [0.5] bad_p_one = [2] bad_p_two = [-1] - logseries = 
np.random.logseries desired = np.array([1, 1, 1]) - self.setSeed() - actual = logseries(p * 3) + rng = random.RandomState(self.seed) + actual = rng.logseries(p * 3) assert_array_equal(actual, desired) - assert_raises(ValueError, logseries, bad_p_one * 3) - assert_raises(ValueError, logseries, bad_p_two * 3) + assert_raises(ValueError, rng.logseries, bad_p_one * 3) + assert_raises(ValueError, rng.logseries, bad_p_two * 3) @pytest.mark.skipif(IS_WASM, reason="can't start thread") diff --git a/numpy/random/tests/test_randomstate.py b/numpy/random/tests/test_randomstate.py index ff7daf0496bb..fd371218616c 100644 --- a/numpy/random/tests/test_randomstate.py +++ b/numpy/random/tests/test_randomstate.py @@ -173,10 +173,10 @@ def test_p_non_contiguous(self): p = np.arange(15.) p /= np.sum(p[1::3]) pvals = p[1::3] - random.seed(1432985819) - non_contig = random.multinomial(100, pvals=pvals) - random.seed(1432985819) - contig = random.multinomial(100, pvals=np.ascontiguousarray(pvals)) + rng = random.RandomState(1432985819) + non_contig = rng.multinomial(100, pvals=pvals) + rng = random.RandomState(1432985819) + contig = rng.multinomial(100, pvals=np.ascontiguousarray(pvals)) assert_array_equal(non_contig, contig) def test_multinomial_pvals_float32(self): @@ -288,47 +288,49 @@ def test_repr(self): class TestRandint: - rfunc = random.randint - # valid integer/boolean types itype = [np.bool, np.int8, np.uint8, np.int16, np.uint16, np.int32, np.uint32, np.int64, np.uint64] def test_unsupported_type(self): - assert_raises(TypeError, self.rfunc, 1, dtype=float) + rng = np.random.RandomState() + assert_raises(TypeError, rng.randint, 1, dtype=float) def test_bounds_checking(self): + rng = np.random.RandomState() for dt in self.itype: lbnd = 0 if dt is np.bool else np.iinfo(dt).min ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1 - assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd, dtype=dt) - assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1, dtype=dt) - 
assert_raises(ValueError, self.rfunc, ubnd, lbnd, dtype=dt) - assert_raises(ValueError, self.rfunc, 1, 0, dtype=dt) + assert_raises(ValueError, rng.randint, lbnd - 1, ubnd, dtype=dt) + assert_raises(ValueError, rng.randint, lbnd, ubnd + 1, dtype=dt) + assert_raises(ValueError, rng.randint, ubnd, lbnd, dtype=dt) + assert_raises(ValueError, rng.randint, 1, 0, dtype=dt) def test_rng_zero_and_extremes(self): + rng = np.random.RandomState() for dt in self.itype: lbnd = 0 if dt is np.bool else np.iinfo(dt).min ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1 tgt = ubnd - 1 - assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt) + assert_equal(rng.randint(tgt, tgt + 1, size=1000, dtype=dt), tgt) tgt = lbnd - assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt) + assert_equal(rng.randint(tgt, tgt + 1, size=1000, dtype=dt), tgt) tgt = (lbnd + ubnd) // 2 - assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt) + assert_equal(rng.randint(tgt, tgt + 1, size=1000, dtype=dt), tgt) def test_full_range(self): # Test for ticket #1690 + rng = np.random.RandomState() for dt in self.itype: lbnd = 0 if dt is np.bool else np.iinfo(dt).min ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1 try: - self.rfunc(lbnd, ubnd, dtype=dt) + rng.randint(lbnd, ubnd, dtype=dt) except Exception as e: raise AssertionError("No error should have been raised, " "but one was with the following " @@ -336,15 +338,15 @@ def test_full_range(self): def test_in_bounds_fuzz(self): # Don't use fixed seed - random.seed() + rng = np.random.RandomState() for dt in self.itype[1:]: for ubnd in [4, 8, 16]: - vals = self.rfunc(2, ubnd, size=2**16, dtype=dt) + vals = rng.randint(2, ubnd, size=2**16, dtype=dt) assert_(vals.max() < ubnd) assert_(vals.min() >= 2) - vals = self.rfunc(0, 2, size=2**16, dtype=np.bool) + vals = rng.randint(0, 2, size=2**16, dtype=np.bool) assert_(vals.max() < 2) assert_(vals.min() >= 0) @@ -364,20 +366,20 @@ def test_repeatability(self): 'uint8': 
'001aac3a5acb935a9b186cbe14a1ca064b8bb2dd0b045d48abeacf74d0203404'} # noqa: E501 for dt in self.itype[1:]: - random.seed(1234) + rng = random.RandomState(1234) # view as little endian for hash if sys.byteorder == 'little': - val = self.rfunc(0, 6, size=1000, dtype=dt) + val = rng.randint(0, 6, size=1000, dtype=dt) else: - val = self.rfunc(0, 6, size=1000, dtype=dt).byteswap() + val = rng.randint(0, 6, size=1000, dtype=dt).byteswap() res = hashlib.sha256(val.view(np.int8)).hexdigest() assert_(tgt[np.dtype(dt).name] == res) # bools do not depend on endianness - random.seed(1234) - val = self.rfunc(0, 2, size=1000, dtype=bool).view(np.int8) + rng = random.RandomState(1234) + val = rng.randint(0, 2, size=1000, dtype=bool).view(np.int8) res = hashlib.sha256(val).hexdigest() assert_(tgt[np.dtype(bool).name] == res) @@ -400,8 +402,8 @@ def test_repeatability_32bit_boundary_broadcasting(self): [2978368172, 764731833, 2282559898], [ 105711276, 720447391, 3596512484]]]) for size in [None, (5, 3, 3)]: - random.seed(12345) - x = self.rfunc([[-1], [0], [1]], [2**32 - 1, 2**32, 2**32 + 1], + rng = random.RandomState(12345) + x = rng.randint([[-1], [0], [1]], [2**32 - 1, 2**32, 2**32 + 1], size=size) assert_array_equal(x, desired if size is not None else desired[0]) @@ -430,11 +432,13 @@ def test_int64_uint64_corner_case(self): def test_respect_dtype_singleton(self): # See gh-7203 + rng = np.random.RandomState() + for dt in self.itype: lbnd = 0 if dt is np.bool else np.iinfo(dt).min ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1 - sample = self.rfunc(lbnd, ubnd, dtype=dt) + sample = rng.randint(lbnd, ubnd, dtype=dt) assert_equal(sample.dtype, np.dtype(dt)) for dt in (bool, int): @@ -445,7 +449,7 @@ def test_respect_dtype_singleton(self): lbnd = 0 if dt is bool else np.iinfo(op_dtype).min ubnd = 2 if dt is bool else np.iinfo(op_dtype).max + 1 - sample = self.rfunc(lbnd, ubnd, dtype=dt) + sample = rng.randint(lbnd, ubnd, dtype=dt) assert_(not hasattr(sample, 'dtype')) 
assert_equal(type(sample), dt) @@ -456,55 +460,54 @@ class TestRandomDist: seed = 1234567890 def test_rand(self): - random.seed(self.seed) - actual = random.rand(3, 2) + rng = random.RandomState(self.seed) + actual = rng.rand(3, 2) desired = np.array([[0.61879477158567997, 0.59162362775974664], [0.88868358904449662, 0.89165480011560816], [0.4575674820298663, 0.7781880808593471]]) assert_array_almost_equal(actual, desired, decimal=15) def test_rand_singleton(self): - random.seed(self.seed) - actual = random.rand() + rng = random.RandomState(self.seed) + actual = rng.rand() desired = 0.61879477158567997 assert_array_almost_equal(actual, desired, decimal=15) def test_randn(self): - random.seed(self.seed) - actual = random.randn(3, 2) + rng = random.RandomState(self.seed) + actual = rng.randn(3, 2) desired = np.array([[1.34016345771863121, 1.73759122771936081], [1.498988344300628, -0.2286433324536169], [2.031033998682787, 2.17032494605655257]]) assert_array_almost_equal(actual, desired, decimal=15) - random.seed(self.seed) - actual = random.randn() + rng = random.RandomState(self.seed) + actual = rng.randn() assert_array_almost_equal(actual, desired[0, 0], decimal=15) def test_randint(self): - random.seed(self.seed) - actual = random.randint(-99, 99, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.randint(-99, 99, size=(3, 2)) desired = np.array([[31, 3], [-52, 41], [-48, -66]]) assert_array_equal(actual, desired) def test_random_integers(self): - random.seed(self.seed) + rng = random.RandomState(self.seed) with pytest.warns(DeprecationWarning): - actual = random.random_integers(-99, 99, size=(3, 2)) + actual = rng.random_integers(-99, 99, size=(3, 2)) desired = np.array([[31, 3], [-52, 41], [-48, -66]]) assert_array_equal(actual, desired) - random.seed(self.seed) + rng = random.RandomState(self.seed) with pytest.warns(DeprecationWarning): - actual = random.random_integers(198, size=(3, 2)) + actual = rng.random_integers(198, size=(3, 2)) 
assert_array_equal(actual, desired + 100) def test_tomaxint(self): - random.seed(self.seed) rs = random.RandomState(self.seed) actual = rs.tomaxint(size=(3, 2)) if np.iinfo(np.long).max == 2147483647: @@ -556,44 +559,44 @@ def test_random_integers_deprecated(self): np.iinfo('l').max, np.iinfo('l').max) def test_random_sample(self): - random.seed(self.seed) - actual = random.random_sample((3, 2)) + rng = random.RandomState(self.seed) + actual = rng.random_sample((3, 2)) desired = np.array([[0.61879477158567997, 0.59162362775974664], [0.88868358904449662, 0.89165480011560816], [0.4575674820298663, 0.7781880808593471]]) assert_array_almost_equal(actual, desired, decimal=15) - random.seed(self.seed) - actual = random.random_sample() + rng = random.RandomState(self.seed) + actual = rng.random_sample() assert_array_almost_equal(actual, desired[0, 0], decimal=15) def test_choice_uniform_replace(self): - random.seed(self.seed) - actual = random.choice(4, 4) + rng = random.RandomState(self.seed) + actual = rng.choice(4, 4) desired = np.array([2, 3, 2, 3]) assert_array_equal(actual, desired) def test_choice_nonuniform_replace(self): - random.seed(self.seed) - actual = random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1]) + rng = random.RandomState(self.seed) + actual = rng.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1]) desired = np.array([1, 1, 2, 2]) assert_array_equal(actual, desired) def test_choice_uniform_noreplace(self): - random.seed(self.seed) - actual = random.choice(4, 3, replace=False) + rng = random.RandomState(self.seed) + actual = rng.choice(4, 3, replace=False) desired = np.array([0, 1, 3]) assert_array_equal(actual, desired) def test_choice_nonuniform_noreplace(self): - random.seed(self.seed) - actual = random.choice(4, 3, replace=False, p=[0.1, 0.3, 0.5, 0.1]) + rng = random.RandomState(self.seed) + actual = rng.choice(4, 3, replace=False, p=[0.1, 0.3, 0.5, 0.1]) desired = np.array([2, 3, 1]) assert_array_equal(actual, desired) def test_choice_noninteger(self): - 
random.seed(self.seed) - actual = random.choice(['a', 'b', 'c', 'd'], 4) + rng = random.RandomState(self.seed) + actual = rng.choice(['a', 'b', 'c', 'd'], 4) desired = np.array(['c', 'd', 'c', 'd']) assert_array_equal(actual, desired) @@ -670,15 +673,15 @@ def test_choice_nan_probabilities(self): def test_choice_p_non_contiguous(self): p = np.ones(10) / 5 p[1::2] = 3.0 - random.seed(self.seed) - non_contig = random.choice(5, 3, p=p[::2]) - random.seed(self.seed) - contig = random.choice(5, 3, p=np.ascontiguousarray(p[::2])) + rng = random.RandomState(self.seed) + non_contig = rng.choice(5, 3, p=p[::2]) + rng = random.RandomState(self.seed) + contig = rng.choice(5, 3, p=np.ascontiguousarray(p[::2])) assert_array_equal(non_contig, contig) def test_bytes(self): - random.seed(self.seed) - actual = random.bytes(10) + rng = random.RandomState(self.seed) + actual = rng.bytes(10) desired = b'\x82Ui\x9e\xff\x97+Wf\xa5' assert_equal(actual, desired) @@ -702,9 +705,9 @@ def test_shuffle(self): lambda x: np.asarray([(i, i) for i in x], [("a", object, (1,)), ("b", np.int32, (1,))])]: - random.seed(self.seed) + rng = random.RandomState(self.seed) alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]) - random.shuffle(alist) + rng.shuffle(alist) actual = alist desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3]) assert_array_equal(actual, desired) @@ -728,35 +731,35 @@ def test_shuffle_invalid_objects(self): assert_raises(TypeError, random.shuffle, x) def test_permutation(self): - random.seed(self.seed) + rng = random.RandomState(self.seed) alist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0] - actual = random.permutation(alist) + actual = rng.permutation(alist) desired = [0, 1, 9, 6, 2, 4, 5, 8, 7, 3] assert_array_equal(actual, desired) - random.seed(self.seed) + rng = random.RandomState(self.seed) arr_2d = np.atleast_2d([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]).T - actual = random.permutation(arr_2d) + actual = rng.permutation(arr_2d) assert_array_equal(actual, np.atleast_2d(desired).T) - random.seed(self.seed) + rng 
= random.RandomState(self.seed) bad_x_str = "abcd" assert_raises(IndexError, random.permutation, bad_x_str) - random.seed(self.seed) + rng = random.RandomState(self.seed) bad_x_float = 1.2 assert_raises(IndexError, random.permutation, bad_x_float) integer_val = 10 desired = [9, 0, 8, 5, 1, 3, 4, 7, 6, 2] - random.seed(self.seed) - actual = random.permutation(integer_val) + rng = random.RandomState(self.seed) + actual = rng.permutation(integer_val) assert_array_equal(actual, desired) def test_beta(self): - random.seed(self.seed) - actual = random.beta(.1, .9, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.beta(.1, .9, size=(3, 2)) desired = np.array( [[1.45341850513746058e-02, 5.31297615662868145e-04], [1.85366619058432324e-06, 4.19214516800110563e-03], @@ -764,30 +767,30 @@ def test_beta(self): assert_array_almost_equal(actual, desired, decimal=15) def test_binomial(self): - random.seed(self.seed) - actual = random.binomial(100.123, .456, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.binomial(100.123, .456, size=(3, 2)) desired = np.array([[37, 43], [42, 48], [46, 45]]) assert_array_equal(actual, desired) - random.seed(self.seed) - actual = random.binomial(100.123, .456) + rng = random.RandomState(self.seed) + actual = rng.binomial(100.123, .456) desired = 37 assert_array_equal(actual, desired) def test_chisquare(self): - random.seed(self.seed) - actual = random.chisquare(50, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.chisquare(50, size=(3, 2)) desired = np.array([[63.87858175501090585, 68.68407748911370447], [65.77116116901505904, 47.09686762438974483], [72.3828403199695174, 74.18408615260374006]]) assert_array_almost_equal(actual, desired, decimal=13) def test_dirichlet(self): - random.seed(self.seed) + rng = random.RandomState(self.seed) alpha = np.array([51.72840233779265162, 39.74494232180943953]) - actual = random.dirichlet(alpha, size=(3, 2)) + actual = rng.dirichlet(alpha, size=(3, 2)) desired = 
np.array([[[0.54539444573611562, 0.45460555426388438], [0.62345816822039413, 0.37654183177960598]], [[0.55206000085785778, 0.44793999914214233], @@ -798,9 +801,9 @@ def test_dirichlet(self): bad_alpha = np.array([5.4e-01, -1.0e-16]) assert_raises(ValueError, random.dirichlet, bad_alpha) - random.seed(self.seed) + rng = random.RandomState(self.seed) alpha = np.array([51.72840233779265162, 39.74494232180943953]) - actual = random.dirichlet(alpha) + actual = rng.dirichlet(alpha) assert_array_almost_equal(actual, desired[0, 0], decimal=15) def test_dirichlet_size(self): @@ -823,16 +826,16 @@ def test_dirichlet_bad_alpha(self): def test_dirichlet_alpha_non_contiguous(self): a = np.array([51.72840233779265162, -1.0, 39.74494232180943953]) alpha = a[::2] - random.seed(self.seed) - non_contig = random.dirichlet(alpha, size=(3, 2)) - random.seed(self.seed) - contig = random.dirichlet(np.ascontiguousarray(alpha), + rng = random.RandomState(self.seed) + non_contig = rng.dirichlet(alpha, size=(3, 2)) + rng = random.RandomState(self.seed) + contig = rng.dirichlet(np.ascontiguousarray(alpha), size=(3, 2)) assert_array_almost_equal(non_contig, contig) def test_exponential(self): - random.seed(self.seed) - actual = random.exponential(1.1234, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.exponential(1.1234, size=(3, 2)) desired = np.array([[1.08342649775011624, 1.00607889924557314], [2.46628830085216721, 2.49668106809923884], [0.68717433461363442, 1.69175666993575979]]) @@ -843,16 +846,16 @@ def test_exponential_0(self): assert_raises(ValueError, random.exponential, scale=-0.) 
def test_f(self): - random.seed(self.seed) - actual = random.f(12, 77, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.f(12, 77, size=(3, 2)) desired = np.array([[1.21975394418575878, 1.75135759791559775], [1.44803115017146489, 1.22108959480396262], [1.02176975757740629, 1.34431827623300415]]) assert_array_almost_equal(actual, desired, decimal=15) def test_gamma(self): - random.seed(self.seed) - actual = random.gamma(5, 3, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.gamma(5, 3, size=(3, 2)) desired = np.array([[24.60509188649287182, 28.54993563207210627], [26.13476110204064184, 12.56988482927716078], [31.71863275789960568, 33.30143302795922011]]) @@ -863,8 +866,8 @@ def test_gamma_0(self): assert_raises(ValueError, random.gamma, shape=-0., scale=-0.) def test_geometric(self): - random.seed(self.seed) - actual = random.geometric(.123456789, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.geometric(.123456789, size=(3, 2)) desired = np.array([[8, 7], [17, 17], [5, 12]]) @@ -881,8 +884,8 @@ def test_geometric_exceptions(self): assert_raises(ValueError, random.geometric, [np.nan] * 10) def test_gumbel(self): - random.seed(self.seed) - actual = random.gumbel(loc=.123456789, scale=2.0, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.gumbel(loc=.123456789, scale=2.0, size=(3, 2)) desired = np.array([[0.19591898743416816, 0.34405539668096674], [-1.4492522252274278, -1.47374816298446865], [1.10651090478803416, -0.69535848626236174]]) @@ -893,34 +896,34 @@ def test_gumbel_0(self): assert_raises(ValueError, random.gumbel, scale=-0.) 
def test_hypergeometric(self): - random.seed(self.seed) - actual = random.hypergeometric(10.1, 5.5, 14, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.hypergeometric(10.1, 5.5, 14, size=(3, 2)) desired = np.array([[10, 10], [10, 10], [9, 9]]) assert_array_equal(actual, desired) # Test nbad = 0 - actual = random.hypergeometric(5, 0, 3, size=4) + actual = rng.hypergeometric(5, 0, 3, size=4) desired = np.array([3, 3, 3, 3]) assert_array_equal(actual, desired) - actual = random.hypergeometric(15, 0, 12, size=4) + actual = rng.hypergeometric(15, 0, 12, size=4) desired = np.array([12, 12, 12, 12]) assert_array_equal(actual, desired) # Test ngood = 0 - actual = random.hypergeometric(0, 5, 3, size=4) + actual = rng.hypergeometric(0, 5, 3, size=4) desired = np.array([0, 0, 0, 0]) assert_array_equal(actual, desired) - actual = random.hypergeometric(0, 15, 12, size=4) + actual = rng.hypergeometric(0, 15, 12, size=4) desired = np.array([0, 0, 0, 0]) assert_array_equal(actual, desired) def test_laplace(self): - random.seed(self.seed) - actual = random.laplace(loc=.123456789, scale=2.0, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.laplace(loc=.123456789, scale=2.0, size=(3, 2)) desired = np.array([[0.66599721112760157, 0.52829452552221945], [3.12791959514407125, 3.18202813572992005], [-0.05391065675859356, 1.74901336242837324]]) @@ -931,16 +934,16 @@ def test_laplace_0(self): assert_raises(ValueError, random.laplace, scale=-0.) 
def test_logistic(self): - random.seed(self.seed) - actual = random.logistic(loc=.123456789, scale=2.0, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.logistic(loc=.123456789, scale=2.0, size=(3, 2)) desired = np.array([[1.09232835305011444, 0.8648196662399954], [4.27818590694950185, 4.33897006346929714], [-0.21682183359214885, 2.63373365386060332]]) assert_array_almost_equal(actual, desired, decimal=15) def test_lognormal(self): - random.seed(self.seed) - actual = random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.lognormal(mean=.123456789, sigma=2.0, size=(3, 2)) desired = np.array([[16.50698631688883822, 36.54846706092654784], [22.67886599981281748, 0.71617561058995771], [65.72798501792723869, 86.84341601437161273]]) @@ -951,8 +954,8 @@ def test_lognormal_0(self): assert_raises(ValueError, random.lognormal, sigma=-0.) def test_logseries(self): - random.seed(self.seed) - actual = random.logseries(p=.923456789, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.logseries(p=.923456789, size=(3, 2)) desired = np.array([[2, 2], [6, 17], [3, 6]]) @@ -974,8 +977,8 @@ def test_logseries_exceptions(self, value): random.logseries(np.array([value] * 10)[::2]) def test_multinomial(self): - random.seed(self.seed) - actual = random.multinomial(20, [1 / 6.] * 6, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.multinomial(20, [1 / 6.] 
* 6, size=(3, 2)) desired = np.array([[[4, 3, 5, 4, 2, 2], [5, 2, 8, 2, 2, 1]], [[3, 4, 3, 6, 0, 4], @@ -985,11 +988,11 @@ def test_multinomial(self): assert_array_equal(actual, desired) def test_multivariate_normal(self): - random.seed(self.seed) + rng = random.RandomState(self.seed) mean = (.123456789, 10) cov = [[1, 0], [0, 1]] size = (3, 2) - actual = random.multivariate_normal(mean, cov, size) + actual = rng.multivariate_normal(mean, cov, size) desired = np.array([[[1.463620246718631, 11.73759122771936], [1.622445133300628, 9.771356667546383]], [[2.154490787682787, 12.170324946056553], @@ -1000,7 +1003,7 @@ def test_multivariate_normal(self): assert_array_almost_equal(actual, desired, decimal=15) # Check for default size, was raising deprecation warning - actual = random.multivariate_normal(mean, cov) + actual = rng.multivariate_normal(mean, cov) desired = np.array([0.895289569463708, 9.17180864067987]) assert_array_almost_equal(actual, desired, decimal=15) @@ -1008,35 +1011,35 @@ def test_multivariate_normal(self): # RuntimeWarning mean = [0, 0] cov = [[1, 2], [2, 1]] - pytest.warns(RuntimeWarning, random.multivariate_normal, mean, cov) + pytest.warns(RuntimeWarning, rng.multivariate_normal, mean, cov) # and that it doesn't warn with RuntimeWarning check_valid='ignore' - assert_no_warnings(random.multivariate_normal, mean, cov, + assert_no_warnings(rng.multivariate_normal, mean, cov, check_valid='ignore') # and that it raises with RuntimeWarning check_valid='raises' - assert_raises(ValueError, random.multivariate_normal, mean, cov, + assert_raises(ValueError, rng.multivariate_normal, mean, cov, check_valid='raise') cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32) with warnings.catch_warnings(): warnings.simplefilter('error', RuntimeWarning) - random.multivariate_normal(mean, cov) + rng.multivariate_normal(mean, cov) mu = np.zeros(2) cov = np.eye(2) - assert_raises(ValueError, random.multivariate_normal, mean, cov, + assert_raises(ValueError, 
rng.multivariate_normal, mean, cov, check_valid='other') - assert_raises(ValueError, random.multivariate_normal, + assert_raises(ValueError, rng.multivariate_normal, np.zeros((2, 1, 1)), cov) - assert_raises(ValueError, random.multivariate_normal, + assert_raises(ValueError, rng.multivariate_normal, mu, np.empty((3, 2))) - assert_raises(ValueError, random.multivariate_normal, + assert_raises(ValueError, rng.multivariate_normal, mu, np.eye(3)) def test_negative_binomial(self): - random.seed(self.seed) - actual = random.negative_binomial(n=100, p=.12345, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.negative_binomial(n=100, p=.12345, size=(3, 2)) desired = np.array([[848, 841], [892, 611], [779, 647]]) @@ -1050,29 +1053,29 @@ def test_negative_binomial_exceptions(self): [np.nan] * 10) def test_noncentral_chisquare(self): - random.seed(self.seed) - actual = random.noncentral_chisquare(df=5, nonc=5, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.noncentral_chisquare(df=5, nonc=5, size=(3, 2)) desired = np.array([[23.91905354498517511, 13.35324692733826346], [31.22452661329736401, 16.60047399466177254], [5.03461598262724586, 17.94973089023519464]]) assert_array_almost_equal(actual, desired, decimal=14) - actual = random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2)) + actual = rng.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2)) desired = np.array([[1.47145377828516666, 0.15052899268012659], [0.00943803056963588, 1.02647251615666169], [0.332334982684171, 0.15451287602753125]]) assert_array_almost_equal(actual, desired, decimal=14) - random.seed(self.seed) - actual = random.noncentral_chisquare(df=5, nonc=0, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.noncentral_chisquare(df=5, nonc=0, size=(3, 2)) desired = np.array([[9.597154162763948, 11.725484450296079], [10.413711048138335, 3.694475922923986], [13.484222138963087, 14.377255424602957]]) assert_array_almost_equal(actual, desired, decimal=14) def 
test_noncentral_f(self): - random.seed(self.seed) - actual = random.noncentral_f(dfnum=5, dfden=2, nonc=1, + rng = random.RandomState(self.seed) + actual = rng.noncentral_f(dfnum=5, dfden=2, nonc=1, size=(3, 2)) desired = np.array([[1.40598099674926669, 0.34207973179285761], [3.57715069265772545, 7.92632662577829805], @@ -1085,8 +1088,8 @@ def test_noncentral_f_nan(self): assert np.isnan(actual) def test_normal(self): - random.seed(self.seed) - actual = random.normal(loc=.123456789, scale=2.0, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.normal(loc=.123456789, scale=2.0, size=(3, 2)) desired = np.array([[2.80378370443726244, 3.59863924443872163], [3.121433477601256, -0.33382987590723379], [4.18552478636557357, 4.46410668111310471]]) @@ -1097,8 +1100,8 @@ def test_normal_0(self): assert_raises(ValueError, random.normal, scale=-0.) def test_pareto(self): - random.seed(self.seed) - actual = random.pareto(a=.123456789, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.pareto(a=.123456789, size=(3, 2)) desired = np.array( [[2.46852460439034849e+03, 1.41286880810518346e+03], [5.28287797029485181e+07, 6.57720981047328785e+07], @@ -1112,8 +1115,8 @@ def test_pareto(self): np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30) def test_poisson(self): - random.seed(self.seed) - actual = random.poisson(lam=.123456789, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.poisson(lam=.123456789, size=(3, 2)) desired = np.array([[0, 0], [1, 0], [0, 0]]) @@ -1132,16 +1135,16 @@ def test_poisson_exceptions(self): assert_raises(ValueError, random.poisson, [np.nan] * 10) def test_power(self): - random.seed(self.seed) - actual = random.power(a=.123456789, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.power(a=.123456789, size=(3, 2)) desired = np.array([[0.02048932883240791, 0.01424192241128213], [0.38446073748535298, 0.39499689943484395], [0.00177699707563439, 0.13115505880863756]]) 
assert_array_almost_equal(actual, desired, decimal=15) def test_rayleigh(self): - random.seed(self.seed) - actual = random.rayleigh(scale=10, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.rayleigh(scale=10, size=(3, 2)) desired = np.array([[13.8882496494248393, 13.383318339044731], [20.95413364294492098, 21.08285015800712614], [11.06066537006854311, 17.35468505778271009]]) @@ -1152,24 +1155,24 @@ def test_rayleigh_0(self): assert_raises(ValueError, random.rayleigh, scale=-0.) def test_standard_cauchy(self): - random.seed(self.seed) - actual = random.standard_cauchy(size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.standard_cauchy(size=(3, 2)) desired = np.array([[0.77127660196445336, -6.55601161955910605], [0.93582023391158309, -2.07479293013759447], [-4.74601644297011926, 0.18338989290760804]]) assert_array_almost_equal(actual, desired, decimal=15) def test_standard_exponential(self): - random.seed(self.seed) - actual = random.standard_exponential(size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.standard_exponential(size=(3, 2)) desired = np.array([[0.96441739162374596, 0.89556604882105506], [2.1953785836319808, 2.22243285392490542], [0.6116915921431676, 1.50592546727413201]]) assert_array_almost_equal(actual, desired, decimal=15) def test_standard_gamma(self): - random.seed(self.seed) - actual = random.standard_gamma(shape=3, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.standard_gamma(shape=3, size=(3, 2)) desired = np.array([[5.50841531318455058, 6.62953470301903103], [5.93988484943779227, 2.31044849402133989], [7.54838614231317084, 8.012756093271868]]) @@ -1180,30 +1183,30 @@ def test_standard_gamma_0(self): assert_raises(ValueError, random.standard_gamma, shape=-0.) 
def test_standard_normal(self): - random.seed(self.seed) - actual = random.standard_normal(size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.standard_normal(size=(3, 2)) desired = np.array([[1.34016345771863121, 1.73759122771936081], [1.498988344300628, -0.2286433324536169], [2.031033998682787, 2.17032494605655257]]) assert_array_almost_equal(actual, desired, decimal=15) def test_randn_singleton(self): - random.seed(self.seed) - actual = random.randn() + rng = random.RandomState(self.seed) + actual = rng.randn() desired = np.array(1.34016345771863121) assert_array_almost_equal(actual, desired, decimal=15) def test_standard_t(self): - random.seed(self.seed) - actual = random.standard_t(df=10, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.standard_t(df=10, size=(3, 2)) desired = np.array([[0.97140611862659965, -0.08830486548450577], [1.36311143689505321, -0.55317463909867071], [-0.18473749069684214, 0.61181537341755321]]) assert_array_almost_equal(actual, desired, decimal=15) def test_triangular(self): - random.seed(self.seed) - actual = random.triangular(left=5.12, mode=10.23, right=20.34, + rng = random.RandomState(self.seed) + actual = rng.triangular(left=5.12, mode=10.23, right=20.34, size=(3, 2)) desired = np.array([[12.68117178949215784, 12.4129206149193152], [16.20131377335158263, 16.25692138747600524], @@ -1211,8 +1214,8 @@ def test_triangular(self): assert_array_almost_equal(actual, desired, decimal=14) def test_uniform(self): - random.seed(self.seed) - actual = random.uniform(low=1.23, high=10.54, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.uniform(low=1.23, high=10.54, size=(3, 2)) desired = np.array([[6.99097932346268003, 6.73801597444323974], [9.50364421400426274, 9.53130618907631089], [5.48995325769805476, 8.47493103280052118]]) @@ -1257,8 +1260,8 @@ def __int__(self): assert_raises(TypeError, random.hypergeometric, throwing_int, 1, 1) def test_vonmises(self): - random.seed(self.seed) - actual = 
random.vonmises(mu=1.23, kappa=1.54, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.vonmises(mu=1.23, kappa=1.54, size=(3, 2)) desired = np.array([[2.28567572673902042, 2.89163838442285037], [0.38198375564286025, 2.57638023113890746], [1.19153771588353052, 1.83509849681825354]]) @@ -1272,8 +1275,8 @@ def test_vonmises_small(self): def test_vonmises_large(self): # guard against changes in RandomState when Generator is fixed - random.seed(self.seed) - actual = random.vonmises(mu=0., kappa=1e7, size=3) + rng = random.RandomState(self.seed) + actual = rng.vonmises(mu=0., kappa=1e7, size=3) desired = np.array([4.634253748521111e-04, 3.558873596114509e-04, -2.337119622577433e-04]) @@ -1285,16 +1288,16 @@ def test_vonmises_nan(self): assert_(np.isnan(r)) def test_wald(self): - random.seed(self.seed) - actual = random.wald(mean=1.23, scale=1.54, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.wald(mean=1.23, scale=1.54, size=(3, 2)) desired = np.array([[3.82935265715889983, 5.13125249184285526], [0.35045403618358717, 1.50832396872003538], [0.24124319895843183, 0.22031101461955038]]) assert_array_almost_equal(actual, desired, decimal=14) def test_weibull(self): - random.seed(self.seed) - actual = random.weibull(a=1.23, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.weibull(a=1.23, size=(3, 2)) desired = np.array([[0.97097342648766727, 0.91422896443565516], [1.89517770034962929, 1.91414357960479564], [0.67057783752390987, 1.39494046635066793]]) @@ -1306,8 +1309,8 @@ def test_weibull_0(self): assert_raises(ValueError, random.weibull, a=-0.) 
def test_zipf(self): - random.seed(self.seed) - actual = random.zipf(a=1.23, size=(3, 2)) + rng = random.RandomState(self.seed) + actual = rng.zipf(a=1.23, size=(3, 2)) desired = np.array([[66, 29], [1, 1], [3, 13]]) @@ -1319,135 +1322,125 @@ class TestBroadcast: # correctly when presented with non-scalar arguments seed = 123456789 - def set_seed(self): - random.seed(self.seed) - def test_uniform(self): low = [0] high = [1] - uniform = random.uniform desired = np.array([0.53283302478975902, 0.53413660089041659, 0.50955303552646702]) - self.set_seed() - actual = uniform(low * 3, high) + rng = random.RandomState(self.seed) + actual = rng.uniform(low * 3, high) assert_array_almost_equal(actual, desired, decimal=14) - self.set_seed() - actual = uniform(low, high * 3) + rng = random.RandomState(self.seed) + actual = rng.uniform(low, high * 3) assert_array_almost_equal(actual, desired, decimal=14) def test_normal(self): loc = [0] scale = [1] bad_scale = [-1] - normal = random.normal desired = np.array([2.2129019979039612, 2.1283977976520019, 1.8417114045748335]) - self.set_seed() - actual = normal(loc * 3, scale) + rng = random.RandomState(self.seed) + actual = rng.normal(loc * 3, scale) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, normal, loc * 3, bad_scale) + assert_raises(ValueError, rng.normal, loc * 3, bad_scale) - self.set_seed() - actual = normal(loc, scale * 3) + rng = random.RandomState(self.seed) + actual = rng.normal(loc, scale * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, normal, loc, bad_scale * 3) + assert_raises(ValueError, rng.normal, loc, bad_scale * 3) def test_beta(self): a = [1] b = [2] bad_a = [-1] bad_b = [-2] - beta = random.beta desired = np.array([0.19843558305989056, 0.075230336409423643, 0.24976865978980844]) - self.set_seed() - actual = beta(a * 3, b) + rng = random.RandomState(self.seed) + actual = rng.beta(a * 3, b) assert_array_almost_equal(actual, desired, 
decimal=14) - assert_raises(ValueError, beta, bad_a * 3, b) - assert_raises(ValueError, beta, a * 3, bad_b) + assert_raises(ValueError, rng.beta, bad_a * 3, b) + assert_raises(ValueError, rng.beta, a * 3, bad_b) - self.set_seed() - actual = beta(a, b * 3) + rng = random.RandomState(self.seed) + actual = rng.beta(a, b * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, beta, bad_a, b * 3) - assert_raises(ValueError, beta, a, bad_b * 3) + assert_raises(ValueError, rng.beta, bad_a, b * 3) + assert_raises(ValueError, rng.beta, a, bad_b * 3) def test_exponential(self): scale = [1] bad_scale = [-1] - exponential = random.exponential desired = np.array([0.76106853658845242, 0.76386282278691653, 0.71243813125891797]) - self.set_seed() - actual = exponential(scale * 3) + rng = random.RandomState(self.seed) + actual = rng.exponential(scale * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, exponential, bad_scale * 3) + assert_raises(ValueError, rng.exponential, bad_scale * 3) def test_standard_gamma(self): shape = [1] bad_shape = [-1] - std_gamma = random.standard_gamma desired = np.array([0.76106853658845242, 0.76386282278691653, 0.71243813125891797]) - self.set_seed() - actual = std_gamma(shape * 3) + rng = random.RandomState(self.seed) + actual = rng.standard_gamma(shape * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, std_gamma, bad_shape * 3) + assert_raises(ValueError, rng.standard_gamma, bad_shape * 3) def test_gamma(self): shape = [1] scale = [2] bad_shape = [-1] bad_scale = [-2] - gamma = random.gamma desired = np.array([1.5221370731769048, 1.5277256455738331, 1.4248762625178359]) - self.set_seed() - actual = gamma(shape * 3, scale) + rng = random.RandomState(self.seed) + actual = rng.gamma(shape * 3, scale) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, gamma, bad_shape * 3, scale) - assert_raises(ValueError, gamma, 
shape * 3, bad_scale) + assert_raises(ValueError, rng.gamma, bad_shape * 3, scale) + assert_raises(ValueError, rng.gamma, shape * 3, bad_scale) - self.set_seed() - actual = gamma(shape, scale * 3) + rng = random.RandomState(self.seed) + actual = rng.gamma(shape, scale * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, gamma, bad_shape, scale * 3) - assert_raises(ValueError, gamma, shape, bad_scale * 3) + assert_raises(ValueError, rng.gamma, bad_shape, scale * 3) + assert_raises(ValueError, rng.gamma, shape, bad_scale * 3) def test_f(self): dfnum = [1] dfden = [2] bad_dfnum = [-1] bad_dfden = [-2] - f = random.f desired = np.array([0.80038951638264799, 0.86768719635363512, 2.7251095168386801]) - self.set_seed() - actual = f(dfnum * 3, dfden) + rng = random.RandomState(self.seed) + actual = rng.f(dfnum * 3, dfden) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, f, bad_dfnum * 3, dfden) - assert_raises(ValueError, f, dfnum * 3, bad_dfden) + assert_raises(ValueError, rng.f, bad_dfnum * 3, dfden) + assert_raises(ValueError, rng.f, dfnum * 3, bad_dfden) - self.set_seed() - actual = f(dfnum, dfden * 3) + rng = random.RandomState(self.seed) + actual = rng.f(dfnum, dfden * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, f, bad_dfnum, dfden * 3) - assert_raises(ValueError, f, dfnum, bad_dfden * 3) + assert_raises(ValueError, rng.f, bad_dfnum, dfden * 3) + assert_raises(ValueError, rng.f, dfnum, bad_dfden * 3) def test_noncentral_f(self): dfnum = [2] @@ -1456,267 +1449,253 @@ def test_noncentral_f(self): bad_dfnum = [0] bad_dfden = [-1] bad_nonc = [-2] - nonc_f = random.noncentral_f desired = np.array([9.1393943263705211, 13.025456344595602, 8.8018098359100545]) - self.set_seed() - actual = nonc_f(dfnum * 3, dfden, nonc) + rng = random.RandomState(self.seed) + actual = rng.noncentral_f(dfnum * 3, dfden, nonc) assert_array_almost_equal(actual, desired, decimal=14) - 
assert np.all(np.isnan(nonc_f(dfnum, dfden, [np.nan] * 3))) + assert np.all(np.isnan(rng.noncentral_f(dfnum, dfden, [np.nan] * 3))) - assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc) - assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc) - assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc) + assert_raises(ValueError, rng.noncentral_f, bad_dfnum * 3, dfden, nonc) + assert_raises(ValueError, rng.noncentral_f, dfnum * 3, bad_dfden, nonc) + assert_raises(ValueError, rng.noncentral_f, dfnum * 3, dfden, bad_nonc) - self.set_seed() - actual = nonc_f(dfnum, dfden * 3, nonc) + rng = random.RandomState(self.seed) + actual = rng.noncentral_f(dfnum, dfden * 3, nonc) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc) - assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc) - assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc) + assert_raises(ValueError, rng.noncentral_f, bad_dfnum, dfden * 3, nonc) + assert_raises(ValueError, rng.noncentral_f, dfnum, bad_dfden * 3, nonc) + assert_raises(ValueError, rng.noncentral_f, dfnum, dfden * 3, bad_nonc) - self.set_seed() - actual = nonc_f(dfnum, dfden, nonc * 3) + rng = random.RandomState(self.seed) + actual = rng.noncentral_f(dfnum, dfden, nonc * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3) - assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3) - assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3) + assert_raises(ValueError, rng.noncentral_f, bad_dfnum, dfden, nonc * 3) + assert_raises(ValueError, rng.noncentral_f, dfnum, bad_dfden, nonc * 3) + assert_raises(ValueError, rng.noncentral_f, dfnum, dfden, bad_nonc * 3) def test_noncentral_f_small_df(self): - self.set_seed() + rng = random.RandomState(self.seed) desired = np.array([6.869638627492048, 0.785880199263955]) - actual = random.noncentral_f(0.9, 0.9, 2, size=2) + actual = 
rng.noncentral_f(0.9, 0.9, 2, size=2) assert_array_almost_equal(actual, desired, decimal=14) def test_chisquare(self): df = [1] bad_df = [-1] - chisquare = random.chisquare desired = np.array([0.57022801133088286, 0.51947702108840776, 0.1320969254923558]) - self.set_seed() - actual = chisquare(df * 3) + rng = random.RandomState(self.seed) + actual = rng.chisquare(df * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, chisquare, bad_df * 3) + assert_raises(ValueError, rng.chisquare, bad_df * 3) def test_noncentral_chisquare(self): df = [1] nonc = [2] bad_df = [-1] bad_nonc = [-2] - nonc_chi = random.noncentral_chisquare desired = np.array([9.0015599467913763, 4.5804135049718742, 6.0872302432834564]) - self.set_seed() - actual = nonc_chi(df * 3, nonc) + rng = random.RandomState(self.seed) + actual = rng.noncentral_chisquare(df * 3, nonc) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, nonc_chi, bad_df * 3, nonc) - assert_raises(ValueError, nonc_chi, df * 3, bad_nonc) + assert_raises(ValueError, rng.noncentral_chisquare, bad_df * 3, nonc) + assert_raises(ValueError, rng.noncentral_chisquare, df * 3, bad_nonc) - self.set_seed() - actual = nonc_chi(df, nonc * 3) + rng = random.RandomState(self.seed) + actual = rng.noncentral_chisquare(df, nonc * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, nonc_chi, bad_df, nonc * 3) - assert_raises(ValueError, nonc_chi, df, bad_nonc * 3) + assert_raises(ValueError, rng.noncentral_chisquare, bad_df, nonc * 3) + assert_raises(ValueError, rng.noncentral_chisquare, df, bad_nonc * 3) def test_standard_t(self): df = [1] bad_df = [-1] - t = random.standard_t desired = np.array([3.0702872575217643, 5.8560725167361607, 1.0274791436474273]) - self.set_seed() - actual = t(df * 3) + rng = random.RandomState(self.seed) + actual = rng.standard_t(df * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, t, 
bad_df * 3) + assert_raises(ValueError, rng.standard_t, bad_df * 3) assert_raises(ValueError, random.standard_t, bad_df * 3) def test_vonmises(self): mu = [2] kappa = [1] bad_kappa = [-1] - vonmises = random.vonmises desired = np.array([2.9883443664201312, -2.7064099483995943, -1.8672476700665914]) - self.set_seed() - actual = vonmises(mu * 3, kappa) + rng = random.RandomState(self.seed) + actual = rng.vonmises(mu * 3, kappa) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, vonmises, mu * 3, bad_kappa) + assert_raises(ValueError, rng.vonmises, mu * 3, bad_kappa) - self.set_seed() - actual = vonmises(mu, kappa * 3) + rng = random.RandomState(self.seed) + actual = rng.vonmises(mu, kappa * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, vonmises, mu, bad_kappa * 3) + assert_raises(ValueError, rng.vonmises, mu, bad_kappa * 3) def test_pareto(self): a = [1] bad_a = [-1] - pareto = random.pareto desired = np.array([1.1405622680198362, 1.1465519762044529, 1.0389564467453547]) - self.set_seed() - actual = pareto(a * 3) + rng = random.RandomState(self.seed) + actual = rng.pareto(a * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, pareto, bad_a * 3) + assert_raises(ValueError, rng.pareto, bad_a * 3) assert_raises(ValueError, random.pareto, bad_a * 3) def test_weibull(self): a = [1] bad_a = [-1] - weibull = random.weibull desired = np.array([0.76106853658845242, 0.76386282278691653, 0.71243813125891797]) - self.set_seed() - actual = weibull(a * 3) + rng = random.RandomState(self.seed) + actual = rng.weibull(a * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, weibull, bad_a * 3) + assert_raises(ValueError, rng.weibull, bad_a * 3) assert_raises(ValueError, random.weibull, bad_a * 3) def test_power(self): a = [1] bad_a = [-1] - power = random.power desired = np.array([0.53283302478975902, 0.53413660089041659, 0.50955303552646702]) - 
self.set_seed() - actual = power(a * 3) + rng = random.RandomState(self.seed) + actual = rng.power(a * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, power, bad_a * 3) + assert_raises(ValueError, rng.power, bad_a * 3) assert_raises(ValueError, random.power, bad_a * 3) def test_laplace(self): loc = [0] scale = [1] bad_scale = [-1] - laplace = random.laplace desired = np.array([0.067921356028507157, 0.070715642226971326, 0.019290950698972624]) - self.set_seed() - actual = laplace(loc * 3, scale) + rng = random.RandomState(self.seed) + actual = rng.laplace(loc * 3, scale) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, laplace, loc * 3, bad_scale) + assert_raises(ValueError, rng.laplace, loc * 3, bad_scale) - self.set_seed() - actual = laplace(loc, scale * 3) + rng = random.RandomState(self.seed) + actual = rng.laplace(loc, scale * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, laplace, loc, bad_scale * 3) + assert_raises(ValueError, rng.laplace, loc, bad_scale * 3) def test_gumbel(self): loc = [0] scale = [1] bad_scale = [-1] - gumbel = random.gumbel desired = np.array([0.2730318639556768, 0.26936705726291116, 0.33906220393037939]) - self.set_seed() - actual = gumbel(loc * 3, scale) + rng = random.RandomState(self.seed) + actual = rng.gumbel(loc * 3, scale) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, gumbel, loc * 3, bad_scale) + assert_raises(ValueError, rng.gumbel, loc * 3, bad_scale) - self.set_seed() - actual = gumbel(loc, scale * 3) + rng = random.RandomState(self.seed) + actual = rng.gumbel(loc, scale * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, gumbel, loc, bad_scale * 3) + assert_raises(ValueError, rng.gumbel, loc, bad_scale * 3) def test_logistic(self): loc = [0] scale = [1] bad_scale = [-1] - logistic = random.logistic desired = np.array([0.13152135837586171, 
0.13675915696285773, 0.038216792802833396]) - self.set_seed() - actual = logistic(loc * 3, scale) + rng = random.RandomState(self.seed) + actual = rng.logistic(loc * 3, scale) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, logistic, loc * 3, bad_scale) + assert_raises(ValueError, rng.logistic, loc * 3, bad_scale) - self.set_seed() - actual = logistic(loc, scale * 3) + rng = random.RandomState(self.seed) + actual = rng.logistic(loc, scale * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, logistic, loc, bad_scale * 3) - assert_equal(random.logistic(1.0, 0.0), 1.0) + assert_raises(ValueError, rng.logistic, loc, bad_scale * 3) + assert_equal(rng.logistic(1.0, 0.0), 1.0) def test_lognormal(self): mean = [0] sigma = [1] bad_sigma = [-1] - lognormal = random.lognormal desired = np.array([9.1422086044848427, 8.4013952870126261, 6.3073234116578671]) - self.set_seed() - actual = lognormal(mean * 3, sigma) + rng = random.RandomState(self.seed) + actual = rng.lognormal(mean * 3, sigma) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, lognormal, mean * 3, bad_sigma) + assert_raises(ValueError, rng.lognormal, mean * 3, bad_sigma) assert_raises(ValueError, random.lognormal, mean * 3, bad_sigma) - self.set_seed() - actual = lognormal(mean, sigma * 3) + rng = random.RandomState(self.seed) + actual = rng.lognormal(mean, sigma * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, lognormal, mean, bad_sigma * 3) + assert_raises(ValueError, rng.lognormal, mean, bad_sigma * 3) assert_raises(ValueError, random.lognormal, mean, bad_sigma * 3) def test_rayleigh(self): scale = [1] bad_scale = [-1] - rayleigh = random.rayleigh desired = np.array([1.2337491937897689, 1.2360119924878694, 1.1936818095781789]) - self.set_seed() - actual = rayleigh(scale * 3) + rng = random.RandomState(self.seed) + actual = rng.rayleigh(scale * 3) 
assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, rayleigh, bad_scale * 3) + assert_raises(ValueError, rng.rayleigh, bad_scale * 3) def test_wald(self): mean = [0.5] scale = [1] bad_mean = [0] bad_scale = [-2] - wald = random.wald desired = np.array([0.11873681120271318, 0.12450084820795027, 0.9096122728408238]) - self.set_seed() - actual = wald(mean * 3, scale) + rng = random.RandomState(self.seed) + actual = rng.wald(mean * 3, scale) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, wald, bad_mean * 3, scale) - assert_raises(ValueError, wald, mean * 3, bad_scale) + assert_raises(ValueError, rng.wald, bad_mean * 3, scale) + assert_raises(ValueError, rng.wald, mean * 3, bad_scale) assert_raises(ValueError, random.wald, bad_mean * 3, scale) assert_raises(ValueError, random.wald, mean * 3, bad_scale) - self.set_seed() - actual = wald(mean, scale * 3) + rng = random.RandomState(self.seed) + actual = rng.wald(mean, scale * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, wald, bad_mean, scale * 3) - assert_raises(ValueError, wald, mean, bad_scale * 3) - assert_raises(ValueError, wald, 0.0, 1) - assert_raises(ValueError, wald, 0.5, 0.0) + assert_raises(ValueError, rng.wald, bad_mean, scale * 3) + assert_raises(ValueError, rng.wald, mean, bad_scale * 3) + assert_raises(ValueError, rng.wald, 0.0, 1) + assert_raises(ValueError, rng.wald, 0.5, 0.0) def test_triangular(self): left = [1] @@ -1725,38 +1704,37 @@ def test_triangular(self): bad_left_one = [3] bad_mode_one = [4] bad_left_two, bad_mode_two = right * 2 - triangular = random.triangular desired = np.array([2.03339048710429, 2.0347400359389356, 2.0095991069536208]) - self.set_seed() - actual = triangular(left * 3, mode, right) + rng = random.RandomState(self.seed) + actual = rng.triangular(left * 3, mode, right) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, triangular, 
bad_left_one * 3, mode, right) - assert_raises(ValueError, triangular, left * 3, bad_mode_one, right) - assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two, + assert_raises(ValueError, rng.triangular, bad_left_one * 3, mode, right) + assert_raises(ValueError, rng.triangular, left * 3, bad_mode_one, right) + assert_raises(ValueError, rng.triangular, bad_left_two * 3, bad_mode_two, right) - self.set_seed() - actual = triangular(left, mode * 3, right) + rng = random.RandomState(self.seed) + actual = rng.triangular(left, mode * 3, right) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, triangular, bad_left_one, mode * 3, right) - assert_raises(ValueError, triangular, left, bad_mode_one * 3, right) - assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3, + assert_raises(ValueError, rng.triangular, bad_left_one, mode * 3, right) + assert_raises(ValueError, rng.triangular, left, bad_mode_one * 3, right) + assert_raises(ValueError, rng.triangular, bad_left_two, bad_mode_two * 3, right) - self.set_seed() - actual = triangular(left, mode, right * 3) + rng = random.RandomState(self.seed) + actual = rng.triangular(left, mode, right * 3) assert_array_almost_equal(actual, desired, decimal=14) - assert_raises(ValueError, triangular, bad_left_one, mode, right * 3) - assert_raises(ValueError, triangular, left, bad_mode_one, right * 3) - assert_raises(ValueError, triangular, bad_left_two, bad_mode_two, + assert_raises(ValueError, rng.triangular, bad_left_one, mode, right * 3) + assert_raises(ValueError, rng.triangular, left, bad_mode_one, right * 3) + assert_raises(ValueError, rng.triangular, bad_left_two, bad_mode_two, right * 3) - assert_raises(ValueError, triangular, 10., 0., 20.) - assert_raises(ValueError, triangular, 10., 25., 20.) - assert_raises(ValueError, triangular, 10., 10., 10.) + assert_raises(ValueError, rng.triangular, 10., 0., 20.) + assert_raises(ValueError, rng.triangular, 10., 25., 20.) 
+ assert_raises(ValueError, rng.triangular, 10., 10., 10.) def test_binomial(self): n = [1] @@ -1764,22 +1742,21 @@ def test_binomial(self): bad_n = [-1] bad_p_one = [-1] bad_p_two = [1.5] - binom = random.binomial desired = np.array([1, 1, 1]) - self.set_seed() - actual = binom(n * 3, p) + rng = random.RandomState(self.seed) + actual = rng.binomial(n * 3, p) assert_array_equal(actual, desired) - assert_raises(ValueError, binom, bad_n * 3, p) - assert_raises(ValueError, binom, n * 3, bad_p_one) - assert_raises(ValueError, binom, n * 3, bad_p_two) + assert_raises(ValueError, rng.binomial, bad_n * 3, p) + assert_raises(ValueError, rng.binomial, n * 3, bad_p_one) + assert_raises(ValueError, rng.binomial, n * 3, bad_p_two) - self.set_seed() - actual = binom(n, p * 3) + rng = random.RandomState(self.seed) + actual = rng.binomial(n, p * 3) assert_array_equal(actual, desired) - assert_raises(ValueError, binom, bad_n, p * 3) - assert_raises(ValueError, binom, n, bad_p_one * 3) - assert_raises(ValueError, binom, n, bad_p_two * 3) + assert_raises(ValueError, rng.binomial, bad_n, p * 3) + assert_raises(ValueError, rng.binomial, n, bad_p_one * 3) + assert_raises(ValueError, rng.binomial, n, bad_p_two * 3) def test_negative_binomial(self): n = [1] @@ -1787,22 +1764,21 @@ def test_negative_binomial(self): bad_n = [-1] bad_p_one = [-1] bad_p_two = [1.5] - neg_binom = random.negative_binomial desired = np.array([1, 0, 1]) - self.set_seed() - actual = neg_binom(n * 3, p) + rng = random.RandomState(self.seed) + actual = rng.negative_binomial(n * 3, p) assert_array_equal(actual, desired) - assert_raises(ValueError, neg_binom, bad_n * 3, p) - assert_raises(ValueError, neg_binom, n * 3, bad_p_one) - assert_raises(ValueError, neg_binom, n * 3, bad_p_two) + assert_raises(ValueError, rng.negative_binomial, bad_n * 3, p) + assert_raises(ValueError, rng.negative_binomial, n * 3, bad_p_one) + assert_raises(ValueError, rng.negative_binomial, n * 3, bad_p_two) - self.set_seed() - actual = 
neg_binom(n, p * 3) + rng = random.RandomState(self.seed) + actual = rng.negative_binomial(n, p * 3) assert_array_equal(actual, desired) - assert_raises(ValueError, neg_binom, bad_n, p * 3) - assert_raises(ValueError, neg_binom, n, bad_p_one * 3) - assert_raises(ValueError, neg_binom, n, bad_p_two * 3) + assert_raises(ValueError, rng.negative_binomial, bad_n, p * 3) + assert_raises(ValueError, rng.negative_binomial, n, bad_p_one * 3) + assert_raises(ValueError, rng.negative_binomial, n, bad_p_two * 3) def test_poisson(self): max_lam = random.RandomState()._poisson_lam_max @@ -1810,41 +1786,38 @@ def test_poisson(self): lam = [1] bad_lam_one = [-1] bad_lam_two = [max_lam * 2] - poisson = random.poisson desired = np.array([1, 1, 0]) - self.set_seed() - actual = poisson(lam * 3) + rng = random.RandomState(self.seed) + actual = rng.poisson(lam * 3) assert_array_equal(actual, desired) - assert_raises(ValueError, poisson, bad_lam_one * 3) - assert_raises(ValueError, poisson, bad_lam_two * 3) + assert_raises(ValueError, rng.poisson, bad_lam_one * 3) + assert_raises(ValueError, rng.poisson, bad_lam_two * 3) def test_zipf(self): a = [2] bad_a = [0] - zipf = random.zipf desired = np.array([2, 2, 1]) - self.set_seed() - actual = zipf(a * 3) + rng = random.RandomState(self.seed) + actual = rng.zipf(a * 3) assert_array_equal(actual, desired) - assert_raises(ValueError, zipf, bad_a * 3) + assert_raises(ValueError, rng.zipf, bad_a * 3) with np.errstate(invalid='ignore'): - assert_raises(ValueError, zipf, np.nan) - assert_raises(ValueError, zipf, [0, 0, np.nan]) + assert_raises(ValueError, rng.zipf, np.nan) + assert_raises(ValueError, rng.zipf, [0, 0, np.nan]) def test_geometric(self): p = [0.5] bad_p_one = [-1] bad_p_two = [1.5] - geom = random.geometric desired = np.array([2, 2, 2]) - self.set_seed() - actual = geom(p * 3) + rng = random.RandomState(self.seed) + actual = rng.geometric(p * 3) assert_array_equal(actual, desired) - assert_raises(ValueError, geom, bad_p_one * 3) - 
assert_raises(ValueError, geom, bad_p_two * 3) + assert_raises(ValueError, rng.geometric, bad_p_one * 3) + assert_raises(ValueError, rng.geometric, bad_p_two * 3) def test_hypergeometric(self): ngood = [1] @@ -1854,50 +1827,48 @@ def test_hypergeometric(self): bad_nbad = [-2] bad_nsample_one = [0] bad_nsample_two = [4] - hypergeom = random.hypergeometric desired = np.array([1, 1, 1]) - self.set_seed() - actual = hypergeom(ngood * 3, nbad, nsample) + rng = random.RandomState(self.seed) + actual = rng.hypergeometric(ngood * 3, nbad, nsample) assert_array_equal(actual, desired) - assert_raises(ValueError, hypergeom, bad_ngood * 3, nbad, nsample) - assert_raises(ValueError, hypergeom, ngood * 3, bad_nbad, nsample) - assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_one) - assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_two) + assert_raises(ValueError, rng.hypergeometric, bad_ngood * 3, nbad, nsample) + assert_raises(ValueError, rng.hypergeometric, ngood * 3, bad_nbad, nsample) + assert_raises(ValueError, rng.hypergeometric, ngood * 3, nbad, bad_nsample_one) + assert_raises(ValueError, rng.hypergeometric, ngood * 3, nbad, bad_nsample_two) - self.set_seed() - actual = hypergeom(ngood, nbad * 3, nsample) + rng = random.RandomState(self.seed) + actual = rng.hypergeometric(ngood, nbad * 3, nsample) assert_array_equal(actual, desired) - assert_raises(ValueError, hypergeom, bad_ngood, nbad * 3, nsample) - assert_raises(ValueError, hypergeom, ngood, bad_nbad * 3, nsample) - assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_one) - assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_two) + assert_raises(ValueError, rng.hypergeometric, bad_ngood, nbad * 3, nsample) + assert_raises(ValueError, rng.hypergeometric, ngood, bad_nbad * 3, nsample) + assert_raises(ValueError, rng.hypergeometric, ngood, nbad * 3, bad_nsample_one) + assert_raises(ValueError, rng.hypergeometric, ngood, nbad * 3, bad_nsample_two) - 
self.set_seed() - actual = hypergeom(ngood, nbad, nsample * 3) + rng = random.RandomState(self.seed) + actual = rng.hypergeometric(ngood, nbad, nsample * 3) assert_array_equal(actual, desired) - assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3) - assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3) - assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3) - assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3) + assert_raises(ValueError, rng.hypergeometric, bad_ngood, nbad, nsample * 3) + assert_raises(ValueError, rng.hypergeometric, ngood, bad_nbad, nsample * 3) + assert_raises(ValueError, rng.hypergeometric, ngood, nbad, bad_nsample_one * 3) + assert_raises(ValueError, rng.hypergeometric, ngood, nbad, bad_nsample_two * 3) - assert_raises(ValueError, hypergeom, -1, 10, 20) - assert_raises(ValueError, hypergeom, 10, -1, 20) - assert_raises(ValueError, hypergeom, 10, 10, 0) - assert_raises(ValueError, hypergeom, 10, 10, 25) + assert_raises(ValueError, rng.hypergeometric, -1, 10, 20) + assert_raises(ValueError, rng.hypergeometric, 10, -1, 20) + assert_raises(ValueError, rng.hypergeometric, 10, 10, 0) + assert_raises(ValueError, rng.hypergeometric, 10, 10, 25) def test_logseries(self): p = [0.5] bad_p_one = [2] bad_p_two = [-1] - logseries = random.logseries desired = np.array([1, 1, 1]) - self.set_seed() - actual = logseries(p * 3) + rng = random.RandomState(self.seed) + actual = rng.logseries(p * 3) assert_array_equal(actual, desired) - assert_raises(ValueError, logseries, bad_p_one * 3) - assert_raises(ValueError, logseries, bad_p_two * 3) + assert_raises(ValueError, rng.logseries, bad_p_one * 3) + assert_raises(ValueError, rng.logseries, bad_p_two * 3) @pytest.mark.skipif(IS_WASM, reason="can't start thread") @@ -2025,9 +1996,9 @@ def test_integer_dtype(int_func): def test_integer_repeat(int_func): - random.seed(123456789) + rng = random.RandomState(123456789) fname, args, sha256 = int_func - f = 
getattr(random, fname) + f = getattr(rng, fname) val = f(*args, size=1000000) if sys.byteorder != 'little': val = val.byteswap() diff --git a/numpy/random/tests/test_randomstate_regression.py b/numpy/random/tests/test_randomstate_regression.py index e71be8acd981..1c8882d1b672 100644 --- a/numpy/random/tests/test_randomstate_regression.py +++ b/numpy/random/tests/test_randomstate_regression.py @@ -54,9 +54,9 @@ def test_shuffle_mixed_dimension(self): [(1, 1), (2, 2), (3, 3), None], [1, (2, 2), (3, 3), None], [(1, 1), 2, 3, None]]: - random.seed(12345) + rng = random.RandomState(12345) shuffled = list(t) - random.shuffle(shuffled) + rng.shuffle(shuffled) expected = np.array([t[0], t[3], t[1], t[2]], dtype=object) assert_array_equal(np.array(shuffled, dtype=object), expected) @@ -131,9 +131,9 @@ def test_permutation_subclass(self): class N(np.ndarray): pass - random.seed(1) + rng = random.RandomState(1) orig = np.arange(3).view(N) - perm = random.permutation(orig) + perm = rng.permutation(orig) assert_array_equal(perm, np.array([0, 2, 1])) assert_array_equal(orig, np.arange(3).view(N)) @@ -143,9 +143,9 @@ class M: def __array__(self, dtype=None, copy=None): return self.a - random.seed(1) + rng = random.RandomState(1) m = M() - perm = random.permutation(m) + perm = rng.permutation(m) assert_array_equal(perm, np.array([2, 1, 4, 0, 3])) assert_array_equal(m.__array__(), np.arange(5)) @@ -176,27 +176,27 @@ def test_choice_retun_dtype(self): reason='Cannot test with 32-bit C long') def test_randint_117(self): # GH 14189 - random.seed(0) + rng = random.RandomState(0) expected = np.array([2357136044, 2546248239, 3071714933, 3626093760, 2588848963, 3684848379, 2340255427, 3638918503, 1819583497, 2678185683], dtype='int64') - actual = random.randint(2**32, size=10) + actual = rng.randint(2**32, size=10) assert_array_equal(actual, expected) def test_p_zero_stream(self): # Regression test for gh-14522. Ensure that future versions # generate the same variates as version 1.16. 
- np.random.seed(12345) - assert_array_equal(random.binomial(1, [0, 0.25, 0.5, 0.75, 1]), + rng = random.RandomState(12345) + assert_array_equal(rng.binomial(1, [0, 0.25, 0.5, 0.75, 1]), [0, 0, 0, 1, 1]) def test_n_zero_stream(self): # Regression test for gh-14522. Ensure that future versions # generate the same variates as version 1.16. - np.random.seed(8675309) + rng = random.RandomState(8675309) expected = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [3, 4, 2, 3, 3, 1, 5, 3, 1, 3]]) - assert_array_equal(random.binomial([[0], [10]], 0.25, size=(2, 10)), + assert_array_equal(rng.binomial([[0], [10]], 0.25, size=(2, 10)), expected) diff --git a/numpy/random/tests/test_regression.py b/numpy/random/tests/test_regression.py index de52582c2b56..b29bd526266e 100644 --- a/numpy/random/tests/test_regression.py +++ b/numpy/random/tests/test_regression.py @@ -52,9 +52,9 @@ def test_shuffle_mixed_dimension(self): [(1, 1), (2, 2), (3, 3), None], [1, (2, 2), (3, 3), None], [(1, 1), 2, 3, None]]: - np.random.seed(12345) + rng = np.random.RandomState(12345) shuffled = list(t) - random.shuffle(shuffled) + rng.shuffle(shuffled) expected = np.array([t[0], t[3], t[1], t[2]], dtype=object) assert_array_equal(np.array(shuffled, dtype=object), expected) @@ -129,9 +129,9 @@ def test_permutation_subclass(self): class N(np.ndarray): pass - np.random.seed(1) + rng = np.random.RandomState(1) orig = np.arange(3).view(N) - perm = np.random.permutation(orig) + perm = rng.permutation(orig) assert_array_equal(perm, np.array([0, 2, 1])) assert_array_equal(orig, np.arange(3).view(N)) @@ -141,8 +141,8 @@ class M: def __array__(self, dtype=None, copy=None): return self.a - np.random.seed(1) + rng = np.random.RandomState(1) m = M() - perm = np.random.permutation(m) + perm = rng.permutation(m) assert_array_equal(perm, np.array([2, 1, 4, 0, 3])) assert_array_equal(m.__array__(), np.arange(5)) From 1f5b7392f6a1d57886a4b32f71d7a44f09f22e26 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 15 Sep 
2025 13:12:42 -0600 Subject: [PATCH 0467/1718] DOC: add dev docs on C debuggers and compiler sanitizers --- .../dev/development_advanced_debugging.rst | 214 +++++++++++++++++- 1 file changed, 212 insertions(+), 2 deletions(-) diff --git a/doc/source/dev/development_advanced_debugging.rst b/doc/source/dev/development_advanced_debugging.rst index 6df7a3ecb64a..23b06085b371 100644 --- a/doc/source/dev/development_advanced_debugging.rst +++ b/doc/source/dev/development_advanced_debugging.rst @@ -5,11 +5,15 @@ Advanced debugging tools ======================== If you reached here, you want to dive into, or use, more advanced tooling. -This is usually not necessary for first time contributors and most +This is usually not necessary for first-time contributors and most day-to-day development. These are used more rarely, for example close to a new NumPy release, or when a large or particular complex change was made. +Some of these tools are used in NumPy's continuous integration tests. If you +see a test failure that only happens under a debugging tool, these instructions +should hopefully enable you to reproduce the test failure locally. + Since not all of these tools are used on a regular basis and only available on some systems, please expect differences, issues, or quirks; we will be happy to help if you get stuck and appreciate any improvements @@ -20,7 +24,7 @@ Finding C errors with additional tooling ######################################## Most development will not require more than a typical debugging toolchain -as shown in :ref:`Debugging `. +as shown in :ref:`Debugging `. But for example memory leaks can be particularly subtle or difficult to narrow down. @@ -213,3 +217,209 @@ command for NumPy). .. _pytest-valgrind: https://github.com/seberg/pytest-valgrind + +C debuggers +=========== + +Whenever NumPy crashes or when working on changes to NumPy's low-level C or C++ +code, it's often convenient to run Python under a C debugger to get more +information. 
A debugger can aid in understanding an interpreter crash (e.g. due +to a segmentation fault) by providing a C call stack at the site of the +crash. The call stack often provides valuable context to understand the nature +of a crash. C debuggers are also very useful during development, allowing +interactive debugging in the C implementation of NumPy. + +The NumPy developers often use both ``gdb`` and ``lldb`` to debug Numpy. It does +not matter which debugger you use - although on a Mac it is often far easier to +use ``lldb`` than ``gdb``. That said, they have disjoint user interfaces, so you +will need to learn how to use whichever one you land on. The ``gdb`` to ``lldb`` +`command map `_ is a convnient reference for +how to accomplish common recipes in both debuggers. + + +Use together with ``spin`` +-------------------------- + +The ``spin`` `development workflow tool +`_. has built-in support for working +with both ``gdb`` and ``ldb`` via the ``spin gdb`` and ``spin lldb`` commands. + +For both debuggers, it's advisable to build NumPy in either the ``debug`` or +``debugoptimized`` meson build profile. To use ``debug`` you can pass the option +via ``spin build``: + +.. code-block:: bash + + spin build -- -Dbuildtype=debug + +You can pass additional arguments to `meson setup +`_ besides ``buildtype`` using the +same positional argument syntax for ``spin build``. + +Let's say you have a test script named `test.py` that lives in a ``test`` folder +in the same directory as the NumPy source checking. You could execute the test +script using the ``spin`` build of NumPy with the following incantation: + +.. code-block:: bash + + spin gdb ../test/test.py + +This will launch into gdb. If all you care about is a call stack for a crash, +type "r" and hit enter. Your test script will run and if a crash happens, you +type "bt" to get a traceback. For ``lldb``, the instructions are similar, just +replace ``spin gdb`` with ``spin lldb``. 
+ +You can also set breakpoints and use other more advanced techniques. See the +documentation for your debugger for more details. + +One common issue with breakpoints in NumPy is that some code paths get hit +repeatedly during the import of the ``numpy`` module. This can make it tricky or +tedious to find the first "real" call after the NumPy import has completed and +the ``numpy`` module is fully initialized. + +One workaround is to use a script like this: + +.. code-block:: python + + import os + import signal + + import numpy as np + + PID = os.getpid() + + def do_nothing(*args): + pass + + signal.signal(signal.SIGUSR1, do_nothing) + + os.kill(PID, signal.SIGUSR1) + + # the code to run under a debugger follows + + +This example installs a signal handler for the ``SIGUSR1`` signal that does +nothing and then calls ``os.kill`` on the Python process with the ``SIGUSR1`` +signal. This causes the signal handler to fire and critically also causes both +``gdb`` and ``lldb`` to halt execution inside of the ``kill`` syscall. + +Since the ``os.kill`` call happens after the ``numpy`` module is already fully +initialized, this means any breakpoints set inside of ``kill`` will happen +*after* ``numpy`` is finished initializing. + +Use together with ``pytest`` +---------------------------- + +You can also run ``pytest`` tests under a debugger. This requires using +the debugger in a slightly more manual fashion, since ``spin`` does not yet +automate this process. First, run ``spin build`` to ensure there is a fully +built copy of NumPy managed by ``spin``. Then, to run the tests under ``lldb`` +you would do something like this: + +.. code-block:: bash + + spin lldb $(which python) $(which pytest) build-install/usr/lib/python3.13/site-packages/numpy/_core/tests/test_multiarray.py + +This will execute the tests in ``test_multiarray.py`` under lldb after typing +'r' and hitting enter. Note that this command comes from a session using Python +3.13 on a Mac. 
If you are using a different Python version or operating system, +the directory layout inside ``build-install`` may be slightly different. + +You can set breakpoints as described above. The issue about breakpoints +commonly being hit during NumPy import also applies - consider refactoring your +test workflow into a test script so you can adopt the workaround using +``os.kill`` described above. + +Note the use of ``$(which python)`` to ensure the debugger receives a path to a +Python executable. If you are using ``pyenv``, you may need to replace ``which +python`` with ``pyenv which python``, since ``pyenv`` relies on shim scripts +that ``which`` doesn't know about. + + +Compiler Sanitizers +=================== + +The `compiler sanitizer `_ suites +shipped by both GCC and LLVM offer a means to detect many common programming +errors at runtime. The sanitizers work by instrumenting the application code at +build time so additional runtime checks fire. Typically, sanitizers are run +during the course of regular testing and if a sanitizer check fails, this leads +to a test failure or crash, along with a report about the nature of the failure. + +While it is possible to use sanitizers with a "regular" build of CPython - it is +best if you can set up a Python environment based on a from-source Python build +with sanitizer instrumentation, and then use the instrumented Python to build +NumPy and run the tests. If the entire Python stack is instrumented using the +same sanitizer runtime, it becomes possible to identify issues that happen +across the Python stack. This enables detecting memory leaks in NumPy due to +misuse of memory allocated in CPython, for example. + +Build Python with Sanitizer Instrumentation +------------------------------------------- + +See the `section in the Python developer's guide +`_ on this topic for +more information about building Python from source. 
To enable address sanitizer, +you will need to pass ``--with-address-sanitizer`` to the ``configure`` script +invocation when you build Python. + +You can also use `pyenv `_ to automate the +process of building Python and quickly activate or deactivate a Python +installation using a command-line interface similar to virtual +environments. With ``pyenv`` you could install an ASAN-instrumented build of +Python 3.13 like this: + +.. code-block:: bash + + CONFIGURE_OPTS="--with-address-sanitizer" pyenv install 3.13 + +If you are interested in thread sanitizer, the ``cpython_sanity`` `docker images +`_ might also be a quicker choice +that bypasses building Python from source, although it may be annoying to do +debugging work inside of a docker image. + +Use together with ``spin`` +-------------------------- + +However you build Python, once you have an instrumented Python build, you can +install NumPy's development and test dependencies and build NumPy with address +sanitizer instrumentation. For example, to build NumPy with the ``debug`` +profile and address sanitizer, you would pass additional build options to +``meson`` like this: + +.. code-block:: bash + + spin build -- -Dbuildtype=debug -Db_sanitize=address + + +Once the build is finished, you can use other ``spin`` command like ``spin +test`` and ``spin gdb`` as with any other Python build. + +Special considerations +---------------------- + +Some NumPy tests intentionally lead to ``malloc`` returning ``NULL``. In its +default configuration, some of the compiler sanitizers flag this as an +error. You can disable that check by passing ``allocator_may_return_null=1`` to +the sanitizer as an option. For example, with address sanitizer: + +.. code-block:: bash + + ASAN_OPTIONS=allocator_may_return_null=1 spin test + +You may see memory leaks coming from the Python interpreter, particularly on +MacOS. 
If the memory leak reports are not useful, you can disable leak detection +by passing ``detect_leaks=0`` in ``ASAN_OPTIONS``. You can pass more than one +option using a comma-delimited list, like this: + +.. code-block:: bash + + ASAN_OPTIONS=allocator_may_return_null=1:halt_on_error=1:detect_leaks=1 spin test + +The ``halt_on_error`` option can be particularly useful -- it hard-crashes the +Python executable whenever it detects an error, along with a report about the +error that includes a stack trace. + +You can also take a look at the ``compiler_sanitizers.yml`` GitHub actions +workflow configuration. It describes several different CI jobs that are run as +part of the NumPy tests using Thread, Address, and Undefined Behavior sanitizer. From 4a0d4d4b0c971696c4858fb95819cde56f83e2a4 Mon Sep 17 00:00:00 2001 From: Aniket Singh Yadav Date: Wed, 17 Sep 2025 00:09:39 +0530 Subject: [PATCH 0468/1718] DOC: Improve documentation for f2py and Meson usage, add ufunc extension example --- doc/source/f2py/index.rst | 67 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) diff --git a/doc/source/f2py/index.rst b/doc/source/f2py/index.rst index b5cfb168073a..3427ffbbd2fc 100644 --- a/doc/source/f2py/index.rst +++ b/doc/source/f2py/index.rst @@ -45,6 +45,73 @@ end matches the NumPy version printed from ``python -m numpy.f2py``, then you can use the shorter version. If not, or if you cannot run ``f2py``, you should replace all calls to ``f2py`` mentioned in this guide with the longer version. +======================= +Using f2py with Meson +======================= + +Meson is a modern build system recommended for building Python extension modules, +especially starting with Python 3.12 and NumPy 2.x. Meson provides a robust and +maintainable way to build Fortran extensions with f2py. + +To build a Fortran extension using f2py and Meson, you can use Meson's `custom_target` +to invoke f2py and generate the extension module. 
The following minimal example +demonstrates how to do this: + +.. code-block:: meson + + # List your Fortran source files + fortran_sources = files('your_module.f90') + + # Find the Python installation + py = import('python').find_installation() + + # Create a custom target to build the extension with f2py + f2py_wrapper = custom_target( + 'your_module_wrapper', + output: 'your_module.so', + input: fortran_sources, + command: [ + py.full_path(), '-m', 'numpy.f2py', + '-c', '@INPUT@', '-m', 'your_module' + ] + ) + + # Install the built extension to the Python site-packages directory + install_data(f2py_wrapper, install_dir: py.site_packages_dir()) + +For more details and advanced usage, see the Meson build guide in +the user documentation or refer to SciPy's Meson build files for +real-world examples: https://github.com/scipy/scipy/tree/main/meson.build + +========================================== +Building NumPy ufunc Extensions with Meson +========================================== + +To build a NumPy ufunc extension (C API) using Meson, you can use the +following template: + +.. code-block:: meson + + # List your C source files + c_sources = files('your_ufunc_module.c') + + # Find the Python installation + py = import('python').find_installation() + + # Create an extension module + extension_module = py.extension_module( + 'your_ufunc_module', + c_sources, + dependencies: py.dependency(), + install: true + ) + +For more information on writing NumPy ufunc extensions, see the +official NumPy documentation: https://numpy.org/doc/stable/reference/c-api.ufunc.html + +You can also refer to SciPy's Meson build files for real-world +examples: https://github.com/scipy/scipy/tree/main/meson.build + .. 
toctree:: :maxdepth: 3 From a362d7888fef57d0926be81b3ece4257bdce6e94 Mon Sep 17 00:00:00 2001 From: Aniket Singh Yadav Date: Wed, 17 Sep 2025 00:17:16 +0530 Subject: [PATCH 0469/1718] DOC: Improve documentation for f2py and Meson usage, add ufunc extension example --- doc/source/f2py/index.rst | 3 --- 1 file changed, 3 deletions(-) diff --git a/doc/source/f2py/index.rst b/doc/source/f2py/index.rst index 3427ffbbd2fc..fb1c573d1269 100644 --- a/doc/source/f2py/index.rst +++ b/doc/source/f2py/index.rst @@ -109,9 +109,6 @@ following template: For more information on writing NumPy ufunc extensions, see the official NumPy documentation: https://numpy.org/doc/stable/reference/c-api.ufunc.html -You can also refer to SciPy's Meson build files for real-world -examples: https://github.com/scipy/scipy/tree/main/meson.build - .. toctree:: :maxdepth: 3 From 2fcfde1804e222040d45a56dfe35608b2c562e29 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Tue, 16 Sep 2025 14:01:16 -0600 Subject: [PATCH 0470/1718] Apply suggestions from code review Co-authored-by: Lucas Colley --- doc/source/dev/development_advanced_debugging.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/source/dev/development_advanced_debugging.rst b/doc/source/dev/development_advanced_debugging.rst index 23b06085b371..805b7251e626 100644 --- a/doc/source/dev/development_advanced_debugging.rst +++ b/doc/source/dev/development_advanced_debugging.rst @@ -242,7 +242,7 @@ Use together with ``spin`` The ``spin`` `development workflow tool `_. has built-in support for working -with both ``gdb`` and ``ldb`` via the ``spin gdb`` and ``spin lldb`` commands. +with both ``gdb`` and ``lldb`` via the ``spin gdb`` and ``spin lldb`` commands. For both debuggers, it's advisable to build NumPy in either the ``debug`` or ``debugoptimized`` meson build profile. 
To use ``debug`` you can pass the option @@ -256,8 +256,8 @@ You can pass additional arguments to `meson setup `_ besides ``buildtype`` using the same positional argument syntax for ``spin build``. -Let's say you have a test script named `test.py` that lives in a ``test`` folder -in the same directory as the NumPy source checking. You could execute the test +Let's say you have a test script named ``test.py`` that lives in a ``test`` folder +in the same directory as the NumPy source checkout. You could execute the test script using the ``spin`` build of NumPy with the following incantation: .. code-block:: bash From 9273b2fef33e3ac5a022e67fb84f8f5f66bdc30a Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 16 Sep 2025 22:55:45 +0200 Subject: [PATCH 0471/1718] BUG: Stable ``ScalarType`` ordering --- numpy/_core/numerictypes.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/numerictypes.py b/numpy/_core/numerictypes.py index 135dc1b51d97..265ad4f8eb1f 100644 --- a/numpy/_core/numerictypes.py +++ b/numpy/_core/numerictypes.py @@ -598,7 +598,7 @@ def _scalar_type_key(typ): ScalarType = [int, float, complex, bool, bytes, str, memoryview] -ScalarType += sorted(set(sctypeDict.values()), key=_scalar_type_key) +ScalarType += sorted(dict.fromkeys(sctypeDict.values()), key=_scalar_type_key) ScalarType = tuple(ScalarType) From 707fee62a0f084d6068bc0624d3fb79f974d57bb Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Tue, 16 Sep 2025 14:48:30 -0600 Subject: [PATCH 0472/1718] DOC: respond to code review comments [skip actions] [skip azp] [skip cirrus] --- .../dev/development_advanced_debugging.rst | 61 +++++++++++++++---- 1 file changed, 50 insertions(+), 11 deletions(-) diff --git a/doc/source/dev/development_advanced_debugging.rst b/doc/source/dev/development_advanced_debugging.rst index 805b7251e626..07c80314da1b 100644 --- a/doc/source/dev/development_advanced_debugging.rst +++ b/doc/source/dev/development_advanced_debugging.rst @@ -229,16 
+229,16 @@ crash. The call stack often provides valuable context to understand the nature of a crash. C debuggers are also very useful during development, allowing interactive debugging in the C implementation of NumPy. -The NumPy developers often use both ``gdb`` and ``lldb`` to debug Numpy. It does -not matter which debugger you use - although on a Mac it is often far easier to -use ``lldb`` than ``gdb``. That said, they have disjoint user interfaces, so you -will need to learn how to use whichever one you land on. The ``gdb`` to ``lldb`` -`command map `_ is a convnient reference for -how to accomplish common recipes in both debuggers. +The NumPy developers often use both ``gdb`` and ``lldb`` to debug Numpy. As a +rule of thumb, ``gdb`` is often easier to use on Linux while ``lldb`` is easier +to use on a Mac environment. They have disjoint user interfaces, so you will need to +learn how to use whichever one you land on. The ``gdb`` to ``lldb`` `command map +`_ is a convnient reference for how to +accomplish common recipes in both debuggers. -Use together with ``spin`` --------------------------- +Building With Debug Symbols +--------------------------- The ``spin`` `development workflow tool `_. has built-in support for working @@ -252,11 +252,16 @@ via ``spin build``: spin build -- -Dbuildtype=debug +to use ``debugoptimized`` you're pass ``-Dbuildtype=debugoptimized`` instead. + You can pass additional arguments to `meson setup `_ besides ``buildtype`` using the same positional argument syntax for ``spin build``. -Let's say you have a test script named ``test.py`` that lives in a ``test`` folder +Running a Test Script +--------------------- + +Let's say you have a test script named `test.py` that lives in a ``test`` folder in the same directory as the NumPy source checkout. 
You could execute the test script using the ``spin`` build of NumPy with the following incantation: @@ -303,7 +308,41 @@ nothing and then calls ``os.kill`` on the Python process with the ``SIGUSR1`` signal. This causes the signal handler to fire and critically also causes both ``gdb`` and ``lldb`` to halt execution inside of the ``kill`` syscall. -Since the ``os.kill`` call happens after the ``numpy`` module is already fully +If you run ``lldb`` you should see output something like this: + +.. code-block:: + + Process 67365 stopped + * thread #1, queue = 'com.apple.main-thread', stop reason = signal SIGUSR1 + frame #0: 0x000000019c4b9da4 libsystem_kernel.dylib`__kill + 8 + libsystem_kernel.dylib`__kill: + -> 0x19c4b9da4 <+8>: b.lo 0x19c4b9dc4 ; <+40> + 0x19c4b9da8 <+12>: pacibsp + 0x19c4b9dac <+16>: stp x29, x30, [sp, #-0x10]! + 0x19c4b9db0 <+20>: mov x29, sp + Target 0: (python3.13) stopped. + (lldb) bt + * thread #1, queue = 'com.apple.main-thread', stop reason = signal SIGUSR1 + * frame #0: 0x000000019c4b9da4 libsystem_kernel.dylib`__kill + 8 + frame #1: 0x000000010087f5c4 libpython3.13.dylib`os_kill + 104 + frame #2: 0x000000010071374c libpython3.13.dylib`cfunction_vectorcall_FASTCALL + 276 + frame #3: 0x00000001006c1e3c libpython3.13.dylib`PyObject_Vectorcall + 88 + frame #4: 0x00000001007edd1c libpython3.13.dylib`_PyEval_EvalFrameDefault + 23608 + frame #5: 0x00000001007e7e6c libpython3.13.dylib`PyEval_EvalCode + 252 + frame #6: 0x0000000100852944 libpython3.13.dylib`run_eval_code_obj + 180 + frame #7: 0x0000000100852610 libpython3.13.dylib`run_mod + 220 + frame #8: 0x000000010084fa4c libpython3.13.dylib`_PyRun_SimpleFileObject + 868 + frame #9: 0x000000010084f400 libpython3.13.dylib`_PyRun_AnyFileObject + 160 + frame #10: 0x0000000100874ab8 libpython3.13.dylib`pymain_run_file + 336 + frame #11: 0x0000000100874324 libpython3.13.dylib`Py_RunMain + 1516 + frame #12: 0x000000010087459c libpython3.13.dylib`pymain_main + 324 + frame #13: 0x000000010087463c 
libpython3.13.dylib`Py_BytesMain + 40 + frame #14: 0x000000019c152b98 dyld`start + 6076 + (lldb) + +As you can see, the C stack trace is inside of the ``kill`` syscall and an +``lldb`` prompt is active, allowing interactively setting breakpoints. Since the +``os.kill`` call happens after the ``numpy`` module is already fully initialized, this means any breakpoints set inside of ``kill`` will happen *after* ``numpy`` is finished initializing. @@ -410,7 +449,7 @@ the sanitizer as an option. For example, with address sanitizer: You may see memory leaks coming from the Python interpreter, particularly on MacOS. If the memory leak reports are not useful, you can disable leak detection by passing ``detect_leaks=0`` in ``ASAN_OPTIONS``. You can pass more than one -option using a comma-delimited list, like this: +option using a colon-delimited list, like this: .. code-block:: bash From c9e75927a43c5a52fc8ad7c645d8fa5c6c33f8ef Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mateusz=20Sok=C3=B3=C5=82?= <8431159+mtsokol@users.noreply.github.com> Date: Wed, 17 Sep 2025 08:35:15 +0200 Subject: [PATCH 0473/1718] BUG: Fix `dtype` refcount in `__array__` (#29715) * BUG: Fix `dtype` refcount in `__array__` * Consider all possible code paths * Remove else-if branch * Move refcount checks to a separate test * Add code comments * Add missing `Py_DECREF` for error path * Apply review comments --- numpy/_core/src/multiarray/methods.c | 17 +++++++++----- numpy/_core/tests/test_api.py | 35 +++++++++++++++++++++++++++- 2 files changed, 45 insertions(+), 7 deletions(-) diff --git a/numpy/_core/src/multiarray/methods.c b/numpy/_core/src/multiarray/methods.c index f2150d63c496..2d754d7c6e91 100644 --- a/numpy/_core/src/multiarray/methods.c +++ b/numpy/_core/src/multiarray/methods.c @@ -934,6 +934,11 @@ array_getarray(PyArrayObject *self, PyObject *args, PyObject *kwds) return NULL; } + if (newtype == NULL) { + newtype = PyArray_DESCR(self); + Py_INCREF(newtype); // newtype is owned. 
+ } + /* convert to PyArray_Type */ if (!PyArray_CheckExact(self)) { PyArrayObject *new; @@ -951,6 +956,7 @@ array_getarray(PyArrayObject *self, PyObject *args, PyObject *kwds) (PyObject *)self ); if (new == NULL) { + Py_DECREF(newtype); return NULL; } self = new; @@ -960,22 +966,21 @@ array_getarray(PyArrayObject *self, PyObject *args, PyObject *kwds) } if (copy == NPY_COPY_ALWAYS) { - if (newtype == NULL) { - newtype = PyArray_DESCR(self); - } - ret = PyArray_CastToType(self, newtype, 0); + ret = PyArray_CastToType(self, newtype, 0); // steals newtype reference Py_DECREF(self); return ret; } else { // copy == NPY_COPY_IF_NEEDED || copy == NPY_COPY_NEVER - if (newtype == NULL || PyArray_EquivTypes(PyArray_DESCR(self), newtype)) { + if (PyArray_EquivTypes(PyArray_DESCR(self), newtype)) { + Py_DECREF(newtype); return (PyObject *)self; } if (copy == NPY_COPY_IF_NEEDED) { - ret = PyArray_CastToType(self, newtype, 0); + ret = PyArray_CastToType(self, newtype, 0); // steals newtype reference. 
Py_DECREF(self); return ret; } else { // copy == NPY_COPY_NEVER PyErr_SetString(PyExc_ValueError, npy_no_copy_err_msg); + Py_DECREF(newtype); Py_DECREF(self); return NULL; } diff --git a/numpy/_core/tests/test_api.py b/numpy/_core/tests/test_api.py index c2e5bf8909f9..83e0a0179e0a 100644 --- a/numpy/_core/tests/test_api.py +++ b/numpy/_core/tests/test_api.py @@ -91,7 +91,7 @@ def test_array_array(): # instead we get a array([...], dtype=">V18") assert_equal(bytes(np.array(o).data), bytes(a.data)) - # test array + # test __array__ def custom__array__(self, dtype=None, copy=None): return np.array(100.0, dtype=dtype, copy=copy) @@ -157,6 +157,39 @@ def custom__array__(self, dtype=None, copy=None): assert_equal(np.array([(1.0,) * 10] * 10, dtype=np.float64), np.ones((10, 10), dtype=np.float64)) + +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +def test___array___refcount(): + class MyArray: + def __init__(self, dtype): + self.val = np.array(-1, dtype=dtype) + + def __array__(self, dtype=None, copy=None): + return self.val.__array__(dtype=dtype, copy=copy) + + # test all possible scenarios: + # dtype(none | same | different) x copy(true | false | none) + dt = np.dtype(np.int32) + old_refcount = sys.getrefcount(dt) + np.array(MyArray(dt)) + assert_equal(old_refcount, sys.getrefcount(dt)) + np.array(MyArray(dt), dtype=dt) + assert_equal(old_refcount, sys.getrefcount(dt)) + np.array(MyArray(dt), copy=None) + assert_equal(old_refcount, sys.getrefcount(dt)) + np.array(MyArray(dt), dtype=dt, copy=None) + assert_equal(old_refcount, sys.getrefcount(dt)) + dt2 = np.dtype(np.int16) + old_refcount2 = sys.getrefcount(dt2) + np.array(MyArray(dt), dtype=dt2) + assert_equal(old_refcount2, sys.getrefcount(dt2)) + np.array(MyArray(dt), dtype=dt2, copy=None) + assert_equal(old_refcount2, sys.getrefcount(dt2)) + with pytest.raises(ValueError): + np.array(MyArray(dt), dtype=dt2, copy=False) + assert_equal(old_refcount2, sys.getrefcount(dt2)) + + 
@pytest.mark.parametrize("array", [True, False]) def test_array_impossible_casts(array): # All builtin types can be forcibly cast, at least theoretically, From fbfaf4276a9f101525b9d7696b03d95acf32a0e3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 17 Sep 2025 17:21:09 +0000 Subject: [PATCH 0474/1718] MAINT: Bump larsoner/circleci-artifacts-redirector-action Bumps [larsoner/circleci-artifacts-redirector-action](https://github.com/larsoner/circleci-artifacts-redirector-action) from 1.2.0 to 1.3.1. - [Release notes](https://github.com/larsoner/circleci-artifacts-redirector-action/releases) - [Commits](https://github.com/larsoner/circleci-artifacts-redirector-action/compare/839631420e45a08af893032e5a5e8843bf47e8ff...5d358ff96e96429a5c64a969bb4a574555439f4f) --- updated-dependencies: - dependency-name: larsoner/circleci-artifacts-redirector-action dependency-version: 1.3.1 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- .github/workflows/circleci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/circleci.yml b/.github/workflows/circleci.yml index 12c51735bf81..eafe61098588 100644 --- a/.github/workflows/circleci.yml +++ b/.github/workflows/circleci.yml @@ -17,7 +17,7 @@ jobs: statuses: write steps: - name: GitHub Action step - uses: larsoner/circleci-artifacts-redirector-action@839631420e45a08af893032e5a5e8843bf47e8ff # master + uses: larsoner/circleci-artifacts-redirector-action@5d358ff96e96429a5c64a969bb4a574555439f4f # master with: repo-token: ${{ secrets.GITHUB_TOKEN }} api-token: ${{ secrets.CIRCLE_TOKEN }} From abb8dac07f9364c0e3f3fe943b5ba9d4f6db1ed5 Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Wed, 17 Sep 2025 21:16:04 +0300 Subject: [PATCH 0475/1718] ENH: add a casting option 'same_value' and use it in np.astype (#29129) * start implementing same_value casting * work through more places that check 'cast', add a TODO * add a test, percolate casting closer to inner loops * use SAME_VALUE_CAST flag for one inner loop variant * aligned test of same_value passes. 
Need more tests * handle unaligned casting with 'same_value' * extend tests to use source-is-complex * fix more interfaces to pass casting around, disallow using 'same_value' in raw_array_assign_scalar and raw_array_wheremasked_assign_scalar * raise in places that have a kwarg casting, besides np.astype * refactor based on review comments * CHAR_MAX,MIN -> SCHAR_MAX,MIN * copy context flags * add 'same_value' to typing stubs * document new feature * test, check exact float->int casting: refactor same_value check into a function * enable astype same_value casting for scalars * typo * fix ptr-to-src_value -> value casting errors * fix linting and docs, ignore warning better * gcc warning is different * fixes from review, typos * fix compile warning ignore and make filter in tests more specific, disallow non-numeric 'same_value' * fix warning filters * emit PyErr inside the loop * macOS can emit FPEs when touching NAN * Fix can-cast logic everywhere for same-value casts (only allow numeric) * reorder and simplify, from review * revert last commit and remove redundant checks * gate and document SAME_VALUE_CASTING for v2.4 * make SAME_VALUE a flag, not an enum * fixes from review * fixes from review * inline inner casting loop and float16 -> float64 calls * typo * fixes for AVX512 and debug builds, inline npy_halfbits_to_doublebits only on specific platforms * use optimize instead of inline --------- Co-authored-by: Matti Picus Co-authored-by: Sebastian Berg --- benchmarks/benchmarks/bench_ufunc.py | 2 +- .../upcoming_changes/29129.enhancement.rst | 7 + doc/source/reference/c-api/array.rst | 7 + numpy/__init__.cython-30.pxd | 1 + numpy/__init__.pxd | 1 + numpy/__init__.pyi | 2 +- numpy/_core/_add_newdocs.py | 18 +- numpy/_core/code_generators/cversions.txt | 3 +- numpy/_core/include/numpy/dtype_api.h | 14 +- numpy/_core/include/numpy/ndarraytypes.h | 13 + numpy/_core/include/numpy/numpyconfig.h | 5 +- numpy/_core/meson.build | 3 +- numpy/_core/src/common/array_assign.h 
| 4 +- .../src/multiarray/_multiarray_tests.c.src | 7 +- .../_core/src/multiarray/array_assign_array.c | 82 +++--- .../src/multiarray/array_assign_scalar.c | 27 +- numpy/_core/src/multiarray/array_method.c | 23 +- numpy/_core/src/multiarray/common.c | 9 - numpy/_core/src/multiarray/conversion_utils.c | 29 +- numpy/_core/src/multiarray/convert.c | 2 +- numpy/_core/src/multiarray/convert_datatype.c | 50 +++- numpy/_core/src/multiarray/ctors.c | 1 - numpy/_core/src/multiarray/datetime.c | 16 ++ numpy/_core/src/multiarray/datetime_strings.c | 2 +- numpy/_core/src/multiarray/dtype_transfer.c | 10 +- numpy/_core/src/multiarray/dtype_transfer.h | 16 +- .../multiarray/legacy_dtype_implementation.c | 8 +- .../multiarray/lowlevel_strided_loops.c.src | 247 +++++++++++++++++- numpy/_core/src/multiarray/mapping.c | 3 +- numpy/_core/src/multiarray/methods.c | 13 +- numpy/_core/src/umath/legacy_array_method.c | 9 +- numpy/_core/src/umath/reduction.c | 2 +- numpy/_core/src/umath/ufunc_object.c | 56 ++-- numpy/_core/tests/test_casting_unittests.py | 146 ++++++++++- numpy/_core/tests/test_conversion_utils.py | 5 +- numpy/_core/tests/test_datetime.py | 4 + numpy/_core/tests/test_einsum.py | 5 + numpy/_core/tests/test_shape_base.py | 5 + 38 files changed, 703 insertions(+), 154 deletions(-) create mode 100644 doc/release/upcoming_changes/29129.enhancement.rst diff --git a/benchmarks/benchmarks/bench_ufunc.py b/benchmarks/benchmarks/bench_ufunc.py index 155e7d4f7421..ac978981faba 100644 --- a/benchmarks/benchmarks/bench_ufunc.py +++ b/benchmarks/benchmarks/bench_ufunc.py @@ -304,7 +304,7 @@ def time_ndarray_dlp(self, methname, npdtypes): class NDArrayAsType(Benchmark): """ Benchmark for type conversion """ - params = [list(itertools.combinations(TYPES1, 2))] + params = [list(itertools.product(TYPES1, TYPES1))] param_names = ['typeconv'] timeout = 10 diff --git a/doc/release/upcoming_changes/29129.enhancement.rst b/doc/release/upcoming_changes/29129.enhancement.rst new file mode 100644 
index 000000000000..9a14f13c1f4a --- /dev/null +++ b/doc/release/upcoming_changes/29129.enhancement.rst @@ -0,0 +1,7 @@ +``'same_value'`` for casting by value +------------------------------------- +The ``casting`` kwarg now has a ``'same_value'`` option that checks the actual +values can be round-trip cast without changing value. Currently it is only +implemented in `ndarray.astype`. This will raise a ``ValueError`` if any of the +values in the array would change as a result of the cast, including rounding of +floats or overflowing of ints. diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst index 667b9948a3d9..95f5cc033b4d 100644 --- a/doc/source/reference/c-api/array.rst +++ b/doc/source/reference/c-api/array.rst @@ -4492,5 +4492,12 @@ Enumerated Types Allow any cast, no matter what kind of data loss may occur. +.. c:macro:: NPY_SAME_VALUE_CASTING + + Error if any values change during a cast. Currently + supported only in ``ndarray.astype(... casting='same_value')`` + + .. versionadded:: 2.4 + .. 
index:: pair: ndarray; C-API diff --git a/numpy/__init__.cython-30.pxd b/numpy/__init__.cython-30.pxd index 86c91cf617a5..c71898626070 100644 --- a/numpy/__init__.cython-30.pxd +++ b/numpy/__init__.cython-30.pxd @@ -156,6 +156,7 @@ cdef extern from "numpy/arrayobject.h": NPY_SAFE_CASTING NPY_SAME_KIND_CASTING NPY_UNSAFE_CASTING + NPY_SAME_VALUE_CASTING ctypedef enum NPY_CLIPMODE: NPY_CLIP diff --git a/numpy/__init__.pxd b/numpy/__init__.pxd index eb0764126116..40a24b6c7cc1 100644 --- a/numpy/__init__.pxd +++ b/numpy/__init__.pxd @@ -165,6 +165,7 @@ cdef extern from "numpy/arrayobject.h": NPY_SAFE_CASTING NPY_SAME_KIND_CASTING NPY_UNSAFE_CASTING + NPY_SAME_VALUE_CASTING ctypedef enum NPY_CLIPMODE: NPY_CLIP diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 39b256474b3f..caa5b6f5a724 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -940,7 +940,7 @@ _DTypeBuiltinKind: TypeAlias = L[0, 1, 2] _ArrayAPIVersion: TypeAlias = L["2021.12", "2022.12", "2023.12", "2024.12"] -_CastingKind: TypeAlias = L["no", "equiv", "safe", "same_kind", "unsafe"] +_CastingKind: TypeAlias = L["no", "equiv", "safe", "same_kind", "same_value", "unsafe"] _OrderKACF: TypeAlias = L["K", "A", "C", "F"] | None _OrderACF: TypeAlias = L["A", "C", "F"] | None diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index ad7029deaa0f..264bb8e25e7c 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -3212,7 +3212,7 @@ 'C' order otherwise, and 'K' means as close to the order the array elements appear in memory as possible. Default is 'K'. - casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + casting : {'no', 'equiv', 'safe', 'same_kind', 'same_value', 'unsafe'}, optional Controls what kind of data casting may occur. Defaults to 'unsafe' for backwards compatibility. @@ -3222,6 +3222,12 @@ * 'same_kind' means only safe casts or casts within a kind, like float64 to float32, are allowed. 
* 'unsafe' means any data conversions may be done. + * 'same_value' means any data conversions may be done, but the values + must not change, including rounding of floats or overflow of ints + + .. versionadded:: 2.4 + Support for ``'same_value'`` was added. + subok : bool, optional If True, then sub-classes will be passed-through (default), otherwise the returned array will be forced to be a base-class array. @@ -3244,6 +3250,9 @@ ComplexWarning When casting from complex to float or int. To avoid this, one should use ``a.real.astype(t)``. + ValueError + When casting using ``'same_value'`` and the values change or would + overflow Examples -------- @@ -3255,6 +3264,13 @@ >>> x.astype(int) array([1, 2, 2]) + >>> x.astype(int, casting="same_value") + Traceback (most recent call last): + ... + ValueError: could not cast 'same_value' double to long + + >>> x[:2].astype(int, casting="same_value") + array([1, 2]) """)) diff --git a/numpy/_core/code_generators/cversions.txt b/numpy/_core/code_generators/cversions.txt index 0d642d760b21..a04dd784c67f 100644 --- a/numpy/_core/code_generators/cversions.txt +++ b/numpy/_core/code_generators/cversions.txt @@ -79,5 +79,6 @@ # Version 19 (NumPy 2.2.0) No change 0x00000013 = 2b8f1f4da822491ff030b2b37dff07e3 # Version 20 (NumPy 2.3.0) -# Version 20 (NumPy 2.4.0) No change 0x00000014 = e56b74d32a934d085e7c3414cb9999b8, +# Version 21 (NumPy 2.4.0) Add 'same_value' casting, header additions +0x00000015 = e56b74d32a934d085e7c3414cb9999b8, diff --git a/numpy/_core/include/numpy/dtype_api.h b/numpy/_core/include/numpy/dtype_api.h index b37c9fbb6821..8c3ff720b372 100644 --- a/numpy/_core/include/numpy/dtype_api.h +++ b/numpy/_core/include/numpy/dtype_api.h @@ -99,6 +99,11 @@ typedef enum { } NPY_ARRAYMETHOD_FLAGS; +typedef enum { + /* Casting via same_value logic */ + NPY_SAME_VALUE_CONTEXT_FLAG=1, +} NPY_ARRAYMETHOD_CONTEXT_FLAGS; + typedef struct PyArrayMethod_Context_tag { /* The caller, which is typically the original ufunc. 
May be NULL */ PyObject *caller; @@ -107,7 +112,15 @@ typedef struct PyArrayMethod_Context_tag { /* Operand descriptors, filled in by resolve_descriptors */ PyArray_Descr *const *descriptors; + #if NPY_FEATURE_VERSION > NPY_2_3_API_VERSION + void * _reserved; + /* + * Optional flag to pass information into the inner loop + * NPY_ARRAYMETHOD_CONTEXT_FLAGS + */ + uint64_t flags; /* Structure may grow (this is harmless for DType authors) */ + #endif } PyArrayMethod_Context; @@ -144,7 +157,6 @@ typedef struct { #define NPY_METH_contiguous_indexed_loop 9 #define _NPY_METH_static_data 10 - /* * The resolve descriptors function, must be able to handle NULL values for * all output (but not input) `given_descrs` and fill `loop_descrs`. diff --git a/numpy/_core/include/numpy/ndarraytypes.h b/numpy/_core/include/numpy/ndarraytypes.h index ecd5d9724528..f740788f3720 100644 --- a/numpy/_core/include/numpy/ndarraytypes.h +++ b/numpy/_core/include/numpy/ndarraytypes.h @@ -233,6 +233,16 @@ typedef enum { NPY_KEEPORDER=2 } NPY_ORDER; +#if NPY_FEATURE_VERSION >= NPY_2_4_API_VERSION +/* + * check that no values overflow/change during casting + * Used explicitly only in the ArrayMethod creation or resolve_dtypes functions to + * indicate that a same-value cast is supported. 
In external APIs, use only + * NPY_SAME_VALUE_CASTING + */ +#define NPY_SAME_VALUE_CASTING_FLAG 64 +#endif + /* For specifying allowed casting in operations which support it */ typedef enum { _NPY_ERROR_OCCURRED_IN_CAST = -1, @@ -246,6 +256,9 @@ typedef enum { NPY_SAME_KIND_CASTING=3, /* Allow any casts */ NPY_UNSAFE_CASTING=4, +#if NPY_FEATURE_VERSION >= NPY_2_4_API_VERSION + NPY_SAME_VALUE_CASTING=NPY_UNSAFE_CASTING | NPY_SAME_VALUE_CASTING_FLAG, +#endif } NPY_CASTING; typedef enum { diff --git a/numpy/_core/include/numpy/numpyconfig.h b/numpy/_core/include/numpy/numpyconfig.h index 52d7e2b5d7d7..c129a3aceb6d 100644 --- a/numpy/_core/include/numpy/numpyconfig.h +++ b/numpy/_core/include/numpy/numpyconfig.h @@ -84,6 +84,7 @@ #define NPY_2_1_API_VERSION 0x00000013 #define NPY_2_2_API_VERSION 0x00000013 #define NPY_2_3_API_VERSION 0x00000014 +#define NPY_2_4_API_VERSION 0x00000015 /* @@ -172,8 +173,10 @@ #define NPY_FEATURE_VERSION_STRING "2.0" #elif NPY_FEATURE_VERSION == NPY_2_1_API_VERSION #define NPY_FEATURE_VERSION_STRING "2.1" -#elif NPY_FEATURE_VERSION == NPY_2_3_API_VERSION /* also 2.4 */ +#elif NPY_FEATURE_VERSION == NPY_2_3_API_VERSION #define NPY_FEATURE_VERSION_STRING "2.3" +#elif NPY_FEATURE_VERSION == NPY_2_4_API_VERSION + #define NPY_FEATURE_VERSION_STRING "2.4" #else #error "Missing version string define for new NumPy version." #endif diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index 61a9d53a42d0..e05f25da39ca 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -50,7 +50,8 @@ C_ABI_VERSION = '0x02000000' # 0x00000013 - 2.1.x # 0x00000013 - 2.2.x # 0x00000014 - 2.3.x -C_API_VERSION = '0x00000014' +# 0x00000015 - 2.4.x +C_API_VERSION = '0x00000015' # Check whether we have a mismatch between the set C API VERSION and the # actual C API VERSION. Will raise a MismatchCAPIError if so. 
diff --git a/numpy/_core/src/common/array_assign.h b/numpy/_core/src/common/array_assign.h index 8a28ed1d3a01..cc5f044ef080 100644 --- a/numpy/_core/src/common/array_assign.h +++ b/numpy/_core/src/common/array_assign.h @@ -46,7 +46,7 @@ PyArray_AssignRawScalar(PyArrayObject *dst, NPY_NO_EXPORT int raw_array_assign_scalar(int ndim, npy_intp const *shape, PyArray_Descr *dst_dtype, char *dst_data, npy_intp const *dst_strides, - PyArray_Descr *src_dtype, char *src_data); + PyArray_Descr *src_dtype, char *src_data, NPY_CASTING casting); /* * Assigns the scalar value to every element of the destination raw array @@ -59,7 +59,7 @@ raw_array_wheremasked_assign_scalar(int ndim, npy_intp const *shape, PyArray_Descr *dst_dtype, char *dst_data, npy_intp const *dst_strides, PyArray_Descr *src_dtype, char *src_data, PyArray_Descr *wheremask_dtype, char *wheremask_data, - npy_intp const *wheremask_strides); + npy_intp const *wheremask_strides, NPY_CASTING casting); /******** LOW-LEVEL ARRAY MANIPULATION HELPERS ********/ diff --git a/numpy/_core/src/multiarray/_multiarray_tests.c.src b/numpy/_core/src/multiarray/_multiarray_tests.c.src index 4b58b2789e65..8b0c4b3f85d1 100644 --- a/numpy/_core/src/multiarray/_multiarray_tests.c.src +++ b/numpy/_core/src/multiarray/_multiarray_tests.c.src @@ -2170,12 +2170,17 @@ run_casting_converter(PyObject* NPY_UNUSED(self), PyObject *args) if (!PyArg_ParseTuple(args, "O&", PyArray_CastingConverter, &casting)) { return NULL; } - switch (casting) { + switch ((int)casting) { case NPY_NO_CASTING: return PyUnicode_FromString("NPY_NO_CASTING"); case NPY_EQUIV_CASTING: return PyUnicode_FromString("NPY_EQUIV_CASTING"); case NPY_SAFE_CASTING: return PyUnicode_FromString("NPY_SAFE_CASTING"); case NPY_SAME_KIND_CASTING: return PyUnicode_FromString("NPY_SAME_KIND_CASTING"); case NPY_UNSAFE_CASTING: return PyUnicode_FromString("NPY_UNSAFE_CASTING"); + case NPY_SAME_VALUE_CASTING: return PyUnicode_FromString("NPY_SAME_VALUE_CASTING"); + case NPY_NO_CASTING | 
NPY_SAME_VALUE_CASTING_FLAG: return PyUnicode_FromString("NPY_NO_CASTING | NPY_SAME_VALUE_CASTING_FLAG"); + case NPY_EQUIV_CASTING | NPY_SAME_VALUE_CASTING_FLAG: return PyUnicode_FromString("NPY_EQUIV_CASTING | NPY_SAME_VALUE_CASTING_FLAG"); + case NPY_SAFE_CASTING | NPY_SAME_VALUE_CASTING_FLAG: return PyUnicode_FromString("NPY_SAFE_CASTING | NPY_SAME_VALUE_CASTING_FLAG"); + case NPY_SAME_KIND_CASTING | NPY_SAME_VALUE_CASTING_FLAG: return PyUnicode_FromString("NPY_SAME_KIND_CASTING | NPY_SAME_VALUE_CASTING_FLAG"); default: return PyLong_FromLong(casting); } } diff --git a/numpy/_core/src/multiarray/array_assign_array.c b/numpy/_core/src/multiarray/array_assign_array.c index 8886d1cacb40..306ed07b0ace 100644 --- a/numpy/_core/src/multiarray/array_assign_array.c +++ b/numpy/_core/src/multiarray/array_assign_array.c @@ -29,6 +29,8 @@ #include "umathmodule.h" +#define NPY_ALIGNED_CASTING_FLAG 1 + /* * Check that array data is both uint-aligned and true-aligned for all array * elements, as required by the copy/casting code in lowlevel_strided_loops.c @@ -79,7 +81,8 @@ copycast_isaligned(int ndim, npy_intp const *shape, NPY_NO_EXPORT int raw_array_assign_array(int ndim, npy_intp const *shape, PyArray_Descr *dst_dtype, char *dst_data, npy_intp const *dst_strides, - PyArray_Descr *src_dtype, char *src_data, npy_intp const *src_strides) + PyArray_Descr *src_dtype, char *src_data, npy_intp const *src_strides, + int flags) { int idim; npy_intp shape_it[NPY_MAXDIMS]; @@ -87,14 +90,11 @@ raw_array_assign_array(int ndim, npy_intp const *shape, npy_intp src_strides_it[NPY_MAXDIMS]; npy_intp coord[NPY_MAXDIMS]; - int aligned; + int aligned = (flags & NPY_ALIGNED_CASTING_FLAG) != 0; + int same_value_cast = (flags & NPY_SAME_VALUE_CASTING_FLAG) != 0; NPY_BEGIN_THREADS_DEF; - aligned = - copycast_isaligned(ndim, shape, dst_dtype, dst_data, dst_strides) && - copycast_isaligned(ndim, shape, src_dtype, src_data, src_strides); - /* Use raw iteration with no heap allocation */ if 
(PyArray_PrepareTwoRawArrayIter( ndim, shape, @@ -120,21 +120,25 @@ raw_array_assign_array(int ndim, npy_intp const *shape, /* Get the function to do the casting */ NPY_cast_info cast_info; - NPY_ARRAYMETHOD_FLAGS flags; + NPY_ARRAYMETHOD_FLAGS method_flags; if (PyArray_GetDTypeTransferFunction(aligned, src_strides_it[0], dst_strides_it[0], src_dtype, dst_dtype, 0, - &cast_info, &flags) != NPY_SUCCEED) { + &cast_info, &method_flags) != NPY_SUCCEED) { return -1; } - if (!(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) { + if (!(method_flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) { npy_clear_floatstatus_barrier((char*)&src_data); } + if (same_value_cast) { + cast_info.context.flags |= NPY_SAME_VALUE_CONTEXT_FLAG; + } + /* Ensure number of elements exceeds threshold for threading */ - if (!(flags & NPY_METH_REQUIRES_PYAPI)) { + if (!(method_flags & NPY_METH_REQUIRES_PYAPI)) { npy_intp nitems = 1, i; for (i = 0; i < ndim; i++) { nitems *= shape_it[i]; @@ -144,11 +148,14 @@ raw_array_assign_array(int ndim, npy_intp const *shape, npy_intp strides[2] = {src_strides_it[0], dst_strides_it[0]}; + int result = 0; NPY_RAW_ITER_START(idim, ndim, coord, shape_it) { /* Process the innermost dimension */ char *args[2] = {src_data, dst_data}; - if (cast_info.func(&cast_info.context, - args, &shape_it[0], strides, cast_info.auxdata) < 0) { + result = cast_info.func(&cast_info.context, + args, &shape_it[0], strides, + cast_info.auxdata); + if (result < 0) { goto fail; } } NPY_RAW_ITER_TWO_NEXT(idim, ndim, coord, shape_it, @@ -158,7 +165,7 @@ raw_array_assign_array(int ndim, npy_intp const *shape, NPY_END_THREADS; NPY_cast_info_xfree(&cast_info); - if (!(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) { + if (!(method_flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) { int fpes = npy_get_floatstatus_barrier((char*)&src_data); if (fpes && PyUFunc_GiveFloatingpointErrors("cast", fpes) < 0) { return -1; @@ -183,7 +190,7 @@ raw_array_wheremasked_assign_array(int ndim, npy_intp const *shape, PyArray_Descr 
*dst_dtype, char *dst_data, npy_intp const *dst_strides, PyArray_Descr *src_dtype, char *src_data, npy_intp const *src_strides, PyArray_Descr *wheremask_dtype, char *wheremask_data, - npy_intp const *wheremask_strides) + npy_intp const *wheremask_strides, int flags) { int idim; npy_intp shape_it[NPY_MAXDIMS]; @@ -192,14 +199,11 @@ raw_array_wheremasked_assign_array(int ndim, npy_intp const *shape, npy_intp wheremask_strides_it[NPY_MAXDIMS]; npy_intp coord[NPY_MAXDIMS]; - int aligned; + int aligned = (flags & NPY_ALIGNED_CASTING_FLAG) != 0; + int same_value_cast = (flags & NPY_SAME_VALUE_CASTING_FLAG) != 0; NPY_BEGIN_THREADS_DEF; - aligned = - copycast_isaligned(ndim, shape, dst_dtype, dst_data, dst_strides) && - copycast_isaligned(ndim, shape, src_dtype, src_data, src_strides); - /* Use raw iteration with no heap allocation */ if (PyArray_PrepareThreeRawArrayIter( ndim, shape, @@ -229,39 +233,45 @@ raw_array_wheremasked_assign_array(int ndim, npy_intp const *shape, /* Get the function to do the casting */ NPY_cast_info cast_info; - NPY_ARRAYMETHOD_FLAGS flags; + NPY_ARRAYMETHOD_FLAGS method_flags; if (PyArray_GetMaskedDTypeTransferFunction(aligned, src_strides_it[0], dst_strides_it[0], wheremask_strides_it[0], src_dtype, dst_dtype, wheremask_dtype, 0, - &cast_info, &flags) != NPY_SUCCEED) { + &cast_info, &method_flags) != NPY_SUCCEED) { return -1; } + if (same_value_cast) { + cast_info.context.flags |= NPY_SAME_VALUE_CONTEXT_FLAG; + } - if (!(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) { + if (!(method_flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) { npy_clear_floatstatus_barrier(src_data); } - if (!(flags & NPY_METH_REQUIRES_PYAPI)) { + if (!(method_flags & NPY_METH_REQUIRES_PYAPI)) { npy_intp nitems = 1, i; for (i = 0; i < ndim; i++) { nitems *= shape_it[i]; } NPY_BEGIN_THREADS_THRESHOLDED(nitems); } + npy_intp strides[2] = {src_strides_it[0], dst_strides_it[0]}; + int result = 0; NPY_RAW_ITER_START(idim, ndim, coord, shape_it) { PyArray_MaskedStridedUnaryOp 
*stransfer; stransfer = (PyArray_MaskedStridedUnaryOp *)cast_info.func; /* Process the innermost dimension */ char *args[2] = {src_data, dst_data}; - if (stransfer(&cast_info.context, - args, &shape_it[0], strides, - (npy_bool *)wheremask_data, wheremask_strides_it[0], - cast_info.auxdata) < 0) { + result = stransfer(&cast_info.context, + args, &shape_it[0], strides, + (npy_bool *)wheremask_data, wheremask_strides_it[0], + cast_info.auxdata); + if (result < 0) { goto fail; } } NPY_RAW_ITER_THREE_NEXT(idim, ndim, coord, shape_it, @@ -272,15 +282,13 @@ raw_array_wheremasked_assign_array(int ndim, npy_intp const *shape, NPY_END_THREADS; NPY_cast_info_xfree(&cast_info); - if (!(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) { + if (!(method_flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) { int fpes = npy_get_floatstatus_barrier(src_data); if (fpes && PyUFunc_GiveFloatingpointErrors("cast", fpes) < 0) { return -1; } } - return 0; - fail: NPY_END_THREADS; NPY_cast_info_xfree(&cast_info); @@ -307,7 +315,6 @@ PyArray_AssignArray(PyArrayObject *dst, PyArrayObject *src, NPY_CASTING casting) { int copied_src = 0; - npy_intp src_strides[NPY_MAXDIMS]; /* Use array_assign_scalar if 'src' NDIM is 0 */ @@ -438,12 +445,21 @@ PyArray_AssignArray(PyArrayObject *dst, PyArrayObject *src, } } + int flags = (NPY_SAME_VALUE_CASTING_FLAG & casting); + if (copycast_isaligned(PyArray_NDIM(dst), PyArray_DIMS(dst), PyArray_DESCR(dst), + PyArray_DATA(dst), PyArray_STRIDES(dst)) && + copycast_isaligned(PyArray_NDIM(dst), PyArray_DIMS(dst), PyArray_DESCR(src), + PyArray_DATA(src), src_strides)) { + /* NPY_ALIGNED_CASTING_FLAG is internal to this file */ + flags |= NPY_ALIGNED_CASTING_FLAG; + } + if (wheremask == NULL) { /* A straightforward value assignment */ /* Do the assignment with raw array iteration */ if (raw_array_assign_array(PyArray_NDIM(dst), PyArray_DIMS(dst), PyArray_DESCR(dst), PyArray_DATA(dst), PyArray_STRIDES(dst), - PyArray_DESCR(src), PyArray_DATA(src), src_strides) < 0) { + 
PyArray_DESCR(src), PyArray_DATA(src), src_strides, flags) < 0){ goto fail; } } @@ -465,7 +481,7 @@ PyArray_AssignArray(PyArrayObject *dst, PyArrayObject *src, PyArray_DESCR(dst), PyArray_DATA(dst), PyArray_STRIDES(dst), PyArray_DESCR(src), PyArray_DATA(src), src_strides, PyArray_DESCR(wheremask), PyArray_DATA(wheremask), - wheremask_strides) < 0) { + wheremask_strides, flags) < 0) { goto fail; } } diff --git a/numpy/_core/src/multiarray/array_assign_scalar.c b/numpy/_core/src/multiarray/array_assign_scalar.c index 0199ba969eb9..f7d04ed0a39f 100644 --- a/numpy/_core/src/multiarray/array_assign_scalar.c +++ b/numpy/_core/src/multiarray/array_assign_scalar.c @@ -37,7 +37,7 @@ NPY_NO_EXPORT int raw_array_assign_scalar(int ndim, npy_intp const *shape, PyArray_Descr *dst_dtype, char *dst_data, npy_intp const *dst_strides, - PyArray_Descr *src_dtype, char *src_data) + PyArray_Descr *src_dtype, char *src_data, NPY_CASTING casting) { int idim; npy_intp shape_it[NPY_MAXDIMS], dst_strides_it[NPY_MAXDIMS]; @@ -86,13 +86,19 @@ raw_array_assign_scalar(int ndim, npy_intp const *shape, NPY_BEGIN_THREADS_THRESHOLDED(nitems); } + if (((int)casting & NPY_SAME_VALUE_CASTING_FLAG) > 0) { + cast_info.context.flags |= NPY_SAME_VALUE_CONTEXT_FLAG; + } + npy_intp strides[2] = {0, dst_strides_it[0]}; + int result = 0; NPY_RAW_ITER_START(idim, ndim, coord, shape_it) { /* Process the innermost dimension */ char *args[2] = {src_data, dst_data}; - if (cast_info.func(&cast_info.context, - args, &shape_it[0], strides, cast_info.auxdata) < 0) { + result = cast_info.func(&cast_info.context, + args, &shape_it[0], strides, cast_info.auxdata); + if (result < 0) { goto fail; } } NPY_RAW_ITER_ONE_NEXT(idim, ndim, coord, @@ -126,7 +132,7 @@ raw_array_wheremasked_assign_scalar(int ndim, npy_intp const *shape, PyArray_Descr *dst_dtype, char *dst_data, npy_intp const *dst_strides, PyArray_Descr *src_dtype, char *src_data, PyArray_Descr *wheremask_dtype, char *wheremask_data, - npy_intp const 
*wheremask_strides) + npy_intp const *wheremask_strides, NPY_CASTING casting) { int idim; npy_intp shape_it[NPY_MAXDIMS], dst_strides_it[NPY_MAXDIMS]; @@ -177,8 +183,12 @@ raw_array_wheremasked_assign_scalar(int ndim, npy_intp const *shape, } NPY_BEGIN_THREADS_THRESHOLDED(nitems); } + if (((int)casting & NPY_SAME_VALUE_CASTING_FLAG) != 0) { + cast_info.context.flags |= NPY_SAME_VALUE_CONTEXT_FLAG; + } npy_intp strides[2] = {0, dst_strides_it[0]}; + int result = 0; NPY_RAW_ITER_START(idim, ndim, coord, shape_it) { /* Process the innermost dimension */ @@ -186,10 +196,11 @@ raw_array_wheremasked_assign_scalar(int ndim, npy_intp const *shape, stransfer = (PyArray_MaskedStridedUnaryOp *)cast_info.func; char *args[2] = {src_data, dst_data}; - if (stransfer(&cast_info.context, + result = stransfer(&cast_info.context, args, &shape_it[0], strides, (npy_bool *)wheremask_data, wheremask_strides_it[0], - cast_info.auxdata) < 0) { + cast_info.auxdata); + if (result < 0) { goto fail; } } NPY_RAW_ITER_TWO_NEXT(idim, ndim, coord, shape_it, @@ -298,7 +309,7 @@ PyArray_AssignRawScalar(PyArrayObject *dst, /* Do the assignment with raw array iteration */ if (raw_array_assign_scalar(PyArray_NDIM(dst), PyArray_DIMS(dst), PyArray_DESCR(dst), PyArray_DATA(dst), PyArray_STRIDES(dst), - src_dtype, src_data) < 0) { + src_dtype, src_data, casting) < 0) { goto fail; } } @@ -319,7 +330,7 @@ PyArray_AssignRawScalar(PyArrayObject *dst, PyArray_DESCR(dst), PyArray_DATA(dst), PyArray_STRIDES(dst), src_dtype, src_data, PyArray_DESCR(wheremask), PyArray_DATA(wheremask), - wheremask_strides) < 0) { + wheremask_strides, casting) < 0) { goto fail; } } diff --git a/numpy/_core/src/multiarray/array_method.c b/numpy/_core/src/multiarray/array_method.c index 5554cad5e2dd..755735ede290 100644 --- a/numpy/_core/src/multiarray/array_method.c +++ b/numpy/_core/src/multiarray/array_method.c @@ -39,6 +39,7 @@ #include "convert_datatype.h" #include "common.h" #include "numpy/ufuncobject.h" +#include 
"dtype_transfer.h" /* @@ -184,12 +185,17 @@ validate_spec(PyArrayMethod_Spec *spec) "not exceed %d. (method: %s)", NPY_MAXARGS, spec->name); return -1; } - switch (spec->casting) { + switch ((int)spec->casting) { case NPY_NO_CASTING: case NPY_EQUIV_CASTING: case NPY_SAFE_CASTING: case NPY_SAME_KIND_CASTING: case NPY_UNSAFE_CASTING: + case NPY_NO_CASTING | NPY_SAME_VALUE_CASTING_FLAG: + case NPY_EQUIV_CASTING | NPY_SAME_VALUE_CASTING_FLAG: + case NPY_SAFE_CASTING | NPY_SAME_VALUE_CASTING_FLAG: + case NPY_SAME_KIND_CASTING | NPY_SAME_VALUE_CASTING_FLAG: + case NPY_UNSAFE_CASTING | NPY_SAME_VALUE_CASTING_FLAG: break; default: if (spec->casting != -1) { @@ -668,10 +674,11 @@ boundarraymethod__resolve_descripors( if (!parametric) { /* * Non-parametric can only mismatch if it switches from equiv to no - * (e.g. due to byteorder changes). + * (e.g. due to byteorder changes). Throw away same_value casting flag */ + int method_casting = self->method->casting & ~NPY_SAME_VALUE_CASTING_FLAG; if (cast != self->method->casting && - self->method->casting != NPY_EQUIV_CASTING) { + method_casting != NPY_EQUIV_CASTING) { PyErr_Format(PyExc_RuntimeError, "resolve_descriptors cast level changed even though " "the cast is non-parametric where the only possible " @@ -792,11 +799,10 @@ boundarraymethod__simple_strided_call( return NULL; } - PyArrayMethod_Context context = { - .caller = NULL, - .method = self->method, - .descriptors = descrs, - }; + PyArrayMethod_Context context; + NPY_context_init(&context, descrs); + context.method = self->method; + PyArrayMethod_StridedLoop *strided_loop = NULL; NpyAuxData *loop_data = NULL; NPY_ARRAYMETHOD_FLAGS flags = 0; @@ -984,3 +990,4 @@ NPY_NO_EXPORT PyTypeObject PyBoundArrayMethod_Type = { .tp_methods = boundarraymethod_methods, .tp_getset = boundarraymethods_getters, }; + diff --git a/numpy/_core/src/multiarray/common.c b/numpy/_core/src/multiarray/common.c index 8236ec5c65ae..4d1d9c238418 100644 --- a/numpy/_core/src/multiarray/common.c +++ 
b/numpy/_core/src/multiarray/common.c @@ -25,15 +25,6 @@ * variable is misnamed, but it's part of the public API so I'm not sure we * can just change it. Maybe someone should try and see if anyone notices. */ -/* - * In numpy 1.6 and earlier, this was NPY_UNSAFE_CASTING. In a future - * release, it will become NPY_SAME_KIND_CASTING. Right now, during the - * transitional period, we continue to follow the NPY_UNSAFE_CASTING rules (to - * avoid breaking people's code), but we also check for whether the cast would - * be allowed under the NPY_SAME_KIND_CASTING rules, and if not we issue a - * warning (that people's code will be broken in a future release.) - */ - NPY_NO_EXPORT NPY_CASTING NPY_DEFAULT_ASSIGN_CASTING = NPY_SAME_KIND_CASTING; diff --git a/numpy/_core/src/multiarray/conversion_utils.c b/numpy/_core/src/multiarray/conversion_utils.c index d487aa16727d..164aa2e4c8b4 100644 --- a/numpy/_core/src/multiarray/conversion_utils.c +++ b/numpy/_core/src/multiarray/conversion_utils.c @@ -911,7 +911,7 @@ PyArray_CorrelatemodeConverter(PyObject *object, NPY_CORRELATEMODE *val) } } -static int casting_parser(char const *str, Py_ssize_t length, void *data) +static int casting_parser_full(char const *str, Py_ssize_t length, void *data, int can_use_same_value) { NPY_CASTING *casting = (NPY_CASTING *)data; if (length < 2) { @@ -941,6 +941,10 @@ static int casting_parser(char const *str, Py_ssize_t length, void *data) *casting = NPY_SAME_KIND_CASTING; return 0; } + if (can_use_same_value && length == 10 && strcmp(str, "same_value") == 0) { + *casting = NPY_SAME_VALUE_CASTING; + return 0; + } break; case 's': if (length == 6 && strcmp(str, "unsafe") == 0) { @@ -952,6 +956,11 @@ static int casting_parser(char const *str, Py_ssize_t length, void *data) return -1; } +static int casting_parser(char const *str, Py_ssize_t length, void *data) +{ + return casting_parser_full(str, length, data, 0); +} + /*NUMPY_API * Convert any Python object, *obj*, to an NPY_CASTING enum. 
*/ @@ -961,10 +970,26 @@ PyArray_CastingConverter(PyObject *obj, NPY_CASTING *casting) return string_converter_helper( obj, (void *)casting, casting_parser, "casting", "must be one of 'no', 'equiv', 'safe', " - "'same_kind', or 'unsafe'"); + "'same_kind', 'unsafe'"); + return 0; +} + +static int casting_parser_same_value(char const *str, Py_ssize_t length, void *data) +{ + return casting_parser_full(str, length, data, 1); +} + +NPY_NO_EXPORT int +PyArray_CastingConverterSameValue(PyObject *obj, NPY_CASTING *casting) +{ + return string_converter_helper( + obj, (void *)casting, casting_parser_same_value, "casting", + "must be one of 'no', 'equiv', 'safe', " + "'same_kind', 'unsafe', 'same_value'"); return 0; } + /***************************** * Other conversion functions *****************************/ diff --git a/numpy/_core/src/multiarray/convert.c b/numpy/_core/src/multiarray/convert.c index 8e0177616955..983d9bc19ce6 100644 --- a/numpy/_core/src/multiarray/convert.c +++ b/numpy/_core/src/multiarray/convert.c @@ -429,7 +429,7 @@ PyArray_FillWithScalar(PyArrayObject *arr, PyObject *obj) int retcode = raw_array_assign_scalar( PyArray_NDIM(arr), PyArray_DIMS(arr), descr, PyArray_BYTES(arr), PyArray_STRIDES(arr), - descr, (void *)value); + descr, (void *)value, NPY_UNSAFE_CASTING); if (PyDataType_REFCHK(descr)) { PyArray_ClearBuffer(descr, (void *)value, 0, 1, 1); diff --git a/numpy/_core/src/multiarray/convert_datatype.c b/numpy/_core/src/multiarray/convert_datatype.c index d34d852a706b..dbab8b4253d8 100644 --- a/numpy/_core/src/multiarray/convert_datatype.c +++ b/numpy/_core/src/multiarray/convert_datatype.c @@ -260,6 +260,10 @@ _get_castingimpl(PyObject *NPY_UNUSED(module), PyObject *args) * Supports the NPY_CAST_IS_VIEW check, and should be preferred to allow * extending cast-levels if necessary. * It is not valid for one of the arguments to be -1 to indicate an error. 
+ * Pass through NPY_SAME_VALUE_CASTING_FLAG on casting1, unless both have the + * flag, in which case return max_casting | NPY_SAME_VALUE_CASTING_FLAG. + * Usually this will be exactly NPY_SAME_VALUE_CASTING, but the logic here + * should handle other 'casting with same_value' options * * @param casting1 First (left-hand) casting level to compare * @param casting2 Second (right-hand) casting level to compare @@ -271,11 +275,14 @@ PyArray_MinCastSafety(NPY_CASTING casting1, NPY_CASTING casting2) if (casting1 < 0 || casting2 < 0) { return -1; } + int both_same_casting = casting1 & casting2 & NPY_SAME_VALUE_CASTING_FLAG; + casting1 &= ~NPY_SAME_VALUE_CASTING_FLAG; + casting2 &= ~NPY_SAME_VALUE_CASTING_FLAG; /* larger casting values are less safe */ if (casting1 > casting2) { - return casting1; + return casting1 | both_same_casting; } - return casting2; + return casting2 | both_same_casting; } @@ -746,13 +753,13 @@ can_cast_pyscalar_scalar_to( } else if (PyDataType_ISFLOAT(to)) { if (flags & NPY_ARRAY_WAS_PYTHON_COMPLEX) { - return casting == NPY_UNSAFE_CASTING; + return ((casting == NPY_UNSAFE_CASTING) || ((casting & NPY_SAME_VALUE_CASTING_FLAG) > 0)); } return 1; } else if (PyDataType_ISINTEGER(to)) { if (!(flags & NPY_ARRAY_WAS_PYTHON_INT)) { - return casting == NPY_UNSAFE_CASTING; + return ((casting == NPY_UNSAFE_CASTING) || ((casting & NPY_SAME_VALUE_CASTING_FLAG) > 0)); } return 1; } @@ -828,7 +835,7 @@ PyArray_CanCastArrayTo(PyArrayObject *arr, PyArray_Descr *to, NPY_NO_EXPORT const char * npy_casting_to_string(NPY_CASTING casting) { - switch (casting) { + switch ((int)casting) { case NPY_NO_CASTING: return "'no'"; case NPY_EQUIV_CASTING: @@ -839,6 +846,16 @@ npy_casting_to_string(NPY_CASTING casting) return "'same_kind'"; case NPY_UNSAFE_CASTING: return "'unsafe'"; + case NPY_NO_CASTING | NPY_SAME_VALUE_CASTING_FLAG: + return "'no and same_value'"; + case NPY_EQUIV_CASTING | NPY_SAME_VALUE_CASTING_FLAG: + return "'equiv and same_value'"; + case 
NPY_SAFE_CASTING | NPY_SAME_VALUE_CASTING_FLAG: + return "'safe and same_value'"; + case NPY_SAME_KIND_CASTING | NPY_SAME_VALUE_CASTING_FLAG: + return "'same_kind and same_value'"; + case NPY_UNSAFE_CASTING | NPY_SAME_VALUE_CASTING_FLAG: + return "'same_value'"; default: return ""; } @@ -2116,9 +2133,9 @@ legacy_same_dtype_resolve_descriptors( if (PyDataType_ISNOTSWAPPED(loop_descrs[0]) == PyDataType_ISNOTSWAPPED(loop_descrs[1])) { *view_offset = 0; - return NPY_NO_CASTING; + return NPY_NO_CASTING | NPY_SAME_VALUE_CASTING_FLAG; } - return NPY_EQUIV_CASTING; + return NPY_EQUIV_CASTING | NPY_SAME_VALUE_CASTING_FLAG; } @@ -2305,6 +2322,7 @@ add_numeric_cast(PyArray_DTypeMeta *from, PyArray_DTypeMeta *to) if (dtypes[0]->singleton->kind == dtypes[1]->singleton->kind && from_itemsize == to_itemsize) { spec.casting = NPY_EQUIV_CASTING; + spec.casting |= NPY_SAME_VALUE_CASTING_FLAG; /* When there is no casting (equivalent C-types) use byteswap loops */ slots[0].slot = NPY_METH_resolve_descriptors; @@ -2319,13 +2337,17 @@ add_numeric_cast(PyArray_DTypeMeta *from, PyArray_DTypeMeta *to) } else if (_npy_can_cast_safely_table[from->type_num][to->type_num]) { spec.casting = NPY_SAFE_CASTING; - } - else if (dtype_kind_to_ordering(dtypes[0]->singleton->kind) <= - dtype_kind_to_ordering(dtypes[1]->singleton->kind)) { - spec.casting = NPY_SAME_KIND_CASTING; + spec.casting |= NPY_SAME_VALUE_CASTING_FLAG; } else { - spec.casting = NPY_UNSAFE_CASTING; + if (dtype_kind_to_ordering(dtypes[0]->singleton->kind) <= + dtype_kind_to_ordering(dtypes[1]->singleton->kind)) { + spec.casting = NPY_SAME_KIND_CASTING; + } + else { + spec.casting = NPY_UNSAFE_CASTING; + } + spec.casting |= NPY_SAME_VALUE_CASTING_FLAG; } /* Create a bound method, unbind and store it */ @@ -2463,10 +2485,10 @@ cast_to_string_resolve_descriptors( return -1; } - if (self->casting == NPY_UNSAFE_CASTING) { + if ((self->casting == NPY_UNSAFE_CASTING) || ((self->casting & NPY_SAME_VALUE_CASTING_FLAG) > 0)){ 
assert(dtypes[0]->type_num == NPY_UNICODE && dtypes[1]->type_num == NPY_STRING); - return NPY_UNSAFE_CASTING; + return self->casting; } if (loop_descrs[1]->elsize >= size) { diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index 5bcf1cdf7ba3..b7f0dcb521bb 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -2830,7 +2830,6 @@ PyArray_CopyAsFlat(PyArrayObject *dst, PyArrayObject *src, NPY_ORDER order) count = (src_count < dst_count) ? src_count : dst_count; if (cast_info.func(&cast_info.context, args, &count, strides, cast_info.auxdata) < 0) { - res = -1; break; } diff --git a/numpy/_core/src/multiarray/datetime.c b/numpy/_core/src/multiarray/datetime.c index 0dac36a0903b..d7d56d086669 100644 --- a/numpy/_core/src/multiarray/datetime.c +++ b/numpy/_core/src/multiarray/datetime.c @@ -1233,6 +1233,10 @@ can_cast_datetime64_units(NPY_DATETIMEUNIT src_unit, NPY_DATETIMEUNIT dst_unit, NPY_CASTING casting) { + if ((casting & NPY_SAME_VALUE_CASTING_FLAG) > 0) { + /* TODO: support this */ + return 0; + } switch (casting) { /* Allow anything with unsafe casting */ case NPY_UNSAFE_CASTING: @@ -1278,6 +1282,10 @@ can_cast_timedelta64_units(NPY_DATETIMEUNIT src_unit, NPY_DATETIMEUNIT dst_unit, NPY_CASTING casting) { + if ((casting & NPY_SAME_VALUE_CASTING_FLAG) > 0) { + /* Use SAFE_CASTING, which implies SAME_VALUE */ + casting = NPY_SAFE_CASTING; + } switch (casting) { /* Allow anything with unsafe casting */ case NPY_UNSAFE_CASTING: @@ -1325,6 +1333,10 @@ can_cast_datetime64_metadata(PyArray_DatetimeMetaData *src_meta, PyArray_DatetimeMetaData *dst_meta, NPY_CASTING casting) { + if ((casting & NPY_SAME_VALUE_CASTING_FLAG) > 0) { + /* Force SAFE_CASTING */ + casting = NPY_SAFE_CASTING; + } switch (casting) { case NPY_UNSAFE_CASTING: return 1; @@ -1352,6 +1364,10 @@ can_cast_timedelta64_metadata(PyArray_DatetimeMetaData *src_meta, PyArray_DatetimeMetaData *dst_meta, NPY_CASTING casting) { + if ((casting 
& NPY_SAME_VALUE_CASTING_FLAG) > 0) { + /* Use SAFE_CASTING, which implies SAME_VALUE */ + casting = NPY_SAFE_CASTING; + } switch (casting) { case NPY_UNSAFE_CASTING: return 1; diff --git a/numpy/_core/src/multiarray/datetime_strings.c b/numpy/_core/src/multiarray/datetime_strings.c index f92eec3f5a59..97f24cfe821e 100644 --- a/numpy/_core/src/multiarray/datetime_strings.c +++ b/numpy/_core/src/multiarray/datetime_strings.c @@ -984,7 +984,7 @@ NpyDatetime_MakeISO8601Datetime( * the string representation, so ensure that the data * is being cast according to the casting rule. */ - if (casting != NPY_UNSAFE_CASTING) { + if ((casting != NPY_UNSAFE_CASTING) && ((casting & NPY_SAME_VALUE_CASTING_FLAG) == 0)) { /* Producing a date as a local time is always 'unsafe' */ if (base <= NPY_FR_D && local) { PyErr_SetString(PyExc_TypeError, "Cannot create a local " diff --git a/numpy/_core/src/multiarray/dtype_transfer.c b/numpy/_core/src/multiarray/dtype_transfer.c index 64d5bfa89e8e..dbad10842aff 100644 --- a/numpy/_core/src/multiarray/dtype_transfer.c +++ b/numpy/_core/src/multiarray/dtype_transfer.c @@ -2910,8 +2910,6 @@ _clear_cast_info_after_get_loop_failure(NPY_cast_info *cast_info) * TODO: Expand the view functionality for general offsets, not just 0: * Partial casts could be skipped also for `view_offset != 0`. * - * The `out_needs_api` flag must be initialized. - * * NOTE: In theory casting errors here could be slightly misleading in case * of a multi-step casting scenario. It should be possible to improve * this in the future. 
@@ -3428,11 +3426,13 @@ PyArray_CastRawArrays(npy_intp count, /* Cast */ char *args[2] = {src, dst}; npy_intp strides[2] = {src_stride, dst_stride}; - cast_info.func(&cast_info.context, args, &count, strides, cast_info.auxdata); + int result = cast_info.func(&cast_info.context, args, &count, strides, cast_info.auxdata); /* Cleanup */ NPY_cast_info_xfree(&cast_info); - + if (result < 0) { + return NPY_FAIL; + } if (flags & NPY_METH_REQUIRES_PYAPI && PyErr_Occurred()) { return NPY_FAIL; } @@ -3831,4 +3831,4 @@ PyArray_PrepareThreeRawArrayIter(int ndim, npy_intp const *shape, *out_dataC = dataC; *out_ndim = ndim; return 0; -} \ No newline at end of file +} diff --git a/numpy/_core/src/multiarray/dtype_transfer.h b/numpy/_core/src/multiarray/dtype_transfer.h index 04df5cb64c22..a354820e5d45 100644 --- a/numpy/_core/src/multiarray/dtype_transfer.h +++ b/numpy/_core/src/multiarray/dtype_transfer.h @@ -25,6 +25,15 @@ typedef struct { } NPY_cast_info; +static inline void +NPY_context_init(PyArrayMethod_Context *context, PyArray_Descr *descr[2]) +{ + context->descriptors = descr; + context->caller = NULL; + context->_reserved = NULL; + context->flags = 0; +} + /* * Create a new cast-info struct with cast_info->context.descriptors linked. * Compilers should inline this to ensure the whole struct is not actually @@ -40,13 +49,9 @@ NPY_cast_info_init(NPY_cast_info *cast_info) * a scratch space to `NPY_cast_info` and link to that instead. */ cast_info->auxdata = NULL; - cast_info->context.descriptors = cast_info->descriptors; - - // TODO: Delete this again probably maybe create a new minimal init macro - cast_info->context.caller = NULL; + NPY_context_init(&(cast_info->context), cast_info->descriptors); } - /* * Free's all references and data held inside the struct (not the struct). 
* First checks whether `cast_info.func == NULL`, and assume it is @@ -100,6 +105,7 @@ NPY_cast_info_copy(NPY_cast_info *cast_info, NPY_cast_info *original) Py_XINCREF(cast_info->descriptors[1]); cast_info->context.caller = original->context.caller; Py_XINCREF(cast_info->context.caller); + cast_info->context.flags = original->context.flags; cast_info->context.method = original->context.method; Py_XINCREF(cast_info->context.method); if (original->auxdata == NULL) { diff --git a/numpy/_core/src/multiarray/legacy_dtype_implementation.c b/numpy/_core/src/multiarray/legacy_dtype_implementation.c index abfc1bd0e3cd..eee7ce492fab 100644 --- a/numpy/_core/src/multiarray/legacy_dtype_implementation.c +++ b/numpy/_core/src/multiarray/legacy_dtype_implementation.c @@ -367,7 +367,7 @@ PyArray_LegacyCanCastTypeTo(PyArray_Descr *from, PyArray_Descr *to, * field; recurse just in case the single field is itself structured. */ if (!PyDataType_HASFIELDS(to) && !PyDataType_ISOBJECT(to)) { - if (casting == NPY_UNSAFE_CASTING && + if ((casting == NPY_UNSAFE_CASTING || ((casting & NPY_SAME_VALUE_CASTING_FLAG) > 0)) && PyDict_Size(lfrom->fields) == 1) { Py_ssize_t ppos = 0; PyObject *tuple; @@ -399,7 +399,7 @@ PyArray_LegacyCanCastTypeTo(PyArray_Descr *from, PyArray_Descr *to, * casting; this is not correct, but needed since the treatment in can_cast * below got out of sync with astype; see gh-13667. */ - if (casting == NPY_UNSAFE_CASTING) { + if (casting == NPY_UNSAFE_CASTING || (casting & NPY_SAME_VALUE_CASTING_FLAG) > 0) { return 1; } } @@ -408,14 +408,14 @@ PyArray_LegacyCanCastTypeTo(PyArray_Descr *from, PyArray_Descr *to, * If "from" is a simple data type and "to" has fields, then only * unsafe casting works (and that works always, even to multiple fields). */ - return casting == NPY_UNSAFE_CASTING; + return (casting == NPY_UNSAFE_CASTING || (casting & NPY_SAME_VALUE_CASTING_FLAG) > 0); } /* * Everything else we consider castable for unsafe for now. 
* FIXME: ensure what we do here is consistent with "astype", * i.e., deal more correctly with subarrays and user-defined dtype. */ - else if (casting == NPY_UNSAFE_CASTING) { + else if (casting == NPY_UNSAFE_CASTING || (casting & NPY_SAME_VALUE_CASTING_FLAG) > 0) { return 1; } /* diff --git a/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src b/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src index 0a69a08e678e..13a126c53850 100644 --- a/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src +++ b/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src @@ -17,6 +17,7 @@ #include #include #include +#include #include "lowlevel_strided_loops.h" #include "array_assign.h" @@ -24,6 +25,7 @@ #include "usertypes.h" #include "umathmodule.h" +#include "gil_utils.h" /* * x86 platform works with unaligned access but the compiler is allowed to @@ -77,6 +79,57 @@ a = (x)[7]; (x)[7] = (x)[8]; (x)[8] = a; \ } +/* half-to-double, copied from CPP to allow inlining */ + +static NPY_GCC_OPT_3 +uint64_t ToDoubleBits(uint16_t h) +{ + uint16_t h_exp = (h&0x7c00u); + uint64_t d_sgn = ((uint64_t)h&0x8000u) << 48; + switch (h_exp) { + case 0x0000u: { // 0 or subnormal + uint16_t h_sig = (h&0x03ffu); + // Signed zero + if (h_sig == 0) { + return d_sgn; + } + // Subnormal + h_sig <<= 1; + while ((h_sig&0x0400u) == 0) { + h_sig <<= 1; + h_exp++; + } + uint64_t d_exp = ((uint64_t)(1023 - 15 - h_exp)) << 52; + uint64_t d_sig = ((uint64_t)(h_sig&0x03ffu)) << 42; + return d_sgn + d_exp + d_sig; + } + case 0x7c00u: // inf or NaN + // All-ones exponent and a copy of the significand + return d_sgn + 0x7ff0000000000000ULL + (((uint64_t)(h&0x03ffu)) << 42); + default: // normalized + // Just need to adjust the exponent and shift + return d_sgn + (((uint64_t)(h&0x7fffu) + 0xfc000u) << 42); + } +} + +NPY_FINLINE +npy_uint64 _npy_halfbits_to_doublebits(npy_uint16 h){ + /* + * Use npymath versions for all the special cases, only inline the + * x86_64 non-intrinsic case. 
Someday we will rewrite this in CPP and + * can then explore inlining more + */ + #if defined(NPY_HAVE_AVX512FP16) + return npy_halfbits_to_doublebits(h); + #elif defined(NPY_HAVE_VSX3) && defined(NPY_HAVE_VSX3_HALF_DOUBLE) + return npy_halfbits_to_doublebits(h); + #elif defined(__ARM_FP16_FORMAT_IEEE) + return npy_halfbits_to_doublebits(h); + #else + return (double)(ToDoubleBits(h)); + #endif +} + /************* STRIDED COPYING/SWAPPING SPECIALIZED FUNCTIONS *************/ /**begin repeat @@ -742,6 +795,7 @@ NPY_NO_EXPORT PyArrayMethod_StridedLoop * * #is_float1 = 0*12, 1, 0, 0, 1, 0, 0# * #is_double1 = 0*13, 1, 0, 0, 1, 0# * #is_complex1 = 0*15, 1*3# + * #is_unsigned1 = 1*6, 0*12# */ /**begin repeat1 @@ -766,6 +820,16 @@ NPY_NO_EXPORT PyArrayMethod_StridedLoop * * npy_byte, npy_short, npy_int, npy_long, npy_longlong, * _npy_half, npy_float, npy_double, npy_longdouble, * npy_float, npy_double, npy_longdouble# + * #type2max = 0, + * UCHAR_MAX, USHRT_MAX, UINT_MAX, ULONG_MAX, ULLONG_MAX, + * SCHAR_MAX, SHRT_MAX, INT_MAX, LONG_MAX, LLONG_MAX, + * 65500, FLT_MAX, DBL_MAX, LDBL_MAX, + * FLT_MAX, DBL_MAX, LDBL_MAX# + * #type2min = 0, + * 0, 0, 0, 0, 0, + * SCHAR_MIN, SHRT_MIN, INT_MIN, LONG_MIN, LLONG_MIN, + * -65500, -FLT_MAX, -DBL_MAX, -LDBL_MAX, + * -FLT_MAX, -DBL_MAX, -LDBL_MAX# * #is_bool2 = 1, 0*17# * #is_emu_half2 = 0*11, EMULATED_FP16, 0*6# * #is_native_half2 = 0*11, NATIVE_FP16, 0*6# @@ -810,40 +874,52 @@ NPY_NO_EXPORT PyArrayMethod_StridedLoop * /* Determine an appropriate casting conversion function */ #if @is_emu_half1@ - +# define _TO_RTYPE1(x) npy_half_to_float(x) # if @is_float2@ # define _CONVERT_FN(x) npy_halfbits_to_floatbits(x) +# define _ROUND_TRIP(x) npy_floatbits_to_halfbits(_CONVERT_FN(x)) # elif @is_double2@ -# define _CONVERT_FN(x) npy_halfbits_to_doublebits(x) +# define _CONVERT_FN(x) _npy_halfbits_to_doublebits(x) +# define _ROUND_TRIP(x) npy_doublebits_to_halfbits(_CONVERT_FN(x)) # elif @is_emu_half2@ # define _CONVERT_FN(x) (x) +# define 
_ROUND_TRIP(x) (x) # elif @is_bool2@ # define _CONVERT_FN(x) ((npy_bool)!npy_half_iszero(x)) +# define _ROUND_TRIP(x) npy_float_to_half((float)(!npy_half_iszero(x))) # else # define _CONVERT_FN(x) ((_TYPE2)npy_half_to_float(x)) +# define _ROUND_TRIP(x) npy_float_to_half((float)_CONVERT_FN(x)) # endif #elif @is_emu_half2@ +# define _TO_RTYPE1(x) (@rtype1@)(x) # if @is_float1@ # define _CONVERT_FN(x) npy_floatbits_to_halfbits(x) +# define _ROUND_TRIP(x) (@rtype1@)npy_halfbits_to_floatbits(_CONVERT_FN(x)) # elif @is_double1@ # define _CONVERT_FN(x) npy_doublebits_to_halfbits(x) +# define _ROUND_TRIP(x) (@rtype1@)_npy_halfbits_to_doublebits(_CONVERT_FN(x)) # elif @is_emu_half1@ # define _CONVERT_FN(x) (x) +# define _ROUND_TRIP(x) (x) # elif @is_bool1@ # define _CONVERT_FN(x) npy_float_to_half((float)(x!=0)) +# define _ROUND_TRIP(x) (x) # else # define _CONVERT_FN(x) npy_float_to_half((float)x) +# define _ROUND_TRIP(x) ((@rtype1@)npy_half_to_float(_CONVERT_FN(x))) # endif #else - # if @is_bool2@ || @is_bool1@ # define _CONVERT_FN(x) ((npy_bool)(x != 0)) # else # define _CONVERT_FN(x) ((_TYPE2)x) # endif +# define _TO_RTYPE1(x) (@rtype1@)(x) +# define _ROUND_TRIP(x) _TO_RTYPE1(_CONVERT_FN(x)) #endif @@ -869,8 +945,109 @@ NPY_NO_EXPORT PyArrayMethod_StridedLoop * #define GCC_CAST_OPT_LEVEL NPY_GCC_OPT_3 #endif +#define _RETURN_SAME_VALUE_FAILURE \ + npy_gil_error(PyExc_ValueError, "could not cast 'same_value' @name1@ to @name2@"); \ + return -1 + +#if !@is_bool2@ +/* + * Check various modes of failure to accurately cast src_value to dst + */ +static GCC_CAST_OPT_LEVEL int +@prefix@_check_same_value_@name1@_to_@name2@(@rtype1@ src_value) { + + /* 1. 
NaN/Infs always work for float to float and otherwise never */ +#if (@is_float1@ || @is_emu_half1@ || @is_double1@ || @is_native_half1@) + if (!npy_isfinite(_TO_RTYPE1(src_value))) { +# if (@is_float2@ || @is_emu_half2@ || @is_double2@ || @is_native_half2@) + return 0; /* float to float can preserve NaN/Inf */ +# else + _RETURN_SAME_VALUE_FAILURE; /* cannot preserve NaN/Inf */ +# endif + } +#endif + /* + * 2. Check that the src does not overflow the dst. + * This is complicated by a warning that, for instance, int8 cannot + * overflow int64max + */ +# ifdef __GNUC__ +# pragma GCC diagnostic push +# ifdef __clang__ +# pragma GCC diagnostic ignored "-Wtautological-constant-out-of-range-compare" +# endif +# pragma GCC diagnostic ignored "-Wtautological-compare" +# endif +# if !@is_bool1@ + if (_TO_RTYPE1(src_value) > @type2max@) { + _RETURN_SAME_VALUE_FAILURE; + } +# if !@is_unsigned1@ + if (_TO_RTYPE1(src_value) < @type2min@) { + _RETURN_SAME_VALUE_FAILURE; + } +# endif +# endif /* !is_bool1 */ + /* 3. Check that the value can round trip exactly */ + if (src_value != _ROUND_TRIP(src_value)) { + _RETURN_SAME_VALUE_FAILURE; + } +# ifdef __GNUC__ +# pragma GCC diagnostic pop +# endif /* __GNUC__ */ + return 0; +} +#endif + +/* + * Use a declaration instead of moving the function definition to here to make reviewing + * easier. 
TODO: move the repeat3 up here instead of these declarations + */ + +static GCC_CAST_OPT_LEVEL int +@prefix@_cast_@name1@_to_@name2@_no_same_value( + PyArrayMethod_Context *context, char *const *args, + const npy_intp *dimensions, const npy_intp *strides, + NpyAuxData *NPY_UNUSED(data)); + +#if !@is_bool2@ +static GCC_CAST_OPT_LEVEL int +@prefix@_cast_@name1@_to_@name2@_same_value( + PyArrayMethod_Context *context, char *const *args, + const npy_intp *dimensions, const npy_intp *strides, + NpyAuxData *NPY_UNUSED(data)); +#endif + +/* + * This is the entry point function called outside this file + */ + static GCC_CAST_OPT_LEVEL int @prefix@_cast_@name1@_to_@name2@( + PyArrayMethod_Context *context, char *const *args, + const npy_intp *dimensions, const npy_intp *strides, + NpyAuxData *data) +{ +#if !@is_bool2@ + int same_value_casting = ((context->flags & NPY_SAME_VALUE_CONTEXT_FLAG) == NPY_SAME_VALUE_CONTEXT_FLAG); + if (same_value_casting) { + return @prefix@_cast_@name1@_to_@name2@_same_value(context, args, dimensions, strides, data); + } else { +#else + { +#endif + return @prefix@_cast_@name1@_to_@name2@_no_same_value(context, args, dimensions, strides, data); +}} + +/**begin repeat3 + * #func_name = no_same_value,same_value# + * #same_value = 0,1# + */ + + +#if !(@is_bool2@ && @same_value@) +static GCC_CAST_OPT_LEVEL int +@prefix@_cast_@name1@_to_@name2@_@func_name@( PyArrayMethod_Context *context, char *const *args, const npy_intp *dimensions, const npy_intp *strides, NpyAuxData *NPY_UNUSED(data)) @@ -898,7 +1075,7 @@ static GCC_CAST_OPT_LEVEL int assert(N == 0 || npy_is_aligned(dst, NPY_ALIGNOF(_TYPE2))); #endif - /*printf("@prefix@_cast_@name1@_to_@name2@\n");*/ + /* printf("@prefix@_cast_@name1@_to_@name2@_@func_name@, N=%ld\n", N); */ while (N--) { #if @aligned@ @@ -915,31 +1092,81 @@ static GCC_CAST_OPT_LEVEL int # if @is_complex2@ dst_value[0] = _CONVERT_FN(src_value[0]); dst_value[1] = _CONVERT_FN(src_value[1]); -# elif !@aligned@ +# if @same_value@ + 
if (@prefix@_check_same_value_@name1@_to_@name2@(*(@rtype1@ *)&src_value[0]) < 0) { + return -1; + } + if (@prefix@_check_same_value_@name1@_to_@name2@(*(@rtype1@ *)&src_value[1]) < 0) { + return -1; + } +# endif //same_value +# elif !@aligned@ # if @is_bool2@ dst_value = _CONVERT_FN(src_value[0]) || _CONVERT_FN(src_value[1]); # else dst_value = _CONVERT_FN(src_value[0]); +# if @same_value@ + if (@prefix@_check_same_value_@name1@_to_@name2@(*(@rtype1@ *)&src_value[0]) < 0) { + return -1; + } + if (src_value[1] != 0) { + npy_gil_error(PyExc_ValueError, "could not cast 'same_value' @name1@ to @name2@: imag is not 0"); + return -1; + } +# endif //same_value # endif # else # if @is_bool2@ *(_TYPE2 *)dst = _CONVERT_FN(src_value[0]) || _CONVERT_FN(src_value[1]); # else *(_TYPE2 *)dst = _CONVERT_FN(src_value[0]); +# if @same_value@ + if (@prefix@_check_same_value_@name1@_to_@name2@(*(@rtype1@ *)&src_value[0]) < 0) { + return -1; + } + if (src_value[1] != 0) { + npy_gil_error(PyExc_ValueError, "could not cast 'same_value' @name1@ to @name2@: imag is not 0"); + return -1; + } +# endif //same_value # endif # endif -#else +#else // @is_complex1@ # if @is_complex2@ # if !@aligned@ dst_value[0] = _CONVERT_FN(src_value); -# else +# if @same_value@ + if (@prefix@_check_same_value_@name1@_to_@name2@(*(@rtype1@ *)&src_value) < 0) { + return -1; + } +# endif //same_value +# else //!aligned dst_value[0] = _CONVERT_FN(*(_TYPE1 *)src); +# if @same_value@ + if (@prefix@_check_same_value_@name1@_to_@name2@(*(@rtype1@ *)src) < 0) { + return -1; + } +# endif //same_value # endif dst_value[1] = 0; # elif !@aligned@ dst_value = _CONVERT_FN(src_value); +# if !@is_bool2@ +# if @same_value@ + if (@prefix@_check_same_value_@name1@_to_@name2@(*(@rtype1@ *)&src_value) < 0) { + return -1; + } +# endif //same_value +# endif // @is_bool2@ # else *(_TYPE2 *)dst = _CONVERT_FN(*(_TYPE1 *)src); +# if !@is_bool2@ +# if @same_value@ + if (@prefix@_check_same_value_@name1@_to_@name2@(*((@rtype1@ *)src)) < 
0) { + return -1; + } +# endif //same_value +# endif // @is_bool2@ # endif #endif @@ -962,6 +1189,9 @@ static GCC_CAST_OPT_LEVEL int } return 0; } +#endif // !@is_bool2@ + +/**end repeat3**/ #if @is_native_half1@ || @is_float1@ || @is_double1@ #if @is_native_half2@ || @is_float2@ || @is_double2@ @@ -977,6 +1207,9 @@ static GCC_CAST_OPT_LEVEL int #undef _CONVERT_FN #undef _TYPE2 #undef _TYPE1 +#undef _TO_RTYPE1 +#undef _ROUND_TRIP +#undef _RETURN_SAME_VALUE_FAILURE #endif diff --git a/numpy/_core/src/multiarray/mapping.c b/numpy/_core/src/multiarray/mapping.c index 28d6a5a26938..997f21928db3 100644 --- a/numpy/_core/src/multiarray/mapping.c +++ b/numpy/_core/src/multiarray/mapping.c @@ -2169,7 +2169,8 @@ array_assign_subscript(PyArrayObject *self, PyObject *ind, PyObject *op) * Could add a casting check, but apparently most assignments do * not care about safe casting. */ - if (mapiter_set(mit, &cast_info, meth_flags, is_aligned) < 0) { + int result = mapiter_set(mit, &cast_info, meth_flags, is_aligned); + if (result < 0) { goto fail; } diff --git a/numpy/_core/src/multiarray/methods.c b/numpy/_core/src/multiarray/methods.c index 2d754d7c6e91..c856fc52742d 100644 --- a/numpy/_core/src/multiarray/methods.c +++ b/numpy/_core/src/multiarray/methods.c @@ -752,6 +752,10 @@ array_toscalar(PyArrayObject *self, PyObject *args) return PyArray_MultiIndexGetItem(self, multi_index); } + +NPY_NO_EXPORT int +PyArray_CastingConverterSameValue(PyObject *obj, NPY_CASTING *casting); + static PyObject * array_astype(PyArrayObject *self, PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) @@ -770,7 +774,7 @@ array_astype(PyArrayObject *self, if (npy_parse_arguments("astype", args, len_args, kwnames, "dtype", &PyArray_DTypeOrDescrConverterRequired, &dt_info, "|order", &PyArray_OrderConverter, &order, - "|casting", &PyArray_CastingConverter, &casting, + "|casting", &PyArray_CastingConverterSameValue, &casting, "|subok", &PyArray_PythonPyIntFromInt, &subok, "|copy", 
&PyArray_AsTypeCopyConverter, &forcecopy, NULL, NULL, NULL) < 0) { @@ -840,7 +844,12 @@ array_astype(PyArrayObject *self, ((PyArrayObject_fields *)ret)->nd = PyArray_NDIM(self); ((PyArrayObject_fields *)ret)->descr = dtype; } - int success = PyArray_CopyInto(ret, self); + int success; + if (((int)casting & NPY_SAME_VALUE_CASTING_FLAG) > 0) { + success = PyArray_AssignArray(ret, self, NULL, casting); + } else { + success = PyArray_AssignArray(ret, self, NULL, NPY_UNSAFE_CASTING); + } Py_DECREF(dtype); ((PyArrayObject_fields *)ret)->nd = out_ndim; diff --git a/numpy/_core/src/umath/legacy_array_method.c b/numpy/_core/src/umath/legacy_array_method.c index 705262fedd38..7a85937fcc8f 100644 --- a/numpy/_core/src/umath/legacy_array_method.c +++ b/numpy/_core/src/umath/legacy_array_method.c @@ -439,11 +439,10 @@ PyArray_NewLegacyWrappingArrayMethod(PyUFuncObject *ufunc, descrs[i] = bound_res->dtypes[i]->singleton; } - PyArrayMethod_Context context = { - (PyObject *)ufunc, - bound_res->method, - descrs, - }; + PyArrayMethod_Context context; + NPY_context_init(&context, descrs); + context.caller = (PyObject *)ufunc; + context.method = bound_res->method; int ret = get_initial_from_ufunc(&context, 0, context.method->legacy_initial); diff --git a/numpy/_core/src/umath/reduction.c b/numpy/_core/src/umath/reduction.c index b376b94936bc..384ac052b226 100644 --- a/numpy/_core/src/umath/reduction.c +++ b/numpy/_core/src/umath/reduction.c @@ -372,7 +372,7 @@ PyUFunc_ReduceWrapper(PyArrayMethod_Context *context, PyArray_NDIM(result), PyArray_DIMS(result), PyArray_DESCR(result), PyArray_BYTES(result), PyArray_STRIDES(result), - op_dtypes[0], initial_buf); + op_dtypes[0], initial_buf, NPY_UNSAFE_CASTING); if (ret < 0) { goto fail; } diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index 6b8cc1789f94..75e651c29088 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -2085,11 +2085,10 @@ 
PyUFunc_GeneralizedFunctionInternal(PyUFuncObject *ufunc, NPY_SIZEOF_INTP * nop); /* Final preparation of the arraymethod call */ - PyArrayMethod_Context context = { - .caller = (PyObject *)ufunc, - .method = ufuncimpl, - .descriptors = operation_descrs, - }; + PyArrayMethod_Context context; + NPY_context_init(&context, operation_descrs); + context.caller = (PyObject *)ufunc; + context.method = ufuncimpl; PyArrayMethod_StridedLoop *strided_loop; NPY_ARRAYMETHOD_FLAGS flags = 0; @@ -2204,11 +2203,10 @@ PyUFunc_GenericFunctionInternal(PyUFuncObject *ufunc, } /* Final preparation of the arraymethod call */ - PyArrayMethod_Context context = { - .caller = (PyObject *)ufunc, - .method = ufuncimpl, - .descriptors = operation_descrs, - }; + PyArrayMethod_Context context; + NPY_context_init(&context, operation_descrs); + context.caller = (PyObject *)ufunc; + context.method = ufuncimpl; /* Do the ufunc loop */ if (wheremask != NULL) { @@ -2554,11 +2552,10 @@ PyUFunc_Reduce(PyUFuncObject *ufunc, return NULL; } - PyArrayMethod_Context context = { - .caller = (PyObject *)ufunc, - .method = ufuncimpl, - .descriptors = descrs, - }; + PyArrayMethod_Context context; + NPY_context_init(&context, descrs); + context.caller = (PyObject *)ufunc; + context.method = ufuncimpl; PyArrayObject *result = PyUFunc_ReduceWrapper(&context, arr, out, wheremask, axis_flags, keepdims, @@ -2630,12 +2627,10 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, assert(PyArray_EquivTypes(descrs[0], descrs[1]) && PyArray_EquivTypes(descrs[0], descrs[2])); - PyArrayMethod_Context context = { - .caller = (PyObject *)ufunc, - .method = ufuncimpl, - .descriptors = descrs, - }; - + PyArrayMethod_Context context; + NPY_context_init(&context, descrs); + context.caller = (PyObject *)ufunc, + context.method = ufuncimpl, ndim = PyArray_NDIM(arr); #if NPY_UF_DBG_TRACING @@ -3062,12 +3057,10 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind, goto fail; } - 
PyArrayMethod_Context context = { - .caller = (PyObject *)ufunc, - .method = ufuncimpl, - .descriptors = descrs, - }; - + PyArrayMethod_Context context; + NPY_context_init(&context, descrs); + context.caller = (PyObject *)ufunc, + context.method = ufuncimpl, ndim = PyArray_NDIM(arr); #if NPY_UF_DBG_TRACING @@ -5918,11 +5911,10 @@ ufunc_at(PyUFuncObject *ufunc, PyObject *args) } } - PyArrayMethod_Context context = { - .caller = (PyObject *)ufunc, - .method = ufuncimpl, - .descriptors = operation_descrs, - }; + PyArrayMethod_Context context; + NPY_context_init(&context, operation_descrs); + context.caller = (PyObject *)ufunc; + context.method = ufuncimpl; /* Use contiguous strides; if there is such a loop it may be faster */ npy_intp strides[3] = { diff --git a/numpy/_core/tests/test_casting_unittests.py b/numpy/_core/tests/test_casting_unittests.py index 91ecc0dc75b0..5f643f8045ba 100644 --- a/numpy/_core/tests/test_casting_unittests.py +++ b/numpy/_core/tests/test_casting_unittests.py @@ -10,13 +10,14 @@ import enum import random import textwrap +import warnings import pytest import numpy as np from numpy._core._multiarray_umath import _get_castingimpl as get_castingimpl from numpy.lib.stride_tricks import as_strided -from numpy.testing import assert_array_equal +from numpy.testing import assert_array_equal, assert_equal # Simple skips object, parametric and long double (unsupported by struct) simple_dtypes = "?bhilqBHILQefdFD" @@ -76,8 +77,11 @@ class Casting(enum.IntEnum): safe = 2 same_kind = 3 unsafe = 4 + same_value = 64 +same_value_dtypes = tuple(type(np.dtype(c)) for c in "?bhilqBHILQefdgFDG") + def _get_cancast_table(): table = textwrap.dedent(""" X ? 
b h i l q B H I L Q e f d g F D G S U V O M m @@ -117,6 +121,9 @@ def _get_cancast_table(): cancast[from_dt] = {} for to_dt, c in zip(dtypes, row[2::2]): cancast[from_dt][to_dt] = convert_cast[c] + # Of the types checked, numeric cast support same-value + if from_dt in same_value_dtypes and to_dt in same_value_dtypes: + cancast[from_dt][to_dt] |= Casting.same_value return cancast @@ -272,9 +279,11 @@ def test_simple_cancast(self, from_Dt): if view_off is not None: # If a view is acceptable, this is "no" casting # and byte order must be matching. - assert casting == Casting.no - # The above table lists this as "equivalent" - assert Casting.equiv == CAST_TABLE[from_Dt][to_Dt] + assert casting == Casting.no | Casting.same_value + # The above table lists this as "equivalent", perhaps + # with "same_value" + v = CAST_TABLE[from_Dt][to_Dt] & ~Casting.same_value + assert Casting.equiv == v # Note that to_res may not be the same as from_dt assert from_res.isnative == to_res.isnative else: @@ -304,6 +313,7 @@ def test_simple_direct_casts(self, from_dt): to_dt = to_dt.values[0] cast = get_castingimpl(type(from_dt), type(to_dt)) + # print("from_dt", from_dt, "to_dt", to_dt) casting, (from_res, to_res), view_off = cast._resolve_descriptors( (from_dt, to_dt)) @@ -317,7 +327,9 @@ def test_simple_direct_casts(self, from_dt): arr1, arr2, values = self.get_data(from_dt, to_dt) + # print("2", arr1, arr2, cast) cast._simple_strided_call((arr1, arr2)) + # print("3") # Check via python list assert arr2.tolist() == values @@ -815,3 +827,129 @@ def test_nonstandard_bool_to_other(self, dtype): res = nonstandard_bools.astype(dtype) expected = [0, 1, 1] assert_array_equal(res, expected) + + @pytest.mark.parametrize("to_dtype", + np.typecodes["AllInteger"] + np.typecodes["AllFloat"]) + @pytest.mark.parametrize("from_dtype", + np.typecodes["AllInteger"] + np.typecodes["AllFloat"]) + @pytest.mark.filterwarnings("ignore::numpy.exceptions.ComplexWarning") + def test_same_value_overflow(self, 
from_dtype, to_dtype): + if from_dtype == to_dtype: + return + top1 = 0 + top2 = 0 + try: + top1 = np.iinfo(from_dtype).max + except ValueError: + top1 = np.finfo(from_dtype).max + try: + top2 = np.iinfo(to_dtype).max + except ValueError: + top2 = np.finfo(to_dtype).max + # No need to test if top2 > top1, since the test will also do the + # reverse dtype matching. Catch the warning if the comparison warns, + # i.e. np.int16(65535) < np.float16(6.55e4) + with warnings.catch_warnings(record=True): + warnings.simplefilter("always", RuntimeWarning) + if top2 >= top1: + # will be tested when the dtypes are reversed + return + # Happy path + arr1 = np.array([0] * 10, dtype=from_dtype) + arr2 = np.array([0] * 10, dtype=to_dtype) + arr1_astype = arr1.astype(to_dtype, casting='same_value') + assert_equal(arr1_astype, arr2, strict=True) + # Make it overflow, both aligned and unaligned + arr1[0] = top1 + aligned = np.empty(arr1.itemsize * arr1.size + 1, 'uint8') + unaligned = aligned[1:].view(arr1.dtype) + unaligned[:] = arr1 + with pytest.raises(ValueError): + # Casting float to float with overflow should raise + # RuntimeWarning (fperror) + # Casting float to int with overflow sometimes raises + # RuntimeWarning (fperror) + # Casting with overflow and 'same_value', should raise ValueError + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always", RuntimeWarning) + arr1.astype(to_dtype, casting='same_value') + assert len(w) < 2 + with pytest.raises(ValueError): + # again, unaligned + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always", RuntimeWarning) + unaligned.astype(to_dtype, casting='same_value') + assert len(w) < 2 + + @pytest.mark.parametrize("to_dtype", + np.typecodes["AllInteger"]) + @pytest.mark.parametrize("from_dtype", + np.typecodes["AllFloat"]) + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + def test_same_value_float_to_int(self, from_dtype, to_dtype): + # Should not raise, since the values can 
round trip + arr1 = np.arange(10, dtype=from_dtype) + aligned = np.empty(arr1.itemsize * arr1.size + 1, 'uint8') + unaligned = aligned[1:].view(arr1.dtype) + unaligned[:] = arr1 + arr2 = np.arange(10, dtype=to_dtype) + assert_array_equal(arr1.astype(to_dtype, casting='same_value'), arr2) + assert_array_equal(unaligned.astype(to_dtype, casting='same_value'), arr2) + + # Should raise, since values cannot round trip. Might warn too about + # FPE errors + arr1_66 = arr1 + 0.666 + unaligned_66 = unaligned + 0.66 + with pytest.raises(ValueError): + arr1_66.astype(to_dtype, casting='same_value') + with pytest.raises(ValueError): + unaligned_66.astype(to_dtype, casting='same_value') + + @pytest.mark.parametrize("to_dtype", + np.typecodes["AllInteger"]) + @pytest.mark.parametrize("from_dtype", + np.typecodes["AllFloat"]) + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + def test_same_value_float_to_int_scalar(self, from_dtype, to_dtype): + # Should not raise, since the values can round trip + s1 = np.array(10, dtype=from_dtype) + assert s1.astype(to_dtype, casting='same_value') == 10 + + # Should raise, since values cannot round trip + s1_66 = s1 + 0.666 + with pytest.raises(ValueError): + s1_66.astype(to_dtype, casting='same_value') + + @pytest.mark.parametrize("value", [np.nan, np.inf, -np.inf]) + @pytest.mark.filterwarnings("ignore::numpy.exceptions.ComplexWarning") + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + def test_same_value_naninf(self, value): + # These work, but may trigger FPE warnings on macOS + np.array([value], dtype=np.half).astype(np.cdouble, casting='same_value') + np.array([value], dtype=np.half).astype(np.double, casting='same_value') + np.array([value], dtype=np.float32).astype(np.cdouble, casting='same_value') + np.array([value], dtype=np.float32).astype(np.double, casting='same_value') + np.array([value], dtype=np.float32).astype(np.half, casting='same_value') + np.array([value], dtype=np.complex64).astype(np.half, 
casting='same_value') + # These fail + with pytest.raises(ValueError): + np.array([value], dtype=np.half).astype(np.int64, casting='same_value') + with pytest.raises(ValueError): + np.array([value], dtype=np.complex64).astype(np.int64, casting='same_value') + with pytest.raises(ValueError): + np.array([value], dtype=np.float32).astype(np.int64, casting='same_value') + + @pytest.mark.filterwarnings("ignore::numpy.exceptions.ComplexWarning") + def test_same_value_complex(self): + arr = np.array([complex(1, 1)], dtype=np.cdouble) + # This works + arr.astype(np.complex64, casting='same_value') + # Casting with a non-zero imag part fails + with pytest.raises(ValueError): + arr.astype(np.float32, casting='same_value') + + def test_same_value_scalar(self): + i = np.array(123, dtype=np.int64) + f = np.array(123, dtype=np.float64) + assert i.astype(np.float64, casting='same_value') == f + assert f.astype(np.int64, casting='same_value') == f diff --git a/numpy/_core/tests/test_conversion_utils.py b/numpy/_core/tests/test_conversion_utils.py index d63ca9e58df5..067c2973c592 100644 --- a/numpy/_core/tests/test_conversion_utils.py +++ b/numpy/_core/tests/test_conversion_utils.py @@ -172,9 +172,12 @@ def test_valid(self): self._check("no", "NPY_NO_CASTING") self._check("equiv", "NPY_EQUIV_CASTING") self._check("safe", "NPY_SAFE_CASTING") - self._check("same_kind", "NPY_SAME_KIND_CASTING") self._check("unsafe", "NPY_UNSAFE_CASTING") + self._check("same_kind", "NPY_SAME_KIND_CASTING") + def test_invalid(self): + # Currently, 'same_value' is supported only in ndarray.astype + self._check_value_error("same_value") class TestIntpConverter: """ Tests of PyArray_IntpConverter """ diff --git a/numpy/_core/tests/test_datetime.py b/numpy/_core/tests/test_datetime.py index 21151ed11ba0..18b32ea42da2 100644 --- a/numpy/_core/tests/test_datetime.py +++ b/numpy/_core/tests/test_datetime.py @@ -1870,6 +1870,10 @@ def test_datetime_as_string(self): '2032-07-18') 
assert_equal(np.datetime_as_string(a, unit='D', casting='unsafe'), '2032-07-18') + + with pytest.raises(ValueError): + np.datetime_as_string(a, unit='Y', casting='same_value') + assert_equal(np.datetime_as_string(a, unit='h'), '2032-07-18T12') assert_equal(np.datetime_as_string(a, unit='m'), '2032-07-18T12:23') diff --git a/numpy/_core/tests/test_einsum.py b/numpy/_core/tests/test_einsum.py index 82789eae0679..6403d47034d7 100644 --- a/numpy/_core/tests/test_einsum.py +++ b/numpy/_core/tests/test_einsum.py @@ -79,6 +79,11 @@ def test_einsum_errors(self, do_opt, einsum_fn): b = np.ones((3, 4, 5)) einsum_fn('aabcb,abc', a, b) + with pytest.raises(ValueError): + a = np.arange(3) + # einsum_path does not yet accept kwarg 'casting' + np.einsum('ij->j', [a, a], casting='same_value') + def test_einsum_sorting_behavior(self): # Case 1: 26 dimensions (all lowercase indices) n1 = 26 diff --git a/numpy/_core/tests/test_shape_base.py b/numpy/_core/tests/test_shape_base.py index 1b9728e5c006..2d606a2d33fd 100644 --- a/numpy/_core/tests/test_shape_base.py +++ b/numpy/_core/tests/test_shape_base.py @@ -383,6 +383,11 @@ def test_concatenate(self): assert_(out is rout) assert_equal(res, rout) + def test_concatenate_same_value(self): + r4 = list(range(4)) + with pytest.raises(ValueError, match="^casting must be one of"): + concatenate([r4, r4], casting="same_value") + @pytest.mark.skipif( IS_PYPY, reason="PYPY handles sq_concat, nb_add differently than cpython" From eb354d7f470e263da747044b05dd735093be0374 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 17 Sep 2025 23:57:39 +0200 Subject: [PATCH 0476/1718] TST: Test the ``ScalarType`` ordering assumptions --- numpy/_core/tests/test_numerictypes.py | 29 ++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/numpy/_core/tests/test_numerictypes.py b/numpy/_core/tests/test_numerictypes.py index c6ecd5327850..5763a964c41d 100644 --- a/numpy/_core/tests/test_numerictypes.py +++ b/numpy/_core/tests/test_numerictypes.py 
@@ -616,6 +616,35 @@ def test_names_are_undersood_by_dtype(self, t): assert np.dtype(t.__name__).type is t +class TestScalarTypeOrder: + @pytest.mark.parametrize(('a', 'b'), [ + # signedinteger + (np.byte, np.short), + (np.short, np.intc), + (np.intc, np.long), + (np.long, np.longlong), + # unsignedinteger + (np.ubyte, np.ushort), + (np.ushort, np.uintc), + (np.uintc, np.ulong), + (np.ulong, np.ulonglong), + # floating + (np.half, np.single), + (np.single, np.double), + (np.double, np.longdouble), + # complexfloating + (np.csingle, np.cdouble), + (np.cdouble, np.clongdouble), + # flexible + (np.bytes_, np.str_), + (np.str_, np.void), + # bouncy castles + (np.datetime64, np.timedelta64), + ]) + def test_stable_ordering(self, a: type[np.generic], b: type[np.generic]): + assert np.ScalarType.index(a) <= np.ScalarType.index(b) + + class TestBoolDefinition: def test_bool_definition(self): assert nt.bool is np.bool From 0a8763c168335278e2a4728eb1703cd3ca7ef146 Mon Sep 17 00:00:00 2001 From: mattip Date: Thu, 18 Sep 2025 09:26:40 +0300 Subject: [PATCH 0477/1718] DOC: add another mention of 'same_value' [skip actions][skip azp][skip cirrus] --- doc/source/user/basics.types.rst | 34 +++++++++++++++++++++----------- 1 file changed, 22 insertions(+), 12 deletions(-) diff --git a/doc/source/user/basics.types.rst b/doc/source/user/basics.types.rst index a605d32fcd51..41c100e519db 100644 --- a/doc/source/user/basics.types.rst +++ b/doc/source/user/basics.types.rst @@ -35,18 +35,6 @@ See :ref:`arrays.dtypes.constructing` for more information about specifying and constructing data type objects, including how to specify parameters like the byte order. -To convert the type of an array, use the .astype() method. For example: :: - - >>> z.astype(np.float64) #doctest: +NORMALIZE_WHITESPACE - array([0., 1., 2.]) - -Note that, above, we could have used the *Python* float object as a dtype -instead of `numpy.float64`. 
NumPy knows that -:class:`int` refers to `numpy.int_`, :class:`bool` means -`numpy.bool`, that :class:`float` is `numpy.float64` and -:class:`complex` is `numpy.complex128`. The other data-types do not have -Python equivalents. - To determine the type of an array, look at the dtype attribute:: >>> z.dtype @@ -66,6 +54,28 @@ properties of the type, such as whether it is an integer:: >>> np.issubdtype(d, np.floating) False +To convert the type of an array, use the .astype() method. For example: :: + + >>> z.astype(np.float64) #doctest: +NORMALIZE_WHITESPACE + array([0., 1., 2.]) + +Note that, above, we could have used the *Python* float object as a dtype +instead of `numpy.float64`. NumPy knows that +:class:`int` refers to `numpy.int_`, :class:`bool` means +`numpy.bool`, that :class:`float` is `numpy.float64` and +:class:`complex` is `numpy.complex128`. The other data-types do not have +Python equivalents. + +Sometimes the conversion can overflow, for instance when converting a `numpy.int64` value +300 to `numpy.int8`. NumPy follows C casting rules, so that value would overflow and +become 44 ``(300 - 256)``. If you wish to avoid such overflows, you can specify that the +overflow action fail by using ``same_value`` for the ``casting`` argument (see also +:ref:`overflow-errors`): :: + + >>> z.astype(np.float64, casting="same_value") #doctest: +NORMALIZE_WHITESPACE + array([0., 1., 2.]) + + Numerical Data Types -------------------- From 22cd369bec46b557f9bf38cf8e3a8ff57ac71016 Mon Sep 17 00:00:00 2001 From: DWesl <22566757+DWesl@users.noreply.github.com> Date: Thu, 18 Sep 2025 04:04:04 -0400 Subject: [PATCH 0478/1718] BUG: Include python-including headers first (#29281) Makes sure that Python headers always come first (if implicitly) Also adds a script to check this and a CI run to ensure this. NOTE(seberg): If that script/CI run becomes problematic and not super easy to fix maybe we shouldn't worry too much about even deleting it again. 
It's worthwhile for sure, but I am not sure it is worthwhile to spend too many cycles making pretty. --- azure-pipelines.yml | 3 + numpy/_core/src/common/binop_override.h | 2 +- numpy/_core/src/common/blas_utils.c | 8 +- numpy/_core/src/common/blas_utils.h | 4 +- numpy/_core/src/multiarray/array_method.c | 2 +- numpy/_core/src/multiarray/common.h | 2 + .../src/multiarray/textreading/conversions.h | 4 +- numpy/_core/src/npysort/npysort_common.h | 2 +- numpy/_core/src/umath/clip.cpp | 2 +- numpy/random/src/mt19937/randomkit.c | 2 + tools/check_python_h_first.py | 254 ++++++++++++++++++ tools/get_submodule_paths.py | 31 +++ 12 files changed, 304 insertions(+), 12 deletions(-) create mode 100755 tools/check_python_h_first.py create mode 100644 tools/get_submodule_paths.py diff --git a/azure-pipelines.yml b/azure-pipelines.yml index af6e5cf52ac4..89dee2267352 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -56,6 +56,9 @@ stages: python tools/linter.py displayName: 'Run Lint Checks' failOnStderr: true + - script: | + python tools/check_python_h_first.py + displayName: 'Check Python.h is first file included' - job: Linux_Python_311_32bit_full_with_asserts pool: diff --git a/numpy/_core/src/common/binop_override.h b/numpy/_core/src/common/binop_override.h index a6b4747ca560..e17b147c1d0a 100644 --- a/numpy/_core/src/common/binop_override.h +++ b/numpy/_core/src/common/binop_override.h @@ -1,8 +1,8 @@ #ifndef NUMPY_CORE_SRC_COMMON_BINOP_OVERRIDE_H_ #define NUMPY_CORE_SRC_COMMON_BINOP_OVERRIDE_H_ -#include #include +#include #include "numpy/arrayobject.h" #include "get_attr_string.h" diff --git a/numpy/_core/src/common/blas_utils.c b/numpy/_core/src/common/blas_utils.c index 155963891071..409d3818ae0f 100644 --- a/numpy/_core/src/common/blas_utils.c +++ b/numpy/_core/src/common/blas_utils.c @@ -1,3 +1,7 @@ +#include "numpy/npy_math.h" // npy_get_floatstatus_barrier +#include "numpy/numpyconfig.h" // NPY_VISIBILITY_HIDDEN +#include "blas_utils.h" + +#include 
#include #include @@ -6,10 +10,6 @@ #include #endif -#include "numpy/numpyconfig.h" // NPY_VISIBILITY_HIDDEN -#include "numpy/npy_math.h" // npy_get_floatstatus_barrier -#include "blas_utils.h" - #if NPY_BLAS_CHECK_FPE_SUPPORT /* Return whether we're running on macOS 15.4 or later diff --git a/numpy/_core/src/common/blas_utils.h b/numpy/_core/src/common/blas_utils.h index 8c1437f88899..34d6321c2920 100644 --- a/numpy/_core/src/common/blas_utils.h +++ b/numpy/_core/src/common/blas_utils.h @@ -1,7 +1,7 @@ -#include - #include "numpy/numpyconfig.h" // for NPY_VISIBILITY_HIDDEN +#include + /* NPY_BLAS_CHECK_FPE_SUPPORT controls whether we need a runtime check * for floating-point error (FPE) support in BLAS. */ diff --git a/numpy/_core/src/multiarray/array_method.c b/numpy/_core/src/multiarray/array_method.c index 755735ede290..a0a6c3fda7d6 100644 --- a/numpy/_core/src/multiarray/array_method.c +++ b/numpy/_core/src/multiarray/array_method.c @@ -30,8 +30,8 @@ #define _UMATHMODULE #define _MULTIARRAYMODULE -#include #include +#include #include "arrayobject.h" #include "array_coercion.h" #include "array_method.h" diff --git a/numpy/_core/src/multiarray/common.h b/numpy/_core/src/multiarray/common.h index a18f74bda71a..d6b9ad36588f 100644 --- a/numpy/_core/src/multiarray/common.h +++ b/numpy/_core/src/multiarray/common.h @@ -1,6 +1,8 @@ #ifndef NUMPY_CORE_SRC_MULTIARRAY_COMMON_H_ #define NUMPY_CORE_SRC_MULTIARRAY_COMMON_H_ +#include + #include #include "numpy/npy_common.h" #include "numpy/ndarraytypes.h" diff --git a/numpy/_core/src/multiarray/textreading/conversions.h b/numpy/_core/src/multiarray/textreading/conversions.h index 09f2510413b5..e30b28a9a7af 100644 --- a/numpy/_core/src/multiarray/textreading/conversions.h +++ b/numpy/_core/src/multiarray/textreading/conversions.h @@ -1,12 +1,12 @@ #ifndef NUMPY_CORE_SRC_MULTIARRAY_TEXTREADING_CONVERSIONS_H_ #define NUMPY_CORE_SRC_MULTIARRAY_TEXTREADING_CONVERSIONS_H_ -#include - #define NPY_NO_DEPRECATED_API NPY_API_VERSION 
#define _MULTIARRAYMODULE #include "numpy/arrayobject.h" +#include + #include "textreading/parser_config.h" NPY_NO_EXPORT int diff --git a/numpy/_core/src/npysort/npysort_common.h b/numpy/_core/src/npysort/npysort_common.h index 0680ae52afe3..8b7e0ef43f88 100644 --- a/numpy/_core/src/npysort/npysort_common.h +++ b/numpy/_core/src/npysort/npysort_common.h @@ -1,8 +1,8 @@ #ifndef __NPY_SORT_COMMON_H__ #define __NPY_SORT_COMMON_H__ -#include #include +#include #include #include "dtypemeta.h" diff --git a/numpy/_core/src/umath/clip.cpp b/numpy/_core/src/umath/clip.cpp index e051692c6d48..127b019ef8ae 100644 --- a/numpy/_core/src/umath/clip.cpp +++ b/numpy/_core/src/umath/clip.cpp @@ -1,7 +1,6 @@ /** * This module provides the inner loops for the clip ufunc */ -#include #define _UMATHMODULE #define _MULTIARRAYMODULE @@ -10,6 +9,7 @@ #define PY_SSIZE_T_CLEAN #include +#include #include "numpy/halffloat.h" #include "numpy/ndarraytypes.h" #include "numpy/npy_common.h" diff --git a/numpy/random/src/mt19937/randomkit.c b/numpy/random/src/mt19937/randomkit.c index 32f40fa49cc1..21d270234c9a 100644 --- a/numpy/random/src/mt19937/randomkit.c +++ b/numpy/random/src/mt19937/randomkit.c @@ -62,6 +62,8 @@ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +#include + /* static char const rcsid[] = "@(#) $Jeannot: randomkit.c,v 1.28 2005/07/21 22:14:09 js Exp $"; */ diff --git a/tools/check_python_h_first.py b/tools/check_python_h_first.py new file mode 100755 index 000000000000..c0d44ad635f4 --- /dev/null +++ b/tools/check_python_h_first.py @@ -0,0 +1,254 @@ +#!/usr/bin/env python +"""Check that Python.h is included before any stdlib headers. + +May be a bit overzealous, but it should get the job done. 
+""" +import argparse +import fnmatch +import os.path +import re +import subprocess +import sys + +from get_submodule_paths import get_submodule_paths + +HEADER_PATTERN = re.compile( + r'^\s*#\s*include\s*[<"]((?:\w+/)*\w+(?:\.h[hp+]{0,2})?)[>"]\s*$' +) + +PYTHON_INCLUDING_HEADERS = [ + "Python.h", + # This isn't all of Python.h, but it is the visibility macros + "pyconfig.h", + "numpy/npy_common.h", + "numpy/npy_math.h", + "numpy/arrayobject.h", + "numpy/ndarrayobject.h", + "numpy/ndarraytypes.h", + "numpy/random/distributions.h", + "npy_sort.h", + "npy_config.h", + "common.h", + "npy_cpu_features.h", + # Boost::Python + "boost/python.hpp", +] +LEAF_HEADERS = [ + "numpy/numpyconfig.h", + "numpy/npy_os.h", + "numpy/npy_cpu.h", + "numpy/utils.h", +] + +C_CPP_EXTENSIONS = (".c", ".h", ".cpp", ".hpp", ".cc", ".hh", ".cxx", ".hxx") +# check against list in diff_files + +PARSER = argparse.ArgumentParser(description=__doc__) +PARSER.add_argument( + "files", + nargs="*", + help="Lint these files or directories; use **/*.c to lint all files\n" + "Expects relative paths", +) + + +def check_python_h_included_first(name_to_check: str) -> int: + """Check that the passed file includes Python.h first if it does at all. + + Perhaps overzealous, but that should work around concerns with + recursion. + + Parameters + ---------- + name_to_check : str + The name of the file to check. 
+ + Returns + ------- + int + The number of headers before Python.h + """ + included_python = False + included_non_python_header = [] + warned_python_construct = False + basename_to_check = os.path.basename(name_to_check) + in_comment = False + includes_headers = False + with open(name_to_check) as in_file: + for i, line in enumerate(in_file, 1): + # Very basic comment parsing + # Assumes /*...*/ comments are on their own lines + if "/*" in line: + if "*/" not in line: + in_comment = True + # else-branch could use regex to remove comment and continue + continue + if in_comment: + if "*/" in line: + in_comment = False + continue + line = line.split("//", 1)[0].strip() + match = HEADER_PATTERN.match(line) + if match: + includes_headers = True + this_header = match.group(1) + if this_header in PYTHON_INCLUDING_HEADERS: + if included_non_python_header and not included_python: + # Headers before python-including header + print( + f"Header before Python.h in file {name_to_check:s}\n" + f"Python.h on line {i:d}, other header(s) on line(s)" + f" {included_non_python_header}", + file=sys.stderr, + ) + # else: # no headers before python-including header + included_python = True + PYTHON_INCLUDING_HEADERS.append(basename_to_check) + if os.path.dirname(name_to_check).endswith("include/numpy"): + PYTHON_INCLUDING_HEADERS.append(f"numpy/{basename_to_check:s}") + # We just found out where Python.h comes in this file + break + elif this_header in LEAF_HEADERS: + # This header is just defines, so it won't include + # the system headers that cause problems + continue + elif not included_python and ( + "numpy/" in this_header + and this_header not in LEAF_HEADERS + or "python" in this_header.lower() + ): + print( + f"Python.h not included before python-including header " + f"in file {name_to_check:s}\n" + f"{this_header:s} on line {i:d}", + file=sys.stderr, + ) + included_python = True + PYTHON_INCLUDING_HEADERS.append(basename_to_check) + elif not included_python and this_header not 
in LEAF_HEADERS: + included_non_python_header.append(i) + elif ( + not included_python + and not warned_python_construct + and ".h" not in basename_to_check + ) and ("py::" in line or "PYBIND11_" in line): + print( + "Python-including header not used before python constructs " + f"in file {name_to_check:s}\nConstruct on line {i:d}", + file=sys.stderr, + ) + warned_python_construct = True + if not includes_headers: + LEAF_HEADERS.append(basename_to_check) + return included_python and len(included_non_python_header) + + +def sort_order(path: str) -> tuple[int, str]: + if "include/numpy" in path: + # Want to process numpy/*.h first, to work out which of those + # include Python.h directly + priority = 0x00 + elif "h" in os.path.splitext(path)[1].lower(): + # Then other headers, which tend to include numpy/*.h + priority = 0x10 + else: + # Source files after headers, to give the best chance of + # properly checking whether they include Python.h + priority = 0x20 + if "common" in path: + priority -= 8 + path_basename = os.path.basename(path) + if path_basename.startswith("npy_"): + priority -= 4 + elif path_basename.startswith("npy"): + priority -= 3 + elif path_basename.startswith("np"): + priority -= 2 + if "config" in path_basename: + priority -= 1 + return priority, path + + +def process_files(file_list: list[str]) -> int: + n_out_of_order = 0 + submodule_paths = get_submodule_paths() + root_directory = os.path.dirname(os.path.dirname(__file__)) + for name_to_check in sorted(file_list, key=sort_order): + name_to_check = os.path.join(root_directory, name_to_check) + if any(submodule_path in name_to_check for submodule_path in submodule_paths): + continue + if ".dispatch." 
in name_to_check: + continue + try: + n_out_of_order += check_python_h_included_first(name_to_check) + except UnicodeDecodeError: + print(f"File {name_to_check:s} not utf-8", sys.stdout) + return n_out_of_order + + +def find_c_cpp_files(root: str) -> list[str]: + + result = [] + + for dirpath, dirnames, filenames in os.walk(root): + # I'm assuming other people have checked boost + for name in ("build", ".git", "boost"): + try: + dirnames.remove(name) + except ValueError: + pass + for name in fnmatch.filter(dirnames, "*.p"): + dirnames.remove(name) + result.extend( + [ + os.path.join(dirpath, name) + for name in filenames + if os.path.splitext(name)[1].lower() in C_CPP_EXTENSIONS + ] + ) + # Check the headers before the source files + result.sort(key=lambda path: "h" in os.path.splitext(path)[1], reverse=True) + return result + + +def diff_files(sha: str) -> list[str]: + """Find the diff since the given SHA. + + Adapted from lint.py + """ + res = subprocess.run( + [ + "git", + "diff", + "--name-only", + "--diff-filter=ACMR", + "-z", + sha, + "--", + # Check against C_CPP_EXTENSIONS + "*.[chCH]", + "*.[ch]pp", + "*.[ch]xx", + "*.cc", + "*.hh", + ], + stdout=subprocess.PIPE, + encoding="utf-8", + ) + res.check_returncode() + return [f for f in res.stdout.split("\0") if f] + + +if __name__ == "__main__": + args = PARSER.parse_args() + + if len(args.files) == 0: + files = find_c_cpp_files("numpy") + else: + files = args.files + if len(files) == 1 and os.path.isdir(files[0]): + files = find_c_cpp_files(files[0]) + + # See which of the headers include Python.h and add them to the list + n_out_of_order = process_files(files) + sys.exit(n_out_of_order) diff --git a/tools/get_submodule_paths.py b/tools/get_submodule_paths.py new file mode 100644 index 000000000000..abab86140712 --- /dev/null +++ b/tools/get_submodule_paths.py @@ -0,0 +1,31 @@ +import glob +import os.path + + +def get_submodule_paths(): + ''' + Get paths to submodules so that we can exclude them from things 
like + check_test_name.py, check_unicode.py, etc. + ''' + root_directory = os.path.dirname(os.path.dirname(__file__)) + gitmodule_file = os.path.join(root_directory, '.gitmodules') + with open(gitmodule_file) as gitmodules: + data = gitmodules.read().split('\n') + submodule_paths = [datum.split(' = ')[1] for datum in data if + datum.startswith('\tpath = ')] + submodule_paths = [os.path.join(root_directory, path) for path in + submodule_paths] + # vendored with a script rather than via gitmodules + with open( + os.path.join(root_directory, ".gitattributes"), "r" + ) as attr_file: + for line in attr_file: + if "vendored" in line: + pattern = line.split(" ", 1)[0] + submodule_paths.extend(glob.glob(pattern)) + + return submodule_paths + + +if __name__ == "__main__": + print('\n'.join(get_submodule_paths())) From d6b325ca8252db865bd631624b8d0097aa7b4870 Mon Sep 17 00:00:00 2001 From: mattip Date: Thu, 18 Sep 2025 18:46:09 +0300 Subject: [PATCH 0479/1718] DOC: tweak colons (from review) [skip actions][skip azp][skip cirrus] --- doc/source/user/basics.types.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/user/basics.types.rst b/doc/source/user/basics.types.rst index 41c100e519db..8145f6adb93b 100644 --- a/doc/source/user/basics.types.rst +++ b/doc/source/user/basics.types.rst @@ -54,7 +54,7 @@ properties of the type, such as whether it is an integer:: >>> np.issubdtype(d, np.floating) False -To convert the type of an array, use the .astype() method. For example: :: +To convert the type of an array, use the .astype() method. For example:: >>> z.astype(np.float64) #doctest: +NORMALIZE_WHITESPACE array([0., 1., 2.]) @@ -70,7 +70,7 @@ Sometimes the conversion can overflow, for instance when converting a `numpy.int 300 to `numpy.int8`. NumPy follows C casting rules, so that value would overflow and become 44 ``(300 - 256)``. 
If you wish to avoid such overflows, you can specify that the overflow action fail by using ``same_value`` for the ``casting`` argument (see also -:ref:`overflow-errors`): :: +:ref:`overflow-errors`):: >>> z.astype(np.float64, casting="same_value") #doctest: +NORMALIZE_WHITESPACE array([0., 1., 2.]) From 8e392ebe2eda9f9cd904b2baca7a29fd115716e4 Mon Sep 17 00:00:00 2001 From: Aniket Singh Yadav Date: Fri, 19 Sep 2025 03:42:52 +0530 Subject: [PATCH 0480/1718] DOC: Update Meson build examples in usage.rst [skip actions] [skip azp] [skip cirrus] --- doc/source/f2py/index.rst | 67 ++------------------------------------- doc/source/f2py/usage.rst | 66 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 69 insertions(+), 64 deletions(-) diff --git a/doc/source/f2py/index.rst b/doc/source/f2py/index.rst index fb1c573d1269..31840e812379 100644 --- a/doc/source/f2py/index.rst +++ b/doc/source/f2py/index.rst @@ -45,73 +45,12 @@ end matches the NumPy version printed from ``python -m numpy.f2py``, then you can use the shorter version. If not, or if you cannot run ``f2py``, you should replace all calls to ``f2py`` mentioned in this guide with the longer version. -======================= -Using f2py with Meson -======================= - -Meson is a modern build system recommended for building Python extension modules, -especially starting with Python 3.12 and NumPy 2.x. Meson provides a robust and -maintainable way to build Fortran extensions with f2py. - -To build a Fortran extension using f2py and Meson, you can use Meson's `custom_target` -to invoke f2py and generate the extension module. The following minimal example -demonstrates how to do this: - -.. 
code-block:: meson - - # List your Fortran source files - fortran_sources = files('your_module.f90') - - # Find the Python installation - py = import('python').find_installation() - - # Create a custom target to build the extension with f2py - f2py_wrapper = custom_target( - 'your_module_wrapper', - output: 'your_module.so', - input: fortran_sources, - command: [ - py.full_path(), '-m', 'numpy.f2py', - '-c', '@INPUT@', '-m', 'your_module' - ] - ) - - # Install the built extension to the Python site-packages directory - install_data(f2py_wrapper, install_dir: py.site_packages_dir()) - -For more details and advanced usage, see the Meson build guide in -the user documentation or refer to SciPy's Meson build files for -real-world examples: https://github.com/scipy/scipy/tree/main/meson.build - -========================================== -Building NumPy ufunc Extensions with Meson -========================================== - -To build a NumPy ufunc extension (C API) using Meson, you can use the -following template: - -.. code-block:: meson - - # List your C source files - c_sources = files('your_ufunc_module.c') - - # Find the Python installation - py = import('python').find_installation() - - # Create an extension module - extension_module = py.extension_module( - 'your_ufunc_module', - c_sources, - dependencies: py.dependency(), - install: true - ) - -For more information on writing NumPy ufunc extensions, see the -official NumPy documentation: https://numpy.org/doc/stable/reference/c-api.ufunc.html +For Meson build examples, see :doc:`usage`. .. toctree:: :maxdepth: 3 - + + usage f2py-user f2py-reference windows/index diff --git a/doc/source/f2py/usage.rst b/doc/source/f2py/usage.rst index 635455fdb58a..c0fb6c0428df 100644 --- a/doc/source/f2py/usage.rst +++ b/doc/source/f2py/usage.rst @@ -327,3 +327,69 @@ resulting package to work, you need to create a file named ``__init__.py`` (in the same directory as ``add.pyf``). 
Notice the extension module is defined entirely in terms of the ``add.pyf`` and ``add.f`` files. The conversion of the .pyf file to a .c file is handled by `numpy.distutils`. + +=============================== +Building with Meson (Examples) +=============================== + +Using f2py with Meson +--------------------- + +Meson is a modern build system recommended for building Python extension +modules, especially starting with Python 3.12 and NumPy 2.x. Meson provides +a robust and maintainable way to build Fortran extensions with f2py. + +To build a Fortran extension using f2py and Meson, you can use Meson's +`custom_target` to invoke f2py and generate the extension module. The +following minimal example demonstrates how to do this: + +.. code-block:: meson + + # List your Fortran source files + fortran_sources = files('your_module.f90') + + # Find the Python installation + py = import('python').find_installation() + + # Create a custom target to build the extension with f2py + f2py_wrapper = custom_target( + 'your_module_wrapper', + output: 'your_module.so', + input: fortran_sources, + command: [ + py.full_path(), '-m', 'numpy.f2py', + '-c', '@INPUT@', '-m', 'your_module' + ] + ) + + # Install the built extension to the Python site-packages directory + install_data(f2py_wrapper, install_dir: py.site_packages_dir()) + +For more details and advanced usage, see the Meson build guide in the +user documentation or refer to SciPy's Meson build files for real-world +examples: https://github.com/scipy/scipy/tree/main/meson.build + +Building NumPy ufunc Extensions with Meson +------------------------------------------ + +To build a NumPy ufunc extension (C API) using Meson, you can use the +following template: + +.. 
code-block:: meson + + # List your C source files + c_sources = files('your_ufunc_module.c') + + # Find the Python installation + py = import('python').find_installation() + + # Create an extension module + extension_module = py.extension_module( + 'your_ufunc_module', + c_sources, + dependencies: py.dependency(), + install: true + ) + +For more information on writing NumPy ufunc extensions, see the official +NumPy documentation: https://numpy.org/doc/stable/reference/c-api.ufunc.html From 0aa8087067270a8ff94a21470c45022cfc942b35 Mon Sep 17 00:00:00 2001 From: "Christine P. Chai" Date: Sat, 20 Sep 2025 05:24:54 -0700 Subject: [PATCH 0481/1718] DOC: Correct a typo in Troubleshooting guidelines [skip actions][skip azp][skip cirrus] (#29773) --- doc/source/user/troubleshooting-importerror.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/user/troubleshooting-importerror.rst b/doc/source/user/troubleshooting-importerror.rst index da456dd17e36..68ac4f939525 100644 --- a/doc/source/user/troubleshooting-importerror.rst +++ b/doc/source/user/troubleshooting-importerror.rst @@ -158,7 +158,7 @@ line that isn't inside NumPy to see which package has the incompatibility. Note your NumPy version and the version of the incompatible package to help you find the best solution. -There can be various reason for the incompatibility: +There can be various reasons for the incompatibility: * You have recently upgraded NumPy, most likely to NumPy 2, and the other module now also needs to be upgraded. (NumPy 2 was released in June 2024.) From a9bbd2352fc70b097954de6b898fe736b2381cbe Mon Sep 17 00:00:00 2001 From: cibarbia05 Date: Sat, 20 Sep 2025 13:35:43 -0400 Subject: [PATCH 0482/1718] BUG: np.setbufsize should raise ValueError for negative input np.setbufsize previously accepted negative values silently. 
This commit adds input validation so that a ValueError is raised when a negative buffer size is provided, and adds a regression test to cover this behavior. Closes #29651 --- numpy/_core/_ufunc_config.py | 3 +++ numpy/_core/tests/test_umath.py | 12 ++++++++++++ 2 files changed, 15 insertions(+) diff --git a/numpy/_core/_ufunc_config.py b/numpy/_core/_ufunc_config.py index 24abecd20652..0b1c7ba3743a 100644 --- a/numpy/_core/_ufunc_config.py +++ b/numpy/_core/_ufunc_config.py @@ -187,6 +187,9 @@ def setbufsize(size): 8192 """ + if size < 0: + raise ValueError("buffer size must be non-negative") + old = _get_extobj_dict()["bufsize"] extobj = _make_extobj(bufsize=size) _extobj_contextvar.set(extobj) diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py index d8ed56c31b93..9ae0cffd5cc8 100644 --- a/numpy/_core/tests/test_umath.py +++ b/numpy/_core/tests/test_umath.py @@ -4708,6 +4708,18 @@ def test_reduceat(): np.setbufsize(ncu.UFUNC_BUFSIZE_DEFAULT) assert_array_almost_equal(h1, h2) +def test_negative_value_raises(): + with pytest.raises(ValueError, match="buffer size must be non-negative"): + np.setbufsize(-5) + + old = np.getbufsize() + try: + prev = np.setbufsize(4096) + assert prev == old + assert np.getbufsize() == 4096 + finally: + np.setbufsize(old) + def test_reduceat_empty(): """Reduceat should work with empty arrays""" indices = np.array([], 'i4') From 0934462b484de5b6d236e685327ceaccebbbff51 Mon Sep 17 00:00:00 2001 From: "Christine P. 
Chai" Date: Sat, 20 Sep 2025 20:59:40 -0700 Subject: [PATCH 0483/1718] DOC: Link [CT] to cross-reference [skip actions][skip azp][skip cirrus] --- numpy/fft/_pocketfft.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/fft/_pocketfft.py b/numpy/fft/_pocketfft.py index c7f2f6a8bc3a..1ce7c76b8636 100644 --- a/numpy/fft/_pocketfft.py +++ b/numpy/fft/_pocketfft.py @@ -124,7 +124,7 @@ def fft(a, n=None, axis=-1, norm=None, out=None): This function computes the one-dimensional *n*-point discrete Fourier Transform (DFT) with the efficient Fast Fourier Transform (FFT) - algorithm [CT]. + algorithm [CT]_. Parameters ---------- From bcc9355a86994362c44550ea431e5c5588fa5b8d Mon Sep 17 00:00:00 2001 From: "Christine P. Chai" Date: Sat, 20 Sep 2025 21:03:01 -0700 Subject: [PATCH 0484/1718] DOC: Add two more cross-reference links [skip actions][skip azp][skip cirrus] --- numpy/linalg/_linalg.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/linalg/_linalg.py b/numpy/linalg/_linalg.py index 7e2f1ebf531b..8dde643781b8 100644 --- a/numpy/linalg/_linalg.py +++ b/numpy/linalg/_linalg.py @@ -2073,9 +2073,9 @@ def matrix_rank(A, tol=None, hermitian=False, *, rtol=None): The default threshold to detect rank deficiency is a test on the magnitude of the singular values of `A`. By default, we identify singular values less than ``S.max() * max(M, N) * eps`` as indicating rank deficiency - (with the symbols defined above). This is the algorithm MATLAB uses [1]. + (with the symbols defined above). This is the algorithm MATLAB uses [1]_. It also appears in *Numerical recipes* in the discussion of SVD solutions - for linear least squares [2]. + for linear least squares [2]_. This default threshold is designed to detect rank deficiency accounting for the numerical errors of the SVD computation. Imagine that there From 0a54c50f06a940de0d73eb544eea26f5c819f1cc Mon Sep 17 00:00:00 2001 From: "Christine P. 
Chai" Date: Sat, 20 Sep 2025 21:07:38 -0700 Subject: [PATCH 0485/1718] DOC: Three dots instead of two dots [skip actions][skip azp][skip cirrus] --- numpy/lib/_shape_base_impl.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/lib/_shape_base_impl.py b/numpy/lib/_shape_base_impl.py index 8b200cd8daa4..d1e55a48d711 100644 --- a/numpy/lib/_shape_base_impl.py +++ b/numpy/lib/_shape_base_impl.py @@ -1110,7 +1110,7 @@ def kron(a, b): ----- The function assumes that the number of dimensions of `a` and `b` are the same, if necessary prepending the smallest with ones. - If ``a.shape = (r0,r1,..,rN)`` and ``b.shape = (s0,s1,...,sN)``, + If ``a.shape = (r0,r1,...,rN)`` and ``b.shape = (s0,s1,...,sN)``, the Kronecker product has shape ``(r0*s0, r1*s1, ..., rN*SN)``. The elements are products of elements from `a` and `b`, organized explicitly by:: From 366855733f44369f89778d113f350944b5760577 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 21 Sep 2025 11:43:55 +0200 Subject: [PATCH 0486/1718] TYP: fix and improve ``{f,i}info`` stubs in ``_core.getlimits`` --- numpy/__init__.pyi | 60 +--------- numpy/_core/getlimits.pyi | 113 ++++++++++++++++++- numpy/typing/tests/data/reveal/getlimits.pyi | 16 +-- 3 files changed, 127 insertions(+), 62 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index caa5b6f5a724..d452bff03fc3 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -121,10 +121,7 @@ from numpy._typing import ( # type: ignore[deprecated] _FloatingCodes, _ComplexFloatingCodes, _InexactCodes, - _NumberCodes, _CharacterCodes, - _FlexibleCodes, - _GenericCodes, # Ufuncs _UFunc_Nin1_Nout1, _UFunc_Nin2_Nout1, @@ -314,6 +311,10 @@ from numpy._core.einsumfunc import ( einsum, einsum_path, ) +from numpy._core.getlimits import ( + finfo, + iinfo, +) from numpy._core.multiarray import ( array, @@ -779,9 +780,7 @@ _ScalarT_co = TypeVar("_ScalarT_co", bound=generic, default=Any, covariant=True) _NumberT = TypeVar("_NumberT", 
bound=number) _InexactT = TypeVar("_InexactT", bound=inexact) _RealNumberT = TypeVar("_RealNumberT", bound=floating | integer) -_FloatingT_co = TypeVar("_FloatingT_co", bound=floating, default=floating, covariant=True) _IntegerT = TypeVar("_IntegerT", bound=integer) -_IntegerT_co = TypeVar("_IntegerT_co", bound=integer, default=integer, covariant=True) _NonObjectScalarT = TypeVar("_NonObjectScalarT", bound=np.bool | number | flexible | datetime64 | timedelta64) _NBit = TypeVar("_NBit", bound=NBitBase, default=Any) # pyright: ignore[reportDeprecated] @@ -944,7 +943,7 @@ _CastingKind: TypeAlias = L["no", "equiv", "safe", "same_kind", "same_value", "u _OrderKACF: TypeAlias = L["K", "A", "C", "F"] | None _OrderACF: TypeAlias = L["A", "C", "F"] | None -_OrderCF: TypeAlias = L["C", "F"] | None +_OrderCF: TypeAlias = L["C", "F"] | None # noqa: PYI047 _ModeKind: TypeAlias = L["raise", "wrap", "clip"] _PartitionKind: TypeAlias = L["introselect"] @@ -3577,7 +3576,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # See https://github.com/numpy/numpy-stubs/pull/80 for more details. class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): @abstractmethod - def __new__(self) -> None: ... + def __new__(cls) -> None: ... def __hash__(self) -> int: ... @overload def __array__(self, dtype: None = None, /) -> ndarray[tuple[()], dtype[Self]]: ... @@ -5793,53 +5792,6 @@ class busdaycalendar: @property def holidays(self) -> NDArray[datetime64]: ... -class finfo(Generic[_FloatingT_co]): - dtype: Final[dtype[_FloatingT_co]] - bits: Final[int] - eps: Final[_FloatingT_co] - epsneg: Final[_FloatingT_co] - iexp: Final[int] - machep: Final[int] - max: Final[_FloatingT_co] - maxexp: Final[int] - min: Final[_FloatingT_co] - minexp: Final[int] - negep: Final[int] - nexp: Final[int] - nmant: Final[int] - precision: Final[int] - resolution: Final[_FloatingT_co] - smallest_subnormal: Final[_FloatingT_co] - @property - def smallest_normal(self) -> _FloatingT_co: ... 
- @property - def tiny(self) -> _FloatingT_co: ... - @overload - def __new__(cls, dtype: inexact[_NBit1] | _DTypeLike[inexact[_NBit1]]) -> finfo[floating[_NBit1]]: ... - @overload - def __new__(cls, dtype: complex | type[complex]) -> finfo[float64]: ... - @overload - def __new__(cls, dtype: str) -> finfo[floating]: ... - -class iinfo(Generic[_IntegerT_co]): - dtype: Final[dtype[_IntegerT_co]] - kind: Final[LiteralString] - bits: Final[int] - key: Final[LiteralString] - @property - def min(self) -> int: ... - @property - def max(self) -> int: ... - - @overload - def __new__( - cls, dtype: _IntegerT_co | _DTypeLike[_IntegerT_co] - ) -> iinfo[_IntegerT_co]: ... - @overload - def __new__(cls, dtype: int | type[int]) -> iinfo[int_]: ... - @overload - def __new__(cls, dtype: str) -> iinfo[Any]: ... - @final class nditer: def __new__( diff --git a/numpy/_core/getlimits.pyi b/numpy/_core/getlimits.pyi index 9d79b178f4dc..eb832f55bb7e 100644 --- a/numpy/_core/getlimits.pyi +++ b/numpy/_core/getlimits.pyi @@ -1,3 +1,114 @@ -from numpy import finfo, iinfo +from types import GenericAlias +from typing import Final, Generic, Self, overload +from typing_extensions import TypeVar + +import numpy as np +from numpy._typing import ( + _CLongDoubleCodes, + _Complex64Codes, + _Complex128Codes, + _DTypeLike, + _Float16Codes, + _Float32Codes, + _Float64Codes, + _Int8Codes, + _Int16Codes, + _Int32Codes, + _Int64Codes, + _IntPCodes, + _LongDoubleCodes, + _UInt8Codes, + _UInt16Codes, + _UInt32Codes, + _UInt64Codes, +) __all__ = ["finfo", "iinfo"] + +### + +_IntegerT_co = TypeVar("_IntegerT_co", bound=np.integer, default=np.integer, covariant=True) +_FloatingT_co = TypeVar("_FloatingT_co", bound=np.floating, default=np.floating, covariant=True) + +### + +class iinfo(Generic[_IntegerT_co]): + dtype: np.dtype[_IntegerT_co] + bits: Final[int] + kind: Final[str] + key: Final[str] + + @property + def min(self, /) -> int: ... + @property + def max(self, /) -> int: ... 
+ + # + @overload + def __init__(self, /, int_type: _IntegerT_co | _DTypeLike[_IntegerT_co]) -> None: ... + @overload + def __init__(self: iinfo[np.int_], /, int_type: _IntPCodes | type[int] | int) -> None: ... + @overload + def __init__(self: iinfo[np.int8], /, int_type: _Int8Codes) -> None: ... + @overload + def __init__(self: iinfo[np.uint8], /, int_type: _UInt8Codes) -> None: ... + @overload + def __init__(self: iinfo[np.int16], /, int_type: _Int16Codes) -> None: ... + @overload + def __init__(self: iinfo[np.uint16], /, int_type: _UInt16Codes) -> None: ... + @overload + def __init__(self: iinfo[np.int32], /, int_type: _Int32Codes) -> None: ... + @overload + def __init__(self: iinfo[np.uint32], /, int_type: _UInt32Codes) -> None: ... + @overload + def __init__(self: iinfo[np.int64], /, int_type: _Int64Codes) -> None: ... + @overload + def __init__(self: iinfo[np.uint64], /, int_type: _UInt64Codes) -> None: ... + @overload + def __init__(self, /, int_type: str) -> None: ... + + # + @classmethod + def __class_getitem__(cls, item: object, /) -> GenericAlias: ... + +class finfo(Generic[_FloatingT_co]): + dtype: np.dtype[_FloatingT_co] + eps: _FloatingT_co + epsneg: _FloatingT_co + resolution: _FloatingT_co + smallest_subnormal: _FloatingT_co + max: _FloatingT_co + min: _FloatingT_co + + bits: Final[int] + iexp: Final[int] + machep: Final[int] + maxexp: Final[int] + minexp: Final[int] + negep: Final[int] + nexp: Final[int] + nmant: Final[int] + precision: Final[int] + + @property + def smallest_normal(self, /) -> _FloatingT_co: ... + @property + def tiny(self, /) -> _FloatingT_co: ... + + # + @overload + def __new__(cls, dtype: _FloatingT_co | _DTypeLike[_FloatingT_co]) -> Self: ... + @overload + def __new__(cls, dtype: _Float16Codes) -> finfo[np.float16]: ... + @overload + def __new__(cls, dtype: _Float32Codes | _Complex64Codes | _DTypeLike[np.complex64]) -> finfo[np.float32]: ... 
+ @overload + def __new__(cls, dtype: _Float64Codes | _Complex128Codes | type[complex] | complex) -> finfo[np.float64]: ... + @overload + def __new__(cls, dtype: _LongDoubleCodes | _CLongDoubleCodes | _DTypeLike[np.clongdouble]) -> finfo[np.longdouble]: ... + @overload + def __new__(cls, dtype: str) -> finfo: ... + + # + @classmethod + def __class_getitem__(cls, item: object, /) -> GenericAlias: ... diff --git a/numpy/typing/tests/data/reveal/getlimits.pyi b/numpy/typing/tests/data/reveal/getlimits.pyi index 3a1157121750..cc964d753055 100644 --- a/numpy/typing/tests/data/reveal/getlimits.pyi +++ b/numpy/typing/tests/data/reveal/getlimits.pyi @@ -1,11 +1,11 @@ -from typing import Any, LiteralString, assert_type +from typing import assert_type import numpy as np -from numpy._typing import _64Bit f: float f8: np.float64 c8: np.complex64 +c16: np.complex128 i: int i8: np.int64 @@ -15,9 +15,10 @@ finfo_f8: np.finfo[np.float64] iinfo_i8: np.iinfo[np.int64] assert_type(np.finfo(f), np.finfo[np.float64]) -assert_type(np.finfo(f8), np.finfo[np.floating[_64Bit]]) +assert_type(np.finfo(f8), np.finfo[np.float64]) assert_type(np.finfo(c8), np.finfo[np.float32]) -assert_type(np.finfo("f2"), np.finfo[np.floating]) +assert_type(np.finfo(c16), np.finfo[np.float64]) +assert_type(np.finfo("f2"), np.finfo[np.float16]) assert_type(finfo_f8.dtype, np.dtype[np.float64]) assert_type(finfo_f8.bits, int) @@ -41,11 +42,12 @@ assert_type(finfo_f8.smallest_subnormal, np.float64) assert_type(np.iinfo(i), np.iinfo[np.int_]) assert_type(np.iinfo(i8), np.iinfo[np.int64]) assert_type(np.iinfo(u4), np.iinfo[np.uint32]) -assert_type(np.iinfo("i2"), np.iinfo[Any]) +assert_type(np.iinfo("i2"), np.iinfo[np.int16]) +assert_type(np.iinfo("u2"), np.iinfo[np.uint16]) assert_type(iinfo_i8.dtype, np.dtype[np.int64]) -assert_type(iinfo_i8.kind, LiteralString) +assert_type(iinfo_i8.kind, str) assert_type(iinfo_i8.bits, int) -assert_type(iinfo_i8.key, LiteralString) +assert_type(iinfo_i8.key, str) 
assert_type(iinfo_i8.min, int) assert_type(iinfo_i8.max, int) From ba3dbe46604c0cf50310d1bdbfc3719fb00243ea Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Sun, 21 Sep 2025 12:54:12 -0400 Subject: [PATCH 0487/1718] BLD: Upgrade spin requirement to version 0.15 --- environment.yml | 2 +- requirements/build_requirements.txt | 2 +- requirements/ci32_requirements.txt | 2 +- requirements/ci_requirements.txt | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/environment.yml b/environment.yml index 61eed13bfd67..fc171a285a9d 100644 --- a/environment.yml +++ b/environment.yml @@ -16,7 +16,7 @@ dependencies: - ninja - pkg-config - meson-python - - spin==0.13 + - spin==0.15 - ccache # For testing - pytest diff --git a/requirements/build_requirements.txt b/requirements/build_requirements.txt index 8bd2521933d7..1f6eb1435cfc 100644 --- a/requirements/build_requirements.txt +++ b/requirements/build_requirements.txt @@ -1,5 +1,5 @@ meson-python>=0.13.1 Cython>=3.0.6 ninja -spin==0.14 +spin==0.15 build diff --git a/requirements/ci32_requirements.txt b/requirements/ci32_requirements.txt index 10ac5f7ecc9f..4cb678d5d047 100644 --- a/requirements/ci32_requirements.txt +++ b/requirements/ci32_requirements.txt @@ -1,3 +1,3 @@ -spin==0.14 +spin==0.15 # Keep this in sync with ci_requirements.txt scipy-openblas32==0.3.30.0.1 diff --git a/requirements/ci_requirements.txt b/requirements/ci_requirements.txt index bd2f75ab8c1d..aab147bf8c17 100644 --- a/requirements/ci_requirements.txt +++ b/requirements/ci_requirements.txt @@ -1,4 +1,4 @@ -spin==0.14 +spin==0.15 # Keep this in sync with ci32_requirements.txt scipy-openblas32==0.3.30.0.1 scipy-openblas64==0.3.30.0.1 From 30a464b11da232e20d17d4b0dbdf8dc35644e71b Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Sun, 21 Sep 2025 13:45:39 -0400 Subject: [PATCH 0488/1718] CI: Upgrade spin before building numpy in the clang_TSAN job. 
--- .github/workflows/compiler_sanitizers.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/compiler_sanitizers.yml b/.github/workflows/compiler_sanitizers.yml index e91468b85068..2721d77eb4a2 100644 --- a/.github/workflows/compiler_sanitizers.yml +++ b/.github/workflows/compiler_sanitizers.yml @@ -90,6 +90,9 @@ jobs: - name: Uninstall pytest-xdist (conflicts with TSAN) run: pip uninstall -y pytest-xdist + - name: Upgrade spin (gh-29777) + run: pip install -U spin + - name: Build NumPy with ThreadSanitizer run: python -m spin build -- -Db_sanitize=thread From b5023dbfe796f6b3a08491c7e154e148014d2e3a Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Mon, 22 Sep 2025 10:17:30 +0300 Subject: [PATCH 0489/1718] remove whitespace --- numpy/_core/_ufunc_config.py | 1 - 1 file changed, 1 deletion(-) diff --git a/numpy/_core/_ufunc_config.py b/numpy/_core/_ufunc_config.py index 0b1c7ba3743a..b16147c18ee6 100644 --- a/numpy/_core/_ufunc_config.py +++ b/numpy/_core/_ufunc_config.py @@ -189,7 +189,6 @@ def setbufsize(size): """ if size < 0: raise ValueError("buffer size must be non-negative") - old = _get_extobj_dict()["bufsize"] extobj = _make_extobj(bufsize=size) _extobj_contextvar.set(extobj) From 19f2a720d867a944bc501e93fdeb6bcfb4be510d Mon Sep 17 00:00:00 2001 From: Sergey Fedorov Date: Mon, 22 Sep 2025 16:17:45 +0800 Subject: [PATCH 0490/1718] BUG: Fix pocketfft umath strides for AIX compatibility (#29768) * _pocketfft_umath.cpp: fix types Fixes: https://github.com/numpy/numpy/issues/29758 Credits to @seberg * _pocketfft_umath.cpp: fix unused variable warning * Move npts_in calculation into #ifndef --------- Co-authored-by: Sebastian Berg --- numpy/fft/_pocketfft_umath.cpp | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/numpy/fft/_pocketfft_umath.cpp b/numpy/fft/_pocketfft_umath.cpp index ab8af5aa522e..f616fe9b0bdc 100644 --- a/numpy/fft/_pocketfft_umath.cpp +++ b/numpy/fft/_pocketfft_umath.cpp @@ -32,7 +32,7 @@ 
template static void wrap_legacy_cpp_ufunc(char **args, npy_intp const *dimensions, - ptrdiff_t const *steps, void *func) + npy_intp const *steps, void *func) { NPY_ALLOW_C_API_DEF try { @@ -86,14 +86,14 @@ copy_output(T buff[], char *out, npy_intp step_out, size_t n) */ template static void -fft_loop(char **args, npy_intp const *dimensions, ptrdiff_t const *steps, +fft_loop(char **args, npy_intp const *dimensions, npy_intp const *steps, void *func) { char *ip = args[0], *fp = args[1], *op = args[2]; size_t n_outer = (size_t)dimensions[0]; - ptrdiff_t si = steps[0], sf = steps[1], so = steps[2]; + npy_intp si = steps[0], sf = steps[1], so = steps[2]; size_t nin = (size_t)dimensions[1], nout = (size_t)dimensions[2]; - ptrdiff_t step_in = steps[3], step_out = steps[4]; + npy_intp step_in = steps[3], step_out = steps[4]; bool direction = *((bool *)func); /* pocketfft::FORWARD or BACKWARD */ assert (nout > 0); @@ -144,9 +144,9 @@ rfft_impl(char **args, npy_intp const *dimensions, npy_intp const *steps, { char *ip = args[0], *fp = args[1], *op = args[2]; size_t n_outer = (size_t)dimensions[0]; - ptrdiff_t si = steps[0], sf = steps[1], so = steps[2]; + npy_intp si = steps[0], sf = steps[1], so = steps[2]; size_t nin = (size_t)dimensions[1], nout = (size_t)dimensions[2]; - ptrdiff_t step_in = steps[3], step_out = steps[4]; + npy_intp step_in = steps[3], step_out = steps[4]; assert (nout > 0 && nout == npts / 2 + 1); @@ -233,14 +233,13 @@ irfft_loop(char **args, npy_intp const *dimensions, npy_intp const *steps, void size_t nin = (size_t)dimensions[1], nout = (size_t)dimensions[2]; ptrdiff_t step_in = steps[3], step_out = steps[4]; - size_t npts_in = nout / 2 + 1; - assert(nout > 0); #ifndef POCKETFFT_NO_VECTORS /* * Call pocketfft directly if vectorization is possible. 
*/ + size_t npts_in = nout / 2 + 1; constexpr auto vlen = pocketfft::detail::VLEN::val; if (vlen > 1 && n_outer >= vlen && nin >= npts_in && sf == 0) { std::vector axes = { 1 }; From 055bcb38207d98241c6d081a3a31d1553c913ecc Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Sat, 6 Sep 2025 11:34:49 +0200 Subject: [PATCH 0491/1718] BUG: Fix assert in nditer buffer setup When using a buffered iteration, there was a new assert to check that strides were set up nicely to ensure that we are not missing dimension coalescing. However, when the strides are set up to track an index, then they were still set up with a 0 for length 1. I _think_ that is just unnecessary (i.e. the assert is correct to point it out). But, let's do this first for backporting at least. Signed-off-by: Sebastian Berg --- numpy/_core/src/multiarray/nditer_constr.c | 10 ++++++++-- numpy/lib/tests/test_index_tricks.py | 5 +++++ 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/numpy/_core/src/multiarray/nditer_constr.c b/numpy/_core/src/multiarray/nditer_constr.c index 3ed5cf1a0245..a8f13a73ee1e 100644 --- a/numpy/_core/src/multiarray/nditer_constr.c +++ b/numpy/_core/src/multiarray/nditer_constr.c @@ -2077,8 +2077,14 @@ npyiter_find_buffering_setup(NpyIter *iter, npy_intp buffersize) npy_intp *strides = NAD_STRIDES(axisdata); for (int iop = 0; iop < nop; iop++) { - /* Check that we set things up nicely (if shape is ever 1) */ - assert((axisdata->shape == 1) ? (prev_strides[iop] == strides[iop]) : 1); + /* + * Check that we set things up nicely so strides coalesc. Except + * for index operands, which currently disrupts coalescing. + * NOTE(seberg): presumably `npyiter_compute_index_strides` should + * not set the strides to 0, but this was safer for backporting. 
+ */ + assert((axisdata->shape != 1) || (prev_strides[iop] == strides[iop]) + || (op_itflags[iop] & (NPY_ITER_C_INDEX|NPY_ITER_F_INDEX))); if (op_single_stride_dims[iop] == idim) { /* Best case: the strides still collapse for this operand. */ diff --git a/numpy/lib/tests/test_index_tricks.py b/numpy/lib/tests/test_index_tricks.py index 387fdfec28f1..81e47ec3dff2 100644 --- a/numpy/lib/tests/test_index_tricks.py +++ b/numpy/lib/tests/test_index_tricks.py @@ -199,6 +199,11 @@ def test_empty_array_unravel(self): with assert_raises(ValueError): np.unravel_index([1], (2, 1, 0)) + def test_regression_size_1_index(self): + # actually tests the nditer size one index tracking + # regression test for gh-29690 + np.unravel_index(np.array([[1, 0, 1, 0]], dtype=np.uint32), (4,)) + class TestGrid: def test_basic(self): a = mgrid[-1:1:10j] From ef2b624fff9074480744ae9152552b6821fdc187 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 23 Sep 2025 17:06:32 +0000 Subject: [PATCH 0492/1718] MAINT: Bump pypa/cibuildwheel from 3.1.4 to 3.2.0 Bumps [pypa/cibuildwheel](https://github.com/pypa/cibuildwheel) from 3.1.4 to 3.2.0. - [Release notes](https://github.com/pypa/cibuildwheel/releases) - [Changelog](https://github.com/pypa/cibuildwheel/blob/main/docs/changelog.md) - [Commits](https://github.com/pypa/cibuildwheel/compare/c923d83ad9c1bc00211c5041d0c3f73294ff88f6...7c619efba910c04005a835b110b057fc28fd6e93) --- updated-dependencies: - dependency-name: pypa/cibuildwheel dependency-version: 3.2.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- .github/workflows/emscripten.yml | 2 +- .github/workflows/wheels.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index c5f770c9d785..5af18aa14107 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -30,7 +30,7 @@ jobs: fetch-tags: true persist-credentials: false - - uses: pypa/cibuildwheel@c923d83ad9c1bc00211c5041d0c3f73294ff88f6 # v3.1.4 + - uses: pypa/cibuildwheel@7c619efba910c04005a835b110b057fc28fd6e93 # v3.2.0 env: CIBW_PLATFORM: pyodide CIBW_BUILD: cp312-* diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 1885577f7a12..8e8c8768cd9c 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -98,7 +98,7 @@ jobs: fi - name: Build wheels - uses: pypa/cibuildwheel@c923d83ad9c1bc00211c5041d0c3f73294ff88f6 # v3.1.4 + uses: pypa/cibuildwheel@7c619efba910c04005a835b110b057fc28fd6e93 # v3.2.0 env: CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} From 0e27997fcf31f0e138a110fbab5640b74672b524 Mon Sep 17 00:00:00 2001 From: "Christine P. Chai" Date: Tue, 23 Sep 2025 18:40:16 -0700 Subject: [PATCH 0493/1718] DOC: Add cross-reference to Glantz (2002) [skip actions][skip azp][skip cirrus] --- numpy/random/_generator.pyx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index c1b78ce5b97e..b2ac08cd6ebb 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -1521,7 +1521,7 @@ cdef class Generator: Examples -------- - An example from Glantz[1], pp 47-40: + An example from Glantz [1]_, pp 47-40: Two groups, children of diabetics (25 people) and children from people without diabetes (25 controls). Fasting blood glucose was measured, From e456a3b2de3cb6204d367e60edb50fc5dbc3d59a Mon Sep 17 00:00:00 2001 From: "Christine P. 
Chai" Date: Tue, 23 Sep 2025 18:42:31 -0700 Subject: [PATCH 0494/1718] DOC: Improve formatting of random.Generator.logseries [skip actions][skip azp][skip cirrus] --- numpy/random/_generator.pyx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index b2ac08cd6ebb..fa35d43f4f66 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -3670,8 +3670,8 @@ cdef class Generator: The log series distribution is frequently used to represent species richness and occurrence, first proposed by Fisher, Corbet, and - Williams in 1943 [2]. It may also be used to model the numbers of - occupants seen in cars [3]. + Williams in 1943 [2]_. It may also be used to model the numbers of + occupants seen in cars [3]_. References ---------- @@ -3699,7 +3699,7 @@ cdef class Generator: >>> bins = np.arange(-.5, max(s) + .5 ) >>> count, bins, _ = plt.hist(s, bins=bins, label='Sample count') - # plot against distribution + Plot against the distribution: >>> def logseries(k, p): ... return -p**k/(k*np.log(1-p)) From 04a17d8e7edee511fe968598f0bfa5a81d5476ca Mon Sep 17 00:00:00 2001 From: "Christine P. Chai" Date: Tue, 23 Sep 2025 18:43:21 -0700 Subject: [PATCH 0495/1718] DOC: Correct a math typo [skip actions][skip azp][skip cirrus] --- numpy/random/_generator.pyx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index fa35d43f4f66..0fabf98147c3 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -3773,7 +3773,7 @@ cdef class Generator: Covariance indicates the level to which two variables vary together. From the multivariate normal distribution, we draw N-dimensional - samples, :math:`X = [x_1, x_2, ... x_N]`. The covariance matrix + samples, :math:`X = [x_1, x_2, ..., x_N]`. The covariance matrix element :math:`C_{ij}` is the covariance of :math:`x_i` and :math:`x_j`. 
The element :math:`C_{ii}` is the variance of :math:`x_i` (i.e. its "spread"). From 09abc4fe404daa3c52147199c3d97fb585db7bf4 Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Wed, 24 Sep 2025 06:22:39 -0400 Subject: [PATCH 0496/1718] MAINT: Remove xfail and deprecation filter from a test. (#29802) --- numpy/_core/tests/test_numeric.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py index db5269c507d5..fba8e8abbe6d 100644 --- a/numpy/_core/tests/test_numeric.py +++ b/numpy/_core/tests/test_numeric.py @@ -2835,16 +2835,12 @@ def test_clip_scalar_nan_propagation(self, arr, amin, amax): actual = np.clip(arr, amin, amax) assert_equal(actual, expected) - @pytest.mark.xfail(reason="propagation doesn't match spec") @pytest.mark.parametrize("arr, amin, amax", [ (np.array([1] * 10, dtype='m8'), np.timedelta64('NaT'), np.zeros(10, dtype=np.int32)), ]) - @pytest.mark.filterwarnings("ignore::DeprecationWarning") def test_NaT_propagation(self, arr, amin, amax): - # NOTE: the expected function spec doesn't - # propagate NaT, but clip() now does expected = np.minimum(np.maximum(arr, amin), amax) actual = np.clip(arr, amin, amax) assert_equal(actual, expected) From 43c52d791ffd628a2be8e6ee281e8857caa4c5c3 Mon Sep 17 00:00:00 2001 From: Jake Vanderplas Date: Wed, 24 Sep 2025 05:47:26 -0700 Subject: [PATCH 0497/1718] TST: clarify logic in float_alias_names test (#29796) Co-authored-by: Sebastian Berg --- numpy/_core/tests/test_dtype.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/numpy/_core/tests/test_dtype.py b/numpy/_core/tests/test_dtype.py index d9bd17c48434..175680451f3e 100644 --- a/numpy/_core/tests/test_dtype.py +++ b/numpy/_core/tests/test_dtype.py @@ -1687,9 +1687,8 @@ def test_integer_alias_names(self, int_, size): @pytest.mark.parametrize("name", ["Half", "Float", "Double", "CFloat", "CDouble"]) - def test_float_alias_names(self, name): - with 
pytest.raises(AttributeError): - getattr(numpy.dtypes, name + "DType") is numpy.dtypes.Float16DType + def test_float_alias_names_not_present(self, name): + assert not hasattr(numpy.dtypes, f"{name}DType") def test_scalar_helper_all_dtypes(self): for dtype in np.dtypes.__all__: From fe312b05d2c60a4d79842ff11c56b39b102b876f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 24 Sep 2025 17:05:38 +0000 Subject: [PATCH 0498/1718] MAINT: Bump actions/cache from 4.2.4 to 4.3.0 Bumps [actions/cache](https://github.com/actions/cache) from 4.2.4 to 4.3.0. - [Release notes](https://github.com/actions/cache/releases) - [Changelog](https://github.com/actions/cache/blob/main/RELEASES.md) - [Commits](https://github.com/actions/cache/compare/0400d5f644dc74513175e3cd8d07132dd4860809...0057852bfaa89a56745cba8c7296529d2fc39830) --- updated-dependencies: - dependency-name: actions/cache dependency-version: 4.3.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- .github/workflows/linux_qemu.yml | 4 ++-- .github/workflows/macos.yml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/linux_qemu.yml b/.github/workflows/linux_qemu.yml index 53780ae097a3..e0767d81213e 100644 --- a/.github/workflows/linux_qemu.yml +++ b/.github/workflows/linux_qemu.yml @@ -108,7 +108,7 @@ jobs: sudo apt install -y ninja-build gcc-${TOOLCHAIN_NAME} g++-${TOOLCHAIN_NAME} gfortran-${TOOLCHAIN_NAME} - name: Cache docker container - uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 + uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0 id: container-cache with: path: ~/docker_${{ matrix.BUILD_PROP[1] }} @@ -213,7 +213,7 @@ jobs: sudo apt install -y ninja-build gcc-14-${TOOLCHAIN_NAME} g++-14-${TOOLCHAIN_NAME} gfortran-14-${TOOLCHAIN_NAME} - name: Cache docker container - uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 + uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0 id: container-cache with: path: ~/docker_${{ matrix.BUILD_PROP[1] }} diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 97ef55fb0228..ef9e86f6ba70 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -46,7 +46,7 @@ jobs: echo "today=$(/bin/date -u '+%Y%m%d')" >> $GITHUB_OUTPUT - name: Setup compiler cache - uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 + uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0 id: cache-ccache with: path: ${{ steps.prep-ccache.outputs.dir }} @@ -70,7 +70,7 @@ jobs: # ensure we re-solve once a day (since we don't lock versions). Could be # replaced by a conda-lock based approach in the future. 
- name: Cache conda environment - uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 + uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0 env: # Increase this value to reset cache if environment.yml has not changed CACHE_NUMBER: 1 From 869138f8f178f25a44f2d47efbed1f02479198c4 Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Thu, 25 Sep 2025 14:06:24 +0300 Subject: [PATCH 0499/1718] BLD: refactor to avoid 'unused function' warnings' --- .../multiarray/lowlevel_strided_loops.c.src | 104 +++++++++--------- 1 file changed, 53 insertions(+), 51 deletions(-) diff --git a/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src b/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src index 13a126c53850..e75dd37cc465 100644 --- a/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src +++ b/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src @@ -79,57 +79,6 @@ a = (x)[7]; (x)[7] = (x)[8]; (x)[8] = a; \ } -/* half-to-double, copied from CPP to allow inlining */ - -static NPY_GCC_OPT_3 -uint64_t ToDoubleBits(uint16_t h) -{ - uint16_t h_exp = (h&0x7c00u); - uint64_t d_sgn = ((uint64_t)h&0x8000u) << 48; - switch (h_exp) { - case 0x0000u: { // 0 or subnormal - uint16_t h_sig = (h&0x03ffu); - // Signed zero - if (h_sig == 0) { - return d_sgn; - } - // Subnormal - h_sig <<= 1; - while ((h_sig&0x0400u) == 0) { - h_sig <<= 1; - h_exp++; - } - uint64_t d_exp = ((uint64_t)(1023 - 15 - h_exp)) << 52; - uint64_t d_sig = ((uint64_t)(h_sig&0x03ffu)) << 42; - return d_sgn + d_exp + d_sig; - } - case 0x7c00u: // inf or NaN - // All-ones exponent and a copy of the significand - return d_sgn + 0x7ff0000000000000ULL + (((uint64_t)(h&0x03ffu)) << 42); - default: // normalized - // Just need to adjust the exponent and shift - return d_sgn + (((uint64_t)(h&0x7fffu) + 0xfc000u) << 42); - } -} - -NPY_FINLINE -npy_uint64 _npy_halfbits_to_doublebits(npy_uint16 h){ - /* - * Use npymath versions for all the special cases, only inline the - * x86_64 
non-intrinsic case. Someday we will rewrite this in CPP and - * can then explore inlining more - */ - #if defined(NPY_HAVE_AVX512FP16) - return npy_halfbits_to_doublebits(h); - #elif defined(NPY_HAVE_VSX3) && defined(NPY_HAVE_VSX3_HALF_DOUBLE) - return npy_halfbits_to_doublebits(h); - #elif defined(__ARM_FP16_FORMAT_IEEE) - return npy_halfbits_to_doublebits(h); - #else - return (double)(ToDoubleBits(h)); - #endif -} - /************* STRIDED COPYING/SWAPPING SPECIALIZED FUNCTIONS *************/ /**begin repeat @@ -767,6 +716,59 @@ NPY_NO_EXPORT PyArrayMethod_StridedLoop * typedef npy_half _npy_half; #endif +#if EMULATED_FP16 +/* half-to-double, copied from CPP to allow inlining */ + +static NPY_GCC_OPT_3 +uint64_t ToDoubleBits(uint16_t h) +{ + uint16_t h_exp = (h&0x7c00u); + uint64_t d_sgn = ((uint64_t)h&0x8000u) << 48; + switch (h_exp) { + case 0x0000u: { // 0 or subnormal + uint16_t h_sig = (h&0x03ffu); + // Signed zero + if (h_sig == 0) { + return d_sgn; + } + // Subnormal + h_sig <<= 1; + while ((h_sig&0x0400u) == 0) { + h_sig <<= 1; + h_exp++; + } + uint64_t d_exp = ((uint64_t)(1023 - 15 - h_exp)) << 52; + uint64_t d_sig = ((uint64_t)(h_sig&0x03ffu)) << 42; + return d_sgn + d_exp + d_sig; + } + case 0x7c00u: // inf or NaN + // All-ones exponent and a copy of the significand + return d_sgn + 0x7ff0000000000000ULL + (((uint64_t)(h&0x03ffu)) << 42); + default: // normalized + // Just need to adjust the exponent and shift + return d_sgn + (((uint64_t)(h&0x7fffu) + 0xfc000u) << 42); + } +} + +NPY_FINLINE +npy_uint64 _npy_halfbits_to_doublebits(npy_uint16 h){ + /* + * Use npymath versions for all the special cases, only inline the + * x86_64 non-intrinsic case. 
Someday we will rewrite this in CPP and + * can then explore inlining more + */ + #if defined(NPY_HAVE_AVX512FP16) + return npy_halfbits_to_doublebits(h); + #elif defined(NPY_HAVE_VSX3) && defined(NPY_HAVE_VSX3_HALF_DOUBLE) + return npy_halfbits_to_doublebits(h); + #elif defined(__ARM_FP16_FORMAT_IEEE) + return npy_halfbits_to_doublebits(h); + #else + return (double)(ToDoubleBits(h)); + #endif +} +#endif + /**begin repeat * * #NAME1 = BOOL, From 92ab40bed3cdc020239e2a0d0ed116cde292d03d Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Thu, 25 Sep 2025 14:32:31 +0200 Subject: [PATCH 0500/1718] BUG: linalg: emit a MemoryError on a malloc failure (#29811) Otherwise, a malloc failure in `init_gesv(...)` etc. is not acted upon, and the python return value is silently wrong. --- numpy/linalg/umath_linalg.cpp | 109 +++++++++++++++++++++++----------- 1 file changed, 73 insertions(+), 36 deletions(-) diff --git a/numpy/linalg/umath_linalg.cpp b/numpy/linalg/umath_linalg.cpp index 1b6850145bc8..4845b1261aca 100644 --- a/numpy/linalg/umath_linalg.cpp +++ b/numpy/linalg/umath_linalg.cpp @@ -448,6 +448,15 @@ set_fp_invalid_or_clear(int error_occurred) } } +static inline void +report_no_memory() +{ + NPY_ALLOW_C_API_DEF + NPY_ALLOW_C_API; + PyErr_NoMemory(); + NPY_DISABLE_C_API; +} + /* ***************************************************************************** ** Some handy constants ** @@ -1199,10 +1208,7 @@ slogdet(char **args, } else { /* TODO: Requires use of new ufunc API to indicate error return */ - NPY_ALLOW_C_API_DEF - NPY_ALLOW_C_API; - PyErr_NoMemory(); - NPY_DISABLE_C_API; + report_no_memory(); } } @@ -1255,10 +1261,7 @@ det(char **args, } else { /* TODO: Requires use of new ufunc API to indicate error return */ - NPY_ALLOW_C_API_DEF - NPY_ALLOW_C_API; - PyErr_NoMemory(); - NPY_DISABLE_C_API; + report_no_memory(); } } @@ -1331,7 +1334,7 @@ init_evd(EIGH_PARAMS_t* params, char JOBZ, char UPLO, mem_buff = (npy_uint8 *)malloc(alloc_size); if (!mem_buff) { - goto 
error; + goto no_memory; } a = mem_buff; w = mem_buff + safe_N * safe_N * sizeof(typ); @@ -1365,7 +1368,7 @@ init_evd(EIGH_PARAMS_t* params, char JOBZ, char UPLO, mem_buff2 = (npy_uint8 *)malloc(lwork*sizeof(typ) + liwork*sizeof(fortran_int)); if (!mem_buff2) { - goto error; + goto no_memory; } work = mem_buff2; @@ -1378,6 +1381,9 @@ init_evd(EIGH_PARAMS_t* params, char JOBZ, char UPLO, return 1; + no_memory: + report_no_memory(); + error: /* something failed */ memset(params, 0, sizeof(*params)); @@ -1440,7 +1446,7 @@ using fbasetyp = fortran_type_t; mem_buff = (npy_uint8 *)malloc(safe_N * safe_N * sizeof(typ) + safe_N * sizeof(basetyp)); if (!mem_buff) { - goto error; + goto no_memory; } a = mem_buff; w = mem_buff + safe_N * safe_N * sizeof(typ); @@ -1478,7 +1484,7 @@ using fbasetyp = fortran_type_t; lrwork*sizeof(basetyp) + liwork*sizeof(fortran_int)); if (!mem_buff2) { - goto error; + goto no_memory; } work = mem_buff2; @@ -1495,6 +1501,8 @@ using fbasetyp = fortran_type_t; return 1; /* something failed */ +no_memory: + report_no_memory(); error: memset(params, 0, sizeof(*params)); free(mem_buff2); @@ -1733,7 +1741,10 @@ init_gesv(GESV_PARAMS_t *params, fortran_int N, fortran_int NRHS) params->LDB = ld; return 1; + error: + report_no_memory(); + free(mem_buff); memset(params, 0, sizeof(*params)); @@ -1977,6 +1988,8 @@ init_potrf(POTR_PARAMS_t *params, char UPLO, fortran_int N) return 1; error: + report_no_memory(); + free(mem_buff); memset(params, 0, sizeof(*params)); @@ -2175,7 +2188,7 @@ scalar_trait) vlr_size + vrr_size + w_size + vl_size + vr_size); if (!mem_buff) { - goto error; + goto no_memory; } a = mem_buff; @@ -2218,7 +2231,7 @@ scalar_trait) mem_buff2 = (npy_uint8 *)malloc(work_count*sizeof(typ)); if (!mem_buff2) { - goto error; + goto no_memory; } work = mem_buff2; @@ -2226,6 +2239,10 @@ scalar_trait) params->WORK = (typ*)work; return 1; + + no_memory: + report_no_memory(); + error: free(mem_buff2); free(mem_buff); @@ -2392,7 +2409,7 @@ using 
realtyp = basetype_t; mem_buff = (npy_uint8 *)malloc(total_size); if (!mem_buff) { - goto error; + goto no_memory; } a = mem_buff; @@ -2434,7 +2451,7 @@ using realtyp = basetype_t; mem_buff2 = (npy_uint8 *)malloc(work_count*sizeof(ftyp)); if (!mem_buff2) { - goto error; + goto no_memory; } work = mem_buff2; @@ -2443,6 +2460,9 @@ using realtyp = basetype_t; params->WORK = (ftyp*)work; return 1; + + no_memory: + report_no_memory(); error: free(mem_buff2); free(mem_buff); @@ -2754,7 +2774,7 @@ init_gesdd(GESDD_PARAMS_t *params, mem_buff = (npy_uint8 *)malloc(a_size + s_size + u_size + vt_size + iwork_size); if (!mem_buff) { - goto error; + goto no_memory; } a = mem_buff; @@ -2798,7 +2818,7 @@ init_gesdd(GESDD_PARAMS_t *params, mem_buff2 = (npy_uint8 *)malloc(work_size); if (!mem_buff2) { - goto error; + goto no_memory; } work = mem_buff2; @@ -2807,6 +2827,9 @@ init_gesdd(GESDD_PARAMS_t *params, params->WORK = (ftyp*)work; return 1; + + no_memory: + report_no_memory(); error: TRACE_TXT("%s failed init\n", __FUNCTION__); free(mem_buff); @@ -2894,7 +2917,7 @@ using frealtyp = basetype_t; rwork_size + iwork_size); if (!mem_buff) { - goto error; + goto no_memory; } a = mem_buff; @@ -2939,7 +2962,7 @@ using frealtyp = basetype_t; mem_buff2 = (npy_uint8 *)malloc(work_size); if (!mem_buff2) { - goto error; + goto no_memory; } work = mem_buff2; @@ -2948,6 +2971,10 @@ using frealtyp = basetype_t; params->WORK = (ftyp*)work; return 1; + + no_memory: + report_no_memory(); + error: TRACE_TXT("%s failed init\n", __FUNCTION__); free(mem_buff2); @@ -3186,7 +3213,7 @@ using ftyp = fortran_doublereal; mem_buff = (npy_uint8 *)malloc(a_size + tau_size); if (!mem_buff) - goto error; + goto no_memory; a = mem_buff; tau = a + a_size; @@ -3219,13 +3246,17 @@ using ftyp = fortran_doublereal; work_size = (size_t) params->LWORK * sizeof(ftyp); mem_buff2 = (npy_uint8 *)malloc(work_size); if (!mem_buff2) - goto error; + goto no_memory; work = mem_buff2; params->WORK = (ftyp*)work; return 1; + + 
no_memory: + report_no_memory(); + error: TRACE_TXT("%s failed init\n", __FUNCTION__); free(mem_buff); @@ -3260,7 +3291,7 @@ using ftyp = fortran_doublecomplex; mem_buff = (npy_uint8 *)malloc(a_size + tau_size); if (!mem_buff) - goto error; + goto no_memory; a = mem_buff; tau = a + a_size; @@ -3295,13 +3326,17 @@ using ftyp = fortran_doublecomplex; mem_buff2 = (npy_uint8 *)malloc(work_size); if (!mem_buff2) - goto error; + goto no_memory; work = mem_buff2; params->WORK = (ftyp*)work; return 1; + + no_memory: + report_no_memory(); + error: TRACE_TXT("%s failed init\n", __FUNCTION__); free(mem_buff); @@ -3433,7 +3468,7 @@ using ftyp = fortran_doublereal; mem_buff = (npy_uint8 *)malloc(q_size + tau_size + a_size); if (!mem_buff) - goto error; + goto no_memory; q = mem_buff; tau = q + q_size; @@ -3468,13 +3503,17 @@ using ftyp = fortran_doublereal; mem_buff2 = (npy_uint8 *)malloc(work_size); if (!mem_buff2) - goto error; + goto no_memory; work = mem_buff2; params->WORK = (ftyp*)work; return 1; + + no_memory: + report_no_memory(); + error: TRACE_TXT("%s failed init\n", __FUNCTION__); free(mem_buff); @@ -3512,7 +3551,7 @@ using ftyp=fortran_doublecomplex; mem_buff = (npy_uint8 *)malloc(q_size + tau_size + a_size); if (!mem_buff) - goto error; + goto no_memory; q = mem_buff; tau = q + q_size; @@ -3548,7 +3587,7 @@ using ftyp=fortran_doublecomplex; mem_buff2 = (npy_uint8 *)malloc(work_size); if (!mem_buff2) - goto error; + goto no_memory; work = mem_buff2; @@ -3556,6 +3595,10 @@ using ftyp=fortran_doublecomplex; params->LWORK = work_count; return 1; + + no_memory: + report_no_memory(); + error: TRACE_TXT("%s failed init\n", __FUNCTION__); free(mem_buff); @@ -3898,10 +3941,7 @@ scalar_trait) return 1; no_memory: - NPY_ALLOW_C_API_DEF - NPY_ALLOW_C_API; - PyErr_NoMemory(); - NPY_DISABLE_C_API; + report_no_memory(); error: TRACE_TXT("%s failed init\n", __FUNCTION__); @@ -4034,10 +4074,7 @@ using frealtyp = basetype_t; return 1; no_memory: - NPY_ALLOW_C_API_DEF - 
NPY_ALLOW_C_API; - PyErr_NoMemory(); - NPY_DISABLE_C_API; + report_no_memory(); error: TRACE_TXT("%s failed init\n", __FUNCTION__); From fe0affe49b705dc06895e3da17e3b90032fdbc5b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 25 Sep 2025 17:06:28 +0000 Subject: [PATCH 0501/1718] MAINT: Bump github/codeql-action from 3.30.3 to 3.30.4 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.30.3 to 3.30.4. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/192325c86100d080feab897ff886c34abd4c83a3...303c0aef88fc2fe5ff6d63d3b1596bfd83dfa1f9) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.30.4 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 6ea330ea2637..4a64b3ca7276 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@192325c86100d080feab897ff886c34abd4c83a3 # v3.30.3 + uses: github/codeql-action/init@303c0aef88fc2fe5ff6d63d3b1596bfd83dfa1f9 # v3.30.4 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 
# If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@192325c86100d080feab897ff886c34abd4c83a3 # v3.30.3 + uses: github/codeql-action/autobuild@303c0aef88fc2fe5ff6d63d3b1596bfd83dfa1f9 # v3.30.4 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@192325c86100d080feab897ff886c34abd4c83a3 # v3.30.3 + uses: github/codeql-action/analyze@303c0aef88fc2fe5ff6d63d3b1596bfd83dfa1f9 # v3.30.4 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 5d8f1b91cbae..092f9ef9e696 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
- name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@192325c86100d080feab897ff886c34abd4c83a3 # v2.1.27 + uses: github/codeql-action/upload-sarif@303c0aef88fc2fe5ff6d63d3b1596bfd83dfa1f9 # v2.1.27 with: sarif_file: results.sarif From 087615c53726b255e995a5c1e247c7ab76b14365 Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Fri, 26 Sep 2025 14:35:34 +0300 Subject: [PATCH 0502/1718] ENH: add warning when calling ufunc with 'where' and without 'out' (#29813) * add warning when calling ufunc with 'where' and without 'out' * DOC: add a release note Co-authored-by: Sebastian Berg --- .../upcoming_changes/29813.new_feature.rst | 6 ++++++ numpy/_core/src/umath/ufunc_object.c | 9 +++++++++ numpy/_core/tests/test_ufunc.py | 18 ++++++++++++----- numpy/_core/tests/test_umath.py | 20 +++++++++++++------ 4 files changed, 42 insertions(+), 11 deletions(-) create mode 100644 doc/release/upcoming_changes/29813.new_feature.rst diff --git a/doc/release/upcoming_changes/29813.new_feature.rst b/doc/release/upcoming_changes/29813.new_feature.rst new file mode 100644 index 000000000000..690d7ca88799 --- /dev/null +++ b/doc/release/upcoming_changes/29813.new_feature.rst @@ -0,0 +1,6 @@ +Warning emitted when using `where` without `out` +------------------------------------------------ +Ufuncs called with a ``where`` mask and without an ``out`` positional or kwarg will +now emit a warning. This usage tends to trip up users who expect some value in +output locations where the mask is ``False`` (the ufunc will not touch those +locations). The warning can be supressed by using ``out=None``. 
diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index 75e651c29088..60eb2d32b7c6 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -4486,6 +4486,15 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, return override; } + /* Warn if "where" is used without "out", issue 29561 */ + if ((where_obj != NULL) && (full_args.out == NULL) && (out_obj == NULL)) { + if (PyErr_Warn(PyExc_UserWarning, + "'where' used without 'out', expect unitialized memory in output. " + "If this is intentional, use out=None.") < 0) { + goto fail; + } + } + if (outer) { /* Outer uses special preparation of inputs (expand dims) */ PyObject *new_in = prepare_input_arguments_for_outer(full_args.in, ufunc); diff --git a/numpy/_core/tests/test_ufunc.py b/numpy/_core/tests/test_ufunc.py index 84e31bd1bc3c..976f9fe2f615 100644 --- a/numpy/_core/tests/test_ufunc.py +++ b/numpy/_core/tests/test_ufunc.py @@ -1710,9 +1710,6 @@ def test_where_param(self): assert_equal(a, [[0, 27], [14, 5]]) def test_where_param_buffer_output(self): - # This test is temporarily skipped because it requires - # adding masking features to the nditer to work properly - # With casting on output a = np.ones(10, np.int64) b = np.ones(10, np.int64) @@ -1724,12 +1721,12 @@ def test_where_param_alloc(self): # With casting and allocated output a = np.array([1], dtype=np.int64) m = np.array([True], dtype=bool) - assert_equal(np.sqrt(a, where=m), [1]) + assert_equal(np.sqrt(a, where=m, out=None), [1]) # No casting and allocated output a = np.array([1], dtype=np.float64) m = np.array([True], dtype=bool) - assert_equal(np.sqrt(a, where=m), [1]) + assert_equal(np.sqrt(a, where=m, out=None), [1]) def test_where_with_broadcasting(self): # See gh-17198 @@ -1743,6 +1740,17 @@ def test_where_with_broadcasting(self): assert_array_equal((a[where] < b_where), out[where].astype(bool)) assert not out[~where].any() # outside mask, out remains all 0 + def 
test_where_warns(self): + a = np.arange(7) + mask = a % 2 == 0 + with pytest.warns(UserWarning, match="'where' used without 'out'"): + result1 = np.add(a, a, where=mask) + # Does not warn + result2 = np.add(a, a, where=mask, out=None) + # Sanity check + assert np.all(result1[::2] == [0, 4, 8, 12]) + assert np.all(result2[::2] == [0, 4, 8, 12]) + @staticmethod def identityless_reduce_arrs(): yield np.empty((2, 3, 4), order='C') diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py index 9ae0cffd5cc8..572302541e47 100644 --- a/numpy/_core/tests/test_umath.py +++ b/numpy/_core/tests/test_umath.py @@ -3156,7 +3156,8 @@ def do_test(f_call, f_expected): do_test(lambda a: np.add(0, 0, out=(a,)), lambda a: (0, 0, a)) # Also check the where mask handling: - do_test(lambda a: np.add(a, 0, where=False), lambda a: (a, 0)) + out = np.zeros([1], dtype=float) + do_test(lambda a: np.add(a, 0, where=False, out=None), lambda a: (a, 0)) do_test(lambda a: np.add(0, 0, a, where=False), lambda a: (0, 0, a)) def test_wrap_with_iterable(self): @@ -3713,7 +3714,7 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): kwargs = kwargs.copy() if "out" in kwargs: - kwargs["out"] = self._unwrap(kwargs["out"]) + kwargs["out"] = self._unwrap(kwargs["out"])[0] if kwargs["out"] is NotImplemented: return NotImplemented @@ -3744,21 +3745,28 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): array = np.array([1, 2, 3]) where = np.array([True, False, True]) - expected = ufunc(array, where=where) + out = np.zeros(3, dtype=array.dtype) + expected = ufunc(array, where=where, out=out) with pytest.raises(TypeError): - ufunc(array, where=where.view(OverriddenArrayOld)) + ufunc( + array, + where=where.view(OverriddenArrayOld), + out=out, + ) result_1 = ufunc( array, - where=where.view(OverriddenArrayNew) + where=where.view(OverriddenArrayNew), + out=out, ) assert isinstance(result_1, OverriddenArrayNew) assert np.all(np.array(result_1) == expected, where=where) 
result_2 = ufunc( array.view(OverriddenArrayNew), - where=where.view(OverriddenArrayNew) + where=where.view(OverriddenArrayNew), + out=out.view(OverriddenArrayNew), ) assert isinstance(result_2, OverriddenArrayNew) assert np.all(np.array(result_2) == expected, where=where) From 4078485243a760818ac491f1237afadb6953b15c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 26 Sep 2025 17:06:20 +0000 Subject: [PATCH 0503/1718] MAINT: Bump actions/dependency-review-action from 4.7.3 to 4.8.0 Bumps [actions/dependency-review-action](https://github.com/actions/dependency-review-action) from 4.7.3 to 4.8.0. - [Release notes](https://github.com/actions/dependency-review-action/releases) - [Commits](https://github.com/actions/dependency-review-action/compare/595b5aeba73380359d98a5e087f648dbb0edce1b...56339e523c0409420f6c2c9a2f4292bbb3c07dd3) --- updated-dependencies: - dependency-name: actions/dependency-review-action dependency-version: 4.8.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- .github/workflows/dependency-review.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml index be1c6f7c4bb1..a592a969eb94 100644 --- a/.github/workflows/dependency-review.yml +++ b/.github/workflows/dependency-review.yml @@ -19,6 +19,6 @@ jobs: with: persist-credentials: false - name: 'Dependency Review' - uses: actions/dependency-review-action@595b5aeba73380359d98a5e087f648dbb0edce1b # v4.7.3 + uses: actions/dependency-review-action@56339e523c0409420f6c2c9a2f4292bbb3c07dd3 # v4.8.0 with: allow-ghsas: GHSA-cx63-2mw6-8hw5 From 60a340442535663a24951da28fe21684777cde9a Mon Sep 17 00:00:00 2001 From: lvllvl <24905907+lvllvl@users.noreply.github.com> Date: Fri, 26 Sep 2025 13:56:19 -0400 Subject: [PATCH 0504/1718] BUG: resolve invalid grep with env neutral script (#29551) --- tools/ci/check_c_api_usage.py | 265 ++++++++++++++++++++++++++++++++++ tools/ci/check_c_api_usage.sh | 91 ------------ tools/linter.py | 13 +- 3 files changed, 273 insertions(+), 96 deletions(-) create mode 100644 tools/ci/check_c_api_usage.py delete mode 100644 tools/ci/check_c_api_usage.sh diff --git a/tools/ci/check_c_api_usage.py b/tools/ci/check_c_api_usage.py new file mode 100644 index 000000000000..49c317a1259c --- /dev/null +++ b/tools/ci/check_c_api_usage.py @@ -0,0 +1,265 @@ +#!/usr/bin/env python3 +from __future__ import annotations + +import argparse +import os +import re +import sys +import tempfile +from concurrent.futures import ThreadPoolExecutor, as_completed +from pathlib import Path +from re import Pattern + +""" +Borrow-ref C API linter (Python version). + +- Recursively scans source files under --root (default: numpy) +- Matches suspicious CPython C-API calls as whole identifiers +- Skips: + - lines with '// noqa: borrowed-ref OK' or + '// noqa: borrowed-ref - manual fix needed' + - line comments (// ...) + - block comments (/* ... 
*/), even when they span lines +- Prints findings and exits 1 if any issues found, else 0 +""" + +def strip_comments(line: str, in_block: bool) -> tuple[str, bool]: + """ + Return (code_without_comments, updated_in_block). + Removes // line comments and /* ... */ block comments (non-nesting, C-style). + """ + i = 0 + out_parts: list[str] = [] + n = len(line) + + while i < n: + if in_block: + end = line.find("*/", i) + if end == -1: + # Entire remainder is inside a block comment. + return ("".join(out_parts), True) + i = end + 2 + in_block = False + continue + + # Not in block: look for next // or /* from current i + sl = line.find("//", i) + bl = line.find("/*", i) + + if sl != -1 and (bl == -1 or sl < bl): + # Line comment starts first: take code up to '//' and stop + out_parts.append(line[i:sl]) + return ("".join(out_parts), in_block) + + if bl != -1: + # Block comment starts: take code up to '/*', then enter block + out_parts.append(line[i:bl]) + i = bl + 2 + in_block = True + continue + + # No more comments + out_parts.append(line[i:]) + break + + return ("".join(out_parts), in_block) + +def iter_source_files(root: Path, exts: set[str], excludes: set[str]) -> list[Path]: + """ + Return a list of source files under 'root', where filenames end with any of the + extensions in 'exts' (e.g., '.c.src', '.c', '.h'). + Excludes directories whose names are in 'excludes'. + """ + results: list[Path] = [] + + for dirpath, dirnames, filenames in os.walk(root): + # Prune excluded directories + dirnames[:] = [d for d in dirnames if d not in excludes] + for fn in filenames: + # endswith handles mult-suffice patterns, e.g., .c.src + if any(fn.endswith(ext) for ext in exts): + results.append(Path(dirpath) / fn) + return results + +def build_func_rx(funcs: tuple[str, ...]) -> Pattern[str]: + return re.compile(r"(? list[tuple[str, int, str, str]]: + """ + Scan a single file. + Returns list of (func_name, line_number, path_str, raw_line_str). 
+ """ + hits: list[tuple[str, int, str, str]] = [] + in_block = False + noqa_set = set(noqa_markers) + + try: + with path.open("r", encoding="utf-8", errors="ignore") as f: + for lineno, raw in enumerate(f, 1): + # Skip if approved by noqa markers + if any(mark in raw for mark in noqa_set): + continue + + # Remove comments; if nothing remains, skip + code, in_block = strip_comments(raw.rstrip("\n"), in_block) + if not code.strip(): + continue + + # Find all suspicious calls in non-comment code + for m in func_rx.finditer(code): + hits.append((m.group(0), lineno, str(path), raw.rstrip("\n"))) + except FileNotFoundError: + # File may have disappeared; ignore gracefully + pass + return hits + + +def main(argv: list[str] | None = None) -> int: + # List of suspicious function calls: + suspicious_funcs: tuple[str, ...] = ( + "PyList_GetItem", + "PyDict_GetItem", + "PyDict_GetItemWithError", + "PyDict_GetItemString", + "PyDict_SetDefault", + "PyDict_Next", + "PyWeakref_GetObject", + "PyWeakref_GET_OBJECT", + "PyList_GET_ITEM", + "_PyDict_GetItemStringWithError", + "PySequence_Fast" + ) + func_rx = build_func_rx(suspicious_funcs) + noqa_markers = ( + "noqa: borrowed-ref OK", + "noqa: borrowed-ref - manual fix needed" + ) + default_exts = {".c", ".h", ".c.src", ".cpp"} + default_excludes = {"pythoncapi-compat"} + + ap = argparse.ArgumentParser(description="Borrow-ref C API linter (Python).") + ap.add_argument( + "--quiet", + action="store_true", + help="Suppress normal output; exit status alone indicates result (useful\ + for CI).", + ) + ap.add_argument( + "-j", "--jobs", + type=int, + default=0, + help="Number of worker threads (0=auto, 1=sequential).", + ) + ap.add_argument( + "--root", + default="numpy", + type=str, + help="Root directory to scan (default: numpy)" + ) + ap.add_argument( + "--ext", + action="append", + default=None, + help=f"File extension(s) to include (repeatable). 
Defaults to {default_exts}", + ) + ap.add_argument( + "--exclude", + action="append", + default=None, + help=f"Directory name(s) to exclude (repeatable). Default: {default_excludes}", + ) + args = ap.parse_args(argv) + + if args.ext: + exts = {e if e.startswith(".") else f".{e}" for e in args.ext} + else: + exts = set(default_exts) + excludes = set(args.exclude) if args.exclude else set(default_excludes) + + root = Path(args.root) + if not root.exists(): + print(f"error: root '{root}' does not exist", file=sys.stderr) + return 2 + + files = sorted(iter_source_files(root, exts, excludes), key=str) + + # Determine concurrency: auto picks a reasonable cap for I/O-bound work + if args.jobs is None or args.jobs <= 0: + max_workers = min(32, (os.cpu_count() or 1) * 5) + else: + max_workers = max(1, args.jobs) + print(f'Scanning {len(files)} C/C++ source files...\n') + + # Output file (mirrors your shell behavior) + tmpdir = Path(".tmp") + tmpdir.mkdir(exist_ok=True) + + findings = 0 + + # Run the scanning in parallel; only the main thread writes the report + all_hits: list[tuple[str, int, str, str]] = [] + if max_workers == 1: + for p in files: + all_hits.extend(scan_file(p, func_rx, noqa_markers)) + else: + with ThreadPoolExecutor(max_workers=max_workers) as ex: + fut_to_file = {ex.submit(scan_file, p, func_rx, noqa_markers): + p for p in files} + for fut in as_completed(fut_to_file): + try: + all_hits.extend(fut.result()) + except Exception as e: + print(f'Failed to scan {fut_to_file[fut]}: {e}') + + # Sort for deterministic output: by path, then line number + all_hits.sort(key=lambda t: (t[2], t[1])) + + # There no hits, linter passed + if not all_hits: + if not args.quiet: + print("All checks passed! 
C API borrow-ref linter found no issues.\n") + return 0 + + # There are some linter failures: create a log file + with tempfile.NamedTemporaryFile( + prefix="c_api_usage_report.", + suffix=".txt", + dir=tmpdir, + mode="w+", + encoding="utf-8", + delete=False, + ) as out: + report_path = Path(out.name) + out.write("Running Suspicious C API usage report workflow...\n\n") + for func, lineo, pstr, raw in all_hits: + findings += 1 + out.write(f"Found suspicious call to {func} in file: {pstr}\n") + out.write(f" -> {pstr}:{lineo}a:{raw}\n") + out.write("Recommendation:\n") + out.write( + "If this use is intentional and safe, add " + "'// noqa: borrowed-ref OK' on the same line " + "to silence this warning.\n" + ) + out.write( + "Otherwise, consider replacing the call " + "with a thread-safe API function.\n\n" + ) + + out.flush() + if not args.quiet: + out.seek(0) + sys.stdout.write(out.read()) + print(f"Report written to: {report_path}\n\n\ +C API borrow-ref linter FAILED.") + + return 1 + + +if __name__ == "__main__": + + sys.exit(main()) diff --git a/tools/ci/check_c_api_usage.sh b/tools/ci/check_c_api_usage.sh deleted file mode 100644 index b04410d7667d..000000000000 --- a/tools/ci/check_c_api_usage.sh +++ /dev/null @@ -1,91 +0,0 @@ -#!/usr/bin/env bash -set -e - -# List of suspicious function calls: -SUSPICIOUS_FUNCS=( - "PyList_GetItem" - "PyDict_GetItem" - "PyDict_GetItemWithError" - "PyDict_GetItemString" - "PyDict_SetDefault" - "PyDict_Next" - "PyWeakref_GetObject" - "PyWeakref_GET_OBJECT" - "PyList_GET_ITEM" - "_PyDict_GetItemStringWithError" - "PySequence_Fast" -) - -# Find all C/C++ source files in the repo -ALL_FILES=$(find numpy -type f \( -name "*.c" -o -name "*.h" -o -name "*.c.src" -o -name "*.cpp" \) ! -path "*/pythoncapi-compat/*") - -# For debugging: print out file count -echo "Scanning $(echo "$ALL_FILES" | wc -l) C/C++ source files..." 
- -# Prepare a result file -mkdir -p .tmp -OUTPUT=$(mktemp .tmp/c_api_usage_report.XXXXXX) -echo -e "Running Suspicious C API usage report workflow...\n" > $OUTPUT - -FAIL=0 - -# Scan each changed file -for file in $ALL_FILES; do - - for func in "${SUSPICIOUS_FUNCS[@]}"; do - # -n : show line number - # -P : perl-style boundaries - # (?> "$OUTPUT" - echo " -> $line" >> "$OUTPUT" - echo "Recommendation:" >> "$OUTPUT" - echo "If this use is intentional and safe, add '// noqa: borrowed-ref OK' on the same line to silence this warning." >> "$OUTPUT" - echo "Otherwise, consider replacing $func with a thread-safe API function." >> "$OUTPUT" - echo "" >> "$OUTPUT" - FAIL=1 - done <<< "$matches" - fi - done -done - -if [[ $FAIL -eq 1 ]]; then - echo "C API borrow-ref linter found issues." -else - echo "C API borrow-ref linter found no issues." > $OUTPUT -fi - -cat "$OUTPUT" -exit "$FAIL" diff --git a/tools/linter.py b/tools/linter.py index 4b5115c97a2b..f614be100159 100644 --- a/tools/linter.py +++ b/tools/linter.py @@ -47,15 +47,18 @@ def run_lint(self, fix: bool) -> None: sys.exit(retcode) def run_check_c_api(self) -> tuple[int, str]: - # Running borrowed ref checker + """Run C-API borrowed-ref checker""" print("Running C API borrow-reference linter...") - borrowed_ref_script = os.path.join(self.repository_root, "tools", "ci", - "check_c_api_usage.sh") + borrowed_ref_script = os.path.join( + self.repository_root, "tools", "ci", "check_c_api_usage.py" + ) borrowed_res = subprocess.run( - ["bash", borrowed_ref_script], + [sys.executable, borrowed_ref_script], + cwd=self.repository_root, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, - encoding="utf-8", + text=True, + check=False, ) # Exit with non-zero if C API Check fails From dfc14f7adfaaa2084b1ba895d68de31d56854fec Mon Sep 17 00:00:00 2001 From: mattip Date: Sun, 28 Sep 2025 09:29:46 +0300 Subject: [PATCH 0505/1718] ENH: convert PyErr_Warn to PyErr_WarnEx --- numpy/_core/src/common/npy_longdouble.c | 4 ++-- 
numpy/_core/src/multiarray/arraytypes.c.src | 4 ++-- numpy/_core/src/multiarray/ctors.c | 4 ++-- numpy/_core/src/multiarray/flagsobject.c | 2 +- numpy/_core/src/umath/extobj.c | 2 +- numpy/_core/src/umath/ufunc_object.c | 4 ++-- 6 files changed, 10 insertions(+), 10 deletions(-) diff --git a/numpy/_core/src/common/npy_longdouble.c b/numpy/_core/src/common/npy_longdouble.c index ce80a9ae2bc3..644af776f9a9 100644 --- a/numpy/_core/src/common/npy_longdouble.c +++ b/numpy/_core/src/common/npy_longdouble.c @@ -144,8 +144,8 @@ npy_longdouble_from_PyLong(PyObject *long_obj) { result = NumPyOS_ascii_strtold(cstr, &end); if (errno == ERANGE) { /* strtold returns INFINITY of the correct sign. */ - if (PyErr_Warn(PyExc_RuntimeWarning, - "overflow encountered in conversion from python long") < 0) { + if (PyErr_WarnEx(PyExc_RuntimeWarning, + "overflow encountered in conversion from python long", 1) < 0) { goto fail; } } diff --git a/numpy/_core/src/multiarray/arraytypes.c.src b/numpy/_core/src/multiarray/arraytypes.c.src index 52c9bdfb6bcc..dd51a937031a 100644 --- a/numpy/_core/src/multiarray/arraytypes.c.src +++ b/numpy/_core/src/multiarray/arraytypes.c.src @@ -541,8 +541,8 @@ string_to_long_double(PyObject*op) errno = 0; temp = NumPyOS_ascii_strtold(s, &end); if (errno == ERANGE) { - if (PyErr_Warn(PyExc_RuntimeWarning, - "overflow encountered in conversion from string") < 0) { + if (PyErr_WarnEx(PyExc_RuntimeWarning, + "overflow encountered in conversion from string", 1) < 0) { Py_XDECREF(b); return 0; } diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index b7f0dcb521bb..76aa2fca86e7 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -1304,12 +1304,12 @@ _array_from_buffer_3118(PyObject *memoryview) return NULL; } - if (PyErr_Warn( + if (PyErr_WarnEx( PyExc_RuntimeWarning, "A builtin ctypes object gave a PEP3118 format " "string that does not match its itemsize, so a " "best-guess will be made of 
the data type. " - "Newer versions of python may behave correctly.") < 0) { + "Newer versions of python may behave correctly.", 1) < 0) { Py_DECREF(descr); return NULL; } diff --git a/numpy/_core/src/multiarray/flagsobject.c b/numpy/_core/src/multiarray/flagsobject.c index 8257727030c0..2570d3ec5d16 100644 --- a/numpy/_core/src/multiarray/flagsobject.c +++ b/numpy/_core/src/multiarray/flagsobject.c @@ -185,7 +185,7 @@ static char *msg = "future versions will not create a writeable " PyArrayFlagsObject *self, void *NPY_UNUSED(ignored)) \ { \ if (self->flags & NPY_ARRAY_WARN_ON_WRITE) { \ - if (PyErr_Warn(PyExc_FutureWarning, msg) < 0) {\ + if (PyErr_WarnEx(PyExc_FutureWarning, msg, 1) < 0) {\ return NULL; \ } \ }\ diff --git a/numpy/_core/src/umath/extobj.c b/numpy/_core/src/umath/extobj.c index 755d8665b11d..91b0b4c62d30 100644 --- a/numpy/_core/src/umath/extobj.c +++ b/numpy/_core/src/umath/extobj.c @@ -398,7 +398,7 @@ _error_handler(const char *name, int method, PyObject *pyfunc, char *errtype, switch(method) { case UFUNC_ERR_WARN: PyOS_snprintf(msg, sizeof(msg), "%s encountered in %s", errtype, name); - if (PyErr_Warn(PyExc_RuntimeWarning, msg) < 0) { + if (PyErr_WarnEx(PyExc_RuntimeWarning, msg, 1) < 0) { goto fail; } break; diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index 60eb2d32b7c6..0bdc14e04cde 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -4488,9 +4488,9 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, /* Warn if "where" is used without "out", issue 29561 */ if ((where_obj != NULL) && (full_args.out == NULL) && (out_obj == NULL)) { - if (PyErr_Warn(PyExc_UserWarning, + if (PyErr_WarnEx(PyExc_UserWarning, "'where' used without 'out', expect unitialized memory in output. 
" - "If this is intentional, use out=None.") < 0) { + "If this is intentional, use out=None.", 1) < 0) { goto fail; } } From d46e685dca68980e599efc9286f967ba82ee5d03 Mon Sep 17 00:00:00 2001 From: mattip Date: Sun, 28 Sep 2025 09:30:55 +0300 Subject: [PATCH 0506/1718] TST: remove xfail, test should pass everywhere --- numpy/_core/tests/test_multiarray.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 943e9642ec3c..c2fe0abb6137 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -1868,11 +1868,9 @@ def _test_cast_from_flexible(self, dtype): def test_cast_from_void(self): self._test_cast_from_flexible(np.void) - @pytest.mark.xfail(reason="See gh-9847") def test_cast_from_unicode(self): self._test_cast_from_flexible(np.str_) - @pytest.mark.xfail(reason="See gh-9847") def test_cast_from_bytes(self): self._test_cast_from_flexible(np.bytes_) From ad746c90ff15e4126f18e3ff85d5c8a06e9ec5ea Mon Sep 17 00:00:00 2001 From: Aniket Singh Yadav Date: Fri, 19 Sep 2025 12:12:04 +0530 Subject: [PATCH 0507/1718] DOC: Update Meson build examples in usage.rst [skip actions][skip azp][skip cirrus] --- doc/source/f2py/index.rst | 3 +-- doc/source/f2py/usage.rst | 7 +++---- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/doc/source/f2py/index.rst b/doc/source/f2py/index.rst index 31840e812379..46f1de0212d6 100644 --- a/doc/source/f2py/index.rst +++ b/doc/source/f2py/index.rst @@ -49,8 +49,7 @@ For Meson build examples, see :doc:`usage`. .. toctree:: :maxdepth: 3 - - usage + f2py-user f2py-reference windows/index diff --git a/doc/source/f2py/usage.rst b/doc/source/f2py/usage.rst index c0fb6c0428df..d6e042863b55 100644 --- a/doc/source/f2py/usage.rst +++ b/doc/source/f2py/usage.rst @@ -328,12 +328,11 @@ resulting package to work, you need to create a file named ``__init__.py`` defined entirely in terms of the ``add.pyf`` and ``add.f`` files. 
The conversion of the .pyf file to a .c file is handled by `numpy.distutils`. -=============================== Building with Meson (Examples) -=============================== +============================== Using f2py with Meson ---------------------- +~~~~~~~~~~~~~~~~~~~~~ Meson is a modern build system recommended for building Python extension modules, especially starting with Python 3.12 and NumPy 2.x. Meson provides @@ -370,7 +369,7 @@ user documentation or refer to SciPy's Meson build files for real-world examples: https://github.com/scipy/scipy/tree/main/meson.build Building NumPy ufunc Extensions with Meson ------------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To build a NumPy ufunc extension (C API) using Meson, you can use the following template: From 69c8ba3d5681d5d6056ee1d13acdfddeeeb001d2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 29 Sep 2025 20:35:33 +0000 Subject: [PATCH 0508/1718] MAINT: Bump github/codeql-action from 3.30.4 to 3.30.5 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.30.4 to 3.30.5. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/303c0aef88fc2fe5ff6d63d3b1596bfd83dfa1f9...3599b3baa15b485a2e49ef411a7a4bb2452e7f93) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.30.5 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 4a64b3ca7276..9f015242f94b 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@303c0aef88fc2fe5ff6d63d3b1596bfd83dfa1f9 # v3.30.4 + uses: github/codeql-action/init@3599b3baa15b485a2e49ef411a7a4bb2452e7f93 # v3.30.5 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@303c0aef88fc2fe5ff6d63d3b1596bfd83dfa1f9 # v3.30.4 + uses: github/codeql-action/autobuild@3599b3baa15b485a2e49ef411a7a4bb2452e7f93 # v3.30.5 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@303c0aef88fc2fe5ff6d63d3b1596bfd83dfa1f9 # v3.30.4 + uses: github/codeql-action/analyze@3599b3baa15b485a2e49ef411a7a4bb2452e7f93 # v3.30.5 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 092f9ef9e696..3fb782a66a8c 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
- name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@303c0aef88fc2fe5ff6d63d3b1596bfd83dfa1f9 # v2.1.27 + uses: github/codeql-action/upload-sarif@3599b3baa15b485a2e49ef411a7a4bb2452e7f93 # v2.1.27 with: sarif_file: results.sarif From 9e9ea43efdacaa9e1ea5ac947520ceb427b9863d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 29 Sep 2025 20:35:38 +0000 Subject: [PATCH 0509/1718] MAINT: Bump int128/hide-comment-action from 1.43.0 to 1.44.0 Bumps [int128/hide-comment-action](https://github.com/int128/hide-comment-action) from 1.43.0 to 1.44.0. - [Release notes](https://github.com/int128/hide-comment-action/releases) - [Commits](https://github.com/int128/hide-comment-action/compare/a30d551065e4231e6d7a671bb5ce884f9ee6417b...9803637eab610cca14ac6f64c42c0d7ffe9327e0) --- updated-dependencies: - dependency-name: int128/hide-comment-action dependency-version: 1.44.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/mypy_primer_comment.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/mypy_primer_comment.yml b/.github/workflows/mypy_primer_comment.yml index e62f26edd85f..266d7a8f2074 100644 --- a/.github/workflows/mypy_primer_comment.yml +++ b/.github/workflows/mypy_primer_comment.yml @@ -49,7 +49,7 @@ jobs: return parseInt(fs.readFileSync("pr_number.txt", { encoding: "utf8" })) - name: Hide old comments - uses: int128/hide-comment-action@a30d551065e4231e6d7a671bb5ce884f9ee6417b # v1.43.0 + uses: int128/hide-comment-action@9803637eab610cca14ac6f64c42c0d7ffe9327e0 # v1.44.0 with: token: ${{ secrets.GITHUB_TOKEN }} issue-number: ${{ steps.get-pr-number.outputs.result }} From 4a2defe2774943b2a658ac6adcb6181d85e299ea Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Mon, 29 Sep 2025 18:35:53 -0600 Subject: [PATCH 0510/1718] TST, WIP: Try updating circleci Python. 
[skip actions] [skip azp] [skip cirrus] --- .circleci/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 8c2b443f1e84..1760eca727dd 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -9,7 +9,7 @@ _defaults: &defaults docker: # CircleCI maintains a library of pre-built images # documented at https://circleci.com/developer/images/image/cimg/python - - image: cimg/python:3.11.10 + - image: cimg/python:3.11.13 working_directory: ~/repo From e299cbfb40a2a3972db8255d93431dd4fc5f15dd Mon Sep 17 00:00:00 2001 From: mattip Date: Tue, 30 Sep 2025 15:29:58 +0300 Subject: [PATCH 0511/1718] pin pyparsing since 3.3 interacts badly with matplotlib==3.10.6 [skip actions][skip azp][skip cirrusci] --- requirements/doc_requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements/doc_requirements.txt b/requirements/doc_requirements.txt index a6eb6e97b5cf..9d006b961923 100644 --- a/requirements/doc_requirements.txt +++ b/requirements/doc_requirements.txt @@ -6,6 +6,7 @@ sphinx-copybutton sphinx-design scipy matplotlib +pyparsing<3.3 pandas breathe>4.33.0 ipython!=8.1.0 From 90642e37adfb6a44724b334f4a527674b9d562bd Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 1 Oct 2025 03:47:57 +0200 Subject: [PATCH 0512/1718] TYP: Fix ``generic.__new__`` return type (#29842) --- numpy/__init__.pyi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index d452bff03fc3..ddb30065fcfe 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -3576,7 +3576,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # See https://github.com/numpy/numpy-stubs/pull/80 for more details. class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): @abstractmethod - def __new__(cls) -> None: ... + def __new__(cls) -> Self: ... def __hash__(self) -> int: ... 
@overload def __array__(self, dtype: None = None, /) -> ndarray[tuple[()], dtype[Self]]: ... From 7ad8192a64a12231af4aeb5bb030f052ef4df8ab Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 1 Oct 2025 03:49:29 +0200 Subject: [PATCH 0513/1718] TYP: remove unused ``# type: ignore``s (#29843) * TYP: remove unused ``# type: ignore``s * TYP: remove unused ``# type: ignore``s in ``testing._private.utils`` --- numpy/__init__.pyi | 8 ++++---- numpy/testing/_private/utils.pyi | 3 +-- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index ddb30065fcfe..884e0bcbd671 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -5034,11 +5034,11 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __rpow__(self, other: complex, mod: None = None, /) -> float64 | complex128: ... - def __mod__(self, other: _Float64_co, /) -> float64: ... # type: ignore[override] - def __rmod__(self, other: _Float64_co, /) -> float64: ... # type: ignore[override] + def __mod__(self, other: _Float64_co, /) -> float64: ... + def __rmod__(self, other: _Float64_co, /) -> float64: ... # type: ignore[misc] - def __divmod__(self, other: _Float64_co, /) -> _2Tuple[float64]: ... # type: ignore[override] - def __rdivmod__(self, other: _Float64_co, /) -> _2Tuple[float64]: ... # type: ignore[override] + def __divmod__(self, other: _Float64_co, /) -> _2Tuple[float64]: ... + def __rdivmod__(self, other: _Float64_co, /) -> _2Tuple[float64]: ... # type: ignore[misc] half: TypeAlias = float16 single: TypeAlias = float32 diff --git a/numpy/testing/_private/utils.pyi b/numpy/testing/_private/utils.pyi index a9b773cf2247..e90071a3b450 100644 --- a/numpy/testing/_private/utils.pyi +++ b/numpy/testing/_private/utils.pyi @@ -137,8 +137,7 @@ NOGIL_BUILD: Final[bool] = ... class KnownFailureException(Exception): ... class IgnoreException(Exception): ... 
-# NOTE: `warnings.catch_warnings` is incorrectly defined as invariant in typeshed -class clear_and_catch_warnings(warnings.catch_warnings[_W_co], Generic[_W_co]): # type: ignore[type-var] # pyright: ignore[reportInvalidTypeArguments] +class clear_and_catch_warnings(warnings.catch_warnings[_W_co], Generic[_W_co]): class_modules: ClassVar[tuple[types.ModuleType, ...]] = () modules: Final[set[types.ModuleType]] @overload # record: True From 836d16d6c0d7b3a026b8ab75081aeffa276ee932 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 1 Oct 2025 03:51:50 +0200 Subject: [PATCH 0514/1718] TYP: fix ``testing.assert_warns`` decorator order (#29844) mypy wants it to be the other way around, and reported > @overload should be placed before @deprecated --- numpy/testing/_private/utils.pyi | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/testing/_private/utils.pyi b/numpy/testing/_private/utils.pyi index e90071a3b450..31c7fab7bdf8 100644 --- a/numpy/testing/_private/utils.pyi +++ b/numpy/testing/_private/utils.pyi @@ -358,11 +358,11 @@ def assert_array_max_ulp( ) -> NDArray[Any]: ... # -@deprecated("Please use warnings.catch_warnings or pytest.warns instead") @overload -def assert_warns(warning_class: _WarningSpec) -> _GeneratorContextManager[None]: ... @deprecated("Please use warnings.catch_warnings or pytest.warns instead") +def assert_warns(warning_class: _WarningSpec) -> _GeneratorContextManager[None]: ... @overload +@deprecated("Please use warnings.catch_warnings or pytest.warns instead") def assert_warns(warning_class: _WarningSpec, func: Callable[_Tss, _T], *args: _Tss.args, **kwargs: _Tss.kwargs) -> _T: ... # From 31592aca6903e78a8579f64276ecb98120de22b9 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 1 Oct 2025 03:53:00 +0200 Subject: [PATCH 0515/1718] TYP: Add missing ``rtol`` kwarg to ``linalg.pinv`` (#29845) Apparently it was added in 2.0.0, but was missing from the stubs. 
--- numpy/linalg/_linalg.pyi | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index a63492013ade..8b09762d9536 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -28,6 +28,7 @@ from numpy import ( ) from numpy._core.fromnumeric import matrix_transpose from numpy._core.numeric import tensordot +from numpy._globals import _NoValueType from numpy._typing import ( ArrayLike, DTypeLike, @@ -324,20 +325,26 @@ def matrix_rank( @overload def pinv( a: _ArrayLikeInt_co, - rcond: _ArrayLikeFloat_co = None, + rcond: _ArrayLikeFloat_co | None = None, hermitian: bool = False, + *, + rtol: _ArrayLikeFloat_co | _NoValueType = ..., ) -> NDArray[float64]: ... @overload def pinv( a: _ArrayLikeFloat_co, - rcond: _ArrayLikeFloat_co = None, + rcond: _ArrayLikeFloat_co | None = None, hermitian: bool = False, + *, + rtol: _ArrayLikeFloat_co | _NoValueType = ..., ) -> NDArray[floating]: ... @overload def pinv( a: _ArrayLikeComplex_co, - rcond: _ArrayLikeFloat_co = None, + rcond: _ArrayLikeFloat_co | None = None, hermitian: bool = False, + *, + rtol: _ArrayLikeFloat_co | _NoValueType = ..., ) -> NDArray[complexfloating]: ... # TODO: Returns a 2-tuple of scalars for 2D arrays and From f393cf81251f0ae3e400d516ad63e92db4d7016e Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 1 Oct 2025 03:54:37 +0200 Subject: [PATCH 0516/1718] TYP: Fix signatures of ``linalg.matmul`` and ``linalg.outer`` (#29846) This fixes several issues in np.linalg.outer (positional-only params, a misplaced out=... param, and an orphanced -> _ArrayT), and also fixes linalg.matmul parameters to be positional-only. I also snuck in an an additional overload that improves the return type if both input array-likes have the same dtype. 
--- numpy/linalg/_linalg.pyi | 52 +++++++++-------------- numpy/typing/tests/data/reveal/linalg.pyi | 12 +++--- 2 files changed, 25 insertions(+), 39 deletions(-) diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index 8b09762d9536..876384d6cacc 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -79,7 +79,7 @@ __all__ = [ "vecdot", ] -_ArrayT = TypeVar("_ArrayT", bound=NDArray[Any]) +_NumberT = TypeVar("_NumberT", bound=np.number) _ModeKind: TypeAlias = L["reduced", "complete", "r", "raw"] @@ -183,33 +183,29 @@ def cholesky(a: _ArrayLikeFloat_co, /, *, upper: bool = False) -> NDArray[floati def cholesky(a: _ArrayLikeComplex_co, /, *, upper: bool = False) -> NDArray[complexfloating]: ... @overload -def outer(x1: _ArrayLike[Never], x2: _ArrayLike[Never]) -> NDArray[Any]: ... +def outer(x1: _ArrayLike[Never], x2: _ArrayLike[Never], /) -> NDArray[Any]: ... @overload -def outer(x1: _ArrayLikeBool_co, x2: _ArrayLikeBool_co) -> NDArray[np.bool]: ... +def outer(x1: _ArrayLikeBool_co, x2: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... @overload -def outer(x1: _ArrayLikeUInt_co, x2: _ArrayLikeUInt_co) -> NDArray[unsignedinteger]: ... +def outer(x1: _ArrayLike[_NumberT], x2: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... @overload -def outer(x1: _ArrayLikeInt_co, x2: _ArrayLikeInt_co) -> NDArray[signedinteger]: ... +def outer(x1: _ArrayLikeUInt_co, x2: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload -def outer(x1: _ArrayLikeFloat_co, x2: _ArrayLikeFloat_co) -> NDArray[floating]: ... +def outer(x1: _ArrayLikeInt_co, x2: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload -def outer( - x1: _ArrayLikeComplex_co, - x2: _ArrayLikeComplex_co, -) -> NDArray[complexfloating]: ... +def outer(x1: _ArrayLikeFloat_co, x2: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload -def outer( - x1: _ArrayLikeTD64_co, - x2: _ArrayLikeTD64_co, - out: None = ..., -) -> NDArray[timedelta64]: ... 
+def outer(x1: _ArrayLikeComplex_co, x2: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... +@overload +def outer(x1: _ArrayLikeTD64_co, x2: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... @overload -def outer(x1: _ArrayLikeObject_co, x2: _ArrayLikeObject_co) -> NDArray[object_]: ... +def outer(x1: _ArrayLikeObject_co, x2: _ArrayLikeObject_co, /) -> NDArray[object_]: ... @overload def outer( x1: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, x2: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, -) -> _ArrayT: ... + /, +) -> NDArray[Any]: ... @overload def qr(a: _ArrayLikeInt_co, mode: _ModeKind = "reduced") -> QRResult: ... @@ -492,22 +488,12 @@ def cross( ) -> NDArray[complexfloating]: ... @overload -def matmul( - x1: _ArrayLikeInt_co, - x2: _ArrayLikeInt_co, -) -> NDArray[signedinteger]: ... +def matmul(x1: _ArrayLike[_NumberT], x2: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... @overload -def matmul( - x1: _ArrayLikeUInt_co, - x2: _ArrayLikeUInt_co, -) -> NDArray[unsignedinteger]: ... +def matmul(x1: _ArrayLikeInt_co, x2: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload -def matmul( - x1: _ArrayLikeFloat_co, - x2: _ArrayLikeFloat_co, -) -> NDArray[floating]: ... +def matmul(x1: _ArrayLikeUInt_co, x2: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload -def matmul( - x1: _ArrayLikeComplex_co, - x2: _ArrayLikeComplex_co, -) -> NDArray[complexfloating]: ... +def matmul(x1: _ArrayLikeFloat_co, x2: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... +@overload +def matmul(x1: _ArrayLikeComplex_co, x2: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... 
diff --git a/numpy/typing/tests/data/reveal/linalg.pyi b/numpy/typing/tests/data/reveal/linalg.pyi index 663fb888f012..ef7819f448ca 100644 --- a/numpy/typing/tests/data/reveal/linalg.pyi +++ b/numpy/typing/tests/data/reveal/linalg.pyi @@ -43,9 +43,9 @@ assert_type(np.linalg.cholesky(AR_i8), npt.NDArray[np.float64]) assert_type(np.linalg.cholesky(AR_f8), npt.NDArray[np.floating]) assert_type(np.linalg.cholesky(AR_c16), npt.NDArray[np.complexfloating]) -assert_type(np.linalg.outer(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) -assert_type(np.linalg.outer(AR_f8, AR_f8), npt.NDArray[np.floating]) -assert_type(np.linalg.outer(AR_c16, AR_c16), npt.NDArray[np.complexfloating]) +assert_type(np.linalg.outer(AR_i8, AR_i8), npt.NDArray[np.int64]) +assert_type(np.linalg.outer(AR_f8, AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.outer(AR_c16, AR_c16), npt.NDArray[np.complex128]) assert_type(np.linalg.outer(AR_b, AR_b), npt.NDArray[np.bool]) assert_type(np.linalg.outer(AR_O, AR_O), npt.NDArray[np.object_]) assert_type(np.linalg.outer(AR_i8, AR_m), npt.NDArray[np.timedelta64]) @@ -129,6 +129,6 @@ assert_type(np.linalg.cross(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) assert_type(np.linalg.cross(AR_f8, AR_f8), npt.NDArray[np.floating]) assert_type(np.linalg.cross(AR_c16, AR_c16), npt.NDArray[np.complexfloating]) -assert_type(np.linalg.matmul(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) -assert_type(np.linalg.matmul(AR_f8, AR_f8), npt.NDArray[np.floating]) -assert_type(np.linalg.matmul(AR_c16, AR_c16), npt.NDArray[np.complexfloating]) +assert_type(np.linalg.matmul(AR_i8, AR_i8), npt.NDArray[np.int64]) +assert_type(np.linalg.matmul(AR_f8, AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.matmul(AR_c16, AR_c16), npt.NDArray[np.complex128]) From 820b45e29313eda5562f06c1ae9ada3c4b98223e Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 1 Oct 2025 03:56:24 +0200 Subject: [PATCH 0517/1718] TYP: Fix incompatible defaults in ``polyfit``, ``histogram``, and 
``histogramdd`` (#29847) The default values weren't assignable to the types of the parameters. --- numpy/lib/_histograms_impl.pyi | 4 ++-- numpy/lib/_polynomial_impl.pyi | 6 ++++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/numpy/lib/_histograms_impl.pyi b/numpy/lib/_histograms_impl.pyi index 7cb70ef21508..72a31dcedc1f 100644 --- a/numpy/lib/_histograms_impl.pyi +++ b/numpy/lib/_histograms_impl.pyi @@ -27,14 +27,14 @@ def histogram( a: ArrayLike, bins: _BinKind | SupportsIndex | ArrayLike = 10, range: tuple[float, float] | None = None, - density: bool = None, + density: bool | None = None, weights: ArrayLike | None = None, ) -> tuple[NDArray[Any], NDArray[Any]]: ... def histogramdd( sample: ArrayLike, bins: SupportsIndex | ArrayLike = 10, - range: Sequence[tuple[float, float]] = None, + range: Sequence[tuple[float, float]] | None = None, density: bool | None = None, weights: ArrayLike | None = None, ) -> tuple[NDArray[Any], tuple[NDArray[Any], ...]]: ... diff --git a/numpy/lib/_polynomial_impl.pyi b/numpy/lib/_polynomial_impl.pyi index 0cdab5f11b5c..9c02a7f867c5 100644 --- a/numpy/lib/_polynomial_impl.pyi +++ b/numpy/lib/_polynomial_impl.pyi @@ -138,7 +138,8 @@ def polyfit( rcond: float | None = None, full: L[False] = False, w: _ArrayLikeFloat_co | None = None, - cov: L[True, "unscaled"] = False, + *, + cov: L[True, "unscaled"], ) -> _2Tup[NDArray[float64]]: ... @overload def polyfit( @@ -148,7 +149,8 @@ def polyfit( rcond: float | None = None, full: L[False] = False, w: _ArrayLikeFloat_co | None = None, - cov: L[True, "unscaled"] = False, + *, + cov: L[True, "unscaled"], ) -> _2Tup[NDArray[complex128]]: ... 
@overload def polyfit( From 69e44403eb217df45c288efa3c941eb667ddd079 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 1 Oct 2025 03:57:42 +0200 Subject: [PATCH 0518/1718] MAINT,TYP: bump `mypy` to `1.18.2` (#29848) https://mypy.readthedocs.io/en/stable/changelog.html#mypy-1-18-2 --- environment.yml | 2 +- requirements/test_requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/environment.yml b/environment.yml index fc171a285a9d..6eeddb2311ea 100644 --- a/environment.yml +++ b/environment.yml @@ -25,7 +25,7 @@ dependencies: - hypothesis # For type annotations - typing_extensions>=4.5.0 - - mypy=1.18.1 + - mypy=1.18.2 - orjson # makes mypy faster # For building docs - sphinx>=4.5.0 diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index 95396afaf5a0..101cb6251251 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -12,7 +12,7 @@ pytest-timeout # For testing types. Notes on the restrictions: # - Mypy relies on C API features not present in PyPy # NOTE: Keep mypy in sync with environment.yml -mypy==1.18.1; platform_python_implementation != "PyPy" +mypy==1.18.2; platform_python_implementation != "PyPy" typing_extensions>=4.5.0 # for optional f2py encoding detection charset-normalizer From 8fecead7a7091ce5e757529f10565412e26cbb88 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 1 Oct 2025 04:12:29 +0200 Subject: [PATCH 0519/1718] CI: Use ``uv`` instead of ``pip`` in the mypy workflow (#29849) Because that's what the cool kids are doing nowadays --- .github/workflows/mypy.yml | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml index e2dcdccf868b..c6b7c89b8f5c 100644 --- a/.github/workflows/mypy.yml +++ b/.github/workflows/mypy.yml @@ -55,17 +55,21 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - - uses: 
actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6.8.0 with: python-version: ${{ matrix.os_python[1] }} + activate-environment: true + cache-dependency-glob: | + requirements/build_requirements.txt + requirements/test_requirements.txt - name: Install dependencies - run: | - pip install -r requirements/build_requirements.txt - # orjson makes mypy faster but the default requirements.txt - # can't install it because orjson doesn't support 32 bit Linux - pip install orjson - pip install -r requirements/test_requirements.txt + # orjson makes mypy faster but the default requirements.txt + # can't install it because orjson doesn't support 32 bit Linux + run: >- + uv pip install + -r requirements/build_requirements.txt + -r requirements/test_requirements.txt + orjson - name: Build run: | spin build -j2 -- -Dallow-noblas=true -Ddisable-optimization=true --vsenv From c111c3c06d0c7bb92aaf56319a8edc9448815424 Mon Sep 17 00:00:00 2001 From: Arthur Lacote Date: Wed, 1 Oct 2025 09:52:03 +0200 Subject: [PATCH 0520/1718] ENH: speedup numpy.quantile when weights are provided (#29837) In the implementation numpy.quantile(..., weights=w, method="inverted_cdf"), the argsort doesn't need to be stable. See some elements of proof here: scikit-learn/scikit-learn#32285 Because it's just an optimization, I don't think this requires new tests. Speedup for big arrays might be up to ~2x. I wrote an initial proof in the equivalent PR for scikit-learn: scikit-learn/scikit-learn#32285. 
But while reading carefully the doc of numpy.quantile I found a new simpler formulation: The doc says: --- numpy/lib/_function_base_impl.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 421dee5578ab..81ba8ea6bc3b 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -4895,7 +4895,7 @@ def _quantile( weights = np.asanyarray(weights) if axis != 0: weights = np.moveaxis(weights, axis, destination=0) - index_array = np.argsort(arr, axis=0, kind="stable") + index_array = np.argsort(arr, axis=0) # arr = arr[index_array, ...] # but this adds trailing dimensions of # 1. From d7f9fa86b75ce50d3b608c5ae8adbfa7fe1cc0ea Mon Sep 17 00:00:00 2001 From: "Christine P. Chai" Date: Wed, 1 Oct 2025 03:22:57 -0700 Subject: [PATCH 0521/1718] DOC: Add a few missing commas in math operations (#29852) * DOC: Add a missing comma in numpy.cov [skip actions][skip azp][skip cirrus] * DOC: Add missing comma in laguerre.py [skip actions][skip azp][skip cirrus] * DOC: Also add missing commas in legendre.py [skip actions][skip azp][skip cirrus] --- numpy/lib/_function_base_impl.py | 2 +- numpy/polynomial/laguerre.py | 2 +- numpy/polynomial/legendre.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 81ba8ea6bc3b..b8d6926bca88 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -2715,7 +2715,7 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, Estimate a covariance matrix, given data and weights. Covariance indicates the level to which two variables vary together. - If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`, + If we examine N-dimensional samples, :math:`X = [x_1, x_2, ..., x_N]^T`, then the covariance matrix element :math:`C_{ij}` is the covariance of :math:`x_i` and :math:`x_j`. 
The element :math:`C_{ii}` is the variance of :math:`x_i`. diff --git a/numpy/polynomial/laguerre.py b/numpy/polynomial/laguerre.py index 38eb5a80b200..8d5d5ae67632 100644 --- a/numpy/polynomial/laguerre.py +++ b/numpy/polynomial/laguerre.py @@ -1185,7 +1185,7 @@ def lagvander2d(x, y, deg): correspond to the elements of a 2-D coefficient array `c` of shape (xdeg + 1, ydeg + 1) in the order - .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ... + .. math:: c_{00}, c_{01}, c_{02}, ... , c_{10}, c_{11}, c_{12}, ... and ``np.dot(V, c.flat)`` and ``lagval2d(x, y, c)`` will be the same up to roundoff. This equivalence is useful both for least squares diff --git a/numpy/polynomial/legendre.py b/numpy/polynomial/legendre.py index b43bdfa83034..5fdc5245b9d3 100644 --- a/numpy/polynomial/legendre.py +++ b/numpy/polynomial/legendre.py @@ -1164,7 +1164,7 @@ def legvander2d(x, y, deg): correspond to the elements of a 2-D coefficient array `c` of shape (xdeg + 1, ydeg + 1) in the order - .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ... + .. math:: c_{00}, c_{01}, c_{02}, ... , c_{10}, c_{11}, c_{12}, ... and ``np.dot(V, c.flat)`` and ``legval2d(x, y, c)`` will be the same up to roundoff. This equivalence is useful both for least squares From e7276da07f6fc69cc2947f4dc67a87472e4b3a8b Mon Sep 17 00:00:00 2001 From: "hpkfft.com" Date: Wed, 1 Oct 2025 03:31:27 -0700 Subject: [PATCH 0522/1718] DEP: Raise `TypeError` on attempt to convert array with `ndim > 0` to scalar (#29841) This PR removes deprecated functionality, as requested in gh-29835. The functionality will be deprecated for about 2.5 years (NumPy 1.25). 
--- .../upcoming_changes/29841.expired.rst | 6 +++ numpy/_core/src/multiarray/common.c | 18 -------- numpy/_core/tests/test_deprecations.py | 12 ----- numpy/_core/tests/test_multiarray.py | 45 ++++++++++--------- 4 files changed, 31 insertions(+), 50 deletions(-) create mode 100644 doc/release/upcoming_changes/29841.expired.rst diff --git a/doc/release/upcoming_changes/29841.expired.rst b/doc/release/upcoming_changes/29841.expired.rst new file mode 100644 index 000000000000..34977cec2f70 --- /dev/null +++ b/doc/release/upcoming_changes/29841.expired.rst @@ -0,0 +1,6 @@ +Raise `TypeError` on attempt to convert array with `ndim > 0` to scalar +----------------------------------------------------------------------- +Conversion of an array with `ndim > 0` to a scalar was deprecated in +NumPy 1.25. Now, attempting to do so raises `TypeError`. +Ensure you extract a single element from your array before performing +this operation. diff --git a/numpy/_core/src/multiarray/common.c b/numpy/_core/src/multiarray/common.c index 4d1d9c238418..3bae4a28efba 100644 --- a/numpy/_core/src/multiarray/common.c +++ b/numpy/_core/src/multiarray/common.c @@ -449,24 +449,6 @@ check_is_convertible_to_scalar(PyArrayObject *v) return 0; } - /* Remove this if-else block when the deprecation expires */ - if (PyArray_SIZE(v) == 1) { - /* Numpy 1.25.0, 2023-01-02 */ - if (DEPRECATE( - "Conversion of an array with ndim > 0 to a scalar " - "is deprecated, and will error in future. " - "Ensure you extract a single element from your array " - "before performing this operation. 
" - "(Deprecated NumPy 1.25.)") < 0) { - return -1; - } - return 0; - } else { - PyErr_SetString(PyExc_TypeError, - "only length-1 arrays can be converted to Python scalars"); - return -1; - } - PyErr_SetString(PyExc_TypeError, "only 0-dimensional arrays can be converted to Python scalars"); return -1; diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index 3c6ed420dbfa..4981f2fd0e30 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -241,18 +241,6 @@ def test_both_passed(self, func): func([0., 1.], 0., interpolation="nearest", method="nearest") -class TestScalarConversion(_DeprecationTestCase): - # 2023-01-02, 1.25.0 - def test_float_conversion(self): - self.assert_deprecated(float, args=(np.array([3.14]),)) - - def test_behaviour(self): - b = np.array([[3.14]]) - c = np.zeros(5) - with pytest.warns(DeprecationWarning): - c[0] = b - - class TestPyIntConversion(_DeprecationTestCase): message = r".*stop allowing conversion of out-of-bound.*" diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index c2fe0abb6137..347c92e48c73 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -3886,21 +3886,10 @@ def test__complex__(self): '?', 'O'] for dt in dtypes: a = np.array(7, dtype=dt) - b = np.array([7], dtype=dt) - c = np.array([[[[[7]]]]], dtype=dt) - msg = f'dtype: {dt}' ap = complex(a) assert_equal(ap, a, msg) - with pytest.warns(DeprecationWarning): - bp = complex(b) - assert_equal(bp, b, msg) - - with pytest.warns(DeprecationWarning): - cp = complex(c) - assert_equal(cp, c, msg) - def test__complex__should_not_work(self): dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8', @@ -3908,7 +3897,11 @@ def test__complex__should_not_work(self): '?', 'O'] for dt in dtypes: a = np.array([1, 2, 3], dtype=dt) + b = np.array([7], dtype=dt) + c = np.array([[[[[7]]]]], dtype=dt) assert_raises(TypeError, 
complex, a) + assert_raises(TypeError, complex, b) + assert_raises(TypeError, complex, c) dt = np.dtype([('a', 'f8'), ('b', 'i1')]) b = np.array((1.0, 3), dtype=dt) @@ -3921,8 +3914,7 @@ def test__complex__should_not_work(self): assert_raises(TypeError, complex, d) e = np.array(['1+1j'], 'U') - with pytest.warns(DeprecationWarning): - assert_raises(TypeError, complex, e) + assert_raises(TypeError, complex, e) class TestCequenceMethods: def test_array_contains(self): @@ -9244,10 +9236,8 @@ def test_to_int_scalar(self): int_funcs = (int, lambda x: x.__int__()) for int_func in int_funcs: assert_equal(int_func(np.array(0)), 0) - with pytest.warns(DeprecationWarning): - assert_equal(int_func(np.array([1])), 1) - with pytest.warns(DeprecationWarning): - assert_equal(int_func(np.array([[42]])), 42) + assert_raises(TypeError, int_func, np.array([1])) + assert_raises(TypeError, int_func, np.array([[42]])) assert_raises(TypeError, int_func, np.array([1, 2])) # gh-9972 @@ -9260,9 +9250,24 @@ def __int__(self): raise NotImplementedError assert_raises(NotImplementedError, int_func, np.array(NotConvertible())) - with pytest.warns(DeprecationWarning): - assert_raises(NotImplementedError, - int_func, np.array([NotConvertible()])) + assert_raises(TypeError, + int_func, np.array([NotConvertible()])) + + def test_to_float_scalar(self): + float_funcs = (float, lambda x: x.__float__()) + for float_func in float_funcs: + assert_equal(float_func(np.array(0)), 0.0) + assert_equal(float_func(np.array(1.0, np.float64)), 1.0) + assert_raises(TypeError, float_func, np.array([2])) + assert_raises(TypeError, float_func, np.array([3.14])) + assert_raises(TypeError, float_func, np.array([[4.0]])) + + assert_equal(5.0, float_func(np.array('5'))) + assert_equal(5.1, float_func(np.array('5.1'))) + assert_equal(6.0, float_func(np.bytes_(b'6'))) + assert_equal(6.1, float_func(np.bytes_(b'6.1'))) + assert_equal(7.0, float_func(np.str_('7'))) + assert_equal(7.1, float_func(np.str_('7.1'))) class 
TestWhere: From f1b4145e7283bb724b509736f28f9c42733b2ed3 Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Wed, 1 Oct 2025 22:12:35 +0300 Subject: [PATCH 0523/1718] CI: Fix loongarch64 CI (#29856) * CI: Fix loongarch64 CI by moving to the latest tag of tonistiigi/binfmt which come with qemu-10 --- .github/workflows/linux_qemu.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/linux_qemu.yml b/.github/workflows/linux_qemu.yml index e0767d81213e..c11eca8dc5ab 100644 --- a/.github/workflows/linux_qemu.yml +++ b/.github/workflows/linux_qemu.yml @@ -205,7 +205,7 @@ jobs: - name: Initialize binfmt_misc for qemu-user-static run: | - docker run --rm --privileged loongcr.lcpu.dev/multiarch/archlinux --reset -p yes + docker run --rm --privileged tonistiigi/binfmt:qemu-v10.0.4-56 --install all - name: Install GCC cross-compilers run: | From 17d5deda759e3a3ce79d88966ce69fdb323f623b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 1 Oct 2025 13:24:33 -0600 Subject: [PATCH 0524/1718] MAINT: Bump ossf/scorecard-action from 2.4.2 to 2.4.3 (#29854) Bumps [ossf/scorecard-action](https://github.com/ossf/scorecard-action) from 2.4.2 to 2.4.3. - [Release notes](https://github.com/ossf/scorecard-action/releases) - [Changelog](https://github.com/ossf/scorecard-action/blob/main/RELEASE.md) - [Commits](https://github.com/ossf/scorecard-action/compare/05b42c624433fc40578a4040d5cf5e36ddca8cde...4eaacf0543bb3f2c246792bd56e8cdeffafb205a) --- updated-dependencies: - dependency-name: ossf/scorecard-action dependency-version: 2.4.3 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/scorecards.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 3fb782a66a8c..1855c3c2d54b 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -30,7 +30,7 @@ jobs: persist-credentials: false - name: "Run analysis" - uses: ossf/scorecard-action@05b42c624433fc40578a4040d5cf5e36ddca8cde # v2.4.2 + uses: ossf/scorecard-action@4eaacf0543bb3f2c246792bd56e8cdeffafb205a # v2.4.3 with: results_file: results.sarif results_format: sarif From ffe55df43f77e4fee273957fa936f455a70c2d2a Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Thu, 2 Oct 2025 10:26:19 +0100 Subject: [PATCH 0525/1718] TYP: Remove ``None`` from definition of ``DTypeLike`` type alias (#29739) Co-authored-by: Joren Hammudoglu --- doc/release/upcoming_changes/29739.change.rst | 15 ++++++ numpy/__init__.pyi | 18 +++---- numpy/_core/_asarray.pyi | 4 +- numpy/_core/fromnumeric.pyi | 40 ++++++++-------- numpy/_core/multiarray.pyi | 10 ++-- numpy/_core/numeric.pyi | 10 ++-- numpy/_core/numerictypes.pyi | 2 +- numpy/_core/records.pyi | 30 ++++++------ numpy/_core/shape_base.pyi | 6 +-- numpy/_typing/_dtype_like.py | 2 +- numpy/_typing/_ufunc.pyi | 48 +++++++++---------- numpy/ctypeslib/_ctypeslib.pyi | 4 +- numpy/lib/_function_base_impl.pyi | 4 +- numpy/lib/_npyio_impl.pyi | 6 +-- numpy/lib/_twodim_base_impl.pyi | 4 +- numpy/matlib.pyi | 2 +- numpy/matrixlib/defmatrix.pyi | 2 +- numpy/random/_generator.pyi | 4 +- numpy/typing/tests/data/pass/arithmetic.py | 2 +- 19 files changed, 114 insertions(+), 99 deletions(-) create mode 100644 doc/release/upcoming_changes/29739.change.rst diff --git a/doc/release/upcoming_changes/29739.change.rst b/doc/release/upcoming_changes/29739.change.rst new file mode 
100644 index 000000000000..5d1316a1ba41 --- /dev/null +++ b/doc/release/upcoming_changes/29739.change.rst @@ -0,0 +1,15 @@ +``numpy.typing.DTypeLike`` no longer accepts ``None`` +----------------------------------------------------- +The type alias ``numpy.typing.DTypeLike`` no longer accepts ``None``. Instead of + +.. code-block:: python + + dtype: DTypeLike = None + +it should now be + +.. code-block:: python + + dtype: DTypeLike | None = None + +instead. diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 884e0bcbd671..d29bb196a070 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1553,10 +1553,10 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): @overload def __rmul__(self, value: SupportsIndex, /) -> dtype: ... - def __gt__(self, other: DTypeLike, /) -> builtins.bool: ... - def __ge__(self, other: DTypeLike, /) -> builtins.bool: ... - def __lt__(self, other: DTypeLike, /) -> builtins.bool: ... - def __le__(self, other: DTypeLike, /) -> builtins.bool: ... + def __gt__(self, other: DTypeLike | None, /) -> builtins.bool: ... + def __ge__(self, other: DTypeLike | None, /) -> builtins.bool: ... + def __lt__(self, other: DTypeLike | None, /) -> builtins.bool: ... + def __le__(self, other: DTypeLike | None, /) -> builtins.bool: ... 
# Explicitly defined `__eq__` and `__ne__` to get around mypy's # `strict_equality` option; even though their signatures are @@ -2054,7 +2054,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __new__( cls, shape: _ShapeLike, - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., buffer: _SupportsBuffer | None = ..., offset: SupportsIndex = ..., strides: _ShapeLike | None = ..., @@ -2558,7 +2558,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def astype( self, - dtype: DTypeLike, + dtype: DTypeLike | None, order: _OrderKACF = ..., casting: _CastingKind = ..., subok: builtins.bool = ..., @@ -3618,7 +3618,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): @overload def astype( self, - dtype: DTypeLike, + dtype: DTypeLike | None, order: _OrderKACF = ..., casting: _CastingKind = ..., subok: builtins.bool = ..., @@ -5799,7 +5799,7 @@ class nditer: op: ArrayLike | Sequence[ArrayLike | None], flags: Sequence[_NDIterFlagsKind] | None = ..., op_flags: Sequence[Sequence[_NDIterFlagsOp]] | None = ..., - op_dtypes: DTypeLike | Sequence[DTypeLike] = ..., + op_dtypes: DTypeLike | Sequence[DTypeLike | None] | None = ..., order: _OrderKACF = ..., casting: _CastingKind = ..., op_axes: Sequence[Sequence[SupportsIndex]] | None = ..., @@ -6009,7 +6009,7 @@ class matrix(ndarray[_2DShapeT_co, _DTypeT_co]): def __new__( subtype, # pyright: ignore[reportSelfClsParameterName] data: ArrayLike, - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., copy: builtins.bool = ..., ) -> matrix[_2D, Incomplete]: ... def __array_finalize__(self, obj: object) -> None: ... 
diff --git a/numpy/_core/_asarray.pyi b/numpy/_core/_asarray.pyi index da5884d49aba..349933323b95 100644 --- a/numpy/_core/_asarray.pyi +++ b/numpy/_core/_asarray.pyi @@ -26,7 +26,7 @@ def require( @overload def require( a: object, - dtype: DTypeLike = None, + dtype: DTypeLike | None = None, requirements: _E | Iterable[_RequirementsWithE] | None = None, *, like: _SupportsArrayFunc | None = None @@ -34,7 +34,7 @@ def require( @overload def require( a: object, - dtype: DTypeLike = None, + dtype: DTypeLike | None = None, requirements: _Requirements | Iterable[_Requirements] | None = None, *, like: _SupportsArrayFunc | None = None diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index 05816088f041..dc8a040e30e8 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -792,7 +792,7 @@ def sum( def sum( a: ArrayLike, axis: _ShapeLike | None = None, - dtype: DTypeLike = None, + dtype: DTypeLike | None = None, out: None = None, keepdims: bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., @@ -802,7 +802,7 @@ def sum( def sum( a: ArrayLike, axis: _ShapeLike | None, - dtype: DTypeLike, + dtype: DTypeLike | None, out: _ArrayT, keepdims: bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., @@ -812,7 +812,7 @@ def sum( def sum( a: ArrayLike, axis: _ShapeLike | None = None, - dtype: DTypeLike = None, + dtype: DTypeLike | None = None, *, out: _ArrayT, keepdims: bool | _NoValueType = ..., @@ -930,21 +930,21 @@ def cumsum( def cumsum( a: ArrayLike, axis: SupportsIndex | None = None, - dtype: DTypeLike = None, + dtype: DTypeLike | None = None, out: None = None, ) -> NDArray[Any]: ... @overload def cumsum( a: ArrayLike, axis: SupportsIndex | None, - dtype: DTypeLike, + dtype: DTypeLike | None, out: _ArrayT, ) -> _ArrayT: ... @overload def cumsum( a: ArrayLike, axis: SupportsIndex | None = None, - dtype: DTypeLike = None, + dtype: DTypeLike | None = None, *, out: _ArrayT, ) -> _ArrayT: ... 
@@ -985,7 +985,7 @@ def cumulative_sum( /, *, axis: SupportsIndex | None = None, - dtype: DTypeLike = None, + dtype: DTypeLike | None = None, out: None = None, include_initial: bool = False, ) -> NDArray[Any]: ... @@ -995,7 +995,7 @@ def cumulative_sum( /, *, axis: SupportsIndex | None = None, - dtype: DTypeLike = None, + dtype: DTypeLike | None = None, out: _ArrayT, include_initial: bool = False, ) -> _ArrayT: ... @@ -1288,21 +1288,21 @@ def cumprod( def cumprod( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: SupportsIndex | None = None, - dtype: DTypeLike = None, + dtype: DTypeLike | None = None, out: None = None, ) -> NDArray[Any]: ... @overload def cumprod( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: SupportsIndex | None, - dtype: DTypeLike, + dtype: DTypeLike | None, out: _ArrayT, ) -> _ArrayT: ... @overload def cumprod( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: SupportsIndex | None = None, - dtype: DTypeLike = None, + dtype: DTypeLike | None = None, *, out: _ArrayT, ) -> _ArrayT: ... @@ -1384,7 +1384,7 @@ def cumulative_prod( /, *, axis: SupportsIndex | None = None, - dtype: DTypeLike = None, + dtype: DTypeLike | None = None, out: None = None, include_initial: bool = False, ) -> NDArray[Any]: ... @@ -1394,7 +1394,7 @@ def cumulative_prod( /, *, axis: SupportsIndex | None = None, - dtype: DTypeLike = None, + dtype: DTypeLike | None = None, out: _ArrayT, include_initial: bool = False, ) -> _ArrayT: ... 
@@ -1489,7 +1489,7 @@ def mean( def mean( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: _ShapeLike | None, - dtype: DTypeLike, + dtype: DTypeLike | None, out: _ArrayT, keepdims: bool | _NoValueType = ..., *, @@ -1622,7 +1622,7 @@ def std( def std( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: _ShapeLike | None = None, - dtype: DTypeLike = None, + dtype: DTypeLike | None = None, out: None = None, ddof: float = 0, keepdims: bool | _NoValueType = ..., @@ -1635,7 +1635,7 @@ def std( def std( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: _ShapeLike | None, - dtype: DTypeLike, + dtype: DTypeLike | None, out: _ArrayT, ddof: float = 0, keepdims: bool | _NoValueType = ..., @@ -1648,7 +1648,7 @@ def std( def std( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: _ShapeLike | None = None, - dtype: DTypeLike = None, + dtype: DTypeLike | None = None, *, out: _ArrayT, ddof: float = 0, @@ -1714,7 +1714,7 @@ def var( def var( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: _ShapeLike | None = None, - dtype: DTypeLike = None, + dtype: DTypeLike | None = None, out: None = None, ddof: float = 0, keepdims: bool | _NoValueType = ..., @@ -1727,7 +1727,7 @@ def var( def var( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: _ShapeLike | None, - dtype: DTypeLike, + dtype: DTypeLike | None, out: _ArrayT, ddof: float = 0, keepdims: bool | _NoValueType = ..., @@ -1740,7 +1740,7 @@ def var( def var( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: _ShapeLike | None = None, - dtype: DTypeLike = None, + dtype: DTypeLike | None = None, *, out: _ArrayT, ddof: float = 0, diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index 5a3fa46bee20..8ab52b6fe74d 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -559,7 +559,7 @@ def concatenate( axis: SupportsIndex | None = ..., *, out: _ArrayT, - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., casting: _CastingKind | None = ... ) -> _ArrayT: ... 
@overload @@ -569,7 +569,7 @@ def concatenate( axis: SupportsIndex | None, out: _ArrayT, *, - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., casting: _CastingKind | None = ... ) -> _ArrayT: ... @@ -605,7 +605,7 @@ def can_cast( def min_scalar_type(a: ArrayLike, /) -> dtype: ... -def result_type(*arrays_and_dtypes: ArrayLike | DTypeLike) -> dtype: ... +def result_type(*arrays_and_dtypes: ArrayLike | DTypeLike | None) -> dtype: ... @overload def dot(a: ArrayLike, b: ArrayLike, out: None = ...) -> Any: ... @@ -941,7 +941,7 @@ def fromiter( @overload def fromiter( iter: Iterable[Any], - dtype: DTypeLike, + dtype: DTypeLike | None, count: SupportsIndex = ..., *, like: _SupportsArrayFunc | None = ..., @@ -1328,7 +1328,7 @@ def nested_iters( axes: Sequence[Sequence[SupportsIndex]], flags: Sequence[_NDIterFlagsKind] | None = ..., op_flags: Sequence[Sequence[_NDIterFlagsOp]] | None = ..., - op_dtypes: DTypeLike | Sequence[DTypeLike] = ..., + op_dtypes: DTypeLike | Sequence[DTypeLike | None] | None = ..., order: _OrderKACF = ..., casting: _CastingKind = ..., buffersize: SupportsIndex = ..., diff --git a/numpy/_core/numeric.pyi b/numpy/_core/numeric.pyi index d08eb540743d..f9ce1ce3a4a3 100644 --- a/numpy/_core/numeric.pyi +++ b/numpy/_core/numeric.pyi @@ -1221,19 +1221,19 @@ def indices( @overload def indices( dimensions: Sequence[int], - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., sparse: L[False] = False, ) -> NDArray[Any]: ... @overload def indices( dimensions: Sequence[int], - dtype: DTypeLike, + dtype: DTypeLike | None, sparse: L[True], ) -> tuple[NDArray[Any], ...]: ... @overload def indices( dimensions: Sequence[int], - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., *, sparse: L[True], ) -> tuple[NDArray[Any], ...]: ... @@ -1242,7 +1242,7 @@ def fromfunction( function: Callable[..., _T], shape: Sequence[int], *, - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., like: _SupportsArrayFunc | None = None, **kwargs: Any, ) -> _T: ... 
@@ -1320,7 +1320,7 @@ def astype( @overload def astype( x: ndarray[_ShapeT, dtype], - dtype: DTypeLike, + dtype: DTypeLike | None, /, *, copy: py_bool = True, diff --git a/numpy/_core/numerictypes.pyi b/numpy/_core/numerictypes.pyi index 0a6ac988163d..beec14079b48 100644 --- a/numpy/_core/numerictypes.pyi +++ b/numpy/_core/numerictypes.pyi @@ -155,7 +155,7 @@ class _TypeCodes(TypedDict): All: L["?bhilqnpBHILQNPefdgFDGSUVOMm"] def isdtype(dtype: dtype | type, kind: DTypeLike | tuple[DTypeLike, ...]) -> py_bool: ... -def issubdtype(arg1: DTypeLike, arg2: DTypeLike) -> py_bool: ... +def issubdtype(arg1: DTypeLike | None, arg2: DTypeLike | None) -> py_bool: ... typecodes: Final[_TypeCodes] = ... ScalarType: Final[ diff --git a/numpy/_core/records.pyi b/numpy/_core/records.pyi index ead165918478..200d5310f955 100644 --- a/numpy/_core/records.pyi +++ b/numpy/_core/records.pyi @@ -77,7 +77,7 @@ class recarray(np.ndarray[_ShapeT_co, _DTypeT_co]): offset: SupportsIndex = 0, strides: _ShapeLike | None = None, *, - formats: DTypeLike, + formats: DTypeLike | None, names: str | Sequence[str] | None = None, titles: str | Sequence[str] | None = None, byteorder: _ByteOrder | None = None, @@ -88,7 +88,7 @@ class recarray(np.ndarray[_ShapeT_co, _DTypeT_co]): def __new__( subtype, shape: _ShapeLike, - dtype: DTypeLike, + dtype: DTypeLike | None, buf: _SupportsBuffer | None = None, offset: SupportsIndex = 0, strides: _ShapeLike | None = None, @@ -115,7 +115,7 @@ class format_parser: def __init__( self, /, - formats: DTypeLike, + formats: DTypeLike | None, names: str | Sequence[str] | None, titles: str | Sequence[str] | None, aligned: bool = False, @@ -140,7 +140,7 @@ def fromarrays( dtype: None = None, shape: _ShapeLike | None = None, *, - formats: DTypeLike, + formats: DTypeLike | None, names: str | Sequence[str] | None = None, titles: str | Sequence[str] | None = None, aligned: bool = False, @@ -164,7 +164,7 @@ def fromrecords( dtype: None = None, shape: _ShapeLike | None = None, *, 
- formats: DTypeLike, + formats: DTypeLike | None, names: str | Sequence[str] | None = None, titles: str | Sequence[str] | None = None, aligned: bool = False, @@ -175,7 +175,7 @@ def fromrecords( @overload def fromstring( datastring: _SupportsBuffer, - dtype: DTypeLike, + dtype: DTypeLike | None, shape: _ShapeLike | None = None, offset: int = 0, formats: None = None, @@ -191,7 +191,7 @@ def fromstring( shape: _ShapeLike | None = None, offset: int = 0, *, - formats: DTypeLike, + formats: DTypeLike | None, names: str | Sequence[str] | None = None, titles: str | Sequence[str] | None = None, aligned: bool = False, @@ -202,7 +202,7 @@ def fromstring( @overload def fromfile( fd: StrOrBytesPath | _SupportsReadInto, - dtype: DTypeLike, + dtype: DTypeLike | None, shape: _ShapeLike | None = None, offset: int = 0, formats: None = None, @@ -218,7 +218,7 @@ def fromfile( shape: _ShapeLike | None = None, offset: int = 0, *, - formats: DTypeLike, + formats: DTypeLike | None, names: str | Sequence[str] | None = None, titles: str | Sequence[str] | None = None, aligned: bool = False, @@ -243,7 +243,7 @@ def array( @overload def array( obj: ArrayLike, - dtype: DTypeLike, + dtype: DTypeLike | None, shape: _ShapeLike | None = None, offset: int = 0, strides: tuple[int, ...] | None = None, @@ -262,7 +262,7 @@ def array( offset: int = 0, strides: tuple[int, ...] | None = None, *, - formats: DTypeLike, + formats: DTypeLike | None, names: str | Sequence[str] | None = None, titles: str | Sequence[str] | None = None, aligned: bool = False, @@ -272,7 +272,7 @@ def array( @overload def array( obj: None, - dtype: DTypeLike, + dtype: DTypeLike | None, shape: _ShapeLike, offset: int = 0, strides: tuple[int, ...] | None = None, @@ -291,7 +291,7 @@ def array( shape: _ShapeLike, offset: int = 0, strides: tuple[int, ...] 
| None = None, - formats: DTypeLike, + formats: DTypeLike | None, names: str | Sequence[str] | None = None, titles: str | Sequence[str] | None = None, aligned: bool = False, @@ -301,7 +301,7 @@ def array( @overload def array( obj: _SupportsReadInto, - dtype: DTypeLike, + dtype: DTypeLike | None, shape: _ShapeLike | None = None, offset: int = 0, strides: tuple[int, ...] | None = None, @@ -320,7 +320,7 @@ def array( offset: int = 0, strides: tuple[int, ...] | None = None, *, - formats: DTypeLike, + formats: DTypeLike | None, names: str | Sequence[str] | None = None, titles: str | Sequence[str] | None = None, aligned: bool = False, diff --git a/numpy/_core/shape_base.pyi b/numpy/_core/shape_base.pyi index 9a5f8c3b9d60..b4e8b473ed71 100644 --- a/numpy/_core/shape_base.pyi +++ b/numpy/_core/shape_base.pyi @@ -82,7 +82,7 @@ def vstack( def vstack( tup: Sequence[ArrayLike], *, - dtype: DTypeLike = None, + dtype: DTypeLike | None = None, casting: _CastingKind = "same_kind" ) -> NDArray[Any]: ... @@ -104,7 +104,7 @@ def hstack( def hstack( tup: Sequence[ArrayLike], *, - dtype: DTypeLike = None, + dtype: DTypeLike | None = None, casting: _CastingKind = "same_kind" ) -> NDArray[Any]: ... @@ -132,7 +132,7 @@ def stack( axis: SupportsIndex = 0, out: None = None, *, - dtype: DTypeLike = None, + dtype: DTypeLike | None = None, casting: _CastingKind = "same_kind" ) -> NDArray[Any]: ... @overload diff --git a/numpy/_typing/_dtype_like.py b/numpy/_typing/_dtype_like.py index 526fb86dd322..b44dd06e0d7f 100644 --- a/numpy/_typing/_dtype_like.py +++ b/numpy/_typing/_dtype_like.py @@ -97,7 +97,7 @@ def dtype(self) -> _DTypeT_co: ... # Anything that can be coerced into numpy.dtype. # Reference: https://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html -DTypeLike: TypeAlias = _DTypeLike[Any] | _VoidDTypeLike | str | None +DTypeLike: TypeAlias = _DTypeLike[Any] | _VoidDTypeLike | str # NOTE: while it is possible to provide the dtype as a dict of # dtype-like objects (e.g. 
`{'field1': ..., 'field2': ..., ...}`), diff --git a/numpy/_typing/_ufunc.pyi b/numpy/_typing/_ufunc.pyi index 790149d9c7fb..3f58801004d1 100644 --- a/numpy/_typing/_ufunc.pyi +++ b/numpy/_typing/_ufunc.pyi @@ -110,7 +110,7 @@ class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i where: _ArrayLikeBool_co | None = ..., casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., subok: bool = ..., signature: str | _2Tuple[str | None] = ..., ) -> Any: ... @@ -124,7 +124,7 @@ class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i where: _ArrayLikeBool_co | None = ..., casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., subok: bool = ..., signature: str | _2Tuple[str | None] = ..., ) -> NDArray[Any]: ... @@ -138,7 +138,7 @@ class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i where: _ArrayLikeBool_co | None = ..., casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., subok: bool = ..., signature: str | _2Tuple[str | None] = ..., ) -> Any: ... @@ -242,7 +242,7 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i self, array: ArrayLike, axis: _ShapeLike | None = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., out: NDArray[Any] | EllipsisType | None = ..., keepdims: bool = ..., initial: Any = ..., @@ -253,7 +253,7 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i self, array: ArrayLike, axis: SupportsIndex = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., out: NDArray[Any] | EllipsisType | None = ..., ) -> NDArray[Any]: ... 
@@ -262,7 +262,7 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i array: ArrayLike, indices: _ArrayLikeInt_co, axis: SupportsIndex = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., out: NDArray[Any] | EllipsisType | None = ..., ) -> NDArray[Any]: ... @@ -353,7 +353,7 @@ class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i where: _ArrayLikeBool_co | None = ..., casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., subok: bool = ..., signature: str | _3Tuple[str | None] = ..., ) -> _2Tuple[Any]: ... @@ -369,7 +369,7 @@ class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i where: _ArrayLikeBool_co | None = ..., casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., subok: bool = ..., signature: str | _3Tuple[str | None] = ..., ) -> _2Tuple[NDArray[Any]]: ... @@ -385,7 +385,7 @@ class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i where: _ArrayLikeBool_co | None = ..., casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., subok: bool = ..., signature: str | _3Tuple[str | None] = ..., ) -> _2Tuple[Any]: ... @@ -428,7 +428,7 @@ class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i where: _ArrayLikeBool_co | None = ..., casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., subok: bool = ..., signature: str | _4Tuple[str | None] = ..., ) -> _2Tuple[Any]: ... 
@@ -445,7 +445,7 @@ class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i where: _ArrayLikeBool_co | None = ..., casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., subok: bool = ..., signature: str | _4Tuple[str | None] = ..., ) -> _2Tuple[NDArray[Any]]: ... @@ -486,7 +486,7 @@ class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType, _Signature] *, casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., subok: bool = ..., signature: str | _3Tuple[str | None] = ..., axes: list[_2Tuple[SupportsIndex]] = ..., @@ -501,7 +501,7 @@ class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType, _Signature] *, casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., subok: bool = ..., signature: str | _3Tuple[str | None] = ..., axes: list[_2Tuple[SupportsIndex]] = ..., @@ -672,7 +672,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno /, array: ArrayLike, axis: _ShapeLike | None, - dtype: DTypeLike, + dtype: DTypeLike | None, out: _ArrayT, keepdims: bool = ..., initial: _ScalarLike_co = ..., @@ -684,7 +684,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno /, array: ArrayLike, axis: _ShapeLike | None = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., *, out: _ArrayT | tuple[_ArrayT], keepdims: bool = ..., @@ -697,7 +697,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno /, array: ArrayLike, axis: _ShapeLike | None = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., out: EllipsisType | None = ..., *, keepdims: Literal[True], @@ -710,7 +710,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno /, array: ArrayLike, axis: _ShapeLike | None = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., 
out: EllipsisType | None = ..., keepdims: bool = ..., initial: _ScalarLike_co = ..., @@ -724,7 +724,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno array: ArrayLike, indices: _ArrayLikeInt_co, axis: SupportsIndex, - dtype: DTypeLike, + dtype: DTypeLike | None, out: _ArrayT, ) -> _ArrayT: ... @overload @@ -734,7 +734,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno array: ArrayLike, indices: _ArrayLikeInt_co, axis: SupportsIndex = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., *, out: _ArrayT | tuple[_ArrayT], ) -> _ArrayT: ... @@ -745,7 +745,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno array: ArrayLike, indices: _ArrayLikeInt_co, axis: SupportsIndex = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., out: EllipsisType | None = ..., ) -> NDArray[np.object_]: ... @overload @@ -755,7 +755,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno array: _SupportsArrayUFunc, indices: _ArrayLikeInt_co, axis: SupportsIndex = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = ..., ) -> Any: ... @@ -765,7 +765,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno /, array: ArrayLike, axis: SupportsIndex, - dtype: DTypeLike, + dtype: DTypeLike | None, out: _ArrayT, ) -> _ArrayT: ... @overload @@ -774,7 +774,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno /, array: ArrayLike, axis: SupportsIndex = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., *, out: _ArrayT | tuple[_ArrayT], ) -> _ArrayT: ... 
@@ -784,7 +784,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno /, array: ArrayLike, axis: SupportsIndex = ..., - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., out: EllipsisType | None = ..., ) -> NDArray[np.object_]: ... diff --git a/numpy/ctypeslib/_ctypeslib.pyi b/numpy/ctypeslib/_ctypeslib.pyi index ebe9dbf91f04..4ab670037e2b 100644 --- a/numpy/ctypeslib/_ctypeslib.pyi +++ b/numpy/ctypeslib/_ctypeslib.pyi @@ -111,7 +111,7 @@ def ndpointer( ) -> type[_concrete_ndptr[dtype[_ScalarT]]]: ... @overload def ndpointer( - dtype: DTypeLike, + dtype: DTypeLike | None, ndim: int | None = None, *, shape: _ShapeLike, @@ -126,7 +126,7 @@ def ndpointer( ) -> type[_ndptr[dtype[_ScalarT]]]: ... @overload def ndpointer( - dtype: DTypeLike, + dtype: DTypeLike | None, ndim: int | None = None, shape: None = None, flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = None, diff --git a/numpy/lib/_function_base_impl.pyi b/numpy/lib/_function_base_impl.pyi index bee5da4c58ae..8e58cf342ae0 100644 --- a/numpy/lib/_function_base_impl.pyi +++ b/numpy/lib/_function_base_impl.pyi @@ -219,7 +219,7 @@ def asarray_chkfinite( @overload def asarray_chkfinite( a: Any, - dtype: DTypeLike, + dtype: DTypeLike | None, order: _OrderKACF = None, ) -> NDArray[Any]: ... @@ -472,7 +472,7 @@ def cov( fweights: ArrayLike | None = None, aweights: ArrayLike | None = None, *, - dtype: DTypeLike, + dtype: DTypeLike | None, ) -> NDArray[Any]: ... 
# NOTE `bias` and `ddof` are deprecated and ignored diff --git a/numpy/lib/_npyio_impl.pyi b/numpy/lib/_npyio_impl.pyi index 18fce91382ab..cdef1003cff5 100644 --- a/numpy/lib/_npyio_impl.pyi +++ b/numpy/lib/_npyio_impl.pyi @@ -159,7 +159,7 @@ def loadtxt( @overload def loadtxt( fname: _FName, - dtype: DTypeLike, + dtype: DTypeLike | None, comments: str | Sequence[str] | None = "#", delimiter: str | None = None, converters: Mapping[int | str, Callable[[str], Any]] | Callable[[str], Any] | None = None, @@ -197,7 +197,7 @@ def fromregex( def fromregex( file: _FNameRead, regexp: str | bytes | Pattern[Any], - dtype: DTypeLike, + dtype: DTypeLike | None, encoding: str | None = None, ) -> NDArray[Any]: ... @@ -262,7 +262,7 @@ def genfromtxt( @overload def genfromtxt( fname: _FName, - dtype: DTypeLike, + dtype: DTypeLike | None, comments: str = "#", delimiter: str | int | Iterable[int] | None = None, skip_header: int = 0, diff --git a/numpy/lib/_twodim_base_impl.pyi b/numpy/lib/_twodim_base_impl.pyi index 2c4de938444f..fba688b26206 100644 --- a/numpy/lib/_twodim_base_impl.pyi +++ b/numpy/lib/_twodim_base_impl.pyi @@ -119,7 +119,7 @@ def eye( N: int, M: int | None = None, k: int = 0, - dtype: DTypeLike = ..., # = float + dtype: DTypeLike | None = ..., # = float order: _OrderCF = "C", *, device: L["cpu"] | None = None, @@ -168,7 +168,7 @@ def tri( N: int, M: int | None = None, k: int = 0, - dtype: DTypeLike = ..., # = float + dtype: DTypeLike | None = ..., # = float *, like: _SupportsArrayFunc | None = None ) -> NDArray[Any]: ... diff --git a/numpy/matlib.pyi b/numpy/matlib.pyi index 93bee1975df3..552e518fe9b9 100644 --- a/numpy/matlib.pyi +++ b/numpy/matlib.pyi @@ -559,7 +559,7 @@ def eye(n: int, M: int | None, k: int, dtype: _DTypeLike[_ScalarT], order: _Orde @overload def eye(n: int, M: int | None = None, k: int = 0, *, dtype: _DTypeLike[_ScalarT], order: _Order = "C") -> _Matrix[_ScalarT]: ... 
@overload -def eye(n: int, M: int | None = None, k: int = 0, dtype: npt.DTypeLike = ..., order: _Order = "C") -> _Matrix[Any]: ... +def eye(n: int, M: int | None = None, k: int = 0, dtype: npt.DTypeLike | None = ..., order: _Order = "C") -> _Matrix[Any]: ... # @overload diff --git a/numpy/matrixlib/defmatrix.pyi b/numpy/matrixlib/defmatrix.pyi index 05e4c77303c0..462e8b209d2c 100644 --- a/numpy/matrixlib/defmatrix.pyi +++ b/numpy/matrixlib/defmatrix.pyi @@ -13,5 +13,5 @@ def bmat( ) -> matrix[tuple[int, int], Any]: ... def asmatrix( - data: ArrayLike, dtype: DTypeLike = None + data: ArrayLike, dtype: DTypeLike | None = None ) -> matrix[tuple[int, int], Any]: ... diff --git a/numpy/random/_generator.pyi b/numpy/random/_generator.pyi index dc78a76eda70..b090663a104f 100644 --- a/numpy/random/_generator.pyi +++ b/numpy/random/_generator.pyi @@ -462,7 +462,7 @@ class Generator: low: int, high: int | None = None, size: None = None, - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., endpoint: bool = False, ) -> Any: ... @overload @@ -471,7 +471,7 @@ class Generator: low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, - dtype: DTypeLike = ..., + dtype: DTypeLike | None = ..., endpoint: bool = False, ) -> NDArray[Any]: ... 
diff --git a/numpy/typing/tests/data/pass/arithmetic.py b/numpy/typing/tests/data/pass/arithmetic.py index 62c978a2fc51..e347ec096e21 100644 --- a/numpy/typing/tests/data/pass/arithmetic.py +++ b/numpy/typing/tests/data/pass/arithmetic.py @@ -29,7 +29,7 @@ class Object: - def __array__(self, dtype: np.typing.DTypeLike = None, + def __array__(self, dtype: np.typing.DTypeLike | None = None, copy: bool | None = None) -> np.ndarray[Any, np.dtype[np.object_]]: ret = np.empty((), dtype=object) ret[()] = self From aed883c3892376b8277855d86153c3530897595b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 2 Oct 2025 17:06:34 +0000 Subject: [PATCH 0526/1718] MAINT: Bump github/codeql-action from 3.30.5 to 3.30.6 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.30.5 to 3.30.6. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/3599b3baa15b485a2e49ef411a7a4bb2452e7f93...64d10c13136e1c5bce3e5fbde8d4906eeaafc885) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.30.6 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 9f015242f94b..0b055370119d 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. 
- name: Initialize CodeQL - uses: github/codeql-action/init@3599b3baa15b485a2e49ef411a7a4bb2452e7f93 # v3.30.5 + uses: github/codeql-action/init@64d10c13136e1c5bce3e5fbde8d4906eeaafc885 # v3.30.6 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@3599b3baa15b485a2e49ef411a7a4bb2452e7f93 # v3.30.5 + uses: github/codeql-action/autobuild@64d10c13136e1c5bce3e5fbde8d4906eeaafc885 # v3.30.6 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@3599b3baa15b485a2e49ef411a7a4bb2452e7f93 # v3.30.5 + uses: github/codeql-action/analyze@64d10c13136e1c5bce3e5fbde8d4906eeaafc885 # v3.30.6 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 1855c3c2d54b..3ea1074c5a9d 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
- name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@3599b3baa15b485a2e49ef411a7a4bb2452e7f93 # v2.1.27 + uses: github/codeql-action/upload-sarif@64d10c13136e1c5bce3e5fbde8d4906eeaafc885 # v2.1.27 with: sarif_file: results.sarif From 9a35bbd0274f48bf9aac2f806110734037717a11 Mon Sep 17 00:00:00 2001 From: Britney Whittington Date: Tue, 30 Sep 2025 20:58:15 -0400 Subject: [PATCH 0527/1718] TST: Remove parametrize data races --- numpy/_core/tests/test_nditer.py | 5 +++-- numpy/_core/tests/test_ufunc.py | 5 +++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/numpy/_core/tests/test_nditer.py b/numpy/_core/tests/test_nditer.py index f0e10333808c..6396a19fe97f 100644 --- a/numpy/_core/tests/test_nditer.py +++ b/numpy/_core/tests/test_nditer.py @@ -2895,7 +2895,7 @@ def _is_buffered(iterator): return True return False -@pytest.mark.parametrize("a", +@pytest.mark.parametrize("arrs", [np.zeros((3,), dtype='f8'), np.zeros((9876, 3 * 5), dtype='f8')[::2, :], np.zeros((4, 312, 124, 3), dtype='f8')[::2, :, ::2, :], @@ -2904,10 +2904,11 @@ def _is_buffered(iterator): np.zeros((9,), dtype='f8')[::3], np.zeros((9876, 3 * 10), dtype='f8')[::2, ::5], np.zeros((4, 312, 124, 3), dtype='f8')[::2, :, ::2, ::-1]]) -def test_iter_writemasked(a): +def test_iter_writemasked(arrs): # Note, the slicing above is to ensure that nditer cannot combine multiple # axes into one. The repetition is just to make things a bit more # interesting. 
+ a = arrs.copy() shape = a.shape reps = shape[-1] // 3 msk = np.empty(shape, dtype=bool) diff --git a/numpy/_core/tests/test_ufunc.py b/numpy/_core/tests/test_ufunc.py index 976f9fe2f615..d5f85d4b5c3b 100644 --- a/numpy/_core/tests/test_ufunc.py +++ b/numpy/_core/tests/test_ufunc.py @@ -1768,10 +1768,11 @@ def identityless_reduce_arrs(): a = a[1:, 1:, 1:] yield a - @pytest.mark.parametrize("a", identityless_reduce_arrs()) + @pytest.mark.parametrize("arrs", identityless_reduce_arrs()) @pytest.mark.parametrize("pos", [(1, 0, 0), (0, 1, 0), (0, 0, 1)]) - def test_identityless_reduction(self, a, pos): + def test_identityless_reduction(self, arrs, pos): # np.minimum.reduce is an identityless reduction + a = arrs.copy() a[...] = 1 a[pos] = 0 From f6402e9cf270bf79ad1dc1ba9585c81ff0df3fa3 Mon Sep 17 00:00:00 2001 From: Britney Whittington Date: Thu, 2 Oct 2025 14:26:00 -0400 Subject: [PATCH 0528/1718] TST: Fix roundtrip thread safety --- numpy/lib/tests/test_io.py | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py index b8c8e6cfcac1..b4b0db36ae2a 100644 --- a/numpy/lib/tests/test_io.py +++ b/numpy/lib/tests/test_io.py @@ -125,8 +125,6 @@ def roundtrip(self, save_func, *args, **kwargs): arr_reloaded = np.load(load_file, **load_kwds) - self.arr = arr - self.arr_reloaded = arr_reloaded finally: if not isinstance(target_file, BytesIO): target_file.close() @@ -135,6 +133,8 @@ def roundtrip(self, save_func, *args, **kwargs): if not isinstance(arr_reloaded, np.lib.npyio.NpzFile): os.remove(target_file.name) + return arr, arr_reloaded + def check_roundtrips(self, a): self.roundtrip(a) self.roundtrip(a, file_on_disk=True) @@ -195,26 +195,26 @@ def test_format_2_0(self): class TestSaveLoad(RoundtripTest): def roundtrip(self, *args, **kwargs): - RoundtripTest.roundtrip(self, np.save, *args, **kwargs) - assert_equal(self.arr[0], self.arr_reloaded) - assert_equal(self.arr[0].dtype, 
self.arr_reloaded.dtype) - assert_equal(self.arr[0].flags.fnc, self.arr_reloaded.flags.fnc) + arr, arr_reloaded = RoundtripTest.roundtrip(self, np.save, *args, **kwargs) + assert_equal(arr[0], arr_reloaded) + assert_equal(arr[0].dtype, arr_reloaded.dtype) + assert_equal(arr[0].flags.fnc, arr_reloaded.flags.fnc) class TestSavezLoad(RoundtripTest): def roundtrip(self, *args, **kwargs): - RoundtripTest.roundtrip(self, np.savez, *args, **kwargs) + arr, arr_reloaded = RoundtripTest.roundtrip(self, np.savez, *args, **kwargs) try: - for n, arr in enumerate(self.arr): - reloaded = self.arr_reloaded['arr_%d' % n] - assert_equal(arr, reloaded) - assert_equal(arr.dtype, reloaded.dtype) - assert_equal(arr.flags.fnc, reloaded.flags.fnc) + for n, a in enumerate(arr): + reloaded = arr_reloaded['arr_%d' % n] + assert_equal(a, reloaded) + assert_equal(a.dtype, reloaded.dtype) + assert_equal(a.flags.fnc, reloaded.flags.fnc) finally: # delete tempfile, must be done here on windows - if self.arr_reloaded.fid: - self.arr_reloaded.fid.close() - os.remove(self.arr_reloaded.fid.name) + if arr_reloaded.fid: + arr_reloaded.fid.close() + os.remove(arr_reloaded.fid.name) def test_load_non_npy(self): """Test loading non-.npy files and name mapping in .npz.""" From 420967e2b7db85a7849b9fa1822b526a6bf279b0 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Fri, 3 Oct 2025 12:02:47 +0100 Subject: [PATCH 0529/1718] MAINT: Add Cython linter to spin (#29861) Add Cython linter and fix e.g. unused variables. 
--- environment.yml | 1 + numpy/_core/tests/examples/cython/checks.pyx | 4 ++-- numpy/random/_common.pyx | 10 +++------- .../cython/extending_distributions.pyx | 1 - numpy/random/_generator.pyx | 11 ++--------- numpy/random/_philox.pyx | 2 -- numpy/random/bit_generator.pyx | 1 - numpy/random/mtrand.pyx | 7 +------ requirements/linter_requirements.txt | 1 + tools/linter.py | 19 +++++++++++++++++++ 10 files changed, 29 insertions(+), 28 deletions(-) diff --git a/environment.yml b/environment.yml index 6eeddb2311ea..3bf6fcbc4319 100644 --- a/environment.yml +++ b/environment.yml @@ -45,6 +45,7 @@ dependencies: # NOTE: breathe 4.33.0 collides with sphinx.ext.graphviz - breathe>4.33.0 # For linting + - cython-lint - ruff=0.12.0 - gitpython # Used in some tests diff --git a/numpy/_core/tests/examples/cython/checks.pyx b/numpy/_core/tests/examples/cython/checks.pyx index 57df05c1e3b5..f0f427d2167f 100644 --- a/numpy/_core/tests/examples/cython/checks.pyx +++ b/numpy/_core/tests/examples/cython/checks.pyx @@ -244,9 +244,9 @@ def npyiter_has_multi_index(it: "nditer"): def test_get_multi_index_iter_next(it: "nditer", cnp.ndarray[cnp.float64_t, ndim=2] arr): cdef cnp.NpyIter* cit = npyiter_from_nditer_obj(it) - cdef cnp.NpyIter_GetMultiIndexFunc get_multi_index = \ + cdef cnp.NpyIter_GetMultiIndexFunc _get_multi_index = \ cnp.NpyIter_GetGetMultiIndex(cit, NULL) - cdef cnp.NpyIter_IterNextFunc iternext = \ + cdef cnp.NpyIter_IterNextFunc _iternext = \ cnp.NpyIter_GetIterNext(cit, NULL) return 1 diff --git a/numpy/random/_common.pyx b/numpy/random/_common.pyx index f8420b3951cc..1fc2f7a02e11 100644 --- a/numpy/random/_common.pyx +++ b/numpy/random/_common.pyx @@ -240,7 +240,6 @@ cdef np.ndarray int_to_array(object value, object name, object bits, object uint cdef validate_output_shape(iter_shape, np.ndarray output): cdef np.npy_intp *dims cdef np.npy_intp ndim, i - cdef bint error dims = np.PyArray_DIMS(output) ndim = np.PyArray_NDIM(output) output_shape = tuple((dims[i] for i 
in range(ndim))) @@ -296,7 +295,7 @@ cdef object double_fill(void *func, bitgen_t *state, object size, object lock, o cdef double out_val cdef double *out_array_data cdef np.ndarray out_array - cdef np.npy_intp i, n + cdef np.npy_intp n if size is None and out is None: with lock: @@ -320,7 +319,7 @@ cdef object float_fill(void *func, bitgen_t *state, object size, object lock, ob cdef float out_val cdef float *out_array_data cdef np.ndarray out_array - cdef np.npy_intp i, n + cdef np.npy_intp n if size is None and out is None: with lock: @@ -427,7 +426,6 @@ cdef int check_array_constraint(np.ndarray val, object name, constraint_type con cdef int check_constraint(double val, object name, constraint_type cons) except -1: - cdef bint is_nan if cons == CONS_NON_NEGATIVE: if not isnan(val) and signbit(val): raise ValueError(f"{name} < 0") @@ -760,7 +758,6 @@ cdef object discrete_broadcast_di(void *func, void *state, object size, object l np.ndarray a_arr, object a_name, constraint_type a_constraint, np.ndarray b_arr, object b_name, constraint_type b_constraint): cdef np.ndarray randoms - cdef int64_t *randoms_data cdef np.broadcast it cdef random_uint_di f = (func) cdef np.npy_intp i, n @@ -777,7 +774,6 @@ cdef object discrete_broadcast_di(void *func, void *state, object size, object l it = np.PyArray_MultiIterNew2(a_arr, b_arr) randoms = np.empty(it.shape, np.int64) - randoms_data = np.PyArray_DATA(randoms) n = np.PyArray_SIZE(randoms) it = np.PyArray_MultiIterNew3(randoms, a_arr, b_arr) @@ -1047,7 +1043,7 @@ cdef object cont_f(void *func, bitgen_t *state, object size, object lock, object a, object a_name, constraint_type a_constraint, object out): - cdef np.ndarray a_arr, b_arr, c_arr + cdef np.ndarray a_arr cdef float _a cdef bint is_scalar = True cdef int requirements = np.NPY_ARRAY_ALIGNED | np.NPY_ARRAY_FORCECAST diff --git a/numpy/random/_examples/cython/extending_distributions.pyx b/numpy/random/_examples/cython/extending_distributions.pyx index 
e1d1ea6c820b..8de722686304 100644 --- a/numpy/random/_examples/cython/extending_distributions.pyx +++ b/numpy/random/_examples/cython/extending_distributions.pyx @@ -92,7 +92,6 @@ def uniforms_ex(bit_generator, Py_ssize_t n, dtype=np.float64): Desired dtype, either 'd' (or 'float64') or 'f' (or 'float32'). The default dtype value is 'd' """ - cdef Py_ssize_t i cdef bitgen_t *rng cdef const char *capsule_name = "BitGenerator" cdef np.ndarray randoms diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index 0fabf98147c3..d9c2730de8ca 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -16,13 +16,11 @@ from numpy.lib.array_utils import normalize_axis_index from .c_distributions cimport * from libc cimport string from libc.math cimport sqrt -from libc.stdint cimport (uint8_t, uint16_t, uint32_t, uint64_t, - int32_t, int64_t, INT64_MAX, SIZE_MAX) +from libc.stdint cimport (uint64_t, int64_t, INT64_MAX, SIZE_MAX) from ._bounded_integers cimport (_rand_bool, _rand_int32, _rand_int64, _rand_int16, _rand_int8, _rand_uint64, _rand_uint32, _rand_uint16, _rand_uint8, _gen_mask) from ._pcg64 import PCG64 -from ._mt19937 import MT19937 from numpy.random cimport bitgen_t from ._common cimport (POISSON_LAM_MAX, CONS_POSITIVE, CONS_NONE, CONS_NON_NEGATIVE, CONS_BOUNDED_0_1, CONS_BOUNDED_GT_0_1, @@ -352,7 +350,6 @@ cdef class Generator: [-1.23204345, -1.75224494]]) """ - cdef double temp _dtype = np.dtype(dtype) if _dtype == np.float64: return double_fill(&random_standard_uniform_fill, &self._bitgen, size, self.lock, out) @@ -845,7 +842,7 @@ cdef class Generator: """ - cdef int64_t val, t, loc, size_i, pop_size_i + cdef int64_t val, loc, size_i, pop_size_i cdef int64_t *idx_data cdef np.npy_intp j cdef uint64_t set_size, mask @@ -1080,7 +1077,6 @@ cdef class Generator: >>> plt.show() """ - cdef bint is_scalar = True cdef np.ndarray alow, ahigh, arange cdef double _low, _high, rng cdef object temp @@ -1370,7 +1366,6 @@ cdef class 
Generator: >>> plt.show() """ - cdef void *func _dtype = np.dtype(dtype) if _dtype == np.float64: return cont(&random_standard_gamma, &self._bitgen, size, self.lock, 1, @@ -2932,7 +2927,6 @@ cdef class Generator: >>> plt.show() """ - cdef bint is_scalar = True cdef double fleft, fmode, fright cdef np.ndarray oleft, omode, oright @@ -3595,7 +3589,6 @@ cdef class Generator: """ cdef double HYPERGEOM_MAX = 10**9 - cdef bint is_scalar = True cdef np.ndarray ongood, onbad, onsample cdef int64_t lngood, lnbad, lnsample diff --git a/numpy/random/_philox.pyx b/numpy/random/_philox.pyx index 5faa281818fd..3f33c7078f83 100644 --- a/numpy/random/_philox.pyx +++ b/numpy/random/_philox.pyx @@ -1,7 +1,5 @@ #cython: binding=True -from cpython.pycapsule cimport PyCapsule_New - import numpy as np cimport numpy as np diff --git a/numpy/random/bit_generator.pyx b/numpy/random/bit_generator.pyx index fbedb0fd5786..d9a733c7a618 100644 --- a/numpy/random/bit_generator.pyx +++ b/numpy/random/bit_generator.pyx @@ -34,7 +34,6 @@ SOFTWARE. 
""" import abc -import sys from itertools import cycle import re from secrets import randbits diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx index 719d1d860d3c..2bf6441bb368 100644 --- a/numpy/random/mtrand.pyx +++ b/numpy/random/mtrand.pyx @@ -380,7 +380,6 @@ cdef class RandomState: if len(state) > 3: st['has_gauss'] = state[3] st['gauss'] = state[4] - value = st self._aug_state.gauss = st.get('gauss', 0.0) self._aug_state.has_gauss = st.get('has_gauss', 0) @@ -437,7 +436,6 @@ cdef class RandomState: [-1.23204345, -1.75224494]]) """ - cdef double temp return double_fill(&random_standard_uniform_fill, &self._bitgen, size, self.lock, None) def random(self, size=None): @@ -1159,7 +1157,6 @@ cdef class RandomState: >>> plt.show() """ - cdef bint is_scalar = True cdef np.ndarray alow, ahigh, arange cdef double _low, _high, range cdef object temp @@ -3325,7 +3322,6 @@ cdef class RandomState: >>> plt.show() """ - cdef bint is_scalar = True cdef double fleft, fmode, fright cdef np.ndarray oleft, omode, oright @@ -3947,7 +3943,6 @@ cdef class RandomState: # answer = 0.003 ... pretty unlikely! 
""" - cdef bint is_scalar = True cdef np.ndarray ongood, onbad, onsample cdef int64_t lngood, lnbad, lnsample @@ -4248,7 +4243,7 @@ cdef class RandomState: # GH10839, ensure double to make tol meaningful cov = cov.astype(np.double) - (u, s, v) = svd(cov) + (_u, s, v) = svd(cov) if check_valid != 'ignore': if check_valid != 'warn' and check_valid != 'raise': diff --git a/requirements/linter_requirements.txt b/requirements/linter_requirements.txt index 05319a9bdb8a..a28b989fbb03 100644 --- a/requirements/linter_requirements.txt +++ b/requirements/linter_requirements.txt @@ -1,3 +1,4 @@ # keep in sync with `environment.yml` +cython-lint ruff==0.12.0 GitPython>=3.1.30 diff --git a/tools/linter.py b/tools/linter.py index f614be100159..4e9aed85054a 100644 --- a/tools/linter.py +++ b/tools/linter.py @@ -31,6 +31,18 @@ def run_ruff(self, fix: bool) -> tuple[int, str]: ) return res.returncode, res.stdout + def run_cython_lint(self) -> tuple[int, str]: + print("Running cython-lint...") + command = ["cython-lint", "--no-pycodestyle", "numpy"] + + res = subprocess.run( + command, + stdout=subprocess.PIPE, + cwd=self.repository_root, + encoding="utf-8", + ) + return res.returncode, res.stdout + def run_lint(self, fix: bool) -> None: # Ruff Linter @@ -44,6 +56,13 @@ def run_lint(self, fix: bool) -> None: retcode, c_API_errors = self.run_check_c_api() c_API_errors and print(c_API_errors) + if retcode: + sys.exit(retcode) + + # Cython Linter + retcode, cython_errors = self.run_cython_lint() + cython_errors and print(cython_errors) + sys.exit(retcode) def run_check_c_api(self) -> tuple[int, str]: From 538654066cb0f44c8b816c80420f76dcd56f3d9c Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Fri, 3 Oct 2025 13:03:27 +0200 Subject: [PATCH 0530/1718] ENH: Improve performance of numpy scalar __copy__ and __deepcopy__ (#29656) The numpy scalars are immutable (expect for subclasses of np.void), so copy or deepcopy can return the same object. 
This improves performance, since the current implementation converts to array, copies and converts back to scalar. --- numpy/_core/src/multiarray/scalartypes.c.src | 35 +++++++++++++++++--- numpy/_core/tests/test_multiarray.py | 14 ++++++++ 2 files changed, 45 insertions(+), 4 deletions(-) diff --git a/numpy/_core/src/multiarray/scalartypes.c.src b/numpy/_core/src/multiarray/scalartypes.c.src index f84c120ad409..ad49de0c231e 100644 --- a/numpy/_core/src/multiarray/scalartypes.c.src +++ b/numpy/_core/src/multiarray/scalartypes.c.src @@ -2093,12 +2093,11 @@ gentype_wraparray(PyObject *NPY_UNUSED(scalar), PyObject *args) /* * These gentype_* functions do not take keyword arguments. - * The proper flag is METH_VARARGS. + * The proper flag is METH_VARARGS or METH_NOARGS. */ /**begin repeat * - * #name = tolist, item, __deepcopy__, __copy__, - * swapaxes, conj, conjugate, nonzero, + * #name = tolist, item, swapaxes, conj, conjugate, nonzero, * fill, transpose# */ static PyObject * @@ -2108,6 +2107,34 @@ gentype_@name@(PyObject *self, PyObject *args) } /**end repeat**/ +static PyObject * +gentype___copy__(PyObject *self) +{ + // scalars are immutable, so we can return a new reference + // the only expections are scalars with void dtype + if (PyObject_IsInstance(self, (PyObject *)&PyVoidArrType_Type)) { + // path via array + return gentype_generic_method(self, NULL, NULL, "__copy__"); + } + return Py_NewRef(self); +} + +static PyObject * +gentype___deepcopy__(PyObject *self, PyObject *args) +{ + // note: maybe the signature needs to be updated as __deepcopy__ can accept the keyword memo + + // scalars are immutable, so we can return a new reference + // the only expections are scalars with void dtype + // if the number of arguments is not 1, we let gentype_generic_method do the + // error handling + if (PyObject_IsInstance(self, (PyObject *)&PyVoidArrType_Type) || (PyTuple_Size(args)!=1)) { + // path via array + return gentype_generic_method(self, args, NULL, 
"__deepcopy__"); + } + return Py_NewRef(self); +} + static PyObject * gentype_byteswap(PyObject *self, PyObject *args, PyObject *kwds) { @@ -2652,7 +2679,7 @@ static PyMethodDef gentype_methods[] = { /* for the copy module */ {"__copy__", (PyCFunction)gentype___copy__, - METH_VARARGS, NULL}, + METH_NOARGS, NULL}, {"__deepcopy__", (PyCFunction)gentype___deepcopy__, METH_VARARGS, NULL}, diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 347c92e48c73..bf339c07f068 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -2476,6 +2476,20 @@ def test__deepcopy__(self, dtype): with pytest.raises(AssertionError): assert_array_equal(a, b) + def test__deepcopy___void_scalar(self): + # see comments in gh-29643 + value = np.void('Rex', dtype=[('name', 'U10')]) + value_deepcopy = value.__deepcopy__(None) + value[0] = None + assert value_deepcopy[0] == 'Rex' + + @pytest.mark.parametrize("sctype", [np.int64, np.float32, np.float64]) + def test__deepcopy__scalar(self, sctype): + # test optimization from gh-29656 + value = sctype(1.1) + value_deepcopy = value.__deepcopy__(None) + assert value is value_deepcopy + def test__deepcopy__catches_failure(self): class MyObj: def __deepcopy__(self, *args, **kwargs): From 70e5ac69e1a85389f67e85ff2981c441629f4b8d Mon Sep 17 00:00:00 2001 From: Alverok Date: Fri, 3 Oct 2025 14:54:27 +0530 Subject: [PATCH 0531/1718] DOC: Add warning and examples for sliding_window_view [skip azp][skip actions][skip cirrus] --- numpy/lib/_stride_tricks_impl.py | 33 ++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/numpy/lib/_stride_tricks_impl.py b/numpy/lib/_stride_tricks_impl.py index d4780783a638..f6f3b8370a72 100644 --- a/numpy/lib/_stride_tricks_impl.py +++ b/numpy/lib/_stride_tricks_impl.py @@ -173,6 +173,14 @@ def sliding_window_view(x, window_shape, axis=None, *, Notes ----- + .. 
warning:: + + This function creates views with overlapping memory. When + ``writeable=True``, writing to the view will modify the original array + and may affect multiple view positions. See the examples below and + :doc:`this guide ` + about the difference between copies and views. + For many applications using a sliding window view can be convenient, but potentially very slow. Often specialized solutions exist, for example: @@ -297,6 +305,31 @@ def sliding_window_view(x, window_shape, axis=None, *, >>> moving_average array([1., 2., 3., 4.]) + The two examples below demonstrate the effect of ``writeable=True``. + + Creating a view with the default ``writeable=False`` and then writing to + it raises an error. + + >>> v = sliding_window_view(x, 3) + >>> v[0,1] = 10 + Traceback (most recent call last): + ... + ValueError: assignment destination is read-only + + Creating a view with ``writeable=True`` and then writing to it changes + the original array and multiple view positions. + + >>> x = np.arange(6) # reset x for the second example + >>> v = sliding_window_view(x, 3, writeable=True) + >>> v[0,1] = 10 + >>> x + array([ 0, 10, 2, 3, 4, 5]) + >>> v + array([[ 0, 10, 2], + [10, 2, 3], + [ 2, 3, 4], + [ 3, 4, 5]]) + Note that a sliding window approach is often **not** optimal (see Notes). """ window_shape = (tuple(window_shape) From f001a708ef4fade19b85ec59c6fc5f3934e90d6b Mon Sep 17 00:00:00 2001 From: Swayam Date: Sun, 5 Oct 2025 18:56:44 +0530 Subject: [PATCH 0532/1718] ENH, FEAT: Reorganize finfo and add new constant slot (#29836) Introduce `NPY_DT_get_constant` as a slot and a corresponding function that fills in a single element pointer for an arbitrary (corresponding) dtype/descriptor. Since we do want some integer values for `finfo`, some values are set to always fill in an `npy_intp` value instead. The slot assumes that it will be used with the GIL held and on uninitialized data, since I suspect that this is the typical use-case (i.e. 
it is unlikely that we need to fetch a constant deep during a calculation, especially in a context where the correct type/value isn't known anyway). This completely re-organizes and simplifies `finfo` by moving all definitions to C. Co-authored-by: Sebastian Berg --- numpy/__init__.py | 3 - numpy/_core/__init__.py | 10 +- numpy/_core/_machar.py | 355 ------------- numpy/_core/_machar.pyi | 55 -- numpy/_core/getlimits.py | 476 ++++-------------- numpy/_core/include/numpy/dtype_api.h | 39 +- numpy/_core/meson.build | 2 - numpy/_core/src/multiarray/arraytypes.c.src | 203 +++++++- numpy/_core/src/multiarray/dtypemeta.c | 41 +- numpy/_core/src/multiarray/dtypemeta.h | 11 +- numpy/_core/src/multiarray/multiarraymodule.c | 105 ++++ numpy/_core/src/multiarray/usertypes.c | 5 +- numpy/_core/tests/test_deprecations.py | 8 - numpy/_core/tests/test_finfo.py | 83 +++ numpy/_core/tests/test_getlimits.py | 54 +- numpy/_core/tests/test_longdouble.py | 3 +- numpy/_core/tests/test_machar.py | 30 -- numpy/_core/tests/test_numeric.py | 4 +- .../typing/tests/data/reveal/arrayterator.pyi | 2 +- 19 files changed, 573 insertions(+), 916 deletions(-) delete mode 100644 numpy/_core/_machar.py delete mode 100644 numpy/_core/_machar.pyi create mode 100644 numpy/_core/tests/test_finfo.py delete mode 100644 numpy/_core/tests/test_machar.py diff --git a/numpy/__init__.py b/numpy/__init__.py index d6702bba4622..7bfc12119694 100644 --- a/numpy/__init__.py +++ b/numpy/__init__.py @@ -677,9 +677,6 @@ from ._array_api_info import __array_namespace_info__ - # now that numpy core module is imported, can initialize limits - _core.getlimits._register_known_types() - __all__ = list( __numpy_submodules__ | set(_core.__all__) | diff --git a/numpy/_core/__init__.py b/numpy/_core/__init__.py index b0be8d1cbab6..7b53022c3bc3 100644 --- a/numpy/_core/__init__.py +++ b/numpy/_core/__init__.py @@ -106,15 +106,7 @@ from .numerictypes import sctypeDict, sctypes multiarray.set_typeDict(nt.sctypeDict) -from . 
import ( - _machar, - einsumfunc, - fromnumeric, - function_base, - getlimits, - numeric, - shape_base, -) +from . import einsumfunc, fromnumeric, function_base, getlimits, numeric, shape_base from .einsumfunc import * from .fromnumeric import * from .function_base import * diff --git a/numpy/_core/_machar.py b/numpy/_core/_machar.py deleted file mode 100644 index b49742a15802..000000000000 --- a/numpy/_core/_machar.py +++ /dev/null @@ -1,355 +0,0 @@ -""" -Machine arithmetic - determine the parameters of the -floating-point arithmetic system - -Author: Pearu Peterson, September 2003 - -""" -__all__ = ['MachAr'] - -from ._ufunc_config import errstate -from .fromnumeric import any - -# Need to speed this up...especially for longdouble - -# Deprecated 2021-10-20, NumPy 1.22 -class MachAr: - """ - Diagnosing machine parameters. - - Attributes - ---------- - ibeta : int - Radix in which numbers are represented. - it : int - Number of base-`ibeta` digits in the floating point mantissa M. - machep : int - Exponent of the smallest (most negative) power of `ibeta` that, - added to 1.0, gives something different from 1.0 - eps : float - Floating-point number ``beta**machep`` (floating point precision) - negep : int - Exponent of the smallest power of `ibeta` that, subtracted - from 1.0, gives something different from 1.0. - epsneg : float - Floating-point number ``beta**negep``. - iexp : int - Number of bits in the exponent (including its sign and bias). - minexp : int - Smallest (most negative) power of `ibeta` consistent with there - being no leading zeros in the mantissa. - xmin : float - Floating-point number ``beta**minexp`` (the smallest [in - magnitude] positive floating point number with full precision). - maxexp : int - Smallest (positive) power of `ibeta` that causes overflow. - xmax : float - ``(1-epsneg) * beta**maxexp`` (the largest [in magnitude] - usable floating value). 
- irnd : int - In ``range(6)``, information on what kind of rounding is done - in addition, and on how underflow is handled. - ngrd : int - Number of 'guard digits' used when truncating the product - of two mantissas to fit the representation. - epsilon : float - Same as `eps`. - tiny : float - An alias for `smallest_normal`, kept for backwards compatibility. - huge : float - Same as `xmax`. - precision : float - ``- int(-log10(eps))`` - resolution : float - ``- 10**(-precision)`` - smallest_normal : float - The smallest positive floating point number with 1 as leading bit in - the mantissa following IEEE-754. Same as `xmin`. - smallest_subnormal : float - The smallest positive floating point number with 0 as leading bit in - the mantissa following IEEE-754. - - Parameters - ---------- - float_conv : function, optional - Function that converts an integer or integer array to a float - or float array. Default is `float`. - int_conv : function, optional - Function that converts a float or float array to an integer or - integer array. Default is `int`. - float_to_float : function, optional - Function that converts a float array to float. Default is `float`. - Note that this does not seem to do anything useful in the current - implementation. - float_to_str : function, optional - Function that converts a single float to a string. Default is - ``lambda v:'%24.16e' %v``. - title : str, optional - Title that is printed in the string representation of `MachAr`. - - See Also - -------- - finfo : Machine limits for floating point types. - iinfo : Machine limits for integer types. - - References - ---------- - .. [1] Press, Teukolsky, Vetterling and Flannery, - "Numerical Recipes in C++," 2nd ed, - Cambridge University Press, 2002, p. 31. 
- - """ - - def __init__(self, float_conv=float, int_conv=int, - float_to_float=float, - float_to_str=lambda v: f'{v:24.16e}', - title='Python floating point number'): - """ - - float_conv - convert integer to float (array) - int_conv - convert float (array) to integer - float_to_float - convert float array to float - float_to_str - convert array float to str - title - description of used floating point numbers - - """ - # We ignore all errors here because we are purposely triggering - # underflow to detect the properties of the running arch. - with errstate(under='ignore'): - self._do_init(float_conv, int_conv, float_to_float, float_to_str, title) - - def _do_init(self, float_conv, int_conv, float_to_float, float_to_str, title): - max_iterN = 10000 - msg = "Did not converge after %d tries with %s" - one = float_conv(1) - two = one + one - zero = one - one - - # Do we really need to do this? Aren't they 2 and 2.0? - # Determine ibeta and beta - a = one - for _ in range(max_iterN): - a = a + a - temp = a + one - temp1 = temp - a - if any(temp1 - one != zero): - break - else: - raise RuntimeError(msg % (_, one.dtype)) - b = one - for _ in range(max_iterN): - b = b + b - temp = a + b - itemp = int_conv(temp - a) - if any(itemp != 0): - break - else: - raise RuntimeError(msg % (_, one.dtype)) - ibeta = itemp - beta = float_conv(ibeta) - - # Determine it and irnd - it = -1 - b = one - for _ in range(max_iterN): - it = it + 1 - b = b * beta - temp = b + one - temp1 = temp - b - if any(temp1 - one != zero): - break - else: - raise RuntimeError(msg % (_, one.dtype)) - - betah = beta / two - a = one - for _ in range(max_iterN): - a = a + a - temp = a + one - temp1 = temp - a - if any(temp1 - one != zero): - break - else: - raise RuntimeError(msg % (_, one.dtype)) - temp = a + betah - irnd = 0 - if any(temp - a != zero): - irnd = 1 - tempa = a + beta - temp = tempa + betah - if irnd == 0 and any(temp - tempa != zero): - irnd = 2 - - # Determine negep and epsneg - negep = it 
+ 3 - betain = one / beta - a = one - for i in range(negep): - a = a * betain - b = a - for _ in range(max_iterN): - temp = one - a - if any(temp - one != zero): - break - a = a * beta - negep = negep - 1 - # Prevent infinite loop on PPC with gcc 4.0: - if negep < 0: - raise RuntimeError("could not determine machine tolerance " - "for 'negep', locals() -> %s" % (locals())) - else: - raise RuntimeError(msg % (_, one.dtype)) - negep = -negep - epsneg = a - - # Determine machep and eps - machep = - it - 3 - a = b - - for _ in range(max_iterN): - temp = one + a - if any(temp - one != zero): - break - a = a * beta - machep = machep + 1 - else: - raise RuntimeError(msg % (_, one.dtype)) - eps = a - - # Determine ngrd - ngrd = 0 - temp = one + eps - if irnd == 0 and any(temp * one - one != zero): - ngrd = 1 - - # Determine iexp - i = 0 - k = 1 - z = betain - t = one + eps - nxres = 0 - for _ in range(max_iterN): - y = z - z = y * y - a = z * one # Check here for underflow - temp = z * t - if any(a + a == zero) or any(abs(z) >= y): - break - temp1 = temp * betain - if any(temp1 * beta == z): - break - i = i + 1 - k = k + k - else: - raise RuntimeError(msg % (_, one.dtype)) - if ibeta != 10: - iexp = i + 1 - mx = k + k - else: - iexp = 2 - iz = ibeta - while k >= iz: - iz = iz * ibeta - iexp = iexp + 1 - mx = iz + iz - 1 - - # Determine minexp and xmin - for _ in range(max_iterN): - xmin = y - y = y * betain - a = y * one - temp = y * t - if any((a + a) != zero) and any(abs(y) < xmin): - k = k + 1 - temp1 = temp * betain - if any(temp1 * beta == y) and any(temp != y): - nxres = 3 - xmin = y - break - else: - break - else: - raise RuntimeError(msg % (_, one.dtype)) - minexp = -k - - # Determine maxexp, xmax - if mx <= k + k - 3 and ibeta != 10: - mx = mx + mx - iexp = iexp + 1 - maxexp = mx + minexp - irnd = irnd + nxres - if irnd >= 2: - maxexp = maxexp - 2 - i = maxexp + minexp - if ibeta == 2 and not i: - maxexp = maxexp - 1 - if i > 20: - maxexp = maxexp - 1 - if any(a 
!= y): - maxexp = maxexp - 2 - xmax = one - epsneg - if any(xmax * one != xmax): - xmax = one - beta * epsneg - xmax = xmax / (xmin * beta * beta * beta) - i = maxexp + minexp + 3 - for j in range(i): - if ibeta == 2: - xmax = xmax + xmax - else: - xmax = xmax * beta - - smallest_subnormal = abs(xmin / beta ** (it)) - - self.ibeta = ibeta - self.it = it - self.negep = negep - self.epsneg = float_to_float(epsneg) - self._str_epsneg = float_to_str(epsneg) - self.machep = machep - self.eps = float_to_float(eps) - self._str_eps = float_to_str(eps) - self.ngrd = ngrd - self.iexp = iexp - self.minexp = minexp - self.xmin = float_to_float(xmin) - self._str_xmin = float_to_str(xmin) - self.maxexp = maxexp - self.xmax = float_to_float(xmax) - self._str_xmax = float_to_str(xmax) - self.irnd = irnd - - self.title = title - # Commonly used parameters - self.epsilon = self.eps - self.tiny = self.xmin - self.huge = self.xmax - self.smallest_normal = self.xmin - self._str_smallest_normal = float_to_str(self.xmin) - self.smallest_subnormal = float_to_float(smallest_subnormal) - self._str_smallest_subnormal = float_to_str(smallest_subnormal) - - import math - self.precision = int(-math.log10(float_to_float(self.eps))) - ten = two + two + two + two + two - resolution = ten ** (-self.precision) - self.resolution = float_to_float(resolution) - self._str_resolution = float_to_str(resolution) - - def __str__(self): - fmt = ( - 'Machine parameters for %(title)s\n' - '---------------------------------------------------------------------\n' - 'ibeta=%(ibeta)s it=%(it)s iexp=%(iexp)s ngrd=%(ngrd)s irnd=%(irnd)s\n' - 'machep=%(machep)s eps=%(_str_eps)s (beta**machep == epsilon)\n' - 'negep =%(negep)s epsneg=%(_str_epsneg)s (beta**epsneg)\n' - 'minexp=%(minexp)s xmin=%(_str_xmin)s (beta**minexp == tiny)\n' - 'maxexp=%(maxexp)s xmax=%(_str_xmax)s ((1-epsneg)*beta**maxexp == huge)\n' - 'smallest_normal=%(smallest_normal)s ' - 'smallest_subnormal=%(smallest_subnormal)s\n' - 
'---------------------------------------------------------------------\n' - ) - return fmt % self.__dict__ - - -if __name__ == '__main__': - print(MachAr()) diff --git a/numpy/_core/_machar.pyi b/numpy/_core/_machar.pyi deleted file mode 100644 index 02637a17b6a8..000000000000 --- a/numpy/_core/_machar.pyi +++ /dev/null @@ -1,55 +0,0 @@ -from collections.abc import Iterable -from typing import Any, Final, TypeVar, overload - -import numpy as np -from numpy import _CastingKind -from numpy._utils import set_module as set_module - -### - -_T = TypeVar("_T") -_TupleT = TypeVar("_TupleT", bound=tuple[()] | tuple[Any, Any, *tuple[Any, ...]]) -_ExceptionT = TypeVar("_ExceptionT", bound=Exception) - -### - -class UFuncTypeError(TypeError): - ufunc: Final[np.ufunc] - def __init__(self, /, ufunc: np.ufunc) -> None: ... - -class _UFuncNoLoopError(UFuncTypeError): - dtypes: tuple[np.dtype, ...] - def __init__(self, /, ufunc: np.ufunc, dtypes: Iterable[np.dtype]) -> None: ... - -class _UFuncBinaryResolutionError(_UFuncNoLoopError): - dtypes: tuple[np.dtype, np.dtype] - def __init__(self, /, ufunc: np.ufunc, dtypes: Iterable[np.dtype]) -> None: ... - -class _UFuncCastingError(UFuncTypeError): - casting: Final[_CastingKind] - from_: Final[np.dtype] - to: Final[np.dtype] - def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype, to: np.dtype) -> None: ... - -class _UFuncInputCastingError(_UFuncCastingError): - in_i: Final[int] - def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype, to: np.dtype, i: int) -> None: ... - -class _UFuncOutputCastingError(_UFuncCastingError): - out_i: Final[int] - def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype, to: np.dtype, i: int) -> None: ... - -class _ArrayMemoryError(MemoryError): - shape: tuple[int, ...] - dtype: np.dtype - def __init__(self, /, shape: tuple[int, ...], dtype: np.dtype) -> None: ... - @property - def _total_size(self) -> int: ... 
- @staticmethod - def _size_to_string(num_bytes: int) -> str: ... - -@overload -def _unpack_tuple(tup: tuple[_T]) -> _T: ... -@overload -def _unpack_tuple(tup: _TupleT) -> _TupleT: ... -def _display_as_base(cls: type[_ExceptionT]) -> type[_ExceptionT]: ... diff --git a/numpy/_core/getlimits.py b/numpy/_core/getlimits.py index a3d0086974b1..1317c8b1ea0a 100644 --- a/numpy/_core/getlimits.py +++ b/numpy/_core/getlimits.py @@ -3,15 +3,15 @@ """ __all__ = ['finfo', 'iinfo'] +import math import types import warnings +from functools import cached_property from numpy._utils import set_module from . import numeric, numerictypes as ntypes -from ._machar import MachAr -from .numeric import array, inf, nan -from .umath import exp2, isnan, log10, nextafter +from ._multiarray_umath import _populate_finfo_constants def _fr0(a): @@ -30,96 +30,6 @@ def _fr1(a): return a -class MachArLike: - """ Object to simulate MachAr instance """ - def __init__(self, ftype, *, eps, epsneg, huge, tiny, - ibeta, smallest_subnormal=None, **kwargs): - self.params = _MACHAR_PARAMS[ftype] - self.ftype = ftype - self.title = self.params['title'] - # Parameter types same as for discovered MachAr object. 
- if not smallest_subnormal: - self._smallest_subnormal = nextafter( - self.ftype(0), self.ftype(1), dtype=self.ftype) - else: - self._smallest_subnormal = smallest_subnormal - self.epsilon = self.eps = self._float_to_float(eps) - self.epsneg = self._float_to_float(epsneg) - self.xmax = self.huge = self._float_to_float(huge) - self.xmin = self._float_to_float(tiny) - self.smallest_normal = self.tiny = self._float_to_float(tiny) - self.ibeta = self.params['itype'](ibeta) - self.__dict__.update(kwargs) - self.precision = int(-log10(self.eps)) - self.resolution = self._float_to_float( - self._float_conv(10) ** (-self.precision)) - self._str_eps = self._float_to_str(self.eps) - self._str_epsneg = self._float_to_str(self.epsneg) - self._str_xmin = self._float_to_str(self.xmin) - self._str_xmax = self._float_to_str(self.xmax) - self._str_resolution = self._float_to_str(self.resolution) - self._str_smallest_normal = self._float_to_str(self.xmin) - - @property - def smallest_subnormal(self): - """Return the value for the smallest subnormal. - - Returns - ------- - smallest_subnormal : float - value for the smallest subnormal. - - Warns - ----- - UserWarning - If the calculated value for the smallest subnormal is zero. - """ - # Check that the calculated value is not zero, in case it raises a - # warning. - value = self._smallest_subnormal - if self.ftype(0) == value: - warnings.warn( - f'The value of the smallest subnormal for {self.ftype} type is zero.', - UserWarning, stacklevel=2) - - return self._float_to_float(value) - - @property - def _str_smallest_subnormal(self): - """Return the string representation of the smallest subnormal.""" - return self._float_to_str(self.smallest_subnormal) - - def _float_to_float(self, value): - """Converts float to float. - - Parameters - ---------- - value : float - value to be converted. - """ - return _fr1(self._float_conv(value)) - - def _float_conv(self, value): - """Converts float to conv. 
- - Parameters - ---------- - value : float - value to be converted. - """ - return array([value], self.ftype) - - def _float_to_str(self, value): - """Converts float to str. - - Parameters - ---------- - value : float - value to be converted. - """ - return self.params['fmt'] % array(_fr0(value)[0], self.ftype) - - _convert_to_float = { ntypes.csingle: ntypes.single, ntypes.complex128: ntypes.float64, @@ -146,240 +56,6 @@ def _float_to_str(self, value): 'fmt': '%12.5e', 'title': _title_fmt.format('half')}} -# Key to identify the floating point type. Key is result of -# -# ftype = np.longdouble # or float64, float32, etc. -# v = (ftype(-1.0) / ftype(10.0)) -# v.view(v.dtype.newbyteorder('<')).tobytes() -# -# Uses division to work around deficiencies in strtold on some platforms. -# See: -# https://perl5.git.perl.org/perl.git/blob/3118d7d684b56cbeb702af874f4326683c45f045:/Configure - -_KNOWN_TYPES = {} -def _register_type(machar, bytepat): - _KNOWN_TYPES[bytepat] = machar - - -_float_ma = {} - - -def _register_known_types(): - # Known parameters for float16 - # See docstring of MachAr class for description of parameters. 
- f16 = ntypes.float16 - float16_ma = MachArLike(f16, - machep=-10, - negep=-11, - minexp=-14, - maxexp=16, - it=10, - iexp=5, - ibeta=2, - irnd=5, - ngrd=0, - eps=exp2(f16(-10)), - epsneg=exp2(f16(-11)), - huge=f16(65504), - tiny=f16(2 ** -14)) - _register_type(float16_ma, b'f\xae') - _float_ma[16] = float16_ma - - # Known parameters for float32 - f32 = ntypes.float32 - float32_ma = MachArLike(f32, - machep=-23, - negep=-24, - minexp=-126, - maxexp=128, - it=23, - iexp=8, - ibeta=2, - irnd=5, - ngrd=0, - eps=exp2(f32(-23)), - epsneg=exp2(f32(-24)), - huge=f32((1 - 2 ** -24) * 2**128), - tiny=exp2(f32(-126))) - _register_type(float32_ma, b'\xcd\xcc\xcc\xbd') - _float_ma[32] = float32_ma - - # Known parameters for float64 - f64 = ntypes.float64 - epsneg_f64 = 2.0 ** -53.0 - tiny_f64 = 2.0 ** -1022.0 - float64_ma = MachArLike(f64, - machep=-52, - negep=-53, - minexp=-1022, - maxexp=1024, - it=52, - iexp=11, - ibeta=2, - irnd=5, - ngrd=0, - eps=2.0 ** -52.0, - epsneg=epsneg_f64, - huge=(1.0 - epsneg_f64) / tiny_f64 * f64(4), - tiny=tiny_f64) - _register_type(float64_ma, b'\x9a\x99\x99\x99\x99\x99\xb9\xbf') - _float_ma[64] = float64_ma - - # Known parameters for IEEE 754 128-bit binary float - ld = ntypes.longdouble - epsneg_f128 = exp2(ld(-113)) - tiny_f128 = exp2(ld(-16382)) - # Ignore runtime error when this is not f128 - with numeric.errstate(all='ignore'): - huge_f128 = (ld(1) - epsneg_f128) / tiny_f128 * ld(4) - float128_ma = MachArLike(ld, - machep=-112, - negep=-113, - minexp=-16382, - maxexp=16384, - it=112, - iexp=15, - ibeta=2, - irnd=5, - ngrd=0, - eps=exp2(ld(-112)), - epsneg=epsneg_f128, - huge=huge_f128, - tiny=tiny_f128) - # IEEE 754 128-bit binary float - _register_type(float128_ma, - b'\x9a\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\xfb\xbf') - _float_ma[128] = float128_ma - - # Known parameters for float80 (Intel 80-bit extended precision) - epsneg_f80 = exp2(ld(-64)) - tiny_f80 = exp2(ld(-16382)) - # Ignore runtime error when this is not 
f80 - with numeric.errstate(all='ignore'): - huge_f80 = (ld(1) - epsneg_f80) / tiny_f80 * ld(4) - float80_ma = MachArLike(ld, - machep=-63, - negep=-64, - minexp=-16382, - maxexp=16384, - it=63, - iexp=15, - ibeta=2, - irnd=5, - ngrd=0, - eps=exp2(ld(-63)), - epsneg=epsneg_f80, - huge=huge_f80, - tiny=tiny_f80) - # float80, first 10 bytes containing actual storage - _register_type(float80_ma, b'\xcd\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xfb\xbf') - _float_ma[80] = float80_ma - - # Guessed / known parameters for double double; see: - # https://en.wikipedia.org/wiki/Quadruple-precision_floating-point_format#Double-double_arithmetic - # These numbers have the same exponent range as float64, but extended - # number of digits in the significand. - huge_dd = nextafter(ld(inf), ld(0), dtype=ld) - # As the smallest_normal in double double is so hard to calculate we set - # it to NaN. - smallest_normal_dd = nan - # Leave the same value for the smallest subnormal as double - smallest_subnormal_dd = ld(nextafter(0., 1.)) - float_dd_ma = MachArLike(ld, - machep=-105, - negep=-106, - minexp=-1022, - maxexp=1024, - it=105, - iexp=11, - ibeta=2, - irnd=5, - ngrd=0, - eps=exp2(ld(-105)), - epsneg=exp2(ld(-106)), - huge=huge_dd, - tiny=smallest_normal_dd, - smallest_subnormal=smallest_subnormal_dd) - # double double; low, high order (e.g. PPC 64) - _register_type(float_dd_ma, - b'\x9a\x99\x99\x99\x99\x99Y<\x9a\x99\x99\x99\x99\x99\xb9\xbf') - # double double; high, low order (e.g. PPC 64 le) - _register_type(float_dd_ma, - b'\x9a\x99\x99\x99\x99\x99\xb9\xbf\x9a\x99\x99\x99\x99\x99Y<') - _float_ma['dd'] = float_dd_ma - - -def _get_machar(ftype): - """ Get MachAr instance or MachAr-like instance - - Get parameters for floating point type, by first trying signatures of - various known floating point types, then, if none match, attempting to - identify parameters by analysis. - - Parameters - ---------- - ftype : class - Numpy floating point type class (e.g. 
``np.float64``) - - Returns - ------- - ma_like : instance of :class:`MachAr` or :class:`MachArLike` - Object giving floating point parameters for `ftype`. - - Warns - ----- - UserWarning - If the binary signature of the float type is not in the dictionary of - known float types. - """ - params = _MACHAR_PARAMS.get(ftype) - if params is None: - raise ValueError(repr(ftype)) - # Detect known / suspected types - # ftype(-1.0) / ftype(10.0) is better than ftype('-0.1') because stold - # may be deficient - key = (ftype(-1.0) / ftype(10.)) - key = key.view(key.dtype.newbyteorder("<")).tobytes() - ma_like = None - if ftype == ntypes.longdouble: - # Could be 80 bit == 10 byte extended precision, where last bytes can - # be random garbage. - # Comparing first 10 bytes to pattern first to avoid branching on the - # random garbage. - ma_like = _KNOWN_TYPES.get(key[:10]) - if ma_like is None: - # see if the full key is known. - ma_like = _KNOWN_TYPES.get(key) - if ma_like is None and len(key) == 16: - # machine limits could be f80 masquerading as np.float128, - # find all keys with length 16 and make new dict, but make the keys - # only 10 bytes long, the last bytes can be random garbage - _kt = {k[:10]: v for k, v in _KNOWN_TYPES.items() if len(k) == 16} - ma_like = _kt.get(key[:10]) - if ma_like is not None: - return ma_like - # Fall back to parameter discovery - warnings.warn( - f'Signature {key} for {ftype} does not match any known type: ' - 'falling back to type probe function.\n' - 'This warnings indicates broken support for the dtype!', - UserWarning, stacklevel=2) - return _discovered_machar(ftype) - - -def _discovered_machar(ftype): - """ Create MachAr instance with found information on float types - - TODO: MachAr should be retired completely ideally. We currently only - ever use it system with broken longdouble (valgrind, WSL). 
- """ - params = _MACHAR_PARAMS[ftype] - return MachAr(lambda v: array([v], ftype), - lambda v: _fr0(v.astype(params['itype']))[0], - lambda v: array(_fr0(v)[0], ftype), - lambda v: params['fmt'] % array(_fr0(v)[0], ftype), - params['title']) - @set_module('numpy') class finfo: @@ -548,75 +224,105 @@ def __new__(cls, dtype): def _init(self, dtype): self.dtype = numeric.dtype(dtype) - machar = _get_machar(dtype) - - for word in ['precision', 'iexp', - 'maxexp', 'minexp', 'negep', - 'machep']: - setattr(self, word, getattr(machar, word)) - for word in ['resolution', 'epsneg', 'smallest_subnormal']: - setattr(self, word, getattr(machar, word).flat[0]) self.bits = self.dtype.itemsize * 8 - self.max = machar.huge.flat[0] - self.min = -self.max - self.eps = machar.eps.flat[0] - self.nexp = machar.iexp - self.nmant = machar.it - self._machar = machar - self._str_tiny = machar._str_xmin.strip() - self._str_max = machar._str_xmax.strip() - self._str_epsneg = machar._str_epsneg.strip() - self._str_eps = machar._str_eps.strip() - self._str_resolution = machar._str_resolution.strip() - self._str_smallest_normal = machar._str_smallest_normal.strip() - self._str_smallest_subnormal = machar._str_smallest_subnormal.strip() + self._fmt = None + self._repr = None + _populate_finfo_constants(self, self.dtype) return self + @cached_property + def epsneg(self): + # Assume typical floating point logic. Could also use nextafter. 
+ return self.eps / self._radix + + @cached_property + def resolution(self): + return self.dtype.type(10)**-self.precision + + @cached_property + def machep(self): + return int(math.log2(self.eps)) + + @cached_property + def negep(self): + return int(math.log2(self.epsneg)) + + @cached_property + def nexp(self): + # considering all ones (inf/nan) and all zeros (subnormal/zero) + return math.ceil(math.log2(self.maxexp - self.minexp + 2)) + + @cached_property + def iexp(self): + # Calculate exponent bits from it's range: + return math.ceil(math.log2(self.maxexp - self.minexp)) + def __str__(self): + if self._fmt is not None: + return self._fmt + + def get_str(name, pad=None): + if (val := getattr(self, name)) is None: + return "" + if pad is not None: + s = str(val).ljust(pad) + return str(val) + + precision = get_str("precision", 3) + machep = get_str("machep", 6) + negep = get_str("negep", 6) + minexp = get_str("minexp", 6) + maxexp = get_str("maxexp", 6) + resolution = get_str("resolution") + eps = get_str("eps") + epsneg = get_str("epsneg") + tiny = get_str("tiny") + smallest_normal = get_str("smallest_normal") + smallest_subnormal = get_str("smallest_subnormal") + nexp = get_str("nexp", 6) + max_ = get_str("max") + if hasattr(self, "min") and hasattr(self, "max") and -self.min == self.max: + min_ = "-max" + else: + min_ = get_str("min") + fmt = ( - 'Machine parameters for %(dtype)s\n' - '---------------------------------------------------------------\n' - 'precision = %(precision)3s resolution = %(_str_resolution)s\n' - 'machep = %(machep)6s eps = %(_str_eps)s\n' - 'negep = %(negep)6s epsneg = %(_str_epsneg)s\n' - 'minexp = %(minexp)6s tiny = %(_str_tiny)s\n' - 'maxexp = %(maxexp)6s max = %(_str_max)s\n' - 'nexp = %(nexp)6s min = -max\n' - 'smallest_normal = %(_str_smallest_normal)s ' - 'smallest_subnormal = %(_str_smallest_subnormal)s\n' - '---------------------------------------------------------------\n' - ) - return fmt % self.__dict__ + f'Machine parameters 
for {self.dtype}\n' + f'---------------------------------------------------------------\n' + f'precision = {precision} resolution = {resolution}\n' + f'machep = {machep} eps = {eps}\n' + f'negep = {negep} epsneg = {epsneg}\n' + f'minexp = {minexp} tiny = {tiny}\n' + f'maxexp = {maxexp} max = {max_}\n' + f'nexp = {nexp} min = {min_}\n' + f'smallest_normal = {smallest_normal} ' + f'smallest_subnormal = {smallest_subnormal}\n' + f'---------------------------------------------------------------\n' + ) + self._fmt = fmt + return fmt def __repr__(self): + if self._repr is not None: + return self._repr + c = self.__class__.__name__ - d = self.__dict__.copy() - d['klass'] = c - return (("%(klass)s(resolution=%(resolution)s, min=-%(_str_max)s," - " max=%(_str_max)s, dtype=%(dtype)s)") % d) - @property - def smallest_normal(self): - """Return the value for the smallest normal. + # Use precision+1 digits in exponential notation + fmt_str = _MACHAR_PARAMS.get(self.dtype.type, {}).get('fmt', '%s') + if fmt_str != '%s' and hasattr(self, 'max') and hasattr(self, 'min'): + max_str = (fmt_str % self.max).strip() + min_str = (fmt_str % self.min).strip() + else: + max_str = str(self.max) + min_str = str(self.min) - Returns - ------- - smallest_normal : float - Value for the smallest normal. + resolution_str = str(self.resolution) - Warns - ----- - UserWarning - If the calculated value for the smallest normal is requested for - double-double. - """ - # This check is necessary because the value for smallest_normal is - # platform dependent for longdouble types. 
- if isnan(self._machar.smallest_normal.flat[0]): - warnings.warn( - 'The value of smallest normal is undefined for double double', - UserWarning, stacklevel=2) - return self._machar.smallest_normal.flat[0] + repr_str = (f"{c}(resolution={resolution_str}, min={min_str}," + f" max={max_str}, dtype={self.dtype})") + self._repr = repr_str + return repr_str @property def tiny(self): diff --git a/numpy/_core/include/numpy/dtype_api.h b/numpy/_core/include/numpy/dtype_api.h index 8c3ff720b372..cc61a826c103 100644 --- a/numpy/_core/include/numpy/dtype_api.h +++ b/numpy/_core/include/numpy/dtype_api.h @@ -379,6 +379,7 @@ typedef int (PyArrayMethod_PromoterFunction)(PyObject *ufunc, #define NPY_DT_get_clear_loop 9 #define NPY_DT_get_fill_zero_loop 10 #define NPY_DT_finalize_descr 11 +#define NPY_DT_get_constant 12 // These PyArray_ArrFunc slots will be deprecated and replaced eventually // getitem and setitem can be defined as a performance optimization; @@ -389,7 +390,7 @@ typedef int (PyArrayMethod_PromoterFunction)(PyObject *ufunc, // used to separate dtype slots from arrfuncs slots // intended only for internal use but defined here for clarity -#define _NPY_DT_ARRFUNCS_OFFSET (1 << 10) +#define _NPY_DT_ARRFUNCS_OFFSET (1 << 11) // Cast is disabled // #define NPY_DT_PyArray_ArrFuncs_cast 0 + _NPY_DT_ARRFUNCS_OFFSET @@ -479,6 +480,42 @@ typedef PyArray_Descr *(PyArrayDTypeMeta_EnsureCanonical)(PyArray_Descr *dtype); */ typedef PyArray_Descr *(PyArrayDTypeMeta_FinalizeDescriptor)(PyArray_Descr *dtype); +/* + * Constants that can be queried and used e.g. by reduce identies defaults. + * These are also used to expose .finfo and .iinfo for example. 
+ */ +/* Numerical constants */ +#define NPY_CONSTANT_zero 1 +#define NPY_CONSTANT_one 2 +#define NPY_CONSTANT_all_bits_set 3 +#define NPY_CONSTANT_maximum_finite 4 +#define NPY_CONSTANT_minimum_finite 5 +#define NPY_CONSTANT_inf 6 +#define NPY_CONSTANT_ninf 7 +#define NPY_CONSTANT_nan 8 +#define NPY_CONSTANT_finfo_radix 9 +#define NPY_CONSTANT_finfo_eps 10 +#define NPY_CONSTANT_finfo_smallest_normal 11 +#define NPY_CONSTANT_finfo_smallest_subnormal 12 +/* Constants that are always of integer type, value is `npy_intp/Py_ssize_t` */ +#define NPY_CONSTANT_finfo_nmant (1 << 16) + 0 +#define NPY_CONSTANT_finfo_min_exp (1 << 16) + 1 +#define NPY_CONSTANT_finfo_max_exp (1 << 16) + 2 +#define NPY_CONSTANT_finfo_decimal_digits (1 << 16) + 3 + +/* It may make sense to continue with other constants here, e.g. pi, etc? */ + +/* + * Function to get a constant value for the dtype. Data may be unaligned, the + * function is always called with the GIL held. + * + * @param descr The dtype instance (i.e. self) + * @param ID The ID of the constant to get. + * @param data Pointer to the data to be written too, may be unaligned. + * @returns 1 on success, 0 if the constant is not available, or -1 with an error set. + */ +typedef int (PyArrayDTypeMeta_GetConstant)(PyArray_Descr *descr, int ID, void *data); + /* * TODO: These two functions are currently only used for experimental DType * API support. 
Their relation should be "reversed": NumPy should diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index e05f25da39ca..0d8c513d4fde 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -1390,8 +1390,6 @@ python_sources = [ '_exceptions.pyi', '_internal.py', '_internal.pyi', - '_machar.py', - '_machar.pyi', '_methods.py', '_methods.pyi', '_simd.pyi', diff --git a/numpy/_core/src/multiarray/arraytypes.c.src b/numpy/_core/src/multiarray/arraytypes.c.src index dd51a937031a..0a063f6ba07a 100644 --- a/numpy/_core/src/multiarray/arraytypes.c.src +++ b/numpy/_core/src/multiarray/arraytypes.c.src @@ -3,6 +3,7 @@ #include #include #include +#include #include #define NPY_NO_DEPRECATED_API NPY_API_VERSION @@ -15,6 +16,7 @@ #include "npy_pycompat.h" #include "numpy/npy_math.h" #include "numpy/halffloat.h" +#include "numpy/dtype_api.h" #include "npy_config.h" #include "npy_sort.h" @@ -4318,10 +4320,202 @@ PyArray_DescrFromType(int type) /* ***************************************************************************** - ** SETUP TYPE INFO ** + ** NEWSTYLE TYPE METHODS ** ***************************************************************************** */ +static int +BOOL_get_constant(PyArray_Descr *descr, int constant_id, void *ptr) +{ + switch (constant_id) { + case NPY_CONSTANT_zero: + case NPY_CONSTANT_minimum_finite: + *(npy_bool *)ptr = NPY_FALSE; + return 1; + case NPY_CONSTANT_one: + case NPY_CONSTANT_maximum_finite: + *(npy_bool *)ptr = NPY_TRUE; + return 1; + default: + return 0; + } +} + +/**begin repeat + * #NAME = BYTE, UBYTE, SHORT, USHORT, INT, UINT, + * LONG, ULONG, LONGLONG, ULONGLONG# + * #type = npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int, npy_uint, + * npy_long, npy_ulong, npy_longlong, npy_ulonglong# + * #IS_UNSIGNED = 0, 1, 0, 1, 0, 1, + * 0, 1, 0, 1# + * #MIN = NPY_MIN_BYTE, 0, NPY_MIN_SHORT, 0, NPY_MIN_INT, 0, + * NPY_MIN_LONG, 0, NPY_MIN_LONGLONG, 0# + */ +static int +@NAME@_get_constant(PyArray_Descr *descr, int 
constant_id, void *ptr) +{ + @type@ val; + switch (constant_id) { + case NPY_CONSTANT_zero: + val = 0; + break; + case NPY_CONSTANT_one: + val = 1; + break; + case NPY_CONSTANT_minimum_finite: +#if @IS_UNSIGNED@ + val = 0; +#else + val = @MIN@; +#endif + break; + case NPY_CONSTANT_maximum_finite: + val = NPY_MAX_@NAME@; + break; + default: + return 0; + } + @NAME@_copyswap(ptr, &val, !PyArray_ISNBO(descr->byteorder), NULL); + return 1; +} +/**end repeat**/ + +/* +Keeping Half macros consistent with standard C +Refernce: https://en.cppreference.com/w/c/types/limits.html +*/ +#define HALF_MAX 31743 /* Bit pattern for 65504.0 */ +#define HALF_MIN 1024 /* Bit pattern for smallest positive normal: 2^-14 */ +#define HALF_NEG_MAX 64511 /* Bit pattern for -65504.0 */ +#define HALF_EPSILON 5120 +#define HALF_TRUE_MIN 0x0001 /* Bit pattern for smallest positive subnormal: 2^-24 */ +#define HALF_MAX_EXP 16 +#define HALF_MIN_EXP -13 +#define HALF_MANT_DIG 11 /* 10 + 1 (implicit) */ +#define HALF_DIG 3 + +/* + * On PPC64 systems with IBM double-double format pair of IEEE binary64 + * values (not a true IEEE quad). We derived the values based on the Interval machine epsilon definition of epsilon, + * difference between 1.0 and the next representable floating-point number larger than 1.0 + * ~106 bits of mantissa precision (53+53) gives epsilon of 2^-105, but glibc returns 2^-1074 (DBL_TRUE_MIN). + */ +#if defined(HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_LE) || \ + defined(HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_BE) + #undef LDBL_EPSILON + #define LDBL_EPSILON 0x1p-105L /* 2^-105 */ +#endif + +/* + * Define *_TRUE_MIN macros for smallest subnormal values if not available. + * Use nextafter(0, 1) to get the smallest positive representable value. 
+ */ +#ifndef FLT_TRUE_MIN + #define FLT_TRUE_MIN npy_nextafterf(0.0f, 1.0f) +#endif +#ifndef DBL_TRUE_MIN + #define DBL_TRUE_MIN npy_nextafter(0.0, 1.0) +#endif +#ifndef LDBL_TRUE_MIN + #define LDBL_TRUE_MIN npy_nextafterl(0.0L, 1.0L) +#endif + +/**begin repeat + * #NAME = HALF,FLOAT, DOUBLE, LONGDOUBLE# + * #ABB = HALF, FLT, DBL, LDBL# + * #type = npy_half, npy_float, npy_double, npy_longdouble# + * #RADIX = 16384, 2, 2, 2# + * #NEG_MAX = HALF_NEG_MAX, -FLT_MAX, -DBL_MAX, -LDBL_MAX# + */ +static int +@NAME@_get_constant(PyArray_Descr *descr, int constant_id, void *ptr) +{ + @type@ val; + switch (constant_id) { + case NPY_CONSTANT_zero: + val = 0; + break; + case NPY_CONSTANT_one: + val = 1; + break; + case NPY_CONSTANT_minimum_finite: + val = @NEG_MAX@; + break; + case NPY_CONSTANT_maximum_finite: + #if defined(HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_LE) || \ + defined(HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_BE) + /* For IBM double-double, use nextafter(inf, 0) to get the true + * maximum representable value (matches old MachArLike behavior) */ + if (sizeof(@type@) == sizeof(npy_longdouble)) { + val = npy_nextafterl((@type@)NPY_INFINITY, (@type@)0.0L); + break; + } + #endif + val = @ABB@_MAX; + break; + case NPY_CONSTANT_inf: + val = (@type@)NPY_INFINITYF; + break; + case NPY_CONSTANT_nan: + val = (@type@)NPY_NANF; + break; + case NPY_CONSTANT_finfo_radix: + val = @RADIX@; + break; + case NPY_CONSTANT_finfo_eps: + val = @ABB@_EPSILON; + break; + case NPY_CONSTANT_finfo_smallest_normal: + val = @ABB@_MIN; + break; + case NPY_CONSTANT_finfo_smallest_subnormal: + val = @ABB@_TRUE_MIN; + break; + case NPY_CONSTANT_finfo_nmant: + *(npy_intp *)ptr = @ABB@_MANT_DIG - 1; + return 1; + case NPY_CONSTANT_finfo_min_exp: + /* + Definition: Minimum negative integer such that FLT_RADIX raised by power one less than that integer is a normalized float, double and long double respectively + + refernce: https://en.cppreference.com/w/c/types/limits.html + */ + *(npy_intp *)ptr = @ABB@_MIN_EXP - 1; + 
return 1; + case NPY_CONSTANT_finfo_max_exp: + *(npy_intp *)ptr = @ABB@_MAX_EXP; + return 1; + case NPY_CONSTANT_finfo_decimal_digits: + *(npy_intp *)ptr = @ABB@_DIG; + return 1; + default: + return 0; + } + @NAME@_copyswap(ptr, &val, !PyArray_ISNBO(descr->byteorder), NULL); + return 1; +} +/**end repeat**/ + + +/**begin repeat + * #NAME = CFLOAT, CDOUBLE, CLONGDOUBLE, + * OBJECT, STRING, UNICODE, VOID, + * DATETIME, TIMEDELTA# + */ +static int +@NAME@_get_constant(PyArray_Descr *descr, int constant_id, void *ptr) +{ + // TODO: We currently don't use this, but we could quickly for + // reduction identity/initial value so should implement these. + return 0; +} +/**end repeat**/ + +/* + ***************************************************************************** + ** SETUP TYPE INFO ** + ***************************************************************************** + */ /* * This function is called during numpy module initialization, @@ -4331,6 +4525,7 @@ NPY_NO_EXPORT int set_typeinfo(PyObject *dict) { PyObject *infodict = NULL; + PyArray_DTypeMeta *dtypemeta; // borrowed int i; _PyArray_LegacyDescr *dtype; @@ -4385,7 +4580,7 @@ set_typeinfo(PyObject *dict) * PyArray_ComplexAbstractDType*3, * PyArrayDescr_Type*6 # */ - if (dtypemeta_wrap_legacy_descriptor( + dtypemeta = dtypemeta_wrap_legacy_descriptor( _builtin_descrs[NPY_@NAME@], &_Py@Name@_ArrFuncs, (PyTypeObject *)&@scls@, @@ -4395,9 +4590,11 @@ set_typeinfo(PyObject *dict) #else NULL #endif - ) < 0) { + ); + if (dtypemeta == NULL) { return -1; } + NPY_DT_SLOTS(dtypemeta)->get_constant = @NAME@_get_constant; /**end repeat**/ diff --git a/numpy/_core/src/multiarray/dtypemeta.c b/numpy/_core/src/multiarray/dtypemeta.c index 293d7dfee1b3..3f569cc3ccec 100644 --- a/numpy/_core/src/multiarray/dtypemeta.c +++ b/numpy/_core/src/multiarray/dtypemeta.c @@ -118,6 +118,17 @@ use_new_as_default(PyArray_DTypeMeta *self) } +/* + * By default fill in zero, one, and negative one via the Python casts, + * users should override 
this, but this allows us to use it for legacy user dtypes. + */ +static int +default_get_constant(PyArray_Descr *descr, int constant_id, void *data) +{ + return 0; +} + + static int legacy_setitem_using_DType(PyObject *obj, void *data, void *arr) { @@ -195,6 +206,7 @@ dtypemeta_initialize_struct_from_spec( NPY_DT_SLOTS(DType)->get_clear_loop = NULL; NPY_DT_SLOTS(DType)->get_fill_zero_loop = NULL; NPY_DT_SLOTS(DType)->finalize_descr = NULL; + NPY_DT_SLOTS(DType)->get_constant = default_get_constant; NPY_DT_SLOTS(DType)->f = default_funcs; PyType_Slot *spec_slot = spec->slots; @@ -1068,9 +1080,9 @@ object_common_dtype( * Some may have more aliases, as `intp` is not its own thing, * as of writing this, these are not added here. * - * @returns 0 on success, -1 on failure. + * @returns A borrowed references to the new DType or NULL. */ -NPY_NO_EXPORT int +NPY_NO_EXPORT PyArray_DTypeMeta * dtypemeta_wrap_legacy_descriptor( _PyArray_LegacyDescr *descr, PyArray_ArrFuncs *arr_funcs, PyTypeObject *dtype_super_class, const char *name, const char *alias) @@ -1097,19 +1109,20 @@ dtypemeta_wrap_legacy_descriptor( "that of an existing dtype (with the assumption it is just " "copied over and can be replaced).", descr->typeobj, Py_TYPE(descr)); - return -1; + return NULL; } NPY_DType_Slots *dt_slots = PyMem_Malloc(sizeof(NPY_DType_Slots)); if (dt_slots == NULL) { - return -1; + return NULL; } memset(dt_slots, '\0', sizeof(NPY_DType_Slots)); + dt_slots->get_constant = default_get_constant; PyArray_DTypeMeta *dtype_class = PyMem_Malloc(sizeof(PyArray_DTypeMeta)); if (dtype_class == NULL) { PyMem_Free(dt_slots); - return -1; + return NULL; } /* @@ -1148,12 +1161,12 @@ dtypemeta_wrap_legacy_descriptor( /* Let python finish the initialization */ if (PyType_Ready((PyTypeObject *)dtype_class) < 0) { Py_DECREF(dtype_class); - return -1; + return NULL; } dt_slots->castingimpls = PyDict_New(); if (dt_slots->castingimpls == NULL) { Py_DECREF(dtype_class); - return -1; + return NULL; } /* @@ 
-1169,13 +1182,15 @@ dtypemeta_wrap_legacy_descriptor( /* Set default functions (correct for most dtypes, override below) */ dt_slots->default_descr = nonparametric_default_descr; dt_slots->discover_descr_from_pyobject = ( - nonparametric_discover_descr_from_pyobject); + nonparametric_discover_descr_from_pyobject); dt_slots->is_known_scalar_type = python_builtins_are_known_scalar_types; dt_slots->common_dtype = default_builtin_common_dtype; dt_slots->common_instance = NULL; dt_slots->ensure_canonical = ensure_native_byteorder; dt_slots->get_fill_zero_loop = NULL; dt_slots->finalize_descr = NULL; + dt_slots->setitem = NULL; + dt_slots->getitem = NULL; if (PyTypeNum_ISSIGNED(dtype_class->type_num)) { /* Convert our scalars (raise on too large unsigned and NaN, etc.) */ @@ -1233,7 +1248,7 @@ dtypemeta_wrap_legacy_descriptor( if (_PyArray_MapPyTypeToDType(dtype_class, descr->typeobj, PyTypeNum_ISUSERDEF(dtype_class->type_num)) < 0) { Py_DECREF(dtype_class); - return -1; + return NULL; } /* Finally, replace the current class of the descr */ @@ -1243,23 +1258,23 @@ dtypemeta_wrap_legacy_descriptor( if (!PyTypeNum_ISUSERDEF(descr->type_num)) { if (npy_cache_import_runtime("numpy.dtypes", "_add_dtype_helper", &npy_runtime_imports._add_dtype_helper) == -1) { - return -1; + return NULL; } if (PyObject_CallFunction( npy_runtime_imports._add_dtype_helper, "Os", (PyObject *)dtype_class, alias) == NULL) { - return -1; + return NULL; } } else { // ensure the within dtype cast is populated for legacy user dtypes if (PyArray_GetCastingImpl(dtype_class, dtype_class) == NULL) { - return -1; + return NULL; } } - return 0; + return dtype_class; } diff --git a/numpy/_core/src/multiarray/dtypemeta.h b/numpy/_core/src/multiarray/dtypemeta.h index a8b78e3f7518..202116a4cc91 100644 --- a/numpy/_core/src/multiarray/dtypemeta.h +++ b/numpy/_core/src/multiarray/dtypemeta.h @@ -67,6 +67,11 @@ typedef struct { * parameters, if any, as the operand dtype. 
*/ PyArrayDTypeMeta_FinalizeDescriptor *finalize_descr; + /* + * Function to fetch constants. Always defined, but may return "undefined" + * for all values. + */ + PyArrayDTypeMeta_GetConstant *get_constant; /* * The casting implementation (ArrayMethod) to convert between two * instances of this DType, stored explicitly for fast access: @@ -89,7 +94,7 @@ typedef struct { // This must be updated if new slots before within_dtype_castingimpl // are added -#define NPY_NUM_DTYPE_SLOTS 11 +#define NPY_NUM_DTYPE_SLOTS 12 #define NPY_NUM_DTYPE_PYARRAY_ARRFUNCS_SLOTS 22 #define NPY_DT_MAX_ARRFUNCS_SLOT \ NPY_NUM_DTYPE_PYARRAY_ARRFUNCS_SLOTS + _NPY_DT_ARRFUNCS_OFFSET @@ -124,6 +129,8 @@ typedef struct { NPY_DT_SLOTS(NPY_DTYPE(descr))->getitem(descr, data_ptr) #define NPY_DT_CALL_setitem(descr, value, data_ptr) \ NPY_DT_SLOTS(NPY_DTYPE(descr))->setitem(descr, value, data_ptr) +#define NPY_DT_CALL_get_constant(descr, constant_id, data_ptr) \ + NPY_DT_SLOTS(NPY_DTYPE(descr))->get_constant(descr, constant_id, data_ptr) /* @@ -153,7 +160,7 @@ NPY_NO_EXPORT int python_builtins_are_known_scalar_types( PyArray_DTypeMeta *cls, PyTypeObject *pytype); -NPY_NO_EXPORT int +NPY_NO_EXPORT PyArray_DTypeMeta * dtypemeta_wrap_legacy_descriptor( _PyArray_LegacyDescr *descr, PyArray_ArrFuncs *arr_funcs, PyTypeObject *dtype_super_class, const char *name, const char *alias); diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index a7fdf3efba17..4ab3f5bae02c 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -4327,6 +4327,109 @@ normalize_axis_index(PyObject *NPY_UNUSED(self), } +static PyObject * +_populate_finfo_constants(PyObject *NPY_UNUSED(self), PyObject *args) +{ + if (PyTuple_Size(args) != 2) { + PyErr_SetString(PyExc_TypeError, "Expected 2 arguments"); + return NULL; + } + PyObject *finfo = PyTuple_GetItem(args, 0); + if (finfo == NULL || finfo == Py_None) { + 
PyErr_SetString(PyExc_TypeError, "First argument cannot be None"); + return NULL; + } + PyArray_Descr *descr = (PyArray_Descr *)PyTuple_GetItem(args, 1); + if (!PyArray_DescrCheck(descr)) { + PyErr_SetString(PyExc_TypeError, "Second argument must be a dtype"); + return NULL; + } + + static const struct { + char *name; + int id; + npy_bool is_int; + } finfo_constants[] = { + {"max", NPY_CONSTANT_maximum_finite, 0}, + {"min", NPY_CONSTANT_minimum_finite, 0}, + {"_radix", NPY_CONSTANT_finfo_radix, 0}, + {"eps", NPY_CONSTANT_finfo_eps, 0}, + {"smallest_normal", NPY_CONSTANT_finfo_smallest_normal, 0}, + {"smallest_subnormal", NPY_CONSTANT_finfo_smallest_subnormal, 0}, + {"nmant", NPY_CONSTANT_finfo_nmant, 1}, + {"minexp", NPY_CONSTANT_finfo_min_exp, 1}, + {"maxexp", NPY_CONSTANT_finfo_max_exp, 1}, + {"precision", NPY_CONSTANT_finfo_decimal_digits, 1}, + }; + static const int n_finfo_constants = sizeof(finfo_constants) / sizeof(finfo_constants[0]); + + int n_float_constants = 0; + for (int i = 0; i < n_finfo_constants; i++) { + if (!finfo_constants[i].is_int) { + n_float_constants++; + } + } + + PyArrayObject *buffer_array = NULL; + char *buffer_data = NULL; + npy_intp dims[1] = {n_float_constants}; + + Py_INCREF(descr); + buffer_array = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type, + descr, 1, dims, NULL, NULL, 0, NULL); + if (buffer_array == NULL) { + return NULL; + } + buffer_data = PyArray_BYTES(buffer_array); + npy_intp elsize = PyArray_DESCR(buffer_array)->elsize; + + for (int i = 0; i < n_finfo_constants; i++) + { + PyObject *value_obj; + if (!finfo_constants[i].is_int) { + int res = NPY_DT_CALL_get_constant(descr, + finfo_constants[i].id, buffer_data); + if (res < 0) { + goto fail; + } + if (res == 0) { + buffer_data += elsize; // Move to next element + continue; + } + // Return as 0-d array item to preserve numpy scalar type + value_obj = PyArray_ToScalar(buffer_data, buffer_array); + buffer_data += elsize; // Move to next element + } + else { + npy_intp 
int_value; + int res = NPY_DT_CALL_get_constant(descr, finfo_constants[i].id, &int_value); + if (res < 0) { + goto fail; + } + if (res == 0) { + continue; + } + value_obj = PyLong_FromSsize_t(int_value); + } + if (value_obj == NULL) { + goto fail; + } + int res = PyObject_SetAttrString(finfo, finfo_constants[i].name, value_obj); + Py_DECREF(value_obj); + if (res < 0) { + goto fail; + } + } + + Py_DECREF(buffer_array); + Py_RETURN_NONE; + fail: + Py_XDECREF(buffer_array); + return NULL; +} + + + static PyObject * _set_numpy_warn_if_no_mem_policy(PyObject *NPY_UNUSED(self), PyObject *arg) { @@ -4556,6 +4659,8 @@ static struct PyMethodDef array_module_methods[] = { METH_VARARGS | METH_KEYWORDS, NULL}, {"_load_from_filelike", (PyCFunction)_load_from_filelike, METH_FASTCALL | METH_KEYWORDS, NULL}, + {"_populate_finfo_constants", (PyCFunction)_populate_finfo_constants, + METH_VARARGS, NULL}, /* from umath */ {"frompyfunc", (PyCFunction) ufunc_frompyfunc, diff --git a/numpy/_core/src/multiarray/usertypes.c b/numpy/_core/src/multiarray/usertypes.c index 9d026f32044f..71c95a8ae39c 100644 --- a/numpy/_core/src/multiarray/usertypes.c +++ b/numpy/_core/src/multiarray/usertypes.c @@ -306,8 +306,9 @@ PyArray_RegisterDataType(PyArray_DescrProto *descr_proto) descr->type_num = typenum; /* update prototype to notice duplicate registration */ descr_proto->type_num = typenum; - if (dtypemeta_wrap_legacy_descriptor( - descr, descr_proto->f, &PyArrayDescr_Type, name, NULL) < 0) { + PyArray_DTypeMeta *wrapped_dtype = dtypemeta_wrap_legacy_descriptor( + descr, descr_proto->f, &PyArrayDescr_Type, name, NULL); + if (wrapped_dtype == NULL) { descr->type_num = -1; NPY_NUMUSERTYPES--; /* Override the type, it might be wrong and then decref crashes */ diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index 4981f2fd0e30..27e98a563930 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -215,14 +215,6 @@ 
def test_not_deprecated(self, name: str) -> None: self.assert_not_deprecated(lambda: getattr(self.ctypes, name)) -class TestMachAr(_DeprecationTestCase): - # Deprecated 2022-11-22, NumPy 1.25 - warning_cls = DeprecationWarning - - def test_deprecated_module(self): - self.assert_deprecated(lambda: np._core.MachAr) - - class TestQuantileInterpolationDeprecation(_DeprecationTestCase): # Deprecated 2021-11-08, NumPy 1.22 @pytest.mark.parametrize("func", diff --git a/numpy/_core/tests/test_finfo.py b/numpy/_core/tests/test_finfo.py new file mode 100644 index 000000000000..572490c1eb08 --- /dev/null +++ b/numpy/_core/tests/test_finfo.py @@ -0,0 +1,83 @@ +import pytest + +import numpy as np +from numpy import exp2, log10 +from numpy._core import numerictypes as ntypes + + +class MachArLike: + """Minimal class to simulate machine arithmetic parameters.""" + def __init__(self, dtype, machep, negep, minexp, maxexp, nmant, iexp): + self.dtype = dtype + self.machep = machep + self.negep = negep + self.minexp = minexp + self.maxexp = maxexp + self.nmant = nmant + self.iexp = iexp + self.eps = exp2(dtype(-nmant)) + self.epsneg = exp2(dtype(negep)) + self.precision = int(-log10(self.eps)) + self.resolution = dtype(10) ** (-self.precision) + + +@pytest.fixture +def float16_ma(): + """Machine arithmetic parameters for float16.""" + f16 = ntypes.float16 + return MachArLike(f16, + machep=-10, + negep=-11, + minexp=-14, + maxexp=16, + nmant=10, + iexp=5) + + +@pytest.fixture +def float32_ma(): + """Machine arithmetic parameters for float32.""" + f32 = ntypes.float32 + return MachArLike(f32, + machep=-23, + negep=-24, + minexp=-126, + maxexp=128, + nmant=23, + iexp=8) + + +@pytest.fixture +def float64_ma(): + """Machine arithmetic parameters for float64.""" + f64 = ntypes.float64 + return MachArLike(f64, + machep=-52, + negep=-53, + minexp=-1022, + maxexp=1024, + nmant=52, + iexp=11) + + +@pytest.mark.parametrize("dtype,ma_fixture", [ + (np.half, "float16_ma"), + (np.float32, 
"float32_ma"), + (np.float64, "float64_ma"), +]) +@pytest.mark.parametrize("prop", [ + 'machep', 'negep', 'minexp', 'maxexp', 'nmant', 'iexp', + 'eps', 'epsneg', 'precision', 'resolution' +]) +def test_finfo_properties(dtype, ma_fixture, prop, request): + """Test that finfo properties match expected machine arithmetic values.""" + ma = request.getfixturevalue(ma_fixture) + finfo = np.finfo(dtype) + + actual = getattr(finfo, prop) + expected = getattr(ma, prop) + + assert actual == expected, ( + f"finfo({dtype}) property '{prop}' mismatch: " + f"expected {expected}, got {actual}" + ) diff --git a/numpy/_core/tests/test_getlimits.py b/numpy/_core/tests/test_getlimits.py index 721c6ac6cdf9..4e911b89e89f 100644 --- a/numpy/_core/tests/test_getlimits.py +++ b/numpy/_core/tests/test_getlimits.py @@ -9,7 +9,6 @@ import numpy as np from numpy import double, half, longdouble, single from numpy._core import finfo, iinfo -from numpy._core.getlimits import _discovered_machar, _float_ma from numpy.testing import assert_, assert_equal, assert_raises ################################################## @@ -139,53 +138,20 @@ def test_instances(): finfo(np.int64(1)) -def assert_ma_equal(discovered, ma_like): - # Check MachAr-like objects same as calculated MachAr instances - for key, value in discovered.__dict__.items(): - assert_equal(value, getattr(ma_like, key)) - if hasattr(value, 'shape'): - assert_equal(value.shape, getattr(ma_like, key).shape) - assert_equal(value.dtype, getattr(ma_like, key).dtype) - - -def test_known_types(): - # Test we are correctly compiling parameters for known types - for ftype, ma_like in ((np.float16, _float_ma[16]), - (np.float32, _float_ma[32]), - (np.float64, _float_ma[64])): - assert_ma_equal(_discovered_machar(ftype), ma_like) - # Suppress warning for broken discovery of double double on PPC - with np.errstate(all='ignore'): - ld_ma = _discovered_machar(np.longdouble) - bytes = np.dtype(np.longdouble).itemsize - if (ld_ma.it, ld_ma.maxexp) == 
(63, 16384) and bytes in (12, 16): - # 80-bit extended precision - assert_ma_equal(ld_ma, _float_ma[80]) - elif (ld_ma.it, ld_ma.maxexp) == (112, 16384) and bytes == 16: - # IEE 754 128-bit - assert_ma_equal(ld_ma, _float_ma[128]) - - def test_subnormal_warning(): """Test that the subnormal is zero warning is not being raised.""" - with np.errstate(all='ignore'): - ld_ma = _discovered_machar(np.longdouble) - bytes = np.dtype(np.longdouble).itemsize with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') - if (ld_ma.it, ld_ma.maxexp) == (63, 16384) and bytes in (12, 16): - # 80-bit extended precision - ld_ma.smallest_subnormal - assert len(w) == 0 - elif (ld_ma.it, ld_ma.maxexp) == (112, 16384) and bytes == 16: - # IEE 754 128-bit - ld_ma.smallest_subnormal - assert len(w) == 0 - else: - # Double double - ld_ma.smallest_subnormal - # This test may fail on some platforms - assert len(w) == 0 + # Test for common float types + for dtype in [np.float16, np.float32, np.float64]: + f = finfo(dtype) + _ = f.smallest_subnormal + # Also test longdouble + with np.errstate(all='ignore'): + fld = finfo(np.longdouble) + _ = fld.smallest_subnormal + # Check no warnings were raised + assert len(w) == 0 def test_plausible_finfo(): diff --git a/numpy/_core/tests/test_longdouble.py b/numpy/_core/tests/test_longdouble.py index f7edd9774573..a7aa9145711a 100644 --- a/numpy/_core/tests/test_longdouble.py +++ b/numpy/_core/tests/test_longdouble.py @@ -291,7 +291,8 @@ def test_array_repr(): b = np.array([1], dtype=np.longdouble) if not np.all(a != b): raise ValueError("precision loss creating arrays") - assert_(repr(a) != repr(b)) + with np.printoptions(precision=LD_INFO.precision + 1): + assert_(repr(a) != repr(b)) # # Locale tests: scalar types formatting should be independent of the locale diff --git a/numpy/_core/tests/test_machar.py b/numpy/_core/tests/test_machar.py deleted file mode 100644 index 2d772dd51233..000000000000 --- 
a/numpy/_core/tests/test_machar.py +++ /dev/null @@ -1,30 +0,0 @@ -""" -Test machar. Given recent changes to hardcode type data, we might want to get -rid of both MachAr and this test at some point. - -""" -import numpy._core.numerictypes as ntypes -from numpy import array, errstate -from numpy._core._machar import MachAr - - -class TestMachAr: - def _run_machar_highprec(self): - # Instantiate MachAr instance with high enough precision to cause - # underflow - try: - hiprec = ntypes.float96 - MachAr(lambda v: array(v, hiprec)) - except AttributeError: - # Fixme, this needs to raise a 'skip' exception. - "Skipping test: no ntypes.float96 available on this platform." - - def test_underlow(self): - # Regression test for #759: - # instantiating MachAr for dtype = np.float96 raises spurious warning. - with errstate(all='raise'): - try: - self._run_machar_highprec() - except FloatingPointError as e: - msg = f"Caught {e} exception, should not have been raised." - raise AssertionError(msg) diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py index fba8e8abbe6d..751c66584275 100644 --- a/numpy/_core/tests/test_numeric.py +++ b/numpy/_core/tests/test_numeric.py @@ -1000,7 +1000,7 @@ def test_floating_exceptions(self, typecode): if np.dtype(ftype).kind == 'f': # Get some extreme values for the type fi = np.finfo(ftype) - ft_tiny = fi._machar.tiny + ft_tiny = fi.tiny ft_max = fi.max ft_eps = fi.eps underflow = 'underflow' @@ -1009,7 +1009,7 @@ def test_floating_exceptions(self, typecode): # 'c', complex, corresponding real dtype rtype = type(ftype(0).real) fi = np.finfo(rtype) - ft_tiny = ftype(fi._machar.tiny) + ft_tiny = ftype(fi.tiny) ft_max = ftype(fi.max) ft_eps = ftype(fi.eps) # The complex types raise different exceptions diff --git a/numpy/typing/tests/data/reveal/arrayterator.pyi b/numpy/typing/tests/data/reveal/arrayterator.pyi index 470160c24de3..85eeff4add08 100644 --- a/numpy/typing/tests/data/reveal/arrayterator.pyi +++ 
b/numpy/typing/tests/data/reveal/arrayterator.pyi @@ -13,7 +13,7 @@ assert_type(ar_iter.start, list[int]) assert_type(ar_iter.stop, list[int]) assert_type(ar_iter.step, list[int]) assert_type(ar_iter.shape, tuple[Any, ...]) -assert_type(ar_iter.flat, Generator[np.int64, None, None]) +assert_type(ar_iter.flat, Generator[np.int64]) assert_type(ar_iter.__array__(), npt.NDArray[np.int64]) From b5b2d3d3d527319befcb2656f7c6ce5ff311019b Mon Sep 17 00:00:00 2001 From: Sayed Awad Date: Mon, 6 Oct 2025 03:11:17 +0300 Subject: [PATCH 0533/1718] MAINT: Fix MSVC warnings and add CI check with allowlist (#29868) --- .github/check-warnings/action.yml | 38 +++++++++++++++++++ .../check-warnings/msvc-allowed-warnings.txt | 31 +++++++++++++++ .github/workflows/windows.yml | 9 ++++- numpy/_core/src/common/npy_config.h | 5 +++ numpy/_core/src/multiarray/arraytypes.c.src | 2 +- numpy/_core/src/multiarray/item_selection.c | 27 ++++++------- .../multiarray/lowlevel_strided_loops.c.src | 16 ++++---- numpy/_core/src/multiarray/nditer_constr.c | 6 +-- .../multiarray/stringdtype/static_string.c | 7 ++-- .../src/umath/loops_autovec.dispatch.c.src | 2 +- .../src/umath/loops_logical.dispatch.cpp | 4 +- .../src/umath/loops_minmax.dispatch.c.src | 2 +- numpy/_core/src/umath/scalarmath.c.src | 16 +++++--- numpy/_core/src/umath/string_buffer.h | 6 +++ numpy/linalg/lapack_lite/f2c.c | 2 +- .../random/src/distributions/distributions.c | 10 ++--- numpy/random/src/distributions/logfactorial.c | 2 +- .../random/src/legacy/legacy-distributions.c | 2 +- 18 files changed, 141 insertions(+), 46 deletions(-) create mode 100644 .github/check-warnings/action.yml create mode 100644 .github/check-warnings/msvc-allowed-warnings.txt diff --git a/.github/check-warnings/action.yml b/.github/check-warnings/action.yml new file mode 100644 index 000000000000..f3f6778e229b --- /dev/null +++ b/.github/check-warnings/action.yml @@ -0,0 +1,38 @@ +name: "Check Warnings" +description: "Filter build warnings against an 
allowlist" + +inputs: + log-file: + description: "Path to build log file" + required: true + allowlist: + description: "Path to allowed warnings regex file" + required: true + warning-regex: + description: "Regex to extract warnings from the log" + required: true + +runs: + using: "composite" + steps: + - name: Extract warnings + shell: bash + run: | + echo "Extracting warnings from ${{ inputs.log-file }} using regex: ${{ inputs['warning-regex'] }}" + grep -E "${{ inputs['warning-regex'] }}" "${{ inputs.log-file }}" | tee warnings.log || true + + if [ ! -s warnings.log ]; then + echo "No warnings found." + exit 0 + fi + + echo "Filtering against allowlist ${{ inputs.allowlist }}" + grep -v -F -f "${{ inputs.allowlist }}" warnings.log | tee disallowed.log || true + + if [ -s disallowed.log ]; then + echo "::error::Disallowed warnings detected:" + cat disallowed.log + exit 1 + else + echo "All warnings are allowed." + fi diff --git a/.github/check-warnings/msvc-allowed-warnings.txt b/.github/check-warnings/msvc-allowed-warnings.txt new file mode 100644 index 000000000000..7d2c149629ec --- /dev/null +++ b/.github/check-warnings/msvc-allowed-warnings.txt @@ -0,0 +1,31 @@ +../numpy/linalg/lapack_lite/f2c.c(194): warning C4244: 'return': conversion from 'double' to 'integer', possible loss of data +../numpy/linalg/lapack_lite/f2c_c_lapack.c(230): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data +../numpy/linalg/lapack_lite/f2c_c_lapack.c(250): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data +../numpy/linalg/lapack_lite/f2c_c_lapack.c(1530): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data +../numpy/linalg/lapack_lite/f2c_s_lapack.c(1625): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data +../numpy/linalg/lapack_lite/f2c_s_lapack.c(1645): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data 
+../numpy/linalg/lapack_lite/f2c_s_lapack.c(2865): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data +../numpy/linalg/lapack_lite/f2c_s_lapack.c(2882): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data +../numpy/linalg/lapack_lite/f2c_s_lapack.c(2894): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data +../numpy/linalg/lapack_lite/f2c_config.c(1368): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data +../numpy/linalg/lapack_lite/f2c.c(194): warning C4244: 'return': conversion from 'double' to 'integer', possible loss of data +../numpy/linalg/lapack_lite/f2c_c_lapack.c(230): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data +../numpy/linalg/lapack_lite/f2c_c_lapack.c(250): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data +../numpy/linalg/lapack_lite/f2c_c_lapack.c(1530): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data +../numpy/linalg/lapack_lite/f2c_s_lapack.c(1625): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data +../numpy/linalg/lapack_lite/f2c_s_lapack.c(1645): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data +../numpy/linalg/lapack_lite/f2c_s_lapack.c(2865): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data +../numpy/linalg/lapack_lite/f2c_s_lapack.c(2882): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data +../numpy/linalg/lapack_lite/f2c_s_lapack.c(2894): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data +numpy/random/_generator.cp311-win_arm64.pyd.p/numpy/random/_generator.pyx.c(26966): warning C4244: 'function': conversion from 'int64_t' to 'double' +numpy/random/_generator.cp311-win_arm64.pyd.p/numpy/random/_generator.pyx.c(39070): warning C4244: 'function': conversion from 'int64_t' to 'double' 
+numpy/random/_generator.cp311-win32.pyd.p/numpy/random/_generator.pyx.c(26966): warning C4244: 'function': conversion from 'int64_t' to 'double' +numpy/random/_generator.cp311-win32.pyd.p/numpy/random/_generator.pyx.c(39070): warning C4244: 'function': conversion from 'int64_t' to 'double' +cl : Command line warning D9025 : overriding '/arch:SSE2' with '/arch:AVX2' +cl : Command line warning D9025 : overriding '/arch:SSE2' with '/arch:AVX512' +cl : Command line warning D9025 : overriding '/arch:SSE2' with '/arch:AVX2' +D:\a\numpy\numpy\numpy\_core\src\npysort\x86-simd-sort/src/avx512-16bit-qsort.hpp(52): warning C4309: 'argument': truncation of constant value +D:\a\numpy\numpy\numpy\_core\src\npysort\x86-simd-sort/src/avx512-16bit-qsort.hpp(53): warning C4309: 'argument': truncation of constant value +D:\a\numpy\numpy\numpy\_core\src\npysort\x86-simd-sort/src/avx512-16bit-qsort.hpp(65): warning C4309: 'argument': truncation of constant value +D:\a\numpy\numpy\numpy\_core\src\npysort\x86-simd-sort/src/avx512-16bit-qsort.hpp(139): warning C4556: + diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 2cfb98e67aba..b86543189941 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -96,7 +96,14 @@ jobs: - name: Build and install run: | - python -m pip install . -v -Ccompile-args="-j2" -Csetup-args="-Dallow-noblas=true" + python -m pip install . 
-v -Ccompile-args="-j2" -Csetup-args="-Dallow-noblas=true" 2>&1 | tee build.log + + - name: Check warnings + uses: ./.github/check-warnings + with: + log-file: ./build.log + allowlist: ./.github/check-warnings/msvc-allowed-warnings.txt + warning-regex: "warning C|Command line warning" - name: Install test dependencies run: | diff --git a/numpy/_core/src/common/npy_config.h b/numpy/_core/src/common/npy_config.h index 82641a85509e..ccb81ca7110b 100644 --- a/numpy/_core/src/common/npy_config.h +++ b/numpy/_core/src/common/npy_config.h @@ -1,6 +1,11 @@ #ifndef NUMPY_CORE_SRC_COMMON_NPY_CONFIG_H_ #define NUMPY_CORE_SRC_COMMON_NPY_CONFIG_H_ +#if defined(_MSC_VER) +// Suppress warn C4146: -x is valid for unsigned (wraps around) +#pragma warning(disable:4146) +#endif + #include "config.h" #include "npy_cpu_dispatch.h" // brings NPY_HAVE_[CPU features] #include "numpy/numpyconfig.h" diff --git a/numpy/_core/src/multiarray/arraytypes.c.src b/numpy/_core/src/multiarray/arraytypes.c.src index 0a063f6ba07a..2828b9543fbd 100644 --- a/numpy/_core/src/multiarray/arraytypes.c.src +++ b/numpy/_core/src/multiarray/arraytypes.c.src @@ -1353,7 +1353,7 @@ static void while (n--) { @type1@ t = (@type1@)*ip++; - *op++ = t; + *op++ = (@type2@)t; #if @steps@ == 2 /* complex type */ *op++ = 0; #endif diff --git a/numpy/_core/src/multiarray/item_selection.c b/numpy/_core/src/multiarray/item_selection.c index 9ec70acde6dd..5e622ed59e08 100644 --- a/numpy/_core/src/multiarray/item_selection.c +++ b/numpy/_core/src/multiarray/item_selection.c @@ -625,25 +625,26 @@ npy_fastputmask( npy_intp ni, npy_intp nv, npy_intp chunk) { if (chunk == 1) { - return npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk); + npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk); } - if (chunk == 2) { - return npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk); + else if (chunk == 2) { + npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk); } - if (chunk == 4) { - return 
npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk); + else if (chunk == 4) { + npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk); } - if (chunk == 8) { - return npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk); + else if (chunk == 8) { + npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk); } - if (chunk == 16) { - return npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk); + else if (chunk == 16) { + npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk); } - if (chunk == 32) { - return npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk); + else if (chunk == 32) { + npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk); + } + else { + npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk); } - - return npy_fastputmask_impl(dest, src, mask_data, ni, nv, chunk); } diff --git a/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src b/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src index e75dd37cc465..368d299d5d01 100644 --- a/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src +++ b/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src @@ -764,7 +764,7 @@ npy_uint64 _npy_halfbits_to_doublebits(npy_uint16 h){ #elif defined(__ARM_FP16_FORMAT_IEEE) return npy_halfbits_to_doublebits(h); #else - return (double)(ToDoubleBits(h)); + return ToDoubleBits(h); #endif } #endif @@ -825,12 +825,12 @@ npy_uint64 _npy_halfbits_to_doublebits(npy_uint16 h){ * #type2max = 0, * UCHAR_MAX, USHRT_MAX, UINT_MAX, ULONG_MAX, ULLONG_MAX, * SCHAR_MAX, SHRT_MAX, INT_MAX, LONG_MAX, LLONG_MAX, - * 65500, FLT_MAX, DBL_MAX, LDBL_MAX, + * 65500.0f, FLT_MAX, DBL_MAX, LDBL_MAX, * FLT_MAX, DBL_MAX, LDBL_MAX# * #type2min = 0, * 0, 0, 0, 0, 0, * SCHAR_MIN, SHRT_MIN, INT_MIN, LONG_MIN, LLONG_MIN, - * -65500, -FLT_MAX, -DBL_MAX, -LDBL_MAX, + * -65500.0f, -FLT_MAX, -DBL_MAX, -LDBL_MAX, * -FLT_MAX, -DBL_MAX, -LDBL_MAX# * #is_bool2 = 1, 0*17# * #is_emu_half2 = 0*11, EMULATED_FP16, 0*6# @@ -848,7 +848,10 @@ npy_uint64 
_npy_halfbits_to_doublebits(npy_uint16 h){ #if !(NPY_USE_UNALIGNED_ACCESS && !@aligned@) -/* For emulated half types, don't use actual double/float types in conversion */ +/* + * For emulated half types, don't use actual double/float types in conversion + * except for *_check_same_value_*(), follow _ROUND_TRIP and _TO_RTYPE1. + */ #if @is_emu_half1@ || @is_emu_half2@ # if @is_float1@ @@ -896,13 +899,12 @@ npy_uint64 _npy_halfbits_to_doublebits(npy_uint16 h){ #elif @is_emu_half2@ # define _TO_RTYPE1(x) (@rtype1@)(x) - # if @is_float1@ # define _CONVERT_FN(x) npy_floatbits_to_halfbits(x) -# define _ROUND_TRIP(x) (@rtype1@)npy_halfbits_to_floatbits(_CONVERT_FN(x)) +# define _ROUND_TRIP(x) npy_half_to_float(npy_float_to_half(x)) # elif @is_double1@ # define _CONVERT_FN(x) npy_doublebits_to_halfbits(x) -# define _ROUND_TRIP(x) (@rtype1@)_npy_halfbits_to_doublebits(_CONVERT_FN(x)) +# define _ROUND_TRIP(x) npy_half_to_double(npy_double_to_half(x)) # elif @is_emu_half1@ # define _CONVERT_FN(x) (x) # define _ROUND_TRIP(x) (x) diff --git a/numpy/_core/src/multiarray/nditer_constr.c b/numpy/_core/src/multiarray/nditer_constr.c index a8f13a73ee1e..ffe37e80c9be 100644 --- a/numpy/_core/src/multiarray/nditer_constr.c +++ b/numpy/_core/src/multiarray/nditer_constr.c @@ -2126,11 +2126,11 @@ npyiter_find_buffering_setup(NpyIter *iter, npy_intp buffersize) break; /* Avoid a zero coresize. */ } - double bufsize = size; - if (bufsize > maximum_size && + double bufsize = (double)size; + if (size > maximum_size && (cost > 1 || !(itflags & NPY_ITFLAG_GROWINNER))) { /* If we need buffering, limit size in cost calculation. 
*/ - bufsize = maximum_size; + bufsize = (double)maximum_size; } NPY_IT_DBG_PRINT(" dim=%d, n_buffered=%d, cost=%g @bufsize=%g (prev scaled cost=%g)\n", diff --git a/numpy/_core/src/multiarray/stringdtype/static_string.c b/numpy/_core/src/multiarray/stringdtype/static_string.c index 89b53bcb8538..f5c2025e183a 100644 --- a/numpy/_core/src/multiarray/stringdtype/static_string.c +++ b/numpy/_core/src/multiarray/stringdtype/static_string.c @@ -177,14 +177,15 @@ arena_malloc(npy_string_arena *arena, npy_string_realloc_func r, size_t size) } else if (((ARENA_EXPAND_FACTOR * arena->size) - arena->cursor) > string_storage_size) { - newsize = ARENA_EXPAND_FACTOR * arena->size; + newsize = (size_t)(ARENA_EXPAND_FACTOR * arena->size); } else { newsize = arena->size + string_storage_size; } - if ((arena->cursor + size) >= newsize) { + // If there enough room for both the payload and its header + if ((arena->cursor + string_storage_size) > newsize) { // need extra room beyond the expansion factor, leave some padding - newsize = ARENA_EXPAND_FACTOR * (arena->cursor + size); + newsize = (size_t)(ARENA_EXPAND_FACTOR * (arena->cursor + string_storage_size)); } // passing a NULL buffer to realloc is the same as malloc char *newbuf = r(arena->buffer, newsize); diff --git a/numpy/_core/src/umath/loops_autovec.dispatch.c.src b/numpy/_core/src/umath/loops_autovec.dispatch.c.src index 983fa1b5eb80..a8fb2084a241 100644 --- a/numpy/_core/src/umath/loops_autovec.dispatch.c.src +++ b/numpy/_core/src/umath/loops_autovec.dispatch.c.src @@ -51,7 +51,7 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_square) NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_reciprocal) (char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data)) { - UNARY_LOOP_FAST(@type@, @type@, *out = 1.0 / in); + UNARY_LOOP_FAST(@type@, @type@, *out = (@type@)(1.0 / in)); } /**begin repeat1 diff --git a/numpy/_core/src/umath/loops_logical.dispatch.cpp 
b/numpy/_core/src/umath/loops_logical.dispatch.cpp index c05584f467aa..5c1834cc29e2 100644 --- a/numpy/_core/src/umath/loops_logical.dispatch.cpp +++ b/numpy/_core/src/umath/loops_logical.dispatch.cpp @@ -61,12 +61,12 @@ HWY_INLINE HWY_ATTR Vec simd_logical_or_u8(Vec a, Vec return byte_to_true(r); } -HWY_INLINE HWY_ATTR npy_bool simd_any_u8(Vec v) +HWY_INLINE HWY_ATTR bool simd_any_u8(Vec v) { return hn::ReduceMax(_Tag(), v) != 0; } -HWY_INLINE HWY_ATTR npy_bool simd_all_u8(Vec v) +HWY_INLINE HWY_ATTR bool simd_all_u8(Vec v) { return hn::ReduceMin(_Tag(), v) != 0; } diff --git a/numpy/_core/src/umath/loops_minmax.dispatch.c.src b/numpy/_core/src/umath/loops_minmax.dispatch.c.src index c11f391f9159..a33297ca83d5 100644 --- a/numpy/_core/src/umath/loops_minmax.dispatch.c.src +++ b/numpy/_core/src/umath/loops_minmax.dispatch.c.src @@ -123,7 +123,7 @@ simd_reduce_c_@intrin@_@sfx@(const npyv_lanetype_@sfx@ *ip, npyv_lanetype_@sfx@ npyv_@sfx@ acc = npyv_setall_@sfx@(op1[0]); for (; len >= wstep; len -= wstep, ip += wstep) { #ifdef NPY_HAVE_SSE2 - NPY_PREFETCH(ip + wstep, 0, 3); + NPY_PREFETCH((const char*)(ip + wstep), 0, 3); #endif npyv_@sfx@ v0 = npyv_load_@sfx@(ip + vstep * 0); npyv_@sfx@ v1 = npyv_load_@sfx@(ip + vstep * 1); diff --git a/numpy/_core/src/umath/scalarmath.c.src b/numpy/_core/src/umath/scalarmath.c.src index a565eee8f939..3c57d6866c56 100644 --- a/numpy/_core/src/umath/scalarmath.c.src +++ b/numpy/_core/src/umath/scalarmath.c.src @@ -826,6 +826,10 @@ typedef enum { * npy_long, npy_ulong, npy_longlong, npy_ulonglong, * npy_half, npy_float, npy_double, npy_longdouble, * npy_cfloat, npy_cdouble, npy_clongdouble# + * #scalar_type = npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int, npy_uint, + * npy_long, npy_ulong, npy_longlong, npy_ulonglong, + * npy_half, npy_float, npy_double, npy_longdouble, + * npy_float, npy_double, npy_longdouble# * #c = x*14, f, , l# */ @@ -846,10 +850,10 @@ typedef enum { *result = npy_float_to_half((float)(value)) #elif 
defined(IS_CFLOAT) || defined(IS_CDOUBLE) || defined(IS_CLONGDOUBLE) #define CONVERT_TO_RESULT(value) \ - npy_csetreal@c@(result, value); \ + npy_csetreal@c@(result, ((@scalar_type@)(value))); \ npy_csetimag@c@(result, 0) #else - #define CONVERT_TO_RESULT(value) *result = value + #define CONVERT_TO_RESULT(value) *result = ((@type@)(value)) #endif @@ -1386,7 +1390,7 @@ static PyObject * case DEFER_TO_OTHER_KNOWN_SCALAR: Py_RETURN_NOTIMPLEMENTED; case CONVERSION_SUCCESS: - other_val = other_val_conv; /* Need a float value */ + other_val = (double)other_val_conv; /* Need a float value */ break; /* successfully extracted value we can proceed */ case OTHER_IS_UNKNOWN_OBJECT: case PROMOTION_REQUIRED: @@ -1405,12 +1409,12 @@ static PyObject * npy_clear_floatstatus_barrier((char*)&arg1); if (is_forward) { - arg1 = PyArrayScalar_VAL(a, @Name@); + arg1 = (double)PyArrayScalar_VAL(a, @Name@); arg2 = other_val; } else { arg1 = other_val; - arg2 = PyArrayScalar_VAL(b, @Name@); + arg2 = (double)PyArrayScalar_VAL(b, @Name@); } /* Note that arguments are already float64, so we can just divide */ @@ -1831,7 +1835,7 @@ static PyObject * } return @func@(@to_ctype@(npy_creal@n@(PyArrayScalar_VAL(obj, @Name@)))); #else - return @func@(@to_ctype@(PyArrayScalar_VAL(obj, @Name@))); + return @func@((double)(@to_ctype@(PyArrayScalar_VAL(obj, @Name@)))); #endif } /**end repeat**/ diff --git a/numpy/_core/src/umath/string_buffer.h b/numpy/_core/src/umath/string_buffer.h index dafedcbc03ff..1e7bea49a365 100644 --- a/numpy/_core/src/umath/string_buffer.h +++ b/numpy/_core/src/umath/string_buffer.h @@ -17,6 +17,12 @@ #define CHECK_OVERFLOW(index) if (buf + (index) >= after) return 0 #define MSB(val) ((val) >> 7 & 1) +#ifdef _MSC_VER +// MSVC sometimes complains (C4715: "not all control paths return a value") +// on switch statements over enum classes, even though all enum values are covered. +// This warning is suppressed here to avoid invasive changes. 
+# pragma warning(disable:4715) +#endif enum class ENCODING { ASCII, UTF32, UTF8 diff --git a/numpy/linalg/lapack_lite/f2c.c b/numpy/linalg/lapack_lite/f2c.c index 47e4d5729b83..9afac89e61d1 100644 --- a/numpy/linalg/lapack_lite/f2c.c +++ b/numpy/linalg/lapack_lite/f2c.c @@ -191,7 +191,7 @@ integer i_dnnt(x) doublereal *x; integer i_dnnt(doublereal *x) #endif { -return( (*x)>=0 ? +return (integer)( (*x)>=0 ? floor(*x + .5) : -floor(.5 - *x) ); } diff --git a/numpy/random/src/distributions/distributions.c b/numpy/random/src/distributions/distributions.c index 2b7c69481064..8102fee72323 100644 --- a/numpy/random/src/distributions/distributions.c +++ b/numpy/random/src/distributions/distributions.c @@ -597,7 +597,7 @@ static RAND_INT_TYPE random_poisson_ptrs(bitgen_t *bitgen_state, double lam) { /* log(V) == log(0.0) ok here */ /* if U==0.0 so that us==0.0, log is ok since always returns */ if ((log(V) + log(invalpha) - log(a / (us * us) + b)) <= - (-lam + k * loglam - random_loggam(k + 1))) { + (-lam + (double)k * loglam - random_loggam((double)k + 1))) { return k; } } @@ -733,10 +733,10 @@ RAND_INT_TYPE random_binomial_btpe(bitgen_t *bitgen_state, RAND_INT_TYPE n, if (A > (t + rho)) goto Step10; - x1 = y + 1; - f1 = m + 1; - z = n + 1 - m; - w = n - y + 1; + x1 = (double)y + 1; + f1 = (double)m + 1; + z = (double)n + 1 - (double)m; + w = (double)n - (double)y + 1; x2 = x1 * x1; f2 = f1 * f1; z2 = z * z; diff --git a/numpy/random/src/distributions/logfactorial.c b/numpy/random/src/distributions/logfactorial.c index 1305164699fa..337ec1a98db5 100644 --- a/numpy/random/src/distributions/logfactorial.c +++ b/numpy/random/src/distributions/logfactorial.c @@ -154,5 +154,5 @@ double logfactorial(int64_t k) * was within 2 ULP of the best 64 bit floating point value for * k up to 10000000.) 
*/ - return (k + 0.5)*log(k) - k + (halfln2pi + (1.0/k)*(1/12.0 - 1/(360.0*k*k))); + return (k + 0.5)*log((double)k) - k + (halfln2pi + (1.0/k)*(1/12.0 - 1/(360.0*k*k))); } diff --git a/numpy/random/src/legacy/legacy-distributions.c b/numpy/random/src/legacy/legacy-distributions.c index 385c4c239a57..e84bd19fdaee 100644 --- a/numpy/random/src/legacy/legacy-distributions.c +++ b/numpy/random/src/legacy/legacy-distributions.c @@ -469,7 +469,7 @@ int64_t legacy_random_geometric(bitgen_t *bitgen_state, double p) { void legacy_random_multinomial(bitgen_t *bitgen_state, RAND_INT_TYPE n, RAND_INT_TYPE *mnix, double *pix, npy_intp d, binomial_t *binomial) { - return random_multinomial(bitgen_state, n, mnix, pix, d, binomial); + random_multinomial(bitgen_state, n, mnix, pix, d, binomial); } double legacy_vonmises(bitgen_t *bitgen_state, double mu, double kappa) { From 891fa0d236fb2e9ed6de67f41f62e1713c5204fa Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Sun, 5 Oct 2025 21:54:29 -0400 Subject: [PATCH 0534/1718] DOC: Remove unused arrays from the structured dtype ufunc example. (#29882) [skip actions][skip azp][skip cirrus] --- doc/source/user/c-info.ufunc-tutorial.rst | 6 ------ 1 file changed, 6 deletions(-) diff --git a/doc/source/user/c-info.ufunc-tutorial.rst b/doc/source/user/c-info.ufunc-tutorial.rst index 76e8af63462f..8d8b267a6bd1 100644 --- a/doc/source/user/c-info.ufunc-tutorial.rst +++ b/doc/source/user/c-info.ufunc-tutorial.rst @@ -865,12 +865,6 @@ The C file is given below. } } - /* This a pointer to the above function */ - PyUFuncGenericFunction funcs[1] = {&add_uint64_triplet}; - - /* These are the input and return dtypes of add_uint64_triplet. 
*/ - static const char types[3] = {NPY_UINT64, NPY_UINT64, NPY_UINT64}; - static struct PyModuleDef moduledef = { PyModuleDef_HEAD_INIT, "struct_ufunc_test", From 3c7801da49f4c59fd088a17b81ddee2cee419872 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Mon, 6 Oct 2025 09:26:19 +0200 Subject: [PATCH 0535/1718] MAINT: Rewrite setitem to use the new API (mostly) (#29880) This has a mild speed advantage since `PyArray_Pack` had this hack in place to ensure that we have an "array" to pass on. This hack is now only used in extreme cases (i.e. user dtypes). This gives a very slight speed advantage, but mostly, I like to change this. It would be nice to change `getitem` as well, but `getitem` is trickier due to structured dtypes. I wanted to first just remove `->setitem` but one (brutal!) test failed and I reconsidered things thinking that someone might be using `descr->setitem` without an array which would be broken without keeping an explicit definition around. The 0-D unpacking path should be unimportant these days (it only applies to _nested_ arrays so the tests don't even notice it) but I kept it around for now anyway. 
Signed-off-by: Sebastian Berg * Small style/docs fixup based on review Signed-off-by: Sebastian Berg --------- Signed-off-by: Sebastian Berg --- numpy/_core/src/multiarray/array_coercion.c | 15 +- numpy/_core/src/multiarray/arraytypes.c.src | 276 ++++++++++---------- numpy/_core/src/multiarray/arraytypes.h.src | 2 +- numpy/_core/src/multiarray/common.c | 24 ++ numpy/_core/src/multiarray/common.h | 11 + numpy/_core/src/multiarray/dtypemeta.c | 25 +- numpy/_core/src/multiarray/dtypemeta.h | 3 +- numpy/_core/src/multiarray/scalarapi.c | 11 +- numpy/_core/src/umath/_scaled_float_dtype.c | 7 +- numpy/_core/src/umath/scalarmath.c.src | 8 +- numpy/_core/tests/test_array_coercion.py | 5 - 11 files changed, 207 insertions(+), 180 deletions(-) diff --git a/numpy/_core/src/multiarray/array_coercion.c b/numpy/_core/src/multiarray/array_coercion.c index 8271fb6812d1..2de639611bf6 100644 --- a/numpy/_core/src/multiarray/array_coercion.c +++ b/numpy/_core/src/multiarray/array_coercion.c @@ -479,20 +479,13 @@ npy_cast_raw_scalar_item( NPY_NO_EXPORT int PyArray_Pack(PyArray_Descr *descr, void *item, PyObject *value) { - PyArrayObject_fields arr_fields = { - .flags = NPY_ARRAY_WRITEABLE, /* assume array is not behaved. */ - }; - Py_SET_TYPE(&arr_fields, &PyArray_Type); - Py_SET_REFCNT(&arr_fields, 1); - if (NPY_UNLIKELY(descr->type_num == NPY_OBJECT)) { /* * We always have store objects directly, casting will lose some * type information. Any other dtype discards the type information. * TODO: For a Categorical[object] this path may be necessary? 
*/ - arr_fields.descr = descr; - return PyDataType_GetArrFuncs(descr)->setitem(value, item, &arr_fields); + return NPY_DT_CALL_setitem(descr, value, item); } /* discover_dtype_from_pyobject includes a check for is_known_scalar_type */ @@ -527,8 +520,7 @@ PyArray_Pack(PyArray_Descr *descr, void *item, PyObject *value) if (DType == NPY_DTYPE(descr) || DType == (PyArray_DTypeMeta *)Py_None) { /* We can set the element directly (or at least will try to) */ Py_XDECREF(DType); - arr_fields.descr = descr; - return PyDataType_GetArrFuncs(descr)->setitem(value, item, &arr_fields); + return NPY_DT_CALL_setitem(descr, value, item); } PyArray_Descr *tmp_descr; tmp_descr = NPY_DT_CALL_discover_descr_from_pyobject(DType, value); @@ -546,8 +538,7 @@ PyArray_Pack(PyArray_Descr *descr, void *item, PyObject *value) if (PyDataType_FLAGCHK(tmp_descr, NPY_NEEDS_INIT)) { memset(data, 0, tmp_descr->elsize); } - arr_fields.descr = tmp_descr; - if (PyDataType_GetArrFuncs(tmp_descr)->setitem(value, data, &arr_fields) < 0) { + if (NPY_DT_CALL_setitem(tmp_descr, value, data) < 0) { PyObject_Free(data); Py_DECREF(tmp_descr); return -1; diff --git a/numpy/_core/src/multiarray/arraytypes.c.src b/numpy/_core/src/multiarray/arraytypes.c.src index 2828b9543fbd..da00596e7e24 100644 --- a/numpy/_core/src/multiarray/arraytypes.c.src +++ b/numpy/_core/src/multiarray/arraytypes.c.src @@ -46,6 +46,20 @@ #include "umathmodule.h" #include "npy_static_data.h" +/**begin repeat + * #NAME = BOOL, + * BYTE, UBYTE, SHORT, USHORT, INT, UINT, + * LONG, ULONG, LONGLONG, ULONGLONG, + * HALF, FLOAT, DOUBLE, LONGDOUBLE, + * CFLOAT, CDOUBLE, CLONGDOUBLE, + * DATETIME, TIMEDELTA# + */ +static inline void +@NAME@_copyswap(void *dst, void *src, int swap, void *arr); + +/**end repeat**/ + + /* * Define a stack allocated dummy array with only the minimum information set: * 1. The descr, the main field interesting here. 
@@ -98,7 +112,7 @@ MyPyFloat_AsDouble(PyObject *obj) } num = PyNumber_Float(obj); if (num == NULL) { - return NPY_NAN; + return -1; } ret = PyFloat_AS_DOUBLE(num); Py_DECREF(num); @@ -127,7 +141,7 @@ MyPyFloat_AsHalf(PyObject *obj) npy_half res = npy_double_to_half(d_val); if (NPY_UNLIKELY(npy_half_isinf(res) && !npy_isinf(d_val))) { if (PyUFunc_GiveFloatingpointErrors("cast", NPY_FPE_OVERFLOW) < 0) { - return npy_double_to_half(-1.); + return -1; // exception return as integer } } return res; @@ -139,10 +153,16 @@ MyPyFloat_FromHalf(npy_half h) return PyFloat_FromDouble(npy_half_to_double(h)); } -/* Handle case of assigning from an array scalar in setitem */ +/* + * Handle case of assigning from an array scalar in setitem. + * NOTE/TODO(seberg): This was important, but is now only used + * for *nested* 0-D arrays which makes it dubious whether it should + * remain used. + * (At the point of writing, I did not want to worry about BC though.) + */ static int -convert_to_scalar_and_retry(PyObject *op, void *ov, void *vap, - int (*setitem)(PyObject *op, void *ov, void *vap)) +convert_to_scalar_and_retry(PyArray_Descr *descr, PyObject *op, char *ov, + int (*setitem)(PyArray_Descr *descr, PyObject *op, char *ov)) { PyObject *temp; @@ -153,7 +173,7 @@ convert_to_scalar_and_retry(PyObject *op, void *ov, void *vap, return -1; } else { - int res = setitem(temp, ov, vap); + int res = setitem(descr, temp, ov); Py_DECREF(temp); return res; } @@ -326,9 +346,8 @@ static PyObject * } NPY_NO_EXPORT int -@TYPE@_setitem(PyObject *op, void *ov, void *vap) +@TYPE@_setitem(PyArray_Descr *descr, PyObject *op, char *ov) { - PyArrayObject *ap = vap; @type@ temp; /* ensures alignment */ #if @is_int@ @@ -366,28 +385,23 @@ NPY_NO_EXPORT int } else { temp = (@type@)@func2@(op); - } - if (PyErr_Occurred()) { - PyObject *type, *value, *traceback; - PyErr_Fetch(&type, &value, &traceback); - if (PySequence_NoString_Check(op)) { - PyErr_SetString(PyExc_ValueError, - "setting an array element with 
a sequence."); - npy_PyErr_ChainExceptionsCause(type, value, traceback); - } - else { - PyErr_Restore(type, value, traceback); + if (temp == (@type@)-1 && PyErr_Occurred()) { + PyObject *type, *value, *traceback; + PyErr_Fetch(&type, &value, &traceback); + if (PySequence_NoString_Check(op)) { + PyErr_SetString(PyExc_ValueError, + "setting an array element with a sequence."); + npy_PyErr_ChainExceptionsCause(type, value, traceback); + } + else { + PyErr_Restore(type, value, traceback); + } + return -1; } - return -1; - } - if (ap == NULL || PyArray_ISBEHAVED(ap)) { - assert(npy_is_aligned(ov, NPY_ALIGNOF(@type@))); - *((@type@ *)ov)=temp; - } - else { - PyDataType_GetArrFuncs(PyArray_DESCR(ap))->copyswap(ov, &temp, PyArray_ISBYTESWAPPED(ap), - ap); } + // Support descr == NULL for some scalarmath paths. + @TYPE@_copyswap( + ov, &temp, descr != NULL && PyDataType_ISBYTESWAPPED(descr), NULL); return 0; } @@ -433,19 +447,17 @@ static PyObject * * #suffix = f, , l# */ NPY_NO_EXPORT int -@NAME@_setitem(PyObject *op, void *ov, void *vap) +@NAME@_setitem(PyArray_Descr *descr, PyObject *op, char *ov) { - PyArrayObject *ap = vap; Py_complex oop; @type@ temp; - if (PyArray_IsZeroDim(op)) { - return convert_to_scalar_and_retry(op, ov, vap, @NAME@_setitem); - } - if (PyArray_IsScalar(op, @kind@)){ temp = PyArrayScalar_VAL(op, @kind@); } + else if (PyArray_IsZeroDim(op)) { + return convert_to_scalar_and_retry(descr, op, ov, @NAME@_setitem); + } else { if (op == Py_None) { oop.real = NPY_NAN; @@ -504,10 +516,8 @@ NPY_NO_EXPORT int #endif } - memcpy(ov, &temp, NPY_SIZEOF_@NAME@); - if (ap != NULL && PyArray_ISBYTESWAPPED(ap)) { - byte_swap_vector(ov, 2, sizeof(@ftype@)); - } + @NAME@_copyswap( + ov, &temp, descr != NULL && PyDataType_ISBYTESWAPPED(descr), NULL); return 0; } @@ -589,19 +599,17 @@ LONGDOUBLE_getitem(void *ip, void *ap) } NPY_NO_EXPORT int -LONGDOUBLE_setitem(PyObject *op, void *ov, void *vap) +LONGDOUBLE_setitem(PyArray_Descr *descr, PyObject *op, char *ov) { - 
PyArrayObject *ap = vap; /* ensure alignment */ npy_longdouble temp; - if (PyArray_IsZeroDim(op)) { - return convert_to_scalar_and_retry(op, ov, vap, LONGDOUBLE_setitem); - } - if (PyArray_IsScalar(op, LongDouble)) { temp = PyArrayScalar_VAL(op, LongDouble); } + else if (PyArray_IsZeroDim(op)) { + return convert_to_scalar_and_retry(descr, op, ov, LONGDOUBLE_setitem); + } else { /* In case something funny happened in PyArray_IsScalar */ if (PyErr_Occurred()) { @@ -612,13 +620,9 @@ LONGDOUBLE_setitem(PyObject *op, void *ov, void *vap) if (PyErr_Occurred()) { return -1; } - if (ap == NULL || PyArray_ISBEHAVED(ap)) { - *((npy_longdouble *)ov) = temp; - } - else { - copy_and_swap(ov, &temp, PyArray_ITEMSIZE(ap), 1, 0, - PyArray_ISBYTESWAPPED(ap)); - } + // Support descr == NULL for scalarmath paths + LONGDOUBLE_copyswap( + ov, &temp, descr != NULL && PyDataType_ISBYTESWAPPED(descr), NULL); return 0; } @@ -664,12 +668,10 @@ UNICODE_getitem(void *ip, void *vap) } static int -UNICODE_setitem(PyObject *op, void *ov, void *vap) +UNICODE_setitem(PyArray_Descr *descr, PyObject *op, char *ov) { - PyArrayObject *ap = vap; - if (PyArray_IsZeroDim(op)) { - return convert_to_scalar_and_retry(op, ov, vap, UNICODE_setitem); + return convert_to_scalar_and_retry(descr, op, ov, UNICODE_setitem); } if (PySequence_NoString_Check(op)) { @@ -691,7 +693,7 @@ UNICODE_setitem(PyObject *op, void *ov, void *vap) } /* truncate if needed */ - Py_ssize_t max_len = PyArray_ITEMSIZE(ap) >> 2; + Py_ssize_t max_len = descr->elsize >> 2; Py_ssize_t actual_len = PyUnicode_GetLength(temp); if (actual_len < 0) { Py_DECREF(temp); @@ -708,7 +710,8 @@ UNICODE_setitem(PyObject *op, void *ov, void *vap) Py_ssize_t num_bytes = actual_len * 4; char *buffer; - if (!PyArray_ISALIGNED(ap)) { + int aligned = npy_is_aligned(ov, NPY_ALIGNOF(Py_UCS4)); + if (!aligned) { buffer = PyArray_malloc(num_bytes); if (buffer == NULL) { Py_DECREF(temp); @@ -725,16 +728,16 @@ UNICODE_setitem(PyObject *op, void *ov, void *vap) 
return -1; } - if (!PyArray_ISALIGNED(ap)) { + if (!aligned) { memcpy(ov, buffer, num_bytes); PyArray_free(buffer); } /* Fill in the rest of the space with 0 */ - if (PyArray_ITEMSIZE(ap) > num_bytes) { - memset((char*)ov + num_bytes, 0, (PyArray_ITEMSIZE(ap) - num_bytes)); + if (descr->elsize > num_bytes) { + memset((char*)ov + num_bytes, 0, (descr->elsize - num_bytes)); } - if (PyArray_ISBYTESWAPPED(ap)) { + if (PyDataType_ISBYTESWAPPED(descr)) { byte_swap_vector(ov, actual_len, 4); } Py_DECREF(temp); @@ -762,15 +765,14 @@ STRING_getitem(void *ip, void *vap) } static int -STRING_setitem(PyObject *op, void *ov, void *vap) +STRING_setitem(PyArray_Descr *descr, PyObject *op, char *ov) { - PyArrayObject *ap = vap; char *ptr; Py_ssize_t len; PyObject *temp = NULL; if (PyArray_IsZeroDim(op)) { - return convert_to_scalar_and_retry(op, ov, vap, STRING_setitem); + return convert_to_scalar_and_retry(descr, op, ov, STRING_setitem); } if (PySequence_NoString_Check(op)) { @@ -808,13 +810,13 @@ STRING_setitem(PyObject *op, void *ov, void *vap) Py_DECREF(temp); return -1; } - memcpy(ov, ptr, PyArray_MIN(PyArray_ITEMSIZE(ap),len)); + memcpy(ov, ptr, PyArray_MIN(descr->elsize, len)); /* * If string length is smaller than room in array * Then fill the rest of the element size with NULL */ - if (PyArray_ITEMSIZE(ap) > len) { - memset((char *)ov + len, 0, (PyArray_ITEMSIZE(ap) - len)); + if (descr->elsize > len) { + memset((char *)ov + len, 0, (descr->elsize - len)); } Py_DECREF(temp); return 0; @@ -841,7 +843,7 @@ OBJECT_getitem(void *ip, void *NPY_UNUSED(ap)) static int -OBJECT_setitem(PyObject *op, void *ov, void *NPY_UNUSED(ap)) +OBJECT_setitem(PyArray_Descr *descr, PyObject *op, char *ov) { PyObject *obj; @@ -867,11 +869,9 @@ VOID_getitem(void *input, void *vap) _PyArray_LegacyDescr *descr = (_PyArray_LegacyDescr *)PyArray_DESCR(vap); if (PyDataType_HASFIELDS(descr)) { - PyObject *key; PyObject *names; int i, n; PyObject *ret; - PyObject *tup; PyArrayObject_fields dummy_fields 
= get_dummy_stack_array(ap); PyArrayObject *dummy_arr = (PyArrayObject *)&dummy_fields; @@ -882,9 +882,7 @@ VOID_getitem(void *input, void *vap) for (i = 0; i < n; i++) { npy_intp offset; PyArray_Descr *new; - key = PyTuple_GET_ITEM(names, i); - tup = PyDict_GetItem(descr->fields, key); // noqa: borrowed-ref OK - if (_unpack_field(tup, &new, &offset) < 0) { + if (_unpack_field_index(descr, i, &new, &offset) < 0) { Py_DECREF(ret); return NULL; } @@ -969,14 +967,10 @@ NPY_NO_EXPORT int _setup_field(int i, _PyArray_LegacyDescr *descr, PyArrayObject *arr, npy_intp *offset_p, char *dstdata) { - PyObject *key; - PyObject *tup; PyArray_Descr *new; npy_intp offset; - key = PyTuple_GET_ITEM(descr->names, i); - tup = PyDict_GetItem(descr->fields, key); // noqa: borrowed-ref OK - if (_unpack_field(tup, &new, &offset) < 0) { + if (_unpack_field_index(descr, i, &new, &offset) < 0) { return -1; } @@ -1029,18 +1023,17 @@ _copy_and_return_void_setitem(_PyArray_LegacyDescr *dstdescr, char *dstdata, } static int -VOID_setitem(PyObject *op, void *input, void *vap) +VOID_setitem(PyArray_Descr *descr_, PyObject *op, char *ip) { - char *ip = input; - PyArrayObject *ap = vap; - int itemsize = PyArray_ITEMSIZE(ap); + _PyArray_LegacyDescr *descr = (_PyArray_LegacyDescr *)descr_; + int itemsize = descr->elsize; int res; - _PyArray_LegacyDescr *descr = (_PyArray_LegacyDescr *)PyArray_DESCR(ap); if (PyDataType_HASFIELDS(descr)) { PyObject *errmsg; npy_int i; npy_intp offset; + PyArray_Descr *field_descr; int failed = 0; /* If op is 0d-ndarray or numpy scalar, directly get dtype & data ptr */ @@ -1073,23 +1066,18 @@ VOID_setitem(PyObject *op, void *input, void *vap) return -1; } - PyArrayObject_fields dummy_fields = get_dummy_stack_array(ap); - PyArrayObject *dummy_arr = (PyArrayObject *)&dummy_fields; - for (i = 0; i < names_size; i++) { - PyObject *item; - - if (_setup_field(i, descr, dummy_arr, &offset, ip) == -1) { + PyObject *item = PyTuple_GetItem(op, i); + if (item == NULL) { failed = 
1; break; } - item = PyTuple_GetItem(op, i); - if (item == NULL) { + if (_unpack_field_index(descr, i, &field_descr, &offset) < 0) { failed = 1; break; } /* use setitem to set this field */ - if (PyArray_SETITEM(dummy_arr, ip + offset, item) < 0) { + if (NPY_DT_CALL_setitem(field_descr, item, ip + offset) < 0) { failed = 1; break; } @@ -1099,17 +1087,13 @@ VOID_setitem(PyObject *op, void *input, void *vap) /* Otherwise must be non-void scalar. Try to assign to each field */ npy_intp names_size = PyTuple_GET_SIZE(descr->names); - PyArrayObject_fields dummy_fields = get_dummy_stack_array(ap); - PyArrayObject *dummy_arr = (PyArrayObject *)&dummy_fields; - for (i = 0; i < names_size; i++) { - /* temporarily make ap have only this field */ - if (_setup_field(i, descr, dummy_arr, &offset, ip) == -1) { + if (_unpack_field_index(descr, i, &field_descr, &offset) < 0) { failed = 1; break; } /* use setitem to set this field */ - if (PyArray_SETITEM(dummy_arr, ip + offset, op) < 0) { + if (NPY_DT_CALL_setitem(field_descr, op, ip + offset) < 0) { failed = 1; break; } @@ -1139,7 +1123,7 @@ VOID_setitem(PyObject *op, void *input, void *vap) PyArrayObject *ret = (PyArrayObject *)PyArray_NewFromDescrAndBase( &PyArray_Type, descr->subarray->base, shape.len, shape.ptr, NULL, ip, - PyArray_FLAGS(ap), NULL, NULL); + NPY_ARRAY_WRITEABLE, NULL, NULL); npy_free_cache_dim_obj(shape); if (!ret) { return -1; @@ -1217,15 +1201,14 @@ TIMEDELTA_getitem(void *ip, void *vap) } static int -DATETIME_setitem(PyObject *op, void *ov, void *vap) +DATETIME_setitem(PyArray_Descr *descr, PyObject *op, char *ov) { - PyArrayObject *ap = vap; /* ensure alignment */ npy_datetime temp = 0; PyArray_DatetimeMetaData *meta = NULL; /* Get the datetime units metadata */ - meta = get_datetime_metadata_from_dtype(PyArray_DESCR(ap)); + meta = get_datetime_metadata_from_dtype(descr); if (meta == NULL) { return -1; } @@ -1237,27 +1220,20 @@ DATETIME_setitem(PyObject *op, void *ov, void *vap) } /* Copy the value into the 
output */ - if (ap == NULL || PyArray_ISBEHAVED(ap)) { - *((npy_datetime *)ov)=temp; - } - else { - PyDataType_GetArrFuncs(PyArray_DESCR(ap))->copyswap(ov, &temp, PyArray_ISBYTESWAPPED(ap), - ap); - } - + DATETIME_copyswap( + ov, &temp, descr != NULL && PyDataType_ISBYTESWAPPED(descr), NULL); return 0; } static int -TIMEDELTA_setitem(PyObject *op, void *ov, void *vap) +TIMEDELTA_setitem(PyArray_Descr *descr, PyObject *op, char *ov) { - PyArrayObject *ap = vap; /* ensure alignment */ npy_timedelta temp = 0; PyArray_DatetimeMetaData *meta = NULL; /* Get the datetime units metadata */ - meta = get_datetime_metadata_from_dtype(PyArray_DESCR(ap)); + meta = get_datetime_metadata_from_dtype(descr); if (meta == NULL) { return -1; } @@ -1268,19 +1244,39 @@ TIMEDELTA_setitem(PyObject *op, void *ov, void *vap) return -1; } - /* Copy the value into the output */ - if (ap == NULL || PyArray_ISBEHAVED(ap)) { - *((npy_timedelta *)ov)=temp; - } - else { - PyDataType_GetArrFuncs(PyArray_DESCR(ap))->copyswap(ov, &temp, PyArray_ISBYTESWAPPED(ap), - ap); - } - + TIMEDELTA_copyswap( + ov, &temp, descr != NULL && PyDataType_ISBYTESWAPPED(descr), NULL); return 0; } +/**begin repeat + * + * #NAME = BOOL, + * BYTE, UBYTE, SHORT, USHORT, INT, UINT, + * LONG, ULONG, LONGLONG, ULONGLONG, + * HALF, FLOAT, DOUBLE, LONGDOUBLE, + * CFLOAT, CDOUBLE, CLONGDOUBLE, + * OBJECT, STRING, UNICODE, VOID, + * DATETIME, TIMEDELTA# + */ + +/* + * Legacy fallback setitem, should be deprecated, but if anyone calls + * our setitem *without* an array (or stealing it for their dtype) + * they might need it. E.g. a NumPy 3 should probably just dump it all, though. + */ +static int +@NAME@_legacy_setitem(PyObject *value, void *data, void *vap) +{ + // Most builtins allow descr to be NULL traditionally, so assume it's OK + PyArray_Descr *descr = vap == NULL ? 
NULL : PyArray_DESCR((PyArrayObject *)vap); + return @NAME@_setitem(descr, value, data); +} + +/**end repeat**/ + + /* ***************************************************************************** ** TYPE TO TYPE CONVERSIONS ** @@ -1536,7 +1532,7 @@ static void if (temp == NULL) { return; } - if (@to@_setitem(temp, op, aop)) { + if (@to@_setitem(PyArray_DESCR(aop), temp, (char *)op)) { Py_DECREF(temp); return; } @@ -1584,7 +1580,7 @@ static void Py_INCREF(Py_False); temp = Py_False; } - if (@to@_setitem(temp, op, aop)) { + if (@to@_setitem(PyArray_DESCR(aop), temp, (char *)op)) { Py_DECREF(temp); return; } @@ -1949,7 +1945,7 @@ _basic_copy(void *dst, void *src, int elsize) { * npy_half, npy_float, npy_double, npy_longdouble, * npy_datetime, npy_timedelta# */ -static void +static inline void @fname@_copyswapn (void *dst, npy_intp dstride, void *src, npy_intp sstride, npy_intp n, int swap, void *NPY_UNUSED(arr)) { @@ -1960,7 +1956,7 @@ static void } } -static void +static inline void @fname@_copyswap (void *dst, void *src, int swap, void *NPY_UNUSED(arr)) { /* copy first if needed */ @@ -2042,7 +2038,7 @@ static void /* ignore swap */ } -static void +static inline void @fname@_copyswap (void *dst, void *src, int NPY_UNUSED(swap), void *NPY_UNUSED(arr)) { @@ -2075,7 +2071,7 @@ static void } } -static void +static inline void @fname@_copyswap (void *dst, void *src, int swap, void *NPY_UNUSED(arr)) { /* copy first if needed */ @@ -2220,7 +2216,7 @@ OBJECT_copyswapn(PyObject **dst, npy_intp dstride, PyObject **src, return; } -static void +static inline void OBJECT_copyswap(PyObject **dst, PyObject **src, int NPY_UNUSED(swap), void *NPY_UNUSED(arr)) { @@ -2451,7 +2447,7 @@ UNICODE_copyswapn (char *dst, npy_intp dstride, char *src, npy_intp sstride, } -static void +static inline void STRING_copyswap(char *dst, char *src, int NPY_UNUSED(swap), PyArrayObject *arr) { assert(arr != NULL); @@ -2462,7 +2458,7 @@ STRING_copyswap(char *dst, char *src, int NPY_UNUSED(swap), 
PyArrayObject *arr) _basic_copy(dst, src, PyArray_ITEMSIZE(arr)); } -static void +static inline void UNICODE_copyswap (char *dst, char *src, int swap, PyArrayObject *arr) { int itemsize; @@ -3018,9 +3014,8 @@ UNICODE_compare(npy_ucs4 *ip1, npy_ucs4 *ip2, static int VOID_compare(char *ip1, char *ip2, PyArrayObject *ap) { - PyArray_Descr *descr; - PyObject *names, *key; - PyObject *tup; + _PyArray_LegacyDescr *descr; + PyObject *names; PyArrayObject_fields dummy_struct; PyArrayObject *dummy = (PyArrayObject *)&dummy_struct; char *nip1, *nip2; @@ -3033,18 +3028,16 @@ VOID_compare(char *ip1, char *ip2, PyArrayObject *ap) if (mem_handler == NULL) { goto finish; } - descr = PyArray_DESCR(ap); + descr = (_PyArray_LegacyDescr *)PyArray_DESCR(ap); /* * Compare on the first-field. If equal, then * compare on the second-field, etc. */ - names = PyDataType_NAMES(descr); + names = descr->names; for (i = 0; i < PyTuple_GET_SIZE(names); i++) { PyArray_Descr *new; npy_intp offset; - key = PyTuple_GET_ITEM(names, i); - tup = PyDict_GetItem(PyDataType_FIELDS(descr), key); // noqa: borrowed-ref OK - if (_unpack_field(tup, &new, &offset) < 0) { + if (_unpack_field_index(descr, i, &new, &offset) < 0) { goto finish; } /* Set the fields needed by compare or copyswap */ @@ -4029,7 +4022,7 @@ static PyArray_ArrFuncs _Py@NAME@_ArrFuncs = { @from@_to_VOID }, @from@_getitem, - @from@_setitem, + @from@_legacy_setitem, (PyArray_CopySwapNFunc*)@from@_copyswapn, (PyArray_CopySwapFunc*)@from@_copyswap, (PyArray_CompareFunc*)@from@_compare, @@ -4148,7 +4141,7 @@ static PyArray_ArrFuncs _Py@NAME@_ArrFuncs = { @from@_to_VOID }, @from@_getitem, - @from@_setitem, + @from@_legacy_setitem, (PyArray_CopySwapNFunc*)@from@_copyswapn, (PyArray_CopySwapFunc*)@from@_copyswap, (PyArray_CompareFunc*)@from@_compare, @@ -4594,6 +4587,7 @@ set_typeinfo(PyObject *dict) if (dtypemeta == NULL) { return -1; } + NPY_DT_SLOTS(dtypemeta)->setitem = @NAME@_setitem; NPY_DT_SLOTS(dtypemeta)->get_constant = 
@NAME@_get_constant; /**end repeat**/ diff --git a/numpy/_core/src/multiarray/arraytypes.h.src b/numpy/_core/src/multiarray/arraytypes.h.src index ca8dbeaa67eb..59dc836a2de5 100644 --- a/numpy/_core/src/multiarray/arraytypes.h.src +++ b/numpy/_core/src/multiarray/arraytypes.h.src @@ -44,7 +44,7 @@ small_correlate(const char * d_, npy_intp dstride, */ NPY_NO_EXPORT int -@TYPE@_setitem(PyObject *obj, void *data_ptr, void *arr); +@TYPE@_setitem(PyArray_Descr *descr, PyObject *obj, char *data_ptr); /**end repeat**/ diff --git a/numpy/_core/src/multiarray/common.c b/numpy/_core/src/multiarray/common.c index 3bae4a28efba..2e9bcbf29e8f 100644 --- a/numpy/_core/src/multiarray/common.c +++ b/numpy/_core/src/multiarray/common.c @@ -328,6 +328,30 @@ _unpack_field(PyObject *value, PyArray_Descr **descr, npy_intp *offset) return 0; } + +/** + * Unpack a field from a structured dtype. The field index must be valid. + * + * @param descr The dtype to unpack. + * @param index The index of the field to unpack. + * @param odescr will be set to the field's dtype + * @param offset will be set to the field's offset + * + * @return -1 on failure, 0 on success. + */ + NPY_NO_EXPORT int + _unpack_field_index( + _PyArray_LegacyDescr *descr, + npy_intp index, + PyArray_Descr **odescr, + npy_intp *offset) + { + PyObject *key = PyTuple_GET_ITEM(descr->names, index); + PyObject *tup = PyDict_GetItem(descr->fields, key); // noqa: borrowed-ref OK + return _unpack_field(tup, odescr, offset); + } + + /* * check whether arrays with datatype dtype might have object fields. 
This will * only happen for structured dtypes (which may have hidden objects even if the diff --git a/numpy/_core/src/multiarray/common.h b/numpy/_core/src/multiarray/common.h index d6b9ad36588f..db7bc64733db 100644 --- a/numpy/_core/src/multiarray/common.h +++ b/numpy/_core/src/multiarray/common.h @@ -65,12 +65,23 @@ convert_shape_to_string(npy_intp n, npy_intp const *vals, char *ending); NPY_NO_EXPORT void dot_alignment_error(PyArrayObject *a, int i, PyArrayObject *b, int j); + /** * unpack tuple of PyDataType_FIELDS(dtype) (descr, offset, title[not-needed]) */ NPY_NO_EXPORT int _unpack_field(PyObject *value, PyArray_Descr **descr, npy_intp *offset); +/** + * Unpack a field from a structured dtype by index. + */ +NPY_NO_EXPORT int +_unpack_field_index( + _PyArray_LegacyDescr *descr, + npy_intp index, + PyArray_Descr **odescr, + npy_intp *offset); + /* * check whether arrays with datatype dtype might have object fields. This will * only happen for structured dtypes (which may have hidden objects even if the diff --git a/numpy/_core/src/multiarray/dtypemeta.c b/numpy/_core/src/multiarray/dtypemeta.c index 3f569cc3ccec..692c7cfc2e0a 100644 --- a/numpy/_core/src/multiarray/dtypemeta.c +++ b/numpy/_core/src/multiarray/dtypemeta.c @@ -129,6 +129,20 @@ default_get_constant(PyArray_Descr *descr, int constant_id, void *data) } +static int +legacy_fallback_setitem(PyArray_Descr *descr, PyObject *value, char *data) +{ + PyArrayObject_fields arr_fields = { + .flags = NPY_ARRAY_WRITEABLE, /* assume array is not behaved. 
*/ + .descr = descr, + }; + Py_SET_TYPE(&arr_fields, &PyArray_Type); + Py_SET_REFCNT(&arr_fields, 1); + + return PyDataType_GetArrFuncs(descr)->setitem(value, data, &arr_fields); +} + + static int legacy_setitem_using_DType(PyObject *obj, void *data, void *arr) { @@ -138,9 +152,7 @@ legacy_setitem_using_DType(PyObject *obj, void *data, void *arr) "supported for basic NumPy DTypes."); return -1; } - PyArrayDTypeMeta_SetItem *setitem; - setitem = NPY_DT_SLOTS(NPY_DTYPE(PyArray_DESCR(arr)))->setitem; - return setitem(PyArray_DESCR(arr), obj, data); + return NPY_DT_CALL_setitem(PyArray_DESCR(arr), obj, data); } @@ -1189,7 +1201,12 @@ dtypemeta_wrap_legacy_descriptor( dt_slots->ensure_canonical = ensure_native_byteorder; dt_slots->get_fill_zero_loop = NULL; dt_slots->finalize_descr = NULL; - dt_slots->setitem = NULL; + // May be overwritten, but if not provide fallback via array struct hack. + // `getitem` is a trickier because of structured dtypes returning views. + if (dt_slots->f.setitem == NULL) { + dt_slots->f.setitem = legacy_setitem_using_DType; + } + dt_slots->setitem = legacy_fallback_setitem; dt_slots->getitem = NULL; if (PyTypeNum_ISSIGNED(dtype_class->type_num)) { diff --git a/numpy/_core/src/multiarray/dtypemeta.h b/numpy/_core/src/multiarray/dtypemeta.h index 202116a4cc91..d633af4e7b84 100644 --- a/numpy/_core/src/multiarray/dtypemeta.h +++ b/numpy/_core/src/multiarray/dtypemeta.h @@ -288,8 +288,7 @@ PyArray_GETITEM(const PyArrayObject *arr, const char *itemptr) static inline int PyArray_SETITEM(PyArrayObject *arr, char *itemptr, PyObject *v) { - return PyDataType_GetArrFuncs(((PyArrayObject_fields *)arr)->descr)->setitem( - v, itemptr, arr); + return NPY_DT_CALL_setitem(PyArray_DESCR(arr), v, itemptr); } // Like PyArray_DESCR_REPLACE, but calls ensure_canonical instead of DescrNew diff --git a/numpy/_core/src/multiarray/scalarapi.c b/numpy/_core/src/multiarray/scalarapi.c index e133b46d008a..7d65d972998d 100644 --- a/numpy/_core/src/multiarray/scalarapi.c 
+++ b/numpy/_core/src/multiarray/scalarapi.c @@ -519,15 +519,10 @@ PyArray_Scalar(void *data, PyArray_Descr *descr, PyObject *base) if (buff == NULL) { return PyErr_NoMemory(); } - /* copyswap needs an array object, but only actually cares about the - * dtype - */ - PyArrayObject_fields dummy_arr; - if (base == NULL) { - dummy_arr.descr = descr; - base = (PyObject *)&dummy_arr; + memcpy(buff, data, itemsize); + if (swap) { + byte_swap_vector(buff, itemsize / 4, 4); } - copyswap(buff, data, swap, base); /* truncation occurs here */ PyObject *u = PyUnicode_FromKindAndData(PyUnicode_4BYTE_KIND, buff, itemsize / 4); diff --git a/numpy/_core/src/umath/_scaled_float_dtype.c b/numpy/_core/src/umath/_scaled_float_dtype.c index fbdbbb8d2375..06f18b5c7259 100644 --- a/numpy/_core/src/umath/_scaled_float_dtype.c +++ b/numpy/_core/src/umath/_scaled_float_dtype.c @@ -107,7 +107,7 @@ sfloat_getitem(char *data, PyArrayObject *arr) static int -sfloat_setitem(PyObject *obj, char *data, PyArrayObject *arr) +sfloat_setitem(PyArray_Descr *descr_, PyObject *obj, char *data) { if (!PyFloat_CheckExact(obj)) { PyErr_SetString(PyExc_NotImplementedError, @@ -115,7 +115,7 @@ sfloat_setitem(PyObject *obj, char *data, PyArrayObject *arr) return -1; } - PyArray_SFloatDescr *descr = (PyArray_SFloatDescr *)PyArray_DESCR(arr); + PyArray_SFloatDescr *descr = (PyArray_SFloatDescr *)descr_; double value = PyFloat_AsDouble(obj); value /= descr->scaling; @@ -131,9 +131,10 @@ NPY_DType_Slots sfloat_slots = { .default_descr = &sfloat_default_descr, .common_dtype = &sfloat_common_dtype, .common_instance = &sfloat_common_instance, + .setitem = &sfloat_setitem, .f = { .getitem = (PyArray_GetItemFunc *)&sfloat_getitem, - .setitem = (PyArray_SetItemFunc *)&sfloat_setitem, + .setitem = NULL, } }; diff --git a/numpy/_core/src/umath/scalarmath.c.src b/numpy/_core/src/umath/scalarmath.c.src index 3c57d6866c56..e2d7c22f5deb 100644 --- a/numpy/_core/src/umath/scalarmath.c.src +++ 
b/numpy/_core/src/umath/scalarmath.c.src @@ -1246,7 +1246,7 @@ static PyObject * */ return PyGenericArrType_Type.tp_as_number->nb_@oper@(a,b); case CONVERT_PYSCALAR: - if (@NAME@_setitem(other, (char *)&other_val, NULL) < 0) { + if (@NAME@_setitem(NULL, other, (char *)&other_val) < 0) { return NULL; } break; @@ -1397,7 +1397,7 @@ static PyObject * return PyGenericArrType_Type.tp_as_number->nb_true_divide(a,b); case CONVERT_PYSCALAR: /* This is the special behavior, convert to float64 directly */ - if (DOUBLE_setitem(other, (char *)&other_val, NULL) < 0) { + if (DOUBLE_setitem(NULL, other, (char *)&other_val) < 0) { return NULL; } break; @@ -1516,7 +1516,7 @@ static PyObject * case PROMOTION_REQUIRED: return PyGenericArrType_Type.tp_as_number->nb_power(a, b, modulo); case CONVERT_PYSCALAR: - if (@NAME@_setitem(other, (char *)&other_val, NULL) < 0) { + if (@NAME@_setitem(NULL, other, (char *)&other_val) < 0) { return NULL; } break; @@ -1924,7 +1924,7 @@ static PyObject* case PROMOTION_REQUIRED: return PyGenericArrType_Type.tp_richcompare(self, other, cmp_op); case CONVERT_PYSCALAR: - if (@NAME@_setitem(other, (char *)&arg2, NULL) < 0) { + if (@NAME@_setitem(NULL, other, (char *)&arg2) < 0) { return NULL; } break; diff --git a/numpy/_core/tests/test_array_coercion.py b/numpy/_core/tests/test_array_coercion.py index a3939daa8904..658c672d5f99 100644 --- a/numpy/_core/tests/test_array_coercion.py +++ b/numpy/_core/tests/test_array_coercion.py @@ -266,11 +266,6 @@ def test_scalar_coercion(self, scalar): # Ensure we have a full-precision number if available scalar = type(scalar)((scalar * 2)**0.5) - if type(scalar) is rational: - # Rational generally fails due to a missing cast. In the future - # object casts should automatically be defined based on `setitem`. 
- pytest.xfail("Rational to object cast is undefined currently.") - # Use casting from object: arr = np.array(scalar, dtype=object).astype(scalar.dtype) From f2dcdb7d3fc972bab47dea530d755bdab055068f Mon Sep 17 00:00:00 2001 From: "hpkfft.com" Date: Mon, 6 Oct 2025 04:07:07 -0700 Subject: [PATCH 0536/1718] PERF: Intern strings used to build global tuples. (#29875) Intern strings of dlpack kwname tuples. Otherwise they may not be interned and typical argument parsing (e.g. including ours) misses the parsing fast-paths, which makes things fairly significantly slower. Our kwarg parsing basically assumes that kwargs are practically always interned strings. This is probably true except in a weird case like this. --- numpy/_core/src/multiarray/npy_static_data.c | 10 ++++++++-- numpy/_core/src/multiarray/npy_static_data.h | 3 +++ 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/numpy/_core/src/multiarray/npy_static_data.c b/numpy/_core/src/multiarray/npy_static_data.c index 62e1fd3c1b15..9fd321a375a2 100644 --- a/numpy/_core/src/multiarray/npy_static_data.c +++ b/numpy/_core/src/multiarray/npy_static_data.c @@ -64,6 +64,9 @@ intern_strings(void) INTERN_STRING(pyvals_name, "UFUNC_PYVALS_NAME"); INTERN_STRING(legacy, "legacy"); INTERN_STRING(__doc__, "__doc__"); + INTERN_STRING(copy, "copy"); + INTERN_STRING(dl_device, "dl_device"); + INTERN_STRING(max_version, "max_version"); return 0; } @@ -169,7 +172,8 @@ initialize_static_globals(void) return -1; } - npy_static_pydata.kwnames_is_copy = Py_BuildValue("(s)", "copy"); + npy_static_pydata.kwnames_is_copy = + Py_BuildValue("(O)", npy_interned_str.copy); if (npy_static_pydata.kwnames_is_copy == NULL) { return -1; } @@ -185,7 +189,9 @@ initialize_static_globals(void) } npy_static_pydata.dl_call_kwnames = - Py_BuildValue("(sss)", "dl_device", "copy", "max_version"); + Py_BuildValue("(OOO)", npy_interned_str.dl_device, + npy_interned_str.copy, + npy_interned_str.max_version); if (npy_static_pydata.dl_call_kwnames == 
NULL) { return -1; } diff --git a/numpy/_core/src/multiarray/npy_static_data.h b/numpy/_core/src/multiarray/npy_static_data.h index 287dc80e4c1f..a6901c858374 100644 --- a/numpy/_core/src/multiarray/npy_static_data.h +++ b/numpy/_core/src/multiarray/npy_static_data.h @@ -43,6 +43,9 @@ typedef struct npy_interned_str_struct { PyObject *pyvals_name; PyObject *legacy; PyObject *__doc__; + PyObject *copy; + PyObject *dl_device; + PyObject *max_version; } npy_interned_str_struct; /* From a07a8e7d892be2292333331b9b8362b20f609152 Mon Sep 17 00:00:00 2001 From: Sandro Date: Thu, 25 Sep 2025 20:13:33 +0200 Subject: [PATCH 0537/1718] DOC: Update docstring for `count_nonzero` The return type of the function was changed in 2.3.0, but the docstring remained the same (gh-28774). --- numpy/_core/numeric.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/_core/numeric.py b/numpy/_core/numeric.py index ec2cbf6dd7fc..b050b81834c6 100644 --- a/numpy/_core/numeric.py +++ b/numpy/_core/numeric.py @@ -523,11 +523,11 @@ def count_nonzero(a, axis=None, *, keepdims=False): -------- >>> import numpy as np >>> np.count_nonzero(np.eye(4)) - 4 + np.int64(4) >>> a = np.array([[0, 1, 7, 0], ... 
[3, 0, 2, 19]]) >>> np.count_nonzero(a) - 5 + np.int64(5) >>> np.count_nonzero(a, axis=0) array([1, 1, 2, 1]) >>> np.count_nonzero(a, axis=1) From aa45f1aecd106cbbe1b834b7f2622aee9273a18c Mon Sep 17 00:00:00 2001 From: Maanas Arora Date: Tue, 7 Oct 2025 04:49:15 -0400 Subject: [PATCH 0538/1718] ENH, API: New sorting slots for DType API (#29737) * ENH, API: New sorting slots for DType API * DOC: Add sorting context structures and functions to DType API documentation * DOC: Fix type signature syntax * DOC: Update data pointer type in sorting function signatures * DOC: Fix incorrect argsort function name * DOC: Fix formatting of function signature for PyArray_SortFuncWithContext * ENH: Initialize moving to an arraymethod table for user dtype sorts * ENH: Initialize PyArrayMethod_Context to zero in sorting functions * ENH: Add stable sort option to context in user sorts * DOC: Remove deprecated references from DType API sorting documentation * ENH: Refactor sorting parameters to use NPY_SORTKIND in PyArrayMethod_SortParameters * ENH: Remove unnecessary blank line in NPY_DType_Slots structure * ENH: Simplify axis check in PyArray_ArgSort * REF: Simplify PyArray_(Arg)Sort loop discovery logic * BUG: Add missing breaks in loop discovery in sorts * BUG: Simplify context initialization in PyArray_Sort and PyArray_ArgSort * BUG: Fix dangling pointer to context in PyArray_(Arg)Sort * BUG: Fix dangling context pointer in PyArray_(Arg)Sort * REF: Update sorting implementation to use new generalized array method system * ENH: Use dtype slots for sort and argsort arraymethods * STYLE: Add back blank line in methods.h * REF: Simplify conditional logic in PyArray_Sort and PyArray_ArgSort * BUG: Add error handling for strided loop retrieval in PyArray_(Arg)Sort * ENH: Fill out context and strides in sort strided loop calls * ENH: Add correct number of descriptors and strides in sorting loops * BUG: Correct descriptors and strides in argsort implementation * ENH: If-case parameters 
field in PyArrayMethod_Context * REF: Initialize PyArrayMethod_Context structure directly for sorting functions * ENH: Pass auxdata to dtype sorts * ENH: Update stride calculation for sorting to use item size * ENH: Refactor context initialization and descriptor resolution in sorting functions * ENH: Implement sorting and argsorting methods for scaled float dtype * DOC: Add comment to clarify that we ignore `view_offset` * ENH: Free auxdata and decref descriptors in sorting functions * DOC: Add comments to clarify that method flags are ignored in sorting functions * ENH: Add scaled float tests for sorting and argsorting with scaled and non-contiguous arrays * TST: Update scaled float argsort test to stride array * STYLE: Fix formatting in sfloat test_sort * BUG: Update sorting functions to check for sort method in cleanup instead of strided loop * REF: Fix scope for sort contexts * REF: Initialize sorting context only if taken * DOC: Add parameters member to PyArrayMethod_Context for loop parameterization * BUG: Move declaration of loop_descrs array in PyArray_ArgSort * DOC: Add comment clarifying arraymethod context parameters * DOC: Clarify description of parameters member in PyArrayMethod_Context * REF: Use existing variable to simplify NPY_DTYPE macro * BUG: Add missing output decref in PyArray_ArgSort * BUG: Add assertions to validate parameters in sorting functions of scaled float dtype * REF: Simplify argument passing by removing unnecessary dimensions and strides in casting functions * BUG: Fix castinfo changes * TST: assert exact stride values in sfloat sorts Co-authored-by: Sebastian Berg * TST: assert exact stride values in sfloat sorts Co-authored-by: Sebastian Berg * DOC: move versionchanged in ArrayMethod_Context Co-authored-by: Sebastian Berg * BUG: Fix stride initialization in _new_sortlike and ensure proper decref in PyArray_ArgSort * BUG: Correct dimension and stride initializations in sorting * BUG: Fix strides initialization in sorting 
Co-authored-by: Sebastian Berg * DOC: clarify function level in parameters for PyArrayMethod_Context Co-authored-by: Sebastian Berg * REF: initialize slots in-line for sfloat sorts * BUG: Remove incorrect dimension assertions in sorting loops * BUG: Correct size to sizeof * STYLE: revert newline deletion * STYLE: remove trailing whitespace * STYLE: revert delete newline * BUG: Fix incorrect stride assignment in _new_sortlike function * STYLE: Remove unnecessary whitespace * DOC: Improve comment Co-authored-by: Sebastian Berg * REF: Remove first branch in resolve_descriptors for sfloat sorts * TST: Add reverse sorting test for non-aligned arrays in sfloat sort tests * DOC: Add TODO comments for future registration method in sfloat_init_sort * ENH: Perform descriptor handling in sorting array method for byte order * DOC: Remove redundant versionchanged note in types-and-structures.rst * TST: Add stable and unstable sorting tests for sfloat arrays * ENH: Update stride assertions in sfloat_default_sort_loop for improved data validation * ENH: Add stride assertions in sfloat_default_argsort_loop for improved data validation * ENH: Change descriptor handling in sfloat_sort_resolve_descriptors and sfloat_argsort_resolve_descriptors for improved byte order management * ENH: Manage reference counting for argsort method in sfloat_init_sort to prevent memory leaks * ENH: Add assertion in sfloat_sort_resolve_descriptors to validate descriptor consistency * ENH: Add assertion in sfloat_argsort_resolve_descriptors to validate second descriptor type * ENH: Move error handling to sfloat_sort_get_loop and sfloat_argsort_get_loop for unsupported sort kinds * ENH: Manage reference counting for sort method in sfloat_init_sort to prevent memory leaks * REF: Clarify stride calculation in PyArray_Sort and PyArray_ArgSort * ENH: Add method flag handling in sorting methods to check for GIL * REF: Simplify descr element size retrieval in PyArray_Sort and PyArray_ArgSort * BUG: Replace NULL 
return with goto fail in PyArray_ArgSort for error handling * REF: Assert instead of handling non-native byteorder in sfloat sorts * BUG: Initialize return value to NULL in PyArray_ArgSort for error handling * REF: Remove descr-specific thread release in favor of pyapi flag in sorting methods * BUG: Fix needcopy logic in _new_sortlike and _new_argsortlike functions * BUG: Remove unnecessary NULL check for odescr in PyArray_ArgSort --------- Co-authored-by: Sebastian Berg --- .../reference/c-api/types-and-structures.rst | 10 + numpy/_core/include/numpy/dtype_api.h | 11 + numpy/_core/src/multiarray/dtypemeta.h | 6 + numpy/_core/src/multiarray/item_selection.c | 327 +++++++++++++----- numpy/_core/src/umath/_scaled_float_dtype.c | 234 +++++++++++++ numpy/_core/tests/test_custom_dtypes.py | 72 ++++ 6 files changed, 579 insertions(+), 81 deletions(-) diff --git a/doc/source/reference/c-api/types-and-structures.rst b/doc/source/reference/c-api/types-and-structures.rst index fb8efb9c8766..49704913037b 100644 --- a/doc/source/reference/c-api/types-and-structures.rst +++ b/doc/source/reference/c-api/types-and-structures.rst @@ -728,6 +728,7 @@ PyArrayMethod_Context and PyArrayMethod_Spec PyObject *caller; struct PyArrayMethodObject_tag *method; PyArray_Descr *const *descriptors; + void *parameters; } PyArrayMethod_Context .. c:member:: PyObject *caller @@ -744,6 +745,15 @@ PyArrayMethod_Context and PyArrayMethod_Spec An array of descriptors for the ufunc loop, filled in by ``resolve_descriptors``. The length of the array is ``nin`` + ``nout``. + .. c:member:: void *parameters + + A pointer to a structure containing any runtime parameters needed by the + loop. This is ``NULL`` if no parameters are needed. The type of the + struct is specific to the registered function. + + .. versionchanged:: NumPy 2.4 + The `parameters` member was added in NumPy 2.4. + .. c:type:: PyArrayMethod_Spec A struct used to register an ArrayMethod with NumPy. 
We use the slots diff --git a/numpy/_core/include/numpy/dtype_api.h b/numpy/_core/include/numpy/dtype_api.h index cc61a826c103..2222ff342253 100644 --- a/numpy/_core/include/numpy/dtype_api.h +++ b/numpy/_core/include/numpy/dtype_api.h @@ -119,6 +119,13 @@ typedef struct PyArrayMethod_Context_tag { * NPY_ARRAYMETHOD_CONTEXT_FLAGS */ uint64_t flags; + + /* + * Optional run-time parameters to pass to the loop (currently used in sorting). + * Fixed parameters are expected to be passed via auxdata. + */ + void *parameters; + /* Structure may grow (this is harmless for DType authors) */ #endif } PyArrayMethod_Context; @@ -526,4 +533,8 @@ typedef int (PyArrayDTypeMeta_GetConstant)(PyArray_Descr *descr, int ID, void *d typedef int(PyArrayDTypeMeta_SetItem)(PyArray_Descr *, PyObject *, char *); typedef PyObject *(PyArrayDTypeMeta_GetItem)(PyArray_Descr *, char *); +typedef struct { + NPY_SORTKIND flags; +} PyArrayMethod_SortParameters; + #endif /* NUMPY_CORE_INCLUDE_NUMPY___DTYPE_API_H_ */ diff --git a/numpy/_core/src/multiarray/dtypemeta.h b/numpy/_core/src/multiarray/dtypemeta.h index d633af4e7b84..bf0acb48b899 100644 --- a/numpy/_core/src/multiarray/dtypemeta.h +++ b/numpy/_core/src/multiarray/dtypemeta.h @@ -90,6 +90,12 @@ typedef struct { * dtype instance for backward compatibility. (Keep this at end) */ PyArray_ArrFuncs f; + + /* + * Hidden slots for the sort and argsort arraymethods. 
+ */ + PyArrayMethodObject *sort_meth; + PyArrayMethodObject *argsort_meth; } NPY_DType_Slots; // This must be updated if new slots before within_dtype_castingimpl diff --git a/numpy/_core/src/multiarray/item_selection.c b/numpy/_core/src/multiarray/item_selection.c index 5e622ed59e08..a50b8c49c3fa 100644 --- a/numpy/_core/src/multiarray/item_selection.c +++ b/numpy/_core/src/multiarray/item_selection.c @@ -25,6 +25,7 @@ #include "lowlevel_strided_loops.h" #include "array_assign.h" #include "refcount.h" +#include "methods.h" #include "npy_sort.h" #include "npy_partition.h" @@ -1194,6 +1195,8 @@ PyArray_Choose(PyArrayObject *ip, PyObject *op, PyArrayObject *out, */ static int _new_sortlike(PyArrayObject *op, int axis, PyArray_SortFunc *sort, + PyArrayMethod_StridedLoop *strided_loop, PyArrayMethod_Context *context, + NpyAuxData *auxdata, NPY_ARRAYMETHOD_FLAGS *method_flags, PyArray_PartitionFunc *part, npy_intp const *kth, npy_intp nkth) { npy_intp N = PyArray_DIM(op, axis); @@ -1201,8 +1204,8 @@ _new_sortlike(PyArrayObject *op, int axis, PyArray_SortFunc *sort, npy_intp astride = PyArray_STRIDE(op, axis); int swap = PyArray_ISBYTESWAPPED(op); int is_aligned = IsAligned(op); - int needcopy = !is_aligned || swap || astride != elsize; - int needs_api = PyDataType_FLAGCHK(PyArray_DESCR(op), NPY_NEEDS_PYAPI); + int needcopy = 0; + int needs_api; char *buffer = NULL; @@ -1223,6 +1226,13 @@ _new_sortlike(PyArrayObject *op, int axis, PyArray_SortFunc *sort, if (N <= 1 || PyArray_SIZE(op) == 0) { return 0; } + + if (method_flags != NULL) { + needs_api = *method_flags & NPY_METH_REQUIRES_PYAPI; + } + else { + needs_api = PyDataType_FLAGCHK(PyArray_DESCR(op), NPY_NEEDS_PYAPI); + } PyObject *mem_handler = PyDataMem_GetHandler(); if (mem_handler == NULL) { @@ -1235,6 +1245,26 @@ _new_sortlike(PyArrayObject *op, int axis, PyArray_SortFunc *sort, } size = it->size; + if (strided_loop != NULL) { + // Descriptors have already been resolved + odescr = context->descriptors[0]; + 
Py_INCREF(odescr); + } + else { + if (swap) { + odescr = PyArray_DescrNewByteorder(descr, NPY_SWAP); + } + else { + odescr = descr; + Py_INCREF(odescr); + } + } + + needcopy = !is_aligned || astride != elsize; + if (!PyArray_EquivTypes(descr, odescr)) { + needcopy = 1; + } + if (needcopy) { buffer = PyDataMem_UserNEW(N * elsize, mem_handler); if (buffer == NULL) { @@ -1245,14 +1275,6 @@ _new_sortlike(PyArrayObject *op, int axis, PyArray_SortFunc *sort, memset(buffer, 0, N * elsize); } - if (swap) { - odescr = PyArray_DescrNewByteorder(descr, NPY_SWAP); - } - else { - odescr = descr; - Py_INCREF(odescr); - } - NPY_ARRAYMETHOD_FLAGS to_transfer_flags; if (PyArray_GetDTypeTransferFunction( @@ -1270,7 +1292,9 @@ _new_sortlike(PyArrayObject *op, int axis, PyArray_SortFunc *sort, } } - NPY_BEGIN_THREADS_DESCR(descr); + if (!needs_api) { + NPY_BEGIN_THREADS; + } while (size--) { char *bufptr = it->dataptr; @@ -1295,7 +1319,14 @@ _new_sortlike(PyArrayObject *op, int axis, PyArray_SortFunc *sort, */ if (part == NULL) { - ret = sort(bufptr, N, op); + if (strided_loop != NULL) { + char *const data[2] = {bufptr, bufptr}; + npy_intp strides[2] = {elsize, elsize}; + ret = strided_loop(context, data, &N, strides, NULL); + } + else { + ret = sort(bufptr, N, op); + } if (needs_api && PyErr_Occurred()) { ret = -1; } @@ -1333,7 +1364,9 @@ _new_sortlike(PyArrayObject *op, int axis, PyArray_SortFunc *sort, } fail: - NPY_END_THREADS_DESCR(descr); + if (!needs_api) { + NPY_END_THREADS; + } /* cleanup internal buffer */ if (needcopy) { PyArray_ClearBuffer(odescr, buffer, elsize, N, 1); @@ -1361,16 +1394,17 @@ _new_sortlike(PyArrayObject *op, int axis, PyArray_SortFunc *sort, static PyObject* _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort, - PyArray_ArgPartitionFunc *argpart, - npy_intp const *kth, npy_intp nkth) + PyArrayMethod_StridedLoop *strided_loop, PyArrayMethod_Context *context, + NpyAuxData *auxdata, NPY_ARRAYMETHOD_FLAGS *method_flags, + 
PyArray_ArgPartitionFunc *argpart, npy_intp const *kth, npy_intp nkth) { npy_intp N = PyArray_DIM(op, axis); npy_intp elsize = (npy_intp)PyArray_ITEMSIZE(op); npy_intp astride = PyArray_STRIDE(op, axis); int swap = PyArray_ISBYTESWAPPED(op); int is_aligned = IsAligned(op); - int needcopy = !is_aligned || swap || astride != elsize; - int needs_api = PyDataType_FLAGCHK(PyArray_DESCR(op), NPY_NEEDS_PYAPI); + int needcopy = 0; + int needs_api; int needidxbuffer; char *valbuffer = NULL; @@ -1407,6 +1441,13 @@ _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort, rstride = PyArray_STRIDE(rop, axis); needidxbuffer = rstride != sizeof(npy_intp); + if (method_flags != NULL) { + needs_api = *method_flags & NPY_METH_REQUIRES_PYAPI; + } + else { + needs_api = PyDataType_FLAGCHK(PyArray_DESCR(op), NPY_NEEDS_PYAPI); + } + /* Check if there is any argsorting to do */ if (N <= 1 || PyArray_SIZE(op) == 0) { Py_DECREF(mem_handler); @@ -1422,6 +1463,26 @@ _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort, } size = it->size; + if (strided_loop != NULL) { + // Descriptors have already been resolved + odescr = context->descriptors[0]; + Py_INCREF(odescr); + } + else { + if (swap) { + odescr = PyArray_DescrNewByteorder(descr, NPY_SWAP); + } + else { + odescr = descr; + Py_INCREF(odescr); + } + } + + needcopy = !is_aligned || astride != elsize; + if (!PyArray_EquivTypes(descr, odescr)) { + needcopy = 1; + } + if (needcopy) { valbuffer = PyDataMem_UserNEW(N * elsize, mem_handler); if (valbuffer == NULL) { @@ -1432,14 +1493,6 @@ _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort, memset(valbuffer, 0, N * elsize); } - if (swap) { - odescr = PyArray_DescrNewByteorder(descr, NPY_SWAP); - } - else { - odescr = descr; - Py_INCREF(odescr); - } - if (PyArray_GetDTypeTransferFunction( is_aligned, astride, elsize, descr, odescr, 0, &cast_info, &transfer_flags) != NPY_SUCCEED) { @@ -1456,7 +1509,9 @@ _new_argsortlike(PyArrayObject 
*op, int axis, PyArray_ArgSortFunc *argsort, } } - NPY_BEGIN_THREADS_DESCR(descr); + if (!needs_api) { + NPY_BEGIN_THREADS; + } while (size--) { char *valptr = it->dataptr; @@ -1485,7 +1540,14 @@ _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort, } if (argpart == NULL) { - ret = argsort(valptr, idxptr, N, op); + if (strided_loop != NULL) { + char *const data[2] = {valptr, (char *)idxptr}; + npy_intp strides[2] = {elsize, sizeof(npy_intp)}; + ret = strided_loop(context, data, &N, strides, NULL); + } + else { + ret = argsort(valptr, idxptr, N, op); + } /* Object comparisons may raise an exception */ if (needs_api && PyErr_Occurred()) { ret = -1; @@ -1525,7 +1587,9 @@ _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort, } fail: - NPY_END_THREADS_DESCR(descr); + if (!needs_api) { + NPY_END_THREADS; + } /* cleanup internal buffers */ if (needcopy) { PyArray_ClearBuffer(odescr, valbuffer, elsize, N, 1); @@ -1654,7 +1718,7 @@ PyArray_Partition(PyArrayObject *op, PyArrayObject * ktharray, int axis, return -1; } - ret = _new_sortlike(op, axis, sort, part, + ret = _new_sortlike(op, axis, sort, NULL, NULL, NULL, NULL, part, PyArray_DATA(kthrvl), PyArray_SIZE(kthrvl)); Py_DECREF(kthrvl); @@ -1710,7 +1774,7 @@ PyArray_ArgPartition(PyArrayObject *op, PyArrayObject *ktharray, int axis, return NULL; } - ret = _new_argsortlike(op2, axis, argsort, argpart, + ret = _new_argsortlike(op2, axis, argsort, NULL, NULL, NULL, NULL, argpart, PyArray_DATA(kthrvl), PyArray_SIZE(kthrvl)); Py_DECREF(kthrvl); @@ -3072,9 +3136,19 @@ static PyArray_SortFunc* const generic_sort_table[] = {npy_quicksort, NPY_NO_EXPORT int PyArray_Sort(PyArrayObject *op, int axis, NPY_SORTKIND flags) { + PyArrayMethodObject *sort_method = NULL; + PyArrayMethod_StridedLoop *strided_loop = NULL; + PyArrayMethod_SortParameters sort_params = {.flags = flags}; + PyArrayMethod_Context context = {0}; + PyArray_Descr *loop_descrs[2]; + NpyAuxData *auxdata = NULL; + 
NPY_ARRAYMETHOD_FLAGS *method_flags = NULL; + PyArray_SortFunc **sort_table = NULL; PyArray_SortFunc *sort = NULL; + int ret; + if (check_and_adjust_axis(&axis, PyArray_NDIM(op)) < 0) { return -1; } @@ -3086,43 +3160,84 @@ PyArray_Sort(PyArrayObject *op, int axis, NPY_SORTKIND flags) // Zero the NPY_HEAPSORT bit, maps NPY_HEAPSORT to NPY_QUICKSORT flags &= ~_NPY_SORT_HEAPSORT; - sort_table = PyDataType_GetArrFuncs(PyArray_DESCR(op))->sort; - switch (flags) { - case NPY_SORT_DEFAULT: - sort = sort_table[NPY_QUICKSORT]; - break; - case NPY_SORT_STABLE: - sort = sort_table[NPY_STABLESORT]; - break; - default: - break; - } - - // Look for appropriate generic function if no type specific version - if (sort == NULL) { - if (!PyDataType_GetArrFuncs(PyArray_DESCR(op))->compare) { - PyErr_SetString(PyExc_TypeError, - "type does not have compare function"); + // Look for type specific functions + sort_method = NPY_DT_SLOTS(NPY_DTYPE(PyArray_DESCR(op)))->sort_meth; + if (sort_method != NULL) { + PyArray_Descr *descr = PyArray_DESCR(op); + PyArray_DTypeMeta *dt = NPY_DTYPE(descr); + + PyArray_DTypeMeta *dtypes[2] = {dt, dt}; + PyArray_Descr *given_descrs[2] = {descr, descr}; + // Sort cannot be a view, so view_offset is unused + npy_intp view_offset = 0; + + if (sort_method->resolve_descriptors( + sort_method, dtypes, given_descrs, loop_descrs, &view_offset) < 0) { + PyErr_SetString(PyExc_RuntimeError, + "unable to resolve descriptors for sort"); return -1; } + context.descriptors = loop_descrs; + context.parameters = &sort_params; + + // Arrays are always contiguous for sorting + npy_intp strides[2] = {loop_descrs[0]->elsize, loop_descrs[1]->elsize}; + + if (sort_method->get_strided_loop( + &context, 1, 0, strides, &strided_loop, &auxdata, method_flags) < 0) { + ret = -1; + goto fail; + } + } + else { + sort_table = PyDataType_GetArrFuncs(PyArray_DESCR(op))->sort; switch (flags) { case NPY_SORT_DEFAULT: - sort = generic_sort_table[NPY_QUICKSORT]; + sort = 
sort_table[NPY_QUICKSORT]; break; case NPY_SORT_STABLE: - sort = generic_sort_table[NPY_STABLESORT]; + sort = sort_table[NPY_STABLESORT]; break; default: break; } + + // Look for appropriate generic function if no type specific version + if (sort == NULL) { + if (!PyDataType_GetArrFuncs(PyArray_DESCR(op))->compare) { + PyErr_SetString(PyExc_TypeError, + "type does not have compare function"); + return -1; + } + switch (flags) { + case NPY_SORT_DEFAULT: + sort = generic_sort_table[NPY_QUICKSORT]; + break; + case NPY_SORT_STABLE: + sort = generic_sort_table[NPY_STABLESORT]; + break; + default: + break; + } + } + + if (sort == NULL) { + PyErr_SetString(PyExc_TypeError, + "no current sort function meets the requirements"); + return -1; + } } - if (sort == NULL) { - PyErr_SetString(PyExc_TypeError, - "no current sort function meets the requirements"); - return -1; + ret = _new_sortlike(op, axis, sort, strided_loop, + &context, auxdata, method_flags, NULL, NULL, 0); + +fail: + if (sort_method != NULL) { + NPY_AUXDATA_FREE(auxdata); + Py_DECREF(context.descriptors[0]); + Py_DECREF(context.descriptors[1]); } - return _new_sortlike(op, axis, sort, NULL, NULL, 0); + return ret; } /* Table of generic argsort function for use by PyArray_ArgSortEx */ @@ -3138,6 +3253,14 @@ PyArray_ArgSort(PyArrayObject *op, int axis, NPY_SORTKIND flags) { PyArrayObject *op2; PyObject *ret; + PyArrayMethodObject *argsort_method = NULL; + PyArrayMethod_StridedLoop *strided_loop = NULL; + PyArrayMethod_SortParameters sort_params = {.flags = flags}; + PyArrayMethod_Context context = {0}; + PyArray_Descr *loop_descrs[2]; + NpyAuxData *auxdata = NULL; + NPY_ARRAYMETHOD_FLAGS *method_flags = NULL; + PyArray_ArgSortFunc **argsort_table = NULL; PyArray_ArgSortFunc *argsort = NULL; @@ -3145,51 +3268,93 @@ PyArray_ArgSort(PyArrayObject *op, int axis, NPY_SORTKIND flags) flags &= ~_NPY_SORT_HEAPSORT; // Look for type specific functions - argsort_table = PyDataType_GetArrFuncs(PyArray_DESCR(op))->argsort; - 
switch (flags) { - case NPY_SORT_DEFAULT: - argsort = argsort_table[NPY_QUICKSORT]; - break; - case NPY_SORT_STABLE: - argsort = argsort_table[NPY_STABLESORT]; - break; - default: - break; - } - - // Look for generic function if no type specific version - if (argsort == NULL) { - if (!PyDataType_GetArrFuncs(PyArray_DESCR(op))->compare) { - PyErr_SetString(PyExc_TypeError, - "type does not have compare function"); + argsort_method = NPY_DT_SLOTS(NPY_DTYPE(PyArray_DESCR(op)))->argsort_meth; + if (argsort_method != NULL) { + PyArray_Descr *descr = PyArray_DESCR(op); + PyArray_Descr *odescr = PyArray_DescrFromType(NPY_INTP); + PyArray_DTypeMeta *dt = NPY_DTYPE(descr); + PyArray_DTypeMeta *odt = NPY_DTYPE(odescr); + + PyArray_DTypeMeta *dtypes[2] = {dt, odt}; + PyArray_Descr *given_descrs[2] = {descr, odescr}; + // we can ignore the view_offset for sorting + npy_intp view_offset = 0; + + int resolve_ret = argsort_method->resolve_descriptors( + argsort_method, dtypes, given_descrs, loop_descrs, &view_offset); + Py_DECREF(odescr); + if (resolve_ret < 0) { + PyErr_SetString(PyExc_RuntimeError, + "unable to resolve descriptors for argsort"); return NULL; } + context.descriptors = loop_descrs; + context.parameters = &sort_params; + + // Arrays are always contiguous for sorting + npy_intp strides[2] = {loop_descrs[0]->elsize, loop_descrs[1]->elsize}; + + if (argsort_method->get_strided_loop( + &context, 1, 0, strides, &strided_loop, &auxdata, method_flags) < 0) { + ret = NULL; + goto fail; + } + } + else { + argsort_table = PyDataType_GetArrFuncs(PyArray_DESCR(op))->argsort; switch (flags) { case NPY_SORT_DEFAULT: - argsort = generic_argsort_table[NPY_QUICKSORT]; + argsort = argsort_table[NPY_QUICKSORT]; break; case NPY_SORT_STABLE: - argsort = generic_argsort_table[NPY_STABLESORT]; + argsort = argsort_table[NPY_STABLESORT]; break; default: break; } - } - if (argsort == NULL) { - PyErr_SetString(PyExc_TypeError, - "no current argsort function meets the requirements"); - 
return NULL; + // Look for generic function if no type specific version + if (argsort == NULL) { + if (!PyDataType_GetArrFuncs(PyArray_DESCR(op))->compare) { + PyErr_SetString(PyExc_TypeError, + "type does not have compare function"); + return NULL; + } + switch (flags) { + case NPY_SORT_DEFAULT: + argsort = generic_argsort_table[NPY_QUICKSORT]; + break; + case NPY_SORT_STABLE: + argsort = generic_argsort_table[NPY_STABLESORT]; + break; + default: + break; + } + } + + if (argsort == NULL) { + PyErr_SetString(PyExc_TypeError, + "no current argsort function meets the requirements"); + return NULL; + } } op2 = (PyArrayObject *)PyArray_CheckAxis(op, &axis, 0); if (op2 == NULL) { - return NULL; + ret = NULL; + goto fail; } - ret = _new_argsortlike(op2, axis, argsort, NULL, NULL, 0); - + ret = _new_argsortlike(op2, axis, argsort, strided_loop, + &context, auxdata, method_flags, NULL, NULL, 0); Py_DECREF(op2); + +fail: + if (argsort_method != NULL) { + NPY_AUXDATA_FREE(auxdata); + Py_DECREF(context.descriptors[0]); + Py_DECREF(context.descriptors[1]); + } return ret; } diff --git a/numpy/_core/src/umath/_scaled_float_dtype.c b/numpy/_core/src/umath/_scaled_float_dtype.c index 06f18b5c7259..463ccbffae0b 100644 --- a/numpy/_core/src/umath/_scaled_float_dtype.c +++ b/numpy/_core/src/umath/_scaled_float_dtype.c @@ -21,6 +21,7 @@ #include "array_method.h" #include "common.h" #include "numpy/npy_math.h" +#include "npy_sort.h" #include "convert_datatype.h" #include "dtypemeta.h" #include "dispatching.h" @@ -862,6 +863,235 @@ sfloat_init_ufuncs(void) { } +NPY_NO_EXPORT int +sfloat_stable_sort_loop( + PyArrayMethod_Context *context, + char *const *data, + const npy_intp *dimensions, + const npy_intp *strides, + NpyAuxData *NPY_UNUSED(auxdata)) +{ + assert(data[0] == data[1]); + assert(strides[0] == sizeof(npy_float64) && strides[1] == sizeof(npy_float64)); + PyArrayMethod_SortParameters *parameters = (PyArrayMethod_SortParameters *)context->parameters; + assert(parameters->flags 
== NPY_SORT_STABLE); + + npy_intp N = dimensions[0]; + char *in = data[0]; + + return timsort_double(in, N, NULL); +} + + +NPY_NO_EXPORT int +sfloat_default_sort_loop( + PyArrayMethod_Context *context, + char *const *data, + const npy_intp *dimensions, + const npy_intp *strides, + NpyAuxData *NPY_UNUSED(auxdata)) +{ + assert(data[0] == data[1]); + assert(strides[0] == sizeof(npy_float64) && strides[1] == sizeof(npy_float64)); + PyArrayMethod_SortParameters *parameters = (PyArrayMethod_SortParameters *)context->parameters; + assert(parameters->flags == NPY_SORT_DEFAULT); + + npy_intp N = dimensions[0]; + char *in = data[0]; + + return quicksort_double(in, N, NULL); +} + + +NPY_NO_EXPORT int +sfloat_sort_get_loop( + PyArrayMethod_Context *context, + int aligned, int move_references, + const npy_intp *strides, + PyArrayMethod_StridedLoop **out_loop, + NpyAuxData **out_transferdata, + NPY_ARRAYMETHOD_FLAGS *flags) +{ + PyArrayMethod_SortParameters *parameters = (PyArrayMethod_SortParameters *)context->parameters; + + if (PyDataType_FLAGCHK(context->descriptors[0], NPY_NEEDS_PYAPI)) { + *flags |= NPY_METH_REQUIRES_PYAPI; + } + + if (parameters->flags == NPY_SORT_STABLE) { + *out_loop = (PyArrayMethod_StridedLoop *)sfloat_stable_sort_loop; + } + else if (parameters->flags == NPY_SORT_DEFAULT) { + *out_loop = (PyArrayMethod_StridedLoop *)sfloat_default_sort_loop; + } + else { + PyErr_SetString(PyExc_RuntimeError, "unsupported sort kind"); + return -1; + } + return 0; +} + + +static NPY_CASTING +sfloat_sort_resolve_descriptors( + PyArrayMethodObject *NPY_UNUSED(self), + PyArray_DTypeMeta *NPY_UNUSED(dtypes[2]), + PyArray_Descr *given_descrs[2], + PyArray_Descr *loop_descrs[2], + npy_intp *view_offset) +{ + assert(!(given_descrs[1] != given_descrs[0] && given_descrs[1] != NULL)); + assert(PyArray_IsNativeByteOrder(given_descrs[0]->byteorder)); + + loop_descrs[0] = given_descrs[0]; + Py_INCREF(loop_descrs[0]); + loop_descrs[1] = loop_descrs[0]; + Py_INCREF(loop_descrs[1]); + 
+ return NPY_NO_CASTING; +} + + +NPY_NO_EXPORT int +sfloat_stable_argsort_loop( + PyArrayMethod_Context *context, + char *const *data, + const npy_intp *dimensions, + const npy_intp *strides, + NpyAuxData *NPY_UNUSED(auxdata)) +{ + PyArrayMethod_SortParameters *parameters = (PyArrayMethod_SortParameters *)context->parameters; + assert(parameters->flags == NPY_SORT_STABLE); + assert(strides[0] == sizeof(npy_float64)); + assert(strides[1] == sizeof(npy_intp)); + + npy_intp N = dimensions[0]; + char *in = data[0]; + npy_intp *out = (npy_intp *)data[1]; + + return atimsort_double(in, out, N, NULL); +} + + +NPY_NO_EXPORT int +sfloat_default_argsort_loop( + PyArrayMethod_Context *context, + char *const *data, + const npy_intp *dimensions, + const npy_intp *strides, + NpyAuxData *NPY_UNUSED(auxdata)) +{ + PyArrayMethod_SortParameters *parameters = (PyArrayMethod_SortParameters *)context->parameters; + assert(parameters->flags == NPY_SORT_DEFAULT); + assert(strides[0] == sizeof(npy_float64)); + assert(strides[1] == sizeof(npy_intp)); + + npy_intp N = dimensions[0]; + char *in = data[0]; + npy_intp *out = (npy_intp *)data[1]; + + return aquicksort_double(in, out, N, NULL); +} + + +NPY_NO_EXPORT int +sfloat_argsort_get_loop( + PyArrayMethod_Context *context, + int aligned, int move_references, + const npy_intp *strides, + PyArrayMethod_StridedLoop **out_loop, + NpyAuxData **out_transferdata, + NPY_ARRAYMETHOD_FLAGS *flags) +{ + PyArrayMethod_SortParameters *parameters = (PyArrayMethod_SortParameters *)context->parameters; + + if (PyDataType_FLAGCHK(context->descriptors[0], NPY_NEEDS_PYAPI)) { + *flags |= NPY_METH_REQUIRES_PYAPI; + } + + if (parameters->flags == NPY_SORT_STABLE) { + *out_loop = (PyArrayMethod_StridedLoop *)sfloat_stable_argsort_loop; + } + else if (parameters->flags == NPY_SORT_DEFAULT) { + *out_loop = (PyArrayMethod_StridedLoop *)sfloat_default_argsort_loop; + } + else { + PyErr_SetString(PyExc_RuntimeError, "unsupported sort kind"); + return -1; + } + 
return 0; +} + + +NPY_NO_EXPORT NPY_CASTING +sfloat_argsort_resolve_descriptors( + PyArrayMethodObject *NPY_UNUSED(self), + PyArray_DTypeMeta *dtypes[2], + PyArray_Descr *given_descrs[2], + PyArray_Descr *loop_descrs[2], + npy_intp *view_offset) +{ + assert(given_descrs[1] == NULL || given_descrs[1]->type_num == NPY_INTP); + assert(PyArray_IsNativeByteOrder(given_descrs[0]->byteorder)); + + loop_descrs[0] = given_descrs[0]; + Py_INCREF(loop_descrs[0]); + loop_descrs[1] = PyArray_DescrFromType(NPY_INTP); + if (loop_descrs[1] == NULL) { + return -1; + } + return NPY_NO_CASTING; +} + + +static int +sfloat_init_sort(void) +{ + PyArray_DTypeMeta *dtypes[2] = {&PyArray_SFloatDType, &PyArray_SFloatDType}; + PyType_Slot slots[3] = { + {NPY_METH_resolve_descriptors, &sfloat_sort_resolve_descriptors}, + {NPY_METH_get_loop, &sfloat_sort_get_loop}, + {0, NULL} + }; + PyArrayMethod_Spec spec = { + .nin = 1, + .nout = 1, + .dtypes = dtypes, + .slots = slots, + }; + spec.name = "sfloat_sort"; + spec.casting = NPY_NO_CASTING; + spec.flags = NPY_METH_NO_FLOATINGPOINT_ERRORS; + + PyBoundArrayMethodObject *sort_meth = PyArrayMethod_FromSpec_int(&spec, 0); + if (sort_meth == NULL) { + return -1; + } + // TODO: once registration method is in place, use it instead of setting hidden slot + NPY_DT_SLOTS(&PyArray_SFloatDType)->sort_meth = sort_meth->method; + Py_INCREF(sort_meth->method); + Py_DECREF(sort_meth); + + spec.name = "sfloat_argsort"; + dtypes[1] = &PyArray_IntpDType; + + slots[0].slot = NPY_METH_resolve_descriptors; + slots[0].pfunc = &sfloat_argsort_resolve_descriptors; + slots[1].slot = NPY_METH_get_loop; + slots[1].pfunc = &sfloat_argsort_get_loop; + + // TODO: once registration method is in place, use it instead of setting hidden slot + PyBoundArrayMethodObject *argsort_meth = PyArrayMethod_FromSpec_int(&spec, 0); + if (argsort_meth == NULL) { + return -1; + } + NPY_DT_SLOTS(&PyArray_SFloatDType)->argsort_meth = argsort_meth->method; + Py_INCREF(argsort_meth->method); + 
Py_DECREF(argsort_meth); + + return 0; +} + /* * Python entry point, exported via `umathmodule.h` and `multiarraymodule.c`. * TODO: Should be moved when the necessary API is not internal anymore. @@ -898,6 +1128,10 @@ get_sfloat_dtype(PyObject *NPY_UNUSED(mod), PyObject *NPY_UNUSED(args)) return NULL; } + if (sfloat_init_sort() < 0) { + return NULL; + } + npy_thread_unsafe_state.get_sfloat_dtype_initialized = NPY_TRUE; return (PyObject *)&PyArray_SFloatDType; } diff --git a/numpy/_core/tests/test_custom_dtypes.py b/numpy/_core/tests/test_custom_dtypes.py index 4d2082a949b7..f03e35dca9f2 100644 --- a/numpy/_core/tests/test_custom_dtypes.py +++ b/numpy/_core/tests/test_custom_dtypes.py @@ -231,6 +231,78 @@ def test_wrapped_and_wrapped_reductions(self): expected = np.hypot.reduce(float_equiv, keepdims=True) assert res.view(np.float64) * 2 == expected + def test_sort(self): + a = self._get_array(1.) + a = a[::-1] # reverse it + + a.sort() + assert_array_equal(a.view(np.float64), [1., 2., 3.]) + + a = self._get_array(1.) 
+ a = a[::-1] # reverse it + + sorted_a = np.sort(a) + assert_array_equal(sorted_a.view(np.float64), [1., 2., 3.]) + # original is unchanged + assert_array_equal(a.view(np.float64), [3., 2., 1.]) + + a = self._get_array(0.5) # different factor + a = a[::2][::-1] # non-contiguous + sorted_a = np.sort(a) + assert_array_equal(sorted_a.view(np.float64), [2., 6.]) + # original is unchanged + assert_array_equal(a.view(np.float64), [6., 2.]) + + a = self._get_array(0.5, aligned=False) + a = a[::-1] # reverse it + sorted_a = np.sort(a) + assert_array_equal(sorted_a.view(np.float64), [2., 4., 6.]) + # original is unchanged + assert_array_equal(a.view(np.float64), [6., 4., 2.]) + + sorted_a = np.sort(a, stable=True) + assert_array_equal(sorted_a.view(np.float64), [2., 4., 6.]) + # original is unchanged + assert_array_equal(a.view(np.float64), [6., 4., 2.]) + + sorted_a = np.sort(a, stable=False) + assert_array_equal(sorted_a.view(np.float64), [2., 4., 6.]) + # original is unchanged + assert_array_equal(a.view(np.float64), [6., 4., 2.]) + + def test_argsort(self): + a = self._get_array(1.) 
+ a = a[::-1] # reverse it + + indices = np.argsort(a) + assert_array_equal(indices, [2, 1, 0]) + # original is unchanged + assert_array_equal(a.view(np.float64), [3., 2., 1.]) + + a = self._get_array(0.5) + a = a[::2][::-1] # reverse it + indices = np.argsort(a) + assert_array_equal(indices, [1, 0]) + # original is unchanged + assert_array_equal(a.view(np.float64), [6., 2.]) + + a = self._get_array(0.5, aligned=False) + a = a[::-1] # reverse it + indices = np.argsort(a) + assert_array_equal(indices, [2, 1, 0]) + # original is unchanged + assert_array_equal(a.view(np.float64), [6., 4., 2.]) + + sorted_indices = np.argsort(a, stable=True) + assert_array_equal(sorted_indices, [2, 1, 0]) + # original is unchanged + assert_array_equal(a.view(np.float64), [6., 4., 2.]) + + sorted_indices = np.argsort(a, stable=False) + assert_array_equal(sorted_indices, [2, 1, 0]) + # original is unchanged + assert_array_equal(a.view(np.float64), [6., 4., 2.]) + def test_astype_class(self): # Very simple test that we accept `.astype()` also on the class. 
# ScaledFloat always returns the default descriptor, but it does From 30f0891b64e1bfae47e7414ecc96f57d99208586 Mon Sep 17 00:00:00 2001 From: Andrew Nelson Date: Wed, 8 Oct 2025 04:08:51 +1100 Subject: [PATCH 0539/1718] CI: macos-13 --> macos-15-intel (#29886) --- .github/workflows/macos.yml | 9 ++------- .github/workflows/wheels.yml | 8 ++------ 2 files changed, 4 insertions(+), 13 deletions(-) diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index ef9e86f6ba70..868dac6dcbbc 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -22,7 +22,7 @@ jobs: name: macOS x86-64 conda # To enable this workflow on a fork, comment out: if: github.repository == 'numpy/numpy' - runs-on: macos-13 + runs-on: macos-15-intel strategy: fail-fast: false matrix: @@ -113,7 +113,7 @@ jobs: fail-fast: false matrix: build_runner: - - [ macos-13, "macos_x86_64" ] + - [ macos-15-intel, "macos_x86_64" ] - [ macos-14, "macos_arm64" ] version: ["3.11", "3.14t-dev"] @@ -128,11 +128,6 @@ jobs: with: python-version: ${{ matrix.version }} - - uses: maxim-lobanov/setup-xcode@60606e260d2fc5762a71e64e74b2174e8ea3c8bd # v1.6.0 - if: ${{ matrix.build_runner[0] == 'macos-13' }} - with: - xcode-version: '14.3' - - name: Install dependencies run: | pip install -r requirements/build_requirements.txt diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 8e8c8768cd9c..5294c34cbb63 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -34,7 +34,7 @@ jobs: - [ubuntu-22.04, musllinux_x86_64, ""] - [ubuntu-22.04-arm, manylinux_aarch64, ""] - [ubuntu-22.04-arm, musllinux_aarch64, ""] - - [macos-13, macosx_x86_64, openblas] + - [macos-15-intel, macosx_x86_64, openblas] - [macos-14, macosx_arm64, openblas] - [windows-2022, win_amd64, ""] - [windows-11-arm, win_arm64, ""] @@ -72,7 +72,7 @@ jobs: echo "CIBW_ENVIRONMENT_WINDOWS=PKG_CONFIG_PATH=$CIBW" >> $env:GITHUB_ENV - name: Setup macOS - if: matrix.buildplat[0] == 
'macos-13' || matrix.buildplat[0] == 'macos-14' + if: matrix.buildplat[0] == 'macos-15-intel' || matrix.buildplat[0] == 'macos-14' run: | # Needed due to https://github.com/actions/runner-images/issues/3371 # Supported versions: https://github.com/actions/runner-images/blob/main/images/macos/macos-14-arm64-Readme.md @@ -84,10 +84,6 @@ jobs: # only target Sonoma onwards CIBW="MACOSX_DEPLOYMENT_TARGET=14.0 INSTALL_OPENBLAS=false RUNNER_OS=macOS" echo "CIBW_ENVIRONMENT_MACOS=$CIBW" >> "$GITHUB_ENV" - - # the macos-13 image that's used for building the x86_64 wheel can't test - # a wheel with deployment target >= 14 without further work - echo "CIBW_TEST_SKIP=*-macosx_x86_64" >> "$GITHUB_ENV" else # macosx_x86_64 with OpenBLAS # if INSTALL_OPENBLAS isn't specified then scipy-openblas is automatically installed From 128d684bdc8d756c63341baab0fd7e4df3fbc09c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 7 Oct 2025 12:51:49 -0600 Subject: [PATCH 0540/1718] MAINT: Bump github/codeql-action from 3.30.6 to 4.30.7 (#29891) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.30.6 to 4.30.7. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/64d10c13136e1c5bce3e5fbde8d4906eeaafc885...e296a935590eb16afc0c0108289f68c87e2a89a5) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 4.30.7 dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 0b055370119d..d6719c246c0d 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@64d10c13136e1c5bce3e5fbde8d4906eeaafc885 # v3.30.6 + uses: github/codeql-action/init@e296a935590eb16afc0c0108289f68c87e2a89a5 # v4.30.7 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@64d10c13136e1c5bce3e5fbde8d4906eeaafc885 # v3.30.6 + uses: github/codeql-action/autobuild@e296a935590eb16afc0c0108289f68c87e2a89a5 # v4.30.7 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@64d10c13136e1c5bce3e5fbde8d4906eeaafc885 # v3.30.6 + uses: github/codeql-action/analyze@e296a935590eb16afc0c0108289f68c87e2a89a5 # v4.30.7 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 3ea1074c5a9d..6fb0391abde5 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
- name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@64d10c13136e1c5bce3e5fbde8d4906eeaafc885 # v2.1.27 + uses: github/codeql-action/upload-sarif@e296a935590eb16afc0c0108289f68c87e2a89a5 # v2.1.27 with: sarif_file: results.sarif From dbeb2514431d660ffaa97412958446577e7e0bd0 Mon Sep 17 00:00:00 2001 From: Inessa Pawson Date: Tue, 7 Oct 2025 21:35:46 -0400 Subject: [PATCH 0541/1718] Add Plausible analytics to the NumPy documentation --- doc/source/conf.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/doc/source/conf.py b/doc/source/conf.py index eba0bd014fb0..0204818aa094 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -290,6 +290,10 @@ def setup(app): "json_url": "https://numpy.org/doc/_static/versions.json", }, "show_version_warning_banner": True, + "analytics": [ + "plausible_analytics_domain": "numpy.org/doc/stable/", + "plausible_analytics_url": "https://views.scientific-python.org/js/script.file-downloads.hash.outbound-links.js", + ] } html_title = f"{project} v{version} Manual" From 2a9315a84ffc5a79f9a56f749386321acb78d864 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 8 Oct 2025 13:26:35 -0600 Subject: [PATCH 0542/1718] MAINT: Bump astral-sh/setup-uv from 6.8.0 to 7.0.0 (#29895) Bumps [astral-sh/setup-uv](https://github.com/astral-sh/setup-uv) from 6.8.0 to 7.0.0. - [Release notes](https://github.com/astral-sh/setup-uv/releases) - [Commits](https://github.com/astral-sh/setup-uv/compare/d0cc045d04ccac9d8b7881df0226f9e82c39688e...eb1897b8dc4b5d5bfe39a428a8f2304605e0983c) --- updated-dependencies: - dependency-name: astral-sh/setup-uv dependency-version: 7.0.0 dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/mypy.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml index c6b7c89b8f5c..8045d6bb3969 100644 --- a/.github/workflows/mypy.yml +++ b/.github/workflows/mypy.yml @@ -55,7 +55,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6.8.0 + - uses: astral-sh/setup-uv@eb1897b8dc4b5d5bfe39a428a8f2304605e0983c # v7.0.0 with: python-version: ${{ matrix.os_python[1] }} activate-environment: true From 4ddd342682e0ae8c28abf8f5509ed4baf93c424e Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 9 Oct 2025 01:50:34 +0200 Subject: [PATCH 0543/1718] BUG: Fixup float16 conversion error path and add tests (#29897) I had modified the code to not purely rely on PyErr_Occurred() for error propagation, but missed this conversion call. This also applied to float conversions actually. 
Signed-off-by: Sebastian Berg --- numpy/_core/src/multiarray/arraytypes.c.src | 6 ++++++ numpy/_core/tests/test_array_coercion.py | 21 +++++++++++++++++++++ numpy/_core/tests/test_half.py | 15 +++++++++++++++ 3 files changed, 42 insertions(+) diff --git a/numpy/_core/src/multiarray/arraytypes.c.src b/numpy/_core/src/multiarray/arraytypes.c.src index da00596e7e24..d67bdd046c6d 100644 --- a/numpy/_core/src/multiarray/arraytypes.c.src +++ b/numpy/_core/src/multiarray/arraytypes.c.src @@ -124,6 +124,9 @@ static float MyPyFloat_AsFloat(PyObject *obj) { double d_val = MyPyFloat_AsDouble(obj); + if (error_converting(d_val)) { + return -1; + } float res = (float)d_val; if (NPY_UNLIKELY(npy_isinf(res) && !npy_isinf(d_val))) { if (PyUFunc_GiveFloatingpointErrors("cast", NPY_FPE_OVERFLOW) < 0) { @@ -138,6 +141,9 @@ static npy_half MyPyFloat_AsHalf(PyObject *obj) { double d_val = MyPyFloat_AsDouble(obj); + if (error_converting(d_val)) { + return -1; + } npy_half res = npy_double_to_half(d_val); if (NPY_UNLIKELY(npy_half_isinf(res) && !npy_isinf(d_val))) { if (PyUFunc_GiveFloatingpointErrors("cast", NPY_FPE_OVERFLOW) < 0) { diff --git a/numpy/_core/tests/test_array_coercion.py b/numpy/_core/tests/test_array_coercion.py index 658c672d5f99..9b2a7a5bab85 100644 --- a/numpy/_core/tests/test_array_coercion.py +++ b/numpy/_core/tests/test_array_coercion.py @@ -904,3 +904,24 @@ def test_empty_string(): assert_array_equal(res, b"") assert res.shape == (2, 10) assert res.dtype == "S1" + + +@pytest.mark.parametrize("dtype", ["S", "U", object]) +@pytest.mark.parametrize("res_dt,hug_val", + [("float16", "1e30"), ("float32", "1e200")]) +def test_string_to_float_coercion_errors(dtype, res_dt, hug_val): + # This test primarly tests setitem + val = np.array(["3M"], dtype=dtype)[0] # use the scalar + + with pytest.raises(ValueError): + np.array(val, dtype=res_dt) + + val = np.array([hug_val], dtype=dtype)[0] # use the scalar + + with np.errstate(all="warn"): + with pytest.warns(RuntimeWarning): 
+ np.array(val, dtype=res_dt) + + with np.errstate(all="raise"): + with pytest.raises(FloatingPointError): + np.array(val, dtype=res_dt) diff --git a/numpy/_core/tests/test_half.py b/numpy/_core/tests/test_half.py index ef13c5fbcde6..3ced5b466a44 100644 --- a/numpy/_core/tests/test_half.py +++ b/numpy/_core/tests/test_half.py @@ -93,6 +93,21 @@ def test_half_conversion_to_string(self, string_dt): arr = np.ones(3, dtype=np.float16).astype(string_dt) assert arr.dtype == expected_dt + @pytest.mark.parametrize("dtype", ["S", "U", object]) + def test_to_half_cast_error(self, dtype): + arr = np.array(["3M"], dtype=dtype) + with pytest.raises(ValueError): + arr.astype(np.float16) + + arr = np.array(["23490349034"], dtype=dtype) + with np.errstate(all="warn"): + with pytest.warns(RuntimeWarning): + arr.astype(np.float16) + + with np.errstate(all="raise"): + with pytest.raises(FloatingPointError): + arr.astype(np.float16) + @pytest.mark.parametrize("string_dt", ["S", "U"]) def test_half_conversion_from_string(self, string_dt): string = np.array("3.1416", dtype=string_dt) From 1ac04e0f7f64f672f8c2d2a04a82a2baaefa09eb Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Thu, 9 Oct 2025 03:02:14 +0200 Subject: [PATCH 0544/1718] TYP: add missing `__slots__` (#29901) --- numpy/__init__.pyi | 2 ++ numpy/exceptions.pyi | 2 ++ numpy/lib/_index_tricks_impl.pyi | 12 ++++++++++++ numpy/lib/mixins.pyi | 2 ++ numpy/ma/extras.pyi | 4 ++++ 5 files changed, 22 insertions(+) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index d29bb196a070..aed762195a76 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -5736,6 +5736,8 @@ pow = power true_divide = divide class errstate: + __slots__ = "_all", "_call", "_divide", "_invalid", "_over", "_token", "_under" + def __init__( self, *, diff --git a/numpy/exceptions.pyi b/numpy/exceptions.pyi index 9ed50927d070..9bcc097dfc0f 100644 --- a/numpy/exceptions.pyi +++ b/numpy/exceptions.pyi @@ -17,6 +17,8 @@ class TooHardError(RuntimeError): 
... class DTypePromotionError(TypeError): ... class AxisError(ValueError, IndexError): + __slots__ = "_msg", "axis", "ndim" + axis: int | None ndim: int | None @overload diff --git a/numpy/lib/_index_tricks_impl.pyi b/numpy/lib/_index_tricks_impl.pyi index c9ee8a5b0bb7..b624cfff4481 100644 --- a/numpy/lib/_index_tricks_impl.pyi +++ b/numpy/lib/_index_tricks_impl.pyi @@ -106,6 +106,8 @@ class ndindex: def ndincr(self, /) -> None: ... class nd_grid(Generic[_BoolT_co]): + __slots__ = ("sparse",) + sparse: _BoolT_co def __init__(self, sparse: _BoolT_co = ...) -> None: ... @overload @@ -115,10 +117,14 @@ class nd_grid(Generic[_BoolT_co]): @final class MGridClass(nd_grid[L[False]]): + __slots__ = () + def __init__(self) -> None: ... @final class OGridClass(nd_grid[L[True]]): + __slots__ = () + def __init__(self) -> None: ... class AxisConcatenator(Generic[_AxisT_co, _MatrixT_co, _NDMinT_co, _Trans1DT_co]): @@ -155,13 +161,19 @@ class AxisConcatenator(Generic[_AxisT_co, _MatrixT_co, _NDMinT_co, _Trans1DT_co] @final class RClass(AxisConcatenator[L[0], L[False], L[1], L[-1]]): + __slots__ = () + def __init__(self, /) -> None: ... @final class CClass(AxisConcatenator[L[-1], L[False], L[2], L[0]]): + __slots__ = () + def __init__(self, /) -> None: ... class IndexExpression(Generic[_BoolT_co]): + __slots__ = ("maketuple",) + maketuple: _BoolT_co def __init__(self, maketuple: _BoolT_co) -> None: ... @overload diff --git a/numpy/lib/mixins.pyi b/numpy/lib/mixins.pyi index f23c58fa6586..1572f6bee289 100644 --- a/numpy/lib/mixins.pyi +++ b/numpy/lib/mixins.pyi @@ -13,6 +13,8 @@ __all__ = ["NDArrayOperatorsMixin"] # As such, only little type safety can be provided here. 
class NDArrayOperatorsMixin(ABC): + __slots__ = () + @abstractmethod def __array_ufunc__( self, diff --git a/numpy/ma/extras.pyi b/numpy/ma/extras.pyi index 5a7b2b399ea8..034b309af080 100644 --- a/numpy/ma/extras.pyi +++ b/numpy/ma/extras.pyi @@ -110,12 +110,16 @@ def cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None): ... def corrcoef(x, y=None, rowvar=True, bias=..., allow_masked=True, ddof=...): ... class MAxisConcatenator(AxisConcatenator): + __slots__ = () + @staticmethod def concatenate(arrays: Incomplete, axis: int = 0) -> Incomplete: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] @classmethod def makemat(cls, arr: Incomplete) -> Incomplete: ... # type: ignore[override] # pyright: ignore[reportIncompatibleVariableOverride] class mr_class(MAxisConcatenator): + __slots__ = () + def __init__(self) -> None: ... mr_: mr_class From 8e30aa91b17d1cb466a2341aeba1b039d1cf6592 Mon Sep 17 00:00:00 2001 From: Swayam Date: Thu, 9 Oct 2025 15:00:37 +0530 Subject: [PATCH 0545/1718] BUG: Ensure backwards compatibility for patching finfo (#29899) Ensure attributes are settable. 
JAX uses this to subclass and then patch it's own finfo objects --------- Co-authored-by: Sebastian Berg --- numpy/_core/getlimits.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/numpy/_core/getlimits.py b/numpy/_core/getlimits.py index 1317c8b1ea0a..b8b6f69cd5b7 100644 --- a/numpy/_core/getlimits.py +++ b/numpy/_core/getlimits.py @@ -258,11 +258,11 @@ def iexp(self): return math.ceil(math.log2(self.maxexp - self.minexp)) def __str__(self): - if self._fmt is not None: - return self._fmt + if (fmt := getattr(self, "_fmt", None)) is not None: + return fmt def get_str(name, pad=None): - if (val := getattr(self, name)) is None: + if (val := getattr(self, name, None)) is None: return "" if pad is not None: s = str(val).ljust(pad) @@ -303,8 +303,8 @@ def get_str(name, pad=None): return fmt def __repr__(self): - if self._repr is not None: - return self._repr + if (repr_str := getattr(self, "_repr", None)) is not None: + return repr_str c = self.__class__.__name__ @@ -324,7 +324,7 @@ def __repr__(self): self._repr = repr_str return repr_str - @property + @cached_property def tiny(self): """Return the value for tiny, alias of smallest_normal. 
From 50ddf5be5ecade9e59dfdfbaed6a04a417cb4759 Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Thu, 9 Oct 2025 13:26:24 +0300 Subject: [PATCH 0546/1718] BLD: do not use matplotlib 3.10.6 [skip azp][skip actions][skip cirrus] (#29906) --- requirements/doc_requirements.txt | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/requirements/doc_requirements.txt b/requirements/doc_requirements.txt index 9d006b961923..b8f5cb2bd8fd 100644 --- a/requirements/doc_requirements.txt +++ b/requirements/doc_requirements.txt @@ -5,8 +5,7 @@ pydata-sphinx-theme>=0.15.2 sphinx-copybutton sphinx-design scipy -matplotlib -pyparsing<3.3 +matplotlib!=3.10.6 pandas breathe>4.33.0 ipython!=8.1.0 From 5fe21fbae8125c032b4e23606be5b3a11d25dd4a Mon Sep 17 00:00:00 2001 From: "hpkfft.com" Date: Thu, 9 Oct 2025 04:26:35 -0700 Subject: [PATCH 0547/1718] ENH: Set DLPack tensor `shape` and `strides` to NULL iff `ndim == 0`. (#29872) This assumes a slight shift in the dlpack interpretation: https://github.com/dmlc/dlpack/issues/177 --- DLPack allows, but does not require, DLTensor.strides to be NULL if PyArray_IS_C_CONTIGUOUS. Previously, in this case, NumPy set the strides pointer to NULL. Now, NumPy will supply a valid pointer to correctly initialized strides when ndim > 0. This change can improve performance for downstream users. For example, nanobind can be used to import a NumPy array using the DLPack protocol to make the array available to C++ code. If DLTensor.strides is NULL, nanobind allocates and initializes a strides array. This redundant work is now avoided since NumPy provides a pointer to the strides, which it allocated and initialized anyway. If ndim == 0, this PR sets both shape and strides to NULL. Previously, strides was was set to NULL (since PyArray_IS_C_CONTIGUOUS), but shape pointed past the end of allocated memory. For security and debugability, NULL is preferred. 
--- numpy/_core/src/multiarray/dlpack.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/numpy/_core/src/multiarray/dlpack.c b/numpy/_core/src/multiarray/dlpack.c index ac37a04c30c6..62cd137daa7c 100644 --- a/numpy/_core/src/multiarray/dlpack.c +++ b/numpy/_core/src/multiarray/dlpack.c @@ -285,10 +285,6 @@ fill_dl_tensor_information( } dl_tensor->ndim = ndim; - if (PyArray_IS_C_CONTIGUOUS(self)) { - /* No need to pass strides, so just NULL it again */ - dl_tensor->strides = NULL; - } dl_tensor->byte_offset = 0; return 0; @@ -351,9 +347,8 @@ create_dlpack_capsule( dl_tensor = &managed->dl_tensor; } - dl_tensor->shape = (int64_t *)((char *)ptr + offset); - /* Note that strides may be set to NULL later if C-contiguous */ - dl_tensor->strides = dl_tensor->shape + ndim; + dl_tensor->shape = (ndim > 0) ? (int64_t *)((char *)ptr + offset) : NULL; + dl_tensor->strides = (ndim > 0) ? dl_tensor->shape + ndim : NULL; if (fill_dl_tensor_information(dl_tensor, self, result_device) < 0) { PyMem_Free(ptr); From 7b80c99ef668f49ddc0bbb94029454e81c83371d Mon Sep 17 00:00:00 2001 From: William Pursell Date: Thu, 9 Oct 2025 05:32:44 -0600 Subject: [PATCH 0548/1718] BLD: Add missing (#29704) This is required for the vendored-meson build on MacOS with some versions of clang. 
--- numpy/_core/src/multiarray/unique.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/numpy/_core/src/multiarray/unique.cpp b/numpy/_core/src/multiarray/unique.cpp index d31fed6f6908..979fb8fae0a4 100644 --- a/numpy/_core/src/multiarray/unique.cpp +++ b/numpy/_core/src/multiarray/unique.cpp @@ -9,6 +9,7 @@ #include #include #include +#include #include #include "numpy/arrayobject.h" From 1361720c6d6e7429a4bfa1d0712fd0306ca449d3 Mon Sep 17 00:00:00 2001 From: Anik Chand Date: Thu, 9 Oct 2025 20:57:50 +0530 Subject: [PATCH 0549/1718] DEP: add release note for linalg/fft deprecation finalization --- doc/release/upcoming_changes/29000.expiring.rst | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 doc/release/upcoming_changes/29000.expiring.rst diff --git a/doc/release/upcoming_changes/29000.expiring.rst b/doc/release/upcoming_changes/29000.expiring.rst new file mode 100644 index 000000000000..2353f38a7083 --- /dev/null +++ b/doc/release/upcoming_changes/29000.expiring.rst @@ -0,0 +1,10 @@ +Deprecated features removed in 2.2.0 +==================================== + +The following deprecated functions and classes have been removed: + +* :mod:`numpy.linalg.linalg` + Previously deprecated, use :mod:`numpy.linalg` instead. + +* :mod:`numpy.fft.helpers` + Previously deprecated, use :mod:`numpy.fft` instead. \ No newline at end of file From 53b0d99f9c01bc9837bda4b4fa6c2d8752c09c76 Mon Sep 17 00:00:00 2001 From: Sayed Awad Date: Thu, 9 Oct 2025 18:40:11 +0300 Subject: [PATCH 0550/1718] BUG: Fix float16-sort failures on 32-bit x86 MSVC (#29908) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The failures are triggered when the Intel x86 sort AVX‑512 kernels for 16‑bit are enabled at build time and the CPU/OS also supports them. A quick look at the `zmm_vector::ge(reg_t, reg_t)` seems to not correctly generate the instructions for it. 
This patch does not actually fix the underlying bug; instead, it disables these kernels on 32‑bit MSVC builds as a stop‑gap, since the issue requires further investigation and an upstream fix. Note: Newer NumPy releases may drop the entire AVX‑512 support on 32‑bit for all compilers and will enable at most AVX2 as part of gh-28896 --- numpy/_core/meson.build | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index 0d8c513d4fde..35a792c29909 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -867,7 +867,9 @@ foreach gen_mtargets : [ [ 'x86_simd_qsort_16bit.dispatch.h', 'src/npysort/x86_simd_qsort_16bit.dispatch.cpp', - use_intel_sort ? [AVX512_SPR, AVX512_ICL] : [] + # Do not enable AVX-512 on MSVC 32-bit (x86): it’s buggy there; + # Ref: NumPy issue numpy/numpy#29808 + use_intel_sort and not (cc.get_id() == 'msvc' and cpu_family == 'x86') ? [AVX512_SPR, AVX512_ICL] : [] ], [ 'highway_qsort.dispatch.h', From e89ec589000e471f04c71b676c866baec05ecd7d Mon Sep 17 00:00:00 2001 From: Sandeep Gupta Date: Thu, 9 Oct 2025 22:42:35 +0530 Subject: [PATCH 0551/1718] BUG: Fix INT_MIN % -1 to return 0 for all signed integer types (#29893) * BUG: Fix INT_MIN % -1 to return 0 for all signed integer types - Explicitly check for INT_MIN % -1 in scalar tail loops for fmod and remainder kernels. - Set result to 0 to avoid undefined behavior and match NumPy/Python expectations. - Ensures correct, portable behavior on all platforms (e.g., PPC64LE). 
* Apply suggestion from @seberg --------- Co-authored-by: Sebastian Berg --- numpy/_core/src/umath/loops_modulo.dispatch.c.src | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/numpy/_core/src/umath/loops_modulo.dispatch.c.src b/numpy/_core/src/umath/loops_modulo.dispatch.c.src index 032cc3344060..4645fe14a487 100644 --- a/numpy/_core/src/umath/loops_modulo.dispatch.c.src +++ b/numpy/_core/src/umath/loops_modulo.dispatch.c.src @@ -490,12 +490,16 @@ vsx4_simd_@func@_by_scalar_contig_@sfx@(char **args, npy_intp len) #else /* fmod and remainder */ for (; len > 0; --len, ++src1, ++dst1) { const npyv_lanetype_@sfx@ a = *src1; - *dst1 = a % scalar; + if (NPY_UNLIKELY(a == NPY_MIN_INT@len@ && scalar == -1)) { + *dst1 = 0; + } else { + *dst1 = a % scalar; #if @id@ == 1 /* remainder */ - if (!((a > 0) == (scalar > 0) || *dst1 == 0)) { - *dst1 += scalar; - } + if (!((a > 0) == (scalar > 0) || *dst1 == 0)) { + *dst1 += scalar; + } #endif + } } #endif npyv_cleanup(); From e7d96553eb02a9093a08f5f0913e0ab72042ef0f Mon Sep 17 00:00:00 2001 From: ANIK CHAND <161185149+anikchand461@users.noreply.github.com> Date: Thu, 9 Oct 2025 23:10:22 +0530 Subject: [PATCH 0552/1718] DEP: remove linalg.py (finalize deprecation) --- numpy/linalg/linalg.py | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/numpy/linalg/linalg.py b/numpy/linalg/linalg.py index 81c80d0fd690..8b137891791f 100644 --- a/numpy/linalg/linalg.py +++ b/numpy/linalg/linalg.py @@ -1,17 +1 @@ -def __getattr__(attr_name): - import warnings - from numpy.linalg import _linalg - ret = getattr(_linalg, attr_name, None) - if ret is None: - raise AttributeError( - f"module 'numpy.linalg.linalg' has no attribute {attr_name}") - warnings.warn( - "The numpy.linalg.linalg has been made private and renamed to " - "numpy.linalg._linalg. All public functions exported by it are " - f"available from numpy.linalg. 
Please use numpy.linalg.{attr_name} " - "instead.", - DeprecationWarning, - stacklevel=3 - ) - return ret From 41ba30f4556f60975068864aef93df24bbcf1a74 Mon Sep 17 00:00:00 2001 From: ANIK CHAND <161185149+anikchand461@users.noreply.github.com> Date: Thu, 9 Oct 2025 23:11:49 +0530 Subject: [PATCH 0553/1718] DEP: remove fft/helpers.py (finalize deprecation) --- numpy/fft/helper.py | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/numpy/fft/helper.py b/numpy/fft/helper.py index 08d5662c6d17..8b137891791f 100644 --- a/numpy/fft/helper.py +++ b/numpy/fft/helper.py @@ -1,17 +1 @@ -def __getattr__(attr_name): - import warnings - from numpy.fft import _helper - ret = getattr(_helper, attr_name, None) - if ret is None: - raise AttributeError( - f"module 'numpy.fft.helper' has no attribute {attr_name}") - warnings.warn( - "The numpy.fft.helper has been made private and renamed to " - "numpy.fft._helper. All four functions exported by it (i.e. fftshift, " - "ifftshift, fftfreq, rfftfreq) are available from numpy.fft. 
" - f"Please use numpy.fft.{attr_name} instead.", - DeprecationWarning, - stacklevel=3 - ) - return ret From eba3d75e401bb1090b6ec389fd81aa65706a467a Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Thu, 9 Oct 2025 19:53:38 +0200 Subject: [PATCH 0554/1718] TYP: parameters with missing default value (#29905) --- numpy/__init__.pyi | 74 +++---- numpy/_core/defchararray.pyi | 30 +-- numpy/_core/multiarray.pyi | 74 +++---- numpy/_pytesttester.pyi | 14 +- numpy/_typing/_ufunc.pyi | 4 +- numpy/exceptions.pyi | 4 +- numpy/fft/_pocketfft.pyi | 8 +- numpy/lib/_datasource.pyi | 6 +- numpy/lib/_function_base_impl.pyi | 4 +- numpy/lib/_index_tricks_impl.pyi | 10 +- numpy/lib/_npyio_impl.pyi | 6 +- numpy/lib/_stride_tricks_impl.pyi | 2 +- numpy/lib/_twodim_base_impl.pyi | 4 +- numpy/linalg/_linalg.pyi | 8 +- numpy/polynomial/_polybase.pyi | 10 +- numpy/polynomial/_polytypes.pyi | 6 +- numpy/polynomial/chebyshev.pyi | 6 +- numpy/random/_generator.pyi | 313 ++++++++++++++--------------- numpy/random/_mt19937.pyi | 2 +- numpy/random/_pcg64.pyi | 4 +- numpy/random/_philox.pyi | 2 +- numpy/random/mtrand.pyi | 318 +++++++++++++++--------------- numpy/testing/_private/utils.pyi | 8 +- 23 files changed, 459 insertions(+), 458 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index aed762195a76..7d0bd028c826 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1641,11 +1641,11 @@ class flatiter(Generic[_ArrayT_co]): value: Any, ) -> None: ... @overload - def __array__(self: flatiter[ndarray[_1DShapeT, _DTypeT]], dtype: None = ..., /) -> ndarray[_1DShapeT, _DTypeT]: ... + def __array__(self: flatiter[ndarray[_1DShapeT, _DTypeT]], dtype: None = None, /) -> ndarray[_1DShapeT, _DTypeT]: ... @overload def __array__(self: flatiter[ndarray[_1DShapeT, Any]], dtype: _DTypeT, /) -> ndarray[_1DShapeT, _DTypeT]: ... @overload - def __array__(self: flatiter[ndarray[Any, _DTypeT]], dtype: None = ..., /) -> ndarray[_AnyShape, _DTypeT]: ... 
+ def __array__(self: flatiter[ndarray[Any, _DTypeT]], dtype: None = None, /) -> ndarray[_AnyShape, _DTypeT]: ... @overload def __array__(self, dtype: _DTypeT, /) -> ndarray[_AnyShape, _DTypeT]: ... @@ -2346,9 +2346,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # 1D + 1D returns a scalar; # all other with at least 1 non-0D array return an ndarray. @overload - def dot(self, b: _ScalarLike_co, out: None = ...) -> NDArray[Any]: ... + def dot(self, b: _ScalarLike_co, out: None = None) -> NDArray[Any]: ... @overload - def dot(self, b: ArrayLike, out: None = ...) -> Any: ... + def dot(self, b: ArrayLike, out: None = None) -> Any: ... @overload def dot(self, b: ArrayLike, out: _ArrayT) -> _ArrayT: ... @@ -2391,7 +2391,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): axis1: SupportsIndex = ..., axis2: SupportsIndex = ..., dtype: DTypeLike | None = ..., - out: None = ..., + out: None = None, ) -> Any: ... @overload def trace( @@ -2418,7 +2418,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): self: NDArray[_ScalarT], indices: _IntLike_co, axis: SupportsIndex | None = ..., - out: None = ..., + out: None = None, mode: _ModeKind = ..., ) -> _ScalarT: ... @overload @@ -2426,7 +2426,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): self, indices: _ArrayLikeInt_co, axis: SupportsIndex | None = ..., - out: None = ..., + out: None = None, mode: _ModeKind = ..., ) -> ndarray[_AnyShape, _DTypeT_co]: ... @overload @@ -3660,7 +3660,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): self, indices: _IntLike_co, axis: SupportsIndex | None = ..., - out: None = ..., + out: None = None, mode: _ModeKind = ..., ) -> Self: ... @overload @@ -3668,7 +3668,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): self, indices: _ArrayLikeInt_co, axis: SupportsIndex | None = ..., - out: None = ..., + out: None = None, mode: _ModeKind = ..., ) -> NDArray[Self]: ... 
@overload @@ -5742,11 +5742,11 @@ class errstate: self, *, call: _ErrCall = ..., - all: _ErrKind | None = ..., - divide: _ErrKind | None = ..., - over: _ErrKind | None = ..., - under: _ErrKind | None = ..., - invalid: _ErrKind | None = ..., + all: _ErrKind | None = None, + divide: _ErrKind | None = None, + over: _ErrKind | None = None, + under: _ErrKind | None = None, + invalid: _ErrKind | None = None, ) -> None: ... def __enter__(self) -> None: ... def __exit__( @@ -5877,37 +5877,37 @@ class memmap(ndarray[_ShapeT_co, _DTypeT_co]): subtype, filename: StrOrBytesPath | _SupportsFileMethodsRW, dtype: type[uint8] = ..., - mode: _MemMapModeKind = ..., - offset: int = ..., - shape: int | tuple[int, ...] | None = ..., - order: _OrderKACF = ..., + mode: _MemMapModeKind = "r+", + offset: int = 0, + shape: int | tuple[int, ...] | None = None, + order: _OrderKACF = "C", ) -> memmap[Any, dtype[uint8]]: ... @overload def __new__( subtype, filename: StrOrBytesPath | _SupportsFileMethodsRW, dtype: _DTypeLike[_ScalarT], - mode: _MemMapModeKind = ..., - offset: int = ..., - shape: int | tuple[int, ...] | None = ..., - order: _OrderKACF = ..., + mode: _MemMapModeKind = "r+", + offset: int = 0, + shape: int | tuple[int, ...] | None = None, + order: _OrderKACF = "C", ) -> memmap[Any, dtype[_ScalarT]]: ... @overload def __new__( subtype, filename: StrOrBytesPath | _SupportsFileMethodsRW, dtype: DTypeLike, - mode: _MemMapModeKind = ..., - offset: int = ..., - shape: int | tuple[int, ...] | None = ..., - order: _OrderKACF = ..., + mode: _MemMapModeKind = "r+", + offset: int = 0, + shape: int | tuple[int, ...] | None = None, + order: _OrderKACF = "C", ) -> memmap[Any, dtype]: ... def __array_finalize__(self, obj: object) -> None: ... 
def __array_wrap__( self, array: memmap[_ShapeT_co, _DTypeT_co], - context: tuple[ufunc, tuple[Any, ...], int] | None = ..., - return_scalar: builtins.bool = ..., + context: tuple[ufunc, tuple[Any, ...], int] | None = None, + return_scalar: builtins.bool = False, ) -> Any: ... def flush(self) -> None: ... @@ -5923,11 +5923,11 @@ class vectorize: def __init__( self, pyfunc: Callable[..., Any], - otypes: str | Iterable[DTypeLike] | None = ..., - doc: str | None = ..., - excluded: Iterable[int | str] | None = ..., - cache: builtins.bool = ..., - signature: str | None = ..., + otypes: str | Iterable[DTypeLike] | None = None, + doc: str | None = None, + excluded: Iterable[int | str] | None = None, + cache: builtins.bool = False, + signature: str | None = None, ) -> None: ... def __call__(self, *args: Any, **kwargs: Any) -> Any: ... @@ -5980,8 +5980,8 @@ class poly1d: def __init__( self, c_or_r: ArrayLike, - r: builtins.bool = ..., - variable: str | None = ..., + r: builtins.bool = False, + variable: str | None = None, ) -> None: ... def __len__(self) -> int: ... def __neg__(self) -> poly1d: ... @@ -6011,8 +6011,8 @@ class matrix(ndarray[_2DShapeT_co, _DTypeT_co]): def __new__( subtype, # pyright: ignore[reportSelfClsParameterName] data: ArrayLike, - dtype: DTypeLike | None = ..., - copy: builtins.bool = ..., + dtype: DTypeLike | None = None, + copy: builtins.bool = True, ) -> matrix[_2D, Incomplete]: ... def __array_finalize__(self, obj: object) -> None: ... 
diff --git a/numpy/_core/defchararray.pyi b/numpy/_core/defchararray.pyi index 9b86c2a33e35..815d1397b39c 100644 --- a/numpy/_core/defchararray.pyi +++ b/numpy/_core/defchararray.pyi @@ -106,12 +106,12 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): def __new__( subtype, shape: _ShapeLike, - itemsize: SupportsIndex | SupportsInt = ..., - unicode: L[False] = ..., - buffer: _SupportsBuffer = ..., - offset: SupportsIndex = ..., - strides: _ShapeLike = ..., - order: _OrderKACF = ..., + itemsize: SupportsIndex | SupportsInt = 1, + unicode: L[False] = False, + buffer: _SupportsBuffer | None = None, + offset: SupportsIndex = 0, + strides: _ShapeLike | None = None, + order: _OrderKACF = "C", ) -> _CharArray[bytes_]: ... @overload def __new__( @@ -119,22 +119,22 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): shape: _ShapeLike, itemsize: SupportsIndex | SupportsInt, unicode: L[True], - buffer: _SupportsBuffer = ..., - offset: SupportsIndex = ..., - strides: _ShapeLike = ..., - order: _OrderKACF = ..., + buffer: _SupportsBuffer | None = None, + offset: SupportsIndex = 0, + strides: _ShapeLike | None = None, + order: _OrderKACF = "C", ) -> _CharArray[str_]: ... @overload def __new__( subtype, shape: _ShapeLike, - itemsize: SupportsIndex | SupportsInt = ..., + itemsize: SupportsIndex | SupportsInt = 1, *, unicode: L[True], - buffer: _SupportsBuffer = ..., - offset: SupportsIndex = ..., - strides: _ShapeLike = ..., - order: _OrderKACF = ..., + buffer: _SupportsBuffer | None = None, + offset: SupportsIndex = 0, + strides: _ShapeLike | None = None, + order: _OrderKACF = "C", ) -> _CharArray[str_]: ... def __array_finalize__(self, obj: object) -> None: ... 
diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index 8ab52b6fe74d..b90453d66d02 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -249,7 +249,7 @@ class _ConstructorEmpty(Protocol): self, /, shape: SupportsIndex, - dtype: None = ..., + dtype: None = None, order: _OrderCF = ..., **kwargs: Unpack[_KwargsEmpty], ) -> _Array1D[float64]: ... @@ -287,7 +287,7 @@ class _ConstructorEmpty(Protocol): self, /, shape: _AnyShapeT, - dtype: None = ..., + dtype: None = None, order: _OrderCF = ..., **kwargs: Unpack[_KwargsEmpty], ) -> _Array[_AnyShapeT, float64]: ... @@ -324,7 +324,7 @@ class _ConstructorEmpty(Protocol): def __call__( self, /, shape: _ShapeLike, - dtype: None = ..., + dtype: None = None, order: _OrderCF = ..., **kwargs: Unpack[_KwargsEmpty], ) -> NDArray[float64]: ... @@ -400,7 +400,7 @@ empty: Final[_ConstructorEmpty] @overload def empty_like( prototype: _ArrayT, - dtype: None = ..., + dtype: None = None, order: _OrderKACF = ..., subok: bool = ..., shape: _ShapeLike | None = ..., @@ -410,7 +410,7 @@ def empty_like( @overload def empty_like( prototype: _ArrayLike[_ScalarT], - dtype: None = ..., + dtype: None = None, order: _OrderKACF = ..., subok: bool = ..., shape: _ShapeLike | None = ..., @@ -527,9 +527,9 @@ def concatenate( # type: ignore[misc] arrays: _ArrayLike[_ScalarT], /, axis: SupportsIndex | None = ..., - out: None = ..., + out: None = None, *, - dtype: None = ..., + dtype: None = None, casting: _CastingKind | None = ... ) -> NDArray[_ScalarT]: ... @overload @@ -537,7 +537,7 @@ def concatenate( # type: ignore[misc] arrays: SupportsLenAndGetItem[ArrayLike], /, axis: SupportsIndex | None = ..., - out: None = ..., + out: None = None, *, dtype: _DTypeLike[_ScalarT], casting: _CastingKind | None = ... 
@@ -547,7 +547,7 @@ def concatenate( # type: ignore[misc] arrays: SupportsLenAndGetItem[ArrayLike], /, axis: SupportsIndex | None = ..., - out: None = ..., + out: None = None, *, dtype: DTypeLike | None = None, casting: _CastingKind | None = ... @@ -608,7 +608,7 @@ def min_scalar_type(a: ArrayLike, /) -> dtype: ... def result_type(*arrays_and_dtypes: ArrayLike | DTypeLike | None) -> dtype: ... @overload -def dot(a: ArrayLike, b: ArrayLike, out: None = ...) -> Any: ... +def dot(a: ArrayLike, b: ArrayLike, out: None = None) -> Any: ... @overload def dot(a: ArrayLike, b: ArrayLike, out: _ArrayT) -> _ArrayT: ... @@ -682,7 +682,7 @@ def may_share_memory( @overload def asarray( a: _ArrayLike[_ScalarT], - dtype: None = ..., + dtype: None = None, order: _OrderKACF = ..., *, device: L["cpu"] | None = ..., @@ -713,7 +713,7 @@ def asarray( @overload def asanyarray( a: _ArrayT, # Preserve subclass-information - dtype: None = ..., + dtype: None = None, order: _OrderKACF = ..., *, device: L["cpu"] | None = ..., @@ -723,7 +723,7 @@ def asanyarray( @overload def asanyarray( a: _ArrayLike[_ScalarT], - dtype: None = ..., + dtype: None = None, order: _OrderKACF = ..., *, device: L["cpu"] | None = ..., @@ -754,7 +754,7 @@ def asanyarray( @overload def ascontiguousarray( a: _ArrayLike[_ScalarT], - dtype: None = ..., + dtype: None = None, *, like: _SupportsArrayFunc | None = ..., ) -> NDArray[_ScalarT]: ... @@ -776,7 +776,7 @@ def ascontiguousarray( @overload def asfortranarray( a: _ArrayLike[_ScalarT], - dtype: None = ..., + dtype: None = None, *, like: _SupportsArrayFunc | None = ..., ) -> NDArray[_ScalarT]: ... @@ -801,7 +801,7 @@ def promote_types(__type1: DTypeLike, __type2: DTypeLike) -> dtype: ... 
@overload def fromstring( string: str | bytes, - dtype: None = ..., + dtype: None = None, count: SupportsIndex = ..., *, sep: str, @@ -832,7 +832,7 @@ def frompyfunc( # type: ignore[overload-overlap] nin: L[1], nout: L[1], *, - identity: None = ..., + identity: None = None, ) -> _PyFunc_Nin1_Nout1[_ReturnType, None]: ... @overload def frompyfunc( # type: ignore[overload-overlap] @@ -848,7 +848,7 @@ def frompyfunc( # type: ignore[overload-overlap] nin: L[2], nout: L[1], *, - identity: None = ..., + identity: None = None, ) -> _PyFunc_Nin2_Nout1[_ReturnType, None]: ... @overload def frompyfunc( # type: ignore[overload-overlap] @@ -864,7 +864,7 @@ def frompyfunc( # type: ignore[overload-overlap] nin: _Nin, nout: L[1], *, - identity: None = ..., + identity: None = None, ) -> _PyFunc_Nin3P_Nout1[_ReturnType, None, _Nin]: ... @overload def frompyfunc( # type: ignore[overload-overlap] @@ -880,7 +880,7 @@ def frompyfunc( nin: _Nin, nout: _Nout, *, - identity: None = ..., + identity: None = None, ) -> _PyFunc_Nin1P_Nout2P[_ReturnType, None, _Nin, _Nout]: ... @overload def frompyfunc( @@ -902,7 +902,7 @@ def frompyfunc( @overload def fromfile( file: StrOrBytesPath | _SupportsFileMethods, - dtype: None = ..., + dtype: None = None, count: SupportsIndex = ..., sep: str = ..., offset: SupportsIndex = ..., @@ -950,7 +950,7 @@ def fromiter( @overload def frombuffer( buffer: _SupportsBuffer, - dtype: None = ..., + dtype: None = None, count: SupportsIndex = ..., offset: SupportsIndex = ..., *, @@ -979,7 +979,7 @@ def frombuffer( def arange( # type: ignore[misc] stop: _IntLike_co, /, *, - dtype: None = ..., + dtype: None = None, device: L["cpu"] | None = ..., like: _SupportsArrayFunc | None = ..., ) -> _Array1D[signedinteger]: ... 
@@ -988,7 +988,7 @@ def arange( # type: ignore[misc] start: _IntLike_co, stop: _IntLike_co, step: _IntLike_co = ..., - dtype: None = ..., + dtype: None = None, *, device: L["cpu"] | None = ..., like: _SupportsArrayFunc | None = ..., @@ -997,7 +997,7 @@ def arange( # type: ignore[misc] def arange( # type: ignore[misc] stop: _FloatLike_co, /, *, - dtype: None = ..., + dtype: None = None, device: L["cpu"] | None = ..., like: _SupportsArrayFunc | None = ..., ) -> _Array1D[floating]: ... @@ -1006,7 +1006,7 @@ def arange( # type: ignore[misc] start: _FloatLike_co, stop: _FloatLike_co, step: _FloatLike_co = ..., - dtype: None = ..., + dtype: None = None, *, device: L["cpu"] | None = ..., like: _SupportsArrayFunc | None = ..., @@ -1015,7 +1015,7 @@ def arange( # type: ignore[misc] def arange( stop: _TD64Like_co, /, *, - dtype: None = ..., + dtype: None = None, device: L["cpu"] | None = ..., like: _SupportsArrayFunc | None = ..., ) -> _Array1D[timedelta64]: ... @@ -1024,7 +1024,7 @@ def arange( start: _TD64Like_co, stop: _TD64Like_co, step: _TD64Like_co = ..., - dtype: None = ..., + dtype: None = None, *, device: L["cpu"] | None = ..., like: _SupportsArrayFunc | None = ..., @@ -1034,7 +1034,7 @@ def arange( # both start and stop must always be specified for datetime64 start: datetime64, stop: datetime64, step: datetime64 = ..., - dtype: None = ..., + dtype: None = None, *, device: L["cpu"] | None = ..., like: _SupportsArrayFunc | None = ..., @@ -1090,7 +1090,7 @@ def busday_count( # type: ignore[misc] weekmask: ArrayLike = ..., holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., busdaycal: busdaycalendar | None = ..., - out: None = ..., + out: None = None, ) -> int_: ... 
@overload def busday_count( # type: ignore[misc] @@ -1099,7 +1099,7 @@ def busday_count( # type: ignore[misc] weekmask: ArrayLike = ..., holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., busdaycal: busdaycalendar | None = ..., - out: None = ..., + out: None = None, ) -> NDArray[int_]: ... @overload def busday_count( @@ -1130,7 +1130,7 @@ def busday_offset( # type: ignore[misc] weekmask: ArrayLike = ..., holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., busdaycal: busdaycalendar | None = ..., - out: None = ..., + out: None = None, ) -> datetime64: ... @overload def busday_offset( # type: ignore[misc] @@ -1140,7 +1140,7 @@ def busday_offset( # type: ignore[misc] weekmask: ArrayLike = ..., holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., busdaycal: busdaycalendar | None = ..., - out: None = ..., + out: None = None, ) -> NDArray[datetime64]: ... @overload def busday_offset( # type: ignore[misc] @@ -1171,7 +1171,7 @@ def busday_offset( # type: ignore[misc] weekmask: ArrayLike = ..., holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., busdaycal: busdaycalendar | None = ..., - out: None = ..., + out: None = None, ) -> datetime64: ... @overload def busday_offset( # type: ignore[misc] @@ -1181,7 +1181,7 @@ def busday_offset( # type: ignore[misc] weekmask: ArrayLike = ..., holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., busdaycal: busdaycalendar | None = ..., - out: None = ..., + out: None = None, ) -> NDArray[datetime64]: ... @overload def busday_offset( @@ -1211,7 +1211,7 @@ def is_busday( # type: ignore[misc] weekmask: ArrayLike = ..., holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., busdaycal: busdaycalendar | None = ..., - out: None = ..., + out: None = None, ) -> np.bool: ... 
@overload def is_busday( # type: ignore[misc] @@ -1219,7 +1219,7 @@ def is_busday( # type: ignore[misc] weekmask: ArrayLike = ..., holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., busdaycal: busdaycalendar | None = ..., - out: None = ..., + out: None = None, ) -> NDArray[np.bool]: ... @overload def is_busday( diff --git a/numpy/_pytesttester.pyi b/numpy/_pytesttester.pyi index a12abb1c1a10..bd71239314b4 100644 --- a/numpy/_pytesttester.pyi +++ b/numpy/_pytesttester.pyi @@ -8,11 +8,11 @@ class PytestTester: def __init__(self, module_name: str) -> None: ... def __call__( self, - label: L["fast", "full"] = ..., - verbose: int = ..., - extra_argv: Iterable[str] | None = ..., - doctests: L[False] = ..., - coverage: bool = ..., - durations: int = ..., - tests: Iterable[str] | None = ..., + label: L["fast", "full"] = "fast", + verbose: int = 1, + extra_argv: Iterable[str] | None = None, + doctests: L[False] = False, + coverage: bool = False, + durations: int = -1, + tests: Iterable[str] | None = None, ) -> bool: ... 
diff --git a/numpy/_typing/_ufunc.pyi b/numpy/_typing/_ufunc.pyi index 3f58801004d1..bcb423e58110 100644 --- a/numpy/_typing/_ufunc.pyi +++ b/numpy/_typing/_ufunc.pyi @@ -346,7 +346,7 @@ class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i self, x1: _ScalarLike_co, out1: EllipsisType | None = ..., - out2: None = ..., + out2: None = None, /, *, out: EllipsisType | None = ..., @@ -421,7 +421,7 @@ class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i x1: _ScalarLike_co, x2: _ScalarLike_co, out1: EllipsisType | None = ..., - out2: None = ..., + out2: None = None, /, *, out: EllipsisType | None = ..., diff --git a/numpy/exceptions.pyi b/numpy/exceptions.pyi index 9bcc097dfc0f..4cc4eff5d321 100644 --- a/numpy/exceptions.pyi +++ b/numpy/exceptions.pyi @@ -22,6 +22,6 @@ class AxisError(ValueError, IndexError): axis: int | None ndim: int | None @overload - def __init__(self, axis: str, ndim: None = ..., msg_prefix: None = ...) -> None: ... + def __init__(self, axis: str, ndim: None = None, msg_prefix: None = None) -> None: ... @overload - def __init__(self, axis: int, ndim: int, msg_prefix: str | None = ...) -> None: ... + def __init__(self, axis: int, ndim: int, msg_prefix: str | None = None) -> None: ... diff --git a/numpy/fft/_pocketfft.pyi b/numpy/fft/_pocketfft.pyi index 81064bb174fe..3234c64ed169 100644 --- a/numpy/fft/_pocketfft.pyi +++ b/numpy/fft/_pocketfft.pyi @@ -107,7 +107,7 @@ def irfftn( def fft2( a: ArrayLike, s: Sequence[int] | None = None, - axes: Sequence[int] | None = ..., # = (-2, -1) + axes: Sequence[int] | None = (-2, -1), norm: _NormKind = None, out: NDArray[complex128] | None = None, ) -> NDArray[complex128]: ... @@ -115,7 +115,7 @@ def fft2( def ifft2( a: ArrayLike, s: Sequence[int] | None = None, - axes: Sequence[int] | None = ..., # = (-2, -1) + axes: Sequence[int] | None = (-2, -1), norm: _NormKind = None, out: NDArray[complex128] | None = None, ) -> NDArray[complex128]: ... 
@@ -123,7 +123,7 @@ def ifft2( def rfft2( a: ArrayLike, s: Sequence[int] | None = None, - axes: Sequence[int] | None = ..., # = (-2, -1) + axes: Sequence[int] | None = (-2, -1), norm: _NormKind = None, out: NDArray[complex128] | None = None, ) -> NDArray[complex128]: ... @@ -131,7 +131,7 @@ def rfft2( def irfft2( a: ArrayLike, s: Sequence[int] | None = None, - axes: Sequence[int] | None = ..., # = (-2, -1) + axes: Sequence[int] | None = (-2, -1), norm: _NormKind = None, out: NDArray[float64] | None = None, ) -> NDArray[float64]: ... diff --git a/numpy/lib/_datasource.pyi b/numpy/lib/_datasource.pyi index ad52b7f67af0..dba0434a5fab 100644 --- a/numpy/lib/_datasource.pyi +++ b/numpy/lib/_datasource.pyi @@ -8,7 +8,7 @@ _Mode: TypeAlias = OpenBinaryMode | OpenTextMode # exported in numpy.lib.nppyio class DataSource: - def __init__(self, /, destpath: Path | str | None = ...) -> None: ... + def __init__(self, /, destpath: Path | str | None = ".") -> None: ... def __del__(self, /) -> None: ... def abspath(self, /, path: str) -> str: ... def exists(self, /, path: str) -> bool: ... @@ -18,13 +18,13 @@ class DataSource: def open(self, /, path: str, mode: _Mode = "r", encoding: str | None = None, newline: str | None = None) -> IO[Any]: ... class Repository(DataSource): - def __init__(self, /, baseurl: str, destpath: str | None = ...) -> None: ... + def __init__(self, /, baseurl: str, destpath: str | None = ".") -> None: ... def listdir(self, /) -> list[str]: ... def open( path: str, mode: _Mode = "r", - destpath: str | None = ..., + destpath: str | None = ".", encoding: str | None = None, newline: str | None = None, ) -> IO[Any]: ... 
diff --git a/numpy/lib/_function_base_impl.pyi b/numpy/lib/_function_base_impl.pyi index 8e58cf342ae0..9c2e2b1e28b0 100644 --- a/numpy/lib/_function_base_impl.pyi +++ b/numpy/lib/_function_base_impl.pyi @@ -123,13 +123,13 @@ class _TrimZerosSequence(Protocol[_T_co]): def rot90( m: _ArrayLike[_ScalarT], k: int = 1, - axes: tuple[int, int] = ..., + axes: tuple[int, int] = (0, 1), ) -> NDArray[_ScalarT]: ... @overload def rot90( m: ArrayLike, k: int = 1, - axes: tuple[int, int] = ..., + axes: tuple[int, int] = (0, 1), ) -> NDArray[Any]: ... @overload diff --git a/numpy/lib/_index_tricks_impl.pyi b/numpy/lib/_index_tricks_impl.pyi index b624cfff4481..3c3074b1c1a4 100644 --- a/numpy/lib/_index_tricks_impl.pyi +++ b/numpy/lib/_index_tricks_impl.pyi @@ -109,7 +109,7 @@ class nd_grid(Generic[_BoolT_co]): __slots__ = ("sparse",) sparse: _BoolT_co - def __init__(self, sparse: _BoolT_co = ...) -> None: ... + def __init__(self, sparse: _BoolT_co = ...) -> None: ... # stubdefaulter: ignore[missing-default] @overload def __getitem__(self: nd_grid[L[False]], key: slice | Sequence[slice]) -> NDArray[Incomplete]: ... @overload @@ -141,10 +141,10 @@ class AxisConcatenator(Generic[_AxisT_co, _MatrixT_co, _NDMinT_co, _Trans1DT_co] def __init__( self, /, - axis: _AxisT_co = ..., - matrix: _MatrixT_co = ..., - ndmin: _NDMinT_co = ..., - trans1d: _Trans1DT_co = ..., + axis: _AxisT_co = 0, + matrix: _MatrixT_co = False, + ndmin: _NDMinT_co = 1, + trans1d: _Trans1DT_co = -1, ) -> None: ... 
# TODO(jorenham): annotate this diff --git a/numpy/lib/_npyio_impl.pyi b/numpy/lib/_npyio_impl.pyi index cdef1003cff5..8a4dfa27ed9b 100644 --- a/numpy/lib/_npyio_impl.pyi +++ b/numpy/lib/_npyio_impl.pyi @@ -215,7 +215,7 @@ def genfromtxt( usecols: Sequence[int] | None = None, names: L[True] | str | Collection[str] | None = None, excludelist: Sequence[str] | None = None, - deletechars: str = ..., + deletechars: str = " !#$%&'()*+,-./:;<=>?@[\\]^{|}~", replace_space: str = "_", autostrip: bool = False, case_sensitive: bool | L["upper", "lower"] = True, @@ -244,7 +244,7 @@ def genfromtxt( usecols: Sequence[int] | None = None, names: L[True] | str | Collection[str] | None = None, excludelist: Sequence[str] | None = None, - deletechars: str = ..., + deletechars: str = " !#$%&'()*+,-./:;<=>?@[\\]^{|}~", replace_space: str = "_", autostrip: bool = False, case_sensitive: bool | L["upper", "lower"] = True, @@ -273,7 +273,7 @@ def genfromtxt( usecols: Sequence[int] | None = None, names: L[True] | str | Collection[str] | None = None, excludelist: Sequence[str] | None = None, - deletechars: str = ..., + deletechars: str = " !#$%&'()*+,-./:;<=>?@[\\]^{|}~", replace_space: str = "_", autostrip: bool = False, case_sensitive: bool | L["upper", "lower"] = True, diff --git a/numpy/lib/_stride_tricks_impl.pyi b/numpy/lib/_stride_tricks_impl.pyi index 51d377e06a18..94651dff36ed 100644 --- a/numpy/lib/_stride_tricks_impl.pyi +++ b/numpy/lib/_stride_tricks_impl.pyi @@ -14,7 +14,7 @@ class DummyArray: def __init__( self, interface: dict[str, Any], - base: NDArray[Any] | None = ..., + base: NDArray[Any] | None = None, ) -> None: ... 
@overload diff --git a/numpy/lib/_twodim_base_impl.pyi b/numpy/lib/_twodim_base_impl.pyi index fba688b26206..7396fa1b0370 100644 --- a/numpy/lib/_twodim_base_impl.pyi +++ b/numpy/lib/_twodim_base_impl.pyi @@ -86,7 +86,7 @@ def eye( N: int, M: int | None = None, k: int = 0, - dtype: None = ..., # = float + dtype: None = ..., # = float # stubdefaulter: ignore[missing-default] order: _OrderCF = "C", *, device: L["cpu"] | None = None, @@ -141,7 +141,7 @@ def tri( N: int, M: int | None = None, k: int = 0, - dtype: None = ..., # = float + dtype: None = ..., # = float # stubdefaulter: ignore[missing-default] *, like: _SupportsArrayFunc | None = None ) -> NDArray[float64]: ... diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index 876384d6cacc..4b8ac3da0ee2 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -460,7 +460,7 @@ def cross( x2: _ArrayLikeUInt_co, /, *, - axis: int = ..., + axis: int = -1, ) -> NDArray[unsignedinteger]: ... @overload def cross( @@ -468,7 +468,7 @@ def cross( x2: _ArrayLikeInt_co, /, *, - axis: int = ..., + axis: int = -1, ) -> NDArray[signedinteger]: ... @overload def cross( @@ -476,7 +476,7 @@ def cross( x2: _ArrayLikeFloat_co, /, *, - axis: int = ..., + axis: int = -1, ) -> NDArray[floating]: ... @overload def cross( @@ -484,7 +484,7 @@ def cross( x2: _ArrayLikeComplex_co, /, *, - axis: int = ..., + axis: int = -1, ) -> NDArray[complexfloating]: ... @overload diff --git a/numpy/polynomial/_polybase.pyi b/numpy/polynomial/_polybase.pyi index e82e441e4c64..d922a08ffa9d 100644 --- a/numpy/polynomial/_polybase.pyi +++ b/numpy/polynomial/_polybase.pyi @@ -72,9 +72,9 @@ class ABCPolyBase(Generic[_NameCo], abc.ABC): self, /, coef: _SeriesLikeCoef_co, - domain: _SeriesLikeCoef_co | None = ..., - window: _SeriesLikeCoef_co | None = ..., - symbol: str = ..., + domain: _SeriesLikeCoef_co | None = None, + window: _SeriesLikeCoef_co | None = None, + symbol: str = "x", ) -> None: ... 
@overload @@ -183,7 +183,7 @@ class ABCPolyBase(Generic[_NameCo], abc.ABC): self, /, m: SupportsIndex = 1, - k: _CoefLike_co | _SeriesLikeCoef_co = ..., # = [] + k: _CoefLike_co | _SeriesLikeCoef_co = [], lbnd: _CoefLike_co | None = None, ) -> Self: ... @@ -246,7 +246,7 @@ class ABCPolyBase(Generic[_NameCo], abc.ABC): def fromroots( cls, roots: _ArrayLikeCoef_co, - domain: _SeriesLikeCoef_co | None = ..., # = [] + domain: _SeriesLikeCoef_co | None = [], window: _SeriesLikeCoef_co | None = None, symbol: str = "x", ) -> Self: ... diff --git a/numpy/polynomial/_polytypes.pyi b/numpy/polynomial/_polytypes.pyi index 241a65be2fa2..7b003c5742c6 100644 --- a/numpy/polynomial/_polytypes.pyi +++ b/numpy/polynomial/_polytypes.pyi @@ -729,7 +729,7 @@ class _FuncFit(_Named[_Name_co], Protocol[_Name_co]): y: _ArrayLikeFloat_co, deg: int | _SeriesLikeInt_co, rcond: float | None = ..., - full: Literal[False] = ..., + full: Literal[False] = False, w: _SeriesLikeFloat_co | None = ..., ) -> _FloatArray: ... @overload @@ -764,7 +764,7 @@ class _FuncFit(_Named[_Name_co], Protocol[_Name_co]): y: _ArrayLikeComplex_co, deg: int | _SeriesLikeInt_co, rcond: float | None = ..., - full: Literal[False] = ..., + full: Literal[False] = False, w: _SeriesLikeFloat_co | None = ..., ) -> _ComplexArray: ... @overload @@ -799,7 +799,7 @@ class _FuncFit(_Named[_Name_co], Protocol[_Name_co]): y: _ArrayLikeCoef_co, deg: int | _SeriesLikeInt_co, rcond: float | None = ..., - full: Literal[False] = ..., + full: Literal[False] = False, w: _SeriesLikeFloat_co | None = ..., ) -> _ObjectArray: ... 
@overload diff --git a/numpy/polynomial/chebyshev.pyi b/numpy/polynomial/chebyshev.pyi index 30db7dfb9e6b..71f8021bccc3 100644 --- a/numpy/polynomial/chebyshev.pyi +++ b/numpy/polynomial/chebyshev.pyi @@ -128,13 +128,13 @@ _RT = TypeVar("_RT", bound=np.number | np.bool | np.object_) def chebinterpolate( func: np.ufunc, deg: _IntLike_co, - args: tuple[()] = ..., + args: tuple[()] = (), ) -> npt.NDArray[np.float64 | np.complex128 | np.object_]: ... @overload def chebinterpolate( func: Callable[[npt.NDArray[np.float64]], _RT], deg: _IntLike_co, - args: tuple[()] = ..., + args: tuple[()] = (), ) -> npt.NDArray[_RT]: ... @overload def chebinterpolate( @@ -151,7 +151,7 @@ class Chebyshev(ABCPolyBase[L["T"]]): func: Callable[[npt.NDArray[np.float64]], _CoefSeries], deg: _IntLike_co, domain: _SeriesLikeCoef_co | None = None, - args: tuple[()] = ..., + args: tuple[()] = (), ) -> Self: ... @overload @classmethod diff --git a/numpy/random/_generator.pyi b/numpy/random/_generator.pyi index b090663a104f..b81f5d322416 100644 --- a/numpy/random/_generator.pyi +++ b/numpy/random/_generator.pyi @@ -68,134 +68,134 @@ class Generator: @overload def standard_normal( # type: ignore[misc] self, - size: None = ..., + size: None = None, dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ..., - out: None = ..., + out: None = None, ) -> float: ... @overload def standard_normal( # type: ignore[misc] self, - size: _ShapeLike = ..., + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload def standard_normal( # type: ignore[misc] self, *, - out: NDArray[float64] = ..., + out: NDArray[float64] | None = None, ) -> NDArray[float64]: ... @overload def standard_normal( # type: ignore[misc] self, - size: _ShapeLike = ..., + size: _ShapeLike | None = None, dtype: _DTypeLikeFloat32 = ..., - out: NDArray[float32] | None = ..., + out: NDArray[float32] | None = None, ) -> NDArray[float32]: ... 
@overload def standard_normal( # type: ignore[misc] self, - size: _ShapeLike = ..., + size: _ShapeLike | None = None, dtype: _DTypeLikeFloat64 = ..., - out: NDArray[float64] | None = ..., + out: NDArray[float64] | None = None, ) -> NDArray[float64]: ... @overload - def permutation(self, x: int, axis: int = ...) -> NDArray[int64]: ... + def permutation(self, x: int, axis: int = 0) -> NDArray[int64]: ... @overload - def permutation(self, x: ArrayLike, axis: int = ...) -> NDArray[Any]: ... + def permutation(self, x: ArrayLike, axis: int = 0) -> NDArray[Any]: ... @overload def standard_exponential( # type: ignore[misc] self, - size: None = ..., + size: None = None, dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ..., - method: Literal["zig", "inv"] = ..., - out: None = ..., + method: Literal["zig", "inv"] = "zig", + out: None = None, ) -> float: ... @overload def standard_exponential( self, - size: _ShapeLike = ..., + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload def standard_exponential( self, *, - out: NDArray[float64] = ..., + out: NDArray[float64] | None = None, ) -> NDArray[float64]: ... @overload def standard_exponential( self, - size: _ShapeLike = ..., + size: _ShapeLike | None = None, *, - method: Literal["zig", "inv"] = ..., - out: NDArray[float64] | None = ..., + method: Literal["zig", "inv"] = "zig", + out: NDArray[float64] | None = None, ) -> NDArray[float64]: ... @overload def standard_exponential( self, - size: _ShapeLike = ..., + size: _ShapeLike | None = None, dtype: _DTypeLikeFloat32 = ..., - method: Literal["zig", "inv"] = ..., - out: NDArray[float32] | None = ..., + method: Literal["zig", "inv"] = "zig", + out: NDArray[float32] | None = None, ) -> NDArray[float32]: ... 
@overload def standard_exponential( self, - size: _ShapeLike = ..., + size: _ShapeLike | None = None, dtype: _DTypeLikeFloat64 = ..., - method: Literal["zig", "inv"] = ..., - out: NDArray[float64] | None = ..., + method: Literal["zig", "inv"] = "zig", + out: NDArray[float64] | None = None, ) -> NDArray[float64]: ... @overload def random( # type: ignore[misc] self, - size: None = ..., + size: None = None, dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ..., - out: None = ..., + out: None = None, ) -> float: ... @overload def random( self, *, - out: NDArray[float64] = ..., + out: NDArray[float64] | None = None, ) -> NDArray[float64]: ... @overload def random( self, - size: _ShapeLike = ..., + size: _ShapeLike | None = None, *, - out: NDArray[float64] | None = ..., + out: NDArray[float64] | None = None, ) -> NDArray[float64]: ... @overload def random( self, - size: _ShapeLike = ..., + size: _ShapeLike | None = None, dtype: _DTypeLikeFloat32 = ..., - out: NDArray[float32] | None = ..., + out: NDArray[float32] | None = None, ) -> NDArray[float32]: ... @overload def random( self, - size: _ShapeLike = ..., + size: _ShapeLike | None = None, dtype: _DTypeLikeFloat64 = ..., - out: NDArray[float64] | None = ..., + out: NDArray[float64] | None = None, ) -> NDArray[float64]: ... @overload def beta( self, a: _FloatLike_co, b: _FloatLike_co, - size: None = ..., + size: None = None, ) -> float: ... # type: ignore[misc] @overload def beta( self, a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, - size: _ShapeLike | None = ... + size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def exponential(self, scale: _FloatLike_co = ..., size: None = ...) -> float: ... # type: ignore[misc] + def exponential(self, scale: _FloatLike_co = 1.0, size: None = None) -> float: ... # type: ignore[misc] @overload - def exponential(self, scale: _ArrayLikeFloat_co = ..., size: _ShapeLike | None = ...) -> NDArray[float64]: ... 
+ def exponential(self, scale: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None) -> NDArray[float64]: ... # @overload @@ -481,135 +481,136 @@ class Generator: def choice( self, a: int, - size: None = ..., - replace: bool = ..., - p: _ArrayLikeFloat_co | None = ..., - axis: int = ..., - shuffle: bool = ..., + size: None = None, + replace: bool = True, + p: _ArrayLikeFloat_co | None = None, + axis: int = 0, + shuffle: bool = True, ) -> int: ... @overload def choice( self, a: int, - size: _ShapeLike = ..., - replace: bool = ..., - p: _ArrayLikeFloat_co | None = ..., - axis: int = ..., - shuffle: bool = ..., + size: _ShapeLike | None = None, + replace: bool = True, + p: _ArrayLikeFloat_co | None = None, + axis: int = 0, + shuffle: bool = True, ) -> NDArray[int64]: ... @overload def choice( self, a: ArrayLike, - size: None = ..., - replace: bool = ..., - p: _ArrayLikeFloat_co | None = ..., - axis: int = ..., - shuffle: bool = ..., + size: None = None, + replace: bool = True, + p: _ArrayLikeFloat_co | None = None, + axis: int = 0, + shuffle: bool = True, ) -> Any: ... @overload def choice( self, a: ArrayLike, - size: _ShapeLike = ..., - replace: bool = ..., - p: _ArrayLikeFloat_co | None = ..., - axis: int = ..., - shuffle: bool = ..., + size: _ShapeLike | None = None, + replace: bool = True, + p: _ArrayLikeFloat_co | None = None, + axis: int = 0, + shuffle: bool = True, ) -> NDArray[Any]: ... @overload def uniform( self, - low: _FloatLike_co = ..., - high: _FloatLike_co = ..., - size: None = ..., + low: _FloatLike_co = 0.0, + high: _FloatLike_co = 1.0, + size: None = None, ) -> float: ... # type: ignore[misc] @overload def uniform( self, - low: _ArrayLikeFloat_co = ..., - high: _ArrayLikeFloat_co = ..., - size: _ShapeLike | None = ..., + low: _ArrayLikeFloat_co = 0.0, + high: _ArrayLikeFloat_co = 1.0, + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... 
@overload def normal( self, - loc: _FloatLike_co = ..., - scale: _FloatLike_co = ..., - size: None = ..., + loc: _FloatLike_co = 0.0, + scale: _FloatLike_co = 1.0, + size: None = None, ) -> float: ... # type: ignore[misc] @overload def normal( self, - loc: _ArrayLikeFloat_co = ..., - scale: _ArrayLikeFloat_co = ..., - size: _ShapeLike | None = ..., + loc: _ArrayLikeFloat_co = 0.0, + scale: _ArrayLikeFloat_co = 1.0, + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload def standard_gamma( # type: ignore[misc] self, shape: _FloatLike_co, - size: None = ..., + size: None = None, dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ..., - out: None = ..., + out: None = None, ) -> float: ... @overload def standard_gamma( self, shape: _ArrayLikeFloat_co, - size: _ShapeLike | None = ..., + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload def standard_gamma( self, shape: _ArrayLikeFloat_co, *, - out: NDArray[float64] = ..., + out: NDArray[float64] | None = None, ) -> NDArray[float64]: ... @overload def standard_gamma( self, shape: _ArrayLikeFloat_co, - size: _ShapeLike | None = ..., + size: _ShapeLike | None = None, dtype: _DTypeLikeFloat32 = ..., - out: NDArray[float32] | None = ..., + out: NDArray[float32] | None = None, ) -> NDArray[float32]: ... @overload def standard_gamma( self, shape: _ArrayLikeFloat_co, - size: _ShapeLike | None = ..., + size: _ShapeLike | None = None, dtype: _DTypeLikeFloat64 = ..., - out: NDArray[float64] | None = ..., + out: NDArray[float64] | None = None, ) -> NDArray[float64]: ... @overload def gamma( - self, shape: _FloatLike_co, scale: _FloatLike_co = ..., size: None = ... + self, shape: _FloatLike_co, scale: _FloatLike_co = 1.0, size: None = None ) -> float: ... # type: ignore[misc] @overload def gamma( self, shape: _ArrayLikeFloat_co, - scale: _ArrayLikeFloat_co = ..., - size: _ShapeLike | None = ..., + scale: _ArrayLikeFloat_co = 1.0, + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... 
@overload def f( - self, dfnum: _FloatLike_co, dfden: _FloatLike_co, size: None = ... + self, dfnum: _FloatLike_co, dfden: _FloatLike_co, size: None = None ) -> float: ... # type: ignore[misc] @overload def f( self, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, - size: _ShapeLike | None = ... + size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload def noncentral_f( self, dfnum: _FloatLike_co, dfden: _FloatLike_co, - nonc: _FloatLike_co, size: None = ... + nonc: _FloatLike_co, + size: None = None, ) -> float: ... # type: ignore[misc] @overload def noncentral_f( @@ -617,140 +618,140 @@ class Generator: dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, - size: _ShapeLike | None = ..., + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload - def chisquare(self, df: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] + def chisquare(self, df: _FloatLike_co, size: None = None) -> float: ... # type: ignore[misc] @overload def chisquare( - self, df: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + self, df: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload def noncentral_chisquare( - self, df: _FloatLike_co, nonc: _FloatLike_co, size: None = ... + self, df: _FloatLike_co, nonc: _FloatLike_co, size: None = None ) -> float: ... # type: ignore[misc] @overload def noncentral_chisquare( self, df: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, - size: _ShapeLike | None = ... + size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def standard_t(self, df: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] + def standard_t(self, df: _FloatLike_co, size: None = None) -> float: ... # type: ignore[misc] @overload def standard_t( - self, df: _ArrayLikeFloat_co, size: None = ... + self, df: _ArrayLikeFloat_co, size: None = None ) -> NDArray[float64]: ... 
@overload def standard_t( - self, df: _ArrayLikeFloat_co, size: _ShapeLike = ... + self, df: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload def vonmises( - self, mu: _FloatLike_co, kappa: _FloatLike_co, size: None = ... + self, mu: _FloatLike_co, kappa: _FloatLike_co, size: None = None ) -> float: ... # type: ignore[misc] @overload def vonmises( self, mu: _ArrayLikeFloat_co, kappa: _ArrayLikeFloat_co, - size: _ShapeLike | None = ... + size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def pareto(self, a: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] + def pareto(self, a: _FloatLike_co, size: None = None) -> float: ... # type: ignore[misc] @overload def pareto( - self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def weibull(self, a: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] + def weibull(self, a: _FloatLike_co, size: None = None) -> float: ... # type: ignore[misc] @overload def weibull( - self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def power(self, a: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] + def power(self, a: _FloatLike_co, size: None = None) -> float: ... # type: ignore[misc] @overload def power( - self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def standard_cauchy(self, size: None = ...) -> float: ... # type: ignore[misc] + def standard_cauchy(self, size: None = None) -> float: ... # type: ignore[misc] @overload - def standard_cauchy(self, size: _ShapeLike = ...) -> NDArray[float64]: ... + def standard_cauchy(self, size: _ShapeLike | None = None) -> NDArray[float64]: ... 
@overload def laplace( self, - loc: _FloatLike_co = ..., - scale: _FloatLike_co = ..., - size: None = ..., + loc: _FloatLike_co = 0.0, + scale: _FloatLike_co = 1.0, + size: None = None, ) -> float: ... # type: ignore[misc] @overload def laplace( self, - loc: _ArrayLikeFloat_co = ..., - scale: _ArrayLikeFloat_co = ..., - size: _ShapeLike | None = ..., + loc: _ArrayLikeFloat_co = 0.0, + scale: _ArrayLikeFloat_co = 1.0, + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload def gumbel( self, - loc: _FloatLike_co = ..., - scale: _FloatLike_co = ..., - size: None = ..., + loc: _FloatLike_co = 0.0, + scale: _FloatLike_co = 1.0, + size: None = None, ) -> float: ... # type: ignore[misc] @overload def gumbel( self, - loc: _ArrayLikeFloat_co = ..., - scale: _ArrayLikeFloat_co = ..., - size: _ShapeLike | None = ..., + loc: _ArrayLikeFloat_co = 0.0, + scale: _ArrayLikeFloat_co = 1.0, + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload def logistic( self, - loc: _FloatLike_co = ..., - scale: _FloatLike_co = ..., - size: None = ..., + loc: _FloatLike_co = 0.0, + scale: _FloatLike_co = 1.0, + size: None = None, ) -> float: ... # type: ignore[misc] @overload def logistic( self, - loc: _ArrayLikeFloat_co = ..., - scale: _ArrayLikeFloat_co = ..., - size: _ShapeLike | None = ..., + loc: _ArrayLikeFloat_co = 0.0, + scale: _ArrayLikeFloat_co = 1.0, + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload def lognormal( self, - mean: _FloatLike_co = ..., - sigma: _FloatLike_co = ..., - size: None = ..., + mean: _FloatLike_co = 0.0, + sigma: _FloatLike_co = 1.0, + size: None = None, ) -> float: ... # type: ignore[misc] @overload def lognormal( self, - mean: _ArrayLikeFloat_co = ..., - sigma: _ArrayLikeFloat_co = ..., - size: _ShapeLike | None = ..., + mean: _ArrayLikeFloat_co = 0.0, + sigma: _ArrayLikeFloat_co = 1.0, + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... 
@overload - def rayleigh(self, scale: _FloatLike_co = ..., size: None = ...) -> float: ... # type: ignore[misc] + def rayleigh(self, scale: _FloatLike_co = 1.0, size: None = None) -> float: ... # type: ignore[misc] @overload def rayleigh( - self, scale: _ArrayLikeFloat_co = ..., size: _ShapeLike | None = ... + self, scale: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload def wald( - self, mean: _FloatLike_co, scale: _FloatLike_co, size: None = ... + self, mean: _FloatLike_co, scale: _FloatLike_co, size: None = None ) -> float: ... # type: ignore[misc] @overload def wald( self, mean: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, - size: _ShapeLike | None = ... + size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload def triangular( @@ -758,7 +759,7 @@ class Generator: left: _FloatLike_co, mode: _FloatLike_co, right: _FloatLike_co, - size: None = ..., + size: None = None, ) -> float: ... # type: ignore[misc] @overload def triangular( @@ -766,46 +767,46 @@ class Generator: left: _ArrayLikeFloat_co, mode: _ArrayLikeFloat_co, right: _ArrayLikeFloat_co, - size: _ShapeLike | None = ..., + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload - def binomial(self, n: int, p: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc] + def binomial(self, n: int, p: _FloatLike_co, size: None = None) -> int: ... # type: ignore[misc] @overload def binomial( - self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[int64]: ... @overload def negative_binomial( - self, n: _FloatLike_co, p: _FloatLike_co, size: None = ... + self, n: _FloatLike_co, p: _FloatLike_co, size: None = None ) -> int: ... # type: ignore[misc] @overload def negative_binomial( self, n: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co, - size: _ShapeLike | None = ... 
+ size: _ShapeLike | None = None ) -> NDArray[int64]: ... @overload - def poisson(self, lam: _FloatLike_co = ..., size: None = ...) -> int: ... # type: ignore[misc] + def poisson(self, lam: _FloatLike_co = 1.0, size: None = None) -> int: ... # type: ignore[misc] @overload def poisson( - self, lam: _ArrayLikeFloat_co = ..., size: _ShapeLike | None = ... + self, lam: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None ) -> NDArray[int64]: ... @overload - def zipf(self, a: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc] + def zipf(self, a: _FloatLike_co, size: None = None) -> int: ... # type: ignore[misc] @overload def zipf( - self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[int64]: ... @overload - def geometric(self, p: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc] + def geometric(self, p: _FloatLike_co, size: None = None) -> int: ... # type: ignore[misc] @overload def geometric( - self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[int64]: ... @overload def hypergeometric( - self, ngood: int, nbad: int, nsample: int, size: None = ... + self, ngood: int, nbad: int, nsample: int, size: None = None ) -> int: ... # type: ignore[misc] @overload def hypergeometric( @@ -813,44 +814,44 @@ class Generator: ngood: _ArrayLikeInt_co, nbad: _ArrayLikeInt_co, nsample: _ArrayLikeInt_co, - size: _ShapeLike | None = ..., + size: _ShapeLike | None = None, ) -> NDArray[int64]: ... @overload - def logseries(self, p: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc] + def logseries(self, p: _FloatLike_co, size: None = None) -> int: ... # type: ignore[misc] @overload def logseries( - self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[int64]: ... 
def multivariate_normal( self, mean: _ArrayLikeFloat_co, cov: _ArrayLikeFloat_co, - size: _ShapeLike | None = ..., - check_valid: Literal["warn", "raise", "ignore"] = ..., - tol: float = ..., + size: _ShapeLike | None = None, + check_valid: Literal["warn", "raise", "ignore"] = "warn", + tol: float = 1e-8, *, - method: Literal["svd", "eigh", "cholesky"] = ..., + method: Literal["svd", "eigh", "cholesky"] = "svd", ) -> NDArray[float64]: ... def multinomial( self, n: _ArrayLikeInt_co, pvals: _ArrayLikeFloat_co, - size: _ShapeLike | None = ... + size: _ShapeLike | None = None ) -> NDArray[int64]: ... def multivariate_hypergeometric( self, colors: _ArrayLikeInt_co, nsample: int, - size: _ShapeLike | None = ..., - method: Literal["marginals", "count"] = ..., + size: _ShapeLike | None = None, + method: Literal["marginals", "count"] = "marginals", ) -> NDArray[int64]: ... def dirichlet( - self, alpha: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + self, alpha: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[float64]: ... def permuted( - self, x: ArrayLike, *, axis: int | None = ..., out: NDArray[Any] | None = ... + self, x: ArrayLike, *, axis: int | None = None, out: NDArray[Any] | None = None ) -> NDArray[Any]: ... - def shuffle(self, x: ArrayLike, axis: int = ...) -> None: ... + def shuffle(self, x: ArrayLike, axis: int = 0) -> None: ... def default_rng( - seed: _ArrayLikeInt_co | SeedSequence | BitGenerator | Generator | RandomState | None = ... + seed: _ArrayLikeInt_co | SeedSequence | BitGenerator | Generator | RandomState | None = None ) -> Generator: ... diff --git a/numpy/random/_mt19937.pyi b/numpy/random/_mt19937.pyi index 70b2506da7af..99e10677c3a2 100644 --- a/numpy/random/_mt19937.pyi +++ b/numpy/random/_mt19937.pyi @@ -18,7 +18,7 @@ class _MT19937State(TypedDict): class MT19937(BitGenerator): def __init__(self, seed: _ArrayLikeInt_co | SeedSequence | None = ...) -> None: ... def _legacy_seeding(self, seed: _ArrayLikeInt_co) -> None: ... 
- def jumped(self, jumps: int = ...) -> MT19937: ... + def jumped(self, jumps: int = 1) -> MT19937: ... @property def state(self) -> _MT19937State: ... @state.setter diff --git a/numpy/random/_pcg64.pyi b/numpy/random/_pcg64.pyi index 5dc7bb66321b..0326781fd43a 100644 --- a/numpy/random/_pcg64.pyi +++ b/numpy/random/_pcg64.pyi @@ -17,7 +17,7 @@ class _PCG64State(TypedDict): class PCG64(BitGenerator): def __init__(self, seed: _ArrayLikeInt_co | SeedSequence | None = ...) -> None: ... - def jumped(self, jumps: int = ...) -> PCG64: ... + def jumped(self, jumps: int = 1) -> PCG64: ... @property def state( self, @@ -31,7 +31,7 @@ class PCG64(BitGenerator): class PCG64DXSM(BitGenerator): def __init__(self, seed: _ArrayLikeInt_co | SeedSequence | None = ...) -> None: ... - def jumped(self, jumps: int = ...) -> PCG64DXSM: ... + def jumped(self, jumps: int = 1) -> PCG64DXSM: ... @property def state( self, diff --git a/numpy/random/_philox.pyi b/numpy/random/_philox.pyi index d8895bba67cf..353a0adb4861 100644 --- a/numpy/random/_philox.pyi +++ b/numpy/random/_philox.pyi @@ -35,5 +35,5 @@ class Philox(BitGenerator): self, value: _PhiloxState, ) -> None: ... - def jumped(self, jumps: int = ...) -> Philox: ... + def jumped(self, jumps: int = 1) -> Philox: ... def advance(self, delta: int) -> Philox: ... diff --git a/numpy/random/mtrand.pyi b/numpy/random/mtrand.pyi index 54bb1462fb5f..3221048a8af7 100644 --- a/numpy/random/mtrand.pyi +++ b/numpy/random/mtrand.pyi @@ -50,45 +50,45 @@ class RandomState: def __getstate__(self) -> dict[str, Any]: ... def __setstate__(self, state: dict[str, Any]) -> None: ... def __reduce__(self) -> tuple[Callable[[BitGenerator], RandomState], tuple[BitGenerator], dict[str, Any]]: ... # noqa: E501 - def seed(self, seed: _ArrayLikeFloat_co | None = ...) -> None: ... + def seed(self, seed: _ArrayLikeFloat_co | None = None) -> None: ... @overload - def get_state(self, legacy: Literal[False] = ...) -> dict[str, Any]: ... 
+ def get_state(self, legacy: Literal[False] = False) -> dict[str, Any]: ... @overload def get_state( - self, legacy: Literal[True] = ... + self, legacy: Literal[True] = True ) -> dict[str, Any] | tuple[str, NDArray[uint32], int, int, float]: ... def set_state( self, state: dict[str, Any] | tuple[str, NDArray[uint32], int, int, float] ) -> None: ... @overload - def random_sample(self, size: None = ...) -> float: ... # type: ignore[misc] + def random_sample(self, size: None = None) -> float: ... # type: ignore[misc] @overload def random_sample(self, size: _ShapeLike) -> NDArray[float64]: ... @overload - def random(self, size: None = ...) -> float: ... # type: ignore[misc] + def random(self, size: None = None) -> float: ... # type: ignore[misc] @overload def random(self, size: _ShapeLike) -> NDArray[float64]: ... @overload - def beta(self, a: float, b: float, size: None = ...) -> float: ... # type: ignore[misc] + def beta(self, a: float, b: float, size: None = None) -> float: ... # type: ignore[misc] @overload def beta( self, a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, - size: _ShapeLike | None = ... + size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def exponential(self, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + def exponential(self, scale: float = 1.0, size: None = None) -> float: ... # type: ignore[misc] @overload def exponential( - self, scale: _ArrayLikeFloat_co = ..., size: _ShapeLike | None = ... + self, scale: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def standard_exponential(self, size: None = ...) -> float: ... # type: ignore[misc] + def standard_exponential(self, size: None = None) -> float: ... # type: ignore[misc] @overload def standard_exponential(self, size: _ShapeLike) -> NDArray[float64]: ... @overload - def tomaxint(self, size: None = ...) -> int: ... # type: ignore[misc] + def tomaxint(self, size: None = None) -> int: ... 
# type: ignore[misc] @overload # Generates long values, but stores it in a 64bit int: def tomaxint(self, size: _ShapeLike) -> NDArray[int64]: ... @@ -96,222 +96,222 @@ class RandomState: def randint( # type: ignore[misc] self, low: int, - high: int | None = ..., - size: None = ..., + high: int | None = None, + size: None = None, ) -> int: ... @overload def randint( # type: ignore[misc] self, low: int, - high: int | None = ..., - size: None = ..., + high: int | None = None, + size: None = None, dtype: type[bool] = ..., ) -> bool: ... @overload def randint( # type: ignore[misc] self, low: int, - high: int | None = ..., - size: None = ..., + high: int | None = None, + size: None = None, dtype: type[np.bool] = ..., ) -> np.bool: ... @overload def randint( # type: ignore[misc] self, low: int, - high: int | None = ..., - size: None = ..., + high: int | None = None, + size: None = None, dtype: type[int] = ..., ) -> int: ... @overload def randint( # type: ignore[misc] self, low: int, - high: int | None = ..., - size: None = ..., + high: int | None = None, + size: None = None, dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ..., # noqa: E501 ) -> uint8: ... @overload def randint( # type: ignore[misc] self, low: int, - high: int | None = ..., - size: None = ..., + high: int | None = None, + size: None = None, dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ..., # noqa: E501 ) -> uint16: ... @overload def randint( # type: ignore[misc] self, low: int, - high: int | None = ..., - size: None = ..., + high: int | None = None, + size: None = None, dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ..., # noqa: E501 ) -> uint32: ... 
@overload def randint( # type: ignore[misc] self, low: int, - high: int | None = ..., - size: None = ..., + high: int | None = None, + size: None = None, dtype: dtype[uint] | type[uint] | _UIntCodes | _SupportsDType[dtype[uint]] = ..., # noqa: E501 ) -> uint: ... @overload def randint( # type: ignore[misc] self, low: int, - high: int | None = ..., - size: None = ..., + high: int | None = None, + size: None = None, dtype: dtype[ulong] | type[ulong] | _ULongCodes | _SupportsDType[dtype[ulong]] = ..., # noqa: E501 ) -> ulong: ... @overload def randint( # type: ignore[misc] self, low: int, - high: int | None = ..., - size: None = ..., + high: int | None = None, + size: None = None, dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ..., # noqa: E501 ) -> uint64: ... @overload def randint( # type: ignore[misc] self, low: int, - high: int | None = ..., - size: None = ..., + high: int | None = None, + size: None = None, dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ..., # noqa: E501 ) -> int8: ... @overload def randint( # type: ignore[misc] self, low: int, - high: int | None = ..., - size: None = ..., + high: int | None = None, + size: None = None, dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ..., # noqa: E501 ) -> int16: ... @overload def randint( # type: ignore[misc] self, low: int, - high: int | None = ..., - size: None = ..., + high: int | None = None, + size: None = None, dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ..., # noqa: E501 ) -> int32: ... @overload def randint( # type: ignore[misc] self, low: int, - high: int | None = ..., - size: None = ..., + high: int | None = None, + size: None = None, dtype: dtype[int_] | type[int_] | _IntCodes | _SupportsDType[dtype[int_]] = ..., # noqa: E501 ) -> int_: ... 
@overload def randint( # type: ignore[misc] self, low: int, - high: int | None = ..., - size: None = ..., + high: int | None = None, + size: None = None, dtype: dtype[long] | type[long] | _LongCodes | _SupportsDType[dtype[long]] = ..., # noqa: E501 ) -> long: ... @overload def randint( # type: ignore[misc] self, low: int, - high: int | None = ..., - size: None = ..., + high: int | None = None, + size: None = None, dtype: dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] = ..., # noqa: E501 ) -> int64: ... @overload def randint( # type: ignore[misc] self, low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = ..., - size: _ShapeLike | None = ..., + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, ) -> NDArray[long]: ... @overload def randint( # type: ignore[misc] self, low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = ..., - size: _ShapeLike | None = ..., + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, dtype: _DTypeLikeBool = ..., ) -> NDArray[np.bool]: ... @overload def randint( # type: ignore[misc] self, low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = ..., - size: _ShapeLike | None = ..., + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ..., # noqa: E501 ) -> NDArray[int8]: ... @overload def randint( # type: ignore[misc] self, low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = ..., - size: _ShapeLike | None = ..., + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ..., # noqa: E501 ) -> NDArray[int16]: ... 
@overload def randint( # type: ignore[misc] self, low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = ..., - size: _ShapeLike | None = ..., + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ..., # noqa: E501 ) -> NDArray[int32]: ... @overload def randint( # type: ignore[misc] self, low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = ..., - size: _ShapeLike | None = ..., + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, dtype: dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] | None = ..., # noqa: E501 ) -> NDArray[int64]: ... @overload def randint( # type: ignore[misc] self, low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = ..., - size: _ShapeLike | None = ..., + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ..., # noqa: E501 ) -> NDArray[uint8]: ... @overload def randint( # type: ignore[misc] self, low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = ..., - size: _ShapeLike | None = ..., + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ..., # noqa: E501 ) -> NDArray[uint16]: ... @overload def randint( # type: ignore[misc] self, low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = ..., - size: _ShapeLike | None = ..., + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ..., # noqa: E501 ) -> NDArray[uint32]: ... 
@overload def randint( # type: ignore[misc] self, low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = ..., - size: _ShapeLike | None = ..., + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ..., # noqa: E501 ) -> NDArray[uint64]: ... @overload def randint( # type: ignore[misc] self, low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = ..., - size: _ShapeLike | None = ..., + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, dtype: dtype[long] | type[int] | type[long] | _LongCodes | _SupportsDType[dtype[long]] = ..., # noqa: E501 ) -> NDArray[long]: ... @overload def randint( # type: ignore[misc] self, low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = ..., - size: _ShapeLike | None = ..., + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, dtype: dtype[ulong] | type[ulong] | _ULongCodes | _SupportsDType[dtype[ulong]] = ..., # noqa: E501 ) -> NDArray[ulong]: ... def bytes(self, length: int) -> builtins.bytes: ... @@ -319,44 +319,44 @@ class RandomState: def choice( self, a: int, - size: None = ..., - replace: bool = ..., - p: _ArrayLikeFloat_co | None = ..., + size: None = None, + replace: bool = True, + p: _ArrayLikeFloat_co | None = None, ) -> int: ... @overload def choice( self, a: int, - size: _ShapeLike = ..., - replace: bool = ..., - p: _ArrayLikeFloat_co | None = ..., + size: _ShapeLike | None = None, + replace: bool = True, + p: _ArrayLikeFloat_co | None = None, ) -> NDArray[long]: ... @overload def choice( self, a: ArrayLike, - size: None = ..., - replace: bool = ..., - p: _ArrayLikeFloat_co | None = ..., + size: None = None, + replace: bool = True, + p: _ArrayLikeFloat_co | None = None, ) -> Any: ... 
@overload def choice( self, a: ArrayLike, - size: _ShapeLike = ..., - replace: bool = ..., - p: _ArrayLikeFloat_co | None = ..., + size: _ShapeLike | None = None, + replace: bool = True, + p: _ArrayLikeFloat_co | None = None, ) -> NDArray[Any]: ... @overload def uniform( - self, low: float = ..., high: float = ..., size: None = ... + self, low: float = 0.0, high: float = 1.0, size: None = None ) -> float: ... # type: ignore[misc] @overload def uniform( self, - low: _ArrayLikeFloat_co = ..., - high: _ArrayLikeFloat_co = ..., - size: _ShapeLike | None = ..., + low: _ArrayLikeFloat_co = 0.0, + high: _ArrayLikeFloat_co = 1.0, + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload def rand(self) -> float: ... @@ -368,65 +368,65 @@ class RandomState: def randn(self, *args: int) -> NDArray[float64]: ... @overload def random_integers( - self, low: int, high: int | None = ..., size: None = ... + self, low: int, high: int | None = None, size: None = None ) -> int: ... # type: ignore[misc] @overload def random_integers( self, low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = ..., - size: _ShapeLike | None = ..., + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, ) -> NDArray[long]: ... @overload - def standard_normal(self, size: None = ...) -> float: ... # type: ignore[misc] + def standard_normal(self, size: None = None) -> float: ... # type: ignore[misc] @overload def standard_normal( # type: ignore[misc] - self, size: _ShapeLike = ... + self, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload def normal( - self, loc: float = ..., scale: float = ..., size: None = ... + self, loc: float = 0.0, scale: float = 1.0, size: None = None ) -> float: ... 
# type: ignore[misc] @overload def normal( self, - loc: _ArrayLikeFloat_co = ..., - scale: _ArrayLikeFloat_co = ..., - size: _ShapeLike | None = ..., + loc: _ArrayLikeFloat_co = 0.0, + scale: _ArrayLikeFloat_co = 1.0, + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload def standard_gamma( # type: ignore[misc] self, shape: float, - size: None = ..., + size: None = None, ) -> float: ... @overload def standard_gamma( self, shape: _ArrayLikeFloat_co, - size: _ShapeLike | None = ..., + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload - def gamma(self, shape: float, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + def gamma(self, shape: float, scale: float = 1.0, size: None = None) -> float: ... # type: ignore[misc] @overload def gamma( self, shape: _ArrayLikeFloat_co, - scale: _ArrayLikeFloat_co = ..., - size: _ShapeLike | None = ..., + scale: _ArrayLikeFloat_co = 1.0, + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload - def f(self, dfnum: float, dfden: float, size: None = ...) -> float: ... # type: ignore[misc] + def f(self, dfnum: float, dfden: float, size: None = None) -> float: ... # type: ignore[misc] @overload def f( self, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, - size: _ShapeLike | None = ... + size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload def noncentral_f( - self, dfnum: float, dfden: float, nonc: float, size: None = ... + self, dfnum: float, dfden: float, nonc: float, size: None = None ) -> float: ... # type: ignore[misc] @overload def noncentral_f( @@ -434,128 +434,128 @@ class RandomState: dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, - size: _ShapeLike | None = ..., + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload - def chisquare(self, df: float, size: None = ...) -> float: ... # type: ignore[misc] + def chisquare(self, df: float, size: None = None) -> float: ... 
# type: ignore[misc] @overload def chisquare( - self, df: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + self, df: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload def noncentral_chisquare( - self, df: float, nonc: float, size: None = ... + self, df: float, nonc: float, size: None = None ) -> float: ... # type: ignore[misc] @overload def noncentral_chisquare( self, df: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, - size: _ShapeLike | None = ... + size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def standard_t(self, df: float, size: None = ...) -> float: ... # type: ignore[misc] + def standard_t(self, df: float, size: None = None) -> float: ... # type: ignore[misc] @overload def standard_t( - self, df: _ArrayLikeFloat_co, size: None = ... + self, df: _ArrayLikeFloat_co, size: None = None ) -> NDArray[float64]: ... @overload def standard_t( - self, df: _ArrayLikeFloat_co, size: _ShapeLike = ... + self, df: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def vonmises(self, mu: float, kappa: float, size: None = ...) -> float: ... # type: ignore[misc] + def vonmises(self, mu: float, kappa: float, size: None = None) -> float: ... # type: ignore[misc] @overload def vonmises( self, mu: _ArrayLikeFloat_co, kappa: _ArrayLikeFloat_co, - size: _ShapeLike | None = ... + size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def pareto(self, a: float, size: None = ...) -> float: ... # type: ignore[misc] + def pareto(self, a: float, size: None = None) -> float: ... # type: ignore[misc] @overload def pareto( - self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def weibull(self, a: float, size: None = ...) -> float: ... # type: ignore[misc] + def weibull(self, a: float, size: None = None) -> float: ... 
# type: ignore[misc] @overload def weibull( - self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def power(self, a: float, size: None = ...) -> float: ... # type: ignore[misc] + def power(self, a: float, size: None = None) -> float: ... # type: ignore[misc] @overload def power( - self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def standard_cauchy(self, size: None = ...) -> float: ... # type: ignore[misc] + def standard_cauchy(self, size: None = None) -> float: ... # type: ignore[misc] @overload - def standard_cauchy(self, size: _ShapeLike = ...) -> NDArray[float64]: ... + def standard_cauchy(self, size: _ShapeLike | None = None) -> NDArray[float64]: ... @overload def laplace( - self, loc: float = ..., scale: float = ..., size: None = ... + self, loc: float = 0.0, scale: float = 1.0, size: None = None ) -> float: ... # type: ignore[misc] @overload def laplace( self, - loc: _ArrayLikeFloat_co = ..., - scale: _ArrayLikeFloat_co = ..., - size: _ShapeLike | None = ..., + loc: _ArrayLikeFloat_co = 0.0, + scale: _ArrayLikeFloat_co = 1.0, + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload def gumbel( - self, loc: float = ..., scale: float = ..., size: None = ... + self, loc: float = 0.0, scale: float = 1.0, size: None = None ) -> float: ... # type: ignore[misc] @overload def gumbel( self, - loc: _ArrayLikeFloat_co = ..., - scale: _ArrayLikeFloat_co = ..., - size: _ShapeLike | None = ..., + loc: _ArrayLikeFloat_co = 0.0, + scale: _ArrayLikeFloat_co = 1.0, + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload def logistic( - self, loc: float = ..., scale: float = ..., size: None = ... + self, loc: float = 0.0, scale: float = 1.0, size: None = None ) -> float: ... 
# type: ignore[misc] @overload def logistic( self, - loc: _ArrayLikeFloat_co = ..., - scale: _ArrayLikeFloat_co = ..., - size: _ShapeLike | None = ..., + loc: _ArrayLikeFloat_co = 0.0, + scale: _ArrayLikeFloat_co = 1.0, + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload def lognormal( - self, mean: float = ..., sigma: float = ..., size: None = ... + self, mean: float = 0.0, sigma: float = 1.0, size: None = None ) -> float: ... # type: ignore[misc] @overload def lognormal( self, - mean: _ArrayLikeFloat_co = ..., - sigma: _ArrayLikeFloat_co = ..., - size: _ShapeLike | None = ..., + mean: _ArrayLikeFloat_co = 0.0, + sigma: _ArrayLikeFloat_co = 1.0, + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload - def rayleigh(self, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + def rayleigh(self, scale: float = 1.0, size: None = None) -> float: ... # type: ignore[misc] @overload def rayleigh( - self, scale: _ArrayLikeFloat_co = ..., size: _ShapeLike | None = ... + self, scale: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def wald(self, mean: float, scale: float, size: None = ...) -> float: ... # type: ignore[misc] + def wald(self, mean: float, scale: float, size: None = None) -> float: ... # type: ignore[misc] @overload def wald( self, mean: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, - size: _ShapeLike | None = ... + size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload def triangular( - self, left: float, mode: float, right: float, size: None = ... + self, left: float, mode: float, right: float, size: None = None ) -> float: ... # type: ignore[misc] @overload def triangular( @@ -563,50 +563,50 @@ class RandomState: left: _ArrayLikeFloat_co, mode: _ArrayLikeFloat_co, right: _ArrayLikeFloat_co, - size: _ShapeLike | None = ..., + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... 
@overload def binomial( - self, n: int, p: float, size: None = ... + self, n: int, p: float, size: None = None ) -> int: ... # type: ignore[misc] @overload def binomial( - self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[long]: ... @overload def negative_binomial( - self, n: float, p: float, size: None = ... + self, n: float, p: float, size: None = None ) -> int: ... # type: ignore[misc] @overload def negative_binomial( self, n: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co, - size: _ShapeLike | None = ... + size: _ShapeLike | None = None ) -> NDArray[long]: ... @overload def poisson( - self, lam: float = ..., size: None = ... + self, lam: float = 1.0, size: None = None ) -> int: ... # type: ignore[misc] @overload def poisson( - self, lam: _ArrayLikeFloat_co = ..., size: _ShapeLike | None = ... + self, lam: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None ) -> NDArray[long]: ... @overload - def zipf(self, a: float, size: None = ...) -> int: ... # type: ignore[misc] + def zipf(self, a: float, size: None = None) -> int: ... # type: ignore[misc] @overload def zipf( - self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[long]: ... @overload - def geometric(self, p: float, size: None = ...) -> int: ... # type: ignore[misc] + def geometric(self, p: float, size: None = None) -> int: ... # type: ignore[misc] @overload def geometric( - self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[long]: ... @overload def hypergeometric( - self, ngood: int, nbad: int, nsample: int, size: None = ... + self, ngood: int, nbad: int, nsample: int, size: None = None ) -> int: ... 
# type: ignore[misc] @overload def hypergeometric( @@ -614,29 +614,29 @@ class RandomState: ngood: _ArrayLikeInt_co, nbad: _ArrayLikeInt_co, nsample: _ArrayLikeInt_co, - size: _ShapeLike | None = ..., + size: _ShapeLike | None = None, ) -> NDArray[long]: ... @overload - def logseries(self, p: float, size: None = ...) -> int: ... # type: ignore[misc] + def logseries(self, p: float, size: None = None) -> int: ... # type: ignore[misc] @overload def logseries( - self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[long]: ... def multivariate_normal( self, mean: _ArrayLikeFloat_co, cov: _ArrayLikeFloat_co, - size: _ShapeLike | None = ..., - check_valid: Literal["warn", "raise", "ignore"] = ..., - tol: float = ..., + size: _ShapeLike | None = None, + check_valid: Literal["warn", "raise", "ignore"] = "warn", + tol: float = 1e-8, ) -> NDArray[float64]: ... def multinomial( self, n: _ArrayLikeInt_co, pvals: _ArrayLikeFloat_co, - size: _ShapeLike | None = ... + size: _ShapeLike | None = None ) -> NDArray[long]: ... def dirichlet( - self, alpha: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + self, alpha: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[float64]: ... def shuffle(self, x: ArrayLike) -> None: ... @overload diff --git a/numpy/testing/_private/utils.pyi b/numpy/testing/_private/utils.pyi index 31c7fab7bdf8..69572aaa4c84 100644 --- a/numpy/testing/_private/utils.pyi +++ b/numpy/testing/_private/utils.pyi @@ -162,14 +162,14 @@ class suppress_warnings: # Contrary to runtime we can't do `os.name` checks while type checking, # only `sys.platform` checks if sys.platform == "win32" or sys.platform == "cygwin": - def memusage(processName: str = ..., instance: int = ...) -> int: ... + def memusage(processName: str = "python", instance: int = 0) -> int: ... elif sys.platform == "linux": - def memusage(_proc_pid_stat: StrOrBytesPath = ...) -> int | None: ... 
+ def memusage(_proc_pid_stat: StrOrBytesPath | None = None) -> int | None: ... else: def memusage() -> NoReturn: ... if sys.platform == "linux": - def jiffies(_proc_pid_stat: StrOrBytesPath = ..., _load_time: list[float] = []) -> int: ... + def jiffies(_proc_pid_stat: StrOrBytesPath | None = None, _load_time: list[float] = []) -> int: ... else: def jiffies(_load_time: list[float] = []) -> int: ... @@ -179,7 +179,7 @@ def build_err_msg( err_msg: object, header: str = "Items are not equal:", verbose: bool = True, - names: Sequence[str] = ..., # = ('ACTUAL', 'DESIRED') + names: Sequence[str] = ("ACTUAL", "DESIRED"), # = ('ACTUAL', 'DESIRED') precision: SupportsIndex | None = 8, ) -> str: ... From 4206c664671bcaef276ba8dfc86a0df34d29271d Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Thu, 9 Oct 2025 19:54:48 +0200 Subject: [PATCH 0555/1718] MAINT: bump ``ruff`` from ``0.12.0`` to ``0.14.0`` (#29904) * MAINT: bump ``ruff`` to ``0.14.0`` * STY: fix new ``ruff`` errors * MAINT: ignored ruff rule ``PD901`` has been removed * TST: expect more specific f2py exceptions, resolving ``B017`` --- environment.yml | 2 +- numpy/f2py/tests/test_return_integer.py | 4 ++-- numpy/f2py/tests/test_return_real.py | 4 ++-- numpy/tests/test_configtool.py | 1 - requirements/linter_requirements.txt | 2 +- ruff.toml | 2 -- 6 files changed, 6 insertions(+), 9 deletions(-) diff --git a/environment.yml b/environment.yml index 3bf6fcbc4319..c5ee0c381bb3 100644 --- a/environment.yml +++ b/environment.yml @@ -46,7 +46,7 @@ dependencies: - breathe>4.33.0 # For linting - cython-lint - - ruff=0.12.0 + - ruff=0.14.0 - gitpython # Used in some tests - cffi diff --git a/numpy/f2py/tests/test_return_integer.py b/numpy/f2py/tests/test_return_integer.py index 13a9f862f311..50309d5dadaf 100644 --- a/numpy/f2py/tests/test_return_integer.py +++ b/numpy/f2py/tests/test_return_integer.py @@ -29,8 +29,8 @@ def check_function(self, t, tname): pytest.raises(IndexError, t, []) pytest.raises(IndexError, t, ()) 
- pytest.raises(Exception, t, t) - pytest.raises(Exception, t, {}) + pytest.raises(TypeError, t, t) + pytest.raises(TypeError, t, {}) if tname in ["t8", "s8"]: pytest.raises(OverflowError, t, 100000000000000000000000) diff --git a/numpy/f2py/tests/test_return_real.py b/numpy/f2py/tests/test_return_real.py index c871ed3d4fc2..4339657aa013 100644 --- a/numpy/f2py/tests/test_return_real.py +++ b/numpy/f2py/tests/test_return_real.py @@ -39,8 +39,8 @@ def check_function(self, t, tname): pytest.raises(IndexError, t, []) pytest.raises(IndexError, t, ()) - pytest.raises(Exception, t, t) - pytest.raises(Exception, t, {}) + pytest.raises(TypeError, t, t) + pytest.raises(TypeError, t, {}) try: r = t(10**400) diff --git a/numpy/tests/test_configtool.py b/numpy/tests/test_configtool.py index c4e9a9551c0c..917bbf55712f 100644 --- a/numpy/tests/test_configtool.py +++ b/numpy/tests/test_configtool.py @@ -1,4 +1,3 @@ -import importlib import importlib.metadata import os import pathlib diff --git a/requirements/linter_requirements.txt b/requirements/linter_requirements.txt index a28b989fbb03..da6bac6f7b84 100644 --- a/requirements/linter_requirements.txt +++ b/requirements/linter_requirements.txt @@ -1,4 +1,4 @@ # keep in sync with `environment.yml` cython-lint -ruff==0.12.0 +ruff==0.14.0 GitPython>=3.1.30 diff --git a/ruff.toml b/ruff.toml index e0f0dd98872e..8b4d8358ba4a 100644 --- a/ruff.toml +++ b/ruff.toml @@ -54,8 +54,6 @@ ignore = [ "ISC002", # Implicitly concatenated string literals over multiple lines # flake8-pie "PIE790", # Unnecessary `pass` statement - # pandas-vet - "PD901", # Avoid using the generic variable name `df` for DataFrames # perflint "PERF401", # Use a list comprehension to create a transformed list # pycodestyle/error From a6f6f057c0f962b2d6b20de248a36aec6e97b6f7 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Thu, 9 Oct 2025 19:57:52 +0200 Subject: [PATCH 0556/1718] TYP: fix incorrect ``ma.sort`` arg default for ``stable`` (#29903) --- 
numpy/ma/core.pyi | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 9adab779776a..c9c4ab152030 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -2381,7 +2381,7 @@ def sort( endwith: bool | None = True, fill_value: _ScalarLike_co | None = None, *, - stable: Literal[False] | None = False, + stable: Literal[False] | None = None, ) -> _ArrayT: ... @overload def sort( @@ -2392,7 +2392,7 @@ def sort( endwith: bool | None = True, fill_value: _ScalarLike_co | None = None, *, - stable: Literal[False] | None = False, + stable: Literal[False] | None = None, ) -> NDArray[Any]: ... @overload def compressed(x: _ArrayLike[_ScalarT_co]) -> _Array1D[_ScalarT_co]: ... From 30e4cfcbfb1acb05b963ce6234b0c84ec9200115 Mon Sep 17 00:00:00 2001 From: "Christine P. Chai" Date: Thu, 9 Oct 2025 11:19:28 -0700 Subject: [PATCH 0557/1718] DOC: Correct typos [skip actions][skip azp][skip cirrus] --- doc/source/dev/development_advanced_debugging.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/dev/development_advanced_debugging.rst b/doc/source/dev/development_advanced_debugging.rst index 07c80314da1b..125f469f88de 100644 --- a/doc/source/dev/development_advanced_debugging.rst +++ b/doc/source/dev/development_advanced_debugging.rst @@ -170,7 +170,7 @@ Valgrind helps: Python allocators.) Even though using valgrind for memory leak detection is slow and less sensitive -it can be a convenient: you can run most programs with valgrind without +it can be convenient: you can run most programs with valgrind without modification. Things to be aware of: @@ -233,7 +233,7 @@ The NumPy developers often use both ``gdb`` and ``lldb`` to debug Numpy. As a rule of thumb, ``gdb`` is often easier to use on Linux while ``lldb`` is easier to use on a Mac environment. They have disjoint user interfaces, so you will need to learn how to use whichever one you land on. 
The ``gdb`` to ``lldb`` `command map -`_ is a convnient reference for how to +`_ is a convenient reference for how to accomplish common recipes in both debuggers. From 98ae47d77ed2542cdf5a8419a26cd69f9992b4c3 Mon Sep 17 00:00:00 2001 From: "Christine P. Chai" Date: Thu, 9 Oct 2025 11:27:02 -0700 Subject: [PATCH 0558/1718] DOC: Add URL to valgrind tool [skip actions][skip azp][skip cirrus] --- doc/source/dev/development_advanced_debugging.rst | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/doc/source/dev/development_advanced_debugging.rst b/doc/source/dev/development_advanced_debugging.rst index 125f469f88de..9696639c1835 100644 --- a/doc/source/dev/development_advanced_debugging.rst +++ b/doc/source/dev/development_advanced_debugging.rst @@ -36,7 +36,8 @@ However, you can ensure that we can track down such issues more easily: consider creating an additional simpler test as well. This can be helpful, because often it is only easy to find which test triggers an issue and not which line of the test. -* Never use ``np.empty`` if data is read/used. ``valgrind`` will notice this +* Never use ``np.empty`` if data is read/used. + `Valgrind `_ will notice this and report an error. When you do not care about values, you can generate random values instead. @@ -131,7 +132,8 @@ to mark them, but expect some false positives. ``valgrind`` ============ -Valgrind is a powerful tool to find certain memory access problems and should +`Valgrind `_ is a powerful tool +to find certain memory access problems and should be run on complicated C code. 
Basic use of ``valgrind`` usually requires no more than:: From f0649aa6873d7136ed2989e0a45591b9f91c441c Mon Sep 17 00:00:00 2001 From: ANIK CHAND <161185149+anikchand461@users.noreply.github.com> Date: Fri, 10 Oct 2025 02:35:19 +0530 Subject: [PATCH 0559/1718] DEP: remove helpers.pyi (finalize deprecation) --- numpy/fft/helper.pyi | 19 ------------------- 1 file changed, 19 deletions(-) diff --git a/numpy/fft/helper.pyi b/numpy/fft/helper.pyi index 5147652172a6..8b137891791f 100644 --- a/numpy/fft/helper.pyi +++ b/numpy/fft/helper.pyi @@ -1,20 +1 @@ -from typing import Any, Literal as L -from typing_extensions import deprecated -import numpy as np -from numpy._typing import ArrayLike, NDArray, _ShapeLike - -from ._helper import integer_types as integer_types - -__all__ = ["fftfreq", "fftshift", "ifftshift", "rfftfreq"] - -### - -@deprecated("Please use `numpy.fft.fftshift` instead.") -def fftshift(x: ArrayLike, axes: _ShapeLike | None = None) -> NDArray[Any]: ... -@deprecated("Please use `numpy.fft.ifftshift` instead.") -def ifftshift(x: ArrayLike, axes: _ShapeLike | None = None) -> NDArray[Any]: ... -@deprecated("Please use `numpy.fft.fftfreq` instead.") -def fftfreq(n: int | np.integer, d: ArrayLike = 1.0, device: L["cpu"] | None = None) -> NDArray[Any]: ... -@deprecated("Please use `numpy.fft.rfftfreq` instead.") -def rfftfreq(n: int | np.integer, d: ArrayLike = 1.0, device: L["cpu"] | None = None) -> NDArray[Any]: ... 
From f618698988260e4839d47433694f59f093a17838 Mon Sep 17 00:00:00 2001 From: ANIK CHAND <161185149+anikchand461@users.noreply.github.com> Date: Fri, 10 Oct 2025 02:36:09 +0530 Subject: [PATCH 0560/1718] DEP: remove linalg.pyi (finalize deprecation) --- numpy/linalg/linalg.pyi | 68 ----------------------------------------- 1 file changed, 68 deletions(-) diff --git a/numpy/linalg/linalg.pyi b/numpy/linalg/linalg.pyi index dbe9becfb8d5..8b137891791f 100644 --- a/numpy/linalg/linalg.pyi +++ b/numpy/linalg/linalg.pyi @@ -1,69 +1 @@ -from ._linalg import ( - LinAlgError, - cholesky, - cond, - cross, - det, - diagonal, - eig, - eigh, - eigvals, - eigvalsh, - inv, - lstsq, - matmul, - matrix_norm, - matrix_power, - matrix_rank, - matrix_transpose, - multi_dot, - norm, - outer, - pinv, - qr, - slogdet, - solve, - svd, - svdvals, - tensordot, - tensorinv, - tensorsolve, - trace, - vecdot, - vector_norm, -) -__all__ = [ - "LinAlgError", - "cholesky", - "cond", - "cross", - "det", - "diagonal", - "eig", - "eigh", - "eigvals", - "eigvalsh", - "inv", - "lstsq", - "matmul", - "matrix_norm", - "matrix_power", - "matrix_rank", - "matrix_transpose", - "multi_dot", - "norm", - "outer", - "pinv", - "qr", - "slogdet", - "solve", - "svd", - "svdvals", - "tensordot", - "tensorinv", - "tensorsolve", - "trace", - "vecdot", - "vector_norm", -] From f7a1b8fb404cdaebe63814e8f5eeeef873b7065d Mon Sep 17 00:00:00 2001 From: ANIK CHAND <161185149+anikchand461@users.noreply.github.com> Date: Fri, 10 Oct 2025 03:31:17 +0530 Subject: [PATCH 0561/1718] __init__.pyi removed --- numpy/linalg/__init__.pyi | 70 --------------------------------------- 1 file changed, 70 deletions(-) diff --git a/numpy/linalg/__init__.pyi b/numpy/linalg/__init__.pyi index 53c115f7bd65..8b137891791f 100644 --- a/numpy/linalg/__init__.pyi +++ b/numpy/linalg/__init__.pyi @@ -1,71 +1 @@ -from . 
import _linalg as _linalg, _umath_linalg as _umath_linalg, linalg as linalg -from ._linalg import ( - cholesky, - cond, - cross, - det, - diagonal, - eig, - eigh, - eigvals, - eigvalsh, - inv, - lstsq, - matmul, - matrix_norm, - matrix_power, - matrix_rank, - matrix_transpose, - multi_dot, - norm, - outer, - pinv, - qr, - slogdet, - solve, - svd, - svdvals, - tensordot, - tensorinv, - tensorsolve, - trace, - vecdot, - vector_norm, -) -__all__ = [ - "LinAlgError", - "cholesky", - "cond", - "cross", - "det", - "diagonal", - "eig", - "eigh", - "eigvals", - "eigvalsh", - "inv", - "lstsq", - "matmul", - "matrix_norm", - "matrix_power", - "matrix_rank", - "matrix_transpose", - "multi_dot", - "norm", - "outer", - "pinv", - "qr", - "slogdet", - "solve", - "svd", - "svdvals", - "tensordot", - "tensorinv", - "tensorsolve", - "trace", - "vecdot", - "vector_norm", -] - -class LinAlgError(ValueError): ... From e55674e36f907c13a0b6311c8a46048059d7cb0c Mon Sep 17 00:00:00 2001 From: ANIK CHAND <161185149+anikchand461@users.noreply.github.com> Date: Fri, 10 Oct 2025 03:31:50 +0530 Subject: [PATCH 0562/1718] __init__.pyi removed --- numpy/fft/__init__.pyi | 37 ------------------------------------- 1 file changed, 37 deletions(-) diff --git a/numpy/fft/__init__.pyi b/numpy/fft/__init__.pyi index 893a697f1398..8b137891791f 100644 --- a/numpy/fft/__init__.pyi +++ b/numpy/fft/__init__.pyi @@ -1,38 +1 @@ -from ._helper import fftfreq, fftshift, ifftshift, rfftfreq -from ._pocketfft import ( - fft, - fft2, - fftn, - hfft, - ifft, - ifft2, - ifftn, - ihfft, - irfft, - irfft2, - irfftn, - rfft, - rfft2, - rfftn, -) -__all__ = [ - "fft", - "ifft", - "rfft", - "irfft", - "hfft", - "ihfft", - "rfftn", - "irfftn", - "rfft2", - "irfft2", - "fft2", - "ifft2", - "fftn", - "ifftn", - "fftshift", - "ifftshift", - "fftfreq", - "rfftfreq", -] From 107200bf5526c5f297ed42b17caa3542ef64967c Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Fri, 10 Oct 2025 01:10:00 +0200 Subject: [PATCH 0563/1718] 
TYP: wrong argument defaults in ``testing._private`` (#29902) * TYP: fix `testing._private.utils.jiffies` signature on linux * TYP: fix incorrect function arg defaults in ``testing._private.extbuild`` --- numpy/testing/_private/extbuild.pyi | 6 +++--- numpy/testing/_private/utils.pyi | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/numpy/testing/_private/extbuild.pyi b/numpy/testing/_private/extbuild.pyi index 609a45e79d16..c1ae507d6a49 100644 --- a/numpy/testing/_private/extbuild.pyi +++ b/numpy/testing/_private/extbuild.pyi @@ -10,7 +10,7 @@ def build_and_import_extension( *, prologue: str = "", build_dir: pathlib.Path | None = None, - include_dirs: Sequence[str] = [], + include_dirs: Sequence[str] | None = None, more_init: str = "", ) -> types.ModuleType: ... @@ -20,6 +20,6 @@ def compile_extension_module( builddir: pathlib.Path, include_dirs: Sequence[str], source_string: str, - libraries: Sequence[str] = [], - library_dirs: Sequence[str] = [], + libraries: Sequence[str] | None = None, + library_dirs: Sequence[str] | None = None, ) -> pathlib.Path: ... diff --git a/numpy/testing/_private/utils.pyi b/numpy/testing/_private/utils.pyi index 69572aaa4c84..1b298132c8ef 100644 --- a/numpy/testing/_private/utils.pyi +++ b/numpy/testing/_private/utils.pyi @@ -169,7 +169,7 @@ else: def memusage() -> NoReturn: ... if sys.platform == "linux": - def jiffies(_proc_pid_stat: StrOrBytesPath | None = None, _load_time: list[float] = []) -> int: ... + def jiffies(_proc_pid_stat: StrOrBytesPath | None = None, _load_time: list[float] | None = None) -> int: ... else: def jiffies(_load_time: list[float] = []) -> int: ... 
From 9167aba1d2e3e64187cd0212398d048d772257ca Mon Sep 17 00:00:00 2001 From: Inessa Pawson Date: Thu, 9 Oct 2025 20:29:34 -0400 Subject: [PATCH 0564/1718] Fix Plausible link and syntax in conf.py --- doc/source/conf.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/source/conf.py b/doc/source/conf.py index 0204818aa094..af431db44351 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -290,10 +290,10 @@ def setup(app): "json_url": "https://numpy.org/doc/_static/versions.json", }, "show_version_warning_banner": True, - "analytics": [ + "analytics": { "plausible_analytics_domain": "numpy.org/doc/stable/", - "plausible_analytics_url": "https://views.scientific-python.org/js/script.file-downloads.hash.outbound-links.js", - ] + "plausible_analytics_url": ("https://views.scientific-python.org/js/script.js"), + }, } html_title = f"{project} v{version} Manual" From 71a15027c4f2ead27480a6193064a5f3a02368ba Mon Sep 17 00:00:00 2001 From: Riku Sakamoto Date: Fri, 10 Oct 2025 14:28:11 +0900 Subject: [PATCH 0565/1718] BUG: Avoid segfault when calling `numpy._core.strings._expandtabs_length.reduce` Fixes the first item of numpy#28829. 
Use `Py_XINCREF` instead of `Py_INCREF` to safely handle the case where `op_dtypes[0]` is NULL --- numpy/_core/src/umath/string_ufuncs.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/umath/string_ufuncs.cpp b/numpy/_core/src/umath/string_ufuncs.cpp index 95f30ccb109e..9b3d86c25301 100644 --- a/numpy/_core/src/umath/string_ufuncs.cpp +++ b/numpy/_core/src/umath/string_ufuncs.cpp @@ -941,7 +941,7 @@ string_expandtabs_length_promoter(PyObject *NPY_UNUSED(ufunc), PyArray_DTypeMeta *const op_dtypes[], PyArray_DTypeMeta *const signature[], PyArray_DTypeMeta *new_op_dtypes[]) { - Py_INCREF(op_dtypes[0]); + Py_XINCREF(op_dtypes[0]); new_op_dtypes[0] = op_dtypes[0]; new_op_dtypes[1] = NPY_DT_NewRef(&PyArray_Int64DType); new_op_dtypes[2] = PyArray_DTypeFromTypeNum(NPY_DEFAULT_INT); From a3f1ad9a9b5b7aae6be9c3f16c61e08ef9adec60 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Fri, 10 Oct 2025 08:51:10 +0200 Subject: [PATCH 0566/1718] TYP: minor fixes related to ``errstate`` (#29914) --- numpy/__init__.pyi | 28 +------------------- numpy/_core/_ufunc_config.pyi | 50 ++++++++++++++++++++++++++++++----- 2 files changed, 45 insertions(+), 33 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 7d0bd028c826..c18bc91ff4ba 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -292,8 +292,7 @@ from numpy._core._ufunc_config import ( getbufsize, seterrcall, geterrcall, - _ErrKind, - _ErrCall, + errstate, ) from numpy._core.arrayprint import ( @@ -757,8 +756,6 @@ _T_contra = TypeVar("_T_contra", contravariant=True) _RealT_co = TypeVar("_RealT_co", covariant=True) _ImagT_co = TypeVar("_ImagT_co", covariant=True) -_CallableT = TypeVar("_CallableT", bound=Callable[..., object]) - _DTypeT = TypeVar("_DTypeT", bound=dtype) _DTypeT_co = TypeVar("_DTypeT_co", bound=dtype, default=dtype, covariant=True) _FlexDTypeT = TypeVar("_FlexDTypeT", bound=dtype[flexible]) @@ -5735,29 +5732,6 @@ permute_dims = transpose pow = power 
true_divide = divide -class errstate: - __slots__ = "_all", "_call", "_divide", "_invalid", "_over", "_token", "_under" - - def __init__( - self, - *, - call: _ErrCall = ..., - all: _ErrKind | None = None, - divide: _ErrKind | None = None, - over: _ErrKind | None = None, - under: _ErrKind | None = None, - invalid: _ErrKind | None = None, - ) -> None: ... - def __enter__(self) -> None: ... - def __exit__( - self, - exc_type: type[BaseException] | None, - exc_value: BaseException | None, - traceback: TracebackType | None, - /, - ) -> None: ... - def __call__(self, func: _CallableT) -> _CallableT: ... - # TODO: The type of each `__next__` and `iters` return-type depends # on the length and dtype of `args`; we can't describe this behavior yet # as we lack variadics (PEP 646). diff --git a/numpy/_core/_ufunc_config.pyi b/numpy/_core/_ufunc_config.pyi index 1cc3595d5ba0..f1f0d88fe165 100644 --- a/numpy/_core/_ufunc_config.pyi +++ b/numpy/_core/_ufunc_config.pyi @@ -1,12 +1,22 @@ from _typeshed import SupportsWrite from collections.abc import Callable -from typing import Any, Literal, TypeAlias, TypedDict, type_check_only +from types import TracebackType +from typing import Any, Final, Literal, TypeAlias, TypedDict, TypeVar, type_check_only -from numpy import errstate as errstate +__all__ = [ + "seterr", + "geterr", + "setbufsize", + "getbufsize", + "seterrcall", + "geterrcall", + "errstate", +] _ErrKind: TypeAlias = Literal["ignore", "warn", "raise", "call", "print", "log"] -_ErrFunc: TypeAlias = Callable[[str, int], Any] -_ErrCall: TypeAlias = _ErrFunc | SupportsWrite[str] +_ErrCall: TypeAlias = Callable[[str, int], Any] | SupportsWrite[str] + +_CallableT = TypeVar("_CallableT", bound=Callable[..., object]) @type_check_only class _ErrDict(TypedDict): @@ -15,6 +25,36 @@ class _ErrDict(TypedDict): under: _ErrKind invalid: _ErrKind +### + +class _unspecified: ... 
+ +_Unspecified: Final[_unspecified] + +class errstate: + __slots__ = "_all", "_call", "_divide", "_invalid", "_over", "_token", "_under" + + def __init__( + self, + /, + *, + call: _ErrCall | _unspecified = ..., # = _Unspecified + all: _ErrKind | None = None, + divide: _ErrKind | None = None, + over: _ErrKind | None = None, + under: _ErrKind | None = None, + invalid: _ErrKind | None = None, + ) -> None: ... + def __call__(self, /, func: _CallableT) -> _CallableT: ... + def __enter__(self) -> None: ... + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + traceback: TracebackType | None, + /, + ) -> None: ... + def seterr( all: _ErrKind | None = None, divide: _ErrKind | None = None, @@ -27,5 +67,3 @@ def setbufsize(size: int) -> int: ... def getbufsize() -> int: ... def seterrcall(func: _ErrCall | None) -> _ErrCall | None: ... def geterrcall() -> _ErrCall | None: ... - -# See `numpy/__init__.pyi` for the `errstate` class and `no_nep5_warnings` From d2678ee105eeb22c2da66b1b79d7f1acaa34a93a Mon Sep 17 00:00:00 2001 From: Riku Sakamoto Date: Fri, 10 Oct 2025 16:05:50 +0900 Subject: [PATCH 0567/1718] TST: add tests to check `numpy._core.strings._expandtabs_length` --- numpy/_core/tests/test_strings.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/numpy/_core/tests/test_strings.py b/numpy/_core/tests/test_strings.py index 756c6e1bb549..939b7fbd465d 100644 --- a/numpy/_core/tests/test_strings.py +++ b/numpy/_core/tests/test_strings.py @@ -4,6 +4,7 @@ import pytest import numpy as np +from numpy._core._exceptions import _UFuncNoLoopError from numpy.testing import IS_PYPY, assert_array_equal, assert_raises from numpy.testing._private.utils import requires_memory @@ -821,6 +822,20 @@ def test_expandtabs_raises_overflow(self, dt): np.strings.expandtabs(np.array("\ta\n\tb", dtype=dt), sys.maxsize) np.strings.expandtabs(np.array("\ta\n\tb", dtype=dt), 2**61) + def 
test_expandtabs_length_not_cause_segfault(self, dt): + # see gh-28829 + with pytest.raises( + _UFuncNoLoopError, + match="did not contain a loop with signature matching types", + ): + np._core.strings._expandtabs_length.reduce(np.zeros(200)) + + with pytest.raises( + _UFuncNoLoopError, + match="did not contain a loop with signature matching types", + ): + np.strings.expandtabs(np.zeros(200)) + FILL_ERROR = "The fill character must be exactly one character long" def test_center_raises_multiple_character_fill(self, dt): From 0a546cc17f95cd307a609d0fab249f9d5831f991 Mon Sep 17 00:00:00 2001 From: ANIK CHAND <161185149+anikchand461@users.noreply.github.com> Date: Fri, 10 Oct 2025 13:23:40 +0530 Subject: [PATCH 0568/1718] Update __init__.pyi --- numpy/linalg/__init__.pyi | 70 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 70 insertions(+) diff --git a/numpy/linalg/__init__.pyi b/numpy/linalg/__init__.pyi index 8b137891791f..4d6aca760637 100644 --- a/numpy/linalg/__init__.pyi +++ b/numpy/linalg/__init__.pyi @@ -1 +1,71 @@ +from . import _linalg as _linalg, _umath_linalg as _umath_linalg +from ._linalg import ( + cholesky, + cond, + cross, + det, + diagonal, + eig, + eigh, + eigvals, + eigvalsh, + inv, + lstsq, + matmul, + matrix_norm, + matrix_power, + matrix_rank, + matrix_transpose, + multi_dot, + norm, + outer, + pinv, + qr, + slogdet, + solve, + svd, + svdvals, + tensordot, + tensorinv, + tensorsolve, + trace, + vecdot, + vector_norm, +) +__all__ = [ + "LinAlgError", + "cholesky", + "cond", + "cross", + "det", + "diagonal", + "eig", + "eigh", + "eigvals", + "eigvalsh", + "inv", + "lstsq", + "matmul", + "matrix_norm", + "matrix_power", + "matrix_rank", + "matrix_transpose", + "multi_dot", + "norm", + "outer", + "pinv", + "qr", + "slogdet", + "solve", + "svd", + "svdvals", + "tensordot", + "tensorinv", + "tensorsolve", + "trace", + "vecdot", + "vector_norm", +] + +class LinAlgError(ValueError): ... 
From ef1afa0156d261119fedcb9591c1402705abf609 Mon Sep 17 00:00:00 2001 From: ANIK CHAND <161185149+anikchand461@users.noreply.github.com> Date: Fri, 10 Oct 2025 13:27:55 +0530 Subject: [PATCH 0569/1718] Update __init__.pyi From 148d85c15b0ff35f2a1d608fa3f14aaa515f9016 Mon Sep 17 00:00:00 2001 From: ANIK CHAND <161185149+anikchand461@users.noreply.github.com> Date: Fri, 10 Oct 2025 13:30:19 +0530 Subject: [PATCH 0570/1718] Update __init__.pyi --- numpy/fft/__init__.pyi | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/numpy/fft/__init__.pyi b/numpy/fft/__init__.pyi index 8b137891791f..893a697f1398 100644 --- a/numpy/fft/__init__.pyi +++ b/numpy/fft/__init__.pyi @@ -1 +1,38 @@ +from ._helper import fftfreq, fftshift, ifftshift, rfftfreq +from ._pocketfft import ( + fft, + fft2, + fftn, + hfft, + ifft, + ifft2, + ifftn, + ihfft, + irfft, + irfft2, + irfftn, + rfft, + rfft2, + rfftn, +) +__all__ = [ + "fft", + "ifft", + "rfft", + "irfft", + "hfft", + "ihfft", + "rfftn", + "irfftn", + "rfft2", + "irfft2", + "fft2", + "ifft2", + "fftn", + "ifftn", + "fftshift", + "ifftshift", + "fftfreq", + "rfftfreq", +] From 7e290de02f689ce8097f828cd6b3b058aaa2d28b Mon Sep 17 00:00:00 2001 From: ANIK CHAND <161185149+anikchand461@users.noreply.github.com> Date: Fri, 10 Oct 2025 14:12:14 +0530 Subject: [PATCH 0571/1718] Rename 29000.expiring.rst to 29009.expiring.rst --- .../upcoming_changes/{29000.expiring.rst => 29009.expiring.rst} | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) rename doc/release/upcoming_changes/{29000.expiring.rst => 29009.expiring.rst} (82%) diff --git a/doc/release/upcoming_changes/29000.expiring.rst b/doc/release/upcoming_changes/29009.expiring.rst similarity index 82% rename from doc/release/upcoming_changes/29000.expiring.rst rename to doc/release/upcoming_changes/29009.expiring.rst index 2353f38a7083..b1f7e0bdd7a3 100644 --- a/doc/release/upcoming_changes/29000.expiring.rst +++ 
b/doc/release/upcoming_changes/29009.expiring.rst @@ -7,4 +7,4 @@ The following deprecated functions and classes have been removed: Previously deprecated, use :mod:`numpy.linalg` instead. * :mod:`numpy.fft.helpers` - Previously deprecated, use :mod:`numpy.fft` instead. \ No newline at end of file + Previously deprecated, use :mod:`numpy.fft` instead. From 47db4f1b0a7dba2f1808e0d812a9e7b30747520b Mon Sep 17 00:00:00 2001 From: ANIK CHAND <161185149+anikchand461@users.noreply.github.com> Date: Fri, 10 Oct 2025 16:50:23 +0530 Subject: [PATCH 0572/1718] Update doc/release/upcoming_changes/29009.expiring.rst Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- doc/release/upcoming_changes/29009.expiring.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/release/upcoming_changes/29009.expiring.rst b/doc/release/upcoming_changes/29009.expiring.rst index b1f7e0bdd7a3..efe0013e40a5 100644 --- a/doc/release/upcoming_changes/29009.expiring.rst +++ b/doc/release/upcoming_changes/29009.expiring.rst @@ -6,5 +6,5 @@ The following deprecated functions and classes have been removed: * :mod:`numpy.linalg.linalg` Previously deprecated, use :mod:`numpy.linalg` instead. -* :mod:`numpy.fft.helpers` +* :mod:`numpy.fft.helper` Previously deprecated, use :mod:`numpy.fft` instead. 
From fe33f9aaac986c20cd568a9247243c506c3a27ad Mon Sep 17 00:00:00 2001 From: mattip Date: Fri, 10 Oct 2025 16:11:31 +0300 Subject: [PATCH 0573/1718] fix file name --- .../upcoming_changes/{29009.expiring.rst => 29909.expired.rst} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename doc/release/upcoming_changes/{29009.expiring.rst => 29909.expired.rst} (100%) diff --git a/doc/release/upcoming_changes/29009.expiring.rst b/doc/release/upcoming_changes/29909.expired.rst similarity index 100% rename from doc/release/upcoming_changes/29009.expiring.rst rename to doc/release/upcoming_changes/29909.expired.rst From 801f86d1f28e6bdd08ef8a4ce5f56d268e0eda9f Mon Sep 17 00:00:00 2001 From: mattip Date: Fri, 10 Oct 2025 16:37:11 +0300 Subject: [PATCH 0574/1718] DOC: reformat release note [skip actions][skip azp][skip cirrus] --- doc/release/upcoming_changes/29909.expired.rst | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/doc/release/upcoming_changes/29909.expired.rst b/doc/release/upcoming_changes/29909.expired.rst index efe0013e40a5..e3cb557e2c28 100644 --- a/doc/release/upcoming_changes/29909.expired.rst +++ b/doc/release/upcoming_changes/29909.expired.rst @@ -1,10 +1,10 @@ -Deprecated features removed in 2.2.0 -==================================== +Remove numpy.linalg.linag and numpy.fft.helper +---------------------------------------------- -The following deprecated functions and classes have been removed: +The following were deprecated in NumPy 2.0 and have been moved to private modules -* :mod:`numpy.linalg.linalg` - Previously deprecated, use :mod:`numpy.linalg` instead. +* ``numpy.linalg.linalg`` + Use :mod:`numpy.linalg` instead. -* :mod:`numpy.fft.helper` - Previously deprecated, use :mod:`numpy.fft` instead. +* ``numpy.fft.helper`` + Use :mod:`numpy.fft` instead. 
From 7f9a77ad70244760cab0dadca4450ca3736e18c6 Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Fri, 10 Oct 2025 18:13:51 +0300 Subject: [PATCH 0575/1718] TST: use requirements/test_requirements across CI (#29919) * TST: use requirements/test_requirements across CI * fixes * fixes * fixes * fixes * fixes --- .github/meson_actions/action.yml | 3 +-- .github/workflows/linux-ppc64le.yml | 5 ++--- .github/workflows/linux.yml | 2 +- .github/workflows/linux_blas.yml | 23 ++++++++++------------- .github/workflows/linux_simd.yml | 9 +++------ .github/workflows/macos.yml | 4 +--- 6 files changed, 18 insertions(+), 28 deletions(-) diff --git a/.github/meson_actions/action.yml b/.github/meson_actions/action.yml index 66868cbc3be0..476c0bbd7950 100644 --- a/.github/meson_actions/action.yml +++ b/.github/meson_actions/action.yml @@ -30,8 +30,7 @@ runs: TERM: xterm-256color run: | echo "::group::Installing Test Dependencies" - pip install pytest pytest-xdist pytest-timeout hypothesis typing_extensions - pip install -r requirements/setuptools_requirement.txt + python -m pip install -r requirements/test_requirements.txt echo "::endgroup::" echo "::group::Test NumPy" spin test -- --durations=10 --timeout=600 diff --git a/.github/workflows/linux-ppc64le.yml b/.github/workflows/linux-ppc64le.yml index c561c3be4611..f54b5dc74060 100644 --- a/.github/workflows/linux-ppc64le.yml +++ b/.github/workflows/linux-ppc64le.yml @@ -35,8 +35,7 @@ jobs: sudo apt install -y python3 python3-pip python3-dev ninja-build gfortran \ build-essential libopenblas-dev liblapack-dev pkg-config pip install --upgrade pip - pip install -r requirements/build_requirements.txt - pip install pytest pytest-xdist hypothesis typing_extensions pytest-timeout spin + pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt echo "/home/runner/.local/bin" >> $GITHUB_PATH - name: Meson Build @@ -49,4 +48,4 @@ jobs: - name: Run Tests run: | - spin test -- --timeout=60 --durations=10 \ No 
newline at end of file + spin test -- --timeout=60 --durations=10 diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index b33213449561..42a9601f33e1 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -236,7 +236,7 @@ jobs: # - name: Check docstests # shell: 'script -q -e -c "bash --noprofile --norc -eo pipefail {0}"' # run: | - # pip install scipy-doctest>=1.8.0 hypothesis==6.104.1 matplotlib scipy pandas + # pip install -r requirements/doc_requirements.txt -r requirements/test_requirements.txt # spin check-docs -v # spin check-tutorials -v diff --git a/.github/workflows/linux_blas.yml b/.github/workflows/linux_blas.yml index 570e90437e1a..98d45ab8c435 100644 --- a/.github/workflows/linux_blas.yml +++ b/.github/workflows/linux_blas.yml @@ -76,7 +76,7 @@ jobs: - name: Install dependencies run: | - pip install -r requirements/build_requirements.txt + pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt # Install OpenBLAS if [[ $USE_NIGHTLY_OPENBLAS == "true" ]]; then python -m pip install -i https://pypi.anaconda.org/scientific-python-nightly-wheels/simple scipy-openblas32 @@ -113,7 +113,6 @@ jobs: env: TERM: xterm-256color run: | - pip install pytest pytest-xdist hypothesis typing_extensions pytest-timeout spin test -j auto -- --timeout=600 --durations=10 @@ -135,8 +134,7 @@ jobs: - name: Install dependencies run: | - pip install -r requirements/build_requirements.txt - pip install pytest hypothesis typing_extensions pytest-timeout + pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt - name: Build (LP64) run: spin build -- -Dblas=openblas -Dlapack=openblas -Ddisable-optimization=true -Dallow-noblas=false @@ -171,8 +169,7 @@ jobs: - name: Install dependencies run: | - pip install -r requirements/build_requirements.txt - pip install pytest hypothesis typing_extensions pytest-timeout + pip install -r requirements/build_requirements.txt -r 
requirements/test_requirements.txt - name: Build run: spin build -- -Ddisable-optimization=true -Dallow-noblas=false @@ -205,8 +202,7 @@ jobs: - name: Install dependencies run: | - pip install -r requirements/build_requirements.txt - pip install pytest pytest-xdist hypothesis typing_extensions pytest-timeout + pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt sudo apt-get update sudo apt-get install libopenblas-dev cmake sudo apt-get remove pkg-config @@ -234,7 +230,7 @@ jobs: - name: Install dependencies run: | - pip install -r requirements/build_requirements.txt + pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt sudo apt-get update sudo apt-get install liblapack-dev pkg-config @@ -244,7 +240,6 @@ jobs: - name: Test run: | - pip install pytest pytest-xdist hypothesis typing_extensions pytest-timeout spin test -j auto -- numpy/linalg --timeout=600 --durations=10 @@ -276,6 +271,8 @@ jobs: - name: Test run: | + # do not use test_requirements.txt, it includes coverage which requires + # sqlite3, which is not available on OpenSUSE python pip install --break-system-packages pytest pytest-xdist hypothesis typing_extensions pytest-timeout spin test -j auto -- numpy/linalg --timeout=600 --durations=10 @@ -297,7 +294,7 @@ jobs: - name: Install dependencies run: | pip install -r requirements/build_requirements.txt - pip install pytest pytest-xdist hypothesis typing_extensions pytest-timeout + pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt pip install mkl mkl-devel - name: Repair MKL pkg-config files and symlinks @@ -361,7 +358,7 @@ jobs: - name: Install dependencies run: | pip install -r requirements/build_requirements.txt - pip install pytest pytest-xdist hypothesis typing_extensions pytest-timeout + pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt sudo apt-get update sudo apt-get install libblis-dev libopenblas-dev 
pkg-config @@ -398,7 +395,7 @@ jobs: - name: Install dependencies run: | pip install -r requirements/build_requirements.txt - pip install pytest pytest-xdist hypothesis typing_extensions pytest-timeout + pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt sudo apt-get update sudo apt-get install libatlas-base-dev pkg-config diff --git a/.github/workflows/linux_simd.yml b/.github/workflows/linux_simd.yml index 5bd1eab7f797..c000c7f05d86 100644 --- a/.github/workflows/linux_simd.yml +++ b/.github/workflows/linux_simd.yml @@ -132,8 +132,7 @@ jobs: python-version: '3.11' - name: Install dependencies run: | - python -m pip install -r requirements/build_requirements.txt - python -m pip install pytest pytest-xdist hypothesis typing_extensions pytest-timeout + python -m pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt - name: Build run: | spin build -- ${{ matrix.config.args }} @@ -208,8 +207,7 @@ jobs: - name: Install dependencies run: | - python -m pip install -r requirements/build_requirements.txt - python -m pip install pytest pytest-xdist hypothesis typing_extensions + python -m pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt - name: Build run: CC=gcc-13 CXX=g++-13 spin build -- -Denable-openmp=true -Dallow-noblas=true -Dcpu-baseline=avx512_skx -Dtest-simd='BASELINE,AVX512_KNL,AVX512_KNM,AVX512_SKX,AVX512_CLX,AVX512_CNL,AVX512_ICL,AVX512_SPR' @@ -259,8 +257,7 @@ jobs: - name: Install dependencies run: | - python -m pip install -r requirements/build_requirements.txt - python -m pip install pytest pytest-xdist hypothesis typing_extensions + python -m pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt - name: Build run: CC=gcc-13 CXX=g++-13 spin build -- -Denable-openmp=true -Dallow-noblas=true -Dcpu-baseline=avx512_spr diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 
868dac6dcbbc..d747ab959dd6 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -130,9 +130,7 @@ jobs: - name: Install dependencies run: | - pip install -r requirements/build_requirements.txt - pip install -r requirements/setuptools_requirement.txt - pip install pytest pytest-xdist pytest-timeout hypothesis + pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt - name: Build against Accelerate (LP64) run: spin build -- -Ddisable-optimization=true -Dallow-noblas=false From dd0d9d6d131fc48ad0d69a9e55cf52ed898511fc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 10 Oct 2025 20:43:29 +0200 Subject: [PATCH 0576/1718] MAINT: Bump github/codeql-action from 4.30.7 to 4.30.8 (#29924) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index d6719c246c0d..0d52c4126be6 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@e296a935590eb16afc0c0108289f68c87e2a89a5 # v4.30.7 + uses: github/codeql-action/init@f443b600d91635bebf5b0d9ebc620189c0d6fba5 # v4.30.8 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 
# If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@e296a935590eb16afc0c0108289f68c87e2a89a5 # v4.30.7 + uses: github/codeql-action/autobuild@f443b600d91635bebf5b0d9ebc620189c0d6fba5 # v4.30.8 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@e296a935590eb16afc0c0108289f68c87e2a89a5 # v4.30.7 + uses: github/codeql-action/analyze@f443b600d91635bebf5b0d9ebc620189c0d6fba5 # v4.30.8 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 6fb0391abde5..9a0c80aa3342 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@e296a935590eb16afc0c0108289f68c87e2a89a5 # v2.1.27 + uses: github/codeql-action/upload-sarif@f443b600d91635bebf5b0d9ebc620189c0d6fba5 # v2.1.27 with: sarif_file: results.sarif From 0f9ea0ef45968b1aa659665e0018b78a1c059391 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Germ=C3=A1n=20Godoy=20Gutierrez?= Date: Fri, 10 Oct 2025 11:20:49 +0200 Subject: [PATCH 0577/1718] Fix memory leak in import_array(). Move import_array() and import_umath() calls before PyModule_Create() to prevent memory leaks when imports fail. The import macros contain a return NULL statement that bypasses cleanup code. 
Fixes in: - numpy/_core/src/umath/_operand_flag_tests.c - numpy/_core/src/umath/_struct_ufunc_tests.c --- numpy/_core/src/umath/_operand_flag_tests.c | 6 +++--- numpy/_core/src/umath/_struct_ufunc_tests.c | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/numpy/_core/src/umath/_operand_flag_tests.c b/numpy/_core/src/umath/_operand_flag_tests.c index 5cdff6220280..c97668c4b118 100644 --- a/numpy/_core/src/umath/_operand_flag_tests.c +++ b/numpy/_core/src/umath/_operand_flag_tests.c @@ -57,14 +57,14 @@ PyMODINIT_FUNC PyInit__operand_flag_tests(void) PyObject *m = NULL; PyObject *ufunc; + import_array(); + import_umath(); + m = PyModule_Create(&moduledef); if (m == NULL) { goto fail; } - import_array(); - import_umath(); - ufunc = PyUFunc_FromFuncAndData(funcs, data, types, 1, 2, 0, PyUFunc_None, "inplace_add", "inplace_add_docstring", 0); diff --git a/numpy/_core/src/umath/_struct_ufunc_tests.c b/numpy/_core/src/umath/_struct_ufunc_tests.c index 56c4be117e44..e85c67f9d903 100644 --- a/numpy/_core/src/umath/_struct_ufunc_tests.c +++ b/numpy/_core/src/umath/_struct_ufunc_tests.c @@ -123,15 +123,15 @@ PyMODINIT_FUNC PyInit__struct_ufunc_tests(void) PyArray_Descr *dtype; PyArray_Descr *dtypes[3]; + import_array(); + import_umath(); + m = PyModule_Create(&moduledef); if (m == NULL) { return NULL; } - import_array(); - import_umath(); - add_triplet = PyUFunc_FromFuncAndData(NULL, NULL, NULL, 0, 2, 1, PyUFunc_None, "add_triplet", NULL, 0); From 0ce0ef0bae095e44770a20b64177f07f11758715 Mon Sep 17 00:00:00 2001 From: Maaz <76714503+mmaaz-git@users.noreply.github.com> Date: Fri, 10 Oct 2025 17:50:22 -0400 Subject: [PATCH 0578/1718] BUG: fix negative samples generated by Wald distribution (#29609) In numpy.random, when generating Wald samples, if the mean and scale have a large discrepancy, the current implementation suffers from catastrophic cancellation. An explicit example of this has been added to test_generator_mt19937.py. 
The key line implicated in distributions.c: `X = mean + mu_2l * (Y - sqrt(4 * scale * Y + Y * Y));` which has numerical issues when Y >> scale. I have replaced this with the equivalent rationalized form: ``` d = Y + sqrt(Y) * sqrt(Y + 4 * scale); X = mean - (2 * mean * Y) / d; ``` And now the test passes. --- numpy/random/src/distributions/distributions.c | 6 +++--- numpy/random/tests/test_generator_mt19937.py | 5 +++++ 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/numpy/random/src/distributions/distributions.c b/numpy/random/src/distributions/distributions.c index 8102fee72323..79cacb2df4a4 100644 --- a/numpy/random/src/distributions/distributions.c +++ b/numpy/random/src/distributions/distributions.c @@ -848,12 +848,12 @@ double random_noncentral_f(bitgen_t *bitgen_state, double dfnum, double dfden, double random_wald(bitgen_t *bitgen_state, double mean, double scale) { double U, X, Y; - double mu_2l; + double d; - mu_2l = mean / (2 * scale); Y = random_standard_normal(bitgen_state); Y = mean * Y * Y; - X = mean + mu_2l * (Y - sqrt(4 * scale * Y + Y * Y)); + d = 1 + sqrt(1 + 4 * scale / Y); + X = mean * (1 - 2 / d); U = next_double(bitgen_state); if (U <= mean / (mean + X)) { return X; diff --git a/numpy/random/tests/test_generator_mt19937.py b/numpy/random/tests/test_generator_mt19937.py index 6594f6008c8e..51065f24868d 100644 --- a/numpy/random/tests/test_generator_mt19937.py +++ b/numpy/random/tests/test_generator_mt19937.py @@ -1874,6 +1874,11 @@ def test_wald(self): [2.07093587449261, 0.73073890064369]]) assert_array_almost_equal(actual, desired, decimal=14) + def test_wald_nonnegative(self): + random = Generator(MT19937(self.seed)) + samples = random.wald(mean=1e9, scale=2.25, size=1000) + assert_(np.all(samples >= 0.0)) + def test_weibull(self): random = Generator(MT19937(self.seed)) actual = random.weibull(a=1.23, size=(3, 2)) From 5c77403a6c0a70254976245f2e488e13fbee75c7 Mon Sep 17 00:00:00 2001 From: Aniket Singh Yadav Date: Sat, 11 Oct 
2025 17:40:39 +0530 Subject: [PATCH 0579/1718] =?UTF-8?q?DOC:=20Update=20Meson=20build=20examp?= =?UTF-8?q?les=20in=20usage.rst=20[skip=20actions][skip=20azp=E2=80=A6?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- doc/source/f2py/usage.rst | 75 ++++++++++++++++++++------------------- 1 file changed, 38 insertions(+), 37 deletions(-) diff --git a/doc/source/f2py/usage.rst b/doc/source/f2py/usage.rst index d6e042863b55..6a8ffc9b75fe 100644 --- a/doc/source/f2py/usage.rst +++ b/doc/source/f2py/usage.rst @@ -342,53 +342,54 @@ To build a Fortran extension using f2py and Meson, you can use Meson's `custom_target` to invoke f2py and generate the extension module. The following minimal example demonstrates how to do this: +This example shows how to build the previously described `add` extension +(from `add.pyf` and `add.f`) using Meson instead of distutils. + +Project layout: + + f2py_examples/ + meson.build + add.f + add.pyf + __init__.py (can be empty) + +Example `meson.build`: + .. 
code-block:: meson - # List your Fortran source files - fortran_sources = files('your_module.f90') + project('f2py_examples', 'fortran') - # Find the Python installation py = import('python').find_installation() - # Create a custom target to build the extension with f2py - f2py_wrapper = custom_target( - 'your_module_wrapper', - output: 'your_module.so', - input: fortran_sources, + # List your Fortran source files + sources = files('add.pyf', 'add.f') + + # Build the extension by invoking f2py via a custom target + add_mod = custom_target( + 'add_extension', + input: sources, + output: ['add' + py.extension_suffix()], command: [ py.full_path(), '-m', 'numpy.f2py', - '-c', '@INPUT@', '-m', 'your_module' - ] + '-c', 'add.pyf', 'add.f', + '-m', 'add' + ], + build_by_default: true ) - # Install the built extension to the Python site-packages directory - install_data(f2py_wrapper, install_dir: py.site_packages_dir()) + # Install into site-packages under the f2py_examples package + install_subdir('.', install_dir: py.site_packages_dir() / 'f2py_examples', + strip_directory: false, + exclude_files: ['meson.build']) + + # Also install the built extension (place it beside __init__.py) + install_data(add_mod, install_dir: py.site_packages_dir() / 'f2py_examples') + +.. note:: + This uses a simple custom_target. For larger projects you may wish to + stage build outputs in a subdirectory and control install steps more + explicitly. For more details and advanced usage, see the Meson build guide in the user documentation or refer to SciPy's Meson build files for real-world examples: https://github.com/scipy/scipy/tree/main/meson.build - -Building NumPy ufunc Extensions with Meson -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To build a NumPy ufunc extension (C API) using Meson, you can use the -following template: - -.. 
code-block:: meson - - # List your C source files - c_sources = files('your_ufunc_module.c') - - # Find the Python installation - py = import('python').find_installation() - - # Create an extension module - extension_module = py.extension_module( - 'your_ufunc_module', - c_sources, - dependencies: py.dependency(), - install: true - ) - -For more information on writing NumPy ufunc extensions, see the official -NumPy documentation: https://numpy.org/doc/stable/reference/c-api.ufunc.html From 06102c990de8437d9d5b4934470cb6aa98c58622 Mon Sep 17 00:00:00 2001 From: Aniket Singh Yadav Date: Sun, 12 Oct 2025 10:33:47 +0530 Subject: [PATCH 0580/1718] DOC: Make Meson/f2py example concrete, remove redundancy, and fix formatting [skip actions] [skip azp] [skip cirrus] --- doc/source/f2py/usage.rst | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/doc/source/f2py/usage.rst b/doc/source/f2py/usage.rst index 6a8ffc9b75fe..ae510c20b01a 100644 --- a/doc/source/f2py/usage.rst +++ b/doc/source/f2py/usage.rst @@ -339,18 +339,19 @@ modules, especially starting with Python 3.12 and NumPy 2.x. Meson provides a robust and maintainable way to build Fortran extensions with f2py. To build a Fortran extension using f2py and Meson, you can use Meson's -`custom_target` to invoke f2py and generate the extension module. The +``custom_target`` to invoke f2py and generate the extension module. The following minimal example demonstrates how to do this: -This example shows how to build the previously described `add` extension -(from `add.pyf` and `add.f`) using Meson instead of distutils. +This example shows how to build the ``add`` extension from the ``add.f`` and ``add.pyf`` +files described in the :ref:`f2py-examples` (note that you do not always need +a ``.pyf`` file: in many cases ``f2py`` can figure out the annotations by itself). 
Project layout: f2py_examples/ meson.build add.f - add.pyf + add.pyf (optional) __init__.py (can be empty) Example `meson.build`: @@ -385,11 +386,6 @@ Example `meson.build`: # Also install the built extension (place it beside __init__.py) install_data(add_mod, install_dir: py.site_packages_dir() / 'f2py_examples') -.. note:: - This uses a simple custom_target. For larger projects you may wish to - stage build outputs in a subdirectory and control install steps more - explicitly. - For more details and advanced usage, see the Meson build guide in the user documentation or refer to SciPy's Meson build files for real-world examples: https://github.com/scipy/scipy/tree/main/meson.build From e48f9b27611ec11b2ee6c5942b95783377421bf7 Mon Sep 17 00:00:00 2001 From: Aniket Singh Yadav Date: Sun, 12 Oct 2025 20:03:12 +0530 Subject: [PATCH 0581/1718] DOC: Make Meson/f2py example concrete, remove redundancy, and fix formatting [skip actions] [skip azp] [skip cirrus] --- doc/source/f2py/usage.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/source/f2py/usage.rst b/doc/source/f2py/usage.rst index ae510c20b01a..efcf2bec5266 100644 --- a/doc/source/f2py/usage.rst +++ b/doc/source/f2py/usage.rst @@ -354,7 +354,7 @@ Project layout: add.pyf (optional) __init__.py (can be empty) -Example `meson.build`: +Example ``meson.build``: .. 
code-block:: meson @@ -379,12 +379,12 @@ Example `meson.build`: ) # Install into site-packages under the f2py_examples package - install_subdir('.', install_dir: py.site_packages_dir() / 'f2py_examples', + install_subdir('.', install_dir: join_paths(py.site_packages_dir(), 'f2py_examples'), strip_directory: false, exclude_files: ['meson.build']) # Also install the built extension (place it beside __init__.py) - install_data(add_mod, install_dir: py.site_packages_dir() / 'f2py_examples') + install_data(add_mod, install_dir: join_paths(py.site_packages_dir(), 'f2py_examples')) For more details and advanced usage, see the Meson build guide in the user documentation or refer to SciPy's Meson build files for real-world From edd072fa5debe09a9a23921b53599850925edf80 Mon Sep 17 00:00:00 2001 From: Maanas Arora Date: Wed, 8 Oct 2025 17:18:43 -0400 Subject: [PATCH 0582/1718] ENH: Add registration for sort and argsort in new convenience ufunc registration function Includes the following commits: DOC: Document `PyUFunc_AddLoopsFromSpecs` and new sorting / argsorting arraymethods ENH: Update field names in PyUFunc_LoopSlot struct ENH: Update C-API hash (15, NumPy 2.4) ENH: Add 'PyUFunc_LoopSlots' to ufunc_funcs_api with MinVersion "2.4" ENH: Update hash for version 21 (NumPy 2.4.0) ENH: Move PyUFunc_LoopSlot struct definition and update version hash for NumPy 2.4.0 REF: Move PyUFunc_LoopSlot struct definition to dtype_api.h DOC: Clarify ArrayMethod API registration process in array.rst DOC: Update sorting parameters struct in array.rst to use correct member (flags) ENH: Clarify version 21 description in cversions REF: Remove unnecessary error message for non-ufunc attributes in PyUFunc_AddLoopsFromSpecs REF: Refactor ufunc initialization and consolidate sort loops in sfloat_init_ufuncs REF: Simplify ufunc import by replacing PyObject_GetAttrString with npy_import REF: Rename ufunc_name to name in PyUFunc_LoopSlot REF: Initialize res variable to -1 in sfloat_init_ufuncs for 
clarity STYLE: Refactor error message formatting in PyUFunc_AddLoopsFromSpecs for clarity REF: Simplify redundant Py_DECREF call in PyUFunc_AddLoopsFromSpecs DOC: Improve documentation for PyUFunc_AddLoopsFromSpecs and sorting ENH: Optimize ufunc handling in PyUFunc_AddLoopsFromSpecs by using cached imports for sort and argsort DOC: Clarify pointer type Co-authored-by: Sebastian Berg REF: Optimize array definition of loop slots for sfloat Co-authored-by: Sebastian Berg REF: Improve error message for missing ufunc by removing module name BUG: Fix incorrect use of `npy_cache_import_runtime` REF: Reuse dtypes variable in sfloat ufunc init REF: Cache sort and argsort functions in npy_runtime_imports_struct BUG: Add missing header file for and add error handling to `numpy_cache_import_runtime` BUG: Improve error handling for numpy runtime imports in PyUFunc_AddLoopsFromSpecs BUG: Replace npy_cache_import_runtime with direct npy_import calls for sort and argsort DOC: Fix code annotations and references in sorting and argsorting docs BUG: Optimize numpy import calls for sort and argsort in PyUFunc_AddLoopsFromSpecs BUG: Remove unused references to _sort and _argsort in npy_runtime_imports_struct BUG: Replace npy_import calls with npy_cache_import_runtime for sort and argsort in PyUFunc_AddLoopsFromSpecs DOC: Add versionadded directive for PyUFunc_AddLoopsFromSpecs in array.rst BUG: Add missing Py_DECREF for ufunc in PyUFunc_AddLoopsFromSpecs BUG: Replace npy_cache_import calls with npy_import for sort and argsort, simplify decrefs --- doc/source/reference/c-api/array.rst | 58 ++++++ numpy/_core/code_generators/cversions.txt | 6 +- numpy/_core/code_generators/numpy_api.py | 4 +- numpy/_core/include/numpy/dtype_api.h | 7 + numpy/_core/src/umath/_scaled_float_dtype.c | 213 +++++++++----------- numpy/_core/src/umath/dispatching.cpp | 78 +++++++ numpy/_core/src/umath/dispatching.h | 3 + 7 files changed, 248 insertions(+), 121 deletions(-) diff --git 
a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst index 95f5cc033b4d..605516097274 100644 --- a/doc/source/reference/c-api/array.rst +++ b/doc/source/reference/c-api/array.rst @@ -1886,6 +1886,34 @@ with the rest of the ArrayMethod API. the main ufunc registration function. This adds a new implementation/loop to a ufunc. It replaces `PyUFunc_RegisterLoopForType`. +.. c:type:: PyUFunc_LoopSlot + + Structure used to add multiple loops to ufuncs from ArrayMethod specs. + This is used in `PyUFunc_AddLoopsFromSpecs`. + + .. c:struct:: PyUFunc_LoopSlot + + .. c:member:: const char *name + + The name of the ufunc to add the loop to. + + .. c:member:: PyArrayMethod_Spec *spec + + The ArrayMethod spec to use to create the loop. + +.. c:function:: int PyUFunc_AddLoopsFromSpecs( \ + PyUFunc_LoopSlot *slots) + + .. versionadded:: 2.4 + + Add multiple loops to ufuncs from ArrayMethod specs. This also + handles the registration of methods for the ufunc-like functions + ``sort`` and ``argsort``. See :ref:`array-methods-sorting` for details. + + The ``slots`` argument must be a NULL-terminated array of + `PyUFunc_LoopSlot` (see above), which give the name of the + ufunc and spec needed to create the loop. + .. c:function:: int PyUFunc_AddPromoter( \ PyObject *ufunc, PyObject *DType_tuple, PyObject *promoter) @@ -2036,6 +2064,36 @@ code: Py_INCREF(loop_descrs[2]); } +.. _array-methods-sorting: + +Sorting and Argsorting +~~~~~~~~~~~~~~~~~~~~~~~ + +Sorting and argsorting methods for dtypes can be registered using the +ArrayMethod API. This is done by adding an ArrayMethod spec with the name +``"sort"`` or ``"argsort"`` respectively. The spec must have ``nin=1`` +and ``nout=1`` for both sort and argsort. Sorting is inplace, hence we +enforce that ``data[0] == data[1]``. Argsorting returns a new array of +indices, so the output must be of ``NPY_INTP`` type. 
+ +The ``context`` passed to the loop contains the ``parameters`` field which +for these operations is a ``PyArrayMethod_SortParameters *`` struct. This +struct contains a ``flags`` field which is a bitwise OR of ``NPY_SORTKIND`` +values indicating the kind of sort to perform (that is, whether it is a +stable and/or descending sort). If the strided loop depends on the flags, +a good way to deal with this is to define :c:macro:`NPY_METH_get_loop`, +and not set any of the other loop slots. + +.. c:struct:: PyArrayMethod_SortParameters + + .. c:member:: NPY_SORTKIND flags + + The flags passed to the sort operation. This is a bitwise OR of + ``NPY_SORTKIND`` values indicating the kind of sort to perform. + +These specs can be registered using :c:func:`PyUFunc_AddLoopsFromSpecs` +along with other ufunc loops. + API for calling array methods ----------------------------- diff --git a/numpy/_core/code_generators/cversions.txt b/numpy/_core/code_generators/cversions.txt index a04dd784c67f..3a480dfd4ab3 100644 --- a/numpy/_core/code_generators/cversions.txt +++ b/numpy/_core/code_generators/cversions.txt @@ -80,5 +80,7 @@ 0x00000013 = 2b8f1f4da822491ff030b2b37dff07e3 # Version 20 (NumPy 2.3.0) 0x00000014 = e56b74d32a934d085e7c3414cb9999b8, -# Version 21 (NumPy 2.4.0) Add 'same_value' casting, header additions -0x00000015 = e56b74d32a934d085e7c3414cb9999b8, +# Version 21 (NumPy 2.4.0) +# Add 'same_value' casting, header additions. 
+# General loop registration for ufuncs, sort, and argsort +0x00000015 = fbd24fc5b2ba4f7cd3606ec6128de7a5 diff --git a/numpy/_core/code_generators/numpy_api.py b/numpy/_core/code_generators/numpy_api.py index b366dc99dfb8..ac108aa20370 100644 --- a/numpy/_core/code_generators/numpy_api.py +++ b/numpy/_core/code_generators/numpy_api.py @@ -412,7 +412,7 @@ def get_annotations(): } ufunc_types_api = { - 'PyUFunc_Type': (0,) + 'PyUFunc_Type': (0,), } ufunc_funcs_api = { @@ -468,6 +468,8 @@ def get_annotations(): 'PyUFunc_AddPromoter': (44, MinVersion("2.0")), 'PyUFunc_AddWrappingLoop': (45, MinVersion("2.0")), 'PyUFunc_GiveFloatingpointErrors': (46, MinVersion("2.0")), + # End 2.0 API + 'PyUFunc_AddLoopsFromSpecs': (47, MinVersion("2.4")), } # List of all the dicts which define the C API diff --git a/numpy/_core/include/numpy/dtype_api.h b/numpy/_core/include/numpy/dtype_api.h index 2222ff342253..5ac964782ec0 100644 --- a/numpy/_core/include/numpy/dtype_api.h +++ b/numpy/_core/include/numpy/dtype_api.h @@ -145,6 +145,13 @@ typedef struct { } PyArrayMethod_Spec; +// This is used for the convenience function `PyUFunc_AddLoopsFromSpecs` +typedef struct { + const char *name; + PyArrayMethod_Spec *spec; +} PyUFunc_LoopSlot; + + /* * ArrayMethod slots * ----------------- diff --git a/numpy/_core/src/umath/_scaled_float_dtype.c b/numpy/_core/src/umath/_scaled_float_dtype.c index 463ccbffae0b..574891360219 100644 --- a/numpy/_core/src/umath/_scaled_float_dtype.c +++ b/numpy/_core/src/umath/_scaled_float_dtype.c @@ -777,92 +777,6 @@ promote_to_sfloat(PyUFuncObject *NPY_UNUSED(ufunc), } -/* - * Add new ufunc loops (this is somewhat clumsy as of writing it, but should - * get less so with the introduction of public API). 
- */ -static int -sfloat_init_ufuncs(void) { - PyArray_DTypeMeta *dtypes[3] = { - &PyArray_SFloatDType, &PyArray_SFloatDType, &PyArray_SFloatDType}; - PyType_Slot slots[3] = {{0, NULL}}; - PyArrayMethod_Spec spec = { - .nin = 2, - .nout =1, - .dtypes = dtypes, - .slots = slots, - }; - spec.name = "sfloat_multiply"; - spec.casting = NPY_NO_CASTING; - - slots[0].slot = NPY_METH_resolve_descriptors; - slots[0].pfunc = &multiply_sfloats_resolve_descriptors; - slots[1].slot = NPY_METH_strided_loop; - slots[1].pfunc = &multiply_sfloats; - PyBoundArrayMethodObject *bmeth = PyArrayMethod_FromSpec_int(&spec, 0); - if (bmeth == NULL) { - return -1; - } - int res = sfloat_add_loop("multiply", - bmeth->dtypes, (PyObject *)bmeth->method); - Py_DECREF(bmeth); - if (res < 0) { - return -1; - } - - spec.name = "sfloat_add"; - spec.casting = NPY_SAME_KIND_CASTING; - - slots[0].slot = NPY_METH_resolve_descriptors; - slots[0].pfunc = &add_sfloats_resolve_descriptors; - slots[1].slot = NPY_METH_strided_loop; - slots[1].pfunc = &add_sfloats; - bmeth = PyArrayMethod_FromSpec_int(&spec, 0); - if (bmeth == NULL) { - return -1; - } - res = sfloat_add_loop("add", - bmeth->dtypes, (PyObject *)bmeth->method); - Py_DECREF(bmeth); - if (res < 0) { - return -1; - } - - /* N.B.: Wrapping isn't actually correct if scaling can be negative */ - if (sfloat_add_wrapping_loop("hypot", dtypes) < 0) { - return -1; - } - - /* - * Add a promoter for both directions of multiply with double. 
- */ - PyArray_DTypeMeta *double_DType = &PyArray_DoubleDType; - - PyArray_DTypeMeta *promoter_dtypes[3] = { - &PyArray_SFloatDType, double_DType, NULL}; - - PyObject *promoter = PyCapsule_New( - &promote_to_sfloat, "numpy._ufunc_promoter", NULL); - if (promoter == NULL) { - return -1; - } - res = sfloat_add_loop("multiply", promoter_dtypes, promoter); - if (res < 0) { - Py_DECREF(promoter); - return -1; - } - promoter_dtypes[0] = double_DType; - promoter_dtypes[1] = &PyArray_SFloatDType; - res = sfloat_add_loop("multiply", promoter_dtypes, promoter); - Py_DECREF(promoter); - if (res < 0) { - return -1; - } - - return 0; -} - - NPY_NO_EXPORT int sfloat_stable_sort_loop( PyArrayMethod_Context *context, @@ -1044,54 +958,121 @@ sfloat_argsort_resolve_descriptors( } +/* + * Add new ufunc loops (this is somewhat clumsy as of writing it, but should + * get less so with the introduction of public API). + */ static int -sfloat_init_sort(void) -{ - PyArray_DTypeMeta *dtypes[2] = {&PyArray_SFloatDType, &PyArray_SFloatDType}; - PyType_Slot slots[3] = { +sfloat_init_ufuncs(void) { + PyArray_DTypeMeta *all_sfloat_dtypes[3] = { + &PyArray_SFloatDType, &PyArray_SFloatDType, &PyArray_SFloatDType}; + PyType_Slot multiply_slots[3] = { + {NPY_METH_resolve_descriptors, &multiply_sfloats_resolve_descriptors}, + {NPY_METH_strided_loop, &multiply_sfloats}, + {0, NULL} + }; + PyArrayMethod_Spec multiply_spec = { + .nin = 2, + .nout = 1, + .dtypes = all_sfloat_dtypes, + .slots = multiply_slots, + .name = "sfloat_multiply", + .casting = NPY_NO_CASTING, + }; + + PyType_Slot add_slots[3] = { + {NPY_METH_resolve_descriptors, &add_sfloats_resolve_descriptors}, + {NPY_METH_strided_loop, &add_sfloats}, + {0, NULL} + }; + PyArrayMethod_Spec add_spec = { + .nin = 2, + .nout = 1, + .dtypes = all_sfloat_dtypes, + .slots = add_slots, + .name = "sfloat_add", + .casting = NPY_SAME_KIND_CASTING, + }; + + PyArray_DTypeMeta *sort_dtypes[2] = {&PyArray_SFloatDType, &PyArray_SFloatDType}; + PyType_Slot 
sort_slots[3] = { {NPY_METH_resolve_descriptors, &sfloat_sort_resolve_descriptors}, {NPY_METH_get_loop, &sfloat_sort_get_loop}, {0, NULL} }; - PyArrayMethod_Spec spec = { + PyArrayMethod_Spec sort_spec = { .nin = 1, .nout = 1, - .dtypes = dtypes, - .slots = slots, + .dtypes = sort_dtypes, + .slots = sort_slots, }; - spec.name = "sfloat_sort"; - spec.casting = NPY_NO_CASTING; - spec.flags = NPY_METH_NO_FLOATINGPOINT_ERRORS; + sort_spec.name = "sfloat_sort"; + sort_spec.casting = NPY_NO_CASTING; + sort_spec.flags = NPY_METH_NO_FLOATINGPOINT_ERRORS; + + PyArray_DTypeMeta *argsort_dtypes[2] = {&PyArray_SFloatDType, &PyArray_IntpDType}; + PyType_Slot argsort_slots[3] = { + {NPY_METH_resolve_descriptors, &sfloat_argsort_resolve_descriptors}, + {NPY_METH_get_loop, &sfloat_argsort_get_loop}, + {0, NULL} + }; + PyArrayMethod_Spec argsort_spec = { + .nin = 1, + .nout = 1, + .dtypes = argsort_dtypes, + .slots = argsort_slots, + }; + argsort_spec.name = "sfloat_argsort"; + argsort_spec.casting = NPY_NO_CASTING; + argsort_spec.flags = NPY_METH_NO_FLOATINGPOINT_ERRORS; + + PyUFunc_LoopSlot loops[] = { + {"multiply", &multiply_spec}, + {"add", &add_spec}, + {"sort", &sort_spec}, + {"argsort", &argsort_spec}, + {NULL, NULL} + }; + if (PyUFunc_AddLoopsFromSpecs(loops) < 0) { + return -1; + } - PyBoundArrayMethodObject *sort_meth = PyArrayMethod_FromSpec_int(&spec, 0); - if (sort_meth == NULL) { + /* N.B.: Wrapping isn't actually correct if scaling can be negative */ + if (sfloat_add_wrapping_loop("hypot", all_sfloat_dtypes) < 0) { return -1; } - // TODO: once registration method is in place, use it instead of setting hidden slot - NPY_DT_SLOTS(&PyArray_SFloatDType)->sort_meth = sort_meth->method; - Py_INCREF(sort_meth->method); - Py_DECREF(sort_meth); - spec.name = "sfloat_argsort"; - dtypes[1] = &PyArray_IntpDType; + /* + * Add a promoter for both directions of multiply with double. 
+ */ + int res = -1; + PyArray_DTypeMeta *double_DType = &PyArray_DoubleDType; - slots[0].slot = NPY_METH_resolve_descriptors; - slots[0].pfunc = &sfloat_argsort_resolve_descriptors; - slots[1].slot = NPY_METH_get_loop; - slots[1].pfunc = &sfloat_argsort_get_loop; + PyArray_DTypeMeta *promoter_dtypes[3] = { + &PyArray_SFloatDType, double_DType, NULL}; - // TODO: once registration method is in place, use it instead of setting hidden slot - PyBoundArrayMethodObject *argsort_meth = PyArrayMethod_FromSpec_int(&spec, 0); - if (argsort_meth == NULL) { + PyObject *promoter = PyCapsule_New( + &promote_to_sfloat, "numpy._ufunc_promoter", NULL); + if (promoter == NULL) { + return -1; + } + res = sfloat_add_loop("multiply", promoter_dtypes, promoter); + if (res < 0) { + Py_DECREF(promoter); + return -1; + } + promoter_dtypes[0] = double_DType; + promoter_dtypes[1] = &PyArray_SFloatDType; + res = sfloat_add_loop("multiply", promoter_dtypes, promoter); + Py_DECREF(promoter); + if (res < 0) { return -1; } - NPY_DT_SLOTS(&PyArray_SFloatDType)->argsort_meth = argsort_meth->method; - Py_INCREF(argsort_meth->method); - Py_DECREF(argsort_meth); return 0; } + /* * Python entry point, exported via `umathmodule.h` and `multiarraymodule.c`. * TODO: Should be moved when the necessary API is not internal anymore. 
@@ -1128,10 +1109,6 @@ get_sfloat_dtype(PyObject *NPY_UNUSED(mod), PyObject *NPY_UNUSED(args)) return NULL; } - if (sfloat_init_sort() < 0) { - return NULL; - } - npy_thread_unsafe_state.get_sfloat_dtype_initialized = NPY_TRUE; return (PyObject *)&PyArray_SFloatDType; } diff --git a/numpy/_core/src/umath/dispatching.cpp b/numpy/_core/src/umath/dispatching.cpp index ba98a9b5c5d1..c40ddd3b9f39 100644 --- a/numpy/_core/src/umath/dispatching.cpp +++ b/numpy/_core/src/umath/dispatching.cpp @@ -47,6 +47,7 @@ #include "numpy/ndarraytypes.h" #include "numpy/npy_3kcompat.h" +#include "npy_import.h" #include "common.h" #include "npy_pycompat.h" @@ -193,6 +194,83 @@ PyUFunc_AddLoopFromSpec_int(PyObject *ufunc, PyArrayMethod_Spec *spec, int priv) } +/*UFUNC_API + * Add multiple loops to ufuncs from ArrayMethod specs. This also + * handles the registration of sort and argsort methods for dtypes + * from ArrayMethod specs. + */ +NPY_NO_EXPORT int +PyUFunc_AddLoopsFromSpecs(PyUFunc_LoopSlot *slots) +{ + static PyObject *sort = NULL; + static PyObject *argsort = NULL; + if (sort == NULL) { + sort = npy_import("numpy", "sort"); + if (sort == NULL) { + return -1; + } + } + if (argsort == NULL) { + argsort = npy_import("numpy", "argsort"); + if (argsort == NULL) { + return -1; + } + } + + PyUFunc_LoopSlot *slot; + for (slot = slots; slot->name != NULL; slot++) { + PyObject *ufunc = npy_import("numpy", slot->name); + if (ufunc == NULL) { + return -1; + } + + if (ufunc == sort) { + Py_DECREF(ufunc); + + PyArray_DTypeMeta *dtype = slot->spec->dtypes[0]; + PyBoundArrayMethodObject *sort_meth = PyArrayMethod_FromSpec_int(slot->spec, 0); + if (sort_meth == NULL) { + PyErr_Format(PyExc_TypeError, "Failed to create sort method for %R", dtype); + return -1; + } + + NPY_DT_SLOTS(dtype)->sort_meth = sort_meth->method; + Py_INCREF(sort_meth->method); + Py_DECREF(sort_meth); + } + else if (ufunc == argsort) { + Py_DECREF(ufunc); + + PyArray_DTypeMeta *dtype = slot->spec->dtypes[0]; + 
PyBoundArrayMethodObject *argsort_meth = PyArrayMethod_FromSpec_int(slot->spec, 0); + if (argsort_meth == NULL) { + PyErr_Format(PyExc_TypeError, "Failed to create argsort method for %R", dtype); + return -1; + } + + NPY_DT_SLOTS(dtype)->argsort_meth = argsort_meth->method; + Py_INCREF(argsort_meth->method); + Py_DECREF(argsort_meth); + } + else { + if (!PyObject_TypeCheck(ufunc, &PyUFunc_Type)) { + PyErr_Format(PyExc_TypeError, "%s was not a ufunc!", slot->name); + Py_DECREF(ufunc); + return -1; + } + + int ret = PyUFunc_AddLoopFromSpec_int(ufunc, slot->spec, 0); + Py_DECREF(ufunc); + if (ret < 0) { + return -1; + } + } + } + + return 0; +} + + /** * Resolves the implementation to use, this uses typical multiple dispatching * methods of finding the best matching implementation or resolver. diff --git a/numpy/_core/src/umath/dispatching.h b/numpy/_core/src/umath/dispatching.h index 95bcb32bf0ce..7ca8bd7a1598 100644 --- a/numpy/_core/src/umath/dispatching.h +++ b/numpy/_core/src/umath/dispatching.h @@ -16,6 +16,9 @@ PyUFunc_AddLoop(PyUFuncObject *ufunc, PyObject *info, int ignore_duplicate); NPY_NO_EXPORT int PyUFunc_AddLoopFromSpec_int(PyObject *ufunc, PyArrayMethod_Spec *spec, int priv); +NPY_NO_EXPORT int +PyUFunc_AddLoopsFromSpecs(PyUFunc_LoopSlot *slots); + NPY_NO_EXPORT PyArrayMethodObject * promote_and_get_ufuncimpl(PyUFuncObject *ufunc, PyArrayObject *const ops[], From 7bc78c2a3f98a357d8a5859066833f1fe8ec00ce Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Sun, 12 Oct 2025 10:56:40 +0200 Subject: [PATCH 0583/1718] Go back to cached import for sort/argsort --- numpy/_core/src/common/npy_import.h | 10 ++++++++++ numpy/_core/src/umath/dispatching.cpp | 22 ++++++++-------------- 2 files changed, 18 insertions(+), 14 deletions(-) diff --git a/numpy/_core/src/common/npy_import.h b/numpy/_core/src/common/npy_import.h index 87944989e95d..0ec70a0f5142 100644 --- a/numpy/_core/src/common/npy_import.h +++ b/numpy/_core/src/common/npy_import.h @@ -6,6 +6,10 @@ 
#include "numpy/npy_common.h" #include "npy_atomic.h" +#ifdef __cplusplus +extern "C" { +#endif + /* * Cached references to objects obtained via an import. All of these are * can be initialized at any time by npy_cache_import_runtime. @@ -45,6 +49,8 @@ typedef struct npy_runtime_imports_struct { PyObject *_var; PyObject *_view_is_safe; PyObject *_void_scalar_to_string; + PyObject *sort; + PyObject *argsort; } npy_runtime_imports_struct; NPY_VISIBILITY_HIDDEN extern npy_runtime_imports_struct npy_runtime_imports; @@ -112,4 +118,8 @@ npy_cache_import_runtime(const char *module, const char *attr, PyObject **obj) { NPY_NO_EXPORT int init_import_mutex(void); +#ifdef __cplusplus +} +#endif + #endif /* NUMPY_CORE_SRC_COMMON_NPY_IMPORT_H_ */ diff --git a/numpy/_core/src/umath/dispatching.cpp b/numpy/_core/src/umath/dispatching.cpp index c40ddd3b9f39..7d8935d4bf40 100644 --- a/numpy/_core/src/umath/dispatching.cpp +++ b/numpy/_core/src/umath/dispatching.cpp @@ -202,19 +202,13 @@ PyUFunc_AddLoopFromSpec_int(PyObject *ufunc, PyArrayMethod_Spec *spec, int priv) NPY_NO_EXPORT int PyUFunc_AddLoopsFromSpecs(PyUFunc_LoopSlot *slots) { - static PyObject *sort = NULL; - static PyObject *argsort = NULL; - if (sort == NULL) { - sort = npy_import("numpy", "sort"); - if (sort == NULL) { - return -1; - } + if (npy_cache_import_runtime( + "numpy", "sort", &npy_runtime_imports.sort) < 0) { + return -1; } - if (argsort == NULL) { - argsort = npy_import("numpy", "argsort"); - if (argsort == NULL) { - return -1; - } + if (npy_cache_import_runtime( + "numpy", "argsort", &npy_runtime_imports.argsort) < 0) { + return -1; } PyUFunc_LoopSlot *slot; @@ -224,7 +218,7 @@ PyUFunc_AddLoopsFromSpecs(PyUFunc_LoopSlot *slots) return -1; } - if (ufunc == sort) { + if (ufunc == npy_runtime_imports.sort) { Py_DECREF(ufunc); PyArray_DTypeMeta *dtype = slot->spec->dtypes[0]; @@ -238,7 +232,7 @@ PyUFunc_AddLoopsFromSpecs(PyUFunc_LoopSlot *slots) Py_INCREF(sort_meth->method); Py_DECREF(sort_meth); } - else if 
(ufunc == argsort) { + else if (ufunc == npy_runtime_imports.argsort) { Py_DECREF(ufunc); PyArray_DTypeMeta *dtype = slot->spec->dtypes[0]; From b5ee0f0427c006ccbf139626308fd47232bb4000 Mon Sep 17 00:00:00 2001 From: Maanas Arora Date: Sun, 12 Oct 2025 08:20:13 -0400 Subject: [PATCH 0584/1718] DOC: Add release notes for #29900 --- doc/release/upcoming_changes/29900.c_api.rst | 5 +++++ doc/release/upcoming_changes/29900.new_feature.rst | 9 +++++++++ 2 files changed, 14 insertions(+) create mode 100644 doc/release/upcoming_changes/29900.c_api.rst create mode 100644 doc/release/upcoming_changes/29900.new_feature.rst diff --git a/doc/release/upcoming_changes/29900.c_api.rst b/doc/release/upcoming_changes/29900.c_api.rst new file mode 100644 index 000000000000..b29014ac95fc --- /dev/null +++ b/doc/release/upcoming_changes/29900.c_api.rst @@ -0,0 +1,5 @@ +A new `PyUFunc_AddLoopsFromSpecs` convenience function has been added to the C API. +----------------------------------------------------------------------------------- +This function allows adding multiple ufunc loops from their specs in one call using +a NULL-terminated array of `PyUFunc_LoopSlot` structs. It allows registering +sorting and argsorting loops using the new ArrayMethod API. \ No newline at end of file diff --git a/doc/release/upcoming_changes/29900.new_feature.rst b/doc/release/upcoming_changes/29900.new_feature.rst new file mode 100644 index 000000000000..1799b6043e29 --- /dev/null +++ b/doc/release/upcoming_changes/29900.new_feature.rst @@ -0,0 +1,9 @@ +DType sorting and argsorting supports the ArrayMethod API +--------------------------------------------------------- +User-defined dtypes can now implement custom sorting and argsorting using +the ArrayMethod API. This mechanism can be used in place of the `PyArray_ArrFuncs` +slots which may be deprecated in the future. 
+ +The sorting and argsorting methods are registered by passing the arraymethod +specs that implement the operations to the new `PyUFunc_AddLoopsFromSpecs` function. +See the ArrayMethod API documentation for details. \ No newline at end of file From ba50a255e881fcf4b2794e696c29a866d0ec6b72 Mon Sep 17 00:00:00 2001 From: Marten Henric van Kerkwijk Date: Sun, 12 Oct 2025 10:56:26 -0400 Subject: [PATCH 0585/1718] MAINT: Just pass on errors instead of raising. --- numpy/_core/src/umath/dispatching.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/numpy/_core/src/umath/dispatching.cpp b/numpy/_core/src/umath/dispatching.cpp index 7d8935d4bf40..cb40793c4e7f 100644 --- a/numpy/_core/src/umath/dispatching.cpp +++ b/numpy/_core/src/umath/dispatching.cpp @@ -224,7 +224,6 @@ PyUFunc_AddLoopsFromSpecs(PyUFunc_LoopSlot *slots) PyArray_DTypeMeta *dtype = slot->spec->dtypes[0]; PyBoundArrayMethodObject *sort_meth = PyArrayMethod_FromSpec_int(slot->spec, 0); if (sort_meth == NULL) { - PyErr_Format(PyExc_TypeError, "Failed to create sort method for %R", dtype); return -1; } @@ -238,7 +237,6 @@ PyUFunc_AddLoopsFromSpecs(PyUFunc_LoopSlot *slots) PyArray_DTypeMeta *dtype = slot->spec->dtypes[0]; PyBoundArrayMethodObject *argsort_meth = PyArrayMethod_FromSpec_int(slot->spec, 0); if (argsort_meth == NULL) { - PyErr_Format(PyExc_TypeError, "Failed to create argsort method for %R", dtype); return -1; } From a2519b560f14b5f86030fb63b89a97cef591f302 Mon Sep 17 00:00:00 2001 From: Swayam Date: Mon, 13 Oct 2025 17:09:58 +0530 Subject: [PATCH 0586/1718] DOC: Documentation related finfo refactors and new slot addition in #29836 (#29889) This is a follow-up PR of work happened in gh-29836 * The deprecated MachAr runtime discovery mechanism has been removed. 
* np.finfo fetches the constants provided by the compiler macros * new slot to fetch the dtype related constants --- doc/release/upcoming_changes/29836.c_api.rst | 15 +++ .../upcoming_changes/29836.improvement.rst | 26 ++++ doc/source/reference/c-api/array.rst | 115 ++++++++++++++++++ doc/source/reference/c-api/dtype.rst | 1 + numpy/_core/getlimits.py | 13 +- 5 files changed, 168 insertions(+), 2 deletions(-) create mode 100644 doc/release/upcoming_changes/29836.c_api.rst create mode 100644 doc/release/upcoming_changes/29836.improvement.rst diff --git a/doc/release/upcoming_changes/29836.c_api.rst b/doc/release/upcoming_changes/29836.c_api.rst new file mode 100644 index 000000000000..9ac5478c742a --- /dev/null +++ b/doc/release/upcoming_changes/29836.c_api.rst @@ -0,0 +1,15 @@ +New ``NPY_DT_get_constant`` slot for DType constant retrieval +------------------------------------------------------------- +A new slot ``NPY_DT_get_constant`` has been added to the DType API, allowing +dtype implementations to provide constant values such as machine limits and +special values. The slot function has the signature:: + + int get_constant(PyArray_Descr *descr, int constant_id, void *ptr) + +It returns 1 on success, 0 if the constant is not available, or -1 on error. +The function is always called with the GIL held and may write to unaligned memory. + +Integer constants (marked with the ``1 << 16`` bit) return ``npy_intp`` values, +while floating-point constants return values of the dtype's native type. + +Implementing this can be used by user DTypes to provide `numpy.finfo` values. 
\ No newline at end of file diff --git a/doc/release/upcoming_changes/29836.improvement.rst b/doc/release/upcoming_changes/29836.improvement.rst new file mode 100644 index 000000000000..0d7df429d125 --- /dev/null +++ b/doc/release/upcoming_changes/29836.improvement.rst @@ -0,0 +1,26 @@ +``numpy.finfo`` Refactor +------------------------ +The ``numpy.finfo`` class has been completely refactored to obtain floating-point +constants directly from C compiler macros rather than deriving them at runtime. +This provides better accuracy, platform compatibility and corrected +several attribute calculations: + +* Constants like ``eps``, ``min``, ``max``, ``smallest_normal``, and + ``smallest_subnormal`` now come directly from standard C macros (``FLT_EPSILON``, + ``DBL_MIN``, etc.), ensuring platform-correct values. + +* The deprecated ``MachAr`` runtime discovery mechanism has been removed. + +* Derived attributes have been corrected to match standard definitions: + ``machep`` and ``negep`` now use ``int(log2(eps))``; ``nexp`` accounts for + all exponent patterns; ``nmant`` excludes the implicit bit; and ``minexp`` + follows the C standard definition. + +* longdouble constants, specifically ``smallest_normal``, now follow the + C standard definitions as per the respective platform. + +* Special handling added for PowerPC's IBM double-double format. + +* New test suite added in ``test_finfo.py`` to validate all + ``finfo`` properties against expected machine arithmetic values for + float16, float32, and float64 types. \ No newline at end of file diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst index 605516097274..00a0a399d601 --- a/doc/source/reference/c-api/array.rst +++ b/doc/source/reference/c-api/array.rst @@ -3594,6 +3594,121 @@ member of ``PyArrayDTypeMeta_Spec`` struct. force newly created arrays to have a newly created descriptor instance, no matter what input descriptor is provided by a user. +.. 
c:macro:: NPY_DT_get_constant + +.. c:type:: int (PyArrayDTypeMeta_GetConstant)( \ + PyArray_Descr *descr, int constant_id, void *out) + + If defined, allows the DType to expose constant values such as machine + limits, special values (infinity, NaN), and floating-point characteristics. + The *descr* is the descriptor instance, *constant_id* is one of the + ``NPY_CONSTANT_*`` macros, and *out* is a pointer to uninitialized memory + where the constant value should be written. The memory pointed to by *out* + may be unaligned and is uninitialized. + Returns 1 on success, 0 if the constant is not available, + or -1 with an error set. + + **Constant IDs**: + + The following constant IDs are defined for retrieving dtype-specific values: + + **Basic constants** (available for all numeric types): + + .. c:macro:: NPY_CONSTANT_zero + + The zero value for the dtype. + + .. c:macro:: NPY_CONSTANT_one + + The one value for the dtype. + + .. c:macro:: NPY_CONSTANT_minimum_finite + + The minimum finite value representable by the dtype. For floating-point types, + this is the most negative finite value (e.g., ``-FLT_MAX``). + + .. c:macro:: NPY_CONSTANT_maximum_finite + + The maximum finite value representable by the dtype. + + **Floating-point special values**: + + .. c:macro:: NPY_CONSTANT_inf + + Positive infinity (only for floating-point types). + + .. c:macro:: NPY_CONSTANT_ninf + + Negative infinity (only for floating-point types). + + .. c:macro:: NPY_CONSTANT_nan + + Not-a-Number (only for floating-point types). + + **Floating-point characteristics** (values of the dtype's native type): + + .. c:macro:: NPY_CONSTANT_finfo_radix + + The radix (base) of the floating-point representation. This is 2 for all + floating-point types. + + .. c:macro:: NPY_CONSTANT_finfo_eps + + Machine epsilon: the difference between 1.0 and the next representable value + greater than 1.0. Corresponds to C macros like ``FLT_EPSILON``, ``DBL_EPSILON``. + + .. 
note:: + For long double in IBM double-double format (PowerPC), this is defined as + ``0x1p-105L`` (2^-105) based on the ~106 bits of mantissa precision. + + .. c:macro:: NPY_CONSTANT_finfo_epsneg + + The difference between 1.0 and the next representable value less than 1.0. + Typically ``eps / radix`` for binary floating-point types. + + .. c:macro:: NPY_CONSTANT_finfo_smallest_normal + + The smallest positive normalized floating-point number. Corresponds to C + macros like ``FLT_MIN``, ``DBL_MIN``. This is the smallest value with a + leading 1 bit in the mantissa. + + .. c:macro:: NPY_CONSTANT_finfo_smallest_subnormal + + The smallest positive subnormal (denormalized) floating-point number. + Corresponds to C macros like ``FLT_TRUE_MIN``, ``DBL_TRUE_MIN``. This is + the smallest representable positive value, with leading 0 bits in the mantissa. + + **Floating-point characteristics** (integer values, type ``npy_intp``): + + These constants return integer metadata about the floating-point representation. + They are marked with the ``1 << 16`` bit to indicate they return ``npy_intp`` + values rather than the dtype's native type. + + .. c:macro:: NPY_CONSTANT_finfo_nmant + + Number of mantissa bits (excluding the implicit leading bit). For example, + IEEE 754 binary64 (double) has 52 explicit mantissa bits, so this returns 52. + Corresponds to ``MANT_DIG - 1`` from C standard macros. + + .. c:macro:: NPY_CONSTANT_finfo_min_exp + + Minimum exponent value. This is the minimum negative integer such that the + radix raised to the power of one less than that integer is a normalized + floating-point number. Corresponds to ``MIN_EXP - 1`` from C standard macros + (e.g., ``FLT_MIN_EXP - 1``). + + .. c:macro:: NPY_CONSTANT_finfo_max_exp + + Maximum exponent value. This is the maximum positive integer such that the + radix raised to the power of one less than that integer is a representable + finite floating-point number. 
Corresponds to ``MAX_EXP`` from C standard + macros (e.g., ``FLT_MAX_EXP``). + + .. c:macro:: NPY_CONSTANT_finfo_decimal_digits + + The number of decimal digits of precision. Corresponds to ``DIG`` from C + standard macros (e.g., ``FLT_DIG``, ``DBL_DIG``). + PyArray_ArrFuncs slots ^^^^^^^^^^^^^^^^^^^^^^ diff --git a/doc/source/reference/c-api/dtype.rst b/doc/source/reference/c-api/dtype.rst index 43869d5b4c55..f6b2289ba18a 100644 --- a/doc/source/reference/c-api/dtype.rst +++ b/doc/source/reference/c-api/dtype.rst @@ -502,3 +502,4 @@ format specifier in printf and related commands. .. c:macro:: NPY_UINTP_FMT .. c:macro:: NPY_LONGDOUBLE_FMT + diff --git a/numpy/_core/getlimits.py b/numpy/_core/getlimits.py index b8b6f69cd5b7..3c03d81165fb 100644 --- a/numpy/_core/getlimits.py +++ b/numpy/_core/getlimits.py @@ -89,17 +89,20 @@ class finfo: The largest representable number. maxexp : int The smallest positive power of the base (2) that causes overflow. + Corresponds to the C standard MAX_EXP. min : floating point number of the appropriate type The smallest representable number, typically ``-max``. minexp : int The most negative power of the base (2) consistent with there - being no leading 0's in the mantissa. + being no leading 0's in the mantissa. Corresponds to the C + standard MIN_EXP - 1. negep : int The exponent that yields `epsneg`. nexp : int The number of bits in the exponent including its sign and bias. nmant : int - The number of bits in the mantissa. + The number of explicit bits in the mantissa (excluding the implicit + leading bit for normalized numbers). precision : int The approximate number of decimal digits to which this kind of float is precise. @@ -140,6 +143,12 @@ class finfo: fill the gap between 0 and ``smallest_normal``. However, subnormal numbers may have significantly reduced precision [2]_. + For ``longdouble``, the representation varies across platforms. 
On most + platforms it is IEEE 754 binary128 (quad precision) or binary64-extended + (80-bit extended precision). On PowerPC systems, it may use the IBM + double-double format (a pair of float64 values), which has special + characteristics for precision and range. + This function can also be used for complex data types as well. If used, the output will be the same as the corresponding real float type (e.g. numpy.finfo(numpy.csingle) is the same as numpy.finfo(numpy.single)). From a410b2c9aa2f930e8e852c9b1f1d4a9eed37968e Mon Sep 17 00:00:00 2001 From: jorenham Date: Mon, 13 Oct 2025 17:09:29 +0200 Subject: [PATCH 0587/1718] CI: Run mypy on Python 3.14 --- .github/workflows/mypy.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml index 8045d6bb3969..04db23a22ae3 100644 --- a/.github/workflows/mypy.yml +++ b/.github/workflows/mypy.yml @@ -46,7 +46,7 @@ jobs: fail-fast: false matrix: os_python: - - [macos-latest, '3.13'] + - [macos-latest, '3.14'] - [ubuntu-latest, '3.12'] - [windows-latest, '3.11'] steps: From 2be60cce10aff73552a8c4b51a9b452f4d1d007f Mon Sep 17 00:00:00 2001 From: jorenham Date: Mon, 13 Oct 2025 17:11:41 +0200 Subject: [PATCH 0588/1718] CI: Additional mypy workflow ignore paths --- .github/workflows/mypy.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml index 04db23a22ae3..22130793f106 100644 --- a/.github/workflows/mypy.yml +++ b/.github/workflows/mypy.yml @@ -18,11 +18,14 @@ on: - main - maintenance/** paths-ignore: - - 'benchmarks/' - '.circlecl/' + - '.devcontainer/' + - 'benchmarks/' + - 'branding/' - 'docs/' - 'meson_cpu/' - 'tools/' + - 'vendored-meson/' workflow_dispatch: defaults: From 1718e1c2a20d09412e816857dae8316f04d4f108 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Oct 2025 17:27:08 +0000 Subject: [PATCH 0589/1718] 
MAINT: Bump astral-sh/setup-uv from 7.0.0 to 7.1.0 Bumps [astral-sh/setup-uv](https://github.com/astral-sh/setup-uv) from 7.0.0 to 7.1.0. - [Release notes](https://github.com/astral-sh/setup-uv/releases) - [Commits](https://github.com/astral-sh/setup-uv/compare/eb1897b8dc4b5d5bfe39a428a8f2304605e0983c...3259c6206f993105e3a61b142c2d97bf4b9ef83d) --- updated-dependencies: - dependency-name: astral-sh/setup-uv dependency-version: 7.1.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/mypy.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml index 22130793f106..b64cd8becd8c 100644 --- a/.github/workflows/mypy.yml +++ b/.github/workflows/mypy.yml @@ -58,7 +58,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: astral-sh/setup-uv@eb1897b8dc4b5d5bfe39a428a8f2304605e0983c # v7.0.0 + - uses: astral-sh/setup-uv@3259c6206f993105e3a61b142c2d97bf4b9ef83d # v7.1.0 with: python-version: ${{ matrix.os_python[1] }} activate-environment: true From a52661748694ef018845aefac689acbe71bdee75 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Oct 2025 17:27:14 +0000 Subject: [PATCH 0590/1718] MAINT: Bump pypa/cibuildwheel from 3.2.0 to 3.2.1 Bumps [pypa/cibuildwheel](https://github.com/pypa/cibuildwheel) from 3.2.0 to 3.2.1. - [Release notes](https://github.com/pypa/cibuildwheel/releases) - [Changelog](https://github.com/pypa/cibuildwheel/blob/main/docs/changelog.md) - [Commits](https://github.com/pypa/cibuildwheel/compare/7c619efba910c04005a835b110b057fc28fd6e93...9c00cb4f6b517705a3794b22395aedc36257242c) --- updated-dependencies: - dependency-name: pypa/cibuildwheel dependency-version: 3.2.1 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- .github/workflows/emscripten.yml | 2 +- .github/workflows/wheels.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index 5af18aa14107..e152870efd7d 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -30,7 +30,7 @@ jobs: fetch-tags: true persist-credentials: false - - uses: pypa/cibuildwheel@7c619efba910c04005a835b110b057fc28fd6e93 # v3.2.0 + - uses: pypa/cibuildwheel@9c00cb4f6b517705a3794b22395aedc36257242c # v3.2.1 env: CIBW_PLATFORM: pyodide CIBW_BUILD: cp312-* diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 5294c34cbb63..e53e4bbefc57 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -94,7 +94,7 @@ jobs: fi - name: Build wheels - uses: pypa/cibuildwheel@7c619efba910c04005a835b110b057fc28fd6e93 # v3.2.0 + uses: pypa/cibuildwheel@9c00cb4f6b517705a3794b22395aedc36257242c # v3.2.1 env: CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} From 9ebaa26789905480e1220705fb3c3e4e880bc860 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Oct 2025 17:27:17 +0000 Subject: [PATCH 0591/1718] MAINT: Bump int128/hide-comment-action from 1.44.0 to 1.46.0 Bumps [int128/hide-comment-action](https://github.com/int128/hide-comment-action) from 1.44.0 to 1.46.0. - [Release notes](https://github.com/int128/hide-comment-action/releases) - [Commits](https://github.com/int128/hide-comment-action/compare/9803637eab610cca14ac6f64c42c0d7ffe9327e0...bddc4e774ea6f0b45b9621c3a689db72b9a3cec5) --- updated-dependencies: - dependency-name: int128/hide-comment-action dependency-version: 1.46.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- .github/workflows/mypy_primer_comment.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/mypy_primer_comment.yml b/.github/workflows/mypy_primer_comment.yml index 266d7a8f2074..95e487cd60b4 100644 --- a/.github/workflows/mypy_primer_comment.yml +++ b/.github/workflows/mypy_primer_comment.yml @@ -49,7 +49,7 @@ jobs: return parseInt(fs.readFileSync("pr_number.txt", { encoding: "utf8" })) - name: Hide old comments - uses: int128/hide-comment-action@9803637eab610cca14ac6f64c42c0d7ffe9327e0 # v1.44.0 + uses: int128/hide-comment-action@bddc4e774ea6f0b45b9621c3a689db72b9a3cec5 # v1.46.0 with: token: ${{ secrets.GITHUB_TOKEN }} issue-number: ${{ steps.get-pr-number.outputs.result }} From 27bcb23d80ede3d6d4f4cc4b9d8a3d5dec3e600c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Oct 2025 17:27:21 +0000 Subject: [PATCH 0592/1718] MAINT: Bump actions/dependency-review-action from 4.8.0 to 4.8.1 Bumps [actions/dependency-review-action](https://github.com/actions/dependency-review-action) from 4.8.0 to 4.8.1. - [Release notes](https://github.com/actions/dependency-review-action/releases) - [Commits](https://github.com/actions/dependency-review-action/compare/56339e523c0409420f6c2c9a2f4292bbb3c07dd3...40c09b7dc99638e5ddb0bfd91c1673effc064d8a) --- updated-dependencies: - dependency-name: actions/dependency-review-action dependency-version: 4.8.1 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- .github/workflows/dependency-review.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml index a592a969eb94..797918b2ceff 100644 --- a/.github/workflows/dependency-review.yml +++ b/.github/workflows/dependency-review.yml @@ -19,6 +19,6 @@ jobs: with: persist-credentials: false - name: 'Dependency Review' - uses: actions/dependency-review-action@56339e523c0409420f6c2c9a2f4292bbb3c07dd3 # v4.8.0 + uses: actions/dependency-review-action@40c09b7dc99638e5ddb0bfd91c1673effc064d8a # v4.8.1 with: allow-ghsas: GHSA-cx63-2mw6-8hw5 From b78f89101f0878dd0af18ccab17e5a22b0d2fb4e Mon Sep 17 00:00:00 2001 From: Marten van Kerkwijk Date: Mon, 13 Oct 2025 14:42:44 -0400 Subject: [PATCH 0593/1718] ENH: In spec registration, allow looking up ufuncs in any module. (#29931) This allows the addition of loops from specs also from more complicated names such as numpy:strings.str_len. --- doc/source/reference/c-api/array.rst | 5 ++- numpy/_core/meson.build | 1 + numpy/_core/src/common/npy_import.c | 41 +++++++++++++++++++ numpy/_core/src/common/npy_import.h | 11 ++++- .../src/multiarray/_multiarray_tests.c.src | 13 ++++++ numpy/_core/src/umath/_scaled_float_dtype.c | 9 ++-- numpy/_core/src/umath/dispatching.cpp | 2 +- numpy/_core/tests/test_multiarray.py | 32 +++++++++++++++ 8 files changed, 107 insertions(+), 7 deletions(-) diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst index 00a0a399d601..7a1bace2c63e 100644 --- a/doc/source/reference/c-api/array.rst +++ b/doc/source/reference/c-api/array.rst @@ -1895,7 +1895,10 @@ with the rest of the ArrayMethod API. .. c:member:: const char *name - The name of the ufunc to add the loop to. + The name of the ufunc to add the loop to, in the form like that of + entry points, ``(module ':')? (object '.')* name``, with ``numpy`` + the default module. 
Examples: ``sin``, ``strings.str_len``, + ``numpy.strings:str_len``. .. c:member:: PyArrayMethod_Spec *spec diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index 35a792c29909..055d93e942f6 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -759,6 +759,7 @@ py.extension_module('_multiarray_tests', 'src/common/mem_overlap.c', 'src/common/npy_argparse.c', 'src/common/npy_hashtable.cpp', + 'src/common/npy_import.c', src_file.process('src/common/templ_common.h.src') ], c_args: c_args_common, diff --git a/numpy/_core/src/common/npy_import.c b/numpy/_core/src/common/npy_import.c index cff071e9b522..a0308ff3e4c7 100644 --- a/numpy/_core/src/common/npy_import.c +++ b/numpy/_core/src/common/npy_import.c @@ -19,3 +19,44 @@ init_import_mutex(void) { #endif return 0; } + + +/*! \brief Import a Python object from an entry point string. + + * The name should be of the form "(module ':')? (object '.')* attr". + * If no module is present, it is assumed to be "numpy". + * On error, returns NULL. + */ +NPY_NO_EXPORT PyObject* +npy_import_entry_point(const char *entry_point) { + PyObject *result; + const char *item; + + const char *colon = strchr(entry_point, ':'); + if (colon) { // there is a module. + result = PyUnicode_FromStringAndSize(entry_point, colon - entry_point); + if (result != NULL) { + Py_SETREF(result, PyImport_Import(result)); + } + item = colon + 1; + } + else { + result = PyImport_ImportModule("numpy"); + item = entry_point; + } + + const char *dot = item - 1; + while (result != NULL && dot != NULL) { + item = dot + 1; + dot = strchr(item, '.'); + PyObject *string = PyUnicode_FromStringAndSize( + item, dot ? 
dot - item : strlen(item)); + if (string == NULL) { + Py_DECREF(result); + return NULL; + } + Py_SETREF(result, PyObject_GetAttr(result, string)); + Py_DECREF(string); + } + return result; +} diff --git a/numpy/_core/src/common/npy_import.h b/numpy/_core/src/common/npy_import.h index 0ec70a0f5142..d7fb8d20857d 100644 --- a/numpy/_core/src/common/npy_import.h +++ b/numpy/_core/src/common/npy_import.h @@ -112,12 +112,21 @@ npy_cache_import_runtime(const char *module, const char *attr, PyObject **obj) { #endif Py_DECREF(value); } - return 0; + return 0; } NPY_NO_EXPORT int init_import_mutex(void); +/*! \brief Import a Python object from an entry point string. + + * The name should be of the form "(module ':')? (object '.')* attr". + * If no module is present, it is assumed to be "numpy". + * On error, returns NULL. + */ +NPY_NO_EXPORT PyObject* +npy_import_entry_point(const char *entry_point); + #ifdef __cplusplus } #endif diff --git a/numpy/_core/src/multiarray/_multiarray_tests.c.src b/numpy/_core/src/multiarray/_multiarray_tests.c.src index 8b0c4b3f85d1..b79908e1d5e4 100644 --- a/numpy/_core/src/multiarray/_multiarray_tests.c.src +++ b/numpy/_core/src/multiarray/_multiarray_tests.c.src @@ -7,6 +7,7 @@ #include "numpy/arrayscalars.h" #include "numpy/npy_math.h" #include "numpy/halffloat.h" +#include "npy_import.h" #include "common.h" #include "npy_argparse.h" #include "mem_overlap.h" @@ -2238,6 +2239,15 @@ run_scalar_intp_from_sequence(PyObject *NPY_UNUSED(self), PyObject *obj) return PyArray_IntTupleFromIntp(1, vals); } +static PyObject * +_npy_import_entry_point(PyObject *NPY_UNUSED(self), PyObject *obj) { + PyObject *res = PyUnicode_AsASCIIString(obj); + if (res != NULL) { + Py_SETREF(res, npy_import_entry_point(PyBytes_AS_STRING(res))); + } + return res; +} + static PyMethodDef Multiarray_TestsMethods[] = { {"argparse_example_function", (PyCFunction)argparse_example_function, @@ -2422,6 +2432,9 @@ static PyMethodDef Multiarray_TestsMethods[] = { 
{"run_intp_converter", run_intp_converter, METH_VARARGS, NULL}, + {"npy_import_entry_point", + _npy_import_entry_point, + METH_O, NULL}, {NULL, NULL, 0, NULL} /* Sentinel */ }; diff --git a/numpy/_core/src/umath/_scaled_float_dtype.c b/numpy/_core/src/umath/_scaled_float_dtype.c index 574891360219..da842cd8c55d 100644 --- a/numpy/_core/src/umath/_scaled_float_dtype.c +++ b/numpy/_core/src/umath/_scaled_float_dtype.c @@ -947,7 +947,7 @@ sfloat_argsort_resolve_descriptors( { assert(given_descrs[1] == NULL || given_descrs[1]->type_num == NPY_INTP); assert(PyArray_IsNativeByteOrder(given_descrs[0]->byteorder)); - + loop_descrs[0] = given_descrs[0]; Py_INCREF(loop_descrs[0]); loop_descrs[1] = PyArray_DescrFromType(NPY_INTP); @@ -1026,11 +1026,12 @@ sfloat_init_ufuncs(void) { argsort_spec.casting = NPY_NO_CASTING; argsort_spec.flags = NPY_METH_NO_FLOATINGPOINT_ERRORS; + /* here we chose weirdish names to test the lookup mechanism */ PyUFunc_LoopSlot loops[] = { {"multiply", &multiply_spec}, - {"add", &add_spec}, - {"sort", &sort_spec}, - {"argsort", &argsort_spec}, + {"_core._multiarray_umath.add", &add_spec}, + {"numpy:sort", &sort_spec}, + {"numpy._core.fromnumeric:argsort", &argsort_spec}, {NULL, NULL} }; if (PyUFunc_AddLoopsFromSpecs(loops) < 0) { diff --git a/numpy/_core/src/umath/dispatching.cpp b/numpy/_core/src/umath/dispatching.cpp index cb40793c4e7f..3ce6624bbf4a 100644 --- a/numpy/_core/src/umath/dispatching.cpp +++ b/numpy/_core/src/umath/dispatching.cpp @@ -213,7 +213,7 @@ PyUFunc_AddLoopsFromSpecs(PyUFunc_LoopSlot *slots) PyUFunc_LoopSlot *slot; for (slot = slots; slot->name != NULL; slot++) { - PyObject *ufunc = npy_import("numpy", slot->name); + PyObject *ufunc = npy_import_entry_point(slot->name); if (ufunc == NULL) { return -1; } diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index bf339c07f068..a2ad97c79de8 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -3,6 +3,7 
@@ import ctypes import functools import gc +import importlib import io import itertools import mmap @@ -4475,6 +4476,37 @@ def test_intp_sequence_converters_errors(self, converter): # These converters currently convert overflows to a ValueError converter(2**64) + @pytest.mark.parametrize( + "entry_point", + [ + module + item + for item in ("sin", "strings.str_len", "fft._pocketfft_umath.ifft") + for module in ("", "numpy:") + ] + [ + "numpy.strings:str_len", + "functools:reduce", + "functools:reduce.__doc__" + ] + ) + def test_import_entry_point(self, entry_point): + modname, _, items = entry_point.rpartition(":") + if modname: + module = obj = importlib.import_module(modname) + else: + module = np + exp = functools.reduce(getattr, items.split("."), module) + got = _multiarray_tests.npy_import_entry_point(entry_point) + assert got == exp + + @pytest.mark.parametrize( + "entry_point", + ["sin.", "numpy:", "numpy:sin:__call__", "numpy.sin:__call__."] + ) + def test_import_entry_point_errors(self, entry_point): + # Don't really care about precise error. + with pytest.raises((ImportError, AttributeError)): + _multiarray_tests.npy_import_entry_point(entry_point) + class TestSubscripting: def test_test_zero_rank(self): From 01ba640336521ed28093b62b7d1e830a3a71ad8e Mon Sep 17 00:00:00 2001 From: Alverok <143798318+Alverok@users.noreply.github.com> Date: Tue, 14 Oct 2025 03:02:19 +0530 Subject: [PATCH 0594/1718] DOC: add note on meson buildtype for debug builds [skip azp][skip actions][skip cirrus] (#29883) --- .../dev/development_advanced_debugging.rst | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/doc/source/dev/development_advanced_debugging.rst b/doc/source/dev/development_advanced_debugging.rst index 07c80314da1b..594f88dcba17 100644 --- a/doc/source/dev/development_advanced_debugging.rst +++ b/doc/source/dev/development_advanced_debugging.rst @@ -244,6 +244,25 @@ The ``spin`` `development workflow tool `_. 
has built-in support for working with both ``gdb`` and ``lldb`` via the ``spin gdb`` and ``spin lldb`` commands. +.. note:: + + Building with ``-Dbuildtype=debug`` has a couple of important effects to + be aware of: + + * **Assertions are enabled**: This build type does not define the ``NDEBUG`` + macro, which means that any C-level assertions in the code will be + active. This is very useful for debugging, as it can help pinpoint + where an unexpected condition occurs. + + * **Compiler flags may need overriding**: Some compiler toolchains, + particularly those from ``conda-forge``, may set optimization flags + like ``-O2`` by default. These can override the ``debug`` build type. + To ensure a true debug build in such environments, you may need to + manually unset or override this flag. + + For more details on both points, see the `meson-python guide on + debug builds `_. + For both debuggers, it's advisable to build NumPy in either the ``debug`` or ``debugoptimized`` meson build profile. To use ``debug`` you can pass the option via ``spin build``: From d470611b6b112da659fa843af8a68d05525dddd1 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Tue, 14 Oct 2025 10:35:01 +0200 Subject: [PATCH 0595/1718] MAINT: Simplify string arena growth strategy (#29885) Not sure if there is any downsides to this, but I think this is much simpler and pretty much identical. Plus, I don't quite like that the 1.25 factor could be large, although I admit it seems very unlikely (maybe impossible) to actually trigger a problem there. 
Signed-off-by: Sebastian Berg --- .../multiarray/stringdtype/static_string.c | 28 ++++++------------- 1 file changed, 9 insertions(+), 19 deletions(-) diff --git a/numpy/_core/src/multiarray/stringdtype/static_string.c b/numpy/_core/src/multiarray/stringdtype/static_string.c index f5c2025e183a..c437fab2d336 100644 --- a/numpy/_core/src/multiarray/stringdtype/static_string.c +++ b/numpy/_core/src/multiarray/stringdtype/static_string.c @@ -155,8 +155,6 @@ vstring_buffer(npy_string_arena *arena, _npy_static_string_u *string) return (char *)((size_t)arena->buffer + string->vstring.offset); } -#define ARENA_EXPAND_FACTOR 1.25 - static char * arena_malloc(npy_string_arena *arena, npy_string_realloc_func r, size_t size) { @@ -168,25 +166,17 @@ arena_malloc(npy_string_arena *arena, npy_string_realloc_func r, size_t size) else { string_storage_size = size + sizeof(size_t); } - if ((arena->size - arena->cursor) <= string_storage_size) { - // realloc the buffer so there is enough room - // first guess is to double the size of the buffer - size_t newsize; - if (arena->size == 0) { - newsize = string_storage_size; - } - else if (((ARENA_EXPAND_FACTOR * arena->size) - arena->cursor) > - string_storage_size) { - newsize = (size_t)(ARENA_EXPAND_FACTOR * arena->size); + if ((arena->size - arena->cursor) < string_storage_size) { + size_t minsize = arena->cursor + string_storage_size; + if (minsize < arena->cursor) { + return NULL; // overflow means out of memory } - else { - newsize = arena->size + string_storage_size; - } - // If there enough room for both the payload and its header - if ((arena->cursor + string_storage_size) > newsize) { - // need extra room beyond the expansion factor, leave some padding - newsize = (size_t)(ARENA_EXPAND_FACTOR * (arena->cursor + string_storage_size)); + // Allocate 25% more than needed for this string. 
+ size_t newsize = minsize + minsize / 4; + if (newsize < minsize) { + return NULL; // overflow means out of memory } + // passing a NULL buffer to realloc is the same as malloc char *newbuf = r(arena->buffer, newsize); if (newbuf == NULL) { From 3ef745ffa270bde3a0a7d68073bf802aceeff1be Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 14 Oct 2025 14:41:57 +0200 Subject: [PATCH 0596/1718] STY: rename ``@classmethod`` arg to ``cls`` --- numpy/_core/tests/test_stringdtype.py | 32 +++++++++++++-------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index 7a253c981c4d..492894087aa9 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -1672,17 +1672,17 @@ class TestImplementation: """ @classmethod - def setup_class(self): - self.MISSING = 0x80 - self.INITIALIZED = 0x40 - self.OUTSIDE_ARENA = 0x20 - self.LONG = 0x10 - self.dtype = StringDType(na_object=np.nan) - self.sizeofstr = self.dtype.itemsize - sp = self.dtype.itemsize // 2 # pointer size = sizeof(size_t) + def setup_class(cls): + cls.MISSING = 0x80 + cls.INITIALIZED = 0x40 + cls.OUTSIDE_ARENA = 0x20 + cls.LONG = 0x10 + cls.dtype = StringDType(na_object=np.nan) + cls.sizeofstr = cls.dtype.itemsize + sp = cls.dtype.itemsize // 2 # pointer size = sizeof(size_t) # Below, size is not strictly correct, since it really uses # 7 (or 3) bytes, but good enough for the tests here. 
- self.view_dtype = np.dtype([ + cls.view_dtype = np.dtype([ ('offset', f'u{sp}'), ('size', f'u{sp // 2}'), ('xsiz', f'V{sp // 2 - 1}'), @@ -1693,13 +1693,13 @@ def setup_class(self): ('size', f'u{sp // 2}'), ('offset', f'u{sp}'), ]) - self.s_empty = "" - self.s_short = "01234" - self.s_medium = "abcdefghijklmnopqrstuvwxyz" - self.s_long = "-=+" * 100 - self.a = np.array( - [self.s_empty, self.s_short, self.s_medium, self.s_long], - self.dtype) + cls.s_empty = "" + cls.s_short = "01234" + cls.s_medium = "abcdefghijklmnopqrstuvwxyz" + cls.s_long = "-=+" * 100 + cls.a = np.array( + [cls.s_empty, cls.s_short, cls.s_medium, cls.s_long], + cls.dtype) def get_view(self, a): # Cannot view a StringDType as anything else directly, since From c0d939e7c4977a3c3bac2eb0b86d297f38b3d3b2 Mon Sep 17 00:00:00 2001 From: Britney Whittington <103079612+bwhitt7@users.noreply.github.com> Date: Tue, 14 Oct 2025 13:18:32 -0400 Subject: [PATCH 0597/1718] TST: Make temporary file usage thread safe (#29858) * TST: Make temporary file usage thread safe * TST: Refactor TestIO to still use fixture * STY: Remove extra changes? 
* TST: Fix pytest-run-parallel import issue * TST: Refactor most of fixture into method * TST: Remove unneeded changes to conftest * STY: Remove whitespace changes * TST: Rename and move tmp_filename function --- numpy/_core/tests/test_multiarray.py | 137 +++++++++------ numpy/lib/tests/test__datasource.py | 246 ++++++++++++--------------- 2 files changed, 198 insertions(+), 185 deletions(-) diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index a2ad97c79de8..12ea10e5b568 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -5613,6 +5613,16 @@ def test_invalid_axis(self): # gh-7528 x = np.linspace(0., 1., 42 * 3).reshape(42, 3) assert_raises(AxisError, np.lexsort, x, axis=2) + +def normalize_filename(tmp_path, param): + # Handles two cases, where filename should + # be a string, or a path object. + path = tmp_path / "file" + if param == "string": + return str(path) + return path + + class TestIO: """Test tofile, fromfile, tobytes, and fromstring""" @@ -5624,14 +5634,11 @@ def _create_data(self): return x @pytest.fixture(params=["string", "path_obj"]) - def tmp_filename(self, tmp_path, request): - # This fixture covers two cases: - # one where the filename is a string and - # another where it is a pathlib object - filename = tmp_path / "file" - if request.param == "string": - filename = str(filename) - yield filename + def param_filename(self, request): + # This fixture returns string or path_obj + # so that every test doesn't need to have the + # parametrize marker. 
+ return request.param def test_nofile(self): # this should probably be supported as a file @@ -5662,19 +5669,22 @@ def test_fromstring_count0(self): d = np.fromstring("1,2", sep=",", dtype=np.int64, count=0) assert d.shape == (0,) - def test_empty_files_text(self, tmp_filename): + def test_empty_files_text(self, tmp_path, param_filename): + tmp_filename = normalize_filename(tmp_path, param_filename) with open(tmp_filename, 'w') as f: pass y = np.fromfile(tmp_filename) assert_(y.size == 0, "Array not empty") - def test_empty_files_binary(self, tmp_filename): + def test_empty_files_binary(self, tmp_path, param_filename): + tmp_filename = normalize_filename(tmp_path, param_filename) with open(tmp_filename, 'wb') as f: pass y = np.fromfile(tmp_filename, sep=" ") assert_(y.size == 0, "Array not empty") - def test_roundtrip_file(self, tmp_filename): + def test_roundtrip_file(self, tmp_path, param_filename): + tmp_filename = normalize_filename(tmp_path, param_filename) x = self._create_data() with open(tmp_filename, 'wb') as f: x.tofile(f) @@ -5683,13 +5693,15 @@ def test_roundtrip_file(self, tmp_filename): y = np.fromfile(f, dtype=x.dtype) assert_array_equal(y, x.flat) - def test_roundtrip(self, tmp_filename): + def test_roundtrip(self, tmp_path, param_filename): + tmp_filename = normalize_filename(tmp_path, param_filename) x = self._create_data() x.tofile(tmp_filename) y = np.fromfile(tmp_filename, dtype=x.dtype) assert_array_equal(y, x.flat) - def test_roundtrip_dump_pathlib(self, tmp_filename): + def test_roundtrip_dump_pathlib(self, tmp_path, param_filename): + tmp_filename = normalize_filename(tmp_path, param_filename) x = self._create_data() p = pathlib.Path(tmp_filename) x.dump(p) @@ -5722,8 +5734,9 @@ def test_roundtrip_repr(self): y = np.fromstring(s, sep="@") assert_array_equal(x, y) - def test_unseekable_fromfile(self, tmp_filename): + def test_unseekable_fromfile(self, tmp_path, param_filename): # gh-6246 + tmp_filename = normalize_filename(tmp_path, 
param_filename) x = self._create_data() x.tofile(tmp_filename) @@ -5735,16 +5748,18 @@ def fail(*args, **kwargs): f.tell = fail assert_raises(OSError, np.fromfile, f, dtype=x.dtype) - def test_io_open_unbuffered_fromfile(self, tmp_filename): + def test_io_open_unbuffered_fromfile(self, tmp_path, param_filename): # gh-6632 + tmp_filename = normalize_filename(tmp_path, param_filename) x = self._create_data() x.tofile(tmp_filename) with open(tmp_filename, 'rb', buffering=0) as f: y = np.fromfile(f, dtype=x.dtype) assert_array_equal(y, x.flat) - def test_largish_file(self, tmp_filename): + def test_largish_file(self, tmp_path, param_filename): # check the fallocate path on files > 16MB + tmp_filename = normalize_filename(tmp_path, param_filename) d = np.zeros(4 * 1024 ** 2) d.tofile(tmp_filename) assert_equal(os.path.getsize(tmp_filename), d.nbytes) @@ -5763,19 +5778,21 @@ def test_largish_file(self, tmp_filename): d.tofile(f) assert_equal(os.path.getsize(tmp_filename), d.nbytes * 2) - def test_io_open_buffered_fromfile(self, tmp_filename): + def test_io_open_buffered_fromfile(self, tmp_path, param_filename): # gh-6632 + tmp_filename = normalize_filename(tmp_path, param_filename) x = self._create_data() x.tofile(tmp_filename) with open(tmp_filename, 'rb', buffering=-1) as f: y = np.fromfile(f, dtype=x.dtype) assert_array_equal(y, x.flat) - def test_file_position_after_fromfile(self, tmp_filename): + def test_file_position_after_fromfile(self, tmp_path, param_filename): # gh-4118 sizes = [io.DEFAULT_BUFFER_SIZE // 8, io.DEFAULT_BUFFER_SIZE, io.DEFAULT_BUFFER_SIZE * 8] + tmp_filename = normalize_filename(tmp_path, param_filename) for size in sizes: with open(tmp_filename, 'wb') as f: @@ -5791,11 +5808,12 @@ def test_file_position_after_fromfile(self, tmp_filename): pos = f.tell() assert_equal(pos, 10, err_msg=err_msg) - def test_file_position_after_tofile(self, tmp_filename): + def test_file_position_after_tofile(self, tmp_path, param_filename): # gh-4118 sizes = 
[io.DEFAULT_BUFFER_SIZE // 8, io.DEFAULT_BUFFER_SIZE, io.DEFAULT_BUFFER_SIZE * 8] + tmp_filename = normalize_filename(tmp_path, param_filename) for size in sizes: err_msg = "%d" % (size,) @@ -5816,8 +5834,9 @@ def test_file_position_after_tofile(self, tmp_filename): pos = f.tell() assert_equal(pos, 10, err_msg=err_msg) - def test_load_object_array_fromfile(self, tmp_filename): + def test_load_object_array_fromfile(self, tmp_path, param_filename): # gh-12300 + tmp_filename = normalize_filename(tmp_path, param_filename) with open(tmp_filename, 'w') as f: # Ensure we have a file with consistent contents pass @@ -5829,7 +5848,8 @@ def test_load_object_array_fromfile(self, tmp_filename): assert_raises_regex(ValueError, "Cannot read into object array", np.fromfile, tmp_filename, dtype=object) - def test_fromfile_offset(self, tmp_filename): + def test_fromfile_offset(self, tmp_path, param_filename): + tmp_filename = normalize_filename(tmp_path, param_filename) x = self._create_data() with open(tmp_filename, 'wb') as f: x.tofile(f) @@ -5865,23 +5885,21 @@ def test_fromfile_offset(self, tmp_filename): sep=",", offset=1) @pytest.mark.skipif(IS_PYPY, reason="bug in PyPy's PyNumber_AsSsize_t") - def test_fromfile_bad_dup(self, tmp_filename): + def test_fromfile_bad_dup(self, tmp_path, param_filename, monkeypatch): def dup_str(fd): return 'abc' def dup_bigint(fd): return 2**68 + tmp_filename = normalize_filename(tmp_path, param_filename) x = self._create_data() - old_dup = os.dup - try: - with open(tmp_filename, 'wb') as f: - x.tofile(f) - for dup, exc in ((dup_str, TypeError), (dup_bigint, OSError)): - os.dup = dup - assert_raises(exc, np.fromfile, f) - finally: - os.dup = old_dup + + with open(tmp_filename, 'wb') as f: + x.tofile(f) + for dup, exc in ((dup_str, TypeError), (dup_bigint, OSError)): + monkeypatch.setattr(os, "dup", dup) + assert_raises(exc, np.fromfile, f) def _check_from(self, s, value, filename, **kw): if 'sep' not in kw: @@ -5923,38 +5941,44 @@ def 
test_decimal_comma_separator(): else: assert False, request.param - def test_nan(self, tmp_filename, decimal_sep_localization): + def test_nan(self, tmp_path, param_filename, decimal_sep_localization): + tmp_filename = normalize_filename(tmp_path, param_filename) self._check_from( b"nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)", [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], tmp_filename, sep=' ') - def test_inf(self, tmp_filename, decimal_sep_localization): + def test_inf(self, tmp_path, param_filename, decimal_sep_localization): + tmp_filename = normalize_filename(tmp_path, param_filename) self._check_from( b"inf +inf -inf infinity -Infinity iNfInItY -inF", [np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf, -np.inf], tmp_filename, sep=' ') - def test_numbers(self, tmp_filename, decimal_sep_localization): + def test_numbers(self, tmp_path, param_filename, decimal_sep_localization): + tmp_filename = normalize_filename(tmp_path, param_filename) self._check_from( b"1.234 -1.234 .3 .3e55 -123133.1231e+133", [1.234, -1.234, .3, .3e55, -123133.1231e+133], tmp_filename, sep=' ') - def test_binary(self, tmp_filename): + def test_binary(self, tmp_path, param_filename): + tmp_filename = normalize_filename(tmp_path, param_filename) self._check_from( b'\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@', np.array([1, 2, 3, 4]), tmp_filename, dtype=' Date: Tue, 14 Oct 2025 19:52:07 +0200 Subject: [PATCH 0598/1718] TYP: remove non-existent ``version.__all__`` stub --- numpy/version.pyi | 21 ++++++--------------- 1 file changed, 6 insertions(+), 15 deletions(-) diff --git a/numpy/version.pyi b/numpy/version.pyi index b3284d7608b0..073885c017c2 100644 --- a/numpy/version.pyi +++ b/numpy/version.pyi @@ -1,18 +1,9 @@ from typing import Final, LiteralString -__all__ = ( - "__version__", - "full_version", - "git_revision", - "release", - "short_version", - "version", -) +version: Final[LiteralString] = ... +__version__: Final[LiteralString] = ... 
+full_version: Final[LiteralString] = ... -version: Final[LiteralString] -__version__: Final[LiteralString] -full_version: Final[LiteralString] - -git_revision: Final[LiteralString] -release: Final[bool] -short_version: Final[LiteralString] +git_revision: Final[LiteralString] = ... +release: Final[bool] = ... +short_version: Final[LiteralString] = ... From 71c1c627882a4fc650aabb745e0426fd7cdbe61d Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 14 Oct 2025 20:04:50 +0200 Subject: [PATCH 0599/1718] TYP: update ``__all__`` in ``testing`` --- numpy/testing/__init__.pyi | 4 ++++ numpy/testing/_private/utils.pyi | 6 ++++++ 2 files changed, 10 insertions(+) diff --git a/numpy/testing/__init__.pyi b/numpy/testing/__init__.pyi index ba3c9a2b7a44..6dc98930f6fd 100644 --- a/numpy/testing/__init__.pyi +++ b/numpy/testing/__init__.pyi @@ -2,8 +2,10 @@ from unittest import TestCase from . import overrides from ._private.utils import ( + BLAS_SUPPORTS_FPE, HAS_LAPACK64, HAS_REFCOUNT, + IS_64BIT, IS_EDITABLE, IS_INSTALLED, IS_MUSL, @@ -51,8 +53,10 @@ from ._private.utils import ( ) __all__ = [ + "BLAS_SUPPORTS_FPE", "HAS_LAPACK64", "HAS_REFCOUNT", + "IS_64BIT", "IS_EDITABLE", "IS_INSTALLED", "IS_MUSL", diff --git a/numpy/testing/_private/utils.pyi b/numpy/testing/_private/utils.pyi index 1b298132c8ef..670424f6a599 100644 --- a/numpy/testing/_private/utils.pyi +++ b/numpy/testing/_private/utils.pyi @@ -44,9 +44,13 @@ __all__ = [ # noqa: RUF022 "IS_PYPY", "IS_PYSTON", "IS_WASM", + "IS_INSTALLED", + "IS_64BIT", "HAS_LAPACK64", "HAS_REFCOUNT", + "BLAS_SUPPORTS_FPE", "NOGIL_BUILD", + "NUMPY_ROOT", "assert_", "assert_array_almost_equal_nulp", "assert_raises_regex", @@ -130,8 +134,10 @@ IS_MUSL: Final[bool] = ... IS_PYPY: Final[bool] = ... IS_PYSTON: Final[bool] = ... IS_WASM: Final[bool] = ... +IS_64BIT: Final[bool] = ... HAS_REFCOUNT: Final[bool] = ... HAS_LAPACK64: Final[bool] = ... +BLAS_SUPPORTS_FPE: Final[bool] = ... NOGIL_BUILD: Final[bool] = ... 
class KnownFailureException(Exception): ... From 88316b73e09e07b85fb22fbe80bf9dfc9b9d202c Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 14 Oct 2025 20:08:29 +0200 Subject: [PATCH 0600/1718] TYP: implicit re-exports in ``testing`` --- numpy/testing/__init__.pyi | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/numpy/testing/__init__.pyi b/numpy/testing/__init__.pyi index 6dc98930f6fd..684a7c36adec 100644 --- a/numpy/testing/__init__.pyi +++ b/numpy/testing/__init__.pyi @@ -1,6 +1,7 @@ from unittest import TestCase -from . import overrides +from . import _private as _private, overrides +from ._private import extbuild as extbuild from ._private.utils import ( BLAS_SUPPORTS_FPE, HAS_LAPACK64, From d28ba9b8e3ffb12e74ef8931ce8f7da480596df8 Mon Sep 17 00:00:00 2001 From: Britney Whittington <103079612+bwhitt7@users.noreply.github.com> Date: Tue, 14 Oct 2025 14:14:50 -0400 Subject: [PATCH 0601/1718] TST: Convert mixed_types_structured to method (#29942) --- numpy/lib/tests/test_loadtxt.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/numpy/lib/tests/test_loadtxt.py b/numpy/lib/tests/test_loadtxt.py index a2022a0d5175..7a4ed17e7f07 100644 --- a/numpy/lib/tests/test_loadtxt.py +++ b/numpy/lib/tests/test_loadtxt.py @@ -40,10 +40,9 @@ def test_comment_multiple_chars(comment): assert_equal(a, [[1.5, 2.5], [3.0, 4.0], [5.5, 6.0]]) -@pytest.fixture def mixed_types_structured(): """ - Fixture providing heterogeneous input data with a structured dtype, along + Function providing heterogeneous input data with a structured dtype, along with the associated structured array. 
""" data = StringIO( @@ -74,15 +73,14 @@ def mixed_types_structured(): @pytest.mark.parametrize('skiprows', [0, 1, 2, 3]) -def test_structured_dtype_and_skiprows_no_empty_lines( - skiprows, mixed_types_structured): - data, dtype, expected = mixed_types_structured +def test_structured_dtype_and_skiprows_no_empty_lines(skiprows): + data, dtype, expected = mixed_types_structured() a = np.loadtxt(data, dtype=dtype, delimiter=";", skiprows=skiprows) assert_array_equal(a, expected[skiprows:]) -def test_unpack_structured(mixed_types_structured): - data, dtype, expected = mixed_types_structured +def test_unpack_structured(): + data, dtype, expected = mixed_types_structured() a, b, c, d = np.loadtxt(data, dtype=dtype, delimiter=";", unpack=True) assert_array_equal(a, expected["f0"]) From e25b4829c2eede2fe9e779c7abf33ac74c7f7b00 Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 14 Oct 2025 20:14:56 +0200 Subject: [PATCH 0602/1718] TYP: add missing ``__all__`` stubs in submodules of ``random`` --- numpy/random/_mt19937.pyi | 2 ++ numpy/random/_pcg64.pyi | 2 ++ numpy/random/_philox.pyi | 2 ++ numpy/random/_sfc64.pyi | 2 ++ numpy/random/mtrand.pyi | 56 +++++++++++++++++++++++++++++++++++++++ 5 files changed, 64 insertions(+) diff --git a/numpy/random/_mt19937.pyi b/numpy/random/_mt19937.pyi index 99e10677c3a2..c8ea2dccda02 100644 --- a/numpy/random/_mt19937.pyi +++ b/numpy/random/_mt19937.pyi @@ -5,6 +5,8 @@ from numpy._typing import _ArrayLikeInt_co from numpy.random.bit_generator import BitGenerator, SeedSequence from numpy.typing import NDArray +__all__ = ["MT19937"] + @type_check_only class _MT19937Internal(TypedDict): key: NDArray[uint32] diff --git a/numpy/random/_pcg64.pyi b/numpy/random/_pcg64.pyi index 0326781fd43a..6055c5d2921d 100644 --- a/numpy/random/_pcg64.pyi +++ b/numpy/random/_pcg64.pyi @@ -3,6 +3,8 @@ from typing import TypedDict, type_check_only from numpy._typing import _ArrayLikeInt_co from numpy.random.bit_generator import BitGenerator, SeedSequence 
+__all__ = ["PCG64"] + @type_check_only class _PCG64Internal(TypedDict): state: int diff --git a/numpy/random/_philox.pyi b/numpy/random/_philox.pyi index 353a0adb4861..ab1aee31eb02 100644 --- a/numpy/random/_philox.pyi +++ b/numpy/random/_philox.pyi @@ -5,6 +5,8 @@ from numpy._typing import _ArrayLikeInt_co from numpy.random.bit_generator import BitGenerator, SeedSequence from numpy.typing import NDArray +__all__ = ["Philox"] + @type_check_only class _PhiloxInternal(TypedDict): counter: NDArray[uint64] diff --git a/numpy/random/_sfc64.pyi b/numpy/random/_sfc64.pyi index a6f0d8445f25..1e563fdebfde 100644 --- a/numpy/random/_sfc64.pyi +++ b/numpy/random/_sfc64.pyi @@ -4,6 +4,8 @@ from numpy import uint64 from numpy._typing import NDArray, _ArrayLikeInt_co from numpy.random.bit_generator import BitGenerator, SeedSequence +__all__ = ["SFC64"] + @type_check_only class _SFC64Internal(TypedDict): state: NDArray[uint64] diff --git a/numpy/random/mtrand.pyi b/numpy/random/mtrand.pyi index 3221048a8af7..c20d35193d45 100644 --- a/numpy/random/mtrand.pyi +++ b/numpy/random/mtrand.pyi @@ -42,6 +42,62 @@ from numpy._typing import ( ) from numpy.random.bit_generator import BitGenerator +__all__ = [ + "RandomState", + "beta", + "binomial", + "bytes", + "chisquare", + "choice", + "dirichlet", + "exponential", + "f", + "gamma", + "geometric", + "get_bit_generator", + "get_state", + "gumbel", + "hypergeometric", + "laplace", + "logistic", + "lognormal", + "logseries", + "multinomial", + "multivariate_normal", + "negative_binomial", + "noncentral_chisquare", + "noncentral_f", + "normal", + "pareto", + "permutation", + "poisson", + "power", + "rand", + "randint", + "randn", + "random", + "random_integers", + "random_sample", + "ranf", + "rayleigh", + "sample", + "seed", + "set_bit_generator", + "set_state", + "shuffle", + "standard_cauchy", + "standard_exponential", + "standard_gamma", + "standard_normal", + "standard_t", + "triangular", + "uniform", + "vonmises", + "wald", + 
"weibull", + "zipf", +] + class RandomState: _bit_generator: BitGenerator def __init__(self, seed: _ArrayLikeInt_co | BitGenerator | None = ...) -> None: ... From af379dca868870412f7de8fce55e9a95d89f8d96 Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 14 Oct 2025 20:35:57 +0200 Subject: [PATCH 0603/1718] TYP: add missing ``_core._asarray.__all__`` stub --- numpy/_core/_asarray.pyi | 2 ++ 1 file changed, 2 insertions(+) diff --git a/numpy/_core/_asarray.pyi b/numpy/_core/_asarray.pyi index 349933323b95..6bef69d8e4ea 100644 --- a/numpy/_core/_asarray.pyi +++ b/numpy/_core/_asarray.pyi @@ -3,6 +3,8 @@ from typing import Any, Literal, TypeAlias, TypeVar, overload from numpy._typing import DTypeLike, NDArray, _SupportsArrayFunc +__all__ = ["require"] + _ArrayT = TypeVar("_ArrayT", bound=NDArray[Any]) _Requirements: TypeAlias = Literal[ From 602176a00ccf228f2be325dac201fe57c944fc2e Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 14 Oct 2025 20:36:23 +0200 Subject: [PATCH 0604/1718] TYP: remove non-existent ``_core._type_aliases.__all__`` stub --- numpy/_core/_type_aliases.pyi | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/numpy/_core/_type_aliases.pyi b/numpy/_core/_type_aliases.pyi index 3d57e8135378..e28541cc8987 100644 --- a/numpy/_core/_type_aliases.pyi +++ b/numpy/_core/_type_aliases.pyi @@ -3,16 +3,6 @@ from typing import Final, Literal as L, TypeAlias, TypedDict, type_check_only import numpy as np -__all__ = ( - "_abstract_type_names", - "_aliases", - "_extra_aliases", - "allTypes", - "c_names_dict", - "sctypeDict", - "sctypes", -) - sctypeDict: Final[dict[str, type[np.generic]]] allTypes: Final[dict[str, type[np.generic]]] From bfb0f77f1126b97a6afb89c232d50f503077f569 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Tue, 14 Oct 2025 22:10:51 +0200 Subject: [PATCH 0605/1718] TYP: fix inconsistent ``float64.__getformat__`` stub (#29954) --- numpy/__init__.pyi | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git 
a/numpy/__init__.pyi b/numpy/__init__.pyi index c18bc91ff4ba..b90acfaf0c88 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -4921,9 +4921,11 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @property def imag(self) -> Self: ... def conjugate(self) -> Self: ... - def __getformat__(self, typestr: L["double", "float"], /) -> str: ... def __getnewargs__(self, /) -> tuple[float]: ... + @classmethod + def __getformat__(cls, typestr: L["double", "float"], /) -> str: ... # undocumented + # float64-specific operator overrides @overload def __add__(self, other: _Float64_co, /) -> float64: ... From 583ba3fb6b7b9f299319dbc3f5a5a4b935a634bd Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Tue, 14 Oct 2025 22:11:41 +0200 Subject: [PATCH 0606/1718] TYP: add ``__class_getitem__`` to ``bool`` and ``datetime64`` (#29952) --- numpy/__init__.pyi | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index b90acfaf0c88..fbdccf8a4d76 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -3909,6 +3909,8 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @overload def __new__(cls, value: object, /) -> np.bool[builtins.bool]: ... + def __class_getitem__(cls, type_arg: type | object, /) -> GenericAlias: ... + def __bool__(self, /) -> _BoolItemT_co: ... @overload @@ -5429,6 +5431,8 @@ class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]): @overload def __new__(cls, value: bytes | str | dt.date | None, format: _TimeUnitSpec = ..., /) -> Self: ... + def __class_getitem__(cls, type_arg: type | object, /) -> GenericAlias: ... + @overload def __add__(self: datetime64[_AnyDT64Item], x: int | integer | np.bool, /) -> datetime64[_AnyDT64Item]: ... 
@overload From f9592bf23bd6fa69ebe7d037bd31cf835d45f2c6 Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 14 Oct 2025 22:51:53 +0200 Subject: [PATCH 0607/1718] TYP: stub ``numpy.ma.testutils`` --- numpy/ma/testutils.pyi | 69 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 69 insertions(+) create mode 100644 numpy/ma/testutils.pyi diff --git a/numpy/ma/testutils.pyi b/numpy/ma/testutils.pyi new file mode 100644 index 000000000000..92b843b93a43 --- /dev/null +++ b/numpy/ma/testutils.pyi @@ -0,0 +1,69 @@ +import numpy as np +from numpy._typing import NDArray +from numpy.testing import ( + TestCase, + assert_, + assert_allclose, + assert_array_almost_equal_nulp, + assert_raises, +) +from numpy.testing._private.utils import _ComparisonFunc + +__all__ = [ + "TestCase", + "almost", + "approx", + "assert_", + "assert_allclose", + "assert_almost_equal", + "assert_array_almost_equal", + "assert_array_almost_equal_nulp", + "assert_array_approx_equal", + "assert_array_compare", + "assert_array_equal", + "assert_array_less", + "assert_close", + "assert_equal", + "assert_equal_records", + "assert_mask_equal", + "assert_not_equal", + "assert_raises", + "fail_if_array_equal", +] + +def approx( + a: object, b: object, fill_value: bool = True, rtol: float = 1e-5, atol: float = 1e-8 +) -> np.ndarray[tuple[int], np.dtype[np.bool]]: ... +def almost(a: object, b: object, decimal: int = 6, fill_value: bool = True) -> np.ndarray[tuple[int], np.dtype[np.bool]]: ... + +# +def assert_equal_records(a: NDArray[np.void], b: NDArray[np.void]) -> None: ... +def assert_equal(actual: object, desired: object, err_msg: str = "") -> None: ... +def fail_if_equal(actual: object, desired: object, err_msg: str = "") -> None: ... +def assert_almost_equal( + actual: object, desired: object, decimal: int = 7, err_msg: str = "", verbose: bool = True +) -> None: ... 
+ +# +def assert_array_compare( + comparison: _ComparisonFunc, + x: object, + y: object, + err_msg: str = "", + verbose: bool = True, + header: str = "", + fill_value: bool = True, +) -> None: ... +def assert_array_equal(x: object, y: object, err_msg: str = "", verbose: bool = True) -> None: ... +def fail_if_array_equal(x: object, y: object, err_msg: str = "", verbose: bool = True) -> None: ... +def assert_array_approx_equal( + x: object, y: object, decimal: int = 6, err_msg: str = "", verbose: bool = True +) -> None: ... +def assert_array_almost_equal( + x: object, y: object, decimal: int = 6, err_msg: str = "", verbose: bool = True +) -> None: ... +def assert_array_less(x: object, y: object, err_msg: str = "", verbose: bool = True) -> None: ... +def assert_mask_equal(m1: object, m2: object, err_msg: str = "") -> None: ... + +assert_not_equal = fail_if_equal +assert_close = assert_almost_equal From d0eb6493cc23e59c52148832b97562de5060bd1a Mon Sep 17 00:00:00 2001 From: Jake VanderPlas Date: Tue, 14 Oct 2025 13:57:09 -0700 Subject: [PATCH 0608/1718] [doc] fix formatting in np.percentile docstring --- numpy/lib/_function_base_impl.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index b8d6926bca88..4adb0c187395 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -4163,7 +4163,7 @@ def percentile(a, the result as dimensions with size one. With this option, the result will broadcast correctly against the original array `a`. - weights : array_like, optional + weights : array_like, optional An array of weights associated with the values in `a`. Each value in `a` contributes to the percentile according to its associated weight. 
The weights array can either be 1-D (in which case its length must be From 94b2986f55362b53fa9228235174ebac5decb283 Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 14 Oct 2025 23:11:56 +0200 Subject: [PATCH 0609/1718] TYP: update the ``finfo`` stubs --- numpy/_core/getlimits.pyi | 44 ++++++++++++++++++++++++--------------- 1 file changed, 27 insertions(+), 17 deletions(-) diff --git a/numpy/_core/getlimits.pyi b/numpy/_core/getlimits.pyi index eb832f55bb7e..a22149ceb5c6 100644 --- a/numpy/_core/getlimits.pyi +++ b/numpy/_core/getlimits.pyi @@ -1,3 +1,4 @@ +from functools import cached_property from types import GenericAlias from typing import Final, Generic, Self, overload from typing_extensions import TypeVar @@ -72,28 +73,25 @@ class iinfo(Generic[_IntegerT_co]): def __class_getitem__(cls, item: object, /) -> GenericAlias: ... class finfo(Generic[_FloatingT_co]): - dtype: np.dtype[_FloatingT_co] - eps: _FloatingT_co - epsneg: _FloatingT_co - resolution: _FloatingT_co - smallest_subnormal: _FloatingT_co - max: _FloatingT_co - min: _FloatingT_co + dtype: np.dtype[_FloatingT_co] # readonly + eps: _FloatingT_co # readonly + _radix: _FloatingT_co # readonly + smallest_normal: _FloatingT_co # readonly + smallest_subnormal: _FloatingT_co # readonly + max: _FloatingT_co # readonly + min: _FloatingT_co # readonly + + _fmt: str | None # `__str__` cache + _repr: str | None # `__repr__` cache bits: Final[int] - iexp: Final[int] - machep: Final[int] maxexp: Final[int] minexp: Final[int] - negep: Final[int] - nexp: Final[int] nmant: Final[int] precision: Final[int] - @property - def smallest_normal(self, /) -> _FloatingT_co: ... - @property - def tiny(self, /) -> _FloatingT_co: ... + @classmethod + def __class_getitem__(cls, item: object, /) -> GenericAlias: ... # @overload @@ -110,5 +108,17 @@ class finfo(Generic[_FloatingT_co]): def __new__(cls, dtype: str) -> finfo: ... # - @classmethod - def __class_getitem__(cls, item: object, /) -> GenericAlias: ... 
+ @cached_property + def epsneg(self, /) -> _FloatingT_co: ... + @cached_property + def resolution(self, /) -> _FloatingT_co: ... + @cached_property + def machep(self, /) -> int: ... + @cached_property + def negep(self, /) -> int: ... + @cached_property + def nexp(self, /) -> int: ... + @cached_property + def iexp(self, /) -> int: ... + @cached_property + def tiny(self, /) -> _FloatingT_co: ... From 46d892084f1f2ce4f6312ebbbab2d094cf216b16 Mon Sep 17 00:00:00 2001 From: Jake Vanderplas Date: Tue, 14 Oct 2025 14:15:55 -0700 Subject: [PATCH 0610/1718] TYP: fix return type annotation for ``normalize_axis_tuple`` utility (#29956) --- numpy/lib/_array_utils_impl.pyi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/lib/_array_utils_impl.pyi b/numpy/lib/_array_utils_impl.pyi index db9ef852ba57..1b290829caf4 100644 --- a/numpy/lib/_array_utils_impl.pyi +++ b/numpy/lib/_array_utils_impl.pyi @@ -17,7 +17,7 @@ def normalize_axis_tuple( ndim: int, argname: str | None = None, allow_duplicate: bool | None = False, -) -> tuple[int, int]: ... +) -> tuple[int, ...]: ... 
def normalize_axis_index( axis: int = ..., From 68dfbd873d9dbcdec2417da75e8e433d2f9088cf Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 14 Oct 2025 23:54:07 +0200 Subject: [PATCH 0611/1718] MAINT: remove obsolete ``generic.tostring`` method descriptor and docstring --- numpy/_core/_add_newdocs.py | 3 --- numpy/_core/defchararray.py | 1 - numpy/_core/src/multiarray/scalartypes.c.src | 5 +---- 3 files changed, 1 insertion(+), 8 deletions(-) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index 264bb8e25e7c..da9ea90eef34 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -6849,9 +6849,6 @@ def refer_to_array_attribute(attr, method=True): add_newdoc('numpy._core.numerictypes', 'generic', refer_to_array_attribute('tolist')) -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('tostring')) - add_newdoc('numpy._core.numerictypes', 'generic', refer_to_array_attribute('trace')) diff --git a/numpy/_core/defchararray.py b/numpy/_core/defchararray.py index 0378e976254d..098f4ab9ce9b 100644 --- a/numpy/_core/defchararray.py +++ b/numpy/_core/defchararray.py @@ -496,7 +496,6 @@ class adds the following functionality: title tofile tolist - tostring translate transpose upper diff --git a/numpy/_core/src/multiarray/scalartypes.c.src b/numpy/_core/src/multiarray/scalartypes.c.src index ad49de0c231e..337d8b9b48b2 100644 --- a/numpy/_core/src/multiarray/scalartypes.c.src +++ b/numpy/_core/src/multiarray/scalartypes.c.src @@ -2185,7 +2185,7 @@ gentype_byteswap(PyObject *self, PyObject *args, PyObject *kwds) * #name = take, getfield, put, repeat, tofile, mean, trace, diagonal, clip, * std, var, sum, cumsum, prod, cumprod, compress, sort, argsort, * round, argmax, argmin, max, min, any, all, astype, resize, - * reshape, choose, tostring, tobytes, copy, searchsorted, view, + * reshape, choose, tobytes, copy, searchsorted, view, * flatten, ravel, squeeze# */ static PyObject * @@ -2643,9 +2643,6 @@ static 
PyMethodDef gentype_methods[] = { {"tofile", (PyCFunction)gentype_tofile, METH_VARARGS | METH_KEYWORDS, NULL}, - {"tostring", - (PyCFunction)gentype_tostring, - METH_VARARGS | METH_KEYWORDS, NULL}, {"byteswap", (PyCFunction)gentype_byteswap, METH_VARARGS | METH_KEYWORDS, NULL}, From 2375e15d390669700725c362a3a0ea6db6fdc336 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 15 Oct 2025 02:02:59 +0200 Subject: [PATCH 0612/1718] MAINT: Remove remnants of methods removed in numpy 2 --- numpy/_core/src/multiarray/getset.c | 40 ------------------- numpy/_core/src/multiarray/scalartypes.c.src | 36 ----------------- numpy/matrixlib/tests/test_defmatrix.py | 4 +- .../tests/data/reveal/ndarray_conversion.pyi | 1 - 4 files changed, 2 insertions(+), 79 deletions(-) diff --git a/numpy/_core/src/multiarray/getset.c b/numpy/_core/src/multiarray/getset.c index 48da52dd3178..16876639897c 100644 --- a/numpy/_core/src/multiarray/getset.c +++ b/numpy/_core/src/multiarray/getset.c @@ -865,34 +865,6 @@ array_matrix_transpose_get(PyArrayObject *self, void *NPY_UNUSED(ignored)) return PyArray_MatrixTranspose(self); } -static PyObject * -array_ptp(PyArrayObject *self, void *NPY_UNUSED(ignored)) -{ - PyErr_SetString(PyExc_AttributeError, - "`ptp` was removed from the ndarray class in NumPy 2.0. " - "Use np.ptp(arr, ...) instead."); - return NULL; -} - -static PyObject * -array_newbyteorder(PyArrayObject *self, PyObject *args) -{ - PyErr_SetString(PyExc_AttributeError, - "`newbyteorder` was removed from the ndarray class " - "in NumPy 2.0. " - "Use `arr.view(arr.dtype.newbyteorder(order))` instead."); - return NULL; -} - -static PyObject * -array_itemset(PyArrayObject *self, PyObject *args) -{ - PyErr_SetString(PyExc_AttributeError, - "`itemset` was removed from the ndarray class in " - "NumPy 2.0. 
Use `arr[index] = value` instead."); - return NULL; -} - NPY_NO_EXPORT PyGetSetDef array_getsetlist[] = { {"ndim", (getter)array_ndim_get, @@ -958,18 +930,6 @@ NPY_NO_EXPORT PyGetSetDef array_getsetlist[] = { (getter)array_matrix_transpose_get, NULL, NULL, NULL}, - {"ptp", - (getter)array_ptp, - NULL, - NULL, NULL}, - {"newbyteorder", - (getter)array_newbyteorder, - NULL, - NULL, NULL}, - {"itemset", - (getter)array_itemset, - NULL, - NULL, NULL}, {"device", (getter)array_device, NULL, diff --git a/numpy/_core/src/multiarray/scalartypes.c.src b/numpy/_core/src/multiarray/scalartypes.c.src index ad49de0c231e..af7895dbb549 100644 --- a/numpy/_core/src/multiarray/scalartypes.c.src +++ b/numpy/_core/src/multiarray/scalartypes.c.src @@ -1940,33 +1940,6 @@ gentype_transpose_get(PyObject *self, void *NPY_UNUSED(ignored)) return self; } -static PyObject * -gentype_newbyteorder(PyObject *NPY_UNUSED(self), void *NPY_UNUSED(ignored)) -{ - PyErr_SetString(PyExc_AttributeError, - "`newbyteorder` was removed from scalar types in NumPy 2.0. " - "Use `sc.view(sc.dtype.newbyteorder(order))` instead."); - return NULL; -} - -static PyObject * -gentype_itemset(PyObject *NPY_UNUSED(self), void *NPY_UNUSED(ignored)) -{ - PyErr_SetString(PyExc_AttributeError, - "`itemset` was removed from scalar types in NumPy 2.0 " - "because scalars are immutable."); - return NULL; -} - -static PyObject * -gentype_ptp(PyObject *NPY_UNUSED(self), void *NPY_UNUSED(ignored)) -{ - PyErr_SetString(PyExc_AttributeError, - "`ptp` was removed from scalar types in NumPy 2.0. 
" - "For a scalar, the range of values always equals 0."); - return NULL; -} - static PyGetSetDef gentype_getsets[] = { {"ndim", @@ -2011,15 +1984,6 @@ static PyGetSetDef gentype_getsets[] = { {"T", (getter)gentype_transpose_get, (setter)0, NULL, NULL}, - {"newbyteorder", - (getter)gentype_newbyteorder, - (setter)0, NULL, NULL}, - {"itemset", - (getter)gentype_itemset, - (setter)0, NULL, NULL}, - {"ptp", - (getter)gentype_ptp, - (setter)0, NULL, NULL}, {"device", (getter)array_device, (setter)0, NULL, NULL}, diff --git a/numpy/matrixlib/tests/test_defmatrix.py b/numpy/matrixlib/tests/test_defmatrix.py index ce23933ab7f7..a0e868f5fe2c 100644 --- a/numpy/matrixlib/tests/test_defmatrix.py +++ b/numpy/matrixlib/tests/test_defmatrix.py @@ -288,10 +288,10 @@ def test_instance_methods(self): 'argmin', 'choose', 'dump', 'dumps', 'fill', 'getfield', 'getA', 'getA1', 'item', 'nonzero', 'put', 'putmask', 'resize', 'searchsorted', 'setflags', 'setfield', 'sort', - 'partition', 'argpartition', 'newbyteorder', 'to_device', + 'partition', 'argpartition', 'to_device', 'take', 'tofile', 'tolist', 'tobytes', 'all', 'any', 'sum', 'argmax', 'argmin', 'min', 'max', 'mean', 'var', 'ptp', - 'prod', 'std', 'ctypes', 'itemset', 'bitwise_count', + 'prod', 'std', 'ctypes', 'bitwise_count', ] for attrib in dir(a): if attrib.startswith('_') or attrib in excluded_methods: diff --git a/numpy/typing/tests/data/reveal/ndarray_conversion.pyi b/numpy/typing/tests/data/reveal/ndarray_conversion.pyi index 5cb9b98029dd..4f7d0c28b747 100644 --- a/numpy/typing/tests/data/reveal/ndarray_conversion.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_conversion.pyi @@ -37,7 +37,6 @@ any_sctype: np.ndarray[Any, Any] assert_type(any_dtype.tolist(), Any) assert_type(any_sctype.tolist(), Any) -# itemset does not return a value # tobytes is pretty simple # tofile does not return a value # dump does not return a value From 7f91f841b44db9a9596c7655b4af2353e5e3c1b6 Mon Sep 17 00:00:00 2001 From: Britney Whittington 
<103079612+bwhitt7@users.noreply.github.com> Date: Tue, 14 Oct 2025 20:58:44 -0400 Subject: [PATCH 0613/1718] TST: Remove recwarn from test (#29957) --- numpy/lib/tests/test_nanfunctions.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/numpy/lib/tests/test_nanfunctions.py b/numpy/lib/tests/test_nanfunctions.py index 447b84db3edc..ef11ecfdf518 100644 --- a/numpy/lib/tests/test_nanfunctions.py +++ b/numpy/lib/tests/test_nanfunctions.py @@ -723,22 +723,22 @@ def test_ddof(self): res = nf(_ndat, axis=1, ddof=ddof) assert_almost_equal(res, tgt) - def test_ddof_too_big(self, recwarn): + def test_ddof_too_big(self): nanfuncs = [np.nanvar, np.nanstd] stdfuncs = [np.var, np.std] dsize = [len(d) for d in _rdat] for nf, rf in zip(nanfuncs, stdfuncs): for ddof in range(5): - with warnings.catch_warnings(): + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') warnings.simplefilter('ignore', ComplexWarning) tgt = [ddof >= d for d in dsize] res = nf(_ndat, axis=1, ddof=ddof) assert_equal(np.isnan(res), tgt) if any(tgt): - assert_(len(recwarn) == 1) - recwarn.pop(RuntimeWarning) + assert_(len(w) == 1) else: - assert_(len(recwarn) == 0) + assert_(len(w) == 0) @pytest.mark.parametrize("axis", [None, 0, 1]) @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) From 12562d611e6f5b56e26b2267b5bf3a58500e72d3 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 15 Oct 2025 04:24:06 +0200 Subject: [PATCH 0614/1718] TYP: add missing ``generic`` methods --- numpy/__init__.pyi | 84 ++++++++++++++++--- numpy/typing/tests/data/fail/ndarray_misc.pyi | 27 ++++-- 2 files changed, 93 insertions(+), 18 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index fbdccf8a4d76..78dddd5cf26a 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -6,7 +6,7 @@ import ctypes as ct import array as _array import datetime as dt from abc import abstractmethod -from types import EllipsisType, ModuleType, TracebackType, 
MappingProxyType, GenericAlias +from types import EllipsisType, GetSetDescriptorType, ModuleType, TracebackType, MappingProxyType, GenericAlias from decimal import Decimal from fractions import Fraction from uuid import UUID @@ -767,6 +767,7 @@ _RealArrayT = TypeVar("_RealArrayT", bound=NDArray[floating | integer | timedelt _NumericArrayT = TypeVar("_NumericArrayT", bound=NDArray[number | timedelta64 | object_]) _ShapeT = TypeVar("_ShapeT", bound=_Shape) +_Shape1T = TypeVar("_Shape1T", bound=tuple[int, *tuple[int, ...]]) _ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True) _1DShapeT = TypeVar("_1DShapeT", bound=_1D) _2DShapeT_co = TypeVar("_2DShapeT_co", bound=_2D, default=_2D, covariant=True) @@ -1687,6 +1688,15 @@ class _ArrayOrScalarCommon: def tolist(self) -> Any: ... def to_device(self, device: L["cpu"], /, *, stream: int | Any | None = ...) -> Self: ... + # NOTE: for `generic`, these two methods don't do anything + def fill(self, value: _ScalarLike_co, /) -> None: ... + def put(self, /, indices: _ArrayLikeInt_co, values: ArrayLike, mode: _ModeKind = "raise") -> None: ... + + # NOTE: even on `generic` this seems to work + def setflags( + self, /, write: builtins.bool | None = None, align: builtins.bool | None = None, uic: builtins.bool | None = None + ) -> None: ... + @property def __array_interface__(self) -> dict[str, Any]: ... @property @@ -2168,7 +2178,6 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @deprecated("Setting the strides on a NumPy array has been deprecated in NumPy 2.4") def strides(self, value: _ShapeLike) -> None: ... def byteswap(self, inplace: builtins.bool = ...) -> Self: ... - def fill(self, value: Any, /) -> None: ... @property def flat(self) -> flatiter[Self]: ... @@ -2349,13 +2358,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def dot(self, b: ArrayLike, out: _ArrayT) -> _ArrayT: ... 
- # `nonzero()` is deprecated for 0d arrays/generics + # `nonzero()` raises for 0d arrays/generics def nonzero(self) -> tuple[ndarray[tuple[int], np.dtype[intp]], ...]: ... - # `put` is technically available to `generic`, - # but is pointless as `generic`s are immutable - def put(self, /, indices: _ArrayLikeInt_co, values: ArrayLike, mode: _ModeKind = "raise") -> None: ... - @overload def searchsorted( self, # >= 1D array @@ -3574,13 +3579,47 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): @abstractmethod def __new__(cls) -> Self: ... - def __hash__(self) -> int: ... + + if sys.version_info >= (3, 12): + def __buffer__(self, flags: int, /) -> memoryview: ... + @overload def __array__(self, dtype: None = None, /) -> ndarray[tuple[()], dtype[Self]]: ... @overload def __array__(self, dtype: _DTypeT, /) -> ndarray[tuple[()], _DTypeT]: ... - if sys.version_info >= (3, 12): - def __buffer__(self, flags: int, /) -> memoryview: ... + + @overload + def __array_wrap__( + self, + array: ndarray[_ShapeT, _DTypeT], + context: tuple[ufunc, tuple[object, ...], int] | None, + return_scalar: L[False], + /, + ) -> ndarray[_ShapeT, _DTypeT]: ... + @overload + def __array_wrap__( + self, + array: ndarray[tuple[()], dtype[_ScalarT]], + context: tuple[ufunc, tuple[object, ...], int] | None = None, + return_scalar: L[True] = True, + /, + ) -> _ScalarT: ... + @overload + def __array_wrap__( + self, + array: ndarray[_Shape1T, _DTypeT], + context: tuple[ufunc, tuple[object, ...], int] | None = None, + return_scalar: L[True] = True, + /, + ) -> ndarray[_Shape1T, _DTypeT]: ... + @overload + def __array_wrap__( + self, + array: ndarray[_ShapeT, dtype[_ScalarT]], + context: tuple[ufunc, tuple[object, ...], int] | None = None, + return_scalar: L[True] = True, + /, + ) -> _ScalarT | ndarray[_ShapeT, dtype[_ScalarT]]: ... @property def base(self) -> None: ... 
@@ -3599,10 +3638,33 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): def item(self, /) -> _ItemT_co: ... @overload def item(self, arg0: L[0, -1] | tuple[L[0, -1]] | tuple[()] = ..., /) -> _ItemT_co: ... + @override def tolist(self, /) -> _ItemT_co: ... - def byteswap(self, inplace: L[False] = ...) -> Self: ... + # NOTE: these technically exist, but will always raise when called + def trace( # type: ignore[misc] + self: Never, + /, + offset: Never = ..., + axis1: Never = ..., + axis2: Never = ..., + dtype: Never = ..., + out: Never = ..., + ) -> Never: ... + def diagonal(self: Never, /, offset: Never = ..., axis1: Never = ..., axis2: Never = ...) -> Never: ... # type: ignore[misc] + def swapaxes(self: Never, /, axis1: Never, axis2: Never) -> Never: ... # type: ignore[misc] + def sort(self: Never, /, axis: Never = ..., kind: Never = ..., order: Never = ...) -> Never: ... # type: ignore[misc] + def nonzero(self: Never, /) -> Never: ... # type: ignore[misc] + def setfield(self: Never, /, val: Never, dtype: Never, offset: Never = ...) -> None: ... # type: ignore[misc] + def searchsorted(self: Never, /, v: Never, side: Never = ..., sorter: Never = ...) -> Never: ... # type: ignore[misc] + + # NOTE: this wont't raise, but won't do anything either + def resize(self, new_shape: L[0, -1] | tuple[L[0, -1]] | tuple[()], /, *, refcheck: builtins.bool = False) -> None: ... + + # + def byteswap(self, /, inplace: L[False] = False) -> Self: ... + # @overload def astype( self, diff --git a/numpy/typing/tests/data/fail/ndarray_misc.pyi b/numpy/typing/tests/data/fail/ndarray_misc.pyi index 93e1bce8fecb..29418930061c 100644 --- a/numpy/typing/tests/data/fail/ndarray_misc.pyi +++ b/numpy/typing/tests/data/fail/ndarray_misc.pyi @@ -5,6 +5,7 @@ More extensive tests are performed for the methods' function-based counterpart in `../from_numeric.py`. 
""" +from typing import Never import numpy as np import numpy.typing as npt @@ -17,14 +18,26 @@ AR_b: npt.NDArray[np.bool] ctypes_obj = AR_f8.ctypes f8.argpartition(0) # type: ignore[attr-defined] -f8.diagonal() # type: ignore[attr-defined] -f8.dot(1) # type: ignore[attr-defined] -f8.nonzero() # type: ignore[attr-defined] f8.partition(0) # type: ignore[attr-defined] -f8.put(0, 2) # type: ignore[attr-defined] -f8.setfield(2, np.float64) # type: ignore[attr-defined] -f8.sort() # type: ignore[attr-defined] -f8.trace() # type: ignore[attr-defined] +f8.dot(1) # type: ignore[attr-defined] + +# NOTE: The following functions retur `Never`, causing mypy to stop analysis at that +# point, which we circumvent by wrapping them in a function. + +def f8_diagonal(x: np.float64) -> Never: + return x.diagonal() # type: ignore[misc] + +def f8_nonzero(x: np.float64) -> Never: + return x.nonzero() # type: ignore[misc] + +def f8_setfield(x: np.float64) -> Never: + return x.setfield(2, np.float64) # type: ignore[misc] + +def f8_sort(x: np.float64) -> Never: + return x.sort() # type: ignore[misc] + +def f8_trace(x: np.float64) -> Never: + return x.trace() # type: ignore[misc] AR_M.__complex__() # type: ignore[misc] AR_b.__index__() # type: ignore[misc] From a9a8d0685cc063f95a3d8f615b7806fc2cca2e79 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 15 Oct 2025 04:38:03 +0200 Subject: [PATCH 0615/1718] TYP: mark ``flexible`` as ``@final`` --- numpy/__init__.pyi | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index fbdccf8a4d76..e637589f67df 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -5529,11 +5529,10 @@ class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]): @overload def __ge__(self, other: _SupportsGT, /) -> bool_: ... -class flexible(_RealMixin, generic[_FlexibleItemT_co], Generic[_FlexibleItemT_co]): - @abstractmethod - def __new__(cls) -> Self: ... 
+@final # cannot be subclassed at runtime +class flexible(_RealMixin, generic[_FlexibleItemT_co], Generic[_FlexibleItemT_co]): ... # type: ignore[misc] -class void(flexible[bytes | tuple[Any, ...]]): +class void(flexible[bytes | tuple[Any, ...]]): # type: ignore[misc] @overload def __new__(cls, value: _IntLike_co | bytes, /, dtype: None = None) -> Self: ... @overload @@ -5547,13 +5546,13 @@ class void(flexible[bytes | tuple[Any, ...]]): def setfield(self, val: ArrayLike, dtype: DTypeLike, offset: int = ...) -> None: ... -class character(flexible[_CharacterItemT_co], Generic[_CharacterItemT_co]): +class character(flexible[_CharacterItemT_co], Generic[_CharacterItemT_co]): # type: ignore[misc] @abstractmethod def __new__(cls) -> Self: ... # NOTE: Most `np.bytes_` / `np.str_` methods return their builtin `bytes` / `str` counterpart -class bytes_(character[bytes], bytes): +class bytes_(character[bytes], bytes): # type: ignore[misc] @overload def __new__(cls, o: object = ..., /) -> Self: ... @overload @@ -5562,7 +5561,7 @@ class bytes_(character[bytes], bytes): # def __bytes__(self, /) -> bytes: ... -class str_(character[str], str): +class str_(character[str], str): # type: ignore[misc] @overload def __new__(cls, value: object = ..., /) -> Self: ... @overload From b5de4719fadf799b563fed75f6e3b96c1a6386d2 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 15 Oct 2025 04:42:47 +0200 Subject: [PATCH 0616/1718] TYP: revert narrowing the ``ndarray.fill`` input type --- numpy/__init__.pyi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 78dddd5cf26a..9246a37441e3 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1689,7 +1689,7 @@ class _ArrayOrScalarCommon: def to_device(self, device: L["cpu"], /, *, stream: int | Any | None = ...) -> Self: ... # NOTE: for `generic`, these two methods don't do anything - def fill(self, value: _ScalarLike_co, /) -> None: ... 
+ def fill(self, value: Incomplete, /) -> None: ... def put(self, /, indices: _ArrayLikeInt_co, values: ArrayLike, mode: _ModeKind = "raise") -> None: ... # NOTE: even on `generic` this seems to work From 838e213c1c12b52ecc1e7e80b5714717a2dd6e6f Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 15 Oct 2025 05:02:03 +0200 Subject: [PATCH 0617/1718] TYP: remove ``mod`` from ``ma.MaskedArray.__pow__`` --- numpy/ma/core.pyi | 68 ++++++++++++++++++++++------------------------- 1 file changed, 32 insertions(+), 36 deletions(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index c9c4ab152030..2e069164c3d0 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -1102,73 +1102,69 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def __rfloordiv__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... - # Keep in sync with `ndarray.__pow__` + # Keep in sync with `ndarray.__pow__` (minus the `mod` parameter) @overload - def __pow__(self: _MaskedArray[_NumberT], other: int | np.bool, mod: None = None, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + def __pow__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... @overload - def __pow__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, mod: None = None, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + def __pow__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] @overload - def __pow__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, mod: None = None, /) -> _MaskedArray[int8]: ... # type: ignore[overload-overlap] + def __pow__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[int8]: ... # type: ignore[overload-overlap] @overload - def __pow__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], mod: None = None, /) -> _MaskedArray[_NumberT]: ... 
# type: ignore[overload-overlap] + def __pow__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] @overload - def __pow__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, mod: None = None, /) -> _MaskedArray[float64]: ... + def __pow__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... @overload - def __pow__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], mod: None = None, /) -> _MaskedArray[float64]: ... + def __pow__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... @overload - def __pow__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, mod: None = None, /) -> _MaskedArray[complex128]: ... + def __pow__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... @overload - def __pow__( - self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], mod: None = None, / - ) -> _MaskedArray[complex128]: ... + def __pow__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... @overload - def __pow__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, mod: None = None, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __pow__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload - def __pow__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, mod: None = None, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + def __pow__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] @overload - def __pow__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, mod: None = None, /) -> _MaskedArray[floating]: ... 
# type: ignore[overload-overlap] + def __pow__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] @overload - def __pow__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, mod: None = None, /) -> _MaskedArray[complexfloating]: ... + def __pow__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... @overload - def __pow__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, mod: None = None, /) -> _MaskedArray[number]: ... + def __pow__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... @overload - def __pow__(self: _MaskedArray[object_], other: Any, mod: None = None, /) -> Any: ... + def __pow__(self: _MaskedArray[object_], other: Any, /) -> Any: ... @overload - def __pow__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, mod: None = None, /) -> Any: ... + def __pow__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... - # Keep in sync with `ndarray.__rpow__` + # Keep in sync with `ndarray.__rpow__` (minus the `mod` parameter) @overload - def __rpow__(self: _MaskedArray[_NumberT], other: int | np.bool, mod: None = None, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + def __rpow__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... @overload - def __rpow__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, mod: None = None, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + def __rpow__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] @overload - def __rpow__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, mod: None = None, /) -> _MaskedArray[int8]: ... # type: ignore[overload-overlap] + def __rpow__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[int8]: ... 
# type: ignore[overload-overlap] @overload - def __rpow__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], mod: None = None, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + def __rpow__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] @overload - def __rpow__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, mod: None = None, /) -> _MaskedArray[float64]: ... + def __rpow__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... @overload - def __rpow__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], mod: None = None, /) -> _MaskedArray[float64]: ... + def __rpow__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... @overload - def __rpow__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, mod: None = None, /) -> _MaskedArray[complex128]: ... + def __rpow__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... @overload - def __rpow__( - self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], mod: None = None, / - ) -> _MaskedArray[complex128]: ... + def __rpow__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... @overload - def __rpow__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, mod: None = None, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __rpow__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload - def __rpow__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, mod: None = None, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + def __rpow__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... 
# type: ignore[overload-overlap] @overload - def __rpow__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, mod: None = None, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + def __rpow__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] @overload - def __rpow__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, mod: None = None, /) -> _MaskedArray[complexfloating]: ... + def __rpow__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... @overload - def __rpow__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, mod: None = None, /) -> _MaskedArray[number]: ... + def __rpow__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... @overload - def __rpow__(self: _MaskedArray[object_], other: Any, mod: None = None, /) -> Any: ... + def __rpow__(self: _MaskedArray[object_], other: Any, /) -> Any: ... @overload - def __rpow__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, mod: None = None, /) -> Any: ... + def __rpow__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... # Keep in sync with `ndarray.__iadd__` @overload From 801ad71122502a97ab4be8c431257cc375a57acb Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 15 Oct 2025 05:02:54 +0200 Subject: [PATCH 0618/1718] TYP: add ``mod`` parameter to ``number.__pow__`` --- numpy/__init__.pyi | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index fbdccf8a4d76..8cdd35154fdb 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -3857,8 +3857,8 @@ class number(generic[_NumberItemT_co], Generic[_NBit, _NumberItemT_co]): def __rsub__(self, other: _NumberLike_co, /) -> Incomplete: ... def __mul__(self, other: _NumberLike_co, /) -> Incomplete: ... def __rmul__(self, other: _NumberLike_co, /) -> Incomplete: ... 
- def __pow__(self, other: _NumberLike_co, /) -> Incomplete: ... - def __rpow__(self, other: _NumberLike_co, /) -> Incomplete: ... + def __pow__(self, other: _NumberLike_co, mod: None = None, /) -> Incomplete: ... + def __rpow__(self, other: _NumberLike_co, mod: None = None, /) -> Incomplete: ... def __truediv__(self, other: _NumberLike_co, /) -> Incomplete: ... def __rtruediv__(self, other: _NumberLike_co, /) -> Incomplete: ... From 89b4eb740d8f473207d88615d80ba43fb4782acf Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 15 Oct 2025 05:03:56 +0200 Subject: [PATCH 0619/1718] TYP: remove ``mod`` parameter from ``matrix.__pow__`` --- numpy/__init__.pyi | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 8cdd35154fdb..7ce16be02f87 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -6013,9 +6013,9 @@ class matrix(ndarray[_2DShapeT_co, _DTypeT_co]): def __imul__(self, other: ArrayLike, /) -> Self: ... # - def __pow__(self, other: ArrayLike, mod: None = None, /) -> matrix[_2D, Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] - def __rpow__(self, other: ArrayLike, mod: None = None, /) -> matrix[_2D, Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] - def __ipow__(self, other: ArrayLike, /) -> Self: ... # type: ignore[misc, override] + def __pow__(self, other: ArrayLike, /) -> matrix[_2D, Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def __rpow__(self, other: ArrayLike, /) -> matrix[_2D, Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def __ipow__(self, other: ArrayLike, /) -> Self: ... 
# type: ignore[override] # keep in sync with `prod` and `mean` @overload # type: ignore[override] From b70eed2893a7e94a96ca7b1f430f60c5893fbfe3 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 15 Oct 2025 05:24:12 +0200 Subject: [PATCH 0620/1718] TYP: improved ``busdaycalendar`` annotations --- numpy/__init__.pyi | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index fbdccf8a4d76..441748e53fd0 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -5764,15 +5764,16 @@ class broadcast: @final class busdaycalendar: - def __new__( - cls, - weekmask: ArrayLike = ..., - holidays: ArrayLike | dt.date | _NestedSequence[dt.date] = ..., - ) -> busdaycalendar: ... + def __init__( + self, + /, + weekmask: str | Sequence[int | bool_ | integer] | _SupportsArray[dtype[bool_ | integer]] = "1111100", + holidays: Sequence[dt.date | datetime64] | _SupportsArray[dtype[datetime64]] | None = None, + ) -> None: ... @property - def weekmask(self) -> NDArray[np.bool]: ... + def weekmask(self) -> ndarray[tuple[int], dtype[bool_]]: ... @property - def holidays(self) -> NDArray[datetime64]: ... + def holidays(self) -> ndarray[tuple[int], dtype[datetime64[dt.date]]]: ... 
@final class nditer: From 8898e580d7764ad1dd0ceebe76bc6c37090d21b6 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 15 Oct 2025 05:34:13 +0200 Subject: [PATCH 0621/1718] TYP: missing ``vectorize`` default argument --- numpy/__init__.pyi | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index fbdccf8a4d76..4ce6e3e0108a 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -407,7 +407,7 @@ from numpy._core.shape_base import ( from ._expired_attrs_2_0 import __expired_attributes__ as __expired_attributes__ from ._globals import _CopyMode as _CopyMode -from ._globals import _NoValue as _NoValue +from ._globals import _NoValue as _NoValue, _NoValueType from numpy.lib import ( scimath as emath, @@ -5902,7 +5902,8 @@ class vectorize: __doc__: str | None def __init__( self, - pyfunc: Callable[..., Any], + /, + pyfunc: Callable[..., Any] | _NoValueType = ..., # = _NoValue otypes: str | Iterable[DTypeLike] | None = None, doc: str | None = None, excluded: Iterable[int | str] | None = None, From a68b24cc792ced91602a9a04a1194fb457c3e340 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 15 Oct 2025 05:44:00 +0200 Subject: [PATCH 0622/1718] TYP: ``corrcoef``: fix parameter name --- numpy/lib/_function_base_impl.pyi | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/numpy/lib/_function_base_impl.pyi b/numpy/lib/_function_base_impl.pyi index 9c2e2b1e28b0..d0018f54b161 100644 --- a/numpy/lib/_function_base_impl.pyi +++ b/numpy/lib/_function_base_impl.pyi @@ -478,7 +478,7 @@ def cov( # NOTE `bias` and `ddof` are deprecated and ignored @overload def corrcoef( - m: _ArrayLikeFloat_co, + x: _ArrayLikeFloat_co, y: _ArrayLikeFloat_co | None = None, rowvar: bool = True, bias: _NoValueType = ..., @@ -488,7 +488,7 @@ def corrcoef( ) -> NDArray[floating]: ... 
@overload def corrcoef( - m: _ArrayLikeComplex_co, + x: _ArrayLikeComplex_co, y: _ArrayLikeComplex_co | None = None, rowvar: bool = True, bias: _NoValueType = ..., @@ -498,7 +498,7 @@ def corrcoef( ) -> NDArray[complexfloating]: ... @overload def corrcoef( - m: _ArrayLikeComplex_co, + x: _ArrayLikeComplex_co, y: _ArrayLikeComplex_co | None = None, rowvar: bool = True, bias: _NoValueType = ..., @@ -508,7 +508,7 @@ def corrcoef( ) -> NDArray[_ScalarT]: ... @overload def corrcoef( - m: _ArrayLikeComplex_co, + x: _ArrayLikeComplex_co, y: _ArrayLikeComplex_co | None = None, rowvar: bool = True, bias: _NoValueType = ..., From e64f193b3b44a060d4137017036b186c5188bb40 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 15 Oct 2025 05:48:12 +0200 Subject: [PATCH 0623/1718] TYP: ``diff``: fix argument default type --- numpy/lib/_function_base_impl.pyi | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/numpy/lib/_function_base_impl.pyi b/numpy/lib/_function_base_impl.pyi index d0018f54b161..6aa1eb24e00e 100644 --- a/numpy/lib/_function_base_impl.pyi +++ b/numpy/lib/_function_base_impl.pyi @@ -288,21 +288,21 @@ def gradient( ) -> Any: ... @overload -def diff( +def diff( # type: ignore[overload-overlap] a: _T, n: L[0], axis: SupportsIndex = -1, - prepend: ArrayLike = ..., - append: ArrayLike = ..., + prepend: ArrayLike | _NoValueType = ..., # = _NoValue + append: ArrayLike | _NoValueType = ..., # = _NoValue ) -> _T: ... @overload def diff( a: ArrayLike, n: int = 1, axis: SupportsIndex = -1, - prepend: ArrayLike = ..., - append: ArrayLike = ..., -) -> NDArray[Any]: ... + prepend: ArrayLike | _NoValueType = ..., # = _NoValue + append: ArrayLike | _NoValueType = ..., # = _NoValue +) -> NDArray[Incomplete]: ... 
@overload # float scalar def interp( From 43d81d49a530f6fb04f5faedcae5ac4f21a7d27d Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 15 Oct 2025 05:52:39 +0200 Subject: [PATCH 0624/1718] TYP: ``percentile`` and ``quantile``: missing (deprecated) ``interpolation`` kwarg --- numpy/lib/_function_base_impl.pyi | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/numpy/lib/_function_base_impl.pyi b/numpy/lib/_function_base_impl.pyi index 6aa1eb24e00e..26400295482c 100644 --- a/numpy/lib/_function_base_impl.pyi +++ b/numpy/lib/_function_base_impl.pyi @@ -627,6 +627,7 @@ def percentile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated ) -> floating: ... @overload def percentile( @@ -639,6 +640,7 @@ def percentile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated ) -> complexfloating: ... @overload def percentile( @@ -651,6 +653,7 @@ def percentile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated ) -> timedelta64: ... @overload def percentile( @@ -663,6 +666,7 @@ def percentile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated ) -> datetime64: ... @overload def percentile( @@ -675,6 +679,7 @@ def percentile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated ) -> Any: ... @overload def percentile( @@ -687,6 +692,7 @@ def percentile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated ) -> NDArray[floating]: ... @overload def percentile( @@ -699,6 +705,7 @@ def percentile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated ) -> NDArray[complexfloating]: ... 
@overload def percentile( @@ -711,6 +718,7 @@ def percentile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated ) -> NDArray[timedelta64]: ... @overload def percentile( @@ -723,6 +731,7 @@ def percentile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated ) -> NDArray[datetime64]: ... @overload def percentile( @@ -735,6 +744,7 @@ def percentile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated ) -> NDArray[object_]: ... @overload def percentile( @@ -747,6 +757,7 @@ def percentile( keepdims: bool = False, *, weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated ) -> Any: ... @overload def percentile( @@ -759,6 +770,7 @@ def percentile( keepdims: bool = False, *, weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated ) -> _ArrayT: ... @overload def percentile( @@ -771,6 +783,7 @@ def percentile( method: _MethodKind = "linear", keepdims: bool = False, weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated ) -> _ArrayT: ... # NOTE: keep in sync with `percentile` @@ -785,6 +798,7 @@ def quantile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated ) -> floating: ... @overload def quantile( @@ -797,6 +811,7 @@ def quantile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated ) -> complexfloating: ... @overload def quantile( @@ -809,6 +824,7 @@ def quantile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated ) -> timedelta64: ... 
@overload def quantile( @@ -821,6 +837,7 @@ def quantile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated ) -> datetime64: ... @overload def quantile( @@ -833,6 +850,7 @@ def quantile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated ) -> Any: ... @overload def quantile( @@ -845,6 +863,7 @@ def quantile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated ) -> NDArray[floating]: ... @overload def quantile( @@ -857,6 +876,7 @@ def quantile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated ) -> NDArray[complexfloating]: ... @overload def quantile( @@ -869,6 +889,7 @@ def quantile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated ) -> NDArray[timedelta64]: ... @overload def quantile( @@ -881,6 +902,7 @@ def quantile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated ) -> NDArray[datetime64]: ... @overload def quantile( @@ -893,6 +915,7 @@ def quantile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated ) -> NDArray[object_]: ... @overload def quantile( @@ -905,6 +928,7 @@ def quantile( keepdims: bool = False, *, weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated ) -> Any: ... @overload def quantile( @@ -917,6 +941,7 @@ def quantile( keepdims: bool = False, *, weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated ) -> _ArrayT: ... 
@overload def quantile( @@ -929,6 +954,7 @@ def quantile( method: _MethodKind = "linear", keepdims: bool = False, weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated ) -> _ArrayT: ... _ScalarT_fm = TypeVar( From 396219f9787aa388cbabff0d5f5623b19c410e67 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 15 Oct 2025 05:54:11 +0200 Subject: [PATCH 0625/1718] TYP: ``piecewise``: fix pos-only parameters --- numpy/lib/_function_base_impl.pyi | 2 -- 1 file changed, 2 deletions(-) diff --git a/numpy/lib/_function_base_impl.pyi b/numpy/lib/_function_base_impl.pyi index 26400295482c..3f7457bff82b 100644 --- a/numpy/lib/_function_base_impl.pyi +++ b/numpy/lib/_function_base_impl.pyi @@ -231,7 +231,6 @@ def piecewise( Callable[Concatenate[NDArray[_ScalarT], _Pss], NDArray[_ScalarT | Any]] | _ScalarT | object ], - /, *args: _Pss.args, **kw: _Pss.kwargs, ) -> NDArray[_ScalarT]: ... @@ -243,7 +242,6 @@ def piecewise( Callable[Concatenate[NDArray[Any], _Pss], NDArray[Any]] | object ], - /, *args: _Pss.args, **kw: _Pss.kwargs, ) -> NDArray[Any]: ... From 9269f5ea030dfd514ac3caa9509da2fa3af4669a Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 15 Oct 2025 05:56:21 +0200 Subject: [PATCH 0626/1718] TYP: ``trim_zeros``: missing ``axis`` parameter --- numpy/lib/_function_base_impl.pyi | 1 + 1 file changed, 1 insertion(+) diff --git a/numpy/lib/_function_base_impl.pyi b/numpy/lib/_function_base_impl.pyi index 3f7457bff82b..a20032e047c6 100644 --- a/numpy/lib/_function_base_impl.pyi +++ b/numpy/lib/_function_base_impl.pyi @@ -415,6 +415,7 @@ def sort_complex(a: ArrayLike) -> NDArray[complexfloating]: ... def trim_zeros( filt: _TrimZerosSequence[_T], trim: L["f", "b", "fb", "bf"] = "fb", + axis: _ShapeLike | None = None, ) -> _T: ... 
@overload From 5d1ca7c07c9a4b80b19bf4419f4fc99194fd01de Mon Sep 17 00:00:00 2001 From: Alverok Date: Mon, 13 Oct 2025 00:54:33 +0530 Subject: [PATCH 0627/1718] DOC: Completed and fixed PR #29578 [skip azp][skip actions][skip cirrus] Co-authored-by: olivier --- doc/source/reference/routines.polynomials.rst | 34 +++++++++++-------- numpy/polynomial/polynomial.py | 4 +++ 2 files changed, 24 insertions(+), 14 deletions(-) diff --git a/doc/source/reference/routines.polynomials.rst b/doc/source/reference/routines.polynomials.rst index 0763a1cf719a..00b4460eae21 100644 --- a/doc/source/reference/routines.polynomials.rst +++ b/doc/source/reference/routines.polynomials.rst @@ -47,23 +47,28 @@ The `~numpy.polynomial.polynomial.Polynomial` class is imported for brevity:: from numpy.polynomial import Polynomial -+------------------------+------------------------------+---------------------------------------+ -| **How to...** | Legacy (`numpy.poly1d`) | `numpy.polynomial` | -+------------------------+------------------------------+---------------------------------------+ -| Create a | ``p = np.poly1d([1, 2, 3])`` | ``p = Polynomial([3, 2, 1])`` | -| polynomial object | | | -| from coefficients [1]_ | | | -+------------------------+------------------------------+---------------------------------------+ -| Create a polynomial | ``r = np.poly([-1, 1])`` | ``p = Polynomial.fromroots([-1, 1])`` | -| object from roots | ``p = np.poly1d(r)`` | | -+------------------------+------------------------------+---------------------------------------+ -| Fit a polynomial of | | | -| degree ``deg`` to data | ``np.polyfit(x, y, deg)`` | ``Polynomial.fit(x, y, deg)`` | -+------------------------+------------------------------+---------------------------------------+ - ++------------------------+----------------------------------------+---------------------------------------+ +| **How to...** | Legacy (`numpy.poly1d`) | `numpy.polynomial` | 
++------------------------+----------------------------------------+---------------------------------------+ +| Create a | ``p = np.poly1d([1, 2, 3])`` | ``p = Polynomial([3, 2, 1])`` | +| polynomial object | | | +| from coefficients [1]_ | | | ++------------------------+----------------------------------------+---------------------------------------+ +| Create a polynomial | ``r = np.poly([-1, 1])`` | ``p = Polynomial.fromroots([-1, 1])`` | +| object from roots | ``p = np.poly1d(r)`` | | ++------------------------+----------------------------------------+---------------------------------------+ +| Fit a polynomial of | | | +| degree ``deg`` to data | ``np.polyfit(x, y, deg)`` | ``Polynomial.fit(x, y, deg)`` | ++------------------------+----------------------------------------+---------------------------------------+ +| Evaluate a polynomial | ``p(2.0)`` or | ``p(2.0)`` or ``polyval(2.0, p.coef)``| +| at a point [2]_ | ``np.polyval([1, 2, 3], 2.0)`` | (use ``p.convert().coef`` after fit) | ++------------------------+----------------------------------------+---------------------------------------+ .. [1] Note the reversed ordering of the coefficients +.. [2] When evaluating polynomials created with ``fit()``, use ``p(x)`` or + ``polyval(x, p.convert().coef)`` to handle domain/window scaling correctly. + Transition Guide ~~~~~~~~~~~~~~~~ @@ -188,3 +193,4 @@ Documentation for legacy polynomials :maxdepth: 2 routines.polynomials.poly1d + \ No newline at end of file diff --git a/numpy/polynomial/polynomial.py b/numpy/polynomial/polynomial.py index 6ec0dc58a1de..220306693cf9 100644 --- a/numpy/polynomial/polynomial.py +++ b/numpy/polynomial/polynomial.py @@ -717,6 +717,10 @@ def polyval(x, c, tensor=True): ----- The evaluation uses Horner's method. + When using coefficients from polynomials created with ``Polynomial.fit()``, + use ``p(x)`` or ``polyval(x, p.convert().coef)`` to handle domain/window + scaling correctly, not ``polyval(x, p.coef)``. 
+ Examples -------- >>> import numpy as np From 3958757f49799ef4f0b4d8926178057d4bc60db6 Mon Sep 17 00:00:00 2001 From: Aaron Kollasch Date: Wed, 15 Oct 2025 11:35:13 -0400 Subject: [PATCH 0628/1718] BUG: Fix np.strings.slice if stop=None or start and stop >= len (#29944) Python treats `slice(-1)` differently from `slice(-1, None)`: The first is interpreted as `slice(None, -1, None)`, while the second becomes `slice(-1, None, None)`, according to the logic in `slice_new`. However, `np.strings.slice` treats these identically, as it cannot distinguish unset arguments from arguments set to None. This makes it impossible to get the last characters of each string, for example: ```python >>> a = np.array(['hello', 'world']) >>> np.strings.slice(a, -2, None) # should return last two characters array(['hel', 'wor'], dtype='>> a = np.array(['hello', 'world']) >>> np.strings.slice(a, -2, None) # returns last characters as expected array(['lo', 'ld'], dtype='>> np.strings.slice(a, -2) # original behavior preserved if no stop array(['hel', 'wor'], dtype='>> np.__version__ '2.3.3' >>> a = np.array(['hello', 'world'], dtype="T") >>> np.strings.slice(a, 5, 7) Traceback (most recent call last): File "", line 1, in File "numpy-dev/lib/python3.12/site-packages/numpy/_core/strings.py", line 1823, in slice return _slice(a, start, stop, step) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ MemoryError: Failed to allocate string in slice ``` This causes either a MemoryError or kills the process with code 251. * BUG: Fix np.strings.slice when start and stop >= len Allows commented test_slice conditions to be uncommented. 
--- numpy/_core/src/umath/stringdtype_ufuncs.cpp | 13 +++++++----- numpy/_core/strings.py | 10 ++++++++-- numpy/_core/tests/test_strings.py | 21 ++++++++++++++++++++ 3 files changed, 37 insertions(+), 7 deletions(-) diff --git a/numpy/_core/src/umath/stringdtype_ufuncs.cpp b/numpy/_core/src/umath/stringdtype_ufuncs.cpp index ca574f605c1a..e00612a369f0 100644 --- a/numpy/_core/src/umath/stringdtype_ufuncs.cpp +++ b/numpy/_core/src/umath/stringdtype_ufuncs.cpp @@ -2264,10 +2264,13 @@ slice_strided_loop(PyArrayMethod_Context *context, char *const data[], if (step == 1) { // step == 1 is the easy case, we can just use memcpy - npy_intp outsize = ((size_t)stop < num_codepoints - ? codepoint_offsets[stop] - : (unsigned char *)is.buf + is.size) - - codepoint_offsets[start]; + unsigned char *start_bounded = ((size_t)start < num_codepoints + ? codepoint_offsets[start] + : (unsigned char *)is.buf + is.size); + unsigned char *stop_bounded = ((size_t)stop < num_codepoints + ? codepoint_offsets[stop] + : (unsigned char *)is.buf + is.size); + npy_intp outsize = stop_bounded - start_bounded; if (load_new_string(ops, &os, outsize, oallocator, "slice") < 0) { goto fail; @@ -2276,7 +2279,7 @@ slice_strided_loop(PyArrayMethod_Context *context, char *const data[], /* explicitly discard const; initializing new buffer */ char *buf = (char *)os.buf; - memcpy(buf, codepoint_offsets[start], outsize); + memcpy(buf, start_bounded, outsize); } else { // handle step != 1 diff --git a/numpy/_core/strings.py b/numpy/_core/strings.py index 4d56f1e0c779..e9fa7f58e3ea 100644 --- a/numpy/_core/strings.py +++ b/numpy/_core/strings.py @@ -1727,7 +1727,7 @@ def translate(a, table, deletechars=None): ) @set_module("numpy.strings") -def slice(a, start=None, stop=None, step=None, /): +def slice(a, start=None, stop=np._NoValue, step=None, /): """ Slice the strings in `a` by slices specified by `start`, `stop`, `step`. 
Like in the regular Python `slice` object, if only `start` is @@ -1760,6 +1760,9 @@ def slice(a, start=None, stop=None, step=None, /): >>> np.strings.slice(a, 2) array(['he', 'wo'], dtype='>> np.strings.slice(a, 2, None) + array(['llo', 'rld'], dtype='>> np.strings.slice(a, 1, 5, 2) array(['el', 'ol'], dtype='>> np.strings.slice(b, -2) array(['hello wor', 'γεια σου κόσ', '你好', '👋'], dtype=StringDType()) + >>> np.strings.slice(b, -2, None) + array(['ld', 'με', '世界', ' 🌍'], dtype=StringDType()) + >>> np.strings.slice(b, [3, -10, 2, -3], [-1, -2, -1, 3]) array(['lo worl', ' σου κόσ', '世', '👋 🌍'], dtype=StringDType()) @@ -1785,7 +1791,7 @@ def slice(a, start=None, stop=None, step=None, /): """ # Just like in the construction of a regular slice object, if only start # is specified then start will become stop, see logic in slice_new. - if stop is None: + if stop is np._NoValue: stop = start start = None diff --git a/numpy/_core/tests/test_strings.py b/numpy/_core/tests/test_strings.py index 939b7fbd465d..e5c3bb87c773 100644 --- a/numpy/_core/tests/test_strings.py +++ b/numpy/_core/tests/test_strings.py @@ -975,17 +975,38 @@ def test_rpartition(self, buf, sep, res1, res2, res3, dt): @pytest.mark.parametrize("args", [ (None,), + (None, None), + (None, None, -1), (0,), + (0, None), + (0, None, -1), (1,), + (1, None), + (1, None, -1), (3,), + (3, None), (5,), + (5, None), + (5, 5), + (5, 5, -1), (6,), # test index past the end + (6, None), + (6, None, -1), + (6, 7), # test stop index past the end (-1,), + (-1, None), + (-1, None, -1), (-3,), + (-3, None), ([3, 4],), + ([3, 4], None), ([2, 4],), ([-3, 5],), + ([-3, 5], None), + ([-3, 5], None, -1), ([0, -5],), + ([0, -5], None), + ([0, -5], None, -1), (1, 4), (-3, 5), (None, -1), From 55b6234e98d7f9e4fe50e776dce8141d9620accb Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 15 Oct 2025 19:54:23 +0200 Subject: [PATCH 0629/1718] TYP: update ``ScalarType`` type --- numpy/_core/numerictypes.pyi | 36 
++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/numpy/_core/numerictypes.pyi b/numpy/_core/numerictypes.pyi index beec14079b48..46bb6a379861 100644 --- a/numpy/_core/numerictypes.pyi +++ b/numpy/_core/numerictypes.pyi @@ -168,28 +168,28 @@ ScalarType: Final[ type[str], type[memoryview[Any]], type[np.bool], - type[csingle], - type[cdouble], - type[clongdouble], - type[half], - type[single], - type[double], - type[longdouble], - type[byte], - type[short], - type[intc], - type[long], - type[longlong], - type[timedelta64], + type[complex64], + type[complex128], + type[complex128 | complex192 | complex256], + type[float16], + type[float32], + type[float64], + type[float64 | float96 | float128], + type[int8], + type[int16], + type[int32], + type[int32 | int64], + type[int64], type[datetime64], + type[timedelta64], type[object_], type[bytes_], type[str_], - type[ubyte], - type[ushort], - type[uintc], - type[ulong], - type[ulonglong], + type[uint8], + type[uint16], + type[uint32], + type[uint32 | uint64], + type[uint64], type[void], ] ] = ... 
From 95b4e135dea7f2196995310d92e03533a3ff553e Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 15 Oct 2025 20:05:48 +0200 Subject: [PATCH 0630/1718] TYP: fix and expand type-tests for ``ScalarType`` --- numpy/typing/tests/data/reveal/numerictypes.pyi | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/numpy/typing/tests/data/reveal/numerictypes.pyi b/numpy/typing/tests/data/reveal/numerictypes.pyi index aa5cf10410d4..75d108ce5a0f 100644 --- a/numpy/typing/tests/data/reveal/numerictypes.pyi +++ b/numpy/typing/tests/data/reveal/numerictypes.pyi @@ -4,8 +4,9 @@ import numpy as np assert_type(np.ScalarType[0], type[int]) assert_type(np.ScalarType[3], type[bool]) -assert_type(np.ScalarType[8], type[np.csingle]) -assert_type(np.ScalarType[10], type[np.clongdouble]) +assert_type(np.ScalarType[8], type[np.complex64]) +assert_type(np.ScalarType[9], type[np.complex128]) +assert_type(np.ScalarType[-1], type[np.void]) assert_type(np.bool_(object()), np.bool) assert_type(np.typecodes["Character"], Literal["c"]) From 955647d43fcced237a332b058dd835b7e61b7d9d Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 15 Oct 2025 20:16:36 +0200 Subject: [PATCH 0631/1718] TYP: expand ``TypedDict`` kwargs in ``full`` to appease stubtest --- numpy/_core/numeric.pyi | 48 ++++++++++++++++++++++++++++++----------- 1 file changed, 36 insertions(+), 12 deletions(-) diff --git a/numpy/_core/numeric.pyi b/numpy/_core/numeric.pyi index f9ce1ce3a4a3..7e205bd43cda 100644 --- a/numpy/_core/numeric.pyi +++ b/numpy/_core/numeric.pyi @@ -739,7 +739,9 @@ def full( fill_value: _ScalarT, dtype: None = None, order: _OrderCF = "C", - **kwargs: Unpack[_KwargsEmpty], + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, ) -> _Array[tuple[int], _ScalarT]: ... 
@overload def full( @@ -747,7 +749,9 @@ def full( fill_value: Any, dtype: _DTypeT | _SupportsDType[_DTypeT], order: _OrderCF = "C", - **kwargs: Unpack[_KwargsEmpty], + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, ) -> np.ndarray[tuple[int], _DTypeT]: ... @overload def full( @@ -755,7 +759,9 @@ def full( fill_value: Any, dtype: type[_ScalarT], order: _OrderCF = "C", - **kwargs: Unpack[_KwargsEmpty], + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, ) -> _Array[tuple[int], _ScalarT]: ... @overload def full( @@ -763,7 +769,9 @@ def full( fill_value: Any, dtype: DTypeLike | None = None, order: _OrderCF = "C", - **kwargs: Unpack[_KwargsEmpty], + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, ) -> _Array[tuple[int], Any]: ... # known shape @overload @@ -772,7 +780,9 @@ def full( fill_value: _ScalarT, dtype: None = None, order: _OrderCF = "C", - **kwargs: Unpack[_KwargsEmpty], + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, ) -> _Array[_AnyShapeT, _ScalarT]: ... @overload def full( @@ -780,7 +790,9 @@ def full( fill_value: Any, dtype: _DTypeT | _SupportsDType[_DTypeT], order: _OrderCF = "C", - **kwargs: Unpack[_KwargsEmpty], + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, ) -> np.ndarray[_AnyShapeT, _DTypeT]: ... @overload def full( @@ -788,7 +800,9 @@ def full( fill_value: Any, dtype: type[_ScalarT], order: _OrderCF = "C", - **kwargs: Unpack[_KwargsEmpty], + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, ) -> _Array[_AnyShapeT, _ScalarT]: ... @overload def full( @@ -796,7 +810,9 @@ def full( fill_value: Any, dtype: DTypeLike | None = None, order: _OrderCF = "C", - **kwargs: Unpack[_KwargsEmpty], + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, ) -> _Array[_AnyShapeT, Any]: ... 
# unknown shape @overload @@ -805,7 +821,9 @@ def full( fill_value: _ScalarT, dtype: None = None, order: _OrderCF = "C", - **kwargs: Unpack[_KwargsEmpty], + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, ) -> NDArray[_ScalarT]: ... @overload def full( @@ -813,7 +831,9 @@ def full( fill_value: Any, dtype: _DTypeT | _SupportsDType[_DTypeT], order: _OrderCF = "C", - **kwargs: Unpack[_KwargsEmpty], + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, ) -> np.ndarray[Any, _DTypeT]: ... @overload def full( @@ -821,7 +841,9 @@ def full( fill_value: Any, dtype: type[_ScalarT], order: _OrderCF = "C", - **kwargs: Unpack[_KwargsEmpty], + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, ) -> NDArray[_ScalarT]: ... @overload def full( @@ -829,7 +851,9 @@ def full( fill_value: Any, dtype: DTypeLike | None = None, order: _OrderCF = "C", - **kwargs: Unpack[_KwargsEmpty], + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, ) -> NDArray[Any]: ... 
@overload From e0398755df41852b660b6c8e98e3a9397a45325e Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 15 Oct 2025 20:24:36 +0200 Subject: [PATCH 0632/1718] TYP: remove unused imports in ``_core.numeric`` --- numpy/_core/numeric.pyi | 2 -- 1 file changed, 2 deletions(-) diff --git a/numpy/_core/numeric.pyi b/numpy/_core/numeric.pyi index 7e205bd43cda..314bce708306 100644 --- a/numpy/_core/numeric.pyi +++ b/numpy/_core/numeric.pyi @@ -11,7 +11,6 @@ from typing import ( TypeAlias, TypeGuard, TypeVar, - Unpack, overload, ) @@ -127,7 +126,6 @@ from .multiarray import ( WRAP as WRAP, _Array, _ConstructorEmpty, - _KwargsEmpty, arange, array, asanyarray, From e1a484d3cb7b87def91272b87a52c0b2a0b7c680 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 15 Oct 2025 20:37:12 +0200 Subject: [PATCH 0633/1718] TYP: remove deprecated ``interpolation`` kwarg in ``percentile`` and ``quantile`` Co-authored-by: Matti Picus --- numpy/lib/_function_base_impl.pyi | 26 -------------------------- 1 file changed, 26 deletions(-) diff --git a/numpy/lib/_function_base_impl.pyi b/numpy/lib/_function_base_impl.pyi index a20032e047c6..47dd0749f07f 100644 --- a/numpy/lib/_function_base_impl.pyi +++ b/numpy/lib/_function_base_impl.pyi @@ -626,7 +626,6 @@ def percentile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, - interpolation: None = None, # deprecated ) -> floating: ... @overload def percentile( @@ -639,7 +638,6 @@ def percentile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, - interpolation: None = None, # deprecated ) -> complexfloating: ... @overload def percentile( @@ -652,7 +650,6 @@ def percentile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, - interpolation: None = None, # deprecated ) -> timedelta64: ... 
@overload def percentile( @@ -665,7 +662,6 @@ def percentile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, - interpolation: None = None, # deprecated ) -> datetime64: ... @overload def percentile( @@ -678,7 +674,6 @@ def percentile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, - interpolation: None = None, # deprecated ) -> Any: ... @overload def percentile( @@ -691,7 +686,6 @@ def percentile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, - interpolation: None = None, # deprecated ) -> NDArray[floating]: ... @overload def percentile( @@ -704,7 +698,6 @@ def percentile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, - interpolation: None = None, # deprecated ) -> NDArray[complexfloating]: ... @overload def percentile( @@ -717,7 +710,6 @@ def percentile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, - interpolation: None = None, # deprecated ) -> NDArray[timedelta64]: ... @overload def percentile( @@ -730,7 +722,6 @@ def percentile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, - interpolation: None = None, # deprecated ) -> NDArray[datetime64]: ... @overload def percentile( @@ -743,7 +734,6 @@ def percentile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, - interpolation: None = None, # deprecated ) -> NDArray[object_]: ... @overload def percentile( @@ -756,7 +746,6 @@ def percentile( keepdims: bool = False, *, weights: _ArrayLikeFloat_co | None = None, - interpolation: None = None, # deprecated ) -> Any: ... @overload def percentile( @@ -769,7 +758,6 @@ def percentile( keepdims: bool = False, *, weights: _ArrayLikeFloat_co | None = None, - interpolation: None = None, # deprecated ) -> _ArrayT: ... 
@overload def percentile( @@ -782,7 +770,6 @@ def percentile( method: _MethodKind = "linear", keepdims: bool = False, weights: _ArrayLikeFloat_co | None = None, - interpolation: None = None, # deprecated ) -> _ArrayT: ... # NOTE: keep in sync with `percentile` @@ -797,7 +784,6 @@ def quantile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, - interpolation: None = None, # deprecated ) -> floating: ... @overload def quantile( @@ -810,7 +796,6 @@ def quantile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, - interpolation: None = None, # deprecated ) -> complexfloating: ... @overload def quantile( @@ -823,7 +808,6 @@ def quantile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, - interpolation: None = None, # deprecated ) -> timedelta64: ... @overload def quantile( @@ -836,7 +820,6 @@ def quantile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, - interpolation: None = None, # deprecated ) -> datetime64: ... @overload def quantile( @@ -849,7 +832,6 @@ def quantile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, - interpolation: None = None, # deprecated ) -> Any: ... @overload def quantile( @@ -862,7 +844,6 @@ def quantile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, - interpolation: None = None, # deprecated ) -> NDArray[floating]: ... @overload def quantile( @@ -875,7 +856,6 @@ def quantile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, - interpolation: None = None, # deprecated ) -> NDArray[complexfloating]: ... @overload def quantile( @@ -888,7 +868,6 @@ def quantile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, - interpolation: None = None, # deprecated ) -> NDArray[timedelta64]: ... 
@overload def quantile( @@ -901,7 +880,6 @@ def quantile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, - interpolation: None = None, # deprecated ) -> NDArray[datetime64]: ... @overload def quantile( @@ -914,7 +892,6 @@ def quantile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, - interpolation: None = None, # deprecated ) -> NDArray[object_]: ... @overload def quantile( @@ -927,7 +904,6 @@ def quantile( keepdims: bool = False, *, weights: _ArrayLikeFloat_co | None = None, - interpolation: None = None, # deprecated ) -> Any: ... @overload def quantile( @@ -940,7 +916,6 @@ def quantile( keepdims: bool = False, *, weights: _ArrayLikeFloat_co | None = None, - interpolation: None = None, # deprecated ) -> _ArrayT: ... @overload def quantile( @@ -953,7 +928,6 @@ def quantile( method: _MethodKind = "linear", keepdims: bool = False, weights: _ArrayLikeFloat_co | None = None, - interpolation: None = None, # deprecated ) -> _ArrayT: ... _ScalarT_fm = TypeVar( From aedb6922d33fcb62c715e438234732b9798b352d Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 15 Oct 2025 20:57:36 +0200 Subject: [PATCH 0634/1718] DEP: Remove expired ``interpolation`` kwarg from ``percentile`` and ``quantile`` --- numpy/lib/_function_base_impl.py | 51 ++++---------------------------- 1 file changed, 6 insertions(+), 45 deletions(-) diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 4adb0c187395..bdc32574e8d1 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -4090,8 +4090,7 @@ def _median(a, axis=None, out=None, overwrite_input=False): def _percentile_dispatcher(a, q, axis=None, out=None, overwrite_input=None, - method=None, keepdims=None, *, weights=None, - interpolation=None): + method=None, keepdims=None, *, weights=None): return (a, q, out, weights) @@ -4104,8 +4103,7 @@ def percentile(a, method="linear", keepdims=False, *, - weights=None, - interpolation=None): + 
weights=None): """ Compute the q-th percentile of the data along the specified axis. @@ -4175,11 +4173,6 @@ def percentile(a, .. versionadded:: 2.0.0 - interpolation : str, optional - Deprecated name for the method keyword argument. - - .. deprecated:: 1.22.0 - Returns ------- percentile : scalar or ndarray @@ -4275,10 +4268,6 @@ def percentile(a, The American Statistician, 50(4), pp. 361-365, 1996 """ - if interpolation is not None: - method = _check_interpolation_as_method( - method, interpolation, "percentile") - a = np.asanyarray(a) if a.dtype.kind == "c": raise TypeError("a must be an array of real numbers") @@ -4305,8 +4294,7 @@ def percentile(a, def _quantile_dispatcher(a, q, axis=None, out=None, overwrite_input=None, - method=None, keepdims=None, *, weights=None, - interpolation=None): + method=None, keepdims=None, *, weights=None): return (a, q, out, weights) @@ -4319,8 +4307,7 @@ def quantile(a, method="linear", keepdims=False, *, - weights=None, - interpolation=None): + weights=None): """ Compute the q-th quantile of the data along the specified axis. @@ -4390,11 +4377,6 @@ def quantile(a, .. versionadded:: 2.0.0 - interpolation : str, optional - Deprecated name for the method keyword argument. - - .. deprecated:: 1.22.0 - Returns ------- quantile : scalar or ndarray @@ -4536,10 +4518,6 @@ def quantile(a, The American Statistician, 50(4), pp. 
361-365, 1996 """ - if interpolation is not None: - method = _check_interpolation_as_method( - method, interpolation, "quantile") - a = np.asanyarray(a) if a.dtype.kind == "c": raise TypeError("a must be an array of real numbers") @@ -4599,23 +4577,6 @@ def _quantile_is_valid(q): return True -def _check_interpolation_as_method(method, interpolation, fname): - # Deprecated NumPy 1.22, 2021-11-08 - warnings.warn( - f"the `interpolation=` argument to {fname} was renamed to " - "`method=`, which has additional options.\n" - "Users of the modes 'nearest', 'lower', 'higher', or " - "'midpoint' are encouraged to review the method they used. " - "(Deprecated NumPy 1.22)", - DeprecationWarning, stacklevel=4) - if method != "linear": - # sanity check, we assume this basically never happens - raise TypeError( - "You shall not pass both `method` and `interpolation`!\n" - "(`interpolation` is Deprecated in favor of `method`)") - return interpolation - - def _compute_virtual_index(n, quantiles, alpha: float, beta: float): """ Compute the floating point indexes of an array for the linear @@ -4651,7 +4612,7 @@ def _get_gamma(virtual_indexes, previous_indexes, method): sample. previous_indexes : array_like The floor values of virtual_indexes. - interpolation : dict + method : dict The interpolation method chosen, which may have a specific rule modifying gamma. @@ -4809,7 +4770,7 @@ def _quantile( These methods are extended to this function using _ureduce See nanpercentile for parameter usage It computes the quantiles of the array for the given axis. - A linear interpolation is performed based on the `interpolation`. + A linear interpolation is performed based on the `method`. By default, the method is "linear" where alpha == beta == 1 which performs the 7th method of Hyndman&Fan. 
From 514fc6c3053f5f173dfb4042adf08ea26facaf35 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 15 Oct 2025 20:57:50 +0200 Subject: [PATCH 0635/1718] DEP: Remove expired ``interpolation`` kwarg from ``nanpercentile`` and ``nanquantile`` --- numpy/lib/_nanfunctions_impl.py | 26 ++------------------------ 1 file changed, 2 insertions(+), 24 deletions(-) diff --git a/numpy/lib/_nanfunctions_impl.py b/numpy/lib/_nanfunctions_impl.py index aec60d484ba4..f030d74c5c11 100644 --- a/numpy/lib/_nanfunctions_impl.py +++ b/numpy/lib/_nanfunctions_impl.py @@ -1219,7 +1219,7 @@ def nanmedian(a, axis=None, out=None, overwrite_input=False, keepdims=np._NoValu def _nanpercentile_dispatcher( a, q, axis=None, out=None, overwrite_input=None, - method=None, keepdims=None, *, weights=None, interpolation=None): + method=None, keepdims=None, *, weights=None): return (a, q, out, weights) @@ -1234,7 +1234,6 @@ def nanpercentile( keepdims=np._NoValue, *, weights=None, - interpolation=None, ): """ Compute the qth percentile of the data along the specified axis, @@ -1313,11 +1312,6 @@ def nanpercentile( .. versionadded:: 2.0.0 - interpolation : str, optional - Deprecated name for the method keyword argument. - - .. deprecated:: 1.22.0 - Returns ------- percentile : scalar or ndarray @@ -1379,10 +1373,6 @@ def nanpercentile( The American Statistician, 50(4), pp. 
361-365, 1996 """ - if interpolation is not None: - method = fnb._check_interpolation_as_method( - method, interpolation, "nanpercentile") - a = np.asanyarray(a) if a.dtype.kind == "c": raise TypeError("a must be an array of real numbers") @@ -1407,8 +1397,7 @@ def nanpercentile( def _nanquantile_dispatcher(a, q, axis=None, out=None, overwrite_input=None, - method=None, keepdims=None, *, weights=None, - interpolation=None): + method=None, keepdims=None, *, weights=None): return (a, q, out, weights) @@ -1423,7 +1412,6 @@ def nanquantile( keepdims=np._NoValue, *, weights=None, - interpolation=None, ): """ Compute the qth quantile of the data along the specified axis, @@ -1500,11 +1488,6 @@ def nanquantile( .. versionadded:: 2.0.0 - interpolation : str, optional - Deprecated name for the method keyword argument. - - .. deprecated:: 1.22.0 - Returns ------- quantile : scalar or ndarray @@ -1565,11 +1548,6 @@ def nanquantile( The American Statistician, 50(4), pp. 361-365, 1996 """ - - if interpolation is not None: - method = fnb._check_interpolation_as_method( - method, interpolation, "nanquantile") - a = np.asanyarray(a) if a.dtype.kind == "c": raise TypeError("a must be an array of real numbers") From c162a27759c72538b5d054db0ad2c3c618100b4e Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 15 Oct 2025 21:00:24 +0200 Subject: [PATCH 0636/1718] DEP: Remove ``TestQuantileInterpolationDeprecation`` --- numpy/_core/tests/test_deprecations.py | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index 27e98a563930..f76c68a531a3 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -215,24 +215,6 @@ def test_not_deprecated(self, name: str) -> None: self.assert_not_deprecated(lambda: getattr(self.ctypes, name)) -class TestQuantileInterpolationDeprecation(_DeprecationTestCase): - # Deprecated 2021-11-08, NumPy 1.22 - 
@pytest.mark.parametrize("func", - [np.percentile, np.quantile, np.nanpercentile, np.nanquantile]) - def test_deprecated(self, func): - self.assert_deprecated( - lambda: func([0., 1.], 0., interpolation="linear")) - self.assert_deprecated( - lambda: func([0., 1.], 0., interpolation="nearest")) - - @pytest.mark.parametrize("func", - [np.percentile, np.quantile, np.nanpercentile, np.nanquantile]) - def test_both_passed(self, func): - with pytest.warns(DeprecationWarning): - with pytest.raises(TypeError): - func([0., 1.], 0., interpolation="nearest", method="nearest") - - class TestPyIntConversion(_DeprecationTestCase): message = r".*stop allowing conversion of out-of-bound.*" From 37b02ca58b5ce3cb1a78007e4c7eca0dc421163d Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 15 Oct 2025 21:14:17 +0200 Subject: [PATCH 0637/1718] DOC: add release note for #29973 --- doc/release/upcoming_changes/29973.expired.rst | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100644 doc/release/upcoming_changes/29973.expired.rst diff --git a/doc/release/upcoming_changes/29973.expired.rst b/doc/release/upcoming_changes/29973.expired.rst new file mode 100644 index 000000000000..0dda176fc360 --- /dev/null +++ b/doc/release/upcoming_changes/29973.expired.rst @@ -0,0 +1,12 @@ +Remove ``interpolation`` parameter from quantile and percentile functions +--------------------------------------------------------------------------- + +The ``interpolation`` parameter was deprecated in NumPy 1.22.0 and has been +removed from the following functions: + +* ``numpy.percentile`` +* ``numpy.nanpercentile`` +* ``numpy.quantile`` +* ``numpy.nanquantile`` + +Use the ``method`` parameter instead. 
From 58481143ea65a59fec17ff80af714dfafd1d1f04 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 15 Oct 2025 21:15:14 +0200 Subject: [PATCH 0638/1718] DOC: fix typo in release note for #29909 --- doc/release/upcoming_changes/29909.expired.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/release/upcoming_changes/29909.expired.rst b/doc/release/upcoming_changes/29909.expired.rst index e3cb557e2c28..6a2ee4f53c09 100644 --- a/doc/release/upcoming_changes/29909.expired.rst +++ b/doc/release/upcoming_changes/29909.expired.rst @@ -1,5 +1,5 @@ -Remove numpy.linalg.linag and numpy.fft.helper ----------------------------------------------- +Remove numpy.linalg.linalg and numpy.fft.helper +----------------------------------------------- The following were deprecated in NumPy 2.0 and have been moved to private modules From c27fb484b7b41f11bfbddc23b782147df8d7b425 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 15 Oct 2025 21:17:25 +0200 Subject: [PATCH 0639/1718] DOC: fix rst formatting in release note for #29973 --- doc/release/upcoming_changes/29973.expired.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/release/upcoming_changes/29973.expired.rst b/doc/release/upcoming_changes/29973.expired.rst index 0dda176fc360..5b51cb7cf428 100644 --- a/doc/release/upcoming_changes/29973.expired.rst +++ b/doc/release/upcoming_changes/29973.expired.rst @@ -1,5 +1,5 @@ Remove ``interpolation`` parameter from quantile and percentile functions ---------------------------------------------------------------------------- +------------------------------------------------------------------------- The ``interpolation`` parameter was deprecated in NumPy 1.22.0 and has been removed from the following functions: From 7d276dea991b0c8f72502d5856fe9ea31e7e9cb0 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 10 Oct 2025 02:32:29 +0200 Subject: [PATCH 0640/1718] TYP: move ``matrix`` from ``__init__.pyi`` to ``matrixlib/defmatrix.pyi`` --- 
numpy/__init__.pyi | 187 +---------------------------- numpy/matrixlib/__init__.pyi | 4 +- numpy/matrixlib/defmatrix.pyi | 217 ++++++++++++++++++++++++++++++++-- 3 files changed, 212 insertions(+), 196 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 2bf4eb16d1d9..b03533f77b96 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -606,6 +606,7 @@ from numpy.lib._utils_impl import ( from numpy.matrixlib import ( asmatrix, bmat, + matrix, ) __all__ = [ # noqa: RUF022 @@ -5986,192 +5987,6 @@ class poly1d: k: _ArrayLikeComplex_co | _ArrayLikeObject_co | None = 0, ) -> poly1d: ... -class matrix(ndarray[_2DShapeT_co, _DTypeT_co]): - __array_priority__: ClassVar[float] = 10.0 # pyright: ignore[reportIncompatibleMethodOverride] - - def __new__( - subtype, # pyright: ignore[reportSelfClsParameterName] - data: ArrayLike, - dtype: DTypeLike | None = None, - copy: builtins.bool = True, - ) -> matrix[_2D, Incomplete]: ... - def __array_finalize__(self, obj: object) -> None: ... - - @overload # type: ignore[override] - def __getitem__( - self, key: SupportsIndex | _ArrayLikeInt_co | tuple[SupportsIndex | _ArrayLikeInt_co, ...], / - ) -> Incomplete: ... - @overload - def __getitem__(self, key: _ToIndices, /) -> matrix[_2D, _DTypeT_co]: ... - @overload - def __getitem__(self: matrix[Any, dtype[void]], key: str, /) -> matrix[_2D, dtype]: ... - @overload - def __getitem__(self: matrix[Any, dtype[void]], key: list[str], /) -> matrix[_2DShapeT_co, _DTypeT_co]: ... # pyright: ignore[reportIncompatibleMethodOverride] - - # - def __mul__(self, other: ArrayLike, /) -> matrix[_2D, Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] - def __rmul__(self, other: ArrayLike, /) -> matrix[_2D, Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] - def __imul__(self, other: ArrayLike, /) -> Self: ... - - # - def __pow__(self, other: ArrayLike, /) -> matrix[_2D, Incomplete]: ... 
# type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] - def __rpow__(self, other: ArrayLike, /) -> matrix[_2D, Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] - def __ipow__(self, other: ArrayLike, /) -> Self: ... # type: ignore[override] - - # keep in sync with `prod` and `mean` - @overload # type: ignore[override] - def sum(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None) -> Incomplete: ... - @overload - def sum(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None) -> matrix[_2D, Incomplete]: ... - @overload - def sum(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... - @overload - def sum(self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] - - # keep in sync with `sum` and `mean` - @overload # type: ignore[override] - def prod(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None) -> Incomplete: ... - @overload - def prod(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None) -> matrix[_2D, Incomplete]: ... - @overload - def prod(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... - @overload - def prod(self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] - - # keep in sync with `sum` and `prod` - @overload # type: ignore[override] - def mean(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None) -> Incomplete: ... - @overload - def mean(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None) -> matrix[_2D, Incomplete]: ... - @overload - def mean(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... 
- @overload - def mean(self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] - - # keep in sync with `var` - @overload # type: ignore[override] - def std(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None, ddof: float = 0) -> Incomplete: ... - @overload - def std( - self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None, ddof: float = 0 - ) -> matrix[_2D, Incomplete]: ... - @overload - def std(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT, ddof: float = 0) -> _ArrayT: ... - @overload - def std( # pyright: ignore[reportIncompatibleMethodOverride] - self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT, ddof: float = 0 - ) -> _ArrayT: ... - - # keep in sync with `std` - @overload # type: ignore[override] - def var(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None, ddof: float = 0) -> Incomplete: ... - @overload - def var( - self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None, ddof: float = 0 - ) -> matrix[_2D, Incomplete]: ... - @overload - def var(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT, ddof: float = 0) -> _ArrayT: ... - @overload - def var( # pyright: ignore[reportIncompatibleMethodOverride] - self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT, ddof: float = 0 - ) -> _ArrayT: ... - - # keep in sync with `all` - @overload # type: ignore[override] - def any(self, axis: None = None, out: None = None) -> np.bool: ... - @overload - def any(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, dtype[np.bool]]: ... - @overload - def any(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ... - @overload - def any(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... 
# pyright: ignore[reportIncompatibleMethodOverride] - - # keep in sync with `any` - @overload # type: ignore[override] - def all(self, axis: None = None, out: None = None) -> np.bool: ... - @overload - def all(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, dtype[np.bool]]: ... - @overload - def all(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ... - @overload - def all(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] - - # keep in sync with `min` and `ptp` - @overload # type: ignore[override] - def max(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> _ScalarT: ... - @overload - def max(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, _DTypeT_co]: ... - @overload - def max(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ... - @overload - def max(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] - - # keep in sync with `max` and `ptp` - @overload # type: ignore[override] - def min(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> _ScalarT: ... - @overload - def min(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, _DTypeT_co]: ... - @overload - def min(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ... - @overload - def min(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] - - # keep in sync with `max` and `min` - @overload - def ptp(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> _ScalarT: ... - @overload - def ptp(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, _DTypeT_co]: ... - @overload - def ptp(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ... - @overload - def ptp(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... 
# pyright: ignore[reportIncompatibleMethodOverride] - - # keep in sync with `argmin` - @overload # type: ignore[override] - def argmax(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> intp: ... - @overload - def argmax(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, dtype[intp]]: ... - @overload - def argmax(self, axis: _ShapeLike | None, out: _BoolOrIntArrayT) -> _BoolOrIntArrayT: ... - @overload - def argmax(self, axis: _ShapeLike | None = None, *, out: _BoolOrIntArrayT) -> _BoolOrIntArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] - - # keep in sync with `argmax` - @overload # type: ignore[override] - def argmin(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> intp: ... - @overload - def argmin(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, dtype[intp]]: ... - @overload - def argmin(self, axis: _ShapeLike | None, out: _BoolOrIntArrayT) -> _BoolOrIntArrayT: ... - @overload - def argmin(self, axis: _ShapeLike | None = None, *, out: _BoolOrIntArrayT) -> _BoolOrIntArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] - - #the second overload handles the (rare) case that the matrix is not 2-d - @overload - def tolist(self: matrix[_2D, dtype[generic[_T]]]) -> list[list[_T]]: ... # pyright: ignore[reportIncompatibleMethodOverride] - @overload - def tolist(self) -> Incomplete: ... # pyright: ignore[reportIncompatibleMethodOverride] - - # these three methods will at least return a `2-d` array of shape (1, n) - def squeeze(self, axis: _ShapeLike | None = None) -> matrix[_2D, _DTypeT_co]: ... - def ravel(self, /, order: _OrderKACF = "C") -> matrix[_2D, _DTypeT_co]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] - def flatten(self, /, order: _OrderKACF = "C") -> matrix[_2D, _DTypeT_co]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] - - # matrix.T is inherited from _ScalarOrArrayCommon - def getT(self) -> Self: ... 
- @property - def I(self) -> matrix[_2D, Incomplete]: ... # noqa: E743 - def getI(self) -> matrix[_2D, Incomplete]: ... - @property - def A(self) -> ndarray[_2DShapeT_co, _DTypeT_co]: ... - def getA(self) -> ndarray[_2DShapeT_co, _DTypeT_co]: ... - @property - def A1(self) -> ndarray[_AnyShape, _DTypeT_co]: ... - def getA1(self) -> ndarray[_AnyShape, _DTypeT_co]: ... - @property - def H(self) -> matrix[_2D, _DTypeT_co]: ... - def getH(self) -> matrix[_2D, _DTypeT_co]: ... - def from_dlpack( x: _SupportsDLPack[None], /, diff --git a/numpy/matrixlib/__init__.pyi b/numpy/matrixlib/__init__.pyi index 56ae8bf4c84b..ad4091d98d06 100644 --- a/numpy/matrixlib/__init__.pyi +++ b/numpy/matrixlib/__init__.pyi @@ -1,5 +1,3 @@ -from numpy import matrix - -from .defmatrix import asmatrix, bmat +from .defmatrix import asmatrix, bmat, matrix __all__ = ["matrix", "bmat", "asmatrix"] diff --git a/numpy/matrixlib/defmatrix.pyi b/numpy/matrixlib/defmatrix.pyi index 462e8b209d2c..b5345a2d0d7c 100644 --- a/numpy/matrixlib/defmatrix.pyi +++ b/numpy/matrixlib/defmatrix.pyi @@ -1,17 +1,220 @@ +from _typeshed import Incomplete from collections.abc import Mapping, Sequence -from typing import Any +from types import EllipsisType +from typing import Any, ClassVar, Literal as L, Self, SupportsIndex, TypeAlias, overload +from typing_extensions import TypeVar -from numpy import matrix -from numpy._typing import ArrayLike, DTypeLike, NDArray +import numpy as np +from numpy._typing import ( + ArrayLike, + DTypeLike, + NDArray, + _AnyShape, + _ArrayLikeInt_co, + _NestedSequence, + _ShapeLike, +) __all__ = ["asmatrix", "bmat", "matrix"] +_T = TypeVar("_T") +_ArrayT = TypeVar("_ArrayT", bound=np.ndarray) +_BoolOrIntArrayT = TypeVar("_BoolOrIntArrayT", bound=NDArray[np.integer | np.bool]) +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +_ShapeT_co = TypeVar("_ShapeT_co", bound=_2D, default=_2D, covariant=True) +_DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=True) + 
+_2D: TypeAlias = tuple[int, int] +_Matrix: TypeAlias = matrix[_2D, np.dtype[_ScalarT]] +_ToIndex1: TypeAlias = slice | EllipsisType | NDArray[np.integer | np.bool] | _NestedSequence[int] | None +_ToIndex2: TypeAlias = tuple[_ToIndex1, _ToIndex1 | SupportsIndex] | tuple[_ToIndex1 | SupportsIndex, _ToIndex1] + +class matrix(np.ndarray[_ShapeT_co, _DTypeT_co]): + __array_priority__: ClassVar[float] = 10.0 # pyright: ignore[reportIncompatibleMethodOverride] + + def __new__( + subtype, # pyright: ignore[reportSelfClsParameterName] + data: ArrayLike, + dtype: DTypeLike | None = None, + copy: bool = True, + ) -> _Matrix[Incomplete]: ... + + # + @overload # type: ignore[override] + def __getitem__( + self, key: SupportsIndex | _ArrayLikeInt_co | tuple[SupportsIndex | _ArrayLikeInt_co, ...], / + ) -> Incomplete: ... + @overload + def __getitem__(self, key: _ToIndex1 | _ToIndex2, /) -> matrix[_2D, _DTypeT_co]: ... + @overload + def __getitem__(self: _Matrix[np.void], key: str, /) -> _Matrix[Incomplete]: ... + @overload + def __getitem__(self: _Matrix[np.void], key: list[str], /) -> matrix[_2D, _DTypeT_co]: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # + def __mul__(self, other: ArrayLike, /) -> _Matrix[Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def __rmul__(self, other: ArrayLike, /) -> _Matrix[Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def __imul__(self, other: ArrayLike, /) -> Self: ... + + # + def __pow__(self, other: ArrayLike, /) -> _Matrix[Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def __rpow__(self, other: ArrayLike, /) -> _Matrix[Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def __ipow__(self, other: ArrayLike, /) -> Self: ... 
+ + # keep in sync with `prod` and `mean` + @overload # type: ignore[override] + def sum(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None) -> Incomplete: ... + @overload + def sum(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None) -> _Matrix[Incomplete]: ... + @overload + def sum(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... + @overload + def sum(self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # keep in sync with `sum` and `mean` + @overload # type: ignore[override] + def prod(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None) -> Incomplete: ... + @overload + def prod(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None) -> _Matrix[Incomplete]: ... + @overload + def prod(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... + @overload + def prod(self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # keep in sync with `sum` and `prod` + @overload # type: ignore[override] + def mean(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None) -> Incomplete: ... + @overload + def mean(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None) -> _Matrix[Incomplete]: ... + @overload + def mean(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... + @overload + def mean(self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # keep in sync with `var` + @overload # type: ignore[override] + def std(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None, ddof: float = 0) -> Incomplete: ... 
+ @overload + def std(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None, ddof: float = 0) -> _Matrix[Incomplete]: ... + @overload + def std(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT, ddof: float = 0) -> _ArrayT: ... + @overload + def std( # pyright: ignore[reportIncompatibleMethodOverride] + self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT, ddof: float = 0 + ) -> _ArrayT: ... + + # keep in sync with `std` + @overload # type: ignore[override] + def var(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None, ddof: float = 0) -> Incomplete: ... + @overload + def var(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None, ddof: float = 0) -> _Matrix[Incomplete]: ... + @overload + def var(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT, ddof: float = 0) -> _ArrayT: ... + @overload + def var( # pyright: ignore[reportIncompatibleMethodOverride] + self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT, ddof: float = 0 + ) -> _ArrayT: ... + + # keep in sync with `all` + @overload # type: ignore[override] + def any(self, axis: None = None, out: None = None) -> np.bool: ... + @overload + def any(self, axis: _ShapeLike, out: None = None) -> _Matrix[np.bool]: ... + @overload + def any(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ... + @overload + def any(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # keep in sync with `any` + @overload # type: ignore[override] + def all(self, axis: None = None, out: None = None) -> np.bool: ... + @overload + def all(self, axis: _ShapeLike, out: None = None) -> _Matrix[np.bool]: ... + @overload + def all(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ... + @overload + def all(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... 
# pyright: ignore[reportIncompatibleMethodOverride] + + # keep in sync with `min` and `ptp` + @overload # type: ignore[override] + def max(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> _ScalarT: ... + @overload + def max(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, _DTypeT_co]: ... + @overload + def max(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ... + @overload + def max(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # keep in sync with `max` and `ptp` + @overload # type: ignore[override] + def min(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> _ScalarT: ... + @overload + def min(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, _DTypeT_co]: ... + @overload + def min(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ... + @overload + def min(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # keep in sync with `max` and `min` + @overload + def ptp(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> _ScalarT: ... + @overload + def ptp(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, _DTypeT_co]: ... + @overload + def ptp(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ... + @overload + def ptp(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # keep in sync with `argmin` + @overload # type: ignore[override] + def argmax(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> np.intp: ... + @overload + def argmax(self, axis: _ShapeLike, out: None = None) -> _Matrix[np.intp]: ... + @overload + def argmax(self, axis: _ShapeLike | None, out: _BoolOrIntArrayT) -> _BoolOrIntArrayT: ... + @overload + def argmax(self, axis: _ShapeLike | None = None, *, out: _BoolOrIntArrayT) -> _BoolOrIntArrayT: ... 
# pyright: ignore[reportIncompatibleMethodOverride] + + # keep in sync with `argmax` + @overload # type: ignore[override] + def argmin(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> np.intp: ... + @overload + def argmin(self, axis: _ShapeLike, out: None = None) -> _Matrix[np.intp]: ... + @overload + def argmin(self, axis: _ShapeLike | None, out: _BoolOrIntArrayT) -> _BoolOrIntArrayT: ... + @overload + def argmin(self, axis: _ShapeLike | None = None, *, out: _BoolOrIntArrayT) -> _BoolOrIntArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # the second overload handles the (rare) case that the matrix is not 2-d + @overload + def tolist(self: _Matrix[np.generic[_T]]) -> list[list[_T]]: ... # pyright: ignore[reportIncompatibleMethodOverride] + @overload + def tolist(self) -> Incomplete: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # these three methods will at least return a `2-d` array of shape (1, n) + def squeeze(self, /, axis: _ShapeLike | None = None) -> matrix[_2D, _DTypeT_co]: ... + def ravel(self, /, order: L["K", "A", "C", "F"] | None = "C") -> matrix[_2D, _DTypeT_co]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def flatten(self, /, order: L["K", "A", "C", "F"] | None = "C") -> matrix[_2D, _DTypeT_co]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + + # matrix.T is inherited from _ScalarOrArrayCommon + def getT(self) -> Self: ... + @property + def I(self) -> _Matrix[Incomplete]: ... # noqa: E743 + def getI(self) -> _Matrix[Incomplete]: ... + @property + def A(self) -> np.ndarray[_2D, _DTypeT_co]: ... + def getA(self) -> np.ndarray[_2D, _DTypeT_co]: ... + @property + def A1(self) -> np.ndarray[_AnyShape, _DTypeT_co]: ... + def getA1(self) -> np.ndarray[_AnyShape, _DTypeT_co]: ... + @property + def H(self) -> matrix[_2D, _DTypeT_co]: ... + def getH(self) -> matrix[_2D, _DTypeT_co]: ... 
+ def bmat( obj: str | Sequence[ArrayLike] | NDArray[Any], ldict: Mapping[str, Any] | None = None, gdict: Mapping[str, Any] | None = None, -) -> matrix[tuple[int, int], Any]: ... +) -> _Matrix[Incomplete]: ... -def asmatrix( - data: ArrayLike, dtype: DTypeLike | None = None -) -> matrix[tuple[int, int], Any]: ... +def asmatrix(data: ArrayLike, dtype: DTypeLike | None = None) -> _Matrix[Incomplete]: ... From b648a7cfbb41d4368dcc2566de1593afd0570041 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 10 Oct 2025 02:47:38 +0200 Subject: [PATCH 0641/1718] TYP: update ``matrix`` type-tests --- numpy/typing/tests/data/reveal/matrix.pyi | 28 +++++++++---------- .../tests/data/reveal/ndarray_conversion.pyi | 2 +- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/numpy/typing/tests/data/reveal/matrix.pyi b/numpy/typing/tests/data/reveal/matrix.pyi index 1a7285d428cc..3a32b3d394f0 100644 --- a/numpy/typing/tests/data/reveal/matrix.pyi +++ b/numpy/typing/tests/data/reveal/matrix.pyi @@ -9,11 +9,11 @@ mat: np.matrix[_Shape2D, np.dtype[np.int64]] ar_f8: npt.NDArray[np.float64] ar_ip: npt.NDArray[np.intp] -assert_type(mat * 5, np.matrix[_Shape2D, Any]) -assert_type(5 * mat, np.matrix[_Shape2D, Any]) +assert_type(mat * 5, np.matrix) +assert_type(5 * mat, np.matrix) mat *= 5 -assert_type(mat**5, np.matrix[_Shape2D, Any]) +assert_type(mat**5, np.matrix) mat **= 5 assert_type(mat.sum(), Any) @@ -29,11 +29,11 @@ assert_type(mat.argmax(), np.intp) assert_type(mat.argmin(), np.intp) assert_type(mat.ptp(), np.int64) -assert_type(mat.sum(axis=0), np.matrix[_Shape2D, Any]) -assert_type(mat.mean(axis=0), np.matrix[_Shape2D, Any]) -assert_type(mat.std(axis=0), np.matrix[_Shape2D, Any]) -assert_type(mat.var(axis=0), np.matrix[_Shape2D, Any]) -assert_type(mat.prod(axis=0), np.matrix[_Shape2D, Any]) +assert_type(mat.sum(axis=0), np.matrix) +assert_type(mat.mean(axis=0), np.matrix) +assert_type(mat.std(axis=0), np.matrix) +assert_type(mat.var(axis=0), np.matrix) 
+assert_type(mat.prod(axis=0), np.matrix) assert_type(mat.any(axis=0), np.matrix[_Shape2D, np.dtype[np.bool]]) assert_type(mat.all(axis=0), np.matrix[_Shape2D, np.dtype[np.bool]]) assert_type(mat.max(axis=0), np.matrix[_Shape2D, np.dtype[np.int64]]) @@ -56,18 +56,18 @@ assert_type(mat.argmin(out=ar_ip), npt.NDArray[np.intp]) assert_type(mat.ptp(out=ar_f8), npt.NDArray[np.float64]) assert_type(mat.T, np.matrix[_Shape2D, np.dtype[np.int64]]) -assert_type(mat.I, np.matrix[_Shape2D, Any]) +assert_type(mat.I, np.matrix) assert_type(mat.A, np.ndarray[_Shape2D, np.dtype[np.int64]]) assert_type(mat.A1, npt.NDArray[np.int64]) assert_type(mat.H, np.matrix[_Shape2D, np.dtype[np.int64]]) assert_type(mat.getT(), np.matrix[_Shape2D, np.dtype[np.int64]]) -assert_type(mat.getI(), np.matrix[_Shape2D, Any]) +assert_type(mat.getI(), np.matrix) assert_type(mat.getA(), np.ndarray[_Shape2D, np.dtype[np.int64]]) assert_type(mat.getA1(), npt.NDArray[np.int64]) assert_type(mat.getH(), np.matrix[_Shape2D, np.dtype[np.int64]]) -assert_type(np.bmat(ar_f8), np.matrix[_Shape2D, Any]) -assert_type(np.bmat([[0, 1, 2]]), np.matrix[_Shape2D, Any]) -assert_type(np.bmat("mat"), np.matrix[_Shape2D, Any]) +assert_type(np.bmat(ar_f8), np.matrix) +assert_type(np.bmat([[0, 1, 2]]), np.matrix) +assert_type(np.bmat("mat"), np.matrix) -assert_type(np.asmatrix(ar_f8, dtype=np.int64), np.matrix[_Shape2D, Any]) +assert_type(np.asmatrix(ar_f8, dtype=np.int64), np.matrix) diff --git a/numpy/typing/tests/data/reveal/ndarray_conversion.pyi b/numpy/typing/tests/data/reveal/ndarray_conversion.pyi index 4f7d0c28b747..2af616440c5e 100644 --- a/numpy/typing/tests/data/reveal/ndarray_conversion.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_conversion.pyi @@ -71,7 +71,7 @@ assert_type(i0_nd.copy("C"), npt.NDArray[np.int_]) assert_type(i0_nd.view(), npt.NDArray[np.int_]) assert_type(i0_nd.view(np.float64), npt.NDArray[np.float64]) assert_type(i0_nd.view(float), npt.NDArray[Any]) -assert_type(i0_nd.view(np.float64, 
np.matrix), np.matrix[tuple[int, int], Any]) +assert_type(i0_nd.view(np.float64, np.matrix), np.matrix) # getfield assert_type(i0_nd.getfield("float"), npt.NDArray[Any]) From 125b8771ad8ca1c823c2eee82f7308549c0011d3 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 15 Oct 2025 22:03:54 +0200 Subject: [PATCH 0642/1718] TYP: fix ``random.Generator.shuffle`` input type --- numpy/random/_generator.pyi | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/numpy/random/_generator.pyi b/numpy/random/_generator.pyi index b81f5d322416..1f7c342394e1 100644 --- a/numpy/random/_generator.pyi +++ b/numpy/random/_generator.pyi @@ -1,4 +1,4 @@ -from collections.abc import Callable +from collections.abc import Callable, MutableSequence from typing import Any, Literal, TypeAlias, TypeVar, overload import numpy as np @@ -850,7 +850,12 @@ class Generator: def permuted( self, x: ArrayLike, *, axis: int | None = None, out: NDArray[Any] | None = None ) -> NDArray[Any]: ... - def shuffle(self, x: ArrayLike, axis: int = 0) -> None: ... + + # axis must be 0 for MutableSequence + @overload + def shuffle(self, /, x: np.ndarray, axis: int = 0) -> None: ... + @overload + def shuffle(self, /, x: MutableSequence[Any], axis: Literal[0] = 0) -> None: ... 
def default_rng( seed: _ArrayLikeInt_co | SeedSequence | BitGenerator | Generator | RandomState | None = None From 9b040fc582a371626f54a1c4f00b9d69500e8f7d Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 16 Oct 2025 01:10:59 +0200 Subject: [PATCH 0643/1718] DEP: remove ``in1d`` --- numpy/__init__.py | 1 - numpy/__init__.pyi | 5 +- numpy/_core/tests/test_deprecations.py | 3 +- numpy/conftest.py | 1 - numpy/lib/_arraysetops_impl.py | 114 +------------------------ numpy/lib/_arraysetops_impl.pyi | 14 +-- numpy/matlib.pyi | 1 - 7 files changed, 8 insertions(+), 131 deletions(-) diff --git a/numpy/__init__.py b/numpy/__init__.py index 7bfc12119694..4e3119238c0a 100644 --- a/numpy/__init__.py +++ b/numpy/__init__.py @@ -459,7 +459,6 @@ from .lib._arraypad_impl import pad from .lib._arraysetops_impl import ( ediff1d, - in1d, intersect1d, isin, setdiff1d, diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 2bf4eb16d1d9..7ba72a7fc96b 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -417,9 +417,8 @@ from numpy.lib._arraypad_impl import ( pad, ) -from numpy.lib._arraysetops_impl import ( # type: ignore[deprecated] +from numpy.lib._arraysetops_impl import ( ediff1d, - in1d, intersect1d, isin, setdiff1d, @@ -700,7 +699,7 @@ __all__ = [ # noqa: RUF022 "iscomplexobj", "isrealobj", "imag", "iscomplex", "isreal", "nan_to_num", "real", "real_if_close", "typename", "mintypecode", "common_type", # lib._arraysetops_impl.__all__ - "ediff1d", "in1d", "intersect1d", "isin", "setdiff1d", "setxor1d", "union1d", + "ediff1d", "intersect1d", "isin", "setdiff1d", "setxor1d", "union1d", "unique", "unique_all", "unique_counts", "unique_inverse", "unique_values", # lib._ufunclike_impl.__all__ "fix", "isneginf", "isposinf", diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index 27e98a563930..87892e29e93e 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -309,7 +309,7 @@ def 
test_deprecated_np_lib_math(self): class TestLibImports(_DeprecationTestCase): # Deprecated in Numpy 1.26.0, 2023-09 def test_lib_functions_deprecation_call(self): - from numpy import in1d, row_stack, trapz + from numpy import row_stack, trapz from numpy._core.numerictypes import maximum_sctype from numpy.lib._function_base_impl import disp from numpy.lib._npyio_impl import recfromcsv, recfromtxt @@ -328,7 +328,6 @@ def test_lib_functions_deprecation_call(self): self.assert_deprecated(get_array_wrap) self.assert_deprecated(lambda: maximum_sctype(int)) - self.assert_deprecated(lambda: in1d([1], [1])) self.assert_deprecated(lambda: row_stack([[]])) self.assert_deprecated(lambda: trapz([1], [1])) self.assert_deprecated(lambda: np.chararray) diff --git a/numpy/conftest.py b/numpy/conftest.py index 71ece01cc3b5..52f2a75b9df0 100644 --- a/numpy/conftest.py +++ b/numpy/conftest.py @@ -163,7 +163,6 @@ def warnings_errors_and_rng(test=None): "This function is deprecated.", # random_integers "Data type alias 'a'", # numpy.rec.fromfile "Arrays of 2-dimensional vectors", # matlib.cross - "`in1d` is deprecated", "NumPy warning suppression and assertion utilities are deprecated." 
] msg = "|".join(msgs) diff --git a/numpy/lib/_arraysetops_impl.py b/numpy/lib/_arraysetops_impl.py index f85a3d55aae9..5d521b1fba60 100644 --- a/numpy/lib/_arraysetops_impl.py +++ b/numpy/lib/_arraysetops_impl.py @@ -15,7 +15,6 @@ """ import functools -import warnings from typing import NamedTuple import numpy as np @@ -28,7 +27,7 @@ __all__ = [ - "ediff1d", "in1d", "intersect1d", "isin", "setdiff1d", "setxor1d", + "ediff1d", "intersect1d", "isin", "setdiff1d", "setxor1d", "union1d", "unique", "unique_all", "unique_counts", "unique_inverse", "unique_values" ] @@ -804,112 +803,7 @@ def setxor1d(ar1, ar2, assume_unique=False): return aux[flag[1:] & flag[:-1]] -def _in1d_dispatcher(ar1, ar2, assume_unique=None, invert=None, *, - kind=None): - return (ar1, ar2) - - -@array_function_dispatch(_in1d_dispatcher) -def in1d(ar1, ar2, assume_unique=False, invert=False, *, kind=None): - """ - Test whether each element of a 1-D array is also present in a second array. - - .. deprecated:: 2.0 - Use :func:`isin` instead of `in1d` for new code. - - Returns a boolean array the same length as `ar1` that is True - where an element of `ar1` is in `ar2` and False otherwise. - - Parameters - ---------- - ar1 : (M,) array_like - Input array. - ar2 : array_like - The values against which to test each value of `ar1`. - assume_unique : bool, optional - If True, the input arrays are both assumed to be unique, which - can speed up the calculation. Default is False. - invert : bool, optional - If True, the values in the returned array are inverted (that is, - False where an element of `ar1` is in `ar2` and True otherwise). - Default is False. ``np.in1d(a, b, invert=True)`` is equivalent - to (but is faster than) ``np.invert(in1d(a, b))``. - kind : {None, 'sort', 'table'}, optional - The algorithm to use. This will not affect the final result, - but will affect the speed and memory use. The default, None, - will select automatically based on memory considerations. 
- - * If 'sort', will use a mergesort-based approach. This will have - a memory usage of roughly 6 times the sum of the sizes of - `ar1` and `ar2`, not accounting for size of dtypes. - * If 'table', will use a lookup table approach similar - to a counting sort. This is only available for boolean and - integer arrays. This will have a memory usage of the - size of `ar1` plus the max-min value of `ar2`. `assume_unique` - has no effect when the 'table' option is used. - * If None, will automatically choose 'table' if - the required memory allocation is less than or equal to - 6 times the sum of the sizes of `ar1` and `ar2`, - otherwise will use 'sort'. This is done to not use - a large amount of memory by default, even though - 'table' may be faster in most cases. If 'table' is chosen, - `assume_unique` will have no effect. - - Returns - ------- - in1d : (M,) ndarray, bool - The values `ar1[in1d]` are in `ar2`. - - See Also - -------- - isin : Version of this function that preserves the - shape of ar1. - - Notes - ----- - `in1d` can be considered as an element-wise function version of the - python keyword `in`, for 1-D sequences. ``in1d(a, b)`` is roughly - equivalent to ``np.array([item in b for item in a])``. - However, this idea fails if `ar2` is a set, or similar (non-sequence) - container: As ``ar2`` is converted to an array, in those cases - ``asarray(ar2)`` is an object array rather than the expected array of - contained values. - - Using ``kind='table'`` tends to be faster than `kind='sort'` if the - following relationship is true: - ``log10(len(ar2)) > (log10(max(ar2)-min(ar2)) - 2.27) / 0.927``, - but may use greater memory. The default value for `kind` will - be automatically selected based only on memory usage, so one may - manually set ``kind='table'`` if memory constraints can be relaxed. 
- - Examples - -------- - >>> import numpy as np - >>> test = np.array([0, 1, 2, 5, 0]) - >>> states = [0, 2] - >>> mask = np.in1d(test, states) - >>> mask - array([ True, False, True, False, True]) - >>> test[mask] - array([0, 2, 0]) - >>> mask = np.in1d(test, states, invert=True) - >>> mask - array([False, True, False, True, False]) - >>> test[mask] - array([1, 5]) - """ - - # Deprecated in NumPy 2.0, 2023-08-18 - warnings.warn( - "`in1d` is deprecated. Use `np.isin` instead.", - DeprecationWarning, - stacklevel=2 - ) - - return _in1d(ar1, ar2, assume_unique, invert, kind=kind) - - -def _in1d(ar1, ar2, assume_unique=False, invert=False, *, kind=None): +def _isin(ar1, ar2, assume_unique=False, invert=False, *, kind=None): # Ravel both arrays, behavior for the first array could be different ar1 = np.asarray(ar1).ravel() ar2 = np.asarray(ar2).ravel() @@ -1178,7 +1072,7 @@ def isin(element, test_elements, assume_unique=False, invert=False, *, [ True, False]]) """ element = np.asarray(element) - return _in1d(element, test_elements, assume_unique=assume_unique, + return _isin(element, test_elements, assume_unique=assume_unique, invert=invert, kind=kind).reshape(element.shape) @@ -1261,4 +1155,4 @@ def setdiff1d(ar1, ar2, assume_unique=False): else: ar1 = unique(ar1) ar2 = unique(ar2) - return ar1[_in1d(ar1, ar2, assume_unique=True, invert=True)] + return ar1[_isin(ar1, ar2, assume_unique=True, invert=True)] diff --git a/numpy/lib/_arraysetops_impl.pyi b/numpy/lib/_arraysetops_impl.pyi index 6ca3ed8282b6..da687da03dde 100644 --- a/numpy/lib/_arraysetops_impl.pyi +++ b/numpy/lib/_arraysetops_impl.pyi @@ -7,7 +7,7 @@ from typing import ( TypeAlias, overload, ) -from typing_extensions import TypeVar, deprecated +from typing_extensions import TypeVar import numpy as np from numpy._typing import ( @@ -20,7 +20,6 @@ from numpy._typing import ( __all__ = [ "ediff1d", - "in1d", "intersect1d", "isin", "setdiff1d", @@ -461,14 +460,3 @@ def isin( *, kind: L["sort", "table"] | None 
= None, ) -> NDArray[np.bool]: ... - -# -@deprecated("Use 'isin' instead") -def in1d( - element: ArrayLike, - test_elements: ArrayLike, - assume_unique: bool = False, - invert: bool = False, - *, - kind: L["sort", "table"] | None = None, -) -> NDArray[np.bool]: ... diff --git a/numpy/matlib.pyi b/numpy/matlib.pyi index 552e518fe9b9..0a4f649a7d9c 100644 --- a/numpy/matlib.pyi +++ b/numpy/matlib.pyi @@ -222,7 +222,6 @@ from numpy import ( # noqa: F401 i0, iinfo, imag, - in1d, index_exp, indices, inexact, From 6f554c1b25375c1f94f59f042d1321a3b28c3e38 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 16 Oct 2025 01:23:39 +0200 Subject: [PATCH 0644/1718] DOC: add release note for #29978 --- doc/release/upcoming_changes/29978.expired.rst | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 doc/release/upcoming_changes/29978.expired.rst diff --git a/doc/release/upcoming_changes/29978.expired.rst b/doc/release/upcoming_changes/29978.expired.rst new file mode 100644 index 000000000000..e0f4de1d8715 --- /dev/null +++ b/doc/release/upcoming_changes/29978.expired.rst @@ -0,0 +1,4 @@ +Removed ``numpy.in1d`` +---------------------- + +``numpy.in1d`` has been deprecated since NumPy 2.0 and is now removed in favor of ``numpy.isin``. From edc42afc187ae623a8418f8e7afa9c2b05be27f0 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 16 Oct 2025 01:41:15 +0200 Subject: [PATCH 0645/1718] DOC: remove reference to ``numpy.in1d`` --- numpy/ma/extras.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py index 381cf75f00c3..b7405d3809f8 100644 --- a/numpy/ma/extras.py +++ b/numpy/ma/extras.py @@ -1446,14 +1446,13 @@ def in1d(ar1, ar2, assume_unique=False, invert=False): Test whether each element of an array is also present in a second array. - The output is always a masked array. See `numpy.in1d` for more details. + The output is always a masked array. We recommend using :func:`isin` instead of `in1d` for new code. 
See Also -------- isin : Version of this function that preserves the shape of ar1. - numpy.in1d : Equivalent function for ndarrays. Examples -------- From 5f7d84a40164c35f75f590e8d6163a2f9977268c Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 16 Oct 2025 01:47:50 +0200 Subject: [PATCH 0646/1718] DEP: remove ``ndindex.ndincr`` (deprecated since 1.20) --- numpy/lib/_index_tricks_impl.py | 16 ---------------- numpy/lib/_index_tricks_impl.pyi | 6 +----- 2 files changed, 1 insertion(+), 21 deletions(-) diff --git a/numpy/lib/_index_tricks_impl.py b/numpy/lib/_index_tricks_impl.py index a0d04ad3285f..2dbb098c2bad 100644 --- a/numpy/lib/_index_tricks_impl.py +++ b/numpy/lib/_index_tricks_impl.py @@ -695,22 +695,6 @@ def __init__(self, *shape): def __iter__(self): return self - def ndincr(self): - """ - Increment the multi-dimensional index by one. - - This method is for backward compatibility only: do not use. - - .. deprecated:: 1.20.0 - This method has been advised against since numpy 1.8.0, but only - started emitting DeprecationWarning as of this version. - """ - # NumPy 1.20.0, 2020-09-08 - warnings.warn( - "`ndindex.ndincr()` is deprecated, use `next(ndindex)` instead", - DeprecationWarning, stacklevel=2) - next(self) - def __next__(self): """ Standard iterator method, updates the index and returns the index diff --git a/numpy/lib/_index_tricks_impl.pyi b/numpy/lib/_index_tricks_impl.pyi index 3c3074b1c1a4..ca8d223aa5d8 100644 --- a/numpy/lib/_index_tricks_impl.pyi +++ b/numpy/lib/_index_tricks_impl.pyi @@ -11,7 +11,7 @@ from typing import ( final, overload, ) -from typing_extensions import TypeVar, deprecated +from typing_extensions import TypeVar import numpy as np from numpy._core.multiarray import ravel_multi_index, unravel_index @@ -101,10 +101,6 @@ class ndindex: def __iter__(self) -> Self: ... def __next__(self) -> _AnyShape: ... - # - @deprecated("Deprecated since 1.20.0.") - def ndincr(self, /) -> None: ... 
- class nd_grid(Generic[_BoolT_co]): __slots__ = ("sparse",) From bca310b139393f293ddccdcd73889a83aa66022a Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 16 Oct 2025 01:49:33 +0200 Subject: [PATCH 0647/1718] STY: appease ruff --- numpy/lib/_index_tricks_impl.py | 1 - 1 file changed, 1 deletion(-) diff --git a/numpy/lib/_index_tricks_impl.py b/numpy/lib/_index_tricks_impl.py index 2dbb098c2bad..666171f7c2aa 100644 --- a/numpy/lib/_index_tricks_impl.py +++ b/numpy/lib/_index_tricks_impl.py @@ -1,7 +1,6 @@ import functools import math import sys -import warnings from itertools import product import numpy as np From 048845707c84fd86c3997fbfcff9ead9c3a45c7f Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 16 Oct 2025 01:53:07 +0200 Subject: [PATCH 0648/1718] DOC: add release note for #29980 --- doc/release/upcoming_changes/29980.expired.rst | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 doc/release/upcoming_changes/29980.expired.rst diff --git a/doc/release/upcoming_changes/29980.expired.rst b/doc/release/upcoming_changes/29980.expired.rst new file mode 100644 index 000000000000..563ba8aa6929 --- /dev/null +++ b/doc/release/upcoming_changes/29980.expired.rst @@ -0,0 +1,5 @@ +Removed ``numpy.ndindex.ndincr()`` +---------------------------------- + +The ``ndindex.ndincr()`` method has been deprecated since NumPy 1.20 and is now removed; +use ``next(ndindex)`` instead. 
From bc6ce7ccf3d602b6a8197ef151325e8f88fc255f Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 16 Oct 2025 02:23:03 +0200 Subject: [PATCH 0649/1718] TYP: change ``ndenumerate.__new__`` into ``__init__`` --- numpy/lib/_index_tricks_impl.pyi | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/numpy/lib/_index_tricks_impl.pyi b/numpy/lib/_index_tricks_impl.pyi index 3c3074b1c1a4..908e54f15449 100644 --- a/numpy/lib/_index_tricks_impl.pyi +++ b/numpy/lib/_index_tricks_impl.pyi @@ -61,21 +61,21 @@ _Trans1DT_co = TypeVar("_Trans1DT_co", bound=int, default=L[-1], covariant=True) class ndenumerate(Generic[_ScalarT_co]): @overload - def __new__(cls, arr: _FiniteNestedSequence[_SupportsArray[np.dtype[_ScalarT]]]) -> ndenumerate[_ScalarT]: ... + def __init__(self: ndenumerate[_ScalarT], arr: _FiniteNestedSequence[_SupportsArray[np.dtype[_ScalarT]]]) -> None: ... @overload - def __new__(cls, arr: str | _NestedSequence[str]) -> ndenumerate[np.str_]: ... + def __init__(self: ndenumerate[np.str_], arr: str | _NestedSequence[str]) -> None: ... @overload - def __new__(cls, arr: bytes | _NestedSequence[bytes]) -> ndenumerate[np.bytes_]: ... + def __init__(self: ndenumerate[np.bytes_], arr: bytes | _NestedSequence[bytes]) -> None: ... @overload - def __new__(cls, arr: bool | _NestedSequence[bool]) -> ndenumerate[np.bool]: ... + def __init__(self: ndenumerate[np.bool], arr: bool | _NestedSequence[bool]) -> None: ... @overload - def __new__(cls, arr: int | _NestedSequence[int]) -> ndenumerate[np.intp]: ... + def __init__(self: ndenumerate[np.intp], arr: int | _NestedSequence[int]) -> None: ... @overload - def __new__(cls, arr: float | _NestedSequence[float]) -> ndenumerate[np.float64]: ... + def __init__(self: ndenumerate[np.float64], arr: float | _NestedSequence[float]) -> None: ... @overload - def __new__(cls, arr: complex | _NestedSequence[complex]) -> ndenumerate[np.complex128]: ... 
+ def __init__(self: ndenumerate[np.complex128], arr: complex | _NestedSequence[complex]) -> None: ... @overload - def __new__(cls, arr: object) -> ndenumerate[Any]: ... + def __init__(self: ndenumerate[Incomplete], arr: object) -> None: ... # The first overload is a (semi-)workaround for a mypy bug (tested with v1.10 and v1.11) @overload From 813d3ec3e3d5e8cf195377a5fa1afbe93ec108af Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 16 Oct 2025 02:47:31 +0200 Subject: [PATCH 0650/1718] DOC: remove reference to ``numpy.in1d`` from the reference docs --- doc/source/reference/routines.set.rst | 1 - 1 file changed, 1 deletion(-) diff --git a/doc/source/reference/routines.set.rst b/doc/source/reference/routines.set.rst index fbb5afdc1b75..47080f96fff8 100644 --- a/doc/source/reference/routines.set.rst +++ b/doc/source/reference/routines.set.rst @@ -19,7 +19,6 @@ Boolean operations .. autosummary:: :toctree: generated/ - in1d intersect1d isin setdiff1d From edc5f47a5cca02c3def39f0977130070af1bbec7 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 16 Oct 2025 03:04:05 +0200 Subject: [PATCH 0651/1718] TYP: change ``nditer.__new__`` into ``__init__`` and tighten its signature --- numpy/__init__.pyi | 43 +++++++++++++++++-------- numpy/typing/tests/data/fail/nditer.pyi | 2 +- 2 files changed, 31 insertions(+), 14 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 2bf4eb16d1d9..5cfc6ca347a9 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -990,7 +990,7 @@ _NDIterFlagsOp: TypeAlias = L[ "updateifcopy", "virtual", "writeonly", - "writemasked" + "writemasked", ] _MemMapModeKind: TypeAlias = L[ @@ -5776,18 +5776,35 @@ class busdaycalendar: @final class nditer: - def __new__( - cls, - op: ArrayLike | Sequence[ArrayLike | None], - flags: Sequence[_NDIterFlagsKind] | None = ..., - op_flags: Sequence[Sequence[_NDIterFlagsOp]] | None = ..., - op_dtypes: DTypeLike | Sequence[DTypeLike | None] | None = ..., - order: _OrderKACF = ..., - casting: 
_CastingKind = ..., - op_axes: Sequence[Sequence[SupportsIndex]] | None = ..., - itershape: _ShapeLike | None = ..., - buffersize: SupportsIndex = ..., - ) -> nditer: ... + @overload + def __init__( + self, + /, + op: ArrayLike, + flags: Sequence[_NDIterFlagsKind] | None = None, + op_flags: Sequence[_NDIterFlagsOp] | None = None, + op_dtypes: DTypeLike | None = None, + order: _OrderKACF = "K", + casting: _CastingKind = "safe", + op_axes: Sequence[SupportsIndex] | None = None, + itershape: _ShapeLike | None = None, + buffersize: SupportsIndex = 0, + ) -> None: ... + @overload + def __init__( + self, + /, + op: Sequence[ArrayLike | None], + flags: Sequence[_NDIterFlagsKind] | None = None, + op_flags: Sequence[Sequence[_NDIterFlagsOp]] | None = None, + op_dtypes: Sequence[DTypeLike | None] | None = None, + order: _OrderKACF = "K", + casting: _CastingKind = "safe", + op_axes: Sequence[Sequence[SupportsIndex]] | None = None, + itershape: _ShapeLike | None = None, + buffersize: SupportsIndex = 0, + ) -> None: ... + def __enter__(self) -> nditer: ... def __exit__( self, diff --git a/numpy/typing/tests/data/fail/nditer.pyi b/numpy/typing/tests/data/fail/nditer.pyi index cb64061e45fe..fae728da454e 100644 --- a/numpy/typing/tests/data/fail/nditer.pyi +++ b/numpy/typing/tests/data/fail/nditer.pyi @@ -5,4 +5,4 @@ class Test(np.nditer): ... 
# type: ignore[misc] np.nditer([0, 1], flags=["test"]) # type: ignore[list-item] np.nditer([0, 1], op_flags=[["test"]]) # type: ignore[list-item] np.nditer([0, 1], itershape=(1.0,)) # type: ignore[arg-type] -np.nditer([0, 1], buffersize=1.0) # type: ignore[arg-type] +np.nditer([0, 1], buffersize=1.0) # type: ignore[call-overload] From 21434bbc114f86df2474d17cced0535b3524b8fe Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 16 Oct 2025 03:32:43 +0200 Subject: [PATCH 0652/1718] TYP: minor fixes and improvements in ``record`` and ``recarray`` --- numpy/_core/records.pyi | 42 ++++++++++++++++++++++++----------------- 1 file changed, 25 insertions(+), 17 deletions(-) diff --git a/numpy/_core/records.pyi b/numpy/_core/records.pyi index 200d5310f955..511a6a764829 100644 --- a/numpy/_core/records.pyi +++ b/numpy/_core/records.pyi @@ -1,6 +1,6 @@ # ruff: noqa: ANN401 # pyright: reportSelfClsParameterName=false -from _typeshed import StrOrBytesPath +from _typeshed import Incomplete, StrOrBytesPath from collections.abc import Iterable, Sequence from typing import ( Any, @@ -12,10 +12,10 @@ from typing import ( overload, type_check_only, ) -from typing_extensions import TypeVar +from typing_extensions import Buffer, TypeVar import numpy as np -from numpy import _ByteOrder, _OrderKACF, _SupportsBuffer +from numpy import _ByteOrder, _OrderKACF from numpy._typing import ( ArrayLike, DTypeLike, @@ -55,25 +55,31 @@ class _SupportsReadInto(Protocol): ### # exported in `numpy.rec` -class record(np.void): - def __getattribute__(self, attr: str) -> Any: ... - def __setattr__(self, attr: str, val: ArrayLike) -> None: ... +class record(np.void): # type: ignore[misc] + __name__: ClassVar[Literal["record"]] = "record" + __module__: Literal["numpy"] = "numpy" + def pprint(self) -> str: ... + + def __getattribute__(self, attr: str, /) -> Any: ... + def __setattr__(self, attr: str, val: ArrayLike, /) -> None: ... + @overload - def __getitem__(self, key: str | SupportsIndex) -> Any: ... 
+ def __getitem__(self, key: str | SupportsIndex, /) -> Incomplete: ... @overload - def __getitem__(self, key: list[str]) -> record: ... + def __getitem__(self, key: list[str], /) -> record: ... # exported in `numpy.rec` class recarray(np.ndarray[_ShapeT_co, _DTypeT_co]): - __name__: ClassVar[Literal["record"]] = "record" - __module__: Literal["numpy"] = "numpy" + __name__: ClassVar[Literal["recarray"]] = "recarray" + __module__: Literal["numpy.rec"] = "numpy.rec" + @overload def __new__( subtype, shape: _ShapeLike, dtype: None = None, - buf: _SupportsBuffer | None = None, + buf: Buffer | None = None, offset: SupportsIndex = 0, strides: _ShapeLike | None = None, *, @@ -89,7 +95,7 @@ class recarray(np.ndarray[_ShapeT_co, _DTypeT_co]): subtype, shape: _ShapeLike, dtype: DTypeLike | None, - buf: _SupportsBuffer | None = None, + buf: Buffer | None = None, offset: SupportsIndex = 0, strides: _ShapeLike | None = None, formats: None = None, @@ -98,16 +104,18 @@ class recarray(np.ndarray[_ShapeT_co, _DTypeT_co]): byteorder: None = None, aligned: Literal[False] = False, order: _OrderKACF = "C", - ) -> _RecArray[Any]: ... - def __array_finalize__(self, /, obj: object) -> None: ... + ) -> _RecArray[Incomplete]: ... + def __getattribute__(self, attr: str, /) -> Any: ... def __setattr__(self, attr: str, val: ArrayLike, /) -> None: ... + def __array_finalize__(self, /, obj: object) -> None: ... + # @overload def field(self, /, attr: int | str, val: ArrayLike) -> None: ... @overload - def field(self, /, attr: int | str, val: None = None) -> Any: ... + def field(self, /, attr: int | str, val: None = None) -> Incomplete: ... # exported in `numpy.rec` class format_parser: @@ -174,7 +182,7 @@ def fromrecords( # exported in `numpy.rec` @overload def fromstring( - datastring: _SupportsBuffer, + datastring: Buffer, dtype: DTypeLike | None, shape: _ShapeLike | None = None, offset: int = 0, @@ -186,7 +194,7 @@ def fromstring( ) -> _RecArray[record]: ... 
@overload def fromstring( - datastring: _SupportsBuffer, + datastring: Buffer, dtype: None = None, shape: _ShapeLike | None = None, offset: int = 0, From e43f070e1e64ea724f1ac83ebcf9808efd8e23cd Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 16 Oct 2025 04:04:33 +0200 Subject: [PATCH 0653/1718] DEP: remove the ``fix_imports`` parameter from ``save()`` --- numpy/_core/tests/test_deprecations.py | 25 +------------------------ numpy/lib/_npyio_impl.py | 19 +++---------------- numpy/lib/_npyio_impl.pyi | 13 +------------ numpy/typing/tests/data/fail/npyio.pyi | 4 ++-- 4 files changed, 7 insertions(+), 54 deletions(-) diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index 27e98a563930..d7450fcc4101 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -11,7 +11,7 @@ import numpy as np import numpy._core._struct_ufunc_tests as struct_ufunc from numpy._core._multiarray_tests import fromstring_null_term_c_api # noqa: F401 -from numpy.testing import assert_raises, temppath +from numpy.testing import assert_raises class _DeprecationTestCase: @@ -392,29 +392,6 @@ def test_parenthesized_repeat_count(self, string): self.assert_deprecated(np.dtype, args=(string,)) -class TestDeprecatedSaveFixImports(_DeprecationTestCase): - # Deprecated in Numpy 2.1, 2024-05 - message = "The 'fix_imports' flag is deprecated and has no effect." 
- - def test_deprecated(self): - with temppath(suffix='.npy') as path: - sample_args = (path, np.array(np.zeros((1024, 10)))) - self.assert_not_deprecated(np.save, args=sample_args) - self.assert_deprecated(np.save, args=sample_args, - kwargs={'fix_imports': True}) - self.assert_deprecated(np.save, args=sample_args, - kwargs={'fix_imports': False}) - for allow_pickle in [True, False]: - self.assert_not_deprecated(np.save, args=sample_args, - kwargs={'allow_pickle': allow_pickle}) - self.assert_deprecated(np.save, args=sample_args, - kwargs={'allow_pickle': allow_pickle, - 'fix_imports': True}) - self.assert_deprecated(np.save, args=sample_args, - kwargs={'allow_pickle': allow_pickle, - 'fix_imports': False}) - - class TestAddNewdocUFunc(_DeprecationTestCase): # Deprecated in Numpy 2.2, 2024-11 def test_deprecated(self): diff --git a/numpy/lib/_npyio_impl.py b/numpy/lib/_npyio_impl.py index e9a8509fc685..72e746f19eba 100644 --- a/numpy/lib/_npyio_impl.py +++ b/numpy/lib/_npyio_impl.py @@ -497,12 +497,12 @@ def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True, f"Failed to interpret file {file!r} as a pickle") from e -def _save_dispatcher(file, arr, allow_pickle=None, fix_imports=None): +def _save_dispatcher(file, arr, allow_pickle=None): return (arr,) @array_function_dispatch(_save_dispatcher) -def save(file, arr, allow_pickle=True, fix_imports=np._NoValue): +def save(file, arr, allow_pickle=True): """ Save an array to a binary file in NumPy ``.npy`` format. @@ -523,12 +523,6 @@ def save(file, arr, allow_pickle=True, fix_imports=np._NoValue): require libraries that are not available, and not all pickled data is compatible between different versions of Python). Default: True - fix_imports : bool, optional - The `fix_imports` flag is deprecated and has no effect. - - .. deprecated:: 2.1 - This flag is ignored since NumPy 1.17 and was only needed to - support loading in Python 2 some files written in Python 3. 
See Also -------- @@ -565,12 +559,6 @@ def save(file, arr, allow_pickle=True, fix_imports=np._NoValue): >>> print(a, b) # [1 2] [1 3] """ - if fix_imports is not np._NoValue: - # Deprecated 2024-05-16, NumPy 2.1 - warnings.warn( - "The 'fix_imports' flag is deprecated and has no effect. " - "(Deprecated in NumPy 2.1)", - DeprecationWarning, stacklevel=2) if hasattr(file, 'write'): file_ctx = contextlib.nullcontext(file) else: @@ -581,8 +569,7 @@ def save(file, arr, allow_pickle=True, fix_imports=np._NoValue): with file_ctx as fid: arr = np.asanyarray(arr) - format.write_array(fid, arr, allow_pickle=allow_pickle, - pickle_kwargs={'fix_imports': fix_imports}) + format.write_array(fid, arr, allow_pickle=allow_pickle) def _savez_dispatcher(file, *args, allow_pickle=True, **kwds): diff --git a/numpy/lib/_npyio_impl.pyi b/numpy/lib/_npyio_impl.pyi index 8a4dfa27ed9b..a859118329f1 100644 --- a/numpy/lib/_npyio_impl.pyi +++ b/numpy/lib/_npyio_impl.pyi @@ -21,7 +21,7 @@ from typing import ( overload, type_check_only, ) -from typing_extensions import TypeVar, deprecated, override +from typing_extensions import TypeVar, override import numpy as np from numpy._core.multiarray import packbits, unpackbits @@ -105,19 +105,8 @@ def load( max_header_size: int = 10_000, ) -> Any: ... -@overload def save(file: _FNameWriteBytes, arr: ArrayLike, allow_pickle: bool = True) -> None: ... -@overload -@deprecated("The 'fix_imports' flag is deprecated in NumPy 2.1.") -def save(file: _FNameWriteBytes, arr: ArrayLike, allow_pickle: bool, fix_imports: bool) -> None: ... -@overload -@deprecated("The 'fix_imports' flag is deprecated in NumPy 2.1.") -def save(file: _FNameWriteBytes, arr: ArrayLike, allow_pickle: bool = True, *, fix_imports: bool) -> None: ... - -# def savez(file: _FNameWriteBytes, *args: ArrayLike, allow_pickle: bool = True, **kwds: ArrayLike) -> None: ... - -# def savez_compressed(file: _FNameWriteBytes, *args: ArrayLike, allow_pickle: bool = True, **kwds: ArrayLike) -> None: ... 
# File-like objects only have to implement `__iter__` and, diff --git a/numpy/typing/tests/data/fail/npyio.pyi b/numpy/typing/tests/data/fail/npyio.pyi index d57ebe3a2e3e..e20be3a2a247 100644 --- a/numpy/typing/tests/data/fail/npyio.pyi +++ b/numpy/typing/tests/data/fail/npyio.pyi @@ -12,8 +12,8 @@ AR_i8: npt.NDArray[np.int64] np.load(str_file) # type: ignore[arg-type] -np.save(bytes_path, AR_i8) # type: ignore[call-overload] -np.save(str_path, AR_i8, fix_imports=True) # type: ignore[deprecated] # pyright: ignore[reportDeprecated] +np.save(bytes_path, AR_i8) # type: ignore[arg-type] +np.save(str_path, AR_i8, fix_imports=True) # type: ignore[call-arg] np.savez(bytes_path, AR_i8) # type: ignore[arg-type] From 11629d6623c9856d670ff40d712040145bc11c8e Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 16 Oct 2025 04:27:43 +0200 Subject: [PATCH 0654/1718] DOC: add release note for #29984 --- doc/release/upcoming_changes/29984.expired.rst | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 doc/release/upcoming_changes/29984.expired.rst diff --git a/doc/release/upcoming_changes/29984.expired.rst b/doc/release/upcoming_changes/29984.expired.rst new file mode 100644 index 000000000000..bcce0dedd4a7 --- /dev/null +++ b/doc/release/upcoming_changes/29984.expired.rst @@ -0,0 +1,5 @@ +Removed ``fix_imports`` parameter from ``numpy.save`` +----------------------------------------------------- + +The ``fix_imports`` parameter was deprecated in NumPy 2.1.0 and is now removed. +This flag has been ignored since NumPy 1.17 and was only needed to support loading files in Python 2 that were written in Python 3. 
From 472935a944b81fc595c823c7be8ad00279d00113 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 16 Oct 2025 04:51:33 +0200 Subject: [PATCH 0655/1718] MAINT: Remove ``_core.MachAr`` remnants --- numpy/_core/__init__.py | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/numpy/_core/__init__.py b/numpy/_core/__init__.py index 7b53022c3bc3..18b250f9972b 100644 --- a/numpy/_core/__init__.py +++ b/numpy/_core/__init__.py @@ -187,18 +187,6 @@ def _DType_reduce(DType): return _DType_reconstruct, (scalar_type,) -def __getattr__(name): - # Deprecated 2022-11-22, NumPy 1.25. - if name == "MachAr": - import warnings - warnings.warn( - "The `np._core.MachAr` is considered private API (NumPy 1.24)", - DeprecationWarning, stacklevel=2, - ) - return _machar.MachAr - raise AttributeError(f"Module {__name__!r} has no attribute {name!r}") - - import copyreg copyreg.pickle(ufunc, _ufunc_reduce) From eff460aeb32de60354713256370ad340bd94448d Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 16 Oct 2025 05:20:36 +0200 Subject: [PATCH 0656/1718] DEP: Remove ``ndarray.ctypes.get_*`` methods (deprecated since 1.21) --- numpy/_core/_internal.py | 40 -------------------- numpy/_core/_internal.pyi | 12 +----- numpy/_core/tests/test_deprecations.py | 13 +------ numpy/typing/tests/data/pass/ndarray_misc.py | 13 ------- 4 files changed, 2 insertions(+), 76 deletions(-) diff --git a/numpy/_core/_internal.py b/numpy/_core/_internal.py index e00e1b2c1f60..490b3407997a 100644 --- a/numpy/_core/_internal.py +++ b/numpy/_core/_internal.py @@ -365,46 +365,6 @@ def _as_parameter_(self): """ return self.data_as(ctypes.c_void_p) - # Numpy 1.21.0, 2021-05-18 - - def get_data(self): - """Deprecated getter for the `_ctypes.data` property. - - .. deprecated:: 1.21 - """ - warnings.warn('"get_data" is deprecated. Use "data" instead', - DeprecationWarning, stacklevel=2) - return self.data - - def get_shape(self): - """Deprecated getter for the `_ctypes.shape` property. - - .. 
deprecated:: 1.21 - """ - warnings.warn('"get_shape" is deprecated. Use "shape" instead', - DeprecationWarning, stacklevel=2) - return self.shape - - def get_strides(self): - """Deprecated getter for the `_ctypes.strides` property. - - .. deprecated:: 1.21 - """ - warnings.warn('"get_strides" is deprecated. Use "strides" instead', - DeprecationWarning, stacklevel=2) - return self.strides - - def get_as_parameter(self): - """Deprecated getter for the `_ctypes._as_parameter_` property. - - .. deprecated:: 1.21 - """ - warnings.warn( - '"get_as_parameter" is deprecated. Use "_as_parameter_" instead', - DeprecationWarning, stacklevel=2, - ) - return self._as_parameter_ - def _newnames(datatype, order): """ diff --git a/numpy/_core/_internal.pyi b/numpy/_core/_internal.pyi index 04c26ca284db..6e37022ffd56 100644 --- a/numpy/_core/_internal.pyi +++ b/numpy/_core/_internal.pyi @@ -2,7 +2,7 @@ import ctypes as ct import re from collections.abc import Callable, Iterable from typing import Any, Final, Generic, Self, overload -from typing_extensions import TypeVar, deprecated +from typing_extensions import TypeVar import numpy as np import numpy.typing as npt @@ -47,16 +47,6 @@ class _ctypes(Generic[_PT_co]): def shape_as(self, /, obj: type[_CT]) -> ct.Array[_CT]: ... def strides_as(self, /, obj: type[_CT]) -> ct.Array[_CT]: ... - # - @deprecated('"get_data" is deprecated. Use "data" instead') - def get_data(self, /) -> _PT_co: ... - @deprecated('"get_shape" is deprecated. Use "shape" instead') - def get_shape(self, /) -> ct.Array[c_intp]: ... - @deprecated('"get_strides" is deprecated. Use "strides" instead') - def get_strides(self, /) -> ct.Array[c_intp]: ... - @deprecated('"get_as_parameter" is deprecated. Use "_as_parameter_" instead') - def get_as_parameter(self, /) -> ct.c_void_p: ... 
- class dummy_ctype(Generic[_T_co]): _cls: type[_T_co] diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index 27e98a563930..e2bd1dc3b5d3 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -197,20 +197,9 @@ def test_not_deprecated(self): class TestCtypesGetter(_DeprecationTestCase): - # Deprecated 2021-05-18, Numpy 1.21.0 - warning_cls = DeprecationWarning ctypes = np.array([1]).ctypes - @pytest.mark.parametrize( - "name", ["get_data", "get_shape", "get_strides", "get_as_parameter"] - ) - def test_deprecated(self, name: str) -> None: - func = getattr(self.ctypes, name) - self.assert_deprecated(func) - - @pytest.mark.parametrize( - "name", ["data", "shape", "strides", "_as_parameter_"] - ) + @pytest.mark.parametrize("name", ["data", "shape", "strides", "_as_parameter_"]) def test_not_deprecated(self, name: str) -> None: self.assert_not_deprecated(lambda: getattr(self.ctypes, name)) diff --git a/numpy/typing/tests/data/pass/ndarray_misc.py b/numpy/typing/tests/data/pass/ndarray_misc.py index 4d4e1f872763..a17082aeba15 100644 --- a/numpy/typing/tests/data/pass/ndarray_misc.py +++ b/numpy/typing/tests/data/pass/ndarray_misc.py @@ -11,8 +11,6 @@ import operator from typing import Any, cast -import pytest - import numpy as np import numpy.typing as npt @@ -189,14 +187,3 @@ class IntSubClass(npt.NDArray[np.intp]): ... 
A_void: npt.NDArray[np.void] = np.empty(3, [("yop", float), ("yap", float)]) A_void["yop"] = A_float[:, 0] A_void["yap"] = A_float[:, 1] - -# deprecated - -with pytest.deprecated_call(): - ctypes_obj.get_data() # type: ignore[deprecated] # pyright: ignore[reportDeprecated] -with pytest.deprecated_call(): - ctypes_obj.get_shape() # type: ignore[deprecated] # pyright: ignore[reportDeprecated] -with pytest.deprecated_call(): - ctypes_obj.get_strides() # type: ignore[deprecated] # pyright: ignore[reportDeprecated] -with pytest.deprecated_call(): - ctypes_obj.get_as_parameter() # type: ignore[deprecated] # pyright: ignore[reportDeprecated] From aa1ae516ff084b2d72205b375fd6224d9a1dc665 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 16 Oct 2025 05:35:14 +0200 Subject: [PATCH 0657/1718] DOC: add release note for #29986 --- doc/release/upcoming_changes/29986.expired.rst | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 doc/release/upcoming_changes/29986.expired.rst diff --git a/doc/release/upcoming_changes/29986.expired.rst b/doc/release/upcoming_changes/29986.expired.rst new file mode 100644 index 000000000000..2a6b44380dd4 --- /dev/null +++ b/doc/release/upcoming_changes/29986.expired.rst @@ -0,0 +1,10 @@ +Removal of four undocumented ``ndarray.ctypes`` methods +------------------------------------------------------- +Four undocumented methods of the ``ndarray.ctypes`` object have been removed: + +* ``_ctypes.get_data()`` (use ``_ctypes.data`` instead) +* ``_ctypes.get_shape()`` (use ``_ctypes.shape`` instead) +* ``_ctypes.get_strides()`` (use ``_ctypes.strides`` instead) +* ``_ctypes.get_as_parameter()`` (use ``_ctypes._as_parameter_`` instead) + +These methods have been deprecated since NumPy 1.21. 
From b72ff69c9c7d200622837f07fd41289fefd50f9a Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 16 Oct 2025 05:51:25 +0200 Subject: [PATCH 0658/1718] MAINT: remove remnants of ``linalg.linalg`` and ``fft.helper`` --- numpy/fft/__init__.py | 4 +--- numpy/fft/helper.py | 1 - numpy/fft/helper.pyi | 1 - numpy/fft/meson.build | 2 -- numpy/linalg/__init__.py | 5 +---- numpy/linalg/linalg.py | 1 - numpy/linalg/linalg.pyi | 1 - numpy/linalg/meson.build | 2 -- numpy/tests/test_public_api.py | 2 -- 9 files changed, 2 insertions(+), 17 deletions(-) delete mode 100644 numpy/fft/helper.py delete mode 100644 numpy/fft/helper.pyi delete mode 100644 numpy/linalg/linalg.py delete mode 100644 numpy/linalg/linalg.pyi diff --git a/numpy/fft/__init__.py b/numpy/fft/__init__.py index 55f7320f653f..2de162c5ec71 100644 --- a/numpy/fft/__init__.py +++ b/numpy/fft/__init__.py @@ -200,9 +200,7 @@ """ -# TODO: `numpy.fft.helper`` was deprecated in NumPy 2.0. It should -# be deleted once downstream libraries move to `numpy.fft`. -from . import _helper, _pocketfft, helper +from . import _helper, _pocketfft from ._helper import * from ._pocketfft import * diff --git a/numpy/fft/helper.py b/numpy/fft/helper.py deleted file mode 100644 index 8b137891791f..000000000000 --- a/numpy/fft/helper.py +++ /dev/null @@ -1 +0,0 @@ - diff --git a/numpy/fft/helper.pyi b/numpy/fft/helper.pyi deleted file mode 100644 index 8b137891791f..000000000000 --- a/numpy/fft/helper.pyi +++ /dev/null @@ -1 +0,0 @@ - diff --git a/numpy/fft/meson.build b/numpy/fft/meson.build index e18949af5e31..a5b2413ebb90 100644 --- a/numpy/fft/meson.build +++ b/numpy/fft/meson.build @@ -23,8 +23,6 @@ py.install_sources( '_pocketfft.pyi', '_helper.py', '_helper.pyi', - 'helper.py', - 'helper.pyi', ], subdir: 'numpy/fft' ) diff --git a/numpy/linalg/__init__.py b/numpy/linalg/__init__.py index fa230ece580c..cc482cfc9579 100644 --- a/numpy/linalg/__init__.py +++ b/numpy/linalg/__init__.py @@ -84,10 +84,7 @@ """ # To get sub-modules -from . 
import ( - _linalg, - linalg, # deprecated in NumPy 2.0 -) +from . import _linalg from ._linalg import * __all__ = _linalg.__all__.copy() # noqa: PLE0605 diff --git a/numpy/linalg/linalg.py b/numpy/linalg/linalg.py deleted file mode 100644 index 8b137891791f..000000000000 --- a/numpy/linalg/linalg.py +++ /dev/null @@ -1 +0,0 @@ - diff --git a/numpy/linalg/linalg.pyi b/numpy/linalg/linalg.pyi deleted file mode 100644 index 8b137891791f..000000000000 --- a/numpy/linalg/linalg.pyi +++ /dev/null @@ -1 +0,0 @@ - diff --git a/numpy/linalg/meson.build b/numpy/linalg/meson.build index e2f8136208d6..1d3297286317 100644 --- a/numpy/linalg/meson.build +++ b/numpy/linalg/meson.build @@ -47,8 +47,6 @@ py.install_sources( '_linalg.pyi', '_umath_linalg.pyi', 'lapack_lite.pyi', - 'linalg.py', - 'linalg.pyi', ], subdir: 'numpy/linalg' ) diff --git a/numpy/tests/test_public_api.py b/numpy/tests/test_public_api.py index 6a36358c3a06..f6fa8611e181 100644 --- a/numpy/tests/test_public_api.py +++ b/numpy/tests/test_public_api.py @@ -190,10 +190,8 @@ def test_NPY_NO_EXPORT(): "f2py.rules", "f2py.symbolic", "f2py.use_rules", - "fft.helper", "lib.user_array", # note: not in np.lib, but probably should just be deleted "linalg.lapack_lite", - "linalg.linalg", "ma.core", "ma.testutils", "matlib", From 35581560d50887c47994d631b7505f4494e50328 Mon Sep 17 00:00:00 2001 From: Aaron Kollasch Date: Wed, 15 Oct 2025 23:39:56 -0400 Subject: [PATCH 0659/1718] BUG: Fix np.strings.slice if start > stop If the slice start > stop, then `slice_strided_loop()` attempts to allocate a string of negative size, causing a MemoryError. 
To replicate: ``` >>> a = np.array(['test-strings'], dtype="T") >>> np.strings.slice(a, 6, 5) Traceback (most recent call last): File "", line 1, in File "lib/python3.12/site-packages/numpy/_core/strings.py", line 1813, in slice return _slice(a, start, stop, step) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ MemoryError: Failed to allocate string in slice ``` --- numpy/_core/src/umath/stringdtype_ufuncs.cpp | 1 + numpy/_core/tests/test_strings.py | 15 ++++++++++++--- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/numpy/_core/src/umath/stringdtype_ufuncs.cpp b/numpy/_core/src/umath/stringdtype_ufuncs.cpp index e00612a369f0..ebc10586bf8b 100644 --- a/numpy/_core/src/umath/stringdtype_ufuncs.cpp +++ b/numpy/_core/src/umath/stringdtype_ufuncs.cpp @@ -2271,6 +2271,7 @@ slice_strided_loop(PyArrayMethod_Context *context, char *const data[], ? codepoint_offsets[stop] : (unsigned char *)is.buf + is.size); npy_intp outsize = stop_bounded - start_bounded; + outsize = outsize < 0 ? 0 : outsize; if (load_new_string(ops, &os, outsize, oallocator, "slice") < 0) { goto fail; diff --git a/numpy/_core/tests/test_strings.py b/numpy/_core/tests/test_strings.py index e5c3bb87c773..24b9af9c160c 100644 --- a/numpy/_core/tests/test_strings.py +++ b/numpy/_core/tests/test_strings.py @@ -992,7 +992,8 @@ def test_rpartition(self, buf, sep, res1, res2, res3, dt): (6,), # test index past the end (6, None), (6, None, -1), - (6, 7), # test stop index past the end + (6, 7), # test start and stop index past the end + (4, 3), # test start > stop index (-1,), (-1, None), (-1, None, -1), @@ -1016,8 +1017,16 @@ def test_rpartition(self, buf, sep, res1, res2, res3, dt): (None, None, -1), ([0, 6], [-1, 0], [2, -1]), ]) - def test_slice(self, args, dt): - buf = np.array(["hello", "world"], dtype=dt) + @pytest.mark.parametrize("buf", [ + ["hello", "world"], + ['hello world', 'γεια σου κόσμε', '你好世界', '👋 🌍'], + ]) + def test_slice(self, args, buf, dt): + if dt == "S" and "你好世界" in buf: + pytest.skip("Bytes 
dtype does not support non-ascii input") + if len(buf) == 4: + args = tuple(s * 2 if isinstance(s, list) else s for s in args) + buf = np.array(buf, dtype=dt) act = np.strings.slice(buf, *args) bcast_args = tuple(np.broadcast_to(arg, buf.shape) for arg in args) res = np.array([s[slice(*arg)] From f338a83f97e22a4a8edbcc5a422f1eccdfb41ab0 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 16 Oct 2025 07:56:06 +0200 Subject: [PATCH 0660/1718] TYP: some minor fixes for the constants in ``_core.multiarray`` --- numpy/_core/multiarray.pyi | 49 ++++++++++++++++++++------------------ 1 file changed, 26 insertions(+), 23 deletions(-) diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index b90453d66d02..dd47e8e872d7 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -365,37 +365,40 @@ NEEDS_INIT: Final = 8 NEEDS_PYAPI: Final = 16 USE_GETITEM: Final = 32 USE_SETITEM: Final = 64 -DATETIMEUNITS: Final[CapsuleType] -_ARRAY_API: Final[CapsuleType] -_flagdict: Final[dict[str, int]] -_monotonicity: Final[Callable[..., object]] -_place: Final[Callable[..., object]] -_reconstruct: Final[Callable[..., object]] -_vec_string: Final[Callable[..., object]] -correlate2: Final[Callable[..., object]] -dragon4_positional: Final[Callable[..., object]] -dragon4_scientific: Final[Callable[..., object]] -interp_complex: Final[Callable[..., object]] -set_datetimeparse_function: Final[Callable[..., object]] +DATETIMEUNITS: Final[CapsuleType] = ... +_ARRAY_API: Final[CapsuleType] = ... + +_flagdict: Final[dict[str, int]] = ... +_monotonicity: Final[Callable[..., object]] = ... +_place: Final[Callable[..., object]] = ... +_reconstruct: Final[Callable[..., object]] = ... +_vec_string: Final[Callable[..., object]] = ... +correlate2: Final[Callable[..., object]] = ... +dragon4_positional: Final[Callable[..., object]] = ... +dragon4_scientific: Final[Callable[..., object]] = ... +interp_complex: Final[Callable[..., object]] = ... 
+set_datetimeparse_function: Final[Callable[..., object]] = ... + def get_handler_name(a: NDArray[Any] = ..., /) -> str | None: ... def get_handler_version(a: NDArray[Any] = ..., /) -> int | None: ... def format_longfloat(x: np.longdouble, precision: int) -> str: ... def scalar(dtype: _DTypeT, object: bytes | object = ...) -> ndarray[tuple[()], _DTypeT]: ... def set_typeDict(dict_: dict[str, np.dtype], /) -> None: ... -typeinfo: Final[dict[str, np.dtype[np.generic]]] + +typeinfo: Final[dict[str, np.dtype[np.generic]]] = ... ALLOW_THREADS: Final[int] # 0 or 1 (system-specific) -BUFSIZE: L[8192] -CLIP: L[0] -WRAP: L[1] -RAISE: L[2] -MAXDIMS: L[32] -MAY_SHARE_BOUNDS: L[0] -MAY_SHARE_EXACT: L[-1] -tracemalloc_domain: L[389047] +BUFSIZE: Final = 8_192 +CLIP: Final = 0 +WRAP: Final = 1 +RAISE: Final = 2 +MAXDIMS: Final = 64 +MAY_SHARE_BOUNDS: Final = 0 +MAY_SHARE_EXACT: Final = -1 +tracemalloc_domain: Final = 389_047 -zeros: Final[_ConstructorEmpty] -empty: Final[_ConstructorEmpty] +zeros: Final[_ConstructorEmpty] = ... +empty: Final[_ConstructorEmpty] = ... @overload def empty_like( From 18bc40b5e6ea9a8e4c31fc1be06401fc4fd46b3e Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Thu, 16 Oct 2025 09:51:03 +0300 Subject: [PATCH 0661/1718] Doc: tweak docstring [skip actions][skip azp][skip cirrus] --- doc/source/reference/arrays.datetime.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/doc/source/reference/arrays.datetime.rst b/doc/source/reference/arrays.datetime.rst index ac402c973fd8..94468c3a7363 100644 --- a/doc/source/reference/arrays.datetime.rst +++ b/doc/source/reference/arrays.datetime.rst @@ -59,7 +59,8 @@ you can specify a different unit (e.g., 'M', 'D', 'h') to truncate the result to that precision. Units finer than seconds (such as 'ms' or 'ns') are supported but will show fractional parts as zeros, effectively truncating to whole seconds. The string "today" is also supported and returns the current UTC -date with day precision. 
+date with day precision. It also supports the same precision specifiers +as ``now``. .. admonition:: Example From f13b468202f7bbb0c0c430f05a1876dba49e52fb Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Thu, 16 Oct 2025 10:08:37 +0300 Subject: [PATCH 0662/1718] remove misleading f(x) [skip actions][skip azp][skip cirrus] MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Melissa Weber Mendonça --- numpy/lib/_polynomial_impl.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/lib/_polynomial_impl.py b/numpy/lib/_polynomial_impl.py index d919373ea1f4..ee5b8593a9db 100644 --- a/numpy/lib/_polynomial_impl.py +++ b/numpy/lib/_polynomial_impl.py @@ -457,7 +457,7 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): A summary of the differences can be found in the :doc:`transition guide `. - Fit a polynomial ``f(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg` + Fit a polynomial ``p[0] * x**deg + ... + p[deg]`` of degree `deg` to points `(x, y)`. Returns a vector of coefficients `p` that minimises the squared error in the order `deg`, `deg-1`, ... `0`. 
From 95cdc307c91b53b33e48cffb66184209bfe7eff5 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 16 Oct 2025 09:45:38 +0200 Subject: [PATCH 0663/1718] DEP: remove the ``newshape`` parameter from ``reshape()`` --- numpy/_core/fromnumeric.py | 25 +++---------------------- numpy/_core/fromnumeric.pyi | 16 ---------------- numpy/_core/tests/test_numeric.py | 9 --------- 3 files changed, 3 insertions(+), 47 deletions(-) diff --git a/numpy/_core/fromnumeric.py b/numpy/_core/fromnumeric.py index 41f899c54815..6cbd1385d459 100644 --- a/numpy/_core/fromnumeric.py +++ b/numpy/_core/fromnumeric.py @@ -201,13 +201,12 @@ def take(a, indices, axis=None, out=None, mode='raise'): return _wrapfunc(a, 'take', indices, axis=axis, out=out, mode=mode) -def _reshape_dispatcher(a, /, shape=None, order=None, *, newshape=None, - copy=None): +def _reshape_dispatcher(a, /, shape=None, order=None, *, copy=None): return (a,) @array_function_dispatch(_reshape_dispatcher) -def reshape(a, /, shape=None, order='C', *, newshape=None, copy=None): +def reshape(a, /, shape=None, order='C', *, copy=None): """ Gives a new shape to an array without changing its data. @@ -233,10 +232,6 @@ def reshape(a, /, shape=None, order='C', *, newshape=None, copy=None): 'A' means to read / write the elements in Fortran-like index order if ``a`` is Fortran *contiguous* in memory, C-like order otherwise. - newshape : int or tuple of ints - .. deprecated:: 2.1 - Replaced by ``shape`` argument. Retained for backward - compatibility. copy : bool, optional If ``True``, then the array data is copied. If ``None``, a copy will only be made if it's required by ``order``. 
For ``False`` it raises @@ -300,23 +295,9 @@ def reshape(a, /, shape=None, order='C', *, newshape=None, copy=None): [3, 4], [5, 6]]) """ - if newshape is None and shape is None: + if shape is None: raise TypeError( "reshape() missing 1 required positional argument: 'shape'") - if newshape is not None: - if shape is not None: - raise TypeError( - "You cannot specify 'newshape' and 'shape' arguments " - "at the same time.") - # Deprecated in NumPy 2.1, 2024-04-18 - warnings.warn( - "`newshape` keyword argument is deprecated, " - "use `shape=...` or pass shape positionally instead. " - "(deprecated in NumPy 2.1)", - DeprecationWarning, - stacklevel=2, - ) - shape = newshape if copy is not None: return _wrapfunc(a, 'reshape', shape, order=order, copy=copy) return _wrapfunc(a, 'reshape', shape, order=order) diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index dc8a040e30e8..d0e0a2a1f67c 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -14,7 +14,6 @@ from typing import ( overload, type_check_only, ) -from typing_extensions import deprecated import numpy as np from numpy import ( @@ -240,21 +239,6 @@ def reshape( *, copy: bool | None = None, ) -> NDArray[Any]: ... -@overload -@deprecated( - "`newshape` keyword argument is deprecated, " - "use `shape=...` or pass shape positionally instead. " - "(deprecated in NumPy 2.1)", -) -def reshape( - a: ArrayLike, - /, - shape: None = None, - order: _OrderACF = "C", - *, - newshape: _ShapeLike, - copy: bool | None = None, -) -> NDArray[Any]: ... 
@overload def choose( diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py index 751c66584275..127f08527f1e 100644 --- a/numpy/_core/tests/test_numeric.py +++ b/numpy/_core/tests/test_numeric.py @@ -183,12 +183,6 @@ def test_reshape_shape_arg(self): shape = (3, 4) expected = arr.reshape(shape) - with pytest.raises( - TypeError, - match="You cannot specify 'newshape' and 'shape' " - "arguments at the same time." - ): - np.reshape(arr, shape=shape, newshape=shape) with pytest.raises( TypeError, match=r"reshape\(\) missing 1 required positional " @@ -201,9 +195,6 @@ def test_reshape_shape_arg(self): assert_equal(np.reshape(arr, shape, "C"), expected) assert_equal(np.reshape(arr, shape=shape), expected) assert_equal(np.reshape(arr, shape=shape, order="C"), expected) - with pytest.warns(DeprecationWarning): - actual = np.reshape(arr, newshape=shape) - assert_equal(actual, expected) def test_reshape_copy_arg(self): arr = np.arange(24).reshape(2, 3, 4) From a0cf4d338ed1f41f4deb8530b3b23f45d6ac1972 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 16 Oct 2025 09:50:56 +0200 Subject: [PATCH 0664/1718] DOC: add release note for #29994 --- doc/release/upcoming_changes/29994.expired.rst | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 doc/release/upcoming_changes/29994.expired.rst diff --git a/doc/release/upcoming_changes/29994.expired.rst b/doc/release/upcoming_changes/29994.expired.rst new file mode 100644 index 000000000000..0dcc083178ab --- /dev/null +++ b/doc/release/upcoming_changes/29994.expired.rst @@ -0,0 +1,7 @@ +Remove ``newshape`` parameter from ``numpy.reshape`` +---------------------------------------------------- + +The ``newshape`` parameter was deprecated in NumPy 2.1.0 and has been +removed from ``numpy.reshape``. + +Use the ``shape`` parameter instead. 
From 16a043ceec52b7e19c6ea77762b6204a6ca81232 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Thu, 16 Oct 2025 10:56:47 +0200 Subject: [PATCH 0665/1718] DOC: tweak release note Co-authored-by: Sebastian Berg --- doc/release/upcoming_changes/29994.expired.rst | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/doc/release/upcoming_changes/29994.expired.rst b/doc/release/upcoming_changes/29994.expired.rst index 0dcc083178ab..11331da6e810 100644 --- a/doc/release/upcoming_changes/29994.expired.rst +++ b/doc/release/upcoming_changes/29994.expired.rst @@ -2,6 +2,5 @@ Remove ``newshape`` parameter from ``numpy.reshape`` ---------------------------------------------------- The ``newshape`` parameter was deprecated in NumPy 2.1.0 and has been -removed from ``numpy.reshape``. - -Use the ``shape`` parameter instead. +removed from ``numpy.reshape``. Pass it positionally or use ``shape=`` +on newer NumPy versions. From 49fbb3119064105df967691858897a15ca8c8286 Mon Sep 17 00:00:00 2001 From: ixgbe <1113177880@qq.com> Date: Thu, 16 Oct 2025 17:32:00 +0800 Subject: [PATCH 0666/1718] DOC: update SIMD build options to cover riscv64 (#29992) Signed-off-by: Wang Yang --- doc/source/reference/simd/build-options.rst | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/doc/source/reference/simd/build-options.rst b/doc/source/reference/simd/build-options.rst index 6cf27510103c..229a9ebbae0a 100644 --- a/doc/source/reference/simd/build-options.rst +++ b/doc/source/reference/simd/build-options.rst @@ -239,6 +239,17 @@ On IBM/ZSYSTEM(S390X) * - ``VXE2`` - ``VX`` ``VXE`` +On RISCV64 +~~~~~~~~~~~~~~~~~~~~~ +.. list-table:: + :header-rows: 1 + :align: left + + * - Name + - Implies + * - ``RVV`` + - + .. 
_opt-special-options: Special Options @@ -288,6 +299,8 @@ Enables the minimum CPU features for each architecture: - ``NEON`` ``NEON_FP16`` ``NEON_VFPV4`` ``ASIMD`` * - IBM/ZSYSTEM(S390X) - ``NONE`` + * - riscv64 + - ``NONE`` ``MAX`` From 1076e9da17bff2b6bbddd7baa13f2d7dda4057fa Mon Sep 17 00:00:00 2001 From: Sandeep Gupta Date: Thu, 16 Oct 2025 16:31:59 +0530 Subject: [PATCH 0667/1718] ci: run NumPy tests on POWER10 runner for full SIMD coverage (#29934) --- .github/workflows/linux-ppc64le.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/linux-ppc64le.yml b/.github/workflows/linux-ppc64le.yml index f54b5dc74060..a593bcd774a2 100644 --- a/.github/workflows/linux-ppc64le.yml +++ b/.github/workflows/linux-ppc64le.yml @@ -20,7 +20,7 @@ jobs: # It requires a native ppc64le GHA runner, which is not available on forks. # For more details, see: https://github.com/numpy/numpy/issues/29125 if: github.repository == 'numpy/numpy' - runs-on: ubuntu-24.04-ppc64le + runs-on: ubuntu-24.04-ppc64le-p10 name: "Native PPC64LE" steps: - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 From a79e1541d80b94be2e2432f9027b1bf5e507be1e Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Thu, 16 Oct 2025 11:04:39 -0600 Subject: [PATCH 0668/1718] MAINT: Update main after the NumPy 2.3.4 release. 
- Add 2.3.4-changelog.rst - Add 2.3.4-notes.rst - Update release.rst - Update .mailmap - Update RELEASE_WALKTHROUGH.rst [skip azp] [skip cirrus] [skip actions] --- .mailmap | 2 + doc/RELEASE_WALKTHROUGH.rst | 95 +++++++++++++++++------------- doc/changelog/2.3.4-changelog.rst | 61 +++++++++++++++++++ doc/source/release.rst | 1 + doc/source/release/2.3.4-notes.rst | 83 ++++++++++++++++++++++++++ 5 files changed, 202 insertions(+), 40 deletions(-) create mode 100644 doc/changelog/2.3.4-changelog.rst create mode 100644 doc/source/release/2.3.4-notes.rst diff --git a/.mailmap b/.mailmap index e3e3bb56ecdf..ddadaf22a62f 100644 --- a/.mailmap +++ b/.mailmap @@ -214,6 +214,7 @@ Chris Kerr Chris Navarro Chris Navarro <24905907+lvllvl@users.noreply.github.com> Chris Vavaliaris +Christian Barbia Christian Clauss Christopher Dahlin Christopher Hanley @@ -655,6 +656,7 @@ Saransh Chopra Saullo Giovani Saurabh Mehta Sayantika Banik +Sayed Awad Schrijvers Luc Sean Cheah Sean Cheah <67928790+thalassemia@users.noreply.github.com> diff --git a/doc/RELEASE_WALKTHROUGH.rst b/doc/RELEASE_WALKTHROUGH.rst index 82f8d2c9e9f0..ac306c682148 100644 --- a/doc/RELEASE_WALKTHROUGH.rst +++ b/doc/RELEASE_WALKTHROUGH.rst @@ -47,6 +47,13 @@ Update 2.4.0 milestones Look at the issues/prs with 2.4.0 milestones and either push them off to a later version, or maybe remove the milestone. You may need to add a milestone. +Check the numpy-release repo +---------------------------- + +The things to check are the ``cibuildwheel`` version in +``.github/workflows/wheels.yml`` and the ``openblas`` versions in +``openblas_requirements.txt``. + Make a release PR ================= @@ -121,6 +128,20 @@ may also be appended, but not for the initial release as it is too long. Check previous release notes to see how this is done. 
+Test the wheel builds +--------------------- + +After the release PR is merged, go to the ``numpy-release`` repository in your +browser and manually trigger the workflow on the ``maintenance/2.4.x`` branch +using the ``Run workflow`` button in ``actions``. Make sure that the upload +target is ``none`` in the *evironment* dropdown. The wheels take about 1 hour +to build, but sometimes GitHub is very slow. If some wheel builds fail for +unrelated reasons, you can re-run them as normal in the GitHub Actions UI with +``re-run failed``. After the wheels are built review the results, checking that +the number of artifacts are correct, the wheel names are as expected, etc. If +everything looks good, proceed with the release. + + Release walkthrough =================== @@ -131,8 +152,8 @@ cloned it locally. You can also edit ``.git/config`` and add ``upstream`` if it isn't already present. -1. Prepare the release commit ------------------------------ +1. Tag the release commit +------------------------- Checkout the branch for the release, make sure it is up to date, and clean the repository:: @@ -161,47 +182,36 @@ If you need to delete the tag due to error:: 2. Build wheels and sdist ------------------------- -Create a ``maintenance/2.4.x`` branch in the ``numpy-release`` repository, -and open a PR changing the ``SOURCE_REF_TO_BUILD`` identifier at the top of -``.github/workflows/wheels.yml`` to ``v2.4.0``. That will do a full set of -wheel builds on the PR, if everything looks good merge the PR. - -All wheels are currently built in that repository on GitHub Actions, they take -about 1 hour to build. - -If you wish to manually trigger a wheel build, you can do so: in your browser, -go to `numpy-release/actions/workflows/wheels.yml `__ -and click on the "Run workflow" button, then choose the tag to build. If some -wheel builds fail for unrelated reasons, you can re-run them as normal -in the GitHub Actions UI with "re-run failed". 
- -Once you are ready to publish a release to PyPI, use that same "Run workflow" -button and choose ``pypi`` in the *environment* dropdown. All wheels and the -sdist will build and be ready to release to PyPI after manual inspection that -everything passed. E.g., the number of artifacts is correct, and the wheel -filenames and sizes look as expected. If desired, you can also download an -artifact for local unzipping and inspection. You will get an email notification -as well with a "Review pending deployments" link. Once you're ready, press the -button to start the uploads to PyPI, which will complete the release. +Go to the ``numpy-release`` repository in your browser and manually trigger the +workflow on the ``maintenance/2.4.x`` branch using the ``Run workflow`` button +in ``actions``. Make sure that the upload target is ``pypi`` in the +*evironment* dropdown. the wheels take about 1 hour to build, but sometimes +GitHub is very slow. If some wheel builds fail for unrelated reasons, you can +re-run them as normal in the GitHub Actions UI with ``re-run failed``. After +the wheels are built review the results, checking that the number of artifacts +are correct, the wheel names are as expected, etc. If everything looks good +trigger the upload. 3. Upload files to GitHub Releases ---------------------------------- -Go to ``_, there should be a ``v2.4.0 -tag``, click on it and hit the edit button for that tag and update the title to +Go to ``_, there should be a ``v2.4.0`` +tag, click on it and hit the edit button for that tag and update the title to "v2.4.0 ()". There are two ways to add files, using an editable text -window and as binary uploads. +window and as binary uploads. The text window needs markdown, so translate the +release notes from rst to md:: + + $ python tools/write_release.py 2.4.0 -Start by running ``spin notes 2.4.0`` and then edit the ``release/README.md`` -that is translated from the rst version using pandoc. 
Things that will need -fixing: PR lines from the changelog, if included, are wrapped and need -unwrapping, links should be changed to monospaced text. Then copy the contents -to the clipboard and paste them into the text window. It may take several tries -to get it look right. Then +this will create a ``release/README.md`` file that you can edit. Check the +result to see that it looks correct. Things that may need fixing: wrapped lines +that need unwrapping and links that should be changed to monospaced text. Then +copy the contents to the clipboard and paste them into the text window. It may +take several tries to get it look right. Then -- Download the sdist (``numpy-2.4.0.tar.gz``) from PyPI upload it to GitHub as - a binary file. +- Download the sdist (``numpy-2.4.0.tar.gz``) from PyPI and upload it to GitHub + as a binary file. You cannot do this using pip. - Upload ``release/README.rst`` as a binary file. - Upload ``doc/changelog/2.4.0-changelog.rst`` as a binary file. - Check the pre-release button if this is a pre-releases. @@ -271,14 +281,19 @@ notes will be a skeleton and have little content:: $ gvim doc/source/release/2.4.1-notes.rst $ git add doc/source/release/2.4.1-notes.rst -Add new release notes to the documentation release list. Then update the -``version`` in ``pyproject.toml``:: +Add a link to the new release notes:: + + $ gvim doc/source/release.rst + +Update the ``version`` in ``pyproject.toml``:: $ gvim pyproject.toml -Commit the result:: +Commit the result, edit the commit message, note the files in the commit, and +add a line ``[skip azp] [skip cirrus] [skip actions]``, then push:: $ git commit -a -m"MAINT: Prepare 2.4.x for further development" + $ git rebase -i HEAD^ $ git push origin HEAD Go to GitHub and make a PR. It should be merged quickly. 
@@ -300,7 +315,7 @@ This assumes that you have forked ``_:: - For the ``*.0`` release in a cycle, add a new section at the top with a short description of the new features and point the news link to it. - Edit the newsHeader and date fields at the top of news.md -- Also edit the butttonText on line 14 in content/en/config.yaml +- Also edit the buttonText on line 14 in content/en/config.yaml commit and push:: @@ -316,7 +331,7 @@ Go to GitHub and make a PR. The release should be announced on the numpy-discussion and python-announce-list mailing lists. Look at previous announcements for the basic template. The contributor and PR lists are the same as generated for the -release notes above. If you crosspost, make sure that python-announce-list is +release notes above. If you cross-post, make sure that python-announce-list is BCC so that replies will not be sent to that list. diff --git a/doc/changelog/2.3.4-changelog.rst b/doc/changelog/2.3.4-changelog.rst new file mode 100644 index 000000000000..f94b46a07573 --- /dev/null +++ b/doc/changelog/2.3.4-changelog.rst @@ -0,0 +1,61 @@ + +Contributors +============ + +A total of 17 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* !DWesl +* Charles Harris +* Christian Barbia + +* Evgeni Burovski +* Joren Hammudoglu +* Maaz + +* Mateusz Sokół +* Matti Picus +* Nathan Goldbaum +* Ralf Gommers +* Riku Sakamoto + +* Sandeep Gupta + +* Sayed Awad +* Sebastian Berg +* Sergey Fedorov + +* Warren Weckesser +* dependabot[bot] + +Pull requests merged +==================== + +A total of 30 pull requests were merged for this release. 
+ +* `#29725 `__: MAINT: Prepare 2.3.x for further development +* `#29781 `__: MAINT: Pin some upstream dependences +* `#29782 `__: BLD: enable x86-simd-sort to build on KNL with -mavx512f +* `#29783 `__: BUG: Include python-including headers first (#29281) +* `#29784 `__: TYP: fix np.number and np.\*integer method declaration +* `#29785 `__: TYP: mypy 1.18.1 +* `#29788 `__: TYP: replace scalar type __init__ with __new__ +* `#29790 `__: BUG: Fix ``dtype`` refcount in ``__array__`` (#29715) +* `#29791 `__: TYP: fix method declarations in floating, timedelta64, and datetime64Backport +* `#29792 `__: MAINT: delete unused variables in unary logical dispatch +* `#29797 `__: BUG: Fix pocketfft umath strides for AIX compatibility (#29768) +* `#29798 `__: BUG: np.setbufsize should raise ValueError for negative input +* `#29799 `__: BUG: Fix assert in nditer buffer setup +* `#29800 `__: BUG: Stable ScalarType ordering +* `#29838 `__: TST: Pin pyparsing to avoid matplotlib errors. +* `#29839 `__: BUG: linalg: emit a MemoryError on a malloc failure (#29811) +* `#29840 `__: BLD: change file extension for libnpymath on win-arm64 from .a... +* `#29864 `__: CI: Fix loongarch64 CI (#29856) +* `#29865 `__: TYP: Various typing fixes +* `#29910 `__: BUG: Fix float16-sort failures on 32-bit x86 MSVC (#29908) +* `#29911 `__: TYP: add missing ``__slots__`` (#29901) +* `#29913 `__: TYP: wrong argument defaults in ``testing._private`` (#29902) +* `#29920 `__: BUG: avoid segmentation fault in string_expandtabs_length_promoter +* `#29921 `__: BUG: Fix INT_MIN % -1 to return 0 for all signed integer types... 
+* `#29922 `__: TYP: minor fixes related to ``errstate`` (#29914) +* `#29923 `__: TST: use requirements/test_requirements across CI (#29919) +* `#29926 `__: BUG: fix negative samples generated by Wald distribution (#29609) +* `#29940 `__: MAINT: Bump pypa/cibuildwheel from 3.1.4 to 3.2.1 +* `#29949 `__: STY: rename @classmethod arg to cls +* `#29950 `__: MAINT: Simplify string arena growth strategy (#29885) + diff --git a/doc/source/release.rst b/doc/source/release.rst index 72ddab818a77..6f1c1a22f11d 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -6,6 +6,7 @@ Release notes :maxdepth: 2 2.4.0 + 2.3.4 2.3.3 2.3.2 2.3.1 diff --git a/doc/source/release/2.3.4-notes.rst b/doc/source/release/2.3.4-notes.rst new file mode 100644 index 000000000000..6ba7c06b7514 --- /dev/null +++ b/doc/source/release/2.3.4-notes.rst @@ -0,0 +1,83 @@ +.. currentmodule:: numpy + +========================== +NumPy 2.3.4 Release Notes +========================== + +The NumPy 2.3.4 release is a patch release split between a number of maintenance +updates and bug fixes. This release supports Python versions 3.11-3.14. This +release is based on the Python 3.14.0 final. + + +Changes +======= + +The ``npymath`` and ``npyrandom`` libraries now have a ``.lib`` rather than a +``.a`` file extension on win-arm64, for compatibility for building with MSVC and +``setuptools``. Please note that using these static libraries is discouraged +and for existing projects using it, it's best to use it with a matching +compiler toolchain, which is ``clang-cl`` on Windows on Arm. + +(`gh-29750 `__) + + +Contributors +============ + +A total of 17 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. 
+ +* !DWesl +* Charles Harris +* Christian Barbia + +* Evgeni Burovski +* Joren Hammudoglu +* Maaz + +* Mateusz Sokół +* Matti Picus +* Nathan Goldbaum +* Ralf Gommers +* Riku Sakamoto + +* Sandeep Gupta + +* Sayed Awad +* Sebastian Berg +* Sergey Fedorov + +* Warren Weckesser +* dependabot[bot] + +Pull requests merged +==================== + +A total of 30 pull requests were merged for this release. + +* `#29725 `__: MAINT: Prepare 2.3.x for further development +* `#29781 `__: MAINT: Pin some upstream dependences +* `#29782 `__: BLD: enable x86-simd-sort to build on KNL with -mavx512f +* `#29783 `__: BUG: Include python-including headers first (#29281) +* `#29784 `__: TYP: fix np.number and np.\*integer method declaration +* `#29785 `__: TYP: mypy 1.18.1 +* `#29788 `__: TYP: replace scalar type __init__ with __new__ +* `#29790 `__: BUG: Fix ``dtype`` refcount in ``__array__`` (#29715) +* `#29791 `__: TYP: fix method declarations in floating, timedelta64, and datetime64Backport +* `#29792 `__: MAINT: delete unused variables in unary logical dispatch +* `#29797 `__: BUG: Fix pocketfft umath strides for AIX compatibility (#29768) +* `#29798 `__: BUG: np.setbufsize should raise ValueError for negative input +* `#29799 `__: BUG: Fix assert in nditer buffer setup +* `#29800 `__: BUG: Stable ScalarType ordering +* `#29838 `__: TST: Pin pyparsing to avoid matplotlib errors. +* `#29839 `__: BUG: linalg: emit a MemoryError on a malloc failure (#29811) +* `#29840 `__: BLD: change file extension for libnpymath on win-arm64 from .a... 
+* `#29864 `__: CI: Fix loongarch64 CI (#29856) +* `#29865 `__: TYP: Various typing fixes +* `#29910 `__: BUG: Fix float16-sort failures on 32-bit x86 MSVC (#29908) +* `#29911 `__: TYP: add missing ``__slots__`` (#29901) +* `#29913 `__: TYP: wrong argument defaults in ``testing._private`` (#29902) +* `#29920 `__: BUG: avoid segmentation fault in string_expandtabs_length_promoter +* `#29921 `__: BUG: Fix INT_MIN % -1 to return 0 for all signed integer types... +* `#29922 `__: TYP: minor fixes related to ``errstate`` (#29914) +* `#29923 `__: TST: use requirements/test_requirements across CI (#29919) +* `#29926 `__: BUG: fix negative samples generated by Wald distribution (#29609) +* `#29940 `__: MAINT: Bump pypa/cibuildwheel from 3.1.4 to 3.2.1 +* `#29949 `__: STY: rename @classmethod arg to cls +* `#29950 `__: MAINT: Simplify string arena growth strategy (#29885) + From 72c4b8c9d0fc252806b50d894cd834383c638764 Mon Sep 17 00:00:00 2001 From: Britney Whittington <103079612+bwhitt7@users.noreply.github.com> Date: Thu, 16 Oct 2025 14:14:34 -0400 Subject: [PATCH 0669/1718] TST: Mark thread-unsafe tests (#29816) --- numpy/_core/tests/test_arrayprint.py | 4 ++++ numpy/_core/tests/test_cpu_features.py | 1 + numpy/_core/tests/test_custom_dtypes.py | 6 +++++ numpy/_core/tests/test_deprecations.py | 3 +++ numpy/_core/tests/test_dtype.py | 8 +++++++ numpy/_core/tests/test_finfo.py | 3 +++ numpy/_core/tests/test_mem_policy.py | 1 + numpy/_core/tests/test_memmap.py | 1 + numpy/_core/tests/test_multiarray.py | 6 +++++ numpy/_core/tests/test_multithreading.py | 3 +++ numpy/_core/tests/test_nditer.py | 2 ++ numpy/_core/tests/test_print.py | 2 ++ numpy/_core/tests/test_regression.py | 1 + numpy/_core/tests/test_scalarmath.py | 6 +++++ numpy/_core/tests/test_shape_base.py | 1 + numpy/_core/tests/test_strings.py | 2 ++ numpy/_core/tests/test_ufunc.py | 2 ++ numpy/_core/tests/test_umath.py | 1 + numpy/conftest.py | 23 ++++++++++++++++++++ numpy/lib/tests/test__datasource.py | 6 +++++ 
numpy/lib/tests/test__iotools.py | 3 +++ numpy/lib/tests/test_format.py | 1 + numpy/lib/tests/test_function_base.py | 7 ++++++ numpy/lib/tests/test_io.py | 3 +++ numpy/lib/tests/test_nanfunctions.py | 1 + numpy/lib/tests/test_recfunctions.py | 3 +++ numpy/ma/tests/test_core.py | 3 +++ numpy/matrixlib/tests/test_matrix_linalg.py | 5 +++++ numpy/polynomial/tests/test_printing.py | 2 ++ numpy/random/tests/test_direct.py | 3 +++ numpy/random/tests/test_extending.py | 4 ++++ numpy/random/tests/test_generator_mt19937.py | 1 + numpy/random/tests/test_randomstate.py | 5 +++++ numpy/testing/tests/test_utils.py | 15 +++++++++++++ numpy/tests/test_ctypeslib.py | 6 +++++ numpy/tests/test_numpy_config.py | 1 + numpy/tests/test_reloading.py | 1 + numpy/typing/tests/test_isfile.py | 6 +++++ 38 files changed, 152 insertions(+) diff --git a/numpy/_core/tests/test_arrayprint.py b/numpy/_core/tests/test_arrayprint.py index a054f408e954..04969cb74a4e 100644 --- a/numpy/_core/tests/test_arrayprint.py +++ b/numpy/_core/tests/test_arrayprint.py @@ -533,6 +533,9 @@ def test_nested_array_repr(self): ) @given(hynp.from_dtype(np.dtype("U"))) + @pytest.mark.thread_unsafe( + reason="gives unreliable results w/ hypothesis (HypothesisWorks/hypothesis#4562)" + ) def test_any_text(self, text): # This test checks that, given any value that can be represented in an # array of dtype("U") (i.e. unicode string), ... 
@@ -546,6 +549,7 @@ def test_any_text(self, text): assert_equal(result, expected_repr) @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") + @pytest.mark.thread_unsafe(reason="garbage collector is global state") def test_refcount(self): # make sure we do not hold references to the array due to a recursive # closure (gh-10620) diff --git a/numpy/_core/tests/test_cpu_features.py b/numpy/_core/tests/test_cpu_features.py index ebf5b49fc357..818c1fb204a4 100644 --- a/numpy/_core/tests/test_cpu_features.py +++ b/numpy/_core/tests/test_cpu_features.py @@ -124,6 +124,7 @@ def load_flags_auxv(self): " therefore this test class cannot be properly executed." ), ) +@pytest.mark.thread_unsafe(reason="setup & tmp_path_factory threads-unsafe, modifies environment variables") class TestEnvPrivation: cwd = pathlib.Path(__file__).parent.resolve() env = os.environ.copy() diff --git a/numpy/_core/tests/test_custom_dtypes.py b/numpy/_core/tests/test_custom_dtypes.py index f03e35dca9f2..2acb4adf4c7c 100644 --- a/numpy/_core/tests/test_custom_dtypes.py +++ b/numpy/_core/tests/test_custom_dtypes.py @@ -327,6 +327,9 @@ def test_creation_class(self): assert np.zeros(3, dtype=SF).dtype == SF(1.) assert np.zeros_like(arr1, dtype=SF).dtype == SF(1.) 
+ @pytest.mark.thread_unsafe( + reason="_ScaledFloatTestDType setup is thread-unsafe (gh-29850)" + ) def test_np_save_load(self): # this monkeypatch is needed because pickle # uses the repr of a type to reconstruct it @@ -370,6 +373,9 @@ def test_flatiter_index(self, index): np.testing.assert_array_equal( arr.view(np.float64), arr2.view(np.float64)) +@pytest.mark.thread_unsafe( + reason="_ScaledFloatTestDType setup is thread-unsafe (gh-29850)" +) def test_type_pickle(): # can't actually unpickle, but we can pickle (if in namespace) import pickle diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index ca4463ece1f7..cb73d455ca8f 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -458,6 +458,9 @@ def assign_to_index(): self.assert_deprecated(assign_to_index) +@pytest.mark.thread_unsafe( + reason="warning control utilities are deprecated due to being thread-unsafe" +) class TestWarningUtilityDeprecations(_DeprecationTestCase): # Deprecation in NumPy 2.4, 2025-08 message = r"NumPy warning suppression and assertion utilities are deprecated." 
diff --git a/numpy/_core/tests/test_dtype.py b/numpy/_core/tests/test_dtype.py index 175680451f3e..e6c80d8d4212 100644 --- a/numpy/_core/tests/test_dtype.py +++ b/numpy/_core/tests/test_dtype.py @@ -1316,6 +1316,9 @@ def test_object_flag_not_inherited(self): @pytest.mark.slow @hypothesis.given(dtype=hynp.nested_dtypes()) + @pytest.mark.thread_unsafe( + reason="hynp.nested_dtypes thread unsafe (HypothesisWorks/hypothesis#4562)" + ) def test_make_canonical_hypothesis(self, dtype): canonical = np.result_type(dtype) self.check_canonical(dtype, canonical) @@ -1324,6 +1327,9 @@ def test_make_canonical_hypothesis(self, dtype): assert np.can_cast(two_arg_result, canonical, casting="no") @pytest.mark.slow + @pytest.mark.thread_unsafe( + reason="gives unreliable results w/ hypothesis (HypothesisWorks/hypothesis#4562)" + ) @hypothesis.given( dtype=hypothesis.extra.numpy.array_dtypes( subtype_strategy=hypothesis.extra.numpy.array_dtypes(), @@ -1916,6 +1922,7 @@ def test_pairs(self, pair): class TestUserDType: @pytest.mark.leaks_references(reason="dynamically creates custom dtype.") + @pytest.mark.thread_unsafe(reason="crashes when GIL disabled, dtype setup is thread-unsafe") def test_custom_structured_dtype(self): class mytype: pass @@ -1936,6 +1943,7 @@ class mytype: del a assert sys.getrefcount(o) == startcount + @pytest.mark.thread_unsafe(reason="crashes when GIL disabled, dtype setup is thread-unsafe") def test_custom_structured_dtype_errors(self): class mytype: pass diff --git a/numpy/_core/tests/test_finfo.py b/numpy/_core/tests/test_finfo.py index 572490c1eb08..5703b8d6a765 100644 --- a/numpy/_core/tests/test_finfo.py +++ b/numpy/_core/tests/test_finfo.py @@ -69,6 +69,9 @@ def float64_ma(): 'machep', 'negep', 'minexp', 'maxexp', 'nmant', 'iexp', 'eps', 'epsneg', 'precision', 'resolution' ]) +@pytest.mark.thread_unsafe( + reason="complex fixture setup is thread-unsafe (pytest-dev/pytest#13768.)" +) def test_finfo_properties(dtype, ma_fixture, prop, request): """Test that 
finfo properties match expected machine arithmetic values.""" ma = request.getfixturevalue(ma_fixture) diff --git a/numpy/_core/tests/test_mem_policy.py b/numpy/_core/tests/test_mem_policy.py index 70befb8bd324..313d3efe779a 100644 --- a/numpy/_core/tests/test_mem_policy.py +++ b/numpy/_core/tests/test_mem_policy.py @@ -415,6 +415,7 @@ def test_new_policy(get_module): reason=("bad interaction between getenv and " "os.environ inside pytest")) @pytest.mark.parametrize("policy", ["0", "1", None]) +@pytest.mark.thread_unsafe(reason="modifies environment variables") def test_switch_owner(get_module, policy): a = get_module.get_array() assert np._core.multiarray.get_handler_name(a) is None diff --git a/numpy/_core/tests/test_memmap.py b/numpy/_core/tests/test_memmap.py index 49931e1680e8..8e2aa0a507b1 100644 --- a/numpy/_core/tests/test_memmap.py +++ b/numpy/_core/tests/test_memmap.py @@ -30,6 +30,7 @@ ) +@pytest.mark.thread_unsafe(reason="setup & memmap is thread-unsafe (gh-29126)") class TestMemmap: def setup_method(self): self.tmpfp = NamedTemporaryFile(prefix='mmap') diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 12ea10e5b568..c2d54dd15caf 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -7201,6 +7201,7 @@ def assert_dot_close(A, X, desired): @pytest.mark.slow @pytest.mark.parametrize("dtype", [np.float64, np.complex128]) @requires_memory(free_bytes=18e9) # complex case needs 18GiB+ + @pytest.mark.thread_unsafe(reason="crashes with low memory") def test_huge_vectordot(self, dtype): # Large vector multiplications are chunked with 32bit BLAS # Test that the chunking does the right thing, see also gh-22262 @@ -8565,6 +8566,7 @@ def test_padding(self): x = np.array([(1,), (2,)], dtype={'f0': (int, j)}) self._check_roundtrip(x) + @pytest.mark.thread_unsafe(reason="test result depends on the reference count of a global object") def test_reference_leak(self): if HAS_REFCOUNT: 
count_1 = sys.getrefcount(np._core._internal) @@ -8686,6 +8688,7 @@ class foo(ctypes.Structure): assert_equal(arr['a'], 3) @pytest.mark.parametrize("obj", [np.ones(3), np.ones(1, dtype="i,i")[()]]) + @pytest.mark.thread_unsafe(reason="_multiarray_tests used memoryview, which is thread-unsafe") def test_error_if_stored_buffer_info_is_corrupted(self, obj): """ If a user extends a NumPy array before 1.20 and then runs it @@ -9082,6 +9085,7 @@ def __array_interface__(self): # This fails due to going into the buffer protocol path (f, {'data': None, 'shape': ()}, TypeError), ]) + @pytest.mark.thread_unsafe(reason="test result depends on the reference count of a global object") def test_scalar_interface(self, val, iface, expected): # Test scalar coercion within the array interface self.f.iface = {'typestr': 'f8'} @@ -9770,6 +9774,7 @@ def test_ctypes_is_available(self): assert_equal(ctypes, test_arr.ctypes._ctypes) assert_equal(tuple(test_arr.ctypes.shape), (2, 3)) + @pytest.mark.thread_unsafe(reason="modifies global module state") def test_ctypes_is_not_available(self): from numpy._core import _internal _internal.ctypes = None @@ -10677,6 +10682,7 @@ def test_argsort_largearrays(dtype): assert_arg_sorted(arr, np.argsort(arr, kind='quick')) @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +@pytest.mark.thread_unsafe(reason="test result depends on the reference count of a global object") def test_gh_22683(): b = 777.68760986 a = np.array([b] * 10000, dtype=object) diff --git a/numpy/_core/tests/test_multithreading.py b/numpy/_core/tests/test_multithreading.py index 6563abde8ff4..149a216fa1cd 100644 --- a/numpy/_core/tests/test_multithreading.py +++ b/numpy/_core/tests/test_multithreading.py @@ -12,6 +12,9 @@ if IS_WASM: pytest.skip(allow_module_level=True, reason="no threading support in wasm") +pytestmark = pytest.mark.thread_unsafe( + reason="tests in this module are already explicitly multi-threaded" +) def test_parallel_randomstate_creation(): # 
if the coercion cache is enabled and not thread-safe, creating diff --git a/numpy/_core/tests/test_nditer.py b/numpy/_core/tests/test_nditer.py index 6396a19fe97f..acaaa0548fd5 100644 --- a/numpy/_core/tests/test_nditer.py +++ b/numpy/_core/tests/test_nditer.py @@ -3403,6 +3403,7 @@ def test_arbitrary_number_of_ops_nested(): @pytest.mark.slow @requires_memory(9 * np.iinfo(np.intc).max) +@pytest.mark.thread_unsafe(reason="crashes with low memory") def test_arbitrary_number_of_ops_error(): # A different error may happen for more than integer operands, but that # is too large to test nicely. @@ -3415,6 +3416,7 @@ def test_arbitrary_number_of_ops_error(): np.nested_iters(args, [[0], []]) +@pytest.mark.thread_unsafe(reason="capfd is thread-unsafe") def test_debug_print(capfd): """ Matches the expected output of a debug print with the actual output. diff --git a/numpy/_core/tests/test_print.py b/numpy/_core/tests/test_print.py index d99b2794d7ca..95a177b57a7d 100644 --- a/numpy/_core/tests/test_print.py +++ b/numpy/_core/tests/test_print.py @@ -117,6 +117,7 @@ def _test_redirected_print(x, tp, ref=None): err_msg=f'print failed for type{tp}') +@pytest.mark.thread_unsafe(reason="sys.stdout not thread-safe") @pytest.mark.parametrize('tp', [np.float32, np.double, np.longdouble]) def test_float_type_print(tp): """Check formatting when using print """ @@ -133,6 +134,7 @@ def test_float_type_print(tp): _test_redirected_print(1e16, tp, ref) +@pytest.mark.thread_unsafe(reason="sys.stdout not thread-safe") @pytest.mark.parametrize('tp', [np.complex64, np.cdouble, np.clongdouble]) def test_complex_type_print(tp): """Check formatting when using print """ diff --git a/numpy/_core/tests/test_regression.py b/numpy/_core/tests/test_regression.py index 8ad9a26fcc9a..dc457b2d5fc1 100644 --- a/numpy/_core/tests/test_regression.py +++ b/numpy/_core/tests/test_regression.py @@ -2491,6 +2491,7 @@ def test__array_interface__descr(self): @pytest.mark.skipif(sys.maxsize < 2 ** 31 + 1, 
reason='overflows 32-bit python') @requires_memory(free_bytes=9e9) + @pytest.mark.thread_unsafe(reason="crashes with low memory") def test_dot_big_stride(self): # gh-17111 # blas stride = stride//itemsize > int32 max diff --git a/numpy/_core/tests/test_scalarmath.py b/numpy/_core/tests/test_scalarmath.py index 8c24b4cfdc88..bbde90c08bbb 100644 --- a/numpy/_core/tests/test_scalarmath.py +++ b/numpy/_core/tests/test_scalarmath.py @@ -871,6 +871,9 @@ def recursionlimit(n): @given(sampled_from(objecty_things), sampled_from(binary_operators_for_scalar_ints), sampled_from(types + [rational])) +@pytest.mark.thread_unsafe( + reason="gives unreliable results w/ hypothesis (HypothesisWorks/hypothesis#4562)" +) def test_operator_object_left(o, op, type_): try: with recursionlimit(200): @@ -882,6 +885,9 @@ def test_operator_object_left(o, op, type_): @given(sampled_from(objecty_things), sampled_from(binary_operators_for_scalar_ints), sampled_from(types + [rational])) +@pytest.mark.thread_unsafe( + reason="gives unreliable results w/ hypothesis (HypothesisWorks/hypothesis#4562)" +) def test_operator_object_right(o, op, type_): try: with recursionlimit(200): diff --git a/numpy/_core/tests/test_shape_base.py b/numpy/_core/tests/test_shape_base.py index 2d606a2d33fd..5be3d05bbf11 100644 --- a/numpy/_core/tests/test_shape_base.py +++ b/numpy/_core/tests/test_shape_base.py @@ -299,6 +299,7 @@ def test_exceptions(self): reason="only problematic on 64bit platforms" ) @requires_memory(2 * np.iinfo(np.intc).max) + @pytest.mark.thread_unsafe(reason="crashes with low memory") def test_huge_list_error(self): a = np.array([1]) max_int = np.iinfo(np.intc).max diff --git a/numpy/_core/tests/test_strings.py b/numpy/_core/tests/test_strings.py index e5c3bb87c773..57d94ce12747 100644 --- a/numpy/_core/tests/test_strings.py +++ b/numpy/_core/tests/test_strings.py @@ -136,6 +136,7 @@ def test_string_size_dtype_large_repr(str_dt): @pytest.mark.slow @requires_memory(2 * np.iinfo(np.intc).max) 
@pytest.mark.parametrize("str_dt", "US") +@pytest.mark.thread_unsafe(reason="crashes with low memory") def test_large_string_coercion_error(str_dt): very_large = np.iinfo(np.intc).max // np.dtype(f"{str_dt}1").itemsize try: @@ -163,6 +164,7 @@ def __str__(self): @pytest.mark.slow @requires_memory(2 * np.iinfo(np.intc).max) @pytest.mark.parametrize("str_dt", "US") +@pytest.mark.thread_unsafe(reason="crashes with low memory") def test_large_string_addition_error(str_dt): very_large = np.iinfo(np.intc).max // np.dtype(f"{str_dt}1").itemsize diff --git a/numpy/_core/tests/test_ufunc.py b/numpy/_core/tests/test_ufunc.py index d5f85d4b5c3b..2b01d37989cc 100644 --- a/numpy/_core/tests/test_ufunc.py +++ b/numpy/_core/tests/test_ufunc.py @@ -1796,6 +1796,7 @@ def test_identityless_reduction(self, arrs, pos): @requires_memory(6 * 1024**3) @pytest.mark.skipif(sys.maxsize < 2**32, reason="test array too large for 32bit platform") + @pytest.mark.thread_unsafe(reason="crashes with low memory") def test_identityless_reduction_huge_array(self): # Regression test for gh-20921 (copying identity incorrectly failed) arr = np.zeros((2, 2**31), 'uint8') @@ -3247,6 +3248,7 @@ def test_resolve_dtypes_reduction_errors(self): @pytest.mark.skipif(not hasattr(ct, "pythonapi"), reason="`ctypes.pythonapi` required for capsule unpacking.") + @pytest.mark.thread_unsafe(reason="modifies global object in the ctypes API") def test_loop_access(self): # This is a basic test for the full strided loop access data_t = ct.c_char_p * 2 diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py index 572302541e47..cd0ec7d04f69 100644 --- a/numpy/_core/tests/test_umath.py +++ b/numpy/_core/tests/test_umath.py @@ -4046,6 +4046,7 @@ def test_array_ufunc_direct_call(self): res = a.__array_ufunc__(np.add, "__call__", a, a) assert_array_equal(res, a + a) + @pytest.mark.thread_unsafe(reason="modifies global module") def test_ufunc_docstring(self): original_doc = np.add.__doc__ new_doc = "new 
docs" diff --git a/numpy/conftest.py b/numpy/conftest.py index 52f2a75b9df0..c158a2251914 100644 --- a/numpy/conftest.py +++ b/numpy/conftest.py @@ -6,6 +6,7 @@ import tempfile import warnings from contextlib import contextmanager +from pathlib import Path import hypothesis import pytest @@ -20,6 +21,11 @@ except ModuleNotFoundError: HAVE_SCPDT = False +try: + import pytest_run_parallel # noqa: F401 + PARALLEL_RUN_AVALIABLE = True +except ModuleNotFoundError: + PARALLEL_RUN_AVALIABLE = False _old_fpu_mode = None _collect_results = {} @@ -62,6 +68,17 @@ def pytest_configure(config): "slow: Tests that are very slow.") config.addinivalue_line("markers", "slow_pypy: Tests that are very slow on pypy.") + if not PARALLEL_RUN_AVALIABLE: + config.addinivalue_line("markers", + "parallel_threads(n): run the given test function in parallel " + "using `n` threads.", + ) + config.addinivalue_line("markers", + "iterations(n): run the given test function `n` times in each thread", + ) + config.addinivalue_line("markers", + "thread_unsafe: mark the test function as single-threaded", + ) def pytest_addoption(parser): @@ -224,3 +241,9 @@ def warnings_errors_and_rng(test=None): 'numpy/random/_examples', 'numpy/f2py/_backends/_distutils.py', ] + +def pytest_collection_modifyitems(config, items): + for item in items: + if Path(item.fspath).parent == Path(__file__).parent / 'f2py' / 'tests': + item.add_marker(pytest.mark.thread_unsafe( + reason="f2py tests are thread-unsafe")) diff --git a/numpy/lib/tests/test__datasource.py b/numpy/lib/tests/test__datasource.py index 2dd19410bbf0..4c866511a5de 100644 --- a/numpy/lib/tests/test__datasource.py +++ b/numpy/lib/tests/test__datasource.py @@ -88,6 +88,7 @@ def invalid_httpfile(): return http_fakefile +@pytest.mark.thread_unsafe(reason="mkdtemp thread-unsafe") class TestDataSourceOpen: def test_ValidHTTP(self, tmp_path): ds = datasource.DataSource(tmp_path) @@ -156,6 +157,7 @@ def test_ValidBz2File(self, tmp_path): assert_equal(magic_line, 
result) +@pytest.mark.thread_unsafe(reason="mkdtemp thread-unsafe") class TestDataSourceExists: def test_ValidHTTP(self, tmp_path): ds = datasource.DataSource(tmp_path) @@ -182,6 +184,7 @@ def test_InvalidFile(self, tmp_path): assert_equal(ds.exists(tmpfile), False) +@pytest.mark.thread_unsafe(reason="mkdtemp thread-unsafe") class TestDataSourceAbspath: def test_ValidHTTP(self, tmp_path): ds = datasource.DataSource(tmp_path) @@ -244,6 +247,7 @@ def test_windows_os_sep(self, tmp_path): os.sep = orig_os_sep +@pytest.mark.thread_unsafe(reason="mkdtemp thread-unsafe") class TestRepositoryAbspath: def test_ValidHTTP(self, tmp_path): repos = datasource.Repository(valid_baseurl(), tmp_path) @@ -271,6 +275,7 @@ def test_windows_os_sep(self, tmp_path): os.sep = orig_os_sep +@pytest.mark.thread_unsafe(reason="mkdtemp thread-unsafe") class TestRepositoryExists: def test_ValidFile(self, tmp_path): # Create local temp file @@ -300,6 +305,7 @@ def test_CachedHTTPFile(self, tmp_path): assert_(repos.exists(tmpfile)) +@pytest.mark.thread_unsafe(reason="mkdtemp thread-unsafe") class TestOpenFunc: def test_DataSourceOpen(self, tmp_path): local_file = valid_textfile(tmp_path) diff --git a/numpy/lib/tests/test__iotools.py b/numpy/lib/tests/test__iotools.py index 548a3db2dc07..2555c4b86f6c 100644 --- a/numpy/lib/tests/test__iotools.py +++ b/numpy/lib/tests/test__iotools.py @@ -1,6 +1,8 @@ import time from datetime import date +import pytest + import numpy as np from numpy.lib._iotools import ( LineSplitter, @@ -200,6 +202,7 @@ def test_missing(self): except ValueError: pass + @pytest.mark.thread_unsafe(reason="monkeypatches StringConverter") def test_upgrademapper(self): "Tests updatemapper" dateparser = _bytes_to_date diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py index a6de6238d269..c70e3d5ebd43 100644 --- a/numpy/lib/tests/test_format.py +++ b/numpy/lib/tests/test_format.py @@ -957,6 +957,7 @@ def test_large_file_support(tmpdir): @pytest.mark.skipif(not 
IS_64BIT, reason="test requires 64-bit system") @pytest.mark.slow @requires_memory(free_bytes=2 * 2**30) +@pytest.mark.thread_unsafe(reason="crashes with low memory") def test_large_archive(tmpdir): # Regression test for product of saving arrays with dimensions of array # having a product that doesn't fit in int32. See gh-7598 for details. diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 6ca7892c6cbe..e441d9a1dd4b 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -2056,6 +2056,9 @@ def unbound(*args): ('bound', A.iters), ('unbound', 0), ]) + @pytest.mark.thread_unsafe( + reason="test result depends on the reference count of a global object" + ) def test_frompyfunc_leaks(self, name, incr): # exposed in gh-11867 as np.vectorized, but the problem stems from # frompyfunc. @@ -3961,6 +3964,10 @@ def test_quantile_monotonic(self, method): quantile = np.quantile([0., 1., 2., 3.], p0, method=method) assert_equal(np.sort(quantile), quantile) + @pytest.mark.thread_unsafe( + reason="gives unreliable results w/ hypothesis " + "(HypothesisWorks/hypothesis#4562)" + ) @hypothesis.given( arr=arrays(dtype=np.float64, shape=st.integers(min_value=3, max_value=1000), diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py index b4b0db36ae2a..5b9c9ab42a5d 100644 --- a/numpy/lib/tests/test_io.py +++ b/numpy/lib/tests/test_io.py @@ -235,6 +235,7 @@ def test_load_non_npy(self): @pytest.mark.skipif(IS_PYPY, reason="Hangs on PyPy") @pytest.mark.skipif(not IS_64BIT, reason="Needs 64bit platform") @pytest.mark.slow + @pytest.mark.thread_unsafe(reason="crashes with low memory") def test_big_arrays(self): L = (1 << 31) + 100000 a = np.empty(L, dtype=np.uint8) @@ -630,6 +631,7 @@ def test_unicode_and_bytes_fmt(self, iotype): @pytest.mark.skipif(sys.platform == 'win32', reason="files>4GB may not work") @pytest.mark.slow @requires_memory(free_bytes=7e9) + 
@pytest.mark.thread_unsafe(reason="crashes with low memory") def test_large_zip(self): def check_large_zip(memoryerror_raised): memoryerror_raised.value = False @@ -2806,6 +2808,7 @@ def test_npzfile_dict(): @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +@pytest.mark.thread_unsafe(reason="garbage collector is global state") def test_load_refcount(): # Check that objects returned by np.load are directly freed based on # their refcount, rather than needing the gc to collect them. diff --git a/numpy/lib/tests/test_nanfunctions.py b/numpy/lib/tests/test_nanfunctions.py index ef11ecfdf518..6ef86bf84ee0 100644 --- a/numpy/lib/tests/test_nanfunctions.py +++ b/numpy/lib/tests/test_nanfunctions.py @@ -1422,6 +1422,7 @@ def test__replace_nan(): assert np.isnan(arr_nan[-1]) +@pytest.mark.thread_unsafe(reason="memmap is thread-unsafe (gh-29126)") def test_memmap_takes_fast_route(tmpdir): # We want memory mapped arrays to take the fast route through nanmax, # which avoids creating a mask by using fmax.reduce (see gh-28721). 
So we diff --git a/numpy/lib/tests/test_recfunctions.py b/numpy/lib/tests/test_recfunctions.py index 3991f92b16a3..b9cc266a9363 100644 --- a/numpy/lib/tests/test_recfunctions.py +++ b/numpy/lib/tests/test_recfunctions.py @@ -1,3 +1,5 @@ +import pytest + import numpy as np import numpy.ma as ma from numpy.lib.recfunctions import ( @@ -234,6 +236,7 @@ def test_repack_fields(self): dt = np.dtype((np.record, dt)) assert_(repack_fields(dt).type is np.record) + @pytest.mark.thread_unsafe(reason="memmap is thread-unsafe (gh-29126)") def test_structured_to_unstructured(self, tmp_path): a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)]) out = structured_to_unstructured(a) diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index fbebd9fccc37..2da03158d317 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -1046,6 +1046,7 @@ def test_mvoid_iter(self): # w/ mask assert_equal(list(a[1]), [masked, 4]) + @pytest.mark.thread_unsafe(reason="masked_print_option.set_display global state") def test_mvoid_print(self): # Test printing a mvoid mx = array([(1, 1), (2, 2)], dtype=[('a', int), ('b', int)]) @@ -1063,6 +1064,7 @@ def test_mvoid_print(self): mx = array([(1,), (2,)], dtype=[('a', 'O')]) assert_equal(str(mx[0]), "(1,)") + @pytest.mark.thread_unsafe(reason="masked_print_option global state") def test_mvoid_multidim_print(self): # regression test for gh-6019 @@ -4331,6 +4333,7 @@ def test_axis_methods_nomask(self): assert_equal(a.max(-1), [3, 6]) assert_equal(a.max(1), [3, 6]) + @pytest.mark.thread_unsafe(reason="crashes with low memory") @requires_memory(free_bytes=2 * 10000 * 1000 * 2) def test_mean_overflow(self): # Test overflow in masked arrays diff --git a/numpy/matrixlib/tests/test_matrix_linalg.py b/numpy/matrixlib/tests/test_matrix_linalg.py index 6ce03b36abde..99acf32adc49 100644 --- a/numpy/matrixlib/tests/test_matrix_linalg.py +++ b/numpy/matrixlib/tests/test_matrix_linalg.py @@ -1,4 +1,6 @@ """ Test 
functions for linalg module using the matrix class.""" +import pytest + import numpy as np from numpy.linalg.tests.test_linalg import ( CondCases, @@ -81,6 +83,9 @@ class TestDetMatrix(DetCases, MatrixTestCase): pass +@pytest.mark.thread_unsafe( + reason="residuals not calculated properly for square tests (gh-29851)" +) class TestLstsqMatrix(LstsqCases, MatrixTestCase): pass diff --git a/numpy/polynomial/tests/test_printing.py b/numpy/polynomial/tests/test_printing.py index d3735e3b85f6..f7d0131c94a9 100644 --- a/numpy/polynomial/tests/test_printing.py +++ b/numpy/polynomial/tests/test_printing.py @@ -246,6 +246,7 @@ def test_linewidth_printoption(self, lw, tgt): assert_(len(line) < lw) +@pytest.mark.thread_unsafe(reason="set_default_printstyle() is global state") def test_set_default_printoptions(): p = poly.Polynomial([1, 2, 3]) c = poly.Chebyshev([1, 2, 3]) @@ -259,6 +260,7 @@ def test_set_default_printoptions(): poly.set_default_printstyle('invalid_input') +@pytest.mark.thread_unsafe(reason="set_default_printstyle() is global state") def test_complex_coefficients(): """Test both numpy and built-in complex.""" coefs = [0 + 1j, 1 + 1j, -2 + 2j, 3 + 0j] diff --git a/numpy/random/tests/test_direct.py b/numpy/random/tests/test_direct.py index 6f069e48879f..9916f8ad3440 100644 --- a/numpy/random/tests/test_direct.py +++ b/numpy/random/tests/test_direct.py @@ -572,6 +572,9 @@ def test_passthrough(self): assert rg2 is rg assert rg2.bit_generator is bg + @pytest.mark.thread_unsafe( + reason="np.random.set_bit_generator affects global state" + ) def test_coercion_RandomState_Generator(self): # use default_rng to coerce RandomState to Generator rs = RandomState(1234) diff --git a/numpy/random/tests/test_extending.py b/numpy/random/tests/test_extending.py index 7a079d6362e8..a1e64ecbe343 100644 --- a/numpy/random/tests/test_extending.py +++ b/numpy/random/tests/test_extending.py @@ -57,6 +57,10 @@ @pytest.mark.skipif(sysconfig.get_platform() == 'win-arm64', reason='Meson 
unable to find MSVC linker on win-arm64') @pytest.mark.slow +@pytest.mark.thread_unsafe( + reason="building cython code in a subprocess doesn't make sense to do in many " + "threads and sometimes crashes" +) def test_cython(tmp_path): import glob # build the examples in a temporary directory diff --git a/numpy/random/tests/test_generator_mt19937.py b/numpy/random/tests/test_generator_mt19937.py index 51065f24868d..7d13c49149b3 100644 --- a/numpy/random/tests/test_generator_mt19937.py +++ b/numpy/random/tests/test_generator_mt19937.py @@ -1266,6 +1266,7 @@ def test_dirichlet_small_alpha(self): assert_array_almost_equal(actual, expected, decimal=15) @pytest.mark.slow + @pytest.mark.thread_unsafe(reason="crashes with low memory") def test_dirichlet_moderately_small_alpha(self): # Use alpha.max() < 0.1 to trigger stick breaking code path alpha = np.array([0.02, 0.04, 0.03]) diff --git a/numpy/random/tests/test_randomstate.py b/numpy/random/tests/test_randomstate.py index fd371218616c..63ffb5a86389 100644 --- a/numpy/random/tests/test_randomstate.py +++ b/numpy/random/tests/test_randomstate.py @@ -2034,6 +2034,7 @@ def test_randomstate_ctor_old_style_pickle(): assert_equal(state_a['gauss'], state_b['gauss']) +@pytest.mark.thread_unsafe(reason="np.random.set_bit_generator affects global state") def test_hot_swap(restore_singleton_bitgen): # GH 21808 def_bg = np.random.default_rng(0) @@ -2045,6 +2046,7 @@ def test_hot_swap(restore_singleton_bitgen): assert bg is second_bg +@pytest.mark.thread_unsafe(reason="np.random.set_bit_generator affects global state") def test_seed_alt_bit_gen(restore_singleton_bitgen): # GH 21808 bg = PCG64(0) @@ -2059,6 +2061,7 @@ def test_seed_alt_bit_gen(restore_singleton_bitgen): assert state["state"]["inc"] != new_state["state"]["inc"] +@pytest.mark.thread_unsafe(reason="np.random.set_bit_generator affects global state") def test_state_error_alt_bit_gen(restore_singleton_bitgen): # GH 21808 state = np.random.get_state() @@ -2068,6 +2071,7 @@ 
def test_state_error_alt_bit_gen(restore_singleton_bitgen): np.random.set_state(state) +@pytest.mark.thread_unsafe(reason="np.random.set_bit_generator affects global state") def test_swap_worked(restore_singleton_bitgen): # GH 21808 np.random.seed(98765) @@ -2086,6 +2090,7 @@ def test_swap_worked(restore_singleton_bitgen): assert new_state["state"]["inc"] == new_state["state"]["inc"] +@pytest.mark.thread_unsafe(reason="np.random.set_bit_generator affects global state") def test_swapped_singleton_against_direct(restore_singleton_bitgen): np.random.set_bit_generator(PCG64(98765)) singleton_vals = np.random.randint(0, 2 ** 30, 10) diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py index 815f7ac2930a..6d43343ef98a 100644 --- a/numpy/testing/tests/test_utils.py +++ b/numpy/testing/tests/test_utils.py @@ -1146,6 +1146,7 @@ def test_strict(self): @pytest.mark.filterwarnings( "ignore:.*NumPy warning suppression and assertion utilities are deprecated" ".*:DeprecationWarning") +@pytest.mark.thread_unsafe(reason="checks global module & deprecated warnings") class TestWarns: def test_warn(self): @@ -1760,6 +1761,7 @@ def _get_fresh_mod(): return my_mod +@pytest.mark.thread_unsafe(reason="checks global module & deprecated warnings") def test_clear_and_catch_warnings(): # Initial state of module, no warnings my_mod = _get_fresh_mod() @@ -1795,6 +1797,7 @@ def test_clear_and_catch_warnings(): @pytest.mark.filterwarnings( "ignore:.*NumPy warning suppression and assertion utilities are deprecated" ".*:DeprecationWarning") +@pytest.mark.thread_unsafe(reason="checks global module & deprecated warnings") def test_suppress_warnings_module(): # Initial state of module, no warnings my_mod = _get_fresh_mod() @@ -1844,6 +1847,7 @@ def warn(arr): @pytest.mark.filterwarnings( "ignore:.*NumPy warning suppression and assertion utilities are deprecated" ".*:DeprecationWarning") +@pytest.mark.thread_unsafe(reason="checks global module & deprecated warnings") def 
test_suppress_warnings_type(): # Initial state of module, no warnings my_mod = _get_fresh_mod() @@ -1875,6 +1879,9 @@ def test_suppress_warnings_type(): @pytest.mark.filterwarnings( "ignore:.*NumPy warning suppression and assertion utilities are deprecated" ".*:DeprecationWarning") +@pytest.mark.thread_unsafe( + reason="uses deprecated thread-unsafe warnings control utilities" +) def test_suppress_warnings_decorate_no_record(): sup = suppress_warnings() sup.filter(UserWarning) @@ -1893,6 +1900,9 @@ def warn(category): @pytest.mark.filterwarnings( "ignore:.*NumPy warning suppression and assertion utilities are deprecated" ".*:DeprecationWarning") +@pytest.mark.thread_unsafe( + reason="uses deprecated thread-unsafe warnings control utilities" +) def test_suppress_warnings_record(): sup = suppress_warnings() log1 = sup.record() @@ -1937,6 +1947,9 @@ def test_suppress_warnings_record(): @pytest.mark.filterwarnings( "ignore:.*NumPy warning suppression and assertion utilities are deprecated" ".*:DeprecationWarning") +@pytest.mark.thread_unsafe( + reason="uses deprecated thread-unsafe warnings control utilities" +) def test_suppress_warnings_forwarding(): def warn_other_module(): # Apply along axis is implemented in python; stacklevel=2 means @@ -2026,6 +2039,7 @@ class my_cacw(clear_and_catch_warnings): class_modules = (sys.modules[__name__],) +@pytest.mark.thread_unsafe(reason="checks global module & deprecated warnings") def test_clear_and_catch_warnings_inherit(): # Test can subclass and add default modules my_mod = _get_fresh_mod() @@ -2036,6 +2050,7 @@ def test_clear_and_catch_warnings_inherit(): @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +@pytest.mark.thread_unsafe(reason="garbage collector is global state") class TestAssertNoGcCycles: """ Test assert_no_gc_cycles """ diff --git a/numpy/tests/test_ctypeslib.py b/numpy/tests/test_ctypeslib.py index 68d31416040b..b88910ce457e 100644 --- a/numpy/tests/test_ctypeslib.py +++ 
b/numpy/tests/test_ctypeslib.py @@ -124,6 +124,7 @@ def test_flags(self): assert_(p.from_param(x)) assert_raises(TypeError, p.from_param, np.array([[1, 2], [3, 4]])) + @pytest.mark.thread_unsafe(reason="checks that global ndpointer cache is updating") def test_cache(self): assert_(ndpointer(dtype=np.float64) is ndpointer(dtype=np.float64)) @@ -178,6 +179,7 @@ def test_return(self, dt): arr.__array_interface__['data'] ) + @pytest.mark.thread_unsafe(reason="mutates global test vars") def test_vague_return_value(self): """ Test that vague ndpointer return values do not promote to arrays """ arr = np.zeros((2, 3)) @@ -252,6 +254,7 @@ def check(x): check(as_array(pointer(c_array[0]), shape=(2,))) check(as_array(pointer(c_array[0][0]), shape=(2, 3))) + @pytest.mark.thread_unsafe(reason="garbage collector is global state") def test_reference_cycles(self): # related to gh-6511 import ctypes @@ -302,6 +305,7 @@ def test_scalar(self): ct = np.ctypeslib.as_ctypes_type(dt) assert_equal(ct, ctypes.c_uint16) + @pytest.mark.thread_unsafe(reason="some sort of data race? (gh-29943)") def test_subarray(self): dt = np.dtype((np.int32, (2, 3))) ct = np.ctypeslib.as_ctypes_type(dt) @@ -321,6 +325,7 @@ def test_structure(self): ('b', ctypes.c_uint32), ]) + @pytest.mark.thread_unsafe(reason="some sort of data race? (gh-29943)") def test_structure_aligned(self): dt = np.dtype([ ('a', np.uint16), @@ -351,6 +356,7 @@ def test_union(self): ('b', ctypes.c_uint32), ]) + @pytest.mark.thread_unsafe(reason="some sort of data race? 
(gh-29943)") def test_padded_union(self): dt = np.dtype({ 'names': ['a', 'b'], diff --git a/numpy/tests/test_numpy_config.py b/numpy/tests/test_numpy_config.py index f01a279574a5..9219379b2552 100644 --- a/numpy/tests/test_numpy_config.py +++ b/numpy/tests/test_numpy_config.py @@ -21,6 +21,7 @@ class TestNumPyConfigs: ] @patch("numpy.__config__._check_pyyaml") + @pytest.mark.thread_unsafe(reason="unittest.mock.patch updates global state") def test_pyyaml_not_found(self, mock_yaml_importer): mock_yaml_importer.side_effect = ModuleNotFoundError() with pytest.warns(UserWarning): diff --git a/numpy/tests/test_reloading.py b/numpy/tests/test_reloading.py index b70a715237a5..aa87ae104318 100644 --- a/numpy/tests/test_reloading.py +++ b/numpy/tests/test_reloading.py @@ -10,6 +10,7 @@ from numpy.testing import IS_WASM, assert_, assert_equal, assert_raises +@pytest.mark.thread_unsafe(reason="reloads global module") def test_numpy_reloading(): # gh-7844. Also check that relevant globals retain their identity. import numpy as np diff --git a/numpy/typing/tests/test_isfile.py b/numpy/typing/tests/test_isfile.py index f72122f208c9..0e3157a1e54d 100644 --- a/numpy/typing/tests/test_isfile.py +++ b/numpy/typing/tests/test_isfile.py @@ -2,6 +2,8 @@ import sys from pathlib import Path +import pytest + import numpy as np from numpy.testing import assert_ @@ -25,6 +27,10 @@ FILES += [ROOT / "distutils" / "__init__.pyi"] +@pytest.mark.thread_unsafe( + reason="os.path has a thread-safety bug (python/cpython#140054). 
" + "Expected to only be a problem in 3.14.0" +) class TestIsFile: def test_isfile(self): """Test if all ``.pyi`` files are properly installed.""" From e4d64414394cbb2ae62e0f61c3230ba88f537090 Mon Sep 17 00:00:00 2001 From: abhi210 <27881020+Abhi210@users.noreply.github.com> Date: Fri, 17 Oct 2025 00:04:29 +0530 Subject: [PATCH 0670/1718] MAINT: remove deprecated `trapz` function, `disp` method and `bias/ddof` args in `corrcoef` This commit removes long-deprecated code paths in `numpy/lib/_function_base_impl`: - Removed `numpy.trapz`, deprecated in NumPy 2.0 (2023-08-18) - Removed `disp` method - Removed `bias` and `ddof` arguments from `numpy.corrcoef` Removed associated tests from `numpy/lib/tests/test_function_base.py` and `numpy/_core/test_deprecations.py` Associated tests and type stubs were also updated. All tests and Ruff checks pass locally. --- numpy/__init__.py | 1 - numpy/__init__.pyi | 3 +- numpy/_core/tests/test_deprecations.py | 5 +- numpy/_expired_attrs_2_0.py | 1 - numpy/_expired_attrs_2_0.pyi | 1 - numpy/lib/_function_base_impl.py | 88 +------------------------- numpy/lib/_function_base_impl.pyi | 7 +- numpy/lib/tests/test_function_base.py | 26 -------- numpy/matlib.pyi | 1 - 9 files changed, 4 insertions(+), 129 deletions(-) diff --git a/numpy/__init__.py b/numpy/__init__.py index 4e3119238c0a..a0178b211258 100644 --- a/numpy/__init__.py +++ b/numpy/__init__.py @@ -505,7 +505,6 @@ sinc, sort_complex, trapezoid, - trapz, trim_zeros, unwrap, vectorize, diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 47e3236be751..a272062a6cd3 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -460,7 +460,6 @@ from numpy.lib._function_base_impl import ( # type: ignore[deprecated] blackman, kaiser, trapezoid, - trapz, i0, meshgrid, delete, @@ -686,7 +685,7 @@ __all__ = [ # noqa: RUF022 "gradient", "angle", "unwrap", "sort_complex", "flip", "rot90", "extract", "place", "vectorize", "asarray_chkfinite", "average", "bincount", "digitize", "cov", 
"corrcoef", "median", "sinc", "hamming", "hanning", "bartlett", "blackman", - "kaiser", "trapezoid", "trapz", "i0", "meshgrid", "delete", "insert", "append", + "kaiser", "trapezoid", "i0", "meshgrid", "delete", "insert", "append", "interp", "quantile", # lib._twodim_base_impl.__all__ "diag", "diagflat", "eye", "fliplr", "flipud", "tri", "triu", "tril", "vander", diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index ca4463ece1f7..2927935cb90f 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -280,9 +280,8 @@ def test_deprecated_np_lib_math(self): class TestLibImports(_DeprecationTestCase): # Deprecated in Numpy 1.26.0, 2023-09 def test_lib_functions_deprecation_call(self): - from numpy import row_stack, trapz + from numpy import row_stack from numpy._core.numerictypes import maximum_sctype - from numpy.lib._function_base_impl import disp from numpy.lib._npyio_impl import recfromcsv, recfromtxt from numpy.lib._shape_base_impl import get_array_wrap from numpy.lib._utils_impl import safe_eval @@ -295,12 +294,10 @@ def test_lib_functions_deprecation_call(self): self.assert_deprecated(lambda: recfromcsv(data_gen())) self.assert_deprecated(lambda: recfromtxt(data_gen(), **kwargs)) - self.assert_deprecated(lambda: disp("test")) self.assert_deprecated(get_array_wrap) self.assert_deprecated(lambda: maximum_sctype(int)) self.assert_deprecated(lambda: row_stack([[]])) - self.assert_deprecated(lambda: trapz([1], [1])) self.assert_deprecated(lambda: np.chararray) diff --git a/numpy/_expired_attrs_2_0.py b/numpy/_expired_attrs_2_0.py index 1397134e3f8c..2eebf95bc558 100644 --- a/numpy/_expired_attrs_2_0.py +++ b/numpy/_expired_attrs_2_0.py @@ -61,7 +61,6 @@ "or use `typing.deprecated`.", "deprecate_with_doc": "Emit `DeprecationWarning` with `warnings.warn` " "directly, or use `typing.deprecated`.", - "disp": "Use your own printing function instead.", "find_common_type": "Use 
`numpy.promote_types` or `numpy.result_type` instead. " "To achieve semantics for the `scalar_types` argument, use " diff --git a/numpy/_expired_attrs_2_0.pyi b/numpy/_expired_attrs_2_0.pyi index 14524689c1c5..de6c2d10f9b0 100644 --- a/numpy/_expired_attrs_2_0.pyi +++ b/numpy/_expired_attrs_2_0.pyi @@ -47,7 +47,6 @@ class _ExpiredAttributesType(TypedDict): recfromtxt: str deprecate: str deprecate_with_doc: str - disp: str find_common_type: str round_: str get_array_wrap: str diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index bdc32574e8d1..32774212befa 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -2,7 +2,6 @@ import collections.abc import functools import re -import sys import warnings import numpy as np @@ -68,7 +67,7 @@ 'rot90', 'extract', 'place', 'vectorize', 'asarray_chkfinite', 'average', 'bincount', 'digitize', 'cov', 'corrcoef', 'median', 'sinc', 'hamming', 'hanning', 'bartlett', - 'blackman', 'kaiser', 'trapezoid', 'trapz', 'i0', + 'blackman', 'kaiser', 'trapezoid', 'i0', 'meshgrid', 'delete', 'insert', 'append', 'interp', 'quantile' ] @@ -2119,62 +2118,6 @@ def place(arr, mask, vals): return _place(arr, mask, vals) -def disp(mesg, device=None, linefeed=True): - """ - Display a message on a device. - - .. deprecated:: 2.0 - Use your own printing function instead. - - Parameters - ---------- - mesg : str - Message to display. - device : object - Device to write message. If None, defaults to ``sys.stdout`` which is - very similar to ``print``. `device` needs to have ``write()`` and - ``flush()`` methods. - linefeed : bool, optional - Option whether to print a line feed or not. Defaults to True. - - Raises - ------ - AttributeError - If `device` does not have a ``write()`` or ``flush()`` method. 
- - Examples - -------- - >>> import numpy as np - - Besides ``sys.stdout``, a file-like object can also be used as it has - both required methods: - - >>> from io import StringIO - >>> buf = StringIO() - >>> np.disp('"Display" in a file', device=buf) - >>> buf.getvalue() - '"Display" in a file\\n' - - """ - - # Deprecated in NumPy 2.0, 2023-07-11 - warnings.warn( - "`disp` is deprecated, " - "use your own printing function instead. " - "(deprecated in NumPy 2.0)", - DeprecationWarning, - stacklevel=2 - ) - - if device is None: - device = sys.stdout - if linefeed: - device.write(f'{mesg}\n') - else: - device.write(f'{mesg}') - device.flush() - - # See https://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html _DIMENSION_NAME = r'\w+' _CORE_DIMENSION_LIST = f'(?:{_DIMENSION_NAME}(?:,{_DIMENSION_NAME})*)?' @@ -2959,14 +2902,7 @@ def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, ddof=np._NoValue, *, variable, with observations in the columns. Otherwise, the relationship is transposed: each column represents a variable, while the rows contain observations. - bias : _NoValue, optional - Has no effect, do not use. - - .. deprecated:: 1.10.0 - ddof : _NoValue, optional - Has no effect, do not use. - .. deprecated:: 1.10.0 dtype : data-type, optional Data-type of the result. By default, the return data-type will have at least `numpy.float64` precision. @@ -3061,10 +2997,6 @@ def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, ddof=np._NoValue, *, 1. ]]) """ - if bias is not np._NoValue or ddof is not np._NoValue: - # 2015-03-15, 1.10 - warnings.warn('bias and ddof have no effect and are deprecated', - DeprecationWarning, stacklevel=2) c = cov(x, y, rowvar, dtype=dtype) try: d = diag(c) @@ -5080,24 +5012,6 @@ def trapezoid(y, x=None, dx=1.0, axis=-1): return ret -@set_module('numpy') -def trapz(y, x=None, dx=1.0, axis=-1): - """ - `trapz` is deprecated in NumPy 2.0. 
- - Please use `trapezoid` instead, or one of the numerical integration - functions in `scipy.integrate`. - """ - # Deprecated in NumPy 2.0, 2023-08-18 - warnings.warn( - "`trapz` is deprecated. Use `trapezoid` instead, or one of the " - "numerical integration functions in `scipy.integrate`.", - DeprecationWarning, - stacklevel=2 - ) - return trapezoid(y, x=x, dx=dx, axis=axis) - - def _meshgrid_dispatcher(*xi, copy=None, sparse=None, indexing=None): return xi diff --git a/numpy/lib/_function_base_impl.pyi b/numpy/lib/_function_base_impl.pyi index 47dd0749f07f..e99be06469f3 100644 --- a/numpy/lib/_function_base_impl.pyi +++ b/numpy/lib/_function_base_impl.pyi @@ -14,7 +14,7 @@ from typing import ( overload, type_check_only, ) -from typing_extensions import TypeIs, deprecated +from typing_extensions import TypeIs import numpy as np from numpy import ( @@ -87,7 +87,6 @@ __all__ = [ "blackman", "kaiser", "trapezoid", - "trapz", "i0", "meshgrid", "delete", @@ -990,10 +989,6 @@ def trapezoid( floating | complexfloating | timedelta64 | NDArray[floating | complexfloating | timedelta64 | object_] ): ... - -@deprecated("Use 'trapezoid' instead") -def trapz(y: ArrayLike, x: ArrayLike | None = None, dx: float = 1.0, axis: int = -1) -> generic | NDArray[generic]: ... 
- @overload def meshgrid( *, diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 6ca7892c6cbe..7af2f9dc15bd 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -565,10 +565,6 @@ def test_return_dtype(self): m = np.isnan(d) assert_equal(select([m], [d]), [0, 0, 0, np.nan, 0, 0]) - def test_deprecated_empty(self): - assert_raises(ValueError, select, [], [], 3j) - assert_raises(ValueError, select, [], []) - def test_non_bool_deprecation(self): choices = self.choices conditions = self.conditions[:] @@ -2476,28 +2472,6 @@ def test_simple(self): assert_almost_equal(tgt2, self.res2) assert_(np.all(np.abs(tgt2) <= 1.0)) - def test_ddof(self): - # ddof raises DeprecationWarning - with warnings.catch_warnings(): - warnings.simplefilter("always") - pytest.warns(DeprecationWarning, corrcoef, self.A, ddof=-1) - warnings.simplefilter('ignore', DeprecationWarning) - # ddof has no or negligible effect on the function - assert_almost_equal(corrcoef(self.A, ddof=-1), self.res1) - assert_almost_equal(corrcoef(self.A, self.B, ddof=-1), self.res2) - assert_almost_equal(corrcoef(self.A, ddof=3), self.res1) - assert_almost_equal(corrcoef(self.A, self.B, ddof=3), self.res2) - - def test_bias(self): - # bias raises DeprecationWarning - with warnings.catch_warnings(): - warnings.simplefilter("always") - pytest.warns(DeprecationWarning, corrcoef, self.A, self.B, 1, 0) - pytest.warns(DeprecationWarning, corrcoef, self.A, bias=0) - warnings.simplefilter('ignore', DeprecationWarning) - # bias has no or negligible effect on the function - assert_almost_equal(corrcoef(self.A, bias=1), self.res1) - def test_complex(self): x = np.array([[1, 2, 3], [1j, 2j, 3j]]) res = corrcoef(x) diff --git a/numpy/matlib.pyi b/numpy/matlib.pyi index 0a4f649a7d9c..d653a5a6cc98 100644 --- a/numpy/matlib.pyi +++ b/numpy/matlib.pyi @@ -450,7 +450,6 @@ from numpy import ( # noqa: F401 trace, transpose, trapezoid, - trapz, tri, 
tril, tril_indices, From 1b89f6bfcb3dcddb13ad02b2075f1b8c22422599 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 16 Oct 2025 23:29:49 +0200 Subject: [PATCH 0671/1718] TYP: fix ``char.startswith`` signature --- numpy/_core/defchararray.pyi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/defchararray.pyi b/numpy/_core/defchararray.pyi index 815d1397b39c..53ff96f450f5 100644 --- a/numpy/_core/defchararray.pyi +++ b/numpy/_core/defchararray.pyi @@ -1036,7 +1036,7 @@ def startswith( @overload def startswith( a: T_co, - suffix: T_co, + prefix: T_co, start: i_co = 0, end: i_co | None = None, ) -> NDArray[np.bool]: ... From fe051faefefbb519e31fef7943a63b1abef5e836 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Thu, 16 Oct 2025 16:11:55 -0600 Subject: [PATCH 0672/1718] MAINT: Update write_release.py (#29998) Remove hash generation and only translate the release notes from rst to markdown. [skip azp] [skip cirrus] [skip actions] --- tools/write_release.py | 68 ++++++------------------------------------ 1 file changed, 9 insertions(+), 59 deletions(-) diff --git a/tools/write_release.py b/tools/write_release.py index 7662eb7b1288..cdb5235f0bd0 100644 --- a/tools/write_release.py +++ b/tools/write_release.py @@ -18,8 +18,6 @@ import argparse import os import subprocess -import textwrap -from hashlib import md5, sha256 from pathlib import Path # Name of the notes directory @@ -29,38 +27,12 @@ # Output base name, `.rst` or `.md` will be appended OUTPUT_FILE = "README" -def compute_hash(wheel_dir, hash_func): - """ - Compute hashes of files in wheel_dir. - - Parameters - ---------- - wheel_dir: str - Path to wheel directory from repo root. - hash_func: function - Hash function, i.e., md5, sha256, etc. - - Returns - ------- - list_of_strings: list - List of of strings. Each string is the hash - followed by the file basename. 
- - """ - released = os.listdir(wheel_dir) - checksums = [] - for fn in sorted(released): - fn_path = Path(f"{wheel_dir}/{fn}") - m = hash_func(fn_path.read_bytes()) - checksums.append(f"{m.hexdigest()} {fn}") - return checksums - - def write_release(version): """ - Copy the -notes.rst file to the OUTPUT_DIR, append - the md5 and sha256 hashes of the wheels and sdist, and produce - README.rst and README.md files. + Copy the -notes.rst file to the OUTPUT_DIR and use + pandoc to translate it to markdown. That results in both + README.rst and README.md files that can be used for on + github for the release. Parameters ---------- @@ -73,35 +45,13 @@ def write_release(version): """ notes = Path(NOTES_DIR) / f"{version}-notes.rst" - wheel_dir = Path(OUTPUT_DIR) / "installers" - target_md = Path(OUTPUT_DIR) / f"{OUTPUT_FILE}.md" - target_rst = Path(OUTPUT_DIR) / f"{OUTPUT_FILE}.rst" - - os.system(f"cp {notes} {target_rst}") - - with open(str(target_rst), 'a') as f: - f.writelines(textwrap.dedent( - """ - Checksums - ========= - - MD5 - --- - :: - - """)) - f.writelines([f' {c}\n' for c in compute_hash(wheel_dir, md5)]) - - f.writelines(textwrap.dedent( - """ - SHA256 - ------ - :: - - """)) - f.writelines([f' {c}\n' for c in compute_hash(wheel_dir, sha256)]) + outdir = Path(OUTPUT_DIR) + outdir.mkdir(exist_ok=True) + target_md = outdir / f"{OUTPUT_FILE}.md" + target_rst = outdir / f"{OUTPUT_FILE}.rst" # translate README.rst to md for posting on GitHub + os.system(f"cp {notes} {target_rst}") subprocess.run( ["pandoc", "-s", "-o", str(target_md), str(target_rst), "--wrap=preserve"], check=True, From 97e21930e923fa40d6936dc4b2dc82607690fd2b Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 17 Oct 2025 06:38:51 +0200 Subject: [PATCH 0673/1718] TYP: fix ``ndarray.sort(stable=True)`` --- numpy/__init__.pyi | 4 ++-- numpy/typing/tests/data/pass/ndarray_misc.py | 5 +++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 
47e3236be751..866e3ba9b92a 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1721,7 +1721,7 @@ class _ArrayOrScalarCommon: kind: _SortKind | None = ..., order: str | Sequence[str] | None = ..., *, - stable: bool | None = ..., + stable: builtins.bool | None = ..., ) -> NDArray[intp]: ... @overload # axis=None (default), out=None (default), keepdims=False (default) @@ -2382,7 +2382,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): kind: _SortKind | None = ..., order: str | Sequence[str] | None = ..., *, - stable: bool | None = ..., + stable: builtins.bool | None = ..., ) -> None: ... # Keep in sync with `MaskedArray.trace` diff --git a/numpy/typing/tests/data/pass/ndarray_misc.py b/numpy/typing/tests/data/pass/ndarray_misc.py index a17082aeba15..40c84d8641bd 100644 --- a/numpy/typing/tests/data/pass/ndarray_misc.py +++ b/numpy/typing/tests/data/pass/ndarray_misc.py @@ -53,7 +53,12 @@ class IntSubClass(npt.NDArray[np.intp]): ... A.argmin(out=B_int0) i4.argsort() +i4.argsort(stable=True) A.argsort() +A.argsort(stable=True) + +A.sort() +A.sort(stable=True) i4.choose([()]) _choices = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]], dtype=np.int32) From c8e6395f0b9de0f5355705ec5d8a9327d8e282d6 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 17 Oct 2025 07:19:35 +0200 Subject: [PATCH 0674/1718] TYP: inconsistent ``strings.slice`` default argument for ``stop`` --- numpy/_core/strings.pyi | 29 +++++++++++++++++++++++++---- 1 file changed, 25 insertions(+), 4 deletions(-) diff --git a/numpy/_core/strings.pyi b/numpy/_core/strings.pyi index 2f630ce8556a..270760412670 100644 --- a/numpy/_core/strings.pyi +++ b/numpy/_core/strings.pyi @@ -1,6 +1,7 @@ from typing import TypeAlias, overload import numpy as np +from numpy._globals import _NoValueType from numpy._typing import ( NDArray, _AnyShape, @@ -502,14 +503,34 @@ def translate( # @overload -def slice(a: U_co, start: i_co | None = None, stop: i_co | None = None, step: i_co | None = None, /) -> 
NDArray[np.str_]: ... # type: ignore[overload-overlap] +def slice( + a: U_co, + start: i_co | None = None, + stop: i_co | _NoValueType | None = ..., # = np._NoValue + step: i_co | None = None, + /, +) -> NDArray[np.str_]: ... @overload -def slice(a: S_co, start: i_co | None = None, stop: i_co | None = None, step: i_co | None = None, /) -> NDArray[np.bytes_]: ... +def slice( + a: S_co, + start: i_co | None = None, + stop: i_co | _NoValueType | None = ..., # = np._NoValue + step: i_co | None = None, + /, +) -> NDArray[np.bytes_]: ... @overload def slice( - a: _StringDTypeSupportsArray, start: i_co | None = None, stop: i_co | None = None, step: i_co | None = None, / + a: _StringDTypeSupportsArray, + start: i_co | None = None, + stop: i_co | _NoValueType | None = ..., # = np._NoValue + step: i_co | None = None, + /, ) -> _StringDTypeArray: ... @overload def slice( - a: T_co, start: i_co | None = None, stop: i_co | None = None, step: i_co | None = None, / + a: T_co, + start: i_co | None = None, + stop: i_co | _NoValueType | None = ..., # = np._NoValue + step: i_co | None = None, + /, ) -> _StringDTypeOrUnicodeArray: ... From 37ecd83af0be90e865ac2c73643b3e79d8ea6cde Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 17 Oct 2025 07:37:28 +0200 Subject: [PATCH 0675/1718] TYP: remove implicit re-export in ``_core._exceptions`` Because it's not there at runtime either. 
--- numpy/_core/_exceptions.pyi | 1 - 1 file changed, 1 deletion(-) diff --git a/numpy/_core/_exceptions.pyi b/numpy/_core/_exceptions.pyi index 02637a17b6a8..b340fde3e463 100644 --- a/numpy/_core/_exceptions.pyi +++ b/numpy/_core/_exceptions.pyi @@ -3,7 +3,6 @@ from typing import Any, Final, TypeVar, overload import numpy as np from numpy import _CastingKind -from numpy._utils import set_module as set_module ### From 2b5ad5b330aed167399cd9403f783fe089bc04a1 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 17 Oct 2025 08:11:05 +0200 Subject: [PATCH 0676/1718] TYP: inconsistent ``lib._iotools.NameValidator.deletechars`` annotation --- numpy/lib/_iotools.pyi | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/lib/_iotools.pyi b/numpy/lib/_iotools.pyi index 82275940e137..4c716ff68f87 100644 --- a/numpy/lib/_iotools.pyi +++ b/numpy/lib/_iotools.pyi @@ -49,8 +49,8 @@ class LineSplitter: def autostrip(self, /, method: Callable[[_T], Iterable[str]]) -> Callable[[_T], list[str]]: ... class NameValidator: - defaultexcludelist: ClassVar[Sequence[str]] - defaultdeletechars: ClassVar[Sequence[str]] + defaultexcludelist: ClassVar[Sequence[str]] = ... + defaultdeletechars: ClassVar[frozenset[str]] = ... excludelist: list[str] deletechars: set[str] case_converter: Callable[[str], str] From b381cf60f36415ab18435b88dd521d1b17fcf219 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 17 Oct 2025 08:26:28 +0200 Subject: [PATCH 0677/1718] TYP: inconsistent ``lib._iotools._decode_line`` function signature --- numpy/lib/_iotools.pyi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/lib/_iotools.pyi b/numpy/lib/_iotools.pyi index 4c716ff68f87..3bf41fc0cdde 100644 --- a/numpy/lib/_iotools.pyi +++ b/numpy/lib/_iotools.pyi @@ -99,7 +99,7 @@ class StringConverter: @classmethod def upgrade_mapper(cls, func: Callable[[str], Any], default: object | None = None) -> None: ... -def _decode_line(line: str | bytes, encoding: str) -> str: ... 
+def _decode_line(line: str | bytes, encoding: str | None = None) -> str: ... def _is_string_like(obj: object) -> bool: ... def _is_bytes_like(obj: object) -> bool: ... def has_nested_fields(ndtype: np.dtype[np.void]) -> bool: ... From 9fc90137fdbab27f89124d0217e6432a1b619002 Mon Sep 17 00:00:00 2001 From: abhi210 <27881020+Abhi210@users.noreply.github.com> Date: Fri, 17 Oct 2025 12:06:59 +0530 Subject: [PATCH 0678/1718] DOC: add release note for removal of deprecated trapz, disp, and corrcoef args --- doc/release/upcoming_changes/29997.expired.rst | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100644 doc/release/upcoming_changes/29997.expired.rst diff --git a/doc/release/upcoming_changes/29997.expired.rst b/doc/release/upcoming_changes/29997.expired.rst new file mode 100644 index 000000000000..c109012836cc --- /dev/null +++ b/doc/release/upcoming_changes/29997.expired.rst @@ -0,0 +1,12 @@ +Removal of deprecated `trapz` function, `disp` method, and `bias`/`ddof` args in `corrcoef` +------------------------------------------------------------------------------------------- + +The following long-deprecated APIs have been removed: + +* ``numpy.trapz`` — deprecated since NumPy 2.0 (2023-08-18). Use ``numpy.trapezoid`` or + ``scipy.integrate`` functions instead. +* ``disp`` method — deprecated in earlier releases and no longer functional. Use your own printing function instead. +* ``bias`` and ``ddof`` arguments in ``numpy.corrcoef`` — these had no effect since NumPy 1.10. + +Associated tests and type stubs have been updated or removed accordingly. 
+ From 6eb523a7dc07b13990c3f12e6a1c412f2d9f9a2f Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 17 Oct 2025 08:58:15 +0200 Subject: [PATCH 0679/1718] TYP: inconsistent ``lib.npyio.NpzFile.get`` method signature --- numpy/lib/_npyio_impl.pyi | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/numpy/lib/_npyio_impl.pyi b/numpy/lib/_npyio_impl.pyi index 8a4dfa27ed9b..657cb14a49e0 100644 --- a/numpy/lib/_npyio_impl.pyi +++ b/numpy/lib/_npyio_impl.pyi @@ -43,6 +43,7 @@ __all__ = [ "unpackbits", ] +_T = TypeVar("_T") _T_co = TypeVar("_T_co", covariant=True) _ScalarT = TypeVar("_ScalarT", bound=np.generic) _ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, default=Any, covariant=True) @@ -64,8 +65,8 @@ class BagObj(Generic[_T_co]): class NpzFile(Mapping[str, NDArray[_ScalarT_co]]): _MAX_REPR_ARRAY_COUNT: ClassVar[int] = 5 - zip: zipfile.ZipFile - fid: IO[str] | None + zip: zipfile.ZipFile | None = None + fid: IO[str] | None = None files: list[str] allow_pickle: bool pickle_kwargs: Mapping[str, Any] | None @@ -91,6 +92,15 @@ class NpzFile(Mapping[str, NDArray[_ScalarT_co]]): def __iter__(self) -> Iterator[str]: ... @override def __getitem__(self, key: str, /) -> NDArray[_ScalarT_co]: ... + + # + @override + @overload + def get(self, key: str, default: None = None, /) -> NDArray[_ScalarT_co] | None: ... + @overload + def get(self, key: str, default: NDArray[_ScalarT_co] | _T, /) -> NDArray[_ScalarT_co] | _T: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # def close(self) -> None: ... # NOTE: Returns a `NpzFile` if file is a zip file; From 499530ff675c05fb994ea2ce2571df3cc0a3f6dd Mon Sep 17 00:00:00 2001 From: abhi210 <27881020+Abhi210@users.noreply.github.com> Date: Fri, 17 Oct 2025 12:28:21 +0530 Subject: [PATCH 0680/1718] MAINT: remove redundant deprecated `bias` and `ddof` handling in `corrcoef` Removes leftover deprecation logic for the `bias` and `ddof` arguments in `numpy.corrcoef` from `extra.py`. 
These arguments were already deprecated and removed from `_function_base_impl`. No functional changes beyond removing redundant code. All tests and Ruff checks pass locally. --- numpy/ma/extras.py | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py index b7405d3809f8..9fb3dd378bbf 100644 --- a/numpy/ma/extras.py +++ b/numpy/ma/extras.py @@ -1751,19 +1751,6 @@ def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, allow_masked=True, variable, with observations in the columns. Otherwise, the relationship is transposed: each column represents a variable, while the rows contain observations. - bias : _NoValue, optional - Has no effect, do not use. - - .. deprecated:: 1.10.0 - allow_masked : bool, optional - If True, masked values are propagated pair-wise: if a value is masked - in `x`, the corresponding value is masked in `y`. - If False, raises an exception. Because `bias` is deprecated, this - argument needs to be treated as keyword only to avoid a warning. - ddof : _NoValue, optional - Has no effect, do not use. - - .. deprecated:: 1.10.0 See Also -------- @@ -1791,10 +1778,6 @@ def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, allow_masked=True, dtype=float64) """ - msg = 'bias and ddof have no effect and are deprecated' - if bias is not np._NoValue or ddof is not np._NoValue: - # 2015-03-15, 1.10 - warnings.warn(msg, DeprecationWarning, stacklevel=2) # Estimate the covariance matrix. corr = cov(x, y, rowvar, allow_masked=allow_masked) # The non-masked version returns a masked value for a scalar. 
From a6ab3ab5de35981e573c1ab5edced62779e34556 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 17 Oct 2025 09:13:57 +0200 Subject: [PATCH 0681/1718] TYP: mark ``lib.mixins.NDArrayOperatorsMixin.__array_ufunc__`` as ``@type_check_only`` --- numpy/lib/mixins.pyi | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/numpy/lib/mixins.pyi b/numpy/lib/mixins.pyi index 1572f6bee289..e508a5cfd4bb 100644 --- a/numpy/lib/mixins.pyi +++ b/numpy/lib/mixins.pyi @@ -1,5 +1,5 @@ from abc import ABC, abstractmethod -from typing import Any, Literal as L +from typing import Any, Literal as L, type_check_only from numpy import ufunc @@ -15,11 +15,13 @@ __all__ = ["NDArrayOperatorsMixin"] class NDArrayOperatorsMixin(ABC): __slots__ = () + @type_check_only @abstractmethod def __array_ufunc__( self, ufunc: ufunc, method: L["__call__", "reduce", "reduceat", "accumulate", "outer", "at"], + /, *inputs: Any, **kwargs: Any, ) -> Any: ... From 28352b0ef213d88791d4916658ce80884b8a90cf Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 17 Oct 2025 09:17:30 +0200 Subject: [PATCH 0682/1718] TYP: add missing overload with default to `lib.recfunctions.unstructured_to_structured` --- numpy/lib/recfunctions.pyi | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/numpy/lib/recfunctions.pyi b/numpy/lib/recfunctions.pyi index a0b49ba1df00..33713cf16331 100644 --- a/numpy/lib/recfunctions.pyi +++ b/numpy/lib/recfunctions.pyi @@ -268,6 +268,16 @@ def unstructured_to_structured( copy: bool = False, casting: str = "unsafe", ) -> npt.NDArray[np.void]: ... +@overload +def unstructured_to_structured( + arr: npt.NDArray[Any], + dtype: None = None, + *, + names: _OneOrMany[str], + align: bool = False, + copy: bool = False, + casting: str = "unsafe", +) -> npt.NDArray[np.void]: ... 
# def apply_along_fields( From aabd7adffe41f91e320d883a058cacf56edb6804 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 17 Oct 2025 09:31:46 +0200 Subject: [PATCH 0683/1718] TYP: update ``lib.npyio.NpzFile.zip`` test --- numpy/typing/tests/data/reveal/npyio.pyi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/typing/tests/data/reveal/npyio.pyi b/numpy/typing/tests/data/reveal/npyio.pyi index 40da72c8544e..e3eaa45a5fa1 100644 --- a/numpy/typing/tests/data/reveal/npyio.pyi +++ b/numpy/typing/tests/data/reveal/npyio.pyi @@ -28,7 +28,7 @@ class BytesReader: bytes_writer: BytesWriter bytes_reader: BytesReader -assert_type(npz_file.zip, zipfile.ZipFile) +assert_type(npz_file.zip, zipfile.ZipFile | None) assert_type(npz_file.fid, IO[str] | None) assert_type(npz_file.files, list[str]) assert_type(npz_file.allow_pickle, bool) From 7065a9d14065265aa1f0fd690fdf930a01b9694d Mon Sep 17 00:00:00 2001 From: abhi210 <27881020+Abhi210@users.noreply.github.com> Date: Fri, 17 Oct 2025 13:37:04 +0530 Subject: [PATCH 0684/1718] This commit removes remaining deprecated `bias` and `ddof` arguments from `numpy.ma.extras` and updates the related tests. In addition: - Removed redundant declarations of `bias` and `ddof` in `extras.py` (deprecated since NumPy 2.0) - Removed associated tests from `test_extras.py` and `test_regression.py` - Amended `numpy/lib/_function_base_impl.py` to fully remove these arguments from function signatures and ensure consistency with documentation - Documented the above changes in the release notes All tests and Ruff checks pass locally. 
--- .../upcoming_changes/29997.expired.rst | 5 +- numpy/lib/_function_base_impl.py | 4 +- numpy/ma/extras.py | 9 ++- numpy/ma/tests/test_extras.py | 71 ------------------- numpy/ma/tests/test_regression.py | 17 +---- 5 files changed, 12 insertions(+), 94 deletions(-) diff --git a/doc/release/upcoming_changes/29997.expired.rst b/doc/release/upcoming_changes/29997.expired.rst index c109012836cc..92c052015b7e 100644 --- a/doc/release/upcoming_changes/29997.expired.rst +++ b/doc/release/upcoming_changes/29997.expired.rst @@ -1,12 +1,11 @@ -Removal of deprecated `trapz` function, `disp` method, and `bias`/`ddof` args in `corrcoef` +Removal of deprecated `trapz` function, `disp` function, and `bias`/`ddof` args in `corrcoef` ------------------------------------------------------------------------------------------- The following long-deprecated APIs have been removed: * ``numpy.trapz`` — deprecated since NumPy 2.0 (2023-08-18). Use ``numpy.trapezoid`` or ``scipy.integrate`` functions instead. -* ``disp`` method — deprecated in earlier releases and no longer functional. Use your own printing function instead. +* ``disp`` function — deprecated from 2.0 release and no longer functional. Use your own printing function instead. * ``bias`` and ``ddof`` arguments in ``numpy.corrcoef`` — these had no effect since NumPy 1.10. -Associated tests and type stubs have been updated or removed accordingly. 
diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 32774212befa..bccbdcc87d85 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -2869,13 +2869,13 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, return c.squeeze() -def _corrcoef_dispatcher(x, y=None, rowvar=None, bias=None, ddof=None, *, +def _corrcoef_dispatcher(x, y=None, rowvar=None, *, dtype=None): return (x, y) @array_function_dispatch(_corrcoef_dispatcher) -def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, ddof=np._NoValue, *, +def corrcoef(x, y=None, rowvar=True, *, dtype=None): """ Return Pearson product-moment correlation coefficients. diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py index 9fb3dd378bbf..7bd0f480f4f3 100644 --- a/numpy/ma/extras.py +++ b/numpy/ma/extras.py @@ -1729,8 +1729,8 @@ def cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None): return result -def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, allow_masked=True, - ddof=np._NoValue): +def corrcoef(x, y=None, rowvar=True, allow_masked=True, + ): """ Return Pearson product-moment correlation coefficients. @@ -1751,6 +1751,11 @@ def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, allow_masked=True, variable, with observations in the columns. Otherwise, the relationship is transposed: each column represents a variable, while the rows contain observations. + allow_masked : bool, optional + If True, masked values are propagated pair-wise: if a value is masked + in `x`, the corresponding value is masked in `y`. + If False, raises an exception. Because `bias` is deprecated, this + argument needs to be treated as keyword only to avoid a warning. 
See Also -------- diff --git a/numpy/ma/tests/test_extras.py b/numpy/ma/tests/test_extras.py index 230c9d211f19..07f0fbdd7f97 100644 --- a/numpy/ma/tests/test_extras.py +++ b/numpy/ma/tests/test_extras.py @@ -6,7 +6,6 @@ """ import itertools -import warnings import pytest @@ -1406,53 +1405,12 @@ def _create_data(self): data2 = array(np.random.rand(12)) return data, data2 - def test_ddof(self): - # ddof raises DeprecationWarning - x, y = self._create_data() - expected = np.corrcoef(x) - expected2 = np.corrcoef(x, y) - with pytest.warns(DeprecationWarning): - corrcoef(x, ddof=-1) - - with warnings.catch_warnings(): - warnings.filterwarnings( - 'ignore', "bias and ddof have no effect", DeprecationWarning) - - # ddof has no or negligible effect on the function - assert_almost_equal(np.corrcoef(x, ddof=0), corrcoef(x, ddof=0)) - assert_almost_equal(corrcoef(x, ddof=-1), expected) - assert_almost_equal(corrcoef(x, y, ddof=-1), expected2) - assert_almost_equal(corrcoef(x, ddof=3), expected) - assert_almost_equal(corrcoef(x, y, ddof=3), expected2) - - def test_bias(self): - x, y = self._create_data() - expected = np.corrcoef(x) - # bias raises DeprecationWarning - with pytest.warns(DeprecationWarning): - corrcoef(x, y, True, False) - with pytest.warns(DeprecationWarning): - corrcoef(x, y, True, True) - with pytest.warns(DeprecationWarning): - corrcoef(x, y, bias=False) - - with warnings.catch_warnings(): - warnings.filterwarnings( - 'ignore', "bias and ddof have no effect", DeprecationWarning) - # bias has no or negligible effect on the function - assert_almost_equal(corrcoef(x, bias=1), expected) - def test_1d_without_missing(self): # Test cov on 1D variable w/o missing values x = self._create_data()[0] assert_almost_equal(np.corrcoef(x), corrcoef(x)) assert_almost_equal(np.corrcoef(x, rowvar=False), corrcoef(x, rowvar=False)) - with warnings.catch_warnings(): - warnings.filterwarnings( - 'ignore', "bias and ddof have no effect", DeprecationWarning) - 
assert_almost_equal(np.corrcoef(x, rowvar=False, bias=True), - corrcoef(x, rowvar=False, bias=True)) def test_2d_without_missing(self): # Test corrcoef on 1 2D variable w/o missing values @@ -1460,11 +1418,6 @@ def test_2d_without_missing(self): assert_almost_equal(np.corrcoef(x), corrcoef(x)) assert_almost_equal(np.corrcoef(x, rowvar=False), corrcoef(x, rowvar=False)) - with warnings.catch_warnings(): - warnings.filterwarnings( - 'ignore', "bias and ddof have no effect", DeprecationWarning) - assert_almost_equal(np.corrcoef(x, rowvar=False, bias=True), - corrcoef(x, rowvar=False, bias=True)) def test_1d_with_missing(self): # Test corrcoef 1 1D variable w/missing values @@ -1472,14 +1425,8 @@ def test_1d_with_missing(self): x[-1] = masked x -= x.mean() nx = x.compressed() - assert_almost_equal(np.corrcoef(nx), corrcoef(x)) assert_almost_equal(np.corrcoef(nx, rowvar=False), corrcoef(x, rowvar=False)) - with warnings.catch_warnings(): - warnings.filterwarnings( - 'ignore', "bias and ddof have no effect", DeprecationWarning) - assert_almost_equal(np.corrcoef(nx, rowvar=False, bias=True), - corrcoef(x, rowvar=False, bias=True)) try: corrcoef(x, allow_masked=False) except ValueError: @@ -1489,14 +1436,6 @@ def test_1d_with_missing(self): assert_almost_equal(np.corrcoef(nx, nx[::-1]), corrcoef(x, x[::-1])) assert_almost_equal(np.corrcoef(nx, nx[::-1], rowvar=False), corrcoef(x, x[::-1], rowvar=False)) - with warnings.catch_warnings(): - warnings.filterwarnings( - 'ignore', "bias and ddof have no effect", DeprecationWarning) - # ddof and bias have no or negligible effect on the function - assert_almost_equal(np.corrcoef(nx, nx[::-1]), - corrcoef(x, x[::-1], bias=1)) - assert_almost_equal(np.corrcoef(nx, nx[::-1]), - corrcoef(x, x[::-1], ddof=2)) def test_2d_with_missing(self): # Test corrcoef on 2D variable w/ missing value @@ -1507,16 +1446,6 @@ def test_2d_with_missing(self): test = corrcoef(x) control = np.corrcoef(x) assert_almost_equal(test[:-1, :-1], control[:-1, 
:-1]) - with warnings.catch_warnings(): - warnings.filterwarnings( - 'ignore', "bias and ddof have no effect", DeprecationWarning) - # ddof and bias have no or negligible effect on the function - assert_almost_equal(corrcoef(x, ddof=-2)[:-1, :-1], - control[:-1, :-1]) - assert_almost_equal(corrcoef(x, ddof=3)[:-1, :-1], - control[:-1, :-1]) - assert_almost_equal(corrcoef(x, bias=1)[:-1, :-1], - control[:-1, :-1]) class TestPolynomial: diff --git a/numpy/ma/tests/test_regression.py b/numpy/ma/tests/test_regression.py index 2a08234cba61..4e40a3f8ee75 100644 --- a/numpy/ma/tests/test_regression.py +++ b/numpy/ma/tests/test_regression.py @@ -1,7 +1,5 @@ -import warnings - import numpy as np -from numpy.testing import assert_, assert_allclose, assert_array_equal +from numpy.testing import assert_, assert_array_equal class TestRegression: @@ -59,19 +57,6 @@ def test_var_sets_maskedarray_scalar(self): a.var(out=mout) assert_(mout._data == 0) - def test_ddof_corrcoef(self): - # See gh-3336 - x = np.ma.masked_equal([1, 2, 3, 4, 5], 4) - y = np.array([2, 2.5, 3.1, 3, 5]) - # this test can be removed after deprecation. - with warnings.catch_warnings(): - warnings.filterwarnings( - 'ignore', "bias and ddof have no effect", DeprecationWarning) - r0 = np.ma.corrcoef(x, y, ddof=0) - r1 = np.ma.corrcoef(x, y, ddof=1) - # ddof should not have an effect (it gets cancelled out) - assert_allclose(r0.data, r1.data) - def test_mask_not_backmangled(self): # See gh-10314. Test case taken from gh-3140. 
a = np.ma.MaskedArray([1., 2.], mask=[False, False]) From f7d1b16e0f1dad9e61d94d904bfa3c2ed1d99430 Mon Sep 17 00:00:00 2001 From: Abhishek Tiwari Date: Fri, 17 Oct 2025 14:08:12 +0530 Subject: [PATCH 0685/1718] Update doc/release/upcoming_changes/29997.expired.rst Co-authored-by: Joren Hammudoglu --- doc/release/upcoming_changes/29997.expired.rst | 2 -- 1 file changed, 2 deletions(-) diff --git a/doc/release/upcoming_changes/29997.expired.rst b/doc/release/upcoming_changes/29997.expired.rst index 92c052015b7e..ab6f76a2a3d9 100644 --- a/doc/release/upcoming_changes/29997.expired.rst +++ b/doc/release/upcoming_changes/29997.expired.rst @@ -7,5 +7,3 @@ The following long-deprecated APIs have been removed: ``scipy.integrate`` functions instead. * ``disp`` function — deprecated from 2.0 release and no longer functional. Use your own printing function instead. * ``bias`` and ``ddof`` arguments in ``numpy.corrcoef`` — these had no effect since NumPy 1.10. - - From fa44337de3ee26b538936ad11889d2f9dbddbf59 Mon Sep 17 00:00:00 2001 From: abhi210 <27881020+Abhi210@users.noreply.github.com> Date: Fri, 17 Oct 2025 12:28:21 +0530 Subject: [PATCH 0686/1718] MAINT: remove redundant deprecated `bias` and `ddof` handling in `corrcoef` Removes leftover deprecation logic for the `bias` and `ddof` arguments in `numpy.corrcoef` from `extra.py`. These arguments were already deprecated and removed from `_function_base_impl`. No functional changes beyond removing redundant code. All tests and Ruff checks pass locally. --- numpy/lib/_function_base_impl.py | 5 ----- numpy/ma/extras.py | 7 ------- 2 files changed, 12 deletions(-) diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index bccbdcc87d85..54e6645f6182 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -2926,11 +2926,6 @@ def corrcoef(x, y=None, rowvar=True, *, interval [-1, 1] in an attempt to improve on that situation but is not much help in the complex case. 
- This function accepts but discards arguments `bias` and `ddof`. This is - for backwards compatibility with previous versions of this function. These - arguments had no effect on the return values of the function and can be - safely ignored in this and previous versions of numpy. - Examples -------- >>> import numpy as np diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py index 7bd0f480f4f3..6d5569e833dd 100644 --- a/numpy/ma/extras.py +++ b/numpy/ma/extras.py @@ -1762,13 +1762,6 @@ def corrcoef(x, y=None, rowvar=True, allow_masked=True, numpy.corrcoef : Equivalent function in top-level NumPy module. cov : Estimate the covariance matrix. - Notes - ----- - This function accepts but discards arguments `bias` and `ddof`. This is - for backwards compatibility with previous versions of this function. These - arguments had no effect on the return values of the function and can be - safely ignored in this and previous versions of numpy. - Examples -------- >>> import numpy as np From e07ae893a4be21de4c10d09f8096b4d0121a5419 Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Fri, 17 Oct 2025 13:11:59 +0300 Subject: [PATCH 0687/1718] doc: correct underline formatting --- doc/release/upcoming_changes/29997.expired.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/release/upcoming_changes/29997.expired.rst b/doc/release/upcoming_changes/29997.expired.rst index ab6f76a2a3d9..4d20a1c10078 100644 --- a/doc/release/upcoming_changes/29997.expired.rst +++ b/doc/release/upcoming_changes/29997.expired.rst @@ -1,5 +1,5 @@ Removal of deprecated `trapz` function, `disp` function, and `bias`/`ddof` args in `corrcoef` -------------------------------------------------------------------------------------------- +-------------------------------------------------------------------------------------------- The following long-deprecated APIs have been removed: From f1e088d36992394291aa844edef905ef1547884b Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Fri, 17 Oct 2025 
14:43:41 +0300 Subject: [PATCH 0688/1718] Doc: simplify heading [skip actions][skip azp][skip cirrus] --- doc/release/upcoming_changes/29997.expired.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/release/upcoming_changes/29997.expired.rst b/doc/release/upcoming_changes/29997.expired.rst index 4d20a1c10078..6bdfa792e4e6 100644 --- a/doc/release/upcoming_changes/29997.expired.rst +++ b/doc/release/upcoming_changes/29997.expired.rst @@ -1,5 +1,5 @@ -Removal of deprecated `trapz` function, `disp` function, and `bias`/`ddof` args in `corrcoef` --------------------------------------------------------------------------------------------- +Removal of deprecated functions and arguments +--------------------------------------------- The following long-deprecated APIs have been removed: From c5fb27e517c33af6ee6cd9804e4097d0cb819491 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 17 Oct 2025 15:59:34 +0200 Subject: [PATCH 0689/1718] MAINT: avoid namespace pollution in ``_core._type_aliases`` --- numpy/_core/_type_aliases.py | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/numpy/_core/_type_aliases.py b/numpy/_core/_type_aliases.py index de6c30953e91..51c8e6ca2677 100644 --- a/numpy/_core/_type_aliases.py +++ b/numpy/_core/_type_aliases.py @@ -36,6 +36,7 @@ for _abstract_type_name in _abstract_type_names: allTypes[_abstract_type_name] = getattr(ma, _abstract_type_name) + del _abstract_type_name for k, v in typeinfo.items(): if k.startswith("NPY_") and v not in c_names_dict: @@ -44,6 +45,8 @@ concrete_type = v.type allTypes[k] = concrete_type sctypeDict[k] = concrete_type + del concrete_type + del k, v _aliases = { "double": "float64", @@ -60,6 +63,7 @@ for k, v in _aliases.items(): sctypeDict[k] = allTypes[v] allTypes[k] = allTypes[v] + del k, v # extra aliases are added only to `sctypeDict` # to support dtype name access, such as`np.dtype("float")` @@ -76,18 +80,21 @@ for k, v in _extra_aliases.items(): 
sctypeDict[k] = allTypes[v] + del k, v # include extended precision sized aliases for is_complex, full_name in [(False, "longdouble"), (True, "clongdouble")]: - longdouble_type: type = allTypes[full_name] + longdouble_type = allTypes[full_name] - bits: int = dtype(longdouble_type).itemsize * 8 - base_name: str = "complex" if is_complex else "float" - extended_prec_name: str = f"{base_name}{bits}" + bits = dtype(longdouble_type).itemsize * 8 + base_name = "complex" if is_complex else "float" + extended_prec_name = f"{base_name}{bits}" if extended_prec_name not in allTypes: sctypeDict[extended_prec_name] = longdouble_type allTypes[extended_prec_name] = longdouble_type + del is_complex, full_name, longdouble_type, bits, base_name, extended_prec_name + #################### # Building `sctypes` @@ -110,10 +117,15 @@ ]: if issubclass(concrete_type, abstract_type): sctypes[type_group].add(concrete_type) + del type_group, abstract_type break + del type_info, concrete_type + # sort sctype groups by bitsize for sctype_key in sctypes.keys(): sctype_list = list(sctypes[sctype_key]) sctype_list.sort(key=lambda x: dtype(x).itemsize) sctypes[sctype_key] = sctype_list + + del sctype_key, sctype_list From 8334c49cb32a885493ef7d5ad291641c9d532303 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 17 Oct 2025 17:49:05 +0200 Subject: [PATCH 0690/1718] MAINT: remove confusing parameter default for ``shape`` in ``reshape`` --- numpy/_core/fromnumeric.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/numpy/_core/fromnumeric.py b/numpy/_core/fromnumeric.py index 6cbd1385d459..90493ef77626 100644 --- a/numpy/_core/fromnumeric.py +++ b/numpy/_core/fromnumeric.py @@ -201,12 +201,12 @@ def take(a, indices, axis=None, out=None, mode='raise'): return _wrapfunc(a, 'take', indices, axis=axis, out=out, mode=mode) -def _reshape_dispatcher(a, /, shape=None, order=None, *, copy=None): +def _reshape_dispatcher(a, /, shape, order=None, *, copy=None): return (a,) 
@array_function_dispatch(_reshape_dispatcher) -def reshape(a, /, shape=None, order='C', *, copy=None): +def reshape(a, /, shape, order='C', *, copy=None): """ Gives a new shape to an array without changing its data. @@ -295,9 +295,6 @@ def reshape(a, /, shape=None, order='C', *, copy=None): [3, 4], [5, 6]]) """ - if shape is None: - raise TypeError( - "reshape() missing 1 required positional argument: 'shape'") if copy is not None: return _wrapfunc(a, 'reshape', shape, order=order, copy=copy) return _wrapfunc(a, 'reshape', shape, order=order) From 6090115be423163f1ab33d929ea4447433d01609 Mon Sep 17 00:00:00 2001 From: ixgbe <1113177880@qq.com> Date: Sat, 18 Oct 2025 00:00:20 +0800 Subject: [PATCH 0691/1718] TST: Add unit test for RISC-V CPU features (#29927) * TST: Add unit test for RISC-V CPU features Signed-off-by: Wang Yang * fix ruff check * fix comment * fix ruff check --------- Signed-off-by: Wang Yang --- numpy/_core/tests/test_cpu_features.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/numpy/_core/tests/test_cpu_features.py b/numpy/_core/tests/test_cpu_features.py index 818c1fb204a4..431fcb40b324 100644 --- a/numpy/_core/tests/test_cpu_features.py +++ b/numpy/_core/tests/test_cpu_features.py @@ -433,3 +433,18 @@ class Test_LOONGARCH_Features(AbstractTest): def load_flags(self): self.load_flags_cpuinfo("Features") + + +is_riscv = re.match(r"^(riscv)", machine, re.IGNORECASE) +@pytest.mark.skipif(not is_linux or not is_riscv, reason="Only for Linux and RISC-V") +class Test_RISCV_Features(AbstractTest): + features = ["RVV"] + + def load_flags(self): + self.load_flags_auxv() + if not self.features_flags: + # Let the test fail and dump if we cannot read HWCAP. 
+ return + hwcap = int(next(iter(self.features_flags)), 16) + if hwcap & (1 << 21): # HWCAP_RISCV_V + self.features_flags.add("RVV") From 6348e38250f42362646510e0b854d4fc173d19b1 Mon Sep 17 00:00:00 2001 From: Kelvin Li Date: Fri, 17 Oct 2025 17:19:56 -0400 Subject: [PATCH 0692/1718] TST: disable overflow exception test of numpy.power on AIX (#29740) --- numpy/_core/tests/test_numeric.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py index 127f08527f1e..fb5a37c33126 100644 --- a/numpy/_core/tests/test_numeric.py +++ b/numpy/_core/tests/test_numeric.py @@ -1024,8 +1024,11 @@ def test_floating_exceptions(self, typecode): lambda a, b: a + b, ft_max, ft_max * ft_eps) self.assert_raises_fpe(overflow, lambda a, b: a - b, -ft_max, ft_max * ft_eps) - self.assert_raises_fpe(overflow, - np.power, ftype(2), ftype(2**fi.nexp)) + # On AIX, pow() with double does not raise the overflow exception, + # it returns inf. Long double is the same as double. 
+ if sys.platform != 'aix' or typecode not in 'dDgG': + self.assert_raises_fpe(overflow, + np.power, ftype(2), ftype(2**fi.nexp)) self.assert_raises_fpe(divbyzero, lambda a, b: a / b, ftype(1), ftype(0)) self.assert_raises_fpe( From 4f83e3fb930b9324f2e3954dd2d97d76d9a0b37c Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Sat, 18 Oct 2025 01:38:52 +0200 Subject: [PATCH 0693/1718] TYP: fix stubtest errors in ``numpy.ma`` (#30018) * TYP: add missing parameter default in ``ma.MaskedArray`` for ``data`` * TYP: stub ``ma.MaskedArray.data`` * TYP: stub ``get_fill_value()`` and ``set_fill_value(_)`` of ``ma.MaskedArray`` * TYP: stub the ``dataiter`` and ``maskiter`` attributes of ``ma.MaskedIterator`` * TYP: annotate the missing return type of ``ma.MaskedIterator.__next__`` * TYP: fully stub ``ma.MaskedConstant`` * TYP: stub `ma._frommethod` and fix stubtest errors in `count`, `argmin`, `argmax` * TYP: remove stub for nonexistent ``ma.core.mask_rowcols`` function * TYP: fix inconsistent ``ma.corrcoef`` signature * Revert "TYP: stub `ma._frommethod` and fix stubtest errors in `count`, `argmin`, `argmax`" This reverts commit 7d42690638014fdf1e78f0ffaf51e08754db37f1. 
* TYP: fix inconsistent signature of `ma.count`, `na.argmin`, and `ma.argmax` --- numpy/ma/core.pyi | 121 ++++++++++++++++++++++++++------------------ numpy/ma/extras.pyi | 2 +- 2 files changed, 73 insertions(+), 50 deletions(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 2e069164c3d0..13aa3336fbd4 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -3,9 +3,10 @@ import datetime as dt from _typeshed import Incomplete -from collections.abc import Iterator, Sequence +from collections.abc import Sequence from typing import ( Any, + Final, Generic, Literal, Never, @@ -18,7 +19,7 @@ from typing import ( TypeAlias, overload, ) -from typing_extensions import TypeIs, TypeVar +from typing_extensions import TypeIs, TypeVar, override import numpy as np from numpy import ( @@ -310,6 +311,10 @@ _ConvertibleToComplex: TypeAlias = SupportsComplex | SupportsFloat | SupportsInd _ConvertibleToTD64: TypeAlias = dt.timedelta | int | _CharLike_co | character | number | timedelta64 | np.bool | None _ConvertibleToDT64: TypeAlias = dt.date | int | _CharLike_co | character | number | datetime64 | np.bool | None +_Ignored: TypeAlias = object + +### + MaskType = bool_ nomask: bool_[Literal[False]] @@ -461,18 +466,23 @@ masked_print_option: _MaskedPrintOption def flatten_structured_array(a): ... +# TODO: Support non-boolean mask dtypes, such as `np.void`. This will require adding an +# additional generic type parameter to (at least) `MaskedArray` and `MaskedIterator` to +# hold the dtype of the mask. + class MaskedIterator(Generic[_ShapeT_co, _DTypeT_co]): - ma: MaskedArray[_ShapeT_co, _DTypeT_co] - dataiter: Any - maskiter: Any + ma: MaskedArray[_ShapeT_co, _DTypeT_co] # readonly + dataiter: np.flatiter[ndarray[_ShapeT_co, _DTypeT_co]] # readonly + maskiter: Final[np.flatiter[NDArray[np.bool]]] + def __init__(self, ma: MaskedArray[_ShapeT_co, _DTypeT_co]) -> None: ... - def __iter__(self) -> Iterator[Any]: ... + def __iter__(self) -> Self: ... 
# Similar to `MaskedArray.__getitem__` but without the `void` case. @overload def __getitem__(self, indx: _ArrayInt_co | tuple[_ArrayInt_co, ...], /) -> MaskedArray[_AnyShape, _DTypeT_co]: ... @overload - def __getitem__(self, indx: SupportsIndex | tuple[SupportsIndex, ...], /) -> Any: ... + def __getitem__(self, indx: SupportsIndex | tuple[SupportsIndex, ...], /) -> Incomplete: ... @overload def __getitem__(self, indx: _ToIndices, /) -> MaskedArray[_AnyShape, _DTypeT_co]: ... @@ -522,7 +532,8 @@ class MaskedIterator(Generic[_ShapeT_co, _DTypeT_co]): @overload # catch-all def __setitem__(self, index: _ToIndices, value: ArrayLike, /) -> None: ... - def __next__(self) -> Any: ... + # TODO: Returns `mvoid[(), _DTypeT_co]` for masks with `np.void` dtype. + def __next__(self: MaskedIterator[Any, np.dtype[_ScalarT]]) -> _ScalarT: ... class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): __array_priority__: Any @@ -575,7 +586,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def __new__( cls, - data: object, + data: object = None, mask: _ArrayLikeBool_co = nomask, dtype: DTypeLike | None = None, copy: bool = False, @@ -653,6 +664,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def shape(self) -> _ShapeT_co: ... @shape.setter def shape(self: MaskedArray[_ShapeT, Any], shape: _ShapeT, /) -> None: ... + def __setmask__(self, mask: _ArrayLikeBool_co, copy: bool = False) -> None: ... @property def mask(self) -> np.ndarray[_ShapeT_co, dtype[MaskType]] | MaskType: ... @@ -670,19 +682,28 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @property def sharedmask(self) -> bool: ... def shrink_mask(self) -> Self: ... + + @property + def baseclass(self) -> type[ndarray]: ... + @property - def baseclass(self) -> type[NDArray[Any]]: ... - data: Any + def _data(self) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @property + def data(self) -> ndarray[_ShapeT_co, _DTypeT_co]: ... 
# type: ignore[override] + + @property # type: ignore[override] def flat(self) -> MaskedIterator[_ShapeT_co, _DTypeT_co]: ... @flat.setter def flat(self, value: ArrayLike, /) -> None: ... + @property def fill_value(self: _MaskedArray[_ScalarT]) -> _ScalarT: ... @fill_value.setter - def fill_value(self, value: _ScalarLike_co | None = None) -> None: ... - get_fill_value: Any - set_fill_value: Any + def fill_value(self, value: _ScalarLike_co | None = None, /) -> None: ... + + def get_fill_value(self: _MaskedArray[_ScalarT]) -> _ScalarT: ... + def set_fill_value(self, /, value: _ScalarLike_co | None = None) -> None: ... + def filled(self, /, fill_value: _ScalarLike_co | None = None) -> ndarray[_ShapeT_co, _DTypeT_co]: ... def compressed(self) -> ndarray[tuple[int], _DTypeT_co]: ... @@ -2047,28 +2068,31 @@ isarray = isMaskedArray isMA = isMaskedArray # 0D float64 array -class MaskedConstant(MaskedArray[_AnyShape, dtype[float64]]): - def __new__(cls): ... - __class__: Any - def __array_finalize__(self, obj): ... - def __array_wrap__(self, obj, context=..., return_scalar=...): ... - def __format__(self, format_spec): ... - def __reduce__(self): ... - def __iop__(self, other): ... - __iadd__: Any - __isub__: Any - __imul__: Any - __ifloordiv__: Any - __itruediv__: Any - __ipow__: Any - def copy(self, *args, **kwargs): ... - def __copy__(self): ... - def __deepcopy__(self, memo): ... - def __setattr__(self, attr, value): ... - -masked: MaskedConstant -masked_singleton: MaskedConstant -masked_array = MaskedArray +class MaskedConstant(MaskedArray[tuple[()], dtype[float64]]): + def __new__(cls) -> Self: ... + + # these overrides are no-ops + @override + def __iadd__(self, other: _Ignored, /) -> Self: ... + @override + def __isub__(self, other: _Ignored, /) -> Self: ... + @override + def __imul__(self, other: _Ignored, /) -> Self: ... + @override + def __ifloordiv__(self, other: _Ignored, /) -> Self: ... + @override + def __itruediv__(self, other: _Ignored, /) -> Self: ... 
+ @override + def __ipow__(self, other: _Ignored, /) -> Self: ... + @override + def __deepcopy__(self, /, memo: _Ignored) -> Self: ... + @override + def copy(self, /, *args: _Ignored, **kwargs: _Ignored) -> Self: ... + +masked: Final[MaskedConstant] = ... +masked_singleton: Final[MaskedConstant] = ... + +masked_array: TypeAlias = MaskedArray def array( data, @@ -2230,17 +2254,17 @@ trace: _frommethod var: _frommethod @overload -def count(self: ArrayLike, axis: None = None, keepdims: Literal[False] | _NoValueType = ...) -> int: ... +def count(a: ArrayLike, axis: None = None, keepdims: Literal[False] | _NoValueType = ...) -> int: ... @overload -def count(self: ArrayLike, axis: _ShapeLike, keepdims: bool | _NoValueType = ...) -> NDArray[int_]: ... +def count(a: ArrayLike, axis: _ShapeLike, keepdims: bool | _NoValueType = ...) -> NDArray[int_]: ... @overload -def count(self: ArrayLike, axis: _ShapeLike | None = ..., *, keepdims: Literal[True]) -> NDArray[int_]: ... +def count(a: ArrayLike, axis: _ShapeLike | None = None, *, keepdims: Literal[True]) -> NDArray[int_]: ... @overload -def count(self: ArrayLike, axis: _ShapeLike | None, keepdims: Literal[True]) -> NDArray[int_]: ... +def count(a: ArrayLike, axis: _ShapeLike | None, keepdims: Literal[True]) -> NDArray[int_]: ... @overload def argmin( - self: ArrayLike, + a: ArrayLike, axis: None = None, fill_value: _ScalarLike_co | None = None, out: None = None, @@ -2249,7 +2273,7 @@ def argmin( ) -> intp: ... @overload def argmin( - self: ArrayLike, + a: ArrayLike, axis: SupportsIndex | None = None, fill_value: _ScalarLike_co | None = None, out: None = None, @@ -2258,7 +2282,7 @@ def argmin( ) -> Any: ... @overload def argmin( - self: ArrayLike, + a: ArrayLike, axis: SupportsIndex | None = None, fill_value: _ScalarLike_co | None = None, *, @@ -2267,7 +2291,7 @@ def argmin( ) -> _ArrayT: ... 
@overload def argmin( - self: ArrayLike, + a: ArrayLike, axis: SupportsIndex | None, fill_value: _ScalarLike_co | None, out: _ArrayT, @@ -2278,7 +2302,7 @@ def argmin( # @overload def argmax( - self: ArrayLike, + a: ArrayLike, axis: None = None, fill_value: _ScalarLike_co | None = None, out: None = None, @@ -2287,7 +2311,7 @@ def argmax( ) -> intp: ... @overload def argmax( - self: ArrayLike, + a: ArrayLike, axis: SupportsIndex | None = None, fill_value: _ScalarLike_co | None = None, out: None = None, @@ -2296,7 +2320,7 @@ def argmax( ) -> Any: ... @overload def argmax( - self: ArrayLike, + a: ArrayLike, axis: SupportsIndex | None = None, fill_value: _ScalarLike_co | None = None, *, @@ -2305,7 +2329,7 @@ def argmax( ) -> _ArrayT: ... @overload def argmax( - self: ArrayLike, + a: ArrayLike, axis: SupportsIndex | None, fill_value: _ScalarLike_co | None, out: _ArrayT, @@ -2450,4 +2474,3 @@ zeros_like: _convert2ma def append(a, b, axis=None): ... def dot(a, b, strict=False, out=None): ... -def mask_rowcols(a, axis=...): ... diff --git a/numpy/ma/extras.pyi b/numpy/ma/extras.pyi index 034b309af080..afd03300ff5f 100644 --- a/numpy/ma/extras.pyi +++ b/numpy/ma/extras.pyi @@ -107,7 +107,7 @@ def isin(element, test_elements, assume_unique=False, invert=False): ... def union1d(ar1, ar2): ... def setdiff1d(ar1, ar2, assume_unique=False): ... def cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None): ... -def corrcoef(x, y=None, rowvar=True, bias=..., allow_masked=True, ddof=...): ... +def corrcoef(x, y=None, rowvar=True, allow_masked=True): ... 
class MAxisConcatenator(AxisConcatenator): __slots__ = () From d6445151ae8d39ae91495f17aef01852e1f3d1f6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 17 Oct 2025 17:39:49 -0600 Subject: [PATCH 0694/1718] MAINT: Bump github/codeql-action from 4.30.8 to 4.30.9 (#30015) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 4.30.8 to 4.30.9. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/f443b600d91635bebf5b0d9ebc620189c0d6fba5...16140ae1a102900babc80a33c44059580f687047) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 4.30.9 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 0d52c4126be6..4c033a26e15c 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@f443b600d91635bebf5b0d9ebc620189c0d6fba5 # v4.30.8 + uses: github/codeql-action/init@16140ae1a102900babc80a33c44059580f687047 # v4.30.9 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 
# If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@f443b600d91635bebf5b0d9ebc620189c0d6fba5 # v4.30.8 + uses: github/codeql-action/autobuild@16140ae1a102900babc80a33c44059580f687047 # v4.30.9 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@f443b600d91635bebf5b0d9ebc620189c0d6fba5 # v4.30.8 + uses: github/codeql-action/analyze@16140ae1a102900babc80a33c44059580f687047 # v4.30.9 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 9a0c80aa3342..9587fc4e026d 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@f443b600d91635bebf5b0d9ebc620189c0d6fba5 # v2.1.27 + uses: github/codeql-action/upload-sarif@16140ae1a102900babc80a33c44059580f687047 # v2.1.27 with: sarif_file: results.sarif From f72f0f6bb4ed532bfe98b4c8bee4bf37cd8e5e15 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Sat, 18 Oct 2025 01:41:54 +0200 Subject: [PATCH 0695/1718] TYP: stub ``MesonTemplate.objects_substitution()`` in ``f2py._backends._meson`` (#30004) --- numpy/f2py/_backends/_meson.pyi | 1 + 1 file changed, 1 insertion(+) diff --git a/numpy/f2py/_backends/_meson.pyi b/numpy/f2py/_backends/_meson.pyi index bcb2b0304401..5c85c61586fc 100644 --- a/numpy/f2py/_backends/_meson.pyi +++ b/numpy/f2py/_backends/_meson.pyi @@ -40,6 +40,7 @@ class MesonTemplate: # def initialize_template(self) -> None: ... def sources_substitution(self) -> None: ... 
+ def objects_substitution(self) -> None: ... def deps_substitution(self) -> None: ... def libraries_substitution(self) -> None: ... def include_substitution(self) -> None: ... From 58db55a1b8b3db98d0b3092d23430be2222d23f9 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Sat, 18 Oct 2025 01:42:55 +0200 Subject: [PATCH 0696/1718] TYP: update ``corrcoef`` signature (#30009) * TYP: update ``corrcoef`` signature * TYP: update ``ma.corrcoef`` signature --- numpy/lib/_function_base_impl.pyi | 8 -------- 1 file changed, 8 deletions(-) diff --git a/numpy/lib/_function_base_impl.pyi b/numpy/lib/_function_base_impl.pyi index e99be06469f3..651ece6b3447 100644 --- a/numpy/lib/_function_base_impl.pyi +++ b/numpy/lib/_function_base_impl.pyi @@ -479,8 +479,6 @@ def corrcoef( x: _ArrayLikeFloat_co, y: _ArrayLikeFloat_co | None = None, rowvar: bool = True, - bias: _NoValueType = ..., - ddof: _NoValueType = ..., *, dtype: None = None, ) -> NDArray[floating]: ... @@ -489,8 +487,6 @@ def corrcoef( x: _ArrayLikeComplex_co, y: _ArrayLikeComplex_co | None = None, rowvar: bool = True, - bias: _NoValueType = ..., - ddof: _NoValueType = ..., *, dtype: None = None, ) -> NDArray[complexfloating]: ... @@ -499,8 +495,6 @@ def corrcoef( x: _ArrayLikeComplex_co, y: _ArrayLikeComplex_co | None = None, rowvar: bool = True, - bias: _NoValueType = ..., - ddof: _NoValueType = ..., *, dtype: _DTypeLike[_ScalarT], ) -> NDArray[_ScalarT]: ... @@ -509,8 +503,6 @@ def corrcoef( x: _ArrayLikeComplex_co, y: _ArrayLikeComplex_co | None = None, rowvar: bool = True, - bias: _NoValueType = ..., - ddof: _NoValueType = ..., *, dtype: DTypeLike | None = None, ) -> NDArray[Any]: ... 
From c8ff9a75d60c7f61dae8b26e381cc1be290484c9 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Sat, 18 Oct 2025 01:44:05 +0200 Subject: [PATCH 0697/1718] TYP: ``linalg.svdvals``: fix inconsistent signature and add dtype overloads (#30011) * TYP: ``linalg.svdvals``: fix inconsistent signature and add dtype overloads * TYP: ``linalg.svdvals``: add some type-tests --- numpy/linalg/_linalg.pyi | 16 +++++++++++----- numpy/typing/tests/data/fail/linalg.pyi | 4 ++++ numpy/typing/tests/data/reveal/linalg.pyi | 13 +++++++++++++ 3 files changed, 28 insertions(+), 5 deletions(-) diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index 4b8ac3da0ee2..ca4c6e0c77da 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -16,14 +16,12 @@ from numpy import ( complex128, complexfloating, float64, - # other floating, int32, object_, signedinteger, timedelta64, unsignedinteger, - # re-exports vecdot, ) from numpy._core.fromnumeric import matrix_transpose @@ -38,9 +36,11 @@ from numpy._typing import ( _ArrayLikeComplex_co, _ArrayLikeFloat_co, _ArrayLikeInt_co, + _ArrayLikeNumber_co, _ArrayLikeObject_co, _ArrayLikeTD64_co, _ArrayLikeUInt_co, + _NestedSequence, ) from numpy.linalg import LinAlgError @@ -301,9 +301,15 @@ def svd( hermitian: bool = False, ) -> NDArray[floating]: ... -def svdvals( - x: _ArrayLikeInt_co | _ArrayLikeFloat_co | _ArrayLikeComplex_co -) -> NDArray[floating]: ... +# the ignored `overload-overlap` mypy error below is a false-positive +@overload +def svdvals( # type: ignore[overload-overlap] + x: _ArrayLike[np.float64 | np.complex128 | np.integer | np.bool] | _NestedSequence[complex], / +) -> NDArray[np.float64]: ... +@overload +def svdvals(x: _ArrayLike[np.float32 | np.complex64], /) -> NDArray[np.float32]: ... +@overload +def svdvals(x: _ArrayLikeNumber_co, /) -> NDArray[floating]: ... 
# TODO: Returns a scalar for 2D arrays and # a `(x.ndim - 2)`` dimensionl array otherwise diff --git a/numpy/typing/tests/data/fail/linalg.pyi b/numpy/typing/tests/data/fail/linalg.pyi index c4695ee671cd..78aceb235f8d 100644 --- a/numpy/typing/tests/data/fail/linalg.pyi +++ b/numpy/typing/tests/data/fail/linalg.pyi @@ -32,6 +32,10 @@ np.linalg.eigh(AR_O, UPLO="bob") # type: ignore[call-overload] np.linalg.svd(AR_O) # type: ignore[arg-type] +np.linalg.svdvals(AR_O) # type: ignore[arg-type] +np.linalg.svdvals(AR_M) # type: ignore[arg-type] +np.linalg.svdvals(x=AR_f8) # type: ignore[call-overload] + np.linalg.cond(AR_O) # type: ignore[arg-type] np.linalg.cond(AR_f8, p="bob") # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/reveal/linalg.pyi b/numpy/typing/tests/data/reveal/linalg.pyi index ef7819f448ca..cd8057651ad1 100644 --- a/numpy/typing/tests/data/reveal/linalg.pyi +++ b/numpy/typing/tests/data/reveal/linalg.pyi @@ -10,8 +10,11 @@ from numpy.linalg._linalg import ( SVDResult, ) +float_list_2d: list[list[float]] AR_i8: npt.NDArray[np.int64] +AR_f4: npt.NDArray[np.float32] AR_f8: npt.NDArray[np.float64] +AR_c8: npt.NDArray[np.complex64] AR_c16: npt.NDArray[np.complex128] AR_O: npt.NDArray[np.object_] AR_m: npt.NDArray[np.timedelta64] @@ -79,6 +82,16 @@ assert_type(np.linalg.svd(AR_f8, compute_uv=False), npt.NDArray[np.floating]) assert_type(np.linalg.svd(AR_c16, compute_uv=False), npt.NDArray[np.floating]) assert_type(np.linalg.svd(AR_c16, True, False), npt.NDArray[np.floating]) +assert_type(np.linalg.svdvals(AR_b), npt.NDArray[np.float64]) +assert_type(np.linalg.svdvals(AR_i8), npt.NDArray[np.float64]) +assert_type(np.linalg.svdvals(AR_f4), npt.NDArray[np.float32]) +assert_type(np.linalg.svdvals(AR_c8), npt.NDArray[np.float32]) +assert_type(np.linalg.svdvals(AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.svdvals(AR_c16), npt.NDArray[np.float64]) +assert_type(np.linalg.svdvals([[1, 2], [3, 4]]), npt.NDArray[np.float64]) 
+assert_type(np.linalg.svdvals([[1.0, 2.0], [3.0, 4.0]]), npt.NDArray[np.float64]) +assert_type(np.linalg.svdvals([[1j, 2j], [3j, 4j]]), npt.NDArray[np.float64]) + assert_type(np.linalg.cond(AR_i8), Any) assert_type(np.linalg.cond(AR_f8), Any) assert_type(np.linalg.cond(AR_c16), Any) From 262158f3e3aae882541c29547684e4d4059dd836 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Sat, 18 Oct 2025 01:45:44 +0200 Subject: [PATCH 0698/1718] TYP: stub ``linalg.lapack_lite.LapackError`` (#30014) --- numpy/linalg/lapack_lite.pyi | 2 ++ 1 file changed, 2 insertions(+) diff --git a/numpy/linalg/lapack_lite.pyi b/numpy/linalg/lapack_lite.pyi index 835293a26762..3ec3919bfa3b 100644 --- a/numpy/linalg/lapack_lite.pyi +++ b/numpy/linalg/lapack_lite.pyi @@ -57,6 +57,8 @@ class _ZUNGQR(TypedDict): _ilp64: Final[bool] = ... +class LapackError(Exception): ... + def dgelsd( m: int, n: int, From 0c514d94ae66ada8e275852f063bdd917cdf9c49 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Fri, 17 Oct 2025 18:02:38 -0600 Subject: [PATCH 0699/1718] MAINT, TST: Increase tolerance in fft test. `test_identity_long_short_reversed` fails fairly often, so increase the test tolerance by a bit. Note that `random` is used throughout this test module without a seed, but should be OK as the tolerance is set by the type spacing at 1, which should be out of range for the generated random values. --- numpy/fft/tests/test_pocketfft.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/fft/tests/test_pocketfft.py b/numpy/fft/tests/test_pocketfft.py index 021181845b3b..6f26ab6c6d65 100644 --- a/numpy/fft/tests/test_pocketfft.py +++ b/numpy/fft/tests/test_pocketfft.py @@ -55,7 +55,7 @@ def test_identity_long_short(self, dtype): def test_identity_long_short_reversed(self, dtype): # Also test explicitly given number of points in reversed order. 
maxlen = 16 - atol = 5 * np.spacing(np.array(1., dtype=dtype)) + atol = 6 * np.spacing(np.array(1., dtype=dtype)) x = random(maxlen).astype(dtype) + 1j * random(maxlen).astype(dtype) xx = np.concatenate([x, np.zeros_like(x)]) for i in range(1, maxlen * 2): From 75fc468c52354e57efa8d9db1ec24f8b955f6c60 Mon Sep 17 00:00:00 2001 From: "Christine P. Chai" Date: Fri, 17 Oct 2025 18:55:43 -0700 Subject: [PATCH 0700/1718] DOC: Correct typos in numpy API documentation (#30020) --- doc/source/user/basics.subclassing.rst | 2 +- numpy/_core/src/multiarray/array_method.c | 2 +- numpy/_core/src/multiarray/dtypemeta.c | 2 +- numpy/_core/tests/test_api.py | 2 +- numpy/_core/tests/test_errstate.py | 2 +- numpy/f2py/symbolic.py | 2 +- numpy/lib/_format_impl.py | 2 +- numpy/lib/tests/test_io.py | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) diff --git a/doc/source/user/basics.subclassing.rst b/doc/source/user/basics.subclassing.rst index a937521a7abb..2a369aaae17c 100644 --- a/doc/source/user/basics.subclassing.rst +++ b/doc/source/user/basics.subclassing.rst @@ -567,7 +567,7 @@ which inputs and outputs it converted. Hence, e.g., Note that another approach would be to use ``getattr(ufunc, methods)(*inputs, **kwargs)`` instead of the ``super`` call. For this example, the result would be identical, but there is a difference if another operand -also defines ``__array_ufunc__``. E.g., lets assume that we evaluate +also defines ``__array_ufunc__``. E.g., let's assume that we evaluate ``np.add(a, b)``, where ``b`` is an instance of another class ``B`` that has an override. 
If you use ``super`` as in the example, ``ndarray.__array_ufunc__`` will notice that ``b`` has an override, which diff --git a/numpy/_core/src/multiarray/array_method.c b/numpy/_core/src/multiarray/array_method.c index a0a6c3fda7d6..c7280435d3c3 100644 --- a/numpy/_core/src/multiarray/array_method.c +++ b/numpy/_core/src/multiarray/array_method.c @@ -4,7 +4,7 @@ * pointers to do fast operations on the given input functions. * It thus adds an abstraction layer around individual ufunc loops. * - * Unlike methods, a ArrayMethod can have multiple inputs and outputs. + * Unlike methods, an ArrayMethod can have multiple inputs and outputs. * This has some serious implication for garbage collection, and as far * as I (@seberg) understands, it is not possible to always guarantee correct * cyclic garbage collection of dynamically created DTypes with methods. diff --git a/numpy/_core/src/multiarray/dtypemeta.c b/numpy/_core/src/multiarray/dtypemeta.c index 692c7cfc2e0a..1c208aa54a84 100644 --- a/numpy/_core/src/multiarray/dtypemeta.c +++ b/numpy/_core/src/multiarray/dtypemeta.c @@ -96,7 +96,7 @@ use_new_as_default(PyArray_DTypeMeta *self) return NULL; } /* - * Lets not trust that the DType is implemented correctly + * Let's not trust that the DType is implemented correctly * TODO: Should probably do an exact type-check (at least unless this is * an abstract DType). */ diff --git a/numpy/_core/tests/test_api.py b/numpy/_core/tests/test_api.py index 83e0a0179e0a..216a2c75afb8 100644 --- a/numpy/_core/tests/test_api.py +++ b/numpy/_core/tests/test_api.py @@ -88,7 +88,7 @@ def test_array_array(): o = type("o", (object,), {"__array_struct__": a.__array_struct__}) # wasn't what I expected... is np.array(o) supposed to equal a ? 
- # instead we get a array([...], dtype=">V18") + # instead we get an array([...], dtype=">V18") assert_equal(bytes(np.array(o).data), bytes(a.data)) # test __array__ diff --git a/numpy/_core/tests/test_errstate.py b/numpy/_core/tests/test_errstate.py index b72fb65a3239..f0735a045a4d 100644 --- a/numpy/_core/tests/test_errstate.py +++ b/numpy/_core/tests/test_errstate.py @@ -87,7 +87,7 @@ def test_errstate_enter_once(self): @pytest.mark.skipif(IS_WASM, reason="wasm doesn't support asyncio") def test_asyncio_safe(self): - # asyncio may not always work, lets assume its fine if missing + # asyncio may not always work, let's assume its fine if missing # Pyodide/wasm doesn't support it. If this test makes problems, # it should just be skipped liberally (or run differently). asyncio = pytest.importorskip("asyncio") diff --git a/numpy/f2py/symbolic.py b/numpy/f2py/symbolic.py index 11645172fe30..820ae2ec9b9f 100644 --- a/numpy/f2py/symbolic.py +++ b/numpy/f2py/symbolic.py @@ -1478,7 +1478,7 @@ def restore(r): if isinstance(items, Expr): return items if paren in ['ROUNDDIV', 'SQUARE']: - # Expression is a array constructor + # Expression is an array constructor if isinstance(items, Expr): items = (items,) return as_array(items) diff --git a/numpy/lib/_format_impl.py b/numpy/lib/_format_impl.py index bfda7fec73b6..2bb557709c8b 100644 --- a/numpy/lib/_format_impl.py +++ b/numpy/lib/_format_impl.py @@ -645,7 +645,7 @@ def _read_array_header(fp, version, max_header_size=_MAX_HEADER_SIZE): "may be necessary.") # The header is a pretty-printed string representation of a literal - # Python dictionary with trailing newlines padded to a ARRAY_ALIGN byte + # Python dictionary with trailing newlines padded to an ARRAY_ALIGN byte # boundary. The keys are strings. 
# "shape" : tuple of int # "fortran_order" : bool diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py index 5b9c9ab42a5d..5ba634b9f612 100644 --- a/numpy/lib/tests/test_io.py +++ b/numpy/lib/tests/test_io.py @@ -2574,7 +2574,7 @@ def test_squeeze_scalar(self): @pytest.mark.parametrize("ndim", [0, 1, 2]) def test_ndmin_keyword(self, ndim: int): - # lets have the same behaviour of ndmin as loadtxt + # let's have the same behaviour of ndmin as loadtxt # as they should be the same for non-missing values txt = "42" From 086267d8a29af4d4e520296e043b9115d62467be Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 18 Oct 2025 03:58:54 +0200 Subject: [PATCH 0701/1718] DEP: Remove ``delimitor`` kwarg from ``ma.mrecords.fromtextfile`` --- numpy/ma/mrecords.py | 13 +------------ numpy/ma/mrecords.pyi | 2 -- numpy/ma/tests/test_deprecations.py | 21 --------------------- 3 files changed, 1 insertion(+), 35 deletions(-) diff --git a/numpy/ma/mrecords.py b/numpy/ma/mrecords.py index 835f3ce5b772..bb4a2707fec1 100644 --- a/numpy/ma/mrecords.py +++ b/numpy/ma/mrecords.py @@ -657,8 +657,7 @@ def openfile(fname): def fromtextfile(fname, delimiter=None, commentchar='#', missingchar='', - varnames=None, vartypes=None, - *, delimitor=np._NoValue): # backwards compatibility + varnames=None, vartypes=None): """ Creates a mrecarray from data stored in the file `filename`. @@ -682,16 +681,6 @@ def fromtextfile(fname, delimiter=None, commentchar='#', missingchar='', Ultra simple: the varnames are in the header, one line""" - if delimitor is not np._NoValue: - if delimiter is not None: - raise TypeError("fromtextfile() got multiple values for argument " - "'delimiter'") - # NumPy 1.22.0, 2021-09-23 - warnings.warn("The 'delimitor' keyword argument of " - "numpy.ma.mrecords.fromtextfile() is deprecated " - "since NumPy 1.22.0, use 'delimiter' instead.", - DeprecationWarning, stacklevel=2) - delimiter = delimitor # Try to open the file. 
ftext = openfile(fname) diff --git a/numpy/ma/mrecords.pyi b/numpy/ma/mrecords.pyi index c1f3592b0dd6..d5cdf097bb84 100644 --- a/numpy/ma/mrecords.pyi +++ b/numpy/ma/mrecords.pyi @@ -89,8 +89,6 @@ def fromtextfile( missingchar="", varnames=None, vartypes=None, - # NOTE: deprecated: NumPy 1.22.0, 2021-09-23 - # delimitor=..., ): ... def addfield(mrecord, newfield, newfieldname=None): ... diff --git a/numpy/ma/tests/test_deprecations.py b/numpy/ma/tests/test_deprecations.py index a2c98d0c229d..07120b198bea 100644 --- a/numpy/ma/tests/test_deprecations.py +++ b/numpy/ma/tests/test_deprecations.py @@ -1,9 +1,6 @@ """Test deprecation and future warnings. """ -import io -import textwrap - import pytest import numpy as np @@ -66,21 +63,3 @@ def test_axis_default(self): result = ma_max(data1d) assert_equal(result, ma_max(data1d, axis=None)) assert_equal(result, ma_max(data1d, axis=0)) - - -class TestFromtextfile: - def test_fromtextfile_delimitor(self): - # NumPy 1.22.0, 2021-09-23 - - textfile = io.StringIO(textwrap.dedent( - """ - A,B,C,D - 'string 1';1;1.0;'mixed column' - 'string 2';2;2.0; - 'string 3';3;3.0;123 - 'string 4';4;4.0;3.14 - """ - )) - - with pytest.warns(DeprecationWarning): - result = np.ma.mrecords.fromtextfile(textfile, delimitor=';') From 16e001a96b97431e67967df825154a7aed77de29 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 18 Oct 2025 04:02:31 +0200 Subject: [PATCH 0702/1718] DOC: release note for #30021 --- doc/release/upcoming_changes/30021.expired.rst | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 doc/release/upcoming_changes/30021.expired.rst diff --git a/doc/release/upcoming_changes/30021.expired.rst b/doc/release/upcoming_changes/30021.expired.rst new file mode 100644 index 000000000000..31ca300ce35f --- /dev/null +++ b/doc/release/upcoming_changes/30021.expired.rst @@ -0,0 +1,5 @@ +Remove ``delimitor`` parameter from ``numpy.ma.mrecords.fromtextfile()`` 
+------------------------------------------------------------------------ + +The ``delimitor`` parameter was deprecated in NumPy 1.22.0 and has been +removed from ``numpy.ma.mrecords.fromtextfile()``. Use ``delimiter`` instead. From 938a9975a557a2829feb86ea117726e198ee10a7 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Sat, 18 Oct 2025 19:26:24 +0200 Subject: [PATCH 0703/1718] TYP: Improved ``ndarray`` augmented assignment operators (#29862) * TYP: Improved ``ndarray`` augmented assignment operators * TYP: Remove redundant ``MaskedArray`` augmented assignment operator method overrides --- numpy/__init__.pyi | 178 ++++++++++++++++++---------------- numpy/ma/core.pyi | 122 +---------------------- numpy/matrixlib/defmatrix.pyi | 2 - 3 files changed, 98 insertions(+), 204 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 29d86584e1b7..9fa6aafb56bb 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -761,7 +761,21 @@ _FlexDTypeT = TypeVar("_FlexDTypeT", bound=dtype[flexible]) _ArrayT = TypeVar("_ArrayT", bound=ndarray) _ArrayT_co = TypeVar("_ArrayT_co", bound=ndarray, default=ndarray, covariant=True) -_IntegralArrayT = TypeVar("_IntegralArrayT", bound=NDArray[integer | np.bool | object_]) +_BoolArrayT = TypeVar("_BoolArrayT", bound=NDArray[np.bool]) +_IntegerArrayT = TypeVar("_IntegerArrayT", bound=NDArray[integer]) +_IntegralArrayT = TypeVar("_IntegralArrayT", bound=NDArray[np.bool | integer | object_]) +_FloatingArrayT = TypeVar("_FloatingArrayT", bound=NDArray[floating]) +_FloatingTimedeltaArrayT = TypeVar("_FloatingTimedeltaArrayT", bound=NDArray[floating | timedelta64]) +_ComplexFloatingArrayT = TypeVar("_ComplexFloatingArrayT", bound=NDArray[complexfloating]) +_InexactArrayT = TypeVar("_InexactArrayT", bound=NDArray[inexact]) +_InexactTimedeltaArrayT = TypeVar("_InexactTimedeltaArrayT", bound=NDArray[inexact | timedelta64]) +_NumberArrayT = TypeVar("_NumberArrayT", bound=NDArray[number]) +_NumberCharacterArrayT = 
TypeVar("_NumberCharacterArrayT", bound=ndarray[Any, dtype[number | character] | dtypes.StringDType]) +_TimedeltaArrayT = TypeVar("_TimedeltaArrayT", bound=NDArray[timedelta64]) +_TimeArrayT = TypeVar("_TimeArrayT", bound=NDArray[datetime64 | timedelta64]) +_ObjectArrayT = TypeVar("_ObjectArrayT", bound=NDArray[object_]) +_BytesArrayT = TypeVar("_BytesArrayT", bound=NDArray[bytes_]) +_StringArrayT = TypeVar("_StringArrayT", bound=ndarray[Any, dtype[str_] | dtypes.StringDType]) _RealArrayT = TypeVar("_RealArrayT", bound=NDArray[floating | integer | timedelta64 | np.bool | object_]) _NumericArrayT = TypeVar("_NumericArrayT", bound=NDArray[number | timedelta64 | object_]) @@ -3417,141 +3431,139 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # object and its value is >= 0 # NOTE: Due to a mypy bug, overloading on e.g. `self: NDArray[SCT_floating]` won't # work, as this will lead to `false negatives` when using these inplace ops. - # Keep in sync with `MaskedArray.__iadd__` - @overload - def __iadd__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + + # += + @overload # type: ignore[misc] + def __iadd__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... @overload - def __iadd__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __iadd__(self: _ComplexFloatingArrayT, other: _ArrayLikeComplex_co, /) -> _ComplexFloatingArrayT: ... @overload - def __iadd__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __iadd__(self: _InexactArrayT, other: _ArrayLikeFloat_co, /) -> _InexactArrayT: ... @overload - def __iadd__(self: NDArray[complexfloating], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __iadd__(self: _NumberArrayT, other: _ArrayLikeInt_co, /) -> _NumberArrayT: ... 
@overload - def __iadd__(self: NDArray[timedelta64 | datetime64], other: _ArrayLikeTD64_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __iadd__(self: _TimeArrayT, other: _ArrayLikeTD64_co, /) -> _TimeArrayT: ... @overload - def __iadd__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __iadd__(self: _BytesArrayT, other: _ArrayLikeBytes_co, /) -> _BytesArrayT: ... @overload - def __iadd__( - self: ndarray[Any, dtype[str_] | dtypes.StringDType], - other: _ArrayLikeStr_co | _ArrayLikeString_co, - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __iadd__(self: _StringArrayT, other: _ArrayLikeStr_co | _ArrayLikeString_co, /) -> _StringArrayT: ... @overload - def __iadd__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __iadd__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... - # Keep in sync with `MaskedArray.__isub__` - @overload - def __isub__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + # -= + @overload # type: ignore[misc] + def __isub__(self: _ComplexFloatingArrayT, other: _ArrayLikeComplex_co, /) -> _ComplexFloatingArrayT: ... @overload - def __isub__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __isub__(self: _InexactArrayT, other: _ArrayLikeFloat_co, /) -> _InexactArrayT: ... @overload - def __isub__(self: NDArray[complexfloating], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __isub__(self: _NumberArrayT, other: _ArrayLikeInt_co, /) -> _NumberArrayT: ... @overload - def __isub__(self: NDArray[timedelta64 | datetime64], other: _ArrayLikeTD64_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __isub__(self: _TimeArrayT, other: _ArrayLikeTD64_co, /) -> _TimeArrayT: ... @overload - def __isub__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... 
+ def __isub__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... - # Keep in sync with `MaskedArray.__imul__` - @overload - def __imul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + # *= + @overload # type: ignore[misc] + def __imul__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... @overload - def __imul__( - self: ndarray[Any, dtype[integer | character] | dtypes.StringDType], other: _ArrayLikeInt_co, / - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __imul__(self: _ComplexFloatingArrayT, other: _ArrayLikeComplex_co, /) -> _ComplexFloatingArrayT: ... @overload - def __imul__(self: NDArray[floating | timedelta64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __imul__(self: _InexactTimedeltaArrayT, other: _ArrayLikeFloat_co, /) -> _InexactTimedeltaArrayT: ... @overload - def __imul__(self: NDArray[complexfloating], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __imul__(self: _NumberCharacterArrayT, other: _ArrayLikeInt_co, /) -> _NumberCharacterArrayT: ... @overload - def __imul__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __imul__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... - # Keep in sync with `MaskedArray.__ipow__` + # @= + @overload # type: ignore[misc] + def __imatmul__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... @overload - def __ipow__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __imatmul__(self: _ComplexFloatingArrayT, other: _ArrayLikeComplex_co, /) -> _ComplexFloatingArrayT: ... @overload - def __ipow__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __imatmul__(self: _InexactArrayT, other: _ArrayLikeFloat_co, /) -> _InexactArrayT: ... 
@overload - def __ipow__(self: NDArray[complexfloating], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __imatmul__(self: _NumberArrayT, other: _ArrayLikeInt_co, /) -> _NumberArrayT: ... @overload - def __ipow__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __imatmul__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... - # Keep in sync with `MaskedArray.__itruediv__` + # **= + @overload # type: ignore[misc] + def __ipow__(self: _ComplexFloatingArrayT, other: _ArrayLikeComplex_co, /) -> _ComplexFloatingArrayT: ... @overload - def __itruediv__(self: NDArray[floating | timedelta64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __ipow__(self: _InexactArrayT, other: _ArrayLikeFloat_co, /) -> _InexactArrayT: ... @overload - def __itruediv__(self: NDArray[complexfloating], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __ipow__(self: _NumberArrayT, other: _ArrayLikeInt_co, /) -> _NumberArrayT: ... @overload - def __itruediv__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __ipow__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... - # keep in sync with `__imod__` and `MaskedArray.__ifloordiv__` + # /= + @overload # type: ignore[misc] + def __itruediv__(self: _ComplexFloatingArrayT, other: _ArrayLikeComplex_co, /) -> _ComplexFloatingArrayT: ... @overload - def __ifloordiv__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __itruediv__(self: _InexactTimedeltaArrayT, other: _ArrayLikeFloat_co, /) -> _InexactTimedeltaArrayT: ... @overload - def __ifloordiv__(self: NDArray[floating | timedelta64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __itruediv__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... 
+ + # //= + # keep in sync with `__imod__` + @overload # type: ignore[misc] + def __ifloordiv__(self: _IntegerArrayT, other: _ArrayLikeInt_co, /) -> _IntegerArrayT: ... + @overload + def __ifloordiv__(self: _FloatingTimedeltaArrayT, other: _ArrayLikeFloat_co, /) -> _FloatingTimedeltaArrayT: ... @overload - def __ifloordiv__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __ifloordiv__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + # %= # keep in sync with `__ifloordiv__` + @overload # type: ignore[misc] + def __imod__(self: _IntegerArrayT, other: _ArrayLikeInt_co, /) -> _IntegerArrayT: ... @overload - def __imod__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __imod__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __imod__(self: _FloatingArrayT, other: _ArrayLikeFloat_co, /) -> _FloatingArrayT: ... @overload - def __imod__( - self: NDArray[timedelta64], - other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]], - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __imod__(self: _TimedeltaArrayT, other: _ArrayLike[timedelta64], /) -> _TimedeltaArrayT: ... @overload - def __imod__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __imod__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + # <<= # keep in sync with `__irshift__` + @overload # type: ignore[misc] + def __ilshift__(self: _IntegerArrayT, other: _ArrayLikeInt_co, /) -> _IntegerArrayT: ... @overload - def __ilshift__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __ilshift__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __ilshift__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... 
+ # >>= # keep in sync with `__ilshift__` + @overload # type: ignore[misc] + def __irshift__(self: _IntegerArrayT, other: _ArrayLikeInt_co, /) -> _IntegerArrayT: ... @overload - def __irshift__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __irshift__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __irshift__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + # &= # keep in sync with `__ixor__` and `__ior__` + @overload # type: ignore[misc] + def __iand__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... @overload - def __iand__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __iand__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __iand__(self: _IntegerArrayT, other: _ArrayLikeInt_co, /) -> _IntegerArrayT: ... @overload - def __iand__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __iand__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + # ^= # keep in sync with `__iand__` and `__ior__` + @overload # type: ignore[misc] + def __ixor__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... @overload - def __ixor__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __ixor__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __ixor__(self: _IntegerArrayT, other: _ArrayLikeInt_co, /) -> _IntegerArrayT: ... @overload - def __ixor__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __ixor__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + # |= # keep in sync with `__iand__` and `__ixor__` + @overload # type: ignore[misc] + def __ior__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... 
@overload - def __ior__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __ior__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __ior__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - - # - @overload - def __imatmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __imatmul__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __imatmul__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __imatmul__(self: NDArray[complexfloating], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __ior__(self: _IntegerArrayT, other: _ArrayLikeInt_co, /) -> _IntegerArrayT: ... @overload - def __imatmul__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __ior__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... # def __dlpack__( diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 13aa3336fbd4..aeb32bed01f9 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -1,5 +1,4 @@ # pyright: reportIncompatibleMethodOverride=false -# ruff: noqa: ANN001, ANN002, ANN003, ANN201, ANN202 ANN204, ANN401 import datetime as dt from _typeshed import Incomplete @@ -290,6 +289,9 @@ _ScalarT = TypeVar("_ScalarT", bound=generic) _ScalarT_co = TypeVar("_ScalarT_co", bound=generic, covariant=True) _NumberT = TypeVar("_NumberT", bound=number) _RealNumberT = TypeVar("_RealNumberT", bound=floating | integer) + +_Ignored: TypeAlias = object + # A subset of `MaskedArray` that can be parametrized w.r.t. 
`np.generic` _MaskedArray: TypeAlias = MaskedArray[_AnyShape, dtype[_ScalarT]] @@ -1187,124 +1189,6 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def __rpow__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... - # Keep in sync with `ndarray.__iadd__` - @overload - def __iadd__( - self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __iadd__(self: _MaskedArray[integer], other: _ArrayLikeInt_co, /) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __iadd__( - self: _MaskedArray[floating], other: _ArrayLikeFloat_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __iadd__( - self: _MaskedArray[complexfloating], other: _ArrayLikeComplex_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __iadd__( - self: _MaskedArray[timedelta64 | datetime64], other: _ArrayLikeTD64_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __iadd__(self: _MaskedArray[bytes_], other: _ArrayLikeBytes_co, /) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __iadd__( - self: MaskedArray[Any, dtype[str_] | dtypes.StringDType], - other: _ArrayLikeStr_co | _ArrayLikeString_co, - /, - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __iadd__( - self: _MaskedArray[object_], other: Any, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - - # Keep in sync with `ndarray.__isub__` - @overload - def __isub__(self: _MaskedArray[integer], other: _ArrayLikeInt_co, /) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __isub__( - self: _MaskedArray[floating], other: _ArrayLikeFloat_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __isub__( - self: _MaskedArray[complexfloating], other: _ArrayLikeComplex_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... 
- @overload - def __isub__( - self: _MaskedArray[timedelta64 | datetime64], other: _ArrayLikeTD64_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __isub__( - self: _MaskedArray[object_], other: Any, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - - # Keep in sync with `ndarray.__imul__` - @overload - def __imul__( - self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __imul__( - self: MaskedArray[Any, dtype[integer] | dtype[character] | dtypes.StringDType], other: _ArrayLikeInt_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __imul__( - self: _MaskedArray[floating | timedelta64], other: _ArrayLikeFloat_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __imul__( - self: _MaskedArray[complexfloating], other: _ArrayLikeComplex_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __imul__( - self: _MaskedArray[object_], other: Any, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - - # Keep in sync with `ndarray.__ifloordiv__` - @overload - def __ifloordiv__(self: _MaskedArray[integer], other: _ArrayLikeInt_co, /) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __ifloordiv__( - self: _MaskedArray[floating | timedelta64], other: _ArrayLikeFloat_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __ifloordiv__( - self: _MaskedArray[object_], other: Any, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - - # Keep in sync with `ndarray.__itruediv__` - @overload - def __itruediv__( - self: _MaskedArray[floating | timedelta64], other: _ArrayLikeFloat_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __itruediv__( - self: _MaskedArray[complexfloating], - other: _ArrayLikeComplex_co, - /, - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __itruediv__( - self: _MaskedArray[object_], other: Any, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... 
- - # Keep in sync with `ndarray.__ipow__` - @overload - def __ipow__(self: _MaskedArray[integer], other: _ArrayLikeInt_co, /) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __ipow__( - self: _MaskedArray[floating], other: _ArrayLikeFloat_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __ipow__( - self: _MaskedArray[complexfloating], other: _ArrayLikeComplex_co, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __ipow__( - self: _MaskedArray[object_], other: Any, / - ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... - # @property # type: ignore[misc] def imag(self: _HasDTypeWithRealAndImag[object, _ScalarT], /) -> MaskedArray[_ShapeT_co, dtype[_ScalarT]]: ... diff --git a/numpy/matrixlib/defmatrix.pyi b/numpy/matrixlib/defmatrix.pyi index b5345a2d0d7c..40c747d1ae3d 100644 --- a/numpy/matrixlib/defmatrix.pyi +++ b/numpy/matrixlib/defmatrix.pyi @@ -54,12 +54,10 @@ class matrix(np.ndarray[_ShapeT_co, _DTypeT_co]): # def __mul__(self, other: ArrayLike, /) -> _Matrix[Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] def __rmul__(self, other: ArrayLike, /) -> _Matrix[Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] - def __imul__(self, other: ArrayLike, /) -> Self: ... # def __pow__(self, other: ArrayLike, /) -> _Matrix[Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] def __rpow__(self, other: ArrayLike, /) -> _Matrix[Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] - def __ipow__(self, other: ArrayLike, /) -> Self: ... 
# keep in sync with `prod` and `mean` @overload # type: ignore[override] From 0066c73f573daafa01cbb975fde7f21d2b045ccb Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Sat, 18 Oct 2025 19:27:16 +0200 Subject: [PATCH 0704/1718] TYP: ``linalg.tensordot``: fix inconsistent signature and simplify overloads (#30013) * TYP: ``linalg.tensordot``: fix inconsistent signature and simplify overloads * TYP: ``linalg.tensordot``: add some type-tests * TYP: update type-tests for ``_core.numeric.tensordot`` --- numpy/_core/numeric.pyi | 34 +++++----------- numpy/linalg/_linalg.pyi | 45 +++++++++++++++++++++- numpy/typing/tests/data/reveal/linalg.pyi | 7 ++++ numpy/typing/tests/data/reveal/numeric.pyi | 17 ++++---- 4 files changed, 68 insertions(+), 35 deletions(-) diff --git a/numpy/_core/numeric.pyi b/numpy/_core/numeric.pyi index 314bce708306..48576c77fd56 100644 --- a/numpy/_core/numeric.pyi +++ b/numpy/_core/numeric.pyi @@ -628,6 +628,7 @@ __all__ = [ _T = TypeVar("_T") _ScalarT = TypeVar("_ScalarT", bound=generic) +_NumericScalarT = TypeVar("_NumericScalarT", bound=number | timedelta64 | object_) _DTypeT = TypeVar("_DTypeT", bound=dtype) _ArrayT = TypeVar("_ArrayT", bound=np.ndarray[Any, Any]) _ShapeT = TypeVar("_ShapeT", bound=_Shape) @@ -1071,54 +1072,37 @@ def outer( out: _ArrayT, ) -> _ArrayT: ... +# keep in sync with numpy.linalg._linalg.tensordot (ignoring `/, *`) @overload def tensordot( - a: _ArrayLike[Never], - b: _ArrayLike[Never], + a: _ArrayLike[_NumericScalarT], + b: _ArrayLike[_NumericScalarT], axes: int | tuple[_ShapeLike, _ShapeLike] = 2, -) -> NDArray[Any]: ... +) -> NDArray[_NumericScalarT]: ... @overload def tensordot( a: _ArrayLikeBool_co, b: _ArrayLikeBool_co, axes: int | tuple[_ShapeLike, _ShapeLike] = 2, -) -> NDArray[np.bool]: ... -@overload -def tensordot( - a: _ArrayLikeUInt_co, - b: _ArrayLikeUInt_co, - axes: int | tuple[_ShapeLike, _ShapeLike] = 2, -) -> NDArray[unsignedinteger]: ... +) -> NDArray[bool_]: ... 
@overload def tensordot( a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, axes: int | tuple[_ShapeLike, _ShapeLike] = 2, -) -> NDArray[signedinteger]: ... +) -> NDArray[int_ | Any]: ... @overload def tensordot( a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, axes: int | tuple[_ShapeLike, _ShapeLike] = 2, -) -> NDArray[floating]: ... +) -> NDArray[float64 | Any]: ... @overload def tensordot( a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, axes: int | tuple[_ShapeLike, _ShapeLike] = 2, -) -> NDArray[complexfloating]: ... -@overload -def tensordot( - a: _ArrayLikeTD64_co, - b: _ArrayLikeTD64_co, - axes: int | tuple[_ShapeLike, _ShapeLike] = 2, -) -> NDArray[timedelta64]: ... -@overload -def tensordot( - a: _ArrayLikeObject_co, - b: _ArrayLikeObject_co, - axes: int | tuple[_ShapeLike, _ShapeLike] = 2, -) -> NDArray[object_]: ... +) -> NDArray[complex128 | Any]: ... @overload def roll( diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index ca4c6e0c77da..cc22e0f9dc39 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -25,7 +25,6 @@ from numpy import ( vecdot, ) from numpy._core.fromnumeric import matrix_transpose -from numpy._core.numeric import tensordot from numpy._globals import _NoValueType from numpy._typing import ( ArrayLike, @@ -41,6 +40,7 @@ from numpy._typing import ( _ArrayLikeTD64_co, _ArrayLikeUInt_co, _NestedSequence, + _ShapeLike, ) from numpy.linalg import LinAlgError @@ -80,6 +80,7 @@ __all__ = [ ] _NumberT = TypeVar("_NumberT", bound=np.number) +_NumericScalarT = TypeVar("_NumericScalarT", bound=np.number | np.timedelta64 | np.object_) _ModeKind: TypeAlias = L["reduced", "complete", "r", "raw"] @@ -438,6 +439,48 @@ def vector_norm( keepdims: bool = False, ) -> Any: ... +# keep in sync with numpy._core.numeric.tensordot (ignoring `/, *`) +@overload +def tensordot( + a: _ArrayLike[_NumericScalarT], + b: _ArrayLike[_NumericScalarT], + /, + *, + axes: int | tuple[_ShapeLike, _ShapeLike] = 2, +) -> NDArray[_NumericScalarT]: ... 
+@overload +def tensordot( + a: _ArrayLikeBool_co, + b: _ArrayLikeBool_co, + /, + *, + axes: int | tuple[_ShapeLike, _ShapeLike] = 2, +) -> NDArray[np.bool_]: ... +@overload +def tensordot( + a: _ArrayLikeInt_co, + b: _ArrayLikeInt_co, + /, + *, + axes: int | tuple[_ShapeLike, _ShapeLike] = 2, +) -> NDArray[np.int_ | Any]: ... +@overload +def tensordot( + a: _ArrayLikeFloat_co, + b: _ArrayLikeFloat_co, + /, + *, + axes: int | tuple[_ShapeLike, _ShapeLike] = 2, +) -> NDArray[np.float64 | Any]: ... +@overload +def tensordot( + a: _ArrayLikeComplex_co, + b: _ArrayLikeComplex_co, + /, + *, + axes: int | tuple[_ShapeLike, _ShapeLike] = 2, +) -> NDArray[np.complex128 | Any]: ... + # TODO: Returns a scalar or array def multi_dot( arrays: Iterable[_ArrayLikeComplex_co | _ArrayLikeObject_co | _ArrayLikeTD64_co], diff --git a/numpy/typing/tests/data/reveal/linalg.pyi b/numpy/typing/tests/data/reveal/linalg.pyi index cd8057651ad1..60056516def0 100644 --- a/numpy/typing/tests/data/reveal/linalg.pyi +++ b/numpy/typing/tests/data/reveal/linalg.pyi @@ -132,6 +132,13 @@ assert_type(np.linalg.vector_norm(AR_f8), np.floating) assert_type(np.linalg.vector_norm(AR_c16), np.floating) assert_type(np.linalg.vector_norm(AR_S), np.floating) +assert_type(np.linalg.tensordot(AR_b, AR_b), npt.NDArray[np.bool]) +assert_type(np.linalg.tensordot(AR_i8, AR_i8), npt.NDArray[np.int64]) +assert_type(np.linalg.tensordot(AR_f8, AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.tensordot(AR_c16, AR_c16), npt.NDArray[np.complex128]) +assert_type(np.linalg.tensordot(AR_m, AR_m), npt.NDArray[np.timedelta64]) +assert_type(np.linalg.tensordot(AR_O, AR_O), npt.NDArray[np.object_]) + assert_type(np.linalg.multi_dot([AR_i8, AR_i8]), Any) assert_type(np.linalg.multi_dot([AR_i8, AR_f8]), Any) assert_type(np.linalg.multi_dot([AR_f8, AR_c16]), Any) diff --git a/numpy/typing/tests/data/reveal/numeric.pyi b/numpy/typing/tests/data/reveal/numeric.pyi index 7c1ea8958e3b..247294cf34c6 100644 --- 
a/numpy/typing/tests/data/reveal/numeric.pyi +++ b/numpy/typing/tests/data/reveal/numeric.pyi @@ -72,16 +72,15 @@ assert_type(np.outer(AR_i8, AR_c16), npt.NDArray[np.complexfloating]) assert_type(np.outer(AR_i8, AR_m), npt.NDArray[np.timedelta64]) assert_type(np.outer(AR_O, AR_O), npt.NDArray[np.object_]) -assert_type(np.tensordot(B, AR_i8), npt.NDArray[np.signedinteger]) -assert_type(np.tensordot(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) -assert_type(np.tensordot(AR_i8, AR_i8, axes=0), npt.NDArray[np.signedinteger]) -assert_type(np.tensordot(AR_i8, AR_i8, axes=(0, 1)), npt.NDArray[np.signedinteger]) +assert_type(np.tensordot(B, AR_i8), npt.NDArray[np.int_ | Any]) assert_type(np.tensordot(AR_b, AR_b), npt.NDArray[np.bool]) -assert_type(np.tensordot(AR_b, AR_u8), npt.NDArray[np.unsignedinteger]) -assert_type(np.tensordot(AR_i8, AR_b), npt.NDArray[np.signedinteger]) -assert_type(np.tensordot(AR_i8, AR_f8), npt.NDArray[np.floating]) -assert_type(np.tensordot(AR_i8, AR_c16), npt.NDArray[np.complexfloating]) -assert_type(np.tensordot(AR_i8, AR_m), npt.NDArray[np.timedelta64]) +assert_type(np.tensordot(AR_u8, AR_u8), npt.NDArray[np.uint64]) +assert_type(np.tensordot(AR_i8, AR_i8), npt.NDArray[np.int64]) +assert_type(np.tensordot(AR_i8, AR_i8, axes=0), npt.NDArray[np.int64]) +assert_type(np.tensordot(AR_i8, AR_i8, axes=(0, 1)), npt.NDArray[np.int64]) +assert_type(np.tensordot(AR_f8, AR_f8), npt.NDArray[np.float64]) +assert_type(np.tensordot(AR_c16, AR_c16), npt.NDArray[np.complex128]) +assert_type(np.tensordot(AR_m, AR_m), npt.NDArray[np.timedelta64]) assert_type(np.tensordot(AR_O, AR_O), npt.NDArray[np.object_]) assert_type(np.isscalar(i8), bool) From d04dcce7d4f89c30217a6b3bcd26ecab46e99a34 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 Oct 2025 16:48:39 -0600 Subject: [PATCH 0705/1718] MAINT: Bump astral-sh/setup-uv from 7.1.0 to 7.1.1 (#30030) Bumps 
[astral-sh/setup-uv](https://github.com/astral-sh/setup-uv) from 7.1.0 to 7.1.1. - [Release notes](https://github.com/astral-sh/setup-uv/releases) - [Commits](https://github.com/astral-sh/setup-uv/compare/3259c6206f993105e3a61b142c2d97bf4b9ef83d...2ddd2b9cb38ad8efd50337e8ab201519a34c9f24) --- updated-dependencies: - dependency-name: astral-sh/setup-uv dependency-version: 7.1.1 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/mypy.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml index b64cd8becd8c..753b75330aea 100644 --- a/.github/workflows/mypy.yml +++ b/.github/workflows/mypy.yml @@ -58,7 +58,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: astral-sh/setup-uv@3259c6206f993105e3a61b142c2d97bf4b9ef83d # v7.1.0 + - uses: astral-sh/setup-uv@2ddd2b9cb38ad8efd50337e8ab201519a34c9f24 # v7.1.1 with: python-version: ${{ matrix.os_python[1] }} activate-environment: true From 473039a159b200c1a83f4cc8633e2bed075894d8 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Tue, 21 Oct 2025 00:56:17 +0200 Subject: [PATCH 0706/1718] TYP: `testing.check_support_sve`: fix inconsistent parameter default value (#30032) --- numpy/testing/_private/utils.pyi | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/numpy/testing/_private/utils.pyi b/numpy/testing/_private/utils.pyi index 670424f6a599..016bbecf4604 100644 --- a/numpy/testing/_private/utils.pyi +++ b/numpy/testing/_private/utils.pyi @@ -97,7 +97,6 @@ _Tss = ParamSpec("_Tss") _ET = TypeVar("_ET", bound=BaseException, default=BaseException) _FT = TypeVar("_FT", bound=Callable[..., Any]) _W_co = TypeVar("_W_co", bound=_WarnLog | None, default=_WarnLog | None, covariant=True) -_T_or_bool = TypeVar("_T_or_bool", default=bool) _StrLike: 
TypeAlias = str | bytes _RegexLike: TypeAlias = _StrLike | Pattern[Any] @@ -460,7 +459,7 @@ def temppath( ) -> _GeneratorContextManager[AnyStr]: ... # -def check_support_sve(__cache: list[_T_or_bool] = []) -> _T_or_bool: ... # noqa: PYI063 +def check_support_sve(__cache: list[bool] = ..., /) -> bool: ... # stubdefaulter: ignore[missing-default] # def decorate_methods( From 874b611f8526f4fbf2e3718db8d7e581d4a1b78f Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Tue, 21 Oct 2025 02:08:18 +0200 Subject: [PATCH 0707/1718] TYP: fix stubtest errors in ``numpy.polynomial.*`` (#30031) * TYP: `polynomial.polyutils._fit`: remove nonexistent parameter stub * TYP: `polynomial.polyutils`: fix `_vander_nd[_flat]`, `_valnd`, `_gridnd`, and `_pow` * TYP: `polynomial._polybase.ABCPolyBase`: fix `staticmethod`s that should be `classmethod`s * TYP: `polynomial._polybase.ABCPolyBase`: fix abstract properties and `__call__` return * TYP: `polynomial._polybase.ABCPolyBase`: stub missing `cutdeg` parameter * TYP: `polynomial._polybase.ABCPolyBase`: reconcile abstract properties with subclasses * TYP: `polynomial`: fix stubtest errors in module level functions * TYP: `polynomial`: remove nonexistent `*valfromroots` function stubs * TYP: `polynomial.polynomial`: inline `polyvalfromroots` function signature * TYP: `polynomial`: support for non-float64 dtypes in `domain` and `window` attrs * TYP: `polynomial`: restore covariance * TYP: `polynomial`: update type-tests --- numpy/polynomial/_polybase.pyi | 109 ++++----- numpy/polynomial/_polytypes.pyi | 215 ++---------------- numpy/polynomial/chebyshev.pyi | 79 ++++--- numpy/polynomial/hermite.pyi | 73 +++--- numpy/polynomial/hermite_e.pyi | 73 +++--- numpy/polynomial/laguerre.pyi | 67 +++--- numpy/polynomial/legendre.pyi | 67 +++--- numpy/polynomial/polynomial.pyi | 99 +++++--- numpy/polynomial/polyutils.pyi | 182 ++++++++++++--- .../tests/data/reveal/polynomial_polybase.pyi | 30 ++- .../tests/data/reveal/polynomial_series.pyi | 14 +- 11 
files changed, 490 insertions(+), 518 deletions(-) diff --git a/numpy/polynomial/_polybase.pyi b/numpy/polynomial/_polybase.pyi index d922a08ffa9d..fcece8376170 100644 --- a/numpy/polynomial/_polybase.pyi +++ b/numpy/polynomial/_polybase.pyi @@ -1,13 +1,11 @@ import abc import decimal -import numbers -from collections.abc import Iterator, Mapping, Sequence +from collections.abc import Iterator, Sequence from typing import ( Any, ClassVar, Generic, Literal, - LiteralString, Self, SupportsIndex, TypeAlias, @@ -39,34 +37,36 @@ from ._polytypes import ( __all__ = ["ABCPolyBase"] -_NameCo = TypeVar( - "_NameCo", - bound=LiteralString | None, - covariant=True, - default=LiteralString | None -) -_Other = TypeVar("_Other", bound=ABCPolyBase) - +_NameT_co = TypeVar("_NameT_co", bound=str | None, default=str | None, covariant=True) +_PolyT = TypeVar("_PolyT", bound=ABCPolyBase) _AnyOther: TypeAlias = ABCPolyBase | _CoefLike_co | _SeriesLikeCoef_co -_Hundred: TypeAlias = Literal[100] -class ABCPolyBase(Generic[_NameCo], abc.ABC): - __hash__: ClassVar[None] # type: ignore[assignment] # pyright: ignore[reportIncompatibleMethodOverride] - __array_ufunc__: ClassVar[None] +class ABCPolyBase(Generic[_NameT_co], abc.ABC): + __hash__: ClassVar[None] = None # type: ignore[assignment] # pyright: ignore[reportIncompatibleMethodOverride] + __array_ufunc__: ClassVar[None] = None + maxpower: ClassVar[Literal[100]] = 100 - maxpower: ClassVar[_Hundred] - _superscript_mapping: ClassVar[Mapping[int, str]] - _subscript_mapping: ClassVar[Mapping[int, str]] - _use_unicode: ClassVar[bool] + _superscript_mapping: ClassVar[dict[int, str]] = ... + _subscript_mapping: ClassVar[dict[int, str]] = ... + _use_unicode: ClassVar[bool] = ... - basis_name: _NameCo - coef: _CoefSeries - domain: _Array2[np.inexact | np.object_] - window: _Array2[np.inexact | np.object_] + _symbol: str + @property + def symbol(self, /) -> str: ... 
+ + @property + @abc.abstractmethod + def domain(self) -> _Array2[np.float64 | Any]: ... - _symbol: LiteralString @property - def symbol(self, /) -> LiteralString: ... + @abc.abstractmethod + def window(self) -> _Array2[np.float64 | Any]: ... + + @property + @abc.abstractmethod + def basis_name(self) -> _NameT_co: ... + + coef: _CoefSeries def __init__( self, @@ -78,39 +78,17 @@ class ABCPolyBase(Generic[_NameCo], abc.ABC): ) -> None: ... @overload - def __call__(self, /, arg: _Other) -> _Other: ... - # TODO: Once `_ShapeT@ndarray` is covariant and bounded (see #26081), - # additionally include 0-d arrays as input types with scalar return type. + def __call__(self, /, arg: _PolyT) -> _PolyT: ... @overload - def __call__( - self, - /, - arg: _FloatLike_co | decimal.Decimal | numbers.Real | np.object_, - ) -> np.float64 | np.complex128: ... + def __call__(self, /, arg: _FloatLike_co | decimal.Decimal) -> np.float64 | Any: ... @overload - def __call__( - self, - /, - arg: _NumberLike_co | numbers.Complex, - ) -> np.complex128: ... + def __call__(self, /, arg: _NumberLike_co) -> np.complex128 | Any: ... @overload - def __call__(self, /, arg: _ArrayLikeFloat_co) -> ( - npt.NDArray[np.float64] - | npt.NDArray[np.complex128] - | npt.NDArray[np.object_] - ): ... + def __call__(self, /, arg: _ArrayLikeFloat_co) -> npt.NDArray[np.float64 | Any]: ... @overload - def __call__( - self, - /, - arg: _ArrayLikeComplex_co, - ) -> npt.NDArray[np.complex128] | npt.NDArray[np.object_]: ... + def __call__(self, /, arg: _ArrayLikeComplex_co) -> npt.NDArray[np.complex128 | Any]: ... @overload - def __call__( - self, - /, - arg: _ArrayLikeCoefObject_co, - ) -> npt.NDArray[np.object_]: ... + def __call__(self, /, arg: _ArrayLikeCoefObject_co) -> npt.NDArray[np.object_]: ... def __format__(self, fmt_str: str, /) -> str: ... def __eq__(self, x: object, /) -> bool: ... @@ -133,21 +111,18 @@ class ABCPolyBase(Generic[_NameCo], abc.ABC): def __rmod__(self, x: _AnyOther, /) -> Self: ... 
def __rdivmod__(self, x: _AnyOther, /) -> _Tuple2[Self]: ... def __len__(self, /) -> int: ... - def __iter__(self, /) -> Iterator[np.inexact | object]: ... + def __iter__(self, /) -> Iterator[np.float64 | Any]: ... def __getstate__(self, /) -> dict[str, Any]: ... def __setstate__(self, dict: dict[str, Any], /) -> None: ... def has_samecoef(self, /, other: ABCPolyBase) -> bool: ... def has_samedomain(self, /, other: ABCPolyBase) -> bool: ... def has_samewindow(self, /, other: ABCPolyBase) -> bool: ... - @overload - def has_sametype(self, /, other: ABCPolyBase) -> TypeIs[Self]: ... - @overload - def has_sametype(self, /, other: object) -> Literal[False]: ... + def has_sametype(self, /, other: object) -> TypeIs[Self]: ... def copy(self, /) -> Self: ... def degree(self, /) -> int: ... - def cutdeg(self, /) -> Self: ... + def cutdeg(self, /, deg: int) -> Self: ... def trim(self, /, tol: _FloatLike_co = 0) -> Self: ... def truncate(self, /, size: _AnyInt) -> Self: ... @@ -156,18 +131,18 @@ class ABCPolyBase(Generic[_NameCo], abc.ABC): self, /, domain: _SeriesLikeCoef_co | None, - kind: type[_Other], + kind: type[_PolyT], window: _SeriesLikeCoef_co | None = None, - ) -> _Other: ... + ) -> _PolyT: ... @overload def convert( self, /, domain: _SeriesLikeCoef_co | None = None, *, - kind: type[_Other], + kind: type[_PolyT], window: _SeriesLikeCoef_co | None = None, - ) -> _Other: ... + ) -> _PolyT: ... @overload def convert( self, @@ -278,7 +253,7 @@ class ABCPolyBase(Generic[_NameCo], abc.ABC): @classmethod def _str_term_unicode(cls, /, i: str, arg_str: str) -> str: ... - @staticmethod - def _str_term_ascii(i: str, arg_str: str) -> str: ... - @staticmethod - def _repr_latex_term(i: str, arg_str: str, needs_parens: bool) -> str: ... + @classmethod + def _str_term_ascii(cls, /, i: str, arg_str: str) -> str: ... + @classmethod + def _repr_latex_term(cls, /, i: str, arg_str: str, needs_parens: bool) -> str: ... 
diff --git a/numpy/polynomial/_polytypes.pyi b/numpy/polynomial/_polytypes.pyi index 7b003c5742c6..91cb27e3a923 100644 --- a/numpy/polynomial/_polytypes.pyi +++ b/numpy/polynomial/_polytypes.pyi @@ -4,7 +4,6 @@ from collections.abc import Callable, Sequence from typing import ( Any, Literal, - LiteralString, NoReturn, Protocol, Self, @@ -23,7 +22,6 @@ from numpy._typing import ( # array-likes _ArrayLikeFloat_co, _ArrayLikeNumber_co, - _ArrayLikeObject_co, _ComplexLike_co, _FloatLike_co, # scalar-likes @@ -113,22 +111,10 @@ _ArrayLikeCoef_co: TypeAlias = ( | _ArrayLikeCoefObject_co ) -_Name_co = TypeVar( - "_Name_co", - bound=LiteralString, - covariant=True, - default=LiteralString -) - -@type_check_only -class _Named(Protocol[_Name_co]): - @property - def __name__(self, /) -> _Name_co: ... - -_Line: TypeAlias = np.ndarray[tuple[Literal[1, 2]], np.dtype[_ScalarT]] +_Line: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] @type_check_only -class _FuncLine(_Named[_Name_co], Protocol[_Name_co]): +class _FuncLine(Protocol): @overload def __call__(self, /, off: _ScalarT, scl: _ScalarT) -> _Line[_ScalarT]: ... @overload @@ -151,7 +137,7 @@ class _FuncLine(_Named[_Name_co], Protocol[_Name_co]): ) -> _Line[np.object_]: ... @type_check_only -class _FuncFromRoots(_Named[_Name_co], Protocol[_Name_co]): +class _FuncFromRoots(Protocol): @overload def __call__(self, /, roots: _SeriesLikeFloat_co) -> _FloatSeries: ... @overload @@ -160,7 +146,7 @@ class _FuncFromRoots(_Named[_Name_co], Protocol[_Name_co]): def __call__(self, /, roots: _SeriesLikeCoef_co) -> _ObjectSeries: ... @type_check_only -class _FuncBinOp(_Named[_Name_co], Protocol[_Name_co]): +class _FuncBinOp(Protocol): @overload def __call__( self, @@ -191,7 +177,7 @@ class _FuncBinOp(_Named[_Name_co], Protocol[_Name_co]): ) -> _ObjectSeries: ... 
@type_check_only -class _FuncUnOp(_Named[_Name_co], Protocol[_Name_co]): +class _FuncUnOp(Protocol): @overload def __call__(self, /, c: _SeriesLikeFloat_co) -> _FloatSeries: ... @overload @@ -200,7 +186,7 @@ class _FuncUnOp(_Named[_Name_co], Protocol[_Name_co]): def __call__(self, /, c: _SeriesLikeCoef_co) -> _ObjectSeries: ... @type_check_only -class _FuncPoly2Ortho(_Named[_Name_co], Protocol[_Name_co]): +class _FuncPoly2Ortho(Protocol): @overload def __call__(self, /, pol: _SeriesLikeFloat_co) -> _FloatSeries: ... @overload @@ -209,7 +195,7 @@ class _FuncPoly2Ortho(_Named[_Name_co], Protocol[_Name_co]): def __call__(self, /, pol: _SeriesLikeCoef_co) -> _ObjectSeries: ... @type_check_only -class _FuncPow(_Named[_Name_co], Protocol[_Name_co]): +class _FuncPow(Protocol): @overload def __call__( self, @@ -236,7 +222,7 @@ class _FuncPow(_Named[_Name_co], Protocol[_Name_co]): ) -> _ObjectSeries: ... @type_check_only -class _FuncDer(_Named[_Name_co], Protocol[_Name_co]): +class _FuncDer(Protocol): @overload def __call__( self, @@ -266,7 +252,7 @@ class _FuncDer(_Named[_Name_co], Protocol[_Name_co]): ) -> _ObjectArray: ... @type_check_only -class _FuncInteg(_Named[_Name_co], Protocol[_Name_co]): +class _FuncInteg(Protocol): @overload def __call__( self, @@ -302,58 +288,7 @@ class _FuncInteg(_Named[_Name_co], Protocol[_Name_co]): ) -> _ObjectArray: ... @type_check_only -class _FuncValFromRoots(_Named[_Name_co], Protocol[_Name_co]): - @overload - def __call__( - self, - /, - x: _FloatLike_co, - r: _FloatLike_co, - tensor: bool = ..., - ) -> np.floating: ... - @overload - def __call__( - self, - /, - x: _NumberLike_co, - r: _NumberLike_co, - tensor: bool = ..., - ) -> np.complexfloating: ... - @overload - def __call__( - self, - /, - x: _FloatLike_co | _ArrayLikeFloat_co, - r: _ArrayLikeFloat_co, - tensor: bool = ..., - ) -> _FloatArray: ... 
- @overload - def __call__( - self, - /, - x: _NumberLike_co | _ArrayLikeComplex_co, - r: _ArrayLikeComplex_co, - tensor: bool = ..., - ) -> _ComplexArray: ... - @overload - def __call__( - self, - /, - x: _CoefLike_co | _ArrayLikeCoef_co, - r: _ArrayLikeCoef_co, - tensor: bool = ..., - ) -> _ObjectArray: ... - @overload - def __call__( - self, - /, - x: _CoefLike_co, - r: _CoefLike_co, - tensor: bool = ..., - ) -> _SupportsCoefOps[Any]: ... - -@type_check_only -class _FuncVal(_Named[_Name_co], Protocol[_Name_co]): +class _FuncVal(Protocol): @overload def __call__( self, @@ -404,7 +339,7 @@ class _FuncVal(_Named[_Name_co], Protocol[_Name_co]): ) -> _SupportsCoefOps[Any]: ... @type_check_only -class _FuncVal2D(_Named[_Name_co], Protocol[_Name_co]): +class _FuncVal2D(Protocol): @overload def __call__( self, @@ -455,7 +390,7 @@ class _FuncVal2D(_Named[_Name_co], Protocol[_Name_co]): ) -> _SupportsCoefOps[Any]: ... @type_check_only -class _FuncVal3D(_Named[_Name_co], Protocol[_Name_co]): +class _FuncVal3D(Protocol): @overload def __call__( self, @@ -517,58 +452,7 @@ _AnyValF: TypeAlias = Callable[ ] @type_check_only -class _FuncValND(_Named[_Name_co], Protocol[_Name_co]): - @overload - def __call__( - self, - val_f: _AnyValF, - c: _SeriesLikeFloat_co, - /, - *args: _FloatLike_co, - ) -> np.floating: ... - @overload - def __call__( - self, - val_f: _AnyValF, - c: _SeriesLikeComplex_co, - /, - *args: _NumberLike_co, - ) -> np.complexfloating: ... - @overload - def __call__( - self, - val_f: _AnyValF, - c: _ArrayLikeFloat_co, - /, - *args: _ArrayLikeFloat_co, - ) -> _FloatArray: ... - @overload - def __call__( - self, - val_f: _AnyValF, - c: _ArrayLikeComplex_co, - /, - *args: _ArrayLikeComplex_co, - ) -> _ComplexArray: ... - @overload - def __call__( - self, - val_f: _AnyValF, - c: _SeriesLikeObject_co, - /, - *args: _CoefObjectLike_co, - ) -> _SupportsCoefOps[Any]: ... 
- @overload - def __call__( - self, - val_f: _AnyValF, - c: _ArrayLikeCoef_co, - /, - *args: _ArrayLikeCoef_co, - ) -> _ObjectArray: ... - -@type_check_only -class _FuncVander(_Named[_Name_co], Protocol[_Name_co]): +class _FuncVander(Protocol): @overload def __call__( self, @@ -601,7 +485,7 @@ class _FuncVander(_Named[_Name_co], Protocol[_Name_co]): _AnyDegrees: TypeAlias = Sequence[SupportsIndex] @type_check_only -class _FuncVander2D(_Named[_Name_co], Protocol[_Name_co]): +class _FuncVander2D(Protocol): @overload def __call__( self, @@ -636,7 +520,7 @@ class _FuncVander2D(_Named[_Name_co], Protocol[_Name_co]): ) -> _CoefArray: ... @type_check_only -class _FuncVander3D(_Named[_Name_co], Protocol[_Name_co]): +class _FuncVander3D(Protocol): @overload def __call__( self, @@ -674,53 +558,10 @@ class _FuncVander3D(_Named[_Name_co], Protocol[_Name_co]): deg: _AnyDegrees, ) -> _CoefArray: ... -# keep in sync with the broadest overload of `._FuncVander` -_AnyFuncVander: TypeAlias = Callable[ - [npt.ArrayLike, SupportsIndex], - _CoefArray, -] - -@type_check_only -class _FuncVanderND(_Named[_Name_co], Protocol[_Name_co]): - @overload - def __call__( - self, - /, - vander_fs: Sequence[_AnyFuncVander], - points: Sequence[_ArrayLikeFloat_co], - degrees: Sequence[SupportsIndex], - ) -> _FloatArray: ... - @overload - def __call__( - self, - /, - vander_fs: Sequence[_AnyFuncVander], - points: Sequence[_ArrayLikeComplex_co], - degrees: Sequence[SupportsIndex], - ) -> _ComplexArray: ... - @overload - def __call__( - self, - /, - vander_fs: Sequence[_AnyFuncVander], - points: Sequence[ - _ArrayLikeObject_co | _ArrayLikeComplex_co, - ], - degrees: Sequence[SupportsIndex], - ) -> _ObjectArray: ... - @overload - def __call__( - self, - /, - vander_fs: Sequence[_AnyFuncVander], - points: Sequence[npt.ArrayLike], - degrees: Sequence[SupportsIndex], - ) -> _CoefArray: ... 
- _FullFitResult: TypeAlias = Sequence[np.inexact | np.int32] @type_check_only -class _FuncFit(_Named[_Name_co], Protocol[_Name_co]): +class _FuncFit(Protocol): @overload def __call__( self, @@ -827,7 +668,7 @@ class _FuncFit(_Named[_Name_co], Protocol[_Name_co]): ) -> tuple[_ObjectArray, _FullFitResult]: ... @type_check_only -class _FuncRoots(_Named[_Name_co], Protocol[_Name_co]): +class _FuncRoots(Protocol): @overload def __call__( self, @@ -846,7 +687,7 @@ class _FuncRoots(_Named[_Name_co], Protocol[_Name_co]): _Companion: TypeAlias = np.ndarray[tuple[int, int], np.dtype[_ScalarT]] @type_check_only -class _FuncCompanion(_Named[_Name_co], Protocol[_Name_co]): +class _FuncCompanion(Protocol): @overload def __call__( self, @@ -863,7 +704,7 @@ class _FuncCompanion(_Named[_Name_co], Protocol[_Name_co]): def __call__(self, /, c: _SeriesLikeCoef_co) -> _Companion[np.object_]: ... @type_check_only -class _FuncGauss(_Named[_Name_co], Protocol[_Name_co]): +class _FuncGauss(Protocol): def __call__( self, /, @@ -871,22 +712,14 @@ class _FuncGauss(_Named[_Name_co], Protocol[_Name_co]): ) -> _Tuple2[_Series[np.float64]]: ... @type_check_only -class _FuncWeight(_Named[_Name_co], Protocol[_Name_co]): +class _FuncWeight(Protocol): @overload - def __call__( - self, - /, - c: _ArrayLikeFloat_co, - ) -> npt.NDArray[np.float64]: ... + def __call__(self, /, x: _ArrayLikeFloat_co) -> npt.NDArray[np.float64]: ... @overload - def __call__( - self, - /, - c: _ArrayLikeComplex_co, - ) -> npt.NDArray[np.complex128]: ... + def __call__(self, /, x: _ArrayLikeComplex_co) -> npt.NDArray[np.complex128]: ... @overload - def __call__(self, /, c: _ArrayLikeCoef_co) -> _ObjectArray: ... + def __call__(self, /, x: _ArrayLikeCoef_co) -> _ObjectArray: ... @type_check_only -class _FuncPts(_Named[_Name_co], Protocol[_Name_co]): +class _FuncPts(Protocol): def __call__(self, /, npts: _AnyInt) -> _Series[np.float64]: ... 
diff --git a/numpy/polynomial/chebyshev.pyi b/numpy/polynomial/chebyshev.pyi index 71f8021bccc3..35043a001f10 100644 --- a/numpy/polynomial/chebyshev.pyi +++ b/numpy/polynomial/chebyshev.pyi @@ -1,5 +1,14 @@ from collections.abc import Callable, Iterable -from typing import Any, Concatenate, Final, Literal as L, Self, TypeVar, overload +from typing import ( + Any, + ClassVar, + Concatenate, + Final, + Literal as L, + Self, + TypeVar, + overload, +) import numpy as np import numpy.typing as npt @@ -26,7 +35,6 @@ from ._polytypes import ( _FuncVal, _FuncVal2D, _FuncVal3D, - _FuncValFromRoots, _FuncVander, _FuncVander2D, _FuncVander3D, @@ -87,40 +95,39 @@ def _zseries_div( def _zseries_der(zs: npt.NDArray[_NumberOrObjectT]) -> _Series[_NumberOrObjectT]: ... def _zseries_int(zs: npt.NDArray[_NumberOrObjectT]) -> _Series[_NumberOrObjectT]: ... -poly2cheb: _FuncPoly2Ortho[L["poly2cheb"]] -cheb2poly: _FuncUnOp[L["cheb2poly"]] +poly2cheb: Final[_FuncPoly2Ortho] = ... +cheb2poly: Final[_FuncUnOp] = ... -chebdomain: Final[_Array2[np.float64]] -chebzero: Final[_Array1[np.int_]] -chebone: Final[_Array1[np.int_]] -chebx: Final[_Array2[np.int_]] +chebdomain: Final[_Array2[np.float64]] = ... +chebzero: Final[_Array1[np.int_]] = ... +chebone: Final[_Array1[np.int_]] = ... +chebx: Final[_Array2[np.int_]] = ... 
-chebline: _FuncLine[L["chebline"]] -chebfromroots: _FuncFromRoots[L["chebfromroots"]] -chebadd: _FuncBinOp[L["chebadd"]] -chebsub: _FuncBinOp[L["chebsub"]] -chebmulx: _FuncUnOp[L["chebmulx"]] -chebmul: _FuncBinOp[L["chebmul"]] -chebdiv: _FuncBinOp[L["chebdiv"]] -chebpow: _FuncPow[L["chebpow"]] -chebder: _FuncDer[L["chebder"]] -chebint: _FuncInteg[L["chebint"]] -chebval: _FuncVal[L["chebval"]] -chebval2d: _FuncVal2D[L["chebval2d"]] -chebval3d: _FuncVal3D[L["chebval3d"]] -chebvalfromroots: _FuncValFromRoots[L["chebvalfromroots"]] -chebgrid2d: _FuncVal2D[L["chebgrid2d"]] -chebgrid3d: _FuncVal3D[L["chebgrid3d"]] -chebvander: _FuncVander[L["chebvander"]] -chebvander2d: _FuncVander2D[L["chebvander2d"]] -chebvander3d: _FuncVander3D[L["chebvander3d"]] -chebfit: _FuncFit[L["chebfit"]] -chebcompanion: _FuncCompanion[L["chebcompanion"]] -chebroots: _FuncRoots[L["chebroots"]] -chebgauss: _FuncGauss[L["chebgauss"]] -chebweight: _FuncWeight[L["chebweight"]] -chebpts1: _FuncPts[L["chebpts1"]] -chebpts2: _FuncPts[L["chebpts2"]] +chebline: Final[_FuncLine] = ... +chebfromroots: Final[_FuncFromRoots] = ... +chebadd: Final[_FuncBinOp] = ... +chebsub: Final[_FuncBinOp] = ... +chebmulx: Final[_FuncUnOp] = ... +chebmul: Final[_FuncBinOp] = ... +chebdiv: Final[_FuncBinOp] = ... +chebpow: Final[_FuncPow] = ... +chebder: Final[_FuncDer] = ... +chebint: Final[_FuncInteg] = ... +chebval: Final[_FuncVal] = ... +chebval2d: Final[_FuncVal2D] = ... +chebval3d: Final[_FuncVal3D] = ... +chebgrid2d: Final[_FuncVal2D] = ... +chebgrid3d: Final[_FuncVal3D] = ... +chebvander: Final[_FuncVander] = ... +chebvander2d: Final[_FuncVander2D] = ... +chebvander3d: Final[_FuncVander3D] = ... +chebfit: Final[_FuncFit] = ... +chebcompanion: Final[_FuncCompanion] = ... +chebroots: Final[_FuncRoots] = ... +chebgauss: Final[_FuncGauss] = ... +chebweight: Final[_FuncWeight] = ... +chebpts1: Final[_FuncPts] = ... +chebpts2: Final[_FuncPts] = ... 
# keep in sync with `Chebyshev.interpolate` _RT = TypeVar("_RT", bound=np.number | np.bool | np.object_) @@ -144,6 +151,10 @@ def chebinterpolate( ) -> npt.NDArray[_RT]: ... class Chebyshev(ABCPolyBase[L["T"]]): + basis_name: ClassVar[L["T"]] = "T" # pyright: ignore[reportIncompatibleMethodOverride] + domain: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] + window: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] + @overload @classmethod def interpolate( diff --git a/numpy/polynomial/hermite.pyi b/numpy/polynomial/hermite.pyi index 07db43d0c000..9d8a63a6b95d 100644 --- a/numpy/polynomial/hermite.pyi +++ b/numpy/polynomial/hermite.pyi @@ -1,4 +1,4 @@ -from typing import Any, Final, Literal as L, TypeVar +from typing import Any, ClassVar, Final, Literal as L, TypeVar import numpy as np @@ -21,7 +21,6 @@ from ._polytypes import ( _FuncVal, _FuncVal2D, _FuncVal3D, - _FuncValFromRoots, _FuncVander, _FuncVander2D, _FuncVander3D, @@ -63,44 +62,46 @@ __all__ = [ "hermweight", ] -poly2herm: _FuncPoly2Ortho[L["poly2herm"]] -herm2poly: _FuncUnOp[L["herm2poly"]] +poly2herm: Final[_FuncPoly2Ortho] = ... +herm2poly: Final[_FuncUnOp] = ... -hermdomain: Final[_Array2[np.float64]] -hermzero: Final[_Array1[np.int_]] -hermone: Final[_Array1[np.int_]] -hermx: Final[_Array2[np.int_]] +hermdomain: Final[_Array2[np.float64]] = ... +hermzero: Final[_Array1[np.int_]] = ... +hermone: Final[_Array1[np.int_]] = ... +hermx: Final[_Array2[np.int_]] = ... 
-hermline: _FuncLine[L["hermline"]] -hermfromroots: _FuncFromRoots[L["hermfromroots"]] -hermadd: _FuncBinOp[L["hermadd"]] -hermsub: _FuncBinOp[L["hermsub"]] -hermmulx: _FuncUnOp[L["hermmulx"]] -hermmul: _FuncBinOp[L["hermmul"]] -hermdiv: _FuncBinOp[L["hermdiv"]] -hermpow: _FuncPow[L["hermpow"]] -hermder: _FuncDer[L["hermder"]] -hermint: _FuncInteg[L["hermint"]] -hermval: _FuncVal[L["hermval"]] -hermval2d: _FuncVal2D[L["hermval2d"]] -hermval3d: _FuncVal3D[L["hermval3d"]] -hermvalfromroots: _FuncValFromRoots[L["hermvalfromroots"]] -hermgrid2d: _FuncVal2D[L["hermgrid2d"]] -hermgrid3d: _FuncVal3D[L["hermgrid3d"]] -hermvander: _FuncVander[L["hermvander"]] -hermvander2d: _FuncVander2D[L["hermvander2d"]] -hermvander3d: _FuncVander3D[L["hermvander3d"]] -hermfit: _FuncFit[L["hermfit"]] -hermcompanion: _FuncCompanion[L["hermcompanion"]] -hermroots: _FuncRoots[L["hermroots"]] +hermline: Final[_FuncLine] = ... +hermfromroots: Final[_FuncFromRoots] = ... +hermadd: Final[_FuncBinOp] = ... +hermsub: Final[_FuncBinOp] = ... +hermmulx: Final[_FuncUnOp] = ... +hermmul: Final[_FuncBinOp] = ... +hermdiv: Final[_FuncBinOp] = ... +hermpow: Final[_FuncPow] = ... +hermder: Final[_FuncDer] = ... +hermint: Final[_FuncInteg] = ... +hermval: Final[_FuncVal] = ... +hermval2d: Final[_FuncVal2D] = ... +hermval3d: Final[_FuncVal3D] = ... +hermgrid2d: Final[_FuncVal2D] = ... +hermgrid3d: Final[_FuncVal3D] = ... +hermvander: Final[_FuncVander] = ... +hermvander2d: Final[_FuncVander2D] = ... +hermvander3d: Final[_FuncVander3D] = ... +hermfit: Final[_FuncFit] = ... +hermcompanion: Final[_FuncCompanion] = ... +hermroots: Final[_FuncRoots] = ... -_ND = TypeVar("_ND", bound=Any) +_ShapeT = TypeVar("_ShapeT", bound=tuple[int, ...]) def _normed_hermite_n( - x: np.ndarray[_ND, np.dtype[np.float64]], + x: np.ndarray[_ShapeT, np.dtype[np.float64]], n: int | np.intp, -) -> np.ndarray[_ND, np.dtype[np.float64]]: ... +) -> np.ndarray[_ShapeT, np.dtype[np.float64]]: ... 
-hermgauss: _FuncGauss[L["hermgauss"]] -hermweight: _FuncWeight[L["hermweight"]] +hermgauss: Final[_FuncGauss] = ... +hermweight: Final[_FuncWeight] = ... -class Hermite(ABCPolyBase[L["H"]]): ... +class Hermite(ABCPolyBase[L["H"]]): + basis_name: ClassVar[L["H"]] = "H" # pyright: ignore[reportIncompatibleMethodOverride] + domain: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] + window: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] diff --git a/numpy/polynomial/hermite_e.pyi b/numpy/polynomial/hermite_e.pyi index 94ad7248f268..b93975f246da 100644 --- a/numpy/polynomial/hermite_e.pyi +++ b/numpy/polynomial/hermite_e.pyi @@ -1,4 +1,4 @@ -from typing import Any, Final, Literal as L, TypeVar +from typing import Any, ClassVar, Final, Literal as L, TypeVar import numpy as np @@ -21,7 +21,6 @@ from ._polytypes import ( _FuncVal, _FuncVal2D, _FuncVal3D, - _FuncValFromRoots, _FuncVander, _FuncVander2D, _FuncVander3D, @@ -63,44 +62,46 @@ __all__ = [ "hermeweight", ] -poly2herme: _FuncPoly2Ortho[L["poly2herme"]] -herme2poly: _FuncUnOp[L["herme2poly"]] +poly2herme: Final[_FuncPoly2Ortho] = ... +herme2poly: Final[_FuncUnOp] = ... -hermedomain: Final[_Array2[np.float64]] -hermezero: Final[_Array1[np.int_]] -hermeone: Final[_Array1[np.int_]] -hermex: Final[_Array2[np.int_]] +hermedomain: Final[_Array2[np.float64]] = ... +hermezero: Final[_Array1[np.int_]] = ... +hermeone: Final[_Array1[np.int_]] = ... +hermex: Final[_Array2[np.int_]] = ... 
-hermeline: _FuncLine[L["hermeline"]] -hermefromroots: _FuncFromRoots[L["hermefromroots"]] -hermeadd: _FuncBinOp[L["hermeadd"]] -hermesub: _FuncBinOp[L["hermesub"]] -hermemulx: _FuncUnOp[L["hermemulx"]] -hermemul: _FuncBinOp[L["hermemul"]] -hermediv: _FuncBinOp[L["hermediv"]] -hermepow: _FuncPow[L["hermepow"]] -hermeder: _FuncDer[L["hermeder"]] -hermeint: _FuncInteg[L["hermeint"]] -hermeval: _FuncVal[L["hermeval"]] -hermeval2d: _FuncVal2D[L["hermeval2d"]] -hermeval3d: _FuncVal3D[L["hermeval3d"]] -hermevalfromroots: _FuncValFromRoots[L["hermevalfromroots"]] -hermegrid2d: _FuncVal2D[L["hermegrid2d"]] -hermegrid3d: _FuncVal3D[L["hermegrid3d"]] -hermevander: _FuncVander[L["hermevander"]] -hermevander2d: _FuncVander2D[L["hermevander2d"]] -hermevander3d: _FuncVander3D[L["hermevander3d"]] -hermefit: _FuncFit[L["hermefit"]] -hermecompanion: _FuncCompanion[L["hermecompanion"]] -hermeroots: _FuncRoots[L["hermeroots"]] +hermeline: Final[_FuncLine] = ... +hermefromroots: Final[_FuncFromRoots] = ... +hermeadd: Final[_FuncBinOp] = ... +hermesub: Final[_FuncBinOp] = ... +hermemulx: Final[_FuncUnOp] = ... +hermemul: Final[_FuncBinOp] = ... +hermediv: Final[_FuncBinOp] = ... +hermepow: Final[_FuncPow] = ... +hermeder: Final[_FuncDer] = ... +hermeint: Final[_FuncInteg] = ... +hermeval: Final[_FuncVal] = ... +hermeval2d: Final[_FuncVal2D] = ... +hermeval3d: Final[_FuncVal3D] = ... +hermegrid2d: Final[_FuncVal2D] = ... +hermegrid3d: Final[_FuncVal3D] = ... +hermevander: Final[_FuncVander] = ... +hermevander2d: Final[_FuncVander2D] = ... +hermevander3d: Final[_FuncVander3D] = ... +hermefit: Final[_FuncFit] = ... +hermecompanion: Final[_FuncCompanion] = ... +hermeroots: Final[_FuncRoots] = ... -_ND = TypeVar("_ND", bound=Any) +_ShapeT = TypeVar("_ShapeT", bound=tuple[int, ...]) def _normed_hermite_e_n( - x: np.ndarray[_ND, np.dtype[np.float64]], + x: np.ndarray[_ShapeT, np.dtype[np.float64]], n: int | np.intp, -) -> np.ndarray[_ND, np.dtype[np.float64]]: ... 
+) -> np.ndarray[_ShapeT, np.dtype[np.float64]]: ... -hermegauss: _FuncGauss[L["hermegauss"]] -hermeweight: _FuncWeight[L["hermeweight"]] +hermegauss: Final[_FuncGauss] = ... +hermeweight: Final[_FuncWeight] = ... -class HermiteE(ABCPolyBase[L["He"]]): ... +class HermiteE(ABCPolyBase[L["He"]]): + basis_name: ClassVar[L["He"]] = "He" # pyright: ignore[reportIncompatibleMethodOverride] + domain: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] + window: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] diff --git a/numpy/polynomial/laguerre.pyi b/numpy/polynomial/laguerre.pyi index a2b84f72bab7..8b70b899ed59 100644 --- a/numpy/polynomial/laguerre.pyi +++ b/numpy/polynomial/laguerre.pyi @@ -1,4 +1,4 @@ -from typing import Final, Literal as L +from typing import Any, ClassVar, Final, Literal as L import numpy as np @@ -21,7 +21,6 @@ from ._polytypes import ( _FuncVal, _FuncVal2D, _FuncVal3D, - _FuncValFromRoots, _FuncVander, _FuncVander2D, _FuncVander3D, @@ -63,37 +62,39 @@ __all__ = [ "lagweight", ] -poly2lag: _FuncPoly2Ortho[L["poly2lag"]] -lag2poly: _FuncUnOp[L["lag2poly"]] +poly2lag: Final[_FuncPoly2Ortho] = ... +lag2poly: Final[_FuncUnOp] = ... -lagdomain: Final[_Array2[np.float64]] -lagzero: Final[_Array1[np.int_]] -lagone: Final[_Array1[np.int_]] -lagx: Final[_Array2[np.int_]] +lagdomain: Final[_Array2[np.float64]] = ... +lagzero: Final[_Array1[np.int_]] = ... +lagone: Final[_Array1[np.int_]] = ... +lagx: Final[_Array2[np.int_]] = ... 
-lagline: _FuncLine[L["lagline"]] -lagfromroots: _FuncFromRoots[L["lagfromroots"]] -lagadd: _FuncBinOp[L["lagadd"]] -lagsub: _FuncBinOp[L["lagsub"]] -lagmulx: _FuncUnOp[L["lagmulx"]] -lagmul: _FuncBinOp[L["lagmul"]] -lagdiv: _FuncBinOp[L["lagdiv"]] -lagpow: _FuncPow[L["lagpow"]] -lagder: _FuncDer[L["lagder"]] -lagint: _FuncInteg[L["lagint"]] -lagval: _FuncVal[L["lagval"]] -lagval2d: _FuncVal2D[L["lagval2d"]] -lagval3d: _FuncVal3D[L["lagval3d"]] -lagvalfromroots: _FuncValFromRoots[L["lagvalfromroots"]] -laggrid2d: _FuncVal2D[L["laggrid2d"]] -laggrid3d: _FuncVal3D[L["laggrid3d"]] -lagvander: _FuncVander[L["lagvander"]] -lagvander2d: _FuncVander2D[L["lagvander2d"]] -lagvander3d: _FuncVander3D[L["lagvander3d"]] -lagfit: _FuncFit[L["lagfit"]] -lagcompanion: _FuncCompanion[L["lagcompanion"]] -lagroots: _FuncRoots[L["lagroots"]] -laggauss: _FuncGauss[L["laggauss"]] -lagweight: _FuncWeight[L["lagweight"]] +lagline: Final[_FuncLine] = ... +lagfromroots: Final[_FuncFromRoots] = ... +lagadd: Final[_FuncBinOp] = ... +lagsub: Final[_FuncBinOp] = ... +lagmulx: Final[_FuncUnOp] = ... +lagmul: Final[_FuncBinOp] = ... +lagdiv: Final[_FuncBinOp] = ... +lagpow: Final[_FuncPow] = ... +lagder: Final[_FuncDer] = ... +lagint: Final[_FuncInteg] = ... +lagval: Final[_FuncVal] = ... +lagval2d: Final[_FuncVal2D] = ... +lagval3d: Final[_FuncVal3D] = ... +laggrid2d: Final[_FuncVal2D] = ... +laggrid3d: Final[_FuncVal3D] = ... +lagvander: Final[_FuncVander] = ... +lagvander2d: Final[_FuncVander2D] = ... +lagvander3d: Final[_FuncVander3D] = ... +lagfit: Final[_FuncFit] = ... +lagcompanion: Final[_FuncCompanion] = ... +lagroots: Final[_FuncRoots] = ... +laggauss: Final[_FuncGauss] = ... +lagweight: Final[_FuncWeight] = ... -class Laguerre(ABCPolyBase[L["L"]]): ... +class Laguerre(ABCPolyBase[L["L"]]): + basis_name: ClassVar[L["L"]] = "L" # pyright: ignore[reportIncompatibleMethodOverride] + domain: _Array2[np.float64 | Any] = ... 
# pyright: ignore[reportIncompatibleMethodOverride] + window: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] diff --git a/numpy/polynomial/legendre.pyi b/numpy/polynomial/legendre.pyi index d81f3e6f54a4..53f8f7c210fa 100644 --- a/numpy/polynomial/legendre.pyi +++ b/numpy/polynomial/legendre.pyi @@ -1,4 +1,4 @@ -from typing import Final, Literal as L +from typing import Any, ClassVar, Final, Literal as L import numpy as np @@ -21,7 +21,6 @@ from ._polytypes import ( _FuncVal, _FuncVal2D, _FuncVal3D, - _FuncValFromRoots, _FuncVander, _FuncVander2D, _FuncVander3D, @@ -63,37 +62,39 @@ __all__ = [ "legweight", ] -poly2leg: _FuncPoly2Ortho[L["poly2leg"]] -leg2poly: _FuncUnOp[L["leg2poly"]] +poly2leg: Final[_FuncPoly2Ortho] = ... +leg2poly: Final[_FuncUnOp] = ... -legdomain: Final[_Array2[np.float64]] -legzero: Final[_Array1[np.int_]] -legone: Final[_Array1[np.int_]] -legx: Final[_Array2[np.int_]] +legdomain: Final[_Array2[np.float64]] = ... +legzero: Final[_Array1[np.int_]] = ... +legone: Final[_Array1[np.int_]] = ... +legx: Final[_Array2[np.int_]] = ... 
-legline: _FuncLine[L["legline"]] -legfromroots: _FuncFromRoots[L["legfromroots"]] -legadd: _FuncBinOp[L["legadd"]] -legsub: _FuncBinOp[L["legsub"]] -legmulx: _FuncUnOp[L["legmulx"]] -legmul: _FuncBinOp[L["legmul"]] -legdiv: _FuncBinOp[L["legdiv"]] -legpow: _FuncPow[L["legpow"]] -legder: _FuncDer[L["legder"]] -legint: _FuncInteg[L["legint"]] -legval: _FuncVal[L["legval"]] -legval2d: _FuncVal2D[L["legval2d"]] -legval3d: _FuncVal3D[L["legval3d"]] -legvalfromroots: _FuncValFromRoots[L["legvalfromroots"]] -leggrid2d: _FuncVal2D[L["leggrid2d"]] -leggrid3d: _FuncVal3D[L["leggrid3d"]] -legvander: _FuncVander[L["legvander"]] -legvander2d: _FuncVander2D[L["legvander2d"]] -legvander3d: _FuncVander3D[L["legvander3d"]] -legfit: _FuncFit[L["legfit"]] -legcompanion: _FuncCompanion[L["legcompanion"]] -legroots: _FuncRoots[L["legroots"]] -leggauss: _FuncGauss[L["leggauss"]] -legweight: _FuncWeight[L["legweight"]] +legline: Final[_FuncLine] = ... +legfromroots: Final[_FuncFromRoots] = ... +legadd: Final[_FuncBinOp] = ... +legsub: Final[_FuncBinOp] = ... +legmulx: Final[_FuncUnOp] = ... +legmul: Final[_FuncBinOp] = ... +legdiv: Final[_FuncBinOp] = ... +legpow: Final[_FuncPow] = ... +legder: Final[_FuncDer] = ... +legint: Final[_FuncInteg] = ... +legval: Final[_FuncVal] = ... +legval2d: Final[_FuncVal2D] = ... +legval3d: Final[_FuncVal3D] = ... +leggrid2d: Final[_FuncVal2D] = ... +leggrid3d: Final[_FuncVal3D] = ... +legvander: Final[_FuncVander] = ... +legvander2d: Final[_FuncVander2D] = ... +legvander3d: Final[_FuncVander3D] = ... +legfit: Final[_FuncFit] = ... +legcompanion: Final[_FuncCompanion] = ... +legroots: Final[_FuncRoots] = ... +leggauss: Final[_FuncGauss] = ... +legweight: Final[_FuncWeight] = ... -class Legendre(ABCPolyBase[L["P"]]): ... +class Legendre(ABCPolyBase[L["P"]]): + basis_name: ClassVar[L["P"]] = "P" # pyright: ignore[reportIncompatibleMethodOverride] + domain: _Array2[np.float64 | Any] = ... 
# pyright: ignore[reportIncompatibleMethodOverride] + window: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] diff --git a/numpy/polynomial/polynomial.pyi b/numpy/polynomial/polynomial.pyi index 2942adf2afa8..ddd10fa4282e 100644 --- a/numpy/polynomial/polynomial.pyi +++ b/numpy/polynomial/polynomial.pyi @@ -1,11 +1,19 @@ -from typing import Final, Literal as L +from typing import Any, ClassVar, Final, overload import numpy as np +import numpy.typing as npt +from numpy._typing import ( + _ArrayLikeFloat_co, + _ArrayLikeNumber_co, + _FloatLike_co, + _NumberLike_co, +) from ._polybase import ABCPolyBase from ._polytypes import ( _Array1, _Array2, + _ArrayLikeCoef_co, _FuncBinOp, _FuncCompanion, _FuncDer, @@ -19,7 +27,6 @@ from ._polytypes import ( _FuncVal, _FuncVal2D, _FuncVal3D, - _FuncValFromRoots, _FuncVander, _FuncVander2D, _FuncVander3D, @@ -57,32 +64,66 @@ __all__ = [ "polycompanion", ] -polydomain: Final[_Array2[np.float64]] -polyzero: Final[_Array1[np.int_]] -polyone: Final[_Array1[np.int_]] -polyx: Final[_Array2[np.int_]] +polydomain: Final[_Array2[np.float64]] = ... +polyzero: Final[_Array1[np.int_]] = ... +polyone: Final[_Array1[np.int_]] = ... +polyx: Final[_Array2[np.int_]] = ... + +polyline: Final[_FuncLine] = ... +polyfromroots: Final[_FuncFromRoots] = ... +polyadd: Final[_FuncBinOp] = ... +polysub: Final[_FuncBinOp] = ... +polymulx: Final[_FuncUnOp] = ... +polymul: Final[_FuncBinOp] = ... +polydiv: Final[_FuncBinOp] = ... +polypow: Final[_FuncPow] = ... +polyder: Final[_FuncDer] = ... +polyint: Final[_FuncInteg] = ... +polyval: Final[_FuncVal] = ... +polyval2d: Final[_FuncVal2D] = ... +polyval3d: Final[_FuncVal3D] = ... + +@overload +def polyvalfromroots( + x: _FloatLike_co, + r: _FloatLike_co, + tensor: bool = True, +) -> np.float64 | Any: ... +@overload +def polyvalfromroots( + x: _NumberLike_co, + r: _NumberLike_co, + tensor: bool = True, +) -> np.complex128 | Any: ... 
+@overload +def polyvalfromroots( + x: _ArrayLikeFloat_co, + r: _ArrayLikeFloat_co, + tensor: bool = True, +) -> npt.NDArray[np.float64 | Any]: ... +@overload +def polyvalfromroots( + x: _ArrayLikeNumber_co, + r: _ArrayLikeNumber_co, + tensor: bool = True, +) -> npt.NDArray[np.complex128 | Any]: ... +@overload +def polyvalfromroots( + x: _ArrayLikeCoef_co, + r: _ArrayLikeCoef_co, + tensor: bool = True, +) -> npt.NDArray[np.object_ | Any]: ... -polyline: _FuncLine[L["Polyline"]] -polyfromroots: _FuncFromRoots[L["polyfromroots"]] -polyadd: _FuncBinOp[L["polyadd"]] -polysub: _FuncBinOp[L["polysub"]] -polymulx: _FuncUnOp[L["polymulx"]] -polymul: _FuncBinOp[L["polymul"]] -polydiv: _FuncBinOp[L["polydiv"]] -polypow: _FuncPow[L["polypow"]] -polyder: _FuncDer[L["polyder"]] -polyint: _FuncInteg[L["polyint"]] -polyval: _FuncVal[L["polyval"]] -polyval2d: _FuncVal2D[L["polyval2d"]] -polyval3d: _FuncVal3D[L["polyval3d"]] -polyvalfromroots: _FuncValFromRoots[L["polyvalfromroots"]] -polygrid2d: _FuncVal2D[L["polygrid2d"]] -polygrid3d: _FuncVal3D[L["polygrid3d"]] -polyvander: _FuncVander[L["polyvander"]] -polyvander2d: _FuncVander2D[L["polyvander2d"]] -polyvander3d: _FuncVander3D[L["polyvander3d"]] -polyfit: _FuncFit[L["polyfit"]] -polycompanion: _FuncCompanion[L["polycompanion"]] -polyroots: _FuncRoots[L["polyroots"]] +polygrid2d: Final[_FuncVal2D] = ... +polygrid3d: Final[_FuncVal3D] = ... +polyvander: Final[_FuncVander] = ... +polyvander2d: Final[_FuncVander2D] = ... +polyvander3d: Final[_FuncVander3D] = ... +polyfit: Final[_FuncFit] = ... +polycompanion: Final[_FuncCompanion] = ... +polyroots: Final[_FuncRoots] = ... -class Polynomial(ABCPolyBase[None]): ... +class Polynomial(ABCPolyBase[None]): + basis_name: ClassVar[None] = None # pyright: ignore[reportIncompatibleMethodOverride] + domain: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] + window: _Array2[np.float64 | Any] = ... 
# pyright: ignore[reportIncompatibleMethodOverride] diff --git a/numpy/polynomial/polyutils.pyi b/numpy/polynomial/polyutils.pyi index 8938c3cc8259..fd818ce0c90d 100644 --- a/numpy/polynomial/polyutils.pyi +++ b/numpy/polynomial/polyutils.pyi @@ -1,35 +1,38 @@ from collections.abc import Callable, Iterable, Sequence -from typing import Final, Literal, SupportsIndex, TypeAlias, TypeVar, overload +from typing import Any, Final, Literal, SupportsIndex, TypeAlias, TypeVar, overload import numpy as np import numpy.typing as npt from numpy._typing import ( _ArrayLikeComplex_co, _ArrayLikeFloat_co, + _ArrayLikeObject_co, _FloatLike_co, _NumberLike_co, ) from ._polytypes import ( _AnyInt, + _AnyValF, _Array2, _ArrayLikeCoef_co, _CoefArray, _CoefLike_co, + _CoefObjectLike_co, _CoefSeries, _ComplexArray, _ComplexSeries, _FloatArray, _FloatSeries, _FuncBinOp, - _FuncValND, - _FuncVanderND, _ObjectArray, _ObjectSeries, _SeriesLikeCoef_co, _SeriesLikeComplex_co, _SeriesLikeFloat_co, _SeriesLikeInt_co, + _SeriesLikeObject_co, + _SupportsCoefOps, _Tuple2, ) @@ -43,18 +46,9 @@ __all__: Final[Sequence[str]] = [ "trimseq", ] -_AnyLineF: TypeAlias = Callable[ - [_CoefLike_co, _CoefLike_co], - _CoefArray, -] -_AnyMulF: TypeAlias = Callable[ - [npt.ArrayLike, npt.ArrayLike], - _CoefArray, -] -_AnyVanderF: TypeAlias = Callable[ - [npt.ArrayLike, SupportsIndex], - _CoefArray, -] +_AnyLineF: TypeAlias = Callable[[_CoefLike_co, _CoefLike_co], _CoefArray] +_AnyMulF: TypeAlias = Callable[[npt.ArrayLike, npt.ArrayLike], _CoefArray] +_AnyVanderF: TypeAlias = Callable[[npt.ArrayLike, SupportsIndex], _CoefArray] @overload def as_series( @@ -262,8 +256,57 @@ def _nth_slice( ndim: SupportsIndex, ) -> tuple[slice | None, ...]: ... 
-_vander_nd: _FuncVanderND[Literal["_vander_nd"]] -_vander_nd_flat: _FuncVanderND[Literal["_vander_nd_flat"]] +# keep in sync with `vander_nd_flat` +@overload +def _vander_nd( + vander_fs: Sequence[_AnyVanderF], + points: Sequence[_ArrayLikeFloat_co], + degrees: Sequence[SupportsIndex], +) -> _FloatArray: ... +@overload +def _vander_nd( + vander_fs: Sequence[_AnyVanderF], + points: Sequence[_ArrayLikeComplex_co], + degrees: Sequence[SupportsIndex], +) -> _ComplexArray: ... +@overload +def _vander_nd( + vander_fs: Sequence[_AnyVanderF], + points: Sequence[_ArrayLikeObject_co | _ArrayLikeComplex_co], + degrees: Sequence[SupportsIndex], +) -> _ObjectArray: ... +@overload +def _vander_nd( + vander_fs: Sequence[_AnyVanderF], + points: Sequence[npt.ArrayLike], + degrees: Sequence[SupportsIndex], +) -> _CoefArray: ... + +# keep in sync with `vander_nd` +@overload +def _vander_nd_flat( + vander_fs: Sequence[_AnyVanderF], + points: Sequence[_ArrayLikeFloat_co], + degrees: Sequence[SupportsIndex], +) -> _FloatArray: ... +@overload +def _vander_nd_flat( + vander_fs: Sequence[_AnyVanderF], + points: Sequence[_ArrayLikeComplex_co], + degrees: Sequence[SupportsIndex], +) -> _ComplexArray: ... +@overload +def _vander_nd_flat( + vander_fs: Sequence[_AnyVanderF], + points: Sequence[_ArrayLikeObject_co | _ArrayLikeComplex_co], + degrees: Sequence[SupportsIndex], +) -> _ObjectArray: ... +@overload +def _vander_nd_flat( + vander_fs: Sequence[_AnyVanderF], + points: Sequence[npt.ArrayLike], + degrees: Sequence[SupportsIndex], +) -> _CoefArray: ... # keep in sync with `._polytypes._FuncFromRoots` @overload @@ -282,7 +325,7 @@ def _fromroots( def _fromroots( line_f: _AnyLineF, mul_f: _AnyMulF, - roots: _SeriesLikeCoef_co, + roots: _SeriesLikeObject_co, ) -> _ObjectSeries: ... @overload def _fromroots( @@ -291,8 +334,81 @@ def _fromroots( roots: _SeriesLikeCoef_co, ) -> _CoefSeries: ... 
-_valnd: _FuncValND[Literal["_valnd"]] -_gridnd: _FuncValND[Literal["_gridnd"]] +# keep in sync with `_gridnd` +@overload +def _valnd( + val_f: _AnyValF, + c: _SeriesLikeFloat_co, + *args: _FloatLike_co, +) -> np.floating: ... +@overload +def _valnd( + val_f: _AnyValF, + c: _SeriesLikeComplex_co, + *args: _NumberLike_co, +) -> np.complexfloating: ... +@overload +def _valnd( + val_f: _AnyValF, + c: _ArrayLikeFloat_co, + *args: _ArrayLikeFloat_co, +) -> _FloatArray: ... +@overload +def _valnd( + val_f: _AnyValF, + c: _ArrayLikeComplex_co, + *args: _ArrayLikeComplex_co, +) -> _ComplexArray: ... +@overload +def _valnd( + val_f: _AnyValF, + c: _SeriesLikeObject_co, + *args: _CoefObjectLike_co, +) -> _SupportsCoefOps[Any]: ... +@overload +def _valnd( + val_f: _AnyValF, + c: _ArrayLikeCoef_co, + *args: _ArrayLikeCoef_co, +) -> _ObjectArray: ... + +# keep in sync with `_valnd` +@overload +def _gridnd( + val_f: _AnyValF, + c: _SeriesLikeFloat_co, + *args: _FloatLike_co, +) -> np.floating: ... +@overload +def _gridnd( + val_f: _AnyValF, + c: _SeriesLikeComplex_co, + *args: _NumberLike_co, +) -> np.complexfloating: ... +@overload +def _gridnd( + val_f: _AnyValF, + c: _ArrayLikeFloat_co, + *args: _ArrayLikeFloat_co, +) -> _FloatArray: ... +@overload +def _gridnd( + val_f: _AnyValF, + c: _ArrayLikeComplex_co, + *args: _ArrayLikeComplex_co, +) -> _ComplexArray: ... +@overload +def _gridnd( + val_f: _AnyValF, + c: _SeriesLikeObject_co, + *args: _CoefObjectLike_co, +) -> _SupportsCoefOps[Any]: ... +@overload +def _gridnd( + val_f: _AnyValF, + c: _ArrayLikeCoef_co, + *args: _ArrayLikeCoef_co, +) -> _ObjectArray: ... # keep in sync with `_polytypes._FuncBinOp` @overload @@ -310,8 +426,8 @@ def _div( @overload def _div( mul_f: _AnyMulF, - c1: _SeriesLikeCoef_co, - c2: _SeriesLikeCoef_co, + c1: _SeriesLikeObject_co, + c2: _SeriesLikeObject_co, ) -> _Tuple2[_ObjectSeries]: ... @overload def _div( @@ -320,37 +436,37 @@ def _div( c2: _SeriesLikeCoef_co, ) -> _Tuple2[_CoefSeries]: ... 
-_add: Final[_FuncBinOp] -_sub: Final[_FuncBinOp] +_add: Final[_FuncBinOp] = ... +_sub: Final[_FuncBinOp] = ... # keep in sync with `_polytypes._FuncPow` @overload -def _pow( # type: ignore[overload-overlap] +def _pow( mul_f: _AnyMulF, c: _SeriesLikeFloat_co, pow: _AnyInt, - maxpower: _AnyInt | None = ..., + maxpower: _AnyInt | None, ) -> _FloatSeries: ... @overload def _pow( mul_f: _AnyMulF, c: _SeriesLikeComplex_co, pow: _AnyInt, - maxpower: _AnyInt | None = ..., + maxpower: _AnyInt | None, ) -> _ComplexSeries: ... @overload def _pow( mul_f: _AnyMulF, - c: _SeriesLikeCoef_co, + c: _SeriesLikeObject_co, pow: _AnyInt, - maxpower: _AnyInt | None = ..., + maxpower: _AnyInt | None, ) -> _ObjectSeries: ... @overload def _pow( mul_f: _AnyMulF, c: _SeriesLikeCoef_co, pow: _AnyInt, - maxpower: _AnyInt | None = ..., + maxpower: _AnyInt | None, ) -> _CoefSeries: ... # keep in sync with `_polytypes._FuncFit` @@ -360,7 +476,6 @@ def _fit( # type: ignore[overload-overlap] x: _SeriesLikeFloat_co, y: _ArrayLikeFloat_co, deg: _SeriesLikeInt_co, - domain: _SeriesLikeFloat_co | None = ..., rcond: _FloatLike_co | None = None, full: Literal[False] = False, w: _SeriesLikeFloat_co | None = None, @@ -371,7 +486,6 @@ def _fit( x: _SeriesLikeComplex_co, y: _ArrayLikeComplex_co, deg: _SeriesLikeInt_co, - domain: _SeriesLikeComplex_co | None = ..., rcond: _FloatLike_co | None = None, full: Literal[False] = False, w: _SeriesLikeComplex_co | None = None, @@ -382,7 +496,6 @@ def _fit( x: _SeriesLikeCoef_co, y: _ArrayLikeCoef_co, deg: _SeriesLikeInt_co, - domain: _SeriesLikeCoef_co | None = ..., rcond: _FloatLike_co | None = None, full: Literal[False] = False, w: _SeriesLikeCoef_co | None = None, @@ -393,10 +506,8 @@ def _fit( x: _SeriesLikeCoef_co, y: _SeriesLikeCoef_co, deg: _SeriesLikeInt_co, - domain: _SeriesLikeCoef_co | None, rcond: _FloatLike_co | None, full: Literal[True], - /, w: _SeriesLikeCoef_co | None = None, ) -> tuple[_CoefSeries, Sequence[np.inexact | np.int32]]: ... 
@overload @@ -405,7 +516,6 @@ def _fit( x: _SeriesLikeCoef_co, y: _SeriesLikeCoef_co, deg: _SeriesLikeInt_co, - domain: _SeriesLikeCoef_co | None = ..., rcond: _FloatLike_co | None = None, *, full: Literal[True], diff --git a/numpy/typing/tests/data/reveal/polynomial_polybase.pyi b/numpy/typing/tests/data/reveal/polynomial_polybase.pyi index 2870d50310a2..4c4899ad6308 100644 --- a/numpy/typing/tests/data/reveal/polynomial_polybase.pyi +++ b/numpy/typing/tests/data/reveal/polynomial_polybase.pyi @@ -1,7 +1,6 @@ from collections.abc import Sequence from decimal import Decimal -from fractions import Fraction -from typing import Any, Literal as L, LiteralString, TypeAlias, TypeVar, assert_type +from typing import Any, Literal as L, TypeAlias, TypeVar, assert_type import numpy as np import numpy.polynomial as npp @@ -17,7 +16,7 @@ _Ar_f_n: TypeAlias = np.ndarray[tuple[int], np.dtype[np.floating]] _Ar_c_n: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complexfloating]] _Ar_O_n: TypeAlias = np.ndarray[tuple[int], np.dtype[np.object_]] -_Ar_x_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.inexact | np.object_]] +_Ar_x_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.float64 | Any]] _Ar_f_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.floating]] _Ar_c_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.complexfloating]] _Ar_O_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.object_]] @@ -103,7 +102,7 @@ assert_type(type(PS_herme).cast(PS_leg), npp.HermiteE) assert_type(PS_all.coef, _Ar_x_n) assert_type(PS_all.domain, _Ar_x_2) assert_type(PS_all.window, _Ar_x_2) -assert_type(PS_all.symbol, LiteralString) +assert_type(PS_all.symbol, str) # instance methods @@ -113,7 +112,7 @@ assert_type(PS_all.has_samewindow(PS_all), bool) assert_type(PS_all.has_sametype(PS_all), bool) assert_type(PS_poly.has_sametype(PS_poly), bool) assert_type(PS_poly.has_sametype(PS_leg), bool) -assert_type(PS_poly.has_sametype(NotADirectoryError), L[False]) 
+assert_type(PS_poly.has_sametype(NotADirectoryError), bool) assert_type(PS_poly.copy(), npp.Polynomial) assert_type(PS_cheb.copy(), npp.Chebyshev) @@ -122,7 +121,7 @@ assert_type(PS_herme.copy(), npp.HermiteE) assert_type(PS_lag.copy(), npp.Laguerre) assert_type(PS_leg.copy(), npp.Legendre) -assert_type(PS_leg.cutdeg(), npp.Legendre) +assert_type(PS_leg.cutdeg(3), npp.Legendre) assert_type(PS_leg.trim(), npp.Legendre) assert_type(PS_leg.trim(tol=SC_f_co), npp.Legendre) assert_type(PS_leg.truncate(SC_i_co), npp.Legendre) @@ -172,17 +171,16 @@ assert_type(repr(PS_all), str) assert_type(format(PS_all), str) assert_type(len(PS_all), int) -assert_type(next(iter(PS_all)), np.inexact | object) - -assert_type(PS_all(SC_f_co), np.float64 | np.complex128) -assert_type(PS_all(SC_c_co), np.complex128) -assert_type(PS_all(Decimal()), np.float64 | np.complex128) -assert_type(PS_all(Fraction()), np.float64 | np.complex128) -assert_type(PS_poly(SQ_f), npt.NDArray[np.float64] | npt.NDArray[np.complex128] | npt.NDArray[np.object_]) -assert_type(PS_poly(SQ_c), npt.NDArray[np.complex128] | npt.NDArray[np.object_]) +assert_type(next(iter(PS_all)), np.float64 | Any) + +assert_type(PS_all(SC_f_co), np.float64 | Any) +assert_type(PS_all(SC_c_co), np.complex128 | Any) +assert_type(PS_all(Decimal()), np.float64 | Any) +assert_type(PS_poly(SQ_f), npt.NDArray[np.float64 | Any]) +assert_type(PS_poly(SQ_c), npt.NDArray[np.complex128 | Any]) assert_type(PS_poly(SQ_O), npt.NDArray[np.object_]) -assert_type(PS_poly(AR_f), npt.NDArray[np.float64] | npt.NDArray[np.complex128] | npt.NDArray[np.object_]) -assert_type(PS_poly(AR_c), npt.NDArray[np.complex128] | npt.NDArray[np.object_]) +assert_type(PS_poly(AR_f), npt.NDArray[np.float64 | Any]) +assert_type(PS_poly(AR_c), npt.NDArray[np.complex128 | Any]) assert_type(PS_poly(AR_O), npt.NDArray[np.object_]) assert_type(PS_all(PS_poly), npp.Polynomial) diff --git a/numpy/typing/tests/data/reveal/polynomial_series.pyi 
b/numpy/typing/tests/data/reveal/polynomial_series.pyi index 0f4a9e09f2e7..3188ad9a1239 100644 --- a/numpy/typing/tests/data/reveal/polynomial_series.pyi +++ b/numpy/typing/tests/data/reveal/polynomial_series.pyi @@ -1,5 +1,5 @@ from collections.abc import Sequence -from typing import TypeAlias, assert_type +from typing import Any, TypeAlias, assert_type import numpy as np import numpy.polynomial as npp @@ -79,12 +79,12 @@ assert_type(npp.polynomial.polyval3d(AR_f8, AR_f8, AR_f8, AR_i8), npt.NDArray[np assert_type(npp.polynomial.polyval3d(AR_i8, AR_i8, AR_i8, AR_c16), npt.NDArray[np.complexfloating]) assert_type(npp.polynomial.polyval3d(AR_O, AR_O, AR_O, AR_O), npt.NDArray[np.object_]) -assert_type(npp.polynomial.polyvalfromroots(AR_b, AR_b), npt.NDArray[np.floating]) -assert_type(npp.polynomial.polyvalfromroots(AR_u4, AR_b), npt.NDArray[np.floating]) -assert_type(npp.polynomial.polyvalfromroots(AR_i8, AR_i8), npt.NDArray[np.floating]) -assert_type(npp.polynomial.polyvalfromroots(AR_f8, AR_i8), npt.NDArray[np.floating]) -assert_type(npp.polynomial.polyvalfromroots(AR_i8, AR_c16), npt.NDArray[np.complexfloating]) -assert_type(npp.polynomial.polyvalfromroots(AR_O, AR_O), npt.NDArray[np.object_]) +assert_type(npp.polynomial.polyvalfromroots(AR_b, AR_b), npt.NDArray[np.float64 | Any]) +assert_type(npp.polynomial.polyvalfromroots(AR_u4, AR_b), npt.NDArray[np.float64 | Any]) +assert_type(npp.polynomial.polyvalfromroots(AR_i8, AR_i8), npt.NDArray[np.float64 | Any]) +assert_type(npp.polynomial.polyvalfromroots(AR_f8, AR_i8), npt.NDArray[np.float64 | Any]) +assert_type(npp.polynomial.polyvalfromroots(AR_i8, AR_c16), npt.NDArray[np.complex128 | Any]) +assert_type(npp.polynomial.polyvalfromroots(AR_O, AR_O), npt.NDArray[np.object_ | Any]) assert_type(npp.polynomial.polyvander(AR_f8, 3), npt.NDArray[np.floating]) assert_type(npp.polynomial.polyvander(AR_c16, 3), npt.NDArray[np.complexfloating]) From 75e1f93049fd0e418e2bfc31aed61e65a10924ea Mon Sep 17 00:00:00 2001 From: Joren 
Hammudoglu Date: Tue, 21 Oct 2025 02:11:51 +0200 Subject: [PATCH 0708/1718] TYP: fix stubtest error in ``numpy.typing`` (#30033) --- numpy/typing/__init__.pyi | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 numpy/typing/__init__.pyi diff --git a/numpy/typing/__init__.pyi b/numpy/typing/__init__.pyi new file mode 100644 index 000000000000..7a4c7b41079c --- /dev/null +++ b/numpy/typing/__init__.pyi @@ -0,0 +1,3 @@ +from numpy._typing import ArrayLike, DTypeLike, NBitBase, NDArray + +__all__ = ["ArrayLike", "DTypeLike", "NBitBase", "NDArray"] From 9147472ebe97223604d2a969499a1b2f1f11152e Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 21 Oct 2025 09:40:29 +0200 Subject: [PATCH 0709/1718] DEV: add a `spin stubtest` command --- .spin/cmds.py | 31 ++++++ pyproject.toml | 2 + tools/stubtest/allowlist.txt | 205 +++++++++++++++++++++++++++++++++++ tools/stubtest/mypy.ini | 6 + 4 files changed, 244 insertions(+) create mode 100644 tools/stubtest/allowlist.txt create mode 100644 tools/stubtest/mypy.ini diff --git a/.spin/cmds.py b/.spin/cmds.py index 9490aca297d9..25ae4551781b 100644 --- a/.spin/cmds.py +++ b/.spin/cmds.py @@ -521,6 +521,37 @@ def mypy(ctx): ctx.forward(test) +@click.command() +@click.option( + '--concise', + is_flag=True, + default=False, + help="Concise output format", +) +def stubtest(*, concise: bool) -> None: + """🧐 Run stubtest on NumPy's .pyi stubs + + Requires mypy to be installed + """ + ctx = click.get_current_context() + ctx.invoke(build) + + stubtest_dir = curdir.parent / 'tools' / 'stubtest' + mypy_config = stubtest_dir / 'mypy.ini' + allowlist = stubtest_dir / 'allowlist.txt' + + cmd = [ + 'stubtest', + '--ignore-disjoint-bases', + f'--mypy-config-file={mypy_config}', + f'--allowlist={allowlist}', + ] + if concise: + cmd.append('--concise') + cmd.append('numpy') + spin.util.run(cmd) + + @click.command(context_settings={ 'ignore_unknown_options': True }) diff --git a/pyproject.toml b/pyproject.toml index 5ffbd9ea0247..0dba95b04e84 
100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -235,6 +235,7 @@ meson = 'vendored-meson/meson/meson.py' [tool.meson-python.args] install = ['--tags=runtime,python-runtime,tests,devel'] + [tool.spin] package = 'numpy' @@ -246,6 +247,7 @@ cli = 'vendored-meson/meson/meson.py' ".spin/cmds.py:build", ".spin/cmds.py:test", ".spin/cmds.py:mypy", + ".spin/cmds.py:stubtest", ".spin/cmds.py:config_openblas", ".spin/cmds.py:lint", ] diff --git a/tools/stubtest/allowlist.txt b/tools/stubtest/allowlist.txt new file mode 100644 index 000000000000..c5d7fe2418d3 --- /dev/null +++ b/tools/stubtest/allowlist.txt @@ -0,0 +1,205 @@ +# intentional type-check-only deviations from runtime +numpy\._typing.* +numpy\.polynomial\._polytypes +numpy\.typing\.mypy_plugin\..* + +# internal testing code +numpy\.conftest.* +numpy\.random\._generator\.__test__ +numpy(\.\w+)?\.tests.* + +# system-dependent extended precision types +numpy(\..+)?\.float(96|128) +numpy(\..+)?\.complex(192|256) + +# system-dependent SIMD constants +numpy\._core\._simd\.\w+ + +# these are always either float96/complex192 or float128/complex256 +numpy\.__all__ +numpy\._?core\.__all__ +numpy\._?core\.numeric\.__all__ +numpy\._?core\.numerictypes\.__all__ +numpy\.matlib\.__all__ + +# requires numpy/_core/code_generators to be on the PYTHONPATH when running stubtest +numpy\._core\.cversions + +# raises SystemExit on import +numpy\.f2py\.__main__ + +# inexpressible: the `dtype.type` class-attribute is `None` unless instantiated +numpy(\..+)?\.dtype\.type + +# mypy false positive "... is not a Union" errors (py314 only?) 
+numpy\.typing\.ArrayLike +numpy\.typing\.DTypeLike + +# distutils +numpy\.distutils.* +numpy\.f2py\._backends\._distutils + +# import errors +numpy\._build_utils.* +numpy\._core\.code_generators.* +numpy\._core\.include\.numpy\.libdivide +numpy\._core\.src.* +numpy\._pyinstaller.* +numpy\.fft\.pocketfft +numpy\.random\._examples\..* +numpy\.random\.include +numpy\.random\.src.* + +# known minor deviations from runtime +numpy\.(\w+\.)*integer\.__index__ +numpy\.(\w+\.)*integer\.bit_count +numpy\.(\w+\.)*floating\.as_integer_ratio +numpy\.(\w+\.)*floating\.is_integer +numpy\.(\w+\.)*complexfloating\.__complex__ + +# intentionally missing deprecated module stubs +numpy\.core\._dtype +numpy\.core\._dtype_ctypes +numpy\.core\._internal +numpy\.core\._multiarray_umath.* +numpy\.core\.arrayprint.* +numpy\.core\.defchararray.* +numpy\.core\.einsumfunc.* +numpy\.core\.fromnumeric.* +numpy\.core\.function_base.* +numpy\.core\.getlimits.* +numpy\.core\.multiarray.* +numpy\.core\.numeric.* +numpy\.core\.overrides +numpy\.core\.records.* +numpy\.core\.shape_base.* +numpy\.core\.umath.* + +# ufuncs +numpy\.(\w+\.)*abs +numpy\.(\w+\.)*absolute +numpy\.(\w+\.)*acos +numpy\.(\w+\.)*acosh +numpy\.(\w+\.)*add +numpy\.(\w+\.)*arccos +numpy\.(\w+\.)*arccosh +numpy\.(\w+\.)*arcsin +numpy\.(\w+\.)*arcsinh +numpy\.(\w+\.)*arctan +numpy\.(\w+\.)*arctan2 +numpy\.(\w+\.)*arctanh +numpy\.(\w+\.)*asin +numpy\.(\w+\.)*asinh +numpy\.(\w+\.)*atan +numpy\.(\w+\.)*atan2 +numpy\.(\w+\.)*atanh +numpy\.(\w+\.)*bitwise_and +numpy\.(\w+\.)*bitwise_count +numpy\.(\w+\.)*bitwise_invert +numpy\.(\w+\.)*bitwise_left_shift +numpy\.(\w+\.)*bitwise_not +numpy\.(\w+\.)*bitwise_or +numpy\.(\w+\.)*bitwise_right_shift +numpy\.(\w+\.)*bitwise_xor +numpy\.(\w+\.)*cbrt +numpy\.(\w+\.)*ceil +numpy\.(\w+\.)*conj +numpy\.(\w+\.)*conjugate +numpy\.(\w+\.)*copysign +numpy\.(\w+\.)*cos +numpy\.(\w+\.)*cosh +numpy\.(\w+\.)*deg2rad +numpy\.(\w+\.)*degrees +numpy\.(\w+\.)*divide +numpy\.(\w+\.)*divmod +numpy\.(\w+\.)*equal 
+numpy\.(\w+\.)*exp +numpy\.(\w+\.)*exp2 +numpy\.(\w+\.)*expm1 +numpy\.(\w+\.)*fabs +numpy\.(\w+\.)*float_power +numpy\.(\w+\.)*floor +numpy\.(\w+\.)*floor_divide +numpy\.(\w+\.)*fmax +numpy\.(\w+\.)*fmin +numpy\.(\w+\.)*fmod +numpy\.(\w+\.)*frexp +numpy\.(\w+\.)*gcd +numpy\.(\w+\.)*greater +numpy\.(\w+\.)*greater_equal +numpy\.(\w+\.)*heaviside +numpy\.(\w+\.)*hypot +numpy\.(\w+\.)*invert +numpy\.(\w+\.)*isfinite +numpy\.(\w+\.)*isinf +numpy\.(\w+\.)*isnan +numpy\.(\w+\.)*isnat +numpy\.(\w+\.)*lcm +numpy\.(\w+\.)*ldexp +numpy\.(\w+\.)*left_shift +numpy\.(\w+\.)*less +numpy\.(\w+\.)*less_equal +numpy\.(\w+\.)*log +numpy\.(\w+\.)*log10 +numpy\.(\w+\.)*log1p +numpy\.(\w+\.)*log2 +numpy\.(\w+\.)*logaddexp +numpy\.(\w+\.)*logaddexp2 +numpy\.(\w+\.)*logical_and +numpy\.(\w+\.)*logical_not +numpy\.(\w+\.)*logical_or +numpy\.(\w+\.)*logical_xor +numpy\.(\w+\.)*matmul +numpy\.(\w+\.)*matvec +numpy\.(\w+\.)*maximum +numpy\.(\w+\.)*minimum +numpy\.(\w+\.)*mod +numpy\.(\w+\.)*modf +numpy\.(\w+\.)*multiply +numpy\.(\w+\.)*negative +numpy\.(\w+\.)*nextafter +numpy\.(\w+\.)*not_equal +numpy\.(\w+\.)*positive +numpy\.(\w+\.)*pow +numpy\.(\w+\.)*power +numpy\.(\w+\.)*rad2deg +numpy\.(\w+\.)*radians +numpy\.(\w+\.)*reciprocal +numpy\.(\w+\.)*remainder +numpy\.(\w+\.)*right_shift +numpy\.(\w+\.)*rint +numpy\.(\w+\.)*sign +numpy\.(\w+\.)*signbit +numpy\.(\w+\.)*sin +numpy\.(\w+\.)*sinh +numpy\.(\w+\.)*spacing +numpy\.(\w+\.)*sqrt +numpy\.(\w+\.)*square +numpy\.(\w+\.)*subtract +numpy\.(\w+\.)*tan +numpy\.(\w+\.)*tanh +numpy\.(\w+\.)*true_divide +numpy\.(\w+\.)*trunc +numpy\.(\w+\.)*vecdot +numpy\.(\w+\.)*vecmat +numpy\.(\w+\.)*isalnum +numpy\.(\w+\.)*isalpha +numpy\.(\w+\.)*isdecimal +numpy\.(\w+\.)*isdigit +numpy\.(\w+\.)*islower +numpy\.(\w+\.)*isnumeric +numpy\.(\w+\.)*isspace +numpy\.(\w+\.)*istitle +numpy\.(\w+\.)*isupper +numpy\.(\w+\.)*str_len +numpy\._core\._methods\.umr_bitwise_count +numpy\._core\._methods\.umr_bitwise_count +numpy\.linalg\._umath_linalg\.qr_complete 
+numpy\.linalg\._umath_linalg\.qr_reduced +numpy\.linalg\._umath_linalg\.solve +numpy\.linalg\._umath_linalg\.solve1 + +# ma.core._frommethod callables +numpy\.ma\.(core\.)?argmax +numpy\.ma\.(core\.)?argmin +numpy\.ma\.(core\.)?count diff --git a/tools/stubtest/mypy.ini b/tools/stubtest/mypy.ini new file mode 100644 index 000000000000..307590b0f922 --- /dev/null +++ b/tools/stubtest/mypy.ini @@ -0,0 +1,6 @@ +; Stubtest requires mypy to pass before running, which would currently fail +; on numerous stubs. To allow running stubtest independently, we ignore mypy +; errors here. + +[mypy] +ignore_errors = True From 964e6986dc976370a5819ca3e5f94f69b586d1af Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 21 Oct 2025 10:42:24 +0200 Subject: [PATCH 0710/1718] TYP: restore abstract scalar type constructor parameters --- numpy/__init__.pyi | 12 ++++++------ numpy/typing/tests/data/fail/scalars.pyi | 12 ++++++------ 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 9fa6aafb56bb..30b298e1d607 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -3589,7 +3589,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # See https://github.com/numpy/numpy-stubs/pull/80 for more details. class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): @abstractmethod - def __new__(cls) -> Self: ... + def __new__(cls, /, *args: Any, **kwargs: Any) -> Self: ... if sys.version_info >= (3, 12): def __buffer__(self, flags: int, /) -> memoryview: ... @@ -3916,8 +3916,8 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): def dtype(self) -> _dtype[Self]: ... class number(generic[_NumberItemT_co], Generic[_NBit, _NumberItemT_co]): - @abstractmethod - def __new__(cls) -> Self: ... + @abstractmethod # `SupportsIndex | str | bytes` equivs `_ConvertibleToInt & _ConvertibleToFloat` + def __new__(cls, value: SupportsIndex | str | bytes = 0, /) -> Self: ... 
def __class_getitem__(cls, item: Any, /) -> GenericAlias: ... def __neg__(self) -> Self: ... @@ -4289,7 +4289,7 @@ class object_(_RealMixin, generic): class integer(_IntegralMixin, _RoundMixin, number[_NBit, int]): @abstractmethod - def __new__(cls) -> Self: ... + def __new__(cls, value: _ConvertibleToInt = 0, /) -> Self: ... # NOTE: `bit_count` and `__index__` are technically defined in the concrete subtypes def bit_count(self, /) -> int: ... @@ -4825,7 +4825,7 @@ ulonglong: TypeAlias = unsignedinteger[_NBitLongLong] class inexact(number[_NBit, _InexactItemT_co], Generic[_NBit, _InexactItemT_co]): @abstractmethod - def __new__(cls) -> Self: ... + def __new__(cls, value: _ConvertibleToFloat | None = 0, /) -> Self: ... class floating(_RealMixin, _RoundMixin, inexact[_NBit1, float]): def __new__(cls, value: _ConvertibleToFloat | None = 0, /) -> Self: ... @@ -5621,7 +5621,7 @@ class void(flexible[bytes | tuple[Any, ...]]): # type: ignore[misc] class character(flexible[_CharacterItemT_co], Generic[_CharacterItemT_co]): # type: ignore[misc] @abstractmethod - def __new__(cls) -> Self: ... + def __new__(cls, value: object = ..., /) -> Self: ... 
# NOTE: Most `np.bytes_` / `np.str_` methods return their builtin `bytes` / `str` counterpart diff --git a/numpy/typing/tests/data/fail/scalars.pyi b/numpy/typing/tests/data/fail/scalars.pyi index 02043e06e8fe..018a88e652ae 100644 --- a/numpy/typing/tests/data/fail/scalars.pyi +++ b/numpy/typing/tests/data/fail/scalars.pyi @@ -46,12 +46,12 @@ np.uint64(A()) # type: ignore[arg-type] np.void("test") # type: ignore[call-overload] np.void("test", dtype=None) # type: ignore[call-overload] -np.generic(1) # type: ignore[abstract, call-arg] -np.number(1) # type: ignore[abstract, call-arg] -np.integer(1) # type: ignore[abstract, call-arg] -np.inexact(1) # type: ignore[abstract, call-arg] -np.character("test") # type: ignore[abstract, call-arg] -np.flexible(b"test") # type: ignore[abstract, call-arg] +np.generic(1) # type: ignore[abstract] +np.number(1) # type: ignore[abstract] +np.integer(1) # type: ignore[abstract] +np.inexact(1) # type: ignore[abstract] +np.character("test") # type: ignore[abstract] +np.flexible(b"test") # type: ignore[abstract] np.float64(value=0.0) # type: ignore[call-arg] np.int64(value=0) # type: ignore[call-arg] From e618a050737c53481845c0a746373a0fb8f42760 Mon Sep 17 00:00:00 2001 From: ixgbe <1113177880@qq.com> Date: Tue, 21 Oct 2025 17:43:32 +0800 Subject: [PATCH 0711/1718] TYP: Add type annotations for ASIMD, NEON, and RVV targets (#30034) --- numpy/_core/_simd.pyi | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/numpy/_core/_simd.pyi b/numpy/_core/_simd.pyi index 70bb7077797e..0ba7d78eeb87 100644 --- a/numpy/_core/_simd.pyi +++ b/numpy/_core/_simd.pyi @@ -8,6 +8,13 @@ AVX2: ModuleType | None = ... AVX512F: ModuleType | None = ... AVX512_SKX: ModuleType | None = ... +# NOTE: these 2 are only defined on systems with an arm processor +ASIMD: ModuleType | None = ... +NEON: ModuleType | None = ... + +# NOTE: This is only defined on systems with an riscv64 processor. +RVV: ModuleType | None = ... + baseline: ModuleType | None = ... 
@type_check_only @@ -17,6 +24,9 @@ class SimdTargets(TypedDict): FMA3: ModuleType | None AVX512F: ModuleType | None AVX512_SKX: ModuleType | None + ASIMD: ModuleType | None + NEON: ModuleType | None + RVV: ModuleType | None baseline: ModuleType | None targets: SimdTargets = ... From de6f6858045b559162f6cd0f38684d833c79d657 Mon Sep 17 00:00:00 2001 From: Parsa Shemirani Date: Tue, 21 Oct 2025 08:09:34 -0700 Subject: [PATCH 0712/1718] DOC: Clarify signed vs unsigned intptr_t vs uintptr_t in user/basics.types Clarified that uintptr_t is for pointers without sign as opposed to intptr_t. Note: I am just a beginner with Numpy getting started with the docs so apologies if I misunderstood it. --- doc/source/user/basics.types.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/user/basics.types.rst b/doc/source/user/basics.types.rst index 8145f6adb93b..d6914f437faa 100644 --- a/doc/source/user/basics.types.rst +++ b/doc/source/user/basics.types.rst @@ -227,7 +227,7 @@ confusion with builtin python type names, such as `numpy.bool_`. * - N/A - ``'P'`` - ``uintptr_t`` - - Guaranteed to hold pointers. Character code only (Python and C). + - Guaranteed to hold pointers without sign. Character code only (Python and C). 
* - `numpy.int32` or `numpy.int64` - `numpy.long` From 9300e4accbeee2622e0a4f43567225afb2416bad Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 21 Oct 2025 19:04:14 +0200 Subject: [PATCH 0713/1718] MAINT: bump ``hypothesis`` to ``6.142.2`` --- requirements/test_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index 101cb6251251..33b5756b7362 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -2,7 +2,7 @@ Cython wheel==0.38.1 setuptools==65.5.1 ; python_version < '3.12' setuptools ; python_version >= '3.12' -hypothesis==6.137.1 +hypothesis==6.142.2 pytest==7.4.0 pytest-cov==4.1.0 meson From 97d41bf02d133bbacdd70ea425e93d179e221a16 Mon Sep 17 00:00:00 2001 From: Britney Whittington <103079612+bwhitt7@users.noreply.github.com> Date: Tue, 21 Oct 2025 16:29:52 -0400 Subject: [PATCH 0714/1718] CI, TST: Enable parallel threads testing in macOS CI job (#30005) --- .github/workflows/macos.yml | 9 ++++++++- .spin/cmds.py | 18 +++++++++++++++++- doc/TESTS.rst | 17 +++++++++++++++++ doc/source/dev/development_environment.rst | 1 + numpy/_core/tests/test_array_coercion.py | 1 + numpy/_core/tests/test_arrayprint.py | 4 +--- numpy/_core/tests/test_deprecations.py | 3 +++ numpy/_core/tests/test_dtype.py | 12 +++--------- numpy/_core/tests/test_multiarray.py | 1 + numpy/_core/tests/test_scalarmath.py | 8 ++------ numpy/conftest.py | 13 ++++++------- numpy/lib/tests/test_function_base.py | 4 ---- numpy/linalg/tests/test_regression.py | 1 + 13 files changed, 61 insertions(+), 31 deletions(-) diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index d747ab959dd6..f4393e71523c 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -115,7 +115,7 @@ jobs: build_runner: - [ macos-15-intel, "macos_x86_64" ] - [ macos-14, "macos_arm64" ] - version: ["3.11", "3.14t-dev"] + version: ["3.11", "3.14t"] 
steps: - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 @@ -144,4 +144,11 @@ jobs: spin build -- -Duse-ilp64=true -Ddisable-optimization=true -Dallow-noblas=false - name: Test (fast tests) + if: ${{ matrix.version != '3.14t' || matrix.build_runner[0] != 'macos-14' }} run: spin test -j2 -- --timeout=600 --durations=10 + + - name: Test in multiple threads + if: ${{ matrix.version == '3.14t' && matrix.build_runner[0] == 'macos-14' }} + run: | + pip install pytest-run-parallel==0.7.0 + spin test -p 4 -- --timeout=600 --durations=10 diff --git a/.spin/cmds.py b/.spin/cmds.py index 25ae4551781b..5c0c21198d78 100644 --- a/.spin/cmds.py +++ b/.spin/cmds.py @@ -141,11 +141,24 @@ def docs(*, parent_callback, **kwargs): default=default, help="Run tests with the given markers" ) +@click.option( + "-p", + "--parallel-threads", + metavar='PARALLEL_THREADS', + default="1", + help="Run tests many times in number of parallel threads under pytest-run-parallel." + " Can be set to `auto` to use all cores. Use `spin test -p -- " + "--skip-thread-unsafe=true` to only run tests that can run in parallel. " + "pytest-run-parallel must be installed to use." +) @spin.util.extend_command(spin.cmds.meson.test) -def test(*, parent_callback, pytest_args, tests, markexpr, **kwargs): +def test(*, parent_callback, pytest_args, tests, markexpr, parallel_threads, **kwargs): """ By default, spin will run `-m 'not slow'`. To run the full test suite, use `spin test -m full` + + When pytest-run-parallel is avaliable, use `spin test -p auto` or + `spin test -p ` to run tests sequentional in parallel threads. 
""" # noqa: E501 if (not pytest_args) and (not tests): pytest_args = ('--pyargs', 'numpy') @@ -154,6 +167,9 @@ def test(*, parent_callback, pytest_args, tests, markexpr, **kwargs): if markexpr != "full": pytest_args = ('-m', markexpr) + pytest_args + if parallel_threads != "1": + pytest_args = ('--parallel-threads', parallel_threads) + pytest_args + kwargs['pytest_args'] = pytest_args parent_callback(**{'pytest_args': pytest_args, 'tests': tests, **kwargs}) diff --git a/doc/TESTS.rst b/doc/TESTS.rst index ee8a8b4b07e1..b74dc700b1b9 100644 --- a/doc/TESTS.rst +++ b/doc/TESTS.rst @@ -74,6 +74,22 @@ Testing a subset of NumPy:: For detailed info on testing, see :ref:`testing-builds` +Running tests in multiple threads +--------------------------------- + +To help with stress testing NumPy for thread safety, the test suite can be run under +`pytest-run-parallel`_. To install ``pytest-run-parallel``:: + + $ pip install pytest-run-parallel + +To run the test suite in multiple threads:: + + $ spin test -p auto # have pytest-run-parallel detect the number of available cores + $ spin test -p 4 # run each test under 4 threads + $ spin test -p auto -- --skip-thread-unsafe=true # run ONLY tests that are thread-safe + +When you write new tests (see below), it is worth testing to make sure they do not fail +under ``pytest-run-parallel``, since the CI jobs makes use of it. Running doctests ---------------- @@ -406,3 +422,4 @@ Documentation for ``numpy.test`` .. _Hypothesis: https://hypothesis.readthedocs.io/en/latest/ .. _vscode: https://code.visualstudio.com/docs/python/testing#_enable-a-test-framework .. _pycharm: https://www.jetbrains.com/help/pycharm/testing-your-first-python-application.html +.. 
_pytest-run-parallel: https://github.com/Quansight-Labs/pytest-run-parallel diff --git a/doc/source/dev/development_environment.rst b/doc/source/dev/development_environment.rst index da8135def475..c2085a0013ef 100644 --- a/doc/source/dev/development_environment.rst +++ b/doc/source/dev/development_environment.rst @@ -92,6 +92,7 @@ one of:: $ spin test -v $ spin test numpy/random # to run the tests in a specific module $ spin test -v -t numpy/_core/tests/test_nditer.py::test_iter_c_order + $ spin test -p auto # to run tests in parallel threads using pytest-run-parallel This builds NumPy first, so the first time it may take a few minutes. diff --git a/numpy/_core/tests/test_array_coercion.py b/numpy/_core/tests/test_array_coercion.py index 9b2a7a5bab85..ba28e886282f 100644 --- a/numpy/_core/tests/test_array_coercion.py +++ b/numpy/_core/tests/test_array_coercion.py @@ -712,6 +712,7 @@ def __array__(self, dtype=None, copy=None): assert arr[0] is ArrayLike @pytest.mark.skipif(not IS_64BIT, reason="Needs 64bit platform") + @pytest.mark.thread_unsafe(reason="large slow test in parallel") def test_too_large_array_error_paths(self): """Test the error paths, including for memory leaks""" arr = np.array(0, dtype="uint8") diff --git a/numpy/_core/tests/test_arrayprint.py b/numpy/_core/tests/test_arrayprint.py index 04969cb74a4e..bad1aae78d5d 100644 --- a/numpy/_core/tests/test_arrayprint.py +++ b/numpy/_core/tests/test_arrayprint.py @@ -533,9 +533,6 @@ def test_nested_array_repr(self): ) @given(hynp.from_dtype(np.dtype("U"))) - @pytest.mark.thread_unsafe( - reason="gives unreliable results w/ hypothesis (HypothesisWorks/hypothesis#4562)" - ) def test_any_text(self, text): # This test checks that, given any value that can be represented in an # array of dtype("U") (i.e. unicode string), ... 
@@ -1324,6 +1321,7 @@ async def main(): loop.close() @pytest.mark.skipif(IS_WASM, reason="wasm doesn't support threads") +@pytest.mark.thread_unsafe(reason="test is already explicitly multi-threaded") def test_multithreaded_array_printing(): # the dragon4 implementation uses a static scratch space for performance # reasons this test makes sure it is set up in a thread-safe manner diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index cd9c93f6dad6..37fee7705504 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -361,6 +361,9 @@ def test_parenthesized_repeat_count(self, string): class TestAddNewdocUFunc(_DeprecationTestCase): # Deprecated in Numpy 2.2, 2024-11 + @pytest.mark.thread_unsafe( + reason="modifies and checks docstring which is global state" + ) def test_deprecated(self): doc = struct_ufunc.add_triplet.__doc__ # gh-26718 diff --git a/numpy/_core/tests/test_dtype.py b/numpy/_core/tests/test_dtype.py index e6c80d8d4212..611d5857fb15 100644 --- a/numpy/_core/tests/test_dtype.py +++ b/numpy/_core/tests/test_dtype.py @@ -2,7 +2,6 @@ import gc import operator import pickle -import random import sys import types from itertools import permutations @@ -1316,9 +1315,6 @@ def test_object_flag_not_inherited(self): @pytest.mark.slow @hypothesis.given(dtype=hynp.nested_dtypes()) - @pytest.mark.thread_unsafe( - reason="hynp.nested_dtypes thread unsafe (HypothesisWorks/hypothesis#4562)" - ) def test_make_canonical_hypothesis(self, dtype): canonical = np.result_type(dtype) self.check_canonical(dtype, canonical) @@ -1327,14 +1323,12 @@ def test_make_canonical_hypothesis(self, dtype): assert np.can_cast(two_arg_result, canonical, casting="no") @pytest.mark.slow - @pytest.mark.thread_unsafe( - reason="gives unreliable results w/ hypothesis (HypothesisWorks/hypothesis#4562)" - ) @hypothesis.given( dtype=hypothesis.extra.numpy.array_dtypes( 
subtype_strategy=hypothesis.extra.numpy.array_dtypes(), - min_size=5, max_size=10, allow_subarrays=True)) - def test_structured(self, dtype): + min_size=5, max_size=10, allow_subarrays=True), + random=hypothesis.strategies.randoms()) + def test_structured(self, dtype, random): # Pick 4 of the fields at random. This will leave empty space in the # dtype (since we do not canonicalize it here). field_subset = random.sample(dtype.names, k=4) diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index c2d54dd15caf..ca34756e5882 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -1003,6 +1003,7 @@ def test_too_big_error(self): @pytest.mark.skipif(not IS_64BIT, reason="malloc may not fail on 32 bit systems") + @pytest.mark.thread_unsafe(reason="large slow test in parallel") def test_malloc_fails(self): # This test is guaranteed to fail due to a too large allocation with assert_raises(np._core._exceptions._ArrayMemoryError): diff --git a/numpy/_core/tests/test_scalarmath.py b/numpy/_core/tests/test_scalarmath.py index bbde90c08bbb..bfbc9a54cbfe 100644 --- a/numpy/_core/tests/test_scalarmath.py +++ b/numpy/_core/tests/test_scalarmath.py @@ -871,9 +871,7 @@ def recursionlimit(n): @given(sampled_from(objecty_things), sampled_from(binary_operators_for_scalar_ints), sampled_from(types + [rational])) -@pytest.mark.thread_unsafe( - reason="gives unreliable results w/ hypothesis (HypothesisWorks/hypothesis#4562)" -) +@pytest.mark.thread_unsafe(reason="sets recursion limit globally") def test_operator_object_left(o, op, type_): try: with recursionlimit(200): @@ -885,9 +883,7 @@ def test_operator_object_left(o, op, type_): @given(sampled_from(objecty_things), sampled_from(binary_operators_for_scalar_ints), sampled_from(types + [rational])) -@pytest.mark.thread_unsafe( - reason="gives unreliable results w/ hypothesis (HypothesisWorks/hypothesis#4562)" -) +@pytest.mark.thread_unsafe(reason="sets recursion 
limit globally") def test_operator_object_right(o, op, type_): try: with recursionlimit(200): diff --git a/numpy/conftest.py b/numpy/conftest.py index c158a2251914..c3c96ef3bc39 100644 --- a/numpy/conftest.py +++ b/numpy/conftest.py @@ -117,7 +117,7 @@ def pytest_terminal_summary(terminalreporter, exitstatus, config): pytest.exit("GIL re-enabled during tests", returncode=1) # FIXME when yield tests are gone. -@pytest.hookimpl() +@pytest.hookimpl(tryfirst=True) def pytest_itemcollected(item): """ Check FPU precision mode was not changed during test collection. @@ -136,6 +136,11 @@ def pytest_itemcollected(item): _collect_results[item] = (_old_fpu_mode, mode) _old_fpu_mode = mode + # mark f2py tests as thread unsafe + if Path(item.fspath).parent == Path(__file__).parent / 'f2py' / 'tests': + item.add_marker(pytest.mark.thread_unsafe( + reason="f2py tests are thread-unsafe")) + @pytest.fixture(scope="function", autouse=True) def check_fpu_mode(request): @@ -241,9 +246,3 @@ def warnings_errors_and_rng(test=None): 'numpy/random/_examples', 'numpy/f2py/_backends/_distutils.py', ] - -def pytest_collection_modifyitems(config, items): - for item in items: - if Path(item.fspath).parent == Path(__file__).parent / 'f2py' / 'tests': - item.add_marker(pytest.mark.thread_unsafe( - reason="f2py tests are thread-unsafe")) diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 57c8911be3fb..5c5f1b0315d0 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -3938,10 +3938,6 @@ def test_quantile_monotonic(self, method): quantile = np.quantile([0., 1., 2., 3.], p0, method=method) assert_equal(np.sort(quantile), quantile) - @pytest.mark.thread_unsafe( - reason="gives unreliable results w/ hypothesis " - "(HypothesisWorks/hypothesis#4562)" - ) @hypothesis.given( arr=arrays(dtype=np.float64, shape=st.integers(min_value=3, max_value=1000), diff --git a/numpy/linalg/tests/test_regression.py 
b/numpy/linalg/tests/test_regression.py index e02e955cfa40..053e7130da63 100644 --- a/numpy/linalg/tests/test_regression.py +++ b/numpy/linalg/tests/test_regression.py @@ -165,6 +165,7 @@ def test_matrix_rank_rtol_argument(self, rtol): res = np.linalg.matrix_rank(x, rtol=rtol) assert res.shape == (4,) + @pytest.mark.thread_unsafe(reason="test is already testing threads with openblas") def test_openblas_threading(self): # gh-27036 # Test whether matrix multiplication involving a large matrix always From 0f8499daee14db8fc7e243bb0618abbfcd5bead4 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 22 Oct 2025 01:03:36 +0200 Subject: [PATCH 0715/1718] DEV: Set correct ``PYTHONPATH`` in ``spin stubtest`` (#30039) --- .spin/cmds.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/.spin/cmds.py b/.spin/cmds.py index 5c0c21198d78..be97fe228ea8 100644 --- a/.spin/cmds.py +++ b/.spin/cmds.py @@ -544,13 +544,19 @@ def mypy(ctx): default=False, help="Concise output format", ) -def stubtest(*, concise: bool) -> None: +@meson.build_dir_option +def stubtest(*, concise: bool, build_dir: str) -> None: """🧐 Run stubtest on NumPy's .pyi stubs Requires mypy to be installed """ - ctx = click.get_current_context() - ctx.invoke(build) + click.get_current_context().invoke(build) + meson._set_pythonpath(build_dir) + print(f"{build_dir = !r}") + + import sysconfig + purellib = sysconfig.get_paths()["purelib"] + print(f"{purellib = !r}") stubtest_dir = curdir.parent / 'tools' / 'stubtest' mypy_config = stubtest_dir / 'mypy.ini' @@ -565,6 +571,7 @@ def stubtest(*, concise: bool) -> None: if concise: cmd.append('--concise') cmd.append('numpy') + spin.util.run(cmd) From 39e3c5617007deb23f510f3631ae61a414b2d3f1 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 22 Oct 2025 01:06:18 +0200 Subject: [PATCH 0716/1718] DEV: separate stubtest allowlist for py312+ (#30045) --- .spin/cmds.py | 6 ++++-- tools/stubtest/allowlist.txt | 5 ----- 
tools/stubtest/allowlist_py312.txt | 8 ++++++++ 3 files changed, 12 insertions(+), 7 deletions(-) create mode 100644 tools/stubtest/allowlist_py312.txt diff --git a/.spin/cmds.py b/.spin/cmds.py index be97fe228ea8..925991c5c804 100644 --- a/.spin/cmds.py +++ b/.spin/cmds.py @@ -560,13 +560,15 @@ def stubtest(*, concise: bool, build_dir: str) -> None: stubtest_dir = curdir.parent / 'tools' / 'stubtest' mypy_config = stubtest_dir / 'mypy.ini' - allowlist = stubtest_dir / 'allowlist.txt' + allowlists = [stubtest_dir / 'allowlist.txt'] + if sys.version_info >= (3, 12): + allowlists.append(stubtest_dir / 'allowlist_py312.txt') cmd = [ 'stubtest', '--ignore-disjoint-bases', f'--mypy-config-file={mypy_config}', - f'--allowlist={allowlist}', + *(f'--allowlist={allowlist}' for allowlist in allowlists), ] if concise: cmd.append('--concise') diff --git a/tools/stubtest/allowlist.txt b/tools/stubtest/allowlist.txt index c5d7fe2418d3..6b2c3a29bbc6 100644 --- a/tools/stubtest/allowlist.txt +++ b/tools/stubtest/allowlist.txt @@ -31,13 +31,8 @@ numpy\.f2py\.__main__ # inexpressible: the `dtype.type` class-attribute is `None` unless instantiated numpy(\..+)?\.dtype\.type -# mypy false positive "... is not a Union" errors (py314 only?) -numpy\.typing\.ArrayLike -numpy\.typing\.DTypeLike - # distutils numpy\.distutils.* -numpy\.f2py\._backends\._distutils # import errors numpy\._build_utils.* diff --git a/tools/stubtest/allowlist_py312.txt b/tools/stubtest/allowlist_py312.txt new file mode 100644 index 000000000000..867b2f1870a3 --- /dev/null +++ b/tools/stubtest/allowlist_py312.txt @@ -0,0 +1,8 @@ +# python >= 3.12 + +# false positive "... 
is not a Union" errors +numpy\.typing\.ArrayLike +numpy\.typing\.DTypeLike + +# only exists before Python 3.12 +numpy\.f2py\._backends\._distutils From a824e79bf505edcaecd0082b7f96568450ab20f4 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 22 Oct 2025 17:32:24 +0200 Subject: [PATCH 0717/1718] CI: Python 3.14 stable --- .github/workflows/linux.yml | 2 +- .github/workflows/windows.yml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 42a9601f33e1..45a9f65bd9f0 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -58,7 +58,7 @@ jobs: MESON_ARGS: "-Dallow-noblas=true -Dcpu-baseline=none -Dcpu-dispatch=none" strategy: matrix: - version: ["3.11", "3.12", "3.13", "3.14-dev", "3.14t-dev"] + version: ["3.11", "3.12", "3.13", "3.14", "3.14t"] steps: - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index b86543189941..886a70e1be2b 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -30,7 +30,7 @@ jobs: - name: Setup Python uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: - python-version: "3.14t-dev" + python-version: "3.14t" - name: Install build dependencies from PyPI run: | @@ -97,7 +97,7 @@ jobs: - name: Build and install run: | python -m pip install . 
-v -Ccompile-args="-j2" -Csetup-args="-Dallow-noblas=true" 2>&1 | tee build.log - + - name: Check warnings uses: ./.github/check-warnings with: From b4c575a63af23cec0168ca392331fcc5f42054c5 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 22 Oct 2025 20:28:07 +0200 Subject: [PATCH 0718/1718] ENH: Add ``stable`` kwarg to ``chararray.argsort`` (#30000) * ENH: Add ``stable`` kwarg to ``chararray.argsort`` * TST: regression test for `chararray.argsort(stable=)` Co-authored-by: Tyler Reddy --------- Co-authored-by: Tyler Reddy --- numpy/_core/defchararray.py | 4 ++-- numpy/_core/tests/test_defchararray.py | 5 +++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/numpy/_core/defchararray.py b/numpy/_core/defchararray.py index 098f4ab9ce9b..1a8750507f41 100644 --- a/numpy/_core/defchararray.py +++ b/numpy/_core/defchararray.py @@ -718,7 +718,7 @@ def __mod__(self, i): def __rmod__(self, other): return NotImplemented - def argsort(self, axis=-1, kind=None, order=None): + def argsort(self, axis=-1, kind=None, order=None, *, stable=None): """ Return the indices that sort the array lexicographically. 
@@ -736,7 +736,7 @@ def argsort(self, axis=-1, kind=None, order=None): dtype='|S5') """ - return self.__array__().argsort(axis, kind, order) + return self.__array__().argsort(axis, kind, order, stable=stable) argsort.__doc__ = ndarray.argsort.__doc__ def capitalize(self): diff --git a/numpy/_core/tests/test_defchararray.py b/numpy/_core/tests/test_defchararray.py index 04518bdbd671..e98632b62829 100644 --- a/numpy/_core/tests/test_defchararray.py +++ b/numpy/_core/tests/test_defchararray.py @@ -694,6 +694,11 @@ def B(self): return np.array([['efg', '456'], ['051', 'tuv']]).view(np.char.chararray) + def test_argsort(self): + arr = np.array(['abc'] * 4).view(np.char.chararray) + actual = arr.argsort(stable=True) + assert_array_equal(actual, [0, 1, 2, 3]) + def test_add(self): A, B = self.A(), self.B() AB = np.array([['abcefg', '123456'], From 4cc4a5808842f9538e11cafc0b2676cecf235ec1 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 22 Oct 2025 20:30:19 +0200 Subject: [PATCH 0719/1718] MAINT: Remove ``NDArrayOperatorsMixin.um`` class attribute ``umath`` reexport (#30007) --- numpy/lib/mixins.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/lib/mixins.py b/numpy/lib/mixins.py index 831bb34cfb55..9a531c6b9022 100644 --- a/numpy/lib/mixins.py +++ b/numpy/lib/mixins.py @@ -1,6 +1,7 @@ """ Mixin classes for custom array types that don't inherit from ndarray. """ +from numpy._core import umath as um __all__ = ['NDArrayOperatorsMixin'] @@ -135,7 +136,6 @@ class that simply wraps a NumPy array and ensures that the result of any ArrayLike preserve a well-defined casting hierarchy. 
""" - from numpy._core import umath as um __slots__ = () # Like np.ndarray, this mixin class implements "Option 1" from the ufunc From 76e91189b23d4e0afc34130e95f4f460a3d57d95 Mon Sep 17 00:00:00 2001 From: Jake Vanderplas Date: Wed, 22 Oct 2025 11:46:57 -0700 Subject: [PATCH 0720/1718] BUG: support axis sequence in `np.trim_zeros` (#29947) --- .../upcoming_changes/29947.improvement.rst | 7 +++ numpy/lib/_function_base_impl.py | 27 +++++----- numpy/lib/tests/test_function_base.py | 53 +++++++++++++++++++ 3 files changed, 74 insertions(+), 13 deletions(-) create mode 100644 doc/release/upcoming_changes/29947.improvement.rst diff --git a/doc/release/upcoming_changes/29947.improvement.rst b/doc/release/upcoming_changes/29947.improvement.rst new file mode 100644 index 000000000000..99c67e598347 --- /dev/null +++ b/doc/release/upcoming_changes/29947.improvement.rst @@ -0,0 +1,7 @@ +Multiple axes are now supported in ``numpy.trim_zeros`` +------------------------------------------------------- +The ``axis`` argument of `numpy.trim_zeros` now accepts a sequence; for example +``np.trim_zeros(x, axis=(0, 1))`` will trim the zeros from a multi-dimensional +array ``x`` along axes 0 and 1. This fixes issue +`gh‑29945 `__ and was implemented +in pull request `gh‑29947 `__. diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 54e6645f6182..9d5553cc7a34 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -1986,6 +1986,14 @@ def trim_zeros(filt, trim='fb', axis=None): trim = trim.lower() if trim not in {"fb", "bf", "f", "b"}: raise ValueError(f"unexpected character(s) in `trim`: {trim!r}") + if axis is None: + axis_tuple = tuple(range(filt_.ndim)) + else: + axis_tuple = _nx.normalize_axis_tuple(axis, filt_.ndim, argname="axis") + + if not axis_tuple: + # No trimming requested -> return input unmodified. 
+ return filt start, stop = _arg_trim_zeros(filt_) stop += 1 # Adjust for slicing @@ -2000,20 +2008,13 @@ def trim_zeros(filt, trim='fb', axis=None): if 'b' not in trim: stop = (None,) * filt_.ndim - if len(start) == 1: - # filt is 1D -> don't use multi-dimensional slicing to preserve + sl = tuple(slice(start[ax], stop[ax]) if ax in axis_tuple else slice(None) + for ax in range(filt_.ndim)) + if len(sl) == 1: + # filt is 1D -> avoid multi-dimensional slicing to preserve # non-array input types - sl = slice(start[0], stop[0]) - elif axis is None: - # trim all axes - sl = tuple(slice(*x) for x in zip(start, stop)) - else: - # only trim single axis - axis = normalize_axis_index(axis, filt_.ndim) - sl = (slice(None),) * axis + (slice(start[axis], stop[axis]),) + (...,) - - trimmed = filt[sl] - return trimmed + return filt[sl[0]] + return filt[sl] def _extract_dispatcher(condition, arr): diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 5c5f1b0315d0..1d4026d8562f 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -1380,6 +1380,36 @@ class TestTrimZeros: c = a.astype(complex) d = a.astype(object) + def construct_input_output(self, rng, shape, axis, trim): + """Construct an input/output test pair for trim_zeros""" + # Standardize axis to a tuple. + if axis is None: + axis = tuple(range(len(shape))) + elif isinstance(axis, int): + axis = (len(shape) + axis if axis < 0 else axis,) + else: + axis = tuple(len(shape) + ax if ax < 0 else ax for ax in axis) + + # Populate a random interior slice with nonzero entries. 
+ data = np.zeros(shape) + i_start = rng.integers(low=0, high=np.array(shape) - 1) + i_end = rng.integers(low=i_start + 1, high=shape) + inner_shape = tuple(i_end - i_start) + inner_data = 1 + rng.random(inner_shape) + data[tuple(slice(i, j) for i, j in zip(i_start, i_end))] = inner_data + + # Construct the expected output of N-dimensional trim_zeros + # with the given axis and trim arguments. + if 'f' not in trim: + i_start = np.array([None for _ in shape]) + if 'b' not in trim: + i_end = np.array([None for _ in shape]) + idx = tuple(slice(i, j) if ax in axis else slice(None) + for ax, (i, j) in enumerate(zip(i_start, i_end))) + expected = data[idx] + + return data, expected + def values(self): attr_names = ('a', 'b', 'c', 'd') return (getattr(self, name) for name in attr_names) @@ -1465,6 +1495,29 @@ def test_unexpected_trim_value(self, trim): with pytest.raises(ValueError, match=r"unexpected character\(s\) in `trim`"): trim_zeros(arr, trim=trim) + @pytest.mark.parametrize("shape, axis", [ + [(5,), None], + [(5,), ()], + [(5,), 0], + [(5, 6), None], + [(5, 6), ()], + [(5, 6), 0], + [(5, 6), (-1,)], + [(5, 6, 7), None], + [(5, 6, 7), ()], + [(5, 6, 7), 1], + [(5, 6, 7), (0, 2)], + [(5, 6, 7, 8), None], + [(5, 6, 7, 8), ()], + [(5, 6, 7, 8), -2], + [(5, 6, 7, 8), (0, 1, 3)], + ]) + @pytest.mark.parametrize("trim", ['fb', 'f', 'b']) + def test_multiple_axes(self, shape, axis, trim): + rng = np.random.default_rng(4321) + data, expected = self.construct_input_output(rng, shape, axis, trim) + assert_array_equal(trim_zeros(data, axis=axis, trim=trim), expected) + class TestExtins: From 5a510b26958ee258009684b4d64a426e9d356b75 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 23 Oct 2025 02:41:44 +0200 Subject: [PATCH 0721/1718] TYP: fix mypy error in `_array_api_info` --- numpy/_array_api_info.pyi | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/numpy/_array_api_info.pyi b/numpy/_array_api_info.pyi index 069ef478de92..396125143e92 100644 --- 
a/numpy/_array_api_info.pyi +++ b/numpy/_array_api_info.pyi @@ -1,5 +1,4 @@ from typing import ( - ClassVar, Literal, Never, TypeAlias, @@ -118,7 +117,7 @@ _EmptyDict: TypeAlias = dict[Never, Never] @final class __array_namespace_info__: - __module__: ClassVar[Literal["numpy"]] + __module__: Literal["numpy"] = "numpy" def capabilities(self) -> _Capabilities: ... def default_device(self) -> _Device: ... From 6fed6c675161a3e5bf29eab0a2f4286b11180e25 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 23 Oct 2025 02:51:38 +0200 Subject: [PATCH 0722/1718] TYP: ignore false-positive mypy errors in `__init__.pyi` --- numpy/__init__.pyi | 82 ++++++++++++++++++++++++++++------------------ 1 file changed, 51 insertions(+), 31 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 30b298e1d607..8d754d917ad5 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -5002,6 +5002,10 @@ class float64(floating[_64Bit], float): # type: ignore[misc] def __getformat__(cls, typestr: L["double", "float"], /) -> str: ... # undocumented # float64-specific operator overrides + # NOTE: Mypy reports [misc] errors about "unsafely overlapping signatures" for the + # reflected methods. But since they are identical to the non-reflected versions, + # these errors appear to be false positives. + @overload def __add__(self, other: _Float64_co, /) -> float64: ... @overload @@ -5010,10 +5014,11 @@ class float64(floating[_64Bit], float): # type: ignore[misc] def __add__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... @overload def __add__(self, other: complex, /) -> float64 | complex128: ... + @overload - def __radd__(self, other: _Float64_co, /) -> float64: ... + def __radd__(self, other: _Float64_co, /) -> float64: ... # type: ignore[misc] @overload - def __radd__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + def __radd__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... 
# type: ignore[misc] @overload def __radd__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... @overload @@ -5027,10 +5032,11 @@ class float64(floating[_64Bit], float): # type: ignore[misc] def __sub__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... @overload def __sub__(self, other: complex, /) -> float64 | complex128: ... + @overload - def __rsub__(self, other: _Float64_co, /) -> float64: ... + def __rsub__(self, other: _Float64_co, /) -> float64: ... # type: ignore[misc] @overload - def __rsub__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + def __rsub__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... # type: ignore[misc] @overload def __rsub__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... @overload @@ -5044,10 +5050,11 @@ class float64(floating[_64Bit], float): # type: ignore[misc] def __mul__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... @overload def __mul__(self, other: complex, /) -> float64 | complex128: ... + @overload - def __rmul__(self, other: _Float64_co, /) -> float64: ... + def __rmul__(self, other: _Float64_co, /) -> float64: ... # type: ignore[misc] @overload - def __rmul__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + def __rmul__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... # type: ignore[misc] @overload def __rmul__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... @overload @@ -5061,10 +5068,11 @@ class float64(floating[_64Bit], float): # type: ignore[misc] def __truediv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... @overload def __truediv__(self, other: complex, /) -> float64 | complex128: ... 
+ @overload - def __rtruediv__(self, other: _Float64_co, /) -> float64: ... + def __rtruediv__(self, other: _Float64_co, /) -> float64: ... # type: ignore[misc] @overload - def __rtruediv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + def __rtruediv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... # type: ignore[misc] @overload def __rtruediv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... @overload @@ -5078,8 +5086,9 @@ class float64(floating[_64Bit], float): # type: ignore[misc] def __floordiv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... @overload def __floordiv__(self, other: complex, /) -> float64 | complex128: ... + @overload - def __rfloordiv__(self, other: _Float64_co, /) -> float64: ... + def __rfloordiv__(self, other: _Float64_co, /) -> float64: ... # type: ignore[misc] @overload def __rfloordiv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... @overload @@ -5097,10 +5106,11 @@ class float64(floating[_64Bit], float): # type: ignore[misc] ) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... @overload def __pow__(self, other: complex, mod: None = None, /) -> float64 | complex128: ... + @overload - def __rpow__(self, other: _Float64_co, mod: None = None, /) -> float64: ... + def __rpow__(self, other: _Float64_co, mod: None = None, /) -> float64: ... # type: ignore[misc] @overload - def __rpow__(self, other: complexfloating[_64Bit, _64Bit], mod: None = None, /) -> complex128: ... + def __rpow__(self, other: complexfloating[_64Bit, _64Bit], mod: None = None, /) -> complex128: ... # type: ignore[misc] @overload def __rpow__( self, other: complexfloating[_NBit1, _NBit2], mod: None = None, / @@ -5345,21 +5355,22 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] @overload def __mod__(self, x: timedelta64, /) -> timedelta64: ... 
- # the L[0] makes __mod__ non-commutative, which the first two overloads reflect + # NOTE: The L[0] makes __mod__ non-commutative, which the first two overloads + # reflect. However, mypy does not seem to like this, so we ignore the errors. @overload - def __rmod__(self, x: timedelta64[None], /) -> timedelta64[None]: ... + def __rmod__(self, x: timedelta64[None], /) -> timedelta64[None]: ... # type: ignore[misc] @overload def __rmod__(self: timedelta64[L[0] | None], x: timedelta64, /) -> timedelta64[None]: ... @overload - def __rmod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> timedelta64[int | None]: ... + def __rmod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> timedelta64[int | None]: ... # type: ignore[misc] @overload - def __rmod__(self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], /) -> timedelta64[_AnyTD64Item | None]: ... + def __rmod__(self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], /) -> timedelta64[_AnyTD64Item | None]: ... # type: ignore[misc] @overload def __rmod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> dt.timedelta: ... @overload - def __rmod__(self, x: timedelta64[int], /) -> timedelta64[int | None]: ... + def __rmod__(self, x: timedelta64[int], /) -> timedelta64[int | None]: ... # type: ignore[misc] @overload - def __rmod__(self, x: timedelta64, /) -> timedelta64: ... + def __rmod__(self, x: timedelta64, /) -> timedelta64: ... # type: ignore[misc] # keep in sync with __mod__ @overload @@ -5369,7 +5380,9 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] @overload def __divmod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> tuple[int64, timedelta64[int | None]]: ... @overload - def __divmod__(self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], /) -> tuple[int64, timedelta64[_AnyTD64Item | None]]: ... 
+ def __divmod__( + self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], / + ) -> tuple[int64, timedelta64[_AnyTD64Item | None]]: ... @overload def __divmod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> tuple[int, dt.timedelta]: ... @overload @@ -5379,19 +5392,21 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] # keep in sync with __rmod__ @overload - def __rdivmod__(self, x: timedelta64[None], /) -> tuple[int64, timedelta64[None]]: ... + def __rdivmod__(self, x: timedelta64[None], /) -> tuple[int64, timedelta64[None]]: ... # type: ignore[misc] @overload - def __rdivmod__(self: timedelta64[L[0] | None], x: timedelta64, /) -> tuple[int64, timedelta64[None]]: ... + def __rdivmod__(self: timedelta64[L[0] | None], x: timedelta64, /) -> tuple[int64, timedelta64[None]]: ... # type: ignore[misc] @overload - def __rdivmod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> tuple[int64, timedelta64[int | None]]: ... + def __rdivmod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> tuple[int64, timedelta64[int | None]]: ... # type: ignore[misc] @overload - def __rdivmod__(self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], /) -> tuple[int64, timedelta64[_AnyTD64Item | None]]: ... + def __rdivmod__( # type: ignore[misc] + self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], / + ) -> tuple[int64, timedelta64[_AnyTD64Item | None]]: ... @overload def __rdivmod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> tuple[int, dt.timedelta]: ... @overload - def __rdivmod__(self, x: timedelta64[int], /) -> tuple[int64, timedelta64[int | None]]: ... + def __rdivmod__(self, x: timedelta64[int], /) -> tuple[int64, timedelta64[int | None]]: ... # type: ignore[misc] @overload - def __rdivmod__(self, x: timedelta64, /) -> tuple[int64, timedelta64]: ... + def __rdivmod__(self, x: timedelta64, /) -> tuple[int64, timedelta64]: ... 
# type: ignore[misc] @overload def __sub__(self: timedelta64[None], b: _TD64Like_co, /) -> timedelta64[None]: ... @@ -5406,18 +5421,20 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] @overload def __sub__(self, b: timedelta64[None], /) -> timedelta64[None]: ... + # NOTE: subtraction is not commutative, so __rsub__ differs from __sub__. + # This confuses mypy, so we ignore the [misc] errors it reports. @overload def __rsub__(self: timedelta64[None], a: _TD64Like_co, /) -> timedelta64[None]: ... @overload def __rsub__(self: timedelta64[dt.timedelta], a: _AnyDateOrTime, /) -> _AnyDateOrTime: ... @overload - def __rsub__(self: timedelta64[dt.timedelta], a: timedelta64[_AnyTD64Item], /) -> timedelta64[_AnyTD64Item]: ... + def __rsub__(self: timedelta64[dt.timedelta], a: timedelta64[_AnyTD64Item], /) -> timedelta64[_AnyTD64Item]: ... # type: ignore[misc] @overload - def __rsub__(self: timedelta64[_AnyTD64Item], a: timedelta64[_AnyTD64Item] | _IntLike_co, /) -> timedelta64[_AnyTD64Item]: ... + def __rsub__(self: timedelta64[_AnyTD64Item], a: timedelta64[_AnyTD64Item] | _IntLike_co, /) -> timedelta64[_AnyTD64Item]: ... # type: ignore[misc] @overload def __rsub__(self, a: timedelta64[None], /) -> timedelta64[None]: ... @overload - def __rsub__(self, a: datetime64[None], /) -> datetime64[None]: ... + def __rsub__(self, a: datetime64[None], /) -> datetime64[None]: ... # type: ignore[misc] @overload def __truediv__(self: timedelta64[dt.timedelta], b: dt.timedelta, /) -> float: ... @@ -5429,6 +5446,7 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] def __truediv__(self: timedelta64[_AnyTD64Item], b: float | floating, /) -> timedelta64[_AnyTD64Item | None]: ... @overload def __truediv__(self, b: float | floating | integer, /) -> timedelta64: ... + @overload def __rtruediv__(self: timedelta64[dt.timedelta], a: dt.timedelta, /) -> float: ... 
@overload @@ -5442,6 +5460,7 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] def __floordiv__(self: timedelta64[_AnyTD64Item], b: int | integer, /) -> timedelta64[_AnyTD64Item]: ... @overload def __floordiv__(self: timedelta64[_AnyTD64Item], b: float | floating, /) -> timedelta64[_AnyTD64Item | None]: ... + @overload def __rfloordiv__(self: timedelta64[dt.timedelta], a: dt.timedelta, /) -> int: ... @overload @@ -5557,6 +5576,7 @@ class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]): @overload def __sub__(self, x: datetime64, /) -> timedelta64: ... + # NOTE: mypy gets confused by the non-commutativity of subtraction here @overload def __rsub__(self: datetime64[_AnyDT64Item], x: int | integer | np.bool, /) -> datetime64[_AnyDT64Item]: ... @overload @@ -5566,13 +5586,13 @@ class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]): @overload def __rsub__(self: datetime64[int], x: datetime64, /) -> timedelta64[int]: ... @overload - def __rsub__(self: datetime64[dt.datetime], x: datetime64[int], /) -> timedelta64[int]: ... + def __rsub__(self: datetime64[dt.datetime], x: datetime64[int], /) -> timedelta64[int]: ... # type: ignore[misc] @overload - def __rsub__(self: datetime64[dt.datetime], x: datetime64[dt.date], /) -> timedelta64[dt.timedelta]: ... + def __rsub__(self: datetime64[dt.datetime], x: datetime64[dt.date], /) -> timedelta64[dt.timedelta]: ... # type: ignore[misc] @overload - def __rsub__(self, x: datetime64[None], /) -> timedelta64[None]: ... + def __rsub__(self, x: datetime64[None], /) -> timedelta64[None]: ... # type: ignore[misc] @overload - def __rsub__(self, x: datetime64, /) -> timedelta64: ... + def __rsub__(self, x: datetime64, /) -> timedelta64: ... # type: ignore[misc] @overload def __lt__(self, other: datetime64, /) -> bool_: ... 
From 3e3edd6898266c8c07de436b78b8d6ec1105fa08 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 23 Oct 2025 02:55:49 +0200 Subject: [PATCH 0723/1718] TYP: fix mypy `attr-defined` import error in `_core.defchararray` --- numpy/_core/defchararray.pyi | 19 +++++-------------- 1 file changed, 5 insertions(+), 14 deletions(-) diff --git a/numpy/_core/defchararray.pyi b/numpy/_core/defchararray.pyi index 53ff96f450f5..62a1c59b3ae5 100644 --- a/numpy/_core/defchararray.pyi +++ b/numpy/_core/defchararray.pyi @@ -7,19 +7,10 @@ from typing import ( TypeAlias, overload, ) -from typing_extensions import TypeVar +from typing_extensions import Buffer, TypeVar import numpy as np -from numpy import ( - _OrderKACF, - _SupportsBuffer, - bytes_, - dtype, - int_, - ndarray, - object_, - str_, -) +from numpy import _OrderKACF, bytes_, dtype, int_, ndarray, object_, str_ from numpy._core.multiarray import compare_chararrays from numpy._typing import ( NDArray, @@ -108,7 +99,7 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): shape: _ShapeLike, itemsize: SupportsIndex | SupportsInt = 1, unicode: L[False] = False, - buffer: _SupportsBuffer | None = None, + buffer: Buffer | np.ndarray | None = None, offset: SupportsIndex = 0, strides: _ShapeLike | None = None, order: _OrderKACF = "C", @@ -119,7 +110,7 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): shape: _ShapeLike, itemsize: SupportsIndex | SupportsInt, unicode: L[True], - buffer: _SupportsBuffer | None = None, + buffer: Buffer | np.ndarray | None = None, offset: SupportsIndex = 0, strides: _ShapeLike | None = None, order: _OrderKACF = "C", @@ -131,7 +122,7 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): itemsize: SupportsIndex | SupportsInt = 1, *, unicode: L[True], - buffer: _SupportsBuffer | None = None, + buffer: Buffer | np.ndarray | None = None, offset: SupportsIndex = 0, strides: _ShapeLike | None = None, order: _OrderKACF = "C", From 06fb0f5e7a0148a33b034d07d9bbbbb26fd9495b Mon Sep 17 00:00:00 2001 From: 
jorenham Date: Thu, 23 Oct 2025 02:59:25 +0200 Subject: [PATCH 0724/1718] TYP: fix mypy `misc` error in `ctypeslib._ctypeslib._concrete_ndptr` --- numpy/ctypeslib/_ctypeslib.pyi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/ctypeslib/_ctypeslib.pyi b/numpy/ctypeslib/_ctypeslib.pyi index 4ab670037e2b..abcd5a4264e8 100644 --- a/numpy/ctypeslib/_ctypeslib.pyi +++ b/numpy/ctypeslib/_ctypeslib.pyi @@ -73,7 +73,7 @@ class _ndptr(ctypes.c_void_p, Generic[_DTypeOptionalT]): # In practice these 4 classvars are defined in the dynamic class # returned by `ndpointer` _dtype_: ClassVar[_DTypeOptionalT] - _shape_: ClassVar[None] + _shape_: ClassVar[_AnyShape | None] _ndim_: ClassVar[int | None] _flags_: ClassVar[list[_FlagsKind] | None] From 9267d29c75bcd25b2811ff99dcc095a23c43c418 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 23 Oct 2025 03:03:07 +0200 Subject: [PATCH 0725/1718] TYP: remove duplicate `_Ignored` type alias in `ma/core.pyi` --- numpy/ma/core.pyi | 2 -- 1 file changed, 2 deletions(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index aeb32bed01f9..f8e735e9294d 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -313,8 +313,6 @@ _ConvertibleToComplex: TypeAlias = SupportsComplex | SupportsFloat | SupportsInd _ConvertibleToTD64: TypeAlias = dt.timedelta | int | _CharLike_co | character | number | timedelta64 | np.bool | None _ConvertibleToDT64: TypeAlias = dt.date | int | _CharLike_co | character | number | datetime64 | np.bool | None -_Ignored: TypeAlias = object - ### MaskType = bool_ From 964cca5dacbc03c042301c38569834af6c12befb Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 23 Oct 2025 03:09:06 +0200 Subject: [PATCH 0726/1718] TYP: ignore false-positive mypy errors in `lib/_index_tricks_impl.pyi` --- numpy/lib/_index_tricks_impl.pyi | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/numpy/lib/_index_tricks_impl.pyi b/numpy/lib/_index_tricks_impl.pyi index cfd7ea0919eb..8f12e4a36d99 100644 
--- a/numpy/lib/_index_tricks_impl.pyi +++ b/numpy/lib/_index_tricks_impl.pyi @@ -133,14 +133,16 @@ class AxisConcatenator(Generic[_AxisT_co, _MatrixT_co, _NDMinT_co, _Trans1DT_co] ndmin: _NDMinT_co trans1d: _Trans1DT_co - # + # NOTE: mypy does not understand that these default values are the same as the + # TypeVar defaults. Since the workaround would require us to write 16 overloads, + # we ignore the assignment type errors here. def __init__( self, /, - axis: _AxisT_co = 0, - matrix: _MatrixT_co = False, - ndmin: _NDMinT_co = 1, - trans1d: _Trans1DT_co = -1, + axis: _AxisT_co = 0, # type: ignore[assignment] + matrix: _MatrixT_co = False, # type: ignore[assignment] + ndmin: _NDMinT_co = 1, # type: ignore[assignment] + trans1d: _Trans1DT_co = -1, # type: ignore[assignment] ) -> None: ... # TODO(jorenham): annotate this From e3b61d4177c15579f47b439ad3013a8ba619ce24 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 23 Oct 2025 03:31:20 +0200 Subject: [PATCH 0727/1718] TYP: Type-checking --- tools/stubtest/mypy.ini | 27 ++++++++++++++++++++++----- 1 file changed, 22 insertions(+), 5 deletions(-) diff --git a/tools/stubtest/mypy.ini b/tools/stubtest/mypy.ini index 307590b0f922..1144b84434f1 100644 --- a/tools/stubtest/mypy.ini +++ b/tools/stubtest/mypy.ini @@ -1,6 +1,23 @@ -; Stubtest requires mypy to pass before running, which would currently fail -; on numerous stubs. To allow running stubtest independently, we ignore mypy -; errors here. - [mypy] -ignore_errors = True +files = numpy/**/*.pyi +exclude = (?x)( + ^numpy/( + .+\.py$ + | _build_utils/ + | _core/code_generators/ + | distutils/ + ) + ) +namespace_packages = False + +enable_error_code = ignore-without-code, redundant-expr, truthy-bool +warn_unreachable = False +strict_bytes = True +allow_redefinition_new = True +local_partial_types = True + +; Stubtest requires mypy to pass before running, which would currently fail +; on numerous stubs. 
To allow running stubtest independently, we temporarily +; ignore these errors here. The goal is to eventually fix these mypy errors +; and remove the ignores. +disable_error_code = overload-cannot-match, overload-overlap, override From bce7e51d6f885d2b73a86935eb1b5f38128812c9 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 23 Oct 2025 03:31:57 +0200 Subject: [PATCH 0728/1718] MAINT: remove unused stubtest allowlist entries --- tools/stubtest/allowlist.txt | 15 ++------------- 1 file changed, 2 insertions(+), 13 deletions(-) diff --git a/tools/stubtest/allowlist.txt b/tools/stubtest/allowlist.txt index 6b2c3a29bbc6..77ff582c9f66 100644 --- a/tools/stubtest/allowlist.txt +++ b/tools/stubtest/allowlist.txt @@ -1,7 +1,6 @@ # intentional type-check-only deviations from runtime numpy\._typing.* numpy\.polynomial\._polytypes -numpy\.typing\.mypy_plugin\..* # internal testing code numpy\.conftest.* @@ -31,19 +30,8 @@ numpy\.f2py\.__main__ # inexpressible: the `dtype.type` class-attribute is `None` unless instantiated numpy(\..+)?\.dtype\.type -# distutils -numpy\.distutils.* - # import errors -numpy\._build_utils.* -numpy\._core\.code_generators.* -numpy\._core\.include\.numpy\.libdivide -numpy\._core\.src.* -numpy\._pyinstaller.* -numpy\.fft\.pocketfft -numpy\.random\._examples\..* -numpy\.random\.include -numpy\.random\.src.* +numpy\._pyinstaller\..* # known minor deviations from runtime numpy\.(\w+\.)*integer\.__index__ @@ -69,6 +57,7 @@ numpy\.core\.overrides numpy\.core\.records.* numpy\.core\.shape_base.* numpy\.core\.umath.* +numpy\.typing\.mypy_plugin # ufuncs numpy\.(\w+\.)*abs From e9eaffa8743cb52146f5e7f31debf17a42db2bc7 Mon Sep 17 00:00:00 2001 From: riku-sakamoto <46015196+riku-sakamoto@users.noreply.github.com> Date: Thu, 23 Oct 2025 19:12:42 +0900 Subject: [PATCH 0729/1718] BUG: allow division between object-dtype arrays and timedelta objects (#30054) Fixes a bug where dividing an object-dtype array containing Python timedelta objects by a NumPy 
timedelta64 scalar would raise a `_UFuncBinaryResolutionError`. The operation now falls back to the generic object ufunc loop, enabling elementwise Python evaluation consistent with other object operations. See gh-30025. * MAINT: refactor handling and tests for division between object-dtype arrays and timedelta objects --- numpy/_core/src/umath/ufunc_type_resolution.c | 7 +-- numpy/_core/tests/test_datetime.py | 48 +++++++++++++++++++ 2 files changed, 52 insertions(+), 3 deletions(-) diff --git a/numpy/_core/src/umath/ufunc_type_resolution.c b/numpy/_core/src/umath/ufunc_type_resolution.c index 8d617f3ddc6c..f5a203719b54 100644 --- a/numpy/_core/src/umath/ufunc_type_resolution.c +++ b/numpy/_core/src/umath/ufunc_type_resolution.c @@ -1252,9 +1252,10 @@ PyUFunc_DivisionTypeResolver(PyUFuncObject *ufunc, type_num2 = PyArray_DESCR(operands[1])->type_num; /* Use the default when datetime and timedelta are not involved */ - if (!PyTypeNum_ISDATETIME(type_num1) && !PyTypeNum_ISDATETIME(type_num2)) { - return PyUFunc_DefaultTypeResolver(ufunc, casting, operands, - type_tup, out_dtypes); + if ((!PyTypeNum_ISDATETIME(type_num1) && !PyTypeNum_ISDATETIME(type_num2)) || + (PyTypeNum_ISOBJECT(type_num1) || PyTypeNum_ISOBJECT(type_num2))) { + return PyUFunc_DefaultTypeResolver(ufunc, casting, operands, type_tup, + out_dtypes); } if (type_num1 == NPY_TIMEDELTA) { diff --git a/numpy/_core/tests/test_datetime.py b/numpy/_core/tests/test_datetime.py index 18b32ea42da2..c7b11149ed43 100644 --- a/numpy/_core/tests/test_datetime.py +++ b/numpy/_core/tests/test_datetime.py @@ -2700,6 +2700,54 @@ def test_timedelta_hash_big_positive(self, wk, unit): td2 = np.timedelta64(td, unit) _assert_equal_hash(td, td2) + @pytest.mark.parametrize( + "inputs, divisor, expected", + [ + ( + np.array( + [datetime.timedelta(seconds=20), datetime.timedelta(days=2)], + dtype="object", + ), + np.int64(2), + np.array( + [datetime.timedelta(seconds=10), datetime.timedelta(days=1)], + dtype="object", + ), + ), 
+ ( + np.array( + [datetime.timedelta(seconds=20), datetime.timedelta(days=2)], + dtype="object", + ), + np.timedelta64(2, "s"), + np.array( + [10.0, 24.0 * 60.0 * 60.0], + dtype="object", + ), + ), + ( + datetime.timedelta(seconds=2), + np.array( + [datetime.timedelta(seconds=20), datetime.timedelta(days=2)], + dtype="object", + ), + np.array( + [1.0 / 10.0, 1.0 / (24.0 * 60.0 * 60.0)], + dtype="object", + ), + ), + ], + ) + def test_true_divide_object_by_timedelta( + self, + inputs: np.ndarray | type[np.generic], + divisor: np.ndarray | type[np.generic], + expected: np.ndarray, + ): + # gh-30025 + results = inputs / divisor + assert_array_equal(results, expected) + class TestDateTimeData: From ab22f74e376722229c37b605520c3bac7a8d0f82 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 23 Oct 2025 13:32:32 +0200 Subject: [PATCH 0730/1718] TYP: run mypy in strict mode --- tools/stubtest/mypy.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/stubtest/mypy.ini b/tools/stubtest/mypy.ini index 1144b84434f1..422372300b40 100644 --- a/tools/stubtest/mypy.ini +++ b/tools/stubtest/mypy.ini @@ -12,7 +12,7 @@ namespace_packages = False enable_error_code = ignore-without-code, redundant-expr, truthy-bool warn_unreachable = False -strict_bytes = True +strict = True allow_redefinition_new = True local_partial_types = True From 93caf6f110c10e97c5940e157bc12dceab064056 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 23 Oct 2025 13:55:02 +0200 Subject: [PATCH 0731/1718] TYP: Fix `override` mypy errors in `random.*` --- numpy/random/_mt19937.pyi | 2 +- numpy/random/_pcg64.pyi | 13 ++++--------- numpy/random/_philox.pyi | 11 +++-------- numpy/random/_sfc64.pyi | 11 +++-------- 4 files changed, 11 insertions(+), 26 deletions(-) diff --git a/numpy/random/_mt19937.pyi b/numpy/random/_mt19937.pyi index c8ea2dccda02..03373a6dd6ea 100644 --- a/numpy/random/_mt19937.pyi +++ b/numpy/random/_mt19937.pyi @@ -21,7 +21,7 @@ class MT19937(BitGenerator): def 
__init__(self, seed: _ArrayLikeInt_co | SeedSequence | None = ...) -> None: ... def _legacy_seeding(self, seed: _ArrayLikeInt_co) -> None: ... def jumped(self, jumps: int = 1) -> MT19937: ... - @property + @property # type: ignore[override] def state(self) -> _MT19937State: ... @state.setter def state(self, value: _MT19937State) -> None: ... diff --git a/numpy/random/_pcg64.pyi b/numpy/random/_pcg64.pyi index 6055c5d2921d..a9e81f7f181b 100644 --- a/numpy/random/_pcg64.pyi +++ b/numpy/random/_pcg64.pyi @@ -20,7 +20,7 @@ class _PCG64State(TypedDict): class PCG64(BitGenerator): def __init__(self, seed: _ArrayLikeInt_co | SeedSequence | None = ...) -> None: ... def jumped(self, jumps: int = 1) -> PCG64: ... - @property + @property # type: ignore[override] def state( self, ) -> _PCG64State: ... @@ -34,13 +34,8 @@ class PCG64(BitGenerator): class PCG64DXSM(BitGenerator): def __init__(self, seed: _ArrayLikeInt_co | SeedSequence | None = ...) -> None: ... def jumped(self, jumps: int = 1) -> PCG64DXSM: ... - @property - def state( - self, - ) -> _PCG64State: ... + @property # type: ignore[override] + def state(self) -> _PCG64State: ... @state.setter - def state( - self, - value: _PCG64State, - ) -> None: ... + def state(self, value: _PCG64State) -> None: ... def advance(self, delta: int) -> PCG64DXSM: ... diff --git a/numpy/random/_philox.pyi b/numpy/random/_philox.pyi index ab1aee31eb02..3089f11ea629 100644 --- a/numpy/random/_philox.pyi +++ b/numpy/random/_philox.pyi @@ -28,14 +28,9 @@ class Philox(BitGenerator): counter: _ArrayLikeInt_co | None = ..., key: _ArrayLikeInt_co | None = ..., ) -> None: ... - @property - def state( - self, - ) -> _PhiloxState: ... + @property # type: ignore[override] + def state(self) -> _PhiloxState: ... @state.setter - def state( - self, - value: _PhiloxState, - ) -> None: ... + def state(self, value: _PhiloxState) -> None: ... def jumped(self, jumps: int = 1) -> Philox: ... def advance(self, delta: int) -> Philox: ... 
diff --git a/numpy/random/_sfc64.pyi b/numpy/random/_sfc64.pyi index 1e563fdebfde..f5f3fed9c251 100644 --- a/numpy/random/_sfc64.pyi +++ b/numpy/random/_sfc64.pyi @@ -19,12 +19,7 @@ class _SFC64State(TypedDict): class SFC64(BitGenerator): def __init__(self, seed: _ArrayLikeInt_co | SeedSequence | None = ...) -> None: ... - @property - def state( - self, - ) -> _SFC64State: ... + @property # type: ignore[override] + def state(self) -> _SFC64State: ... @state.setter - def state( - self, - value: _SFC64State, - ) -> None: ... + def state(self, value: _SFC64State) -> None: ... From 7b46bc98e7a98cfe4ecd2211e89e4bfb503fc2d4 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 23 Oct 2025 13:55:12 +0200 Subject: [PATCH 0732/1718] TYP: Fix `override` mypy errors in `chararray` --- numpy/_core/defchararray.pyi | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/numpy/_core/defchararray.pyi b/numpy/_core/defchararray.pyi index 62a1c59b3ae5..406d11ea0eb7 100644 --- a/numpy/_core/defchararray.pyi +++ b/numpy/_core/defchararray.pyi @@ -129,11 +129,11 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): ) -> _CharArray[str_]: ... def __array_finalize__(self, obj: object) -> None: ... - def __mul__(self, other: i_co) -> chararray[_AnyShape, _CharDTypeT_co]: ... - def __rmul__(self, other: i_co) -> chararray[_AnyShape, _CharDTypeT_co]: ... - def __mod__(self, i: Any) -> chararray[_AnyShape, _CharDTypeT_co]: ... + def __mul__(self, other: i_co) -> chararray[_AnyShape, _CharDTypeT_co]: ... # type: ignore[override] + def __rmul__(self, other: i_co) -> chararray[_AnyShape, _CharDTypeT_co]: ... # type: ignore[override] + def __mod__(self, i: Any) -> chararray[_AnyShape, _CharDTypeT_co]: ... # type: ignore[override] - @overload + @overload # type: ignore[override] def __eq__( self: _CharArray[str_], other: U_co, @@ -144,7 +144,7 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): other: S_co, ) -> NDArray[np.bool]: ... 
- @overload + @overload # type: ignore[override] def __ne__( self: _CharArray[str_], other: U_co, @@ -155,7 +155,7 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): other: S_co, ) -> NDArray[np.bool]: ... - @overload + @overload # type: ignore[override] def __ge__( self: _CharArray[str_], other: U_co, @@ -166,7 +166,7 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): other: S_co, ) -> NDArray[np.bool]: ... - @overload + @overload # type: ignore[override] def __le__( self: _CharArray[str_], other: U_co, @@ -177,7 +177,7 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): other: S_co, ) -> NDArray[np.bool]: ... - @overload + @overload # type: ignore[override] def __gt__( self: _CharArray[str_], other: U_co, @@ -188,7 +188,7 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): other: S_co, ) -> NDArray[np.bool]: ... - @overload + @overload # type: ignore[override] def __lt__( self: _CharArray[str_], other: U_co, @@ -199,7 +199,7 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): other: S_co, ) -> NDArray[np.bool]: ... - @overload + @overload # type: ignore[override] def __add__( self: _CharArray[str_], other: U_co, @@ -210,7 +210,7 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): other: S_co, ) -> _CharArray[bytes_]: ... - @overload + @overload # type: ignore[override] def __radd__( self: _CharArray[str_], other: U_co, @@ -346,7 +346,7 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): chars: S_co | None = None, ) -> _CharArray[bytes_]: ... 
- @overload + @overload # type: ignore[override] def partition( self: _CharArray[str_], sep: U_co, From 7d0dd1d28e8a4f3bc3184b1d427a7e7ea1fa6832 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 23 Oct 2025 13:55:24 +0200 Subject: [PATCH 0733/1718] TYP: Fix `override` mypy errors in `ctypeslib` --- numpy/ctypeslib/_ctypeslib.pyi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/ctypeslib/_ctypeslib.pyi b/numpy/ctypeslib/_ctypeslib.pyi index abcd5a4264e8..2c87833b5501 100644 --- a/numpy/ctypeslib/_ctypeslib.pyi +++ b/numpy/ctypeslib/_ctypeslib.pyi @@ -77,7 +77,7 @@ class _ndptr(ctypes.c_void_p, Generic[_DTypeOptionalT]): _ndim_: ClassVar[int | None] _flags_: ClassVar[list[_FlagsKind] | None] - @overload + @overload # type: ignore[override] @classmethod def from_param(cls: type[_ndptr[None]], obj: NDArray[Any]) -> _ctypes[Any]: ... @overload From 97836e4762868010eb1d8bf02c8d9d5bd8b67ce8 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 23 Oct 2025 13:55:35 +0200 Subject: [PATCH 0734/1718] TYP: Fix `override` mypy errors in `ma.*` --- numpy/ma/core.pyi | 68 +++++++++++++++++++++++------------------------ 1 file changed, 34 insertions(+), 34 deletions(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index f8e735e9294d..e613c26ee829 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -606,7 +606,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): return_scalar: bool = False, ) -> MaskedArray[_ShapeT, _DTypeT]: ... - @overload # () + @overload # type: ignore[override] # () def view(self, /, dtype: None = None, type: None = None, fill_value: _ScalarLike_co | None = None) -> Self: ... @overload # (dtype: DTypeT) def view( @@ -662,7 +662,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @property def shape(self) -> _ShapeT_co: ... - @shape.setter + @shape.setter # type: ignore[override] def shape(self: MaskedArray[_ShapeT, Any], shape: _ShapeT, /) -> None: ... 
def __setmask__(self, mask: _ArrayLikeBool_co, copy: bool = False) -> None: ... @@ -707,7 +707,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def filled(self, /, fill_value: _ScalarLike_co | None = None) -> ndarray[_ShapeT_co, _DTypeT_co]: ... def compressed(self) -> ndarray[tuple[int], _DTypeT_co]: ... - @overload + @overload # type: ignore[override] def compress( self, condition: _ArrayLikeBool_co, @@ -748,7 +748,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def __lt__(self, other: ArrayLike, /) -> _MaskedArray[bool_]: ... # type: ignore[override] # Keep in sync with `ndarray.__add__` - @overload + @overload # type: ignore[override] def __add__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... @overload def __add__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] @@ -796,7 +796,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def __add__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... # Keep in sync with `ndarray.__radd__` - @overload # signature equivalent to __add__ + @overload # type: ignore[override] # signature equivalent to __add__ def __radd__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... @overload def __radd__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] @@ -844,7 +844,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def __radd__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... # Keep in sync with `ndarray.__sub__` - @overload + @overload # type: ignore[override] def __sub__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... @overload def __sub__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... 
# type: ignore[overload-overlap] @@ -882,7 +882,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def __sub__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... # Keep in sync with `ndarray.__rsub__` - @overload + @overload # type: ignore[override] def __rsub__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... @overload def __rsub__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] @@ -920,7 +920,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def __rsub__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... # Keep in sync with `ndarray.__mul__` - @overload + @overload # type: ignore[override] def __mul__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... @overload def __mul__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] @@ -962,7 +962,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def __mul__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... # Keep in sync with `ndarray.__rmul__` - @overload # signature equivalent to __mul__ + @overload # type: ignore[override] # signature equivalent to __mul__ def __rmul__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... @overload def __rmul__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] @@ -1004,7 +1004,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def __rmul__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... # Keep in sync with `ndarray.__truediv__` - @overload + @overload # type: ignore[override] def __truediv__(self: _MaskedArrayInt_co | _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... 
@overload def __truediv__(self: _MaskedArrayFloat64_co, other: _ArrayLikeInt_co | _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... @@ -1036,7 +1036,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def __truediv__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... # Keep in sync with `ndarray.__rtruediv__` - @overload + @overload # type: ignore[override] def __rtruediv__(self: _MaskedArrayInt_co | _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... @overload def __rtruediv__(self: _MaskedArrayFloat64_co, other: _ArrayLikeInt_co | _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... @@ -1066,7 +1066,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def __rtruediv__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... # Keep in sync with `ndarray.__floordiv__` - @overload + @overload # type: ignore[override] def __floordiv__(self: _MaskedArray[_RealNumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_RealNumberT]]: ... @overload def __floordiv__(self: _MaskedArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_RealNumberT]: ... # type: ignore[overload-overlap] @@ -1096,7 +1096,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def __floordiv__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... # Keep in sync with `ndarray.__rfloordiv__` - @overload + @overload # type: ignore[override] def __rfloordiv__(self: _MaskedArray[_RealNumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_RealNumberT]]: ... @overload def __rfloordiv__(self: _MaskedArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_RealNumberT]: ... # type: ignore[overload-overlap] @@ -1124,7 +1124,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def __rfloordiv__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... 
# Keep in sync with `ndarray.__pow__` (minus the `mod` parameter) - @overload + @overload # type: ignore[override] def __pow__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... @overload def __pow__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] @@ -1156,7 +1156,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def __pow__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... # Keep in sync with `ndarray.__rpow__` (minus the `mod` parameter) - @overload + @overload # type: ignore[override] def __rpow__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... @overload def __rpow__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] @@ -1189,10 +1189,10 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # @property # type: ignore[misc] - def imag(self: _HasDTypeWithRealAndImag[object, _ScalarT], /) -> MaskedArray[_ShapeT_co, dtype[_ScalarT]]: ... + def imag(self: _HasDTypeWithRealAndImag[object, _ScalarT], /) -> MaskedArray[_ShapeT_co, dtype[_ScalarT]]: ... # type: ignore[override] get_imag: Any @property # type: ignore[misc] - def real(self: _HasDTypeWithRealAndImag[_ScalarT, object], /) -> MaskedArray[_ShapeT_co, dtype[_ScalarT]]: ... + def real(self: _HasDTypeWithRealAndImag[_ScalarT, object], /) -> MaskedArray[_ShapeT_co, dtype[_ScalarT]]: ... # type: ignore[override] get_real: Any # keep in sync with `np.ma.count` @@ -1290,12 +1290,12 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): copy: bool | None = None, ) -> MaskedArray[_AnyShape, _DTypeT_co]: ... - def resize(self, newshape: Never, refcheck: bool = True, order: bool = False) -> NoReturn: ... + def resize(self, newshape: Never, refcheck: bool = True, order: bool = False) -> NoReturn: ... 
# type: ignore[override] def put(self, indices: _ArrayLikeInt_co, values: ArrayLike, mode: _ModeKind = "raise") -> None: ... def ids(self) -> tuple[int, int]: ... def iscontiguous(self) -> bool: ... - @overload + @overload # type: ignore[override] def all( self, axis: None = None, @@ -1340,7 +1340,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): keepdims: bool | _NoValueType = ..., ) -> _ArrayT: ... - @overload + @overload # type: ignore[override] def any( self, axis: None = None, @@ -1423,7 +1423,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def dot(self, b: ArrayLike, out: _ArrayT, strict: bool = False) -> _ArrayT: ... - @overload + @overload # type: ignore[override] def sum( self, /, @@ -1460,7 +1460,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def cumsum(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... - @overload + @overload # type: ignore[override] def prod( self, /, @@ -1499,7 +1499,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... - @overload + @overload # type: ignore[override] def mean( self, axis: _ShapeLike | None = None, @@ -1534,7 +1534,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def anom(self, axis: SupportsIndex | None, dtype: DTypeLike) -> MaskedArray[_ShapeT_co, dtype]: ... - @overload + @overload # type: ignore[override] def var( self, axis: _ShapeLike | None = None, @@ -1566,7 +1566,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): mean: _ArrayLikeNumber_co | _NoValueType = ..., ) -> _ArrayT: ... - @overload + @overload # type: ignore[override] def std( self, axis: _ShapeLike | None = None, @@ -1606,7 +1606,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def round(self, /, decimals: SupportsIndex = 0, *, out: _ArrayT) -> _ArrayT: ... 
- def argsort( + def argsort( # type: ignore[override] self, axis: SupportsIndex | _NoValueType = ..., kind: _SortKind | None = None, @@ -1851,7 +1851,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): ) -> _MaskedArray[intp]: ... # Keep in-sync with np.ma.take - @overload + @overload # type: ignore[override] def take( # type: ignore[overload-overlap] self: _MaskedArray[_ScalarT], indices: _IntLike_co, @@ -1943,7 +1943,7 @@ class mvoid(MaskedArray[_ShapeT_co, _DTypeT_co]): def __iter__(self): ... def __len__(self): ... def filled(self, fill_value=None): ... - def tolist(self): ... + def tolist(self): ... # type: ignore[override] def isMaskedArray(x): ... isarray = isMaskedArray @@ -1955,19 +1955,19 @@ class MaskedConstant(MaskedArray[tuple[()], dtype[float64]]): # these overrides are no-ops @override - def __iadd__(self, other: _Ignored, /) -> Self: ... + def __iadd__(self, other: _Ignored, /) -> Self: ... # type: ignore[override] @override - def __isub__(self, other: _Ignored, /) -> Self: ... + def __isub__(self, other: _Ignored, /) -> Self: ... # type: ignore[override] @override - def __imul__(self, other: _Ignored, /) -> Self: ... + def __imul__(self, other: _Ignored, /) -> Self: ... # type: ignore[override] @override def __ifloordiv__(self, other: _Ignored, /) -> Self: ... @override - def __itruediv__(self, other: _Ignored, /) -> Self: ... + def __itruediv__(self, other: _Ignored, /) -> Self: ... # type: ignore[override] @override - def __ipow__(self, other: _Ignored, /) -> Self: ... + def __ipow__(self, other: _Ignored, /) -> Self: ... # type: ignore[override] @override - def __deepcopy__(self, /, memo: _Ignored) -> Self: ... + def __deepcopy__(self, /, memo: _Ignored) -> Self: ... # type: ignore[override] @override def copy(self, /, *args: _Ignored, **kwargs: _Ignored) -> Self: ... 
From 5238c595468273ae1b8c12d3c96974da8c91a19d Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 23 Oct 2025 13:55:44 +0200 Subject: [PATCH 0735/1718] TYP: Fix `override` mypy errors in `__init__.pyi` --- numpy/__init__.pyi | 73 +++++++++++++++++++++++++--------------------- 1 file changed, 39 insertions(+), 34 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 8d754d917ad5..4c530888a95d 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2222,7 +2222,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def resize(self, /, *new_shape: SupportsIndex, refcheck: builtins.bool = ...) -> None: ... - def setflags(self, write: builtins.bool = ..., align: builtins.bool = ..., uic: builtins.bool = ...) -> None: ... + def setflags(self, write: builtins.bool = ..., align: builtins.bool = ..., uic: builtins.bool = ...) -> None: ... # type: ignore[override] def squeeze( self, @@ -5006,7 +5006,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] # reflected methods. But since they are identical to the non-reflected versions, # these errors appear to be false positives. - @overload + @overload # type: ignore[override] def __add__(self, other: _Float64_co, /) -> float64: ... @overload def __add__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... @@ -5015,7 +5015,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __add__(self, other: complex, /) -> float64 | complex128: ... - @overload + @overload # type: ignore[override] def __radd__(self, other: _Float64_co, /) -> float64: ... # type: ignore[misc] @overload def __radd__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... # type: ignore[misc] @@ -5024,7 +5024,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __radd__(self, other: complex, /) -> float64 | complex128: ... 
- @overload + @overload # type: ignore[override] def __sub__(self, other: _Float64_co, /) -> float64: ... @overload def __sub__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... @@ -5033,7 +5033,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __sub__(self, other: complex, /) -> float64 | complex128: ... - @overload + @overload # type: ignore[override] def __rsub__(self, other: _Float64_co, /) -> float64: ... # type: ignore[misc] @overload def __rsub__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... # type: ignore[misc] @@ -5042,7 +5042,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __rsub__(self, other: complex, /) -> float64 | complex128: ... - @overload + @overload # type: ignore[override] def __mul__(self, other: _Float64_co, /) -> float64: ... @overload def __mul__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... @@ -5051,7 +5051,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __mul__(self, other: complex, /) -> float64 | complex128: ... - @overload + @overload # type: ignore[override] def __rmul__(self, other: _Float64_co, /) -> float64: ... # type: ignore[misc] @overload def __rmul__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... # type: ignore[misc] @@ -5060,7 +5060,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __rmul__(self, other: complex, /) -> float64 | complex128: ... - @overload + @overload # type: ignore[override] def __truediv__(self, other: _Float64_co, /) -> float64: ... @overload def __truediv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... @@ -5069,7 +5069,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __truediv__(self, other: complex, /) -> float64 | complex128: ... - @overload + @overload # type: ignore[override] def __rtruediv__(self, other: _Float64_co, /) -> float64: ... 
# type: ignore[misc] @overload def __rtruediv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... # type: ignore[misc] @@ -5078,7 +5078,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __rtruediv__(self, other: complex, /) -> float64 | complex128: ... - @overload + @overload # type: ignore[override] def __floordiv__(self, other: _Float64_co, /) -> float64: ... @overload def __floordiv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... @@ -5096,7 +5096,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __rfloordiv__(self, other: complex, /) -> float64 | complex128: ... - @overload + @overload # type: ignore[override] def __pow__(self, other: _Float64_co, mod: None = None, /) -> float64: ... @overload def __pow__(self, other: complexfloating[_64Bit, _64Bit], mod: None = None, /) -> complex128: ... @@ -5107,7 +5107,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __pow__(self, other: complex, mod: None = None, /) -> float64 | complex128: ... - @overload + @overload # type: ignore[override] def __rpow__(self, other: _Float64_co, mod: None = None, /) -> float64: ... # type: ignore[misc] @overload def __rpow__(self, other: complexfloating[_64Bit, _64Bit], mod: None = None, /) -> complex128: ... # type: ignore[misc] @@ -5153,59 +5153,63 @@ class complexfloating(inexact[_NBit1, complex], Generic[_NBit1, _NBit2]): def __complex__(self, /) -> complex: ... def __abs__(self, /) -> floating[_NBit1 | _NBit2]: ... # type: ignore[override] - @overload + @overload # type: ignore[override] def __add__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... @overload def __add__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... @overload def __add__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... 
- @overload + + @overload # type: ignore[override] def __radd__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... @overload def __radd__(self, other: complex, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... @overload def __radd__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... - @overload + @overload # type: ignore[override] def __sub__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... @overload def __sub__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... @overload def __sub__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... - @overload + + @overload # type: ignore[override] def __rsub__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... @overload def __rsub__(self, other: complex, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... @overload def __rsub__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... - @overload + @overload # type: ignore[override] def __mul__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... @overload def __mul__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... @overload def __mul__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... - @overload + + @overload # type: ignore[override] def __rmul__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... @overload def __rmul__(self, other: complex, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... @overload def __rmul__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... - @overload + @overload # type: ignore[override] def __truediv__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... 
@overload def __truediv__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... @overload def __truediv__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... - @overload + + @overload # type: ignore[override] def __rtruediv__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... @overload def __rtruediv__(self, other: complex, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... @overload def __rtruediv__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... - @overload + @overload # type: ignore[override] def __pow__(self, other: _Complex64_co, mod: None = None, /) -> complexfloating[_NBit1, _NBit2]: ... @overload def __pow__( @@ -5215,7 +5219,8 @@ class complexfloating(inexact[_NBit1, complex], Generic[_NBit1, _NBit2]): def __pow__( self, other: number[_NBit], mod: None = None, / ) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... - @overload + + @overload # type: ignore[override] def __rpow__(self, other: _Complex64_co, mod: None = None, /) -> complexfloating[_NBit1, _NBit2]: ... @overload def __rpow__(self, other: complex, mod: None = None, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... @@ -5242,37 +5247,37 @@ class complex128(complexfloating[_64Bit, _64Bit], complex): def __getnewargs__(self, /) -> tuple[float, float]: ... # complex128-specific operator overrides - @overload + @overload # type: ignore[override] def __add__(self, other: _Complex128_co, /) -> complex128: ... @overload def __add__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... - def __radd__(self, other: _Complex128_co, /) -> complex128: ... + def __radd__(self, other: _Complex128_co, /) -> complex128: ... # type: ignore[override] - @overload + @overload # type: ignore[override] def __sub__(self, other: _Complex128_co, /) -> complex128: ... 
@overload def __sub__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... - def __rsub__(self, other: _Complex128_co, /) -> complex128: ... + def __rsub__(self, other: _Complex128_co, /) -> complex128: ... # type: ignore[override] - @overload + @overload # type: ignore[override] def __mul__(self, other: _Complex128_co, /) -> complex128: ... @overload def __mul__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... - def __rmul__(self, other: _Complex128_co, /) -> complex128: ... + def __rmul__(self, other: _Complex128_co, /) -> complex128: ... # type: ignore[override] - @overload + @overload # type: ignore[override] def __truediv__(self, other: _Complex128_co, /) -> complex128: ... @overload def __truediv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... - def __rtruediv__(self, other: _Complex128_co, /) -> complex128: ... + def __rtruediv__(self, other: _Complex128_co, /) -> complex128: ... # type: ignore[override] - @overload + @overload # type: ignore[override] def __pow__(self, other: _Complex128_co, mod: None = None, /) -> complex128: ... @overload def __pow__( self, other: complexfloating[_NBit1, _NBit2], mod: None = None, / ) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... - def __rpow__(self, other: _Complex128_co, mod: None = None, /) -> complex128: ... + def __rpow__(self, other: _Complex128_co, mod: None = None, /) -> complex128: ... # type: ignore[override] csingle: TypeAlias = complex64 cdouble: TypeAlias = complex128 @@ -5668,7 +5673,7 @@ class ufunc: @property def __qualname__(self) -> LiteralString: ... @property - def __doc__(self) -> str: ... + def __doc__(self) -> str: ... # type: ignore[override] @property def nin(self) -> int: ... @property @@ -5995,7 +6000,7 @@ class memmap(ndarray[_ShapeT_co, _DTypeT_co]): def __array_finalize__(self, obj: object) -> None: ... 
def __array_wrap__( self, - array: memmap[_ShapeT_co, _DTypeT_co], + array: memmap[_ShapeT_co, _DTypeT_co], # type: ignore[override] context: tuple[ufunc, tuple[Any, ...], int] | None = None, return_scalar: builtins.bool = False, ) -> Any: ... From 096975101e8d132370b1669aaac831c7d9922571 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 23 Oct 2025 13:56:00 +0200 Subject: [PATCH 0736/1718] TYP: Enable `override` mypy error code --- tools/stubtest/mypy.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/stubtest/mypy.ini b/tools/stubtest/mypy.ini index 422372300b40..f44e10cb883f 100644 --- a/tools/stubtest/mypy.ini +++ b/tools/stubtest/mypy.ini @@ -20,4 +20,4 @@ local_partial_types = True ; on numerous stubs. To allow running stubtest independently, we temporarily ; ignore these errors here. The goal is to eventually fix these mypy errors ; and remove the ignores. -disable_error_code = overload-cannot-match, overload-overlap, override +disable_error_code = overload-cannot-match, overload-overlap From bddee9203fecf6e411be2cd4a7a0081ce92b8dad Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 23 Oct 2025 14:21:40 +0200 Subject: [PATCH 0737/1718] TYP: Fix `overload-cannot-match` mypy errors in `ctypeslib` --- numpy/ctypeslib/_ctypeslib.pyi | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/numpy/ctypeslib/_ctypeslib.pyi b/numpy/ctypeslib/_ctypeslib.pyi index 2c87833b5501..8881141f8ed5 100644 --- a/numpy/ctypeslib/_ctypeslib.pyi +++ b/numpy/ctypeslib/_ctypeslib.pyi @@ -185,7 +185,7 @@ def as_ctypes(obj: intc) -> ctypes.c_int: ... @overload def as_ctypes(obj: long) -> ctypes.c_long: ... @overload -def as_ctypes(obj: longlong) -> ctypes.c_longlong: ... +def as_ctypes(obj: longlong) -> ctypes.c_longlong: ... # type: ignore[overload-cannot-match] @overload def as_ctypes(obj: ubyte) -> ctypes.c_ubyte: ... @overload @@ -195,7 +195,7 @@ def as_ctypes(obj: uintc) -> ctypes.c_uint: ... 
@overload def as_ctypes(obj: ulong) -> ctypes.c_ulong: ... @overload -def as_ctypes(obj: ulonglong) -> ctypes.c_ulonglong: ... +def as_ctypes(obj: ulonglong) -> ctypes.c_ulonglong: ... # type: ignore[overload-cannot-match] @overload def as_ctypes(obj: single) -> ctypes.c_float: ... @overload @@ -215,7 +215,7 @@ def as_ctypes(obj: NDArray[intc]) -> ctypes.Array[ctypes.c_int]: ... @overload def as_ctypes(obj: NDArray[long]) -> ctypes.Array[ctypes.c_long]: ... @overload -def as_ctypes(obj: NDArray[longlong]) -> ctypes.Array[ctypes.c_longlong]: ... +def as_ctypes(obj: NDArray[longlong]) -> ctypes.Array[ctypes.c_longlong]: ... # type: ignore[overload-cannot-match] @overload def as_ctypes(obj: NDArray[ubyte]) -> ctypes.Array[ctypes.c_ubyte]: ... @overload @@ -225,7 +225,7 @@ def as_ctypes(obj: NDArray[uintc]) -> ctypes.Array[ctypes.c_uint]: ... @overload def as_ctypes(obj: NDArray[ulong]) -> ctypes.Array[ctypes.c_ulong]: ... @overload -def as_ctypes(obj: NDArray[ulonglong]) -> ctypes.Array[ctypes.c_ulonglong]: ... +def as_ctypes(obj: NDArray[ulonglong]) -> ctypes.Array[ctypes.c_ulonglong]: ... # type: ignore[overload-cannot-match] @overload def as_ctypes(obj: NDArray[single]) -> ctypes.Array[ctypes.c_float]: ... @overload From 3c70d6ccf757e8f3bf7e3856a7f5a2d3820848c8 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 23 Oct 2025 14:23:57 +0200 Subject: [PATCH 0738/1718] TYP: Fix `overload-cannot-match` mypy errors in `__init__.pyi` --- numpy/__init__.pyi | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 4c530888a95d..86cae3b3e48f 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -5334,7 +5334,7 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] @overload def __add__(self: timedelta64[_AnyTD64Item], x: timedelta64[_AnyTD64Item] | _IntLike_co, /) -> timedelta64[_AnyTD64Item]: ... @overload - def __add__(self, x: timedelta64[None], /) -> timedelta64[None]: ... 
+ def __add__(self, x: timedelta64[None], /) -> timedelta64[None]: ... # type: ignore[overload-cannot-match] __radd__ = __add__ @overload @@ -5424,7 +5424,7 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] @overload def __sub__(self: timedelta64[_AnyTD64Item], b: timedelta64[_AnyTD64Item] | _IntLike_co, /) -> timedelta64[_AnyTD64Item]: ... @overload - def __sub__(self, b: timedelta64[None], /) -> timedelta64[None]: ... + def __sub__(self, b: timedelta64[None], /) -> timedelta64[None]: ... # type: ignore[overload-cannot-match] # NOTE: subtraction is not commutative, so __rsub__ differs from __sub__. # This confuses mypy, so we ignore the [misc] errors it reports. @@ -5437,7 +5437,7 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] @overload def __rsub__(self: timedelta64[_AnyTD64Item], a: timedelta64[_AnyTD64Item] | _IntLike_co, /) -> timedelta64[_AnyTD64Item]: ... # type: ignore[misc] @overload - def __rsub__(self, a: timedelta64[None], /) -> timedelta64[None]: ... + def __rsub__(self, a: timedelta64[None], /) -> timedelta64[None]: ... # type: ignore[overload-cannot-match] @overload def __rsub__(self, a: datetime64[None], /) -> datetime64[None]: ... # type: ignore[misc] From 1fe0bf986d067d5e89bfa9175c2d2260374f6122 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 23 Oct 2025 14:24:33 +0200 Subject: [PATCH 0739/1718] TYP: Fix `overload-cannot-match` mypy errors in `linalg.matmul` --- numpy/linalg/_linalg.pyi | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index cc22e0f9dc39..60320b021c71 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -539,10 +539,10 @@ def cross( @overload def matmul(x1: _ArrayLike[_NumberT], x2: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... @overload -def matmul(x1: _ArrayLikeInt_co, x2: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... 
-@overload def matmul(x1: _ArrayLikeUInt_co, x2: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload +def matmul(x1: _ArrayLikeInt_co, x2: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... +@overload def matmul(x1: _ArrayLikeFloat_co, x2: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload def matmul(x1: _ArrayLikeComplex_co, x2: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... From 1c74b87f0ee89bf03f07e62c69e99fda1a04c3d6 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 23 Oct 2025 14:24:45 +0200 Subject: [PATCH 0740/1718] TYP: Enable `overload-cannot-match` mypy error code --- tools/stubtest/mypy.ini | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/stubtest/mypy.ini b/tools/stubtest/mypy.ini index f44e10cb883f..ded91d7d56f3 100644 --- a/tools/stubtest/mypy.ini +++ b/tools/stubtest/mypy.ini @@ -13,6 +13,7 @@ namespace_packages = False enable_error_code = ignore-without-code, redundant-expr, truthy-bool warn_unreachable = False strict = True +strict_bytes = True allow_redefinition_new = True local_partial_types = True @@ -20,4 +21,5 @@ local_partial_types = True ; on numerous stubs. To allow running stubtest independently, we temporarily ; ignore these errors here. The goal is to eventually fix these mypy errors ; and remove the ignores. -disable_error_code = overload-cannot-match, overload-overlap +; See also https://github.com/numpy/numpy/issues/27032 +disable_error_code = overload-overlap From 3aafa2f4c2b75c00b3a55e70933a19802c654594 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 23 Oct 2025 17:40:17 +0200 Subject: [PATCH 0741/1718] TYP: Annotate ``ma.array()`` --- numpy/ma/core.pyi | 56 +++++++++++++++++++++++++++++++++++++---------- 1 file changed, 44 insertions(+), 12 deletions(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index aeb32bed01f9..b795aa48f9b0 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -1978,19 +1978,51 @@ masked_singleton: Final[MaskedConstant] = ... 
masked_array: TypeAlias = MaskedArray +# keep in sync with `MaskedArray.__new__` +@overload def array( - data, - dtype=None, - copy=False, - order=None, - mask=..., - fill_value=None, - keep_mask=True, - hard_mask=False, - shrink=True, - subok=True, - ndmin=0, -): ... + data: _ArrayLike[_ScalarT], + dtype: None = None, + copy: bool = False, + order: _OrderKACF | None = None, + mask: _ArrayLikeBool_co = nomask, + fill_value: _ScalarLike_co | None = None, + keep_mask: bool = True, + hard_mask: bool = False, + shrink: bool = True, + subok: bool = True, + ndmin: int = 0, +) -> _MaskedArray[_ScalarT]: ... +@overload +def array( + data: object, + dtype: _DTypeLike[_ScalarT], + copy: bool = False, + order: _OrderKACF | None = None, + mask: _ArrayLikeBool_co = nomask, + fill_value: _ScalarLike_co | None = None, + keep_mask: bool = True, + hard_mask: bool = False, + shrink: bool = True, + subok: bool = True, + ndmin: int = 0, +) -> _MaskedArray[_ScalarT]: ... +@overload +def array( + data: object, + dtype: DTypeLike | None = None, + copy: bool = False, + order: _OrderKACF | None = None, + mask: _ArrayLikeBool_co = nomask, + fill_value: _ScalarLike_co | None = None, + keep_mask: bool = True, + hard_mask: bool = False, + shrink: bool = True, + subok: bool = True, + ndmin: int = 0, +) -> _MaskedArray[_ScalarT]: ... + +# def is_masked(x: object) -> bool: ... 
class _extrema_operation(_MaskedUFunc): From 9c2ca3a3e78124314433a9944feedaadd701002c Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 23 Oct 2025 17:57:51 +0200 Subject: [PATCH 0742/1718] TYP: Annotate ``ma.asarray()`` and ``ma.asanyarray()`` --- numpy/ma/core.pyi | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index b795aa48f9b0..7fef6e29bd24 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -285,6 +285,7 @@ _ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=Tr _DTypeT = TypeVar("_DTypeT", bound=dtype) _DTypeT_co = TypeVar("_DTypeT_co", bound=dtype, default=dtype, covariant=True) _ArrayT = TypeVar("_ArrayT", bound=ndarray[Any, Any]) +_MArrayT = TypeVar("_MArrayT", bound=MaskedArray[Any, Any]) _ScalarT = TypeVar("_ScalarT", bound=generic) _ScalarT_co = TypeVar("_ScalarT_co", bound=generic, covariant=True) _NumberT = TypeVar("_NumberT", bound=number) @@ -2022,6 +2023,24 @@ def array( ndmin: int = 0, ) -> _MaskedArray[_ScalarT]: ... +# keep in sync with `array` +@overload +def asarray(a: _ArrayLike[_ScalarT], dtype: None = None, order: _OrderKACF | None = None) -> _MaskedArray[_ScalarT]: ... +@overload +def asarray(a: object, dtype: _DTypeLike[_ScalarT], order: _OrderKACF | None = None) -> _MaskedArray[_ScalarT]: ... +@overload +def asarray(a: object, dtype: DTypeLike | None = None, order: _OrderKACF | None = None) -> _MaskedArray[_ScalarT]: ... + +# keep in sync with `asarray` (but note the additional first overload) +@overload +def asanyarray(a: _MArrayT, dtype: None = None) -> _MArrayT: ... +@overload +def asanyarray(a: _ArrayLike[_ScalarT], dtype: None = None) -> _MaskedArray[_ScalarT]: ... +@overload +def asanyarray(a: object, dtype: _DTypeLike[_ScalarT]) -> _MaskedArray[_ScalarT]: ... +@overload +def asanyarray(a: object, dtype: DTypeLike | None = None) -> _MaskedArray[_ScalarT]: ... + # def is_masked(x: object) -> bool: ... 
@@ -2365,8 +2384,6 @@ def allequal(a: ArrayLike, b: ArrayLike, fill_value: bool = True) -> bool: ... def allclose(a: ArrayLike, b: ArrayLike, masked_equal: bool = True, rtol: float = 1e-5, atol: float = 1e-8) -> bool: ... -def asarray(a, dtype=None, order=None): ... -def asanyarray(a, dtype=None): ... def fromflex(fxarray): ... class _convert2ma: From 144b3cc43eb1f1ec70d98ae307af4720195cfe39 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Thu, 23 Oct 2025 18:27:45 +0200 Subject: [PATCH 0743/1718] CI, TYP: stubtest (#30043) * CI: stubtest * CI: upgrade hypothesis before running stubtest to work around a mypy bug * CI: run stubtest using `spin stubtest` --- .github/workflows/stubtest.yml | 64 ++++++++++++++++++++++++++++++++++ 1 file changed, 64 insertions(+) create mode 100644 .github/workflows/stubtest.yml diff --git a/.github/workflows/stubtest.yml b/.github/workflows/stubtest.yml new file mode 100644 index 000000000000..13554bfc427c --- /dev/null +++ b/.github/workflows/stubtest.yml @@ -0,0 +1,64 @@ +name: stubtest +permissions: read-all + +# Stubtest depends on different branches and paths than mypy does, so we have a separate workflow. 
+ +on: + pull_request: + branches: + - "main" + - "maintenance/2.**" + # Stubtest requires numpy>=2.4 + - "!maintenance/2.[0-3].x" + paths: + - ".github/workflows/stubtest.yml" + - "numpy/**" + - "!numpy/**/tests/**" + - "requirements/test_requirements.txt" + - "tools/stubtest/**" + workflow_dispatch: + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +jobs: + mypy: + # To enable this workflow on a fork, comment out: + if: github.repository == 'numpy/numpy' + + name: stubtest + runs-on: ${{ matrix.os }}-latest + strategy: + fail-fast: false + matrix: + # TODO: consider including macos and windows + os: [ubuntu] + py: ["3.11", "3.14"] + + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + with: + submodules: recursive + fetch-tags: true + persist-credentials: false + + - uses: astral-sh/setup-uv@2ddd2b9cb38ad8efd50337e8ab201519a34c9f24 # v7.1.1 + with: + python-version: ${{ matrix.py }} + activate-environment: true + cache-dependency-glob: | + requirements/build_requirements.txt + requirements/test_requirements.txt + + - name: uv pip install + run: >- + uv pip install + -r requirements/build_requirements.txt + -r requirements/test_requirements.txt + + - name: spin build + run: spin build -j2 -- -Dallow-noblas=true -Ddisable-optimization=true --vsenv + + - name: spin stubtest + run: spin stubtest From 5fbdeca14c5bc9eb38c6467ee37559ae20442bf2 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Thu, 23 Oct 2025 18:31:28 +0200 Subject: [PATCH 0744/1718] CI: Skip test runs if all changes are docs or stubs (#30050) --- .github/workflows/compiler_sanitizers.yml | 4 ++++ .github/workflows/cygwin.yml | 4 ++++ .github/workflows/emscripten.yml | 4 ++++ .github/workflows/linux-ppc64le.yml | 4 ++++ .github/workflows/linux.yml | 4 ++++ .github/workflows/linux_blas.yml | 7 +++++-- .github/workflows/linux_qemu.yml | 4 ++++ .github/workflows/linux_simd.yml | 4 ++++ 
.github/workflows/macos.yml | 7 +++++-- .github/workflows/windows.yml | 4 ++++ 10 files changed, 42 insertions(+), 4 deletions(-) diff --git a/.github/workflows/compiler_sanitizers.yml b/.github/workflows/compiler_sanitizers.yml index 2721d77eb4a2..a86ff5ae8b53 100644 --- a/.github/workflows/compiler_sanitizers.yml +++ b/.github/workflows/compiler_sanitizers.yml @@ -8,6 +8,10 @@ on: branches: - main - maintenance/** + paths-ignore: + - '**.pyi' + - '**.md' + - '**.rst' defaults: run: diff --git a/.github/workflows/cygwin.yml b/.github/workflows/cygwin.yml index 9d95da102fee..153f18d2956d 100644 --- a/.github/workflows/cygwin.yml +++ b/.github/workflows/cygwin.yml @@ -4,6 +4,10 @@ on: branches: - main - maintenance/** + paths-ignore: + - '**.pyi' + - '**.md' + - '**.rst' concurrency: group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index e152870efd7d..159f6b719fde 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -5,6 +5,10 @@ on: branches: - main - maintenance/** + paths-ignore: + - '**.pyi' + - '**.md' + - '**.rst' env: FORCE_COLOR: 3 diff --git a/.github/workflows/linux-ppc64le.yml b/.github/workflows/linux-ppc64le.yml index a593bcd774a2..9b23c1939b00 100644 --- a/.github/workflows/linux-ppc64le.yml +++ b/.github/workflows/linux-ppc64le.yml @@ -5,6 +5,10 @@ on: branches: - main - maintenance/** + paths-ignore: + - '**.pyi' + - '**.md' + - '**.rst' workflow_dispatch: permissions: diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 45a9f65bd9f0..5a9fb69c5178 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -14,6 +14,10 @@ on: branches: - main - maintenance/** + paths-ignore: + - '**.pyi' + - '**.md' + - '**.rst' defaults: run: diff --git a/.github/workflows/linux_blas.yml b/.github/workflows/linux_blas.yml index 98d45ab8c435..c282365ab012 100644 --- 
a/.github/workflows/linux_blas.yml +++ b/.github/workflows/linux_blas.yml @@ -40,6 +40,10 @@ on: branches: - main - maintenance/** + paths-ignore: + - '**.pyi' + - '**.md' + - '**.rst' defaults: run: @@ -213,7 +217,7 @@ jobs: - name: Test run: spin test -j auto -- numpy/linalg --timeout=600 --durations=10 - + netlib-debian: if: github.repository == 'numpy/numpy' runs-on: ubuntu-latest @@ -404,4 +408,3 @@ jobs: - name: Test run: spin test -- numpy/linalg - diff --git a/.github/workflows/linux_qemu.yml b/.github/workflows/linux_qemu.yml index c11eca8dc5ab..5fd10b73b9be 100644 --- a/.github/workflows/linux_qemu.yml +++ b/.github/workflows/linux_qemu.yml @@ -14,6 +14,10 @@ on: branches: - main - maintenance/** + paths-ignore: + - '**.pyi' + - '**.md' + - '**.rst' workflow_dispatch: defaults: diff --git a/.github/workflows/linux_simd.yml b/.github/workflows/linux_simd.yml index cfc45b8a6da4..de77a8410f18 100644 --- a/.github/workflows/linux_simd.yml +++ b/.github/workflows/linux_simd.yml @@ -31,6 +31,10 @@ on: branches: - main - maintenance/** + paths-ignore: + - '**.pyi' + - '**.md' + - '**.rst' defaults: run: diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index f4393e71523c..399bec631713 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -5,7 +5,10 @@ on: branches: - main - maintenance/** - + paths-ignore: + - '**.pyi' + - '**.md' + - '**.rst' permissions: contents: read # to fetch code (actions/checkout) @@ -146,7 +149,7 @@ jobs: - name: Test (fast tests) if: ${{ matrix.version != '3.14t' || matrix.build_runner[0] != 'macos-14' }} run: spin test -j2 -- --timeout=600 --durations=10 - + - name: Test in multiple threads if: ${{ matrix.version == '3.14t' && matrix.build_runner[0] == 'macos-14' }} run: | diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 886a70e1be2b..96c175c07725 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -5,6 +5,10 @@ on: branches: - 
main - maintenance/** + paths-ignore: + - '**.pyi' + - '**.md' + - '**.rst' concurrency: group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} From 9bb5dd875302f6b8dcbb9166058fa5de6b87eb6b Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Thu, 23 Oct 2025 18:35:14 +0200 Subject: [PATCH 0745/1718] TYP, STY: ``polynomial``: reformat the stubs (#30052) [skip azp] [skip circle] [skip cirrus] --- numpy/polynomial/__init__.pyi | 18 +- numpy/polynomial/_polybase.pyi | 35 +-- numpy/polynomial/_polytypes.pyi | 358 ++++++---------------------- numpy/polynomial/chebyshev.pyi | 33 +-- numpy/polynomial/hermite.pyi | 9 +- numpy/polynomial/hermite_e.pyi | 9 +- numpy/polynomial/polynomial.pyi | 30 +-- numpy/polynomial/polyutils.pyi | 406 ++++++++------------------------ 8 files changed, 222 insertions(+), 676 deletions(-) diff --git a/numpy/polynomial/__init__.pyi b/numpy/polynomial/__init__.pyi index 6fb0fb5ec7fa..ad005a2dbe38 100644 --- a/numpy/polynomial/__init__.pyi +++ b/numpy/polynomial/__init__.pyi @@ -10,12 +10,18 @@ from .polynomial import Polynomial __all__ = [ "set_default_printstyle", - "polynomial", "Polynomial", - "chebyshev", "Chebyshev", - "legendre", "Legendre", - "hermite", "Hermite", - "hermite_e", "HermiteE", - "laguerre", "Laguerre", + "polynomial", + "Polynomial", + "chebyshev", + "Chebyshev", + "legendre", + "Legendre", + "hermite", + "Hermite", + "hermite_e", + "HermiteE", + "laguerre", + "Laguerre", ] def set_default_printstyle(style: Literal["ascii", "unicode"]) -> None: ... diff --git a/numpy/polynomial/_polybase.pyi b/numpy/polynomial/_polybase.pyi index fcece8376170..b16b06c8a734 100644 --- a/numpy/polynomial/_polybase.pyi +++ b/numpy/polynomial/_polybase.pyi @@ -53,15 +53,12 @@ class ABCPolyBase(Generic[_NameT_co], abc.ABC): _symbol: str @property def symbol(self, /) -> str: ... - @property @abc.abstractmethod def domain(self) -> _Array2[np.float64 | Any]: ... 
- @property @abc.abstractmethod def window(self) -> _Array2[np.float64 | Any]: ... - @property @abc.abstractmethod def basis_name(self) -> _NameT_co: ... @@ -77,6 +74,7 @@ class ABCPolyBase(Generic[_NameT_co], abc.ABC): symbol: str = "x", ) -> None: ... + # @overload def __call__(self, /, arg: _PolyT) -> _PolyT: ... @overload @@ -90,19 +88,21 @@ class ABCPolyBase(Generic[_NameT_co], abc.ABC): @overload def __call__(self, /, arg: _ArrayLikeCoefObject_co) -> npt.NDArray[np.object_]: ... - def __format__(self, fmt_str: str, /) -> str: ... - def __eq__(self, x: object, /) -> bool: ... - def __ne__(self, x: object, /) -> bool: ... + # unary ops def __neg__(self, /) -> Self: ... def __pos__(self, /) -> Self: ... + + # binary ops def __add__(self, x: _AnyOther, /) -> Self: ... def __sub__(self, x: _AnyOther, /) -> Self: ... def __mul__(self, x: _AnyOther, /) -> Self: ... + def __pow__(self, x: _AnyOther, /) -> Self: ... def __truediv__(self, x: _AnyOther, /) -> Self: ... def __floordiv__(self, x: _AnyOther, /) -> Self: ... def __mod__(self, x: _AnyOther, /) -> Self: ... def __divmod__(self, x: _AnyOther, /) -> _Tuple2[Self]: ... - def __pow__(self, x: _AnyOther, /) -> Self: ... + + # reflected binary ops def __radd__(self, x: _AnyOther, /) -> Self: ... def __rsub__(self, x: _AnyOther, /) -> Self: ... def __rmul__(self, x: _AnyOther, /) -> Self: ... @@ -110,22 +110,29 @@ class ABCPolyBase(Generic[_NameT_co], abc.ABC): def __rfloordiv__(self, x: _AnyOther, /) -> Self: ... def __rmod__(self, x: _AnyOther, /) -> Self: ... def __rdivmod__(self, x: _AnyOther, /) -> _Tuple2[Self]: ... + + # iterable and sized def __len__(self, /) -> int: ... def __iter__(self, /) -> Iterator[np.float64 | Any]: ... + + # pickling def __getstate__(self, /) -> dict[str, Any]: ... def __setstate__(self, dict: dict[str, Any], /) -> None: ... + # def has_samecoef(self, /, other: ABCPolyBase) -> bool: ... def has_samedomain(self, /, other: ABCPolyBase) -> bool: ... 
def has_samewindow(self, /, other: ABCPolyBase) -> bool: ... def has_sametype(self, /, other: object) -> TypeIs[Self]: ... + # def copy(self, /) -> Self: ... def degree(self, /) -> int: ... def cutdeg(self, /, deg: int) -> Self: ... def trim(self, /, tol: _FloatLike_co = 0) -> Self: ... def truncate(self, /, size: _AnyInt) -> Self: ... + # @overload def convert( self, @@ -152,8 +159,8 @@ class ABCPolyBase(Generic[_NameT_co], abc.ABC): window: _SeriesLikeCoef_co | None = None, ) -> Self: ... + # def mapparms(self, /) -> _Tuple2[Any]: ... - def integ( self, /, @@ -161,11 +168,8 @@ class ABCPolyBase(Generic[_NameT_co], abc.ABC): k: _CoefLike_co | _SeriesLikeCoef_co = [], lbnd: _CoefLike_co | None = None, ) -> Self: ... - def deriv(self, /, m: SupportsIndex = 1) -> Self: ... - def roots(self, /) -> _CoefSeries: ... - def linspace( self, /, @@ -173,6 +177,7 @@ class ABCPolyBase(Generic[_NameT_co], abc.ABC): domain: _SeriesLikeCoef_co | None = None, ) -> _Tuple2[_Series[np.float64 | np.complex128]]: ... + # @overload @classmethod def fit( @@ -211,12 +216,14 @@ class ABCPolyBase(Generic[_NameT_co], abc.ABC): deg: int | _SeriesLikeInt_co, domain: _SeriesLikeCoef_co | None, rcond: _FloatLike_co, - full: Literal[True], /, + full: Literal[True], + /, w: _SeriesLikeCoef_co | None = None, window: _SeriesLikeCoef_co | None = None, symbol: str = "x", ) -> tuple[Self, Sequence[np.inexact | np.int32]]: ... + # @classmethod def fromroots( cls, @@ -225,7 +232,6 @@ class ABCPolyBase(Generic[_NameT_co], abc.ABC): window: _SeriesLikeCoef_co | None = None, symbol: str = "x", ) -> Self: ... - @classmethod def identity( cls, @@ -233,7 +239,6 @@ class ABCPolyBase(Generic[_NameT_co], abc.ABC): window: _SeriesLikeCoef_co | None = None, symbol: str = "x", ) -> Self: ... - @classmethod def basis( cls, @@ -242,7 +247,6 @@ class ABCPolyBase(Generic[_NameT_co], abc.ABC): window: _SeriesLikeCoef_co | None = None, symbol: str = "x", ) -> Self: ... 
- @classmethod def cast( cls, @@ -250,7 +254,6 @@ class ABCPolyBase(Generic[_NameT_co], abc.ABC): domain: _SeriesLikeCoef_co | None = None, window: _SeriesLikeCoef_co | None = None, ) -> Self: ... - @classmethod def _str_term_unicode(cls, /, i: str, arg_str: str) -> str: ... @classmethod diff --git a/numpy/polynomial/_polytypes.pyi b/numpy/polynomial/_polytypes.pyi index 91cb27e3a923..58482aea1be9 100644 --- a/numpy/polynomial/_polytypes.pyi +++ b/numpy/polynomial/_polytypes.pyi @@ -1,6 +1,6 @@ -# ruff: noqa: PYI046, PYI047 +# ruff: noqa: PYI046 -from collections.abc import Callable, Sequence +from collections.abc import Sequence from typing import ( Any, Literal, @@ -40,15 +40,12 @@ _ScalarT = TypeVar("_ScalarT", bound=np.number | np.bool | np.object_) class _SupportsCoefOps(Protocol[_T_contra]): def __eq__(self, x: object, /) -> bool: ... def __ne__(self, x: object, /) -> bool: ... - def __neg__(self, /) -> Self: ... def __pos__(self, /) -> Self: ... - def __add__(self, x: _T_contra, /) -> Self: ... def __sub__(self, x: _T_contra, /) -> Self: ... def __mul__(self, x: _T_contra, /) -> Self: ... def __pow__(self, x: _T_contra, /) -> Self | float: ... - def __radd__(self, x: _T_contra, /) -> Self: ... def __rsub__(self, x: _T_contra, /) -> Self: ... def __rmul__(self, x: _T_contra, /) -> Self: ... @@ -75,41 +72,15 @@ _CoefObjectLike_co: TypeAlias = np.object_ | _SupportsCoefOps[Any] _CoefLike_co: TypeAlias = _NumberLike_co | _CoefObjectLike_co # The term "series" is used here to refer to 1-d arrays of numeric scalars. 
-_SeriesLikeBool_co: TypeAlias = ( - _SupportsArray[np.dtype[np.bool]] - | Sequence[bool | np.bool] -) -_SeriesLikeInt_co: TypeAlias = ( - _SupportsArray[np.dtype[np.integer | np.bool]] - | Sequence[_IntLike_co] -) -_SeriesLikeFloat_co: TypeAlias = ( - _SupportsArray[np.dtype[np.floating | np.integer | np.bool]] - | Sequence[_FloatLike_co] -) -_SeriesLikeComplex_co: TypeAlias = ( - _SupportsArray[np.dtype[np.inexact | np.integer | np.bool]] - | Sequence[_ComplexLike_co] -) -_SeriesLikeObject_co: TypeAlias = ( - _SupportsArray[np.dtype[np.object_]] - | Sequence[_CoefObjectLike_co] -) -_SeriesLikeCoef_co: TypeAlias = ( - _SupportsArray[np.dtype[np.number | np.bool | np.object_]] - | Sequence[_CoefLike_co] -) +_SeriesLikeBool_co: TypeAlias = _SupportsArray[np.dtype[np.bool]] | Sequence[bool | np.bool] +_SeriesLikeInt_co: TypeAlias = _SupportsArray[np.dtype[np.integer | np.bool]] | Sequence[_IntLike_co] +_SeriesLikeFloat_co: TypeAlias = _SupportsArray[np.dtype[np.floating | np.integer | np.bool]] | Sequence[_FloatLike_co] +_SeriesLikeComplex_co: TypeAlias = _SupportsArray[np.dtype[np.number | np.bool]] | Sequence[_ComplexLike_co] +_SeriesLikeObject_co: TypeAlias = _SupportsArray[np.dtype[np.object_]] | Sequence[_CoefObjectLike_co] +_SeriesLikeCoef_co: TypeAlias = _SupportsArray[np.dtype[np.number | np.bool | np.object_]] | Sequence[_CoefLike_co] -_ArrayLikeCoefObject_co: TypeAlias = ( - _CoefObjectLike_co - | _SeriesLikeObject_co - | _NestedSequence[_SeriesLikeObject_co] -) -_ArrayLikeCoef_co: TypeAlias = ( - npt.NDArray[np.number | np.bool | np.object_] - | _ArrayLikeNumber_co - | _ArrayLikeCoefObject_co -) +_ArrayLikeCoefObject_co: TypeAlias = _CoefObjectLike_co | _SeriesLikeObject_co | _NestedSequence[_SeriesLikeObject_co] +_ArrayLikeCoef_co: TypeAlias = npt.NDArray[np.number | np.bool | np.object_] | _ArrayLikeNumber_co | _ArrayLikeCoefObject_co _Line: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] @@ -122,19 +93,9 @@ class _FuncLine(Protocol): @overload 
def __call__(self, /, off: float, scl: float) -> _Line[np.float64]: ... @overload - def __call__( - self, - /, - off: complex, - scl: complex, - ) -> _Line[np.complex128]: ... + def __call__(self, /, off: complex, scl: complex) -> _Line[np.complex128]: ... @overload - def __call__( - self, - /, - off: _SupportsCoefOps[Any], - scl: _SupportsCoefOps[Any], - ) -> _Line[np.object_]: ... + def __call__(self, /, off: _SupportsCoefOps[Any], scl: _SupportsCoefOps[Any]) -> _Line[np.object_]: ... @type_check_only class _FuncFromRoots(Protocol): @@ -148,33 +109,13 @@ class _FuncFromRoots(Protocol): @type_check_only class _FuncBinOp(Protocol): @overload - def __call__( - self, - /, - c1: _SeriesLikeBool_co, - c2: _SeriesLikeBool_co, - ) -> NoReturn: ... + def __call__(self, /, c1: _SeriesLikeBool_co, c2: _SeriesLikeBool_co) -> NoReturn: ... @overload - def __call__( - self, - /, - c1: _SeriesLikeFloat_co, - c2: _SeriesLikeFloat_co, - ) -> _FloatSeries: ... + def __call__(self, /, c1: _SeriesLikeFloat_co, c2: _SeriesLikeFloat_co) -> _FloatSeries: ... @overload - def __call__( - self, - /, - c1: _SeriesLikeComplex_co, - c2: _SeriesLikeComplex_co, - ) -> _ComplexSeries: ... + def __call__(self, /, c1: _SeriesLikeComplex_co, c2: _SeriesLikeComplex_co) -> _ComplexSeries: ... @overload - def __call__( - self, - /, - c1: _SeriesLikeCoef_co, - c2: _SeriesLikeCoef_co, - ) -> _ObjectSeries: ... + def __call__(self, /, c1: _SeriesLikeCoef_co, c2: _SeriesLikeCoef_co) -> _ObjectSeries: ... @type_check_only class _FuncUnOp(Protocol): @@ -197,29 +138,11 @@ class _FuncPoly2Ortho(Protocol): @type_check_only class _FuncPow(Protocol): @overload - def __call__( - self, - /, - c: _SeriesLikeFloat_co, - pow: _IntLike_co, - maxpower: _IntLike_co | None = ..., - ) -> _FloatSeries: ... + def __call__(self, /, c: _SeriesLikeFloat_co, pow: _IntLike_co, maxpower: _IntLike_co | None = ...) -> _FloatSeries: ... 
@overload - def __call__( - self, - /, - c: _SeriesLikeComplex_co, - pow: _IntLike_co, - maxpower: _IntLike_co | None = ..., - ) -> _ComplexSeries: ... + def __call__(self, /, c: _SeriesLikeComplex_co, pow: _IntLike_co, maxpower: _IntLike_co | None = ...) -> _ComplexSeries: ... @overload - def __call__( - self, - /, - c: _SeriesLikeCoef_co, - pow: _IntLike_co, - maxpower: _IntLike_co | None = ..., - ) -> _ObjectSeries: ... + def __call__(self, /, c: _SeriesLikeCoef_co, pow: _IntLike_co, maxpower: _IntLike_co | None = ...) -> _ObjectSeries: ... @type_check_only class _FuncDer(Protocol): @@ -228,27 +151,27 @@ class _FuncDer(Protocol): self, /, c: _ArrayLikeFloat_co, - m: SupportsIndex = ..., - scl: _FloatLike_co = ..., - axis: SupportsIndex = ..., + m: SupportsIndex = 1, + scl: _FloatLike_co = 1, + axis: SupportsIndex = 0, ) -> _FloatArray: ... @overload def __call__( self, /, c: _ArrayLikeComplex_co, - m: SupportsIndex = ..., - scl: _ComplexLike_co = ..., - axis: SupportsIndex = ..., + m: SupportsIndex = 1, + scl: _ComplexLike_co = 1, + axis: SupportsIndex = 0, ) -> _ComplexArray: ... @overload def __call__( self, /, c: _ArrayLikeCoef_co, - m: SupportsIndex = ..., - scl: _CoefLike_co = ..., - axis: SupportsIndex = ..., + m: SupportsIndex = 1, + scl: _CoefLike_co = 1, + axis: SupportsIndex = 0, ) -> _ObjectArray: ... @type_check_only @@ -258,136 +181,64 @@ class _FuncInteg(Protocol): self, /, c: _ArrayLikeFloat_co, - m: SupportsIndex = ..., - k: _FloatLike_co | _SeriesLikeFloat_co = ..., - lbnd: _FloatLike_co = ..., - scl: _FloatLike_co = ..., - axis: SupportsIndex = ..., + m: SupportsIndex = 1, + k: _FloatLike_co | _SeriesLikeFloat_co = [], + lbnd: _FloatLike_co = 0, + scl: _FloatLike_co = 1, + axis: SupportsIndex = 0, ) -> _FloatArray: ... 
@overload def __call__( self, /, c: _ArrayLikeComplex_co, - m: SupportsIndex = ..., - k: _ComplexLike_co | _SeriesLikeComplex_co = ..., - lbnd: _ComplexLike_co = ..., - scl: _ComplexLike_co = ..., - axis: SupportsIndex = ..., + m: SupportsIndex = 1, + k: _ComplexLike_co | _SeriesLikeComplex_co = [], + lbnd: _ComplexLike_co = 0, + scl: _ComplexLike_co = 1, + axis: SupportsIndex = 0, ) -> _ComplexArray: ... @overload def __call__( self, /, c: _ArrayLikeCoef_co, - m: SupportsIndex = ..., - k: _CoefLike_co | _SeriesLikeCoef_co = ..., - lbnd: _CoefLike_co = ..., - scl: _CoefLike_co = ..., - axis: SupportsIndex = ..., + m: SupportsIndex = 1, + k: _CoefLike_co | _SeriesLikeCoef_co = [], + lbnd: _CoefLike_co = 0, + scl: _CoefLike_co = 1, + axis: SupportsIndex = 0, ) -> _ObjectArray: ... @type_check_only class _FuncVal(Protocol): @overload - def __call__( - self, - /, - x: _FloatLike_co, - c: _SeriesLikeFloat_co, - tensor: bool = ..., - ) -> np.floating: ... + def __call__(self, /, x: _FloatLike_co, c: _SeriesLikeFloat_co, tensor: bool = True) -> np.floating: ... @overload - def __call__( - self, - /, - x: _NumberLike_co, - c: _SeriesLikeComplex_co, - tensor: bool = ..., - ) -> np.complexfloating: ... + def __call__(self, /, x: _NumberLike_co, c: _SeriesLikeComplex_co, tensor: bool = True) -> np.complexfloating: ... @overload - def __call__( - self, - /, - x: _ArrayLikeFloat_co, - c: _ArrayLikeFloat_co, - tensor: bool = ..., - ) -> _FloatArray: ... + def __call__(self, /, x: _ArrayLikeFloat_co, c: _ArrayLikeFloat_co, tensor: bool = True) -> _FloatArray: ... @overload - def __call__( - self, - /, - x: _ArrayLikeComplex_co, - c: _ArrayLikeComplex_co, - tensor: bool = ..., - ) -> _ComplexArray: ... + def __call__(self, /, x: _ArrayLikeComplex_co, c: _ArrayLikeComplex_co, tensor: bool = True) -> _ComplexArray: ... @overload - def __call__( - self, - /, - x: _ArrayLikeCoef_co, - c: _ArrayLikeCoef_co, - tensor: bool = ..., - ) -> _ObjectArray: ... 
+ def __call__(self, /, x: _ArrayLikeCoef_co, c: _ArrayLikeCoef_co, tensor: bool = True) -> _ObjectArray: ... @overload - def __call__( - self, - /, - x: _CoefLike_co, - c: _SeriesLikeObject_co, - tensor: bool = ..., - ) -> _SupportsCoefOps[Any]: ... + def __call__(self, /, x: _CoefLike_co, c: _SeriesLikeObject_co, tensor: bool = True) -> _SupportsCoefOps[Any]: ... @type_check_only class _FuncVal2D(Protocol): @overload - def __call__( - self, - /, - x: _FloatLike_co, - y: _FloatLike_co, - c: _SeriesLikeFloat_co, - ) -> np.floating: ... + def __call__(self, /, x: _FloatLike_co, y: _FloatLike_co, c: _SeriesLikeFloat_co) -> np.floating: ... @overload - def __call__( - self, - /, - x: _NumberLike_co, - y: _NumberLike_co, - c: _SeriesLikeComplex_co, - ) -> np.complexfloating: ... + def __call__(self, /, x: _NumberLike_co, y: _NumberLike_co, c: _SeriesLikeComplex_co) -> np.complexfloating: ... @overload - def __call__( - self, - /, - x: _ArrayLikeFloat_co, - y: _ArrayLikeFloat_co, - c: _ArrayLikeFloat_co, - ) -> _FloatArray: ... + def __call__(self, /, x: _ArrayLikeFloat_co, y: _ArrayLikeFloat_co, c: _ArrayLikeFloat_co) -> _FloatArray: ... @overload - def __call__( - self, - /, - x: _ArrayLikeComplex_co, - y: _ArrayLikeComplex_co, - c: _ArrayLikeComplex_co, - ) -> _ComplexArray: ... + def __call__(self, /, x: _ArrayLikeComplex_co, y: _ArrayLikeComplex_co, c: _ArrayLikeComplex_co) -> _ComplexArray: ... @overload - def __call__( - self, - /, - x: _ArrayLikeCoef_co, - y: _ArrayLikeCoef_co, - c: _ArrayLikeCoef_co, - ) -> _ObjectArray: ... + def __call__(self, /, x: _ArrayLikeCoef_co, y: _ArrayLikeCoef_co, c: _ArrayLikeCoef_co) -> _ObjectArray: ... @overload - def __call__( - self, - /, - x: _CoefLike_co, - y: _CoefLike_co, - c: _SeriesLikeCoef_co, - ) -> _SupportsCoefOps[Any]: ... + def __call__(self, /, x: _CoefLike_co, y: _CoefLike_co, c: _SeriesLikeCoef_co) -> _SupportsCoefOps[Any]: ... 
@type_check_only class _FuncVal3D(Protocol): @@ -398,7 +249,7 @@ class _FuncVal3D(Protocol): x: _FloatLike_co, y: _FloatLike_co, z: _FloatLike_co, - c: _SeriesLikeFloat_co + c: _SeriesLikeFloat_co, ) -> np.floating: ... @overload def __call__( @@ -446,78 +297,29 @@ class _FuncVal3D(Protocol): c: _SeriesLikeCoef_co, ) -> _SupportsCoefOps[Any]: ... -_AnyValF: TypeAlias = Callable[ - [npt.ArrayLike, npt.ArrayLike, bool], - _CoefArray, -] - @type_check_only class _FuncVander(Protocol): @overload - def __call__( - self, - /, - x: _ArrayLikeFloat_co, - deg: SupportsIndex, - ) -> _FloatArray: ... + def __call__(self, /, x: _ArrayLikeFloat_co, deg: SupportsIndex) -> _FloatArray: ... @overload - def __call__( - self, - /, - x: _ArrayLikeComplex_co, - deg: SupportsIndex, - ) -> _ComplexArray: ... + def __call__(self, /, x: _ArrayLikeComplex_co, deg: SupportsIndex) -> _ComplexArray: ... @overload - def __call__( - self, - /, - x: _ArrayLikeCoef_co, - deg: SupportsIndex, - ) -> _ObjectArray: ... + def __call__(self, /, x: _ArrayLikeCoef_co, deg: SupportsIndex) -> _ObjectArray: ... @overload - def __call__( - self, - /, - x: npt.ArrayLike, - deg: SupportsIndex, - ) -> _CoefArray: ... + def __call__(self, /, x: npt.ArrayLike, deg: SupportsIndex) -> _CoefArray: ... _AnyDegrees: TypeAlias = Sequence[SupportsIndex] @type_check_only class _FuncVander2D(Protocol): @overload - def __call__( - self, - /, - x: _ArrayLikeFloat_co, - y: _ArrayLikeFloat_co, - deg: _AnyDegrees, - ) -> _FloatArray: ... + def __call__(self, /, x: _ArrayLikeFloat_co, y: _ArrayLikeFloat_co, deg: _AnyDegrees) -> _FloatArray: ... @overload - def __call__( - self, - /, - x: _ArrayLikeComplex_co, - y: _ArrayLikeComplex_co, - deg: _AnyDegrees, - ) -> _ComplexArray: ... + def __call__(self, /, x: _ArrayLikeComplex_co, y: _ArrayLikeComplex_co, deg: _AnyDegrees) -> _ComplexArray: ... @overload - def __call__( - self, - /, - x: _ArrayLikeCoef_co, - y: _ArrayLikeCoef_co, - deg: _AnyDegrees, - ) -> _ObjectArray: ... 
+ def __call__(self, /, x: _ArrayLikeCoef_co, y: _ArrayLikeCoef_co, deg: _AnyDegrees) -> _ObjectArray: ... @overload - def __call__( - self, - /, - x: npt.ArrayLike, - y: npt.ArrayLike, - deg: _AnyDegrees, - ) -> _CoefArray: ... + def __call__(self, /, x: npt.ArrayLike, y: npt.ArrayLike, deg: _AnyDegrees) -> _CoefArray: ... @type_check_only class _FuncVander3D(Protocol): @@ -596,7 +398,6 @@ class _FuncFit(Protocol): full: Literal[True], w: _SeriesLikeFloat_co | None = ..., ) -> tuple[_FloatArray, _FullFitResult]: ... - @overload def __call__( self, @@ -631,7 +432,6 @@ class _FuncFit(Protocol): full: Literal[True], w: _SeriesLikeFloat_co | None = ..., ) -> tuple[_ComplexArray, _FullFitResult]: ... - @overload def __call__( self, @@ -670,17 +470,9 @@ class _FuncFit(Protocol): @type_check_only class _FuncRoots(Protocol): @overload - def __call__( - self, - /, - c: _SeriesLikeFloat_co, - ) -> _Series[np.float64]: ... + def __call__(self, /, c: _SeriesLikeFloat_co) -> _Series[np.float64]: ... @overload - def __call__( - self, - /, - c: _SeriesLikeComplex_co, - ) -> _Series[np.complex128]: ... + def __call__(self, /, c: _SeriesLikeComplex_co) -> _Series[np.complex128]: ... @overload def __call__(self, /, c: _SeriesLikeCoef_co) -> _ObjectSeries: ... @@ -689,27 +481,15 @@ _Companion: TypeAlias = np.ndarray[tuple[int, int], np.dtype[_ScalarT]] @type_check_only class _FuncCompanion(Protocol): @overload - def __call__( - self, - /, - c: _SeriesLikeFloat_co, - ) -> _Companion[np.float64]: ... + def __call__(self, /, c: _SeriesLikeFloat_co) -> _Companion[np.float64]: ... @overload - def __call__( - self, - /, - c: _SeriesLikeComplex_co, - ) -> _Companion[np.complex128]: ... + def __call__(self, /, c: _SeriesLikeComplex_co) -> _Companion[np.complex128]: ... @overload def __call__(self, /, c: _SeriesLikeCoef_co) -> _Companion[np.object_]: ... @type_check_only class _FuncGauss(Protocol): - def __call__( - self, - /, - deg: SupportsIndex, - ) -> _Tuple2[_Series[np.float64]]: ... 
+ def __call__(self, /, deg: SupportsIndex) -> _Tuple2[_Series[np.float64]]: ... @type_check_only class _FuncWeight(Protocol): diff --git a/numpy/polynomial/chebyshev.pyi b/numpy/polynomial/chebyshev.pyi index 35043a001f10..d70842020ebe 100644 --- a/numpy/polynomial/chebyshev.pyi +++ b/numpy/polynomial/chebyshev.pyi @@ -82,16 +82,12 @@ __all__ = [ ] _NumberOrObjectT = TypeVar("_NumberOrObjectT", bound=np.number | np.object_) +_CoefScalarT = TypeVar("_CoefScalarT", bound=np.number | np.bool | np.object_) + def _cseries_to_zseries(c: npt.NDArray[_NumberOrObjectT]) -> _Series[_NumberOrObjectT]: ... def _zseries_to_cseries(zs: npt.NDArray[_NumberOrObjectT]) -> _Series[_NumberOrObjectT]: ... -def _zseries_mul( - z1: npt.NDArray[_NumberOrObjectT], - z2: npt.NDArray[_NumberOrObjectT], -) -> _Series[_NumberOrObjectT]: ... -def _zseries_div( - z1: npt.NDArray[_NumberOrObjectT], - z2: npt.NDArray[_NumberOrObjectT], -) -> _Series[_NumberOrObjectT]: ... +def _zseries_mul(z1: npt.NDArray[_NumberOrObjectT], z2: npt.NDArray[_NumberOrObjectT]) -> _Series[_NumberOrObjectT]: ... +def _zseries_div(z1: npt.NDArray[_NumberOrObjectT], z2: npt.NDArray[_NumberOrObjectT]) -> _Series[_NumberOrObjectT]: ... def _zseries_der(zs: npt.NDArray[_NumberOrObjectT]) -> _Series[_NumberOrObjectT]: ... def _zseries_int(zs: npt.NDArray[_NumberOrObjectT]) -> _Series[_NumberOrObjectT]: ... @@ -129,8 +125,7 @@ chebweight: Final[_FuncWeight] = ... chebpts1: Final[_FuncPts] = ... chebpts2: Final[_FuncPts] = ... -# keep in sync with `Chebyshev.interpolate` -_RT = TypeVar("_RT", bound=np.number | np.bool | np.object_) +# keep in sync with `Chebyshev.interpolate` (minus `domain` parameter) @overload def chebinterpolate( func: np.ufunc, @@ -139,16 +134,16 @@ def chebinterpolate( ) -> npt.NDArray[np.float64 | np.complex128 | np.object_]: ... 
@overload def chebinterpolate( - func: Callable[[npt.NDArray[np.float64]], _RT], + func: Callable[[npt.NDArray[np.float64]], _CoefScalarT], deg: _IntLike_co, args: tuple[()] = (), -) -> npt.NDArray[_RT]: ... +) -> npt.NDArray[_CoefScalarT]: ... @overload def chebinterpolate( - func: Callable[Concatenate[npt.NDArray[np.float64], ...], _RT], + func: Callable[Concatenate[npt.NDArray[np.float64], ...], _CoefScalarT], deg: _IntLike_co, args: Iterable[Any], -) -> npt.NDArray[_RT]: ... +) -> npt.NDArray[_CoefScalarT]: ... class Chebyshev(ABCPolyBase[L["T"]]): basis_name: ClassVar[L["T"]] = "T" # pyright: ignore[reportIncompatibleMethodOverride] @@ -168,10 +163,7 @@ class Chebyshev(ABCPolyBase[L["T"]]): @classmethod def interpolate( cls, - func: Callable[ - Concatenate[npt.NDArray[np.float64], ...], - _CoefSeries, - ], + func: Callable[Concatenate[npt.NDArray[np.float64], ...], _CoefSeries], deg: _IntLike_co, domain: _SeriesLikeCoef_co | None = None, *, @@ -181,10 +173,7 @@ class Chebyshev(ABCPolyBase[L["T"]]): @classmethod def interpolate( cls, - func: Callable[ - Concatenate[npt.NDArray[np.float64], ...], - _CoefSeries, - ], + func: Callable[Concatenate[npt.NDArray[np.float64], ...], _CoefSeries], deg: _IntLike_co, domain: _SeriesLikeCoef_co | None, args: Iterable[Any], diff --git a/numpy/polynomial/hermite.pyi b/numpy/polynomial/hermite.pyi index 9d8a63a6b95d..04b8238735dd 100644 --- a/numpy/polynomial/hermite.pyi +++ b/numpy/polynomial/hermite.pyi @@ -1,6 +1,7 @@ from typing import Any, ClassVar, Final, Literal as L, TypeVar import numpy as np +from numpy._typing import _Shape from ._polybase import ABCPolyBase from ._polytypes import ( @@ -62,6 +63,8 @@ __all__ = [ "hermweight", ] +_ShapeT = TypeVar("_ShapeT", bound=_Shape) + poly2herm: Final[_FuncPoly2Ortho] = ... herm2poly: Final[_FuncUnOp] = ... @@ -92,11 +95,7 @@ hermfit: Final[_FuncFit] = ... hermcompanion: Final[_FuncCompanion] = ... hermroots: Final[_FuncRoots] = ... 
-_ShapeT = TypeVar("_ShapeT", bound=tuple[int, ...]) -def _normed_hermite_n( - x: np.ndarray[_ShapeT, np.dtype[np.float64]], - n: int | np.intp, -) -> np.ndarray[_ShapeT, np.dtype[np.float64]]: ... +def _normed_hermite_n(x: np.ndarray[_ShapeT, np.dtype[np.float64]], n: int) -> np.ndarray[_ShapeT, np.dtype[np.float64]]: ... hermgauss: Final[_FuncGauss] = ... hermweight: Final[_FuncWeight] = ... diff --git a/numpy/polynomial/hermite_e.pyi b/numpy/polynomial/hermite_e.pyi index b93975f246da..b996de52c6da 100644 --- a/numpy/polynomial/hermite_e.pyi +++ b/numpy/polynomial/hermite_e.pyi @@ -1,6 +1,7 @@ from typing import Any, ClassVar, Final, Literal as L, TypeVar import numpy as np +from numpy._typing import _Shape from ._polybase import ABCPolyBase from ._polytypes import ( @@ -62,6 +63,8 @@ __all__ = [ "hermeweight", ] +_ShapeT = TypeVar("_ShapeT", bound=_Shape) + poly2herme: Final[_FuncPoly2Ortho] = ... herme2poly: Final[_FuncUnOp] = ... @@ -92,11 +95,7 @@ hermefit: Final[_FuncFit] = ... hermecompanion: Final[_FuncCompanion] = ... hermeroots: Final[_FuncRoots] = ... -_ShapeT = TypeVar("_ShapeT", bound=tuple[int, ...]) -def _normed_hermite_e_n( - x: np.ndarray[_ShapeT, np.dtype[np.float64]], - n: int | np.intp, -) -> np.ndarray[_ShapeT, np.dtype[np.float64]]: ... +def _normed_hermite_e_n(x: np.ndarray[_ShapeT, np.dtype[np.float64]], n: int) -> np.ndarray[_ShapeT, np.dtype[np.float64]]: ... hermegauss: Final[_FuncGauss] = ... hermeweight: Final[_FuncWeight] = ... diff --git a/numpy/polynomial/polynomial.pyi b/numpy/polynomial/polynomial.pyi index ddd10fa4282e..86f288468a15 100644 --- a/numpy/polynomial/polynomial.pyi +++ b/numpy/polynomial/polynomial.pyi @@ -84,35 +84,15 @@ polyval2d: Final[_FuncVal2D] = ... polyval3d: Final[_FuncVal3D] = ... @overload -def polyvalfromroots( - x: _FloatLike_co, - r: _FloatLike_co, - tensor: bool = True, -) -> np.float64 | Any: ... +def polyvalfromroots(x: _FloatLike_co, r: _FloatLike_co, tensor: bool = True) -> np.float64 | Any: ... 
@overload -def polyvalfromroots( - x: _NumberLike_co, - r: _NumberLike_co, - tensor: bool = True, -) -> np.complex128 | Any: ... +def polyvalfromroots(x: _NumberLike_co, r: _NumberLike_co, tensor: bool = True) -> np.complex128 | Any: ... @overload -def polyvalfromroots( - x: _ArrayLikeFloat_co, - r: _ArrayLikeFloat_co, - tensor: bool = True, -) -> npt.NDArray[np.float64 | Any]: ... +def polyvalfromroots(x: _ArrayLikeFloat_co, r: _ArrayLikeFloat_co, tensor: bool = True) -> npt.NDArray[np.float64 | Any]: ... @overload -def polyvalfromroots( - x: _ArrayLikeNumber_co, - r: _ArrayLikeNumber_co, - tensor: bool = True, -) -> npt.NDArray[np.complex128 | Any]: ... +def polyvalfromroots(x: _ArrayLikeNumber_co, r: _ArrayLikeNumber_co, tensor: bool = True) -> npt.NDArray[np.complex128 | Any]: ... @overload -def polyvalfromroots( - x: _ArrayLikeCoef_co, - r: _ArrayLikeCoef_co, - tensor: bool = True, -) -> npt.NDArray[np.object_ | Any]: ... +def polyvalfromroots(x: _ArrayLikeCoef_co, r: _ArrayLikeCoef_co, tensor: bool = True) -> npt.NDArray[np.object_ | Any]: ... polygrid2d: Final[_FuncVal2D] = ... polygrid3d: Final[_FuncVal3D] = ... 
diff --git a/numpy/polynomial/polyutils.pyi b/numpy/polynomial/polyutils.pyi index fd818ce0c90d..5487bfa4c2ec 100644 --- a/numpy/polynomial/polyutils.pyi +++ b/numpy/polynomial/polyutils.pyi @@ -13,7 +13,6 @@ from numpy._typing import ( from ._polytypes import ( _AnyInt, - _AnyValF, _Array2, _ArrayLikeCoef_co, _CoefArray, @@ -36,190 +35,96 @@ from ._polytypes import ( _Tuple2, ) -__all__: Final[Sequence[str]] = [ - "as_series", - "format_float", - "getdomain", - "mapdomain", - "mapparms", - "trimcoef", - "trimseq", -] +__all__ = ["as_series", "format_float", "getdomain", "mapdomain", "mapparms", "trimcoef", "trimseq"] +_SeqT = TypeVar("_SeqT", bound=_CoefArray | Sequence[_CoefLike_co]) + +_AnyValF: TypeAlias = Callable[[npt.ArrayLike, npt.ArrayLike, bool], _CoefArray] _AnyLineF: TypeAlias = Callable[[_CoefLike_co, _CoefLike_co], _CoefArray] _AnyMulF: TypeAlias = Callable[[npt.ArrayLike, npt.ArrayLike], _CoefArray] _AnyVanderF: TypeAlias = Callable[[npt.ArrayLike, SupportsIndex], _CoefArray] @overload -def as_series( - alist: npt.NDArray[np.integer] | _FloatArray, - trim: bool = True, -) -> list[_FloatSeries]: ... -@overload -def as_series( - alist: _ComplexArray, - trim: bool = True, -) -> list[_ComplexSeries]: ... -@overload -def as_series( - alist: _ObjectArray, - trim: bool = True, -) -> list[_ObjectSeries]: ... -@overload -def as_series( # type: ignore[overload-overlap] - alist: Iterable[_FloatArray | npt.NDArray[np.integer]], - trim: bool = True, -) -> list[_FloatSeries]: ... -@overload -def as_series( - alist: Iterable[_ComplexArray], - trim: bool = True, -) -> list[_ComplexSeries]: ... -@overload -def as_series( - alist: Iterable[_ObjectArray], - trim: bool = True, -) -> list[_ObjectSeries]: ... -@overload -def as_series( # type: ignore[overload-overlap] - alist: Iterable[_SeriesLikeFloat_co | float], - trim: bool = True, -) -> list[_FloatSeries]: ... 
-@overload -def as_series( - alist: Iterable[_SeriesLikeComplex_co | complex], - trim: bool = True, -) -> list[_ComplexSeries]: ... -@overload -def as_series( - alist: Iterable[_SeriesLikeCoef_co | object], - trim: bool = True, -) -> list[_ObjectSeries]: ... +def as_series(alist: npt.NDArray[np.integer] | _FloatArray, trim: bool = True) -> list[_FloatSeries]: ... +@overload +def as_series(alist: _ComplexArray, trim: bool = True) -> list[_ComplexSeries]: ... +@overload +def as_series(alist: _ObjectArray, trim: bool = True) -> list[_ObjectSeries]: ... +@overload +def as_series(alist: Iterable[_FloatArray | npt.NDArray[np.integer]], trim: bool = True) -> list[_FloatSeries]: ... +@overload +def as_series(alist: Iterable[_ComplexArray], trim: bool = True) -> list[_ComplexSeries]: ... +@overload +def as_series(alist: Iterable[_ObjectArray], trim: bool = True) -> list[_ObjectSeries]: ... +@overload +def as_series(alist: Iterable[_SeriesLikeFloat_co | float], trim: bool = True) -> list[_FloatSeries]: ... +@overload +def as_series(alist: Iterable[_SeriesLikeComplex_co | complex], trim: bool = True) -> list[_ComplexSeries]: ... +@overload +def as_series(alist: Iterable[_SeriesLikeCoef_co | object], trim: bool = True) -> list[_ObjectSeries]: ... -_T_seq = TypeVar("_T_seq", bound=_CoefArray | Sequence[_CoefLike_co]) -def trimseq(seq: _T_seq) -> _T_seq: ... +# +def trimseq(seq: _SeqT) -> _SeqT: ... +# @overload -def trimcoef( # type: ignore[overload-overlap] - c: npt.NDArray[np.integer] | _FloatArray, - tol: _FloatLike_co = 0, -) -> _FloatSeries: ... +def trimcoef(c: npt.NDArray[np.integer] | _FloatArray, tol: _FloatLike_co = 0) -> _FloatSeries: ... @overload -def trimcoef( - c: _ComplexArray, - tol: _FloatLike_co = 0, -) -> _ComplexSeries: ... +def trimcoef(c: _ComplexArray, tol: _FloatLike_co = 0) -> _ComplexSeries: ... @overload -def trimcoef( - c: _ObjectArray, - tol: _FloatLike_co = 0, -) -> _ObjectSeries: ... 
+def trimcoef(c: _ObjectArray, tol: _FloatLike_co = 0) -> _ObjectSeries: ... @overload -def trimcoef( # type: ignore[overload-overlap] - c: _SeriesLikeFloat_co | float, - tol: _FloatLike_co = 0, -) -> _FloatSeries: ... +def trimcoef(c: _SeriesLikeFloat_co | float, tol: _FloatLike_co = 0) -> _FloatSeries: ... @overload -def trimcoef( - c: _SeriesLikeComplex_co | complex, - tol: _FloatLike_co = 0, -) -> _ComplexSeries: ... +def trimcoef(c: _SeriesLikeComplex_co | complex, tol: _FloatLike_co = 0) -> _ComplexSeries: ... @overload -def trimcoef( - c: _SeriesLikeCoef_co | object, - tol: _FloatLike_co = 0, -) -> _ObjectSeries: ... +def trimcoef(c: _SeriesLikeCoef_co | object, tol: _FloatLike_co = 0) -> _ObjectSeries: ... +# @overload -def getdomain( # type: ignore[overload-overlap] - x: _FloatArray | npt.NDArray[np.integer], -) -> _Array2[np.float64]: ... +def getdomain(x: _FloatArray | npt.NDArray[np.integer]) -> _Array2[np.float64]: ... @overload -def getdomain( - x: _ComplexArray, -) -> _Array2[np.complex128]: ... +def getdomain(x: _ComplexArray) -> _Array2[np.complex128]: ... @overload -def getdomain( - x: _ObjectArray, -) -> _Array2[np.object_]: ... +def getdomain(x: _ObjectArray) -> _Array2[np.object_]: ... @overload -def getdomain( # type: ignore[overload-overlap] - x: _SeriesLikeFloat_co | float, -) -> _Array2[np.float64]: ... +def getdomain(x: _SeriesLikeFloat_co | float) -> _Array2[np.float64]: ... @overload -def getdomain( - x: _SeriesLikeComplex_co | complex, -) -> _Array2[np.complex128]: ... +def getdomain(x: _SeriesLikeComplex_co | complex) -> _Array2[np.complex128]: ... @overload -def getdomain( - x: _SeriesLikeCoef_co | object, -) -> _Array2[np.object_]: ... +def getdomain(x: _SeriesLikeCoef_co | object) -> _Array2[np.object_]: ... +# @overload -def mapparms( # type: ignore[overload-overlap] - old: npt.NDArray[np.floating | np.integer], - new: npt.NDArray[np.floating | np.integer], -) -> _Tuple2[np.floating]: ... 
+def mapparms(old: npt.NDArray[np.floating | np.integer], new: npt.NDArray[np.floating | np.integer]) -> _Tuple2[np.floating]: ... @overload -def mapparms( - old: npt.NDArray[np.number], - new: npt.NDArray[np.number], -) -> _Tuple2[np.complexfloating]: ... +def mapparms(old: npt.NDArray[np.number], new: npt.NDArray[np.number]) -> _Tuple2[np.complexfloating]: ... @overload -def mapparms( - old: npt.NDArray[np.object_ | np.number], - new: npt.NDArray[np.object_ | np.number], -) -> _Tuple2[object]: ... -@overload -def mapparms( # type: ignore[overload-overlap] - old: Sequence[float], - new: Sequence[float], -) -> _Tuple2[float]: ... -@overload -def mapparms( - old: Sequence[complex], - new: Sequence[complex], -) -> _Tuple2[complex]: ... -@overload -def mapparms( - old: _SeriesLikeFloat_co, - new: _SeriesLikeFloat_co, -) -> _Tuple2[np.floating]: ... -@overload -def mapparms( - old: _SeriesLikeComplex_co, - new: _SeriesLikeComplex_co, -) -> _Tuple2[np.complexfloating]: ... -@overload -def mapparms( - old: _SeriesLikeCoef_co, - new: _SeriesLikeCoef_co, -) -> _Tuple2[object]: ... +def mapparms(old: npt.NDArray[np.object_ | np.number], new: npt.NDArray[np.object_ | np.number]) -> _Tuple2[object]: ... +@overload +def mapparms(old: Sequence[float], new: Sequence[float]) -> _Tuple2[float]: ... +@overload +def mapparms(old: Sequence[complex], new: Sequence[complex]) -> _Tuple2[complex]: ... +@overload +def mapparms(old: _SeriesLikeFloat_co, new: _SeriesLikeFloat_co) -> _Tuple2[np.floating]: ... +@overload +def mapparms(old: _SeriesLikeComplex_co, new: _SeriesLikeComplex_co) -> _Tuple2[np.complexfloating]: ... +@overload +def mapparms(old: _SeriesLikeCoef_co, new: _SeriesLikeCoef_co) -> _Tuple2[object]: ... +# @overload -def mapdomain( # type: ignore[overload-overlap] - x: _FloatLike_co, - old: _SeriesLikeFloat_co, - new: _SeriesLikeFloat_co, -) -> np.floating: ... +def mapdomain(x: _FloatLike_co, old: _SeriesLikeFloat_co, new: _SeriesLikeFloat_co) -> np.floating: ... 
@overload -def mapdomain( - x: _NumberLike_co, - old: _SeriesLikeComplex_co, - new: _SeriesLikeComplex_co, -) -> np.complexfloating: ... +def mapdomain(x: _NumberLike_co, old: _SeriesLikeComplex_co, new: _SeriesLikeComplex_co) -> np.complexfloating: ... @overload -def mapdomain( # type: ignore[overload-overlap] +def mapdomain( x: npt.NDArray[np.floating | np.integer], old: npt.NDArray[np.floating | np.integer], new: npt.NDArray[np.floating | np.integer], ) -> _FloatSeries: ... @overload -def mapdomain( - x: npt.NDArray[np.number], - old: npt.NDArray[np.number], - new: npt.NDArray[np.number], -) -> _ComplexSeries: ... +def mapdomain(x: npt.NDArray[np.number], old: npt.NDArray[np.number], new: npt.NDArray[np.number]) -> _ComplexSeries: ... @overload def mapdomain( x: npt.NDArray[np.object_ | np.number], @@ -227,34 +132,16 @@ def mapdomain( new: npt.NDArray[np.object_ | np.number], ) -> _ObjectSeries: ... @overload -def mapdomain( # type: ignore[overload-overlap] - x: _SeriesLikeFloat_co, - old: _SeriesLikeFloat_co, - new: _SeriesLikeFloat_co, -) -> _FloatSeries: ... +def mapdomain(x: _SeriesLikeFloat_co, old: _SeriesLikeFloat_co, new: _SeriesLikeFloat_co) -> _FloatSeries: ... @overload -def mapdomain( - x: _SeriesLikeComplex_co, - old: _SeriesLikeComplex_co, - new: _SeriesLikeComplex_co, -) -> _ComplexSeries: ... +def mapdomain(x: _SeriesLikeComplex_co, old: _SeriesLikeComplex_co, new: _SeriesLikeComplex_co) -> _ComplexSeries: ... @overload -def mapdomain( - x: _SeriesLikeCoef_co, - old: _SeriesLikeCoef_co, - new: _SeriesLikeCoef_co, -) -> _ObjectSeries: ... +def mapdomain(x: _SeriesLikeCoef_co, old: _SeriesLikeCoef_co, new: _SeriesLikeCoef_co) -> _ObjectSeries: ... @overload -def mapdomain( - x: _CoefLike_co, - old: _SeriesLikeCoef_co, - new: _SeriesLikeCoef_co, -) -> object: ... +def mapdomain(x: _CoefLike_co, old: _SeriesLikeCoef_co, new: _SeriesLikeCoef_co) -> object: ... 
-def _nth_slice( - i: SupportsIndex, - ndim: SupportsIndex, -) -> tuple[slice | None, ...]: ... +# +def _nth_slice(i: SupportsIndex, ndim: SupportsIndex) -> tuple[slice | None, ...]: ... # keep in sync with `vander_nd_flat` @overload @@ -310,168 +197,68 @@ def _vander_nd_flat( # keep in sync with `._polytypes._FuncFromRoots` @overload -def _fromroots( # type: ignore[overload-overlap] - line_f: _AnyLineF, - mul_f: _AnyMulF, - roots: _SeriesLikeFloat_co, -) -> _FloatSeries: ... +def _fromroots(line_f: _AnyLineF, mul_f: _AnyMulF, roots: _SeriesLikeFloat_co) -> _FloatSeries: ... @overload -def _fromroots( - line_f: _AnyLineF, - mul_f: _AnyMulF, - roots: _SeriesLikeComplex_co, -) -> _ComplexSeries: ... +def _fromroots(line_f: _AnyLineF, mul_f: _AnyMulF, roots: _SeriesLikeComplex_co) -> _ComplexSeries: ... @overload -def _fromroots( - line_f: _AnyLineF, - mul_f: _AnyMulF, - roots: _SeriesLikeObject_co, -) -> _ObjectSeries: ... +def _fromroots(line_f: _AnyLineF, mul_f: _AnyMulF, roots: _SeriesLikeObject_co) -> _ObjectSeries: ... @overload -def _fromroots( - line_f: _AnyLineF, - mul_f: _AnyMulF, - roots: _SeriesLikeCoef_co, -) -> _CoefSeries: ... +def _fromroots(line_f: _AnyLineF, mul_f: _AnyMulF, roots: _SeriesLikeCoef_co) -> _CoefSeries: ... # keep in sync with `_gridnd` @overload -def _valnd( - val_f: _AnyValF, - c: _SeriesLikeFloat_co, - *args: _FloatLike_co, -) -> np.floating: ... -@overload -def _valnd( - val_f: _AnyValF, - c: _SeriesLikeComplex_co, - *args: _NumberLike_co, -) -> np.complexfloating: ... -@overload -def _valnd( - val_f: _AnyValF, - c: _ArrayLikeFloat_co, - *args: _ArrayLikeFloat_co, -) -> _FloatArray: ... +def _valnd(val_f: _AnyValF, c: _SeriesLikeFloat_co, *args: _FloatLike_co) -> np.floating: ... @overload -def _valnd( - val_f: _AnyValF, - c: _ArrayLikeComplex_co, - *args: _ArrayLikeComplex_co, -) -> _ComplexArray: ... +def _valnd(val_f: _AnyValF, c: _SeriesLikeComplex_co, *args: _NumberLike_co) -> np.complexfloating: ... 
@overload -def _valnd( - val_f: _AnyValF, - c: _SeriesLikeObject_co, - *args: _CoefObjectLike_co, -) -> _SupportsCoefOps[Any]: ... +def _valnd(val_f: _AnyValF, c: _ArrayLikeFloat_co, *args: _ArrayLikeFloat_co) -> _FloatArray: ... @overload -def _valnd( - val_f: _AnyValF, - c: _ArrayLikeCoef_co, - *args: _ArrayLikeCoef_co, -) -> _ObjectArray: ... +def _valnd(val_f: _AnyValF, c: _ArrayLikeComplex_co, *args: _ArrayLikeComplex_co) -> _ComplexArray: ... +@overload +def _valnd(val_f: _AnyValF, c: _SeriesLikeObject_co, *args: _CoefObjectLike_co) -> _SupportsCoefOps[Any]: ... +@overload +def _valnd(val_f: _AnyValF, c: _ArrayLikeCoef_co, *args: _ArrayLikeCoef_co) -> _ObjectArray: ... # keep in sync with `_valnd` @overload -def _gridnd( - val_f: _AnyValF, - c: _SeriesLikeFloat_co, - *args: _FloatLike_co, -) -> np.floating: ... -@overload -def _gridnd( - val_f: _AnyValF, - c: _SeriesLikeComplex_co, - *args: _NumberLike_co, -) -> np.complexfloating: ... -@overload -def _gridnd( - val_f: _AnyValF, - c: _ArrayLikeFloat_co, - *args: _ArrayLikeFloat_co, -) -> _FloatArray: ... +def _gridnd(val_f: _AnyValF, c: _SeriesLikeFloat_co, *args: _FloatLike_co) -> np.floating: ... @overload -def _gridnd( - val_f: _AnyValF, - c: _ArrayLikeComplex_co, - *args: _ArrayLikeComplex_co, -) -> _ComplexArray: ... +def _gridnd(val_f: _AnyValF, c: _SeriesLikeComplex_co, *args: _NumberLike_co) -> np.complexfloating: ... @overload -def _gridnd( - val_f: _AnyValF, - c: _SeriesLikeObject_co, - *args: _CoefObjectLike_co, -) -> _SupportsCoefOps[Any]: ... +def _gridnd(val_f: _AnyValF, c: _ArrayLikeFloat_co, *args: _ArrayLikeFloat_co) -> _FloatArray: ... @overload -def _gridnd( - val_f: _AnyValF, - c: _ArrayLikeCoef_co, - *args: _ArrayLikeCoef_co, -) -> _ObjectArray: ... +def _gridnd(val_f: _AnyValF, c: _ArrayLikeComplex_co, *args: _ArrayLikeComplex_co) -> _ComplexArray: ... +@overload +def _gridnd(val_f: _AnyValF, c: _SeriesLikeObject_co, *args: _CoefObjectLike_co) -> _SupportsCoefOps[Any]: ... 
+@overload +def _gridnd(val_f: _AnyValF, c: _ArrayLikeCoef_co, *args: _ArrayLikeCoef_co) -> _ObjectArray: ... # keep in sync with `_polytypes._FuncBinOp` @overload -def _div( # type: ignore[overload-overlap] - mul_f: _AnyMulF, - c1: _SeriesLikeFloat_co, - c2: _SeriesLikeFloat_co, -) -> _Tuple2[_FloatSeries]: ... -@overload -def _div( - mul_f: _AnyMulF, - c1: _SeriesLikeComplex_co, - c2: _SeriesLikeComplex_co, -) -> _Tuple2[_ComplexSeries]: ... -@overload -def _div( - mul_f: _AnyMulF, - c1: _SeriesLikeObject_co, - c2: _SeriesLikeObject_co, -) -> _Tuple2[_ObjectSeries]: ... -@overload -def _div( - mul_f: _AnyMulF, - c1: _SeriesLikeCoef_co, - c2: _SeriesLikeCoef_co, -) -> _Tuple2[_CoefSeries]: ... +def _div(mul_f: _AnyMulF, c1: _SeriesLikeFloat_co, c2: _SeriesLikeFloat_co) -> _Tuple2[_FloatSeries]: ... +@overload +def _div(mul_f: _AnyMulF, c1: _SeriesLikeComplex_co, c2: _SeriesLikeComplex_co) -> _Tuple2[_ComplexSeries]: ... +@overload +def _div(mul_f: _AnyMulF, c1: _SeriesLikeObject_co, c2: _SeriesLikeObject_co) -> _Tuple2[_ObjectSeries]: ... +@overload +def _div(mul_f: _AnyMulF, c1: _SeriesLikeCoef_co, c2: _SeriesLikeCoef_co) -> _Tuple2[_CoefSeries]: ... _add: Final[_FuncBinOp] = ... _sub: Final[_FuncBinOp] = ... # keep in sync with `_polytypes._FuncPow` @overload -def _pow( - mul_f: _AnyMulF, - c: _SeriesLikeFloat_co, - pow: _AnyInt, - maxpower: _AnyInt | None, -) -> _FloatSeries: ... +def _pow(mul_f: _AnyMulF, c: _SeriesLikeFloat_co, pow: _AnyInt, maxpower: _AnyInt | None) -> _FloatSeries: ... @overload -def _pow( - mul_f: _AnyMulF, - c: _SeriesLikeComplex_co, - pow: _AnyInt, - maxpower: _AnyInt | None, -) -> _ComplexSeries: ... -@overload -def _pow( - mul_f: _AnyMulF, - c: _SeriesLikeObject_co, - pow: _AnyInt, - maxpower: _AnyInt | None, -) -> _ObjectSeries: ... +def _pow(mul_f: _AnyMulF, c: _SeriesLikeComplex_co, pow: _AnyInt, maxpower: _AnyInt | None) -> _ComplexSeries: ... 
@overload -def _pow( - mul_f: _AnyMulF, - c: _SeriesLikeCoef_co, - pow: _AnyInt, - maxpower: _AnyInt | None, -) -> _CoefSeries: ... +def _pow(mul_f: _AnyMulF, c: _SeriesLikeObject_co, pow: _AnyInt, maxpower: _AnyInt | None) -> _ObjectSeries: ... +@overload +def _pow(mul_f: _AnyMulF, c: _SeriesLikeCoef_co, pow: _AnyInt, maxpower: _AnyInt | None) -> _CoefSeries: ... # keep in sync with `_polytypes._FuncFit` @overload -def _fit( # type: ignore[overload-overlap] +def _fit( vander_f: _AnyVanderF, x: _SeriesLikeFloat_co, y: _ArrayLikeFloat_co, @@ -522,5 +309,8 @@ def _fit( w: _SeriesLikeCoef_co | None = None, ) -> tuple[_CoefSeries, Sequence[np.inexact | np.int32]]: ... +# def _as_int(x: SupportsIndex, desc: str) -> int: ... + +# def format_float(x: _FloatLike_co, parens: bool = False) -> str: ... From b6811bab48feab327498b91095fc41ea6848a8d8 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 23 Oct 2025 19:29:10 +0200 Subject: [PATCH 0746/1718] CI: Ignore stubtest distutils errors on py311 --- .spin/cmds.py | 4 +++- tools/stubtest/allowlist_py311.txt | 3 +++ 2 files changed, 6 insertions(+), 1 deletion(-) create mode 100644 tools/stubtest/allowlist_py311.txt diff --git a/.spin/cmds.py b/.spin/cmds.py index 925991c5c804..453faa77d932 100644 --- a/.spin/cmds.py +++ b/.spin/cmds.py @@ -561,7 +561,9 @@ def stubtest(*, concise: bool, build_dir: str) -> None: stubtest_dir = curdir.parent / 'tools' / 'stubtest' mypy_config = stubtest_dir / 'mypy.ini' allowlists = [stubtest_dir / 'allowlist.txt'] - if sys.version_info >= (3, 12): + if sys.version_info < (3, 12): + allowlists.append(stubtest_dir / 'allowlist_py311.txt') + else: allowlists.append(stubtest_dir / 'allowlist_py312.txt') cmd = [ diff --git a/tools/stubtest/allowlist_py311.txt b/tools/stubtest/allowlist_py311.txt new file mode 100644 index 000000000000..4413f164f582 --- /dev/null +++ b/tools/stubtest/allowlist_py311.txt @@ -0,0 +1,3 @@ +# python == 3.11.* + +numpy\.distutils\..* From 
33d52b80c194264d8c2e7a5225100b9177f22349 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 23 Oct 2025 11:33:24 -0600 Subject: [PATCH 0747/1718] BUG: fix int left shift UB in CPU feature detection (#30058) --- numpy/_core/src/common/npy_cpu_features.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/numpy/_core/src/common/npy_cpu_features.c b/numpy/_core/src/common/npy_cpu_features.c index 46ac4a9d8362..91dafa96de0a 100644 --- a/numpy/_core/src/common/npy_cpu_features.c +++ b/numpy/_core/src/common/npy_cpu_features.c @@ -554,7 +554,9 @@ npy__cpu_init_features(void) // Skylake-X npy__cpu_have[NPY_CPU_FEATURE_AVX512DQ] = (reg[1] & (1 << 17)) != 0; npy__cpu_have[NPY_CPU_FEATURE_AVX512BW] = (reg[1] & (1 << 30)) != 0; - npy__cpu_have[NPY_CPU_FEATURE_AVX512VL] = (reg[1] & (1 << 31)) != 0; + // cast and use of unsigned int literal silences UBSan warning: + // "runtime error: left shift of 1 by 31 places cannot be represented in type 'int'" + npy__cpu_have[NPY_CPU_FEATURE_AVX512VL] = (reg[1] & (int)(1u << 31)) != 0; // Cascade Lake npy__cpu_have[NPY_CPU_FEATURE_AVX512VNNI] = (reg[2] & (1 << 11)) != 0; // Cannon Lake From ca79fc7861e711a437639539ecfce149fe90de37 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Fri, 24 Oct 2025 03:12:08 +0200 Subject: [PATCH 0748/1718] TYP: ``polynomial.polyutils``: fix callable type signatures (#30060) --- numpy/polynomial/polyutils.pyi | 53 ++++++++++++++-------------------- 1 file changed, 22 insertions(+), 31 deletions(-) diff --git a/numpy/polynomial/polyutils.pyi b/numpy/polynomial/polyutils.pyi index 5487bfa4c2ec..79ca317c12b0 100644 --- a/numpy/polynomial/polyutils.pyi +++ b/numpy/polynomial/polyutils.pyi @@ -1,5 +1,14 @@ from collections.abc import Callable, Iterable, Sequence -from typing import Any, Final, Literal, SupportsIndex, TypeAlias, TypeVar, overload +from typing import ( + Final, + Literal, + Protocol, + SupportsIndex, + TypeAlias, + TypeVar, + overload, + type_check_only, +) import numpy 
as np import numpy.typing as npt @@ -17,7 +26,6 @@ from ._polytypes import ( _ArrayLikeCoef_co, _CoefArray, _CoefLike_co, - _CoefObjectLike_co, _CoefSeries, _ComplexArray, _ComplexSeries, @@ -31,18 +39,23 @@ from ._polytypes import ( _SeriesLikeFloat_co, _SeriesLikeInt_co, _SeriesLikeObject_co, - _SupportsCoefOps, _Tuple2, ) __all__ = ["as_series", "format_float", "getdomain", "mapdomain", "mapparms", "trimcoef", "trimseq"] +_T = TypeVar("_T") _SeqT = TypeVar("_SeqT", bound=_CoefArray | Sequence[_CoefLike_co]) -_AnyValF: TypeAlias = Callable[[npt.ArrayLike, npt.ArrayLike, bool], _CoefArray] -_AnyLineF: TypeAlias = Callable[[_CoefLike_co, _CoefLike_co], _CoefArray] -_AnyMulF: TypeAlias = Callable[[npt.ArrayLike, npt.ArrayLike], _CoefArray] -_AnyVanderF: TypeAlias = Callable[[npt.ArrayLike, SupportsIndex], _CoefArray] +_AnyLineF: TypeAlias = Callable[[float, float], _CoefArray] +_AnyMulF: TypeAlias = Callable[[np.ndarray | list[int], np.ndarray], _CoefArray] +_AnyVanderF: TypeAlias = Callable[[np.ndarray, int], _CoefArray] + +@type_check_only +class _ValFunc(Protocol[_T]): + def __call__(self, x: np.ndarray, c: _T, /, *, tensor: bool = True) -> _T: ... + +### @overload def as_series(alist: npt.NDArray[np.integer] | _FloatArray, trim: bool = True) -> list[_FloatSeries]: ... @@ -206,32 +219,10 @@ def _fromroots(line_f: _AnyLineF, mul_f: _AnyMulF, roots: _SeriesLikeObject_co) def _fromroots(line_f: _AnyLineF, mul_f: _AnyMulF, roots: _SeriesLikeCoef_co) -> _CoefSeries: ... # keep in sync with `_gridnd` -@overload -def _valnd(val_f: _AnyValF, c: _SeriesLikeFloat_co, *args: _FloatLike_co) -> np.floating: ... -@overload -def _valnd(val_f: _AnyValF, c: _SeriesLikeComplex_co, *args: _NumberLike_co) -> np.complexfloating: ... -@overload -def _valnd(val_f: _AnyValF, c: _ArrayLikeFloat_co, *args: _ArrayLikeFloat_co) -> _FloatArray: ... -@overload -def _valnd(val_f: _AnyValF, c: _ArrayLikeComplex_co, *args: _ArrayLikeComplex_co) -> _ComplexArray: ... 
-@overload -def _valnd(val_f: _AnyValF, c: _SeriesLikeObject_co, *args: _CoefObjectLike_co) -> _SupportsCoefOps[Any]: ... -@overload -def _valnd(val_f: _AnyValF, c: _ArrayLikeCoef_co, *args: _ArrayLikeCoef_co) -> _ObjectArray: ... +def _valnd(val_f: _ValFunc[_T], c: _T, *args: npt.ArrayLike) -> _T: ... # keep in sync with `_valnd` -@overload -def _gridnd(val_f: _AnyValF, c: _SeriesLikeFloat_co, *args: _FloatLike_co) -> np.floating: ... -@overload -def _gridnd(val_f: _AnyValF, c: _SeriesLikeComplex_co, *args: _NumberLike_co) -> np.complexfloating: ... -@overload -def _gridnd(val_f: _AnyValF, c: _ArrayLikeFloat_co, *args: _ArrayLikeFloat_co) -> _FloatArray: ... -@overload -def _gridnd(val_f: _AnyValF, c: _ArrayLikeComplex_co, *args: _ArrayLikeComplex_co) -> _ComplexArray: ... -@overload -def _gridnd(val_f: _AnyValF, c: _SeriesLikeObject_co, *args: _CoefObjectLike_co) -> _SupportsCoefOps[Any]: ... -@overload -def _gridnd(val_f: _AnyValF, c: _ArrayLikeCoef_co, *args: _ArrayLikeCoef_co) -> _ObjectArray: ... +def _gridnd(val_f: _ValFunc[_T], c: _T, *args: npt.ArrayLike) -> _T: ... # keep in sync with `_polytypes._FuncBinOp` @overload From 852091a09cc58e2eddd41f713665e449a209b1fa Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Fri, 24 Oct 2025 03:12:54 +0200 Subject: [PATCH 0749/1718] TYP: ``polynomial``: Simplify ``chebpts{1,2}`` function stubs (#30065) --- numpy/polynomial/_polytypes.pyi | 4 ---- numpy/polynomial/chebyshev.pyi | 6 +++--- 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/numpy/polynomial/_polytypes.pyi b/numpy/polynomial/_polytypes.pyi index 58482aea1be9..b5a603b6ca85 100644 --- a/numpy/polynomial/_polytypes.pyi +++ b/numpy/polynomial/_polytypes.pyi @@ -499,7 +499,3 @@ class _FuncWeight(Protocol): def __call__(self, /, x: _ArrayLikeComplex_co) -> npt.NDArray[np.complex128]: ... @overload def __call__(self, /, x: _ArrayLikeCoef_co) -> _ObjectArray: ... 
- -@type_check_only -class _FuncPts(Protocol): - def __call__(self, /, npts: _AnyInt) -> _Series[np.float64]: ... diff --git a/numpy/polynomial/chebyshev.pyi b/numpy/polynomial/chebyshev.pyi index d70842020ebe..1cfb27829b2e 100644 --- a/numpy/polynomial/chebyshev.pyi +++ b/numpy/polynomial/chebyshev.pyi @@ -1,3 +1,4 @@ +from _typeshed import ConvertibleToInt from collections.abc import Callable, Iterable from typing import ( Any, @@ -29,7 +30,6 @@ from ._polytypes import ( _FuncLine, _FuncPoly2Ortho, _FuncPow, - _FuncPts, _FuncRoots, _FuncUnOp, _FuncVal, @@ -122,8 +122,8 @@ chebcompanion: Final[_FuncCompanion] = ... chebroots: Final[_FuncRoots] = ... chebgauss: Final[_FuncGauss] = ... chebweight: Final[_FuncWeight] = ... -chebpts1: Final[_FuncPts] = ... -chebpts2: Final[_FuncPts] = ... +def chebpts1(npts: ConvertibleToInt) -> np.ndarray[tuple[int], np.dtype[np.float64]]: ... +def chebpts2(npts: ConvertibleToInt) -> np.ndarray[tuple[int], np.dtype[np.float64]]: ... # keep in sync with `Chebyshev.interpolate` (minus `domain` parameter) @overload From df32a985df1459e2e1ab4aab94d56341ccb24b37 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Fri, 24 Oct 2025 03:14:18 +0200 Subject: [PATCH 0750/1718] TYP: ``numpy.ma``: Annotate 27 functions related to masks and fill-values (#30067) * TYP: Annotate ``ma.make_mask()`` and ``ma.make_mask_descr()`` * TYP: Annotate ``ma.make_mask_none()`` * TYP: ``ma.core``: move some of the classes to the top (for the sake of sanity) * TYP: ``ma.core``: introduce a type alias for boolean mask arrays * TYP: Annotate ``ma.getmaskarray()`` * TYP: Annotate ``ma.getdata()`` * TYP: update type-tests for ``ma.getmask()`` to use the literal `False` bool type * TYP: Annotate ``ma.mask_or()`` * TYP: Annotate ``ma.flatten_mask()`` * TYP: Annotate ``ma.flatten_structured_array()`` * TYP: Annotate ``ma.masked_invalid()`` * TYP: Annotate ``ma.masked_where()`` * TYP: Annotate ``ma.masked_object()`` and ``ma.masked_values()`` * TYP: Annotate 
``ma.fix_invalid()`` * TYP: Annotate ``ma.*_fill_value()`` --- numpy/ma/core.pyi | 414 +++++++++++++++++++++++--- numpy/typing/tests/data/reveal/ma.pyi | 10 +- 2 files changed, 373 insertions(+), 51 deletions(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index e3ef71adb09f..78114931cbac 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -91,6 +91,7 @@ from numpy._typing import ( _CharLike_co, _DTypeLike, _DTypeLikeBool, + _DTypeLikeVoid, _IntLike_co, _NestedSequence, _ScalarLike_co, @@ -281,6 +282,7 @@ __all__ = [ ] _ShapeT = TypeVar("_ShapeT", bound=_Shape) +_ShapeOrAnyT = TypeVar("_ShapeOrAnyT", bound=_Shape, default=_AnyShape) _ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True) _DTypeT = TypeVar("_DTypeT", bound=dtype) _DTypeT_co = TypeVar("_DTypeT_co", bound=dtype, default=dtype, covariant=True) @@ -314,31 +316,19 @@ _ConvertibleToComplex: TypeAlias = SupportsComplex | SupportsFloat | SupportsInd _ConvertibleToTD64: TypeAlias = dt.timedelta | int | _CharLike_co | character | number | timedelta64 | np.bool | None _ConvertibleToDT64: TypeAlias = dt.date | int | _CharLike_co | character | number | datetime64 | np.bool | None +_NoMaskType: TypeAlias = np.bool_[Literal[False]] # type of `np.False_` +_MaskArray: TypeAlias = np.ndarray[_ShapeOrAnyT, np.dtype[np.bool_]] + ### -MaskType = bool_ -nomask: bool_[Literal[False]] +MaskType = np.bool_ + +nomask: Final[_NoMaskType] = ... class MaskedArrayFutureWarning(FutureWarning): ... class MAError(Exception): ... class MaskError(MAError): ... -def default_fill_value(obj): ... -def minimum_fill_value(obj): ... -def maximum_fill_value(obj): ... -def set_fill_value(a, fill_value): ... -def common_fill_value(a, b): ... -@overload -def filled(a: ndarray[_ShapeT_co, _DTypeT_co], fill_value: _ScalarLike_co | None = None) -> ndarray[_ShapeT_co, _DTypeT_co]: ... -@overload -def filled(a: _ArrayLike[_ScalarT_co], fill_value: _ScalarLike_co | None = None) -> NDArray[_ScalarT_co]: ... 
-@overload -def filled(a: ArrayLike, fill_value: _ScalarLike_co | None = None) -> NDArray[Any]: ... -def getdata(a, subok=True): ... -get_data = getdata - -def fix_invalid(a, mask=..., copy=True, fill_value=None): ... - class _MaskedUFunc: f: Any __doc__: Any @@ -367,6 +357,15 @@ class _DomainedBinaryOperation(_MaskedUFunc): def __init__(self, dbfunc, domain, fillx=..., filly=...): ... def __call__(self, a, b, *args, **kwargs): ... +class _MaskedPrintOption: + def __init__(self, display): ... + def display(self): ... + def set_display(self, s): ... + def enabled(self): ... + def enable(self, shrink=1): ... + +masked_print_option: Final[_MaskedPrintOption] = ... + exp: _MaskedUnaryOperation conjugate: _MaskedUnaryOperation sin: _MaskedUnaryOperation @@ -422,50 +421,371 @@ remainder: _DomainedBinaryOperation fmod: _DomainedBinaryOperation mod: _DomainedBinaryOperation -def make_mask_descr(ndtype): ... +# `obj` can be anything (even `object()`), and is too "flexible", so we can't +# meaningfully annotate it, or its return type. +def default_fill_value(obj: object) -> Any: ... +def minimum_fill_value(obj: object) -> Any: ... +def maximum_fill_value(obj: object) -> Any: ... + +# +@overload # returns `a.fill_value` if `a` is a `MaskedArray` +def get_fill_value(a: _MaskedArray[_ScalarT]) -> _ScalarT: ... +@overload # otherwise returns `default_fill_value(a)` +def get_fill_value(a: object) -> Any: ... +# this is a noop if `a` isn't a `MaskedArray`, so we only accept `MaskedArray` input +def set_fill_value(a: MaskedArray, fill_value: _ScalarLike_co) -> None: ... + +# the return type depends on the *values* of `a` and `b` (which cannot be known +# statically), which is why we need to return an awkward `_ | None` +@overload +def common_fill_value(a: _MaskedArray[_ScalarT], b: MaskedArray) -> _ScalarT | None: ... @overload -def getmask(a: _ScalarLike_co) -> bool_: ... +def common_fill_value(a: object, b: object) -> Any: ... 
+ +# keep in sync with `fix_invalid`, but return `ndarray` instead of `MaskedArray` @overload -def getmask(a: MaskedArray[_ShapeT_co, Any]) -> np.ndarray[_ShapeT_co, dtype[bool_]] | bool_: ... +def filled(a: ndarray[_ShapeT, _DTypeT], fill_value: _ScalarLike_co | None = None) -> ndarray[_ShapeT, _DTypeT]: ... @overload -def getmask(a: ArrayLike) -> NDArray[bool_] | bool_: ... +def filled(a: _ArrayLike[_ScalarT], fill_value: _ScalarLike_co | None = None) -> NDArray[_ScalarT]: ... +@overload +def filled(a: ArrayLike, fill_value: _ScalarLike_co | None = None) -> NDArray[Incomplete]: ... + +# keep in sync with `filled`, but return `MaskedArray` instead of `ndarray` +@overload +def fix_invalid( + a: np.ndarray[_ShapeT, _DTypeT], + mask: _ArrayLikeBool_co = nomask, + copy: bool = True, + fill_value: _ScalarLike_co | None = None, +) -> MaskedArray[_ShapeT, _DTypeT]: ... +@overload +def fix_invalid( + a: _ArrayLike[_ScalarT], + mask: _ArrayLikeBool_co = nomask, + copy: bool = True, + fill_value: _ScalarLike_co | None = None, +) -> _MaskedArray[_ScalarT]: ... +@overload +def fix_invalid( + a: ArrayLike, + mask: _ArrayLikeBool_co = nomask, + copy: bool = True, + fill_value: _ScalarLike_co | None = None, +) -> _MaskedArray[Incomplete]: ... + +# +@overload +def getdata(a: np.ndarray[_ShapeT, _DTypeT], subok: bool = True) -> np.ndarray[_ShapeT, _DTypeT]: ... +@overload +def getdata(a: _ArrayLike[_ScalarT], subok: bool = True) -> NDArray[_ScalarT]: ... +@overload +def getdata(a: ArrayLike, subok: bool = True) -> NDArray[Incomplete]: ... + +get_data = getdata + +# +@overload +def getmask(a: _ScalarLike_co) -> _NoMaskType: ... +@overload +def getmask(a: MaskedArray[_ShapeT, Any]) -> _MaskArray[_ShapeT] | _NoMaskType: ... +@overload +def getmask(a: ArrayLike) -> _MaskArray | _NoMaskType: ... get_mask = getmask -def getmaskarray(arr): ... 
+# like `getmask`, but instead of `nomask` returns `make_mask_none(arr, arr.dtype?)` +@overload +def getmaskarray(arr: _ScalarLike_co) -> _MaskArray[tuple[()]]: ... +@overload +def getmaskarray(arr: np.ndarray[_ShapeT, Any]) -> _MaskArray[_ShapeT]: ... # It's sufficient for `m` to have dtype with type: `type[np.bool_]`, # which isn't necessarily a ndarray. Please open an issue if this causes issues. def is_mask(m: object) -> TypeIs[NDArray[bool_]]: ... -def make_mask(m, copy=False, shrink=True, dtype=...): ... -def make_mask_none(newshape, dtype=None): ... -def mask_or(m1, m2, copy=False, shrink=True): ... -def flatten_mask(mask): ... -def masked_where(condition, a, copy=True): ... -def masked_greater(x, value, copy=True): ... -def masked_greater_equal(x, value, copy=True): ... -def masked_less(x, value, copy=True): ... -def masked_less_equal(x, value, copy=True): ... -def masked_not_equal(x, value, copy=True): ... -def masked_equal(x, value, copy=True): ... -def masked_inside(x, v1, v2, copy=True): ... -def masked_outside(x, v1, v2, copy=True): ... -def masked_object(x, value, copy=True, shrink=True): ... -def masked_values(x, value, rtol=1e-5, atol=1e-8, copy=True, shrink=True): ... -def masked_invalid(a, copy=True): ... +# +@overload +def make_mask_descr(ndtype: _VoidDTypeLike) -> np.dtype[np.void]: ... +@overload +def make_mask_descr(ndtype: _DTypeLike[np.generic] | str | type) -> np.dtype[np.bool_]: ... -class _MaskedPrintOption: - def __init__(self, display): ... - def display(self): ... - def set_display(self, s): ... - def enabled(self): ... - def enable(self, shrink=1): ... +# +@overload # m is nomask +def make_mask( + m: _NoMaskType, + copy: bool = False, + shrink: bool = True, + dtype: _DTypeLikeBool = ..., +) -> _NoMaskType: ... 
+@overload # m: ndarray, shrink=True (default), dtype: bool-like (default) +def make_mask( + m: np.ndarray[_ShapeT], + copy: bool = False, + shrink: Literal[True] = True, + dtype: _DTypeLikeBool = ..., +) -> _MaskArray[_ShapeT] | _NoMaskType: ... +@overload # m: ndarray, shrink=False (kwarg), dtype: bool-like (default) +def make_mask( + m: np.ndarray[_ShapeT], + copy: bool = False, + *, + shrink: Literal[False], + dtype: _DTypeLikeBool = ..., +) -> _MaskArray[_ShapeT]: ... +@overload # m: ndarray, dtype: void-like +def make_mask( + m: np.ndarray[_ShapeT], + copy: bool = False, + shrink: bool = True, + *, + dtype: _DTypeLikeVoid, +) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... +@overload # m: array-like, shrink=True (default), dtype: bool-like (default) +def make_mask( + m: ArrayLike, + copy: bool = False, + shrink: Literal[True] = True, + dtype: _DTypeLikeBool = ..., +) -> _MaskArray | _NoMaskType: ... +@overload # m: array-like, shrink=False (kwarg), dtype: bool-like (default) +def make_mask( + m: ArrayLike, + copy: bool = False, + *, + shrink: Literal[False], + dtype: _DTypeLikeBool = ..., +) -> _MaskArray: ... +@overload # m: array-like, dtype: void-like +def make_mask( + m: ArrayLike, + copy: bool = False, + shrink: bool = True, + *, + dtype: _DTypeLikeVoid, +) -> NDArray[np.void]: ... +@overload # fallback +def make_mask( + m: ArrayLike, + copy: bool = False, + shrink: bool = True, + *, + dtype: DTypeLike = ..., +) -> NDArray[Incomplete] | _NoMaskType: ... -masked_print_option: _MaskedPrintOption +# +@overload # known shape, dtype: unstructured (default) +def make_mask_none(newshape: _ShapeT, dtype: np.dtype | type | str | None = None) -> _MaskArray[_ShapeT]: ... +@overload # known shape, dtype: structured +def make_mask_none(newshape: _ShapeT, dtype: _VoidDTypeLike) -> np.ndarray[_ShapeT, dtype[np.void]]: ... 
+@overload # unknown shape, dtype: unstructured (default) +def make_mask_none(newshape: _ShapeLike, dtype: np.dtype | type | str | None = None) -> _MaskArray: ... +@overload # unknown shape, dtype: structured +def make_mask_none(newshape: _ShapeLike, dtype: _VoidDTypeLike) -> NDArray[np.void]: ... -def flatten_structured_array(a): ... +# +@overload # nomask, scalar-like, shrink=True (default) +def mask_or( + m1: _NoMaskType | Literal[False], + m2: _ScalarLike_co, + copy: bool = False, + shrink: Literal[True] = True, +) -> _NoMaskType: ... +@overload # nomask, scalar-like, shrink=False (kwarg) +def mask_or( + m1: _NoMaskType | Literal[False], + m2: _ScalarLike_co, + copy: bool = False, + *, + shrink: Literal[False], +) -> _MaskArray[tuple[()]]: ... +@overload # scalar-like, nomask, shrink=True (default) +def mask_or( + m1: _ScalarLike_co, + m2: _NoMaskType | Literal[False], + copy: bool = False, + shrink: Literal[True] = True, +) -> _NoMaskType: ... +@overload # scalar-like, nomask, shrink=False (kwarg) +def mask_or( + m1: _ScalarLike_co, + m2: _NoMaskType | Literal[False], + copy: bool = False, + *, + shrink: Literal[False], +) -> _MaskArray[tuple[()]]: ... +@overload # ndarray, ndarray | nomask, shrink=True (default) +def mask_or( + m1: np.ndarray[_ShapeT, np.dtype[_ScalarT]], + m2: np.ndarray[_ShapeT, np.dtype[_ScalarT]] | _NoMaskType | Literal[False], + copy: bool = False, + shrink: Literal[True] = True, +) -> _MaskArray[_ShapeT] | _NoMaskType: ... +@overload # ndarray, ndarray | nomask, shrink=False (kwarg) +def mask_or( + m1: np.ndarray[_ShapeT, np.dtype[_ScalarT]], + m2: np.ndarray[_ShapeT, np.dtype[_ScalarT]] | _NoMaskType | Literal[False], + copy: bool = False, + *, + shrink: Literal[False], +) -> _MaskArray[_ShapeT]: ... 
+@overload # ndarray | nomask, ndarray, shrink=True (default) +def mask_or( + m1: np.ndarray[_ShapeT, np.dtype[_ScalarT]] | _NoMaskType | Literal[False], + m2: np.ndarray[_ShapeT, np.dtype[_ScalarT]], + copy: bool = False, + shrink: Literal[True] = True, +) -> _MaskArray[_ShapeT] | _NoMaskType: ... +@overload # ndarray | nomask, ndarray, shrink=False (kwarg) +def mask_or( + m1: np.ndarray[_ShapeT, np.dtype[_ScalarT]] | _NoMaskType | Literal[False], + m2: np.ndarray[_ShapeT, np.dtype[_ScalarT]], + copy: bool = False, + *, + shrink: Literal[False], +) -> _MaskArray[_ShapeT]: ... + +# +@overload +def flatten_mask(mask: np.ndarray[_ShapeT]) -> _MaskArray[_ShapeT]: ... +@overload +def flatten_mask(mask: ArrayLike) -> _MaskArray: ... + +# NOTE: we currently don't know the field types of `void` dtypes, so it's not possible +# to know the output dtype of the returned array. +@overload +def flatten_structured_array(a: MaskedArray[_ShapeT, np.dtype[np.void]]) -> MaskedArray[_ShapeT]: ... +@overload +def flatten_structured_array(a: np.ndarray[_ShapeT, np.dtype[np.void]]) -> np.ndarray[_ShapeT]: ... +@overload # for some reason this accepts unstructured array-likes, hence this fallback overload +def flatten_structured_array(a: ArrayLike) -> np.ndarray: ... + +# keep in sync with other the `masked_*` functions +@overload # known array with known shape and dtype +def masked_invalid(a: ndarray[_ShapeT, _DTypeT], copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ... +@overload # array-like of known scalar-type +def masked_invalid(a: _ArrayLike[_ScalarT], copy: bool = True) -> _MaskedArray[_ScalarT]: ... +@overload # unknown array-like +def masked_invalid(a: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ... + +# keep in sync with other the `masked_*` functions +@overload # array-like of known scalar-type +def masked_where( + condition: _ArrayLikeBool_co, a: ndarray[_ShapeT, _DTypeT], copy: bool = True +) -> MaskedArray[_ShapeT, _DTypeT]: ... 
+@overload # array-like of known scalar-type +def masked_where(condition: _ArrayLikeBool_co, a: _ArrayLike[_ScalarT], copy: bool = True) -> _MaskedArray[_ScalarT]: ... +@overload # unknown array-like +def masked_where(condition: _ArrayLikeBool_co, a: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ... + +# keep in sync with other the `masked_*` functions +@overload # known array with known shape and dtype +def masked_greater(x: ndarray[_ShapeT, _DTypeT], value: ArrayLike, copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ... +@overload # array-like of known scalar-type +def masked_greater(x: _ArrayLike[_ScalarT], value: ArrayLike, copy: bool = True) -> _MaskedArray[_ScalarT]: ... +@overload # unknown array-like +def masked_greater(x: ArrayLike, value: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ... + +# keep in sync with other the `masked_*` functions +@overload # known array with known shape and dtype +def masked_greater_equal(x: ndarray[_ShapeT, _DTypeT], value: ArrayLike, copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ... +@overload # array-like of known scalar-type +def masked_greater_equal(x: _ArrayLike[_ScalarT], value: ArrayLike, copy: bool = True) -> _MaskedArray[_ScalarT]: ... +@overload # unknown array-like +def masked_greater_equal(x: ArrayLike, value: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ... + +# keep in sync with other the `masked_*` functions +@overload # known array with known shape and dtype +def masked_less(x: ndarray[_ShapeT, _DTypeT], value: ArrayLike, copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ... +@overload # array-like of known scalar-type +def masked_less(x: _ArrayLike[_ScalarT], value: ArrayLike, copy: bool = True) -> _MaskedArray[_ScalarT]: ... +@overload # unknown array-like +def masked_less(x: ArrayLike, value: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ... 
+ +# keep in sync with other the `masked_*` functions +@overload # known array with known shape and dtype +def masked_less_equal(x: ndarray[_ShapeT, _DTypeT], value: ArrayLike, copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ... +@overload # array-like of known scalar-type +def masked_less_equal(x: _ArrayLike[_ScalarT], value: ArrayLike, copy: bool = True) -> _MaskedArray[_ScalarT]: ... +@overload # unknown array-like +def masked_less_equal(x: ArrayLike, value: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ... + +# keep in sync with other the `masked_*` functions +@overload # known array with known shape and dtype +def masked_not_equal(x: ndarray[_ShapeT, _DTypeT], value: ArrayLike, copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ... +@overload # array-like of known scalar-type +def masked_not_equal(x: _ArrayLike[_ScalarT], value: ArrayLike, copy: bool = True) -> _MaskedArray[_ScalarT]: ... +@overload # unknown array-like +def masked_not_equal(x: ArrayLike, value: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ... + +# keep in sync with other the `masked_*` functions +@overload # known array with known shape and dtype +def masked_equal(x: ndarray[_ShapeT, _DTypeT], value: ArrayLike, copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ... +@overload # array-like of known scalar-type +def masked_equal(x: _ArrayLike[_ScalarT], value: ArrayLike, copy: bool = True) -> _MaskedArray[_ScalarT]: ... +@overload # unknown array-like +def masked_equal(x: ArrayLike, value: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ... + +# keep in sync with other the `masked_*` functions +@overload # known array with known shape and dtype +def masked_inside(x: ndarray[_ShapeT, _DTypeT], v1: ArrayLike, v2: ArrayLike, copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ... +@overload # array-like of known scalar-type +def masked_inside(x: _ArrayLike[_ScalarT], v1: ArrayLike, v2: ArrayLike, copy: bool = True) -> _MaskedArray[_ScalarT]: ... 
+@overload # unknown array-like +def masked_inside(x: ArrayLike, v1: ArrayLike, v2: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ... + +# keep in sync with other the `masked_*` functions +@overload # known array with known shape and dtype +def masked_outside(x: ndarray[_ShapeT, _DTypeT], v1: ArrayLike, v2: ArrayLike, copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ... +@overload # array-like of known scalar-type +def masked_outside(x: _ArrayLike[_ScalarT], v1: ArrayLike, v2: ArrayLike, copy: bool = True) -> _MaskedArray[_ScalarT]: ... +@overload # unknown array-like +def masked_outside(x: ArrayLike, v1: ArrayLike, v2: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ... + +# only intended for object arrays, so we assume that's how it's always used in practice +@overload +def masked_object( + x: np.ndarray[_ShapeT, np.dtype[np.object_]], + value: object, + copy: bool = True, + shrink: bool = True, +) -> MaskedArray[_ShapeT, np.dtype[np.object_]]: ... +@overload +def masked_object( + x: _ArrayLikeObject_co, + value: object, + copy: bool = True, + shrink: bool = True, +) -> _MaskedArray[np.object_]: ... + +# keep roughly in sync with `filled` +@overload +def masked_values( + x: np.ndarray[_ShapeT, _DTypeT], + value: _ScalarLike_co, + rtol: float = 1e-5, + atol: float = 1e-8, + copy: bool = True, + shrink: bool = True +) -> MaskedArray[_ShapeT, _DTypeT]: ... +@overload +def masked_values( + x: _ArrayLike[_ScalarT], + value: _ScalarLike_co, + rtol: float = 1e-5, + atol: float = 1e-8, + copy: bool = True, + shrink: bool = True +) -> _MaskedArray[_ScalarT]: ... +@overload +def masked_values( + x: ArrayLike, + value: _ScalarLike_co, + rtol: float = 1e-5, + atol: float = 1e-8, + copy: bool = True, + shrink: bool = True +) -> _MaskedArray[Incomplete]: ... # TODO: Support non-boolean mask dtypes, such as `np.void`. 
This will require adding an # additional generic type parameter to (at least) `MaskedArray` and `MaskedIterator` to diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index a6857ef0a3dd..e47e690ff0e7 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -6,7 +6,9 @@ from numpy._typing import NDArray, _AnyShape _ScalarT = TypeVar("_ScalarT", bound=generic) _ScalarT_co = TypeVar("_ScalarT_co", bound=generic, covariant=True) + MaskedArray: TypeAlias = np.ma.MaskedArray[_AnyShape, dtype[_ScalarT]] +_NoMaskType: TypeAlias = np.bool[Literal[False]] _Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] class MaskedArraySubclass(MaskedArray[_ScalarT_co]): ... @@ -347,13 +349,13 @@ assert_type(np.ma.allclose(AR_f4, MAR_f4, rtol=.4, atol=.3), bool) assert_type(MAR_2d_f4.ravel(), np.ma.MaskedArray[tuple[int], np.dtype[np.float32]]) assert_type(MAR_1d.ravel(order="A"), np.ma.MaskedArray[tuple[int], np.dtype[Any]]) -assert_type(np.ma.getmask(MAR_f4), NDArray[np.bool] | np.bool) +assert_type(np.ma.getmask(MAR_f4), NDArray[np.bool] | _NoMaskType) # PyRight detects this one correctly, but mypy doesn't: # `Revealed type is "Union[numpy.ndarray[Any, Any], numpy.bool[Any]]"` assert_type(np.ma.getmask(MAR_1d), np.ndarray[tuple[int], np.dtype[np.bool]] | np.bool) # type: ignore[assert-type] -assert_type(np.ma.getmask(MAR_2d_f4), np.ndarray[tuple[int, int], np.dtype[np.bool]] | np.bool) -assert_type(np.ma.getmask([1, 2]), NDArray[np.bool] | np.bool) -assert_type(np.ma.getmask(np.int64(1)), np.bool) +assert_type(np.ma.getmask(MAR_2d_f4), np.ndarray[tuple[int, int], np.dtype[np.bool]] | _NoMaskType) +assert_type(np.ma.getmask([1, 2]), NDArray[np.bool] | _NoMaskType) +assert_type(np.ma.getmask(np.int64(1)), _NoMaskType) assert_type(np.ma.is_mask(MAR_1d), bool) assert_type(np.ma.is_mask(AR_b), bool) From 1d053b3482b178ed057474402ae94c80701796e0 Mon Sep 17 00:00:00 2001 From: Britney Whittington 
<103079612+bwhitt7@users.noreply.github.com> Date: Fri, 24 Oct 2025 11:17:29 -0400 Subject: [PATCH 0751/1718] TST: Remove unneeded test__datasource thread-unsafe markers (#30064) --- numpy/lib/tests/test__datasource.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/numpy/lib/tests/test__datasource.py b/numpy/lib/tests/test__datasource.py index 4c866511a5de..2dd19410bbf0 100644 --- a/numpy/lib/tests/test__datasource.py +++ b/numpy/lib/tests/test__datasource.py @@ -88,7 +88,6 @@ def invalid_httpfile(): return http_fakefile -@pytest.mark.thread_unsafe(reason="mkdtemp thread-unsafe") class TestDataSourceOpen: def test_ValidHTTP(self, tmp_path): ds = datasource.DataSource(tmp_path) @@ -157,7 +156,6 @@ def test_ValidBz2File(self, tmp_path): assert_equal(magic_line, result) -@pytest.mark.thread_unsafe(reason="mkdtemp thread-unsafe") class TestDataSourceExists: def test_ValidHTTP(self, tmp_path): ds = datasource.DataSource(tmp_path) @@ -184,7 +182,6 @@ def test_InvalidFile(self, tmp_path): assert_equal(ds.exists(tmpfile), False) -@pytest.mark.thread_unsafe(reason="mkdtemp thread-unsafe") class TestDataSourceAbspath: def test_ValidHTTP(self, tmp_path): ds = datasource.DataSource(tmp_path) @@ -247,7 +244,6 @@ def test_windows_os_sep(self, tmp_path): os.sep = orig_os_sep -@pytest.mark.thread_unsafe(reason="mkdtemp thread-unsafe") class TestRepositoryAbspath: def test_ValidHTTP(self, tmp_path): repos = datasource.Repository(valid_baseurl(), tmp_path) @@ -275,7 +271,6 @@ def test_windows_os_sep(self, tmp_path): os.sep = orig_os_sep -@pytest.mark.thread_unsafe(reason="mkdtemp thread-unsafe") class TestRepositoryExists: def test_ValidFile(self, tmp_path): # Create local temp file @@ -305,7 +300,6 @@ def test_CachedHTTPFile(self, tmp_path): assert_(repos.exists(tmpfile)) -@pytest.mark.thread_unsafe(reason="mkdtemp thread-unsafe") class TestOpenFunc: def test_DataSourceOpen(self, tmp_path): local_file = valid_textfile(tmp_path) From 07ef0177960daf11bd66541077a7bc98d76b883a 
Mon Sep 17 00:00:00 2001 From: "Christine P. Chai" Date: Fri, 24 Oct 2025 14:36:42 -0700 Subject: [PATCH 0752/1718] DOC: Correct a typo: an 1d -> a 1d [skip actions][skip azp][skip cirrus] --- numpy/lib/_function_base_impl.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 9d5553cc7a34..b712269dda9c 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -706,7 +706,7 @@ def piecewise(x, condlist, funclist, *args, **kw): is the default value, used wherever all conditions are false. funclist : list of callables, f(x,*args,**kw), or scalars Each function is evaluated over `x` wherever its corresponding - condition is True. It should take a 1d array as input and give an 1d + condition is True. It should take a 1d array as input and give a 1d array or a scalar value as output. If, instead of a callable, a scalar is provided then a constant function (``lambda x: scalar``) is assumed. From 41cebd0f891f54d3090d4544d3f7a932ec0d4782 Mon Sep 17 00:00:00 2001 From: abhi210 <27881020+Abhi210@users.noreply.github.com> Date: Fri, 24 Oct 2025 12:59:15 +0530 Subject: [PATCH 0753/1718] This PR reference #11521. It removes deprecated functionality and cleans up outdated code paths: - Removed the deprecated `style` parameter from `array2string` in `numpy/_core/arrayprint.py`, along with its related documentation and tests. - Replaced the deprecation warning for `np.sum` called on generators with a `TypeError` in `numpy/_core/fromnumeric.py`, since this behavior is now unsupported. - Removed a leftover docstring that was intended to be deleted in PR #26268. 
--- .../upcoming_changes/30068.expired.rst | 8 ++ numpy/_core/arrayprint.py | 18 +---- numpy/_core/arrayprint.pyi | 79 ------------------- numpy/_core/fromnumeric.py | 27 +------ numpy/_core/tests/test_arrayprint.py | 5 -- numpy/_core/tests/test_deprecations.py | 6 -- numpy/typing/tests/data/fail/arrayprint.pyi | 11 ++- 7 files changed, 19 insertions(+), 135 deletions(-) create mode 100644 doc/release/upcoming_changes/30068.expired.rst diff --git a/doc/release/upcoming_changes/30068.expired.rst b/doc/release/upcoming_changes/30068.expired.rst new file mode 100644 index 000000000000..b651371f7061 --- /dev/null +++ b/doc/release/upcoming_changes/30068.expired.rst @@ -0,0 +1,8 @@ +``numpy.array2string`` and ``numpy.sum`` deprecations finalized +--------------------------------------------- + +The following long-deprecated APIs have been removed or converted to errors: + +* The ``style`` parameter has been removed from ``numpy.array2string``. This argument had no effect since Numpy 1.14.0. + +* Calling ``np.sum(generator)`` directly on a generator object now raises a `TypeError`. This behavior was deprecated in NumPy 1.15.0. Use ``np.sum(np.fromiter(generator))`` or the python ``sum`` builtin instead. 
diff --git a/numpy/_core/arrayprint.py b/numpy/_core/arrayprint.py index 9eda8db40dfc..ec17d3063c09 100644 --- a/numpy/_core/arrayprint.py +++ b/numpy/_core/arrayprint.py @@ -619,7 +619,7 @@ def _array2string(a, options, separator=' ', prefix=""): def _array2string_dispatcher( a, max_line_width=None, precision=None, suppress_small=None, separator=None, prefix=None, - style=None, formatter=None, threshold=None, + formatter=None, threshold=None, edgeitems=None, sign=None, floatmode=None, suffix=None, *, legacy=None): return (a,) @@ -628,7 +628,7 @@ def _array2string_dispatcher( @array_function_dispatch(_array2string_dispatcher, module='numpy') def array2string(a, max_line_width=None, precision=None, suppress_small=None, separator=' ', prefix="", - style=np._NoValue, formatter=None, threshold=None, + formatter=None, threshold=None, edgeitems=None, sign=None, floatmode=None, suffix="", *, legacy=None): """ @@ -663,10 +663,6 @@ def array2string(a, max_line_width=None, precision=None, wrapping is forced at the column ``max_line_width - len(suffix)``. It should be noted that the content of prefix and suffix strings are not included in the output. - style : _NoValue, optional - Has no effect, do not use. - - .. deprecated:: 1.14.0 formatter : dict of callables, optional If not None, the keys should indicate the type(s) that the respective formatting function applies to. Callables should return a string. 
@@ -786,16 +782,8 @@ def array2string(a, max_line_width=None, precision=None, options.update(overrides) if options['legacy'] <= 113: - if style is np._NoValue: - style = repr - if a.shape == () and a.dtype.names is None: - return style(a.item()) - elif style is not np._NoValue: - # Deprecation 11-9-2017 v1.14 - warnings.warn("'style' argument is deprecated and no longer functional" - " except in 1.13 'legacy' mode", - DeprecationWarning, stacklevel=2) + return repr(a.item()) if options['legacy'] > 113: options['linewidth'] -= len(suffix) diff --git a/numpy/_core/arrayprint.pyi b/numpy/_core/arrayprint.pyi index 57e2e1248c5e..6834565da97f 100644 --- a/numpy/_core/arrayprint.pyi +++ b/numpy/_core/arrayprint.pyi @@ -10,13 +10,10 @@ from typing import ( SupportsIndex, TypeAlias, TypedDict, - overload, type_check_only, ) -from typing_extensions import deprecated import numpy as np -from numpy._globals import _NoValueType from numpy._typing import NDArray, _CharLike_co, _FloatLike_co __all__ = [ @@ -94,7 +91,6 @@ def set_printoptions( def get_printoptions() -> _FormatOptions: ... # public numpy export -@overload # no style def array2string( a: NDArray[Any], max_line_width: int | None = None, @@ -102,7 +98,6 @@ def array2string( suppress_small: bool | None = None, separator: str = " ", prefix: str = "", - style: _NoValueType = ..., formatter: _FormatDict | None = None, threshold: int | None = None, edgeitems: int | None = None, @@ -112,80 +107,6 @@ def array2string( *, legacy: _Legacy | None = None, ) -> str: ... 
-@overload # style= (positional), legacy="1.13" -def array2string( - a: NDArray[Any], - max_line_width: int | None, - precision: SupportsIndex | None, - suppress_small: bool | None, - separator: str, - prefix: str, - style: _ReprFunc, - formatter: _FormatDict | None = None, - threshold: int | None = None, - edgeitems: int | None = None, - sign: _Sign | None = None, - floatmode: _FloatMode | None = None, - suffix: str = "", - *, - legacy: Literal["1.13"], -) -> str: ... -@overload # style= (keyword), legacy="1.13" -def array2string( - a: NDArray[Any], - max_line_width: int | None = None, - precision: SupportsIndex | None = None, - suppress_small: bool | None = None, - separator: str = " ", - prefix: str = "", - *, - style: _ReprFunc, - formatter: _FormatDict | None = None, - threshold: int | None = None, - edgeitems: int | None = None, - sign: _Sign | None = None, - floatmode: _FloatMode | None = None, - suffix: str = "", - legacy: Literal["1.13"], -) -> str: ... -@overload # style= (positional), legacy!="1.13" -@deprecated("'style' argument is deprecated and no longer functional except in 1.13 'legacy' mode") -def array2string( - a: NDArray[Any], - max_line_width: int | None, - precision: SupportsIndex | None, - suppress_small: bool | None, - separator: str, - prefix: str, - style: _ReprFunc, - formatter: _FormatDict | None = None, - threshold: int | None = None, - edgeitems: int | None = None, - sign: _Sign | None = None, - floatmode: _FloatMode | None = None, - suffix: str = "", - *, - legacy: _LegacyNoStyle | None = None, -) -> str: ... 
-@overload # style= (keyword), legacy="1.13" -@deprecated("'style' argument is deprecated and no longer functional except in 1.13 'legacy' mode") -def array2string( - a: NDArray[Any], - max_line_width: int | None = None, - precision: SupportsIndex | None = None, - suppress_small: bool | None = None, - separator: str = " ", - prefix: str = "", - *, - style: _ReprFunc, - formatter: _FormatDict | None = None, - threshold: int | None = None, - edgeitems: int | None = None, - sign: _Sign | None = None, - floatmode: _FloatMode | None = None, - suffix: str = "", - legacy: _LegacyNoStyle | None = None, -) -> str: ... def format_float_scientific( x: _FloatLike_co, diff --git a/numpy/_core/fromnumeric.py b/numpy/_core/fromnumeric.py index 90493ef77626..33fb9ec4b39f 100644 --- a/numpy/_core/fromnumeric.py +++ b/numpy/_core/fromnumeric.py @@ -4,7 +4,6 @@ import functools import math import types -import warnings import numpy as np from numpy._utils import set_module @@ -755,8 +754,6 @@ def partition(a, kth, axis=-1, kind='introselect', order=None): provided with a sequence of k-th it will partition all elements indexed by k-th of them into their sorted position at once. - .. deprecated:: 1.22.0 - Passing booleans as index is deprecated. axis : int or None, optional Axis along which to sort. If None, the array is flattened before sorting. The default is -1, which sorts along the last axis. @@ -868,8 +865,6 @@ def argpartition(a, kth, axis=-1, kind='introselect', order=None): sequence of k-th it will partition all of them into their sorted position at once. - .. deprecated:: 1.22.0 - Passing booleans as index is deprecated. axis : int or None, optional Axis along which to sort. The default is -1 (the last axis). If None, the flattened array is used. @@ -2008,15 +2003,6 @@ def nonzero(a): To group the indices by element, rather than dimension, use `argwhere`, which returns a row for each non-zero element. - .. 
note:: - - When called on a zero-d array or scalar, ``nonzero(a)`` is treated - as ``nonzero(atleast_1d(a))``. - - .. deprecated:: 1.17.0 - - Use `atleast_1d` explicitly if this behavior is deliberate. - Parameters ---------- a : array_like @@ -2430,19 +2416,12 @@ def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, """ if isinstance(a, _gentype): # 2018-02-25, 1.15.0 - warnings.warn( - "Calling np.sum(generator) is deprecated, and in the future will " - "give a different result. Use np.sum(np.fromiter(generator)) or " + raise TypeError( + "Calling np.sum(generator) is deprecated." + "Use np.sum(np.fromiter(generator)) or " "the python sum builtin instead.", - DeprecationWarning, stacklevel=2 ) - res = _sum_(a) - if out is not None: - out[...] = res - return out - return res - return _wrapreduction( a, np.add, 'sum', axis, dtype, out, keepdims=keepdims, initial=initial, where=where diff --git a/numpy/_core/tests/test_arrayprint.py b/numpy/_core/tests/test_arrayprint.py index bad1aae78d5d..009a42fdb1b9 100644 --- a/numpy/_core/tests/test_arrayprint.py +++ b/numpy/_core/tests/test_arrayprint.py @@ -735,11 +735,6 @@ def test_0d_arrays(self): # str is unaffected assert_equal(str(x), "1") - # check `style` arg raises - pytest.warns(DeprecationWarning, np.array2string, - np.array(1.), style=repr) - # but not in legacy mode - np.array2string(np.array(1.), style=repr, legacy='1.13') # gh-10934 style was broken in legacy mode, check it works np.array2string(np.array(1.), legacy='1.13') diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index 37fee7705504..7cb1fee9b890 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -145,12 +145,6 @@ def test_bincount_bad_list(self, badlist): self.assert_deprecated(lambda: np.bincount(badlist)) -class TestGeneratorSum(_DeprecationTestCase): - # 2018-02-25, 1.15.0 - def test_generator_sum(self): - self.assert_deprecated(np.sum, args=((i 
for i in range(5)),)) - - class BuiltInRoundComplexDType(_DeprecationTestCase): # 2020-03-31 1.19.0 deprecated_types = [np.csingle, np.cdouble, np.clongdouble] diff --git a/numpy/typing/tests/data/fail/arrayprint.pyi b/numpy/typing/tests/data/fail/arrayprint.pyi index 224a4105b8a6..3c9dc9330a2b 100644 --- a/numpy/typing/tests/data/fail/arrayprint.pyi +++ b/numpy/typing/tests/data/fail/arrayprint.pyi @@ -8,9 +8,8 @@ AR: npt.NDArray[np.float64] func1: Callable[[Any], str] func2: Callable[[np.integer], str] -np.array2string(AR, style=None) # type: ignore[call-overload] -np.array2string(AR, legacy="1.14") # type: ignore[call-overload] -np.array2string(AR, sign="*") # type: ignore[call-overload] -np.array2string(AR, floatmode="default") # type: ignore[call-overload] -np.array2string(AR, formatter={"A": func1}) # type: ignore[call-overload] -np.array2string(AR, formatter={"float": func2}) # type: ignore[call-overload] +np.array2string(AR, legacy="1.14") # type: ignore[arg-type] +np.array2string(AR, sign="*") # type: ignore[arg-type] +np.array2string(AR, floatmode="default") # type: ignore[arg-type] +np.array2string(AR, formatter={"A": func1}) # type: ignore[arg-type] +np.array2string(AR, formatter={"float": func2}) # type: ignore[arg-type] From 9e42499c6088e895b99fb593a2d4b194948d62fb Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Sat, 25 Oct 2025 03:43:48 -0400 Subject: [PATCH 0754/1718] DOC: Fix a couple typos in generalized-ufuncs.rst. (#30074) [skip actions] [skip azp] [skip cirrus] --- doc/source/reference/c-api/generalized-ufuncs.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/reference/c-api/generalized-ufuncs.rst b/doc/source/reference/c-api/generalized-ufuncs.rst index b4750688b5e6..c55336092c71 100644 --- a/doc/source/reference/c-api/generalized-ufuncs.rst +++ b/doc/source/reference/c-api/generalized-ufuncs.rst @@ -239,7 +239,7 @@ In this case, the ufunc author might define the function like this: .. 
code-block:: c - int minmax_process_core_dims(PyUFuncObject ufunc, + int minmax_process_core_dims(PyUFuncObject *ufunc, npy_intp *core_dim_sizes) { npy_intp n = core_dim_sizes[0]; @@ -267,7 +267,7 @@ dimension size will result in an exception being raised. With the can set the output size to whatever is appropriate for the ufunc. In the array passed to the "hook" function, core dimensions that -were not determined by the input are indicating by having the value -1 +were not determined by the input are indicated by having the value -1 in the ``core_dim_sizes`` array. The function can replace the -1 with whatever value is appropriate for the ufunc, based on the core dimensions that occurred in the input arrays. From 8afe11889d9216807cfa0644991f36df31279e64 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Sat, 25 Oct 2025 10:33:14 -0600 Subject: [PATCH 0755/1718] BUG: avoid negating INT_MIN in PyArray_Round implementation (#30071) --- numpy/_core/src/multiarray/calculation.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/numpy/_core/src/multiarray/calculation.c b/numpy/_core/src/multiarray/calculation.c index 0d855281d57a..b95b37987f8e 100644 --- a/numpy/_core/src/multiarray/calculation.c +++ b/numpy/_core/src/multiarray/calculation.c @@ -652,7 +652,15 @@ PyArray_Round(PyArrayObject *a, int decimals, PyArrayObject *out) else { op1 = n_ops.true_divide; op2 = n_ops.multiply; - decimals = -decimals; + if (decimals == INT_MIN) { + // not technically correct but it doesn't matter because no one in + // this millenium is using floating point numbers with enough + // accuracy for this to matter + decimals = INT_MAX; + } + else { + decimals = -decimals; + } } if (!out) { if (PyArray_ISINTEGER(a)) { From 0959a545c464211191686120dad8a6e38453d84c Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Sun, 26 Oct 2025 12:30:31 +0200 Subject: [PATCH 0756/1718] BUG: prefer passing a pointer to the helper function to avoid punning aliasing (#30077) --- 
.../multiarray/lowlevel_strided_loops.c.src | 20 ++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src b/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src index 368d299d5d01..050207ea188c 100644 --- a/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src +++ b/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src @@ -958,7 +958,9 @@ npy_uint64 _npy_halfbits_to_doublebits(npy_uint16 h){ * Check various modes of failure to accurately cast src_value to dst */ static GCC_CAST_OPT_LEVEL int -@prefix@_check_same_value_@name1@_to_@name2@(@rtype1@ src_value) { +@prefix@_check_same_value_@name1@_to_@name2@(@rtype1@ *src_valueP) { + + @rtype1@ src_value = *src_valueP; /* 1. NaN/Infs always work for float to float and otherwise never */ #if (@is_float1@ || @is_emu_half1@ || @is_double1@ || @is_native_half1@) @@ -1097,10 +1099,10 @@ static GCC_CAST_OPT_LEVEL int dst_value[0] = _CONVERT_FN(src_value[0]); dst_value[1] = _CONVERT_FN(src_value[1]); # if @same_value@ - if (@prefix@_check_same_value_@name1@_to_@name2@(*(@rtype1@ *)&src_value[0]) < 0) { + if (@prefix@_check_same_value_@name1@_to_@name2@((@rtype1@ *)&src_value[0]) < 0) { return -1; } - if (@prefix@_check_same_value_@name1@_to_@name2@(*(@rtype1@ *)&src_value[1]) < 0) { + if (@prefix@_check_same_value_@name1@_to_@name2@((@rtype1@ *)&src_value[1]) < 0) { return -1; } # endif //same_value @@ -1110,7 +1112,7 @@ static GCC_CAST_OPT_LEVEL int # else dst_value = _CONVERT_FN(src_value[0]); # if @same_value@ - if (@prefix@_check_same_value_@name1@_to_@name2@(*(@rtype1@ *)&src_value[0]) < 0) { + if (@prefix@_check_same_value_@name1@_to_@name2@((@rtype1@ *)&src_value[0]) < 0) { return -1; } if (src_value[1] != 0) { @@ -1125,7 +1127,7 @@ static GCC_CAST_OPT_LEVEL int # else *(_TYPE2 *)dst = _CONVERT_FN(src_value[0]); # if @same_value@ - if (@prefix@_check_same_value_@name1@_to_@name2@(*(@rtype1@ *)&src_value[0]) < 0) { + if 
(@prefix@_check_same_value_@name1@_to_@name2@((@rtype1@ *)&src_value[0]) < 0) { return -1; } if (src_value[1] != 0) { @@ -1140,14 +1142,14 @@ static GCC_CAST_OPT_LEVEL int # if !@aligned@ dst_value[0] = _CONVERT_FN(src_value); # if @same_value@ - if (@prefix@_check_same_value_@name1@_to_@name2@(*(@rtype1@ *)&src_value) < 0) { + if (@prefix@_check_same_value_@name1@_to_@name2@((@rtype1@ *)&src_value) < 0) { return -1; } # endif //same_value # else //!aligned dst_value[0] = _CONVERT_FN(*(_TYPE1 *)src); # if @same_value@ - if (@prefix@_check_same_value_@name1@_to_@name2@(*(@rtype1@ *)src) < 0) { + if (@prefix@_check_same_value_@name1@_to_@name2@((@rtype1@ *)src) < 0) { return -1; } # endif //same_value @@ -1157,7 +1159,7 @@ static GCC_CAST_OPT_LEVEL int dst_value = _CONVERT_FN(src_value); # if !@is_bool2@ # if @same_value@ - if (@prefix@_check_same_value_@name1@_to_@name2@(*(@rtype1@ *)&src_value) < 0) { + if (@prefix@_check_same_value_@name1@_to_@name2@((@rtype1@ *)&src_value) < 0) { return -1; } # endif //same_value @@ -1166,7 +1168,7 @@ static GCC_CAST_OPT_LEVEL int *(_TYPE2 *)dst = _CONVERT_FN(*(_TYPE1 *)src); # if !@is_bool2@ # if @same_value@ - if (@prefix@_check_same_value_@name1@_to_@name2@(*((@rtype1@ *)src)) < 0) { + if (@prefix@_check_same_value_@name1@_to_@name2@(((@rtype1@ *)src)) < 0) { return -1; } # endif //same_value From 0218a25f39d957a39c7f392cfefb97a143f2ef5c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 27 Oct 2025 12:30:04 -0600 Subject: [PATCH 0757/1718] MAINT: Bump actions/upload-artifact from 4.6.2 to 5.0.0 (#30080) Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 4.6.2 to 5.0.0. 
- [Release notes](https://github.com/actions/upload-artifact/releases) - [Commits](https://github.com/actions/upload-artifact/compare/ea165f8d65b6e75b540449e92b4886f43607fa02...330a01c490aca151604b8cf639adc76d48f6c5d4) --- updated-dependencies: - dependency-name: actions/upload-artifact dependency-version: 5.0.0 dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/cygwin.yml | 2 +- .github/workflows/mypy_primer.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- .github/workflows/wheels.yml | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/cygwin.yml b/.github/workflows/cygwin.yml index 153f18d2956d..c010cd5c9783 100644 --- a/.github/workflows/cygwin.yml +++ b/.github/workflows/cygwin.yml @@ -67,7 +67,7 @@ jobs: cd tools /usr/bin/python3.9 -m pytest --pyargs numpy -n2 -m "not slow" - name: Upload wheel if tests fail - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 if: failure() with: name: numpy-cygwin-wheel diff --git a/.github/workflows/mypy_primer.yml b/.github/workflows/mypy_primer.yml index 8dfeac05d4ba..d8e4a7eb5817 100644 --- a/.github/workflows/mypy_primer.yml +++ b/.github/workflows/mypy_primer.yml @@ -74,7 +74,7 @@ jobs: run: | echo ${{ github.event.pull_request.number }} | tee pr_number.txt - name: Upload mypy_primer diff + PR number - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 if: ${{ matrix.shard-index == 0 }} with: name: mypy_primer_diffs-${{ matrix.shard-index }} @@ -82,7 +82,7 @@ jobs: diff_${{ matrix.shard-index }}.txt pr_number.txt - name: Upload mypy_primer diff - uses: 
actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 if: ${{ matrix.shard-index != 0 }} with: name: mypy_primer_diffs-${{ matrix.shard-index }} @@ -96,7 +96,7 @@ jobs: contents: read steps: - name: Merge artifacts - uses: actions/upload-artifact/merge@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: actions/upload-artifact/merge@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: name: mypy_primer_diffs pattern: mypy_primer_diffs-* diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 9587fc4e026d..5e31a49b08bd 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -42,7 +42,7 @@ jobs: # Upload the results as artifacts (optional). Commenting out will disable # uploads of run results in SARIF format to the repository Actions tab. - name: "Upload artifact" - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: name: SARIF file path: results.sarif diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index e53e4bbefc57..8049b79edef0 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -98,7 +98,7 @@ jobs: env: CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} - - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: name: ${{ matrix.python }}-${{ matrix.buildplat[1] }}-${{ matrix.buildplat[2] }} path: ./wheelhouse/*.whl From b1b9bbd5882c5cb17547d4b1254e6fddb9f87d12 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 27 Oct 2025 19:38:33 +0000 Subject: [PATCH 0758/1718] MAINT: Bump astral-sh/setup-uv from 7.1.1 to 7.1.2 Bumps 
[astral-sh/setup-uv](https://github.com/astral-sh/setup-uv) from 7.1.1 to 7.1.2. - [Release notes](https://github.com/astral-sh/setup-uv/releases) - [Commits](https://github.com/astral-sh/setup-uv/compare/2ddd2b9cb38ad8efd50337e8ab201519a34c9f24...85856786d1ce8acfbcc2f13a5f3fbd6b938f9f41) --- updated-dependencies: - dependency-name: astral-sh/setup-uv dependency-version: 7.1.2 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/mypy.yml | 2 +- .github/workflows/stubtest.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml index 753b75330aea..5fbc4afba8e9 100644 --- a/.github/workflows/mypy.yml +++ b/.github/workflows/mypy.yml @@ -58,7 +58,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: astral-sh/setup-uv@2ddd2b9cb38ad8efd50337e8ab201519a34c9f24 # v7.1.1 + - uses: astral-sh/setup-uv@85856786d1ce8acfbcc2f13a5f3fbd6b938f9f41 # v7.1.2 with: python-version: ${{ matrix.os_python[1] }} activate-environment: true diff --git a/.github/workflows/stubtest.yml b/.github/workflows/stubtest.yml index 13554bfc427c..f5b2fbfbbbae 100644 --- a/.github/workflows/stubtest.yml +++ b/.github/workflows/stubtest.yml @@ -43,7 +43,7 @@ jobs: fetch-tags: true persist-credentials: false - - uses: astral-sh/setup-uv@2ddd2b9cb38ad8efd50337e8ab201519a34c9f24 # v7.1.1 + - uses: astral-sh/setup-uv@85856786d1ce8acfbcc2f13a5f3fbd6b938f9f41 # v7.1.2 with: python-version: ${{ matrix.py }} activate-environment: true From 46503eec3f0c8ffd7ff59edb2637e2f51225e864 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 27 Oct 2025 13:53:50 -0600 Subject: [PATCH 0759/1718] MAINT: Bump github/codeql-action from 4.30.9 to 4.31.0 (#30081) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 4.30.9 to 4.31.0. 
- [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/16140ae1a102900babc80a33c44059580f687047...4e94bd11f71e507f7f87df81788dff88d1dacbfb) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 4.31.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 4c033a26e15c..b147f3e78c1a 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@16140ae1a102900babc80a33c44059580f687047 # v4.30.9 + uses: github/codeql-action/init@4e94bd11f71e507f7f87df81788dff88d1dacbfb # v4.31.0 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@16140ae1a102900babc80a33c44059580f687047 # v4.30.9 + uses: github/codeql-action/autobuild@4e94bd11f71e507f7f87df81788dff88d1dacbfb # v4.31.0 # ℹ️ Command-line programs to run using the OS shell. 
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@16140ae1a102900babc80a33c44059580f687047 # v4.30.9 + uses: github/codeql-action/analyze@4e94bd11f71e507f7f87df81788dff88d1dacbfb # v4.31.0 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 5e31a49b08bd..e0d5de712c0f 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@16140ae1a102900babc80a33c44059580f687047 # v2.1.27 + uses: github/codeql-action/upload-sarif@4e94bd11f71e507f7f87df81788dff88d1dacbfb # v2.1.27 with: sarif_file: results.sarif From 6cf1fddea79029c238dbd3c165b45132d2f1f867 Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Mon, 27 Oct 2025 19:16:46 -0400 Subject: [PATCH 0760/1718] DOC: Fix the first small 'process_core_dims()' example. 
(#30083) [skip actions] [skip azp] [skip cirrus] --- doc/source/reference/c-api/generalized-ufuncs.rst | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/doc/source/reference/c-api/generalized-ufuncs.rst b/doc/source/reference/c-api/generalized-ufuncs.rst index c55336092c71..755ab2141cbd 100644 --- a/doc/source/reference/c-api/generalized-ufuncs.rst +++ b/doc/source/reference/c-api/generalized-ufuncs.rst @@ -244,8 +244,9 @@ In this case, the ufunc author might define the function like this: { npy_intp n = core_dim_sizes[0]; if (n == 0) { - PyExc_SetString("minmax requires the core dimension " - "to be at least 1."); + PyErr_SetString(PyExc_ValueError, + "minmax requires the core dimension to " + "be at least 1."); return -1; } return 0; From a3a2f2fc2283f0b643e65b38b28592b63a9a786d Mon Sep 17 00:00:00 2001 From: Marten van Kerkwijk Date: Tue, 28 Oct 2025 06:29:19 -0400 Subject: [PATCH 0761/1718] MAINT: replace use of asanyarray with ... to keep arrays (#29951) With gh-28576, it has become possible to ensure that when calling a ufunc, the output is guaranteed to be an array. This PR uses that to replace np.asanyarray calls in some functions. I'm not sure this is complete -- these were just parts of code for which test failed if np.anyarray(scalar) is made to return a read-only array (see gh-29876). I do think there are all small improvements, though. 
--- numpy/_core/_methods.py | 13 ++++++------- numpy/lib/_function_base_impl.py | 10 +++++----- numpy/lib/_ufunclike_impl.py | 2 +- numpy/lib/tests/test_function_base.py | 2 +- numpy/linalg/_linalg.py | 8 ++------ 5 files changed, 15 insertions(+), 20 deletions(-) diff --git a/numpy/_core/_methods.py b/numpy/_core/_methods.py index b013fd952beb..1c29831bca20 100644 --- a/numpy/_core/_methods.py +++ b/numpy/_core/_methods.py @@ -185,15 +185,14 @@ def _var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, # Compute sum of squared deviations from mean # Note that x may not be inexact and that we need it to be an array, # not a scalar. - x = asanyarray(arr - arrmean) - + x = um.subtract(arr, arrmean, out=...) if issubclass(arr.dtype.type, (nt.floating, nt.integer)): - x = um.multiply(x, x, out=x) + x = um.square(x, out=x) # Fast-paths for built-in complex types - elif x.dtype in _complex_to_float: - xv = x.view(dtype=(_complex_to_float[x.dtype], (2,))) - um.multiply(xv, xv, out=xv) - x = um.add(xv[..., 0], xv[..., 1], out=x.real).real + elif (_float_dtype := _complex_to_float.get(x.dtype)) is not None: + xv = x.view(dtype=(_float_dtype, (2,))) + um.square(xv, out=xv) + x = um.add(xv[..., 0], xv[..., 1], out=x.real) # Most general case; includes handling object arrays containing imaginary # numbers and complex types with non-native byteorder else: diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index b712269dda9c..29288388f9ed 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -41,6 +41,7 @@ arctan2, cos, exp, + floor, frompyfunc, less_equal, minimum, @@ -4568,9 +4569,8 @@ def _lerp(a, b, t, out=None): out : array_like Output array. """ - diff_b_a = subtract(b, a) - # asanyarray is a stop-gap until gh-13105 - lerp_interpolation = asanyarray(add(a, diff_b_a * t, out=out)) + diff_b_a = b - a + lerp_interpolation = add(a, diff_b_a * t, out=... 
if out is None else out) subtract(b, diff_b_a * (1 - t), out=lerp_interpolation, where=t >= 0.5, casting='unsafe', dtype=type(lerp_interpolation.dtype)) if lerp_interpolation.ndim == 0 and out is None: @@ -4662,8 +4662,8 @@ def _get_indexes(arr, virtual_indexes, valid_values_count): (previous_indexes, next_indexes): Tuple A Tuple of virtual_indexes neighbouring indexes """ - previous_indexes = np.asanyarray(np.floor(virtual_indexes)) - next_indexes = np.asanyarray(previous_indexes + 1) + previous_indexes = floor(virtual_indexes, out=...) + next_indexes = add(previous_indexes, 1, out=...) indexes_above_bounds = virtual_indexes >= valid_values_count - 1 # When indexes is above max index, take the max value of the array if indexes_above_bounds.any(): diff --git a/numpy/lib/_ufunclike_impl.py b/numpy/lib/_ufunclike_impl.py index 695aab1b8922..79cec5aa08b6 100644 --- a/numpy/lib/_ufunclike_impl.py +++ b/numpy/lib/_ufunclike_impl.py @@ -57,7 +57,7 @@ def fix(x, out=None): """ # promote back to an array if flattened - res = nx.asanyarray(nx.ceil(x, out=out)) + res = nx.ceil(x, out=... 
if out is None else out) res = nx.floor(x, out=res, where=nx.greater_equal(x, 0)) # when no out argument is passed and no subclasses are involved, flatten diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 1d4026d8562f..57228e363e71 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -3925,7 +3925,7 @@ def test_fraction(self): q = np.quantile(x, .5) assert_equal(q, 1.75) - assert_equal(type(q), np.float64) + assert isinstance(q, float) q = np.quantile(x, Fraction(1, 2)) assert_equal(q, Fraction(7, 4)) diff --git a/numpy/linalg/_linalg.py b/numpy/linalg/_linalg.py index 8dde643781b8..c3cbf3d0ef98 100644 --- a/numpy/linalg/_linalg.py +++ b/numpy/linalg/_linalg.py @@ -2016,18 +2016,14 @@ def cond(x, p=None): r = r.astype(result_t, copy=False) # Convert nans to infs unless the original array had nan entries - r = asarray(r) nan_mask = isnan(r) if nan_mask.any(): nan_mask &= ~isnan(x).any(axis=(-2, -1)) if r.ndim > 0: r[nan_mask] = inf elif nan_mask: - r[()] = inf - - # Convention is to return scalars instead of 0d arrays - if r.ndim == 0: - r = r[()] + # Convention is to return scalars instead of 0d arrays. 
+ r = r.dtype.type(inf) return r From 2d73f33a0263584f1966be6ebb843be571ed47b4 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Tue, 28 Oct 2025 17:56:42 +0100 Subject: [PATCH 0762/1718] TYP: ``numpy.ma``: Annotate the callable wrapper classes (#30084) * TYP: stub ``ma.isMaskedArray`` * TYP: stub (subclasses of) `ma.core._MaskedUFunc` * TYP: work around false postiive stubtest errors for `ma.isarray` and `ma.isMA` * TYP: stub `ma.masked_print_option` * TYP: stub `ma.core._frommethod` (partially) * TYP: "dot" the remaining callable wrapper instances --- numpy/ma/core.pyi | 391 +++++++++++++++++++++++++++++----------------- 1 file changed, 244 insertions(+), 147 deletions(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 78114931cbac..ae66384098d8 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -2,9 +2,10 @@ import datetime as dt from _typeshed import Incomplete -from collections.abc import Sequence +from collections.abc import Callable, Sequence from typing import ( Any, + Concatenate, Final, Generic, Literal, @@ -16,9 +17,10 @@ from typing import ( SupportsIndex, SupportsInt, TypeAlias, + final, overload, ) -from typing_extensions import TypeIs, TypeVar, override +from typing_extensions import ParamSpec, TypeIs, TypeVar, override import numpy as np from numpy import ( @@ -292,6 +294,15 @@ _ScalarT = TypeVar("_ScalarT", bound=generic) _ScalarT_co = TypeVar("_ScalarT_co", bound=generic, covariant=True) _NumberT = TypeVar("_NumberT", bound=number) _RealNumberT = TypeVar("_RealNumberT", bound=floating | integer) +_UFuncT_co = TypeVar( + "_UFuncT_co", + # the `| Callable` simplifies self-binding to the ufunc's callable signature + bound=np.ufunc | Callable[..., object], + default=np.ufunc, + covariant=True, +) +_Pss = ParamSpec("_Pss") +_T = TypeVar("_T") _Ignored: TypeAlias = object @@ -319,6 +330,10 @@ _ConvertibleToDT64: TypeAlias = dt.date | int | _CharLike_co | character | numbe _NoMaskType: TypeAlias = np.bool_[Literal[False]] # type of 
`np.False_` _MaskArray: TypeAlias = np.ndarray[_ShapeOrAnyT, np.dtype[np.bool_]] +_FillValue: TypeAlias = complex | None # int | float | complex | None +_FillValueCallable: TypeAlias = Callable[[np.dtype | ArrayLike], _FillValue] +_DomainCallable: TypeAlias = Callable[..., NDArray[np.bool_]] + ### MaskType = np.bool_ @@ -329,97 +344,171 @@ class MaskedArrayFutureWarning(FutureWarning): ... class MAError(Exception): ... class MaskError(MAError): ... -class _MaskedUFunc: - f: Any - __doc__: Any - __name__: Any - def __init__(self, ufunc): ... - -class _MaskedUnaryOperation(_MaskedUFunc): - fill: Any - domain: Any - def __init__(self, mufunc, fill=..., domain=...): ... - def __call__(self, a, *args, **kwargs): ... - -class _MaskedBinaryOperation(_MaskedUFunc): - fillx: Any - filly: Any - def __init__(self, mbfunc, fillx=..., filly=...): ... - def __call__(self, a, b, *args, **kwargs): ... - def reduce(self, target, axis=0, dtype=None): ... - def outer(self, a, b): ... - def accumulate(self, target, axis=0): ... - -class _DomainedBinaryOperation(_MaskedUFunc): - domain: Any - fillx: Any - filly: Any - def __init__(self, dbfunc, domain, fillx=..., filly=...): ... - def __call__(self, a, b, *args, **kwargs): ... +# not generic at runtime +class _MaskedUFunc(Generic[_UFuncT_co]): + f: _UFuncT_co # readonly + def __init__(self, /, ufunc: _UFuncT_co) -> None: ... + +# not generic at runtime +class _MaskedUnaryOperation(_MaskedUFunc[_UFuncT_co], Generic[_UFuncT_co]): + fill: Final[_FillValue] + domain: Final[_DomainCallable | None] + + def __init__(self, /, mufunc: _UFuncT_co, fill: _FillValue = 0, domain: _DomainCallable | None = None) -> None: ... 
+ + # NOTE: This might not work with overloaded callable signatures might not work on + # pyright, which is a long-standing issue, and is unique to pyright: + # https://github.com/microsoft/pyright/issues/9663 + # https://github.com/microsoft/pyright/issues/10849 + # https://github.com/microsoft/pyright/issues/10899 + # https://github.com/microsoft/pyright/issues/11049 + def __call__( + self: _MaskedUnaryOperation[Callable[Concatenate[Any, _Pss], _T]], + /, + a: ArrayLike, + *args: _Pss.args, + **kwargs: _Pss.kwargs, + ) -> _T: ... + +# not generic at runtime +class _MaskedBinaryOperation(_MaskedUFunc[_UFuncT_co], Generic[_UFuncT_co]): + fillx: Final[_FillValue] + filly: Final[_FillValue] + + def __init__(self, /, mbfunc: _UFuncT_co, fillx: _FillValue = 0, filly: _FillValue = 0) -> None: ... + + # NOTE: See the comment in `_MaskedUnaryOperation.__call__` + def __call__( + self: _MaskedBinaryOperation[Callable[Concatenate[Any, Any, _Pss], _T]], + /, + a: ArrayLike, + b: ArrayLike, + *args: _Pss.args, + **kwargs: _Pss.kwargs, + ) -> _T: ... + + # NOTE: We cannot meaningfully annotate the return (d)types of these methods until + # the signatures of the corresponding `numpy.ufunc` methods are specified. + def reduce(self, /, target: ArrayLike, axis: SupportsIndex = 0, dtype: DTypeLike | None = None) -> Incomplete: ... + def outer(self, /, a: ArrayLike, b: ArrayLike) -> _MaskedArray[Incomplete]: ... + def accumulate(self, /, target: ArrayLike, axis: SupportsIndex = 0) -> _MaskedArray[Incomplete]: ... + +# not generic at runtime +class _DomainedBinaryOperation(_MaskedUFunc[_UFuncT_co], Generic[_UFuncT_co]): + domain: Final[_DomainCallable] + fillx: Final[_FillValue] + filly: Final[_FillValue] + + def __init__( + self, + /, + dbfunc: _UFuncT_co, + domain: _DomainCallable, + fillx: _FillValue = 0, + filly: _FillValue = 0, + ) -> None: ... 
+ + # NOTE: See the comment in `_MaskedUnaryOperation.__call__` + def __call__( + self: _DomainedBinaryOperation[Callable[Concatenate[Any, Any, _Pss], _T]], + /, + a: ArrayLike, + b: ArrayLike, + *args: _Pss.args, + **kwargs: _Pss.kwargs, + ) -> _T: ... + +# not generic at runtime +class _extrema_operation(_MaskedUFunc[_UFuncT_co], Generic[_UFuncT_co]): + compare: Final[_MaskedBinaryOperation] + fill_value_func: Final[_FillValueCallable] + def __init__( + self, + /, + ufunc: _UFuncT_co, + compare: _MaskedBinaryOperation, + fill_value: _FillValueCallable, + ) -> None: ... + + # NOTE: This class is only used internally for `maximum` and `minimum`, so we are + # able to annotate the `__call__` method specifically for those two functions. + @overload + def __call__(self, /, a: _ArrayLike[_ScalarT], b: _ArrayLike[_ScalarT]) -> _MaskedArray[_ScalarT]: ... + @overload + def __call__(self, /, a: ArrayLike, b: ArrayLike) -> _MaskedArray[Incomplete]: ... + + # NOTE: We cannot meaningfully annotate the return (d)types of these methods until + # the signatures of the corresponding `numpy.ufunc` methods are specified. + def reduce(self, /, target: ArrayLike, axis: SupportsIndex | _NoValueType = ...) -> Incomplete: ... + def outer(self, /, a: ArrayLike, b: ArrayLike) -> _MaskedArray[Incomplete]: ... + +@final class _MaskedPrintOption: - def __init__(self, display): ... - def display(self): ... - def set_display(self, s): ... - def enabled(self): ... - def enable(self, shrink=1): ... + _display: str + _enabled: bool | Literal[0, 1] + def __init__(self, /, display: str) -> None: ... + def display(self, /) -> str: ... + def set_display(self, /, s: str) -> None: ... + def enabled(self, /) -> bool: ... + def enable(self, /, shrink: bool | Literal[0, 1] = 1) -> None: ... masked_print_option: Final[_MaskedPrintOption] = ... 
-exp: _MaskedUnaryOperation -conjugate: _MaskedUnaryOperation -sin: _MaskedUnaryOperation -cos: _MaskedUnaryOperation -arctan: _MaskedUnaryOperation -arcsinh: _MaskedUnaryOperation -sinh: _MaskedUnaryOperation -cosh: _MaskedUnaryOperation -tanh: _MaskedUnaryOperation -abs: _MaskedUnaryOperation -absolute: _MaskedUnaryOperation -angle: _MaskedUnaryOperation -fabs: _MaskedUnaryOperation -negative: _MaskedUnaryOperation -floor: _MaskedUnaryOperation -ceil: _MaskedUnaryOperation -around: _MaskedUnaryOperation -logical_not: _MaskedUnaryOperation -sqrt: _MaskedUnaryOperation -log: _MaskedUnaryOperation -log2: _MaskedUnaryOperation -log10: _MaskedUnaryOperation -tan: _MaskedUnaryOperation -arcsin: _MaskedUnaryOperation -arccos: _MaskedUnaryOperation -arccosh: _MaskedUnaryOperation -arctanh: _MaskedUnaryOperation - -add: _MaskedBinaryOperation -subtract: _MaskedBinaryOperation -multiply: _MaskedBinaryOperation -arctan2: _MaskedBinaryOperation -equal: _MaskedBinaryOperation -not_equal: _MaskedBinaryOperation -less_equal: _MaskedBinaryOperation -greater_equal: _MaskedBinaryOperation -less: _MaskedBinaryOperation -greater: _MaskedBinaryOperation -logical_and: _MaskedBinaryOperation +exp: _MaskedUnaryOperation = ... +conjugate: _MaskedUnaryOperation = ... +sin: _MaskedUnaryOperation = ... +cos: _MaskedUnaryOperation = ... +arctan: _MaskedUnaryOperation = ... +arcsinh: _MaskedUnaryOperation = ... +sinh: _MaskedUnaryOperation = ... +cosh: _MaskedUnaryOperation = ... +tanh: _MaskedUnaryOperation = ... +abs: _MaskedUnaryOperation = ... +absolute: _MaskedUnaryOperation = ... +angle: _MaskedUnaryOperation = ... +fabs: _MaskedUnaryOperation = ... +negative: _MaskedUnaryOperation = ... +floor: _MaskedUnaryOperation = ... +ceil: _MaskedUnaryOperation = ... +around: _MaskedUnaryOperation = ... +logical_not: _MaskedUnaryOperation = ... +sqrt: _MaskedUnaryOperation = ... +log: _MaskedUnaryOperation = ... +log2: _MaskedUnaryOperation = ... +log10: _MaskedUnaryOperation = ... 
+tan: _MaskedUnaryOperation = ... +arcsin: _MaskedUnaryOperation = ... +arccos: _MaskedUnaryOperation = ... +arccosh: _MaskedUnaryOperation = ... +arctanh: _MaskedUnaryOperation = ... + +add: _MaskedBinaryOperation = ... +subtract: _MaskedBinaryOperation = ... +multiply: _MaskedBinaryOperation = ... +arctan2: _MaskedBinaryOperation = ... +equal: _MaskedBinaryOperation = ... +not_equal: _MaskedBinaryOperation = ... +less_equal: _MaskedBinaryOperation = ... +greater_equal: _MaskedBinaryOperation = ... +less: _MaskedBinaryOperation = ... +greater: _MaskedBinaryOperation = ... +logical_and: _MaskedBinaryOperation = ... def alltrue(target: ArrayLike, axis: SupportsIndex | None = 0, dtype: _DTypeLikeBool | None = None) -> Incomplete: ... -logical_or: _MaskedBinaryOperation +logical_or: _MaskedBinaryOperation = ... def sometrue(target: ArrayLike, axis: SupportsIndex | None = 0, dtype: _DTypeLikeBool | None = None) -> Incomplete: ... -logical_xor: _MaskedBinaryOperation -bitwise_and: _MaskedBinaryOperation -bitwise_or: _MaskedBinaryOperation -bitwise_xor: _MaskedBinaryOperation -hypot: _MaskedBinaryOperation - -divide: _DomainedBinaryOperation -true_divide: _DomainedBinaryOperation -floor_divide: _DomainedBinaryOperation -remainder: _DomainedBinaryOperation -fmod: _DomainedBinaryOperation -mod: _DomainedBinaryOperation +logical_xor: _MaskedBinaryOperation = ... +bitwise_and: _MaskedBinaryOperation = ... +bitwise_or: _MaskedBinaryOperation = ... +bitwise_xor: _MaskedBinaryOperation = ... +hypot: _MaskedBinaryOperation = ... + +divide: _DomainedBinaryOperation = ... +true_divide: _DomainedBinaryOperation = ... +floor_divide: _DomainedBinaryOperation = ... +remainder: _DomainedBinaryOperation = ... +fmod: _DomainedBinaryOperation = ... +mod: _DomainedBinaryOperation = ... # `obj` can be anything (even `object()`), and is too "flexible", so we can't # meaningfully annotate it, or its return type. 
@@ -2266,9 +2355,9 @@ class mvoid(MaskedArray[_ShapeT_co, _DTypeT_co]): def filled(self, fill_value=None): ... def tolist(self): ... # type: ignore[override] -def isMaskedArray(x): ... -isarray = isMaskedArray -isMA = isMaskedArray +def isMaskedArray(x: object) -> TypeIs[MaskedArray]: ... +def isarray(x: object) -> TypeIs[MaskedArray]: ... # alias to isMaskedArray +def isMA(x: object) -> TypeIs[MaskedArray]: ... # alias to isMaskedArray # 0D float64 array class MaskedConstant(MaskedArray[tuple[()], dtype[float64]]): @@ -2362,16 +2451,6 @@ def asanyarray(a: object, dtype: DTypeLike | None = None) -> _MaskedArray[_Scala # def is_masked(x: object) -> bool: ... -class _extrema_operation(_MaskedUFunc): - compare: Any - fill_value_func: Any - def __init__(self, ufunc, compare, fill_value): ... - # NOTE: in practice `b` has a default value, but users should - # explicitly provide a value here as the default is deprecated - def __call__(self, a, b): ... - def reduce(self, target, axis=...): ... - def outer(self, a, b): ... - @overload def min( obj: _ArrayLike[_ScalarT], @@ -2474,38 +2553,58 @@ def ptp( keepdims: bool | _NoValueType = ..., ) -> _ArrayT: ... +@final class _frommethod: - __name__: Any - __doc__: Any - reversed: Any - def __init__(self, methodname, reversed=...): ... - def getdoc(self): ... - def __call__(self, a, *args, **params): ... - -all: _frommethod -anomalies: _frommethod -anom: _frommethod -any: _frommethod -compress: _frommethod -cumprod: _frommethod -cumsum: _frommethod -copy: _frommethod -diagonal: _frommethod -harden_mask: _frommethod -ids: _frommethod -mean: _frommethod -nonzero: _frommethod -prod: _frommethod -product: _frommethod -ravel: _frommethod -repeat: _frommethod -soften_mask: _frommethod -std: _frommethod -sum: _frommethod -swapaxes: _frommethod -trace: _frommethod -var: _frommethod - + __name__: str + __doc__: str | None + reversed: Final[bool] + + def __init__(self, /, methodname: str, reversed: bool = False) -> None: ... 
+ # TODO: specializable callable signatures + def __call__(self, /, a: Incomplete, *args: Incomplete, **params: Incomplete) -> Incomplete: ... + def getdoc(self, /) -> str | None: ... + +# (self, /) +ids: _frommethod = ... +nonzero: _frommethod = ... +harden_mask: _frommethod = ... +soften_mask: _frommethod = ... +shrink_mask: _frommethod = ... +# (self, /, order='C') +ravel: _frommethod = ... +# (self, /, *args, **params) +copy: _frommethod = ... +diagonal: _frommethod = ... +repeat: _frommethod = ... +swapaxes: _frommethod = ... +# (self, /, axis=None, dtype=None) +anomalies: _frommethod = ... +anom: _frommethod = ... # == anomalies +# (self, /, axis=None, out=None, keepdims=) +all: _frommethod = ... +any: _frommethod = ... +# (condition, self, /, axis=None, out=None) +# NOTE: this is the only `_frommethod` instance with `reversed=True` +compress: _frommethod = ... +# (self, /, axis=None, dtype=None, out=None) +cumprod: _frommethod = ... +cumsum: _frommethod = ... +# (self, /, axis=None, dtype=None, out=None, keepdims=) +mean: _frommethod = ... +prod: _frommethod = ... +product: _frommethod = ... +sum: _frommethod = ... +# (self, /, offset=0, axis1=0, axis2=1, dtype=None, out=None) +trace: _frommethod = ... +# (self, /, axis=None, dtype=None, out=None, ddof=0, keepdims=, mean=) +std: _frommethod = ... +var: _frommethod = ... + +# (a, b) +minimum: _extrema_operation = ... +maximum: _extrema_operation = ... + +# NOTE: this is a `_frommethod` instance at runtime @overload def count(a: ArrayLike, axis: None = None, keepdims: Literal[False] | _NoValueType = ...) -> int: ... @overload @@ -2515,6 +2614,7 @@ def count(a: ArrayLike, axis: _ShapeLike | None = None, *, keepdims: Literal[Tru @overload def count(a: ArrayLike, axis: _ShapeLike | None, keepdims: Literal[True]) -> NDArray[int_]: ... +# NOTE: this is a `_frommethod` instance at runtime @overload def argmin( a: ArrayLike, @@ -2552,7 +2652,7 @@ def argmin( keepdims: bool | _NoValueType = ..., ) -> _ArrayT: ... 
-# +# keep in sync with `argmin` @overload def argmax( a: ArrayLike, @@ -2590,9 +2690,6 @@ def argmax( keepdims: bool | _NoValueType = ..., ) -> _ArrayT: ... -minimum: _extrema_operation -maximum: _extrema_operation - @overload def take( a: _ArrayLike[_ScalarT], @@ -2709,19 +2806,19 @@ class _convert2ma: def __call__(self, /, *args: object, **params: object) -> Any: ... def getdoc(self, /, np_ret: str, np_ma_ret: str) -> str | None: ... -arange: _convert2ma -clip: _convert2ma -empty: _convert2ma -empty_like: _convert2ma -frombuffer: _convert2ma -fromfunction: _convert2ma -identity: _convert2ma -indices: _convert2ma -ones: _convert2ma -ones_like: _convert2ma -squeeze: _convert2ma -zeros: _convert2ma -zeros_like: _convert2ma +arange: _convert2ma = ... +clip: _convert2ma = ... +empty: _convert2ma = ... +empty_like: _convert2ma = ... +frombuffer: _convert2ma = ... +fromfunction: _convert2ma = ... +identity: _convert2ma = ... +indices: _convert2ma = ... +ones: _convert2ma = ... +ones_like: _convert2ma = ... +squeeze: _convert2ma = ... +zeros: _convert2ma = ... +zeros_like: _convert2ma = ... def append(a, b, axis=None): ... def dot(a, b, strict=False, out=None): ... 
From e6478c508435e5dcaba256561eed4e117121a9d9 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Tue, 28 Oct 2025 19:14:20 +0100 Subject: [PATCH 0763/1718] BUG: Fix resize when it contains references (#29970) --- numpy/_core/src/multiarray/shape.c | 29 ++++++++++++++++++---------- numpy/_core/tests/test_multiarray.py | 14 ++++++++++++++ 2 files changed, 33 insertions(+), 10 deletions(-) diff --git a/numpy/_core/src/multiarray/shape.c b/numpy/_core/src/multiarray/shape.c index 340fe7289ac8..83de9f19a574 100644 --- a/numpy/_core/src/multiarray/shape.c +++ b/numpy/_core/src/multiarray/shape.c @@ -121,6 +121,15 @@ PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, "no memory handler found but OWNDATA flag set"); return NULL; } + if (newnbytes < oldnbytes) { + /* Clear now removed data (if dtype has references) */ + if (PyArray_ClearBuffer( + PyArray_DESCR(self), PyArray_BYTES(self) + newnbytes, + elsize, oldsize-newsize, PyArray_ISALIGNED(self)) < 0) { + return NULL; + } + } + new_data = PyDataMem_UserRENEW(PyArray_DATA(self), newnbytes == 0 ? 
elsize : newnbytes, handler); @@ -130,17 +139,17 @@ PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, return NULL; } ((PyArrayObject_fields *)self)->data = new_data; - } - if (newnbytes > oldnbytes && PyArray_ISWRITEABLE(self)) { - /* Fill new memory with zeros (PyLong zero for object arrays) */ - npy_intp stride = elsize; - npy_intp size = newsize - oldsize; - char *data = PyArray_BYTES(self) + oldnbytes; - int aligned = PyArray_ISALIGNED(self); - if (PyArray_ZeroContiguousBuffer(PyArray_DESCR(self), data, - stride, size, aligned) < 0) { - return NULL; + if (newnbytes > oldnbytes && PyArray_ISWRITEABLE(self)) { + /* Fill new memory with zeros (PyLong zero for object arrays) */ + npy_intp stride = elsize; + npy_intp size = newsize - oldsize; + char *data = PyArray_BYTES(self) + oldnbytes; + int aligned = PyArray_ISALIGNED(self); + if (PyArray_ZeroContiguousBuffer(PyArray_DESCR(self), data, + stride, size, aligned) < 0) { + return NULL; + } } } diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index ca34756e5882..c62fb5b4e905 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -6312,6 +6312,20 @@ def test_obj_obj(self): assert_array_equal(a['k'][-5:], 0) assert_array_equal(a['k'][:-5], 1) + @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") + @pytest.mark.parametrize("dtype", ["O", "O,O"]) + def test_obj_obj_shrinking(self, dtype): + # check that memory is freed when shrinking an array. + test_obj = object() + expected = sys.getrefcount(test_obj) + a = np.array([test_obj, test_obj, test_obj], dtype=dtype) + assert a.size == 3 + a.resize((2, 1)) # two elements, not three! 
+ assert a.size == 2 + del a + # if all is well, then we reclaimed all references + assert sys.getrefcount(test_obj) == expected + def test_empty_view(self): # check that sizes containing a zero don't trigger a reallocate for # already empty arrays From b205e91730e5be25a1da917a7d765b077a9f800f Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 28 Oct 2025 22:27:24 +0100 Subject: [PATCH 0764/1718] BUG: Fix ``ma.core._frommethod`` function signatures --- numpy/ma/core.py | 61 ++++++++++++++++++------------------- numpy/ma/tests/test_core.py | 14 +++++++++ 2 files changed, 44 insertions(+), 31 deletions(-) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 621dbd94640b..7803556fb660 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -7047,7 +7047,7 @@ def ptp(obj, axis=None, out=None, fill_value=None, keepdims=np._NoValue): ############################################################################## -class _frommethod: +def _frommethod(methodname: str, reversed: bool = False): """ Define functions from existing MaskedArray methods. @@ -7055,44 +7055,47 @@ class _frommethod: ---------- methodname : str Name of the method to transform. - + reversed : bool, optional + Whether to reverse the first two arguments of the method. Default is False. """ + method = getattr(MaskedArray, methodname) + assert callable(method) - def __init__(self, methodname, reversed=False): - self.__name__ = methodname - self.__qualname__ = methodname - self.__doc__ = self.getdoc() - self.reversed = reversed + signature = inspect.signature(method) + params = list(signature.parameters.values()) + params[0] = params[0].replace(name="a") # rename 'self' to 'a' - def getdoc(self): - "Return the doc of the function (from the doc of the method)." 
- meth = getattr(MaskedArray, self.__name__, None) or\ - getattr(np, self.__name__, None) - signature = self.__name__ + get_object_signature(meth) - if meth is not None: - doc = f""" {signature} -{getattr(meth, '__doc__', None)}""" - return doc + if reversed: + assert len(params) >= 2 + params[0], params[1] = params[1], params[0] + + def wrapper(a, b, *args, **params): + return getattr(asanyarray(b), methodname)(a, *args, **params) - def __call__(self, a, *args, **params): - if self.reversed: - args = list(args) - a, args[0] = args[0], a + else: + def wrapper(a, *args, **params): + return getattr(asanyarray(a), methodname)(*args, **params) + + wrapper.__signature__ = signature.replace(parameters=params) + wrapper.__name__ = wrapper.__qualname__ = methodname - marr = asanyarray(a) - method_name = self.__name__ - method = getattr(type(marr), method_name, None) - if method is None: - # use the corresponding np function - method = getattr(np, method_name) + # __doc__ is None when using `python -OO ...` + if method.__doc__ is not None: + str_signature = f"{methodname}{signature}" + # TODO: For methods with a docstring "Parameters" section, that do not already + # mention `a` (see e.g. `MaskedArray.var.__doc__`), it should be inserted there. 
+ wrapper.__doc__ = f" {str_signature}\n{method.__doc__}" - return method(marr, *args, **params) + return wrapper all = _frommethod('all') anomalies = anom = _frommethod('anom') any = _frommethod('any') +argmax = _frommethod('argmax') +argmin = _frommethod('argmin') compress = _frommethod('compress', reversed=True) +count = _frommethod('count') cumprod = _frommethod('cumprod') cumsum = _frommethod('cumsum') copy = _frommethod('copy') @@ -7116,7 +7119,6 @@ def __call__(self, a, *args, **params): trace = _frommethod('trace') var = _frommethod('var') -count = _frommethod('count') def take(a, indices, axis=None, out=None, mode='raise'): """ @@ -7205,9 +7207,6 @@ def power(a, b, third=None): return result -argmin = _frommethod('argmin') -argmax = _frommethod('argmax') - def argsort(a, axis=np._NoValue, kind=None, order=None, endwith=True, fill_value=None, *, stable=None): "Function version of the eponymous method." diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index 2da03158d317..bf7b94d720ce 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -6,6 +6,7 @@ __author__ = "Pierre GF Gerard-Marchant" import copy +import inspect import itertools import operator import pickle @@ -5963,3 +5964,16 @@ def test_uint_fill_value_and_filled(): # And this ensures things like filled work: np.testing.assert_array_equal( a.filled(), np.array([999999, 1]).astype("uint16"), strict=True) + + +@pytest.mark.parametrize( + ('fn', 'signature'), + [ + (np.ma.nonzero, "(a)"), + (np.ma.anomalies, "(a, axis=None, dtype=None)"), + (np.ma.cumsum, "(a, axis=None, dtype=None, out=None)"), + (np.ma.compress, "(condition, a, axis=None, out=None)"), + ] +) +def test_frommethod_signature(fn, signature): + assert str(inspect.signature(fn)) == signature From aaf43cab66ef41b22f8e7533d83d3483c78502e4 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 29 Oct 2025 00:19:13 +0100 Subject: [PATCH 0765/1718] TYP: remove unused stubtest allowlist entries for 
``numpy.ma`` --- tools/stubtest/allowlist.txt | 5 ----- 1 file changed, 5 deletions(-) diff --git a/tools/stubtest/allowlist.txt b/tools/stubtest/allowlist.txt index 77ff582c9f66..2cfa30d3c0b1 100644 --- a/tools/stubtest/allowlist.txt +++ b/tools/stubtest/allowlist.txt @@ -182,8 +182,3 @@ numpy\.linalg\._umath_linalg\.qr_complete numpy\.linalg\._umath_linalg\.qr_reduced numpy\.linalg\._umath_linalg\.solve numpy\.linalg\._umath_linalg\.solve1 - -# ma.core._frommethod callables -numpy\.ma\.(core\.)?argmax -numpy\.ma\.(core\.)?argmin -numpy\.ma\.(core\.)?count From f0e732b6c06626675bb7680e1d0e3e979d55ab1d Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 29 Oct 2025 02:21:58 +0100 Subject: [PATCH 0766/1718] TYP: Annotate the ``ma.core._frommethod`` functions --- numpy/_core/fromnumeric.pyi | 5 + numpy/lib/_function_base_impl.pyi | 1 + numpy/ma/core.pyi | 435 ++++++++++++++++++++++++++---- 3 files changed, 388 insertions(+), 53 deletions(-) diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index d0e0a2a1f67c..a2c4f47ecb2f 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -269,6 +269,7 @@ def choose( mode: _ModeKind = "raise", ) -> _ArrayT: ... +# keep in sync with `ma.core.repeat` @overload def repeat( a: _ArrayLike[_ScalarT], @@ -294,6 +295,7 @@ def repeat( axis: SupportsIndex, ) -> NDArray[Any]: ... +# def put( a: NDArray[Any], ind: _ArrayLikeInt_co, @@ -301,6 +303,7 @@ def put( mode: _ModeKind = "raise", ) -> None: ... +# keep in sync with `ma.core.swapaxes` @overload def swapaxes( a: _ArrayLike[_ScalarT], @@ -507,6 +510,7 @@ def squeeze( axis: _ShapeLike | None = None, ) -> NDArray[Any]: ... +# keep in sync with `ma.core.diagonal` @overload def diagonal( a: _ArrayLike[_ScalarT], @@ -522,6 +526,7 @@ def diagonal( axis2: SupportsIndex = 1, # >= 2D array ) -> NDArray[Any]: ... 
+# keep in sync with `ma.core.trace` @overload def trace( a: ArrayLike, # >= 2D array diff --git a/numpy/lib/_function_base_impl.pyi b/numpy/lib/_function_base_impl.pyi index 651ece6b3447..bad00641072c 100644 --- a/numpy/lib/_function_base_impl.pyi +++ b/numpy/lib/_function_base_impl.pyi @@ -251,6 +251,7 @@ def select( default: ArrayLike = 0, ) -> NDArray[Any]: ... +# keep roughly in sync with `ma.core.copy` @overload def copy( a: _ArrayT, diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index ae66384098d8..8e6309504445 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -1,6 +1,7 @@ # pyright: reportIncompatibleMethodOverride=false import datetime as dt +import types from _typeshed import Incomplete from collections.abc import Callable, Sequence from typing import ( @@ -1117,6 +1118,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def filled(self, /, fill_value: _ScalarLike_co | None = None) -> ndarray[_ShapeT_co, _DTypeT_co]: ... def compressed(self) -> ndarray[tuple[int], _DTypeT_co]: ... + # keep roughly in sync with `ma.core.compress`, but swap the first two arguments @overload # type: ignore[override] def compress( self, @@ -1615,6 +1617,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def count(self, axis: _ShapeLike | None, keepdims: Literal[True]) -> NDArray[int_]: ... + # keep roughly in sync with `ma.core.ravel` def ravel(self, order: _OrderKACF = "C") -> MaskedArray[tuple[int], _DTypeT_co]: ... # Keep in sync with `ndarray.reshape` @@ -1705,6 +1708,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def ids(self) -> tuple[int, int]: ... def iscontiguous(self) -> bool: ... + # Keep in sync with `ma.core.all` @overload # type: ignore[override] def all( self, @@ -1750,6 +1754,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): keepdims: bool | _NoValueType = ..., ) -> _ArrayT: ... 
+ # Keep in sync with `ma.core.any` @overload # type: ignore[override] def any( self, @@ -1795,9 +1800,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): keepdims: bool | _NoValueType = ..., ) -> _ArrayT: ... - def nonzero(self) -> tuple[_Array1D[intp], ...]: ... - - # Keep in sync with `ndarray.trace` + # Keep in sync with `ndarray.trace` and `ma.core.trace` @overload def trace( self, # >= 2D MaskedArray @@ -1833,6 +1836,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def dot(self, b: ArrayLike, out: _ArrayT, strict: bool = False) -> _ArrayT: ... + # Keep in sync with `ma.core.sum` @overload # type: ignore[override] def sum( self, @@ -1862,7 +1866,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): keepdims: bool | _NoValueType = ..., ) -> _ArrayT: ... - # Keep in sync with `ndarray.cumsum` + # Keep in sync with `ndarray.cumsum` and `ma.core.cumsum` @overload # out: None (default) def cumsum(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> _MaskedArray[Any]: ... @overload # out: ndarray @@ -1870,6 +1874,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def cumsum(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... + # Keep in sync with `ma.core.prod` @overload # type: ignore[override] def prod( self, @@ -1899,9 +1904,9 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): keepdims: bool | _NoValueType = ..., ) -> _ArrayT: ... - product: Any + product = prod - # Keep in sync with `ndarray.cumprod` + # Keep in sync with `ndarray.cumprod` and `ma.core.cumprod` @overload # out: None (default) def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> _MaskedArray[Any]: ... 
@overload # out: ndarray @@ -1909,6 +1914,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... + # Keep in sync with `ma.core.mean` @overload # type: ignore[override] def mean( self, @@ -1937,6 +1943,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): keepdims: bool | _NoValueType = ..., ) -> _ArrayT: ... + # keep roughly in sync with `ma.core.anom` @overload def anom(self, axis: SupportsIndex | None = None, dtype: None = None) -> Self: ... @overload @@ -1944,6 +1951,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def anom(self, axis: SupportsIndex | None, dtype: DTypeLike) -> MaskedArray[_ShapeT_co, dtype]: ... + # keep in sync with `std` and `ma.core.var` @overload # type: ignore[override] def var( self, @@ -1976,6 +1984,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): mean: _ArrayLikeNumber_co | _NoValueType = ..., ) -> _ArrayT: ... + # keep in sync with `var` and `ma.core.std` @overload # type: ignore[override] def std( self, @@ -2299,6 +2308,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): diagonal: Any flatten: Any + # keep in sync with `ndarray.repeat` @overload def repeat( self, @@ -2314,11 +2324,12 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): squeeze: Any + # keep in sync with `ndarray.swapaxes` def swapaxes( self, axis1: SupportsIndex, axis2: SupportsIndex, - / + /, ) -> MaskedArray[_AnyShape, _DTypeT_co]: ... # @@ -2553,52 +2564,370 @@ def ptp( keepdims: bool | _NoValueType = ..., ) -> _ArrayT: ... -@final -class _frommethod: - __name__: str - __doc__: str | None - reversed: Final[bool] - - def __init__(self, /, methodname: str, reversed: bool = False) -> None: ... - # TODO: specializable callable signatures - def __call__(self, /, a: Incomplete, *args: Incomplete, **params: Incomplete) -> Incomplete: ... - def getdoc(self, /) -> str | None: ... 
- -# (self, /) -ids: _frommethod = ... -nonzero: _frommethod = ... -harden_mask: _frommethod = ... -soften_mask: _frommethod = ... -shrink_mask: _frommethod = ... -# (self, /, order='C') -ravel: _frommethod = ... -# (self, /, *args, **params) -copy: _frommethod = ... -diagonal: _frommethod = ... -repeat: _frommethod = ... -swapaxes: _frommethod = ... -# (self, /, axis=None, dtype=None) -anomalies: _frommethod = ... -anom: _frommethod = ... # == anomalies -# (self, /, axis=None, out=None, keepdims=) -all: _frommethod = ... -any: _frommethod = ... -# (condition, self, /, axis=None, out=None) -# NOTE: this is the only `_frommethod` instance with `reversed=True` -compress: _frommethod = ... -# (self, /, axis=None, dtype=None, out=None) -cumprod: _frommethod = ... -cumsum: _frommethod = ... -# (self, /, axis=None, dtype=None, out=None, keepdims=) -mean: _frommethod = ... -prod: _frommethod = ... -product: _frommethod = ... -sum: _frommethod = ... -# (self, /, offset=0, axis1=0, axis2=1, dtype=None, out=None) -trace: _frommethod = ... -# (self, /, axis=None, dtype=None, out=None, ddof=0, keepdims=, mean=) -std: _frommethod = ... -var: _frommethod = ... +# we cannot meaningfully annotate `frommethod` further, because the callable signature +# of the return type fully depends on the *value* of `methodname` and `reversed` in +# a way that cannot be expressed in the Python type system. +def _frommethod(methodname: str, reversed: bool = False) -> types.FunctionType: ... + +# NOTE: The following `*_mask` functions will accept any array-like input runtime, but +# since their use-cases are specific to masks, they only accept `MaskedArray` inputs. + +# keep in sync with `MaskedArray.harden_mask` +def harden_mask(a: _MArrayT) -> _MArrayT: ... +# keep in sync with `MaskedArray.soften_mask` +def soften_mask(a: _MArrayT) -> _MArrayT: ... +# keep in sync with `MaskedArray.shrink_mask` +def shrink_mask(a: _MArrayT) -> _MArrayT: ... 
+ +# keep in sync with `MaskedArray.ids` +def ids(a: ArrayLike) -> tuple[int, int]: ... + +# keep in sync with `ndarray.nonzero` +def nonzero(a: ArrayLike) -> tuple[ndarray[tuple[int], np.dtype[intp]], ...]: ... + +# keep first overload in sync with `MaskedArray.ravel` +@overload +def ravel(a: np.ndarray[Any, _DTypeT], order: _OrderKACF = "C") -> MaskedArray[tuple[int], _DTypeT]: ... +@overload +def ravel(a: _ArrayLike[_ScalarT], order: _OrderKACF = "C") -> MaskedArray[tuple[int], np.dtype[_ScalarT]]: ... +@overload +def ravel(a: ArrayLike, order: _OrderKACF = "C") -> MaskedArray[tuple[int], _DTypeT_co]: ... + +# keep roughly in sync with `lib._function_base_impl.copy` +@overload +def copy(a: _MArrayT, order: _OrderKACF = "C") -> _MArrayT: ... +@overload +def copy(a: np.ndarray[_ShapeT, _DTypeT], order: _OrderKACF = "C") -> MaskedArray[_ShapeT, _DTypeT]: ... +@overload +def copy(a: _ArrayLike[_ScalarT], order: _OrderKACF = "C") -> _MaskedArray[_ScalarT]: ... +@overload +def copy(a: ArrayLike, order: _OrderKACF = "C") -> _MaskedArray[Incomplete]: ... + +# keep in sync with `_core.fromnumeric.diagonal` +@overload +def diagonal( + a: _ArrayLike[_ScalarT], + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, +) -> NDArray[_ScalarT]: ... +@overload +def diagonal( + a: ArrayLike, + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, +) -> NDArray[Incomplete]: ... + +# keep in sync with `_core.fromnumeric.repeat` +@overload +def repeat(a: _ArrayLike[_ScalarT], repeats: _ArrayLikeInt_co, axis: None = None) -> MaskedArray[tuple[int], dtype[_ScalarT]]: ... +@overload +def repeat(a: _ArrayLike[_ScalarT], repeats: _ArrayLikeInt_co, axis: SupportsIndex) -> _MaskedArray[_ScalarT]: ... +@overload +def repeat(a: ArrayLike, repeats: _ArrayLikeInt_co, axis: None = None) -> MaskedArray[tuple[int], dtype[Incomplete]]: ... 
+@overload +def repeat(a: ArrayLike, repeats: _ArrayLikeInt_co, axis: SupportsIndex) -> _MaskedArray[Incomplete]: ... + +# keep in sync with `_core.fromnumeric.swapaxes` +@overload +def swapaxes(a: _ArrayLike[_ScalarT], axis1: SupportsIndex, axis2: SupportsIndex) -> _MaskedArray[_ScalarT]: ... +@overload +def swapaxes(a: ArrayLike, axis1: SupportsIndex, axis2: SupportsIndex) -> _MaskedArray[Incomplete]: ... + +# NOTE: The `MaskedArray.anom` definition is specific to `MaskedArray`, so we need +# additional overloads to cover the array-like input here. +@overload # a: MaskedArray, dtype=None +def anom(a: _MArrayT, axis: SupportsIndex | None = None, dtype: None = None) -> _MArrayT: ... +@overload # a: array-like, dtype=None +def anom(a: _ArrayLike[_ScalarT], axis: SupportsIndex | None = None, dtype: None = None) -> _MaskedArray[_ScalarT]: ... +@overload # a: unknown array-like, dtype: dtype-like (positional) +def anom(a: ArrayLike, axis: SupportsIndex | None, dtype: _DTypeLike[_ScalarT]) -> _MaskedArray[_ScalarT]: ... +@overload # a: unknown array-like, dtype: dtype-like (keyword) +def anom(a: ArrayLike, axis: SupportsIndex | None = None, *, dtype: _DTypeLike[_ScalarT]) -> _MaskedArray[_ScalarT]: ... +@overload # a: unknown array-like, dtype: unknown dtype-like (positional) +def anom(a: ArrayLike, axis: SupportsIndex | None, dtype: DTypeLike) -> _MaskedArray[Incomplete]: ... +@overload # a: unknown array-like, dtype: unknown dtype-like (keyword) +def anom(a: ArrayLike, axis: SupportsIndex | None = None, *, dtype: DTypeLike) -> _MaskedArray[Incomplete]: ... + +anomalies = anom + +# Keep in sync with `any` and `MaskedArray.all` +@overload +def all(a: ArrayLike, axis: None = None, out: None = None, keepdims: Literal[False] | _NoValueType = ...) -> np.bool: ... +@overload +def all(a: ArrayLike, axis: _ShapeLike | None, out: None, keepdims: Literal[True]) -> _MaskedArray[np.bool]: ... 
+@overload +def all(a: ArrayLike, axis: _ShapeLike | None = None, out: None = None, *, keepdims: Literal[True]) -> _MaskedArray[np.bool]: ... +@overload +def all( + a: ArrayLike, axis: _ShapeLike | None = None, out: None = None, keepdims: bool | _NoValueType = ... +) -> np.bool | _MaskedArray[np.bool]: ... +@overload +def all(a: ArrayLike, axis: _ShapeLike | None, out: _ArrayT, keepdims: bool | _NoValueType = ...) -> _ArrayT: ... +@overload +def all(a: ArrayLike, axis: _ShapeLike | None = None, *, out: _ArrayT, keepdims: bool | _NoValueType = ...) -> _ArrayT: ... + +# Keep in sync with `all` and `MaskedArray.any` +@overload +def any(a: ArrayLike, axis: None = None, out: None = None, keepdims: Literal[False] | _NoValueType = ...) -> np.bool: ... +@overload +def any(a: ArrayLike, axis: _ShapeLike | None, out: None, keepdims: Literal[True]) -> _MaskedArray[np.bool]: ... +@overload +def any(a: ArrayLike, axis: _ShapeLike | None = None, out: None = None, *, keepdims: Literal[True]) -> _MaskedArray[np.bool]: ... +@overload +def any( + a: ArrayLike, axis: _ShapeLike | None = None, out: None = None, keepdims: bool | _NoValueType = ... +) -> np.bool | _MaskedArray[np.bool]: ... +@overload +def any(a: ArrayLike, axis: _ShapeLike | None, out: _ArrayT, keepdims: bool | _NoValueType = ...) -> _ArrayT: ... +@overload +def any(a: ArrayLike, axis: _ShapeLike | None = None, *, out: _ArrayT, keepdims: bool | _NoValueType = ...) -> _ArrayT: ... + +# NOTE: The `MaskedArray.compress` definition uses its `DTypeT_co` type parameter, +# which wouldn't work here for array-like inputs, so we need additional overloads. +@overload +def compress( + condition: _ArrayLikeBool_co, a: _ArrayLike[_ScalarT], axis: None = None, out: None = None +) -> MaskedArray[tuple[int], np.dtype[_ScalarT]]: ... +@overload +def compress( + condition: _ArrayLikeBool_co, a: _ArrayLike[_ScalarT], axis: _ShapeLike | None = None, out: None = None +) -> MaskedArray[_AnyShape, np.dtype[_ScalarT]]: ... 
+@overload +def compress(condition: _ArrayLikeBool_co, a: ArrayLike, axis: None = None, out: None = None) -> MaskedArray[tuple[int]]: ... +@overload +def compress( + condition: _ArrayLikeBool_co, a: ArrayLike, axis: _ShapeLike | None = None, out: None = None +) -> _MaskedArray[Incomplete]: ... +@overload +def compress(condition: _ArrayLikeBool_co, a: ArrayLike, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ... +@overload +def compress(condition: _ArrayLikeBool_co, a: ArrayLike, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... + +# Keep in sync with `cumprod` and `MaskedArray.cumsum` +@overload # out: None (default) +def cumsum( + a: ArrayLike, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None +) -> _MaskedArray[Incomplete]: ... +@overload # out: ndarray (positional) +def cumsum(a: ArrayLike, axis: SupportsIndex | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... +@overload # out: ndarray (kwarg) +def cumsum(a: ArrayLike, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... + +# Keep in sync with `cumsum` and `MaskedArray.cumsum` +@overload # out: None (default) +def cumprod( + a: ArrayLike, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None +) -> _MaskedArray[Incomplete]: ... +@overload # out: ndarray (positional) +def cumprod(a: ArrayLike, axis: SupportsIndex | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... +@overload # out: ndarray (kwarg) +def cumprod(a: ArrayLike, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... + +# Keep in sync with `sum`, `prod`, `product`, and `MaskedArray.mean` +@overload +def mean( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., +) -> Incomplete: ... 
+@overload +def mean( + a: ArrayLike, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def mean( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... + +# Keep in sync with `mean`, `prod`, `product`, and `MaskedArray.sum` +@overload +def sum( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., +) -> Incomplete: ... +@overload +def sum( + a: ArrayLike, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def sum( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... + +# Keep in sync with `product` and `MaskedArray.prod` +@overload +def prod( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., +) -> Incomplete: ... +@overload +def prod( + a: ArrayLike, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def prod( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... + +# Keep in sync with `prod` and `MaskedArray.prod` +@overload +def product( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., +) -> Incomplete: ... +@overload +def product( + a: ArrayLike, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... 
+@overload +def product( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... + +# Keep in sync with `MaskedArray.trace` and `_core.fromnumeric.trace` +@overload +def trace( + a: ArrayLike, + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + dtype: DTypeLike | None = None, + out: None = None, +) -> Incomplete: ... +@overload +def trace( + a: ArrayLike, + offset: SupportsIndex, + axis1: SupportsIndex, + axis2: SupportsIndex, + dtype: DTypeLike | None, + out: _ArrayT, +) -> _ArrayT: ... +@overload +def trace( + a: ArrayLike, + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, +) -> _ArrayT: ... + +# keep in sync with `std` and `MaskedArray.var` +@overload +def std( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., +) -> Incomplete: ... +@overload +def std( + a: ArrayLike, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def std( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., +) -> _ArrayT: ... + +# keep in sync with `std` and `MaskedArray.var` +@overload +def var( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., +) -> Incomplete: ... 
+@overload +def var( + a: ArrayLike, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def var( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., +) -> _ArrayT: ... # (a, b) minimum: _extrema_operation = ... From 959d38fdf1004b601f1e375ea78ffd3debda321d Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 29 Oct 2025 02:48:41 +0100 Subject: [PATCH 0767/1718] DOC: fix cross-reference warning for ambiguous `.ravel` --- doc/source/user/basics.copies.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/user/basics.copies.rst b/doc/source/user/basics.copies.rst index ec373673d815..6d8e78488e7e 100644 --- a/doc/source/user/basics.copies.rst +++ b/doc/source/user/basics.copies.rst @@ -123,8 +123,8 @@ shape attribute of the array. For example:: AttributeError: Incompatible shape for in-place modification. Use `.reshape()` to make a copy with the desired shape. -Taking the example of another operation, :func:`.ravel` returns a contiguous -flattened view of the array wherever possible. On the other hand, +Taking the example of another operation, :func:`numpy.ravel` returns a +contiguous flattened view of the array wherever possible. On the other hand, :meth:`.ndarray.flatten` always returns a flattened copy of the array. However, to guarantee a view in most cases, ``x.reshape(-1)`` may be preferable. 
From d01d4a709d773c484afd62f4cb3d294330f6d377 Mon Sep 17 00:00:00 2001 From: star1327p Date: Tue, 28 Oct 2025 19:48:30 -0700 Subject: [PATCH 0768/1718] DOC: Correct grammatical usage like a/an [skip actions][skip azp][skip cirrus] --- numpy/_core/src/common/simd/simd.h | 2 +- numpy/_core/src/multiarray/scalartypes.c.src | 2 +- numpy/_core/src/umath/fast_loop_macros.h | 2 +- numpy/_core/tests/test_ufunc.py | 2 +- numpy/distutils/exec_command.py | 2 +- numpy/f2py/symbolic.py | 4 ++-- numpy/lib/_index_tricks_impl.py | 6 +++--- 7 files changed, 10 insertions(+), 10 deletions(-) diff --git a/numpy/_core/src/common/simd/simd.h b/numpy/_core/src/common/simd/simd.h index fe4ca4da92f5..fd1be6e0c867 100644 --- a/numpy/_core/src/common/simd/simd.h +++ b/numpy/_core/src/common/simd/simd.h @@ -23,7 +23,7 @@ extern "C" { /* * clang commit an aggressive optimization behaviour when flag `-ftrapping-math` * isn't fully supported that's present at -O1 or greater. When partially loading a - * vector register for a operations that requires to fill up the remaining lanes + * vector register for an operation that requires to fill up the remaining lanes * with certain value for example divide operation needs to fill the remaining value * with non-zero integer to avoid fp exception divide-by-zero. * clang optimizer notices that the entire register is not needed for the store diff --git a/numpy/_core/src/multiarray/scalartypes.c.src b/numpy/_core/src/multiarray/scalartypes.c.src index 14d8bf7585e4..2d63dd6e3602 100644 --- a/numpy/_core/src/multiarray/scalartypes.c.src +++ b/numpy/_core/src/multiarray/scalartypes.c.src @@ -160,7 +160,7 @@ gentype_generic_method(PyObject *self, PyObject *args, PyObject *kwds, * valid self (a generic scalar) and an other item. * May fill self_item and/or other_arr (but not both) with non-NULL values. * - * Why this dance? When the other object is a exactly Python scalar something + * Why this dance? 
When the other object is exactly a Python scalar something * awkward happens historically in NumPy. * NumPy doesn't define a result, but the ufunc would cast to `astype(object)` * which is the same as `scalar.item()`. And that operation converts e.g. diff --git a/numpy/_core/src/umath/fast_loop_macros.h b/numpy/_core/src/umath/fast_loop_macros.h index 5143f414606e..42c2c9d8d04f 100644 --- a/numpy/_core/src/umath/fast_loop_macros.h +++ b/numpy/_core/src/umath/fast_loop_macros.h @@ -16,7 +16,7 @@ /* * largest simd vector size in bytes numpy supports - * it is currently a extremely large value as it is only used for memory + * it is currently an extremely large value as it is only used for memory * overlap checks */ #if NPY_SIMD > 0 diff --git a/numpy/_core/tests/test_ufunc.py b/numpy/_core/tests/test_ufunc.py index 2b01d37989cc..0211449c577f 100644 --- a/numpy/_core/tests/test_ufunc.py +++ b/numpy/_core/tests/test_ufunc.py @@ -1922,7 +1922,7 @@ def test_identityless_reduction_nonreorderable(self): assert_raises(ValueError, np.divide.reduce, a, axis=(0, 1)) def test_reduce_zero_axis(self): - # If we have a n x m array and do a reduction with axis=1, then we are + # If we have an n x m array and do a reduction with axis=1, then we are # doing n reductions, and each reduction takes an m-element array. For # a reduction operation without an identity, then: # n > 0, m > 0: fine diff --git a/numpy/distutils/exec_command.py b/numpy/distutils/exec_command.py index 2d06585a1497..c701465d9ade 100644 --- a/numpy/distutils/exec_command.py +++ b/numpy/distutils/exec_command.py @@ -114,7 +114,7 @@ def get_pythonexe(): return pythonexe def find_executable(exe, path=None, _cache={}): - """Return full path of a executable or None. + """Return full path of an executable or None. Symbolic links are not followed. 
""" diff --git a/numpy/f2py/symbolic.py b/numpy/f2py/symbolic.py index 820ae2ec9b9f..409b84da531c 100644 --- a/numpy/f2py/symbolic.py +++ b/numpy/f2py/symbolic.py @@ -155,14 +155,14 @@ def ewarn(message): class Expr: - """Represents a Fortran expression as a op-data pair. + """Represents a Fortran expression as an op-data pair. Expr instances are hashable and sortable. """ @staticmethod def parse(s, language=Language.C): - """Parse a Fortran expression to a Expr. + """Parse a Fortran expression to an Expr. """ return fromstring(s, language=language) diff --git a/numpy/lib/_index_tricks_impl.py b/numpy/lib/_index_tricks_impl.py index 666171f7c2aa..1d9358d6f8d2 100644 --- a/numpy/lib/_index_tricks_impl.py +++ b/numpy/lib/_index_tricks_impl.py @@ -472,9 +472,9 @@ class RClass(AxisConcatenator): Optional character strings placed as the first element of the index expression can be used to change the output. The strings 'r' or 'c' result in matrix output. If the result is 1-D and 'r' is specified a 1 x N (row) - matrix is produced. If the result is 1-D and 'c' is specified, then a N x 1 - (column) matrix is produced. If the result is 2-D then both provide the - same matrix result. + matrix is produced. If the result is 1-D and 'c' is specified, then + an N x 1 (column) matrix is produced. + If the result is 2-D then both provide the same matrix result. A string integer specifies which axis to stack multiple comma separated arrays along. A string of two comma-separated integers allows indication From d8e5d849789f501d890250430e48fdbcb43879b1 Mon Sep 17 00:00:00 2001 From: "Christine P. 
Chai" Date: Tue, 28 Oct 2025 20:21:18 -0700 Subject: [PATCH 0769/1718] Update numpy/lib/_index_tricks_impl.py [skip actions][skip azp][skip cirrus] Co-authored-by: Joren Hammudoglu --- numpy/lib/_index_tricks_impl.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/lib/_index_tricks_impl.py b/numpy/lib/_index_tricks_impl.py index 1d9358d6f8d2..40fef85b1853 100644 --- a/numpy/lib/_index_tricks_impl.py +++ b/numpy/lib/_index_tricks_impl.py @@ -473,8 +473,8 @@ class RClass(AxisConcatenator): expression can be used to change the output. The strings 'r' or 'c' result in matrix output. If the result is 1-D and 'r' is specified a 1 x N (row) matrix is produced. If the result is 1-D and 'c' is specified, then - an N x 1 (column) matrix is produced. - If the result is 2-D then both provide the same matrix result. + an N x 1 (column) matrix is produced. + If the result is 2-D then both provide the same matrix result. A string integer specifies which axis to stack multiple comma separated arrays along. A string of two comma-separated integers allows indication From b20568e62a39c79ee1a8eecdd1a36ccb35dbbbc0 Mon Sep 17 00:00:00 2001 From: Riku Sakamoto Date: Thu, 30 Oct 2025 00:26:04 +0900 Subject: [PATCH 0770/1718] CI: Update ARM job (armhf_test) to use Ubuntu 24.04 Update the GitHub Actions image for the `armhf_test` job to Ubuntu 24.04-arm. (See gh-30086) --- .github/workflows/linux.yml | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 5a9fb69c5178..b749c60f3cc3 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -166,7 +166,7 @@ jobs: # running on aarch64 (ARM 64-bit) GitHub runners. 
needs: [smoke_test] if: github.repository == 'numpy/numpy' - runs-on: ubuntu-22.04-arm + runs-on: ubuntu-24.04-arm steps: - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: @@ -177,11 +177,9 @@ jobs: - name: Creates new container run: | docker run --name the_container --interactive \ - -v $(pwd):/numpy arm32v7/ubuntu:22.04 /bin/linux32 /bin/bash -c " + -v $(pwd):/numpy arm32v7/ubuntu:24.04 /bin/linux32 /bin/bash -c " apt update && - apt install -y ninja-build cmake git python3 python-is-python3 python3-dev python3-pip python3-venv && - python -m pip install -r /numpy/requirements/build_requirements.txt && - python -m pip install -r /numpy/requirements/test_requirements.txt + apt install -y ninja-build cmake git python3 python-is-python3 python3-dev python3-pip python3-venv " docker commit the_container the_container @@ -190,7 +188,12 @@ jobs: docker run --rm -e "TERM=xterm-256color" \ -v $(pwd):/numpy the_container \ /bin/script -e -q -c "/bin/linux32 /bin/bash --noprofile --norc -eo pipefail -c ' - cd /numpy && spin build + cd /numpy && + python -m venv venv && + source venv/bin/activate && + python -m pip install -r /numpy/requirements/build_requirements.txt && + python -m pip install -r /numpy/requirements/test_requirements.txt && + spin build '" - name: Meson Log @@ -202,7 +205,7 @@ jobs: docker run --rm -e "TERM=xterm-256color" \ -v $(pwd):/numpy the_container \ /bin/script -e -q -c "/bin/linux32 /bin/bash --noprofile --norc -eo pipefail -c ' - cd /numpy && spin test -m full -- --timeout=600 --durations=10 + cd /numpy && source venv/bin/activate && spin test -m full -- --timeout=600 --durations=10 '" benchmark: From fa84b04c51ae903caf8bcff66e47f74b514582af Mon Sep 17 00:00:00 2001 From: Koki Watanabe <56009584+math-hiyoko@users.noreply.github.com> Date: Thu, 30 Oct 2025 10:48:47 +0900 Subject: [PATCH 0771/1718] ENH: np.unique: support hash based unique for complex dtype (#29537) * save: editting * enh: implement unique_complex * 
fix: spin build * enh: impl unique_numeric for integer, float and complex * fix: case when complex is nan * fix: case when order is not guaranteed * fix: lint * fix: type of data * fix: error * enh: change template type for unordered_set * enh: set function * enh: refactoring * enh: refactoring * enh: refactor test * enh: apply inline * doc: performance and change * enh: test check -0.0 and -np.nan * enh: use fnv-hash for complex * fix: add const * fix: use memcpy * fix: use static_cast * fix: remove support for float * fix: change variable name * fix: inline * fix: template type * fix: padding buffer * fix: padding buffer * fix: use buffer * fix: add constexpr * fix: copy of long double * fix: valie_real -> value_imag * fix: reinterpret value pointer * fix: use NPY_BITSOF_* * fix: use fnv magic prime * enh: refactoring * doc: fix docs * doc: fix docs * enh: add comment * fix: use sizeof to hash complex * fix: use reinterpret_cast * fix: impl hash_complex_large * fix: use constexpr * fix: define hash_complex_clongdouble * fix: use NPY_SIZEOF_LONGDOUBLE * fix: use IEEEl2bitsrep * fix: remove designated initializers * fix: comment * fix: small fix * fix: change case for handling bit expression * fix: refactoring --- doc/release/upcoming_changes/29537.change.rst | 7 + .../upcoming_changes/29537.performance.rst | 9 + numpy/_core/meson.build | 1 + numpy/_core/src/multiarray/unique.cpp | 218 ++++++++++++++++-- numpy/lib/tests/test_arraysetops.py | 27 ++- 5 files changed, 233 insertions(+), 29 deletions(-) create mode 100644 doc/release/upcoming_changes/29537.change.rst create mode 100644 doc/release/upcoming_changes/29537.performance.rst diff --git a/doc/release/upcoming_changes/29537.change.rst b/doc/release/upcoming_changes/29537.change.rst new file mode 100644 index 000000000000..63abbbb5a347 --- /dev/null +++ b/doc/release/upcoming_changes/29537.change.rst @@ -0,0 +1,7 @@ +``unique_values`` for complex dtypes may return unsorted data 
+------------------------------------------------------------- +np.unique now supports hash‐based duplicate removal for complex dtypes. +This enhancement extends the hash‐table algorithm +to all complex types ('c'), and their extended precision variants. +The hash‐based method provides faster extraction of unique values +but does not guarantee that the result will be sorted. diff --git a/doc/release/upcoming_changes/29537.performance.rst b/doc/release/upcoming_changes/29537.performance.rst new file mode 100644 index 000000000000..8c78dc202a2e --- /dev/null +++ b/doc/release/upcoming_changes/29537.performance.rst @@ -0,0 +1,9 @@ +Performance improvements to ``np.unique`` for complex dtypes +------------------------------------------------------------ +The hash-based algorithm for unique extraction now also supports +complex dtypes, offering noticeable performance gains. + +In our benchmarks on complex128 arrays with 200,000 elements, +the hash-based approach was about 1.4–1.5× faster +than the sort-based baseline when there were 20% of unique values, +and about 5× faster when there were 0.2% of unique values. diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index a3d8416cdffa..dc07586bcf8e 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -1282,6 +1282,7 @@ unique_hash_so = static_library( include_directories: [ 'include', 'src/common', + 'src/npymath', ], dependencies: [ py_dep, diff --git a/numpy/_core/src/multiarray/unique.cpp b/numpy/_core/src/multiarray/unique.cpp index 979fb8fae0a4..910c5f7c731e 100644 --- a/numpy/_core/src/multiarray/unique.cpp +++ b/numpy/_core/src/multiarray/unique.cpp @@ -17,6 +17,8 @@ extern "C" { #include "fnv.h" #include "npy_argparse.h" + #include "numpy/npy_math.h" + #include "numpy/halffloat.h" } // This is to use RAII pattern to handle cpp exceptions while avoiding memory leaks. 
@@ -34,11 +36,135 @@ FinalAction finally(F f) { } template <typename T> +size_t hash_integer(const T *value, npy_bool equal_nan) { + return std::hash<T>{}(*value); +} + +template <typename S, typename T, S (*real)(T), S (*imag)(T)> +size_t hash_complex(const T *value, npy_bool equal_nan) { + S value_real = real(*value); + S value_imag = imag(*value); + int hasnan = npy_isnan(value_real) || npy_isnan(value_imag); + if (equal_nan && hasnan) { + return 0; + } + + // Now, equal_nan is false or neither of the values is NaN. + // So we don't need to worry about NaN here. + const unsigned char* value_bytes = reinterpret_cast<const unsigned char*>(value); + size_t hash = npy_fnv1a(value_bytes, sizeof(T)); + + return hash; +} + +size_t hash_complex_clongdouble(const npy_clongdouble *value, npy_bool equal_nan) { + npy_longdouble value_real = npy_creall(*value); + npy_longdouble value_imag = npy_cimagl(*value); + int hasnan = npy_isnan(value_real) || npy_isnan(value_imag); + if (equal_nan && hasnan) { + return 0; + } + + // Now, equal_nan is false or neither of the values is NaN. + // So we don't need to worry about NaN here. + // Some floating-point complex dtypes (e.g., npy_complex256) include undefined or + // unused bits in their binary representation + // (see: https://github.com/numpy/numpy/blob/main/numpy/_core/src/npymath/npy_math_private.h#L254-L261). + // Because hashing the raw bit pattern would make the hash depend on those + // undefined bits, we extract the mantissa, exponent, and sign components + // explicitly and pack them into a buffer to ensure the hash is well-defined.
+ #if defined(HAVE_LDOUBLE_INTEL_EXTENDED_12_BYTES_LE) || \ + defined(HAVE_LDOUBLE_INTEL_EXTENDED_16_BYTES_LE) || \ + defined(HAVE_LDOUBLE_MOTOROLA_EXTENDED_12_BYTES_BE) + constexpr size_t SIZEOF_LDOUBLE_MAN = sizeof(ldouble_man_t); + constexpr size_t SIZEOF_LDOUBLE_EXP = sizeof(ldouble_exp_t); + constexpr size_t SIZEOF_LDOUBLE_SIGN = sizeof(ldouble_sign_t); + constexpr size_t SIZEOF_BUFFER = 2 * (SIZEOF_LDOUBLE_MAN + SIZEOF_LDOUBLE_MAN + SIZEOF_LDOUBLE_EXP + SIZEOF_LDOUBLE_SIGN); + unsigned char buffer[SIZEOF_BUFFER]; + + union IEEEl2bitsrep bits_real{value_real}, bits_imag{value_imag}; + size_t offset = 0; + + for (const IEEEl2bitsrep &bits: {bits_real, bits_imag}) { + ldouble_man_t manh = GET_LDOUBLE_MANH(bits); + ldouble_man_t manl = GET_LDOUBLE_MANL(bits); + ldouble_exp_t exp = GET_LDOUBLE_EXP(bits); + ldouble_sign_t sign = GET_LDOUBLE_SIGN(bits); + + std::memcpy(buffer + offset, &manh, SIZEOF_LDOUBLE_MAN); + offset += SIZEOF_LDOUBLE_MAN; + std::memcpy(buffer + offset, &manl, SIZEOF_LDOUBLE_MAN); + offset += SIZEOF_LDOUBLE_MAN; + std::memcpy(buffer + offset, &exp, SIZEOF_LDOUBLE_EXP); + offset += SIZEOF_LDOUBLE_EXP; + std::memcpy(buffer + offset, &sign, SIZEOF_LDOUBLE_SIGN); + offset += SIZEOF_LDOUBLE_SIGN; + } + #else + constexpr size_t SIZEOF_BUFFER = NPY_SIZEOF_CLONGDOUBLE; + const unsigned char* buffer = reinterpret_cast(value); + #endif + + size_t hash = npy_fnv1a(buffer, SIZEOF_BUFFER); + + return hash; +} + +template +int equal_integer(const T *lhs, const T *rhs, npy_bool equal_nan) { + return *lhs == *rhs; +} + +template +int equal_complex(const T *lhs, const T *rhs, npy_bool equal_nan) { + S lhs_real = real(*lhs); + S lhs_imag = imag(*lhs); + int lhs_isnan = npy_isnan(lhs_real) || npy_isnan(lhs_imag); + S rhs_real = real(*rhs); + S rhs_imag = imag(*rhs); + int rhs_isnan = npy_isnan(rhs_real) || npy_isnan(rhs_imag); + + if (lhs_isnan && rhs_isnan) { + return equal_nan; + } + if (lhs_isnan || rhs_isnan) { + return false; + } + // Now both lhs and rhs 
are not NaN. + return (lhs_real == rhs_real) && (lhs_imag == rhs_imag); +} + +template +void copy_integer(char *data, T *value) { + std::copy_n(value, 1, (T *)data); + return; +} + +template < + typename S, + typename T, + S (*real)(T), + S (*imag)(T), + void (*setreal)(T *, const S), + void (*setimag)(T *, const S) +> +void copy_complex(char *data, T *value) { + setreal((T *)data, real(*value)); + setimag((T *)data, imag(*value)); + return; +} + +template < + typename T, + size_t (*hash_func)(const T *, npy_bool), + int (*equal_func)(const T *, const T *, npy_bool), + void (*copy_func)(char *, T *) +> static PyObject* -unique_integer(PyArrayObject *self, npy_bool equal_nan) +unique_numeric(PyArrayObject *self, npy_bool equal_nan) { /* - * Returns a new NumPy array containing the unique values of the input array of integer. + * Returns a new NumPy array containing the unique values of the input array of numeric (integer or complex). * This function uses hashing to identify uniqueness efficiently. */ NPY_ALLOW_C_API_DEF; @@ -52,18 +178,27 @@ unique_integer(PyArrayObject *self, npy_bool equal_nan) // number of elements in the input array npy_intp isize = PyArray_SIZE(self); + auto hash = [equal_nan](const T *value) -> size_t { + return hash_func(value, equal_nan); + }; + auto equal = [equal_nan](const T *lhs, const T *rhs) -> bool { + return equal_func(lhs, rhs, equal_nan); + }; + // Reserve hashset capacity in advance to minimize reallocations and collisions. // We use min(isize, HASH_TABLE_INITIAL_BUCKETS) as the initial bucket count: // - Reserving for all elements (isize) may over-allocate when there are few unique values. // - Using a moderate upper bound HASH_TABLE_INITIAL_BUCKETS(1024) keeps memory usage reasonable (4 KiB for pointers). 
// See discussion: https://github.com/numpy/numpy/pull/28767#discussion_r2064267631 - std::unordered_set hashset(std::min(isize, (npy_intp)HASH_TABLE_INITIAL_BUCKETS)); + std::unordered_set hashset( + std::min(isize, (npy_intp)HASH_TABLE_INITIAL_BUCKETS), hash, equal + ); // Input array is one-dimensional, enabling efficient iteration using strides. char *idata = PyArray_BYTES(self); npy_intp istride = PyArray_STRIDES(self)[0]; for (npy_intp i = 0; i < isize; i++, idata += istride) { - hashset.insert(*(T *)idata); + hashset.insert((T *)idata); } npy_intp length = hashset.size(); @@ -95,7 +230,7 @@ unique_integer(PyArrayObject *self, npy_bool equal_nan) npy_intp ostride = PyArray_STRIDES((PyArrayObject *)res_obj)[0]; // Output array is one-dimensional, enabling efficient iteration using strides. for (auto it = hashset.begin(); it != hashset.end(); it++, odata += ostride) { - *(T *)odata = *it; + copy_func(odata, *it); } return res_obj; @@ -311,25 +446,60 @@ unique_vstring(PyArrayObject *self, npy_bool equal_nan) // this map contains the functions used for each item size. 
typedef std::function function_type; std::unordered_map unique_funcs = { - {NPY_BYTE, unique_integer}, - {NPY_UBYTE, unique_integer}, - {NPY_SHORT, unique_integer}, - {NPY_USHORT, unique_integer}, - {NPY_INT, unique_integer}, - {NPY_UINT, unique_integer}, - {NPY_LONG, unique_integer}, - {NPY_ULONG, unique_integer}, - {NPY_LONGLONG, unique_integer}, - {NPY_ULONGLONG, unique_integer}, - {NPY_INT8, unique_integer}, - {NPY_INT16, unique_integer}, - {NPY_INT32, unique_integer}, - {NPY_INT64, unique_integer}, - {NPY_UINT8, unique_integer}, - {NPY_UINT16, unique_integer}, - {NPY_UINT32, unique_integer}, - {NPY_UINT64, unique_integer}, - {NPY_DATETIME, unique_integer}, + {NPY_BYTE, unique_numeric, equal_integer, copy_integer>}, + {NPY_UBYTE, unique_numeric, equal_integer, copy_integer>}, + {NPY_SHORT, unique_numeric, equal_integer, copy_integer>}, + {NPY_USHORT, unique_numeric, equal_integer, copy_integer>}, + {NPY_INT, unique_numeric, equal_integer, copy_integer>}, + {NPY_UINT, unique_numeric, equal_integer, copy_integer>}, + {NPY_LONG, unique_numeric, equal_integer, copy_integer>}, + {NPY_ULONG, unique_numeric, equal_integer, copy_integer>}, + {NPY_LONGLONG, unique_numeric, equal_integer, copy_integer>}, + {NPY_ULONGLONG, unique_numeric, equal_integer, copy_integer>}, + {NPY_CFLOAT, unique_numeric< + npy_cfloat, + hash_complex, + equal_complex, + copy_complex + > + }, + {NPY_CDOUBLE, unique_numeric< + npy_cdouble, + hash_complex, + equal_complex, + copy_complex + > + }, + {NPY_CLONGDOUBLE, unique_numeric< + npy_clongdouble, + hash_complex_clongdouble, + equal_complex, + copy_complex + > + }, + {NPY_INT8, unique_numeric, equal_integer, copy_integer>}, + {NPY_INT16, unique_numeric, equal_integer, copy_integer>}, + {NPY_INT32, unique_numeric, equal_integer, copy_integer>}, + {NPY_INT64, unique_numeric, equal_integer, copy_integer>}, + {NPY_UINT8, unique_numeric, equal_integer, copy_integer>}, + {NPY_UINT16, unique_numeric, equal_integer, copy_integer>}, + {NPY_UINT32, 
unique_numeric, equal_integer, copy_integer>}, + {NPY_UINT64, unique_numeric, equal_integer, copy_integer>}, + {NPY_DATETIME, unique_numeric, equal_integer, copy_integer>}, + {NPY_COMPLEX64, unique_numeric< + npy_complex64, + hash_complex, + equal_complex, + copy_complex + > + }, + {NPY_COMPLEX128, unique_numeric< + npy_complex128, + hash_complex, + equal_complex, + copy_complex + > + }, {NPY_STRING, unique_string}, {NPY_UNICODE, unique_string}, {NPY_VSTRING, unique_vstring}, diff --git a/numpy/lib/tests/test_arraysetops.py b/numpy/lib/tests/test_arraysetops.py index 86d93a569d98..9d5a4d693fa5 100644 --- a/numpy/lib/tests/test_arraysetops.py +++ b/numpy/lib/tests/test_arraysetops.py @@ -726,7 +726,10 @@ def test_unique_1d(self): # test for ticket #2799 aa = [1. + 0.j, 1 - 1.j, 1] - assert_array_equal(np.unique(aa), [1. - 1.j, 1. + 0.j]) + assert_array_equal( + np.sort(np.unique(aa)), + [1. - 1.j, 1.], + ) # test for ticket #4785 a = [(1, 2), (1, 2), (2, 3)] @@ -761,7 +764,8 @@ def test_unique_1d(self): ua_idx = [2, 0, 1] ua_inv = [1, 2, 0, 2] ua_cnt = [1, 1, 2] - assert_equal(np.unique(a), ua) + # order of unique values is not guaranteed + assert_equal(np.sort(np.unique(a)), np.sort(ua)) assert_equal(np.unique(a, return_index=True), (ua, ua_idx)) assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv)) assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt)) @@ -772,7 +776,8 @@ def test_unique_1d(self): ua_idx = [2, 0, 3] ua_inv = [1, 2, 0, 2, 2] ua_cnt = [1, 1, 3] - assert_equal(np.unique(a), ua) + # order of unique values is not guaranteed + assert_equal(np.sort(np.unique(a)), np.sort(ua)) assert_equal(np.unique(a, return_index=True), (ua, ua_idx)) assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv)) assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt)) @@ -1199,7 +1204,13 @@ def test_unique_nanequals(self): assert_array_equal(not_unq, np.array([1, np.nan, np.nan, np.nan])) def test_unique_array_api_functions(self): - arr = 
np.array([np.nan, 1, 4, 1, 3, 4, np.nan, 5, 1]) + arr = np.array( + [ + np.nan, 1.0, 0.0, 4.0, -np.nan, + -0.0, 1.0, 3.0, 4.0, np.nan, + 5.0, -0.0, 1.0, -np.nan, 0.0, + ], + ) for res_unique_array_api, res_unique in [ ( @@ -1226,8 +1237,14 @@ def test_unique_array_api_functions(self): ) ]: assert len(res_unique_array_api) == len(res_unique) + if not isinstance(res_unique_array_api, tuple): + res_unique_array_api = (res_unique_array_api,) + if not isinstance(res_unique, tuple): + res_unique = (res_unique,) + for actual, expected in zip(res_unique_array_api, res_unique): - assert_array_equal(actual, expected) + # Order of output is not guaranteed + assert_equal(np.sort(actual), np.sort(expected)) def test_unique_inverse_shape(self): # Regression test for https://github.com/numpy/numpy/issues/25552 From 32887277983a3e8ff0f24413ba48e6da1f2b4337 Mon Sep 17 00:00:00 2001 From: Abhishek Tiwari Date: Thu, 30 Oct 2025 09:40:22 +0530 Subject: [PATCH 0772/1718] Update doc/release/upcoming_changes/30068.expired.rst Co-authored-by: Joren Hammudoglu --- doc/release/upcoming_changes/30068.expired.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/release/upcoming_changes/30068.expired.rst b/doc/release/upcoming_changes/30068.expired.rst index b651371f7061..a5363771c4b7 100644 --- a/doc/release/upcoming_changes/30068.expired.rst +++ b/doc/release/upcoming_changes/30068.expired.rst @@ -1,5 +1,5 @@ ``numpy.array2string`` and ``numpy.sum`` deprecations finalized ---------------------------------------------- +--------------------------------------------------------------- The following long-deprecated APIs have been removed or converted to errors: From dc3549896bdc187d96275534b371c50778b6bf6f Mon Sep 17 00:00:00 2001 From: mattip Date: Thu, 30 Oct 2025 07:41:17 +0200 Subject: [PATCH 0773/1718] BLD: use blobless checkout on CircleCI [skip actions][skip azp][skip cirrus] --- .circleci/config.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff 
--git a/.circleci/config.yml b/.circleci/config.yml index 1760eca727dd..62c29203dac1 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -17,7 +17,8 @@ jobs: build: <<: *defaults steps: - - checkout + - checkout: + method: blobless - run: name: check skip @@ -110,7 +111,8 @@ jobs: deploy: <<: *defaults steps: - - checkout + - checkout: + method: blobless - attach_workspace: at: ~/repo From 823c9a85f3c33052beb0292d66319faa1139f431 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 30 Oct 2025 11:25:07 +0100 Subject: [PATCH 0774/1718] ENH: Make FPE blas check a runtime check for all arm systems If an arm system uses SME some BLAS versions just set FPEs spuriously. The culprit is really Accelerate so we might limit this to Mac OS as well (and then with an SME check also -- see changes prior to this commit). However, some OpenBLAS versions also caused this, although OpenBLAS is likely to clear the FPEs on their side. Closes gh-29820 --- numpy/_core/src/common/blas_utils.c | 108 ++++++------------ numpy/_core/src/common/blas_utils.h | 10 +- numpy/_core/src/multiarray/multiarraymodule.c | 19 ++- numpy/testing/_private/utils.py | 9 +- 4 files changed, 59 insertions(+), 87 deletions(-) diff --git a/numpy/_core/src/common/blas_utils.c b/numpy/_core/src/common/blas_utils.c index 409d3818ae0f..1c2ffdebbcac 100644 --- a/numpy/_core/src/common/blas_utils.c +++ b/numpy/_core/src/common/blas_utils.c @@ -1,6 +1,9 @@ +#include + #include "numpy/npy_math.h" // npy_get_floatstatus_barrier #include "numpy/numpyconfig.h" // NPY_VISIBILITY_HIDDEN #include "blas_utils.h" +#include "npy_cblas.h" #include #include @@ -11,92 +14,50 @@ #endif #if NPY_BLAS_CHECK_FPE_SUPPORT - -/* Return whether we're running on macOS 15.4 or later +/* + * Static variable to cache runtime check of BLAS FPE support. 
*/ -static inline bool -is_macOS_version_15_4_or_later(void){ -#if !defined(__APPLE__) - return false; -#else - char *osProductVersion = NULL; - size_t size = 0; - bool ret = false; - - // Query how large OS version string should be - if(-1 == sysctlbyname("kern.osproductversion", NULL, &size, NULL, 0)){ - goto cleanup; - } - - osProductVersion = malloc(size + 1); - - // Get the OS version string - if(-1 == sysctlbyname("kern.osproductversion", osProductVersion, &size, NULL, 0)){ - goto cleanup; - } - - osProductVersion[size] = '\0'; - - // Parse the version string - int major = 0, minor = 0; - if(2 > sscanf(osProductVersion, "%d.%d", &major, &minor)) { - goto cleanup; - } - - if (major > 15 || (major == 15 && minor >= 4)) { - ret = true; - } + static bool blas_supports_fpe = true; -cleanup: - if(osProductVersion){ - free(osProductVersion); - } - - return ret; -#endif -} - -/* ARM Scalable Matrix Extension (SME) raises all floating-point error flags +/* + * ARM Scalable Matrix Extension (SME) raises all floating-point error flags * when it's used regardless of values or operations. As a consequence, * when SME is used, all FPE state is lost and special handling is needed. * * For NumPy, SME is not currently used directly, but can be used via * BLAS / LAPACK libraries. This function does a runtime check for whether * BLAS / LAPACK can use SME and special handling around FPE is required. + * + * This may be an Accelerate bug (at least OpenBLAS consider it that way) + * but when we find an ARM system with SVE we do a runtime check for whether + * FPEs are spuriously given. */ -static inline bool -BLAS_can_use_ARM_SME(void) +static inline int +set_BLAS_causes_spurious_FPEs(void) { -#if defined(__APPLE__) && defined(__aarch64__) && defined(ACCELERATE_NEW_LAPACK) - // ARM SME can be used by Apple's Accelerate framework for BLAS / LAPACK - // - macOS 15.4+ - // - Apple silicon M4+ - - // Does OS / Accelerate support ARM SME? 
- if(!is_macOS_version_15_4_or_later()){ - return false; + // These are all small, so just work on stack to not worry about error + // handling. + double *x = PyMem_Malloc(20*20*3*sizeof(double)); + if (x == NULL) { + PyErr_NoMemory(); + return -1; } + double *y = x + 20*20; + double *res = y + 20*20; - // Does hardware support SME? - int has_SME = 0; - size_t size = sizeof(has_SME); - if(-1 == sysctlbyname("hw.optional.arm.FEAT_SME", &has_SME, &size, NULL, 0)){ - return false; - } + npy_clear_floatstatus_barrier((char *)x); - if(has_SME){ - return true; - } -#endif + CBLAS_FUNC(cblas_dgemm)( + CblasRowMajor, CblasNoTrans, CblasNoTrans, 20, 20, 20, 1., + x, 20, y, 20, 0., res, 20); + PyMem_Free(x); - // default assume SME is not used - return false; + int fpe_status = npy_get_floatstatus_barrier((char *)x); + // Entries were all zero, so we shouldn't see any FPEs + blas_supports_fpe = fpe_status != 0; + return 0; } -/* Static variable to cache runtime check of BLAS FPE support. - */ -static bool blas_supports_fpe = true; - #endif // NPY_BLAS_CHECK_FPE_SUPPORT @@ -110,19 +71,20 @@ npy_blas_supports_fpe(void) #endif } -NPY_VISIBILITY_HIDDEN void +NPY_VISIBILITY_HIDDEN int npy_blas_init(void) { #if NPY_BLAS_CHECK_FPE_SUPPORT - blas_supports_fpe = !BLAS_can_use_ARM_SME(); + return set_BLAS_causes_spurious_FPEs(); #endif + return 0; } NPY_VISIBILITY_HIDDEN int npy_get_floatstatus_after_blas(void) { #if NPY_BLAS_CHECK_FPE_SUPPORT - if(!blas_supports_fpe){ + if (!blas_supports_fpe){ // BLAS does not support FPE and we need to return FPE state. // Instead of clearing and then grabbing state, just return // that no flags are set. 
diff --git a/numpy/_core/src/common/blas_utils.h b/numpy/_core/src/common/blas_utils.h index 34d6321c2920..115e60576557 100644 --- a/numpy/_core/src/common/blas_utils.h +++ b/numpy/_core/src/common/blas_utils.h @@ -2,10 +2,14 @@ #include -/* NPY_BLAS_CHECK_FPE_SUPPORT controls whether we need a runtime check +/* + * NPY_BLAS_CHECK_FPE_SUPPORT controls whether we need a runtime check * for floating-point error (FPE) support in BLAS. + * The known culprit right now is SME likely only on mac, but that is not + * quite clear. + * This checks always on all ARM (it is a small check overall). */ -#if defined(__APPLE__) && defined(__aarch64__) && defined(ACCELERATE_NEW_LAPACK) +#if defined(__aarch64__) #define NPY_BLAS_CHECK_FPE_SUPPORT 1 #else #define NPY_BLAS_CHECK_FPE_SUPPORT 0 @@ -13,7 +17,7 @@ /* Initialize BLAS environment, if needed */ -NPY_VISIBILITY_HIDDEN void +NPY_VISIBILITY_HIDDEN int npy_blas_init(void); /* Runtime check if BLAS supports floating-point errors. diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index 4ab3f5bae02c..44ac8a678bbb 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -4448,6 +4448,17 @@ _set_numpy_warn_if_no_mem_policy(PyObject *NPY_UNUSED(self), PyObject *arg) } +static PyObject * +_blas_supports_fpe(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args)) { + if (npy_blas_supports_fpe()) { + Py_RETURN_TRUE; + } + else { + Py_RETURN_FALSE; + } +} + + static PyObject * _reload_guard(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args)) { #if !defined(PYPY_VERSION) @@ -4688,6 +4699,8 @@ static struct PyMethodDef array_module_methods[] = { METH_NOARGS, NULL}, {"_set_madvise_hugepage", (PyCFunction)_set_madvise_hugepage, METH_O, NULL}, + {"_blas_supports_fpe", (PyCFunction)_blas_supports_fpe, + METH_NOARGS, NULL}, {"_reload_guard", (PyCFunction)_reload_guard, METH_NOARGS, "Give a warning on reload and big warning in 
sub-interpreters."}, @@ -4904,9 +4917,9 @@ _multiarray_umath_exec(PyObject *m) { return -1; } -#if NPY_BLAS_CHECK_FPE_SUPPORT - npy_blas_init(); -#endif + if (npy_blas_init() < 0) { + return -1; + } #if defined(MS_WIN64) && defined(__GNUC__) PyErr_WarnEx(PyExc_Warning, diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index 9be98f9d2fbe..967d67e14a13 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -90,14 +90,7 @@ class KnownFailureException(Exception): IS_PYPY = sys.implementation.name == 'pypy' IS_PYSTON = hasattr(sys, "pyston_version_info") HAS_REFCOUNT = getattr(sys, 'getrefcount', None) is not None and not IS_PYSTON -BLAS_SUPPORTS_FPE = True -if platform.system() == 'Darwin' or platform.machine() == 'arm64': - try: - blas = np.__config__.CONFIG['Build Dependencies']['blas'] - if blas['name'] == 'accelerate': - BLAS_SUPPORTS_FPE = False - except KeyError: - pass +BLAS_SUPPORTS_FPE = np._core._multiarray_umath._blas_supports_fpe() HAS_LAPACK64 = numpy.linalg._umath_linalg._ilp64 From 7bc099dc9f3ce73d08caf001af57cb825691fa81 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 30 Oct 2025 11:48:53 +0100 Subject: [PATCH 0775/1718] Guard for HAVE_CBLAS and always compile the helpers --- numpy/_core/meson.build | 2 +- numpy/_core/src/common/blas_utils.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index dc07586bcf8e..6dcbaea0cf1a 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -1109,6 +1109,7 @@ endforeach # ------------------------------ src_multiarray_umath_common = [ 'src/common/array_assign.c', + 'src/common/blas_utils.c', 'src/common/gil_utils.c', 'src/common/mem_overlap.c', 'src/common/npy_argparse.c', @@ -1123,7 +1124,6 @@ src_multiarray_umath_common = [ ] if have_blas src_multiarray_umath_common += [ - 'src/common/blas_utils.c', 'src/common/cblasfuncs.c', 'src/common/python_xerbla.c', ] diff 
--git a/numpy/_core/src/common/blas_utils.h b/numpy/_core/src/common/blas_utils.h index 115e60576557..3f5bb735281d 100644 --- a/numpy/_core/src/common/blas_utils.h +++ b/numpy/_core/src/common/blas_utils.h @@ -9,7 +9,7 @@ * quite clear. * This checks always on all ARM (it is a small check overall). */ -#if defined(__aarch64__) +#if defined(__aarch64__) && defined(HAVE_CBLAS) #define NPY_BLAS_CHECK_FPE_SUPPORT 1 #else #define NPY_BLAS_CHECK_FPE_SUPPORT 0 From fbd08f47676bea39aae58fe7b89dd72642be4644 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 30 Oct 2025 12:31:19 +0100 Subject: [PATCH 0776/1718] ooops, needs to be a calloc of course. --- numpy/_core/src/common/blas_utils.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/_core/src/common/blas_utils.c b/numpy/_core/src/common/blas_utils.c index 1c2ffdebbcac..365289067b88 100644 --- a/numpy/_core/src/common/blas_utils.c +++ b/numpy/_core/src/common/blas_utils.c @@ -37,13 +37,13 @@ set_BLAS_causes_spurious_FPEs(void) { // These are all small, so just work on stack to not worry about error // handling. 
- double *x = PyMem_Malloc(20*20*3*sizeof(double)); + double *x = PyMem_Calloc(20 * 20 * 3, sizeof(double)); if (x == NULL) { PyErr_NoMemory(); return -1; } - double *y = x + 20*20; - double *res = y + 20*20; + double *y = x + 20 * 20; + double *res = y + 20 * 20; npy_clear_floatstatus_barrier((char *)x); From 56243c991894dfbae2a6f9142927441e98c0f3f3 Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Thu, 30 Oct 2025 13:41:35 +0200 Subject: [PATCH 0777/1718] BLD: update scipy-openblas, use -Dpkg_config_path (#30049) --- .github/workflows/linux_blas.yml | 5 +---- requirements/ci32_requirements.txt | 2 +- requirements/ci_requirements.txt | 4 ++-- 3 files changed, 4 insertions(+), 7 deletions(-) diff --git a/.github/workflows/linux_blas.yml b/.github/workflows/linux_blas.yml index c282365ab012..7b7647044002 100644 --- a/.github/workflows/linux_blas.yml +++ b/.github/workflows/linux_blas.yml @@ -89,16 +89,13 @@ jobs: fi mkdir -p ./.openblas python -c"import scipy_openblas32 as ob32; print(ob32.get_pkg_config())" > ./.openblas/scipy-openblas.pc - echo "PKG_CONFIG_PATH=${{ github.workspace }}/.openblas" >> $GITHUB_ENV - ld_library_path=$(python -c"import scipy_openblas32 as ob32; print(ob32.get_lib_dir())") - echo "LD_LIBRARY_PATH=$ld_library_path" >> $GITHUB_ENV - name: Build shell: 'script -q -e -c "bash --noprofile --norc -eo pipefail {0}"' env: TERM: xterm-256color run: - spin build -- --werror -Dallow-noblas=false + spin build -- --werror -Dallow-noblas=false -Dpkg_config_path=${PWD}/.openblas - name: Check build-internal dependencies run: diff --git a/requirements/ci32_requirements.txt b/requirements/ci32_requirements.txt index 4cb678d5d047..19e6b119b73f 100644 --- a/requirements/ci32_requirements.txt +++ b/requirements/ci32_requirements.txt @@ -1,3 +1,3 @@ spin==0.15 # Keep this in sync with ci_requirements.txt -scipy-openblas32==0.3.30.0.1 +scipy-openblas32==0.3.30.0.6 diff --git a/requirements/ci_requirements.txt b/requirements/ci_requirements.txt index 
aab147bf8c17..605132ee720a 100644 --- a/requirements/ci_requirements.txt +++ b/requirements/ci_requirements.txt @@ -1,4 +1,4 @@ spin==0.15 # Keep this in sync with ci32_requirements.txt -scipy-openblas32==0.3.30.0.1 -scipy-openblas64==0.3.30.0.1 +scipy-openblas32==0.3.30.0.6 +scipy-openblas64==0.3.30.0.6 From 2f291c2899d59a974a96a875c1f4ffd7c7215a00 Mon Sep 17 00:00:00 2001 From: Britney Whittington <103079612+bwhitt7@users.noreply.github.com> Date: Thu, 30 Oct 2025 13:27:27 -0400 Subject: [PATCH 0778/1718] TST: Add thread-safe testing guidelines (#30101) * DOC: Add parallel testing guidelines * DOC: Fix code example * STY: Fix header length --- doc/TESTS.rst | 97 +++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 71 insertions(+), 26 deletions(-) diff --git a/doc/TESTS.rst b/doc/TESTS.rst index b74dc700b1b9..e57cf7711da8 100644 --- a/doc/TESTS.rst +++ b/doc/TESTS.rst @@ -88,8 +88,17 @@ To run the test suite in multiple threads:: $ spin test -p 4 # run each test under 4 threads $ spin test -p auto -- --skip-thread-unsafe=true # run ONLY tests that are thread-safe -When you write new tests (see below), it is worth testing to make sure they do not fail -under ``pytest-run-parallel``, since the CI jobs makes use of it. +When you write new tests, it is worth testing to make sure they do not fail +under ``pytest-run-parallel``, since the CI jobs make use of it. Some tips on how to +write thread-safe tests can be found `here <#writing-thread-safe-tests>`_. + +.. note:: + + Ideally you should run ``pytest-run-parallel`` using a `free-threaded build of Python + `_ that is 3.14 or + higher. If you decide to use a version of Python that is not free-threaded, you will + need to set the environment variables ``PYTHON_CONTEXT_AWARE_WARNINGS`` and + ``PYTHON_THREAD_INHERIT_CONTEXT`` to 1. 
Running doctests ---------------- @@ -225,36 +234,34 @@ Similarly for methods:: def test_simple(self): assert_(zzz() == 'Hello from zzz') -Easier setup and teardown functions / methods ---------------------------------------------- - -Testing looks for module-level or class method-level setup and teardown -functions by name; thus:: - - def setup_module(): - """Module-level setup""" - print('doing setup') - - def teardown_module(): - """Module-level teardown""" - print('doing teardown') +Setup and teardown methods +-------------------------- +NumPy originally used xunit setup and teardown, a feature of `pytest`. We now encourage +the usage of setup and teardown methods that are called explicitly by the tests that +need them:: class TestMe: - def setup_method(self): - """Class-level setup""" + def setup(self): print('doing setup') + return 1 - def teardown_method(): - """Class-level teardown""" + def teardown(self): print('doing teardown') + def test_xyz(self): + x = self.setup() + assert x == 1 + self.teardown() + +This approach is thread-safe, ensuring tests can run under ``pytest-run-parallel``. +Using pytest setup fixtures (such as xunit setup methods) is generally not thread-safe +and will likely cause thread-safety test failures. -Setup and teardown functions to functions and methods are known as "fixtures", -and they should be used sparingly. ``pytest`` supports more general fixture at various scopes which may be used -automatically via special arguments. For example, the special argument name -``tmpdir`` is used in test to create a temporary directory. +automatically via special arguments. For example, the special argument name +``tmp_path`` is used in tests to create temporary directories. However, +fixtures should be used sparingly. 
Parametric tests ---------------- @@ -397,9 +404,9 @@ Tests on random data Tests on random data are good, but since test failures are meant to expose new bugs or regressions, a test that passes most of the time but fails occasionally with no code changes is not helpful. Make the random data -deterministic by setting the random number seed before generating it. Use -either Python's ``random.seed(some_number)`` or NumPy's -``numpy.random.seed(some_number)``, depending on the source of random numbers. +deterministic by setting the random number seed before generating it. +Use ``rng = numpy.random.RandomState(some_number)`` to set a seed on a +local instance of `numpy.random.RandomState`. Alternatively, you can use `Hypothesis`_ to generate arbitrary data. Hypothesis manages both Python's and Numpy's random seeds for you, and @@ -410,6 +417,44 @@ The advantages over random generation include tools to replay and share failures without requiring a fixed seed, reporting *minimal* examples for each failure, and better-than-naive-random techniques for triggering bugs. +Writing thread-safe tests +------------------------- + +Writing thread-safe tests may require some trial-and-error. Generally you should +follow the guidelines stated so far, especially when it comes to `setup methods +<#setup-and-teardown-methods>`_ and `seeding random data <#tests-on-random-data>`_. +Explicit setup and the usage of local RNG are thread-safe practices. Here are tips +for some other common problems you may run into. + +Using ``pytest.mark.parametrize`` may occasionally cause thread-safety issues. +To fix this, you can use ``copy()``:: + + @pytest.mark.parametrize('dimensionality', [3, 10, 25]) + @pytest.mark.parametrize('dtype', [np.float32, np.float64]) + def test_solve(dimensionality, dtype): + dimen = dimensionality.copy() + d = dtype.copy() + # use these copied variables instead + ... 
+ +If you are testing something that is inherently thread-unsafe, you can label your +test with ``pytest.mark.thread_unsafe`` so that it will run under a single thread +and not cause test failures:: + + @pytest.mark.thread_unsafe(reason="reason this test is thread-unsafe") + def test_thread_unsafe(): + ... + +Some examples of what should be labeled as thread-unsafe: + +- Usage of ``sys.stdout`` and ``sys.stderr`` +- Mutation of global data, like docstrings, modules, garbage collectors, etc. +- Tests that require a lot of memory, since they could cause crashes. + +Additionally, some ``pytest`` fixtures are thread-unsafe, such as ``monkeypatch`` and +``capsys``. However, ``pytest-run-parallel`` will automatically mark these as +thread-unsafe if you decide to use them. Some fixtures have been patched to be +thread-safe, like ``tmp_path``. Documentation for ``numpy.test`` -------------------------------- From d8eb2256ee4f21c90e8498c6008e08c960b3d174 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 30 Oct 2025 13:00:33 -0600 Subject: [PATCH 0779/1718] MAINT: Bump github/codeql-action from 4.31.0 to 4.31.2 (#30106) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 4.31.0 to 4.31.2. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/4e94bd11f71e507f7f87df81788dff88d1dacbfb...0499de31b99561a6d14a36a5f662c2a54f91beee) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 4.31.2 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index b147f3e78c1a..45b88b851312 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@4e94bd11f71e507f7f87df81788dff88d1dacbfb # v4.31.0 + uses: github/codeql-action/init@0499de31b99561a6d14a36a5f662c2a54f91beee # v4.31.2 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@4e94bd11f71e507f7f87df81788dff88d1dacbfb # v4.31.0 + uses: github/codeql-action/autobuild@0499de31b99561a6d14a36a5f662c2a54f91beee # v4.31.2 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@4e94bd11f71e507f7f87df81788dff88d1dacbfb # v4.31.0 + uses: github/codeql-action/analyze@0499de31b99561a6d14a36a5f662c2a54f91beee # v4.31.2 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index e0d5de712c0f..0a65e932b12d 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
- name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@4e94bd11f71e507f7f87df81788dff88d1dacbfb # v2.1.27 + uses: github/codeql-action/upload-sarif@0499de31b99561a6d14a36a5f662c2a54f91beee # v2.1.27 with: sarif_file: results.sarif From 40f5f6fff96934999850337e9212f2b5470513fe Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 30 Oct 2025 16:20:30 +0100 Subject: [PATCH 0780/1718] BUG: ufunc method signatures Co-authored-by: Sebastian Berg --- numpy/_core/_add_newdocs.py | 28 +++++++++++++++++++++++-- numpy/_core/function_base.py | 16 +++++++------- numpy/_core/tests/test_function_base.py | 2 +- numpy/_core/tests/test_ufunc.py | 16 ++++++++++++++ 4 files changed, 52 insertions(+), 10 deletions(-) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index 80f82736aeaf..98bdec4e3c74 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -4799,7 +4799,7 @@ add_newdoc('numpy._core.multiarray', 'get_handler_name', """ - get_handler_name(a: ndarray) -> str,None + get_handler_name(a: ndarray) -> str | None Return the name of the memory handler used by `a`. If not provided, return the name of the memory handler that will be used to allocate data for the @@ -5171,6 +5171,9 @@ add_newdoc('numpy._core', 'ufunc', ('reduce', """ + reduce($self, array, /, axis=0, dtype=None, out=None, **kwargs) + -- + reduce(array, axis=0, dtype=None, out=None, keepdims=False, initial=, where=True) Reduces `array`'s dimension by one, by applying ufunc along one axis. @@ -5297,6 +5300,9 @@ add_newdoc('numpy._core', 'ufunc', ('accumulate', """ + accumulate($self, array, /, axis=0, dtype=None, out=None) + -- + accumulate(array, axis=0, dtype=None, out=None) Accumulate the result of applying the operator to all elements. 
@@ -5375,6 +5381,9 @@ add_newdoc('numpy._core', 'ufunc', ('reduceat', """ + reduceat($self, array, /, indices, axis=0, dtype=None, out=None) + -- + reduceat(array, indices, axis=0, dtype=None, out=None) Performs a (local) reduce with specified slices over a single axis. @@ -5483,6 +5492,9 @@ add_newdoc('numpy._core', 'ufunc', ('outer', r""" + outer($self, A, B, /, **kwargs) + -- + outer(A, B, /, **kwargs) Apply the ufunc `op` to all pairs (a, b) with a in `A` and b in `B`. @@ -5554,6 +5566,9 @@ add_newdoc('numpy._core', 'ufunc', ('at', """ + at($self, a, indices, b=None, /) + -- + at(a, indices, b=None, /) Performs unbuffered in place operation on operand 'a' for elements @@ -5605,6 +5620,9 @@ add_newdoc('numpy._core', 'ufunc', ('resolve_dtypes', """ + resolve_dtypes($self, dtypes, *, signature=None, casting=None, reduction=False) + -- + resolve_dtypes(dtypes, *, signature=None, casting=None, reduction=False) Find the dtypes NumPy will use for the operation. Both input and @@ -5677,6 +5695,9 @@ add_newdoc('numpy._core', 'ufunc', ('_resolve_dtypes_and_context', """ + _resolve_dtypes_and_context($self, dtypes, *, signature=None, casting=None, reduction=False) + -- + _resolve_dtypes_and_context(dtypes, *, signature=None, casting=None, reduction=False) See `numpy.ufunc.resolve_dtypes` for parameter information. This @@ -5700,6 +5721,9 @@ add_newdoc('numpy._core', 'ufunc', ('_get_strided_loop', """ + _get_strided_loop($self, call_info, /, *, fixed_strides=None) + -- + _get_strided_loop(call_info, /, *, fixed_strides=None) This function fills in the ``call_info`` capsule to include all @@ -6453,7 +6477,7 @@ add_newdoc('numpy._core.multiarray', 'dtype', ('__gt__', """ - __ge__(value, /) + __gt__(value, /) Return ``self > value``. 
diff --git a/numpy/_core/function_base.py b/numpy/_core/function_base.py index 12ab2a7ef546..7fcc6b4f770a 100644 --- a/numpy/_core/function_base.py +++ b/numpy/_core/function_base.py @@ -1,4 +1,5 @@ import functools +import inspect import operator import types import warnings @@ -477,6 +478,9 @@ def _add_docstring(obj, doc, warn_on_python): "Prefer to attach it directly to the source.", UserWarning, stacklevel=3) + + doc = inspect.cleandoc(doc) + try: add_docstring(obj, doc) except Exception: @@ -494,10 +498,10 @@ def add_newdoc(place, obj, doc, warn_on_python=True): ---------- place : str The absolute name of the module to import from - obj : str or None + obj : str | None The name of the object to add documentation to, typically a class or function name. - doc : {str, Tuple[str, str], List[Tuple[str, str]]} + doc : str | tuple[str, str] | list[tuple[str, str]] If a string, the documentation to apply to `obj` If a tuple, then the first element is interpreted as an attribute @@ -534,12 +538,10 @@ def add_newdoc(place, obj, doc, warn_on_python=True): if isinstance(doc, str): if "${ARRAY_FUNCTION_LIKE}" in doc: doc = overrides.get_array_function_like_doc(new, doc) - _add_docstring(new, doc.strip(), warn_on_python) + _add_docstring(new, doc, warn_on_python) elif isinstance(doc, tuple): attr, docstring = doc - _add_docstring(getattr(new, attr), docstring.strip(), warn_on_python) + _add_docstring(getattr(new, attr), docstring, warn_on_python) elif isinstance(doc, list): for attr, docstring in doc: - _add_docstring( - getattr(new, attr), docstring.strip(), warn_on_python - ) + _add_docstring(getattr(new, attr), docstring, warn_on_python) diff --git a/numpy/_core/tests/test_function_base.py b/numpy/_core/tests/test_function_base.py index 3a8552de2d36..c6e10397b3ff 100644 --- a/numpy/_core/tests/test_function_base.py +++ b/numpy/_core/tests/test_function_base.py @@ -492,7 +492,7 @@ def test_add_doc(self): # test that np.add_newdoc did attach a docstring successfully: tgt = 
"Current flat index into the array." assert_equal(np._core.flatiter.index.__doc__[:len(tgt)], tgt) - assert_(len(np._core.ufunc.identity.__doc__) > 300) + assert_(len(np._core.ufunc.identity.__doc__) > 250) assert_(len(np.lib._index_tricks_impl.mgrid.__doc__) > 300) @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") diff --git a/numpy/_core/tests/test_ufunc.py b/numpy/_core/tests/test_ufunc.py index 0211449c577f..a29fae0539e4 100644 --- a/numpy/_core/tests/test_ufunc.py +++ b/numpy/_core/tests/test_ufunc.py @@ -1,4 +1,5 @@ import ctypes as ct +import inspect import itertools import pickle import sys @@ -2976,6 +2977,21 @@ def test_ufunc_input_floatingpoint_error(bad_offset): np.add(arr, arr, dtype=np.intp, casting="unsafe") +@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") +@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") +@pytest.mark.parametrize( + "methodname", + ["__call__", "accumulate", "at", "outer", "reduce", "reduceat", "resolve_dtypes"], +) +def test_ufunc_method_signatures(methodname: str): + method = getattr(np.ufunc, methodname) + + try: + _ = inspect.signature(method) + except ValueError as e: + pytest.fail(e.args[0]) + + def test_trivial_loop_invalid_cast(): # This tests the fast-path "invalid cast", see gh-19904. 
with pytest.raises(TypeError, From 9d2f82eda826f952d06c971803fd6d004f6f33ec Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 30 Oct 2025 21:48:27 +0100 Subject: [PATCH 0781/1718] TYP: update the `ufunc` method stubs to match their new signatures --- numpy/__init__.pyi | 43 +- numpy/_typing/_ufunc.pyi | 487 +++++++++++----------- numpy/typing/tests/data/reveal/ufuncs.pyi | 38 +- 3 files changed, 304 insertions(+), 264 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 86cae3b3e48f..6f4163626c3c 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -6,7 +6,7 @@ import ctypes as ct import array as _array import datetime as dt from abc import abstractmethod -from types import EllipsisType, GetSetDescriptorType, ModuleType, TracebackType, MappingProxyType, GenericAlias +from types import EllipsisType, ModuleType, TracebackType, MappingProxyType, GenericAlias from decimal import Decimal from fractions import Fraction from uuid import UUID @@ -5671,7 +5671,7 @@ class ufunc: @property def __name__(self) -> LiteralString: ... @property - def __qualname__(self) -> LiteralString: ... + def __qualname__(self) -> LiteralString: ... # pyright: ignore[reportIncompatibleVariableOverride] @property def __doc__(self) -> str: ... # type: ignore[override] @property @@ -5700,18 +5700,43 @@ class ufunc: @property def signature(self) -> LiteralString | None: ... - def __call__(self, *args: Any, **kwargs: Any) -> Any: ... + def __call__(self, /, *args: Any, **kwargs: Any) -> Any: ... + # The next four methods will always exist, but they will just # raise a ValueError ufuncs with that don't accept two input # arguments and return one output argument. Because of that we # can't type them very precisely. - def reduce(self, /, *args: Any, **kwargs: Any) -> Any: ... - def accumulate(self, /, *args: Any, **kwargs: Any) -> NDArray[Any]: ... - def reduceat(self, /, *args: Any, **kwargs: Any) -> NDArray[Any]: ... - def outer(self, *args: Any, **kwargs: Any) -> Any: ... 
- # Similarly at won't be defined for ufuncs that return multiple + def accumulate( + self, + array: ArrayLike, + /, + axis: SupportsIndex = 0, + dtype: DTypeLike | None = None, + out: ndarray | EllipsisType | None = None, + ) -> NDArray[Incomplete]: ... + def reduce( + self, + array: ArrayLike, + /, + axis: _ShapeLike | None = 0, + dtype: DTypeLike | None = None, + out: ndarray | EllipsisType | None = None, + **kwargs: Incomplete, + ) -> Incomplete: ... + def reduceat( + self, + array: ArrayLike, + /, + indices: _ArrayLikeInt_co, + axis: SupportsIndex = 0, + dtype: DTypeLike | None = None, + out: ndarray | EllipsisType | None = None, + ) -> NDArray[Incomplete]: ... + def outer(self, A: ArrayLike, B: ArrayLike, /, **kwargs: Incomplete) -> NDArray[Incomplete]: ... + + # Similarly `at` won't be defined for ufuncs that return multiple # outputs, so we can't type it very precisely. - def at(self, /, *args: Any, **kwargs: Any) -> None: ... + def at(self, a: ndarray, indices: _ArrayLikeInt_co, b: ArrayLike | None = None, /) -> None: ... # def resolve_dtypes( diff --git a/numpy/_typing/_ufunc.pyi b/numpy/_typing/_ufunc.pyi index bcb423e58110..9d3fa0e5335c 100644 --- a/numpy/_typing/_ufunc.pyi +++ b/numpy/_typing/_ufunc.pyi @@ -6,12 +6,14 @@ four private subclasses, one for each combination of `~ufunc.nin` and `~ufunc.nout`. 
""" # noqa: PYI021 +from _typeshed import Incomplete from types import EllipsisType from typing import ( Any, Generic, Literal, LiteralString, + Never, NoReturn, Protocol, SupportsIndex, @@ -49,7 +51,7 @@ _Signature = TypeVar("_Signature", bound=LiteralString, covariant=True) _NIn = TypeVar("_NIn", bound=int, covariant=True) _NOut = TypeVar("_NOut", bound=int, covariant=True) _ReturnType_co = TypeVar("_ReturnType_co", covariant=True) -_ArrayT = TypeVar("_ArrayT", bound=np.ndarray[Any, Any]) +_ArrayT = TypeVar("_ArrayT", bound=np.ndarray) @type_check_only class _SupportsArrayUFunc(Protocol): @@ -69,6 +71,11 @@ class _UFunc3Kwargs(TypedDict, total=False): subok: bool signature: _3Tuple[str | None] | str | None +@type_check_only +class _ReduceKwargs(TypedDict, total=False): + initial: Incomplete # = + where: _ArrayLikeBool_co | None # = True + # NOTE: `reduce`, `accumulate`, `reduceat` and `outer` raise a ValueError for # ufuncs that don't accept two input arguments and return one output argument. # In such cases the respective methods return `NoReturn` @@ -86,7 +93,7 @@ class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i @property def __name__(self) -> _NameType: ... @property - def __qualname__(self) -> _NameType: ... + def __qualname__(self) -> _NameType: ... # pyright: ignore[reportIncompatibleVariableOverride] @property def ntypes(self) -> _NTypes: ... @property @@ -105,62 +112,57 @@ class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i self, x1: _ScalarLike_co, /, - out: EllipsisType | None = ..., + out: None = None, *, - where: _ArrayLikeBool_co | None = ..., + dtype: DTypeLike | None = None, + where: _ArrayLikeBool_co | None = True, casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike | None = ..., subok: bool = ..., signature: str | _2Tuple[str | None] = ..., - ) -> Any: ... + ) -> Incomplete: ... 
@overload def __call__( self, x1: ArrayLike, /, - out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = ..., + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, *, - where: _ArrayLikeBool_co | None = ..., + dtype: DTypeLike | None = None, + where: _ArrayLikeBool_co | None = True, casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike | None = ..., subok: bool = ..., signature: str | _2Tuple[str | None] = ..., - ) -> NDArray[Any]: ... + ) -> NDArray[Incomplete]: ... @overload def __call__( self, x1: _SupportsArrayUFunc, /, - out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = ..., + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, *, - where: _ArrayLikeBool_co | None = ..., + dtype: DTypeLike | None = None, + where: _ArrayLikeBool_co | None = True, casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike | None = ..., subok: bool = ..., signature: str | _2Tuple[str | None] = ..., - ) -> Any: ... + ) -> Incomplete: ... - def at( - self, - a: _SupportsArrayUFunc, - indices: _ArrayLikeInt_co, - /, - ) -> None: ... + def accumulate(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduce(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduceat(self, array: Never, /, indices: Never) -> NoReturn: ... # type: ignore[override] + def outer(self, A: Never, B: Never, /) -> NoReturn: ... # type: ignore[override] - def reduce(self, *args: object, **kwargs: object) -> NoReturn: ... - def accumulate(self, *args: object, **kwargs: object) -> NoReturn: ... - def reduceat(self, *args: object, **kwargs: object) -> NoReturn: ... - def outer(self, *args: object, **kwargs: object) -> NoReturn: ... + def at(self, a: np.ndarray | _SupportsArrayUFunc, indices: _ArrayLikeInt_co, /) -> None: ... 
# type: ignore[override] @type_check_only class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] @property def __name__(self) -> _NameType: ... @property - def __qualname__(self) -> _NameType: ... + def __qualname__(self) -> _NameType: ... # pyright: ignore[reportIncompatibleVariableOverride] @property def ntypes(self) -> _NTypes: ... @property @@ -184,121 +186,141 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i *, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], - ) -> Any: ... + ) -> Incomplete: ... @overload # (array-like, array) -> array def __call__( self, x1: ArrayLike, - x2: NDArray[Any], + x2: np.ndarray, /, - out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = None, + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, *, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], - ) -> NDArray[Any]: ... + ) -> NDArray[Incomplete]: ... @overload # (array, array-like) -> array def __call__( self, - x1: NDArray[Any], + x1: np.ndarray, x2: ArrayLike, /, - out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = None, + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, *, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], - ) -> NDArray[Any]: ... + ) -> NDArray[Incomplete]: ... @overload # (array-like, array-like, out=array) -> array def __call__( self, x1: ArrayLike, x2: ArrayLike, /, - out: NDArray[Any] | tuple[NDArray[Any]], + out: np.ndarray | tuple[np.ndarray], *, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], - ) -> NDArray[Any]: ... + ) -> NDArray[Incomplete]: ... 
@overload # (array-like, array-like) -> array | scalar def __call__( self, x1: ArrayLike, x2: ArrayLike, /, - out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = None, + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, *, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], - ) -> NDArray[Any] | Any: ... + ) -> NDArray[Incomplete] | Incomplete: ... - def at( + def accumulate( self, - a: NDArray[Any], - indices: _ArrayLikeInt_co, - b: ArrayLike, + array: ArrayLike, /, - ) -> None: ... + axis: SupportsIndex = 0, + dtype: DTypeLike | None = None, + out: np.ndarray | EllipsisType | None = None, + ) -> NDArray[Incomplete]: ... + @overload # type: ignore[override] + def reduce( # out=None (default), keepdims=False (default) + self, + array: ArrayLike, + /, + axis: _ShapeLike | None = 0, + dtype: DTypeLike | None = None, + out: None = None, + *, + keepdims: Literal[False] = False, + **kwargs: Unpack[_ReduceKwargs], + ) -> Incomplete: ... + @overload # out=ndarray or out=... def reduce( self, array: ArrayLike, - axis: _ShapeLike | None = ..., - dtype: DTypeLike | None = ..., - out: NDArray[Any] | EllipsisType | None = ..., - keepdims: bool = ..., - initial: Any = ..., - where: _ArrayLikeBool_co = ..., - ) -> Any: ... - - def accumulate( + /, + axis: _ShapeLike | None = 0, + dtype: DTypeLike | None = None, + *, + out: np.ndarray | EllipsisType, + keepdims: bool = False, + **kwargs: Unpack[_ReduceKwargs], + ) -> NDArray[Incomplete]: ... + @overload # keepdims=True + def reduce( self, array: ArrayLike, - axis: SupportsIndex = ..., - dtype: DTypeLike | None = ..., - out: NDArray[Any] | EllipsisType | None = ..., - ) -> NDArray[Any]: ... + /, + axis: _ShapeLike | None = 0, + dtype: DTypeLike | None = None, + out: np.ndarray | EllipsisType | None = None, + *, + keepdims: Literal[True], + **kwargs: Unpack[_ReduceKwargs], + ) -> NDArray[Incomplete]: ... 
def reduceat( self, array: ArrayLike, + /, indices: _ArrayLikeInt_co, - axis: SupportsIndex = ..., - dtype: DTypeLike | None = ..., - out: NDArray[Any] | EllipsisType | None = ..., - ) -> NDArray[Any]: ... + axis: SupportsIndex = 0, + dtype: DTypeLike | None = None, + out: np.ndarray | EllipsisType | None = None, + ) -> NDArray[Incomplete]: ... - @overload # (scalar, scalar) -> scalar - def outer( + @overload # type: ignore[override] + def outer( # (scalar, scalar) -> scalar self, A: _ScalarLike_co, B: _ScalarLike_co, /, *, - out: EllipsisType | None = None, + out: None = None, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], - ) -> Any: ... + ) -> Incomplete: ... @overload # (array-like, array) -> array def outer( self, A: ArrayLike, - B: NDArray[Any], + B: np.ndarray, /, *, - out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = None, + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], - ) -> NDArray[Any]: ... + ) -> NDArray[Incomplete]: ... @overload # (array, array-like) -> array def outer( self, - A: NDArray[Any], + A: np.ndarray, B: ArrayLike, /, *, - out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = None, + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], - ) -> NDArray[Any]: ... + ) -> NDArray[Incomplete]: ... @overload # (array-like, array-like, out=array) -> array def outer( self, @@ -306,10 +328,10 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i B: ArrayLike, /, *, - out: NDArray[Any] | tuple[NDArray[Any]], + out: np.ndarray | tuple[np.ndarray] | EllipsisType, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], - ) -> NDArray[Any]: ... + ) -> NDArray[Incomplete]: ... 
@overload # (array-like, array-like) -> array | scalar def outer( self, @@ -317,17 +339,25 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i B: ArrayLike, /, *, - out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = None, + out: None = None, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], - ) -> NDArray[Any] | Any: ... + ) -> NDArray[Incomplete] | Incomplete: ... + + def at( # type: ignore[override] + self, + a: np.ndarray | _SupportsArrayUFunc, + indices: _ArrayLikeInt_co, + b: ArrayLike, + /, + ) -> None: ... @type_check_only class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] @property def __name__(self) -> _NameType: ... @property - def __qualname__(self) -> _NameType: ... + def __qualname__(self) -> _NameType: ... # pyright: ignore[reportIncompatibleVariableOverride] @property def ntypes(self) -> _NTypes: ... @property @@ -350,58 +380,58 @@ class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i /, *, out: EllipsisType | None = ..., - where: _ArrayLikeBool_co | None = ..., + dtype: DTypeLike | None = None, + where: _ArrayLikeBool_co | None = True, casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike | None = ..., subok: bool = ..., signature: str | _3Tuple[str | None] = ..., - ) -> _2Tuple[Any]: ... + ) -> _2Tuple[Incomplete]: ... 
@overload def __call__( self, x1: ArrayLike, - out1: NDArray[Any] | EllipsisType | None = ..., - out2: NDArray[Any] | None = ..., + out1: np.ndarray | EllipsisType | None = ..., + out2: np.ndarray | None = ..., /, *, - out: _2Tuple[NDArray[Any]] | EllipsisType = ..., - where: _ArrayLikeBool_co | None = ..., + out: _2Tuple[np.ndarray] | EllipsisType = ..., + dtype: DTypeLike | None = None, + where: _ArrayLikeBool_co | None = True, casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike | None = ..., subok: bool = ..., signature: str | _3Tuple[str | None] = ..., - ) -> _2Tuple[NDArray[Any]]: ... + ) -> _2Tuple[NDArray[Incomplete]]: ... @overload def __call__( self, x1: _SupportsArrayUFunc, - out1: NDArray[Any] | EllipsisType | None = ..., - out2: NDArray[Any] | None = ..., + out1: np.ndarray | EllipsisType | None = ..., + out2: np.ndarray | None = ..., /, *, - out: _2Tuple[NDArray[Any]] | EllipsisType = ..., - where: _ArrayLikeBool_co | None = ..., + out: _2Tuple[np.ndarray] | EllipsisType = ..., + dtype: DTypeLike | None = None, + where: _ArrayLikeBool_co | None = True, casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike | None = ..., subok: bool = ..., signature: str | _3Tuple[str | None] = ..., - ) -> _2Tuple[Any]: ... + ) -> _2Tuple[Incomplete]: ... - def at(self, *args: object, **kwargs: object) -> NoReturn: ... - def reduce(self, *args: object, **kwargs: object) -> NoReturn: ... - def accumulate(self, *args: object, **kwargs: object) -> NoReturn: ... - def reduceat(self, *args: object, **kwargs: object) -> NoReturn: ... - def outer(self, *args: object, **kwargs: object) -> NoReturn: ... + def accumulate(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduce(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduceat(self, array: Never, /, indices: Never) -> NoReturn: ... # type: ignore[override] + def outer(self, A: Never, B: Never, /) -> NoReturn: ... 
# type: ignore[override] + def at(self, a: Never, indices: Never, /) -> NoReturn: ... # type: ignore[override] @type_check_only class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] @property def __name__(self) -> _NameType: ... @property - def __qualname__(self) -> _NameType: ... + def __qualname__(self) -> _NameType: ... # pyright: ignore[reportIncompatibleVariableOverride] @property def ntypes(self) -> _NTypes: ... @property @@ -425,43 +455,43 @@ class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i /, *, out: EllipsisType | None = ..., - where: _ArrayLikeBool_co | None = ..., + dtype: DTypeLike | None = None, + where: _ArrayLikeBool_co | None = True, casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike | None = ..., subok: bool = ..., signature: str | _4Tuple[str | None] = ..., - ) -> _2Tuple[Any]: ... + ) -> _2Tuple[Incomplete]: ... @overload def __call__( self, x1: ArrayLike, x2: ArrayLike, - out1: NDArray[Any] | EllipsisType | None = ..., - out2: NDArray[Any] | None = ..., + out1: np.ndarray | EllipsisType | None = ..., + out2: np.ndarray | None = ..., /, *, - out: _2Tuple[NDArray[Any]] | EllipsisType = ..., - where: _ArrayLikeBool_co | None = ..., + out: _2Tuple[np.ndarray] | EllipsisType = ..., + dtype: DTypeLike | None = None, + where: _ArrayLikeBool_co | None = True, casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike | None = ..., subok: bool = ..., signature: str | _4Tuple[str | None] = ..., - ) -> _2Tuple[NDArray[Any]]: ... + ) -> _2Tuple[NDArray[Incomplete]]: ... - def at(self, *args: object, **kwargs: object) -> NoReturn: ... - def reduce(self, *args: object, **kwargs: object) -> NoReturn: ... - def accumulate(self, *args: object, **kwargs: object) -> NoReturn: ... - def reduceat(self, *args: object, **kwargs: object) -> NoReturn: ... - def outer(self, *args: object, **kwargs: object) -> NoReturn: ... 
+ def accumulate(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduce(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduceat(self, array: Never, /, indices: Never) -> NoReturn: ... # type: ignore[override] + def outer(self, A: Never, B: Never, /) -> NoReturn: ... # type: ignore[override] + def at(self, a: Never, indices: Never, b: Never, /) -> NoReturn: ... # type: ignore[override] @type_check_only class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType, _Signature]): # type: ignore[misc] @property def __name__(self) -> _NameType: ... @property - def __qualname__(self) -> _NameType: ... + def __qualname__(self) -> _NameType: ... # pyright: ignore[reportIncompatibleVariableOverride] @property def ntypes(self) -> _NTypes: ... @property @@ -482,36 +512,36 @@ class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType, _Signature] x1: ArrayLike, x2: ArrayLike, /, - out: EllipsisType | None = ..., + out: EllipsisType | None = None, *, + dtype: DTypeLike | None = None, casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike | None = ..., subok: bool = ..., signature: str | _3Tuple[str | None] = ..., axes: list[_2Tuple[SupportsIndex]] = ..., - ) -> Any: ... + ) -> Incomplete: ... @overload def __call__( self, x1: ArrayLike, x2: ArrayLike, /, - out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType, + out: np.ndarray | tuple[np.ndarray] | EllipsisType, *, + dtype: DTypeLike | None = None, casting: _CastingKind = ..., order: _OrderKACF = ..., - dtype: DTypeLike | None = ..., subok: bool = ..., signature: str | _3Tuple[str | None] = ..., axes: list[_2Tuple[SupportsIndex]] = ..., - ) -> NDArray[Any]: ... + ) -> NDArray[Incomplete]: ... - def at(self, *args: object, **kwargs: object) -> NoReturn: ... - def reduce(self, *args: object, **kwargs: object) -> NoReturn: ... - def accumulate(self, *args: object, **kwargs: object) -> NoReturn: ... 
- def reduceat(self, *args: object, **kwargs: object) -> NoReturn: ... - def outer(self, *args: object, **kwargs: object) -> NoReturn: ... + def accumulate(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduce(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduceat(self, array: Never, /, indices: Never) -> NoReturn: ... # type: ignore[override] + def outer(self, A: Never, B: Never, /) -> NoReturn: ... # type: ignore[override] + def at(self, a: Never, indices: Never, b: Never, /) -> NoReturn: ... # type: ignore[override] @type_check_only class _PyFunc_Kwargs_Nargs2(TypedDict, total=False): @@ -569,7 +599,7 @@ class _PyFunc_Nin1_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno self, x1: _ScalarLike_co, /, - out: EllipsisType | None = ..., + out: EllipsisType | None = None, **kwargs: Unpack[_PyFunc_Kwargs_Nargs2], ) -> _ReturnType_co: ... @overload @@ -577,7 +607,7 @@ class _PyFunc_Nin1_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno self, x1: ArrayLike, /, - out: EllipsisType | None = ..., + out: EllipsisType | None = None, **kwargs: Unpack[_PyFunc_Kwargs_Nargs2], ) -> _ReturnType_co | NDArray[np.object_]: ... @overload @@ -593,15 +623,16 @@ class _PyFunc_Nin1_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno self, x1: _SupportsArrayUFunc, /, - out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = ..., + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, **kwargs: Unpack[_PyFunc_Kwargs_Nargs2], - ) -> Any: ... + ) -> Incomplete: ... - def at(self, a: _SupportsArrayUFunc, ixs: _ArrayLikeInt_co, /) -> None: ... - def reduce(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... - def accumulate(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... - def reduceat(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... - def outer(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + def accumulate(self, array: Never, /) -> NoReturn: ... 
# type: ignore[override] + def reduce(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduceat(self, array: Never, /, indices: Never) -> NoReturn: ... # type: ignore[override] + def outer(self, A: Never, B: Never, /) -> NoReturn: ... # type: ignore[override] + + def at(self, a: np.ndarray | _SupportsArrayUFunc, indices: _ArrayLikeInt_co, /) -> None: ... # type: ignore[override] @type_check_only class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: ignore[misc] @@ -624,7 +655,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno x1: _ScalarLike_co, x2: _ScalarLike_co, /, - out: EllipsisType | None = ..., + out: EllipsisType | None = None, **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], ) -> _ReturnType_co: ... @overload @@ -651,151 +682,127 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno x1: _SupportsArrayUFunc, x2: _SupportsArrayUFunc | ArrayLike, /, - out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = ..., + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], - ) -> Any: ... + ) -> Incomplete: ... @overload def __call__( self, x1: ArrayLike, x2: _SupportsArrayUFunc, /, - out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = ..., + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], - ) -> Any: ... + ) -> Incomplete: ... - def at(self, a: _SupportsArrayUFunc, ixs: _ArrayLikeInt_co, b: ArrayLike, /) -> None: ... - - @overload - def reduce( + @overload # type: ignore[override] + def accumulate( self, + array: ArrayLike, /, + axis: SupportsIndex = 0, + dtype: DTypeLike | None = None, + out: EllipsisType | None = None, + ) -> NDArray[np.object_]: ... 
+ @overload + def accumulate( + self, array: ArrayLike, - axis: _ShapeLike | None, - dtype: DTypeLike | None, + /, + axis: SupportsIndex = 0, + dtype: DTypeLike | None = None, + *, out: _ArrayT, - keepdims: bool = ..., - initial: _ScalarLike_co = ..., - where: _ArrayLikeBool_co = ..., ) -> _ArrayT: ... - @overload - def reduce( + + @overload # type: ignore[override] + def reduce( # out=array self, - /, array: ArrayLike, - axis: _ShapeLike | None = ..., - dtype: DTypeLike | None = ..., + /, + axis: _ShapeLike | None = 0, + dtype: DTypeLike | None = None, *, out: _ArrayT | tuple[_ArrayT], - keepdims: bool = ..., - initial: _ScalarLike_co = ..., - where: _ArrayLikeBool_co = ..., + keepdims: bool = False, + **kwargs: Unpack[_ReduceKwargs], ) -> _ArrayT: ... - @overload + @overload # out=... def reduce( self, + array: ArrayLike, /, + axis: _ShapeLike | None = 0, + dtype: DTypeLike | None = None, + *, + out: EllipsisType, + keepdims: bool = False, + **kwargs: Unpack[_ReduceKwargs], + ) -> NDArray[np.object_]: ... + @overload # keepdims=True + def reduce( + self, array: ArrayLike, - axis: _ShapeLike | None = ..., - dtype: DTypeLike | None = ..., - out: EllipsisType | None = ..., + /, + axis: _ShapeLike | None = 0, + dtype: DTypeLike | None = None, + out: EllipsisType | None = None, *, keepdims: Literal[True], - initial: _ScalarLike_co = ..., - where: _ArrayLikeBool_co = ..., + **kwargs: Unpack[_ReduceKwargs], ) -> NDArray[np.object_]: ... @overload def reduce( self, - /, array: ArrayLike, - axis: _ShapeLike | None = ..., - dtype: DTypeLike | None = ..., - out: EllipsisType | None = ..., - keepdims: bool = ..., - initial: _ScalarLike_co = ..., - where: _ArrayLikeBool_co = ..., + /, + axis: _ShapeLike | None = 0, + dtype: DTypeLike | None = None, + out: EllipsisType | None = None, + keepdims: bool = False, + **kwargs: Unpack[_ReduceKwargs], ) -> _ReturnType_co | NDArray[np.object_]: ... 
- @overload + @overload # type: ignore[override] def reduceat( self, - /, array: ArrayLike, - indices: _ArrayLikeInt_co, - axis: SupportsIndex, - dtype: DTypeLike | None, - out: _ArrayT, - ) -> _ArrayT: ... - @overload - def reduceat( - self, /, - array: ArrayLike, indices: _ArrayLikeInt_co, - axis: SupportsIndex = ..., - dtype: DTypeLike | None = ..., + axis: SupportsIndex = 0, + dtype: DTypeLike | None = None, *, out: _ArrayT | tuple[_ArrayT], ) -> _ArrayT: ... @overload def reduceat( self, - /, array: ArrayLike, + /, indices: _ArrayLikeInt_co, - axis: SupportsIndex = ..., - dtype: DTypeLike | None = ..., - out: EllipsisType | None = ..., + axis: SupportsIndex = 0, + dtype: DTypeLike | None = None, + out: EllipsisType | None = None, ) -> NDArray[np.object_]: ... @overload def reduceat( self, - /, array: _SupportsArrayUFunc, - indices: _ArrayLikeInt_co, - axis: SupportsIndex = ..., - dtype: DTypeLike | None = ..., - out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = ..., - ) -> Any: ... - - @overload - def accumulate( - self, /, - array: ArrayLike, - axis: SupportsIndex, - dtype: DTypeLike | None, - out: _ArrayT, - ) -> _ArrayT: ... - @overload - def accumulate( - self, - /, - array: ArrayLike, - axis: SupportsIndex = ..., - dtype: DTypeLike | None = ..., - *, - out: _ArrayT | tuple[_ArrayT], - ) -> _ArrayT: ... - @overload - def accumulate( - self, - /, - array: ArrayLike, - axis: SupportsIndex = ..., - dtype: DTypeLike | None = ..., - out: EllipsisType | None = ..., - ) -> NDArray[np.object_]: ... + indices: _ArrayLikeInt_co, + axis: SupportsIndex = 0, + dtype: DTypeLike | None = None, + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, + ) -> Incomplete: ... - @overload + @overload # type: ignore[override] def outer( self, A: _ScalarLike_co, B: _ScalarLike_co, /, *, - out: EllipsisType | None = ..., + out: EllipsisType | None = None, **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], ) -> _ReturnType_co: ... 
@overload @@ -805,7 +812,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno B: ArrayLike, /, *, - out: EllipsisType | None = ..., + out: EllipsisType | None = None, **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], ) -> _ReturnType_co | NDArray[np.object_]: ... @overload @@ -825,9 +832,9 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno B: _SupportsArrayUFunc | ArrayLike, /, *, - out: EllipsisType | None = ..., + out: EllipsisType | None = None, **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], - ) -> Any: ... + ) -> Incomplete: ... @overload def outer( self, @@ -835,9 +842,17 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno B: _SupportsArrayUFunc | ArrayLike, /, *, - out: EllipsisType | None = ..., + out: EllipsisType | None = None, **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], - ) -> Any: ... + ) -> Incomplete: ... + + def at( # type: ignore[override] + self, + a: np.ndarray | _SupportsArrayUFunc, + indices: _ArrayLikeInt_co, + b: ArrayLike, + /, + ) -> None: ... @type_check_only class _PyFunc_Nin3P_Nout1(ufunc, Generic[_ReturnType_co, _IDType, _NIn]): # type: ignore[misc] @@ -893,15 +908,15 @@ class _PyFunc_Nin3P_Nout1(ufunc, Generic[_ReturnType_co, _IDType, _NIn]): # typ x3: _SupportsArrayUFunc | ArrayLike, /, *xs: _SupportsArrayUFunc | ArrayLike, - out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = ..., + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs4P], - ) -> Any: ... + ) -> Incomplete: ... - def at(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... - def reduce(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... - def accumulate(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... - def reduceat(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... - def outer(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + def accumulate(self, array: Never, /) -> NoReturn: ... 
# type: ignore[override] + def reduce(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduceat(self, array: Never, /, indices: Never) -> NoReturn: ... # type: ignore[override] + def outer(self, A: Never, B: Never, /) -> NoReturn: ... # type: ignore[override] + def at(self, a: Never, indices: Never, /, *args: Never) -> NoReturn: ... # type: ignore[override] @type_check_only class _PyFunc_Nin1P_Nout2P(ufunc, Generic[_ReturnType_co, _IDType, _NIn, _NOut]): # type: ignore[misc] @@ -949,12 +964,12 @@ class _PyFunc_Nin1P_Nout2P(ufunc, Generic[_ReturnType_co, _IDType, _NIn, _NOut]) x1: _SupportsArrayUFunc | ArrayLike, /, *xs: _SupportsArrayUFunc | ArrayLike, - out: _2PTuple[NDArray[Any]] | EllipsisType | None = ..., + out: _2PTuple[np.ndarray] | EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3P], - ) -> Any: ... + ) -> Incomplete: ... - def at(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... - def reduce(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... - def accumulate(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... - def reduceat(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... - def outer(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + def accumulate(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduce(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduceat(self, array: Never, /, indices: Never) -> NoReturn: ... # type: ignore[override] + def outer(self, A: Never, B: Never, /) -> NoReturn: ... # type: ignore[override] + def at(self, a: Never, indices: Never, /, *args: Never) -> NoReturn: ... 
# type: ignore[override] diff --git a/numpy/typing/tests/data/reveal/ufuncs.pyi b/numpy/typing/tests/data/reveal/ufuncs.pyi index 0bfe4df9ad8d..eda92f2117c6 100644 --- a/numpy/typing/tests/data/reveal/ufuncs.pyi +++ b/numpy/typing/tests/data/reveal/ufuncs.pyi @@ -99,44 +99,44 @@ assert_type(np.bitwise_count(i8), Any) assert_type(np.bitwise_count(AR_i8), npt.NDArray[Any]) def test_absolute_outer_invalid() -> None: - assert_type(np.absolute.outer(), NoReturn) + assert_type(np.absolute.outer(AR_f8, AR_f8), NoReturn) # type: ignore[arg-type] def test_frexp_outer_invalid() -> None: - assert_type(np.frexp.outer(), NoReturn) + assert_type(np.frexp.outer(AR_f8, AR_f8), NoReturn) # type: ignore[arg-type] def test_divmod_outer_invalid() -> None: - assert_type(np.divmod.outer(), NoReturn) + assert_type(np.divmod.outer(AR_f8, AR_f8), NoReturn) # type: ignore[arg-type] def test_matmul_outer_invalid() -> None: - assert_type(np.matmul.outer(), NoReturn) + assert_type(np.matmul.outer(AR_f8, AR_f8), NoReturn) # type: ignore[arg-type] def test_absolute_reduceat_invalid() -> None: - assert_type(np.absolute.reduceat(), NoReturn) + assert_type(np.absolute.reduceat(AR_f8, AR_i8), NoReturn) # type: ignore[arg-type] def test_frexp_reduceat_invalid() -> None: - assert_type(np.frexp.reduceat(), NoReturn) + assert_type(np.frexp.reduceat(AR_f8, AR_i8), NoReturn) # type: ignore[arg-type] def test_divmod_reduceat_invalid() -> None: - assert_type(np.divmod.reduceat(), NoReturn) + assert_type(np.divmod.reduceat(AR_f8, AR_i8), NoReturn) # type: ignore[arg-type] def test_matmul_reduceat_invalid() -> None: - assert_type(np.matmul.reduceat(), NoReturn) + assert_type(np.matmul.reduceat(AR_f8, AR_i8), NoReturn) # type: ignore[arg-type] def test_absolute_reduce_invalid() -> None: - assert_type(np.absolute.reduce(), NoReturn) + assert_type(np.absolute.reduce(AR_f8), NoReturn) # type: ignore[arg-type] def test_frexp_reduce_invalid() -> None: - assert_type(np.frexp.reduce(), NoReturn) + 
assert_type(np.frexp.reduce(AR_f8), NoReturn) # type: ignore[arg-type] def test_divmod_reduce_invalid() -> None: - assert_type(np.divmod.reduce(), NoReturn) + assert_type(np.divmod.reduce(AR_f8), NoReturn) # type: ignore[arg-type] def test_matmul_reduce_invalid() -> None: - assert_type(np.matmul.reduce(), NoReturn) + assert_type(np.matmul.reduce(AR_f8), NoReturn) # type: ignore[arg-type] def test_absolute_accumulate_invalid() -> None: - assert_type(np.absolute.accumulate(), NoReturn) + assert_type(np.absolute.accumulate(AR_f8), NoReturn) # type: ignore[arg-type] def test_frexp_accumulate_invalid() -> None: - assert_type(np.frexp.accumulate(), NoReturn) + assert_type(np.frexp.accumulate(AR_f8), NoReturn) # type: ignore[arg-type] def test_divmod_accumulate_invalid() -> None: - assert_type(np.divmod.accumulate(), NoReturn) + assert_type(np.divmod.accumulate(AR_f8), NoReturn) # type: ignore[arg-type] def test_matmul_accumulate_invalid() -> None: - assert_type(np.matmul.accumulate(), NoReturn) + assert_type(np.matmul.accumulate(AR_f8), NoReturn) # type: ignore[arg-type] def test_frexp_at_invalid() -> None: - assert_type(np.frexp.at(), NoReturn) + assert_type(np.frexp.at(AR_f8, i8), NoReturn) # type: ignore[arg-type] def test_divmod_at_invalid() -> None: - assert_type(np.divmod.at(), NoReturn) + assert_type(np.divmod.at(AR_f8, i8, AR_f8), NoReturn) # type: ignore[arg-type] def test_matmul_at_invalid() -> None: - assert_type(np.matmul.at(), NoReturn) + assert_type(np.matmul.at(AR_f8, i8, AR_f8), NoReturn) # type: ignore[arg-type] From 3150dec424f75b3504a21e7fc990ffbb3075abab Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Thu, 30 Oct 2025 23:00:31 -0400 Subject: [PATCH 0782/1718] DOC: Add a plot to the 'unwrap' docstring. 
[skip actions] [skip azp] [skip cirrus] --- numpy/lib/_function_base_impl.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 29288388f9ed..44db48f7a00c 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -1786,6 +1786,7 @@ def unwrap(p, discont=None, axis=-1, *, period=2 * pi): Examples -------- >>> import numpy as np + >>> phase = np.linspace(0, np.pi, num=5) >>> phase[3:] += np.pi >>> phase @@ -1803,6 +1804,23 @@ def unwrap(p, discont=None, axis=-1, *, period=2 * pi): array([-180., -140., -100., -60., -20., 20., 60., 100., 140., 180., 220., 260., 300., 340., 380., 420., 460., 500., 540.]) + + This example plots the unwrapping of the wrapped input signal `w`. + First generate `w`, then apply `unwrap` to get `u`. + + >>> t = np.linspace(0, 25, 801) + >>> w = np.mod(1.5 * np.sin(1.1 * t + 0.26) * (1 - t / 6 + (t / 23) ** 3), 2.0) - 1 + >>> u = np.unwrap(w, period=2.0) + + Plot `w` and `u`. 
+ + >>> import matplotlib.pyplot as plt + >>> plt.plot(t, w, label='w (a signal wrapped to [-1, 1])') + >>> plt.plot(t, u, linewidth=2.5, alpha=0.5, label='unwrap(w, period=2)') + >>> plt.xlabel('t') + >>> plt.grid(alpha=0.6) + >>> plt.legend(framealpha=1, shadow=True) + >>> plt.show() """ p = asarray(p) nd = p.ndim From e535e3c3b64fd77c7c816b7cb06da67a5434825b Mon Sep 17 00:00:00 2001 From: Abhishek Tiwari Date: Fri, 31 Oct 2025 12:46:53 +0530 Subject: [PATCH 0783/1718] Update numpy/_core/tests/test_arrayprint.py Co-authored-by: Matti Picus --- doc/release/upcoming_changes/30068.expired.rst | 7 +++++-- numpy/_core/tests/test_arrayprint.py | 2 +- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/doc/release/upcoming_changes/30068.expired.rst b/doc/release/upcoming_changes/30068.expired.rst index a5363771c4b7..56678465e4b9 100644 --- a/doc/release/upcoming_changes/30068.expired.rst +++ b/doc/release/upcoming_changes/30068.expired.rst @@ -3,6 +3,9 @@ The following long-deprecated APIs have been removed or converted to errors: -* The ``style`` parameter has been removed from ``numpy.array2string``. This argument had no effect since Numpy 1.14.0. +* The ``style`` parameter has been removed from ``numpy.array2string``. + This argument had no effect since Numpy 1.14.0. -* Calling ``np.sum(generator)`` directly on a generator object now raises a `TypeError`. This behavior was deprecated in NumPy 1.15.0. Use ``np.sum(np.fromiter(generator))`` or the python ``sum`` builtin instead. +* Calling ``np.sum(generator)`` directly on a generator object now raises a `TypeError`. + This behavior was deprecated in NumPy 1.15.0. Use ``np.sum(np.fromiter(generator))`` + or the python ``sum`` builtin instead. 
diff --git a/numpy/_core/tests/test_arrayprint.py b/numpy/_core/tests/test_arrayprint.py index 009a42fdb1b9..06d1306dd408 100644 --- a/numpy/_core/tests/test_arrayprint.py +++ b/numpy/_core/tests/test_arrayprint.py @@ -735,7 +735,7 @@ def test_0d_arrays(self): # str is unaffected assert_equal(str(x), "1") - # gh-10934 style was broken in legacy mode, check it works + # check it works np.array2string(np.array(1.), legacy='1.13') def test_float_spacing(self): From 5d92c7ac7953e060bea20803e45cc8810796cb48 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Fri, 31 Oct 2025 17:14:18 +0100 Subject: [PATCH 0784/1718] BUG, TYP: Fix ``ma.core._convert2ma`` function signatures (#30099) * BUG: Fix ``ma.core._convert2ma`` function signatures * TYP: Annotate `ma.core.arange` * TYP: Annotate `ma.core.clip` * TYP: Annotate `ma.core.{empty, zeros, ones}` * TYP: Annotate `ma.core.{empty, zeros, ones}_like` * TYP: fix incorrect `ma.empty` call in a type-test * TYP: Annotate `ma.core.identity` * TYP: Annotate `ma.core.indices` * TYP: Annotate `ma.core.squeeze` * TYP: Update stubs for `ma.core._convert2ma` * BUG: Doctest bug workaround --- numpy/ma/core.py | 123 +++-- numpy/ma/core.pyi | 531 ++++++++++++++++++- numpy/ma/tests/test_core.py | 17 + numpy/typing/tests/data/pass/recfunctions.py | 11 +- 4 files changed, 596 insertions(+), 86 deletions(-) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 7803556fb660..95bec501dc73 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -134,6 +134,7 @@ def doc_note(initialdoc, note): return ''.join(notesplit[:1] + [notedoc] + notesplit[1:]) +# TODO: remove/deprecate once `ma.extras._fromnxfunction.getdoc` no longer uses it def get_object_signature(obj): """ Get the signature from obj @@ -8740,78 +8741,76 @@ def fromflex(fxarray): return masked_array(fxarray['_data'], mask=fxarray['_mask']) -class _convert2ma: +def _convert2ma(funcname: str, np_ret: str, np_ma_ret: str, + params: dict[str, str] | None = None): + """Convert function 
from numpy to numpy.ma.""" + func = getattr(np, funcname) + params = params or {} - """ - Convert functions from numpy to numpy.ma. + @functools.wraps(func, assigned=set(functools.WRAPPER_ASSIGNMENTS) - {"__module__"}) + def wrapper(*args, **kwargs): + common_params = kwargs.keys() & params.keys() + extras = params | {p: kwargs.pop(p) for p in common_params} - Parameters - ---------- - _methodname : string - Name of the method to transform. + result = func.__call__(*args, **kwargs).view(MaskedArray) - """ - __doc__ = None + if "fill_value" in common_params: + result.fill_value = extras["fill_value"] + if "hardmask" in common_params: + result._hardmask = bool(extras["hardmask"]) - def __init__(self, funcname, np_ret, np_ma_ret, params=None): - self._func = getattr(np, funcname) - self.__doc__ = self.getdoc(np_ret, np_ma_ret) - self._extras = params or {} + return result - def getdoc(self, np_ret, np_ma_ret): - "Return the doc of the function (from the doc of the method)." - doc = getattr(self._func, '__doc__', None) - sig = get_object_signature(self._func) - if doc: - doc = self._replace_return_type(doc, np_ret, np_ma_ret) - # Add the signature of the function at the beginning of the doc - if sig: - sig = f"{self._func.__name__}{sig}\n" - doc = sig + doc - return doc + # workaround for a doctest bug in Python 3.11 that incorrectly assumes `__code__` + # exists on wrapped functions + del wrapper.__wrapped__ - def _replace_return_type(self, doc, np_ret, np_ma_ret): - """ - Replace documentation of ``np`` function's return type. 
+ # `arange`, `empty`, `empty_like`, `frombuffer`, and `zeros` have no signature + try: + signature = inspect.signature(func) + except ValueError: + signature = inspect.Signature([ + inspect.Parameter('args', inspect.Parameter.VAR_POSITIONAL), + inspect.Parameter('kwargs', inspect.Parameter.VAR_KEYWORD), + ]) + + if params: + sig_params = list(signature.parameters.values()) + + # pop `**kwargs` if present + sig_kwargs = None + if sig_params[-1].kind is inspect.Parameter.VAR_KEYWORD: + sig_kwargs = sig_params.pop() + + # add new keyword-only parameters + for param_name, default in params.items(): + new_param = inspect.Parameter( + param_name, + inspect.Parameter.KEYWORD_ONLY, + default=default, + ) + sig_params.append(new_param) - Replaces it with the proper type for the ``np.ma`` function. + # re-append `**kwargs` if it was present + if sig_kwargs: + sig_params.append(sig_kwargs) - Parameters - ---------- - doc : str - The documentation of the ``np`` method. - np_ret : str - The return type string of the ``np`` method that we want to - replace. (e.g. "out : ndarray") - np_ma_ret : str - The return type string of the ``np.ma`` method. - (e.g. "out : MaskedArray") - """ - if np_ret not in doc: - raise RuntimeError( - f"Failed to replace `{np_ret}` with `{np_ma_ret}`. " - f"The documentation string for return type, {np_ret}, is not " - f"found in the docstring for `np.{self._func.__name__}`. " - f"Fix the docstring for `np.{self._func.__name__}` or " - "update the expected string for return type." 
- ) + signature = signature.replace(parameters=sig_params) - return doc.replace(np_ret, np_ma_ret) + wrapper.__signature__ = signature - def __call__(self, *args, **params): - # Find the common parameters to the call and the definition - _extras = self._extras - common_params = set(params).intersection(_extras) - # Drop the common parameters from the call - for p in common_params: - _extras[p] = params.pop(p) - # Get the result - result = self._func.__call__(*args, **params).view(MaskedArray) - if "fill_value" in common_params: - result.fill_value = _extras.get("fill_value", None) - if "hardmask" in common_params: - result._hardmask = bool(_extras.get("hard_mask", False)) - return result + # __doc__ is None when using `python -OO ...` + if func.__doc__ is not None: + assert np_ret in func.__doc__, ( + f"Failed to replace `{np_ret}` with `{np_ma_ret}`. " + f"The documentation string for return type, {np_ret}, is not " + f"found in the docstring for `np.{func.__name__}`. " + f"Fix the docstring for `np.{func.__name__}` or " + "update the expected string for return type." 
+ ) + wrapper.__doc__ = inspect.cleandoc(func.__doc__).replace(np_ret, np_ma_ret) + + return wrapper arange = _convert2ma( diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 8e6309504445..1656db47fe0b 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -18,10 +18,11 @@ from typing import ( SupportsIndex, SupportsInt, TypeAlias, + Unpack, final, overload, ) -from typing_extensions import ParamSpec, TypeIs, TypeVar, override +from typing_extensions import Buffer, ParamSpec, TypeIs, TypeVar, override import numpy as np from numpy import ( @@ -30,6 +31,7 @@ from numpy import ( _HasDTypeWithRealAndImag, _ModeKind, _OrderACF, + _OrderCF, _OrderKACF, _PartitionKind, _SortKind, @@ -67,6 +69,7 @@ from numpy import ( unsignedinteger, void, ) +from numpy._core.fromnumeric import _UFuncKwargs # type-check only from numpy._globals import _NoValueType from numpy._typing import ( ArrayLike, @@ -100,6 +103,8 @@ from numpy._typing import ( _ScalarLike_co, _Shape, _ShapeLike, + _SupportsArrayFunc, + _SupportsDType, ) from numpy._typing._dtype_like import _VoidDTypeLike @@ -295,6 +300,7 @@ _ScalarT = TypeVar("_ScalarT", bound=generic) _ScalarT_co = TypeVar("_ScalarT_co", bound=generic, covariant=True) _NumberT = TypeVar("_NumberT", bound=number) _RealNumberT = TypeVar("_RealNumberT", bound=floating | integer) +_ArangeScalarT = TypeVar("_ArangeScalarT", bound=_ArangeScalar) _UFuncT_co = TypeVar( "_UFuncT_co", # the `| Callable` simplifies self-binding to the ufunc's callable signature @@ -327,6 +333,7 @@ _ConvertibleToFloat: TypeAlias = SupportsFloat | SupportsIndex | _CharLike_co _ConvertibleToComplex: TypeAlias = SupportsComplex | SupportsFloat | SupportsIndex | _CharLike_co _ConvertibleToTD64: TypeAlias = dt.timedelta | int | _CharLike_co | character | number | timedelta64 | np.bool | None _ConvertibleToDT64: TypeAlias = dt.date | int | _CharLike_co | character | number | datetime64 | np.bool | None +_ArangeScalar: TypeAlias = floating | integer | datetime64 | 
timedelta64 _NoMaskType: TypeAlias = np.bool_[Literal[False]] # type of `np.False_` _MaskArray: TypeAlias = np.ndarray[_ShapeOrAnyT, np.dtype[np.bool_]] @@ -3130,24 +3137,508 @@ def allclose(a: ArrayLike, b: ArrayLike, masked_equal: bool = True, rtol: float def fromflex(fxarray): ... -class _convert2ma: - def __init__(self, /, funcname: str, np_ret: str, np_ma_ret: str, params: dict[str, Any] | None = None) -> None: ... - def __call__(self, /, *args: object, **params: object) -> Any: ... - def getdoc(self, /, np_ret: str, np_ma_ret: str) -> str | None: ... - -arange: _convert2ma = ... -clip: _convert2ma = ... -empty: _convert2ma = ... -empty_like: _convert2ma = ... -frombuffer: _convert2ma = ... -fromfunction: _convert2ma = ... -identity: _convert2ma = ... -indices: _convert2ma = ... -ones: _convert2ma = ... -ones_like: _convert2ma = ... -squeeze: _convert2ma = ... -zeros: _convert2ma = ... -zeros_like: _convert2ma = ... - def append(a, b, axis=None): ... def dot(a, b, strict=False, out=None): ... + +# internal wrapper functions for the functions below +def _convert2ma( + funcname: str, + np_ret: str, + np_ma_ret: str, + params: dict[str, Any] | None = None, +) -> Callable[..., Any]: ... + +# keep roughly in sync with `_core.multiarray.arange` +@overload # int, dtype=None (default) +def arange( + start: int, + stop: int = ..., + step: int = ..., + /, + dtype: None = None, + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[tuple[int], np.dtype[np.int_]]: ... +@overload # float, dtype=None (default) +def arange( + start: float, + stop: float = ..., + step: float = ..., + /, + dtype: None = None, + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[tuple[int], np.dtype[np.float64 | Any]]: ... 
+@overload # integer | floating | datetime64 | timedelta64, dtype=None (default) +def arange( + start: _ArangeScalarT, + stop: _ArangeScalarT = ..., + step: _ArangeScalarT = ..., + /, + dtype: None = None, + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[tuple[int], np.dtype[_ArangeScalarT]]: ... +@overload # dtype: known dtype-like (positional) +def arange( + start: _ArangeScalar, + stop: _ArangeScalar, + step: _ArangeScalar, + /, + dtype: _DTypeLike[_ScalarT], + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[tuple[int], np.dtype[_ScalarT]]: ... +@overload # dtype: known dtype-like (keyword) +def arange( + start: _ArangeScalar, + stop: _ArangeScalar = ..., + step: _ArangeScalar = ..., + /, + *, + dtype: _DTypeLike[_ScalarT], + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[tuple[int], np.dtype[_ScalarT]]: ... +@overload # dtype: unknown dtype +def arange( + start: _ArangeScalar, + stop: _ArangeScalar = ..., + step: _ArangeScalar = ..., + /, + dtype: DTypeLike | None = None, + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[tuple[int]]: ... + +# based on `_core.fromnumeric.clip` +@overload +def clip( + a: _ScalarT, + a_min: ArrayLike | _NoValueType | None = ..., + a_max: ArrayLike | _NoValueType | None = ..., + out: None = None, + *, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + fill_value: _FillValue | None = None, + hardmask: bool = False, + dtype: None = None, + **kwargs: Unpack[_UFuncKwargs], +) -> _ScalarT: ... 
+@overload +def clip( + a: NDArray[_ScalarT], + a_min: ArrayLike | _NoValueType | None = ..., + a_max: ArrayLike | _NoValueType | None = ..., + out: None = None, + *, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + fill_value: _FillValue | None = None, + hardmask: bool = False, + dtype: None = None, + **kwargs: Unpack[_UFuncKwargs], +) -> _MaskedArray[_ScalarT]: ... +@overload +def clip( + a: ArrayLike, + a_min: ArrayLike | None, + a_max: ArrayLike | None, + out: _MArrayT, + *, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + fill_value: _FillValue | None = None, + hardmask: bool = False, + dtype: DTypeLike | None = None, + **kwargs: Unpack[_UFuncKwargs], +) -> _MArrayT: ... +@overload +def clip( + a: ArrayLike, + a_min: ArrayLike | _NoValueType | None = ..., + a_max: ArrayLike | _NoValueType | None = ..., + *, + out: _MArrayT, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + fill_value: _FillValue | None = None, + hardmask: bool = False, + dtype: DTypeLike | None = None, + **kwargs: Unpack[_UFuncKwargs], +) -> _MArrayT: ... +@overload +def clip( + a: ArrayLike, + a_min: ArrayLike | _NoValueType | None = ..., + a_max: ArrayLike | _NoValueType | None = ..., + out: None = None, + *, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + fill_value: _FillValue | None = None, + hardmask: bool = False, + dtype: DTypeLike | None = None, + **kwargs: Unpack[_UFuncKwargs], +) -> Incomplete: ... + +# keep in sync with `_core.multiarray.ones` +@overload +def empty( + shape: SupportsIndex, + dtype: None = None, + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[tuple[int], np.dtype[np.float64]]: ... 
+@overload +def empty( + shape: SupportsIndex, + dtype: _DTypeT | _SupportsDType[_DTypeT], + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[tuple[int], _DTypeT]: ... +@overload +def empty( + shape: SupportsIndex, + dtype: type[_ScalarT], + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[tuple[int], np.dtype[_ScalarT]]: ... +@overload +def empty( + shape: SupportsIndex, + dtype: DTypeLike | None = None, + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[tuple[int]]: ... +@overload # known shape +def empty( + shape: _AnyShapeT, + dtype: None = None, + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[_AnyShapeT, np.dtype[np.float64]]: ... +@overload +def empty( + shape: _AnyShapeT, + dtype: _DTypeT | _SupportsDType[_DTypeT], + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[_AnyShapeT, _DTypeT]: ... +@overload +def empty( + shape: _AnyShapeT, + dtype: type[_ScalarT], + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[_AnyShapeT, np.dtype[_ScalarT]]: ... 
+@overload +def empty( + shape: _AnyShapeT, + dtype: DTypeLike | None = None, + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[_AnyShapeT]: ... +@overload # unknown shape +def empty( + shape: _ShapeLike, + dtype: None = None, + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _MaskedArray[np.float64]: ... +@overload +def empty( + shape: _ShapeLike, + dtype: _DTypeT | _SupportsDType[_DTypeT], + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[_AnyShape, _DTypeT]: ... +@overload +def empty( + shape: _ShapeLike, + dtype: type[_ScalarT], + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _MaskedArray[_ScalarT]: ... +@overload +def empty( + shape: _ShapeLike, + dtype: DTypeLike | None = None, + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray: ... + +# keep in sync with `_core.multiarray.empty_like` +@overload +def empty_like( + a: _MArrayT, + dtype: None = None, + order: _OrderKACF = "K", + subok: bool = True, + shape: _ShapeLike | None = None, + *, + device: Literal["cpu"] | None = None, +) -> _MArrayT: ... +@overload +def empty_like( + a: _ArrayLike[_ScalarT], + dtype: None = None, + order: _OrderKACF = "K", + subok: bool = True, + shape: _ShapeLike | None = None, + *, + device: Literal["cpu"] | None = None, +) -> _MaskedArray[_ScalarT]: ... 
+@overload +def empty_like( + a: object, + dtype: _DTypeLike[_ScalarT], + order: _OrderKACF = "K", + subok: bool = True, + shape: _ShapeLike | None = None, + *, + device: Literal["cpu"] | None = None, +) -> _MaskedArray[_ScalarT]: ... +@overload +def empty_like( + a: object, + dtype: DTypeLike | None = None, + order: _OrderKACF = "K", + subok: bool = True, + shape: _ShapeLike | None = None, + *, + device: Literal["cpu"] | None = None, +) -> _MaskedArray[Incomplete]: ... + +# This is a bit of a hack to avoid having to duplicate all those `empty` overloads for +# `ones` and `zeros`, that relies on the fact that empty/zeros/ones have identical +# type signatures, but may cause some type-checkers to report incorrect names in case +# of user errors. Mypy and Pyright seem to handle this just fine. +ones = empty +ones_like = empty_like +zeros = empty +zeros_like = empty_like + +# keep in sync with `_core.multiarray.frombuffer` +@overload +def frombuffer( + buffer: Buffer, + *, + count: SupportsIndex = -1, + offset: SupportsIndex = 0, + like: _SupportsArrayFunc | None = None, +) -> _MaskedArray[np.float64]: ... +@overload +def frombuffer( + buffer: Buffer, + dtype: _DTypeLike[_ScalarT], + count: SupportsIndex = -1, + offset: SupportsIndex = 0, + *, + like: _SupportsArrayFunc | None = None, +) -> _MaskedArray[_ScalarT]: ... +@overload +def frombuffer( + buffer: Buffer, + dtype: DTypeLike | None = float, + count: SupportsIndex = -1, + offset: SupportsIndex = 0, + *, + like: _SupportsArrayFunc | None = None, +) -> _MaskedArray[Incomplete]: ... + +# keep roughly in sync with `_core.numeric.fromfunction` +def fromfunction( + function: Callable[..., np.ndarray[_ShapeT, _DTypeT]], + shape: Sequence[int], + *, + dtype: DTypeLike | None = float, + like: _SupportsArrayFunc | None = None, + **kwargs: object, +) -> MaskedArray[_ShapeT, _DTypeT]: ... 
+ +# keep roughly in sync with `_core.numeric.identity` +@overload +def identity( + n: int, + dtype: None = None, + *, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[tuple[int, int], np.dtype[np.float64]]: ... +@overload +def identity( + n: int, + dtype: _DTypeLike[_ScalarT], + *, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[tuple[int, int], np.dtype[_ScalarT]]: ... +@overload +def identity( + n: int, + dtype: DTypeLike | None = None, + *, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[tuple[int, int], np.dtype[Incomplete]]: ... + +# keep roughly in sync with `_core.numeric.indices` +@overload +def indices( + dimensions: Sequence[int], + dtype: type[int] = int, + sparse: Literal[False] = False, + *, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _MaskedArray[np.intp]: ... +@overload +def indices( + dimensions: Sequence[int], + dtype: type[int], + sparse: Literal[True], + *, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> tuple[_MaskedArray[np.intp], ...]: ... +@overload +def indices( + dimensions: Sequence[int], + dtype: type[int] = int, + *, + sparse: Literal[True], + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> tuple[_MaskedArray[np.intp], ...]: ... +@overload +def indices( + dimensions: Sequence[int], + dtype: _DTypeLike[_ScalarT], + sparse: Literal[False] = False, + *, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _MaskedArray[_ScalarT]: ... +@overload +def indices( + dimensions: Sequence[int], + dtype: _DTypeLike[_ScalarT], + sparse: Literal[True], + *, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> tuple[_MaskedArray[_ScalarT], ...]: ... 
+@overload +def indices( + dimensions: Sequence[int], + dtype: DTypeLike | None = int, + sparse: Literal[False] = False, + *, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _MaskedArray[Incomplete]: ... +@overload +def indices( + dimensions: Sequence[int], + dtype: DTypeLike | None, + sparse: Literal[True], + *, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> tuple[_MaskedArray[Incomplete], ...]: ... +@overload +def indices( + dimensions: Sequence[int], + dtype: DTypeLike | None = int, + *, + sparse: Literal[True], + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> tuple[_MaskedArray[Incomplete], ...]: ... + +# keep roughly in sync with `_core.fromnumeric.squeeze` +@overload +def squeeze( + a: _ArrayLike[_ScalarT], + axis: _ShapeLike | None = None, + *, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _MaskedArray[_ScalarT]: ... +@overload +def squeeze( + a: ArrayLike, + axis: _ShapeLike | None = None, + *, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _MaskedArray[Incomplete]: ... 
diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index bf7b94d720ce..0e4d42da0a5b 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -5977,3 +5977,20 @@ def test_uint_fill_value_and_filled(): ) def test_frommethod_signature(fn, signature): assert str(inspect.signature(fn)) == signature + + +@pytest.mark.parametrize( + ('fn', 'signature'), + [ + (np.ma.empty, "(*args, fill_value=None, hardmask=False, **kwargs)"), + (np.ma.empty_like, "(*args, **kwargs)"), + (np.ma.squeeze, "(a, axis=None, *, fill_value=None, hardmask=False)"), + ( + np.ma.identity, + "(n, dtype=None, *, like=None, fill_value=None, hardmask=False)", + ), + ] +) +def test_convert2ma_signature(fn, signature): + assert str(inspect.signature(fn)) == signature + assert fn.__module__ == 'numpy.ma.core' diff --git a/numpy/typing/tests/data/pass/recfunctions.py b/numpy/typing/tests/data/pass/recfunctions.py index cca0c3988708..586f0502b366 100644 --- a/numpy/typing/tests/data/pass/recfunctions.py +++ b/numpy/typing/tests/data/pass/recfunctions.py @@ -150,12 +150,15 @@ def test_stack_arrays() -> None: def test_find_duplicates() -> None: ndtype = np.dtype([("a", int)]) - a = np.ma.ones(7, mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype) - assert_type(rfn.find_duplicates(a), np.ma.MaskedArray[Any, np.dtype[np.void]]) + a = np.ma.ones(7).view(ndtype) + assert_type( + rfn.find_duplicates(a), + np.ma.MaskedArray[tuple[int], np.dtype[np.void]], + ) assert_type( rfn.find_duplicates(a, ignoremask=True, return_index=True), tuple[ - np.ma.MaskedArray[Any, np.dtype[np.void]], - np.ndarray[Any, np.dtype[np.int_]], + np.ma.MaskedArray[tuple[int], np.dtype[np.void]], + np.ndarray[tuple[int], np.dtype[np.int_]], ], ) From 779929c42dd5dfa32f07f1311233abea9f026310 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Fri, 31 Oct 2025 17:18:40 +0100 Subject: [PATCH 0785/1718] TYP: shape-type-aware ``swapaxes`` (#30108) --- numpy/__init__.pyi | 8 +--- numpy/_core/fromnumeric.pyi | 16 
++----- numpy/ma/core.pyi | 10 +--- numpy/typing/tests/data/reveal/ma.pyi | 2 +- .../reveal/ndarray_shape_manipulation.pyi | 48 +++++++++++-------- 5 files changed, 38 insertions(+), 46 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 6f4163626c3c..fbd344ad3b43 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2229,16 +2229,12 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): axis: SupportsIndex | tuple[SupportsIndex, ...] | None = ..., ) -> ndarray[_AnyShape, _DTypeT_co]: ... - def swapaxes( - self, - axis1: SupportsIndex, - axis2: SupportsIndex, - ) -> ndarray[_AnyShape, _DTypeT_co]: ... + def swapaxes(self, axis1: SupportsIndex, axis2: SupportsIndex, /) -> Self: ... @overload def transpose(self, axes: _ShapeLike | None, /) -> Self: ... @overload - def transpose(self, *axes: SupportsIndex) -> Self: ... + def transpose(self, /, *axes: SupportsIndex) -> Self: ... @overload def all( diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index a2c4f47ecb2f..29431efef532 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -303,19 +303,13 @@ def put( mode: _ModeKind = "raise", ) -> None: ... -# keep in sync with `ma.core.swapaxes` +# keep in sync with `ndarray.swapaxes` and `ma.core.swapaxes` @overload -def swapaxes( - a: _ArrayLike[_ScalarT], - axis1: SupportsIndex, - axis2: SupportsIndex, -) -> NDArray[_ScalarT]: ... +def swapaxes(a: _ArrayT, axis1: SupportsIndex, axis2: SupportsIndex) -> _ArrayT: ... @overload -def swapaxes( - a: ArrayLike, - axis1: SupportsIndex, - axis2: SupportsIndex, -) -> NDArray[Any]: ... +def swapaxes(a: _ArrayLike[_ScalarT], axis1: SupportsIndex, axis2: SupportsIndex) -> NDArray[_ScalarT]: ... +@overload +def swapaxes(a: ArrayLike, axis1: SupportsIndex, axis2: SupportsIndex) -> NDArray[Any]: ... 
@overload def transpose( diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 1656db47fe0b..84e9b0660daf 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -2331,14 +2331,6 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): squeeze: Any - # keep in sync with `ndarray.swapaxes` - def swapaxes( - self, - axis1: SupportsIndex, - axis2: SupportsIndex, - /, - ) -> MaskedArray[_AnyShape, _DTypeT_co]: ... - # def toflex(self) -> Incomplete: ... def torecords(self) -> Incomplete: ... @@ -2638,6 +2630,8 @@ def repeat(a: ArrayLike, repeats: _ArrayLikeInt_co, axis: SupportsIndex) -> _Mas # keep in sync with `_core.fromnumeric.swapaxes` @overload +def swapaxes(a: _MArrayT, axis1: SupportsIndex, axis2: SupportsIndex) -> _MArrayT: ... +@overload def swapaxes(a: _ArrayLike[_ScalarT], axis1: SupportsIndex, axis2: SupportsIndex) -> _MaskedArray[_ScalarT]: ... @overload def swapaxes(a: ArrayLike, axis1: SupportsIndex, axis2: SupportsIndex) -> _MaskedArray[Incomplete]: ... diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index e47e690ff0e7..8eef32ddd593 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -374,7 +374,7 @@ assert_type(MAR_c16.imag, MaskedArray[np.float64]) assert_type(MAR_2d_f4.baseclass, type[NDArray[Any]]) assert_type(MAR_b.swapaxes(0, 1), MaskedArray[np.bool]) -assert_type(MAR_2d_f4.swapaxes(1, 0), MaskedArray[np.float32]) +assert_type(MAR_2d_f4.swapaxes(1, 0), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) assert_type(MAR_2d_f4[AR_i8], MaskedArray[np.float32]) assert_type(MAR_2d_f4[[1, 2, 3]], MaskedArray[np.float32]) diff --git a/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi b/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi index 4447bb13d2ad..0ce599a40310 100644 --- a/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi @@ -1,39 +1,47 @@ -from 
typing import assert_type +from typing import TypeAlias, assert_type import numpy as np import numpy.typing as npt -nd: npt.NDArray[np.int64] +_ArrayND: TypeAlias = npt.NDArray[np.int64] +_Array2D: TypeAlias = np.ndarray[tuple[int, int], np.dtype[np.int8]] +_Array3D: TypeAlias = np.ndarray[tuple[int, int, int], np.dtype[np.bool]] + +_nd: _ArrayND +_2d: _Array2D +_3d: _Array3D # reshape -assert_type(nd.reshape(None), npt.NDArray[np.int64]) -assert_type(nd.reshape(4), np.ndarray[tuple[int], np.dtype[np.int64]]) -assert_type(nd.reshape((4,)), np.ndarray[tuple[int], np.dtype[np.int64]]) -assert_type(nd.reshape(2, 2), np.ndarray[tuple[int, int], np.dtype[np.int64]]) -assert_type(nd.reshape((2, 2)), np.ndarray[tuple[int, int], np.dtype[np.int64]]) +assert_type(_nd.reshape(None), npt.NDArray[np.int64]) +assert_type(_nd.reshape(4), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(_nd.reshape((4,)), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(_nd.reshape(2, 2), np.ndarray[tuple[int, int], np.dtype[np.int64]]) +assert_type(_nd.reshape((2, 2)), np.ndarray[tuple[int, int], np.dtype[np.int64]]) -assert_type(nd.reshape((2, 2), order="C"), np.ndarray[tuple[int, int], np.dtype[np.int64]]) -assert_type(nd.reshape(4, order="C"), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(_nd.reshape((2, 2), order="C"), np.ndarray[tuple[int, int], np.dtype[np.int64]]) +assert_type(_nd.reshape(4, order="C"), np.ndarray[tuple[int], np.dtype[np.int64]]) # resize does not return a value # transpose -assert_type(nd.transpose(), npt.NDArray[np.int64]) -assert_type(nd.transpose(1, 0), npt.NDArray[np.int64]) -assert_type(nd.transpose((1, 0)), npt.NDArray[np.int64]) +assert_type(_nd.transpose(), npt.NDArray[np.int64]) +assert_type(_nd.transpose(1, 0), npt.NDArray[np.int64]) +assert_type(_nd.transpose((1, 0)), npt.NDArray[np.int64]) # swapaxes -assert_type(nd.swapaxes(0, 1), npt.NDArray[np.int64]) +assert_type(_nd.swapaxes(0, 1), _ArrayND) +assert_type(_2d.swapaxes(0, 1), 
_Array2D) +assert_type(_3d.swapaxes(0, 1), _Array3D) # flatten -assert_type(nd.flatten(), np.ndarray[tuple[int], np.dtype[np.int64]]) -assert_type(nd.flatten("C"), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(_nd.flatten(), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(_nd.flatten("C"), np.ndarray[tuple[int], np.dtype[np.int64]]) # ravel -assert_type(nd.ravel(), np.ndarray[tuple[int], np.dtype[np.int64]]) -assert_type(nd.ravel("C"), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(_nd.ravel(), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(_nd.ravel("C"), np.ndarray[tuple[int], np.dtype[np.int64]]) # squeeze -assert_type(nd.squeeze(), npt.NDArray[np.int64]) -assert_type(nd.squeeze(0), npt.NDArray[np.int64]) -assert_type(nd.squeeze((0, 2)), npt.NDArray[np.int64]) +assert_type(_nd.squeeze(), npt.NDArray[np.int64]) +assert_type(_nd.squeeze(0), npt.NDArray[np.int64]) +assert_type(_nd.squeeze((0, 2)), npt.NDArray[np.int64]) From d2c06ce177ae941c1b19c00767aaa1f99fad3255 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 31 Oct 2025 04:27:22 +0100 Subject: [PATCH 0786/1718] BUG: ``ndarray`` method signatures --- numpy/_core/_add_newdocs.py | 209 ++++++++++++++++++++++++++- numpy/_core/tests/test_multiarray.py | 30 ++++ 2 files changed, 232 insertions(+), 7 deletions(-) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index 98bdec4e3c74..d2cc95c7f869 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -2982,6 +2982,7 @@ [5, 7]]]) """)) + ############################################################################## # # ndarray methods @@ -2991,6 +2992,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('__array__', """ + __array__($self, dtype=None, /, *, copy=None) + -- + a.__array__([dtype], *, copy=None) For ``dtype`` parameter it returns a new reference to self if @@ -3010,6 +3014,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('__array_finalize__', """ + 
__array_finalize__($self, obj, /) + -- + a.__array_finalize__(obj, /) Present so subclasses can call super. Does nothing. @@ -3019,7 +3026,10 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('__array_wrap__', """ - a.__array_wrap__(array[, context], /) + __array_wrap__($self, array, context=None, return_scalar=True, /) + -- + + a.__array_wrap__(array[, context[, return_scalar]], /) Returns a view of `array` with the same type as self. @@ -3028,6 +3038,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('__copy__', """ + __copy__($self, /) + -- + a.__copy__() Used if :func:`copy.copy` is called on an array. Returns a copy of the array. @@ -3039,6 +3052,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('__class_getitem__', """ + __class_getitem__($cls, item, /) + -- + a.__class_getitem__(item, /) Return a parametrized wrapper around the `~numpy.ndarray` type. @@ -3069,6 +3085,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('__deepcopy__', """ + __deepcopy__($self, memo, /) + -- + a.__deepcopy__(memo, /) Used if :func:`copy.deepcopy` is called on an array. @@ -3078,6 +3097,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('__reduce__', """ + __reduce__($self, /) + -- + a.__reduce__() For pickling. @@ -3087,6 +3109,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('__setstate__', """ + __setstate__($self, state, /) + -- + a.__setstate__(state, /) For unpickling. @@ -3109,6 +3134,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('all', """ + all($self, /, axis=None, out=None, keepdims=False, *, where=True) + -- + a.all(axis=None, out=None, keepdims=np._NoValue, *, where=np._NoValue) Returns True if all elements evaluate to True. @@ -3124,6 +3152,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('any', """ + any($self, /, axis=None, out=None, keepdims=False, *, where=True) + -- + a.any(axis=None, out=None, keepdims=np._NoValue, *, where=np._NoValue) Returns True if any of the elements of `a` evaluate to True. 
@@ -3139,6 +3170,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('argmax', """ + argmax($self, /, axis=None, out=None, *, keepdims=False) + -- + a.argmax(axis=None, out=None, *, keepdims=False) Return indices of the maximum values along the given axis. @@ -3154,6 +3188,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('argmin', """ + argmin($self, /, axis=None, out=None, *, keepdims=False) + -- + a.argmin(axis=None, out=None, *, keepdims=False) Return indices of the minimum values along the given axis. @@ -3169,7 +3206,10 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('argsort', """ - a.argsort(axis=-1, kind=None, order=None) + argsort($self, /, axis=-1, kind=None, order=None, *, stable=None) + -- + + a.argsort(axis=-1, kind=None, order=None, *, stable=None) Returns the indices that would sort this array. @@ -3184,6 +3224,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('argpartition', """ + argpartition($self, kth, /, axis=-1, kind='introselect', order=None) + -- + a.argpartition(kth, axis=-1, kind='introselect', order=None) Returns the indices that would partition this array. @@ -3199,6 +3242,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('astype', """ + astype($self, /, dtype, order='K', casting='unsafe', subok=True, copy=True) + -- + a.astype(dtype, order='K', casting='unsafe', subok=True, copy=True) Copy of the array, cast to a specified type. @@ -3278,6 +3324,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('byteswap', """ + byteswap($self, /, inplace=False) + -- + a.byteswap(inplace=False) Swap the bytes of the array elements @@ -3333,6 +3382,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('choose', """ + choose($self, /, choices, out=None, mode='raise') + -- + a.choose(choices, out=None, mode='raise') Use an index array to construct a new array from a set of choices. 
@@ -3348,6 +3400,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('clip', """ + clip($self, /, min=None, max=None, out=None, **kwargs) + -- + a.clip(min=np._NoValue, max=np._NoValue, out=None, **kwargs) Return an array whose values are limited to ``[min, max]``. @@ -3364,6 +3419,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('compress', """ + compress($self, /, condition, axis=None, out=None) + -- + a.compress(condition, axis=None, out=None) Return selected slices of this array along given axis. @@ -3379,6 +3437,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('conj', """ + conj($self, /) + -- + a.conj() Complex-conjugate all elements. @@ -3394,6 +3455,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('conjugate', """ + conjugate($self, /) + -- + a.conjugate() Return the complex conjugate, element-wise. @@ -3409,6 +3473,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('copy', """ + copy($self, /, order='C') + -- + a.copy(order='C') Return a copy of the array. @@ -3482,6 +3549,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('cumprod', """ + cumprod($self, /, axis=None, dtype=None, out=None) + -- + a.cumprod(axis=None, dtype=None, out=None) Return the cumulative product of the elements along the given axis. @@ -3497,6 +3567,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('cumsum', """ + cumsum($self, /, axis=None, dtype=None, out=None) + -- + a.cumsum(axis=None, dtype=None, out=None) Return the cumulative sum of the elements along the given axis. @@ -3512,6 +3585,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('diagonal', """ + diagonal($self, /, offset=0, axis1=0, axis2=1) + -- + a.diagonal(offset=0, axis1=0, axis2=1) Return specified diagonals. 
In NumPy 1.9 the returned array is a @@ -3527,11 +3603,23 @@ """)) -add_newdoc('numpy._core.multiarray', 'ndarray', ('dot')) +add_newdoc('numpy._core.multiarray', 'ndarray', ('dot', + """ + dot($self, other, /, out=None) + -- + + a.dot(other, /, out=None) + + Refer to :func:`numpy.dot` for full documentation. + + """)) add_newdoc('numpy._core.multiarray', 'ndarray', ('dump', """ + dump($self, /, file) + -- + a.dump(file) Dump a pickle of the array to the specified file. @@ -3547,6 +3635,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('dumps', """ + dumps($self, /) + -- + a.dumps() Returns the pickle of the array as a string. @@ -3561,6 +3652,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('fill', """ + fill($self, /, value) + -- + a.fill(value) Fill the array with a scalar value. @@ -3605,6 +3699,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('flatten', """ + flatten($self, /, order='C') + -- + a.flatten(order='C') Return a copy of the array collapsed into one dimension. @@ -3644,6 +3741,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('getfield', """ + getfield($self, /, dtype, offset=0) + -- + a.getfield(dtype, offset=0) Returns a field of the given array as a certain type. @@ -3687,6 +3787,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('item', """ + item($self, /, *args) + -- + a.item(*args) Copy an element of an array to a standard Python scalar and return it. @@ -3753,6 +3856,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('max', """ + max($self, /, axis=None, out=None, **kwargs) + -- + a.max(axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, where=np._NoValue) Return the maximum along a given axis. @@ -3768,6 +3874,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('mean', """ + mean($self, /, axis=None, dtype=None, out=None, **kwargs) + -- + a.mean(axis=None, dtype=None, out=None, keepdims=np._NoValue, *, where=np._NoValue) Returns the average of the array elements along given axis. 
@@ -3783,6 +3892,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('min', """ + min($self, /, axis=None, out=None, **kwargs) + -- + a.min(axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, where=np._NoValue) Return the minimum along a given axis. @@ -3798,6 +3910,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('nonzero', """ + nonzero($self, /) + -- + a.nonzero() Return the indices of the elements that are non-zero. @@ -3813,6 +3928,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('prod', """ + prod($self, /, axis=None, dtype=None, out=None, **kwargs) + -- + a.prod(axis=None, dtype=None, out=None, keepdims=np._NoValue, initial=np._NoValue, where=np._NoValue) @@ -3829,6 +3947,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('put', """ + put($self, indices, values, /, mode='raise') + -- + a.put(indices, values, mode='raise') Set ``a.flat[n] = values[n]`` for all `n` in indices. @@ -3844,6 +3965,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('ravel', """ + ravel($self, /, order='C') + -- + a.ravel([order]) Return a flattened array. @@ -3861,6 +3985,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('repeat', """ + repeat($self, repeats, /, axis=None) + -- + a.repeat(repeats, axis=None) Repeat elements of an array. @@ -3876,6 +4003,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('reshape', """ + reshape($self, /, *shape, order='C', copy=None) + -- + a.reshape(shape, /, *, order='C', copy=None) Returns an array containing the same data with a new shape. @@ -3898,6 +4028,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('resize', """ + resize($self, /, *new_shape, refcheck=True) + -- + a.resize(new_shape, refcheck=True) Change shape and size of array in-place. @@ -3992,6 +4125,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('round', """ + round($self, /, decimals=0, out=None) + -- + a.round(decimals=0, out=None) Return `a` with each element rounded to the given number of decimals. 
@@ -4007,6 +4143,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('searchsorted', """ + searchsorted($self, v, /, side='left', sorter=None) + -- + a.searchsorted(v, side='left', sorter=None) Find indices where elements of v should be inserted in a to maintain order. @@ -4022,6 +4161,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('setfield', """ + setfield($self, val, /, dtype, offset=0) + -- + a.setfield(val, dtype, offset=0) Put a value into a specified place in a field defined by a data-type. @@ -4074,6 +4216,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('setflags', """ + setflags($self, /, *, write=None, align=None, uic=None) + -- + a.setflags(write=None, align=None, uic=None) Set array flags WRITEABLE, ALIGNED, WRITEBACKIFCOPY, @@ -4151,7 +4296,10 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('sort', """ - a.sort(axis=-1, kind=None, order=None) + sort($self, /, axis=-1, kind=None, order=None, *, stable=None) + -- + + a.sort(axis=-1, kind=None, order=None, *, stable=None) Sort an array in-place. Refer to `numpy.sort` for full documentation. @@ -4171,6 +4319,13 @@ be specified as a string, and not all fields need be specified, but unspecified fields will still be used, in the order in which they come up in the dtype, to break ties. + stable : bool, optional + Sort stability. If ``True``, the returned array will maintain + the relative order of ``a`` values which compare as equal. + If ``False`` or ``None``, this is not guaranteed. Internally, + this option selects ``kind='stable'``. Default: ``None``. + + .. 
versionadded:: 2.0.0 See Also -------- @@ -4211,6 +4366,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('partition', """ + partition($self, kth, /, axis=-1, kind='introselect', order=None) + -- + a.partition(kth, axis=-1, kind='introselect', order=None) Partially sorts the elements in the array in such a way that the value of @@ -4270,6 +4428,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('squeeze', """ + squeeze($self, /, axis=None) + -- + a.squeeze(axis=None) Remove axes of length one from `a`. @@ -4285,6 +4446,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('std', """ + std($self, /, axis=None, dtype=None, out=None, ddof=0, **kwargs) + -- + a.std(axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *, where=np._NoValue, mean=np._NoValue) Returns the standard deviation of the array elements along given axis. @@ -4300,6 +4464,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('sum', """ + sum($self, /, axis=None, dtype=None, out=None, **kwargs) + -- + a.sum(axis=None, dtype=None, out=None, keepdims=np._NoValue, initial=np._NoValue, where=np._NoValue) Return the sum of the array elements over the given axis. @@ -4315,7 +4482,10 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('swapaxes', """ - a.swapaxes(axis1, axis2) + swapaxes($self, axis1, axis2, /) + -- + + a.swapaxes(axis1, axis2, /) Return a view of the array with `axis1` and `axis2` interchanged. @@ -4330,6 +4500,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('take', """ + take($self, indices, /, axis=None, out=None, mode='raise') + -- + a.take(indices, axis=None, out=None, mode='raise') Return an array formed from the elements of `a` at the given indices. @@ -4345,7 +4518,10 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('tofile', """ - a.tofile(fid, sep="", format="%s") + tofile($self, fid, /, sep='', format='%s') + -- + + a.tofile(fid, sep='', format='%s') Write array to a file as text or binary (default). 
@@ -4385,6 +4561,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('tolist', """ + tolist($self, /) + -- + a.tolist() Return the array as an ``a.ndim``-levels deep nested list of Python scalars. @@ -4448,7 +4627,11 @@ """)) -add_newdoc('numpy._core.multiarray', 'ndarray', ('tobytes', """ +add_newdoc('numpy._core.multiarray', 'ndarray', ('tobytes', + """ + tobytes($self, /, order='C') + -- + a.tobytes(order='C') Construct Python bytes containing the raw data bytes in the array. @@ -4491,6 +4674,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('trace', """ + trace($self, /, offset=0, axis1=0, axis2=1, dtype=None, out=None) + -- + a.trace(offset=0, axis1=0, axis2=1, dtype=None, out=None) Return the sum along diagonals of the array. @@ -4506,6 +4692,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('transpose', """ + transpose($self, /, *axes) + -- + a.transpose(*axes) Returns a view of the array with axes transposed. @@ -4563,6 +4752,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('var', """ + var($self, /, axis=None, dtype=None, out=None, ddof=0, **kwargs) + -- + a.var(axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *, where=np._NoValue, mean=np._NoValue) Returns the variance of the array elements, along given axis. @@ -4578,6 +4770,9 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('view', """ + view($self, /, *args, **kwargs) + -- + a.view([dtype][, type]) New view of array with the same data. 
diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index c62fb5b4e905..38e710179067 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -4,6 +4,7 @@ import functools import gc import importlib +import inspect import io import itertools import mmap @@ -10872,3 +10873,32 @@ def test_array_dunder_array_preserves_dtype_on_none(dtype): a = np.array([1], dtype=dtype) b = a.__array__(None) assert_array_equal(a, b, strict=True) + + +@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") +@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") +@pytest.mark.parametrize( + "methodname", + [ + "__array__", "__array_finalize__", "__array_wrap__", + "__copy__", "__deepcopy__", "__reduce__", "__setstate__", + "all", "any", "argmax", "argmin", "argsort", "argpartition", "astype", + "byteswap", "choose", "clip", "compress", "conj", "conjugate", "copy", + "cumprod", "cumsum", "diagonal", "dot", "dump", "dumps", "fill", "flatten", + "getfield", "item", "max", "mean", "min", "nonzero", "prod", "put", "ravel", + "repeat", "reshape", "resize", "round", "searchsorted", "setfield", "setflags", + "sort", "partition", "squeeze", "std", "sum", "swapaxes", "take", "tofile", + "tolist", "tobytes", "trace", "transpose", "var", "view", + ], +) +def test_array_method_signatures(methodname: str): + method = getattr(np.ndarray, methodname) + assert callable(method) + + try: + sig = inspect.signature(method) + except ValueError as e: + pytest.fail(f"Could not get signature for np.ndarray.{methodname}: {e}") + + assert "self" in sig.parameters + assert sig.parameters["self"].kind is inspect.Parameter.POSITIONAL_ONLY From 3911b2fb5473ae02edf356ea68083ee502c91411 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 31 Oct 2025 04:52:05 +0100 Subject: [PATCH 0787/1718] TYP: update the ``ndarray`` method stubs to match their new signatures --- numpy/__init__.pyi | 244 +++++++++--------- 
.../tests/data/pass/ndarray_conversion.py | 6 - 2 files changed, 129 insertions(+), 121 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index fbd344ad3b43..c44b34cef794 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1696,18 +1696,23 @@ class _ArrayOrScalarCommon: def dump(self, file: StrOrBytesPath | SupportsWrite[bytes]) -> None: ... def dumps(self) -> bytes: ... def tobytes(self, order: _OrderKACF = ...) -> bytes: ... - def tofile(self, fid: StrOrBytesPath | _SupportsFileMethods, sep: str = ..., format: str = ...) -> None: ... + def tofile(self, fid: StrOrBytesPath | _SupportsFileMethods, /, sep: str = "", format: str = "%s") -> None: ... # generics and 0d arrays return builtin scalars def tolist(self) -> Any: ... def to_device(self, device: L["cpu"], /, *, stream: int | Any | None = ...) -> Self: ... # NOTE: for `generic`, these two methods don't do anything - def fill(self, value: Incomplete, /) -> None: ... - def put(self, /, indices: _ArrayLikeInt_co, values: ArrayLike, mode: _ModeKind = "raise") -> None: ... + def fill(self, /, value: Incomplete) -> None: ... + def put(self, indices: _ArrayLikeInt_co, values: ArrayLike, /, mode: _ModeKind = "raise") -> None: ... # NOTE: even on `generic` this seems to work def setflags( - self, /, write: builtins.bool | None = None, align: builtins.bool | None = None, uic: builtins.bool | None = None + self, + /, + *, + write: builtins.bool | None = None, + align: builtins.bool | None = None, + uic: builtins.bool | None = None, ) -> None: ... @property @@ -1813,9 +1818,10 @@ class _ArrayOrScalarCommon: /, axis: _ShapeLike | None = None, out: None = None, - keepdims: builtins.bool = False, - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = True, + *, + keepdims: builtins.bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... 
@overload def max( @@ -1823,9 +1829,10 @@ class _ArrayOrScalarCommon: /, axis: _ShapeLike | None, out: _ArrayT, - keepdims: builtins.bool = False, - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = True, + *, + keepdims: builtins.bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... @overload def max( @@ -1834,9 +1841,9 @@ class _ArrayOrScalarCommon: axis: _ShapeLike | None = None, *, out: _ArrayT, - keepdims: builtins.bool = False, - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = True, + keepdims: builtins.bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... @overload @@ -1845,9 +1852,10 @@ class _ArrayOrScalarCommon: /, axis: _ShapeLike | None = None, out: None = None, - keepdims: builtins.bool = False, - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = True, + *, + keepdims: builtins.bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload def min( @@ -1855,9 +1863,10 @@ class _ArrayOrScalarCommon: /, axis: _ShapeLike | None, out: _ArrayT, - keepdims: builtins.bool = False, - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = True, + *, + keepdims: builtins.bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... @overload def min( @@ -1866,9 +1875,9 @@ class _ArrayOrScalarCommon: axis: _ShapeLike | None = None, *, out: _ArrayT, - keepdims: builtins.bool = False, - initial: _NumberLike_co = ..., - where: _ArrayLikeBool_co = True, + keepdims: builtins.bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... 
@overload @@ -1878,9 +1887,10 @@ class _ArrayOrScalarCommon: axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, out: None = None, - keepdims: builtins.bool = False, - initial: _NumberLike_co = 0, - where: _ArrayLikeBool_co = True, + *, + keepdims: builtins.bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload def sum( @@ -1889,9 +1899,10 @@ class _ArrayOrScalarCommon: axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT, - keepdims: builtins.bool = False, - initial: _NumberLike_co = 0, - where: _ArrayLikeBool_co = True, + *, + keepdims: builtins.bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... @overload def sum( @@ -1901,9 +1912,9 @@ class _ArrayOrScalarCommon: dtype: DTypeLike | None = None, *, out: _ArrayT, - keepdims: builtins.bool = False, - initial: _NumberLike_co = 0, - where: _ArrayLikeBool_co = True, + keepdims: builtins.bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... @overload @@ -1913,9 +1924,10 @@ class _ArrayOrScalarCommon: axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, out: None = None, - keepdims: builtins.bool = False, - initial: _NumberLike_co = 1, - where: _ArrayLikeBool_co = True, + *, + keepdims: builtins.bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... 
@overload def prod( @@ -1924,9 +1936,10 @@ class _ArrayOrScalarCommon: axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT, - keepdims: builtins.bool = False, - initial: _NumberLike_co = 1, - where: _ArrayLikeBool_co = True, + *, + keepdims: builtins.bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... @overload def prod( @@ -1936,9 +1949,9 @@ class _ArrayOrScalarCommon: dtype: DTypeLike | None = None, *, out: _ArrayT, - keepdims: builtins.bool = False, - initial: _NumberLike_co = 1, - where: _ArrayLikeBool_co = True, + keepdims: builtins.bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... @overload @@ -1947,9 +1960,9 @@ class _ArrayOrScalarCommon: axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, out: None = None, - keepdims: builtins.bool = False, *, - where: _ArrayLikeBool_co = True, + keepdims: builtins.bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload def mean( @@ -1958,9 +1971,9 @@ class _ArrayOrScalarCommon: axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT, - keepdims: builtins.bool = False, *, - where: _ArrayLikeBool_co = True, + keepdims: builtins.bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... @overload def mean( @@ -1970,8 +1983,8 @@ class _ArrayOrScalarCommon: dtype: DTypeLike | None = None, *, out: _ArrayT, - keepdims: builtins.bool = False, - where: _ArrayLikeBool_co = True, + keepdims: builtins.bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... 
@overload @@ -1981,11 +1994,11 @@ class _ArrayOrScalarCommon: dtype: DTypeLike | None = None, out: None = None, ddof: float = 0, - keepdims: builtins.bool = False, *, - where: _ArrayLikeBool_co = True, - mean: _ArrayLikeNumber_co = ..., - correction: float = ..., + keepdims: builtins.bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., ) -> Any: ... @overload def std( @@ -1994,11 +2007,11 @@ class _ArrayOrScalarCommon: dtype: DTypeLike | None, out: _ArrayT, ddof: float = 0, - keepdims: builtins.bool = False, *, - where: _ArrayLikeBool_co = True, - mean: _ArrayLikeNumber_co = ..., - correction: float = ..., + keepdims: builtins.bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., ) -> _ArrayT: ... @overload def std( @@ -2008,10 +2021,10 @@ class _ArrayOrScalarCommon: *, out: _ArrayT, ddof: float = 0, - keepdims: builtins.bool = False, - where: _ArrayLikeBool_co = True, - mean: _ArrayLikeNumber_co = ..., - correction: float = ..., + keepdims: builtins.bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., ) -> _ArrayT: ... @overload @@ -2021,11 +2034,11 @@ class _ArrayOrScalarCommon: dtype: DTypeLike | None = None, out: None = None, ddof: float = 0, - keepdims: builtins.bool = False, *, - where: _ArrayLikeBool_co = True, - mean: _ArrayLikeNumber_co = ..., - correction: float = ..., + keepdims: builtins.bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., ) -> Any: ... 
@overload def var( @@ -2034,11 +2047,11 @@ class _ArrayOrScalarCommon: dtype: DTypeLike | None, out: _ArrayT, ddof: float = 0, - keepdims: builtins.bool = False, *, - where: _ArrayLikeBool_co = True, - mean: _ArrayLikeNumber_co = ..., - correction: float = ..., + keepdims: builtins.bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., ) -> _ArrayT: ... @overload def var( @@ -2048,10 +2061,10 @@ class _ArrayOrScalarCommon: *, out: _ArrayT, ddof: float = 0, - keepdims: builtins.bool = False, - where: _ArrayLikeBool_co = True, - mean: _ArrayLikeNumber_co = ..., - correction: float = ..., + keepdims: builtins.bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., ) -> _ArrayT: ... class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @@ -2218,14 +2231,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def tolist(self, /) -> Any: ... @overload - def resize(self, new_shape: _ShapeLike, /, *, refcheck: builtins.bool = ...) -> None: ... + def resize(self, new_shape: _ShapeLike, /, *, refcheck: builtins.bool = True) -> None: ... @overload - def resize(self, /, *new_shape: SupportsIndex, refcheck: builtins.bool = ...) -> None: ... - - def setflags(self, write: builtins.bool = ..., align: builtins.bool = ..., uic: builtins.bool = ...) -> None: ... # type: ignore[override] + def resize(self, /, *new_shape: SupportsIndex, refcheck: builtins.bool = True) -> None: ... def squeeze( self, + /, axis: SupportsIndex | tuple[SupportsIndex, ...] | None = ..., ) -> ndarray[_AnyShape, _DTypeT_co]: ... 
@@ -2314,8 +2326,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def partition( self, - /, kth: _ArrayLikeInt, + /, axis: SupportsIndex = -1, kind: _PartitionKind = "introselect", order: None = None, @@ -2323,8 +2335,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def partition( self: NDArray[void], - /, kth: _ArrayLikeInt, + /, axis: SupportsIndex = -1, kind: _PartitionKind = "introselect", order: str | Sequence[str] | None = None, @@ -2334,8 +2346,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def argpartition( self, - /, kth: _ArrayLikeInt, + /, axis: SupportsIndex | None = -1, kind: _PartitionKind = "introselect", order: None = None, @@ -2343,8 +2355,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def argpartition( self: NDArray[void], - /, kth: _ArrayLikeInt, + /, axis: SupportsIndex | None = -1, kind: _PartitionKind = "introselect", order: str | Sequence[str] | None = None, @@ -2353,19 +2365,19 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # def diagonal( self, - offset: SupportsIndex = ..., - axis1: SupportsIndex = ..., - axis2: SupportsIndex = ..., + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, ) -> ndarray[_AnyShape, _DTypeT_co]: ... # 1D + 1D returns a scalar; # all other with at least 1 non-0D array return an ndarray. @overload - def dot(self, b: _ScalarLike_co, out: None = None) -> NDArray[Any]: ... + def dot(self, b: _ScalarLike_co, /, out: None = None) -> NDArray[Any]: ... @overload - def dot(self, b: ArrayLike, out: None = None) -> Any: ... + def dot(self, b: ArrayLike, /, out: None = None) -> Any: ... @overload - def dot(self, b: ArrayLike, out: _ArrayT) -> _ArrayT: ... + def dot(self, b: ArrayLike, /, out: _ArrayT) -> _ArrayT: ... 
# `nonzero()` raises for 0d arrays/generics def nonzero(self) -> tuple[ndarray[tuple[int], np.dtype[intp]], ...]: ... @@ -2374,49 +2386,55 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def searchsorted( self, # >= 1D array v: _ScalarLike_co, # 0D array-like - side: _SortSide = ..., - sorter: _ArrayLikeInt_co | None = ..., + /, + side: _SortSide = "left", + sorter: _ArrayLikeInt_co | None = None, ) -> intp: ... @overload def searchsorted( self, # >= 1D array v: ArrayLike, - side: _SortSide = ..., - sorter: _ArrayLikeInt_co | None = ..., + /, + side: _SortSide = "left", + sorter: _ArrayLikeInt_co | None = None, ) -> NDArray[intp]: ... def sort( self, - axis: SupportsIndex = ..., - kind: _SortKind | None = ..., - order: str | Sequence[str] | None = ..., + /, + axis: SupportsIndex = -1, + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, *, - stable: builtins.bool | None = ..., + stable: builtins.bool | None = None, ) -> None: ... # Keep in sync with `MaskedArray.trace` @overload def trace( self, # >= 2D array - offset: SupportsIndex = ..., - axis1: SupportsIndex = ..., - axis2: SupportsIndex = ..., - dtype: DTypeLike | None = ..., + /, + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + dtype: DTypeLike | None = None, out: None = None, ) -> Any: ... @overload def trace( self, # >= 2D array - offset: SupportsIndex = ..., - axis1: SupportsIndex = ..., - axis2: SupportsIndex = ..., - dtype: DTypeLike | None = ..., + /, + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + dtype: DTypeLike | None = None, *, out: _ArrayT, ) -> _ArrayT: ... 
@overload def trace( self, # >= 2D array + /, offset: SupportsIndex, axis1: SupportsIndex, axis2: SupportsIndex, @@ -2428,6 +2446,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def take( self: NDArray[_ScalarT], indices: _IntLike_co, + /, axis: SupportsIndex | None = ..., out: None = None, mode: _ModeKind = ..., @@ -2436,6 +2455,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def take( self, indices: _ArrayLikeInt_co, + /, axis: SupportsIndex | None = ..., out: None = None, mode: _ModeKind = ..., @@ -2444,6 +2464,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def take( self, indices: _ArrayLikeInt_co, + /, axis: SupportsIndex | None = ..., *, out: _ArrayT, @@ -2453,23 +2474,16 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def take( self, indices: _ArrayLikeInt_co, + /, axis: SupportsIndex | None, out: _ArrayT, mode: _ModeKind = ..., ) -> _ArrayT: ... @overload - def repeat( - self, - repeats: _ArrayLikeInt_co, - axis: None = None, - ) -> ndarray[tuple[int], _DTypeT_co]: ... + def repeat(self, repeats: _ArrayLikeInt_co, /, axis: None = None) -> ndarray[tuple[int], _DTypeT_co]: ... @overload - def repeat( - self, - repeats: _ArrayLikeInt_co, - axis: SupportsIndex, - ) -> ndarray[_AnyShape, _DTypeT_co]: ... + def repeat(self, repeats: _ArrayLikeInt_co, /, axis: SupportsIndex) -> ndarray[_AnyShape, _DTypeT_co]: ... def flatten(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], _DTypeT_co]: ... def ravel(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], _DTypeT_co]: ... @@ -2592,11 +2606,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload # (dtype: ?, type: T) def view(self, /, dtype: DTypeLike, type: type[_ArrayT]) -> _ArrayT: ... - def setfield(self, /, val: ArrayLike, dtype: DTypeLike, offset: SupportsIndex = 0) -> None: ... 
+ def setfield(self, val: ArrayLike, /, dtype: DTypeLike, offset: SupportsIndex = 0) -> None: ... @overload - def getfield(self, dtype: _DTypeLike[_ScalarT], offset: SupportsIndex = 0) -> NDArray[_ScalarT]: ... + def getfield(self, /, dtype: _DTypeLike[_ScalarT], offset: SupportsIndex = 0) -> NDArray[_ScalarT]: ... @overload - def getfield(self, dtype: DTypeLike, offset: SupportsIndex = 0) -> NDArray[Any]: ... + def getfield(self, /, dtype: DTypeLike, offset: SupportsIndex = 0) -> NDArray[Any]: ... def __index__(self: NDArray[integer], /) -> int: ... def __complex__(self: NDArray[number | np.bool | object_], /) -> complex: ... @@ -3659,7 +3673,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): out: Never = ..., ) -> Never: ... def diagonal(self: Never, /, offset: Never = ..., axis1: Never = ..., axis2: Never = ...) -> Never: ... # type: ignore[misc] - def swapaxes(self: Never, /, axis1: Never, axis2: Never) -> Never: ... # type: ignore[misc] + def swapaxes(self: Never, axis1: Never, axis2: Never, /) -> Never: ... # type: ignore[misc] def sort(self: Never, /, axis: Never = ..., kind: Never = ..., order: Never = ...) -> Never: ... # type: ignore[misc] def nonzero(self: Never, /) -> Never: ... # type: ignore[misc] def setfield(self: Never, /, val: Never, dtype: Never, offset: Never = ...) -> None: ... # type: ignore[misc] @@ -3755,7 +3769,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): mode: _ModeKind = ..., ) -> _ArrayT: ... - def repeat(self, repeats: _ArrayLikeInt_co, axis: SupportsIndex | None = None) -> ndarray[tuple[int], dtype[Self]]: ... + def repeat(self, repeats: _ArrayLikeInt_co, /, axis: SupportsIndex | None = None) -> ndarray[tuple[int], dtype[Self]]: ... def flatten(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], dtype[Self]]: ... def ravel(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], dtype[Self]]: ... 
diff --git a/numpy/typing/tests/data/pass/ndarray_conversion.py b/numpy/typing/tests/data/pass/ndarray_conversion.py index 76da1dadd327..a4e0bcf34bdd 100644 --- a/numpy/typing/tests/data/pass/ndarray_conversion.py +++ b/numpy/typing/tests/data/pass/ndarray_conversion.py @@ -74,14 +74,8 @@ # setflags nd.setflags() - -nd.setflags(True) nd.setflags(write=True) - -nd.setflags(True, True) nd.setflags(write=True, align=True) - -nd.setflags(True, True, False) nd.setflags(write=True, align=True, uic=False) # fill is pretty simple From d4469b5e9db3f17a1ffbda1fba8c7947246219df Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 31 Oct 2025 20:26:01 +0100 Subject: [PATCH 0788/1718] BUG: scalar constructor signatures --- numpy/__init__.pyi | 8 +- numpy/_core/_add_newdocs_scalars.py | 455 +++++++++++------------ numpy/_core/tests/test_scalar_methods.py | 54 ++- 3 files changed, 276 insertions(+), 241 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index fbd344ad3b43..c83c9fe8ed6d 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -5648,18 +5648,18 @@ class character(flexible[_CharacterItemT_co], Generic[_CharacterItemT_co]): # t class bytes_(character[bytes], bytes): # type: ignore[misc] @overload - def __new__(cls, o: object = ..., /) -> Self: ... + def __new__(cls, value: object = b"", /) -> Self: ... @overload - def __new__(cls, s: str, /, encoding: str, errors: str = ...) -> Self: ... + def __new__(cls, value: str, /, encoding: str, errors: str = "strict") -> Self: ... # def __bytes__(self, /) -> bytes: ... class str_(character[str], str): # type: ignore[misc] @overload - def __new__(cls, value: object = ..., /) -> Self: ... + def __new__(cls, value: object = "", /) -> Self: ... @overload - def __new__(cls, value: bytes, /, encoding: str = ..., errors: str = ...) -> Self: ... + def __new__(cls, value: bytes, /, encoding: str, errors: str = "strict") -> Self: ... 
# See `numpy._typing._ufunc` for more concrete nin-/nout-specific stubs @final diff --git a/numpy/_core/_add_newdocs_scalars.py b/numpy/_core/_add_newdocs_scalars.py index 9d4cb48825e0..8c7911e182ce 100644 --- a/numpy/_core/_add_newdocs_scalars.py +++ b/numpy/_core/_add_newdocs_scalars.py @@ -48,7 +48,7 @@ def type_aliases_gen(): ('complex128', 'Complex number type composed of 2 64-bit-precision floating-point numbers'), ('complex192', 'Complex number type composed of 2 96-bit extended-precision floating-point numbers'), ('complex256', 'Complex number type composed of 2 128-bit extended-precision floating-point numbers'), - ]) +]) def _get_platform_and_machine(): @@ -67,258 +67,232 @@ def _get_platform_and_machine(): _system, _machine = _get_platform_and_machine() _doc_alias_string = f":Alias on this platform ({_system} {_machine}):" +# docstring prefix that cpython uses to populate `__text_signature__` +_ARGUMENT_CLINIC_TEMPLATE = """{name}{signature} +-- -def add_newdoc_for_scalar_type(obj, fixed_aliases, doc): +{docstring}""" + +def add_newdoc_for_scalar_type(name: str, text_signature: str, doc: str) -> None: # note: `:field: value` is rST syntax which renders as field lists. 
- o = getattr(_numerictypes, obj) - - character_code = dtype(o).char - canonical_name_doc = "" if obj == o.__name__ else \ - f":Canonical name: `numpy.{obj}`\n " - if fixed_aliases: - alias_doc = ''.join(f":Alias: `numpy.{alias}`\n " - for alias in fixed_aliases) - else: - alias_doc = '' - alias_doc += ''.join(f"{_doc_alias_string} `numpy.{alias}`: {doc}.\n " - for (alias_type, alias, doc) in possible_aliases if alias_type is o) - - docstring = f""" - {doc.strip()} - - :Character code: ``'{character_code}'`` - {canonical_name_doc}{alias_doc} - """ + cls = getattr(_numerictypes, name) + module = cls.__module__ - add_newdoc('numpy._core.numerictypes', obj, docstring) + lines_extra = [ + "", # blank line after main doc + f":Character code: ``{dtype(cls).char!r}``", + ] + if name != cls.__name__: + lines_extra.append(f":Canonical name: `{module}.{name}`") -_bool_docstring = ( - """ - Boolean type (True or False), stored as a byte. + lines_extra.extend( + f"{_doc_alias_string} `{module}.{alias}`: {doc}." + for alias_type, alias, doc in possible_aliases + if alias_type is cls + ) - .. warning:: + docstring = _ARGUMENT_CLINIC_TEMPLATE.format( + name=cls.__name__, # must match the class name + signature=text_signature, + docstring="\n".join([doc.strip(), *lines_extra]), + ) + add_newdoc('numpy._core.numerictypes', name, docstring) - The :class:`bool` type is not a subclass of the :class:`int_` type - (the :class:`bool` is not even a number type). This is different - than Python's default implementation of :class:`bool` as a - sub-class of :class:`int`. - """ -) -add_newdoc_for_scalar_type('bool', [], _bool_docstring) +for bool_name in ('bool', 'bool_'): + add_newdoc_for_scalar_type(bool_name, '(value=False, /)', """ +Boolean type (True or False), stored as a byte. -add_newdoc_for_scalar_type('bool_', [], _bool_docstring) +.. warning:: -add_newdoc_for_scalar_type('byte', [], - """ - Signed integer type, compatible with C ``char``. 
- """) + The :class:`bool` type is not a subclass of the :class:`int_` type + (the :class:`bool` is not even a number type). This is different + than Python's default implementation of :class:`bool` as a + sub-class of :class:`int`. +""") -add_newdoc_for_scalar_type('short', [], - """ - Signed integer type, compatible with C ``short``. - """) +add_newdoc_for_scalar_type('byte', '(value=0, /)', """ +Signed integer type, compatible with C ``char``. +""") -add_newdoc_for_scalar_type('intc', [], - """ - Signed integer type, compatible with C ``int``. - """) +add_newdoc_for_scalar_type('short', '(value=0, /)', """ +Signed integer type, compatible with C ``short``. +""") + +add_newdoc_for_scalar_type('intc', '(value=0, /)', """ +Signed integer type, compatible with C ``int``. +""") # TODO: These docs probably need an if to highlight the default rather than # the C-types (and be correct). -add_newdoc_for_scalar_type('int_', [], - """ - Default signed integer type, 64bit on 64bit systems and 32bit on 32bit - systems. - """) - -add_newdoc_for_scalar_type('longlong', [], - """ - Signed integer type, compatible with C ``long long``. - """) - -add_newdoc_for_scalar_type('ubyte', [], - """ - Unsigned integer type, compatible with C ``unsigned char``. - """) - -add_newdoc_for_scalar_type('ushort', [], - """ - Unsigned integer type, compatible with C ``unsigned short``. - """) - -add_newdoc_for_scalar_type('uintc', [], - """ - Unsigned integer type, compatible with C ``unsigned int``. - """) - -add_newdoc_for_scalar_type('uint', [], - """ - Unsigned signed integer type, 64bit on 64bit systems and 32bit on 32bit - systems. - """) - -add_newdoc_for_scalar_type('ulonglong', [], - """ - Signed integer type, compatible with C ``unsigned long long``. - """) - -add_newdoc_for_scalar_type('half', [], - """ - Half-precision floating-point number type. - """) - -add_newdoc_for_scalar_type('single', [], - """ - Single-precision floating-point number type, compatible with C ``float``. 
- """) - -add_newdoc_for_scalar_type('double', [], - """ - Double-precision floating-point number type, compatible with Python - :class:`float` and C ``double``. - """) - -add_newdoc_for_scalar_type('longdouble', [], - """ - Extended-precision floating-point number type, compatible with C - ``long double`` but not necessarily with IEEE 754 quadruple-precision. - """) - -add_newdoc_for_scalar_type('csingle', [], - """ - Complex number type composed of two single-precision floating-point - numbers. - """) - -add_newdoc_for_scalar_type('cdouble', [], - """ - Complex number type composed of two double-precision floating-point - numbers, compatible with Python :class:`complex`. - """) - -add_newdoc_for_scalar_type('clongdouble', [], - """ - Complex number type composed of two extended-precision floating-point - numbers. - """) - -add_newdoc_for_scalar_type('object_', [], - """ - Any Python object. - """) - -add_newdoc_for_scalar_type('str_', [], - r""" - A unicode string. - - This type strips trailing null codepoints. - - >>> s = np.str_("abc\x00") - >>> s - 'abc' - - Unlike the builtin :class:`str`, this supports the - :ref:`python:bufferobjects`, exposing its contents as UCS4: - - >>> m = memoryview(np.str_("abc")) - >>> m.format - '3w' - >>> m.tobytes() - b'a\x00\x00\x00b\x00\x00\x00c\x00\x00\x00' - """) - -add_newdoc_for_scalar_type('bytes_', [], - r""" - A byte string. - - When used in arrays, this type strips trailing null bytes. - """) - -add_newdoc_for_scalar_type('void', [], - r""" - np.void(length_or_data, /, dtype=None) - - Create a new structured or unstructured void scalar. - - Parameters - ---------- - length_or_data : int, array-like, bytes-like, object - One of multiple meanings (see notes). The length or - bytes data of an unstructured void. Or alternatively, - the data to be stored in the new scalar when `dtype` - is provided. - This can be an array-like, in which case an array may - be returned. 
- dtype : dtype, optional - If provided the dtype of the new scalar. This dtype must - be "void" dtype (i.e. a structured or unstructured void, - see also :ref:`defining-structured-types`). - - .. versionadded:: 1.24 - - Notes - ----- - For historical reasons and because void scalars can represent both - arbitrary byte data and structured dtypes, the void constructor - has three calling conventions: - - 1. ``np.void(5)`` creates a ``dtype="V5"`` scalar filled with five - ``\0`` bytes. The 5 can be a Python or NumPy integer. - 2. ``np.void(b"bytes-like")`` creates a void scalar from the byte string. - The dtype itemsize will match the byte string length, here ``"V10"``. - 3. When a ``dtype=`` is passed the call is roughly the same as an - array creation. However, a void scalar rather than array is returned. - - Please see the examples which show all three different conventions. - - Examples - -------- - >>> np.void(5) - np.void(b'\x00\x00\x00\x00\x00') - >>> np.void(b'abcd') - np.void(b'\x61\x62\x63\x64') - >>> np.void((3.2, b'eggs'), dtype="d,S5") - np.void((3.2, b'eggs'), dtype=[('f0', '>> np.void(3, dtype=[('x', np.int8), ('y', np.int8)]) - np.void((3, 3), dtype=[('x', 'i1'), ('y', 'i1')]) - - """) - -add_newdoc_for_scalar_type('datetime64', [], - """ - If created from a 64-bit integer, it represents an offset from - ``1970-01-01T00:00:00``. - If created from string, the string can be in ISO 8601 date - or datetime format. - - When parsing a string to create a datetime object, if the string contains - a trailing timezone (A 'Z' or a timezone offset), the timezone will be - dropped and a User Warning is given. - - Datetime64 objects should be considered to be UTC and therefore have an - offset of +0000. - - >>> np.datetime64(10, 'Y') - np.datetime64('1980') - >>> np.datetime64('1980', 'Y') - np.datetime64('1980') - >>> np.datetime64(10, 'D') - np.datetime64('1970-01-11') - - See :ref:`arrays.datetime` for more information. 
- """) - -add_newdoc_for_scalar_type('timedelta64', [], - """ - A timedelta stored as a 64-bit integer. - - See :ref:`arrays.datetime` for more information. - """) +add_newdoc_for_scalar_type('int_', '(value=0, /)', """ +Default signed integer type, 64bit on 64bit systems and 32bit on 32bit systems. +""") + +add_newdoc_for_scalar_type('longlong', '(value=0, /)', """ +Signed integer type, compatible with C ``long long``. +""") + +add_newdoc_for_scalar_type('ubyte', '(value=0, /)', """ +Unsigned integer type, compatible with C ``unsigned char``. +""") + +add_newdoc_for_scalar_type('ushort', '(value=0, /)', """ +Unsigned integer type, compatible with C ``unsigned short``. +""") + +add_newdoc_for_scalar_type('uintc', '(value=0, /)', """ +Unsigned integer type, compatible with C ``unsigned int``. +""") + +add_newdoc_for_scalar_type('uint', '(value=0, /)', """ +Unsigned signed integer type, 64bit on 64bit systems and 32bit on 32bit systems. +""") + +add_newdoc_for_scalar_type('ulonglong', '(value=0, /)', """ +Signed integer type, compatible with C ``unsigned long long``. +""") + +add_newdoc_for_scalar_type('half', '(value=0, /)', """ +Half-precision floating-point number type. +""") + +add_newdoc_for_scalar_type('single', '(value=0, /)', """ +Single-precision floating-point number type, compatible with C ``float``. +""") + +add_newdoc_for_scalar_type('double', '(value=0, /)', """ +Double-precision floating-point number type, compatible with Python :class:`float` and C ``double``. +""") + +add_newdoc_for_scalar_type('longdouble', '(value=0, /)', """ +Extended-precision floating-point number type, compatible with C ``long double`` +but not necessarily with IEEE 754 quadruple-precision. +""") + +add_newdoc_for_scalar_type('csingle', '(real=0, imag=0, /)', """ +Complex number type composed of two single-precision floating-point numbers. 
+""") + +add_newdoc_for_scalar_type('cdouble', '(real=0, imag=0, /)', """ +Complex number type composed of two double-precision floating-point numbers, +compatible with Python :class:`complex`. +""") + +add_newdoc_for_scalar_type('clongdouble', '(real=0, imag=0, /)', """ +Complex number type composed of two extended-precision floating-point numbers. +""") + +add_newdoc_for_scalar_type('object_', '(value=None, /)', """ +Any Python object. +""") + +add_newdoc_for_scalar_type('str_', '(value="", /, *args, **kwargs)', r""" +A unicode string. + +This type strips trailing null codepoints. + +>>> s = np.str_("abc\x00") +>>> s +'abc' + +Unlike the builtin :class:`str`, this supports the +:ref:`python:bufferobjects`, exposing its contents as UCS4: + +>>> m = memoryview(np.str_("abc")) +>>> m.format +'3w' +>>> m.tobytes() +b'a\x00\x00\x00b\x00\x00\x00c\x00\x00\x00' +""") + +add_newdoc_for_scalar_type('bytes_', '(value="", /, *args, **kwargs)', r""" +A byte string. + +When used in arrays, this type strips trailing null bytes. +""") + +add_newdoc_for_scalar_type('void', '(length_or_data, /, dtype=None)', r""" +np.void(length_or_data, /, dtype=None) + +Create a new structured or unstructured void scalar. + +Parameters +---------- +length_or_data : int, array-like, bytes-like, object + One of multiple meanings (see notes). The length or + bytes data of an unstructured void. Or alternatively, + the data to be stored in the new scalar when `dtype` + is provided. + This can be an array-like, in which case an array may + be returned. +dtype : dtype, optional + If provided the dtype of the new scalar. This dtype must + be "void" dtype (i.e. a structured or unstructured void, + see also :ref:`defining-structured-types`). + + .. versionadded:: 1.24 + +Notes +----- +For historical reasons and because void scalars can represent both +arbitrary byte data and structured dtypes, the void constructor +has three calling conventions: + +1. 
``np.void(5)`` creates a ``dtype="V5"`` scalar filled with five + ``\0`` bytes. The 5 can be a Python or NumPy integer. +2. ``np.void(b"bytes-like")`` creates a void scalar from the byte string. + The dtype itemsize will match the byte string length, here ``"V10"``. +3. When a ``dtype=`` is passed the call is roughly the same as an + array creation. However, a void scalar rather than array is returned. + +Please see the examples which show all three different conventions. + +Examples +-------- +>>> np.void(5) +np.void(b'\x00\x00\x00\x00\x00') +>>> np.void(b'abcd') +np.void(b'\x61\x62\x63\x64') +>>> np.void((3.2, b'eggs'), dtype="d,S5") +np.void((3.2, b'eggs'), dtype=[('f0', '>> np.void(3, dtype=[('x', np.int8), ('y', np.int8)]) +np.void((3, 3), dtype=[('x', 'i1'), ('y', 'i1')]) +""") + +add_newdoc_for_scalar_type('datetime64', '(value=None, /, *args)', """ +If created from a 64-bit integer, it represents an offset from ``1970-01-01T00:00:00``. +If created from string, the string can be in ISO 8601 date or datetime format. + +When parsing a string to create a datetime object, if the string contains +a trailing timezone (A 'Z' or a timezone offset), the timezone will be +dropped and a User Warning is given. + +Datetime64 objects should be considered to be UTC and therefore have an +offset of +0000. + +>>> np.datetime64(10, 'Y') +np.datetime64('1980') +>>> np.datetime64('1980', 'Y') +np.datetime64('1980') +>>> np.datetime64(10, 'D') +np.datetime64('1970-01-11') + +See :ref:`arrays.datetime` for more information. +""") + +add_newdoc_for_scalar_type('timedelta64', '(value=0, /, *args)', """ +A timedelta stored as a 64-bit integer. + +See :ref:`arrays.datetime` for more information. +""") add_newdoc('numpy._core.numerictypes', "integer", ('is_integer', """ + is_integer($self, /) + -- + integer.is_integer() -> bool Return ``True`` if the number is finite with integral value. 
@@ -338,6 +312,9 @@ def add_newdoc_for_scalar_type(obj, fixed_aliases, doc): for float_name in ('half', 'single', 'double', 'longdouble'): add_newdoc('numpy._core.numerictypes', float_name, ('as_integer_ratio', f""" + as_integer_ratio($self, /) + -- + {float_name}.as_integer_ratio() -> (int, int) Return a pair of integers, whose ratio is exactly equal to the original @@ -354,6 +331,9 @@ def add_newdoc_for_scalar_type(obj, fixed_aliases, doc): add_newdoc('numpy._core.numerictypes', float_name, ('is_integer', f""" + is_integer($self, /) + -- + {float_name}.is_integer() -> bool Return ``True`` if the floating point number is finite with integral @@ -370,10 +350,13 @@ def add_newdoc_for_scalar_type(obj, fixed_aliases, doc): """)) for int_name in ('int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32', - 'int64', 'uint64', 'int64', 'uint64', 'int64', 'uint64'): + 'int64', 'uint64', 'int64', 'uint64', 'int64', 'uint64'): # Add negative examples for signed cases by checking typecode add_newdoc('numpy._core.numerictypes', int_name, ('bit_count', f""" + bit_count($self, /) + -- + {int_name}.bit_count() -> int Computes the number of 1-bits in the absolute value of the input. 
diff --git a/numpy/_core/tests/test_scalar_methods.py b/numpy/_core/tests/test_scalar_methods.py index 6fd4846006d0..4ce921d67541 100644 --- a/numpy/_core/tests/test_scalar_methods.py +++ b/numpy/_core/tests/test_scalar_methods.py @@ -2,7 +2,9 @@ Test the scalar constructors, which also do type-coercion """ import fractions +import inspect import platform +import sys import types from typing import Any, Literal @@ -10,7 +12,7 @@ import numpy as np from numpy._core import sctypes -from numpy.testing import assert_equal, assert_raises +from numpy.testing import IS_PYPY, assert_equal, assert_raises class TestAsIntegerRatio: @@ -252,3 +254,53 @@ def test_array_wrap(scalar): arr1d = np.array([3], dtype=np.int8) assert scalar.__array_wrap__(arr1d) is arr1d assert scalar.__array_wrap__(arr1d, None, True) is arr1d + + +@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") +@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") +@pytest.mark.parametrize( + "sctype", + [ + *sctypes["int" ], + *sctypes["uint"], + *sctypes["float"], + *sctypes["complex"], + *sctypes["others"], + np.datetime64, + np.timedelta64, + ], +) +def test_constructor_signatures(sctype: type[np.generic]) -> None: + try: + sig = inspect.signature(sctype) + except ValueError: + pytest.fail(f"missing signature: {sctype}") + + assert sig.parameters + +@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") +@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") +@pytest.mark.parametrize( + "sctype", + [np.integer, *sctypes["int" ], *sctypes["uint"], *sctypes["float"]], +) +def test_method_signatures_is_integer(sctype: type[np.integer | np.floating]) -> None: + try: + sig = inspect.signature(sctype.is_integer) + except ValueError: + pytest.fail(f"missing signature: {sctype.__name__}.is_integer") + + assert len(sig.parameters) == 1 + assert sig.parameters["self"].kind == inspect.Parameter.POSITIONAL_ONLY + +@pytest.mark.skipif(sys.flags.optimize == 2, 
reason="Python running -OO") +@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") +@pytest.mark.parametrize("sctype", sctypes["float"]) +def test_method_signatures_as_integer_ratio(sctype: type[np.floating]) -> None: + try: + sig = inspect.signature(sctype.as_integer_ratio) + except ValueError: + pytest.fail(f"missing signature: {sctype.__name__}.as_integer_ratio") + + assert len(sig.parameters) == 1 + assert sig.parameters["self"].kind == inspect.Parameter.POSITIONAL_ONLY From 40ead852e432aa9432acfbefce0e3b0c44ec3f10 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 31 Oct 2025 20:41:52 +0100 Subject: [PATCH 0789/1718] TYP: update `_core._add_newdocs.scalars` stubs --- numpy/_core/_add_newdocs_scalars.pyi | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/_core/_add_newdocs_scalars.pyi b/numpy/_core/_add_newdocs_scalars.pyi index 4a06c9b07d74..241f4a00bd45 100644 --- a/numpy/_core/_add_newdocs_scalars.pyi +++ b/numpy/_core/_add_newdocs_scalars.pyi @@ -1,4 +1,3 @@ -from collections.abc import Iterable from typing import Final import numpy as np @@ -8,9 +7,10 @@ _system: Final[str] = ... _machine: Final[str] = ... _doc_alias_string: Final[str] = ... _bool_docstring: Final[str] = ... +bool_name: str = ... int_name: str = ... float_name: str = ... def numeric_type_aliases(aliases: list[tuple[str, str]]) -> list[tuple[type[np.number], str, str]]: ... -def add_newdoc_for_scalar_type(obj: str, fixed_aliases: Iterable[str], doc: str) -> None: ... +def add_newdoc_for_scalar_type(name: str, text_signature: str, doc: str) -> None: ... def _get_platform_and_machine() -> tuple[str, str]: ... 
From 1a1b67e058e960cf8ea33d3abb65fe53f88860db Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 31 Oct 2025 20:42:18 +0100 Subject: [PATCH 0790/1718] TYP: update sctype constructor stubs --- numpy/__init__.pyi | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index c83c9fe8ed6d..7cd682e79f3a 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -4264,13 +4264,13 @@ bool_ = bool @final class object_(_RealMixin, generic): @overload - def __new__(cls, nothing_to_see_here: None = None, /) -> None: ... # type: ignore[misc] + def __new__(cls, value: None = None, /) -> None: ... # type: ignore[misc] @overload - def __new__(cls, stringy: _AnyStr, /) -> _AnyStr: ... # type: ignore[misc] + def __new__(cls, value: _AnyStr, /) -> _AnyStr: ... # type: ignore[misc] @overload - def __new__(cls, array: ndarray[_ShapeT, Any], /) -> ndarray[_ShapeT, dtype[Self]]: ... # type: ignore[misc] + def __new__(cls, value: ndarray[_ShapeT, Any], /) -> ndarray[_ShapeT, dtype[Self]]: ... # type: ignore[misc] @overload - def __new__(cls, sequence: SupportsLenAndGetItem[object], /) -> NDArray[Self]: ... # type: ignore[misc] + def __new__(cls, value: SupportsLenAndGetItem[object], /) -> NDArray[Self]: ... # type: ignore[misc] @overload def __new__(cls, value: _T, /) -> _T: ... # type: ignore[misc] @overload # catch-all @@ -5628,9 +5628,9 @@ class flexible(_RealMixin, generic[_FlexibleItemT_co], Generic[_FlexibleItemT_co class void(flexible[bytes | tuple[Any, ...]]): # type: ignore[misc] @overload - def __new__(cls, value: _IntLike_co | bytes, /, dtype: None = None) -> Self: ... + def __new__(cls, length_or_data: _IntLike_co | bytes, /, dtype: None = None) -> Self: ... @overload - def __new__(cls, value: Any, /, dtype: _DTypeLikeVoid) -> Self: ... + def __new__(cls, length_or_data: object, /, dtype: _DTypeLikeVoid) -> Self: ... @overload def __getitem__(self, key: str | SupportsIndex, /) -> Any: ... 
From e9b9047e4a8d765e84b962783011121b8c996b0d Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 31 Oct 2025 20:43:01 +0100 Subject: [PATCH 0791/1718] STY: appease `ruff` --- numpy/_core/tests/test_scalar_methods.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/_core/tests/test_scalar_methods.py b/numpy/_core/tests/test_scalar_methods.py index 4ce921d67541..7f1d7d6d7bdb 100644 --- a/numpy/_core/tests/test_scalar_methods.py +++ b/numpy/_core/tests/test_scalar_methods.py @@ -261,7 +261,7 @@ def test_array_wrap(scalar): @pytest.mark.parametrize( "sctype", [ - *sctypes["int" ], + *sctypes["int"], *sctypes["uint"], *sctypes["float"], *sctypes["complex"], @@ -282,7 +282,7 @@ def test_constructor_signatures(sctype: type[np.generic]) -> None: @pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") @pytest.mark.parametrize( "sctype", - [np.integer, *sctypes["int" ], *sctypes["uint"], *sctypes["float"]], + [np.integer, *sctypes["int"], *sctypes["uint"], *sctypes["float"]], ) def test_method_signatures_is_integer(sctype: type[np.integer | np.floating]) -> None: try: From 6ba89bd87b77b4ddae4b93cd8c46036ce9857d36 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 31 Oct 2025 21:39:10 +0100 Subject: [PATCH 0792/1718] BUG: `long` and `ulong` constructor signatures --- numpy/_core/_add_newdocs_scalars.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/numpy/_core/_add_newdocs_scalars.py b/numpy/_core/_add_newdocs_scalars.py index 8c7911e182ce..7305f232510c 100644 --- a/numpy/_core/_add_newdocs_scalars.py +++ b/numpy/_core/_add_newdocs_scalars.py @@ -124,6 +124,10 @@ def add_newdoc_for_scalar_type(name: str, text_signature: str, doc: str) -> None Signed integer type, compatible with C ``int``. """) +add_newdoc_for_scalar_type('long', '(value=0, /)', """ +Signed integer type, compatible with C ``long``. 
+""") + # TODO: These docs probably need an if to highlight the default rather than # the C-types (and be correct). add_newdoc_for_scalar_type('int_', '(value=0, /)', """ @@ -150,8 +154,12 @@ def add_newdoc_for_scalar_type(name: str, text_signature: str, doc: str) -> None Unsigned signed integer type, 64bit on 64bit systems and 32bit on 32bit systems. """) +add_newdoc_for_scalar_type('ulong', '(value=0, /)', """ +Unsigned integer type, compatible with C ``unsigned long``. +""") + add_newdoc_for_scalar_type('ulonglong', '(value=0, /)', """ -Signed integer type, compatible with C ``unsigned long long``. +Unsigned integer type, compatible with C ``unsigned long long``. """) add_newdoc_for_scalar_type('half', '(value=0, /)', """ From 2b2dac41564b31d826f55e9b00b718c8a1af583b Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 31 Oct 2025 22:10:58 +0100 Subject: [PATCH 0793/1718] BUG: ``flatiter`` method signatures --- numpy/_core/_add_newdocs.py | 18 ++++++++++++------ numpy/_core/tests/test_indexing.py | 18 ++++++++++++++++++ 2 files changed, 30 insertions(+), 6 deletions(-) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index 98bdec4e3c74..12553ea0215e 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -80,7 +80,6 @@ """)) - add_newdoc('numpy._core', 'flatiter', ('coords', """ An N-dimensional tuple of current coordinates. @@ -99,7 +98,6 @@ """)) - add_newdoc('numpy._core', 'flatiter', ('index', """ Current flat index into the array. @@ -118,17 +116,25 @@ """)) -# flatiter functions +# flatiter methods add_newdoc('numpy._core', 'flatiter', ('__array__', - """__array__(type=None) Get array from iterator + """ + __array__($self, dtype=None, /, *, copy=None) + -- - """)) + flat.__array__([dtype], *, copy=None) + Get array from iterator + + """)) add_newdoc('numpy._core', 'flatiter', ('copy', """ - copy() + copy($self, /) + -- + + flat.copy() Get a copy of the iterator as a 1-D array. 
diff --git a/numpy/_core/tests/test_indexing.py b/numpy/_core/tests/test_indexing.py index b4bb53fa71e1..65d42d6c9370 100644 --- a/numpy/_core/tests/test_indexing.py +++ b/numpy/_core/tests/test_indexing.py @@ -1,4 +1,5 @@ import functools +import inspect import operator import sys import warnings @@ -11,6 +12,7 @@ from numpy.exceptions import ComplexWarning, VisibleDeprecationWarning from numpy.testing import ( HAS_REFCOUNT, + IS_PYPY, assert_, assert_array_equal, assert_equal, @@ -1672,3 +1674,19 @@ def test_nonempty_string_flat_index_on_flatiter(self): match=r"only integers, slices \(`:`\), ellipsis \(`\.\.\.`\) " r"and integer or boolean arrays are valid indices"): a.flat[b.flat] + + +@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") +@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") +@pytest.mark.parametrize("methodname", ["__array__", "copy"]) +def test_flatiter_method_signatures(methodname: str): + method = getattr(np.flatiter, methodname) + assert callable(method) + + try: + sig = inspect.signature(method) + except ValueError as e: + pytest.fail(f"Could not get signature for np.flatiter.{methodname}: {e}") + + assert "self" in sig.parameters + assert sig.parameters["self"].kind is inspect.Parameter.POSITIONAL_ONLY From 247cf63f07a3a0a5d057850e94c6c427baf54f11 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 1 Nov 2025 03:12:47 +0100 Subject: [PATCH 0794/1718] TYP: fix and improve the ``flatiter`` stubs --- numpy/__init__.pyi | 84 +++++++------ numpy/typing/tests/data/fail/flatiter.pyi | 40 ++++-- numpy/typing/tests/data/pass/flatiter.py | 10 +- numpy/typing/tests/data/reveal/flatiter.pyi | 129 +++++++++++++------- 4 files changed, 171 insertions(+), 92 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index fbd344ad3b43..d945a3dc197c 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -782,7 +782,6 @@ _NumericArrayT = TypeVar("_NumericArrayT", bound=NDArray[number | timedelta64 | _ShapeT = 
TypeVar("_ShapeT", bound=_Shape) _Shape1T = TypeVar("_Shape1T", bound=tuple[int, *tuple[int, ...]]) _ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True) -_1DShapeT = TypeVar("_1DShapeT", bound=_1D) _2DShapeT_co = TypeVar("_2DShapeT_co", bound=_2D, default=_2D, covariant=True) _1NShapeT = TypeVar("_1NShapeT", bound=tuple[L[1], *tuple[L[1], ...]]) # (1,) | (1, 1) | (1, 1, 1) | ... @@ -1621,44 +1620,61 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): @final class flatiter(Generic[_ArrayT_co]): - __hash__: ClassVar[None] # type: ignore[assignment] # pyright: ignore[reportIncompatibleMethodOverride] + __hash__: ClassVar[None] = None # type: ignore[assignment] # pyright: ignore[reportIncompatibleMethodOverride] + @property - def base(self) -> _ArrayT_co: ... + def base(self, /) -> _ArrayT_co: ... @property - def coords(self) -> _Shape: ... + def coords(self: flatiter[ndarray[_ShapeT]], /) -> _ShapeT: ... @property - def index(self) -> int: ... - def copy(self) -> _ArrayT_co: ... - def __iter__(self) -> Self: ... - def __next__(self: flatiter[NDArray[_ScalarT]]) -> _ScalarT: ... - def __len__(self) -> int: ... - @overload + def index(self, /) -> int: ... + + # iteration + def __len__(self, /) -> int: ... + def __iter__(self, /) -> Self: ... + def __next__(self: flatiter[NDArray[_ScalarT]], /) -> _ScalarT: ... + + # indexing + @overload # nd: _[()] + def __getitem__(self, key: tuple[()], /) -> _ArrayT_co: ... + @overload # 0d; _[] + def __getitem__(self: flatiter[NDArray[_ScalarT]], key: int | integer, /) -> _ScalarT: ... + @overload # 1d; _[[*]], _[:], _[...] def __getitem__( - self: flatiter[NDArray[_ScalarT]], - key: int | integer | tuple[int | integer], - ) -> _ScalarT: ... - @overload + self: flatiter[ndarray[Any, _DTypeT]], + key: list[int] | slice | EllipsisType | flatiter[NDArray[integer]], + /, + ) -> ndarray[tuple[int], _DTypeT]: ... 
+ @overload # 2d; _[[*[*]]] def __getitem__( - self, - key: _ArrayLikeInt | slice | EllipsisType | tuple[_ArrayLikeInt | slice | EllipsisType], - ) -> _ArrayT_co: ... - # TODO: `__setitem__` operates via `unsafe` casting rules, and can - # thus accept any type accepted by the relevant underlying `np.generic` - # constructor. - # This means that `value` must in reality be a supertype of `npt.ArrayLike`. - def __setitem__( - self, - key: _ArrayLikeInt | slice | EllipsisType | tuple[_ArrayLikeInt | slice | EllipsisType], - value: Any, - ) -> None: ... - @overload - def __array__(self: flatiter[ndarray[_1DShapeT, _DTypeT]], dtype: None = None, /) -> ndarray[_1DShapeT, _DTypeT]: ... - @overload - def __array__(self: flatiter[ndarray[_1DShapeT, Any]], dtype: _DTypeT, /) -> ndarray[_1DShapeT, _DTypeT]: ... - @overload - def __array__(self: flatiter[ndarray[Any, _DTypeT]], dtype: None = None, /) -> ndarray[_AnyShape, _DTypeT]: ... - @overload - def __array__(self, dtype: _DTypeT, /) -> ndarray[_AnyShape, _DTypeT]: ... + self: flatiter[ndarray[Any, _DTypeT]], + key: list[list[int]], + /, + ) -> ndarray[tuple[int, int], _DTypeT]: ... + @overload # ?d + def __getitem__( + self: flatiter[ndarray[Any, _DTypeT]], + key: NDArray[integer] | _NestedSequence[int], + /, + ) -> ndarray[_AnyShape, _DTypeT]: ... + + # NOTE: `__setitem__` operates via `unsafe` casting rules, and can thus accept any + # type accepted by the relevant underlying `np.generic` constructor, which isn't + # known statically. So we cannot meaningfully annotate the value parameter. + def __setitem__(self, key: slice | EllipsisType | _ArrayLikeInt, val: object, /) -> None: ... + + # NOTE: `dtype` and `copy` are no-ops at runtime, so we don't support them here to + # avoid confusion + def __array__( + self: flatiter[ndarray[Any, _DTypeT]], + dtype: None = None, + /, + *, + copy: None = None, + ) -> ndarray[tuple[int], _DTypeT]: ... 
+ + # This returns a flat copy of the underlying array, not of the iterator itself + def copy(self: flatiter[ndarray[Any, _DTypeT]], /) -> ndarray[tuple[int], _DTypeT]: ... @type_check_only class _ArrayOrScalarCommon: diff --git a/numpy/typing/tests/data/fail/flatiter.pyi b/numpy/typing/tests/data/fail/flatiter.pyi index be63a082535d..2c6e912bd318 100644 --- a/numpy/typing/tests/data/fail/flatiter.pyi +++ b/numpy/typing/tests/data/fail/flatiter.pyi @@ -1,22 +1,38 @@ +from typing import Any + import numpy as np -import numpy._typing as npt +import numpy.typing as npt -class Index: +class _Index: def __index__(self) -> int: ... -a: np.flatiter[npt.NDArray[np.float64]] -supports_array: npt._SupportsArray[np.dtype[np.float64]] +class _MyArray: + def __array__(self) -> np.ndarray[tuple[int], np.dtypes.Float64DType]: ... + +_index: _Index +_my_array: _MyArray +_something: Any +_dtype: np.dtype[np.int8] + +_a_nd: np.flatiter[npt.NDArray[np.float64]] + +### + +_a_nd.base = _something # type: ignore[misc] +_a_nd.coords = _something # type: ignore[misc] +_a_nd.index = _something # type: ignore[misc] -a.base = object() # type: ignore[assignment, misc] -a.coords = object() # type: ignore[assignment, misc] -a.index = object() # type: ignore[assignment, misc] -a.copy(order="C") # type: ignore[call-arg] +_a_nd.copy("C") # type: ignore[call-arg] +_a_nd.copy(order="C") # type: ignore[call-arg] # NOTE: Contrary to `ndarray.__getitem__` its counterpart in `flatiter` # does not accept objects with the `__array__` or `__index__` protocols; # boolean indexing is just plain broken (gh-17175) -a[np.bool()] # type: ignore[index] -a[Index()] # type: ignore[call-overload] -a[supports_array] # type: ignore[index] +_a_nd[np.True_] # type: ignore[call-overload] +_a_nd[_index] # type: ignore[call-overload] +_a_nd[_my_array] # type: ignore[call-overload] -a[[0, 1, 2]] +# `dtype` and `copy` are no-ops in `flatiter.__array__` +_a_nd.__array__(_dtype) # type: ignore[arg-type] 
+_a_nd.__array__(dtype=_dtype) # type: ignore[call-arg] +_a_nd.__array__(copy=True) # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/pass/flatiter.py b/numpy/typing/tests/data/pass/flatiter.py index cc7c6069a89a..70de3a67917d 100644 --- a/numpy/typing/tests/data/pass/flatiter.py +++ b/numpy/typing/tests/data/pass/flatiter.py @@ -12,7 +12,15 @@ a[...] a[:] a.__array__() -a.__array__(np.dtype(np.float64)) b = np.array([1]).flat a[b] + +a[0] = "1" +a[:] = "2" +a[...] = "3" +a[[]] = "4" +a[[0]] = "5" +a[[[0]]] = "6" +a[[[[[0]]]]] = "7" +a[b] = "8" diff --git a/numpy/typing/tests/data/reveal/flatiter.pyi b/numpy/typing/tests/data/reveal/flatiter.pyi index e188d30fe79f..98d61a6d3428 100644 --- a/numpy/typing/tests/data/reveal/flatiter.pyi +++ b/numpy/typing/tests/data/reveal/flatiter.pyi @@ -1,47 +1,86 @@ -from typing import Literal, TypeAlias, assert_type +from typing import Any, TypeAlias, assert_type import numpy as np -import numpy.typing as npt - -a: np.flatiter[npt.NDArray[np.str_]] -a_1d: np.flatiter[np.ndarray[tuple[int], np.dtype[np.bytes_]]] - -Size: TypeAlias = Literal[42] -a_1d_fixed: np.flatiter[np.ndarray[tuple[Size], np.dtype[np.object_]]] - -assert_type(a.base, npt.NDArray[np.str_]) -assert_type(a.copy(), npt.NDArray[np.str_]) -assert_type(a.coords, tuple[int, ...]) -assert_type(a.index, int) -assert_type(iter(a), np.flatiter[npt.NDArray[np.str_]]) -assert_type(next(a), np.str_) -assert_type(a[0], np.str_) -assert_type(a[[0, 1, 2]], npt.NDArray[np.str_]) -assert_type(a[...], npt.NDArray[np.str_]) -assert_type(a[:], npt.NDArray[np.str_]) -assert_type(a[(...,)], npt.NDArray[np.str_]) -assert_type(a[(0,)], np.str_) - -assert_type(a.__array__(), npt.NDArray[np.str_]) -assert_type(a.__array__(np.dtype(np.float64)), npt.NDArray[np.float64]) -assert_type( - a_1d.__array__(), - np.ndarray[tuple[int], np.dtype[np.bytes_]], -) -assert_type( - a_1d.__array__(np.dtype(np.float64)), - np.ndarray[tuple[int], np.dtype[np.float64]], -) -assert_type( - 
a_1d_fixed.__array__(), - np.ndarray[tuple[Size], np.dtype[np.object_]], -) -assert_type( - a_1d_fixed.__array__(np.dtype(np.float64)), - np.ndarray[tuple[Size], np.dtype[np.float64]], -) - -a[0] = "a" -a[:5] = "a" -a[...] = "a" -a[(...,)] = "a" + +_ArrayND: TypeAlias = np.ndarray[tuple[Any, ...], np.dtypes.StrDType] +_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtypes.BytesDType] +_Array2D: TypeAlias = np.ndarray[tuple[int, int], np.dtypes.Int8DType] + +_a_nd: np.flatiter[_ArrayND] +_a_1d: np.flatiter[_Array1D] +_a_2d: np.flatiter[_Array2D] + +### + +# .base +assert_type(_a_nd.base, _ArrayND) +assert_type(_a_1d.base, _Array1D) +assert_type(_a_2d.base, _Array2D) + +# .coords +assert_type(_a_nd.coords, tuple[Any, ...]) +assert_type(_a_1d.coords, tuple[int]) +assert_type(_a_2d.coords, tuple[int, int]) + +# .index +assert_type(_a_nd.index, int) +assert_type(_a_1d.index, int) +assert_type(_a_2d.index, int) + +# .__len__() +assert_type(len(_a_nd), int) +assert_type(len(_a_1d), int) +assert_type(len(_a_2d), int) + +# .__iter__() +assert_type(iter(_a_nd), np.flatiter[_ArrayND]) +assert_type(iter(_a_1d), np.flatiter[_Array1D]) +assert_type(iter(_a_2d), np.flatiter[_Array2D]) + +# .__next__() +assert_type(next(_a_nd), np.str_) +assert_type(next(_a_1d), np.bytes_) +assert_type(next(_a_2d), np.int8) + +# .__getitem__(()) +assert_type(_a_nd[()], _ArrayND) +assert_type(_a_1d[()], _Array1D) +assert_type(_a_2d[()], _Array2D) +# .__getitem__(int) +assert_type(_a_nd[0], np.str_) +assert_type(_a_1d[0], np.bytes_) +assert_type(_a_2d[0], np.int8) +# .__getitem__(slice) +assert_type(_a_nd[::], np.ndarray[tuple[int], np.dtypes.StrDType]) +assert_type(_a_1d[::], np.ndarray[tuple[int], np.dtypes.BytesDType]) +assert_type(_a_2d[::], np.ndarray[tuple[int], np.dtypes.Int8DType]) +# .__getitem__(EllipsisType) +assert_type(_a_nd[...], np.ndarray[tuple[int], np.dtypes.StrDType]) +assert_type(_a_1d[...], np.ndarray[tuple[int], np.dtypes.BytesDType]) +assert_type(_a_2d[...], 
np.ndarray[tuple[int], np.dtypes.Int8DType]) +# .__getitem__(list[!]) +assert_type(_a_nd[[]], np.ndarray[tuple[int], np.dtypes.StrDType]) +assert_type(_a_1d[[]], np.ndarray[tuple[int], np.dtypes.BytesDType]) +assert_type(_a_2d[[]], np.ndarray[tuple[int], np.dtypes.Int8DType]) +# .__getitem__(list[int]) +assert_type(_a_nd[[0]], np.ndarray[tuple[int], np.dtypes.StrDType]) +assert_type(_a_1d[[0]], np.ndarray[tuple[int], np.dtypes.BytesDType]) +assert_type(_a_2d[[0]], np.ndarray[tuple[int], np.dtypes.Int8DType]) +# .__getitem__(list[list[int]]) +assert_type(_a_nd[[[0]]], np.ndarray[tuple[int, int], np.dtypes.StrDType]) +assert_type(_a_1d[[[0]]], np.ndarray[tuple[int, int], np.dtypes.BytesDType]) +assert_type(_a_2d[[[0]]], np.ndarray[tuple[int, int], np.dtypes.Int8DType]) +# .__getitem__(list[list[list[list[int]]]]) +assert_type(_a_nd[[[[[0]]]]], np.ndarray[tuple[Any, ...], np.dtypes.StrDType]) +assert_type(_a_1d[[[[[0]]]]], np.ndarray[tuple[Any, ...], np.dtypes.BytesDType]) +assert_type(_a_2d[[[[[0]]]]], np.ndarray[tuple[Any, ...], np.dtypes.Int8DType]) + +# __array__() +assert_type(_a_nd.__array__(), np.ndarray[tuple[int], np.dtypes.StrDType]) +assert_type(_a_1d.__array__(), np.ndarray[tuple[int], np.dtypes.BytesDType]) +assert_type(_a_2d.__array__(), np.ndarray[tuple[int], np.dtypes.Int8DType]) + +# .copy() +assert_type(_a_nd.copy(), np.ndarray[tuple[int], np.dtypes.StrDType]) +assert_type(_a_1d.copy(), np.ndarray[tuple[int], np.dtypes.BytesDType]) +assert_type(_a_2d.copy(), np.ndarray[tuple[int], np.dtypes.Int8DType]) From 57794dfb6efc2a0c8498e4bc91cbedfb08c2a78b Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 1 Nov 2025 05:06:57 +0100 Subject: [PATCH 0795/1718] BUG: ``nditer`` runtime signatures --- numpy/_core/_add_newdocs.py | 200 ++++++++++++++++++------------- numpy/_core/tests/test_nditer.py | 26 ++++ 2 files changed, 144 insertions(+), 82 deletions(-) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index 98bdec4e3c74..fea383390881 
100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -154,6 +154,19 @@ add_newdoc('numpy._core', 'nditer', """ + nditer( + op, + flags=None, + op_flags=None, + op_dtypes=None, + order='K', + casting='safe', + op_axes=None, + itershape=None, + buffersize=0, + ) + -- + nditer(op, flags=None, op_flags=None, op_dtypes=None, order='K', casting='safe', op_axes=None, itershape=None, buffersize=0) @@ -165,63 +178,62 @@ ---------- op : ndarray or sequence of array_like The array(s) to iterate over. - flags : sequence of str, optional - Flags to control the behavior of the iterator. - - * ``buffered`` enables buffering when required. - * ``c_index`` causes a C-order index to be tracked. - * ``f_index`` causes a Fortran-order index to be tracked. - * ``multi_index`` causes a multi-index, or a tuple of indices - with one per iteration dimension, to be tracked. - * ``common_dtype`` causes all the operands to be converted to - a common data type, with copying or buffering as necessary. - * ``copy_if_overlap`` causes the iterator to determine if read - operands have overlap with write operands, and make temporary - copies as necessary to avoid overlap. False positives (needless - copying) are possible in some cases. - * ``delay_bufalloc`` delays allocation of the buffers until - a reset() call is made. Allows ``allocate`` operands to - be initialized before their values are copied into the buffers. - * ``external_loop`` causes the ``values`` given to be - one-dimensional arrays with multiple values instead of - zero-dimensional arrays. - * ``grow_inner`` allows the ``value`` array sizes to be made - larger than the buffer size when both ``buffered`` and - ``external_loop`` is used. - * ``ranged`` allows the iterator to be restricted to a sub-range - of the iterindex values. - * ``refs_ok`` enables iteration of reference types, such as - object arrays. 
- * ``reduce_ok`` enables iteration of ``readwrite`` operands - which are broadcasted, also known as reduction operands. - * ``zerosize_ok`` allows `itersize` to be zero. + Flags to control the behavior of the iterator. + + * ``buffered`` enables buffering when required. + * ``c_index`` causes a C-order index to be tracked. + * ``f_index`` causes a Fortran-order index to be tracked. + * ``multi_index`` causes a multi-index, or a tuple of indices + with one per iteration dimension, to be tracked. + * ``common_dtype`` causes all the operands to be converted to + a common data type, with copying or buffering as necessary. + * ``copy_if_overlap`` causes the iterator to determine if read + operands have overlap with write operands, and make temporary + copies as necessary to avoid overlap. False positives (needless + copying) are possible in some cases. + * ``delay_bufalloc`` delays allocation of the buffers until + a reset() call is made. Allows ``allocate`` operands to + be initialized before their values are copied into the buffers. + * ``external_loop`` causes the ``values`` given to be + one-dimensional arrays with multiple values instead of + zero-dimensional arrays. + * ``grow_inner`` allows the ``value`` array sizes to be made + larger than the buffer size when both ``buffered`` and + ``external_loop`` is used. + * ``ranged`` allows the iterator to be restricted to a sub-range + of the iterindex values. + * ``refs_ok`` enables iteration of reference types, such as + object arrays. + * ``reduce_ok`` enables iteration of ``readwrite`` operands + which are broadcasted, also known as reduction operands. + * ``zerosize_ok`` allows `itersize` to be zero. op_flags : list of list of str, optional - This is a list of flags for each operand. At minimum, one of - ``readonly``, ``readwrite``, or ``writeonly`` must be specified. - - * ``readonly`` indicates the operand will only be read from. - * ``readwrite`` indicates the operand will be read from and written to. 
- * ``writeonly`` indicates the operand will only be written to. - * ``no_broadcast`` prevents the operand from being broadcasted. - * ``contig`` forces the operand data to be contiguous. - * ``aligned`` forces the operand data to be aligned. - * ``nbo`` forces the operand data to be in native byte order. - * ``copy`` allows a temporary read-only copy if required. - * ``updateifcopy`` allows a temporary read-write copy if required. - * ``allocate`` causes the array to be allocated if it is None - in the ``op`` parameter. - * ``no_subtype`` prevents an ``allocate`` operand from using a subtype. - * ``arraymask`` indicates that this operand is the mask to use - for selecting elements when writing to operands with the - 'writemasked' flag set. The iterator does not enforce this, - but when writing from a buffer back to the array, it only - copies those elements indicated by this mask. - * ``writemasked`` indicates that only elements where the chosen - ``arraymask`` operand is True will be written to. - * ``overlap_assume_elementwise`` can be used to mark operands that are - accessed only in the iterator order, to allow less conservative - copying when ``copy_if_overlap`` is present. + This is a list of flags for each operand. At minimum, one of + ``readonly``, ``readwrite``, or ``writeonly`` must be specified. + + * ``readonly`` indicates the operand will only be read from. + * ``readwrite`` indicates the operand will be read from and written to. + * ``writeonly`` indicates the operand will only be written to. + * ``no_broadcast`` prevents the operand from being broadcasted. + * ``contig`` forces the operand data to be contiguous. + * ``aligned`` forces the operand data to be aligned. + * ``nbo`` forces the operand data to be in native byte order. + * ``copy`` allows a temporary read-only copy if required. + * ``updateifcopy`` allows a temporary read-write copy if required. + * ``allocate`` causes the array to be allocated if it is None + in the ``op`` parameter. 
+ * ``no_subtype`` prevents an ``allocate`` operand from using a subtype. + * ``arraymask`` indicates that this operand is the mask to use + for selecting elements when writing to operands with the + 'writemasked' flag set. The iterator does not enforce this, + but when writing from a buffer back to the array, it only + copies those elements indicated by this mask. + * ``writemasked`` indicates that only elements where the chosen + ``arraymask`` operand is True will be written to. + * ``overlap_assume_elementwise`` can be used to mark operands that are + accessed only in the iterator order, to allow less conservative + copying when ``copy_if_overlap`` is present. op_dtypes : dtype or tuple of dtype(s), optional The required data type(s) of the operands. If copying or buffering is enabled, the data will be converted to/from their original types. @@ -422,10 +434,22 @@ """) +# nditer attributes + +add_newdoc('numpy._core', 'nditer', ('operands', + """ + operands[`Slice`] + + The array(s) to be iterated over. Valid only before the iterator is closed. + """)) + # nditer methods add_newdoc('numpy._core', 'nditer', ('copy', """ + copy($self, /) + -- + copy() Get a copy of the iterator in its current state. @@ -444,15 +468,11 @@ """)) -add_newdoc('numpy._core', 'nditer', ('operands', - """ - operands[`Slice`] - - The array(s) to be iterated over. Valid only before the iterator is closed. - """)) - add_newdoc('numpy._core', 'nditer', ('debug_print', """ + debug_print($self, /) + -- + debug_print() Print the current state of the `nditer` instance and debug info to stdout. 
@@ -461,6 +481,9 @@ add_newdoc('numpy._core', 'nditer', ('enable_external_loop', """ + enable_external_loop($self, /) + -- + enable_external_loop() When the "external_loop" was not used during construction, but @@ -471,6 +494,9 @@ add_newdoc('numpy._core', 'nditer', ('iternext', """ + iternext($self, /) + -- + iternext() Check whether iterations are left, and perform a single internal iteration @@ -486,6 +512,9 @@ add_newdoc('numpy._core', 'nditer', ('remove_axis', """ + remove_axis($self, i, /) + -- + remove_axis(i, /) Removes axis `i` from the iterator. Requires that the flag "multi_index" @@ -495,6 +524,9 @@ add_newdoc('numpy._core', 'nditer', ('remove_multi_index', """ + remove_multi_index($self, /) + -- + remove_multi_index() When the "multi_index" flag was specified, this removes it, allowing @@ -504,32 +536,50 @@ add_newdoc('numpy._core', 'nditer', ('reset', """ + reset($self, /) + -- + reset() Reset the iterator to its initial state. """)) +add_newdoc('numpy._core', 'nditer', ('close', + """ + close($self, /) + -- + + close() + + Resolve all writeback semantics in writeable operands. + + See Also + -------- + :ref:`nditer-context-manager` + + """)) + +# nested_iters + add_newdoc('numpy._core', 'nested_iters', """ - nested_iters(op, axes, flags=None, op_flags=None, op_dtypes=None, \ - order="K", casting="safe", buffersize=0) + nested_iters(op, axes, flags=None, op_flags=None, op_dtypes=None, + order='K', casting='safe', buffersize=0) Create nditers for use in nested loops Create a tuple of `nditer` objects which iterate in nested loops over different axes of the op argument. The first iterator is used in the - outermost loop, the last in the innermost loop. Advancing one will change - the subsequent iterators to point at its new element. + outermost loop, the last in the innermost loop. Advancing one will + change the subsequent iterators to point at its new element. Parameters ---------- op : ndarray or sequence of array_like The array(s) to iterate over. 
- axes : list of list of int Each item is used as an "op_axes" argument to an nditer - flags, op_flags, op_dtypes, order, casting, buffersize (optional) See `nditer` parameters of the same name @@ -574,20 +624,6 @@ """) -add_newdoc('numpy._core', 'nditer', ('close', - """ - close() - - Resolve all writeback semantics in writeable operands. - - See Also - -------- - - :ref:`nditer-context-manager` - - """)) - - ############################################################################### # # broadcast diff --git a/numpy/_core/tests/test_nditer.py b/numpy/_core/tests/test_nditer.py index acaaa0548fd5..84a88d54548e 100644 --- a/numpy/_core/tests/test_nditer.py +++ b/numpy/_core/tests/test_nditer.py @@ -1,3 +1,4 @@ +import inspect import subprocess import sys import textwrap @@ -11,6 +12,7 @@ from numpy import all, arange, array, nditer from numpy.testing import ( HAS_REFCOUNT, + IS_PYPY, IS_WASM, assert_, assert_array_equal, @@ -3496,3 +3498,27 @@ def test_debug_print(capfd): # The actual output may have additional pointers listed that are # stripped from the example output: assert res_line.startswith(expected_line.strip()) + + +@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") +@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") +def test_signature_constructor(): + sig = inspect.signature(np.nditer) + + assert sig.parameters + assert "self" not in sig.parameters + assert "args" not in sig.parameters + assert "kwargs" not in sig.parameters + + +@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") +@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") +@pytest.mark.parametrize( + "method", + [fn for name, fn in vars(np.nditer).items() if callable(fn) and name[0] != "_"], +) +def test_signature_methods(method): + sig = inspect.signature(method) + + assert "self" in sig.parameters + assert sig.parameters["self"].kind is inspect.Parameter.POSITIONAL_ONLY From 7e52b73098dcda9de9f7f45c60cf1c9f86ceb0d0 
Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sat, 1 Nov 2025 22:38:10 +0100 Subject: [PATCH 0796/1718] DOC: remove outdated notes on how to build against numpy in conda-forge (#30127) Closes https://github.com/conda-forge/conda-forge.github.io/issues/2605 [skip actions] [skip azp] [skip cirrus] --- doc/source/dev/depending_on_numpy.rst | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/doc/source/dev/depending_on_numpy.rst b/doc/source/dev/depending_on_numpy.rst index e3c03b0fea65..de809462654b 100644 --- a/doc/source/dev/depending_on_numpy.rst +++ b/doc/source/dev/depending_on_numpy.rst @@ -128,14 +128,9 @@ compatible with a new major release of NumPy and may not be compatible with very old versions. For conda-forge packages, please see -`here `__. - -as of now, it is usually as easy as including:: - - host: - - numpy - run: - - {{ pin_compatible('numpy') }} +`here `__ +for instructions on how to declare a dependency on ``numpy`` when using the C +API. Runtime dependency & version ranges From a8546b7812aaf4734fe82e42c69b068bd4db0718 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Sun, 2 Nov 2025 01:13:31 -0700 Subject: [PATCH 0797/1718] CI: disable flaky ubuntu UBsan CI job, and tweak macOS config (#30118) --- .github/workflows/compiler_sanitizers.yml | 47 ++--------------------- 1 file changed, 4 insertions(+), 43 deletions(-) diff --git a/.github/workflows/compiler_sanitizers.yml b/.github/workflows/compiler_sanitizers.yml index a86ff5ae8b53..290ddfa75ffe 100644 --- a/.github/workflows/compiler_sanitizers.yml +++ b/.github/workflows/compiler_sanitizers.yml @@ -49,16 +49,16 @@ jobs: pyenv --version - name: Set up LLVM run: | - brew install llvm@19 - LLVM_PREFIX=$(brew --prefix llvm@19) + brew install llvm@20 + LLVM_PREFIX=$(brew --prefix llvm@20) echo CC="$LLVM_PREFIX/bin/clang" >> $GITHUB_ENV echo CXX="$LLVM_PREFIX/bin/clang++" >> $GITHUB_ENV echo LDFLAGS="-L$LLVM_PREFIX/lib" >> $GITHUB_ENV echo 
CPPFLAGS="-I$LLVM_PREFIX/include" >> $GITHUB_ENV - name: Build Python with address sanitizer run: | - CONFIGURE_OPTS="--with-address-sanitizer" pyenv install 3.14t - pyenv global 3.14t + CONFIGURE_OPTS="--with-address-sanitizer" pyenv install 3.14 + pyenv global 3.14 - name: Install dependencies run: | pip install -r requirements/build_requirements.txt @@ -107,42 +107,3 @@ jobs: python -m spin test \ `find numpy -name "test*.py" | xargs grep -l "import threading" | tr '\n' ' '` \ -- -v -s --timeout=600 --durations=10 - - ubuntu_UBSAN: - # To enable this workflow on a fork, comment out: - if: github.repository == 'numpy/numpy' - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - with: - submodules: recursive - fetch-tags: true - persist-credentials: false - - name: Set up pyenv - run: | - git clone https://github.com/pyenv/pyenv.git "$HOME/.pyenv" - PYENV_ROOT="$HOME/.pyenv" - PYENV_BIN="$PYENV_ROOT/bin" - PYENV_SHIMS="$PYENV_ROOT/shims" - echo "$PYENV_BIN" >> $GITHUB_PATH - echo "$PYENV_SHIMS" >> $GITHUB_PATH - echo "PYENV_ROOT=$PYENV_ROOT" >> $GITHUB_ENV - - name: Check pyenv is working - run: pyenv --version - - name: Build python with address sanitizer - run: | - CONFIGURE_OPTS="--with-address-sanitizer" pyenv install 3.14t - pyenv global 3.14t - - name: Install dependencies - run: | - pip install -r requirements/build_requirements.txt - pip install -r requirements/ci_requirements.txt - pip install -r requirements/test_requirements.txt - - name: Build numpy with UndefinedBehaviorSanitizer - run: python -m spin build -- -Db_sanitize=address,undefined -Db_lundef=false - - name: Test - run: | - # pass -s to pytest to see UBSAN errors and warnings, otherwise pytest captures them - ASAN_OPTIONS=detect_leaks=0:symbolize=1:strict_init_order=true:allocator_may_return_null=1 \ - UBSAN_OPTIONS=halt_on_error=1:suppressions=${GITHUB_WORKSPACE}/tools/ci/ubsan_suppressions_x86_64.txt \ - spin test -- -v -s 
--timeout=600 --durations=10 From 1e424dae42a2d560520b6e053e8e60ac4205bfc7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafael=20Laboissi=C3=A8re?= Date: Sun, 2 Nov 2025 18:46:22 +0100 Subject: [PATCH 0798/1718] BUG: Avoid compilation error of wrapper file generated with SWIG >= 4.4 (#30128) The `import_array` macro, which is defined in the file `numpy/core/code_generators/generate_numpy_api.py`, is intended for use inside an internal SWIG function that is called in the generated C wrapper file. This macro contains a return statement whose argument must match the function definition. Until version 4.3 of SWIG, the aforementioned function returned a `void*` value. However, in version 4.4, the return value was changed to `int`. This causes compilation of code using import_array() to fail with the following error message: `returning 'void *' from a function with return type 'int' makes integer from pointer without a cast [-Wint-conversion].` This commit resolves the issue by returning either `NULL` or `0`, depending on the SWIG version being used (< 4.4 or >= 4.4, respectively). This change has been successfully tested against SWIG versions 4.3 and 4.4.
Closes: numpy/numpy#30122 --- numpy/_core/code_generators/generate_numpy_api.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/numpy/_core/code_generators/generate_numpy_api.py b/numpy/_core/code_generators/generate_numpy_api.py index dc11bcd2c272..23d678872ca4 100644 --- a/numpy/_core/code_generators/generate_numpy_api.py +++ b/numpy/_core/code_generators/generate_numpy_api.py @@ -157,6 +157,12 @@ return 0; } +#if (SWIG_VERSION < 0x040400) +#define _RETURN_VALUE NULL +#else +#define _RETURN_VALUE 0 +#endif + #define import_array() { \ if (_import_array() < 0) { \ PyErr_Print(); \ @@ -164,7 +170,7 @@ PyExc_ImportError, \ "numpy._core.multiarray failed to import" \ ); \ - return NULL; \ + return _RETURN_VALUE; \ } \ } From b5474d49065cf866eb8890bb9b9a20cbc45c41d2 Mon Sep 17 00:00:00 2001 From: Marten Henric van Kerkwijk Date: Mon, 3 Nov 2025 19:34:40 -0500 Subject: [PATCH 0799/1718] MAINT,BUG: make later arguments in array2string keyword only. --- doc/release/upcoming_changes/30068.expired.rst | 3 ++- numpy/_core/arrayprint.py | 8 ++++---- numpy/_core/arrayprint.pyi | 2 +- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/doc/release/upcoming_changes/30068.expired.rst b/doc/release/upcoming_changes/30068.expired.rst index 56678465e4b9..5d41c98b3260 100644 --- a/doc/release/upcoming_changes/30068.expired.rst +++ b/doc/release/upcoming_changes/30068.expired.rst @@ -4,7 +4,8 @@ The following long-deprecated APIs have been removed or converted to errors: * The ``style`` parameter has been removed from ``numpy.array2string``. - This argument had no effect since Numpy 1.14.0. + This argument had no effect since Numpy 1.14.0. Any arguments following + it, such as ``formatter`` have now been made keyword-only. * Calling ``np.sum(generator)`` directly on a generator object now raises a `TypeError`. This behavior was deprecated in NumPy 1.15.0. 
Use ``np.sum(np.fromiter(generator))`` diff --git a/numpy/_core/arrayprint.py b/numpy/_core/arrayprint.py index ec17d3063c09..8d576d9e1d56 100644 --- a/numpy/_core/arrayprint.py +++ b/numpy/_core/arrayprint.py @@ -619,18 +619,18 @@ def _array2string(a, options, separator=' ', prefix=""): def _array2string_dispatcher( a, max_line_width=None, precision=None, suppress_small=None, separator=None, prefix=None, - formatter=None, threshold=None, + *, formatter=None, threshold=None, edgeitems=None, sign=None, floatmode=None, suffix=None, - *, legacy=None): + legacy=None): return (a,) @array_function_dispatch(_array2string_dispatcher, module='numpy') def array2string(a, max_line_width=None, precision=None, suppress_small=None, separator=' ', prefix="", - formatter=None, threshold=None, + *, formatter=None, threshold=None, edgeitems=None, sign=None, floatmode=None, suffix="", - *, legacy=None): + legacy=None): """ Return a string representation of an array. diff --git a/numpy/_core/arrayprint.pyi b/numpy/_core/arrayprint.pyi index 6834565da97f..307f844634ca 100644 --- a/numpy/_core/arrayprint.pyi +++ b/numpy/_core/arrayprint.pyi @@ -98,13 +98,13 @@ def array2string( suppress_small: bool | None = None, separator: str = " ", prefix: str = "", + *, formatter: _FormatDict | None = None, threshold: int | None = None, edgeitems: int | None = None, sign: _Sign | None = None, floatmode: _FloatMode | None = None, suffix: str = "", - *, legacy: _Legacy | None = None, ) -> str: ... 
From b928580d9f596d1e003479dcdbecd368ce8459b9 Mon Sep 17 00:00:00 2001 From: Trey Cole Date: Mon, 3 Nov 2025 22:08:21 -0500 Subject: [PATCH 0800/1718] MAINT: fix math markup (\times -> \\times) in numpy.linalg.multidot docstring Notes section --- numpy/linalg/_linalg.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/linalg/_linalg.py b/numpy/linalg/_linalg.py index c3cbf3d0ef98..cfcaef13daf2 100644 --- a/numpy/linalg/_linalg.py +++ b/numpy/linalg/_linalg.py @@ -2938,7 +2938,7 @@ def cost(A, B): return A.shape[0] * A.shape[1] * B.shape[1] Assume we have three matrices - :math:`A_{10 \times 100}, B_{100 \times 5}, C_{5 \times 50}`. + :math:`A_{10 \\times 100}, B_{100 \\times 5}, C_{5 \\times 50}`. The costs for the two different parenthesizations are as follows:: From 0fdb1979cdfcdd31b61f423b66a384fabd1d7fd7 Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Tue, 4 Nov 2025 12:24:28 +0200 Subject: [PATCH 0801/1718] BLD: use scipy-openblas 0.3.30.7 (#30132) --- requirements/ci32_requirements.txt | 2 +- requirements/ci_requirements.txt | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/requirements/ci32_requirements.txt b/requirements/ci32_requirements.txt index 19e6b119b73f..824934787e10 100644 --- a/requirements/ci32_requirements.txt +++ b/requirements/ci32_requirements.txt @@ -1,3 +1,3 @@ spin==0.15 # Keep this in sync with ci_requirements.txt -scipy-openblas32==0.3.30.0.6 +scipy-openblas32==0.3.30.0.7 diff --git a/requirements/ci_requirements.txt b/requirements/ci_requirements.txt index 605132ee720a..37e685fef0cc 100644 --- a/requirements/ci_requirements.txt +++ b/requirements/ci_requirements.txt @@ -1,4 +1,4 @@ spin==0.15 # Keep this in sync with ci32_requirements.txt -scipy-openblas32==0.3.30.0.6 -scipy-openblas64==0.3.30.0.6 +scipy-openblas32==0.3.30.0.7 +scipy-openblas64==0.3.30.0.7 From 3b44dc061343cdfb1f15bf7d8280a844aa994b76 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Tue, 4 Nov 2025 17:40:35 +0100 
Subject: [PATCH 0802/1718] BUG, DOC, TYP: ``empty`` and ``zeros`` runtime signatures, and missing ``device`` parameter docs (#30140) * BUG: ``empty`` and ``zeros`` runtime signatures * TYP: `empty`, `zeros`, and `ones` parameter defaults and minor shape-type fix * TST: Update `_convert2ma` test signature for `np.ma.empty` --- numpy/_core/_add_newdocs.py | 26 ++-- numpy/_core/multiarray.pyi | 112 ++++++++++-------- numpy/_core/tests/test_numeric.py | 31 +++++ numpy/ma/tests/test_core.py | 8 +- .../tests/data/reveal/array_constructors.pyi | 5 +- 5 files changed, 121 insertions(+), 61 deletions(-) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index f02ab63c146c..bdb84d78bb27 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -1293,7 +1293,10 @@ add_newdoc('numpy._core.multiarray', 'empty', """ - empty(shape, dtype=float, order='C', *, device=None, like=None) + empty(shape, dtype=None, order='C', *, device=None, like=None) + -- + + empty(shape, dtype=None, order='C', *, device=None, like=None) Return a new array of given shape and type, without initializing entries. @@ -1306,8 +1309,7 @@ `numpy.float64`. order : {'C', 'F'}, optional, default: 'C' Whether to store multi-dimensional data in row-major - (C-style) or column-major (Fortran-style) order in - memory. + (C-style) or column-major (Fortran-style) order in memory. device : str, optional The device on which to place the created array. Default: ``None``. For Array-API interoperability only, so must be ``"cpu"`` if passed. @@ -1363,11 +1365,14 @@ string. If `obj` is not given, it will be interpreted as None for object type and as zeros for all other types. 
- """) + """) # sufficient null bytes for all number dtypes add_newdoc('numpy._core.multiarray', 'zeros', """ - zeros(shape, dtype=float, order='C', *, like=None) + zeros(shape, dtype=None, order='C', *, device=None, like=None) + -- + + zeros(shape, dtype=None, order='C', *, device=None, like=None) Return a new array of given shape and type, filled with zeros. @@ -1380,8 +1385,12 @@ `numpy.float64`. order : {'C', 'F'}, optional, default: 'C' Whether to store multi-dimensional data in row-major - (C-style) or column-major (Fortran-style) order in - memory. + (C-style) or column-major (Fortran-style) order in memory. + device : str, optional + The device on which to place the created array. Default: ``None``. + For Array-API interoperability only, so must be ``"cpu"`` if passed. + + .. versionadded:: 2.0.0 ${ARRAY_FUNCTION_LIKE} .. versionadded:: 1.20.0 @@ -1423,7 +1432,8 @@ """) add_newdoc('numpy._core.multiarray', 'set_typeDict', - """set_typeDict(dict) + """ + set_typeDict(dict) Set the internal dictionary that can look up an array type using a registered code. 
diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index dd47e8e872d7..ec56f3429892 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -1,6 +1,6 @@ # TODO: Sort out any and all missing functions in this namespace import datetime as dt -from _typeshed import StrOrBytesPath, SupportsLenAndGetItem +from _typeshed import Incomplete, StrOrBytesPath, SupportsLenAndGetItem from collections.abc import Callable, Iterable, Sequence from typing import ( Any, @@ -10,9 +10,7 @@ from typing import ( Protocol, SupportsIndex, TypeAlias, - TypedDict, TypeVar, - Unpack, final, overload, type_check_only, @@ -32,7 +30,6 @@ from numpy import ( # type: ignore[attr-defined] _SupportsBuffer, _SupportsFileMethods, broadcast, - # Re-exports busdaycalendar, complexfloating, correlate, @@ -54,7 +51,6 @@ from numpy import ( # type: ignore[attr-defined] signedinteger, str_, timedelta64, - # The rest ufunc, uint8, unsignedinteger, @@ -62,10 +58,9 @@ from numpy import ( # type: ignore[attr-defined] ) from numpy._typing import ( ArrayLike, - # DTypes DTypeLike, - # Arrays NDArray, + _AnyShape, _ArrayLike, _ArrayLikeBool_co, _ArrayLikeBytes_co, @@ -82,7 +77,6 @@ from numpy._typing import ( _IntLike_co, _NestedSequence, _ScalarLike_co, - # Shapes _Shape, _ShapeLike, _SupportsArrayFunc, @@ -192,18 +186,15 @@ __all__ = [ _ScalarT = TypeVar("_ScalarT", bound=generic) _DTypeT = TypeVar("_DTypeT", bound=np.dtype) -_ArrayT = TypeVar("_ArrayT", bound=ndarray[Any, Any]) -_ArrayT_co = TypeVar( - "_ArrayT_co", - bound=ndarray[Any, Any], - covariant=True, -) +_ArrayT = TypeVar("_ArrayT", bound=ndarray) +_ArrayT_co = TypeVar("_ArrayT_co", bound=ndarray, covariant=True) +_ShapeT = TypeVar("_ShapeT", bound=_Shape) +# TODO: fix the names of these typevars _ReturnType = TypeVar("_ReturnType") _IDType = TypeVar("_IDType") _Nin = TypeVar("_Nin", bound=int) _Nout = TypeVar("_Nout", bound=int) -_ShapeT = TypeVar("_ShapeT", bound=_Shape) _Array: TypeAlias = ndarray[_ShapeT, 
dtype[_ScalarT]] _Array1D: TypeAlias = ndarray[tuple[int], dtype[_ScalarT]] @@ -236,11 +227,6 @@ _RollKind: TypeAlias = L[ # `raise` is deliberately excluded class _SupportsArray(Protocol[_ArrayT_co]): def __array__(self, /) -> _ArrayT_co: ... -@type_check_only -class _KwargsEmpty(TypedDict, total=False): - device: L["cpu"] | None - like: _SupportsArrayFunc | None - @type_check_only class _ConstructorEmpty(Protocol): # 1-D shape @@ -250,8 +236,10 @@ class _ConstructorEmpty(Protocol): /, shape: SupportsIndex, dtype: None = None, - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, ) -> _Array1D[float64]: ... @overload def __call__( @@ -259,8 +247,10 @@ class _ConstructorEmpty(Protocol): /, shape: SupportsIndex, dtype: _DTypeT | _SupportsDType[_DTypeT], - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, ) -> ndarray[tuple[int], _DTypeT]: ... @overload def __call__( @@ -268,18 +258,22 @@ class _ConstructorEmpty(Protocol): /, shape: SupportsIndex, dtype: type[_ScalarT], - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, ) -> _Array1D[_ScalarT]: ... @overload def __call__( self, /, shape: SupportsIndex, - dtype: DTypeLike | None = ..., - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], - ) -> _Array1D[Any]: ... + dtype: DTypeLike | None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + ) -> _Array1D[Incomplete]: ... 
# known shape @overload @@ -288,8 +282,10 @@ class _ConstructorEmpty(Protocol): /, shape: _AnyShapeT, dtype: None = None, - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, ) -> _Array[_AnyShapeT, float64]: ... @overload def __call__( @@ -297,8 +293,10 @@ class _ConstructorEmpty(Protocol): /, shape: _AnyShapeT, dtype: _DTypeT | _SupportsDType[_DTypeT], - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, ) -> ndarray[_AnyShapeT, _DTypeT]: ... @overload def __call__( @@ -306,18 +304,22 @@ class _ConstructorEmpty(Protocol): /, shape: _AnyShapeT, dtype: type[_ScalarT], - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, ) -> _Array[_AnyShapeT, _ScalarT]: ... @overload def __call__( self, /, shape: _AnyShapeT, - dtype: DTypeLike | None = ..., - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], - ) -> _Array[_AnyShapeT, Any]: ... + dtype: DTypeLike | None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + ) -> _Array[_AnyShapeT, Incomplete]: ... # unknown shape @overload @@ -325,34 +327,42 @@ class _ConstructorEmpty(Protocol): self, /, shape: _ShapeLike, dtype: None = None, - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, ) -> NDArray[float64]: ... @overload def __call__( self, /, shape: _ShapeLike, dtype: _DTypeT | _SupportsDType[_DTypeT], - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], - ) -> ndarray[Any, _DTypeT]: ... 
+ order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + ) -> ndarray[_AnyShape, _DTypeT]: ... @overload def __call__( self, /, shape: _ShapeLike, dtype: type[_ScalarT], - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, ) -> NDArray[_ScalarT]: ... @overload def __call__( self, /, shape: _ShapeLike, - dtype: DTypeLike | None = ..., - order: _OrderCF = ..., - **kwargs: Unpack[_KwargsEmpty], - ) -> NDArray[Any]: ... + dtype: DTypeLike | None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + ) -> NDArray[Incomplete]: ... # using `Final` or `TypeAlias` will break stubtest error = Exception diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py index fb5a37c33126..9af89c6ff391 100644 --- a/numpy/_core/tests/test_numeric.py +++ b/numpy/_core/tests/test_numeric.py @@ -1,3 +1,4 @@ +import inspect import itertools import math import platform @@ -18,6 +19,7 @@ from numpy.random import rand, randint, randn from numpy.testing import ( HAS_REFCOUNT, + IS_PYPY, IS_WASM, assert_, assert_almost_equal, @@ -3371,6 +3373,35 @@ def test_for_reference_leak(self): np.full([dim] * 10, 0) assert_(sys.getrefcount(dim) == beg) + @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") + @pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") + @pytest.mark.parametrize("func", [np.empty, np.zeros, np.ones, np.full]) + def test_signatures(self, func): + sig = inspect.signature(func) + params = sig.parameters + + assert len(params) in {5, 6} + + assert 'shape' in params + assert params["shape"].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD + assert params["shape"].default is inspect.Parameter.empty + + assert 'dtype' in params + assert params["dtype"].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD + assert 
params["dtype"].default is None + + assert 'order' in params + assert params["order"].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD + assert params["order"].default == "C" + + assert 'device' in params + assert params["device"].kind is inspect.Parameter.KEYWORD_ONLY + assert params["device"].default is None + + assert 'like' in params + assert params["like"].kind is inspect.Parameter.KEYWORD_ONLY + assert params["like"].default is None + class TestLikeFuncs: '''Test ones_like, zeros_like, empty_like and full_like''' diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index 0e4d42da0a5b..b978b25f5827 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -5982,7 +5982,13 @@ def test_frommethod_signature(fn, signature): @pytest.mark.parametrize( ('fn', 'signature'), [ - (np.ma.empty, "(*args, fill_value=None, hardmask=False, **kwargs)"), + ( + np.ma.empty, + ( + "(shape, dtype=None, order='C', *, device=None, like=None, " + "fill_value=None, hardmask=False)" + ), + ), (np.ma.empty_like, "(*args, **kwargs)"), (np.ma.squeeze, "(a, axis=None, *, fill_value=None, hardmask=False)"), ( diff --git a/numpy/typing/tests/data/reveal/array_constructors.pyi b/numpy/typing/tests/data/reveal/array_constructors.pyi index 2d8b5fa3fd17..1d970ef68de5 100644 --- a/numpy/typing/tests/data/reveal/array_constructors.pyi +++ b/numpy/typing/tests/data/reveal/array_constructors.pyi @@ -189,7 +189,10 @@ assert_type(np.ones(_shape_2d), np.ndarray[tuple[int, int], np.dtype[np.float64] assert_type(np.ones(_shape_nd), np.ndarray[tuple[int, ...], np.dtype[np.float64]]) assert_type(np.ones(_shape_1d, dtype=np.int64), np.ndarray[tuple[int], np.dtype[np.int64]]) assert_type(np.ones(_shape_like), npt.NDArray[np.float64]) -assert_type(np.ones(_shape_like, dtype=np.dtypes.Int64DType()), np.ndarray[Any, np.dtypes.Int64DType]) +assert_type( + np.ones(_shape_like, dtype=np.dtypes.Int64DType()), + np.ndarray[tuple[Any, ...], np.dtypes.Int64DType], +) 
assert_type(np.ones(_shape_like, dtype=int), npt.NDArray[Any]) assert_type(np.ones(mixed_shape), npt.NDArray[np.float64]) From e431aaaa7d465e47ebc39646fe0cf88759c7b728 Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 4 Nov 2025 07:34:09 +0100 Subject: [PATCH 0803/1718] BUG: ``_core.multiarray.*`` function runtime signatures --- numpy/_core/multiarray.py | 73 ++++++++++++++++------------ numpy/_core/overrides.py | 11 +++-- numpy/_core/tests/test_multiarray.py | 34 +++++++++++++ 3 files changed, 84 insertions(+), 34 deletions(-) diff --git a/numpy/_core/multiarray.py b/numpy/_core/multiarray.py index 5599494720b6..a5995bd829bf 100644 --- a/numpy/_core/multiarray.py +++ b/numpy/_core/multiarray.py @@ -114,11 +114,20 @@ def _override___module__(): @array_function_from_c_func_and_dispatcher(_multiarray_umath.empty_like) def empty_like( - prototype, dtype=None, order=None, subok=None, shape=None, *, device=None + prototype, dtype=None, order="K", subok=True, shape=None, *, device=None ): """ - empty_like(prototype, dtype=None, order='K', subok=True, shape=None, *, - device=None) + empty_like( + prototype, + /, + dtype=None, + order='K', + subok=True, + shape=None, + *, + device=None, + ) + -- Return a new array with the same shape and type as a given array. @@ -186,15 +195,18 @@ def empty_like( @array_function_from_c_func_and_dispatcher(_multiarray_umath.concatenate) -def concatenate(arrays, axis=None, out=None, *, dtype=None, casting=None): +def concatenate(arrays, axis=0, out=None, *, dtype=None, casting="same_kind"): """ concatenate( - (a1, a2, ...), + arrays, + /, axis=0, out=None, + *, dtype=None, - casting="same_kind" + casting="same_kind", ) + -- Join a sequence of arrays along an existing axis. 
@@ -295,7 +307,7 @@ def concatenate(arrays, axis=None, out=None, *, dtype=None, casting=None): @array_function_from_c_func_and_dispatcher(_multiarray_umath.inner) -def inner(a, b): +def inner(a, b, /): """ inner(a, b, /) @@ -389,7 +401,7 @@ def inner(a, b): @array_function_from_c_func_and_dispatcher(_multiarray_umath.where) -def where(condition, x=None, y=None): +def where(condition, x=None, y=None, /): """ where(condition, [x, y], /) @@ -465,7 +477,7 @@ def where(condition, x=None, y=None): @array_function_from_c_func_and_dispatcher(_multiarray_umath.lexsort) -def lexsort(keys, axis=None): +def lexsort(keys, axis=-1): """ lexsort(keys, axis=-1) @@ -586,7 +598,7 @@ def lexsort(keys, axis=None): @array_function_from_c_func_and_dispatcher(_multiarray_umath.can_cast) -def can_cast(from_, to, casting=None): +def can_cast(from_, to, casting="safe"): """ can_cast(from_, to, casting='safe') @@ -648,7 +660,7 @@ def can_cast(from_, to, casting=None): @array_function_from_c_func_and_dispatcher(_multiarray_umath.min_scalar_type) -def min_scalar_type(a): +def min_scalar_type(a, /): """ min_scalar_type(a, /) @@ -862,7 +874,7 @@ def dot(a, b, out=None): @array_function_from_c_func_and_dispatcher(_multiarray_umath.vdot) -def vdot(a, b): +def vdot(a, b, /): r""" vdot(a, b, /) @@ -925,7 +937,7 @@ def vdot(a, b): @array_function_from_c_func_and_dispatcher(_multiarray_umath.bincount) -def bincount(x, weights=None, minlength=None): +def bincount(x, /, weights=None, minlength=0): """ bincount(x, /, weights=None, minlength=0) @@ -1001,7 +1013,7 @@ def bincount(x, weights=None, minlength=None): @array_function_from_c_func_and_dispatcher(_multiarray_umath.ravel_multi_index) -def ravel_multi_index(multi_index, dims, mode=None, order=None): +def ravel_multi_index(multi_index, dims, mode="raise", order="C"): """ ravel_multi_index(multi_index, dims, mode='raise', order='C') @@ -1059,7 +1071,7 @@ def ravel_multi_index(multi_index, dims, mode=None, order=None): 
@array_function_from_c_func_and_dispatcher(_multiarray_umath.unravel_index) -def unravel_index(indices, shape=None, order=None): +def unravel_index(indices, shape, order="C"): """ unravel_index(indices, shape, order='C') @@ -1104,7 +1116,7 @@ def unravel_index(indices, shape=None, order=None): @array_function_from_c_func_and_dispatcher(_multiarray_umath.copyto) -def copyto(dst, src, casting=None, where=None): +def copyto(dst, src, casting="same_kind", where=True): """ copyto(dst, src, casting='same_kind', where=True) @@ -1156,7 +1168,7 @@ def copyto(dst, src, casting=None, where=None): @array_function_from_c_func_and_dispatcher(_multiarray_umath.putmask) def putmask(a, /, mask, values): """ - putmask(a, mask, values) + putmask(a, /, mask, values) Changes elements of an array based on conditional and input values. @@ -1200,7 +1212,7 @@ def putmask(a, /, mask, values): @array_function_from_c_func_and_dispatcher(_multiarray_umath.packbits) -def packbits(a, axis=None, bitorder='big'): +def packbits(a, /, axis=None, bitorder="big"): """ packbits(a, /, axis=None, bitorder='big') @@ -1257,7 +1269,7 @@ def packbits(a, axis=None, bitorder='big'): @array_function_from_c_func_and_dispatcher(_multiarray_umath.unpackbits) -def unpackbits(a, axis=None, count=None, bitorder='big'): +def unpackbits(a, /, axis=None, count=None, bitorder="big"): """ unpackbits(a, /, axis=None, count=None, bitorder='big') @@ -1337,9 +1349,9 @@ def unpackbits(a, axis=None, count=None, bitorder='big'): @array_function_from_c_func_and_dispatcher(_multiarray_umath.shares_memory) -def shares_memory(a, b, max_work=None): +def shares_memory(a, b, /, max_work=-1): """ - shares_memory(a, b, /, max_work=None) + shares_memory(a, b, /, max_work=-1) Determine if two arrays share memory. 
@@ -1416,9 +1428,9 @@ def shares_memory(a, b, max_work=None): @array_function_from_c_func_and_dispatcher(_multiarray_umath.may_share_memory) -def may_share_memory(a, b, max_work=None): +def may_share_memory(a, b, /, max_work=0): """ - may_share_memory(a, b, /, max_work=None) + may_share_memory(a, b, /, max_work=0) Determine if two arrays might share memory @@ -1458,14 +1470,14 @@ def may_share_memory(a, b, max_work=None): @array_function_from_c_func_and_dispatcher(_multiarray_umath.is_busday) -def is_busday(dates, weekmask=None, holidays=None, busdaycal=None, out=None): +def is_busday(dates, weekmask="1111100", holidays=None, busdaycal=None, out=None): """ is_busday( dates, weekmask='1111100', holidays=None, busdaycal=None, - out=None + out=None, ) Calculates which of the given dates are valid days, and which are not. @@ -1517,7 +1529,7 @@ def is_busday(dates, weekmask=None, holidays=None, busdaycal=None, out=None): @array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_offset) -def busday_offset(dates, offsets, roll=None, weekmask=None, holidays=None, +def busday_offset(dates, offsets, roll="raise", weekmask="1111100", holidays=None, busdaycal=None, out=None): """ busday_offset( @@ -1527,7 +1539,7 @@ def busday_offset(dates, offsets, roll=None, weekmask=None, holidays=None, weekmask='1111100', holidays=None, busdaycal=None, - out=None + out=None, ) First adjusts the date to fall on a valid day according to @@ -1619,7 +1631,7 @@ def busday_offset(dates, offsets, roll=None, weekmask=None, holidays=None, @array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_count) -def busday_count(begindates, enddates, weekmask=None, holidays=None, +def busday_count(begindates, enddates, weekmask="1111100", holidays=(), busdaycal=None, out=None): """ busday_count( @@ -1692,9 +1704,8 @@ def busday_count(begindates, enddates, weekmask=None, holidays=None, return (begindates, enddates, weekmask, holidays, out) -@array_function_from_c_func_and_dispatcher( - 
_multiarray_umath.datetime_as_string) -def datetime_as_string(arr, unit=None, timezone=None, casting=None): +@array_function_from_c_func_and_dispatcher(_multiarray_umath.datetime_as_string) +def datetime_as_string(arr, unit=None, timezone="naive", casting="same_kind"): """ datetime_as_string(arr, unit=None, timezone='naive', casting='same_kind') diff --git a/numpy/_core/overrides.py b/numpy/_core/overrides.py index 6414710ae900..6d5e7750b09b 100644 --- a/numpy/_core/overrides.py +++ b/numpy/_core/overrides.py @@ -1,6 +1,7 @@ """Implementation of __array_function__ overrides from NEP-18.""" import collections import functools +import inspect from numpy._core._multiarray_umath import ( _ArrayFunctionDispatcher, @@ -156,11 +157,15 @@ def decorator(implementation): "argument and a keyword-only argument. " f"{implementation} does not seem to comply.") - if docs_from_dispatcher: - add_docstring(implementation, dispatcher.__doc__) + if docs_from_dispatcher and dispatcher.__doc__ is not None: + doc = inspect.cleandoc(dispatcher.__doc__) + add_docstring(implementation, doc) public_api = _ArrayFunctionDispatcher(dispatcher, implementation) - public_api = functools.wraps(implementation)(public_api) + functools.update_wrapper(public_api, implementation) + + if not verify and not getattr(implementation, "__text_signature__", None): + public_api.__signature__ = inspect.signature(dispatcher) if module is not None: public_api.__module__ = module diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 38e710179067..d62c2018292c 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -10902,3 +10902,37 @@ def test_array_method_signatures(methodname: str): assert "self" in sig.parameters assert sig.parameters["self"].kind is inspect.Parameter.POSITIONAL_ONLY + + +@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") +@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") 
+@pytest.mark.parametrize("func", [np.empty_like, np.concatenate]) +def test_c_func_dispatcher_text_signature(func): + text_sig = func.__wrapped__.__text_signature__ + assert text_sig.startswith("(") and text_sig.endswith(")") + + sig = inspect.signature(func) + assert sig == inspect.signature(func.__wrapped__) + assert not hasattr(func, "__signature__") + + with pytest.raises(ValueError): + inspect.signature(func, follow_wrapped=False) + + +@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") +@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") +@pytest.mark.parametrize( + "func", + [ + np.inner, np.where, np.lexsort, np.can_cast, np.min_scalar_type, np.result_type, + np.dot, np.vdot, np.bincount, np.ravel_multi_index, np.unravel_index, np.copyto, + np.putmask, np.packbits, np.unpackbits, np.shares_memory, np.may_share_memory, + np.is_busday, np.busday_offset, np.busday_count, np.datetime_as_string, + ], +) +def test_c_func_dispatcher_signature(func): + sig = inspect.signature(func) + + assert hasattr(func, "__signature__") + assert sig == func.__signature__ + assert sig.parameters From 0d7844eef4ad646ed42852ddcc8f29f45466cbc0 Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 4 Nov 2025 08:39:04 +0100 Subject: [PATCH 0804/1718] TYP: ``_core.multiarray.*`` function signature updates --- numpy/_core/multiarray.pyi | 350 ++++++++---------- numpy/lib/_index_tricks_impl.pyi | 61 ++- numpy/ma/core.pyi | 8 +- numpy/typing/tests/data/fail/multiarray.pyi | 4 +- numpy/typing/tests/data/pass/multiarray.py | 5 +- numpy/typing/tests/data/reveal/multiarray.pyi | 15 +- 6 files changed, 239 insertions(+), 204 deletions(-) diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index ec56f3429892..c1a5bb7567fe 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -18,7 +18,7 @@ from typing import ( from typing_extensions import CapsuleType import numpy as np -from numpy import ( # type: ignore[attr-defined] 
+from numpy import ( _AnyShapeT, _CastingKind, _CopyMode, @@ -413,43 +413,47 @@ empty: Final[_ConstructorEmpty] = ... @overload def empty_like( prototype: _ArrayT, + /, dtype: None = None, - order: _OrderKACF = ..., - subok: bool = ..., - shape: _ShapeLike | None = ..., + order: _OrderKACF = "K", + subok: bool = True, + shape: _ShapeLike | None = None, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> _ArrayT: ... @overload def empty_like( prototype: _ArrayLike[_ScalarT], + /, dtype: None = None, - order: _OrderKACF = ..., - subok: bool = ..., - shape: _ShapeLike | None = ..., + order: _OrderKACF = "K", + subok: bool = True, + shape: _ShapeLike | None = None, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> NDArray[_ScalarT]: ... @overload def empty_like( - prototype: Any, + prototype: Incomplete, + /, dtype: _DTypeLike[_ScalarT], - order: _OrderKACF = ..., - subok: bool = ..., - shape: _ShapeLike | None = ..., + order: _OrderKACF = "K", + subok: bool = True, + shape: _ShapeLike | None = None, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> NDArray[_ScalarT]: ... @overload def empty_like( - prototype: Any, - dtype: DTypeLike | None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - shape: _ShapeLike | None = ..., + prototype: Incomplete, + /, + dtype: DTypeLike | None = None, + order: _OrderKACF = "K", + subok: bool = True, + shape: _ShapeLike | None = None, *, - device: L["cpu"] | None = ..., -) -> NDArray[Any]: ... + device: L["cpu"] | None = None, +) -> NDArray[Incomplete]: ... 
@overload def array( @@ -536,44 +540,44 @@ def unravel_index(indices: _ArrayLikeInt_co, shape: _ShapeLike, order: _OrderCF # NOTE: Allow any sequence of array-like objects @overload -def concatenate( # type: ignore[misc] +def concatenate( arrays: _ArrayLike[_ScalarT], /, - axis: SupportsIndex | None = ..., + axis: SupportsIndex | None = 0, out: None = None, *, dtype: None = None, - casting: _CastingKind | None = ... + casting: _CastingKind | None = "same_kind", ) -> NDArray[_ScalarT]: ... @overload -def concatenate( # type: ignore[misc] +def concatenate( arrays: SupportsLenAndGetItem[ArrayLike], /, - axis: SupportsIndex | None = ..., + axis: SupportsIndex | None = 0, out: None = None, *, dtype: _DTypeLike[_ScalarT], - casting: _CastingKind | None = ... + casting: _CastingKind | None = "same_kind", ) -> NDArray[_ScalarT]: ... @overload -def concatenate( # type: ignore[misc] +def concatenate( arrays: SupportsLenAndGetItem[ArrayLike], /, - axis: SupportsIndex | None = ..., + axis: SupportsIndex | None = 0, out: None = None, *, dtype: DTypeLike | None = None, - casting: _CastingKind | None = ... -) -> NDArray[Any]: ... + casting: _CastingKind | None = "same_kind", +) -> NDArray[Incomplete]: ... @overload def concatenate( arrays: SupportsLenAndGetItem[ArrayLike], /, - axis: SupportsIndex | None = ..., + axis: SupportsIndex | None = 0, *, out: _ArrayT, - dtype: DTypeLike | None = ..., - casting: _CastingKind | None = ... + dtype: DTypeLike | None = None, + casting: _CastingKind | None = "same_kind", ) -> _ArrayT: ... @overload def concatenate( @@ -582,115 +586,80 @@ def concatenate( axis: SupportsIndex | None, out: _ArrayT, *, - dtype: DTypeLike | None = ..., - casting: _CastingKind | None = ... + dtype: DTypeLike | None = None, + casting: _CastingKind | None = "same_kind", ) -> _ArrayT: ... -def inner( - a: ArrayLike, - b: ArrayLike, - /, -) -> Any: ... +def inner(a: ArrayLike, b: ArrayLike, /) -> Incomplete: ... 
@overload -def where( - condition: ArrayLike, - /, -) -> tuple[NDArray[intp], ...]: ... +def where(condition: ArrayLike, x: None = None, y: None = None, /) -> tuple[NDArray[intp], ...]: ... @overload -def where( - condition: ArrayLike, - x: ArrayLike, - y: ArrayLike, - /, -) -> NDArray[Any]: ... +def where(condition: ArrayLike, x: ArrayLike, y: ArrayLike, /) -> NDArray[Incomplete]: ... -def lexsort( - keys: ArrayLike, - axis: SupportsIndex | None = ..., -) -> Any: ... +def lexsort(keys: ArrayLike, axis: SupportsIndex = -1) -> NDArray[intp]: ... -def can_cast( - from_: ArrayLike | DTypeLike, - to: DTypeLike, - casting: _CastingKind | None = ..., -) -> bool: ... +def can_cast(from_: ArrayLike | DTypeLike, to: DTypeLike, casting: _CastingKind = "safe") -> bool: ... def min_scalar_type(a: ArrayLike, /) -> dtype: ... - def result_type(*arrays_and_dtypes: ArrayLike | DTypeLike | None) -> dtype: ... @overload -def dot(a: ArrayLike, b: ArrayLike, out: None = None) -> Any: ... +def dot(a: ArrayLike, b: ArrayLike, out: None = None) -> Incomplete: ... @overload def dot(a: ArrayLike, b: ArrayLike, out: _ArrayT) -> _ArrayT: ... @overload -def vdot(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co, /) -> np.bool: ... # type: ignore[misc] +def vdot(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co, /) -> np.bool: ... @overload -def vdot(a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co, /) -> unsignedinteger: ... # type: ignore[misc] +def vdot(a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co, /) -> unsignedinteger: ... @overload -def vdot(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, /) -> signedinteger: ... # type: ignore[misc] +def vdot(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, /) -> signedinteger: ... @overload -def vdot(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, /) -> floating: ... # type: ignore[misc] +def vdot(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, /) -> floating: ... @overload -def vdot(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, /) -> complexfloating: ... 
# type: ignore[misc] +def vdot(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, /) -> complexfloating: ... @overload def vdot(a: _ArrayLikeTD64_co, b: _ArrayLikeTD64_co, /) -> timedelta64: ... @overload -def vdot(a: _ArrayLikeObject_co, b: Any, /) -> Any: ... +def vdot(a: _ArrayLikeObject_co, b: object, /) -> Any: ... @overload -def vdot(a: Any, b: _ArrayLikeObject_co, /) -> Any: ... +def vdot(a: object, b: _ArrayLikeObject_co, /) -> Any: ... -def bincount( - x: ArrayLike, - /, - weights: ArrayLike | None = ..., - minlength: SupportsIndex = ..., -) -> NDArray[intp]: ... +def bincount(x: ArrayLike, /, weights: ArrayLike | None = None, minlength: SupportsIndex = 0) -> NDArray[intp]: ... -def copyto( - dst: NDArray[Any], - src: ArrayLike, - casting: _CastingKind | None = ..., - where: _ArrayLikeBool_co | None = ..., -) -> None: ... +def copyto(dst: ndarray, src: ArrayLike, casting: _CastingKind = "same_kind", where: object = True) -> None: ... +def putmask(a: ndarray, /, mask: _ArrayLikeBool_co, values: ArrayLike) -> None: ... -def putmask( - a: NDArray[Any], - /, - mask: _ArrayLikeBool_co, - values: ArrayLike, -) -> None: ... +_BitOrder: TypeAlias = L["big", "little"] -def packbits( - a: _ArrayLikeInt_co, - /, - axis: SupportsIndex | None = ..., - bitorder: L["big", "little"] = ..., -) -> NDArray[uint8]: ... +@overload +def packbits(a: _ArrayLikeInt_co, /, axis: None = None, bitorder: _BitOrder = "big") -> ndarray[tuple[int], dtype[uint8]]: ... +@overload +def packbits(a: _ArrayLikeInt_co, /, axis: SupportsIndex, bitorder: _BitOrder = "big") -> NDArray[uint8]: ... +@overload def unpackbits( a: _ArrayLike[uint8], /, - axis: SupportsIndex | None = ..., - count: SupportsIndex | None = ..., - bitorder: L["big", "little"] = ..., + axis: None = None, + count: SupportsIndex | None = None, + bitorder: _BitOrder = "big", +) -> ndarray[tuple[int], dtype[uint8]]: ... 
+@overload +def unpackbits( + a: _ArrayLike[uint8], + /, + axis: SupportsIndex, + count: SupportsIndex | None = None, + bitorder: _BitOrder = "big", ) -> NDArray[uint8]: ... -def shares_memory( - a: object, - b: object, - /, - max_work: int | None = ..., -) -> bool: ... +_MaxWork: TypeAlias = L[-1, 0] -def may_share_memory( - a: object, - b: object, - /, - max_work: int | None = ..., -) -> bool: ... +# any two python objects will be accepted, not just `ndarray`s +def shares_memory(a: object, b: object, /, max_work: _MaxWork = -1) -> bool: ... +def may_share_memory(a: object, b: object, /, max_work: _MaxWork = 0) -> bool: ... @overload def asarray( @@ -1096,175 +1065,180 @@ def datetime_data( # The datetime functions perform unsafe casts to `datetime64[D]`, # so a lot of different argument types are allowed here +_ToDates: TypeAlias = dt.date | _NestedSequence[dt.date] +_ToDeltas: TypeAlias = dt.timedelta | _NestedSequence[dt.timedelta] + @overload -def busday_count( # type: ignore[misc] +def busday_count( begindates: _ScalarLike_co | dt.date, enddates: _ScalarLike_co | dt.date, - weekmask: ArrayLike = ..., - holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., - busdaycal: busdaycalendar | None = ..., + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates = (), + busdaycal: busdaycalendar | None = None, out: None = None, ) -> int_: ... @overload -def busday_count( # type: ignore[misc] - begindates: ArrayLike | dt.date | _NestedSequence[dt.date], - enddates: ArrayLike | dt.date | _NestedSequence[dt.date], - weekmask: ArrayLike = ..., - holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., - busdaycal: busdaycalendar | None = ..., +def busday_count( + begindates: ArrayLike | _ToDates, + enddates: ArrayLike | _ToDates, + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates = (), + busdaycal: busdaycalendar | None = None, out: None = None, ) -> NDArray[int_]: ... 
@overload def busday_count( - begindates: ArrayLike | dt.date | _NestedSequence[dt.date], - enddates: ArrayLike | dt.date | _NestedSequence[dt.date], - weekmask: ArrayLike = ..., - holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., - busdaycal: busdaycalendar | None = ..., + begindates: ArrayLike | _ToDates, + enddates: ArrayLike | _ToDates, + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates = (), + busdaycal: busdaycalendar | None = None, *, out: _ArrayT, ) -> _ArrayT: ... @overload def busday_count( - begindates: ArrayLike | dt.date | _NestedSequence[dt.date], - enddates: ArrayLike | dt.date | _NestedSequence[dt.date], + begindates: ArrayLike | _ToDates, + enddates: ArrayLike | _ToDates, weekmask: ArrayLike, - holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None, + holidays: ArrayLike | _ToDates, busdaycal: busdaycalendar | None, out: _ArrayT, ) -> _ArrayT: ... # `roll="raise"` is (more or less?) equivalent to `casting="safe"` @overload -def busday_offset( # type: ignore[misc] +def busday_offset( dates: datetime64 | dt.date, offsets: _TD64Like_co | dt.timedelta, - roll: L["raise"] = ..., - weekmask: ArrayLike = ..., - holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., - busdaycal: busdaycalendar | None = ..., + roll: L["raise"] = "raise", + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates | None = None, + busdaycal: busdaycalendar | None = None, out: None = None, ) -> datetime64: ... 
@overload -def busday_offset( # type: ignore[misc] - dates: _ArrayLike[datetime64] | dt.date | _NestedSequence[dt.date], - offsets: _ArrayLikeTD64_co | dt.timedelta | _NestedSequence[dt.timedelta], - roll: L["raise"] = ..., - weekmask: ArrayLike = ..., - holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., - busdaycal: busdaycalendar | None = ..., +def busday_offset( + dates: _ArrayLike[datetime64] | _NestedSequence[dt.date], + offsets: _ArrayLikeTD64_co | _ToDeltas, + roll: L["raise"] = "raise", + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates | None = None, + busdaycal: busdaycalendar | None = None, out: None = None, ) -> NDArray[datetime64]: ... @overload -def busday_offset( # type: ignore[misc] - dates: _ArrayLike[datetime64] | dt.date | _NestedSequence[dt.date], - offsets: _ArrayLikeTD64_co | dt.timedelta | _NestedSequence[dt.timedelta], - roll: L["raise"] = ..., - weekmask: ArrayLike = ..., - holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., - busdaycal: busdaycalendar | None = ..., +def busday_offset( + dates: _ArrayLike[datetime64] | _ToDates, + offsets: _ArrayLikeTD64_co | _ToDeltas, + roll: L["raise"] = "raise", + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates | None = None, + busdaycal: busdaycalendar | None = None, *, out: _ArrayT, ) -> _ArrayT: ... @overload -def busday_offset( # type: ignore[misc] - dates: _ArrayLike[datetime64] | dt.date | _NestedSequence[dt.date], - offsets: _ArrayLikeTD64_co | dt.timedelta | _NestedSequence[dt.timedelta], +def busday_offset( + dates: _ArrayLike[datetime64] | _ToDates, + offsets: _ArrayLikeTD64_co | _ToDeltas, roll: L["raise"], weekmask: ArrayLike, - holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None, + holidays: ArrayLike | _ToDates | None, busdaycal: busdaycalendar | None, out: _ArrayT, ) -> _ArrayT: ... 
@overload -def busday_offset( # type: ignore[misc] +def busday_offset( dates: _ScalarLike_co | dt.date, offsets: _ScalarLike_co | dt.timedelta, roll: _RollKind, - weekmask: ArrayLike = ..., - holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., - busdaycal: busdaycalendar | None = ..., + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates | None = None, + busdaycal: busdaycalendar | None = None, out: None = None, ) -> datetime64: ... @overload -def busday_offset( # type: ignore[misc] - dates: ArrayLike | dt.date | _NestedSequence[dt.date], - offsets: ArrayLike | dt.timedelta | _NestedSequence[dt.timedelta], +def busday_offset( + dates: ArrayLike | _NestedSequence[dt.date], + offsets: ArrayLike | _ToDeltas, roll: _RollKind, - weekmask: ArrayLike = ..., - holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., - busdaycal: busdaycalendar | None = ..., + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates | None = None, + busdaycal: busdaycalendar | None = None, out: None = None, ) -> NDArray[datetime64]: ... @overload def busday_offset( - dates: ArrayLike | dt.date | _NestedSequence[dt.date], - offsets: ArrayLike | dt.timedelta | _NestedSequence[dt.timedelta], + dates: ArrayLike | _ToDates, + offsets: ArrayLike | _ToDeltas, roll: _RollKind, - weekmask: ArrayLike = ..., - holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., - busdaycal: busdaycalendar | None = ..., + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates | None = None, + busdaycal: busdaycalendar | None = None, *, out: _ArrayT, ) -> _ArrayT: ... 
@overload def busday_offset( - dates: ArrayLike | dt.date | _NestedSequence[dt.date], - offsets: ArrayLike | dt.timedelta | _NestedSequence[dt.timedelta], + dates: ArrayLike | _ToDates, + offsets: ArrayLike | _ToDeltas, roll: _RollKind, weekmask: ArrayLike, - holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None, + holidays: ArrayLike | _ToDates | None, busdaycal: busdaycalendar | None, out: _ArrayT, ) -> _ArrayT: ... @overload -def is_busday( # type: ignore[misc] +def is_busday( dates: _ScalarLike_co | dt.date, - weekmask: ArrayLike = ..., - holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., - busdaycal: busdaycalendar | None = ..., + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates | None = None, + busdaycal: busdaycalendar | None = None, out: None = None, ) -> np.bool: ... @overload -def is_busday( # type: ignore[misc] +def is_busday( dates: ArrayLike | _NestedSequence[dt.date], - weekmask: ArrayLike = ..., - holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., - busdaycal: busdaycalendar | None = ..., + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates | None = None, + busdaycal: busdaycalendar | None = None, out: None = None, ) -> NDArray[np.bool]: ... @overload def is_busday( - dates: ArrayLike | _NestedSequence[dt.date], - weekmask: ArrayLike = ..., - holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., - busdaycal: busdaycalendar | None = ..., + dates: ArrayLike | _ToDates, + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates | None = None, + busdaycal: busdaycalendar | None = None, *, out: _ArrayT, ) -> _ArrayT: ... @overload def is_busday( - dates: ArrayLike | _NestedSequence[dt.date], + dates: ArrayLike | _ToDates, weekmask: ArrayLike, - holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None, + holidays: ArrayLike | _ToDates | None, busdaycal: busdaycalendar | None, out: _ArrayT, ) -> _ArrayT: ... 
+_TimezoneContext: TypeAlias = L["naive", "UTC", "local"] | dt.tzinfo + @overload -def datetime_as_string( # type: ignore[misc] +def datetime_as_string( arr: datetime64 | dt.date, - unit: L["auto"] | _UnitKind | None = ..., - timezone: L["naive", "UTC", "local"] | dt.tzinfo = ..., - casting: _CastingKind = ..., + unit: L["auto"] | _UnitKind | None = None, + timezone: _TimezoneContext = "naive", + casting: _CastingKind = "same_kind", ) -> str_: ... @overload def datetime_as_string( arr: _ArrayLikeDT64_co | _NestedSequence[dt.date], - unit: L["auto"] | _UnitKind | None = ..., - timezone: L["naive", "UTC", "local"] | dt.tzinfo = ..., - casting: _CastingKind = ..., + unit: L["auto"] | _UnitKind | None = None, + timezone: _TimezoneContext = "naive", + casting: _CastingKind = "same_kind", ) -> NDArray[str_]: ... @overload diff --git a/numpy/lib/_index_tricks_impl.pyi b/numpy/lib/_index_tricks_impl.pyi index 8f12e4a36d99..570688fe8d62 100644 --- a/numpy/lib/_index_tricks_impl.pyi +++ b/numpy/lib/_index_tricks_impl.pyi @@ -1,4 +1,4 @@ -from _typeshed import Incomplete +from _typeshed import Incomplete, SupportsLenAndGetItem from collections.abc import Sequence from typing import ( Any, @@ -14,11 +14,15 @@ from typing import ( from typing_extensions import TypeVar import numpy as np +from numpy import _CastingKind from numpy._core.multiarray import ravel_multi_index, unravel_index from numpy._typing import ( ArrayLike, + DTypeLike, NDArray, _AnyShape, + _ArrayLike, + _DTypeLike, _FiniteNestedSequence, _NestedSequence, _SupportsArray, @@ -149,13 +153,62 @@ class AxisConcatenator(Generic[_AxisT_co, _MatrixT_co, _NDMinT_co, _Trans1DT_co] def __getitem__(self, key: Incomplete, /) -> Incomplete: ... def __len__(self, /) -> L[0]: ... 
- # + # Keep in sync with _core.multiarray.concatenate + @staticmethod + @overload + def concatenate( + arrays: _ArrayLike[_ScalarT], + /, + axis: SupportsIndex | None = 0, + out: None = None, + *, + dtype: None = None, + casting: _CastingKind | None = "same_kind", + ) -> NDArray[_ScalarT]: ... + @staticmethod + @overload + def concatenate( + arrays: SupportsLenAndGetItem[ArrayLike], + /, + axis: SupportsIndex | None = 0, + out: None = None, + *, + dtype: _DTypeLike[_ScalarT], + casting: _CastingKind | None = "same_kind", + ) -> NDArray[_ScalarT]: ... + @staticmethod + @overload + def concatenate( + arrays: SupportsLenAndGetItem[ArrayLike], + /, + axis: SupportsIndex | None = 0, + out: None = None, + *, + dtype: DTypeLike | None = None, + casting: _CastingKind | None = "same_kind", + ) -> NDArray[Incomplete]: ... @staticmethod @overload - def concatenate(*a: ArrayLike, axis: SupportsIndex | None = 0, out: _ArrayT) -> _ArrayT: ... + def concatenate( + arrays: SupportsLenAndGetItem[ArrayLike], + /, + axis: SupportsIndex | None = 0, + *, + out: _ArrayT, + dtype: DTypeLike | None = None, + casting: _CastingKind | None = "same_kind", + ) -> _ArrayT: ... @staticmethod @overload - def concatenate(*a: ArrayLike, axis: SupportsIndex | None = 0, out: None = None) -> NDArray[Incomplete]: ... + def concatenate( + arrays: SupportsLenAndGetItem[ArrayLike], + /, + axis: SupportsIndex | None, + out: _ArrayT, + *, + dtype: DTypeLike | None = None, + casting: _CastingKind | None = "same_kind", + ) -> _ArrayT: ... 
@final class RClass(AxisConcatenator[L[0], L[False], L[1], L[-1]]): diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 84e9b0660daf..57959ded47bc 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -3431,6 +3431,7 @@ def empty( @overload def empty_like( a: _MArrayT, + /, dtype: None = None, order: _OrderKACF = "K", subok: bool = True, @@ -3441,6 +3442,7 @@ def empty_like( @overload def empty_like( a: _ArrayLike[_ScalarT], + /, dtype: None = None, order: _OrderKACF = "K", subok: bool = True, @@ -3450,7 +3452,8 @@ def empty_like( ) -> _MaskedArray[_ScalarT]: ... @overload def empty_like( - a: object, + a: Incomplete, + /, dtype: _DTypeLike[_ScalarT], order: _OrderKACF = "K", subok: bool = True, @@ -3460,7 +3463,8 @@ def empty_like( ) -> _MaskedArray[_ScalarT]: ... @overload def empty_like( - a: object, + a: Incomplete, + /, dtype: DTypeLike | None = None, order: _OrderKACF = "K", subok: bool = True, diff --git a/numpy/typing/tests/data/fail/multiarray.pyi b/numpy/typing/tests/data/fail/multiarray.pyi index 1f9ef6894bad..51128dfbf6f7 100644 --- a/numpy/typing/tests/data/fail/multiarray.pyi +++ b/numpy/typing/tests/data/fail/multiarray.pyi @@ -26,10 +26,10 @@ np.copyto(AR_LIKE_f, AR_f8) # type: ignore[arg-type] np.putmask(AR_LIKE_f, [True, True, False], 1.5) # type: ignore[arg-type] np.packbits(AR_f8) # type: ignore[arg-type] -np.packbits(AR_u1, bitorder=">") # type: ignore[arg-type] +np.packbits(AR_u1, bitorder=">") # type: ignore[call-overload] np.unpackbits(AR_i8) # type: ignore[arg-type] -np.unpackbits(AR_u1, bitorder=">") # type: ignore[arg-type] +np.unpackbits(AR_u1, bitorder=">") # type: ignore[call-overload] np.shares_memory(1, 1, max_work=i8) # type: ignore[arg-type] np.may_share_memory(1, 1, max_work=i8) # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/pass/multiarray.py b/numpy/typing/tests/data/pass/multiarray.py index 26cedfd77566..3a505590b5d3 100644 --- a/numpy/typing/tests/data/pass/multiarray.py +++ 
b/numpy/typing/tests/data/pass/multiarray.py @@ -70,7 +70,8 @@ np.unpackbits(AR_u1) np.shares_memory(1, 2) -np.shares_memory(AR_f8, AR_f8, max_work=1) +np.shares_memory(AR_f8, AR_f8, max_work=-1) np.may_share_memory(1, 2) -np.may_share_memory(AR_f8, AR_f8, max_work=1) +np.may_share_memory(AR_f8, AR_f8, max_work=0) +np.may_share_memory(AR_f8, AR_f8, max_work=-1) diff --git a/numpy/typing/tests/data/reveal/multiarray.pyi b/numpy/typing/tests/data/reveal/multiarray.pyi index 6ba3fcde632f..424f60df27e7 100644 --- a/numpy/typing/tests/data/reveal/multiarray.pyi +++ b/numpy/typing/tests/data/reveal/multiarray.pyi @@ -66,7 +66,7 @@ assert_type(np.inner(AR_f8, AR_i8), Any) assert_type(np.where([True, True, False]), tuple[npt.NDArray[np.intp], ...]) assert_type(np.where([True, True, False], 1, 0), npt.NDArray[Any]) -assert_type(np.lexsort([0, 1, 2]), Any) +assert_type(np.lexsort([0, 1, 2]), npt.NDArray[np.intp]) assert_type(np.can_cast(np.dtype("i8"), int), bool) assert_type(np.can_cast(AR_f8, "f8"), bool) @@ -94,16 +94,19 @@ assert_type(np.copyto(AR_f8, [1., 1.5, 1.6]), None) assert_type(np.putmask(AR_f8, [True, True, False], 1.5), None) -assert_type(np.packbits(AR_i8), npt.NDArray[np.uint8]) -assert_type(np.packbits(AR_u1), npt.NDArray[np.uint8]) +assert_type(np.packbits(AR_i8), np.ndarray[tuple[int], np.dtype[np.uint8]]) +assert_type(np.packbits(AR_u1), np.ndarray[tuple[int], np.dtype[np.uint8]]) +assert_type(np.packbits(AR_i8, axis=1), npt.NDArray[np.uint8]) +assert_type(np.packbits(AR_u1, axis=1), npt.NDArray[np.uint8]) -assert_type(np.unpackbits(AR_u1), npt.NDArray[np.uint8]) +assert_type(np.unpackbits(AR_u1), np.ndarray[tuple[int], np.dtype[np.uint8]]) +assert_type(np.unpackbits(AR_u1, axis=1), npt.NDArray[np.uint8]) assert_type(np.shares_memory(1, 2), bool) -assert_type(np.shares_memory(AR_f8, AR_f8, max_work=1), bool) +assert_type(np.shares_memory(AR_f8, AR_f8, max_work=-1), bool) assert_type(np.may_share_memory(1, 2), bool) -assert_type(np.may_share_memory(AR_f8, 
AR_f8, max_work=1), bool) +assert_type(np.may_share_memory(AR_f8, AR_f8, max_work=0), bool) assert_type(np.promote_types(np.int32, np.int64), np.dtype) assert_type(np.promote_types("f4", float), np.dtype) From 93864e12bff826b56c6d7d9a572ca07420f9b37d Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 4 Nov 2025 08:45:54 +0100 Subject: [PATCH 0805/1718] TST: Update `_convert2ma` test signature for `np.ma.empty_like` --- numpy/ma/tests/test_core.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index b978b25f5827..a082f8aa7450 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -5989,7 +5989,13 @@ def test_frommethod_signature(fn, signature): "fill_value=None, hardmask=False)" ), ), - (np.ma.empty_like, "(*args, **kwargs)"), + ( + np.ma.empty_like, + ( + "(prototype, /, dtype=None, order='K', subok=True, shape=None, *, " + "device=None)" + ), + ), (np.ma.squeeze, "(a, axis=None, *, fill_value=None, hardmask=False)"), ( np.ma.identity, From 91b4a3d326090da711a07ad4e8d403055a622a2b Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 4 Nov 2025 08:57:12 +0100 Subject: [PATCH 0806/1718] TYP: ignore a python-version-dependent mypy false-positive --- numpy/_core/multiarray.pyi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index c1a5bb7567fe..ff22fc312a11 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -18,7 +18,7 @@ from typing import ( from typing_extensions import CapsuleType import numpy as np -from numpy import ( +from numpy import ( # type: ignore[attr-defined] # Python >=3.12 _AnyShapeT, _CastingKind, _CopyMode, From 7aa023f008969a43648aaf4c3e707349abc7a2f7 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Tue, 4 Nov 2025 23:57:32 +0100 Subject: [PATCH 0807/1718] BUG: ``broadcast`` runtime signatures (#30137) --- numpy/_core/_add_newdocs.py | 8 ++++++++ 
numpy/_core/tests/test_numeric.py | 13 +++++++++++++ 2 files changed, 21 insertions(+) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index bdb84d78bb27..afc5307eeacc 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -638,6 +638,9 @@ add_newdoc('numpy._core', 'broadcast', """ + broadcast(*arrays) + -- + Produce an object that mimics broadcasting. Parameters @@ -807,8 +810,13 @@ """)) +# methods + add_newdoc('numpy._core', 'broadcast', ('reset', """ + reset($self, /) + -- + reset() Reset the broadcasted result's iterator(s). diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py index 9af89c6ff391..485147500461 100644 --- a/numpy/_core/tests/test_numeric.py +++ b/numpy/_core/tests/test_numeric.py @@ -4205,6 +4205,19 @@ def test_shape_mismatch_error_message(self): r"arg 2 with shape \(2,\)"): np.broadcast([[1, 2, 3]], [[4], [5]], [6, 7]) + @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") + @pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") + def test_signatures(self): + sig_new = inspect.signature(np.broadcast) + assert len(sig_new.parameters) == 1 + assert "arrays" in sig_new.parameters + assert sig_new.parameters["arrays"].kind == inspect.Parameter.VAR_POSITIONAL + + sig_reset = inspect.signature(np.broadcast.reset) + assert len(sig_reset.parameters) == 1 + assert "self" in sig_reset.parameters + assert sig_reset.parameters["self"].kind == inspect.Parameter.POSITIONAL_ONLY + class TestKeepdims: From 7388112cf643238dad828677455d4722a8382bec Mon Sep 17 00:00:00 2001 From: diego_atencia <53157128+alektebel@users.noreply.github.com> Date: Wed, 5 Nov 2025 10:14:19 +0100 Subject: [PATCH 0808/1718] DOC: Fix Returns section formatting in linalg.qr and linalg.svd (#30148) * DOC: Fix Returns section formatting in linalg.qr and linalg.svd Move initial return information from Returns section to Notes section to comply with numpydoc standards and fix rendering 
issues. Fixes #29779 [skip actions][skip azp][skip cirrus] Co-authored-by: Ross Barnowski Co-authored-by: Matti Picus --- numpy/linalg/_linalg.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/numpy/linalg/_linalg.py b/numpy/linalg/_linalg.py index cfcaef13daf2..6884c9b7ef8d 100644 --- a/numpy/linalg/_linalg.py +++ b/numpy/linalg/_linalg.py @@ -995,9 +995,6 @@ def qr(a, mode='reduced'): Returns ------- - When mode is 'reduced' or 'complete', the result will be a namedtuple with - the attributes `Q` and `R`. - Q : ndarray of float or complex, optional A matrix with orthonormal columns. When mode = 'complete' the result is an orthogonal/unitary matrix depending on whether or not @@ -1026,6 +1023,9 @@ def qr(a, mode='reduced'): Notes ----- + When mode is 'reduced' or 'complete', the result will be a namedtuple with + the attributes ``Q`` and ``R``. + This is an interface to the LAPACK routines ``dgeqrf``, ``zgeqrf``, ``dorgqr``, and ``zungqr``. @@ -1696,9 +1696,6 @@ def svd(a, full_matrices=True, compute_uv=True, hermitian=False): Returns ------- - When `compute_uv` is True, the result is a namedtuple with the following - attribute names: - U : { (..., M, M), (..., M, K) } array Unitary array(s). The first ``a.ndim - 2`` dimensions have the same size as those of the input `a`. The size of the last two dimensions @@ -1726,6 +1723,9 @@ def svd(a, full_matrices=True, compute_uv=True, hermitian=False): Notes ----- + When `compute_uv` is True, the result is a namedtuple with the following + attribute names: `U`, `S`, and `Vh`. + The decomposition is performed using LAPACK routine ``_gesdd``. SVD is usually described for the factorization of a 2D matrix :math:`A`. 
From ee591622a397b361f7cf6a136fc32c03a4e2bea0 Mon Sep 17 00:00:00 2001 From: "T.Yamada" Date: Wed, 5 Nov 2025 20:41:55 +0900 Subject: [PATCH 0809/1718] FIX: Not show signature in git_version (#30149) --- numpy/_build_utils/gitversion.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_build_utils/gitversion.py b/numpy/_build_utils/gitversion.py index 7975dd9dba65..47dd71d1567b 100644 --- a/numpy/_build_utils/gitversion.py +++ b/numpy/_build_utils/gitversion.py @@ -28,7 +28,7 @@ def git_version(version): git_hash = '' try: p = subprocess.Popen( - ['git', 'log', '-1', '--format="%H %aI"'], + ['git', '-c', 'log.showSignature=false', 'log', '-1', '--format="%H %aI"'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=os.path.dirname(__file__), From 05eab252df4918ea06c06250cf7d6b054d89fb1c Mon Sep 17 00:00:00 2001 From: Michael Davidsaver Date: Wed, 5 Nov 2025 11:26:19 -0800 Subject: [PATCH 0810/1718] BUG: decref on error in PyArray_NewFromDescr (#30152) --- numpy/_core/src/multiarray/ctors.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index 76aa2fca86e7..63434b6b5954 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -992,15 +992,16 @@ PyArray_NewFromDescr( int nd, npy_intp const *dims, npy_intp const *strides, void *data, int flags, PyObject *obj) { - if (subtype == NULL) { + if (descr == NULL) { PyErr_SetString(PyExc_ValueError, - "subtype is NULL in PyArray_NewFromDescr"); + "descr is NULL in PyArray_NewFromDescr"); return NULL; } - if (descr == NULL) { + if (subtype == NULL) { PyErr_SetString(PyExc_ValueError, - "descr is NULL in PyArray_NewFromDescr"); + "subtype is NULL in PyArray_NewFromDescr"); + Py_DECREF(descr); return NULL; } From 83b118ea17a3e984853ef26eb4a3bfe73dcf977d Mon Sep 17 00:00:00 2001 From: Amelia Thurdekoos Date: Wed, 5 Nov 2025 13:05:20 -0800 Subject: [PATCH 0811/1718] BUG: 
update requires to requirements in numpy.multiarray see #30142 --- numpy/_core/src/multiarray/ctors.c | 14 +++++++------- numpy/_core/src/multiarray/ctors.h | 4 ++-- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index 76aa2fca86e7..407e02d48103 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -1793,7 +1793,7 @@ cleanup:; */ NPY_NO_EXPORT PyObject * PyArray_CheckFromAny(PyObject *op, PyArray_Descr *descr, int min_depth, - int max_depth, int requires, PyObject *context) + int max_depth, int requirements, PyObject *context) { npy_dtype_info dt_info = {NULL, NULL}; @@ -1814,7 +1814,7 @@ PyArray_CheckFromAny(PyObject *op, PyArray_Descr *descr, int min_depth, } PyObject* ret = PyArray_CheckFromAny_int( - op, dt_info.descr, dt_info.dtype, min_depth, max_depth, requires, + op, dt_info.descr, dt_info.dtype, min_depth, max_depth, requirements, context); Py_XDECREF(dt_info.descr); @@ -1829,11 +1829,11 @@ PyArray_CheckFromAny(PyObject *op, PyArray_Descr *descr, int min_depth, NPY_NO_EXPORT PyObject * PyArray_CheckFromAny_int(PyObject *op, PyArray_Descr *in_descr, PyArray_DTypeMeta *in_DType, int min_depth, - int max_depth, int requires, PyObject *context) + int max_depth, int requirements, PyObject *context) { PyObject *obj; Py_XINCREF(in_descr); /* take ownership as we may replace it */ - if (requires & NPY_ARRAY_NOTSWAPPED) { + if (requirements & NPY_ARRAY_NOTSWAPPED) { if (!in_descr && PyArray_Check(op)) { in_descr = PyArray_DESCR((PyArrayObject *)op); Py_INCREF(in_descr); @@ -1848,16 +1848,16 @@ PyArray_CheckFromAny_int(PyObject *op, PyArray_Descr *in_descr, int was_scalar; obj = PyArray_FromAny_int(op, in_descr, in_DType, min_depth, - max_depth, requires, context, &was_scalar); + max_depth, requirements, context, &was_scalar); Py_XDECREF(in_descr); if (obj == NULL) { return NULL; } - if ((requires & NPY_ARRAY_ELEMENTSTRIDES) + if 
((requirements & NPY_ARRAY_ELEMENTSTRIDES) && !PyArray_ElementStrides(obj)) { PyObject *ret; - if (requires & NPY_ARRAY_ENSURENOCOPY) { + if (requirements & NPY_ARRAY_ENSURENOCOPY) { PyErr_SetString(PyExc_ValueError, npy_no_copy_err_msg); return NULL; } diff --git a/numpy/_core/src/multiarray/ctors.h b/numpy/_core/src/multiarray/ctors.h index 094589968b66..b7a60e0065e0 100644 --- a/numpy/_core/src/multiarray/ctors.h +++ b/numpy/_core/src/multiarray/ctors.h @@ -68,11 +68,11 @@ PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth, NPY_NO_EXPORT PyObject * PyArray_CheckFromAny_int(PyObject *op, PyArray_Descr *in_descr, PyArray_DTypeMeta *in_DType, int min_depth, - int max_depth, int requires, PyObject *context); + int max_depth, int requirements, PyObject *context); NPY_NO_EXPORT PyObject * PyArray_CheckFromAny(PyObject *op, PyArray_Descr *descr, int min_depth, - int max_depth, int requires, PyObject *context); + int max_depth, int requirements, PyObject *context); NPY_NO_EXPORT PyObject * PyArray_FromArray(PyArrayObject *arr, PyArray_Descr *newtype, int flags); From 7497f61ce72267a02e4de50026826af025f06426 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 5 Nov 2025 23:57:29 +0100 Subject: [PATCH 0812/1718] BUG, DOC: ``ndarray`` dunder method runtime signatures and missing docs --- numpy/_core/_add_newdocs.py | 145 +++++++++++++++++++++------ numpy/_core/src/multiarray/methods.c | 9 +- numpy/_core/tests/test_multiarray.py | 6 +- 3 files changed, 125 insertions(+), 35 deletions(-) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index afc5307eeacc..bfb3e09d195b 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -2315,8 +2315,10 @@ add_newdoc('numpy._core.multiarray', 'ndarray', """ - ndarray(shape, dtype=float, buffer=None, offset=0, - strides=None, order=None) + ndarray(shape, dtype=None, buffer=None, offset=0, strides=None, order=None) + -- + + ndarray(shape, dtype=float, buffer=None, offset=0, 
strides=None, order=None) An array object represents a multidimensional, homogeneous array of fixed-size items. An associated data-type object describes the @@ -2339,6 +2341,7 @@ Shape of created array. dtype : data-type, optional Any object that can be interpreted as a numpy data type. + Default is `numpy.float64`. buffer : object exposing buffer interface, optional Used to fill the array with data. offset : int, optional @@ -2456,21 +2459,6 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('__array_struct__', """Array protocol: C-struct side.""")) -add_newdoc('numpy._core.multiarray', 'ndarray', ('__dlpack__', - """ - a.__dlpack__(*, stream=None, max_version=None, dl_device=None, copy=None) - - DLPack Protocol: Part of the Array API. - - """)) - -add_newdoc('numpy._core.multiarray', 'ndarray', ('__dlpack_device__', - """ - a.__dlpack_device__() - - DLPack Protocol: Part of the Array API. - - """)) add_newdoc('numpy._core.multiarray', 'ndarray', ('base', """ @@ -3084,28 +3072,50 @@ """)) -add_newdoc('numpy._core.multiarray', 'ndarray', ('__array_wrap__', +add_newdoc('numpy._core.multiarray', 'ndarray', ('__array_function__', """ - __array_wrap__($self, array, context=None, return_scalar=True, /) + __array_function__($self, /, func, types, args, kwargs) -- - a.__array_wrap__(array[, context[, return_scalar]], /) + a.__array_function__(func, types, args, kwargs) - Returns a view of `array` with the same type as self. + See :ref:`NEP 18 ` and :ref:`NEP 35 ` for details. """)) -add_newdoc('numpy._core.multiarray', 'ndarray', ('__copy__', +add_newdoc('numpy._core.multiarray', 'ndarray', ('__array_namespace__', """ - __copy__($self, /) + __array_namespace__($self, /, *, api_version=None) -- - a.__copy__() + a.__array_namespace__(*, api_version=None) - Used if :func:`copy.copy` is called on an array. Returns a copy of the array. + For Array API compatibility. + + """)) - Equivalent to ``a.copy(order='K')``. 
+ +add_newdoc('numpy._core.multiarray', 'ndarray', ('__array_ufunc__', + """ + __array_ufunc__($self, ufunc, method, /, *inputs, **kwargs) + -- + + a.__array_ufunc__(ufunc, method, /, *inputs, **kwargs) + + See :ref:`NEP 13 ` for details. + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('__array_wrap__', + """ + __array_wrap__($self, array, context=None, return_scalar=True, /) + -- + + a.__array_wrap__(array[, context[, return_scalar]], /) + + Returns a view of `array` with the same type as self. """)) @@ -3115,7 +3125,7 @@ __class_getitem__($cls, item, /) -- - a.__class_getitem__(item, /) + ndarray[shape, dtype] Return a parametrized wrapper around the `~numpy.ndarray` type. @@ -3128,11 +3138,10 @@ Examples -------- - >>> from typing import Any >>> import numpy as np - >>> np.ndarray[Any, np.dtype[np.uint8]] - numpy.ndarray[typing.Any, numpy.dtype[numpy.uint8]] + >>> np.ndarray[tuple[int], np.dtype[np.uint8]] + numpy.ndarray[tuple[int], numpy.dtype[numpy.uint8]] See Also -------- @@ -3143,6 +3152,20 @@ """)) +add_newdoc('numpy._core.multiarray', 'ndarray', ('__copy__', + """ + __copy__($self, /) + -- + + a.__copy__() + + Used if :func:`copy.copy` is called on an array. Returns a copy of the array. + + Equivalent to ``a.copy(order='K')``. + + """)) + + add_newdoc('numpy._core.multiarray', 'ndarray', ('__deepcopy__', """ __deepcopy__($self, memo, /) @@ -3155,6 +3178,31 @@ """)) +add_newdoc('numpy._core.multiarray', 'ndarray', ('__dlpack__', + """ + __dlpack__($self, /, *, stream=None, max_version=None, dl_device=None, copy=None) + -- + + a.__dlpack__(*, stream=None, max_version=None, dl_device=None, copy=None) + + Exports the array for consumption by ``from_dlpack()`` as a DLPack capsule. + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('__dlpack_device__', + """ + __dlpack_device__($self, /) + -- + + a.__dlpack_device__() + + Returns device type (``1``) and device ID (``0``) in DLPack format. + Meant for use within ``from_dlpack()``. 
+ + """)) + + add_newdoc('numpy._core.multiarray', 'ndarray', ('__reduce__', """ __reduce__($self, /) @@ -3167,6 +3215,18 @@ """)) +add_newdoc('numpy._core.multiarray', 'ndarray', ('__reduce_ex__', + """ + __reduce_ex__($self, protocol, /) + -- + + a.__reduce_ex__(protocol, /) + + For pickling. + + """)) + + add_newdoc('numpy._core.multiarray', 'ndarray', ('__setstate__', """ __setstate__($self, state, /) @@ -4576,6 +4636,31 @@ """)) +add_newdoc('numpy._core.multiarray', 'ndarray', ('to_device', + """ + to_device($self, device, /, *, stream=None) + -- + + a.to_device(device, /, *, stream=None) + + For Array API compatibility. Since NumPy only supports CPU arrays, this + method is a no-op that returns the same array. + + Parameters + ---------- + device : "cpu" + Must be ``"cpu"``. + stream : None, optional + Currently unsupported. + + Returns + ------- + out : Self + Returns the same array. + + """)) + + add_newdoc('numpy._core.multiarray', 'ndarray', ('tofile', """ tofile($self, fid, /, sep='', format='%s') diff --git a/numpy/_core/src/multiarray/methods.c b/numpy/_core/src/multiarray/methods.c index c856fc52742d..26b5a6e179e0 100644 --- a/numpy/_core/src/multiarray/methods.c +++ b/numpy/_core/src/multiarray/methods.c @@ -2899,7 +2899,8 @@ NPY_NO_EXPORT PyMethodDef array_methods[] = { /* for the sys module */ {"__sizeof__", (PyCFunction) array_sizeof, - METH_NOARGS, NULL}, + METH_NOARGS, + "__sizeof__($self, /)\n--\n\nSize in memory."}, /* for the copy module */ {"__copy__", @@ -2928,11 +2929,13 @@ NPY_NO_EXPORT PyMethodDef array_methods[] = { {"__complex__", (PyCFunction) array_complex, - METH_VARARGS, NULL}, + METH_VARARGS, + "__complex__($self, /)\n--\n\ncomplex(self)"}, {"__format__", (PyCFunction) array_format, - METH_VARARGS, NULL}, + METH_VARARGS, + "__format__($self, spec, /)\n--\n\nformat(self[, spec])"}, {"__class_getitem__", (PyCFunction)array_class_getitem, diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 
d62c2018292c..1b221356512c 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -10880,8 +10880,9 @@ def test_array_dunder_array_preserves_dtype_on_none(dtype): @pytest.mark.parametrize( "methodname", [ - "__array__", "__array_finalize__", "__array_wrap__", - "__copy__", "__deepcopy__", "__reduce__", "__setstate__", + "__array__", "__array_finalize__", "__array_function__", "__array_ufunc__", + "__array_wrap__", "__complex__", "__copy__", "__deepcopy__", + "__reduce__", "__reduce_ex__", "__setstate__", "all", "any", "argmax", "argmin", "argsort", "argpartition", "astype", "byteswap", "choose", "clip", "compress", "conj", "conjugate", "copy", "cumprod", "cumsum", "diagonal", "dot", "dump", "dumps", "fill", "flatten", @@ -10889,6 +10890,7 @@ def test_array_dunder_array_preserves_dtype_on_none(dtype): "repeat", "reshape", "resize", "round", "searchsorted", "setfield", "setflags", "sort", "partition", "squeeze", "std", "sum", "swapaxes", "take", "tofile", "tolist", "tobytes", "trace", "transpose", "var", "view", + "__array_namespace__", "__dlpack__", "__dlpack_device__", "to_device", ], ) def test_array_method_signatures(methodname: str): From 641001177b485bac38cf048615d1802caaf07ba3 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Thu, 6 Nov 2025 00:21:20 +0100 Subject: [PATCH 0813/1718] DOC: Remove nonexistent ``order`` parameter docs of ``ma.asanyarray`` (#30057) --- numpy/ma/core.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 95bec501dc73..f60b42663e00 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -8629,9 +8629,6 @@ def asanyarray(a, dtype=None): Input data, in any form that can be converted to an array. dtype : dtype, optional By default, the data-type is inferred from the input data. - order : {'C', 'F'}, optional - Whether to use row-major ('C') or column-major ('FORTRAN') memory - representation. Default is 'C'. 
Returns ------- From 3a35b7c56d0d7c5ef5b7806c6bde336a58857bbe Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Thu, 6 Nov 2025 00:28:28 +0100 Subject: [PATCH 0814/1718] BUG: array construction function runtime signatures (#30138) --- numpy/_core/_add_newdocs.py | 45 ++++++++++++++++++++-------- numpy/_core/tests/test_multiarray.py | 26 ++++++++++++++++ 2 files changed, 58 insertions(+), 13 deletions(-) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index afc5307eeacc..4fb3c645cd85 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -855,6 +855,19 @@ add_newdoc('numpy._core.multiarray', 'array', """ + array( + object, + dtype=None, + *, + copy=True, + order='K', + subok=False, + ndmin=0, + ndmax=None, + like=None, + ) + -- + array(object, dtype=None, *, copy=True, order='K', subok=False, ndmin=0, ndmax=None, like=None) @@ -933,7 +946,7 @@ ones : Return a new array setting values to one. zeros : Return a new array setting values to zero. full : Return a new array of given shape filled with value. - copy: Return an array copy of the given object. + copy : Return an array copy of the given object. Notes @@ -1004,6 +1017,9 @@ add_newdoc('numpy._core.multiarray', 'asarray', """ + asarray(a, dtype=None, order=None, *, device=None, copy=None, like=None) + -- + asarray(a, dtype=None, order=None, *, device=None, copy=None, like=None) Convert the input to an array. @@ -1053,12 +1069,10 @@ -------- asanyarray : Similar function which passes through subclasses. ascontiguousarray : Convert input to a contiguous array. - asfortranarray : Convert input to an ndarray with column-major - memory order. + asfortranarray : Convert input to an ndarray with column-major memory order. asarray_chkfinite : Similar function which checks input for NaNs and Infs. fromiter : Create an array from an iterator. - fromfunction : Construct an array by executing a function on grid - positions. 
+ fromfunction : Construct an array by executing a function on grid positions. Examples -------- @@ -1097,6 +1111,9 @@ add_newdoc('numpy._core.multiarray', 'asanyarray', """ + asanyarray(a, dtype=None, order=None, *, device=None, copy=None, like=None) + -- + asanyarray(a, dtype=None, order=None, *, device=None, copy=None, like=None) Convert the input to an ndarray, but pass ndarray subclasses through. @@ -1148,13 +1165,10 @@ -------- asarray : Similar function which always returns ndarrays. ascontiguousarray : Convert input to a contiguous array. - asfortranarray : Convert input to an ndarray with column-major - memory order. - asarray_chkfinite : Similar function which checks input for NaNs and - Infs. + asfortranarray : Convert input to an ndarray with column-major memory order. + asarray_chkfinite : Similar function which checks input for NaNs and Infs. fromiter : Create an array from an iterator. - fromfunction : Construct an array by executing a function on grid - positions. + fromfunction : Construct an array by executing a function on grid positions. Examples -------- @@ -1175,6 +1189,9 @@ add_newdoc('numpy._core.multiarray', 'ascontiguousarray', """ + ascontiguousarray(a, dtype=None, *, like=None) + -- + ascontiguousarray(a, dtype=None, *, like=None) Return a contiguous array (ndim >= 1) in memory (C order). @@ -1197,8 +1214,7 @@ See Also -------- - asfortranarray : Convert input to an ndarray with column-major - memory order. + asfortranarray : Convert input to an ndarray with column-major memory order. require : Return an ndarray that satisfies requirements. ndarray.flags : Information about the memory layout of the array. @@ -1238,6 +1254,9 @@ add_newdoc('numpy._core.multiarray', 'asfortranarray', """ + asfortranarray(a, dtype=None, *, like=None) + -- + asfortranarray(a, dtype=None, *, like=None) Return an array (ndim >= 1) laid out in Fortran order in memory. 
diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index d62c2018292c..87c2a598437e 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -591,6 +591,32 @@ def test_array_as_keyword(self, func): else: func(a=3) + @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") + @pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") + @pytest.mark.parametrize("func", + [np.array, + np.asarray, + np.asanyarray, + np.ascontiguousarray, + np.asfortranarray]) + def test_array_signature(self, func): + sig = inspect.signature(func) + + assert len(sig.parameters) >= 3 + + arg0 = "object" if func is np.array else "a" + assert arg0 in sig.parameters + assert sig.parameters[arg0].default is inspect.Parameter.empty + assert sig.parameters[arg0].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD + + assert "dtype" in sig.parameters + assert sig.parameters["dtype"].default is None + assert sig.parameters["dtype"].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD + + assert "like" in sig.parameters + assert sig.parameters["like"].default is None + assert sig.parameters["like"].kind is inspect.Parameter.KEYWORD_ONLY + class TestAssignment: def test_assignment_broadcasting(self): From a90a3423c221f2b0899fc66e352d82bc4385c5af Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Thu, 6 Nov 2025 07:44:10 +0100 Subject: [PATCH 0815/1718] TYP: fix an invalid default value for `array`'s `ndmax` parameter --- numpy/_core/_add_newdocs.py | 9 +++++---- numpy/_core/multiarray.pyi | 10 +++++----- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index e5939be54bcc..99700c61ed65 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -863,13 +863,13 @@ order='K', subok=False, ndmin=0, - ndmax=None, + ndmax=0, like=None, ) -- array(object, dtype=None, *, copy=True, order='K', subok=False, 
ndmin=0, - ndmax=None, like=None) + ndmax=0, like=None) Create an array. @@ -920,8 +920,9 @@ needed to meet this requirement. ndmax : int, optional Specifies the maximum number of dimensions to create when inferring - shape from nested sequences. By default, NumPy recurses through all - nesting levels (up to the compile-time constant ``NPY_MAXDIMS``). + shape from nested sequences. By default (ndmax=0), NumPy recurses + through all nesting levels (up to the compile-time constant + ``NPY_MAXDIMS``). Setting ``ndmax`` stops recursion at the specified depth, preserving deeper nested structures as objects instead of promoting them to higher-dimensional arrays. In this case, ``dtype=object`` is required. diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index ff22fc312a11..35acc4dad5dd 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -464,7 +464,7 @@ def array( order: _OrderKACF = "K", subok: L[True], ndmin: int = 0, - ndmax: int | None = None, + ndmax: int = 0, like: _SupportsArrayFunc | None = None, ) -> _ArrayT: ... @overload @@ -476,7 +476,7 @@ def array( order: _OrderKACF = "K", subok: L[True], ndmin: L[0] = 0, - ndmax: int | None = None, + ndmax: int = 0, like: _SupportsArrayFunc | None = None, ) -> _ArrayT: ... @overload @@ -488,7 +488,7 @@ def array( order: _OrderKACF = "K", subok: bool = False, ndmin: int = 0, - ndmax: int | None = None, + ndmax: int = 0, like: _SupportsArrayFunc | None = None, ) -> NDArray[_ScalarT]: ... @overload @@ -500,7 +500,7 @@ def array( order: _OrderKACF = "K", subok: bool = False, ndmin: int = 0, - ndmax: int | None = None, + ndmax: int = 0, like: _SupportsArrayFunc | None = None, ) -> NDArray[_ScalarT]: ... @overload @@ -512,7 +512,7 @@ def array( order: _OrderKACF = "K", subok: bool = False, ndmin: int = 0, - ndmax: int | None = None, + ndmax: int = 0, like: _SupportsArrayFunc | None = None, ) -> NDArray[Any]: ... 
From 32817ccf7812662e99508e82ce168c8ffc55ac2c Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 4 Nov 2025 21:07:56 +0100 Subject: [PATCH 0816/1718] BUG: ``_core.multiarray`` function runtime signatures --- numpy/_core/_add_newdocs.py | 61 +++++++++++-- numpy/_core/tests/test_multiarray.py | 127 ++++++++++++++++----------- 2 files changed, 127 insertions(+), 61 deletions(-) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index e5939be54bcc..46450e14be40 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -570,6 +570,18 @@ add_newdoc('numpy._core', 'nested_iters', """ + nested_iters( + op, + axes, + flags=None, + op_flags=None, + op_dtypes=None, + order='K', + casting='safe', + buffersize=0, + ) + -- + nested_iters(op, axes, flags=None, op_flags=None, op_dtypes=None, order='K', casting='safe', buffersize=0) @@ -1469,6 +1481,9 @@ add_newdoc('numpy._core.multiarray', 'fromstring', """ + fromstring(string, dtype=None, count=-1, *, sep, like=None) + -- + fromstring(string, dtype=float, count=-1, *, sep, like=None) A new 1-D array initialized from text data in a string. @@ -1478,7 +1493,7 @@ string : str A string containing the data. dtype : data-type, optional - The data type of the array; default: float. For binary input data, + The data type of the array; default: `numpy.float64`. For binary input data, the data must be in exactly this format. Most builtin numeric types are supported and extension types may be supported. count : int, optional @@ -1529,6 +1544,9 @@ add_newdoc('numpy._core.multiarray', 'compare_chararrays', """ + compare_chararrays(a1, a2, cmp, rstrip) + -- + compare_chararrays(a1, a2, cmp, rstrip) Performs element-wise comparison of two string arrays using the @@ -1540,20 +1558,20 @@ Arrays to be compared. cmp : {"<", "<=", "==", ">=", ">", "!="} Type of comparison. - rstrip : Boolean - If True, the spaces at the end of Strings are removed before the comparison. 
+ rstrip : bool + If True, the spaces at the end of strings are removed before the comparison. Returns ------- out : ndarray - The output array of type Boolean with the same shape as a and b. + The output array of type `numpy.bool` with the same shape as `a1` and `a2`. Raises ------ ValueError If `cmp` is not valid. TypeError - If at least one of `a` or `b` is a non-string array + If at least one of `a1` or `a2` is a non-string array Examples -------- @@ -1567,6 +1585,9 @@ add_newdoc('numpy._core.multiarray', 'fromiter', """ + fromiter(iter, dtype, count=-1, *, like=None) + -- + fromiter(iter, dtype, count=-1, *, like=None) Create a new 1-dimensional array from an iterable object. @@ -1622,6 +1643,9 @@ add_newdoc('numpy._core.multiarray', 'fromfile', """ + fromfile(file, dtype=None, count=-1, sep='', offset=0, *, like=None) + -- + fromfile(file, dtype=float, count=-1, sep='', offset=0, *, like=None) Construct an array from data in a text or binary file. @@ -1707,6 +1731,9 @@ add_newdoc('numpy._core.multiarray', 'frombuffer', """ + frombuffer(buffer, dtype=None, count=-1, offset=0, *, like=None) + -- + frombuffer(buffer, dtype=float, count=-1, offset=0, *, like=None) Interpret a buffer as a 1-dimensional array. @@ -1716,7 +1743,7 @@ buffer : buffer_like An object that exposes the buffer interface. dtype : data-type, optional - Data-type of the returned array; default: float. + Data-type of the returned array. Default is `numpy.float64`. count : int, optional Number of items to read. ``-1`` means all data in the buffer. 
offset : int, optional @@ -1767,6 +1794,9 @@ add_newdoc('numpy._core.multiarray', 'from_dlpack', """ + from_dlpack(x, /, *, device=None, copy=None) + -- + from_dlpack(x, /, *, device=None, copy=None) Create a NumPy array from an object implementing the ``__dlpack__`` @@ -1817,6 +1847,9 @@ add_newdoc('numpy._core.multiarray', 'arange', """ + arange(*args, device=None, like=None, **kwargs) + -- + arange([start,] stop[, step,], dtype=None, *, device=None, like=None) Return evenly spaced values within a given interval. @@ -1941,13 +1974,16 @@ add_newdoc('numpy._core.multiarray', '_reconstruct', """_reconstruct(subtype, shape, dtype) - Construct an empty array. Used by Pickles. + Construct an empty array. Used by Pickle. """) add_newdoc('numpy._core.multiarray', 'promote_types', """ - promote_types(type1, type2) + promote_types(type1, type2, /) + -- + + promote_types(type1, type2, /) Returns the data type with the smallest size and smallest scalar kind to which both ``type1`` and ``type2`` may be safely cast. @@ -5064,6 +5100,9 @@ add_newdoc('numpy._core.umath', 'frompyfunc', """ + frompyfunc(func, /, nin, nout, **kwargs) + -- + frompyfunc(func, /, nin, nout, *[, identity]) Takes an arbitrary Python function and returns a NumPy ufunc. @@ -6874,6 +6913,9 @@ add_newdoc('numpy._core.multiarray', 'busdaycalendar', """ + busdaycalendar(weekmask='1111100', holidays=None) + -- + busdaycalendar(weekmask='1111100', holidays=None) A business day calendar object that efficiently stores information @@ -6997,6 +7039,9 @@ add_newdoc('numpy._core.multiarray', 'datetime_data', """ + datetime_data(dtype, /) + -- + datetime_data(dtype, /) Get information about the step size of a date or time type. 
diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index c54a4e60ba9a..ace409e2083c 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -10902,65 +10902,86 @@ def test_array_dunder_array_preserves_dtype_on_none(dtype): @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") -@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") -@pytest.mark.parametrize( - "methodname", - [ - "__array__", "__array_finalize__", "__array_function__", "__array_ufunc__", - "__array_wrap__", "__complex__", "__copy__", "__deepcopy__", - "__reduce__", "__reduce_ex__", "__setstate__", - "all", "any", "argmax", "argmin", "argsort", "argpartition", "astype", - "byteswap", "choose", "clip", "compress", "conj", "conjugate", "copy", - "cumprod", "cumsum", "diagonal", "dot", "dump", "dumps", "fill", "flatten", - "getfield", "item", "max", "mean", "min", "nonzero", "prod", "put", "ravel", - "repeat", "reshape", "resize", "round", "searchsorted", "setfield", "setflags", - "sort", "partition", "squeeze", "std", "sum", "swapaxes", "take", "tofile", - "tolist", "tobytes", "trace", "transpose", "var", "view", - "__array_namespace__", "__dlpack__", "__dlpack_device__", "to_device", +@pytest.mark.skipif(IS_PYPY, reason="PyPy does not modify tp_doc") +class TestTextSignatures: + @pytest.mark.parametrize( + "methodname", + [ + "__array__", "__array_finalize__", "__array_function__", "__array_ufunc__", + "__array_wrap__", "__complex__", "__copy__", "__deepcopy__", + "__reduce__", "__reduce_ex__", "__setstate__", + "all", "any", "argmax", "argmin", "argsort", "argpartition", "astype", + "byteswap", "choose", "clip", "compress", "conj", "conjugate", "copy", + "cumprod", "cumsum", "diagonal", "dot", "dump", "dumps", "fill", "flatten", + "getfield", "item", "max", "mean", "min", "nonzero", "prod", "put", "ravel", + "repeat", "reshape", "resize", "round", "searchsorted", "setfield", + "setflags", 
"sort", "partition", "squeeze", "std", "sum", "swapaxes", + "take", "tofile", "tolist", "tobytes", "trace", "transpose", "var", "view", + "__array_namespace__", "__dlpack__", "__dlpack_device__", "to_device", ], -) -def test_array_method_signatures(methodname: str): - method = getattr(np.ndarray, methodname) - assert callable(method) - - try: - sig = inspect.signature(method) - except ValueError as e: - pytest.fail(f"Could not get signature for np.ndarray.{methodname}: {e}") + ) + def test_array_method_signatures(self, methodname: str): + method = getattr(np.ndarray, methodname) + assert callable(method) - assert "self" in sig.parameters - assert sig.parameters["self"].kind is inspect.Parameter.POSITIONAL_ONLY + try: + sig = inspect.signature(method) + except ValueError as e: + pytest.fail(f"Could not get signature for np.ndarray.{methodname}: {e}") + assert "self" in sig.parameters + assert sig.parameters["self"].kind is inspect.Parameter.POSITIONAL_ONLY -@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") -@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") -@pytest.mark.parametrize("func", [np.empty_like, np.concatenate]) -def test_c_func_dispatcher_text_signature(func): - text_sig = func.__wrapped__.__text_signature__ - assert text_sig.startswith("(") and text_sig.endswith(")") + @pytest.mark.parametrize("func", [np.empty_like, np.concatenate]) + def test_c_func_dispatcher_text_signature(self, func): + text_sig = func.__wrapped__.__text_signature__ + assert text_sig.startswith("(") and text_sig.endswith(")") - sig = inspect.signature(func) - assert sig == inspect.signature(func.__wrapped__) - assert not hasattr(func, "__signature__") + sig = inspect.signature(func) + assert sig == inspect.signature(func.__wrapped__) + assert not hasattr(func, "__signature__") - with pytest.raises(ValueError): - inspect.signature(func, follow_wrapped=False) + with pytest.raises(ValueError): + inspect.signature(func, follow_wrapped=False) + 
@pytest.mark.parametrize( + "func", + [ + np.inner, np.where, np.lexsort, np.can_cast, np.min_scalar_type, + np.result_type, np.dot, np.vdot, np.bincount, np.ravel_multi_index, + np.unravel_index, np.copyto, np.putmask, np.packbits, np.unpackbits, + np.shares_memory, np.may_share_memory, np.is_busday, np.busday_offset, + np.busday_count, np.datetime_as_string, + ], + ) + def test_c_func_dispatcher_signature(self, func): + sig = inspect.signature(func) -@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") -@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") -@pytest.mark.parametrize( - "func", - [ - np.inner, np.where, np.lexsort, np.can_cast, np.min_scalar_type, np.result_type, - np.dot, np.vdot, np.bincount, np.ravel_multi_index, np.unravel_index, np.copyto, - np.putmask, np.packbits, np.unpackbits, np.shares_memory, np.may_share_memory, - np.is_busday, np.busday_offset, np.busday_count, np.datetime_as_string, - ], -) -def test_c_func_dispatcher_signature(func): - sig = inspect.signature(func) + assert hasattr(func, "__signature__") + assert sig == func.__signature__ + assert sig.parameters + + @pytest.mark.parametrize(("func", "parameter_names"), [ + (np.arange, ("args", "device", "like", "kwargs")), + (np.busdaycalendar, ("weekmask", "holidays")), + (np.char.compare_chararrays, ("a1", "a2", "cmp", "rstrip")), + (np.datetime_data, ("dtype",)), + (np.from_dlpack, ("x", "device", "copy")), + (np.frombuffer, ("buffer", "dtype", "count", "offset", "like")), + (np.fromfile, ("file", "dtype", "count", "sep", "offset", "like")), + (np.fromiter, ("iter", "dtype", "count", "like")), + (np.frompyfunc, ("func", "nin", "nout", "kwargs")), + (np.fromstring, ("string", "dtype", "count", "sep", "like")), + (np.nested_iters, ( + "op", "axes", "flags", "op_flags", "op_dtypes", "order", "casting", + "buffersize", + )), + (np.promote_types, ("type1", "type2")), + ]) + def test_add_newdoc_function_signature(self, func, parameter_names): + assert 
not hasattr(func, "__signature__") + assert getattr(func, "__text_signature__", None) - assert hasattr(func, "__signature__") - assert sig == func.__signature__ - assert sig.parameters + sig = inspect.signature(func) + assert sig.parameters + assert tuple(sig.parameters) == parameter_names From 49de145702ea29703259266355d39ea02315da6c Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 5 Nov 2025 00:14:07 +0100 Subject: [PATCH 0817/1718] BUG: refine ``arange`` runtime signature for array-api compatibility --- numpy/_core/_add_newdocs.py | 2 +- numpy/_core/tests/test_multiarray.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index 46450e14be40..d997998b28c2 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -1847,7 +1847,7 @@ add_newdoc('numpy._core.multiarray', 'arange', """ - arange(*args, device=None, like=None, **kwargs) + arange(start_or_stop, /, stop=None, step=1, *, dtype=None, device=None, like=None) -- arange([start,] stop[, step,], dtype=None, *, device=None, like=None) diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index ace409e2083c..2193887cb43f 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -10962,7 +10962,7 @@ def test_c_func_dispatcher_signature(self, func): assert sig.parameters @pytest.mark.parametrize(("func", "parameter_names"), [ - (np.arange, ("args", "device", "like", "kwargs")), + (np.arange, ("start_or_stop", "stop", "step", "dtype", "device", "like")), (np.busdaycalendar, ("weekmask", "holidays")), (np.char.compare_chararrays, ("a1", "a2", "cmp", "rstrip")), (np.datetime_data, ("dtype",)), From 25d74021d3b0f737c85aae9b512bbcbfc2ab6e79 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 5 Nov 2025 00:22:15 +0100 Subject: [PATCH 0818/1718] TYP: update and improve the ``arange`` stubs --- numpy/_core/multiarray.pyi | 174 +++++++++--------- 
numpy/ma/core.pyi | 103 +++++++---- numpy/typing/tests/data/pass/numeric.py | 15 +- .../tests/data/reveal/array_constructors.pyi | 24 ++- 4 files changed, 168 insertions(+), 148 deletions(-) diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index ff22fc312a11..8c3d5dcec1af 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -957,110 +957,102 @@ def frombuffer( like: _SupportsArrayFunc | None = ..., ) -> NDArray[Any]: ... -@overload -def arange( # type: ignore[misc] - stop: _IntLike_co, - /, *, - dtype: None = None, - device: L["cpu"] | None = ..., - like: _SupportsArrayFunc | None = ..., -) -> _Array1D[signedinteger]: ... -@overload -def arange( # type: ignore[misc] - start: _IntLike_co, - stop: _IntLike_co, - step: _IntLike_co = ..., - dtype: None = None, - *, - device: L["cpu"] | None = ..., - like: _SupportsArrayFunc | None = ..., -) -> _Array1D[signedinteger]: ... -@overload -def arange( # type: ignore[misc] - stop: _FloatLike_co, - /, *, - dtype: None = None, - device: L["cpu"] | None = ..., - like: _SupportsArrayFunc | None = ..., -) -> _Array1D[floating]: ... -@overload -def arange( # type: ignore[misc] - start: _FloatLike_co, - stop: _FloatLike_co, - step: _FloatLike_co = ..., - dtype: None = None, +_ArangeScalar: TypeAlias = np.integer | np.floating | np.datetime64 | np.timedelta64 +_ArangeScalarT = TypeVar("_ArangeScalarT", bound=_ArangeScalar) + +# keep in sync with ma.core.arange +# NOTE: The `float64 | Any` return types needed to avoid incompatible overlapping overloads +@overload # dtype= +def arange( + start_or_stop: _ArangeScalar | float, + /, + stop: _ArangeScalar | float | None = None, + step: _ArangeScalar | float = 1, *, - device: L["cpu"] | None = ..., - like: _SupportsArrayFunc | None = ..., -) -> _Array1D[floating]: ... -@overload + dtype: _DTypeLike[_ArangeScalarT], + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[_ArangeScalarT]: ... 
+@overload # (int-like, int-like?, int-like?) def arange( - stop: _TD64Like_co, - /, *, - dtype: None = None, - device: L["cpu"] | None = ..., - like: _SupportsArrayFunc | None = ..., -) -> _Array1D[timedelta64]: ... -@overload + start_or_stop: _IntLike_co, + /, + stop: _IntLike_co | None = None, + step: _IntLike_co = 1, + *, + dtype: type[int] | _DTypeLike[np.int_] | None = None, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.int_]: ... +@overload # (float, float-like?, float-like?) def arange( - start: _TD64Like_co, - stop: _TD64Like_co, - step: _TD64Like_co = ..., - dtype: None = None, + start_or_stop: float | floating, + /, + stop: _FloatLike_co | None = None, + step: _FloatLike_co = 1, *, - device: L["cpu"] | None = ..., - like: _SupportsArrayFunc | None = ..., -) -> _Array1D[timedelta64]: ... -@overload -def arange( # both start and stop must always be specified for datetime64 - start: datetime64, - stop: datetime64, - step: datetime64 = ..., - dtype: None = None, + dtype: type[float] | _DTypeLike[np.float64] | None = None, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.float64 | Any]: ... +@overload # (float-like, float, float-like?) +def arange( + start_or_stop: _FloatLike_co, + /, + stop: float | floating, + step: _FloatLike_co = 1, *, - device: L["cpu"] | None = ..., - like: _SupportsArrayFunc | None = ..., -) -> _Array1D[datetime64]: ... -@overload + dtype: type[float] | _DTypeLike[np.float64] | None = None, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.float64 | Any]: ... +@overload # (timedelta, timedelta-like?, timedelta-like?) def arange( - stop: Any, - /, *, - dtype: _DTypeLike[_ScalarT], - device: L["cpu"] | None = ..., - like: _SupportsArrayFunc | None = ..., -) -> _Array1D[_ScalarT]: ... 
-@overload + start_or_stop: np.timedelta64, + /, + stop: _TD64Like_co | None = None, + step: _TD64Like_co = 1, + *, + dtype: _DTypeLike[np.timedelta64] | None = None, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.timedelta64[Incomplete]]: ... +@overload # (timedelta-like, timedelta, timedelta-like?) def arange( - start: Any, - stop: Any, - step: Any = ..., - dtype: _DTypeLike[_ScalarT] = ..., + start_or_stop: _TD64Like_co, + /, + stop: np.timedelta64, + step: _TD64Like_co = 1, *, - device: L["cpu"] | None = ..., - like: _SupportsArrayFunc | None = ..., -) -> _Array1D[_ScalarT]: ... -@overload + dtype: _DTypeLike[np.timedelta64] | None = None, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.timedelta64[Incomplete]]: ... +@overload # (datetime, datetime, timedelta-like) (requires both start and stop) def arange( - stop: Any, /, + start_or_stop: np.datetime64, + /, + stop: np.datetime64, + step: _TD64Like_co = 1, *, - dtype: DTypeLike | None = ..., - device: L["cpu"] | None = ..., - like: _SupportsArrayFunc | None = ..., -) -> _Array1D[Any]: ... -@overload + dtype: _DTypeLike[np.datetime64] | None = None, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.datetime64[Incomplete]]: ... +@overload # dtype= def arange( - start: Any, - stop: Any, - step: Any = ..., - dtype: DTypeLike | None = ..., + start_or_stop: _ArangeScalar | float, + /, + stop: _ArangeScalar | float | None = None, + step: _ArangeScalar | float = 1, *, - device: L["cpu"] | None = ..., - like: _SupportsArrayFunc | None = ..., -) -> _Array1D[Any]: ... + dtype: DTypeLike | None = None, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[Incomplete]: ... -def datetime_data( - dtype: str | _DTypeLike[datetime64] | _DTypeLike[timedelta64], /, -) -> tuple[str, int]: ... 
+# +def datetime_data(dtype: str | _DTypeLike[datetime64 | timedelta64], /) -> tuple[str, int]: ... # The datetime functions perform unsafe casts to `datetime64[D]`, # so a lot of different argument types are allowed here diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 57959ded47bc..16cdc6b0854b 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -98,6 +98,7 @@ from numpy._typing import ( _DTypeLike, _DTypeLikeBool, _DTypeLikeVoid, + _FloatLike_co, _IntLike_co, _NestedSequence, _ScalarLike_co, @@ -105,6 +106,7 @@ from numpy._typing import ( _ShapeLike, _SupportsArrayFunc, _SupportsDType, + _TD64Like_co, ) from numpy._typing._dtype_like import _VoidDTypeLike @@ -315,6 +317,7 @@ _Ignored: TypeAlias = object # A subset of `MaskedArray` that can be parametrized w.r.t. `np.generic` _MaskedArray: TypeAlias = MaskedArray[_AnyShape, dtype[_ScalarT]] +_Masked1D: TypeAlias = MaskedArray[tuple[int], dtype[_ScalarT]] _MaskedArrayUInt_co: TypeAlias = _MaskedArray[unsignedinteger | np.bool] _MaskedArrayInt_co: TypeAlias = _MaskedArray[integer | np.bool] @@ -3142,85 +3145,111 @@ def _convert2ma( params: dict[str, Any] | None = None, ) -> Callable[..., Any]: ... -# keep roughly in sync with `_core.multiarray.arange` -@overload # int, dtype=None (default) +# keep in sync with `_core.multiarray.arange` +@overload # dtype= def arange( - start: int, - stop: int = ..., - step: int = ..., + start_or_stop: _ArangeScalar | float, /, - dtype: None = None, + stop: _ArangeScalar | float | None = None, + step: _ArangeScalar | float = 1, *, + dtype: _DTypeLike[_ArangeScalarT], device: Literal["cpu"] | None = None, like: _SupportsArrayFunc | None = None, fill_value: _FillValue | None = None, hardmask: bool = False, -) -> MaskedArray[tuple[int], np.dtype[np.int_]]: ... -@overload # float, dtype=None (default) +) -> _Masked1D[_ArangeScalarT]: ... +@overload # (int-like, int-like?, int-like?) 
def arange( - start: float, - stop: float = ..., - step: float = ..., + start_or_stop: _IntLike_co, /, - dtype: None = None, + stop: _IntLike_co | None = None, + step: _IntLike_co = 1, *, + dtype: type[int] | _DTypeLike[np.int_] | None = None, device: Literal["cpu"] | None = None, like: _SupportsArrayFunc | None = None, fill_value: _FillValue | None = None, hardmask: bool = False, -) -> MaskedArray[tuple[int], np.dtype[np.float64 | Any]]: ... -@overload # integer | floating | datetime64 | timedelta64, dtype=None (default) +) -> _Masked1D[np.int_]: ... +@overload # (float, float-like?, float-like?) def arange( - start: _ArangeScalarT, - stop: _ArangeScalarT = ..., - step: _ArangeScalarT = ..., + start_or_stop: float | floating, /, - dtype: None = None, + stop: _FloatLike_co | None = None, + step: _FloatLike_co = 1, *, + dtype: type[float] | _DTypeLike[np.float64] | None = None, device: Literal["cpu"] | None = None, like: _SupportsArrayFunc | None = None, fill_value: _FillValue | None = None, hardmask: bool = False, -) -> MaskedArray[tuple[int], np.dtype[_ArangeScalarT]]: ... -@overload # dtype: known dtype-like (positional) +) -> _Masked1D[np.float64 | Any]: ... +@overload # (float-like, float, float-like?) def arange( - start: _ArangeScalar, - stop: _ArangeScalar, - step: _ArangeScalar, + start_or_stop: _FloatLike_co, /, - dtype: _DTypeLike[_ScalarT], + stop: float | floating, + step: _FloatLike_co = 1, *, + dtype: type[float] | _DTypeLike[np.float64] | None = None, device: Literal["cpu"] | None = None, like: _SupportsArrayFunc | None = None, fill_value: _FillValue | None = None, hardmask: bool = False, -) -> MaskedArray[tuple[int], np.dtype[_ScalarT]]: ... -@overload # dtype: known dtype-like (keyword) +) -> _Masked1D[np.float64 | Any]: ... +@overload # (timedelta, timedelta-like?, timedelta-like?) 
def arange( - start: _ArangeScalar, - stop: _ArangeScalar = ..., - step: _ArangeScalar = ..., + start_or_stop: np.timedelta64, /, + stop: _TD64Like_co | None = None, + step: _TD64Like_co = 1, *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[np.timedelta64] | None = None, device: Literal["cpu"] | None = None, like: _SupportsArrayFunc | None = None, fill_value: _FillValue | None = None, hardmask: bool = False, -) -> MaskedArray[tuple[int], np.dtype[_ScalarT]]: ... -@overload # dtype: unknown dtype +) -> _Masked1D[np.timedelta64[Incomplete]]: ... +@overload # (timedelta-like, timedelta, timedelta-like?) def arange( - start: _ArangeScalar, - stop: _ArangeScalar = ..., - step: _ArangeScalar = ..., + start_or_stop: _TD64Like_co, /, - dtype: DTypeLike | None = None, + stop: np.timedelta64, + step: _TD64Like_co = 1, *, + dtype: _DTypeLike[np.timedelta64] | None = None, device: Literal["cpu"] | None = None, like: _SupportsArrayFunc | None = None, fill_value: _FillValue | None = None, hardmask: bool = False, -) -> MaskedArray[tuple[int]]: ... +) -> _Masked1D[np.timedelta64[Incomplete]]: ... +@overload # (datetime, datetime, timedelta-like) (requires both start and stop) +def arange( + start_or_stop: np.datetime64, + /, + stop: np.datetime64, + step: _TD64Like_co = 1, + *, + dtype: _DTypeLike[np.datetime64] | None = None, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _Masked1D[np.datetime64[Incomplete]]: ... +@overload # dtype= +def arange( + start_or_stop: _ArangeScalar | float, + /, + stop: _ArangeScalar | float | None = None, + step: _ArangeScalar | float = 1, + *, + dtype: DTypeLike | None = None, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _Masked1D[Incomplete]: ... 
# based on `_core.fromnumeric.clip` @overload diff --git a/numpy/typing/tests/data/pass/numeric.py b/numpy/typing/tests/data/pass/numeric.py index 4c1ffa0b2776..825a6dd74f34 100644 --- a/numpy/typing/tests/data/pass/numeric.py +++ b/numpy/typing/tests/data/pass/numeric.py @@ -2,27 +2,20 @@ Tests for :mod:`numpy._core.numeric`. Does not include tests which fall under ``array_constructors``. - """ -from __future__ import annotations - -from typing import cast +from typing import Any import numpy as np -import numpy.typing as npt -class SubClass(npt.NDArray[np.float64]): ... +class SubClass(np.ndarray[tuple[Any, ...], np.dtype[np.float64]]): ... i8 = np.int64(1) -A = cast( - np.ndarray[tuple[int, int, int], np.dtype[np.intp]], - np.arange(27).reshape(3, 3, 3), -) -B: list[list[list[int]]] = A.tolist() +A = np.arange(27).reshape(3, 3, 3) +B = A.tolist() C = np.empty((27, 27)).view(SubClass) np.count_nonzero(i8) diff --git a/numpy/typing/tests/data/reveal/array_constructors.pyi b/numpy/typing/tests/data/reveal/array_constructors.pyi index 1d970ef68de5..b0ba67d6a087 100644 --- a/numpy/typing/tests/data/reveal/array_constructors.pyi +++ b/numpy/typing/tests/data/reveal/array_constructors.pyi @@ -114,17 +114,23 @@ assert_type(np.frombuffer(A), npt.NDArray[np.float64]) assert_type(np.frombuffer(A, dtype=np.int64), npt.NDArray[np.int64]) assert_type(np.frombuffer(A, dtype="c16"), npt.NDArray[Any]) -assert_type(np.arange(False, True), np.ndarray[tuple[int], np.dtype[np.signedinteger]]) -assert_type(np.arange(10), np.ndarray[tuple[int], np.dtype[np.signedinteger]]) -assert_type(np.arange(0, 10, step=2), np.ndarray[tuple[int], np.dtype[np.signedinteger]]) -assert_type(np.arange(10.0), np.ndarray[tuple[int], np.dtype[np.floating]]) -assert_type(np.arange(start=0, stop=10.0), np.ndarray[tuple[int], np.dtype[np.floating]]) -assert_type(np.arange(np.timedelta64(0)), np.ndarray[tuple[int], np.dtype[np.timedelta64]]) -assert_type(np.arange(0, np.timedelta64(10)), 
np.ndarray[tuple[int], np.dtype[np.timedelta64]]) -assert_type(np.arange(np.datetime64("0"), np.datetime64("10")), np.ndarray[tuple[int], np.dtype[np.datetime64]]) +_x_bool: bool +_x_int: int +_x_float: float +_x_timedelta: np.timedelta64 +_x_datetime: np.datetime64 + +assert_type(np.arange(False, True), np.ndarray[tuple[int], np.dtype[np.int_]]) +assert_type(np.arange(10), np.ndarray[tuple[int], np.dtype[np.int_]]) +assert_type(np.arange(0, 10, step=2), np.ndarray[tuple[int], np.dtype[np.int_]]) +assert_type(np.arange(10.0), np.ndarray[tuple[int], np.dtype[np.float64 | Any]]) +assert_type(np.arange(0, stop=10.0), np.ndarray[tuple[int], np.dtype[np.float64 | Any]]) +assert_type(np.arange(_x_timedelta), np.ndarray[tuple[int], np.dtype[np.timedelta64[Any]]]) +assert_type(np.arange(0, _x_timedelta), np.ndarray[tuple[int], np.dtype[np.timedelta64[Any]]]) +assert_type(np.arange(_x_datetime, _x_datetime), np.ndarray[tuple[int], np.dtype[np.datetime64[Any]]]) assert_type(np.arange(10, dtype=np.float64), np.ndarray[tuple[int], np.dtype[np.float64]]) assert_type(np.arange(0, 10, step=2, dtype=np.int16), np.ndarray[tuple[int], np.dtype[np.int16]]) -assert_type(np.arange(10, dtype=int), np.ndarray[tuple[int], np.dtype]) +assert_type(np.arange(10, dtype=int), np.ndarray[tuple[int], np.dtype[np.int_]]) assert_type(np.arange(0, 10, dtype="f8"), np.ndarray[tuple[int], np.dtype]) assert_type(np.require(A), npt.NDArray[np.float64]) From 3b259283bcefc890147bc9eae1e62e57d5edb094 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 5 Nov 2025 00:59:51 +0100 Subject: [PATCH 0819/1718] TYP: Allow passing `step=None` to `arange` --- numpy/_core/multiarray.pyi | 16 ++++++++-------- numpy/ma/core.pyi | 16 ++++++++-------- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index 8c3d5dcec1af..fa4850094646 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -967,7 +967,7 @@ def arange( start_or_stop: 
_ArangeScalar | float, /, stop: _ArangeScalar | float | None = None, - step: _ArangeScalar | float = 1, + step: _ArangeScalar | float | None = 1, *, dtype: _DTypeLike[_ArangeScalarT], device: L["cpu"] | None = None, @@ -978,7 +978,7 @@ def arange( start_or_stop: _IntLike_co, /, stop: _IntLike_co | None = None, - step: _IntLike_co = 1, + step: _IntLike_co | None = 1, *, dtype: type[int] | _DTypeLike[np.int_] | None = None, device: L["cpu"] | None = None, @@ -989,7 +989,7 @@ def arange( start_or_stop: float | floating, /, stop: _FloatLike_co | None = None, - step: _FloatLike_co = 1, + step: _FloatLike_co | None = 1, *, dtype: type[float] | _DTypeLike[np.float64] | None = None, device: L["cpu"] | None = None, @@ -1000,7 +1000,7 @@ def arange( start_or_stop: _FloatLike_co, /, stop: float | floating, - step: _FloatLike_co = 1, + step: _FloatLike_co | None = 1, *, dtype: type[float] | _DTypeLike[np.float64] | None = None, device: L["cpu"] | None = None, @@ -1011,7 +1011,7 @@ def arange( start_or_stop: np.timedelta64, /, stop: _TD64Like_co | None = None, - step: _TD64Like_co = 1, + step: _TD64Like_co | None = 1, *, dtype: _DTypeLike[np.timedelta64] | None = None, device: L["cpu"] | None = None, @@ -1022,7 +1022,7 @@ def arange( start_or_stop: _TD64Like_co, /, stop: np.timedelta64, - step: _TD64Like_co = 1, + step: _TD64Like_co | None = 1, *, dtype: _DTypeLike[np.timedelta64] | None = None, device: L["cpu"] | None = None, @@ -1033,7 +1033,7 @@ def arange( start_or_stop: np.datetime64, /, stop: np.datetime64, - step: _TD64Like_co = 1, + step: _TD64Like_co | None = 1, *, dtype: _DTypeLike[np.datetime64] | None = None, device: L["cpu"] | None = None, @@ -1044,7 +1044,7 @@ def arange( start_or_stop: _ArangeScalar | float, /, stop: _ArangeScalar | float | None = None, - step: _ArangeScalar | float = 1, + step: _ArangeScalar | float | None = 1, *, dtype: DTypeLike | None = None, device: L["cpu"] | None = None, diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 
16cdc6b0854b..e30a60ef2384 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -3151,7 +3151,7 @@ def arange( start_or_stop: _ArangeScalar | float, /, stop: _ArangeScalar | float | None = None, - step: _ArangeScalar | float = 1, + step: _ArangeScalar | float | None = 1, *, dtype: _DTypeLike[_ArangeScalarT], device: Literal["cpu"] | None = None, @@ -3164,7 +3164,7 @@ def arange( start_or_stop: _IntLike_co, /, stop: _IntLike_co | None = None, - step: _IntLike_co = 1, + step: _IntLike_co | None = 1, *, dtype: type[int] | _DTypeLike[np.int_] | None = None, device: Literal["cpu"] | None = None, @@ -3177,7 +3177,7 @@ def arange( start_or_stop: float | floating, /, stop: _FloatLike_co | None = None, - step: _FloatLike_co = 1, + step: _FloatLike_co | None = 1, *, dtype: type[float] | _DTypeLike[np.float64] | None = None, device: Literal["cpu"] | None = None, @@ -3190,7 +3190,7 @@ def arange( start_or_stop: _FloatLike_co, /, stop: float | floating, - step: _FloatLike_co = 1, + step: _FloatLike_co | None = 1, *, dtype: type[float] | _DTypeLike[np.float64] | None = None, device: Literal["cpu"] | None = None, @@ -3203,7 +3203,7 @@ def arange( start_or_stop: np.timedelta64, /, stop: _TD64Like_co | None = None, - step: _TD64Like_co = 1, + step: _TD64Like_co | None = 1, *, dtype: _DTypeLike[np.timedelta64] | None = None, device: Literal["cpu"] | None = None, @@ -3216,7 +3216,7 @@ def arange( start_or_stop: _TD64Like_co, /, stop: np.timedelta64, - step: _TD64Like_co = 1, + step: _TD64Like_co | None = 1, *, dtype: _DTypeLike[np.timedelta64] | None = None, device: Literal["cpu"] | None = None, @@ -3229,7 +3229,7 @@ def arange( start_or_stop: np.datetime64, /, stop: np.datetime64, - step: _TD64Like_co = 1, + step: _TD64Like_co | None = 1, *, dtype: _DTypeLike[np.datetime64] | None = None, device: Literal["cpu"] | None = None, @@ -3242,7 +3242,7 @@ def arange( start_or_stop: _ArangeScalar | float, /, stop: _ArangeScalar | float | None = None, - step: _ArangeScalar | float = 1, + 
step: _ArangeScalar | float | None = 1, *, dtype: DTypeLike | None = None, device: Literal["cpu"] | None = None, From fa476b5ca9b9f6103b216e6fc096c55a422d3efb Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 6 Nov 2025 11:37:06 +0100 Subject: [PATCH 0820/1718] DOC: add release note for #30147 --- doc/release/upcoming_changes/30147.compatibility.rst | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 doc/release/upcoming_changes/30147.compatibility.rst diff --git a/doc/release/upcoming_changes/30147.compatibility.rst b/doc/release/upcoming_changes/30147.compatibility.rst new file mode 100644 index 000000000000..c5d13323fe6e --- /dev/null +++ b/doc/release/upcoming_changes/30147.compatibility.rst @@ -0,0 +1,4 @@ +* Type-checkers will no longer accept calls to `numpy.arange` with + ``start`` as a keyword argument. This was done for compatibility with + the Array API standard. At runtime it is still possible to use + `numpy.arange` with ``start`` as a keyword argument. From 2740d5f6a93bff84e14ef9a4eed1475cbd15f3fd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafael=20Laboissi=C3=A8re?= Date: Thu, 6 Nov 2025 16:49:46 +0100 Subject: [PATCH 0821/1718] ENH: Run SWIG unit tests in CI action (#30161) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Replace deprecated unittest.makeSuite() by unittest.TestLoader().loadTestsFromTestCase() * Do not use negative values in argument of Tensor.Max() The unit tests involving `TestCase.testMaxWrongDim` do not work for `` being one of `uchar`, `ushort`, `uint`, `ulong`, and `ulongLong` (which translate to `uint8`, `uint16`, `uint32`, `uint64`, and `uint64`, respectively). Instead of raising the expected `TypeError`, the unit tests raise `OverflowError`, with the error message “Python integer -1 out of bounds for ”. This is fixed by replacing the negative values in the argument of the `max` function by positive ones. 
* Run unit tests for the SWIG binding at the end of the sdist job The unit tests will be run in the Linux workflow using the Makefile distributed in `tools/swig/test` directory. They are run after installation of NumPy in the system, ensuring that the C header files are correctly included (see GitHub issue [#30131](https://github.com/numpy/numpy/issues/30131)). --- .github/workflows/linux.yml | 5 +++++ tools/swig/test/testArray.py | 6 +++--- tools/swig/test/testFarray.py | 2 +- tools/swig/test/testFlat.py | 24 ++++++++++++------------ tools/swig/test/testFortran.py | 24 ++++++++++++------------ tools/swig/test/testMatrix.py | 24 ++++++++++++------------ tools/swig/test/testSuperTensor.py | 24 ++++++++++++------------ tools/swig/test/testTensor.py | 26 +++++++++++++------------- tools/swig/test/testVector.py | 24 ++++++++++++------------ 9 files changed, 82 insertions(+), 77 deletions(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index b749c60f3cc3..74edb50d9273 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -280,6 +280,11 @@ jobs: run: | cd tools pytest --pyargs numpy -m "not slow" + - name: Test SWIG binding + run: | + sudo apt update + sudo apt install make swig + make -C tools/swig/test test array_api_tests: needs: [smoke_test] diff --git a/tools/swig/test/testArray.py b/tools/swig/test/testArray.py index a8528207c167..b9b4af3319ae 100755 --- a/tools/swig/test/testArray.py +++ b/tools/swig/test/testArray.py @@ -378,9 +378,9 @@ def testView(self): # Build the test suite suite = unittest.TestSuite() - suite.addTest(unittest.makeSuite(Array1TestCase)) - suite.addTest(unittest.makeSuite(Array2TestCase)) - suite.addTest(unittest.makeSuite(ArrayZTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase(Array1TestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase(Array2TestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase(ArrayZTestCase)) # Execute the test suite 
print("Testing Classes of Module Array") diff --git a/tools/swig/test/testFarray.py b/tools/swig/test/testFarray.py index a9310e20a897..75bf99c054cb 100755 --- a/tools/swig/test/testFarray.py +++ b/tools/swig/test/testFarray.py @@ -149,7 +149,7 @@ def testView(self): # Build the test suite suite = unittest.TestSuite() - suite.addTest(unittest.makeSuite(FarrayTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase(FarrayTestCase)) # Execute the test suite print("Testing Classes of Module Farray") diff --git a/tools/swig/test/testFlat.py b/tools/swig/test/testFlat.py index 543ee0c41d9f..43ed84bcfa06 100755 --- a/tools/swig/test/testFlat.py +++ b/tools/swig/test/testFlat.py @@ -176,18 +176,18 @@ def __init__(self, methodName="runTest"): # Build the test suite suite = unittest.TestSuite() - suite.addTest(unittest.makeSuite( scharTestCase)) - suite.addTest(unittest.makeSuite( ucharTestCase)) - suite.addTest(unittest.makeSuite( shortTestCase)) - suite.addTest(unittest.makeSuite( ushortTestCase)) - suite.addTest(unittest.makeSuite( intTestCase)) - suite.addTest(unittest.makeSuite( uintTestCase)) - suite.addTest(unittest.makeSuite( longTestCase)) - suite.addTest(unittest.makeSuite( ulongTestCase)) - suite.addTest(unittest.makeSuite( longLongTestCase)) - suite.addTest(unittest.makeSuite(ulongLongTestCase)) - suite.addTest(unittest.makeSuite( floatTestCase)) - suite.addTest(unittest.makeSuite( doubleTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( scharTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( ucharTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( shortTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( ushortTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( intTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( uintTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( longTestCase)) + 
suite.addTest(unittest.TestLoader().loadTestsFromTestCase( ulongTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( longLongTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase(ulongLongTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( floatTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( doubleTestCase)) # Execute the test suite print("Testing 1D Functions of Module Flat") diff --git a/tools/swig/test/testFortran.py b/tools/swig/test/testFortran.py index 498732f3118f..8b23af610481 100644 --- a/tools/swig/test/testFortran.py +++ b/tools/swig/test/testFortran.py @@ -140,18 +140,18 @@ def __init__(self, methodName="runTest"): # Build the test suite suite = unittest.TestSuite() - suite.addTest(unittest.makeSuite( scharTestCase)) - suite.addTest(unittest.makeSuite( ucharTestCase)) - suite.addTest(unittest.makeSuite( shortTestCase)) - suite.addTest(unittest.makeSuite( ushortTestCase)) - suite.addTest(unittest.makeSuite( intTestCase)) - suite.addTest(unittest.makeSuite( uintTestCase)) - suite.addTest(unittest.makeSuite( longTestCase)) - suite.addTest(unittest.makeSuite( ulongTestCase)) - suite.addTest(unittest.makeSuite( longLongTestCase)) - suite.addTest(unittest.makeSuite(ulongLongTestCase)) - suite.addTest(unittest.makeSuite( floatTestCase)) - suite.addTest(unittest.makeSuite( doubleTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( scharTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( ucharTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( shortTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( ushortTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( intTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( uintTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( longTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( 
ulongTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( longLongTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase(ulongLongTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( floatTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( doubleTestCase)) # Execute the test suite print("Testing 2D Functions of Module Matrix") diff --git a/tools/swig/test/testMatrix.py b/tools/swig/test/testMatrix.py index d20312ecc2a0..d3151a0fb857 100755 --- a/tools/swig/test/testMatrix.py +++ b/tools/swig/test/testMatrix.py @@ -339,18 +339,18 @@ def __init__(self, methodName="runTest"): # Build the test suite suite = unittest.TestSuite() - suite.addTest(unittest.makeSuite( scharTestCase)) - suite.addTest(unittest.makeSuite( ucharTestCase)) - suite.addTest(unittest.makeSuite( shortTestCase)) - suite.addTest(unittest.makeSuite( ushortTestCase)) - suite.addTest(unittest.makeSuite( intTestCase)) - suite.addTest(unittest.makeSuite( uintTestCase)) - suite.addTest(unittest.makeSuite( longTestCase)) - suite.addTest(unittest.makeSuite( ulongTestCase)) - suite.addTest(unittest.makeSuite( longLongTestCase)) - suite.addTest(unittest.makeSuite(ulongLongTestCase)) - suite.addTest(unittest.makeSuite( floatTestCase)) - suite.addTest(unittest.makeSuite( doubleTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( scharTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( ucharTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( shortTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( ushortTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( intTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( uintTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( longTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( ulongTestCase)) + 
suite.addTest(unittest.TestLoader().loadTestsFromTestCase( longLongTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase(ulongLongTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( floatTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( doubleTestCase)) # Execute the test suite print("Testing 2D Functions of Module Matrix") diff --git a/tools/swig/test/testSuperTensor.py b/tools/swig/test/testSuperTensor.py index e0027428e647..f49a0aa07a90 100644 --- a/tools/swig/test/testSuperTensor.py +++ b/tools/swig/test/testSuperTensor.py @@ -374,18 +374,18 @@ def __init__(self, methodName="runTest"): # Build the test suite suite = unittest.TestSuite() - suite.addTest(unittest.makeSuite( scharTestCase)) - suite.addTest(unittest.makeSuite( ucharTestCase)) - suite.addTest(unittest.makeSuite( shortTestCase)) - suite.addTest(unittest.makeSuite( ushortTestCase)) - suite.addTest(unittest.makeSuite( intTestCase)) - suite.addTest(unittest.makeSuite( uintTestCase)) - suite.addTest(unittest.makeSuite( longTestCase)) - suite.addTest(unittest.makeSuite( ulongTestCase)) - suite.addTest(unittest.makeSuite( longLongTestCase)) - suite.addTest(unittest.makeSuite(ulongLongTestCase)) - suite.addTest(unittest.makeSuite( floatTestCase)) - suite.addTest(unittest.makeSuite( doubleTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( scharTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( ucharTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( shortTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( ushortTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( intTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( uintTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( longTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( ulongTestCase)) + 
suite.addTest(unittest.TestLoader().loadTestsFromTestCase( longLongTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase(ulongLongTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( floatTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( doubleTestCase)) # Execute the test suite print("Testing 4D Functions of Module SuperTensor") diff --git a/tools/swig/test/testTensor.py b/tools/swig/test/testTensor.py index aa962b0cbcda..536d848e6135 100755 --- a/tools/swig/test/testTensor.py +++ b/tools/swig/test/testTensor.py @@ -99,7 +99,7 @@ def testMaxWrongDim(self): "Test max function with wrong dimensions" print(self.typeStr, "... ", end=' ', file=sys.stderr) max = Tensor.__dict__[self.typeStr + "Max"] - self.assertRaises(TypeError, max, [0, -1, 2, -3]) + self.assertRaises(TypeError, max, [0, 1, 2, 3]) # Test (int DIM1, int DIM2, int DIM3, type* IN_ARRAY3) typemap def testMin(self): @@ -379,18 +379,18 @@ def __init__(self, methodName="runTest"): # Build the test suite suite = unittest.TestSuite() - suite.addTest(unittest.makeSuite( scharTestCase)) - suite.addTest(unittest.makeSuite( ucharTestCase)) - suite.addTest(unittest.makeSuite( shortTestCase)) - suite.addTest(unittest.makeSuite( ushortTestCase)) - suite.addTest(unittest.makeSuite( intTestCase)) - suite.addTest(unittest.makeSuite( uintTestCase)) - suite.addTest(unittest.makeSuite( longTestCase)) - suite.addTest(unittest.makeSuite( ulongTestCase)) - suite.addTest(unittest.makeSuite( longLongTestCase)) - suite.addTest(unittest.makeSuite(ulongLongTestCase)) - suite.addTest(unittest.makeSuite( floatTestCase)) - suite.addTest(unittest.makeSuite( doubleTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( scharTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( ucharTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( shortTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( ushortTestCase)) 
+ suite.addTest(unittest.TestLoader().loadTestsFromTestCase( intTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( uintTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( longTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( ulongTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( longLongTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase(ulongLongTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( floatTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( doubleTestCase)) # Execute the test suite print("Testing 3D Functions of Module Tensor") diff --git a/tools/swig/test/testVector.py b/tools/swig/test/testVector.py index f0b51715d1d5..15ad96da4503 100755 --- a/tools/swig/test/testVector.py +++ b/tools/swig/test/testVector.py @@ -358,18 +358,18 @@ def __init__(self, methodName="runTest"): # Build the test suite suite = unittest.TestSuite() - suite.addTest(unittest.makeSuite( scharTestCase)) - suite.addTest(unittest.makeSuite( ucharTestCase)) - suite.addTest(unittest.makeSuite( shortTestCase)) - suite.addTest(unittest.makeSuite( ushortTestCase)) - suite.addTest(unittest.makeSuite( intTestCase)) - suite.addTest(unittest.makeSuite( uintTestCase)) - suite.addTest(unittest.makeSuite( longTestCase)) - suite.addTest(unittest.makeSuite( ulongTestCase)) - suite.addTest(unittest.makeSuite( longLongTestCase)) - suite.addTest(unittest.makeSuite(ulongLongTestCase)) - suite.addTest(unittest.makeSuite( floatTestCase)) - suite.addTest(unittest.makeSuite( doubleTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( scharTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( ucharTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( shortTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( ushortTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( 
intTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( uintTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( longTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( ulongTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( longLongTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase(ulongLongTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( floatTestCase)) + suite.addTest(unittest.TestLoader().loadTestsFromTestCase( doubleTestCase)) # Execute the test suite print("Testing 1D Functions of Module Vector") From 666719b3742d5c7a887356de1320b0e959ee5ab5 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Thu, 6 Nov 2025 18:38:18 +0100 Subject: [PATCH 0822/1718] ENH: Add ``order`` parameter to ``np.ma.asanyarray`` (#30163) * ENH: Add ``order`` parameter to ``np.ma.asanyarray`` * TYP: Update the ``np.ma.asanyarray`` stubs --- numpy/ma/core.py | 23 ++++++++++++++++++++--- numpy/ma/core.pyi | 8 ++++---- 2 files changed, 24 insertions(+), 7 deletions(-) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index f60b42663e00..e56b0cfa573b 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -8616,7 +8616,7 @@ def asarray(a, dtype=None, order=None): subok=False, order=order) -def asanyarray(a, dtype=None): +def asanyarray(a, dtype=None, order='A'): """ Convert the input to a masked array, conserving subclasses. @@ -8629,6 +8629,14 @@ def asanyarray(a, dtype=None): Input data, in any form that can be converted to an array. dtype : dtype, optional By default, the data-type is inferred from the input data. + order : {'C', 'F', 'A'}, optional + Specify the order of the array. If order is 'C', then the array + will be in C-contiguous order (last-index varies the fastest). + If order is 'F', then the returned array will be in + Fortran-contiguous order (first-index varies the fastest). 
+ If order is 'A' (default), then the returned array may be + in any order (either C-, Fortran-contiguous, or even discontiguous), + unless a copy is required, in which case it will be C-contiguous. Returns ------- @@ -8658,9 +8666,18 @@ def asanyarray(a, dtype=None): """ # workaround for #8666, to preserve identity. Ideally the bottom line # would handle this for us. - if isinstance(a, MaskedArray) and (dtype is None or dtype == a.dtype): + if ( + isinstance(a, MaskedArray) + and (dtype is None or dtype == a.dtype) + and ( + order == 'A' + or order == 'C' and a.flags.carray + or order == 'F' and a.flags.f_contiguous + ) + ): return a - return masked_array(a, dtype=dtype, copy=False, keep_mask=True, subok=True) + return masked_array(a, dtype=dtype, copy=False, keep_mask=True, subok=True, + order=order) ############################################################################## diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index e30a60ef2384..bbcd617e9699 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -2453,13 +2453,13 @@ def asarray(a: object, dtype: DTypeLike | None = None, order: _OrderKACF | None # keep in sync with `asarray` (but note the additional first overload) @overload -def asanyarray(a: _MArrayT, dtype: None = None) -> _MArrayT: ... +def asanyarray(a: _MArrayT, dtype: None = None, order: _OrderACF = "A") -> _MArrayT: ... @overload -def asanyarray(a: _ArrayLike[_ScalarT], dtype: None = None) -> _MaskedArray[_ScalarT]: ... +def asanyarray(a: _ArrayLike[_ScalarT], dtype: None = None, order: _OrderACF = "A") -> _MaskedArray[_ScalarT]: ... @overload -def asanyarray(a: object, dtype: _DTypeLike[_ScalarT]) -> _MaskedArray[_ScalarT]: ... +def asanyarray(a: object, dtype: _DTypeLike[_ScalarT], order: _OrderACF = "A") -> _MaskedArray[_ScalarT]: ... @overload -def asanyarray(a: object, dtype: DTypeLike | None = None) -> _MaskedArray[_ScalarT]: ... 
+def asanyarray(a: object, dtype: DTypeLike | None = None, order: _OrderACF = "A") -> _MaskedArray[_ScalarT]: ... # def is_masked(x: object) -> bool: ... From 125b5ce97176325a6ee6a1dbb22b0db995087598 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Thu, 6 Nov 2025 20:13:06 +0100 Subject: [PATCH 0823/1718] MAINT: some ``numpy.polynomial.*`` namespace pollution cleanup (#30165) --- numpy/polynomial/chebyshev.py | 8 +++----- numpy/polynomial/hermite.py | 10 ++++------ numpy/polynomial/hermite_e.py | 10 ++++------ numpy/polynomial/laguerre.py | 10 ++++------ numpy/polynomial/legendre.py | 10 ++++------ numpy/polynomial/polynomial.py | 14 ++++++-------- numpy/polynomial/polyutils.py | 6 +++--- numpy/polynomial/tests/test_polynomial.py | 3 +-- 8 files changed, 29 insertions(+), 42 deletions(-) diff --git a/numpy/polynomial/chebyshev.py b/numpy/polynomial/chebyshev.py index 58fce6046287..55b48b905848 100644 --- a/numpy/polynomial/chebyshev.py +++ b/numpy/polynomial/chebyshev.py @@ -108,8 +108,6 @@ """ # noqa: E501 import numpy as np -import numpy.linalg as la -from numpy.lib.array_utils import normalize_axis_index from . 
import polyutils as pu from ._polybase import ABCPolyBase @@ -936,7 +934,7 @@ def chebder(c, m=1, scl=1, axis=0): iaxis = pu._as_int(axis, "the axis") if cnt < 0: raise ValueError("The order of derivation must be non-negative") - iaxis = normalize_axis_index(iaxis, c.ndim) + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c @@ -1059,7 +1057,7 @@ def chebint(c, m=1, k=[], lbnd=0, scl=1, axis=0): raise ValueError("lbnd must be a scalar.") if np.ndim(scl) != 0: raise ValueError("scl must be a scalar.") - iaxis = normalize_axis_index(iaxis, c.ndim) + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c @@ -1721,7 +1719,7 @@ def chebroots(c): # rotated companion matrix reduces error m = chebcompanion(c)[::-1, ::-1] - r = la.eigvals(m) + r = np.linalg.eigvals(m) r.sort() return r diff --git a/numpy/polynomial/hermite.py b/numpy/polynomial/hermite.py index 47e1dfc05b4b..fc2ea9e4194a 100644 --- a/numpy/polynomial/hermite.py +++ b/numpy/polynomial/hermite.py @@ -76,8 +76,6 @@ """ import numpy as np -import numpy.linalg as la -from numpy.lib.array_utils import normalize_axis_index from . 
import polyutils as pu from ._polybase import ABCPolyBase @@ -655,7 +653,7 @@ def hermder(c, m=1, scl=1, axis=0): iaxis = pu._as_int(axis, "the axis") if cnt < 0: raise ValueError("The order of derivation must be non-negative") - iaxis = normalize_axis_index(iaxis, c.ndim) + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c @@ -772,7 +770,7 @@ def hermint(c, m=1, k=[], lbnd=0, scl=1, axis=0): raise ValueError("lbnd must be a scalar.") if np.ndim(scl) != 0: raise ValueError("scl must be a scalar.") - iaxis = normalize_axis_index(iaxis, c.ndim) + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c @@ -1543,7 +1541,7 @@ def hermroots(c): # rotated companion matrix reduces error m = hermcompanion(c)[::-1, ::-1] - r = la.eigvals(m) + r = np.linalg.eigvals(m) r.sort() return r @@ -1636,7 +1634,7 @@ def hermgauss(deg): # matrix is symmetric in this case in order to obtain better zeros. c = np.array([0] * deg + [1], dtype=np.float64) m = hermcompanion(c) - x = la.eigvalsh(m) + x = np.linalg.eigvalsh(m) # improve roots by one application of Newton dy = _normed_hermite_n(x, ideg) diff --git a/numpy/polynomial/hermite_e.py b/numpy/polynomial/hermite_e.py index d30fc1b5aa14..6225b9dd6b75 100644 --- a/numpy/polynomial/hermite_e.py +++ b/numpy/polynomial/hermite_e.py @@ -76,8 +76,6 @@ """ import numpy as np -import numpy.linalg as la -from numpy.lib.array_utils import normalize_axis_index from . 
import polyutils as pu from ._polybase import ABCPolyBase @@ -653,7 +651,7 @@ def hermeder(c, m=1, scl=1, axis=0): iaxis = pu._as_int(axis, "the axis") if cnt < 0: raise ValueError("The order of derivation must be non-negative") - iaxis = normalize_axis_index(iaxis, c.ndim) + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c @@ -770,7 +768,7 @@ def hermeint(c, m=1, k=[], lbnd=0, scl=1, axis=0): raise ValueError("lbnd must be a scalar.") if np.ndim(scl) != 0: raise ValueError("scl must be a scalar.") - iaxis = normalize_axis_index(iaxis, c.ndim) + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c @@ -1461,7 +1459,7 @@ def hermeroots(c): # rotated companion matrix reduces error m = hermecompanion(c)[::-1, ::-1] - r = la.eigvals(m) + r = np.linalg.eigvals(m) r.sort() return r @@ -1548,7 +1546,7 @@ def hermegauss(deg): # matrix is symmetric in this case in order to obtain better zeros. c = np.array([0] * deg + [1]) m = hermecompanion(c) - x = la.eigvalsh(m) + x = np.linalg.eigvalsh(m) # improve roots by one application of Newton dy = _normed_hermite_e_n(x, ideg) diff --git a/numpy/polynomial/laguerre.py b/numpy/polynomial/laguerre.py index 8d5d5ae67632..b1d87bf6d035 100644 --- a/numpy/polynomial/laguerre.py +++ b/numpy/polynomial/laguerre.py @@ -76,8 +76,6 @@ """ import numpy as np -import numpy.linalg as la -from numpy.lib.array_utils import normalize_axis_index from . 
import polyutils as pu from ._polybase import ABCPolyBase @@ -650,7 +648,7 @@ def lagder(c, m=1, scl=1, axis=0): iaxis = pu._as_int(axis, "the axis") if cnt < 0: raise ValueError("The order of derivation must be non-negative") - iaxis = normalize_axis_index(iaxis, c.ndim) + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c @@ -770,7 +768,7 @@ def lagint(c, m=1, k=[], lbnd=0, scl=1, axis=0): raise ValueError("lbnd must be a scalar.") if np.ndim(scl) != 0: raise ValueError("scl must be a scalar.") - iaxis = normalize_axis_index(iaxis, c.ndim) + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c @@ -1525,7 +1523,7 @@ def lagroots(c): # rotated companion matrix reduces error m = lagcompanion(c)[::-1, ::-1] - r = la.eigvals(m) + r = np.linalg.eigvals(m) r.sort() return r @@ -1577,7 +1575,7 @@ def laggauss(deg): # matrix is symmetric in this case in order to obtain better zeros. c = np.array([0] * deg + [1]) m = lagcompanion(c) - x = la.eigvalsh(m) + x = np.linalg.eigvalsh(m) # improve roots by one application of Newton dy = lagval(x, c) diff --git a/numpy/polynomial/legendre.py b/numpy/polynomial/legendre.py index 5fdc5245b9d3..237e340cbf45 100644 --- a/numpy/polynomial/legendre.py +++ b/numpy/polynomial/legendre.py @@ -80,8 +80,6 @@ """ import numpy as np -import numpy.linalg as la -from numpy.lib.array_utils import normalize_axis_index from . 
import polyutils as pu from ._polybase import ABCPolyBase @@ -676,7 +674,7 @@ def legder(c, m=1, scl=1, axis=0): iaxis = pu._as_int(axis, "the axis") if cnt < 0: raise ValueError("The order of derivation must be non-negative") - iaxis = normalize_axis_index(iaxis, c.ndim) + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c @@ -799,7 +797,7 @@ def legint(c, m=1, k=[], lbnd=0, scl=1, axis=0): raise ValueError("lbnd must be a scalar.") if np.ndim(scl) != 0: raise ValueError("scl must be a scalar.") - iaxis = normalize_axis_index(iaxis, c.ndim) + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c @@ -1464,7 +1462,7 @@ def legroots(c): # rotated companion matrix reduces error m = legcompanion(c)[::-1, ::-1] - r = la.eigvals(m) + r = np.linalg.eigvals(m) r.sort() return r @@ -1510,7 +1508,7 @@ def leggauss(deg): # matrix is symmetric in this case in order to obtain better zeros. c = np.array([0] * deg + [1]) m = legcompanion(c) - x = la.eigvalsh(m) + x = np.linalg.eigvalsh(m) # improve roots by one application of Newton dy = legval(x, c) diff --git a/numpy/polynomial/polynomial.py b/numpy/polynomial/polynomial.py index 220306693cf9..e3823c89cd98 100644 --- a/numpy/polynomial/polynomial.py +++ b/numpy/polynomial/polynomial.py @@ -81,9 +81,7 @@ 'polycompanion'] import numpy as np -import numpy.linalg as la -from numpy._core.overrides import array_function_dispatch -from numpy.lib.array_utils import normalize_axis_index +from numpy._core.overrides import array_function_dispatch as _array_function_dispatch from . 
import polyutils as pu from ._polybase import ABCPolyBase @@ -523,7 +521,7 @@ def polyder(c, m=1, scl=1, axis=0): iaxis = pu._as_int(axis, "the axis") if cnt < 0: raise ValueError("The order of derivation must be non-negative") - iaxis = normalize_axis_index(iaxis, c.ndim) + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c @@ -637,7 +635,7 @@ def polyint(c, m=1, k=[], lbnd=0, scl=1, axis=0): raise ValueError("lbnd must be a scalar.") if np.ndim(scl) != 0: raise ValueError("scl must be a scalar.") - iaxis = normalize_axis_index(iaxis, c.ndim) + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c @@ -852,7 +850,7 @@ def _polyval2d_dispatcher(x, y, c): def _polygrid2d_dispatcher(x, y, c): return (x, y, c) -@array_function_dispatch(_polyval2d_dispatcher) +@_array_function_dispatch(_polyval2d_dispatcher) def polyval2d(x, y, c): """ Evaluate a 2-D polynomial at points (x, y). @@ -904,7 +902,7 @@ def polyval2d(x, y, c): """ return pu._valnd(polyval, c, x, y) -@array_function_dispatch(_polygrid2d_dispatcher) +@_array_function_dispatch(_polygrid2d_dispatcher) def polygrid2d(x, y, c): """ Evaluate a 2-D polynomial on the Cartesian product of x and y. 
@@ -1547,7 +1545,7 @@ def polyroots(c): return np.array([-c[0] / c[1]]) m = polycompanion(c) - r = la.eigvals(m) + r = np.linalg.eigvals(m) r.sort() return r diff --git a/numpy/polynomial/polyutils.py b/numpy/polynomial/polyutils.py index 18dc0a8d1d24..5e0e1af973ae 100644 --- a/numpy/polynomial/polyutils.py +++ b/numpy/polynomial/polyutils.py @@ -23,8 +23,6 @@ import warnings import numpy as np -from numpy._core.multiarray import dragon4_positional, dragon4_scientific -from numpy.exceptions import RankWarning __all__ = [ 'as_series', 'trimseq', 'trimcoef', 'getdomain', 'mapdomain', 'mapparms', @@ -661,7 +659,7 @@ def _fit(vander_f, x, y, deg, rcond=None, full=False, w=None): # warn on rank reduction if rank != order and not full: msg = "The fit may be poorly conditioned" - warnings.warn(msg, RankWarning, stacklevel=2) + warnings.warn(msg, np.exceptions.RankWarning, stacklevel=2) if full: return c, [resids, rank, s, rcond] @@ -725,6 +723,8 @@ def _as_int(x, desc): def format_float(x, parens=False): + from numpy._core.multiarray import dragon4_positional, dragon4_scientific + if not np.issubdtype(type(x), np.floating): return str(x) diff --git a/numpy/polynomial/tests/test_polynomial.py b/numpy/polynomial/tests/test_polynomial.py index 5c8e85c7f860..4c924a758b06 100644 --- a/numpy/polynomial/tests/test_polynomial.py +++ b/numpy/polynomial/tests/test_polynomial.py @@ -10,7 +10,6 @@ import numpy as np import numpy.polynomial.polynomial as poly -import numpy.polynomial.polyutils as pu from numpy.testing import ( assert_, assert_almost_equal, @@ -657,7 +656,7 @@ def test_fit_degenerate_domain(self): assert_equal(p.coef, [2.]) p = poly.Polynomial.fit([1, 1], [2, 2.1], deg=0) assert_almost_equal(p.coef, [2.05]) - with pytest.warns(pu.RankWarning): + with pytest.warns(np.exceptions.RankWarning): p = poly.Polynomial.fit([1, 1], [2, 2.1], deg=1) def test_result_type(self): From 2b216b1a6072d28e4aa7b02a7e13e69713a20c5d Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Thu, 
6 Nov 2025 20:49:07 +0100 Subject: [PATCH 0824/1718] BUG: ``numpy.random.*`` class runtime signatures (#30164) This fixes `inspect.signature` for - `np.random.BitGenerator` - `np.random.Generator` - `np.random.MT19937` - `np.random.PCG64` - `np.random.PCG64DXSM` - `np.random.Philox` - `np.random.RandomState` - `np.random.SFC64` - `np.random.SeedSequence` - `np.random.bit_generator.SeedlessSeedSequence` This also fixes a typo in `bit_generator.pxd` that accidentally defined an empty unused class `np.random.bit_generator.SeedlessSequence`. Related to #30104, #30114, #30121, #30124, #30126, #30137, #30138, #30140, #30143, #30146, #30147, and #30155 --- numpy/random/_generator.pyx | 118 +++++++++++++------------- numpy/random/_mt19937.pyx | 4 +- numpy/random/_pcg64.pyx | 8 +- numpy/random/_philox.pyx | 6 +- numpy/random/_sfc64.pyx | 4 +- numpy/random/bit_generator.pxd | 2 +- numpy/random/bit_generator.pyx | 18 ++-- numpy/random/mtrand.pyx | 40 ++++----- numpy/random/tests/test_regression.py | 29 ++++++- 9 files changed, 129 insertions(+), 100 deletions(-) diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index d9c2730de8ca..bace56b59422 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -140,8 +140,8 @@ cdef bint _check_bit_generator(object bitgen): cdef class Generator: - """ - Generator(bit_generator) + # the first line is used to populate `__text_signature__` + """Generator(bit_generator)\n-- Container for the BitGenerators. @@ -396,8 +396,8 @@ cdef class Generator: Drawn samples from the parameterized beta distribution. Examples - -------- - The beta distribution has mean a/(a+b). If ``a == b`` and both + -------- + The beta distribution has mean a/(a+b). If ``a == b`` and both are > 1, the distribution is symmetric with mean 0.5. 
>>> rng = np.random.default_rng() @@ -405,11 +405,11 @@ cdef class Generator: >>> sample = rng.beta(a=a, b=b, size=size) >>> np.mean(sample) 0.5047328775385895 # may vary - + Otherwise the distribution is skewed left or right according to whether ``a`` or ``b`` is greater. The distribution is mirror symmetric. See for example: - + >>> a, b, size = 2, 7, 10000 >>> sample_left = rng.beta(a=a, b=b, size=size) >>> sample_right = rng.beta(a=b, b=a, size=size) @@ -422,12 +422,12 @@ cdef class Generator: -0.0003163943736596009 # may vary Display the histogram of the two samples: - + >>> import matplotlib.pyplot as plt - >>> plt.hist([sample_left, sample_right], + >>> plt.hist([sample_left, sample_right], ... 50, density=True, histtype='bar') >>> plt.show() - + References ---------- .. [1] Wikipedia, "Beta distribution", @@ -477,17 +477,17 @@ cdef class Generator: Examples -------- - Assume a company has 10000 customer support agents and the time - between customer calls is exponentially distributed and that the + Assume a company has 10000 customer support agents and the time + between customer calls is exponentially distributed and that the average time between customer calls is 4 minutes. >>> scale, size = 4, 10000 >>> rng = np.random.default_rng() >>> time_between_calls = rng.exponential(scale=scale, size=size) - What is the probability that a customer will call in the next - 4 to 5 minutes? - + What is the probability that a customer will call in the next + 4 to 5 minutes? + >>> x = ((time_between_calls < 5).sum())/size >>> y = ((time_between_calls < 4).sum())/size >>> x - y @@ -718,10 +718,10 @@ cdef class Generator: Notes ----- - This function generates random bytes from a discrete uniform - distribution. The generated bytes are independent from the CPU's + This function generates random bytes from a discrete uniform + distribution. The generated bytes are independent from the CPU's native endianness. 
- + Examples -------- >>> rng = np.random.default_rng() @@ -1024,9 +1024,9 @@ cdef class Generator: greater than or equal to low. The default value is 0. high : float or array_like of floats Upper boundary of the output interval. All values generated will be - less than high. The high limit may be included in the returned array of - floats due to floating-point rounding in the equation - ``low + (high-low) * random_sample()``. high - low must be + less than high. The high limit may be included in the returned array of + floats due to floating-point rounding in the equation + ``low + (high-low) * random_sample()``. high - low must be non-negative. The default value is 1.0. size : int or tuple of ints, optional Output shape. If the given shape is, e.g., ``(m, n, k)``, then @@ -1541,10 +1541,10 @@ cdef class Generator: So there is about a 1% chance that the F statistic will exceed 7.62, the measured value is 36, so the null hypothesis is rejected at the 1% level. - - The corresponding probability density function for ``n = 20`` + + The corresponding probability density function for ``n = 20`` and ``m = 20`` is: - + >>> import matplotlib.pyplot as plt >>> from scipy import stats >>> dfnum, dfden, size = 20, 20, 10000 @@ -1554,7 +1554,7 @@ cdef class Generator: >>> plt.plot(x, stats.f.pdf(x, dfnum, dfden)) >>> plt.xlim([0, 5]) >>> plt.show() - + """ return cont(&random_f, &self._bitgen, size, self.lock, 2, dfnum, 'dfnum', CONS_POSITIVE, @@ -1701,7 +1701,7 @@ cdef class Generator: The distribution of a chi-square random variable with 20 degrees of freedom looks as follows: - + >>> import matplotlib.pyplot as plt >>> import scipy.stats as stats >>> s = rng.chisquare(20, 10000) @@ -1921,14 +1921,14 @@ cdef class Generator: Does their energy intake deviate systematically from the recommended value of 7725 kJ? 
Our null hypothesis will be the absence of deviation, and the alternate hypothesis will be the presence of an effect that could be - either positive or negative, hence making our test 2-tailed. + either positive or negative, hence making our test 2-tailed. Because we are estimating the mean and we have N=11 values in our sample, - we have N-1=10 degrees of freedom. We set our significance level to 95% and - compute the t statistic using the empirical mean and empirical standard - deviation of our intake. We use a ddof of 1 to base the computation of our + we have N-1=10 degrees of freedom. We set our significance level to 95% and + compute the t statistic using the empirical mean and empirical standard + deviation of our intake. We use a ddof of 1 to base the computation of our empirical standard deviation on an unbiased estimate of the variance (note: - the final estimate is not unbiased due to the concave nature of the square + the final estimate is not unbiased due to the concave nature of the square root). >>> np.mean(intake) @@ -1947,18 +1947,18 @@ cdef class Generator: >>> s = rng.standard_t(10, size=1000000) >>> h = plt.hist(s, bins=100, density=True) - Does our t statistic land in one of the two critical regions found at + Does our t statistic land in one of the two critical regions found at both tails of the distribution? >>> np.sum(np.abs(t) < np.abs(s)) / float(len(s)) 0.018318 #random < 0.05, statistic is in critical region - The probability value for this 2-tailed test is about 1.83%, which is - lower than the 5% pre-determined significance threshold. + The probability value for this 2-tailed test is about 1.83%, which is + lower than the 5% pre-determined significance threshold. Therefore, the probability of observing values as extreme as our intake - conditionally on the null hypothesis being true is too low, and we reject - the null hypothesis of no deviation. 
+ conditionally on the null hypothesis being true is too low, and we reject + the null hypothesis of no deviation. """ return cont(&random_standard_t, &self._bitgen, size, self.lock, 1, @@ -3034,21 +3034,21 @@ cdef class Generator: Draw samples from the distribution: >>> rng = np.random.default_rng() - >>> n, p, size = 10, .5, 10000 + >>> n, p, size = 10, .5, 10000 >>> s = rng.binomial(n, p, 10000) Assume a company drills 9 wild-cat oil exploration wells, each with - an estimated probability of success of ``p=0.1``. All nine wells fail. + an estimated probability of success of ``p=0.1``. All nine wells fail. What is the probability of that happening? - Over ``size = 20,000`` trials the probability of this happening + Over ``size = 20,000`` trials the probability of this happening is on average: >>> n, p, size = 9, 0.1, 20000 >>> np.sum(rng.binomial(n=n, p=p, size=size) == 0)/size 0.39015 # may vary - The following can be used to visualize a sample with ``n=100``, + The following can be used to visualize a sample with ``n=100``, ``p=0.4`` and the corresponding probability density function: >>> import matplotlib.pyplot as plt @@ -3167,10 +3167,10 @@ cdef class Generator: appear before the third "1" is a negative binomial distribution. Because this method internally calls ``Generator.poisson`` with an - intermediate random value, a ValueError is raised when the choice of + intermediate random value, a ValueError is raised when the choice of :math:`n` and :math:`p` would result in the mean + 10 sigma of the sampled - intermediate distribution exceeding the max acceptable value of the - ``Generator.poisson`` method. This happens when :math:`p` is too low + intermediate distribution exceeding the max acceptable value of the + ``Generator.poisson`` method. This happens when :math:`p` is too low (a lot of failures happen for every success) and :math:`n` is too big ( a lot of successes are allowed). 
Therefore, the :math:`n` and :math:`p` values must satisfy the constraint: @@ -3302,7 +3302,7 @@ cdef class Generator: >>> s = rng.poisson(lam=lam, size=size) Verify the mean and variance, which should be approximately ``lam``: - + >>> s.mean(), s.var() (4.9917 5.1088311) # may vary @@ -3456,7 +3456,7 @@ cdef class Generator: Examples -------- - Draw 10,000 values from the geometric distribution, with the + Draw 10,000 values from the geometric distribution, with the probability of an individual success equal to ``p = 0.35``: >>> p, size = 0.35, 10000 @@ -3475,7 +3475,7 @@ cdef class Generator: >>> plt.plot(bins, (1-p)**(bins-1)*p) >>> plt.xlim([0, 25]) >>> plt.show() - + """ return disc(&random_geometric, &self._bitgen, size, self.lock, 1, 0, p, 'p', CONS_BOUNDED_GT_0_1, @@ -4646,11 +4646,11 @@ cdef class Generator: -------- shuffle permutation - + Notes ----- - An important distinction between methods ``shuffle`` and ``permuted`` is - how they both treat the ``axis`` parameter which can be found at + An important distinction between methods ``shuffle`` and ``permuted`` is + how they both treat the ``axis`` parameter which can be found at :ref:`generator-handling-axis-parameter`. Examples @@ -4722,7 +4722,7 @@ cdef class Generator: if axis is None: if x.ndim > 1: if not (np.PyArray_FLAGS(out) & (np.NPY_ARRAY_C_CONTIGUOUS | - np.NPY_ARRAY_F_CONTIGUOUS)): + np.NPY_ARRAY_F_CONTIGUOUS)): flags = (np.NPY_ARRAY_C_CONTIGUOUS | NPY_ARRAY_WRITEBACKIFCOPY) to_shuffle = PyArray_FromArray(out, @@ -4802,8 +4802,8 @@ cdef class Generator: Notes ----- - An important distinction between methods ``shuffle`` and ``permuted`` is - how they both treat the ``axis`` parameter which can be found at + An important distinction between methods ``shuffle`` and ``permuted`` is + how they both treat the ``axis`` parameter which can be found at :ref:`generator-handling-axis-parameter`. 
Examples @@ -5011,11 +5011,11 @@ def default_rng(seed=None): Examples -------- `default_rng` is the recommended constructor for the random number class - `Generator`. Here are several ways we can construct a random - number generator using `default_rng` and the `Generator` class. + `Generator`. Here are several ways we can construct a random + number generator using `default_rng` and the `Generator` class. Here we use `default_rng` to generate a random float: - + >>> import numpy as np >>> rng = np.random.default_rng(12345) >>> print(rng) @@ -5025,10 +5025,10 @@ def default_rng(seed=None): 0.22733602246716966 >>> type(rfloat) - - Here we use `default_rng` to generate 3 random integers between 0 + + Here we use `default_rng` to generate 3 random integers between 0 (inclusive) and 10 (exclusive): - + >>> import numpy as np >>> rng = np.random.default_rng(12345) >>> rints = rng.integers(low=0, high=10, size=3) @@ -5036,9 +5036,9 @@ def default_rng(seed=None): array([6, 2, 7]) >>> type(rints[0]) - + Here we specify a seed so that we have reproducible results: - + >>> import numpy as np >>> rng = np.random.default_rng(seed=42) >>> print(rng) diff --git a/numpy/random/_mt19937.pyx b/numpy/random/_mt19937.pyx index ed69c2aa6c58..c74498356dda 100644 --- a/numpy/random/_mt19937.pyx +++ b/numpy/random/_mt19937.pyx @@ -43,8 +43,8 @@ cdef uint64_t mt19937_raw(void *st) noexcept nogil: return mt19937_next32( st) cdef class MT19937(BitGenerator): - """ - MT19937(seed=None) + # the first line is used to populate `__text_signature__` + """MT19937(seed=None)\n-- Container for the Mersenne Twister pseudo-random number generator. 
diff --git a/numpy/random/_pcg64.pyx b/numpy/random/_pcg64.pyx index e6e9b8e0ac3c..fd2dd40db565 100644 --- a/numpy/random/_pcg64.pyx +++ b/numpy/random/_pcg64.pyx @@ -51,8 +51,8 @@ cdef double pcg64_cm_double(void* st) noexcept nogil: return uint64_to_double(pcg64_cm_next64(st)) cdef class PCG64(BitGenerator): - """ - PCG64(seed=None) + # the first line is used to populate `__text_signature__` + """PCG64(seed=None)\n-- BitGenerator for the PCG-64 pseudo-random number generator. @@ -284,8 +284,8 @@ cdef class PCG64(BitGenerator): cdef class PCG64DXSM(BitGenerator): - """ - PCG64DXSM(seed=None) + # the first line is used to populate `__text_signature__` + """PCG64DXSM(seed=None)\n-- BitGenerator for the PCG-64 DXSM pseudo-random number generator. diff --git a/numpy/random/_philox.pyx b/numpy/random/_philox.pyx index 3f33c7078f83..75fc4081ee62 100644 --- a/numpy/random/_philox.pyx +++ b/numpy/random/_philox.pyx @@ -52,8 +52,8 @@ cdef double philox_double(void*st) noexcept nogil: return uint64_to_double(philox_next64( st)) cdef class Philox(BitGenerator): - """ - Philox(seed=None, counter=None, key=None) + # the first line is used to populate `__text_signature__` + """Philox(seed=None, counter=None, key=None)\n-- Container for the Philox (4x64) pseudo-random number generator. @@ -194,7 +194,7 @@ cdef class Philox(BitGenerator): cdef _reset_state_variables(self): cdef philox_state *rng_state = &self.rng_state - + rng_state[0].has_uint32 = 0 rng_state[0].uinteger = 0 rng_state[0].buffer_pos = PHILOX_BUFFER_SIZE diff --git a/numpy/random/_sfc64.pyx b/numpy/random/_sfc64.pyx index 86136f0b42fb..81a5fc3d21e5 100644 --- a/numpy/random/_sfc64.pyx +++ b/numpy/random/_sfc64.pyx @@ -34,8 +34,8 @@ cdef double sfc64_double(void* st) noexcept nogil: cdef class SFC64(BitGenerator): - """ - SFC64(seed=None) + # the first line is used to populate `__text_signature__` + """SFC64(seed=None)\n-- BitGenerator for Chris Doty-Humphrey's Small Fast Chaotic PRNG. 
diff --git a/numpy/random/bit_generator.pxd b/numpy/random/bit_generator.pxd index dfa7d0a71c08..dbaab4721fec 100644 --- a/numpy/random/bit_generator.pxd +++ b/numpy/random/bit_generator.pxd @@ -31,5 +31,5 @@ cdef class SeedSequence(): np.ndarray[np.npy_uint32, ndim=1] entropy_array) cdef get_assembled_entropy(self) -cdef class SeedlessSequence(): +cdef class SeedlessSeedSequence: pass diff --git a/numpy/random/bit_generator.pyx b/numpy/random/bit_generator.pyx index d9a733c7a618..0bb9552a86ce 100644 --- a/numpy/random/bit_generator.pyx +++ b/numpy/random/bit_generator.pyx @@ -226,8 +226,10 @@ class ISpawnableSeedSequence(ISeedSequence): """ -cdef class SeedlessSeedSequence(): - """ +cdef class SeedlessSeedSequence: + # the first line is used to populate `__text_signature__` + """SeedlessSeedSequence()\n-- + A seed sequence for BitGenerators with no need for seed state. See Also @@ -247,9 +249,9 @@ cdef class SeedlessSeedSequence(): ISpawnableSeedSequence.register(SeedlessSeedSequence) -cdef class SeedSequence(): - """ - SeedSequence(entropy=None, *, spawn_key=(), pool_size=4) +cdef class SeedSequence: + # the first line is used to populate `__text_signature__` + """SeedSequence(entropy=None, *, spawn_key=(), pool_size=4, n_children_spawned=0)\n-- SeedSequence mixes sources of entropy in a reproducible way to set the initial state for independent and very probably non-overlapping @@ -489,9 +491,9 @@ cdef class SeedSequence(): ISpawnableSeedSequence.register(SeedSequence) -cdef class BitGenerator(): - """ - BitGenerator(seed=None) +cdef class BitGenerator: + # the first line is used to populate `__text_signature__` + """BitGenerator(seed=None)\n-- Base Class for generic BitGenerators, which provide a stream of random bits based on different algorithms. Must be overridden. 
diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx index 2bf6441bb368..8e7f437641ca 100644 --- a/numpy/random/mtrand.pyx +++ b/numpy/random/mtrand.pyx @@ -119,8 +119,8 @@ cdef object int64_to_long(object x): cdef class RandomState: - """ - RandomState(seed=None) + # the first line is used to populate `__text_signature__` + """RandomState(seed=None)\n-- Container for the slow Mersenne Twister pseudo-random number generator. Consider using a different BitGenerator with the Generator container @@ -542,16 +542,16 @@ cdef class RandomState: Examples -------- - A real world example: Assume a company has 10000 customer support + A real world example: Assume a company has 10000 customer support agents and the average time between customer calls is 4 minutes. >>> n = 10000 >>> time_between_calls = np.random.default_rng().exponential(scale=4, size=n) - What is the probability that a customer will call in the next - 4 to 5 minutes? - - >>> x = ((time_between_calls < 5).sum())/n + What is the probability that a customer will call in the next + 4 to 5 minutes? + + >>> x = ((time_between_calls < 5).sum())/n >>> y = ((time_between_calls < 4).sum())/n >>> x-y 0.08 # may vary @@ -1087,9 +1087,9 @@ cdef class RandomState: greater than or equal to low. The default value is 0. high : float or array_like of floats Upper boundary of the output interval. All values generated will be - less than or equal to high. The high limit may be included in the - returned array of floats due to floating-point rounding in the - equation ``low + (high-low) * random_sample()``. The default value + less than or equal to high. The high limit may be included in the + returned array of floats due to floating-point rounding in the + equation ``low + (high-low) * random_sample()``. The default value is 1.0. size : int or tuple of ints, optional Output shape. 
If the given shape is, e.g., ``(m, n, k)``, then @@ -2226,14 +2226,14 @@ cdef class RandomState: Does their energy intake deviate systematically from the recommended value of 7725 kJ? Our null hypothesis will be the absence of deviation, and the alternate hypothesis will be the presence of an effect that could be - either positive or negative, hence making our test 2-tailed. + either positive or negative, hence making our test 2-tailed. Because we are estimating the mean and we have N=11 values in our sample, - we have N-1=10 degrees of freedom. We set our significance level to 95% and - compute the t statistic using the empirical mean and empirical standard - deviation of our intake. We use a ddof of 1 to base the computation of our + we have N-1=10 degrees of freedom. We set our significance level to 95% and + compute the t statistic using the empirical mean and empirical standard + deviation of our intake. We use a ddof of 1 to base the computation of our empirical standard deviation on an unbiased estimate of the variance (note: - the final estimate is not unbiased due to the concave nature of the square + the final estimate is not unbiased due to the concave nature of the square root). >>> np.mean(intake) @@ -2251,18 +2251,18 @@ cdef class RandomState: >>> s = np.random.standard_t(10, size=1000000) >>> h = plt.hist(s, bins=100, density=True) - Does our t statistic land in one of the two critical regions found at + Does our t statistic land in one of the two critical regions found at both tails of the distribution? >>> np.sum(np.abs(t) < np.abs(s)) / float(len(s)) 0.018318 #random < 0.05, statistic is in critical region - The probability value for this 2-tailed test is about 1.83%, which is - lower than the 5% pre-determined significance threshold. + The probability value for this 2-tailed test is about 1.83%, which is + lower than the 5% pre-determined significance threshold. 
Therefore, the probability of observing values as extreme as our intake - conditionally on the null hypothesis being true is too low, and we reject - the null hypothesis of no deviation. + conditionally on the null hypothesis being true is too low, and we reject + the null hypothesis of no deviation. """ return cont(&legacy_standard_t, &self._aug_state, size, self.lock, 1, diff --git a/numpy/random/tests/test_regression.py b/numpy/random/tests/test_regression.py index b29bd526266e..eeaf6d2b4bd3 100644 --- a/numpy/random/tests/test_regression.py +++ b/numpy/random/tests/test_regression.py @@ -1,8 +1,11 @@ +import inspect import sys +import pytest + import numpy as np from numpy import random -from numpy.testing import assert_, assert_array_equal, assert_raises +from numpy.testing import IS_PYPY, assert_, assert_array_equal, assert_raises class TestRegression: @@ -146,3 +149,27 @@ def __array__(self, dtype=None, copy=None): perm = rng.permutation(m) assert_array_equal(perm, np.array([2, 1, 4, 0, 3])) assert_array_equal(m.__array__(), np.arange(5)) + + @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") + @pytest.mark.skipif(IS_PYPY, reason="PyPy does not modify tp_doc") + @pytest.mark.parametrize( + "cls", + [ + random.Generator, + random.MT19937, + random.PCG64, + random.PCG64DXSM, + random.Philox, + random.RandomState, + random.SFC64, + random.BitGenerator, + random.SeedSequence, + random.bit_generator.SeedlessSeedSequence, + ], + ) + def test_inspect_signature(self, cls: type) -> None: + assert hasattr(cls, "__text_signature__") + try: + inspect.signature(cls) + except ValueError: + pytest.fail(f"invalid signature: {cls.__module__}.{cls.__qualname__}") From 3d2e7a6a2e48eace87361941defbef3dc54b2b2a Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 4 Nov 2025 14:17:15 +0100 Subject: [PATCH 0825/1718] BUG: ``dtype`` and ``dtype.newbyteorder`` runtime signatures --- numpy/_core/_add_newdocs.py | 6 +++++ numpy/_core/tests/test_dtype.py | 41 
+++++++++++++++++++++++++++++++++ 2 files changed, 47 insertions(+) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index 78c660f423f7..995aff7e5874 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -6192,6 +6192,9 @@ add_newdoc('numpy._core.multiarray', 'dtype', """ + dtype(dtype, align=False, copy=False, metadata={}) + -- + dtype(dtype, align=False, copy=False, [metadata]) Create a data type object. @@ -6758,6 +6761,9 @@ add_newdoc('numpy._core.multiarray', 'dtype', ('newbyteorder', """ + newbyteorder($self, new_order='S', /) + -- + newbyteorder(new_order='S', /) Return a new dtype with a different byte order. diff --git a/numpy/_core/tests/test_dtype.py b/numpy/_core/tests/test_dtype.py index 611d5857fb15..c5c6ae653197 100644 --- a/numpy/_core/tests/test_dtype.py +++ b/numpy/_core/tests/test_dtype.py @@ -1,5 +1,6 @@ import ctypes import gc +import inspect import operator import pickle import sys @@ -17,6 +18,7 @@ from numpy._core._rational_tests import rational from numpy.testing import ( HAS_REFCOUNT, + IS_PYPY, IS_PYSTON, IS_WASM, assert_, @@ -1994,3 +1996,42 @@ def test_creating_dtype_with_dtype_class_errors(): # Regression test for #25031, calling `np.dtype` with itself segfaulted. 
with pytest.raises(TypeError, match="Cannot convert np.dtype into a"): np.array(np.ones(10), dtype=np.dtype) + + +@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") +@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") +def test_dtype_signature(): + sig = inspect.signature(np.dtype) + + assert len(sig.parameters) == 4 + + assert "dtype" in sig.parameters + assert sig.parameters["dtype"].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD + assert sig.parameters["dtype"].default is inspect.Parameter.empty + + assert "align" in sig.parameters + assert sig.parameters["align"].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD + assert sig.parameters["align"].default is False + + assert "copy" in sig.parameters + assert sig.parameters["copy"].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD + assert sig.parameters["copy"].default is False + + assert "metadata" in sig.parameters + assert sig.parameters["metadata"].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD + assert sig.parameters["metadata"].default == {} + +@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") +@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") +def test_dtype_newbyteorder_signature(): + sig = inspect.signature(np.dtype.newbyteorder) + + assert len(sig.parameters) == 2 + + assert "self" in sig.parameters + assert sig.parameters["self"].kind is inspect.Parameter.POSITIONAL_ONLY + assert sig.parameters["self"].default is inspect.Parameter.empty + + assert "new_order" in sig.parameters + assert sig.parameters["new_order"].kind is inspect.Parameter.POSITIONAL_ONLY + assert sig.parameters["new_order"].default == "S" From d3d2db1ac1d1302a075622ada2ef70aad9ae9bd8 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 7 Nov 2025 02:20:31 +0100 Subject: [PATCH 0826/1718] BUG: ``dtypes.*DType`` runtime signatures --- numpy/_core/_add_newdocs.py | 50 ++++++++++++++ numpy/_core/src/multiarray/dtypemeta.c | 7 +- numpy/_core/tests/test_dtype.py | 94 
++++++++++++++++---------- 3 files changed, 109 insertions(+), 42 deletions(-) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index 995aff7e5874..3e15e624739c 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -7391,8 +7391,58 @@ def refer_to_array_attribute(attr, method=True): """) +############################################################################## +# +# Documentation for `dtypes.*` classes +# +############################################################################## + +for _dtype_name, _signature, _sctype_name in ( + ("BoolDType", "()", "bool"), + ("Int8DType", "()", "int8"), + ("UInt8DType", "()", "uint8"), + ("Int16DType", "()", "int16"), + ("UInt16DType", "()", "uint16"), + ("Int32DType", "()", "int32"), + ("IntDType", "()", "intc"), + ("UInt32DType", "()", "uint32"), + ("UIntDType", "()", "uintc"), + ("Int64DType", "()", "int64"), + ("UInt64DType", "()", "uint64"), + ("LongLongDType", "()", "longlong"), + ("ULongLongDType", "()", "ulonglong"), + ("Float16DType", "()", "float16"), + ("Float32DType", "()", "float32"), + ("Float64DType", "()", "float64"), + ("LongDoubleDType", "()", "longdouble"), + ("Complex64DType", "()", "complex64"), + ("Complex128DType", "()", "complex128"), + ("CLongDoubleDType", "()", "clongdouble"), + ("ObjectDType", "()", "object"), + ("BytesDType", "(size, /)", "bytes_"), + ("StrDType", "(size, /)", "str_"), + ("VoidDType", "(length, /)", "void"), + ("DateTime64DType", "(unit, /)", "datetime64"), + ("TimeDelta64DType", "(unit, /)", "timedelta64"), +): + add_newdoc('numpy.dtypes', _dtype_name, + f""" + {_dtype_name}{_signature} + -- + + DType class corresponding to the `numpy.{_sctype_name}` scalar type. + + Please see `numpy.dtype` for the typical way to create dtype instances and + :ref:`arrays.dtypes` for additional information. 
+ """) + del _dtype_name, _signature, _sctype_name # avoid namespace pollution + + add_newdoc('numpy._core.multiarray', 'StringDType', """ + StringDType(*, coerce=True, **kwargs) + -- + StringDType(*, na_object=np._NoValue, coerce=True) Create a StringDType instance. diff --git a/numpy/_core/src/multiarray/dtypemeta.c b/numpy/_core/src/multiarray/dtypemeta.c index 1c208aa54a84..bada1addd9cc 100644 --- a/numpy/_core/src/multiarray/dtypemeta.c +++ b/numpy/_core/src/multiarray/dtypemeta.c @@ -1154,12 +1154,7 @@ dtypemeta_wrap_legacy_descriptor( .tp_flags = Py_TPFLAGS_DEFAULT, .tp_base = NULL, /* set below */ .tp_new = (newfunc)legacy_dtype_default_new, - .tp_doc = ( - "DType class corresponding to the scalar type and dtype of " - "the same name.\n\n" - "Please see `numpy.dtype` for the typical way to create\n" - "dtype instances and :ref:`arrays.dtypes` for additional\n" - "information."), + .tp_doc = NULL, /* set in python */ },}, .flags = NPY_DT_LEGACY, /* Further fields are not common between DTypes */ diff --git a/numpy/_core/tests/test_dtype.py b/numpy/_core/tests/test_dtype.py index c5c6ae653197..6623a7668af2 100644 --- a/numpy/_core/tests/test_dtype.py +++ b/numpy/_core/tests/test_dtype.py @@ -1999,39 +1999,61 @@ def test_creating_dtype_with_dtype_class_errors(): @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") -@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") -def test_dtype_signature(): - sig = inspect.signature(np.dtype) - - assert len(sig.parameters) == 4 - - assert "dtype" in sig.parameters - assert sig.parameters["dtype"].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD - assert sig.parameters["dtype"].default is inspect.Parameter.empty - - assert "align" in sig.parameters - assert sig.parameters["align"].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD - assert sig.parameters["align"].default is False - - assert "copy" in sig.parameters - assert sig.parameters["copy"].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD 
- assert sig.parameters["copy"].default is False - - assert "metadata" in sig.parameters - assert sig.parameters["metadata"].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD - assert sig.parameters["metadata"].default == {} - -@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") -@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") -def test_dtype_newbyteorder_signature(): - sig = inspect.signature(np.dtype.newbyteorder) - - assert len(sig.parameters) == 2 - - assert "self" in sig.parameters - assert sig.parameters["self"].kind is inspect.Parameter.POSITIONAL_ONLY - assert sig.parameters["self"].default is inspect.Parameter.empty - - assert "new_order" in sig.parameters - assert sig.parameters["new_order"].kind is inspect.Parameter.POSITIONAL_ONLY - assert sig.parameters["new_order"].default == "S" +@pytest.mark.skipif(IS_PYPY, reason="PyPy does not modify tp_doc") +class TestDTypeSignatures: + def test_signature_dtype(self): + sig = inspect.signature(np.dtype) + + assert len(sig.parameters) == 4 + + assert "dtype" in sig.parameters + assert sig.parameters["dtype"].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD + assert sig.parameters["dtype"].default is inspect.Parameter.empty + + assert "align" in sig.parameters + assert sig.parameters["align"].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD + assert sig.parameters["align"].default is False + + assert "copy" in sig.parameters + assert sig.parameters["copy"].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD + assert sig.parameters["copy"].default is False + + assert "metadata" in sig.parameters + assert sig.parameters["metadata"].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD + assert sig.parameters["metadata"].default == {} + + def test_signature_dtype_newbyteorder(self): + sig = inspect.signature(np.dtype.newbyteorder) + + assert len(sig.parameters) == 2 + + assert "self" in sig.parameters + assert sig.parameters["self"].kind is inspect.Parameter.POSITIONAL_ONLY + assert 
sig.parameters["self"].default is inspect.Parameter.empty + + assert "new_order" in sig.parameters + assert sig.parameters["new_order"].kind is inspect.Parameter.POSITIONAL_ONLY + assert sig.parameters["new_order"].default == "S" + + @pytest.mark.parametrize("typename", np.dtypes.__all__) + def test_signature_dtypes_classes(self, typename: str): + dtype_type = getattr(np.dtypes, typename) + sig = inspect.signature(dtype_type) + + match typename.lower().removesuffix("dtype"): + case "bytes" | "str": + params_expect = {"size"} + case "void": + params_expect = {"length"} + case "datetime64" | "timedelta64": + params_expect = {"unit"} + case "string": + # `na_object` cannot be used in the text signature because of its + # `np._NoValue` default, which isn't supported by `inspect.signature`, + # so `**kwargs` is used instead. + params_expect = {"coerce", "kwargs"} + case _: + params_expect = set() + + params_actual = set(sig.parameters) + assert params_actual == params_expect From ead381527798f3d74bd2668b830138b7b82fe340 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 7 Nov 2025 03:50:17 +0100 Subject: [PATCH 0827/1718] DOC: Warn that `{Void,DateTime64,TimeDelta64}DType` cannot be instantiated --- numpy/_core/_add_newdocs.py | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index 3e15e624739c..61c646f5aae4 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -7425,17 +7425,26 @@ def refer_to_array_attribute(attr, method=True): ("DateTime64DType", "(unit, /)", "datetime64"), ("TimeDelta64DType", "(unit, /)", "timedelta64"), ): + _extra_docs = "" + if _dtype_name in {"VoidDType", "DateTime64DType", "TimeDelta64DType"}: + _extra_docs = f""" + .. warning:: + ``np.dtypes.{_dtype_name}`` cannot be instantiated directly. + Use ``np.dtype("{_sctype_name}[{{unit}}]")`` instead. 
+ """ + add_newdoc('numpy.dtypes', _dtype_name, f""" {_dtype_name}{_signature} -- DType class corresponding to the `numpy.{_sctype_name}` scalar type. - - Please see `numpy.dtype` for the typical way to create dtype instances and - :ref:`arrays.dtypes` for additional information. + {_extra_docs} + See `numpy.dtype` for the typical way to create dtype instances + and :ref:`arrays.dtypes` for additional information. """) - del _dtype_name, _signature, _sctype_name # avoid namespace pollution + + del _dtype_name, _signature, _sctype_name, _extra_docs # avoid namespace pollution add_newdoc('numpy._core.multiarray', 'StringDType', From 9ac8d69e141350cebbec8c9b247c8a459ef212cb Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 7 Nov 2025 04:01:07 +0100 Subject: [PATCH 0828/1718] DOC, TYP: Workaround sphinx rendering issue with (slightly incorrect) `metadata={}` --- numpy/__init__.pyi | 202 +++++++++++++++++++------------- numpy/_core/_add_newdocs.py | 2 +- numpy/_core/tests/test_dtype.py | 7 +- 3 files changed, 126 insertions(+), 85 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index b03f67b1e281..6fd6fbd3898a 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1177,8 +1177,9 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): def __new__( cls, dtype: type[float64 | ct.c_double] | _Float64Codes | _DoubleCodes | None, - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ... ) -> dtype[float64]: ... @@ -1188,8 +1189,9 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): def __new__( cls, dtype: _DTypeLike[_ScalarT], - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[_ScalarT]: ... 
@@ -1207,48 +1209,54 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): def __new__( cls, dtype: type[builtins.bool | np.bool | ct.c_bool] | _BoolCodes, - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[str, Any] = ..., ) -> dtype[np.bool]: ... @overload def __new__( cls, dtype: type[int], # also accepts `type[builtins.bool]` - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[str, Any] = ..., ) -> dtype[int_ | np.bool]: ... @overload def __new__( cls, dtype: type[float], # also accepts `type[int | bool]` - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[str, Any] = ..., ) -> dtype[float64 | int_ | np.bool]: ... @overload def __new__( cls, dtype: type[complex], # also accepts `type[float | int | bool]` - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[str, Any] = ..., ) -> dtype[complex128 | float64 | int_ | np.bool]: ... @overload def __new__( cls, dtype: type[bytes | ct.c_char] | _BytesCodes, - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[str, Any] = ..., ) -> dtype[bytes_]: ... @overload def __new__( cls, dtype: type[str] | _StrCodes, - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[str, Any] = ..., ) -> dtype[str_]: ... 
# NOTE: These `memoryview` overloads assume PEP 688, which requires mypy to @@ -1261,8 +1269,9 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): def __new__( cls, dtype: type[void | memoryview] | _VoidDTypeLike | _VoidCodes, - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[str, Any] = ..., ) -> dtype[void]: ... # NOTE: `_: type[object]` would also accept e.g. `type[object | complex]`, @@ -1271,8 +1280,9 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): def __new__( cls, dtype: type[object_ | _BuiltinObjectLike | ct.py_object[Any]] | _ObjectCodes, - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[str, Any] = ..., ) -> dtype[object_]: ... @@ -1281,48 +1291,54 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): def __new__( cls, dtype: _UInt8Codes | _UByteCodes | type[ct.c_uint8 | ct.c_ubyte], - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[uint8]: ... @overload def __new__( cls, dtype: _UInt16Codes | _UShortCodes | type[ct.c_uint16 | ct.c_ushort], - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[uint16]: ... @overload def __new__( cls, dtype: _UInt32Codes | _UIntCCodes | type[ct.c_uint32 | ct.c_uint], - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[uint32]: ... 
@overload def __new__( cls, dtype: _UInt64Codes | _ULongLongCodes | type[ct.c_uint64 | ct.c_ulonglong], - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[uint64]: ... @overload def __new__( cls, dtype: _UIntPCodes | type[ct.c_void_p | ct.c_size_t], - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[uintp]: ... @overload def __new__( cls, dtype: _ULongCodes | type[ct.c_ulong], - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[ulong]: ... @@ -1331,48 +1347,54 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): def __new__( cls, dtype: _Int8Codes | _ByteCodes | type[ct.c_int8 | ct.c_byte], - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[int8]: ... @overload def __new__( cls, dtype: _Int16Codes | _ShortCodes | type[ct.c_int16 | ct.c_short], - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[int16]: ... @overload def __new__( cls, dtype: _Int32Codes | _IntCCodes | type[ct.c_int32 | ct.c_int], - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[int32]: ... 
@overload def __new__( cls, dtype: _Int64Codes | _LongLongCodes | type[ct.c_int64 | ct.c_longlong], - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[int64]: ... @overload def __new__( cls, dtype: _IntPCodes | type[intp | ct.c_ssize_t], - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[intp]: ... @overload def __new__( cls, dtype: _LongCodes | type[ct.c_long], - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[long]: ... @@ -1381,16 +1403,18 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): def __new__( cls, dtype: _Float16Codes | _HalfCodes, - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[float16]: ... @overload def __new__( cls, dtype: _Float32Codes | _SingleCodes, - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[float32]: ... # float64 codes are covered by overload 1 @@ -1398,8 +1422,9 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): def __new__( cls, dtype: _LongDoubleCodes | type[ct.c_longdouble], - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[longdouble]: ... 
@@ -1408,24 +1433,27 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): def __new__( cls, dtype: _Complex64Codes | _CSingleCodes, - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[complex64]: ... @overload def __new__( cls, dtype: _Complex128Codes | _CDoubleCodes, - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[complex128]: ... @overload def __new__( cls, dtype: _CLongDoubleCodes, - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[clongdouble]: ... @@ -1434,16 +1462,18 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): def __new__( cls, dtype: _TD64Codes, - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[timedelta64]: ... @overload def __new__( cls, dtype: _DT64Codes, - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[datetime64]: ... @@ -1452,9 +1482,10 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): def __new__( cls, dtype: dtypes.StringDType | _StringCodes, - align: builtins.bool = ..., - copy: builtins.bool = ..., - metadata: dict[builtins.str, Any] = ... + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., ) -> dtypes.StringDType: ... 
# Combined char-codes and ctypes, analogous to the scalar-type hierarchy @@ -1462,56 +1493,63 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): def __new__( cls, dtype: _UnsignedIntegerCodes | _UnsignedIntegerCType, - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[unsignedinteger]: ... @overload def __new__( cls, dtype: _SignedIntegerCodes | _SignedIntegerCType, - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[signedinteger]: ... @overload def __new__( cls, dtype: _IntegerCodes | _IntegerCType, - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[integer]: ... @overload def __new__( cls, dtype: _FloatingCodes | _FloatingCType, - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[floating]: ... @overload def __new__( cls, dtype: _ComplexFloatingCodes, - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[complexfloating]: ... @overload def __new__( cls, dtype: _InexactCodes | _FloatingCType, - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[inexact]: ... 
@overload def __new__( cls, dtype: _CharacterCodes | type[bytes | builtins.str | ct.c_char], - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[str, Any] = ..., ) -> dtype[character]: ... @@ -1520,8 +1558,9 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): def __new__( cls, dtype: builtins.str, - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., ) -> dtype: ... @@ -1535,8 +1574,9 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): def __new__( cls, dtype: type[object], - align: builtins.bool = ..., - copy: builtins.bool = ..., + align: builtins.bool = False, + copy: builtins.bool = False, + *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[object_ | Any]: ... diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index 61c646f5aae4..219c6e07a176 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -6192,7 +6192,7 @@ add_newdoc('numpy._core.multiarray', 'dtype', """ - dtype(dtype, align=False, copy=False, metadata={}) + dtype(dtype, align=False, copy=False, **kwargs) -- dtype(dtype, align=False, copy=False, [metadata]) diff --git a/numpy/_core/tests/test_dtype.py b/numpy/_core/tests/test_dtype.py index 6623a7668af2..4ac88d7d0250 100644 --- a/numpy/_core/tests/test_dtype.py +++ b/numpy/_core/tests/test_dtype.py @@ -2018,9 +2018,10 @@ def test_signature_dtype(self): assert sig.parameters["copy"].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD assert sig.parameters["copy"].default is False - assert "metadata" in sig.parameters - assert sig.parameters["metadata"].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD - assert sig.parameters["metadata"].default == {} + # the optional `metadata` parameter has no default, so `**kwargs` must be used + assert "kwargs" in sig.parameters + assert 
sig.parameters["kwargs"].kind is inspect.Parameter.VAR_KEYWORD + assert sig.parameters["kwargs"].default is inspect.Parameter.empty def test_signature_dtype_newbyteorder(self): sig = inspect.signature(np.dtype.newbyteorder) From a0ce2fb88f8d631045bc568923a0a4c0a1b1414e Mon Sep 17 00:00:00 2001 From: Amelia Thurdekoos Date: Fri, 7 Nov 2025 00:31:42 -0800 Subject: [PATCH 0829/1718] MAINT: Migrate einsum.c.src to C++ (einsum.cpp) (#30142) Addresses one item from #29528 No C++ templates were needed as einsum.c.src was not templated but was still a c.src file for historical reason. This is a good starting point for future c++ migration work. What this PR does: - Renamed `einsum.c.src` to `einsum.cpp` - Updated references in genapi.py and meson.build - In `einsum.cpp`: - Added `extern "C"` C linkage where necessary - Added 4 explicit casts to maintain functionality --- numpy/_core/code_generators/genapi.py | 2 +- numpy/_core/meson.build | 2 +- .../src/multiarray/{einsum.c.src => einsum.cpp} | 14 +++++++------- 3 files changed, 9 insertions(+), 9 deletions(-) rename numpy/_core/src/multiarray/{einsum.c.src => einsum.cpp} (99%) diff --git a/numpy/_core/code_generators/genapi.py b/numpy/_core/code_generators/genapi.py index 6a370a7dc3cd..e97177e46153 100644 --- a/numpy/_core/code_generators/genapi.py +++ b/numpy/_core/code_generators/genapi.py @@ -62,7 +62,7 @@ def get_processor(): join('multiarray', 'descriptor.c'), join('multiarray', 'dlpack.c'), join('multiarray', 'dtypemeta.c'), - join('multiarray', 'einsum.c.src'), + join('multiarray', 'einsum.cpp'), join('multiarray', 'public_dtype_api.c'), join('multiarray', 'flagsobject.c'), join('multiarray', 'getset.c'), diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index dc07586bcf8e..2ce11153b581 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -1161,7 +1161,7 @@ src_multiarray = multiarray_gen_headers + [ 'src/multiarray/dragon4.c', 'src/multiarray/dtype_transfer.c', 
'src/multiarray/dtype_traversal.c', - src_file.process('src/multiarray/einsum.c.src'), + 'src/multiarray/einsum.cpp', src_file.process('src/multiarray/einsum_sumprod.c.src'), 'src/multiarray/public_dtype_api.c', 'src/multiarray/flagsobject.c', diff --git a/numpy/_core/src/multiarray/einsum.c.src b/numpy/_core/src/multiarray/einsum.cpp similarity index 99% rename from numpy/_core/src/multiarray/einsum.c.src rename to numpy/_core/src/multiarray/einsum.cpp index 3733c436cb1b..f12cec7824d7 100644 --- a/numpy/_core/src/multiarray/einsum.c.src +++ b/numpy/_core/src/multiarray/einsum.cpp @@ -20,14 +20,14 @@ #include //PyArray_AssignRawScalar #include - +extern "C" { #include "convert.h" #include "common.h" #include "ctors.h" #include "einsum_sumprod.h" #include "einsum_debug.h" - +} /* * Parses the subscripts for one operand into an output of 'ndim' @@ -40,7 +40,6 @@ * - subscripts="abbcbc", ndim=6 -> op_labels=[97, 98, -1, 99, -3, -2] * - subscripts="ab...bc", ndim=6 -> op_labels=[97, 98, 0, 0, -3, 99] */ - static int parse_operand_subscripts(char *subscripts, int length, int ndim, int iop, char *op_labels, @@ -131,13 +130,13 @@ parse_operand_subscripts(char *subscripts, int length, /* If it is a proper label, find any duplicates of it. */ if (label > 0) { /* Search for the next matching label. */ - char *next = memchr(op_labels + idim + 1, label, ndim - idim - 1); + char *next = (char*)memchr(op_labels + idim + 1, label, ndim - idim - 1); while (next != NULL) { /* The offset from next to op_labels[idim] (negative). */ *next = (char)((op_labels + idim) - next); /* Search for the next matching label. */ - next = memchr(next + 1, label, op_labels + ndim - 1 - next); + next = (char*)memchr(next + 1, label, op_labels + ndim - 1 - next); } } } @@ -322,7 +321,7 @@ get_single_op_view(PyArrayObject *op, char *labels, Py_TYPE(op), PyArray_DESCR(op), ndim_output, new_dims, new_strides, PyArray_DATA(op), PyArray_ISWRITEABLE(op) ? 
NPY_ARRAY_WRITEABLE : 0, - (PyObject *)op, (PyObject *)op, 0); + (PyObject *)op, (PyObject *)op, (_NPY_CREATION_FLAGS)0); if (*ret == NULL) { return -1; @@ -472,7 +471,7 @@ prepare_op_axes(int ndim, int iop, char *labels, int *axes, } /* It's a labeled dimension, find the matching one */ else { - char *match = memchr(labels, label, ndim); + char *match = (char*)memchr(labels, label, ndim); /* If the op doesn't have the label, broadcast it */ if (match == NULL) { axes[i] = -1; @@ -1112,6 +1111,7 @@ PyArray_EinsteinSum(char *subscripts, npy_intp nop, * the strides that are fixed for the whole loop. */ stride = NpyIter_GetInnerStrideArray(iter); + sop = get_sum_of_products_function(nop, NpyIter_GetDescrArray(iter)[0]->type_num, NpyIter_GetDescrArray(iter)[0]->elsize, From 2ec5d92ea05cc5daea8974dddb98c85738611c53 Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Fri, 7 Nov 2025 10:45:37 -0500 Subject: [PATCH 0830/1718] BUG: Fix handling by ``unique`` of signed zero in complex types. (#30125) --- benchmarks/benchmarks/bench_lib.py | 2 +- numpy/_core/src/multiarray/unique.cpp | 41 ++++++++++++++++++++------- numpy/lib/tests/test_arraysetops.py | 9 ++++++ 3 files changed, 40 insertions(+), 12 deletions(-) diff --git a/benchmarks/benchmarks/bench_lib.py b/benchmarks/benchmarks/bench_lib.py index 3f7988a2ab7f..33e2871fc727 100644 --- a/benchmarks/benchmarks/bench_lib.py +++ b/benchmarks/benchmarks/bench_lib.py @@ -125,7 +125,7 @@ class Unique(Benchmark): # sizes of the 1D arrays [200, int(2e5)], # percent of np.nan in arrays - [10., 90.], + [0.0, 10., 90.], # percent of unique values in arrays [0.2, 20.], # dtypes of the arrays diff --git a/numpy/_core/src/multiarray/unique.cpp b/numpy/_core/src/multiarray/unique.cpp index 910c5f7c731e..77083c04c519 100644 --- a/numpy/_core/src/multiarray/unique.cpp +++ b/numpy/_core/src/multiarray/unique.cpp @@ -5,6 +5,7 @@ #include #include +#include #include #include #include @@ -42,31 +43,46 @@ size_t hash_integer(const T *value, 
npy_bool equal_nan) { template size_t hash_complex(const T *value, npy_bool equal_nan) { - S value_real = real(*value); - S value_imag = imag(*value); - int hasnan = npy_isnan(value_real) || npy_isnan(value_imag); + std::complex z = *reinterpret_cast *>(value); + int hasnan = npy_isnan(z.real()) || npy_isnan(z.imag()); if (equal_nan && hasnan) { return 0; } // Now, equal_nan is false or neither of the values is not NaN. // So we don't need to worry about NaN here. - const unsigned char* value_bytes = reinterpret_cast(value); - size_t hash = npy_fnv1a(value_bytes, sizeof(T)); + // Convert -0.0 to 0.0. + if (z.real() == 0.0) { + z.real(NPY_PZERO); + } + if (z.imag() == 0.0) { + z.imag(NPY_PZERO); + } + + size_t hash = npy_fnv1a(reinterpret_cast(&z), sizeof(z)); return hash; } size_t hash_complex_clongdouble(const npy_clongdouble *value, npy_bool equal_nan) { - npy_longdouble value_real = npy_creall(*value); - npy_longdouble value_imag = npy_cimagl(*value); - int hasnan = npy_isnan(value_real) || npy_isnan(value_imag); + std::complex z = + *reinterpret_cast *>(value); + int hasnan = npy_isnan(z.real()) || npy_isnan(z.imag()); if (equal_nan && hasnan) { return 0; } // Now, equal_nan is false or neither of the values is not NaN. // So we don't need to worry about NaN here. + + // Convert -0.0 to 0.0. + if (z.real() == 0.0) { + z.real(NPY_PZEROL); + } + if (z.imag() == 0.0) { + z.imag(NPY_PZEROL); + } + // Some floating-point complex dtypes (e.g., npy_complex256) include undefined or // unused bits in their binary representation // (see: https://github.com/numpy/numpy/blob/main/numpy/_core/src/npymath/npy_math_private.h#L254-L261). 
@@ -76,13 +92,14 @@ size_t hash_complex_clongdouble(const npy_clongdouble *value, npy_bool equal_nan #if defined(HAVE_LDOUBLE_INTEL_EXTENDED_12_BYTES_LE) || \ defined(HAVE_LDOUBLE_INTEL_EXTENDED_16_BYTES_LE) || \ defined(HAVE_LDOUBLE_MOTOROLA_EXTENDED_12_BYTES_BE) + constexpr size_t SIZEOF_LDOUBLE_MAN = sizeof(ldouble_man_t); constexpr size_t SIZEOF_LDOUBLE_EXP = sizeof(ldouble_exp_t); constexpr size_t SIZEOF_LDOUBLE_SIGN = sizeof(ldouble_sign_t); constexpr size_t SIZEOF_BUFFER = 2 * (SIZEOF_LDOUBLE_MAN + SIZEOF_LDOUBLE_MAN + SIZEOF_LDOUBLE_EXP + SIZEOF_LDOUBLE_SIGN); unsigned char buffer[SIZEOF_BUFFER]; - union IEEEl2bitsrep bits_real{value_real}, bits_imag{value_imag}; + union IEEEl2bitsrep bits_real{z.real()}, bits_imag{z.imag()}; size_t offset = 0; for (const IEEEl2bitsrep &bits: {bits_real, bits_imag}) { @@ -101,8 +118,10 @@ size_t hash_complex_clongdouble(const npy_clongdouble *value, npy_bool equal_nan offset += SIZEOF_LDOUBLE_SIGN; } #else - constexpr size_t SIZEOF_BUFFER = NPY_SIZEOF_CLONGDOUBLE; - const unsigned char* buffer = reinterpret_cast(value); + + const unsigned char* buffer = reinterpret_cast(&z); + constexpr size_t SIZEOF_BUFFER = sizeof(z); + #endif size_t hash = npy_fnv1a(buffer, SIZEOF_BUFFER); diff --git a/numpy/lib/tests/test_arraysetops.py b/numpy/lib/tests/test_arraysetops.py index 9d5a4d693fa5..4e8d503427de 100644 --- a/numpy/lib/tests/test_arraysetops.py +++ b/numpy/lib/tests/test_arraysetops.py @@ -1291,3 +1291,12 @@ def test_unique_axis_float_raises_typeerror(self): arr1d = np.array([np.nan, 0, 0, np.nan]) with pytest.raises(TypeError, match="integer argument expected"): np.unique(arr1d, axis=0.0, equal_nan=False) + + @pytest.mark.parametrize('dt', [np.dtype('F'), np.dtype('D')]) + @pytest.mark.parametrize('values', [[complex(0.0, -1), complex(-0.0, -1), 0], + [-200, complex(-200, -0.0), -1], + [-25, 3, -5j, complex(-25, -0.0), 3j]]) + def test_unique_complex_signed_zeros(self, dt, values): + z = np.array(values, dtype=dt) + u = 
np.unique(z) + assert len(u) == len(values) - 1 From 15063ae71f5608529dbedd261279408903be5434 Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Fri, 7 Nov 2025 14:41:06 -0500 Subject: [PATCH 0831/1718] BUG: Fix check of PyMem_Calloc return value. (#30176) --- numpy/_core/src/umath/wrapping_array_method.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/umath/wrapping_array_method.c b/numpy/_core/src/umath/wrapping_array_method.c index 9b3970561f3f..b340e72bdbfd 100644 --- a/numpy/_core/src/umath/wrapping_array_method.c +++ b/numpy/_core/src/umath/wrapping_array_method.c @@ -114,7 +114,7 @@ get_wrapping_auxdata(void) } else { res = PyMem_Calloc(1, sizeof(wrapping_auxdata)); - if (res < 0) { + if (res == NULL) { PyErr_NoMemory(); return NULL; } From f39abd4ffbca44f01eec353115ffd2d7ad296fb2 Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Fri, 7 Nov 2025 14:50:59 -0500 Subject: [PATCH 0832/1718] ENH: Updates for the `spin bench` command. (#30175) * MAINT: spin: Use sys.executable to run a Python script in the bench function. * ENH: spin: Expose --factor as an option for 'bench' --- .spin/cmds.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/.spin/cmds.py b/.spin/cmds.py index 453faa77d932..ea62717e4f78 100644 --- a/.spin/cmds.py +++ b/.spin/cmds.py @@ -392,6 +392,11 @@ def lint(ctx, fix): '--quick', '-q', is_flag=True, default=False, help="Run each benchmark only once (timings won't be accurate)" ) +@click.option( + '--factor', '-f', default=1.05, + help="The factor above or below which a benchmark result is " + "considered reportable. This is passed on to the asv command." +) @click.argument( 'commits', metavar='', required=False, @@ -399,7 +404,7 @@ def lint(ctx, fix): ) @meson.build_dir_option @click.pass_context -def bench(ctx, tests, compare, verbose, quick, commits, build_dir): +def bench(ctx, tests, compare, verbose, quick, factor, commits, build_dir): """🏋 Run benchmarks. 
\b @@ -454,7 +459,7 @@ def bench(ctx, tests, compare, verbose, quick, commits, build_dir): meson._set_pythonpath(build_dir) p = spin.util.run( - ['python', '-c', 'import numpy as np; print(np.__version__)'], + [sys.executable, '-c', 'import numpy as np; print(np.__version__)'], cwd='benchmarks', echo=False, output=False @@ -482,7 +487,7 @@ def bench(ctx, tests, compare, verbose, quick, commits, build_dir): ) cmd_compare = [ - 'asv', 'continuous', '--factor', '1.05', + 'asv', 'continuous', '--factor', str(factor), ] + bench_args + [commit_a, commit_b] _run_asv(cmd_compare) From 884aec9750f0e42d563792678b2454ea86218f69 Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Sun, 9 Nov 2025 14:12:49 +0100 Subject: [PATCH 0833/1718] BUG: Allow np.percentile to operate on float16 data (#29105) * BUG: Allow np.percentile to operate on float16 data * add an extra regression test * add an extra regression test * remove unused default value * add release note * review comments: part1 * review comments: part 2 * review comments: part 3 --- doc/release/upcoming_changes/29105.change.rst | 1 + numpy/lib/_function_base_impl.py | 25 +++---- numpy/lib/tests/test_function_base.py | 66 ++++++++++++++++--- 3 files changed, 72 insertions(+), 20 deletions(-) create mode 100644 doc/release/upcoming_changes/29105.change.rst diff --git a/doc/release/upcoming_changes/29105.change.rst b/doc/release/upcoming_changes/29105.change.rst new file mode 100644 index 000000000000..b5d4a9838f30 --- /dev/null +++ b/doc/release/upcoming_changes/29105.change.rst @@ -0,0 +1 @@ +* The accuracy of ``np.quantile`` and ``np.percentile`` for 16- and 32-bit floating point input data has been improved. 
\ No newline at end of file diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 44db48f7a00c..58ce2f63dfa2 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -4219,9 +4219,7 @@ def percentile(a, if a.dtype.kind == "c": raise TypeError("a must be an array of real numbers") - # Use dtype of array if possible (e.g., if q is a python int or float) - # by making the divisor have the dtype of the data array. - q = np.true_divide(q, a.dtype.type(100) if a.dtype.kind == "f" else 100, out=...) + q = np.true_divide(q, 100, out=...) if not _quantile_is_valid(q): raise ValueError("Percentiles must be in the range [0, 100]") @@ -4469,11 +4467,7 @@ def quantile(a, if a.dtype.kind == "c": raise TypeError("a must be an array of real numbers") - # Use dtype of array if possible (e.g., if q is a python int or float). - if isinstance(q, (int, float)) and a.dtype.kind == "f": - q = np.asanyarray(q, dtype=a.dtype) - else: - q = np.asanyarray(q) + q = np.asanyarray(q) if not _quantile_is_valid(q): raise ValueError("Quantiles must be in the range [0, 1]") @@ -4549,7 +4543,7 @@ def _compute_virtual_index(n, quantiles, alpha: float, beta: float): ) - 1 -def _get_gamma(virtual_indexes, previous_indexes, method): +def _get_gamma(virtual_indexes, previous_indexes, method, dtype): """ Compute gamma (a.k.a 'm' or 'weight') for the linear interpolation of quantiles. @@ -4570,7 +4564,7 @@ def _get_gamma(virtual_indexes, previous_indexes, method): gamma = method["fix_gamma"](gamma, virtual_indexes) # Ensure both that we have an array, and that we keep the dtype # (which may have been matched to the input array). 
- return np.asanyarray(gamma, dtype=virtual_indexes.dtype) + return np.asanyarray(gamma, dtype=dtype) def _lerp(a, b, t, out=None): @@ -4788,7 +4782,16 @@ def _quantile( previous = arr[previous_indexes] next = arr[next_indexes] # --- Linear interpolation - gamma = _get_gamma(virtual_indexes, previous_indexes, method_props) + if arr.dtype.kind in "iu": + gtype = None + elif arr.dtype.kind == "f": + # make sure the return value matches the input array type + gtype = arr.dtype + else: + gtype = virtual_indexes.dtype + + gamma = _get_gamma(virtual_indexes, previous_indexes, + method_props, gtype) result_shape = virtual_indexes.shape + (1,) * (arr.ndim - 1) gamma = gamma.reshape(result_shape) result = _lerp(previous, diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 57228e363e71..1aaca3bae1d6 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -3277,6 +3277,16 @@ def test_period(self): assert_almost_equal(np.interp(x, xp, fp, period=360), y) +quantile_methods = [ + 'inverted_cdf', 'averaged_inverted_cdf', 'closest_observation', + 'interpolated_inverted_cdf', 'hazen', 'weibull', 'linear', + 'median_unbiased', 'normal_unbiased', 'nearest', 'lower', 'higher', + 'midpoint'] + + +methods_supporting_weights = ["inverted_cdf"] + + class TestPercentile: def test_basic(self): @@ -3870,15 +3880,38 @@ def test_nat_basic(self, dtype, pos): res = np.percentile(a, 30, axis=0) assert_array_equal(np.isnat(res), [False, True, False]) - -quantile_methods = [ - 'inverted_cdf', 'averaged_inverted_cdf', 'closest_observation', - 'interpolated_inverted_cdf', 'hazen', 'weibull', 'linear', - 'median_unbiased', 'normal_unbiased', 'nearest', 'lower', 'higher', - 'midpoint'] - - -methods_supporting_weights = ["inverted_cdf"] + @pytest.mark.parametrize("qtype", [np.float16, np.float32]) + @pytest.mark.parametrize("method", quantile_methods) + def test_percentile_gh_29003(self, qtype, method): + # test that 
with float16 or float32 input we do not get overflow + zero = qtype(0) + one = qtype(1) + a = np.zeros(65521, qtype) + a[:20_000] = one + z = np.percentile(a, 50, method=method) + assert z == zero + assert z.dtype == a.dtype + z = np.percentile(a, 99, method=method) + assert z == one + assert z.dtype == a.dtype + + def test_percentile_gh_29003_Fraction(self): + zero = Fraction(0) + one = Fraction(1) + a = np.array([zero] * 65521) + a[:20_000] = one + z = np.percentile(a, 50) + assert z == zero + z = np.percentile(a, Fraction(50)) + assert z == zero + assert np.array(z).dtype == a.dtype + + z = np.percentile(a, 99) + assert z == one + # test that with only Fraction input the return type is a Fraction + z = np.percentile(a, Fraction(99)) + assert z == one + assert np.array(z).dtype == a.dtype class TestQuantile: @@ -4244,6 +4277,21 @@ def test_closest_observation(self): assert_equal(4, np.quantile(arr[0:9], q, method=m)) assert_equal(5, np.quantile(arr, q, method=m)) + def test_quantile_gh_29003_Fraction(self): + r = np.quantile([1, 2], q=Fraction(1)) + assert r == Fraction(2) + assert isinstance(r, Fraction) + + r = np.quantile([1, 2], q=Fraction(.5)) + assert r == Fraction(3, 2) + assert isinstance(r, Fraction) + + def test_float16_gh_29003(self): + a = np.arange(50_001, dtype=np.float16) + q = .999 + value = np.quantile(a, q) + assert value == q * 50_000 + assert value.dtype == np.float16 class TestLerp: @hypothesis.given(t0=st.floats(allow_nan=False, allow_infinity=False, From b2bde8bc660463e0d7caa9276d4c49739ce615ab Mon Sep 17 00:00:00 2001 From: star1327p Date: Sun, 9 Nov 2025 12:19:12 -0800 Subject: [PATCH 0834/1718] DOC: Corrected grammatical issues in code comments --- doc/source/dev/depending_on_numpy.rst | 2 +- numpy/_core/src/multiarray/array_converter.c | 2 +- numpy/_core/src/multiarray/array_method.h | 2 +- numpy/_core/src/multiarray/compiled_base.c | 2 +- numpy/_core/src/multiarray/nditer_pywrap.c | 2 +- numpy/_core/src/multiarray/textreading/rows.c | 
2 +- numpy/_core/tests/test_dtype.py | 2 +- numpy/distutils/pathccompiler.py | 2 +- numpy/lib/_function_base_impl.py | 2 +- numpy/lib/mixins.py | 2 +- numpy/lib/tests/test_histograms.py | 2 +- numpy/polynomial/hermite.py | 6 +++--- numpy/polynomial/hermite_e.py | 6 +++--- numpy/random/_pcg64.pyx | 4 ++-- numpy/random/_philox.pyx | 2 +- 15 files changed, 20 insertions(+), 20 deletions(-) diff --git a/doc/source/dev/depending_on_numpy.rst b/doc/source/dev/depending_on_numpy.rst index de809462654b..7583dc9af84a 100644 --- a/doc/source/dev/depending_on_numpy.rst +++ b/doc/source/dev/depending_on_numpy.rst @@ -92,7 +92,7 @@ If a package either uses the NumPy C-API directly or it uses some other tool that depends on it like Cython or Pythran, NumPy is a *build-time* dependency of the package. -By default, NumPy exposes a API that is backward compatible with the earliest +By default, NumPy exposes an API that is backward compatible with the earliest NumPy version that supports the oldest Python version currently supported by NumPy. For example, NumPy 1.25.0 supports Python 3.9 and above; and the earliest NumPy version to support Python 3.9 was 1.19. Therefore we guarantee diff --git a/numpy/_core/src/multiarray/array_converter.c b/numpy/_core/src/multiarray/array_converter.c index 10dc83ac657f..578e7b1554f4 100644 --- a/numpy/_core/src/multiarray/array_converter.c +++ b/numpy/_core/src/multiarray/array_converter.c @@ -97,7 +97,7 @@ array_converter_new( Py_INCREF(item->DType); /* - * Check whether we were passed a an int/float/complex Python scalar. + * Check whether we were passed an int/float/complex Python scalar. * If not, set `descr` and clear pyscalar/scalar flags as needed. 
*/ if (item->scalar_input && npy_mark_tmp_array_if_pyscalar( diff --git a/numpy/_core/src/multiarray/array_method.h b/numpy/_core/src/multiarray/array_method.h index bcf270899f13..303425e38274 100644 --- a/numpy/_core/src/multiarray/array_method.h +++ b/numpy/_core/src/multiarray/array_method.h @@ -69,7 +69,7 @@ typedef struct PyArrayMethodObject_tag { /* - * We will sometimes have to create a ArrayMethod and allow passing it around, + * We will sometimes have to create an ArrayMethod and allow passing it around, * similar to `instance.method` returning a bound method, e.g. a function like * `ufunc.resolve()` can return a bound object. * The current main purpose of the BoundArrayMethod is that it holds on to the diff --git a/numpy/_core/src/multiarray/compiled_base.c b/numpy/_core/src/multiarray/compiled_base.c index 179c8e404322..e6a45554555f 100644 --- a/numpy/_core/src/multiarray/compiled_base.c +++ b/numpy/_core/src/multiarray/compiled_base.c @@ -450,7 +450,7 @@ _linear_search(const npy_double key, const npy_double *arr, const npy_intp len, /** @brief find index of a sorted array such that arr[i] <= key < arr[i + 1]. * - * If an starting index guess is in-range, the array values around this + * If a starting index guess is in-range, the array values around this * index are first checked. This allows for repeated calls for well-ordered * keys (a very common case) to use the previous index as a very good guess. 
* diff --git a/numpy/_core/src/multiarray/nditer_pywrap.c b/numpy/_core/src/multiarray/nditer_pywrap.c index b68f7ad9708d..a0e1b09a584c 100644 --- a/numpy/_core/src/multiarray/nditer_pywrap.c +++ b/numpy/_core/src/multiarray/nditer_pywrap.c @@ -916,7 +916,7 @@ NpyIter_NestedIters(PyObject *NPY_UNUSED(self), } if (!PyTuple_Check(item) && !PyList_Check(item)) { PyErr_SetString(PyExc_ValueError, - "Each item in axes must be a an integer tuple"); + "Each item in axes must be an integer tuple"); Py_DECREF(item); return NULL; } diff --git a/numpy/_core/src/multiarray/textreading/rows.c b/numpy/_core/src/multiarray/textreading/rows.c index e5a294ecb01d..2fa1fb948553 100644 --- a/numpy/_core/src/multiarray/textreading/rows.c +++ b/numpy/_core/src/multiarray/textreading/rows.c @@ -156,7 +156,7 @@ create_conv_funcs( * returned array can differ for strings. * @param homogeneous Whether the datatype of the array is not homogeneous, * i.e. not structured. In this case the number of columns has to be - * discovered an the returned array will be 2-dimensional rather than + * discovered and the returned array will be 2-dimensional rather than * 1-dimensional. * * @returns Returns the result as an array object or NULL on error. 
The result diff --git a/numpy/_core/tests/test_dtype.py b/numpy/_core/tests/test_dtype.py index 611d5857fb15..98403826f562 100644 --- a/numpy/_core/tests/test_dtype.py +++ b/numpy/_core/tests/test_dtype.py @@ -209,7 +209,7 @@ def test_field_order_equality(self): 'formats': ['i4', 'f4'], 'offsets': [4, 0]}) assert_equal(x == y, False) - # This is an safe cast (not equiv) due to the different names: + # This is a safe cast (not equiv) due to the different names: assert np.can_cast(x, y, casting="safe") @pytest.mark.parametrize( diff --git a/numpy/distutils/pathccompiler.py b/numpy/distutils/pathccompiler.py index 48051810ee21..1f879edf4d21 100644 --- a/numpy/distutils/pathccompiler.py +++ b/numpy/distutils/pathccompiler.py @@ -3,7 +3,7 @@ class PathScaleCCompiler(UnixCCompiler): """ - PathScale compiler compatible with an gcc built Python. + PathScale compiler compatible with a gcc built Python. """ compiler_type = 'pathcc' diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 58ce2f63dfa2..f37f69bcb5dd 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -4731,7 +4731,7 @@ def _quantile( if weights is None: # --- Computation of indexes # Index where to find the value in the sorted array. - # Virtual because it is a floating point value, not an valid index. + # Virtual because it is a floating point value, not a valid index. # The nearest neighbours are used for interpolation try: method_props = _QuantileMethods[method] diff --git a/numpy/lib/mixins.py b/numpy/lib/mixins.py index 9a531c6b9022..cd02bf7f4a50 100644 --- a/numpy/lib/mixins.py +++ b/numpy/lib/mixins.py @@ -70,7 +70,7 @@ class NDArrayOperatorsMixin: but that should support arithmetic and numpy universal functions like arrays as described in :external+neps:doc:`nep-0013-ufunc-overrides`. 
- As an trivial example, consider this implementation of an ``ArrayLike`` + As a trivial example, consider this implementation of an ``ArrayLike`` class that simply wraps a NumPy array and ensures that the result of any arithmetic operation is also an ``ArrayLike`` object: diff --git a/numpy/lib/tests/test_histograms.py b/numpy/lib/tests/test_histograms.py index fec8828d242e..cae11cfdcd65 100644 --- a/numpy/lib/tests/test_histograms.py +++ b/numpy/lib/tests/test_histograms.py @@ -136,7 +136,7 @@ def test_bool_conversion(self): a = np.array([1, 1, 0], dtype=np.uint8) int_hist, int_edges = np.histogram(a) - # Should raise an warning on booleans + # Should raise a warning on booleans # Ensure that the histograms are equivalent, need to suppress # the warnings to get the actual outputs with pytest.warns(RuntimeWarning, match='Converting input from .*'): diff --git a/numpy/polynomial/hermite.py b/numpy/polynomial/hermite.py index fc2ea9e4194a..c6007d19df7f 100644 --- a/numpy/polynomial/hermite.py +++ b/numpy/polynomial/hermite.py @@ -796,7 +796,7 @@ def hermint(c, m=1, k=[], lbnd=0, scl=1, axis=0): def hermval(x, c, tensor=True): """ - Evaluate an Hermite series at points x. + Evaluate a Hermite series at points x. If `c` is of length ``n + 1``, this function returns the value: @@ -1439,7 +1439,7 @@ def hermcompanion(c): """Return the scaled companion matrix of c. The basis polynomials are scaled so that the companion matrix is - symmetric when `c` is an Hermite basis polynomial. This provides + symmetric when `c` is a Hermite basis polynomial. This provides better eigenvalue estimates than the unscaled case and for basis polynomials the eigenvalues are guaranteed to be real if `numpy.linalg.eigvalsh` is used to obtain them. @@ -1693,7 +1693,7 @@ def hermweight(x): # class Hermite(ABCPolyBase): - """An Hermite series class. + """A Hermite series class. 
The Hermite class provides the standard Python numerical methods '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the diff --git a/numpy/polynomial/hermite_e.py b/numpy/polynomial/hermite_e.py index 6225b9dd6b75..f5d82aa543b9 100644 --- a/numpy/polynomial/hermite_e.py +++ b/numpy/polynomial/hermite_e.py @@ -794,7 +794,7 @@ def hermeint(c, m=1, k=[], lbnd=0, scl=1, axis=0): def hermeval(x, c, tensor=True): """ - Evaluate an HermiteE series at points x. + Evaluate a HermiteE series at points x. If `c` is of length ``n + 1``, this function returns the value: @@ -1365,7 +1365,7 @@ def hermecompanion(c): Return the scaled companion matrix of c. The basis polynomials are scaled so that the companion matrix is - symmetric when `c` is an HermiteE basis polynomial. This provides + symmetric when `c` is a HermiteE basis polynomial. This provides better eigenvalue estimates than the unscaled case and for basis polynomials the eigenvalues are guaranteed to be real if `numpy.linalg.eigvalsh` is used to obtain them. @@ -1595,7 +1595,7 @@ def hermeweight(x): # class HermiteE(ABCPolyBase): - """An HermiteE series class. + """A HermiteE series class. The HermiteE class provides the standard Python numerical methods '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the diff --git a/numpy/random/_pcg64.pyx b/numpy/random/_pcg64.pyx index fd2dd40db565..30a00a11aa1d 100644 --- a/numpy/random/_pcg64.pyx +++ b/numpy/random/_pcg64.pyx @@ -264,7 +264,7 @@ cdef class PCG64(BitGenerator): * The random values are simulated using a rejection-based method and so, on average, more than one value from the underlying - RNG is required to generate an single draw. + RNG is required to generate a single draw. * The number of bits required to generate a simulated value differs from the number of bits generated by the underlying RNG. 
For example, two 16-bit integer values can be simulated @@ -498,7 +498,7 @@ cdef class PCG64DXSM(BitGenerator): * The random values are simulated using a rejection-based method and so, on average, more than one value from the underlying - RNG is required to generate an single draw. + RNG is required to generate a single draw. * The number of bits required to generate a simulated value differs from the number of bits generated by the underlying RNG. For example, two 16-bit integer values can be simulated diff --git a/numpy/random/_philox.pyx b/numpy/random/_philox.pyx index 75fc4081ee62..da47ad21e2de 100644 --- a/numpy/random/_philox.pyx +++ b/numpy/random/_philox.pyx @@ -315,7 +315,7 @@ cdef class Philox(BitGenerator): * The random values are simulated using a rejection-based method and so, on average, more than one value from the underlying - RNG is required to generate an single draw. + RNG is required to generate a single draw. * The number of bits required to generate a simulated value differs from the number of bits generated by the underlying RNG. For example, two 16-bit integer values can be simulated From 933fd548a2ba872b732be4682f9fa6962996071b Mon Sep 17 00:00:00 2001 From: Aniket <148300120+Aniketsy@users.noreply.github.com> Date: Mon, 10 Nov 2025 19:55:08 +0530 Subject: [PATCH 0835/1718] DOC: Add concrete Meson build example for NumPy C ufunc extension (#30008) Co-authored-by: Marten Henric van Kerkwijk Co-authored-by: Ralf Gommers --- doc/source/user/c-info.ufunc-tutorial.rst | 420 +++++++++------------- 1 file changed, 170 insertions(+), 250 deletions(-) diff --git a/doc/source/user/c-info.ufunc-tutorial.rst b/doc/source/user/c-info.ufunc-tutorial.rst index 8d8b267a6bd1..09daa95b7875 100644 --- a/doc/source/user/c-info.ufunc-tutorial.rst +++ b/doc/source/user/c-info.ufunc-tutorial.rst @@ -74,9 +74,9 @@ For comparison and general edification of the reader we provide a simple implementation of a C extension of ``logit`` that uses no numpy. 
-To do this we need two files. The first is the C file which contains -the actual code, and the second is the ``setup.py`` file used to create -the module. +To do this we need three files. The first is the C file which contains +the actual code, and the others are two project files that describe +how to create the module. .. code-block:: c @@ -157,65 +157,91 @@ the module. return m; } -To use the ``setup.py`` file, place ``setup.py`` and ``spammodule.c`` -in the same folder. Then ``python setup.py build`` will build the module to -import, or ``python setup.py install`` will install the module to your -site-packages directory. +To create the module, one proceeds as one would for a Python package, creating +a ``pyproject.toml`` file, which defines a build back-end, and then another +file for that backend which describes how to compile the code. For the backend, +we recommend ``meson-python``, as we use it for numpy itself, but below we +also show how to use the older ``setuptools``. - .. code-block:: python +.. tab-set:: - ''' - setup.py file for spammodule.c + .. tab-item:: meson - Calling - $python setup.py build_ext --inplace - will build the extension library in the current file. + Sample ``pyproject.toml`` and ``meson.build``. - Calling - $python setup.py build - will build a file that looks like ./build/lib*, where - lib* is a file that begins with lib. The library will - be in this file and end with a C library extension, - such as .so + .. code-block:: toml - Calling - $python setup.py install - will install the module in your site-packages file. + [project] + name = "spam" + version = "0.1" - See the setuptools section 'Building Extension Modules' - at setuptools.pypa.io for more information. - ''' + [build-system] + requires = ["meson-python"] + build-backend = "mesonpy" - from setuptools import setup, Extension - import numpy as np + .. 
code-block:: meson - module1 = Extension('spam', sources=['spammodule.c']) + project('spam', 'c') - setup(name='spam', version='1.0', ext_modules=[module1]) + py = import('python').find_installation() + sources = files('spammodule.c') -Once the spam module is imported into python, you can call logit + extension_module = py.extension_module( + 'spam', + sources, + install: true, + ) + + .. tab-item:: setuptools + + Sample ``pyproject.toml`` and ``setup.py``. + + .. code-block:: toml + + [project] + name = "spam" + version = "0.1" + + [build-system] + requires = ["setuptools"] + build-backend = "setuptools.build_meta" + + .. code-block:: python + + from setuptools import setup, Extension + + spammodule = Extension('spam', sources=['spammodule.c']) + + setup(name='spam', version='1.0', + ext_modules=[spammodule]) + +With either of the above, one can build and install the ``spam`` package with, + +.. code-block:: bash + + pip install . + +Once the ``spam`` module is imported into python, you can call logit via ``spam.logit``. Note that the function used above cannot be applied as-is to numpy arrays. To do so we must call :py:func:`numpy.vectorize` -on it. For example, if a python interpreter is opened in the file containing -the spam library or spam has been installed, one can perform the -following commands: - ->>> import numpy as np ->>> import spam ->>> spam.logit(0) --inf ->>> spam.logit(1) -inf ->>> spam.logit(0.5) -0.0 ->>> x = np.linspace(0,1,10) ->>> spam.logit(x) -TypeError: only length-1 arrays can be converted to Python scalars ->>> f = np.vectorize(spam.logit) ->>> f(x) -array([ -inf, -2.07944154, -1.25276297, -0.69314718, -0.22314355, - 0.22314355, 0.69314718, 1.25276297, 2.07944154, inf]) +on it. 
For example:: + + >>> import numpy as np + >>> import spam + >>> spam.logit(0) + -inf + >>> spam.logit(1) + inf + >>> spam.logit(0.5) + 0.0 + >>> x = np.linspace(0,1,10) + >>> spam.logit(x) + TypeError: only length-1 arrays can be converted to Python scalars + >>> f = np.vectorize(spam.logit) + >>> f(x) + array([ -inf, -2.07944154, -1.25276297, -0.69314718, -0.22314355, + 0.22314355, 0.69314718, 1.25276297, 2.07944154, inf]) THE RESULTING LOGIT FUNCTION IS NOT FAST! ``numpy.vectorize`` simply loops over ``spam.logit``. The loop is done at the C level, but the numpy @@ -236,8 +262,7 @@ Example NumPy ufunc for one dtype For simplicity we give a ufunc for a single dtype, the ``'f8'`` ``double``. As in the previous section, we first give the ``.c`` file -and then the ``setup.py`` file used to create the module containing the -ufunc. +and then the files used to create a ``npufunc`` module containing the ufunc. The place in the code corresponding to the actual computations for the ufunc are marked with ``/* BEGIN main ufunc computation */`` and @@ -339,59 +364,77 @@ the primary thing that must be changed to create your own ufunc. return m; } -This is a ``setup.py`` file for the above code. As before, the module -can be build via calling ``python setup.py build`` at the command prompt, -or installed to site-packages via ``python setup.py install``. The module -can also be placed into a local folder e.g. ``npufunc_directory`` below -using ``python setup.py build_ext --inplace``. +For the files needed to create the module, the main difference from our +previous example is that we now need to declare dependencies on numpy. - .. code-block:: python +.. tab-set:: - ''' - setup.py file for single_type_logit.c - Note that since this is a numpy extension - we add an include_dirs=[get_include()] so that the - extension is built with numpy's C/C++ header files. + .. 
tab-item:: meson - Calling - $python setup.py build_ext --inplace - will build the extension library in the npufunc_directory. + Sample ``pyproject.toml`` and ``meson.build``. - Calling - $python setup.py build - will build a file that looks like ./build/lib*, where - lib* is a file that begins with lib. The library will - be in this file and end with a C library extension, - such as .so + .. code-block:: toml - Calling - $python setup.py install - will install the module in your site-packages file. + [project] + name = "npufunc" + dependencies = ["numpy"] + version = "0.1" - See the setuptools section 'Building Extension Modules' - at setuptools.pypa.io for more information. - ''' + [build-system] + requires = ["meson-python", "numpy"] + build-backend = "mesonpy" - from setuptools import setup, Extension - from numpy import get_include + .. code-block:: meson - npufunc = Extension('npufunc', - sources=['single_type_logit.c'], - include_dirs=[get_include()]) + project('npufunc', 'c') - setup(name='npufunc', version='1.0', ext_modules=[npufunc]) + py = import('python').find_installation() + np_dep = dependency('numpy') + sources = files('single_type_logit.c') -After the above has been installed, it can be imported and used as follows. + extension_module = py.extension_module( + 'npufunc', + sources, + dependencies: [np_dep], + install: true, + ) ->>> import numpy as np ->>> import npufunc ->>> npufunc.logit(0.5) -np.float64(0.0) ->>> a = np.linspace(0,1,5) ->>> npufunc.logit(a) -array([ -inf, -1.09861229, 0. , 1.09861229, inf]) + .. tab-item:: setuptools + + Sample ``pyproject.toml`` and ``setup.py``. + + .. code-block:: toml + + [project] + name = "npufunc" + dependencies = ["numpy"] + version = "0.1" + + [build-system] + requires = ["setuptools", "numpy"] + build-backend = "setuptools.build_meta" + + .. 
code-block:: python + + from setuptools import setup, Extension + from numpy import get_include + + npufunc = Extension('npufunc', + sources=['single_type_logit.c'], + include_dirs=[get_include()]) + setup(name='npufunc', version='1.0', ext_modules=[npufunc]) + +After the above has been installed, it can be imported and used as follows:: + + >>> import numpy as np + >>> import npufunc + >>> npufunc.logit(0.5) + np.float64(0.0) + >>> a = np.linspace(0, 1, 5) + >>> npufunc.logit(a) + array([ -inf, -1.09861229, 0. , 1.09861229, inf]) .. _`sec:NumPy-many-loop`: @@ -402,10 +445,10 @@ Example NumPy ufunc with multiple dtypes .. index:: pair: ufunc; adding new -We finally give an example of a full ufunc, with inner loops for -half-floats, floats, doubles, and long doubles. As in the previous -sections we first give the ``.c`` file and then the corresponding -``setup.py`` file. +We now extend the above to a full ``logit`` ufunc, with inner loops for +floats, doubles, and long doubles. Here, we can use the same build files +as above, except we need to change the source file from ``single_type_logit.c`` +to ``multi_type_logit.c``. The places in the code corresponding to the actual computations for the ufunc are marked with ``/* BEGIN main ufunc computation */`` and @@ -419,7 +462,6 @@ is the primary thing that must be changed to create your own ufunc. #include #include "numpy/ndarraytypes.h" #include "numpy/ufuncobject.h" - #include "numpy/halffloat.h" #include /* @@ -514,39 +556,13 @@ is the primary thing that must be changed to create your own ufunc. 
} - static void half_float_logit(char **args, const npy_intp *dimensions, - const npy_intp *steps, void *data) - { - npy_intp i; - npy_intp n = dimensions[0]; - char *in = args[0], *out = args[1]; - npy_intp in_step = steps[0], out_step = steps[1]; - - float tmp; - - for (i = 0; i < n; i++) { - - /* BEGIN main ufunc computation */ - tmp = npy_half_to_float(*(npy_half *)in); - tmp /= 1 - tmp; - tmp = logf(tmp); - *((npy_half *)out) = npy_float_to_half(tmp); - /* END main ufunc computation */ - - in += in_step; - out += out_step; - } - } - /*This gives pointers to the above functions*/ - PyUFuncGenericFunction funcs[4] = {&half_float_logit, - &float_logit, + PyUFuncGenericFunction funcs[3] = {&float_logit, &double_logit, &long_double_logit}; - static const char types[8] = {NPY_HALF, NPY_HALF, - NPY_FLOAT, NPY_FLOAT, + static const char types[6] = {NPY_FLOAT, NPY_FLOAT, NPY_DOUBLE, NPY_DOUBLE, NPY_LONGDOUBLE, NPY_LONGDOUBLE}; @@ -586,92 +602,40 @@ is the primary thing that must be changed to create your own ufunc. return m; } -This is a ``setup.py`` file for the above code. As before, the module -can be build via calling ``python setup.py build`` at the command prompt, -or installed to site-packages via ``python setup.py install``. - - .. code-block:: python - - ''' - setup.py file for multi_type_logit.c - Note that since this is a numpy extension - we add an include_dirs=[get_include()] so that the - extension is built with numpy's C/C++ header files. - Furthermore, we also have to include the npymath - lib for half-float d-type. - - Calling - $python setup.py build_ext --inplace - will build the extension library in the current file. - - Calling - $python setup.py build - will build a file that looks like ./build/lib*, where - lib* is a file that begins with lib. The library will - be in this file and end with a C library extension, - such as .so - - Calling - $python setup.py install - will install the module in your site-packages file. 
- - See the setuptools section 'Building Extension Modules' - at setuptools.pypa.io for more information. - ''' - - from setuptools import setup, Extension - from numpy import get_include - from os import path - - path_to_npymath = path.join(get_include(), '..', 'lib') - npufunc = Extension('npufunc', - sources=['multi_type_logit.c'], - include_dirs=[get_include()], - # Necessary for the half-float d-type. - library_dirs=[path_to_npymath], - libraries=["npymath"]) - - setup(name='npufunc', version='1.0', ext_modules=[npufunc]) - - After the above has been installed, it can be imported and used as follows. >>> import numpy as np >>> import npufunc >>> npufunc.logit(0.5) np.float64(0.0) ->>> a = np.linspace(0,1,5) +>>> a = np.linspace(0, 1, 5, dtype="f4") >>> npufunc.logit(a) -array([ -inf, -1.09861229, 0. , 1.09861229, inf]) +:1: RuntimeWarning: divide by zero encountered in logit +array([ -inf, -1.0986123, 0. , 1.0986123, inf], + dtype=float32) +.. note:: + Supporting ``float16`` (half-precision) in custom ufuncs is more complex + due to its non-standard C representation and conversion requirements. The + above code can process ``float16`` input, but will do so by converting it + to ``float32``. The result will then be ``float32`` too, but one can + convert it back to ``float16`` by passing in a suitable output, as in + ``npufunc.logit(a, out=np.empty_like(a))``. For examples of actual + ``float16`` loops, see the numpy source code. .. _`sec:NumPy-many-arg`: Example NumPy ufunc with multiple arguments/return values ========================================================= -Our final example is a ufunc with multiple arguments. It is a modification -of the code for a logit ufunc for data with a single dtype. We -compute ``(A * B, logit(A * B))``. - -We only give the C code as the setup.py file is exactly the same as -the ``setup.py`` file in `Example NumPy ufunc for one dtype`_, except that -the line - - .. 
code-block:: python - - npufunc = Extension('npufunc', - sources=['single_type_logit.c'], - include_dirs=[get_include()]) +Creating a ufunc with multiple arguments is not difficult. Here, we make a +modification of the code for a logit ufunc, where we compute ``(A * B, +logit(A * B))``. For simplicity, we only create a loop for doubles. -is replaced with - - .. code-block:: python - - npufunc = Extension('npufunc', - sources=['multi_arg_logit.c'], - include_dirs=[get_include()]) +We again only give the C code as the files needed to create the module are the +same as before, but with the source file name replaced by +``multi_arg_logit.c``. The C file is given below. The ufunc generated takes two arguments ``A`` and ``B``. It returns a tuple whose first element is ``A * B`` and whose second @@ -684,7 +648,6 @@ as well as all other properties of a ufunc. #include #include "numpy/ndarraytypes.h" #include "numpy/ufuncobject.h" - #include "numpy/halffloat.h" #include /* @@ -786,29 +749,12 @@ Example NumPy ufunc with structured array dtype arguments This example shows how to create a ufunc for a structured array dtype. For the example we show a trivial ufunc for adding two arrays with dtype ``'u8,u8,u8'``. The process is a bit different from the other examples since -a call to :c:func:`PyUFunc_FromFuncAndData` doesn't fully register ufuncs for +a call to :c:func:`PyUFunc_FromFuncAndData` cannot register ufuncs for custom dtypes and structured array dtypes. We need to also call :c:func:`PyUFunc_RegisterLoopForDescr` to finish setting up the ufunc. -We only give the C code as the ``setup.py`` file is exactly the same as -the ``setup.py`` file in `Example NumPy ufunc for one dtype`_, except that -the line - - .. code-block:: python - - npufunc = Extension('npufunc', - sources=['single_type_logit.c'], - include_dirs=[get_include()]) - -is replaced with - - .. 
code-block:: python - - npufunc = Extension('npufunc', - sources=['add_triplet.c'], - include_dirs=[get_include()]) - -The C file is given below. +We only give the C code as the files needed to construct the module are again +exactly the same as before, except that the source file is now ``add_triplet.c``. .. code-block:: c @@ -867,7 +813,7 @@ The C file is given below. static struct PyModuleDef moduledef = { PyModuleDef_HEAD_INIT, - "struct_ufunc_test", + "npufunc", NULL, -1, StructUfuncTestMethods, @@ -907,7 +853,7 @@ The C file is given below. dtypes[2] = dtype; /* Register ufunc for structured dtype */ - PyUFunc_RegisterLoopForDescr(add_triplet, + PyUFunc_RegisterLoopForDescr((PyUFuncObject *)add_triplet, dtype, &add_uint64_triplet, dtypes, @@ -920,37 +866,11 @@ The C file is given below. return m; } -.. index:: - pair: ufunc; adding new - -The returned ufunc object is a callable Python object. It should be -placed in a (module) dictionary under the same name as was used in the -name argument to the ufunc-creation routine. The following example is -adapted from the umath module - - .. code-block:: c +Sample usage:: - static PyUFuncGenericFunction atan2_functions[] = { - PyUFunc_ff_f, PyUFunc_dd_d, - PyUFunc_gg_g, PyUFunc_OO_O_method}; - static void *atan2_data[] = { - (void *)atan2f, (void *)atan2, - (void *)atan2l, (void *)"arctan2"}; - static const char atan2_signatures[] = { - NPY_FLOAT, NPY_FLOAT, NPY_FLOAT, - NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE, - NPY_LONGDOUBLE, NPY_LONGDOUBLE, NPY_LONGDOUBLE - NPY_OBJECT, NPY_OBJECT, NPY_OBJECT}; - ... - /* in the module initialization code */ - PyObject *f, *dict, *module; - ... - dict = PyModule_GetDict(module); - ... - f = PyUFunc_FromFuncAndData(atan2_functions, - atan2_data, atan2_signatures, 4, 2, 1, - PyUFunc_None, "arctan2", - "a safe and correct arctan(x1/x2)", 0); - PyDict_SetItemString(dict, "arctan2", f); - Py_DECREF(f); - ... 
+ >>> import npufunc + >>> import numpy as np + >>> a = np.array([(1, 2, 3), (4, 5, 6)], "u8,u8,u8") + >>> npufunc.add_triplet(a, a) + array([(2, 4, 6), (8, 10, 12)], + dtype=[('f0', ' Date: Mon, 10 Nov 2025 18:30:27 +0100 Subject: [PATCH 0836/1718] MAINT: ``ma.asanyarray``: use ``order=None`` as default --- numpy/ma/core.py | 19 +++++++++---------- numpy/ma/core.pyi | 8 ++++---- 2 files changed, 13 insertions(+), 14 deletions(-) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index e56b0cfa573b..a374410209f5 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -8616,7 +8616,7 @@ def asarray(a, dtype=None, order=None): subok=False, order=order) -def asanyarray(a, dtype=None, order='A'): +def asanyarray(a, dtype=None, order=None): """ Convert the input to a masked array, conserving subclasses. @@ -8629,14 +8629,13 @@ def asanyarray(a, dtype=None, order='A'): Input data, in any form that can be converted to an array. dtype : dtype, optional By default, the data-type is inferred from the input data. - order : {'C', 'F', 'A'}, optional - Specify the order of the array. If order is 'C', then the array - will be in C-contiguous order (last-index varies the fastest). - If order is 'F', then the returned array will be in - Fortran-contiguous order (first-index varies the fastest). - If order is 'A' (default), then the returned array may be - in any order (either C-, Fortran-contiguous, or even discontiguous), - unless a copy is required, in which case it will be C-contiguous. + order : {'C', 'F', 'A', 'K'}, optional + Memory layout. 'A' and 'K' depend on the order of input array ``a``. + 'C' row-major (C-style), + 'F' column-major (Fortran-style) memory representation. + 'A' (any) means 'F' if ``a`` is Fortran contiguous, 'C' otherwise + 'K' (keep) preserve input order + Defaults to 'K'. 
Returns ------- @@ -8670,7 +8669,7 @@ def asanyarray(a, dtype=None, order='A'): isinstance(a, MaskedArray) and (dtype is None or dtype == a.dtype) and ( - order == 'A' + order in {None, 'A', 'K'} or order == 'C' and a.flags.carray or order == 'F' and a.flags.f_contiguous ) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index bbcd617e9699..15e6b58762b4 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -2453,13 +2453,13 @@ def asarray(a: object, dtype: DTypeLike | None = None, order: _OrderKACF | None # keep in sync with `asarray` (but note the additional first overload) @overload -def asanyarray(a: _MArrayT, dtype: None = None, order: _OrderACF = "A") -> _MArrayT: ... +def asanyarray(a: _MArrayT, dtype: None = None, order: _OrderKACF | None = None) -> _MArrayT: ... @overload -def asanyarray(a: _ArrayLike[_ScalarT], dtype: None = None, order: _OrderACF = "A") -> _MaskedArray[_ScalarT]: ... +def asanyarray(a: _ArrayLike[_ScalarT], dtype: None = None, order: _OrderKACF | None = None) -> _MaskedArray[_ScalarT]: ... @overload -def asanyarray(a: object, dtype: _DTypeLike[_ScalarT], order: _OrderACF = "A") -> _MaskedArray[_ScalarT]: ... +def asanyarray(a: object, dtype: _DTypeLike[_ScalarT], order: _OrderKACF | None = None) -> _MaskedArray[_ScalarT]: ... @overload -def asanyarray(a: object, dtype: DTypeLike | None = None, order: _OrderACF = "A") -> _MaskedArray[_ScalarT]: ... +def asanyarray(a: object, dtype: DTypeLike | None = None, order: _OrderKACF | None = None) -> _MaskedArray[_ScalarT]: ... # def is_masked(x: object) -> bool: ... From c0f3eb9fe699951b1c63c523d6a39843e873d2e7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 10 Nov 2025 17:53:01 +0000 Subject: [PATCH 0837/1718] MAINT: Bump int128/hide-comment-action from 1.46.0 to 1.47.0 Bumps [int128/hide-comment-action](https://github.com/int128/hide-comment-action) from 1.46.0 to 1.47.0. 
- [Release notes](https://github.com/int128/hide-comment-action/releases) - [Commits](https://github.com/int128/hide-comment-action/compare/bddc4e774ea6f0b45b9621c3a689db72b9a3cec5...3580fff2b9b7c0e16466686530622f0eed93132a) --- updated-dependencies: - dependency-name: int128/hide-comment-action dependency-version: 1.47.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/mypy_primer_comment.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/mypy_primer_comment.yml b/.github/workflows/mypy_primer_comment.yml index 95e487cd60b4..9f4cda234717 100644 --- a/.github/workflows/mypy_primer_comment.yml +++ b/.github/workflows/mypy_primer_comment.yml @@ -49,7 +49,7 @@ jobs: return parseInt(fs.readFileSync("pr_number.txt", { encoding: "utf8" })) - name: Hide old comments - uses: int128/hide-comment-action@bddc4e774ea6f0b45b9621c3a689db72b9a3cec5 # v1.46.0 + uses: int128/hide-comment-action@3580fff2b9b7c0e16466686530622f0eed93132a # v1.47.0 with: token: ${{ secrets.GITHUB_TOKEN }} issue-number: ${{ steps.get-pr-number.outputs.result }} From 81e28edc622637769010fd11bf17ca32a031543f Mon Sep 17 00:00:00 2001 From: Johnnie Gray Date: Tue, 16 Sep 2025 11:47:43 -0700 Subject: [PATCH 0838/1718] merge implementation --- numpy/_core/einsumfunc.py | 541 ++++++++++++++++++++++++-------------- 1 file changed, 344 insertions(+), 197 deletions(-) diff --git a/numpy/_core/einsumfunc.py b/numpy/_core/einsumfunc.py index 8e71e6d4b1eb..8a96f1444971 100644 --- a/numpy/_core/einsumfunc.py +++ b/numpy/_core/einsumfunc.py @@ -2,12 +2,14 @@ Implementation of optimized einsum. 
""" +import functools import itertools import operator -from numpy._core.multiarray import c_einsum -from numpy._core.numeric import asanyarray, tensordot +from numpy._core.multiarray import c_einsum, matmul +from numpy._core.numeric import asanyarray, reshape from numpy._core.overrides import array_function_dispatch +from numpy._core.umath import multiply __all__ = ['einsum', 'einsum_path'] @@ -440,116 +442,6 @@ def _greedy_path(input_sets, output_set, idx_dict, memory_limit): return path -def _can_dot(inputs, result, idx_removed): - """ - Checks if we can use BLAS (np.tensordot) call and its beneficial to do so. - - Parameters - ---------- - inputs : list of str - Specifies the subscripts for summation. - result : str - Resulting summation. - idx_removed : set - Indices that are removed in the summation - - - Returns - ------- - type : bool - Returns true if BLAS should and can be used, else False - - Notes - ----- - If the operations is BLAS level 1 or 2 and is not already aligned - we default back to einsum as the memory movement to copy is more - costly than the operation itself. - - - Examples - -------- - - # Standard GEMM operation - >>> _can_dot(['ij', 'jk'], 'ik', set('j')) - True - - # Can use the standard BLAS, but requires odd data movement - >>> _can_dot(['ijj', 'jk'], 'ik', set('j')) - False - - # DDOT where the memory is not aligned - >>> _can_dot(['ijk', 'ikj'], '', set('ijk')) - False - - """ - - # All `dot` calls remove indices - if len(idx_removed) == 0: - return False - - # BLAS can only handle two operands - if len(inputs) != 2: - return False - - input_left, input_right = inputs - - for c in set(input_left + input_right): - # can't deal with repeated indices on same input or more than 2 total - nl, nr = input_left.count(c), input_right.count(c) - if (nl > 1) or (nr > 1) or (nl + nr > 2): - return False - - # can't do implicit summation or dimension collapse e.g. 
- # "ab,bc->c" (implicitly sum over 'a') - # "ab,ca->ca" (take diagonal of 'a') - if nl + nr - 1 == int(c in result): - return False - - # Build a few temporaries - set_left = set(input_left) - set_right = set(input_right) - keep_left = set_left - idx_removed - keep_right = set_right - idx_removed - rs = len(idx_removed) - - # At this point we are a DOT, GEMV, or GEMM operation - - # Handle inner products - - # DDOT with aligned data - if input_left == input_right: - return True - - # DDOT without aligned data (better to use einsum) - if set_left == set_right: - return False - - # Handle the 4 possible (aligned) GEMV or GEMM cases - - # GEMM or GEMV no transpose - if input_left[-rs:] == input_right[:rs]: - return True - - # GEMM or GEMV transpose both - if input_left[:rs] == input_right[-rs:]: - return True - - # GEMM or GEMV transpose right - if input_left[-rs:] == input_right[-rs:]: - return True - - # GEMM or GEMV transpose left - if input_left[:rs] == input_right[:rs]: - return True - - # Einsum is faster than GEMV if we have to copy data - if not keep_left or not keep_right: - return False - - # We are a matrix-matrix product, but we need to copy data - return True - - def _parse_einsum_input(operands): """ A reproduction of einsum c side einsum parsing in python. 
@@ -887,13 +779,14 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False): # Build a few useful list and sets input_list = input_subscripts.split(',') + num_inputs = len(input_list) input_sets = [set(x) for x in input_list] output_set = set(output_subscript) indices = set(input_subscripts.replace(',', '')) + num_indices = len(indices) # Get length of each unique dimension and ensure all dimensions are correct dimension_dict = {} - broadcast_indices = [[] for x in range(len(input_list))] for tnum, term in enumerate(input_list): sh = operands[tnum].shape if len(sh) != len(term): @@ -903,10 +796,6 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False): for cnum, char in enumerate(term): dim = sh[cnum] - # Build out broadcast indices - if dim == 1: - broadcast_indices[tnum].append(char) - if char in dimension_dict.keys(): # For broadcasting cases we always want the largest dim size if dimension_dict[char] == 1: @@ -918,9 +807,6 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False): else: dimension_dict[char] = dim - # Convert broadcast inds to sets - broadcast_indices = [set(x) for x in broadcast_indices] - # Compute size of each input array plus the output array size_list = [_compute_size_by_dict(term, dimension_dict) for term in input_list + [output_subscript]] @@ -931,23 +817,16 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False): else: memory_arg = memory_limit - # Compute naive cost - # This isn't quite right, need to look into exactly how einsum does this - inner_product = (sum(len(x) for x in input_sets) - len(indices)) > 0 - naive_cost = _flop_count( - indices, inner_product, len(input_list), dimension_dict - ) - # Compute the path if explicit_einsum_path: path = path_type[1:] elif ( (path_type is False) - or (len(input_list) in [1, 2]) + or (num_inputs in [1, 2]) or (indices == output_set) ): # Nothing to be optimized, leave it to einsum - path = [tuple(range(len(input_list)))] + path = 
[tuple(range(num_inputs))] elif path_type == "greedy": path = _greedy_path( input_sets, output_set, dimension_dict, memory_arg @@ -969,26 +848,18 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False): contract = _find_contraction(contract_inds, input_sets, output_set) out_inds, input_sets, idx_removed, idx_contract = contract - cost = _flop_count( - idx_contract, idx_removed, len(contract_inds), dimension_dict - ) - cost_list.append(cost) - scale_list.append(len(idx_contract)) - size_list.append(_compute_size_by_dict(out_inds, dimension_dict)) + if not einsum_call_arg: + # these are only needed for printing info + cost = _flop_count( + idx_contract, idx_removed, len(contract_inds), dimension_dict + ) + cost_list.append(cost) + scale_list.append(len(idx_contract)) + size_list.append(_compute_size_by_dict(out_inds, dimension_dict)) - bcast = set() tmp_inputs = [] for x in contract_inds: tmp_inputs.append(input_list.pop(x)) - bcast |= broadcast_indices.pop(x) - - new_bcast_inds = bcast - idx_removed - - # If we're broadcasting, nix blas - if not len(idx_removed & bcast): - do_blas = _can_dot(tmp_inputs, out_inds, idx_removed) - else: - do_blas = False # Last contraction if (cnum - len(path)) == -1: @@ -998,16 +869,11 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False): idx_result = "".join([x[1] for x in sorted(sort_result)]) input_list.append(idx_result) - broadcast_indices.append(new_bcast_inds) einsum_str = ",".join(tmp_inputs) + "->" + idx_result - contraction = ( - contract_inds, idx_removed, einsum_str, input_list[:], do_blas - ) + contraction = (contract_inds, einsum_str, input_list[:]) contraction_list.append(contraction) - opt_cost = sum(cost_list) + 1 - if len(input_list) != 1: # Explicit "einsum_path" is usually trusted, but we detect this kind of # mistake in order to prevent from returning an intermediate value. 
@@ -1022,11 +888,21 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False): overall_contraction = input_subscripts + "->" + output_subscript header = ("scaling", "current", "remaining") + # Compute naive cost + # This isn't quite right, need to look into exactly how einsum does this + inner_product = ( + sum(len(set(x)) for x in input_subscripts.split(',')) - num_indices + ) > 0 + naive_cost = _flop_count( + indices, inner_product, num_inputs, dimension_dict + ) + + opt_cost = sum(cost_list) + 1 speedup = naive_cost / opt_cost max_i = max(size_list) path_print = f" Complete contraction: {overall_contraction}\n" - path_print += f" Naive scaling: {len(indices)}\n" + path_print += f" Naive scaling: {num_indices}\n" path_print += " Optimized scaling: %d\n" % max(scale_list) path_print += f" Naive FLOP count: {naive_cost:.3e}\n" path_print += f" Optimized FLOP count: {opt_cost:.3e}\n" @@ -1037,7 +913,7 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False): path_print += "-" * 74 for n, contraction in enumerate(contraction_list): - inds, idx_rm, einsum_str, remaining, blas = contraction + _, einsum_str, remaining = contraction remaining_str = ",".join(remaining) + "->" + output_subscript path_run = (scale_list[n], einsum_str, remaining_str) path_print += "\n%4d %24s %40s" % path_run @@ -1046,6 +922,312 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False): return (path, path_print) +def _parse_eq_to_pure_multiplication(a_term, shape_a, b_term, shape_b, out): + """If there are no contracted indices, then we can directly transpose and + insert singleton dimensions into ``a`` and ``b`` such that (broadcast) + elementwise multiplication performs the einsum. + + No need to cache this as it is within the cached + ``_parse_eq_to_batch_matmul``. 
+ + """ + desired_a = "" + desired_b = "" + new_shape_a = [] + new_shape_b = [] + for ix in out: + if ix in a_term: + desired_a += ix + new_shape_a.append(shape_a[a_term.index(ix)]) + else: + new_shape_a.append(1) + if ix in b_term: + desired_b += ix + new_shape_b.append(shape_b[b_term.index(ix)]) + else: + new_shape_b.append(1) + + if desired_a != a_term: + eq_a = f"{a_term}->{desired_a}" + else: + eq_a = None + if desired_b != b_term: + eq_b = f"{b_term}->{desired_b}" + else: + eq_b = None + + return ( + eq_a, + eq_b, + new_shape_a, + new_shape_b, + None, # new_shape_ab, not needed since not fusing + None, # perm_ab, not needed as we transpose a and b first + True, # pure_multiplication=True + ) + + +@functools.lru_cache(2**12) +def _parse_eq_to_batch_matmul(eq, shape_a, shape_b): + """Cached parsing of a two term einsum equation into the necessary + sequence of arguments for contracttion via batched matrix multiplication. + The steps we need to specify are: + + 1. Remove repeated and trivial indices from the left and right terms, + and transpose them, done as a single einsum. + 2. Fuse the remaining indices so we have two 3D tensors. + 3. Perform the batched matrix multiplication. + 4. Unfuse the output to get the desired final index order. + + """ + lhs, out = eq.split("->") + a_term, b_term = lhs.split(",") + + if len(a_term) != len(shape_a): + raise ValueError(f"Term '{a_term}' does not match shape {shape_a}.") + if len(b_term) != len(shape_b): + raise ValueError(f"Term '{b_term}' does not match shape {shape_b}.") + + bat_inds = [] # appears on A, B, O + con_inds = [] # appears on A, B, . 
+ a_keep = [] # appears on A, ., O + b_keep = [] # appears on ., B, O + sizes = {} + singletons = set() + + # parse left term + seen = set() + for ix, d in zip(a_term, shape_a): + if d == 1: + # everything (including broadcasting) works nicely if simply ignore + # such dimensions, but we do need to track if they appear in output + # and thus should be reintroduced later + singletons.add(ix) + continue + + # set or check size + if sizes.setdefault(ix, d) != d: + raise ValueError( + f"Index {ix} has mismatched sizes {sizes[ix]} and {d}." + ) + + if ix in seen: + continue + seen.add(ix) + + if ix in b_term: + if ix in out: + bat_inds.append(ix) + else: + con_inds.append(ix) + elif ix in out: + a_keep.append(ix) + + # parse right term + seen.clear() + for ix, d in zip(b_term, shape_b): + if d == 1: + singletons.add(ix) + continue + # broadcast indices don't appear as singletons in output + singletons.discard(ix) + + # set or check size + if sizes.setdefault(ix, d) != d: + raise ValueError( + f"Index {ix} has mismatched sizes {sizes[ix]} and {d}." 
+ ) + + if ix in seen: + continue + seen.add(ix) + + if ix not in a_term: + if ix in out: + b_keep.append(ix) + + if not con_inds: + # contraction is pure multiplication, prepare inputs differently + return _parse_eq_to_pure_multiplication( + a_term, shape_a, b_term, shape_b, out + ) + + # only need the size one indices that appear in the output + singletons = [ix for ix in out if ix in singletons] + + # take diagonal, remove any trivial axes and transpose left + desired_a = "".join((*bat_inds, *a_keep, *con_inds)) + if a_term != desired_a: + eq_a = f"{a_term}->{desired_a}" + else: + eq_a = None + + # take diagonal, remove any trivial axes and transpose right + desired_b = "".join((*bat_inds, *con_inds, *b_keep)) + if b_term != desired_b: + eq_b = f"{b_term}->{desired_b}" + else: + eq_b = None + + # then we want to reshape + if bat_inds: + lgroups = (bat_inds, a_keep, con_inds) + rgroups = (bat_inds, con_inds, b_keep) + ogroups = (bat_inds, a_keep, b_keep) + else: + # avoid size 1 batch dimension if no batch indices + lgroups = (a_keep, con_inds) + rgroups = (con_inds, b_keep) + ogroups = (a_keep, b_keep) + + if any(len(group) != 1 for group in lgroups): + # need to fuse 'kept' and contracted indices + # (though could allow batch indices to be broadcast) + new_shape_a = tuple( + functools.reduce(operator.mul, (sizes[ix] for ix in ix_group), 1) + for ix_group in lgroups + ) + else: + new_shape_a = None + + if any(len(group) != 1 for group in rgroups): + # need to fuse 'kept' and contracted indices + # (though could allow batch indices to be broadcast) + new_shape_b = tuple( + functools.reduce(operator.mul, (sizes[ix] for ix in ix_group), 1) + for ix_group in rgroups + ) + else: + new_shape_b = None + + if any(len(group) != 1 for group in ogroups) or singletons: + new_shape_ab = (1,) * len(singletons) + tuple( + sizes[ix] for ix_group in ogroups for ix in ix_group + ) + else: + new_shape_ab = None + + # then we want to permute the matmul produced output: + 
out_produced = "".join((*singletons, *bat_inds, *a_keep, *b_keep)) + perm_ab = tuple(out_produced.index(ix) for ix in out) + if perm_ab == tuple(range(len(perm_ab))): + perm_ab = None + + return ( + eq_a, + eq_b, + new_shape_a, + new_shape_b, + new_shape_ab, + perm_ab, + False, # pure_multiplication=False + ) + + +@functools.lru_cache(maxsize=64) +def _parse_output_order(order, a_is_fcontig, b_is_fcontig): + order = order.upper() + if order == "K": + return None + elif order in "CF": + return order + elif order == "A": + if a_is_fcontig and b_is_fcontig: + return "F" + else: + return "C" + else: + raise ValueError( + "ValueError: order must be one of " + f"'C', 'F', 'A', or 'K' (got '{order}')" + ) + + +def bmm_einsum(eq, a, b, out=None, **kwargs): + """Perform arbitrary pairwise einsums using only ``matmul``, or + ``multiply`` if no contracted indices are involved (plus maybe single term + ``einsum`` to prepare the terms individually). The logic for each is cached + based on the equation and array shape, and each step is only performed if + necessary. + + Parameters + ---------- + eq : str + The einsum equation. + a : array_like + The first array to contract. + b : array_like + The second array to contract. + + Returns + ------- + array_like + """ + ( + eq_a, + eq_b, + new_shape_a, + new_shape_b, + new_shape_ab, + perm_ab, + pure_multiplication, + ) = _parse_eq_to_batch_matmul(eq, a.shape, b.shape) + + # n.b. 
one could special case various cases to call c_einsum directly here + + # need to handle `order` a little manually, since we do transpose + # operations before and potentially after the ufunc calls + output_order = _parse_output_order( + kwargs.pop("order", "K"), a.flags.f_contiguous, b.flags.f_contiguous + ) + + # prepare left + if eq_a is not None: + # diagonals, sums, and tranpose + a = c_einsum(eq_a, a) + if new_shape_a is not None: + a = reshape(a, new_shape_a) + + # prepare right + if eq_b is not None: + # diagonals, sums, and tranpose + b = c_einsum(eq_b, b) + if new_shape_b is not None: + b = reshape(b, new_shape_b) + + if pure_multiplication: + # no contracted indices + if output_order is not None: + kwargs["order"] = output_order + + # do the 'contraction' via multiplication! + return multiply(a, b, out=out, **kwargs) + + # can only supply out here if no other reshaping / transposing + matmul_out_compatible = (new_shape_ab is None) and (perm_ab is None) + if matmul_out_compatible: + kwargs["out"] = out + + # do the contraction! + ab = matmul(a, b, **kwargs) + + # prepare the output + if new_shape_ab is not None: + ab = reshape(ab, new_shape_ab) + if perm_ab is not None: + ab = ab.transpose(perm_ab) + + if (out is not None) and (not matmul_out_compatible): + # handle case where out is specified, but we also needed + # to reshape / transpose ``ab`` after the matmul + out[:] = ab + ab = out + elif output_order is not None: + ab = asanyarray(ab, order=output_order) + + return ab + + def _einsum_dispatcher(*operands, out=None, optimize=None, **kwargs): # Arguably we dispatch on more arguments than we really should; see note in # _einsum_path_dispatcher for why. 
@@ -1434,58 +1616,23 @@ def einsum(*operands, out=None, optimize=False, **kwargs): operands, contraction_list = einsum_path(*operands, optimize=optimize, einsum_call=True) - # Handle order kwarg for output array, c_einsum allows mixed case - output_order = kwargs.pop('order', 'K') - if output_order.upper() == 'A': - if all(arr.flags.f_contiguous for arr in operands): - output_order = 'F' - else: - output_order = 'C' - # Start contraction loop for num, contraction in enumerate(contraction_list): - inds, idx_rm, einsum_str, remaining, blas = contraction + inds, einsum_str, _ = contraction tmp_operands = [operands.pop(x) for x in inds] # Do we need to deal with the output? handle_out = specified_out and ((num + 1) == len(contraction_list)) - # Call tensordot if still possible - if blas: - # Checks have already been handled - input_str, results_index = einsum_str.split('->') - input_left, input_right = input_str.split(',') - - tensor_result = input_left + input_right - for s in idx_rm: - tensor_result = tensor_result.replace(s, "") - - # Find indices to contract over - left_pos, right_pos = [], [] - for s in sorted(idx_rm): - left_pos.append(input_left.find(s)) - right_pos.append(input_right.find(s)) - - # Contract! 
- new_view = tensordot( - *tmp_operands, axes=(tuple(left_pos), tuple(right_pos)) - ) - - # Build a new view if needed - if (tensor_result != results_index) or handle_out: - if handle_out: - kwargs["out"] = out - new_view = c_einsum( - tensor_result + '->' + results_index, new_view, **kwargs - ) + # If out was specified + if handle_out: + kwargs["out"] = out - # Call einsum + if len(tmp_operands) == 2: + # Call (batched) matrix multiplication if possible + new_view = bmm_einsum(einsum_str, *tmp_operands, **kwargs) else: - # If out was specified - if handle_out: - kwargs["out"] = out - - # Do the contraction + # Call einsum new_view = c_einsum(einsum_str, *tmp_operands, **kwargs) # Append new items and dereference what we can @@ -1495,4 +1642,4 @@ def einsum(*operands, out=None, optimize=False, **kwargs): if specified_out: return out else: - return asanyarray(operands[0], order=output_order) + return operands[0] From d2c1ccfcda6218f3a35263316377ceb73e7fa6e4 Mon Sep 17 00:00:00 2001 From: Johnnie Gray Date: Wed, 5 Nov 2025 14:25:01 -0800 Subject: [PATCH 0839/1718] add note and tweak parsing --- numpy/_core/einsumfunc.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/numpy/_core/einsumfunc.py b/numpy/_core/einsumfunc.py index 8a96f1444971..dd5c65bc8307 100644 --- a/numpy/_core/einsumfunc.py +++ b/numpy/_core/einsumfunc.py @@ -1107,10 +1107,11 @@ def _parse_eq_to_batch_matmul(eq, shape_a, shape_b): else: new_shape_ab = None - # then we want to permute the matmul produced output: + # then we might need to permute the matmul produced output: out_produced = "".join((*singletons, *bat_inds, *a_keep, *b_keep)) - perm_ab = tuple(out_produced.index(ix) for ix in out) - if perm_ab == tuple(range(len(perm_ab))): + if out_produced != out: + perm_ab = tuple(out_produced.index(ix) for ix in out) + else: perm_ab = None return ( @@ -1162,6 +1163,11 @@ def bmm_einsum(eq, a, b, out=None, **kwargs): Returns ------- array_like + + Notes + ----- + A fuller 
description of this algorithm, and original source for this + implementation, can be found at https://github.com/jcmgray/einsum_bmm. """ ( eq_a, From 5b67bc86871c37d172a49c41d4a1fee04d7fdb81 Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 11 Nov 2025 01:10:12 +0100 Subject: [PATCH 0840/1718] BUG: ``[u]longlong.bit_count`` runtime signature --- numpy/_core/_add_newdocs_scalars.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/numpy/_core/_add_newdocs_scalars.py b/numpy/_core/_add_newdocs_scalars.py index 7305f232510c..3f9eca5e47f3 100644 --- a/numpy/_core/_add_newdocs_scalars.py +++ b/numpy/_core/_add_newdocs_scalars.py @@ -358,7 +358,8 @@ def add_newdoc_for_scalar_type(name: str, text_signature: str, doc: str) -> None """)) for int_name in ('int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32', - 'int64', 'uint64', 'int64', 'uint64', 'int64', 'uint64'): + 'int64', 'uint64', 'int64', 'uint64', 'int64', 'uint64', + 'longlong', 'ulonglong'): # Add negative examples for signed cases by checking typecode add_newdoc('numpy._core.numerictypes', int_name, ('bit_count', f""" From db6bd44f1f8ec6f0322516d4f78865503b06e72c Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 11 Nov 2025 01:32:39 +0100 Subject: [PATCH 0841/1718] BUG: ``np.generic`` method runtime signatures --- numpy/_core/_add_newdocs.py | 992 ++++++++--------------- numpy/_core/tests/test_scalar_methods.py | 112 ++- 2 files changed, 388 insertions(+), 716 deletions(-) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index 78c660f423f7..5869d645cdf2 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -9,6 +9,8 @@ """ +import textwrap + from numpy._core.function_base import add_newdoc from numpy._core.overrides import get_array_function_like_doc # noqa: F401 @@ -3140,18 +3142,6 @@ """)) -add_newdoc('numpy._core.multiarray', 'ndarray', ('__array_namespace__', - """ - __array_namespace__($self, /, *, api_version=None) - -- - - 
a.__array_namespace__(*, api_version=None) - - For Array API compatibility. - - """)) - - add_newdoc('numpy._core.multiarray', 'ndarray', ('__array_ufunc__', """ __array_ufunc__($self, ufunc, method, /, *inputs, **kwargs) @@ -3208,32 +3198,6 @@ """)) -add_newdoc('numpy._core.multiarray', 'ndarray', ('__copy__', - """ - __copy__($self, /) - -- - - a.__copy__() - - Used if :func:`copy.copy` is called on an array. Returns a copy of the array. - - Equivalent to ``a.copy(order='K')``. - - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('__deepcopy__', - """ - __deepcopy__($self, memo, /) - -- - - a.__deepcopy__(memo, /) - - Used if :func:`copy.deepcopy` is called on an array. - - """)) - - add_newdoc('numpy._core.multiarray', 'ndarray', ('__dlpack__', """ __dlpack__($self, /, *, stream=None, max_version=None, dl_device=None, copy=None) @@ -3308,119 +3272,243 @@ """)) -add_newdoc('numpy._core.multiarray', 'ndarray', ('all', +add_newdoc('numpy._core.multiarray', 'ndarray', ('dot', """ - all($self, /, axis=None, out=None, keepdims=False, *, where=True) + dot($self, other, /, out=None) -- - a.all(axis=None, out=None, keepdims=np._NoValue, *, where=np._NoValue) - - Returns True if all elements evaluate to True. + a.dot(other, /, out=None) - Refer to `numpy.all` for full documentation. + Refer to :func:`numpy.dot` for full documentation. See Also -------- - numpy.all : equivalent function + numpy.dot : equivalent function """)) -add_newdoc('numpy._core.multiarray', 'ndarray', ('any', +add_newdoc('numpy._core.multiarray', 'ndarray', ('argpartition', """ - any($self, /, axis=None, out=None, keepdims=False, *, where=True) + argpartition($self, kth, /, axis=-1, kind='introselect', order=None) -- - a.any(axis=None, out=None, keepdims=np._NoValue, *, where=np._NoValue) + a.argpartition(kth, axis=-1, kind='introselect', order=None) - Returns True if any of the elements of `a` evaluate to True. + Returns the indices that would partition this array. 
- Refer to `numpy.any` for full documentation. + Refer to `numpy.argpartition` for full documentation. See Also -------- - numpy.any : equivalent function + numpy.argpartition : equivalent function """)) -add_newdoc('numpy._core.multiarray', 'ndarray', ('argmax', +add_newdoc('numpy._core.multiarray', 'ndarray', ('partition', """ - argmax($self, /, axis=None, out=None, *, keepdims=False) + partition($self, kth, /, axis=-1, kind='introselect', order=None) -- - a.argmax(axis=None, out=None, *, keepdims=False) + a.partition(kth, axis=-1, kind='introselect', order=None) - Return indices of the maximum values along the given axis. + Partially sorts the elements in the array in such a way that the value of + the element in k-th position is in the position it would be in a sorted + array. In the output array, all elements smaller than the k-th element + are located to the left of this element and all equal or greater are + located to its right. The ordering of the elements in the two partitions + on the either side of the k-th element in the output array is undefined. - Refer to `numpy.argmax` for full documentation. + Parameters + ---------- + kth : int or sequence of ints + Element index to partition by. The kth element value will be in its + final sorted position and all smaller elements will be moved before it + and all equal or greater elements behind it. + The order of all elements in the partitions is undefined. + If provided with a sequence of kth it will partition all elements + indexed by kth of them into their sorted position at once. + + .. deprecated:: 1.22.0 + Passing booleans as index is deprecated. + axis : int, optional + Axis along which to sort. Default is -1, which means sort along the + last axis. + kind : {'introselect'}, optional + Selection algorithm. Default is 'introselect'. + order : str or list of str, optional + When `a` is an array with fields defined, this argument specifies + which fields to compare first, second, etc. 
A single field can + be specified as a string, and not all fields need to be specified, + but unspecified fields will still be used, in the order in which + they come up in the dtype, to break ties. See Also -------- - numpy.argmax : equivalent function + numpy.partition : Return a partitioned copy of an array. + argpartition : Indirect partition. + sort : Full sort. + + Notes + ----- + See ``np.partition`` for notes on the different algorithms. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([3, 4, 2, 1]) + >>> a.partition(3) + >>> a + array([2, 1, 3, 4]) # may vary + + >>> a.partition((1, 3)) + >>> a + array([1, 2, 3, 4]) """)) -add_newdoc('numpy._core.multiarray', 'ndarray', ('argmin', +############################################################################## +# +# methods from both `ndarray` and `generic` +# +############################################################################## + +_METHOD_DOC_TEMPLATE = """{name}({params}) +-- + +{doc}""" + +def _array_method_doc(name: str, params: str, doc: str) -> None: """ - argmin($self, /, axis=None, out=None, *, keepdims=False) - -- + Interenal helper function for adding docstrings to a common method of + `numpy.ndarray` and `numpy.generic`. - a.argmin(axis=None, out=None, *, keepdims=False) + The provided docstring will be added to the given `numpy.ndarray` method. + For the `numpy.generic` method, a shorter docstring indicating that it is + identical to the `ndarray` method will be created. + Both methods will have a proper and identical `__text_signature__`. - Return indices of the minimum values along the given axis. + Parameters + ---------- + name : str + Name of the method. + params : str + Parameter signature for the method without parentheses, for example, + ``"a, /, dtype=None, *, copy=False"``. + Parameter defaults must be understood by `ast.literal_eval`, i.e. strings, + bytes, numbers, tuples, lists, dicts, sets, booleans, or None. 
+ doc : str + The full docstring for the `ndarray` method. + """ - Refer to `numpy.argmin` for detailed documentation. + # prepend the pos-only `$self` parameter to the method signature + if "/" not in params: + params = f"/, {params}" if params else "/" + params = f"$self, {params}" - See Also - -------- - numpy.argmin : equivalent function + # add docstring to `np.ndarray.{name}` + doc = textwrap.dedent(doc).strip() + doc_array = _METHOD_DOC_TEMPLATE.format(name=name, params=params, doc=doc) + add_newdoc("numpy._core.multiarray", "ndarray", (name, doc_array)) - """)) + # add docstring to `np.generic.{name}` + doc_scalar = f"Scalar method identical to `ndarray.{name}`." + doc_scalar = _METHOD_DOC_TEMPLATE.format(name=name, params=params, doc=doc_scalar) + add_newdoc("numpy._core.numerictypes", "generic", (name, doc_scalar)) -add_newdoc('numpy._core.multiarray', 'ndarray', ('argsort', +_array_method_doc('__array_namespace__', "*, api_version=None", """ - argsort($self, /, axis=-1, kind=None, order=None, *, stable=None) - -- + a.__array_namespace__(*, api_version=None) - a.argsort(axis=-1, kind=None, order=None, *, stable=None) + For Array API compatibility. + """) - Returns the indices that would sort this array. +_array_method_doc('__copy__', "", + """ + a.__copy__() - Refer to `numpy.argsort` for full documentation. + Used if :func:`copy.copy` is called on an array. Returns a copy of the array. + + Equivalent to ``a.copy(order='K')``. + """) + +_array_method_doc('__deepcopy__', "memo, /", + """ + a.__deepcopy__(memo, /) + + Used if :func:`copy.deepcopy` is called on an array. + """) + +_array_method_doc('all', "axis=None, out=None, keepdims=False, *, where=True", + """ + a.all(axis=None, out=None, *, keepdims=, where=) + + Returns True if all elements evaluate to True. + + Refer to `numpy.all` for full documentation. 
See Also -------- - numpy.argsort : equivalent function + numpy.all : equivalent function + """) - """)) +_array_method_doc('any', "axis=None, out=None, keepdims=False, *, where=True", + """ + a.any(axis=None, out=None, *, keepdims=, where=) + Returns True if any of the elements of `a` evaluate to True. -add_newdoc('numpy._core.multiarray', 'ndarray', ('argpartition', - """ - argpartition($self, kth, /, axis=-1, kind='introselect', order=None) - -- + Refer to `numpy.any` for full documentation. - a.argpartition(kth, axis=-1, kind='introselect', order=None) + See Also + -------- + numpy.any : equivalent function + """) - Returns the indices that would partition this array. +_array_method_doc('argmax', "axis=None, out=None, *, keepdims=False", + """ + a.argmax(axis=None, out=None, *, keepdims=False) - Refer to `numpy.argpartition` for full documentation. + Return indices of the maximum values along the given axis. + + Refer to `numpy.argmax` for full documentation. See Also -------- - numpy.argpartition : equivalent function + numpy.argmax : equivalent function + """) - """)) +_array_method_doc('argmin', "axis=None, out=None, *, keepdims=False", + """ + a.argmin(axis=None, out=None, *, keepdims=False) + + Return indices of the minimum values along the given axis. + + Refer to `numpy.argmin` for detailed documentation. + See Also + -------- + numpy.argmin : equivalent function + """) -add_newdoc('numpy._core.multiarray', 'ndarray', ('astype', +_array_method_doc('argsort', "axis=-1, kind=None, order=None, *, stable=None", """ - astype($self, /, dtype, order='K', casting='unsafe', subok=True, copy=True) - -- + a.argsort(axis=-1, kind=None, order=None, *, stable=None) + + Returns the indices that would sort this array. + + Refer to `numpy.argsort` for full documentation. 
+ + See Also + -------- + numpy.argsort : equivalent function + """) +_array_method_doc('astype', "dtype, order='K', casting='unsafe', subok=True, copy=True", + """ a.astype(dtype, order='K', casting='unsafe', subok=True, copy=True) Copy of the array, cast to a specified type. @@ -3495,14 +3583,10 @@ >>> x[:2].astype(int, casting="same_value") array([1, 2]) - """)) - + """) -add_newdoc('numpy._core.multiarray', 'ndarray', ('byteswap', +_array_method_doc('byteswap', "inplace=False", """ - byteswap($self, /, inplace=False) - -- - a.byteswap(inplace=False) Swap the bytes of the array elements @@ -3552,15 +3636,10 @@ >>> A.view(np.uint8) array([0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3], dtype=uint8) + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('choose', +_array_method_doc('choose', "choices, out=None, mode='raise'", """ - choose($self, /, choices, out=None, mode='raise') - -- - a.choose(choices, out=None, mode='raise') Use an index array to construct a new array from a set of choices. @@ -3570,16 +3649,11 @@ See Also -------- numpy.choose : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('clip', +_array_method_doc('clip', "min=None, max=None, out=None, **kwargs", """ - clip($self, /, min=None, max=None, out=None, **kwargs) - -- - - a.clip(min=np._NoValue, max=np._NoValue, out=None, **kwargs) + a.clip(min=, max=, out=None, **kwargs) Return an array whose values are limited to ``[min, max]``. One of max or min must be given. @@ -3589,15 +3663,10 @@ See Also -------- numpy.clip : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('compress', +_array_method_doc('compress', "condition, axis=None, out=None", """ - compress($self, /, condition, axis=None, out=None) - -- - a.compress(condition, axis=None, out=None) Return selected slices of this array along given axis. 
@@ -3607,15 +3676,10 @@ See Also -------- numpy.compress : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('conj', +_array_method_doc('conj', "", """ - conj($self, /) - -- - a.conj() Complex-conjugate all elements. @@ -3625,15 +3689,10 @@ See Also -------- numpy.conjugate : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('conjugate', +_array_method_doc('conjugate', "", """ - conjugate($self, /) - -- - a.conjugate() Return the complex conjugate, element-wise. @@ -3643,15 +3702,10 @@ See Also -------- numpy.conjugate : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('copy', +_array_method_doc('copy', "order='C'", """ - copy($self, /, order='C') - -- - a.copy(order='C') Return a copy of the array. @@ -3719,15 +3773,10 @@ array([1, 'm', list([10, 3, 4])], dtype=object) >>> a array([1, 'm', list([2, 3, 4])], dtype=object) + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('cumprod', +_array_method_doc('cumprod', "axis=None, dtype=None, out=None", """ - cumprod($self, /, axis=None, dtype=None, out=None) - -- - a.cumprod(axis=None, dtype=None, out=None) Return the cumulative product of the elements along the given axis. @@ -3737,15 +3786,10 @@ See Also -------- numpy.cumprod : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('cumsum', +_array_method_doc('cumsum', "axis=None, dtype=None, out=None", """ - cumsum($self, /, axis=None, dtype=None, out=None) - -- - a.cumsum(axis=None, dtype=None, out=None) Return the cumulative sum of the elements along the given axis. 
@@ -3755,15 +3799,10 @@ See Also -------- numpy.cumsum : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('diagonal', +_array_method_doc('diagonal', "offset=0, axis1=0, axis2=1", """ - diagonal($self, /, offset=0, axis1=0, axis2=1) - -- - a.diagonal(offset=0, axis1=0, axis2=1) Return specified diagonals. In NumPy 1.9 the returned array is a @@ -3775,27 +3814,10 @@ See Also -------- numpy.diagonal : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('dot', - """ - dot($self, other, /, out=None) - -- - - a.dot(other, /, out=None) - - Refer to :func:`numpy.dot` for full documentation. - - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('dump', +_array_method_doc('dump', "file", """ - dump($self, /, file) - -- - a.dump(file) Dump a pickle of the array to the specified file. @@ -3805,32 +3827,22 @@ ---------- file : str or Path A string naming the dump file. + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('dumps', +_array_method_doc('dumps', "", """ - dumps($self, /) - -- - a.dumps() Returns the pickle of the array as a string. - pickle.loads will convert the string back to an array. + ``pickle.loads`` will convert the string back to an array. Parameters ---------- None + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('fill', +_array_method_doc('fill', "value", """ - fill($self, /, value) - -- - a.fill(value) Fill the array with a scalar value. @@ -3869,15 +3881,10 @@ >>> a[...] = np.array(3) >>> a array([3, 3], dtype=object) + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('flatten', +_array_method_doc('flatten', "order='C'", """ - flatten($self, /, order='C') - -- - a.flatten(order='C') Return a copy of the array collapsed into one dimension. 
@@ -3911,15 +3918,10 @@ array([1, 2, 3, 4]) >>> a.flatten('F') array([1, 3, 2, 4]) + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('getfield', +_array_method_doc('getfield', "dtype, offset=0", """ - getfield($self, /, dtype, offset=0) - -- - a.getfield(dtype, offset=0) Returns a field of the given array as a certain type. @@ -3957,15 +3959,10 @@ >>> x.getfield(np.float64, offset=8) array([[1., 0.], [0., 4.]]) + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('item', +_array_method_doc('item', "*args", """ - item($self, /, *args) - -- - a.item(*args) Copy an element of an array to a standard Python scalar and return it. @@ -4026,16 +4023,13 @@ >>> a = np.array([np.int64(1)], dtype=object) >>> a.item() #return np.int64 np.int64(1) + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('max', - """ - max($self, /, axis=None, out=None, **kwargs) - -- +_KWARGS_REDUCE = "keepdims=, initial=, where=" - a.max(axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, where=np._NoValue) +_array_method_doc('max', "axis=None, out=None, **kwargs", + f""" + a.max(axis=None, out=None, *, {_KWARGS_REDUCE}) Return the maximum along a given axis. @@ -4044,107 +4038,89 @@ See Also -------- numpy.amax : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('mean', - """ - mean($self, /, axis=None, dtype=None, out=None, **kwargs) - -- - - a.mean(axis=None, dtype=None, out=None, keepdims=np._NoValue, *, where=np._NoValue) +_array_method_doc('min', "axis=None, out=None, **kwargs", + f""" + a.min(axis=None, out=None, *, {_KWARGS_REDUCE}) - Returns the average of the array elements along given axis. + Return the minimum along a given axis. - Refer to `numpy.mean` for full documentation. + Refer to `numpy.amin` for full documentation. 
See Also -------- - numpy.mean : equivalent function + numpy.amin : equivalent function + """) - """)) +_array_method_doc('prod', "axis=None, dtype=None, out=None, **kwargs", + f""" + a.prod(axis=None, dtype=None, out=None, *, {_KWARGS_REDUCE}) + Return the product of the array elements over the given axis -add_newdoc('numpy._core.multiarray', 'ndarray', ('min', - """ - min($self, /, axis=None, out=None, **kwargs) - -- + Refer to `numpy.prod` for full documentation. - a.min(axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, where=np._NoValue) + See Also + -------- + numpy.prod : equivalent function + """) - Return the minimum along a given axis. +_array_method_doc('sum', "axis=None, dtype=None, out=None, **kwargs", + f""" + a.sum(axis=None, dtype=None, out=None, *, {_KWARGS_REDUCE}) - Refer to `numpy.amin` for full documentation. + Return the sum of the array elements over the given axis. + + Refer to `numpy.sum` for full documentation. See Also -------- - numpy.amin : equivalent function - - """)) - + numpy.sum : equivalent function + """) -add_newdoc('numpy._core.multiarray', 'ndarray', ('nonzero', +_array_method_doc('mean', "axis=None, dtype=None, out=None, **kwargs", """ - nonzero($self, /) - -- + a.mean(axis=None, dtype=None, out=None, *, keepdims=, where=) - a.nonzero() - - Return the indices of the elements that are non-zero. + Returns the average of the array elements along given axis. - Refer to `numpy.nonzero` for full documentation. + Refer to `numpy.mean` for full documentation. 
See Also -------- - numpy.nonzero : equivalent function - - """)) - + numpy.mean : equivalent function + """) -add_newdoc('numpy._core.multiarray', 'ndarray', ('prod', +_array_method_doc('nonzero', "", """ - prod($self, /, axis=None, dtype=None, out=None, **kwargs) - -- - - a.prod(axis=None, dtype=None, out=None, keepdims=np._NoValue, - initial=np._NoValue, where=np._NoValue) + a.nonzero() - Return the product of the array elements over the given axis + Return the indices of the elements that are non-zero. - Refer to `numpy.prod` for full documentation. + Refer to `numpy.nonzero` for full documentation. See Also -------- - numpy.prod : equivalent function - - """)) - + numpy.nonzero : equivalent function + """) -add_newdoc('numpy._core.multiarray', 'ndarray', ('put', +_array_method_doc('put', "indices, values, /, mode='raise'", """ - put($self, indices, values, /, mode='raise') - -- - a.put(indices, values, mode='raise') - Set ``a.flat[n] = values[n]`` for all `n` in indices. + Set ``a.flat[n] = values[n]`` for all ``n`` in indices. Refer to `numpy.put` for full documentation. See Also -------- numpy.put : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('ravel', +_array_method_doc('ravel', "order='C'", """ - ravel($self, /, order='C') - -- - - a.ravel([order]) + a.ravel(order='C') Return a flattened array. @@ -4153,17 +4129,11 @@ See Also -------- numpy.ravel : equivalent function - ndarray.flat : a flat iterator on the array. + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('repeat', +_array_method_doc('repeat', "repeats, /, axis=None", """ - repeat($self, repeats, /, axis=None) - -- - a.repeat(repeats, axis=None) Repeat elements of an array. 
@@ -4173,16 +4143,12 @@ See Also -------- numpy.repeat : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('reshape', +_array_method_doc('reshape', "*shape, order='C', copy=None", """ - reshape($self, /, *shape, order='C', copy=None) - -- - a.reshape(shape, /, *, order='C', copy=None) + a.reshape(*shape, order='C', copy=None) Returns an array containing the same data with a new shape. @@ -4196,18 +4162,13 @@ ----- Unlike the free function `numpy.reshape`, this method on `ndarray` allows the elements of the shape parameter to be passed in as separate arguments. - For example, ``a.reshape(10, 11)`` is equivalent to - ``a.reshape((10, 11))``. - - """)) - + For example, ``a.reshape(4, 2)`` is equivalent to ``a.reshape((4, 2))``. + """) -add_newdoc('numpy._core.multiarray', 'ndarray', ('resize', +_array_method_doc('resize', "*new_shape, refcheck=True", """ - resize($self, /, *new_shape, refcheck=True) - -- - - a.resize(new_shape, refcheck=True) + a.resize(new_shape, /, *, refcheck=True) + a.resize(*new_shape, refcheck=True) Change shape and size of array in-place. @@ -4295,15 +4256,10 @@ array([[0]]) >>> c array([[0]]) + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('round', +_array_method_doc('round', "decimals=0, out=None", """ - round($self, /, decimals=0, out=None) - -- - a.round(decimals=0, out=None) Return `a` with each element rounded to the given number of decimals. @@ -4313,33 +4269,23 @@ See Also -------- numpy.around : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('searchsorted', +_array_method_doc('searchsorted', "v, /, side='left', sorter=None", """ - searchsorted($self, v, /, side='left', sorter=None) - -- - a.searchsorted(v, side='left', sorter=None) - Find indices where elements of v should be inserted in a to maintain order. + Find indices where elements of `v` should be inserted in `a` to maintain order. 
- For full documentation, see `numpy.searchsorted` + For full documentation, see `numpy.searchsorted`. See Also -------- numpy.searchsorted : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('setfield', +_array_method_doc('setfield', "val, /, dtype, offset=0", """ - setfield($self, val, /, dtype, offset=0) - -- - a.setfield(val, dtype, offset=0) Put a value into a specified place in a field defined by a data-type. @@ -4386,15 +4332,10 @@ array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]) + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('setflags', +_array_method_doc('setflags', "*, write=None, align=None, uic=None", """ - setflags($self, /, *, write=None, align=None, uic=None) - -- - a.setflags(write=None, align=None, uic=None) Set array flags WRITEABLE, ALIGNED, WRITEBACKIFCOPY, @@ -4466,15 +4407,10 @@ Traceback (most recent call last): File "", line 1, in ValueError: cannot set WRITEBACKIFCOPY flag to True + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('sort', +_array_method_doc('sort', "axis=-1, kind=None, order=None, *, stable=None", """ - sort($self, /, axis=-1, kind=None, order=None, *, stable=None) - -- - a.sort(axis=-1, kind=None, order=None, *, stable=None) Sort an array in-place. Refer to `numpy.sort` for full documentation. @@ -4536,77 +4472,10 @@ >>> a array([(b'c', 1), (b'a', 2)], dtype=[('x', 'S1'), ('y', '>> import numpy as np - >>> a = np.array([3, 4, 2, 1]) - >>> a.partition(3) - >>> a - array([2, 1, 3, 4]) # may vary - - >>> a.partition((1, 3)) - >>> a - array([1, 2, 3, 4]) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('squeeze', +_array_method_doc('squeeze', "axis=None", """ - squeeze($self, /, axis=None) - -- - a.squeeze(axis=None) Remove axes of length one from `a`. 
@@ -4616,16 +4485,13 @@ See Also -------- numpy.squeeze : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('std', - """ - std($self, /, axis=None, dtype=None, out=None, ddof=0, **kwargs) - -- +_KWARGS_STD = "*, keepdims=, where=, mean=" - a.std(axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *, where=np._NoValue, mean=np._NoValue) +_array_method_doc('std', "axis=None, dtype=None, out=None, ddof=0, **kwargs", + f""" + a.std(axis=None, dtype=None, out=None, ddof=0, {_KWARGS_STD}) Returns the standard deviation of the array elements along given axis. @@ -4634,33 +4500,23 @@ See Also -------- numpy.std : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('sum', - """ - sum($self, /, axis=None, dtype=None, out=None, **kwargs) - -- - - a.sum(axis=None, dtype=None, out=None, keepdims=np._NoValue, initial=np._NoValue, where=np._NoValue) +_array_method_doc('var', "axis=None, dtype=None, out=None, ddof=0, **kwargs", + f""" + a.var(axis=None, dtype=None, out=None, ddof=0, {_KWARGS_STD}) - Return the sum of the array elements over the given axis. + Returns the variance of the array elements, along given axis. - Refer to `numpy.sum` for full documentation. + Refer to `numpy.var` for full documentation. See Also -------- - numpy.sum : equivalent function - - """)) - + numpy.var : equivalent function + """) -add_newdoc('numpy._core.multiarray', 'ndarray', ('swapaxes', +_array_method_doc('swapaxes', "axis1, axis2, /", """ - swapaxes($self, axis1, axis2, /) - -- - a.swapaxes(axis1, axis2, /) Return a view of the array with `axis1` and `axis2` interchanged. 
@@ -4670,15 +4526,10 @@ See Also -------- numpy.swapaxes : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('take', +_array_method_doc('take', "indices, /, axis=None, out=None, mode='raise'", """ - take($self, indices, /, axis=None, out=None, mode='raise') - -- - a.take(indices, axis=None, out=None, mode='raise') Return an array formed from the elements of `a` at the given indices. @@ -4688,15 +4539,10 @@ See Also -------- numpy.take : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('to_device', +_array_method_doc('to_device', "device, /, *, stream=None", """ - to_device($self, device, /, *, stream=None) - -- - a.to_device(device, /, *, stream=None) For Array API compatibility. Since NumPy only supports CPU arrays, this @@ -4713,16 +4559,11 @@ ------- out : Self Returns the same array. + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('tofile', +_array_method_doc('tofile', "fid, /, sep='', format='%s'", """ - tofile($self, fid, /, sep='', format='%s') - -- - - a.tofile(fid, sep='', format='%s') + a.tofile(fid, /, sep='', format='%s') Write array to a file as text or binary (default). @@ -4756,22 +4597,17 @@ file, bypassing the file object's ``write`` method. As a result, tofile cannot be used with files objects supporting compression (e.g., GzipFile) or file-like objects that do not support ``fileno()`` (e.g., BytesIO). + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('tolist', +_array_method_doc('tolist', "", """ - tolist($self, /) - -- - a.tolist() Return the array as an ``a.ndim``-levels deep nested list of Python scalars. Return a copy of the array data as a (nested) Python list. Data items are converted to the nearest compatible builtin Python type, via - the `~numpy.ndarray.item` function. + the `~numpy.ndarray.item` method. 
If ``a.ndim`` is 0, then since the depth of the nested list is 0, it will not be a list at all, but a simple Python scalar. @@ -4825,14 +4661,10 @@ TypeError: iteration over a 0-d array >>> a.tolist() 1 - """)) - + """) -add_newdoc('numpy._core.multiarray', 'ndarray', ('tobytes', +_array_method_doc('tobytes', "order='C'", """ - tobytes($self, /, order='C') - -- - a.tobytes(order='C') Construct Python bytes containing the raw data bytes in the array. @@ -4869,15 +4701,10 @@ True >>> x.tobytes('F') b'\\x00\\x00\\x02\\x00\\x01\\x00\\x03\\x00' + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('trace', +_array_method_doc('trace', "offset=0, axis1=0, axis2=1, dtype=None, out=None", """ - trace($self, /, offset=0, axis1=0, axis2=1, dtype=None, out=None) - -- - a.trace(offset=0, axis1=0, axis2=1, dtype=None, out=None) Return the sum along diagonals of the array. @@ -4887,15 +4714,10 @@ See Also -------- numpy.trace : equivalent function + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('transpose', +_array_method_doc('transpose', "*axes", """ - transpose($self, /, *axes) - -- - a.transpose(*axes) Returns a view of the array with axes transposed. @@ -4947,33 +4769,10 @@ array([1, 2, 3, 4]) >>> a.transpose() array([1, 2, 3, 4]) + """) - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('var', - """ - var($self, /, axis=None, dtype=None, out=None, ddof=0, **kwargs) - -- - - a.var(axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *, where=np._NoValue, mean=np._NoValue) - - Returns the variance of the array elements, along given axis. - - Refer to `numpy.var` for full documentation. - - See Also - -------- - numpy.var : equivalent function - - """)) - - -add_newdoc('numpy._core.multiarray', 'ndarray', ('view', +_array_method_doc('view', "*args, **kwargs", """ - view($self, /, *args, **kwargs) - -- - a.view([dtype][, type]) New view of array with the same data. 
@@ -5089,8 +4888,7 @@ [[2312, 2826], [5396, 5910]]], dtype=int16) - - """)) + """) ############################################################################## @@ -7100,21 +6898,11 @@ # Attributes -def refer_to_array_attribute(attr, method=True): - docstring = """ - Scalar {} identical to the corresponding array attribute. - - Please see `ndarray.{}`. - """ - - return attr, docstring.format("method" if method else "attribute", attr) - +add_newdoc('numpy._core.numerictypes', 'generic', ('T', + """Scalar attribute identical to `ndarray.T`.""")) -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('T', method=False)) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('base', method=False)) +add_newdoc('numpy._core.numerictypes', 'generic', ('base', + """Scalar attribute identical to `ndarray.base`.""")) add_newdoc('numpy._core.numerictypes', 'generic', ('data', """Pointer to start of data.""")) @@ -7151,150 +6939,12 @@ def refer_to_array_attribute(attr, method=True): # Methods -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('all')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('any')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('argmax')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('argmin')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('argsort')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('astype')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('byteswap')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('choose')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('clip')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('compress')) - -add_newdoc('numpy._core.numerictypes', 
'generic', - refer_to_array_attribute('conjugate')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('copy')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('cumprod')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('cumsum')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('diagonal')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('dump')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('dumps')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('fill')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('flatten')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('getfield')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('item')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('max')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('mean')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('min')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('nonzero')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('prod')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('put')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('ravel')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('repeat')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('reshape')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('resize')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('round')) - -add_newdoc('numpy._core.numerictypes', 'generic', 
- refer_to_array_attribute('searchsorted')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('setfield')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('setflags')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('sort')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('squeeze')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('std')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('sum')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('swapaxes')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('take')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('tofile')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('tolist')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('trace')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('transpose')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('var')) - -add_newdoc('numpy._core.numerictypes', 'generic', - refer_to_array_attribute('view')) - add_newdoc('numpy._core.numerictypes', 'number', ('__class_getitem__', """ - __class_getitem__(item, /) + __class_getitem__($cls, item, /) + -- + + number.__class_getitem__(item, /) Return a parametrized wrapper around the `~numpy.number` type. 
diff --git a/numpy/_core/tests/test_scalar_methods.py b/numpy/_core/tests/test_scalar_methods.py index 7f1d7d6d7bdb..b993a8f3df29 100644 --- a/numpy/_core/tests/test_scalar_methods.py +++ b/numpy/_core/tests/test_scalar_methods.py @@ -257,50 +257,72 @@ def test_array_wrap(scalar): @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") -@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") -@pytest.mark.parametrize( - "sctype", - [ - *sctypes["int"], - *sctypes["uint"], - *sctypes["float"], - *sctypes["complex"], - *sctypes["others"], - np.datetime64, - np.timedelta64, - ], -) -def test_constructor_signatures(sctype: type[np.generic]) -> None: - try: - sig = inspect.signature(sctype) - except ValueError: - pytest.fail(f"missing signature: {sctype}") - - assert sig.parameters +@pytest.mark.skipif(IS_PYPY, reason="PyPy does not modify tp_doc") +class TestSignature: + # test that scalar types have a valid __text_signature__ or __signature__ set + @pytest.mark.parametrize( + "sctype", + [ + *sctypes["int"], + *sctypes["uint"], + *sctypes["float"], + *sctypes["complex"], + *sctypes["others"], + np.datetime64, + np.timedelta64, + ], + ) + def test_constructor_signatures(self, sctype: type[np.generic]): + try: + sig = inspect.signature(sctype) + except ValueError: + pytest.fail(f"missing signature: {sctype}") -@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") -@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") -@pytest.mark.parametrize( - "sctype", - [np.integer, *sctypes["int"], *sctypes["uint"], *sctypes["float"]], -) -def test_method_signatures_is_integer(sctype: type[np.integer | np.floating]) -> None: - try: - sig = inspect.signature(sctype.is_integer) - except ValueError: - pytest.fail(f"missing signature: {sctype.__name__}.is_integer") - - assert len(sig.parameters) == 1 - assert sig.parameters["self"].kind == inspect.Parameter.POSITIONAL_ONLY + assert sig.parameters 
-@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") -@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") -@pytest.mark.parametrize("sctype", sctypes["float"]) -def test_method_signatures_as_integer_ratio(sctype: type[np.floating]) -> None: - try: - sig = inspect.signature(sctype.as_integer_ratio) - except ValueError: - pytest.fail(f"missing signature: {sctype.__name__}.as_integer_ratio") - - assert len(sig.parameters) == 1 - assert sig.parameters["self"].kind == inspect.Parameter.POSITIONAL_ONLY + @pytest.mark.parametrize( + "sctype", + [np.integer, *sctypes["int"], *sctypes["uint"], *sctypes["float"]], + ) + def test_method_signatures_is_integer(self, sctype: type[np.integer | np.floating]): + try: + sig = inspect.signature(sctype.is_integer) + except ValueError: + pytest.fail(f"missing signature: {sctype.__name__}.is_integer") + + assert len(sig.parameters) == 1 + assert sig.parameters["self"].kind == inspect.Parameter.POSITIONAL_ONLY + + @pytest.mark.parametrize("sctype", sctypes["float"]) + def test_method_signatures_as_integer_ratio(self, sctype: type[np.floating]): + try: + sig = inspect.signature(sctype.as_integer_ratio) + except ValueError: + pytest.fail(f"missing signature: {sctype.__name__}.as_integer_ratio") + + assert len(sig.parameters) == 1 + assert sig.parameters["self"].kind == inspect.Parameter.POSITIONAL_ONLY + + @pytest.mark.parametrize( + "method_name", + [ + "__array_namespace__", "__copy__", "__deepcopy__", "all", "any", "argmax", + "argmin", "argsort", "astype", "byteswap", "choose", "clip", "compress", + "conj", "conjugate", "copy", "cumprod", "cumsum", "diagonal", "dump", + "dumps", "fill", "flatten", "getfield", "item", "max", "mean", "min", + "nonzero", "prod", "put", "ravel", "repeat", "reshape", "resize", "round", + "searchsorted", "setfield", "setflags", "sort", "squeeze", "std", "sum", + "swapaxes", "take", "to_device", "tobytes", "tofile", "tolist", "trace", + "transpose", "var", "view", + ], + ) 
+ def test_array_scalar_method_signatures(self, method_name: str): + # methods shared by np.generic and np.ndarray should have the same signature + fn_generic = getattr(np.generic, method_name) + sig_generic = inspect.signature(fn_generic) + assert "self" in sig_generic.parameters + assert sig_generic.parameters["self"].kind is inspect.Parameter.POSITIONAL_ONLY + + fn_ndarray = getattr(np.ndarray, method_name) + sig_ndarray = inspect.signature(fn_ndarray) + assert sig_generic == sig_ndarray From 42d91bf1883196d59872002b8918d6460a22da79 Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 11 Nov 2025 01:33:20 +0100 Subject: [PATCH 0842/1718] TYP: ``np.generic`` method stubs updates --- numpy/__init__.pyi | 87 +++++++++++++++++++++------------------------- 1 file changed, 40 insertions(+), 47 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index b03f67b1e281..bd6c8addcb38 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -3682,21 +3682,24 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): def trace( # type: ignore[misc] self: Never, /, - offset: Never = ..., - axis1: Never = ..., - axis2: Never = ..., - dtype: Never = ..., - out: Never = ..., + offset: L[0] = 0, + axis1: L[0] = 0, + axis2: L[1] = 1, + dtype: None = None, + out: None = None, ) -> Never: ... - def diagonal(self: Never, /, offset: Never = ..., axis1: Never = ..., axis2: Never = ...) -> Never: ... # type: ignore[misc] + def diagonal(self: Never, /, offset: L[0] = 0, axis1: L[0] = 0, axis2: L[1] = 1) -> Never: ... # type: ignore[misc] def swapaxes(self: Never, axis1: Never, axis2: Never, /) -> Never: ... # type: ignore[misc] - def sort(self: Never, /, axis: Never = ..., kind: Never = ..., order: Never = ...) -> Never: ... # type: ignore[misc] + def sort(self: Never, /, axis: L[-1] = -1, kind: None = None, order: None = None, *, stable: None = None) -> Never: ... # type: ignore[misc] def nonzero(self: Never, /) -> Never: ... 
# type: ignore[misc] - def setfield(self: Never, /, val: Never, dtype: Never, offset: Never = ...) -> None: ... # type: ignore[misc] - def searchsorted(self: Never, /, v: Never, side: Never = ..., sorter: Never = ...) -> Never: ... # type: ignore[misc] + def setfield(self: Never, val: Never, /, dtype: Never, offset: L[0] = 0) -> None: ... # type: ignore[misc] + def searchsorted(self: Never, v: Never, /, side: L["left"] = "left", sorter: None = None) -> Never: ... # type: ignore[misc] # NOTE: this wont't raise, but won't do anything either - def resize(self, new_shape: L[0, -1] | tuple[L[0, -1]] | tuple[()], /, *, refcheck: builtins.bool = False) -> None: ... + @overload + def resize(self, /, *, refcheck: builtins.bool = True) -> None: ... + @overload + def resize(self, new_shape: L[0, -1] | tuple[L[0, -1]] | tuple[()], /, *, refcheck: builtins.bool = True) -> None: ... # def byteswap(self, /, inplace: L[False] = False) -> Self: ... @@ -3705,84 +3708,74 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): @overload def astype( self, + /, dtype: _DTypeLike[_ScalarT], - order: _OrderKACF = ..., - casting: _CastingKind = ..., - subok: builtins.bool = ..., - copy: builtins.bool | _CopyMode = ..., + order: _OrderKACF = "K", + casting: _CastingKind = "unsafe", + subok: builtins.bool = True, + copy: builtins.bool | _CopyMode = True, ) -> _ScalarT: ... @overload def astype( self, + /, dtype: DTypeLike | None, - order: _OrderKACF = ..., - casting: _CastingKind = ..., - subok: builtins.bool = ..., - copy: builtins.bool | _CopyMode = ..., - ) -> Any: ... + order: _OrderKACF = "K", + casting: _CastingKind = "unsafe", + subok: builtins.bool = True, + copy: builtins.bool | _CopyMode = True, + ) -> Incomplete: ... # NOTE: `view` will perform a 0D->scalar cast, # thus the array `type` is irrelevant to the output type @overload - def view(self, type: type[NDArray[Any]] = ...) -> Self: ... + def view(self, type: type[ndarray] = ...) -> Self: ... 
@overload - def view( - self, - dtype: _DTypeLike[_ScalarT], - type: type[NDArray[Any]] = ..., - ) -> _ScalarT: ... + def view(self, /, dtype: _DTypeLike[_ScalarT], type: type[ndarray] = ...) -> _ScalarT: ... @overload - def view( - self, - dtype: DTypeLike, - type: type[NDArray[Any]] = ..., - ) -> Any: ... + def view(self, /, dtype: DTypeLike, type: type[ndarray] = ...) -> Incomplete: ... @overload - def getfield( - self, - dtype: _DTypeLike[_ScalarT], - offset: SupportsIndex = ... - ) -> _ScalarT: ... + def getfield(self, /, dtype: _DTypeLike[_ScalarT], offset: SupportsIndex = 0) -> _ScalarT: ... @overload - def getfield( - self, - dtype: DTypeLike, - offset: SupportsIndex = ... - ) -> Any: ... + def getfield(self, /, dtype: DTypeLike, offset: SupportsIndex = 0) -> Incomplete: ... @overload def take( self, indices: _IntLike_co, - axis: SupportsIndex | None = ..., + /, + axis: SupportsIndex | None = None, out: None = None, - mode: _ModeKind = ..., + mode: _ModeKind = "raise", ) -> Self: ... @overload def take( self, indices: _ArrayLikeInt_co, - axis: SupportsIndex | None = ..., + /, + axis: SupportsIndex | None = None, out: None = None, - mode: _ModeKind = ..., + mode: _ModeKind = "raise", ) -> NDArray[Self]: ... @overload def take( self, indices: _ArrayLikeInt_co, - axis: SupportsIndex | None = ..., + /, + axis: SupportsIndex | None = None, *, out: _ArrayT, - mode: _ModeKind = ..., + mode: _ModeKind = "raise", ) -> _ArrayT: ... @overload def take( self, indices: _ArrayLikeInt_co, + /, axis: SupportsIndex | None, out: _ArrayT, - mode: _ModeKind = ..., + mode: _ModeKind = "raise", ) -> _ArrayT: ... def repeat(self, repeats: _ArrayLikeInt_co, /, axis: SupportsIndex | None = None) -> ndarray[tuple[int], dtype[Self]]: ... 
From 45454c634628be64412035a89d10746a91ba199e Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 11 Nov 2025 01:33:53 +0100 Subject: [PATCH 0843/1718] TYP: update ``_core._add_newdocs`` stubs --- numpy/_core/_add_newdocs.pyi | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/numpy/_core/_add_newdocs.pyi b/numpy/_core/_add_newdocs.pyi index b23c3b1adedd..2d004814fdcf 100644 --- a/numpy/_core/_add_newdocs.pyi +++ b/numpy/_core/_add_newdocs.pyi @@ -1,3 +1,2 @@ +from .function_base import add_newdoc as add_newdoc from .overrides import get_array_function_like_doc as get_array_function_like_doc - -def refer_to_array_attribute(attr: str, method: bool = True) -> tuple[str, str]: ... From a2d99b745e397e9e3ea0cf8a4033b7290bb3be3b Mon Sep 17 00:00:00 2001 From: Marten Henric van Kerkwijk Date: Fri, 26 Sep 2025 20:24:43 -0400 Subject: [PATCH 0844/1718] ENH: Add fast path in ufuncs for numerical scalars. This adds a fast check for numerical scalars, improving the speed by about a factor of 6, with essentially no cost for arrays. At the moment, just for single-input, single-output ufuncs, with the assumption that the output has the same type as the input (if that fails, the regular path is taken). 
--- .../upcoming_changes/29819.improvement.rst | 6 + numpy/_core/src/umath/ufunc_object.c | 148 +++++++++++++++++- 2 files changed, 149 insertions(+), 5 deletions(-) create mode 100644 doc/release/upcoming_changes/29819.improvement.rst diff --git a/doc/release/upcoming_changes/29819.improvement.rst b/doc/release/upcoming_changes/29819.improvement.rst new file mode 100644 index 000000000000..fa4ac07f2a08 --- /dev/null +++ b/doc/release/upcoming_changes/29819.improvement.rst @@ -0,0 +1,6 @@ +Performance increase for scalar calculations +-------------------------------------------- +The speed of calculations on scalars has been improved by about a factor 6 for +ufuncs that take only one input (like ``np.sin(scalar)``), reducing the speed +difference from their ``math`` equivalents from a factor 19 to 3 (the speed +for arrays is left unchanged). diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index 0bdc14e04cde..a9f00f351459 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -66,6 +66,7 @@ #include "npy_static_data.h" #include "multiarraymodule.h" #include "number.h" +#include "scalartypes.h" // for is_anyscalar_exact and scalar_value /********** PRINTF DEBUG TRACING **************/ #define NPY_UF_DBG_TRACING 0 @@ -3721,7 +3722,7 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, if (ret == NULL) { goto fail; } - + Py_XDECREF(out); Py_DECREF(signature[0]); @@ -4270,6 +4271,135 @@ replace_with_wrapped_result_and_return(PyUFuncObject *ufunc, return NULL; } +/* + * Check whether the input object is a known scalar and whether the ufunc has + * a suitable inner loop for it, which takes and returns the data type of the + * input (this function is not called if output or any other argument was given). + * If a loop was found, call it and store the result. + * + * Returns -2 if a short-cut is not possible, 0 on success and -1 on error. 
+ */
+static int
+try_trivial_scalar_call(
+ PyUFuncObject *ufunc, PyObject *const obj, PyObject **result)
+{
+ assert(ufunc->nin == 1 && ufunc->nout == 1 && !ufunc->core_enabled);
+ npy_clongdouble cin, cout; // aligned storage, using longest type.
+ char *in = (char *)&cin, *out = (char *)&cout;
+ char *data[] = {in, out};
+ int ret = -2;
+ PyArray_Descr *dt;
+ /*
+ * For supported input, get input pointer and descriptor. Otherwise, bail.
+ */
+ if (obj == Py_False || obj == Py_True) {
+ *(npy_bool *)in = (obj == Py_True);
+ dt = PyArray_DescrFromType(NPY_BOOL);
+ }
+ else if (PyFloat_CheckExact(obj)) {
+ *(double *)in = PyFloat_AS_DOUBLE(obj);
+ dt = PyArray_DescrFromType(NPY_FLOAT64);
+ }
+ else if (PyLong_CheckExact(obj)) {
+ int overflow;
+ npy_intp val = PyLong_AsLongAndOverflow(obj, &overflow);
+ if (overflow) {
+ return -2; // bail, main code perhaps deals with this.
+ }
+ if (error_converting(val)) {
+ return -1; // should never happen; pass on it if does.
+ }
+ *(npy_intp *)in = val;
+ dt = PyArray_DescrFromType(NPY_INTP);
+ }
+ else if (PyComplex_CheckExact(obj)) {
+ Py_complex oop = PyComplex_AsCComplex(obj);
+ if (error_converting(oop.real)) {
+ return -1; // should never happen; pass on it if does.
+ }
+ *(double *)in = oop.real;
+ *(double *)(in+sizeof(double)) = oop.imag;
+ dt = PyArray_DescrFromType(NPY_COMPLEX128);
+ }
+ else if (is_anyscalar_exact(obj)) {
+ dt = PyArray_DescrFromScalar(obj);
+ if (!PyDataType_ISNUMBER(dt)) {
+ goto bail;
+ }
+ data[0] = scalar_value(obj, dt);
+ }
+ else {
+ return -2;
+ }
+ /*
+ * Check the ufunc supports our descriptor, bailing (return -2) if not.
+ */
+ // Try getting info from the (private) cache. Fall back if not found,
+ // so that the dtype gets registered and things will work next time.
+ PyArray_DTypeMeta *op_dtypes[2] = {NPY_DTYPE(dt), NULL};
+ PyObject *info = PyArrayIdentityHash_GetItem( // borrowed reference.
+ (PyArrayIdentityHash *)ufunc->_dispatch_cache, + (PyObject **)op_dtypes); + if (info == NULL) { + goto bail; + } + // Check actual dtype is correct (can be wrong with promotion). + PyObject *all_dtypes = PyTuple_GET_ITEM(info, 0); + if ((PyTuple_GET_ITEM(all_dtypes, 0) != (PyObject *)NPY_DTYPE(dt)) || + (PyTuple_GET_ITEM(all_dtypes, 1) != (PyObject *)NPY_DTYPE(dt))) { + goto bail; + } + // Get method, bailing if not an arraymethod (e.g., a promotor). + PyArrayMethodObject *method = (PyArrayMethodObject *)PyTuple_GET_ITEM(info, 1); + if (!PyObject_TypeCheck(method, &PyArrayMethod_Type)) { + goto bail; + } + // Get loop, requiring that the output and input dtype are the same. + PyArrayMethod_Context context; + PyArray_Descr *descrs[2] = {dt, dt}; + NPY_context_init(&context, descrs); + context.caller = (PyObject *)ufunc; + context.method = method; + npy_intp strides[2] = {0, 0}; // 0 ensures scalar math, not SIMD for half. + PyArrayMethod_StridedLoop *strided_loop; + NpyAuxData *auxdata = NULL; + NPY_ARRAYMETHOD_FLAGS flags = 0; + if (method->get_strided_loop(&context, 1, 0, strides, + &strided_loop, &auxdata, &flags) < 0) { + ret = -1; // Should not happen, so raise error if it does anyway. + goto bail; + } + /* + * Call loop with single element, checking floating point errors. + */ + if (!(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) { + npy_clear_floatstatus(); + } + npy_intp n = 1; + ret = strided_loop(&context, data, &n, strides, auxdata); + NPY_AUXDATA_FREE(auxdata); + if (ret == 0) { + if (!(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) { + // Check for any unmasked floating point errors (note: faster + // than _check_ufunc_fperr as one doesn't need mask up front). + int fpe_errors = npy_get_floatstatus(); + if (fpe_errors) { + if (PyUFunc_GiveFloatingpointErrors( + ufunc_get_name_cstr(ufunc), fpe_errors) < 0) { + ret = -1; // Real error, falling back would not help. 
+ goto bail; + } + } + } + *result = PyArray_Scalar(out, dt, NULL); + if (*result == NULL) { + ret = -1; // Real error (should never happen). + } + } + bail: + Py_DECREF(dt); + return ret; +} /* * Main ufunc call implementation. @@ -4288,6 +4418,14 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, int errval; int nin = ufunc->nin, nout = ufunc->nout, nop = ufunc->nargs; + if (len_args == 1 && kwnames == NULL && !PyArray_Check(args[0]) + && nin == 1 && nout == 1 && !ufunc->core_enabled) { + // Possibly scalar input, try the fast path, falling back on failure. + PyObject *result = NULL; + if (try_trivial_scalar_call(ufunc, args[0], &result) != -2) { + return result; + } + } /* All following variables are cleared in the `fail` error path */ ufunc_full_args full_args = {NULL, NULL}; PyArrayObject *wheremask = NULL; @@ -4301,7 +4439,7 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, return NULL; } memset(scratch_objs, 0, sizeof(void *) * (nop * 4 + 2)); - + PyArray_DTypeMeta **signature = (PyArray_DTypeMeta **)scratch_objs; PyArrayObject **operands = (PyArrayObject **)(signature + nop); PyArray_DTypeMeta **operand_DTypes = (PyArray_DTypeMeta **)(operands + nop + 1); @@ -4366,17 +4504,17 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, /* Extra positional args but no keywords */ /* DEPRECATED NumPy 2.4, 2025-08 */ if ((PyObject *)ufunc == n_ops.maximum || (PyObject *)ufunc == n_ops.minimum) { - + if (DEPRECATE( "Passing more than 2 positional arguments to np.maximum and np.minimum " - "is deprecated. If you meant to use the third argument as an output, " + "is deprecated. If you meant to use the third argument as an output, " "use the `out` keyword argument instead. 
If you hoped to work with " "more than 2 inputs, combine them into a single array and get the extrema " "for the relevant axis.") < 0) { return NULL; } } - + if (all_none) { Py_SETREF(full_args.out, NULL); } From c86f0420333c1cc97ec000944f45021027ca58aa Mon Sep 17 00:00:00 2001 From: Veit Heller Date: Tue, 11 Nov 2025 13:41:16 +0100 Subject: [PATCH 0845/1718] ENH,MAINT: rewrite np.fix to use np.trunc internally (#30197) * DEP: rewrite np.fix to use np.trunc internally * TST: add trunc to object for ufunclike tests --- numpy/lib/_ufunclike_impl.py | 10 +--------- numpy/typing/tests/data/pass/ufunclike.py | 3 +++ 2 files changed, 4 insertions(+), 9 deletions(-) diff --git a/numpy/lib/_ufunclike_impl.py b/numpy/lib/_ufunclike_impl.py index 79cec5aa08b6..569840697d81 100644 --- a/numpy/lib/_ufunclike_impl.py +++ b/numpy/lib/_ufunclike_impl.py @@ -56,15 +56,7 @@ def fix(x, out=None): array([ 2., 2., -2., -2.]) """ - # promote back to an array if flattened - res = nx.ceil(x, out=... if out is None else out) - res = nx.floor(x, out=res, where=nx.greater_equal(x, 0)) - - # when no out argument is passed and no subclasses are involved, flatten - # scalars - if out is None and type(res) is nx.ndarray: - res = res[()] - return res + return nx.trunc(x, out=out) @array_function_dispatch(_dispatcher, verify=False, module='numpy') diff --git a/numpy/typing/tests/data/pass/ufunclike.py b/numpy/typing/tests/data/pass/ufunclike.py index c02a68cc062c..f69ec8eaf4ed 100644 --- a/numpy/typing/tests/data/pass/ufunclike.py +++ b/numpy/typing/tests/data/pass/ufunclike.py @@ -12,6 +12,9 @@ def __ceil__(self) -> Object: def __floor__(self) -> Object: return self + def __trunc__(self) -> Object: + return self + def __ge__(self, value: object) -> bool: return True From 3c5121a299045ef5242c9ea984338553859daf4a Mon Sep 17 00:00:00 2001 From: Veit Heller Date: Tue, 11 Nov 2025 15:29:17 +0100 Subject: [PATCH 0846/1718] DOC: update result_type docs to link to promotion rules --- 
numpy/_core/multiarray.py | 36 ++---------------------------------- 1 file changed, 2 insertions(+), 34 deletions(-) diff --git a/numpy/_core/multiarray.py b/numpy/_core/multiarray.py index 5599494720b6..4393f096fa9d 100644 --- a/numpy/_core/multiarray.py +++ b/numpy/_core/multiarray.py @@ -701,19 +701,8 @@ def result_type(*arrays_and_dtypes): result_type(*arrays_and_dtypes) Returns the type that results from applying the NumPy - type promotion rules to the arguments. - - Type promotion in NumPy works similarly to the rules in languages - like C++, with some slight differences. When both scalars and - arrays are used, the array's type takes precedence and the actual value - of the scalar is taken into account. - - For example, calculating 3*a, where a is an array of 32-bit floats, - intuitively should result in a 32-bit float output. If the 3 is a - 32-bit integer, the NumPy rules indicate it can't convert losslessly - into a 32-bit float, so a 64-bit float should be the result type. - By examining the value of the constant, '3', we see that it fits in - an 8-bit integer, which can be cast losslessly into the 32-bit float. + type promotion rules to the arguments + (`see also `_). Parameters ---------- @@ -729,27 +718,6 @@ def result_type(*arrays_and_dtypes): -------- dtype, promote_types, min_scalar_type, can_cast - Notes - ----- - The specific algorithm used is as follows. - - Categories are determined by first checking which of boolean, - integer (int/uint), or floating point (float/complex) the maximum - kind of all the arrays and the scalars are. - - If there are only scalars or the maximum category of the scalars - is higher than the maximum category of the arrays, - the data types are combined with :func:`promote_types` - to produce the return value. - - Otherwise, `min_scalar_type` is called on each scalar, and - the resulting data types are all combined with :func:`promote_types` - to produce the return value. 
- - The set of int values is not a subset of the uint values for types - with the same number of bits, something not reflected in - :func:`min_scalar_type`, but handled as a special case in `result_type`. - Examples -------- >>> import numpy as np From be14bb7574f7513646d073c72c53f73da60481c2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 11 Nov 2025 17:05:47 +0000 Subject: [PATCH 0847/1718] MAINT: Bump actions/dependency-review-action from 4.8.1 to 4.8.2 Bumps [actions/dependency-review-action](https://github.com/actions/dependency-review-action) from 4.8.1 to 4.8.2. - [Release notes](https://github.com/actions/dependency-review-action/releases) - [Commits](https://github.com/actions/dependency-review-action/compare/40c09b7dc99638e5ddb0bfd91c1673effc064d8a...3c4e3dcb1aa7874d2c16be7d79418e9b7efd6261) --- updated-dependencies: - dependency-name: actions/dependency-review-action dependency-version: 4.8.2 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- .github/workflows/dependency-review.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml index 797918b2ceff..b2fd18051ddd 100644 --- a/.github/workflows/dependency-review.yml +++ b/.github/workflows/dependency-review.yml @@ -19,6 +19,6 @@ jobs: with: persist-credentials: false - name: 'Dependency Review' - uses: actions/dependency-review-action@40c09b7dc99638e5ddb0bfd91c1673effc064d8a # v4.8.1 + uses: actions/dependency-review-action@3c4e3dcb1aa7874d2c16be7d79418e9b7efd6261 # v4.8.2 with: allow-ghsas: GHSA-cx63-2mw6-8hw5 From c46282261eabe3c61288d854e6a316ad64222a9c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 11 Nov 2025 17:05:55 +0000 Subject: [PATCH 0848/1718] MAINT: Bump astral-sh/setup-uv from 7.1.2 to 7.1.3 Bumps [astral-sh/setup-uv](https://github.com/astral-sh/setup-uv) from 7.1.2 to 7.1.3. - [Release notes](https://github.com/astral-sh/setup-uv/releases) - [Commits](https://github.com/astral-sh/setup-uv/compare/85856786d1ce8acfbcc2f13a5f3fbd6b938f9f41...5a7eac68fb9809dea845d802897dc5c723910fa3) --- updated-dependencies: - dependency-name: astral-sh/setup-uv dependency-version: 7.1.3 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- .github/workflows/mypy.yml | 2 +- .github/workflows/stubtest.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml index 5fbc4afba8e9..0e9e63d53d1a 100644 --- a/.github/workflows/mypy.yml +++ b/.github/workflows/mypy.yml @@ -58,7 +58,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: astral-sh/setup-uv@85856786d1ce8acfbcc2f13a5f3fbd6b938f9f41 # v7.1.2 + - uses: astral-sh/setup-uv@5a7eac68fb9809dea845d802897dc5c723910fa3 # v7.1.3 with: python-version: ${{ matrix.os_python[1] }} activate-environment: true diff --git a/.github/workflows/stubtest.yml b/.github/workflows/stubtest.yml index f5b2fbfbbbae..43e60fae80b9 100644 --- a/.github/workflows/stubtest.yml +++ b/.github/workflows/stubtest.yml @@ -43,7 +43,7 @@ jobs: fetch-tags: true persist-credentials: false - - uses: astral-sh/setup-uv@85856786d1ce8acfbcc2f13a5f3fbd6b938f9f41 # v7.1.2 + - uses: astral-sh/setup-uv@5a7eac68fb9809dea845d802897dc5c723910fa3 # v7.1.3 with: python-version: ${{ matrix.py }} activate-environment: true From 576e0a10f95e190577f73a760a61abfe7803d2e4 Mon Sep 17 00:00:00 2001 From: M Bussonnier Date: Tue, 11 Nov 2025 18:50:01 +0100 Subject: [PATCH 0849/1718] DOC: Fix some broken refs and Typos. (#30196) The default role is set to autolink (which fallbacks to emphasis if not resolved) And I believe there is typo in ALIGNMENT vs ALIGNENT. I would suggest moving away from autolink for default role, and have it raise an error if not resolved. This would also make sure users use double backticks for code. 
[skip actions][skip azp][skip cirrus] --- doc/source/reference/c-api/array.rst | 2 +- doc/source/reference/c-api/types-and-structures.rst | 2 +- doc/source/user/c-info.beyond-basics.rst | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst index 7a1bace2c63e..ebed18de2196 100644 --- a/doc/source/reference/c-api/array.rst +++ b/doc/source/reference/c-api/array.rst @@ -784,7 +784,7 @@ cannot not be accessed directly. Allows setting of the itemsize, this is *only* relevant for string/bytes datatypes as it is the current pattern to define one with a new size. -.. c:function:: npy_intp PyDataType_ALIGNENT(PyArray_Descr *descr) +.. c:function:: npy_intp PyDataType_ALIGNMENT(PyArray_Descr *descr) The alignment of the datatype. diff --git a/doc/source/reference/c-api/types-and-structures.rst b/doc/source/reference/c-api/types-and-structures.rst index 49704913037b..a039af130860 100644 --- a/doc/source/reference/c-api/types-and-structures.rst +++ b/doc/source/reference/c-api/types-and-structures.rst @@ -365,7 +365,7 @@ PyArrayDescr_Type and PyArray_Descr places an item of this type: ``offsetof(struct {char c; type v;}, v)`` - See `PyDataType_ALIGNMENT` for a way to access this field in a NumPy 1.x + See :c:func:`PyDataType_ALIGNMENT` for a way to access this field in a NumPy 1.x compatible way. .. c:member:: PyObject *metadata diff --git a/doc/source/user/c-info.beyond-basics.rst b/doc/source/user/c-info.beyond-basics.rst index 7bf793ae2e47..eadeafe51e8e 100644 --- a/doc/source/user/c-info.beyond-basics.rst +++ b/doc/source/user/c-info.beyond-basics.rst @@ -268,7 +268,7 @@ specifies your data-type. This type number should be stored and made available by your module so that other modules can use it to recognize your data-type. -Note that this API is inherently thread-unsafe. See `thread_safety` for more +Note that this API is inherently thread-unsafe. 
See :ref:`thread_safety` for more details about thread safety in NumPy. From 330e2c17f868dd54837c726ac43b9d96a2cb2f2a Mon Sep 17 00:00:00 2001 From: Veit Heller Date: Wed, 12 Nov 2025 12:07:01 +0100 Subject: [PATCH 0850/1718] DOC: simplify link in docstring for result_type [skip azp][skip actions][skip cirrus] --- numpy/_core/multiarray.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/numpy/_core/multiarray.py b/numpy/_core/multiarray.py index 4393f096fa9d..3b1eed0e82e4 100644 --- a/numpy/_core/multiarray.py +++ b/numpy/_core/multiarray.py @@ -701,8 +701,7 @@ def result_type(*arrays_and_dtypes): result_type(*arrays_and_dtypes) Returns the type that results from applying the NumPy - type promotion rules to the arguments - (`see also `_). + :ref:`type promotion ` rules to the arguments. Parameters ---------- From eba60dc3e61528953b2fa0a85d4d7ee009814797 Mon Sep 17 00:00:00 2001 From: Aadya Chinubhai <77720426+aadya940@users.noreply.github.com> Date: Wed, 12 Nov 2025 03:48:33 -0800 Subject: [PATCH 0851/1718] ENH: Reduce compute time for `tobytes` in non-contiguos paths (#30170) * Add optimal copy path for non-contiguos arrays in ToString C Impl. 
* Add tests for obytes new path * Add imports * fix comments * fix memory issues and add benchmarks * run ruff --fix * simplify function * minor fix * minor typos and move tests --- benchmarks/benchmarks/bench_core.py | 4 ++ numpy/_core/src/multiarray/convert.c | 82 +++++++++++++++++----------- numpy/_core/tests/test_multiarray.py | 13 ++++- 3 files changed, 67 insertions(+), 32 deletions(-) diff --git a/benchmarks/benchmarks/bench_core.py b/benchmarks/benchmarks/bench_core.py index a9a6c88b87a0..ea7aae007fdc 100644 --- a/benchmarks/benchmarks/bench_core.py +++ b/benchmarks/benchmarks/bench_core.py @@ -14,6 +14,7 @@ def setup(self): self.l_view = [memoryview(a) for a in self.l] self.l10x10 = np.ones((10, 10)) self.float64_dtype = np.dtype(np.float64) + self.arr = np.arange(10000).reshape(100, 100) def time_array_1(self): np.array(1) @@ -48,6 +49,9 @@ def time_array_l_view(self): def time_can_cast(self): np.can_cast(self.l10x10, self.float64_dtype) + def time_tobytes_noncontiguous(self): + self.arr.T.tobytes() + def time_can_cast_same_kind(self): np.can_cast(self.l10x10, self.float64_dtype, casting="same_kind") diff --git a/numpy/_core/src/multiarray/convert.c b/numpy/_core/src/multiarray/convert.c index 983d9bc19ce6..ccd883f2b0f4 100644 --- a/numpy/_core/src/multiarray/convert.c +++ b/numpy/_core/src/multiarray/convert.c @@ -335,11 +335,7 @@ NPY_NO_EXPORT PyObject * PyArray_ToString(PyArrayObject *self, NPY_ORDER order) { npy_intp numbytes; - npy_intp i; - char *dptr; - int elsize; PyObject *ret; - PyArrayIterObject *it; if (order == NPY_ANYORDER) order = PyArray_ISFORTRAN(self) ? 
NPY_FORTRANORDER : NPY_CORDER; @@ -354,41 +350,65 @@ PyArray_ToString(PyArrayObject *self, NPY_ORDER order) numbytes = PyArray_NBYTES(self); if ((PyArray_IS_C_CONTIGUOUS(self) && (order == NPY_CORDER)) || (PyArray_IS_F_CONTIGUOUS(self) && (order == NPY_FORTRANORDER))) { - ret = PyBytes_FromStringAndSize(PyArray_DATA(self), (Py_ssize_t) numbytes); + return PyBytes_FromStringAndSize(PyArray_DATA(self), (Py_ssize_t) numbytes); } - else { - PyObject *new; - if (order == NPY_FORTRANORDER) { - /* iterators are always in C-order */ - new = PyArray_Transpose(self, NULL); - if (new == NULL) { - return NULL; - } + + /* Avoid Ravel where possible for fewer copies. */ + if (!PyDataType_REFCHK(PyArray_DESCR(self)) && + ((PyArray_DESCR(self)->flags & NPY_NEEDS_INIT) == 0)) { + + /* Allocate final Bytes Object */ + ret = PyBytes_FromStringAndSize(NULL, (Py_ssize_t) numbytes); + if (ret == NULL) { + return NULL; } - else { - Py_INCREF(self); - new = (PyObject *)self; + + /* Writable Buffer */ + char* dest = PyBytes_AS_STRING(ret); + + int flags = NPY_ARRAY_WRITEABLE; + if (order == NPY_FORTRANORDER) { + flags |= NPY_ARRAY_F_CONTIGUOUS; } - it = (PyArrayIterObject *)PyArray_IterNew(new); - Py_DECREF(new); - if (it == NULL) { + + Py_INCREF(PyArray_DESCR(self)); + /* Array view */ + PyArrayObject *dest_array = (PyArrayObject *)PyArray_NewFromDescr( + &PyArray_Type, + PyArray_DESCR(self), + PyArray_NDIM(self), + PyArray_DIMS(self), + NULL, // strides + dest, + flags, + NULL + ); + + if (dest_array == NULL) { + Py_DECREF(ret); return NULL; } - ret = PyBytes_FromStringAndSize(NULL, (Py_ssize_t) numbytes); - if (ret == NULL) { - Py_DECREF(it); + + /* Copy directly from source to destination with proper ordering */ + if (PyArray_CopyInto(dest_array, self) < 0) { + Py_DECREF(dest_array); + Py_DECREF(ret); return NULL; } - dptr = PyBytes_AS_STRING(ret); - i = it->size; - elsize = PyArray_ITEMSIZE(self); - while (i--) { - memcpy(dptr, it->dataptr, elsize); - dptr += elsize; - 
PyArray_ITER_NEXT(it); - } - Py_DECREF(it); + + Py_DECREF(dest_array); + return ret; + + } + + /* Non-contiguous, Has References and/or Init Path. */ + PyArrayObject *contig = (PyArrayObject *)PyArray_Ravel(self, order); + if (contig == NULL) { + return NULL; } + + ret = PyBytes_FromStringAndSize(PyArray_DATA(contig), numbytes); + Py_DECREF(contig); return ret; } diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 2193887cb43f..051a81d99d1f 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -3835,6 +3835,18 @@ class ArraySubclass(np.ndarray): assert_(isinstance(a.ravel('A'), ArraySubclass)) assert_(isinstance(a.ravel('K'), ArraySubclass)) + @pytest.mark.parametrize("shape", [(3, 224, 224), (8, 512, 512)]) + def test_tobytes_no_copy_fastpath(self, shape): + # Test correctness of non-contiguous paths for `tobytes` + rng = np.random.default_rng(0) + arr = rng.standard_normal(shape, dtype=np.float32) + noncontig = arr.transpose(1, 2, 0) + + # correctness + expected = np.ascontiguousarray(noncontig).tobytes() + got = noncontig.tobytes() + assert got == expected + def test_swapaxes(self): a = np.arange(1 * 2 * 3 * 4).reshape(1, 2, 3, 4).copy() idx = np.indices(a.shape) @@ -10546,7 +10558,6 @@ def test_getfield(): pytest.raises(ValueError, a.getfield, 'uint8', 16) pytest.raises(ValueError, a.getfield, 'uint64', 0) - class TestViewDtype: """ Verify that making a view of a non-contiguous array works as expected. From 41c06bc51f5848925ad647bd8dfd1bfc7aee4f64 Mon Sep 17 00:00:00 2001 From: Veit Heller Date: Wed, 12 Nov 2025 12:51:00 +0100 Subject: [PATCH 0852/1718] ENH: Detect Fortran vs C order in array_assign_boolean_subscript (#30201) This PR fixes gh-30156 by detecting when to set KEEPORDER order in array_assign_boolean_subscript and adds a benchmark for it. 
--- benchmarks/benchmarks/bench_indexing.py | 16 ++++++++++++++++ numpy/_core/src/multiarray/mapping.c | 2 ++ 2 files changed, 18 insertions(+) diff --git a/benchmarks/benchmarks/bench_indexing.py b/benchmarks/benchmarks/bench_indexing.py index 81c812e81e19..f1153489f515 100644 --- a/benchmarks/benchmarks/bench_indexing.py +++ b/benchmarks/benchmarks/bench_indexing.py @@ -84,6 +84,22 @@ def time_assign_cast(self, ndim): arr[indx] = val +class BooleanAssignmentOrder(Benchmark): + params = ['C', 'F'] + param_names = ['order'] + + def setup(self, order): + shape = (64, 64, 64) + # emulate gh-30156: boolean assignment into a Fortran/C array + self.base = np.zeros(shape, dtype=np.uint32, order=order) + mask = np.random.RandomState(0).rand(*self.base.shape) > 0.5 + self.mask = mask.copy(order) + self.value = np.uint32(7) + + def time_boolean_assign_scalar(self, order): + self.base[self.mask] = self.value + + class IndexingSeparate(Benchmark): def setup(self): self.tmp_dir = mkdtemp() diff --git a/numpy/_core/src/multiarray/mapping.c b/numpy/_core/src/multiarray/mapping.c index 997f21928db3..d6128f74621a 100644 --- a/numpy/_core/src/multiarray/mapping.c +++ b/numpy/_core/src/multiarray/mapping.c @@ -1173,6 +1173,8 @@ array_assign_boolean_subscript(PyArrayObject *self, } else { v_stride = 0; + /* If the same value is repeated, iteration order does not matter */ + order = NPY_KEEPORDER; } v_data = PyArray_DATA(v); From 011385f3a8d52a32bba62fcf2ac380e92083bf0f Mon Sep 17 00:00:00 2001 From: Marten Henric van Kerkwijk Date: Fri, 10 Oct 2025 16:19:24 -0400 Subject: [PATCH 0853/1718] MAINT: Avoid "fooling deallocator" in array_descr_set Found this caused problems while trying to implement storing the dimensions and strides on the instance. Splitting it out since it is generally bad form to assume too much about the inner workings of, e.g., the deallocator, and it has no real benefits beyond trying to be clever. 
--- numpy/_core/src/multiarray/getset.c | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/numpy/_core/src/multiarray/getset.c b/numpy/_core/src/multiarray/getset.c index 16876639897c..1aff38476d50 100644 --- a/numpy/_core/src/multiarray/getset.c +++ b/numpy/_core/src/multiarray/getset.c @@ -500,15 +500,23 @@ array_descr_set(PyArrayObject *self, PyObject *arg, void *NPY_UNUSED(ignored)) if (temp == NULL) { return -1; } + /* create new dimensions cache and fill it */ + npy_intp new_nd = PyArray_NDIM(temp); + npy_intp *new_dims = npy_alloc_cache_dim(2 * new_nd); + if (new_dims == NULL) { + Py_DECREF(temp); + PyErr_NoMemory(); + return -1; + } + memcpy(new_dims, PyArray_DIMS(temp), new_nd * sizeof(npy_intp)); + memcpy(new_dims + new_nd, PyArray_STRIDES(temp), new_nd * sizeof(npy_intp)); + /* Update self with new cache */ npy_free_cache_dim_array(self); - ((PyArrayObject_fields *)self)->dimensions = PyArray_DIMS(temp); - ((PyArrayObject_fields *)self)->nd = PyArray_NDIM(temp); - ((PyArrayObject_fields *)self)->strides = PyArray_STRIDES(temp); + ((PyArrayObject_fields *)self)->nd = new_nd; + ((PyArrayObject_fields *)self)->dimensions = new_dims; + ((PyArrayObject_fields *)self)->strides = new_dims + new_nd; newtype = PyArray_DESCR(temp); - Py_INCREF(PyArray_DESCR(temp)); - /* Fool deallocator not to delete these*/ - ((PyArrayObject_fields *)temp)->nd = 0; - ((PyArrayObject_fields *)temp)->dimensions = NULL; + Py_INCREF(newtype); Py_DECREF(temp); } From 930b0c241b53f71458dd949e809c0c04af559cc8 Mon Sep 17 00:00:00 2001 From: Marten Henric van Kerkwijk Date: Fri, 10 Oct 2025 16:53:26 -0400 Subject: [PATCH 0854/1718] MAINT,BUG: create PyArray_Resize_int, which returns int, and use it internally. Also fix the minimum allocation used inside, which should be 1, not elsize. And use the default function to deallocate dimension & strides. 
--- doc/source/reference/c-api/array.rst | 25 ++++++------ numpy/_core/src/multiarray/methods.c | 8 ++-- numpy/_core/src/multiarray/shape.c | 59 +++++++++++++++++----------- numpy/_core/src/multiarray/shape.h | 6 +++ 4 files changed, 56 insertions(+), 42 deletions(-) diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst index ebed18de2196..d07a00ebde73 100644 --- a/doc/source/reference/c-api/array.rst +++ b/doc/source/reference/c-api/array.rst @@ -2248,19 +2248,18 @@ Shape Manipulation PyArrayObject* self, PyArray_Dims* newshape, int refcheck, \ NPY_ORDER fortran) - Equivalent to :meth:`ndarray.resize` (*self*, *newshape*, refcheck - ``=`` *refcheck*, order= fortran ). This function only works on - single-segment arrays. It changes the shape of *self* inplace and - will reallocate the memory for *self* if *newshape* has a - different total number of elements then the old shape. If - reallocation is necessary, then *self* must own its data, have - *self* - ``>base==NULL``, have *self* - ``>weakrefs==NULL``, and - (unless refcheck is 0) not be referenced by any other array. - The fortran argument can be :c:data:`NPY_ANYORDER`, :c:data:`NPY_CORDER`, - or :c:data:`NPY_FORTRANORDER`. It currently has no effect. Eventually - it could be used to determine how the resize operation should view - the data when constructing a differently-dimensioned array. - Returns None on success and NULL on error. + Equivalent to :meth:`ndarray.resize` (*self*, *newshape*, *refcheck*). + This function only works on single-segment arrays. It changes the shape of + *self* inplace and will reallocate the memory for *self* if *newshape* has + a different total number of elements then the old shape. If reallocation is + necessary, then *self* must own its data, have *self* - ``>base==NULL``, + have *self* - ``>weakrefs==NULL``, and (unless refcheck is 0) not be + referenced by any other array. 
The fortran argument can be + :c:data:`NPY_ANYORDER`, :c:data:`NPY_CORDER`, or + :c:data:`NPY_FORTRANORDER`. It currently has no effect. Eventually it + could be used to determine how the resize operation should view the data + when constructing a differently-dimensioned array. Returns None on success + and NULL on error. .. c:function:: PyObject* PyArray_Transpose( \ PyArrayObject* self, PyArray_Dims* permute) diff --git a/numpy/_core/src/multiarray/methods.c b/numpy/_core/src/multiarray/methods.c index 26b5a6e179e0..6dcc349dcd03 100644 --- a/numpy/_core/src/multiarray/methods.c +++ b/numpy/_core/src/multiarray/methods.c @@ -1175,8 +1175,7 @@ array_resize(PyArrayObject *self, PyObject *args, PyObject *kwds) Py_ssize_t size = PyTuple_Size(args); int refcheck = 1; PyArray_Dims newshape; - PyObject *ret, *obj; - + PyObject *obj; if (!NpyArg_ParseKeywords(kwds, "|i", kwlist, &refcheck)) { return NULL; @@ -1199,12 +1198,11 @@ array_resize(PyArrayObject *self, PyObject *args, PyObject *kwds) return NULL; } - ret = PyArray_Resize(self, &newshape, refcheck, NPY_ANYORDER); + int ret = PyArray_Resize_int(self, &newshape, refcheck); npy_free_cache_dim_obj(newshape); - if (ret == NULL) { + if (ret < 0) { return NULL; } - Py_DECREF(ret); Py_RETURN_NONE; } diff --git a/numpy/_core/src/multiarray/shape.c b/numpy/_core/src/multiarray/shape.c index 83de9f19a574..c98dba9abcb2 100644 --- a/numpy/_core/src/multiarray/shape.c +++ b/numpy/_core/src/multiarray/shape.c @@ -26,15 +26,8 @@ static int _attempt_nocopy_reshape(PyArrayObject *self, int newnd, const npy_intp *newdims, npy_intp *newstrides, int is_f_order); -/*NUMPY_API - * Resize (reallocate data). Only works if nothing else is referencing this - * array and it is contiguous. If refcheck is 0, then the reference count is - * not checked and assumed to be 1. You still must own this data and have no - * weak-references and no base object. 
- */ -NPY_NO_EXPORT PyObject * -PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, - NPY_ORDER NPY_UNUSED(order)) +NPY_NO_EXPORT int +PyArray_Resize_int(PyArrayObject *self, PyArray_Dims *newshape, int refcheck) { npy_intp oldnbytes, newnbytes; npy_intp oldsize, newsize; @@ -48,7 +41,7 @@ PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, if (!PyArray_ISONESEGMENT(self)) { PyErr_SetString(PyExc_ValueError, "resize only works on single-segment arrays"); - return NULL; + return -1; } /* Compute total size of old and new arrays. The new size might overflow */ @@ -62,10 +55,11 @@ PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, if (new_dimensions[k] < 0) { PyErr_SetString(PyExc_ValueError, "negative dimensions not allowed"); - return NULL; + return -1; } if (npy_mul_sizes_with_overflow(&newsize, newsize, new_dimensions[k])) { - return PyErr_NoMemory(); + PyErr_NoMemory(); + return -1; } } @@ -73,14 +67,15 @@ PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, elsize = PyArray_ITEMSIZE(self); oldnbytes = oldsize * elsize; if (npy_mul_sizes_with_overflow(&newnbytes, newsize, elsize)) { - return PyErr_NoMemory(); + PyErr_NoMemory(); + return -1; } if (oldnbytes != newnbytes) { if (!(PyArray_FLAGS(self) & NPY_ARRAY_OWNDATA)) { PyErr_SetString(PyExc_ValueError, "cannot resize this array: it does not own its data"); - return NULL; + return -1; } if (PyArray_BASE(self) != NULL @@ -89,14 +84,14 @@ PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, "cannot resize an array that " "references or is referenced\n" "by another array in this way. 
Use the np.resize function."); - return NULL; + return -1; } if (refcheck) { #ifdef PYPY_VERSION PyErr_SetString(PyExc_ValueError, "cannot resize an array with refcheck=True on PyPy.\n" "Use the np.resize function or refcheck=False"); - return NULL; + return -1; #else refcnt = Py_REFCNT(self); #endif /* PYPY_VERSION */ @@ -110,7 +105,7 @@ PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, "references or is referenced\n" "by another array in this way.\n" "Use the np.resize function or refcheck=False"); - return NULL; + return -1; } /* Reallocate space if needed - allocating 0 is forbidden */ @@ -119,24 +114,24 @@ PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, /* This can happen if someone arbitrarily sets NPY_ARRAY_OWNDATA */ PyErr_SetString(PyExc_RuntimeError, "no memory handler found but OWNDATA flag set"); - return NULL; + return -1; } if (newnbytes < oldnbytes) { /* Clear now removed data (if dtype has references) */ if (PyArray_ClearBuffer( PyArray_DESCR(self), PyArray_BYTES(self) + newnbytes, elsize, oldsize-newsize, PyArray_ISALIGNED(self)) < 0) { - return NULL; + return -1; } } new_data = PyDataMem_UserRENEW(PyArray_DATA(self), - newnbytes == 0 ? elsize : newnbytes, + newnbytes == 0 ? 
1 : newnbytes, handler); if (new_data == NULL) { PyErr_SetString(PyExc_MemoryError, "cannot allocate memory for array"); - return NULL; + return -1; } ((PyArrayObject_fields *)self)->data = new_data; @@ -148,7 +143,7 @@ PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, int aligned = PyArray_ISALIGNED(self); if (PyArray_ZeroContiguousBuffer(PyArray_DESCR(self), data, stride, size, aligned) < 0) { - return NULL; + return -1; } } } @@ -162,7 +157,7 @@ PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, if (dimptr == NULL) { PyErr_SetString(PyExc_MemoryError, "cannot allocate memory for array"); - return NULL; + return -1; } ((PyArrayObject_fields *)self)->dimensions = dimptr; ((PyArrayObject_fields *)self)->strides = dimptr + new_nd; @@ -175,11 +170,27 @@ PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, memmove(PyArray_STRIDES(self), new_strides, new_nd*sizeof(npy_intp)); } else { - PyDimMem_FREE(((PyArrayObject_fields *)self)->dimensions); + npy_free_cache_dim_array(self); ((PyArrayObject_fields *)self)->nd = 0; ((PyArrayObject_fields *)self)->dimensions = NULL; ((PyArrayObject_fields *)self)->strides = NULL; } + return 0; +} + +/*NUMPY_API + * Resize (reallocate data). Only works if nothing else is referencing this + * array and it is contiguous. If refcheck is 0, then the reference count is + * not checked and assumed to be 1. You still must own this data and have no + * weak-references and no base object. 
+ */ +NPY_NO_EXPORT PyObject * +PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, + NPY_ORDER NPY_UNUSED(order)) +{ + if (PyArray_Resize_int(self, newshape, refcheck) < 0) { + return NULL; + } Py_RETURN_NONE; } diff --git a/numpy/_core/src/multiarray/shape.h b/numpy/_core/src/multiarray/shape.h index a9b91feb0b4a..5e87116f08df 100644 --- a/numpy/_core/src/multiarray/shape.h +++ b/numpy/_core/src/multiarray/shape.h @@ -3,6 +3,12 @@ #include "conversion_utils.h" +/* + * Internal version of PyArray_Resize that returns -1 on error, 0 otherwise. + */ +NPY_NO_EXPORT int +PyArray_Resize_int(PyArrayObject *self, PyArray_Dims *newshape, int refcheck); + /* * Creates a sorted stride perm matching the KEEPORDER behavior * of the NpyIter object. Because this operates based on multiple From c70bf1e7f45d53dce81970ca565a7235a5b445db Mon Sep 17 00:00:00 2001 From: Marten Henric van Kerkwijk Date: Fri, 10 Oct 2025 16:28:29 -0400 Subject: [PATCH 0855/1718] MAINT: Use PyArray_Resize to grow arrays in FromFile and FromIter Found this while trying to implement storage on the instance, but factored it out since it seems better form to just have one place in numpy where arrays are resized inplace (which ideally would not happen at all, but here is quite logical). For PyArray_FromFile, there is little benefit beyond that, but in PyArray_FromIter, it allows removing quite a bit of code that duplicated what PyArray_Resize does. 
--- numpy/_core/src/multiarray/ctors.c | 92 ++++++------------- numpy/_core/src/multiarray/textreading/rows.c | 27 ++---- 2 files changed, 36 insertions(+), 83 deletions(-) diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index 0a66b7fa3be1..3a43e1bd983b 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -2390,7 +2390,7 @@ PyArray_FromInterface(PyObject *origin) goto fail; } if (use_scalar_assign) { - /* + /* * NOTE(seberg): I honestly doubt anyone is using this scalar path and we * could probably just deprecate (or just remove it in a 3.0 version). */ @@ -3732,21 +3732,20 @@ PyArray_FromFile(FILE *fp, PyArray_Descr *dtype, npy_intp num, char *sep) } if (((npy_intp) nread) < num) { /* - * Realloc memory for smaller number of elements, use original dtype - * which may have include a subarray (and is used for `nread`). + * Resize array to smaller number of elements. Note that original + * dtype may have included a subarray, so we may not be 1-d. 
*/ - const size_t nsize = PyArray_MAX(nread,1) * dtype->elsize; - char *tmp; - - /* The handler is always valid */ - if((tmp = PyDataMem_UserRENEW(PyArray_DATA(ret), nsize, - PyArray_HANDLER(ret))) == NULL) { + npy_intp dims[NPY_MAXDIMS]; + dims[0] = (npy_intp)nread; + for (int i = 1; i < PyArray_NDIM(ret); i++) { + dims[i] = PyArray_DIMS(ret)[i]; + } + PyArray_Dims new_dims = {dims, PyArray_NDIM(ret)}; + if (PyArray_Resize_int(ret, &new_dims, 0) < 0) { Py_DECREF(dtype); Py_DECREF(ret); - return PyErr_NoMemory(); + return NULL; } - ((PyArrayObject_fields *)ret)->data = tmp; - PyArray_DIMS(ret)[0] = nread; } Py_DECREF(dtype); return (PyObject *)ret; @@ -3998,6 +3997,7 @@ PyArray_FromIter(PyObject *obj, PyArray_Descr *dtype, npy_intp count) PyObject *iter = NULL; PyArrayObject *ret = NULL; npy_intp i, elsize, elcount; + npy_intp dims[NPY_MAXDIMS]; if (dtype == NULL) { return NULL; @@ -4037,6 +4037,9 @@ PyArray_FromIter(PyObject *obj, PyArray_Descr *dtype, npy_intp count) if (ret == NULL) { goto done; } + /* set up for possible resizing */ + memcpy(dims, PyArray_DIMS(ret), PyArray_NDIM(ret)*sizeof(npy_intp)); + PyArray_Dims new_dims = {dims, PyArray_NDIM(ret)}; char *item = PyArray_BYTES(ret); for (i = 0; i < count || count == -1; i++, item += elsize) { @@ -4044,14 +4047,12 @@ PyArray_FromIter(PyObject *obj, PyArray_Descr *dtype, npy_intp count) if (value == NULL) { if (PyErr_Occurred()) { /* Fetching next item failed perhaps due to exhausting iterator */ - goto done; + goto fail; } break; } if (NPY_UNLIKELY(i >= elcount) && elsize != 0) { - char *new_data = NULL; - npy_intp nbytes; /* Grow PyArray_DATA(ret): this is similar for the strategy for PyListObject, but we use @@ -4060,31 +4061,18 @@ PyArray_FromIter(PyObject *obj, PyArray_Descr *dtype, npy_intp count) be suitable to reuse here. */ elcount = (i >> 1) + (i < 4 ? 
4 : 2) + i; - if (!npy_mul_sizes_with_overflow(&nbytes, elcount, elsize)) { - /* The handler is always valid */ - new_data = PyDataMem_UserRENEW( - PyArray_BYTES(ret), nbytes, PyArray_HANDLER(ret)); - } - if (new_data == NULL) { - PyErr_SetString(PyExc_MemoryError, - "cannot allocate array memory"); + dims[0] = elcount; + if (PyArray_Resize_int(ret, &new_dims, 0) < 0) { Py_DECREF(value); - goto done; + goto fail; } - ((PyArrayObject_fields *)ret)->data = new_data; - /* resize array for cleanup: */ - PyArray_DIMS(ret)[0] = elcount; /* Reset `item` pointer to point into realloc'd chunk */ - item = new_data + i * elsize; - if (PyDataType_FLAGCHK(dtype, NPY_NEEDS_INIT)) { - /* Initialize new chunk: */ - memset(item, 0, nbytes - i * elsize); - } + item = ((char *)PyArray_DATA(ret)) + i * elsize; } if (PyArray_Pack(dtype, item, value) < 0) { Py_DECREF(value); - goto done; + goto fail; } Py_DECREF(value); } @@ -4093,46 +4081,22 @@ PyArray_FromIter(PyObject *obj, PyArray_Descr *dtype, npy_intp count) PyErr_Format(PyExc_ValueError, "iterator too short: Expected %zd but iterator had only %zd " "items.", (Py_ssize_t)count, (Py_ssize_t)i); - goto done; + goto fail; } /* * Realloc the data so that don't keep extra memory tied up and fix * the arrays first dimension (there could be more than one). */ - if (i == 0 || elsize == 0) { - /* The size cannot be zero for realloc. */ + dims[0] = i; + if (!PyArray_Resize_int(ret, &new_dims, 0)) { + goto done; } - else { - /* Resize array to actual final size (it may be too large) */ - /* The handler is always valid */ - char *new_data = PyDataMem_UserRENEW( - PyArray_DATA(ret), i * elsize, PyArray_HANDLER(ret)); - - if (new_data == NULL) { - PyErr_SetString(PyExc_MemoryError, - "cannot allocate array memory"); - goto done; - } - ((PyArrayObject_fields *)ret)->data = new_data; - if (count < 0) { - /* - * If the count was smaller than zero, the strides may be all 0 - * (even in the later dimensions for `count < 0`! 
- * Thus, fix all strides here again for C-contiguity. - */ - int oflags; - _array_fill_strides( - PyArray_STRIDES(ret), PyArray_DIMS(ret), PyArray_NDIM(ret), - PyArray_ITEMSIZE(ret), NPY_ARRAY_C_CONTIGUOUS, &oflags); - PyArray_STRIDES(ret)[0] = elsize; - assert(oflags & NPY_ARRAY_C_CONTIGUOUS); - } - } - PyArray_DIMS(ret)[0] = i; + fail: + Py_CLEAR(ret); - done: + done: Py_XDECREF(iter); Py_XDECREF(dtype); if (PyErr_Occurred()) { diff --git a/numpy/_core/src/multiarray/textreading/rows.c b/numpy/_core/src/multiarray/textreading/rows.c index 2fa1fb948553..90eea23b22e2 100644 --- a/numpy/_core/src/multiarray/textreading/rows.c +++ b/numpy/_core/src/multiarray/textreading/rows.c @@ -118,7 +118,7 @@ create_conv_funcs( if (error) { goto error; } - + return conv_funcs; error: @@ -175,8 +175,6 @@ read_rows(stream *s, npy_intp row_size = out_descr->elsize; PyObject **conv_funcs = NULL; - bool needs_init = PyDataType_FLAGCHK(out_descr, NPY_NEEDS_INIT); - int ndim = homogeneous ? 2 : 1; npy_intp result_shape[2] = {0, 1}; @@ -311,9 +309,6 @@ read_rows(stream *s, if (data_array == NULL) { goto error; } - if (needs_init) { - memset(PyArray_BYTES(data_array), 0, PyArray_NBYTES(data_array)); - } } else { assert(max_rows >=0); @@ -354,22 +349,16 @@ read_rows(stream *s, "providing a maximum number of rows to read may help."); goto error; } - - char *new_data = PyDataMem_UserRENEW( - PyArray_BYTES(data_array), alloc_size ? alloc_size : 1, - PyArray_HANDLER(data_array)); - if (new_data == NULL) { - PyErr_NoMemory(); + /* + * Resize the array. 
+ */ + result_shape[0] = new_rows; + PyArray_Dims new_dims = {dims, 2}; + if (PyArray_Resize(data_array, &new_dims, 0, 0) == NULL) { goto error; } - /* Replace the arrays data since it may have changed */ - ((PyArrayObject_fields *)data_array)->data = new_data; - ((PyArrayObject_fields *)data_array)->dimensions[0] = new_rows; - data_ptr = new_data + row_count * row_size; + data_ptr = PyArray_DATA(data_array) + row_count * row_size; data_allocated_rows = new_rows; - if (needs_init) { - memset(data_ptr, '\0', (new_rows - row_count) * row_size); - } } for (Py_ssize_t i = 0; i < actual_num_fields; ++i) { From 9b8027f0c9e6cc3132d54f133d1bf925435e5250 Mon Sep 17 00:00:00 2001 From: Marten Henric van Kerkwijk Date: Sat, 11 Oct 2025 22:07:32 -0400 Subject: [PATCH 0856/1718] MAINT: use PyArray_Resize inside text reader. --- numpy/_core/src/multiarray/textreading/rows.c | 22 +++++++------------ 1 file changed, 8 insertions(+), 14 deletions(-) diff --git a/numpy/_core/src/multiarray/textreading/rows.c b/numpy/_core/src/multiarray/textreading/rows.c index 90eea23b22e2..7f3797b58928 100644 --- a/numpy/_core/src/multiarray/textreading/rows.c +++ b/numpy/_core/src/multiarray/textreading/rows.c @@ -8,6 +8,7 @@ #include "numpy/npy_3kcompat.h" #include "npy_pycompat.h" #include "alloc.h" +#include "shape.h" // For PyArray_Resize_int #include #include @@ -177,6 +178,7 @@ read_rows(stream *s, int ndim = homogeneous ? 2 : 1; npy_intp result_shape[2] = {0, 1}; + PyArray_Dims new_dims = {result_shape, ndim}; /* for resizing */ bool data_array_allocated = data_array == NULL; /* Make sure we own `data_array` for the purpose of error handling */ @@ -353,11 +355,10 @@ read_rows(stream *s, * Resize the array. 
*/ result_shape[0] = new_rows; - PyArray_Dims new_dims = {dims, 2}; - if (PyArray_Resize(data_array, &new_dims, 0, 0) == NULL) { + if (PyArray_Resize_int(data_array, &new_dims, 0) < 0) { goto error; } - data_ptr = PyArray_DATA(data_array) + row_count * row_size; + data_ptr = (char *)PyArray_DATA(data_array) + row_count * row_size; data_allocated_rows = new_rows; } @@ -463,20 +464,13 @@ read_rows(stream *s, /* * Note that if there is no data, `data_array` may still be NULL and - * row_count is 0. In that case, always realloc just in case. + * row_count is 0. In that case, always resize just in case. */ if (data_array_allocated && data_allocated_rows != row_count) { - size_t size = row_count * row_size; - char *new_data = PyDataMem_UserRENEW( - PyArray_BYTES(data_array), size ? size : 1, - PyArray_HANDLER(data_array)); - if (new_data == NULL) { - Py_DECREF(data_array); - PyErr_NoMemory(); - return NULL; + result_shape[0] = row_count; + if (PyArray_Resize_int(data_array, &new_dims, 0) < 0) { + goto error; } - ((PyArrayObject_fields *)data_array)->data = new_data; - ((PyArrayObject_fields *)data_array)->dimensions[0] = row_count; } /* From fb22f55e6fae12836eab6ec7e34b7c1d8999155e Mon Sep 17 00:00:00 2001 From: "Christine P. Chai" Date: Wed, 12 Nov 2025 19:07:11 -0800 Subject: [PATCH 0857/1718] DOC: an Mercurial -> a Mercurial [skip actions][skip azp][skip cirrus] --- numpy/distutils/misc_util.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py index 09145e1ddf52..ca7bcf0fbdd0 100644 --- a/numpy/distutils/misc_util.py +++ b/numpy/distutils/misc_util.py @@ -2069,7 +2069,7 @@ def make_hg_version_py(self, delete=True): If __hg_version__.py existed before, nothing is done. This is intended for working with source directories that are - in an Mercurial repository. + in a Mercurial repository. 
""" target = njoin(self.local_path, '__hg_version__.py') revision = self._get_hg_revision(self.local_path) From ae098dd478faa191eac81b42c968a5549962a450 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 13 Nov 2025 11:36:16 -0700 Subject: [PATCH 0858/1718] MAINT: Bump pypa/cibuildwheel from 3.2.1 to 3.3.0 (#30209) Bumps [pypa/cibuildwheel](https://github.com/pypa/cibuildwheel) from 3.2.1 to 3.3.0. - [Release notes](https://github.com/pypa/cibuildwheel/releases) - [Changelog](https://github.com/pypa/cibuildwheel/blob/main/docs/changelog.md) - [Commits](https://github.com/pypa/cibuildwheel/compare/9c00cb4f6b517705a3794b22395aedc36257242c...63fd63b352a9a8bdcc24791c9dbee952ee9a8abc) --- updated-dependencies: - dependency-name: pypa/cibuildwheel dependency-version: 3.3.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/emscripten.yml | 2 +- .github/workflows/wheels.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index 159f6b719fde..c765855825a3 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -34,7 +34,7 @@ jobs: fetch-tags: true persist-credentials: false - - uses: pypa/cibuildwheel@9c00cb4f6b517705a3794b22395aedc36257242c # v3.2.1 + - uses: pypa/cibuildwheel@63fd63b352a9a8bdcc24791c9dbee952ee9a8abc # v3.3.0 env: CIBW_PLATFORM: pyodide CIBW_BUILD: cp312-* diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 8049b79edef0..1ceefc2e540d 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -94,7 +94,7 @@ jobs: fi - name: Build wheels - uses: pypa/cibuildwheel@9c00cb4f6b517705a3794b22395aedc36257242c # v3.2.1 + uses: 
pypa/cibuildwheel@63fd63b352a9a8bdcc24791c9dbee952ee9a8abc # v3.3.0 env: CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} From fabf1844667c81344ec0d0ab4958cde9cb9a845c Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Thu, 13 Nov 2025 19:55:45 +0100 Subject: [PATCH 0859/1718] DOC: Release notes for the runtime signature changes (#30208) --- doc/release/upcoming_changes/30208.highlight.rst | 2 ++ doc/release/upcoming_changes/30208.improvement.rst | 12 ++++++++++++ 2 files changed, 14 insertions(+) create mode 100644 doc/release/upcoming_changes/30208.highlight.rst create mode 100644 doc/release/upcoming_changes/30208.improvement.rst diff --git a/doc/release/upcoming_changes/30208.highlight.rst b/doc/release/upcoming_changes/30208.highlight.rst new file mode 100644 index 000000000000..c13c46c056cc --- /dev/null +++ b/doc/release/upcoming_changes/30208.highlight.rst @@ -0,0 +1,2 @@ +* Runtime signature introspection support has been significantly improved. See the + corresponding improvement note for details. diff --git a/doc/release/upcoming_changes/30208.improvement.rst b/doc/release/upcoming_changes/30208.improvement.rst new file mode 100644 index 000000000000..065e91eca33d --- /dev/null +++ b/doc/release/upcoming_changes/30208.improvement.rst @@ -0,0 +1,12 @@ +Runtime signature introspection support has been significantly improved +----------------------------------------------------------------------- +Many NumPy functions, classes, and methods that previously raised ``ValueError`` when passed +to ``inspect.signature()`` now return meaningful signatures. This improves support for runtime type +checking, IDE autocomplete, documentation generation, and runtime introspection capabilities across +the NumPy API. 
+ +Over three hundred classes and functions have been updated in total, including, but not limited to, +core classes such as `ndarray`, `generic`, `dtype`, `ufunc`, `broadcast`, `nditer`, etc., +most methods of `ndarray` and scalar types, array constructor functions (`array`, `empty`, +`arange`, `fromiter`, etc.), and many other commonly used functions, including `dot`, `concat`, +`where`, `bincount`, `can_cast`, and numerous others. From 7c8625e6d56f6938529346b6d24dc34a7d9f5446 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 13 Nov 2025 20:24:56 +0100 Subject: [PATCH 0860/1718] Move test to Python --- numpy/__init__.py | 17 ++++++ numpy/_core/src/common/blas_utils.c | 52 +++---------------- numpy/_core/src/common/blas_utils.h | 9 ++-- numpy/_core/src/multiarray/multiarraymodule.c | 23 ++++---- numpy/testing/_private/utils.py | 2 +- 5 files changed, 41 insertions(+), 62 deletions(-) diff --git a/numpy/__init__.py b/numpy/__init__.py index a0178b211258..ef7c1ed7678a 100644 --- a/numpy/__init__.py +++ b/numpy/__init__.py @@ -870,6 +870,23 @@ def _mac_os_check(): del w del _mac_os_check + def blas_fpe_check(): + # Check if BLAS adds spurious FPEs, mostly seen on M4 arms with Accelerate. + with errstate(all='raise'): + x = ones((20, 20)) + try: + x @ x + except FloatingPointError: + res = _core._multiarray_umath._blas_supports_fpe(False) + if res: # res was not modified (hardcoded to True for now) + warnings.warn( + "Spurious warnings given by blas but suppression not " + "set up on this platform. 
Please open a NumPy issue.", + UserWarning, stacklevel=2) + + blas_fpe_check() + del blas_fpe_check + def hugepage_setup(): """ We usually use madvise hugepages support, but on some old kernels it diff --git a/numpy/_core/src/common/blas_utils.c b/numpy/_core/src/common/blas_utils.c index 365289067b88..cbf8e0dc05c5 100644 --- a/numpy/_core/src/common/blas_utils.c +++ b/numpy/_core/src/common/blas_utils.c @@ -1,5 +1,3 @@ -#include - #include "numpy/npy_math.h" // npy_get_floatstatus_barrier #include "numpy/numpyconfig.h" // NPY_VISIBILITY_HIDDEN #include "blas_utils.h" @@ -17,46 +15,7 @@ /* * Static variable to cache runtime check of BLAS FPE support. */ - static bool blas_supports_fpe = true; - -/* - * ARM Scalable Matrix Extension (SME) raises all floating-point error flags - * when it's used regardless of values or operations. As a consequence, - * when SME is used, all FPE state is lost and special handling is needed. - * - * For NumPy, SME is not currently used directly, but can be used via - * BLAS / LAPACK libraries. This function does a runtime check for whether - * BLAS / LAPACK can use SME and special handling around FPE is required. - * - * This may be an Accelerate bug (at least OpenBLAS consider it that way) - * but when we find an ARM system with SVE we do a runtime check for whether - * FPEs are spuriously given. - */ -static inline int -set_BLAS_causes_spurious_FPEs(void) -{ - // These are all small, so just work on stack to not worry about error - // handling. 
- double *x = PyMem_Calloc(20 * 20 * 3, sizeof(double)); - if (x == NULL) { - PyErr_NoMemory(); - return -1; - } - double *y = x + 20 * 20; - double *res = y + 20 * 20; - - npy_clear_floatstatus_barrier((char *)x); - - CBLAS_FUNC(cblas_dgemm)( - CblasRowMajor, CblasNoTrans, CblasNoTrans, 20, 20, 20, 1., - x, 20, y, 20, 0., res, 20); - PyMem_Free(x); - - int fpe_status = npy_get_floatstatus_barrier((char *)x); - // Entries were all zero, so we shouldn't see any FPEs - blas_supports_fpe = fpe_status != 0; - return 0; -} +static bool blas_supports_fpe = true; #endif // NPY_BLAS_CHECK_FPE_SUPPORT @@ -71,13 +30,14 @@ npy_blas_supports_fpe(void) #endif } -NPY_VISIBILITY_HIDDEN int -npy_blas_init(void) +NPY_VISIBILITY_HIDDEN bool +npy_set_blas_supports_fpe(bool value) { #if NPY_BLAS_CHECK_FPE_SUPPORT - return set_BLAS_causes_spurious_FPEs(); + blas_supports_fpe = (bool)value; + return blas_supports_fpe; #endif - return 0; + return true; // ignore input not set up on this platform } NPY_VISIBILITY_HIDDEN int diff --git a/numpy/_core/src/common/blas_utils.h b/numpy/_core/src/common/blas_utils.h index 3f5bb735281d..840c4940a87e 100644 --- a/numpy/_core/src/common/blas_utils.h +++ b/numpy/_core/src/common/blas_utils.h @@ -15,11 +15,6 @@ #define NPY_BLAS_CHECK_FPE_SUPPORT 0 #endif -/* Initialize BLAS environment, if needed - */ -NPY_VISIBILITY_HIDDEN int -npy_blas_init(void); - /* Runtime check if BLAS supports floating-point errors. * true - BLAS supports FPE and one can rely on them to indicate errors * false - BLAS does not support FPE. Special handling needed for FPE state @@ -27,6 +22,10 @@ npy_blas_init(void); NPY_VISIBILITY_HIDDEN bool npy_blas_supports_fpe(void); +/* Allow setting the BLAS FPE flag from Python.*/ +NPY_VISIBILITY_HIDDEN bool +npy_set_blas_supports_fpe(bool value); + /* If BLAS supports FPE, exactly the same as npy_get_floatstatus_barrier(). * Otherwise, we can't rely on FPE state and need special handling. 
*/ diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index 44ac8a678bbb..73ef0760d979 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -4429,7 +4429,6 @@ _populate_finfo_constants(PyObject *NPY_UNUSED(self), PyObject *args) } - static PyObject * _set_numpy_warn_if_no_mem_policy(PyObject *NPY_UNUSED(self), PyObject *arg) { @@ -4449,12 +4448,20 @@ _set_numpy_warn_if_no_mem_policy(PyObject *NPY_UNUSED(self), PyObject *arg) static PyObject * -_blas_supports_fpe(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args)) { - if (npy_blas_supports_fpe()) { - Py_RETURN_TRUE; +_blas_supports_fpe(PyObject *NPY_UNUSED(self), PyObject *arg) { + if (arg == Py_None) { + return PyBool_FromLong(npy_blas_supports_fpe()); + } + else if (arg == Py_True) { + return PyBool_FromLong(npy_set_blas_supports_fpe(true)); + } + else if (arg == Py_False) { + return PyBool_FromLong(npy_set_blas_supports_fpe(false)); } else { - Py_RETURN_FALSE; + PyErr_SetString(PyExc_TypeError, + "BLAS FPE support must be None, True, or False"); + return NULL; } } @@ -4700,7 +4707,7 @@ static struct PyMethodDef array_module_methods[] = { {"_set_madvise_hugepage", (PyCFunction)_set_madvise_hugepage, METH_O, NULL}, {"_blas_supports_fpe", (PyCFunction)_blas_supports_fpe, - METH_NOARGS, NULL}, + METH_O, "BLAS FPE support pass None, True, or False and returns new value"}, {"_reload_guard", (PyCFunction)_reload_guard, METH_NOARGS, "Give a warning on reload and big warning in sub-interpreters."}, @@ -4917,10 +4924,6 @@ _multiarray_umath_exec(PyObject *m) { return -1; } - if (npy_blas_init() < 0) { - return -1; - } - #if defined(MS_WIN64) && defined(__GNUC__) PyErr_WarnEx(PyExc_Warning, "Numpy built with MINGW-W64 on Windows 64 bits is experimental, " \ diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index 967d67e14a13..ed928a5ec7b4 100644 --- 
a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -90,7 +90,7 @@ class KnownFailureException(Exception): IS_PYPY = sys.implementation.name == 'pypy' IS_PYSTON = hasattr(sys, "pyston_version_info") HAS_REFCOUNT = getattr(sys, 'getrefcount', None) is not None and not IS_PYSTON -BLAS_SUPPORTS_FPE = np._core._multiarray_umath._blas_supports_fpe() +BLAS_SUPPORTS_FPE = np._core._multiarray_umath._blas_supports_fpe(None) HAS_LAPACK64 = numpy.linalg._umath_linalg._ilp64 From 96fc947fdecddf33bb21fec32ac94adfb561d03f Mon Sep 17 00:00:00 2001 From: mattip Date: Fri, 14 Nov 2025 10:43:24 +0200 Subject: [PATCH 0861/1718] DOC: fix links for newly rebuilt numpy-tutorials site [skip azp][skip cirrus][skip actions] --- doc/source/user/how-to-how-to.rst | 2 +- doc/source/user/quickstart.rst | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/user/how-to-how-to.rst b/doc/source/user/how-to-how-to.rst index 91a5c2afe09b..07640f881141 100644 --- a/doc/source/user/how-to-how-to.rst +++ b/doc/source/user/how-to-how-to.rst @@ -105,7 +105,7 @@ deep dives intended to give understanding rather than immediate assistance, and *References*, which give complete, authoritative data on some concrete part of NumPy (like its API) but aren't obligated to paint a broader picture. -For more on tutorials, see :doc:`numpy-tutorials:content/tutorial-style-guide` +For more on tutorials, see :doc:`numpy-tutorials:tutorial-style-guide` ****************************************************************************** Is this page an example of a how-to? 
diff --git a/doc/source/user/quickstart.rst b/doc/source/user/quickstart.rst index 3f97f005898b..ef4a0467c706 100644 --- a/doc/source/user/quickstart.rst +++ b/doc/source/user/quickstart.rst @@ -1479,4 +1479,4 @@ Further reading - `SciPy Tutorial `__ - `SciPy Lecture Notes `__ - A `matlab, R, IDL, NumPy/SciPy dictionary `__ -- :doc:`tutorial-svd ` +- :doc:`tutorial-svd ` From 7e17ceb3384facaad9cc7d3ba93ec995e9c9219a Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Fri, 14 Nov 2025 10:58:26 +0100 Subject: [PATCH 0862/1718] Report FPE ignoring in matmul in `show_runtime()` --- numpy/lib/_utils_impl.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/numpy/lib/_utils_impl.py b/numpy/lib/_utils_impl.py index 2e1ee23d7d58..164aa4ee3d8c 100644 --- a/numpy/lib/_utils_impl.py +++ b/numpy/lib/_utils_impl.py @@ -61,6 +61,11 @@ def show_runtime(): "not_found": features_not_found } }) + config_found.append({ + "ignore_floating_point_errors_in_matmul": + not np._core._multiarray_umath._blas_supports_fpe(None), + }) + try: from threadpoolctl import threadpool_info config_found.extend(threadpool_info()) From c3e5a2039558e7bc2c17982dc5bdbba2611f7326 Mon Sep 17 00:00:00 2001 From: yasiribmcon <107119183+yasiribmcon@users.noreply.github.com> Date: Fri, 14 Nov 2025 22:41:26 +0530 Subject: [PATCH 0863/1718] BUG: Fix build on s390x with clang (#30214) For clang 20+ (_VEC_ >= 10305), fallback check relied on macro definitions for vector operations, but clang provides regular functions instead. This caused the fallback to redefine them as macros, overriding the actual vector operation logic. 
Signed-off-by: Yasir Ashfaq --- numpy/_core/src/common/simd/vec/utils.h | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/numpy/_core/src/common/simd/vec/utils.h b/numpy/_core/src/common/simd/vec/utils.h index f8b28cfebd8c..7e4a7b8de8fa 100644 --- a/numpy/_core/src/common/simd/vec/utils.h +++ b/numpy/_core/src/common/simd/vec/utils.h @@ -25,14 +25,16 @@ #ifndef vec_neg #define vec_neg(a) (-(a)) #endif - #ifndef vec_and - #define vec_and(a, b) ((a) & (b)) // Vector AND - #endif - #ifndef vec_or - #define vec_or(a, b) ((a) | (b)) // Vector OR - #endif - #ifndef vec_xor - #define vec_xor(a, b) ((a) ^ (b)) // Vector XOR + #if !(defined(__clang__) && __VEC__ >= 10305) + #ifndef vec_and + #define vec_and(a, b) ((a) & (b)) // Vector AND + #endif + #ifndef vec_or + #define vec_or(a, b) ((a) | (b)) // Vector OR + #endif + #ifndef vec_xor + #define vec_xor(a, b) ((a) ^ (b)) // Vector XOR + #endif #endif #ifndef vec_sl #define vec_sl(a, b) ((a) << (b)) // Vector Shift Left From c83c349d20ca009743b72d1cf4618a89d3c7dfdb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 14 Nov 2025 11:36:11 -0700 Subject: [PATCH 0864/1718] MAINT: Bump github/codeql-action from 4.31.2 to 4.31.3 (#30219) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 4.31.2 to 4.31.3. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/0499de31b99561a6d14a36a5f662c2a54f91beee...014f16e7ab1402f30e7c3329d33797e7948572db) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 4.31.3 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 45b88b851312..1546cdc10adf 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@0499de31b99561a6d14a36a5f662c2a54f91beee # v4.31.2 + uses: github/codeql-action/init@014f16e7ab1402f30e7c3329d33797e7948572db # v4.31.3 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@0499de31b99561a6d14a36a5f662c2a54f91beee # v4.31.2 + uses: github/codeql-action/autobuild@014f16e7ab1402f30e7c3329d33797e7948572db # v4.31.3 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@0499de31b99561a6d14a36a5f662c2a54f91beee # v4.31.2 + uses: github/codeql-action/analyze@014f16e7ab1402f30e7c3329d33797e7948572db # v4.31.3 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 0a65e932b12d..227f2f3788b0 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
- name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@0499de31b99561a6d14a36a5f662c2a54f91beee # v2.1.27 + uses: github/codeql-action/upload-sarif@014f16e7ab1402f30e7c3329d33797e7948572db # v2.1.27 with: sarif_file: results.sarif From 73af2357f38fdef50b1e2919fa2bfa3c012edce2 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 15 Nov 2025 04:58:51 +0100 Subject: [PATCH 0865/1718] STY: fix ``ma.MaskedAArray.tolist`` docstring indentation --- numpy/ma/core.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index a374410209f5..9b27d90852e7 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -6653,14 +6653,14 @@ def filled(self, fill_value=None): def tolist(self): """ - Transforms the mvoid object into a tuple. + Transforms the mvoid object into a tuple. - Masked fields are replaced by None. + Masked fields are replaced by None. - Returns - ------- - returned_tuple - Tuple of fields + Returns + ------- + returned_tuple + Tuple of fields """ _mask = self._mask if _mask is nomask: From 106a7c7051989c4416ef871f7159c8dbec7649a3 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 15 Nov 2025 06:29:12 +0100 Subject: [PATCH 0866/1718] TYP: stub ``ma.core.get_masked_subclass`` --- numpy/ma/core.pyi | 3 +++ 1 file changed, 3 insertions(+) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 15e6b58762b4..31d66fff0ab8 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -574,6 +574,9 @@ def fix_invalid( fill_value: _ScalarLike_co | None = None, ) -> _MaskedArray[Incomplete]: ... +# +def get_masked_subclass(*arrays: object) -> type[MaskedArray]: ... + # @overload def getdata(a: np.ndarray[_ShapeT, _DTypeT], subok: bool = True) -> np.ndarray[_ShapeT, _DTypeT]: ... 
From cc0f02c7346e3dcc7fdd1395ab0c3f2e5dc9a05b Mon Sep 17 00:00:00 2001 From: Andrew Nelson Date: Sat, 15 Nov 2025 18:49:12 +1100 Subject: [PATCH 0867/1718] CI: fixes https://github.com/numpy/numpy/security/code-scanning/364 --- .github/workflows/emscripten.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index c765855825a3..0d1936b55b60 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -10,6 +10,9 @@ on: - '**.md' - '**.rst' +permissions: + contents: read # to fetch code (actions/checkout) + env: FORCE_COLOR: 3 @@ -20,8 +23,6 @@ concurrency: jobs: build-wasm-emscripten: - permissions: - contents: read # to fetch code (actions/checkout) name: Pyodide test runs-on: ubuntu-22.04 # To enable this workflow on a fork, comment out: From 467897cd5eee996f8552c7d5ab10823242350954 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Sun, 16 Nov 2025 07:06:25 +0100 Subject: [PATCH 0868/1718] Update numpy/_core/src/common/blas_utils.h Co-authored-by: Matti Picus --- numpy/_core/src/common/blas_utils.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/common/blas_utils.h b/numpy/_core/src/common/blas_utils.h index 840c4940a87e..79d1e5ce274c 100644 --- a/numpy/_core/src/common/blas_utils.h +++ b/numpy/_core/src/common/blas_utils.h @@ -9,7 +9,7 @@ * quite clear. * This checks always on all ARM (it is a small check overall). 
*/ -#if defined(__aarch64__) && defined(HAVE_CBLAS) +#if defined(__APPLE__) && defined(__aarch64__) && defined(HAVE_CBLAS) #define NPY_BLAS_CHECK_FPE_SUPPORT 1 #else #define NPY_BLAS_CHECK_FPE_SUPPORT 0 From 42d1a0d6832e6cf52420da71cee133584d0bc049 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sun, 16 Nov 2025 12:33:25 +0100 Subject: [PATCH 0869/1718] MAINT: undo change to `fromstring` text signature for 2.4.0 This reverts a very small part of gh-30147, because it broken Pythran (see pythran#2368). While a new Pythran 0.18.1 release is available, that doesn't help for the SciPy 1.15.x releases which allow numpy<2.5 and pythran<0.18, see https://github.com/scipy/scipy/blob/maintenance/1.15.x/pyproject.toml#L30-L41 So introducing this signature change only for 2.5.0 is much better. In addition, doing so doesn't leave us only a very narrow window with compatible NumPy and Pythran releases which would otherwise bite distro packagers as well. --- numpy/_core/_add_newdocs.py | 4 +--- numpy/_core/tests/test_multiarray.py | 1 - 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index eded752b2721..6b937389defe 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -1482,11 +1482,9 @@ """) +# Signature can be updated for 2.5.0 release, see gh-30235 for details add_newdoc('numpy._core.multiarray', 'fromstring', """ - fromstring(string, dtype=None, count=-1, *, sep, like=None) - -- - fromstring(string, dtype=float, count=-1, *, sep, like=None) A new 1-D array initialized from text data in a string. 
diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 051a81d99d1f..2cba4eb3ec75 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -10982,7 +10982,6 @@ def test_c_func_dispatcher_signature(self, func): (np.fromfile, ("file", "dtype", "count", "sep", "offset", "like")), (np.fromiter, ("iter", "dtype", "count", "like")), (np.frompyfunc, ("func", "nin", "nout", "kwargs")), - (np.fromstring, ("string", "dtype", "count", "sep", "like")), (np.nested_iters, ( "op", "axes", "flags", "op_flags", "op_dtypes", "order", "casting", "buffersize", From e468d88310a65c16ba9181b06462f78400e80257 Mon Sep 17 00:00:00 2001 From: Andrew Nelson Date: Mon, 17 Nov 2025 00:17:33 +1100 Subject: [PATCH 0870/1718] CI: remove (mainly windows) jobs from Azure pipelines (#30222) --- .github/workflows/linux.yml | 4 +++ .github/workflows/windows.yml | 62 +++++++++++++++++++++++++++++++++-- azure-pipelines.yml | 52 +++++------------------------ 3 files changed, 73 insertions(+), 45 deletions(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 74edb50d9273..bbca13925342 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -53,6 +53,10 @@ jobs: BASE_REF: ${{ github.base_ref }} run: python tools/linter.py + - name: Check Python.h is first file included + run: | + python tools/check_python_h_first.py + smoke_test: # To enable this job on a fork, comment out: diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 96c175c07725..3302e054d438 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -19,7 +19,7 @@ permissions: jobs: clangcl_python64bit_openblas32: - name: x86-64, LP64 OpenBLAS + name: Clang-cl, x86-64, fast, openblas32 runs-on: windows-2022 # To enable this job on a fork, comment out: if: github.repository == 'numpy/numpy' @@ -66,8 +66,10 @@ jobs: run: | spin test -- --timeout=600 --durations=10 + + 
#======================================================================================= msvc_python32bit_no_openblas: - name: MSVC, ${{ matrix.architecture }} Python , no BLAS + name: MSVC, ${{ matrix.architecture }}, fast, no BLAS runs-on: ${{ matrix.os }} strategy: fail-fast: false @@ -117,3 +119,59 @@ jobs: run: | cd tools python -m pytest --pyargs numpy -m "not slow" -n2 --timeout=600 --durations=10 + + #======================================================================================= + msvc_python64bit_openblas: + name: MSVC, x86_64, ${{ matrix.TEST_MODE }}, openblas${{ matrix.BLAS }} + runs-on: windows-2022 + strategy: + fail-fast: false + matrix: + include: + - BLAS: 64 + TEST_MODE: full + pyver: '3.14' + - BLAS: 32 + TEST_MODE: fast + pyver: '3.11' + + # To enable this job on a fork, comment out: + if: github.repository == 'numpy/numpy' + steps: + - name: Checkout + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + with: + submodules: recursive + fetch-tags: true + persist-credentials: false + + - name: Setup Python + uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + with: + python-version: ${{ matrix.pyver }} + + - name: pkg-config + run: | + choco install -y --stoponfirstfailure --checksum 6004DF17818F5A6DBF19CB335CC92702 pkgconfiglite + + - name: Dependencies + run: | + python -m pip install -r requirements/test_requirements.txt + + - name: Build and install + run: | + pip install -r requirements/ci_requirements.txt + spin config-openblas --with-scipy-openblas=${{ matrix.BLAS }} + $env:PKG_CONFIG_PATH="$pwd/.openblas" + python -m pip install . 
-v -Csetup-args="--vsenv" -Csetup-args="-Dallow-noblas=false" + + - name: Run test suite ${{ matrix.TEST_MODE }} + run: | + cd tools + # Get a gfortran onto the path for f2py tests + $env:PATH = "c:\\rtools45\\x86_64-w64-mingw32.static.posix\\bin;$env:PATH" + If ( "${{ matrix.TEST_MODE }}" -eq "full" ) { + python -m pytest --pyargs numpy -rsx -n2 --durations=10 + } else { + python -m pytest --pyargs numpy -m "not slow" -n2 --timeout=600 --durations=10 -rsx + } diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 89dee2267352..11dab7f42c6b 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -37,29 +37,6 @@ stages: dependsOn: Check jobs: - - job: Lint - condition: and(succeeded(), eq(variables['Build.Reason'], 'PullRequest')) - pool: - vmImage: 'ubuntu-22.04' - steps: - - task: UsePythonVersion@0 - inputs: - versionSpec: '3.11' - addToPath: true - architecture: 'x64' - - script: >- - python -m pip install -r requirements/linter_requirements.txt - displayName: 'Install tools' - # pip 21.1 emits a pile of garbage messages to annoy users :) - # failOnStderr: true - - script: | - python tools/linter.py - displayName: 'Run Lint Checks' - failOnStderr: true - - script: | - python tools/check_python_h_first.py - displayName: 'Check Python.h is first file included' - - job: Linux_Python_311_32bit_full_with_asserts pool: vmImage: 'ubuntu-22.04' @@ -75,24 +52,13 @@ stages: /bin/bash -xc "source /numpy/tools/ci/run_32_bit_linux_docker.sh" displayName: 'Run 32-bit manylinux2014 Docker Build / Tests' - - job: Windows - timeoutInMinutes: 120 - pool: - vmImage: 'windows-2022' - strategy: - maxParallel: 3 - matrix: - Python311-64bit-fast: - PYTHON_VERSION: '3.11' - PYTHON_ARCH: 'x64' - TEST_MODE: fast - BITS: 64 - Python312-64bit-full: - PYTHON_VERSION: '3.12' - PYTHON_ARCH: 'x64' - TEST_MODE: full - BITS: 64 - _USE_BLAS_ILP64: '1' +# - job: Windows +# timeoutInMinutes: 120 +# pool: +# vmImage: 'windows-2022' +# strategy: +# maxParallel: 3 +# matrix: # TODO pypy: 
uncomment when pypy3.11 comes out # PyPy311-64bit-fast: # PYTHON_VERSION: 'pypy3.11' @@ -101,5 +67,5 @@ stages: # BITS: 64 # _USE_BLAS_ILP64: '1' - steps: - - template: azure-steps-windows.yml +# steps: +# - template: azure-steps-windows.yml From 13ce032f519ef88be58a9523d5a84112765603cb Mon Sep 17 00:00:00 2001 From: "Christine P. Chai" Date: Sun, 16 Nov 2025 14:41:21 -0800 Subject: [PATCH 0871/1718] DOC: Correct an equation error in numpy.random.Generator.pareto --- numpy/random/_generator.pyx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index bace56b59422..e19c3919b49e 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -2083,7 +2083,7 @@ cdef class Generator: ----- The probability density for the Pareto II distribution is - .. math:: p(x) = \\frac{a}{{x+1}^{a+1}} , x \ge 0 + .. math:: p(x) = \\frac{a}{{(x+1)}^{a+1}} , x \ge 0 where :math:`a > 0` is the shape. From 952843f526556f59707a2f690963d79d45c276be Mon Sep 17 00:00:00 2001 From: "Christine P. Chai" Date: Sun, 16 Nov 2025 14:43:13 -0800 Subject: [PATCH 0872/1718] DOC: Add labels to save CI resources [skip actions][skip azp][skip cirrus] From 54b69fb71a8d4bbad53f9d55fcd49f779cb3fbf3 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sun, 16 Nov 2025 16:57:21 -0700 Subject: [PATCH 0873/1718] MAINT: Update main after 2.3.5 release. 
- Forwardport 2.3.5-changelog.rst - Forwardport 2.3.5-notes.rst - Update .mailmap - Update release.rst [skip azp] [skip cirrus] [skip actions] --- .mailmap | 2 ++ doc/changelog/2.3.5-changelog.rst | 40 ++++++++++++++++++++++++ doc/source/release.rst | 1 + doc/source/release/2.3.5-notes.rst | 50 ++++++++++++++++++++++++++++++ 4 files changed, 93 insertions(+) create mode 100644 doc/changelog/2.3.5-changelog.rst create mode 100644 doc/source/release/2.3.5-notes.rst diff --git a/.mailmap b/.mailmap index ddadaf22a62f..18cfb272618f 100644 --- a/.mailmap +++ b/.mailmap @@ -752,6 +752,8 @@ Yash Pethe Yash Pethe <83630710+patient74@users.noreply.github.com> Yashasvi Misra Yashasvi Misra <54177363+yashasvimisra2798@users.noreply.github.com> +Yasir Ashfaq +Yasir Ashfaq <107119183+yasiribmcon@users.noreply.github.com> Yichi Zhang Yogesh Raisinghani <46864533+raisinghanii@users.noreply.github.com> Younes Sandi diff --git a/doc/changelog/2.3.5-changelog.rst b/doc/changelog/2.3.5-changelog.rst new file mode 100644 index 000000000000..123e1e9d0453 --- /dev/null +++ b/doc/changelog/2.3.5-changelog.rst @@ -0,0 +1,40 @@ + +Contributors +============ + +A total of 10 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Aaron Kollasch + +* Charles Harris +* Joren Hammudoglu +* Matti Picus +* Nathan Goldbaum +* Rafael Laboissière + +* Sayed Awad +* Sebastian Berg +* Warren Weckesser +* Yasir Ashfaq + + +Pull requests merged +==================== + +A total of 16 pull requests were merged for this release. + +* `#29979 `__: MAINT: Prepare 2.3.x for further development +* `#30026 `__: SIMD, BLD: Backport FPMATH mode on x86-32 and filter successor... +* `#30029 `__: MAINT: Backport write_release.py +* `#30041 `__: TYP: Various typing updates +* `#30059 `__: BUG: Fix np.strings.slice if stop=None or start and stop >= len... 
+* `#30063 `__: BUG: Fix np.strings.slice if start > stop +* `#30076 `__: BUG: avoid negating INT_MIN in PyArray_Round implementation (#30071) +* `#30090 `__: BUG: Fix resize when it contains references (#29970) +* `#30129 `__: BLD: update scipy-openblas, use -Dpkg_config_path (#30049) +* `#30130 `__: BUG: Avoid compilation error of wrapper file generated with SWIG... +* `#30157 `__: BLD: use scipy-openblas 0.3.30.7 (#30132) +* `#30158 `__: DOC: Remove nonexistent ``order`` parameter docs of ``ma.asanyarray``... +* `#30185 `__: BUG: Fix check of PyMem_Calloc return value. (#30176) +* `#30217 `__: DOC: fix links for newly rebuilt numpy-tutorials site +* `#30218 `__: BUG: Fix build on s390x with clang (#30214) +* `#30237 `__: ENH: Make FPE blas check a runtime check for all apple arm systems + diff --git a/doc/source/release.rst b/doc/source/release.rst index 6f1c1a22f11d..190d0512faeb 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -6,6 +6,7 @@ Release notes :maxdepth: 2 2.4.0 + 2.3.5 2.3.4 2.3.3 2.3.2 diff --git a/doc/source/release/2.3.5-notes.rst b/doc/source/release/2.3.5-notes.rst new file mode 100644 index 000000000000..8013ef468055 --- /dev/null +++ b/doc/source/release/2.3.5-notes.rst @@ -0,0 +1,50 @@ +.. currentmodule:: numpy + +========================= +NumPy 2.3.5 Release Notes +========================= + +The NumPy 2.3.5 release is a patch release split between a number of maintenance +updates and bug fixes. This release supports Python versions 3.11-3.14. + + +Contributors +============ + +A total of 10 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Aaron Kollasch + +* Charles Harris +* Joren Hammudoglu +* Matti Picus +* Nathan Goldbaum +* Rafael Laboissière + +* Sayed Awad +* Sebastian Berg +* Warren Weckesser +* Yasir Ashfaq + + + +Pull requests merged +==================== + +A total of 16 pull requests were merged for this release. 
+ +* `#29979 `__: MAINT: Prepare 2.3.x for further development +* `#30026 `__: SIMD, BLD: Backport FPMATH mode on x86-32 and filter successor... +* `#30029 `__: MAINT: Backport write_release.py +* `#30041 `__: TYP: Various typing updates +* `#30059 `__: BUG: Fix np.strings.slice if stop=None or start and stop >= len... +* `#30063 `__: BUG: Fix np.strings.slice if start > stop +* `#30076 `__: BUG: avoid negating INT_MIN in PyArray_Round implementation (#30071) +* `#30090 `__: BUG: Fix resize when it contains references (#29970) +* `#30129 `__: BLD: update scipy-openblas, use -Dpkg_config_path (#30049) +* `#30130 `__: BUG: Avoid compilation error of wrapper file generated with SWIG... +* `#30157 `__: BLD: use scipy-openblas 0.3.30.7 (#30132) +* `#30158 `__: DOC: Remove nonexistent ``order`` parameter docs of ``ma.asanyarray``... +* `#30185 `__: BUG: Fix check of PyMem_Calloc return value. (#30176) +* `#30217 `__: DOC: fix links for newly rebuilt numpy-tutorials site +* `#30218 `__: BUG: Fix build on s390x with clang (#30214) +* `#30237 `__: ENH: Make FPE blas check a runtime check for all apple arm systems + From 697def6914200d61a5b64f1d7310488fcf082d78 Mon Sep 17 00:00:00 2001 From: Johnnie Gray Date: Sun, 16 Nov 2025 18:39:51 -0800 Subject: [PATCH 0874/1718] fix parsing --- numpy/_core/einsumfunc.py | 61 +++++++++++++++++++-------------------- 1 file changed, 30 insertions(+), 31 deletions(-) diff --git a/numpy/_core/einsumfunc.py b/numpy/_core/einsumfunc.py index dd5c65bc8307..2e38e0cb52c6 100644 --- a/numpy/_core/einsumfunc.py +++ b/numpy/_core/einsumfunc.py @@ -988,15 +988,11 @@ def _parse_eq_to_batch_matmul(eq, shape_a, shape_b): if len(b_term) != len(shape_b): raise ValueError(f"Term '{b_term}' does not match shape {shape_b}.") - bat_inds = [] # appears on A, B, O - con_inds = [] # appears on A, B, . 
- a_keep = [] # appears on A, ., O - b_keep = [] # appears on ., B, O sizes = {} singletons = set() - # parse left term - seen = set() + # parse left term to unique indices with size > 1 + left = dict() for ix, d in zip(a_term, shape_a): if d == 1: # everything (including broadcasting) works nicely if simply ignore @@ -1004,47 +1000,50 @@ def _parse_eq_to_batch_matmul(eq, shape_a, shape_b): # and thus should be reintroduced later singletons.add(ix) continue - - # set or check size if sizes.setdefault(ix, d) != d: + # set or check size raise ValueError( f"Index {ix} has mismatched sizes {sizes[ix]} and {d}." ) + left[ix] = True - if ix in seen: - continue - seen.add(ix) - - if ix in b_term: - if ix in out: - bat_inds.append(ix) - else: - con_inds.append(ix) - elif ix in out: - a_keep.append(ix) - - # parse right term - seen.clear() + # parse right term to unique indices with size > 1 + right = dict() for ix, d in zip(b_term, shape_b): + # broadcast indices (size 1 on one input and size != 1 + # on the other) should not be treated as singletons if d == 1: - singletons.add(ix) + if ix not in left: + singletons.add(ix) continue - # broadcast indices don't appear as singletons in output singletons.discard(ix) - # set or check size if sizes.setdefault(ix, d) != d: + # set or check size raise ValueError( f"Index {ix} has mismatched sizes {sizes[ix]} and {d}." ) + right[ix] = True - if ix in seen: - continue - seen.add(ix) - - if ix not in a_term: + # now we classify the unique size > 1 indices only + bat_inds = [] # appears on A, B, O + con_inds = [] # appears on A, B, . 
+ a_keep = [] # appears on A, ., O + b_keep = [] # appears on ., B, O + # other indices (appearing on A or B only) will + # be summed or traced out prior to the matmul + for ix in left: + if right.pop(ix, False): if ix in out: - b_keep.append(ix) + bat_inds.append(ix) + else: + con_inds.append(ix) + elif ix in out: + a_keep.append(ix) + # now only indices unique to right remain + for ix in right: + if ix in out: + b_keep.append(ix) if not con_inds: # contraction is pure multiplication, prepare inputs differently From b0aaffba0571c28427ad988a3df90404b3ce71bb Mon Sep 17 00:00:00 2001 From: "Christine P. Chai" Date: Sun, 16 Nov 2025 18:48:58 -0800 Subject: [PATCH 0875/1718] Update _generator.pyx [skip actions][skip azp][skip cirrus] Co-authored-by: Joren Hammudoglu --- numpy/random/_generator.pyx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index e19c3919b49e..ed9b6a3d12a8 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -2083,7 +2083,7 @@ cdef class Generator: ----- The probability density for the Pareto II distribution is - .. math:: p(x) = \\frac{a}{{(x+1)}^{a+1}} , x \ge 0 + .. math:: p(x) = \\frac{a}{(x+1)^{a+1}} , x \ge 0 where :math:`a > 0` is the shape. 
From 59ae764e3391226952a0d6d6bd4eef7da751316f Mon Sep 17 00:00:00 2001 From: Johnnie Gray Date: Sun, 16 Nov 2025 18:54:24 -0800 Subject: [PATCH 0876/1718] test_einsum, add singleton broadcasting tests --- numpy/_core/tests/test_einsum.py | 35 ++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/numpy/_core/tests/test_einsum.py b/numpy/_core/tests/test_einsum.py index 6403d47034d7..46660e59792b 100644 --- a/numpy/_core/tests/test_einsum.py +++ b/numpy/_core/tests/test_einsum.py @@ -1119,6 +1119,41 @@ def test_output_order(self): tmp = np.einsum('...ft,mf->...mt', d, c, order='a', optimize=opt) assert_(tmp.flags.c_contiguous) + def test_singleton_broadcasting(self): + eq = "ijp,ipq,ikq->ijk" + shapes = (3, 1, 1), (3, 1, 3), (1, 3, 3) + arrays = [np.random.rand(*shape) for shape in shapes] + self.optimize_compare(eq, operands=arrays) + + eq = "jhcabhijaci,dfijejgh->fgje" + shapes = ( + (1, 1, 1, 1, 3, 1, 1, 1, 1, 1, 1), + (3, 1, 3, 1, 1, 1, 1, 2), + ) + arrays = [np.random.rand(*shape) for shape in shapes] + self.optimize_compare(eq, operands=arrays) + + eq = "baegffahgc,hdggeff->dhg" + shapes = ((2, 1, 4, 1, 1, 1, 1, 2, 1, 1), (1, 1, 1, 1, 4, 1, 1)) + arrays = [np.random.rand(*shape) for shape in shapes] + self.optimize_compare(eq, operands=arrays) + + eq = "cehgbaifff,fhhdegih->cdghbi" + shapes = ((1, 1, 1, 1, 1, 1, 1, 1, 1, 1), (2, 1, 1, 2, 4, 1, 1, 1)) + arrays = [np.random.rand(*shape) for shape in shapes] + self.optimize_compare(eq, operands=arrays) + + eq = "gah,cdbcghefg->ef" + shapes = ((2, 3, 1), (1, 3, 1, 1, 1, 2, 1, 4, 1)) + arrays = [np.random.rand(*shape) for shape in shapes] + self.optimize_compare(eq, operands=arrays) + + eq ="'cacc,bcb->" + shapes = ((1, 1, 1, 1), (1, 4, 1)) + arrays = [np.random.rand(*shape) for shape in shapes] + self.optimize_compare(eq, operands=arrays) + + class TestEinsumPath: def build_operands(self, string, size_dict=global_size_dict): From 72bce0bc18d6bb1796de5328ff1935d1e871ad60 Mon Sep 17 00:00:00 
2001 From: Johnnie Gray Date: Sun, 16 Nov 2025 19:07:59 -0800 Subject: [PATCH 0877/1718] fix stray character --- numpy/_core/tests/test_einsum.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/tests/test_einsum.py b/numpy/_core/tests/test_einsum.py index 46660e59792b..619db6f02897 100644 --- a/numpy/_core/tests/test_einsum.py +++ b/numpy/_core/tests/test_einsum.py @@ -1148,7 +1148,7 @@ def test_singleton_broadcasting(self): arrays = [np.random.rand(*shape) for shape in shapes] self.optimize_compare(eq, operands=arrays) - eq ="'cacc,bcb->" + eq = "cacc,bcb->" shapes = ((1, 1, 1, 1), (1, 4, 1)) arrays = [np.random.rand(*shape) for shape in shapes] self.optimize_compare(eq, operands=arrays) From 0a0bcdb4b3fc2bccd9724bc1f8431e20870e385c Mon Sep 17 00:00:00 2001 From: Johnnie Gray Date: Sun, 16 Nov 2025 21:28:38 -0800 Subject: [PATCH 0878/1718] address review comments --- numpy/_core/einsumfunc.py | 8 ++++---- numpy/_core/tests/test_einsum.py | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/numpy/_core/einsumfunc.py b/numpy/_core/einsumfunc.py index 2e38e0cb52c6..9461994f5795 100644 --- a/numpy/_core/einsumfunc.py +++ b/numpy/_core/einsumfunc.py @@ -992,7 +992,7 @@ def _parse_eq_to_batch_matmul(eq, shape_a, shape_b): singletons = set() # parse left term to unique indices with size > 1 - left = dict() + left = {} for ix, d in zip(a_term, shape_a): if d == 1: # everything (including broadcasting) works nicely if simply ignore @@ -1001,14 +1001,14 @@ def _parse_eq_to_batch_matmul(eq, shape_a, shape_b): singletons.add(ix) continue if sizes.setdefault(ix, d) != d: - # set or check size + # set and check size raise ValueError( f"Index {ix} has mismatched sizes {sizes[ix]} and {d}." 
) left[ix] = True # parse right term to unique indices with size > 1 - right = dict() + right = {} for ix, d in zip(b_term, shape_b): # broadcast indices (size 1 on one input and size != 1 # on the other) should not be treated as singletons @@ -1019,7 +1019,7 @@ def _parse_eq_to_batch_matmul(eq, shape_a, shape_b): singletons.discard(ix) if sizes.setdefault(ix, d) != d: - # set or check size + # set and check size raise ValueError( f"Index {ix} has mismatched sizes {sizes[ix]} and {d}." ) diff --git a/numpy/_core/tests/test_einsum.py b/numpy/_core/tests/test_einsum.py index 619db6f02897..375ef03c1dd7 100644 --- a/numpy/_core/tests/test_einsum.py +++ b/numpy/_core/tests/test_einsum.py @@ -1121,7 +1121,7 @@ def test_output_order(self): def test_singleton_broadcasting(self): eq = "ijp,ipq,ikq->ijk" - shapes = (3, 1, 1), (3, 1, 3), (1, 3, 3) + shapes = ((3, 1, 1), (3, 1, 3), (1, 3, 3)) arrays = [np.random.rand(*shape) for shape in shapes] self.optimize_compare(eq, operands=arrays) From 02c8c3d4e8cf1987d8121e80828fda420b97ad1d Mon Sep 17 00:00:00 2001 From: Andrew Nelson Date: Mon, 17 Nov 2025 21:04:36 +1100 Subject: [PATCH 0879/1718] CI: removes azure pipelines (#30232) --- .github/workflows/linux.yml | 44 ++++++++++++++++++ azure-pipelines.yml | 71 ----------------------------- azure-steps-windows.yml | 55 ---------------------- tools/ci/run_32_bit_linux_docker.sh | 14 ------ 4 files changed, 44 insertions(+), 140 deletions(-) delete mode 100644 azure-pipelines.yml delete mode 100644 azure-steps-windows.yml delete mode 100644 tools/ci/run_32_bit_linux_docker.sh diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index bbca13925342..5d1f29693155 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -366,3 +366,47 @@ jobs: rm -rf build-install ./vendored-meson/meson/meson.py install -C build --destdir ../build-install --tags=runtime,python-runtime,devel python tools/check_installed_files.py $(find ./build-install -path 
'*/site-packages/numpy') --no-tests + + + Linux_Python_311_32bit_full: + name: i686, cp311, full + needs: [smoke_test] + runs-on: ubuntu-latest + container: + # There are few options for i686 images at https://quay.io/organization/pypa, + # use the glibc2.28 one + image: quay.io/pypa/manylinux_2_28_i686 + + steps: + - name: Checkout and initialize submodules + # actions/checkout doesn't work in a container image + run: | + git config --global --add safe.directory $PWD + if [ $GITHUB_EVENT_NAME != pull_request ]; then + git clone --recursive --branch=$GITHUB_REF_NAME https://github.com/${GITHUB_REPOSITORY}.git $GITHUB_WORKSPACE + git reset --hard $GITHUB_SHA + else + git clone --recursive https://github.com/${GITHUB_REPOSITORY}.git $GITHUB_WORKSPACE + git fetch origin $GITHUB_REF:my_ref_name + git checkout $GITHUB_BASE_REF + git -c user.email="you@example.com" merge --no-commit my_ref_name + fi + git submodule update --init --recursive + + - name: build + run: | + python3.11 -m venv venv + source venv/bin/activate + pip install --upgrade pip + pip install -r requirements/ci32_requirements.txt + pip install -r requirements/test_requirements.txt + + spin config-openblas --with-scipy-openblas=32 + export PKG_CONFIG_PATH=$(pwd)/.openblas + python -m pip install . 
-v -Csetup-args="-Dallow-noblas=false" + + - name: test + run: | + source venv/bin/activate + cd tools + python -m pytest --pyargs numpy diff --git a/azure-pipelines.yml b/azure-pipelines.yml deleted file mode 100644 index 11dab7f42c6b..000000000000 --- a/azure-pipelines.yml +++ /dev/null @@ -1,71 +0,0 @@ -trigger: - # start a new build for every push - batch: False - branches: - include: - - main - - maintenance/* - - -pr: - branches: - include: - - '*' # must quote since "*" is a YAML reserved character; we want a string - - -stages: - -- stage: Check - jobs: - - job: Skip - pool: - vmImage: 'ubuntu-22.04' - variables: - DECODE_PERCENTS: 'false' - RET: 'true' - steps: - - bash: | - git_log=`git log --max-count=1 --skip=1 --pretty=format:"%B" | tr "\n" " "` - echo "##vso[task.setvariable variable=log]$git_log" - - bash: echo "##vso[task.setvariable variable=RET]false" - condition: or(contains(variables.log, '[skip azp]'), contains(variables.log, '[azp skip]'), contains(variables.log, '[skip ci]'), contains(variables.log, '[ci skip]')) - - bash: echo "##vso[task.setvariable variable=start_main;isOutput=true]$RET" - name: result - -- stage: ComprehensiveTests - condition: and(succeeded(), eq(dependencies.Check.outputs['Skip.result.start_main'], 'true')) - dependsOn: Check - jobs: - - - job: Linux_Python_311_32bit_full_with_asserts - pool: - vmImage: 'ubuntu-22.04' - steps: - - script: | - git submodule update --init - displayName: 'Fetch submodules' - - script: | - # There are few options for i686 images at https://quay.io/organization/pypa, - # use the glibc2.17 one (manylinux2014) - docker run -v $(pwd):/numpy -e CFLAGS="-msse2 -std=c99 -UNDEBUG" \ - -e F77=gfortran-5 -e F90=gfortran-5 quay.io/pypa/manylinux2014_i686 \ - /bin/bash -xc "source /numpy/tools/ci/run_32_bit_linux_docker.sh" - displayName: 'Run 32-bit manylinux2014 Docker Build / Tests' - -# - job: Windows -# timeoutInMinutes: 120 -# pool: -# vmImage: 'windows-2022' -# strategy: -# maxParallel: 3 -# 
matrix: -# TODO pypy: uncomment when pypy3.11 comes out -# PyPy311-64bit-fast: -# PYTHON_VERSION: 'pypy3.11' -# PYTHON_ARCH: 'x64' -# TEST_MODE: fast -# BITS: 64 -# _USE_BLAS_ILP64: '1' - -# steps: -# - template: azure-steps-windows.yml diff --git a/azure-steps-windows.yml b/azure-steps-windows.yml deleted file mode 100644 index 0baf374e1e3f..000000000000 --- a/azure-steps-windows.yml +++ /dev/null @@ -1,55 +0,0 @@ -steps: -- script: git submodule update --init - displayName: 'Fetch submodules' -- task: UsePythonVersion@0 - inputs: - versionSpec: $(PYTHON_VERSION) - addToPath: true - architecture: $(PYTHON_ARCH) - -- script: python -m pip install --upgrade pip wheel - displayName: 'Install tools' - -- script: python -m pip install -r requirements/test_requirements.txt - displayName: 'Install dependencies; some are optional to avoid test skips' - -- powershell: | - choco install -y --stoponfirstfailure --checksum 6004DF17818F5A6DBF19CB335CC92702 pkgconfiglite - displayName: 'Install utilities' - -- powershell: | - # Note: ensure the `pip install .` command remains the last one here, - # to avoid "green on failure" issues - If ( Test-Path env:DISABLE_BLAS ) { - python -m pip install . -v -Csetup-args="--vsenv" -Csetup-args="-Dblas=none" -Csetup-args="-Dlapack=none" -Csetup-args="-Dallow-noblas=true" - } - elseif ( Test-Path env:_USE_BLAS_ILP64 ) { - pip install -r requirements/ci_requirements.txt - spin config-openblas --with-scipy-openblas=64 - $env:PKG_CONFIG_PATH="$pwd/.openblas" - python -m pip install . -v -Csetup-args="--vsenv" - } else { - pip install -r requirements/ci_requirements.txt - spin config-openblas --with-scipy-openblas=32 - $env:PKG_CONFIG_PATH="$pwd/.openblas" - python -m pip install . 
-v -Csetup-args="--vsenv" - } - displayName: 'Build NumPy' - -- powershell: | - cd tools # avoid root dir to not pick up source tree - # Get a gfortran onto the path for f2py tests - $env:PATH = "c:\\rtools43\\x86_64-w64-mingw32.static.posix\\bin;$env:PATH" - If ( $env:TEST_MODE -eq "full" ) { - pytest --pyargs numpy -rsx --junitxml=junit/test-results.xml - } else { - pytest --pyargs numpy -m "not slow" -rsx --junitxml=junit/test-results.xml - } - displayName: 'Run NumPy Test Suite' - -- task: PublishTestResults@2 - condition: succeededOrFailed() - inputs: - testResultsFiles: '**/test-*.xml' - failTaskOnFailedTests: true - testRunTitle: 'Publish test results for Python $(PYTHON_VERSION) $(BITS)-bit $(TEST_MODE) Windows' diff --git a/tools/ci/run_32_bit_linux_docker.sh b/tools/ci/run_32_bit_linux_docker.sh deleted file mode 100644 index bb0aedf88fcf..000000000000 --- a/tools/ci/run_32_bit_linux_docker.sh +++ /dev/null @@ -1,14 +0,0 @@ -set -xe - -git config --global --add safe.directory /numpy -cd /numpy -/opt/python/cp311-cp311/bin/python -mvenv venv -source venv/bin/activate -pip install -r requirements/ci32_requirements.txt -python3 -m pip install -r requirements/test_requirements.txt -echo CFLAGS \$CFLAGS -spin config-openblas --with-scipy-openblas=32 -export PKG_CONFIG_PATH=/numpy/.openblas -python3 -m pip install . -cd tools -python3 -m pytest --pyargs numpy From e684ebbee9aa01649c5d913ad3527fa1f7200135 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Mon, 17 Nov 2025 10:59:47 +0100 Subject: [PATCH 0880/1718] BUG: Add missing `PyErr_Occurred()` check to fast-path We could side-step this check unless it's a legacy ufunc, but there is little point as this is barely measureable. Since someone would otherwise ask: No you can't move it into the legacy wrapping stuff because this has to happen at finalization when the GIL is held again. 
--- numpy/_core/src/umath/_umath_tests.c.src | 9 +++++++++ numpy/_core/src/umath/ufunc_object.c | 4 ++++ numpy/_core/tests/test_umath.py | 9 +++++++++ 3 files changed, 22 insertions(+) diff --git a/numpy/_core/src/umath/_umath_tests.c.src b/numpy/_core/src/umath/_umath_tests.c.src index 845f51ebc94f..a1b64ecc0444 100644 --- a/numpy/_core/src/umath/_umath_tests.c.src +++ b/numpy/_core/src/umath/_umath_tests.c.src @@ -460,6 +460,15 @@ addUfuncs(PyObject *dictionary) { } PyDict_SetItemString(dictionary, "always_error", f); Py_DECREF(f); + f = PyUFunc_FromFuncAndData(always_error_functions, always_error_data, + always_error_signatures, 1, 1, 1, PyUFunc_None, "always_error_unary", + "simply, broken, ufunc that sets an error (but releases the GIL).", + 0); + if (f == NULL) { + return -1; + } + PyDict_SetItemString(dictionary, "always_error_unary", f); + Py_DECREF(f); f = PyUFunc_FromFuncAndDataAndSignature(always_error_functions, always_error_data, always_error_signatures, 1, 2, 1, PyUFunc_None, "always_error_gufunc", diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index a9f00f351459..6bf6832dfb9b 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -4379,6 +4379,10 @@ try_trivial_scalar_call( ret = strided_loop(&context, data, &n, strides, auxdata); NPY_AUXDATA_FREE(auxdata); if (ret == 0) { + if (PyErr_Occurred()) { + ret = -1; + goto bail; + } if (!(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) { // Check for any unmasked floating point errors (note: faster // than _check_ufunc_fperr as one doesn't need mask up front). 
diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py index cd0ec7d04f69..f99441180182 100644 --- a/numpy/_core/tests/test_umath.py +++ b/numpy/_core/tests/test_umath.py @@ -4893,6 +4893,15 @@ def test_bad_legacy_ufunc_silent_errors(): ncu_tests.always_error.at(arr, [0, 1, 2], arr) +def test_bad_legacy_unary_ufunc_silent_errors(): + # Unary has a special scalar path right now, so test it explicitly. + with pytest.raises(RuntimeError, match=r"How unexpected :\)!"): + ncu_tests.always_error_unary(np.arange(3).astype(np.float64)) + + with pytest.raises(RuntimeError, match=r"How unexpected :\)!"): + ncu_tests.always_error_unary(1.5) + + @pytest.mark.parametrize('x1', [np.arange(3.0), [0.0, 1.0, 2.0]]) def test_bad_legacy_gufunc_silent_errors(x1): # Verify that an exception raised in a gufunc loop propagates correctly. From a50c9d1d3440a412729c2479159a19d2b214792e Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Mon, 17 Nov 2025 21:14:41 +0100 Subject: [PATCH 0881/1718] TYP: ``ravel``: less awkward return types (#30224) --- numpy/_core/fromnumeric.pyi | 9 +++------ numpy/typing/tests/data/reveal/fromnumeric.pyi | 2 +- 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index 29431efef532..2a9762240e3d 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -561,14 +561,11 @@ def ravel(a: str | _NestedSequence[str], order: _OrderKACF = "C") -> _Array1D[np @overload def ravel(a: bool | _NestedSequence[bool], order: _OrderKACF = "C") -> _Array1D[np.bool]: ... @overload -def ravel(a: int | _NestedSequence[int], order: _OrderKACF = "C") -> _Array1D[np.int_ | np.bool]: ... +def ravel(a: int | _NestedSequence[int], order: _OrderKACF = "C") -> _Array1D[np.int_ | Any]: ... @overload -def ravel(a: float | _NestedSequence[float], order: _OrderKACF = "C") -> _Array1D[np.float64 | np.int_ | np.bool]: ... 
+def ravel(a: float | _NestedSequence[float], order: _OrderKACF = "C") -> _Array1D[np.float64 | Any]: ... @overload -def ravel( - a: complex | _NestedSequence[complex], - order: _OrderKACF = "C", -) -> _Array1D[np.complex128 | np.float64 | np.int_ | np.bool]: ... +def ravel(a: complex | _NestedSequence[complex], order: _OrderKACF = "C") -> _Array1D[np.complex128 | Any]: ... @overload def ravel(a: ArrayLike, order: _OrderKACF = "C") -> np.ndarray[tuple[int], np.dtype]: ... diff --git a/numpy/typing/tests/data/reveal/fromnumeric.pyi b/numpy/typing/tests/data/reveal/fromnumeric.pyi index f6cc9b9fe0d7..58ea2c5f8732 100644 --- a/numpy/typing/tests/data/reveal/fromnumeric.pyi +++ b/numpy/typing/tests/data/reveal/fromnumeric.pyi @@ -127,7 +127,7 @@ assert_type(np.trace(AR_f4, out=AR_subclass, dtype=None), NDArraySubclass) assert_type(np.ravel(b), np.ndarray[tuple[int], np.dtype[np.bool]]) assert_type(np.ravel(f4), np.ndarray[tuple[int], np.dtype[np.float32]]) -assert_type(np.ravel(f), np.ndarray[tuple[int], np.dtype[np.float64 | np.int_ | np.bool]]) +assert_type(np.ravel(f), np.ndarray[tuple[int], np.dtype[np.float64 | Any]]) assert_type(np.ravel(AR_b), np.ndarray[tuple[int], np.dtype[np.bool]]) assert_type(np.ravel(AR_f4), np.ndarray[tuple[int], np.dtype[np.float32]]) From 25c22b43eda3c0808efcd897131c95fa53d410b2 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Mon, 17 Nov 2025 21:19:41 +0100 Subject: [PATCH 0882/1718] TYP, DEP: ``numpy.fix`` pending deprecation (#30168) * TYP, DEP: ``numpy.fix`` pending deprecation * DOC: release note for pending deprecation of ``np.fix`` --- .../upcoming_changes/30168.deprecation.rst | 5 ++++ numpy/lib/_ufunclike_impl.pyi | 25 +++++++------------ numpy/typing/tests/data/pass/ufunclike.py | 12 ++++----- numpy/typing/tests/data/reveal/ufunclike.pyi | 12 ++++----- 4 files changed, 26 insertions(+), 28 deletions(-) create mode 100644 doc/release/upcoming_changes/30168.deprecation.rst diff --git 
a/doc/release/upcoming_changes/30168.deprecation.rst b/doc/release/upcoming_changes/30168.deprecation.rst new file mode 100644 index 000000000000..81673397590d --- /dev/null +++ b/doc/release/upcoming_changes/30168.deprecation.rst @@ -0,0 +1,5 @@ +``np.fix`` is pending deprecation +--------------------------------- +The `numpy.fix` function will be deprecated in a future release. It is recommended to use +`numpy.trunc` instead, as it provides the same functionality of truncating decimal values to their +integer parts. Static type checkers might already report a warning for the use of `numpy.fix`. diff --git a/numpy/lib/_ufunclike_impl.pyi b/numpy/lib/_ufunclike_impl.pyi index d5c3151fb892..0f00767356e0 100644 --- a/numpy/lib/_ufunclike_impl.pyi +++ b/numpy/lib/_ufunclike_impl.pyi @@ -1,4 +1,5 @@ from typing import Any, TypeVar, overload +from typing_extensions import deprecated import numpy as np from numpy import floating, object_ @@ -14,25 +15,17 @@ __all__ = ["fix", "isneginf", "isposinf"] _ArrayT = TypeVar("_ArrayT", bound=NDArray[Any]) @overload -def fix( # type: ignore[misc] - x: _FloatLike_co, - out: None = None, -) -> floating: ... +@deprecated("np.fix will be deprecated in NumPy 2.5 in favor of np.trunc", category=PendingDeprecationWarning) +def fix(x: _FloatLike_co, out: None = None) -> floating: ... @overload -def fix( - x: _ArrayLikeFloat_co, - out: None = None, -) -> NDArray[floating]: ... +@deprecated("np.fix will be deprecated in NumPy 2.5 in favor of np.trunc", category=PendingDeprecationWarning) +def fix(x: _ArrayLikeFloat_co, out: None = None) -> NDArray[floating]: ... @overload -def fix( - x: _ArrayLikeObject_co, - out: None = None, -) -> NDArray[object_]: ... +@deprecated("np.fix will be deprecated in NumPy 2.5 in favor of np.trunc", category=PendingDeprecationWarning) +def fix(x: _ArrayLikeObject_co, out: None = None) -> NDArray[object_]: ... @overload -def fix( - x: _ArrayLikeFloat_co | _ArrayLikeObject_co, - out: _ArrayT, -) -> _ArrayT: ... 
+@deprecated("np.fix will be deprecated in NumPy 2.5 in favor of np.trunc", category=PendingDeprecationWarning) +def fix(x: _ArrayLikeFloat_co | _ArrayLikeObject_co, out: _ArrayT) -> _ArrayT: ... @overload def isposinf( # type: ignore[misc] diff --git a/numpy/typing/tests/data/pass/ufunclike.py b/numpy/typing/tests/data/pass/ufunclike.py index f69ec8eaf4ed..7e556d10bef7 100644 --- a/numpy/typing/tests/data/pass/ufunclike.py +++ b/numpy/typing/tests/data/pass/ufunclike.py @@ -32,12 +32,12 @@ def __array__(self, dtype: np.typing.DTypeLike | None = None, AR_LIKE_O = [Object(), Object(), Object()] AR_U: np.ndarray[Any, np.dtype[np.str_]] = np.zeros(3, dtype="U5") -np.fix(AR_LIKE_b) -np.fix(AR_LIKE_u) -np.fix(AR_LIKE_i) -np.fix(AR_LIKE_f) -np.fix(AR_LIKE_O) -np.fix(AR_LIKE_f, out=AR_U) +np.fix(AR_LIKE_b) # type: ignore[deprecated] +np.fix(AR_LIKE_u) # type: ignore[deprecated] +np.fix(AR_LIKE_i) # type: ignore[deprecated] +np.fix(AR_LIKE_f) # type: ignore[deprecated] +np.fix(AR_LIKE_O) # type: ignore[deprecated] +np.fix(AR_LIKE_f, out=AR_U) # type: ignore[deprecated] np.isposinf(AR_LIKE_b) np.isposinf(AR_LIKE_u) diff --git a/numpy/typing/tests/data/reveal/ufunclike.pyi b/numpy/typing/tests/data/reveal/ufunclike.pyi index aaae5e80e470..c679b82d2836 100644 --- a/numpy/typing/tests/data/reveal/ufunclike.pyi +++ b/numpy/typing/tests/data/reveal/ufunclike.pyi @@ -11,12 +11,12 @@ AR_LIKE_O: list[np.object_] AR_U: npt.NDArray[np.str_] -assert_type(np.fix(AR_LIKE_b), npt.NDArray[np.floating]) -assert_type(np.fix(AR_LIKE_u), npt.NDArray[np.floating]) -assert_type(np.fix(AR_LIKE_i), npt.NDArray[np.floating]) -assert_type(np.fix(AR_LIKE_f), npt.NDArray[np.floating]) -assert_type(np.fix(AR_LIKE_O), npt.NDArray[np.object_]) -assert_type(np.fix(AR_LIKE_f, out=AR_U), npt.NDArray[np.str_]) +assert_type(np.fix(AR_LIKE_b), npt.NDArray[np.floating]) # type: ignore[deprecated] +assert_type(np.fix(AR_LIKE_u), npt.NDArray[np.floating]) # type: ignore[deprecated] 
+assert_type(np.fix(AR_LIKE_i), npt.NDArray[np.floating]) # type: ignore[deprecated] +assert_type(np.fix(AR_LIKE_f), npt.NDArray[np.floating]) # type: ignore[deprecated] +assert_type(np.fix(AR_LIKE_O), npt.NDArray[np.object_]) # type: ignore[deprecated] +assert_type(np.fix(AR_LIKE_f, out=AR_U), npt.NDArray[np.str_]) # type: ignore[deprecated] assert_type(np.isposinf(AR_LIKE_b), npt.NDArray[np.bool]) assert_type(np.isposinf(AR_LIKE_u), npt.NDArray[np.bool]) From 6521abc70eca5a6543f864575a6ffed2c8a8dda0 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Mon, 17 Nov 2025 21:25:27 +0100 Subject: [PATCH 0883/1718] ENH, TYP: transparent ``ma.extras._fromnxfunction`` runtime signatures (#30229) * ENH: transparent ``ma.extras._fromnxfunction`` runtime signatures * TYP: stub the ``ma.extras._fromnxfunction`` functions * STY: appease ruff --- numpy/_core/shape_base.pyi | 9 +- numpy/lib/_shape_base_impl.pyi | 4 +- numpy/lib/_twodim_base_impl.pyi | 1 + numpy/ma/core.py | 14 --- numpy/ma/extras.py | 203 +++++++++++------------------- numpy/ma/extras.pyi | 211 ++++++++++++++++++++++++++++---- numpy/ma/tests/test_extras.py | 13 ++ 7 files changed, 281 insertions(+), 174 deletions(-) diff --git a/numpy/_core/shape_base.pyi b/numpy/_core/shape_base.pyi index b4e8b473ed71..a0fbefce0b3c 100644 --- a/numpy/_core/shape_base.pyi +++ b/numpy/_core/shape_base.pyi @@ -22,6 +22,7 @@ _ArrayT = TypeVar("_ArrayT", bound=NDArray[Any]) ### +# keep in sync with `numpy.ma.extras.atleast_1d` @overload def atleast_1d(a0: _ArrayLike[_ScalarT], /) -> NDArray[_ScalarT]: ... @overload @@ -35,7 +36,7 @@ def atleast_1d(a0: ArrayLike, a1: ArrayLike, /) -> tuple[NDArray[Any], NDArray[A @overload def atleast_1d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[NDArray[Any], ...]: ... -# +# keep in sync with `numpy.ma.extras.atleast_2d` @overload def atleast_2d(a0: _ArrayLike[_ScalarT], /) -> NDArray[_ScalarT]: ... 
@overload @@ -49,7 +50,7 @@ def atleast_2d(a0: ArrayLike, a1: ArrayLike, /) -> tuple[NDArray[Any], NDArray[A @overload def atleast_2d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[NDArray[Any], ...]: ... -# +# keep in sync with `numpy.ma.extras.atleast_3d` @overload def atleast_3d(a0: _ArrayLike[_ScalarT], /) -> NDArray[_ScalarT]: ... @overload @@ -63,7 +64,7 @@ def atleast_3d(a0: ArrayLike, a1: ArrayLike, /) -> tuple[NDArray[Any], NDArray[A @overload def atleast_3d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[NDArray[Any], ...]: ... -# +# keep in sync with `numpy.ma.extras.vstack` @overload def vstack( tup: Sequence[_ArrayLike[_ScalarT]], @@ -86,6 +87,7 @@ def vstack( casting: _CastingKind = "same_kind" ) -> NDArray[Any]: ... +# keep in sync with `numpy.ma.extras.hstack` @overload def hstack( tup: Sequence[_ArrayLike[_ScalarT]], @@ -108,6 +110,7 @@ def hstack( casting: _CastingKind = "same_kind" ) -> NDArray[Any]: ... +# keep in sync with `numpy.ma.extras.stack` @overload def stack( arrays: Sequence[_ArrayLike[_ScalarT]], diff --git a/numpy/lib/_shape_base_impl.pyi b/numpy/lib/_shape_base_impl.pyi index f8ba31d0f774..352f57dd810a 100644 --- a/numpy/lib/_shape_base_impl.pyi +++ b/numpy/lib/_shape_base_impl.pyi @@ -132,12 +132,13 @@ def row_stack( casting: _CastingKind = "same_kind", ) -> NDArray[Any]: ... -# +# keep in sync with `numpy.ma.extras.column_stack` @overload def column_stack(tup: Sequence[_ArrayLike[_ScalarT]]) -> NDArray[_ScalarT]: ... @overload def column_stack(tup: Sequence[ArrayLike]) -> NDArray[Any]: ... +# keep in sync with `numpy.ma.extras.dstack` @overload def dstack(tup: Sequence[_ArrayLike[_ScalarT]]) -> NDArray[_ScalarT]: ... @overload @@ -169,6 +170,7 @@ def split( axis: SupportsIndex = 0, ) -> list[NDArray[Any]]: ... 
+# keep in sync with `numpy.ma.extras.hsplit` @overload def hsplit( ary: _ArrayLike[_ScalarT], diff --git a/numpy/lib/_twodim_base_impl.pyi b/numpy/lib/_twodim_base_impl.pyi index 7396fa1b0370..cc1d6b6245a9 100644 --- a/numpy/lib/_twodim_base_impl.pyi +++ b/numpy/lib/_twodim_base_impl.pyi @@ -131,6 +131,7 @@ def diag(v: _ArrayLike[_ScalarT], k: int = 0) -> NDArray[_ScalarT]: ... @overload def diag(v: ArrayLike, k: int = 0) -> NDArray[Any]: ... +# keep in sync with `numpy.ma.extras.diagflat` @overload def diagflat(v: _ArrayLike[_ScalarT], k: int = 0) -> NDArray[_ScalarT]: ... @overload diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 9b27d90852e7..e0ead317d9f6 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -46,7 +46,6 @@ from numpy._core import multiarray as mu from numpy._core.numeric import normalize_axis_tuple from numpy._utils import set_module -from numpy._utils._inspect import formatargspec, getargspec __all__ = [ 'MAError', 'MaskError', 'MaskType', 'MaskedArray', 'abs', 'absolute', @@ -134,19 +133,6 @@ def doc_note(initialdoc, note): return ''.join(notesplit[:1] + [notedoc] + notesplit[1:]) -# TODO: remove/deprecate once `ma.extras._fromnxfunction.getdoc` no longer uses it -def get_object_signature(obj): - """ - Get the signature from obj - - """ - try: - sig = formatargspec(*getargspec(obj)) - except TypeError: - sig = '' - return sig - - ############################################################################### # Exceptions # ############################################################################### diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py index 6d5569e833dd..7387d4f9beb7 100644 --- a/numpy/ma/extras.py +++ b/numpy/ma/extras.py @@ -19,6 +19,7 @@ 'setdiff1d', 'setxor1d', 'stack', 'unique', 'union1d', 'vander', 'vstack', ] +import functools import itertools import warnings @@ -244,151 +245,93 @@ def masked_all_like(arr): #####-------------------------------------------------------------------------- #---- --- Standard 
functions --- #####-------------------------------------------------------------------------- -class _fromnxfunction: - """ - Defines a wrapper to adapt NumPy functions to masked arrays. - - - An instance of `_fromnxfunction` can be called with the same parameters - as the wrapped NumPy function. The docstring of `newfunc` is adapted from - the wrapped function as well, see `getdoc`. - This class should not be used directly. Instead, one of its extensions that - provides support for a specific type of input should be used. +def _fromnxfunction_function(_fromnxfunction): + """ + Decorator to wrap a "_fromnxfunction" function, wrapping a numpy function as a + masked array function, with proper docstring and name. Parameters ---------- - funcname : str - The name of the function to be adapted. The function should be - in the NumPy namespace (i.e. ``np.funcname``). + _fromnxfunction : ({params}) -> ndarray, {params}) -> masked_array + Wrapper function that calls the wrapped numpy function - """ + Returns + ------- + decorator : (f: ({params}) -> ndarray) -> ({params}) -> masked_array + Function that accepts a numpy function and returns a masked array function - def __init__(self, funcname): - self.__name__ = funcname - self.__qualname__ = funcname - self.__doc__ = self.getdoc() + """ + def decorator(npfunc, /): + def wrapper(*args, **kwargs): + return _fromnxfunction(npfunc, *args, **kwargs) - def getdoc(self): - """ - Retrieve the docstring and signature from the function. + functools.update_wrapper(wrapper, npfunc, assigned=("__name__", "__qualname__")) + wrapper.__doc__ = ma.doc_note( + npfunc.__doc__, + "The function is applied to both the ``_data`` and the ``_mask``, if any.", + ) + return wrapper - The ``__doc__`` attribute of the function is used as the docstring for - the new masked array version of the function. A note on application - of the function to the mask is appended. 
+ return decorator - Parameters - ---------- - None - """ - npfunc = getattr(np, self.__name__, None) - doc = getattr(npfunc, '__doc__', None) - if doc: - sig = ma.get_object_signature(npfunc) - doc = ma.doc_note(doc, "The function is applied to both the _data " - "and the _mask, if any.") - if sig: - sig = self.__name__ + sig + "\n\n" - return sig + doc - return +@_fromnxfunction_function +def _fromnxfunction_single(npfunc, a, /, *args, **kwargs): + """ + Wraps a NumPy function that can be called with a single array argument followed by + auxiliary args that are passed verbatim for both the data and mask calls. + """ + return masked_array( + data=npfunc(np.asarray(a), *args, **kwargs), + mask=npfunc(getmaskarray(a), *args, **kwargs), + ) - def __call__(self, *args, **params): - pass +@_fromnxfunction_function +def _fromnxfunction_seq(npfunc, arys, /, *args, **kwargs): + """ + Wraps a NumPy function that can be called with a single sequence of arrays followed + by auxiliary args that are passed verbatim for both the data and mask calls. + """ + return masked_array( + data=npfunc(tuple(np.asarray(a) for a in arys), *args, **kwargs), + mask=npfunc(tuple(getmaskarray(a) for a in arys), *args, **kwargs), + ) -class _fromnxfunction_single(_fromnxfunction): +@_fromnxfunction_function +def _fromnxfunction_allargs(npfunc, /, *arys, **kwargs): """ - A version of `_fromnxfunction` that is called with a single array - argument followed by auxiliary args that are passed verbatim for - both the data and mask calls. + Wraps a NumPy function that can be called with multiple array arguments. + All args are converted to arrays even if they are not so already. + This makes it possible to process scalars as 1-D arrays. + Only keyword arguments are passed through verbatim for the data and mask calls. + Arrays arguments are processed independently and the results are returned in a list. + If only one arg is present, the return value is just the processed array instead of + a list. 
""" - def __call__(self, x, *args, **params): - func = getattr(np, self.__name__) - if isinstance(x, ndarray): - _d = func(x.__array__(), *args, **params) - _m = func(getmaskarray(x), *args, **params) - return masked_array(_d, mask=_m) - else: - _d = func(np.asarray(x), *args, **params) - _m = func(getmaskarray(x), *args, **params) - return masked_array(_d, mask=_m) - - -class _fromnxfunction_seq(_fromnxfunction): - """ - A version of `_fromnxfunction` that is called with a single sequence - of arrays followed by auxiliary args that are passed verbatim for - both the data and mask calls. - """ - def __call__(self, x, *args, **params): - func = getattr(np, self.__name__) - _d = func(tuple(np.asarray(a) for a in x), *args, **params) - _m = func(tuple(getmaskarray(a) for a in x), *args, **params) - return masked_array(_d, mask=_m) - - -class _fromnxfunction_args(_fromnxfunction): - """ - A version of `_fromnxfunction` that is called with multiple array - arguments. The first non-array-like input marks the beginning of the - arguments that are passed verbatim for both the data and mask calls. - Array arguments are processed independently and the results are - returned in a list. If only one array is found, the return value is - just the processed array instead of a list. - """ - def __call__(self, *args, **params): - func = getattr(np, self.__name__) - arrays = [] - args = list(args) - while len(args) > 0 and issequence(args[0]): - arrays.append(args.pop(0)) - res = [] - for x in arrays: - _d = func(np.asarray(x), *args, **params) - _m = func(getmaskarray(x), *args, **params) - res.append(masked_array(_d, mask=_m)) - if len(arrays) == 1: - return res[0] - return res - - -class _fromnxfunction_allargs(_fromnxfunction): - """ - A version of `_fromnxfunction` that is called with multiple array - arguments. Similar to `_fromnxfunction_args` except that all args - are converted to arrays even if they are not so already. 
This makes - it possible to process scalars as 1-D arrays. Only keyword arguments - are passed through verbatim for the data and mask calls. Arrays - arguments are processed independently and the results are returned - in a list. If only one arg is present, the return value is just the - processed array instead of a list. - """ - def __call__(self, *args, **params): - func = getattr(np, self.__name__) - res = [] - for x in args: - _d = func(np.asarray(x), **params) - _m = func(getmaskarray(x), **params) - res.append(masked_array(_d, mask=_m)) - if len(args) == 1: - return res[0] - return res - - -atleast_1d = _fromnxfunction_allargs('atleast_1d') -atleast_2d = _fromnxfunction_allargs('atleast_2d') -atleast_3d = _fromnxfunction_allargs('atleast_3d') - -vstack = row_stack = _fromnxfunction_seq('vstack') -hstack = _fromnxfunction_seq('hstack') -column_stack = _fromnxfunction_seq('column_stack') -dstack = _fromnxfunction_seq('dstack') -stack = _fromnxfunction_seq('stack') - -hsplit = _fromnxfunction_single('hsplit') - -diagflat = _fromnxfunction_single('diagflat') + out = tuple( + masked_array( + data=npfunc(np.asarray(a), **kwargs), + mask=npfunc(getmaskarray(a), **kwargs), + ) + for a in arys + ) + return out[0] if len(out) == 1 else out + + +atleast_1d = _fromnxfunction_allargs(np.atleast_1d) +atleast_2d = _fromnxfunction_allargs(np.atleast_2d) +atleast_3d = _fromnxfunction_allargs(np.atleast_3d) + +vstack = row_stack = _fromnxfunction_seq(np.vstack) +hstack = _fromnxfunction_seq(np.hstack) +column_stack = _fromnxfunction_seq(np.column_stack) +dstack = _fromnxfunction_seq(np.dstack) +stack = _fromnxfunction_seq(np.stack) + +hsplit = _fromnxfunction_single(np.hsplit) +diagflat = _fromnxfunction_single(np.diagflat) #####-------------------------------------------------------------------------- diff --git a/numpy/ma/extras.pyi b/numpy/ma/extras.pyi index afd03300ff5f..70881bd15c8a 100644 --- a/numpy/ma/extras.pyi +++ b/numpy/ma/extras.pyi @@ -1,6 +1,17 @@ from 
_typeshed import Incomplete +from collections.abc import Sequence +from typing import SupportsIndex, TypeAlias, TypeVar, overload import numpy as np +from numpy import _CastingKind +from numpy._typing import ( + ArrayLike, + DTypeLike, + _AnyShape, + _ArrayLike, + _DTypeLike, + _ShapeLike, +) from numpy.lib._function_base_impl import average from numpy.lib._index_tricks_impl import AxisConcatenator @@ -55,39 +66,187 @@ __all__ = [ "vstack", ] -def count_masked(arr, axis=None): ... -def masked_all(shape, dtype=float): ... # noqa: PYI014 -def masked_all_like(arr): ... +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +_ScalarT1 = TypeVar("_ScalarT1", bound=np.generic) +_ScalarT2 = TypeVar("_ScalarT2", bound=np.generic) +_MArrayT = TypeVar("_MArrayT", bound=MaskedArray) + +_MArray: TypeAlias = MaskedArray[_AnyShape, np.dtype[_ScalarT]] + +### + +# keep in sync with `numpy._core.shape_base.atleast_1d` +@overload +def atleast_1d(a0: _ArrayLike[_ScalarT], /) -> _MArray[_ScalarT]: ... +@overload +def atleast_1d(a0: _ArrayLike[_ScalarT1], a1: _ArrayLike[_ScalarT2], /) -> tuple[_MArray[_ScalarT1], _MArray[_ScalarT2]]: ... +@overload +def atleast_1d( + a0: _ArrayLike[_ScalarT], a1: _ArrayLike[_ScalarT], /, *arys: _ArrayLike[_ScalarT] +) -> tuple[_MArray[_ScalarT], ...]: ... +@overload +def atleast_1d(a0: ArrayLike, /) -> _MArray[Incomplete]: ... +@overload +def atleast_1d(a0: ArrayLike, a1: ArrayLike, /) -> tuple[_MArray[Incomplete], _MArray[Incomplete]]: ... +@overload +def atleast_1d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[_MArray[Incomplete], ...]: ... + +# keep in sync with `numpy._core.shape_base.atleast_2d` +@overload +def atleast_2d(a0: _ArrayLike[_ScalarT], /) -> _MArray[_ScalarT]: ... +@overload +def atleast_2d(a0: _ArrayLike[_ScalarT1], a1: _ArrayLike[_ScalarT2], /) -> tuple[_MArray[_ScalarT1], _MArray[_ScalarT2]]: ... 
+@overload +def atleast_2d( + a0: _ArrayLike[_ScalarT], a1: _ArrayLike[_ScalarT], /, *arys: _ArrayLike[_ScalarT] +) -> tuple[_MArray[_ScalarT], ...]: ... +@overload +def atleast_2d(a0: ArrayLike, /) -> _MArray[Incomplete]: ... +@overload +def atleast_2d(a0: ArrayLike, a1: ArrayLike, /) -> tuple[_MArray[Incomplete], _MArray[Incomplete]]: ... +@overload +def atleast_2d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[_MArray[Incomplete], ...]: ... -class _fromnxfunction: - __name__: Incomplete - __doc__: Incomplete - def __init__(self, funcname) -> None: ... - def getdoc(self): ... - def __call__(self, *args, **params): ... +# keep in sync with `numpy._core.shape_base.atleast_2d` +@overload +def atleast_3d(a0: _ArrayLike[_ScalarT], /) -> _MArray[_ScalarT]: ... +@overload +def atleast_3d(a0: _ArrayLike[_ScalarT1], a1: _ArrayLike[_ScalarT2], /) -> tuple[_MArray[_ScalarT1], _MArray[_ScalarT2]]: ... +@overload +def atleast_3d( + a0: _ArrayLike[_ScalarT], a1: _ArrayLike[_ScalarT], /, *arys: _ArrayLike[_ScalarT] +) -> tuple[_MArray[_ScalarT], ...]: ... +@overload +def atleast_3d(a0: ArrayLike, /) -> _MArray[Incomplete]: ... +@overload +def atleast_3d(a0: ArrayLike, a1: ArrayLike, /) -> tuple[_MArray[Incomplete], _MArray[Incomplete]]: ... +@overload +def atleast_3d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[_MArray[Incomplete], ...]: ... -class _fromnxfunction_single(_fromnxfunction): - def __call__(self, x, *args, **params): ... +# keep in sync with `numpy._core.shape_base.vstack` +@overload +def vstack( + tup: Sequence[_ArrayLike[_ScalarT]], + *, + dtype: None = None, + casting: _CastingKind = "same_kind" +) -> _MArray[_ScalarT]: ... +@overload +def vstack( + tup: Sequence[ArrayLike], + *, + dtype: _DTypeLike[_ScalarT], + casting: _CastingKind = "same_kind" +) -> _MArray[_ScalarT]: ... +@overload +def vstack( + tup: Sequence[ArrayLike], + *, + dtype: DTypeLike | None = None, + casting: _CastingKind = "same_kind" +) -> _MArray[Incomplete]: ... 
-class _fromnxfunction_seq(_fromnxfunction): - def __call__(self, x, *args, **params): ... +row_stack = vstack -class _fromnxfunction_allargs(_fromnxfunction): - def __call__(self, *args, **params): ... +# keep in sync with `numpy._core.shape_base.hstack` +@overload +def hstack( + tup: Sequence[_ArrayLike[_ScalarT]], + *, + dtype: None = None, + casting: _CastingKind = "same_kind" +) -> _MArray[_ScalarT]: ... +@overload +def hstack( + tup: Sequence[ArrayLike], + *, + dtype: _DTypeLike[_ScalarT], + casting: _CastingKind = "same_kind" +) -> _MArray[_ScalarT]: ... +@overload +def hstack( + tup: Sequence[ArrayLike], + *, + dtype: DTypeLike | None = None, + casting: _CastingKind = "same_kind" +) -> _MArray[Incomplete]: ... -atleast_1d: _fromnxfunction_allargs -atleast_2d: _fromnxfunction_allargs -atleast_3d: _fromnxfunction_allargs +# keep in sync with `numpy._core.shape_base_impl.column_stack` +@overload +def column_stack(tup: Sequence[_ArrayLike[_ScalarT]]) -> _MArray[_ScalarT]: ... +@overload +def column_stack(tup: Sequence[ArrayLike]) -> _MArray[Incomplete]: ... -vstack: _fromnxfunction_seq -row_stack: _fromnxfunction_seq -hstack: _fromnxfunction_seq -column_stack: _fromnxfunction_seq -dstack: _fromnxfunction_seq -stack: _fromnxfunction_seq +# keep in sync with `numpy._core.shape_base_impl.dstack` +@overload +def dstack(tup: Sequence[_ArrayLike[_ScalarT]]) -> _MArray[_ScalarT]: ... +@overload +def dstack(tup: Sequence[ArrayLike]) -> _MArray[Incomplete]: ... -hsplit: _fromnxfunction_single -diagflat: _fromnxfunction_single +# keep in sync with `numpy._core.shape_base.stack` +@overload +def stack( + arrays: Sequence[_ArrayLike[_ScalarT]], + axis: SupportsIndex = 0, + out: None = None, + *, + dtype: None = None, + casting: _CastingKind = "same_kind" +) -> _MArray[_ScalarT]: ... 
+@overload +def stack( + arrays: Sequence[ArrayLike], + axis: SupportsIndex = 0, + out: None = None, + *, + dtype: _DTypeLike[_ScalarT], + casting: _CastingKind = "same_kind" +) -> _MArray[_ScalarT]: ... +@overload +def stack( + arrays: Sequence[ArrayLike], + axis: SupportsIndex = 0, + out: None = None, + *, + dtype: DTypeLike | None = None, + casting: _CastingKind = "same_kind" +) -> _MArray[Incomplete]: ... +@overload +def stack( + arrays: Sequence[ArrayLike], + axis: SupportsIndex, + out: _MArrayT, + *, + dtype: DTypeLike | None = None, + casting: _CastingKind = "same_kind", +) -> _MArrayT: ... +@overload +def stack( + arrays: Sequence[ArrayLike], + axis: SupportsIndex = 0, + *, + out: _MArrayT, + dtype: DTypeLike | None = None, + casting: _CastingKind = "same_kind", +) -> _MArrayT: ... + +# keep in sync with `numpy._core.shape_base_impl.hsplit` +@overload +def hsplit(ary: _ArrayLike[_ScalarT], indices_or_sections: _ShapeLike) -> list[_MArray[_ScalarT]]: ... +@overload +def hsplit(ary: ArrayLike, indices_or_sections: _ShapeLike) -> list[_MArray[Incomplete]]: ... + +# keep in sync with `numpy._core.twodim_base_impl.hsplit` +@overload +def diagflat(v: _ArrayLike[_ScalarT], k: int = 0) -> _MArray[_ScalarT]: ... +@overload +def diagflat(v: ArrayLike, k: int = 0) -> _MArray[Incomplete]: ... + +# TODO: everything below + +def count_masked(arr, axis=None): ... +def masked_all(shape, dtype=float): ... # noqa: PYI014 +def masked_all_like(arr): ... def apply_along_axis(func1d, axis, arr, *args, **kwargs): ... def apply_over_axes(func, a, axes): ... 
diff --git a/numpy/ma/tests/test_extras.py b/numpy/ma/tests/test_extras.py index 07f0fbdd7f97..1993ffe3e90d 100644 --- a/numpy/ma/tests/test_extras.py +++ b/numpy/ma/tests/test_extras.py @@ -5,6 +5,7 @@ :contact: pierregm_at_uga_dot_edu """ +import inspect import itertools import pytest @@ -1811,6 +1812,18 @@ def test_shape_scalar(self): assert_equal(b.shape, (1, 1)) assert_equal(b.mask.shape, b.data.shape) + @pytest.mark.parametrize("fn", [atleast_1d, vstack, diagflat]) + def test_inspect_signature(self, fn): + name = fn.__name__ + assert getattr(np.ma, name) is fn + + assert fn.__module__ == "numpy.ma.extras" + + wrapped = getattr(np, fn.__name__) + sig_wrapped = inspect.signature(wrapped) + sig = inspect.signature(fn) + assert sig == sig_wrapped + class TestNDEnumerate: From f1b6878d9a48d2c1bb208a0be0b00b63ae977f53 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Mon, 17 Nov 2025 21:28:04 +0100 Subject: [PATCH 0884/1718] TYP: ``_core.numeric``: shape-typing and fixed overlapping overloads (#30233) --- numpy/_core/numeric.pyi | 525 ++++++++---------- .../tests/data/reveal/array_constructors.pyi | 20 +- numpy/typing/tests/data/reveal/numeric.pyi | 153 +++-- 3 files changed, 343 insertions(+), 355 deletions(-) diff --git a/numpy/_core/numeric.pyi b/numpy/_core/numeric.pyi index 48576c77fd56..9be6000f0a8a 100644 --- a/numpy/_core/numeric.pyi +++ b/numpy/_core/numeric.pyi @@ -1,11 +1,10 @@ +from _typeshed import Incomplete from builtins import bool as py_bool from collections.abc import Callable, Sequence from typing import ( Any, Final, Literal as L, - Never, - NoReturn, SupportsAbs, SupportsIndex, TypeAlias, @@ -36,14 +35,21 @@ from numpy._typing import ( _ArrayLikeComplex_co, _ArrayLikeFloat_co, _ArrayLikeInt_co, - _ArrayLikeObject_co, + _ArrayLikeNumber_co, _ArrayLikeTD64_co, - _ArrayLikeUInt_co, + _CDoubleCodes, + _Complex128Codes, + _DoubleCodes, _DTypeLike, + _DTypeLikeBool, + _Float64Codes, + _IntCodes, _NestedSequence, + _NumberLike_co, _ScalarLike_co, 
_Shape, _ShapeLike, + _SupportsArray, _SupportsArrayFunc, _SupportsDType, ) @@ -628,10 +634,12 @@ __all__ = [ _T = TypeVar("_T") _ScalarT = TypeVar("_ScalarT", bound=generic) +_NumberObjectT = TypeVar("_NumberObjectT", bound=number | object_) _NumericScalarT = TypeVar("_NumericScalarT", bound=number | timedelta64 | object_) _DTypeT = TypeVar("_DTypeT", bound=dtype) _ArrayT = TypeVar("_ArrayT", bound=np.ndarray[Any, Any]) _ShapeT = TypeVar("_ShapeT", bound=_Shape) + _AnyShapeT = TypeVar( "_AnyShapeT", tuple[()], @@ -641,9 +649,42 @@ _AnyShapeT = TypeVar( tuple[int, int, int, int], tuple[int, ...], ) +_AnyNumericScalarT = TypeVar( + "_AnyNumericScalarT", + np.int8, np.int16, np.int32, np.int64, + np.uint8, np.uint16, np.uint32, np.uint64, + np.float16, np.float32, np.float64, np.longdouble, + np.complex64, np.complex128, np.clongdouble, + np.timedelta64, + np.object_, +) _CorrelateMode: TypeAlias = L["valid", "same", "full"] +_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] +_Array2D: TypeAlias = np.ndarray[tuple[int, int], np.dtype[_ScalarT]] +_Array3D: TypeAlias = np.ndarray[tuple[int, int, int], np.dtype[_ScalarT]] +_Array4D: TypeAlias = np.ndarray[tuple[int, int, int, int], np.dtype[_ScalarT]] + +_Int_co: TypeAlias = np.integer | np.bool +_Float_co: TypeAlias = np.floating | _Int_co +_Number_co: TypeAlias = np.number | np.bool +_TD64_co: TypeAlias = np.timedelta64 | _Int_co + +_ArrayLike1D: TypeAlias = _SupportsArray[np.dtype[_ScalarT]] | Sequence[_ScalarT] +_ArrayLike1DBool_co: TypeAlias = _SupportsArray[np.dtype[np.bool]] | Sequence[py_bool | np.bool] +_ArrayLike1DInt_co: TypeAlias = _SupportsArray[np.dtype[_Int_co]] | Sequence[int | _Int_co] +_ArrayLike1DFloat_co: TypeAlias = _SupportsArray[np.dtype[_Float_co]] | Sequence[float | _Float_co] +_ArrayLike1DNumber_co: TypeAlias = _SupportsArray[np.dtype[_Number_co]] | Sequence[complex | _Number_co] +_ArrayLike1DTD64_co: TypeAlias = _ArrayLike1D[_TD64_co] +_ArrayLike1DObject_co: TypeAlias = 
_ArrayLike1D[np.object_] + +_DTypeLikeInt: TypeAlias = type[int] | _IntCodes +_DTypeLikeFloat64: TypeAlias = type[float] | _Float64Codes | _DoubleCodes +_DTypeLikeComplex128: TypeAlias = type[complex] | _Complex128Codes | _CDoubleCodes + +### + # keep in sync with `ones_like` @overload def zeros_like( @@ -913,249 +954,100 @@ def count_nonzero( def count_nonzero(a: ArrayLike, axis: _ShapeLike | None = None, *, keepdims: py_bool = False) -> Any: ... # -def isfortran(a: NDArray[Any] | generic) -> py_bool: ... - -def argwhere(a: ArrayLike) -> NDArray[intp]: ... +def isfortran(a: ndarray | generic) -> py_bool: ... -def flatnonzero(a: ArrayLike) -> NDArray[intp]: ... +# +def argwhere(a: ArrayLike) -> _Array2D[np.intp]: ... +def flatnonzero(a: ArrayLike) -> _Array1D[np.intp]: ... +# keep in sync with `convolve` @overload def correlate( - a: _ArrayLike[Never], - v: _ArrayLike[Never], - mode: _CorrelateMode = "valid", -) -> NDArray[Any]: ... -@overload -def correlate( - a: _ArrayLikeBool_co, - v: _ArrayLikeBool_co, - mode: _CorrelateMode = "valid", -) -> NDArray[np.bool]: ... + a: _ArrayLike1D[_AnyNumericScalarT], v: _ArrayLike1D[_AnyNumericScalarT], mode: _CorrelateMode = "valid" +) -> _Array1D[_AnyNumericScalarT]: ... @overload -def correlate( - a: _ArrayLikeUInt_co, - v: _ArrayLikeUInt_co, - mode: _CorrelateMode = "valid", -) -> NDArray[unsignedinteger]: ... -@overload -def correlate( - a: _ArrayLikeInt_co, - v: _ArrayLikeInt_co, - mode: _CorrelateMode = "valid", -) -> NDArray[signedinteger]: ... +def correlate(a: _ArrayLike1DBool_co, v: _ArrayLike1DBool_co, mode: _CorrelateMode = "valid") -> _Array1D[np.bool]: ... @overload -def correlate( - a: _ArrayLikeFloat_co, - v: _ArrayLikeFloat_co, - mode: _CorrelateMode = "valid", -) -> NDArray[floating]: ... +def correlate(a: _ArrayLike1DInt_co, v: _ArrayLike1DInt_co, mode: _CorrelateMode = "valid") -> _Array1D[np.int_ | Any]: ... 
@overload -def correlate( - a: _ArrayLikeComplex_co, - v: _ArrayLikeComplex_co, - mode: _CorrelateMode = "valid", -) -> NDArray[complexfloating]: ... +def correlate(a: _ArrayLike1DFloat_co, v: _ArrayLike1DFloat_co, mode: _CorrelateMode = "valid") -> _Array1D[np.float64 | Any]: ... @overload def correlate( - a: _ArrayLikeTD64_co, - v: _ArrayLikeTD64_co, - mode: _CorrelateMode = "valid", -) -> NDArray[timedelta64]: ... + a: _ArrayLike1DNumber_co, v: _ArrayLike1DNumber_co, mode: _CorrelateMode = "valid" +) -> _Array1D[np.complex128 | Any]: ... @overload def correlate( - a: _ArrayLikeObject_co, - v: _ArrayLikeObject_co, - mode: _CorrelateMode = "valid", -) -> NDArray[object_]: ... + a: _ArrayLike1DTD64_co, v: _ArrayLike1DTD64_co, mode: _CorrelateMode = "valid" +) -> _Array1D[np.timedelta64 | Any]: ... +# keep in sync with `correlate` @overload def convolve( - a: _ArrayLike[Never], - v: _ArrayLike[Never], - mode: _CorrelateMode = "full", -) -> NDArray[Any]: ... -@overload -def convolve( - a: _ArrayLikeBool_co, - v: _ArrayLikeBool_co, - mode: _CorrelateMode = "full", -) -> NDArray[np.bool]: ... -@overload -def convolve( - a: _ArrayLikeUInt_co, - v: _ArrayLikeUInt_co, - mode: _CorrelateMode = "full", -) -> NDArray[unsignedinteger]: ... + a: _ArrayLike1D[_AnyNumericScalarT], v: _ArrayLike1D[_AnyNumericScalarT], mode: _CorrelateMode = "valid" +) -> _Array1D[_AnyNumericScalarT]: ... @overload -def convolve( - a: _ArrayLikeInt_co, - v: _ArrayLikeInt_co, - mode: _CorrelateMode = "full", -) -> NDArray[signedinteger]: ... +def convolve(a: _ArrayLike1DBool_co, v: _ArrayLike1DBool_co, mode: _CorrelateMode = "valid") -> _Array1D[np.bool]: ... @overload -def convolve( - a: _ArrayLikeFloat_co, - v: _ArrayLikeFloat_co, - mode: _CorrelateMode = "full", -) -> NDArray[floating]: ... +def convolve(a: _ArrayLike1DInt_co, v: _ArrayLike1DInt_co, mode: _CorrelateMode = "valid") -> _Array1D[np.int_ | Any]: ... 
@overload
-def convolve(
-    a: _ArrayLikeComplex_co,
-    v: _ArrayLikeComplex_co,
-    mode: _CorrelateMode = "full",
-) -> NDArray[complexfloating]: ...
+def convolve(a: _ArrayLike1DFloat_co, v: _ArrayLike1DFloat_co, mode: _CorrelateMode = "full") -> _Array1D[np.float64 | Any]: ...
 @overload
 def convolve(
-    a: _ArrayLikeTD64_co,
-    v: _ArrayLikeTD64_co,
-    mode: _CorrelateMode = "full",
-) -> NDArray[timedelta64]: ...
+    a: _ArrayLike1DNumber_co, v: _ArrayLike1DNumber_co, mode: _CorrelateMode = "full"
+) -> _Array1D[np.complex128 | Any]: ...
 @overload
 def convolve(
-    a: _ArrayLikeObject_co,
-    v: _ArrayLikeObject_co,
-    mode: _CorrelateMode = "full",
-) -> NDArray[object_]: ...
+    a: _ArrayLike1DTD64_co, v: _ArrayLike1DTD64_co, mode: _CorrelateMode = "full"
+) -> _Array1D[np.timedelta64 | Any]: ...
 
+# keep roughly in sync with `convolve` and `correlate`, but for 2-D output and an additional `out` overload
 @overload
 def outer(
-    a: _ArrayLike[Never],
-    b: _ArrayLike[Never],
-    out: None = None,
-) -> NDArray[Any]: ...
-@overload
-def outer(
-    a: _ArrayLikeBool_co,
-    b: _ArrayLikeBool_co,
-    out: None = None,
-) -> NDArray[np.bool]: ...
-@overload
-def outer(
-    a: _ArrayLikeUInt_co,
-    b: _ArrayLikeUInt_co,
-    out: None = None,
-) -> NDArray[unsignedinteger]: ...
+    a: _ArrayLike[_AnyNumericScalarT], b: _ArrayLike[_AnyNumericScalarT], out: None = None
+) -> _Array2D[_AnyNumericScalarT]: ...
 @overload
-def outer(
-    a: _ArrayLikeInt_co,
-    b: _ArrayLikeInt_co,
-    out: None = None,
-) -> NDArray[signedinteger]: ...
+def outer(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co, out: None = None) -> _Array2D[np.bool]: ...
 @overload
-def outer(
-    a: _ArrayLikeFloat_co,
-    b: _ArrayLikeFloat_co,
-    out: None = None,
-) -> NDArray[floating]: ...
+def outer(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, out: None = None) -> _Array2D[np.int_ | Any]: ...
 @overload
 def outer(
-    a: _ArrayLikeComplex_co,
-    b: _ArrayLikeComplex_co,
-    out: None = None,
-) -> NDArray[complexfloating]: ...
+def outer(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, out: None = None) -> _Array2D[np.float64 | Any]: ... @overload -def outer( - a: _ArrayLikeTD64_co, - b: _ArrayLikeTD64_co, - out: None = None, -) -> NDArray[timedelta64]: ... +def outer(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, out: None = None) -> _Array2D[np.complex128 | Any]: ... @overload -def outer( - a: _ArrayLikeObject_co, - b: _ArrayLikeObject_co, - out: None = None, -) -> NDArray[object_]: ... +def outer(a: _ArrayLikeTD64_co, b: _ArrayLikeTD64_co, out: None = None) -> _Array2D[np.timedelta64 | Any]: ... @overload -def outer( - a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, - b: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, - out: _ArrayT, -) -> _ArrayT: ... +def outer(a: _ArrayLikeNumber_co | _ArrayLikeTD64_co, b: _ArrayLikeNumber_co | _ArrayLikeTD64_co, out: _ArrayT) -> _ArrayT: ... # keep in sync with numpy.linalg._linalg.tensordot (ignoring `/, *`) @overload def tensordot( - a: _ArrayLike[_NumericScalarT], - b: _ArrayLike[_NumericScalarT], - axes: int | tuple[_ShapeLike, _ShapeLike] = 2, -) -> NDArray[_NumericScalarT]: ... + a: _ArrayLike[_AnyNumericScalarT], b: _ArrayLike[_AnyNumericScalarT], axes: int | tuple[_ShapeLike, _ShapeLike] = 2 +) -> NDArray[_AnyNumericScalarT]: ... @overload -def tensordot( - a: _ArrayLikeBool_co, - b: _ArrayLikeBool_co, - axes: int | tuple[_ShapeLike, _ShapeLike] = 2, -) -> NDArray[bool_]: ... +def tensordot(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co, axes: int | tuple[_ShapeLike, _ShapeLike] = 2) -> NDArray[np.bool]: ... @overload def tensordot( - a: _ArrayLikeInt_co, - b: _ArrayLikeInt_co, - axes: int | tuple[_ShapeLike, _ShapeLike] = 2, -) -> NDArray[int_ | Any]: ... + a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, axes: int | tuple[_ShapeLike, _ShapeLike] = 2 +) -> NDArray[np.int_ | Any]: ... 
@overload def tensordot( - a: _ArrayLikeFloat_co, - b: _ArrayLikeFloat_co, - axes: int | tuple[_ShapeLike, _ShapeLike] = 2, -) -> NDArray[float64 | Any]: ... + a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, axes: int | tuple[_ShapeLike, _ShapeLike] = 2 +) -> NDArray[np.float64 | Any]: ... @overload def tensordot( - a: _ArrayLikeComplex_co, - b: _ArrayLikeComplex_co, - axes: int | tuple[_ShapeLike, _ShapeLike] = 2, -) -> NDArray[complex128 | Any]: ... - -@overload -def roll( - a: _ArrayLike[_ScalarT], - shift: _ShapeLike, - axis: _ShapeLike | None = None, -) -> NDArray[_ScalarT]: ... -@overload -def roll( - a: ArrayLike, - shift: _ShapeLike, - axis: _ShapeLike | None = None, -) -> NDArray[Any]: ... - -def rollaxis( - a: NDArray[_ScalarT], - axis: int, - start: int = 0, -) -> NDArray[_ScalarT]: ... + a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, axes: int | tuple[_ShapeLike, _ShapeLike] = 2 +) -> NDArray[np.complex128 | Any]: ... -def moveaxis( - a: NDArray[_ScalarT], - source: _ShapeLike, - destination: _ShapeLike, -) -> NDArray[_ScalarT]: ... - -@overload -def cross( - a: _ArrayLike[Never], - b: _ArrayLike[Never], - axisa: int = -1, - axisb: int = -1, - axisc: int = -1, - axis: int | None = None, -) -> NDArray[Any]: ... -@overload -def cross( - a: _ArrayLikeBool_co, - b: _ArrayLikeBool_co, - axisa: int = -1, - axisb: int = -1, - axisc: int = -1, - axis: int | None = None, -) -> NoReturn: ... +# @overload def cross( - a: _ArrayLikeUInt_co, - b: _ArrayLikeUInt_co, + a: _ArrayLike[_AnyNumericScalarT], + b: _ArrayLike[_AnyNumericScalarT], axisa: int = -1, axisb: int = -1, axisc: int = -1, axis: int | None = None, -) -> NDArray[unsignedinteger]: ... +) -> NDArray[_AnyNumericScalarT]: ... @overload def cross( a: _ArrayLikeInt_co, @@ -1164,7 +1056,7 @@ def cross( axisb: int = -1, axisc: int = -1, axis: int | None = None, -) -> NDArray[signedinteger]: ... +) -> NDArray[np.int_ | Any]: ... 
@overload def cross( a: _ArrayLikeFloat_co, @@ -1173,7 +1065,7 @@ def cross( axisb: int = -1, axisc: int = -1, axis: int | None = None, -) -> NDArray[floating]: ... +) -> NDArray[np.float64 | Any]: ... @overload def cross( a: _ArrayLikeComplex_co, @@ -1182,109 +1074,104 @@ def cross( axisb: int = -1, axisc: int = -1, axis: int | None = None, -) -> NDArray[complexfloating]: ... -@overload -def cross( - a: _ArrayLikeObject_co, - b: _ArrayLikeObject_co, - axisa: int = -1, - axisb: int = -1, - axisc: int = -1, - axis: int | None = None, -) -> NDArray[object_]: ... +) -> NDArray[np.complex128 | Any]: ... +# @overload -def indices( - dimensions: Sequence[int], - dtype: type[int] = ..., - sparse: L[False] = False, -) -> NDArray[int_]: ... -@overload -def indices( - dimensions: Sequence[int], - dtype: type[int], - sparse: L[True], -) -> tuple[NDArray[int_], ...]: ... -@overload -def indices( - dimensions: Sequence[int], - dtype: type[int] = ..., - *, - sparse: L[True], -) -> tuple[NDArray[int_], ...]: ... -@overload -def indices( - dimensions: Sequence[int], - dtype: _DTypeLike[_ScalarT], - sparse: L[False] = False, -) -> NDArray[_ScalarT]: ... -@overload -def indices( - dimensions: Sequence[int], - dtype: _DTypeLike[_ScalarT], - sparse: L[True], -) -> tuple[NDArray[_ScalarT], ...]: ... +def roll(a: _ArrayT, shift: _ShapeLike, axis: _ShapeLike | None = None) -> _ArrayT: ... @overload -def indices( - dimensions: Sequence[int], - dtype: DTypeLike | None = ..., - sparse: L[False] = False, -) -> NDArray[Any]: ... +def roll(a: _ArrayLike[_ScalarT], shift: _ShapeLike, axis: _ShapeLike | None = None) -> NDArray[_ScalarT]: ... @overload +def roll(a: ArrayLike, shift: _ShapeLike, axis: _ShapeLike | None = None) -> NDArray[Any]: ... + +# +def rollaxis(a: _ArrayT, axis: int, start: int = 0) -> _ArrayT: ... +def moveaxis(a: _ArrayT, source: _ShapeLike, destination: _ShapeLike) -> _ArrayT: ... 
+ +# +@overload # 0d, dtype=int (default), sparse=False (default) +def indices(dimensions: tuple[()], dtype: type[int] = int, sparse: L[False] = False) -> _Array1D[np.intp]: ... +@overload # 0d, dtype=, sparse=True +def indices(dimensions: tuple[()], dtype: DTypeLike | None = int, *, sparse: L[True]) -> tuple[()]: ... +@overload # 0d, dtype=, sparse=False (default) +def indices(dimensions: tuple[()], dtype: _DTypeLike[_ScalarT], sparse: L[False] = False) -> _Array1D[_ScalarT]: ... +@overload # 0d, dtype=, sparse=False (default) +def indices(dimensions: tuple[()], dtype: DTypeLike, sparse: L[False] = False) -> _Array1D[Any]: ... +@overload # 1d, dtype=int (default), sparse=False (default) +def indices(dimensions: tuple[int], dtype: type[int] = int, sparse: L[False] = False) -> _Array2D[np.intp]: ... +@overload # 1d, dtype=int (default), sparse=True +def indices(dimensions: tuple[int], dtype: type[int] = int, *, sparse: L[True]) -> tuple[_Array1D[np.intp]]: ... +@overload # 1d, dtype=, sparse=False (default) +def indices(dimensions: tuple[int], dtype: _DTypeLike[_ScalarT], sparse: L[False] = False) -> _Array2D[_ScalarT]: ... +@overload # 1d, dtype=, sparse=True +def indices(dimensions: tuple[int], dtype: _DTypeLike[_ScalarT], sparse: L[True]) -> tuple[_Array1D[_ScalarT]]: ... +@overload # 1d, dtype=, sparse=False (default) +def indices(dimensions: tuple[int], dtype: DTypeLike, sparse: L[False] = False) -> _Array2D[Any]: ... +@overload # 1d, dtype=, sparse=True +def indices(dimensions: tuple[int], dtype: DTypeLike, sparse: L[True]) -> tuple[_Array1D[Any]]: ... +@overload # 2d, dtype=int (default), sparse=False (default) +def indices(dimensions: tuple[int, int], dtype: type[int] = int, sparse: L[False] = False) -> _Array3D[np.intp]: ... +@overload # 2d, dtype=int (default), sparse=True def indices( - dimensions: Sequence[int], - dtype: DTypeLike | None, - sparse: L[True], -) -> tuple[NDArray[Any], ...]: ... 
-@overload + dimensions: tuple[int, int], dtype: type[int] = int, *, sparse: L[True] +) -> tuple[_Array2D[np.intp], _Array2D[np.intp]]: ... +@overload # 2d, dtype=, sparse=False (default) +def indices(dimensions: tuple[int, int], dtype: _DTypeLike[_ScalarT], sparse: L[False] = False) -> _Array3D[_ScalarT]: ... +@overload # 2d, dtype=, sparse=True def indices( - dimensions: Sequence[int], - dtype: DTypeLike | None = ..., - *, - sparse: L[True], -) -> tuple[NDArray[Any], ...]: ... + dimensions: tuple[int, int], dtype: _DTypeLike[_ScalarT], sparse: L[True] +) -> tuple[_Array2D[_ScalarT], _Array2D[_ScalarT]]: ... +@overload # 2d, dtype=, sparse=False (default) +def indices(dimensions: tuple[int, int], dtype: DTypeLike, sparse: L[False] = False) -> _Array3D[Any]: ... +@overload # 2d, dtype=, sparse=True +def indices(dimensions: tuple[int, int], dtype: DTypeLike, sparse: L[True]) -> tuple[_Array2D[Any], _Array2D[Any]]: ... +@overload # ?d, dtype=int (default), sparse=False (default) +def indices(dimensions: Sequence[int], dtype: type[int] = int, sparse: L[False] = False) -> NDArray[np.intp]: ... +@overload # ?d, dtype=int (default), sparse=True +def indices(dimensions: Sequence[int], dtype: type[int] = int, *, sparse: L[True]) -> tuple[NDArray[np.intp], ...]: ... +@overload # ?d, dtype=, sparse=False (default) +def indices(dimensions: Sequence[int], dtype: _DTypeLike[_ScalarT], sparse: L[False] = False) -> NDArray[_ScalarT]: ... +@overload # ?d, dtype=, sparse=True +def indices(dimensions: Sequence[int], dtype: _DTypeLike[_ScalarT], sparse: L[True]) -> tuple[NDArray[_ScalarT], ...]: ... +@overload # ?d, dtype=, sparse=False (default) +def indices(dimensions: Sequence[int], dtype: DTypeLike, sparse: L[False] = False) -> ndarray: ... +@overload # ?d, dtype=, sparse=True +def indices(dimensions: Sequence[int], dtype: DTypeLike, sparse: L[True]) -> tuple[ndarray, ...]: ... 
+# def fromfunction( function: Callable[..., _T], shape: Sequence[int], *, - dtype: DTypeLike | None = ..., + dtype: DTypeLike | None = float, like: _SupportsArrayFunc | None = None, - **kwargs: Any, + **kwargs: object, ) -> _T: ... +# def isscalar(element: object) -> TypeGuard[generic | complex | str | bytes | memoryview]: ... +# def binary_repr(num: SupportsIndex, width: int | None = None) -> str: ... +def base_repr(number: SupportsAbs[float], base: float = 2, padding: SupportsIndex | None = 0) -> str: ... -def base_repr( - number: SupportsAbs[float], - base: float = 2, - padding: SupportsIndex | None = 0, -) -> str: ... - -@overload -def identity( - n: int, - dtype: None = None, - *, - like: _SupportsArrayFunc | None = None, -) -> NDArray[float64]: ... -@overload -def identity( - n: int, - dtype: _DTypeLike[_ScalarT], - *, - like: _SupportsArrayFunc | None = None, -) -> NDArray[_ScalarT]: ... -@overload -def identity( - n: int, - dtype: DTypeLike | None = None, - *, - like: _SupportsArrayFunc | None = None, -) -> NDArray[Any]: ... +# +@overload # dtype: None (default) +def identity(n: int, dtype: None = None, *, like: _SupportsArrayFunc | None = None) -> _Array2D[np.float64]: ... +@overload # dtype: known scalar type +def identity(n: int, dtype: _DTypeLike[_ScalarT], *, like: _SupportsArrayFunc | None = None) -> _Array2D[_ScalarT]: ... +@overload # dtype: like bool +def identity(n: int, dtype: _DTypeLikeBool, *, like: _SupportsArrayFunc | None = None) -> _Array2D[np.bool]: ... +@overload # dtype: like int_ +def identity(n: int, dtype: _DTypeLikeInt, *, like: _SupportsArrayFunc | None = None) -> _Array2D[np.int_ | Any]: ... +@overload # dtype: like float64 +def identity(n: int, dtype: _DTypeLikeFloat64, *, like: _SupportsArrayFunc | None = None) -> _Array2D[np.float64 | Any]: ... +@overload # dtype: like complex128 +def identity(n: int, dtype: _DTypeLikeComplex128, *, like: _SupportsArrayFunc | None = None) -> _Array2D[np.complex128 | Any]: ... 
+@overload # dtype: unknown +def identity(n: int, dtype: DTypeLike, *, like: _SupportsArrayFunc | None = None) -> _Array2D[Incomplete]: ... +# def allclose( a: ArrayLike, b: ArrayLike, @@ -1293,30 +1180,80 @@ def allclose( equal_nan: py_bool = False, ) -> py_bool: ... -@overload +# +@overload # scalar, scalar def isclose( - a: _ScalarLike_co, - b: _ScalarLike_co, + a: _NumberLike_co, + b: _NumberLike_co, rtol: ArrayLike = 1e-5, atol: ArrayLike = 1e-8, equal_nan: py_bool = False, ) -> np.bool: ... -@overload +@overload # known shape, same shape or scalar +def isclose( + a: np.ndarray[_ShapeT], + b: np.ndarray[_ShapeT] | _NumberLike_co, + rtol: ArrayLike = 1e-5, + atol: ArrayLike = 1e-8, + equal_nan: py_bool = False, +) -> np.ndarray[_ShapeT, np.dtype[np.bool]]: ... +@overload # same shape or scalar, known shape +def isclose( + a: np.ndarray[_ShapeT] | _NumberLike_co, + b: np.ndarray[_ShapeT], + rtol: ArrayLike = 1e-5, + atol: ArrayLike = 1e-8, + equal_nan: py_bool = False, +) -> np.ndarray[_ShapeT, np.dtype[np.bool]]: ... +@overload # 1d sequence, <=1d array-like +def isclose( + a: Sequence[_NumberLike_co], + b: Sequence[_NumberLike_co] | _NumberLike_co | np.ndarray[tuple[int]], + rtol: ArrayLike = 1e-5, + atol: ArrayLike = 1e-8, + equal_nan: py_bool = False, +) -> np.ndarray[tuple[int], np.dtype[np.bool]]: ... +@overload # <=1d array-like, 1d sequence +def isclose( + a: Sequence[_NumberLike_co] | _NumberLike_co | np.ndarray[tuple[int]], + b: Sequence[_NumberLike_co], + rtol: ArrayLike = 1e-5, + atol: ArrayLike = 1e-8, + equal_nan: py_bool = False, +) -> np.ndarray[tuple[int], np.dtype[np.bool]]: ... +@overload # 2d sequence, <=2d array-like +def isclose( + a: Sequence[Sequence[_NumberLike_co]], + b: Sequence[Sequence[_NumberLike_co]] | Sequence[_NumberLike_co] | _NumberLike_co | np.ndarray[tuple[int] | tuple[int, int]], + rtol: ArrayLike = 1e-5, + atol: ArrayLike = 1e-8, + equal_nan: py_bool = False, +) -> np.ndarray[tuple[int], np.dtype[np.bool]]: ... 
+@overload  # <=2d array-like, 2d sequence
+def isclose(
+    a: Sequence[Sequence[_NumberLike_co]] | Sequence[_NumberLike_co] | _NumberLike_co | np.ndarray[tuple[int] | tuple[int, int]],
+    b: Sequence[Sequence[_NumberLike_co]],
+    rtol: ArrayLike = 1e-5,
+    atol: ArrayLike = 1e-8,
+    equal_nan: py_bool = False,
+) -> np.ndarray[tuple[int], np.dtype[np.bool]]: ...
+@overload  # unknown shape, unknown shape
 def isclose(
     a: ArrayLike,
     b: ArrayLike,
     rtol: ArrayLike = 1e-5,
     atol: ArrayLike = 1e-8,
     equal_nan: py_bool = False,
-) -> NDArray[np.bool]: ...
+) -> NDArray[np.bool] | Any: ...
 
+#
 def array_equal(a1: ArrayLike, a2: ArrayLike, equal_nan: py_bool = False) -> py_bool: ...
-
 def array_equiv(a1: ArrayLike, a2: ArrayLike) -> py_bool: ...
 
+#
 @overload
 def astype(
-    x: ndarray[_ShapeT, dtype],
+    x: ndarray[_ShapeT],
     dtype: _DTypeLike[_ScalarT],
     /,
     *,
@@ -1325,10 +1262,10 @@ def astype(
 ) -> ndarray[_ShapeT, dtype[_ScalarT]]: ...
 @overload
 def astype(
-    x: ndarray[_ShapeT, dtype],
+    x: ndarray[_ShapeT],
     dtype: DTypeLike | None,
     /,
     *,
     copy: py_bool = True,
     device: L["cpu"] | None = None,
-) -> ndarray[_ShapeT, dtype]: ...
+) -> ndarray[_ShapeT]: ...
diff --git a/numpy/typing/tests/data/reveal/array_constructors.pyi b/numpy/typing/tests/data/reveal/array_constructors.pyi
index b0ba67d6a087..ba8fc4db23c9 100644
--- a/numpy/typing/tests/data/reveal/array_constructors.pyi
+++ b/numpy/typing/tests/data/reveal/array_constructors.pyi
@@ -216,9 +216,23 @@ assert_type(np.indices([1, 2, 3], sparse=True), tuple[npt.NDArray[np.int_], ...]
assert_type(np.fromfunction(func, (3, 5)), SubClass[np.float64]) -assert_type(np.identity(10), npt.NDArray[np.float64]) -assert_type(np.identity(10, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.identity(10, dtype=int), npt.NDArray[Any]) +assert_type(np.identity(3), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.identity(3, dtype=np.int8), np.ndarray[tuple[int, int], np.dtype[np.int8]]) +assert_type(np.identity(3, dtype=bool), np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(np.identity(3, dtype="bool"), np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(np.identity(3, dtype="b1"), np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(np.identity(3, dtype="?"), np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(np.identity(3, dtype=int), np.ndarray[tuple[int, int], np.dtype[np.int_ | Any]]) +assert_type(np.identity(3, dtype="int"), np.ndarray[tuple[int, int], np.dtype[np.int_ | Any]]) +assert_type(np.identity(3, dtype="n"), np.ndarray[tuple[int, int], np.dtype[np.int_ | Any]]) +assert_type(np.identity(3, dtype=float), np.ndarray[tuple[int, int], np.dtype[np.float64 | Any]]) +assert_type(np.identity(3, dtype="float"), np.ndarray[tuple[int, int], np.dtype[np.float64 | Any]]) +assert_type(np.identity(3, dtype="f8"), np.ndarray[tuple[int, int], np.dtype[np.float64 | Any]]) +assert_type(np.identity(3, dtype="d"), np.ndarray[tuple[int, int], np.dtype[np.float64 | Any]]) +assert_type(np.identity(3, dtype=complex), np.ndarray[tuple[int, int], np.dtype[np.complex128 | Any]]) +assert_type(np.identity(3, dtype="complex"), np.ndarray[tuple[int, int], np.dtype[np.complex128 | Any]]) +assert_type(np.identity(3, dtype="c16"), np.ndarray[tuple[int, int], np.dtype[np.complex128 | Any]]) +assert_type(np.identity(3, dtype="D"), np.ndarray[tuple[int, int], np.dtype[np.complex128 | Any]]) assert_type(np.atleast_1d(A), npt.NDArray[np.float64]) assert_type(np.atleast_1d(C), npt.NDArray[Any]) diff --git 
a/numpy/typing/tests/data/reveal/numeric.pyi b/numpy/typing/tests/data/reveal/numeric.pyi index 247294cf34c6..24f97d2d0784 100644 --- a/numpy/typing/tests/data/reveal/numeric.pyi +++ b/numpy/typing/tests/data/reveal/numeric.pyi @@ -22,88 +22,125 @@ AR_c16: npt.NDArray[np.complex128] AR_m: npt.NDArray[np.timedelta64] AR_O: npt.NDArray[np.object_] -B: list[int] -C: SubClass +_sub_nd_i8: SubClass + +_to_1d_bool: list[bool] +_to_1d_int: list[int] +_to_1d_float: list[float] +_to_1d_complex: list[complex] + +### assert_type(np.count_nonzero(i8), np.intp) assert_type(np.count_nonzero(AR_i8), np.intp) -assert_type(np.count_nonzero(B), np.intp) +assert_type(np.count_nonzero(_to_1d_int), np.intp) assert_type(np.count_nonzero(AR_i8, keepdims=True), npt.NDArray[np.intp]) assert_type(np.count_nonzero(AR_i8, axis=0), Any) assert_type(np.isfortran(i8), bool) assert_type(np.isfortran(AR_i8), bool) -assert_type(np.argwhere(i8), npt.NDArray[np.intp]) -assert_type(np.argwhere(AR_i8), npt.NDArray[np.intp]) - -assert_type(np.flatnonzero(i8), npt.NDArray[np.intp]) -assert_type(np.flatnonzero(AR_i8), npt.NDArray[np.intp]) - -assert_type(np.correlate(B, AR_i8, mode="valid"), npt.NDArray[np.signedinteger]) -assert_type(np.correlate(AR_i8, AR_i8, mode="same"), npt.NDArray[np.signedinteger]) -assert_type(np.correlate(AR_b, AR_b), npt.NDArray[np.bool]) -assert_type(np.correlate(AR_b, AR_u8), npt.NDArray[np.unsignedinteger]) -assert_type(np.correlate(AR_i8, AR_b), npt.NDArray[np.signedinteger]) -assert_type(np.correlate(AR_i8, AR_f8), npt.NDArray[np.floating]) -assert_type(np.correlate(AR_i8, AR_c16), npt.NDArray[np.complexfloating]) -assert_type(np.correlate(AR_i8, AR_m), npt.NDArray[np.timedelta64]) -assert_type(np.correlate(AR_O, AR_O), npt.NDArray[np.object_]) - -assert_type(np.convolve(B, AR_i8, mode="valid"), npt.NDArray[np.signedinteger]) -assert_type(np.convolve(AR_i8, AR_i8, mode="same"), npt.NDArray[np.signedinteger]) -assert_type(np.convolve(AR_b, AR_b), npt.NDArray[np.bool]) 
-assert_type(np.convolve(AR_b, AR_u8), npt.NDArray[np.unsignedinteger]) -assert_type(np.convolve(AR_i8, AR_b), npt.NDArray[np.signedinteger]) -assert_type(np.convolve(AR_i8, AR_f8), npt.NDArray[np.floating]) -assert_type(np.convolve(AR_i8, AR_c16), npt.NDArray[np.complexfloating]) -assert_type(np.convolve(AR_i8, AR_m), npt.NDArray[np.timedelta64]) -assert_type(np.convolve(AR_O, AR_O), npt.NDArray[np.object_]) - -assert_type(np.outer(i8, AR_i8), npt.NDArray[np.signedinteger]) -assert_type(np.outer(B, AR_i8), npt.NDArray[np.signedinteger]) -assert_type(np.outer(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) -assert_type(np.outer(AR_i8, AR_i8, out=C), SubClass) -assert_type(np.outer(AR_b, AR_b), npt.NDArray[np.bool]) -assert_type(np.outer(AR_b, AR_u8), npt.NDArray[np.unsignedinteger]) -assert_type(np.outer(AR_i8, AR_b), npt.NDArray[np.signedinteger]) -assert_type(np.convolve(AR_i8, AR_f8), npt.NDArray[np.floating]) -assert_type(np.outer(AR_i8, AR_c16), npt.NDArray[np.complexfloating]) -assert_type(np.outer(AR_i8, AR_m), npt.NDArray[np.timedelta64]) -assert_type(np.outer(AR_O, AR_O), npt.NDArray[np.object_]) - -assert_type(np.tensordot(B, AR_i8), npt.NDArray[np.int_ | Any]) +assert_type(np.argwhere(i8), np.ndarray[tuple[int, int], np.dtype[np.intp]]) +assert_type(np.argwhere(AR_i8), np.ndarray[tuple[int, int], np.dtype[np.intp]]) + +assert_type(np.flatnonzero(i8), np.ndarray[tuple[int], np.dtype[np.intp]]) +assert_type(np.flatnonzero(AR_i8), np.ndarray[tuple[int], np.dtype[np.intp]]) + +# correlate +assert_type(np.correlate(AR_i8, AR_i8), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(np.correlate(AR_b, AR_b), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.correlate(AR_u8, AR_u8), np.ndarray[tuple[int], np.dtype[np.uint64]]) +assert_type(np.correlate(AR_i8, AR_i8), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(np.correlate(AR_f8, AR_f8), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.correlate(AR_f8, AR_i8), 
np.ndarray[tuple[int], np.dtype[np.float64 | Any]]) +assert_type(np.correlate(AR_c16, AR_c16), np.ndarray[tuple[int], np.dtype[np.complex128]]) +assert_type(np.correlate(AR_c16, AR_f8), np.ndarray[tuple[int], np.dtype[np.complex128 | Any]]) +assert_type(np.correlate(AR_m, AR_m), np.ndarray[tuple[int], np.dtype[np.timedelta64]]) +assert_type(np.correlate(AR_i8, AR_m), np.ndarray[tuple[int], np.dtype[np.timedelta64 | Any]]) +assert_type(np.correlate(AR_O, AR_O), np.ndarray[tuple[int], np.dtype[np.object_]]) +assert_type(np.correlate(_to_1d_bool, _to_1d_bool), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.correlate(_to_1d_int, _to_1d_int), np.ndarray[tuple[int], np.dtype[np.int_ | Any]]) +assert_type(np.correlate(_to_1d_float, _to_1d_float), np.ndarray[tuple[int], np.dtype[np.float64 | Any]]) +assert_type(np.correlate(_to_1d_complex, _to_1d_complex), np.ndarray[tuple[int], np.dtype[np.complex128 | Any]]) + +# convolve (same as correlate) +assert_type(np.convolve(AR_i8, AR_i8), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(np.convolve(AR_b, AR_b), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.convolve(AR_u8, AR_u8), np.ndarray[tuple[int], np.dtype[np.uint64]]) +assert_type(np.convolve(AR_i8, AR_i8), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(np.convolve(AR_f8, AR_f8), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.convolve(AR_f8, AR_i8), np.ndarray[tuple[int], np.dtype[np.float64 | Any]]) +assert_type(np.convolve(AR_c16, AR_c16), np.ndarray[tuple[int], np.dtype[np.complex128]]) +assert_type(np.convolve(AR_c16, AR_f8), np.ndarray[tuple[int], np.dtype[np.complex128 | Any]]) +assert_type(np.convolve(AR_m, AR_m), np.ndarray[tuple[int], np.dtype[np.timedelta64]]) +assert_type(np.convolve(AR_i8, AR_m), np.ndarray[tuple[int], np.dtype[np.timedelta64 | Any]]) +assert_type(np.convolve(AR_O, AR_O), np.ndarray[tuple[int], np.dtype[np.object_]]) +assert_type(np.convolve(_to_1d_bool, _to_1d_bool), 
np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.convolve(_to_1d_int, _to_1d_int), np.ndarray[tuple[int], np.dtype[np.int_ | Any]]) +assert_type(np.convolve(_to_1d_float, _to_1d_float), np.ndarray[tuple[int], np.dtype[np.float64 | Any]]) +assert_type(np.convolve(_to_1d_complex, _to_1d_complex), np.ndarray[tuple[int], np.dtype[np.complex128 | Any]]) + +# outer (very similar to above, but 2D output) +assert_type(np.outer(AR_i8, AR_i8), np.ndarray[tuple[int, int], np.dtype[np.int64]]) +assert_type(np.outer(AR_b, AR_b), np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(np.outer(AR_u8, AR_u8), np.ndarray[tuple[int, int], np.dtype[np.uint64]]) +assert_type(np.outer(AR_i8, AR_i8), np.ndarray[tuple[int, int], np.dtype[np.int64]]) +assert_type(np.outer(AR_f8, AR_f8), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.outer(AR_f8, AR_i8), np.ndarray[tuple[int, int], np.dtype[np.float64 | Any]]) +assert_type(np.outer(AR_c16, AR_c16), np.ndarray[tuple[int, int], np.dtype[np.complex128]]) +assert_type(np.outer(AR_c16, AR_f8), np.ndarray[tuple[int, int], np.dtype[np.complex128 | Any]]) +assert_type(np.outer(AR_m, AR_m), np.ndarray[tuple[int, int], np.dtype[np.timedelta64]]) +assert_type(np.outer(AR_i8, AR_m), np.ndarray[tuple[int, int], np.dtype[np.timedelta64 | Any]]) +assert_type(np.outer(AR_O, AR_O), np.ndarray[tuple[int, int], np.dtype[np.object_]]) +assert_type(np.outer(AR_i8, AR_i8, out=_sub_nd_i8), SubClass) +assert_type(np.outer(_to_1d_bool, _to_1d_bool), np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(np.outer(_to_1d_int, _to_1d_int), np.ndarray[tuple[int, int], np.dtype[np.int_ | Any]]) +assert_type(np.outer(_to_1d_float, _to_1d_float), np.ndarray[tuple[int, int], np.dtype[np.float64 | Any]]) +assert_type(np.outer(_to_1d_complex, _to_1d_complex), np.ndarray[tuple[int, int], np.dtype[np.complex128 | Any]]) + +# tensordot +assert_type(np.tensordot(AR_i8, AR_i8), npt.NDArray[np.int64]) assert_type(np.tensordot(AR_b, AR_b), 
npt.NDArray[np.bool]) assert_type(np.tensordot(AR_u8, AR_u8), npt.NDArray[np.uint64]) assert_type(np.tensordot(AR_i8, AR_i8), npt.NDArray[np.int64]) -assert_type(np.tensordot(AR_i8, AR_i8, axes=0), npt.NDArray[np.int64]) -assert_type(np.tensordot(AR_i8, AR_i8, axes=(0, 1)), npt.NDArray[np.int64]) assert_type(np.tensordot(AR_f8, AR_f8), npt.NDArray[np.float64]) +assert_type(np.tensordot(AR_f8, AR_i8), npt.NDArray[np.float64 | Any]) assert_type(np.tensordot(AR_c16, AR_c16), npt.NDArray[np.complex128]) +assert_type(np.tensordot(AR_c16, AR_f8), npt.NDArray[np.complex128 | Any]) assert_type(np.tensordot(AR_m, AR_m), npt.NDArray[np.timedelta64]) assert_type(np.tensordot(AR_O, AR_O), npt.NDArray[np.object_]) +assert_type(np.tensordot(_to_1d_bool, _to_1d_bool), npt.NDArray[np.bool]) +assert_type(np.tensordot(_to_1d_int, _to_1d_int), npt.NDArray[np.int_ | Any]) +assert_type(np.tensordot(_to_1d_float, _to_1d_float), npt.NDArray[np.float64 | Any]) +assert_type(np.tensordot(_to_1d_complex, _to_1d_complex), npt.NDArray[np.complex128 | Any]) + +# cross +assert_type(np.cross(AR_i8, AR_i8), npt.NDArray[np.int64]) +assert_type(np.cross(AR_u8, AR_u8), npt.NDArray[np.uint64]) +assert_type(np.cross(AR_i8, AR_i8), npt.NDArray[np.int64]) +assert_type(np.cross(AR_f8, AR_f8), npt.NDArray[np.float64]) +assert_type(np.cross(AR_f8, AR_i8), npt.NDArray[np.float64 | Any]) +assert_type(np.cross(AR_c16, AR_c16), npt.NDArray[np.complex128]) +assert_type(np.cross(AR_c16, AR_f8), npt.NDArray[np.complex128 | Any]) +assert_type(np.cross(AR_m, AR_m), npt.NDArray[np.timedelta64]) +assert_type(np.cross(AR_O, AR_O), npt.NDArray[np.object_]) +assert_type(np.cross(_to_1d_int, _to_1d_int), npt.NDArray[np.int_ | Any]) +assert_type(np.cross(_to_1d_float, _to_1d_float), npt.NDArray[np.float64 | Any]) +assert_type(np.cross(_to_1d_complex, _to_1d_complex), npt.NDArray[np.complex128 | Any]) assert_type(np.isscalar(i8), bool) assert_type(np.isscalar(AR_i8), bool) -assert_type(np.isscalar(B), bool) 
+assert_type(np.isscalar(_to_1d_int), bool) assert_type(np.roll(AR_i8, 1), npt.NDArray[np.int64]) assert_type(np.roll(AR_i8, (1, 2)), npt.NDArray[np.int64]) -assert_type(np.roll(B, 1), npt.NDArray[Any]) +assert_type(np.roll(_to_1d_int, 1), npt.NDArray[Any]) assert_type(np.rollaxis(AR_i8, 0, 1), npt.NDArray[np.int64]) assert_type(np.moveaxis(AR_i8, 0, 1), npt.NDArray[np.int64]) assert_type(np.moveaxis(AR_i8, (0, 1), (1, 2)), npt.NDArray[np.int64]) -assert_type(np.cross(B, AR_i8), npt.NDArray[np.signedinteger]) -assert_type(np.cross(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) -assert_type(np.cross(AR_b, AR_u8), npt.NDArray[np.unsignedinteger]) -assert_type(np.cross(AR_i8, AR_b), npt.NDArray[np.signedinteger]) -assert_type(np.cross(AR_i8, AR_f8), npt.NDArray[np.floating]) -assert_type(np.cross(AR_i8, AR_c16), npt.NDArray[np.complexfloating]) -assert_type(np.cross(AR_O, AR_O), npt.NDArray[np.object_]) - assert_type(np.indices([0, 1, 2]), npt.NDArray[np.int_]) assert_type(np.indices([0, 1, 2], sparse=True), tuple[npt.NDArray[np.int_], ...]) assert_type(np.indices([0, 1, 2], dtype=np.float64), npt.NDArray[np.float64]) @@ -116,18 +153,18 @@ assert_type(np.binary_repr(1), str) assert_type(np.base_repr(1), str) assert_type(np.allclose(i8, AR_i8), bool) -assert_type(np.allclose(B, AR_i8), bool) +assert_type(np.allclose(_to_1d_int, AR_i8), bool) assert_type(np.allclose(AR_i8, AR_i8), bool) assert_type(np.isclose(i8, i8), np.bool) assert_type(np.isclose(i8, AR_i8), npt.NDArray[np.bool]) -assert_type(np.isclose(B, AR_i8), npt.NDArray[np.bool]) +assert_type(np.isclose(_to_1d_int, _to_1d_int), np.ndarray[tuple[int], np.dtype[np.bool]]) assert_type(np.isclose(AR_i8, AR_i8), npt.NDArray[np.bool]) assert_type(np.array_equal(i8, AR_i8), bool) -assert_type(np.array_equal(B, AR_i8), bool) +assert_type(np.array_equal(_to_1d_int, AR_i8), bool) assert_type(np.array_equal(AR_i8, AR_i8), bool) assert_type(np.array_equiv(i8, AR_i8), bool) -assert_type(np.array_equiv(B, AR_i8), bool) 
+assert_type(np.array_equiv(_to_1d_int, AR_i8), bool) assert_type(np.array_equiv(AR_i8, AR_i8), bool) From ab06e8c3c96b280fd9747667342cd79fffb03d8e Mon Sep 17 00:00:00 2001 From: Amelia Thurdekoos Date: Mon, 17 Nov 2025 16:11:56 -0800 Subject: [PATCH 0885/1718] DOC: Update wording in numpy.coremath --- doc/source/reference/c-api/coremath.rst | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/doc/source/reference/c-api/coremath.rst b/doc/source/reference/c-api/coremath.rst index c07abb47bc10..cc46ba744a49 100644 --- a/doc/source/reference/c-api/coremath.rst +++ b/doc/source/reference/c-api/coremath.rst @@ -1,8 +1,7 @@ NumPy core math library ======================= -The numpy core math library (``npymath``) is a first step in this direction. This -library contains most math-related C99 functionality, which can be used on +This library contains most math-related C99 functionality, which can be used on platforms where C99 is not well supported. The core math functions have the same API as the C99 ones, except for the ``npy_*`` prefix. 
From a91953965abeb918fe925d82d49bdf50e0ef5dc2 Mon Sep 17 00:00:00 2001 From: mattip Date: Tue, 18 Nov 2025 09:05:17 +0200 Subject: [PATCH 0886/1718] DOC: remove mention of 'skip azp' since we no longer use azure [skip actions][skip cirrus] --- doc/RELEASE_WALKTHROUGH.rst | 2 +- doc/source/dev/development_workflow.rst | 7 ------- 2 files changed, 1 insertion(+), 8 deletions(-) diff --git a/doc/RELEASE_WALKTHROUGH.rst b/doc/RELEASE_WALKTHROUGH.rst index ac306c682148..c8c1f129c0b2 100644 --- a/doc/RELEASE_WALKTHROUGH.rst +++ b/doc/RELEASE_WALKTHROUGH.rst @@ -290,7 +290,7 @@ Update the ``version`` in ``pyproject.toml``:: $ gvim pyproject.toml Commit the result, edit the commit message, note the files in the commit, and -add a line ``[skip azp] [skip cirrus] [skip actions]``, then push:: +add a line ``[skip cirrus] [skip actions]``, then push:: $ git commit -a -m"MAINT: Prepare 2.4.x for further development" $ git rebase -i HEAD^ diff --git a/doc/source/dev/development_workflow.rst b/doc/source/dev/development_workflow.rst index fa5f8b1e65b7..10b07cc1f437 100644 --- a/doc/source/dev/development_workflow.rst +++ b/doc/source/dev/development_workflow.rst @@ -205,13 +205,6 @@ these fragments in each commit message of a PR: settings. `See the configuration files for these checks. `__ -* ``[skip azp]``: skip Azure jobs - - `Azure `__ is - where all comprehensive tests are run. This is an expensive run, and one you - could typically skip if you do documentation-only changes, for example. - `See the main configuration file for these checks. 
`__ - * ``[skip circle]``: skip CircleCI jobs `CircleCI `__ is where we build the documentation and From 195c4b2b8991356c4b9d5afe15d897c29c1ad058 Mon Sep 17 00:00:00 2001 From: Simola Nayak <14366306+simolanayak@users.noreply.github.com> Date: Tue, 18 Nov 2025 01:28:12 -0800 Subject: [PATCH 0887/1718] Merge pull request #30184 from simolanayak/test-numeric-lines MAINT: Enable linting with ruff E501 #28947 (test-numeric.py) --- numpy/_core/tests/test_numeric.py | 28 ++++++++++++++++++++-------- ruff.toml | 1 - 2 files changed, 20 insertions(+), 9 deletions(-) diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py index 485147500461..2c5535a19c77 100644 --- a/numpy/_core/tests/test_numeric.py +++ b/numpy/_core/tests/test_numeric.py @@ -1660,8 +1660,10 @@ def test_nonzero_onedim(self): # x = np.array([(1, 2), (0, 0), (1, 1), (-1, 3), (0, 7)], # dtype=[('a', 'i4'), ('b', 'i2')]) - x = np.array([(1, 2, -5, -3), (0, 0, 2, 7), (1, 1, 0, 1), (-1, 3, 1, 0), (0, 7, 0, 4)], - dtype=[('a', 'i4'), ('b', 'i2'), ('c', 'i1'), ('d', 'i8')]) + x = np.array( + [(1, 2, -5, -3), (0, 0, 2, 7), (1, 1, 0, 1), (-1, 3, 1, 0), (0, 7, 0, 4)], + dtype=[('a', 'i4'), ('b', 'i2'), ('c', 'i1'), ('d', 'i8')] + ) assert_equal(np.count_nonzero(x['a']), 3) assert_equal(np.count_nonzero(x['b']), 4) assert_equal(np.count_nonzero(x['c']), 3) @@ -1995,7 +1997,9 @@ def test_boolean(self): g1 = randint(0, 5, size=15) g2 = randint(0, 8, size=15) V[g1, g2] = -V[g1, g2] - assert_((np.array([a[0][V > 0], a[1][V > 0], a[2][V > 0]]) == a[:, V > 0]).all()) + assert_( + (np.array([a[0][V > 0], a[1][V > 0], a[2][V > 0]]) == a[:, V > 0]).all() + ) def test_boolean_edgecase(self): a = np.array([], dtype='int32') @@ -2261,7 +2265,10 @@ def test_array_equiv(self): res = np.array_equiv(np.array([1, 2]), np.array([[1], [2]])) assert_(not res) assert_(type(res) is bool) - res = np.array_equiv(np.array([1, 2]), np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])) + res = np.array_equiv( + np.array([1, 
2]), + np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), + ) assert_(not res) assert_(type(res) is bool) @@ -3116,7 +3123,9 @@ def tst_isclose_allclose(self, x, y): if np.isscalar(x) and np.isscalar(y): assert_(np.isclose(x, y) == np.allclose(x, y), msg=msg2 % (x, y)) else: - assert_array_equal(np.isclose(x, y).all(), np.allclose(x, y), msg % (x, y)) + assert_array_equal( + np.isclose(x, y).all(), np.allclose(x, y), msg % (x, y) + ) def test_ip_all_isclose(self): self._setup() @@ -3210,7 +3219,8 @@ def test_tol_warnings(self): for i in b: for j in b: - # Making sure that i and j are not both numbers, because that won't create a warning + # Making sure that i and j are not both numbers, + # because that won't create a warning if (i == 1) and (j == 1): continue @@ -3220,7 +3230,8 @@ def test_tol_warnings(self): c = np.isclose(a, a, atol=i, rtol=j) assert len(w) == 1 assert issubclass(w[-1].category, RuntimeWarning) - assert f"One of rtol or atol is not valid, atol: {i}, rtol: {j}" in str(w[-1].message) + expected = f"One of rtol or atol is not valid, atol: {i}, rtol: {j}" + assert expected in str(w[-1].message) class TestStdVar: @@ -4244,7 +4255,8 @@ def test_zero_dimension(self): def test_zero_dimensional(self): # gh-12130 arr_0d = np.array(1) - ret = np.tensordot(arr_0d, arr_0d, ([], [])) # contracting no axes is well defined + # contracting no axes is well defined + ret = np.tensordot(arr_0d, arr_0d, ([], [])) assert_array_equal(ret, arr_0d) diff --git a/ruff.toml b/ruff.toml index 8b4d8358ba4a..b25a34d45984 100644 --- a/ruff.toml +++ b/ruff.toml @@ -90,7 +90,6 @@ ignore = [ "numpy/_core/tests/test_multiarray.py" = ["E501"] "numpy/_core/tests/test_nditer*py" = ["E501"] "numpy/_core/tests/test_umath.py" = ["E501"] -"numpy/_core/tests/test_numeric.py" = ["E501"] "numpy/_core/tests/test_numerictypes.py" = ["E501"] "numpy/_core/tests/test_regression.py" = ["E501"] "numpy/_core/_add_newdocs.py" = ["E501"] From e4a68f45ad0be0ad34e03fb960559aa3f98ddb64 Mon Sep 17 00:00:00 
2001 From: Ralf Gommers Date: Thu, 6 Nov 2025 17:28:25 +0000 Subject: [PATCH 0888/1718] CI: add check for numpy-release version of scipy-openblas --- .github/workflows/linux.yml | 8 +++ .github/workflows/linux_blas.yml | 2 +- tools/check_openblas_version.py | 113 +++++++++++++++++++++++++++---- 3 files changed, 109 insertions(+), 14 deletions(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 5d1f29693155..c0effa244646 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -342,6 +342,14 @@ jobs: - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: '3.11' + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + with: + repository: numpy/numpy-release + path: numpy-release + persist-credentials: false + - name: Check scipy-openblas version in release pipelines + run: | + python tools/check_openblas_version.py --req-files numpy-release/requirements/openblas_requirements.txt - name: Install build and test dependencies from PyPI run: | pip install -r requirements/build_requirements.txt diff --git a/.github/workflows/linux_blas.yml b/.github/workflows/linux_blas.yml index 7b7647044002..fccc7e3ad84b 100644 --- a/.github/workflows/linux_blas.yml +++ b/.github/workflows/linux_blas.yml @@ -107,7 +107,7 @@ jobs: - name: Ensure scipy-openblas run: | set -ex - spin python tools/check_openblas_version.py 0.3.26 + spin python tools/check_openblas_version.py -- --min-version 0.3.30 - name: Test shell: 'script -q -e -c "bash --noprofile --norc -eo pipefail {0}"' diff --git a/tools/check_openblas_version.py b/tools/check_openblas_version.py index 9aa0b265dea5..085308b71171 100644 --- a/tools/check_openblas_version.py +++ b/tools/check_openblas_version.py @@ -1,20 +1,107 @@ """ -usage: check_openblas_version.py +Checks related to the OpenBLAS version used in CI. 
-Check the blas version is blas from scipy-openblas and is higher than -min_version -example: check_openblas_version.py 0.3.26 +Options: +1. Check that the BLAS used at build time is (a) scipy-openblas, and (b) its version is + higher than a given minimum version. Note: this method only seems to give + the first 3 version components, so 0.3.30.0.7 gets translated to 0.3.30 when reading + it back out from `scipy.show_config()`. +2. Check requirements files in the main numpy repo and compare with the numpy-release + repo. Goal is to ensure that `numpy-release` is not behind. + +Both of these checks are primarily useful in a CI job. + +Examples: + + # Requires install numpy + $ python check_openblas_version.py --min-version 0.3.30 + + # Only needs the requirements files + $ python check_openblas_version.py --req-files \ + ../numpy-release/requirements/openblas_requirements.txt """ +import argparse +import os.path import pprint -import sys -import numpy -version = sys.argv[1] -deps = numpy.show_config('dicts')['Build Dependencies'] -assert "blas" in deps -print("Build Dependencies: blas") -pprint.pprint(deps["blas"]) -assert deps["blas"]["version"].split(".") >= version.split(".") -assert deps["blas"]["name"] == "scipy-openblas" +def check_built_version(min_version): + import numpy + deps = numpy.show_config('dicts')['Build Dependencies'] + assert "blas" in deps + print("Build Dependencies: blas") + pprint.pprint(deps["blas"]) + assert deps["blas"]["version"].split(".") >= min_version.split(".") + assert deps["blas"]["name"] == "scipy-openblas" + + +def check_requirements_files(reqfile): + if not os.path.exists(reqfile): + print(f"Path does not exist: {reqfile}") + + def get_version(line): + req = line.split(";")[0].split("==")[1].split(".")[:5] + return tuple(int(s) for s in req) + + def parse_reqs(reqfile): + with open(reqfile) as f: + lines = f.readlines() + + v32 = None + v64 = None + for line in lines: + if "scipy-openblas32" in line: + v32 = get_version(line) + 
if "scipy-openblas64" in line: + v64 = get_version(line) + if v32 is None or v64 is None: + raise AssertionError("Expected `scipy-openblas32` and " + "`scipy-openblas64` in `ci_requirements.txt`, " + f"got:\n {' '.join(lines)}") + return v32, v64 + + this_dir = os.path.abspath(os.path.dirname(__file__)) + reqfile_thisrepo = os.path.join(this_dir, '..', 'requirements', + 'ci_requirements.txt') + + v32_thisrepo, v64_thisrepo = parse_reqs(reqfile_thisrepo) + v32_rel, v64_rel = parse_reqs(reqfile) + + def compare_versions(v_rel, v_thisrepo, bits): + if not v_rel >= v_thisrepo: + raise AssertionError(f"`numpy-release` version of scipy-openblas{bits} " + f"{v_rel} is behind this repo: {v_thisrepo}") + + compare_versions(v64_rel, v64_thisrepo, "64") + compare_versions(v32_rel, v32_thisrepo, "32") + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument( + "--req-files", + type=str, + help="Path to the requirements file to compare with the one in this repo" + ) + parser.add_argument( + "--min-version", + type=str, + help="The minimum version that should have been used at build time for " + "installed `numpy` package" + ) + args = parser.parse_args() + + if args.min_version is None and args.req_files is None: + raise ValueError("One of `--req-files` or `--min-version` needs to be " + "specified") + + if args.min_version: + check_built_version(args.min_version) + + if args.req_files: + check_requirements_files(args.req_files) + + +if __name__ == '__main__': + main() From 31ea0a31800645c6e0fcf2e5e54b714d1ff99ddd Mon Sep 17 00:00:00 2001 From: Kumar Aditya Date: Tue, 18 Nov 2025 21:45:28 +0530 Subject: [PATCH 0889/1718] BUG: fix data race in wrapping_auxdata_freelist by making it thread-local (#30228) --- numpy/_core/src/umath/wrapping_array_method.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/_core/src/umath/wrapping_array_method.c b/numpy/_core/src/umath/wrapping_array_method.c index b340e72bdbfd..924bac9524e9 100644 --- 
a/numpy/_core/src/umath/wrapping_array_method.c +++ b/numpy/_core/src/umath/wrapping_array_method.c @@ -83,8 +83,8 @@ typedef struct { #define WRAPPING_AUXDATA_FREELIST_SIZE 5 -static int wrapping_auxdata_freenum = 0; -static wrapping_auxdata *wrapping_auxdata_freelist[WRAPPING_AUXDATA_FREELIST_SIZE] = {NULL}; +static NPY_TLS int wrapping_auxdata_freenum = 0; +static NPY_TLS wrapping_auxdata *wrapping_auxdata_freelist[WRAPPING_AUXDATA_FREELIST_SIZE] = {NULL}; static void From 0c7ddfc343ee0b7937e893227d2090ee4dce090e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 18 Nov 2025 17:06:33 +0000 Subject: [PATCH 0890/1718] MAINT: Bump github/codeql-action from 4.31.3 to 4.31.4 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 4.31.3 to 4.31.4. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/014f16e7ab1402f30e7c3329d33797e7948572db...e12f0178983d466f2f6028f5cc7a6d786fd97f4b) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 4.31.4 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 1546cdc10adf..bc4ff9f9cdc5 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. 
- name: Initialize CodeQL - uses: github/codeql-action/init@014f16e7ab1402f30e7c3329d33797e7948572db # v4.31.3 + uses: github/codeql-action/init@e12f0178983d466f2f6028f5cc7a6d786fd97f4b # v4.31.4 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@014f16e7ab1402f30e7c3329d33797e7948572db # v4.31.3 + uses: github/codeql-action/autobuild@e12f0178983d466f2f6028f5cc7a6d786fd97f4b # v4.31.4 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@014f16e7ab1402f30e7c3329d33797e7948572db # v4.31.3 + uses: github/codeql-action/analyze@e12f0178983d466f2f6028f5cc7a6d786fd97f4b # v4.31.4 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 227f2f3788b0..165a172b9ee4 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
- name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@014f16e7ab1402f30e7c3329d33797e7948572db # v2.1.27 + uses: github/codeql-action/upload-sarif@e12f0178983d466f2f6028f5cc7a6d786fd97f4b # v2.1.27 with: sarif_file: results.sarif From 6faf12368c02e465d0980db9ff5b344944e752e9 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Tue, 18 Nov 2025 18:48:58 +0100 Subject: [PATCH 0891/1718] TYP: Shape-typing in ``lib._twodim_base_impl`` (#30231) --- numpy/lib/_twodim_base_impl.pyi | 307 ++++++++---------- numpy/typing/tests/data/fail/twodim_base.pyi | 33 +- .../typing/tests/data/reveal/twodim_base.pyi | 262 +++++++++------ 3 files changed, 333 insertions(+), 269 deletions(-) diff --git a/numpy/lib/_twodim_base_impl.pyi b/numpy/lib/_twodim_base_impl.pyi index cc1d6b6245a9..58582119429a 100644 --- a/numpy/lib/_twodim_base_impl.pyi +++ b/numpy/lib/_twodim_base_impl.pyi @@ -1,31 +1,26 @@ +from _typeshed import Incomplete from collections.abc import Callable, Sequence -from typing import Any, Literal as L, TypeAlias, TypeVar, overload +from typing import ( + Any, + Literal as L, + Never, + Protocol, + TypeAlias, + TypeVar, + overload, + type_check_only, +) import numpy as np -from numpy import ( - _OrderCF, - complex128, - complexfloating, - datetime64, - float64, - floating, - generic, - int_, - intp, - object_, - signedinteger, - timedelta64, -) +from numpy import _OrderCF from numpy._typing import ( ArrayLike, DTypeLike, NDArray, _ArrayLike, - _ArrayLikeComplex_co, - _ArrayLikeFloat_co, - _ArrayLikeInt_co, - _ArrayLikeObject_co, _DTypeLike, + _NumberLike_co, + _ScalarLike_co, _SupportsArray, _SupportsArrayFunc, ) @@ -51,36 +46,63 @@ __all__ = [ ### _T = TypeVar("_T") -_ScalarT = TypeVar("_ScalarT", bound=generic) -_ComplexFloatingT = TypeVar("_ComplexFloatingT", bound=np.complexfloating) +_ArrayT = TypeVar("_ArrayT", bound=np.ndarray) +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +_ComplexT = TypeVar("_ComplexT", bound=np.complexfloating) 
_InexactT = TypeVar("_InexactT", bound=np.inexact) +_NumberT = TypeVar("_NumberT", bound=np.number) +_NumberObjectT = TypeVar("_NumberObjectT", bound=np.number | np.object_) _NumberCoT = TypeVar("_NumberCoT", bound=_Number_co) -# The returned arrays dtype must be compatible with `np.equal` -_MaskFunc: TypeAlias = Callable[[NDArray[int_], _T], NDArray[_Number_co | timedelta64 | datetime64 | object_]] - _Int_co: TypeAlias = np.integer | np.bool _Float_co: TypeAlias = np.floating | _Int_co _Number_co: TypeAlias = np.number | np.bool +_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] +_Array2D: TypeAlias = np.ndarray[tuple[int, int], np.dtype[_ScalarT]] +# Workaround for mypy's and pyright's lack of compliance with the typing spec for +# overloads for gradual types. This works because only `Any` and `Never` are assignable +# to `Never`. +_ArrayNoD: TypeAlias = np.ndarray[tuple[Never] | tuple[Never, Never], np.dtype[_ScalarT]] + _ArrayLike1D: TypeAlias = _SupportsArray[np.dtype[_ScalarT]] | Sequence[_ScalarT] _ArrayLike1DInt_co: TypeAlias = _SupportsArray[np.dtype[_Int_co]] | Sequence[int | _Int_co] _ArrayLike1DFloat_co: TypeAlias = _SupportsArray[np.dtype[_Float_co]] | Sequence[float | _Float_co] _ArrayLike2DFloat_co: TypeAlias = _SupportsArray[np.dtype[_Float_co]] | Sequence[_ArrayLike1DFloat_co] _ArrayLike1DNumber_co: TypeAlias = _SupportsArray[np.dtype[_Number_co]] | Sequence[complex | _Number_co] +# The returned arrays dtype must be compatible with `np.equal` +_MaskFunc: TypeAlias = Callable[[NDArray[np.int_], _T], NDArray[_Number_co | np.timedelta64 | np.datetime64 | np.object_]] + +_Indices2D: TypeAlias = tuple[_Array1D[np.intp], _Array1D[np.intp]] +_Histogram2D: TypeAlias = tuple[_Array1D[np.float64], _Array1D[_ScalarT], _Array1D[_ScalarT]] + +@type_check_only +class _HasShapeAndNDim(Protocol): + @property # TODO: require 2d shape once shape-typing has matured + def shape(self) -> tuple[int, ...]: ... + @property + def ndim(self) -> int: ... 
+ ### +# keep in sync with `flipud` +@overload +def fliplr(m: _ArrayT) -> _ArrayT: ... @overload def fliplr(m: _ArrayLike[_ScalarT]) -> NDArray[_ScalarT]: ... @overload def fliplr(m: ArrayLike) -> NDArray[Any]: ... +# keep in sync with `fliplr` +@overload +def flipud(m: _ArrayT) -> _ArrayT: ... @overload def flipud(m: _ArrayLike[_ScalarT]) -> NDArray[_ScalarT]: ... @overload def flipud(m: ArrayLike) -> NDArray[Any]: ... +# @overload def eye( N: int, @@ -91,7 +113,7 @@ def eye( *, device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, -) -> NDArray[float64]: ... +) -> _Array2D[np.float64]: ... @overload def eye( N: int, @@ -102,7 +124,7 @@ def eye( *, device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, -) -> NDArray[_ScalarT]: ... +) -> _Array2D[_ScalarT]: ... @overload def eye( N: int, @@ -113,7 +135,7 @@ def eye( order: _OrderCF = "C", device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, -) -> NDArray[_ScalarT]: ... +) -> _Array2D[_ScalarT]: ... @overload def eye( N: int, @@ -124,19 +146,31 @@ def eye( *, device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, -) -> NDArray[Any]: ... +) -> _Array2D[Incomplete]: ... +# +@overload +def diag(v: _ArrayNoD[_ScalarT] | Sequence[Sequence[_ScalarT]], k: int = 0) -> NDArray[_ScalarT]: ... +@overload +def diag(v: _Array2D[_ScalarT] | Sequence[Sequence[_ScalarT]], k: int = 0) -> _Array1D[_ScalarT]: ... +@overload +def diag(v: _Array1D[_ScalarT] | Sequence[_ScalarT], k: int = 0) -> _Array2D[_ScalarT]: ... +@overload +def diag(v: Sequence[Sequence[_ScalarLike_co]], k: int = 0) -> _Array1D[Incomplete]: ... +@overload +def diag(v: Sequence[_ScalarLike_co], k: int = 0) -> _Array2D[Incomplete]: ... @overload def diag(v: _ArrayLike[_ScalarT], k: int = 0) -> NDArray[_ScalarT]: ... @overload -def diag(v: ArrayLike, k: int = 0) -> NDArray[Any]: ... +def diag(v: ArrayLike, k: int = 0) -> NDArray[Incomplete]: ... 
# keep in sync with `numpy.ma.extras.diagflat` @overload -def diagflat(v: _ArrayLike[_ScalarT], k: int = 0) -> NDArray[_ScalarT]: ... +def diagflat(v: _ArrayLike[_ScalarT], k: int = 0) -> _Array2D[_ScalarT]: ... @overload -def diagflat(v: ArrayLike, k: int = 0) -> NDArray[Any]: ... +def diagflat(v: ArrayLike, k: int = 0) -> _Array2D[Incomplete]: ... +# @overload def tri( N: int, @@ -145,7 +179,7 @@ def tri( dtype: None = ..., # = float # stubdefaulter: ignore[missing-default] *, like: _SupportsArrayFunc | None = None -) -> NDArray[float64]: ... +) -> _Array2D[np.float64]: ... @overload def tri( N: int, @@ -154,7 +188,7 @@ def tri( dtype: _DTypeLike[_ScalarT], *, like: _SupportsArrayFunc | None = None -) -> NDArray[_ScalarT]: ... +) -> _Array2D[_ScalarT]: ... @overload def tri( N: int, @@ -163,7 +197,7 @@ def tri( *, dtype: _DTypeLike[_ScalarT], like: _SupportsArrayFunc | None = None -) -> NDArray[_ScalarT]: ... +) -> _Array2D[_ScalarT]: ... @overload def tri( N: int, @@ -172,69 +206,55 @@ def tri( dtype: DTypeLike | None = ..., # = float *, like: _SupportsArrayFunc | None = None -) -> NDArray[Any]: ... +) -> _Array2D[Any]: ... +# keep in sync with `triu` +@overload +def tril(m: _ArrayT, k: int = 0) -> _ArrayT: ... @overload def tril(m: _ArrayLike[_ScalarT], k: int = 0) -> NDArray[_ScalarT]: ... @overload def tril(m: ArrayLike, k: int = 0) -> NDArray[Any]: ... +# keep in sync with `tril` +@overload +def triu(m: _ArrayT, k: int = 0) -> _ArrayT: ... @overload def triu(m: _ArrayLike[_ScalarT], k: int = 0) -> NDArray[_ScalarT]: ... @overload def triu(m: ArrayLike, k: int = 0) -> NDArray[Any]: ... +# we use `list` (invariant) instead of `Sequence` (covariant) to avoid overlap +@overload +def vander(x: _ArrayLike1D[_NumberObjectT], N: int | None = None, increasing: bool = False) -> _Array2D[_NumberObjectT]: ... +@overload +def vander(x: _ArrayLike1D[np.bool] | list[int], N: int | None = None, increasing: bool = False) -> _Array2D[np.int_]: ... 
+@overload +def vander(x: list[float], N: int | None = None, increasing: bool = False) -> _Array2D[np.float64]: ... @overload -def vander( # type: ignore[misc] - x: _ArrayLikeInt_co, - N: int | None = None, - increasing: bool = False, -) -> NDArray[signedinteger]: ... -@overload -def vander( # type: ignore[misc] - x: _ArrayLikeFloat_co, - N: int | None = None, - increasing: bool = False, -) -> NDArray[floating]: ... -@overload -def vander( - x: _ArrayLikeComplex_co, - N: int | None = None, - increasing: bool = False, -) -> NDArray[complexfloating]: ... -@overload -def vander( - x: _ArrayLikeObject_co, - N: int | None = None, - increasing: bool = False, -) -> NDArray[object_]: ... +def vander(x: list[complex], N: int | None = None, increasing: bool = False) -> _Array2D[np.complex128]: ... +@overload # fallback +def vander(x: Sequence[_NumberLike_co], N: int | None = None, increasing: bool = False) -> _Array2D[Any]: ... +# @overload def histogram2d( - x: _ArrayLike1D[_ComplexFloatingT], - y: _ArrayLike1D[_ComplexFloatingT | _Float_co], + x: _ArrayLike1D[_ComplexT], + y: _ArrayLike1D[_ComplexT | _Float_co], bins: int | Sequence[int] = 10, range: _ArrayLike2DFloat_co | None = None, density: bool | None = None, weights: _ArrayLike1DFloat_co | None = None, -) -> tuple[ - NDArray[float64], - NDArray[_ComplexFloatingT], - NDArray[_ComplexFloatingT], -]: ... +) -> _Histogram2D[_ComplexT]: ... @overload def histogram2d( - x: _ArrayLike1D[_ComplexFloatingT | _Float_co], - y: _ArrayLike1D[_ComplexFloatingT], + x: _ArrayLike1D[_ComplexT | _Float_co], + y: _ArrayLike1D[_ComplexT], bins: int | Sequence[int] = 10, range: _ArrayLike2DFloat_co | None = None, density: bool | None = None, weights: _ArrayLike1DFloat_co | None = None, -) -> tuple[ - NDArray[float64], - NDArray[_ComplexFloatingT], - NDArray[_ComplexFloatingT], -]: ... +) -> _Histogram2D[_ComplexT]: ... 
@overload def histogram2d( x: _ArrayLike1D[_InexactT], @@ -243,11 +263,7 @@ def histogram2d( range: _ArrayLike2DFloat_co | None = None, density: bool | None = None, weights: _ArrayLike1DFloat_co | None = None, -) -> tuple[ - NDArray[float64], - NDArray[_InexactT], - NDArray[_InexactT], -]: ... +) -> _Histogram2D[_InexactT]: ... @overload def histogram2d( x: _ArrayLike1D[_InexactT | _Int_co], @@ -256,11 +272,7 @@ def histogram2d( range: _ArrayLike2DFloat_co | None = None, density: bool | None = None, weights: _ArrayLike1DFloat_co | None = None, -) -> tuple[ - NDArray[float64], - NDArray[_InexactT], - NDArray[_InexactT], -]: ... +) -> _Histogram2D[_InexactT]: ... @overload def histogram2d( x: _ArrayLike1DInt_co | Sequence[float], @@ -269,11 +281,7 @@ def histogram2d( range: _ArrayLike2DFloat_co | None = None, density: bool | None = None, weights: _ArrayLike1DFloat_co | None = None, -) -> tuple[ - NDArray[float64], - NDArray[float64], - NDArray[float64], -]: ... +) -> _Histogram2D[np.float64]: ... @overload def histogram2d( x: Sequence[complex], @@ -282,11 +290,7 @@ def histogram2d( range: _ArrayLike2DFloat_co | None = None, density: bool | None = None, weights: _ArrayLike1DFloat_co | None = None, -) -> tuple[ - NDArray[float64], - NDArray[complex128 | float64], - NDArray[complex128 | float64], -]: ... +) -> _Histogram2D[np.complex128 | Any]: ... @overload def histogram2d( x: _ArrayLike1DNumber_co, @@ -295,11 +299,7 @@ def histogram2d( range: _ArrayLike2DFloat_co | None = None, density: bool | None = None, weights: _ArrayLike1DFloat_co | None = None, -) -> tuple[ - NDArray[float64], - NDArray[_NumberCoT], - NDArray[_NumberCoT], -]: ... +) -> _Histogram2D[_NumberCoT]: ... 
@overload def histogram2d( x: _ArrayLike1D[_InexactT], @@ -308,11 +308,16 @@ def histogram2d( range: _ArrayLike2DFloat_co | None = None, density: bool | None = None, weights: _ArrayLike1DFloat_co | None = None, -) -> tuple[ - NDArray[float64], - NDArray[_NumberCoT | _InexactT], - NDArray[_NumberCoT | _InexactT], -]: ... +) -> _Histogram2D[_InexactT | _NumberCoT]: ... +@overload +def histogram2d( + x: _ArrayLike1D[_InexactT], + y: _ArrayLike1D[_InexactT], + bins: Sequence[_ArrayLike1DNumber_co | int], + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, +) -> _Histogram2D[_InexactT | Any]: ... @overload def histogram2d( x: _ArrayLike1DInt_co | Sequence[float], @@ -321,11 +326,16 @@ def histogram2d( range: _ArrayLike2DFloat_co | None = None, density: bool | None = None, weights: _ArrayLike1DFloat_co | None = None, -) -> tuple[ - NDArray[float64], - NDArray[_NumberCoT | float64], - NDArray[_NumberCoT | float64], -]: ... +) -> _Histogram2D[np.float64 | _NumberCoT]: ... +@overload +def histogram2d( + x: _ArrayLike1DInt_co | Sequence[float], + y: _ArrayLike1DInt_co | Sequence[float], + bins: Sequence[_ArrayLike1DNumber_co | int], + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, +) -> _Histogram2D[np.float64 | Any]: ... @overload def histogram2d( x: Sequence[complex], @@ -334,24 +344,16 @@ def histogram2d( range: _ArrayLike2DFloat_co | None = None, density: bool | None = None, weights: _ArrayLike1DFloat_co | None = None, -) -> tuple[ - NDArray[float64], - NDArray[_NumberCoT | complex128 | float64], - NDArray[_NumberCoT | complex128 | float64], -]: ... +) -> _Histogram2D[np.complex128 | _NumberCoT]: ... 
@overload def histogram2d( - x: _ArrayLike1DNumber_co, - y: _ArrayLike1DNumber_co, - bins: Sequence[Sequence[bool]], + x: Sequence[complex], + y: Sequence[complex], + bins: Sequence[_ArrayLike1DNumber_co | int], range: _ArrayLike2DFloat_co | None = None, density: bool | None = None, weights: _ArrayLike1DFloat_co | None = None, -) -> tuple[ - NDArray[float64], - NDArray[np.bool], - NDArray[np.bool], -]: ... +) -> _Histogram2D[np.complex128 | Any]: ... @overload def histogram2d( x: _ArrayLike1DNumber_co, @@ -360,11 +362,7 @@ def histogram2d( range: _ArrayLike2DFloat_co | None = None, density: bool | None = None, weights: _ArrayLike1DFloat_co | None = None, -) -> tuple[ - NDArray[float64], - NDArray[np.int_ | np.bool], - NDArray[np.int_ | np.bool], -]: ... +) -> _Histogram2D[np.int_]: ... @overload def histogram2d( x: _ArrayLike1DNumber_co, @@ -373,11 +371,7 @@ def histogram2d( range: _ArrayLike2DFloat_co | None = None, density: bool | None = None, weights: _ArrayLike1DFloat_co | None = None, -) -> tuple[ - NDArray[float64], - NDArray[np.float64 | np.int_ | np.bool], - NDArray[np.float64 | np.int_ | np.bool], -]: ... +) -> _Histogram2D[np.float64 | Any]: ... @overload def histogram2d( x: _ArrayLike1DNumber_co, @@ -386,46 +380,29 @@ def histogram2d( range: _ArrayLike2DFloat_co | None = None, density: bool | None = None, weights: _ArrayLike1DFloat_co | None = None, -) -> tuple[ - NDArray[float64], - NDArray[np.complex128 | np.float64 | np.int_ | np.bool], - NDArray[np.complex128 | np.float64 | np.int_ | np.bool], -]: ... +) -> _Histogram2D[np.complex128 | Any]: ... +@overload +def histogram2d( + x: _ArrayLike1DNumber_co, + y: _ArrayLike1DNumber_co, + bins: Sequence[_ArrayLike1DNumber_co | int] | int, + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, +) -> _Histogram2D[Any]: ... 
# NOTE: we're assuming/demanding here the `mask_func` returns # an ndarray of shape `(n, n)`; otherwise there is the possibility # of the output tuple having more or less than 2 elements @overload -def mask_indices( - n: int, - mask_func: _MaskFunc[int], - k: int = 0, -) -> tuple[NDArray[intp], NDArray[intp]]: ... +def mask_indices(n: int, mask_func: _MaskFunc[int], k: int = 0) -> _Indices2D: ... @overload -def mask_indices( - n: int, - mask_func: _MaskFunc[_T], - k: _T, -) -> tuple[NDArray[intp], NDArray[intp]]: ... +def mask_indices(n: int, mask_func: _MaskFunc[_T], k: _T) -> _Indices2D: ... -def tril_indices( - n: int, - k: int = 0, - m: int | None = None, -) -> tuple[NDArray[int_], NDArray[int_]]: ... - -def tril_indices_from( - arr: NDArray[Any], - k: int = 0, -) -> tuple[NDArray[int_], NDArray[int_]]: ... +# +def tril_indices(n: int, k: int = 0, m: int | None = None) -> _Indices2D: ... +def triu_indices(n: int, k: int = 0, m: int | None = None) -> _Indices2D: ... -def triu_indices( - n: int, - k: int = 0, - m: int | None = None, -) -> tuple[NDArray[int_], NDArray[int_]]: ... - -def triu_indices_from( - arr: NDArray[Any], - k: int = 0, -) -> tuple[NDArray[int_], NDArray[int_]]: ... +# these will accept anything with `shape: tuple[int, int]` and `ndim: int` attributes +def tril_indices_from(arr: _HasShapeAndNDim, k: int = 0) -> _Indices2D: ... +def triu_indices_from(arr: _HasShapeAndNDim, k: int = 0) -> _Indices2D: ... diff --git a/numpy/typing/tests/data/fail/twodim_base.pyi b/numpy/typing/tests/data/fail/twodim_base.pyi index e146d68c7418..473419cecef1 100644 --- a/numpy/typing/tests/data/fail/twodim_base.pyi +++ b/numpy/typing/tests/data/fail/twodim_base.pyi @@ -1,32 +1,39 @@ -from typing import Any +from typing import type_check_only import numpy as np import numpy.typing as npt -def func1(ar: npt.NDArray[Any], a: int) -> npt.NDArray[np.str_]: ... 
+_0d_bool: np.bool +_nd_bool: npt.NDArray[np.bool] +_nd_td64: npt.NDArray[np.timedelta64] +_to_2d_bool: list[list[bool]] -def func2(ar: npt.NDArray[Any], a: float) -> float: ... +@type_check_only +def func1(ar: np.ndarray, a: int) -> npt.NDArray[np.str_]: ... +@type_check_only +def func2(ar: np.ndarray, a: float) -> float: ... -AR_b: npt.NDArray[np.bool] -AR_m: npt.NDArray[np.timedelta64] - -AR_LIKE_b: list[bool] +### np.eye(10, M=20.0) # type: ignore[call-overload] np.eye(10, k=2.5, dtype=int) # type: ignore[call-overload] -np.diag(AR_b, k=0.5) # type: ignore[call-overload] -np.diagflat(AR_b, k=0.5) # type: ignore[call-overload] +np.diag(_nd_bool, k=0.5) # type: ignore[call-overload] +np.diagflat(_nd_bool, k=0.5) # type: ignore[call-overload] np.tri(10, M=20.0) # type: ignore[call-overload] np.tri(10, k=2.5, dtype=int) # type: ignore[call-overload] -np.tril(AR_b, k=0.5) # type: ignore[call-overload] -np.triu(AR_b, k=0.5) # type: ignore[call-overload] +np.tril(_nd_bool, k=0.5) # type: ignore[call-overload] +np.triu(_nd_bool, k=0.5) # type: ignore[call-overload] -np.vander(AR_m) # type: ignore[arg-type] +np.vander(_nd_td64) # type: ignore[type-var] -np.histogram2d(AR_m) # type: ignore[call-overload] +np.histogram2d(_nd_td64) # type: ignore[call-overload] np.mask_indices(10, func1) # type: ignore[arg-type] np.mask_indices(10, func2, 10.5) # type: ignore[arg-type] + +np.tril_indices(3.14) # type: ignore[arg-type] + +np.tril_indices_from(_to_2d_bool) # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/reveal/twodim_base.pyi b/numpy/typing/tests/data/reveal/twodim_base.pyi index 7e9563a38611..30d4f408f1a9 100644 --- a/numpy/typing/tests/data/reveal/twodim_base.pyi +++ b/numpy/typing/tests/data/reveal/twodim_base.pyi @@ -1,145 +1,225 @@ -from typing import Any, TypeVar, assert_type +from typing import Any, TypeAlias, TypeVar, assert_type, type_check_only import numpy as np import numpy.typing as npt _ScalarT = TypeVar("_ScalarT", bound=np.generic) -def 
func1(ar: npt.NDArray[_ScalarT], a: int) -> npt.NDArray[_ScalarT]: ... - -def func2(ar: npt.NDArray[np.number], a: str) -> npt.NDArray[np.float64]: ... - -AR_b: npt.NDArray[np.bool] -AR_u: npt.NDArray[np.uint64] -AR_i: npt.NDArray[np.int64] -AR_f: npt.NDArray[np.float64] -AR_c: npt.NDArray[np.complex128] -AR_O: npt.NDArray[np.object_] - -AR_LIKE_b: list[bool] -AR_LIKE_c: list[complex] - -assert_type(np.fliplr(AR_b), npt.NDArray[np.bool]) -assert_type(np.fliplr(AR_LIKE_b), npt.NDArray[Any]) +_1D: TypeAlias = tuple[int] +_2D: TypeAlias = tuple[int, int] +_ND: TypeAlias = tuple[Any, ...] -assert_type(np.flipud(AR_b), npt.NDArray[np.bool]) -assert_type(np.flipud(AR_LIKE_b), npt.NDArray[Any]) +_Indices2D: TypeAlias = tuple[ + np.ndarray[_1D, np.dtype[np.intp]], + np.ndarray[_1D, np.dtype[np.intp]], +] -assert_type(np.eye(10), npt.NDArray[np.float64]) -assert_type(np.eye(10, M=20, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.eye(10, k=2, dtype=int), npt.NDArray[Any]) +### -assert_type(np.diag(AR_b), npt.NDArray[np.bool]) -assert_type(np.diag(AR_LIKE_b, k=0), npt.NDArray[Any]) +_nd_bool: np.ndarray[_ND, np.dtype[np.bool]] +_1d_bool: np.ndarray[_1D, np.dtype[np.bool]] +_2d_bool: np.ndarray[_2D, np.dtype[np.bool]] +_nd_u64: np.ndarray[_ND, np.dtype[np.uint64]] +_nd_i64: np.ndarray[_ND, np.dtype[np.int64]] +_nd_f64: np.ndarray[_ND, np.dtype[np.float64]] +_nd_c128: np.ndarray[_ND, np.dtype[np.complex128]] +_nd_obj: np.ndarray[_ND, np.dtype[np.object_]] -assert_type(np.diagflat(AR_b), npt.NDArray[np.bool]) -assert_type(np.diagflat(AR_LIKE_b, k=0), npt.NDArray[Any]) +_to_nd_bool: list[bool] | list[list[bool]] +_to_1d_bool: list[bool] +_to_2d_bool: list[list[bool]] -assert_type(np.tri(10), npt.NDArray[np.float64]) -assert_type(np.tri(10, M=20, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.tri(10, k=2, dtype=int), npt.NDArray[Any]) +_to_1d_f64: list[float] +_to_1d_c128: list[complex] -assert_type(np.tril(AR_b), npt.NDArray[np.bool]) 
-assert_type(np.tril(AR_LIKE_b, k=0), npt.NDArray[Any]) - -assert_type(np.triu(AR_b), npt.NDArray[np.bool]) -assert_type(np.triu(AR_LIKE_b, k=0), npt.NDArray[Any]) - -assert_type(np.vander(AR_b), npt.NDArray[np.signedinteger]) -assert_type(np.vander(AR_u), npt.NDArray[np.signedinteger]) -assert_type(np.vander(AR_i, N=2), npt.NDArray[np.signedinteger]) -assert_type(np.vander(AR_f, increasing=True), npt.NDArray[np.floating]) -assert_type(np.vander(AR_c), npt.NDArray[np.complexfloating]) -assert_type(np.vander(AR_O), npt.NDArray[np.object_]) +@type_check_only +def func1(ar: npt.NDArray[_ScalarT], a: int) -> npt.NDArray[_ScalarT]: ... +@type_check_only +def func2(ar: npt.NDArray[np.number], a: str) -> npt.NDArray[np.float64]: ... +@type_check_only +class _Cube: + shape = 3, 4 + ndim = 2 + +### + +# fliplr +assert_type(np.fliplr(_nd_bool), np.ndarray[_ND, np.dtype[np.bool]]) +assert_type(np.fliplr(_1d_bool), np.ndarray[_1D, np.dtype[np.bool]]) +assert_type(np.fliplr(_2d_bool), np.ndarray[_2D, np.dtype[np.bool]]) +assert_type(np.fliplr(_to_nd_bool), np.ndarray) +assert_type(np.fliplr(_to_1d_bool), np.ndarray) +assert_type(np.fliplr(_to_2d_bool), np.ndarray) + +# flipud +assert_type(np.flipud(_nd_bool), np.ndarray[_ND, np.dtype[np.bool]]) +assert_type(np.flipud(_1d_bool), np.ndarray[_1D, np.dtype[np.bool]]) +assert_type(np.flipud(_2d_bool), np.ndarray[_2D, np.dtype[np.bool]]) +assert_type(np.flipud(_to_nd_bool), np.ndarray) +assert_type(np.flipud(_to_1d_bool), np.ndarray) +assert_type(np.flipud(_to_2d_bool), np.ndarray) + +# eye +assert_type(np.eye(10), np.ndarray[_2D, np.dtype[np.float64]]) +assert_type(np.eye(10, M=20, dtype=np.int64), np.ndarray[_2D, np.dtype[np.int64]]) +assert_type(np.eye(10, k=2, dtype=int), np.ndarray[_2D]) + +# diag +assert_type(np.diag(_nd_bool), np.ndarray[_ND, np.dtype[np.bool]]) +assert_type(np.diag(_1d_bool), np.ndarray[_2D, np.dtype[np.bool]]) +assert_type(np.diag(_2d_bool), np.ndarray[_1D, np.dtype[np.bool]]) 
+assert_type(np.diag(_to_nd_bool, k=0), np.ndarray) +assert_type(np.diag(_to_1d_bool, k=0), np.ndarray[_2D]) +assert_type(np.diag(_to_2d_bool, k=0), np.ndarray[_1D]) + +# diagflat +assert_type(np.diagflat(_nd_bool), np.ndarray[_2D, np.dtype[np.bool]]) +assert_type(np.diagflat(_1d_bool), np.ndarray[_2D, np.dtype[np.bool]]) +assert_type(np.diagflat(_2d_bool), np.ndarray[_2D, np.dtype[np.bool]]) +assert_type(np.diagflat(_to_nd_bool, k=0), np.ndarray[_2D]) +assert_type(np.diagflat(_to_1d_bool, k=0), np.ndarray[_2D]) +assert_type(np.diagflat(_to_2d_bool, k=0), np.ndarray[_2D]) + +# tri +assert_type(np.tri(10), np.ndarray[_2D, np.dtype[np.float64]]) +assert_type(np.tri(10, M=20, dtype=np.int64), np.ndarray[_2D, np.dtype[np.int64]]) +assert_type(np.tri(10, k=2, dtype=int), np.ndarray[_2D]) + +# tril +assert_type(np.tril(_nd_bool), np.ndarray[_ND, np.dtype[np.bool]]) +assert_type(np.tril(_to_nd_bool, k=0), np.ndarray) +assert_type(np.tril(_to_1d_bool, k=0), np.ndarray) +assert_type(np.tril(_to_2d_bool, k=0), np.ndarray) + +# triu +assert_type(np.triu(_nd_bool), np.ndarray[_ND, np.dtype[np.bool]]) +assert_type(np.triu(_to_nd_bool, k=0), np.ndarray) +assert_type(np.triu(_to_1d_bool, k=0), np.ndarray) +assert_type(np.triu(_to_2d_bool, k=0), np.ndarray) + +# vander +assert_type(np.vander(_nd_bool), np.ndarray[_2D, np.dtype[np.int_]]) +assert_type(np.vander(_nd_u64), np.ndarray[_2D, np.dtype[np.uint64]]) +assert_type(np.vander(_nd_i64, N=2), np.ndarray[_2D, np.dtype[np.int64]]) +assert_type(np.vander(_nd_f64, increasing=True), np.ndarray[_2D, np.dtype[np.float64]]) +assert_type(np.vander(_nd_c128), np.ndarray[_2D, np.dtype[np.complex128]]) +assert_type(np.vander(_nd_obj), np.ndarray[_2D, np.dtype[np.object_]]) + +# histogram2d assert_type( - np.histogram2d(AR_LIKE_c, AR_LIKE_c), + np.histogram2d(_to_1d_f64, _to_1d_f64), tuple[ - npt.NDArray[np.float64], - npt.NDArray[np.complex128 | np.float64], - npt.NDArray[np.complex128 | np.float64], + np.ndarray[_1D, np.dtype[np.float64]], 
+ np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.float64]], ], ) assert_type( - np.histogram2d(AR_i, AR_b), + np.histogram2d(_to_1d_c128, _to_1d_c128), tuple[ - npt.NDArray[np.float64], - npt.NDArray[np.float64], - npt.NDArray[np.float64], + np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.complex128 | Any]], + np.ndarray[_1D, np.dtype[np.complex128 | Any]], ], ) assert_type( - np.histogram2d(AR_f, AR_i), + np.histogram2d(_nd_i64, _nd_bool), tuple[ - npt.NDArray[np.float64], - npt.NDArray[np.float64], - npt.NDArray[np.float64], + np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.float64]], ], ) assert_type( - np.histogram2d(AR_i, AR_f), + np.histogram2d(_nd_f64, _nd_i64), tuple[ - npt.NDArray[np.float64], - npt.NDArray[np.float64], - npt.NDArray[np.float64], + np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.float64]], ], ) assert_type( - np.histogram2d(AR_f, AR_c, weights=AR_LIKE_b), + np.histogram2d(_nd_i64, _nd_f64), tuple[ - npt.NDArray[np.float64], - npt.NDArray[np.complex128], - npt.NDArray[np.complex128], + np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.float64]], ], ) assert_type( - np.histogram2d(AR_f, AR_c, bins=8), + np.histogram2d(_nd_f64, _nd_c128, weights=_to_1d_bool), tuple[ - npt.NDArray[np.float64], - npt.NDArray[np.complex128], - npt.NDArray[np.complex128], + np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.complex128]], + np.ndarray[_1D, np.dtype[np.complex128]], ], ) assert_type( - np.histogram2d(AR_c, AR_f, bins=(8, 5)), + np.histogram2d(_nd_f64, _nd_c128, bins=8), tuple[ - npt.NDArray[np.float64], - npt.NDArray[np.complex128], - npt.NDArray[np.complex128], + np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.complex128]], + np.ndarray[_1D, np.dtype[np.complex128]], ], ) assert_type( - 
np.histogram2d(AR_c, AR_i, bins=AR_u), + np.histogram2d(_nd_c128, _nd_f64, bins=(8, 5)), tuple[ - npt.NDArray[np.float64], - npt.NDArray[np.uint64], - npt.NDArray[np.uint64], + np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.complex128]], + np.ndarray[_1D, np.dtype[np.complex128]], ], ) assert_type( - np.histogram2d(AR_c, AR_c, bins=(AR_u, AR_u)), + np.histogram2d(_nd_c128, _nd_i64, bins=_nd_u64), tuple[ - npt.NDArray[np.float64], - npt.NDArray[np.uint64], - npt.NDArray[np.uint64], + np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.uint64]], + np.ndarray[_1D, np.dtype[np.uint64]], ], ) assert_type( - np.histogram2d(AR_c, AR_c, bins=(AR_b, 8)), + np.histogram2d(_nd_c128, _nd_c128, bins=(_nd_u64, _nd_u64)), tuple[ - npt.NDArray[np.float64], - npt.NDArray[np.bool | np.complex128], - npt.NDArray[np.bool | np.complex128], + np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.uint64]], + np.ndarray[_1D, np.dtype[np.uint64]], + ], +) +assert_type( + np.histogram2d(_nd_c128, _nd_c128, bins=(_nd_bool, 8)), + tuple[ + np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.complex128 | np.bool]], + np.ndarray[_1D, np.dtype[np.complex128 | np.bool]], + ], +) +assert_type( + np.histogram2d(_nd_c128, _nd_c128, bins=(_to_1d_f64, 8)), + tuple[ + np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.complex128 | Any]], + np.ndarray[_1D, np.dtype[np.complex128 | Any]], ], ) -assert_type(np.mask_indices(10, func1), tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]) -assert_type(np.mask_indices(8, func2, "0"), tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]) - -assert_type(np.tril_indices(10), tuple[npt.NDArray[np.int_], npt.NDArray[np.int_]]) - -assert_type(np.tril_indices_from(AR_b), tuple[npt.NDArray[np.int_], npt.NDArray[np.int_]]) - -assert_type(np.triu_indices(10), tuple[npt.NDArray[np.int_], npt.NDArray[np.int_]]) - -assert_type(np.triu_indices_from(AR_b), tuple[npt.NDArray[np.int_], 
npt.NDArray[np.int_]]) +# mask_indices +assert_type(np.mask_indices(10, func1), _Indices2D) +assert_type(np.mask_indices(8, func2, "0"), _Indices2D) + +# tril_indices +assert_type(np.tril_indices(3), _Indices2D) +assert_type(np.tril_indices(3, 1), _Indices2D) +assert_type(np.tril_indices(3, 1, 2), _Indices2D) +# tril_indices +assert_type(np.triu_indices(3), _Indices2D) +assert_type(np.triu_indices(3, 1), _Indices2D) +assert_type(np.triu_indices(3, 1, 2), _Indices2D) + +# tril_indices_from +assert_type(np.tril_indices_from(_2d_bool), _Indices2D) +assert_type(np.tril_indices_from(_Cube()), _Indices2D) +# triu_indices_from +assert_type(np.triu_indices_from(_2d_bool), _Indices2D) +assert_type(np.triu_indices_from(_Cube()), _Indices2D) From dd69ebad5c3816535af0660df7568d0936b4200b Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Tue, 18 Nov 2025 18:51:12 +0100 Subject: [PATCH 0892/1718] TYP: Annotate remaining ``ma.MaskedArray`` methods (#30221) --- numpy/__init__.pyi | 6 +++- numpy/ma/core.pyi | 74 +++++++++++++++++++++++++++++++++++++--------- 2 files changed, 65 insertions(+), 15 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index a04ec3139250..b973e43093ca 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2273,6 +2273,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): *args: SupportsIndex, ) -> str: ... + # keep in sync with `ma.MaskedArray.tolist` @overload # this first overload prevents mypy from over-eagerly selecting `tuple[()]` in case of `_AnyShape` def tolist(self: ndarray[tuple[Never], dtype[generic[_T]]], /) -> Any: ... @overload @@ -2291,6 +2292,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def resize(self, /, *new_shape: SupportsIndex, refcheck: builtins.bool = True) -> None: ... 
+ # keep in sync with `ma.MaskedArray.squeeze` def squeeze( self, /, @@ -2418,7 +2420,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): order: str | Sequence[str] | None = None, ) -> NDArray[intp]: ... - # + # keep in sync with `ma.MaskedArray.diagonal` def diagonal( self, offset: SupportsIndex = 0, @@ -2536,11 +2538,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): mode: _ModeKind = ..., ) -> _ArrayT: ... + # keep in sync with `ma.MaskedArray.repeat` @overload def repeat(self, repeats: _ArrayLikeInt_co, /, axis: None = None) -> ndarray[tuple[int], _DTypeT_co]: ... @overload def repeat(self, repeats: _ArrayLikeInt_co, /, axis: SupportsIndex) -> ndarray[_AnyShape, _DTypeT_co]: ... + # keep in sync with `ma.MaskedArray.flatten` and `ma.MaskedArray.ravel` def flatten(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], _DTypeT_co]: ... def ravel(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], _DTypeT_co]: ... diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 31d66fff0ab8..62dc32c13d97 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -960,7 +960,8 @@ class MaskedIterator(Generic[_ShapeT_co, _DTypeT_co]): def __next__(self: MaskedIterator[Any, np.dtype[_ScalarT]]) -> _ScalarT: ... class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): - __array_priority__: Any + __array_priority__: Final[Literal[15]] = 15 + @overload def __new__( cls, @@ -1615,10 +1616,12 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # @property # type: ignore[misc] def imag(self: _HasDTypeWithRealAndImag[object, _ScalarT], /) -> MaskedArray[_ShapeT_co, dtype[_ScalarT]]: ... # type: ignore[override] - get_imag: Any + def get_imag(self: _HasDTypeWithRealAndImag[object, _ScalarT], /) -> MaskedArray[_ShapeT_co, dtype[_ScalarT]]: ... + + # @property # type: ignore[misc] def real(self: _HasDTypeWithRealAndImag[_ScalarT, object], /) -> MaskedArray[_ShapeT_co, dtype[_ScalarT]]: ... 
# type: ignore[override] - get_real: Any + def get_real(self: _HasDTypeWithRealAndImag[_ScalarT, object], /) -> MaskedArray[_ShapeT_co, dtype[_ScalarT]]: ... # keep in sync with `np.ma.count` @overload @@ -1630,9 +1633,6 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def count(self, axis: _ShapeLike | None, keepdims: Literal[True]) -> NDArray[int_]: ... - # keep roughly in sync with `ma.core.ravel` - def ravel(self, order: _OrderKACF = "C") -> MaskedArray[tuple[int], _DTypeT_co]: ... - # Keep in sync with `ndarray.reshape` # NOTE: reshape also accepts negative integers, so we can't use integer literals @overload # (None) @@ -2317,34 +2317,80 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): mode: _ModeKind = "raise", ) -> _ArrayT: ... - copy: Any - diagonal: Any - flatten: Any + # keep in sync with `ndarray.diagonal` + @override + def diagonal( + self, + /, + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + ) -> MaskedArray[_AnyShape, _DTypeT_co]: ... # keep in sync with `ndarray.repeat` + @override @overload def repeat( self, + /, repeats: _ArrayLikeInt_co, axis: None = None, ) -> MaskedArray[tuple[int], _DTypeT_co]: ... @overload def repeat( self, + /, repeats: _ArrayLikeInt_co, axis: SupportsIndex, ) -> MaskedArray[_AnyShape, _DTypeT_co]: ... - squeeze: Any + # keep in sync with `ndarray.flatten` and `ndarray.ravel` + @override + def flatten(self, /, order: _OrderKACF = "C") -> MaskedArray[tuple[int], _DTypeT_co]: ... + @override + def ravel(self, order: _OrderKACF = "C") -> MaskedArray[tuple[int], _DTypeT_co]: ... + + # keep in sync with `ndarray.squeeze` + @override + def squeeze( + self, + /, + axis: SupportsIndex | tuple[SupportsIndex, ...] | None = None, + ) -> MaskedArray[_AnyShape, _DTypeT_co]: ... # - def toflex(self) -> Incomplete: ... - def torecords(self) -> Incomplete: ... - def tolist(self, fill_value: Incomplete | None = None) -> Incomplete: ... 
+ def toflex(self) -> MaskedArray[_ShapeT_co, np.dtype[np.void]]: ... + def torecords(self) -> MaskedArray[_ShapeT_co, np.dtype[np.void]]: ... + + # + @override def tobytes(self, /, fill_value: Incomplete | None = None, order: _OrderKACF = "C") -> bytes: ... # type: ignore[override] - def tofile(self, /, fid: Incomplete, sep: str = "", format: str = "%s") -> Incomplete: ... + + # keep in sync with `ndarray.tolist` + @override + @overload + def tolist(self: MaskedArray[tuple[Never], dtype[generic[_T]]], /, fill_value: _ScalarLike_co | None = None) -> Any: ... + @overload + def tolist(self: MaskedArray[tuple[()], dtype[generic[_T]]], /, fill_value: _ScalarLike_co | None = None) -> _T: ... + @overload + def tolist(self: MaskedArray[tuple[int], dtype[generic[_T]]], /, fill_value: _ScalarLike_co | None = None) -> list[_T]: ... + @overload + def tolist( + self: MaskedArray[tuple[int, int], dtype[generic[_T]]], /, fill_value: _ScalarLike_co | None = None + ) -> list[list[_T]]: ... + @overload + def tolist( + self: MaskedArray[tuple[int, int, int], dtype[generic[_T]]], /, fill_value: _ScalarLike_co | None = None + ) -> list[list[list[_T]]]: ... + @overload + def tolist(self, /, fill_value: _ScalarLike_co | None = None) -> Any: ... + + # NOTE: will raise `NotImplementedError` + @override + def tofile(self, /, fid: Never, sep: str = "", format: str = "%s") -> NoReturn: ... # type: ignore[override] # + @override def __deepcopy__(self, memo: dict[int, Any] | None = None) -> Self: ... # Keep `dtype` at the bottom to avoid name conflicts with `np.dtype` From 4f97a4e64e6389f5b1be66954b98d5f2078afacc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 18 Nov 2025 11:27:10 -0700 Subject: [PATCH 0893/1718] MAINT: Bump actions/checkout from 5.0.0 to 5.0.1 (#30252) Bumps [actions/checkout](https://github.com/actions/checkout) from 5.0.0 to 5.0.1. 
- [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/08c6903cd8c0fde910a37f88322edcfb5dd907a8...93cb6efe18208431cddfb8368fd83d5badbf9bfd) --- updated-dependencies: - dependency-name: actions/checkout dependency-version: 5.0.1 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql.yml | 2 +- .github/workflows/compiler_sanitizers.yml | 4 ++-- .github/workflows/cygwin.yml | 2 +- .github/workflows/dependency-review.yml | 2 +- .github/workflows/emscripten.yml | 2 +- .github/workflows/linux-ppc64le.yml | 2 +- .github/workflows/linux.yml | 24 +++++++++++------------ .github/workflows/linux_blas.yml | 18 ++++++++--------- .github/workflows/linux_qemu.yml | 4 ++-- .github/workflows/linux_simd.yml | 12 ++++++------ .github/workflows/macos.yml | 4 ++-- .github/workflows/mypy.yml | 2 +- .github/workflows/mypy_primer.yml | 2 +- .github/workflows/scorecards.yml | 2 +- .github/workflows/stubtest.yml | 2 +- .github/workflows/wheels.yml | 2 +- .github/workflows/windows.yml | 6 +++--- 17 files changed, 46 insertions(+), 46 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 1546cdc10adf..ba75ce30ce33 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -41,7 +41,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: persist-credentials: false diff --git a/.github/workflows/compiler_sanitizers.yml b/.github/workflows/compiler_sanitizers.yml index 290ddfa75ffe..54e136839053 100644 --- a/.github/workflows/compiler_sanitizers.yml +++ 
b/.github/workflows/compiler_sanitizers.yml @@ -30,7 +30,7 @@ jobs: if: github.repository == 'numpy/numpy' runs-on: macos-latest steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true @@ -86,7 +86,7 @@ jobs: options: --shm-size=2g # increase memory for large matrix ops steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 - name: Trust working directory and initialize submodules run: | git config --global --add safe.directory /__w/numpy/numpy diff --git a/.github/workflows/cygwin.yml b/.github/workflows/cygwin.yml index c010cd5c9783..279020cbe365 100644 --- a/.github/workflows/cygwin.yml +++ b/.github/workflows/cygwin.yml @@ -22,7 +22,7 @@ jobs: # To enable this workflow on a fork, comment out: if: github.repository == 'numpy/numpy' steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml index b2fd18051ddd..c27e9ef4e88c 100644 --- a/.github/workflows/dependency-review.yml +++ b/.github/workflows/dependency-review.yml @@ -15,7 +15,7 @@ jobs: runs-on: ubuntu-latest steps: - name: 'Checkout Repository' - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: persist-credentials: false - name: 'Dependency Review' diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index 0d1936b55b60..906dec766b24 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -29,7 +29,7 @@ jobs: if: github.repository == 'numpy/numpy' steps: - 
name: Checkout NumPy - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/linux-ppc64le.yml b/.github/workflows/linux-ppc64le.yml index 9b23c1939b00..93426a95e8c0 100644 --- a/.github/workflows/linux-ppc64le.yml +++ b/.github/workflows/linux-ppc64le.yml @@ -27,7 +27,7 @@ jobs: runs-on: ubuntu-24.04-ppc64le-p10 name: "Native PPC64LE" steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index c0effa244646..4e3e01235e65 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -37,7 +37,7 @@ jobs: runs-on: ubuntu-latest continue-on-error: true steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-depth: 0 @@ -68,7 +68,7 @@ jobs: matrix: version: ["3.11", "3.12", "3.13", "3.14", "3.14t"] steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true @@ -83,7 +83,7 @@ jobs: runs-on: ubuntu-latest if: github.event_name != 'push' steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true @@ -102,7 +102,7 @@ jobs: runs-on: ubuntu-24.04 if: github.event_name != 'push' steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: 
submodules: recursive fetch-tags: true @@ -132,7 +132,7 @@ jobs: needs: [smoke_test] runs-on: ubuntu-22.04 steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true @@ -172,7 +172,7 @@ jobs: if: github.repository == 'numpy/numpy' runs-on: ubuntu-24.04-arm steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true @@ -217,7 +217,7 @@ jobs: runs-on: ubuntu-latest if: github.event_name != 'push' steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true @@ -256,7 +256,7 @@ jobs: runs-on: ubuntu-latest if: github.event_name != 'push' steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true @@ -296,13 +296,13 @@ jobs: if: github.event_name != 'push' steps: - name: Checkout NumPy - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true persist-credentials: false - name: Checkout array-api-tests - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: repository: data-apis/array-api-tests ref: '3c273cd34d51c64ed893737306d36adab23a94a1' # v2025.05.23 @@ -334,7 +334,7 @@ jobs: runs-on: ubuntu-latest if: github.event_name != 'push' steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: 
actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true @@ -342,7 +342,7 @@ jobs: - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: '3.11' - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: repository: numpy/numpy-release path: numpy-release diff --git a/.github/workflows/linux_blas.yml b/.github/workflows/linux_blas.yml index fccc7e3ad84b..b141c33165cf 100644 --- a/.github/workflows/linux_blas.yml +++ b/.github/workflows/linux_blas.yml @@ -69,7 +69,7 @@ jobs: USE_NIGHTLY_OPENBLAS: ${{ matrix.USE_NIGHTLY_OPENBLAS }} name: "Test Linux (${{ matrix.USE_NIGHTLY_OPENBLAS && 'nightly' || 'stable' }} OpenBLAS)" steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true @@ -127,7 +127,7 @@ jobs: run: | dnf install git gcc-gfortran g++ python3-devel openblas-devel -y - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true @@ -162,7 +162,7 @@ jobs: run: | dnf install git gcc-gfortran g++ python3-devel flexiblas-devel -y - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true @@ -192,7 +192,7 @@ jobs: runs-on: ubuntu-latest name: "OpenBLAS with CMake" steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true @@ -220,7 +220,7 @@ jobs: runs-on: ubuntu-latest name: "Debian libblas/liblapack" 
steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true @@ -256,7 +256,7 @@ jobs: # If it is needed in the future, use install name `pkgconf-pkg-config` zypper install -y git gcc-c++ python3-pip python3-devel blas cblas lapack - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true @@ -283,7 +283,7 @@ jobs: runs-on: ubuntu-latest name: "MKL (LP64, ILP64, SDL)" steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true @@ -347,7 +347,7 @@ jobs: runs-on: ubuntu-latest name: "BLIS" steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true @@ -384,7 +384,7 @@ jobs: runs-on: ubuntu-latest name: "ATLAS" steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/linux_qemu.yml b/.github/workflows/linux_qemu.yml index 5fd10b73b9be..b27a665c0e2b 100644 --- a/.github/workflows/linux_qemu.yml +++ b/.github/workflows/linux_qemu.yml @@ -95,7 +95,7 @@ jobs: name: "${{ matrix.BUILD_PROP[0] }}" steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true @@ -202,7 +202,7 @@ jobs: name: "${{ matrix.BUILD_PROP[0] }}" steps: - - uses: 
actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/linux_simd.yml b/.github/workflows/linux_simd.yml index de77a8410f18..74cc00f52e7d 100644 --- a/.github/workflows/linux_simd.yml +++ b/.github/workflows/linux_simd.yml @@ -58,7 +58,7 @@ jobs: env: MESON_ARGS: "-Dallow-noblas=true -Dcpu-dispatch=none" steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true @@ -76,7 +76,7 @@ jobs: env: MESON_ARGS: "-Dallow-noblas=true" steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true @@ -123,7 +123,7 @@ jobs: args: "-Dallow-noblas=true -Dcpu-baseline=native -Dcpu-dispatch=none" name: "ARM64 SIMD - ${{ matrix.config.name }}" steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true @@ -170,7 +170,7 @@ jobs: name: "${{ matrix.BUILD_PROP[0] }}" steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true @@ -185,7 +185,7 @@ jobs: needs: [baseline_only] runs-on: ubuntu-24.04 steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true @@ -235,7 +235,7 @@ jobs: needs: [baseline_only] runs-on: ubuntu-24.04 steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - 
uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 399bec631713..8dc888aa94c4 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -32,7 +32,7 @@ jobs: python-version: ["3.12"] steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true @@ -121,7 +121,7 @@ jobs: version: ["3.11", "3.14t"] steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml index 0e9e63d53d1a..34780b0fdd8c 100644 --- a/.github/workflows/mypy.yml +++ b/.github/workflows/mypy.yml @@ -53,7 +53,7 @@ jobs: - [ubuntu-latest, '3.12'] - [windows-latest, '3.11'] steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/mypy_primer.yml b/.github/workflows/mypy_primer.yml index d8e4a7eb5817..74b3fac96b6d 100644 --- a/.github/workflows/mypy_primer.yml +++ b/.github/workflows/mypy_primer.yml @@ -28,7 +28,7 @@ jobs: shard-index: [0] # e.g. 
change this to [0, 1, 2] and --num-shards below to 3 fail-fast: false steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: path: numpy_to_test fetch-depth: 0 diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 227f2f3788b0..20d0c642849e 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -25,7 +25,7 @@ jobs: steps: - name: "Checkout code" - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v3.1.0 + uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v3.1.0 with: persist-credentials: false diff --git a/.github/workflows/stubtest.yml b/.github/workflows/stubtest.yml index 43e60fae80b9..d9f72a61b199 100644 --- a/.github/workflows/stubtest.yml +++ b/.github/workflows/stubtest.yml @@ -37,7 +37,7 @@ jobs: py: ["3.11", "3.14"] steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 1ceefc2e540d..bc89ffab3423 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -44,7 +44,7 @@ jobs: IS_32_BIT: ${{ matrix.buildplat[1] == 'win32' }} # used in cibw_test_command.sh steps: - name: Checkout numpy - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: true persist-credentials: false diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 3302e054d438..8136d80c6a88 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -25,7 +25,7 @@ jobs: if: github.repository == 'numpy/numpy' steps: - name: Checkout - uses: 
actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true @@ -83,7 +83,7 @@ jobs: if: github.repository == 'numpy/numpy' steps: - name: Checkout - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true @@ -139,7 +139,7 @@ jobs: if: github.repository == 'numpy/numpy' steps: - name: Checkout - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 with: submodules: recursive fetch-tags: true From 68da3c3ef579e20c3d4e745abbe94e46f36c7a80 Mon Sep 17 00:00:00 2001 From: Kumar Aditya Date: Wed, 19 Nov 2025 00:14:07 +0530 Subject: [PATCH 0894/1718] BUG: fix data race in `PyArray_DescrHash` (#30234) * BUG: fix data race in PyArray_DescrHash * fix return type --- numpy/_core/src/common/npy_atomic.h | 12 ++++++++++ numpy/_core/src/multiarray/hashdescr.c | 32 +++++++++++--------------- 2 files changed, 26 insertions(+), 18 deletions(-) diff --git a/numpy/_core/src/common/npy_atomic.h b/numpy/_core/src/common/npy_atomic.h index f5b41d7068be..61a31acc13e0 100644 --- a/numpy/_core/src/common/npy_atomic.h +++ b/numpy/_core/src/common/npy_atomic.h @@ -79,6 +79,12 @@ npy_atomic_load_ptr(const void *obj) { #endif } +static inline npy_hash_t +npy_atomic_load_hash_t(const npy_hash_t *obj) { + assert(sizeof(npy_hash_t) == sizeof(void *)); + return (npy_hash_t)npy_atomic_load_ptr((const void *)obj); +} + static inline void npy_atomic_store_uint8(npy_uint8 *obj, npy_uint8 value) { #ifdef STDC_ATOMICS @@ -104,6 +110,12 @@ npy_atomic_store_ptr(void *obj, void *value) #endif } +static inline void +npy_atomic_store_hash_t(npy_hash_t *obj, npy_hash_t value) { + assert(sizeof(npy_hash_t) == sizeof(void *)); + npy_atomic_store_ptr((void 
*)obj, (void *)value); +} + #undef MSC_ATOMICS #undef STDC_ATOMICS #undef GCC_ATOMICS diff --git a/numpy/_core/src/multiarray/hashdescr.c b/numpy/_core/src/multiarray/hashdescr.c index d5359d86390f..0117ef218a81 100644 --- a/numpy/_core/src/multiarray/hashdescr.c +++ b/numpy/_core/src/multiarray/hashdescr.c @@ -6,6 +6,7 @@ #include +#include "npy_atomic.h" #include "npy_config.h" @@ -256,12 +257,13 @@ static int _array_descr_walk(PyArray_Descr* descr, PyObject *l) } /* - * Return 0 if successful + * Return hash on success, -1 on failure */ -static int _PyArray_DescrHashImp(PyArray_Descr *descr, npy_hash_t *hash) +static npy_hash_t _PyArray_DescrHashImp(PyArray_Descr *descr) { PyObject *l, *tl; int st; + npy_hash_t hash; l = PyList_New(0); if (l == NULL) { @@ -283,25 +285,16 @@ static int _PyArray_DescrHashImp(PyArray_Descr *descr, npy_hash_t *hash) if (tl == NULL) return -1; - *hash = PyObject_Hash(tl); + hash = PyObject_Hash(tl); Py_DECREF(tl); - if (*hash == -1) { - /* XXX: does PyObject_Hash set an exception on failure ? 
*/ -#if 0 - PyErr_SetString(PyExc_SystemError, - "(Hash) Error while hashing final tuple"); -#endif - return -1; - } - - return 0; + return hash; } NPY_NO_EXPORT npy_hash_t PyArray_DescrHash(PyObject* odescr) { PyArray_Descr *descr; - int st; + npy_hash_t hash; if (!PyArray_DescrCheck(odescr)) { PyErr_SetString(PyExc_ValueError, @@ -310,12 +303,15 @@ PyArray_DescrHash(PyObject* odescr) } descr = (PyArray_Descr*)odescr; - if (descr->hash == -1) { - st = _PyArray_DescrHashImp(descr, &descr->hash); - if (st) { + hash = npy_atomic_load_hash_t(&descr->hash); + + if (hash == -1) { + hash = _PyArray_DescrHashImp(descr); + if (hash == -1) { return -1; } + npy_atomic_store_hash_t(&descr->hash, hash); } - return descr->hash; + return hash; } From 676d6c1d927484847f065736d83c4163b287e5e1 Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 18 Nov 2025 20:30:05 +0100 Subject: [PATCH 0895/1718] CI: update ``paths-ignore`` for mypy and wheels workflows --- .github/workflows/mypy.yml | 19 +++++++++++-------- .github/workflows/wheels.yml | 4 ++++ 2 files changed, 15 insertions(+), 8 deletions(-) diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml index 34780b0fdd8c..2b41011315da 100644 --- a/.github/workflows/mypy.yml +++ b/.github/workflows/mypy.yml @@ -18,14 +18,17 @@ on: - main - maintenance/** paths-ignore: - - '.circlecl/' - - '.devcontainer/' - - 'benchmarks/' - - 'branding/' - - 'docs/' - - 'meson_cpu/' - - 'tools/' - - 'vendored-meson/' + - '**.md' + - '**.rst' + - '.circlecl/**' + - '.devcontainer/**' + - '.spin/**' + - 'benchmarks/**' + - 'branding/**' + - 'doc/**' + - 'meson_cpu/**' + - 'tools/**' + - 'vendored-meson/**' workflow_dispatch: defaults: diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index bc89ffab3423..044020adeff8 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -9,6 +9,10 @@ on: branches: - main - maintenance/** + paths-ignore: + - '**.pyi' + - '**.md' + - '**.rst' workflow_dispatch: 
concurrency: From a79233cfa506d96477afed0d900d3f7995cbac71 Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Wed, 19 Nov 2025 02:00:42 +0200 Subject: [PATCH 0896/1718] BUG: always ignore FPE when Accelerate is the BLAS backend (#30255) --- numpy/_core/src/common/blas_utils.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/numpy/_core/src/common/blas_utils.c b/numpy/_core/src/common/blas_utils.c index cbf8e0dc05c5..43d4b1e845f0 100644 --- a/numpy/_core/src/common/blas_utils.c +++ b/numpy/_core/src/common/blas_utils.c @@ -14,8 +14,13 @@ #if NPY_BLAS_CHECK_FPE_SUPPORT /* * Static variable to cache runtime check of BLAS FPE support. + * Will always be false (ignore all FPE) when accelerate is the compiled backend */ + #if defined(ACCELERATE_NEW_LAPACK) +static bool blas_supports_fpe = false; + #else static bool blas_supports_fpe = true; + #endif // ACCELERATE_NEW_LAPACK #endif // NPY_BLAS_CHECK_FPE_SUPPORT From 8eb840a074ced9678f7c5bc1257842131892fcb1 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 19 Nov 2025 03:08:43 +0100 Subject: [PATCH 0897/1718] ENH: ``ufunc.__signature__`` (#30211) * ENH: ``ufunc.__signature__`` * DOC: document ``ufunc.__signature__`` * MAINT: apply review suggestions Co-authored-by: Marten van Kerkwijk * TST: more and better testing of `ufunc.__signature__` * STY: consistent `ufunc_getset[]` ordering Co-authored-by: Marten van Kerkwijk * DOC: fix table alignment * ENH: Cache the `ufunc.__signature__` signature object on access * TST: fix test failure caused by the new `ufunc.__signature__` * BUG: avoid adding `ufunc.__signature__` as class atrribute * TYP: appease stubtest * DOC: mention ufuncs in the `inpsect.signature` release notes * MAINT: interned `__signature__` comparison in `ufunc` attribute access Co-authored-by: Marten van Kerkwijk --------- Co-authored-by: Marten van Kerkwijk --- .../upcoming_changes/30208.improvement.rst | 4 +- doc/source/reference/ufuncs.rst | 19 +++--- numpy/__init__.pyi | 3 + 
numpy/_core/_internal.py | 50 ++++++++++++++ numpy/_core/src/common/npy_import.h | 1 + numpy/_core/src/multiarray/npy_static_data.c | 1 + numpy/_core/src/multiarray/npy_static_data.h | 1 + numpy/_core/src/umath/ufunc_object.c | 63 +++++++++++++++-- numpy/_core/tests/test_ufunc.py | 67 +++++++++++++++++++ numpy/_core/tests/test_umath.py | 2 + 10 files changed, 195 insertions(+), 16 deletions(-) diff --git a/doc/release/upcoming_changes/30208.improvement.rst b/doc/release/upcoming_changes/30208.improvement.rst index 065e91eca33d..ad9faaedfb6b 100644 --- a/doc/release/upcoming_changes/30208.improvement.rst +++ b/doc/release/upcoming_changes/30208.improvement.rst @@ -8,5 +8,5 @@ the NumPy API. Over three hundred classes and functions have been updated in total, including, but not limited to, core classes such as `ndarray`, `generic`, `dtype`, `ufunc`, `broadcast`, `nditer`, etc., most methods of `ndarray` and scalar types, array constructor functions (`array`, `empty`, -`arange`, `fromiter`, etc.), and many other commonly used functions, including `dot`, `concat`, -`where`, `bincount`, `can_cast`, and numerous others. +`arange`, `fromiter`, etc.), all :ref:`ufuncs`, and many other commonly used functions, including +`dot`, `concat`, `where`, `bincount`, `can_cast`, and numerous others. diff --git a/doc/source/reference/ufuncs.rst b/doc/source/reference/ufuncs.rst index 03d86cd057d2..0c675718818b 100644 --- a/doc/source/reference/ufuncs.rst +++ b/doc/source/reference/ufuncs.rst @@ -52,7 +52,7 @@ tuple holding a single array) is also valid. If 'out' is None (the default), a uninitialized output array is created, which will be filled in the ufunc. At the end, this array is returned unless it is zero-dimensional, in which case it is converted to a scalar; -this conversion can be avoided by passing in ``out=...``. This can also be +this conversion can be avoided by passing in ``out=...``. This can also be spelled `out=Ellipsis` if you think that is clearer. 
Note that the output is filled only in the places that the broadcast @@ -183,14 +183,17 @@ possess. None of the attributes can be set. pair: ufunc; attributes -============ ================================================================= -**__doc__** A docstring for each ufunc. The first part of the docstring is - dynamically generated from the number of outputs, the name, and - the number of inputs. The second part of the docstring is - provided at creation time and stored with the ufunc. +================= ================================================================= +**__doc__** A docstring for each ufunc. The first part of the docstring is + dynamically generated from the number of outputs, the name, and + the number of inputs. The second part of the docstring is + provided at creation time and stored with the ufunc. -**__name__** The name of the ufunc. -============ ================================================================= +**__name__** The name of the ufunc. + +**__signature__** The call signature of the ufunc, as an :class:`inspect.Signature` + object. +================= ================================================================= .. autosummary:: :toctree: generated/ diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index b973e43093ca..7d91e3d384de 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -5,6 +5,7 @@ import mmap import ctypes as ct import array as _array import datetime as dt +import inspect from abc import abstractmethod from types import EllipsisType, ModuleType, TracebackType, MappingProxyType, GenericAlias from decimal import Decimal @@ -5731,6 +5732,8 @@ class str_(character[str], str): # type: ignore[misc] # See `numpy._typing._ufunc` for more concrete nin-/nout-specific stubs @final class ufunc: + __signature__: Final[inspect.Signature] + @property def __name__(self) -> LiteralString: ... 
@property diff --git a/numpy/_core/_internal.py b/numpy/_core/_internal.py index 490b3407997a..7c64daf30dbd 100644 --- a/numpy/_core/_internal.py +++ b/numpy/_core/_internal.py @@ -855,6 +855,8 @@ def _ufunc_doc_signature_formatter(ufunc): Builds a signature string which resembles PEP 457 This is used to construct the first line of the docstring + + Keep in sync with `_ufunc_inspect_signature_builder`. """ # input arguments are simple @@ -893,6 +895,54 @@ def _ufunc_doc_signature_formatter(ufunc): return f'{ufunc.__name__}({in_args}{out_args}, *{kwargs})' +def _ufunc_inspect_signature_builder(ufunc): + """ + Builds a ``__signature__`` string. + + Should be kept in sync with `_ufunc_doc_signature_formatter`. + """ + + from inspect import Parameter, Signature + + params = [] + + # positional-only input parameters + if ufunc.nin == 1: + params.append(Parameter("x", Parameter.POSITIONAL_ONLY)) + else: + params.extend( + Parameter(f"x{i}", Parameter.POSITIONAL_ONLY) + for i in range(1, ufunc.nin + 1) + ) + + # for the sake of simplicity, we only consider a single output parameter + if ufunc.nout == 1: + out_default = None + else: + out_default = (None,) * ufunc.nout + params.append( + Parameter("out", Parameter.POSITIONAL_OR_KEYWORD, default=out_default), + ) + + if ufunc.signature is None: + params.append(Parameter("where", Parameter.KEYWORD_ONLY, default=True)) + else: + # NOTE: not all gufuncs support the `axis` parameters + params.append(Parameter("axes", Parameter.KEYWORD_ONLY, default=_NoValue)) + params.append(Parameter("axis", Parameter.KEYWORD_ONLY, default=_NoValue)) + params.append(Parameter("keepdims", Parameter.KEYWORD_ONLY, default=False)) + + params.extend(( + Parameter("casting", Parameter.KEYWORD_ONLY, default='same_kind'), + Parameter("order", Parameter.KEYWORD_ONLY, default='K'), + Parameter("dtype", Parameter.KEYWORD_ONLY, default=None), + Parameter("subok", Parameter.KEYWORD_ONLY, default=True), + Parameter("signature", Parameter.KEYWORD_ONLY, 
default=None), + )) + + return Signature(params) + + def npy_ctypes_check(cls): # determine if a class comes from ctypes, in order to work around # a bug in the buffer protocol for those objects, bpo-10746 diff --git a/numpy/_core/src/common/npy_import.h b/numpy/_core/src/common/npy_import.h index d7fb8d20857d..fec1b22f3975 100644 --- a/numpy/_core/src/common/npy_import.h +++ b/numpy/_core/src/common/npy_import.h @@ -45,6 +45,7 @@ typedef struct npy_runtime_imports_struct { PyObject *_std; PyObject *_sum; PyObject *_ufunc_doc_signature_formatter; + PyObject *_ufunc_inspect_signature_builder; PyObject *_usefields; PyObject *_var; PyObject *_view_is_safe; diff --git a/numpy/_core/src/multiarray/npy_static_data.c b/numpy/_core/src/multiarray/npy_static_data.c index 9fd321a375a2..c7f4c51c6675 100644 --- a/numpy/_core/src/multiarray/npy_static_data.c +++ b/numpy/_core/src/multiarray/npy_static_data.c @@ -64,6 +64,7 @@ intern_strings(void) INTERN_STRING(pyvals_name, "UFUNC_PYVALS_NAME"); INTERN_STRING(legacy, "legacy"); INTERN_STRING(__doc__, "__doc__"); + INTERN_STRING(__signature__, "__signature__"); INTERN_STRING(copy, "copy"); INTERN_STRING(dl_device, "dl_device"); INTERN_STRING(max_version, "max_version"); diff --git a/numpy/_core/src/multiarray/npy_static_data.h b/numpy/_core/src/multiarray/npy_static_data.h index a6901c858374..365673ffb0b4 100644 --- a/numpy/_core/src/multiarray/npy_static_data.h +++ b/numpy/_core/src/multiarray/npy_static_data.h @@ -43,6 +43,7 @@ typedef struct npy_interned_str_struct { PyObject *pyvals_name; PyObject *legacy; PyObject *__doc__; + PyObject *__signature__; PyObject *copy; PyObject *dl_device; PyObject *max_version; diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index 6bf6832dfb9b..9c0c0a4f8eab 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -6575,8 +6575,8 @@ static struct PyMethodDef ufunc_methods[] = { }; 
-/****************************************************************************** - *** UFUNC GETSET *** +/***************************************************************************** + *** UFUNC GETSET *** *****************************************************************************/ @@ -6639,6 +6639,57 @@ ufunc_set_doc(PyUFuncObject *ufunc, PyObject *doc, void *NPY_UNUSED(ignored)) } } +static PyObject * +ufunc_get_inspect_signature(PyUFuncObject *ufunc, void *NPY_UNUSED(ignored)) +{ + PyObject *signature; + + // If there is a __signature__ in the instance __dict__, use it. + int result = PyDict_GetItemRef(ufunc->dict, npy_interned_str.__signature__, + &signature); + if (result == -1) { + return NULL; + } + else if (result == 1) { + return signature; + } + + if (npy_cache_import_runtime( + "numpy._core._internal", "_ufunc_inspect_signature_builder", + &npy_runtime_imports._ufunc_inspect_signature_builder) == -1) { + return NULL; + } + + signature = PyObject_CallFunctionObjArgs( + npy_runtime_imports._ufunc_inspect_signature_builder, + (PyObject *)ufunc, NULL); + if (signature == NULL) { + return NULL; + } + + // Cache the result in the instance dict for next time + if (PyDict_SetItem(ufunc->dict, npy_interned_str.__signature__, + signature) < 0) { + Py_DECREF(signature); + return NULL; + } + + return signature; +} + +static PyObject * +ufunc_getattro(PyObject *obj, PyObject *name) +{ + // __signature__ special-casing to prevent class attribute access + if (PyUnicode_Check(name) && + PyUnicode_Compare(name, npy_interned_str.__signature__) == 0) { + return ufunc_get_inspect_signature((PyUFuncObject *)obj, NULL); + } + + // For all other attributes, use default behavior + return PyObject_GenericGetAttr(obj, name); +} + static PyObject * ufunc_get_nin(PyUFuncObject *ufunc, void *NPY_UNUSED(ignored)) { @@ -6725,6 +6776,9 @@ static PyGetSetDef ufunc_getset[] = { {"__doc__", (getter)ufunc_get_doc, (setter)ufunc_set_doc, NULL, NULL}, + {"__name__", + 
(getter)ufunc_get_name, + NULL, NULL, NULL}, {"nin", (getter)ufunc_get_nin, NULL, NULL, NULL}, @@ -6740,9 +6794,6 @@ static PyGetSetDef ufunc_getset[] = { {"types", (getter)ufunc_get_types, NULL, NULL, NULL}, - {"__name__", - (getter)ufunc_get_name, - NULL, NULL, NULL}, {"identity", (getter)ufunc_get_identity, NULL, NULL, NULL}, @@ -6783,7 +6834,7 @@ NPY_NO_EXPORT PyTypeObject PyUFunc_Type = { .tp_traverse = (traverseproc)ufunc_traverse, .tp_methods = ufunc_methods, .tp_getset = ufunc_getset, - .tp_getattro = PyObject_GenericGetAttr, + .tp_getattro = ufunc_getattro, .tp_setattro = PyObject_GenericSetAttr, // TODO when Python 3.12 is the minimum supported version, // use Py_TPFLAGS_MANAGED_DICT diff --git a/numpy/_core/tests/test_ufunc.py b/numpy/_core/tests/test_ufunc.py index a29fae0539e4..09d01eab8186 100644 --- a/numpy/_core/tests/test_ufunc.py +++ b/numpy/_core/tests/test_ufunc.py @@ -3336,3 +3336,70 @@ def test_long_arrays(self): t[28][414] = 1 tc = np.cos(t) assert_equal(tc[0][0], tc[28][414]) + + +class TestUFuncInspectSignature: + PARAMS_COMMON = { + "casting": "same_kind", + "order": "K", + "dtype": None, + "subok": True, + "signature": None, + } + + PARAMS_UFUNC = { + "where": True, + } | PARAMS_COMMON + + PARAMS_GUFUNC = { + "axes": np._NoValue, + "axis": np._NoValue, + "keepdims": False, + } | PARAMS_COMMON + + @pytest.mark.parametrize("ufunc", [np.log, np.gcd, np.frexp, np.divmod, np.matvec]) + def test_dunder_signature_attr(self, ufunc: np.ufunc): + assert hasattr(ufunc, "__signature__") + assert isinstance(ufunc.__signature__, inspect.Signature) + assert inspect.signature(ufunc) == ufunc.__signature__ + + @pytest.mark.parametrize("ufunc", [np.exp, np.mod, np.frexp, np.divmod, np.vecmat]) + def test_params_common_positional(self, ufunc: np.ufunc): + sig = inspect.signature(ufunc) + + # check positional-only parameters + posonly_params = {name: param.default + for name, param in sig.parameters.items() + if param.kind is param.POSITIONAL_ONLY} + assert 
len(posonly_params) == ufunc.nin + assert all(default is inspect.Parameter.empty + for default in posonly_params.values()) + + # check 'out' parameter + out_param = sig.parameters.get("out") + assert out_param is not None + assert out_param.kind is inspect.Parameter.POSITIONAL_OR_KEYWORD + + @pytest.mark.parametrize("ufunc", [np.sin, np.add, np.frexp, np.divmod]) + def test_params_common_ufunc(self, ufunc: np.ufunc): + assert ufunc.signature is None # sanity check + + sig = inspect.signature(ufunc) + + # check keyword-only parameters + keyword_params = {name: param.default + for name, param in sig.parameters.items() + if param.kind is param.KEYWORD_ONLY} + assert keyword_params == self.PARAMS_UFUNC + + @pytest.mark.parametrize("gufunc", [np.matmul, np.matvec, np.vecdot, np.vecmat]) + def test_params_common_gufunc(self, gufunc: np.ufunc): + assert gufunc.signature is not None # sanity check + + sig = inspect.signature(gufunc) + + # check keyword-only parameters + keyword_params = {name: param.default + for name, param in sig.parameters.items() + if param.kind is param.KEYWORD_ONLY} + assert keyword_params == self.PARAMS_GUFUNC diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py index f99441180182..c10e4d1cf387 100644 --- a/numpy/_core/tests/test_umath.py +++ b/numpy/_core/tests/test_umath.py @@ -1,4 +1,5 @@ import fnmatch +import inspect import itertools import operator import platform @@ -4053,6 +4054,7 @@ def test_ufunc_docstring(self): expected_dict = ( {} if IS_PYPY else {"__module__": "numpy", "__qualname__": "add"} ) + expected_dict["__signature__"] = inspect.signature(np.add) np.add.__doc__ = new_doc assert np.add.__doc__ == new_doc From 0ecf7805870507d6c0bbfa97f529eb82741284d0 Mon Sep 17 00:00:00 2001 From: Kumar Aditya Date: Wed, 19 Nov 2025 17:40:59 +0530 Subject: [PATCH 0898/1718] BUG: fix data race in ufunc scalar fast path This PR adds read lock while accessing the dispatch cache in try_trivial_scalar_call to fix the data 
race. Fixes gh-30251 --- numpy/_core/src/common/npy_hashtable.cpp | 17 +++++++++++++++++ numpy/_core/src/common/npy_hashtable.h | 5 +++++ numpy/_core/src/umath/dispatching.cpp | 6 +----- numpy/_core/src/umath/ufunc_object.c | 6 ++++++ 4 files changed, 29 insertions(+), 5 deletions(-) diff --git a/numpy/_core/src/common/npy_hashtable.cpp b/numpy/_core/src/common/npy_hashtable.cpp index ffd67d403853..27e014ca00e0 100644 --- a/numpy/_core/src/common/npy_hashtable.cpp +++ b/numpy/_core/src/common/npy_hashtable.cpp @@ -240,3 +240,20 @@ PyArrayIdentityHash_GetItem(PyArrayIdentityHash *tb, PyObject *const *key) PyObject *res = find_item(tb, key)[0]; return res; } + +#ifdef Py_GIL_DISABLED + +NPY_NO_EXPORT PyObject * +PyArrayIdentityHash_GetItemWithLock(PyArrayIdentityHash *tb, PyObject *const *key) +{ + PyObject *res; + std::shared_mutex *mutex = (std::shared_mutex *)tb->mutex; + NPY_BEGIN_ALLOW_THREADS + mutex->lock_shared(); + NPY_END_ALLOW_THREADS + res = find_item(tb, key)[0]; + mutex->unlock_shared(); + return res; +} + +#endif // Py_GIL_DISABLED diff --git a/numpy/_core/src/common/npy_hashtable.h b/numpy/_core/src/common/npy_hashtable.h index cd061ba6fa11..aed805e1791d 100644 --- a/numpy/_core/src/common/npy_hashtable.h +++ b/numpy/_core/src/common/npy_hashtable.h @@ -27,6 +27,11 @@ NPY_NO_EXPORT int PyArrayIdentityHash_SetItem(PyArrayIdentityHash *tb, PyObject *const *key, PyObject *value, int replace); +#ifdef Py_GIL_DISABLED +NPY_NO_EXPORT PyObject * +PyArrayIdentityHash_GetItemWithLock(PyArrayIdentityHash *tb, PyObject *const *key); +#endif // Py_GIL_DISABLED + NPY_NO_EXPORT PyObject * PyArrayIdentityHash_GetItem(PyArrayIdentityHash *tb, PyObject *const *key); diff --git a/numpy/_core/src/umath/dispatching.cpp b/numpy/_core/src/umath/dispatching.cpp index 3ce6624bbf4a..0857f7c8a768 100644 --- a/numpy/_core/src/umath/dispatching.cpp +++ b/numpy/_core/src/umath/dispatching.cpp @@ -982,13 +982,9 @@ promote_and_get_info_and_ufuncimpl_with_locking( npy_bool 
legacy_promotion_is_possible) { std::shared_mutex *mutex = ((std::shared_mutex *)((PyArrayIdentityHash *)ufunc->_dispatch_cache)->mutex); - NPY_BEGIN_ALLOW_THREADS - mutex->lock_shared(); - NPY_END_ALLOW_THREADS - PyObject *info = PyArrayIdentityHash_GetItem( + PyObject *info = PyArrayIdentityHash_GetItemWithLock( (PyArrayIdentityHash *)ufunc->_dispatch_cache, (PyObject **)op_dtypes); - mutex->unlock_shared(); if (info != NULL && PyObject_TypeCheck( PyTuple_GET_ITEM(info, 1), &PyArrayMethod_Type)) { diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index 9c0c0a4f8eab..eae5b666197a 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -4337,9 +4337,15 @@ try_trivial_scalar_call( // Try getting info from the (private) cache. Fall back if not found, // so that the the dtype gets registered and things will work next time. PyArray_DTypeMeta *op_dtypes[2] = {NPY_DTYPE(dt), NULL}; +#ifdef Py_GIL_DISABLED + PyObject *info = PyArrayIdentityHash_GetItemWithLock( // borrowed reference. + (PyArrayIdentityHash *)ufunc->_dispatch_cache, + (PyObject **)op_dtypes); +#else PyObject *info = PyArrayIdentityHash_GetItem( // borrowed reference. 
(PyArrayIdentityHash *)ufunc->_dispatch_cache, (PyObject **)op_dtypes); +#endif if (info == NULL) { goto bail; } From a684abe04c930457410db79564ae5dc58e7bb0d4 Mon Sep 17 00:00:00 2001 From: Kumar Aditya Date: Wed, 19 Nov 2025 22:57:44 +0530 Subject: [PATCH 0899/1718] TST: mark tests which call gc.collect() as thread unsafe (#30259) --- numpy/_core/tests/test_multiarray.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 2cba4eb3ec75..5410f7677b9f 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -4652,6 +4652,7 @@ def test_non_contiguous_array(self): assert_equal(len(buffers), 0) assert_equal(non_contiguous_array, depickled_non_contiguous_array) + @pytest.mark.thread_unsafe(reason="calls gc.collect()") def test_roundtrip(self): for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): carray = np.array([[2, 9], [7, 0], [3, 8]]) @@ -9845,6 +9846,7 @@ def _make_readonly(x): x.flags.writeable = False return x + @pytest.mark.thread_unsafe(reason="calls gc.collect()") @pytest.mark.parametrize('arr', [ np.array([1, 2, 3]), np.array([['one', 'two'], ['three', 'four']]), @@ -9893,6 +9895,7 @@ def test_ctypes_data_as_holds_reference(self, arr): break_cycles() assert_(arr_ref() is None, "unknowable whether ctypes pointer holds a reference") + @pytest.mark.thread_unsafe(reason="calls gc.collect()") def test_ctypes_as_parameter_holds_reference(self): arr = np.array([None]).copy() @@ -10216,6 +10219,7 @@ def __array_finalize__(self, obj): with pytest.raises(RuntimeError, match="boohoo!"): np.arange(10).view(BadAttributeArray) + @pytest.mark.thread_unsafe(reason="calls gc.collect()") def test_lifetime_on_error(self): # gh-11237 class RaisesInFinalize(np.ndarray): From 598dbf69a66efafa310b6edc3abe634fba970c1f Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 20 Nov 2025 03:44:51 -0700 Subject: [PATCH 0900/1718] TST: scalar fast path multithreaded 
test (#30263) * TST: Add a multithreaded test for the scalar fast path * MAINT: add comments explaining the locking in the dispatch cache * TST: make the test more exhaustive --- numpy/_core/src/common/npy_hashtable.h | 2 +- numpy/_core/src/umath/dispatching.cpp | 7 +++++++ numpy/_core/src/umath/ufunc_object.c | 3 +++ numpy/_core/tests/test_multithreading.py | 9 ++++----- 4 files changed, 15 insertions(+), 6 deletions(-) diff --git a/numpy/_core/src/common/npy_hashtable.h b/numpy/_core/src/common/npy_hashtable.h index aed805e1791d..02acc12d3191 100644 --- a/numpy/_core/src/common/npy_hashtable.h +++ b/numpy/_core/src/common/npy_hashtable.h @@ -18,7 +18,7 @@ typedef struct { npy_intp size; /* current size */ npy_intp nelem; /* number of elements */ #ifdef Py_GIL_DISABLED - void *mutex; + void *mutex; /* std::shared_mutex, prevents races to fill the cache */ #endif } PyArrayIdentityHash; diff --git a/numpy/_core/src/umath/dispatching.cpp b/numpy/_core/src/umath/dispatching.cpp index 0857f7c8a768..db5698d8a819 100644 --- a/numpy/_core/src/umath/dispatching.cpp +++ b/numpy/_core/src/umath/dispatching.cpp @@ -1093,6 +1093,13 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc, } } + /* + * We hold the GIL here, so on the GIL-enabled build the GIL prevents + * races to fill the promotion cache. + * + * On the free-threaded build we need to set up our own locking to prevent + * races to fill the promotion cache. + */ #ifdef Py_GIL_DISABLED PyObject *info = promote_and_get_info_and_ufuncimpl_with_locking(ufunc, ops, signature, op_dtypes, legacy_promotion_is_possible); diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index eae5b666197a..c4e941f65bfe 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -4338,6 +4338,9 @@ try_trivial_scalar_call( // so that the the dtype gets registered and things will work next time. 
PyArray_DTypeMeta *op_dtypes[2] = {NPY_DTYPE(dt), NULL}; #ifdef Py_GIL_DISABLED + // Other threads may be in the process of filling the dispatch cache, + // so we need to acquire the free-threading-specific dispatch cache mutex + // before reading the cache PyObject *info = PyArrayIdentityHash_GetItemWithLock( // borrowed reference. (PyArrayIdentityHash *)ufunc->_dispatch_cache, (PyObject **)op_dtypes); diff --git a/numpy/_core/tests/test_multithreading.py b/numpy/_core/tests/test_multithreading.py index 149a216fa1cd..b78f476d33e2 100644 --- a/numpy/_core/tests/test_multithreading.py +++ b/numpy/_core/tests/test_multithreading.py @@ -29,11 +29,10 @@ def test_parallel_ufunc_execution(): # if the loop data cache or dispatch cache are not thread-safe # computing ufuncs simultaneously in multiple threads leads # to a data race that causes crashes or spurious exceptions - def func(): - arr = np.random.random((25,)) - np.isnan(arr) - - run_threaded(func, 500) + for dtype in [np.float32, np.float64, np.int32]: + for op in [np.random.random((25,)).astype(dtype), dtype(25)]: + for ufunc in [np.isnan, np.sin]: + run_threaded(lambda: ufunc(op), 500) # see gh-26690 NUM_THREADS = 50 From 229c0170a935fe37964380dbe9b6d82d0708593d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 20 Nov 2025 17:06:19 +0000 Subject: [PATCH 0901/1718] MAINT: Bump actions/checkout from 5.0.1 to 6.0.0 Bumps [actions/checkout](https://github.com/actions/checkout) from 5.0.1 to 6.0.0. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/93cb6efe18208431cddfb8368fd83d5badbf9bfd...1af3b93b6815bc44a9784bd300feb67ff0d1eeb3) --- updated-dependencies: - dependency-name: actions/checkout dependency-version: 6.0.0 dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 2 +- .github/workflows/compiler_sanitizers.yml | 4 ++-- .github/workflows/cygwin.yml | 2 +- .github/workflows/dependency-review.yml | 2 +- .github/workflows/emscripten.yml | 2 +- .github/workflows/linux-ppc64le.yml | 2 +- .github/workflows/linux.yml | 24 +++++++++++------------ .github/workflows/linux_blas.yml | 18 ++++++++--------- .github/workflows/linux_qemu.yml | 4 ++-- .github/workflows/linux_simd.yml | 12 ++++++------ .github/workflows/macos.yml | 4 ++-- .github/workflows/mypy.yml | 2 +- .github/workflows/mypy_primer.yml | 2 +- .github/workflows/scorecards.yml | 2 +- .github/workflows/stubtest.yml | 2 +- .github/workflows/wheels.yml | 2 +- .github/workflows/windows.yml | 6 +++--- 17 files changed, 46 insertions(+), 46 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 318ad87039f2..ae31d4f33040 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -41,7 +41,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: persist-credentials: false diff --git a/.github/workflows/compiler_sanitizers.yml b/.github/workflows/compiler_sanitizers.yml index 54e136839053..bb185282b083 100644 --- a/.github/workflows/compiler_sanitizers.yml +++ b/.github/workflows/compiler_sanitizers.yml @@ -30,7 +30,7 @@ jobs: if: github.repository == 'numpy/numpy' runs-on: macos-latest steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true @@ -86,7 +86,7 @@ jobs: options: --shm-size=2g # increase memory for large matrix ops steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: 
actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - name: Trust working directory and initialize submodules run: | git config --global --add safe.directory /__w/numpy/numpy diff --git a/.github/workflows/cygwin.yml b/.github/workflows/cygwin.yml index 279020cbe365..03dc0b41f987 100644 --- a/.github/workflows/cygwin.yml +++ b/.github/workflows/cygwin.yml @@ -22,7 +22,7 @@ jobs: # To enable this workflow on a fork, comment out: if: github.repository == 'numpy/numpy' steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml index c27e9ef4e88c..28fe31642faa 100644 --- a/.github/workflows/dependency-review.yml +++ b/.github/workflows/dependency-review.yml @@ -15,7 +15,7 @@ jobs: runs-on: ubuntu-latest steps: - name: 'Checkout Repository' - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: persist-credentials: false - name: 'Dependency Review' diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index 906dec766b24..2b7eb24f9812 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -29,7 +29,7 @@ jobs: if: github.repository == 'numpy/numpy' steps: - name: Checkout NumPy - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/linux-ppc64le.yml b/.github/workflows/linux-ppc64le.yml index 93426a95e8c0..3817f3ebc0d8 100644 --- a/.github/workflows/linux-ppc64le.yml +++ b/.github/workflows/linux-ppc64le.yml @@ -27,7 +27,7 @@ jobs: runs-on: ubuntu-24.04-ppc64le-p10 name: "Native PPC64LE" steps: 
- - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 4e3e01235e65..1d8cb119705b 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -37,7 +37,7 @@ jobs: runs-on: ubuntu-latest continue-on-error: true steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-depth: 0 @@ -68,7 +68,7 @@ jobs: matrix: version: ["3.11", "3.12", "3.13", "3.14", "3.14t"] steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true @@ -83,7 +83,7 @@ jobs: runs-on: ubuntu-latest if: github.event_name != 'push' steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true @@ -102,7 +102,7 @@ jobs: runs-on: ubuntu-24.04 if: github.event_name != 'push' steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true @@ -132,7 +132,7 @@ jobs: needs: [smoke_test] runs-on: ubuntu-22.04 steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true @@ -172,7 +172,7 @@ jobs: if: github.repository == 'numpy/numpy' runs-on: ubuntu-24.04-arm steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: 
actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true @@ -217,7 +217,7 @@ jobs: runs-on: ubuntu-latest if: github.event_name != 'push' steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true @@ -256,7 +256,7 @@ jobs: runs-on: ubuntu-latest if: github.event_name != 'push' steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true @@ -296,13 +296,13 @@ jobs: if: github.event_name != 'push' steps: - name: Checkout NumPy - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true persist-credentials: false - name: Checkout array-api-tests - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: repository: data-apis/array-api-tests ref: '3c273cd34d51c64ed893737306d36adab23a94a1' # v2025.05.23 @@ -334,7 +334,7 @@ jobs: runs-on: ubuntu-latest if: github.event_name != 'push' steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true @@ -342,7 +342,7 @@ jobs: - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: '3.11' - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: repository: numpy/numpy-release path: numpy-release diff --git a/.github/workflows/linux_blas.yml 
b/.github/workflows/linux_blas.yml index b141c33165cf..8dba9281443b 100644 --- a/.github/workflows/linux_blas.yml +++ b/.github/workflows/linux_blas.yml @@ -69,7 +69,7 @@ jobs: USE_NIGHTLY_OPENBLAS: ${{ matrix.USE_NIGHTLY_OPENBLAS }} name: "Test Linux (${{ matrix.USE_NIGHTLY_OPENBLAS && 'nightly' || 'stable' }} OpenBLAS)" steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true @@ -127,7 +127,7 @@ jobs: run: | dnf install git gcc-gfortran g++ python3-devel openblas-devel -y - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true @@ -162,7 +162,7 @@ jobs: run: | dnf install git gcc-gfortran g++ python3-devel flexiblas-devel -y - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true @@ -192,7 +192,7 @@ jobs: runs-on: ubuntu-latest name: "OpenBLAS with CMake" steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true @@ -220,7 +220,7 @@ jobs: runs-on: ubuntu-latest name: "Debian libblas/liblapack" steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true @@ -256,7 +256,7 @@ jobs: # If it is needed in the future, use install name `pkgconf-pkg-config` zypper install -y git gcc-c++ python3-pip python3-devel blas cblas lapack - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: 
actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true @@ -283,7 +283,7 @@ jobs: runs-on: ubuntu-latest name: "MKL (LP64, ILP64, SDL)" steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true @@ -347,7 +347,7 @@ jobs: runs-on: ubuntu-latest name: "BLIS" steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true @@ -384,7 +384,7 @@ jobs: runs-on: ubuntu-latest name: "ATLAS" steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/linux_qemu.yml b/.github/workflows/linux_qemu.yml index b27a665c0e2b..e4688e50e5bc 100644 --- a/.github/workflows/linux_qemu.yml +++ b/.github/workflows/linux_qemu.yml @@ -95,7 +95,7 @@ jobs: name: "${{ matrix.BUILD_PROP[0] }}" steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true @@ -202,7 +202,7 @@ jobs: name: "${{ matrix.BUILD_PROP[0] }}" steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/linux_simd.yml b/.github/workflows/linux_simd.yml index 74cc00f52e7d..6078e2f15378 100644 --- a/.github/workflows/linux_simd.yml +++ b/.github/workflows/linux_simd.yml @@ -58,7 +58,7 @@ jobs: env: MESON_ARGS: "-Dallow-noblas=true -Dcpu-dispatch=none" steps: - - uses: 
actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true @@ -76,7 +76,7 @@ jobs: env: MESON_ARGS: "-Dallow-noblas=true" steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true @@ -123,7 +123,7 @@ jobs: args: "-Dallow-noblas=true -Dcpu-baseline=native -Dcpu-dispatch=none" name: "ARM64 SIMD - ${{ matrix.config.name }}" steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true @@ -170,7 +170,7 @@ jobs: name: "${{ matrix.BUILD_PROP[0] }}" steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true @@ -185,7 +185,7 @@ jobs: needs: [baseline_only] runs-on: ubuntu-24.04 steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true @@ -235,7 +235,7 @@ jobs: needs: [baseline_only] runs-on: ubuntu-24.04 steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 8dc888aa94c4..76d87a5e5022 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -32,7 +32,7 @@ jobs: python-version: ["3.12"] steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: 
actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true @@ -121,7 +121,7 @@ jobs: version: ["3.11", "3.14t"] steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml index 2b41011315da..51c03285c990 100644 --- a/.github/workflows/mypy.yml +++ b/.github/workflows/mypy.yml @@ -56,7 +56,7 @@ jobs: - [ubuntu-latest, '3.12'] - [windows-latest, '3.11'] steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/mypy_primer.yml b/.github/workflows/mypy_primer.yml index 74b3fac96b6d..dcdad4ff36ba 100644 --- a/.github/workflows/mypy_primer.yml +++ b/.github/workflows/mypy_primer.yml @@ -28,7 +28,7 @@ jobs: shard-index: [0] # e.g. 
change this to [0, 1, 2] and --num-shards below to 3 fail-fast: false steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: path: numpy_to_test fetch-depth: 0 diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index a43e793e1e98..5b5a78c59029 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -25,7 +25,7 @@ jobs: steps: - name: "Checkout code" - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v3.1.0 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v3.1.0 with: persist-credentials: false diff --git a/.github/workflows/stubtest.yml b/.github/workflows/stubtest.yml index d9f72a61b199..0cfbd6197de0 100644 --- a/.github/workflows/stubtest.yml +++ b/.github/workflows/stubtest.yml @@ -37,7 +37,7 @@ jobs: py: ["3.11", "3.14"] steps: - - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 044020adeff8..24061ae7b014 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -48,7 +48,7 @@ jobs: IS_32_BIT: ${{ matrix.buildplat[1] == 'win32' }} # used in cibw_test_command.sh steps: - name: Checkout numpy - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: true persist-credentials: false diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 8136d80c6a88..20584f60219f 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -25,7 +25,7 @@ jobs: if: github.repository == 'numpy/numpy' steps: - name: Checkout - uses: 
actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true @@ -83,7 +83,7 @@ jobs: if: github.repository == 'numpy/numpy' steps: - name: Checkout - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true @@ -139,7 +139,7 @@ jobs: if: github.repository == 'numpy/numpy' steps: - name: Checkout - uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: submodules: recursive fetch-tags: true From 1aa31099403cbf5ba17dab857666986d57749834 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 20 Nov 2025 21:35:03 +0100 Subject: [PATCH 0902/1718] ENH: Use descriptor rather than custom `tp_getattro` (#30270) * ENH: Use descriptor rather than custom `tp_getattro` To me a custom `tp_getattro` feels a bit wrong, so instead use a descriptor protocol to solve this. That is unfortunately slightly annoying/complex. Static types do however have a `tp_dict` (a fact that I had missed...). So while normal properties don't behave well enough for signatures putting a `__signature__` into the `__dict__` does work perfectly fine, although admittedly it is bit awkward to set up. This should ensure that fast-paths that rely on the default getattr not being replaced should remain active. 
* Add comment about __signature__ definition based on review * Skip test on PyPy and rename to _signature_descriptor --- numpy/_core/src/multiarray/multiarraymodule.c | 11 ++++ numpy/_core/src/umath/ufunc_object.c | 54 +------------------ numpy/_core/tests/test_umath.py | 1 + numpy/_globals.py | 25 +++++++++ 4 files changed, 39 insertions(+), 52 deletions(-) diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index 73ef0760d979..8bede253a22f 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -4966,7 +4966,18 @@ _multiarray_umath_exec(PyObject *m) { return -1; } + /* Set __signature__ to None on the type (the instance has a property) */ + s = npy_import("numpy._globals", "_signature_descriptor"); + if (s == NULL) { + return -1; + } + PyUFunc_Type.tp_dict = Py_BuildValue( + "{ON}", npy_interned_str.__signature__, s); + if (PyUFunc_Type.tp_dict == NULL) { + return -1; + } if (PyType_Ready(&PyUFunc_Type) < 0) { + Py_CLEAR(PyUFunc_Type.tp_dict); return -1; } diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index c4e941f65bfe..63ac438eabc4 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -6648,57 +6648,6 @@ ufunc_set_doc(PyUFuncObject *ufunc, PyObject *doc, void *NPY_UNUSED(ignored)) } } -static PyObject * -ufunc_get_inspect_signature(PyUFuncObject *ufunc, void *NPY_UNUSED(ignored)) -{ - PyObject *signature; - - // If there is a __signature__ in the instance __dict__, use it. 
- int result = PyDict_GetItemRef(ufunc->dict, npy_interned_str.__signature__, - &signature); - if (result == -1) { - return NULL; - } - else if (result == 1) { - return signature; - } - - if (npy_cache_import_runtime( - "numpy._core._internal", "_ufunc_inspect_signature_builder", - &npy_runtime_imports._ufunc_inspect_signature_builder) == -1) { - return NULL; - } - - signature = PyObject_CallFunctionObjArgs( - npy_runtime_imports._ufunc_inspect_signature_builder, - (PyObject *)ufunc, NULL); - if (signature == NULL) { - return NULL; - } - - // Cache the result in the instance dict for next time - if (PyDict_SetItem(ufunc->dict, npy_interned_str.__signature__, - signature) < 0) { - Py_DECREF(signature); - return NULL; - } - - return signature; -} - -static PyObject * -ufunc_getattro(PyObject *obj, PyObject *name) -{ - // __signature__ special-casing to prevent class attribute access - if (PyUnicode_Check(name) && - PyUnicode_Compare(name, npy_interned_str.__signature__) == 0) { - return ufunc_get_inspect_signature((PyUFuncObject *)obj, NULL); - } - - // For all other attributes, use default behavior - return PyObject_GenericGetAttr(obj, name); -} - static PyObject * ufunc_get_nin(PyUFuncObject *ufunc, void *NPY_UNUSED(ignored)) { @@ -6809,6 +6758,7 @@ static PyGetSetDef ufunc_getset[] = { {"signature", (getter)ufunc_get_signature, NULL, NULL, NULL}, + // __signature__ stored in `__dict__`, see `_globals._SignatureDescriptor` {NULL, NULL, NULL, NULL, NULL}, /* Sentinel */ }; @@ -6843,7 +6793,7 @@ NPY_NO_EXPORT PyTypeObject PyUFunc_Type = { .tp_traverse = (traverseproc)ufunc_traverse, .tp_methods = ufunc_methods, .tp_getset = ufunc_getset, - .tp_getattro = ufunc_getattro, + .tp_getattro = PyObject_GenericGetAttr, .tp_setattro = PyObject_GenericSetAttr, // TODO when Python 3.12 is the minimum supported version, // use Py_TPFLAGS_MANAGED_DICT diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py index c10e4d1cf387..40b815f88984 100644 --- 
a/numpy/_core/tests/test_umath.py +++ b/numpy/_core/tests/test_umath.py @@ -4048,6 +4048,7 @@ def test_array_ufunc_direct_call(self): assert_array_equal(res, a + a) @pytest.mark.thread_unsafe(reason="modifies global module") + @pytest.mark.skipif(IS_PYPY, reason="__signature__ descriptor dance fails") def test_ufunc_docstring(self): original_doc = np.add.__doc__ new_doc = "new docs" diff --git a/numpy/_globals.py b/numpy/_globals.py index 5f838ba91544..ada8d5c41af0 100644 --- a/numpy/_globals.py +++ b/numpy/_globals.py @@ -94,3 +94,28 @@ def __bool__(self): return False raise ValueError(f"{self} is neither True nor False.") + + +class _SignatureDescriptor: + # A descriptor to store on the ufunc __dict__ that avoids definig a + # signature for the ufunc class/type but allows the instance to have one. + # This is needed because inspect.signature() chokes on normal properties + # (as of 3.14 at least). + # We could also set __signature__ on the instance but this allows deferred + # computation of the signature. + def __get__(self, obj, objtype=None): + # Delay import, not a critical path but need to avoid circular import. + from numpy._core._internal import _ufunc_inspect_signature_builder + + if obj is None: + # could also return None, which is accepted as "not set" by + # inspect.signature(). + raise AttributeError( + "type object 'numpy.ufunc' has no attribute '__signature__'") + + # Store on the instance, after this the descriptor won't be used. + obj.__signature__ = _ufunc_inspect_signature_builder(obj) + return obj.__signature__ + + +_signature_descriptor = _SignatureDescriptor() From 396db9b09e43425c6de64db415c668a848c7dea1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 21 Nov 2025 10:56:27 -0700 Subject: [PATCH 0903/1718] MAINT: Bump astral-sh/setup-uv from 7.1.3 to 7.1.4 (#30276) Bumps [astral-sh/setup-uv](https://github.com/astral-sh/setup-uv) from 7.1.3 to 7.1.4. 
- [Release notes](https://github.com/astral-sh/setup-uv/releases) - [Commits](https://github.com/astral-sh/setup-uv/compare/5a7eac68fb9809dea845d802897dc5c723910fa3...1e862dfacbd1d6d858c55d9b792c756523627244) --- updated-dependencies: - dependency-name: astral-sh/setup-uv dependency-version: 7.1.4 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/mypy.yml | 2 +- .github/workflows/stubtest.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml index 51c03285c990..e3c6e7beba13 100644 --- a/.github/workflows/mypy.yml +++ b/.github/workflows/mypy.yml @@ -61,7 +61,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: astral-sh/setup-uv@5a7eac68fb9809dea845d802897dc5c723910fa3 # v7.1.3 + - uses: astral-sh/setup-uv@1e862dfacbd1d6d858c55d9b792c756523627244 # v7.1.4 with: python-version: ${{ matrix.os_python[1] }} activate-environment: true diff --git a/.github/workflows/stubtest.yml b/.github/workflows/stubtest.yml index 0cfbd6197de0..731d9091ec69 100644 --- a/.github/workflows/stubtest.yml +++ b/.github/workflows/stubtest.yml @@ -43,7 +43,7 @@ jobs: fetch-tags: true persist-credentials: false - - uses: astral-sh/setup-uv@5a7eac68fb9809dea845d802897dc5c723910fa3 # v7.1.3 + - uses: astral-sh/setup-uv@1e862dfacbd1d6d858c55d9b792c756523627244 # v7.1.4 with: python-version: ${{ matrix.py }} activate-environment: true From dd80e20bc4b7f78ba92177c87d11c236bb7d194d Mon Sep 17 00:00:00 2001 From: Paresh Joshi Date: Sat, 22 Nov 2025 00:05:24 +0530 Subject: [PATCH 0904/1718] Fix race condition in test_printoptions_thread_safety (#30271) The test_printoptions_thread_safety function spawned threads but exited immediately without joining them. This created a race condition where: 1. 
The test could return 'Passed' before the assertions inside the threads actually ran. 2. "Zombie" threads were left running in the background, potentially interfering with subsequent tests. Added .join() calls for both threads to ensure proper execution and teardown. --- numpy/_core/tests/test_multithreading.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/numpy/_core/tests/test_multithreading.py b/numpy/_core/tests/test_multithreading.py index b78f476d33e2..9b10974839a4 100644 --- a/numpy/_core/tests/test_multithreading.py +++ b/numpy/_core/tests/test_multithreading.py @@ -122,6 +122,8 @@ def legacy_125(): task1.start() task2.start() + task1.join() + task2.join() def test_parallel_reduction(): From 0957720b27c9f5f6c9f1d43f1fa0d7dd43bd19db Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Sat, 22 Nov 2025 00:33:27 -0700 Subject: [PATCH 0905/1718] BUG: fix np.resize refchk on python 3.14 (#30278) Use new API on Python 3.14 for unique reference check (as it changed and for free-threading). On older Python's there is an additional reference on the stack for the method call, though. 
Fixes gh-30265 --- numpy/_core/src/multiarray/shape.c | 27 +++++++++++++-------------- numpy/_core/tests/test_multiarray.py | 7 +++++++ 2 files changed, 20 insertions(+), 14 deletions(-) diff --git a/numpy/_core/src/multiarray/shape.c b/numpy/_core/src/multiarray/shape.c index c98dba9abcb2..fce61ef36e63 100644 --- a/numpy/_core/src/multiarray/shape.c +++ b/numpy/_core/src/multiarray/shape.c @@ -32,7 +32,6 @@ PyArray_Resize_int(PyArrayObject *self, PyArray_Dims *newshape, int refcheck) npy_intp oldnbytes, newnbytes; npy_intp oldsize, newsize; int new_nd=newshape->len, k, elsize; - int refcnt; npy_intp* new_dimensions=newshape->ptr; npy_intp new_strides[NPY_MAXDIMS]; npy_intp *dimptr; @@ -93,21 +92,21 @@ PyArray_Resize_int(PyArrayObject *self, PyArray_Dims *newshape, int refcheck) "Use the np.resize function or refcheck=False"); return -1; #else - refcnt = Py_REFCNT(self); +#if PY_VERSION_HEX >= 0x030E00B0 + if (!PyUnstable_Object_IsUniquelyReferenced((PyObject *)self)) { +#else + if (Py_REFCNT(self) > 2) { +#endif + PyErr_SetString( + PyExc_ValueError, + "cannot resize an array that " + "references or is referenced\n" + "by another array in this way.\n" + "Use the np.resize function or refcheck=False"); + return -1; + } #endif /* PYPY_VERSION */ } - else { - refcnt = 1; - } - if (refcnt > 2) { - PyErr_SetString(PyExc_ValueError, - "cannot resize an array that " - "references or is referenced\n" - "by another array in this way.\n" - "Use the np.resize function or refcheck=False"); - return -1; - } - /* Reallocate space if needed - allocating 0 is forbidden */ PyObject *handler = PyArray_HANDLER(self); if (handler == NULL) { diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 5410f7677b9f..218fcc79592e 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -6287,6 +6287,13 @@ def test_check_reference(self): y = x assert_raises(ValueError, x.resize, (5, 1)) + def 
test_check_reference_2(self): + # see gh-30265 + x = np.zeros((2, 2)) + y = x + with pytest.raises(ValueError): + x.resize((5, 5)) + @_no_tracing def test_int_shape(self): x = np.eye(3) From aa6eb42e5f8b79f62d057f981770a29421233c06 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Sat, 22 Nov 2025 00:40:01 -0700 Subject: [PATCH 0906/1718] MAINT: refactor unary temporary elision check (#30279) --- numpy/_core/src/multiarray/temp_elide.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/_core/src/multiarray/temp_elide.c b/numpy/_core/src/multiarray/temp_elide.c index dbd469b05654..ea6cac08f78b 100644 --- a/numpy/_core/src/multiarray/temp_elide.c +++ b/numpy/_core/src/multiarray/temp_elide.c @@ -387,12 +387,12 @@ NPY_NO_EXPORT int can_elide_temp_unary(PyArrayObject * m1) { int cannot; - if (Py_REFCNT(m1) != 1 || !PyArray_CheckExact(m1) || + if (!check_unique_temporary((PyObject *)m1) || + !PyArray_CheckExact(m1) || !PyArray_ISNUMBER(m1) || !PyArray_CHKFLAGS(m1, NPY_ARRAY_OWNDATA) || !PyArray_ISWRITEABLE(m1) || - PyArray_NBYTES(m1) < NPY_MIN_ELIDE_BYTES || - !check_unique_temporary((PyObject *)m1)) { + PyArray_NBYTES(m1) < NPY_MIN_ELIDE_BYTES) { return 0; } if (check_callers(&cannot)) { From debb9a3f4bfb7ce8db68b687d71e5287c4cee069 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Mon, 24 Nov 2025 17:31:35 +0100 Subject: [PATCH 0907/1718] TYP: ``lib._function_base_impl``: many typing improvements (#30246) * TYP: ``lib._function_base_impl``: many typing improvements * TYP: use the any trick when calling ``gradiant`` with arrays of unknown shape * TYP: fix regression in ``append`` caught by mypy_primer * TYP: Apply Copilot's review suggestions (yes, they actually made sense) --- numpy/lib/_function_base_impl.pyi | 2225 +++++++++++++---- .../tests/data/fail/lib_function_base.pyi | 36 +- .../tests/data/reveal/lib_function_base.pyi | 428 +++- 3 files changed, 2030 insertions(+), 659 deletions(-) diff --git 
a/numpy/lib/_function_base_impl.pyi b/numpy/lib/_function_base_impl.pyi index bad00641072c..48d3743983b3 100644 --- a/numpy/lib/_function_base_impl.pyi +++ b/numpy/lib/_function_base_impl.pyi @@ -1,37 +1,22 @@ -# ruff: noqa: ANN401 -from _typeshed import Incomplete +from _typeshed import ConvertibleToInt, Incomplete from collections.abc import Callable, Iterable, Sequence from typing import ( Any, Concatenate, Literal as L, + Never, ParamSpec, Protocol, SupportsIndex, SupportsInt, TypeAlias, - TypeVar, overload, type_check_only, ) -from typing_extensions import TypeIs +from typing_extensions import TypeIs, TypeVar import numpy as np -from numpy import ( - _OrderKACF, - bool_, - complex128, - complexfloating, - datetime64, - float64, - floating, - generic, - integer, - intp, - object_, - timedelta64, - vectorize, -) +from numpy import _OrderKACF, vectorize from numpy._core.multiarray import bincount from numpy._globals import _NoValueType from numpy._typing import ( @@ -41,19 +26,18 @@ from numpy._typing import ( _ArrayLike, _ArrayLikeBool_co, _ArrayLikeComplex_co, - _ArrayLikeDT64_co, _ArrayLikeFloat_co, _ArrayLikeInt_co, _ArrayLikeNumber_co, _ArrayLikeObject_co, - _ArrayLikeTD64_co, _ComplexLike_co, _DTypeLike, _FloatLike_co, - _NestedSequence, + _NestedSequence as _SeqND, _NumberLike_co, _ScalarLike_co, _ShapeLike, + _SupportsArray, ) __all__ = [ @@ -98,15 +82,94 @@ __all__ = [ _T = TypeVar("_T") _T_co = TypeVar("_T_co", covariant=True) -# The `{}ss` suffix refers to the Python 3.12 syntax: `**P` -_Pss = ParamSpec("_Pss") -_ScalarT = TypeVar("_ScalarT", bound=generic) -_ScalarT1 = TypeVar("_ScalarT1", bound=generic) -_ScalarT2 = TypeVar("_ScalarT2", bound=generic) +# The `{}ss` suffix refers to the PEP 695 (Python 3.12) `ParamSpec` syntax, `**P`. 
+_Tss = ParamSpec("_Tss") + +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +_ScalarT1 = TypeVar("_ScalarT1", bound=np.generic) +_ScalarT2 = TypeVar("_ScalarT2", bound=np.generic) +_FloatingT = TypeVar("_FloatingT", bound=np.floating) +_InexactT = TypeVar("_InexactT", bound=np.inexact) +_InexactTimeT = TypeVar("_InexactTimeT", bound=np.inexact | np.timedelta64) +_InexactDateTimeT = TypeVar("_InexactDateTimeT", bound=np.inexact | np.timedelta64 | np.datetime64) +_ScalarNumericT = TypeVar("_ScalarNumericT", bound=np.inexact | np.timedelta64 | np.object_) +_AnyDoubleT = TypeVar("_AnyDoubleT", bound=np.float64 | np.longdouble | np.complex128 | np.clongdouble) + _ArrayT = TypeVar("_ArrayT", bound=np.ndarray) +_ArrayFloatingT = TypeVar("_ArrayFloatingT", bound=NDArray[np.floating]) +_ArrayFloatObjT = TypeVar("_ArrayFloatObjT", bound=NDArray[np.floating | np.object_]) +_ArrayComplexT = TypeVar("_ArrayComplexT", bound=NDArray[np.complexfloating]) +_ArrayInexactT = TypeVar("_ArrayInexactT", bound=NDArray[np.inexact]) +_ArrayNumericT = TypeVar("_ArrayNumericT", bound=NDArray[np.inexact | np.timedelta64 | np.object_]) + +_ArrayLike1D: TypeAlias = _SupportsArray[np.dtype[_ScalarT]] | Sequence[_ScalarT] + +_ShapeT = TypeVar("_ShapeT", bound=tuple[int, ...]) + +_integer_co: TypeAlias = np.integer | np.bool +_float64_co: TypeAlias = np.float64 | _integer_co +_floating_co: TypeAlias = np.floating | _integer_co + +# non-trivial scalar-types that will become `complex128` in `sort_complex()`, +# i.e. 
all numeric scalar types except for `[u]int{8,16} | longdouble` +_SortsToComplex128: TypeAlias = ( + np.bool + | np.int32 + | np.uint32 + | np.int64 + | np.uint64 + | np.float16 + | np.float32 + | np.float64 + | np.timedelta64 + | np.object_ +) + +_Array: TypeAlias = np.ndarray[_ShapeT, np.dtype[_ScalarT]] +_Array0D: TypeAlias = np.ndarray[tuple[()], np.dtype[_ScalarT]] +_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] +_Array2D: TypeAlias = np.ndarray[tuple[int, int], np.dtype[_ScalarT]] +_Array3D: TypeAlias = np.ndarray[tuple[int, int, int], np.dtype[_ScalarT]] +_ArrayMax2D: TypeAlias = np.ndarray[tuple[int] | tuple[int, int], np.dtype[_ScalarT]] +# workaround for mypy and pyright not following the typing spec for overloads +_ArrayNoD: TypeAlias = np.ndarray[tuple[Never, Never, Never, Never], np.dtype[_ScalarT]] + +_Seq1D: TypeAlias = Sequence[_T] +_Seq2D: TypeAlias = Sequence[Sequence[_T]] +_Seq3D: TypeAlias = Sequence[Sequence[Sequence[_T]]] +_ListSeqND: TypeAlias = list[_T] | _SeqND[list[_T]] + +_Tuple2: TypeAlias = tuple[_T, _T] +_Tuple3: TypeAlias = tuple[_T, _T, _T] +_Tuple4: TypeAlias = tuple[_T, _T, _T, _T] + +_Mesh1: TypeAlias = tuple[_Array1D[_ScalarT]] +_Mesh2: TypeAlias = tuple[_Array2D[_ScalarT], _Array2D[_ScalarT1]] +_Mesh3: TypeAlias = tuple[_Array3D[_ScalarT], _Array3D[_ScalarT1], _Array3D[_ScalarT2]] + +_IndexLike: TypeAlias = slice | _ArrayLikeInt_co + +_Indexing: TypeAlias = L["ij", "xy"] +_InterpolationMethod = L[ + "inverted_cdf", + "averaged_inverted_cdf", + "closest_observation", + "interpolated_inverted_cdf", + "hazen", + "weibull", + "linear", + "median_unbiased", + "normal_unbiased", + "lower", + "higher", + "midpoint", + "nearest", +] -_2Tuple: TypeAlias = tuple[_T, _T] -_MeshgridIdx: TypeAlias = L["ij", "xy"] +# The resulting value will be used as `y[cond] = func(vals, *args, **kw)`, so in can +# return any (usually 1d) array-like or scalar-like compatible with the input. 
+_PiecewiseFunction: TypeAlias = Callable[Concatenate[NDArray[_ScalarT], _Tss], ArrayLike] +_PiecewiseFunctions: TypeAlias = _SizedIterable[_PiecewiseFunction[_ScalarT, _Tss] | _ScalarLike_co] @type_check_only class _TrimZerosSequence(Protocol[_T_co]): @@ -116,192 +179,574 @@ class _TrimZerosSequence(Protocol[_T_co]): @overload def __getitem__(self, key: slice, /) -> _T_co: ... +@type_check_only +class _SupportsRMulFloat(Protocol[_T_co]): + def __rmul__(self, other: float, /) -> _T_co: ... + +@type_check_only +class _SizedIterable(Protocol[_T_co]): + def __iter__(self) -> Iterable[_T_co]: ... + def __len__(self) -> int: ... + ### @overload -def rot90( - m: _ArrayLike[_ScalarT], - k: int = 1, - axes: tuple[int, int] = (0, 1), -) -> NDArray[_ScalarT]: ... +def rot90(m: _ArrayT, k: int = 1, axes: tuple[int, int] = (0, 1)) -> _ArrayT: ... @overload -def rot90( - m: ArrayLike, - k: int = 1, - axes: tuple[int, int] = (0, 1), -) -> NDArray[Any]: ... - +def rot90(m: _ArrayLike[_ScalarT], k: int = 1, axes: tuple[int, int] = (0, 1)) -> NDArray[_ScalarT]: ... @overload -def flip(m: _ScalarT, axis: None = None) -> _ScalarT: ... +def rot90(m: ArrayLike, k: int = 1, axes: tuple[int, int] = (0, 1)) -> NDArray[Incomplete]: ... + +# NOTE: Technically `flip` also accept scalars, but that has no effect and complicates +# the overloads significantly, so we ignore that case here. @overload -def flip(m: _ScalarLike_co, axis: None = None) -> Any: ... +def flip(m: _ArrayT, axis: int | tuple[int, ...] | None = None) -> _ArrayT: ... @overload -def flip(m: _ArrayLike[_ScalarT], axis: _ShapeLike | None = None) -> NDArray[_ScalarT]: ... +def flip(m: _ArrayLike[_ScalarT], axis: int | tuple[int, ...] | None = None) -> NDArray[_ScalarT]: ... @overload -def flip(m: ArrayLike, axis: _ShapeLike | None = None) -> NDArray[Any]: ... +def flip(m: ArrayLike, axis: int | tuple[int, ...] | None = None) -> NDArray[Incomplete]: ... +# def iterable(y: object) -> TypeIs[Iterable[Any]]: ... 
-@overload +# NOTE: This assumes that if `axis` is given the input is at least 2d, and will +# therefore always return an array. +# NOTE: This assumes that if `keepdims=True` the input is at least 1d, and will +# therefore always return an array. +@overload # inexact array, keepdims=True +def average( + a: _ArrayInexactT, + axis: int | tuple[int, ...] | None = None, + weights: _ArrayLikeNumber_co | None = None, + returned: L[False] = False, + *, + keepdims: L[True], +) -> _ArrayInexactT: ... +@overload # inexact array, returned=True keepdims=True +def average( + a: _ArrayInexactT, + axis: int | tuple[int, ...] | None = None, + weights: _ArrayLikeNumber_co | None = None, + *, + returned: L[True], + keepdims: L[True], +) -> _Tuple2[_ArrayInexactT]: ... +@overload # inexact array-like, axis=None +def average( + a: _ArrayLike[_InexactT], + axis: None = None, + weights: _ArrayLikeNumber_co | None = None, + returned: L[False] = False, + *, + keepdims: L[False] | _NoValueType = ..., +) -> _InexactT: ... +@overload # inexact array-like, axis= +def average( + a: _ArrayLike[_InexactT], + axis: int | tuple[int, ...], + weights: _ArrayLikeNumber_co | None = None, + returned: L[False] = False, + *, + keepdims: L[False] | _NoValueType = ..., +) -> NDArray[_InexactT]: ... +@overload # inexact array-like, keepdims=True +def average( + a: _ArrayLike[_InexactT], + axis: int | tuple[int, ...] | None = None, + weights: _ArrayLikeNumber_co | None = None, + returned: L[False] = False, + *, + keepdims: L[True], +) -> NDArray[_InexactT]: ... +@overload # inexact array-like, axis=None, returned=True +def average( + a: _ArrayLike[_InexactT], + axis: None = None, + weights: _ArrayLikeNumber_co | None = None, + *, + returned: L[True], + keepdims: L[False] | _NoValueType = ..., +) -> _Tuple2[_InexactT]: ... 
+@overload # inexact array-like, axis=, returned=True +def average( + a: _ArrayLike[_InexactT], + axis: int | tuple[int, ...], + weights: _ArrayLikeNumber_co | None = None, + *, + returned: L[True], + keepdims: L[False] | _NoValueType = ..., +) -> _Tuple2[NDArray[_InexactT]]: ... +@overload # inexact array-like, returned=True, keepdims=True +def average( + a: _ArrayLike[_InexactT], + axis: int | tuple[int, ...] | None = None, + weights: _ArrayLikeNumber_co | None = None, + *, + returned: L[True], + keepdims: L[True], +) -> _Tuple2[NDArray[_InexactT]]: ... +@overload # bool or integer array-like, axis=None def average( - a: _ArrayLikeFloat_co, + a: _SeqND[float] | _ArrayLikeInt_co, axis: None = None, weights: _ArrayLikeFloat_co | None = None, returned: L[False] = False, *, keepdims: L[False] | _NoValueType = ..., -) -> floating: ... -@overload +) -> np.float64: ... +@overload # bool or integer array-like, axis= +def average( + a: _SeqND[float] | _ArrayLikeInt_co, + axis: int | tuple[int, ...], + weights: _ArrayLikeFloat_co | None = None, + returned: L[False] = False, + *, + keepdims: L[False] | _NoValueType = ..., +) -> NDArray[np.float64]: ... +@overload # bool or integer array-like, keepdims=True +def average( + a: _SeqND[float] | _ArrayLikeInt_co, + axis: int | tuple[int, ...] | None = None, + weights: _ArrayLikeFloat_co | None = None, + returned: L[False] = False, + *, + keepdims: L[True], +) -> NDArray[np.float64]: ... +@overload # bool or integer array-like, axis=None, returned=True def average( - a: _ArrayLikeFloat_co, + a: _SeqND[float] | _ArrayLikeInt_co, axis: None = None, weights: _ArrayLikeFloat_co | None = None, *, returned: L[True], keepdims: L[False] | _NoValueType = ..., -) -> _2Tuple[floating]: ... -@overload +) -> _Tuple2[np.float64]: ... 
+@overload # bool or integer array-like, axis=, returned=True +def average( + a: _SeqND[float] | _ArrayLikeInt_co, + axis: int | tuple[int, ...], + weights: _ArrayLikeFloat_co | None = None, + *, + returned: L[True], + keepdims: L[False] | _NoValueType = ..., +) -> _Tuple2[NDArray[np.float64]]: ... +@overload # bool or integer array-like, returned=True, keepdims=True +def average( + a: _SeqND[float] | _ArrayLikeInt_co, + axis: int | tuple[int, ...] | None = None, + weights: _ArrayLikeFloat_co | None = None, + *, + returned: L[True], + keepdims: L[True], +) -> _Tuple2[NDArray[np.float64]]: ... +@overload # complex array-like, axis=None def average( - a: _ArrayLikeComplex_co, + a: _ListSeqND[complex], axis: None = None, weights: _ArrayLikeComplex_co | None = None, returned: L[False] = False, *, keepdims: L[False] | _NoValueType = ..., -) -> complexfloating: ... -@overload +) -> np.complex128: ... +@overload # complex array-like, axis= +def average( + a: _ListSeqND[complex], + axis: int | tuple[int, ...], + weights: _ArrayLikeComplex_co | None = None, + returned: L[False] = False, + *, + keepdims: L[False] | _NoValueType = ..., +) -> NDArray[np.complex128]: ... +@overload # complex array-like, keepdims=True +def average( + a: _ListSeqND[complex], + axis: int | tuple[int, ...] | None = None, + weights: _ArrayLikeComplex_co | None = None, + returned: L[False] = False, + *, + keepdims: L[True], +) -> NDArray[np.complex128]: ... +@overload # complex array-like, axis=None, returned=True def average( - a: _ArrayLikeComplex_co, + a: _ListSeqND[complex], axis: None = None, weights: _ArrayLikeComplex_co | None = None, *, returned: L[True], keepdims: L[False] | _NoValueType = ..., -) -> _2Tuple[complexfloating]: ... -@overload +) -> _Tuple2[np.complex128]: ... 
+@overload # complex array-like, axis=, returned=True def average( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = None, - weights: object | None = None, + a: _ListSeqND[complex], + axis: int | tuple[int, ...], + weights: _ArrayLikeComplex_co | None = None, *, returned: L[True], - keepdims: bool | bool_ | _NoValueType = ..., -) -> _2Tuple[Incomplete]: ... -@overload + keepdims: L[False] | _NoValueType = ..., +) -> _Tuple2[NDArray[np.complex128]]: ... +@overload # complex array-like, keepdims=True, returned=True def average( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = None, - weights: object | None = None, - returned: bool | bool_ = False, + a: _ListSeqND[complex], + axis: int | tuple[int, ...] | None = None, + weights: _ArrayLikeComplex_co | None = None, *, - keepdims: bool | bool_ | _NoValueType = ..., -) -> Incomplete: ... + returned: L[True], + keepdims: L[True], +) -> _Tuple2[NDArray[np.complex128]]: ... +@overload # unknown, axis=None +def average( + a: _ArrayLikeNumber_co | _ArrayLikeObject_co, + axis: None = None, + weights: _ArrayLikeNumber_co | None = None, + returned: L[False] = False, + *, + keepdims: L[False] | _NoValueType = ..., +) -> Any: ... +@overload # unknown, axis= +def average( + a: _ArrayLikeNumber_co | _ArrayLikeObject_co, + axis: int | tuple[int, ...], + weights: _ArrayLikeNumber_co | None = None, + returned: L[False] = False, + *, + keepdims: L[False] | _NoValueType = ..., +) -> np.ndarray: ... +@overload # unknown, keepdims=True +def average( + a: _ArrayLikeNumber_co | _ArrayLikeObject_co, + axis: int | tuple[int, ...] | None = None, + weights: _ArrayLikeNumber_co | None = None, + returned: L[False] = False, + *, + keepdims: L[True], +) -> np.ndarray: ... 
+@overload # unknown, axis=None, returned=True +def average( + a: _ArrayLikeNumber_co | _ArrayLikeObject_co, + axis: None = None, + weights: _ArrayLikeNumber_co | None = None, + *, + returned: L[True], + keepdims: L[False] | _NoValueType = ..., +) -> _Tuple2[Any]: ... +@overload # unknown, axis=, returned=True +def average( + a: _ArrayLikeNumber_co | _ArrayLikeObject_co, + axis: int | tuple[int, ...], + weights: _ArrayLikeNumber_co | None = None, + *, + returned: L[True], + keepdims: L[False] | _NoValueType = ..., +) -> _Tuple2[np.ndarray]: ... +@overload # unknown, returned=True, keepdims=True +def average( + a: _ArrayLikeNumber_co | _ArrayLikeObject_co, + axis: int | tuple[int, ...] | None = None, + weights: _ArrayLikeNumber_co | None = None, + *, + returned: L[True], + keepdims: L[True], +) -> _Tuple2[np.ndarray]: ... +# @overload -def asarray_chkfinite( - a: _ArrayLike[_ScalarT], - dtype: None = None, - order: _OrderKACF = None, -) -> NDArray[_ScalarT]: ... +def asarray_chkfinite(a: _ArrayT, dtype: None = None, order: _OrderKACF = None) -> _ArrayT: ... @overload def asarray_chkfinite( - a: object, - dtype: None = None, - order: _OrderKACF = None, -) -> NDArray[Any]: ... + a: np.ndarray[_ShapeT], dtype: _DTypeLike[_ScalarT], order: _OrderKACF = None +) -> _Array[_ShapeT, _ScalarT]: ... @overload -def asarray_chkfinite( - a: Any, - dtype: _DTypeLike[_ScalarT], - order: _OrderKACF = None, -) -> NDArray[_ScalarT]: ... +def asarray_chkfinite(a: _ArrayLike[_ScalarT], dtype: None = None, order: _OrderKACF = None) -> NDArray[_ScalarT]: ... @overload -def asarray_chkfinite( - a: Any, - dtype: DTypeLike | None, - order: _OrderKACF = None, -) -> NDArray[Any]: ... +def asarray_chkfinite(a: object, dtype: _DTypeLike[_ScalarT], order: _OrderKACF = None) -> NDArray[_ScalarT]: ... +@overload +def asarray_chkfinite(a: object, dtype: DTypeLike | None = None, order: _OrderKACF = None) -> NDArray[Incomplete]: ... 
+# NOTE: Contrary to the documentation, scalars are also accepted and treated as +# `[condlist]`. And even though the documentation says these should be boolean, in +# practice anything that `np.array(condlist, dtype=bool)` accepts will work, i.e. any +# array-like. +@overload +def piecewise( + x: _Array[_ShapeT, _ScalarT], + condlist: ArrayLike, + funclist: _PiecewiseFunctions[Any, _Tss], + *args: _Tss.args, + **kw: _Tss.kwargs, +) -> _Array[_ShapeT, _ScalarT]: ... @overload def piecewise( x: _ArrayLike[_ScalarT], - condlist: _ArrayLike[bool_] | Sequence[_ArrayLikeBool_co], - funclist: Sequence[ - Callable[Concatenate[NDArray[_ScalarT], _Pss], NDArray[_ScalarT | Any]] - | _ScalarT | object - ], - *args: _Pss.args, - **kw: _Pss.kwargs, + condlist: ArrayLike, + funclist: _PiecewiseFunctions[Any, _Tss], + *args: _Tss.args, + **kw: _Tss.kwargs, ) -> NDArray[_ScalarT]: ... @overload def piecewise( x: ArrayLike, - condlist: _ArrayLike[bool_] | Sequence[_ArrayLikeBool_co], - funclist: Sequence[ - Callable[Concatenate[NDArray[Any], _Pss], NDArray[Any]] - | object - ], - *args: _Pss.args, - **kw: _Pss.kwargs, -) -> NDArray[Any]: ... + condlist: ArrayLike, + funclist: _PiecewiseFunctions[_ScalarT, _Tss], + *args: _Tss.args, + **kw: _Tss.kwargs, +) -> NDArray[_ScalarT]: ... +# NOTE: condition is usually boolean, but anything with zero/non-zero semantics works +@overload +def extract(condition: ArrayLike, arr: _ArrayLike[_ScalarT]) -> _Array1D[_ScalarT]: ... +@overload +def extract(condition: ArrayLike, arr: _SeqND[bool]) -> _Array1D[np.bool]: ... +@overload +def extract(condition: ArrayLike, arr: _ListSeqND[int]) -> _Array1D[np.int_]: ... +@overload +def extract(condition: ArrayLike, arr: _ListSeqND[float]) -> _Array1D[np.float64]: ... +@overload +def extract(condition: ArrayLike, arr: _ListSeqND[complex]) -> _Array1D[np.complex128]: ... +@overload +def extract(condition: ArrayLike, arr: _SeqND[bytes]) -> _Array1D[np.bytes_]: ... 
+@overload +def extract(condition: ArrayLike, arr: _SeqND[str]) -> _Array1D[np.str_]: ... +@overload +def extract(condition: ArrayLike, arr: ArrayLike) -> _Array1D[Incomplete]: ... + +# NOTE: unlike `extract`, passing non-boolean conditions for `condlist` will raise an +# error at runtime +@overload +def select( + condlist: _SizedIterable[_ArrayLikeBool_co], + choicelist: Sequence[_ArrayT], + default: _ScalarLike_co = 0, +) -> _ArrayT: ... +@overload +def select( + condlist: _SizedIterable[_ArrayLikeBool_co], + choicelist: Sequence[_ArrayLike[_ScalarT]] | NDArray[_ScalarT], + default: _ScalarLike_co = 0, +) -> NDArray[_ScalarT]: ... +@overload def select( - condlist: Sequence[ArrayLike], + condlist: _SizedIterable[_ArrayLikeBool_co], choicelist: Sequence[ArrayLike], - default: ArrayLike = 0, -) -> NDArray[Any]: ... + default: _ScalarLike_co = 0, +) -> np.ndarray: ... # keep roughly in sync with `ma.core.copy` @overload -def copy( - a: _ArrayT, - order: _OrderKACF, - subok: L[True], -) -> _ArrayT: ... +def copy(a: _ArrayT, order: _OrderKACF, subok: L[True]) -> _ArrayT: ... @overload -def copy( - a: _ArrayT, - order: _OrderKACF = "K", - *, - subok: L[True], -) -> _ArrayT: ... +def copy(a: _ArrayT, order: _OrderKACF = "K", *, subok: L[True]) -> _ArrayT: ... @overload -def copy( - a: _ArrayLike[_ScalarT], - order: _OrderKACF = "K", - subok: L[False] = False, -) -> NDArray[_ScalarT]: ... +def copy(a: _ArrayLike[_ScalarT], order: _OrderKACF = "K", subok: L[False] = False) -> NDArray[_ScalarT]: ... @overload -def copy( - a: ArrayLike, - order: _OrderKACF = "K", - subok: L[False] = False, -) -> NDArray[Any]: ... +def copy(a: ArrayLike, order: _OrderKACF = "K", subok: L[False] = False) -> NDArray[Incomplete]: ... 
+# +@overload # ?d, known inexact scalar-type +def gradient( + f: _ArrayNoD[_InexactTimeT], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, + # `| Any` instead of ` | tuple` is returned to avoid several mypy_primer errors +) -> _Array1D[_InexactTimeT] | Any: ... +@overload # 1d, known inexact scalar-type +def gradient( + f: _Array1D[_InexactTimeT], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Array1D[_InexactTimeT]: ... +@overload # 2d, known inexact scalar-type +def gradient( + f: _Array2D[_InexactTimeT], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Mesh2[_InexactTimeT, _InexactTimeT]: ... +@overload # 3d, known inexact scalar-type +def gradient( + f: _Array3D[_InexactTimeT], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Mesh3[_InexactTimeT, _InexactTimeT, _InexactTimeT]: ... +@overload # ?d, datetime64 scalar-type +def gradient( + f: _ArrayNoD[np.datetime64], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Array1D[np.timedelta64] | tuple[NDArray[np.timedelta64], ...]: ... +@overload # 1d, datetime64 scalar-type +def gradient( + f: _Array1D[np.datetime64], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Array1D[np.timedelta64]: ... +@overload # 2d, datetime64 scalar-type +def gradient( + f: _Array2D[np.datetime64], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Mesh2[np.timedelta64, np.timedelta64]: ... +@overload # 3d, datetime64 scalar-type +def gradient( + f: _Array3D[np.datetime64], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Mesh3[np.timedelta64, np.timedelta64, np.timedelta64]: ... 
+@overload # 1d float-like +def gradient( + f: _Seq1D[float], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Array1D[np.float64]: ... +@overload # 2d float-like +def gradient( + f: _Seq2D[float], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Mesh2[np.float64, np.float64]: ... +@overload # 3d float-like +def gradient( + f: _Seq3D[float], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Mesh3[np.float64, np.float64, np.float64]: ... +@overload # 1d complex-like (the `list` avoids overlap with the float-like overload) +def gradient( + f: list[complex], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Array1D[np.complex128]: ... +@overload # 2d float-like +def gradient( + f: _Seq1D[list[complex]], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Mesh2[np.complex128, np.complex128]: ... +@overload # 3d float-like +def gradient( + f: _Seq2D[list[complex]], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Mesh3[np.complex128, np.complex128, np.complex128]: ... +@overload # fallback def gradient( f: ArrayLike, - *varargs: ArrayLike, + *varargs: _ArrayLikeNumber_co, axis: _ShapeLike | None = None, edge_order: L[1, 2] = 1, -) -> Any: ... +) -> Incomplete: ... -@overload -def diff( # type: ignore[overload-overlap] +# +@overload # n == 0; return input unchanged +def diff( a: _T, n: L[0], axis: SupportsIndex = -1, prepend: ArrayLike | _NoValueType = ..., # = _NoValue append: ArrayLike | _NoValueType = ..., # = _NoValue ) -> _T: ... -@overload +@overload # known array-type +def diff( + a: _ArrayNumericT, + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> _ArrayNumericT: ... 
+@overload # known shape, datetime64 +def diff( + a: _Array[_ShapeT, np.datetime64], + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> _Array[_ShapeT, np.timedelta64]: ... +@overload # unknown shape, known scalar-type +def diff( + a: _ArrayLike[_ScalarNumericT], + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> NDArray[_ScalarNumericT]: ... +@overload # unknown shape, datetime64 +def diff( + a: _ArrayLike[np.datetime64], + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> NDArray[np.timedelta64]: ... +@overload # 1d int +def diff( + a: _Seq1D[int], + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> _Array1D[np.int_]: ... +@overload # 2d int +def diff( + a: _Seq2D[int], + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> _Array2D[np.int_]: ... +@overload # 1d float (the `list` avoids overlap with the `int` overloads) +def diff( + a: list[float], + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> _Array1D[np.float64]: ... +@overload # 2d float +def diff( + a: _Seq1D[list[float]], + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> _Array2D[np.float64]: ... +@overload # 1d complex (the `list` avoids overlap with the `int` overloads) +def diff( + a: list[complex], + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> _Array1D[np.complex128]: ... 
+@overload # 2d complex +def diff( + a: _Seq1D[list[complex]], + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> _Array2D[np.complex128]: ... +@overload # unknown shape, unknown scalar-type def diff( a: ArrayLike, n: int = 1, axis: SupportsIndex = -1, - prepend: ArrayLike | _NoValueType = ..., # = _NoValue - append: ArrayLike | _NoValueType = ..., # = _NoValue + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., ) -> NDArray[Incomplete]: ... +# @overload # float scalar def interp( x: _FloatLike_co, @@ -310,71 +755,89 @@ def interp( left: _FloatLike_co | None = None, right: _FloatLike_co | None = None, period: _FloatLike_co | None = None, -) -> float64: ... -@overload # float array +) -> np.float64: ... +@overload # complex scalar def interp( - x: NDArray[floating | integer | np.bool] | _NestedSequence[_FloatLike_co], + x: _FloatLike_co, xp: _ArrayLikeFloat_co, - fp: _ArrayLikeFloat_co, - left: _FloatLike_co | None = None, - right: _FloatLike_co | None = None, + fp: _ArrayLike1D[np.complexfloating] | list[complex], + left: _NumberLike_co | None = None, + right: _NumberLike_co | None = None, period: _FloatLike_co | None = None, -) -> NDArray[float64]: ... -@overload # float scalar or array +) -> np.complex128: ... +@overload # float array def interp( - x: _ArrayLikeFloat_co, + x: _Array[_ShapeT, _floating_co], xp: _ArrayLikeFloat_co, fp: _ArrayLikeFloat_co, left: _FloatLike_co | None = None, right: _FloatLike_co | None = None, period: _FloatLike_co | None = None, -) -> NDArray[float64] | float64: ... -@overload # complex scalar +) -> _Array[_ShapeT, np.float64]: ... 
+@overload # complex array def interp( - x: _FloatLike_co, + x: _Array[_ShapeT, _floating_co], xp: _ArrayLikeFloat_co, - fp: _ArrayLike[complexfloating], + fp: _ArrayLike1D[np.complexfloating] | list[complex], left: _NumberLike_co | None = None, right: _NumberLike_co | None = None, period: _FloatLike_co | None = None, -) -> complex128: ... -@overload # complex or float scalar +) -> _Array[_ShapeT, np.complex128]: ... +@overload # float sequence def interp( - x: _FloatLike_co, + x: _Seq1D[_FloatLike_co], xp: _ArrayLikeFloat_co, - fp: Sequence[complex | complexfloating], - left: _NumberLike_co | None = None, - right: _NumberLike_co | None = None, + fp: _ArrayLikeFloat_co, + left: _FloatLike_co | None = None, + right: _FloatLike_co | None = None, period: _FloatLike_co | None = None, -) -> complex128 | float64: ... -@overload # complex array +) -> _Array1D[np.float64]: ... +@overload # complex sequence def interp( - x: NDArray[floating | integer | np.bool] | _NestedSequence[_FloatLike_co], + x: _Seq1D[_FloatLike_co], xp: _ArrayLikeFloat_co, - fp: _ArrayLike[complexfloating], + fp: _ArrayLike1D[np.complexfloating] | list[complex], left: _NumberLike_co | None = None, right: _NumberLike_co | None = None, period: _FloatLike_co | None = None, -) -> NDArray[complex128]: ... -@overload # complex or float array +) -> _Array1D[np.complex128]: ... +@overload # float array-like +def interp( + x: _SeqND[_FloatLike_co], + xp: _ArrayLikeFloat_co, + fp: _ArrayLikeFloat_co, + left: _FloatLike_co | None = None, + right: _FloatLike_co | None = None, + period: _FloatLike_co | None = None, +) -> NDArray[np.float64]: ... 
+@overload # complex array-like def interp( - x: NDArray[floating | integer | np.bool] | _NestedSequence[_FloatLike_co], + x: _SeqND[_FloatLike_co], xp: _ArrayLikeFloat_co, - fp: Sequence[complex | complexfloating], + fp: _ArrayLike1D[np.complexfloating] | list[complex], left: _NumberLike_co | None = None, right: _NumberLike_co | None = None, period: _FloatLike_co | None = None, -) -> NDArray[complex128 | float64]: ... -@overload # complex scalar or array +) -> NDArray[np.complex128]: ... +@overload # float scalar/array-like +def interp( + x: _ArrayLikeFloat_co, + xp: _ArrayLikeFloat_co, + fp: _ArrayLikeFloat_co, + left: _FloatLike_co | None = None, + right: _FloatLike_co | None = None, + period: _FloatLike_co | None = None, +) -> NDArray[np.float64] | np.float64: ... +@overload # complex scalar/array-like def interp( x: _ArrayLikeFloat_co, xp: _ArrayLikeFloat_co, - fp: _ArrayLike[complexfloating], + fp: _ArrayLike1D[np.complexfloating], left: _NumberLike_co | None = None, right: _NumberLike_co | None = None, period: _FloatLike_co | None = None, -) -> NDArray[complex128] | complex128: ... -@overload # complex or float scalar or array +) -> NDArray[np.complex128] | np.complex128: ... +@overload # float/complex scalar/array-like def interp( x: _ArrayLikeFloat_co, xp: _ArrayLikeFloat_co, @@ -382,662 +845,1356 @@ def interp( left: _NumberLike_co | None = None, right: _NumberLike_co | None = None, period: _FloatLike_co | None = None, -) -> NDArray[complex128 | float64] | complex128 | float64: ... +) -> NDArray[np.complex128 | np.float64] | np.complex128 | np.float64: ... -@overload -def angle(z: _ComplexLike_co, deg: bool = False) -> floating: ... -@overload -def angle(z: object_, deg: bool = False) -> Any: ... -@overload -def angle(z: _ArrayLikeComplex_co, deg: bool = False) -> NDArray[floating]: ... -@overload -def angle(z: _ArrayLikeObject_co, deg: bool = False) -> NDArray[object_]: ... 
+# +@overload # 0d T: floating -> 0d T +def angle(z: _FloatingT, deg: bool = False) -> _FloatingT: ... +@overload # 0d complex | float | ~integer -> 0d float64 +def angle(z: complex | _integer_co, deg: bool = False) -> np.float64: ... +@overload # 0d complex64 -> 0d float32 +def angle(z: np.complex64, deg: bool = False) -> np.float32: ... +@overload # 0d clongdouble -> 0d longdouble +def angle(z: np.clongdouble, deg: bool = False) -> np.longdouble: ... +@overload # T: nd floating -> T +def angle(z: _ArrayFloatingT, deg: bool = False) -> _ArrayFloatingT: ... +@overload # nd T: complex128 | ~integer -> nd float64 +def angle(z: _Array[_ShapeT, np.complex128 | _integer_co], deg: bool = False) -> _Array[_ShapeT, np.float64]: ... +@overload # nd T: complex64 -> nd float32 +def angle(z: _Array[_ShapeT, np.complex64], deg: bool = False) -> _Array[_ShapeT, np.float32]: ... +@overload # nd T: clongdouble -> nd longdouble +def angle(z: _Array[_ShapeT, np.clongdouble], deg: bool = False) -> _Array[_ShapeT, np.longdouble]: ... +@overload # 1d complex -> 1d float64 +def angle(z: _Seq1D[complex], deg: bool = False) -> _Array1D[np.float64]: ... +@overload # 2d complex -> 2d float64 +def angle(z: _Seq2D[complex], deg: bool = False) -> _Array2D[np.float64]: ... +@overload # 3d complex -> 3d float64 +def angle(z: _Seq3D[complex], deg: bool = False) -> _Array3D[np.float64]: ... +@overload # fallback +def angle(z: _ArrayLikeComplex_co, deg: bool = False) -> NDArray[np.floating] | Any: ... -@overload +# +@overload # known array-type def unwrap( - p: _ArrayLikeFloat_co, + p: _ArrayFloatObjT, discont: float | None = None, axis: int = -1, *, - period: float = ..., -) -> NDArray[floating]: ... -@overload + period: float = ..., # = τ +) -> _ArrayFloatObjT: ... +@overload # known shape, float64 def unwrap( - p: _ArrayLikeObject_co, + p: _Array[_ShapeT, _float64_co], discont: float | None = None, axis: int = -1, *, - period: float = ..., -) -> NDArray[object_]: ... 
- -def sort_complex(a: ArrayLike) -> NDArray[complexfloating]: ... - -def trim_zeros( - filt: _TrimZerosSequence[_T], - trim: L["f", "b", "fb", "bf"] = "fb", - axis: _ShapeLike | None = None, -) -> _T: ... + period: float = ..., # = τ +) -> _Array[_ShapeT, np.float64]: ... +@overload # 1d float64-like +def unwrap( + p: _Seq1D[float | _float64_co], + discont: float | None = None, + axis: int = -1, + *, + period: float = ..., # = τ +) -> _Array1D[np.float64]: ... +@overload # 2d float64-like +def unwrap( + p: _Seq2D[float | _float64_co], + discont: float | None = None, + axis: int = -1, + *, + period: float = ..., # = τ +) -> _Array2D[np.float64]: ... +@overload # 3d float64-like +def unwrap( + p: _Seq3D[float | _float64_co], + discont: float | None = None, + axis: int = -1, + *, + period: float = ..., # = τ +) -> _Array3D[np.float64]: ... +@overload # ?d, float64 +def unwrap( + p: _SeqND[float] | _ArrayLike[_float64_co], + discont: float | None = None, + axis: int = -1, + *, + period: float = ..., # = τ +) -> NDArray[np.float64]: ... +@overload # fallback +def unwrap( + p: _ArrayLikeFloat_co | _ArrayLikeObject_co, + discont: float | None = None, + axis: int = -1, + *, + period: float = ..., # = τ +) -> np.ndarray: ... -@overload -def extract(condition: ArrayLike, arr: _ArrayLike[_ScalarT]) -> NDArray[_ScalarT]: ... -@overload -def extract(condition: ArrayLike, arr: ArrayLike) -> NDArray[Any]: ... +# +@overload +def sort_complex(a: _ArrayComplexT) -> _ArrayComplexT: ... +@overload # complex64, shape known +def sort_complex(a: _Array[_ShapeT, np.int8 | np.uint8 | np.int16 | np.uint16]) -> _Array[_ShapeT, np.complex64]: ... +@overload # complex64, shape unknown +def sort_complex(a: _ArrayLike[np.int8 | np.uint8 | np.int16 | np.uint16]) -> NDArray[np.complex64]: ... +@overload # complex128, shape known +def sort_complex(a: _Array[_ShapeT, _SortsToComplex128]) -> _Array[_ShapeT, np.complex128]: ... 
+@overload # complex128, shape unknown +def sort_complex(a: _ArrayLike[_SortsToComplex128]) -> NDArray[np.complex128]: ... +@overload # clongdouble, shape known +def sort_complex(a: _Array[_ShapeT, np.longdouble]) -> _Array[_ShapeT, np.clongdouble]: ... +@overload # clongdouble, shape unknown +def sort_complex(a: _ArrayLike[np.longdouble]) -> NDArray[np.clongdouble]: ... -def place(arr: NDArray[Any], mask: ArrayLike, vals: Any) -> None: ... +# +def trim_zeros(filt: _TrimZerosSequence[_T], trim: L["f", "b", "fb", "bf"] = "fb", axis: _ShapeLike | None = None) -> _T: ... -@overload +# NOTE: keep in sync with `corrcoef` +@overload # ?d, known inexact scalar-type >=64 precision, y=. def cov( - m: _ArrayLikeFloat_co, - y: _ArrayLikeFloat_co | None = None, + m: _ArrayLike[_AnyDoubleT], + y: _ArrayLike[_AnyDoubleT], rowvar: bool = True, bias: bool = False, ddof: SupportsIndex | SupportsInt | None = None, - fweights: ArrayLike | None = None, - aweights: ArrayLike | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, *, dtype: None = None, -) -> NDArray[floating]: ... -@overload +) -> _Array2D[_AnyDoubleT]: ... +@overload # ?d, known inexact scalar-type >=64 precision, y=None -> 0d or 2d +def cov( + m: _ArrayNoD[_AnyDoubleT], + y: None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: _DTypeLike[_AnyDoubleT] | None = None, +) -> NDArray[_AnyDoubleT]: ... +@overload # 1d, known inexact scalar-type >=64 precision, y=None +def cov( + m: _Array1D[_AnyDoubleT], + y: None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: _DTypeLike[_AnyDoubleT] | None = None, +) -> _Array0D[_AnyDoubleT]: ... 
+@overload # nd, known inexact scalar-type >=64 precision, y=None -> 0d or 2d +def cov( + m: _ArrayLike[_AnyDoubleT], + y: None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: _DTypeLike[_AnyDoubleT] | None = None, +) -> NDArray[_AnyDoubleT]: ... +@overload # nd, casts to float64, y= +def cov( + m: NDArray[np.float32 | np.float16 | _integer_co] | _Seq1D[float] | _Seq2D[float], + y: NDArray[np.float32 | np.float16 | _integer_co] | _Seq1D[float] | _Seq2D[float], + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: _DTypeLike[np.float64] | None = None, +) -> _Array2D[np.float64]: ... +@overload # ?d or 2d, casts to float64, y=None -> 0d or 2d +def cov( + m: _ArrayNoD[np.float32 | np.float16 | _integer_co] | _Seq2D[float], + y: None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: _DTypeLike[np.float64] | None = None, +) -> NDArray[np.float64]: ... +@overload # 1d, casts to float64, y=None +def cov( + m: _Array1D[np.float32 | np.float16 | _integer_co] | _Seq1D[float], + y: None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: _DTypeLike[np.float64] | None = None, +) -> _Array0D[np.float64]: ... 
+@overload # nd, casts to float64, y=None -> 0d or 2d +def cov( + m: _ArrayLike[np.float32 | np.float16 | _integer_co], + y: None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: _DTypeLike[np.float64] | None = None, +) -> NDArray[np.float64]: ... +@overload # 1d complex, y= (`list` avoids overlap with float overloads) +def cov( + m: list[complex] | _Seq1D[list[complex]], + y: list[complex] | _Seq1D[list[complex]], + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: _DTypeLike[np.complex128] | None = None, +) -> _Array2D[np.complex128]: ... +@overload # 1d complex, y=None +def cov( + m: list[complex], + y: None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: _DTypeLike[np.complex128] | None = None, +) -> _Array0D[np.complex128]: ... +@overload # 2d complex, y=None -> 0d or 2d +def cov( + m: _Seq1D[list[complex]], + y: None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: _DTypeLike[np.complex128] | None = None, +) -> NDArray[np.complex128]: ... +@overload # 1d complex-like, y=None, dtype= +def cov( + m: _Seq1D[_ComplexLike_co], + y: None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: _DTypeLike[_ScalarT], +) -> _Array0D[_ScalarT]: ... 
+@overload # nd complex-like, y=, dtype= def cov( m: _ArrayLikeComplex_co, - y: _ArrayLikeComplex_co | None = None, + y: _ArrayLikeComplex_co, rowvar: bool = True, bias: bool = False, ddof: SupportsIndex | SupportsInt | None = None, - fweights: ArrayLike | None = None, - aweights: ArrayLike | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, *, - dtype: None = None, -) -> NDArray[complexfloating]: ... -@overload + dtype: _DTypeLike[_ScalarT], +) -> _Array2D[_ScalarT]: ... +@overload # nd complex-like, y=None, dtype= -> 0d or 2d def cov( m: _ArrayLikeComplex_co, - y: _ArrayLikeComplex_co | None = None, + y: None = None, rowvar: bool = True, bias: bool = False, ddof: SupportsIndex | SupportsInt | None = None, - fweights: ArrayLike | None = None, - aweights: ArrayLike | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, *, dtype: _DTypeLike[_ScalarT], ) -> NDArray[_ScalarT]: ... -@overload +@overload # nd complex-like, y=, dtype=? +def cov( + m: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: DTypeLike | None = None, +) -> _Array2D[Incomplete]: ... +@overload # 1d complex-like, y=None, dtype=? +def cov( + m: _Seq1D[_ComplexLike_co], + y: None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: DTypeLike | None = None, +) -> _Array0D[Incomplete]: ... +@overload # nd complex-like, dtype=? 
def cov( m: _ArrayLikeComplex_co, y: _ArrayLikeComplex_co | None = None, rowvar: bool = True, bias: bool = False, ddof: SupportsIndex | SupportsInt | None = None, - fweights: ArrayLike | None = None, - aweights: ArrayLike | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, *, - dtype: DTypeLike | None, -) -> NDArray[Any]: ... + dtype: DTypeLike | None = None, +) -> NDArray[Incomplete]: ... -# NOTE `bias` and `ddof` are deprecated and ignored -@overload +# NOTE: If only `x` is given and the resulting array has shape (1,1), a bare scalar +# is returned instead of a 2D array. When y is given, a 2D array is always returned. +# This differs from `cov`, which returns 0-D arrays instead of scalars in such cases. +# NOTE: keep in sync with `cov` +@overload # ?d, known inexact scalar-type >=64 precision, y=. def corrcoef( - x: _ArrayLikeFloat_co, - y: _ArrayLikeFloat_co | None = None, + x: _ArrayLike[_AnyDoubleT], + y: _ArrayLike[_AnyDoubleT], rowvar: bool = True, *, - dtype: None = None, -) -> NDArray[floating]: ... -@overload + dtype: _DTypeLike[_AnyDoubleT] | None = None, +) -> _Array2D[_AnyDoubleT]: ... +@overload # ?d, known inexact scalar-type >=64 precision, y=None +def corrcoef( + x: _ArrayNoD[_AnyDoubleT], + y: None = None, + rowvar: bool = True, + *, + dtype: _DTypeLike[_AnyDoubleT] | None = None, +) -> _Array2D[_AnyDoubleT] | _AnyDoubleT: ... +@overload # 1d, known inexact scalar-type >=64 precision, y=None +def corrcoef( + x: _Array1D[_AnyDoubleT], + y: None = None, + rowvar: bool = True, + *, + dtype: _DTypeLike[_AnyDoubleT] | None = None, +) -> _AnyDoubleT: ... +@overload # nd, known inexact scalar-type >=64 precision, y=None +def corrcoef( + x: _ArrayLike[_AnyDoubleT], + y: None = None, + rowvar: bool = True, + *, + dtype: _DTypeLike[_AnyDoubleT] | None = None, +) -> _Array2D[_AnyDoubleT] | _AnyDoubleT: ... 
+@overload # nd, casts to float64, y= +def corrcoef( + x: NDArray[np.float32 | np.float16 | _integer_co] | _Seq1D[float] | _Seq2D[float], + y: NDArray[np.float32 | np.float16 | _integer_co] | _Seq1D[float] | _Seq2D[float], + rowvar: bool = True, + *, + dtype: _DTypeLike[np.float64] | None = None, +) -> _Array2D[np.float64]: ... +@overload # ?d or 2d, casts to float64, y=None +def corrcoef( + x: _ArrayNoD[np.float32 | np.float16 | _integer_co] | _Seq2D[float], + y: None = None, + rowvar: bool = True, + *, + dtype: _DTypeLike[np.float64] | None = None, +) -> _Array2D[np.float64] | np.float64: ... +@overload # 1d, casts to float64, y=None +def corrcoef( + x: _Array1D[np.float32 | np.float16 | _integer_co] | _Seq1D[float], + y: None = None, + rowvar: bool = True, + *, + dtype: _DTypeLike[np.float64] | None = None, +) -> np.float64: ... +@overload # nd, casts to float64, y=None +def corrcoef( + x: _ArrayLike[np.float32 | np.float16 | _integer_co], + y: None = None, + rowvar: bool = True, + *, + dtype: _DTypeLike[np.float64] | None = None, +) -> _Array2D[np.float64] | np.float64: ... +@overload # 1d complex, y= (`list` avoids overlap with float overloads) +def corrcoef( + x: list[complex] | _Seq1D[list[complex]], + y: list[complex] | _Seq1D[list[complex]], + rowvar: bool = True, + *, + dtype: _DTypeLike[np.complex128] | None = None, +) -> _Array2D[np.complex128]: ... +@overload # 1d complex, y=None +def corrcoef( + x: list[complex], + y: None = None, + rowvar: bool = True, + *, + dtype: _DTypeLike[np.complex128] | None = None, +) -> np.complex128: ... +@overload # 2d complex, y=None +def corrcoef( + x: _Seq1D[list[complex]], + y: None = None, + rowvar: bool = True, + *, + dtype: _DTypeLike[np.complex128] | None = None, +) -> _Array2D[np.complex128] | np.complex128: ... +@overload # 1d complex-like, y=None, dtype= +def corrcoef( + x: _Seq1D[_ComplexLike_co], + y: None = None, + rowvar: bool = True, + *, + dtype: _DTypeLike[_ScalarT], +) -> _ScalarT: ... 
+@overload # nd complex-like, y=, dtype= def corrcoef( x: _ArrayLikeComplex_co, - y: _ArrayLikeComplex_co | None = None, + y: _ArrayLikeComplex_co, rowvar: bool = True, *, - dtype: None = None, -) -> NDArray[complexfloating]: ... -@overload + dtype: _DTypeLike[_ScalarT], +) -> _Array2D[_ScalarT]: ... +@overload # nd complex-like, y=None, dtype= def corrcoef( x: _ArrayLikeComplex_co, - y: _ArrayLikeComplex_co | None = None, + y: None = None, rowvar: bool = True, *, dtype: _DTypeLike[_ScalarT], -) -> NDArray[_ScalarT]: ... -@overload +) -> _Array2D[_ScalarT] | _ScalarT: ... +@overload # nd complex-like, y=, dtype=? +def corrcoef( + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + rowvar: bool = True, + *, + dtype: DTypeLike | None = None, +) -> _Array2D[Incomplete]: ... +@overload # 1d complex-like, y=None, dtype=? +def corrcoef( + x: _Seq1D[_ComplexLike_co], + y: None = None, + rowvar: bool = True, + *, + dtype: DTypeLike | None = None, +) -> Incomplete: ... +@overload # nd complex-like, dtype=? def corrcoef( x: _ArrayLikeComplex_co, y: _ArrayLikeComplex_co | None = None, rowvar: bool = True, *, dtype: DTypeLike | None = None, -) -> NDArray[Any]: ... +) -> _Array2D[Incomplete] | Incomplete: ... -def blackman(M: _FloatLike_co) -> NDArray[floating]: ... - -def bartlett(M: _FloatLike_co) -> NDArray[floating]: ... - -def hanning(M: _FloatLike_co) -> NDArray[floating]: ... - -def hamming(M: _FloatLike_co) -> NDArray[floating]: ... - -def i0(x: _ArrayLikeFloat_co) -> NDArray[floating]: ... - -def kaiser( - M: _FloatLike_co, - beta: _FloatLike_co, -) -> NDArray[floating]: ... +# note that floating `M` are accepted, but their fractional part is ignored +def blackman(M: _FloatLike_co) -> _Array1D[np.float64]: ... +def bartlett(M: _FloatLike_co) -> _Array1D[np.float64]: ... +def hanning(M: _FloatLike_co) -> _Array1D[np.float64]: ... +def hamming(M: _FloatLike_co) -> _Array1D[np.float64]: ... +def kaiser(M: _FloatLike_co, beta: _FloatLike_co) -> _Array1D[np.float64]: ... 
+# @overload -def sinc(x: _FloatLike_co) -> floating: ... +def i0(x: _Array[_ShapeT, np.floating | np.integer]) -> _Array[_ShapeT, np.float64]: ... @overload -def sinc(x: _ComplexLike_co) -> complexfloating: ... +def i0(x: _FloatLike_co) -> _Array0D[np.float64]: ... @overload -def sinc(x: _ArrayLikeFloat_co) -> NDArray[floating]: ... +def i0(x: _Seq1D[_FloatLike_co]) -> _Array1D[np.float64]: ... @overload -def sinc(x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... +def i0(x: _Seq2D[_FloatLike_co]) -> _Array2D[np.float64]: ... +@overload +def i0(x: _Seq3D[_FloatLike_co]) -> _Array3D[np.float64]: ... +@overload +def i0(x: _ArrayLikeFloat_co) -> NDArray[np.float64]: ... +# +@overload +def sinc(x: _InexactT) -> _InexactT: ... +@overload +def sinc(x: float | _float64_co) -> np.float64: ... +@overload +def sinc(x: complex) -> np.complex128 | Any: ... @overload +def sinc(x: _ArrayInexactT) -> _ArrayInexactT: ... +@overload +def sinc(x: _Array[_ShapeT, _integer_co]) -> _Array[_ShapeT, np.float64]: ... +@overload +def sinc(x: _Seq1D[float]) -> _Array1D[np.float64]: ... +@overload +def sinc(x: _Seq2D[float]) -> _Array2D[np.float64]: ... +@overload +def sinc(x: _Seq3D[float]) -> _Array3D[np.float64]: ... +@overload +def sinc(x: _SeqND[float]) -> NDArray[np.float64]: ... +@overload +def sinc(x: list[complex]) -> _Array1D[np.complex128]: ... +@overload +def sinc(x: _Seq1D[list[complex]]) -> _Array2D[np.complex128]: ... +@overload +def sinc(x: _Seq2D[list[complex]]) -> _Array3D[np.complex128]: ... +@overload +def sinc(x: _ArrayLikeComplex_co) -> np.ndarray | Any: ... + +# NOTE: We assume that `axis` is only provided for >=1-D arrays because for <1-D arrays +# it has no effect, and would complicate the overloads significantly. +@overload # known scalar-type, keepdims=False (default) def median( - a: _ArrayLikeFloat_co, + a: _ArrayLike[_InexactTimeT], axis: None = None, out: None = None, overwrite_input: bool = False, keepdims: L[False] = False, -) -> floating: ... 
-@overload +) -> _InexactTimeT: ... +@overload # float array-like, keepdims=False (default) def median( - a: _ArrayLikeComplex_co, + a: _ArrayLikeInt_co | _SeqND[float] | float, axis: None = None, out: None = None, overwrite_input: bool = False, keepdims: L[False] = False, -) -> complexfloating: ... -@overload +) -> np.float64: ... +@overload # complex array-like, keepdims=False (default) def median( - a: _ArrayLikeTD64_co, + a: _ListSeqND[complex], axis: None = None, out: None = None, overwrite_input: bool = False, keepdims: L[False] = False, -) -> timedelta64: ... -@overload +) -> np.complex128: ... +@overload # complex scalar, keepdims=False (default) def median( - a: _ArrayLikeObject_co, + a: complex, axis: None = None, out: None = None, overwrite_input: bool = False, keepdims: L[False] = False, -) -> Any: ... -@overload +) -> np.complex128 | Any: ... +@overload # known array-type, keepdims=True def median( - a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, + a: _ArrayNumericT, axis: _ShapeLike | None = None, out: None = None, overwrite_input: bool = False, + *, + keepdims: L[True], +) -> _ArrayNumericT: ... +@overload # known scalar-type, keepdims=True +def median( + a: _ArrayLike[_ScalarNumericT], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + *, + keepdims: L[True], +) -> NDArray[_ScalarNumericT]: ... +@overload # known scalar-type, axis= +def median( + a: _ArrayLike[_ScalarNumericT], + axis: _ShapeLike, + out: None = None, + overwrite_input: bool = False, keepdims: bool = False, -) -> Any: ... -@overload +) -> NDArray[_ScalarNumericT]: ... 
+@overload # float array-like, keepdims=True def median( - a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, - axis: _ShapeLike | None, - out: _ArrayT, + a: _SeqND[float], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + *, + keepdims: L[True], +) -> NDArray[np.float64]: ... +@overload # float array-like, axis= +def median( + a: _SeqND[float], + axis: _ShapeLike, + out: None = None, overwrite_input: bool = False, keepdims: bool = False, -) -> _ArrayT: ... -@overload +) -> NDArray[np.float64]: ... +@overload # complex array-like, keepdims=True def median( - a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, + a: _ListSeqND[complex], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + *, + keepdims: L[True], +) -> NDArray[np.complex128]: ... +@overload # complex array-like, axis= +def median( + a: _ListSeqND[complex], + axis: _ShapeLike, + out: None = None, + overwrite_input: bool = False, + keepdims: bool = False, +) -> NDArray[np.complex128]: ... +@overload # out= (keyword) +def median( + a: _ArrayLikeComplex_co | _ArrayLike[np.timedelta64 | np.object_], axis: _ShapeLike | None = None, *, out: _ArrayT, overwrite_input: bool = False, keepdims: bool = False, ) -> _ArrayT: ... - -_MethodKind = L[ - "inverted_cdf", - "averaged_inverted_cdf", - "closest_observation", - "interpolated_inverted_cdf", - "hazen", - "weibull", - "linear", - "median_unbiased", - "normal_unbiased", - "lower", - "higher", - "midpoint", - "nearest", -] +@overload # out= (positional) +def median( + a: _ArrayLikeComplex_co | _ArrayLike[np.timedelta64 | np.object_], + axis: _ShapeLike | None, + out: _ArrayT, + overwrite_input: bool = False, + keepdims: bool = False, +) -> _ArrayT: ... 
+@overload # fallback +def median( + a: _ArrayLikeComplex_co | _ArrayLike[np.timedelta64 | np.object_], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + keepdims: bool = False, +) -> Incomplete: ... # NOTE: keep in sync with `quantile` -@overload +@overload # inexact, scalar, axis=None +def percentile( + a: _ArrayLike[_InexactDateTimeT], + q: _FloatLike_co, + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> _InexactDateTimeT: ... +@overload # inexact, scalar, axis= +def percentile( + a: _ArrayLike[_InexactDateTimeT], + q: _FloatLike_co, + axis: _ShapeLike, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[_InexactDateTimeT]: ... +@overload # inexact, scalar, keepdims=True +def percentile( + a: _ArrayLike[_InexactDateTimeT], + q: _FloatLike_co, + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + *, + keepdims: L[True], + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[_InexactDateTimeT]: ... +@overload # inexact, array, axis=None +def percentile( + a: _ArrayLike[_InexactDateTimeT], + q: _Array[_ShapeT, _floating_co], + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> _Array[_ShapeT, _InexactDateTimeT]: ... 
+@overload # inexact, array-like +def percentile( + a: _ArrayLike[_InexactDateTimeT], + q: NDArray[_floating_co] | _SeqND[_FloatLike_co], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: bool = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[_InexactDateTimeT]: ... +@overload # float, scalar, axis=None +def percentile( + a: _SeqND[float] | _ArrayLikeInt_co, + q: _FloatLike_co, + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> np.float64: ... +@overload # float, scalar, axis= def percentile( - a: _ArrayLikeFloat_co, + a: _SeqND[float] | _ArrayLikeInt_co, q: _FloatLike_co, - axis: None = None, + axis: _ShapeLike, out: None = None, overwrite_input: bool = False, - method: _MethodKind = "linear", + method: _InterpolationMethod = "linear", keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> floating: ... -@overload +) -> NDArray[np.float64]: ... +@overload # float, scalar, keepdims=True def percentile( - a: _ArrayLikeComplex_co, + a: _SeqND[float] | _ArrayLikeInt_co, q: _FloatLike_co, + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + *, + keepdims: L[True], + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.float64]: ... +@overload # float, array, axis=None +def percentile( + a: _SeqND[float] | _ArrayLikeInt_co, + q: _Array[_ShapeT, _floating_co], axis: None = None, out: None = None, overwrite_input: bool = False, - method: _MethodKind = "linear", + method: _InterpolationMethod = "linear", keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> complexfloating: ... -@overload +) -> _Array[_ShapeT, np.float64]: ... 
+@overload # float, array-like +def percentile( + a: _SeqND[float] | _ArrayLikeInt_co, + q: NDArray[_floating_co] | _SeqND[_FloatLike_co], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: bool = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.float64]: ... +@overload # complex, scalar, axis=None def percentile( - a: _ArrayLikeTD64_co, + a: _ListSeqND[complex], q: _FloatLike_co, axis: None = None, out: None = None, overwrite_input: bool = False, - method: _MethodKind = "linear", + method: _InterpolationMethod = "linear", keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> timedelta64: ... -@overload +) -> np.complex128: ... +@overload # complex, scalar, axis= def percentile( - a: _ArrayLikeDT64_co, + a: _ListSeqND[complex], q: _FloatLike_co, - axis: None = None, + axis: _ShapeLike, out: None = None, overwrite_input: bool = False, - method: _MethodKind = "linear", + method: _InterpolationMethod = "linear", keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> datetime64: ... -@overload +) -> NDArray[np.complex128]: ... +@overload # complex, scalar, keepdims=True def percentile( - a: _ArrayLikeObject_co, + a: _ListSeqND[complex], q: _FloatLike_co, - axis: None = None, + axis: _ShapeLike | None = None, out: None = None, overwrite_input: bool = False, - method: _MethodKind = "linear", - keepdims: L[False] = False, + method: _InterpolationMethod = "linear", *, + keepdims: L[True], weights: _ArrayLikeFloat_co | None = None, -) -> Any: ... -@overload +) -> NDArray[np.complex128]: ... 
+@overload # complex, array, axis=None def percentile( - a: _ArrayLikeFloat_co, - q: _ArrayLikeFloat_co, + a: _ListSeqND[complex], + q: _Array[_ShapeT, _floating_co], axis: None = None, out: None = None, overwrite_input: bool = False, - method: _MethodKind = "linear", + method: _InterpolationMethod = "linear", keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> NDArray[floating]: ... -@overload +) -> _Array[_ShapeT, np.complex128]: ... +@overload # complex, array-like def percentile( - a: _ArrayLikeComplex_co, - q: _ArrayLikeFloat_co, - axis: None = None, + a: _ListSeqND[complex], + q: NDArray[_floating_co] | _SeqND[_FloatLike_co], + axis: _ShapeLike | None = None, out: None = None, overwrite_input: bool = False, - method: _MethodKind = "linear", - keepdims: L[False] = False, + method: _InterpolationMethod = "linear", + keepdims: bool = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> NDArray[complexfloating]: ... -@overload +) -> NDArray[np.complex128]: ... +@overload # object_, scalar, axis=None def percentile( - a: _ArrayLikeTD64_co, - q: _ArrayLikeFloat_co, + a: _ArrayLikeObject_co, + q: _FloatLike_co, axis: None = None, out: None = None, overwrite_input: bool = False, - method: _MethodKind = "linear", + method: _InterpolationMethod = "linear", keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> NDArray[timedelta64]: ... -@overload +) -> Any: ... +@overload # object_, scalar, axis= def percentile( - a: _ArrayLikeDT64_co, - q: _ArrayLikeFloat_co, - axis: None = None, + a: _ArrayLikeObject_co, + q: _FloatLike_co, + axis: _ShapeLike, out: None = None, overwrite_input: bool = False, - method: _MethodKind = "linear", + method: _InterpolationMethod = "linear", keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> NDArray[datetime64]: ... -@overload +) -> NDArray[np.object_]: ... 
+@overload # object_, scalar, keepdims=True def percentile( a: _ArrayLikeObject_co, - q: _ArrayLikeFloat_co, + q: _FloatLike_co, + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + *, + keepdims: L[True], + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.object_]: ... +@overload # object_, array, axis=None +def percentile( + a: _ArrayLikeObject_co, + q: _Array[_ShapeT, _floating_co], axis: None = None, out: None = None, overwrite_input: bool = False, - method: _MethodKind = "linear", + method: _InterpolationMethod = "linear", keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> NDArray[object_]: ... -@overload +) -> _Array[_ShapeT, np.object_]: ... +@overload # object_, array-like def percentile( - a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co, - q: _ArrayLikeFloat_co, + a: _ArrayLikeObject_co, + q: NDArray[_floating_co] | _SeqND[_FloatLike_co], axis: _ShapeLike | None = None, out: None = None, overwrite_input: bool = False, - method: _MethodKind = "linear", + method: _InterpolationMethod = "linear", keepdims: bool = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> Any: ... -@overload +) -> NDArray[np.object_]: ... +@overload # out= (keyword) def percentile( - a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co, + a: ArrayLike, q: _ArrayLikeFloat_co, axis: _ShapeLike | None, out: _ArrayT, overwrite_input: bool = False, - method: _MethodKind = "linear", + method: _InterpolationMethod = "linear", keepdims: bool = False, *, weights: _ArrayLikeFloat_co | None = None, ) -> _ArrayT: ... 
-@overload +@overload # out= (positional) def percentile( - a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co, + a: ArrayLike, q: _ArrayLikeFloat_co, axis: _ShapeLike | None = None, *, out: _ArrayT, overwrite_input: bool = False, - method: _MethodKind = "linear", + method: _InterpolationMethod = "linear", keepdims: bool = False, weights: _ArrayLikeFloat_co | None = None, ) -> _ArrayT: ... +@overload # fallback +def percentile( + a: _ArrayLikeNumber_co | _ArrayLikeObject_co, + q: _ArrayLikeFloat_co, + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: bool = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> Incomplete: ... # NOTE: keep in sync with `percentile` -@overload +@overload # inexact, scalar, axis=None def quantile( - a: _ArrayLikeFloat_co, + a: _ArrayLike[_InexactDateTimeT], q: _FloatLike_co, axis: None = None, out: None = None, overwrite_input: bool = False, - method: _MethodKind = "linear", + method: _InterpolationMethod = "linear", keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> floating: ... -@overload +) -> _InexactDateTimeT: ... +@overload # inexact, scalar, axis= def quantile( - a: _ArrayLikeComplex_co, + a: _ArrayLike[_InexactDateTimeT], q: _FloatLike_co, - axis: None = None, + axis: _ShapeLike, out: None = None, overwrite_input: bool = False, - method: _MethodKind = "linear", + method: _InterpolationMethod = "linear", keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> complexfloating: ... -@overload +) -> NDArray[_InexactDateTimeT]: ... 
+@overload # inexact, scalar, keepdims=True def quantile( - a: _ArrayLikeTD64_co, + a: _ArrayLike[_InexactDateTimeT], q: _FloatLike_co, + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + *, + keepdims: L[True], + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[_InexactDateTimeT]: ... +@overload # inexact, array, axis=None +def quantile( + a: _ArrayLike[_InexactDateTimeT], + q: _Array[_ShapeT, _floating_co], axis: None = None, out: None = None, overwrite_input: bool = False, - method: _MethodKind = "linear", + method: _InterpolationMethod = "linear", keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> timedelta64: ... -@overload +) -> _Array[_ShapeT, _InexactDateTimeT]: ... +@overload # inexact, array-like +def quantile( + a: _ArrayLike[_InexactDateTimeT], + q: NDArray[_floating_co] | _SeqND[_FloatLike_co], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: bool = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[_InexactDateTimeT]: ... +@overload # float, scalar, axis=None def quantile( - a: _ArrayLikeDT64_co, + a: _SeqND[float] | _ArrayLikeInt_co, q: _FloatLike_co, axis: None = None, out: None = None, overwrite_input: bool = False, - method: _MethodKind = "linear", + method: _InterpolationMethod = "linear", keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> datetime64: ... -@overload +) -> np.float64: ... +@overload # float, scalar, axis= def quantile( - a: _ArrayLikeObject_co, + a: _SeqND[float] | _ArrayLikeInt_co, q: _FloatLike_co, - axis: None = None, + axis: _ShapeLike, out: None = None, overwrite_input: bool = False, - method: _MethodKind = "linear", + method: _InterpolationMethod = "linear", keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> Any: ... 
-@overload +) -> NDArray[np.float64]: ... +@overload # float, scalar, keepdims=True def quantile( - a: _ArrayLikeFloat_co, - q: _ArrayLikeFloat_co, + a: _SeqND[float] | _ArrayLikeInt_co, + q: _FloatLike_co, + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + *, + keepdims: L[True], + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.float64]: ... +@overload # float, array, axis=None +def quantile( + a: _SeqND[float] | _ArrayLikeInt_co, + q: _Array[_ShapeT, _floating_co], axis: None = None, out: None = None, overwrite_input: bool = False, - method: _MethodKind = "linear", + method: _InterpolationMethod = "linear", keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> NDArray[floating]: ... -@overload +) -> _Array[_ShapeT, np.float64]: ... +@overload # float, array-like def quantile( - a: _ArrayLikeComplex_co, - q: _ArrayLikeFloat_co, + a: _SeqND[float] | _ArrayLikeInt_co, + q: NDArray[_floating_co] | _SeqND[_FloatLike_co], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: bool = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.float64]: ... +@overload # complex, scalar, axis=None +def quantile( + a: _ListSeqND[complex], + q: _FloatLike_co, axis: None = None, out: None = None, overwrite_input: bool = False, - method: _MethodKind = "linear", + method: _InterpolationMethod = "linear", keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> NDArray[complexfloating]: ... -@overload +) -> np.complex128: ... 
+@overload # complex, scalar, axis= def quantile( - a: _ArrayLikeTD64_co, - q: _ArrayLikeFloat_co, + a: _ListSeqND[complex], + q: _FloatLike_co, + axis: _ShapeLike, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.complex128]: ... +@overload # complex, scalar, keepdims=True +def quantile( + a: _ListSeqND[complex], + q: _FloatLike_co, + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + *, + keepdims: L[True], + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.complex128]: ... +@overload # complex, array, axis=None +def quantile( + a: _ListSeqND[complex], + q: _Array[_ShapeT, _floating_co], axis: None = None, out: None = None, overwrite_input: bool = False, - method: _MethodKind = "linear", + method: _InterpolationMethod = "linear", keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> NDArray[timedelta64]: ... -@overload +) -> _Array[_ShapeT, np.complex128]: ... +@overload # complex, array-like def quantile( - a: _ArrayLikeDT64_co, - q: _ArrayLikeFloat_co, + a: _ListSeqND[complex], + q: NDArray[_floating_co] | _SeqND[_FloatLike_co], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: bool = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.complex128]: ... +@overload # object_, scalar, axis=None +def quantile( + a: _ArrayLikeObject_co, + q: _FloatLike_co, axis: None = None, out: None = None, overwrite_input: bool = False, - method: _MethodKind = "linear", + method: _InterpolationMethod = "linear", keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> NDArray[datetime64]: ... -@overload +) -> Any: ... 
+@overload # object_, scalar, axis= def quantile( a: _ArrayLikeObject_co, - q: _ArrayLikeFloat_co, + q: _FloatLike_co, + axis: _ShapeLike, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.object_]: ... +@overload # object_, scalar, keepdims=True +def quantile( + a: _ArrayLikeObject_co, + q: _FloatLike_co, + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + *, + keepdims: L[True], + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.object_]: ... +@overload # object_, array, axis=None +def quantile( + a: _ArrayLikeObject_co, + q: _Array[_ShapeT, _floating_co], axis: None = None, out: None = None, overwrite_input: bool = False, - method: _MethodKind = "linear", + method: _InterpolationMethod = "linear", keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> NDArray[object_]: ... -@overload +) -> _Array[_ShapeT, np.object_]: ... +@overload # object_, array-like def quantile( - a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co, - q: _ArrayLikeFloat_co, + a: _ArrayLikeObject_co, + q: NDArray[_floating_co] | _SeqND[_FloatLike_co], axis: _ShapeLike | None = None, out: None = None, overwrite_input: bool = False, - method: _MethodKind = "linear", + method: _InterpolationMethod = "linear", keepdims: bool = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> Any: ... -@overload +) -> NDArray[np.object_]: ... 
+@overload # out= (keyword) def quantile( - a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co, + a: ArrayLike, q: _ArrayLikeFloat_co, axis: _ShapeLike | None, out: _ArrayT, overwrite_input: bool = False, - method: _MethodKind = "linear", + method: _InterpolationMethod = "linear", keepdims: bool = False, *, weights: _ArrayLikeFloat_co | None = None, ) -> _ArrayT: ... -@overload +@overload # out= (positional) def quantile( - a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co, + a: ArrayLike, q: _ArrayLikeFloat_co, axis: _ShapeLike | None = None, *, out: _ArrayT, overwrite_input: bool = False, - method: _MethodKind = "linear", + method: _InterpolationMethod = "linear", keepdims: bool = False, weights: _ArrayLikeFloat_co | None = None, ) -> _ArrayT: ... +@overload # fallback +def quantile( + a: _ArrayLikeNumber_co | _ArrayLikeObject_co, + q: _ArrayLikeFloat_co, + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: bool = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> Incomplete: ... -_ScalarT_fm = TypeVar( - "_ScalarT_fm", - bound=floating | complexfloating | timedelta64, -) - -class _SupportsRMulFloat(Protocol[_T_co]): - def __rmul__(self, other: float, /) -> _T_co: ... - -@overload -def trapezoid( # type: ignore[overload-overlap] - y: Sequence[_FloatLike_co], - x: Sequence[_FloatLike_co] | None = None, +# +@overload # ?d, known inexact/timedelta64 scalar-type +def trapezoid( + y: _ArrayNoD[_InexactTimeT], + x: _ArrayLike[_InexactTimeT] | _ArrayLikeFloat_co | None = None, dx: float = 1.0, axis: SupportsIndex = -1, -) -> float64: ... -@overload +) -> NDArray[_InexactTimeT] | _InexactTimeT: ... 
+@overload # ?d, casts to float64 def trapezoid( - y: Sequence[_ComplexLike_co], - x: Sequence[_ComplexLike_co] | None = None, + y: _ArrayNoD[_integer_co], + x: _ArrayLikeFloat_co | None = None, dx: float = 1.0, axis: SupportsIndex = -1, -) -> complex128: ... -@overload +) -> NDArray[np.float64] | np.float64: ... +@overload # strict 1d, known inexact/timedelta64 scalar-type +def trapezoid( + y: _Array1D[_InexactTimeT], + x: _Array1D[_InexactTimeT] | _Seq1D[float] | None = None, + dx: float = 1.0, + axis: SupportsIndex = -1, +) -> _InexactTimeT: ... +@overload # strict 1d, casts to float64 +def trapezoid( + y: _Array1D[_float64_co] | _Seq1D[float], + x: _Array1D[_float64_co] | _Seq1D[float] | None = None, + dx: float = 1.0, + axis: SupportsIndex = -1, +) -> np.float64: ... +@overload # strict 1d, casts to complex128 (`list` prevents overlapping overloads) +def trapezoid( + y: list[complex], + x: _Seq1D[complex] | None = None, + dx: complex = 1.0, + axis: SupportsIndex = -1, +) -> np.complex128: ... +@overload # strict 1d, casts to complex128 +def trapezoid( + y: _Seq1D[complex], + x: list[complex], + dx: complex = 1.0, + axis: SupportsIndex = -1, +) -> np.complex128: ... +@overload # strict 2d, known inexact/timedelta64 scalar-type +def trapezoid( + y: _Array2D[_InexactTimeT], + x: _ArrayMax2D[_InexactTimeT] | _Seq2D[float] | _Seq1D[float] | None = None, + dx: float = 1.0, + axis: SupportsIndex = -1, +) -> _InexactTimeT: ... +@overload # strict 2d, casts to float64 def trapezoid( - y: _ArrayLike[bool_ | integer], - x: _ArrayLike[bool_ | integer] | None = None, + y: _Array2D[_float64_co] | _Seq2D[float], + x: _ArrayMax2D[_float64_co] | _Seq2D[float] | _Seq1D[float] | None = None, dx: float = 1.0, axis: SupportsIndex = -1, -) -> float64 | NDArray[float64]: ... +) -> np.float64: ... 
+@overload # strict 2d, casts to complex128 (`list` prevents overlapping overloads) +def trapezoid( + y: _Seq1D[list[complex]], + x: _Seq2D[complex] | _Seq1D[complex] | None = None, + dx: complex = 1.0, + axis: SupportsIndex = -1, +) -> np.complex128: ... +@overload # strict 2d, casts to complex128 +def trapezoid( + y: _Seq2D[complex] | _Seq1D[complex], + x: _Seq1D[list[complex]], + dx: complex = 1.0, + axis: SupportsIndex = -1, +) -> np.complex128: ... @overload -def trapezoid( # type: ignore[overload-overlap] - y: _ArrayLikeObject_co, - x: _ArrayLikeFloat_co | _ArrayLikeObject_co | None = None, +def trapezoid( + y: _ArrayLike[_InexactTimeT], + x: _ArrayLike[_InexactTimeT] | _ArrayLikeInt_co | None = None, + dx: complex = 1.0, + axis: SupportsIndex = -1, +) -> NDArray[_InexactTimeT] | _InexactTimeT: ... +@overload +def trapezoid( + y: _ArrayLike[_float64_co], + x: _ArrayLikeFloat_co | None = None, dx: float = 1.0, axis: SupportsIndex = -1, -) -> float | NDArray[object_]: ... +) -> NDArray[np.float64] | np.float64: ... @overload def trapezoid( - y: _ArrayLike[_ScalarT_fm], - x: _ArrayLike[_ScalarT_fm] | _ArrayLikeInt_co | None = None, + y: _ArrayLike[np.complex128], + x: _ArrayLikeComplex_co | None = None, dx: float = 1.0, axis: SupportsIndex = -1, -) -> _ScalarT_fm | NDArray[_ScalarT_fm]: ... +) -> NDArray[np.complex128] | np.complex128: ... @overload def trapezoid( - y: Sequence[_SupportsRMulFloat[_T]], - x: Sequence[_SupportsRMulFloat[_T] | _T] | None = None, + y: _ArrayLikeComplex_co, + x: _ArrayLike[np.complex128], dx: float = 1.0, axis: SupportsIndex = -1, -) -> _T: ... +) -> NDArray[np.complex128] | np.complex128: ... 
@overload def trapezoid( - y: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, - x: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co | None = None, + y: _ArrayLikeObject_co, + x: _ArrayLikeObject_co | _ArrayLikeFloat_co | None = None, dx: float = 1.0, axis: SupportsIndex = -1, -) -> ( - floating | complexfloating | timedelta64 - | NDArray[floating | complexfloating | timedelta64 | object_] -): ... +) -> NDArray[np.object_] | Any: ... @overload -def meshgrid( - *, - copy: bool = True, - sparse: bool = False, - indexing: _MeshgridIdx = "xy", -) -> tuple[()]: ... +def trapezoid( + y: _Seq1D[_SupportsRMulFloat[_T]], + x: _Seq1D[_SupportsRMulFloat[_T] | _T] | None = None, + dx: complex = 1.0, + axis: SupportsIndex = -1, +) -> _T: ... @overload +def trapezoid( + y: _ArrayLikeComplex_co | _ArrayLike[np.timedelta64 | np.object_], + x: _ArrayLikeComplex_co | _ArrayLike[np.timedelta64 | np.object_] | None = None, + dx: complex = 1.0, + axis: SupportsIndex = -1, +) -> Incomplete: ... + +# +@overload # 0d +def meshgrid(*, copy: bool = True, sparse: bool = False, indexing: _Indexing = "xy") -> tuple[()]: ... +@overload # 1d, known scalar-type def meshgrid( x1: _ArrayLike[_ScalarT], /, *, copy: bool = True, sparse: bool = False, - indexing: _MeshgridIdx = "xy", -) -> tuple[NDArray[_ScalarT]]: ... -@overload + indexing: _Indexing = "xy", +) -> _Mesh1[_ScalarT]: ... +@overload # 1d, unknown scalar-type def meshgrid( x1: ArrayLike, /, *, copy: bool = True, sparse: bool = False, - indexing: _MeshgridIdx = "xy", -) -> tuple[NDArray[Any]]: ... -@overload + indexing: _Indexing = "xy", +) -> _Mesh1[Any]: ... +@overload # 2d, known scalar-types def meshgrid( - x1: _ArrayLike[_ScalarT1], - x2: _ArrayLike[_ScalarT2], + x1: _ArrayLike[_ScalarT], + x2: _ArrayLike[_ScalarT1], /, *, copy: bool = True, sparse: bool = False, - indexing: _MeshgridIdx = "xy", -) -> tuple[NDArray[_ScalarT1], NDArray[_ScalarT2]]: ... 
-@overload + indexing: _Indexing = "xy", +) -> _Mesh2[_ScalarT, _ScalarT1]: ... +@overload # 2d, known/unknown scalar-types def meshgrid( - x1: ArrayLike, - x2: _ArrayLike[_ScalarT], + x1: _ArrayLike[_ScalarT], + x2: ArrayLike, /, *, copy: bool = True, sparse: bool = False, - indexing: _MeshgridIdx = "xy", -) -> tuple[NDArray[Any], NDArray[_ScalarT]]: ... -@overload + indexing: _Indexing = "xy", +) -> _Mesh2[_ScalarT, Any]: ... +@overload # 2d, unknown/known scalar-types def meshgrid( - x1: _ArrayLike[_ScalarT], - x2: ArrayLike, + x1: ArrayLike, + x2: _ArrayLike[_ScalarT], /, *, copy: bool = True, sparse: bool = False, - indexing: _MeshgridIdx = "xy", -) -> tuple[NDArray[_ScalarT], NDArray[Any]]: ... -@overload + indexing: _Indexing = "xy", +) -> _Mesh2[Any, _ScalarT]: ... +@overload # 2d, unknown scalar-types def meshgrid( x1: ArrayLike, x2: ArrayLike, @@ -1045,82 +2202,102 @@ def meshgrid( *, copy: bool = True, sparse: bool = False, - indexing: _MeshgridIdx = "xy", -) -> tuple[NDArray[Any], NDArray[Any]]: ... -@overload + indexing: _Indexing = "xy", +) -> _Mesh2[Any, Any]: ... +@overload # 3d, known scalar-types def meshgrid( - x1: ArrayLike, - x2: ArrayLike, - x3: ArrayLike, + x1: _ArrayLike[_ScalarT], + x2: _ArrayLike[_ScalarT1], + x3: _ArrayLike[_ScalarT2], /, *, copy: bool = True, sparse: bool = False, - indexing: _MeshgridIdx = "xy", -) -> tuple[NDArray[Any], NDArray[Any], NDArray[Any]]: ... -@overload + indexing: _Indexing = "xy", +) -> _Mesh3[_ScalarT, _ScalarT1, _ScalarT2]: ... +@overload # 3d, unknown scalar-types def meshgrid( x1: ArrayLike, x2: ArrayLike, x3: ArrayLike, - x4: ArrayLike, /, *, copy: bool = True, sparse: bool = False, - indexing: _MeshgridIdx = "xy", -) -> tuple[NDArray[Any], NDArray[Any], NDArray[Any], NDArray[Any]]: ... -@overload + indexing: _Indexing = "xy", +) -> _Mesh3[Any, Any, Any]: ... 
+@overload # ?d, known scalar-types +def meshgrid( + *xi: _ArrayLike[_ScalarT], + copy: bool = True, + sparse: bool = False, + indexing: _Indexing = "xy", +) -> tuple[NDArray[_ScalarT], ...]: ... +@overload # ?d, unknown scalar-types def meshgrid( *xi: ArrayLike, copy: bool = True, sparse: bool = False, - indexing: _MeshgridIdx = "xy", + indexing: _Indexing = "xy", ) -> tuple[NDArray[Any], ...]: ... -@overload -def delete( - arr: _ArrayLike[_ScalarT], - obj: slice | _ArrayLikeInt_co, - axis: SupportsIndex | None = None, -) -> NDArray[_ScalarT]: ... -@overload -def delete( - arr: ArrayLike, - obj: slice | _ArrayLikeInt_co, - axis: SupportsIndex | None = None, -) -> NDArray[Any]: ... +# +def place(arr: np.ndarray, mask: ConvertibleToInt | Sequence[ConvertibleToInt], vals: ArrayLike) -> None: ... -@overload -def insert( - arr: _ArrayLike[_ScalarT], - obj: slice | _ArrayLikeInt_co, - values: ArrayLike, - axis: SupportsIndex | None = None, -) -> NDArray[_ScalarT]: ... -@overload -def insert( - arr: ArrayLike, - obj: slice | _ArrayLikeInt_co, - values: ArrayLike, - axis: SupportsIndex | None = None, -) -> NDArray[Any]: ... +# keep in sync with `insert` +@overload # known scalar-type, axis=None (default) +def delete(arr: _ArrayLike[_ScalarT], obj: _IndexLike, axis: None = None) -> _Array1D[_ScalarT]: ... +@overload # known array-type, axis specified +def delete(arr: _ArrayT, obj: _IndexLike, axis: SupportsIndex) -> _ArrayT: ... +@overload # known scalar-type, axis specified +def delete(arr: _ArrayLike[_ScalarT], obj: _IndexLike, axis: SupportsIndex) -> NDArray[_ScalarT]: ... +@overload # known scalar-type, axis=None (default) +def delete(arr: ArrayLike, obj: _IndexLike, axis: None = None) -> _Array1D[Any]: ... +@overload # unknown scalar-type, axis specified +def delete(arr: ArrayLike, obj: _IndexLike, axis: SupportsIndex) -> NDArray[Any]: ... -def append( - arr: ArrayLike, - values: ArrayLike, - axis: SupportsIndex | None = None, -) -> NDArray[Any]: ... 
+# keep in sync with `delete` +@overload # known scalar-type, axis=None (default) +def insert(arr: _ArrayLike[_ScalarT], obj: _IndexLike, values: ArrayLike, axis: None = None) -> _Array1D[_ScalarT]: ... +@overload # known array-type, axis specified +def insert(arr: _ArrayT, obj: _IndexLike, values: ArrayLike, axis: SupportsIndex) -> _ArrayT: ... +@overload # known scalar-type, axis specified +def insert(arr: _ArrayLike[_ScalarT], obj: _IndexLike, values: ArrayLike, axis: SupportsIndex) -> NDArray[_ScalarT]: ... +@overload # known scalar-type, axis=None (default) +def insert(arr: ArrayLike, obj: _IndexLike, values: ArrayLike, axis: None = None) -> _Array1D[Any]: ... +@overload # unknown scalar-type, axis specified +def insert(arr: ArrayLike, obj: _IndexLike, values: ArrayLike, axis: SupportsIndex) -> NDArray[Any]: ... +# +@overload # known array type, axis specified +def append(arr: _ArrayT, values: _ArrayT, axis: SupportsIndex) -> _ArrayT: ... +@overload # 1d, known scalar type, axis specified +def append(arr: _Seq1D[_ScalarT], values: _Seq1D[_ScalarT], axis: SupportsIndex) -> _Array1D[_ScalarT]: ... +@overload # 2d, known scalar type, axis specified +def append(arr: _Seq2D[_ScalarT], values: _Seq2D[_ScalarT], axis: SupportsIndex) -> _Array2D[_ScalarT]: ... +@overload # 3d, known scalar type, axis specified +def append(arr: _Seq3D[_ScalarT], values: _Seq3D[_ScalarT], axis: SupportsIndex) -> _Array3D[_ScalarT]: ... +@overload # ?d, known scalar type, axis specified +def append(arr: _SeqND[_ScalarT], values: _SeqND[_ScalarT], axis: SupportsIndex) -> NDArray[_ScalarT]: ... +@overload # ?d, unknown scalar type, axis specified +def append(arr: np.ndarray | _SeqND[_ScalarLike_co], values: _SeqND[_ScalarLike_co], axis: SupportsIndex) -> np.ndarray: ... +@overload # known scalar type, axis=None +def append(arr: _ArrayLike[_ScalarT], values: _ArrayLike[_ScalarT], axis: None = None) -> _Array1D[_ScalarT]: ... 
+@overload # unknown scalar type, axis=None +def append(arr: ArrayLike, values: ArrayLike, axis: None = None) -> _Array1D[Any]: ... + +# @overload def digitize( - x: _FloatLike_co, - bins: _ArrayLikeFloat_co, - right: bool = False, -) -> intp: ... + x: _Array[_ShapeT, np.floating | np.integer], bins: _ArrayLikeFloat_co, right: bool = False +) -> _Array[_ShapeT, np.int_]: ... @overload -def digitize( - x: _ArrayLikeFloat_co, - bins: _ArrayLikeFloat_co, - right: bool = False, -) -> NDArray[intp]: ... +def digitize(x: _FloatLike_co, bins: _ArrayLikeFloat_co, right: bool = False) -> np.int_: ... +@overload +def digitize(x: _Seq1D[_FloatLike_co], bins: _ArrayLikeFloat_co, right: bool = False) -> _Array1D[np.int_]: ... +@overload +def digitize(x: _Seq2D[_FloatLike_co], bins: _ArrayLikeFloat_co, right: bool = False) -> _Array2D[np.int_]: ... +@overload +def digitize(x: _Seq3D[_FloatLike_co], bins: _ArrayLikeFloat_co, right: bool = False) -> _Array3D[np.int_]: ... +@overload +def digitize(x: _ArrayLikeFloat_co, bins: _ArrayLikeFloat_co, right: bool = False) -> NDArray[np.int_] | Any: ... diff --git a/numpy/typing/tests/data/fail/lib_function_base.pyi b/numpy/typing/tests/data/fail/lib_function_base.pyi index f0bf6347691d..d7be993bf04c 100644 --- a/numpy/typing/tests/data/fail/lib_function_base.pyi +++ b/numpy/typing/tests/data/fail/lib_function_base.pyi @@ -13,34 +13,32 @@ AR_b_list: list[npt.NDArray[np.bool]] def fn_none_i(a: None, /) -> npt.NDArray[Any]: ... def fn_ar_i(a: npt.NDArray[np.float64], posarg: int, /) -> npt.NDArray[Any]: ... 
-np.average(AR_m) # type: ignore[arg-type] -np.select(1, [AR_f8]) # type: ignore[arg-type] -np.angle(AR_m) # type: ignore[arg-type] -np.unwrap(AR_m) # type: ignore[arg-type] -np.unwrap(AR_c16) # type: ignore[arg-type] +np.average(AR_m) # type: ignore[type-var] +np.select(1, [AR_f8]) # type: ignore[call-overload] +np.angle(AR_m) # type: ignore[type-var] +np.unwrap(AR_m) # type: ignore[type-var] +np.unwrap(AR_c16) # type: ignore[type-var] np.trim_zeros(1) # type: ignore[arg-type] np.place(1, [True], 1.5) # type: ignore[arg-type] np.vectorize(1) # type: ignore[arg-type] np.place(AR_f8, slice(None), 5) # type: ignore[arg-type] -np.piecewise(AR_f8, True, [fn_ar_i], 42) # type: ignore[call-overload] -# TODO: enable these once mypy actually supports ParamSpec (released in 2021) -# NOTE: pyright correctly reports errors for these (`reportCallIssue`) -# np.piecewise(AR_f8, AR_b_list, [fn_none_i]) # type: ignore[call-overload]s -# np.piecewise(AR_f8, AR_b_list, [fn_ar_i]) # type: ignore[call-overload] -# np.piecewise(AR_f8, AR_b_list, [fn_ar_i], 3.14) # type: ignore[call-overload] -# np.piecewise(AR_f8, AR_b_list, [fn_ar_i], 42, None) # type: ignore[call-overload] -# np.piecewise(AR_f8, AR_b_list, [fn_ar_i], 42, _=None) # type: ignore[call-overload] +np.piecewise(AR_f8, True, [fn_ar_i], "wrong") # type: ignore[call-overload] +np.piecewise(AR_f8, AR_b_list, [fn_none_i]) # type: ignore[call-overload] +np.piecewise(AR_f8, AR_b_list, [fn_ar_i]) # type: ignore[call-overload] +np.piecewise(AR_f8, AR_b_list, [fn_ar_i], 3.14) # type: ignore[call-overload] +np.piecewise(AR_f8, AR_b_list, [fn_ar_i], 42, None) # type: ignore[call-overload] +np.piecewise(AR_f8, AR_b_list, [fn_ar_i], 42, _=None) # type: ignore[list-item] np.interp(AR_f8, AR_c16, AR_f8) # type: ignore[arg-type] np.interp(AR_c16, AR_f8, AR_f8) # type: ignore[arg-type] np.interp(AR_f8, AR_f8, AR_f8, period=AR_c16) # type: ignore[call-overload] np.interp(AR_f8, AR_f8, AR_O) # type: ignore[arg-type] -np.cov(AR_m) # type: 
ignore[arg-type] -np.cov(AR_O) # type: ignore[arg-type] -np.corrcoef(AR_m) # type: ignore[arg-type] -np.corrcoef(AR_O) # type: ignore[arg-type] +np.cov(AR_m) # type: ignore[type-var] +np.cov(AR_O) # type: ignore[type-var] +np.corrcoef(AR_m) # type: ignore[type-var] +np.corrcoef(AR_O) # type: ignore[type-var] np.corrcoef(AR_f8, bias=True) # type: ignore[call-overload] np.corrcoef(AR_f8, ddof=2) # type: ignore[call-overload] np.blackman(1j) # type: ignore[arg-type] @@ -49,8 +47,8 @@ np.hanning(1j) # type: ignore[arg-type] np.hamming(1j) # type: ignore[arg-type] np.hamming(AR_c16) # type: ignore[arg-type] np.kaiser(1j, 1) # type: ignore[arg-type] -np.sinc(AR_O) # type: ignore[arg-type] -np.median(AR_M) # type: ignore[arg-type] +np.sinc(AR_O) # type: ignore[type-var] +np.median(AR_M) # type: ignore[type-var] np.percentile(AR_f8, 50j) # type: ignore[call-overload] np.percentile(AR_f8, 50, interpolation="bob") # type: ignore[call-overload] diff --git a/numpy/typing/tests/data/reveal/lib_function_base.pyi b/numpy/typing/tests/data/reveal/lib_function_base.pyi index 06096f5a7749..345635d06327 100644 --- a/numpy/typing/tests/data/reveal/lib_function_base.pyi +++ b/numpy/typing/tests/data/reveal/lib_function_base.pyi @@ -1,20 +1,26 @@ from collections.abc import Callable from fractions import Fraction -from typing import Any, assert_type +from typing import Any, LiteralString, assert_type, type_check_only import numpy as np import numpy.typing as npt -vectorized_func: np.vectorize - f8: np.float64 +AR_LIKE_b: list[bool] +AR_LIKE_i8: list[int] AR_LIKE_f8: list[float] AR_LIKE_c16: list[complex] AR_LIKE_O: list[Fraction] +AR_u1: npt.NDArray[np.uint8] AR_i8: npt.NDArray[np.int64] +AR_f2: npt.NDArray[np.float16] +AR_f4: npt.NDArray[np.float32] AR_f8: npt.NDArray[np.float64] +AR_f10: npt.NDArray[np.longdouble] +AR_c8: npt.NDArray[np.complex64] AR_c16: npt.NDArray[np.complex128] +AR_c20: npt.NDArray[np.clongdouble] AR_m: npt.NDArray[np.timedelta64] AR_M: npt.NDArray[np.datetime64] 
AR_O: npt.NDArray[np.object_] @@ -22,21 +28,26 @@ AR_b: npt.NDArray[np.bool] AR_U: npt.NDArray[np.str_] CHAR_AR_U: np.char.chararray[tuple[Any, ...], np.dtype[np.str_]] +AR_f8_1d: np.ndarray[tuple[int], np.dtype[np.float64]] +AR_f8_2d: np.ndarray[tuple[int, int], np.dtype[np.float64]] +AR_f8_3d: np.ndarray[tuple[int, int, int], np.dtype[np.float64]] +AR_c16_1d: np.ndarray[tuple[int], np.dtype[np.complex128]] + AR_b_list: list[npt.NDArray[np.bool]] -def func( - a: npt.NDArray[Any], - posarg: bool = ..., - /, - arg: int = ..., - *, - kwarg: str = ..., -) -> npt.NDArray[Any]: ... +@type_check_only +def func(a: np.ndarray, posarg: bool = ..., /, arg: int = ..., *, kwarg: str = ...) -> np.ndarray: ... +@type_check_only +def func_f8(a: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: ... +### + +# vectorize +vectorized_func: np.vectorize assert_type(vectorized_func.pyfunc, Callable[..., Any]) assert_type(vectorized_func.cache, bool) -assert_type(vectorized_func.signature, str | None) -assert_type(vectorized_func.otypes, str | None) +assert_type(vectorized_func.signature, LiteralString | None) +assert_type(vectorized_func.otypes, LiteralString | None) assert_type(vectorized_func.excluded, set[int | str]) assert_type(vectorized_func.__doc__, str | None) assert_type(vectorized_func([1]), Any) @@ -46,137 +57,270 @@ assert_type( np.vectorize, ) +# rot90 +assert_type(np.rot90(AR_f8_1d), np.ndarray[tuple[int], np.dtype[np.float64]]) assert_type(np.rot90(AR_f8, k=2), npt.NDArray[np.float64]) -assert_type(np.rot90(AR_LIKE_f8, axes=(0, 1)), npt.NDArray[Any]) +assert_type(np.rot90(AR_LIKE_f8, axes=(0, 1)), np.ndarray) -assert_type(np.flip(f8), np.float64) -assert_type(np.flip(1.0), Any) +# flip +assert_type(np.flip(AR_f8_1d), np.ndarray[tuple[int], np.dtype[np.float64]]) assert_type(np.flip(AR_f8, axis=(0, 1)), npt.NDArray[np.float64]) -assert_type(np.flip(AR_LIKE_f8, axis=0), npt.NDArray[Any]) +assert_type(np.flip(AR_LIKE_f8, axis=0), np.ndarray) +# iterable 
assert_type(np.iterable(1), bool) assert_type(np.iterable([1]), bool) -assert_type(np.average(AR_f8), np.floating) -assert_type(np.average(AR_f8, weights=AR_c16), np.complexfloating) +# average +assert_type(np.average(AR_f8_2d), np.float64) +assert_type(np.average(AR_f8_2d, axis=1), npt.NDArray[np.float64]) +assert_type(np.average(AR_f8_2d, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.average(AR_f8), np.float64) +assert_type(np.average(AR_f8, axis=1), npt.NDArray[np.float64]) +assert_type(np.average(AR_f8, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.average(AR_f8, returned=True), tuple[np.float64, np.float64]) +assert_type(np.average(AR_f8, axis=1, returned=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) +assert_type(np.average(AR_f8, keepdims=True, returned=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) +assert_type(np.average(AR_LIKE_f8), np.float64) +assert_type(np.average(AR_LIKE_f8, weights=AR_f8), np.float64) +assert_type(np.average(AR_LIKE_f8, axis=1), npt.NDArray[np.float64]) +assert_type(np.average(AR_LIKE_f8, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.average(AR_LIKE_f8, returned=True), tuple[np.float64, np.float64]) +assert_type(np.average(AR_LIKE_f8, axis=1, returned=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) +assert_type(np.average(AR_LIKE_f8, keepdims=True, returned=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) assert_type(np.average(AR_O), Any) -assert_type(np.average(AR_f8, returned=True), tuple[np.floating, np.floating]) -assert_type(np.average(AR_f8, weights=AR_c16, returned=True), tuple[np.complexfloating, np.complexfloating]) +assert_type(np.average(AR_O, axis=1), np.ndarray) +assert_type(np.average(AR_O, keepdims=True), np.ndarray) assert_type(np.average(AR_O, returned=True), tuple[Any, Any]) -assert_type(np.average(AR_f8, axis=0), Any) -assert_type(np.average(AR_f8, axis=0, returned=True), tuple[Any, Any]) 
+assert_type(np.average(AR_O, axis=1, returned=True), tuple[np.ndarray, np.ndarray]) +assert_type(np.average(AR_O, keepdims=True, returned=True), tuple[np.ndarray, np.ndarray]) +# asarray_chkfinite +assert_type(np.asarray_chkfinite(AR_f8_1d), np.ndarray[tuple[int], np.dtype[np.float64]]) assert_type(np.asarray_chkfinite(AR_f8), npt.NDArray[np.float64]) -assert_type(np.asarray_chkfinite(AR_LIKE_f8), npt.NDArray[Any]) +assert_type(np.asarray_chkfinite(AR_LIKE_f8), np.ndarray) assert_type(np.asarray_chkfinite(AR_f8, dtype=np.float64), npt.NDArray[np.float64]) -assert_type(np.asarray_chkfinite(AR_f8, dtype=float), npt.NDArray[Any]) +assert_type(np.asarray_chkfinite(AR_f8, dtype=float), np.ndarray) +# piecewise +assert_type(np.piecewise(AR_f8_1d, AR_b, [func]), np.ndarray[tuple[int], np.dtype[np.float64]]) assert_type(np.piecewise(AR_f8, AR_b, [func]), npt.NDArray[np.float64]) +assert_type(np.piecewise(AR_f8, AR_b, [func_f8]), npt.NDArray[np.float64]) assert_type(np.piecewise(AR_f8, AR_b_list, [func]), npt.NDArray[np.float64]) +assert_type(np.piecewise(AR_f8, AR_b_list, [func_f8]), npt.NDArray[np.float64]) assert_type(np.piecewise(AR_f8, AR_b_list, [func], True, -1, kwarg=""), npt.NDArray[np.float64]) assert_type(np.piecewise(AR_f8, AR_b_list, [func], True, arg=-1, kwarg=""), npt.NDArray[np.float64]) -assert_type(np.piecewise(AR_LIKE_f8, AR_b_list, [func]), npt.NDArray[Any]) +assert_type(np.piecewise(AR_LIKE_f8, AR_b_list, [func]), np.ndarray) +assert_type(np.piecewise(AR_LIKE_f8, AR_b_list, [func_f8]), npt.NDArray[np.float64]) -assert_type(np.select([AR_f8], [AR_f8]), npt.NDArray[Any]) +# extract +assert_type(np.extract(AR_i8, AR_f8), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.extract(AR_i8, AR_LIKE_b), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.extract(AR_i8, AR_LIKE_i8), np.ndarray[tuple[int], np.dtype[np.int_]]) +assert_type(np.extract(AR_i8, AR_LIKE_f8), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.extract(AR_i8, 
AR_LIKE_c16), np.ndarray[tuple[int], np.dtype[np.complex128]]) -assert_type(np.copy(AR_LIKE_f8), npt.NDArray[Any]) +# select +assert_type(np.select([AR_b], [AR_f8_1d]), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.select([AR_b], [AR_f8]), npt.NDArray[np.float64]) + +# places +assert_type(np.place(AR_f8, mask=AR_i8, vals=5.0), None) + +# copy +assert_type(np.copy(AR_LIKE_f8), np.ndarray) assert_type(np.copy(AR_U), npt.NDArray[np.str_]) -assert_type(np.copy(CHAR_AR_U), np.ndarray[Any, Any]) # pyright correctly infers `NDArray[str_]` assert_type(np.copy(CHAR_AR_U, "K", subok=True), np.char.chararray[tuple[Any, ...], np.dtype[np.str_]]) assert_type(np.copy(CHAR_AR_U, subok=True), np.char.chararray[tuple[Any, ...], np.dtype[np.str_]]) +# pyright correctly infers `NDArray[str_]` here +assert_type(np.copy(CHAR_AR_U), np.ndarray[Any, Any]) # pyright: ignore[reportAssertTypeFailure] -assert_type(np.gradient(AR_f8, axis=None), Any) -assert_type(np.gradient(AR_LIKE_f8, edge_order=2), Any) - -assert_type(np.diff("bob", n=0), str) -assert_type(np.diff(AR_f8, axis=0), npt.NDArray[Any]) -assert_type(np.diff(AR_LIKE_f8, prepend=1.5), npt.NDArray[Any]) - +# gradient +assert_type(np.gradient(AR_f8_1d, 1), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type( + np.gradient(AR_f8_2d, [1, 2], [2, 3.5, 4]), + tuple[ + np.ndarray[tuple[int, int], np.dtype[np.float64]], + np.ndarray[tuple[int, int], np.dtype[np.float64]], + ], +) +assert_type( + np.gradient(AR_f8_3d), + tuple[ + np.ndarray[tuple[int, int, int], np.dtype[np.float64]], + np.ndarray[tuple[int, int, int], np.dtype[np.float64]], + np.ndarray[tuple[int, int, int], np.dtype[np.float64]], + ], +) +assert_type(np.gradient(AR_f8), np.ndarray[tuple[int], np.dtype[np.float64]] | Any) +assert_type(np.gradient(AR_LIKE_f8, edge_order=2), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.gradient(AR_LIKE_c16, axis=0), np.ndarray[tuple[int], np.dtype[np.complex128]]) + +# diff +assert_type(np.diff("git", 
n=0), str) +assert_type(np.diff(AR_f8), npt.NDArray[np.float64]) +assert_type(np.diff(AR_f8_1d, axis=0), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.diff(AR_f8_2d, axis=0), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.diff(AR_LIKE_f8, prepend=1.5), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.diff(AR_c16), npt.NDArray[np.complex128]) +assert_type(np.diff(AR_c16_1d), np.ndarray[tuple[int], np.dtype[np.complex128]]) +assert_type(np.diff(AR_LIKE_c16), np.ndarray[tuple[int], np.dtype[np.complex128]]) + +# interp assert_type(np.interp(1, [1], AR_f8), np.float64) assert_type(np.interp(1, [1], [1]), np.float64) assert_type(np.interp(1, [1], AR_c16), np.complex128) -assert_type(np.interp(1, [1], [1j]), np.complex128) # pyright correctly infers `complex128 | float64` -assert_type(np.interp([1], [1], AR_f8), npt.NDArray[np.float64]) -assert_type(np.interp([1], [1], [1]), npt.NDArray[np.float64]) -assert_type(np.interp([1], [1], AR_c16), npt.NDArray[np.complex128]) -assert_type(np.interp([1], [1], [1j]), npt.NDArray[np.complex128]) # pyright correctly infers `NDArray[complex128 | float64]` - -assert_type(np.angle(f8), np.floating) -assert_type(np.angle(AR_f8), npt.NDArray[np.floating]) -assert_type(np.angle(AR_c16, deg=True), npt.NDArray[np.floating]) -assert_type(np.angle(AR_O), npt.NDArray[np.object_]) - -assert_type(np.unwrap(AR_f8), npt.NDArray[np.floating]) +assert_type(np.interp(1, [1], [1j]), np.complex128) +assert_type(np.interp([1], [1], AR_f8), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.interp([1], [1], [1]), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.interp([1], [1], AR_c16), np.ndarray[tuple[int], np.dtype[np.complex128]]) +assert_type(np.interp([1], [1], [1j]), np.ndarray[tuple[int], np.dtype[np.complex128]]) + +# angle +assert_type(np.angle(1), np.float64) +assert_type(np.angle(1, deg=True), np.float64) +assert_type(np.angle(1j), np.float64) +assert_type(np.angle(f8), 
np.float64) +assert_type(np.angle(AR_b), npt.NDArray[np.float64]) +assert_type(np.angle(AR_u1), npt.NDArray[np.float64]) +assert_type(np.angle(AR_i8), npt.NDArray[np.float64]) +assert_type(np.angle(AR_f2), npt.NDArray[np.float16]) +assert_type(np.angle(AR_f4), npt.NDArray[np.float32]) +assert_type(np.angle(AR_c8), npt.NDArray[np.float32]) +assert_type(np.angle(AR_f8), npt.NDArray[np.float64]) +assert_type(np.angle(AR_c16), npt.NDArray[np.float64]) +assert_type(np.angle(AR_f10), npt.NDArray[np.longdouble]) +assert_type(np.angle(AR_c20), npt.NDArray[np.longdouble]) +assert_type(np.angle(AR_f8_1d), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.angle(AR_c16_1d), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.angle(AR_LIKE_b), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.angle(AR_LIKE_i8), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.angle(AR_LIKE_f8), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.angle(AR_LIKE_c16), np.ndarray[tuple[int], np.dtype[np.float64]]) + +# unwrap +assert_type(np.unwrap(AR_f2), npt.NDArray[np.float16]) +assert_type(np.unwrap(AR_f8), npt.NDArray[np.float64]) +assert_type(np.unwrap(AR_f10), npt.NDArray[np.longdouble]) assert_type(np.unwrap(AR_O), npt.NDArray[np.object_]) - -assert_type(np.sort_complex(AR_f8), npt.NDArray[np.complexfloating]) - +assert_type(np.unwrap(AR_f8_1d), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.unwrap(AR_f8_2d), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.unwrap(AR_f8_3d), np.ndarray[tuple[int, int, int], np.dtype[np.float64]]) +assert_type(np.unwrap(AR_LIKE_b), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.unwrap(AR_LIKE_i8), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.unwrap(AR_LIKE_f8), np.ndarray[tuple[int], np.dtype[np.float64]]) + +# sort_complex +assert_type(np.sort_complex(AR_u1), npt.NDArray[np.complex64]) +assert_type(np.sort_complex(AR_f8), 
npt.NDArray[np.complex128]) +assert_type(np.sort_complex(AR_f10), npt.NDArray[np.clongdouble]) +assert_type(np.sort_complex(AR_f8_1d), np.ndarray[tuple[int], np.dtype[np.complex128]]) +assert_type(np.sort_complex(AR_c16_1d), np.ndarray[tuple[int], np.dtype[np.complex128]]) + +# trim_zeros assert_type(np.trim_zeros(AR_f8), npt.NDArray[np.float64]) assert_type(np.trim_zeros(AR_LIKE_f8), list[float]) -assert_type(np.extract(AR_i8, AR_f8), npt.NDArray[np.float64]) -assert_type(np.extract(AR_i8, AR_LIKE_f8), npt.NDArray[Any]) - -assert_type(np.place(AR_f8, mask=AR_i8, vals=5.0), None) - -assert_type(np.cov(AR_f8, bias=True), npt.NDArray[np.floating]) -assert_type(np.cov(AR_f8, AR_c16, ddof=1), npt.NDArray[np.complexfloating]) -assert_type(np.cov(AR_f8, aweights=AR_f8, dtype=np.float32), npt.NDArray[np.float32]) -assert_type(np.cov(AR_f8, fweights=AR_f8, dtype=float), npt.NDArray[Any]) - -assert_type(np.corrcoef(AR_f8, rowvar=True), npt.NDArray[np.floating]) -assert_type(np.corrcoef(AR_f8, AR_c16), npt.NDArray[np.complexfloating]) -assert_type(np.corrcoef(AR_f8, dtype=np.float32), npt.NDArray[np.float32]) -assert_type(np.corrcoef(AR_f8, dtype=float), npt.NDArray[Any]) - -assert_type(np.blackman(5), npt.NDArray[np.floating]) -assert_type(np.bartlett(6), npt.NDArray[np.floating]) -assert_type(np.hanning(4.5), npt.NDArray[np.floating]) -assert_type(np.hamming(0), npt.NDArray[np.floating]) -assert_type(np.i0(AR_i8), npt.NDArray[np.floating]) -assert_type(np.kaiser(4, 5.9), npt.NDArray[np.floating]) - -assert_type(np.sinc(1.0), np.floating) -assert_type(np.sinc(1j), np.complexfloating) -assert_type(np.sinc(AR_f8), npt.NDArray[np.floating]) -assert_type(np.sinc(AR_c16), npt.NDArray[np.complexfloating]) - -assert_type(np.median(AR_f8, keepdims=False), np.floating) -assert_type(np.median(AR_c16, overwrite_input=True), np.complexfloating) +# cov +assert_type(np.cov(AR_f8_1d), np.ndarray[tuple[()], np.dtype[np.float64]]) +assert_type(np.cov(AR_f8_2d), npt.NDArray[np.float64]) 
+assert_type(np.cov(AR_f8), npt.NDArray[np.float64]) +assert_type(np.cov(AR_f8, AR_f8), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.cov(AR_c16, AR_c16), np.ndarray[tuple[int, int], np.dtype[np.complex128]]) +assert_type(np.cov(AR_LIKE_f8), np.ndarray[tuple[()], np.dtype[np.float64]]) +assert_type(np.cov(AR_LIKE_f8, AR_LIKE_f8), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.cov(AR_LIKE_f8, dtype=np.float16), np.ndarray[tuple[()], np.dtype[np.float16]]) +assert_type(np.cov(AR_LIKE_f8, AR_LIKE_f8, dtype=np.float32), np.ndarray[tuple[int, int], np.dtype[np.float32]]) +assert_type(np.cov(AR_f8, AR_f8, dtype=float), np.ndarray[tuple[int, int]]) +assert_type(np.cov(AR_LIKE_f8, dtype=float), np.ndarray[tuple[()]]) +assert_type(np.cov(AR_LIKE_f8, AR_LIKE_f8, dtype=float), np.ndarray[tuple[int, int]]) + +# corrcoef +assert_type(np.corrcoef(AR_f8_1d), np.float64) +assert_type(np.corrcoef(AR_f8_2d), np.ndarray[tuple[int, int], np.dtype[np.float64]] | np.float64) +assert_type(np.corrcoef(AR_f8), np.ndarray[tuple[int, int], np.dtype[np.float64]] | np.float64) +assert_type(np.corrcoef(AR_f8, AR_f8), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.corrcoef(AR_c16, AR_c16), np.ndarray[tuple[int, int], np.dtype[np.complex128]]) +assert_type(np.corrcoef(AR_LIKE_f8), np.float64) +assert_type(np.corrcoef(AR_LIKE_f8, AR_LIKE_f8), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.corrcoef(AR_LIKE_f8, dtype=np.float16), np.float16) +assert_type(np.corrcoef(AR_LIKE_f8, AR_LIKE_f8, dtype=np.float32), np.ndarray[tuple[int, int], np.dtype[np.float32]]) +assert_type(np.corrcoef(AR_f8, AR_f8, dtype=float), np.ndarray[tuple[int, int]]) +assert_type(np.corrcoef(AR_LIKE_f8, dtype=float), Any) +assert_type(np.corrcoef(AR_LIKE_f8, AR_LIKE_f8, dtype=float), np.ndarray[tuple[int, int]]) + +# window functions +assert_type(np.blackman(5), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.bartlett(6), 
np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.hanning(4.5), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.hamming(0), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.kaiser(4, 5.9), np.ndarray[tuple[int], np.dtype[np.float64]]) + +# i0 (bessel function) +assert_type(np.i0(AR_i8), npt.NDArray[np.float64]) + +# sinc (cardinal sine function) +assert_type(np.sinc(1.0), np.float64) +assert_type(np.sinc(1j), np.complex128 | Any) +assert_type(np.sinc(AR_f8), npt.NDArray[np.float64]) +assert_type(np.sinc(AR_c16), npt.NDArray[np.complex128]) +assert_type(np.sinc(AR_LIKE_f8), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.sinc(AR_LIKE_c16), np.ndarray[tuple[int], np.dtype[np.complex128]]) + +# median +assert_type(np.median(AR_f8, keepdims=False), np.float64) +assert_type(np.median(AR_c16, overwrite_input=True), np.complex128) assert_type(np.median(AR_m), np.timedelta64) assert_type(np.median(AR_O), Any) -assert_type(np.median(AR_f8, keepdims=True), Any) -assert_type(np.median(AR_c16, axis=0), Any) +assert_type(np.median(AR_f8, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.median(AR_f8, axis=0), npt.NDArray[np.float64]) +assert_type(np.median(AR_c16, keepdims=True), npt.NDArray[np.complex128]) +assert_type(np.median(AR_c16, axis=0), npt.NDArray[np.complex128]) +assert_type(np.median(AR_LIKE_f8, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.median(AR_LIKE_c16, keepdims=True), npt.NDArray[np.complex128]) assert_type(np.median(AR_LIKE_f8, out=AR_c16), npt.NDArray[np.complex128]) -assert_type(np.percentile(AR_f8, 50), np.floating) -assert_type(np.percentile(AR_c16, 50), np.complexfloating) +# percentile +assert_type(np.percentile(AR_f8, 50), np.float64) +assert_type(np.percentile(AR_f8, 50, axis=1), npt.NDArray[np.float64]) +assert_type(np.percentile(AR_f8, 50, axis=(1, 0)), npt.NDArray[np.float64]) +assert_type(np.percentile(AR_f8, 50, keepdims=True), npt.NDArray[np.float64]) 
+assert_type(np.percentile(AR_f8, 50, axis=0, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.percentile(AR_c16, 50), np.complex128) assert_type(np.percentile(AR_m, 50), np.timedelta64) assert_type(np.percentile(AR_M, 50, overwrite_input=True), np.datetime64) assert_type(np.percentile(AR_O, 50), Any) -assert_type(np.percentile(AR_f8, [50]), npt.NDArray[np.floating]) -assert_type(np.percentile(AR_c16, [50]), npt.NDArray[np.complexfloating]) +assert_type(np.percentile(AR_f8, [50]), npt.NDArray[np.float64]) +assert_type(np.percentile(AR_f8, [50], axis=1), npt.NDArray[np.float64]) +assert_type(np.percentile(AR_f8, [50], keepdims=True), npt.NDArray[np.float64]) +assert_type(np.percentile(AR_c16, [50]), npt.NDArray[np.complex128]) assert_type(np.percentile(AR_m, [50]), npt.NDArray[np.timedelta64]) assert_type(np.percentile(AR_M, [50], method="nearest"), npt.NDArray[np.datetime64]) assert_type(np.percentile(AR_O, [50]), npt.NDArray[np.object_]) -assert_type(np.percentile(AR_f8, [50], keepdims=True), Any) -assert_type(np.percentile(AR_f8, [50], axis=[1]), Any) +assert_type(np.percentile(AR_f8, [50], keepdims=True), npt.NDArray[np.float64]) assert_type(np.percentile(AR_f8, [50], out=AR_c16), npt.NDArray[np.complex128]) -assert_type(np.quantile(AR_f8, 0.5), np.floating) -assert_type(np.quantile(AR_c16, 0.5), np.complexfloating) -assert_type(np.quantile(AR_m, 0.5), np.timedelta64) -assert_type(np.quantile(AR_M, 0.5, overwrite_input=True), np.datetime64) -assert_type(np.quantile(AR_O, 0.5), Any) -assert_type(np.quantile(AR_f8, [0.5]), npt.NDArray[np.floating]) -assert_type(np.quantile(AR_c16, [0.5]), npt.NDArray[np.complexfloating]) -assert_type(np.quantile(AR_m, [0.5]), npt.NDArray[np.timedelta64]) -assert_type(np.quantile(AR_M, [0.5], method="nearest"), npt.NDArray[np.datetime64]) -assert_type(np.quantile(AR_O, [0.5]), npt.NDArray[np.object_]) -assert_type(np.quantile(AR_f8, [0.5], keepdims=True), Any) -assert_type(np.quantile(AR_f8, [0.5], axis=[1]), Any) 
-assert_type(np.quantile(AR_f8, [0.5], out=AR_c16), npt.NDArray[np.complex128]) - +# quantile +assert_type(np.quantile(AR_f8, 0.50), np.float64) +assert_type(np.quantile(AR_f8, 0.50, axis=1), npt.NDArray[np.float64]) +assert_type(np.quantile(AR_f8, 0.50, axis=(1, 0)), npt.NDArray[np.float64]) +assert_type(np.quantile(AR_f8, 0.50, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.quantile(AR_f8, 0.50, axis=0, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.quantile(AR_c16, 0.50), np.complex128) +assert_type(np.quantile(AR_m, 0.50), np.timedelta64) +assert_type(np.quantile(AR_M, 0.50, overwrite_input=True), np.datetime64) +assert_type(np.quantile(AR_O, 0.50), Any) +assert_type(np.quantile(AR_f8, [0.50]), npt.NDArray[np.float64]) +assert_type(np.quantile(AR_f8, [0.50], axis=1), npt.NDArray[np.float64]) +assert_type(np.quantile(AR_f8, [0.50], keepdims=True), npt.NDArray[np.float64]) +assert_type(np.quantile(AR_c16, [0.50]), npt.NDArray[np.complex128]) +assert_type(np.quantile(AR_m, [0.50]), npt.NDArray[np.timedelta64]) +assert_type(np.quantile(AR_M, [0.50], method="nearest"), npt.NDArray[np.datetime64]) +assert_type(np.quantile(AR_O, [0.50]), npt.NDArray[np.object_]) +assert_type(np.quantile(AR_f8, [0.50], keepdims=True), npt.NDArray[np.float64]) +assert_type(np.quantile(AR_f8, [0.50], out=AR_c16), npt.NDArray[np.complex128]) + +# trapezoid assert_type(np.trapezoid(AR_LIKE_f8), np.float64) assert_type(np.trapezoid(AR_LIKE_f8, AR_LIKE_f8), np.float64) assert_type(np.trapezoid(AR_LIKE_c16), np.complex128) @@ -189,25 +333,77 @@ assert_type(np.trapezoid(AR_f8, AR_f8), np.float64 | npt.NDArray[np.float64]) assert_type(np.trapezoid(AR_c16), np.complex128 | npt.NDArray[np.complex128]) assert_type(np.trapezoid(AR_c16, AR_c16), np.complex128 | npt.NDArray[np.complex128]) assert_type(np.trapezoid(AR_m), np.timedelta64 | npt.NDArray[np.timedelta64]) -assert_type(np.trapezoid(AR_O), float | npt.NDArray[np.object_]) -assert_type(np.trapezoid(AR_O, AR_LIKE_f8), float 
| npt.NDArray[np.object_]) +assert_type(np.trapezoid(AR_O), npt.NDArray[np.object_] | Any) +assert_type(np.trapezoid(AR_O, AR_LIKE_f8), npt.NDArray[np.object_] | Any) +# meshgrid assert_type(np.meshgrid(), tuple[()]) -assert_type(np.meshgrid(AR_c16, indexing="ij"), tuple[npt.NDArray[np.complex128]]) -assert_type(np.meshgrid(AR_i8, AR_f8, copy=False), tuple[npt.NDArray[np.int64], npt.NDArray[np.float64]]) -assert_type(np.meshgrid(AR_LIKE_f8, AR_f8), tuple[npt.NDArray[Any], npt.NDArray[np.float64]]) -assert_type(np.meshgrid(AR_LIKE_f8, AR_i8, AR_c16), tuple[npt.NDArray[Any], npt.NDArray[Any], npt.NDArray[Any]]) -assert_type(np.meshgrid(AR_f8, AR_f8, AR_f8, AR_f8), tuple[npt.NDArray[Any], npt.NDArray[Any], npt.NDArray[Any], npt.NDArray[Any]]) -assert_type(np.meshgrid(*AR_LIKE_f8), tuple[npt.NDArray[Any], ...]) +assert_type( + np.meshgrid(AR_f8), + tuple[ + np.ndarray[tuple[int], np.dtype[np.float64]], + ], +) +assert_type( + np.meshgrid(AR_c16, indexing="ij"), + tuple[ + np.ndarray[tuple[int], np.dtype[np.complex128]], + ], +) +assert_type( + np.meshgrid(AR_i8, AR_f8, copy=False), + tuple[ + np.ndarray[tuple[int, int], np.dtype[np.int64]], + np.ndarray[tuple[int, int], np.dtype[np.float64]], + ], +) +assert_type( + np.meshgrid(AR_LIKE_f8, AR_f8), + tuple[ + np.ndarray[tuple[int, int]], + np.ndarray[tuple[int, int], np.dtype[np.float64]], + ], +) +assert_type( + np.meshgrid(AR_f8, AR_LIKE_f8), + tuple[ + np.ndarray[tuple[int, int], np.dtype[np.float64]], + np.ndarray[tuple[int, int]], + ], +) +assert_type( + np.meshgrid(AR_LIKE_f8, AR_LIKE_f8), + tuple[ + np.ndarray[tuple[int, int]], + np.ndarray[tuple[int, int]], + ], +) +assert_type( + np.meshgrid(AR_f8, AR_i8, AR_c16), + tuple[ + np.ndarray[tuple[int, int, int], np.dtype[np.float64]], + np.ndarray[tuple[int, int, int], np.dtype[np.int64]], + np.ndarray[tuple[int, int, int], np.dtype[np.complex128]], + ], +) +assert_type(np.meshgrid(AR_f8, AR_f8, AR_f8, AR_f8), tuple[npt.NDArray[np.float64], ...]) 
+assert_type(np.meshgrid(AR_f8, AR_f8, AR_f8, AR_LIKE_f8), tuple[np.ndarray, ...]) +assert_type(np.meshgrid(*AR_LIKE_f8), tuple[np.ndarray, ...]) -assert_type(np.delete(AR_f8, np.s_[:5]), npt.NDArray[np.float64]) -assert_type(np.delete(AR_LIKE_f8, [0, 4, 9], axis=0), npt.NDArray[Any]) +# delete +assert_type(np.delete(AR_f8, np.s_[:5]), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.delete(AR_LIKE_f8, [0, 4, 9], axis=0), np.ndarray) -assert_type(np.insert(AR_f8, np.s_[:5], 5), npt.NDArray[np.float64]) -assert_type(np.insert(AR_LIKE_f8, [0, 4, 9], [0.5, 9.2, 7], axis=0), npt.NDArray[Any]) +# insert +assert_type(np.insert(AR_f8, np.s_[:5], 5), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.insert(AR_LIKE_f8, [0, 4, 9], [0.5, 9.2, 7], axis=0), np.ndarray) -assert_type(np.append(AR_f8, 5), npt.NDArray[Any]) -assert_type(np.append(AR_LIKE_f8, 1j, axis=0), npt.NDArray[Any]) +# append +assert_type(np.append(f8, f8), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.append(AR_f8, AR_f8), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.append(AR_LIKE_f8, AR_LIKE_c16, axis=0), np.ndarray) +assert_type(np.append(AR_f8, AR_LIKE_f8, axis=0), np.ndarray) +# digitize assert_type(np.digitize(4.5, [1]), np.intp) assert_type(np.digitize(AR_f8, [1, 2, 3]), npt.NDArray[np.intp]) From 3169a79ea90ccd40989dbbbcdeced337620da7b3 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Mon, 24 Nov 2025 17:34:16 +0100 Subject: [PATCH 0908/1718] TYP: fix shape-type of structured array fields (#30261) * TYP: fix shape-type of structed array fields * TYP: structured array field shape-type test --- numpy/__init__.pyi | 6 +++--- numpy/typing/tests/data/reveal/ndarray_assignability.pyi | 7 ++++++- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 7d91e3d384de..b607c54aa425 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2197,10 +2197,10 @@ class ndarray(_ArrayOrScalarCommon, 
Generic[_ShapeT_co, _DTypeT_co]): def __getitem__(self, key: SupportsIndex | tuple[SupportsIndex, ...], /) -> Any: ... @overload def __getitem__(self, key: _ToIndices, /) -> ndarray[_AnyShape, _DTypeT_co]: ... + @overload # can be of any shape + def __getitem__(self: NDArray[void], key: str, /) -> ndarray[_ShapeT_co | _AnyShape]: ... @overload - def __getitem__(self: NDArray[void], key: str, /) -> ndarray[_ShapeT_co, np.dtype]: ... - @overload - def __getitem__(self: NDArray[void], key: list[str], /) -> ndarray[_ShapeT_co, _dtype[void]]: ... + def __getitem__(self: NDArray[void], key: list[str], /) -> ndarray[_ShapeT_co | _AnyShape, dtype[void]]: ... @overload # flexible | object_ | bool def __setitem__( diff --git a/numpy/typing/tests/data/reveal/ndarray_assignability.pyi b/numpy/typing/tests/data/reveal/ndarray_assignability.pyi index d754a94003d3..feaccf28f578 100644 --- a/numpy/typing/tests/data/reveal/ndarray_assignability.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_assignability.pyi @@ -1,4 +1,4 @@ -from typing import Protocol, TypeAlias, TypeVar, assert_type +from typing import Any, Protocol, TypeAlias, TypeVar, assert_type import numpy as np from numpy._typing import _64Bit @@ -33,6 +33,7 @@ _LongDouble_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.longdouble]] _Complex64_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complex64]] _Complex128_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complex128]] _CLongDouble_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.clongdouble]] +_Void_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.void]] b1_1d: _Bool_1d u1_1d: _UInt8_1d @@ -44,6 +45,7 @@ g_1d: _LongDouble_1d c8_1d: _Complex64_1d c16_1d: _Complex128_1d G_1d: _CLongDouble_1d +V_1d: _Void_1d assert_type(do_abs(b1_1d), _Bool_1d) assert_type(do_abs(u1_1d), _UInt8_1d) @@ -75,3 +77,6 @@ assert_type(do_pos(i2_1d), _Int16_1d) assert_type(do_pos(q_1d), _LongLong_1d) assert_type(do_pos(f4_1d), _Float32_1d) assert_type(do_pos(c16_1d), 
_Complex128_1d) + +# this shape is effectively equivalent to `tuple[int, *tuple[Any, ...]]`, i.e. ndim >= 1 +assert_type(V_1d["field"], np.ndarray[tuple[int] | tuple[Any, ...]]) From cd352ddc8a00a869df136d8def51c3cb630b5d09 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Mon, 24 Nov 2025 17:37:35 +0100 Subject: [PATCH 0909/1718] DEP, TYP: ``ndarray.shape`` setter pending deprecation (#30282) * DEP, TYP: ``ndarray.shape`` setter pending deprecation * DOC: release note for the pending deprecation of `ndarray.shape` --- doc/release/upcoming_changes/30282.deprecation.rst | 5 +++++ numpy/__init__.pyi | 7 +++++++ 2 files changed, 12 insertions(+) create mode 100644 doc/release/upcoming_changes/30282.deprecation.rst diff --git a/doc/release/upcoming_changes/30282.deprecation.rst b/doc/release/upcoming_changes/30282.deprecation.rst new file mode 100644 index 000000000000..e9aac9ae17d5 --- /dev/null +++ b/doc/release/upcoming_changes/30282.deprecation.rst @@ -0,0 +1,5 @@ +in-place modification of ``ndarray.shape`` is pending deprecation +----------------------------------------------------------------- +Setting the `ndarray.shape` attribute directly will be deprecated in a future release. +Instead of modifying the shape in place, it is recommended to use the `numpy.reshape` function. +Static type checkers might already report a warning for assignments to `ndarray.shape`. diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index b607c54aa425..977ad37d80af 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2251,15 +2251,22 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @property def ctypes(self) -> _ctypes[int]: ... + + # @property def shape(self) -> _ShapeT_co: ... @shape.setter + @deprecated("In-place shape modification will be deprecated in NumPy 2.5.", category=PendingDeprecationWarning) def shape(self, value: _ShapeLike) -> None: ... + + # @property def strides(self) -> _Shape: ... 
@strides.setter @deprecated("Setting the strides on a NumPy array has been deprecated in NumPy 2.4") def strides(self, value: _ShapeLike) -> None: ... + + # def byteswap(self, inplace: builtins.bool = ...) -> Self: ... @property def flat(self) -> flatiter[Self]: ... From 41163fd996084ad9fde73db50865101698944e4b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 24 Nov 2025 18:07:05 +0000 Subject: [PATCH 0910/1718] MAINT: Bump github/codeql-action from 4.31.4 to 4.31.5 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 4.31.4 to 4.31.5. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/e12f0178983d466f2f6028f5cc7a6d786fd97f4b...fdbfb4d2750291e159f0156def62b853c2798ca2) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 4.31.5 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index ae31d4f33040..c758d05e43ab 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@e12f0178983d466f2f6028f5cc7a6d786fd97f4b # v4.31.4 + uses: github/codeql-action/init@fdbfb4d2750291e159f0156def62b853c2798ca2 # v4.31.5 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 
# If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@e12f0178983d466f2f6028f5cc7a6d786fd97f4b # v4.31.4 + uses: github/codeql-action/autobuild@fdbfb4d2750291e159f0156def62b853c2798ca2 # v4.31.5 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@e12f0178983d466f2f6028f5cc7a6d786fd97f4b # v4.31.4 + uses: github/codeql-action/analyze@fdbfb4d2750291e159f0156def62b853c2798ca2 # v4.31.5 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 5b5a78c59029..d8daf8779d92 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@e12f0178983d466f2f6028f5cc7a6d786fd97f4b # v2.1.27 + uses: github/codeql-action/upload-sarif@fdbfb4d2750291e159f0156def62b853c2798ca2 # v2.1.27 with: sarif_file: results.sarif From c293cee5dd52a087824dc61aa02c97c70d4f7d2c Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Mon, 24 Nov 2025 20:37:59 +0100 Subject: [PATCH 0911/1718] TYP: ``_core.overrides.set_module`` implicit re-export (#30289) This fixes mypy errors in e.g. `numpy/fft/_helper.py`, which imports `set_module` from `numpy._core.overrides`. 
--- numpy/_core/overrides.pyi | 2 ++ 1 file changed, 2 insertions(+) diff --git a/numpy/_core/overrides.pyi b/numpy/_core/overrides.pyi index 91d624203e81..6ef52566d782 100644 --- a/numpy/_core/overrides.pyi +++ b/numpy/_core/overrides.pyi @@ -1,6 +1,8 @@ from collections.abc import Callable, Iterable from typing import Any, Final, NamedTuple, ParamSpec, TypeAlias, TypeVar +from numpy._utils import set_module as set_module + _T = TypeVar("_T") _Tss = ParamSpec("_Tss") _FuncLikeT = TypeVar("_FuncLikeT", bound=type | Callable[..., object]) From 4a24fc4eb8681e69f2910f0bfc1e929be0453ad0 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Mon, 24 Nov 2025 20:39:18 +0100 Subject: [PATCH 0912/1718] TYP: move the ``normalize_axis_*`` function definitions from ``lib`` to ``_core`` (#30290) --- numpy/_core/multiarray.pyi | 4 +++- numpy/_core/numeric.pyi | 9 +++++++-- numpy/lib/_array_utils_impl.pyi | 22 +++------------------- 3 files changed, 13 insertions(+), 22 deletions(-) diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index ee8a9c4f67d8..0293d193cbc4 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -90,7 +90,6 @@ from numpy._typing._ufunc import ( _PyFunc_Nin2_Nout1, _PyFunc_Nin3P_Nout1, ) -from numpy.lib._array_utils_impl import normalize_axis_index __all__ = [ "_ARRAY_API", @@ -538,6 +537,9 @@ def unravel_index(indices: _IntLike_co, shape: _ShapeLike, order: _OrderCF = "C" @overload def unravel_index(indices: _ArrayLikeInt_co, shape: _ShapeLike, order: _OrderCF = "C") -> tuple[NDArray[intp], ...]: ... +# +def normalize_axis_index(axis: int, ndim: int, msg_prefix: str | None = None) -> int: ... 
+ # NOTE: Allow any sequence of array-like objects @overload def concatenate( diff --git a/numpy/_core/numeric.pyi b/numpy/_core/numeric.pyi index 9be6000f0a8a..4ad2881cf17e 100644 --- a/numpy/_core/numeric.pyi +++ b/numpy/_core/numeric.pyi @@ -1,6 +1,6 @@ from _typeshed import Incomplete from builtins import bool as py_bool -from collections.abc import Callable, Sequence +from collections.abc import Callable, Iterable, Sequence from typing import ( Any, Final, @@ -53,7 +53,6 @@ from numpy._typing import ( _SupportsArrayFunc, _SupportsDType, ) -from numpy.lib._array_utils_impl import normalize_axis_tuple as normalize_axis_tuple from ._asarray import require from ._ufunc_config import ( @@ -1087,6 +1086,12 @@ def roll(a: ArrayLike, shift: _ShapeLike, axis: _ShapeLike | None = None) -> NDA # def rollaxis(a: _ArrayT, axis: int, start: int = 0) -> _ArrayT: ... def moveaxis(a: _ArrayT, source: _ShapeLike, destination: _ShapeLike) -> _ArrayT: ... +def normalize_axis_tuple( + axis: int | Iterable[int], + ndim: int, + argname: str | None = None, + allow_duplicate: py_bool | None = False, +) -> tuple[int, ...]: ... # @overload # 0d, dtype=int (default), sparse=False (default) diff --git a/numpy/lib/_array_utils_impl.pyi b/numpy/lib/_array_utils_impl.pyi index 1b290829caf4..e33507a127c9 100644 --- a/numpy/lib/_array_utils_impl.pyi +++ b/numpy/lib/_array_utils_impl.pyi @@ -1,8 +1,5 @@ -from collections.abc import Iterable -from typing import Any - -from numpy import generic -from numpy.typing import NDArray +import numpy as np +from numpy._core.numeric import normalize_axis_index, normalize_axis_tuple __all__ = ["byte_bounds", "normalize_axis_tuple", "normalize_axis_index"] @@ -10,17 +7,4 @@ __all__ = ["byte_bounds", "normalize_axis_tuple", "normalize_axis_index"] # implementing the `__array_interface__` protocol. The caveat is # that certain keys, marked as optional in the spec, must be present for # `byte_bounds`. This concerns `"strides"` and `"data"`. 
-def byte_bounds(a: generic | NDArray[Any]) -> tuple[int, int]: ... - -def normalize_axis_tuple( - axis: int | Iterable[int], - ndim: int, - argname: str | None = None, - allow_duplicate: bool | None = False, -) -> tuple[int, ...]: ... - -def normalize_axis_index( - axis: int = ..., - ndim: int = ..., - msg_prefix: str | None = ..., -) -> int: ... +def byte_bounds(a: np.generic | np.ndarray) -> tuple[int, int]: ... From 4329d425d9caa88d9eb00f2c0e07abf30041d871 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Tue, 25 Nov 2025 00:39:38 +0100 Subject: [PATCH 0913/1718] TYP: ``lib._function_base_impl._quantile_ureduce_func`` inline annotation fix (#30291) --- numpy/lib/_function_base_impl.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index f37f69bcb5dd..09ab5e91a33c 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -4627,7 +4627,7 @@ def _inverted_cdf(n, quantiles): def _quantile_ureduce_func( a: np.ndarray, q: np.ndarray, - weights: np.ndarray, + weights: np.ndarray | None, axis: int | None = None, out: np.ndarray | None = None, overwrite_input: bool = False, From 786f92cc7297d55f6d5d365c734f747d98d6897c Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Tue, 25 Nov 2025 00:41:13 +0100 Subject: [PATCH 0914/1718] TYP: move ``vectorize`` stubs to ``lib._function_base_impl`` (#30293) --- numpy/__init__.pyi | 22 +--------------------- numpy/lib/_function_base_impl.pyi | 23 ++++++++++++++++++++++- 2 files changed, 23 insertions(+), 22 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 977ad37d80af..e19b130d73b6 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -468,6 +468,7 @@ from numpy.lib._function_base_impl import ( # type: ignore[deprecated] append, interp, quantile, + vectorize, ) from numpy.lib._histograms_impl import ( @@ -6104,27 +6105,6 @@ class memmap(ndarray[_ShapeT_co, _DTypeT_co]): ) -> Any: 
... def flush(self) -> None: ... -# TODO: Add a mypy plugin for managing functions whose output type is dependent -# on the literal value of some sort of signature (e.g. `einsum` and `vectorize`) -class vectorize: - pyfunc: Callable[..., Any] - cache: builtins.bool - signature: LiteralString | None - otypes: LiteralString | None - excluded: set[int | str] - __doc__: str | None - def __init__( - self, - /, - pyfunc: Callable[..., Any] | _NoValueType = ..., # = _NoValue - otypes: str | Iterable[DTypeLike] | None = None, - doc: str | None = None, - excluded: Iterable[int | str] | None = None, - cache: builtins.bool = False, - signature: str | None = None, - ) -> None: ... - def __call__(self, *args: Any, **kwargs: Any) -> Any: ... - class poly1d: @property def variable(self) -> LiteralString: ... diff --git a/numpy/lib/_function_base_impl.pyi b/numpy/lib/_function_base_impl.pyi index 48d3743983b3..d68918560b69 100644 --- a/numpy/lib/_function_base_impl.pyi +++ b/numpy/lib/_function_base_impl.pyi @@ -16,7 +16,7 @@ from typing import ( from typing_extensions import TypeIs, TypeVar import numpy as np -from numpy import _OrderKACF, vectorize +from numpy import _OrderKACF from numpy._core.multiarray import bincount from numpy._globals import _NoValueType from numpy._typing import ( @@ -190,6 +190,27 @@ class _SizedIterable(Protocol[_T_co]): ### +class vectorize: + __doc__: str | None + __module__: L["numpy"] = "numpy" + pyfunc: Callable[..., Incomplete] + cache: bool + signature: str | None + otypes: str | None + excluded: set[int | str] + + def __init__( + self, + /, + pyfunc: Callable[..., Incomplete] | _NoValueType = ..., # = _NoValue + otypes: str | Iterable[DTypeLike] | None = None, + doc: str | None = None, + excluded: Iterable[int | str] | None = None, + cache: bool = False, + signature: str | None = None, + ) -> None: ... + def __call__(self, /, *args: Incomplete, **kwargs: Incomplete) -> Incomplete: ... 
+ @overload def rot90(m: _ArrayT, k: int = 1, axes: tuple[int, int] = (0, 1)) -> _ArrayT: ... @overload From 863d93a5195344005a5b9a9bdf8f609c3fcf7eb6 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Tue, 25 Nov 2025 00:44:34 +0100 Subject: [PATCH 0915/1718] TYP: `_core.*`: stubs for some private functions and constants (#30294) --- numpy/_core/shape_base.pyi | 4 ++++ numpy/_core/umath.pyi | 33 +++++++++++++++++++++++++++++++++ 2 files changed, 37 insertions(+) diff --git a/numpy/_core/shape_base.pyi b/numpy/_core/shape_base.pyi index a0fbefce0b3c..b87cae8a5f0f 100644 --- a/numpy/_core/shape_base.pyi +++ b/numpy/_core/shape_base.pyi @@ -15,6 +15,7 @@ __all__ = [ "vstack", ] +_T = TypeVar("_T") _ScalarT = TypeVar("_ScalarT", bound=generic) _ScalarT1 = TypeVar("_ScalarT1", bound=generic) _ScalarT2 = TypeVar("_ScalarT2", bound=generic) @@ -64,6 +65,9 @@ def atleast_3d(a0: ArrayLike, a1: ArrayLike, /) -> tuple[NDArray[Any], NDArray[A @overload def atleast_3d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[NDArray[Any], ...]: ... +# used by numpy.lib._shape_base_impl +def _arrays_for_stack_dispatcher(arrays: Sequence[_T]) -> tuple[_T, ...]: ... 
+ # keep in sync with `numpy.ma.extras.vstack` @overload def vstack( diff --git a/numpy/_core/umath.pyi b/numpy/_core/umath.pyi index d9f0d384cf6d..d3b662d79d66 100644 --- a/numpy/_core/umath.pyi +++ b/numpy/_core/umath.pyi @@ -1,3 +1,9 @@ +import contextvars +from _typeshed import SupportsWrite +from collections.abc import Callable +from typing import Any, Final, Literal, TypeAlias, TypedDict, Unpack, type_check_only +from typing_extensions import CapsuleType + from numpy import ( absolute, add, @@ -195,3 +201,30 @@ __all__ = [ "vecdot", "vecmat", ] + +### + +_ErrKind: TypeAlias = Literal["ignore", "warn", "raise", "call", "print", "log"] +_ErrCall: TypeAlias = Callable[[str, int], Any] | SupportsWrite[str] + +@type_check_only +class _ExtOjbDict(TypedDict, total=False): + divide: _ErrKind + over: _ErrKind + under: _ErrKind + invalid: _ErrKind + call: _ErrCall | None + bufsize: int + +# re-exports from `_core._multiarray_umath` that are used by `_core._ufunc_config` + +NAN: Final[float] = float("nan") +PINF: Final[float] = float("+inf") +NINF: Final[float] = float("-inf") +PZERO: Final[float] = +0.0 +NZERO: Final[float] = -0.0 +_UFUNC_API: Final[CapsuleType] = ... +_extobj_contextvar: Final[contextvars.ContextVar[CapsuleType]] = ... + +def _get_extobj_dict() -> _ExtOjbDict: ... +def _make_extobj(*, all: _ErrKind = ..., **kwargs: Unpack[_ExtOjbDict]) -> CapsuleType: ... 
From ee10fbb6138caf3b724e65f570c64807c2120e4b Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 25 Nov 2025 02:16:05 +0100 Subject: [PATCH 0916/1718] MAINT: remove ``lib._shape_base_impl._replace_zero_by_x_arrays`` --- numpy/lib/_shape_base_impl.py | 9 --------- 1 file changed, 9 deletions(-) diff --git a/numpy/lib/_shape_base_impl.py b/numpy/lib/_shape_base_impl.py index d1e55a48d711..c9e0fd316e04 100644 --- a/numpy/lib/_shape_base_impl.py +++ b/numpy/lib/_shape_base_impl.py @@ -730,15 +730,6 @@ def dstack(tup): return _nx.concatenate(arrs, 2) -def _replace_zero_by_x_arrays(sub_arys): - for i in range(len(sub_arys)): - if _nx.ndim(sub_arys[i]) == 0: - sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype) - elif _nx.sometrue(_nx.equal(_nx.shape(sub_arys[i]), 0)): - sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype) - return sub_arys - - def _array_split_dispatcher(ary, indices_or_sections, axis=None): return (ary, indices_or_sections) From 3747387359b127814ca43cede453478d195d9625 Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 25 Nov 2025 02:43:48 +0100 Subject: [PATCH 0917/1718] TYP: ``lib.*``: stubs for some private functions used by ``_function_base_impl`` --- numpy/lib/_stride_tricks_impl.pyi | 7 +++---- numpy/lib/_utils_impl.pyi | 6 ++++++ 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/numpy/lib/_stride_tricks_impl.pyi b/numpy/lib/_stride_tricks_impl.pyi index 94651dff36ed..008f2d544414 100644 --- a/numpy/lib/_stride_tricks_impl.pyi +++ b/numpy/lib/_stride_tricks_impl.pyi @@ -67,8 +67,7 @@ def broadcast_to( ) -> NDArray[Any]: ... def broadcast_shapes(*args: _ShapeLike) -> _AnyShape: ... +def broadcast_arrays(*args: ArrayLike, subok: bool = False) -> tuple[NDArray[Any], ...]: ... -def broadcast_arrays( - *args: ArrayLike, - subok: bool = False, -) -> tuple[NDArray[Any], ...]: ... +# used internally by `lib._function_base_impl._parse_input_dimensions` +def _broadcast_shape(*args: ArrayLike) -> _AnyShape: ... 
diff --git a/numpy/lib/_utils_impl.pyi b/numpy/lib/_utils_impl.pyi index 7a34f273c423..e73ba659a31c 100644 --- a/numpy/lib/_utils_impl.pyi +++ b/numpy/lib/_utils_impl.pyi @@ -6,11 +6,17 @@ import numpy as np __all__ = ["get_include", "info", "show_runtime"] +_ScalarOrArrayT = TypeVar("_ScalarOrArrayT", bound=np.generic | np.ndarray) _DTypeT = TypeVar("_DTypeT", bound=np.dtype) +### + def get_include() -> LiteralString: ... def show_runtime() -> None: ... def info( object: object = None, maxwidth: int = 76, output: SupportsWrite[str] | None = None, toplevel: str = "numpy" ) -> None: ... def drop_metadata(dtype: _DTypeT, /) -> _DTypeT: ... + +# used internally by `lib._function_base_impl._median` +def _median_nancheck(data: np.ndarray, result: _ScalarOrArrayT, axis: int) -> _ScalarOrArrayT: ... From e4235ad415636c0ed986d57e0055f15fe7b942dc Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 25 Nov 2025 02:53:13 +0100 Subject: [PATCH 0918/1718] MAINT: ``broadcast_shapes``: update presumed ``NPY_MAXARGS`` from 32 to 64 --- numpy/lib/_stride_tricks_impl.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/numpy/lib/_stride_tricks_impl.py b/numpy/lib/_stride_tricks_impl.py index f6f3b8370a72..98a79b325f66 100644 --- a/numpy/lib/_stride_tricks_impl.py +++ b/numpy/lib/_stride_tricks_impl.py @@ -449,14 +449,14 @@ def _broadcast_shape(*args): """ # use the old-iterator because np.nditer does not handle size 0 arrays # consistently - b = np.broadcast(*args[:32]) - # unfortunately, it cannot handle 32 or more arguments directly - for pos in range(32, len(args), 31): + b = np.broadcast(*args[:64]) + # unfortunately, it cannot handle 64 or more arguments directly + for pos in range(64, len(args), 63): # ironically, np.broadcast does not properly handle np.broadcast # objects (it treats them as scalars) # use broadcasting to avoid allocating the full array b = broadcast_to(0, b.shape) - b = np.broadcast(b, *args[pos:(pos + 31)]) + b = np.broadcast(b, 
*args[pos:(pos + 63)]) return b.shape @@ -567,7 +567,7 @@ def broadcast_arrays(*args, subok=False): [5, 5, 5]])] """ - # nditer is not used here to avoid the limit of 32 arrays. + # nditer is not used here to avoid the limit of 64 arrays. # Otherwise, something like the following one-liner would suffice: # return np.nditer(args, flags=['multi_index', 'zerosize_ok'], # order='C').itviews From 86e125a4f55025b31f98679e02d7e785af3f40fd Mon Sep 17 00:00:00 2001 From: Aniket Singh Yadav Date: Tue, 25 Nov 2025 15:31:34 +0530 Subject: [PATCH 0919/1718] recarray, masked_array and datetime break fill value calculation --- numpy/ma/core.py | 12 +++++++++--- numpy/ma/tests/test_core.py | 9 +++++++++ 2 files changed, 18 insertions(+), 3 deletions(-) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 621dbd94640b..fa320a86abd2 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -26,6 +26,7 @@ import re import textwrap import warnings +import datetime import numpy as np import numpy._core.numerictypes as ntypes @@ -236,9 +237,14 @@ def _recursive_fill_value(dtype, f): # for integer casts, this allows the use of 99999 as a fill value # for int8. # TODO: This is probably a mess, but should best preserve behavior? 
- vals = tuple( - np.array(_recursive_fill_value(dtype[name], f)) - for name in dtype.names) + vals = [] + for name in dtype.names: + field_dtype = dtype[name] + val = _recursive_fill_value(field_dtype, f) + if np.issubdtype(field_dtype, np.datetime64): + if isinstance(val, (datetime.date, datetime.datetime)): + val = np.datetime64(val) + vals.append(np.array(val)) return np.array(vals, dtype=dtype)[()] # decay to void scalar from 0d elif dtype.subdtype: subtype, shape = dtype.subdtype diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index fbebd9fccc37..e97f413ac89f 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -6,6 +6,7 @@ __author__ = "Pierre GF Gerard-Marchant" import copy +import datetime as dt import itertools import operator import pickle @@ -2269,6 +2270,14 @@ def test_check_on_scalar(self): assert_raises(TypeError, _check_fill_value, 1e+20, int) assert_raises(TypeError, _check_fill_value, 'stuff', int) + def test_fill_value_datetime_structured(self): + # gh-29818 + rec = np.array([(dt.date(2025, 4, 1),)], dtype=[('foo', ' Date: Tue, 25 Nov 2025 16:00:53 +0530 Subject: [PATCH 0920/1718] fix:recarray, masked_array and datetime break fill value calculation --- numpy/ma/core.py | 4 ++-- numpy/ma/tests/test_core.py | 14 ++++++++------ 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index fa320a86abd2..255ede361040 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -20,13 +20,13 @@ """ import builtins +import datetime import functools import inspect import operator import re import textwrap import warnings -import datetime import numpy as np import numpy._core.numerictypes as ntypes @@ -245,7 +245,7 @@ def _recursive_fill_value(dtype, f): if isinstance(val, (datetime.date, datetime.datetime)): val = np.datetime64(val) vals.append(np.array(val)) - return np.array(vals, dtype=dtype)[()] # decay to void scalar from 0d + return np.array(tuple(vals), 
dtype=dtype)[()] # decay to void scalar from 0d elif dtype.subdtype: subtype, shape = dtype.subdtype subval = _recursive_fill_value(subtype, f) diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index e97f413ac89f..4a903df0cfbe 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -4980,9 +4980,10 @@ def test_make_mask(self): bools = [True, False] dtypes = [MaskType, float] msgformat = 'copy=%s, shrink=%s, dtype=%s' - for cpy, shr, dt in itertools.product(bools, bools, dtypes): - res = make_mask(nomask, copy=cpy, shrink=shr, dtype=dt) - assert_(res is nomask, msgformat % (cpy, shr, dt)) + # renamed 'dt' to 'dtype' + for cpy, shr, dtype in itertools.product(bools, bools, dtypes): + res = make_mask(nomask, copy=cpy, shrink=shr, dtype=dtype) + assert_(res is nomask, msgformat % (cpy, shr, dtype)) def test_mask_or(self): # Initialize @@ -5313,9 +5314,10 @@ class TestMaskedObjectArray: def test_getitem(self): arr = np.ma.array([None, None]) - for dt in [float, object]: - a0 = np.eye(2).astype(dt) - a1 = np.eye(3).astype(dt) + # renamed 'dt' to 'dtype' + for dtype in [float, object]: + a0 = np.eye(2).astype(dtype) + a1 = np.eye(3).astype(dtype) arr[0] = a0 arr[1] = a1 From 0eb63fdf267528068e17cc48dd574bd3ba020fed Mon Sep 17 00:00:00 2001 From: Aniket Singh Yadav Date: Tue, 25 Nov 2025 16:26:00 +0530 Subject: [PATCH 0921/1718] fix:recarray, masked_array and datetime break fill value calculation --- numpy/ma/core.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 255ede361040..53d29fc9b22c 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -244,6 +244,8 @@ def _recursive_fill_value(dtype, f): if np.issubdtype(field_dtype, np.datetime64): if isinstance(val, (datetime.date, datetime.datetime)): val = np.datetime64(val) + elif isinstance(val, (int, np.integer)): + val = np.array(val).astype(field_dtype) vals.append(np.array(val)) return np.array(tuple(vals), dtype=dtype)[()] # decay 
to void scalar from 0d elif dtype.subdtype: From 304f5f15d899dd26f3f47d6348e5a9c9fb6db31a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 25 Nov 2025 12:00:46 -0700 Subject: [PATCH 0922/1718] MAINT: Bump actions/setup-python from 6.0.0 to 6.1.0 (#30303) Bumps [actions/setup-python](https://github.com/actions/setup-python) from 6.0.0 to 6.1.0. - [Release notes](https://github.com/actions/setup-python/releases) - [Commits](https://github.com/actions/setup-python/compare/e797f83bcb11b83ae66e0230d6156d7c80228e7c...83679a892e2d95755f2dac6acb0bfd1e9ac5d548) --- updated-dependencies: - dependency-name: actions/setup-python dependency-version: 6.1.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/linux.yml | 16 ++++++++-------- .github/workflows/linux_blas.yml | 12 ++++++------ .github/workflows/linux_simd.yml | 12 ++++++------ .github/workflows/macos.yml | 2 +- .github/workflows/mypy_primer.yml | 2 +- .github/workflows/windows.yml | 6 +++--- 6 files changed, 25 insertions(+), 25 deletions(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 1d8cb119705b..9d1c3ac20a45 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -42,7 +42,7 @@ jobs: submodules: recursive fetch-depth: 0 persist-credentials: false - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: '3.11' - name: Install linter requirements @@ -73,7 +73,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: 
python-version: ${{ matrix.version }} - uses: ./.github/meson_actions @@ -88,7 +88,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: 'pypy3.11-v7.3.20' - name: Setup using scipy-openblas @@ -137,7 +137,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: '3.11' - name: Install build and test dependencies from PyPI @@ -222,7 +222,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: '3.11' - name: Install build and benchmarking dependencies @@ -261,7 +261,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: '3.11' - name: Install gfortran and setup OpenBLAS (sdist build) @@ -310,7 +310,7 @@ jobs: path: 'array-api-tests' persist-credentials: false - name: Set up Python - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: '3.11' - name: Install build and test dependencies from PyPI @@ -339,7 +339,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # 
v6.1.0 with: python-version: '3.11' - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 diff --git a/.github/workflows/linux_blas.yml b/.github/workflows/linux_blas.yml index 8dba9281443b..77cb9aaf91fe 100644 --- a/.github/workflows/linux_blas.yml +++ b/.github/workflows/linux_blas.yml @@ -74,7 +74,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: '3.11' @@ -197,7 +197,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: '3.11' @@ -225,7 +225,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: '3.11' @@ -288,7 +288,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: '3.11' @@ -352,7 +352,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: '3.11' @@ -389,7 +389,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: 
'3.11' diff --git a/.github/workflows/linux_simd.yml b/.github/workflows/linux_simd.yml index 6078e2f15378..dcc483eaf6df 100644 --- a/.github/workflows/linux_simd.yml +++ b/.github/workflows/linux_simd.yml @@ -63,7 +63,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: '3.11' - uses: ./.github/meson_actions @@ -81,7 +81,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: '3.11' @@ -127,7 +127,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: '3.11' - name: Install dependencies @@ -175,7 +175,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: "${{ matrix.BUILD_PROP[2] }}" - uses: ./.github/meson_actions @@ -190,7 +190,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: '3.11' @@ -240,7 +240,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: '3.11' diff 
--git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 76d87a5e5022..2787a7c81d3a 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -127,7 +127,7 @@ jobs: fetch-tags: true persist-credentials: false - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: ${{ matrix.version }} diff --git a/.github/workflows/mypy_primer.yml b/.github/workflows/mypy_primer.yml index dcdad4ff36ba..03b471b10c63 100644 --- a/.github/workflows/mypy_primer.yml +++ b/.github/workflows/mypy_primer.yml @@ -32,7 +32,7 @@ jobs: with: path: numpy_to_test fetch-depth: 0 - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: "3.12" - name: Install dependencies diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 20584f60219f..a0604fa87aa9 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -32,7 +32,7 @@ jobs: persist-credentials: false - name: Setup Python - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: "3.14t" @@ -90,7 +90,7 @@ jobs: persist-credentials: false - name: Setup Python - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: '3.11' architecture: ${{ matrix.architecture }} @@ -146,7 +146,7 @@ jobs: persist-credentials: false - name: Setup Python - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: ${{ matrix.pyver }} From 
1930db7b29d6a696fe72e2a6c0eb302607717340 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Tue, 25 Nov 2025 20:28:59 +0100 Subject: [PATCH 0923/1718] MAINT: add ``matmul`` to ``_core.umath.__all__`` (#30286) --- numpy/_core/umath.py | 4 ++-- numpy/_core/umath.pyi | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/numpy/_core/umath.py b/numpy/_core/umath.py index 94f97c059187..cc4b8a1238f0 100644 --- a/numpy/_core/umath.py +++ b/numpy/_core/umath.py @@ -53,8 +53,8 @@ 'heaviside', 'hypot', 'invert', 'isfinite', 'isinf', 'isnan', 'isnat', 'lcm', 'ldexp', 'left_shift', 'less', 'less_equal', 'log', 'log10', 'log1p', 'log2', 'logaddexp', 'logaddexp2', 'logical_and', 'logical_not', - 'logical_or', 'logical_xor', 'matvec', 'maximum', 'minimum', 'mod', 'modf', - 'multiply', 'negative', 'nextafter', 'not_equal', 'pi', 'positive', + 'logical_or', 'logical_xor', 'matmul', 'matvec', 'maximum', 'minimum', 'mod', + 'modf', 'multiply', 'negative', 'nextafter', 'not_equal', 'pi', 'positive', 'power', 'rad2deg', 'radians', 'reciprocal', 'remainder', 'right_shift', 'rint', 'sign', 'signbit', 'sin', 'sinh', 'spacing', 'sqrt', 'square', 'subtract', 'tan', 'tanh', 'true_divide', 'trunc', 'vecdot', 'vecmat'] diff --git a/numpy/_core/umath.pyi b/numpy/_core/umath.pyi index d3b662d79d66..faf6fb545d5f 100644 --- a/numpy/_core/umath.pyi +++ b/numpy/_core/umath.pyi @@ -69,6 +69,7 @@ from numpy import ( logical_not, logical_or, logical_xor, + matmul, matvec, maximum, minimum, @@ -168,6 +169,7 @@ __all__ = [ "logical_not", "logical_or", "logical_xor", + "matmul", "matvec", "maximum", "minimum", From 2c2a0d3e905a2b97543988a2d90794cfa49ad583 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Tue, 25 Nov 2025 23:12:20 +0100 Subject: [PATCH 0924/1718] DEP: deprecate ``numpy.lib.user_array.container`` (#30284) * DEP: deprecate ``numpy.lib.user_array.container`` * DEP: add deprecation notice for ``numpy.lib.user_array.container`` * DEP: add deprecation comment with date to 
``lib.user_array.container`` Co-authored-by: Charles Harris --------- Co-authored-by: Charles Harris --- doc/release/upcoming_changes/30284.deprecation.rst | 3 +++ numpy/lib/_user_array_impl.py | 11 +++++++++++ numpy/lib/_user_array_impl.pyi | 4 +++- numpy/typing/tests/data/pass/lib_user_array.py | 2 +- 4 files changed, 18 insertions(+), 2 deletions(-) create mode 100644 doc/release/upcoming_changes/30284.deprecation.rst diff --git a/doc/release/upcoming_changes/30284.deprecation.rst b/doc/release/upcoming_changes/30284.deprecation.rst new file mode 100644 index 000000000000..8803f3225cd3 --- /dev/null +++ b/doc/release/upcoming_changes/30284.deprecation.rst @@ -0,0 +1,3 @@ +Deprecation of ``numpy.lib.user_array.container`` +------------------------------------------------- +The ``numpy.lib.user_array.container`` class is deprecated and will be removed in a future version. diff --git a/numpy/lib/_user_array_impl.py b/numpy/lib/_user_array_impl.py index f3a6c0f518be..2465f5f70b99 100644 --- a/numpy/lib/_user_array_impl.py +++ b/numpy/lib/_user_array_impl.py @@ -53,6 +53,17 @@ class container: astype """ + def __init_subclass__(cls) -> None: + # Deprecated in NumPy 2.4, 2025-11-24 + import warnings + + warnings.warn( + "The numpy.lib.user_array.container class is deprecated and will be " + "removed in a future version.", + DeprecationWarning, + stacklevel=2, + ) + def __init__(self, data, dtype=None, copy=True): self.array = array(data, dtype, copy=copy) diff --git a/numpy/lib/_user_array_impl.pyi b/numpy/lib/_user_array_impl.pyi index 0aeec42129af..7f364b495450 100644 --- a/numpy/lib/_user_array_impl.pyi +++ b/numpy/lib/_user_array_impl.pyi @@ -1,7 +1,7 @@ from _typeshed import Incomplete from types import EllipsisType from typing import Any, Generic, Self, SupportsIndex, TypeAlias, overload -from typing_extensions import TypeVar, override +from typing_extensions import TypeVar, deprecated, override import numpy as np import numpy.typing as npt @@ -37,7 +37,9 @@ 
_ToIndex: TypeAlias = SupportsIndex | _ToIndexSlice _ToIndices: TypeAlias = _ToIndex | tuple[_ToIndex, ...] ### +# pyright: reportDeprecated = false +@deprecated("The numpy.lib.user_array.container class is deprecated and will be removed in a future version.") class container(Generic[_ShapeT_co, _DTypeT_co]): array: np.ndarray[_ShapeT_co, _DTypeT_co] diff --git a/numpy/typing/tests/data/pass/lib_user_array.py b/numpy/typing/tests/data/pass/lib_user_array.py index 62b7e85d7ff1..f79dc38af508 100644 --- a/numpy/typing/tests/data/pass/lib_user_array.py +++ b/numpy/typing/tests/data/pass/lib_user_array.py @@ -3,7 +3,7 @@ from __future__ import annotations import numpy as np -from numpy.lib.user_array import container +from numpy.lib.user_array import container # type: ignore[deprecated] N = 10_000 W = H = int(N**0.5) From 4e6251ab4df4ca8bf6599a7fce02cbadb9945342 Mon Sep 17 00:00:00 2001 From: Rupesh <206439536+Rupeshhsharma@users.noreply.github.com> Date: Wed, 26 Nov 2025 21:56:42 +0530 Subject: [PATCH 0925/1718] BUG: Fix RecursionError and raise ValueError for unmatched parentheses (#30300) * Fix RecursionError and raise ValueError for unmatched parentheses * Fix RecursionError and raise ValueError for unmatched parentheses --- numpy/f2py/symbolic.py | 2 ++ numpy/f2py/tests/test_symbolic.py | 5 +++++ 2 files changed, 7 insertions(+) diff --git a/numpy/f2py/symbolic.py b/numpy/f2py/symbolic.py index 409b84da531c..c768b3c470ed 100644 --- a/numpy/f2py/symbolic.py +++ b/numpy/f2py/symbolic.py @@ -1236,6 +1236,8 @@ def replace_parenthesis(s): i = mn_i j = s.find(right, i) + if j == -1: + raise ValueError(f'Mismatch of {left + right} parenthesis in {s!r}') while s.count(left, i + 1, j) != s.count(right, i + 1, j): j = s.find(right, j + 1) diff --git a/numpy/f2py/tests/test_symbolic.py b/numpy/f2py/tests/test_symbolic.py index ec23f522128b..fbf5abd9aa18 100644 --- a/numpy/f2py/tests/test_symbolic.py +++ b/numpy/f2py/tests/test_symbolic.py @@ -493,3 +493,8 @@ def 
test_polynomial_atoms(self): assert (y(x) + x).polynomial_atoms() == {y(x), x} assert (y(x) * x[y]).polynomial_atoms() == {y(x), x[y]} assert (y(x)**x).polynomial_atoms() == {y(x)} + + def test_unmatched_parenthesis_gh30268(self): + #gh - 30268 + with pytest.raises(ValueError, match=r"Mismatch of \(\) parenthesis"): + Expr.parse("DATA (A, I=1, N", language=Language.Fortran) From 5306844284b1a4d365510b7fb04175dbd66e00ce Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Wed, 26 Nov 2025 10:42:38 -0700 Subject: [PATCH 0926/1718] MAINT: avoid unused variable warnings in dtype tests (#30310) --- numpy/_core/src/umath/_scaled_float_dtype.c | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/numpy/_core/src/umath/_scaled_float_dtype.c b/numpy/_core/src/umath/_scaled_float_dtype.c index da842cd8c55d..020e903b5fc8 100644 --- a/numpy/_core/src/umath/_scaled_float_dtype.c +++ b/numpy/_core/src/umath/_scaled_float_dtype.c @@ -787,8 +787,7 @@ sfloat_stable_sort_loop( { assert(data[0] == data[1]); assert(strides[0] == sizeof(npy_float64) && strides[1] == sizeof(npy_float64)); - PyArrayMethod_SortParameters *parameters = (PyArrayMethod_SortParameters *)context->parameters; - assert(parameters->flags == NPY_SORT_STABLE); + assert(((PyArrayMethod_SortParameters *)context->parameters)->flags == NPY_SORT_STABLE); npy_intp N = dimensions[0]; char *in = data[0]; @@ -807,8 +806,7 @@ sfloat_default_sort_loop( { assert(data[0] == data[1]); assert(strides[0] == sizeof(npy_float64) && strides[1] == sizeof(npy_float64)); - PyArrayMethod_SortParameters *parameters = (PyArrayMethod_SortParameters *)context->parameters; - assert(parameters->flags == NPY_SORT_DEFAULT); + assert(((PyArrayMethod_SortParameters *)context->parameters)->flags == NPY_SORT_DEFAULT); npy_intp N = dimensions[0]; char *in = data[0]; @@ -874,8 +872,7 @@ sfloat_stable_argsort_loop( const npy_intp *strides, NpyAuxData *NPY_UNUSED(auxdata)) { - PyArrayMethod_SortParameters *parameters = 
(PyArrayMethod_SortParameters *)context->parameters; - assert(parameters->flags == NPY_SORT_STABLE); + assert(((PyArrayMethod_SortParameters *)context->parameters)->flags == NPY_SORT_STABLE); assert(strides[0] == sizeof(npy_float64)); assert(strides[1] == sizeof(npy_intp)); @@ -895,8 +892,7 @@ sfloat_default_argsort_loop( const npy_intp *strides, NpyAuxData *NPY_UNUSED(auxdata)) { - PyArrayMethod_SortParameters *parameters = (PyArrayMethod_SortParameters *)context->parameters; - assert(parameters->flags == NPY_SORT_DEFAULT); + assert(((PyArrayMethod_SortParameters *)context->parameters)->flags == NPY_SORT_DEFAULT); assert(strides[0] == sizeof(npy_float64)); assert(strides[1] == sizeof(npy_intp)); From 2b6c35db8712217158185fc58bf7d83892b1d967 Mon Sep 17 00:00:00 2001 From: Maanas Arora Date: Wed, 26 Nov 2025 13:06:27 -0500 Subject: [PATCH 0927/1718] ENH: New-style sorting for StringDType (#30266) * ENH: New-style sorting for StringDType * BUG: fix descriptor handling in stringdtype_sort_resolve_descriptors * REF: Cleanup braces and function order * REF: rename fill_sort_data_with_array to get_sort_data_from_array for clarity * MAINT: Move sort type selection one level down, reducing duplication. 
* MAINT: Combine two levels of indirection * REF: remove unnecessary view_offset assignment in stringdtype_sort_resolve_descriptors * REF: change function visibility to static for stringdtype sort functions --------- Co-authored-by: Marten Henric van Kerkwijk --- numpy/_core/src/common/npy_sort.h.src | 23 +++ numpy/_core/src/multiarray/item_selection.c | 18 +- .../_core/src/multiarray/stringdtype/dtype.c | 186 ++++++++++++++++++ numpy/_core/src/npysort/mergesort.cpp | 36 +++- numpy/_core/src/npysort/npysort_common.h | 14 ++ numpy/_core/src/npysort/quicksort.cpp | 30 ++- 6 files changed, 284 insertions(+), 23 deletions(-) diff --git a/numpy/_core/src/common/npy_sort.h.src b/numpy/_core/src/common/npy_sort.h.src index d6e4357225a8..1f82b07659f4 100644 --- a/numpy/_core/src/common/npy_sort.h.src +++ b/numpy/_core/src/common/npy_sort.h.src @@ -107,6 +107,29 @@ NPY_NO_EXPORT int npy_aheapsort(void *vec, npy_intp *ind, npy_intp cnt, void *ar NPY_NO_EXPORT int npy_amergesort(void *vec, npy_intp *ind, npy_intp cnt, void *arr); NPY_NO_EXPORT int npy_atimsort(void *vec, npy_intp *ind, npy_intp cnt, void *arr); + +/* + ***************************************************************************** + ** GENERIC SORT IMPLEMENTATIONS ** + ***************************************************************************** + */ + +typedef int (PyArray_SortImpl)(void *start, npy_intp num, void *varr, + npy_intp elsize, PyArray_CompareFunc *cmp); +typedef int (PyArray_ArgSortImpl)(void *vv, npy_intp *tosort, npy_intp n, + void *varr, npy_intp elsize, + PyArray_CompareFunc *cmp); + +NPY_NO_EXPORT int npy_quicksort_impl(void *start, npy_intp num, void *varr, + npy_intp elsize, PyArray_CompareFunc *cmp); +NPY_NO_EXPORT int npy_mergesort_impl(void *start, npy_intp num, void *varr, + npy_intp elsize, PyArray_CompareFunc *cmp); +NPY_NO_EXPORT int npy_aquicksort_impl(void *vv, npy_intp *tosort, npy_intp num, void *varr, + npy_intp elsize, PyArray_CompareFunc *cmp); +NPY_NO_EXPORT int 
npy_amergesort_impl(void *v, npy_intp *tosort, npy_intp num, void *varr, + npy_intp elsize, PyArray_CompareFunc *cmp); + + #ifdef __cplusplus } #endif diff --git a/numpy/_core/src/multiarray/item_selection.c b/numpy/_core/src/multiarray/item_selection.c index a50b8c49c3fa..f9d753f9e7ba 100644 --- a/numpy/_core/src/multiarray/item_selection.c +++ b/numpy/_core/src/multiarray/item_selection.c @@ -1226,8 +1226,8 @@ _new_sortlike(PyArrayObject *op, int axis, PyArray_SortFunc *sort, if (N <= 1 || PyArray_SIZE(op) == 0) { return 0; } - - if (method_flags != NULL) { + + if (strided_loop != NULL) { needs_api = *method_flags & NPY_METH_REQUIRES_PYAPI; } else { @@ -1441,7 +1441,7 @@ _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort, rstride = PyArray_STRIDE(rop, axis); needidxbuffer = rstride != sizeof(npy_intp); - if (method_flags != NULL) { + if (strided_loop != NULL) { needs_api = *method_flags & NPY_METH_REQUIRES_PYAPI; } else { @@ -3142,7 +3142,7 @@ PyArray_Sort(PyArrayObject *op, int axis, NPY_SORTKIND flags) PyArrayMethod_Context context = {0}; PyArray_Descr *loop_descrs[2]; NpyAuxData *auxdata = NULL; - NPY_ARRAYMETHOD_FLAGS *method_flags = NULL; + NPY_ARRAYMETHOD_FLAGS method_flags = 0; PyArray_SortFunc **sort_table = NULL; PyArray_SortFunc *sort = NULL; @@ -3184,7 +3184,7 @@ PyArray_Sort(PyArrayObject *op, int axis, NPY_SORTKIND flags) npy_intp strides[2] = {loop_descrs[0]->elsize, loop_descrs[1]->elsize}; if (sort_method->get_strided_loop( - &context, 1, 0, strides, &strided_loop, &auxdata, method_flags) < 0) { + &context, 1, 0, strides, &strided_loop, &auxdata, &method_flags) < 0) { ret = -1; goto fail; } @@ -3229,7 +3229,7 @@ PyArray_Sort(PyArrayObject *op, int axis, NPY_SORTKIND flags) } ret = _new_sortlike(op, axis, sort, strided_loop, - &context, auxdata, method_flags, NULL, NULL, 0); + &context, auxdata, &method_flags, NULL, NULL, 0); fail: if (sort_method != NULL) { @@ -3259,7 +3259,7 @@ PyArray_ArgSort(PyArrayObject *op, int axis, 
NPY_SORTKIND flags) PyArrayMethod_Context context = {0}; PyArray_Descr *loop_descrs[2]; NpyAuxData *auxdata = NULL; - NPY_ARRAYMETHOD_FLAGS *method_flags = NULL; + NPY_ARRAYMETHOD_FLAGS method_flags = 0; PyArray_ArgSortFunc **argsort_table = NULL; PyArray_ArgSortFunc *argsort = NULL; @@ -3295,7 +3295,7 @@ PyArray_ArgSort(PyArrayObject *op, int axis, NPY_SORTKIND flags) npy_intp strides[2] = {loop_descrs[0]->elsize, loop_descrs[1]->elsize}; if (argsort_method->get_strided_loop( - &context, 1, 0, strides, &strided_loop, &auxdata, method_flags) < 0) { + &context, 1, 0, strides, &strided_loop, &auxdata, &method_flags) < 0) { ret = NULL; goto fail; } @@ -3346,7 +3346,7 @@ PyArray_ArgSort(PyArrayObject *op, int axis, NPY_SORTKIND flags) } ret = _new_argsortlike(op2, axis, argsort, strided_loop, - &context, auxdata, method_flags, NULL, NULL, 0); + &context, auxdata, &method_flags, NULL, NULL, 0); Py_DECREF(op2); fail: diff --git a/numpy/_core/src/multiarray/stringdtype/dtype.c b/numpy/_core/src/multiarray/stringdtype/dtype.c index a06e7a1ed1b6..85514ef15df6 100644 --- a/numpy/_core/src/multiarray/stringdtype/dtype.c +++ b/numpy/_core/src/multiarray/stringdtype/dtype.c @@ -18,6 +18,7 @@ #include "conversion_utils.h" #include "npy_import.h" #include "multiarraymodule.h" +#include "npy_sort.h" /* * Internal helper to create new instances @@ -459,6 +460,7 @@ compare(void *a, void *b, void *arr) return ret; } +// We assume the allocator mutex is already held. 
int _compare(void *a, void *b, PyArray_StringDTypeObject *descr_a, PyArray_StringDTypeObject *descr_b) @@ -516,6 +518,17 @@ _compare(void *a, void *b, PyArray_StringDTypeObject *descr_a, return NpyString_cmp(&s_a, &s_b); } +int +_sort_compare(const void *a, const void *b, void *context) +{ + PyArrayMethod_Context *sort_context = (PyArrayMethod_Context *)context; + PyArray_StringDTypeObject *sdescr = + (PyArray_StringDTypeObject *)sort_context->descriptors[0]; + + int ret = _compare((void *)a, (void *)b, sdescr, sdescr); + return ret; +} + // PyArray_ArgFunc // The max element is the one with the highest unicode code point. int @@ -667,6 +680,121 @@ static PyType_Slot PyArray_StringDType_Slots[] = { {0, NULL}}; +/* + * Wrap the sort loop to acquire/release the string allocator, + * and pick the correct internal implementation. + */ +static int +stringdtype_wrap_sort_loop( + PyArrayMethod_Context *context, + char *const *data, const npy_intp *dimensions, const npy_intp *strides, + NpyAuxData *transferdata) +{ + PyArray_StringDTypeObject *sdescr = + (PyArray_StringDTypeObject *)context->descriptors[0]; + PyArray_SortImpl *sort_loop = + ((PyArrayMethod_SortParameters *)context->parameters)->flags + == NPY_SORT_STABLE ? &npy_mergesort_impl : &npy_quicksort_impl; + + npy_string_allocator *allocator = NpyString_acquire_allocator(sdescr); + int ret = sort_loop( + data[0], dimensions[0], context, + context->descriptors[0]->elsize, &_sort_compare); + NpyString_release_allocator(allocator); + return ret; +} + +/* + * This is currently required even though the default implementation would work, + * because the output, though enforced to be equal to the input, is parametric. 
+ */ +static NPY_CASTING +stringdtype_sort_resolve_descriptors( + PyArrayMethodObject *method, + PyArray_DTypeMeta *const *dtypes, + PyArray_Descr *const *input_descrs, + PyArray_Descr **output_descrs, + npy_intp *view_offset) +{ + output_descrs[0] = NPY_DT_CALL_ensure_canonical(input_descrs[0]); + if (NPY_UNLIKELY(output_descrs[0] == NULL)) { + return -1; + } + output_descrs[1] = NPY_DT_CALL_ensure_canonical(input_descrs[1]); + if (NPY_UNLIKELY(output_descrs[1] == NULL)) { + Py_XDECREF(output_descrs[0]); + return -1; + } + + return method->casting; +} + +static int +stringdtype_wrap_argsort_loop( + PyArrayMethod_Context *context, + char *const *data, const npy_intp *dimensions, const npy_intp *strides, + NpyAuxData *transferdata) +{ + PyArray_StringDTypeObject *sdescr = + (PyArray_StringDTypeObject *)context->descriptors[0]; + PyArray_ArgSortImpl *argsort_loop = + ((PyArrayMethod_SortParameters *)context->parameters) + ->flags == NPY_SORT_STABLE ? &npy_amergesort_impl : &npy_aquicksort_impl; + + npy_string_allocator *allocator = NpyString_acquire_allocator(sdescr); + int ret = argsort_loop( + data[0], (npy_intp *)data[1], dimensions[0], context, + context->descriptors[0]->elsize, &_sort_compare); + NpyString_release_allocator(allocator); + return ret; +} + +static int +stringdtype_get_sort_loop( + PyArrayMethod_Context *context, + int aligned, int move_references, + const npy_intp *strides, + PyArrayMethod_StridedLoop **out_loop, + NpyAuxData **out_transferdata, + NPY_ARRAYMETHOD_FLAGS *flags) +{ + PyArrayMethod_SortParameters *parameters = (PyArrayMethod_SortParameters *)context->parameters; + *flags |= NPY_METH_NO_FLOATINGPOINT_ERRORS; + + if ((parameters->flags == NPY_SORT_STABLE) + || parameters->flags == NPY_SORT_DEFAULT) { + *out_loop = (PyArrayMethod_StridedLoop *)stringdtype_wrap_sort_loop; + } + else { + PyErr_SetString(PyExc_RuntimeError, "unsupported sort kind"); + return -1; + } + return 0; +} + +static int +stringdtype_get_argsort_loop( + 
PyArrayMethod_Context *context, + int aligned, int move_references, + const npy_intp *strides, + PyArrayMethod_StridedLoop **out_loop, + NpyAuxData **out_transferdata, + NPY_ARRAYMETHOD_FLAGS *flags) +{ + PyArrayMethod_SortParameters *parameters = (PyArrayMethod_SortParameters *)context->parameters; + *flags |= NPY_METH_NO_FLOATINGPOINT_ERRORS; + + if (parameters->flags == NPY_SORT_STABLE + || parameters->flags == NPY_SORT_DEFAULT) { + *out_loop = (PyArrayMethod_StridedLoop *)stringdtype_wrap_argsort_loop; + } + else { + PyErr_SetString(PyExc_RuntimeError, "unsupported sort kind"); + return -1; + } + return 0; +} + static PyObject * stringdtype_new(PyTypeObject *NPY_UNUSED(cls), PyObject *args, PyObject *kwds) { @@ -831,6 +959,60 @@ PyArray_DTypeMeta PyArray_StringDType = { /* rest, filled in during DTypeMeta initialization */ }; +NPY_NO_EXPORT int +init_stringdtype_sorts(void) +{ + PyArray_DTypeMeta *stringdtype = &PyArray_StringDType; + + PyArray_DTypeMeta *sort_dtypes[2] = {stringdtype, stringdtype}; + PyType_Slot sort_slots[3] = { + {NPY_METH_resolve_descriptors, &stringdtype_sort_resolve_descriptors}, + {NPY_METH_get_loop, &stringdtype_get_sort_loop}, + {0, NULL} + }; + PyArrayMethod_Spec sort_spec = { + .name = "stringdtype_sort", + .nin = 1, + .nout = 1, + .dtypes = sort_dtypes, + .slots = sort_slots, + .flags = NPY_METH_NO_FLOATINGPOINT_ERRORS, + }; + + PyBoundArrayMethodObject *sort_method = PyArrayMethod_FromSpec_int( + &sort_spec, 1); + if (sort_method == NULL) { + return -1; + } + NPY_DT_SLOTS(stringdtype)->sort_meth = sort_method->method; + Py_INCREF(sort_method->method); + Py_DECREF(sort_method); + + PyArray_DTypeMeta *argsort_dtypes[2] = {stringdtype, &PyArray_IntpDType}; + PyType_Slot argsort_slots[2] = { + {NPY_METH_get_loop, &stringdtype_get_argsort_loop}, + {0, NULL} + }; + PyArrayMethod_Spec argsort_spec = { + .name = "stringdtype_argsort", + .nin = 1, + .nout = 1, + .dtypes = argsort_dtypes, + .slots = argsort_slots, + .flags = 
NPY_METH_NO_FLOATINGPOINT_ERRORS, + }; + + PyBoundArrayMethodObject *argsort_method = PyArrayMethod_FromSpec_int( + &argsort_spec, 1); + if (argsort_method == NULL) { + return -1; + } + NPY_DT_SLOTS(stringdtype)->argsort_meth = argsort_method->method; + Py_INCREF(argsort_method->method); + Py_DECREF(argsort_method); + return 0; +} + NPY_NO_EXPORT int init_string_dtype(void) { @@ -876,6 +1058,10 @@ init_string_dtype(void) PyMem_Free(PyArray_StringDType_casts); + if (init_stringdtype_sorts() < 0) { + return -1; + } + return 0; } diff --git a/numpy/_core/src/npysort/mergesort.cpp b/numpy/_core/src/npysort/mergesort.cpp index 2fac0ccfafcd..1cfe04b1d266 100644 --- a/numpy/_core/src/npysort/mergesort.cpp +++ b/numpy/_core/src/npysort/mergesort.cpp @@ -337,7 +337,7 @@ string_amergesort_(type *v, npy_intp *tosort, npy_intp num, void *varr) static void npy_mergesort0(char *pl, char *pr, char *pw, char *vp, npy_intp elsize, - PyArray_CompareFunc *cmp, PyArrayObject *arr) + PyArray_CompareFunc *cmp, void *arr) { char *pi, *pj, *pk, *pm; @@ -383,9 +383,19 @@ npy_mergesort0(char *pl, char *pr, char *pw, char *vp, npy_intp elsize, NPY_NO_EXPORT int npy_mergesort(void *start, npy_intp num, void *varr) { - PyArrayObject *arr = (PyArrayObject *)varr; - npy_intp elsize = PyArray_ITEMSIZE(arr); - PyArray_CompareFunc *cmp = PyDataType_GetArrFuncs(PyArray_DESCR(arr))->compare; + void *arr = varr; + npy_intp elsize; + PyArray_CompareFunc *cmp; + get_sort_data_from_array(arr, &elsize, &cmp); + + return npy_mergesort_impl(start, num, varr, elsize, cmp); +} + +NPY_NO_EXPORT int +npy_mergesort_impl(void *start, npy_intp num, void *varr, + npy_intp elsize, PyArray_CompareFunc *cmp) +{ + void *arr = varr; char *pl = (char *)start; char *pr = pl + num * elsize; char *pw; @@ -413,7 +423,7 @@ npy_mergesort(void *start, npy_intp num, void *varr) static void npy_amergesort0(npy_intp *pl, npy_intp *pr, char *v, npy_intp *pw, - npy_intp elsize, PyArray_CompareFunc *cmp, PyArrayObject *arr) + 
npy_intp elsize, PyArray_CompareFunc *cmp, void *arr) { char *vp; npy_intp vi, *pi, *pj, *pk, *pm; @@ -459,9 +469,19 @@ npy_amergesort0(npy_intp *pl, npy_intp *pr, char *v, npy_intp *pw, NPY_NO_EXPORT int npy_amergesort(void *v, npy_intp *tosort, npy_intp num, void *varr) { - PyArrayObject *arr = (PyArrayObject *)varr; - npy_intp elsize = PyArray_ITEMSIZE(arr); - PyArray_CompareFunc *cmp = PyDataType_GetArrFuncs(PyArray_DESCR(arr))->compare; + void *arr = varr; + npy_intp elsize; + PyArray_CompareFunc *cmp; + get_sort_data_from_array(arr, &elsize, &cmp); + + return npy_amergesort_impl(v, tosort, num, varr, elsize, cmp); +} + +NPY_NO_EXPORT int +npy_amergesort_impl(void *v, npy_intp *tosort, npy_intp num, void *varr, + npy_intp elsize, PyArray_CompareFunc *cmp) +{ + void *arr = varr; npy_intp *pl, *pr, *pw; /* Items that have zero size don't make sense to sort */ diff --git a/numpy/_core/src/npysort/npysort_common.h b/numpy/_core/src/npysort/npysort_common.h index 8b7e0ef43f88..f2b99e3b7f66 100644 --- a/numpy/_core/src/npysort/npysort_common.h +++ b/numpy/_core/src/npysort/npysort_common.h @@ -40,6 +40,20 @@ extern "C" { /* Need this for the argsort functions */ #define INTP_SWAP(a,b) {npy_intp tmp = (b); (b)=(a); (a) = tmp;} +/* + ****************************************************************************** + ** SORTING WRAPPERS ** + ****************************************************************************** + */ + +static inline void +get_sort_data_from_array(void *varr, npy_intp *elsize, PyArray_CompareFunc **cmp) +{ + PyArrayObject *arr = (PyArrayObject *)varr; + *elsize = PyArray_ITEMSIZE(arr); + *cmp = PyDataType_GetArrFuncs(PyArray_DESCR(arr))->compare; +} + /* ***************************************************************************** ** COMPARISON FUNCTIONS ** diff --git a/numpy/_core/src/npysort/quicksort.cpp b/numpy/_core/src/npysort/quicksort.cpp index ddf4fce0c28b..2f5adde17b64 100644 --- a/numpy/_core/src/npysort/quicksort.cpp +++ 
b/numpy/_core/src/npysort/quicksort.cpp @@ -508,9 +508,18 @@ string_aquicksort_(type *vv, npy_intp *tosort, npy_intp num, void *varr) NPY_NO_EXPORT int npy_quicksort(void *start, npy_intp num, void *varr) { - PyArrayObject *arr = (PyArrayObject *)varr; - npy_intp elsize = PyArray_ITEMSIZE(arr); - PyArray_CompareFunc *cmp = PyDataType_GetArrFuncs(PyArray_DESCR(arr))->compare; + npy_intp elsize; + PyArray_CompareFunc *cmp; + get_sort_data_from_array(varr, &elsize, &cmp); + + return npy_quicksort_impl(start, num, varr, elsize, cmp); +} + +NPY_NO_EXPORT int +npy_quicksort_impl(void *start, npy_intp num, void *varr, + npy_intp elsize, PyArray_CompareFunc *cmp) +{ + void *arr = varr; char *vp; char *pl = (char *)start; char *pr = pl + (num - 1) * elsize; @@ -612,10 +621,19 @@ npy_quicksort(void *start, npy_intp num, void *varr) NPY_NO_EXPORT int npy_aquicksort(void *vv, npy_intp *tosort, npy_intp num, void *varr) { + npy_intp elsize; + PyArray_CompareFunc *cmp; + get_sort_data_from_array(varr, &elsize, &cmp); + + return npy_aquicksort_impl(vv, tosort, num, varr, elsize, cmp); +} + +NPY_NO_EXPORT int +npy_aquicksort_impl(void *vv, npy_intp *tosort, npy_intp num, void *varr, + npy_intp elsize, PyArray_CompareFunc *cmp) +{ + void *arr = varr; char *v = (char *)vv; - PyArrayObject *arr = (PyArrayObject *)varr; - npy_intp elsize = PyArray_ITEMSIZE(arr); - PyArray_CompareFunc *cmp = PyDataType_GetArrFuncs(PyArray_DESCR(arr))->compare; char *vp; npy_intp *pl = tosort; npy_intp *pr = tosort + num - 1; From b8f76dd6b0adae1506db4e07e675413e74de1b7f Mon Sep 17 00:00:00 2001 From: "Christine P. 
Chai" Date: Wed, 26 Nov 2025 19:26:52 -0800 Subject: [PATCH 0928/1718] DOC: record a data -> record a data point [skip azp][skip cirrus][skip actions] (#30313) --- doc/source/reference/maskedarray.generic.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/reference/maskedarray.generic.rst b/doc/source/reference/maskedarray.generic.rst index 3fbe25d5b03c..4f53c6146b53 100644 --- a/doc/source/reference/maskedarray.generic.rst +++ b/doc/source/reference/maskedarray.generic.rst @@ -20,8 +20,8 @@ What is a masked array? ----------------------- In many circumstances, datasets can be incomplete or tainted by the presence -of invalid data. For example, a sensor may have failed to record a data, or -recorded an invalid value. The :mod:`numpy.ma` module provides a convenient +of invalid data. For example, a sensor may have failed to record a data point, +or recorded an invalid value. The :mod:`numpy.ma` module provides a convenient way to address this issue, by introducing masked arrays. A masked array is the combination of a standard :class:`numpy.ndarray` and a From e16fc01051e598807ad87c9df4683b7e9f42364e Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 27 Nov 2025 17:26:16 +0100 Subject: [PATCH 0929/1718] MAINT,API: Introduce __numpy_dtype__ and fix dtype attribute recursion (#30179) * MAINT,API: Introduce __numpy_dtype__ and fix dtype attribute recursion This is a lot more complex then I expected. We, correctly, deprecated `.dtype` attribute lookup recursion, however... The code still had a `try/except` that still recursed and that try except carried meaning at least in a weird path of: ``` class mydtype(np.void): dtype = ... ``` Now does that make sense? Maybe not... we also fall back to object in some paths which should have been broken when a dtype attribute existed on a type but it is a property/descriptor. 
So, this removes the recursion, but adds a check for `__get__` to filter out those cases, this is something we successfully did for other protocols `__array__`, `__array_interface__`, etc. Signed-off-by: Sebastian Berg * DOC: Add release note snippet * DOC: Make release note more precise, array-likes don't need the new attr --------- Signed-off-by: Sebastian Berg --- .../upcoming_changes/30179.new_feature.rst | 13 +++ doc/source/user/basics.interoperability.rst | 12 +++ numpy/_core/src/multiarray/descriptor.c | 82 +++++++++---------- numpy/_core/src/multiarray/descriptor.h | 3 - numpy/_core/src/multiarray/npy_static_data.c | 1 + numpy/_core/src/multiarray/npy_static_data.h | 1 + numpy/_core/src/multiarray/scalarapi.c | 36 +++++--- numpy/_core/tests/test_dtype.py | 57 ++++++++++--- 8 files changed, 137 insertions(+), 68 deletions(-) create mode 100644 doc/release/upcoming_changes/30179.new_feature.rst diff --git a/doc/release/upcoming_changes/30179.new_feature.rst b/doc/release/upcoming_changes/30179.new_feature.rst new file mode 100644 index 000000000000..e19815289351 --- /dev/null +++ b/doc/release/upcoming_changes/30179.new_feature.rst @@ -0,0 +1,13 @@ +New ``__numpy_dtype__`` protocol +-------------------------------- +NumPy now has a new ``__numpy_dtype__`` protocol. NumPy will check +for this attribute when converting to a NumPy dtype via ``np.dtype(obj)`` +or any ``dtype=`` argument. + +Downstream projects are encouraged to implement this for all dtype like +objects which may previously have used a ``.dtype`` attribute that returned +a NumPy dtype. +We expect to deprecate ``.dtype`` in the future to prevent interpreting +array-like objects with a ``.dtype`` attribute as a dtype. +If you wish you can implement ``__numpy_dtype__`` to ensure an earlier +warning or error (``.dtype`` is ignored if this is found). 
diff --git a/doc/source/user/basics.interoperability.rst b/doc/source/user/basics.interoperability.rst index ca0c39d7081f..b1c115ff1de0 100644 --- a/doc/source/user/basics.interoperability.rst +++ b/doc/source/user/basics.interoperability.rst @@ -65,6 +65,18 @@ and outputs a NumPy ndarray (which is generally a view of the input object's dat buffer). The :ref:`dlpack:python-spec` page explains the ``__dlpack__`` protocol in detail. +``dtype`` interoperability +~~~~~~~~~~~~~~~~~~~~~~~~~~ +Similar to ``__array__()`` for array objects, defining ``__numpy_dtype__`` +allows a custom dtype object to be interoperable with NumPy. +The ``__numpy_dtype__`` must return a NumPy dtype instance (note that +``np.float64`` is not a dtype instance, ``np.dtype(np.float64)`` is). + +.. versionadded:: 2.4 + Before NumPy 2.4 a ``.dtype`` attribute was treated similarly. As of NumPy 2.4 + both is accepted and implementing ``__numpy_dtype__`` prevents ``.dtype`` + from being checked. + The array interface protocol ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/numpy/_core/src/multiarray/descriptor.c b/numpy/_core/src/multiarray/descriptor.c index 852a2c768948..37776f6a60cc 100644 --- a/numpy/_core/src/multiarray/descriptor.c +++ b/numpy/_core/src/multiarray/descriptor.c @@ -15,6 +15,7 @@ #include "npy_config.h" #include "npy_ctypes.h" #include "npy_import.h" +#include "npy_pycompat.h" // PyObject_GetOptionalAttr #include "_datetime.h" @@ -84,63 +85,54 @@ _try_convert_from_ctypes_type(PyTypeObject *type) } /* - * This function creates a dtype object when the object has a "dtype" attribute, - * and it can be converted to a dtype object. + * This function creates a dtype object when the object has a "__numpy_dtype__" + * or "dtype" attribute which must be valid NumPy dtype instance. * * Returns `Py_NotImplemented` if this is not possible. - * Currently the only failure mode for a NULL return is a RecursionError. 
*/ static PyArray_Descr * _try_convert_from_dtype_attr(PyObject *obj) { + int used_dtype_attr = 0; /* For arbitrary objects that have a "dtype" attribute */ - PyObject *dtypedescr = PyObject_GetAttrString(obj, "dtype"); - if (dtypedescr == NULL) { + PyObject *attr; + int res = PyObject_GetOptionalAttr(obj, npy_interned_str.numpy_dtype, &attr); + if (res < 0) { + return NULL; + } + else if (res == 0) { /* - * This can be reached due to recursion limit being hit while fetching - * the attribute (tested for py3.7). This removes the custom message. + * When "__numpy_dtype__" does not exist, also check "dtype". This should + * be removed in the future. + * We do however support a weird `class myclass(np.void): dtype = ...` + * syntax. */ - goto fail; - } - - if (PyArray_DescrCheck(dtypedescr)) { - /* The dtype attribute is already a valid descriptor */ - return (PyArray_Descr *)dtypedescr; + used_dtype_attr = 1; + int res = PyObject_GetOptionalAttr(obj, npy_interned_str.dtype, &attr); + if (res < 0) { + return NULL; + } + else if (res == 0) { + Py_INCREF(Py_NotImplemented); + return (PyArray_Descr *)Py_NotImplemented; + } } - - if (Py_EnterRecursiveCall( - " while trying to convert the given data type from its " - "`.dtype` attribute.") != 0) { - Py_DECREF(dtypedescr); + if (!PyArray_DescrCheck(attr)) { + if (PyType_Check(obj) && PyObject_HasAttrString(attr, "__get__")) { + /* If the object has a __get__, assume this is a class property. */ + Py_DECREF(attr); + Py_INCREF(Py_NotImplemented); + return (PyArray_Descr *)Py_NotImplemented; + } + PyErr_Format(PyExc_ValueError, + "Could not convert %R to a NumPy dtype (via `.%S` value %R).", obj, + used_dtype_attr ? 
npy_interned_str.dtype : npy_interned_str.numpy_dtype, + attr); + Py_DECREF(attr); return NULL; } - - PyArray_Descr *newdescr = _convert_from_any(dtypedescr, 0); - Py_DECREF(dtypedescr); - Py_LeaveRecursiveCall(); - if (newdescr == NULL) { - goto fail; - } - - Py_DECREF(newdescr); - PyErr_SetString(PyExc_ValueError, "dtype attribute is not a valid dtype instance"); - return NULL; - - fail: - /* Ignore all but recursion errors, to give ctypes a full try. */ - if (!PyErr_ExceptionMatches(PyExc_RecursionError)) { - PyErr_Clear(); - Py_INCREF(Py_NotImplemented); - return (PyArray_Descr *)Py_NotImplemented; - } - return NULL; -} - -/* Expose to another file with a prefixed name */ -NPY_NO_EXPORT PyArray_Descr * -_arraydescr_try_convert_from_dtype_attr(PyObject *obj) -{ - return _try_convert_from_dtype_attr(obj); + /* The dtype attribute is already a valid descriptor */ + return (PyArray_Descr *)attr; } /* diff --git a/numpy/_core/src/multiarray/descriptor.h b/numpy/_core/src/multiarray/descriptor.h index 820e53f0c3e8..284afabe96fc 100644 --- a/numpy/_core/src/multiarray/descriptor.h +++ b/numpy/_core/src/multiarray/descriptor.h @@ -44,9 +44,6 @@ NPY_NO_EXPORT PyObject *arraydescr_protocol_descr_get( NPY_NO_EXPORT PyObject * array_set_typeDict(PyObject *NPY_UNUSED(ignored), PyObject *args); -NPY_NO_EXPORT PyArray_Descr * -_arraydescr_try_convert_from_dtype_attr(PyObject *obj); - NPY_NO_EXPORT int is_dtype_struct_simple_unaligned_layout(PyArray_Descr *dtype); diff --git a/numpy/_core/src/multiarray/npy_static_data.c b/numpy/_core/src/multiarray/npy_static_data.c index c7f4c51c6675..997c798c665d 100644 --- a/numpy/_core/src/multiarray/npy_static_data.c +++ b/numpy/_core/src/multiarray/npy_static_data.c @@ -38,6 +38,7 @@ intern_strings(void) INTERN_STRING(array_ufunc, "__array_ufunc__"); INTERN_STRING(array_wrap, "__array_wrap__"); INTERN_STRING(array_finalize, "__array_finalize__"); + INTERN_STRING(numpy_dtype, "__numpy_dtype__"); INTERN_STRING(implementation, 
"_implementation"); INTERN_STRING(axis1, "axis1"); INTERN_STRING(axis2, "axis2"); diff --git a/numpy/_core/src/multiarray/npy_static_data.h b/numpy/_core/src/multiarray/npy_static_data.h index 365673ffb0b4..f3d1135ec044 100644 --- a/numpy/_core/src/multiarray/npy_static_data.h +++ b/numpy/_core/src/multiarray/npy_static_data.h @@ -24,6 +24,7 @@ typedef struct npy_interned_str_struct { PyObject *array_wrap; PyObject *array_finalize; PyObject *array_ufunc; + PyObject *numpy_dtype; PyObject *implementation; PyObject *axis1; PyObject *axis2; diff --git a/numpy/_core/src/multiarray/scalarapi.c b/numpy/_core/src/multiarray/scalarapi.c index 7d65d972998d..a602e312727b 100644 --- a/numpy/_core/src/multiarray/scalarapi.c +++ b/numpy/_core/src/multiarray/scalarapi.c @@ -11,8 +11,7 @@ #include "numpy/npy_math.h" #include "npy_config.h" - - +#include "npy_pycompat.h" // PyObject_GetOptionalAttr #include "array_coercion.h" #include "ctors.h" @@ -343,17 +342,34 @@ PyArray_DescrFromTypeObject(PyObject *type) /* Do special thing for VOID sub-types */ if (PyType_IsSubtype((PyTypeObject *)type, &PyVoidArrType_Type)) { + PyObject *attr; + _PyArray_LegacyDescr *conv = NULL; + int res = PyObject_GetOptionalAttr(type, npy_interned_str.dtype, &attr); + if (res < 0) { + return NULL; // Should be a rather criticial error, so just fail. + } + if (res == 1) { + if (!PyArray_DescrCheck(attr)) { + if (PyObject_HasAttrString(attr, "__get__")) { + /* If the object has a __get__, assume this is a class property. 
*/ + Py_DECREF(attr); + conv = NULL; + } + else { + PyErr_Format(PyExc_ValueError, + "`.dtype` attribute %R is not a valid dtype instance", + attr); + Py_DECREF(attr); + return NULL; + } + } + } + _PyArray_LegacyDescr *new = (_PyArray_LegacyDescr *)PyArray_DescrNewFromType(NPY_VOID); if (new == NULL) { return NULL; } - _PyArray_LegacyDescr *conv = (_PyArray_LegacyDescr *)( - _arraydescr_try_convert_from_dtype_attr(type)); - if (conv == NULL) { - Py_DECREF(new); - return NULL; - } - if ((PyObject *)conv != Py_NotImplemented && PyDataType_ISLEGACY(conv)) { + if (conv != NULL && PyDataType_ISLEGACY(conv)) { new->fields = conv->fields; Py_XINCREF(new->fields); new->names = conv->names; @@ -362,7 +378,7 @@ PyArray_DescrFromTypeObject(PyObject *type) new->subarray = conv->subarray; conv->subarray = NULL; } - Py_DECREF(conv); + Py_XDECREF(conv); Py_XDECREF(new->typeobj); new->typeobj = (PyTypeObject *)type; Py_INCREF(type); diff --git a/numpy/_core/tests/test_dtype.py b/numpy/_core/tests/test_dtype.py index 974e8765be47..616e1fec02e3 100644 --- a/numpy/_core/tests/test_dtype.py +++ b/numpy/_core/tests/test_dtype.py @@ -1582,19 +1582,19 @@ class dt: assert np.dtype(dt) == np.float64 assert np.dtype(dt()) == np.float64 - @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking") - @pytest.mark.skipif(IS_WASM, reason="Pyodide/WASM has limited stack size") - def test_recursion(self): + def test_recursive(self): + # This used to recurse. It now doesn't, we enforce the + # dtype attribute to be a dtype (and will not recurse). 
class dt: pass dt.dtype = dt - with pytest.raises(RecursionError): + with pytest.raises(ValueError): np.dtype(dt) dt_instance = dt() dt_instance.dtype = dt - with pytest.raises(RecursionError): + with pytest.raises(ValueError): np.dtype(dt_instance) def test_void_subtype(self): @@ -1607,21 +1607,58 @@ class dt(np.void): np.dtype(dt) np.dtype(dt(1)) - @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking") - @pytest.mark.skipif(IS_WASM, reason="Pyodide/WASM has limited stack size") - def test_void_subtype_recursion(self): + def test_void_subtype_recursive(self): + # Used to recurse, but dtype is now enforced to be a dtype instance + # so that we do not recurse. class vdt(np.void): pass vdt.dtype = vdt - with pytest.raises(RecursionError): + with pytest.raises(ValueError): np.dtype(vdt) - with pytest.raises(RecursionError): + with pytest.raises(ValueError): np.dtype(vdt(1)) +class TestFromDTypeProtocol: + def test_simple(self): + class A: + dtype = np.dtype("f8") + + assert np.dtype(A()) == np.dtype(np.float64) + + def test_not_a_dtype(self): + # This also prevents coercion as a trivial path, although + # a custom error may be nicer. + class ArrayLike: + __numpy_dtype__ = None + dtype = np.dtype("f8") + + with pytest.raises(ValueError, match=".*__numpy_dtype__.*"): + np.dtype(ArrayLike()) + + def test_prevent_dtype_explicit(self): + class ArrayLike: + @property + def __numpy_dtype__(self): + raise RuntimeError("my error!") + + with pytest.raises(RuntimeError, match="my error!"): + np.dtype(ArrayLike()) + + def test_type_object(self): + class TypeWithProperty: + @property + def __numpy_dtype__(self): + raise RuntimeError("not reached") + + # Arbitrary types go to object currently, and the + # protocol doesn't prevent that. 
+ assert np.dtype(TypeWithProperty) == object + + class TestDTypeClasses: @pytest.mark.parametrize("dtype", list(np.typecodes['All']) + [rational]) def test_basic_dtypes_subclass_properties(self, dtype): From 91f0451b2353b18cf981e2dd842ef673318dd8c6 Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Fri, 28 Nov 2025 10:13:38 -0500 Subject: [PATCH 0930/1718] MAINT: Implement some RAII classes and use them in stringdtype/casts.cpp (#30312) * MAINT: Implement some RAII classes and use them in stringdtype/casts.cpp * MAINT: Delete copy, move, etc. to avoid potential double-release by the RAII classes. * MAINT: Fix include guard name in raii_utils.hpp * MAINT: Fix a typo in raii_utils.hpp --- numpy/_core/src/common/raii_utils.hpp | 166 ++++++++++++++++++ .../src/multiarray/stringdtype/casts.cpp | 27 ++- 2 files changed, 176 insertions(+), 17 deletions(-) create mode 100644 numpy/_core/src/common/raii_utils.hpp diff --git a/numpy/_core/src/common/raii_utils.hpp b/numpy/_core/src/common/raii_utils.hpp new file mode 100644 index 000000000000..e92d0eae9269 --- /dev/null +++ b/numpy/_core/src/common/raii_utils.hpp @@ -0,0 +1,166 @@ +#ifndef NUMPY_CORE_SRC_COMMON_RAII_UTILS_HPP_ +#define NUMPY_CORE_SRC_COMMON_RAII_UTILS_HPP_ + +// +// Utilities for RAII management of resources. +// +// Another (and arguably clearer) name for this resource management pattern +// is "Scope-Bound Resource Management", but RAII is much more common, so we +// use the familiar acronym. +// + +#include + +// For npy_string_allocator, PyArray_StringDTypeObject, NPY_NO_EXPORT: +#include "numpy/ndarraytypes.h" + +// Forward declarations not currently in a header. +// XXX Where should these be moved? +NPY_NO_EXPORT npy_string_allocator * +NpyString_acquire_allocator(const PyArray_StringDTypeObject *descr); +NPY_NO_EXPORT void +NpyString_release_allocator(npy_string_allocator *allocator); + + +namespace np { namespace raii { + +// +// RAII for PyGILState_* API. 
+// +// In C++ code, use this at the beginning of a scope, e.g. +// +// { +// np::raii::EnsureGIL ensure_gil{}; +// [code that uses the Python C API here] +// } +// +// instead of +// +// PyGILState_STATE gil_state = PyGILState_Ensure(); +// [code that uses the Python C API here] +// PyGILState_Release(gil_state); +// +// or +// NPY_ALLOW_C_API_DEF +// NPY_ALLOW_C_API +// [code that uses the Python C API here] +// NPY_DISABLE_C_API +// +// This ensures that PyGILState_Release(gil_state) is called, even if the +// wrapped code throws an exception or executes a return or a goto. +// +class EnsureGIL +{ + PyGILState_STATE gil_state; + +public: + + EnsureGIL() { + gil_state = PyGILState_Ensure(); + } + + ~EnsureGIL() { + PyGILState_Release(gil_state); + } + + EnsureGIL(const EnsureGIL&) = delete; + EnsureGIL(EnsureGIL&& other) = delete; + EnsureGIL& operator=(const EnsureGIL&) = delete; + EnsureGIL& operator=(EnsureGIL&&) = delete; +}; + + +// +// RAII for Python thread state. +// +// In C++ code, use this at the beginning of a scope, e.g. +// +// { +// np::raii::SaveThreadState save_thread_state{}; +// [code...] +// } +// +// instead of +// +// PyThreadState *thread_state = PyEval_SaveThread(); +// [code...] +// PyEval_RestoreThread(thread_state); +// +// or +// Py_BEGIN_ALLOW_THREADS +// [code...] +// Py_END_ALLOW_THREADS +// +// or +// NPY_BEGIN_THREADS_DEF +// NPY_BEGIN_THREADS +// [code...] +// NPY_END_THREADS +// +// This ensures that PyEval_RestoreThread(thread_state) is called, even +// if the wrapped code throws an exception or executes a return or a goto. 
+// +class SaveThreadState +{ + PyThreadState *thread_state; + +public: + + SaveThreadState() { + thread_state = PyEval_SaveThread(); + } + + ~SaveThreadState() { + PyEval_RestoreThread(thread_state); + } + + SaveThreadState(const SaveThreadState&) = delete; + SaveThreadState(SaveThreadState&& other) = delete; + SaveThreadState& operator=(const SaveThreadState&) = delete; + SaveThreadState& operator=(SaveThreadState&&) = delete; +}; + + +// +// RAII for npy_string_allocator. +// +// Instead of +// +// npy_string_allocator *allocator = NpyString_acquire_allocator(descr); +// [code that uses allocator] +// NpyString_release_allocator(allocator); +// +// use +// +// { +// np::raii::NpyStringAcquireAllocator alloc(descr); +// [code that uses alloc.allocator()] +// } +// +class NpyStringAcquireAllocator +{ + npy_string_allocator *_allocator; + +public: + + NpyStringAcquireAllocator(PyArray_StringDTypeObject *descr) { + _allocator = NpyString_acquire_allocator(descr); + } + + ~NpyStringAcquireAllocator() { + NpyString_release_allocator(_allocator); + } + + NpyStringAcquireAllocator(const NpyStringAcquireAllocator&) = delete; + NpyStringAcquireAllocator(NpyStringAcquireAllocator&& other) = delete; + NpyStringAcquireAllocator& operator=(const NpyStringAcquireAllocator&) = delete; + NpyStringAcquireAllocator& operator=(NpyStringAcquireAllocator&&) = delete; + + npy_string_allocator *allocator() { + return _allocator; + } +}; + +}} // namespace np { namespace raii { + +#endif diff --git a/numpy/_core/src/multiarray/stringdtype/casts.cpp b/numpy/_core/src/multiarray/stringdtype/casts.cpp index 3632e359c9a9..20ef3013bf86 100644 --- a/numpy/_core/src/multiarray/stringdtype/casts.cpp +++ b/numpy/_core/src/multiarray/stringdtype/casts.cpp @@ -18,6 +18,7 @@ #include "numpyos.h" #include "umathmodule.h" #include "gil_utils.h" +#include "raii_utils.hpp" #include "static_string.h" #include "dtypemeta.h" #include "dtype.h" @@ -1910,7 +1911,8 @@ string_to_bytes(PyArrayMethod_Context 
*context, char *const data[], NpyAuxData *NPY_UNUSED(auxdata)) { PyArray_StringDTypeObject *descr = (PyArray_StringDTypeObject *)context->descriptors[0]; - npy_string_allocator *allocator = NpyString_acquire_allocator(descr); + np::raii::NpyStringAcquireAllocator alloc(descr); + int has_null = descr->na_object != NULL; int has_string_na = descr->has_string_na; const npy_static_string *default_string = &descr->default_string; @@ -1926,22 +1928,22 @@ string_to_bytes(PyArrayMethod_Context *context, char *const data[], const npy_packed_static_string *ps = (npy_packed_static_string *)in; npy_static_string s = {0, NULL}; if (load_nullable_string(ps, &s, has_null, has_string_na, - default_string, na_name, allocator, + default_string, na_name, alloc.allocator(), "in string to bytes cast") == -1) { - goto fail; + return -1; } for (size_t i=0; i 127) { - NPY_ALLOW_C_API_DEF; - NPY_ALLOW_C_API; + np::raii::EnsureGIL ensure_gil{}; + PyObject *str = PyUnicode_FromStringAndSize(s.buf, s.size); if (str == NULL) { PyErr_SetString( PyExc_UnicodeEncodeError, "Invalid character encountered during unicode encoding." 
); - goto fail; + return -1; } PyObject *exc = PyObject_CallFunction( @@ -1956,14 +1958,13 @@ string_to_bytes(PyArrayMethod_Context *context, char *const data[], if (exc == NULL) { Py_DECREF(str); - goto fail; + return -1; } PyErr_SetObject(PyExceptionInstance_Class(exc), exc); Py_DECREF(exc); Py_DECREF(str); - NPY_DISABLE_C_API; - goto fail; + return -1; } } @@ -1976,15 +1977,7 @@ string_to_bytes(PyArrayMethod_Context *context, char *const data[], out += out_stride; } - NpyString_release_allocator(allocator); - return 0; - -fail: - - NpyString_release_allocator(allocator); - - return -1; } static PyType_Slot s2bytes_slots[] = { From 51f39f3110713b0ae93d4799f3108cb7d11bf155 Mon Sep 17 00:00:00 2001 From: Faizan-Ul Huda <61704685+faizanhuda12@users.noreply.github.com> Date: Fri, 28 Nov 2025 11:12:46 -0500 Subject: [PATCH 0931/1718] fixed documentation issue (#30318) fixed an issue I found related to doucmentation --- doc/TESTS.rst | 2 -- doc/source/reference/index.rst | 1 + 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/doc/TESTS.rst b/doc/TESTS.rst index e57cf7711da8..803625e727ae 100644 --- a/doc/TESTS.rst +++ b/doc/TESTS.rst @@ -143,8 +143,6 @@ module called ``test_yyy.py``. 
If you only need to test one aspect of More often, we need to group a number of tests together, so we create a test class:: - import pytest - # import xxx symbols from numpy.xxx.yyy import zzz import pytest diff --git a/doc/source/reference/index.rst b/doc/source/reference/index.rst index 02e3248953fb..aa6c692d6b2b 100644 --- a/doc/source/reference/index.rst +++ b/doc/source/reference/index.rst @@ -61,6 +61,7 @@ Other topics thread_safety global_state security + testing distutils_status_migration distutils_guide swig From 9f0519e83247b8a1b46c12fb583d4d575d992bd7 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Fri, 28 Nov 2025 17:18:33 +0100 Subject: [PATCH 0932/1718] BUG: Fix descriptor changes related build/parse value issues and skip test on 32bit (#30314) * TST: Skip test that requires 64bit system more explicitly This test wasn't always skipped reliably in debian builds. * BUG: Fix large-pickle round-trip and hash * Use int64 for parsing, but not uint64 (backward-compat) I guess this means we can't use the top flag for a bit longer ;) --- numpy/_core/src/multiarray/descriptor.c | 26 ++++++++++++------------- numpy/_core/src/multiarray/hashdescr.c | 2 +- numpy/_core/tests/test_dtype.py | 12 +++++++++++- numpy/_core/tests/test_nditer.py | 2 ++ 4 files changed, 27 insertions(+), 15 deletions(-) diff --git a/numpy/_core/src/multiarray/descriptor.c b/numpy/_core/src/multiarray/descriptor.c index 37776f6a60cc..1fc5b76d1f00 100644 --- a/numpy/_core/src/multiarray/descriptor.c +++ b/numpy/_core/src/multiarray/descriptor.c @@ -2866,13 +2866,13 @@ _descr_find_object(PyArray_Descr *self) static PyObject * arraydescr_setstate(_PyArray_LegacyDescr *self, PyObject *args) { - int elsize = -1, alignment = -1; + Py_ssize_t elsize = -1, alignment = -1; int version = 4; char endian; PyObject *endian_obj; PyObject *subarray, *fields, *names = NULL, *metadata=NULL; int incref_names = 1; - int int_dtypeflags = 0; + npy_int64 signed_dtypeflags = 0; npy_uint64 dtypeflags; if 
(!PyDataType_ISLEGACY(self)) { @@ -2891,24 +2891,24 @@ arraydescr_setstate(_PyArray_LegacyDescr *self, PyObject *args) } switch (PyTuple_GET_SIZE(PyTuple_GET_ITEM(args,0))) { case 9: - if (!PyArg_ParseTuple(args, "(iOOOOiiiO):__setstate__", + if (!PyArg_ParseTuple(args, "(iOOOOnnkO):__setstate__", &version, &endian_obj, &subarray, &names, &fields, &elsize, - &alignment, &int_dtypeflags, &metadata)) { + &alignment, &signed_dtypeflags, &metadata)) { PyErr_Clear(); return NULL; } break; case 8: - if (!PyArg_ParseTuple(args, "(iOOOOiii):__setstate__", + if (!PyArg_ParseTuple(args, "(iOOOOnnk):__setstate__", &version, &endian_obj, &subarray, &names, &fields, &elsize, - &alignment, &int_dtypeflags)) { + &alignment, &signed_dtypeflags)) { return NULL; } break; case 7: - if (!PyArg_ParseTuple(args, "(iOOOOii):__setstate__", + if (!PyArg_ParseTuple(args, "(iOOOOnn):__setstate__", &version, &endian_obj, &subarray, &names, &fields, &elsize, &alignment)) { @@ -2916,7 +2916,7 @@ arraydescr_setstate(_PyArray_LegacyDescr *self, PyObject *args) } break; case 6: - if (!PyArg_ParseTuple(args, "(iOOOii):__setstate__", + if (!PyArg_ParseTuple(args, "(iOOOnn):__setstate__", &version, &endian_obj, &subarray, &fields, &elsize, &alignment)) { @@ -2925,7 +2925,7 @@ arraydescr_setstate(_PyArray_LegacyDescr *self, PyObject *args) break; case 5: version = 0; - if (!PyArg_ParseTuple(args, "(OOOii):__setstate__", + if (!PyArg_ParseTuple(args, "(OOOnn):__setstate__", &endian_obj, &subarray, &fields, &elsize, &alignment)) { return NULL; @@ -3173,12 +3173,12 @@ arraydescr_setstate(_PyArray_LegacyDescr *self, PyObject *args) * flags as an int even though it actually was a char in the PyArray_Descr * structure */ - if (int_dtypeflags < 0 && int_dtypeflags >= -128) { + if (signed_dtypeflags < 0 && signed_dtypeflags >= -128) { /* NumPy used to use a char. So normalize if signed. 
*/ - int_dtypeflags += 128; + signed_dtypeflags += 128; } - dtypeflags = int_dtypeflags; - if (dtypeflags != int_dtypeflags) { + dtypeflags = (npy_uint64)signed_dtypeflags; + if (dtypeflags != signed_dtypeflags) { PyErr_Format(PyExc_ValueError, "incorrect value for flags variable (overflow)"); return NULL; diff --git a/numpy/_core/src/multiarray/hashdescr.c b/numpy/_core/src/multiarray/hashdescr.c index 0117ef218a81..853e247e0b74 100644 --- a/numpy/_core/src/multiarray/hashdescr.c +++ b/numpy/_core/src/multiarray/hashdescr.c @@ -79,7 +79,7 @@ static int _array_descr_builtin(PyArray_Descr* descr, PyObject *l) * For builtin type, hash relies on : kind + byteorder + flags + * type_num + elsize + alignment */ - t = Py_BuildValue("(cccii)", descr->kind, nbyteorder, + t = Py_BuildValue("(ccKnn)", descr->kind, nbyteorder, descr->flags, descr->elsize, descr->alignment); for(i = 0; i < PyTuple_Size(t); ++i) { diff --git a/numpy/_core/tests/test_dtype.py b/numpy/_core/tests/test_dtype.py index 616e1fec02e3..8819262c4e21 100644 --- a/numpy/_core/tests/test_dtype.py +++ b/numpy/_core/tests/test_dtype.py @@ -18,6 +18,7 @@ from numpy._core._rational_tests import rational from numpy.testing import ( HAS_REFCOUNT, + IS_64BIT, IS_PYPY, IS_PYSTON, IS_WASM, @@ -1376,6 +1377,15 @@ def check_pickling(self, dtype): assert_equal(x, y) assert_equal(x[0], y[0]) + @pytest.mark.skipif(not IS_64BIT, reason="test requires 64-bit system") + @pytest.mark.xfail(reason="dtype conversion doesn't allow this yet.") + def test_pickling_large(self): + # The actual itemsize is larger than a c-integer here. 
+ dtype = np.dtype(f"({2**31},)i") + self.check_pickling(dtype) + dtype = np.dtype(f"({2**31},)i", metadata={"a": "b"}) + self.check_pickling(dtype) + @pytest.mark.parametrize('t', [int, float, complex, np.int32, str, object, bool]) def test_builtin(self, t): @@ -1440,7 +1450,7 @@ def test_pickle_dtype(self, dt): for proto in range(pickle.HIGHEST_PROTOCOL + 1): roundtrip_dt = pickle.loads(pickle.dumps(dt, proto)) assert roundtrip_dt == dt - assert hash(dt) == pre_pickle_hash + assert hash(roundtrip_dt) == pre_pickle_hash class TestPromotion: diff --git a/numpy/_core/tests/test_nditer.py b/numpy/_core/tests/test_nditer.py index 84a88d54548e..943c25cdaa13 100644 --- a/numpy/_core/tests/test_nditer.py +++ b/numpy/_core/tests/test_nditer.py @@ -12,6 +12,7 @@ from numpy import all, arange, array, nditer from numpy.testing import ( HAS_REFCOUNT, + IS_64BIT, IS_PYPY, IS_WASM, assert_, @@ -3404,6 +3405,7 @@ def test_arbitrary_number_of_ops_nested(): @pytest.mark.slow +@pytest.mark.skipif(not IS_64BIT, reason="test requires 64-bit system") @requires_memory(9 * np.iinfo(np.intc).max) @pytest.mark.thread_unsafe(reason="crashes with low memory") def test_arbitrary_number_of_ops_error(): From 3a811fb9af105372da2d07d83e77ae085d51f54e Mon Sep 17 00:00:00 2001 From: Steven Hur <138725592+Jongwan93@users.noreply.github.com> Date: Fri, 28 Nov 2025 12:20:13 -0500 Subject: [PATCH 0933/1718] BUG: Fix misleading ValueError in convolve on empty inputs due to variable swapping (#30277) * BUG: Fix misleading ValueError in convolve on empty inputs Previously, numpy.convolve swapped arguments for optimization if the second argument was longer than the first. This occurred before checking for empty arrays. Consequently, if the first argument was empty, the swap would cause the error message to incorrectly report that the second argument was empty (e.g., "v cannot be empty"). 
--- numpy/_core/numeric.py | 4 ++-- numpy/_core/tests/test_numeric.py | 10 ++++++++++ 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/numpy/_core/numeric.py b/numpy/_core/numeric.py index b050b81834c6..11527c9de442 100644 --- a/numpy/_core/numeric.py +++ b/numpy/_core/numeric.py @@ -893,12 +893,12 @@ def convolve(a, v, mode='full'): """ a, v = array(a, copy=None, ndmin=1), array(v, copy=None, ndmin=1) - if (len(v) > len(a)): - a, v = v, a if len(a) == 0: raise ValueError('a cannot be empty') if len(v) == 0: raise ValueError('v cannot be empty') + if len(v) > len(a): + a, v = v, a return multiarray.correlate(a, v[::-1], mode) diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py index 2c5535a19c77..9e71b7c6b1b8 100644 --- a/numpy/_core/tests/test_numeric.py +++ b/numpy/_core/tests/test_numeric.py @@ -3686,6 +3686,16 @@ def test_mode(self): with assert_raises(TypeError): np.convolve(d, k, mode=None) + def test_convolve_empty_input_error_message(self): + """ + Test that convolve raises the correct error message when inputs are empty. + Regression test for gh-30272 (variable swapping bug). 
+ """ + with pytest.raises(ValueError, match="a cannot be empty"): + np.convolve(np.array([]), np.array([1, 2])) + + with pytest.raises(ValueError, match="v cannot be empty"): + np.convolve(np.array([1, 2]), np.array([])) class TestArgwhere: From 0a5840644cfb0a1bbd4ce1dceddb8dc594e805e6 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Sat, 29 Nov 2025 01:18:53 +0100 Subject: [PATCH 0934/1718] TYP: ``__numpy_dtype__`` (#30321) * TYP: ``__numpy_dtype__`` * TYP: type-test for ``dtype`` construction with ``__numpy_dtype__`` --- numpy/_typing/__init__.py | 1 + numpy/_typing/_dtype_like.py | 15 +++++++++++---- numpy/lib/_index_tricks_impl.pyi | 4 ++-- numpy/typing/tests/data/reveal/dtype.pyi | 5 +++++ numpy/typing/tests/test_runtime.py | 5 ----- 5 files changed, 19 insertions(+), 11 deletions(-) diff --git a/numpy/_typing/__init__.py b/numpy/_typing/__init__.py index 11ed6e62f1be..22eb6e3f30bb 100644 --- a/numpy/_typing/__init__.py +++ b/numpy/_typing/__init__.py @@ -98,6 +98,7 @@ _DTypeLikeTD64 as _DTypeLikeTD64, _DTypeLikeUInt as _DTypeLikeUInt, _DTypeLikeVoid as _DTypeLikeVoid, + _HasDType as _HasDType, _SupportsDType as _SupportsDType, _VoidDTypeLike as _VoidDTypeLike, ) diff --git a/numpy/_typing/_dtype_like.py b/numpy/_typing/_dtype_like.py index b44dd06e0d7f..34c4bd44f519 100644 --- a/numpy/_typing/_dtype_like.py +++ b/numpy/_typing/_dtype_like.py @@ -1,5 +1,5 @@ from collections.abc import Sequence # noqa: F811 -from typing import Any, Protocol, TypeAlias, TypedDict, TypeVar, runtime_checkable +from typing import Any, Protocol, TypeAlias, TypedDict, TypeVar import numpy as np @@ -19,6 +19,7 @@ ) _ScalarT = TypeVar("_ScalarT", bound=np.generic) +_DTypeT = TypeVar("_DTypeT", bound=np.dtype) _DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, covariant=True) _DTypeLikeNested: TypeAlias = Any # TODO: wait for support for recursive types @@ -40,13 +41,19 @@ class _DTypeDict(_DTypeDictBase, total=False): aligned: bool -# A protocol for anything with the dtype 
attribute -@runtime_checkable -class _SupportsDType(Protocol[_DTypeT_co]): +class _HasDType(Protocol[_DTypeT_co]): @property def dtype(self) -> _DTypeT_co: ... +class _HasNumPyDType(Protocol[_DTypeT_co]): + @property + def __numpy_dtype__(self, /) -> _DTypeT_co: ... + + +_SupportsDType: TypeAlias = _HasDType[_DTypeT] | _HasNumPyDType[_DTypeT] + + # A subset of `npt.DTypeLike` that can be parametrized w.r.t. `np.generic` _DTypeLike: TypeAlias = type[_ScalarT] | np.dtype[_ScalarT] | _SupportsDType[np.dtype[_ScalarT]] diff --git a/numpy/lib/_index_tricks_impl.pyi b/numpy/lib/_index_tricks_impl.pyi index 570688fe8d62..aad3124445a3 100644 --- a/numpy/lib/_index_tricks_impl.pyi +++ b/numpy/lib/_index_tricks_impl.pyi @@ -24,9 +24,9 @@ from numpy._typing import ( _ArrayLike, _DTypeLike, _FiniteNestedSequence, + _HasDType, _NestedSequence, _SupportsArray, - _SupportsDType, ) __all__ = [ # noqa: RUF022 @@ -235,7 +235,7 @@ class IndexExpression(Generic[_BoolT_co]): def __getitem__(self: IndexExpression[L[False]], item: _T) -> _T: ... @overload -def ix_(*args: _FiniteNestedSequence[_SupportsDType[_DTypeT]]) -> tuple[np.ndarray[_AnyShape, _DTypeT], ...]: ... +def ix_(*args: _FiniteNestedSequence[_HasDType[_DTypeT]]) -> tuple[np.ndarray[_AnyShape, _DTypeT], ...]: ... @overload def ix_(*args: str | _NestedSequence[str]) -> tuple[NDArray[np.str_], ...]: ... 
@overload diff --git a/numpy/typing/tests/data/reveal/dtype.pyi b/numpy/typing/tests/data/reveal/dtype.pyi index 1794c944b3ae..7f670b3be51c 100644 --- a/numpy/typing/tests/data/reveal/dtype.pyi +++ b/numpy/typing/tests/data/reveal/dtype.pyi @@ -125,3 +125,8 @@ assert_type(dtype_V["f0"], np.dtype) assert_type(dtype_V[0], np.dtype) assert_type(dtype_V[["f0", "f1"]], np.dtype[np.void]) assert_type(dtype_V[["f0"]], np.dtype[np.void]) + +class _D: + __numpy_dtype__: np.dtype[np.int8] + +assert_type(np.dtype(_D()), np.dtype[np.int8]) diff --git a/numpy/typing/tests/test_runtime.py b/numpy/typing/tests/test_runtime.py index 8e49fda8185c..462fe4eabdc0 100644 --- a/numpy/typing/tests/test_runtime.py +++ b/numpy/typing/tests/test_runtime.py @@ -93,7 +93,6 @@ def test_keys() -> None: PROTOCOLS: dict[str, tuple[type[Any], object]] = { - "_SupportsDType": (_npt._SupportsDType, np.int64(1)), "_SupportsArray": (_npt._SupportsArray, np.arange(10)), "_SupportsArrayFunc": (_npt._SupportsArrayFunc, np.arange(10)), "_NestedSequence": (_npt._NestedSequence, [1]), @@ -107,9 +106,5 @@ def test_isinstance(self, cls: type[Any], obj: object) -> None: assert not isinstance(None, cls) def test_issubclass(self, cls: type[Any], obj: object) -> None: - if cls is _npt._SupportsDType: - pytest.xfail( - "Protocols with non-method members don't support issubclass()" - ) assert issubclass(type(obj), cls) assert not issubclass(type(None), cls) From 82a7312eea1e72b73b6a22b8828d9d2851b7d171 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Sat, 29 Nov 2025 01:30:17 +0100 Subject: [PATCH 0935/1718] TYP: ``ndenumerate`` generic type parameter default (#30324) --- numpy/lib/_index_tricks_impl.pyi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/lib/_index_tricks_impl.pyi b/numpy/lib/_index_tricks_impl.pyi index aad3124445a3..ff316f566993 100644 --- a/numpy/lib/_index_tricks_impl.pyi +++ b/numpy/lib/_index_tricks_impl.pyi @@ -53,7 +53,7 @@ _TupleT = TypeVar("_TupleT", 
bound=tuple[Any, ...]) _ArrayT = TypeVar("_ArrayT", bound=NDArray[Any]) _DTypeT = TypeVar("_DTypeT", bound=np.dtype) _ScalarT = TypeVar("_ScalarT", bound=np.generic) -_ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, covariant=True) +_ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, default=Any, covariant=True) _BoolT_co = TypeVar("_BoolT_co", bound=bool, default=bool, covariant=True) _AxisT_co = TypeVar("_AxisT_co", bound=int, default=L[0], covariant=True) From 355a1ac4e39f431d13266c99549e72df34683419 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Sat, 29 Nov 2025 01:32:48 +0100 Subject: [PATCH 0936/1718] DOC, TYP: document the ``numpy.typing`` deprecations from 2.3 (#30325) --- numpy/typing/__init__.py | 34 +++++++++++++++++++++++++++++++++- numpy/typing/mypy_plugin.py | 5 +++++ 2 files changed, 38 insertions(+), 1 deletion(-) diff --git a/numpy/typing/__init__.py b/numpy/typing/__init__.py index 163655bd7662..ef4c0885257b 100644 --- a/numpy/typing/__init__.py +++ b/numpy/typing/__init__.py @@ -104,13 +104,45 @@ >>> import numpy.typing as npt >>> T = TypeVar("T", bound=npt.NBitBase) - >>> def func(a: "np.floating[T]", b: "np.floating[T]") -> "np.floating[T]": + >>> def func(a: np.floating[T], b: np.floating[T]) -> np.floating[T]: ... ... Consequently, the likes of `~numpy.float16`, `~numpy.float32` and `~numpy.float64` are still sub-types of `~numpy.floating`, but, contrary to runtime, they're not necessarily considered as sub-classes. +.. deprecated:: 2.3 + The :class:`~numpy.typing.NBitBase` helper is deprecated and will be + removed in a future release. Prefer expressing precision relationships via + ``typing.overload`` or ``TypeVar`` definitions bounded by concrete scalar + classes. For example: + + .. code-block:: python + + from typing import TypeVar + import numpy as np + + S = TypeVar("S", bound=np.floating) + + def func(a: S, b: S) -> S: + ... + + or in the case of different input types mapping to different output types: + + .. 
code-block:: python + + from typing import overload + import numpy as np + + @overload + def phase(x: np.complex64) -> np.float32: ... + @overload + def phase(x: np.complex128) -> np.float64: ... + @overload + def phase(x: np.clongdouble) -> np.longdouble: ... + def phase(x: np.complexfloating) -> np.floating: + ... + Timedelta64 ~~~~~~~~~~~ diff --git a/numpy/typing/mypy_plugin.py b/numpy/typing/mypy_plugin.py index dc1e2564fc32..fb78eb077c44 100644 --- a/numpy/typing/mypy_plugin.py +++ b/numpy/typing/mypy_plugin.py @@ -18,6 +18,11 @@ .. versionadded:: 1.22 .. deprecated:: 2.3 + The :mod:`numpy.typing.mypy_plugin` entry-point is deprecated in favor of + platform-agnostic static type inference. Remove + ``numpy.typing.mypy_plugin`` from the ``plugins`` section of your mypy + configuration; if that surfaces new errors, please open an issue with a + minimal reproducer. Examples -------- From d0a303ea4d60bdb2edd028d86757867085f83250 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Sat, 29 Nov 2025 01:34:19 +0100 Subject: [PATCH 0937/1718] TYP: ``ma.mrecords.MaskedRecords`` generic type parameter defaults (#30326) --- numpy/ma/mrecords.pyi | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/numpy/ma/mrecords.pyi b/numpy/ma/mrecords.pyi index d5cdf097bb84..737a34ebdb70 100644 --- a/numpy/ma/mrecords.pyi +++ b/numpy/ma/mrecords.pyi @@ -1,8 +1,10 @@ -from typing import Any, TypeVar +from typing import Any, Generic +from typing_extensions import TypeVar -from numpy import dtype +import numpy as np +from numpy._typing import _AnyShape -from . 
import MaskedArray +from .core import MaskedArray __all__ = [ "MaskedRecords", @@ -13,10 +15,10 @@ __all__ = [ "addfield", ] -_ShapeT_co = TypeVar("_ShapeT_co", covariant=True, bound=tuple[int, ...]) -_DTypeT_co = TypeVar("_DTypeT_co", bound=dtype, covariant=True) +_ShapeT_co = TypeVar("_ShapeT_co", bound=tuple[int, ...], default=_AnyShape, covariant=True) +_DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=True) -class MaskedRecords(MaskedArray[_ShapeT_co, _DTypeT_co]): +class MaskedRecords(MaskedArray[_ShapeT_co, _DTypeT_co], Generic[_ShapeT_co, _DTypeT_co]): def __new__( cls, shape, From 250285f3cefd950e3a41e049a8733a3f6a9b920c Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Sat, 29 Nov 2025 01:36:49 +0100 Subject: [PATCH 0938/1718] TYP: ``_core._umath_tests`` module stubs (#30327) --- numpy/_core/_umath_tests.pyi | 47 ++++++++++++++++++++++++++++++++++++ numpy/_core/meson.build | 5 ++-- tools/stubtest/allowlist.txt | 13 ++++++++-- 3 files changed, 61 insertions(+), 4 deletions(-) create mode 100644 numpy/_core/_umath_tests.pyi diff --git a/numpy/_core/_umath_tests.pyi b/numpy/_core/_umath_tests.pyi new file mode 100644 index 000000000000..696cec3b755e --- /dev/null +++ b/numpy/_core/_umath_tests.pyi @@ -0,0 +1,47 @@ +# undocumented internal testing module for ufunc features, defined in +# numpy/_core/src/umath/_umath_tests.c.src + +from typing import Final, Literal as L, TypedDict, type_check_only + +import numpy as np +from numpy._typing import _GUFunc_Nin2_Nout1, _UFunc_Nin1_Nout1, _UFunc_Nin2_Nout1 + +@type_check_only +class _TestDispatchResult(TypedDict): + func: str # e.g. 'func_AVX2' + var: str # e.g. 'var_AVX2' + func_xb: str # e.g. 'func_AVX2' + var_xb: str # e.g. 'var_AVX2' + all: list[str] # e.g. ['func_AVX2', 'func_SSE41', 'func'] + +### + +# undocumented +def test_signature( + nin: int, nout: int, signature: str, / +) -> tuple[ + L[0, 1], # core_enabled (0 for scalar ufunc; 1 for generalized ufunc) + tuple[int, ...] 
| None, # core_num_dims + tuple[int, ...] | None, # core_dim_ixs + tuple[int, ...] | None, # core_dim_flags + tuple[int, ...] | None, # core_dim_sizes +]: ... + +# undocumented +def test_dispatch() -> _TestDispatchResult: ... + +# undocumented ufuncs and gufuncs +always_error: Final[_UFunc_Nin2_Nout1[L["always_error"], L[1], None]] = ... +always_error_unary: Final[_UFunc_Nin1_Nout1[L["always_error_unary"], L[1], None]] = ... +always_error_gufunc: Final[_GUFunc_Nin2_Nout1[L["always_error_gufunc"], L[1], None, L["(i),()->()"]]] = ... +inner1d: Final[_GUFunc_Nin2_Nout1[L["inner1d"], L[2], None, L["(i),(i)->()"]]] = ... +innerwt: Final[np.ufunc] = ... # we have no specialized type for 3->1 gufuncs +matrix_multiply: Final[_GUFunc_Nin2_Nout1[L["matrix_multiply"], L[3], None, L["(m,n),(n,p)->(m,p)"]]] = ... +matmul: Final[_GUFunc_Nin2_Nout1[L["matmul"], L[3], None, L["(m?,n),(n,p?)->(m?,p?)"]]] = ... +euclidean_pdist: Final[_GUFunc_Nin2_Nout1[L["euclidean_pdist"], L[2], None, L["(n,d)->(p)"]]] = ... +cumsum: Final[np.ufunc] = ... # we have no specialized type for 1->1 gufuncs +inner1d_no_doc: Final[_GUFunc_Nin2_Nout1[L["inner1d_no_doc"], L[2], None, L["(i),(i)->()"]]] = ... +cross1d: Final[_GUFunc_Nin2_Nout1[L["cross1d"], L[2], None, L["(3),(3)->(3)"]]] = ... +_pickleable_module_global_ufunc: Final[np.ufunc] = ... # 0->0 ufunc; segfaults if called +indexed_negative: Final[_UFunc_Nin1_Nout1[L["indexed_negative"], L[0], L[0]]] = ... # ntypes=0; can't be called +conv1d_full: Final[_GUFunc_Nin2_Nout1[L["conv1d_full"], L[1], None, L["(m),(n)->(p)"]]] = ... 
diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index 487a5a684f97..3a0b52c3b079 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -927,7 +927,7 @@ foreach gen_mtargets : [ 'loops_arithm_fp.dispatch.h', src_file.process('src/umath/loops_arithm_fp.dispatch.c.src'), [ - X86_V3, X86_V2, + X86_V3, X86_V2, ASIMD, NEON, VSX3, VSX2, VXE, VX, @@ -960,7 +960,7 @@ foreach gen_mtargets : [ 'loops_exponent_log.dispatch.h', src_file.process('src/umath/loops_exponent_log.dispatch.c.src'), [ - X86_V4, X86_V3, + X86_V4, X86_V3, ] ], [ @@ -1403,6 +1403,7 @@ python_sources = [ '_type_aliases.pyi', '_ufunc_config.py', '_ufunc_config.pyi', + '_umath_tests.pyi', 'arrayprint.py', 'arrayprint.pyi', 'cversions.py', diff --git a/tools/stubtest/allowlist.txt b/tools/stubtest/allowlist.txt index 2cfa30d3c0b1..30f4cd120cc9 100644 --- a/tools/stubtest/allowlist.txt +++ b/tools/stubtest/allowlist.txt @@ -59,7 +59,7 @@ numpy\.core\.shape_base.* numpy\.core\.umath.* numpy\.typing\.mypy_plugin -# ufuncs +# ufuncs, see https://github.com/python/mypy/issues/20223 numpy\.(\w+\.)*abs numpy\.(\w+\.)*absolute numpy\.(\w+\.)*acos @@ -177,7 +177,16 @@ numpy\.(\w+\.)*istitle numpy\.(\w+\.)*isupper numpy\.(\w+\.)*str_len numpy\._core\._methods\.umr_bitwise_count -numpy\._core\._methods\.umr_bitwise_count +numpy\._core\._umath_tests\.always_error +numpy\._core\._umath_tests\.always_error_gufunc +numpy\._core\._umath_tests\.always_error_unary +numpy\._core\._umath_tests\.conv1d_full +numpy\._core\._umath_tests\.cross1d +numpy\._core\._umath_tests\.euclidean_pdist +numpy\._core\._umath_tests\.indexed_negative +numpy\._core\._umath_tests\.inner1d +numpy\._core\._umath_tests\.inner1d_no_doc +numpy\._core\._umath_tests\.matrix_multiply numpy\.linalg\._umath_linalg\.qr_complete numpy\.linalg\._umath_linalg\.qr_reduced numpy\.linalg\._umath_linalg\.solve From afd1ccee30edda4526bbe9c323cc9280b5922fc0 Mon Sep 17 00:00:00 2001 From: Tontonio3 
<81591435+Tontonio3@users.noreply.github.com> Date: Sun, 30 Nov 2025 10:57:08 -0500 Subject: [PATCH 0939/1718] BUG: quantile should error when weights are all zeros (#28595) * added err messages and tests * Modified tests and added release note * Fixed tests * Fixed bug to handle object dtypes * Fixed bug to handle object dtypes * Streamlined testing, improved error handling capabilities * Changed infinite error message * Bug fix * Fixed lint test * Improved testing * Changed error handling, made it faster, removed dtype=object special cases * More comprehensive testing * More comprehensive testing * Fixed lint * Fixed tests * Fixed CircleCI test * streamlined checks * lint fix * lint fix * Fix and simplify things (but don't bother with being overly specific) * Appease linter gods and also use one valid entry for other test * rename release note fragment --------- Co-authored-by: Sebastian Berg --- .../upcoming_changes/28595.improvement.rst | 7 ++++ numpy/lib/_function_base_impl.py | 3 ++ numpy/lib/tests/test_function_base.py | 32 +++++++++++++++++++ 3 files changed, 42 insertions(+) create mode 100644 doc/release/upcoming_changes/28595.improvement.rst diff --git a/doc/release/upcoming_changes/28595.improvement.rst b/doc/release/upcoming_changes/28595.improvement.rst new file mode 100644 index 000000000000..aea833f5179c --- /dev/null +++ b/doc/release/upcoming_changes/28595.improvement.rst @@ -0,0 +1,7 @@ +Improved error handling in `np.quantile` +---------------------------------------- +`np.quantile` now raises errors if: + +* All weights are zero +* At least one weight is `np.nan` +* At least one weight is `np.inf` \ No newline at end of file diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 09ab5e91a33c..87a834850fda 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -4827,6 +4827,9 @@ def _quantile( # distribution function cdf cdf = weights.cumsum(axis=0, dtype=np.float64) cdf /= cdf[-1, 
...] # normalization to 1 + if np.isnan(cdf[-1]).any(): + # Above calculations should normally warn for the zero/inf case. + raise ValueError("Weights included NaN, inf or were all zero.") # Search index i such that # sum(weights[j], j=0..i-1) < quantile <= sum(weights[j], j=0..i) # is then equivalent to diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 1aaca3bae1d6..5b8b0adbdd0d 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -4277,6 +4277,37 @@ def test_closest_observation(self): assert_equal(4, np.quantile(arr[0:9], q, method=m)) assert_equal(5, np.quantile(arr, q, method=m)) + @pytest.mark.parametrize("weights", + [[1, np.inf, 1, 1], [1, np.inf, 1, np.inf], [0, 0, 0, 0], + [np.finfo("float64").max] * 4]) + @pytest.mark.parametrize("dty", ["f8", "O"]) + def test_inf_zeroes_err(self, weights, dty): + m = "inverted_cdf" + q = 0.5 + arr = np.array([[1, 2, 3, 4]] * 2) + # Make one entry have bad weights and another good ones. + wgts = np.array([weights, [0.5] * 4], dtype=dty) + with pytest.raises(ValueError, + match=r"Weights included NaN, inf or were all zero"): + # We (currently) don't bother to check ahead so 0/0 or + # overflow to `inf` while summing weights, or `inf / inf` + # will all warn before the error is raised. + with np.errstate(all="ignore"): + a = np.quantile(arr, q, weights=wgts, method=m, axis=1) + + @pytest.mark.parametrize("weights", + [[1, np.nan, 1, 1], [1, np.nan, np.nan, 1]]) + @pytest.mark.parametrize(["err", "dty"], + [(ValueError, "f8"), ((RuntimeWarning, ValueError), "O")]) + def test_nan_err(self, err, dty, weights): + m = "inverted_cdf" + q = 0.5 + arr = np.array([[1, 2, 3, 4]] * 2) + # Make one entry have bad weights and another good ones. 
+ wgts = np.array([weights, [0.5] * 4], dtype=dty) + with pytest.raises(err): + a = np.quantile(arr, q, weights=wgts, method=m) + def test_quantile_gh_29003_Fraction(self): r = np.quantile([1, 2], q=Fraction(1)) assert r == Fraction(2) @@ -4293,6 +4324,7 @@ def test_float16_gh_29003(self): assert value == q * 50_000 assert value.dtype == np.float16 + class TestLerp: @hypothesis.given(t0=st.floats(allow_nan=False, allow_infinity=False, min_value=0, max_value=1), From 2de293aed2d8307db261faf4f5071f79376ccc85 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sun, 30 Nov 2025 09:32:25 -0700 Subject: [PATCH 0940/1718] BEG, MAINT: Begin NumPy 2.5 development (#30333) - Create 2.5.0-notes.rst - Update release.rst - Update pyproject.toml - Update cversions.txt - Update numpyconfig.h - Update numpy/_core/meson.build - Delete release fragments [skip cirrus] [skip actions] --- .../upcoming_changes/28590.improvement.rst | 33 ----------- .../upcoming_changes/28595.improvement.rst | 7 --- doc/release/upcoming_changes/28767.change.rst | 10 ---- .../upcoming_changes/28767.performance.rst | 10 ---- doc/release/upcoming_changes/28896.change.rst | 56 ------------------- .../upcoming_changes/28925.deprecation.rst | 9 --- .../upcoming_changes/29030.compatibility.rst | 6 -- .../upcoming_changes/29052.deprecation.rst | 10 ---- doc/release/upcoming_changes/29060.change.rst | 3 - .../upcoming_changes/29094.compatibility.rst | 7 --- doc/release/upcoming_changes/29105.change.rst | 1 - .../upcoming_changes/29112.improvement.rst | 5 -- .../upcoming_changes/29129.enhancement.rst | 7 --- .../upcoming_changes/29137.compatibility.rst | 3 - .../upcoming_changes/29165.performance.rst | 7 --- doc/release/upcoming_changes/29179.change.rst | 4 -- .../upcoming_changes/29240.new_feature.rst | 1 - .../upcoming_changes/29273.new_feature.rst | 1 - .../upcoming_changes/29301.deprecation.rst | 7 --- doc/release/upcoming_changes/29338.change.rst | 9 --- .../upcoming_changes/29396.improvement.rst | 5 -- 
.../upcoming_changes/29423.new_feature.rst | 7 --- doc/release/upcoming_changes/29537.change.rst | 7 --- .../upcoming_changes/29537.performance.rst | 9 --- .../upcoming_changes/29550.deprecation.rst | 6 -- .../upcoming_changes/29569.new_feature.rst | 27 --------- doc/release/upcoming_changes/29642.c_api.rst | 13 ----- doc/release/upcoming_changes/29642.change.rst | 7 --- doc/release/upcoming_changes/29739.change.rst | 15 ----- doc/release/upcoming_changes/29750.change.rst | 5 -- .../upcoming_changes/29813.new_feature.rst | 6 -- .../upcoming_changes/29819.improvement.rst | 6 -- doc/release/upcoming_changes/29836.c_api.rst | 15 ----- .../upcoming_changes/29836.improvement.rst | 26 --------- .../upcoming_changes/29841.expired.rst | 6 -- doc/release/upcoming_changes/29900.c_api.rst | 5 -- .../upcoming_changes/29900.new_feature.rst | 9 --- .../upcoming_changes/29909.expired.rst | 10 ---- .../upcoming_changes/29947.improvement.rst | 7 --- .../upcoming_changes/29973.expired.rst | 12 ---- .../upcoming_changes/29978.expired.rst | 4 -- .../upcoming_changes/29980.expired.rst | 5 -- .../upcoming_changes/29984.expired.rst | 5 -- .../upcoming_changes/29986.expired.rst | 10 ---- .../upcoming_changes/29994.expired.rst | 6 -- .../upcoming_changes/29997.expired.rst | 9 --- .../upcoming_changes/30021.expired.rst | 5 -- .../upcoming_changes/30068.expired.rst | 12 ---- .../upcoming_changes/30147.compatibility.rst | 4 -- .../upcoming_changes/30168.deprecation.rst | 5 -- .../upcoming_changes/30179.new_feature.rst | 13 ----- .../upcoming_changes/30208.highlight.rst | 2 - .../upcoming_changes/30208.improvement.rst | 12 ---- .../upcoming_changes/30282.deprecation.rst | 5 -- .../upcoming_changes/30284.deprecation.rst | 3 - doc/source/release.rst | 1 + doc/source/release/2.5.0-notes.rst | 19 +++++++ numpy/_core/code_generators/cversions.txt | 1 + numpy/_core/include/numpy/numpyconfig.h | 16 ++++-- numpy/_core/meson.build | 1 + pyproject.toml | 3 +- 61 files changed, 33 insertions(+), 507 
deletions(-) delete mode 100644 doc/release/upcoming_changes/28590.improvement.rst delete mode 100644 doc/release/upcoming_changes/28595.improvement.rst delete mode 100644 doc/release/upcoming_changes/28767.change.rst delete mode 100644 doc/release/upcoming_changes/28767.performance.rst delete mode 100644 doc/release/upcoming_changes/28896.change.rst delete mode 100644 doc/release/upcoming_changes/28925.deprecation.rst delete mode 100644 doc/release/upcoming_changes/29030.compatibility.rst delete mode 100644 doc/release/upcoming_changes/29052.deprecation.rst delete mode 100644 doc/release/upcoming_changes/29060.change.rst delete mode 100644 doc/release/upcoming_changes/29094.compatibility.rst delete mode 100644 doc/release/upcoming_changes/29105.change.rst delete mode 100644 doc/release/upcoming_changes/29112.improvement.rst delete mode 100644 doc/release/upcoming_changes/29129.enhancement.rst delete mode 100644 doc/release/upcoming_changes/29137.compatibility.rst delete mode 100644 doc/release/upcoming_changes/29165.performance.rst delete mode 100644 doc/release/upcoming_changes/29179.change.rst delete mode 100644 doc/release/upcoming_changes/29240.new_feature.rst delete mode 100644 doc/release/upcoming_changes/29273.new_feature.rst delete mode 100644 doc/release/upcoming_changes/29301.deprecation.rst delete mode 100644 doc/release/upcoming_changes/29338.change.rst delete mode 100644 doc/release/upcoming_changes/29396.improvement.rst delete mode 100644 doc/release/upcoming_changes/29423.new_feature.rst delete mode 100644 doc/release/upcoming_changes/29537.change.rst delete mode 100644 doc/release/upcoming_changes/29537.performance.rst delete mode 100644 doc/release/upcoming_changes/29550.deprecation.rst delete mode 100644 doc/release/upcoming_changes/29569.new_feature.rst delete mode 100644 doc/release/upcoming_changes/29642.c_api.rst delete mode 100644 doc/release/upcoming_changes/29642.change.rst delete mode 100644 doc/release/upcoming_changes/29739.change.rst 
delete mode 100644 doc/release/upcoming_changes/29750.change.rst delete mode 100644 doc/release/upcoming_changes/29813.new_feature.rst delete mode 100644 doc/release/upcoming_changes/29819.improvement.rst delete mode 100644 doc/release/upcoming_changes/29836.c_api.rst delete mode 100644 doc/release/upcoming_changes/29836.improvement.rst delete mode 100644 doc/release/upcoming_changes/29841.expired.rst delete mode 100644 doc/release/upcoming_changes/29900.c_api.rst delete mode 100644 doc/release/upcoming_changes/29900.new_feature.rst delete mode 100644 doc/release/upcoming_changes/29909.expired.rst delete mode 100644 doc/release/upcoming_changes/29947.improvement.rst delete mode 100644 doc/release/upcoming_changes/29973.expired.rst delete mode 100644 doc/release/upcoming_changes/29978.expired.rst delete mode 100644 doc/release/upcoming_changes/29980.expired.rst delete mode 100644 doc/release/upcoming_changes/29984.expired.rst delete mode 100644 doc/release/upcoming_changes/29986.expired.rst delete mode 100644 doc/release/upcoming_changes/29994.expired.rst delete mode 100644 doc/release/upcoming_changes/29997.expired.rst delete mode 100644 doc/release/upcoming_changes/30021.expired.rst delete mode 100644 doc/release/upcoming_changes/30068.expired.rst delete mode 100644 doc/release/upcoming_changes/30147.compatibility.rst delete mode 100644 doc/release/upcoming_changes/30168.deprecation.rst delete mode 100644 doc/release/upcoming_changes/30179.new_feature.rst delete mode 100644 doc/release/upcoming_changes/30208.highlight.rst delete mode 100644 doc/release/upcoming_changes/30208.improvement.rst delete mode 100644 doc/release/upcoming_changes/30282.deprecation.rst delete mode 100644 doc/release/upcoming_changes/30284.deprecation.rst create mode 100644 doc/source/release/2.5.0-notes.rst diff --git a/doc/release/upcoming_changes/28590.improvement.rst b/doc/release/upcoming_changes/28590.improvement.rst deleted file mode 100644 index 35f5cb3c2ad2..000000000000 --- 
a/doc/release/upcoming_changes/28590.improvement.rst +++ /dev/null @@ -1,33 +0,0 @@ -Fix ``flatiter`` indexing edge cases ------------------------------------- - -The ``flatiter`` object now shares the same index preparation logic as -``ndarray``, ensuring consistent behavior and fixing several issues where -invalid indices were previously accepted or misinterpreted. - -Key fixes and improvements: - -* Stricter index validation - - - Boolean non-array indices like ``arr.flat[[True, True]]`` were - incorrectly treated as ``arr.flat[np.array([1, 1], dtype=int)]``. - They now raise an index error. Note that indices that match the - iterator's shape are expected to not raise in the future and be - handled as regular boolean indices. Use ``np.asarray()`` if - you want to match that behavior. - - Float non-array indices were also cast to integer and incorrectly - treated as ``arr.flat[np.array([1.0, 1.0], dtype=int)]``. This is now - deprecated and will be removed in a future version. - - 0-dimensional boolean indices like ``arr.flat[True]`` are also - deprecated and will be removed in a future version. - -* Consistent error types: - - Certain invalid `flatiter` indices that previously raised `ValueError` - now correctly raise `IndexError`, aligning with `ndarray` behavior. - -* Improved error messages: - - The error message for unsupported index operations now provides more - specific details, including explicitly listing the valid index types, - instead of the generic ``IndexError: unsupported index operation``. 
diff --git a/doc/release/upcoming_changes/28595.improvement.rst b/doc/release/upcoming_changes/28595.improvement.rst deleted file mode 100644 index aea833f5179c..000000000000 --- a/doc/release/upcoming_changes/28595.improvement.rst +++ /dev/null @@ -1,7 +0,0 @@ -Improved error handling in `np.quantile` ----------------------------------------- -`np.quantile` now raises errors if: - -* All weights are zero -* At least one weight is `np.nan` -* At least one weight is `np.inf` \ No newline at end of file diff --git a/doc/release/upcoming_changes/28767.change.rst b/doc/release/upcoming_changes/28767.change.rst deleted file mode 100644 index ec173c3672b0..000000000000 --- a/doc/release/upcoming_changes/28767.change.rst +++ /dev/null @@ -1,10 +0,0 @@ -``unique_values`` for string dtypes may return unsorted data ------------------------------------------------------------- -np.unique now supports hash‐based duplicate removal for string dtypes. -This enhancement extends the hash-table algorithm to byte strings ('S'), -Unicode strings ('U'), and the experimental string dtype ('T', StringDType). -As a result, calling np.unique() on an array of strings will use -the faster hash-based method to obtain unique values. -Note that this hash-based method does not guarantee that the returned unique values will be sorted. -This also works for StringDType arrays containing None (missing values) -when using equal_nan=True (treating missing values as equal). diff --git a/doc/release/upcoming_changes/28767.performance.rst b/doc/release/upcoming_changes/28767.performance.rst deleted file mode 100644 index ef8ac1c3a45d..000000000000 --- a/doc/release/upcoming_changes/28767.performance.rst +++ /dev/null @@ -1,10 +0,0 @@ -Performance improvements to ``np.unique`` for string dtypes ------------------------------------------------------------ -The hash-based algorithm for unique extraction provides -an order-of-magnitude speedup on large string arrays. 
-In an internal benchmark with about 1 billion string elements, -the hash-based np.unique completed in roughly 33.5 seconds, -compared to 498 seconds with the sort-based method -– about 15× faster for unsorted unique operations on strings. -This improvement greatly reduces the time to find unique values -in very large string datasets. diff --git a/doc/release/upcoming_changes/28896.change.rst b/doc/release/upcoming_changes/28896.change.rst deleted file mode 100644 index 47538b7b22b2..000000000000 --- a/doc/release/upcoming_changes/28896.change.rst +++ /dev/null @@ -1,56 +0,0 @@ -Modulate dispatched x86 CPU features ------------------------------------- - -**IMPORTANT**: The default setting for `cpu-baseline`` on x86 has been raised to `x86-64-v2` microarchitecture. -This can be changed to none during build time to support older CPUs, -though SIMD optimizations for pre-2009 processors are no longer maintained. - -NumPy has reorganized x86 CPU features into microarchitecture-based groups instead of individual features, -aligning with Linux distribution standards and Google Highway requirements. - -Key changes: -* Replaced individual x86 features with microarchitecture levels: ``X86_V2``, ``X86_V3``, and ``X86_V4`` -* Raised the baseline to ``X86_V2`` -* Improved ``-`` operator behavior to properly exclude successor features that imply the excluded feature -* Added meson redirections for removed feature names to maintain backward compatibility -* Removed compiler compatibility workarounds for partial feature support (e.g., AVX512 without mask operations) -* Removed legacy AMD features (XOP, FMA4) and discontinued Intel Xeon Phi support - -New Feature Group Hierarchy: - -.. 
list-table:: - :header-rows: 1 - :align: left - - * - Name - - Implies - - Includes - * - ``X86_V2`` - - - - ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE4_1`` ``SSE4_2`` ``POPCNT`` ``CX16`` ``LAHF`` - * - ``X86_V3`` - - ``X86_V2`` - - ``AVX`` ``AVX2`` ``FMA3`` ``BMI`` ``BMI2`` ``LZCNT`` ``F16C`` ``MOVBE`` - * - ``X86_V4`` - - ``X86_V3`` - - ``AVX512F`` ``AVX512CD`` ``AVX512VL`` ``AVX512BW`` ``AVX512DQ`` - * - ``AVX512_ICL`` - - ``X86_V4`` - - ``AVX512VBMI`` ``AVX512VBMI2`` ``AVX512VNNI`` ``AVX512BITALG`` ``AVX512VPOPCNTDQ`` ``AVX512IFMA`` ``VAES`` ``GFNI`` ``VPCLMULQDQ`` - * - ``AVX512_SPR`` - - ``AVX512_ICL`` - - ``AVX512FP16`` - - -These groups correspond to CPU generations: - -- ``X86_V2``: x86-64-v2 microarchitectures (CPUs since 2009) -- ``X86_V3``: x86-64-v3 microarchitectures (CPUs since 2015) -- ``X86_V4``: x86-64-v4 microarchitectures (AVX-512 capable CPUs) -- ``AVX512_ICL``: Intel Ice Lake and similar CPUs -- ``AVX512_SPR``: Intel Sapphire Rapids and newer CPUs - -.. note:: - On 32-bit x86, ``cx16`` is excluded from ``X86_V2``. - -Documentation has been updated with details on using these new feature groups with the current meson build system. diff --git a/doc/release/upcoming_changes/28925.deprecation.rst b/doc/release/upcoming_changes/28925.deprecation.rst deleted file mode 100644 index a421839394fa..000000000000 --- a/doc/release/upcoming_changes/28925.deprecation.rst +++ /dev/null @@ -1,9 +0,0 @@ -Setting the ``strides`` attribute is deprecated ------------------------------------------------ -Setting the strides attribute is now deprecated since mutating -an array is unsafe if an array is shared, especially by multiple -threads. As an alternative, you can create a new view (no copy) via: -* `np.lib.stride_tricks.strided_window_view` if applicable, -* `np.lib.stride_tricks.as_strided` for the general case, -* or the `np.ndarray` constructor (``buffer`` is the original array) for a light-weight version. 
- diff --git a/doc/release/upcoming_changes/29030.compatibility.rst b/doc/release/upcoming_changes/29030.compatibility.rst deleted file mode 100644 index cf08551e28ee..000000000000 --- a/doc/release/upcoming_changes/29030.compatibility.rst +++ /dev/null @@ -1,6 +0,0 @@ -* NumPy's C extension modules have begun to use multi-phase initialisation, - as defined by :pep:`489`. As part of this, a new explicit check has been added - that each such module is only imported once per Python process. This comes with - the side-effect that deleting ``numpy`` from ``sys.modules`` and re-importing - it will now fail with an ``ImportError``. This has always been unsafe, with - unexpected side-effects, though did not previously raise an error. diff --git a/doc/release/upcoming_changes/29052.deprecation.rst b/doc/release/upcoming_changes/29052.deprecation.rst deleted file mode 100644 index e302907abfba..000000000000 --- a/doc/release/upcoming_changes/29052.deprecation.rst +++ /dev/null @@ -1,10 +0,0 @@ -Positional ``out`` argument to `np.maximum`, `np.minimum` is deprecated ------------------------------------------------------------------------ -Passing the output array ``out`` positionally to `numpy.maximum` and -`numpy.minimum` is deprecated. For example, ``np.maximum(a, b, c)`` will -emit a deprecation warning, since ``c`` is treated as the output buffer -rather than a third input. - -Always pass the output with the keyword form, e.g. -``np.maximum(a, b, out=c)``. This makes intent clear and simplifies -type annotations. diff --git a/doc/release/upcoming_changes/29060.change.rst b/doc/release/upcoming_changes/29060.change.rst deleted file mode 100644 index 1561da7bf94e..000000000000 --- a/doc/release/upcoming_changes/29060.change.rst +++ /dev/null @@ -1,3 +0,0 @@ -* Multiplication between a string and integer now raises OverflowError instead - of MemoryError if the result of the multiplication would create a string that - is too large to be represented. 
This follows Python's behavior. diff --git a/doc/release/upcoming_changes/29094.compatibility.rst b/doc/release/upcoming_changes/29094.compatibility.rst deleted file mode 100644 index 961ee6504dae..000000000000 --- a/doc/release/upcoming_changes/29094.compatibility.rst +++ /dev/null @@ -1,7 +0,0 @@ -The Macro NPY_ALIGNMENT_REQUIRED has been removed -------------------------------------------------- -The macro was defined in the `npy_cpu.h` file, so might be regarded as -semipublic. As it turns out, with modern compilers and hardware it is almost -always the case that alignment is required, so numpy no longer uses the macro. -It is unlikely anyone uses it, but you might want to compile with the `-Wundef` -flag or equivalent to be sure. diff --git a/doc/release/upcoming_changes/29105.change.rst b/doc/release/upcoming_changes/29105.change.rst deleted file mode 100644 index b5d4a9838f30..000000000000 --- a/doc/release/upcoming_changes/29105.change.rst +++ /dev/null @@ -1 +0,0 @@ -* The accuracy of ``np.quantile`` and ``np.percentile`` for 16- and 32-bit floating point input data has been improved. \ No newline at end of file diff --git a/doc/release/upcoming_changes/29112.improvement.rst b/doc/release/upcoming_changes/29112.improvement.rst deleted file mode 100644 index 01baa668b9fe..000000000000 --- a/doc/release/upcoming_changes/29112.improvement.rst +++ /dev/null @@ -1,5 +0,0 @@ -Improved error message for `assert_array_compare` -------------------------------------------------- -The error message generated by `assert_array_compare` which is used by functions -like `assert_allclose`, `assert_array_less` etc. now also includes information -about the indices at which the assertion fails. 
\ No newline at end of file diff --git a/doc/release/upcoming_changes/29129.enhancement.rst b/doc/release/upcoming_changes/29129.enhancement.rst deleted file mode 100644 index 9a14f13c1f4a..000000000000 --- a/doc/release/upcoming_changes/29129.enhancement.rst +++ /dev/null @@ -1,7 +0,0 @@ -``'same_value'`` for casting by value -------------------------------------- -The ``casting`` kwarg now has a ``'same_value'`` option that checks the actual -values can be round-trip cast without changing value. Currently it is only -implemented in `ndarray.astype`. This will raise a ``ValueError`` if any of the -values in the array would change as a result of the cast, including rounding of -floats or overflowing of ints. diff --git a/doc/release/upcoming_changes/29137.compatibility.rst b/doc/release/upcoming_changes/29137.compatibility.rst deleted file mode 100644 index 3ac9da2a4c48..000000000000 --- a/doc/release/upcoming_changes/29137.compatibility.rst +++ /dev/null @@ -1,3 +0,0 @@ -* `numpy.round` now always returns a copy. Previously, it returned a view - for integer inputs for ``decimals >= 0`` and a copy in all other cases. - This change brings ``round`` in line with ``ceil``, ``floor`` and ``trunc``. diff --git a/doc/release/upcoming_changes/29165.performance.rst b/doc/release/upcoming_changes/29165.performance.rst deleted file mode 100644 index 4e1a9a4ecdbc..000000000000 --- a/doc/release/upcoming_changes/29165.performance.rst +++ /dev/null @@ -1,7 +0,0 @@ -Rewrite of `np.ndindex` using `itertools.product` --------------------------------------------------- -The `numpy.ndindex` function now uses `itertools.product` internally, -providing significant improvements in performance for large iteration spaces, -while maintaining the original behavior and interface. -For example, for an array of shape (50, 60, 90) the NumPy `ndindex` -benchmark improves performance by a factor 5.2. 
\ No newline at end of file diff --git a/doc/release/upcoming_changes/29179.change.rst b/doc/release/upcoming_changes/29179.change.rst deleted file mode 100644 index 12eb6804d3dd..000000000000 --- a/doc/release/upcoming_changes/29179.change.rst +++ /dev/null @@ -1,4 +0,0 @@ -Fix bug in ``matmul`` for non-contiguous out kwarg parameter ------------------------------------------------------------- -In some cases, if ``out`` was non-contiguous, ``np.matmul`` would cause -memory corruption or a c-level assert. This was new to v2.3.0 and fixed in v2.3.1. diff --git a/doc/release/upcoming_changes/29240.new_feature.rst b/doc/release/upcoming_changes/29240.new_feature.rst deleted file mode 100644 index 02d43364b200..000000000000 --- a/doc/release/upcoming_changes/29240.new_feature.rst +++ /dev/null @@ -1 +0,0 @@ -* Let ``np.size`` accept multiple axes. diff --git a/doc/release/upcoming_changes/29273.new_feature.rst b/doc/release/upcoming_changes/29273.new_feature.rst deleted file mode 100644 index 3e380ca0dbe6..000000000000 --- a/doc/release/upcoming_changes/29273.new_feature.rst +++ /dev/null @@ -1 +0,0 @@ -Extend ``numpy.pad`` to accept a dictionary for the ``pad_width`` argument. diff --git a/doc/release/upcoming_changes/29301.deprecation.rst b/doc/release/upcoming_changes/29301.deprecation.rst deleted file mode 100644 index e520b692458d..000000000000 --- a/doc/release/upcoming_changes/29301.deprecation.rst +++ /dev/null @@ -1,7 +0,0 @@ -``align=`` must be passed as boolean to ``np.dtype()`` ------------------------------------------------------- -When creating a new ``dtype`` a ``VisibleDeprecationWarning`` will be -given if ``align=`` is not a boolean. -This is mainly to prevent accidentally passing a subarray align flag where it -has no effect, such as ``np.dtype("f8", 3)`` instead of ``np.dtype(("f8", 3))``. -We strongly suggest to always pass ``align=`` as a keyword argument. 
diff --git a/doc/release/upcoming_changes/29338.change.rst b/doc/release/upcoming_changes/29338.change.rst deleted file mode 100644 index 64bf188009c8..000000000000 --- a/doc/release/upcoming_changes/29338.change.rst +++ /dev/null @@ -1,9 +0,0 @@ -``__array_interface__`` with NULL pointer changed -------------------------------------------------- -The array interface now accepts NULL pointers (NumPy will do -its own dummy allocation, though). -Previously, these incorrectly triggered an undocumented -scalar path. -In the unlikely event that the scalar path was actually desired, -you can (for now) achieve the previous behavior via the correct -scalar path by not providing a ``data`` field at all. diff --git a/doc/release/upcoming_changes/29396.improvement.rst b/doc/release/upcoming_changes/29396.improvement.rst deleted file mode 100644 index 2cd3d81ad9d8..000000000000 --- a/doc/release/upcoming_changes/29396.improvement.rst +++ /dev/null @@ -1,5 +0,0 @@ -Show unit information in ``__repr__`` for ``datetime64("NaT")`` ------------------------------------------------------------------- -When a `datetime64` object is "Not a Time" (NaT), its ``__repr__`` method now -includes the time unit of the datetime64 type. This makes it consistent with -the behavior of a `timedelta64` object. diff --git a/doc/release/upcoming_changes/29423.new_feature.rst b/doc/release/upcoming_changes/29423.new_feature.rst deleted file mode 100644 index 7e83604b0049..000000000000 --- a/doc/release/upcoming_changes/29423.new_feature.rst +++ /dev/null @@ -1,7 +0,0 @@ -``StringDType`` fill_value support in `numpy.ma.MaskedArray` ------------------------------------------------------------- -Masked arrays now accept and preserve a Python ``str`` as their ``fill_value`` when -using the variable‑width ``StringDType`` (kind ``'T'``), including through slicing -and views. The default is ``'N/A'`` and may be overridden by any valid string. 
-This fixes issue `gh‑29421 `__ and was -implemented in pull request `gh‑29423 `__. diff --git a/doc/release/upcoming_changes/29537.change.rst b/doc/release/upcoming_changes/29537.change.rst deleted file mode 100644 index 63abbbb5a347..000000000000 --- a/doc/release/upcoming_changes/29537.change.rst +++ /dev/null @@ -1,7 +0,0 @@ -``unique_values`` for complex dtypes may return unsorted data -------------------------------------------------------------- -np.unique now supports hash‐based duplicate removal for complex dtypes. -This enhancement extends the hash‐table algorithm -to all complex types ('c'), and their extended precision variants. -The hash‐based method provides faster extraction of unique values -but does not guarantee that the result will be sorted. diff --git a/doc/release/upcoming_changes/29537.performance.rst b/doc/release/upcoming_changes/29537.performance.rst deleted file mode 100644 index 8c78dc202a2e..000000000000 --- a/doc/release/upcoming_changes/29537.performance.rst +++ /dev/null @@ -1,9 +0,0 @@ -Performance improvements to ``np.unique`` for complex dtypes ------------------------------------------------------------- -The hash-based algorithm for unique extraction now also supports -complex dtypes, offering noticeable performance gains. - -In our benchmarks on complex128 arrays with 200,000 elements, -the hash-based approach was about 1.4–1.5× faster -than the sort-based baseline when there were 20% of unique values, -and about 5× faster when there were 0.2% of unique values. diff --git a/doc/release/upcoming_changes/29550.deprecation.rst b/doc/release/upcoming_changes/29550.deprecation.rst deleted file mode 100644 index ce35477c5010..000000000000 --- a/doc/release/upcoming_changes/29550.deprecation.rst +++ /dev/null @@ -1,6 +0,0 @@ -Assertion and warning control utilities are deprecated ------------------------------------------------------- - -`np.testing.assert_warns` and `np.testing.suppress_warnings` are deprecated. 
-Use `warnings.catch_warnings`, `warnings.filterwarnings`, ``pytest.warns``, or -``pytest.filterwarnings`` instead. diff --git a/doc/release/upcoming_changes/29569.new_feature.rst b/doc/release/upcoming_changes/29569.new_feature.rst deleted file mode 100644 index ac014c07c7a0..000000000000 --- a/doc/release/upcoming_changes/29569.new_feature.rst +++ /dev/null @@ -1,27 +0,0 @@ -``ndmax`` option for `numpy.array` ----------------------------------------------------- -The ``ndmax`` option is now available for `numpy.array`. -It explicitly limits the maximum number of dimensions created from nested sequences. - -This is particularly useful when creating arrays of list-like objects with ``dtype=object``. -By default, NumPy recurses through all nesting levels to create the highest possible -dimensional array, but this behavior may not be desired when the intent is to preserve -nested structures as objects. The ``ndmax`` parameter provides explicit control over -this recursion depth. - -.. code-block:: python - - # Default behavior: Creates a 2D array - >>> a = np.array([[1, 2], [3, 4]], dtype=object) - >>> a - array([[1, 2], - [3, 4]], dtype=object) - >>> a.shape - (2, 2) - - # With ndmax=1: Creates a 1D array - >>> b = np.array([[1, 2], [3, 4]], dtype=object, ndmax=1) - >>> b - array([list([1, 2]), list([3, 4])], dtype=object) - >>> b.shape - (2,) diff --git a/doc/release/upcoming_changes/29642.c_api.rst b/doc/release/upcoming_changes/29642.c_api.rst deleted file mode 100644 index 65c804ef829b..000000000000 --- a/doc/release/upcoming_changes/29642.c_api.rst +++ /dev/null @@ -1,13 +0,0 @@ -The NPY_SORTKIND enum has been enhanced with new variables ----------------------------------------------------------- -This is of interest if you are using ``PyArray_Sort`` or ``PyArray_ArgSort``. -We have changed the semantics of the old names in the NPY_SORTKIND enum and -added new ones. The changes are backward compatible, and no recompilation is -needed. 
The new names of interest are: - -* NPY_SORT_DEFAULT -- default sort (same value as NPY_QUICKSORT) -* NPY_SORT_STABLE -- the sort must be stable (same value as NPY_MERGESORT) -* NPY_SORT_DESCENDING -- the sort must be descending - -The semantic change is that NPY_HEAPSORT is mapped to NPY_QUICKSORT when used. -Note that NPY_SORT_DESCENDING is not yet implemented. diff --git a/doc/release/upcoming_changes/29642.change.rst b/doc/release/upcoming_changes/29642.change.rst deleted file mode 100644 index 4a1706e00bab..000000000000 --- a/doc/release/upcoming_changes/29642.change.rst +++ /dev/null @@ -1,7 +0,0 @@ -Sorting ``kind='heapsort'`` now maps to ``kind='quicksort'`` ------------------------------------------------------------- -It is unlikely that this change will be noticed, but if you do see a change in -execution time or unstable argsort order, that is likely the cause. Please let -us know if there is a performance regression. Congratulate us if it is -improved :) - diff --git a/doc/release/upcoming_changes/29739.change.rst b/doc/release/upcoming_changes/29739.change.rst deleted file mode 100644 index 5d1316a1ba41..000000000000 --- a/doc/release/upcoming_changes/29739.change.rst +++ /dev/null @@ -1,15 +0,0 @@ -``numpy.typing.DTypeLike`` no longer accepts ``None`` ------------------------------------------------------ -The type alias ``numpy.typing.DTypeLike`` no longer accepts ``None``. Instead of - -.. code-block:: python - - dtype: DTypeLike = None - -it should now be - -.. code-block:: python - - dtype: DTypeLike | None = None - -instead. diff --git a/doc/release/upcoming_changes/29750.change.rst b/doc/release/upcoming_changes/29750.change.rst deleted file mode 100644 index 2759c08d8349..000000000000 --- a/doc/release/upcoming_changes/29750.change.rst +++ /dev/null @@ -1,5 +0,0 @@ -The ``npymath`` and ``npyrandom`` libraries now have a ``.lib`` rather than a -``.a`` file extension on win-arm64, for compatibility for building with MSVC and -``setuptools``. 
Please note that using these static libraries is discouraged -and for existing projects using it, it's best to use it with a matching -compiler toolchain, which is ``clang-cl`` on Windows on Arm. diff --git a/doc/release/upcoming_changes/29813.new_feature.rst b/doc/release/upcoming_changes/29813.new_feature.rst deleted file mode 100644 index 690d7ca88799..000000000000 --- a/doc/release/upcoming_changes/29813.new_feature.rst +++ /dev/null @@ -1,6 +0,0 @@ -Warning emitted when using `where` without `out` ------------------------------------------------- -Ufuncs called with a ``where`` mask and without an ``out`` positional or kwarg will -now emit a warning. This usage tends to trip up users who expect some value in -output locations where the mask is ``False`` (the ufunc will not touch those -locations). The warning can be supressed by using ``out=None``. diff --git a/doc/release/upcoming_changes/29819.improvement.rst b/doc/release/upcoming_changes/29819.improvement.rst deleted file mode 100644 index fa4ac07f2a08..000000000000 --- a/doc/release/upcoming_changes/29819.improvement.rst +++ /dev/null @@ -1,6 +0,0 @@ -Performance increase for scalar calculations --------------------------------------------- -The speed of calculations on scalars has been improved by about a factor 6 for -ufuncs that take only one input (like ``np.sin(scalar)``), reducing the speed -difference from their ``math`` equivalents from a factor 19 to 3 (the speed -for arrays is left unchanged). 
diff --git a/doc/release/upcoming_changes/29836.c_api.rst b/doc/release/upcoming_changes/29836.c_api.rst deleted file mode 100644 index 9ac5478c742a..000000000000 --- a/doc/release/upcoming_changes/29836.c_api.rst +++ /dev/null @@ -1,15 +0,0 @@ -New ``NPY_DT_get_constant`` slot for DType constant retrieval -------------------------------------------------------------- -A new slot ``NPY_DT_get_constant`` has been added to the DType API, allowing -dtype implementations to provide constant values such as machine limits and -special values. The slot function has the signature:: - - int get_constant(PyArray_Descr *descr, int constant_id, void *ptr) - -It returns 1 on success, 0 if the constant is not available, or -1 on error. -The function is always called with the GIL held and may write to unaligned memory. - -Integer constants (marked with the ``1 << 16`` bit) return ``npy_intp`` values, -while floating-point constants return values of the dtype's native type. - -Implementing this can be used by user DTypes to provide `numpy.finfo` values. \ No newline at end of file diff --git a/doc/release/upcoming_changes/29836.improvement.rst b/doc/release/upcoming_changes/29836.improvement.rst deleted file mode 100644 index 0d7df429d125..000000000000 --- a/doc/release/upcoming_changes/29836.improvement.rst +++ /dev/null @@ -1,26 +0,0 @@ -``numpy.finfo`` Refactor ------------------------- -The ``numpy.finfo`` class has been completely refactored to obtain floating-point -constants directly from C compiler macros rather than deriving them at runtime. -This provides better accuracy, platform compatibility and corrected -several attribute calculations: - -* Constants like ``eps``, ``min``, ``max``, ``smallest_normal``, and - ``smallest_subnormal`` now come directly from standard C macros (``FLT_EPSILON``, - ``DBL_MIN``, etc.), ensuring platform-correct values. - -* The deprecated ``MachAr`` runtime discovery mechanism has been removed. 
- -* Derived attributes have been corrected to match standard definitions: - ``machep`` and ``negep`` now use ``int(log2(eps))``; ``nexp`` accounts for - all exponent patterns; ``nmant`` excludes the implicit bit; and ``minexp`` - follows the C standard definition. - -* longdouble constants, Specifically ``smallest_normal`` now follows the - C standard definitions as per respecitive platform. - -* Special handling added for PowerPC's IBM double-double format. - -* New test suite added in ``test_finfo.py`` to validate all - ``finfo`` properties against expected machine arithmetic values for - float16, float32, and float64 types. \ No newline at end of file diff --git a/doc/release/upcoming_changes/29841.expired.rst b/doc/release/upcoming_changes/29841.expired.rst deleted file mode 100644 index 34977cec2f70..000000000000 --- a/doc/release/upcoming_changes/29841.expired.rst +++ /dev/null @@ -1,6 +0,0 @@ -Raise `TypeError` on attempt to convert array with `ndim > 0` to scalar ------------------------------------------------------------------------ -Conversion of an array with `ndim > 0` to a scalar was deprecated in -NumPy 1.25. Now, attempting to do so raises `TypeError`. -Ensure you extract a single element from your array before performing -this operation. diff --git a/doc/release/upcoming_changes/29900.c_api.rst b/doc/release/upcoming_changes/29900.c_api.rst deleted file mode 100644 index b29014ac95fc..000000000000 --- a/doc/release/upcoming_changes/29900.c_api.rst +++ /dev/null @@ -1,5 +0,0 @@ -A new `PyUFunc_AddLoopsFromSpecs` convenience function has been added to the C API. ------------------------------------------------------------------------------------ -This function allows adding multiple ufunc loops from their specs in one call using -a NULL-terminated array of `PyUFunc_LoopSlot` structs. It allows registering -sorting and argsorting loops using the new ArrayMethod API. 
\ No newline at end of file diff --git a/doc/release/upcoming_changes/29900.new_feature.rst b/doc/release/upcoming_changes/29900.new_feature.rst deleted file mode 100644 index 1799b6043e29..000000000000 --- a/doc/release/upcoming_changes/29900.new_feature.rst +++ /dev/null @@ -1,9 +0,0 @@ -DType sorting and argsorting supports the ArrayMethod API ---------------------------------------------------------- -User-defined dtypes can now implement custom sorting and argsorting using -the ArrayMethod API. This mechanism can be used in place of the `PyArray_ArrFuncs` -slots which may be deprecated in the future. - -The sorting and argsorting methods are registered by passing the arraymethod -specs that implement the operations to the new `PyUFunc_AddLoopsFromSpecs` function. -See the ArrayMethod API documentation for details. \ No newline at end of file diff --git a/doc/release/upcoming_changes/29909.expired.rst b/doc/release/upcoming_changes/29909.expired.rst deleted file mode 100644 index 6a2ee4f53c09..000000000000 --- a/doc/release/upcoming_changes/29909.expired.rst +++ /dev/null @@ -1,10 +0,0 @@ -Remove numpy.linalg.linalg and numpy.fft.helper ------------------------------------------------ - -The following were deprecated in NumPy 2.0 and have been moved to private modules - -* ``numpy.linalg.linalg`` - Use :mod:`numpy.linalg` instead. - -* ``numpy.fft.helper`` - Use :mod:`numpy.fft` instead. diff --git a/doc/release/upcoming_changes/29947.improvement.rst b/doc/release/upcoming_changes/29947.improvement.rst deleted file mode 100644 index 99c67e598347..000000000000 --- a/doc/release/upcoming_changes/29947.improvement.rst +++ /dev/null @@ -1,7 +0,0 @@ -Multiple axes are now supported in ``numpy.trim_zeros`` -------------------------------------------------------- -The ``axis`` argument of `numpy.trim_zeros` now accepts a sequence; for example -``np.trim_zeros(x, axis=(0, 1))`` will trim the zeros from a multi-dimensional -array ``x`` along axes 0 and 1. 
This fixes issue -`gh‑29945 `__ and was implemented -in pull request `gh‑29947 `__. diff --git a/doc/release/upcoming_changes/29973.expired.rst b/doc/release/upcoming_changes/29973.expired.rst deleted file mode 100644 index 5b51cb7cf428..000000000000 --- a/doc/release/upcoming_changes/29973.expired.rst +++ /dev/null @@ -1,12 +0,0 @@ -Remove ``interpolation`` parameter from quantile and percentile functions -------------------------------------------------------------------------- - -The ``interpolation`` parameter was deprecated in NumPy 1.22.0 and has been -removed from the following functions: - -* ``numpy.percentile`` -* ``numpy.nanpercentile`` -* ``numpy.quantile`` -* ``numpy.nanquantile`` - -Use the ``method`` parameter instead. diff --git a/doc/release/upcoming_changes/29978.expired.rst b/doc/release/upcoming_changes/29978.expired.rst deleted file mode 100644 index e0f4de1d8715..000000000000 --- a/doc/release/upcoming_changes/29978.expired.rst +++ /dev/null @@ -1,4 +0,0 @@ -Removed ``numpy.in1d`` ----------------------- - -``numpy.in1d`` has been deprecated since NumPy 2.0 and is now removed in favor of ``numpy.isin``. diff --git a/doc/release/upcoming_changes/29980.expired.rst b/doc/release/upcoming_changes/29980.expired.rst deleted file mode 100644 index 563ba8aa6929..000000000000 --- a/doc/release/upcoming_changes/29980.expired.rst +++ /dev/null @@ -1,5 +0,0 @@ -Removed ``numpy.ndindex.ndincr()`` ----------------------------------- - -The ``ndindex.ndincr()`` method has been deprecated since NumPy 1.20 and is now removed; -use ``next(ndindex)`` instead. 
diff --git a/doc/release/upcoming_changes/29984.expired.rst b/doc/release/upcoming_changes/29984.expired.rst deleted file mode 100644 index bcce0dedd4a7..000000000000 --- a/doc/release/upcoming_changes/29984.expired.rst +++ /dev/null @@ -1,5 +0,0 @@ -Removed ``fix_imports`` parameter from ``numpy.save`` ------------------------------------------------------ - -The ``fix_imports`` parameter was deprecated in NumPy 2.1.0 and is now removed. -This flag has been ignored since NumPy 1.17 and was only needed to support loading files in Python 2 that were written in Python 3. diff --git a/doc/release/upcoming_changes/29986.expired.rst b/doc/release/upcoming_changes/29986.expired.rst deleted file mode 100644 index 2a6b44380dd4..000000000000 --- a/doc/release/upcoming_changes/29986.expired.rst +++ /dev/null @@ -1,10 +0,0 @@ -Removal of four undocumented ``ndarray.ctypes`` methods -------------------------------------------------------- -Four undocumented methods of the ``ndarray.ctypes`` object have been removed: - -* ``_ctypes.get_data()`` (use ``_ctypes.data`` instead) -* ``_ctypes.get_shape()`` (use ``_ctypes.shape`` instead) -* ``_ctypes.get_strides()`` (use ``_ctypes.strides`` instead) -* ``_ctypes.get_as_parameter()`` (use ``_ctypes._as_parameter_`` instead) - -These methods have been deprecated since NumPy 1.21. diff --git a/doc/release/upcoming_changes/29994.expired.rst b/doc/release/upcoming_changes/29994.expired.rst deleted file mode 100644 index 11331da6e810..000000000000 --- a/doc/release/upcoming_changes/29994.expired.rst +++ /dev/null @@ -1,6 +0,0 @@ -Remove ``newshape`` parameter from ``numpy.reshape`` ----------------------------------------------------- - -The ``newshape`` parameter was deprecated in NumPy 2.1.0 and has been -removed from ``numpy.reshape``. Pass it positionally or use ``shape=`` -on newer NumPy versions. 
diff --git a/doc/release/upcoming_changes/29997.expired.rst b/doc/release/upcoming_changes/29997.expired.rst deleted file mode 100644 index 6bdfa792e4e6..000000000000 --- a/doc/release/upcoming_changes/29997.expired.rst +++ /dev/null @@ -1,9 +0,0 @@ -Removal of deprecated functions and arguments ---------------------------------------------- - -The following long-deprecated APIs have been removed: - -* ``numpy.trapz`` — deprecated since NumPy 2.0 (2023-08-18). Use ``numpy.trapezoid`` or - ``scipy.integrate`` functions instead. -* ``disp`` function — deprecated from 2.0 release and no longer functional. Use your own printing function instead. -* ``bias`` and ``ddof`` arguments in ``numpy.corrcoef`` — these had no effect since NumPy 1.10. diff --git a/doc/release/upcoming_changes/30021.expired.rst b/doc/release/upcoming_changes/30021.expired.rst deleted file mode 100644 index 31ca300ce35f..000000000000 --- a/doc/release/upcoming_changes/30021.expired.rst +++ /dev/null @@ -1,5 +0,0 @@ -Remove ``delimitor`` parameter from ``numpy.ma.mrecords.fromtextfile()`` ------------------------------------------------------------------------- - -The ``delimitor`` parameter was deprecated in NumPy 1.22.0 and has been -removed from ``numpy.ma.mrecords.fromtextfile()``. Use ``delimiter`` instead. diff --git a/doc/release/upcoming_changes/30068.expired.rst b/doc/release/upcoming_changes/30068.expired.rst deleted file mode 100644 index 5d41c98b3260..000000000000 --- a/doc/release/upcoming_changes/30068.expired.rst +++ /dev/null @@ -1,12 +0,0 @@ -``numpy.array2string`` and ``numpy.sum`` deprecations finalized ---------------------------------------------------------------- - -The following long-deprecated APIs have been removed or converted to errors: - -* The ``style`` parameter has been removed from ``numpy.array2string``. - This argument had no effect since Numpy 1.14.0. Any arguments following - it, such as ``formatter`` have now been made keyword-only. 
- -* Calling ``np.sum(generator)`` directly on a generator object now raises a `TypeError`. - This behavior was deprecated in NumPy 1.15.0. Use ``np.sum(np.fromiter(generator))`` - or the python ``sum`` builtin instead. diff --git a/doc/release/upcoming_changes/30147.compatibility.rst b/doc/release/upcoming_changes/30147.compatibility.rst deleted file mode 100644 index c5d13323fe6e..000000000000 --- a/doc/release/upcoming_changes/30147.compatibility.rst +++ /dev/null @@ -1,4 +0,0 @@ -* Type-checkers will no longer accept calls to `numpy.arange` with - ``start`` as a keyword argument. This was done for compatibility with - the Array API standard. At runtime it is still possible to use - `numpy.arange` with ``start`` as a keyword argument. diff --git a/doc/release/upcoming_changes/30168.deprecation.rst b/doc/release/upcoming_changes/30168.deprecation.rst deleted file mode 100644 index 81673397590d..000000000000 --- a/doc/release/upcoming_changes/30168.deprecation.rst +++ /dev/null @@ -1,5 +0,0 @@ -``np.fix`` is pending deprecation ---------------------------------- -The `numpy.fix` function will be deprecated in a future release. It is recommended to use -`numpy.trunc` instead, as it provides the same functionality of truncating decimal values to their -integer parts. Static type checkers might already report a warning for the use of `numpy.fix`. diff --git a/doc/release/upcoming_changes/30179.new_feature.rst b/doc/release/upcoming_changes/30179.new_feature.rst deleted file mode 100644 index e19815289351..000000000000 --- a/doc/release/upcoming_changes/30179.new_feature.rst +++ /dev/null @@ -1,13 +0,0 @@ -New ``__numpy_dtype__`` protocol --------------------------------- -NumPy now has a new ``__numpy_dtype__`` protocol. NumPy will check -for this attribute when converting to a NumPy dtype via ``np.dtype(obj)`` -or any ``dtype=`` argument. 
- -Downstream projects are encouraged to implement this for all dtype like -objects which may previously have used a ``.dtype`` attribute that returned -a NumPy dtype. -We expect to deprecate ``.dtype`` in the future to prevent interpreting -array-like objects with a ``.dtype`` attribute as a dtype. -If you wish you can implement ``__numpy_dtype__`` to ensure an earlier -warning or error (``.dtype`` is ignored if this is found). diff --git a/doc/release/upcoming_changes/30208.highlight.rst b/doc/release/upcoming_changes/30208.highlight.rst deleted file mode 100644 index c13c46c056cc..000000000000 --- a/doc/release/upcoming_changes/30208.highlight.rst +++ /dev/null @@ -1,2 +0,0 @@ -* Runtime signature introspection support has been significantly improved. See the - corresponding improvement note for details. diff --git a/doc/release/upcoming_changes/30208.improvement.rst b/doc/release/upcoming_changes/30208.improvement.rst deleted file mode 100644 index ad9faaedfb6b..000000000000 --- a/doc/release/upcoming_changes/30208.improvement.rst +++ /dev/null @@ -1,12 +0,0 @@ -Runtime signature introspection support has been significantly improved ------------------------------------------------------------------------ -Many NumPy functions, classes, and methods that previously raised ``ValueError`` when passed -to ``inspect.signature()`` now return meaningful signatures. This improves support for runtime type -checking, IDE autocomplete, documentation generation, and runtime introspection capabilities across -the NumPy API. - -Over three hundred classes and functions have been updated in total, including, but not limited to, -core classes such as `ndarray`, `generic`, `dtype`, `ufunc`, `broadcast`, `nditer`, etc., -most methods of `ndarray` and scalar types, array constructor functions (`array`, `empty`, -`arange`, `fromiter`, etc.), all :ref:`ufuncs`, and many other commonly used functions, including -`dot`, `concat`, `where`, `bincount`, `can_cast`, and numerous others. 
diff --git a/doc/release/upcoming_changes/30282.deprecation.rst b/doc/release/upcoming_changes/30282.deprecation.rst deleted file mode 100644 index e9aac9ae17d5..000000000000 --- a/doc/release/upcoming_changes/30282.deprecation.rst +++ /dev/null @@ -1,5 +0,0 @@ -in-place modification of ``ndarray.shape`` is pending deprecation ------------------------------------------------------------------ -Setting the `ndarray.shape` attribute directly will be deprecated in a future release. -Instead of modifying the shape in place, it is recommended to use the `numpy.reshape` function. -Static type checkers might already report a warning for assignments to `ndarray.shape`. diff --git a/doc/release/upcoming_changes/30284.deprecation.rst b/doc/release/upcoming_changes/30284.deprecation.rst deleted file mode 100644 index 8803f3225cd3..000000000000 --- a/doc/release/upcoming_changes/30284.deprecation.rst +++ /dev/null @@ -1,3 +0,0 @@ -Deprecation of ``numpy.lib.user_array.container`` -------------------------------------------------- -The ``numpy.lib.user_array.container`` class is deprecated and will be removed in a future version. diff --git a/doc/source/release.rst b/doc/source/release.rst index 190d0512faeb..5842fa9fc61a 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -5,6 +5,7 @@ Release notes .. toctree:: :maxdepth: 2 + 2.5.0 2.4.0 2.3.5 2.3.4 diff --git a/doc/source/release/2.5.0-notes.rst b/doc/source/release/2.5.0-notes.rst new file mode 100644 index 000000000000..1c07e859a7b9 --- /dev/null +++ b/doc/source/release/2.5.0-notes.rst @@ -0,0 +1,19 @@ +.. currentmodule:: numpy + +========================== +NumPy 2.5.0 Release Notes +========================== + + +Highlights +========== + +*We'll choose highlights for this release near the end of the release cycle.* + + +.. if release snippets have been incorporated already, uncomment the follow + line (leave the `.. include:: directive) + +.. 
**Content from release note snippets in doc/release/upcoming_changes:** + +.. include:: notes-towncrier.rst diff --git a/numpy/_core/code_generators/cversions.txt b/numpy/_core/code_generators/cversions.txt index 3a480dfd4ab3..b058875d0455 100644 --- a/numpy/_core/code_generators/cversions.txt +++ b/numpy/_core/code_generators/cversions.txt @@ -83,4 +83,5 @@ # Version 21 (NumPy 2.4.0) # Add 'same_value' casting, header additions. # General loop registration for ufuncs, sort, and argsort +# Version 21 (NumPy 2.5.0) No change 0x00000015 = fbd24fc5b2ba4f7cd3606ec6128de7a5 diff --git a/numpy/_core/include/numpy/numpyconfig.h b/numpy/_core/include/numpy/numpyconfig.h index c129a3aceb6d..0bd934e37a8d 100644 --- a/numpy/_core/include/numpy/numpyconfig.h +++ b/numpy/_core/include/numpy/numpyconfig.h @@ -85,6 +85,7 @@ #define NPY_2_2_API_VERSION 0x00000013 #define NPY_2_3_API_VERSION 0x00000014 #define NPY_2_4_API_VERSION 0x00000015 +#define NPY_2_5_API_VERSION 0x00000016 /* @@ -107,10 +108,11 @@ * default, or narrow it down if they wish to use newer API. If you adjust * this, consider the Python version support (example for 1.25.x): * - * NumPy 1.25.x supports Python: 3.9 3.10 3.11 (3.12) - * NumPy 1.19.x supports Python: 3.6 3.7 3.8 3.9 - * NumPy 1.17.x supports Python: 3.5 3.6 3.7 3.8 - * NumPy 1.15.x supports Python: ... 3.6 3.7 + * NumPy 1.26.x supports Python: 3.9 3.10 3.11 3.12 + * NumPy 1.25.x supports Python: 3.9 3.10 3.11 + * NumPy 1.19.x supports Python: 3.6 3.7 3.8 3.9 + * NumPy 1.17.x supports Python: 3.5 3.6 3.7 3.8 + * NumPy 1.15.x supports Python: ... 3.6 3.7 * * Users of the stable ABI may wish to target the last Python that is not * end of life. This would be 3.8 at NumPy 1.25 release time. 
@@ -124,8 +126,8 @@ /* user provided a target version, use it */ #define NPY_FEATURE_VERSION NPY_TARGET_VERSION #else - /* Use the default (increase when dropping Python 3.11 support) */ - #define NPY_FEATURE_VERSION NPY_1_23_API_VERSION + /* Use the default (increase when dropping Python 3.12 support) */ + #define NPY_FEATURE_VERSION NPY_1_25_API_VERSION #endif /* Sanity check the (requested) feature version */ @@ -177,6 +179,8 @@ #define NPY_FEATURE_VERSION_STRING "2.3" #elif NPY_FEATURE_VERSION == NPY_2_4_API_VERSION #define NPY_FEATURE_VERSION_STRING "2.4" +#elif NPY_FEATURE_VERSION == NPY_2_5_API_VERSION + #define NPY_FEATURE_VERSION_STRING "2.5" #else #error "Missing version string define for new NumPy version." #endif diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index 3a0b52c3b079..b5695c8f3cde 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -51,6 +51,7 @@ C_ABI_VERSION = '0x02000000' # 0x00000013 - 2.2.x # 0x00000014 - 2.3.x # 0x00000015 - 2.4.x +# 0x00000015 - 2.5.x C_API_VERSION = '0x00000015' # Check whether we have a mismatch between the set C API VERSION and the diff --git a/pyproject.toml b/pyproject.toml index 0dba95b04e84..e7830c5248a1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,7 +7,7 @@ requires = [ [project] name = "numpy" -version = "2.4.0.dev0" +version = "2.5.0.dev0" description = "Fundamental package for array computing in Python" authors = [{name = "Travis E. 
Oliphant et al."}] maintainers = [ @@ -22,7 +22,6 @@ classifiers = [ 'Programming Language :: C', 'Programming Language :: Python', 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.11', 'Programming Language :: Python :: 3.12', 'Programming Language :: Python :: 3.13', 'Programming Language :: Python :: 3.14', From d6fd351cc8544243cc164cdad265f2df0580fcc6 Mon Sep 17 00:00:00 2001 From: Nathaniel Starkman Date: Sun, 30 Nov 2025 16:43:35 -0500 Subject: [PATCH 0941/1718] TYP: add overloads for `generic.__getitem__` (#30335) Co-authored-by: Joren Hammudoglu --- numpy/__init__.pyi | 46 +++++++++++ numpy/_core/records.pyi | 3 +- numpy/typing/tests/data/pass/scalars.py | 13 ++++ numpy/typing/tests/data/reveal/scalars.pyi | 88 ++++++++++++++++++++++ 4 files changed, 149 insertions(+), 1 deletion(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index e19b130d73b6..975a04857db7 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -3678,6 +3678,29 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): @overload def __array__(self, dtype: _DTypeT, /) -> ndarray[tuple[()], _DTypeT]: ... + # + @overload + def __getitem__(self, key: tuple[()], /) -> Self: ... + @overload + def __getitem__( + self, key: EllipsisType | tuple[EllipsisType], / + ) -> ndarray[tuple[()], dtype[Self]]: ... + @overload + def __getitem__( + self, key: None | tuple[None], / + ) -> ndarray[tuple[int], dtype[Self]]: ... + @overload + def __getitem__( + self, key: tuple[None, None], / + ) -> ndarray[tuple[int, int], dtype[Self]]: ... + @overload + def __getitem__( + self, key: tuple[None, None, None], / + ) -> ndarray[tuple[int, int, int], dtype[Self]]: ... + @overload # Limited support for (None,) * N > 3 + def __getitem__(self, key: tuple[None, ...], /) -> NDArray[Self]: ... 
+ + # @overload def __array_wrap__( self, @@ -5708,10 +5731,33 @@ class void(flexible[bytes | tuple[Any, ...]]): # type: ignore[misc] @overload def __new__(cls, length_or_data: object, /, dtype: _DTypeLikeVoid) -> Self: ... + # + @overload + def __getitem__(self, key: tuple[()], /) -> Self: ... + @overload + def __getitem__( + self, key: EllipsisType | tuple[EllipsisType], / + ) -> ndarray[tuple[()], dtype[Self]]: ... + @overload + def __getitem__( + self, key: None | tuple[None], / + ) -> ndarray[tuple[int], dtype[Self]]: ... + @overload + def __getitem__( + self, key: tuple[None, None], / + ) -> ndarray[tuple[int, int], dtype[Self]]: ... + @overload + def __getitem__( + self, key: tuple[None, None, None], / + ) -> ndarray[tuple[int, int, int], dtype[Self]]: ... + @overload # Limited support for (None,) * N > 3 + def __getitem__(self, key: tuple[None, ...], /) -> NDArray[Self]: ... @overload def __getitem__(self, key: str | SupportsIndex, /) -> Any: ... @overload def __getitem__(self, key: list[str], /) -> void: ... + + # def __setitem__(self, key: str | list[str] | SupportsIndex, value: ArrayLike, /) -> None: ... def setfield(self, val: ArrayLike, dtype: DTypeLike, offset: int = ...) -> None: ... diff --git a/numpy/_core/records.pyi b/numpy/_core/records.pyi index 511a6a764829..2b133630cf35 100644 --- a/numpy/_core/records.pyi +++ b/numpy/_core/records.pyi @@ -64,7 +64,8 @@ class record(np.void): # type: ignore[misc] def __getattribute__(self, attr: str, /) -> Any: ... def __setattr__(self, attr: str, val: ArrayLike, /) -> None: ... - @overload + # + @overload # type: ignore[override] def __getitem__(self, key: str | SupportsIndex, /) -> Incomplete: ... @overload def __getitem__(self, key: list[str], /) -> record: ... 
diff --git a/numpy/typing/tests/data/pass/scalars.py b/numpy/typing/tests/data/pass/scalars.py index eeb707b255e1..133c5627e6e5 100644 --- a/numpy/typing/tests/data/pass/scalars.py +++ b/numpy/typing/tests/data/pass/scalars.py @@ -247,3 +247,16 @@ def __float__(self) -> float: c16.reshape(1) U.reshape(1) S.reshape(1) + +# Indexing scalars with any of {None, ..., tuple[()], tuple[None], tuple[...], +# tuple[None, ...]} should be valid +b[None] +i8[None] +u8[None] +f8[None] +c16[None] +c16[...] +c16[()] +c16[(None,)] +c16[(...,)] +c16[None, None] diff --git a/numpy/typing/tests/data/reveal/scalars.pyi b/numpy/typing/tests/data/reveal/scalars.pyi index 67444e33dfc3..ab926baa7f15 100644 --- a/numpy/typing/tests/data/reveal/scalars.pyi +++ b/numpy/typing/tests/data/reveal/scalars.pyi @@ -44,6 +44,94 @@ assert_type(c16.imag, np.float64) assert_type(np.str_("foo"), np.str_) +# Indexing +assert_type(b[()], np.bool) +assert_type(i8[()], np.int64) +assert_type(u8[()], np.uint64) +assert_type(f8[()], np.float64) +assert_type(c8[()], np.complex64) +assert_type(c16[()], np.complex128) +assert_type(U[()], np.str_) +assert_type(S[()], np.bytes_) +assert_type(V[()], np.void) + +assert_type(b[...], np.ndarray[tuple[()], np.dtype[np.bool]]) +assert_type(b[(...,)], np.ndarray[tuple[()], np.dtype[np.bool]]) +assert_type(i8[...], np.ndarray[tuple[()], np.dtype[np.int64]]) +assert_type(i8[(...,)], np.ndarray[tuple[()], np.dtype[np.int64]]) +assert_type(u8[...], np.ndarray[tuple[()], np.dtype[np.uint64]]) +assert_type(u8[(...,)], np.ndarray[tuple[()], np.dtype[np.uint64]]) +assert_type(f8[...], np.ndarray[tuple[()], np.dtype[np.float64]]) +assert_type(f8[(...,)], np.ndarray[tuple[()], np.dtype[np.float64]]) +assert_type(c8[...], np.ndarray[tuple[()], np.dtype[np.complex64]]) +assert_type(c8[(...,)], np.ndarray[tuple[()], np.dtype[np.complex64]]) +assert_type(c16[...], np.ndarray[tuple[()], np.dtype[np.complex128]]) +assert_type(c16[(...,)], np.ndarray[tuple[()], np.dtype[np.complex128]]) 
+assert_type(U[...], np.ndarray[tuple[()], np.dtype[np.str_]]) +assert_type(U[(...,)], np.ndarray[tuple[()], np.dtype[np.str_]]) +assert_type(S[...], np.ndarray[tuple[()], np.dtype[np.bytes_]]) +assert_type(S[(...,)], np.ndarray[tuple[()], np.dtype[np.bytes_]]) +assert_type(V[...], np.ndarray[tuple[()], np.dtype[np.void]]) +assert_type(V[(...,)], np.ndarray[tuple[()], np.dtype[np.void]]) + +None1 = (None,) +None2 = (None, None) +None3 = (None, None, None) +None4 = (None, None, None, None) + +assert_type(b[None], np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(b[None1], np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(b[None2], np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(b[None3], np.ndarray[tuple[int, int, int], np.dtype[np.bool]]) +assert_type(b[None4], np.ndarray[tuple[Any, ...], np.dtype[np.bool]]) + +assert_type(u8[None], np.ndarray[tuple[int], np.dtype[np.uint64]]) +assert_type(u8[None1], np.ndarray[tuple[int], np.dtype[np.uint64]]) +assert_type(u8[None2], np.ndarray[tuple[int, int], np.dtype[np.uint64]]) +assert_type(u8[None3], np.ndarray[tuple[int, int, int], np.dtype[np.uint64]]) +assert_type(u8[None4], np.ndarray[tuple[Any, ...], np.dtype[np.uint64]]) + +assert_type(i8[None], np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(i8[None1], np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(i8[None2], np.ndarray[tuple[int, int], np.dtype[np.int64]]) +assert_type(i8[None3], np.ndarray[tuple[int, int, int], np.dtype[np.int64]]) +assert_type(i8[None4], np.ndarray[tuple[Any, ...], np.dtype[np.int64]]) + +assert_type(f8[None], np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(f8[None1], np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(f8[None2], np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(f8[None3], np.ndarray[tuple[int, int, int], np.dtype[np.float64]]) +assert_type(f8[None4], np.ndarray[tuple[Any, ...], np.dtype[np.float64]]) + +assert_type(c8[None], np.ndarray[tuple[int], 
np.dtype[np.complex64]]) +assert_type(c8[None1], np.ndarray[tuple[int], np.dtype[np.complex64]]) +assert_type(c8[None2], np.ndarray[tuple[int, int], np.dtype[np.complex64]]) +assert_type(c8[None3], np.ndarray[tuple[int, int, int], np.dtype[np.complex64]]) +assert_type(c8[None4], np.ndarray[tuple[Any, ...], np.dtype[np.complex64]]) + +assert_type(c16[None], np.ndarray[tuple[int], np.dtype[np.complex128]]) +assert_type(c16[None1], np.ndarray[tuple[int], np.dtype[np.complex128]]) +assert_type(c16[None2], np.ndarray[tuple[int, int], np.dtype[np.complex128]]) +assert_type(c16[None3], np.ndarray[tuple[int, int, int], np.dtype[np.complex128]]) +assert_type(c16[None4], np.ndarray[tuple[Any, ...], np.dtype[np.complex128]]) + +assert_type(U[None], np.ndarray[tuple[int], np.dtype[np.str_]]) +assert_type(U[None1], np.ndarray[tuple[int], np.dtype[np.str_]]) +assert_type(U[None2], np.ndarray[tuple[int, int], np.dtype[np.str_]]) +assert_type(U[None3], np.ndarray[tuple[int, int, int], np.dtype[np.str_]]) +assert_type(U[None4], np.ndarray[tuple[Any, ...], np.dtype[np.str_]]) + +assert_type(S[None], np.ndarray[tuple[int], np.dtype[np.bytes_]]) +assert_type(S[None1], np.ndarray[tuple[int], np.dtype[np.bytes_]]) +assert_type(S[None2], np.ndarray[tuple[int, int], np.dtype[np.bytes_]]) +assert_type(S[None3], np.ndarray[tuple[int, int, int], np.dtype[np.bytes_]]) +assert_type(S[None4], np.ndarray[tuple[Any, ...], np.dtype[np.bytes_]]) + +assert_type(V[None], np.ndarray[tuple[int], np.dtype[np.void]]) +assert_type(V[None1], np.ndarray[tuple[int], np.dtype[np.void]]) +assert_type(V[None2], np.ndarray[tuple[int, int], np.dtype[np.void]]) +assert_type(V[None3], np.ndarray[tuple[int, int, int], np.dtype[np.void]]) +assert_type(V[None4], np.ndarray[tuple[Any, ...], np.dtype[np.void]]) assert_type(V[0], Any) assert_type(V["field1"], Any) assert_type(V[["field1", "field2"]], np.void) From 7ed9d07c75dcc0f29855b51a844a7ff3a0aa81e8 Mon Sep 17 00:00:00 2001 From: Karan Singh Date: Mon, 1 Dec 2025 
05:36:28 +0530 Subject: [PATCH 0942/1718] DOC: Fix typos and NumPy branding in 1.11.0 release notes (#30336) --- doc/source/release/1.11.0-notes.rst | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/doc/source/release/1.11.0-notes.rst b/doc/source/release/1.11.0-notes.rst index f6fe84a4b17f..4700e37203ce 100644 --- a/doc/source/release/1.11.0-notes.rst +++ b/doc/source/release/1.11.0-notes.rst @@ -27,11 +27,11 @@ Details of these improvements can be found below. Build System Changes ==================== -* Numpy now uses ``setuptools`` for its builds instead of plain distutils. +* NumPy now uses ``setuptools`` for its builds instead of plain distutils. This fixes usage of ``install_requires='numpy'`` in the ``setup.py`` files of - projects that depend on Numpy (see gh-6551). It potentially affects the way - that build/install methods for Numpy itself behave though. Please report any - unexpected behavior on the Numpy issue tracker. + projects that depend on NumPy (see gh-6551). It potentially affects the way + that build/install methods for NumPy itself behave though. Please report any + unexpected behavior on the NumPy issue tracker. * Bento build support and related files have been removed. * Single file build support and related files have been removed. @@ -39,7 +39,7 @@ Build System Changes Future Changes ============== -The following changes are scheduled for Numpy 1.12.0. +The following changes are scheduled for NumPy 1.12.0. * Support for Python 2.6, 3.2, and 3.3 will be dropped. * Relaxed stride checking will become the default. See the 1.8.0 release @@ -61,7 +61,7 @@ The following changes are scheduled for Numpy 1.12.0. In a future release the following changes will be made. * The ``rand`` function exposed in ``numpy.testing`` will be removed. That - function is left over from early Numpy and was implemented using the + function is left over from early NumPy and was implemented using the Python random module. 
The random number generators from ``numpy.random`` should be used instead. * The ``ndarray.view`` method will only allow c_contiguous arrays to be @@ -124,7 +124,7 @@ non-integers for degree specification. *np.dot* now raises ``TypeError`` instead of ``ValueError`` ----------------------------------------------------------- -This behaviour mimics that of other functions such as ``np.inner``. If the two +This behavior mimics that of other functions such as ``np.inner``. If the two arguments cannot be cast to a common type, it could have raised a ``TypeError`` or ``ValueError`` depending on their order. Now, ``np.dot`` will now always raise a ``TypeError``. @@ -194,7 +194,7 @@ New Features * ``f2py.compile`` has a new ``extension`` keyword parameter that allows the fortran extension to be specified for generated temp files. For instance, - the files can be specifies to be ``*.f90``. The ``verbose`` argument is + the files can be specified to be ``*.f90``. The ``verbose`` argument is also activated, it was previously ignored. * A ``dtype`` parameter has been added to ``np.random.randint`` @@ -254,7 +254,7 @@ Memory and speed improvements for masked arrays ----------------------------------------------- Creating a masked array with ``mask=True`` (resp. ``mask=False``) now uses ``np.ones`` (resp. ``np.zeros``) to create the mask, which is faster and -avoid a big memory peak. Another optimization was done to avoid a memory +avoids a big memory peak. Another optimization was done to avoid a memory peak and useless computations when printing a masked array. ``ndarray.tofile`` now uses fallocate on linux @@ -304,13 +304,13 @@ Instead, ``np.broadcast`` can be used in all cases. 
``np.trace`` now respects array subclasses ------------------------------------------ -This behaviour mimics that of other functions such as ``np.diagonal`` and +This behavior mimics that of other functions such as ``np.diagonal`` and ensures, e.g., that for masked arrays ``np.trace(ma)`` and ``ma.trace()`` give the same result. ``np.dot`` now raises ``TypeError`` instead of ``ValueError`` ------------------------------------------------------------- -This behaviour mimics that of other functions such as ``np.inner``. If the two +This behavior mimics that of other functions such as ``np.inner``. If the two arguments cannot be cast to a common type, it could have raised a ``TypeError`` or ``ValueError`` depending on their order. Now, ``np.dot`` will now always raise a ``TypeError``. From 50997e3c3b1f9331d3b03e3867c4896422278e52 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Mon, 1 Dec 2025 09:50:21 +0100 Subject: [PATCH 0943/1718] Revert "MAINT: undo change to `fromstring` text signature for 2.4.0" (#30236) --- numpy/_core/_add_newdocs.py | 4 +++- numpy/_core/tests/test_multiarray.py | 1 + 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index 6b937389defe..eded752b2721 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -1482,9 +1482,11 @@ """) -# Signature can be updated for 2.5.0 release, see gh-30235 for details add_newdoc('numpy._core.multiarray', 'fromstring', """ + fromstring(string, dtype=None, count=-1, *, sep, like=None) + -- + fromstring(string, dtype=float, count=-1, *, sep, like=None) A new 1-D array initialized from text data in a string. 
diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 218fcc79592e..aa2dee8d58bb 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -10993,6 +10993,7 @@ def test_c_func_dispatcher_signature(self, func): (np.fromfile, ("file", "dtype", "count", "sep", "offset", "like")), (np.fromiter, ("iter", "dtype", "count", "like")), (np.frompyfunc, ("func", "nin", "nout", "kwargs")), + (np.fromstring, ("string", "dtype", "count", "sep", "like")), (np.nested_iters, ( "op", "axes", "flags", "op_flags", "op_dtypes", "order", "casting", "buffersize", From 0c6c15b468ca9201b1e74b6a2e84e71139cba181 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 1 Dec 2025 12:32:55 -0700 Subject: [PATCH 0944/1718] MAINT: Bump int128/hide-comment-action from 1.47.0 to 1.48.0 (#30346) Bumps [int128/hide-comment-action](https://github.com/int128/hide-comment-action) from 1.47.0 to 1.48.0. - [Release notes](https://github.com/int128/hide-comment-action/releases) - [Commits](https://github.com/int128/hide-comment-action/compare/3580fff2b9b7c0e16466686530622f0eed93132a...9cdf7fd49089308931b20966baee90f4aadb9f6e) --- updated-dependencies: - dependency-name: int128/hide-comment-action dependency-version: 1.48.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/mypy_primer_comment.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/mypy_primer_comment.yml b/.github/workflows/mypy_primer_comment.yml index 9f4cda234717..13eda8c230b3 100644 --- a/.github/workflows/mypy_primer_comment.yml +++ b/.github/workflows/mypy_primer_comment.yml @@ -49,7 +49,7 @@ jobs: return parseInt(fs.readFileSync("pr_number.txt", { encoding: "utf8" })) - name: Hide old comments - uses: int128/hide-comment-action@3580fff2b9b7c0e16466686530622f0eed93132a # v1.47.0 + uses: int128/hide-comment-action@9cdf7fd49089308931b20966baee90f4aadb9f6e # v1.48.0 with: token: ${{ secrets.GITHUB_TOKEN }} issue-number: ${{ steps.get-pr-number.outputs.result }} From 6b8ad75b19be63b9619fbf5c2290cfb970051222 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 1 Dec 2025 12:33:26 -0700 Subject: [PATCH 0945/1718] MAINT: Bump github/codeql-action from 4.31.5 to 4.31.6 (#30345) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 4.31.5 to 4.31.6. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/fdbfb4d2750291e159f0156def62b853c2798ca2...fe4161a26a8629af62121b670040955b330f9af2) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 4.31.6 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index c758d05e43ab..cbeb3ca07349 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@fdbfb4d2750291e159f0156def62b853c2798ca2 # v4.31.5 + uses: github/codeql-action/init@fe4161a26a8629af62121b670040955b330f9af2 # v4.31.6 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@fdbfb4d2750291e159f0156def62b853c2798ca2 # v4.31.5 + uses: github/codeql-action/autobuild@fe4161a26a8629af62121b670040955b330f9af2 # v4.31.6 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@fdbfb4d2750291e159f0156def62b853c2798ca2 # v4.31.5 + uses: github/codeql-action/analyze@fe4161a26a8629af62121b670040955b330f9af2 # v4.31.6 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index d8daf8779d92..4f9bf10985a5 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
- name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@fdbfb4d2750291e159f0156def62b853c2798ca2 # v2.1.27 + uses: github/codeql-action/upload-sarif@fe4161a26a8629af62121b670040955b330f9af2 # v2.1.27 with: sarif_file: results.sarif From fb0578d3973e926eee2e68ce9fd9996459b8534b Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Mon, 1 Dec 2025 15:09:40 -0700 Subject: [PATCH 0946/1718] MAINT: Update circleci python to 3.12. (#30348) We could go to 3.14, but I would prefer to let it settle a bit longer. [skip actions] [skip cirrus] --- .circleci/config.yml | 4 ++-- doc/source/try_examples.json | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 62c29203dac1..36f0774131a5 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -9,7 +9,7 @@ _defaults: &defaults docker: # CircleCI maintains a library of pre-built images # documented at https://circleci.com/developer/images/image/cimg/python - - image: cimg/python:3.11.13 + - image: cimg/python:3.12.12 working_directory: ~/repo @@ -53,7 +53,7 @@ jobs: - run: name: build NumPy command: | - python3.11 -m venv venv + python3.12 -m venv venv . 
venv/bin/activate pip install --progress-bar=off -r requirements/test_requirements.txt \ -r requirements/build_requirements.txt \ diff --git a/doc/source/try_examples.json b/doc/source/try_examples.json index 510efcdd2694..305860dced55 100644 --- a/doc/source/try_examples.json +++ b/doc/source/try_examples.json @@ -6,3 +6,4 @@ "numpy.__array_namespace_info__.html*" ] } + From d0a14ac3c601b5d54efa5aa56a5008620ecebee0 Mon Sep 17 00:00:00 2001 From: mattip Date: Mon, 1 Dec 2025 13:01:42 +0200 Subject: [PATCH 0947/1718] remove numpy.distutils --- .github/workflows/linux.yml | 2 +- doc/DISTUTILS.rst | 622 ---- doc/neps/scope.rst | 6 +- doc/source/building/distutils_equivalents.rst | 2 +- doc/source/conf.py | 10 +- doc/source/dev/depending_on_numpy.rst | 3 +- doc/source/f2py/buildtools/distutils.rst | 73 - doc/source/f2py/buildtools/index.rst | 2 +- doc/source/f2py/code/setup_example.py | 16 - doc/source/f2py/f2py.getting-started.rst | 9 +- doc/source/f2py/usage.rst | 77 +- doc/source/f2py/windows/index.rst | 6 - doc/source/reference/c-api/coremath.rst | 14 - doc/source/reference/distutils.rst | 219 -- doc/source/reference/distutils/misc_util.rst | 7 - doc/source/reference/distutils_guide.rst | 13 - .../reference/distutils_status_migration.rst | 38 +- doc/source/reference/index.rst | 2 - doc/source/reference/module_structure.rst | 2 - doc/source/try_examples.json | 1 - doc/source/user/absolute_beginners.rst | 2 +- doc/source/user/c-info.python-as-glue.rst | 8 +- meson_cpu/arm/meson.build | 16 +- meson_cpu/loongarch64/meson.build | 2 +- meson_cpu/ppc64/meson.build | 14 +- meson_cpu/riscv64/meson.build | 2 +- meson_cpu/s390x/meson.build | 6 +- meson_cpu/x86/meson.build | 4 +- numpy/__init__.py | 17 +- numpy/__init__.pyi | 2 - .../conv_template.py | 0 numpy/_build_utils/process_src_template.py | 4 +- numpy/_core/code_generators/genapi.py | 4 +- numpy/_core/code_generators/numpy_api.py | 2 +- .../src/_simd}/checks/cpu_asimd.c | 0 .../src/_simd}/checks/cpu_asimddp.c | 0 
.../src/_simd}/checks/cpu_asimdfhm.c | 0 .../src/_simd}/checks/cpu_asimdhp.c | 0 .../src/_simd}/checks/cpu_avx.c | 0 .../src/_simd}/checks/cpu_avx2.c | 0 .../src/_simd}/checks/cpu_avx512_clx.c | 0 .../src/_simd}/checks/cpu_avx512_cnl.c | 0 .../src/_simd}/checks/cpu_avx512_icl.c | 0 .../src/_simd}/checks/cpu_avx512_knl.c | 0 .../src/_simd}/checks/cpu_avx512_knm.c | 0 .../src/_simd}/checks/cpu_avx512_skx.c | 0 .../src/_simd}/checks/cpu_avx512_spr.c | 0 .../src/_simd}/checks/cpu_avx512cd.c | 0 .../src/_simd}/checks/cpu_avx512f.c | 0 .../src/_simd}/checks/cpu_f16c.c | 0 .../src/_simd}/checks/cpu_fma3.c | 0 .../src/_simd}/checks/cpu_fma4.c | 0 .../src/_simd}/checks/cpu_lsx.c | 0 .../src/_simd}/checks/cpu_neon.c | 0 .../src/_simd}/checks/cpu_neon_fp16.c | 0 .../src/_simd}/checks/cpu_neon_vfpv4.c | 0 .../src/_simd}/checks/cpu_popcnt.c | 0 .../src/_simd}/checks/cpu_rvv.c | 0 .../src/_simd}/checks/cpu_sse.c | 0 .../src/_simd}/checks/cpu_sse2.c | 0 .../src/_simd}/checks/cpu_sse3.c | 0 .../src/_simd}/checks/cpu_sse41.c | 0 .../src/_simd}/checks/cpu_sse42.c | 0 .../src/_simd}/checks/cpu_ssse3.c | 0 .../src/_simd}/checks/cpu_sve.c | 0 .../src/_simd}/checks/cpu_vsx.c | 0 .../src/_simd}/checks/cpu_vsx2.c | 0 .../src/_simd}/checks/cpu_vsx3.c | 0 .../src/_simd}/checks/cpu_vsx4.c | 0 .../src/_simd}/checks/cpu_vx.c | 0 .../src/_simd}/checks/cpu_vxe.c | 0 .../src/_simd}/checks/cpu_vxe2.c | 0 .../src/_simd}/checks/cpu_xop.c | 0 .../src/_simd}/checks/extra_avx512bw_mask.c | 0 .../src/_simd}/checks/extra_avx512dq_mask.c | 0 .../src/_simd}/checks/extra_avx512f_reduce.c | 0 .../_simd}/checks/extra_vsx3_half_double.c | 0 .../src/_simd}/checks/extra_vsx4_mma.c | 0 .../src/_simd}/checks/extra_vsx_asm.c | 0 .../src/_simd}/checks/test_flags.c | 0 numpy/_pyinstaller/hook-numpy.py | 1 - numpy/_pytesttester.py | 8 - numpy/distutils/__init__.py | 64 - numpy/distutils/__init__.pyi | 4 - numpy/distutils/_shell_utils.py | 87 - numpy/distutils/armccompiler.py | 26 - numpy/distutils/ccompiler.py | 826 
----- numpy/distutils/ccompiler_opt.py | 2668 -------------- numpy/distutils/command/__init__.py | 41 - numpy/distutils/command/autodist.py | 148 - numpy/distutils/command/bdist_rpm.py | 22 - numpy/distutils/command/build.py | 62 - numpy/distutils/command/build_clib.py | 469 --- numpy/distutils/command/build_ext.py | 752 ---- numpy/distutils/command/build_py.py | 31 - numpy/distutils/command/build_scripts.py | 49 - numpy/distutils/command/build_src.py | 773 ---- numpy/distutils/command/config.py | 516 --- numpy/distutils/command/config_compiler.py | 126 - numpy/distutils/command/develop.py | 15 - numpy/distutils/command/egg_info.py | 25 - numpy/distutils/command/install.py | 79 - numpy/distutils/command/install_clib.py | 40 - numpy/distutils/command/install_data.py | 24 - numpy/distutils/command/install_headers.py | 25 - numpy/distutils/command/sdist.py | 27 - numpy/distutils/core.py | 215 -- numpy/distutils/cpuinfo.py | 683 ---- numpy/distutils/exec_command.py | 315 -- numpy/distutils/extension.py | 101 - numpy/distutils/fcompiler/__init__.py | 1035 ------ numpy/distutils/fcompiler/absoft.py | 158 - numpy/distutils/fcompiler/arm.py | 71 - numpy/distutils/fcompiler/compaq.py | 120 - numpy/distutils/fcompiler/environment.py | 88 - numpy/distutils/fcompiler/fujitsu.py | 46 - numpy/distutils/fcompiler/g95.py | 42 - numpy/distutils/fcompiler/gnu.py | 555 --- numpy/distutils/fcompiler/hpux.py | 41 - numpy/distutils/fcompiler/ibm.py | 97 - numpy/distutils/fcompiler/intel.py | 211 -- numpy/distutils/fcompiler/lahey.py | 45 - numpy/distutils/fcompiler/mips.py | 54 - numpy/distutils/fcompiler/nag.py | 87 - numpy/distutils/fcompiler/none.py | 28 - numpy/distutils/fcompiler/nv.py | 53 - numpy/distutils/fcompiler/pathf95.py | 33 - numpy/distutils/fcompiler/pg.py | 128 - numpy/distutils/fcompiler/sun.py | 51 - numpy/distutils/fcompiler/vast.py | 52 - numpy/distutils/from_template.py | 261 -- numpy/distutils/fujitsuccompiler.py | 28 - numpy/distutils/intelccompiler.py | 106 - 
numpy/distutils/lib2def.py | 116 - numpy/distutils/line_endings.py | 77 - numpy/distutils/log.py | 111 - numpy/distutils/mingw/gfortran_vs2003_hack.c | 6 - numpy/distutils/mingw32ccompiler.py | 620 ---- numpy/distutils/misc_util.py | 2484 ------------- numpy/distutils/msvc9compiler.py | 63 - numpy/distutils/msvccompiler.py | 76 - numpy/distutils/npy_pkg_config.py | 441 --- numpy/distutils/numpy_distribution.py | 17 - numpy/distutils/pathccompiler.py | 21 - numpy/distutils/system_info.py | 3267 ----------------- numpy/distutils/tests/__init__.py | 0 numpy/distutils/tests/test_build_ext.py | 74 - numpy/distutils/tests/test_ccompiler_opt.py | 808 ---- .../tests/test_ccompiler_opt_conf.py | 176 - numpy/distutils/tests/test_exec_command.py | 217 -- numpy/distutils/tests/test_fcompiler.py | 43 - numpy/distutils/tests/test_fcompiler_gnu.py | 55 - numpy/distutils/tests/test_fcompiler_intel.py | 30 - .../distutils/tests/test_fcompiler_nagfor.py | 22 - numpy/distutils/tests/test_from_template.py | 44 - numpy/distutils/tests/test_log.py | 34 - .../distutils/tests/test_mingw32ccompiler.py | 47 - numpy/distutils/tests/test_misc_util.py | 88 - numpy/distutils/tests/test_npy_pkg_config.py | 84 - numpy/distutils/tests/test_shell_utils.py | 79 - numpy/distutils/tests/test_system_info.py | 334 -- numpy/distutils/tests/utilities.py | 90 - numpy/distutils/unixccompiler.py | 141 - numpy/f2py/_backends/__init__.py | 3 - numpy/f2py/_backends/_distutils.py | 76 - numpy/f2py/diagnose.py | 59 +- numpy/f2py/f2py2e.py | 37 +- numpy/f2py/tests/test_f2py2e.py | 32 - numpy/testing/_private/utils.py | 6 +- numpy/tests/test_public_api.py | 84 +- numpy/typing/tests/test_isfile.py | 2 - pytest.ini | 5 +- ruff.toml | 1 - tools/check_installed_files.py | 5 - tools/stubtest/allowlist_py311.txt | 1 - tools/stubtest/allowlist_py312.txt | 2 - tools/stubtest/mypy.ini | 1 - 177 files changed, 89 insertions(+), 22709 deletions(-) delete mode 100644 doc/DISTUTILS.rst delete mode 100644 
doc/source/f2py/code/setup_example.py delete mode 100644 doc/source/reference/distutils.rst delete mode 100644 doc/source/reference/distutils/misc_util.rst delete mode 100644 doc/source/reference/distutils_guide.rst rename numpy/{distutils => _build_utils}/conv_template.py (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_asimd.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_asimddp.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_asimdfhm.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_asimdhp.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_avx.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_avx2.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_avx512_clx.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_avx512_cnl.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_avx512_icl.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_avx512_knl.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_avx512_knm.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_avx512_skx.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_avx512_spr.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_avx512cd.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_avx512f.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_f16c.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_fma3.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_fma4.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_lsx.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_neon.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_neon_fp16.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_neon_vfpv4.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_popcnt.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_rvv.c 
(100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_sse.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_sse2.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_sse3.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_sse41.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_sse42.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_ssse3.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_sve.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_vsx.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_vsx2.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_vsx3.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_vsx4.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_vx.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_vxe.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_vxe2.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/cpu_xop.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/extra_avx512bw_mask.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/extra_avx512dq_mask.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/extra_avx512f_reduce.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/extra_vsx3_half_double.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/extra_vsx4_mma.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/extra_vsx_asm.c (100%) rename numpy/{distutils => _core/src/_simd}/checks/test_flags.c (100%) delete mode 100644 numpy/distutils/__init__.py delete mode 100644 numpy/distutils/__init__.pyi delete mode 100644 numpy/distutils/_shell_utils.py delete mode 100644 numpy/distutils/armccompiler.py delete mode 100644 numpy/distutils/ccompiler.py delete mode 100644 numpy/distutils/ccompiler_opt.py delete mode 100644 numpy/distutils/command/__init__.py delete mode 100644 numpy/distutils/command/autodist.py delete 
mode 100644 numpy/distutils/command/bdist_rpm.py delete mode 100644 numpy/distutils/command/build.py delete mode 100644 numpy/distutils/command/build_clib.py delete mode 100644 numpy/distutils/command/build_ext.py delete mode 100644 numpy/distutils/command/build_py.py delete mode 100644 numpy/distutils/command/build_scripts.py delete mode 100644 numpy/distutils/command/build_src.py delete mode 100644 numpy/distutils/command/config.py delete mode 100644 numpy/distutils/command/config_compiler.py delete mode 100644 numpy/distutils/command/develop.py delete mode 100644 numpy/distutils/command/egg_info.py delete mode 100644 numpy/distutils/command/install.py delete mode 100644 numpy/distutils/command/install_clib.py delete mode 100644 numpy/distutils/command/install_data.py delete mode 100644 numpy/distutils/command/install_headers.py delete mode 100644 numpy/distutils/command/sdist.py delete mode 100644 numpy/distutils/core.py delete mode 100644 numpy/distutils/cpuinfo.py delete mode 100644 numpy/distutils/exec_command.py delete mode 100644 numpy/distutils/extension.py delete mode 100644 numpy/distutils/fcompiler/__init__.py delete mode 100644 numpy/distutils/fcompiler/absoft.py delete mode 100644 numpy/distutils/fcompiler/arm.py delete mode 100644 numpy/distutils/fcompiler/compaq.py delete mode 100644 numpy/distutils/fcompiler/environment.py delete mode 100644 numpy/distutils/fcompiler/fujitsu.py delete mode 100644 numpy/distutils/fcompiler/g95.py delete mode 100644 numpy/distutils/fcompiler/gnu.py delete mode 100644 numpy/distutils/fcompiler/hpux.py delete mode 100644 numpy/distutils/fcompiler/ibm.py delete mode 100644 numpy/distutils/fcompiler/intel.py delete mode 100644 numpy/distutils/fcompiler/lahey.py delete mode 100644 numpy/distutils/fcompiler/mips.py delete mode 100644 numpy/distutils/fcompiler/nag.py delete mode 100644 numpy/distutils/fcompiler/none.py delete mode 100644 numpy/distutils/fcompiler/nv.py delete mode 100644 numpy/distutils/fcompiler/pathf95.py 
delete mode 100644 numpy/distutils/fcompiler/pg.py delete mode 100644 numpy/distutils/fcompiler/sun.py delete mode 100644 numpy/distutils/fcompiler/vast.py delete mode 100644 numpy/distutils/from_template.py delete mode 100644 numpy/distutils/fujitsuccompiler.py delete mode 100644 numpy/distutils/intelccompiler.py delete mode 100644 numpy/distutils/lib2def.py delete mode 100644 numpy/distutils/line_endings.py delete mode 100644 numpy/distutils/log.py delete mode 100644 numpy/distutils/mingw/gfortran_vs2003_hack.c delete mode 100644 numpy/distutils/mingw32ccompiler.py delete mode 100644 numpy/distutils/misc_util.py delete mode 100644 numpy/distutils/msvc9compiler.py delete mode 100644 numpy/distutils/msvccompiler.py delete mode 100644 numpy/distutils/npy_pkg_config.py delete mode 100644 numpy/distutils/numpy_distribution.py delete mode 100644 numpy/distutils/pathccompiler.py delete mode 100644 numpy/distutils/system_info.py delete mode 100644 numpy/distutils/tests/__init__.py delete mode 100644 numpy/distutils/tests/test_build_ext.py delete mode 100644 numpy/distutils/tests/test_ccompiler_opt.py delete mode 100644 numpy/distutils/tests/test_ccompiler_opt_conf.py delete mode 100644 numpy/distutils/tests/test_exec_command.py delete mode 100644 numpy/distutils/tests/test_fcompiler.py delete mode 100644 numpy/distutils/tests/test_fcompiler_gnu.py delete mode 100644 numpy/distutils/tests/test_fcompiler_intel.py delete mode 100644 numpy/distutils/tests/test_fcompiler_nagfor.py delete mode 100644 numpy/distutils/tests/test_from_template.py delete mode 100644 numpy/distutils/tests/test_log.py delete mode 100644 numpy/distutils/tests/test_mingw32ccompiler.py delete mode 100644 numpy/distutils/tests/test_misc_util.py delete mode 100644 numpy/distutils/tests/test_npy_pkg_config.py delete mode 100644 numpy/distutils/tests/test_shell_utils.py delete mode 100644 numpy/distutils/tests/test_system_info.py delete mode 100644 numpy/distutils/tests/utilities.py delete mode 100644 
numpy/distutils/unixccompiler.py delete mode 100644 numpy/f2py/_backends/_distutils.py diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 9d1c3ac20a45..a23f1f7649d8 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -368,7 +368,7 @@ jobs: - name: Check for unreachable code paths in Python modules run: | # Need the explicit `bash -c` here because `grep` returns exit code 1 for no matches - bash -c "! vulture . --min-confidence 100 --exclude doc/,numpy/distutils/,vendored-meson/ | grep 'unreachable'" + bash -c "! vulture . --min-confidence 100 --exclude doc/,vendored-meson/ | grep 'unreachable'" - name: Check usage of install_tag run: | rm -rf build-install diff --git a/doc/DISTUTILS.rst b/doc/DISTUTILS.rst deleted file mode 100644 index 142c15a7124a..000000000000 --- a/doc/DISTUTILS.rst +++ /dev/null @@ -1,622 +0,0 @@ -.. -*- rest -*- - -NumPy distutils - users guide -============================= - -.. contents:: - -SciPy structure -''''''''''''''' - -Currently SciPy project consists of two packages: - -- NumPy --- it provides packages like: - - + numpy.distutils - extension to Python distutils - + numpy.f2py - a tool to bind Fortran/C codes to Python - + numpy._core - future replacement of Numeric and numarray packages - + numpy.lib - extra utility functions - + numpy.testing - numpy-style tools for unit testing - + etc - -- SciPy --- a collection of scientific tools for Python. - -The aim of this document is to describe how to add new tools to SciPy. - - -Requirements for SciPy packages -''''''''''''''''''''''''''''''' - -SciPy consists of Python packages, called SciPy packages, that are -available to Python users via the ``scipy`` namespace. Each SciPy package -may contain other SciPy packages. And so on. Therefore, the SciPy -directory tree is a tree of packages with arbitrary depth and width. 
-Any SciPy package may depend on NumPy packages but the dependence on other -SciPy packages should be kept minimal or zero. - -A SciPy package contains, in addition to its sources, the following -files and directories: - -+ ``setup.py`` --- building script -+ ``__init__.py`` --- package initializer -+ ``tests/`` --- directory of unittests - -Their contents are described below. - -The ``setup.py`` file -''''''''''''''''''''' - -In order to add a Python package to SciPy, its build script (``setup.py``) -must meet certain requirements. The most important requirement is that the -package define a ``configuration(parent_package='',top_path=None)`` function -which returns a dictionary suitable for passing to -``numpy.distutils.core.setup(..)``. To simplify the construction of -this dictionary, ``numpy.distutils.misc_util`` provides the -``Configuration`` class, described below. - -SciPy pure Python package example ---------------------------------- - -Below is an example of a minimal ``setup.py`` file for a pure SciPy package:: - - #!/usr/bin/env python3 - def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('mypackage',parent_package,top_path) - return config - - if __name__ == "__main__": - from numpy.distutils.core import setup - #setup(**configuration(top_path='').todict()) - setup(configuration=configuration) - -The arguments of the ``configuration`` function specify the name of -parent SciPy package (``parent_package``) and the directory location -of the main ``setup.py`` script (``top_path``). These arguments, -along with the name of the current package, should be passed to the -``Configuration`` constructor. - -The ``Configuration`` constructor has a fourth optional argument, -``package_path``, that can be used when package files are located in -a different location than the directory of the ``setup.py`` file. 
- -Remaining ``Configuration`` arguments are all keyword arguments that will -be used to initialize attributes of ``Configuration`` -instance. Usually, these keywords are the same as the ones that -``setup(..)`` function would expect, for example, ``packages``, -``ext_modules``, ``data_files``, ``include_dirs``, ``libraries``, -``headers``, ``scripts``, ``package_dir``, etc. However, the direct -specification of these keywords is not recommended as the content of -these keyword arguments will not be processed or checked for the -consistency of SciPy building system. - -Finally, ``Configuration`` has ``.todict()`` method that returns all -the configuration data as a dictionary suitable for passing on to the -``setup(..)`` function. - -``Configuration`` instance attributes -------------------------------------- - -In addition to attributes that can be specified via keyword arguments -to ``Configuration`` constructor, ``Configuration`` instance (let us -denote as ``config``) has the following attributes that can be useful -in writing setup scripts: - -+ ``config.name`` - full name of the current package. The names of parent - packages can be extracted as ``config.name.split('.')``. - -+ ``config.local_path`` - path to the location of current ``setup.py`` file. - -+ ``config.top_path`` - path to the location of main ``setup.py`` file. - -``Configuration`` instance methods ----------------------------------- - -+ ``config.todict()`` --- returns configuration dictionary suitable for - passing to ``numpy.distutils.core.setup(..)`` function. - -+ ``config.paths(*paths) --- applies ``glob.glob(..)`` to items of - ``paths`` if necessary. Fixes ``paths`` item that is relative to - ``config.local_path``. - -+ ``config.get_subpackage(subpackage_name,subpackage_path=None)`` --- - returns a list of subpackage configurations. 
Subpackage is looked in the - current directory under the name ``subpackage_name`` but the path - can be specified also via optional ``subpackage_path`` argument. - If ``subpackage_name`` is specified as ``None`` then the subpackage - name will be taken the basename of ``subpackage_path``. - Any ``*`` used for subpackage names are expanded as wildcards. - -+ ``config.add_subpackage(subpackage_name,subpackage_path=None)`` --- - add SciPy subpackage configuration to the current one. The meaning - and usage of arguments is explained above, see - ``config.get_subpackage()`` method. - -+ ``config.add_data_files(*files)`` --- prepend ``files`` to ``data_files`` - list. If ``files`` item is a tuple then its first element defines - the suffix of where data files are copied relative to package installation - directory and the second element specifies the path to data - files. By default data files are copied under package installation - directory. For example, - - :: - - config.add_data_files('foo.dat', - ('fun',['gun.dat','nun/pun.dat','/tmp/sun.dat']), - 'bar/car.dat'. - '/full/path/to/can.dat', - ) - - will install data files to the following locations - - :: - - / - foo.dat - fun/ - gun.dat - pun.dat - sun.dat - bar/ - car.dat - can.dat - - Path to data files can be a function taking no arguments and - returning path(s) to data files -- this is a useful when data files - are generated while building the package. (XXX: explain the step - when this function are called exactly) - -+ ``config.add_data_dir(data_path)`` --- add directory ``data_path`` - recursively to ``data_files``. The whole directory tree starting at - ``data_path`` will be copied under package installation directory. - If ``data_path`` is a tuple then its first element defines - the suffix of where data files are copied relative to package installation - directory and the second element specifies the path to data directory. 
- By default, data directory are copied under package installation - directory under the basename of ``data_path``. For example, - - :: - - config.add_data_dir('fun') # fun/ contains foo.dat bar/car.dat - config.add_data_dir(('sun','fun')) - config.add_data_dir(('gun','/full/path/to/fun')) - - will install data files to the following locations - - :: - - / - fun/ - foo.dat - bar/ - car.dat - sun/ - foo.dat - bar/ - car.dat - gun/ - foo.dat - bar/ - car.dat - -+ ``config.add_include_dirs(*paths)`` --- prepend ``paths`` to - ``include_dirs`` list. This list will be visible to all extension - modules of the current package. - -+ ``config.add_headers(*files)`` --- prepend ``files`` to ``headers`` - list. By default, headers will be installed under - ``/include/pythonX.X//`` - directory. If ``files`` item is a tuple then it's first argument - specifies the installation suffix relative to - ``/include/pythonX.X/`` path. This is a Python distutils - method; its use is discouraged for NumPy and SciPy in favour of - ``config.add_data_files(*files)``. - -+ ``config.add_scripts(*files)`` --- prepend ``files`` to ``scripts`` - list. Scripts will be installed under ``/bin/`` directory. - -+ ``config.add_extension(name,sources,**kw)`` --- create and add an - ``Extension`` instance to ``ext_modules`` list. The first argument - ``name`` defines the name of the extension module that will be - installed under ``config.name`` package. The second argument is - a list of sources. ``add_extension`` method takes also keyword - arguments that are passed on to the ``Extension`` constructor. - The list of allowed keywords is the following: ``include_dirs``, - ``define_macros``, ``undef_macros``, ``library_dirs``, ``libraries``, - ``runtime_library_dirs``, ``extra_objects``, ``extra_compile_args``, - ``extra_link_args``, ``export_symbols``, ``swig_opts``, ``depends``, - ``language``, ``f2py_options``, ``module_dirs``, ``extra_info``, - ``extra_f77_compile_args``, ``extra_f90_compile_args``. 
- - Note that ``config.paths`` method is applied to all lists that - may contain paths. ``extra_info`` is a dictionary or a list - of dictionaries that content will be appended to keyword arguments. - The list ``depends`` contains paths to files or directories - that the sources of the extension module depend on. If any path - in the ``depends`` list is newer than the extension module, then - the module will be rebuilt. - - The list of sources may contain functions ('source generators') - with a pattern ``def (ext, build_dir): return - ``. If ``funcname`` returns ``None``, no sources - are generated. And if the ``Extension`` instance has no sources - after processing all source generators, no extension module will - be built. This is the recommended way to conditionally define - extension modules. Source generator functions are called by the - ``build_src`` sub-command of ``numpy.distutils``. - - For example, here is a typical source generator function:: - - def generate_source(ext,build_dir): - import os - from distutils.dep_util import newer - target = os.path.join(build_dir,'somesource.c') - if newer(target,__file__): - # create target file - return target - - The first argument contains the Extension instance that can be - useful to access its attributes like ``depends``, ``sources``, - etc. lists and modify them during the building process. - The second argument gives a path to a build directory that must - be used when creating files to a disk. - -+ ``config.add_library(name, sources, **build_info)`` --- add a - library to ``libraries`` list. Allowed keywords arguments are - ``depends``, ``macros``, ``include_dirs``, ``extra_compiler_args``, - ``f2py_options``, ``extra_f77_compile_args``, - ``extra_f90_compile_args``. See ``.add_extension()`` method for - more information on arguments. - -+ ``config.have_f77c()`` --- return True if Fortran 77 compiler is - available (read: a simple Fortran 77 code compiled successfully). 
- -+ ``config.have_f90c()`` --- return True if Fortran 90 compiler is - available (read: a simple Fortran 90 code compiled successfully). - -+ ``config.get_version()`` --- return version string of the current package, - ``None`` if version information could not be detected. This methods - scans files ``__version__.py``, ``_version.py``, - ``version.py``, ``__svn_version__.py`` for string variables - ``version``, ``__version__``, ``_version``. - -+ ``config.make_svn_version_py()`` --- appends a data function to - ``data_files`` list that will generate ``__svn_version__.py`` file - to the current package directory. The file will be removed from - the source directory when Python exits. - -+ ``config.get_build_temp_dir()`` --- return a path to a temporary - directory. This is the place where one should build temporary - files. - -+ ``config.get_distribution()`` --- return distutils ``Distribution`` - instance. - -+ ``config.get_config_cmd()`` --- returns ``numpy.distutils`` config - command instance. - -+ ``config.get_info(*names)`` --- - - -.. _templating: - -Conversion of ``.src`` files using templates --------------------------------------------- - -NumPy distutils supports automatic conversion of source files named -.src. This facility can be used to maintain very similar -code blocks requiring only simple changes between blocks. During the -build phase of setup, if a template file named .src is -encountered, a new file named is constructed from the -template and placed in the build directory to be used instead. Two -forms of template conversion are supported. The first form occurs for -files named .ext.src where ext is a recognized Fortran -extension (f, f90, f95, f77, for, ftn, pyf). The second form is used -for all other cases. - -.. index:: - single: code generation - -Fortran files -------------- - -This template converter will replicate all **function** and -**subroutine** blocks in the file with names that contain '<...>' -according to the rules in '<...>'. 
The number of comma-separated words -in '<...>' determines the number of times the block is repeated. What -these words are indicates what that repeat rule, '<...>', should be -replaced with in each block. All of the repeat rules in a block must -contain the same number of comma-separated words indicating the number -of times that block should be repeated. If the word in the repeat rule -needs a comma, leftarrow, or rightarrow, then prepend it with a -backslash ' \'. If a word in the repeat rule matches ' \\' then -it will be replaced with the -th word in the same repeat -specification. There are two forms for the repeat rule: named and -short. - -Named repeat rule -^^^^^^^^^^^^^^^^^ - -A named repeat rule is useful when the same set of repeats must be -used several times in a block. It is specified using , where N is the number of times the block -should be repeated. On each repeat of the block, the entire -expression, '<...>' will be replaced first with item1, and then with -item2, and so forth until N repeats are accomplished. Once a named -repeat specification has been introduced, the same repeat rule may be -used **in the current block** by referring only to the name -(i.e. ). - - -Short repeat rule -^^^^^^^^^^^^^^^^^ - -A short repeat rule looks like . The -rule specifies that the entire expression, '<...>' should be replaced -first with item1, and then with item2, and so forth until N repeats -are accomplished. - - -Pre-defined names -^^^^^^^^^^^^^^^^^ - -The following predefined named repeat rules are available: - -- - -- <_c=s,d,c,z> - -- <_t=real, double precision, complex, double complex> - -- - -- - -- - -- - - -Other files ------------- - -Non-Fortran files use a separate syntax for defining template blocks -that should be repeated using a variable expansion similar to the -named repeat rules of the Fortran-specific repeats. 
- -NumPy Distutils preprocesses C source files (extension: :file:`.c.src`) written -in a custom templating language to generate C code. The ``@`` symbol is -used to wrap macro-style variables to empower a string substitution mechanism -that might describe (for instance) a set of data types. - -The template language blocks are delimited by ``/**begin repeat`` -and ``/**end repeat**/`` lines, which may also be nested using -consecutively numbered delimiting lines such as ``/**begin repeat1`` -and ``/**end repeat1**/``: - -1. ``/**begin repeat`` on a line by itself marks the beginning of - a segment that should be repeated. - -2. Named variable expansions are defined using ``#name=item1, item2, item3, - ..., itemN#`` and placed on successive lines. These variables are - replaced in each repeat block with corresponding word. All named - variables in the same repeat block must define the same number of - words. - -3. In specifying the repeat rule for a named variable, ``item*N`` is short- - hand for ``item, item, ..., item`` repeated N times. In addition, - parenthesis in combination with ``*N`` can be used for grouping several - items that should be repeated. Thus, ``#name=(item1, item2)*4#`` is - equivalent to ``#name=item1, item2, item1, item2, item1, item2, item1, - item2#``. - -4. ``*/`` on a line by itself marks the end of the variable expansion - naming. The next line is the first line that will be repeated using - the named rules. - -5. Inside the block to be repeated, the variables that should be expanded - are specified as ``@name@``. - -6. ``/**end repeat**/`` on a line by itself marks the previous line - as the last line of the block to be repeated. - -7. A loop in the NumPy C source code may have a ``@TYPE@`` variable, targeted - for string substitution, which is preprocessed to a number of otherwise - identical loops with several strings such as ``INT``, ``LONG``, ``UINT``, - ``ULONG``. 
The ``@TYPE@`` style syntax thus reduces code duplication and - maintenance burden by mimicking languages that have generic type support. - -The above rules may be clearer in the following template source example: - -.. code-block:: NumPyC - :linenos: - :emphasize-lines: 3, 13, 29, 31 - - /* TIMEDELTA to non-float types */ - - /**begin repeat - * - * #TOTYPE = BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, - * LONGLONG, ULONGLONG, DATETIME, - * TIMEDELTA# - * #totype = npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int, npy_uint, - * npy_long, npy_ulong, npy_longlong, npy_ulonglong, - * npy_datetime, npy_timedelta# - */ - - /**begin repeat1 - * - * #FROMTYPE = TIMEDELTA# - * #fromtype = npy_timedelta# - */ - static void - @FROMTYPE@_to_@TOTYPE@(void *input, void *output, npy_intp n, - void *NPY_UNUSED(aip), void *NPY_UNUSED(aop)) - { - const @fromtype@ *ip = input; - @totype@ *op = output; - - while (n--) { - *op++ = (@totype@)*ip++; - } - } - /**end repeat1**/ - - /**end repeat**/ - -The preprocessing of generically-typed C source files (whether in NumPy -proper or in any third party package using NumPy Distutils) is performed -by `conv_template.py`_. -The type-specific C files generated (extension: ``.c``) -by these modules during the build process are ready to be compiled. This -form of generic typing is also supported for C header files (preprocessed -to produce ``.h`` files). - -.. _conv_template.py: https://github.com/numpy/numpy/blob/main/numpy/distutils/conv_template.py - -Useful functions in ``numpy.distutils.misc_util`` -------------------------------------------------- - -+ ``get_numpy_include_dirs()`` --- return a list of NumPy base - include directories. NumPy base include directories contain - header files such as ``numpy/arrayobject.h``, ``numpy/funcobject.h`` - etc. 
For installed NumPy the returned list has length 1 - but when building NumPy the list may contain more directories, - for example, a path to ``config.h`` file that - ``numpy/base/setup.py`` file generates and is used by ``numpy`` - header files. - -+ ``append_path(prefix,path)`` --- smart append ``path`` to ``prefix``. - -+ ``gpaths(paths, local_path='')`` --- apply glob to paths and prepend - ``local_path`` if needed. - -+ ``njoin(*path)`` --- join pathname components + convert ``/``-separated path - to ``os.sep``-separated path and resolve ``..``, ``.`` from paths. - Ex. ``njoin('a',['b','./c'],'..','g') -> os.path.join('a','b','g')``. - -+ ``minrelpath(path)`` --- resolves dots in ``path``. - -+ ``rel_path(path, parent_path)`` --- return ``path`` relative to ``parent_path``. - -+ ``def get_cmd(cmdname,_cache={})`` --- returns ``numpy.distutils`` - command instance. - -+ ``all_strings(lst)`` - -+ ``has_f_sources(sources)`` - -+ ``has_cxx_sources(sources)`` - -+ ``filter_sources(sources)`` --- return ``c_sources, cxx_sources, - f_sources, fmodule_sources`` - -+ ``get_dependencies(sources)`` - -+ ``is_local_src_dir(directory)`` - -+ ``get_ext_source_files(ext)`` - -+ ``get_script_files(scripts)`` - -+ ``get_lib_source_files(lib)`` - -+ ``get_data_files(data)`` - -+ ``dot_join(*args)`` --- join non-zero arguments with a dot. - -+ ``get_frame(level=0)`` --- return frame object from call stack with given level. - -+ ``cyg2win32(path)`` - -+ ``mingw32()`` --- return ``True`` when using mingw32 environment. - -+ ``terminal_has_colors()``, ``red_text(s)``, ``green_text(s)``, - ``yellow_text(s)``, ``blue_text(s)``, ``cyan_text(s)`` - -+ ``get_path(mod_name,parent_path=None)`` --- return path of a module - relative to parent_path when given. Handles also ``__main__`` and - ``__builtin__`` modules. - -+ ``allpath(name)`` --- replaces ``/`` with ``os.sep`` in ``name``. 
- -+ ``cxx_ext_match``, ``fortran_ext_match``, ``f90_ext_match``, - ``f90_module_name_match`` - -``numpy.distutils.system_info`` module --------------------------------------- - -+ ``get_info(name,notfound_action=0)`` -+ ``combine_paths(*args,**kws)`` -+ ``show_all()`` - -``numpy.distutils.cpuinfo`` module ----------------------------------- - -+ ``cpuinfo`` - -``numpy.distutils.log`` module ------------------------------- - -+ ``set_verbosity(v)`` - - -``numpy.distutils.exec_command`` module ---------------------------------------- - -+ ``get_pythonexe()`` -+ ``find_executable(exe, path=None)`` -+ ``exec_command( command, execute_in='', use_shell=None, use_tee=None, **env )`` - -The ``__init__.py`` file -'''''''''''''''''''''''' - -The header of a typical SciPy ``__init__.py`` is:: - - """ - Package docstring, typically with a brief description and function listing. - """ - - # import functions into module namespace - from .subpackage import * - ... - - __all__ = [s for s in dir() if not s.startswith('_')] - - from numpy.testing import Tester - test = Tester().test - bench = Tester().bench - -Extra features in NumPy Distutils -''''''''''''''''''''''''''''''''' - -Specifying config_fc options for libraries in setup.py script -------------------------------------------------------------- - -It is possible to specify config_fc options in setup.py scripts. -For example, using:: - - config.add_library('library', - sources=[...], - config_fc={'noopt':(__file__,1)}) - -will compile the ``library`` sources without optimization flags. - -It's recommended to specify only those config_fc options in such a way -that are compiler independent. - -Getting extra Fortran 77 compiler options from source ------------------------------------------------------ - -Some old Fortran codes need special compiler options in order to -work correctly. 
In order to specify compiler options per source -file, ``numpy.distutils`` Fortran compiler looks for the following -pattern:: - - CF77FLAGS() = - -in the first 20 lines of the source and use the ``f77flags`` for -specified type of the fcompiler (the first character ``C`` is optional). - -TODO: This feature can be easily extended for Fortran 90 codes as -well. Let us know if you would need such a feature. diff --git a/doc/neps/scope.rst b/doc/neps/scope.rst index 93887c4b12ff..1d5722700a79 100644 --- a/doc/neps/scope.rst +++ b/doc/neps/scope.rst @@ -36,10 +36,10 @@ Here, we describe aspects of N-d array computation that are within scope for Num - NumPy provides some **infrastructure for other packages in the scientific Python ecosystem**: - - numpy.distutils (build support for C++, Fortran, BLAS/LAPACK, and other - relevant libraries for scientific computing) + - numpy.distutils (deprecated and removed, build support for C++, Fortran, + BLAS/LAPACK, and other relevant libraries for scientific computing) - f2py (generating bindings for Fortran code) - - testing utilities + - testing utilities (mostly deprecated, pytest does a good job) - **Speed**: we take performance concerns seriously and aim to execute operations on large arrays with similar performance as native C diff --git a/doc/source/building/distutils_equivalents.rst b/doc/source/building/distutils_equivalents.rst index 156174d02358..65821bfec9d9 100644 --- a/doc/source/building/distutils_equivalents.rst +++ b/doc/source/building/distutils_equivalents.rst @@ -3,7 +3,7 @@ Meson and ``distutils`` ways of doing things -------------------------------------------- -*Old workflows (numpy.distutils based):* +*Old workflows (numpy.distutils based, no longer relevant):* 1. ``python runtests.py`` 2. 
``python setup.py build_ext -i`` + ``export diff --git a/doc/source/conf.py b/doc/source/conf.py index af431db44351..e26719b05cb0 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -154,21 +154,13 @@ class PyTypeObject(ctypes.Structure): nitpick_ignore = [] if sys.version_info[:2] >= (3, 12): - exclude_patterns += [ - "reference/distutils.rst", - "reference/distutils/misc_util.rst", - ] suppress_warnings += [ 'toc.excluded', # Suppress warnings about excluded toctree entries ] nitpicky = True nitpick_ignore += [ - ('ref', 'numpy-distutils-refguide'), - # The first ignore is not catpured without nitpicky = True. - # These three ignores are required once nitpicky = True is set. - ('py:mod', 'numpy.distutils'), + # The first ignore is not captured without nitpicky = True. ('py:class', 'Extension'), - ('py:class', 'numpy.distutils.misc_util.Configuration'), ] # If true, '()' will be appended to :func: etc. cross-reference text. diff --git a/doc/source/dev/depending_on_numpy.rst b/doc/source/dev/depending_on_numpy.rst index 7583dc9af84a..fcdae806fad3 100644 --- a/doc/source/dev/depending_on_numpy.rst +++ b/doc/source/dev/depending_on_numpy.rst @@ -142,8 +142,7 @@ recommend all packages depending on NumPy to follow the recommendations in NEP 29. For *run-time dependencies*, specify version bounds using -``install_requires`` in ``setup.py`` (assuming you use ``numpy.distutils`` or -``setuptools`` to build). +``install_requires`` in ``setup.py`` (assuming you use ``setuptools`` to build). Most libraries that rely on NumPy will not need to set an upper version bound: NumPy is careful to preserve backward-compatibility. 
diff --git a/doc/source/f2py/buildtools/distutils.rst b/doc/source/f2py/buildtools/distutils.rst index 87e17a811cd0..98416c8cdef9 100644 --- a/doc/source/f2py/buildtools/distutils.rst +++ b/doc/source/f2py/buildtools/distutils.rst @@ -9,76 +9,3 @@ Using via `numpy.distutils` ``distutils`` has been removed in favor of ``meson`` see :ref:`distutils-status-migration`. - -.. currentmodule:: numpy.distutils.core - -:mod:`numpy.distutils` is part of NumPy, and extends the standard Python -``distutils`` module to deal with Fortran sources and F2PY signature files, e.g. -compile Fortran sources, call F2PY to construct extension modules, etc. - -.. topic:: Example - - Consider the following ``setup_file.py`` for the ``fib`` and ``scalar`` - examples from :ref:`f2py-getting-started` section: - - .. literalinclude:: ./../code/setup_example.py - :language: python - - Running - - .. code-block:: bash - - python setup_example.py build - - will build two extension modules ``scalar`` and ``fib2`` to the - build directory. - -Extensions to ``distutils`` -=========================== - -:mod:`numpy.distutils` extends ``distutils`` with the following features: - -* :class:`Extension` class argument ``sources`` may contain Fortran source - files. In addition, the list ``sources`` may contain at most one - F2PY signature file, and in this case, the name of an Extension module must - match with the ```` used in signature file. It is - assumed that an F2PY signature file contains exactly one ``python - module`` block. - - If ``sources`` do not contain a signature file, then F2PY is used to scan - Fortran source files to construct wrappers to the Fortran codes. - - Additional options to the F2PY executable can be given using the - :class:`Extension` class argument ``f2py_options``. - -* The following new ``distutils`` commands are defined: - - ``build_src`` - to construct Fortran wrapper extension modules, among many other things. - ``config_fc`` - to change Fortran compiler options. 
- - Additionally, the ``build_ext`` and ``build_clib`` commands are also enhanced - to support Fortran sources. - - Run - - .. code-block:: bash - - python config_fc build_src build_ext --help - - to see available options for these commands. - -* When building Python packages containing Fortran sources, one - can choose different Fortran compilers by using the ``build_ext`` - command option ``--fcompiler=``. Here ```` can be one of the - following names (on ``linux`` systems):: - - absoft compaq fujitsu g95 gnu gnu95 intel intele intelem lahey nag nagfor nv pathf95 pg vast - - See ``numpy_distutils/fcompiler.py`` for an up-to-date list of - supported compilers for different platforms, or run - - .. code-block:: bash - - python -m numpy.f2py -c --backend distutils --help-fcompiler diff --git a/doc/source/f2py/buildtools/index.rst b/doc/source/f2py/buildtools/index.rst index 37782e5ca74b..d5f3876e6dd5 100644 --- a/doc/source/f2py/buildtools/index.rst +++ b/doc/source/f2py/buildtools/index.rst @@ -11,7 +11,7 @@ with ``f2py``. The default build system for ``f2py`` has traditionally been through the enhanced ``numpy.distutils`` module. This module is based on ``distutils`` - which was removed in ``Python 3.12.0`` in **October 2023**. Like the rest of + which was removed in ``Python 3.12.0`` in **October 2023**. Like the rest of NumPy and SciPy, ``f2py`` uses ``meson`` now, see :ref:`distutils-status-migration` for some more details. 
diff --git a/doc/source/f2py/code/setup_example.py b/doc/source/f2py/code/setup_example.py deleted file mode 100644 index ef79ad1ecfb6..000000000000 --- a/doc/source/f2py/code/setup_example.py +++ /dev/null @@ -1,16 +0,0 @@ -from numpy.distutils.core import Extension - -ext1 = Extension(name='scalar', - sources=['scalar.f']) -ext2 = Extension(name='fib2', - sources=['fib2.pyf', 'fib1.f']) - -if __name__ == "__main__": - from numpy.distutils.core import setup - setup(name='f2py_example', - description="F2PY Users Guide examples", - author="Pearu Peterson", - author_email="pearu@cens.ioc.ee", - ext_modules=[ext1, ext2] - ) -# End of setup_example.py diff --git a/doc/source/f2py/f2py.getting-started.rst b/doc/source/f2py/f2py.getting-started.rst index e5746c49e94d..b6951b11da8d 100644 --- a/doc/source/f2py/f2py.getting-started.rst +++ b/doc/source/f2py/f2py.getting-started.rst @@ -22,15 +22,12 @@ following steps: * F2PY compiles all sources and builds an extension module containing the wrappers. - * In building the extension modules, F2PY uses ``meson`` and used to use - ``numpy.distutils`` For different build systems, see :ref:`f2py-bldsys`. + * In building the extension modules, F2PY uses ``meson``. For different + build systems, see :ref:`f2py-bldsys`. .. note:: - See :ref:`f2py-meson-distutils` for migration information. - - * Depending on your operating system, you may need to install the Python development headers (which provide the file ``Python.h``) separately. In Linux Debian-based distributions this package should be called ``python3-dev``, @@ -224,7 +221,7 @@ Fortran code, we can apply the wrapping steps one by one. .. 
literalinclude:: ./code/fib2.pyf :language: fortran -* Finally, we build the extension module with ``numpy.distutils`` by running: +* Finally, we build the extension module by running: :: diff --git a/doc/source/f2py/usage.rst b/doc/source/f2py/usage.rst index efcf2bec5266..ec936bb72e1c 100644 --- a/doc/source/f2py/usage.rst +++ b/doc/source/f2py/usage.rst @@ -101,10 +101,6 @@ Here ```` may also contain signature files. Among other options and ``;`` on Windows. In ``CMake`` this corresponds to using ``$``. -``--help-link []`` - List system resources found by ``numpy_distutils/system_info.py``. For - example, try ``f2py --help-link lapack_opt``. - 3. Building a module ~~~~~~~~~~~~~~~~~~~~ @@ -127,7 +123,7 @@ module is constructed by scanning all Fortran source codes for routine signatures, before proceeding to build the extension module. .. warning:: - From Python 3.12 onwards, ``distutils`` has been removed. Use environment + ``distutils`` has been removed. Use environment variables or native files to interact with ``meson`` instead. See its `FAQ `__ for more information. @@ -135,17 +131,13 @@ Among other options (see below) and options described for previous modes, the fo .. note:: - .. versionchanged:: 1.26.0 - There are now two separate build backends which can be used, ``distutils`` - and ``meson``. Users are **strongly** recommended to switch to ``meson`` - since it is the default above Python ``3.12``. + .. versionchanged:: 2.5.0 + The ``distutils`` backend has been removed. Common build flags: ``--backend `` - Specify the build backend for the compilation process. The supported backends - are ``meson`` and ``distutils``. If not specified, defaults to ``distutils``. - On Python 3.12 or higher, the default is ``meson``. + Legacy option, only ``meson`` is supported. ``--f77flags=`` Specify F77 compiler flags ``--f90flags=`` @@ -165,39 +157,13 @@ Common build flags: Add directory ```` to the list of directories to be searched for ``-l``. 
-The ``meson`` specific flags are: - -``--dep `` **meson only** +``--dep `` Specify a meson dependency for the module. This may be passed multiple times for multiple dependencies. Dependencies are stored in a list for further processing. Example: ``--dep lapack --dep scalapack`` This will identify "lapack" and "scalapack" as dependencies and remove them from argv, leaving a dependencies list containing ["lapack", "scalapack"]. -The older ``distutils`` flags are: - -``--help-fcompiler`` **no meson** - List the available Fortran compilers. -``--fcompiler=`` **no meson** - Specify a Fortran compiler type by vendor. -``--f77exec=`` **no meson** - Specify the path to a F77 compiler -``--f90exec=`` **no meson** - Specify the path to a F90 compiler -``--opt=`` **no meson** - Specify optimization flags -``--arch=`` **no meson** - Specify architecture specific optimization flags -``--noopt`` **no meson** - Compile without optimization flags -``--noarch`` **no meson** - Compile without arch-dependent optimization flags -``link-`` **no meson** - Link the extension module with as defined by - ``numpy_distutils/system_info.py``. E.g. to link with optimized LAPACK - libraries (vecLib on MacOSX, ATLAS elsewhere), use ``--link-lapack_opt``. - See also ``--help-link`` switch. - .. note:: The ``f2py -c`` option must be applied either to an existing ``.pyf`` file @@ -295,39 +261,6 @@ When using ``numpy.f2py`` as a module, the following functions can be invoked. .. automodule:: numpy.f2py :members: -Automatic extension module generation -===================================== - -If you want to distribute your f2py extension module, then you only -need to include the .pyf file and the Fortran code. The distutils -extensions in NumPy allow you to define an extension module entirely -in terms of this interface file. 
A valid ``setup.py`` file allowing -distribution of the ``add.f`` module (as part of the package -``f2py_examples`` so that it would be loaded as ``f2py_examples.add``) is: - -.. code-block:: python - - def configuration(parent_package='', top_path=None) - from numpy.distutils.misc_util import Configuration - config = Configuration('f2py_examples',parent_package, top_path) - config.add_extension('add', sources=['add.pyf','add.f']) - return config - - if __name__ == '__main__': - from numpy.distutils.core import setup - setup(**configuration(top_path='').todict()) - -Installation of the new package is easy using:: - - pip install . - -assuming you have the proper permissions to write to the main site- -packages directory for the version of Python you are using. For the -resulting package to work, you need to create a file named ``__init__.py`` -(in the same directory as ``add.pyf``). Notice the extension module is -defined entirely in terms of the ``add.pyf`` and ``add.f`` files. The -conversion of the .pyf file to a .c file is handled by `numpy.distutils`. - Building with Meson (Examples) ============================== diff --git a/doc/source/f2py/windows/index.rst b/doc/source/f2py/windows/index.rst index ea0af7505ce7..aa7851da5dd2 100644 --- a/doc/source/f2py/windows/index.rst +++ b/doc/source/f2py/windows/index.rst @@ -71,12 +71,6 @@ Cygwin (FOSS) Cygwin is meant to compile UNIX software on Windows, instead of building native Windows programs. This means cross compilation is required. -The compilation suites described so far are compatible with the `now -deprecated`_ ``np.distutils`` build backend which is exposed by the F2PY CLI. -Additional build system usage (``meson``, ``cmake``) as described in -:ref:`f2py-bldsys` allows for a more flexible set of compiler -backends including: - Intel oneAPI The newer Intel compilers (``ifx``, ``icx``) are based on LLVM and can be used for native compilation. Licensing requirements can be onerous. 
diff --git a/doc/source/reference/c-api/coremath.rst b/doc/source/reference/c-api/coremath.rst index cc46ba744a49..b2e3af4c0944 100644 --- a/doc/source/reference/c-api/coremath.rst +++ b/doc/source/reference/c-api/coremath.rst @@ -317,20 +317,6 @@ The generic steps to take are: machine. Otherwise you pick up a static library built for the wrong architecture. -When you build with ``numpy.distutils`` (deprecated), then use this in your ``setup.py``: - - .. hidden in a comment so as to be included in refguide but not rendered documentation - >>> import numpy.distutils.misc_util - >>> config = np.distutils.misc_util.Configuration(None, '', '.') - >>> with open('foo.c', 'w') as f: pass - - >>> from numpy.distutils.misc_util import get_info - >>> info = get_info('npymath') - >>> _ = config.add_extension('foo', sources=['foo.c'], extra_info=info) - -In other words, the usage of ``info`` is exactly the same as when using -``blas_info`` and co. - When you are building with `Meson `__, use:: # Note that this will get easier in the future, when Meson has diff --git a/doc/source/reference/distutils.rst b/doc/source/reference/distutils.rst deleted file mode 100644 index 714c8836322e..000000000000 --- a/doc/source/reference/distutils.rst +++ /dev/null @@ -1,219 +0,0 @@ -.. _numpy-distutils-refguide: - -********* -Packaging -********* - -.. module:: numpy.distutils - -.. warning:: - - ``numpy.distutils`` is deprecated, and will be removed for - Python >= 3.12. For more details, see :ref:`distutils-status-migration` - -.. warning:: - - Note that ``setuptools`` does major releases often and those may contain - changes that break :mod:`numpy.distutils`, which will *not* be updated anymore - for new ``setuptools`` versions. It is therefore recommended to set an - upper version bound in your build configuration for the last known version - of ``setuptools`` that works with your build. 
- -NumPy provides enhanced distutils functionality to make it easier to -build and install sub-packages, auto-generate code, and extension -modules that use Fortran-compiled libraries. A useful :class:`Configuration -` class is also provided in -:mod:`numpy.distutils.misc_util` that can make it easier to construct -keyword arguments to pass to the setup function (by passing the -dictionary obtained from the todict() method of the class). More -information is available in the :ref:`distutils-user-guide`. - -The choice and location of linked libraries such as BLAS and LAPACK as well as -include paths and other such build options can be specified in a ``site.cfg`` -file located in the NumPy root repository or a ``.numpy-site.cfg`` file in your -home directory. See the ``site.cfg.example`` example file included in the NumPy -repository or sdist for documentation. - -.. index:: - single: distutils - - -Modules in :mod:`numpy.distutils` -================================= -.. toctree:: - :maxdepth: 2 - - distutils/misc_util - - -.. currentmodule:: numpy.distutils - -.. autosummary:: - :toctree: generated/ - - ccompiler - ccompiler_opt - cpuinfo.cpu - core.Extension - exec_command - log.set_verbosity - system_info.get_info - system_info.get_standard_file - - -Configuration class -=================== - -.. currentmodule:: numpy.distutils.misc_util - -.. class:: Configuration(package_name=None, parent_name=None, top_path=None, package_path=None, **attrs) - - Construct a configuration instance for the given package name. If - *parent_name* is not None, then construct the package as a - sub-package of the *parent_name* package. If *top_path* and - *package_path* are None then they are assumed equal to - the path of the file this instance was created in. The setup.py - files in the numpy distribution are good examples of how to use - the :class:`Configuration` instance. - - .. automethod:: todict - - .. automethod:: get_distribution - - .. automethod:: get_subpackage - - .. 
automethod:: add_subpackage - - .. automethod:: add_data_files - - .. automethod:: add_data_dir - - .. automethod:: add_include_dirs - - .. automethod:: add_headers - - .. automethod:: add_extension - - .. automethod:: add_library - - .. automethod:: add_scripts - - .. automethod:: add_installed_library - - .. automethod:: add_npy_pkg_config - - .. automethod:: paths - - .. automethod:: get_config_cmd - - .. automethod:: get_build_temp_dir - - .. automethod:: have_f77c - - .. automethod:: have_f90c - - .. automethod:: get_version - - .. automethod:: make_svn_version_py - - .. automethod:: make_config_py - - .. automethod:: get_info - -Building installable C libraries -================================ - -Conventional C libraries (installed through `add_library`) are not installed, and -are just used during the build (they are statically linked). An installable C -library is a pure C library, which does not depend on the python C runtime, and -is installed such that it may be used by third-party packages. To build and -install the C library, you just use the method `add_installed_library` instead of -`add_library`, which takes the same arguments except for an additional -``install_dir`` argument:: - - .. hidden in a comment so as to be included in refguide but not rendered documentation - >>> import numpy.distutils.misc_util - >>> config = np.distutils.misc_util.Configuration(None, '', '.') - >>> with open('foo.c', 'w') as f: pass - - >>> config.add_installed_library('foo', sources=['foo.c'], install_dir='lib') - -npy-pkg-config files --------------------- - -To make the necessary build options available to third parties, you could use -the `npy-pkg-config` mechanism implemented in `numpy.distutils`. This mechanism is -based on a .ini file which contains all the options. 
A .ini file is very -similar to .pc files as used by the pkg-config unix utility:: - - [meta] - Name: foo - Version: 1.0 - Description: foo library - - [variables] - prefix = /home/user/local - libdir = ${prefix}/lib - includedir = ${prefix}/include - - [default] - cflags = -I${includedir} - libs = -L${libdir} -lfoo - -Generally, the file needs to be generated during the build, since it needs some -information known at build time only (e.g. prefix). This is mostly automatic if -one uses the `Configuration` method `add_npy_pkg_config`. Assuming we have a -template file foo.ini.in as follows:: - - [meta] - Name: foo - Version: @version@ - Description: foo library - - [variables] - prefix = @prefix@ - libdir = ${prefix}/lib - includedir = ${prefix}/include - - [default] - cflags = -I${includedir} - libs = -L${libdir} -lfoo - -and the following code in setup.py:: - - >>> config.add_installed_library('foo', sources=['foo.c'], install_dir='lib') - >>> subst = {'version': '1.0'} - >>> config.add_npy_pkg_config('foo.ini.in', 'lib', subst_dict=subst) - -This will install the file foo.ini into the directory package_dir/lib, and the -foo.ini file will be generated from foo.ini.in, where each ``@version@`` will be -replaced by ``subst_dict['version']``. The dictionary has an additional prefix -substitution rule automatically added, which contains the install prefix (since -this is not easy to get from setup.py). - -Reusing a C library from another package ----------------------------------------- - -Info are easily retrieved from the `get_info` function in -`numpy.distutils.misc_util`:: - - >>> info = np.distutils.misc_util.get_info('npymath') - >>> config.add_extension('foo', sources=['foo.c'], extra_info=info) - - - -An additional list of paths to look for .ini files can be given to `get_info`. - -Conversion of ``.src`` files -============================ - -NumPy distutils supports automatic conversion of source files named -.src. 
This facility can be used to maintain very similar -code blocks requiring only simple changes between blocks. During the -build phase of setup, if a template file named .src is -encountered, a new file named is constructed from the -template and placed in the build directory to be used instead. Two -forms of template conversion are supported. The first form occurs for -files named .ext.src where ext is a recognized Fortran -extension (f, f90, f95, f77, for, ftn, pyf). The second form is used -for all other cases. See :ref:`templating`. diff --git a/doc/source/reference/distutils/misc_util.rst b/doc/source/reference/distutils/misc_util.rst deleted file mode 100644 index bbb83a5ab061..000000000000 --- a/doc/source/reference/distutils/misc_util.rst +++ /dev/null @@ -1,7 +0,0 @@ -distutils.misc_util -=================== - -.. automodule:: numpy.distutils.misc_util - :members: - :undoc-members: - :exclude-members: Configuration diff --git a/doc/source/reference/distutils_guide.rst b/doc/source/reference/distutils_guide.rst deleted file mode 100644 index 0a815797ac30..000000000000 --- a/doc/source/reference/distutils_guide.rst +++ /dev/null @@ -1,13 +0,0 @@ -.. _distutils-user-guide: - -``numpy.distutils`` user guide -============================== - -.. warning:: - - ``numpy.distutils`` is deprecated, and will be removed for - Python >= 3.12. For more details, see :ref:`distutils-status-migration` - - -.. include:: ../../DISTUTILS.rst - :start-line: 6 diff --git a/doc/source/reference/distutils_status_migration.rst b/doc/source/reference/distutils_status_migration.rst index 366b0e67f06a..2fea390a7a8b 100644 --- a/doc/source/reference/distutils_status_migration.rst +++ b/doc/source/reference/distutils_status_migration.rst @@ -3,16 +3,7 @@ Status of ``numpy.distutils`` and migration advice ================================================== -`numpy.distutils` has been deprecated in NumPy ``1.23.0``. 
It will be removed -for Python 3.12; for Python <= 3.11 it will not be removed until 2 years after -the Python 3.12 release (Oct 2025). - - -.. warning:: - - ``numpy.distutils`` is only tested with ``setuptools < 60.0``, newer - versions may break. See :ref:`numpy-setuptools-interaction` for details. - +`numpy.distutils` has been removed in NumPy ``2.5.0``. Migration advice ---------------- @@ -47,7 +38,7 @@ migrating. For more details about the SciPy migration, see: - `RFC: switch to Meson as a build system `__ - `Tracking issue for Meson support `__ -NumPy will migrate to Meson for the 1.26 release. +NumPy migrated to Meson for the 1.26 release. Moving to CMake / scikit-build @@ -73,15 +64,12 @@ present in ``setuptools``: - Support for a few other scientific libraries, like FFTW and UMFPACK - Better MinGW support - Per-compiler build flag customization (e.g. `-O3` and `SSE2` flags are default) -- a simple user build config system, see `site.cfg.example `__ +- a simple user build config system, see `site.cfg.example `__ - SIMD intrinsics support - Support for the NumPy-specific ``.src`` templating format for ``.c``/``.h`` files -The most widely used feature is nested ``setup.py`` files. This feature may -perhaps still be ported to ``setuptools`` in the future (it needs a volunteer -though, see `gh-18588 `__ for -status). Projects only using that feature could move to ``setuptools`` after -that is done. In case a project uses only a couple of ``setup.py`` files, it +The most widely used feature is nested ``setup.py`` files. In case a project +uses only a couple of ``setup.py`` files, it also could make sense to simply aggregate all the content of those files into a single ``setup.py`` file and then move to ``setuptools``. This involves dropping all ``Configuration`` instances, and using ``Extension`` instead. @@ -103,22 +91,10 @@ For more details, see the ..
_numpy-setuptools-interaction: -Interaction of ``numpy.distutils`` with ``setuptools`` +Versioning ``setuptools`` ------------------------------------------------------ -It is recommended to use ``setuptools < 60.0``. Newer versions may work, but -are not guaranteed to. The reason for this is that ``setuptools`` 60.0 enabled -a vendored copy of ``distutils``, including backwards incompatible changes that -affect some functionality in ``numpy.distutils``. - -If you are using only simple Cython or C extensions with minimal use of -``numpy.distutils`` functionality beyond nested ``setup.py`` files (its most -popular feature, see :class:`Configuration `), -then latest ``setuptools`` is likely to continue working. In case of problems, -you can also try ``SETUPTOOLS_USE_DISTUTILS=stdlib`` to avoid the backwards -incompatible changes in ``setuptools``. - -Whatever you do, it is recommended to put an upper bound on your ``setuptools`` +It is recommended to put an upper bound on your ``setuptools`` build requirement in ``pyproject.toml`` to avoid future breakage - see :ref:`for-downstream-package-authors`. diff --git a/doc/source/reference/index.rst b/doc/source/reference/index.rst index aa6c692d6b2b..2a7ac83a96ca 100644 --- a/doc/source/reference/index.rst +++ b/doc/source/reference/index.rst @@ -40,7 +40,6 @@ Python API :maxdepth: 1 typing - distutils C API ===== @@ -63,7 +62,6 @@ Other topics security testing distutils_status_migration - distutils_guide swig diff --git a/doc/source/reference/module_structure.rst b/doc/source/reference/module_structure.rst index 98e3dda54e7b..5c6d8139b055 100644 --- a/doc/source/reference/module_structure.rst +++ b/doc/source/reference/module_structure.rst @@ -44,7 +44,6 @@ Prefer not to use these namespaces for new code. There are better alternatives and/or this code is deprecated or isn't reliable. 
- :ref:`numpy.char ` - legacy string functionality, only for fixed-width strings -- :ref:`numpy.distutils ` (deprecated) - build system support - :ref:`numpy.f2py ` - Fortran binding generation (usually used from the command line only) - :ref:`numpy.ma ` - masked arrays (not very reliable, needs an overhaul) - :ref:`numpy.matlib ` (pending deprecation) - functions supporting ``matrix`` instances @@ -70,7 +69,6 @@ and/or this code is deprecated or isn't reliable. numpy.rec numpy.version numpy.char - numpy.distutils numpy.f2py <../f2py/index> numpy.ma numpy.matlib diff --git a/doc/source/try_examples.json b/doc/source/try_examples.json index 305860dced55..823d4a5d1e82 100644 --- a/doc/source/try_examples.json +++ b/doc/source/try_examples.json @@ -1,7 +1,6 @@ { "global_min_height": "400px", "ignore_patterns": [ - "distutils.html*", "reference\/typing.html*", "numpy.__array_namespace_info__.html*" ] diff --git a/doc/source/user/absolute_beginners.rst b/doc/source/user/absolute_beginners.rst index 5f620fa36cef..6909db8cb7e2 100644 --- a/doc/source/user/absolute_beginners.rst +++ b/doc/source/user/absolute_beginners.rst @@ -1348,7 +1348,7 @@ For example:: With a single iterable argument, return its biggest item. The default keyword-only argument specifies an object to return if the provided iterable is empty. - With two or more arguments, return the largest argument. + With two or more positional arguments, return the largest argument. diff --git a/doc/source/user/c-info.python-as-glue.rst b/doc/source/user/c-info.python-as-glue.rst index c699760fdebd..20d3f1bb5937 100644 --- a/doc/source/user/c-info.python-as-glue.rst +++ b/doc/source/user/c-info.python-as-glue.rst @@ -178,13 +178,7 @@ write in a ``setup.py`` file: Adding the NumPy include directory is, of course, only necessary if you are using NumPy arrays in the extension module (which is what we -assume you are using Cython for).
The distutils extensions in NumPy -also include support for automatically producing the extension-module -and linking it from a ``.pyx`` file. It works so that if the user does -not have Cython installed, then it looks for a file with the same -file-name but a ``.c`` extension which it then uses instead of trying -to produce the ``.c`` file again. - +assume you are using Cython for). If you just use Cython to compile a standard Python module, then you will get a C extension module that typically runs a bit faster than the equivalent Python module. Further speed increases can be gained by using diff --git a/meson_cpu/arm/meson.build b/meson_cpu/arm/meson.build index 5478e52cdcea..92d241883795 100644 --- a/meson_cpu/arm/meson.build +++ b/meson_cpu/arm/meson.build @@ -2,21 +2,21 @@ source_root = meson.project_source_root() mod_features = import('features') NEON = mod_features.new( 'NEON', 1, - test_code: files(source_root + '/numpy/distutils/checks/cpu_neon.c')[0] + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_neon.c')[0] ) NEON_FP16 = mod_features.new( 'NEON_FP16', 2, implies: NEON, - test_code: files(source_root + '/numpy/distutils/checks/cpu_neon_fp16.c')[0] + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_neon_fp16.c')[0] ) # FMA NEON_VFPV4 = mod_features.new( 'NEON_VFPV4', 3, implies: NEON_FP16, - test_code: files(source_root + '/numpy/distutils/checks/cpu_neon_vfpv4.c')[0] + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_neon_vfpv4.c')[0] ) # Advanced SIMD ASIMD = mod_features.new( 'ASIMD', 4, implies: NEON_VFPV4, detect: {'val': 'ASIMD', 'match': 'NEON.*'}, - test_code: files(source_root + '/numpy/distutils/checks/cpu_asimd.c')[0] + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_asimd.c')[0] ) cpu_family = host_machine.cpu_family() if cpu_family == 'aarch64' @@ -37,25 +37,25 @@ endif ASIMDHP = mod_features.new( 'ASIMDHP', 5, implies: ASIMD, args: {'val': '-march=armv8.2-a+fp16', 
'match': '-march=.*', 'mfilter': '\+.*'}, - test_code: files(source_root + '/numpy/distutils/checks/cpu_asimdhp.c')[0] + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_asimdhp.c')[0] ) ## ARMv8.2 dot product ASIMDDP = mod_features.new( 'ASIMDDP', 6, implies: ASIMD, args: {'val': '-march=armv8.2-a+dotprod', 'match': '-march=.*', 'mfilter': '\+.*'}, - test_code: files(source_root + '/numpy/distutils/checks/cpu_asimddp.c')[0] + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_asimddp.c')[0] ) ## ARMv8.2 Single & half-precision Multiply ASIMDFHM = mod_features.new( 'ASIMDFHM', 7, implies: ASIMDHP, args: {'val': '-march=armv8.2-a+fp16fml', 'match': '-march=.*', 'mfilter': '\+.*'}, - test_code: files(source_root + '/numpy/distutils/checks/cpu_asimdfhm.c')[0] + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_asimdfhm.c')[0] ) ## Scalable Vector Extensions (SVE) SVE = mod_features.new( 'SVE', 8, implies: ASIMDHP, args: {'val': '-march=armv8.2-a+sve', 'match': '-march=.*', 'mfilter': '\+.*'}, - test_code: files(source_root + '/numpy/distutils/checks/cpu_sve.c')[0] + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_sve.c')[0] ) # TODO: Add support for MSVC ARM_FEATURES = { diff --git a/meson_cpu/loongarch64/meson.build b/meson_cpu/loongarch64/meson.build index 570e3bfcda01..d59b5682d646 100644 --- a/meson_cpu/loongarch64/meson.build +++ b/meson_cpu/loongarch64/meson.build @@ -3,6 +3,6 @@ mod_features = import('features') LSX = mod_features.new( 'LSX', 1, args: ['-mlsx'], - test_code: files(source_root + '/numpy/distutils/checks/cpu_lsx.c')[0] + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_lsx.c')[0] ) LOONGARCH64_FEATURES = {'LSX': LSX} diff --git a/meson_cpu/ppc64/meson.build b/meson_cpu/ppc64/meson.build index 57fe47140429..8f2e8373c77c 100644 --- a/meson_cpu/ppc64/meson.build +++ b/meson_cpu/ppc64/meson.build @@ -4,9 +4,9 @@ compiler_id = meson.get_compiler('c').get_id() VSX = 
mod_features.new( 'VSX', 1, args: '-mvsx', - test_code: files(source_root + '/numpy/distutils/checks/cpu_vsx.c')[0], + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_vsx.c')[0], extra_tests: { - 'VSX_ASM': files(source_root + '/numpy/distutils/checks/extra_vsx_asm.c')[0] + 'VSX_ASM': files(source_root + '/numpy/_core/src/_simd/checks/extra_vsx_asm.c')[0] } ) if compiler_id == 'clang' @@ -15,7 +15,7 @@ endif VSX2 = mod_features.new( 'VSX2', 2, implies: VSX, args: {'val': '-mcpu=power8', 'match': '.*vsx'}, detect: {'val': 'VSX2', 'match': 'VSX'}, - test_code: files(source_root + '/numpy/distutils/checks/cpu_vsx2.c')[0], + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_vsx2.c')[0], ) # VSX2 is hardware baseline feature on ppc64le since the first little-endian # support was part of Power8 @@ -25,17 +25,17 @@ endif VSX3 = mod_features.new( 'VSX3', 3, implies: VSX2, args: {'val': '-mcpu=power9', 'match': '.*(?:mcpu=|vsx).*'}, detect: {'val': 'VSX3', 'match': 'VSX.*'}, - test_code: files(source_root + '/numpy/distutils/checks/cpu_vsx3.c')[0], + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_vsx3.c')[0], extra_tests: { - 'VSX3_HALF_DOUBLE': files(source_root + '/numpy/distutils/checks/extra_vsx3_half_double.c')[0] + 'VSX3_HALF_DOUBLE': files(source_root + '/numpy/_core/src/_simd/checks/extra_vsx3_half_double.c')[0] } ) VSX4 = mod_features.new( 'VSX4', 4, implies: VSX3, args: {'val': '-mcpu=power10', 'match': '.*(?:mcpu=|vsx).*'}, detect: {'val': 'VSX4', 'match': 'VSX.*'}, - test_code: files(source_root + '/numpy/distutils/checks/cpu_vsx4.c')[0], + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_vsx4.c')[0], extra_tests: { - 'VSX4_MMA': files(source_root + '/numpy/distutils/checks/extra_vsx4_mma.c')[0] + 'VSX4_MMA': files(source_root + '/numpy/_core/src/_simd/checks/extra_vsx4_mma.c')[0] } ) PPC64_FEATURES = {'VSX': VSX, 'VSX2': VSX2, 'VSX3': VSX3, 'VSX4': VSX4} diff --git 
a/meson_cpu/riscv64/meson.build b/meson_cpu/riscv64/meson.build index 3f930f39e27e..fdab67d246d6 100644 --- a/meson_cpu/riscv64/meson.build +++ b/meson_cpu/riscv64/meson.build @@ -3,6 +3,6 @@ mod_features = import('features') RVV = mod_features.new( 'RVV', 1, args: ['-march=rv64gcv'], - test_code: files(source_root + '/numpy/distutils/checks/cpu_rvv.c')[0], + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_rvv.c')[0], ) RV64_FEATURES = {'RVV': RVV} diff --git a/meson_cpu/s390x/meson.build b/meson_cpu/s390x/meson.build index b7a420c27f0d..282ec056e78e 100644 --- a/meson_cpu/s390x/meson.build +++ b/meson_cpu/s390x/meson.build @@ -3,16 +3,16 @@ mod_features = import('features') VX = mod_features.new( 'VX', 1, args: ['-mzvector', '-march=arch11'], - test_code: files(source_root + '/numpy/distutils/checks/cpu_vx.c')[0], + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_vx.c')[0], ) VXE = mod_features.new( 'VXE', 2, implies: VX, args: {'val': '-march=arch12', 'match': '-march=.*'}, detect: {'val': 'VXE', 'match': '\\bvxe\\b'}, - test_code: files(source_root + '/numpy/distutils/checks/cpu_vxe.c')[0], + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_vxe.c')[0], ) VXE2 = mod_features.new( 'VXE2', 3, implies: VXE, args: {'val': '-march=arch13', 'match': '-march=.*'}, detect: {'val': 'VXE2', 'match': '\\bvxe2\\b'}, - test_code: files(source_root + '/numpy/distutils/checks/cpu_vxe2.c')[0], + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_vxe2.c')[0], ) S390X_FEATURES = {'VX': VX, 'VXE': VXE, 'VXE2': VXE2} diff --git a/meson_cpu/x86/meson.build b/meson_cpu/x86/meson.build index add073376d98..412803e5ddbb 100644 --- a/meson_cpu/x86/meson.build +++ b/meson_cpu/x86/meson.build @@ -43,14 +43,14 @@ AVX512_ICL = mod_features.new( group: ['AVX512VBMI', 'AVX512VBMI2', 'AVX512VNNI', 'AVX512BITALG', 'AVX512VPOPCNTDQ', 'AVX512IFMA', 'VAES', 'GFNI', 'VPCLMULQDQ'], detect: 'AVX512_ICL', - test_code: files(source_root 
+ '/numpy/distutils/checks/cpu_avx512_icl.c')[0] + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_avx512_icl.c')[0] ) AVX512_SPR = mod_features.new( 'AVX512_SPR', 35, implies: AVX512_ICL, args: ['-mavx512fp16', '-mavx512bf16'], group: ['AVX512FP16', 'AVX512BF16'], detect: 'AVX512_SPR', - test_code: files(source_root + '/numpy/distutils/checks/cpu_avx512_spr.c')[0] + test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_avx512_spr.c')[0] ) # Specializations for non unix-like compilers diff --git a/numpy/__init__.py b/numpy/__init__.py index ef7c1ed7678a..42a2e8896d68 100644 --- a/numpy/__init__.py +++ b/numpy/__init__.py @@ -52,9 +52,6 @@ Polynomial tools testing NumPy testing tools -distutils - Enhancements to distutils with support for - Fortran compilers support and more (for Python <= 3.11) Utilities --------- @@ -624,8 +621,8 @@ from .matrixlib import asmatrix, bmat, matrix # public submodules are imported lazily, therefore are accessible from - # __getattr__. Note that `distutils` (deprecated) and `array_api` - # (experimental label) are not added here, because `from numpy import *` + # __getattr__. Note that `array_api` + # (experimental label) is not added here, because `from numpy import *` # must not raise any warnings - that's too disruptive. 
__numpy_submodules__ = { "linalg", "fft", "dtypes", "random", "polynomial", "ma", @@ -757,12 +754,8 @@ def __getattr__(attr): import numpy.strings as strings return strings elif attr == "distutils": - if 'distutils' in __numpy_submodules__: - import numpy.distutils as distutils - return distutils - else: - raise AttributeError("`numpy.distutils` is not available from " - "Python 3.12 onwards", name=None) + raise AttributeError("`numpy.distutils` is not available from " + "numpy 2.5 onwards", name=None) if attr in __future_scalars__: # And future warnings for those that will change, but also give @@ -797,7 +790,7 @@ def __dir__(): ) public_symbols -= { "matrixlib", "matlib", "tests", "conftest", "version", - "distutils", "array_api" + "array_api" } return list(public_symbols) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 975a04857db7..3c459952d6b4 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -217,8 +217,6 @@ from numpy import ( matrixlib as matrixlib, version as version, ) -if sys.version_info < (3, 12): - from numpy import distutils as distutils from numpy._core.records import ( record, diff --git a/numpy/distutils/conv_template.py b/numpy/_build_utils/conv_template.py similarity index 100% rename from numpy/distutils/conv_template.py rename to numpy/_build_utils/conv_template.py diff --git a/numpy/_build_utils/process_src_template.py b/numpy/_build_utils/process_src_template.py index 8bd1ea872a42..f934c222e838 100644 --- a/numpy/_build_utils/process_src_template.py +++ b/numpy/_build_utils/process_src_template.py @@ -5,11 +5,11 @@ def get_processor(): - # Convoluted because we can't import from numpy.distutils + # Convoluted because we can't import from numpy # (numpy is not yet built) conv_template_path = os.path.join( os.path.dirname(__file__), - '..', 'distutils', 'conv_template.py' + 'conv_template.py' ) spec = importlib.util.spec_from_file_location( 'conv_template', conv_template_path diff --git 
a/numpy/_core/code_generators/genapi.py b/numpy/_core/code_generators/genapi.py index e97177e46153..1087d176816b 100644 --- a/numpy/_core/code_generators/genapi.py +++ b/numpy/_core/code_generators/genapi.py @@ -17,11 +17,11 @@ def get_processor(): - # Convoluted because we can't import from numpy.distutils + # Convoluted because we can't import from numpy # (numpy is not yet built) conv_template_path = os.path.join( os.path.dirname(__file__), - '..', '..', 'distutils', 'conv_template.py' + '..', '..', '_build_utils', 'conv_template.py' ) spec = importlib.util.spec_from_file_location( 'conv_template', conv_template_path diff --git a/numpy/_core/code_generators/numpy_api.py b/numpy/_core/code_generators/numpy_api.py index ac108aa20370..c2b471c71757 100644 --- a/numpy/_core/code_generators/numpy_api.py +++ b/numpy/_core/code_generators/numpy_api.py @@ -19,7 +19,7 @@ def get_annotations(): - # Convoluted because we can't import from numpy.distutils + # Convoluted because we can't import numpy # (numpy is not yet built) genapi_py = os.path.join(os.path.dirname(__file__), 'genapi.py') spec = importlib.util.spec_from_file_location('conv_template', genapi_py) diff --git a/numpy/distutils/checks/cpu_asimd.c b/numpy/_core/src/_simd/checks/cpu_asimd.c similarity index 100% rename from numpy/distutils/checks/cpu_asimd.c rename to numpy/_core/src/_simd/checks/cpu_asimd.c diff --git a/numpy/distutils/checks/cpu_asimddp.c b/numpy/_core/src/_simd/checks/cpu_asimddp.c similarity index 100% rename from numpy/distutils/checks/cpu_asimddp.c rename to numpy/_core/src/_simd/checks/cpu_asimddp.c diff --git a/numpy/distutils/checks/cpu_asimdfhm.c b/numpy/_core/src/_simd/checks/cpu_asimdfhm.c similarity index 100% rename from numpy/distutils/checks/cpu_asimdfhm.c rename to numpy/_core/src/_simd/checks/cpu_asimdfhm.c diff --git a/numpy/distutils/checks/cpu_asimdhp.c b/numpy/_core/src/_simd/checks/cpu_asimdhp.c similarity index 100% rename from numpy/distutils/checks/cpu_asimdhp.c rename to 
numpy/_core/src/_simd/checks/cpu_asimdhp.c diff --git a/numpy/distutils/checks/cpu_avx.c b/numpy/_core/src/_simd/checks/cpu_avx.c similarity index 100% rename from numpy/distutils/checks/cpu_avx.c rename to numpy/_core/src/_simd/checks/cpu_avx.c diff --git a/numpy/distutils/checks/cpu_avx2.c b/numpy/_core/src/_simd/checks/cpu_avx2.c similarity index 100% rename from numpy/distutils/checks/cpu_avx2.c rename to numpy/_core/src/_simd/checks/cpu_avx2.c diff --git a/numpy/distutils/checks/cpu_avx512_clx.c b/numpy/_core/src/_simd/checks/cpu_avx512_clx.c similarity index 100% rename from numpy/distutils/checks/cpu_avx512_clx.c rename to numpy/_core/src/_simd/checks/cpu_avx512_clx.c diff --git a/numpy/distutils/checks/cpu_avx512_cnl.c b/numpy/_core/src/_simd/checks/cpu_avx512_cnl.c similarity index 100% rename from numpy/distutils/checks/cpu_avx512_cnl.c rename to numpy/_core/src/_simd/checks/cpu_avx512_cnl.c diff --git a/numpy/distutils/checks/cpu_avx512_icl.c b/numpy/_core/src/_simd/checks/cpu_avx512_icl.c similarity index 100% rename from numpy/distutils/checks/cpu_avx512_icl.c rename to numpy/_core/src/_simd/checks/cpu_avx512_icl.c diff --git a/numpy/distutils/checks/cpu_avx512_knl.c b/numpy/_core/src/_simd/checks/cpu_avx512_knl.c similarity index 100% rename from numpy/distutils/checks/cpu_avx512_knl.c rename to numpy/_core/src/_simd/checks/cpu_avx512_knl.c diff --git a/numpy/distutils/checks/cpu_avx512_knm.c b/numpy/_core/src/_simd/checks/cpu_avx512_knm.c similarity index 100% rename from numpy/distutils/checks/cpu_avx512_knm.c rename to numpy/_core/src/_simd/checks/cpu_avx512_knm.c diff --git a/numpy/distutils/checks/cpu_avx512_skx.c b/numpy/_core/src/_simd/checks/cpu_avx512_skx.c similarity index 100% rename from numpy/distutils/checks/cpu_avx512_skx.c rename to numpy/_core/src/_simd/checks/cpu_avx512_skx.c diff --git a/numpy/distutils/checks/cpu_avx512_spr.c b/numpy/_core/src/_simd/checks/cpu_avx512_spr.c similarity index 100% rename from 
numpy/distutils/checks/cpu_avx512_spr.c rename to numpy/_core/src/_simd/checks/cpu_avx512_spr.c diff --git a/numpy/distutils/checks/cpu_avx512cd.c b/numpy/_core/src/_simd/checks/cpu_avx512cd.c similarity index 100% rename from numpy/distutils/checks/cpu_avx512cd.c rename to numpy/_core/src/_simd/checks/cpu_avx512cd.c diff --git a/numpy/distutils/checks/cpu_avx512f.c b/numpy/_core/src/_simd/checks/cpu_avx512f.c similarity index 100% rename from numpy/distutils/checks/cpu_avx512f.c rename to numpy/_core/src/_simd/checks/cpu_avx512f.c diff --git a/numpy/distutils/checks/cpu_f16c.c b/numpy/_core/src/_simd/checks/cpu_f16c.c similarity index 100% rename from numpy/distutils/checks/cpu_f16c.c rename to numpy/_core/src/_simd/checks/cpu_f16c.c diff --git a/numpy/distutils/checks/cpu_fma3.c b/numpy/_core/src/_simd/checks/cpu_fma3.c similarity index 100% rename from numpy/distutils/checks/cpu_fma3.c rename to numpy/_core/src/_simd/checks/cpu_fma3.c diff --git a/numpy/distutils/checks/cpu_fma4.c b/numpy/_core/src/_simd/checks/cpu_fma4.c similarity index 100% rename from numpy/distutils/checks/cpu_fma4.c rename to numpy/_core/src/_simd/checks/cpu_fma4.c diff --git a/numpy/distutils/checks/cpu_lsx.c b/numpy/_core/src/_simd/checks/cpu_lsx.c similarity index 100% rename from numpy/distutils/checks/cpu_lsx.c rename to numpy/_core/src/_simd/checks/cpu_lsx.c diff --git a/numpy/distutils/checks/cpu_neon.c b/numpy/_core/src/_simd/checks/cpu_neon.c similarity index 100% rename from numpy/distutils/checks/cpu_neon.c rename to numpy/_core/src/_simd/checks/cpu_neon.c diff --git a/numpy/distutils/checks/cpu_neon_fp16.c b/numpy/_core/src/_simd/checks/cpu_neon_fp16.c similarity index 100% rename from numpy/distutils/checks/cpu_neon_fp16.c rename to numpy/_core/src/_simd/checks/cpu_neon_fp16.c diff --git a/numpy/distutils/checks/cpu_neon_vfpv4.c b/numpy/_core/src/_simd/checks/cpu_neon_vfpv4.c similarity index 100% rename from numpy/distutils/checks/cpu_neon_vfpv4.c rename to 
numpy/_core/src/_simd/checks/cpu_neon_vfpv4.c diff --git a/numpy/distutils/checks/cpu_popcnt.c b/numpy/_core/src/_simd/checks/cpu_popcnt.c similarity index 100% rename from numpy/distutils/checks/cpu_popcnt.c rename to numpy/_core/src/_simd/checks/cpu_popcnt.c diff --git a/numpy/distutils/checks/cpu_rvv.c b/numpy/_core/src/_simd/checks/cpu_rvv.c similarity index 100% rename from numpy/distutils/checks/cpu_rvv.c rename to numpy/_core/src/_simd/checks/cpu_rvv.c diff --git a/numpy/distutils/checks/cpu_sse.c b/numpy/_core/src/_simd/checks/cpu_sse.c similarity index 100% rename from numpy/distutils/checks/cpu_sse.c rename to numpy/_core/src/_simd/checks/cpu_sse.c diff --git a/numpy/distutils/checks/cpu_sse2.c b/numpy/_core/src/_simd/checks/cpu_sse2.c similarity index 100% rename from numpy/distutils/checks/cpu_sse2.c rename to numpy/_core/src/_simd/checks/cpu_sse2.c diff --git a/numpy/distutils/checks/cpu_sse3.c b/numpy/_core/src/_simd/checks/cpu_sse3.c similarity index 100% rename from numpy/distutils/checks/cpu_sse3.c rename to numpy/_core/src/_simd/checks/cpu_sse3.c diff --git a/numpy/distutils/checks/cpu_sse41.c b/numpy/_core/src/_simd/checks/cpu_sse41.c similarity index 100% rename from numpy/distutils/checks/cpu_sse41.c rename to numpy/_core/src/_simd/checks/cpu_sse41.c diff --git a/numpy/distutils/checks/cpu_sse42.c b/numpy/_core/src/_simd/checks/cpu_sse42.c similarity index 100% rename from numpy/distutils/checks/cpu_sse42.c rename to numpy/_core/src/_simd/checks/cpu_sse42.c diff --git a/numpy/distutils/checks/cpu_ssse3.c b/numpy/_core/src/_simd/checks/cpu_ssse3.c similarity index 100% rename from numpy/distutils/checks/cpu_ssse3.c rename to numpy/_core/src/_simd/checks/cpu_ssse3.c diff --git a/numpy/distutils/checks/cpu_sve.c b/numpy/_core/src/_simd/checks/cpu_sve.c similarity index 100% rename from numpy/distutils/checks/cpu_sve.c rename to numpy/_core/src/_simd/checks/cpu_sve.c diff --git a/numpy/distutils/checks/cpu_vsx.c 
b/numpy/_core/src/_simd/checks/cpu_vsx.c similarity index 100% rename from numpy/distutils/checks/cpu_vsx.c rename to numpy/_core/src/_simd/checks/cpu_vsx.c diff --git a/numpy/distutils/checks/cpu_vsx2.c b/numpy/_core/src/_simd/checks/cpu_vsx2.c similarity index 100% rename from numpy/distutils/checks/cpu_vsx2.c rename to numpy/_core/src/_simd/checks/cpu_vsx2.c diff --git a/numpy/distutils/checks/cpu_vsx3.c b/numpy/_core/src/_simd/checks/cpu_vsx3.c similarity index 100% rename from numpy/distutils/checks/cpu_vsx3.c rename to numpy/_core/src/_simd/checks/cpu_vsx3.c diff --git a/numpy/distutils/checks/cpu_vsx4.c b/numpy/_core/src/_simd/checks/cpu_vsx4.c similarity index 100% rename from numpy/distutils/checks/cpu_vsx4.c rename to numpy/_core/src/_simd/checks/cpu_vsx4.c diff --git a/numpy/distutils/checks/cpu_vx.c b/numpy/_core/src/_simd/checks/cpu_vx.c similarity index 100% rename from numpy/distutils/checks/cpu_vx.c rename to numpy/_core/src/_simd/checks/cpu_vx.c diff --git a/numpy/distutils/checks/cpu_vxe.c b/numpy/_core/src/_simd/checks/cpu_vxe.c similarity index 100% rename from numpy/distutils/checks/cpu_vxe.c rename to numpy/_core/src/_simd/checks/cpu_vxe.c diff --git a/numpy/distutils/checks/cpu_vxe2.c b/numpy/_core/src/_simd/checks/cpu_vxe2.c similarity index 100% rename from numpy/distutils/checks/cpu_vxe2.c rename to numpy/_core/src/_simd/checks/cpu_vxe2.c diff --git a/numpy/distutils/checks/cpu_xop.c b/numpy/_core/src/_simd/checks/cpu_xop.c similarity index 100% rename from numpy/distutils/checks/cpu_xop.c rename to numpy/_core/src/_simd/checks/cpu_xop.c diff --git a/numpy/distutils/checks/extra_avx512bw_mask.c b/numpy/_core/src/_simd/checks/extra_avx512bw_mask.c similarity index 100% rename from numpy/distutils/checks/extra_avx512bw_mask.c rename to numpy/_core/src/_simd/checks/extra_avx512bw_mask.c diff --git a/numpy/distutils/checks/extra_avx512dq_mask.c b/numpy/_core/src/_simd/checks/extra_avx512dq_mask.c similarity index 100% rename from 
numpy/distutils/checks/extra_avx512dq_mask.c rename to numpy/_core/src/_simd/checks/extra_avx512dq_mask.c diff --git a/numpy/distutils/checks/extra_avx512f_reduce.c b/numpy/_core/src/_simd/checks/extra_avx512f_reduce.c similarity index 100% rename from numpy/distutils/checks/extra_avx512f_reduce.c rename to numpy/_core/src/_simd/checks/extra_avx512f_reduce.c diff --git a/numpy/distutils/checks/extra_vsx3_half_double.c b/numpy/_core/src/_simd/checks/extra_vsx3_half_double.c similarity index 100% rename from numpy/distutils/checks/extra_vsx3_half_double.c rename to numpy/_core/src/_simd/checks/extra_vsx3_half_double.c diff --git a/numpy/distutils/checks/extra_vsx4_mma.c b/numpy/_core/src/_simd/checks/extra_vsx4_mma.c similarity index 100% rename from numpy/distutils/checks/extra_vsx4_mma.c rename to numpy/_core/src/_simd/checks/extra_vsx4_mma.c diff --git a/numpy/distutils/checks/extra_vsx_asm.c b/numpy/_core/src/_simd/checks/extra_vsx_asm.c similarity index 100% rename from numpy/distutils/checks/extra_vsx_asm.c rename to numpy/_core/src/_simd/checks/extra_vsx_asm.c diff --git a/numpy/distutils/checks/test_flags.c b/numpy/_core/src/_simd/checks/test_flags.c similarity index 100% rename from numpy/distutils/checks/test_flags.c rename to numpy/_core/src/_simd/checks/test_flags.c diff --git a/numpy/_pyinstaller/hook-numpy.py b/numpy/_pyinstaller/hook-numpy.py index 61c224b33810..b6f15e6edde8 100644 --- a/numpy/_pyinstaller/hook-numpy.py +++ b/numpy/_pyinstaller/hook-numpy.py @@ -32,5 +32,4 @@ "f2py", "setuptools", "distutils", - "numpy.distutils", ] diff --git a/numpy/_pytesttester.py b/numpy/_pytesttester.py index 77342e44aea0..ecc6a1cda738 100644 --- a/numpy/_pytesttester.py +++ b/numpy/_pytesttester.py @@ -136,14 +136,6 @@ def __call__(self, label='fast', verbose=1, extra_argv=None, # offset verbosity. The "-q" cancels a "-v". 
pytest_args += ["-q"] - if sys.version_info < (3, 12): - with warnings.catch_warnings(): - warnings.simplefilter("always") - # Filter out distutils cpu warnings (could be localized to - # distutils tests). ASV has problems with top level import, - # so fetch module for suppression here. - from numpy.distutils import cpuinfo # noqa: F401 - # Filter out annoying import messages. Want these in both develop and # release mode. pytest_args += [ diff --git a/numpy/distutils/__init__.py b/numpy/distutils/__init__.py deleted file mode 100644 index f74ed4d3f6db..000000000000 --- a/numpy/distutils/__init__.py +++ /dev/null @@ -1,64 +0,0 @@ -""" -An enhanced distutils, providing support for Fortran compilers, for BLAS, -LAPACK and other common libraries for numerical computing, and more. - -Public submodules are:: - - misc_util - system_info - cpu_info - log - exec_command - -For details, please see the *Packaging* and *NumPy Distutils User Guide* -sections of the NumPy Reference Guide. - -For configuring the preference for and location of libraries like BLAS and -LAPACK, and for setting include paths and similar build options, please see -``site.cfg.example`` in the root of the NumPy repository or sdist. - -""" - -import warnings - -# Must import local ccompiler ASAP in order to get -# customized CCompiler.spawn effective. -from . import ccompiler -from . import unixccompiler - -from .npy_pkg_config import * - -warnings.warn("\n\n" - " `numpy.distutils` is deprecated since NumPy 1.23.0, as a result\n" - " of the deprecation of `distutils` itself. It will be removed for\n" - " Python >= 3.12. For older Python versions it will remain present.\n" - " It is recommended to use `setuptools < 60.0` for those Python versions.\n" - " For more details, see:\n" - " https://numpy.org/devdocs/reference/distutils_status_migration.html \n\n", - DeprecationWarning, stacklevel=2 -) -del warnings - -# If numpy is installed, add distutils.test() -try: - from . 
import __config__ - # Normally numpy is installed if the above import works, but an interrupted - # in-place build could also have left a __config__.py. In that case the - # next import may still fail, so keep it inside the try block. - from numpy._pytesttester import PytestTester - test = PytestTester(__name__) - del PytestTester -except ImportError: - pass - - -def customized_fcompiler(plat=None, compiler=None): - from numpy.distutils.fcompiler import new_fcompiler - c = new_fcompiler(plat=plat, compiler=compiler) - c.customize() - return c - -def customized_ccompiler(plat=None, compiler=None, verbose=1): - c = ccompiler.new_compiler(plat=plat, compiler=compiler, verbose=verbose) - c.customize('') - return c diff --git a/numpy/distutils/__init__.pyi b/numpy/distutils/__init__.pyi deleted file mode 100644 index 3938d68de14c..000000000000 --- a/numpy/distutils/__init__.pyi +++ /dev/null @@ -1,4 +0,0 @@ -from typing import Any - -# TODO: remove when the full numpy namespace is defined -def __getattr__(name: str) -> Any: ... diff --git a/numpy/distutils/_shell_utils.py b/numpy/distutils/_shell_utils.py deleted file mode 100644 index 9a1c8ce718c9..000000000000 --- a/numpy/distutils/_shell_utils.py +++ /dev/null @@ -1,87 +0,0 @@ -""" -Helper functions for interacting with the shell, and consuming shell-style -parameters provided in config files. -""" -import os -import shlex -import subprocess - -__all__ = ['WindowsParser', 'PosixParser', 'NativeParser'] - - -class CommandLineParser: - """ - An object that knows how to split and join command-line arguments. - - It must be true that ``argv == split(join(argv))`` for all ``argv``. - The reverse neednt be true - `join(split(cmd))` may result in the addition - or removal of unnecessary escaping. 
- """ - @staticmethod - def join(argv): - """ Join a list of arguments into a command line string """ - raise NotImplementedError - - @staticmethod - def split(cmd): - """ Split a command line string into a list of arguments """ - raise NotImplementedError - - -class WindowsParser: - """ - The parsing behavior used by `subprocess.call("string")` on Windows, which - matches the Microsoft C/C++ runtime. - - Note that this is _not_ the behavior of cmd. - """ - @staticmethod - def join(argv): - # note that list2cmdline is specific to the windows syntax - return subprocess.list2cmdline(argv) - - @staticmethod - def split(cmd): - import ctypes # guarded import for systems without ctypes - try: - ctypes.windll - except AttributeError: - raise NotImplementedError - - # Windows has special parsing rules for the executable (no quotes), - # that we do not care about - insert a dummy element - if not cmd: - return [] - cmd = 'dummy ' + cmd - - CommandLineToArgvW = ctypes.windll.shell32.CommandLineToArgvW - CommandLineToArgvW.restype = ctypes.POINTER(ctypes.c_wchar_p) - CommandLineToArgvW.argtypes = (ctypes.c_wchar_p, ctypes.POINTER(ctypes.c_int)) - - nargs = ctypes.c_int() - lpargs = CommandLineToArgvW(cmd, ctypes.byref(nargs)) - args = [lpargs[i] for i in range(nargs.value)] - assert not ctypes.windll.kernel32.LocalFree(lpargs) - - # strip the element we inserted - assert args[0] == "dummy" - return args[1:] - - -class PosixParser: - """ - The parsing behavior used by `subprocess.call("string", shell=True)` on Posix. 
- """ - @staticmethod - def join(argv): - return ' '.join(shlex.quote(arg) for arg in argv) - - @staticmethod - def split(cmd): - return shlex.split(cmd, posix=True) - - -if os.name == 'nt': - NativeParser = WindowsParser -elif os.name == 'posix': - NativeParser = PosixParser diff --git a/numpy/distutils/armccompiler.py b/numpy/distutils/armccompiler.py deleted file mode 100644 index afba7eb3b352..000000000000 --- a/numpy/distutils/armccompiler.py +++ /dev/null @@ -1,26 +0,0 @@ -from distutils.unixccompiler import UnixCCompiler - -class ArmCCompiler(UnixCCompiler): - - """ - Arm compiler. - """ - - compiler_type = 'arm' - cc_exe = 'armclang' - cxx_exe = 'armclang++' - - def __init__(self, verbose=0, dry_run=0, force=0): - UnixCCompiler.__init__(self, verbose, dry_run, force) - cc_compiler = self.cc_exe - cxx_compiler = self.cxx_exe - self.set_executables(compiler=cc_compiler + - ' -O3 -fPIC', - compiler_so=cc_compiler + - ' -O3 -fPIC', - compiler_cxx=cxx_compiler + - ' -O3 -fPIC', - linker_exe=cc_compiler + - ' -lamath', - linker_so=cc_compiler + - ' -lamath -shared') diff --git a/numpy/distutils/ccompiler.py b/numpy/distutils/ccompiler.py deleted file mode 100644 index dee13b1c9e84..000000000000 --- a/numpy/distutils/ccompiler.py +++ /dev/null @@ -1,826 +0,0 @@ -import os -import re -import sys -import platform -import shlex -import time -import subprocess -from copy import copy -from pathlib import Path -from distutils import ccompiler -from distutils.ccompiler import ( - compiler_class, gen_lib_options, get_default_compiler, new_compiler, - CCompiler -) -from distutils.errors import ( - DistutilsExecError, DistutilsModuleError, DistutilsPlatformError, - CompileError, UnknownFileError -) -from distutils.sysconfig import customize_compiler -from distutils.version import LooseVersion - -from numpy.distutils import log -from numpy.distutils.exec_command import ( - filepath_from_subprocess_output, forward_bytes_to_stdout -) -from numpy.distutils.misc_util import 
cyg2win32, is_sequence, mingw32, \ - get_num_build_jobs, \ - _commandline_dep_string, \ - sanitize_cxx_flags - -# globals for parallel build management -import threading - -_job_semaphore = None -_global_lock = threading.Lock() -_processing_files = set() - - -def _needs_build(obj, cc_args, extra_postargs, pp_opts): - """ - Check if an objects needs to be rebuild based on its dependencies - - Parameters - ---------- - obj : str - object file - - Returns - ------- - bool - """ - # defined in unixcompiler.py - dep_file = obj + '.d' - if not os.path.exists(dep_file): - return True - - # dep_file is a makefile containing 'object: dependencies' - # formatted like posix shell (spaces escaped, \ line continuations) - # the last line contains the compiler commandline arguments as some - # projects may compile an extension multiple times with different - # arguments - with open(dep_file) as f: - lines = f.readlines() - - cmdline =_commandline_dep_string(cc_args, extra_postargs, pp_opts) - last_cmdline = lines[-1] - if last_cmdline != cmdline: - return True - - contents = ''.join(lines[:-1]) - deps = [x for x in shlex.split(contents, posix=True) - if x != "\n" and not x.endswith(":")] - - try: - t_obj = os.stat(obj).st_mtime - - # check if any of the dependencies is newer than the object - # the dependencies includes the source used to create the object - for f in deps: - if os.stat(f).st_mtime > t_obj: - return True - except OSError: - # no object counts as newer (shouldn't happen if dep_file exists) - return True - - return False - - -def replace_method(klass, method_name, func): - # Py3k does not have unbound method anymore, MethodType does not work - m = lambda self, *args, **kw: func(self, *args, **kw) - setattr(klass, method_name, m) - - -###################################################################### -## Method that subclasses may redefine. But don't call this method, -## it i private to CCompiler class and may return unexpected -## results if used elsewhere. 
So, you have been warned.. - -def CCompiler_find_executables(self): - """ - Does nothing here, but is called by the get_version method and can be - overridden by subclasses. In particular it is redefined in the `FCompiler` - class where more documentation can be found. - - """ - pass - - -replace_method(CCompiler, 'find_executables', CCompiler_find_executables) - - -# Using customized CCompiler.spawn. -def CCompiler_spawn(self, cmd, display=None, env=None): - """ - Execute a command in a sub-process. - - Parameters - ---------- - cmd : str - The command to execute. - display : str or sequence of str, optional - The text to add to the log file kept by `numpy.distutils`. - If not given, `display` is equal to `cmd`. - env : a dictionary for environment variables, optional - - Returns - ------- - None - - Raises - ------ - DistutilsExecError - If the command failed, i.e. the exit status was not 0. - - """ - env = env if env is not None else dict(os.environ) - if display is None: - display = cmd - if is_sequence(display): - display = ' '.join(list(display)) - log.info(display) - try: - if self.verbose: - subprocess.check_output(cmd, env=env) - else: - subprocess.check_output(cmd, stderr=subprocess.STDOUT, env=env) - except subprocess.CalledProcessError as exc: - o = exc.output - s = exc.returncode - except OSError as e: - # OSError doesn't have the same hooks for the exception - # output, but exec_command() historically would use an - # empty string for EnvironmentError (base class for - # OSError) - # o = b'' - # still that would make the end-user lost in translation! 
- o = f"\n\n{e}\n\n\n" - try: - o = o.encode(sys.stdout.encoding) - except AttributeError: - o = o.encode('utf8') - # status previously used by exec_command() for parent - # of OSError - s = 127 - else: - # use a convenience return here so that any kind of - # caught exception will execute the default code after the - # try / except block, which handles various exceptions - return None - - if is_sequence(cmd): - cmd = ' '.join(list(cmd)) - - if self.verbose: - forward_bytes_to_stdout(o) - - if re.search(b'Too many open files', o): - msg = '\nTry rerunning setup command until build succeeds.' - else: - msg = '' - raise DistutilsExecError('Command "%s" failed with exit status %d%s' % - (cmd, s, msg)) - -replace_method(CCompiler, 'spawn', CCompiler_spawn) - -def CCompiler_object_filenames(self, source_filenames, strip_dir=0, output_dir=''): - """ - Return the name of the object files for the given source files. - - Parameters - ---------- - source_filenames : list of str - The list of paths to source files. Paths can be either relative or - absolute, this is handled transparently. - strip_dir : bool, optional - Whether to strip the directory from the returned paths. If True, - the file name prepended by `output_dir` is returned. Default is False. - output_dir : str, optional - If given, this path is prepended to the returned paths to the - object files. - - Returns - ------- - obj_names : list of str - The list of paths to the object files corresponding to the source - files in `source_filenames`. - - """ - if output_dir is None: - output_dir = '' - obj_names = [] - for src_name in source_filenames: - base, ext = os.path.splitext(os.path.normpath(src_name)) - base = os.path.splitdrive(base)[1] # Chop off the drive - base = base[os.path.isabs(base):] # If abs, chop off leading / - if base.startswith('..'): - # Resolve starting relative path components, middle ones - # (if any) have been handled by os.path.normpath above. 
- i = base.rfind('..')+2 - d = base[:i] - d = os.path.basename(os.path.abspath(d)) - base = d + base[i:] - if ext not in self.src_extensions: - raise UnknownFileError("unknown file type '%s' (from '%s')" % (ext, src_name)) - if strip_dir: - base = os.path.basename(base) - obj_name = os.path.join(output_dir, base + self.obj_extension) - obj_names.append(obj_name) - return obj_names - -replace_method(CCompiler, 'object_filenames', CCompiler_object_filenames) - -def CCompiler_compile(self, sources, output_dir=None, macros=None, - include_dirs=None, debug=0, extra_preargs=None, - extra_postargs=None, depends=None): - """ - Compile one or more source files. - - Please refer to the Python distutils API reference for more details. - - Parameters - ---------- - sources : list of str - A list of filenames - output_dir : str, optional - Path to the output directory. - macros : list of tuples - A list of macro definitions. - include_dirs : list of str, optional - The directories to add to the default include file search path for - this compilation only. - debug : bool, optional - Whether or not to output debug symbols in or alongside the object - file(s). - extra_preargs, extra_postargs : ? - Extra pre- and post-arguments. - depends : list of str, optional - A list of file names that all targets depend on. - - Returns - ------- - objects : list of str - A list of object file names, one per source file `sources`. - - Raises - ------ - CompileError - If compilation fails. 
- - """ - global _job_semaphore - - jobs = get_num_build_jobs() - - # setup semaphore to not exceed number of compile jobs when parallelized at - # extension level (python >= 3.5) - with _global_lock: - if _job_semaphore is None: - _job_semaphore = threading.Semaphore(jobs) - - if not sources: - return [] - from numpy.distutils.fcompiler import (FCompiler, - FORTRAN_COMMON_FIXED_EXTENSIONS, - has_f90_header) - if isinstance(self, FCompiler): - display = [] - for fc in ['f77', 'f90', 'fix']: - fcomp = getattr(self, 'compiler_'+fc) - if fcomp is None: - continue - display.append("Fortran %s compiler: %s" % (fc, ' '.join(fcomp))) - display = '\n'.join(display) - else: - ccomp = self.compiler_so - display = "C compiler: %s\n" % (' '.join(ccomp),) - log.info(display) - macros, objects, extra_postargs, pp_opts, build = \ - self._setup_compile(output_dir, macros, include_dirs, sources, - depends, extra_postargs) - cc_args = self._get_cc_args(pp_opts, debug, extra_preargs) - display = "compile options: '%s'" % (' '.join(cc_args)) - if extra_postargs: - display += "\nextra options: '%s'" % (' '.join(extra_postargs)) - log.info(display) - - def single_compile(args): - obj, (src, ext) = args - if not _needs_build(obj, cc_args, extra_postargs, pp_opts): - return - - # check if we are currently already processing the same object - # happens when using the same source in multiple extensions - while True: - # need explicit lock as there is no atomic check and add with GIL - with _global_lock: - # file not being worked on, start working - if obj not in _processing_files: - _processing_files.add(obj) - break - # wait for the processing to end - time.sleep(0.1) - - try: - # retrieve slot from our #job semaphore and build - with _job_semaphore: - self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts) - finally: - # register being done processing - with _global_lock: - _processing_files.remove(obj) - - - if isinstance(self, FCompiler): - objects_to_build = list(build.keys()) - 
f77_objects, other_objects = [], [] - for obj in objects: - if obj in objects_to_build: - src, ext = build[obj] - if self.compiler_type=='absoft': - obj = cyg2win32(obj) - src = cyg2win32(src) - if Path(src).suffix.lower() in FORTRAN_COMMON_FIXED_EXTENSIONS \ - and not has_f90_header(src): - f77_objects.append((obj, (src, ext))) - else: - other_objects.append((obj, (src, ext))) - - # f77 objects can be built in parallel - build_items = f77_objects - # build f90 modules serial, module files are generated during - # compilation and may be used by files later in the list so the - # ordering is important - for o in other_objects: - single_compile(o) - else: - build_items = build.items() - - if len(build) > 1 and jobs > 1: - # build parallel - from concurrent.futures import ThreadPoolExecutor - with ThreadPoolExecutor(jobs) as pool: - res = pool.map(single_compile, build_items) - list(res) # access result to raise errors - else: - # build serial - for o in build_items: - single_compile(o) - - # Return *all* object filenames, not just the ones we just built. - return objects - -replace_method(CCompiler, 'compile', CCompiler_compile) - -def CCompiler_customize_cmd(self, cmd, ignore=()): - """ - Customize compiler using distutils command. - - Parameters - ---------- - cmd : class instance - An instance inheriting from ``distutils.cmd.Command``. - ignore : sequence of str, optional - List of ``distutils.ccompiler.CCompiler`` commands (without ``'set_'``) that should not be - altered. Strings that are checked for are: - ``('include_dirs', 'define', 'undef', 'libraries', 'library_dirs', - 'rpath', 'link_objects')``. - - Returns - ------- - None - - """ - log.info('customize %s using %s' % (self.__class__.__name__, - cmd.__class__.__name__)) - - if ( - hasattr(self, 'compiler') and - 'clang' in self.compiler[0] and - not (platform.machine() == 'arm64' and sys.platform == 'darwin') - ): - # clang defaults to a non-strict floating error point model. 
- # However, '-ftrapping-math' is not currently supported (2023-04-08) - # for macosx_arm64. - # Since NumPy and most Python libs give warnings for these, override: - self.compiler.append('-ftrapping-math') - self.compiler_so.append('-ftrapping-math') - - def allow(attr): - return getattr(cmd, attr, None) is not None and attr not in ignore - - if allow('include_dirs'): - self.set_include_dirs(cmd.include_dirs) - if allow('define'): - for (name, value) in cmd.define: - self.define_macro(name, value) - if allow('undef'): - for macro in cmd.undef: - self.undefine_macro(macro) - if allow('libraries'): - self.set_libraries(self.libraries + cmd.libraries) - if allow('library_dirs'): - self.set_library_dirs(self.library_dirs + cmd.library_dirs) - if allow('rpath'): - self.set_runtime_library_dirs(cmd.rpath) - if allow('link_objects'): - self.set_link_objects(cmd.link_objects) - -replace_method(CCompiler, 'customize_cmd', CCompiler_customize_cmd) - -def _compiler_to_string(compiler): - props = [] - mx = 0 - keys = list(compiler.executables.keys()) - for key in ['version', 'libraries', 'library_dirs', - 'object_switch', 'compile_switch', - 'include_dirs', 'define', 'undef', 'rpath', 'link_objects']: - if key not in keys: - keys.append(key) - for key in keys: - if hasattr(compiler, key): - v = getattr(compiler, key) - mx = max(mx, len(key)) - props.append((key, repr(v))) - fmt = '%-' + repr(mx+1) + 's = %s' - lines = [fmt % prop for prop in props] - return '\n'.join(lines) - -def CCompiler_show_customization(self): - """ - Print the compiler customizations to stdout. - - Parameters - ---------- - None - - Returns - ------- - None - - Notes - ----- - Printing is only done if the distutils log threshold is < 2. 
- - """ - try: - self.get_version() - except Exception: - pass - if log._global_log.threshold<2: - print('*'*80) - print(self.__class__) - print(_compiler_to_string(self)) - print('*'*80) - -replace_method(CCompiler, 'show_customization', CCompiler_show_customization) - -def CCompiler_customize(self, dist, need_cxx=0): - """ - Do any platform-specific customization of a compiler instance. - - This method calls ``distutils.sysconfig.customize_compiler`` for - platform-specific customization, as well as optionally remove a flag - to suppress spurious warnings in case C++ code is being compiled. - - Parameters - ---------- - dist : object - This parameter is not used for anything. - need_cxx : bool, optional - Whether or not C++ has to be compiled. If so (True), the - ``"-Wstrict-prototypes"`` option is removed to prevent spurious - warnings. Default is False. - - Returns - ------- - None - - Notes - ----- - All the default options used by distutils can be extracted with:: - - from distutils import sysconfig - sysconfig.get_config_vars('CC', 'CXX', 'OPT', 'BASECFLAGS', - 'CCSHARED', 'LDSHARED', 'SO') - - """ - # See FCompiler.customize for suggested usage. - log.info('customize %s' % (self.__class__.__name__)) - customize_compiler(self) - if need_cxx: - # In general, distutils uses -Wstrict-prototypes, but this option is - # not valid for C++ code, only for C. Remove it if it's there to - # avoid a spurious warning on every compilation. 
- try: - self.compiler_so.remove('-Wstrict-prototypes') - except (AttributeError, ValueError): - pass - - if hasattr(self, 'compiler') and 'cc' in self.compiler[0]: - if not self.compiler_cxx: - if self.compiler[0].startswith('gcc'): - a, b = 'gcc', 'g++' - else: - a, b = 'cc', 'c++' - self.compiler_cxx = [self.compiler[0].replace(a, b)]\ - + self.compiler[1:] - else: - if hasattr(self, 'compiler'): - log.warn("#### %s #######" % (self.compiler,)) - if not hasattr(self, 'compiler_cxx'): - log.warn('Missing compiler_cxx fix for ' + self.__class__.__name__) - - - # check if compiler supports gcc style automatic dependencies - # run on every extension so skip for known good compilers - if hasattr(self, 'compiler') and ('gcc' in self.compiler[0] or - 'g++' in self.compiler[0] or - 'clang' in self.compiler[0]): - self._auto_depends = True - elif os.name == 'posix': - import tempfile - import shutil - tmpdir = tempfile.mkdtemp() - try: - fn = os.path.join(tmpdir, "file.c") - with open(fn, "w") as f: - f.write("int a;\n") - self.compile([fn], output_dir=tmpdir, - extra_preargs=['-MMD', '-MF', fn + '.d']) - self._auto_depends = True - except CompileError: - self._auto_depends = False - finally: - shutil.rmtree(tmpdir) - - return - -replace_method(CCompiler, 'customize', CCompiler_customize) - -def simple_version_match(pat=r'[-.\d]+', ignore='', start=''): - """ - Simple matching of version numbers, for use in CCompiler and FCompiler. - - Parameters - ---------- - pat : str, optional - A regular expression matching version numbers. - Default is ``r'[-.\\d]+'``. - ignore : str, optional - A regular expression matching patterns to skip. - Default is ``''``, in which case nothing is skipped. - start : str, optional - A regular expression matching the start of where to start looking - for version numbers. - Default is ``''``, in which case searching is started at the - beginning of the version string given to `matcher`. 
- - Returns - ------- - matcher : callable - A function that is appropriate to use as the ``.version_match`` - attribute of a ``distutils.ccompiler.CCompiler`` class. `matcher` takes a single parameter, - a version string. - - """ - def matcher(self, version_string): - # version string may appear in the second line, so getting rid - # of new lines: - version_string = version_string.replace('\n', ' ') - pos = 0 - if start: - m = re.match(start, version_string) - if not m: - return None - pos = m.end() - while True: - m = re.search(pat, version_string[pos:]) - if not m: - return None - if ignore and re.match(ignore, m.group(0)): - pos = m.end() - continue - break - return m.group(0) - return matcher - -def CCompiler_get_version(self, force=False, ok_status=[0]): - """ - Return compiler version, or None if compiler is not available. - - Parameters - ---------- - force : bool, optional - If True, force a new determination of the version, even if the - compiler already has a version attribute. Default is False. - ok_status : list of int, optional - The list of status values returned by the version look-up process - for which a version string is returned. If the status value is not - in `ok_status`, None is returned. Default is ``[0]``. - - Returns - ------- - version : str or None - Version string, in the format of ``distutils.version.LooseVersion``. 
- - """ - if not force and hasattr(self, 'version'): - return self.version - self.find_executables() - try: - version_cmd = self.version_cmd - except AttributeError: - return None - if not version_cmd or not version_cmd[0]: - return None - try: - matcher = self.version_match - except AttributeError: - try: - pat = self.version_pattern - except AttributeError: - return None - def matcher(version_string): - m = re.match(pat, version_string) - if not m: - return None - version = m.group('version') - return version - - try: - output = subprocess.check_output(version_cmd, stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as exc: - output = exc.output - status = exc.returncode - except OSError: - # match the historical returns for a parent - # exception class caught by exec_command() - status = 127 - output = b'' - else: - # output isn't actually a filepath but we do this - # for now to match previous distutils behavior - output = filepath_from_subprocess_output(output) - status = 0 - - version = None - if status in ok_status: - version = matcher(output) - if version: - version = LooseVersion(version) - self.version = version - return version - -replace_method(CCompiler, 'get_version', CCompiler_get_version) - -def CCompiler_cxx_compiler(self): - """ - Return the C++ compiler. - - Parameters - ---------- - None - - Returns - ------- - cxx : class instance - The C++ compiler, as a ``distutils.ccompiler.CCompiler`` instance. 
- - """ - if self.compiler_type in ('msvc', 'intelw', 'intelemw'): - return self - - cxx = copy(self) - cxx.compiler_cxx = cxx.compiler_cxx - cxx.compiler_so = [cxx.compiler_cxx[0]] + \ - sanitize_cxx_flags(cxx.compiler_so[1:]) - if (sys.platform.startswith(('aix', 'os400')) and - 'ld_so_aix' in cxx.linker_so[0]): - # AIX needs the ld_so_aix script included with Python - cxx.linker_so = [cxx.linker_so[0], cxx.compiler_cxx[0]] \ - + cxx.linker_so[2:] - if sys.platform.startswith('os400'): - #This is required by i 7.4 and prievous for PRId64 in printf() call. - cxx.compiler_so.append('-D__STDC_FORMAT_MACROS') - #This a bug of gcc10.3, which failed to handle the TLS init. - cxx.compiler_so.append('-fno-extern-tls-init') - cxx.linker_so.append('-fno-extern-tls-init') - else: - cxx.linker_so = [cxx.compiler_cxx[0]] + cxx.linker_so[1:] - return cxx - -replace_method(CCompiler, 'cxx_compiler', CCompiler_cxx_compiler) - -compiler_class['intel'] = ('intelccompiler', 'IntelCCompiler', - "Intel C Compiler for 32-bit applications") -compiler_class['intele'] = ('intelccompiler', 'IntelItaniumCCompiler', - "Intel C Itanium Compiler for Itanium-based applications") -compiler_class['intelem'] = ('intelccompiler', 'IntelEM64TCCompiler', - "Intel C Compiler for 64-bit applications") -compiler_class['intelw'] = ('intelccompiler', 'IntelCCompilerW', - "Intel C Compiler for 32-bit applications on Windows") -compiler_class['intelemw'] = ('intelccompiler', 'IntelEM64TCCompilerW', - "Intel C Compiler for 64-bit applications on Windows") -compiler_class['pathcc'] = ('pathccompiler', 'PathScaleCCompiler', - "PathScale Compiler for SiCortex-based applications") -compiler_class['arm'] = ('armccompiler', 'ArmCCompiler', - "Arm C Compiler") -compiler_class['fujitsu'] = ('fujitsuccompiler', 'FujitsuCCompiler', - "Fujitsu C Compiler") - -ccompiler._default_compilers += (('linux.*', 'intel'), - ('linux.*', 'intele'), - ('linux.*', 'intelem'), - ('linux.*', 'pathcc'), - ('nt', 'intelw'), - ('nt', 
'intelemw')) - -if sys.platform == 'win32': - compiler_class['mingw32'] = ('mingw32ccompiler', 'Mingw32CCompiler', - "Mingw32 port of GNU C Compiler for Win32"\ - "(for MSC built Python)") - if mingw32(): - # On windows platforms, we want to default to mingw32 (gcc) - # because msvc can't build blitz stuff. - log.info('Setting mingw32 as default compiler for nt.') - ccompiler._default_compilers = (('nt', 'mingw32'),) \ - + ccompiler._default_compilers - - -_distutils_new_compiler = new_compiler -def new_compiler (plat=None, - compiler=None, - verbose=None, - dry_run=0, - force=0): - # Try first C compilers from numpy.distutils. - if verbose is None: - verbose = log.get_threshold() <= log.INFO - if plat is None: - plat = os.name - try: - if compiler is None: - compiler = get_default_compiler(plat) - (module_name, class_name, long_description) = compiler_class[compiler] - except KeyError: - msg = "don't know how to compile C/C++ code on platform '%s'" % plat - if compiler is not None: - msg = msg + " with '%s' compiler" % compiler - raise DistutilsPlatformError(msg) - module_name = "numpy.distutils." 
+ module_name - try: - __import__ (module_name) - except ImportError as e: - msg = str(e) - log.info('%s in numpy.distutils; trying from distutils', - str(msg)) - module_name = module_name[6:] - try: - __import__(module_name) - except ImportError as e: - msg = str(e) - raise DistutilsModuleError("can't compile C/C++ code: unable to load " - "module '%s'" % module_name) - try: - module = sys.modules[module_name] - klass = vars(module)[class_name] - except KeyError: - raise DistutilsModuleError(("can't compile C/C++ code: unable to find " - "class '%s' in module '%s'") % (class_name, module_name)) - compiler = klass(None, dry_run, force) - compiler.verbose = verbose - log.debug('new_compiler returns %s' % (klass)) - return compiler - -ccompiler.new_compiler = new_compiler - -_distutils_gen_lib_options = gen_lib_options -def gen_lib_options(compiler, library_dirs, runtime_library_dirs, libraries): - # the version of this function provided by CPython allows the following - # to return lists, which are unpacked automatically: - # - compiler.runtime_library_dir_option - # our version extends the behavior to: - # - compiler.library_dir_option - # - compiler.library_option - # - compiler.find_library_file - r = _distutils_gen_lib_options(compiler, library_dirs, - runtime_library_dirs, libraries) - lib_opts = [] - for i in r: - if is_sequence(i): - lib_opts.extend(list(i)) - else: - lib_opts.append(i) - return lib_opts -ccompiler.gen_lib_options = gen_lib_options - -# Also fix up the various compiler modules, which do -# from distutils.ccompiler import gen_lib_options -# Don't bother with mwerks, as we don't support Classic Mac. -for _cc in ['msvc9', 'msvc', '_msvc', 'bcpp', 'cygwinc', 'emxc', 'unixc']: - _m = sys.modules.get('distutils.' 
+ _cc + 'compiler') - if _m is not None: - setattr(_m, 'gen_lib_options', gen_lib_options) - diff --git a/numpy/distutils/ccompiler_opt.py b/numpy/distutils/ccompiler_opt.py deleted file mode 100644 index 4dea2f9b1da1..000000000000 --- a/numpy/distutils/ccompiler_opt.py +++ /dev/null @@ -1,2668 +0,0 @@ -"""Provides the `CCompilerOpt` class, used for handling the CPU/hardware -optimization, starting from parsing the command arguments, to managing the -relation between the CPU baseline and dispatch-able features, -also generating the required C headers and ending with compiling -the sources with proper compiler's flags. - -`CCompilerOpt` doesn't provide runtime detection for the CPU features, -instead only focuses on the compiler side, but it creates abstract C headers -that can be used later for the final runtime dispatching process.""" - -import atexit -import inspect -import os -import pprint -import re -import subprocess -import textwrap - -class _Config: - """An abstract class holds all configurable attributes of `CCompilerOpt`, - these class attributes can be used to change the default behavior - of `CCompilerOpt` in order to fit other requirements. - - Attributes - ---------- - conf_nocache : bool - Set True to disable memory and file cache. - Default is False. - - conf_noopt : bool - Set True to forces the optimization to be disabled, - in this case `CCompilerOpt` tends to generate all - expected headers in order to 'not' break the build. - Default is False. - - conf_cache_factors : list - Add extra factors to the primary caching factors. The caching factors - are utilized to determine if there are changes had happened that - requires to discard the cache and re-updating it. The primary factors - are the arguments of `CCompilerOpt` and `CCompiler`'s properties(type, flags, etc). 
- Default is list of two items, containing the time of last modification - of `ccompiler_opt` and value of attribute "conf_noopt" - - conf_tmp_path : str, - The path of temporary directory. Default is auto-created - temporary directory via ``tempfile.mkdtemp()``. - - conf_check_path : str - The path of testing files. Each added CPU feature must have a - **C** source file contains at least one intrinsic or instruction that - related to this feature, so it can be tested against the compiler. - Default is ``./distutils/checks``. - - conf_target_groups : dict - Extra tokens that can be reached from dispatch-able sources through - the special mark ``@targets``. Default is an empty dictionary. - - **Notes**: - - case-insensitive for tokens and group names - - sign '#' must stick in the begin of group name and only within ``@targets`` - - **Example**: - .. code-block:: console - - $ "@targets #avx_group other_tokens" > group_inside.c - - >>> CCompilerOpt.conf_target_groups["avx_group"] = \\ - "$werror $maxopt avx2 avx512f avx512_skx" - >>> cco = CCompilerOpt(cc_instance) - >>> cco.try_dispatch(["group_inside.c"]) - - conf_c_prefix : str - The prefix of public C definitions. Default is ``"NPY_"``. - - conf_c_prefix_ : str - The prefix of internal C definitions. Default is ``"NPY__"``. - - conf_cc_flags : dict - Nested dictionaries defining several compiler flags - that linked to some major functions, the main key - represent the compiler name and sub-keys represent - flags names. Default is already covers all supported - **C** compilers. - - Sub-keys explained as follows: - - "native": str or None - used by argument option `native`, to detect the current - machine support via the compiler. - "werror": str or None - utilized to treat warning as errors during testing CPU features - against the compiler and also for target's policy `$werror` - via dispatch-able sources. 
- "maxopt": str or None - utilized for target's policy '$maxopt' and the value should - contains the maximum acceptable optimization by the compiler. - e.g. in gcc ``'-O3'`` - - **Notes**: - * case-sensitive for compiler names and flags - * use space to separate multiple flags - * any flag will tested against the compiler and it will skipped - if it's not applicable. - - conf_min_features : dict - A dictionary defines the used CPU features for - argument option ``'min'``, the key represent the CPU architecture - name e.g. ``'x86'``. Default values provide the best effort - on wide range of users platforms. - - **Note**: case-sensitive for architecture names. - - conf_features : dict - Nested dictionaries used for identifying the CPU features. - the primary key is represented as a feature name or group name - that gathers several features. Default values covers all - supported features but without the major options like "flags", - these undefined options handle it by method `conf_features_partial()`. - Default value is covers almost all CPU features for *X86*, *IBM/Power64* - and *ARM 7/8*. - - Sub-keys explained as follows: - - "implies" : str or list, optional, - List of CPU feature names to be implied by it, - the feature name must be defined within `conf_features`. - Default is None. - - "flags": str or list, optional - List of compiler flags. Default is None. - - "detect": str or list, optional - List of CPU feature names that required to be detected - in runtime. By default, its the feature name or features - in "group" if its specified. - - "implies_detect": bool, optional - If True, all "detect" of implied features will be combined. - Default is True. see `feature_detect()`. - - "group": str or list, optional - Same as "implies" but doesn't require the feature name to be - defined within `conf_features`. 
- - "interest": int, required - a key for sorting CPU features - - "headers": str or list, optional - intrinsics C header file - - "disable": str, optional - force disable feature, the string value should contains the - reason of disabling. - - "autovec": bool or None, optional - True or False to declare that CPU feature can be auto-vectorized - by the compiler. - By default(None), treated as True if the feature contains at - least one applicable flag. see `feature_can_autovec()` - - "extra_checks": str or list, optional - Extra test case names for the CPU feature that need to be tested - against the compiler. - - Each test case must have a C file named ``extra_xxxx.c``, where - ``xxxx`` is the case name in lower case, under 'conf_check_path'. - It should contain at least one intrinsic or function related to the test case. - - If the compiler able to successfully compile the C file then `CCompilerOpt` - will add a C ``#define`` for it into the main dispatch header, e.g. - ``#define {conf_c_prefix}_XXXX`` where ``XXXX`` is the case name in upper case. - - **NOTES**: - * space can be used as separator with options that supports "str or list" - * case-sensitive for all values and feature name must be in upper-case. 
- * if flags aren't applicable, its will skipped rather than disable the - CPU feature - * the CPU feature will disabled if the compiler fail to compile - the test file - """ - conf_nocache = False - conf_noopt = False - conf_cache_factors = None - conf_tmp_path = None - conf_check_path = os.path.join( - os.path.dirname(os.path.realpath(__file__)), "checks" - ) - conf_target_groups = {} - conf_c_prefix = 'NPY_' - conf_c_prefix_ = 'NPY__' - conf_cc_flags = dict( - gcc = dict( - # native should always fail on arm and ppc64, - # native usually works only with x86 - native = '-march=native', - opt = '-O3', - werror = '-Werror', - ), - clang = dict( - native = '-march=native', - opt = "-O3", - # One of the following flags needs to be applicable for Clang to - # guarantee the sanity of the testing process, however in certain - # cases `-Werror` gets skipped during the availability test due to - # "unused arguments" warnings. - # see https://github.com/numpy/numpy/issues/19624 - werror = '-Werror=switch -Werror', - ), - icc = dict( - native = '-xHost', - opt = '-O3', - werror = '-Werror', - ), - iccw = dict( - native = '/QxHost', - opt = '/O3', - werror = '/Werror', - ), - msvc = dict( - native = None, - opt = '/O2', - werror = '/WX', - ), - fcc = dict( - native = '-mcpu=a64fx', - opt = None, - werror = None, - ) - ) - conf_min_features = dict( - x86 = "SSE SSE2", - x64 = "SSE SSE2 SSE3", - ppc64 = '', # play it safe - ppc64le = "VSX VSX2", - s390x = '', - armhf = '', # play it safe - aarch64 = "NEON NEON_FP16 NEON_VFPV4 ASIMD" - ) - conf_features = dict( - # X86 - SSE = dict( - interest=1, headers="xmmintrin.h", - # enabling SSE without SSE2 is useless also - # it's non-optional for x86_64 - implies="SSE2" - ), - SSE2 = dict(interest=2, implies="SSE", headers="emmintrin.h"), - SSE3 = dict(interest=3, implies="SSE2", headers="pmmintrin.h"), - SSSE3 = dict(interest=4, implies="SSE3", headers="tmmintrin.h"), - SSE41 = dict(interest=5, implies="SSSE3", 
headers="smmintrin.h"), - POPCNT = dict(interest=6, implies="SSE41", headers="popcntintrin.h"), - SSE42 = dict(interest=7, implies="POPCNT"), - AVX = dict( - interest=8, implies="SSE42", headers="immintrin.h", - implies_detect=False - ), - XOP = dict(interest=9, implies="AVX", headers="x86intrin.h"), - FMA4 = dict(interest=10, implies="AVX", headers="x86intrin.h"), - F16C = dict(interest=11, implies="AVX"), - FMA3 = dict(interest=12, implies="F16C"), - AVX2 = dict(interest=13, implies="F16C"), - AVX512F = dict( - interest=20, implies="FMA3 AVX2", implies_detect=False, - extra_checks="AVX512F_REDUCE" - ), - AVX512CD = dict(interest=21, implies="AVX512F"), - AVX512_KNL = dict( - interest=40, implies="AVX512CD", group="AVX512ER AVX512PF", - detect="AVX512_KNL", implies_detect=False - ), - AVX512_KNM = dict( - interest=41, implies="AVX512_KNL", - group="AVX5124FMAPS AVX5124VNNIW AVX512VPOPCNTDQ", - detect="AVX512_KNM", implies_detect=False - ), - AVX512_SKX = dict( - interest=42, implies="AVX512CD", group="AVX512VL AVX512BW AVX512DQ", - detect="AVX512_SKX", implies_detect=False, - extra_checks="AVX512BW_MASK AVX512DQ_MASK" - ), - AVX512_CLX = dict( - interest=43, implies="AVX512_SKX", group="AVX512VNNI", - detect="AVX512_CLX" - ), - AVX512_CNL = dict( - interest=44, implies="AVX512_SKX", group="AVX512IFMA AVX512VBMI", - detect="AVX512_CNL", implies_detect=False - ), - AVX512_ICL = dict( - interest=45, implies="AVX512_CLX AVX512_CNL", - group="AVX512VBMI2 AVX512BITALG AVX512VPOPCNTDQ", - detect="AVX512_ICL", implies_detect=False - ), - AVX512_SPR = dict( - interest=46, implies="AVX512_ICL", group="AVX512FP16", - detect="AVX512_SPR", implies_detect=False - ), - # IBM/Power - ## Power7/ISA 2.06 - VSX = dict(interest=1, headers="altivec.h", extra_checks="VSX_ASM"), - ## Power8/ISA 2.07 - VSX2 = dict(interest=2, implies="VSX", implies_detect=False), - ## Power9/ISA 3.00 - VSX3 = dict(interest=3, implies="VSX2", implies_detect=False, - extra_checks="VSX3_HALF_DOUBLE"), - ## 
Power10/ISA 3.1 - VSX4 = dict(interest=4, implies="VSX3", implies_detect=False, - extra_checks="VSX4_MMA"), - # IBM/Z - ## VX(z13) support - VX = dict(interest=1, headers="vecintrin.h"), - ## Vector-Enhancements Facility - VXE = dict(interest=2, implies="VX", implies_detect=False), - ## Vector-Enhancements Facility 2 - VXE2 = dict(interest=3, implies="VXE", implies_detect=False), - # ARM - NEON = dict(interest=1, headers="arm_neon.h"), - NEON_FP16 = dict(interest=2, implies="NEON"), - ## FMA - NEON_VFPV4 = dict(interest=3, implies="NEON_FP16"), - ## Advanced SIMD - ASIMD = dict(interest=4, implies="NEON_FP16 NEON_VFPV4", implies_detect=False), - ## ARMv8.2 half-precision & vector arithm - ASIMDHP = dict(interest=5, implies="ASIMD"), - ## ARMv8.2 dot product - ASIMDDP = dict(interest=6, implies="ASIMD"), - ## ARMv8.2 Single & half-precision Multiply - ASIMDFHM = dict(interest=7, implies="ASIMDHP") - ) - def conf_features_partial(self): - """Return a dictionary of supported CPU features by the platform, - and accumulate the rest of undefined options in `conf_features`, - the returned dict has same rules and notes in - class attribute `conf_features`, also its override - any options that been set in 'conf_features'. 
- """ - if self.cc_noopt: - # optimization is disabled - return {} - - on_x86 = self.cc_on_x86 or self.cc_on_x64 - is_unix = self.cc_is_gcc or self.cc_is_clang or self.cc_is_fcc - - if on_x86 and is_unix: return dict( - SSE = dict(flags="-msse"), - SSE2 = dict(flags="-msse2"), - SSE3 = dict(flags="-msse3"), - SSSE3 = dict(flags="-mssse3"), - SSE41 = dict(flags="-msse4.1"), - POPCNT = dict(flags="-mpopcnt"), - SSE42 = dict(flags="-msse4.2"), - AVX = dict(flags="-mavx"), - F16C = dict(flags="-mf16c"), - XOP = dict(flags="-mxop"), - FMA4 = dict(flags="-mfma4"), - FMA3 = dict(flags="-mfma"), - AVX2 = dict(flags="-mavx2"), - AVX512F = dict(flags="-mavx512f -mno-mmx"), - AVX512CD = dict(flags="-mavx512cd"), - AVX512_KNL = dict(flags="-mavx512er -mavx512pf"), - AVX512_KNM = dict( - flags="-mavx5124fmaps -mavx5124vnniw -mavx512vpopcntdq" - ), - AVX512_SKX = dict(flags="-mavx512vl -mavx512bw -mavx512dq"), - AVX512_CLX = dict(flags="-mavx512vnni"), - AVX512_CNL = dict(flags="-mavx512ifma -mavx512vbmi"), - AVX512_ICL = dict( - flags="-mavx512vbmi2 -mavx512bitalg -mavx512vpopcntdq" - ), - AVX512_SPR = dict(flags="-mavx512fp16"), - ) - if on_x86 and self.cc_is_icc: return dict( - SSE = dict(flags="-msse"), - SSE2 = dict(flags="-msse2"), - SSE3 = dict(flags="-msse3"), - SSSE3 = dict(flags="-mssse3"), - SSE41 = dict(flags="-msse4.1"), - POPCNT = {}, - SSE42 = dict(flags="-msse4.2"), - AVX = dict(flags="-mavx"), - F16C = {}, - XOP = dict(disable="Intel Compiler doesn't support it"), - FMA4 = dict(disable="Intel Compiler doesn't support it"), - # Intel Compiler doesn't support AVX2 or FMA3 independently - FMA3 = dict( - implies="F16C AVX2", flags="-march=core-avx2" - ), - AVX2 = dict(implies="FMA3", flags="-march=core-avx2"), - # Intel Compiler doesn't support AVX512F or AVX512CD independently - AVX512F = dict( - implies="AVX2 AVX512CD", flags="-march=common-avx512" - ), - AVX512CD = dict( - implies="AVX2 AVX512F", flags="-march=common-avx512" - ), - AVX512_KNL = 
dict(flags="-xKNL"), - AVX512_KNM = dict(flags="-xKNM"), - AVX512_SKX = dict(flags="-xSKYLAKE-AVX512"), - AVX512_CLX = dict(flags="-xCASCADELAKE"), - AVX512_CNL = dict(flags="-xCANNONLAKE"), - AVX512_ICL = dict(flags="-xICELAKE-CLIENT"), - AVX512_SPR = dict(disable="Not supported yet") - ) - if on_x86 and self.cc_is_iccw: return dict( - SSE = dict(flags="/arch:SSE"), - SSE2 = dict(flags="/arch:SSE2"), - SSE3 = dict(flags="/arch:SSE3"), - SSSE3 = dict(flags="/arch:SSSE3"), - SSE41 = dict(flags="/arch:SSE4.1"), - POPCNT = {}, - SSE42 = dict(flags="/arch:SSE4.2"), - AVX = dict(flags="/arch:AVX"), - F16C = {}, - XOP = dict(disable="Intel Compiler doesn't support it"), - FMA4 = dict(disable="Intel Compiler doesn't support it"), - # Intel Compiler doesn't support FMA3 or AVX2 independently - FMA3 = dict( - implies="F16C AVX2", flags="/arch:CORE-AVX2" - ), - AVX2 = dict( - implies="FMA3", flags="/arch:CORE-AVX2" - ), - # Intel Compiler doesn't support AVX512F or AVX512CD independently - AVX512F = dict( - implies="AVX2 AVX512CD", flags="/Qx:COMMON-AVX512" - ), - AVX512CD = dict( - implies="AVX2 AVX512F", flags="/Qx:COMMON-AVX512" - ), - AVX512_KNL = dict(flags="/Qx:KNL"), - AVX512_KNM = dict(flags="/Qx:KNM"), - AVX512_SKX = dict(flags="/Qx:SKYLAKE-AVX512"), - AVX512_CLX = dict(flags="/Qx:CASCADELAKE"), - AVX512_CNL = dict(flags="/Qx:CANNONLAKE"), - AVX512_ICL = dict(flags="/Qx:ICELAKE-CLIENT"), - AVX512_SPR = dict(disable="Not supported yet") - ) - if on_x86 and self.cc_is_msvc: return dict( - SSE = dict(flags="/arch:SSE") if self.cc_on_x86 else {}, - SSE2 = dict(flags="/arch:SSE2") if self.cc_on_x86 else {}, - SSE3 = {}, - SSSE3 = {}, - SSE41 = {}, - POPCNT = dict(headers="nmmintrin.h"), - SSE42 = {}, - AVX = dict(flags="/arch:AVX"), - F16C = {}, - XOP = dict(headers="ammintrin.h"), - FMA4 = dict(headers="ammintrin.h"), - # MSVC doesn't support FMA3 or AVX2 independently - FMA3 = dict( - implies="F16C AVX2", flags="/arch:AVX2" - ), - AVX2 = dict( - implies="F16C FMA3", 
flags="/arch:AVX2" - ), - # MSVC doesn't support AVX512F or AVX512CD independently, - # always generate instructions belong to (VL/VW/DQ) - AVX512F = dict( - implies="AVX2 AVX512CD AVX512_SKX", flags="/arch:AVX512" - ), - AVX512CD = dict( - implies="AVX512F AVX512_SKX", flags="/arch:AVX512" - ), - AVX512_KNL = dict( - disable="MSVC compiler doesn't support it" - ), - AVX512_KNM = dict( - disable="MSVC compiler doesn't support it" - ), - AVX512_SKX = dict(flags="/arch:AVX512"), - AVX512_CLX = {}, - AVX512_CNL = {}, - AVX512_ICL = {}, - AVX512_SPR= dict( - disable="MSVC compiler doesn't support it" - ) - ) - - on_power = self.cc_on_ppc64le or self.cc_on_ppc64 - if on_power: - partial = dict( - VSX = dict( - implies=("VSX2" if self.cc_on_ppc64le else ""), - flags="-mvsx" - ), - VSX2 = dict( - flags="-mcpu=power8", implies_detect=False - ), - VSX3 = dict( - flags="-mcpu=power9 -mtune=power9", implies_detect=False - ), - VSX4 = dict( - flags="-mcpu=power10 -mtune=power10", implies_detect=False - ) - ) - if self.cc_is_clang: - partial["VSX"]["flags"] = "-maltivec -mvsx" - partial["VSX2"]["flags"] = "-mcpu=power8" - partial["VSX3"]["flags"] = "-mcpu=power9" - partial["VSX4"]["flags"] = "-mcpu=power10" - - return partial - - on_zarch = self.cc_on_s390x - if on_zarch: - partial = dict( - VX = dict( - flags="-march=arch11 -mzvector" - ), - VXE = dict( - flags="-march=arch12", implies_detect=False - ), - VXE2 = dict( - flags="-march=arch13", implies_detect=False - ) - ) - - return partial - - - if self.cc_on_aarch64 and is_unix: return dict( - NEON = dict( - implies="NEON_FP16 NEON_VFPV4 ASIMD", autovec=True - ), - NEON_FP16 = dict( - implies="NEON NEON_VFPV4 ASIMD", autovec=True - ), - NEON_VFPV4 = dict( - implies="NEON NEON_FP16 ASIMD", autovec=True - ), - ASIMD = dict( - implies="NEON NEON_FP16 NEON_VFPV4", autovec=True - ), - ASIMDHP = dict( - flags="-march=armv8.2-a+fp16" - ), - ASIMDDP = dict( - flags="-march=armv8.2-a+dotprod" - ), - ASIMDFHM = dict( - 
flags="-march=armv8.2-a+fp16fml" - ), - ) - if self.cc_on_armhf and is_unix: return dict( - NEON = dict( - flags="-mfpu=neon" - ), - NEON_FP16 = dict( - flags="-mfpu=neon-fp16 -mfp16-format=ieee" - ), - NEON_VFPV4 = dict( - flags="-mfpu=neon-vfpv4", - ), - ASIMD = dict( - flags="-mfpu=neon-fp-armv8 -march=armv8-a+simd", - ), - ASIMDHP = dict( - flags="-march=armv8.2-a+fp16" - ), - ASIMDDP = dict( - flags="-march=armv8.2-a+dotprod", - ), - ASIMDFHM = dict( - flags="-march=armv8.2-a+fp16fml" - ) - ) - # TODO: ARM MSVC - return {} - - def __init__(self): - if self.conf_tmp_path is None: - import shutil - import tempfile - tmp = tempfile.mkdtemp() - def rm_temp(): - try: - shutil.rmtree(tmp) - except OSError: - pass - atexit.register(rm_temp) - self.conf_tmp_path = tmp - - if self.conf_cache_factors is None: - self.conf_cache_factors = [ - os.path.getmtime(__file__), - self.conf_nocache - ] - -class _Distutils: - """A helper class that provides a collection of fundamental methods - implemented in a top of Python and NumPy Distutils. - - The idea behind this class is to gather all methods that it may - need to override in case of reuse 'CCompilerOpt' in environment - different than of what NumPy has. - - Parameters - ---------- - ccompiler : `CCompiler` - The generate instance that returned from `distutils.ccompiler.new_compiler()`. - """ - def __init__(self, ccompiler): - self._ccompiler = ccompiler - - def dist_compile(self, sources, flags, ccompiler=None, **kwargs): - """Wrap CCompiler.compile()""" - assert(isinstance(sources, list)) - assert(isinstance(flags, list)) - flags = kwargs.pop("extra_postargs", []) + flags - if not ccompiler: - ccompiler = self._ccompiler - - return ccompiler.compile(sources, extra_postargs=flags, **kwargs) - - def dist_test(self, source, flags, macros=[]): - """Return True if 'CCompiler.compile()' able to compile - a source file with certain flags. 
- """ - assert(isinstance(source, str)) - from distutils.errors import CompileError - cc = self._ccompiler; - bk_spawn = getattr(cc, 'spawn', None) - if bk_spawn: - cc_type = getattr(self._ccompiler, "compiler_type", "") - if cc_type in ("msvc",): - setattr(cc, 'spawn', self._dist_test_spawn_paths) - else: - setattr(cc, 'spawn', self._dist_test_spawn) - test = False - try: - self.dist_compile( - [source], flags, macros=macros, output_dir=self.conf_tmp_path - ) - test = True - except CompileError as e: - self.dist_log(str(e), stderr=True) - if bk_spawn: - setattr(cc, 'spawn', bk_spawn) - return test - - def dist_info(self): - """ - Return a tuple containing info about (platform, compiler, extra_args), - required by the abstract class '_CCompiler' for discovering the - platform environment. This is also used as a cache factor in order - to detect any changes happening from outside. - """ - if hasattr(self, "_dist_info"): - return self._dist_info - - cc_type = getattr(self._ccompiler, "compiler_type", '') - if cc_type in ("intelem", "intelemw"): - platform = "x86_64" - elif cc_type in ("intel", "intelw", "intele"): - platform = "x86" - else: - from distutils.util import get_platform - platform = get_platform() - - cc_info = getattr(self._ccompiler, "compiler", getattr(self._ccompiler, "compiler_so", '')) - if not cc_type or cc_type == "unix": - if hasattr(cc_info, "__iter__"): - compiler = cc_info[0] - else: - compiler = str(cc_info) - else: - compiler = cc_type - - if hasattr(cc_info, "__iter__") and len(cc_info) > 1: - extra_args = ' '.join(cc_info[1:]) - else: - extra_args = os.environ.get("CFLAGS", "") - extra_args += os.environ.get("CPPFLAGS", "") - - self._dist_info = (platform, compiler, extra_args) - return self._dist_info - - @staticmethod - def dist_error(*args): - """Raise a compiler error""" - from distutils.errors import CompileError - raise CompileError(_Distutils._dist_str(*args)) - - @staticmethod - def dist_fatal(*args): - """Raise a distutils 
error""" - from distutils.errors import DistutilsError - raise DistutilsError(_Distutils._dist_str(*args)) - - @staticmethod - def dist_log(*args, stderr=False): - """Print a console message""" - from numpy.distutils import log - out = _Distutils._dist_str(*args) - if stderr: - log.warn(out) - else: - log.info(out) - - @staticmethod - def dist_load_module(name, path): - """Load a module from file, required by the abstract class '_Cache'.""" - from .misc_util import exec_mod_from_location - try: - return exec_mod_from_location(name, path) - except Exception as e: - _Distutils.dist_log(e, stderr=True) - return None - - @staticmethod - def _dist_str(*args): - """Return a string to print by log and errors.""" - def to_str(arg): - if not isinstance(arg, str) and hasattr(arg, '__iter__'): - ret = [] - for a in arg: - ret.append(to_str(a)) - return '('+ ' '.join(ret) + ')' - return str(arg) - - stack = inspect.stack()[2] - start = "CCompilerOpt.%s[%d] : " % (stack.function, stack.lineno) - out = ' '.join([ - to_str(a) - for a in (*args,) - ]) - return start + out - - def _dist_test_spawn_paths(self, cmd, display=None): - """ - Fix msvc SDK ENV path same as distutils do - without it we get c1: fatal error C1356: unable to find mspdbcore.dll - """ - if not hasattr(self._ccompiler, "_paths"): - self._dist_test_spawn(cmd) - return - old_path = os.getenv("path") - try: - os.environ["path"] = self._ccompiler._paths - self._dist_test_spawn(cmd) - finally: - os.environ["path"] = old_path - - _dist_warn_regex = re.compile( - # intel and msvc compilers don't raise - # fatal errors when flags are wrong or unsupported - ".*(" - "warning D9002|" # msvc, it should be work with any language. 
- "invalid argument for option" # intel - ").*" - ) - @staticmethod - def _dist_test_spawn(cmd, display=None): - try: - o = subprocess.check_output(cmd, stderr=subprocess.STDOUT, - text=True) - if o and re.match(_Distutils._dist_warn_regex, o): - _Distutils.dist_error( - "Flags in command", cmd ,"aren't supported by the compiler" - ", output -> \n%s" % o - ) - except subprocess.CalledProcessError as exc: - o = exc.output - s = exc.returncode - except OSError as e: - o = e - s = 127 - else: - return None - _Distutils.dist_error( - "Command", cmd, "failed with exit status %d output -> \n%s" % ( - s, o - )) - -_share_cache = {} -class _Cache: - """An abstract class handles caching functionality, provides two - levels of caching, in-memory by share instances attributes among - each other and by store attributes into files. - - **Note**: - any attributes that start with ``_`` or ``conf_`` will be ignored. - - Parameters - ---------- - cache_path : str or None - The path of cache file, if None then cache in file will disabled. - - *factors : - The caching factors that need to utilize next to `conf_cache_factors`. - - Attributes - ---------- - cache_private : set - Hold the attributes that need be skipped from "in-memory cache". - - cache_infile : bool - Utilized during initializing this class, to determine if the cache was able - to loaded from the specified cache path in 'cache_path'. 
- """ - - # skip attributes from cache - _cache_ignore = re.compile("^(_|conf_)") - - def __init__(self, cache_path=None, *factors): - self.cache_me = {} - self.cache_private = set() - self.cache_infile = False - self._cache_path = None - - if self.conf_nocache: - self.dist_log("cache is disabled by `Config`") - return - - self._cache_hash = self.cache_hash(*factors, *self.conf_cache_factors) - self._cache_path = cache_path - if cache_path: - if os.path.exists(cache_path): - self.dist_log("load cache from file ->", cache_path) - cache_mod = self.dist_load_module("cache", cache_path) - if not cache_mod: - self.dist_log( - "unable to load the cache file as a module", - stderr=True - ) - elif not hasattr(cache_mod, "hash") or \ - not hasattr(cache_mod, "data"): - self.dist_log("invalid cache file", stderr=True) - elif self._cache_hash == cache_mod.hash: - self.dist_log("hit the file cache") - for attr, val in cache_mod.data.items(): - setattr(self, attr, val) - self.cache_infile = True - else: - self.dist_log("miss the file cache") - - if not self.cache_infile: - other_cache = _share_cache.get(self._cache_hash) - if other_cache: - self.dist_log("hit the memory cache") - for attr, val in other_cache.__dict__.items(): - if attr in other_cache.cache_private or \ - re.match(self._cache_ignore, attr): - continue - setattr(self, attr, val) - - _share_cache[self._cache_hash] = self - atexit.register(self.cache_flush) - - def __del__(self): - for h, o in _share_cache.items(): - if o == self: - _share_cache.pop(h) - break - - def cache_flush(self): - """ - Force update the cache. 
- """ - if not self._cache_path: - return - # TODO: don't write if the cache doesn't change - self.dist_log("write cache to path ->", self._cache_path) - cdict = self.__dict__.copy() - for attr in self.__dict__.keys(): - if re.match(self._cache_ignore, attr): - cdict.pop(attr) - - d = os.path.dirname(self._cache_path) - if not os.path.exists(d): - os.makedirs(d) - - repr_dict = pprint.pformat(cdict, compact=True) - with open(self._cache_path, "w") as f: - f.write(textwrap.dedent("""\ - # AUTOGENERATED DON'T EDIT - # Please make changes to the code generator \ - (distutils/ccompiler_opt.py) - hash = {} - data = \\ - """).format(self._cache_hash)) - f.write(repr_dict) - - def cache_hash(self, *factors): - # is there a built-in non-crypto hash? - # sdbm - chash = 0 - for f in factors: - for char in str(f): - chash = ord(char) + (chash << 6) + (chash << 16) - chash - chash &= 0xFFFFFFFF - return chash - - @staticmethod - def me(cb): - """ - A static method that can be treated as a decorator to - dynamically cache certain methods. - """ - def cache_wrap_me(self, *args, **kwargs): - # good for normal args - cache_key = str(( - cb.__name__, *args, *kwargs.keys(), *kwargs.values() - )) - if cache_key in self.cache_me: - return self.cache_me[cache_key] - ccb = cb(self, *args, **kwargs) - self.cache_me[cache_key] = ccb - return ccb - return cache_wrap_me - -class _CCompiler: - """A helper class for `CCompilerOpt` containing all utilities that - related to the fundamental compiler's functions. 
- - Attributes - ---------- - cc_on_x86 : bool - True when the target architecture is 32-bit x86 - cc_on_x64 : bool - True when the target architecture is 64-bit x86 - cc_on_ppc64 : bool - True when the target architecture is 64-bit big-endian powerpc - cc_on_ppc64le : bool - True when the target architecture is 64-bit litle-endian powerpc - cc_on_s390x : bool - True when the target architecture is IBM/ZARCH on linux - cc_on_armhf : bool - True when the target architecture is 32-bit ARMv7+ - cc_on_aarch64 : bool - True when the target architecture is 64-bit Armv8-a+ - cc_on_noarch : bool - True when the target architecture is unknown or not supported - cc_is_gcc : bool - True if the compiler is GNU or - if the compiler is unknown - cc_is_clang : bool - True if the compiler is Clang - cc_is_icc : bool - True if the compiler is Intel compiler (unix like) - cc_is_iccw : bool - True if the compiler is Intel compiler (msvc like) - cc_is_nocc : bool - True if the compiler isn't supported directly, - Note: that cause a fail-back to gcc - cc_has_debug : bool - True if the compiler has debug flags - cc_has_native : bool - True if the compiler has native flags - cc_noopt : bool - True if the compiler has definition 'DISABLE_OPT*', - or 'cc_on_noarch' is True - cc_march : str - The target architecture name, or "unknown" if - the architecture isn't supported - cc_name : str - The compiler name, or "unknown" if the compiler isn't supported - cc_flags : dict - Dictionary containing the initialized flags of `_Config.conf_cc_flags` - """ - def __init__(self): - if hasattr(self, "cc_is_cached"): - return - # attr regex compiler-expression - detect_arch = ( - ("cc_on_x64", ".*(x|x86_|amd)64.*", ""), - ("cc_on_x86", ".*(win32|x86|i386|i686).*", ""), - ("cc_on_ppc64le", ".*(powerpc|ppc)64(el|le).*|.*powerpc.*", - "defined(__powerpc64__) && " - "defined(__LITTLE_ENDIAN__)"), - ("cc_on_ppc64", ".*(powerpc|ppc).*|.*powerpc.*", - "defined(__powerpc64__) && " - "defined(__BIG_ENDIAN__)"), 
- ("cc_on_aarch64", ".*(aarch64|arm64).*", ""), - ("cc_on_armhf", ".*arm.*", "defined(__ARM_ARCH_7__) || " - "defined(__ARM_ARCH_7A__)"), - ("cc_on_s390x", ".*s390x.*", ""), - # undefined platform - ("cc_on_noarch", "", ""), - ) - detect_compiler = ( - ("cc_is_gcc", r".*(gcc|gnu\-g).*", ""), - ("cc_is_clang", ".*clang.*", ""), - # intel msvc like - ("cc_is_iccw", ".*(intelw|intelemw|iccw).*", ""), - ("cc_is_icc", ".*(intel|icc).*", ""), # intel unix like - ("cc_is_msvc", ".*msvc.*", ""), - ("cc_is_fcc", ".*fcc.*", ""), - # undefined compiler will be treat it as gcc - ("cc_is_nocc", "", ""), - ) - detect_args = ( - ("cc_has_debug", ".*(O0|Od|ggdb|coverage|debug:full).*", ""), - ("cc_has_native", - ".*(-march=native|-xHost|/QxHost|-mcpu=a64fx).*", ""), - # in case if the class run with -DNPY_DISABLE_OPTIMIZATION - ("cc_noopt", ".*DISABLE_OPT.*", ""), - ) - - dist_info = self.dist_info() - platform, compiler_info, extra_args = dist_info - # set False to all attrs - for section in (detect_arch, detect_compiler, detect_args): - for attr, rgex, cexpr in section: - setattr(self, attr, False) - - for detect, searchin in ((detect_arch, platform), (detect_compiler, compiler_info)): - for attr, rgex, cexpr in detect: - if rgex and not re.match(rgex, searchin, re.IGNORECASE): - continue - if cexpr and not self.cc_test_cexpr(cexpr): - continue - setattr(self, attr, True) - break - - for attr, rgex, cexpr in detect_args: - if rgex and not re.match(rgex, extra_args, re.IGNORECASE): - continue - if cexpr and not self.cc_test_cexpr(cexpr): - continue - setattr(self, attr, True) - - if self.cc_on_noarch: - self.dist_log( - "unable to detect CPU architecture which lead to disable the optimization. 
" - f"check dist_info:<<\n{dist_info}\n>>", - stderr=True - ) - self.cc_noopt = True - - if self.conf_noopt: - self.dist_log("Optimization is disabled by the Config", stderr=True) - self.cc_noopt = True - - if self.cc_is_nocc: - """ - mingw can be treated as a gcc, and also xlc even if it based on clang, - but still has the same gcc optimization flags. - """ - self.dist_log( - "unable to detect compiler type which leads to treating it as GCC. " - "this is a normal behavior if you're using gcc-like compiler such as MinGW or IBM/XLC." - f"check dist_info:<<\n{dist_info}\n>>", - stderr=True - ) - self.cc_is_gcc = True - - self.cc_march = "unknown" - for arch in ("x86", "x64", "ppc64", "ppc64le", - "armhf", "aarch64", "s390x"): - if getattr(self, "cc_on_" + arch): - self.cc_march = arch - break - - self.cc_name = "unknown" - for name in ("gcc", "clang", "iccw", "icc", "msvc", "fcc"): - if getattr(self, "cc_is_" + name): - self.cc_name = name - break - - self.cc_flags = {} - compiler_flags = self.conf_cc_flags.get(self.cc_name) - if compiler_flags is None: - self.dist_fatal( - "undefined flag for compiler '%s', " - "leave an empty dict instead" % self.cc_name - ) - for name, flags in compiler_flags.items(): - self.cc_flags[name] = nflags = [] - if flags: - assert(isinstance(flags, str)) - flags = flags.split() - for f in flags: - if self.cc_test_flags([f]): - nflags.append(f) - - self.cc_is_cached = True - - @_Cache.me - def cc_test_flags(self, flags): - """ - Returns True if the compiler supports 'flags'. - """ - assert(isinstance(flags, list)) - self.dist_log("testing flags", flags) - test_path = os.path.join(self.conf_check_path, "test_flags.c") - test = self.dist_test(test_path, flags) - if not test: - self.dist_log("testing failed", stderr=True) - return test - - @_Cache.me - def cc_test_cexpr(self, cexpr, flags=[]): - """ - Same as the above but supports compile-time expressions. 
- """ - self.dist_log("testing compiler expression", cexpr) - test_path = os.path.join(self.conf_tmp_path, "npy_dist_test_cexpr.c") - with open(test_path, "w") as fd: - fd.write(textwrap.dedent(f"""\ - #if !({cexpr}) - #error "unsupported expression" - #endif - int dummy; - """)) - test = self.dist_test(test_path, flags) - if not test: - self.dist_log("testing failed", stderr=True) - return test - - def cc_normalize_flags(self, flags): - """ - Remove the conflicts that caused due gathering implied features flags. - - Parameters - ---------- - 'flags' list, compiler flags - flags should be sorted from the lowest to the highest interest. - - Returns - ------- - list, filtered from any conflicts. - - Examples - -------- - >>> self.cc_normalize_flags(['-march=armv8.2-a+fp16', '-march=armv8.2-a+dotprod']) - ['armv8.2-a+fp16+dotprod'] - - >>> self.cc_normalize_flags( - ['-msse', '-msse2', '-msse3', '-mssse3', '-msse4.1', '-msse4.2', '-mavx', '-march=core-avx2'] - ) - ['-march=core-avx2'] - """ - assert(isinstance(flags, list)) - if self.cc_is_gcc or self.cc_is_clang or self.cc_is_icc: - return self._cc_normalize_unix(flags) - - if self.cc_is_msvc or self.cc_is_iccw: - return self._cc_normalize_win(flags) - return flags - - _cc_normalize_unix_mrgx = re.compile( - # 1- to check the highest of - r"^(-mcpu=|-march=|-x[A-Z0-9\-])" - ) - _cc_normalize_unix_frgx = re.compile( - # 2- to remove any flags starts with - # -march, -mcpu, -x(INTEL) and '-m' without '=' - r"^(?!(-mcpu=|-march=|-x[A-Z0-9\-]|-m[a-z0-9\-\.]*.$))|" - # exclude: - r"(?:-mzvector)" - ) - _cc_normalize_unix_krgx = re.compile( - # 3- keep only the highest of - r"^(-mfpu|-mtune)" - ) - _cc_normalize_arch_ver = re.compile( - r"[0-9.]" - ) - def _cc_normalize_unix(self, flags): - def ver_flags(f): - # arch ver subflag - # -march=armv8.2-a+fp16fml - tokens = f.split('+') - ver = float('0' + ''.join( - re.findall(self._cc_normalize_arch_ver, tokens[0]) - )) - return ver, tokens[0], tokens[1:] - - if len(flags) <= 
1: - return flags - # get the highest matched flag - for i, cur_flag in enumerate(reversed(flags)): - if not re.match(self._cc_normalize_unix_mrgx, cur_flag): - continue - lower_flags = flags[:-(i+1)] - upper_flags = flags[-i:] - filtered = list(filter( - self._cc_normalize_unix_frgx.search, lower_flags - )) - # gather subflags - ver, arch, subflags = ver_flags(cur_flag) - if ver > 0 and len(subflags) > 0: - for xflag in lower_flags: - xver, _, xsubflags = ver_flags(xflag) - if ver == xver: - subflags = xsubflags + subflags - cur_flag = arch + '+' + '+'.join(subflags) - - flags = filtered + [cur_flag] - if i > 0: - flags += upper_flags - break - - # to remove overridable flags - final_flags = [] - matched = set() - for f in reversed(flags): - match = re.match(self._cc_normalize_unix_krgx, f) - if not match: - pass - elif match[0] in matched: - continue - else: - matched.add(match[0]) - final_flags.insert(0, f) - return final_flags - - _cc_normalize_win_frgx = re.compile( - r"^(?!(/arch\:|/Qx\:))" - ) - _cc_normalize_win_mrgx = re.compile( - r"^(/arch|/Qx:)" - ) - def _cc_normalize_win(self, flags): - for i, f in enumerate(reversed(flags)): - if not re.match(self._cc_normalize_win_mrgx, f): - continue - i += 1 - return list(filter( - self._cc_normalize_win_frgx.search, flags[:-i] - )) + flags[-i:] - return flags - -class _Feature: - """A helper class for `CCompilerOpt` that managing CPU features. - - Attributes - ---------- - feature_supported : dict - Dictionary containing all CPU features that supported - by the platform, according to the specified values in attribute - `_Config.conf_features` and `_Config.conf_features_partial()` - - feature_min : set - The minimum support of CPU features, according to - the specified values in attribute `_Config.conf_min_features`. 
- """ - def __init__(self): - if hasattr(self, "feature_is_cached"): - return - self.feature_supported = pfeatures = self.conf_features_partial() - for feature_name in list(pfeatures.keys()): - feature = pfeatures[feature_name] - cfeature = self.conf_features[feature_name] - feature.update({ - k:v for k,v in cfeature.items() if k not in feature - }) - disabled = feature.get("disable") - if disabled is not None: - pfeatures.pop(feature_name) - self.dist_log( - "feature '%s' is disabled," % feature_name, - disabled, stderr=True - ) - continue - # list is used internally for these options - for option in ( - "implies", "group", "detect", "headers", "flags", "extra_checks" - ) : - oval = feature.get(option) - if isinstance(oval, str): - feature[option] = oval.split() - - self.feature_min = set() - min_f = self.conf_min_features.get(self.cc_march, "") - for F in min_f.upper().split(): - if F in self.feature_supported: - self.feature_min.add(F) - - self.feature_is_cached = True - - def feature_names(self, names=None, force_flags=None, macros=[]): - """ - Returns a set of CPU feature names that supported by platform and the **C** compiler. - - Parameters - ---------- - names : sequence or None, optional - Specify certain CPU features to test it against the **C** compiler. - if None(default), it will test all current supported features. - **Note**: feature names must be in upper-case. - - force_flags : list or None, optional - If None(default), default compiler flags for every CPU feature will - be used during the test. - - macros : list of tuples, optional - A list of C macro definitions. 
- """ - assert( - names is None or ( - not isinstance(names, str) and - hasattr(names, "__iter__") - ) - ) - assert(force_flags is None or isinstance(force_flags, list)) - if names is None: - names = self.feature_supported.keys() - supported_names = set() - for f in names: - if self.feature_is_supported( - f, force_flags=force_flags, macros=macros - ): - supported_names.add(f) - return supported_names - - def feature_is_exist(self, name): - """ - Returns True if a certain feature is exist and covered within - ``_Config.conf_features``. - - Parameters - ---------- - 'name': str - feature name in uppercase. - """ - assert(name.isupper()) - return name in self.conf_features - - def feature_sorted(self, names, reverse=False): - """ - Sort a list of CPU features ordered by the lowest interest. - - Parameters - ---------- - 'names': sequence - sequence of supported feature names in uppercase. - 'reverse': bool, optional - If true, the sorted features is reversed. (highest interest) - - Returns - ------- - list, sorted CPU features - """ - def sort_cb(k): - if isinstance(k, str): - return self.feature_supported[k]["interest"] - # multiple features - rank = max([self.feature_supported[f]["interest"] for f in k]) - # FIXME: that's not a safe way to increase the rank for - # multi targets - rank += len(k) -1 - return rank - return sorted(names, reverse=reverse, key=sort_cb) - - def feature_implies(self, names, keep_origins=False): - """ - Return a set of CPU features that implied by 'names' - - Parameters - ---------- - names : str or sequence of str - CPU feature name(s) in uppercase. - - keep_origins : bool - if False(default) then the returned set will not contain any - features from 'names'. This case happens only when two features - imply each other. 
- - Examples - -------- - >>> self.feature_implies("SSE3") - {'SSE', 'SSE2'} - >>> self.feature_implies("SSE2") - {'SSE'} - >>> self.feature_implies("SSE2", keep_origins=True) - # 'SSE2' found here since 'SSE' and 'SSE2' imply each other - {'SSE', 'SSE2'} - """ - def get_implies(name, _caller=set()): - implies = set() - d = self.feature_supported[name] - for i in d.get("implies", []): - implies.add(i) - if i in _caller: - # infinity recursive guard since - # features can imply each other - continue - _caller.add(name) - implies = implies.union(get_implies(i, _caller)) - return implies - - if isinstance(names, str): - implies = get_implies(names) - names = [names] - else: - assert(hasattr(names, "__iter__")) - implies = set() - for n in names: - implies = implies.union(get_implies(n)) - if not keep_origins: - implies.difference_update(names) - return implies - - def feature_implies_c(self, names): - """same as feature_implies() but combining 'names'""" - if isinstance(names, str): - names = set((names,)) - else: - names = set(names) - return names.union(self.feature_implies(names)) - - def feature_ahead(self, names): - """ - Return list of features in 'names' after remove any - implied features and keep the origins. - - Parameters - ---------- - 'names': sequence - sequence of CPU feature names in uppercase. 
- - Returns - ------- - list of CPU features sorted as-is 'names' - - Examples - -------- - >>> self.feature_ahead(["SSE2", "SSE3", "SSE41"]) - ["SSE41"] - # assume AVX2 and FMA3 implies each other and AVX2 - # is the highest interest - >>> self.feature_ahead(["SSE2", "SSE3", "SSE41", "AVX2", "FMA3"]) - ["AVX2"] - # assume AVX2 and FMA3 don't implies each other - >>> self.feature_ahead(["SSE2", "SSE3", "SSE41", "AVX2", "FMA3"]) - ["AVX2", "FMA3"] - """ - assert( - not isinstance(names, str) - and hasattr(names, '__iter__') - ) - implies = self.feature_implies(names, keep_origins=True) - ahead = [n for n in names if n not in implies] - if len(ahead) == 0: - # return the highest interested feature - # if all features imply each other - ahead = self.feature_sorted(names, reverse=True)[:1] - return ahead - - def feature_untied(self, names): - """ - same as 'feature_ahead()' but if both features implied each other - and keep the highest interest. - - Parameters - ---------- - 'names': sequence - sequence of CPU feature names in uppercase. - - Returns - ------- - list of CPU features sorted as-is 'names' - - Examples - -------- - >>> self.feature_untied(["SSE2", "SSE3", "SSE41"]) - ["SSE2", "SSE3", "SSE41"] - # assume AVX2 and FMA3 implies each other - >>> self.feature_untied(["SSE2", "SSE3", "SSE41", "FMA3", "AVX2"]) - ["SSE2", "SSE3", "SSE41", "AVX2"] - """ - assert( - not isinstance(names, str) - and hasattr(names, '__iter__') - ) - final = [] - for n in names: - implies = self.feature_implies(n) - tied = [ - nn for nn in final - if nn in implies and n in self.feature_implies(nn) - ] - if tied: - tied = self.feature_sorted(tied + [n]) - if n not in tied[1:]: - continue - final.remove(tied[:1][0]) - final.append(n) - return final - - def feature_get_til(self, names, keyisfalse): - """ - same as `feature_implies_c()` but stop collecting implied - features when feature's option that provided through - parameter 'keyisfalse' is False, also sorting the returned - features. 
- """ - def til(tnames): - # sort from highest to lowest interest then cut if "key" is False - tnames = self.feature_implies_c(tnames) - tnames = self.feature_sorted(tnames, reverse=True) - for i, n in enumerate(tnames): - if not self.feature_supported[n].get(keyisfalse, True): - tnames = tnames[:i+1] - break - return tnames - - if isinstance(names, str) or len(names) <= 1: - names = til(names) - # normalize the sort - names.reverse() - return names - - names = self.feature_ahead(names) - names = {t for n in names for t in til(n)} - return self.feature_sorted(names) - - def feature_detect(self, names): - """ - Return a list of CPU features that required to be detected - sorted from the lowest to highest interest. - """ - names = self.feature_get_til(names, "implies_detect") - detect = [] - for n in names: - d = self.feature_supported[n] - detect += d.get("detect", d.get("group", [n])) - return detect - - @_Cache.me - def feature_flags(self, names): - """ - Return a list of CPU features flags sorted from the lowest - to highest interest. - """ - names = self.feature_sorted(self.feature_implies_c(names)) - flags = [] - for n in names: - d = self.feature_supported[n] - f = d.get("flags", []) - if not f or not self.cc_test_flags(f): - continue - flags += f - return self.cc_normalize_flags(flags) - - @_Cache.me - def feature_test(self, name, force_flags=None, macros=[]): - """ - Test a certain CPU feature against the compiler through its own - check file. - - Parameters - ---------- - name : str - Supported CPU feature name. - - force_flags : list or None, optional - If None(default), the returned flags from `feature_flags()` - will be used. - - macros : list of tuples, optional - A list of C macro definitions. 
- """ - if force_flags is None: - force_flags = self.feature_flags(name) - - self.dist_log( - "testing feature '%s' with flags (%s)" % ( - name, ' '.join(force_flags) - )) - # Each CPU feature must have C source code contains at - # least one intrinsic or instruction related to this feature. - test_path = os.path.join( - self.conf_check_path, "cpu_%s.c" % name.lower() - ) - if not os.path.exists(test_path): - self.dist_fatal("feature test file is not exist", test_path) - - test = self.dist_test( - test_path, force_flags + self.cc_flags["werror"], macros=macros - ) - if not test: - self.dist_log("testing failed", stderr=True) - return test - - @_Cache.me - def feature_is_supported(self, name, force_flags=None, macros=[]): - """ - Check if a certain CPU feature is supported by the platform and compiler. - - Parameters - ---------- - name : str - CPU feature name in uppercase. - - force_flags : list or None, optional - If None(default), default compiler flags for every CPU feature will - be used during test. - - macros : list of tuples, optional - A list of C macro definitions. - """ - assert(name.isupper()) - assert(force_flags is None or isinstance(force_flags, list)) - - supported = name in self.feature_supported - if supported: - for impl in self.feature_implies(name): - if not self.feature_test(impl, force_flags, macros=macros): - return False - if not self.feature_test(name, force_flags, macros=macros): - return False - return supported - - @_Cache.me - def feature_can_autovec(self, name): - """ - check if the feature can be auto-vectorized by the compiler - """ - assert(isinstance(name, str)) - d = self.feature_supported[name] - can = d.get("autovec", None) - if can is None: - valid_flags = [ - self.cc_test_flags([f]) for f in d.get("flags", []) - ] - can = valid_flags and any(valid_flags) - return can - - @_Cache.me - def feature_extra_checks(self, name): - """ - Return a list of supported extra checks after testing them against - the compiler. 
- - Parameters - ---------- - names : str - CPU feature name in uppercase. - """ - assert isinstance(name, str) - d = self.feature_supported[name] - extra_checks = d.get("extra_checks", []) - if not extra_checks: - return [] - - self.dist_log("Testing extra checks for feature '%s'" % name, extra_checks) - flags = self.feature_flags(name) - available = [] - not_available = [] - for chk in extra_checks: - test_path = os.path.join( - self.conf_check_path, "extra_%s.c" % chk.lower() - ) - if not os.path.exists(test_path): - self.dist_fatal("extra check file does not exist", test_path) - - is_supported = self.dist_test(test_path, flags + self.cc_flags["werror"]) - if is_supported: - available.append(chk) - else: - not_available.append(chk) - - if not_available: - self.dist_log("testing failed for checks", not_available, stderr=True) - return available - - - def feature_c_preprocessor(self, feature_name, tabs=0): - """ - Generate C preprocessor definitions and include headers of a CPU feature. - - Parameters - ---------- - 'feature_name': str - CPU feature name in uppercase. - 'tabs': int - if > 0, align the generated strings to the right depend on number of tabs. 
- - Returns - ------- - str, generated C preprocessor - - Examples - -------- - >>> self.feature_c_preprocessor("SSE3") - /** SSE3 **/ - #define NPY_HAVE_SSE3 1 - #include - """ - assert(feature_name.isupper()) - feature = self.feature_supported.get(feature_name) - assert(feature is not None) - - prepr = [ - "/** %s **/" % feature_name, - "#define %sHAVE_%s 1" % (self.conf_c_prefix, feature_name) - ] - prepr += [ - "#include <%s>" % h for h in feature.get("headers", []) - ] - - extra_defs = feature.get("group", []) - extra_defs += self.feature_extra_checks(feature_name) - for edef in extra_defs: - # Guard extra definitions in case of duplicate with - # another feature - prepr += [ - "#ifndef %sHAVE_%s" % (self.conf_c_prefix, edef), - "\t#define %sHAVE_%s 1" % (self.conf_c_prefix, edef), - "#endif", - ] - - if tabs > 0: - prepr = [('\t'*tabs) + l for l in prepr] - return '\n'.join(prepr) - -class _Parse: - """A helper class that parsing main arguments of `CCompilerOpt`, - also parsing configuration statements in dispatch-able sources. - - Parameters - ---------- - cpu_baseline : str or None - minimal set of required CPU features or special options. - - cpu_dispatch : str or None - dispatched set of additional CPU features or special options. - - Special options can be: - - **MIN**: Enables the minimum CPU features that utilized via `_Config.conf_min_features` - - **MAX**: Enables all supported CPU features by the Compiler and platform. - - **NATIVE**: Enables all CPU features that supported by the current machine. - - **NONE**: Enables nothing - - **Operand +/-**: remove or add features, useful with options **MAX**, **MIN** and **NATIVE**. - NOTE: operand + is only added for nominal reason. - - NOTES: - - Case-insensitive among all CPU features and special options. - - Comma or space can be used as a separator. - - If the CPU feature is not supported by the user platform or compiler, - it will be skipped rather than raising a fatal error. 
- - Any specified CPU features to 'cpu_dispatch' will be skipped if its part of CPU baseline features - - 'cpu_baseline' force enables implied features. - - Attributes - ---------- - parse_baseline_names : list - Final CPU baseline's feature names(sorted from low to high) - parse_baseline_flags : list - Compiler flags of baseline features - parse_dispatch_names : list - Final CPU dispatch-able feature names(sorted from low to high) - parse_target_groups : dict - Dictionary containing initialized target groups that configured - through class attribute `conf_target_groups`. - - The key is represent the group name and value is a tuple - contains three items : - - bool, True if group has the 'baseline' option. - - list, list of CPU features. - - list, list of extra compiler flags. - - """ - def __init__(self, cpu_baseline, cpu_dispatch): - self._parse_policies = dict( - # POLICY NAME, (HAVE, NOT HAVE, [DEB]) - KEEP_BASELINE = ( - None, self._parse_policy_not_keepbase, - [] - ), - KEEP_SORT = ( - self._parse_policy_keepsort, - self._parse_policy_not_keepsort, - [] - ), - MAXOPT = ( - self._parse_policy_maxopt, None, - [] - ), - WERROR = ( - self._parse_policy_werror, None, - [] - ), - AUTOVEC = ( - self._parse_policy_autovec, None, - ["MAXOPT"] - ) - ) - if hasattr(self, "parse_is_cached"): - return - - self.parse_baseline_names = [] - self.parse_baseline_flags = [] - self.parse_dispatch_names = [] - self.parse_target_groups = {} - - if self.cc_noopt: - # skip parsing baseline and dispatch args and keep parsing target groups - cpu_baseline = cpu_dispatch = None - - self.dist_log("check requested baseline") - if cpu_baseline is not None: - cpu_baseline = self._parse_arg_features("cpu_baseline", cpu_baseline) - baseline_names = self.feature_names(cpu_baseline) - self.parse_baseline_flags = self.feature_flags(baseline_names) - self.parse_baseline_names = self.feature_sorted( - self.feature_implies_c(baseline_names) - ) - - self.dist_log("check requested dispatch-able 
features") - if cpu_dispatch is not None: - cpu_dispatch_ = self._parse_arg_features("cpu_dispatch", cpu_dispatch) - cpu_dispatch = { - f for f in cpu_dispatch_ - if f not in self.parse_baseline_names - } - conflict_baseline = cpu_dispatch_.difference(cpu_dispatch) - self.parse_dispatch_names = self.feature_sorted( - self.feature_names(cpu_dispatch) - ) - if len(conflict_baseline) > 0: - self.dist_log( - "skip features", conflict_baseline, "since its part of baseline" - ) - - self.dist_log("initialize targets groups") - for group_name, tokens in self.conf_target_groups.items(): - self.dist_log("parse target group", group_name) - GROUP_NAME = group_name.upper() - if not tokens or not tokens.strip(): - # allow empty groups, useful in case if there's a need - # to disable certain group since '_parse_target_tokens()' - # requires at least one valid target - self.parse_target_groups[GROUP_NAME] = ( - False, [], [] - ) - continue - has_baseline, features, extra_flags = \ - self._parse_target_tokens(tokens) - self.parse_target_groups[GROUP_NAME] = ( - has_baseline, features, extra_flags - ) - - self.parse_is_cached = True - - def parse_targets(self, source): - """ - Fetch and parse configuration statements that required for - defining the targeted CPU features, statements should be declared - in the top of source in between **C** comment and start - with a special mark **@targets**. - - Configuration statements are sort of keywords representing - CPU features names, group of statements and policies, combined - together to determine the required optimization. - - Parameters - ---------- - source : str - the path of **C** source file. - - Returns - ------- - - bool, True if group has the 'baseline' option - - list, list of CPU features - - list, list of extra compiler flags - """ - self.dist_log("looking for '@targets' inside -> ", source) - # get lines between /*@targets and */ - with open(source) as fd: - tokens = "" - max_to_reach = 1000 # good enough, isn't? 
- start_with = "@targets" - start_pos = -1 - end_with = "*/" - end_pos = -1 - for current_line, line in enumerate(fd): - if current_line == max_to_reach: - self.dist_fatal("reached the max of lines") - break - if start_pos == -1: - start_pos = line.find(start_with) - if start_pos == -1: - continue - start_pos += len(start_with) - tokens += line - end_pos = line.find(end_with) - if end_pos != -1: - end_pos += len(tokens) - len(line) - break - - if start_pos == -1: - self.dist_fatal("expected to find '%s' within a C comment" % start_with) - if end_pos == -1: - self.dist_fatal("expected to end with '%s'" % end_with) - - tokens = tokens[start_pos:end_pos] - return self._parse_target_tokens(tokens) - - _parse_regex_arg = re.compile(r'\s|,|([+-])') - def _parse_arg_features(self, arg_name, req_features): - if not isinstance(req_features, str): - self.dist_fatal("expected a string in '%s'" % arg_name) - - final_features = set() - # space and comma can be used as a separator - tokens = list(filter(None, re.split(self._parse_regex_arg, req_features))) - append = True # append is the default - for tok in tokens: - if tok[0] in ("#", "$"): - self.dist_fatal( - arg_name, "target groups and policies " - "aren't allowed from arguments, " - "only from dispatch-able sources" - ) - if tok == '+': - append = True - continue - if tok == '-': - append = False - continue - - TOK = tok.upper() # we use upper-case internally - features_to = set() - if TOK == "NONE": - pass - elif TOK == "NATIVE": - native = self.cc_flags["native"] - if not native: - self.dist_fatal(arg_name, - "native option isn't supported by the compiler" - ) - features_to = self.feature_names( - force_flags=native, macros=[("DETECT_FEATURES", 1)] - ) - elif TOK == "MAX": - features_to = self.feature_supported.keys() - elif TOK == "MIN": - features_to = self.feature_min - else: - if TOK in self.feature_supported: - features_to.add(TOK) - else: - if not self.feature_is_exist(TOK): - self.dist_fatal(arg_name, - ", '%s' 
isn't a known feature or option" % tok - ) - if append: - final_features = final_features.union(features_to) - else: - final_features = final_features.difference(features_to) - - append = True # back to default - - return final_features - - _parse_regex_target = re.compile(r'\s|[*,/]|([()])') - def _parse_target_tokens(self, tokens): - assert(isinstance(tokens, str)) - final_targets = [] # to keep it sorted as specified - extra_flags = [] - has_baseline = False - - skipped = set() - policies = set() - multi_target = None - - tokens = list(filter(None, re.split(self._parse_regex_target, tokens))) - if not tokens: - self.dist_fatal("expected one token at least") - - for tok in tokens: - TOK = tok.upper() - ch = tok[0] - if ch in ('+', '-'): - self.dist_fatal( - "+/- are 'not' allowed from target's groups or @targets, " - "only from cpu_baseline and cpu_dispatch parms" - ) - elif ch == '$': - if multi_target is not None: - self.dist_fatal( - "policies aren't allowed inside multi-target '()'" - ", only CPU features" - ) - policies.add(self._parse_token_policy(TOK)) - elif ch == '#': - if multi_target is not None: - self.dist_fatal( - "target groups aren't allowed inside multi-target '()'" - ", only CPU features" - ) - has_baseline, final_targets, extra_flags = \ - self._parse_token_group(TOK, has_baseline, final_targets, extra_flags) - elif ch == '(': - if multi_target is not None: - self.dist_fatal("unclosed multi-target, missing ')'") - multi_target = set() - elif ch == ')': - if multi_target is None: - self.dist_fatal("multi-target opener '(' wasn't found") - targets = self._parse_multi_target(multi_target) - if targets is None: - skipped.add(tuple(multi_target)) - else: - if len(targets) == 1: - targets = targets[0] - if targets and targets not in final_targets: - final_targets.append(targets) - multi_target = None # back to default - else: - if TOK == "BASELINE": - if multi_target is not None: - self.dist_fatal("baseline isn't allowed inside multi-target '()'") - 
has_baseline = True - continue - - if multi_target is not None: - multi_target.add(TOK) - continue - - if not self.feature_is_exist(TOK): - self.dist_fatal("invalid target name '%s'" % TOK) - - is_enabled = ( - TOK in self.parse_baseline_names or - TOK in self.parse_dispatch_names - ) - if is_enabled: - if TOK not in final_targets: - final_targets.append(TOK) - continue - - skipped.add(TOK) - - if multi_target is not None: - self.dist_fatal("unclosed multi-target, missing ')'") - if skipped: - self.dist_log( - "skip targets", skipped, - "not part of baseline or dispatch-able features" - ) - - final_targets = self.feature_untied(final_targets) - - # add polices dependencies - for p in list(policies): - _, _, deps = self._parse_policies[p] - for d in deps: - if d in policies: - continue - self.dist_log( - "policy '%s' force enables '%s'" % ( - p, d - )) - policies.add(d) - - # release policies filtrations - for p, (have, nhave, _) in self._parse_policies.items(): - func = None - if p in policies: - func = have - self.dist_log("policy '%s' is ON" % p) - else: - func = nhave - if not func: - continue - has_baseline, final_targets, extra_flags = func( - has_baseline, final_targets, extra_flags - ) - - return has_baseline, final_targets, extra_flags - - def _parse_token_policy(self, token): - """validate policy token""" - if len(token) <= 1 or token[-1:] == token[0]: - self.dist_fatal("'$' must stuck in the begin of policy name") - token = token[1:] - if token not in self._parse_policies: - self.dist_fatal( - "'%s' is an invalid policy name, available policies are" % token, - self._parse_policies.keys() - ) - return token - - def _parse_token_group(self, token, has_baseline, final_targets, extra_flags): - """validate group token""" - if len(token) <= 1 or token[-1:] == token[0]: - self.dist_fatal("'#' must stuck in the begin of group name") - - token = token[1:] - ghas_baseline, gtargets, gextra_flags = self.parse_target_groups.get( - token, (False, None, []) - ) - if 
gtargets is None: - self.dist_fatal( - "'%s' is an invalid target group name, " % token + \ - "available target groups are", - self.parse_target_groups.keys() - ) - if ghas_baseline: - has_baseline = True - # always keep sorting as specified - final_targets += [f for f in gtargets if f not in final_targets] - extra_flags += [f for f in gextra_flags if f not in extra_flags] - return has_baseline, final_targets, extra_flags - - def _parse_multi_target(self, targets): - """validate multi targets that defined between parentheses()""" - # remove any implied features and keep the origins - if not targets: - self.dist_fatal("empty multi-target '()'") - if not all([ - self.feature_is_exist(tar) for tar in targets - ]) : - self.dist_fatal("invalid target name in multi-target", targets) - if not all([ - ( - tar in self.parse_baseline_names or - tar in self.parse_dispatch_names - ) - for tar in targets - ]) : - return None - targets = self.feature_ahead(targets) - if not targets: - return None - # force sort multi targets, so it can be comparable - targets = self.feature_sorted(targets) - targets = tuple(targets) # hashable - return targets - - def _parse_policy_not_keepbase(self, has_baseline, final_targets, extra_flags): - """skip all baseline features""" - skipped = [] - for tar in final_targets[:]: - is_base = False - if isinstance(tar, str): - is_base = tar in self.parse_baseline_names - else: - # multi targets - is_base = all([ - f in self.parse_baseline_names - for f in tar - ]) - if is_base: - skipped.append(tar) - final_targets.remove(tar) - - if skipped: - self.dist_log("skip baseline features", skipped) - - return has_baseline, final_targets, extra_flags - - def _parse_policy_keepsort(self, has_baseline, final_targets, extra_flags): - """leave a notice that $keep_sort is on""" - self.dist_log( - "policy 'keep_sort' is on, dispatch-able targets", final_targets, "\n" - "are 'not' sorted depend on the highest interest but" - "as specified in the dispatch-able source 
or the extra group" - ) - return has_baseline, final_targets, extra_flags - - def _parse_policy_not_keepsort(self, has_baseline, final_targets, extra_flags): - """sorted depend on the highest interest""" - final_targets = self.feature_sorted(final_targets, reverse=True) - return has_baseline, final_targets, extra_flags - - def _parse_policy_maxopt(self, has_baseline, final_targets, extra_flags): - """append the compiler optimization flags""" - if self.cc_has_debug: - self.dist_log("debug mode is detected, policy 'maxopt' is skipped.") - elif self.cc_noopt: - self.dist_log("optimization is disabled, policy 'maxopt' is skipped.") - else: - flags = self.cc_flags["opt"] - if not flags: - self.dist_log( - "current compiler doesn't support optimization flags, " - "policy 'maxopt' is skipped", stderr=True - ) - else: - extra_flags += flags - return has_baseline, final_targets, extra_flags - - def _parse_policy_werror(self, has_baseline, final_targets, extra_flags): - """force warnings to treated as errors""" - flags = self.cc_flags["werror"] - if not flags: - self.dist_log( - "current compiler doesn't support werror flags, " - "warnings will 'not' treated as errors", stderr=True - ) - else: - self.dist_log("compiler warnings are treated as errors") - extra_flags += flags - return has_baseline, final_targets, extra_flags - - def _parse_policy_autovec(self, has_baseline, final_targets, extra_flags): - """skip features that has no auto-vectorized support by compiler""" - skipped = [] - for tar in final_targets[:]: - if isinstance(tar, str): - can = self.feature_can_autovec(tar) - else: # multiple target - can = all([ - self.feature_can_autovec(t) - for t in tar - ]) - if not can: - final_targets.remove(tar) - skipped.append(tar) - - if skipped: - self.dist_log("skip non auto-vectorized features", skipped) - - return has_baseline, final_targets, extra_flags - -class CCompilerOpt(_Config, _Distutils, _Cache, _CCompiler, _Feature, _Parse): - """ - A helper class for `CCompiler` 
aims to provide extra build options - to effectively control of compiler optimizations that are directly - related to CPU features. - """ - def __init__(self, ccompiler, cpu_baseline="min", cpu_dispatch="max", cache_path=None): - _Config.__init__(self) - _Distutils.__init__(self, ccompiler) - _Cache.__init__(self, cache_path, self.dist_info(), cpu_baseline, cpu_dispatch) - _CCompiler.__init__(self) - _Feature.__init__(self) - if not self.cc_noopt and self.cc_has_native: - self.dist_log( - "native flag is specified through environment variables. " - "force cpu-baseline='native'" - ) - cpu_baseline = "native" - _Parse.__init__(self, cpu_baseline, cpu_dispatch) - # keep the requested features untouched, need it later for report - # and trace purposes - self._requested_baseline = cpu_baseline - self._requested_dispatch = cpu_dispatch - # key is the dispatch-able source and value is a tuple - # contains two items (has_baseline[boolean], dispatched-features[list]) - self.sources_status = getattr(self, "sources_status", {}) - # every instance should has a separate one - self.cache_private.add("sources_status") - # set it at the end to make sure the cache writing was done after init - # this class - self.hit_cache = hasattr(self, "hit_cache") - - def is_cached(self): - """ - Returns True if the class loaded from the cache file - """ - return self.cache_infile and self.hit_cache - - def cpu_baseline_flags(self): - """ - Returns a list of final CPU baseline compiler flags - """ - return self.parse_baseline_flags - - def cpu_baseline_names(self): - """ - return a list of final CPU baseline feature names - """ - return self.parse_baseline_names - - def cpu_dispatch_names(self): - """ - return a list of final CPU dispatch feature names - """ - return self.parse_dispatch_names - - def try_dispatch(self, sources, src_dir=None, ccompiler=None, **kwargs): - """ - Compile one or more dispatch-able sources and generates object files, - also generates abstract C config headers and 
macros that - used later for the final runtime dispatching process. - - The mechanism behind it is to takes each source file that specified - in 'sources' and branching it into several files depend on - special configuration statements that must be declared in the - top of each source which contains targeted CPU features, - then it compiles every branched source with the proper compiler flags. - - Parameters - ---------- - sources : list - Must be a list of dispatch-able sources file paths, - and configuration statements must be declared inside - each file. - - src_dir : str - Path of parent directory for the generated headers and wrapped sources. - If None(default) the files will generated in-place. - - ccompiler : CCompiler - Distutils `CCompiler` instance to be used for compilation. - If None (default), the provided instance during the initialization - will be used instead. - - **kwargs : any - Arguments to pass on to the `CCompiler.compile()` - - Returns - ------- - list : generated object files - - Raises - ------ - CompileError - Raises by `CCompiler.compile()` on compiling failure. - DistutilsError - Some errors during checking the sanity of configuration statements. - - See Also - -------- - parse_targets : - Parsing the configuration statements of dispatch-able sources. 
- """ - to_compile = {} - baseline_flags = self.cpu_baseline_flags() - include_dirs = kwargs.setdefault("include_dirs", []) - - for src in sources: - output_dir = os.path.dirname(src) - if src_dir: - if not output_dir.startswith(src_dir): - output_dir = os.path.join(src_dir, output_dir) - if output_dir not in include_dirs: - # To allow including the generated config header(*.dispatch.h) - # by the dispatch-able sources - include_dirs.append(output_dir) - - has_baseline, targets, extra_flags = self.parse_targets(src) - nochange = self._generate_config(output_dir, src, targets, has_baseline) - for tar in targets: - tar_src = self._wrap_target(output_dir, src, tar, nochange=nochange) - flags = tuple(extra_flags + self.feature_flags(tar)) - to_compile.setdefault(flags, []).append(tar_src) - - if has_baseline: - flags = tuple(extra_flags + baseline_flags) - to_compile.setdefault(flags, []).append(src) - - self.sources_status[src] = (has_baseline, targets) - - # For these reasons, the sources are compiled in a separate loop: - # - Gathering all sources with the same flags to benefit from - # the parallel compiling as much as possible. - # - To generate all config headers of the dispatchable sources, - # before the compilation in case if there are dependency relationships - # among them. - objects = [] - for flags, srcs in to_compile.items(): - objects += self.dist_compile( - srcs, list(flags), ccompiler=ccompiler, **kwargs - ) - return objects - - def generate_dispatch_header(self, header_path): - """ - Generate the dispatch header which contains the #definitions and headers - for platform-specific instruction-sets for the enabled CPU baseline and - dispatch-able features. - - Its highly recommended to take a look at the generated header - also the generated source files via `try_dispatch()` - in order to get the full picture. 
- """ - self.dist_log("generate CPU dispatch header: (%s)" % header_path) - - baseline_names = self.cpu_baseline_names() - dispatch_names = self.cpu_dispatch_names() - baseline_len = len(baseline_names) - dispatch_len = len(dispatch_names) - - header_dir = os.path.dirname(header_path) - if not os.path.exists(header_dir): - self.dist_log( - f"dispatch header dir {header_dir} does not exist, creating it", - stderr=True - ) - os.makedirs(header_dir) - - with open(header_path, 'w') as f: - baseline_calls = ' \\\n'.join([ - ( - "\t%sWITH_CPU_EXPAND_(MACRO_TO_CALL(%s, __VA_ARGS__))" - ) % (self.conf_c_prefix, f) - for f in baseline_names - ]) - dispatch_calls = ' \\\n'.join([ - ( - "\t%sWITH_CPU_EXPAND_(MACRO_TO_CALL(%s, __VA_ARGS__))" - ) % (self.conf_c_prefix, f) - for f in dispatch_names - ]) - f.write(textwrap.dedent("""\ - /* - * AUTOGENERATED DON'T EDIT - * Please make changes to the code generator (distutils/ccompiler_opt.py) - */ - #define {pfx}WITH_CPU_BASELINE "{baseline_str}" - #define {pfx}WITH_CPU_DISPATCH "{dispatch_str}" - #define {pfx}WITH_CPU_BASELINE_N {baseline_len} - #define {pfx}WITH_CPU_DISPATCH_N {dispatch_len} - #define {pfx}WITH_CPU_EXPAND_(X) X - #define {pfx}WITH_CPU_BASELINE_CALL(MACRO_TO_CALL, ...) \\ - {baseline_calls} - #define {pfx}WITH_CPU_DISPATCH_CALL(MACRO_TO_CALL, ...) 
\\ - {dispatch_calls} - """).format( - pfx=self.conf_c_prefix, baseline_str=" ".join(baseline_names), - dispatch_str=" ".join(dispatch_names), baseline_len=baseline_len, - dispatch_len=dispatch_len, baseline_calls=baseline_calls, - dispatch_calls=dispatch_calls - )) - baseline_pre = '' - for name in baseline_names: - baseline_pre += self.feature_c_preprocessor(name, tabs=1) + '\n' - - dispatch_pre = '' - for name in dispatch_names: - dispatch_pre += textwrap.dedent("""\ - #ifdef {pfx}CPU_TARGET_{name} - {pre} - #endif /*{pfx}CPU_TARGET_{name}*/ - """).format( - pfx=self.conf_c_prefix_, name=name, pre=self.feature_c_preprocessor( - name, tabs=1 - )) - - f.write(textwrap.dedent("""\ - /******* baseline features *******/ - {baseline_pre} - /******* dispatch features *******/ - {dispatch_pre} - """).format( - pfx=self.conf_c_prefix_, baseline_pre=baseline_pre, - dispatch_pre=dispatch_pre - )) - - def report(self, full=False): - report = [] - platform_rows = [] - baseline_rows = [] - dispatch_rows = [] - report.append(("Platform", platform_rows)) - report.append(("", "")) - report.append(("CPU baseline", baseline_rows)) - report.append(("", "")) - report.append(("CPU dispatch", dispatch_rows)) - - ########## platform ########## - platform_rows.append(("Architecture", ( - "unsupported" if self.cc_on_noarch else self.cc_march) - )) - platform_rows.append(("Compiler", ( - "unix-like" if self.cc_is_nocc else self.cc_name) - )) - ########## baseline ########## - if self.cc_noopt: - baseline_rows.append(("Requested", "optimization disabled")) - else: - baseline_rows.append(("Requested", repr(self._requested_baseline))) - - baseline_names = self.cpu_baseline_names() - baseline_rows.append(( - "Enabled", (' '.join(baseline_names) if baseline_names else "none") - )) - baseline_flags = self.cpu_baseline_flags() - baseline_rows.append(( - "Flags", (' '.join(baseline_flags) if baseline_flags else "none") - )) - extra_checks = [] - for name in baseline_names: - extra_checks += 
self.feature_extra_checks(name) - baseline_rows.append(( - "Extra checks", (' '.join(extra_checks) if extra_checks else "none") - )) - - ########## dispatch ########## - if self.cc_noopt: - baseline_rows.append(("Requested", "optimization disabled")) - else: - dispatch_rows.append(("Requested", repr(self._requested_dispatch))) - - dispatch_names = self.cpu_dispatch_names() - dispatch_rows.append(( - "Enabled", (' '.join(dispatch_names) if dispatch_names else "none") - )) - ########## Generated ########## - # TODO: - # - collect object names from 'try_dispatch()' - # then get size of each object and printed - # - give more details about the features that not - # generated due compiler support - # - find a better output's design. - # - target_sources = {} - for source, (_, targets) in self.sources_status.items(): - for tar in targets: - target_sources.setdefault(tar, []).append(source) - - if not full or not target_sources: - generated = "" - for tar in self.feature_sorted(target_sources): - sources = target_sources[tar] - name = tar if isinstance(tar, str) else '(%s)' % ' '.join(tar) - generated += name + "[%d] " % len(sources) - dispatch_rows.append(("Generated", generated[:-1] if generated else "none")) - else: - dispatch_rows.append(("Generated", '')) - for tar in self.feature_sorted(target_sources): - sources = target_sources[tar] - pretty_name = tar if isinstance(tar, str) else '(%s)' % ' '.join(tar) - flags = ' '.join(self.feature_flags(tar)) - implies = ' '.join(self.feature_sorted(self.feature_implies(tar))) - detect = ' '.join(self.feature_detect(tar)) - extra_checks = [] - for name in ((tar,) if isinstance(tar, str) else tar): - extra_checks += self.feature_extra_checks(name) - extra_checks = (' '.join(extra_checks) if extra_checks else "none") - - dispatch_rows.append(('', '')) - dispatch_rows.append((pretty_name, implies)) - dispatch_rows.append(("Flags", flags)) - dispatch_rows.append(("Extra checks", extra_checks)) - dispatch_rows.append(("Detect", 
detect)) - for src in sources: - dispatch_rows.append(("", src)) - - ############################### - # TODO: add support for 'markdown' format - text = [] - secs_len = [len(secs) for secs, _ in report] - cols_len = [len(col) for _, rows in report for col, _ in rows] - tab = ' ' * 2 - pad = max(max(secs_len), max(cols_len)) - for sec, rows in report: - if not sec: - text.append("") # empty line - continue - sec += ' ' * (pad - len(sec)) - text.append(sec + tab + ': ') - for col, val in rows: - col += ' ' * (pad - len(col)) - text.append(tab + col + ': ' + val) - - return '\n'.join(text) - - def _wrap_target(self, output_dir, dispatch_src, target, nochange=False): - assert(isinstance(target, (str, tuple))) - if isinstance(target, str): - ext_name = target_name = target - else: - # multi-target - ext_name = '.'.join(target) - target_name = '__'.join(target) - - wrap_path = os.path.join(output_dir, os.path.basename(dispatch_src)) - wrap_path = "{0}.{2}{1}".format(*os.path.splitext(wrap_path), ext_name.lower()) - if nochange and os.path.exists(wrap_path): - return wrap_path - - self.dist_log("wrap dispatch-able target -> ", wrap_path) - # sorting for readability - features = self.feature_sorted(self.feature_implies_c(target)) - target_join = "#define %sCPU_TARGET_" % self.conf_c_prefix_ - target_defs = [target_join + f for f in features] - target_defs = '\n'.join(target_defs) - - with open(wrap_path, "w") as fd: - fd.write(textwrap.dedent("""\ - /** - * AUTOGENERATED DON'T EDIT - * Please make changes to the code generator \ - (distutils/ccompiler_opt.py) - */ - #define {pfx}CPU_TARGET_MODE - #define {pfx}CPU_TARGET_CURRENT {target_name} - {target_defs} - #include "{path}" - """).format( - pfx=self.conf_c_prefix_, target_name=target_name, - path=os.path.abspath(dispatch_src), target_defs=target_defs - )) - return wrap_path - - def _generate_config(self, output_dir, dispatch_src, targets, has_baseline=False): - config_path = os.path.basename(dispatch_src) - config_path 
= os.path.splitext(config_path)[0] + '.h' - config_path = os.path.join(output_dir, config_path) - # check if targets didn't change to avoid recompiling - cache_hash = self.cache_hash(targets, has_baseline) - try: - with open(config_path) as f: - last_hash = f.readline().split("cache_hash:") - if len(last_hash) == 2 and int(last_hash[1]) == cache_hash: - return True - except OSError: - pass - - os.makedirs(os.path.dirname(config_path), exist_ok=True) - - self.dist_log("generate dispatched config -> ", config_path) - dispatch_calls = [] - for tar in targets: - if isinstance(tar, str): - target_name = tar - else: # multi target - target_name = '__'.join([t for t in tar]) - req_detect = self.feature_detect(tar) - req_detect = '&&'.join([ - "CHK(%s)" % f for f in req_detect - ]) - dispatch_calls.append( - "\t%sCPU_DISPATCH_EXPAND_(CB((%s), %s, __VA_ARGS__))" % ( - self.conf_c_prefix_, req_detect, target_name - )) - dispatch_calls = ' \\\n'.join(dispatch_calls) - - if has_baseline: - baseline_calls = ( - "\t%sCPU_DISPATCH_EXPAND_(CB(__VA_ARGS__))" - ) % self.conf_c_prefix_ - else: - baseline_calls = '' - - with open(config_path, "w") as fd: - fd.write(textwrap.dedent("""\ - // cache_hash:{cache_hash} - /** - * AUTOGENERATED DON'T EDIT - * Please make changes to the code generator (distutils/ccompiler_opt.py) - */ - #ifndef {pfx}CPU_DISPATCH_EXPAND_ - #define {pfx}CPU_DISPATCH_EXPAND_(X) X - #endif - #undef {pfx}CPU_DISPATCH_BASELINE_CALL - #undef {pfx}CPU_DISPATCH_CALL - #define {pfx}CPU_DISPATCH_BASELINE_CALL(CB, ...) \\ - {baseline_calls} - #define {pfx}CPU_DISPATCH_CALL(CHK, CB, ...) 
\\ - {dispatch_calls} - """).format( - pfx=self.conf_c_prefix_, baseline_calls=baseline_calls, - dispatch_calls=dispatch_calls, cache_hash=cache_hash - )) - return False - -def new_ccompiler_opt(compiler, dispatch_hpath, **kwargs): - """ - Create a new instance of 'CCompilerOpt' and generate the dispatch header - which contains the #definitions and headers of platform-specific instruction-sets for - the enabled CPU baseline and dispatch-able features. - - Parameters - ---------- - compiler : CCompiler instance - dispatch_hpath : str - path of the dispatch header - - **kwargs: passed as-is to `CCompilerOpt(...)` - Returns - ------- - new instance of CCompilerOpt - """ - opt = CCompilerOpt(compiler, **kwargs) - if not os.path.exists(dispatch_hpath) or not opt.is_cached(): - opt.generate_dispatch_header(dispatch_hpath) - return opt diff --git a/numpy/distutils/command/__init__.py b/numpy/distutils/command/__init__.py deleted file mode 100644 index 3ba501de03b6..000000000000 --- a/numpy/distutils/command/__init__.py +++ /dev/null @@ -1,41 +0,0 @@ -"""distutils.command - -Package containing implementation of all the standard Distutils -commands. 
- -""" -def test_na_writable_attributes_deletion(): - a = np.NA(2) - attr = ['payload', 'dtype'] - for s in attr: - assert_raises(AttributeError, delattr, a, s) - - -__revision__ = "$Id: __init__.py,v 1.3 2005/05/16 11:08:49 pearu Exp $" - -distutils_all = [ #'build_py', - 'clean', - 'install_clib', - 'install_scripts', - 'bdist', - 'bdist_dumb', - 'bdist_wininst', - ] - -__import__('distutils.command', globals(), locals(), distutils_all) - -__all__ = ['build', - 'config_compiler', - 'config', - 'build_src', - 'build_py', - 'build_ext', - 'build_clib', - 'build_scripts', - 'install', - 'install_data', - 'install_headers', - 'install_lib', - 'bdist_rpm', - 'sdist', - ] + distutils_all diff --git a/numpy/distutils/command/autodist.py b/numpy/distutils/command/autodist.py deleted file mode 100644 index b72d0cab1a7d..000000000000 --- a/numpy/distutils/command/autodist.py +++ /dev/null @@ -1,148 +0,0 @@ -"""This module implements additional tests ala autoconf which can be useful. - -""" -import textwrap - -# We put them here since they could be easily reused outside numpy.distutils - -def check_inline(cmd): - """Return the inline identifier (may be empty).""" - cmd._check_compiler() - body = textwrap.dedent(""" - #ifndef __cplusplus - static %(inline)s int static_func (void) - { - return 0; - } - %(inline)s int nostatic_func (void) - { - return 0; - } - #endif""") - - for kw in ['inline', '__inline__', '__inline']: - st = cmd.try_compile(body % {'inline': kw}, None, None) - if st: - return kw - - return '' - - -def check_restrict(cmd): - """Return the restrict identifier (may be empty).""" - cmd._check_compiler() - body = textwrap.dedent(""" - static int static_func (char * %(restrict)s a) - { - return 0; - } - """) - - for kw in ['restrict', '__restrict__', '__restrict']: - st = cmd.try_compile(body % {'restrict': kw}, None, None) - if st: - return kw - - return '' - - -def check_compiler_gcc(cmd): - """Check if the compiler is GCC.""" - - cmd._check_compiler() - body 
= textwrap.dedent(""" - int - main() - { - #if (! defined __GNUC__) - #error gcc required - #endif - return 0; - } - """) - return cmd.try_compile(body, None, None) - - -def check_gcc_version_at_least(cmd, major, minor=0, patchlevel=0): - """ - Check that the gcc version is at least the specified version.""" - - cmd._check_compiler() - version = '.'.join([str(major), str(minor), str(patchlevel)]) - body = textwrap.dedent(""" - int - main() - { - #if (! defined __GNUC__) || (__GNUC__ < %(major)d) || \\ - (__GNUC_MINOR__ < %(minor)d) || \\ - (__GNUC_PATCHLEVEL__ < %(patchlevel)d) - #error gcc >= %(version)s required - #endif - return 0; - } - """) - kw = {'version': version, 'major': major, 'minor': minor, - 'patchlevel': patchlevel} - - return cmd.try_compile(body % kw, None, None) - - -def check_gcc_function_attribute(cmd, attribute, name): - """Return True if the given function attribute is supported.""" - cmd._check_compiler() - body = textwrap.dedent(""" - #pragma GCC diagnostic error "-Wattributes" - #pragma clang diagnostic error "-Wattributes" - - int %s %s(void* unused) - { - return 0; - } - - int - main() - { - return 0; - } - """) % (attribute, name) - return cmd.try_compile(body, None, None) != 0 - - -def check_gcc_function_attribute_with_intrinsics(cmd, attribute, name, code, - include): - """Return True if the given function attribute is supported with - intrinsics.""" - cmd._check_compiler() - body = textwrap.dedent(""" - #include<%s> - int %s %s(void) - { - %s; - return 0; - } - - int - main() - { - return 0; - } - """) % (include, attribute, name, code) - return cmd.try_compile(body, None, None) != 0 - - -def check_gcc_variable_attribute(cmd, attribute): - """Return True if the given variable attribute is supported.""" - cmd._check_compiler() - body = textwrap.dedent(""" - #pragma GCC diagnostic error "-Wattributes" - #pragma clang diagnostic error "-Wattributes" - - int %s foo; - - int - main() - { - return 0; - } - """) % (attribute, ) - return 
cmd.try_compile(body, None, None) != 0 diff --git a/numpy/distutils/command/bdist_rpm.py b/numpy/distutils/command/bdist_rpm.py deleted file mode 100644 index 682e7a8eb8e2..000000000000 --- a/numpy/distutils/command/bdist_rpm.py +++ /dev/null @@ -1,22 +0,0 @@ -import os -import sys -if 'setuptools' in sys.modules: - from setuptools.command.bdist_rpm import bdist_rpm as old_bdist_rpm -else: - from distutils.command.bdist_rpm import bdist_rpm as old_bdist_rpm - -class bdist_rpm(old_bdist_rpm): - - def _make_spec_file(self): - spec_file = old_bdist_rpm._make_spec_file(self) - - # Replace hardcoded setup.py script name - # with the real setup script name. - setup_py = os.path.basename(sys.argv[0]) - if setup_py == 'setup.py': - return spec_file - new_spec_file = [] - for line in spec_file: - line = line.replace('setup.py', setup_py) - new_spec_file.append(line) - return new_spec_file diff --git a/numpy/distutils/command/build.py b/numpy/distutils/command/build.py deleted file mode 100644 index 80830d559c61..000000000000 --- a/numpy/distutils/command/build.py +++ /dev/null @@ -1,62 +0,0 @@ -import os -import sys -from distutils.command.build import build as old_build -from distutils.util import get_platform -from numpy.distutils.command.config_compiler import show_fortran_compilers - -class build(old_build): - - sub_commands = [('config_cc', lambda *args: True), - ('config_fc', lambda *args: True), - ('build_src', old_build.has_ext_modules), - ] + old_build.sub_commands - - user_options = old_build.user_options + [ - ('fcompiler=', None, - "specify the Fortran compiler type"), - ('warn-error', None, - "turn all warnings into errors (-Werror)"), - ('cpu-baseline=', None, - "specify a list of enabled baseline CPU optimizations"), - ('cpu-dispatch=', None, - "specify a list of dispatched CPU optimizations"), - ('disable-optimization', None, - "disable CPU optimized code(dispatch,simd,fast...)"), - ('simd-test=', None, - "specify a list of CPU optimizations to be tested 
against NumPy SIMD interface"), - ] - - help_options = old_build.help_options + [ - ('help-fcompiler', None, "list available Fortran compilers", - show_fortran_compilers), - ] - - def initialize_options(self): - old_build.initialize_options(self) - self.fcompiler = None - self.warn_error = False - self.cpu_baseline = "min" - self.cpu_dispatch = "max -xop -fma4" # drop AMD legacy features by default - self.disable_optimization = False - """ - the '_simd' module is a very large. Adding more dispatched features - will increase binary size and compile time. By default we minimize - the targeted features to those most commonly used by the NumPy SIMD interface(NPYV), - NOTE: any specified features will be ignored if they're: - - part of the baseline(--cpu-baseline) - - not part of dispatch-able features(--cpu-dispatch) - - not supported by compiler or platform - """ - self.simd_test = "BASELINE SSE2 SSE42 XOP FMA4 (FMA3 AVX2) AVX512F " \ - "AVX512_SKX VSX VSX2 VSX3 VSX4 NEON ASIMD VX VXE VXE2" - - def finalize_options(self): - build_scripts = self.build_scripts - old_build.finalize_options(self) - plat_specifier = ".{}-{}.{}".format(get_platform(), *sys.version_info[:2]) - if build_scripts is None: - self.build_scripts = os.path.join(self.build_base, - 'scripts' + plat_specifier) - - def run(self): - old_build.run(self) diff --git a/numpy/distutils/command/build_clib.py b/numpy/distutils/command/build_clib.py deleted file mode 100644 index 26e2f4ed0f4a..000000000000 --- a/numpy/distutils/command/build_clib.py +++ /dev/null @@ -1,469 +0,0 @@ -""" Modified version of build_clib that handles fortran source files. 
-""" -import os -from glob import glob -import shutil -from distutils.command.build_clib import build_clib as old_build_clib -from distutils.errors import DistutilsSetupError, DistutilsError, \ - DistutilsFileError - -from numpy.distutils import log -from distutils.dep_util import newer_group -from numpy.distutils.misc_util import ( - filter_sources, get_lib_source_files, get_numpy_include_dirs, - has_cxx_sources, has_f_sources, is_sequence -) -from numpy.distutils.ccompiler_opt import new_ccompiler_opt - -# Fix Python distutils bug sf #1718574: -_l = old_build_clib.user_options -for _i in range(len(_l)): - if _l[_i][0] in ['build-clib', 'build-temp']: - _l[_i] = (_l[_i][0] + '=',) + _l[_i][1:] -# - - -class build_clib(old_build_clib): - - description = "build C/C++/F libraries used by Python extensions" - - user_options = old_build_clib.user_options + [ - ('fcompiler=', None, - "specify the Fortran compiler type"), - ('inplace', 'i', 'Build in-place'), - ('parallel=', 'j', - "number of parallel jobs"), - ('warn-error', None, - "turn all warnings into errors (-Werror)"), - ('cpu-baseline=', None, - "specify a list of enabled baseline CPU optimizations"), - ('cpu-dispatch=', None, - "specify a list of dispatched CPU optimizations"), - ('disable-optimization', None, - "disable CPU optimized code(dispatch,simd,fast...)"), - ] - - boolean_options = old_build_clib.boolean_options + \ - ['inplace', 'warn-error', 'disable-optimization'] - - def initialize_options(self): - old_build_clib.initialize_options(self) - self.fcompiler = None - self.inplace = 0 - self.parallel = None - self.warn_error = None - self.cpu_baseline = None - self.cpu_dispatch = None - self.disable_optimization = None - - - def finalize_options(self): - if self.parallel: - try: - self.parallel = int(self.parallel) - except ValueError as e: - raise ValueError("--parallel/-j argument must be an integer") from e - old_build_clib.finalize_options(self) - self.set_undefined_options('build', - ('parallel', 
'parallel'), - ('warn_error', 'warn_error'), - ('cpu_baseline', 'cpu_baseline'), - ('cpu_dispatch', 'cpu_dispatch'), - ('disable_optimization', 'disable_optimization') - ) - - def have_f_sources(self): - for (lib_name, build_info) in self.libraries: - if has_f_sources(build_info.get('sources', [])): - return True - return False - - def have_cxx_sources(self): - for (lib_name, build_info) in self.libraries: - if has_cxx_sources(build_info.get('sources', [])): - return True - return False - - def run(self): - if not self.libraries: - return - - # Make sure that library sources are complete. - languages = [] - - # Make sure that extension sources are complete. - self.run_command('build_src') - - for (lib_name, build_info) in self.libraries: - l = build_info.get('language', None) - if l and l not in languages: - languages.append(l) - - from distutils.ccompiler import new_compiler - self.compiler = new_compiler(compiler=self.compiler, - dry_run=self.dry_run, - force=self.force) - self.compiler.customize(self.distribution, - need_cxx=self.have_cxx_sources()) - - if self.warn_error: - self.compiler.compiler.append('-Werror') - self.compiler.compiler_so.append('-Werror') - - libraries = self.libraries - self.libraries = None - self.compiler.customize_cmd(self) - self.libraries = libraries - - self.compiler.show_customization() - - if not self.disable_optimization: - dispatch_hpath = os.path.join("numpy", "distutils", "include", "npy_cpu_dispatch_config.h") - dispatch_hpath = os.path.join(self.get_finalized_command("build_src").build_src, dispatch_hpath) - opt_cache_path = os.path.abspath( - os.path.join(self.build_temp, 'ccompiler_opt_cache_clib.py') - ) - if hasattr(self, "compiler_opt"): - # By default `CCompilerOpt` update the cache at the exit of - # the process, which may lead to duplicate building - # (see build_extension()/force_rebuild) if run() called - # multiple times within the same os process/thread without - # giving the chance the previous instances of 
`CCompilerOpt` - # to update the cache. - self.compiler_opt.cache_flush() - - self.compiler_opt = new_ccompiler_opt( - compiler=self.compiler, dispatch_hpath=dispatch_hpath, - cpu_baseline=self.cpu_baseline, cpu_dispatch=self.cpu_dispatch, - cache_path=opt_cache_path - ) - def report(copt): - log.info("\n########### CLIB COMPILER OPTIMIZATION ###########") - log.info(copt.report(full=True)) - - import atexit - atexit.register(report, self.compiler_opt) - - if self.have_f_sources(): - from numpy.distutils.fcompiler import new_fcompiler - self._f_compiler = new_fcompiler(compiler=self.fcompiler, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force, - requiref90='f90' in languages, - c_compiler=self.compiler) - if self._f_compiler is not None: - self._f_compiler.customize(self.distribution) - - libraries = self.libraries - self.libraries = None - self._f_compiler.customize_cmd(self) - self.libraries = libraries - - self._f_compiler.show_customization() - else: - self._f_compiler = None - - self.build_libraries(self.libraries) - - if self.inplace: - for l in self.distribution.installed_libraries: - libname = self.compiler.library_filename(l.name) - source = os.path.join(self.build_clib, libname) - target = os.path.join(l.target_dir, libname) - self.mkpath(l.target_dir) - shutil.copy(source, target) - - def get_source_files(self): - self.check_library_list(self.libraries) - filenames = [] - for lib in self.libraries: - filenames.extend(get_lib_source_files(lib)) - return filenames - - def build_libraries(self, libraries): - for (lib_name, build_info) in libraries: - self.build_a_library(build_info, lib_name, libraries) - - def assemble_flags(self, in_flags): - """ Assemble flags from flag list - - Parameters - ---------- - in_flags : None or sequence - None corresponds to empty list. Sequence elements can be strings - or callables that return lists of strings. Callable takes `self` as - single parameter. 
- - Returns - ------- - out_flags : list - """ - if in_flags is None: - return [] - out_flags = [] - for in_flag in in_flags: - if callable(in_flag): - out_flags += in_flag(self) - else: - out_flags.append(in_flag) - return out_flags - - def build_a_library(self, build_info, lib_name, libraries): - # default compilers - compiler = self.compiler - fcompiler = self._f_compiler - - sources = build_info.get('sources') - if sources is None or not is_sequence(sources): - raise DistutilsSetupError(("in 'libraries' option (library '%s'), " - "'sources' must be present and must be " - "a list of source filenames") % lib_name) - sources = list(sources) - - c_sources, cxx_sources, f_sources, fmodule_sources \ - = filter_sources(sources) - requiref90 = not not fmodule_sources or \ - build_info.get('language', 'c') == 'f90' - - # save source type information so that build_ext can use it. - source_languages = [] - if c_sources: - source_languages.append('c') - if cxx_sources: - source_languages.append('c++') - if requiref90: - source_languages.append('f90') - elif f_sources: - source_languages.append('f77') - build_info['source_languages'] = source_languages - - lib_file = compiler.library_filename(lib_name, - output_dir=self.build_clib) - depends = sources + build_info.get('depends', []) - - force_rebuild = self.force - if not self.disable_optimization and not self.compiler_opt.is_cached(): - log.debug("Detected changes on compiler optimizations") - force_rebuild = True - if not (force_rebuild or newer_group(depends, lib_file, 'newer')): - log.debug("skipping '%s' library (up-to-date)", lib_name) - return - else: - log.info("building '%s' library", lib_name) - - config_fc = build_info.get('config_fc', {}) - if fcompiler is not None and config_fc: - log.info('using additional config_fc from setup script ' - 'for fortran compiler: %s' - % (config_fc,)) - from numpy.distutils.fcompiler import new_fcompiler - fcompiler = new_fcompiler(compiler=fcompiler.compiler_type, - 
verbose=self.verbose, - dry_run=self.dry_run, - force=self.force, - requiref90=requiref90, - c_compiler=self.compiler) - if fcompiler is not None: - dist = self.distribution - base_config_fc = dist.get_option_dict('config_fc').copy() - base_config_fc.update(config_fc) - fcompiler.customize(base_config_fc) - - # check availability of Fortran compilers - if (f_sources or fmodule_sources) and fcompiler is None: - raise DistutilsError("library %s has Fortran sources" - " but no Fortran compiler found" % (lib_name)) - - if fcompiler is not None: - fcompiler.extra_f77_compile_args = build_info.get( - 'extra_f77_compile_args') or [] - fcompiler.extra_f90_compile_args = build_info.get( - 'extra_f90_compile_args') or [] - - macros = build_info.get('macros') - if macros is None: - macros = [] - include_dirs = build_info.get('include_dirs') - if include_dirs is None: - include_dirs = [] - # Flags can be strings, or callables that return a list of strings. - extra_postargs = self.assemble_flags( - build_info.get('extra_compiler_args')) - extra_cflags = self.assemble_flags( - build_info.get('extra_cflags')) - extra_cxxflags = self.assemble_flags( - build_info.get('extra_cxxflags')) - - include_dirs.extend(get_numpy_include_dirs()) - # where compiled F90 module files are: - module_dirs = build_info.get('module_dirs') or [] - module_build_dir = os.path.dirname(lib_file) - if requiref90: - self.mkpath(module_build_dir) - - if compiler.compiler_type == 'msvc': - # this hack works around the msvc compiler attributes - # problem, msvc uses its own convention :( - c_sources += cxx_sources - cxx_sources = [] - extra_cflags += extra_cxxflags - - # filtering C dispatch-table sources when optimization is not disabled, - # otherwise treated as normal sources. 
- copt_c_sources = [] - copt_cxx_sources = [] - copt_baseline_flags = [] - copt_macros = [] - if not self.disable_optimization: - bsrc_dir = self.get_finalized_command("build_src").build_src - dispatch_hpath = os.path.join("numpy", "distutils", "include") - dispatch_hpath = os.path.join(bsrc_dir, dispatch_hpath) - include_dirs.append(dispatch_hpath) - # copt_build_src = None if self.inplace else bsrc_dir - copt_build_src = bsrc_dir - for _srcs, _dst, _ext in ( - ((c_sources,), copt_c_sources, ('.dispatch.c',)), - ((c_sources, cxx_sources), copt_cxx_sources, - ('.dispatch.cpp', '.dispatch.cxx')) - ): - for _src in _srcs: - _dst += [ - _src.pop(_src.index(s)) - for s in _src[:] if s.endswith(_ext) - ] - copt_baseline_flags = self.compiler_opt.cpu_baseline_flags() - else: - copt_macros.append(("NPY_DISABLE_OPTIMIZATION", 1)) - - objects = [] - if copt_cxx_sources: - log.info("compiling C++ dispatch-able sources") - objects += self.compiler_opt.try_dispatch( - copt_c_sources, - output_dir=self.build_temp, - src_dir=copt_build_src, - macros=macros + copt_macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_postargs + extra_cxxflags, - ccompiler=cxx_compiler - ) - - if copt_c_sources: - log.info("compiling C dispatch-able sources") - objects += self.compiler_opt.try_dispatch( - copt_c_sources, - output_dir=self.build_temp, - src_dir=copt_build_src, - macros=macros + copt_macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_postargs + extra_cflags) - - if c_sources: - log.info("compiling C sources") - objects += compiler.compile( - c_sources, - output_dir=self.build_temp, - macros=macros + copt_macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=(extra_postargs + - copt_baseline_flags + - extra_cflags)) - - if cxx_sources: - log.info("compiling C++ sources") - cxx_compiler = compiler.cxx_compiler() - cxx_objects = cxx_compiler.compile( - cxx_sources, - output_dir=self.build_temp, - macros=macros + 
copt_macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=(extra_postargs + - copt_baseline_flags + - extra_cxxflags)) - objects.extend(cxx_objects) - - if f_sources or fmodule_sources: - extra_postargs = [] - f_objects = [] - - if requiref90: - if fcompiler.module_dir_switch is None: - existing_modules = glob('*.mod') - extra_postargs += fcompiler.module_options( - module_dirs, module_build_dir) - - if fmodule_sources: - log.info("compiling Fortran 90 module sources") - f_objects += fcompiler.compile(fmodule_sources, - output_dir=self.build_temp, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_postargs) - - if requiref90 and self._f_compiler.module_dir_switch is None: - # move new compiled F90 module files to module_build_dir - for f in glob('*.mod'): - if f in existing_modules: - continue - t = os.path.join(module_build_dir, f) - if os.path.abspath(f) == os.path.abspath(t): - continue - if os.path.isfile(t): - os.remove(t) - try: - self.move_file(f, module_build_dir) - except DistutilsFileError: - log.warn('failed to move %r to %r' - % (f, module_build_dir)) - - if f_sources: - log.info("compiling Fortran sources") - f_objects += fcompiler.compile(f_sources, - output_dir=self.build_temp, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_postargs) - else: - f_objects = [] - - if f_objects and not fcompiler.can_ccompiler_link(compiler): - # Default linker cannot link Fortran object files, and results - # need to be wrapped later. Instead of creating a real static - # library, just keep track of the object files. 
- listfn = os.path.join(self.build_clib, - lib_name + '.fobjects') - with open(listfn, 'w') as f: - f.write("\n".join(os.path.abspath(obj) for obj in f_objects)) - - listfn = os.path.join(self.build_clib, - lib_name + '.cobjects') - with open(listfn, 'w') as f: - f.write("\n".join(os.path.abspath(obj) for obj in objects)) - - # create empty "library" file for dependency tracking - lib_fname = os.path.join(self.build_clib, - lib_name + compiler.static_lib_extension) - with open(lib_fname, 'wb') as f: - pass - else: - # assume that default linker is suitable for - # linking Fortran object files - objects.extend(f_objects) - compiler.create_static_lib(objects, lib_name, - output_dir=self.build_clib, - debug=self.debug) - - # fix library dependencies - clib_libraries = build_info.get('libraries', []) - for lname, binfo in libraries: - if lname in clib_libraries: - clib_libraries.extend(binfo.get('libraries', [])) - if clib_libraries: - build_info['libraries'] = clib_libraries diff --git a/numpy/distutils/command/build_ext.py b/numpy/distutils/command/build_ext.py deleted file mode 100644 index 42137e5f859d..000000000000 --- a/numpy/distutils/command/build_ext.py +++ /dev/null @@ -1,752 +0,0 @@ -""" Modified version of build_ext that handles fortran source files. 
- -""" -import os -import subprocess -from glob import glob - -from distutils.dep_util import newer_group -from distutils.command.build_ext import build_ext as old_build_ext -from distutils.errors import DistutilsFileError, DistutilsSetupError,\ - DistutilsError -from distutils.file_util import copy_file - -from numpy.distutils import log -from numpy.distutils.exec_command import filepath_from_subprocess_output -from numpy.distutils.system_info import combine_paths -from numpy.distutils.misc_util import ( - filter_sources, get_ext_source_files, get_numpy_include_dirs, - has_cxx_sources, has_f_sources, is_sequence -) -from numpy.distutils.command.config_compiler import show_fortran_compilers -from numpy.distutils.ccompiler_opt import new_ccompiler_opt, CCompilerOpt - -class build_ext (old_build_ext): - - description = "build C/C++/F extensions (compile/link to build directory)" - - user_options = old_build_ext.user_options + [ - ('fcompiler=', None, - "specify the Fortran compiler type"), - ('parallel=', 'j', - "number of parallel jobs"), - ('warn-error', None, - "turn all warnings into errors (-Werror)"), - ('cpu-baseline=', None, - "specify a list of enabled baseline CPU optimizations"), - ('cpu-dispatch=', None, - "specify a list of dispatched CPU optimizations"), - ('disable-optimization', None, - "disable CPU optimized code(dispatch,simd,fast...)"), - ('simd-test=', None, - "specify a list of CPU optimizations to be tested against NumPy SIMD interface"), - ] - - help_options = old_build_ext.help_options + [ - ('help-fcompiler', None, "list available Fortran compilers", - show_fortran_compilers), - ] - - boolean_options = old_build_ext.boolean_options + ['warn-error', 'disable-optimization'] - - def initialize_options(self): - old_build_ext.initialize_options(self) - self.fcompiler = None - self.parallel = None - self.warn_error = None - self.cpu_baseline = None - self.cpu_dispatch = None - self.disable_optimization = None - self.simd_test = None - - def 
finalize_options(self): - if self.parallel: - try: - self.parallel = int(self.parallel) - except ValueError as e: - raise ValueError("--parallel/-j argument must be an integer") from e - - # Ensure that self.include_dirs and self.distribution.include_dirs - # refer to the same list object. finalize_options will modify - # self.include_dirs, but self.distribution.include_dirs is used - # during the actual build. - # self.include_dirs is None unless paths are specified with - # --include-dirs. - # The include paths will be passed to the compiler in the order: - # numpy paths, --include-dirs paths, Python include path. - if isinstance(self.include_dirs, str): - self.include_dirs = self.include_dirs.split(os.pathsep) - incl_dirs = self.include_dirs or [] - if self.distribution.include_dirs is None: - self.distribution.include_dirs = [] - self.include_dirs = self.distribution.include_dirs - self.include_dirs.extend(incl_dirs) - - old_build_ext.finalize_options(self) - self.set_undefined_options('build', - ('parallel', 'parallel'), - ('warn_error', 'warn_error'), - ('cpu_baseline', 'cpu_baseline'), - ('cpu_dispatch', 'cpu_dispatch'), - ('disable_optimization', 'disable_optimization'), - ('simd_test', 'simd_test') - ) - CCompilerOpt.conf_target_groups["simd_test"] = self.simd_test - - def run(self): - if not self.extensions: - return - - # Make sure that extension sources are complete. 
- self.run_command('build_src') - - if self.distribution.has_c_libraries(): - if self.inplace: - if self.distribution.have_run.get('build_clib'): - log.warn('build_clib already run, it is too late to ' - 'ensure in-place build of build_clib') - build_clib = self.distribution.get_command_obj( - 'build_clib') - else: - build_clib = self.distribution.get_command_obj( - 'build_clib') - build_clib.inplace = 1 - build_clib.ensure_finalized() - build_clib.run() - self.distribution.have_run['build_clib'] = 1 - - else: - self.run_command('build_clib') - build_clib = self.get_finalized_command('build_clib') - self.library_dirs.append(build_clib.build_clib) - else: - build_clib = None - - # Not including C libraries to the list of - # extension libraries automatically to prevent - # bogus linking commands. Extensions must - # explicitly specify the C libraries that they use. - - from distutils.ccompiler import new_compiler - from numpy.distutils.fcompiler import new_fcompiler - - compiler_type = self.compiler - # Initialize C compiler: - self.compiler = new_compiler(compiler=compiler_type, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force) - self.compiler.customize(self.distribution) - self.compiler.customize_cmd(self) - - if self.warn_error: - self.compiler.compiler.append('-Werror') - self.compiler.compiler_so.append('-Werror') - - self.compiler.show_customization() - - if not self.disable_optimization: - dispatch_hpath = os.path.join("numpy", "distutils", "include", "npy_cpu_dispatch_config.h") - dispatch_hpath = os.path.join(self.get_finalized_command("build_src").build_src, dispatch_hpath) - opt_cache_path = os.path.abspath( - os.path.join(self.build_temp, 'ccompiler_opt_cache_ext.py') - ) - if hasattr(self, "compiler_opt"): - # By default `CCompilerOpt` update the cache at the exit of - # the process, which may lead to duplicate building - # (see build_extension()/force_rebuild) if run() called - # multiple times within the same os process/thread 
without - # giving the chance the previous instances of `CCompilerOpt` - # to update the cache. - self.compiler_opt.cache_flush() - - self.compiler_opt = new_ccompiler_opt( - compiler=self.compiler, dispatch_hpath=dispatch_hpath, - cpu_baseline=self.cpu_baseline, cpu_dispatch=self.cpu_dispatch, - cache_path=opt_cache_path - ) - def report(copt): - log.info("\n########### EXT COMPILER OPTIMIZATION ###########") - log.info(copt.report(full=True)) - - import atexit - atexit.register(report, self.compiler_opt) - - # Setup directory for storing generated extra DLL files on Windows - self.extra_dll_dir = os.path.join(self.build_temp, '.libs') - if not os.path.isdir(self.extra_dll_dir): - os.makedirs(self.extra_dll_dir) - - # Create mapping of libraries built by build_clib: - clibs = {} - if build_clib is not None: - for libname, build_info in build_clib.libraries or []: - if libname in clibs and clibs[libname] != build_info: - log.warn('library %r defined more than once,' - ' overwriting build_info\n%s... \nwith\n%s...' - % (libname, repr(clibs[libname])[:300], repr(build_info)[:300])) - clibs[libname] = build_info - # .. and distribution libraries: - for libname, build_info in self.distribution.libraries or []: - if libname in clibs: - # build_clib libraries have a precedence before distribution ones - continue - clibs[libname] = build_info - - # Determine if C++/Fortran 77/Fortran 90 compilers are needed. - # Update extension libraries, library_dirs, and macros. 
- all_languages = set() - for ext in self.extensions: - ext_languages = set() - c_libs = [] - c_lib_dirs = [] - macros = [] - for libname in ext.libraries: - if libname in clibs: - binfo = clibs[libname] - c_libs += binfo.get('libraries', []) - c_lib_dirs += binfo.get('library_dirs', []) - for m in binfo.get('macros', []): - if m not in macros: - macros.append(m) - - for l in clibs.get(libname, {}).get('source_languages', []): - ext_languages.add(l) - if c_libs: - new_c_libs = ext.libraries + c_libs - log.info('updating extension %r libraries from %r to %r' - % (ext.name, ext.libraries, new_c_libs)) - ext.libraries = new_c_libs - ext.library_dirs = ext.library_dirs + c_lib_dirs - if macros: - log.info('extending extension %r defined_macros with %r' - % (ext.name, macros)) - ext.define_macros = ext.define_macros + macros - - # determine extension languages - if has_f_sources(ext.sources): - ext_languages.add('f77') - if has_cxx_sources(ext.sources): - ext_languages.add('c++') - l = ext.language or self.compiler.detect_language(ext.sources) - if l: - ext_languages.add(l) - - # reset language attribute for choosing proper linker - # - # When we build extensions with multiple languages, we have to - # choose a linker. The rules here are: - # 1. if there is Fortran code, always prefer the Fortran linker, - # 2. otherwise prefer C++ over C, - # 3. Users can force a particular linker by using - # `language='c'` # or 'c++', 'f90', 'f77' - # in their config.add_extension() calls. - if 'c++' in ext_languages: - ext_language = 'c++' - else: - ext_language = 'c' # default - - has_fortran = False - if 'f90' in ext_languages: - ext_language = 'f90' - has_fortran = True - elif 'f77' in ext_languages: - ext_language = 'f77' - has_fortran = True - - if not ext.language or has_fortran: - if l and l != ext_language and ext.language: - log.warn('resetting extension %r language from %r to %r.' 
% - (ext.name, l, ext_language)) - - ext.language = ext_language - - # global language - all_languages.update(ext_languages) - - need_f90_compiler = 'f90' in all_languages - need_f77_compiler = 'f77' in all_languages - need_cxx_compiler = 'c++' in all_languages - - # Initialize C++ compiler: - if need_cxx_compiler: - self._cxx_compiler = new_compiler(compiler=compiler_type, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force) - compiler = self._cxx_compiler - compiler.customize(self.distribution, need_cxx=need_cxx_compiler) - compiler.customize_cmd(self) - compiler.show_customization() - self._cxx_compiler = compiler.cxx_compiler() - else: - self._cxx_compiler = None - - # Initialize Fortran 77 compiler: - if need_f77_compiler: - ctype = self.fcompiler - self._f77_compiler = new_fcompiler(compiler=self.fcompiler, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force, - requiref90=False, - c_compiler=self.compiler) - fcompiler = self._f77_compiler - if fcompiler: - ctype = fcompiler.compiler_type - fcompiler.customize(self.distribution) - if fcompiler and fcompiler.get_version(): - fcompiler.customize_cmd(self) - fcompiler.show_customization() - else: - self.warn('f77_compiler=%s is not available.' % - (ctype)) - self._f77_compiler = None - else: - self._f77_compiler = None - - # Initialize Fortran 90 compiler: - if need_f90_compiler: - ctype = self.fcompiler - self._f90_compiler = new_fcompiler(compiler=self.fcompiler, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force, - requiref90=True, - c_compiler=self.compiler) - fcompiler = self._f90_compiler - if fcompiler: - ctype = fcompiler.compiler_type - fcompiler.customize(self.distribution) - if fcompiler and fcompiler.get_version(): - fcompiler.customize_cmd(self) - fcompiler.show_customization() - else: - self.warn('f90_compiler=%s is not available.' 
% - (ctype)) - self._f90_compiler = None - else: - self._f90_compiler = None - - # Build extensions - self.build_extensions() - - # Copy over any extra DLL files - # FIXME: In the case where there are more than two packages, - # we blindly assume that both packages need all of the libraries, - # resulting in a larger wheel than is required. This should be fixed, - # but it's so rare that I won't bother to handle it. - pkg_roots = { - self.get_ext_fullname(ext.name).split('.')[0] - for ext in self.extensions - } - for pkg_root in pkg_roots: - shared_lib_dir = os.path.join(pkg_root, '.libs') - if not self.inplace: - shared_lib_dir = os.path.join(self.build_lib, shared_lib_dir) - for fn in os.listdir(self.extra_dll_dir): - if not os.path.isdir(shared_lib_dir): - os.makedirs(shared_lib_dir) - if not fn.lower().endswith('.dll'): - continue - runtime_lib = os.path.join(self.extra_dll_dir, fn) - copy_file(runtime_lib, shared_lib_dir) - - def swig_sources(self, sources, extensions=None): - # Do nothing. Swig sources have been handled in build_src command. 
- return sources - - def build_extension(self, ext): - sources = ext.sources - if sources is None or not is_sequence(sources): - raise DistutilsSetupError( - ("in 'ext_modules' option (extension '%s'), " - "'sources' must be present and must be " - "a list of source filenames") % ext.name) - sources = list(sources) - - if not sources: - return - - fullname = self.get_ext_fullname(ext.name) - if self.inplace: - modpath = fullname.split('.') - package = '.'.join(modpath[0:-1]) - base = modpath[-1] - build_py = self.get_finalized_command('build_py') - package_dir = build_py.get_package_dir(package) - ext_filename = os.path.join(package_dir, - self.get_ext_filename(base)) - else: - ext_filename = os.path.join(self.build_lib, - self.get_ext_filename(fullname)) - depends = sources + ext.depends - - force_rebuild = self.force - if not self.disable_optimization and not self.compiler_opt.is_cached(): - log.debug("Detected changes on compiler optimizations") - force_rebuild = True - if not (force_rebuild or newer_group(depends, ext_filename, 'newer')): - log.debug("skipping '%s' extension (up-to-date)", ext.name) - return - else: - log.info("building '%s' extension", ext.name) - - extra_args = ext.extra_compile_args or [] - extra_cflags = getattr(ext, 'extra_c_compile_args', None) or [] - extra_cxxflags = getattr(ext, 'extra_cxx_compile_args', None) or [] - - macros = ext.define_macros[:] - for undef in ext.undef_macros: - macros.append((undef,)) - - c_sources, cxx_sources, f_sources, fmodule_sources = \ - filter_sources(ext.sources) - - if self.compiler.compiler_type == 'msvc': - if cxx_sources: - # Needed to compile kiva.agg._agg extension. - extra_args.append('/Zm1000') - extra_cflags += extra_cxxflags - # this hack works around the msvc compiler attributes - # problem, msvc uses its own convention :( - c_sources += cxx_sources - cxx_sources = [] - - # Set Fortran/C++ compilers for compilation and linking. 
- if ext.language == 'f90': - fcompiler = self._f90_compiler - elif ext.language == 'f77': - fcompiler = self._f77_compiler - else: # in case ext.language is c++, for instance - fcompiler = self._f90_compiler or self._f77_compiler - if fcompiler is not None: - fcompiler.extra_f77_compile_args = (ext.extra_f77_compile_args or []) if hasattr( - ext, 'extra_f77_compile_args') else [] - fcompiler.extra_f90_compile_args = (ext.extra_f90_compile_args or []) if hasattr( - ext, 'extra_f90_compile_args') else [] - cxx_compiler = self._cxx_compiler - - # check for the availability of required compilers - if cxx_sources and cxx_compiler is None: - raise DistutilsError("extension %r has C++ sources" - "but no C++ compiler found" % (ext.name)) - if (f_sources or fmodule_sources) and fcompiler is None: - raise DistutilsError("extension %r has Fortran sources " - "but no Fortran compiler found" % (ext.name)) - if ext.language in ['f77', 'f90'] and fcompiler is None: - self.warn("extension %r has Fortran libraries " - "but no Fortran linker found, using default linker" % (ext.name)) - if ext.language == 'c++' and cxx_compiler is None: - self.warn("extension %r has C++ libraries " - "but no C++ linker found, using default linker" % (ext.name)) - - kws = {'depends': ext.depends} - output_dir = self.build_temp - - include_dirs = ext.include_dirs + get_numpy_include_dirs() - - # filtering C dispatch-table sources when optimization is not disabled, - # otherwise treated as normal sources. 
- copt_c_sources = [] - copt_cxx_sources = [] - copt_baseline_flags = [] - copt_macros = [] - if not self.disable_optimization: - bsrc_dir = self.get_finalized_command("build_src").build_src - dispatch_hpath = os.path.join("numpy", "distutils", "include") - dispatch_hpath = os.path.join(bsrc_dir, dispatch_hpath) - include_dirs.append(dispatch_hpath) - - # copt_build_src = None if self.inplace else bsrc_dir - # Always generate the generated config files and - # dispatch-able sources inside the build directory, - # even if the build option `inplace` is enabled. - # This approach prevents conflicts with Meson-generated - # config headers. Since `spin build --clean` will not remove - # these headers, they might overwrite the generated Meson headers, - # causing compatibility issues. Maintaining separate directories - # ensures compatibility between distutils dispatch config headers - # and Meson headers, avoiding build disruptions. - # See gh-24450 for more details. - copt_build_src = bsrc_dir - for _srcs, _dst, _ext in ( - ((c_sources,), copt_c_sources, ('.dispatch.c',)), - ((c_sources, cxx_sources), copt_cxx_sources, - ('.dispatch.cpp', '.dispatch.cxx')) - ): - for _src in _srcs: - _dst += [ - _src.pop(_src.index(s)) - for s in _src[:] if s.endswith(_ext) - ] - copt_baseline_flags = self.compiler_opt.cpu_baseline_flags() - else: - copt_macros.append(("NPY_DISABLE_OPTIMIZATION", 1)) - - c_objects = [] - if copt_cxx_sources: - log.info("compiling C++ dispatch-able sources") - c_objects += self.compiler_opt.try_dispatch( - copt_cxx_sources, - output_dir=output_dir, - src_dir=copt_build_src, - macros=macros + copt_macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_args + extra_cxxflags, - ccompiler=cxx_compiler, - **kws - ) - if copt_c_sources: - log.info("compiling C dispatch-able sources") - c_objects += self.compiler_opt.try_dispatch( - copt_c_sources, - output_dir=output_dir, - src_dir=copt_build_src, - macros=macros + copt_macros, - 
include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_args + extra_cflags, - **kws) - if c_sources: - log.info("compiling C sources") - c_objects += self.compiler.compile( - c_sources, - output_dir=output_dir, - macros=macros + copt_macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=(extra_args + copt_baseline_flags + - extra_cflags), - **kws) - if cxx_sources: - log.info("compiling C++ sources") - c_objects += cxx_compiler.compile( - cxx_sources, - output_dir=output_dir, - macros=macros + copt_macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=(extra_args + copt_baseline_flags + - extra_cxxflags), - **kws) - - extra_postargs = [] - f_objects = [] - if fmodule_sources: - log.info("compiling Fortran 90 module sources") - module_dirs = ext.module_dirs[:] - module_build_dir = os.path.join( - self.build_temp, os.path.dirname( - self.get_ext_filename(fullname))) - - self.mkpath(module_build_dir) - if fcompiler.module_dir_switch is None: - existing_modules = glob('*.mod') - extra_postargs += fcompiler.module_options( - module_dirs, module_build_dir) - f_objects += fcompiler.compile(fmodule_sources, - output_dir=self.build_temp, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_postargs, - depends=ext.depends) - - if fcompiler.module_dir_switch is None: - for f in glob('*.mod'): - if f in existing_modules: - continue - t = os.path.join(module_build_dir, f) - if os.path.abspath(f) == os.path.abspath(t): - continue - if os.path.isfile(t): - os.remove(t) - try: - self.move_file(f, module_build_dir) - except DistutilsFileError: - log.warn('failed to move %r to %r' % - (f, module_build_dir)) - if f_sources: - log.info("compiling Fortran sources") - f_objects += fcompiler.compile(f_sources, - output_dir=self.build_temp, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - extra_postargs=extra_postargs, - depends=ext.depends) - - if f_objects and not 
fcompiler.can_ccompiler_link(self.compiler): - unlinkable_fobjects = f_objects - objects = c_objects - else: - unlinkable_fobjects = [] - objects = c_objects + f_objects - - if ext.extra_objects: - objects.extend(ext.extra_objects) - extra_args = ext.extra_link_args or [] - libraries = self.get_libraries(ext)[:] - library_dirs = ext.library_dirs[:] - - linker = self.compiler.link_shared_object - # Always use system linker when using MSVC compiler. - if self.compiler.compiler_type in ('msvc', 'intelw', 'intelemw'): - # expand libraries with fcompiler libraries as we are - # not using fcompiler linker - self._libs_with_msvc_and_fortran( - fcompiler, libraries, library_dirs) - if ext.runtime_library_dirs: - # gcc adds RPATH to the link. On windows, copy the dll into - # self.extra_dll_dir instead. - for d in ext.runtime_library_dirs: - for f in glob(d + '/*.dll'): - copy_file(f, self.extra_dll_dir) - ext.runtime_library_dirs = [] - - elif ext.language in ['f77', 'f90'] and fcompiler is not None: - linker = fcompiler.link_shared_object - if ext.language == 'c++' and cxx_compiler is not None: - linker = cxx_compiler.link_shared_object - - if fcompiler is not None: - objects, libraries = self._process_unlinkable_fobjects( - objects, libraries, - fcompiler, library_dirs, - unlinkable_fobjects) - - linker(objects, ext_filename, - libraries=libraries, - library_dirs=library_dirs, - runtime_library_dirs=ext.runtime_library_dirs, - extra_postargs=extra_args, - export_symbols=self.get_export_symbols(ext), - debug=self.debug, - build_temp=self.build_temp, - target_lang=ext.language) - - def _add_dummy_mingwex_sym(self, c_sources): - build_src = self.get_finalized_command("build_src").build_src - build_clib = self.get_finalized_command("build_clib").build_clib - objects = self.compiler.compile([os.path.join(build_src, - "gfortran_vs2003_hack.c")], - output_dir=self.build_temp) - self.compiler.create_static_lib( - objects, "_gfortran_workaround", output_dir=build_clib, 
debug=self.debug) - - def _process_unlinkable_fobjects(self, objects, libraries, - fcompiler, library_dirs, - unlinkable_fobjects): - libraries = list(libraries) - objects = list(objects) - unlinkable_fobjects = list(unlinkable_fobjects) - - # Expand possible fake static libraries to objects; - # make sure to iterate over a copy of the list as - # "fake" libraries will be removed as they are - # encountered - for lib in libraries[:]: - for libdir in library_dirs: - fake_lib = os.path.join(libdir, lib + '.fobjects') - if os.path.isfile(fake_lib): - # Replace fake static library - libraries.remove(lib) - with open(fake_lib) as f: - unlinkable_fobjects.extend(f.read().splitlines()) - - # Expand C objects - c_lib = os.path.join(libdir, lib + '.cobjects') - with open(c_lib) as f: - objects.extend(f.read().splitlines()) - - # Wrap unlinkable objects to a linkable one - if unlinkable_fobjects: - fobjects = [os.path.abspath(obj) for obj in unlinkable_fobjects] - wrapped = fcompiler.wrap_unlinkable_objects( - fobjects, output_dir=self.build_temp, - extra_dll_dir=self.extra_dll_dir) - objects.extend(wrapped) - - return objects, libraries - - def _libs_with_msvc_and_fortran(self, fcompiler, c_libraries, - c_library_dirs): - if fcompiler is None: - return - - for libname in c_libraries: - if libname.startswith('msvc'): - continue - fileexists = False - for libdir in c_library_dirs or []: - libfile = os.path.join(libdir, '%s.lib' % (libname)) - if os.path.isfile(libfile): - fileexists = True - break - if fileexists: - continue - # make g77-compiled static libs available to MSVC - fileexists = False - for libdir in c_library_dirs: - libfile = os.path.join(libdir, 'lib%s.a' % (libname)) - if os.path.isfile(libfile): - # copy libname.a file to name.lib so that MSVC linker - # can find it - libfile2 = os.path.join(self.build_temp, libname + '.lib') - copy_file(libfile, libfile2) - if self.build_temp not in c_library_dirs: - c_library_dirs.append(self.build_temp) - fileexists = True 
- break - if fileexists: - continue - log.warn('could not find library %r in directories %s' - % (libname, c_library_dirs)) - - # Always use system linker when using MSVC compiler. - f_lib_dirs = [] - for dir in fcompiler.library_dirs: - # correct path when compiling in Cygwin but with normal Win - # Python - if dir.startswith('/usr/lib'): - try: - dir = subprocess.check_output(['cygpath', '-w', dir]) - except (OSError, subprocess.CalledProcessError): - pass - else: - dir = filepath_from_subprocess_output(dir) - f_lib_dirs.append(dir) - c_library_dirs.extend(f_lib_dirs) - - # make g77-compiled static libs available to MSVC - for lib in fcompiler.libraries: - if not lib.startswith('msvc'): - c_libraries.append(lib) - p = combine_paths(f_lib_dirs, 'lib' + lib + '.a') - if p: - dst_name = os.path.join(self.build_temp, lib + '.lib') - if not os.path.isfile(dst_name): - copy_file(p[0], dst_name) - if self.build_temp not in c_library_dirs: - c_library_dirs.append(self.build_temp) - - def get_source_files(self): - self.check_extensions_list(self.extensions) - filenames = [] - for ext in self.extensions: - filenames.extend(get_ext_source_files(ext)) - return filenames - - def get_outputs(self): - self.check_extensions_list(self.extensions) - - outputs = [] - for ext in self.extensions: - if not ext.sources: - continue - fullname = self.get_ext_fullname(ext.name) - outputs.append(os.path.join(self.build_lib, - self.get_ext_filename(fullname))) - return outputs diff --git a/numpy/distutils/command/build_py.py b/numpy/distutils/command/build_py.py deleted file mode 100644 index d30dc5bf42d8..000000000000 --- a/numpy/distutils/command/build_py.py +++ /dev/null @@ -1,31 +0,0 @@ -from distutils.command.build_py import build_py as old_build_py -from numpy.distutils.misc_util import is_string - -class build_py(old_build_py): - - def run(self): - build_src = self.get_finalized_command('build_src') - if build_src.py_modules_dict and self.packages is None: - self.packages = 
list(build_src.py_modules_dict.keys ()) - old_build_py.run(self) - - def find_package_modules(self, package, package_dir): - modules = old_build_py.find_package_modules(self, package, package_dir) - - # Find build_src generated *.py files. - build_src = self.get_finalized_command('build_src') - modules += build_src.py_modules_dict.get(package, []) - - return modules - - def find_modules(self): - old_py_modules = self.py_modules[:] - new_py_modules = [_m for _m in self.py_modules if is_string(_m)] - self.py_modules[:] = new_py_modules - modules = old_build_py.find_modules(self) - self.py_modules[:] = old_py_modules - - return modules - - # XXX: Fix find_source_files for item in py_modules such that item is 3-tuple - # and item[2] is source file. diff --git a/numpy/distutils/command/build_scripts.py b/numpy/distutils/command/build_scripts.py deleted file mode 100644 index d5cadb2745fe..000000000000 --- a/numpy/distutils/command/build_scripts.py +++ /dev/null @@ -1,49 +0,0 @@ -""" Modified version of build_scripts that handles building scripts from functions. 
- -""" -from distutils.command.build_scripts import build_scripts as old_build_scripts -from numpy.distutils import log -from numpy.distutils.misc_util import is_string - -class build_scripts(old_build_scripts): - - def generate_scripts(self, scripts): - new_scripts = [] - func_scripts = [] - for script in scripts: - if is_string(script): - new_scripts.append(script) - else: - func_scripts.append(script) - if not func_scripts: - return new_scripts - - build_dir = self.build_dir - self.mkpath(build_dir) - for func in func_scripts: - script = func(build_dir) - if not script: - continue - if is_string(script): - log.info(" adding '%s' to scripts" % (script,)) - new_scripts.append(script) - else: - [log.info(" adding '%s' to scripts" % (s,)) for s in script] - new_scripts.extend(list(script)) - return new_scripts - - def run (self): - if not self.scripts: - return - - self.scripts = self.generate_scripts(self.scripts) - # Now make sure that the distribution object has this list of scripts. - # setuptools' develop command requires that this be a list of filenames, - # not functions. - self.distribution.scripts = self.scripts - - return old_build_scripts.run(self) - - def get_source_files(self): - from numpy.distutils.misc_util import get_script_files - return get_script_files(self.scripts) diff --git a/numpy/distutils/command/build_src.py b/numpy/distutils/command/build_src.py deleted file mode 100644 index cfcc80caecd6..000000000000 --- a/numpy/distutils/command/build_src.py +++ /dev/null @@ -1,773 +0,0 @@ -""" Build swig and f2py sources. 
-""" -import os -import re -import sys -import shlex -import copy - -from distutils.command import build_ext -from distutils.dep_util import newer_group, newer -from distutils.util import get_platform -from distutils.errors import DistutilsError, DistutilsSetupError - - -# this import can't be done here, as it uses numpy stuff only available -# after it's installed -#import numpy.f2py -from numpy.distutils import log -from numpy.distutils.misc_util import ( - fortran_ext_match, appendpath, is_string, is_sequence, get_cmd - ) -from numpy.distutils.from_template import process_file as process_f_file -from numpy.distutils.conv_template import process_file as process_c_file - -def subst_vars(target, source, d): - """Substitute any occurrence of @foo@ by d['foo'] from source file into - target.""" - var = re.compile('@([a-zA-Z_]+)@') - with open(source, 'r') as fs: - with open(target, 'w') as ft: - for l in fs: - m = var.search(l) - if m: - ft.write(l.replace('@%s@' % m.group(1), d[m.group(1)])) - else: - ft.write(l) - -class build_src(build_ext.build_ext): - - description = "build sources from SWIG, F2PY files or a function" - - user_options = [ - ('build-src=', 'd', "directory to \"build\" sources to"), - ('f2py-opts=', None, "list of f2py command line options"), - ('swig=', None, "path to the SWIG executable"), - ('swig-opts=', None, "list of SWIG command line options"), - ('swig-cpp', None, "make SWIG create C++ files (default is autodetected from sources)"), - ('f2pyflags=', None, "additional flags to f2py (use --f2py-opts= instead)"), # obsolete - ('swigflags=', None, "additional flags to swig (use --swig-opts= instead)"), # obsolete - ('force', 'f', "forcibly build everything (ignore file timestamps)"), - ('inplace', 'i', - "ignore build-lib and put compiled extensions into the source " - "directory alongside your pure Python modules"), - ('verbose-cfg', None, - "change logging level from WARN to INFO which will show all " - "compiler output") - ] - - 
boolean_options = ['force', 'inplace', 'verbose-cfg'] - - help_options = [] - - def initialize_options(self): - self.extensions = None - self.package = None - self.py_modules = None - self.py_modules_dict = None - self.build_src = None - self.build_lib = None - self.build_base = None - self.force = None - self.inplace = None - self.package_dir = None - self.f2pyflags = None # obsolete - self.f2py_opts = None - self.swigflags = None # obsolete - self.swig_opts = None - self.swig_cpp = None - self.swig = None - self.verbose_cfg = None - - def finalize_options(self): - self.set_undefined_options('build', - ('build_base', 'build_base'), - ('build_lib', 'build_lib'), - ('force', 'force')) - if self.package is None: - self.package = self.distribution.ext_package - self.extensions = self.distribution.ext_modules - self.libraries = self.distribution.libraries or [] - self.py_modules = self.distribution.py_modules or [] - self.data_files = self.distribution.data_files or [] - - if self.build_src is None: - plat_specifier = ".{}-{}.{}".format(get_platform(), *sys.version_info[:2]) - self.build_src = os.path.join(self.build_base, 'src'+plat_specifier) - - # py_modules_dict is used in build_py.find_package_modules - self.py_modules_dict = {} - - if self.f2pyflags: - if self.f2py_opts: - log.warn('ignoring --f2pyflags as --f2py-opts already used') - else: - self.f2py_opts = self.f2pyflags - self.f2pyflags = None - if self.f2py_opts is None: - self.f2py_opts = [] - else: - self.f2py_opts = shlex.split(self.f2py_opts) - - if self.swigflags: - if self.swig_opts: - log.warn('ignoring --swigflags as --swig-opts already used') - else: - self.swig_opts = self.swigflags - self.swigflags = None - - if self.swig_opts is None: - self.swig_opts = [] - else: - self.swig_opts = shlex.split(self.swig_opts) - - # use options from build_ext command - build_ext = self.get_finalized_command('build_ext') - if self.inplace is None: - self.inplace = build_ext.inplace - if self.swig_cpp is None: - 
self.swig_cpp = build_ext.swig_cpp - for c in ['swig', 'swig_opt']: - o = '--'+c.replace('_', '-') - v = getattr(build_ext, c, None) - if v: - if getattr(self, c): - log.warn('both build_src and build_ext define %s option' % (o)) - else: - log.info('using "%s=%s" option from build_ext command' % (o, v)) - setattr(self, c, v) - - def run(self): - log.info("build_src") - if not (self.extensions or self.libraries): - return - self.build_sources() - - def build_sources(self): - - if self.inplace: - self.get_package_dir = \ - self.get_finalized_command('build_py').get_package_dir - - self.build_py_modules_sources() - - for libname_info in self.libraries: - self.build_library_sources(*libname_info) - - if self.extensions: - self.check_extensions_list(self.extensions) - - for ext in self.extensions: - self.build_extension_sources(ext) - - self.build_data_files_sources() - self.build_npy_pkg_config() - - def build_data_files_sources(self): - if not self.data_files: - return - log.info('building data_files sources') - from numpy.distutils.misc_util import get_data_files - new_data_files = [] - for data in self.data_files: - if isinstance(data, str): - new_data_files.append(data) - elif isinstance(data, tuple): - d, files = data - if self.inplace: - build_dir = self.get_package_dir('.'.join(d.split(os.sep))) - else: - build_dir = os.path.join(self.build_src, d) - funcs = [f for f in files if hasattr(f, '__call__')] - files = [f for f in files if not hasattr(f, '__call__')] - for f in funcs: - if f.__code__.co_argcount==1: - s = f(build_dir) - else: - s = f() - if s is not None: - if isinstance(s, list): - files.extend(s) - elif isinstance(s, str): - files.append(s) - else: - raise TypeError(repr(s)) - filenames = get_data_files((d, files)) - new_data_files.append((d, filenames)) - else: - raise TypeError(repr(data)) - self.data_files[:] = new_data_files - - - def _build_npy_pkg_config(self, info, gd): - template, install_dir, subst_dict = info - template_dir = 
os.path.dirname(template) - for k, v in gd.items(): - subst_dict[k] = v - - if self.inplace == 1: - generated_dir = os.path.join(template_dir, install_dir) - else: - generated_dir = os.path.join(self.build_src, template_dir, - install_dir) - generated = os.path.basename(os.path.splitext(template)[0]) - generated_path = os.path.join(generated_dir, generated) - if not os.path.exists(generated_dir): - os.makedirs(generated_dir) - - subst_vars(generated_path, template, subst_dict) - - # Where to install relatively to install prefix - full_install_dir = os.path.join(template_dir, install_dir) - return full_install_dir, generated_path - - def build_npy_pkg_config(self): - log.info('build_src: building npy-pkg config files') - - # XXX: another ugly workaround to circumvent distutils brain damage. We - # need the install prefix here, but finalizing the options of the - # install command when only building sources cause error. Instead, we - # copy the install command instance, and finalize the copy so that it - # does not disrupt how distutils want to do things when with the - # original install command instance. - install_cmd = copy.copy(get_cmd('install')) - if not install_cmd.finalized == 1: - install_cmd.finalize_options() - build_npkg = False - if self.inplace == 1: - top_prefix = '.' 
- build_npkg = True - elif hasattr(install_cmd, 'install_libbase'): - top_prefix = install_cmd.install_libbase - build_npkg = True - - if build_npkg: - for pkg, infos in self.distribution.installed_pkg_config.items(): - pkg_path = self.distribution.package_dir[pkg] - prefix = os.path.join(os.path.abspath(top_prefix), pkg_path) - d = {'prefix': prefix} - for info in infos: - install_dir, generated = self._build_npy_pkg_config(info, d) - self.distribution.data_files.append((install_dir, - [generated])) - - def build_py_modules_sources(self): - if not self.py_modules: - return - log.info('building py_modules sources') - new_py_modules = [] - for source in self.py_modules: - if is_sequence(source) and len(source)==3: - package, module_base, source = source - if self.inplace: - build_dir = self.get_package_dir(package) - else: - build_dir = os.path.join(self.build_src, - os.path.join(*package.split('.'))) - if hasattr(source, '__call__'): - target = os.path.join(build_dir, module_base + '.py') - source = source(target) - if source is None: - continue - modules = [(package, module_base, source)] - if package not in self.py_modules_dict: - self.py_modules_dict[package] = [] - self.py_modules_dict[package] += modules - else: - new_py_modules.append(source) - self.py_modules[:] = new_py_modules - - def build_library_sources(self, lib_name, build_info): - sources = list(build_info.get('sources', [])) - - if not sources: - return - - log.info('building library "%s" sources' % (lib_name)) - - sources = self.generate_sources(sources, (lib_name, build_info)) - - sources = self.template_sources(sources, (lib_name, build_info)) - - sources, h_files = self.filter_h_files(sources) - - if h_files: - log.info('%s - nothing done with h_files = %s', - self.package, h_files) - - #for f in h_files: - # self.distribution.headers.append((lib_name,f)) - - build_info['sources'] = sources - return - - def build_extension_sources(self, ext): - - sources = list(ext.sources) - - 
log.info('building extension "%s" sources' % (ext.name)) - - fullname = self.get_ext_fullname(ext.name) - - modpath = fullname.split('.') - package = '.'.join(modpath[0:-1]) - - if self.inplace: - self.ext_target_dir = self.get_package_dir(package) - - sources = self.generate_sources(sources, ext) - sources = self.template_sources(sources, ext) - sources = self.swig_sources(sources, ext) - sources = self.f2py_sources(sources, ext) - sources = self.pyrex_sources(sources, ext) - - sources, py_files = self.filter_py_files(sources) - - if package not in self.py_modules_dict: - self.py_modules_dict[package] = [] - modules = [] - for f in py_files: - module = os.path.splitext(os.path.basename(f))[0] - modules.append((package, module, f)) - self.py_modules_dict[package] += modules - - sources, h_files = self.filter_h_files(sources) - - if h_files: - log.info('%s - nothing done with h_files = %s', - package, h_files) - #for f in h_files: - # self.distribution.headers.append((package,f)) - - ext.sources = sources - - def generate_sources(self, sources, extension): - new_sources = [] - func_sources = [] - for source in sources: - if is_string(source): - new_sources.append(source) - else: - func_sources.append(source) - if not func_sources: - return new_sources - if self.inplace and not is_sequence(extension): - build_dir = self.ext_target_dir - else: - if is_sequence(extension): - name = extension[0] - # if 'include_dirs' not in extension[1]: - # extension[1]['include_dirs'] = [] - # incl_dirs = extension[1]['include_dirs'] - else: - name = extension.name - # incl_dirs = extension.include_dirs - #if self.build_src not in incl_dirs: - # incl_dirs.append(self.build_src) - build_dir = os.path.join(*([self.build_src] - +name.split('.')[:-1])) - self.mkpath(build_dir) - - if self.verbose_cfg: - new_level = log.INFO - else: - new_level = log.WARN - old_level = log.set_threshold(new_level) - - for func in func_sources: - source = func(extension, build_dir) - if not source: - 
continue - if is_sequence(source): - [log.info(" adding '%s' to sources." % (s,)) for s in source] - new_sources.extend(source) - else: - log.info(" adding '%s' to sources." % (source,)) - new_sources.append(source) - log.set_threshold(old_level) - return new_sources - - def filter_py_files(self, sources): - return self.filter_files(sources, ['.py']) - - def filter_h_files(self, sources): - return self.filter_files(sources, ['.h', '.hpp', '.inc']) - - def filter_files(self, sources, exts = []): - new_sources = [] - files = [] - for source in sources: - (base, ext) = os.path.splitext(source) - if ext in exts: - files.append(source) - else: - new_sources.append(source) - return new_sources, files - - def template_sources(self, sources, extension): - new_sources = [] - if is_sequence(extension): - depends = extension[1].get('depends') - include_dirs = extension[1].get('include_dirs') - else: - depends = extension.depends - include_dirs = extension.include_dirs - for source in sources: - (base, ext) = os.path.splitext(source) - if ext == '.src': # Template file - if self.inplace: - target_dir = os.path.dirname(base) - else: - target_dir = appendpath(self.build_src, os.path.dirname(base)) - self.mkpath(target_dir) - target_file = os.path.join(target_dir, os.path.basename(base)) - if (self.force or newer_group([source] + depends, target_file)): - if _f_pyf_ext_match(base): - log.info("from_template:> %s" % (target_file)) - outstr = process_f_file(source) - else: - log.info("conv_template:> %s" % (target_file)) - outstr = process_c_file(source) - with open(target_file, 'w') as fid: - fid.write(outstr) - if _header_ext_match(target_file): - d = os.path.dirname(target_file) - if d not in include_dirs: - log.info(" adding '%s' to include_dirs." 
% (d)) - include_dirs.append(d) - new_sources.append(target_file) - else: - new_sources.append(source) - return new_sources - - def pyrex_sources(self, sources, extension): - """Pyrex not supported; this remains for Cython support (see below)""" - new_sources = [] - ext_name = extension.name.split('.')[-1] - for source in sources: - (base, ext) = os.path.splitext(source) - if ext == '.pyx': - target_file = self.generate_a_pyrex_source(base, ext_name, - source, - extension) - new_sources.append(target_file) - else: - new_sources.append(source) - return new_sources - - def generate_a_pyrex_source(self, base, ext_name, source, extension): - """Pyrex is not supported, but some projects monkeypatch this method. - - That allows compiling Cython code, see gh-6955. - This method will remain here for compatibility reasons. - """ - return [] - - def f2py_sources(self, sources, extension): - new_sources = [] - f2py_sources = [] - f_sources = [] - f2py_targets = {} - target_dirs = [] - ext_name = extension.name.split('.')[-1] - skip_f2py = 0 - - for source in sources: - (base, ext) = os.path.splitext(source) - if ext == '.pyf': # F2PY interface file - if self.inplace: - target_dir = os.path.dirname(base) - else: - target_dir = appendpath(self.build_src, os.path.dirname(base)) - if os.path.isfile(source): - name = get_f2py_modulename(source) - if name != ext_name: - raise DistutilsSetupError('mismatch of extension names: %s ' - 'provides %r but expected %r' % ( - source, name, ext_name)) - target_file = os.path.join(target_dir, name+'module.c') - else: - log.debug(' source %s does not exist: skipping f2py\'ing.' \ - % (source)) - name = ext_name - skip_f2py = 1 - target_file = os.path.join(target_dir, name+'module.c') - if not os.path.isfile(target_file): - log.warn(' target %s does not exist:\n '\ - 'Assuming %smodule.c was generated with '\ - '"build_src --inplace" command.' 
\ - % (target_file, name)) - target_dir = os.path.dirname(base) - target_file = os.path.join(target_dir, name+'module.c') - if not os.path.isfile(target_file): - raise DistutilsSetupError("%r missing" % (target_file,)) - log.info(' Yes! Using %r as up-to-date target.' \ - % (target_file)) - target_dirs.append(target_dir) - f2py_sources.append(source) - f2py_targets[source] = target_file - new_sources.append(target_file) - elif fortran_ext_match(ext): - f_sources.append(source) - else: - new_sources.append(source) - - if not (f2py_sources or f_sources): - return new_sources - - for d in target_dirs: - self.mkpath(d) - - f2py_options = extension.f2py_options + self.f2py_opts - - if self.distribution.libraries: - for name, build_info in self.distribution.libraries: - if name in extension.libraries: - f2py_options.extend(build_info.get('f2py_options', [])) - - log.info("f2py options: %s" % (f2py_options)) - - if f2py_sources: - if len(f2py_sources) != 1: - raise DistutilsSetupError( - 'only one .pyf file is allowed per extension module but got'\ - ' more: %r' % (f2py_sources,)) - source = f2py_sources[0] - target_file = f2py_targets[source] - target_dir = os.path.dirname(target_file) or '.' 
- depends = [source] + extension.depends - if (self.force or newer_group(depends, target_file, 'newer')) \ - and not skip_f2py: - log.info("f2py: %s" % (source)) - from numpy.f2py import f2py2e - f2py2e.run_main(f2py_options - + ['--build-dir', target_dir, source]) - else: - log.debug(" skipping '%s' f2py interface (up-to-date)" % (source)) - else: - #XXX TODO: --inplace support for sdist command - if is_sequence(extension): - name = extension[0] - else: name = extension.name - target_dir = os.path.join(*([self.build_src] - +name.split('.')[:-1])) - target_file = os.path.join(target_dir, ext_name + 'module.c') - new_sources.append(target_file) - depends = f_sources + extension.depends - if (self.force or newer_group(depends, target_file, 'newer')) \ - and not skip_f2py: - log.info("f2py:> %s" % (target_file)) - self.mkpath(target_dir) - from numpy.f2py import f2py2e - f2py2e.run_main(f2py_options + ['--lower', - '--build-dir', target_dir]+\ - ['-m', ext_name]+f_sources) - else: - log.debug(" skipping f2py fortran files for '%s' (up-to-date)"\ - % (target_file)) - - if not os.path.isfile(target_file): - raise DistutilsError("f2py target file %r not generated" % (target_file,)) - - build_dir = os.path.join(self.build_src, target_dir) - target_c = os.path.join(build_dir, 'fortranobject.c') - target_h = os.path.join(build_dir, 'fortranobject.h') - log.info(" adding '%s' to sources." % (target_c)) - new_sources.append(target_c) - if build_dir not in extension.include_dirs: - log.info(" adding '%s' to include_dirs." 
% (build_dir)) - extension.include_dirs.append(build_dir) - - if not skip_f2py: - import numpy.f2py - d = os.path.dirname(numpy.f2py.__file__) - source_c = os.path.join(d, 'src', 'fortranobject.c') - source_h = os.path.join(d, 'src', 'fortranobject.h') - if newer(source_c, target_c) or newer(source_h, target_h): - self.mkpath(os.path.dirname(target_c)) - self.copy_file(source_c, target_c) - self.copy_file(source_h, target_h) - else: - if not os.path.isfile(target_c): - raise DistutilsSetupError("f2py target_c file %r not found" % (target_c,)) - if not os.path.isfile(target_h): - raise DistutilsSetupError("f2py target_h file %r not found" % (target_h,)) - - for name_ext in ['-f2pywrappers.f', '-f2pywrappers2.f90']: - filename = os.path.join(target_dir, ext_name + name_ext) - if os.path.isfile(filename): - log.info(" adding '%s' to sources." % (filename)) - f_sources.append(filename) - - return new_sources + f_sources - - def swig_sources(self, sources, extension): - # Assuming SWIG 1.3.14 or later. 
See compatibility note in - # http://www.swig.org/Doc1.3/Python.html#Python_nn6 - - new_sources = [] - swig_sources = [] - swig_targets = {} - target_dirs = [] - py_files = [] # swig generated .py files - target_ext = '.c' - if '-c++' in extension.swig_opts: - typ = 'c++' - is_cpp = True - extension.swig_opts.remove('-c++') - elif self.swig_cpp: - typ = 'c++' - is_cpp = True - else: - typ = None - is_cpp = False - skip_swig = 0 - ext_name = extension.name.split('.')[-1] - - for source in sources: - (base, ext) = os.path.splitext(source) - if ext == '.i': # SWIG interface file - # the code below assumes that the sources list - # contains not more than one .i SWIG interface file - if self.inplace: - target_dir = os.path.dirname(base) - py_target_dir = self.ext_target_dir - else: - target_dir = appendpath(self.build_src, os.path.dirname(base)) - py_target_dir = target_dir - if os.path.isfile(source): - name = get_swig_modulename(source) - if name != ext_name[1:]: - raise DistutilsSetupError( - 'mismatch of extension names: %s provides %r' - ' but expected %r' % (source, name, ext_name[1:])) - if typ is None: - typ = get_swig_target(source) - is_cpp = typ=='c++' - else: - typ2 = get_swig_target(source) - if typ2 is None: - log.warn('source %r does not define swig target, assuming %s swig target' \ - % (source, typ)) - elif typ!=typ2: - log.warn('expected %r but source %r defines %r swig target' \ - % (typ, source, typ2)) - if typ2=='c++': - log.warn('resetting swig target to c++ (some targets may have .c extension)') - is_cpp = True - else: - log.warn('assuming that %r has c++ swig target' % (source)) - if is_cpp: - target_ext = '.cpp' - target_file = os.path.join(target_dir, '%s_wrap%s' \ - % (name, target_ext)) - else: - log.warn(' source %s does not exist: skipping swig\'ing.' 
\ - % (source)) - name = ext_name[1:] - skip_swig = 1 - target_file = _find_swig_target(target_dir, name) - if not os.path.isfile(target_file): - log.warn(' target %s does not exist:\n '\ - 'Assuming %s_wrap.{c,cpp} was generated with '\ - '"build_src --inplace" command.' \ - % (target_file, name)) - target_dir = os.path.dirname(base) - target_file = _find_swig_target(target_dir, name) - if not os.path.isfile(target_file): - raise DistutilsSetupError("%r missing" % (target_file,)) - log.warn(' Yes! Using %r as up-to-date target.' \ - % (target_file)) - target_dirs.append(target_dir) - new_sources.append(target_file) - py_files.append(os.path.join(py_target_dir, name+'.py')) - swig_sources.append(source) - swig_targets[source] = new_sources[-1] - else: - new_sources.append(source) - - if not swig_sources: - return new_sources - - if skip_swig: - return new_sources + py_files - - for d in target_dirs: - self.mkpath(d) - - swig = self.swig or self.find_swig() - swig_cmd = [swig, "-python"] + extension.swig_opts - if is_cpp: - swig_cmd.append('-c++') - for d in extension.include_dirs: - swig_cmd.append('-I'+d) - for source in swig_sources: - target = swig_targets[source] - depends = [source] + extension.depends - if self.force or newer_group(depends, target, 'newer'): - log.info("%s: %s" % (os.path.basename(swig) \ - + (is_cpp and '++' or ''), source)) - self.spawn(swig_cmd + self.swig_opts \ - + ["-o", target, '-outdir', py_target_dir, source]) - else: - log.debug(" skipping '%s' swig interface (up-to-date)" \ - % (source)) - - return new_sources + py_files - -_f_pyf_ext_match = re.compile(r'.*\.(f90|f95|f77|for|ftn|f|pyf)\Z', re.I).match -_header_ext_match = re.compile(r'.*\.(inc|h|hpp)\Z', re.I).match - -#### SWIG related auxiliary functions #### -_swig_module_name_match = re.compile(r'\s*%module\s*(.*\(\s*package\s*=\s*"(?P[\w_]+)".*\)|)\s*(?P[\w_]+)', - re.I).match -_has_c_header = re.compile(r'-\*-\s*c\s*-\*-', re.I).search -_has_cpp_header = 
re.compile(r'-\*-\s*c\+\+\s*-\*-', re.I).search - -def get_swig_target(source): - with open(source) as f: - result = None - line = f.readline() - if _has_cpp_header(line): - result = 'c++' - if _has_c_header(line): - result = 'c' - return result - -def get_swig_modulename(source): - with open(source) as f: - name = None - for line in f: - m = _swig_module_name_match(line) - if m: - name = m.group('name') - break - return name - -def _find_swig_target(target_dir, name): - for ext in ['.cpp', '.c']: - target = os.path.join(target_dir, '%s_wrap%s' % (name, ext)) - if os.path.isfile(target): - break - return target - -#### F2PY related auxiliary functions #### - -_f2py_module_name_match = re.compile(r'\s*python\s*module\s*(?P[\w_]+)', - re.I).match -_f2py_user_module_name_match = re.compile(r'\s*python\s*module\s*(?P[\w_]*?' - r'__user__[\w_]*)', re.I).match - -def get_f2py_modulename(source): - name = None - with open(source) as f: - for line in f: - m = _f2py_module_name_match(line) - if m: - if _f2py_user_module_name_match(line): # skip *__user__* names - continue - name = m.group('name') - break - return name - -########################################## diff --git a/numpy/distutils/command/config.py b/numpy/distutils/command/config.py deleted file mode 100644 index 8bdfb7ec5823..000000000000 --- a/numpy/distutils/command/config.py +++ /dev/null @@ -1,516 +0,0 @@ -# Added Fortran compiler support to config. Currently useful only for -# try_compile call. try_run works but is untested for most of Fortran -# compilers (they must define linker_exe first). 
-# Pearu Peterson -import os -import signal -import subprocess -import sys -import textwrap -import warnings - -from distutils.command.config import config as old_config -from distutils.command.config import LANG_EXT -from distutils import log -from distutils.file_util import copy_file -from distutils.ccompiler import CompileError, LinkError -import distutils -from numpy.distutils.exec_command import filepath_from_subprocess_output -from numpy.distutils.mingw32ccompiler import generate_manifest -from numpy.distutils.command.autodist import (check_gcc_function_attribute, - check_gcc_function_attribute_with_intrinsics, - check_gcc_variable_attribute, - check_gcc_version_at_least, - check_inline, - check_restrict, - check_compiler_gcc) - -LANG_EXT['f77'] = '.f' -LANG_EXT['f90'] = '.f90' - -class config(old_config): - old_config.user_options += [ - ('fcompiler=', None, "specify the Fortran compiler type"), - ] - - def initialize_options(self): - self.fcompiler = None - old_config.initialize_options(self) - - def _check_compiler (self): - old_config._check_compiler(self) - from numpy.distutils.fcompiler import FCompiler, new_fcompiler - - if sys.platform == 'win32' and (self.compiler.compiler_type in - ('msvc', 'intelw', 'intelemw')): - # XXX: hack to circumvent a python 2.6 bug with msvc9compiler: - # initialize call query_vcvarsall, which throws an OSError, and - # causes an error along the way without much information. We try to - # catch it here, hoping it is early enough, and print a helpful - # message instead of Error: None. - if not self.compiler.initialized: - try: - self.compiler.initialize() - except OSError as e: - msg = textwrap.dedent("""\ - Could not initialize compiler instance: do you have Visual Studio - installed? If you are trying to build with MinGW, please use "python setup.py - build -c mingw32" instead. If you have Visual Studio installed, check it is - correctly installed, and the right version (VS 2015 as of this writing). 
- - Original exception was: %s, and the Compiler class was %s - ============================================================================""") \ - % (e, self.compiler.__class__.__name__) - print(textwrap.dedent("""\ - ============================================================================""")) - raise distutils.errors.DistutilsPlatformError(msg) from e - - # After MSVC is initialized, add an explicit /MANIFEST to linker - # flags. See issues gh-4245 and gh-4101 for details. Also - # relevant are issues 4431 and 16296 on the Python bug tracker. - from distutils import msvc9compiler - if msvc9compiler.get_build_version() >= 10: - for ldflags in [self.compiler.ldflags_shared, - self.compiler.ldflags_shared_debug]: - if '/MANIFEST' not in ldflags: - ldflags.append('/MANIFEST') - - if not isinstance(self.fcompiler, FCompiler): - self.fcompiler = new_fcompiler(compiler=self.fcompiler, - dry_run=self.dry_run, force=1, - c_compiler=self.compiler) - if self.fcompiler is not None: - self.fcompiler.customize(self.distribution) - if self.fcompiler.get_version(): - self.fcompiler.customize_cmd(self) - self.fcompiler.show_customization() - - def _wrap_method(self, mth, lang, args): - from distutils.ccompiler import CompileError - from distutils.errors import DistutilsExecError - save_compiler = self.compiler - if lang in ['f77', 'f90']: - self.compiler = self.fcompiler - if self.compiler is None: - raise CompileError('%s compiler is not set' % (lang,)) - try: - ret = mth(*((self,)+args)) - except (DistutilsExecError, CompileError) as e: - self.compiler = save_compiler - raise CompileError from e - self.compiler = save_compiler - return ret - - def _compile (self, body, headers, include_dirs, lang): - src, obj = self._wrap_method(old_config._compile, lang, - (body, headers, include_dirs, lang)) - # _compile in unixcompiler.py sometimes creates .d dependency files. - # Clean them up. 
- self.temp_files.append(obj + '.d') - return src, obj - - def _link (self, body, - headers, include_dirs, - libraries, library_dirs, lang): - if self.compiler.compiler_type=='msvc': - libraries = (libraries or [])[:] - library_dirs = (library_dirs or [])[:] - if lang in ['f77', 'f90']: - lang = 'c' # always use system linker when using MSVC compiler - if self.fcompiler: - for d in self.fcompiler.library_dirs or []: - # correct path when compiling in Cygwin but with - # normal Win Python - if d.startswith('/usr/lib'): - try: - d = subprocess.check_output(['cygpath', - '-w', d]) - except (OSError, subprocess.CalledProcessError): - pass - else: - d = filepath_from_subprocess_output(d) - library_dirs.append(d) - for libname in self.fcompiler.libraries or []: - if libname not in libraries: - libraries.append(libname) - for libname in libraries: - if libname.startswith('msvc'): continue - fileexists = False - for libdir in library_dirs or []: - libfile = os.path.join(libdir, '%s.lib' % (libname)) - if os.path.isfile(libfile): - fileexists = True - break - if fileexists: continue - # make g77-compiled static libs available to MSVC - fileexists = False - for libdir in library_dirs: - libfile = os.path.join(libdir, 'lib%s.a' % (libname)) - if os.path.isfile(libfile): - # copy libname.a file to name.lib so that MSVC linker - # can find it - libfile2 = os.path.join(libdir, '%s.lib' % (libname)) - copy_file(libfile, libfile2) - self.temp_files.append(libfile2) - fileexists = True - break - if fileexists: continue - log.warn('could not find library %r in directories %s' \ - % (libname, library_dirs)) - elif self.compiler.compiler_type == 'mingw32': - generate_manifest(self) - return self._wrap_method(old_config._link, lang, - (body, headers, include_dirs, - libraries, library_dirs, lang)) - - def check_header(self, header, include_dirs=None, library_dirs=None, lang='c'): - self._check_compiler() - return self.try_compile( - "/* we need a dummy line to make distutils happy */", 
- [header], include_dirs) - - def check_decl(self, symbol, - headers=None, include_dirs=None): - self._check_compiler() - body = textwrap.dedent(""" - int main(void) - { - #ifndef %s - (void) %s; - #endif - ; - return 0; - }""") % (symbol, symbol) - - return self.try_compile(body, headers, include_dirs) - - def check_macro_true(self, symbol, - headers=None, include_dirs=None): - self._check_compiler() - body = textwrap.dedent(""" - int main(void) - { - #if %s - #else - #error false or undefined macro - #endif - ; - return 0; - }""") % (symbol,) - - return self.try_compile(body, headers, include_dirs) - - def check_type(self, type_name, headers=None, include_dirs=None, - library_dirs=None): - """Check type availability. Return True if the type can be compiled, - False otherwise""" - self._check_compiler() - - # First check the type can be compiled - body = textwrap.dedent(r""" - int main(void) { - if ((%(name)s *) 0) - return 0; - if (sizeof (%(name)s)) - return 0; - } - """) % {'name': type_name} - - st = False - try: - try: - self._compile(body % {'type': type_name}, - headers, include_dirs, 'c') - st = True - except distutils.errors.CompileError: - st = False - finally: - self._clean() - - return st - - def check_type_size(self, type_name, headers=None, include_dirs=None, library_dirs=None, expected=None): - """Check size of a given type.""" - self._check_compiler() - - # First check the type can be compiled - body = textwrap.dedent(r""" - typedef %(type)s npy_check_sizeof_type; - int main (void) - { - static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) >= 0)]; - test_array [0] = 0 - - ; - return 0; - } - """) - self._compile(body % {'type': type_name}, - headers, include_dirs, 'c') - self._clean() - - if expected: - body = textwrap.dedent(r""" - typedef %(type)s npy_check_sizeof_type; - int main (void) - { - static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) == %(size)s)]; - test_array [0] = 0 - - ; - return 0; - } 
- """) - for size in expected: - try: - self._compile(body % {'type': type_name, 'size': size}, - headers, include_dirs, 'c') - self._clean() - return size - except CompileError: - pass - - # this fails to *compile* if size > sizeof(type) - body = textwrap.dedent(r""" - typedef %(type)s npy_check_sizeof_type; - int main (void) - { - static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) <= %(size)s)]; - test_array [0] = 0 - - ; - return 0; - } - """) - - # The principle is simple: we first find low and high bounds of size - # for the type, where low/high are looked up on a log scale. Then, we - # do a binary search to find the exact size between low and high - low = 0 - mid = 0 - while True: - try: - self._compile(body % {'type': type_name, 'size': mid}, - headers, include_dirs, 'c') - self._clean() - break - except CompileError: - #log.info("failure to test for bound %d" % mid) - low = mid + 1 - mid = 2 * mid + 1 - - high = mid - # Binary search: - while low != high: - mid = (high - low) // 2 + low - try: - self._compile(body % {'type': type_name, 'size': mid}, - headers, include_dirs, 'c') - self._clean() - high = mid - except CompileError: - low = mid + 1 - return low - - def check_func(self, func, - headers=None, include_dirs=None, - libraries=None, library_dirs=None, - decl=False, call=False, call_args=None): - # clean up distutils's config a bit: add void to main(), and - # return a value. - self._check_compiler() - body = [] - if decl: - if type(decl) == str: - body.append(decl) - else: - body.append("int %s (void);" % func) - # Handle MSVC intrinsics: force MS compiler to make a function call. - # Useful to test for some functions when built with optimization on, to - # avoid build error because the intrinsic and our 'fake' test - # declaration do not match. 
- body.append("#ifdef _MSC_VER") - body.append("#pragma function(%s)" % func) - body.append("#endif") - body.append("int main (void) {") - if call: - if call_args is None: - call_args = '' - body.append(" %s(%s);" % (func, call_args)) - else: - body.append(" %s;" % func) - body.append(" return 0;") - body.append("}") - body = '\n'.join(body) + "\n" - - return self.try_link(body, headers, include_dirs, - libraries, library_dirs) - - def check_funcs_once(self, funcs, - headers=None, include_dirs=None, - libraries=None, library_dirs=None, - decl=False, call=False, call_args=None): - """Check a list of functions at once. - - This is useful to speed up things, since all the functions in the funcs - list will be put in one compilation unit. - - Arguments - --------- - funcs : seq - list of functions to test - include_dirs : seq - list of header paths - libraries : seq - list of libraries to link the code snippet to - library_dirs : seq - list of library paths - decl : dict - for every (key, value), the declaration in the value will be - used for function in key. If a function is not in the - dictionary, no declaration will be used. - call : dict - for every item (f, value), if the value is True, a call will be - done to the function f. - """ - self._check_compiler() - body = [] - if decl: - for f, v in decl.items(): - if v: - body.append("int %s (void);" % f) - - # Handle MS intrinsics. See check_func for more info. 
- body.append("#ifdef _MSC_VER") - for func in funcs: - body.append("#pragma function(%s)" % func) - body.append("#endif") - - body.append("int main (void) {") - if call: - for f in funcs: - if f in call and call[f]: - if not (call_args and f in call_args and call_args[f]): - args = '' - else: - args = call_args[f] - body.append(" %s(%s);" % (f, args)) - else: - body.append(" %s;" % f) - else: - for f in funcs: - body.append(" %s;" % f) - body.append(" return 0;") - body.append("}") - body = '\n'.join(body) + "\n" - - return self.try_link(body, headers, include_dirs, - libraries, library_dirs) - - def check_inline(self): - """Return the inline keyword recognized by the compiler, empty string - otherwise.""" - return check_inline(self) - - def check_restrict(self): - """Return the restrict keyword recognized by the compiler, empty string - otherwise.""" - return check_restrict(self) - - def check_compiler_gcc(self): - """Return True if the C compiler is gcc""" - return check_compiler_gcc(self) - - def check_gcc_function_attribute(self, attribute, name): - return check_gcc_function_attribute(self, attribute, name) - - def check_gcc_function_attribute_with_intrinsics(self, attribute, name, - code, include): - return check_gcc_function_attribute_with_intrinsics(self, attribute, - name, code, include) - - def check_gcc_variable_attribute(self, attribute): - return check_gcc_variable_attribute(self, attribute) - - def check_gcc_version_at_least(self, major, minor=0, patchlevel=0): - """Return True if the GCC version is greater than or equal to the - specified version.""" - return check_gcc_version_at_least(self, major, minor, patchlevel) - - def get_output(self, body, headers=None, include_dirs=None, - libraries=None, library_dirs=None, - lang="c", use_tee=None): - """Try to compile, link to an executable, and run a program - built from 'body' and 'headers'. Returns the exit status code - of the program and its output. 
- """ - # 2008-11-16, RemoveMe - warnings.warn("\n+++++++++++++++++++++++++++++++++++++++++++++++++\n" - "Usage of get_output is deprecated: please do not \n" - "use it anymore, and avoid configuration checks \n" - "involving running executable on the target machine.\n" - "+++++++++++++++++++++++++++++++++++++++++++++++++\n", - DeprecationWarning, stacklevel=2) - self._check_compiler() - exitcode, output = 255, '' - try: - grabber = GrabStdout() - try: - src, obj, exe = self._link(body, headers, include_dirs, - libraries, library_dirs, lang) - grabber.restore() - except Exception: - output = grabber.data - grabber.restore() - raise - exe = os.path.join('.', exe) - try: - # specify cwd arg for consistency with - # historic usage pattern of exec_command() - # also, note that exe appears to be a string, - # which exec_command() handled, but we now - # use a list for check_output() -- this assumes - # that exe is always a single command - output = subprocess.check_output([exe], cwd='.') - except subprocess.CalledProcessError as exc: - exitstatus = exc.returncode - output = '' - except OSError: - # preserve the EnvironmentError exit status - # used historically in exec_command() - exitstatus = 127 - output = '' - else: - output = filepath_from_subprocess_output(output) - if hasattr(os, 'WEXITSTATUS'): - exitcode = os.WEXITSTATUS(exitstatus) - if os.WIFSIGNALED(exitstatus): - sig = os.WTERMSIG(exitstatus) - log.error('subprocess exited with signal %d' % (sig,)) - if sig == signal.SIGINT: - # control-C - raise KeyboardInterrupt - else: - exitcode = exitstatus - log.info("success!") - except (CompileError, LinkError): - log.info("failure.") - self._clean() - return exitcode, output - -class GrabStdout: - - def __init__(self): - self.sys_stdout = sys.stdout - self.data = '' - sys.stdout = self - - def write (self, data): - self.sys_stdout.write(data) - self.data += data - - def flush (self): - self.sys_stdout.flush() - - def restore(self): - sys.stdout = self.sys_stdout 
diff --git a/numpy/distutils/command/config_compiler.py b/numpy/distutils/command/config_compiler.py deleted file mode 100644 index ca4099886d8c..000000000000 --- a/numpy/distutils/command/config_compiler.py +++ /dev/null @@ -1,126 +0,0 @@ -from distutils.core import Command -from numpy.distutils import log - -#XXX: Linker flags - -def show_fortran_compilers(_cache=None): - # Using cache to prevent infinite recursion. - if _cache: - return - elif _cache is None: - _cache = [] - _cache.append(1) - from numpy.distutils.fcompiler import show_fcompilers - import distutils.core - dist = distutils.core._setup_distribution - show_fcompilers(dist) - -class config_fc(Command): - """ Distutils command to hold user specified options - to Fortran compilers. - - config_fc command is used by the FCompiler.customize() method. - """ - - description = "specify Fortran 77/Fortran 90 compiler information" - - user_options = [ - ('fcompiler=', None, "specify Fortran compiler type"), - ('f77exec=', None, "specify F77 compiler command"), - ('f90exec=', None, "specify F90 compiler command"), - ('f77flags=', None, "specify F77 compiler flags"), - ('f90flags=', None, "specify F90 compiler flags"), - ('opt=', None, "specify optimization flags"), - ('arch=', None, "specify architecture specific optimization flags"), - ('debug', 'g', "compile with debugging information"), - ('noopt', None, "compile without optimization"), - ('noarch', None, "compile without arch-dependent optimization"), - ] - - help_options = [ - ('help-fcompiler', None, "list available Fortran compilers", - show_fortran_compilers), - ] - - boolean_options = ['debug', 'noopt', 'noarch'] - - def initialize_options(self): - self.fcompiler = None - self.f77exec = None - self.f90exec = None - self.f77flags = None - self.f90flags = None - self.opt = None - self.arch = None - self.debug = None - self.noopt = None - self.noarch = None - - def finalize_options(self): - log.info('unifying config_fc, config, build_clib, build_ext, 
build commands --fcompiler options') - build_clib = self.get_finalized_command('build_clib') - build_ext = self.get_finalized_command('build_ext') - config = self.get_finalized_command('config') - build = self.get_finalized_command('build') - cmd_list = [self, config, build_clib, build_ext, build] - for a in ['fcompiler']: - l = [] - for c in cmd_list: - v = getattr(c, a) - if v is not None: - if not isinstance(v, str): v = v.compiler_type - if v not in l: l.append(v) - if not l: v1 = None - else: v1 = l[0] - if len(l)>1: - log.warn(' commands have different --%s options: %s'\ - ', using first in list as default' % (a, l)) - if v1: - for c in cmd_list: - if getattr(c, a) is None: setattr(c, a, v1) - - def run(self): - # Do nothing. - return - -class config_cc(Command): - """ Distutils command to hold user specified options - to C/C++ compilers. - """ - - description = "specify C/C++ compiler information" - - user_options = [ - ('compiler=', None, "specify C/C++ compiler type"), - ] - - def initialize_options(self): - self.compiler = None - - def finalize_options(self): - log.info('unifying config_cc, config, build_clib, build_ext, build commands --compiler options') - build_clib = self.get_finalized_command('build_clib') - build_ext = self.get_finalized_command('build_ext') - config = self.get_finalized_command('config') - build = self.get_finalized_command('build') - cmd_list = [self, config, build_clib, build_ext, build] - for a in ['compiler']: - l = [] - for c in cmd_list: - v = getattr(c, a) - if v is not None: - if not isinstance(v, str): v = v.compiler_type - if v not in l: l.append(v) - if not l: v1 = None - else: v1 = l[0] - if len(l)>1: - log.warn(' commands have different --%s options: %s'\ - ', using first in list as default' % (a, l)) - if v1: - for c in cmd_list: - if getattr(c, a) is None: setattr(c, a, v1) - return - - def run(self): - # Do nothing. 
- return diff --git a/numpy/distutils/command/develop.py b/numpy/distutils/command/develop.py deleted file mode 100644 index af24baf2e7e1..000000000000 --- a/numpy/distutils/command/develop.py +++ /dev/null @@ -1,15 +0,0 @@ -""" Override the develop command from setuptools so we can ensure that our -generated files (from build_src or build_scripts) are properly converted to real -files with filenames. - -""" -from setuptools.command.develop import develop as old_develop - -class develop(old_develop): - __doc__ = old_develop.__doc__ - def install_for_development(self): - # Build sources in-place, too. - self.reinitialize_command('build_src', inplace=1) - # Make sure scripts are built. - self.run_command('build_scripts') - old_develop.install_for_development(self) diff --git a/numpy/distutils/command/egg_info.py b/numpy/distutils/command/egg_info.py deleted file mode 100644 index 14c62b4d1b90..000000000000 --- a/numpy/distutils/command/egg_info.py +++ /dev/null @@ -1,25 +0,0 @@ -import sys - -from setuptools.command.egg_info import egg_info as _egg_info - -class egg_info(_egg_info): - def run(self): - if 'sdist' in sys.argv: - import warnings - import textwrap - msg = textwrap.dedent(""" - `build_src` is being run, this may lead to missing - files in your sdist! You want to use distutils.sdist - instead of the setuptools version: - - from distutils.command.sdist import sdist - cmdclass={'sdist': sdist}" - - See numpy's setup.py or gh-7131 for details.""") - warnings.warn(msg, UserWarning, stacklevel=2) - - # We need to ensure that build_src has been executed in order to give - # setuptools' egg_info command real filenames instead of functions which - # generate files. 
- self.run_command("build_src") - _egg_info.run(self) diff --git a/numpy/distutils/command/install.py b/numpy/distutils/command/install.py deleted file mode 100644 index efa9b4740fc4..000000000000 --- a/numpy/distutils/command/install.py +++ /dev/null @@ -1,79 +0,0 @@ -import sys -if 'setuptools' in sys.modules: - import setuptools.command.install as old_install_mod - have_setuptools = True -else: - import distutils.command.install as old_install_mod - have_setuptools = False -from distutils.file_util import write_file - -old_install = old_install_mod.install - -class install(old_install): - - # Always run install_clib - the command is cheap, so no need to bypass it; - # but it's not run by setuptools -- so it's run again in install_data - sub_commands = old_install.sub_commands + [ - ('install_clib', lambda x: True) - ] - - def finalize_options (self): - old_install.finalize_options(self) - self.install_lib = self.install_libbase - - def setuptools_run(self): - """ The setuptools version of the .run() method. - - We must pull in the entire code so we can override the level used in the - _getframe() call since we wrap this call by one more level. - """ - from distutils.command.install import install as distutils_install - - # Explicit request for old-style install? Just do it - if self.old_and_unmanageable or self.single_version_externally_managed: - return distutils_install.run(self) - - # Attempt to detect whether we were called from setup() or by another - # command. If we were called by setup(), our caller will be the - # 'run_command' method in 'distutils.dist', and *its* caller will be - # the 'run_commands' method. If we were called any other way, our - # immediate caller *might* be 'run_command', but it won't have been - # called by 'run_commands'. This is slightly kludgy, but seems to - # work. 
- # - caller = sys._getframe(3) - caller_module = caller.f_globals.get('__name__', '') - caller_name = caller.f_code.co_name - - if caller_module != 'distutils.dist' or caller_name!='run_commands': - # We weren't called from the command line or setup(), so we - # should run in backward-compatibility mode to support bdist_* - # commands. - distutils_install.run(self) - else: - self.do_egg_install() - - def run(self): - if not have_setuptools: - r = old_install.run(self) - else: - r = self.setuptools_run() - if self.record: - # bdist_rpm fails when INSTALLED_FILES contains - # paths with spaces. Such paths must be enclosed - # with double-quotes. - with open(self.record) as f: - lines = [] - need_rewrite = False - for l in f: - l = l.rstrip() - if ' ' in l: - need_rewrite = True - l = '"%s"' % (l) - lines.append(l) - if need_rewrite: - self.execute(write_file, - (self.record, lines), - "re-writing list of installed files to '%s'" % - self.record) - return r diff --git a/numpy/distutils/command/install_clib.py b/numpy/distutils/command/install_clib.py deleted file mode 100644 index aa2e5594c3c2..000000000000 --- a/numpy/distutils/command/install_clib.py +++ /dev/null @@ -1,40 +0,0 @@ -import os -from distutils.core import Command -from distutils.ccompiler import new_compiler -from numpy.distutils.misc_util import get_cmd - -class install_clib(Command): - description = "Command to install installable C libraries" - - user_options = [] - - def initialize_options(self): - self.install_dir = None - self.outfiles = [] - - def finalize_options(self): - self.set_undefined_options('install', ('install_lib', 'install_dir')) - - def run (self): - build_clib_cmd = get_cmd("build_clib") - if not build_clib_cmd.build_clib: - # can happen if the user specified `--skip-build` - build_clib_cmd.finalize_options() - build_dir = build_clib_cmd.build_clib - - # We need the compiler to get the library name -> filename association - if not build_clib_cmd.compiler: - compiler = 
new_compiler(compiler=None) - compiler.customize(self.distribution) - else: - compiler = build_clib_cmd.compiler - - for l in self.distribution.installed_libraries: - target_dir = os.path.join(self.install_dir, l.target_dir) - name = compiler.library_filename(l.name) - source = os.path.join(build_dir, name) - self.mkpath(target_dir) - self.outfiles.append(self.copy_file(source, target_dir)[0]) - - def get_outputs(self): - return self.outfiles diff --git a/numpy/distutils/command/install_data.py b/numpy/distutils/command/install_data.py deleted file mode 100644 index 0a2e68ae192a..000000000000 --- a/numpy/distutils/command/install_data.py +++ /dev/null @@ -1,24 +0,0 @@ -import sys -have_setuptools = ('setuptools' in sys.modules) - -from distutils.command.install_data import install_data as old_install_data - -#data installer with improved intelligence over distutils -#data files are copied into the project directory instead -#of willy-nilly -class install_data (old_install_data): - - def run(self): - old_install_data.run(self) - - if have_setuptools: - # Run install_clib again, since setuptools does not run sub-commands - # of install automatically - self.run_command('install_clib') - - def finalize_options (self): - self.set_undefined_options('install', - ('install_lib', 'install_dir'), - ('root', 'root'), - ('force', 'force'), - ) diff --git a/numpy/distutils/command/install_headers.py b/numpy/distutils/command/install_headers.py deleted file mode 100644 index 91eba6f17c29..000000000000 --- a/numpy/distutils/command/install_headers.py +++ /dev/null @@ -1,25 +0,0 @@ -import os -from distutils.command.install_headers import install_headers as old_install_headers - -class install_headers (old_install_headers): - - def run (self): - headers = self.distribution.headers - if not headers: - return - - prefix = os.path.dirname(self.install_dir) - for header in headers: - if isinstance(header, tuple): - # Kind of a hack, but I don't know where else to change this... 
- if header[0] == 'numpy._core': - header = ('numpy', header[1]) - if os.path.splitext(header[1])[1] == '.inc': - continue - d = os.path.join(*([prefix]+header[0].split('.'))) - header = header[1] - else: - d = self.install_dir - self.mkpath(d) - (out, _) = self.copy_file(header, d) - self.outfiles.append(out) diff --git a/numpy/distutils/command/sdist.py b/numpy/distutils/command/sdist.py deleted file mode 100644 index e34193883dea..000000000000 --- a/numpy/distutils/command/sdist.py +++ /dev/null @@ -1,27 +0,0 @@ -import sys -if 'setuptools' in sys.modules: - from setuptools.command.sdist import sdist as old_sdist -else: - from distutils.command.sdist import sdist as old_sdist - -from numpy.distutils.misc_util import get_data_files - -class sdist(old_sdist): - - def add_defaults (self): - old_sdist.add_defaults(self) - - dist = self.distribution - - if dist.has_data_files(): - for data in dist.data_files: - self.filelist.extend(get_data_files(data)) - - if dist.has_headers(): - headers = [] - for h in dist.headers: - if isinstance(h, str): headers.append(h) - else: headers.append(h[1]) - self.filelist.extend(headers) - - return diff --git a/numpy/distutils/core.py b/numpy/distutils/core.py deleted file mode 100644 index c4a14e59901f..000000000000 --- a/numpy/distutils/core.py +++ /dev/null @@ -1,215 +0,0 @@ -import sys -from distutils.core import Distribution - -if 'setuptools' in sys.modules: - have_setuptools = True - from setuptools import setup as old_setup - # easy_install imports math, it may be picked up from cwd - from setuptools.command import easy_install - try: - # very old versions of setuptools don't have this - from setuptools.command import bdist_egg - except ImportError: - have_setuptools = False -else: - from distutils.core import setup as old_setup - have_setuptools = False - -import warnings -import distutils.core -import distutils.dist - -from numpy.distutils.extension import Extension # noqa: F401 -from numpy.distutils.numpy_distribution 
import NumpyDistribution -from numpy.distutils.command import config, config_compiler, \ - build, build_py, build_ext, build_clib, build_src, build_scripts, \ - sdist, install_data, install_headers, install, bdist_rpm, \ - install_clib -from numpy.distutils.misc_util import is_sequence, is_string - -numpy_cmdclass = {'build': build.build, - 'build_src': build_src.build_src, - 'build_scripts': build_scripts.build_scripts, - 'config_cc': config_compiler.config_cc, - 'config_fc': config_compiler.config_fc, - 'config': config.config, - 'build_ext': build_ext.build_ext, - 'build_py': build_py.build_py, - 'build_clib': build_clib.build_clib, - 'sdist': sdist.sdist, - 'install_data': install_data.install_data, - 'install_headers': install_headers.install_headers, - 'install_clib': install_clib.install_clib, - 'install': install.install, - 'bdist_rpm': bdist_rpm.bdist_rpm, - } -if have_setuptools: - # Use our own versions of develop and egg_info to ensure that build_src is - # handled appropriately. - from numpy.distutils.command import develop, egg_info - numpy_cmdclass['bdist_egg'] = bdist_egg.bdist_egg - numpy_cmdclass['develop'] = develop.develop - numpy_cmdclass['easy_install'] = easy_install.easy_install - numpy_cmdclass['egg_info'] = egg_info.egg_info - -def _dict_append(d, **kws): - for k, v in kws.items(): - if k not in d: - d[k] = v - continue - dv = d[k] - if isinstance(dv, tuple): - d[k] = dv + tuple(v) - elif isinstance(dv, list): - d[k] = dv + list(v) - elif isinstance(dv, dict): - _dict_append(dv, **v) - elif is_string(dv): - d[k] = dv + v - else: - raise TypeError(repr(type(dv))) - -def _command_line_ok(_cache=None): - """ Return True if command line does not contain any - help or display requests. 
- """ - if _cache: - return _cache[0] - elif _cache is None: - _cache = [] - ok = True - display_opts = ['--'+n for n in Distribution.display_option_names] - for o in Distribution.display_options: - if o[1]: - display_opts.append('-'+o[1]) - for arg in sys.argv: - if arg.startswith('--help') or arg=='-h' or arg in display_opts: - ok = False - break - _cache.append(ok) - return ok - -def get_distribution(always=False): - dist = distutils.core._setup_distribution - # XXX Hack to get numpy installable with easy_install. - # The problem is easy_install runs it's own setup(), which - # sets up distutils.core._setup_distribution. However, - # when our setup() runs, that gets overwritten and lost. - # We can't use isinstance, as the DistributionWithoutHelpCommands - # class is local to a function in setuptools.command.easy_install - if dist is not None and \ - 'DistributionWithoutHelpCommands' in repr(dist): - dist = None - if always and dist is None: - dist = NumpyDistribution() - return dist - -def setup(**attr): - - cmdclass = numpy_cmdclass.copy() - - new_attr = attr.copy() - if 'cmdclass' in new_attr: - cmdclass.update(new_attr['cmdclass']) - new_attr['cmdclass'] = cmdclass - - if 'configuration' in new_attr: - # To avoid calling configuration if there are any errors - # or help request in command in the line. 
- configuration = new_attr.pop('configuration') - - old_dist = distutils.core._setup_distribution - old_stop = distutils.core._setup_stop_after - distutils.core._setup_distribution = None - distutils.core._setup_stop_after = "commandline" - try: - dist = setup(**new_attr) - finally: - distutils.core._setup_distribution = old_dist - distutils.core._setup_stop_after = old_stop - if dist.help or not _command_line_ok(): - # probably displayed help, skip running any commands - return dist - - # create setup dictionary and append to new_attr - config = configuration() - if hasattr(config, 'todict'): - config = config.todict() - _dict_append(new_attr, **config) - - # Move extension source libraries to libraries - libraries = [] - for ext in new_attr.get('ext_modules', []): - new_libraries = [] - for item in ext.libraries: - if is_sequence(item): - lib_name, build_info = item - _check_append_ext_library(libraries, lib_name, build_info) - new_libraries.append(lib_name) - elif is_string(item): - new_libraries.append(item) - else: - raise TypeError("invalid description of extension module " - "library %r" % (item,)) - ext.libraries = new_libraries - if libraries: - if 'libraries' not in new_attr: - new_attr['libraries'] = [] - for item in libraries: - _check_append_library(new_attr['libraries'], item) - - # sources in ext_modules or libraries may contain header files - if ('ext_modules' in new_attr or 'libraries' in new_attr) \ - and 'headers' not in new_attr: - new_attr['headers'] = [] - - # Use our custom NumpyDistribution class instead of distutils' one - new_attr['distclass'] = NumpyDistribution - - return old_setup(**new_attr) - -def _check_append_library(libraries, item): - for libitem in libraries: - if is_sequence(libitem): - if is_sequence(item): - if item[0]==libitem[0]: - if item[1] is libitem[1]: - return - warnings.warn("[0] libraries list contains %r with" - " different build_info" % (item[0],), - stacklevel=2) - break - else: - if item==libitem[0]: - 
warnings.warn("[1] libraries list contains %r with" - " no build_info" % (item[0],), - stacklevel=2) - break - else: - if is_sequence(item): - if item[0]==libitem: - warnings.warn("[2] libraries list contains %r with" - " no build_info" % (item[0],), - stacklevel=2) - break - else: - if item==libitem: - return - libraries.append(item) - -def _check_append_ext_library(libraries, lib_name, build_info): - for item in libraries: - if is_sequence(item): - if item[0]==lib_name: - if item[1] is build_info: - return - warnings.warn("[3] libraries list contains %r with" - " different build_info" % (lib_name,), - stacklevel=2) - break - elif item==lib_name: - warnings.warn("[4] libraries list contains %r with" - " no build_info" % (lib_name,), - stacklevel=2) - break - libraries.append((lib_name, build_info)) diff --git a/numpy/distutils/cpuinfo.py b/numpy/distutils/cpuinfo.py deleted file mode 100644 index 77620210981d..000000000000 --- a/numpy/distutils/cpuinfo.py +++ /dev/null @@ -1,683 +0,0 @@ -#!/usr/bin/env python3 -""" -cpuinfo - -Copyright 2002 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy (BSD style) license. See LICENSE.txt that came with -this distribution for specifics. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. 
-Pearu Peterson - -""" -__all__ = ['cpu'] - -import os -import platform -import re -import sys -import types -import warnings - -from subprocess import getstatusoutput - - -def getoutput(cmd, successful_status=(0,), stacklevel=1): - try: - status, output = getstatusoutput(cmd) - except OSError as e: - warnings.warn(str(e), UserWarning, stacklevel=stacklevel) - return False, "" - if os.WIFEXITED(status) and os.WEXITSTATUS(status) in successful_status: - return True, output - return False, output - -def command_info(successful_status=(0,), stacklevel=1, **kw): - info = {} - for key in kw: - ok, output = getoutput(kw[key], successful_status=successful_status, - stacklevel=stacklevel+1) - if ok: - info[key] = output.strip() - return info - -def command_by_line(cmd, successful_status=(0,), stacklevel=1): - ok, output = getoutput(cmd, successful_status=successful_status, - stacklevel=stacklevel+1) - if not ok: - return - for line in output.splitlines(): - yield line.strip() - -def key_value_from_command(cmd, sep, successful_status=(0,), - stacklevel=1): - d = {} - for line in command_by_line(cmd, successful_status=successful_status, - stacklevel=stacklevel+1): - l = [s.strip() for s in line.split(sep, 1)] - if len(l) == 2: - d[l[0]] = l[1] - return d - -class CPUInfoBase: - """Holds CPU information and provides methods for requiring - the availability of various CPU features. 
- """ - - def _try_call(self, func): - try: - return func() - except Exception: - pass - - def __getattr__(self, name): - if not name.startswith('_'): - if hasattr(self, '_'+name): - attr = getattr(self, '_'+name) - if isinstance(attr, types.MethodType): - return lambda func=self._try_call,attr=attr : func(attr) - else: - return lambda : None - raise AttributeError(name) - - def _getNCPUs(self): - return 1 - - def __get_nbits(self): - abits = platform.architecture()[0] - nbits = re.compile(r'(\d+)bit').search(abits).group(1) - return nbits - - def _is_32bit(self): - return self.__get_nbits() == '32' - - def _is_64bit(self): - return self.__get_nbits() == '64' - -class LinuxCPUInfo(CPUInfoBase): - - info = None - - def __init__(self): - if self.info is not None: - return - info = [ {} ] - ok, output = getoutput('uname -m') - if ok: - info[0]['uname_m'] = output.strip() - try: - fo = open('/proc/cpuinfo') - except OSError as e: - warnings.warn(str(e), UserWarning, stacklevel=2) - else: - for line in fo: - name_value = [s.strip() for s in line.split(':', 1)] - if len(name_value) != 2: - continue - name, value = name_value - if not info or name in info[-1]: # next processor - info.append({}) - info[-1][name] = value - fo.close() - self.__class__.info = info - - def _not_impl(self): pass - - # Athlon - - def _is_AMD(self): - return self.info[0]['vendor_id']=='AuthenticAMD' - - def _is_AthlonK6_2(self): - return self._is_AMD() and self.info[0]['model'] == '2' - - def _is_AthlonK6_3(self): - return self._is_AMD() and self.info[0]['model'] == '3' - - def _is_AthlonK6(self): - return re.match(r'.*?AMD-K6', self.info[0]['model name']) is not None - - def _is_AthlonK7(self): - return re.match(r'.*?AMD-K7', self.info[0]['model name']) is not None - - def _is_AthlonMP(self): - return re.match(r'.*?Athlon\(tm\) MP\b', - self.info[0]['model name']) is not None - - def _is_AMD64(self): - return self.is_AMD() and self.info[0]['family'] == '15' - - def _is_Athlon64(self): - return 
re.match(r'.*?Athlon\(tm\) 64\b', - self.info[0]['model name']) is not None - - def _is_AthlonHX(self): - return re.match(r'.*?Athlon HX\b', - self.info[0]['model name']) is not None - - def _is_Opteron(self): - return re.match(r'.*?Opteron\b', - self.info[0]['model name']) is not None - - def _is_Hammer(self): - return re.match(r'.*?Hammer\b', - self.info[0]['model name']) is not None - - # Alpha - - def _is_Alpha(self): - return self.info[0]['cpu']=='Alpha' - - def _is_EV4(self): - return self.is_Alpha() and self.info[0]['cpu model'] == 'EV4' - - def _is_EV5(self): - return self.is_Alpha() and self.info[0]['cpu model'] == 'EV5' - - def _is_EV56(self): - return self.is_Alpha() and self.info[0]['cpu model'] == 'EV56' - - def _is_PCA56(self): - return self.is_Alpha() and self.info[0]['cpu model'] == 'PCA56' - - # Intel - - #XXX - _is_i386 = _not_impl - - def _is_Intel(self): - return self.info[0]['vendor_id']=='GenuineIntel' - - def _is_i486(self): - return self.info[0]['cpu']=='i486' - - def _is_i586(self): - return self.is_Intel() and self.info[0]['cpu family'] == '5' - - def _is_i686(self): - return self.is_Intel() and self.info[0]['cpu family'] == '6' - - def _is_Celeron(self): - return re.match(r'.*?Celeron', - self.info[0]['model name']) is not None - - def _is_Pentium(self): - return re.match(r'.*?Pentium', - self.info[0]['model name']) is not None - - def _is_PentiumII(self): - return re.match(r'.*?Pentium.*?II\b', - self.info[0]['model name']) is not None - - def _is_PentiumPro(self): - return re.match(r'.*?PentiumPro\b', - self.info[0]['model name']) is not None - - def _is_PentiumMMX(self): - return re.match(r'.*?Pentium.*?MMX\b', - self.info[0]['model name']) is not None - - def _is_PentiumIII(self): - return re.match(r'.*?Pentium.*?III\b', - self.info[0]['model name']) is not None - - def _is_PentiumIV(self): - return re.match(r'.*?Pentium.*?(IV|4)\b', - self.info[0]['model name']) is not None - - def _is_PentiumM(self): - return 
re.match(r'.*?Pentium.*?M\b', - self.info[0]['model name']) is not None - - def _is_Prescott(self): - return self.is_PentiumIV() and self.has_sse3() - - def _is_Nocona(self): - return (self.is_Intel() - and (self.info[0]['cpu family'] == '6' - or self.info[0]['cpu family'] == '15') - and (self.has_sse3() and not self.has_ssse3()) - and re.match(r'.*?\blm\b', self.info[0]['flags']) is not None) - - def _is_Core2(self): - return (self.is_64bit() and self.is_Intel() and - re.match(r'.*?Core\(TM\)2\b', - self.info[0]['model name']) is not None) - - def _is_Itanium(self): - return re.match(r'.*?Itanium\b', - self.info[0]['family']) is not None - - def _is_XEON(self): - return re.match(r'.*?XEON\b', - self.info[0]['model name'], re.IGNORECASE) is not None - - _is_Xeon = _is_XEON - - # Varia - - def _is_singleCPU(self): - return len(self.info) == 1 - - def _getNCPUs(self): - return len(self.info) - - def _has_fdiv_bug(self): - return self.info[0]['fdiv_bug']=='yes' - - def _has_f00f_bug(self): - return self.info[0]['f00f_bug']=='yes' - - def _has_mmx(self): - return re.match(r'.*?\bmmx\b', self.info[0]['flags']) is not None - - def _has_sse(self): - return re.match(r'.*?\bsse\b', self.info[0]['flags']) is not None - - def _has_sse2(self): - return re.match(r'.*?\bsse2\b', self.info[0]['flags']) is not None - - def _has_sse3(self): - return re.match(r'.*?\bpni\b', self.info[0]['flags']) is not None - - def _has_ssse3(self): - return re.match(r'.*?\bssse3\b', self.info[0]['flags']) is not None - - def _has_3dnow(self): - return re.match(r'.*?\b3dnow\b', self.info[0]['flags']) is not None - - def _has_3dnowext(self): - return re.match(r'.*?\b3dnowext\b', self.info[0]['flags']) is not None - -class IRIXCPUInfo(CPUInfoBase): - info = None - - def __init__(self): - if self.info is not None: - return - info = key_value_from_command('sysconf', sep=' ', - successful_status=(0, 1)) - self.__class__.info = info - - def _not_impl(self): pass - - def _is_singleCPU(self): - return 
self.info.get('NUM_PROCESSORS') == '1' - - def _getNCPUs(self): - return int(self.info.get('NUM_PROCESSORS', 1)) - - def __cputype(self, n): - return self.info.get('PROCESSORS').split()[0].lower() == 'r%s' % (n) - def _is_r2000(self): return self.__cputype(2000) - def _is_r3000(self): return self.__cputype(3000) - def _is_r3900(self): return self.__cputype(3900) - def _is_r4000(self): return self.__cputype(4000) - def _is_r4100(self): return self.__cputype(4100) - def _is_r4300(self): return self.__cputype(4300) - def _is_r4400(self): return self.__cputype(4400) - def _is_r4600(self): return self.__cputype(4600) - def _is_r4650(self): return self.__cputype(4650) - def _is_r5000(self): return self.__cputype(5000) - def _is_r6000(self): return self.__cputype(6000) - def _is_r8000(self): return self.__cputype(8000) - def _is_r10000(self): return self.__cputype(10000) - def _is_r12000(self): return self.__cputype(12000) - def _is_rorion(self): return self.__cputype('orion') - - def get_ip(self): - try: return self.info.get('MACHINE') - except Exception: pass - def __machine(self, n): - return self.info.get('MACHINE').lower() == 'ip%s' % (n) - def _is_IP19(self): return self.__machine(19) - def _is_IP20(self): return self.__machine(20) - def _is_IP21(self): return self.__machine(21) - def _is_IP22(self): return self.__machine(22) - def _is_IP22_4k(self): return self.__machine(22) and self._is_r4000() - def _is_IP22_5k(self): return self.__machine(22) and self._is_r5000() - def _is_IP24(self): return self.__machine(24) - def _is_IP25(self): return self.__machine(25) - def _is_IP26(self): return self.__machine(26) - def _is_IP27(self): return self.__machine(27) - def _is_IP28(self): return self.__machine(28) - def _is_IP30(self): return self.__machine(30) - def _is_IP32(self): return self.__machine(32) - def _is_IP32_5k(self): return self.__machine(32) and self._is_r5000() - def _is_IP32_10k(self): return self.__machine(32) and self._is_r10000() - - -class 
DarwinCPUInfo(CPUInfoBase): - info = None - - def __init__(self): - if self.info is not None: - return - info = command_info(arch='arch', - machine='machine') - info['sysctl_hw'] = key_value_from_command('sysctl hw', sep='=') - self.__class__.info = info - - def _not_impl(self): pass - - def _getNCPUs(self): - return int(self.info['sysctl_hw'].get('hw.ncpu', 1)) - - def _is_Power_Macintosh(self): - return self.info['sysctl_hw']['hw.machine']=='Power Macintosh' - - def _is_i386(self): - return self.info['arch']=='i386' - def _is_ppc(self): - return self.info['arch']=='ppc' - - def __machine(self, n): - return self.info['machine'] == 'ppc%s'%n - def _is_ppc601(self): return self.__machine(601) - def _is_ppc602(self): return self.__machine(602) - def _is_ppc603(self): return self.__machine(603) - def _is_ppc603e(self): return self.__machine('603e') - def _is_ppc604(self): return self.__machine(604) - def _is_ppc604e(self): return self.__machine('604e') - def _is_ppc620(self): return self.__machine(620) - def _is_ppc630(self): return self.__machine(630) - def _is_ppc740(self): return self.__machine(740) - def _is_ppc7400(self): return self.__machine(7400) - def _is_ppc7450(self): return self.__machine(7450) - def _is_ppc750(self): return self.__machine(750) - def _is_ppc403(self): return self.__machine(403) - def _is_ppc505(self): return self.__machine(505) - def _is_ppc801(self): return self.__machine(801) - def _is_ppc821(self): return self.__machine(821) - def _is_ppc823(self): return self.__machine(823) - def _is_ppc860(self): return self.__machine(860) - - -class SunOSCPUInfo(CPUInfoBase): - - info = None - - def __init__(self): - if self.info is not None: - return - info = command_info(arch='arch', - mach='mach', - uname_i='uname_i', - isainfo_b='isainfo -b', - isainfo_n='isainfo -n', - ) - info['uname_X'] = key_value_from_command('uname -X', sep='=') - for line in command_by_line('psrinfo -v 0'): - m = re.match(r'\s*The (?P

[\w\d]+) processor operates at', line) - if m: - info['processor'] = m.group('p') - break - self.__class__.info = info - - def _not_impl(self): pass - - def _is_i386(self): - return self.info['isainfo_n']=='i386' - def _is_sparc(self): - return self.info['isainfo_n']=='sparc' - def _is_sparcv9(self): - return self.info['isainfo_n']=='sparcv9' - - def _getNCPUs(self): - return int(self.info['uname_X'].get('NumCPU', 1)) - - def _is_sun4(self): - return self.info['arch']=='sun4' - - def _is_SUNW(self): - return re.match(r'SUNW', self.info['uname_i']) is not None - def _is_sparcstation5(self): - return re.match(r'.*SPARCstation-5', self.info['uname_i']) is not None - def _is_ultra1(self): - return re.match(r'.*Ultra-1', self.info['uname_i']) is not None - def _is_ultra250(self): - return re.match(r'.*Ultra-250', self.info['uname_i']) is not None - def _is_ultra2(self): - return re.match(r'.*Ultra-2', self.info['uname_i']) is not None - def _is_ultra30(self): - return re.match(r'.*Ultra-30', self.info['uname_i']) is not None - def _is_ultra4(self): - return re.match(r'.*Ultra-4', self.info['uname_i']) is not None - def _is_ultra5_10(self): - return re.match(r'.*Ultra-5_10', self.info['uname_i']) is not None - def _is_ultra5(self): - return re.match(r'.*Ultra-5', self.info['uname_i']) is not None - def _is_ultra60(self): - return re.match(r'.*Ultra-60', self.info['uname_i']) is not None - def _is_ultra80(self): - return re.match(r'.*Ultra-80', self.info['uname_i']) is not None - def _is_ultraenterprice(self): - return re.match(r'.*Ultra-Enterprise', self.info['uname_i']) is not None - def _is_ultraenterprice10k(self): - return re.match(r'.*Ultra-Enterprise-10000', self.info['uname_i']) is not None - def _is_sunfire(self): - return re.match(r'.*Sun-Fire', self.info['uname_i']) is not None - def _is_ultra(self): - return re.match(r'.*Ultra', self.info['uname_i']) is not None - - def _is_cpusparcv7(self): - return self.info['processor']=='sparcv7' - def 
_is_cpusparcv8(self): - return self.info['processor']=='sparcv8' - def _is_cpusparcv9(self): - return self.info['processor']=='sparcv9' - -class Win32CPUInfo(CPUInfoBase): - - info = None - pkey = r"HARDWARE\DESCRIPTION\System\CentralProcessor" - # XXX: what does the value of - # HKEY_LOCAL_MACHINE\HARDWARE\DESCRIPTION\System\CentralProcessor\0 - # mean? - - def __init__(self): - if self.info is not None: - return - info = [] - try: - #XXX: Bad style to use so long `try:...except:...`. Fix it! - import winreg - - prgx = re.compile(r"family\s+(?P\d+)\s+model\s+(?P\d+)" - r"\s+stepping\s+(?P\d+)", re.IGNORECASE) - chnd=winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, self.pkey) - pnum=0 - while True: - try: - proc=winreg.EnumKey(chnd, pnum) - except winreg.error: - break - else: - pnum+=1 - info.append({"Processor":proc}) - phnd=winreg.OpenKey(chnd, proc) - pidx=0 - while True: - try: - name, value, vtpe=winreg.EnumValue(phnd, pidx) - except winreg.error: - break - else: - pidx=pidx+1 - info[-1][name]=value - if name=="Identifier": - srch=prgx.search(value) - if srch: - info[-1]["Family"]=int(srch.group("FML")) - info[-1]["Model"]=int(srch.group("MDL")) - info[-1]["Stepping"]=int(srch.group("STP")) - except Exception as e: - print(e, '(ignoring)') - self.__class__.info = info - - def _not_impl(self): pass - - # Athlon - - def _is_AMD(self): - return self.info[0]['VendorIdentifier']=='AuthenticAMD' - - def _is_Am486(self): - return self.is_AMD() and self.info[0]['Family']==4 - - def _is_Am5x86(self): - return self.is_AMD() and self.info[0]['Family']==4 - - def _is_AMDK5(self): - return self.is_AMD() and self.info[0]['Family']==5 \ - and self.info[0]['Model'] in [0, 1, 2, 3] - - def _is_AMDK6(self): - return self.is_AMD() and self.info[0]['Family']==5 \ - and self.info[0]['Model'] in [6, 7] - - def _is_AMDK6_2(self): - return self.is_AMD() and self.info[0]['Family']==5 \ - and self.info[0]['Model']==8 - - def _is_AMDK6_3(self): - return self.is_AMD() and 
self.info[0]['Family']==5 \ - and self.info[0]['Model']==9 - - def _is_AMDK7(self): - return self.is_AMD() and self.info[0]['Family'] == 6 - - # To reliably distinguish between the different types of AMD64 chips - # (Athlon64, Operton, Athlon64 X2, Semperon, Turion 64, etc.) would - # require looking at the 'brand' from cpuid - - def _is_AMD64(self): - return self.is_AMD() and self.info[0]['Family'] == 15 - - # Intel - - def _is_Intel(self): - return self.info[0]['VendorIdentifier']=='GenuineIntel' - - def _is_i386(self): - return self.info[0]['Family']==3 - - def _is_i486(self): - return self.info[0]['Family']==4 - - def _is_i586(self): - return self.is_Intel() and self.info[0]['Family']==5 - - def _is_i686(self): - return self.is_Intel() and self.info[0]['Family']==6 - - def _is_Pentium(self): - return self.is_Intel() and self.info[0]['Family']==5 - - def _is_PentiumMMX(self): - return self.is_Intel() and self.info[0]['Family']==5 \ - and self.info[0]['Model']==4 - - def _is_PentiumPro(self): - return self.is_Intel() and self.info[0]['Family']==6 \ - and self.info[0]['Model']==1 - - def _is_PentiumII(self): - return self.is_Intel() and self.info[0]['Family']==6 \ - and self.info[0]['Model'] in [3, 5, 6] - - def _is_PentiumIII(self): - return self.is_Intel() and self.info[0]['Family']==6 \ - and self.info[0]['Model'] in [7, 8, 9, 10, 11] - - def _is_PentiumIV(self): - return self.is_Intel() and self.info[0]['Family']==15 - - def _is_PentiumM(self): - return self.is_Intel() and self.info[0]['Family'] == 6 \ - and self.info[0]['Model'] in [9, 13, 14] - - def _is_Core2(self): - return self.is_Intel() and self.info[0]['Family'] == 6 \ - and self.info[0]['Model'] in [15, 16, 17] - - # Varia - - def _is_singleCPU(self): - return len(self.info) == 1 - - def _getNCPUs(self): - return len(self.info) - - def _has_mmx(self): - if self.is_Intel(): - return (self.info[0]['Family']==5 and self.info[0]['Model']==4) \ - or (self.info[0]['Family'] in [6, 15]) - elif self.is_AMD(): 
- return self.info[0]['Family'] in [5, 6, 15] - else: - return False - - def _has_sse(self): - if self.is_Intel(): - return ((self.info[0]['Family']==6 and - self.info[0]['Model'] in [7, 8, 9, 10, 11]) - or self.info[0]['Family']==15) - elif self.is_AMD(): - return ((self.info[0]['Family']==6 and - self.info[0]['Model'] in [6, 7, 8, 10]) - or self.info[0]['Family']==15) - else: - return False - - def _has_sse2(self): - if self.is_Intel(): - return self.is_Pentium4() or self.is_PentiumM() \ - or self.is_Core2() - elif self.is_AMD(): - return self.is_AMD64() - else: - return False - - def _has_3dnow(self): - return self.is_AMD() and self.info[0]['Family'] in [5, 6, 15] - - def _has_3dnowext(self): - return self.is_AMD() and self.info[0]['Family'] in [6, 15] - -if sys.platform.startswith('linux'): # variations: linux2,linux-i386 (any others?) - cpuinfo = LinuxCPUInfo -elif sys.platform.startswith('irix'): - cpuinfo = IRIXCPUInfo -elif sys.platform == 'darwin': - cpuinfo = DarwinCPUInfo -elif sys.platform.startswith('sunos'): - cpuinfo = SunOSCPUInfo -elif sys.platform.startswith('win32'): - cpuinfo = Win32CPUInfo -elif sys.platform.startswith('cygwin'): - cpuinfo = LinuxCPUInfo -#XXX: other OS's. Eg. use _winreg on Win32. Or os.uname on unices. 
-else: - cpuinfo = CPUInfoBase - -cpu = cpuinfo() - -#if __name__ == "__main__": -# -# cpu.is_blaa() -# cpu.is_Intel() -# cpu.is_Alpha() -# -# print('CPU information:'), -# for name in dir(cpuinfo): -# if name[0]=='_' and name[1]!='_': -# r = getattr(cpu,name[1:])() -# if r: -# if r!=1: -# print('%s=%s' %(name[1:],r)) -# else: -# print(name[1:]), -# print() diff --git a/numpy/distutils/exec_command.py b/numpy/distutils/exec_command.py deleted file mode 100644 index c701465d9ade..000000000000 --- a/numpy/distutils/exec_command.py +++ /dev/null @@ -1,315 +0,0 @@ -""" -exec_command - -Implements exec_command function that is (almost) equivalent to -commands.getstatusoutput function but on NT, DOS systems the -returned status is actually correct (though, the returned status -values may be different by a factor). In addition, exec_command -takes keyword arguments for (re-)defining environment variables. - -Provides functions: - - exec_command --- execute command in a specified directory and - in the modified environment. - find_executable --- locate a command using info from environment - variable PATH. Equivalent to posix `which` - command. - -Author: Pearu Peterson -Created: 11 January 2003 - -Requires: Python 2.x - -Successfully tested on: - -======== ============ ================================================= -os.name sys.platform comments -======== ============ ================================================= -posix linux2 Debian (sid) Linux, Python 2.1.3+, 2.2.3+, 2.3.3 - PyCrust 0.9.3, Idle 1.0.2 -posix linux2 Red Hat 9 Linux, Python 2.1.3, 2.2.2, 2.3.2 -posix sunos5 SunOS 5.9, Python 2.2, 2.3.2 -posix darwin Darwin 7.2.0, Python 2.3 -nt win32 Windows Me - Python 2.3(EE), Idle 1.0, PyCrust 0.7.2 - Python 2.1.1 Idle 0.8 -nt win32 Windows 98, Python 2.1.1. Idle 0.8 -nt win32 Cygwin 98-4.10, Python 2.1.1(MSC) - echo tests - fail i.e. redefining environment variables may - not work. FIXED: don't use cygwin echo! 
- Comment: also `cmd /c echo` will not work - but redefining environment variables do work. -posix cygwin Cygwin 98-4.10, Python 2.3.3(cygming special) -nt win32 Windows XP, Python 2.3.3 -======== ============ ================================================= - -Known bugs: - -* Tests, that send messages to stderr, fail when executed from MSYS prompt - because the messages are lost at some point. - -""" -__all__ = ['exec_command', 'find_executable'] - -import os -import sys -import subprocess -import locale -import warnings - -from numpy.distutils.misc_util import is_sequence, make_temp_file -from numpy.distutils import log - -def filepath_from_subprocess_output(output): - """ - Convert `bytes` in the encoding used by a subprocess into a filesystem-appropriate `str`. - - Inherited from `exec_command`, and possibly incorrect. - """ - mylocale = locale.getpreferredencoding(False) - if mylocale is None: - mylocale = 'ascii' - output = output.decode(mylocale, errors='replace') - output = output.replace('\r\n', '\n') - # Another historical oddity - if output[-1:] == '\n': - output = output[:-1] - return output - - -def forward_bytes_to_stdout(val): - """ - Forward bytes from a subprocess call to the console, without attempting to - decode them. - - The assumption is that the subprocess call already returned bytes in - a suitable encoding. 
- """ - if hasattr(sys.stdout, 'buffer'): - # use the underlying binary output if there is one - sys.stdout.buffer.write(val) - elif hasattr(sys.stdout, 'encoding'): - # round-trip the encoding if necessary - sys.stdout.write(val.decode(sys.stdout.encoding)) - else: - # make a best-guess at the encoding - sys.stdout.write(val.decode('utf8', errors='replace')) - - -def temp_file_name(): - # 2019-01-30, 1.17 - warnings.warn('temp_file_name is deprecated since NumPy v1.17, use ' - 'tempfile.mkstemp instead', DeprecationWarning, stacklevel=1) - fo, name = make_temp_file() - fo.close() - return name - -def get_pythonexe(): - pythonexe = sys.executable - if os.name in ['nt', 'dos']: - fdir, fn = os.path.split(pythonexe) - fn = fn.upper().replace('PYTHONW', 'PYTHON') - pythonexe = os.path.join(fdir, fn) - assert os.path.isfile(pythonexe), '%r is not a file' % (pythonexe,) - return pythonexe - -def find_executable(exe, path=None, _cache={}): - """Return full path of an executable or None. - - Symbolic links are not followed. 
- """ - key = exe, path - try: - return _cache[key] - except KeyError: - pass - log.debug('find_executable(%r)' % exe) - orig_exe = exe - - if path is None: - path = os.environ.get('PATH', os.defpath) - if os.name=='posix': - realpath = os.path.realpath - else: - realpath = lambda a:a - - if exe.startswith('"'): - exe = exe[1:-1] - - suffixes = [''] - if os.name in ['nt', 'dos', 'os2']: - fn, ext = os.path.splitext(exe) - extra_suffixes = ['.exe', '.com', '.bat'] - if ext.lower() not in extra_suffixes: - suffixes = extra_suffixes - - if os.path.isabs(exe): - paths = [''] - else: - paths = [ os.path.abspath(p) for p in path.split(os.pathsep) ] - - for path in paths: - fn = os.path.join(path, exe) - for s in suffixes: - f_ext = fn+s - if not os.path.islink(f_ext): - f_ext = realpath(f_ext) - if os.path.isfile(f_ext) and os.access(f_ext, os.X_OK): - log.info('Found executable %s' % f_ext) - _cache[key] = f_ext - return f_ext - - log.warn('Could not locate executable %s' % orig_exe) - return None - -############################################################ - -def _preserve_environment( names ): - log.debug('_preserve_environment(%r)' % (names)) - env = {name: os.environ.get(name) for name in names} - return env - -def _update_environment( **env ): - log.debug('_update_environment(...)') - for name, value in env.items(): - os.environ[name] = value or '' - -def exec_command(command, execute_in='', use_shell=None, use_tee=None, - _with_python = 1, **env ): - """ - Return (status,output) of executed command. - - .. deprecated:: 1.17 - Use subprocess.Popen instead - - Parameters - ---------- - command : str - A concatenated string of executable and arguments. - execute_in : str - Before running command ``cd execute_in`` and after ``cd -``. - use_shell : {bool, None}, optional - If True, execute ``sh -c command``. Default None (True) - use_tee : {bool, None}, optional - If True use tee. 
Default None (True) - - - Returns - ------- - res : str - Both stdout and stderr messages. - - Notes - ----- - On NT, DOS systems the returned status is correct for external commands. - Wild cards will not work for non-posix systems or when use_shell=0. - - """ - # 2019-01-30, 1.17 - warnings.warn('exec_command is deprecated since NumPy v1.17, use ' - 'subprocess.Popen instead', DeprecationWarning, stacklevel=1) - log.debug('exec_command(%r,%s)' % (command, - ','.join(['%s=%r'%kv for kv in env.items()]))) - - if use_tee is None: - use_tee = os.name=='posix' - if use_shell is None: - use_shell = os.name=='posix' - execute_in = os.path.abspath(execute_in) - oldcwd = os.path.abspath(os.getcwd()) - - if __name__[-12:] == 'exec_command': - exec_dir = os.path.dirname(os.path.abspath(__file__)) - elif os.path.isfile('exec_command.py'): - exec_dir = os.path.abspath('.') - else: - exec_dir = os.path.abspath(sys.argv[0]) - if os.path.isfile(exec_dir): - exec_dir = os.path.dirname(exec_dir) - - if oldcwd!=execute_in: - os.chdir(execute_in) - log.debug('New cwd: %s' % execute_in) - else: - log.debug('Retaining cwd: %s' % oldcwd) - - oldenv = _preserve_environment( list(env.keys()) ) - _update_environment( **env ) - - try: - st = _exec_command(command, - use_shell=use_shell, - use_tee=use_tee, - **env) - finally: - if oldcwd!=execute_in: - os.chdir(oldcwd) - log.debug('Restored cwd to %s' % oldcwd) - _update_environment(**oldenv) - - return st - - -def _exec_command(command, use_shell=None, use_tee = None, **env): - """ - Internal workhorse for exec_command(). 
- """ - if use_shell is None: - use_shell = os.name=='posix' - if use_tee is None: - use_tee = os.name=='posix' - - if os.name == 'posix' and use_shell: - # On POSIX, subprocess always uses /bin/sh, override - sh = os.environ.get('SHELL', '/bin/sh') - if is_sequence(command): - command = [sh, '-c', ' '.join(command)] - else: - command = [sh, '-c', command] - use_shell = False - - elif os.name == 'nt' and is_sequence(command): - # On Windows, join the string for CreateProcess() ourselves as - # subprocess does it a bit differently - command = ' '.join(_quote_arg(arg) for arg in command) - - # Inherit environment by default - env = env or None - try: - # text is set to False so that communicate() - # will return bytes. We need to decode the output ourselves - # so that Python will not raise a UnicodeDecodeError when - # it encounters an invalid character; rather, we simply replace it - proc = subprocess.Popen(command, shell=use_shell, env=env, text=False, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) - except OSError: - # Return 127, as os.spawn*() and /bin/sh do - return 127, '' - - text, err = proc.communicate() - mylocale = locale.getpreferredencoding(False) - if mylocale is None: - mylocale = 'ascii' - text = text.decode(mylocale, errors='replace') - text = text.replace('\r\n', '\n') - # Another historical oddity - if text[-1:] == '\n': - text = text[:-1] - - if use_tee and text: - print(text) - return proc.returncode, text - - -def _quote_arg(arg): - """ - Quote the argument for safe use in a shell command line. - """ - # If there is a quote in the string, assume relevant parts of the - # string are already quoted (e.g. 
'-I"C:\\Program Files\\..."') - if '"' not in arg and ' ' in arg: - return '"%s"' % arg - return arg - -############################################################ diff --git a/numpy/distutils/extension.py b/numpy/distutils/extension.py deleted file mode 100644 index 06e6441e65df..000000000000 --- a/numpy/distutils/extension.py +++ /dev/null @@ -1,101 +0,0 @@ -"""distutils.extension - -Provides the Extension class, used to describe C/C++ extension -modules in setup scripts. - -Overridden to support f2py. - -""" -import re -from distutils.extension import Extension as old_Extension - - -cxx_ext_re = re.compile(r'.*\.(cpp|cxx|cc)\Z', re.I).match -fortran_pyf_ext_re = re.compile(r'.*\.(f90|f95|f77|for|ftn|f|pyf)\Z', re.I).match - - -class Extension(old_Extension): - """ - Parameters - ---------- - name : str - Extension name. - sources : list of str - List of source file locations relative to the top directory of - the package. - extra_compile_args : list of str - Extra command line arguments to pass to the compiler. - extra_f77_compile_args : list of str - Extra command line arguments to pass to the fortran77 compiler. - extra_f90_compile_args : list of str - Extra command line arguments to pass to the fortran90 compiler. 
- """ - def __init__( - self, name, sources, - include_dirs=None, - define_macros=None, - undef_macros=None, - library_dirs=None, - libraries=None, - runtime_library_dirs=None, - extra_objects=None, - extra_compile_args=None, - extra_link_args=None, - export_symbols=None, - swig_opts=None, - depends=None, - language=None, - f2py_options=None, - module_dirs=None, - extra_c_compile_args=None, - extra_cxx_compile_args=None, - extra_f77_compile_args=None, - extra_f90_compile_args=None,): - - old_Extension.__init__( - self, name, [], - include_dirs=include_dirs, - define_macros=define_macros, - undef_macros=undef_macros, - library_dirs=library_dirs, - libraries=libraries, - runtime_library_dirs=runtime_library_dirs, - extra_objects=extra_objects, - extra_compile_args=extra_compile_args, - extra_link_args=extra_link_args, - export_symbols=export_symbols) - - # Avoid assert statements checking that sources contains strings: - self.sources = sources - - # Python 2.4 distutils new features - self.swig_opts = swig_opts or [] - # swig_opts is assumed to be a list. Here we handle the case where it - # is specified as a string instead. 
- if isinstance(self.swig_opts, str): - import warnings - msg = "swig_opts is specified as a string instead of a list" - warnings.warn(msg, SyntaxWarning, stacklevel=2) - self.swig_opts = self.swig_opts.split() - - # Python 2.3 distutils new features - self.depends = depends or [] - self.language = language - - # numpy_distutils features - self.f2py_options = f2py_options or [] - self.module_dirs = module_dirs or [] - self.extra_c_compile_args = extra_c_compile_args or [] - self.extra_cxx_compile_args = extra_cxx_compile_args or [] - self.extra_f77_compile_args = extra_f77_compile_args or [] - self.extra_f90_compile_args = extra_f90_compile_args or [] - - return - - def has_cxx_sources(self): - return any(cxx_ext_re(str(source)) for source in self.sources) - - def has_f2py_sources(self): - return any(fortran_pyf_ext_re(source) for source in self.sources) - -# class Extension diff --git a/numpy/distutils/fcompiler/__init__.py b/numpy/distutils/fcompiler/__init__.py deleted file mode 100644 index 5160e2abf54f..000000000000 --- a/numpy/distutils/fcompiler/__init__.py +++ /dev/null @@ -1,1035 +0,0 @@ -"""numpy.distutils.fcompiler - -Contains FCompiler, an abstract base class that defines the interface -for the numpy.distutils Fortran compiler abstraction model. - -Terminology: - -To be consistent, where the term 'executable' is used, it means the single -file, like 'gcc', that is executed, and should be a string. In contrast, -'command' means the entire command line, like ['gcc', '-c', 'file.c'], and -should be a list. - -But note that FCompiler.executables is actually a dictionary of commands. 
- -""" -__all__ = ['FCompiler', 'new_fcompiler', 'show_fcompilers', - 'dummy_fortran_file'] - -import os -import sys -import re -from pathlib import Path - -from distutils.sysconfig import get_python_lib -from distutils.fancy_getopt import FancyGetopt -from distutils.errors import DistutilsModuleError, \ - DistutilsExecError, CompileError, LinkError, DistutilsPlatformError -from distutils.util import split_quoted, strtobool - -from numpy.distutils.ccompiler import CCompiler, gen_lib_options -from numpy.distutils import log -from numpy.distutils.misc_util import is_string, all_strings, is_sequence, \ - make_temp_file, get_shared_lib_extension -from numpy.distutils.exec_command import find_executable -from numpy.distutils import _shell_utils - -from .environment import EnvironmentConfig - -__metaclass__ = type - - -FORTRAN_COMMON_FIXED_EXTENSIONS = ['.for', '.ftn', '.f77', '.f'] - - -class CompilerNotFound(Exception): - pass - -def flaglist(s): - if is_string(s): - return split_quoted(s) - else: - return s - -def str2bool(s): - if is_string(s): - return strtobool(s) - return bool(s) - -def is_sequence_of_strings(seq): - return is_sequence(seq) and all_strings(seq) - -class FCompiler(CCompiler): - """Abstract base class to define the interface that must be implemented - by real Fortran compiler classes. - - Methods that subclasses may redefine: - - update_executables(), find_executables(), get_version() - get_flags(), get_flags_opt(), get_flags_arch(), get_flags_debug() - get_flags_f77(), get_flags_opt_f77(), get_flags_arch_f77(), - get_flags_debug_f77(), get_flags_f90(), get_flags_opt_f90(), - get_flags_arch_f90(), get_flags_debug_f90(), - get_flags_fix(), get_flags_linker_so() - - DON'T call these methods (except get_version) after - constructing a compiler instance or inside any other method. - All methods, except update_executables() and find_executables(), - may call the get_version() method. 
- - After constructing a compiler instance, always call customize(dist=None) - method that finalizes compiler construction and makes the following - attributes available: - compiler_f77 - compiler_f90 - compiler_fix - linker_so - archiver - ranlib - libraries - library_dirs - """ - - # These are the environment variables and distutils keys used. - # Each configuration description is - # (, , , , ) - # The hook names are handled by the self._environment_hook method. - # - names starting with 'self.' call methods in this class - # - names starting with 'exe.' return the key in the executables dict - # - names like 'flags.YYY' return self.get_flag_YYY() - # convert is either None or a function to convert a string to the - # appropriate type used. - - distutils_vars = EnvironmentConfig( - distutils_section='config_fc', - noopt = (None, None, 'noopt', str2bool, False), - noarch = (None, None, 'noarch', str2bool, False), - debug = (None, None, 'debug', str2bool, False), - verbose = (None, None, 'verbose', str2bool, False), - ) - - command_vars = EnvironmentConfig( - distutils_section='config_fc', - compiler_f77 = ('exe.compiler_f77', 'F77', 'f77exec', None, False), - compiler_f90 = ('exe.compiler_f90', 'F90', 'f90exec', None, False), - compiler_fix = ('exe.compiler_fix', 'F90', 'f90exec', None, False), - version_cmd = ('exe.version_cmd', None, None, None, False), - linker_so = ('exe.linker_so', 'LDSHARED', 'ldshared', None, False), - linker_exe = ('exe.linker_exe', 'LD', 'ld', None, False), - archiver = (None, 'AR', 'ar', None, False), - ranlib = (None, 'RANLIB', 'ranlib', None, False), - ) - - flag_vars = EnvironmentConfig( - distutils_section='config_fc', - f77 = ('flags.f77', 'F77FLAGS', 'f77flags', flaglist, True), - f90 = ('flags.f90', 'F90FLAGS', 'f90flags', flaglist, True), - free = ('flags.free', 'FREEFLAGS', 'freeflags', flaglist, True), - fix = ('flags.fix', None, None, flaglist, False), - opt = ('flags.opt', 'FOPT', 'opt', flaglist, True), - opt_f77 = 
('flags.opt_f77', None, None, flaglist, False), - opt_f90 = ('flags.opt_f90', None, None, flaglist, False), - arch = ('flags.arch', 'FARCH', 'arch', flaglist, False), - arch_f77 = ('flags.arch_f77', None, None, flaglist, False), - arch_f90 = ('flags.arch_f90', None, None, flaglist, False), - debug = ('flags.debug', 'FDEBUG', 'fdebug', flaglist, True), - debug_f77 = ('flags.debug_f77', None, None, flaglist, False), - debug_f90 = ('flags.debug_f90', None, None, flaglist, False), - flags = ('self.get_flags', 'FFLAGS', 'fflags', flaglist, True), - linker_so = ('flags.linker_so', 'LDFLAGS', 'ldflags', flaglist, True), - linker_exe = ('flags.linker_exe', 'LDFLAGS', 'ldflags', flaglist, True), - ar = ('flags.ar', 'ARFLAGS', 'arflags', flaglist, True), - ) - - language_map = {'.f': 'f77', - '.for': 'f77', - '.F': 'f77', # XXX: needs preprocessor - '.ftn': 'f77', - '.f77': 'f77', - '.f90': 'f90', - '.F90': 'f90', # XXX: needs preprocessor - '.f95': 'f90', - } - language_order = ['f90', 'f77'] - - - # These will be set by the subclass - - compiler_type = None - compiler_aliases = () - version_pattern = None - - possible_executables = [] - executables = { - 'version_cmd': ["f77", "-v"], - 'compiler_f77': ["f77"], - 'compiler_f90': ["f90"], - 'compiler_fix': ["f90", "-fixed"], - 'linker_so': ["f90", "-shared"], - 'linker_exe': ["f90"], - 'archiver': ["ar", "-cr"], - 'ranlib': None, - } - - # If compiler does not support compiling Fortran 90 then it can - # suggest using another compiler. For example, gnu would suggest - # gnu95 compiler type when there are F90 sources. - suggested_f90_compiler = None - - compile_switch = "-c" - object_switch = "-o " # Ending space matters! It will be stripped - # but if it is missing then object_switch - # will be prefixed to object file name by - # string concatenation. - library_switch = "-o " # Ditto! - - # Switch to specify where module files are created and searched - # for USE statement. 
Normally it is a string and also here ending - # space matters. See above. - module_dir_switch = None - - # Switch to specify where module files are searched for USE statement. - module_include_switch = '-I' - - pic_flags = [] # Flags to create position-independent code - - src_extensions = ['.for', '.ftn', '.f77', '.f', '.f90', '.f95', '.F', '.F90', '.FOR'] - obj_extension = ".o" - - shared_lib_extension = get_shared_lib_extension() - static_lib_extension = ".a" # or .lib - static_lib_format = "lib%s%s" # or %s%s - shared_lib_format = "%s%s" - exe_extension = "" - - _exe_cache = {} - - _executable_keys = ['version_cmd', 'compiler_f77', 'compiler_f90', - 'compiler_fix', 'linker_so', 'linker_exe', 'archiver', - 'ranlib'] - - # This will be set by new_fcompiler when called in - # command/{build_ext.py, build_clib.py, config.py} files. - c_compiler = None - - # extra_{f77,f90}_compile_args are set by build_ext.build_extension method - extra_f77_compile_args = [] - extra_f90_compile_args = [] - - def __init__(self, *args, **kw): - CCompiler.__init__(self, *args, **kw) - self.distutils_vars = self.distutils_vars.clone(self._environment_hook) - self.command_vars = self.command_vars.clone(self._environment_hook) - self.flag_vars = self.flag_vars.clone(self._environment_hook) - self.executables = self.executables.copy() - for e in self._executable_keys: - if e not in self.executables: - self.executables[e] = None - - # Some methods depend on .customize() being called first, so - # this keeps track of whether that's happened yet. 
- self._is_customised = False - - def __copy__(self): - obj = self.__new__(self.__class__) - obj.__dict__.update(self.__dict__) - obj.distutils_vars = obj.distutils_vars.clone(obj._environment_hook) - obj.command_vars = obj.command_vars.clone(obj._environment_hook) - obj.flag_vars = obj.flag_vars.clone(obj._environment_hook) - obj.executables = obj.executables.copy() - return obj - - def copy(self): - return self.__copy__() - - # Use properties for the attributes used by CCompiler. Setting them - # as attributes from the self.executables dictionary is error-prone, - # so we get them from there each time. - def _command_property(key): - def fget(self): - assert self._is_customised - return self.executables[key] - return property(fget=fget) - version_cmd = _command_property('version_cmd') - compiler_f77 = _command_property('compiler_f77') - compiler_f90 = _command_property('compiler_f90') - compiler_fix = _command_property('compiler_fix') - linker_so = _command_property('linker_so') - linker_exe = _command_property('linker_exe') - archiver = _command_property('archiver') - ranlib = _command_property('ranlib') - - # Make our terminology consistent. - def set_executable(self, key, value): - self.set_command(key, value) - - def set_commands(self, **kw): - for k, v in kw.items(): - self.set_command(k, v) - - def set_command(self, key, value): - if not key in self._executable_keys: - raise ValueError( - "unknown executable '%s' for class %s" % - (key, self.__class__.__name__)) - if is_string(value): - value = split_quoted(value) - assert value is None or is_sequence_of_strings(value[1:]), (key, value) - self.executables[key] = value - - ###################################################################### - ## Methods that subclasses may redefine. But don't call these methods! - ## They are private to FCompiler class and may return unexpected - ## results if used elsewhere. So, you have been warned.. 
- - def find_executables(self): - """Go through the self.executables dictionary, and attempt to - find and assign appropriate executables. - - Executable names are looked for in the environment (environment - variables, the distutils.cfg, and command line), the 0th-element of - the command list, and the self.possible_executables list. - - Also, if the 0th element is "" or "", the Fortran 77 - or the Fortran 90 compiler executable is used, unless overridden - by an environment setting. - - Subclasses should call this if overridden. - """ - assert self._is_customised - exe_cache = self._exe_cache - def cached_find_executable(exe): - if exe in exe_cache: - return exe_cache[exe] - fc_exe = find_executable(exe) - exe_cache[exe] = exe_cache[fc_exe] = fc_exe - return fc_exe - def verify_command_form(name, value): - if value is not None and not is_sequence_of_strings(value): - raise ValueError( - "%s value %r is invalid in class %s" % - (name, value, self.__class__.__name__)) - def set_exe(exe_key, f77=None, f90=None): - cmd = self.executables.get(exe_key, None) - if not cmd: - return None - # Note that we get cmd[0] here if the environment doesn't - # have anything set - exe_from_environ = getattr(self.command_vars, exe_key) - if not exe_from_environ: - possibles = [f90, f77] + self.possible_executables - else: - possibles = [exe_from_environ] + self.possible_executables - - seen = set() - unique_possibles = [] - for e in possibles: - if e == '': - e = f77 - elif e == '': - e = f90 - if not e or e in seen: - continue - seen.add(e) - unique_possibles.append(e) - - for exe in unique_possibles: - fc_exe = cached_find_executable(exe) - if fc_exe: - cmd[0] = fc_exe - return fc_exe - self.set_command(exe_key, None) - return None - - ctype = self.compiler_type - f90 = set_exe('compiler_f90') - if not f90: - f77 = set_exe('compiler_f77') - if f77: - log.warn('%s: no Fortran 90 compiler found' % ctype) - else: - raise CompilerNotFound('%s: f90 nor f77' % ctype) - else: - f77 = 
set_exe('compiler_f77', f90=f90) - if not f77: - log.warn('%s: no Fortran 77 compiler found' % ctype) - set_exe('compiler_fix', f90=f90) - - set_exe('linker_so', f77=f77, f90=f90) - set_exe('linker_exe', f77=f77, f90=f90) - set_exe('version_cmd', f77=f77, f90=f90) - set_exe('archiver') - set_exe('ranlib') - - def update_executables(self): - """Called at the beginning of customisation. Subclasses should - override this if they need to set up the executables dictionary. - - Note that self.find_executables() is run afterwards, so the - self.executables dictionary values can contain or as - the command, which will be replaced by the found F77 or F90 - compiler. - """ - pass - - def get_flags(self): - """List of flags common to all compiler types.""" - return [] + self.pic_flags - - def _get_command_flags(self, key): - cmd = self.executables.get(key, None) - if cmd is None: - return [] - return cmd[1:] - - def get_flags_f77(self): - """List of Fortran 77 specific flags.""" - return self._get_command_flags('compiler_f77') - def get_flags_f90(self): - """List of Fortran 90 specific flags.""" - return self._get_command_flags('compiler_f90') - def get_flags_free(self): - """List of Fortran 90 free format specific flags.""" - return [] - def get_flags_fix(self): - """List of Fortran 90 fixed format specific flags.""" - return self._get_command_flags('compiler_fix') - def get_flags_linker_so(self): - """List of linker flags to build a shared library.""" - return self._get_command_flags('linker_so') - def get_flags_linker_exe(self): - """List of linker flags to build an executable.""" - return self._get_command_flags('linker_exe') - def get_flags_ar(self): - """List of archiver flags. 
""" - return self._get_command_flags('archiver') - def get_flags_opt(self): - """List of architecture independent compiler flags.""" - return [] - def get_flags_arch(self): - """List of architecture dependent compiler flags.""" - return [] - def get_flags_debug(self): - """List of compiler flags to compile with debugging information.""" - return [] - - get_flags_opt_f77 = get_flags_opt_f90 = get_flags_opt - get_flags_arch_f77 = get_flags_arch_f90 = get_flags_arch - get_flags_debug_f77 = get_flags_debug_f90 = get_flags_debug - - def get_libraries(self): - """List of compiler libraries.""" - return self.libraries[:] - def get_library_dirs(self): - """List of compiler library directories.""" - return self.library_dirs[:] - - def get_version(self, force=False, ok_status=[0]): - assert self._is_customised - version = CCompiler.get_version(self, force=force, ok_status=ok_status) - if version is None: - raise CompilerNotFound() - return version - - - ############################################################ - - ## Public methods: - - def customize(self, dist = None): - """Customize Fortran compiler. - - This method gets Fortran compiler specific information from - (i) class definition, (ii) environment, (iii) distutils config - files, and (iv) command line (later overrides earlier). - - This method should be always called after constructing a - compiler instance. But not in __init__ because Distribution - instance is needed for (iii) and (iv). 
- """ - log.info('customize %s' % (self.__class__.__name__)) - - self._is_customised = True - - self.distutils_vars.use_distribution(dist) - self.command_vars.use_distribution(dist) - self.flag_vars.use_distribution(dist) - - self.update_executables() - - # find_executables takes care of setting the compiler commands, - # version_cmd, linker_so, linker_exe, ar, and ranlib - self.find_executables() - - noopt = self.distutils_vars.get('noopt', False) - noarch = self.distutils_vars.get('noarch', noopt) - debug = self.distutils_vars.get('debug', False) - - f77 = self.command_vars.compiler_f77 - f90 = self.command_vars.compiler_f90 - - f77flags = [] - f90flags = [] - freeflags = [] - fixflags = [] - - if f77: - f77 = _shell_utils.NativeParser.split(f77) - f77flags = self.flag_vars.f77 - if f90: - f90 = _shell_utils.NativeParser.split(f90) - f90flags = self.flag_vars.f90 - freeflags = self.flag_vars.free - # XXX Assuming that free format is default for f90 compiler. - fix = self.command_vars.compiler_fix - # NOTE: this and similar examples are probably just - # excluding --coverage flag when F90 = gfortran --coverage - # instead of putting that flag somewhere more appropriate - # this and similar examples where a Fortran compiler - # environment variable has been customized by CI or a user - # should perhaps eventually be more thoroughly tested and more - # robustly handled - if fix: - fix = _shell_utils.NativeParser.split(fix) - fixflags = self.flag_vars.fix + f90flags - - oflags, aflags, dflags = [], [], [] - # examine get_flags__ for extra flags - # only add them if the method is different from get_flags_ - def get_flags(tag, flags): - # note that self.flag_vars. 
calls self.get_flags_() - flags.extend(getattr(self.flag_vars, tag)) - this_get = getattr(self, 'get_flags_' + tag) - for name, c, flagvar in [('f77', f77, f77flags), - ('f90', f90, f90flags), - ('f90', fix, fixflags)]: - t = '%s_%s' % (tag, name) - if c and this_get is not getattr(self, 'get_flags_' + t): - flagvar.extend(getattr(self.flag_vars, t)) - if not noopt: - get_flags('opt', oflags) - if not noarch: - get_flags('arch', aflags) - if debug: - get_flags('debug', dflags) - - fflags = self.flag_vars.flags + dflags + oflags + aflags - - if f77: - self.set_commands(compiler_f77=f77+f77flags+fflags) - if f90: - self.set_commands(compiler_f90=f90+freeflags+f90flags+fflags) - if fix: - self.set_commands(compiler_fix=fix+fixflags+fflags) - - - #XXX: Do we need LDSHARED->SOSHARED, LDFLAGS->SOFLAGS - linker_so = self.linker_so - if linker_so: - linker_so_flags = self.flag_vars.linker_so - if sys.platform.startswith('aix'): - python_lib = get_python_lib(standard_lib=1) - ld_so_aix = os.path.join(python_lib, 'config', 'ld_so_aix') - python_exp = os.path.join(python_lib, 'config', 'python.exp') - linker_so = [ld_so_aix] + linker_so + ['-bI:'+python_exp] - if sys.platform.startswith('os400'): - from distutils.sysconfig import get_config_var - python_config = get_config_var('LIBPL') - ld_so_aix = os.path.join(python_config, 'ld_so_aix') - python_exp = os.path.join(python_config, 'python.exp') - linker_so = [ld_so_aix] + linker_so + ['-bI:'+python_exp] - self.set_commands(linker_so=linker_so+linker_so_flags) - - linker_exe = self.linker_exe - if linker_exe: - linker_exe_flags = self.flag_vars.linker_exe - self.set_commands(linker_exe=linker_exe+linker_exe_flags) - - ar = self.command_vars.archiver - if ar: - arflags = self.flag_vars.ar - self.set_commands(archiver=[ar]+arflags) - - self.set_library_dirs(self.get_library_dirs()) - self.set_libraries(self.get_libraries()) - - def dump_properties(self): - """Print out the attributes of a compiler instance.""" - props = [] - 
for key in list(self.executables.keys()) + \ - ['version', 'libraries', 'library_dirs', - 'object_switch', 'compile_switch']: - if hasattr(self, key): - v = getattr(self, key) - props.append((key, None, '= '+repr(v))) - props.sort() - - pretty_printer = FancyGetopt(props) - for l in pretty_printer.generate_help("%s instance properties:" \ - % (self.__class__.__name__)): - if l[:4]==' --': - l = ' ' + l[4:] - print(l) - - ################### - - def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts): - """Compile 'src' to product 'obj'.""" - src_flags = {} - if Path(src).suffix.lower() in FORTRAN_COMMON_FIXED_EXTENSIONS \ - and not has_f90_header(src): - flavor = ':f77' - compiler = self.compiler_f77 - src_flags = get_f77flags(src) - extra_compile_args = self.extra_f77_compile_args or [] - elif is_free_format(src): - flavor = ':f90' - compiler = self.compiler_f90 - if compiler is None: - raise DistutilsExecError('f90 not supported by %s needed for %s'\ - % (self.__class__.__name__, src)) - extra_compile_args = self.extra_f90_compile_args or [] - else: - flavor = ':fix' - compiler = self.compiler_fix - if compiler is None: - raise DistutilsExecError('f90 (fixed) not supported by %s needed for %s'\ - % (self.__class__.__name__, src)) - extra_compile_args = self.extra_f90_compile_args or [] - if self.object_switch[-1]==' ': - o_args = [self.object_switch.strip(), obj] - else: - o_args = [self.object_switch.strip()+obj] - - assert self.compile_switch.strip() - s_args = [self.compile_switch, src] - - if extra_compile_args: - log.info('extra %s options: %r' \ - % (flavor[1:], ' '.join(extra_compile_args))) - - extra_flags = src_flags.get(self.compiler_type, []) - if extra_flags: - log.info('using compile options from source: %r' \ - % ' '.join(extra_flags)) - - command = compiler + cc_args + extra_flags + s_args + o_args \ - + extra_postargs + extra_compile_args - - display = '%s: %s' % (os.path.basename(compiler[0]) + flavor, - src) - try: - 
self.spawn(command, display=display) - except DistutilsExecError as e: - msg = str(e) - raise CompileError(msg) from None - - def module_options(self, module_dirs, module_build_dir): - options = [] - if self.module_dir_switch is not None: - if self.module_dir_switch[-1]==' ': - options.extend([self.module_dir_switch.strip(), module_build_dir]) - else: - options.append(self.module_dir_switch.strip()+module_build_dir) - else: - print('XXX: module_build_dir=%r option ignored' % (module_build_dir)) - print('XXX: Fix module_dir_switch for ', self.__class__.__name__) - if self.module_include_switch is not None: - for d in [module_build_dir]+module_dirs: - options.append('%s%s' % (self.module_include_switch, d)) - else: - print('XXX: module_dirs=%r option ignored' % (module_dirs)) - print('XXX: Fix module_include_switch for ', self.__class__.__name__) - return options - - def library_option(self, lib): - return "-l" + lib - def library_dir_option(self, dir): - return "-L" + dir - - def link(self, target_desc, objects, - output_filename, output_dir=None, libraries=None, - library_dirs=None, runtime_library_dirs=None, - export_symbols=None, debug=0, extra_preargs=None, - extra_postargs=None, build_temp=None, target_lang=None): - objects, output_dir = self._fix_object_args(objects, output_dir) - libraries, library_dirs, runtime_library_dirs = \ - self._fix_lib_args(libraries, library_dirs, runtime_library_dirs) - - lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs, - libraries) - if is_string(output_dir): - output_filename = os.path.join(output_dir, output_filename) - elif output_dir is not None: - raise TypeError("'output_dir' must be a string or None") - - if self._need_link(objects, output_filename): - if self.library_switch[-1]==' ': - o_args = [self.library_switch.strip(), output_filename] - else: - o_args = [self.library_switch.strip()+output_filename] - - if is_string(self.objects): - ld_args = objects + [self.objects] - else: - ld_args = objects + 
self.objects - ld_args = ld_args + lib_opts + o_args - if debug: - ld_args[:0] = ['-g'] - if extra_preargs: - ld_args[:0] = extra_preargs - if extra_postargs: - ld_args.extend(extra_postargs) - self.mkpath(os.path.dirname(output_filename)) - if target_desc == CCompiler.EXECUTABLE: - linker = self.linker_exe[:] - else: - linker = self.linker_so[:] - command = linker + ld_args - try: - self.spawn(command) - except DistutilsExecError as e: - msg = str(e) - raise LinkError(msg) from None - else: - log.debug("skipping %s (up-to-date)", output_filename) - - def _environment_hook(self, name, hook_name): - if hook_name is None: - return None - if is_string(hook_name): - if hook_name.startswith('self.'): - hook_name = hook_name[5:] - hook = getattr(self, hook_name) - return hook() - elif hook_name.startswith('exe.'): - hook_name = hook_name[4:] - var = self.executables[hook_name] - if var: - return var[0] - else: - return None - elif hook_name.startswith('flags.'): - hook_name = hook_name[6:] - hook = getattr(self, 'get_flags_' + hook_name) - return hook() - else: - return hook_name() - - def can_ccompiler_link(self, ccompiler): - """ - Check if the given C compiler can link objects produced by - this compiler. - """ - return True - - def wrap_unlinkable_objects(self, objects, output_dir, extra_dll_dir): - """ - Convert a set of object files that are not compatible with the default - linker, to a file that is compatible. - - Parameters - ---------- - objects : list - List of object files to include. - output_dir : str - Output directory to place generated object files. - extra_dll_dir : str - Output directory to place extra DLL files that need to be - included on Windows. - - Returns - ------- - converted_objects : list of str - List of converted object files. - Note that the number of output files is not necessarily - the same as inputs. 
- - """ - raise NotImplementedError() - - ## class FCompiler - -_default_compilers = ( - # sys.platform mappings - ('win32', ('gnu', 'intelv', 'absoft', 'compaqv', 'intelev', 'gnu95', 'g95', - 'intelvem', 'intelem', 'flang')), - ('cygwin.*', ('gnu', 'intelv', 'absoft', 'compaqv', 'intelev', 'gnu95', 'g95')), - ('linux.*', ('arm', 'gnu95', 'intel', 'lahey', 'pg', 'nv', 'absoft', 'nag', - 'vast', 'compaq', 'intele', 'intelem', 'gnu', 'g95', - 'pathf95', 'nagfor', 'fujitsu')), - ('darwin.*', ('gnu95', 'nag', 'nagfor', 'absoft', 'ibm', 'intel', 'gnu', - 'g95', 'pg')), - ('sunos.*', ('sun', 'gnu', 'gnu95', 'g95')), - ('irix.*', ('mips', 'gnu', 'gnu95',)), - ('aix.*', ('ibm', 'gnu', 'gnu95',)), - # os.name mappings - ('posix', ('gnu', 'gnu95',)), - ('nt', ('gnu', 'gnu95',)), - ('mac', ('gnu95', 'gnu', 'pg')), - ) - -fcompiler_class = None -fcompiler_aliases = None - -def load_all_fcompiler_classes(): - """Cache all the FCompiler classes found in modules in the - numpy.distutils.fcompiler package. - """ - from glob import glob - global fcompiler_class, fcompiler_aliases - if fcompiler_class is not None: - return - pys = os.path.join(os.path.dirname(__file__), '*.py') - fcompiler_class = {} - fcompiler_aliases = {} - for fname in glob(pys): - module_name, ext = os.path.splitext(os.path.basename(fname)) - module_name = 'numpy.distutils.fcompiler.' 
+ module_name - __import__ (module_name) - module = sys.modules[module_name] - if hasattr(module, 'compilers'): - for cname in module.compilers: - klass = getattr(module, cname) - desc = (klass.compiler_type, klass, klass.description) - fcompiler_class[klass.compiler_type] = desc - for alias in klass.compiler_aliases: - if alias in fcompiler_aliases: - raise ValueError("alias %r defined for both %s and %s" - % (alias, klass.__name__, - fcompiler_aliases[alias][1].__name__)) - fcompiler_aliases[alias] = desc - -def _find_existing_fcompiler(compiler_types, - osname=None, platform=None, - requiref90=False, - c_compiler=None): - from numpy.distutils.core import get_distribution - dist = get_distribution(always=True) - for compiler_type in compiler_types: - v = None - try: - c = new_fcompiler(plat=platform, compiler=compiler_type, - c_compiler=c_compiler) - c.customize(dist) - v = c.get_version() - if requiref90 and c.compiler_f90 is None: - v = None - new_compiler = c.suggested_f90_compiler - if new_compiler: - log.warn('Trying %r compiler as suggested by %r ' - 'compiler for f90 support.' % (compiler_type, - new_compiler)) - c = new_fcompiler(plat=platform, compiler=new_compiler, - c_compiler=c_compiler) - c.customize(dist) - v = c.get_version() - if v is not None: - compiler_type = new_compiler - if requiref90 and c.compiler_f90 is None: - raise ValueError('%s does not support compiling f90 codes, ' - 'skipping.' 
% (c.__class__.__name__)) - except DistutilsModuleError: - log.debug("_find_existing_fcompiler: compiler_type='%s' raised DistutilsModuleError", compiler_type) - except CompilerNotFound: - log.debug("_find_existing_fcompiler: compiler_type='%s' not found", compiler_type) - if v is not None: - return compiler_type - return None - -def available_fcompilers_for_platform(osname=None, platform=None): - if osname is None: - osname = os.name - if platform is None: - platform = sys.platform - matching_compiler_types = [] - for pattern, compiler_type in _default_compilers: - if re.match(pattern, platform) or re.match(pattern, osname): - for ct in compiler_type: - if ct not in matching_compiler_types: - matching_compiler_types.append(ct) - if not matching_compiler_types: - matching_compiler_types.append('gnu') - return matching_compiler_types - -def get_default_fcompiler(osname=None, platform=None, requiref90=False, - c_compiler=None): - """Determine the default Fortran compiler to use for the given - platform.""" - matching_compiler_types = available_fcompilers_for_platform(osname, - platform) - log.info("get_default_fcompiler: matching types: '%s'", - matching_compiler_types) - compiler_type = _find_existing_fcompiler(matching_compiler_types, - osname=osname, - platform=platform, - requiref90=requiref90, - c_compiler=c_compiler) - return compiler_type - -# Flag to avoid rechecking for Fortran compiler every time -failed_fcompilers = set() - -def new_fcompiler(plat=None, - compiler=None, - verbose=0, - dry_run=0, - force=0, - requiref90=False, - c_compiler = None): - """Generate an instance of some FCompiler subclass for the supplied - platform/compiler combination. 
- """ - global failed_fcompilers - fcompiler_key = (plat, compiler) - if fcompiler_key in failed_fcompilers: - return None - - load_all_fcompiler_classes() - if plat is None: - plat = os.name - if compiler is None: - compiler = get_default_fcompiler(plat, requiref90=requiref90, - c_compiler=c_compiler) - if compiler in fcompiler_class: - module_name, klass, long_description = fcompiler_class[compiler] - elif compiler in fcompiler_aliases: - module_name, klass, long_description = fcompiler_aliases[compiler] - else: - msg = "don't know how to compile Fortran code on platform '%s'" % plat - if compiler is not None: - msg = msg + " with '%s' compiler." % compiler - msg = msg + " Supported compilers are: %s)" \ - % (','.join(fcompiler_class.keys())) - log.warn(msg) - failed_fcompilers.add(fcompiler_key) - return None - - compiler = klass(verbose=verbose, dry_run=dry_run, force=force) - compiler.c_compiler = c_compiler - return compiler - -def show_fcompilers(dist=None): - """Print list of available compilers (used by the "--help-fcompiler" - option to "config_fc"). 
- """ - if dist is None: - from distutils.dist import Distribution - from numpy.distutils.command.config_compiler import config_fc - dist = Distribution() - dist.script_name = os.path.basename(sys.argv[0]) - dist.script_args = ['config_fc'] + sys.argv[1:] - try: - dist.script_args.remove('--help-fcompiler') - except ValueError: - pass - dist.cmdclass['config_fc'] = config_fc - dist.parse_config_files() - dist.parse_command_line() - compilers = [] - compilers_na = [] - compilers_ni = [] - if not fcompiler_class: - load_all_fcompiler_classes() - platform_compilers = available_fcompilers_for_platform() - for compiler in platform_compilers: - v = None - log.set_verbosity(-2) - try: - c = new_fcompiler(compiler=compiler, verbose=dist.verbose) - c.customize(dist) - v = c.get_version() - except (DistutilsModuleError, CompilerNotFound) as e: - log.debug("show_fcompilers: %s not found" % (compiler,)) - log.debug(repr(e)) - - if v is None: - compilers_na.append(("fcompiler="+compiler, None, - fcompiler_class[compiler][2])) - else: - c.dump_properties() - compilers.append(("fcompiler="+compiler, None, - fcompiler_class[compiler][2] + ' (%s)' % v)) - - compilers_ni = list(set(fcompiler_class.keys()) - set(platform_compilers)) - compilers_ni = [("fcompiler="+fc, None, fcompiler_class[fc][2]) - for fc in compilers_ni] - - compilers.sort() - compilers_na.sort() - compilers_ni.sort() - pretty_printer = FancyGetopt(compilers) - pretty_printer.print_help("Fortran compilers found:") - pretty_printer = FancyGetopt(compilers_na) - pretty_printer.print_help("Compilers available for this " - "platform, but not found:") - if compilers_ni: - pretty_printer = FancyGetopt(compilers_ni) - pretty_printer.print_help("Compilers not available on this platform:") - print("For compiler details, run 'config_fc --verbose' setup command.") - - -def dummy_fortran_file(): - fo, name = make_temp_file(suffix='.f') - fo.write(" subroutine dummy()\n end\n") - fo.close() - return name[:-2] - - -_has_f_header 
= re.compile(r'-\*-\s*fortran\s*-\*-', re.I).search -_has_f90_header = re.compile(r'-\*-\s*f90\s*-\*-', re.I).search -_has_fix_header = re.compile(r'-\*-\s*fix\s*-\*-', re.I).search -_free_f90_start = re.compile(r'[^c*!]\s*[^\s\d\t]', re.I).match - -def is_free_format(file): - """Check if file is in free format Fortran.""" - # f90 allows both fixed and free format, assuming fixed unless - # signs of free format are detected. - result = 0 - with open(file, encoding='latin1') as f: - line = f.readline() - n = 10000 # the number of non-comment lines to scan for hints - if _has_f_header(line) or _has_fix_header(line): - n = 0 - elif _has_f90_header(line): - n = 0 - result = 1 - while n>0 and line: - line = line.rstrip() - if line and line[0]!='!': - n -= 1 - if (line[0]!='\t' and _free_f90_start(line[:5])) or line[-1:]=='&': - result = 1 - break - line = f.readline() - return result - -def has_f90_header(src): - with open(src, encoding='latin1') as f: - line = f.readline() - return _has_f90_header(line) or _has_fix_header(line) - -_f77flags_re = re.compile(r'(c|)f77flags\s*\(\s*(?P\w+)\s*\)\s*=\s*(?P.*)', re.I) -def get_f77flags(src): - """ - Search the first 20 lines of fortran 77 code for line pattern - `CF77FLAGS()=` - Return a dictionary {:}. - """ - flags = {} - with open(src, encoding='latin1') as f: - i = 0 - for line in f: - i += 1 - if i>20: break - m = _f77flags_re.match(line) - if not m: continue - fcname = m.group('fcname').strip() - fflags = m.group('fflags').strip() - flags[fcname] = split_quoted(fflags) - return flags - -# TODO: implement get_f90flags and use it in _compile similarly to get_f77flags - -if __name__ == '__main__': - show_fcompilers() diff --git a/numpy/distutils/fcompiler/absoft.py b/numpy/distutils/fcompiler/absoft.py deleted file mode 100644 index e013def5d1a4..000000000000 --- a/numpy/distutils/fcompiler/absoft.py +++ /dev/null @@ -1,158 +0,0 @@ - -# Absoft Corporation ceased operations on 12/31/2022. -# Thus, all links to are invalid. 
- -# Notes: -# - when using -g77 then use -DUNDERSCORE_G77 to compile f2py -# generated extension modules (works for f2py v2.45.241_1936 and up) -import os - -from numpy.distutils.cpuinfo import cpu -from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file -from numpy.distutils.misc_util import cyg2win32 - -compilers = ['AbsoftFCompiler'] - -class AbsoftFCompiler(FCompiler): - - compiler_type = 'absoft' - description = 'Absoft Corp Fortran Compiler' - #version_pattern = r'FORTRAN 77 Compiler (?P[^\s*,]*).*?Absoft Corp' - version_pattern = r'(f90:.*?(Absoft Pro FORTRAN Version|FORTRAN 77 Compiler'\ - r'|Absoft Fortran Compiler Version'\ - r'|Copyright Absoft Corporation.*?Version))'\ - r' (?P[^\s*,]*)(.*?Absoft Corp|)' - - # on windows: f90 -V -c dummy.f - # f90: Copyright Absoft Corporation 1994-1998 mV2; Cray Research, Inc. 1994-1996 CF90 (2.x.x.x f36t87) Version 2.3 Wed Apr 19, 2006 13:05:16 - - # samt5735(8)$ f90 -V -c dummy.f - # f90: Copyright Absoft Corporation 1994-2002; Absoft Pro FORTRAN Version 8.0 - # Note that fink installs g77 as f77, so need to use f90 for detection. - - executables = { - 'version_cmd' : None, # set by update_executables - 'compiler_f77' : ["f77"], - 'compiler_fix' : ["f90"], - 'compiler_f90' : ["f90"], - 'linker_so' : [""], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - if os.name=='nt': - library_switch = '/out:' #No space after /out:! - - module_dir_switch = None - module_include_switch = '-p' - - def update_executables(self): - f = cyg2win32(dummy_fortran_file()) - self.executables['version_cmd'] = ['', '-V', '-c', - f+'.f', '-o', f+'.o'] - - def get_flags_linker_so(self): - if os.name=='nt': - opt = ['/dll'] - # The "-K shared" switches are being left in for pre-9.0 versions - # of Absoft though I don't think versions earlier than 9 can - # actually be used to build shared libraries. In fact, version - # 8 of Absoft doesn't recognize "-K shared" and will fail. 
- elif self.get_version() >= '9.0': - opt = ['-shared'] - else: - opt = ["-K", "shared"] - return opt - - def library_dir_option(self, dir): - if os.name=='nt': - return ['-link', '/PATH:%s' % (dir)] - return "-L" + dir - - def library_option(self, lib): - if os.name=='nt': - return '%s.lib' % (lib) - return "-l" + lib - - def get_library_dirs(self): - opt = FCompiler.get_library_dirs(self) - d = os.environ.get('ABSOFT') - if d: - if self.get_version() >= '10.0': - # use shared libraries, the static libraries were not compiled -fPIC - prefix = 'sh' - else: - prefix = '' - if cpu.is_64bit(): - suffix = '64' - else: - suffix = '' - opt.append(os.path.join(d, '%slib%s' % (prefix, suffix))) - return opt - - def get_libraries(self): - opt = FCompiler.get_libraries(self) - if self.get_version() >= '11.0': - opt.extend(['af90math', 'afio', 'af77math', 'amisc']) - elif self.get_version() >= '10.0': - opt.extend(['af90math', 'afio', 'af77math', 'U77']) - elif self.get_version() >= '8.0': - opt.extend(['f90math', 'fio', 'f77math', 'U77']) - else: - opt.extend(['fio', 'f90math', 'fmath', 'U77']) - if os.name =='nt': - opt.append('COMDLG32') - return opt - - def get_flags(self): - opt = FCompiler.get_flags(self) - if os.name != 'nt': - opt.extend(['-s']) - if self.get_version(): - if self.get_version()>='8.2': - opt.append('-fpic') - return opt - - def get_flags_f77(self): - opt = FCompiler.get_flags_f77(self) - opt.extend(['-N22', '-N90', '-N110']) - v = self.get_version() - if os.name == 'nt': - if v and v>='8.0': - opt.extend(['-f', '-N15']) - else: - opt.append('-f') - if v: - if v<='4.6': - opt.append('-B108') - else: - # Though -N15 is undocumented, it works with - # Absoft 8.0 on Linux - opt.append('-N15') - return opt - - def get_flags_f90(self): - opt = FCompiler.get_flags_f90(self) - opt.extend(["-YCFRL=1", "-YCOM_NAMES=LCS", "-YCOM_PFX", "-YEXT_PFX", - "-YCOM_SFX=_", "-YEXT_SFX=_", "-YEXT_NAMES=LCS"]) - if self.get_version(): - if self.get_version()>'4.6': - 
opt.extend(["-YDEALLOC=ALL"]) - return opt - - def get_flags_fix(self): - opt = FCompiler.get_flags_fix(self) - opt.extend(["-YCFRL=1", "-YCOM_NAMES=LCS", "-YCOM_PFX", "-YEXT_PFX", - "-YCOM_SFX=_", "-YEXT_SFX=_", "-YEXT_NAMES=LCS"]) - opt.extend(["-f", "fixed"]) - return opt - - def get_flags_opt(self): - opt = ['-O'] - return opt - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='absoft').get_version()) diff --git a/numpy/distutils/fcompiler/arm.py b/numpy/distutils/fcompiler/arm.py deleted file mode 100644 index 3eb7e9af9c8c..000000000000 --- a/numpy/distutils/fcompiler/arm.py +++ /dev/null @@ -1,71 +0,0 @@ -import sys - -from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file -from sys import platform -from os.path import join, dirname, normpath - -compilers = ['ArmFlangCompiler'] - -import functools - -class ArmFlangCompiler(FCompiler): - compiler_type = 'arm' - description = 'Arm Compiler' - version_pattern = r'\s*Arm.*version (?P[\d.-]+).*' - - ar_exe = 'lib.exe' - possible_executables = ['armflang'] - - executables = { - 'version_cmd': ["", "--version"], - 'compiler_f77': ["armflang", "-fPIC"], - 'compiler_fix': ["armflang", "-fPIC", "-ffixed-form"], - 'compiler_f90': ["armflang", "-fPIC"], - 'linker_so': ["armflang", "-fPIC", "-shared"], - 'archiver': ["ar", "-cr"], - 'ranlib': None - } - - pic_flags = ["-fPIC", "-DPIC"] - c_compiler = 'arm' - module_dir_switch = '-module ' # Don't remove ending space! 
- - def get_libraries(self): - opt = FCompiler.get_libraries(self) - opt.extend(['flang', 'flangrti', 'ompstub']) - return opt - - @functools.lru_cache(maxsize=128) - def get_library_dirs(self): - """List of compiler library directories.""" - opt = FCompiler.get_library_dirs(self) - flang_dir = dirname(self.executables['compiler_f77'][0]) - opt.append(normpath(join(flang_dir, '..', 'lib'))) - - return opt - - def get_flags(self): - return [] - - def get_flags_free(self): - return [] - - def get_flags_debug(self): - return ['-g'] - - def get_flags_opt(self): - return ['-O3'] - - def get_flags_arch(self): - return [] - - def runtime_library_dir_option(self, dir): - return '-Wl,-rpath=%s' % dir - - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='armflang').get_version()) - diff --git a/numpy/distutils/fcompiler/compaq.py b/numpy/distutils/fcompiler/compaq.py deleted file mode 100644 index 01314c136acf..000000000000 --- a/numpy/distutils/fcompiler/compaq.py +++ /dev/null @@ -1,120 +0,0 @@ - -#http://www.compaq.com/fortran/docs/ -import os -import sys - -from numpy.distutils.fcompiler import FCompiler -from distutils.errors import DistutilsPlatformError - -compilers = ['CompaqFCompiler'] -if os.name != 'posix' or sys.platform[:6] == 'cygwin' : - # Otherwise we'd get a false positive on posix systems with - # case-insensitive filesystems (like darwin), because we'll pick - # up /bin/df - compilers.append('CompaqVisualFCompiler') - -class CompaqFCompiler(FCompiler): - - compiler_type = 'compaq' - description = 'Compaq Fortran Compiler' - version_pattern = r'Compaq Fortran (?P[^\s]*).*' - - if sys.platform[:5]=='linux': - fc_exe = 'fort' - else: - fc_exe = 'f90' - - executables = { - 'version_cmd' : ['', "-version"], - 'compiler_f77' : [fc_exe, "-f77rtl", "-fixed"], - 'compiler_fix' : [fc_exe, "-fixed"], - 'compiler_f90' : [fc_exe], - 'linker_so' : [''], 
- 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - module_dir_switch = '-module ' # not tested - module_include_switch = '-I' - - def get_flags(self): - return ['-assume no2underscore', '-nomixed_str_len_arg'] - def get_flags_debug(self): - return ['-g', '-check bounds'] - def get_flags_opt(self): - return ['-O4', '-align dcommons', '-assume bigarrays', - '-assume nozsize', '-math_library fast'] - def get_flags_arch(self): - return ['-arch host', '-tune host'] - def get_flags_linker_so(self): - if sys.platform[:5]=='linux': - return ['-shared'] - return ['-shared', '-Wl,-expect_unresolved,*'] - -class CompaqVisualFCompiler(FCompiler): - - compiler_type = 'compaqv' - description = 'DIGITAL or Compaq Visual Fortran Compiler' - version_pattern = (r'(DIGITAL|Compaq) Visual Fortran Optimizing Compiler' - r' Version (?P[^\s]*).*') - - compile_switch = '/compile_only' - object_switch = '/object:' - library_switch = '/OUT:' #No space after /OUT:! - - static_lib_extension = ".lib" - static_lib_format = "%s%s" - module_dir_switch = '/module:' - module_include_switch = '/I' - - ar_exe = 'lib.exe' - fc_exe = 'DF' - - if sys.platform=='win32': - from numpy.distutils.msvccompiler import MSVCCompiler - - try: - m = MSVCCompiler() - m.initialize() - ar_exe = m.lib - except DistutilsPlatformError: - pass - except AttributeError as e: - if '_MSVCCompiler__root' in str(e): - print('Ignoring "%s" (I think it is msvccompiler.py bug)' % (e)) - else: - raise - except OSError as e: - if not "vcvarsall.bat" in str(e): - print("Unexpected OSError in", __file__) - raise - except ValueError as e: - if not "'path'" in str(e): - print("Unexpected ValueError in", __file__) - raise - - executables = { - 'version_cmd' : ['', "/what"], - 'compiler_f77' : [fc_exe, "/f77rtl", "/fixed"], - 'compiler_fix' : [fc_exe, "/fixed"], - 'compiler_f90' : [fc_exe], - 'linker_so' : [''], - 'archiver' : [ar_exe, "/OUT:"], - 'ranlib' : None - } - - def get_flags(self): - return ['/nologo', '/MD', '/WX', 
'/iface=(cref,nomixed_str_len_arg)', - '/names:lowercase', '/assume:underscore'] - def get_flags_opt(self): - return ['/Ox', '/fast', '/optimize:5', '/unroll:0', '/math_library:fast'] - def get_flags_arch(self): - return ['/threads'] - def get_flags_debug(self): - return ['/debug'] - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='compaq').get_version()) diff --git a/numpy/distutils/fcompiler/environment.py b/numpy/distutils/fcompiler/environment.py deleted file mode 100644 index ecd4d9989279..000000000000 --- a/numpy/distutils/fcompiler/environment.py +++ /dev/null @@ -1,88 +0,0 @@ -import os -from distutils.dist import Distribution - -__metaclass__ = type - -class EnvironmentConfig: - def __init__(self, distutils_section='ALL', **kw): - self._distutils_section = distutils_section - self._conf_keys = kw - self._conf = None - self._hook_handler = None - - def dump_variable(self, name): - conf_desc = self._conf_keys[name] - hook, envvar, confvar, convert, append = conf_desc - if not convert: - convert = lambda x : x - print('%s.%s:' % (self._distutils_section, name)) - v = self._hook_handler(name, hook) - print(' hook : %s' % (convert(v),)) - if envvar: - v = os.environ.get(envvar, None) - print(' environ: %s' % (convert(v),)) - if confvar and self._conf: - v = self._conf.get(confvar, (None, None))[1] - print(' config : %s' % (convert(v),)) - - def dump_variables(self): - for name in self._conf_keys: - self.dump_variable(name) - - def __getattr__(self, name): - try: - conf_desc = self._conf_keys[name] - except KeyError: - raise AttributeError( - f"'EnvironmentConfig' object has no attribute '{name}'" - ) from None - - return self._get_var(name, conf_desc) - - def get(self, name, default=None): - try: - conf_desc = self._conf_keys[name] - except KeyError: - return default - var = self._get_var(name, conf_desc) - if var is None: - var = default - 
return var - - def _get_var(self, name, conf_desc): - hook, envvar, confvar, convert, append = conf_desc - if convert is None: - convert = lambda x: x - var = self._hook_handler(name, hook) - if envvar is not None: - envvar_contents = os.environ.get(envvar) - if envvar_contents is not None: - envvar_contents = convert(envvar_contents) - if var and append: - if os.environ.get('NPY_DISTUTILS_APPEND_FLAGS', '1') == '1': - var.extend(envvar_contents) - else: - # NPY_DISTUTILS_APPEND_FLAGS was explicitly set to 0 - # to keep old (overwrite flags rather than append to - # them) behavior - var = envvar_contents - else: - var = envvar_contents - if confvar is not None and self._conf: - if confvar in self._conf: - source, confvar_contents = self._conf[confvar] - var = convert(confvar_contents) - return var - - - def clone(self, hook_handler): - ec = self.__class__(distutils_section=self._distutils_section, - **self._conf_keys) - ec._hook_handler = hook_handler - return ec - - def use_distribution(self, dist): - if isinstance(dist, Distribution): - self._conf = dist.get_option_dict(self._distutils_section) - else: - self._conf = dist diff --git a/numpy/distutils/fcompiler/fujitsu.py b/numpy/distutils/fcompiler/fujitsu.py deleted file mode 100644 index ddce67456d18..000000000000 --- a/numpy/distutils/fcompiler/fujitsu.py +++ /dev/null @@ -1,46 +0,0 @@ -""" -fujitsu - -Supports Fujitsu compiler function. -This compiler is developed by Fujitsu and is used in A64FX on Fugaku. 
-""" -from numpy.distutils.fcompiler import FCompiler - -compilers = ['FujitsuFCompiler'] - -class FujitsuFCompiler(FCompiler): - compiler_type = 'fujitsu' - description = 'Fujitsu Fortran Compiler' - - possible_executables = ['frt'] - version_pattern = r'frt \(FRT\) (?P[a-z\d.]+)' - # $ frt --version - # frt (FRT) x.x.x yyyymmdd - - executables = { - 'version_cmd' : ["", "--version"], - 'compiler_f77' : ["frt", "-Fixed"], - 'compiler_fix' : ["frt", "-Fixed"], - 'compiler_f90' : ["frt"], - 'linker_so' : ["frt", "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - pic_flags = ['-KPIC'] - module_dir_switch = '-M' - module_include_switch = '-I' - - def get_flags_opt(self): - return ['-O3'] - def get_flags_debug(self): - return ['-g'] - def runtime_library_dir_option(self, dir): - return f'-Wl,-rpath={dir}' - def get_libraries(self): - return ['fj90f', 'fj90i', 'fjsrcinfo'] - -if __name__ == '__main__': - from distutils import log - from numpy.distutils import customized_fcompiler - log.set_verbosity(2) - print(customized_fcompiler('fujitsu').get_version()) diff --git a/numpy/distutils/fcompiler/g95.py b/numpy/distutils/fcompiler/g95.py deleted file mode 100644 index e109a972a872..000000000000 --- a/numpy/distutils/fcompiler/g95.py +++ /dev/null @@ -1,42 +0,0 @@ -# http://g95.sourceforge.net/ -from numpy.distutils.fcompiler import FCompiler - -compilers = ['G95FCompiler'] - -class G95FCompiler(FCompiler): - compiler_type = 'g95' - description = 'G95 Fortran Compiler' - -# version_pattern = r'G95 \((GCC (?P[\d.]+)|.*?) \(g95!\) (?P.*)\).*' - # $ g95 --version - # G95 (GCC 4.0.3 (g95!) May 22 2006) - - version_pattern = r'G95 \((GCC (?P[\d.]+)|.*?) \(g95 (?P.*)!\) (?P.*)\).*' - # $ g95 --version - # G95 (GCC 4.0.3 (g95 0.90!) 
Aug 22 2006) - - executables = { - 'version_cmd' : ["", "--version"], - 'compiler_f77' : ["g95", "-ffixed-form"], - 'compiler_fix' : ["g95", "-ffixed-form"], - 'compiler_f90' : ["g95"], - 'linker_so' : ["", "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - pic_flags = ['-fpic'] - module_dir_switch = '-fmod=' - module_include_switch = '-I' - - def get_flags(self): - return ['-fno-second-underscore'] - def get_flags_opt(self): - return ['-O'] - def get_flags_debug(self): - return ['-g'] - -if __name__ == '__main__': - from distutils import log - from numpy.distutils import customized_fcompiler - log.set_verbosity(2) - print(customized_fcompiler('g95').get_version()) diff --git a/numpy/distutils/fcompiler/gnu.py b/numpy/distutils/fcompiler/gnu.py deleted file mode 100644 index 474ee35945b2..000000000000 --- a/numpy/distutils/fcompiler/gnu.py +++ /dev/null @@ -1,555 +0,0 @@ -import re -import os -import sys -import warnings -import platform -import tempfile -import hashlib -import base64 -import subprocess -from subprocess import Popen, PIPE, STDOUT -from numpy.distutils.exec_command import filepath_from_subprocess_output -from numpy.distutils.fcompiler import FCompiler -from distutils.version import LooseVersion - -compilers = ['GnuFCompiler', 'Gnu95FCompiler'] - -TARGET_R = re.compile(r"Target: ([a-zA-Z0-9_\-]*)") - -# XXX: handle cross compilation - - -def is_win64(): - return sys.platform == "win32" and platform.architecture()[0] == "64bit" - - -class GnuFCompiler(FCompiler): - compiler_type = 'gnu' - compiler_aliases = ('g77', ) - description = 'GNU Fortran 77 compiler' - - def gnu_version_match(self, version_string): - """Handle the different versions of GNU fortran compilers""" - # Strip warning(s) that may be emitted by gfortran - while version_string.startswith('gfortran: warning'): - version_string =\ - version_string[version_string.find('\n') + 1:].strip() - - # Gfortran versions from after 2010 will output a simple string - # (usually 
"x.y", "x.y.z" or "x.y.z-q") for ``-dumpversion``; older - # gfortrans may still return long version strings (``-dumpversion`` was - # an alias for ``--version``) - if len(version_string) <= 20: - # Try to find a valid version string - m = re.search(r'([0-9.]+)', version_string) - if m: - # g77 provides a longer version string that starts with GNU - # Fortran - if version_string.startswith('GNU Fortran'): - return ('g77', m.group(1)) - - # gfortran only outputs a version string such as #.#.#, so check - # if the match is at the start of the string - elif m.start() == 0: - return ('gfortran', m.group(1)) - else: - # Output probably from --version, try harder: - m = re.search(r'GNU Fortran\s+95.*?([0-9-.]+)', version_string) - if m: - return ('gfortran', m.group(1)) - m = re.search( - r'GNU Fortran.*?\-?([0-9-.]+\.[0-9-.]+)', version_string) - if m: - v = m.group(1) - if v.startswith(('0', '2', '3')): - # the '0' is for early g77's - return ('g77', v) - else: - # at some point in the 4.x series, the ' 95' was dropped - # from the version string - return ('gfortran', v) - - # If still nothing, raise an error to make the problem easy to find. 
- err = 'A valid Fortran version was not found in this string:\n' - raise ValueError(err + version_string) - - def version_match(self, version_string): - v = self.gnu_version_match(version_string) - if not v or v[0] != 'g77': - return None - return v[1] - - possible_executables = ['g77', 'f77'] - executables = { - 'version_cmd' : [None, "-dumpversion"], - 'compiler_f77' : [None, "-g", "-Wall", "-fno-second-underscore"], - 'compiler_f90' : None, # Use --fcompiler=gnu95 for f90 codes - 'compiler_fix' : None, - 'linker_so' : [None, "-g", "-Wall"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"], - 'linker_exe' : [None, "-g", "-Wall"] - } - module_dir_switch = None - module_include_switch = None - - # Cygwin: f771: warning: -fPIC ignored for target (all code is - # position independent) - if os.name != 'nt' and sys.platform != 'cygwin': - pic_flags = ['-fPIC'] - - # use -mno-cygwin for g77 when Python is not Cygwin-Python - if sys.platform == 'win32': - for key in ['version_cmd', 'compiler_f77', 'linker_so', 'linker_exe']: - executables[key].append('-mno-cygwin') - - g2c = 'g2c' - suggested_f90_compiler = 'gnu95' - - def get_flags_linker_so(self): - opt = self.linker_so[1:] - if sys.platform == 'darwin': - target = os.environ.get('MACOSX_DEPLOYMENT_TARGET', None) - # If MACOSX_DEPLOYMENT_TARGET is set, we simply trust the value - # and leave it alone. But, distutils will complain if the - # environment's value is different from the one in the Python - # Makefile used to build Python. We let distutils handle this - # error checking. - if not target: - # If MACOSX_DEPLOYMENT_TARGET is not set in the environment, - # we try to get it first from sysconfig and then - # fall back to setting it to 10.9 This is a reasonable default - # even when using the official Python dist and those derived - # from it. - import sysconfig - target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET') - if not target: - target = '10.9' - s = f'Env. 
variable MACOSX_DEPLOYMENT_TARGET set to {target}' - warnings.warn(s, stacklevel=2) - os.environ['MACOSX_DEPLOYMENT_TARGET'] = str(target) - opt.extend(['-undefined', 'dynamic_lookup', '-bundle']) - else: - opt.append("-shared") - if sys.platform.startswith('sunos'): - # SunOS often has dynamically loaded symbols defined in the - # static library libg2c.a The linker doesn't like this. To - # ignore the problem, use the -mimpure-text flag. It isn't - # the safest thing, but seems to work. 'man gcc' says: - # ".. Instead of using -mimpure-text, you should compile all - # source code with -fpic or -fPIC." - opt.append('-mimpure-text') - return opt - - def get_libgcc_dir(self): - try: - output = subprocess.check_output(self.compiler_f77 + - ['-print-libgcc-file-name']) - except (OSError, subprocess.CalledProcessError): - pass - else: - output = filepath_from_subprocess_output(output) - return os.path.dirname(output) - return None - - def get_libgfortran_dir(self): - if sys.platform[:5] == 'linux': - libgfortran_name = 'libgfortran.so' - elif sys.platform == 'darwin': - libgfortran_name = 'libgfortran.dylib' - else: - libgfortran_name = None - - libgfortran_dir = None - if libgfortran_name: - find_lib_arg = ['-print-file-name={0}'.format(libgfortran_name)] - try: - output = subprocess.check_output( - self.compiler_f77 + find_lib_arg) - except (OSError, subprocess.CalledProcessError): - pass - else: - output = filepath_from_subprocess_output(output) - libgfortran_dir = os.path.dirname(output) - return libgfortran_dir - - def get_library_dirs(self): - opt = [] - if sys.platform[:5] != 'linux': - d = self.get_libgcc_dir() - if d: - # if windows and not cygwin, libg2c lies in a different folder - if sys.platform == 'win32' and not d.startswith('/usr/lib'): - d = os.path.normpath(d) - path = os.path.join(d, "lib%s.a" % self.g2c) - if not os.path.exists(path): - root = os.path.join(d, *((os.pardir, ) * 4)) - d2 = os.path.abspath(os.path.join(root, 'lib')) - path = 
os.path.join(d2, "lib%s.a" % self.g2c) - if os.path.exists(path): - opt.append(d2) - opt.append(d) - # For Macports / Linux, libgfortran and libgcc are not co-located - lib_gfortran_dir = self.get_libgfortran_dir() - if lib_gfortran_dir: - opt.append(lib_gfortran_dir) - return opt - - def get_libraries(self): - opt = [] - d = self.get_libgcc_dir() - if d is not None: - g2c = self.g2c + '-pic' - f = self.static_lib_format % (g2c, self.static_lib_extension) - if not os.path.isfile(os.path.join(d, f)): - g2c = self.g2c - else: - g2c = self.g2c - - if g2c is not None: - opt.append(g2c) - c_compiler = self.c_compiler - if sys.platform == 'win32' and c_compiler and \ - c_compiler.compiler_type == 'msvc': - opt.append('gcc') - if sys.platform == 'darwin': - opt.append('cc_dynamic') - return opt - - def get_flags_debug(self): - return ['-g'] - - def get_flags_opt(self): - v = self.get_version() - if v and v <= '3.3.3': - # With this compiler version building Fortran BLAS/LAPACK - # with -O3 caused failures in lib.lapack heevr,syevr tests. 
- opt = ['-O2'] - else: - opt = ['-O3'] - opt.append('-funroll-loops') - return opt - - def _c_arch_flags(self): - """ Return detected arch flags from CFLAGS """ - import sysconfig - try: - cflags = sysconfig.get_config_vars()['CFLAGS'] - except KeyError: - return [] - arch_re = re.compile(r"-arch\s+(\w+)") - arch_flags = [] - for arch in arch_re.findall(cflags): - arch_flags += ['-arch', arch] - return arch_flags - - def get_flags_arch(self): - return [] - - def runtime_library_dir_option(self, dir): - if sys.platform == 'win32' or sys.platform == 'cygwin': - # Linux/Solaris/Unix support RPATH, Windows does not - raise NotImplementedError - - # TODO: could use -Xlinker here, if it's supported - assert "," not in dir - - if sys.platform == 'darwin': - return f'-Wl,-rpath,{dir}' - elif sys.platform.startswith(('aix', 'os400')): - # AIX RPATH is called LIBPATH - return f'-Wl,-blibpath:{dir}' - else: - return f'-Wl,-rpath={dir}' - - -class Gnu95FCompiler(GnuFCompiler): - compiler_type = 'gnu95' - compiler_aliases = ('gfortran', ) - description = 'GNU Fortran 95 compiler' - - def version_match(self, version_string): - v = self.gnu_version_match(version_string) - if not v or v[0] != 'gfortran': - return None - v = v[1] - if LooseVersion(v) >= "4": - # gcc-4 series releases do not support -mno-cygwin option - pass - else: - # use -mno-cygwin flag for gfortran when Python is not - # Cygwin-Python - if sys.platform == 'win32': - for key in [ - 'version_cmd', 'compiler_f77', 'compiler_f90', - 'compiler_fix', 'linker_so', 'linker_exe' - ]: - self.executables[key].append('-mno-cygwin') - return v - - possible_executables = ['gfortran', 'f95'] - executables = { - 'version_cmd' : ["", "-dumpversion"], - 'compiler_f77' : [None, "-Wall", "-g", "-ffixed-form", - "-fno-second-underscore"], - 'compiler_f90' : [None, "-Wall", "-g", - "-fno-second-underscore"], - 'compiler_fix' : [None, "-Wall", "-g","-ffixed-form", - "-fno-second-underscore"], - 'linker_so' : ["", "-Wall", "-g"], - 
'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"], - 'linker_exe' : [None, "-Wall"] - } - - module_dir_switch = '-J' - module_include_switch = '-I' - - if sys.platform.startswith(('aix', 'os400')): - executables['linker_so'].append('-lpthread') - if platform.architecture()[0][:2] == '64': - for key in ['compiler_f77', 'compiler_f90','compiler_fix','linker_so', 'linker_exe']: - executables[key].append('-maix64') - - g2c = 'gfortran' - - def _universal_flags(self, cmd): - """Return a list of -arch flags for every supported architecture.""" - if not sys.platform == 'darwin': - return [] - arch_flags = [] - # get arches the C compiler gets. - c_archs = self._c_arch_flags() - if "i386" in c_archs: - c_archs[c_archs.index("i386")] = "i686" - # check the arches the Fortran compiler supports, and compare with - # arch flags from C compiler - for arch in ["ppc", "i686", "x86_64", "ppc64", "s390x"]: - if _can_target(cmd, arch) and arch in c_archs: - arch_flags.extend(["-arch", arch]) - return arch_flags - - def get_flags(self): - flags = GnuFCompiler.get_flags(self) - arch_flags = self._universal_flags(self.compiler_f90) - if arch_flags: - flags[:0] = arch_flags - return flags - - def get_flags_linker_so(self): - flags = GnuFCompiler.get_flags_linker_so(self) - arch_flags = self._universal_flags(self.linker_so) - if arch_flags: - flags[:0] = arch_flags - return flags - - def get_library_dirs(self): - opt = GnuFCompiler.get_library_dirs(self) - if sys.platform == 'win32': - c_compiler = self.c_compiler - if c_compiler and c_compiler.compiler_type == "msvc": - target = self.get_target() - if target: - d = os.path.normpath(self.get_libgcc_dir()) - root = os.path.join(d, *((os.pardir, ) * 4)) - path = os.path.join(root, "lib") - mingwdir = os.path.normpath(path) - if os.path.exists(os.path.join(mingwdir, "libmingwex.a")): - opt.append(mingwdir) - # For Macports / Linux, libgfortran and libgcc are not co-located - lib_gfortran_dir = self.get_libgfortran_dir() - if 
lib_gfortran_dir: - opt.append(lib_gfortran_dir) - return opt - - def get_libraries(self): - opt = GnuFCompiler.get_libraries(self) - if sys.platform == 'darwin': - opt.remove('cc_dynamic') - if sys.platform == 'win32': - c_compiler = self.c_compiler - if c_compiler and c_compiler.compiler_type == "msvc": - if "gcc" in opt: - i = opt.index("gcc") - opt.insert(i + 1, "mingwex") - opt.insert(i + 1, "mingw32") - c_compiler = self.c_compiler - if c_compiler and c_compiler.compiler_type == "msvc": - return [] - else: - pass - return opt - - def get_target(self): - try: - p = subprocess.Popen( - self.compiler_f77 + ['-v'], - stdin=subprocess.PIPE, - stderr=subprocess.PIPE, - ) - stdout, stderr = p.communicate() - output = (stdout or b"") + (stderr or b"") - except (OSError, subprocess.CalledProcessError): - pass - else: - output = filepath_from_subprocess_output(output) - m = TARGET_R.search(output) - if m: - return m.group(1) - return "" - - def _hash_files(self, filenames): - h = hashlib.sha1() - for fn in filenames: - with open(fn, 'rb') as f: - while True: - block = f.read(131072) - if not block: - break - h.update(block) - text = base64.b32encode(h.digest()) - text = text.decode('ascii') - return text.rstrip('=') - - def _link_wrapper_lib(self, objects, output_dir, extra_dll_dir, - chained_dlls, is_archive): - """Create a wrapper shared library for the given objects - - Return an MSVC-compatible lib - """ - - c_compiler = self.c_compiler - if c_compiler.compiler_type != "msvc": - raise ValueError("This method only supports MSVC") - - object_hash = self._hash_files(list(objects) + list(chained_dlls)) - - if is_win64(): - tag = 'win_amd64' - else: - tag = 'win32' - - basename = 'lib' + os.path.splitext( - os.path.basename(objects[0]))[0][:8] - root_name = basename + '.' 
+ object_hash + '.gfortran-' + tag - dll_name = root_name + '.dll' - def_name = root_name + '.def' - lib_name = root_name + '.lib' - dll_path = os.path.join(extra_dll_dir, dll_name) - def_path = os.path.join(output_dir, def_name) - lib_path = os.path.join(output_dir, lib_name) - - if os.path.isfile(lib_path): - # Nothing to do - return lib_path, dll_path - - if is_archive: - objects = (["-Wl,--whole-archive"] + list(objects) + - ["-Wl,--no-whole-archive"]) - self.link_shared_object( - objects, - dll_name, - output_dir=extra_dll_dir, - extra_postargs=list(chained_dlls) + [ - '-Wl,--allow-multiple-definition', - '-Wl,--output-def,' + def_path, - '-Wl,--export-all-symbols', - '-Wl,--enable-auto-import', - '-static', - '-mlong-double-64', - ]) - - # No PowerPC! - if is_win64(): - specifier = '/MACHINE:X64' - else: - specifier = '/MACHINE:X86' - - # MSVC specific code - lib_args = ['/def:' + def_path, '/OUT:' + lib_path, specifier] - if not c_compiler.initialized: - c_compiler.initialize() - c_compiler.spawn([c_compiler.lib] + lib_args) - - return lib_path, dll_path - - def can_ccompiler_link(self, compiler): - # MSVC cannot link objects compiled by GNU fortran - return compiler.compiler_type not in ("msvc", ) - - def wrap_unlinkable_objects(self, objects, output_dir, extra_dll_dir): - """ - Convert a set of object files that are not compatible with the default - linker, to a file that is compatible. - """ - if self.c_compiler.compiler_type == "msvc": - # Compile a DLL and return the lib for the DLL as - # the object. Also keep track of previous DLLs that - # we have compiled so that we can link against them. 
- - # If there are .a archives, assume they are self-contained - # static libraries, and build separate DLLs for each - archives = [] - plain_objects = [] - for obj in objects: - if obj.lower().endswith('.a'): - archives.append(obj) - else: - plain_objects.append(obj) - - chained_libs = [] - chained_dlls = [] - for archive in archives[::-1]: - lib, dll = self._link_wrapper_lib( - [archive], - output_dir, - extra_dll_dir, - chained_dlls=chained_dlls, - is_archive=True) - chained_libs.insert(0, lib) - chained_dlls.insert(0, dll) - - if not plain_objects: - return chained_libs - - lib, dll = self._link_wrapper_lib( - plain_objects, - output_dir, - extra_dll_dir, - chained_dlls=chained_dlls, - is_archive=False) - return [lib] + chained_libs - else: - raise ValueError("Unsupported C compiler") - - -def _can_target(cmd, arch): - """Return true if the architecture supports the -arch flag""" - newcmd = cmd[:] - fid, filename = tempfile.mkstemp(suffix=".f") - os.close(fid) - try: - d = os.path.dirname(filename) - output = os.path.splitext(filename)[0] + ".o" - try: - newcmd.extend(["-arch", arch, "-c", filename]) - p = Popen(newcmd, stderr=STDOUT, stdout=PIPE, cwd=d) - p.communicate() - return p.returncode == 0 - finally: - if os.path.exists(output): - os.remove(output) - finally: - os.remove(filename) - - -if __name__ == '__main__': - from distutils import log - from numpy.distutils import customized_fcompiler - log.set_verbosity(2) - - print(customized_fcompiler('gnu').get_version()) - try: - print(customized_fcompiler('g95').get_version()) - except Exception as e: - print(e) diff --git a/numpy/distutils/fcompiler/hpux.py b/numpy/distutils/fcompiler/hpux.py deleted file mode 100644 index 09e6483bf5ad..000000000000 --- a/numpy/distutils/fcompiler/hpux.py +++ /dev/null @@ -1,41 +0,0 @@ -from numpy.distutils.fcompiler import FCompiler - -compilers = ['HPUXFCompiler'] - -class HPUXFCompiler(FCompiler): - - compiler_type = 'hpux' - description = 'HP Fortran 90 Compiler' - 
version_pattern = r'HP F90 (?P[^\s*,]*)' - - executables = { - 'version_cmd' : ["f90", "+version"], - 'compiler_f77' : ["f90"], - 'compiler_fix' : ["f90"], - 'compiler_f90' : ["f90"], - 'linker_so' : ["ld", "-b"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - module_dir_switch = None #XXX: fix me - module_include_switch = None #XXX: fix me - pic_flags = ['+Z'] - def get_flags(self): - return self.pic_flags + ['+ppu', '+DD64'] - def get_flags_opt(self): - return ['-O3'] - def get_libraries(self): - return ['m'] - def get_library_dirs(self): - opt = ['/usr/lib/hpux64'] - return opt - def get_version(self, force=0, ok_status=[256, 0, 1]): - # XXX status==256 may indicate 'unrecognized option' or - # 'no input file'. So, version_cmd needs more work. - return FCompiler.get_version(self, force, ok_status) - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(10) - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='hpux').get_version()) diff --git a/numpy/distutils/fcompiler/ibm.py b/numpy/distutils/fcompiler/ibm.py deleted file mode 100644 index 29927518c703..000000000000 --- a/numpy/distutils/fcompiler/ibm.py +++ /dev/null @@ -1,97 +0,0 @@ -import os -import re -import sys -import subprocess - -from numpy.distutils.fcompiler import FCompiler -from numpy.distutils.exec_command import find_executable -from numpy.distutils.misc_util import make_temp_file -from distutils import log - -compilers = ['IBMFCompiler'] - -class IBMFCompiler(FCompiler): - compiler_type = 'ibm' - description = 'IBM XL Fortran Compiler' - version_pattern = r'(xlf\(1\)\s*|)IBM XL Fortran ((Advanced Edition |)Version |Enterprise Edition V|for AIX, V)(?P[^\s*]*)' - #IBM XL Fortran Enterprise Edition V10.1 for AIX \nVersion: 10.01.0000.0004 - - executables = { - 'version_cmd' : ["", "-qversion"], - 'compiler_f77' : ["xlf"], - 'compiler_fix' : ["xlf90", "-qfixed"], - 'compiler_f90' : ["xlf90"], - 'linker_so' : ["xlf95"], - 
'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - def get_version(self,*args,**kwds): - version = FCompiler.get_version(self,*args,**kwds) - - if version is None and sys.platform.startswith('aix'): - # use lslpp to find out xlf version - lslpp = find_executable('lslpp') - xlf = find_executable('xlf') - if os.path.exists(xlf) and os.path.exists(lslpp): - try: - o = subprocess.check_output([lslpp, '-Lc', 'xlfcmp']) - except (OSError, subprocess.CalledProcessError): - pass - else: - m = re.search(r'xlfcmp:(?P\d+([.]\d+)+)', o) - if m: version = m.group('version') - - xlf_dir = '/etc/opt/ibmcmp/xlf' - if version is None and os.path.isdir(xlf_dir): - # linux: - # If the output of xlf does not contain version info - # (that's the case with xlf 8.1, for instance) then - # let's try another method: - l = sorted(os.listdir(xlf_dir)) - l.reverse() - l = [d for d in l if os.path.isfile(os.path.join(xlf_dir, d, 'xlf.cfg'))] - if l: - from distutils.version import LooseVersion - self.version = version = LooseVersion(l[0]) - return version - - def get_flags(self): - return ['-qextname'] - - def get_flags_debug(self): - return ['-g'] - - def get_flags_linker_so(self): - opt = [] - if sys.platform=='darwin': - opt.append('-Wl,-bundle,-flat_namespace,-undefined,suppress') - else: - opt.append('-bshared') - version = self.get_version(ok_status=[0, 40]) - if version is not None: - if sys.platform.startswith('aix'): - xlf_cfg = '/etc/xlf.cfg' - else: - xlf_cfg = '/etc/opt/ibmcmp/xlf/%s/xlf.cfg' % version - fo, new_cfg = make_temp_file(suffix='_xlf.cfg') - log.info('Creating '+new_cfg) - with open(xlf_cfg) as fi: - crt1_match = re.compile(r'\s*crt\s*=\s*(?P.*)/crt1.o').match - for line in fi: - m = crt1_match(line) - if m: - fo.write('crt = %s/bundle1.o\n' % (m.group('path'))) - else: - fo.write(line) - fo.close() - opt.append('-F'+new_cfg) - return opt - - def get_flags_opt(self): - return ['-O3'] - -if __name__ == '__main__': - from numpy.distutils import 
customized_fcompiler - log.set_verbosity(2) - print(customized_fcompiler(compiler='ibm').get_version()) diff --git a/numpy/distutils/fcompiler/intel.py b/numpy/distutils/fcompiler/intel.py deleted file mode 100644 index 1d6065904110..000000000000 --- a/numpy/distutils/fcompiler/intel.py +++ /dev/null @@ -1,211 +0,0 @@ -# http://developer.intel.com/software/products/compilers/flin/ -import sys - -from numpy.distutils.ccompiler import simple_version_match -from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file - -compilers = ['IntelFCompiler', 'IntelVisualFCompiler', - 'IntelItaniumFCompiler', 'IntelItaniumVisualFCompiler', - 'IntelEM64VisualFCompiler', 'IntelEM64TFCompiler'] - - -def intel_version_match(type): - # Match against the important stuff in the version string - return simple_version_match(start=r'Intel.*?Fortran.*?(?:%s).*?Version' % (type,)) - - -class BaseIntelFCompiler(FCompiler): - def update_executables(self): - f = dummy_fortran_file() - self.executables['version_cmd'] = ['', '-FI', '-V', '-c', - f + '.f', '-o', f + '.o'] - - def runtime_library_dir_option(self, dir): - # TODO: could use -Xlinker here, if it's supported - assert "," not in dir - - return '-Wl,-rpath=%s' % dir - - -class IntelFCompiler(BaseIntelFCompiler): - - compiler_type = 'intel' - compiler_aliases = ('ifort',) - description = 'Intel Fortran Compiler for 32-bit apps' - version_match = intel_version_match('32-bit|IA-32') - - possible_executables = ['ifort', 'ifc'] - - executables = { - 'version_cmd' : None, # set by update_executables - 'compiler_f77' : [None, "-72", "-w90", "-w95"], - 'compiler_f90' : [None], - 'compiler_fix' : [None, "-FI"], - 'linker_so' : ["", "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - pic_flags = ['-fPIC'] - module_dir_switch = '-module ' # Don't remove ending space! 
- module_include_switch = '-I' - - def get_flags_free(self): - return ['-FR'] - - def get_flags(self): - return ['-fPIC'] - - def get_flags_opt(self): # Scipy test failures with -O2 - v = self.get_version() - mpopt = 'openmp' if v and v < '15' else 'qopenmp' - return ['-fp-model', 'strict', '-O1', - '-assume', 'minus0', '-{}'.format(mpopt)] - - def get_flags_arch(self): - return [] - - def get_flags_linker_so(self): - opt = FCompiler.get_flags_linker_so(self) - v = self.get_version() - if v and v >= '8.0': - opt.append('-nofor_main') - if sys.platform == 'darwin': - # Here, it's -dynamiclib - try: - idx = opt.index('-shared') - opt.remove('-shared') - except ValueError: - idx = 0 - opt[idx:idx] = ['-dynamiclib', '-Wl,-undefined,dynamic_lookup'] - return opt - - -class IntelItaniumFCompiler(IntelFCompiler): - compiler_type = 'intele' - compiler_aliases = () - description = 'Intel Fortran Compiler for Itanium apps' - - version_match = intel_version_match('Itanium|IA-64') - - possible_executables = ['ifort', 'efort', 'efc'] - - executables = { - 'version_cmd' : None, - 'compiler_f77' : [None, "-FI", "-w90", "-w95"], - 'compiler_fix' : [None, "-FI"], - 'compiler_f90' : [None], - 'linker_so' : ['', "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - -class IntelEM64TFCompiler(IntelFCompiler): - compiler_type = 'intelem' - compiler_aliases = () - description = 'Intel Fortran Compiler for 64-bit apps' - - version_match = intel_version_match('EM64T-based|Intel\\(R\\) 64|64|IA-64|64-bit') - - possible_executables = ['ifort', 'efort', 'efc'] - - executables = { - 'version_cmd' : None, - 'compiler_f77' : [None, "-FI"], - 'compiler_fix' : [None, "-FI"], - 'compiler_f90' : [None], - 'linker_so' : ['', "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - -# Is there no difference in the version string between the above compilers -# and the Visual compilers? 
- - -class IntelVisualFCompiler(BaseIntelFCompiler): - compiler_type = 'intelv' - description = 'Intel Visual Fortran Compiler for 32-bit apps' - version_match = intel_version_match('32-bit|IA-32') - - def update_executables(self): - f = dummy_fortran_file() - self.executables['version_cmd'] = ['', '/FI', '/c', - f + '.f', '/o', f + '.o'] - - ar_exe = 'lib.exe' - possible_executables = ['ifort', 'ifl'] - - executables = { - 'version_cmd' : None, - 'compiler_f77' : [None], - 'compiler_fix' : [None], - 'compiler_f90' : [None], - 'linker_so' : [None], - 'archiver' : [ar_exe, "/verbose", "/OUT:"], - 'ranlib' : None - } - - compile_switch = '/c ' - object_switch = '/Fo' # No space after /Fo! - library_switch = '/OUT:' # No space after /OUT:! - module_dir_switch = '/module:' # No space after /module: - module_include_switch = '/I' - - def get_flags(self): - opt = ['/nologo', '/MD', '/nbs', '/names:lowercase', - '/assume:underscore', '/fpp'] - return opt - - def get_flags_free(self): - return [] - - def get_flags_debug(self): - return ['/4Yb', '/d2'] - - def get_flags_opt(self): - return ['/O1', '/assume:minus0'] # Scipy test failures with /O2 - - def get_flags_arch(self): - return ["/arch:IA32", "/QaxSSE3"] - - def runtime_library_dir_option(self, dir): - raise NotImplementedError - - -class IntelItaniumVisualFCompiler(IntelVisualFCompiler): - compiler_type = 'intelev' - description = 'Intel Visual Fortran Compiler for Itanium apps' - - version_match = intel_version_match('Itanium') - - possible_executables = ['efl'] # XXX this is a wild guess - ar_exe = IntelVisualFCompiler.ar_exe - - executables = { - 'version_cmd' : None, - 'compiler_f77' : [None, "-FI", "-w90", "-w95"], - 'compiler_fix' : [None, "-FI", "-4L72", "-w"], - 'compiler_f90' : [None], - 'linker_so' : ['', "-shared"], - 'archiver' : [ar_exe, "/verbose", "/OUT:"], - 'ranlib' : None - } - - -class IntelEM64VisualFCompiler(IntelVisualFCompiler): - compiler_type = 'intelvem' - description = 'Intel Visual Fortran 
Compiler for 64-bit apps' - - version_match = simple_version_match(start=r'Intel\(R\).*?64,') - - def get_flags_arch(self): - return [] - - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='intel').get_version()) diff --git a/numpy/distutils/fcompiler/lahey.py b/numpy/distutils/fcompiler/lahey.py deleted file mode 100644 index e925838268b8..000000000000 --- a/numpy/distutils/fcompiler/lahey.py +++ /dev/null @@ -1,45 +0,0 @@ -import os - -from numpy.distutils.fcompiler import FCompiler - -compilers = ['LaheyFCompiler'] - -class LaheyFCompiler(FCompiler): - - compiler_type = 'lahey' - description = 'Lahey/Fujitsu Fortran 95 Compiler' - version_pattern = r'Lahey/Fujitsu Fortran 95 Compiler Release (?P[^\s*]*)' - - executables = { - 'version_cmd' : ["", "--version"], - 'compiler_f77' : ["lf95", "--fix"], - 'compiler_fix' : ["lf95", "--fix"], - 'compiler_f90' : ["lf95"], - 'linker_so' : ["lf95", "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - module_dir_switch = None #XXX Fix me - module_include_switch = None #XXX Fix me - - def get_flags_opt(self): - return ['-O'] - def get_flags_debug(self): - return ['-g', '--chk', '--chkglobal'] - def get_library_dirs(self): - opt = [] - d = os.environ.get('LAHEY') - if d: - opt.append(os.path.join(d, 'lib')) - return opt - def get_libraries(self): - opt = [] - opt.extend(['fj9f6', 'fj9i6', 'fj9ipp', 'fj9e6']) - return opt - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='lahey').get_version()) diff --git a/numpy/distutils/fcompiler/mips.py b/numpy/distutils/fcompiler/mips.py deleted file mode 100644 index a0973804571b..000000000000 --- a/numpy/distutils/fcompiler/mips.py +++ /dev/null @@ -1,54 +0,0 @@ -from numpy.distutils.cpuinfo import cpu -from 
numpy.distutils.fcompiler import FCompiler - -compilers = ['MIPSFCompiler'] - -class MIPSFCompiler(FCompiler): - - compiler_type = 'mips' - description = 'MIPSpro Fortran Compiler' - version_pattern = r'MIPSpro Compilers: Version (?P[^\s*,]*)' - - executables = { - 'version_cmd' : ["", "-version"], - 'compiler_f77' : ["f77", "-f77"], - 'compiler_fix' : ["f90", "-fixedform"], - 'compiler_f90' : ["f90"], - 'linker_so' : ["f90", "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : None - } - module_dir_switch = None #XXX: fix me - module_include_switch = None #XXX: fix me - pic_flags = ['-KPIC'] - - def get_flags(self): - return self.pic_flags + ['-n32'] - def get_flags_opt(self): - return ['-O3'] - def get_flags_arch(self): - opt = [] - for a in '19 20 21 22_4k 22_5k 24 25 26 27 28 30 32_5k 32_10k'.split(): - if getattr(cpu, 'is_IP%s'%a)(): - opt.append('-TARG:platform=IP%s' % a) - break - return opt - def get_flags_arch_f77(self): - r = None - if cpu.is_r10000(): r = 10000 - elif cpu.is_r12000(): r = 12000 - elif cpu.is_r8000(): r = 8000 - elif cpu.is_r5000(): r = 5000 - elif cpu.is_r4000(): r = 4000 - if r is not None: - return ['r%s' % (r)] - return [] - def get_flags_arch_f90(self): - r = self.get_flags_arch_f77() - if r: - r[0] = '-' + r[0] - return r - -if __name__ == '__main__': - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='mips').get_version()) diff --git a/numpy/distutils/fcompiler/nag.py b/numpy/distutils/fcompiler/nag.py deleted file mode 100644 index 939201f44e02..000000000000 --- a/numpy/distutils/fcompiler/nag.py +++ /dev/null @@ -1,87 +0,0 @@ -import sys -import re -from numpy.distutils.fcompiler import FCompiler - -compilers = ['NAGFCompiler', 'NAGFORCompiler'] - -class BaseNAGFCompiler(FCompiler): - version_pattern = r'NAG.* Release (?P[^(\s]*)' - - def version_match(self, version_string): - m = re.search(self.version_pattern, version_string) - if m: - return m.group('version') - else: - return None - - 
def get_flags_linker_so(self): - return ["-Wl,-shared"] - def get_flags_opt(self): - return ['-O4'] - def get_flags_arch(self): - return [] - -class NAGFCompiler(BaseNAGFCompiler): - - compiler_type = 'nag' - description = 'NAGWare Fortran 95 Compiler' - - executables = { - 'version_cmd' : ["", "-V"], - 'compiler_f77' : ["f95", "-fixed"], - 'compiler_fix' : ["f95", "-fixed"], - 'compiler_f90' : ["f95"], - 'linker_so' : [""], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - def get_flags_linker_so(self): - if sys.platform == 'darwin': - return ['-unsharedf95', '-Wl,-bundle,-flat_namespace,-undefined,suppress'] - return BaseNAGFCompiler.get_flags_linker_so(self) - def get_flags_arch(self): - version = self.get_version() - if version and version < '5.1': - return ['-target=native'] - else: - return BaseNAGFCompiler.get_flags_arch(self) - def get_flags_debug(self): - return ['-g', '-gline', '-g90', '-nan', '-C'] - -class NAGFORCompiler(BaseNAGFCompiler): - - compiler_type = 'nagfor' - description = 'NAG Fortran Compiler' - - executables = { - 'version_cmd' : ["nagfor", "-V"], - 'compiler_f77' : ["nagfor", "-fixed"], - 'compiler_fix' : ["nagfor", "-fixed"], - 'compiler_f90' : ["nagfor"], - 'linker_so' : ["nagfor"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - - def get_flags_linker_so(self): - if sys.platform == 'darwin': - return ['-unsharedrts', - '-Wl,-bundle,-flat_namespace,-undefined,suppress'] - return BaseNAGFCompiler.get_flags_linker_so(self) - def get_flags_debug(self): - version = self.get_version() - if version and version > '6.1': - return ['-g', '-u', '-nan', '-C=all', '-thread_safe', - '-kind=unique', '-Warn=allocation', '-Warn=subnormal'] - else: - return ['-g', '-nan', '-C=all', '-u', '-thread_safe'] - - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - compiler = customized_fcompiler(compiler='nagfor') - print(compiler.get_version()) - 
print(compiler.get_flags_debug()) diff --git a/numpy/distutils/fcompiler/none.py b/numpy/distutils/fcompiler/none.py deleted file mode 100644 index ef411fffc7cb..000000000000 --- a/numpy/distutils/fcompiler/none.py +++ /dev/null @@ -1,28 +0,0 @@ -from numpy.distutils.fcompiler import FCompiler -from numpy.distutils import customized_fcompiler - -compilers = ['NoneFCompiler'] - -class NoneFCompiler(FCompiler): - - compiler_type = 'none' - description = 'Fake Fortran compiler' - - executables = {'compiler_f77': None, - 'compiler_f90': None, - 'compiler_fix': None, - 'linker_so': None, - 'linker_exe': None, - 'archiver': None, - 'ranlib': None, - 'version_cmd': None, - } - - def find_executables(self): - pass - - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - print(customized_fcompiler(compiler='none').get_version()) diff --git a/numpy/distutils/fcompiler/nv.py b/numpy/distutils/fcompiler/nv.py deleted file mode 100644 index f518c8b0027a..000000000000 --- a/numpy/distutils/fcompiler/nv.py +++ /dev/null @@ -1,53 +0,0 @@ -from numpy.distutils.fcompiler import FCompiler - -compilers = ['NVHPCFCompiler'] - -class NVHPCFCompiler(FCompiler): - """ NVIDIA High Performance Computing (HPC) SDK Fortran Compiler - - https://developer.nvidia.com/hpc-sdk - - Since august 2020 the NVIDIA HPC SDK includes the compilers formerly known as The Portland Group compilers, - https://www.pgroup.com/index.htm. - See also `numpy.distutils.fcompiler.pg`. 
- """ - - compiler_type = 'nv' - description = 'NVIDIA HPC SDK' - version_pattern = r'\s*(nvfortran|.+ \(aka nvfortran\)) (?P[\d.-]+).*' - - executables = { - 'version_cmd': ["", "-V"], - 'compiler_f77': ["nvfortran"], - 'compiler_fix': ["nvfortran", "-Mfixed"], - 'compiler_f90': ["nvfortran"], - 'linker_so': [""], - 'archiver': ["ar", "-cr"], - 'ranlib': ["ranlib"] - } - pic_flags = ['-fpic'] - - module_dir_switch = '-module ' - module_include_switch = '-I' - - def get_flags(self): - opt = ['-Minform=inform', '-Mnosecond_underscore'] - return self.pic_flags + opt - - def get_flags_opt(self): - return ['-fast'] - - def get_flags_debug(self): - return ['-g'] - - def get_flags_linker_so(self): - return ["-shared", '-fpic'] - - def runtime_library_dir_option(self, dir): - return '-R%s' % dir - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='nv').get_version()) diff --git a/numpy/distutils/fcompiler/pathf95.py b/numpy/distutils/fcompiler/pathf95.py deleted file mode 100644 index 0768cb12e87a..000000000000 --- a/numpy/distutils/fcompiler/pathf95.py +++ /dev/null @@ -1,33 +0,0 @@ -from numpy.distutils.fcompiler import FCompiler - -compilers = ['PathScaleFCompiler'] - -class PathScaleFCompiler(FCompiler): - - compiler_type = 'pathf95' - description = 'PathScale Fortran Compiler' - version_pattern = r'PathScale\(TM\) Compiler Suite: Version (?P[\d.]+)' - - executables = { - 'version_cmd' : ["pathf95", "-version"], - 'compiler_f77' : ["pathf95", "-fixedform"], - 'compiler_fix' : ["pathf95", "-fixedform"], - 'compiler_f90' : ["pathf95"], - 'linker_so' : ["pathf95", "-shared"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - pic_flags = ['-fPIC'] - module_dir_switch = '-module ' # Don't remove ending space! 
- module_include_switch = '-I' - - def get_flags_opt(self): - return ['-O3'] - def get_flags_debug(self): - return ['-g'] - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='pathf95').get_version()) diff --git a/numpy/distutils/fcompiler/pg.py b/numpy/distutils/fcompiler/pg.py deleted file mode 100644 index 72442c4fec61..000000000000 --- a/numpy/distutils/fcompiler/pg.py +++ /dev/null @@ -1,128 +0,0 @@ -# http://www.pgroup.com -import sys - -from numpy.distutils.fcompiler import FCompiler -from sys import platform -from os.path import join, dirname, normpath - -compilers = ['PGroupFCompiler', 'PGroupFlangCompiler'] - - -class PGroupFCompiler(FCompiler): - - compiler_type = 'pg' - description = 'Portland Group Fortran Compiler' - version_pattern = r'\s*pg(f77|f90|hpf|fortran) (?P[\d.-]+).*' - - if platform == 'darwin': - executables = { - 'version_cmd': ["", "-V"], - 'compiler_f77': ["pgfortran", "-dynamiclib"], - 'compiler_fix': ["pgfortran", "-Mfixed", "-dynamiclib"], - 'compiler_f90': ["pgfortran", "-dynamiclib"], - 'linker_so': ["libtool"], - 'archiver': ["ar", "-cr"], - 'ranlib': ["ranlib"] - } - pic_flags = [''] - else: - executables = { - 'version_cmd': ["", "-V"], - 'compiler_f77': ["pgfortran"], - 'compiler_fix': ["pgfortran", "-Mfixed"], - 'compiler_f90': ["pgfortran"], - 'linker_so': [""], - 'archiver': ["ar", "-cr"], - 'ranlib': ["ranlib"] - } - pic_flags = ['-fpic'] - - module_dir_switch = '-module ' - module_include_switch = '-I' - - def get_flags(self): - opt = ['-Minform=inform', '-Mnosecond_underscore'] - return self.pic_flags + opt - - def get_flags_opt(self): - return ['-fast'] - - def get_flags_debug(self): - return ['-g'] - - if platform == 'darwin': - def get_flags_linker_so(self): - return ["-dynamic", '-undefined', 'dynamic_lookup'] - - else: - def get_flags_linker_so(self): - return ["-shared", '-fpic'] - - def 
runtime_library_dir_option(self, dir): - return '-R%s' % dir - - -import functools - -class PGroupFlangCompiler(FCompiler): - compiler_type = 'flang' - description = 'Portland Group Fortran LLVM Compiler' - version_pattern = r'\s*(flang|clang) version (?P[\d.-]+).*' - - ar_exe = 'lib.exe' - possible_executables = ['flang'] - - executables = { - 'version_cmd': ["", "--version"], - 'compiler_f77': ["flang"], - 'compiler_fix': ["flang"], - 'compiler_f90': ["flang"], - 'linker_so': [None], - 'archiver': [ar_exe, "/verbose", "/OUT:"], - 'ranlib': None - } - - library_switch = '/OUT:' # No space after /OUT:! - module_dir_switch = '-module ' # Don't remove ending space! - - def get_libraries(self): - opt = FCompiler.get_libraries(self) - opt.extend(['flang', 'flangrti', 'ompstub']) - return opt - - @functools.lru_cache(maxsize=128) - def get_library_dirs(self): - """List of compiler library directories.""" - opt = FCompiler.get_library_dirs(self) - flang_dir = dirname(self.executables['compiler_f77'][0]) - opt.append(normpath(join(flang_dir, '..', 'lib'))) - - return opt - - def get_flags(self): - return [] - - def get_flags_free(self): - return [] - - def get_flags_debug(self): - return ['-g'] - - def get_flags_opt(self): - return ['-O3'] - - def get_flags_arch(self): - return [] - - def runtime_library_dir_option(self, dir): - raise NotImplementedError - - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - if 'flang' in sys.argv: - print(customized_fcompiler(compiler='flang').get_version()) - else: - print(customized_fcompiler(compiler='pg').get_version()) diff --git a/numpy/distutils/fcompiler/sun.py b/numpy/distutils/fcompiler/sun.py deleted file mode 100644 index d039f0b25705..000000000000 --- a/numpy/distutils/fcompiler/sun.py +++ /dev/null @@ -1,51 +0,0 @@ -from numpy.distutils.ccompiler import simple_version_match -from numpy.distutils.fcompiler import FCompiler - -compilers = 
['SunFCompiler'] - -class SunFCompiler(FCompiler): - - compiler_type = 'sun' - description = 'Sun or Forte Fortran 95 Compiler' - # ex: - # f90: Sun WorkShop 6 update 2 Fortran 95 6.2 Patch 111690-10 2003/08/28 - version_match = simple_version_match( - start=r'f9[05]: (Sun|Forte|WorkShop).*Fortran 95') - - executables = { - 'version_cmd' : ["", "-V"], - 'compiler_f77' : ["f90"], - 'compiler_fix' : ["f90", "-fixed"], - 'compiler_f90' : ["f90"], - 'linker_so' : ["", "-Bdynamic", "-G"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - module_dir_switch = '-moddir=' - module_include_switch = '-M' - pic_flags = ['-xcode=pic32'] - - def get_flags_f77(self): - ret = ["-ftrap=%none"] - if (self.get_version() or '') >= '7': - ret.append("-f77") - else: - ret.append("-fixed") - return ret - def get_opt(self): - return ['-fast', '-dalign'] - def get_arch(self): - return ['-xtarget=generic'] - def get_libraries(self): - opt = [] - opt.extend(['fsu', 'sunmath', 'mvec']) - return opt - - def runtime_library_dir_option(self, dir): - return '-R%s' % dir - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='sun').get_version()) diff --git a/numpy/distutils/fcompiler/vast.py b/numpy/distutils/fcompiler/vast.py deleted file mode 100644 index 92a1647ba437..000000000000 --- a/numpy/distutils/fcompiler/vast.py +++ /dev/null @@ -1,52 +0,0 @@ -import os - -from numpy.distutils.fcompiler.gnu import GnuFCompiler - -compilers = ['VastFCompiler'] - -class VastFCompiler(GnuFCompiler): - compiler_type = 'vast' - compiler_aliases = () - description = 'Pacific-Sierra Research Fortran 90 Compiler' - version_pattern = (r'\s*Pacific-Sierra Research vf90 ' - r'(Personal|Professional)\s+(?P[^\s]*)') - - # VAST f90 does not support -o with -c. 
So, object files are created - # to the current directory and then moved to build directory - object_switch = ' && function _mvfile { mv -v `basename $1` $1 ; } && _mvfile ' - - executables = { - 'version_cmd' : ["vf90", "-v"], - 'compiler_f77' : ["g77"], - 'compiler_fix' : ["f90", "-Wv,-ya"], - 'compiler_f90' : ["f90"], - 'linker_so' : [""], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - module_dir_switch = None #XXX Fix me - module_include_switch = None #XXX Fix me - - def find_executables(self): - pass - - def get_version_cmd(self): - f90 = self.compiler_f90[0] - d, b = os.path.split(f90) - vf90 = os.path.join(d, 'v'+b) - return vf90 - - def get_flags_arch(self): - vast_version = self.get_version() - gnu = GnuFCompiler() - gnu.customize(None) - self.version = gnu.get_version() - opt = GnuFCompiler.get_flags_arch(self) - self.version = vast_version - return opt - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(2) - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='vast').get_version()) diff --git a/numpy/distutils/from_template.py b/numpy/distutils/from_template.py deleted file mode 100644 index 90d1f4c384c7..000000000000 --- a/numpy/distutils/from_template.py +++ /dev/null @@ -1,261 +0,0 @@ -#!/usr/bin/env python3 -""" - -process_file(filename) - - takes templated file .xxx.src and produces .xxx file where .xxx - is .pyf .f90 or .f using the following template rules: - - '<..>' denotes a template. - - All function and subroutine blocks in a source file with names that - contain '<..>' will be replicated according to the rules in '<..>'. - - The number of comma-separated words in '<..>' will determine the number of - replicates. - - '<..>' may have two different forms, named and short. For example, - - named: - where anywhere inside a block '

' will be replaced with - 'd', 's', 'z', and 'c' for each replicate of the block. - - <_c> is already defined: <_c=s,d,c,z> - <_t> is already defined: <_t=real,double precision,complex,double complex> - - short: - , a short form of the named, useful when no

appears inside - a block. - - In general, '<..>' contains a comma separated list of arbitrary - expressions. If these expression must contain a comma|leftarrow|rightarrow, - then prepend the comma|leftarrow|rightarrow with a backslash. - - If an expression matches '\\' then it will be replaced - by -th expression. - - Note that all '<..>' forms in a block must have the same number of - comma-separated entries. - - Predefined named template rules: - - - - - - -""" -__all__ = ['process_str', 'process_file'] - -import os -import sys -import re - -routine_start_re = re.compile(r'(\n|\A)(( (\$|\*))|)\s*(subroutine|function)\b', re.I) -routine_end_re = re.compile(r'\n\s*end\s*(subroutine|function)\b.*(\n|\Z)', re.I) -function_start_re = re.compile(r'\n (\$|\*)\s*function\b', re.I) - -def parse_structure(astr): - """ Return a list of tuples for each function or subroutine each - tuple is the start and end of a subroutine or function to be - expanded. - """ - - spanlist = [] - ind = 0 - while True: - m = routine_start_re.search(astr, ind) - if m is None: - break - start = m.start() - if function_start_re.match(astr, start, m.end()): - while True: - i = astr.rfind('\n', ind, start) - if i==-1: - break - start = i - if astr[i:i+7]!='\n $': - break - start += 1 - m = routine_end_re.search(astr, m.end()) - ind = end = m and m.end()-1 or len(astr) - spanlist.append((start, end)) - return spanlist - -template_re = re.compile(r"<\s*(\w[\w\d]*)\s*>") -named_re = re.compile(r"<\s*(\w[\w\d]*)\s*=\s*(.*?)\s*>") -list_re = re.compile(r"<\s*((.*?))\s*>") - -def find_repl_patterns(astr): - reps = named_re.findall(astr) - names = {} - for rep in reps: - name = rep[0].strip() or unique_key(names) - repl = rep[1].replace(r'\,', '@comma@') - thelist = conv(repl) - names[name] = thelist - return names - -def find_and_remove_repl_patterns(astr): - names = find_repl_patterns(astr) - astr = re.subn(named_re, '', astr)[0] - return astr, names - -item_re = re.compile(r"\A\\(?P\d+)\Z") -def 
conv(astr): - b = astr.split(',') - l = [x.strip() for x in b] - for i in range(len(l)): - m = item_re.match(l[i]) - if m: - j = int(m.group('index')) - l[i] = l[j] - return ','.join(l) - -def unique_key(adict): - """ Obtain a unique key given a dictionary.""" - allkeys = list(adict.keys()) - done = False - n = 1 - while not done: - newkey = '__l%s' % (n) - if newkey in allkeys: - n += 1 - else: - done = True - return newkey - - -template_name_re = re.compile(r'\A\s*(\w[\w\d]*)\s*\Z') -def expand_sub(substr, names): - substr = substr.replace(r'\>', '@rightarrow@') - substr = substr.replace(r'\<', '@leftarrow@') - lnames = find_repl_patterns(substr) - substr = named_re.sub(r"<\1>", substr) # get rid of definition templates - - def listrepl(mobj): - thelist = conv(mobj.group(1).replace(r'\,', '@comma@')) - if template_name_re.match(thelist): - return "<%s>" % (thelist) - name = None - for key in lnames.keys(): # see if list is already in dictionary - if lnames[key] == thelist: - name = key - if name is None: # this list is not in the dictionary yet - name = unique_key(lnames) - lnames[name] = thelist - return "<%s>" % name - - substr = list_re.sub(listrepl, substr) # convert all lists to named templates - # newnames are constructed as needed - - numsubs = None - base_rule = None - rules = {} - for r in template_re.findall(substr): - if r not in rules: - thelist = lnames.get(r, names.get(r, None)) - if thelist is None: - raise ValueError('No replicates found for <%s>' % (r)) - if r not in names and not thelist.startswith('_'): - names[r] = thelist - rule = [i.replace('@comma@', ',') for i in thelist.split(',')] - num = len(rule) - - if numsubs is None: - numsubs = num - rules[r] = rule - base_rule = r - elif num == numsubs: - rules[r] = rule - else: - print("Mismatch in number of replacements (base <%s=%s>)" - " for <%s=%s>. Ignoring." 
% - (base_rule, ','.join(rules[base_rule]), r, thelist)) - if not rules: - return substr - - def namerepl(mobj): - name = mobj.group(1) - return rules.get(name, (k+1)*[name])[k] - - newstr = '' - for k in range(numsubs): - newstr += template_re.sub(namerepl, substr) + '\n\n' - - newstr = newstr.replace('@rightarrow@', '>') - newstr = newstr.replace('@leftarrow@', '<') - return newstr - -def process_str(allstr): - newstr = allstr - writestr = '' - - struct = parse_structure(newstr) - - oldend = 0 - names = {} - names.update(_special_names) - for sub in struct: - cleanedstr, defs = find_and_remove_repl_patterns(newstr[oldend:sub[0]]) - writestr += cleanedstr - names.update(defs) - writestr += expand_sub(newstr[sub[0]:sub[1]], names) - oldend = sub[1] - writestr += newstr[oldend:] - - return writestr - -include_src_re = re.compile(r"(\n|\A)\s*include\s*['\"](?P[\w\d./\\]+\.src)['\"]", re.I) - -def resolve_includes(source): - d = os.path.dirname(source) - with open(source) as fid: - lines = [] - for line in fid: - m = include_src_re.match(line) - if m: - fn = m.group('name') - if not os.path.isabs(fn): - fn = os.path.join(d, fn) - if os.path.isfile(fn): - lines.extend(resolve_includes(fn)) - else: - lines.append(line) - else: - lines.append(line) - return lines - -def process_file(source): - lines = resolve_includes(source) - return process_str(''.join(lines)) - -_special_names = find_repl_patterns(''' -<_c=s,d,c,z> -<_t=real,double precision,complex,double complex> - - - - - -''') - -def main(): - try: - file = sys.argv[1] - except IndexError: - fid = sys.stdin - outfile = sys.stdout - else: - fid = open(file, 'r') - (base, ext) = os.path.splitext(file) - newname = base - outfile = open(newname, 'w') - - allstr = fid.read() - writestr = process_str(allstr) - outfile.write(writestr) - - -if __name__ == "__main__": - main() diff --git a/numpy/distutils/fujitsuccompiler.py b/numpy/distutils/fujitsuccompiler.py deleted file mode 100644 index c25900b34f1d..000000000000 --- 
a/numpy/distutils/fujitsuccompiler.py +++ /dev/null @@ -1,28 +0,0 @@ -from distutils.unixccompiler import UnixCCompiler - -class FujitsuCCompiler(UnixCCompiler): - - """ - Fujitsu compiler. - """ - - compiler_type = 'fujitsu' - cc_exe = 'fcc' - cxx_exe = 'FCC' - - def __init__(self, verbose=0, dry_run=0, force=0): - UnixCCompiler.__init__(self, verbose, dry_run, force) - cc_compiler = self.cc_exe - cxx_compiler = self.cxx_exe - self.set_executables( - compiler=cc_compiler + - ' -O3 -Nclang -fPIC', - compiler_so=cc_compiler + - ' -O3 -Nclang -fPIC', - compiler_cxx=cxx_compiler + - ' -O3 -Nclang -fPIC', - linker_exe=cc_compiler + - ' -lfj90i -lfj90f -lfjsrcinfo -lelf -shared', - linker_so=cc_compiler + - ' -lfj90i -lfj90f -lfjsrcinfo -lelf -shared' - ) diff --git a/numpy/distutils/intelccompiler.py b/numpy/distutils/intelccompiler.py deleted file mode 100644 index 77fb39889a29..000000000000 --- a/numpy/distutils/intelccompiler.py +++ /dev/null @@ -1,106 +0,0 @@ -import platform - -from distutils.unixccompiler import UnixCCompiler -from numpy.distutils.exec_command import find_executable -from numpy.distutils.ccompiler import simple_version_match -if platform.system() == 'Windows': - from numpy.distutils.msvc9compiler import MSVCCompiler - - -class IntelCCompiler(UnixCCompiler): - """A modified Intel compiler compatible with a GCC-built Python.""" - compiler_type = 'intel' - cc_exe = 'icc' - cc_args = 'fPIC' - - def __init__(self, verbose=0, dry_run=0, force=0): - UnixCCompiler.__init__(self, verbose, dry_run, force) - - v = self.get_version() - mpopt = 'openmp' if v and v < '15' else 'qopenmp' - self.cc_exe = ('icc -fPIC -fp-model strict -O3 ' - '-fomit-frame-pointer -{}').format(mpopt) - compiler = self.cc_exe - - if platform.system() == 'Darwin': - shared_flag = '-Wl,-undefined,dynamic_lookup' - else: - shared_flag = '-shared' - self.set_executables(compiler=compiler, - compiler_so=compiler, - compiler_cxx=compiler, - archiver='xiar' + ' cru', - linker_exe=compiler 
+ ' -shared-intel', - linker_so=compiler + ' ' + shared_flag + - ' -shared-intel') - - -class IntelItaniumCCompiler(IntelCCompiler): - compiler_type = 'intele' - cc_exe = 'icc' - - -class IntelEM64TCCompiler(UnixCCompiler): - """ - A modified Intel x86_64 compiler compatible with a 64bit GCC-built Python. - """ - compiler_type = 'intelem' - cc_exe = 'icc -m64' - cc_args = '-fPIC' - - def __init__(self, verbose=0, dry_run=0, force=0): - UnixCCompiler.__init__(self, verbose, dry_run, force) - - v = self.get_version() - mpopt = 'openmp' if v and v < '15' else 'qopenmp' - self.cc_exe = ('icc -std=c99 -m64 -fPIC -fp-model strict -O3 ' - '-fomit-frame-pointer -{}').format(mpopt) - compiler = self.cc_exe - - if platform.system() == 'Darwin': - shared_flag = '-Wl,-undefined,dynamic_lookup' - else: - shared_flag = '-shared' - self.set_executables(compiler=compiler, - compiler_so=compiler, - compiler_cxx=compiler, - archiver='xiar' + ' cru', - linker_exe=compiler + ' -shared-intel', - linker_so=compiler + ' ' + shared_flag + - ' -shared-intel') - - -if platform.system() == 'Windows': - class IntelCCompilerW(MSVCCompiler): - """ - A modified Intel compiler compatible with an MSVC-built Python. - """ - compiler_type = 'intelw' - compiler_cxx = 'icl' - - def __init__(self, verbose=0, dry_run=0, force=0): - MSVCCompiler.__init__(self, verbose, dry_run, force) - version_match = simple_version_match(start=r'Intel\(R\).*?32,') - self.__version = version_match - - def initialize(self, plat_name=None): - MSVCCompiler.initialize(self, plat_name) - self.cc = self.find_exe('icl.exe') - self.lib = self.find_exe('xilib') - self.linker = self.find_exe('xilink') - self.compile_options = ['/nologo', '/O3', '/MD', '/W3', - '/Qstd=c99'] - self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', - '/Qstd=c99', '/Z7', '/D_DEBUG'] - - class IntelEM64TCCompilerW(IntelCCompilerW): - """ - A modified Intel x86_64 compiler compatible with - a 64bit MSVC-built Python. 
- """ - compiler_type = 'intelemw' - - def __init__(self, verbose=0, dry_run=0, force=0): - MSVCCompiler.__init__(self, verbose, dry_run, force) - version_match = simple_version_match(start=r'Intel\(R\).*?64,') - self.__version = version_match diff --git a/numpy/distutils/lib2def.py b/numpy/distutils/lib2def.py deleted file mode 100644 index 851682c63310..000000000000 --- a/numpy/distutils/lib2def.py +++ /dev/null @@ -1,116 +0,0 @@ -import re -import sys -import subprocess - -__doc__ = """This module generates a DEF file from the symbols in -an MSVC-compiled DLL import library. It correctly discriminates between -data and functions. The data is collected from the output of the program -nm(1). - -Usage: - python lib2def.py [libname.lib] [output.def] -or - python lib2def.py [libname.lib] > output.def - -libname.lib defaults to python.lib and output.def defaults to stdout - -Author: Robert Kern -Last Update: April 30, 1999 -""" - -__version__ = '0.1a' - -py_ver = "%d%d" % tuple(sys.version_info[:2]) - -DEFAULT_NM = ['nm', '-Cs'] - -DEF_HEADER = """LIBRARY python%s.dll -;CODE PRELOAD MOVEABLE DISCARDABLE -;DATA PRELOAD SINGLE - -EXPORTS -""" % py_ver -# the header of the DEF file - -FUNC_RE = re.compile(r"^(.*) in python%s\.dll" % py_ver, re.MULTILINE) -DATA_RE = re.compile(r"^_imp__(.*) in python%s\.dll" % py_ver, re.MULTILINE) - -def parse_cmd(): - """Parses the command-line arguments. 
- -libfile, deffile = parse_cmd()""" - if len(sys.argv) == 3: - if sys.argv[1][-4:] == '.lib' and sys.argv[2][-4:] == '.def': - libfile, deffile = sys.argv[1:] - elif sys.argv[1][-4:] == '.def' and sys.argv[2][-4:] == '.lib': - deffile, libfile = sys.argv[1:] - else: - print("I'm assuming that your first argument is the library") - print("and the second is the DEF file.") - elif len(sys.argv) == 2: - if sys.argv[1][-4:] == '.def': - deffile = sys.argv[1] - libfile = 'python%s.lib' % py_ver - elif sys.argv[1][-4:] == '.lib': - deffile = None - libfile = sys.argv[1] - else: - libfile = 'python%s.lib' % py_ver - deffile = None - return libfile, deffile - -def getnm(nm_cmd=['nm', '-Cs', 'python%s.lib' % py_ver], shell=True): - """Returns the output of nm_cmd via a pipe. - -nm_output = getnm(nm_cmd = 'nm -Cs py_lib')""" - p = subprocess.Popen(nm_cmd, shell=shell, stdout=subprocess.PIPE, - stderr=subprocess.PIPE, text=True) - nm_output, nm_err = p.communicate() - if p.returncode != 0: - raise RuntimeError('failed to run "%s": "%s"' % ( - ' '.join(nm_cmd), nm_err)) - return nm_output - -def parse_nm(nm_output): - """Returns a tuple of lists: dlist for the list of data -symbols and flist for the list of function symbols. - -dlist, flist = parse_nm(nm_output)""" - data = DATA_RE.findall(nm_output) - func = FUNC_RE.findall(nm_output) - - flist = [] - for sym in data: - if sym in func and (sym[:2] == 'Py' or sym[:3] == '_Py' or sym[:4] == 'init'): - flist.append(sym) - - dlist = [] - for sym in data: - if sym not in flist and (sym[:2] == 'Py' or sym[:3] == '_Py'): - dlist.append(sym) - - dlist.sort() - flist.sort() - return dlist, flist - -def output_def(dlist, flist, header, file = sys.stdout): - """Outputs the final DEF file to a file defaulting to stdout. 
- -output_def(dlist, flist, header, file = sys.stdout)""" - for data_sym in dlist: - header = header + '\t%s DATA\n' % data_sym - header = header + '\n' # blank line - for func_sym in flist: - header = header + '\t%s\n' % func_sym - file.write(header) - -if __name__ == '__main__': - libfile, deffile = parse_cmd() - if deffile is None: - deffile = sys.stdout - else: - deffile = open(deffile, 'w') - nm_cmd = DEFAULT_NM + [str(libfile)] - nm_output = getnm(nm_cmd, shell=False) - dlist, flist = parse_nm(nm_output) - output_def(dlist, flist, DEF_HEADER, deffile) diff --git a/numpy/distutils/line_endings.py b/numpy/distutils/line_endings.py deleted file mode 100644 index 686e5ebd937f..000000000000 --- a/numpy/distutils/line_endings.py +++ /dev/null @@ -1,77 +0,0 @@ -""" Functions for converting from DOS to UNIX line endings - -""" -import os -import re -import sys - - -def dos2unix(file): - "Replace CRLF with LF in argument files. Print names of changed files." - if os.path.isdir(file): - print(file, "Directory!") - return - - with open(file, "rb") as fp: - data = fp.read() - if '\0' in data: - print(file, "Binary!") - return - - newdata = re.sub("\r\n", "\n", data) - if newdata != data: - print('dos2unix:', file) - with open(file, "wb") as f: - f.write(newdata) - return file - else: - print(file, 'ok') - -def dos2unix_one_dir(modified_files, dir_name, file_names): - for file in file_names: - full_path = os.path.join(dir_name, file) - file = dos2unix(full_path) - if file is not None: - modified_files.append(file) - -def dos2unix_dir(dir_name): - modified_files = [] - os.path.walk(dir_name, dos2unix_one_dir, modified_files) - return modified_files -#---------------------------------- - -def unix2dos(file): - "Replace LF with CRLF in argument files. Print names of changed files." 
- if os.path.isdir(file): - print(file, "Directory!") - return - - with open(file, "rb") as fp: - data = fp.read() - if '\0' in data: - print(file, "Binary!") - return - newdata = re.sub("\r\n", "\n", data) - newdata = re.sub("\n", "\r\n", newdata) - if newdata != data: - print('unix2dos:', file) - with open(file, "wb") as f: - f.write(newdata) - return file - else: - print(file, 'ok') - -def unix2dos_one_dir(modified_files, dir_name, file_names): - for file in file_names: - full_path = os.path.join(dir_name, file) - unix2dos(full_path) - if file is not None: - modified_files.append(file) - -def unix2dos_dir(dir_name): - modified_files = [] - os.path.walk(dir_name, unix2dos_one_dir, modified_files) - return modified_files - -if __name__ == "__main__": - dos2unix_dir(sys.argv[1]) diff --git a/numpy/distutils/log.py b/numpy/distutils/log.py deleted file mode 100644 index 3347f56d6fe9..000000000000 --- a/numpy/distutils/log.py +++ /dev/null @@ -1,111 +0,0 @@ -# Colored log -import sys -from distutils.log import * # noqa: F403 -from distutils.log import Log as old_Log -from distutils.log import _global_log - -from numpy.distutils.misc_util import (red_text, default_text, cyan_text, - green_text, is_sequence, is_string) - - -def _fix_args(args,flag=1): - if is_string(args): - return args.replace('%', '%%') - if flag and is_sequence(args): - return tuple([_fix_args(a, flag=0) for a in args]) - return args - - -class Log(old_Log): - def _log(self, level, msg, args): - if level >= self.threshold: - if args: - msg = msg % _fix_args(args) - if 0: - if msg.startswith('copying ') and msg.find(' -> ') != -1: - return - if msg.startswith('byte-compiling '): - return - print(_global_color_map[level](msg)) - sys.stdout.flush() - - def good(self, msg, *args): - """ - If we log WARN messages, log this message as a 'nice' anti-warn - message. 
- - """ - if WARN >= self.threshold: - if args: - print(green_text(msg % _fix_args(args))) - else: - print(green_text(msg)) - sys.stdout.flush() - - -_global_log.__class__ = Log - -good = _global_log.good - -def set_threshold(level, force=False): - prev_level = _global_log.threshold - if prev_level > DEBUG or force: - # If we're running at DEBUG, don't change the threshold, as there's - # likely a good reason why we're running at this level. - _global_log.threshold = level - if level <= DEBUG: - info('set_threshold: setting threshold to DEBUG level,' - ' it can be changed only with force argument') - else: - info('set_threshold: not changing threshold from DEBUG level' - ' %s to %s' % (prev_level, level)) - return prev_level - -def get_threshold(): - return _global_log.threshold - -def set_verbosity(v, force=False): - prev_level = _global_log.threshold - if v < 0: - set_threshold(ERROR, force) - elif v == 0: - set_threshold(WARN, force) - elif v == 1: - set_threshold(INFO, force) - elif v >= 2: - set_threshold(DEBUG, force) - return {FATAL:-2,ERROR:-1,WARN:0,INFO:1,DEBUG:2}.get(prev_level, 1) - - -_global_color_map = { - DEBUG:cyan_text, - INFO:default_text, - WARN:red_text, - ERROR:red_text, - FATAL:red_text -} - -# don't use INFO,.. flags in set_verbosity, these flags are for set_threshold. 
-set_verbosity(0, force=True) - - -_error = error -_warn = warn -_info = info -_debug = debug - - -def error(msg, *a, **kw): - _error(f"ERROR: {msg}", *a, **kw) - - -def warn(msg, *a, **kw): - _warn(f"WARN: {msg}", *a, **kw) - - -def info(msg, *a, **kw): - _info(f"INFO: {msg}", *a, **kw) - - -def debug(msg, *a, **kw): - _debug(f"DEBUG: {msg}", *a, **kw) diff --git a/numpy/distutils/mingw/gfortran_vs2003_hack.c b/numpy/distutils/mingw/gfortran_vs2003_hack.c deleted file mode 100644 index 485a675d8a1f..000000000000 --- a/numpy/distutils/mingw/gfortran_vs2003_hack.c +++ /dev/null @@ -1,6 +0,0 @@ -int _get_output_format(void) -{ - return 0; -} - -int _imp____lc_codepage = 0; diff --git a/numpy/distutils/mingw32ccompiler.py b/numpy/distutils/mingw32ccompiler.py deleted file mode 100644 index 944ba2d03b33..000000000000 --- a/numpy/distutils/mingw32ccompiler.py +++ /dev/null @@ -1,620 +0,0 @@ -""" -Support code for building Python extensions on Windows. - - # NT stuff - # 1. Make sure libpython.a exists for gcc. If not, build it. - # 2. Force windows to use gcc (we're struggling with MSVC and g77 support) - # 3. Force windows to use g77 - -""" -import os -import sys -import subprocess -import re -import textwrap - -# Overwrite certain distutils.ccompiler functions: -import numpy.distutils.ccompiler # noqa: F401 -from numpy.distutils import log -# NT stuff -# 1. Make sure libpython.a exists for gcc. If not, build it. -# 2. Force windows to use gcc (we're struggling with MSVC and g77 support) -# --> this is done in numpy/distutils/ccompiler.py -# 3. 
Force windows to use g77 - -import distutils.cygwinccompiler -from distutils.unixccompiler import UnixCCompiler - -try: - from distutils.msvccompiler import get_build_version as get_build_msvc_version -except ImportError: - def get_build_msvc_version(): - return None - -from distutils.errors import UnknownFileError -from numpy.distutils.misc_util import (msvc_runtime_library, - msvc_runtime_version, - msvc_runtime_major, - get_build_architecture) - -def get_msvcr_replacement(): - """Replacement for outdated version of get_msvcr from cygwinccompiler""" - msvcr = msvc_runtime_library() - return [] if msvcr is None else [msvcr] - - -# Useful to generate table of symbols from a dll -_START = re.compile(r'\[Ordinal/Name Pointer\] Table') -_TABLE = re.compile(r'^\s+\[([\s*[0-9]*)\] ([a-zA-Z0-9_]*)') - -# the same as cygwin plus some additional parameters -class Mingw32CCompiler(distutils.cygwinccompiler.CygwinCCompiler): - """ A modified MingW32 compiler compatible with an MSVC built Python. - - """ - - compiler_type = 'mingw32' - - def __init__ (self, - verbose=0, - dry_run=0, - force=0): - - distutils.cygwinccompiler.CygwinCCompiler.__init__ (self, verbose, - dry_run, force) - - # **changes: eric jones 4/11/01 - # 1. Check for import library on Windows. Build if it doesn't exist. - - build_import_library() - - # Check for custom msvc runtime library on Windows. Build if it doesn't exist. 
- msvcr_success = build_msvcr_library() - msvcr_dbg_success = build_msvcr_library(debug=True) - if msvcr_success or msvcr_dbg_success: - # add preprocessor statement for using customized msvcr lib - self.define_macro('NPY_MINGW_USE_CUSTOM_MSVCR') - - # Define the MSVC version as hint for MinGW - msvcr_version = msvc_runtime_version() - if msvcr_version: - self.define_macro('__MSVCRT_VERSION__', '0x%04i' % msvcr_version) - - # MS_WIN64 should be defined when building for amd64 on windows, - # but python headers define it only for MS compilers, which has all - # kind of bad consequences, like using Py_ModuleInit4 instead of - # Py_ModuleInit4_64, etc... So we add it here - if get_build_architecture() == 'AMD64': - self.set_executables( - compiler='gcc -g -DDEBUG -DMS_WIN64 -O0 -Wall', - compiler_so='gcc -g -DDEBUG -DMS_WIN64 -O0 -Wall ' - '-Wstrict-prototypes', - linker_exe='gcc -g', - linker_so='gcc -g -shared') - else: - self.set_executables( - compiler='gcc -O2 -Wall', - compiler_so='gcc -O2 -Wall -Wstrict-prototypes', - linker_exe='g++ ', - linker_so='g++ -shared') - # added for python2.3 support - # we can't pass it through set_executables because pre 2.2 would fail - self.compiler_cxx = ['g++'] - - # Maybe we should also append -mthreads, but then the finished dlls - # need another dll (mingwm10.dll see Mingw32 docs) (-mthreads: Support - # thread-safe exception handling on `Mingw32') - - # no additional libraries needed - #self.dll_libraries=[] - return - - # __init__ () - - def link(self, - target_desc, - objects, - output_filename, - output_dir, - libraries, - library_dirs, - runtime_library_dirs, - export_symbols = None, - debug=0, - extra_preargs=None, - extra_postargs=None, - build_temp=None, - target_lang=None): - # Include the appropriate MSVC runtime library if Python was built - # with MSVC >= 7.0 (MinGW standard is msvcrt) - runtime_library = msvc_runtime_library() - if runtime_library: - if not libraries: - libraries = [] - 
libraries.append(runtime_library) - args = (self, - target_desc, - objects, - output_filename, - output_dir, - libraries, - library_dirs, - runtime_library_dirs, - None, #export_symbols, we do this in our def-file - debug, - extra_preargs, - extra_postargs, - build_temp, - target_lang) - func = UnixCCompiler.link - func(*args[:func.__code__.co_argcount]) - return - - def object_filenames (self, - source_filenames, - strip_dir=0, - output_dir=''): - if output_dir is None: output_dir = '' - obj_names = [] - for src_name in source_filenames: - # use normcase to make sure '.rc' is really '.rc' and not '.RC' - (base, ext) = os.path.splitext (os.path.normcase(src_name)) - - # added these lines to strip off windows drive letters - # without it, .o files are placed next to .c files - # instead of the build directory - drv, base = os.path.splitdrive(base) - if drv: - base = base[1:] - - if ext not in (self.src_extensions + ['.rc', '.res']): - raise UnknownFileError( - "unknown file type '%s' (from '%s')" % \ - (ext, src_name)) - if strip_dir: - base = os.path.basename (base) - if ext == '.res' or ext == '.rc': - # these need to be compiled to object files - obj_names.append (os.path.join (output_dir, - base + ext + self.obj_extension)) - else: - obj_names.append (os.path.join (output_dir, - base + self.obj_extension)) - return obj_names - - # object_filenames () - - -def find_python_dll(): - # We can't do much here: - # - find it in the virtualenv (sys.prefix) - # - find it in python main dir (sys.base_prefix, if in a virtualenv) - # - in system32, - # - otherwise (Sxs), I don't know how to get it. 
- stems = [sys.prefix] - if sys.base_prefix != sys.prefix: - stems.append(sys.base_prefix) - - sub_dirs = ['', 'lib', 'bin'] - # generate possible combinations of directory trees and sub-directories - lib_dirs = [] - for stem in stems: - for folder in sub_dirs: - lib_dirs.append(os.path.join(stem, folder)) - - # add system directory as well - if 'SYSTEMROOT' in os.environ: - lib_dirs.append(os.path.join(os.environ['SYSTEMROOT'], 'System32')) - - # search in the file system for possible candidates - major_version, minor_version = tuple(sys.version_info[:2]) - implementation = sys.implementation.name - if implementation == 'cpython': - dllname = f'python{major_version}{minor_version}.dll' - elif implementation == 'pypy': - dllname = f'libpypy{major_version}.{minor_version}-c.dll' - else: - dllname = f'Unknown platform {implementation}' - print("Looking for %s" % dllname) - for folder in lib_dirs: - dll = os.path.join(folder, dllname) - if os.path.exists(dll): - return dll - - raise ValueError("%s not found in %s" % (dllname, lib_dirs)) - -def dump_table(dll): - st = subprocess.check_output(["objdump.exe", "-p", dll]) - return st.split(b'\n') - -def generate_def(dll, dfile): - """Given a dll file location, get all its exported symbols and dump them - into the given def file. 
- - The .def file will be overwritten""" - dump = dump_table(dll) - for i in range(len(dump)): - if _START.match(dump[i].decode()): - break - else: - raise ValueError("Symbol table not found") - - syms = [] - for j in range(i+1, len(dump)): - m = _TABLE.match(dump[j].decode()) - if m: - syms.append((int(m.group(1).strip()), m.group(2))) - else: - break - - if len(syms) == 0: - log.warn('No symbols found in %s' % dll) - - with open(dfile, 'w') as d: - d.write('LIBRARY %s\n' % os.path.basename(dll)) - d.write(';CODE PRELOAD MOVEABLE DISCARDABLE\n') - d.write(';DATA PRELOAD SINGLE\n') - d.write('\nEXPORTS\n') - for s in syms: - #d.write('@%d %s\n' % (s[0], s[1])) - d.write('%s\n' % s[1]) - -def find_dll(dll_name): - - arch = {'AMD64' : 'amd64', - 'ARM64' : 'arm64', - 'Intel' : 'x86'}[get_build_architecture()] - - def _find_dll_in_winsxs(dll_name): - # Walk through the WinSxS directory to find the dll. - winsxs_path = os.path.join(os.environ.get('WINDIR', r'C:\WINDOWS'), - 'winsxs') - if not os.path.exists(winsxs_path): - return None - for root, dirs, files in os.walk(winsxs_path): - if dll_name in files and arch in root: - return os.path.join(root, dll_name) - return None - - def _find_dll_in_path(dll_name): - # First, look in the Python directory, then scan PATH for - # the given dll name. - for path in [sys.prefix] + os.environ['PATH'].split(';'): - filepath = os.path.join(path, dll_name) - if os.path.exists(filepath): - return os.path.abspath(filepath) - - return _find_dll_in_winsxs(dll_name) or _find_dll_in_path(dll_name) - -def build_msvcr_library(debug=False): - if os.name != 'nt': - return False - - # If the version number is None, then we couldn't find the MSVC runtime at - # all, because we are running on a Python distribution which is customed - # compiled; trust that the compiler is the same as the one available to us - # now, and that it is capable of linking with the correct runtime without - # any extra options. 
- msvcr_ver = msvc_runtime_major() - if msvcr_ver is None: - log.debug('Skip building import library: ' - 'Runtime is not compiled with MSVC') - return False - - # Skip using a custom library for versions < MSVC 8.0 - if msvcr_ver < 80: - log.debug('Skip building msvcr library:' - ' custom functionality not present') - return False - - msvcr_name = msvc_runtime_library() - if debug: - msvcr_name += 'd' - - # Skip if custom library already exists - out_name = "lib%s.a" % msvcr_name - out_file = os.path.join(sys.prefix, 'libs', out_name) - if os.path.isfile(out_file): - log.debug('Skip building msvcr library: "%s" exists' % - (out_file,)) - return True - - # Find the msvcr dll - msvcr_dll_name = msvcr_name + '.dll' - dll_file = find_dll(msvcr_dll_name) - if not dll_file: - log.warn('Cannot build msvcr library: "%s" not found' % - msvcr_dll_name) - return False - - def_name = "lib%s.def" % msvcr_name - def_file = os.path.join(sys.prefix, 'libs', def_name) - - log.info('Building msvcr library: "%s" (from %s)' \ - % (out_file, dll_file)) - - # Generate a symbol definition file from the msvcr dll - generate_def(dll_file, def_file) - - # Create a custom mingw library for the given symbol definitions - cmd = ['dlltool', '-d', def_file, '-l', out_file] - retcode = subprocess.call(cmd) - - # Clean up symbol definitions - os.remove(def_file) - - return (not retcode) - -def build_import_library(): - if os.name != 'nt': - return - - arch = get_build_architecture() - if arch == 'AMD64': - return _build_import_library_amd64() - if arch == 'ARM64': - return _build_import_library_arm64() - elif arch == 'Intel': - return _build_import_library_x86() - else: - raise ValueError("Unhandled arch %s" % arch) - -def _check_for_import_lib(): - """Check if an import library for the Python runtime already exists.""" - major_version, minor_version = tuple(sys.version_info[:2]) - - # patterns for the file name of the library itself - patterns = ['libpython%d%d.a', - 'libpython%d%d.dll.a', - 
'libpython%d.%d.dll.a'] - - # directory trees that may contain the library - stems = [sys.prefix] - if hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix: - stems.append(sys.base_prefix) - elif hasattr(sys, 'real_prefix') and sys.real_prefix != sys.prefix: - stems.append(sys.real_prefix) - - # possible subdirectories within those trees where it is placed - sub_dirs = ['libs', 'lib'] - - # generate a list of candidate locations - candidates = [] - for pat in patterns: - filename = pat % (major_version, minor_version) - for stem_dir in stems: - for folder in sub_dirs: - candidates.append(os.path.join(stem_dir, folder, filename)) - - # test the filesystem to see if we can find any of these - for fullname in candidates: - if os.path.isfile(fullname): - # already exists, in location given - return (True, fullname) - - # needs to be built, preferred location given first - return (False, candidates[0]) - -def _build_import_library_amd64(): - out_exists, out_file = _check_for_import_lib() - if out_exists: - log.debug('Skip building import library: "%s" exists', out_file) - return - - # get the runtime dll for which we are building import library - dll_file = find_python_dll() - log.info('Building import library (arch=AMD64): "%s" (from %s)' % - (out_file, dll_file)) - - # generate symbol list from this library - def_name = "python%d%d.def" % tuple(sys.version_info[:2]) - def_file = os.path.join(sys.prefix, 'libs', def_name) - generate_def(dll_file, def_file) - - # generate import library from this symbol list - cmd = ['dlltool', '-d', def_file, '-l', out_file] - subprocess.check_call(cmd) - -def _build_import_library_arm64(): - out_exists, out_file = _check_for_import_lib() - if out_exists: - log.debug('Skip building import library: "%s" exists', out_file) - return - - # get the runtime dll for which we are building import library - dll_file = find_python_dll() - log.info('Building import library (arch=ARM64): "%s" (from %s)' % - (out_file, dll_file)) - - # 
generate symbol list from this library - def_name = "python%d%d.def" % tuple(sys.version_info[:2]) - def_file = os.path.join(sys.prefix, 'libs', def_name) - generate_def(dll_file, def_file) - - # generate import library from this symbol list - cmd = ['dlltool', '-d', def_file, '-l', out_file] - subprocess.check_call(cmd) - -def _build_import_library_x86(): - """ Build the import libraries for Mingw32-gcc on Windows - """ - out_exists, out_file = _check_for_import_lib() - if out_exists: - log.debug('Skip building import library: "%s" exists', out_file) - return - - lib_name = "python%d%d.lib" % tuple(sys.version_info[:2]) - lib_file = os.path.join(sys.prefix, 'libs', lib_name) - if not os.path.isfile(lib_file): - # didn't find library file in virtualenv, try base distribution, too, - # and use that instead if found there. for Python 2.7 venvs, the base - # directory is in attribute real_prefix instead of base_prefix. - if hasattr(sys, 'base_prefix'): - base_lib = os.path.join(sys.base_prefix, 'libs', lib_name) - elif hasattr(sys, 'real_prefix'): - base_lib = os.path.join(sys.real_prefix, 'libs', lib_name) - else: - base_lib = '' # os.path.isfile('') == False - - if os.path.isfile(base_lib): - lib_file = base_lib - else: - log.warn('Cannot build import library: "%s" not found', lib_file) - return - log.info('Building import library (ARCH=x86): "%s"', out_file) - - from numpy.distutils import lib2def - - def_name = "python%d%d.def" % tuple(sys.version_info[:2]) - def_file = os.path.join(sys.prefix, 'libs', def_name) - nm_output = lib2def.getnm( - lib2def.DEFAULT_NM + [lib_file], shell=False) - dlist, flist = lib2def.parse_nm(nm_output) - with open(def_file, 'w') as fid: - lib2def.output_def(dlist, flist, lib2def.DEF_HEADER, fid) - - dll_name = find_python_dll () - - cmd = ["dlltool", - "--dllname", dll_name, - "--def", def_file, - "--output-lib", out_file] - status = subprocess.check_output(cmd) - if status: - log.warn('Failed to build import library for gcc. 
Linking will fail.') - return - -#===================================== -# Dealing with Visual Studio MANIFESTS -#===================================== - -# Functions to deal with visual studio manifests. Manifest are a mechanism to -# enforce strong DLL versioning on windows, and has nothing to do with -# distutils MANIFEST. manifests are XML files with version info, and used by -# the OS loader; they are necessary when linking against a DLL not in the -# system path; in particular, official python 2.6 binary is built against the -# MS runtime 9 (the one from VS 2008), which is not available on most windows -# systems; python 2.6 installer does install it in the Win SxS (Side by side) -# directory, but this requires the manifest for this to work. This is a big -# mess, thanks MS for a wonderful system. - -# XXX: ideally, we should use exactly the same version as used by python. I -# submitted a patch to get this version, but it was only included for python -# 2.6.1 and above. So for versions below, we use a "best guess". -_MSVCRVER_TO_FULLVER = {} -if sys.platform == 'win32': - try: - import msvcrt - # I took one version in my SxS directory: no idea if it is the good - # one, and we can't retrieve it from python - _MSVCRVER_TO_FULLVER['80'] = "8.0.50727.42" - _MSVCRVER_TO_FULLVER['90'] = "9.0.21022.8" - # Value from msvcrt.CRT_ASSEMBLY_VERSION under Python 3.3.0 - # on Windows XP: - _MSVCRVER_TO_FULLVER['100'] = "10.0.30319.460" - crt_ver = getattr(msvcrt, 'CRT_ASSEMBLY_VERSION', None) - if crt_ver is not None: # Available at least back to Python 3.3 - maj, min = re.match(r'(\d+)\.(\d)', crt_ver).groups() - _MSVCRVER_TO_FULLVER[maj + min] = crt_ver - del maj, min - del crt_ver - except ImportError: - # If we are here, means python was not built with MSVC. 
Not sure what - # to do in that case: manifest building will fail, but it should not be - # used in that case anyway - log.warn('Cannot import msvcrt: using manifest will not be possible') - -def msvc_manifest_xml(maj, min): - """Given a major and minor version of the MSVCR, returns the - corresponding XML file.""" - try: - fullver = _MSVCRVER_TO_FULLVER[str(maj * 10 + min)] - except KeyError: - raise ValueError("Version %d,%d of MSVCRT not supported yet" % - (maj, min)) from None - # Don't be fooled, it looks like an XML, but it is not. In particular, it - # should not have any space before starting, and its size should be - # divisible by 4, most likely for alignment constraints when the xml is - # embedded in the binary... - # This template was copied directly from the python 2.6 binary (using - # strings.exe from mingw on python.exe). - template = textwrap.dedent("""\ - - - - - - - - - - - - - - """) - - return template % {'fullver': fullver, 'maj': maj, 'min': min} - -def manifest_rc(name, type='dll'): - """Return the rc file used to generate the res file which will be embedded - as manifest for given manifest file name, of given type ('dll' or - 'exe'). 
- - Parameters - ---------- - name : str - name of the manifest file to embed - type : str {'dll', 'exe'} - type of the binary which will embed the manifest - - """ - if type == 'dll': - rctype = 2 - elif type == 'exe': - rctype = 1 - else: - raise ValueError("Type %s not supported" % type) - - return """\ -#include "winuser.h" -%d RT_MANIFEST %s""" % (rctype, name) - -def check_embedded_msvcr_match_linked(msver): - """msver is the ms runtime version used for the MANIFEST.""" - # check msvcr major version are the same for linking and - # embedding - maj = msvc_runtime_major() - if maj: - if not maj == int(msver): - raise ValueError( - "Discrepancy between linked msvcr " \ - "(%d) and the one about to be embedded " \ - "(%d)" % (int(msver), maj)) - -def configtest_name(config): - base = os.path.basename(config._gen_temp_sourcefile("yo", [], "c")) - return os.path.splitext(base)[0] - -def manifest_name(config): - # Get configest name (including suffix) - root = configtest_name(config) - exext = config.compiler.exe_extension - return root + exext + ".manifest" - -def rc_name(config): - # Get configtest name (including suffix) - root = configtest_name(config) - return root + ".rc" - -def generate_manifest(config): - msver = get_build_msvc_version() - if msver is not None: - if msver >= 8: - check_embedded_msvcr_match_linked(msver) - ma_str, mi_str = str(msver).split('.') - # Write the manifest file - manxml = msvc_manifest_xml(int(ma_str), int(mi_str)) - with open(manifest_name(config), "w") as man: - config.temp_files.append(manifest_name(config)) - man.write(manxml) diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py deleted file mode 100644 index ca7bcf0fbdd0..000000000000 --- a/numpy/distutils/misc_util.py +++ /dev/null @@ -1,2484 +0,0 @@ -import os -import re -import sys -import copy -import glob -import atexit -import tempfile -import subprocess -import shutil -import multiprocessing -import textwrap -import importlib.util -from threading 
import local as tlocal -from functools import reduce - -import distutils -from distutils.errors import DistutilsError - -# stores temporary directory of each thread to only create one per thread -_tdata = tlocal() - -# store all created temporary directories so they can be deleted on exit -_tmpdirs = [] -def clean_up_temporary_directory(): - if _tmpdirs is not None: - for d in _tmpdirs: - try: - shutil.rmtree(d) - except OSError: - pass - -atexit.register(clean_up_temporary_directory) - -__all__ = ['Configuration', 'get_numpy_include_dirs', 'default_config_dict', - 'dict_append', 'appendpath', 'generate_config_py', - 'get_cmd', 'allpath', 'get_mathlibs', - 'terminal_has_colors', 'red_text', 'green_text', 'yellow_text', - 'blue_text', 'cyan_text', 'cyg2win32', 'mingw32', 'all_strings', - 'has_f_sources', 'has_cxx_sources', 'filter_sources', - 'get_dependencies', 'is_local_src_dir', 'get_ext_source_files', - 'get_script_files', 'get_lib_source_files', 'get_data_files', - 'dot_join', 'get_frame', 'minrelpath', 'njoin', - 'is_sequence', 'is_string', 'as_list', 'gpaths', 'get_language', - 'get_build_architecture', 'get_info', 'get_pkg_info', - 'get_num_build_jobs', 'sanitize_cxx_flags', - 'exec_mod_from_location'] - -class InstallableLib: - """ - Container to hold information on an installable library. - - Parameters - ---------- - name : str - Name of the installed library. - build_info : dict - Dictionary holding build information. - target_dir : str - Absolute path specifying where to install the library. - - See Also - -------- - Configuration.add_installed_library - - Notes - ----- - The three parameters are stored as attributes with the same names. 
- - """ - def __init__(self, name, build_info, target_dir): - self.name = name - self.build_info = build_info - self.target_dir = target_dir - - -def get_num_build_jobs(): - """ - Get number of parallel build jobs set by the --parallel command line - argument of setup.py - If the command did not receive a setting the environment variable - NPY_NUM_BUILD_JOBS is checked. If that is unset, return the number of - processors on the system, with a maximum of 8 (to prevent - overloading the system if there a lot of CPUs). - - Returns - ------- - out : int - number of parallel jobs that can be run - - """ - from numpy.distutils.core import get_distribution - try: - cpu_count = len(os.sched_getaffinity(0)) - except AttributeError: - cpu_count = multiprocessing.cpu_count() - cpu_count = min(cpu_count, 8) - envjobs = int(os.environ.get("NPY_NUM_BUILD_JOBS", cpu_count)) - dist = get_distribution() - # may be None during configuration - if dist is None: - return envjobs - - # any of these three may have the job set, take the largest - cmdattr = (getattr(dist.get_command_obj('build'), 'parallel', None), - getattr(dist.get_command_obj('build_ext'), 'parallel', None), - getattr(dist.get_command_obj('build_clib'), 'parallel', None)) - if all(x is None for x in cmdattr): - return envjobs - else: - return max(x for x in cmdattr if x is not None) - -def quote_args(args): - """Quote list of arguments. - - .. deprecated:: 1.22. - """ - import warnings - warnings.warn('"quote_args" is deprecated.', - DeprecationWarning, stacklevel=2) - # don't used _nt_quote_args as it does not check if - # args items already have quotes or not. - args = list(args) - for i in range(len(args)): - a = args[i] - if ' ' in a and a[0] not in '"\'': - args[i] = '"%s"' % (a) - return args - -def allpath(name): - "Convert a /-separated pathname to one using the OS's path separator." 
- split = name.split('/') - return os.path.join(*split) - -def rel_path(path, parent_path): - """Return path relative to parent_path.""" - # Use realpath to avoid issues with symlinked dirs (see gh-7707) - pd = os.path.realpath(os.path.abspath(parent_path)) - apath = os.path.realpath(os.path.abspath(path)) - if len(apath) < len(pd): - return path - if apath == pd: - return '' - if pd == apath[:len(pd)]: - assert apath[len(pd)] in [os.sep], repr((path, apath[len(pd)])) - path = apath[len(pd)+1:] - return path - -def get_path_from_frame(frame, parent_path=None): - """Return path of the module given a frame object from the call stack. - - Returned path is relative to parent_path when given, - otherwise it is absolute path. - """ - - # First, try to find if the file name is in the frame. - try: - caller_file = eval('__file__', frame.f_globals, frame.f_locals) - d = os.path.dirname(os.path.abspath(caller_file)) - except NameError: - # __file__ is not defined, so let's try __name__. We try this second - # because setuptools spoofs __name__ to be '__main__' even though - # sys.modules['__main__'] might be something else, like easy_install(1). - caller_name = eval('__name__', frame.f_globals, frame.f_locals) - __import__(caller_name) - mod = sys.modules[caller_name] - if hasattr(mod, '__file__'): - d = os.path.dirname(os.path.abspath(mod.__file__)) - else: - # we're probably running setup.py as execfile("setup.py") - # (likely we're building an egg) - d = os.path.abspath('.') - - if parent_path is not None: - d = rel_path(d, parent_path) - - return d or '.' - -def njoin(*path): - """Join two or more pathname components + - - convert a /-separated pathname to one using the OS's path separator. - - resolve `..` and `.` from path. - - Either passing n arguments as in njoin('a','b'), or a sequence - of n names as in njoin(['a','b']) is handled, or a mixture of such arguments. 
- """ - paths = [] - for p in path: - if is_sequence(p): - # njoin(['a', 'b'], 'c') - paths.append(njoin(*p)) - else: - assert is_string(p) - paths.append(p) - path = paths - if not path: - # njoin() - joined = '' - else: - # njoin('a', 'b') - joined = os.path.join(*path) - if os.path.sep != '/': - joined = joined.replace('/', os.path.sep) - return minrelpath(joined) - -def get_mathlibs(path=None): - """Return the MATHLIB line from numpyconfig.h - """ - if path is not None: - config_file = os.path.join(path, '_numpyconfig.h') - else: - # Look for the file in each of the numpy include directories. - dirs = get_numpy_include_dirs() - for path in dirs: - fn = os.path.join(path, '_numpyconfig.h') - if os.path.exists(fn): - config_file = fn - break - else: - raise DistutilsError('_numpyconfig.h not found in numpy include ' - 'dirs %r' % (dirs,)) - - with open(config_file) as fid: - mathlibs = [] - s = '#define MATHLIB' - for line in fid: - if line.startswith(s): - value = line[len(s):].strip() - if value: - mathlibs.extend(value.split(',')) - return mathlibs - -def minrelpath(path): - """Resolve `..` and '.' from path. - """ - if not is_string(path): - return path - if '.' not in path: - return path - l = path.split(os.sep) - while l: - try: - i = l.index('.', 1) - except ValueError: - break - del l[i] - j = 1 - while l: - try: - i = l.index('..', j) - except ValueError: - break - if l[i-1]=='..': - j += 1 - else: - del l[i], l[i-1] - j = 1 - if not l: - return '' - return os.sep.join(l) - -def sorted_glob(fileglob): - """sorts output of python glob for https://bugs.python.org/issue30461 - to allow extensions to have reproducible build results""" - return sorted(glob.glob(fileglob)) - -def _fix_paths(paths, local_path, include_non_existing): - assert is_sequence(paths), repr(type(paths)) - new_paths = [] - assert not is_string(paths), repr(paths) - for n in paths: - if is_string(n): - if '*' in n or '?' 
in n: - p = sorted_glob(n) - p2 = sorted_glob(njoin(local_path, n)) - if p2: - new_paths.extend(p2) - elif p: - new_paths.extend(p) - else: - if include_non_existing: - new_paths.append(n) - print('could not resolve pattern in %r: %r' % - (local_path, n)) - else: - n2 = njoin(local_path, n) - if os.path.exists(n2): - new_paths.append(n2) - else: - if os.path.exists(n): - new_paths.append(n) - elif include_non_existing: - new_paths.append(n) - if not os.path.exists(n): - print('non-existing path in %r: %r' % - (local_path, n)) - - elif is_sequence(n): - new_paths.extend(_fix_paths(n, local_path, include_non_existing)) - else: - new_paths.append(n) - return [minrelpath(p) for p in new_paths] - -def gpaths(paths, local_path='', include_non_existing=True): - """Apply glob to paths and prepend local_path if needed. - """ - if is_string(paths): - paths = (paths,) - return _fix_paths(paths, local_path, include_non_existing) - -def make_temp_file(suffix='', prefix='', text=True): - if not hasattr(_tdata, 'tempdir'): - _tdata.tempdir = tempfile.mkdtemp() - _tmpdirs.append(_tdata.tempdir) - fid, name = tempfile.mkstemp(suffix=suffix, - prefix=prefix, - dir=_tdata.tempdir, - text=text) - fo = os.fdopen(fid, 'w') - return fo, name - -# Hooks for colored terminal output. 
-# See also https://web.archive.org/web/20100314204946/http://www.livinglogic.de/Python/ansistyle -def terminal_has_colors(): - if sys.platform=='cygwin' and 'USE_COLOR' not in os.environ: - # Avoid importing curses that causes illegal operation - # with a message: - # PYTHON2 caused an invalid page fault in - # module CYGNURSES7.DLL as 015f:18bbfc28 - # Details: Python 2.3.3 [GCC 3.3.1 (cygming special)] - # ssh to Win32 machine from debian - # curses.version is 2.2 - # CYGWIN_98-4.10, release 1.5.7(0.109/3/2)) - return 0 - if hasattr(sys.stdout, 'isatty') and sys.stdout.isatty(): - try: - import curses - curses.setupterm() - if (curses.tigetnum("colors") >= 0 - and curses.tigetnum("pairs") >= 0 - and ((curses.tigetstr("setf") is not None - and curses.tigetstr("setb") is not None) - or (curses.tigetstr("setaf") is not None - and curses.tigetstr("setab") is not None) - or curses.tigetstr("scp") is not None)): - return 1 - except Exception: - pass - return 0 - -if terminal_has_colors(): - _colour_codes = dict(black=0, red=1, green=2, yellow=3, - blue=4, magenta=5, cyan=6, white=7, default=9) - def colour_text(s, fg=None, bg=None, bold=False): - seq = [] - if bold: - seq.append('1') - if fg: - fgcode = 30 + _colour_codes.get(fg.lower(), 0) - seq.append(str(fgcode)) - if bg: - bgcode = 40 + _colour_codes.get(bg.lower(), 7) - seq.append(str(bgcode)) - if seq: - return '\x1b[%sm%s\x1b[0m' % (';'.join(seq), s) - else: - return s -else: - def colour_text(s, fg=None, bg=None): - return s - -def default_text(s): - return colour_text(s, 'default') -def red_text(s): - return colour_text(s, 'red') -def green_text(s): - return colour_text(s, 'green') -def yellow_text(s): - return colour_text(s, 'yellow') -def cyan_text(s): - return colour_text(s, 'cyan') -def blue_text(s): - return colour_text(s, 'blue') - -######################### - -def cyg2win32(path: str) -> str: - """Convert a path from Cygwin-native to Windows-native. 
- - Uses the cygpath utility (part of the Base install) to do the - actual conversion. Falls back to returning the original path if - this fails. - - Handles the default ``/cygdrive`` mount prefix as well as the - ``/proc/cygdrive`` portable prefix, custom cygdrive prefixes such - as ``/`` or ``/mnt``, and absolute paths such as ``/usr/src/`` or - ``/home/username`` - - Parameters - ---------- - path : str - The path to convert - - Returns - ------- - converted_path : str - The converted path - - Notes - ----- - Documentation for cygpath utility: - https://cygwin.com/cygwin-ug-net/cygpath.html - Documentation for the C function it wraps: - https://cygwin.com/cygwin-api/func-cygwin-conv-path.html - - """ - if sys.platform != "cygwin": - return path - return subprocess.check_output( - ["/usr/bin/cygpath", "--windows", path], text=True - ) - - -def mingw32(): - """Return true when using mingw32 environment. - """ - if sys.platform=='win32': - if os.environ.get('OSTYPE', '')=='msys': - return True - if os.environ.get('MSYSTEM', '')=='MINGW32': - return True - return False - -def msvc_runtime_version(): - "Return version of MSVC runtime library, as defined by __MSC_VER__ macro" - msc_pos = sys.version.find('MSC v.') - if msc_pos != -1: - msc_ver = int(sys.version[msc_pos+6:msc_pos+10]) - else: - msc_ver = None - return msc_ver - -def msvc_runtime_library(): - "Return name of MSVC runtime library if Python was built with MSVC >= 7" - ver = msvc_runtime_major () - if ver: - if ver < 140: - return "msvcr%i" % ver - else: - return "vcruntime%i" % ver - else: - return None - -def msvc_runtime_major(): - "Return major version of MSVC runtime coded like get_build_msvc_version" - major = {1300: 70, # MSVC 7.0 - 1310: 71, # MSVC 7.1 - 1400: 80, # MSVC 8 - 1500: 90, # MSVC 9 (aka 2008) - 1600: 100, # MSVC 10 (aka 2010) - 1900: 140, # MSVC 14 (aka 2015) - }.get(msvc_runtime_version(), None) - return major - -######################### - -#XXX need support for .C that is also C++ 
-cxx_ext_match = re.compile(r'.*\.(cpp|cxx|cc)\Z', re.I).match -fortran_ext_match = re.compile(r'.*\.(f90|f95|f77|for|ftn|f)\Z', re.I).match -f90_ext_match = re.compile(r'.*\.(f90|f95)\Z', re.I).match -f90_module_name_match = re.compile(r'\s*module\s*(?P[\w_]+)', re.I).match -def _get_f90_modules(source): - """Return a list of Fortran f90 module names that - given source file defines. - """ - if not f90_ext_match(source): - return [] - modules = [] - with open(source) as f: - for line in f: - m = f90_module_name_match(line) - if m: - name = m.group('name') - modules.append(name) - # break # XXX can we assume that there is one module per file? - return modules - -def is_string(s): - return isinstance(s, str) - -def all_strings(lst): - """Return True if all items in lst are string objects. """ - return all(is_string(item) for item in lst) - -def is_sequence(seq): - if is_string(seq): - return False - try: - len(seq) - except Exception: - return False - return True - -def is_glob_pattern(s): - return is_string(s) and ('*' in s or '?' in s) - -def as_list(seq): - if is_sequence(seq): - return list(seq) - else: - return [seq] - -def get_language(sources): - # not used in numpy/scipy packages, use build_ext.detect_language instead - """Determine language value (c,f77,f90) from sources """ - language = None - for source in sources: - if isinstance(source, str): - if f90_ext_match(source): - language = 'f90' - break - elif fortran_ext_match(source): - language = 'f77' - return language - -def has_f_sources(sources): - """Return True if sources contains Fortran files """ - return any(fortran_ext_match(source) for source in sources) - -def has_cxx_sources(sources): - """Return True if sources contains C++ files """ - return any(cxx_ext_match(source) for source in sources) - -def filter_sources(sources): - """Return four lists of filenames containing - C, C++, Fortran, and Fortran 90 module sources, - respectively. 
- """ - c_sources = [] - cxx_sources = [] - f_sources = [] - fmodule_sources = [] - for source in sources: - if fortran_ext_match(source): - modules = _get_f90_modules(source) - if modules: - fmodule_sources.append(source) - else: - f_sources.append(source) - elif cxx_ext_match(source): - cxx_sources.append(source) - else: - c_sources.append(source) - return c_sources, cxx_sources, f_sources, fmodule_sources - - -def _get_headers(directory_list): - # get *.h files from list of directories - headers = [] - for d in directory_list: - head = sorted_glob(os.path.join(d, "*.h")) #XXX: *.hpp files?? - headers.extend(head) - return headers - -def _get_directories(list_of_sources): - # get unique directories from list of sources. - direcs = [] - for f in list_of_sources: - d = os.path.split(f) - if d[0] != '' and not d[0] in direcs: - direcs.append(d[0]) - return direcs - -def _commandline_dep_string(cc_args, extra_postargs, pp_opts): - """ - Return commandline representation used to determine if a file needs - to be recompiled - """ - cmdline = 'commandline: ' - cmdline += ' '.join(cc_args) - cmdline += ' '.join(extra_postargs) - cmdline += ' '.join(pp_opts) + '\n' - return cmdline - - -def get_dependencies(sources): - #XXX scan sources for include statements - return _get_headers(_get_directories(sources)) - -def is_local_src_dir(directory): - """Return true if directory is local directory. 
- """ - if not is_string(directory): - return False - abs_dir = os.path.abspath(directory) - c = os.path.commonprefix([os.getcwd(), abs_dir]) - new_dir = abs_dir[len(c):].split(os.sep) - if new_dir and not new_dir[0]: - new_dir = new_dir[1:] - if new_dir and new_dir[0]=='build': - return False - new_dir = os.sep.join(new_dir) - return os.path.isdir(new_dir) - -def general_source_files(top_path): - pruned_directories = {'CVS':1, '.svn':1, 'build':1} - prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$') - for dirpath, dirnames, filenames in os.walk(top_path, topdown=True): - pruned = [ d for d in dirnames if d not in pruned_directories ] - dirnames[:] = pruned - for f in filenames: - if not prune_file_pat.search(f): - yield os.path.join(dirpath, f) - -def general_source_directories_files(top_path): - """Return a directory name relative to top_path and - files contained. - """ - pruned_directories = ['CVS', '.svn', 'build'] - prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$') - for dirpath, dirnames, filenames in os.walk(top_path, topdown=True): - pruned = [ d for d in dirnames if d not in pruned_directories ] - dirnames[:] = pruned - for d in dirnames: - dpath = os.path.join(dirpath, d) - rpath = rel_path(dpath, top_path) - files = [] - for f in os.listdir(dpath): - fn = os.path.join(dpath, f) - if os.path.isfile(fn) and not prune_file_pat.search(fn): - files.append(fn) - yield rpath, files - dpath = top_path - rpath = rel_path(dpath, top_path) - filenames = [os.path.join(dpath, f) for f in os.listdir(dpath) \ - if not prune_file_pat.search(f)] - files = [f for f in filenames if os.path.isfile(f)] - yield rpath, files - - -def get_ext_source_files(ext): - # Get sources and any include files in the same directory. 
- filenames = [] - sources = [_m for _m in ext.sources if is_string(_m)] - filenames.extend(sources) - filenames.extend(get_dependencies(sources)) - for d in ext.depends: - if is_local_src_dir(d): - filenames.extend(list(general_source_files(d))) - elif os.path.isfile(d): - filenames.append(d) - return filenames - -def get_script_files(scripts): - scripts = [_m for _m in scripts if is_string(_m)] - return scripts - -def get_lib_source_files(lib): - filenames = [] - sources = lib[1].get('sources', []) - sources = [_m for _m in sources if is_string(_m)] - filenames.extend(sources) - filenames.extend(get_dependencies(sources)) - depends = lib[1].get('depends', []) - for d in depends: - if is_local_src_dir(d): - filenames.extend(list(general_source_files(d))) - elif os.path.isfile(d): - filenames.append(d) - return filenames - -def get_shared_lib_extension(is_python_ext=False): - """Return the correct file extension for shared libraries. - - Parameters - ---------- - is_python_ext : bool, optional - Whether the shared library is a Python extension. Default is False. - - Returns - ------- - so_ext : str - The shared library extension. - - Notes - ----- - For Python shared libs, `so_ext` will typically be '.so' on Linux and OS X, - and '.pyd' on Windows. For Python >= 3.2 `so_ext` has a tag prepended on - POSIX systems according to PEP 3149. - - """ - confvars = distutils.sysconfig.get_config_vars() - so_ext = confvars.get('EXT_SUFFIX', '') - - if not is_python_ext: - # hardcode known values, config vars (including SHLIB_SUFFIX) are - # unreliable (see #3182) - # darwin, windows and debug linux are wrong in 3.3.1 and older - if (sys.platform.startswith('linux') or - sys.platform.startswith('gnukfreebsd')): - so_ext = '.so' - elif sys.platform.startswith('darwin'): - so_ext = '.dylib' - elif sys.platform.startswith('win'): - so_ext = '.dll' - else: - # fall back to config vars for unknown platforms - # fix long extension for Python >=3.2, see PEP 3149. 
- if 'SOABI' in confvars: - # Does nothing unless SOABI config var exists - so_ext = so_ext.replace('.' + confvars.get('SOABI'), '', 1) - - return so_ext - -def get_data_files(data): - if is_string(data): - return [data] - sources = data[1] - filenames = [] - for s in sources: - if hasattr(s, '__call__'): - continue - if is_local_src_dir(s): - filenames.extend(list(general_source_files(s))) - elif is_string(s): - if os.path.isfile(s): - filenames.append(s) - else: - print('Not existing data file:', s) - else: - raise TypeError(repr(s)) - return filenames - -def dot_join(*args): - return '.'.join([a for a in args if a]) - -def get_frame(level=0): - """Return frame object from call stack with given level. - """ - try: - return sys._getframe(level+1) - except AttributeError: - frame = sys.exc_info()[2].tb_frame - for _ in range(level+1): - frame = frame.f_back - return frame - - -###################### - -class Configuration: - - _list_keys = ['packages', 'ext_modules', 'data_files', 'include_dirs', - 'libraries', 'headers', 'scripts', 'py_modules', - 'installed_libraries', 'define_macros'] - _dict_keys = ['package_dir', 'installed_pkg_config'] - _extra_keys = ['name', 'version'] - - numpy_include_dirs = [] - - def __init__(self, - package_name=None, - parent_name=None, - top_path=None, - package_path=None, - caller_level=1, - setup_name='setup.py', - **attrs): - """Construct configuration instance of a package. - - package_name -- name of the package - Ex.: 'distutils' - parent_name -- name of the parent package - Ex.: 'numpy' - top_path -- directory of the toplevel package - Ex.: the directory where the numpy package source sits - package_path -- directory of package. Will be computed by magic from the - directory of the caller module if not specified - Ex.: the directory where numpy.distutils is - caller_level -- frame level to caller namespace, internal parameter. 
- """ - self.name = dot_join(parent_name, package_name) - self.version = None - - caller_frame = get_frame(caller_level) - self.local_path = get_path_from_frame(caller_frame, top_path) - # local_path -- directory of a file (usually setup.py) that - # defines a configuration() function. - # local_path -- directory of a file (usually setup.py) that - # defines a configuration() function. - if top_path is None: - top_path = self.local_path - self.local_path = '' - if package_path is None: - package_path = self.local_path - elif os.path.isdir(njoin(self.local_path, package_path)): - package_path = njoin(self.local_path, package_path) - if not os.path.isdir(package_path or '.'): - raise ValueError("%r is not a directory" % (package_path,)) - self.top_path = top_path - self.package_path = package_path - # this is the relative path in the installed package - self.path_in_package = os.path.join(*self.name.split('.')) - - self.list_keys = self._list_keys[:] - self.dict_keys = self._dict_keys[:] - - for n in self.list_keys: - v = copy.copy(attrs.get(n, [])) - setattr(self, n, as_list(v)) - - for n in self.dict_keys: - v = copy.copy(attrs.get(n, {})) - setattr(self, n, v) - - known_keys = self.list_keys + self.dict_keys - self.extra_keys = self._extra_keys[:] - for n in attrs.keys(): - if n in known_keys: - continue - a = attrs[n] - setattr(self, n, a) - if isinstance(a, list): - self.list_keys.append(n) - elif isinstance(a, dict): - self.dict_keys.append(n) - else: - self.extra_keys.append(n) - - if os.path.exists(njoin(package_path, '__init__.py')): - self.packages.append(self.name) - self.package_dir[self.name] = package_path - - self.options = dict( - ignore_setup_xxx_py = False, - assume_default_configuration = False, - delegate_options_to_subpackages = False, - quiet = False, - ) - - caller_instance = None - for i in range(1, 3): - try: - f = get_frame(i) - except ValueError: - break - try: - caller_instance = eval('self', f.f_globals, f.f_locals) - break - except 
NameError: - pass - if isinstance(caller_instance, self.__class__): - if caller_instance.options['delegate_options_to_subpackages']: - self.set_options(**caller_instance.options) - - self.setup_name = setup_name - - def todict(self): - """ - Return a dictionary compatible with the keyword arguments of distutils - setup function. - - Examples - -------- - >>> setup(**config.todict()) #doctest: +SKIP - """ - - self._optimize_data_files() - d = {} - known_keys = self.list_keys + self.dict_keys + self.extra_keys - for n in known_keys: - a = getattr(self, n) - if a: - d[n] = a - return d - - def info(self, message): - if not self.options['quiet']: - print(message) - - def warn(self, message): - sys.stderr.write('Warning: %s\n' % (message,)) - - def set_options(self, **options): - """ - Configure Configuration instance. - - The following options are available: - - ignore_setup_xxx_py - - assume_default_configuration - - delegate_options_to_subpackages - - quiet - - """ - for key, value in options.items(): - if key in self.options: - self.options[key] = value - else: - raise ValueError('Unknown option: '+key) - - def get_distribution(self): - """Return the distutils distribution object for self.""" - from numpy.distutils.core import get_distribution - return get_distribution() - - def _wildcard_get_subpackage(self, subpackage_name, - parent_name, - caller_level = 1): - l = subpackage_name.split('.') - subpackage_path = njoin([self.local_path]+l) - dirs = [_m for _m in sorted_glob(subpackage_path) if os.path.isdir(_m)] - config_list = [] - for d in dirs: - if not os.path.isfile(njoin(d, '__init__.py')): - continue - if 'build' in d.split(os.sep): - continue - n = '.'.join(d.split(os.sep)[-len(l):]) - c = self.get_subpackage(n, - parent_name = parent_name, - caller_level = caller_level+1) - config_list.extend(c) - return config_list - - def _get_configuration_from_setup_py(self, setup_py, - subpackage_name, - subpackage_path, - parent_name, - caller_level = 1): - # In case 
setup_py imports local modules: - sys.path.insert(0, os.path.dirname(setup_py)) - try: - setup_name = os.path.splitext(os.path.basename(setup_py))[0] - n = dot_join(self.name, subpackage_name, setup_name) - setup_module = exec_mod_from_location( - '_'.join(n.split('.')), setup_py) - if not hasattr(setup_module, 'configuration'): - if not self.options['assume_default_configuration']: - self.warn('Assuming default configuration '\ - '(%s does not define configuration())'\ - % (setup_module)) - config = Configuration(subpackage_name, parent_name, - self.top_path, subpackage_path, - caller_level = caller_level + 1) - else: - pn = dot_join(*([parent_name] + subpackage_name.split('.')[:-1])) - args = (pn,) - if setup_module.configuration.__code__.co_argcount > 1: - args = args + (self.top_path,) - config = setup_module.configuration(*args) - if config.name!=dot_join(parent_name, subpackage_name): - self.warn('Subpackage %r configuration returned as %r' % \ - (dot_join(parent_name, subpackage_name), config.name)) - finally: - del sys.path[0] - return config - - def get_subpackage(self,subpackage_name, - subpackage_path=None, - parent_name=None, - caller_level = 1): - """Return list of subpackage configurations. - - Parameters - ---------- - subpackage_name : str or None - Name of the subpackage to get the configuration. '*' in - subpackage_name is handled as a wildcard. - subpackage_path : str - If None, then the path is assumed to be the local path plus the - subpackage_name. If a setup.py file is not found in the - subpackage_path, then a default configuration is used. - parent_name : str - Parent name. 
- """ - if subpackage_name is None: - if subpackage_path is None: - raise ValueError( - "either subpackage_name or subpackage_path must be specified") - subpackage_name = os.path.basename(subpackage_path) - - # handle wildcards - l = subpackage_name.split('.') - if subpackage_path is None and '*' in subpackage_name: - return self._wildcard_get_subpackage(subpackage_name, - parent_name, - caller_level = caller_level+1) - assert '*' not in subpackage_name, repr((subpackage_name, subpackage_path, parent_name)) - if subpackage_path is None: - subpackage_path = njoin([self.local_path] + l) - else: - subpackage_path = njoin([subpackage_path] + l[:-1]) - subpackage_path = self.paths([subpackage_path])[0] - setup_py = njoin(subpackage_path, self.setup_name) - if not self.options['ignore_setup_xxx_py']: - if not os.path.isfile(setup_py): - setup_py = njoin(subpackage_path, - 'setup_%s.py' % (subpackage_name)) - if not os.path.isfile(setup_py): - if not self.options['assume_default_configuration']: - self.warn('Assuming default configuration '\ - '(%s/{setup_%s,setup}.py was not found)' \ - % (os.path.dirname(setup_py), subpackage_name)) - config = Configuration(subpackage_name, parent_name, - self.top_path, subpackage_path, - caller_level = caller_level+1) - else: - config = self._get_configuration_from_setup_py( - setup_py, - subpackage_name, - subpackage_path, - parent_name, - caller_level = caller_level + 1) - if config: - return [config] - else: - return [] - - def add_subpackage(self,subpackage_name, - subpackage_path=None, - standalone = False): - """Add a sub-package to the current Configuration instance. - - This is useful in a setup.py script for adding sub-packages to a - package. - - Parameters - ---------- - subpackage_name : str - name of the subpackage - subpackage_path : str - if given, the subpackage path such as the subpackage is in - subpackage_path / subpackage_name. If None,the subpackage is - assumed to be located in the local path / subpackage_name. 
- standalone : bool - """ - - if standalone: - parent_name = None - else: - parent_name = self.name - config_list = self.get_subpackage(subpackage_name, subpackage_path, - parent_name = parent_name, - caller_level = 2) - if not config_list: - self.warn('No configuration returned, assuming unavailable.') - for config in config_list: - d = config - if isinstance(config, Configuration): - d = config.todict() - assert isinstance(d, dict), repr(type(d)) - - self.info('Appending %s configuration to %s' \ - % (d.get('name'), self.name)) - self.dict_append(**d) - - dist = self.get_distribution() - if dist is not None: - self.warn('distutils distribution has been initialized,'\ - ' it may be too late to add a subpackage '+ subpackage_name) - - def add_data_dir(self, data_path): - """Recursively add files under data_path to data_files list. - - Recursively add files under data_path to the list of data_files to be - installed (and distributed). The data_path can be either a relative - path-name, or an absolute path-name, or a 2-tuple where the first - argument shows where in the install directory the data directory - should be installed to. - - Parameters - ---------- - data_path : seq or str - Argument can be either - - * 2-sequence (, ) - * path to data directory where python datadir suffix defaults - to package dir. 
- - Notes - ----- - Rules for installation paths:: - - foo/bar -> (foo/bar, foo/bar) -> parent/foo/bar - (gun, foo/bar) -> parent/gun - foo/* -> (foo/a, foo/a), (foo/b, foo/b) -> parent/foo/a, parent/foo/b - (gun, foo/*) -> (gun, foo/a), (gun, foo/b) -> gun - (gun/*, foo/*) -> parent/gun/a, parent/gun/b - /foo/bar -> (bar, /foo/bar) -> parent/bar - (gun, /foo/bar) -> parent/gun - (fun/*/gun/*, sun/foo/bar) -> parent/fun/foo/gun/bar - - Examples - -------- - For example suppose the source directory contains fun/foo.dat and - fun/bar/car.dat: - - >>> self.add_data_dir('fun') #doctest: +SKIP - >>> self.add_data_dir(('sun', 'fun')) #doctest: +SKIP - >>> self.add_data_dir(('gun', '/full/path/to/fun'))#doctest: +SKIP - - Will install data-files to the locations:: - - / - fun/ - foo.dat - bar/ - car.dat - sun/ - foo.dat - bar/ - car.dat - gun/ - foo.dat - car.dat - - """ - if is_sequence(data_path): - d, data_path = data_path - else: - d = None - if is_sequence(data_path): - [self.add_data_dir((d, p)) for p in data_path] - return - if not is_string(data_path): - raise TypeError("not a string: %r" % (data_path,)) - if d is None: - if os.path.isabs(data_path): - return self.add_data_dir((os.path.basename(data_path), data_path)) - return self.add_data_dir((data_path, data_path)) - paths = self.paths(data_path, include_non_existing=False) - if is_glob_pattern(data_path): - if is_glob_pattern(d): - pattern_list = allpath(d).split(os.sep) - pattern_list.reverse() - # /a/*//b/ -> /a/*/b - rl = list(range(len(pattern_list)-1)); rl.reverse() - for i in rl: - if not pattern_list[i]: - del pattern_list[i] - # - for path in paths: - if not os.path.isdir(path): - print('Not a directory, skipping', path) - continue - rpath = rel_path(path, self.local_path) - path_list = rpath.split(os.sep) - path_list.reverse() - target_list = [] - i = 0 - for s in pattern_list: - if is_glob_pattern(s): - if i>=len(path_list): - raise ValueError('cannot fill pattern %r with %r' \ - % (d, path)) - 
target_list.append(path_list[i]) - else: - assert s==path_list[i], repr((s, path_list[i], data_path, d, path, rpath)) - target_list.append(s) - i += 1 - if path_list[i:]: - self.warn('mismatch of pattern_list=%s and path_list=%s'\ - % (pattern_list, path_list)) - target_list.reverse() - self.add_data_dir((os.sep.join(target_list), path)) - else: - for path in paths: - self.add_data_dir((d, path)) - return - assert not is_glob_pattern(d), repr(d) - - dist = self.get_distribution() - if dist is not None and dist.data_files is not None: - data_files = dist.data_files - else: - data_files = self.data_files - - for path in paths: - for d1, f in list(general_source_directories_files(path)): - target_path = os.path.join(self.path_in_package, d, d1) - data_files.append((target_path, f)) - - def _optimize_data_files(self): - data_dict = {} - for p, files in self.data_files: - if p not in data_dict: - data_dict[p] = set() - for f in files: - data_dict[p].add(f) - self.data_files[:] = [(p, list(files)) for p, files in data_dict.items()] - - def add_data_files(self,*files): - """Add data files to configuration data_files. - - Parameters - ---------- - files : sequence - Argument(s) can be either - - * 2-sequence (,) - * paths to data files where python datadir prefix defaults - to package dir. - - Notes - ----- - The form of each element of the files sequence is very flexible - allowing many combinations of where to get the files from the package - and where they should ultimately be installed on the system. The most - basic usage is for an element of the files argument sequence to be a - simple filename. This will cause that file from the local path to be - installed to the installation path of the self.name package (package - path). The file argument can also be a relative path in which case the - entire relative path will be installed into the package directory. 
- Finally, the file can be an absolute path name in which case the file - will be found at the absolute path name but installed to the package - path. - - This basic behavior can be augmented by passing a 2-tuple in as the - file argument. The first element of the tuple should specify the - relative path (under the package install directory) where the - remaining sequence of files should be installed to (it has nothing to - do with the file-names in the source distribution). The second element - of the tuple is the sequence of files that should be installed. The - files in this sequence can be filenames, relative paths, or absolute - paths. For absolute paths the file will be installed in the top-level - package installation directory (regardless of the first argument). - Filenames and relative path names will be installed in the package - install directory under the path name given as the first element of - the tuple. - - Rules for installation paths: - - #. file.txt -> (., file.txt)-> parent/file.txt - #. foo/file.txt -> (foo, foo/file.txt) -> parent/foo/file.txt - #. /foo/bar/file.txt -> (., /foo/bar/file.txt) -> parent/file.txt - #. ``*``.txt -> parent/a.txt, parent/b.txt - #. foo/``*``.txt`` -> parent/foo/a.txt, parent/foo/b.txt - #. ``*/*.txt`` -> (``*``, ``*``/``*``.txt) -> parent/c/a.txt, parent/d/b.txt - #. (sun, file.txt) -> parent/sun/file.txt - #. (sun, bar/file.txt) -> parent/sun/file.txt - #. (sun, /foo/bar/file.txt) -> parent/sun/file.txt - #. (sun, ``*``.txt) -> parent/sun/a.txt, parent/sun/b.txt - #. (sun, bar/``*``.txt) -> parent/sun/a.txt, parent/sun/b.txt - #. (sun/``*``, ``*``/``*``.txt) -> parent/sun/c/a.txt, parent/d/b.txt - - An additional feature is that the path to a data-file can actually be - a function that takes no arguments and returns the actual path(s) to - the data-files. This is useful when the data files are generated while - building the package. 
- - Examples - -------- - Add files to the list of data_files to be included with the package. - - >>> self.add_data_files('foo.dat', - ... ('fun', ['gun.dat', 'nun/pun.dat', '/tmp/sun.dat']), - ... 'bar/cat.dat', - ... '/full/path/to/can.dat') #doctest: +SKIP - - will install these data files to:: - - / - foo.dat - fun/ - gun.dat - nun/ - pun.dat - sun.dat - bar/ - car.dat - can.dat - - where is the package (or sub-package) - directory such as '/usr/lib/python2.4/site-packages/mypackage' ('C: - \\Python2.4 \\Lib \\site-packages \\mypackage') or - '/usr/lib/python2.4/site- packages/mypackage/mysubpackage' ('C: - \\Python2.4 \\Lib \\site-packages \\mypackage \\mysubpackage'). - """ - - if len(files)>1: - for f in files: - self.add_data_files(f) - return - assert len(files)==1 - if is_sequence(files[0]): - d, files = files[0] - else: - d = None - if is_string(files): - filepat = files - elif is_sequence(files): - if len(files)==1: - filepat = files[0] - else: - for f in files: - self.add_data_files((d, f)) - return - else: - raise TypeError(repr(type(files))) - - if d is None: - if hasattr(filepat, '__call__'): - d = '' - elif os.path.isabs(filepat): - d = '' - else: - d = os.path.dirname(filepat) - self.add_data_files((d, files)) - return - - paths = self.paths(filepat, include_non_existing=False) - if is_glob_pattern(filepat): - if is_glob_pattern(d): - pattern_list = d.split(os.sep) - pattern_list.reverse() - for path in paths: - path_list = path.split(os.sep) - path_list.reverse() - path_list.pop() # filename - target_list = [] - i = 0 - for s in pattern_list: - if is_glob_pattern(s): - target_list.append(path_list[i]) - i += 1 - else: - target_list.append(s) - target_list.reverse() - self.add_data_files((os.sep.join(target_list), path)) - else: - self.add_data_files((d, paths)) - return - assert not is_glob_pattern(d), repr((d, filepat)) - - dist = self.get_distribution() - if dist is not None and dist.data_files is not None: - data_files = dist.data_files - 
else: - data_files = self.data_files - - data_files.append((os.path.join(self.path_in_package, d), paths)) - - ### XXX Implement add_py_modules - - def add_define_macros(self, macros): - """Add define macros to configuration - - Add the given sequence of macro name and value duples to the beginning - of the define_macros list This list will be visible to all extension - modules of the current package. - """ - dist = self.get_distribution() - if dist is not None: - if not hasattr(dist, 'define_macros'): - dist.define_macros = [] - dist.define_macros.extend(macros) - else: - self.define_macros.extend(macros) - - - def add_include_dirs(self,*paths): - """Add paths to configuration include directories. - - Add the given sequence of paths to the beginning of the include_dirs - list. This list will be visible to all extension modules of the - current package. - """ - include_dirs = self.paths(paths) - dist = self.get_distribution() - if dist is not None: - if dist.include_dirs is None: - dist.include_dirs = [] - dist.include_dirs.extend(include_dirs) - else: - self.include_dirs.extend(include_dirs) - - def add_headers(self,*files): - """Add installable headers to configuration. - - Add the given sequence of files to the beginning of the headers list. - By default, headers will be installed under // directory. If an item of files - is a tuple, then its first argument specifies the actual installation - location relative to the path. - - Parameters - ---------- - files : str or seq - Argument(s) can be either: - - * 2-sequence (,) - * path(s) to header file(s) where python includedir suffix will - default to package name. 
- """ - headers = [] - for path in files: - if is_string(path): - [headers.append((self.name, p)) for p in self.paths(path)] - else: - if not isinstance(path, (tuple, list)) or len(path) != 2: - raise TypeError(repr(path)) - [headers.append((path[0], p)) for p in self.paths(path[1])] - dist = self.get_distribution() - if dist is not None: - if dist.headers is None: - dist.headers = [] - dist.headers.extend(headers) - else: - self.headers.extend(headers) - - def paths(self,*paths,**kws): - """Apply glob to paths and prepend local_path if needed. - - Applies glob.glob(...) to each path in the sequence (if needed) and - prepends the local_path if needed. Because this is called on all - source lists, this allows wildcard characters to be specified in lists - of sources for extension modules and libraries and scripts and allows - path-names be relative to the source directory. - - """ - include_non_existing = kws.get('include_non_existing', True) - return gpaths(paths, - local_path = self.local_path, - include_non_existing=include_non_existing) - - def _fix_paths_dict(self, kw): - for k in kw.keys(): - v = kw[k] - if k in ['sources', 'depends', 'include_dirs', 'library_dirs', - 'module_dirs', 'extra_objects']: - new_v = self.paths(v) - kw[k] = new_v - - def add_extension(self,name,sources,**kw): - """Add extension to configuration. - - Create and add an Extension instance to the ext_modules list. This - method also takes the following optional keyword arguments that are - passed on to the Extension constructor. - - Parameters - ---------- - name : str - name of the extension - sources : seq - list of the sources. The list of sources may contain functions - (called source generators) which must take an extension instance - and a build directory as inputs and return a source file or list of - source files or None. If None is returned then no sources are - generated. 
If the Extension instance has no sources after - processing all source generators, then no extension module is - built. - include_dirs : - define_macros : - undef_macros : - library_dirs : - libraries : - runtime_library_dirs : - extra_objects : - extra_compile_args : - extra_link_args : - extra_f77_compile_args : - extra_f90_compile_args : - export_symbols : - swig_opts : - depends : - The depends list contains paths to files or directories that the - sources of the extension module depend on. If any path in the - depends list is newer than the extension module, then the module - will be rebuilt. - language : - f2py_options : - module_dirs : - extra_info : dict or list - dict or list of dict of keywords to be appended to keywords. - - Notes - ----- - The self.paths(...) method is applied to all lists that may contain - paths. - """ - ext_args = copy.copy(kw) - ext_args['name'] = dot_join(self.name, name) - ext_args['sources'] = sources - - if 'extra_info' in ext_args: - extra_info = ext_args['extra_info'] - del ext_args['extra_info'] - if isinstance(extra_info, dict): - extra_info = [extra_info] - for info in extra_info: - assert isinstance(info, dict), repr(info) - dict_append(ext_args,**info) - - self._fix_paths_dict(ext_args) - - # Resolve out-of-tree dependencies - libraries = ext_args.get('libraries', []) - libnames = [] - ext_args['libraries'] = [] - for libname in libraries: - if isinstance(libname, tuple): - self._fix_paths_dict(libname[1]) - - # Handle library names of the form libname@relative/path/to/library - if '@' in libname: - lname, lpath = libname.split('@', 1) - lpath = os.path.abspath(njoin(self.local_path, lpath)) - if os.path.isdir(lpath): - c = self.get_subpackage(None, lpath, - caller_level = 2) - if isinstance(c, Configuration): - c = c.todict() - for l in [l[0] for l in c.get('libraries', [])]: - llname = l.split('__OF__', 1)[0] - if llname == lname: - c.pop('name', None) - dict_append(ext_args,**c) - break - continue - 
libnames.append(libname) - - ext_args['libraries'] = libnames + ext_args['libraries'] - ext_args['define_macros'] = \ - self.define_macros + ext_args.get('define_macros', []) - - from numpy.distutils.core import Extension - ext = Extension(**ext_args) - self.ext_modules.append(ext) - - dist = self.get_distribution() - if dist is not None: - self.warn('distutils distribution has been initialized,'\ - ' it may be too late to add an extension '+name) - return ext - - def add_library(self,name,sources,**build_info): - """ - Add library to configuration. - - Parameters - ---------- - name : str - Name of the extension. - sources : sequence - List of the sources. The list of sources may contain functions - (called source generators) which must take an extension instance - and a build directory as inputs and return a source file or list of - source files or None. If None is returned then no sources are - generated. If the Extension instance has no sources after - processing all source generators, then no extension module is - built. - build_info : dict, optional - The following keys are allowed: - - * depends - * macros - * include_dirs - * extra_compiler_args - * extra_f77_compile_args - * extra_f90_compile_args - * f2py_options - * language - - """ - self._add_library(name, sources, None, build_info) - - dist = self.get_distribution() - if dist is not None: - self.warn('distutils distribution has been initialized,'\ - ' it may be too late to add a library '+ name) - - def _add_library(self, name, sources, install_dir, build_info): - """Common implementation for add_library and add_installed_library. 
Do - not use directly""" - build_info = copy.copy(build_info) - build_info['sources'] = sources - - # Sometimes, depends is not set up to an empty list by default, and if - # depends is not given to add_library, distutils barfs (#1134) - if not 'depends' in build_info: - build_info['depends'] = [] - - self._fix_paths_dict(build_info) - - # Add to libraries list so that it is build with build_clib - self.libraries.append((name, build_info)) - - def add_installed_library(self, name, sources, install_dir, build_info=None): - """ - Similar to add_library, but the specified library is installed. - - Most C libraries used with ``distutils`` are only used to build python - extensions, but libraries built through this method will be installed - so that they can be reused by third-party packages. - - Parameters - ---------- - name : str - Name of the installed library. - sources : sequence - List of the library's source files. See `add_library` for details. - install_dir : str - Path to install the library, relative to the current sub-package. - build_info : dict, optional - The following keys are allowed: - - * depends - * macros - * include_dirs - * extra_compiler_args - * extra_f77_compile_args - * extra_f90_compile_args - * f2py_options - * language - - Returns - ------- - None - - See Also - -------- - add_library, add_npy_pkg_config, get_info - - Notes - ----- - The best way to encode the options required to link against the specified - C libraries is to use a "libname.ini" file, and use `get_info` to - retrieve the required options (see `add_npy_pkg_config` for more - information). - - """ - if not build_info: - build_info = {} - - install_dir = os.path.join(self.package_path, install_dir) - self._add_library(name, sources, install_dir, build_info) - self.installed_libraries.append(InstallableLib(name, build_info, install_dir)) - - def add_npy_pkg_config(self, template, install_dir, subst_dict=None): - """ - Generate and install a npy-pkg config file from a template. 
- - The config file generated from `template` is installed in the - given install directory, using `subst_dict` for variable substitution. - - Parameters - ---------- - template : str - The path of the template, relatively to the current package path. - install_dir : str - Where to install the npy-pkg config file, relatively to the current - package path. - subst_dict : dict, optional - If given, any string of the form ``@key@`` will be replaced by - ``subst_dict[key]`` in the template file when installed. The install - prefix is always available through the variable ``@prefix@``, since the - install prefix is not easy to get reliably from setup.py. - - See also - -------- - add_installed_library, get_info - - Notes - ----- - This works for both standard installs and in-place builds, i.e. the - ``@prefix@`` refer to the source directory for in-place builds. - - Examples - -------- - :: - - config.add_npy_pkg_config('foo.ini.in', 'lib', {'foo': bar}) - - Assuming the foo.ini.in file has the following content:: - - [meta] - Name=@foo@ - Version=1.0 - Description=dummy description - - [default] - Cflags=-I@prefix@/include - Libs= - - The generated file will have the following content:: - - [meta] - Name=bar - Version=1.0 - Description=dummy description - - [default] - Cflags=-Iprefix_dir/include - Libs= - - and will be installed as foo.ini in the 'lib' subpath. - - When cross-compiling with numpy distutils, it might be necessary to - use modified npy-pkg-config files. Using the default/generated files - will link with the host libraries (i.e. libnpymath.a). For - cross-compilation you of-course need to link with target libraries, - while using the host Python installation. - - You can copy out the numpy/_core/lib/npy-pkg-config directory, add a - pkgdir value to the .ini files and set NPY_PKG_CONFIG_PATH environment - variable to point to the directory with the modified npy-pkg-config - files. 
- - Example npymath.ini modified for cross-compilation:: - - [meta] - Name=npymath - Description=Portable, core math library implementing C99 standard - Version=0.1 - - [variables] - pkgname=numpy._core - pkgdir=/build/arm-linux-gnueabi/sysroot/usr/lib/python3.7/site-packages/numpy/_core - prefix=${pkgdir} - libdir=${prefix}/lib - includedir=${prefix}/include - - [default] - Libs=-L${libdir} -lnpymath - Cflags=-I${includedir} - Requires=mlib - - [msvc] - Libs=/LIBPATH:${libdir} npymath.lib - Cflags=/INCLUDE:${includedir} - Requires=mlib - - """ - if subst_dict is None: - subst_dict = {} - template = os.path.join(self.package_path, template) - - if self.name in self.installed_pkg_config: - self.installed_pkg_config[self.name].append((template, install_dir, - subst_dict)) - else: - self.installed_pkg_config[self.name] = [(template, install_dir, - subst_dict)] - - - def add_scripts(self,*files): - """Add scripts to configuration. - - Add the sequence of files to the beginning of the scripts list. - Scripts will be installed under the /bin/ directory. 
- - """ - scripts = self.paths(files) - dist = self.get_distribution() - if dist is not None: - if dist.scripts is None: - dist.scripts = [] - dist.scripts.extend(scripts) - else: - self.scripts.extend(scripts) - - def dict_append(self,**dict): - for key in self.list_keys: - a = getattr(self, key) - a.extend(dict.get(key, [])) - for key in self.dict_keys: - a = getattr(self, key) - a.update(dict.get(key, {})) - known_keys = self.list_keys + self.dict_keys + self.extra_keys - for key in dict.keys(): - if key not in known_keys: - a = getattr(self, key, None) - if a and a==dict[key]: continue - self.warn('Inheriting attribute %r=%r from %r' \ - % (key, dict[key], dict.get('name', '?'))) - setattr(self, key, dict[key]) - self.extra_keys.append(key) - elif key in self.extra_keys: - self.info('Ignoring attempt to set %r (from %r to %r)' \ - % (key, getattr(self, key), dict[key])) - elif key in known_keys: - # key is already processed above - pass - else: - raise ValueError("Don't know about key=%r" % (key)) - - def __str__(self): - from pprint import pformat - known_keys = self.list_keys + self.dict_keys + self.extra_keys - s = '<'+5*'-' + '\n' - s += 'Configuration of '+self.name+':\n' - known_keys.sort() - for k in known_keys: - a = getattr(self, k, None) - if a: - s += '%s = %s\n' % (k, pformat(a)) - s += 5*'-' + '>' - return s - - def get_config_cmd(self): - """ - Returns the numpy.distutils config command instance. - """ - cmd = get_cmd('config') - cmd.ensure_finalized() - cmd.dump_source = 0 - cmd.noisy = 0 - old_path = os.environ.get('PATH') - if old_path: - path = os.pathsep.join(['.', old_path]) - os.environ['PATH'] = path - return cmd - - def get_build_temp_dir(self): - """ - Return a path to a temporary directory where temporary files should be - placed. - """ - cmd = get_cmd('build') - cmd.ensure_finalized() - return cmd.build_temp - - def have_f77c(self): - """Check for availability of Fortran 77 compiler. 
- - Use it inside source generating function to ensure that - setup distribution instance has been initialized. - - Notes - ----- - True if a Fortran 77 compiler is available (because a simple Fortran 77 - code was able to be compiled successfully). - """ - simple_fortran_subroutine = ''' - subroutine simple - end - ''' - config_cmd = self.get_config_cmd() - flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f77') - return flag - - def have_f90c(self): - """Check for availability of Fortran 90 compiler. - - Use it inside source generating function to ensure that - setup distribution instance has been initialized. - - Notes - ----- - True if a Fortran 90 compiler is available (because a simple Fortran - 90 code was able to be compiled successfully) - """ - simple_fortran_subroutine = ''' - subroutine simple - end - ''' - config_cmd = self.get_config_cmd() - flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f90') - return flag - - def append_to(self, extlib): - """Append libraries, include_dirs to extension or library item. - """ - if is_sequence(extlib): - lib_name, build_info = extlib - dict_append(build_info, - libraries=self.libraries, - include_dirs=self.include_dirs) - else: - from numpy.distutils.core import Extension - assert isinstance(extlib, Extension), repr(extlib) - extlib.libraries.extend(self.libraries) - extlib.include_dirs.extend(self.include_dirs) - - def _get_svn_revision(self, path): - """Return path's SVN revision number. 
- """ - try: - output = subprocess.check_output(['svnversion'], cwd=path) - except (subprocess.CalledProcessError, OSError): - pass - else: - m = re.match(rb'(?P\d+)', output) - if m: - return int(m.group('revision')) - - if sys.platform=='win32' and os.environ.get('SVN_ASP_DOT_NET_HACK', None): - entries = njoin(path, '_svn', 'entries') - else: - entries = njoin(path, '.svn', 'entries') - if os.path.isfile(entries): - with open(entries) as f: - fstr = f.read() - if fstr[:5] == '\d+)"', fstr) - if m: - return int(m.group('revision')) - else: # non-xml entries file --- check to be sure that - m = re.search(r'dir[\n\r]+(?P\d+)', fstr) - if m: - return int(m.group('revision')) - return None - - def _get_hg_revision(self, path): - """Return path's Mercurial revision number. - """ - try: - output = subprocess.check_output( - ['hg', 'identify', '--num'], cwd=path) - except (subprocess.CalledProcessError, OSError): - pass - else: - m = re.match(rb'(?P\d+)', output) - if m: - return int(m.group('revision')) - - branch_fn = njoin(path, '.hg', 'branch') - branch_cache_fn = njoin(path, '.hg', 'branch.cache') - - if os.path.isfile(branch_fn): - branch0 = None - with open(branch_fn) as f: - revision0 = f.read().strip() - - branch_map = {} - with open(branch_cache_fn) as f: - for line in f: - branch1, revision1 = line.split()[:2] - if revision1==revision0: - branch0 = branch1 - try: - revision1 = int(revision1) - except ValueError: - continue - branch_map[branch1] = revision1 - - return branch_map.get(branch0) - - return None - - - def get_version(self, version_file=None, version_variable=None): - """Try to get version string of a package. - - Return a version string of the current package or None if the version - information could not be detected. - - Notes - ----- - This method scans files named - __version__.py, _version.py, version.py, and - __svn_version__.py for string variables version, __version__, and - _version, until a version number is found. 
- """ - version = getattr(self, 'version', None) - if version is not None: - return version - - # Get version from version file. - if version_file is None: - files = ['__version__.py', - self.name.split('.')[-1]+'_version.py', - 'version.py', - '__svn_version__.py', - '__hg_version__.py'] - else: - files = [version_file] - if version_variable is None: - version_vars = ['version', - '__version__', - self.name.split('.')[-1]+'_version'] - else: - version_vars = [version_variable] - for f in files: - fn = njoin(self.local_path, f) - if os.path.isfile(fn): - info = ('.py', 'U', 1) - name = os.path.splitext(os.path.basename(fn))[0] - n = dot_join(self.name, name) - try: - version_module = exec_mod_from_location( - '_'.join(n.split('.')), fn) - except ImportError as e: - self.warn(str(e)) - version_module = None - if version_module is None: - continue - - for a in version_vars: - version = getattr(version_module, a, None) - if version is not None: - break - - # Try if versioneer module - try: - version = version_module.get_versions()['version'] - except AttributeError: - pass - - if version is not None: - break - - if version is not None: - self.version = version - return version - - # Get version as SVN or Mercurial revision number - revision = self._get_svn_revision(self.local_path) - if revision is None: - revision = self._get_hg_revision(self.local_path) - - if revision is not None: - version = str(revision) - self.version = version - - return version - - def make_svn_version_py(self, delete=True): - """Appends a data function to the data_files list that will generate - __svn_version__.py file to the current package directory. - - Generate package __svn_version__.py file from SVN revision number, - it will be removed after python exits but will be available - when sdist, etc commands are executed. - - Notes - ----- - If __svn_version__.py existed before, nothing is done. - - This is - intended for working with source directories that are in an SVN - repository. 
- """ - target = njoin(self.local_path, '__svn_version__.py') - revision = self._get_svn_revision(self.local_path) - if os.path.isfile(target) or revision is None: - return - else: - def generate_svn_version_py(): - if not os.path.isfile(target): - version = str(revision) - self.info('Creating %s (version=%r)' % (target, version)) - with open(target, 'w') as f: - f.write('version = %r\n' % (version)) - - def rm_file(f=target,p=self.info): - if delete: - try: os.remove(f); p('removed '+f) - except OSError: pass - try: os.remove(f+'c'); p('removed '+f+'c') - except OSError: pass - - atexit.register(rm_file) - - return target - - self.add_data_files(('', generate_svn_version_py())) - - def make_hg_version_py(self, delete=True): - """Appends a data function to the data_files list that will generate - __hg_version__.py file to the current package directory. - - Generate package __hg_version__.py file from Mercurial revision, - it will be removed after python exits but will be available - when sdist, etc commands are executed. - - Notes - ----- - If __hg_version__.py existed before, nothing is done. - - This is intended for working with source directories that are - in a Mercurial repository. 
- """ - target = njoin(self.local_path, '__hg_version__.py') - revision = self._get_hg_revision(self.local_path) - if os.path.isfile(target) or revision is None: - return - else: - def generate_hg_version_py(): - if not os.path.isfile(target): - version = str(revision) - self.info('Creating %s (version=%r)' % (target, version)) - with open(target, 'w') as f: - f.write('version = %r\n' % (version)) - - def rm_file(f=target,p=self.info): - if delete: - try: os.remove(f); p('removed '+f) - except OSError: pass - try: os.remove(f+'c'); p('removed '+f+'c') - except OSError: pass - - atexit.register(rm_file) - - return target - - self.add_data_files(('', generate_hg_version_py())) - - def make_config_py(self,name='__config__'): - """Generate package __config__.py file containing system_info - information used during building the package. - - This file is installed to the - package installation directory. - - """ - self.py_modules.append((self.name, name, generate_config_py)) - - def get_info(self,*names): - """Get resources information. - - Return information (from system_info.get_info) for all of the names in - the argument list in a single dictionary. 
- """ - from .system_info import get_info, dict_append - info_dict = {} - for a in names: - dict_append(info_dict,**get_info(a)) - return info_dict - - -def get_cmd(cmdname, _cache={}): - if cmdname not in _cache: - import distutils.core - dist = distutils.core._setup_distribution - if dist is None: - from distutils.errors import DistutilsInternalError - raise DistutilsInternalError( - 'setup distribution instance not initialized') - cmd = dist.get_command_obj(cmdname) - _cache[cmdname] = cmd - return _cache[cmdname] - -def get_numpy_include_dirs(): - # numpy_include_dirs are set by numpy/_core/setup.py, otherwise [] - include_dirs = Configuration.numpy_include_dirs[:] - if not include_dirs: - import numpy - include_dirs = [ numpy.get_include() ] - # else running numpy/_core/setup.py - return include_dirs - -def get_npy_pkg_dir(): - """Return the path where to find the npy-pkg-config directory. - - If the NPY_PKG_CONFIG_PATH environment variable is set, the value of that - is returned. Otherwise, a path inside the location of the numpy module is - returned. - - The NPY_PKG_CONFIG_PATH can be useful when cross-compiling, maintaining - customized npy-pkg-config .ini files for the cross-compilation - environment, and using them when cross-compiling. - - """ - d = os.environ.get('NPY_PKG_CONFIG_PATH') - if d is not None: - return d - spec = importlib.util.find_spec('numpy') - d = os.path.join(os.path.dirname(spec.origin), - '_core', 'lib', 'npy-pkg-config') - return d - -def get_pkg_info(pkgname, dirs=None): - """ - Return library info for the given package. - - Parameters - ---------- - pkgname : str - Name of the package (should match the name of the .ini file, without - the extension, e.g. foo for the file foo.ini). - dirs : sequence, optional - If given, should be a sequence of additional directories where to look - for npy-pkg-config files. Those directories are searched prior to the - NumPy directory. 
- - Returns - ------- - pkginfo : class instance - The `LibraryInfo` instance containing the build information. - - Raises - ------ - PkgNotFound - If the package is not found. - - See Also - -------- - Configuration.add_npy_pkg_config, Configuration.add_installed_library, - get_info - - """ - from numpy.distutils.npy_pkg_config import read_config - - if dirs: - dirs.append(get_npy_pkg_dir()) - else: - dirs = [get_npy_pkg_dir()] - return read_config(pkgname, dirs) - -def get_info(pkgname, dirs=None): - """ - Return an info dict for a given C library. - - The info dict contains the necessary options to use the C library. - - Parameters - ---------- - pkgname : str - Name of the package (should match the name of the .ini file, without - the extension, e.g. foo for the file foo.ini). - dirs : sequence, optional - If given, should be a sequence of additional directories where to look - for npy-pkg-config files. Those directories are searched prior to the - NumPy directory. - - Returns - ------- - info : dict - The dictionary with build information. - - Raises - ------ - PkgNotFound - If the package is not found. 
- - See Also - -------- - Configuration.add_npy_pkg_config, Configuration.add_installed_library, - get_pkg_info - - Examples - -------- - To get the necessary information for the npymath library from NumPy: - - >>> npymath_info = np.distutils.misc_util.get_info('npymath') - >>> npymath_info #doctest: +SKIP - {'define_macros': [], 'libraries': ['npymath'], 'library_dirs': - ['.../numpy/_core/lib'], 'include_dirs': ['.../numpy/_core/include']} - - This info dict can then be used as input to a `Configuration` instance:: - - config.add_extension('foo', sources=['foo.c'], extra_info=npymath_info) - - """ - from numpy.distutils.npy_pkg_config import parse_flags - pkg_info = get_pkg_info(pkgname, dirs) - - # Translate LibraryInfo instance into a build_info dict - info = parse_flags(pkg_info.cflags()) - for k, v in parse_flags(pkg_info.libs()).items(): - info[k].extend(v) - - # add_extension extra_info argument is ANAL - info['define_macros'] = info['macros'] - del info['macros'] - del info['ignored'] - - return info - -def is_bootstrapping(): - import builtins - - try: - builtins.__NUMPY_SETUP__ - return True - except AttributeError: - return False - - -######################### - -def default_config_dict(name = None, parent_name = None, local_path=None): - """Return a configuration dictionary for usage in - configuration() function defined in file setup_.py. 
- """ - import warnings - warnings.warn('Use Configuration(%r,%r,top_path=%r) instead of '\ - 'deprecated default_config_dict(%r,%r,%r)' - % (name, parent_name, local_path, - name, parent_name, local_path, - ), stacklevel=2) - c = Configuration(name, parent_name, local_path) - return c.todict() - - -def dict_append(d, **kws): - for k, v in kws.items(): - if k in d: - ov = d[k] - if isinstance(ov, str): - d[k] = v - else: - d[k].extend(v) - else: - d[k] = v - -def appendpath(prefix, path): - if os.path.sep != '/': - prefix = prefix.replace('/', os.path.sep) - path = path.replace('/', os.path.sep) - drive = '' - if os.path.isabs(path): - drive = os.path.splitdrive(prefix)[0] - absprefix = os.path.splitdrive(os.path.abspath(prefix))[1] - pathdrive, path = os.path.splitdrive(path) - d = os.path.commonprefix([absprefix, path]) - if os.path.join(absprefix[:len(d)], absprefix[len(d):]) != absprefix \ - or os.path.join(path[:len(d)], path[len(d):]) != path: - # Handle invalid paths - d = os.path.dirname(d) - subpath = path[len(d):] - if os.path.isabs(subpath): - subpath = subpath[1:] - else: - subpath = path - return os.path.normpath(njoin(drive + prefix, subpath)) - -def generate_config_py(target): - """Generate config.py file containing system_info information - used during building the package. 
- - Usage: - config['py_modules'].append((packagename, '__config__',generate_config_py)) - """ - from numpy.distutils.system_info import system_info - from distutils.dir_util import mkpath - mkpath(os.path.dirname(target)) - with open(target, 'w') as f: - f.write('# This file is generated by numpy\'s %s\n' % (os.path.basename(sys.argv[0]))) - f.write('# It contains system_info results at the time of building this package.\n') - f.write('__all__ = ["get_info","show"]\n\n') - - # For gfortran+msvc combination, extra shared libraries may exist - f.write(textwrap.dedent(""" - import os - import sys - - extra_dll_dir = os.path.join(os.path.dirname(__file__), '.libs') - - if sys.platform == 'win32' and os.path.isdir(extra_dll_dir): - os.add_dll_directory(extra_dll_dir) - - """)) - - for k, i in system_info.saved_results.items(): - f.write('%s=%r\n' % (k, i)) - f.write(textwrap.dedent(r''' - def get_info(name): - g = globals() - return g.get(name, g.get(name + "_info", {})) - - def show(): - """ - Show libraries in the system on which NumPy was built. - - Print information about various resources (libraries, library - directories, include directories, etc.) in the system on which - NumPy was built. - - See Also - -------- - get_include : Returns the directory containing NumPy C - header files. - - Notes - ----- - 1. Classes specifying the information to be printed are defined - in the `numpy.distutils.system_info` module. 
- - Information may include: - - * ``language``: language used to write the libraries (mostly - C or f77) - * ``libraries``: names of libraries found in the system - * ``library_dirs``: directories containing the libraries - * ``include_dirs``: directories containing library header files - * ``src_dirs``: directories containing library source files - * ``define_macros``: preprocessor macros used by - ``distutils.setup`` - * ``baseline``: minimum CPU features required - * ``found``: dispatched features supported in the system - * ``not found``: dispatched features that are not supported - in the system - - 2. NumPy BLAS/LAPACK Installation Notes - - Installing a numpy wheel (``pip install numpy`` or force it - via ``pip install numpy --only-binary :numpy: numpy``) includes - an OpenBLAS implementation of the BLAS and LAPACK linear algebra - APIs. In this case, ``library_dirs`` reports the original build - time configuration as compiled with gcc/gfortran; at run time - the OpenBLAS library is in - ``site-packages/numpy.libs/`` (linux), or - ``site-packages/numpy/.dylibs/`` (macOS), or - ``site-packages/numpy/.libs/`` (windows). - - Installing numpy from source - (``pip install numpy --no-binary numpy``) searches for BLAS and - LAPACK dynamic link libraries at build time as influenced by - environment variables NPY_BLAS_LIBS, NPY_CBLAS_LIBS, and - NPY_LAPACK_LIBS; or NPY_BLAS_ORDER and NPY_LAPACK_ORDER; - or the optional file ``~/.numpy-site.cfg``. - NumPy remembers those locations and expects to load the same - libraries at run-time. - In NumPy 1.21+ on macOS, 'accelerate' (Apple's Accelerate BLAS - library) is in the default build-time search order after - 'openblas'. 
- - Examples - -------- - >>> import numpy as np - >>> np.show_config() - blas_opt_info: - language = c - define_macros = [('HAVE_CBLAS', None)] - libraries = ['openblas', 'openblas'] - library_dirs = ['/usr/local/lib'] - """ - from numpy._core._multiarray_umath import ( - __cpu_features__, __cpu_baseline__, __cpu_dispatch__ - ) - for name,info_dict in globals().items(): - if name[0] == "_" or type(info_dict) is not type({}): continue - print(name + ":") - if not info_dict: - print(" NOT AVAILABLE") - for k,v in info_dict.items(): - v = str(v) - if k == "sources" and len(v) > 200: - v = v[:60] + " ...\n... " + v[-60:] - print(" %s = %s" % (k,v)) - - features_found, features_not_found = [], [] - for feature in __cpu_dispatch__: - if __cpu_features__[feature]: - features_found.append(feature) - else: - features_not_found.append(feature) - - print("Supported SIMD extensions in this NumPy install:") - print(" baseline = %s" % (','.join(__cpu_baseline__))) - print(" found = %s" % (','.join(features_found))) - print(" not found = %s" % (','.join(features_not_found))) - - ''')) - - return target - -def msvc_version(compiler): - """Return version major and minor of compiler instance if it is - MSVC, raise an exception otherwise.""" - if not compiler.compiler_type == "msvc": - raise ValueError("Compiler instance is not msvc (%s)"\ - % compiler.compiler_type) - return compiler._MSVCCompiler__version - -def get_build_architecture(): - # Importing distutils.msvccompiler triggers a warning on non-Windows - # systems, so delay the import to here. - from distutils.msvccompiler import get_build_architecture - return get_build_architecture() - - -_cxx_ignore_flags = {'-Werror=implicit-function-declaration', '-std=c99'} - - -def sanitize_cxx_flags(cxxflags): - ''' - Some flags are valid for C but not C++. Prune them. 
- ''' - return [flag for flag in cxxflags if flag not in _cxx_ignore_flags] - - -def exec_mod_from_location(modname, modfile): - ''' - Use importlib machinery to import a module `modname` from the file - `modfile`. Depending on the `spec.loader`, the module may not be - registered in sys.modules. - ''' - spec = importlib.util.spec_from_file_location(modname, modfile) - foo = importlib.util.module_from_spec(spec) - spec.loader.exec_module(foo) - return foo diff --git a/numpy/distutils/msvc9compiler.py b/numpy/distutils/msvc9compiler.py deleted file mode 100644 index 68239495d6c7..000000000000 --- a/numpy/distutils/msvc9compiler.py +++ /dev/null @@ -1,63 +0,0 @@ -import os -from distutils.msvc9compiler import MSVCCompiler as _MSVCCompiler - -from .system_info import platform_bits - - -def _merge(old, new): - """Concatenate two environment paths avoiding repeats. - - Here `old` is the environment string before the base class initialize - function is called and `new` is the string after the call. The new string - will be a fixed string if it is not obtained from the current environment, - or the same as the old string if obtained from the same environment. The aim - here is not to append the new string if it is already contained in the old - string so as to limit the growth of the environment string. - - Parameters - ---------- - old : string - Previous environment string. - new : string - New environment string. - - Returns - ------- - ret : string - Updated environment string. - - """ - if not old: - return new - if new in old: - return old - - # Neither new nor old is empty. Give old priority. - return ';'.join([old, new]) - - -class MSVCCompiler(_MSVCCompiler): - def __init__(self, verbose=0, dry_run=0, force=0): - _MSVCCompiler.__init__(self, verbose, dry_run, force) - - def initialize(self, plat_name=None): - # The 'lib' and 'include' variables may be overwritten - # by MSVCCompiler.initialize, so save them for later merge. 
- environ_lib = os.getenv('lib') - environ_include = os.getenv('include') - _MSVCCompiler.initialize(self, plat_name) - - # Merge current and previous values of 'lib' and 'include' - os.environ['lib'] = _merge(environ_lib, os.environ['lib']) - os.environ['include'] = _merge(environ_include, os.environ['include']) - - # msvc9 building for 32 bits requires SSE2 to work around a - # compiler bug. - if platform_bits == 32: - self.compile_options += ['/arch:SSE2'] - self.compile_options_debug += ['/arch:SSE2'] - - def manifest_setup_ldargs(self, output_filename, build_temp, ld_args): - ld_args.append('/MANIFEST') - _MSVCCompiler.manifest_setup_ldargs(self, output_filename, - build_temp, ld_args) diff --git a/numpy/distutils/msvccompiler.py b/numpy/distutils/msvccompiler.py deleted file mode 100644 index 2b93221baac8..000000000000 --- a/numpy/distutils/msvccompiler.py +++ /dev/null @@ -1,76 +0,0 @@ -import os -from distutils.msvccompiler import MSVCCompiler as _MSVCCompiler - -from .system_info import platform_bits - - -def _merge(old, new): - """Concatenate two environment paths avoiding repeats. - - Here `old` is the environment string before the base class initialize - function is called and `new` is the string after the call. The new string - will be a fixed string if it is not obtained from the current environment, - or the same as the old string if obtained from the same environment. The aim - here is not to append the new string if it is already contained in the old - string so as to limit the growth of the environment string. - - Parameters - ---------- - old : string - Previous environment string. - new : string - New environment string. - - Returns - ------- - ret : string - Updated environment string. - - """ - if new in old: - return old - if not old: - return new - - # Neither new nor old is empty. Give old priority. 
- return ';'.join([old, new]) - - -class MSVCCompiler(_MSVCCompiler): - def __init__(self, verbose=0, dry_run=0, force=0): - _MSVCCompiler.__init__(self, verbose, dry_run, force) - - def initialize(self): - # The 'lib' and 'include' variables may be overwritten - # by MSVCCompiler.initialize, so save them for later merge. - environ_lib = os.getenv('lib', '') - environ_include = os.getenv('include', '') - _MSVCCompiler.initialize(self) - - # Merge current and previous values of 'lib' and 'include' - os.environ['lib'] = _merge(environ_lib, os.environ['lib']) - os.environ['include'] = _merge(environ_include, os.environ['include']) - - # msvc9 building for 32 bits requires SSE2 to work around a - # compiler bug. - if platform_bits == 32: - self.compile_options += ['/arch:SSE2'] - self.compile_options_debug += ['/arch:SSE2'] - - -def lib_opts_if_msvc(build_cmd): - """ Add flags if we are using MSVC compiler - - We can't see `build_cmd` in our scope, because we have not initialized - the distutils build command, so use this deferred calculation to run - when we are building the library. - """ - if build_cmd.compiler.compiler_type != 'msvc': - return [] - # Explicitly disable whole-program optimization. 
- flags = ['/GL-'] - # Disable voltbl section for vc142 to allow link using mingw-w64; see: - # https://github.com/matthew-brett/dll_investigation/issues/1#issuecomment-1100468171 - if build_cmd.compiler_opt.cc_test_flags(['-d2VolatileMetadata-']): - flags.append('-d2VolatileMetadata-') - return flags diff --git a/numpy/distutils/npy_pkg_config.py b/numpy/distutils/npy_pkg_config.py deleted file mode 100644 index 14e8791b14cd..000000000000 --- a/numpy/distutils/npy_pkg_config.py +++ /dev/null @@ -1,441 +0,0 @@ -import sys -import re -import os - -from configparser import RawConfigParser - -__all__ = ['FormatError', 'PkgNotFound', 'LibraryInfo', 'VariableSet', - 'read_config', 'parse_flags'] - -_VAR = re.compile(r'\$\{([a-zA-Z0-9_-]+)\}') - -class FormatError(OSError): - """ - Exception thrown when there is a problem parsing a configuration file. - - """ - def __init__(self, msg): - self.msg = msg - - def __str__(self): - return self.msg - -class PkgNotFound(OSError): - """Exception raised when a package can not be located.""" - def __init__(self, msg): - self.msg = msg - - def __str__(self): - return self.msg - -def parse_flags(line): - """ - Parse a line from a config file containing compile flags. - - Parameters - ---------- - line : str - A single line containing one or more compile flags. - - Returns - ------- - d : dict - Dictionary of parsed flags, split into relevant categories. 
- These categories are the keys of `d`: - - * 'include_dirs' - * 'library_dirs' - * 'libraries' - * 'macros' - * 'ignored' - - """ - d = {'include_dirs': [], 'library_dirs': [], 'libraries': [], - 'macros': [], 'ignored': []} - - flags = (' ' + line).split(' -') - for flag in flags: - flag = '-' + flag - if len(flag) > 0: - if flag.startswith('-I'): - d['include_dirs'].append(flag[2:].strip()) - elif flag.startswith('-L'): - d['library_dirs'].append(flag[2:].strip()) - elif flag.startswith('-l'): - d['libraries'].append(flag[2:].strip()) - elif flag.startswith('-D'): - d['macros'].append(flag[2:].strip()) - else: - d['ignored'].append(flag) - - return d - -def _escape_backslash(val): - return val.replace('\\', '\\\\') - -class LibraryInfo: - """ - Object containing build information about a library. - - Parameters - ---------- - name : str - The library name. - description : str - Description of the library. - version : str - Version string. - sections : dict - The sections of the configuration file for the library. The keys are - the section headers, the values the text under each header. - vars : class instance - A `VariableSet` instance, which contains ``(name, value)`` pairs for - variables defined in the configuration file for the library. - requires : sequence, optional - The required libraries for the library to be installed. - - Notes - ----- - All input parameters (except "sections" which is a method) are available as - attributes of the same name. - - """ - def __init__(self, name, description, version, sections, vars, requires=None): - self.name = name - self.description = description - if requires: - self.requires = requires - else: - self.requires = [] - self.version = version - self._sections = sections - self.vars = vars - - def sections(self): - """ - Return the section headers of the config file. - - Parameters - ---------- - None - - Returns - ------- - keys : list of str - The list of section headers. 
- - """ - return list(self._sections.keys()) - - def cflags(self, section="default"): - val = self.vars.interpolate(self._sections[section]['cflags']) - return _escape_backslash(val) - - def libs(self, section="default"): - val = self.vars.interpolate(self._sections[section]['libs']) - return _escape_backslash(val) - - def __str__(self): - m = ['Name: %s' % self.name, 'Description: %s' % self.description] - if self.requires: - m.append('Requires:') - else: - m.append('Requires: %s' % ",".join(self.requires)) - m.append('Version: %s' % self.version) - - return "\n".join(m) - -class VariableSet: - """ - Container object for the variables defined in a config file. - - `VariableSet` can be used as a plain dictionary, with the variable names - as keys. - - Parameters - ---------- - d : dict - Dict of items in the "variables" section of the configuration file. - - """ - def __init__(self, d): - self._raw_data = dict([(k, v) for k, v in d.items()]) - - self._re = {} - self._re_sub = {} - - self._init_parse() - - def _init_parse(self): - for k, v in self._raw_data.items(): - self._init_parse_var(k, v) - - def _init_parse_var(self, name, value): - self._re[name] = re.compile(r'\$\{%s\}' % name) - self._re_sub[name] = value - - def interpolate(self, value): - # Brute force: we keep interpolating until there is no '${var}' anymore - # or until interpolated string is equal to input string - def _interpolate(value): - for k in self._re.keys(): - value = self._re[k].sub(self._re_sub[k], value) - return value - while _VAR.search(value): - nvalue = _interpolate(value) - if nvalue == value: - break - value = nvalue - - return value - - def variables(self): - """ - Return the list of variable names. - - Parameters - ---------- - None - - Returns - ------- - names : list of str - The names of all variables in the `VariableSet` instance. 
- - """ - return list(self._raw_data.keys()) - - # Emulate a dict to set/get variables values - def __getitem__(self, name): - return self._raw_data[name] - - def __setitem__(self, name, value): - self._raw_data[name] = value - self._init_parse_var(name, value) - -def parse_meta(config): - if not config.has_section('meta'): - raise FormatError("No meta section found !") - - d = dict(config.items('meta')) - - for k in ['name', 'description', 'version']: - if not k in d: - raise FormatError("Option %s (section [meta]) is mandatory, " - "but not found" % k) - - if not 'requires' in d: - d['requires'] = [] - - return d - -def parse_variables(config): - if not config.has_section('variables'): - raise FormatError("No variables section found !") - - d = {} - - for name, value in config.items("variables"): - d[name] = value - - return VariableSet(d) - -def parse_sections(config): - return meta_d, r - -def pkg_to_filename(pkg_name): - return "%s.ini" % pkg_name - -def parse_config(filename, dirs=None): - if dirs: - filenames = [os.path.join(d, filename) for d in dirs] - else: - filenames = [filename] - - config = RawConfigParser() - - n = config.read(filenames) - if not len(n) >= 1: - raise PkgNotFound("Could not find file(s) %s" % str(filenames)) - - # Parse meta and variables sections - meta = parse_meta(config) - - vars = {} - if config.has_section('variables'): - for name, value in config.items("variables"): - vars[name] = _escape_backslash(value) - - # Parse "normal" sections - secs = [s for s in config.sections() if not s in ['meta', 'variables']] - sections = {} - - requires = {} - for s in secs: - d = {} - if config.has_option(s, "requires"): - requires[s] = config.get(s, 'requires') - - for name, value in config.items(s): - d[name] = value - sections[s] = d - - return meta, vars, sections, requires - -def _read_config_imp(filenames, dirs=None): - def _read_config(f): - meta, vars, sections, reqs = parse_config(f, dirs) - # recursively add sections and variables of 
required libraries - for rname, rvalue in reqs.items(): - nmeta, nvars, nsections, nreqs = _read_config(pkg_to_filename(rvalue)) - - # Update var dict for variables not in 'top' config file - for k, v in nvars.items(): - if not k in vars: - vars[k] = v - - # Update sec dict - for oname, ovalue in nsections[rname].items(): - if ovalue: - sections[rname][oname] += ' %s' % ovalue - - return meta, vars, sections, reqs - - meta, vars, sections, reqs = _read_config(filenames) - - # FIXME: document this. If pkgname is defined in the variables section, and - # there is no pkgdir variable defined, pkgdir is automatically defined to - # the path of pkgname. This requires the package to be imported to work - if not 'pkgdir' in vars and "pkgname" in vars: - pkgname = vars["pkgname"] - if not pkgname in sys.modules: - raise ValueError("You should import %s to get information on %s" % - (pkgname, meta["name"])) - - mod = sys.modules[pkgname] - vars["pkgdir"] = _escape_backslash(os.path.dirname(mod.__file__)) - - return LibraryInfo(name=meta["name"], description=meta["description"], - version=meta["version"], sections=sections, vars=VariableSet(vars)) - -# Trivial cache to cache LibraryInfo instances creation. To be really -# efficient, the cache should be handled in read_config, since a same file can -# be parsed many time outside LibraryInfo creation, but I doubt this will be a -# problem in practice -_CACHE = {} -def read_config(pkgname, dirs=None): - """ - Return library info for a package from its configuration file. - - Parameters - ---------- - pkgname : str - Name of the package (should match the name of the .ini file, without - the extension, e.g. foo for the file foo.ini). - dirs : sequence, optional - If given, should be a sequence of directories - usually including - the NumPy base directory - where to look for npy-pkg-config files. - - Returns - ------- - pkginfo : class instance - The `LibraryInfo` instance containing the build information. 
- - Raises - ------ - PkgNotFound - If the package is not found. - - See Also - -------- - misc_util.get_info, misc_util.get_pkg_info - - Examples - -------- - >>> npymath_info = np.distutils.npy_pkg_config.read_config('npymath') - >>> type(npymath_info) - - >>> print(npymath_info) - Name: npymath - Description: Portable, core math library implementing C99 standard - Requires: - Version: 0.1 #random - - """ - try: - return _CACHE[pkgname] - except KeyError: - v = _read_config_imp(pkg_to_filename(pkgname), dirs) - _CACHE[pkgname] = v - return v - -# TODO: -# - implements version comparison (modversion + atleast) - -# pkg-config simple emulator - useful for debugging, and maybe later to query -# the system -if __name__ == '__main__': - from optparse import OptionParser - import glob - - parser = OptionParser() - parser.add_option("--cflags", dest="cflags", action="store_true", - help="output all preprocessor and compiler flags") - parser.add_option("--libs", dest="libs", action="store_true", - help="output all linker flags") - parser.add_option("--use-section", dest="section", - help="use this section instead of default for options") - parser.add_option("--version", dest="version", action="store_true", - help="output version") - parser.add_option("--atleast-version", dest="min_version", - help="Minimal version") - parser.add_option("--list-all", dest="list_all", action="store_true", - help="Minimal version") - parser.add_option("--define-variable", dest="define_variable", - help="Replace variable with the given value") - - (options, args) = parser.parse_args(sys.argv) - - if len(args) < 2: - raise ValueError("Expect package name on the command line:") - - if options.list_all: - files = glob.glob("*.ini") - for f in files: - info = read_config(f) - print("%s\t%s - %s" % (info.name, info.name, info.description)) - - pkg_name = args[1] - d = os.environ.get('NPY_PKG_CONFIG_PATH') - if d: - info = read_config( - pkg_name, ['numpy/_core/lib/npy-pkg-config', '.', d] - ) - 
else: - info = read_config( - pkg_name, ['numpy/_core/lib/npy-pkg-config', '.'] - ) - - if options.section: - section = options.section - else: - section = "default" - - if options.define_variable: - m = re.search(r'([\S]+)=([\S]+)', options.define_variable) - if not m: - raise ValueError("--define-variable option should be of " - "the form --define-variable=foo=bar") - else: - name = m.group(1) - value = m.group(2) - info.vars[name] = value - - if options.cflags: - print(info.cflags(section)) - if options.libs: - print(info.libs(section)) - if options.version: - print(info.version) - if options.min_version: - print(info.version >= options.min_version) diff --git a/numpy/distutils/numpy_distribution.py b/numpy/distutils/numpy_distribution.py deleted file mode 100644 index ea8182659cb1..000000000000 --- a/numpy/distutils/numpy_distribution.py +++ /dev/null @@ -1,17 +0,0 @@ -# XXX: Handle setuptools ? -from distutils.core import Distribution - -# This class is used because we add new files (sconscripts, and so on) with the -# scons command -class NumpyDistribution(Distribution): - def __init__(self, attrs = None): - # A list of (sconscripts, pre_hook, post_hook, src, parent_names) - self.scons_data = [] - # A list of installable libraries - self.installed_libraries = [] - # A dict of pkg_config files to generate/install - self.installed_pkg_config = {} - Distribution.__init__(self, attrs) - - def has_scons_scripts(self): - return bool(self.scons_data) diff --git a/numpy/distutils/pathccompiler.py b/numpy/distutils/pathccompiler.py deleted file mode 100644 index 1f879edf4d21..000000000000 --- a/numpy/distutils/pathccompiler.py +++ /dev/null @@ -1,21 +0,0 @@ -from distutils.unixccompiler import UnixCCompiler - -class PathScaleCCompiler(UnixCCompiler): - - """ - PathScale compiler compatible with a gcc built Python. 
- """ - - compiler_type = 'pathcc' - cc_exe = 'pathcc' - cxx_exe = 'pathCC' - - def __init__ (self, verbose=0, dry_run=0, force=0): - UnixCCompiler.__init__ (self, verbose, dry_run, force) - cc_compiler = self.cc_exe - cxx_compiler = self.cxx_exe - self.set_executables(compiler=cc_compiler, - compiler_so=cc_compiler, - compiler_cxx=cxx_compiler, - linker_exe=cc_compiler, - linker_so=cc_compiler + ' -shared') diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py deleted file mode 100644 index e428b47f08d4..000000000000 --- a/numpy/distutils/system_info.py +++ /dev/null @@ -1,3267 +0,0 @@ -""" -This file defines a set of system_info classes for getting -information about various resources (libraries, library directories, -include directories, etc.) in the system. Usage: - info_dict = get_info() - where is a string 'atlas','x11','fftw','lapack','blas', - 'lapack_src', 'blas_src', etc. For a complete list of allowed names, - see the definition of get_info() function below. - - Returned info_dict is a dictionary which is compatible with - distutils.setup keyword arguments. If info_dict == {}, then the - asked resource is not available (system_info could not find it). - - Several *_info classes specify an environment variable to specify - the locations of software. When setting the corresponding environment - variable to 'None' then the software will be ignored, even when it - is available in system. - -Global parameters: - system_info.search_static_first - search static libraries (.a) - in precedence to shared ones (.so, .sl) if enabled. - system_info.verbosity - output the results to stdout if enabled. - -The file 'site.cfg' is looked for in - -1) Directory of main setup.py file being run. -2) Home directory of user running the setup.py file as ~/.numpy-site.cfg -3) System wide directory (location of this file...) 
- -The first one found is used to get system configuration options The -format is that used by ConfigParser (i.e., Windows .INI style). The -section ALL is not intended for general use. - -Appropriate defaults are used if nothing is specified. - -The order of finding the locations of resources is the following: - 1. environment variable - 2. section in site.cfg - 3. DEFAULT section in site.cfg - 4. System default search paths (see ``default_*`` variables below). -Only the first complete match is returned. - -Currently, the following classes are available, along with their section names: - - Numeric_info:Numeric - _numpy_info:Numeric - _pkg_config_info:None - accelerate_info:accelerate - accelerate_lapack_info:accelerate - agg2_info:agg2 - amd_info:amd - atlas_3_10_blas_info:atlas - atlas_3_10_blas_threads_info:atlas - atlas_3_10_info:atlas - atlas_3_10_threads_info:atlas - atlas_blas_info:atlas - atlas_blas_threads_info:atlas - atlas_info:atlas - atlas_threads_info:atlas - blas64__opt_info:ALL # usage recommended (general ILP64 BLAS, 64_ symbol suffix) - blas_ilp64_opt_info:ALL # usage recommended (general ILP64 BLAS) - blas_ilp64_plain_opt_info:ALL # usage recommended (general ILP64 BLAS, no symbol suffix) - blas_info:blas - blas_mkl_info:mkl - blas_ssl2_info:ssl2 - blas_opt_info:ALL # usage recommended - blas_src_info:blas_src - blis_info:blis - boost_python_info:boost_python - dfftw_info:fftw - dfftw_threads_info:fftw - djbfft_info:djbfft - f2py_info:ALL - fft_opt_info:ALL - fftw2_info:fftw - fftw3_info:fftw3 - fftw_info:fftw - fftw_threads_info:fftw - flame_info:flame - freetype2_info:freetype2 - gdk_2_info:gdk_2 - gdk_info:gdk - gdk_pixbuf_2_info:gdk_pixbuf_2 - gdk_pixbuf_xlib_2_info:gdk_pixbuf_xlib_2 - gdk_x11_2_info:gdk_x11_2 - gtkp_2_info:gtkp_2 - gtkp_x11_2_info:gtkp_x11_2 - lapack64__opt_info:ALL # usage recommended (general ILP64 LAPACK, 64_ symbol suffix) - lapack_atlas_3_10_info:atlas - lapack_atlas_3_10_threads_info:atlas - lapack_atlas_info:atlas - 
lapack_atlas_threads_info:atlas - lapack_ilp64_opt_info:ALL # usage recommended (general ILP64 LAPACK) - lapack_ilp64_plain_opt_info:ALL # usage recommended (general ILP64 LAPACK, no symbol suffix) - lapack_info:lapack - lapack_mkl_info:mkl - lapack_ssl2_info:ssl2 - lapack_opt_info:ALL # usage recommended - lapack_src_info:lapack_src - mkl_info:mkl - ssl2_info:ssl2 - numarray_info:numarray - numerix_info:numerix - numpy_info:numpy - openblas64__info:openblas64_ - openblas64__lapack_info:openblas64_ - openblas_clapack_info:openblas - openblas_ilp64_info:openblas_ilp64 - openblas_ilp64_lapack_info:openblas_ilp64 - openblas_info:openblas - openblas_lapack_info:openblas - sfftw_info:fftw - sfftw_threads_info:fftw - system_info:ALL - umfpack_info:umfpack - wx_info:wx - x11_info:x11 - xft_info:xft - -Note that blas_opt_info and lapack_opt_info honor the NPY_BLAS_ORDER -and NPY_LAPACK_ORDER environment variables to determine the order in which -specific BLAS and LAPACK libraries are searched for. - -This search (or autodetection) can be bypassed by defining the environment -variables NPY_BLAS_LIBS and NPY_LAPACK_LIBS, which should then contain the -exact linker flags to use (language will be set to F77). Building against -Netlib BLAS/LAPACK or stub files, in order to be able to switch BLAS and LAPACK -implementations at runtime. If using this to build NumPy itself, it is -recommended to also define NPY_CBLAS_LIBS (assuming your BLAS library has a -CBLAS interface) to enable CBLAS usage for matrix multiplication (unoptimized -otherwise). 
- -Example: ----------- -[DEFAULT] -# default section -library_dirs = /usr/lib:/usr/local/lib:/opt/lib -include_dirs = /usr/include:/usr/local/include:/opt/include -src_dirs = /usr/local/src:/opt/src -# search static libraries (.a) in preference to shared ones (.so) -search_static_first = 0 - -[fftw] -libraries = rfftw, fftw - -[atlas] -library_dirs = /usr/lib/3dnow:/usr/lib/3dnow/atlas -# for overriding the names of the atlas libraries -libraries = lapack, f77blas, cblas, atlas - -[x11] -library_dirs = /usr/X11R6/lib -include_dirs = /usr/X11R6/include ----------- - -Note that the ``libraries`` key is the default setting for libraries. - -Authors: - Pearu Peterson , February 2002 - David M. Cooke , April 2002 - -Copyright 2002 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy (BSD style) license. See LICENSE.txt that came with -this distribution for specifics. - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. - -""" -import sys -import os -import re -import copy -import warnings -import subprocess -import textwrap - -from glob import glob -from functools import reduce -from configparser import NoOptionError -from configparser import RawConfigParser as ConfigParser -# It seems that some people are importing ConfigParser from here so is -# good to keep its class name. Use of RawConfigParser is needed in -# order to be able to load path names with percent in them, like -# `feature%2Fcool` which is common on git flow branch names. 
- -from distutils.errors import DistutilsError -from distutils.dist import Distribution -import sysconfig -from numpy.distutils import log -from distutils.util import get_platform - -from numpy.distutils.exec_command import ( - find_executable, filepath_from_subprocess_output, - ) -from numpy.distutils.misc_util import (is_sequence, is_string, - get_shared_lib_extension) -from numpy.distutils.command.config import config as cmd_config -from numpy.distutils import customized_ccompiler as _customized_ccompiler -from numpy.distutils import _shell_utils -import distutils.ccompiler -import tempfile -import shutil - -__all__ = ['system_info'] - -# Determine number of bits -import platform -_bits = {'32bit': 32, '64bit': 64} -platform_bits = _bits[platform.architecture()[0]] - - -global_compiler = None - -def customized_ccompiler(): - global global_compiler - if not global_compiler: - global_compiler = _customized_ccompiler() - return global_compiler - - -def _c_string_literal(s): - """ - Convert a python string into a literal suitable for inclusion into C code - """ - # only these three characters are forbidden in C strings - s = s.replace('\\', r'\\') - s = s.replace('"', r'\"') - s = s.replace('\n', r'\n') - return '"{}"'.format(s) - - -def libpaths(paths, bits): - """Return a list of library paths valid on 32 or 64 bit systems. - - Inputs: - paths : sequence - A sequence of strings (typically paths) - bits : int - An integer, the only valid values are 32 or 64. A ValueError exception - is raised otherwise. 
- - Examples: - - Consider a list of directories - >>> paths = ['/usr/X11R6/lib','/usr/X11/lib','/usr/lib'] - - For a 32-bit platform, this is already valid: - >>> np.distutils.system_info.libpaths(paths,32) - ['/usr/X11R6/lib', '/usr/X11/lib', '/usr/lib'] - - On 64 bits, we prepend the '64' postfix - >>> np.distutils.system_info.libpaths(paths,64) - ['/usr/X11R6/lib64', '/usr/X11R6/lib', '/usr/X11/lib64', '/usr/X11/lib', - '/usr/lib64', '/usr/lib'] - """ - if bits not in (32, 64): - raise ValueError("Invalid bit size in libpaths: 32 or 64 only") - - # Handle 32bit case - if bits == 32: - return paths - - # Handle 64bit case - out = [] - for p in paths: - out.extend([p + '64', p]) - - return out - - -if sys.platform == 'win32': - default_lib_dirs = ['C:\\', - os.path.join(sysconfig.get_config_var('exec_prefix'), - 'libs')] - default_runtime_dirs = [] - default_include_dirs = [] - default_src_dirs = ['.'] - default_x11_lib_dirs = [] - default_x11_include_dirs = [] - _include_dirs = [ - 'include', - 'include/suitesparse', - ] - _lib_dirs = [ - 'lib', - ] - - _include_dirs = [d.replace('/', os.sep) for d in _include_dirs] - _lib_dirs = [d.replace('/', os.sep) for d in _lib_dirs] - def add_system_root(library_root): - """Add a package manager root to the include directories""" - global default_lib_dirs - global default_include_dirs - - library_root = os.path.normpath(library_root) - - default_lib_dirs.extend( - os.path.join(library_root, d) for d in _lib_dirs) - default_include_dirs.extend( - os.path.join(library_root, d) for d in _include_dirs) - - # VCpkg is the de-facto package manager on windows for C/C++ - # libraries. If it is on the PATH, then we append its paths here. 
- vcpkg = shutil.which('vcpkg') - if vcpkg: - vcpkg_dir = os.path.dirname(vcpkg) - if platform.architecture()[0] == '32bit': - specifier = 'x86' - else: - specifier = 'x64' - - vcpkg_installed = os.path.join(vcpkg_dir, 'installed') - for vcpkg_root in [ - os.path.join(vcpkg_installed, specifier + '-windows'), - os.path.join(vcpkg_installed, specifier + '-windows-static'), - ]: - add_system_root(vcpkg_root) - - # Conda is another popular package manager that provides libraries - conda = shutil.which('conda') - if conda: - conda_dir = os.path.dirname(conda) - add_system_root(os.path.join(conda_dir, '..', 'Library')) - add_system_root(os.path.join(conda_dir, 'Library')) - -else: - default_lib_dirs = libpaths(['/usr/local/lib', '/opt/lib', '/usr/lib', - '/opt/local/lib', '/sw/lib'], platform_bits) - default_runtime_dirs = [] - default_include_dirs = ['/usr/local/include', - '/opt/include', - # path of umfpack under macports - '/opt/local/include/ufsparse', - '/opt/local/include', '/sw/include', - '/usr/include/suitesparse'] - default_src_dirs = ['.', '/usr/local/src', '/opt/src', '/sw/src'] - - default_x11_lib_dirs = libpaths(['/usr/X11R6/lib', '/usr/X11/lib', - '/usr/lib'], platform_bits) - default_x11_include_dirs = ['/usr/X11R6/include', '/usr/X11/include'] - - if os.path.exists('/usr/lib/X11'): - globbed_x11_dir = glob('/usr/lib/*/libX11.so') - if globbed_x11_dir: - x11_so_dir = os.path.split(globbed_x11_dir[0])[0] - default_x11_lib_dirs.extend([x11_so_dir, '/usr/lib/X11']) - default_x11_include_dirs.extend(['/usr/lib/X11/include', - '/usr/include/X11']) - - with open(os.devnull, 'w') as tmp: - try: - p = subprocess.Popen(["gcc", "-print-multiarch"], stdout=subprocess.PIPE, - stderr=tmp) - except (OSError, DistutilsError): - # OSError if gcc is not installed, or SandboxViolation (DistutilsError - # subclass) if an old setuptools bug is triggered (see gh-3160). 
- pass - else: - triplet = str(p.communicate()[0].decode().strip()) - if p.returncode == 0: - # gcc supports the "-print-multiarch" option - default_x11_lib_dirs += [os.path.join("/usr/lib/", triplet)] - default_lib_dirs += [os.path.join("/usr/lib/", triplet)] - - -if os.path.join(sys.prefix, 'lib') not in default_lib_dirs: - default_lib_dirs.insert(0, os.path.join(sys.prefix, 'lib')) - default_include_dirs.append(os.path.join(sys.prefix, 'include')) - default_src_dirs.append(os.path.join(sys.prefix, 'src')) - -default_lib_dirs = [_m for _m in default_lib_dirs if os.path.isdir(_m)] -default_runtime_dirs = [_m for _m in default_runtime_dirs if os.path.isdir(_m)] -default_include_dirs = [_m for _m in default_include_dirs if os.path.isdir(_m)] -default_src_dirs = [_m for _m in default_src_dirs if os.path.isdir(_m)] - -so_ext = get_shared_lib_extension() - - -def get_standard_file(fname): - """Returns a list of files named 'fname' from - 1) System-wide directory (directory-location of this module) - 2) Users HOME directory (os.environ['HOME']) - 3) Local directory - """ - # System-wide file - filenames = [] - try: - f = __file__ - except NameError: - f = sys.argv[0] - sysfile = os.path.join(os.path.split(os.path.abspath(f))[0], - fname) - if os.path.isfile(sysfile): - filenames.append(sysfile) - - # Home directory - # And look for the user config file - try: - f = os.path.expanduser('~') - except KeyError: - pass - else: - user_file = os.path.join(f, fname) - if os.path.isfile(user_file): - filenames.append(user_file) - - # Local file - if os.path.isfile(fname): - filenames.append(os.path.abspath(fname)) - - return filenames - - -def _parse_env_order(base_order, env): - """ Parse an environment variable `env` by splitting with "," and only returning elements from `base_order` - - This method will sequence the environment variable and check for their - individual elements in `base_order`. 
- - The items in the environment variable may be negated via '^item' or '!itema,itemb'. - It must start with ^/! to negate all options. - - Raises - ------ - ValueError: for mixed negated and non-negated orders or multiple negated orders - - Parameters - ---------- - base_order : list of str - the base list of orders - env : str - the environment variable to be parsed, if none is found, `base_order` is returned - - Returns - ------- - allow_order : list of str - allowed orders in lower-case - unknown_order : list of str - for values not overlapping with `base_order` - """ - order_str = os.environ.get(env, None) - - # ensure all base-orders are lower-case (for easier comparison) - base_order = [order.lower() for order in base_order] - if order_str is None: - return base_order, [] - - neg = order_str.startswith(('^', '!')) - # Check format - order_str_l = list(order_str) - sum_neg = order_str_l.count('^') + order_str_l.count('!') - if neg: - if sum_neg > 1: - raise ValueError(f"Environment variable '{env}' may only contain a single (prefixed) negation: {order_str}") - # remove prefix - order_str = order_str[1:] - elif sum_neg > 0: - raise ValueError(f"Environment variable '{env}' may not mix negated an non-negated items: {order_str}") - - # Split and lower case - orders = order_str.lower().split(',') - - # to inform callee about non-overlapping elements - unknown_order = [] - - # if negated, we have to remove from the order - if neg: - allow_order = base_order.copy() - - for order in orders: - if not order: - continue - - if order not in base_order: - unknown_order.append(order) - continue - - if order in allow_order: - allow_order.remove(order) - - else: - allow_order = [] - - for order in orders: - if not order: - continue - - if order not in base_order: - unknown_order.append(order) - continue - - if order not in allow_order: - allow_order.append(order) - - return allow_order, unknown_order - - -def get_info(name, notfound_action=0): - """ - notfound_action: - 0 - 
do nothing - 1 - display warning message - 2 - raise error - """ - cl = {'armpl': armpl_info, - 'blas_armpl': blas_armpl_info, - 'lapack_armpl': lapack_armpl_info, - 'fftw3_armpl': fftw3_armpl_info, - 'atlas': atlas_info, # use lapack_opt or blas_opt instead - 'atlas_threads': atlas_threads_info, # ditto - 'atlas_blas': atlas_blas_info, - 'atlas_blas_threads': atlas_blas_threads_info, - 'lapack_atlas': lapack_atlas_info, # use lapack_opt instead - 'lapack_atlas_threads': lapack_atlas_threads_info, # ditto - 'atlas_3_10': atlas_3_10_info, # use lapack_opt or blas_opt instead - 'atlas_3_10_threads': atlas_3_10_threads_info, # ditto - 'atlas_3_10_blas': atlas_3_10_blas_info, - 'atlas_3_10_blas_threads': atlas_3_10_blas_threads_info, - 'lapack_atlas_3_10': lapack_atlas_3_10_info, # use lapack_opt instead - 'lapack_atlas_3_10_threads': lapack_atlas_3_10_threads_info, # ditto - 'flame': flame_info, # use lapack_opt instead - 'mkl': mkl_info, - 'ssl2': ssl2_info, - # openblas which may or may not have embedded lapack - 'openblas': openblas_info, # use blas_opt instead - # openblas with embedded lapack - 'openblas_lapack': openblas_lapack_info, # use blas_opt instead - 'openblas_clapack': openblas_clapack_info, # use blas_opt instead - 'blis': blis_info, # use blas_opt instead - 'lapack_mkl': lapack_mkl_info, # use lapack_opt instead - 'blas_mkl': blas_mkl_info, # use blas_opt instead - 'lapack_ssl2': lapack_ssl2_info, - 'blas_ssl2': blas_ssl2_info, - 'accelerate': accelerate_info, # use blas_opt instead - 'accelerate_lapack': accelerate_lapack_info, - 'openblas64_': openblas64__info, - 'openblas64__lapack': openblas64__lapack_info, - 'openblas_ilp64': openblas_ilp64_info, - 'openblas_ilp64_lapack': openblas_ilp64_lapack_info, - 'x11': x11_info, - 'fft_opt': fft_opt_info, - 'fftw': fftw_info, - 'fftw2': fftw2_info, - 'fftw3': fftw3_info, - 'dfftw': dfftw_info, - 'sfftw': sfftw_info, - 'fftw_threads': fftw_threads_info, - 'dfftw_threads': dfftw_threads_info, - 
'sfftw_threads': sfftw_threads_info, - 'djbfft': djbfft_info, - 'blas': blas_info, # use blas_opt instead - 'lapack': lapack_info, # use lapack_opt instead - 'lapack_src': lapack_src_info, - 'blas_src': blas_src_info, - 'numpy': numpy_info, - 'f2py': f2py_info, - 'Numeric': Numeric_info, - 'numeric': Numeric_info, - 'numarray': numarray_info, - 'numerix': numerix_info, - 'lapack_opt': lapack_opt_info, - 'lapack_ilp64_opt': lapack_ilp64_opt_info, - 'lapack_ilp64_plain_opt': lapack_ilp64_plain_opt_info, - 'lapack64__opt': lapack64__opt_info, - 'blas_opt': blas_opt_info, - 'blas_ilp64_opt': blas_ilp64_opt_info, - 'blas_ilp64_plain_opt': blas_ilp64_plain_opt_info, - 'blas64__opt': blas64__opt_info, - 'boost_python': boost_python_info, - 'agg2': agg2_info, - 'wx': wx_info, - 'gdk_pixbuf_xlib_2': gdk_pixbuf_xlib_2_info, - 'gdk-pixbuf-xlib-2.0': gdk_pixbuf_xlib_2_info, - 'gdk_pixbuf_2': gdk_pixbuf_2_info, - 'gdk-pixbuf-2.0': gdk_pixbuf_2_info, - 'gdk': gdk_info, - 'gdk_2': gdk_2_info, - 'gdk-2.0': gdk_2_info, - 'gdk_x11_2': gdk_x11_2_info, - 'gdk-x11-2.0': gdk_x11_2_info, - 'gtkp_x11_2': gtkp_x11_2_info, - 'gtk+-x11-2.0': gtkp_x11_2_info, - 'gtkp_2': gtkp_2_info, - 'gtk+-2.0': gtkp_2_info, - 'xft': xft_info, - 'freetype2': freetype2_info, - 'umfpack': umfpack_info, - 'amd': amd_info, - }.get(name.lower(), system_info) - return cl().get_info(notfound_action) - - -class NotFoundError(DistutilsError): - """Some third-party program or library is not found.""" - - -class AliasedOptionError(DistutilsError): - """ - Aliases entries in config files should not be existing. - In section '{section}' we found multiple appearances of options {options}.""" - - -class AtlasNotFoundError(NotFoundError): - """ - Atlas (http://github.com/math-atlas/math-atlas) libraries not found. 
- Directories to search for the libraries can be specified in the - numpy/distutils/site.cfg file (section [atlas]) or by setting - the ATLAS environment variable.""" - - -class FlameNotFoundError(NotFoundError): - """ - FLAME (http://www.cs.utexas.edu/~flame/web/) libraries not found. - Directories to search for the libraries can be specified in the - numpy/distutils/site.cfg file (section [flame]).""" - - -class LapackNotFoundError(NotFoundError): - """ - Lapack (http://www.netlib.org/lapack/) libraries not found. - Directories to search for the libraries can be specified in the - numpy/distutils/site.cfg file (section [lapack]) or by setting - the LAPACK environment variable.""" - - -class LapackSrcNotFoundError(LapackNotFoundError): - """ - Lapack (http://www.netlib.org/lapack/) sources not found. - Directories to search for the sources can be specified in the - numpy/distutils/site.cfg file (section [lapack_src]) or by setting - the LAPACK_SRC environment variable.""" - - -class LapackILP64NotFoundError(NotFoundError): - """ - 64-bit Lapack libraries not found. - Known libraries in numpy/distutils/site.cfg file are: - openblas64_, openblas_ilp64 - """ - -class BlasOptNotFoundError(NotFoundError): - """ - Optimized (vendor) Blas libraries are not found. - Falls back to netlib Blas library which has worse performance. - A better performance should be easily gained by switching - Blas library.""" - -class BlasNotFoundError(NotFoundError): - """ - Blas (http://www.netlib.org/blas/) libraries not found. - Directories to search for the libraries can be specified in the - numpy/distutils/site.cfg file (section [blas]) or by setting - the BLAS environment variable.""" - -class BlasILP64NotFoundError(NotFoundError): - """ - 64-bit Blas libraries not found. - Known libraries in numpy/distutils/site.cfg file are: - openblas64_, openblas_ilp64 - """ - -class BlasSrcNotFoundError(BlasNotFoundError): - """ - Blas (http://www.netlib.org/blas/) sources not found. 
- Directories to search for the sources can be specified in the - numpy/distutils/site.cfg file (section [blas_src]) or by setting - the BLAS_SRC environment variable.""" - - -class FFTWNotFoundError(NotFoundError): - """ - FFTW (http://www.fftw.org/) libraries not found. - Directories to search for the libraries can be specified in the - numpy/distutils/site.cfg file (section [fftw]) or by setting - the FFTW environment variable.""" - - -class DJBFFTNotFoundError(NotFoundError): - """ - DJBFFT (https://cr.yp.to/djbfft.html) libraries not found. - Directories to search for the libraries can be specified in the - numpy/distutils/site.cfg file (section [djbfft]) or by setting - the DJBFFT environment variable.""" - - -class NumericNotFoundError(NotFoundError): - """ - Numeric (https://www.numpy.org/) module not found. - Get it from above location, install it, and retry setup.py.""" - - -class X11NotFoundError(NotFoundError): - """X11 libraries not found.""" - - -class UmfpackNotFoundError(NotFoundError): - """ - UMFPACK sparse solver (https://www.cise.ufl.edu/research/sparse/umfpack/) - not found. Directories to search for the libraries can be specified in the - numpy/distutils/site.cfg file (section [umfpack]) or by setting - the UMFPACK environment variable.""" - - -class system_info: - - """ get_info() is the only public method. Don't use others. - """ - dir_env_var = None - # XXX: search_static_first is disabled by default, may disappear in - # future unless it is proved to be useful. - search_static_first = 0 - # The base-class section name is a random word "ALL" and is not really - # intended for general use. It cannot be None nor can it be DEFAULT as - # these break the ConfigParser. 
See gh-15338 - section = 'ALL' - saved_results = {} - - notfounderror = NotFoundError - - def __init__(self, - default_lib_dirs=default_lib_dirs, - default_include_dirs=default_include_dirs, - ): - self.__class__.info = {} - self.local_prefixes = [] - defaults = {'library_dirs': os.pathsep.join(default_lib_dirs), - 'include_dirs': os.pathsep.join(default_include_dirs), - 'runtime_library_dirs': os.pathsep.join(default_runtime_dirs), - 'rpath': '', - 'src_dirs': os.pathsep.join(default_src_dirs), - 'search_static_first': str(self.search_static_first), - 'extra_compile_args': '', 'extra_link_args': ''} - self.cp = ConfigParser(defaults) - self.files = [] - self.files.extend(get_standard_file('.numpy-site.cfg')) - self.files.extend(get_standard_file('site.cfg')) - self.parse_config_files() - - if self.section is not None: - self.search_static_first = self.cp.getboolean( - self.section, 'search_static_first') - assert isinstance(self.search_static_first, int) - - def parse_config_files(self): - self.cp.read(self.files) - if not self.cp.has_section(self.section): - if self.section is not None: - self.cp.add_section(self.section) - - def calc_libraries_info(self): - libs = self.get_libraries() - dirs = self.get_lib_dirs() - # The extensions use runtime_library_dirs - r_dirs = self.get_runtime_lib_dirs() - # Intrinsic distutils use rpath, we simply append both entries - # as though they were one entry - r_dirs.extend(self.get_runtime_lib_dirs(key='rpath')) - info = {} - for lib in libs: - i = self.check_libs(dirs, [lib]) - if i is not None: - dict_append(info, **i) - else: - log.info('Library %s was not found. 
Ignoring' % (lib)) - - if r_dirs: - i = self.check_libs(r_dirs, [lib]) - if i is not None: - # Swap library keywords found to runtime_library_dirs - # the libraries are insisting on the user having defined - # them using the library_dirs, and not necessarily by - # runtime_library_dirs - del i['libraries'] - i['runtime_library_dirs'] = i.pop('library_dirs') - dict_append(info, **i) - else: - log.info('Runtime library %s was not found. Ignoring' % (lib)) - - return info - - def set_info(self, **info): - if info: - lib_info = self.calc_libraries_info() - dict_append(info, **lib_info) - # Update extra information - extra_info = self.calc_extra_info() - dict_append(info, **extra_info) - self.saved_results[self.__class__.__name__] = info - - def get_option_single(self, *options): - """ Ensure that only one of `options` are found in the section - - Parameters - ---------- - *options : list of str - a list of options to be found in the section (``self.section``) - - Returns - ------- - str : - the option that is uniquely found in the section - - Raises - ------ - AliasedOptionError : - in case more than one of the options are found - """ - found = [self.cp.has_option(self.section, opt) for opt in options] - if sum(found) == 1: - return options[found.index(True)] - elif sum(found) == 0: - # nothing is found anyways - return options[0] - - # Else we have more than 1 key found - if AliasedOptionError.__doc__ is None: - raise AliasedOptionError() - raise AliasedOptionError(AliasedOptionError.__doc__.format( - section=self.section, options='[{}]'.format(', '.join(options)))) - - - def has_info(self): - return self.__class__.__name__ in self.saved_results - - def calc_extra_info(self): - """ Updates the information in the current information with - respect to these flags: - extra_compile_args - extra_link_args - """ - info = {} - for key in ['extra_compile_args', 'extra_link_args']: - # Get values - opt = self.cp.get(self.section, key) - opt = 
_shell_utils.NativeParser.split(opt) - if opt: - tmp = {key: opt} - dict_append(info, **tmp) - return info - - def get_info(self, notfound_action=0): - """ Return a dictionary with items that are compatible - with numpy.distutils.setup keyword arguments. - """ - flag = 0 - if not self.has_info(): - flag = 1 - log.info(self.__class__.__name__ + ':') - if hasattr(self, 'calc_info'): - self.calc_info() - if notfound_action: - if not self.has_info(): - if notfound_action == 1: - warnings.warn(self.notfounderror.__doc__, stacklevel=2) - elif notfound_action == 2: - raise self.notfounderror(self.notfounderror.__doc__) - else: - raise ValueError(repr(notfound_action)) - - if not self.has_info(): - log.info(' NOT AVAILABLE') - self.set_info() - else: - log.info(' FOUND:') - - res = self.saved_results.get(self.__class__.__name__) - if log.get_threshold() <= log.INFO and flag: - for k, v in res.items(): - v = str(v) - if k in ['sources', 'libraries'] and len(v) > 270: - v = v[:120] + '...\n...\n...' 
+ v[-120:] - log.info(' %s = %s', k, v) - log.info('') - - return copy.deepcopy(res) - - def get_paths(self, section, key): - dirs = self.cp.get(section, key).split(os.pathsep) - env_var = self.dir_env_var - if env_var: - if is_sequence(env_var): - e0 = env_var[-1] - for e in env_var: - if e in os.environ: - e0 = e - break - if not env_var[0] == e0: - log.info('Setting %s=%s' % (env_var[0], e0)) - env_var = e0 - if env_var and env_var in os.environ: - d = os.environ[env_var] - if d == 'None': - log.info('Disabled %s: %s', - self.__class__.__name__, '(%s is None)' - % (env_var,)) - return [] - if os.path.isfile(d): - dirs = [os.path.dirname(d)] + dirs - l = getattr(self, '_lib_names', []) - if len(l) == 1: - b = os.path.basename(d) - b = os.path.splitext(b)[0] - if b[:3] == 'lib': - log.info('Replacing _lib_names[0]==%r with %r' \ - % (self._lib_names[0], b[3:])) - self._lib_names[0] = b[3:] - else: - ds = d.split(os.pathsep) - ds2 = [] - for d in ds: - if os.path.isdir(d): - ds2.append(d) - for dd in ['include', 'lib']: - d1 = os.path.join(d, dd) - if os.path.isdir(d1): - ds2.append(d1) - dirs = ds2 + dirs - default_dirs = self.cp.get(self.section, key).split(os.pathsep) - dirs.extend(default_dirs) - ret = [] - for d in dirs: - if len(d) > 0 and not os.path.isdir(d): - warnings.warn('Specified path %s is invalid.' 
% d, stacklevel=2) - continue - - if d not in ret: - ret.append(d) - - log.debug('( %s = %s )', key, ':'.join(ret)) - return ret - - def get_lib_dirs(self, key='library_dirs'): - return self.get_paths(self.section, key) - - def get_runtime_lib_dirs(self, key='runtime_library_dirs'): - path = self.get_paths(self.section, key) - if path == ['']: - path = [] - return path - - def get_include_dirs(self, key='include_dirs'): - return self.get_paths(self.section, key) - - def get_src_dirs(self, key='src_dirs'): - return self.get_paths(self.section, key) - - def get_libs(self, key, default): - try: - libs = self.cp.get(self.section, key) - except NoOptionError: - if not default: - return [] - if is_string(default): - return [default] - return default - return [b for b in [a.strip() for a in libs.split(',')] if b] - - def get_libraries(self, key='libraries'): - if hasattr(self, '_lib_names'): - return self.get_libs(key, default=self._lib_names) - else: - return self.get_libs(key, '') - - def library_extensions(self): - c = customized_ccompiler() - static_exts = [] - if c.compiler_type != 'msvc': - # MSVC doesn't understand binutils - static_exts.append('.a') - if sys.platform == 'win32': - static_exts.append('.lib') # .lib is used by MSVC and others - if self.search_static_first: - exts = static_exts + [so_ext] - else: - exts = [so_ext] + static_exts - if sys.platform == 'cygwin': - exts.append('.dll.a') - if sys.platform == 'darwin': - exts.append('.dylib') - return exts - - def check_libs(self, lib_dirs, libs, opt_libs=[]): - """If static or shared libraries are available then return - their info dictionary. - - Checks for all libraries as shared libraries first, then - static (or vice versa if self.search_static_first is True). 
- """ - exts = self.library_extensions() - info = None - for ext in exts: - info = self._check_libs(lib_dirs, libs, opt_libs, [ext]) - if info is not None: - break - if not info: - log.info(' libraries %s not found in %s', ','.join(libs), - lib_dirs) - return info - - def check_libs2(self, lib_dirs, libs, opt_libs=[]): - """If static or shared libraries are available then return - their info dictionary. - - Checks each library for shared or static. - """ - exts = self.library_extensions() - info = self._check_libs(lib_dirs, libs, opt_libs, exts) - if not info: - log.info(' libraries %s not found in %s', ','.join(libs), - lib_dirs) - - return info - - def _find_lib(self, lib_dir, lib, exts): - assert is_string(lib_dir) - # under windows first try without 'lib' prefix - if sys.platform == 'win32': - lib_prefixes = ['', 'lib'] - else: - lib_prefixes = ['lib'] - # for each library name, see if we can find a file for it. - for ext in exts: - for prefix in lib_prefixes: - p = self.combine_paths(lib_dir, prefix + lib + ext) - if p: - break - if p: - assert len(p) == 1 - # ??? splitext on p[0] would do this for cygwin - # doesn't seem correct - if ext == '.dll.a': - lib += '.dll' - if ext == '.lib': - lib = prefix + lib - return lib - - return False - - def _find_libs(self, lib_dirs, libs, exts): - # make sure we preserve the order of libs, as it can be important - found_dirs, found_libs = [], [] - for lib in libs: - for lib_dir in lib_dirs: - found_lib = self._find_lib(lib_dir, lib, exts) - if found_lib: - found_libs.append(found_lib) - if lib_dir not in found_dirs: - found_dirs.append(lib_dir) - break - return found_dirs, found_libs - - def _check_libs(self, lib_dirs, libs, opt_libs, exts): - """Find mandatory and optional libs in expected paths. - - Missing optional libraries are silently forgotten. 
- """ - if not is_sequence(lib_dirs): - lib_dirs = [lib_dirs] - # First, try to find the mandatory libraries - found_dirs, found_libs = self._find_libs(lib_dirs, libs, exts) - if len(found_libs) > 0 and len(found_libs) == len(libs): - # Now, check for optional libraries - opt_found_dirs, opt_found_libs = self._find_libs(lib_dirs, opt_libs, exts) - found_libs.extend(opt_found_libs) - for lib_dir in opt_found_dirs: - if lib_dir not in found_dirs: - found_dirs.append(lib_dir) - info = {'libraries': found_libs, 'library_dirs': found_dirs} - return info - else: - return None - - def combine_paths(self, *args): - """Return a list of existing paths composed by all combinations - of items from the arguments. - """ - return combine_paths(*args) - - -class fft_opt_info(system_info): - - def calc_info(self): - info = {} - fftw_info = get_info('fftw3') or get_info('fftw2') or get_info('dfftw') - djbfft_info = get_info('djbfft') - if fftw_info: - dict_append(info, **fftw_info) - if djbfft_info: - dict_append(info, **djbfft_info) - self.set_info(**info) - return - - -class fftw_info(system_info): - #variables to override - section = 'fftw' - dir_env_var = 'FFTW' - notfounderror = FFTWNotFoundError - ver_info = [{'name':'fftw3', - 'libs':['fftw3'], - 'includes':['fftw3.h'], - 'macros':[('SCIPY_FFTW3_H', None)]}, - {'name':'fftw2', - 'libs':['rfftw', 'fftw'], - 'includes':['fftw.h', 'rfftw.h'], - 'macros':[('SCIPY_FFTW_H', None)]}] - - def calc_ver_info(self, ver_param): - """Returns True on successful version detection, else False""" - lib_dirs = self.get_lib_dirs() - incl_dirs = self.get_include_dirs() - - opt = self.get_option_single(self.section + '_libs', 'libraries') - libs = self.get_libs(opt, ver_param['libs']) - info = self.check_libs(lib_dirs, libs) - if info is not None: - flag = 0 - for d in incl_dirs: - if len(self.combine_paths(d, ver_param['includes'])) \ - == len(ver_param['includes']): - dict_append(info, include_dirs=[d]) - flag = 1 - break - if flag: - 
dict_append(info, define_macros=ver_param['macros']) - else: - info = None - if info is not None: - self.set_info(**info) - return True - else: - log.info(' %s not found' % (ver_param['name'])) - return False - - def calc_info(self): - for i in self.ver_info: - if self.calc_ver_info(i): - break - - -class fftw2_info(fftw_info): - #variables to override - section = 'fftw' - dir_env_var = 'FFTW' - notfounderror = FFTWNotFoundError - ver_info = [{'name':'fftw2', - 'libs':['rfftw', 'fftw'], - 'includes':['fftw.h', 'rfftw.h'], - 'macros':[('SCIPY_FFTW_H', None)]} - ] - - -class fftw3_info(fftw_info): - #variables to override - section = 'fftw3' - dir_env_var = 'FFTW3' - notfounderror = FFTWNotFoundError - ver_info = [{'name':'fftw3', - 'libs':['fftw3'], - 'includes':['fftw3.h'], - 'macros':[('SCIPY_FFTW3_H', None)]}, - ] - - -class fftw3_armpl_info(fftw_info): - section = 'fftw3' - dir_env_var = 'ARMPL_DIR' - notfounderror = FFTWNotFoundError - ver_info = [{'name': 'fftw3', - 'libs': ['armpl_lp64_mp'], - 'includes': ['fftw3.h'], - 'macros': [('SCIPY_FFTW3_H', None)]}] - - -class dfftw_info(fftw_info): - section = 'fftw' - dir_env_var = 'FFTW' - ver_info = [{'name':'dfftw', - 'libs':['drfftw', 'dfftw'], - 'includes':['dfftw.h', 'drfftw.h'], - 'macros':[('SCIPY_DFFTW_H', None)]}] - - -class sfftw_info(fftw_info): - section = 'fftw' - dir_env_var = 'FFTW' - ver_info = [{'name':'sfftw', - 'libs':['srfftw', 'sfftw'], - 'includes':['sfftw.h', 'srfftw.h'], - 'macros':[('SCIPY_SFFTW_H', None)]}] - - -class fftw_threads_info(fftw_info): - section = 'fftw' - dir_env_var = 'FFTW' - ver_info = [{'name':'fftw threads', - 'libs':['rfftw_threads', 'fftw_threads'], - 'includes':['fftw_threads.h', 'rfftw_threads.h'], - 'macros':[('SCIPY_FFTW_THREADS_H', None)]}] - - -class dfftw_threads_info(fftw_info): - section = 'fftw' - dir_env_var = 'FFTW' - ver_info = [{'name':'dfftw threads', - 'libs':['drfftw_threads', 'dfftw_threads'], - 'includes':['dfftw_threads.h', 'drfftw_threads.h'], - 
'macros':[('SCIPY_DFFTW_THREADS_H', None)]}] - - -class sfftw_threads_info(fftw_info): - section = 'fftw' - dir_env_var = 'FFTW' - ver_info = [{'name':'sfftw threads', - 'libs':['srfftw_threads', 'sfftw_threads'], - 'includes':['sfftw_threads.h', 'srfftw_threads.h'], - 'macros':[('SCIPY_SFFTW_THREADS_H', None)]}] - - -class djbfft_info(system_info): - section = 'djbfft' - dir_env_var = 'DJBFFT' - notfounderror = DJBFFTNotFoundError - - def get_paths(self, section, key): - pre_dirs = system_info.get_paths(self, section, key) - dirs = [] - for d in pre_dirs: - dirs.extend(self.combine_paths(d, ['djbfft']) + [d]) - return [d for d in dirs if os.path.isdir(d)] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - incl_dirs = self.get_include_dirs() - info = None - for d in lib_dirs: - p = self.combine_paths(d, ['djbfft.a']) - if p: - info = {'extra_objects': p} - break - p = self.combine_paths(d, ['libdjbfft.a', 'libdjbfft' + so_ext]) - if p: - info = {'libraries': ['djbfft'], 'library_dirs': [d]} - break - if info is None: - return - for d in incl_dirs: - if len(self.combine_paths(d, ['fftc8.h', 'fftfreq.h'])) == 2: - dict_append(info, include_dirs=[d], - define_macros=[('SCIPY_DJBFFT_H', None)]) - self.set_info(**info) - return - return - - -class mkl_info(system_info): - section = 'mkl' - dir_env_var = 'MKLROOT' - _lib_mkl = ['mkl_rt'] - - def get_mkl_rootdir(self): - mklroot = os.environ.get('MKLROOT', None) - if mklroot is not None: - return mklroot - paths = os.environ.get('LD_LIBRARY_PATH', '').split(os.pathsep) - ld_so_conf = '/etc/ld.so.conf' - if os.path.isfile(ld_so_conf): - with open(ld_so_conf) as f: - for d in f: - d = d.strip() - if d: - paths.append(d) - intel_mkl_dirs = [] - for path in paths: - path_atoms = path.split(os.sep) - for m in path_atoms: - if m.startswith('mkl'): - d = os.sep.join(path_atoms[:path_atoms.index(m) + 2]) - intel_mkl_dirs.append(d) - break - for d in paths: - dirs = glob(os.path.join(d, 'mkl', '*')) - dirs += 
glob(os.path.join(d, 'mkl*')) - for sub_dir in dirs: - if os.path.isdir(os.path.join(sub_dir, 'lib')): - return sub_dir - return None - - def __init__(self): - mklroot = self.get_mkl_rootdir() - if mklroot is None: - system_info.__init__(self) - else: - from .cpuinfo import cpu - if cpu.is_Itanium(): - plt = '64' - elif cpu.is_Intel() and cpu.is_64bit(): - plt = 'intel64' - else: - plt = '32' - system_info.__init__( - self, - default_lib_dirs=[os.path.join(mklroot, 'lib', plt)], - default_include_dirs=[os.path.join(mklroot, 'include')]) - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - incl_dirs = self.get_include_dirs() - opt = self.get_option_single('mkl_libs', 'libraries') - mkl_libs = self.get_libs(opt, self._lib_mkl) - info = self.check_libs2(lib_dirs, mkl_libs) - if info is None: - return - dict_append(info, - define_macros=[('SCIPY_MKL_H', None), - ('HAVE_CBLAS', None)], - include_dirs=incl_dirs) - if sys.platform == 'win32': - pass # win32 has no pthread library - else: - dict_append(info, libraries=['pthread']) - self.set_info(**info) - - -class lapack_mkl_info(mkl_info): - pass - - -class blas_mkl_info(mkl_info): - pass - - -class ssl2_info(system_info): - section = 'ssl2' - dir_env_var = 'SSL2_DIR' - # Multi-threaded version. Python itself must be built by Fujitsu compiler. 
- _lib_ssl2 = ['fjlapackexsve'] - # Single-threaded version - #_lib_ssl2 = ['fjlapacksve'] - - def get_tcsds_rootdir(self): - tcsdsroot = os.environ.get('TCSDS_PATH', None) - if tcsdsroot is not None: - return tcsdsroot - return None - - def __init__(self): - tcsdsroot = self.get_tcsds_rootdir() - if tcsdsroot is None: - system_info.__init__(self) - else: - system_info.__init__( - self, - default_lib_dirs=[os.path.join(tcsdsroot, 'lib64')], - default_include_dirs=[os.path.join(tcsdsroot, - 'clang-comp/include')]) - - def calc_info(self): - tcsdsroot = self.get_tcsds_rootdir() - - lib_dirs = self.get_lib_dirs() - if lib_dirs is None: - lib_dirs = os.path.join(tcsdsroot, 'lib64') - - incl_dirs = self.get_include_dirs() - if incl_dirs is None: - incl_dirs = os.path.join(tcsdsroot, 'clang-comp/include') - - ssl2_libs = self.get_libs('ssl2_libs', self._lib_ssl2) - - info = self.check_libs2(lib_dirs, ssl2_libs) - if info is None: - return - dict_append(info, - define_macros=[('HAVE_CBLAS', None), - ('HAVE_SSL2', 1)], - include_dirs=incl_dirs,) - self.set_info(**info) - - -class lapack_ssl2_info(ssl2_info): - pass - - -class blas_ssl2_info(ssl2_info): - pass - - - -class armpl_info(system_info): - section = 'armpl' - dir_env_var = 'ARMPL_DIR' - _lib_armpl = ['armpl_lp64_mp'] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - incl_dirs = self.get_include_dirs() - armpl_libs = self.get_libs('armpl_libs', self._lib_armpl) - info = self.check_libs2(lib_dirs, armpl_libs) - if info is None: - return - dict_append(info, - define_macros=[('SCIPY_MKL_H', None), - ('HAVE_CBLAS', None)], - include_dirs=incl_dirs) - self.set_info(**info) - -class lapack_armpl_info(armpl_info): - pass - -class blas_armpl_info(armpl_info): - pass - - -class atlas_info(system_info): - section = 'atlas' - dir_env_var = 'ATLAS' - _lib_names = ['f77blas', 'cblas'] - if sys.platform[:7] == 'freebsd': - _lib_atlas = ['atlas_r'] - _lib_lapack = ['alapack_r'] - else: - _lib_atlas = ['atlas'] - 
_lib_lapack = ['lapack'] - - notfounderror = AtlasNotFoundError - - def get_paths(self, section, key): - pre_dirs = system_info.get_paths(self, section, key) - dirs = [] - for d in pre_dirs: - dirs.extend(self.combine_paths(d, ['atlas*', 'ATLAS*', - 'sse', '3dnow', 'sse2']) + [d]) - return [d for d in dirs if os.path.isdir(d)] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - info = {} - opt = self.get_option_single('atlas_libs', 'libraries') - atlas_libs = self.get_libs(opt, self._lib_names + self._lib_atlas) - lapack_libs = self.get_libs('lapack_libs', self._lib_lapack) - atlas = None - lapack = None - atlas_1 = None - for d in lib_dirs: - atlas = self.check_libs2(d, atlas_libs, []) - if atlas is not None: - lib_dirs2 = [d] + self.combine_paths(d, ['atlas*', 'ATLAS*']) - lapack = self.check_libs2(lib_dirs2, lapack_libs, []) - if lapack is not None: - break - if atlas: - atlas_1 = atlas - log.info(self.__class__) - if atlas is None: - atlas = atlas_1 - if atlas is None: - return - include_dirs = self.get_include_dirs() - h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None]) - h = h[0] - if h: - h = os.path.dirname(h) - dict_append(info, include_dirs=[h]) - info['language'] = 'c' - if lapack is not None: - dict_append(info, **lapack) - dict_append(info, **atlas) - elif 'lapack_atlas' in atlas['libraries']: - dict_append(info, **atlas) - dict_append(info, - define_macros=[('ATLAS_WITH_LAPACK_ATLAS', None)]) - self.set_info(**info) - return - else: - dict_append(info, **atlas) - dict_append(info, define_macros=[('ATLAS_WITHOUT_LAPACK', None)]) - message = textwrap.dedent(""" - ********************************************************************* - Could not find lapack library within the ATLAS installation. - ********************************************************************* - """) - warnings.warn(message, stacklevel=2) - self.set_info(**info) - return - - # Check if lapack library is complete, only warn if it is not. 
- lapack_dir = lapack['library_dirs'][0] - lapack_name = lapack['libraries'][0] - lapack_lib = None - lib_prefixes = ['lib'] - if sys.platform == 'win32': - lib_prefixes.append('') - for e in self.library_extensions(): - for prefix in lib_prefixes: - fn = os.path.join(lapack_dir, prefix + lapack_name + e) - if os.path.exists(fn): - lapack_lib = fn - break - if lapack_lib: - break - if lapack_lib is not None: - sz = os.stat(lapack_lib)[6] - if sz <= 4000 * 1024: - message = textwrap.dedent(""" - ********************************************************************* - Lapack library (from ATLAS) is probably incomplete: - size of %s is %sk (expected >4000k) - - Follow the instructions in the KNOWN PROBLEMS section of the file - numpy/INSTALL.txt. - ********************************************************************* - """) % (lapack_lib, sz / 1024) - warnings.warn(message, stacklevel=2) - else: - info['language'] = 'f77' - - atlas_version, atlas_extra_info = get_atlas_version(**atlas) - dict_append(info, **atlas_extra_info) - - self.set_info(**info) - - -class atlas_blas_info(atlas_info): - _lib_names = ['f77blas', 'cblas'] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - info = {} - opt = self.get_option_single('atlas_libs', 'libraries') - atlas_libs = self.get_libs(opt, self._lib_names + self._lib_atlas) - atlas = self.check_libs2(lib_dirs, atlas_libs, []) - if atlas is None: - return - include_dirs = self.get_include_dirs() - h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None]) - h = h[0] - if h: - h = os.path.dirname(h) - dict_append(info, include_dirs=[h]) - info['language'] = 'c' - info['define_macros'] = [('HAVE_CBLAS', None)] - - atlas_version, atlas_extra_info = get_atlas_version(**atlas) - dict_append(atlas, **atlas_extra_info) - - dict_append(info, **atlas) - - self.set_info(**info) - return - - -class atlas_threads_info(atlas_info): - dir_env_var = ['PTATLAS', 'ATLAS'] - _lib_names = ['ptf77blas', 'ptcblas'] - - -class 
atlas_blas_threads_info(atlas_blas_info): - dir_env_var = ['PTATLAS', 'ATLAS'] - _lib_names = ['ptf77blas', 'ptcblas'] - - -class lapack_atlas_info(atlas_info): - _lib_names = ['lapack_atlas'] + atlas_info._lib_names - - -class lapack_atlas_threads_info(atlas_threads_info): - _lib_names = ['lapack_atlas'] + atlas_threads_info._lib_names - - -class atlas_3_10_info(atlas_info): - _lib_names = ['satlas'] - _lib_atlas = _lib_names - _lib_lapack = _lib_names - - -class atlas_3_10_blas_info(atlas_3_10_info): - _lib_names = ['satlas'] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - info = {} - opt = self.get_option_single('atlas_lib', 'libraries') - atlas_libs = self.get_libs(opt, self._lib_names) - atlas = self.check_libs2(lib_dirs, atlas_libs, []) - if atlas is None: - return - include_dirs = self.get_include_dirs() - h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None]) - h = h[0] - if h: - h = os.path.dirname(h) - dict_append(info, include_dirs=[h]) - info['language'] = 'c' - info['define_macros'] = [('HAVE_CBLAS', None)] - - atlas_version, atlas_extra_info = get_atlas_version(**atlas) - dict_append(atlas, **atlas_extra_info) - - dict_append(info, **atlas) - - self.set_info(**info) - return - - -class atlas_3_10_threads_info(atlas_3_10_info): - dir_env_var = ['PTATLAS', 'ATLAS'] - _lib_names = ['tatlas'] - _lib_atlas = _lib_names - _lib_lapack = _lib_names - - -class atlas_3_10_blas_threads_info(atlas_3_10_blas_info): - dir_env_var = ['PTATLAS', 'ATLAS'] - _lib_names = ['tatlas'] - - -class lapack_atlas_3_10_info(atlas_3_10_info): - pass - - -class lapack_atlas_3_10_threads_info(atlas_3_10_threads_info): - pass - - -class lapack_info(system_info): - section = 'lapack' - dir_env_var = 'LAPACK' - _lib_names = ['lapack'] - notfounderror = LapackNotFoundError - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - - opt = self.get_option_single('lapack_libs', 'libraries') - lapack_libs = self.get_libs(opt, self._lib_names) - info = 
self.check_libs(lib_dirs, lapack_libs, []) - if info is None: - return - info['language'] = 'f77' - self.set_info(**info) - - -class lapack_src_info(system_info): - # LAPACK_SRC is deprecated, please do not use this! - # Build or install a BLAS library via your package manager or from - # source separately. - section = 'lapack_src' - dir_env_var = 'LAPACK_SRC' - notfounderror = LapackSrcNotFoundError - - def get_paths(self, section, key): - pre_dirs = system_info.get_paths(self, section, key) - dirs = [] - for d in pre_dirs: - dirs.extend([d] + self.combine_paths(d, ['LAPACK*/SRC', 'SRC'])) - return [d for d in dirs if os.path.isdir(d)] - - def calc_info(self): - src_dirs = self.get_src_dirs() - src_dir = '' - for d in src_dirs: - if os.path.isfile(os.path.join(d, 'dgesv.f')): - src_dir = d - break - if not src_dir: - #XXX: Get sources from netlib. May be ask first. - return - # The following is extracted from LAPACK-3.0/SRC/Makefile. - # Added missing names from lapack-lite-3.1.1/SRC/Makefile - # while keeping removed names for Lapack-3.0 compatibility. 
- allaux = ''' - ilaenv ieeeck lsame lsamen xerbla - iparmq - ''' # *.f - laux = ''' - bdsdc bdsqr disna labad lacpy ladiv lae2 laebz laed0 laed1 - laed2 laed3 laed4 laed5 laed6 laed7 laed8 laed9 laeda laev2 - lagtf lagts lamch lamrg lanst lapy2 lapy3 larnv larrb larre - larrf lartg laruv las2 lascl lasd0 lasd1 lasd2 lasd3 lasd4 - lasd5 lasd6 lasd7 lasd8 lasd9 lasda lasdq lasdt laset lasq1 - lasq2 lasq3 lasq4 lasq5 lasq6 lasr lasrt lassq lasv2 pttrf - stebz stedc steqr sterf - - larra larrc larrd larr larrk larrj larrr laneg laisnan isnan - lazq3 lazq4 - ''' # [s|d]*.f - lasrc = ''' - gbbrd gbcon gbequ gbrfs gbsv gbsvx gbtf2 gbtrf gbtrs gebak - gebal gebd2 gebrd gecon geequ gees geesx geev geevx gegs gegv - gehd2 gehrd gelq2 gelqf gels gelsd gelss gelsx gelsy geql2 - geqlf geqp3 geqpf geqr2 geqrf gerfs gerq2 gerqf gesc2 gesdd - gesv gesvd gesvx getc2 getf2 getrf getri getrs ggbak ggbal - gges ggesx ggev ggevx ggglm gghrd gglse ggqrf ggrqf ggsvd - ggsvp gtcon gtrfs gtsv gtsvx gttrf gttrs gtts2 hgeqz hsein - hseqr labrd lacon laein lags2 lagtm lahqr lahrd laic1 lals0 - lalsa lalsd langb lange langt lanhs lansb lansp lansy lantb - lantp lantr lapll lapmt laqgb laqge laqp2 laqps laqsb laqsp - laqsy lar1v lar2v larf larfb larfg larft larfx largv larrv - lartv larz larzb larzt laswp lasyf latbs latdf latps latrd - latrs latrz latzm lauu2 lauum pbcon pbequ pbrfs pbstf pbsv - pbsvx pbtf2 pbtrf pbtrs pocon poequ porfs posv posvx potf2 - potrf potri potrs ppcon ppequ pprfs ppsv ppsvx pptrf pptri - pptrs ptcon pteqr ptrfs ptsv ptsvx pttrs ptts2 spcon sprfs - spsv spsvx sptrf sptri sptrs stegr stein sycon syrfs sysv - sysvx sytf2 sytrf sytri sytrs tbcon tbrfs tbtrs tgevc tgex2 - tgexc tgsen tgsja tgsna tgsy2 tgsyl tpcon tprfs tptri tptrs - trcon trevc trexc trrfs trsen trsna trsyl trti2 trtri trtrs - tzrqf tzrzf - - lacn2 lahr2 stemr laqr0 laqr1 laqr2 laqr3 laqr4 laqr5 - ''' # [s|c|d|z]*.f - sd_lasrc = ''' - laexc lag2 lagv2 laln2 lanv2 laqtr lasy2 opgtr opmtr org2l - org2r 
orgbr orghr orgl2 orglq orgql orgqr orgr2 orgrq orgtr - orm2l orm2r ormbr ormhr orml2 ormlq ormql ormqr ormr2 ormr3 - ormrq ormrz ormtr rscl sbev sbevd sbevx sbgst sbgv sbgvd sbgvx - sbtrd spev spevd spevx spgst spgv spgvd spgvx sptrd stev stevd - stevr stevx syev syevd syevr syevx sygs2 sygst sygv sygvd - sygvx sytd2 sytrd - ''' # [s|d]*.f - cz_lasrc = ''' - bdsqr hbev hbevd hbevx hbgst hbgv hbgvd hbgvx hbtrd hecon heev - heevd heevr heevx hegs2 hegst hegv hegvd hegvx herfs hesv - hesvx hetd2 hetf2 hetrd hetrf hetri hetrs hpcon hpev hpevd - hpevx hpgst hpgv hpgvd hpgvx hprfs hpsv hpsvx hptrd hptrf - hptri hptrs lacgv lacp2 lacpy lacrm lacrt ladiv laed0 laed7 - laed8 laesy laev2 lahef lanhb lanhe lanhp lanht laqhb laqhe - laqhp larcm larnv lartg lascl laset lasr lassq pttrf rot spmv - spr stedc steqr symv syr ung2l ung2r ungbr unghr ungl2 unglq - ungql ungqr ungr2 ungrq ungtr unm2l unm2r unmbr unmhr unml2 - unmlq unmql unmqr unmr2 unmr3 unmrq unmrz unmtr upgtr upmtr - ''' # [c|z]*.f - ####### - sclaux = laux + ' econd ' # s*.f - dzlaux = laux + ' secnd ' # d*.f - slasrc = lasrc + sd_lasrc # s*.f - dlasrc = lasrc + sd_lasrc # d*.f - clasrc = lasrc + cz_lasrc + ' srot srscl ' # c*.f - zlasrc = lasrc + cz_lasrc + ' drot drscl ' # z*.f - oclasrc = ' icmax1 scsum1 ' # *.f - ozlasrc = ' izmax1 dzsum1 ' # *.f - sources = ['s%s.f' % f for f in (sclaux + slasrc).split()] \ - + ['d%s.f' % f for f in (dzlaux + dlasrc).split()] \ - + ['c%s.f' % f for f in (clasrc).split()] \ - + ['z%s.f' % f for f in (zlasrc).split()] \ - + ['%s.f' % f for f in (allaux + oclasrc + ozlasrc).split()] - sources = [os.path.join(src_dir, f) for f in sources] - # Lapack 3.1: - src_dir2 = os.path.join(src_dir, '..', 'INSTALL') - sources += [os.path.join(src_dir2, p + 'lamch.f') for p in 'sdcz'] - # Lapack 3.2.1: - sources += [os.path.join(src_dir, p + 'larfp.f') for p in 'sdcz'] - sources += [os.path.join(src_dir, 'ila' + p + 'lr.f') for p in 'sdcz'] - sources += [os.path.join(src_dir, 'ila' + p + 
'lc.f') for p in 'sdcz'] - # Should we check here actual existence of source files? - # Yes, the file listing is different between 3.0 and 3.1 - # versions. - sources = [f for f in sources if os.path.isfile(f)] - info = {'sources': sources, 'language': 'f77'} - self.set_info(**info) - -atlas_version_c_text = r''' -/* This file is generated from numpy/distutils/system_info.py */ -void ATL_buildinfo(void); -int main(void) { - ATL_buildinfo(); - return 0; -} -''' - -_cached_atlas_version = {} - - -def get_atlas_version(**config): - libraries = config.get('libraries', []) - library_dirs = config.get('library_dirs', []) - key = (tuple(libraries), tuple(library_dirs)) - if key in _cached_atlas_version: - return _cached_atlas_version[key] - c = cmd_config(Distribution()) - atlas_version = None - info = {} - try: - s, o = c.get_output(atlas_version_c_text, - libraries=libraries, library_dirs=library_dirs, - ) - if s and re.search(r'undefined reference to `_gfortran', o, re.M): - s, o = c.get_output(atlas_version_c_text, - libraries=libraries + ['gfortran'], - library_dirs=library_dirs, - ) - if not s: - warnings.warn(textwrap.dedent(""" - ***************************************************** - Linkage with ATLAS requires gfortran. Use - - python setup.py config_fc --fcompiler=gnu95 ... - - when building extension libraries that use ATLAS. - Make sure that -lgfortran is used for C++ extensions. 
- ***************************************************** - """), stacklevel=2) - dict_append(info, language='f90', - define_macros=[('ATLAS_REQUIRES_GFORTRAN', None)]) - except Exception: # failed to get version from file -- maybe on Windows - # look at directory name - for o in library_dirs: - m = re.search(r'ATLAS_(?P\d+[.]\d+[.]\d+)_', o) - if m: - atlas_version = m.group('version') - if atlas_version is not None: - break - - # final choice --- look at ATLAS_VERSION environment - # variable - if atlas_version is None: - atlas_version = os.environ.get('ATLAS_VERSION', None) - if atlas_version: - dict_append(info, define_macros=[( - 'ATLAS_INFO', _c_string_literal(atlas_version)) - ]) - else: - dict_append(info, define_macros=[('NO_ATLAS_INFO', -1)]) - return atlas_version or '?.?.?', info - - if not s: - m = re.search(r'ATLAS version (?P\d+[.]\d+[.]\d+)', o) - if m: - atlas_version = m.group('version') - if atlas_version is None: - if re.search(r'undefined symbol: ATL_buildinfo', o, re.M): - atlas_version = '3.2.1_pre3.3.6' - else: - log.info('Status: %d', s) - log.info('Output: %s', o) - - elif atlas_version == '3.2.1_pre3.3.6': - dict_append(info, define_macros=[('NO_ATLAS_INFO', -2)]) - else: - dict_append(info, define_macros=[( - 'ATLAS_INFO', _c_string_literal(atlas_version)) - ]) - result = _cached_atlas_version[key] = atlas_version, info - return result - - -class lapack_opt_info(system_info): - notfounderror = LapackNotFoundError - - # List of all known LAPACK libraries, in the default order - lapack_order = ['armpl', 'mkl', 'ssl2', 'openblas', 'flame', - 'accelerate', 'atlas', 'lapack'] - order_env_var_name = 'NPY_LAPACK_ORDER' - - def _calc_info_armpl(self): - info = get_info('lapack_armpl') - if info: - self.set_info(**info) - return True - return False - - def _calc_info_mkl(self): - info = get_info('lapack_mkl') - if info: - self.set_info(**info) - return True - return False - - def _calc_info_ssl2(self): - info = get_info('lapack_ssl2') - if info: - 
self.set_info(**info) - return True - return False - - def _calc_info_openblas(self): - info = get_info('openblas_lapack') - if info: - self.set_info(**info) - return True - info = get_info('openblas_clapack') - if info: - self.set_info(**info) - return True - return False - - def _calc_info_flame(self): - info = get_info('flame') - if info: - self.set_info(**info) - return True - return False - - def _calc_info_atlas(self): - info = get_info('atlas_3_10_threads') - if not info: - info = get_info('atlas_3_10') - if not info: - info = get_info('atlas_threads') - if not info: - info = get_info('atlas') - if info: - # Figure out if ATLAS has lapack... - # If not we need the lapack library, but not BLAS! - l = info.get('define_macros', []) - if ('ATLAS_WITH_LAPACK_ATLAS', None) in l \ - or ('ATLAS_WITHOUT_LAPACK', None) in l: - # Get LAPACK (with possible warnings) - # If not found we don't accept anything - # since we can't use ATLAS with LAPACK! - lapack_info = self._get_info_lapack() - if not lapack_info: - return False - dict_append(info, **lapack_info) - self.set_info(**info) - return True - return False - - def _calc_info_accelerate(self): - info = get_info('accelerate') - if info: - self.set_info(**info) - return True - return False - - def _get_info_blas(self): - # Default to get the optimized BLAS implementation - info = get_info('blas_opt') - if not info: - warnings.warn(BlasNotFoundError.__doc__ or '', stacklevel=3) - info_src = get_info('blas_src') - if not info_src: - warnings.warn(BlasSrcNotFoundError.__doc__ or '', stacklevel=3) - return {} - dict_append(info, libraries=[('fblas_src', info_src)]) - return info - - def _get_info_lapack(self): - info = get_info('lapack') - if not info: - warnings.warn(LapackNotFoundError.__doc__ or '', stacklevel=3) - info_src = get_info('lapack_src') - if not info_src: - warnings.warn(LapackSrcNotFoundError.__doc__ or '', stacklevel=3) - return {} - dict_append(info, libraries=[('flapack_src', info_src)]) - return info - 
- def _calc_info_lapack(self): - info = self._get_info_lapack() - if info: - info_blas = self._get_info_blas() - dict_append(info, **info_blas) - dict_append(info, define_macros=[('NO_ATLAS_INFO', 1)]) - self.set_info(**info) - return True - return False - - def _calc_info_from_envvar(self): - info = {} - info['language'] = 'f77' - info['libraries'] = [] - info['include_dirs'] = [] - info['define_macros'] = [] - info['extra_link_args'] = os.environ['NPY_LAPACK_LIBS'].split() - self.set_info(**info) - return True - - def _calc_info(self, name): - return getattr(self, '_calc_info_{}'.format(name))() - - def calc_info(self): - lapack_order, unknown_order = _parse_env_order(self.lapack_order, self.order_env_var_name) - if len(unknown_order) > 0: - raise ValueError("lapack_opt_info user defined " - "LAPACK order has unacceptable " - "values: {}".format(unknown_order)) - - if 'NPY_LAPACK_LIBS' in os.environ: - # Bypass autodetection, set language to F77 and use env var linker - # flags directly - self._calc_info_from_envvar() - return - - for lapack in lapack_order: - if self._calc_info(lapack): - return - - if 'lapack' not in lapack_order: - # Since the user may request *not* to use any library, we still need - # to raise warnings to signal missing packages! 
- warnings.warn(LapackNotFoundError.__doc__ or '', stacklevel=2) - warnings.warn(LapackSrcNotFoundError.__doc__ or '', stacklevel=2) - - -class _ilp64_opt_info_mixin: - symbol_suffix = None - symbol_prefix = None - - def _check_info(self, info): - macros = dict(info.get('define_macros', [])) - prefix = macros.get('BLAS_SYMBOL_PREFIX', '') - suffix = macros.get('BLAS_SYMBOL_SUFFIX', '') - - if self.symbol_prefix not in (None, prefix): - return False - - if self.symbol_suffix not in (None, suffix): - return False - - return bool(info) - - -class lapack_ilp64_opt_info(lapack_opt_info, _ilp64_opt_info_mixin): - notfounderror = LapackILP64NotFoundError - lapack_order = ['openblas64_', 'openblas_ilp64', 'accelerate'] - order_env_var_name = 'NPY_LAPACK_ILP64_ORDER' - - def _calc_info(self, name): - print('lapack_ilp64_opt_info._calc_info(name=%s)' % (name)) - info = get_info(name + '_lapack') - if self._check_info(info): - self.set_info(**info) - return True - else: - print('%s_lapack does not exist' % (name)) - return False - - -class lapack_ilp64_plain_opt_info(lapack_ilp64_opt_info): - # Same as lapack_ilp64_opt_info, but fix symbol names - symbol_prefix = '' - symbol_suffix = '' - - -class lapack64__opt_info(lapack_ilp64_opt_info): - symbol_prefix = '' - symbol_suffix = '64_' - - -class blas_opt_info(system_info): - notfounderror = BlasNotFoundError - # List of all known BLAS libraries, in the default order - - blas_order = ['armpl', 'mkl', 'ssl2', 'blis', 'openblas', - 'accelerate', 'atlas', 'blas'] - order_env_var_name = 'NPY_BLAS_ORDER' - - def _calc_info_armpl(self): - info = get_info('blas_armpl') - if info: - self.set_info(**info) - return True - return False - - def _calc_info_mkl(self): - info = get_info('blas_mkl') - if info: - self.set_info(**info) - return True - return False - - def _calc_info_ssl2(self): - info = get_info('blas_ssl2') - if info: - self.set_info(**info) - return True - return False - - def _calc_info_blis(self): - info = get_info('blis') - 
if info: - self.set_info(**info) - return True - return False - - def _calc_info_openblas(self): - info = get_info('openblas') - if info: - self.set_info(**info) - return True - return False - - def _calc_info_atlas(self): - info = get_info('atlas_3_10_blas_threads') - if not info: - info = get_info('atlas_3_10_blas') - if not info: - info = get_info('atlas_blas_threads') - if not info: - info = get_info('atlas_blas') - if info: - self.set_info(**info) - return True - return False - - def _calc_info_accelerate(self): - info = get_info('accelerate') - if info: - self.set_info(**info) - return True - return False - - def _calc_info_blas(self): - # Warn about a non-optimized BLAS library - warnings.warn(BlasOptNotFoundError.__doc__ or '', stacklevel=3) - info = {} - dict_append(info, define_macros=[('NO_ATLAS_INFO', 1)]) - - blas = get_info('blas') - if blas: - dict_append(info, **blas) - else: - # Not even BLAS was found! - warnings.warn(BlasNotFoundError.__doc__ or '', stacklevel=3) - - blas_src = get_info('blas_src') - if not blas_src: - warnings.warn(BlasSrcNotFoundError.__doc__ or '', stacklevel=3) - return False - dict_append(info, libraries=[('fblas_src', blas_src)]) - - self.set_info(**info) - return True - - def _calc_info_from_envvar(self): - info = {} - info['language'] = 'f77' - info['libraries'] = [] - info['include_dirs'] = [] - info['define_macros'] = [] - info['extra_link_args'] = os.environ['NPY_BLAS_LIBS'].split() - if 'NPY_CBLAS_LIBS' in os.environ: - info['define_macros'].append(('HAVE_CBLAS', None)) - info['extra_link_args'].extend( - os.environ['NPY_CBLAS_LIBS'].split()) - self.set_info(**info) - return True - - def _calc_info(self, name): - return getattr(self, '_calc_info_{}'.format(name))() - - def calc_info(self): - blas_order, unknown_order = _parse_env_order(self.blas_order, self.order_env_var_name) - if len(unknown_order) > 0: - raise ValueError("blas_opt_info user defined BLAS order has unacceptable values: {}".format(unknown_order)) - - 
if 'NPY_BLAS_LIBS' in os.environ: - # Bypass autodetection, set language to F77 and use env var linker - # flags directly - self._calc_info_from_envvar() - return - - for blas in blas_order: - if self._calc_info(blas): - return - - if 'blas' not in blas_order: - # Since the user may request *not* to use any library, we still need - # to raise warnings to signal missing packages! - warnings.warn(BlasNotFoundError.__doc__ or '', stacklevel=2) - warnings.warn(BlasSrcNotFoundError.__doc__ or '', stacklevel=2) - - -class blas_ilp64_opt_info(blas_opt_info, _ilp64_opt_info_mixin): - notfounderror = BlasILP64NotFoundError - blas_order = ['openblas64_', 'openblas_ilp64', 'accelerate'] - order_env_var_name = 'NPY_BLAS_ILP64_ORDER' - - def _calc_info(self, name): - info = get_info(name) - if self._check_info(info): - self.set_info(**info) - return True - return False - - -class blas_ilp64_plain_opt_info(blas_ilp64_opt_info): - symbol_prefix = '' - symbol_suffix = '' - - -class blas64__opt_info(blas_ilp64_opt_info): - symbol_prefix = '' - symbol_suffix = '64_' - - -class cblas_info(system_info): - section = 'cblas' - dir_env_var = 'CBLAS' - # No default as it's used only in blas_info - _lib_names = [] - notfounderror = BlasNotFoundError - - -class blas_info(system_info): - section = 'blas' - dir_env_var = 'BLAS' - _lib_names = ['blas'] - notfounderror = BlasNotFoundError - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - opt = self.get_option_single('blas_libs', 'libraries') - blas_libs = self.get_libs(opt, self._lib_names) - info = self.check_libs(lib_dirs, blas_libs, []) - if info is None: - return - else: - info['include_dirs'] = self.get_include_dirs() - if platform.system() == 'Windows': - # The check for windows is needed because get_cblas_libs uses the - # same compiler that was used to compile Python and msvc is - # often not installed when mingw is being used. This rough - # treatment is not desirable, but windows is tricky. 
- info['language'] = 'f77' # XXX: is it generally true? - # If cblas is given as an option, use those - cblas_info_obj = cblas_info() - cblas_opt = cblas_info_obj.get_option_single('cblas_libs', 'libraries') - cblas_libs = cblas_info_obj.get_libs(cblas_opt, None) - if cblas_libs: - info['libraries'] = cblas_libs + blas_libs - info['define_macros'] = [('HAVE_CBLAS', None)] - else: - lib = self.get_cblas_libs(info) - if lib is not None: - info['language'] = 'c' - info['libraries'] = lib - info['define_macros'] = [('HAVE_CBLAS', None)] - self.set_info(**info) - - def get_cblas_libs(self, info): - """ Check whether we can link with CBLAS interface - - This method will search through several combinations of libraries - to check whether CBLAS is present: - - 1. Libraries in ``info['libraries']``, as is - 2. As 1. but also explicitly adding ``'cblas'`` as a library - 3. As 1. but also explicitly adding ``'blas'`` as a library - 4. Check only library ``'cblas'`` - 5. Check only library ``'blas'`` - - Parameters - ---------- - info : dict - system information dictionary for compilation and linking - - Returns - ------- - libraries : list of str or None - a list of libraries that enables the use of CBLAS interface. - Returns None if not found or a compilation error occurs. - - Since 1.17 returns a list. 
- """ - # primitive cblas check by looking for the header and trying to link - # cblas or blas - c = customized_ccompiler() - tmpdir = tempfile.mkdtemp() - s = textwrap.dedent("""\ - #include - int main(int argc, const char *argv[]) - { - double a[4] = {1,2,3,4}; - double b[4] = {5,6,7,8}; - return cblas_ddot(4, a, 1, b, 1) > 10; - }""") - src = os.path.join(tmpdir, 'source.c') - try: - with open(src, 'w') as f: - f.write(s) - - try: - # check we can compile (find headers) - obj = c.compile([src], output_dir=tmpdir, - include_dirs=self.get_include_dirs()) - except (distutils.ccompiler.CompileError, distutils.ccompiler.LinkError): - return None - - # check we can link (find library) - # some systems have separate cblas and blas libs. - for libs in [info['libraries'], ['cblas'] + info['libraries'], - ['blas'] + info['libraries'], ['cblas'], ['blas']]: - try: - c.link_executable(obj, os.path.join(tmpdir, "a.out"), - libraries=libs, - library_dirs=info['library_dirs'], - extra_postargs=info.get('extra_link_args', [])) - return libs - except distutils.ccompiler.LinkError: - pass - finally: - shutil.rmtree(tmpdir) - return None - - -class openblas_info(blas_info): - section = 'openblas' - dir_env_var = 'OPENBLAS' - _lib_names = ['openblas'] - _require_symbols = [] - notfounderror = BlasNotFoundError - - @property - def symbol_prefix(self): - try: - return self.cp.get(self.section, 'symbol_prefix') - except NoOptionError: - return '' - - @property - def symbol_suffix(self): - try: - return self.cp.get(self.section, 'symbol_suffix') - except NoOptionError: - return '' - - def _calc_info(self): - c = customized_ccompiler() - - lib_dirs = self.get_lib_dirs() - - # Prefer to use libraries over openblas_libs - opt = self.get_option_single('openblas_libs', 'libraries') - openblas_libs = self.get_libs(opt, self._lib_names) - - info = self.check_libs(lib_dirs, openblas_libs, []) - - if c.compiler_type == "msvc" and info is None: - from numpy.distutils.fcompiler import 
new_fcompiler - f = new_fcompiler(c_compiler=c) - if f and f.compiler_type == 'gnu95': - # Try gfortran-compatible library files - info = self.check_msvc_gfortran_libs(lib_dirs, openblas_libs) - # Skip lapack check, we'd need build_ext to do it - skip_symbol_check = True - elif info: - skip_symbol_check = False - info['language'] = 'c' - - if info is None: - return None - - # Add extra info for OpenBLAS - extra_info = self.calc_extra_info() - dict_append(info, **extra_info) - - if not (skip_symbol_check or self.check_symbols(info)): - return None - - info['define_macros'] = [('HAVE_CBLAS', None)] - if self.symbol_prefix: - info['define_macros'] += [('BLAS_SYMBOL_PREFIX', self.symbol_prefix)] - if self.symbol_suffix: - info['define_macros'] += [('BLAS_SYMBOL_SUFFIX', self.symbol_suffix)] - - return info - - def calc_info(self): - info = self._calc_info() - if info is not None: - self.set_info(**info) - - def check_msvc_gfortran_libs(self, library_dirs, libraries): - # First, find the full path to each library directory - library_paths = [] - for library in libraries: - for library_dir in library_dirs: - # MinGW static ext will be .a - fullpath = os.path.join(library_dir, library + '.a') - if os.path.isfile(fullpath): - library_paths.append(fullpath) - break - else: - return None - - # Generate numpy.distutils virtual static library file - basename = self.__class__.__name__ - tmpdir = os.path.join(os.getcwd(), 'build', basename) - if not os.path.isdir(tmpdir): - os.makedirs(tmpdir) - - info = {'library_dirs': [tmpdir], - 'libraries': [basename], - 'language': 'f77'} - - fake_lib_file = os.path.join(tmpdir, basename + '.fobjects') - fake_clib_file = os.path.join(tmpdir, basename + '.cobjects') - with open(fake_lib_file, 'w') as f: - f.write("\n".join(library_paths)) - with open(fake_clib_file, 'w') as f: - pass - - return info - - def check_symbols(self, info): - res = False - c = customized_ccompiler() - - tmpdir = tempfile.mkdtemp() - - prototypes = "\n".join("void 
%s%s%s();" % (self.symbol_prefix, - symbol_name, - self.symbol_suffix) - for symbol_name in self._require_symbols) - calls = "\n".join("%s%s%s();" % (self.symbol_prefix, - symbol_name, - self.symbol_suffix) - for symbol_name in self._require_symbols) - s = textwrap.dedent("""\ - %(prototypes)s - int main(int argc, const char *argv[]) - { - %(calls)s - return 0; - }""") % dict(prototypes=prototypes, calls=calls) - src = os.path.join(tmpdir, 'source.c') - out = os.path.join(tmpdir, 'a.out') - # Add the additional "extra" arguments - try: - extra_args = info['extra_link_args'] - except Exception: - extra_args = [] - try: - with open(src, 'w') as f: - f.write(s) - obj = c.compile([src], output_dir=tmpdir) - try: - c.link_executable(obj, out, libraries=info['libraries'], - library_dirs=info['library_dirs'], - extra_postargs=extra_args) - res = True - except distutils.ccompiler.LinkError: - res = False - finally: - shutil.rmtree(tmpdir) - return res - -class openblas_lapack_info(openblas_info): - section = 'openblas' - dir_env_var = 'OPENBLAS' - _lib_names = ['openblas'] - _require_symbols = ['zungqr_'] - notfounderror = BlasNotFoundError - -class openblas_clapack_info(openblas_lapack_info): - _lib_names = ['openblas', 'lapack'] - -class openblas_ilp64_info(openblas_info): - section = 'openblas_ilp64' - dir_env_var = 'OPENBLAS_ILP64' - _lib_names = ['openblas64'] - _require_symbols = ['dgemm_', 'cblas_dgemm'] - notfounderror = BlasILP64NotFoundError - - def _calc_info(self): - info = super()._calc_info() - if info is not None: - info['define_macros'] += [('HAVE_BLAS_ILP64', None)] - return info - -class openblas_ilp64_lapack_info(openblas_ilp64_info): - _require_symbols = ['dgemm_', 'cblas_dgemm', 'zungqr_', 'LAPACKE_zungqr'] - - def _calc_info(self): - info = super()._calc_info() - if info: - info['define_macros'] += [('HAVE_LAPACKE', None)] - return info - -class openblas64__info(openblas_ilp64_info): - # ILP64 Openblas, with default symbol suffix - section = 
'openblas64_' - dir_env_var = 'OPENBLAS64_' - _lib_names = ['openblas64_'] - symbol_suffix = '64_' - symbol_prefix = '' - -class openblas64__lapack_info(openblas_ilp64_lapack_info, openblas64__info): - pass - -class blis_info(blas_info): - section = 'blis' - dir_env_var = 'BLIS' - _lib_names = ['blis'] - notfounderror = BlasNotFoundError - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - opt = self.get_option_single('blis_libs', 'libraries') - blis_libs = self.get_libs(opt, self._lib_names) - info = self.check_libs2(lib_dirs, blis_libs, []) - if info is None: - return - - # Add include dirs - incl_dirs = self.get_include_dirs() - dict_append(info, - language='c', - define_macros=[('HAVE_CBLAS', None)], - include_dirs=incl_dirs) - self.set_info(**info) - - -class flame_info(system_info): - """ Usage of libflame for LAPACK operations - - This requires libflame to be compiled with lapack wrappers: - - ./configure --enable-lapack2flame ... - - Be aware that libflame 5.1.0 has some missing names in the shared library, so - if you have problems, try the static flame library. 
- """ - section = 'flame' - _lib_names = ['flame'] - notfounderror = FlameNotFoundError - - def check_embedded_lapack(self, info): - """ libflame does not necessarily have a wrapper for fortran LAPACK, we need to check """ - c = customized_ccompiler() - - tmpdir = tempfile.mkdtemp() - s = textwrap.dedent("""\ - void zungqr_(); - int main(int argc, const char *argv[]) - { - zungqr_(); - return 0; - }""") - src = os.path.join(tmpdir, 'source.c') - out = os.path.join(tmpdir, 'a.out') - # Add the additional "extra" arguments - extra_args = info.get('extra_link_args', []) - try: - with open(src, 'w') as f: - f.write(s) - obj = c.compile([src], output_dir=tmpdir) - try: - c.link_executable(obj, out, libraries=info['libraries'], - library_dirs=info['library_dirs'], - extra_postargs=extra_args) - return True - except distutils.ccompiler.LinkError: - return False - finally: - shutil.rmtree(tmpdir) - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - flame_libs = self.get_libs('libraries', self._lib_names) - - info = self.check_libs2(lib_dirs, flame_libs, []) - if info is None: - return - - # Add the extra flag args to info - extra_info = self.calc_extra_info() - dict_append(info, **extra_info) - - if self.check_embedded_lapack(info): - # check if the user has supplied all information required - self.set_info(**info) - else: - # Try and get the BLAS lib to see if we can get it to work - blas_info = get_info('blas_opt') - if not blas_info: - # since we already failed once, this ain't going to work either - return - - # Now we need to merge the two dictionaries - for key in blas_info: - if isinstance(blas_info[key], list): - info[key] = info.get(key, []) + blas_info[key] - elif isinstance(blas_info[key], tuple): - info[key] = info.get(key, ()) + blas_info[key] - else: - info[key] = info.get(key, '') + blas_info[key] - - # Now check again - if self.check_embedded_lapack(info): - self.set_info(**info) - - -class accelerate_info(system_info): - section = 'accelerate' - 
_lib_names = ['accelerate', 'veclib'] - notfounderror = BlasNotFoundError - - def calc_info(self): - # Make possible to enable/disable from config file/env var - libraries = os.environ.get('ACCELERATE') - if libraries: - libraries = [libraries] - else: - libraries = self.get_libs('libraries', self._lib_names) - libraries = [lib.strip().lower() for lib in libraries] - - if (sys.platform == 'darwin' and - not os.getenv('_PYTHON_HOST_PLATFORM', None)): - # Use the system BLAS from Accelerate or vecLib under OSX - args = [] - link_args = [] - if get_platform()[-4:] == 'i386' or 'intel' in get_platform() or \ - 'x86_64' in get_platform() or \ - 'i386' in platform.platform(): - intel = 1 - else: - intel = 0 - if (os.path.exists('/System/Library/Frameworks' - '/Accelerate.framework/') and - 'accelerate' in libraries): - if intel: - args.extend(['-msse3']) - args.extend([ - '-I/System/Library/Frameworks/vecLib.framework/Headers']) - link_args.extend(['-Wl,-framework', '-Wl,Accelerate']) - elif (os.path.exists('/System/Library/Frameworks' - '/vecLib.framework/') and - 'veclib' in libraries): - if intel: - args.extend(['-msse3']) - args.extend([ - '-I/System/Library/Frameworks/vecLib.framework/Headers']) - link_args.extend(['-Wl,-framework', '-Wl,vecLib']) - - if args: - macros = [ - ('NO_ATLAS_INFO', 3), - ('HAVE_CBLAS', None), - ('ACCELERATE_NEW_LAPACK', None), - ] - if(os.getenv('NPY_USE_BLAS_ILP64', None)): - print('Setting HAVE_BLAS_ILP64') - macros += [ - ('HAVE_BLAS_ILP64', None), - ('ACCELERATE_LAPACK_ILP64', None), - ] - self.set_info(extra_compile_args=args, - extra_link_args=link_args, - define_macros=macros) - - return - -class accelerate_lapack_info(accelerate_info): - def _calc_info(self): - return super()._calc_info() - -class blas_src_info(system_info): - # BLAS_SRC is deprecated, please do not use this! - # Build or install a BLAS library via your package manager or from - # source separately. 
- section = 'blas_src' - dir_env_var = 'BLAS_SRC' - notfounderror = BlasSrcNotFoundError - - def get_paths(self, section, key): - pre_dirs = system_info.get_paths(self, section, key) - dirs = [] - for d in pre_dirs: - dirs.extend([d] + self.combine_paths(d, ['blas'])) - return [d for d in dirs if os.path.isdir(d)] - - def calc_info(self): - src_dirs = self.get_src_dirs() - src_dir = '' - for d in src_dirs: - if os.path.isfile(os.path.join(d, 'daxpy.f')): - src_dir = d - break - if not src_dir: - #XXX: Get sources from netlib. May be ask first. - return - blas1 = ''' - caxpy csscal dnrm2 dzasum saxpy srotg zdotc ccopy cswap drot - dznrm2 scasum srotm zdotu cdotc dasum drotg icamax scnrm2 - srotmg zdrot cdotu daxpy drotm idamax scopy sscal zdscal crotg - dcabs1 drotmg isamax sdot sswap zrotg cscal dcopy dscal izamax - snrm2 zaxpy zscal csrot ddot dswap sasum srot zcopy zswap - scabs1 - ''' - blas2 = ''' - cgbmv chpmv ctrsv dsymv dtrsv sspr2 strmv zhemv ztpmv cgemv - chpr dgbmv dsyr lsame ssymv strsv zher ztpsv cgerc chpr2 dgemv - dsyr2 sgbmv ssyr xerbla zher2 ztrmv cgeru ctbmv dger dtbmv - sgemv ssyr2 zgbmv zhpmv ztrsv chbmv ctbsv dsbmv dtbsv sger - stbmv zgemv zhpr chemv ctpmv dspmv dtpmv ssbmv stbsv zgerc - zhpr2 cher ctpsv dspr dtpsv sspmv stpmv zgeru ztbmv cher2 - ctrmv dspr2 dtrmv sspr stpsv zhbmv ztbsv - ''' - blas3 = ''' - cgemm csymm ctrsm dsyrk sgemm strmm zhemm zsyr2k chemm csyr2k - dgemm dtrmm ssymm strsm zher2k zsyrk cher2k csyrk dsymm dtrsm - ssyr2k zherk ztrmm cherk ctrmm dsyr2k ssyrk zgemm zsymm ztrsm - ''' - sources = [os.path.join(src_dir, f + '.f') \ - for f in (blas1 + blas2 + blas3).split()] - #XXX: should we check here actual existence of source files? 
- sources = [f for f in sources if os.path.isfile(f)] - info = {'sources': sources, 'language': 'f77'} - self.set_info(**info) - - -class x11_info(system_info): - section = 'x11' - notfounderror = X11NotFoundError - _lib_names = ['X11'] - - def __init__(self): - system_info.__init__(self, - default_lib_dirs=default_x11_lib_dirs, - default_include_dirs=default_x11_include_dirs) - - def calc_info(self): - if sys.platform in ['win32']: - return - lib_dirs = self.get_lib_dirs() - include_dirs = self.get_include_dirs() - opt = self.get_option_single('x11_libs', 'libraries') - x11_libs = self.get_libs(opt, self._lib_names) - info = self.check_libs(lib_dirs, x11_libs, []) - if info is None: - return - inc_dir = None - for d in include_dirs: - if self.combine_paths(d, 'X11/X.h'): - inc_dir = d - break - if inc_dir is not None: - dict_append(info, include_dirs=[inc_dir]) - self.set_info(**info) - - -class _numpy_info(system_info): - section = 'Numeric' - modulename = 'Numeric' - notfounderror = NumericNotFoundError - - def __init__(self): - include_dirs = [] - try: - module = __import__(self.modulename) - prefix = [] - for name in module.__file__.split(os.sep): - if name == 'lib': - break - prefix.append(name) - - # Ask numpy for its own include path before attempting - # anything else - try: - include_dirs.append(getattr(module, 'get_include')()) - except AttributeError: - pass - - include_dirs.append(sysconfig.get_path('include')) - except ImportError: - pass - py_incl_dir = sysconfig.get_path('include') - include_dirs.append(py_incl_dir) - py_pincl_dir = sysconfig.get_path('platinclude') - if py_pincl_dir not in include_dirs: - include_dirs.append(py_pincl_dir) - for d in default_include_dirs: - d = os.path.join(d, os.path.basename(py_incl_dir)) - if d not in include_dirs: - include_dirs.append(d) - system_info.__init__(self, - default_lib_dirs=[], - default_include_dirs=include_dirs) - - def calc_info(self): - try: - module = __import__(self.modulename) - except 
ImportError: - return - info = {} - macros = [] - for v in ['__version__', 'version']: - vrs = getattr(module, v, None) - if vrs is None: - continue - macros = [(self.modulename.upper() + '_VERSION', - _c_string_literal(vrs)), - (self.modulename.upper(), None)] - break - dict_append(info, define_macros=macros) - include_dirs = self.get_include_dirs() - inc_dir = None - for d in include_dirs: - if self.combine_paths(d, - os.path.join(self.modulename, - 'arrayobject.h')): - inc_dir = d - break - if inc_dir is not None: - dict_append(info, include_dirs=[inc_dir]) - if info: - self.set_info(**info) - return - - -class numarray_info(_numpy_info): - section = 'numarray' - modulename = 'numarray' - - -class Numeric_info(_numpy_info): - section = 'Numeric' - modulename = 'Numeric' - - -class numpy_info(_numpy_info): - section = 'numpy' - modulename = 'numpy' - - -class numerix_info(system_info): - section = 'numerix' - - def calc_info(self): - which = None, None - if os.getenv("NUMERIX"): - which = os.getenv("NUMERIX"), "environment var" - # If all the above fail, default to numpy. - if which[0] is None: - which = "numpy", "defaulted" - try: - import numpy # noqa: F401 - which = "numpy", "defaulted" - except ImportError as e: - msg1 = str(e) - try: - import Numeric # noqa: F401 - which = "numeric", "defaulted" - except ImportError as e: - msg2 = str(e) - try: - import numarray # noqa: F401 - which = "numarray", "defaulted" - except ImportError as e: - msg3 = str(e) - log.info(msg1) - log.info(msg2) - log.info(msg3) - which = which[0].strip().lower(), which[1] - if which[0] not in ["numeric", "numarray", "numpy"]: - raise ValueError("numerix selector must be either 'Numeric' " - "or 'numarray' or 'numpy' but the value obtained" - " from the %s was '%s'." 
% (which[1], which[0])) - os.environ['NUMERIX'] = which[0] - self.set_info(**get_info(which[0])) - - -class f2py_info(system_info): - def calc_info(self): - try: - import numpy.f2py as f2py - except ImportError: - return - f2py_dir = os.path.join(os.path.dirname(f2py.__file__), 'src') - self.set_info(sources=[os.path.join(f2py_dir, 'fortranobject.c')], - include_dirs=[f2py_dir]) - return - - -class boost_python_info(system_info): - section = 'boost_python' - dir_env_var = 'BOOST' - - def get_paths(self, section, key): - pre_dirs = system_info.get_paths(self, section, key) - dirs = [] - for d in pre_dirs: - dirs.extend([d] + self.combine_paths(d, ['boost*'])) - return [d for d in dirs if os.path.isdir(d)] - - def calc_info(self): - src_dirs = self.get_src_dirs() - src_dir = '' - for d in src_dirs: - if os.path.isfile(os.path.join(d, 'libs', 'python', 'src', - 'module.cpp')): - src_dir = d - break - if not src_dir: - return - py_incl_dirs = [sysconfig.get_path('include')] - py_pincl_dir = sysconfig.get_path('platinclude') - if py_pincl_dir not in py_incl_dirs: - py_incl_dirs.append(py_pincl_dir) - srcs_dir = os.path.join(src_dir, 'libs', 'python', 'src') - bpl_srcs = glob(os.path.join(srcs_dir, '*.cpp')) - bpl_srcs += glob(os.path.join(srcs_dir, '*', '*.cpp')) - info = {'libraries': [('boost_python_src', - {'include_dirs': [src_dir] + py_incl_dirs, - 'sources':bpl_srcs} - )], - 'include_dirs': [src_dir], - } - if info: - self.set_info(**info) - return - - -class agg2_info(system_info): - section = 'agg2' - dir_env_var = 'AGG2' - - def get_paths(self, section, key): - pre_dirs = system_info.get_paths(self, section, key) - dirs = [] - for d in pre_dirs: - dirs.extend([d] + self.combine_paths(d, ['agg2*'])) - return [d for d in dirs if os.path.isdir(d)] - - def calc_info(self): - src_dirs = self.get_src_dirs() - src_dir = '' - for d in src_dirs: - if os.path.isfile(os.path.join(d, 'src', 'agg_affine_matrix.cpp')): - src_dir = d - break - if not src_dir: - return - if 
sys.platform == 'win32': - agg2_srcs = glob(os.path.join(src_dir, 'src', 'platform', - 'win32', 'agg_win32_bmp.cpp')) - else: - agg2_srcs = glob(os.path.join(src_dir, 'src', '*.cpp')) - agg2_srcs += [os.path.join(src_dir, 'src', 'platform', - 'X11', - 'agg_platform_support.cpp')] - - info = {'libraries': - [('agg2_src', - {'sources': agg2_srcs, - 'include_dirs': [os.path.join(src_dir, 'include')], - } - )], - 'include_dirs': [os.path.join(src_dir, 'include')], - } - if info: - self.set_info(**info) - return - - -class _pkg_config_info(system_info): - section = None - config_env_var = 'PKG_CONFIG' - default_config_exe = 'pkg-config' - append_config_exe = '' - version_macro_name = None - release_macro_name = None - version_flag = '--modversion' - cflags_flag = '--cflags' - - def get_config_exe(self): - if self.config_env_var in os.environ: - return os.environ[self.config_env_var] - return self.default_config_exe - - def get_config_output(self, config_exe, option): - cmd = config_exe + ' ' + self.append_config_exe + ' ' + option - try: - o = subprocess.check_output(cmd) - except (OSError, subprocess.CalledProcessError): - pass - else: - o = filepath_from_subprocess_output(o) - return o - - def calc_info(self): - config_exe = find_executable(self.get_config_exe()) - if not config_exe: - log.warn('File not found: %s. Cannot determine %s info.' 
\ - % (config_exe, self.section)) - return - info = {} - macros = [] - libraries = [] - library_dirs = [] - include_dirs = [] - extra_link_args = [] - extra_compile_args = [] - version = self.get_config_output(config_exe, self.version_flag) - if version: - macros.append((self.__class__.__name__.split('.')[-1].upper(), - _c_string_literal(version))) - if self.version_macro_name: - macros.append((self.version_macro_name + '_%s' - % (version.replace('.', '_')), None)) - if self.release_macro_name: - release = self.get_config_output(config_exe, '--release') - if release: - macros.append((self.release_macro_name + '_%s' - % (release.replace('.', '_')), None)) - opts = self.get_config_output(config_exe, '--libs') - if opts: - for opt in opts.split(): - if opt[:2] == '-l': - libraries.append(opt[2:]) - elif opt[:2] == '-L': - library_dirs.append(opt[2:]) - else: - extra_link_args.append(opt) - opts = self.get_config_output(config_exe, self.cflags_flag) - if opts: - for opt in opts.split(): - if opt[:2] == '-I': - include_dirs.append(opt[2:]) - elif opt[:2] == '-D': - if '=' in opt: - n, v = opt[2:].split('=') - macros.append((n, v)) - else: - macros.append((opt[2:], None)) - else: - extra_compile_args.append(opt) - if macros: - dict_append(info, define_macros=macros) - if libraries: - dict_append(info, libraries=libraries) - if library_dirs: - dict_append(info, library_dirs=library_dirs) - if include_dirs: - dict_append(info, include_dirs=include_dirs) - if extra_link_args: - dict_append(info, extra_link_args=extra_link_args) - if extra_compile_args: - dict_append(info, extra_compile_args=extra_compile_args) - if info: - self.set_info(**info) - return - - -class wx_info(_pkg_config_info): - section = 'wx' - config_env_var = 'WX_CONFIG' - default_config_exe = 'wx-config' - append_config_exe = '' - version_macro_name = 'WX_VERSION' - release_macro_name = 'WX_RELEASE' - version_flag = '--version' - cflags_flag = '--cxxflags' - - -class 
gdk_pixbuf_xlib_2_info(_pkg_config_info): - section = 'gdk_pixbuf_xlib_2' - append_config_exe = 'gdk-pixbuf-xlib-2.0' - version_macro_name = 'GDK_PIXBUF_XLIB_VERSION' - - -class gdk_pixbuf_2_info(_pkg_config_info): - section = 'gdk_pixbuf_2' - append_config_exe = 'gdk-pixbuf-2.0' - version_macro_name = 'GDK_PIXBUF_VERSION' - - -class gdk_x11_2_info(_pkg_config_info): - section = 'gdk_x11_2' - append_config_exe = 'gdk-x11-2.0' - version_macro_name = 'GDK_X11_VERSION' - - -class gdk_2_info(_pkg_config_info): - section = 'gdk_2' - append_config_exe = 'gdk-2.0' - version_macro_name = 'GDK_VERSION' - - -class gdk_info(_pkg_config_info): - section = 'gdk' - append_config_exe = 'gdk' - version_macro_name = 'GDK_VERSION' - - -class gtkp_x11_2_info(_pkg_config_info): - section = 'gtkp_x11_2' - append_config_exe = 'gtk+-x11-2.0' - version_macro_name = 'GTK_X11_VERSION' - - -class gtkp_2_info(_pkg_config_info): - section = 'gtkp_2' - append_config_exe = 'gtk+-2.0' - version_macro_name = 'GTK_VERSION' - - -class xft_info(_pkg_config_info): - section = 'xft' - append_config_exe = 'xft' - version_macro_name = 'XFT_VERSION' - - -class freetype2_info(_pkg_config_info): - section = 'freetype2' - append_config_exe = 'freetype2' - version_macro_name = 'FREETYPE2_VERSION' - - -class amd_info(system_info): - section = 'amd' - dir_env_var = 'AMD' - _lib_names = ['amd'] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - - opt = self.get_option_single('amd_libs', 'libraries') - amd_libs = self.get_libs(opt, self._lib_names) - info = self.check_libs(lib_dirs, amd_libs, []) - if info is None: - return - - include_dirs = self.get_include_dirs() - - inc_dir = None - for d in include_dirs: - p = self.combine_paths(d, 'amd.h') - if p: - inc_dir = os.path.dirname(p[0]) - break - if inc_dir is not None: - dict_append(info, include_dirs=[inc_dir], - define_macros=[('SCIPY_AMD_H', None)], - swig_opts=['-I' + inc_dir]) - - self.set_info(**info) - return - - -class umfpack_info(system_info): 
- section = 'umfpack' - dir_env_var = 'UMFPACK' - notfounderror = UmfpackNotFoundError - _lib_names = ['umfpack'] - - def calc_info(self): - lib_dirs = self.get_lib_dirs() - - opt = self.get_option_single('umfpack_libs', 'libraries') - umfpack_libs = self.get_libs(opt, self._lib_names) - info = self.check_libs(lib_dirs, umfpack_libs, []) - if info is None: - return - - include_dirs = self.get_include_dirs() - - inc_dir = None - for d in include_dirs: - p = self.combine_paths(d, ['', 'umfpack'], 'umfpack.h') - if p: - inc_dir = os.path.dirname(p[0]) - break - if inc_dir is not None: - dict_append(info, include_dirs=[inc_dir], - define_macros=[('SCIPY_UMFPACK_H', None)], - swig_opts=['-I' + inc_dir]) - - dict_append(info, **get_info('amd')) - - self.set_info(**info) - return - - -def combine_paths(*args, **kws): - """ Return a list of existing paths composed by all combinations of - items from arguments. - """ - r = [] - for a in args: - if not a: - continue - if is_string(a): - a = [a] - r.append(a) - args = r - if not args: - return [] - if len(args) == 1: - result = reduce(lambda a, b: a + b, map(glob, args[0]), []) - elif len(args) == 2: - result = [] - for a0 in args[0]: - for a1 in args[1]: - result.extend(glob(os.path.join(a0, a1))) - else: - result = combine_paths(*(combine_paths(args[0], args[1]) + args[2:])) - log.debug('(paths: %s)', ','.join(result)) - return result - -language_map = {'c': 0, 'c++': 1, 'f77': 2, 'f90': 3} -inv_language_map = {0: 'c', 1: 'c++', 2: 'f77', 3: 'f90'} - - -def dict_append(d, **kws): - languages = [] - for k, v in kws.items(): - if k == 'language': - languages.append(v) - continue - if k in d: - if k in ['library_dirs', 'include_dirs', - 'extra_compile_args', 'extra_link_args', - 'runtime_library_dirs', 'define_macros']: - [d[k].append(vv) for vv in v if vv not in d[k]] - else: - d[k].extend(v) - else: - d[k] = v - if languages: - l = inv_language_map[max([language_map.get(l, 0) for l in languages])] - d['language'] = l - 
return - - -def parseCmdLine(argv=(None,)): - import optparse - parser = optparse.OptionParser("usage: %prog [-v] [info objs]") - parser.add_option('-v', '--verbose', action='store_true', dest='verbose', - default=False, - help='be verbose and print more messages') - - opts, args = parser.parse_args(args=argv[1:]) - return opts, args - - -def show_all(argv=None): - import inspect - if argv is None: - argv = sys.argv - opts, args = parseCmdLine(argv) - if opts.verbose: - log.set_threshold(log.DEBUG) - else: - log.set_threshold(log.INFO) - show_only = [] - for n in args: - if n[-5:] != '_info': - n = n + '_info' - show_only.append(n) - show_all = not show_only - _gdict_ = globals().copy() - for name, c in _gdict_.items(): - if not inspect.isclass(c): - continue - if not issubclass(c, system_info) or c is system_info: - continue - if not show_all: - if name not in show_only: - continue - del show_only[show_only.index(name)] - conf = c() - conf.verbosity = 2 - # we don't need the result, but we want - # the side effect of printing diagnostics - conf.get_info() - if show_only: - log.info('Info classes not defined: %s', ','.join(show_only)) - -if __name__ == "__main__": - show_all() diff --git a/numpy/distutils/tests/__init__.py b/numpy/distutils/tests/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/numpy/distutils/tests/test_build_ext.py b/numpy/distutils/tests/test_build_ext.py deleted file mode 100644 index 7124cc407a2f..000000000000 --- a/numpy/distutils/tests/test_build_ext.py +++ /dev/null @@ -1,74 +0,0 @@ -'''Tests for numpy.distutils.build_ext.''' - -import os -import subprocess -import sys -from textwrap import indent, dedent -import pytest -from numpy.testing import IS_WASM - -@pytest.mark.skipif(IS_WASM, reason="cannot start subprocess in wasm") -@pytest.mark.slow -def test_multi_fortran_libs_link(tmp_path): - ''' - Ensures multiple "fake" static libraries are correctly linked. 
- see gh-18295 - ''' - - # We need to make sure we actually have an f77 compiler. - # This is nontrivial, so we'll borrow the utilities - # from f2py tests: - from numpy.distutils.tests.utilities import has_f77_compiler - if not has_f77_compiler(): - pytest.skip('No F77 compiler found') - - # make some dummy sources - with open(tmp_path / '_dummy1.f', 'w') as fid: - fid.write(indent(dedent('''\ - FUNCTION dummy_one() - RETURN - END FUNCTION'''), prefix=' '*6)) - with open(tmp_path / '_dummy2.f', 'w') as fid: - fid.write(indent(dedent('''\ - FUNCTION dummy_two() - RETURN - END FUNCTION'''), prefix=' '*6)) - with open(tmp_path / '_dummy.c', 'w') as fid: - # doesn't need to load - just needs to exist - fid.write('int PyInit_dummyext;') - - # make a setup file - with open(tmp_path / 'setup.py', 'w') as fid: - srctree = os.path.join(os.path.dirname(__file__), '..', '..', '..') - fid.write(dedent(f'''\ - def configuration(parent_package="", top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration("", parent_package, top_path) - config.add_library("dummy1", sources=["_dummy1.f"]) - config.add_library("dummy2", sources=["_dummy2.f"]) - config.add_extension("dummyext", sources=["_dummy.c"], libraries=["dummy1", "dummy2"]) - return config - - - if __name__ == "__main__": - import sys - sys.path.insert(0, r"{srctree}") - from numpy.distutils.core import setup - setup(**configuration(top_path="").todict())''')) - - # build the test extension and "install" into a temporary directory - build_dir = tmp_path - subprocess.check_call([sys.executable, 'setup.py', 'build', 'install', - '--prefix', str(tmp_path / 'installdir'), - '--record', str(tmp_path / 'tmp_install_log.txt'), - ], - cwd=str(build_dir), - ) - # get the path to the so - so = None - with open(tmp_path /'tmp_install_log.txt') as fid: - for line in fid: - if 'dummyext' in line: - so = line.strip() - break - assert so is not None diff --git 
a/numpy/distutils/tests/test_ccompiler_opt.py b/numpy/distutils/tests/test_ccompiler_opt.py deleted file mode 100644 index 3714aea0e12e..000000000000 --- a/numpy/distutils/tests/test_ccompiler_opt.py +++ /dev/null @@ -1,808 +0,0 @@ -import re, textwrap, os -from os import sys, path -from distutils.errors import DistutilsError - -is_standalone = __name__ == '__main__' and __package__ is None -if is_standalone: - import unittest, contextlib, tempfile, shutil - sys.path.append(path.abspath(path.join(path.dirname(__file__), ".."))) - from ccompiler_opt import CCompilerOpt - - # from numpy/testing/_private/utils.py - @contextlib.contextmanager - def tempdir(*args, **kwargs): - tmpdir = tempfile.mkdtemp(*args, **kwargs) - try: - yield tmpdir - finally: - shutil.rmtree(tmpdir) - - def assert_(expr, msg=''): - if not expr: - raise AssertionError(msg) -else: - from numpy.distutils.ccompiler_opt import CCompilerOpt - from numpy.testing import assert_, tempdir - -# architectures and compilers to test -arch_compilers = dict( - x86 = ("gcc", "clang", "icc", "iccw", "msvc"), - x64 = ("gcc", "clang", "icc", "iccw", "msvc"), - ppc64 = ("gcc", "clang"), - ppc64le = ("gcc", "clang"), - armhf = ("gcc", "clang"), - aarch64 = ("gcc", "clang", "fcc"), - s390x = ("gcc", "clang"), - noarch = ("gcc",) -) - -class FakeCCompilerOpt(CCompilerOpt): - fake_info = "" - def __init__(self, trap_files="", trap_flags="", *args, **kwargs): - self.fake_trap_files = trap_files - self.fake_trap_flags = trap_flags - CCompilerOpt.__init__(self, None, **kwargs) - - def __repr__(self): - return textwrap.dedent("""\ - <<<< - march : {} - compiler : {} - ---------------- - {} - >>>> - """).format(self.cc_march, self.cc_name, self.report()) - - def dist_compile(self, sources, flags, **kwargs): - assert(isinstance(sources, list)) - assert(isinstance(flags, list)) - if self.fake_trap_files: - for src in sources: - if re.match(self.fake_trap_files, src): - self.dist_error("source is trapped by a fake interface") 
- if self.fake_trap_flags: - for f in flags: - if re.match(self.fake_trap_flags, f): - self.dist_error("flag is trapped by a fake interface") - # fake objects - return zip(sources, [' '.join(flags)] * len(sources)) - - def dist_info(self): - return FakeCCompilerOpt.fake_info - - @staticmethod - def dist_log(*args, stderr=False): - pass - -class _Test_CCompilerOpt: - arch = None # x86_64 - cc = None # gcc - - def setup_class(self): - FakeCCompilerOpt.conf_nocache = True - self._opt = None - - def nopt(self, *args, **kwargs): - FakeCCompilerOpt.fake_info = (self.arch, self.cc, "") - return FakeCCompilerOpt(*args, **kwargs) - - def opt(self): - if not self._opt: - self._opt = self.nopt() - return self._opt - - def march(self): - return self.opt().cc_march - - def cc_name(self): - return self.opt().cc_name - - def get_targets(self, targets, groups, **kwargs): - FakeCCompilerOpt.conf_target_groups = groups - opt = self.nopt( - cpu_baseline=kwargs.get("baseline", "min"), - cpu_dispatch=kwargs.get("dispatch", "max"), - trap_files=kwargs.get("trap_files", ""), - trap_flags=kwargs.get("trap_flags", "") - ) - with tempdir() as tmpdir: - file = os.path.join(tmpdir, "test_targets.c") - with open(file, 'w') as f: - f.write(targets) - gtargets = [] - gflags = {} - fake_objects = opt.try_dispatch([file]) - for source, flags in fake_objects: - gtar = path.basename(source).split('.')[1:-1] - glen = len(gtar) - if glen == 0: - gtar = "baseline" - elif glen == 1: - gtar = gtar[0].upper() - else: - # converting multi-target into parentheses str format to be equivalent - # to the configuration statements syntax. 
- gtar = ('('+' '.join(gtar)+')').upper() - gtargets.append(gtar) - gflags[gtar] = flags - - has_baseline, targets = opt.sources_status[file] - targets = targets + ["baseline"] if has_baseline else targets - # convert tuple that represent multi-target into parentheses str format - targets = [ - '('+' '.join(tar)+')' if isinstance(tar, tuple) else tar - for tar in targets - ] - if len(targets) != len(gtargets) or not all(t in gtargets for t in targets): - raise AssertionError( - "'sources_status' returns different targets than the compiled targets\n" - "%s != %s" % (targets, gtargets) - ) - # return targets from 'sources_status' since the order is matters - return targets, gflags - - def arg_regex(self, **kwargs): - map2origin = dict( - x64 = "x86", - ppc64le = "ppc64", - aarch64 = "armhf", - clang = "gcc", - ) - march = self.march(); cc_name = self.cc_name() - map_march = map2origin.get(march, march) - map_cc = map2origin.get(cc_name, cc_name) - for key in ( - march, cc_name, map_march, map_cc, - march + '_' + cc_name, - map_march + '_' + cc_name, - march + '_' + map_cc, - map_march + '_' + map_cc, - ) : - regex = kwargs.pop(key, None) - if regex is not None: - break - if regex: - if isinstance(regex, dict): - for k, v in regex.items(): - if v[-1:] not in ')}$?\\.+*': - regex[k] = v + '$' - else: - assert(isinstance(regex, str)) - if regex[-1:] not in ')}$?\\.+*': - regex += '$' - return regex - - def expect(self, dispatch, baseline="", **kwargs): - match = self.arg_regex(**kwargs) - if match is None: - return - opt = self.nopt( - cpu_baseline=baseline, cpu_dispatch=dispatch, - trap_files=kwargs.get("trap_files", ""), - trap_flags=kwargs.get("trap_flags", "") - ) - features = ' '.join(opt.cpu_dispatch_names()) - if not match: - if len(features) != 0: - raise AssertionError( - 'expected empty features, not "%s"' % features - ) - return - if not re.match(match, features, re.IGNORECASE): - raise AssertionError( - 'dispatch features "%s" not match "%s"' % (features, 
match) - ) - - def expect_baseline(self, baseline, dispatch="", **kwargs): - match = self.arg_regex(**kwargs) - if match is None: - return - opt = self.nopt( - cpu_baseline=baseline, cpu_dispatch=dispatch, - trap_files=kwargs.get("trap_files", ""), - trap_flags=kwargs.get("trap_flags", "") - ) - features = ' '.join(opt.cpu_baseline_names()) - if not match: - if len(features) != 0: - raise AssertionError( - 'expected empty features, not "%s"' % features - ) - return - if not re.match(match, features, re.IGNORECASE): - raise AssertionError( - 'baseline features "%s" not match "%s"' % (features, match) - ) - - def expect_flags(self, baseline, dispatch="", **kwargs): - match = self.arg_regex(**kwargs) - if match is None: - return - opt = self.nopt( - cpu_baseline=baseline, cpu_dispatch=dispatch, - trap_files=kwargs.get("trap_files", ""), - trap_flags=kwargs.get("trap_flags", "") - ) - flags = ' '.join(opt.cpu_baseline_flags()) - if not match: - if len(flags) != 0: - raise AssertionError( - 'expected empty flags not "%s"' % flags - ) - return - if not re.match(match, flags): - raise AssertionError( - 'flags "%s" not match "%s"' % (flags, match) - ) - - def expect_targets(self, targets, groups={}, **kwargs): - match = self.arg_regex(**kwargs) - if match is None: - return - targets, _ = self.get_targets(targets=targets, groups=groups, **kwargs) - targets = ' '.join(targets) - if not match: - if len(targets) != 0: - raise AssertionError( - 'expected empty targets, not "%s"' % targets - ) - return - if not re.match(match, targets, re.IGNORECASE): - raise AssertionError( - 'targets "%s" not match "%s"' % (targets, match) - ) - - def expect_target_flags(self, targets, groups={}, **kwargs): - match_dict = self.arg_regex(**kwargs) - if match_dict is None: - return - assert(isinstance(match_dict, dict)) - _, tar_flags = self.get_targets(targets=targets, groups=groups) - - for match_tar, match_flags in match_dict.items(): - if match_tar not in tar_flags: - raise AssertionError( - 
'expected to find target "%s"' % match_tar - ) - flags = tar_flags[match_tar] - if not match_flags: - if len(flags) != 0: - raise AssertionError( - 'expected to find empty flags in target "%s"' % match_tar - ) - if not re.match(match_flags, flags): - raise AssertionError( - '"%s" flags "%s" not match "%s"' % (match_tar, flags, match_flags) - ) - - def test_interface(self): - wrong_arch = "ppc64" if self.arch != "ppc64" else "x86" - wrong_cc = "clang" if self.cc != "clang" else "icc" - opt = self.opt() - assert_(getattr(opt, "cc_on_" + self.arch)) - assert_(not getattr(opt, "cc_on_" + wrong_arch)) - assert_(getattr(opt, "cc_is_" + self.cc)) - assert_(not getattr(opt, "cc_is_" + wrong_cc)) - - def test_args_empty(self): - for baseline, dispatch in ( - ("", "none"), - (None, ""), - ("none +none", "none - none"), - ("none -max", "min - max"), - ("+vsx2 -VSX2", "vsx avx2 avx512f -max"), - ("max -vsx - avx + avx512f neon -MAX ", - "min -min + max -max -vsx + avx2 -avx2 +NONE") - ) : - opt = self.nopt(cpu_baseline=baseline, cpu_dispatch=dispatch) - assert(len(opt.cpu_baseline_names()) == 0) - assert(len(opt.cpu_dispatch_names()) == 0) - - def test_args_validation(self): - if self.march() == "unknown": - return - # check sanity of argument's validation - for baseline, dispatch in ( - ("unkown_feature - max +min", "unknown max min"), # unknowing features - ("#avx2", "$vsx") # groups and polices aren't acceptable - ) : - try: - self.nopt(cpu_baseline=baseline, cpu_dispatch=dispatch) - raise AssertionError("excepted an exception for invalid arguments") - except DistutilsError: - pass - - def test_skip(self): - # only takes what platform supports and skip the others - # without casing exceptions - self.expect( - "sse vsx neon", - x86="sse", ppc64="vsx", armhf="neon", unknown="" - ) - self.expect( - "sse41 avx avx2 vsx2 vsx3 neon_vfpv4 asimd", - x86 = "sse41 avx avx2", - ppc64 = "vsx2 vsx3", - armhf = "neon_vfpv4 asimd", - unknown = "" - ) - # any features in cpu_dispatch must 
be ignored if it's part of baseline - self.expect( - "sse neon vsx", baseline="sse neon vsx", - x86="", ppc64="", armhf="" - ) - self.expect( - "avx2 vsx3 asimdhp", baseline="avx2 vsx3 asimdhp", - x86="", ppc64="", armhf="" - ) - - def test_implies(self): - # baseline combining implied features, so we count - # on it instead of testing 'feature_implies()'' directly - self.expect_baseline( - "fma3 avx2 asimd vsx3", - # .* between two spaces can validate features in between - x86 = "sse .* sse41 .* fma3.*avx2", - ppc64 = "vsx vsx2 vsx3", - armhf = "neon neon_fp16 neon_vfpv4 asimd" - ) - """ - special cases - """ - # in icc and msvc, FMA3 and AVX2 can't be separated - # both need to implies each other, same for avx512f & cd - for f0, f1 in ( - ("fma3", "avx2"), - ("avx512f", "avx512cd"), - ): - diff = ".* sse42 .* %s .*%s$" % (f0, f1) - self.expect_baseline(f0, - x86_gcc=".* sse42 .* %s$" % f0, - x86_icc=diff, x86_iccw=diff - ) - self.expect_baseline(f1, - x86_gcc=".* avx .* %s$" % f1, - x86_icc=diff, x86_iccw=diff - ) - # in msvc, following features can't be separated too - for f in (("fma3", "avx2"), ("avx512f", "avx512cd", "avx512_skx")): - for ff in f: - self.expect_baseline(ff, - x86_msvc=".*%s" % ' '.join(f) - ) - - # in ppc64le VSX and VSX2 can't be separated - self.expect_baseline("vsx", ppc64le="vsx vsx2") - # in aarch64 following features can't be separated - for f in ("neon", "neon_fp16", "neon_vfpv4", "asimd"): - self.expect_baseline(f, aarch64="neon neon_fp16 neon_vfpv4 asimd") - - def test_args_options(self): - # max & native - for o in ("max", "native"): - if o == "native" and self.cc_name() == "msvc": - continue - self.expect(o, - trap_files=".*cpu_(sse|vsx|neon|vx).c", - x86="", ppc64="", armhf="", s390x="" - ) - self.expect(o, - trap_files=".*cpu_(sse3|vsx2|neon_vfpv4|vxe).c", - x86="sse sse2", ppc64="vsx", armhf="neon neon_fp16", - aarch64="", ppc64le="", s390x="vx" - ) - self.expect(o, - trap_files=".*cpu_(popcnt|vsx3).c", - x86="sse .* sse41", 
ppc64="vsx vsx2", - armhf="neon neon_fp16 .* asimd .*", - s390x="vx vxe vxe2" - ) - self.expect(o, - x86_gcc=".* xop fma4 .* avx512f .* avx512_knl avx512_knm avx512_skx .*", - # in icc, xop and fam4 aren't supported - x86_icc=".* avx512f .* avx512_knl avx512_knm avx512_skx .*", - x86_iccw=".* avx512f .* avx512_knl avx512_knm avx512_skx .*", - # in msvc, avx512_knl avx512_knm aren't supported - x86_msvc=".* xop fma4 .* avx512f .* avx512_skx .*", - armhf=".* asimd asimdhp asimddp .*", - ppc64="vsx vsx2 vsx3 vsx4.*", - s390x="vx vxe vxe2.*" - ) - # min - self.expect("min", - x86="sse sse2", x64="sse sse2 sse3", - armhf="", aarch64="neon neon_fp16 .* asimd", - ppc64="", ppc64le="vsx vsx2", s390x="" - ) - self.expect( - "min", trap_files=".*cpu_(sse2|vsx2).c", - x86="", ppc64le="" - ) - # an exception must triggered if native flag isn't supported - # when option "native" is activated through the args - try: - self.expect("native", - trap_flags=".*(-march=native|-xHost|/QxHost|-mcpu=a64fx).*", - x86=".*", ppc64=".*", armhf=".*", s390x=".*", aarch64=".*", - ) - if self.march() != "unknown": - raise AssertionError( - "excepted an exception for %s" % self.march() - ) - except DistutilsError: - if self.march() == "unknown": - raise AssertionError("excepted no exceptions") - - def test_flags(self): - self.expect_flags( - "sse sse2 vsx vsx2 neon neon_fp16 vx vxe", - x86_gcc="-msse -msse2", x86_icc="-msse -msse2", - x86_iccw="/arch:SSE2", - x86_msvc="/arch:SSE2" if self.march() == "x86" else "", - ppc64_gcc= "-mcpu=power8", - ppc64_clang="-mcpu=power8", - armhf_gcc="-mfpu=neon-fp16 -mfp16-format=ieee", - aarch64="", - s390x="-mzvector -march=arch12" - ) - # testing normalize -march - self.expect_flags( - "asimd", - aarch64="", - armhf_gcc=r"-mfp16-format=ieee -mfpu=neon-fp-armv8 -march=armv8-a\+simd" - ) - self.expect_flags( - "asimdhp", - aarch64_gcc=r"-march=armv8.2-a\+fp16", - armhf_gcc=r"-mfp16-format=ieee -mfpu=neon-fp-armv8 -march=armv8.2-a\+fp16" - ) - self.expect_flags( 
- "asimddp", aarch64_gcc=r"-march=armv8.2-a\+dotprod" - ) - self.expect_flags( - # asimdfhm implies asimdhp - "asimdfhm", aarch64_gcc=r"-march=armv8.2-a\+fp16\+fp16fml" - ) - self.expect_flags( - "asimddp asimdhp asimdfhm", - aarch64_gcc=r"-march=armv8.2-a\+dotprod\+fp16\+fp16fml" - ) - self.expect_flags( - "vx vxe vxe2", - s390x=r"-mzvector -march=arch13" - ) - - def test_targets_exceptions(self): - for targets in ( - "bla bla", "/*@targets", - "/*@targets */", - "/*@targets unknown */", - "/*@targets $unknown_policy avx2 */", - "/*@targets #unknown_group avx2 */", - "/*@targets $ */", - "/*@targets # vsx */", - "/*@targets #$ vsx */", - "/*@targets vsx avx2 ) */", - "/*@targets vsx avx2 (avx2 */", - "/*@targets vsx avx2 () */", - "/*@targets vsx avx2 ($autovec) */", # no features - "/*@targets vsx avx2 (xxx) */", - "/*@targets vsx avx2 (baseline) */", - ) : - try: - self.expect_targets( - targets, - x86="", armhf="", ppc64="", s390x="" - ) - if self.march() != "unknown": - raise AssertionError( - "excepted an exception for %s" % self.march() - ) - except DistutilsError: - if self.march() == "unknown": - raise AssertionError("excepted no exceptions") - - def test_targets_syntax(self): - for targets in ( - "/*@targets $keep_baseline sse vsx neon vx*/", - "/*@targets,$keep_baseline,sse,vsx,neon vx*/", - "/*@targets*$keep_baseline*sse*vsx*neon*vx*/", - """ - /* - ** @targets - ** $keep_baseline, sse vsx,neon, vx - */ - """, - """ - /* - ************@targets**************** - ** $keep_baseline, sse vsx, neon, vx - ************************************ - */ - """, - """ - /* - /////////////@targets///////////////// - //$keep_baseline//sse//vsx//neon//vx - ///////////////////////////////////// - */ - """, - """ - /* - @targets - $keep_baseline - SSE VSX NEON VX*/ - """ - ) : - self.expect_targets(targets, - x86="sse", ppc64="vsx", armhf="neon", s390x="vx", unknown="" - ) - - def test_targets(self): - # test skipping baseline features - self.expect_targets( - """ - 
/*@targets - sse sse2 sse41 avx avx2 avx512f - vsx vsx2 vsx3 vsx4 - neon neon_fp16 asimdhp asimddp - vx vxe vxe2 - */ - """, - baseline="avx vsx2 asimd vx vxe", - x86="avx512f avx2", armhf="asimddp asimdhp", ppc64="vsx4 vsx3", - s390x="vxe2" - ) - # test skipping non-dispatch features - self.expect_targets( - """ - /*@targets - sse41 avx avx2 avx512f - vsx2 vsx3 vsx4 - asimd asimdhp asimddp - vx vxe vxe2 - */ - """, - baseline="", dispatch="sse41 avx2 vsx2 asimd asimddp vxe2", - x86="avx2 sse41", armhf="asimddp asimd", ppc64="vsx2", s390x="vxe2" - ) - # test skipping features that not supported - self.expect_targets( - """ - /*@targets - sse2 sse41 avx2 avx512f - vsx2 vsx3 vsx4 - neon asimdhp asimddp - vx vxe vxe2 - */ - """, - baseline="", - trap_files=".*(avx2|avx512f|vsx3|vsx4|asimddp|vxe2).c", - x86="sse41 sse2", ppc64="vsx2", armhf="asimdhp neon", - s390x="vxe vx" - ) - # test skipping features that implies each other - self.expect_targets( - """ - /*@targets - sse sse2 avx fma3 avx2 avx512f avx512cd - vsx vsx2 vsx3 - neon neon_vfpv4 neon_fp16 neon_fp16 asimd asimdhp - asimddp asimdfhm - */ - """, - baseline="", - x86_gcc="avx512cd avx512f avx2 fma3 avx sse2", - x86_msvc="avx512cd avx2 avx sse2", - x86_icc="avx512cd avx2 avx sse2", - x86_iccw="avx512cd avx2 avx sse2", - ppc64="vsx3 vsx2 vsx", - ppc64le="vsx3 vsx2", - armhf="asimdfhm asimddp asimdhp asimd neon_vfpv4 neon_fp16 neon", - aarch64="asimdfhm asimddp asimdhp asimd" - ) - - def test_targets_policies(self): - # 'keep_baseline', generate objects for baseline features - self.expect_targets( - """ - /*@targets - $keep_baseline - sse2 sse42 avx2 avx512f - vsx2 vsx3 - neon neon_vfpv4 asimd asimddp - vx vxe vxe2 - */ - """, - baseline="sse41 avx2 vsx2 asimd vsx3 vxe", - x86="avx512f avx2 sse42 sse2", - ppc64="vsx3 vsx2", - armhf="asimddp asimd neon_vfpv4 neon", - # neon, neon_vfpv4, asimd implies each other - aarch64="asimddp asimd", - s390x="vxe2 vxe vx" - ) - # 'keep_sort', leave the sort as-is - 
self.expect_targets( - """ - /*@targets - $keep_baseline $keep_sort - avx512f sse42 avx2 sse2 - vsx2 vsx3 - asimd neon neon_vfpv4 asimddp - vxe vxe2 - */ - """, - x86="avx512f sse42 avx2 sse2", - ppc64="vsx2 vsx3", - armhf="asimd neon neon_vfpv4 asimddp", - # neon, neon_vfpv4, asimd implies each other - aarch64="asimd asimddp", - s390x="vxe vxe2" - ) - # 'autovec', skipping features that can't be - # vectorized by the compiler - self.expect_targets( - """ - /*@targets - $keep_baseline $keep_sort $autovec - avx512f avx2 sse42 sse41 sse2 - vsx3 vsx2 - asimddp asimd neon_vfpv4 neon - */ - """, - x86_gcc="avx512f avx2 sse42 sse41 sse2", - x86_icc="avx512f avx2 sse42 sse41 sse2", - x86_iccw="avx512f avx2 sse42 sse41 sse2", - x86_msvc="avx512f avx2 sse2" - if self.march() == 'x86' else "avx512f avx2", - ppc64="vsx3 vsx2", - armhf="asimddp asimd neon_vfpv4 neon", - # neon, neon_vfpv4, asimd implies each other - aarch64="asimddp asimd" - ) - for policy in ("$maxopt", "$autovec"): - # 'maxopt' and autovec set the max acceptable optimization flags - self.expect_target_flags( - "/*@targets baseline %s */" % policy, - gcc={"baseline":".*-O3.*"}, icc={"baseline":".*-O3.*"}, - iccw={"baseline":".*/O3.*"}, msvc={"baseline":".*/O2.*"}, - unknown={"baseline":".*"} - ) - - # 'werror', force compilers to treat warnings as errors - self.expect_target_flags( - "/*@targets baseline $werror */", - gcc={"baseline":".*-Werror.*"}, icc={"baseline":".*-Werror.*"}, - iccw={"baseline":".*/Werror.*"}, msvc={"baseline":".*/WX.*"}, - unknown={"baseline":".*"} - ) - - def test_targets_groups(self): - self.expect_targets( - """ - /*@targets $keep_baseline baseline #test_group */ - """, - groups=dict( - test_group=(""" - $keep_baseline - asimddp sse2 vsx2 avx2 vsx3 - avx512f asimdhp - """) - ), - x86="avx512f avx2 sse2 baseline", - ppc64="vsx3 vsx2 baseline", - armhf="asimddp asimdhp baseline" - ) - # test skip duplicating and sorting - self.expect_targets( - """ - /*@targets - * sse42 avx avx512f - 
* #test_group_1 - * vsx2 - * #test_group_2 - * asimddp asimdfhm - */ - """, - groups=dict( - test_group_1=(""" - VSX2 vsx3 asimd avx2 SSE41 - """), - test_group_2=(""" - vsx2 vsx3 asImd aVx2 sse41 - """) - ), - x86="avx512f avx2 avx sse42 sse41", - ppc64="vsx3 vsx2", - # vsx2 part of the default baseline of ppc64le, option ("min") - ppc64le="vsx3", - armhf="asimdfhm asimddp asimd", - # asimd part of the default baseline of aarch64, option ("min") - aarch64="asimdfhm asimddp" - ) - - def test_targets_multi(self): - self.expect_targets( - """ - /*@targets - (avx512_clx avx512_cnl) (asimdhp asimddp) - */ - """, - x86=r"\(avx512_clx avx512_cnl\)", - armhf=r"\(asimdhp asimddp\)", - ) - # test skipping implied features and auto-sort - self.expect_targets( - """ - /*@targets - f16c (sse41 avx sse42) (sse3 avx2 avx512f) - vsx2 (vsx vsx3 vsx2) - (neon neon_vfpv4 asimd asimdhp asimddp) - */ - """, - x86="avx512f f16c avx", - ppc64="vsx3 vsx2", - ppc64le="vsx3", # vsx2 part of baseline - armhf=r"\(asimdhp asimddp\)", - ) - # test skipping implied features and keep sort - self.expect_targets( - """ - /*@targets $keep_sort - (sse41 avx sse42) (sse3 avx2 avx512f) - (vsx vsx3 vsx2) - (asimddp neon neon_vfpv4 asimd asimdhp) - (vx vxe vxe2) - */ - """, - x86="avx avx512f", - ppc64="vsx3", - armhf=r"\(asimdhp asimddp\)", - s390x="vxe2" - ) - # test compiler variety and avoiding duplicating - self.expect_targets( - """ - /*@targets $keep_sort - fma3 avx2 (fma3 avx2) (avx2 fma3) avx2 fma3 - */ - """, - x86_gcc=r"fma3 avx2 \(fma3 avx2\)", - x86_icc="avx2", x86_iccw="avx2", - x86_msvc="avx2" - ) - -def new_test(arch, cc): - if is_standalone: return textwrap.dedent("""\ - class TestCCompilerOpt_{class_name}(_Test_CCompilerOpt, unittest.TestCase): - arch = '{arch}' - cc = '{cc}' - def __init__(self, methodName="runTest"): - unittest.TestCase.__init__(self, methodName) - self.setup_class() - """).format( - class_name=arch + '_' + cc, arch=arch, cc=cc - ) - return textwrap.dedent("""\ - 
class TestCCompilerOpt_{class_name}(_Test_CCompilerOpt): - arch = '{arch}' - cc = '{cc}' - """).format( - class_name=arch + '_' + cc, arch=arch, cc=cc - ) -""" -if 1 and is_standalone: - FakeCCompilerOpt.fake_info = "x86_icc" - cco = FakeCCompilerOpt(None, cpu_baseline="avx2") - print(' '.join(cco.cpu_baseline_names())) - print(cco.cpu_baseline_flags()) - unittest.main() - sys.exit() -""" -for arch, compilers in arch_compilers.items(): - for cc in compilers: - exec(new_test(arch, cc)) - -if is_standalone: - unittest.main() diff --git a/numpy/distutils/tests/test_ccompiler_opt_conf.py b/numpy/distutils/tests/test_ccompiler_opt_conf.py deleted file mode 100644 index d9e8b2b0a834..000000000000 --- a/numpy/distutils/tests/test_ccompiler_opt_conf.py +++ /dev/null @@ -1,176 +0,0 @@ -import unittest -from os import sys, path - -is_standalone = __name__ == '__main__' and __package__ is None -if is_standalone: - sys.path.append(path.abspath(path.join(path.dirname(__file__), ".."))) - from ccompiler_opt import CCompilerOpt -else: - from numpy.distutils.ccompiler_opt import CCompilerOpt - -arch_compilers = dict( - x86 = ("gcc", "clang", "icc", "iccw", "msvc"), - x64 = ("gcc", "clang", "icc", "iccw", "msvc"), - ppc64 = ("gcc", "clang"), - ppc64le = ("gcc", "clang"), - armhf = ("gcc", "clang"), - aarch64 = ("gcc", "clang"), - narch = ("gcc",) -) - -class FakeCCompilerOpt(CCompilerOpt): - fake_info = ("arch", "compiler", "extra_args") - def __init__(self, *args, **kwargs): - CCompilerOpt.__init__(self, None, **kwargs) - def dist_compile(self, sources, flags, **kwargs): - return sources - def dist_info(self): - return FakeCCompilerOpt.fake_info - @staticmethod - def dist_log(*args, stderr=False): - pass - -class _TestConfFeatures(FakeCCompilerOpt): - """A hook to check the sanity of configured features -- before it called by the abstract class '_Feature' - """ - - def conf_features_partial(self): - conf_all = self.conf_features - for feature_name, feature in conf_all.items(): - 
self.test_feature( - "attribute conf_features", - conf_all, feature_name, feature - ) - - conf_partial = FakeCCompilerOpt.conf_features_partial(self) - for feature_name, feature in conf_partial.items(): - self.test_feature( - "conf_features_partial()", - conf_partial, feature_name, feature - ) - return conf_partial - - def test_feature(self, log, search_in, feature_name, feature_dict): - error_msg = ( - "during validate '{}' within feature '{}', " - "march '{}' and compiler '{}'\n>> " - ).format(log, feature_name, self.cc_march, self.cc_name) - - if not feature_name.isupper(): - raise AssertionError(error_msg + "feature name must be in uppercase") - - for option, val in feature_dict.items(): - self.test_option_types(error_msg, option, val) - self.test_duplicates(error_msg, option, val) - - self.test_implies(error_msg, search_in, feature_name, feature_dict) - self.test_group(error_msg, search_in, feature_name, feature_dict) - self.test_extra_checks(error_msg, search_in, feature_name, feature_dict) - - def test_option_types(self, error_msg, option, val): - for tp, available in ( - ((str, list), ( - "implies", "headers", "flags", "group", "detect", "extra_checks" - )), - ((str,), ("disable",)), - ((int,), ("interest",)), - ((bool,), ("implies_detect",)), - ((bool, type(None)), ("autovec",)), - ) : - found_it = option in available - if not found_it: - continue - if not isinstance(val, tp): - error_tp = [t.__name__ for t in (*tp,)] - error_tp = ' or '.join(error_tp) - raise AssertionError(error_msg + - "expected '%s' type for option '%s' not '%s'" % ( - error_tp, option, type(val).__name__ - )) - break - - if not found_it: - raise AssertionError(error_msg + "invalid option name '%s'" % option) - - def test_duplicates(self, error_msg, option, val): - if option not in ( - "implies", "headers", "flags", "group", "detect", "extra_checks" - ) : return - - if isinstance(val, str): - val = val.split() - - if len(val) != len(set(val)): - raise AssertionError(error_msg + 
"duplicated values in option '%s'" % option) - - def test_implies(self, error_msg, search_in, feature_name, feature_dict): - if feature_dict.get("disabled") is not None: - return - implies = feature_dict.get("implies", "") - if not implies: - return - if isinstance(implies, str): - implies = implies.split() - - if feature_name in implies: - raise AssertionError(error_msg + "feature implies itself") - - for impl in implies: - impl_dict = search_in.get(impl) - if impl_dict is not None: - if "disable" in impl_dict: - raise AssertionError(error_msg + "implies disabled feature '%s'" % impl) - continue - raise AssertionError(error_msg + "implies non-exist feature '%s'" % impl) - - def test_group(self, error_msg, search_in, feature_name, feature_dict): - if feature_dict.get("disabled") is not None: - return - group = feature_dict.get("group", "") - if not group: - return - if isinstance(group, str): - group = group.split() - - for f in group: - impl_dict = search_in.get(f) - if not impl_dict or "disable" in impl_dict: - continue - raise AssertionError(error_msg + - "in option 'group', '%s' already exists as a feature name" % f - ) - - def test_extra_checks(self, error_msg, search_in, feature_name, feature_dict): - if feature_dict.get("disabled") is not None: - return - extra_checks = feature_dict.get("extra_checks", "") - if not extra_checks: - return - if isinstance(extra_checks, str): - extra_checks = extra_checks.split() - - for f in extra_checks: - impl_dict = search_in.get(f) - if not impl_dict or "disable" in impl_dict: - continue - raise AssertionError(error_msg + - "in option 'extra_checks', extra test case '%s' already exists as a feature name" % f - ) - -class TestConfFeatures(unittest.TestCase): - def __init__(self, methodName="runTest"): - unittest.TestCase.__init__(self, methodName) - self._setup() - - def _setup(self): - FakeCCompilerOpt.conf_nocache = True - - def test_features(self): - for arch, compilers in arch_compilers.items(): - for cc in compilers: - 
FakeCCompilerOpt.fake_info = (arch, cc, "") - _TestConfFeatures() - -if is_standalone: - unittest.main() diff --git a/numpy/distutils/tests/test_exec_command.py b/numpy/distutils/tests/test_exec_command.py deleted file mode 100644 index 749523528e63..000000000000 --- a/numpy/distutils/tests/test_exec_command.py +++ /dev/null @@ -1,217 +0,0 @@ -import os -import pytest -import sys -from tempfile import TemporaryFile - -from numpy.distutils import exec_command -from numpy.distutils.exec_command import get_pythonexe -from numpy.testing import tempdir, assert_, IS_WASM - - -# In python 3 stdout, stderr are text (unicode compliant) devices, so to -# emulate them import StringIO from the io module. -from io import StringIO - -class redirect_stdout: - """Context manager to redirect stdout for exec_command test.""" - def __init__(self, stdout=None): - self._stdout = stdout or sys.stdout - - def __enter__(self): - self.old_stdout = sys.stdout - sys.stdout = self._stdout - - def __exit__(self, exc_type, exc_value, traceback): - self._stdout.flush() - sys.stdout = self.old_stdout - # note: closing sys.stdout won't close it. - self._stdout.close() - -class redirect_stderr: - """Context manager to redirect stderr for exec_command test.""" - def __init__(self, stderr=None): - self._stderr = stderr or sys.stderr - - def __enter__(self): - self.old_stderr = sys.stderr - sys.stderr = self._stderr - - def __exit__(self, exc_type, exc_value, traceback): - self._stderr.flush() - sys.stderr = self.old_stderr - # note: closing sys.stderr won't close it. - self._stderr.close() - -class emulate_nonposix: - """Context manager to emulate os.name != 'posix' """ - def __init__(self, osname='non-posix'): - self._new_name = osname - - def __enter__(self): - self._old_name = os.name - os.name = self._new_name - - def __exit__(self, exc_type, exc_value, traceback): - os.name = self._old_name - - -def test_exec_command_stdout(): - # Regression test for gh-2999 and gh-2915. 
- # There are several packages (nose, scipy.weave.inline, Sage inline - # Fortran) that replace stdout, in which case it doesn't have a fileno - # method. This is tested here, with a do-nothing command that fails if the - # presence of fileno() is assumed in exec_command. - - # The code has a special case for posix systems, so if we are on posix test - # both that the special case works and that the generic code works. - - # Test posix version: - with redirect_stdout(StringIO()): - with redirect_stderr(TemporaryFile()): - with pytest.warns(DeprecationWarning): - exec_command.exec_command("cd '.'") - - if os.name == 'posix': - # Test general (non-posix) version: - with emulate_nonposix(): - with redirect_stdout(StringIO()): - with redirect_stderr(TemporaryFile()): - with pytest.warns(DeprecationWarning): - exec_command.exec_command("cd '.'") - -def test_exec_command_stderr(): - # Test posix version: - with redirect_stdout(TemporaryFile(mode='w+')): - with redirect_stderr(StringIO()): - with pytest.warns(DeprecationWarning): - exec_command.exec_command("cd '.'") - - if os.name == 'posix': - # Test general (non-posix) version: - with emulate_nonposix(): - with redirect_stdout(TemporaryFile()): - with redirect_stderr(StringIO()): - with pytest.warns(DeprecationWarning): - exec_command.exec_command("cd '.'") - - -@pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess") -class TestExecCommand: - def setup_method(self): - self.pyexe = get_pythonexe() - - def check_nt(self, **kws): - s, o = exec_command.exec_command('cmd /C echo path=%path%') - assert_(s == 0) - assert_(o != '') - - s, o = exec_command.exec_command( - '"%s" -c "import sys;sys.stderr.write(sys.platform)"' % self.pyexe) - assert_(s == 0) - assert_(o == 'win32') - - def check_posix(self, **kws): - s, o = exec_command.exec_command("echo Hello", **kws) - assert_(s == 0) - assert_(o == 'Hello') - - s, o = exec_command.exec_command('echo $AAA', **kws) - assert_(s == 0) - assert_(o == '') - - s, o = 
exec_command.exec_command('echo "$AAA"', AAA='Tere', **kws) - assert_(s == 0) - assert_(o == 'Tere') - - s, o = exec_command.exec_command('echo "$AAA"', **kws) - assert_(s == 0) - assert_(o == '') - - if 'BBB' not in os.environ: - os.environ['BBB'] = 'Hi' - s, o = exec_command.exec_command('echo "$BBB"', **kws) - assert_(s == 0) - assert_(o == 'Hi') - - s, o = exec_command.exec_command('echo "$BBB"', BBB='Hey', **kws) - assert_(s == 0) - assert_(o == 'Hey') - - s, o = exec_command.exec_command('echo "$BBB"', **kws) - assert_(s == 0) - assert_(o == 'Hi') - - del os.environ['BBB'] - - s, o = exec_command.exec_command('echo "$BBB"', **kws) - assert_(s == 0) - assert_(o == '') - - - s, o = exec_command.exec_command('this_is_not_a_command', **kws) - assert_(s != 0) - assert_(o != '') - - s, o = exec_command.exec_command('echo path=$PATH', **kws) - assert_(s == 0) - assert_(o != '') - - s, o = exec_command.exec_command( - '"%s" -c "import sys,os;sys.stderr.write(os.name)"' % - self.pyexe, **kws) - assert_(s == 0) - assert_(o == 'posix') - - def check_basic(self, *kws): - s, o = exec_command.exec_command( - '"%s" -c "raise \'Ignore me.\'"' % self.pyexe, **kws) - assert_(s != 0) - assert_(o != '') - - s, o = exec_command.exec_command( - '"%s" -c "import sys;sys.stderr.write(\'0\');' - 'sys.stderr.write(\'1\');sys.stderr.write(\'2\')"' % - self.pyexe, **kws) - assert_(s == 0) - assert_(o == '012') - - s, o = exec_command.exec_command( - '"%s" -c "import sys;sys.exit(15)"' % self.pyexe, **kws) - assert_(s == 15) - assert_(o == '') - - s, o = exec_command.exec_command( - '"%s" -c "print(\'Heipa\'")' % self.pyexe, **kws) - assert_(s == 0) - assert_(o == 'Heipa') - - def check_execute_in(self, **kws): - with tempdir() as tmpdir: - fn = "file" - tmpfile = os.path.join(tmpdir, fn) - with open(tmpfile, 'w') as f: - f.write('Hello') - - s, o = exec_command.exec_command( - '"%s" -c "f = open(\'%s\', \'r\'); f.close()"' % - (self.pyexe, fn), **kws) - assert_(s != 0) - assert_(o != 
'') - s, o = exec_command.exec_command( - '"%s" -c "f = open(\'%s\', \'r\'); print(f.read()); ' - 'f.close()"' % (self.pyexe, fn), execute_in=tmpdir, **kws) - assert_(s == 0) - assert_(o == 'Hello') - - def test_basic(self): - with redirect_stdout(StringIO()): - with redirect_stderr(StringIO()): - with pytest.warns(DeprecationWarning): - if os.name == "posix": - self.check_posix(use_tee=0) - self.check_posix(use_tee=1) - elif os.name == "nt": - self.check_nt(use_tee=0) - self.check_nt(use_tee=1) - self.check_execute_in(use_tee=0) - self.check_execute_in(use_tee=1) diff --git a/numpy/distutils/tests/test_fcompiler.py b/numpy/distutils/tests/test_fcompiler.py deleted file mode 100644 index dd97f1e72afc..000000000000 --- a/numpy/distutils/tests/test_fcompiler.py +++ /dev/null @@ -1,43 +0,0 @@ -from numpy.testing import assert_ -import numpy.distutils.fcompiler - -customizable_flags = [ - ('f77', 'F77FLAGS'), - ('f90', 'F90FLAGS'), - ('free', 'FREEFLAGS'), - ('arch', 'FARCH'), - ('debug', 'FDEBUG'), - ('flags', 'FFLAGS'), - ('linker_so', 'LDFLAGS'), -] - - -def test_fcompiler_flags(monkeypatch): - monkeypatch.setenv('NPY_DISTUTILS_APPEND_FLAGS', '0') - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='none') - flag_vars = fc.flag_vars.clone(lambda *args, **kwargs: None) - - for opt, envvar in customizable_flags: - new_flag = '-dummy-{}-flag'.format(opt) - prev_flags = getattr(flag_vars, opt) - - monkeypatch.setenv(envvar, new_flag) - new_flags = getattr(flag_vars, opt) - - monkeypatch.delenv(envvar) - assert_(new_flags == [new_flag]) - - monkeypatch.setenv('NPY_DISTUTILS_APPEND_FLAGS', '1') - - for opt, envvar in customizable_flags: - new_flag = '-dummy-{}-flag'.format(opt) - prev_flags = getattr(flag_vars, opt) - monkeypatch.setenv(envvar, new_flag) - new_flags = getattr(flag_vars, opt) - - monkeypatch.delenv(envvar) - if prev_flags is None: - assert_(new_flags == [new_flag]) - else: - assert_(new_flags == prev_flags + [new_flag]) - diff --git 
a/numpy/distutils/tests/test_fcompiler_gnu.py b/numpy/distutils/tests/test_fcompiler_gnu.py deleted file mode 100644 index 0817ae58c214..000000000000 --- a/numpy/distutils/tests/test_fcompiler_gnu.py +++ /dev/null @@ -1,55 +0,0 @@ -from numpy.testing import assert_ - -import numpy.distutils.fcompiler - -g77_version_strings = [ - ('GNU Fortran 0.5.25 20010319 (prerelease)', '0.5.25'), - ('GNU Fortran (GCC 3.2) 3.2 20020814 (release)', '3.2'), - ('GNU Fortran (GCC) 3.3.3 20040110 (prerelease) (Debian)', '3.3.3'), - ('GNU Fortran (GCC) 3.3.3 (Debian 20040401)', '3.3.3'), - ('GNU Fortran (GCC 3.2.2 20030222 (Red Hat Linux 3.2.2-5)) 3.2.2' - ' 20030222 (Red Hat Linux 3.2.2-5)', '3.2.2'), -] - -gfortran_version_strings = [ - ('GNU Fortran 95 (GCC 4.0.3 20051023 (prerelease) (Debian 4.0.2-3))', - '4.0.3'), - ('GNU Fortran 95 (GCC) 4.1.0', '4.1.0'), - ('GNU Fortran 95 (GCC) 4.2.0 20060218 (experimental)', '4.2.0'), - ('GNU Fortran (GCC) 4.3.0 20070316 (experimental)', '4.3.0'), - ('GNU Fortran (rubenvb-4.8.0) 4.8.0', '4.8.0'), - ('4.8.0', '4.8.0'), - ('4.0.3-7', '4.0.3'), - ("gfortran: warning: couldn't understand kern.osversion '14.1.0\n4.9.1", - '4.9.1'), - ("gfortran: warning: couldn't understand kern.osversion '14.1.0\n" - "gfortran: warning: yet another warning\n4.9.1", - '4.9.1'), - ('GNU Fortran (crosstool-NG 8a21ab48) 7.2.0', '7.2.0') -] - -class TestG77Versions: - def test_g77_version(self): - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu') - for vs, version in g77_version_strings: - v = fc.version_match(vs) - assert_(v == version, (vs, v)) - - def test_not_g77(self): - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu') - for vs, _ in gfortran_version_strings: - v = fc.version_match(vs) - assert_(v is None, (vs, v)) - -class TestGFortranVersions: - def test_gfortran_version(self): - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu95') - for vs, version in gfortran_version_strings: - v = fc.version_match(vs) - assert_(v == version, 
(vs, v)) - - def test_not_gfortran(self): - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu95') - for vs, _ in g77_version_strings: - v = fc.version_match(vs) - assert_(v is None, (vs, v)) diff --git a/numpy/distutils/tests/test_fcompiler_intel.py b/numpy/distutils/tests/test_fcompiler_intel.py deleted file mode 100644 index 45c9cdac1910..000000000000 --- a/numpy/distutils/tests/test_fcompiler_intel.py +++ /dev/null @@ -1,30 +0,0 @@ -import numpy.distutils.fcompiler -from numpy.testing import assert_ - - -intel_32bit_version_strings = [ - ("Intel(R) Fortran Intel(R) 32-bit Compiler Professional for applications" - "running on Intel(R) 32, Version 11.1", '11.1'), -] - -intel_64bit_version_strings = [ - ("Intel(R) Fortran IA-64 Compiler Professional for applications" - "running on IA-64, Version 11.0", '11.0'), - ("Intel(R) Fortran Intel(R) 64 Compiler Professional for applications" - "running on Intel(R) 64, Version 11.1", '11.1') -] - -class TestIntelFCompilerVersions: - def test_32bit_version(self): - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='intel') - for vs, version in intel_32bit_version_strings: - v = fc.version_match(vs) - assert_(v == version) - - -class TestIntelEM64TFCompilerVersions: - def test_64bit_version(self): - fc = numpy.distutils.fcompiler.new_fcompiler(compiler='intelem') - for vs, version in intel_64bit_version_strings: - v = fc.version_match(vs) - assert_(v == version) diff --git a/numpy/distutils/tests/test_fcompiler_nagfor.py b/numpy/distutils/tests/test_fcompiler_nagfor.py deleted file mode 100644 index 2e04f5266dc1..000000000000 --- a/numpy/distutils/tests/test_fcompiler_nagfor.py +++ /dev/null @@ -1,22 +0,0 @@ -from numpy.testing import assert_ -import numpy.distutils.fcompiler - -nag_version_strings = [('nagfor', 'NAG Fortran Compiler Release ' - '6.2(Chiyoda) Build 6200', '6.2'), - ('nagfor', 'NAG Fortran Compiler Release ' - '6.1(Tozai) Build 6136', '6.1'), - ('nagfor', 'NAG Fortran Compiler Release ' - 
'6.0(Hibiya) Build 1021', '6.0'), - ('nagfor', 'NAG Fortran Compiler Release ' - '5.3.2(971)', '5.3.2'), - ('nag', 'NAGWare Fortran 95 compiler Release 5.1' - '(347,355-367,375,380-383,389,394,399,401-402,407,' - '431,435,437,446,459-460,463,472,494,496,503,508,' - '511,517,529,555,557,565)', '5.1')] - -class TestNagFCompilerVersions: - def test_version_match(self): - for comp, vs, version in nag_version_strings: - fc = numpy.distutils.fcompiler.new_fcompiler(compiler=comp) - v = fc.version_match(vs) - assert_(v == version) diff --git a/numpy/distutils/tests/test_from_template.py b/numpy/distutils/tests/test_from_template.py deleted file mode 100644 index 588175496299..000000000000 --- a/numpy/distutils/tests/test_from_template.py +++ /dev/null @@ -1,44 +0,0 @@ - -from numpy.distutils.from_template import process_str -from numpy.testing import assert_equal - - -pyf_src = """ -python module foo - <_rd=real,double precision> - interface - subroutine foosub(tol) - <_rd>, intent(in,out) :: tol - end subroutine foosub - end interface -end python module foo -""" - -expected_pyf = """ -python module foo - interface - subroutine sfoosub(tol) - real, intent(in,out) :: tol - end subroutine sfoosub - subroutine dfoosub(tol) - double precision, intent(in,out) :: tol - end subroutine dfoosub - end interface -end python module foo -""" - - -def normalize_whitespace(s): - """ - Remove leading and trailing whitespace, and convert internal - stretches of whitespace to a single space. 
- """ - return ' '.join(s.split()) - - -def test_from_template(): - """Regression test for gh-10712.""" - pyf = process_str(pyf_src) - normalized_pyf = normalize_whitespace(pyf) - normalized_expected_pyf = normalize_whitespace(expected_pyf) - assert_equal(normalized_pyf, normalized_expected_pyf) diff --git a/numpy/distutils/tests/test_log.py b/numpy/distutils/tests/test_log.py deleted file mode 100644 index 72fddf37370f..000000000000 --- a/numpy/distutils/tests/test_log.py +++ /dev/null @@ -1,34 +0,0 @@ -import io -import re -from contextlib import redirect_stdout - -import pytest - -from numpy.distutils import log - - -def setup_module(): - f = io.StringIO() # changing verbosity also logs here, capture that - with redirect_stdout(f): - log.set_verbosity(2, force=True) # i.e. DEBUG - - -def teardown_module(): - log.set_verbosity(0, force=True) # the default - - -r_ansi = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])") - - -@pytest.mark.parametrize("func_name", ["error", "warn", "info", "debug"]) -def test_log_prefix(func_name): - func = getattr(log, func_name) - msg = f"{func_name} message" - f = io.StringIO() - with redirect_stdout(f): - func(msg) - out = f.getvalue() - assert out # sanity check - clean_out = r_ansi.sub("", out) - line = next(line for line in clean_out.splitlines()) - assert line == f"{func_name.upper()}: {msg}" diff --git a/numpy/distutils/tests/test_mingw32ccompiler.py b/numpy/distutils/tests/test_mingw32ccompiler.py deleted file mode 100644 index c4eac7b72de1..000000000000 --- a/numpy/distutils/tests/test_mingw32ccompiler.py +++ /dev/null @@ -1,47 +0,0 @@ -import shutil -import subprocess -import sys -import pytest -import os -import sysconfig - -from numpy.distutils import mingw32ccompiler - - -@pytest.mark.skipif(sys.platform != 'win32', reason='win32 only test') -@pytest.mark.skipif(not os.path.exists(os.path.join(sys.prefix, 'libs')), - reason="test requires mingw library layout") -@pytest.mark.skipif(sysconfig.get_platform() == 
'win-arm64', reason='mingw GNU objdump does not understand arm64 binary format yet') -def test_build_import(): - '''Test the mingw32ccompiler.build_import_library, which builds a - `python.a` from the MSVC `python.lib` - ''' - - # make sure `nm.exe` exists and supports the current python version. This - # can get mixed up when the PATH has a 64-bit nm but the python is 32-bit - try: - out = subprocess.check_output(['nm.exe', '--help']) - except FileNotFoundError: - pytest.skip("'nm.exe' not on path, is mingw installed?") - supported = out[out.find(b'supported targets:'):] - if sys.maxsize < 2**32: - if b'pe-i386' not in supported: - raise ValueError("'nm.exe' found but it does not support 32-bit " - "dlls when using 32-bit python. Supported " - "formats: '%s'" % supported) - elif b'pe-x86-64' not in supported: - raise ValueError("'nm.exe' found but it does not support 64-bit " - "dlls when using 64-bit python. Supported " - "formats: '%s'" % supported) - # Hide the import library to force a build - has_import_lib, fullpath = mingw32ccompiler._check_for_import_lib() - if has_import_lib: - shutil.move(fullpath, fullpath + '.bak') - - try: - # Whew, now we can actually test the function - mingw32ccompiler.build_import_library() - - finally: - if has_import_lib: - shutil.move(fullpath + '.bak', fullpath) diff --git a/numpy/distutils/tests/test_misc_util.py b/numpy/distutils/tests/test_misc_util.py deleted file mode 100644 index 40e7606eeb76..000000000000 --- a/numpy/distutils/tests/test_misc_util.py +++ /dev/null @@ -1,88 +0,0 @@ -from os.path import join, sep, dirname - -import pytest - -from numpy.distutils.misc_util import ( - appendpath, minrelpath, gpaths, get_shared_lib_extension, get_info - ) -from numpy.testing import ( - assert_, assert_equal, IS_EDITABLE - ) - -ajoin = lambda *paths: join(*((sep,)+paths)) - -class TestAppendpath: - - def test_1(self): - assert_equal(appendpath('prefix', 'name'), join('prefix', 'name')) - assert_equal(appendpath('/prefix', 
'name'), ajoin('prefix', 'name')) - assert_equal(appendpath('/prefix', '/name'), ajoin('prefix', 'name')) - assert_equal(appendpath('prefix', '/name'), join('prefix', 'name')) - - def test_2(self): - assert_equal(appendpath('prefix/sub', 'name'), - join('prefix', 'sub', 'name')) - assert_equal(appendpath('prefix/sub', 'sup/name'), - join('prefix', 'sub', 'sup', 'name')) - assert_equal(appendpath('/prefix/sub', '/prefix/name'), - ajoin('prefix', 'sub', 'name')) - - def test_3(self): - assert_equal(appendpath('/prefix/sub', '/prefix/sup/name'), - ajoin('prefix', 'sub', 'sup', 'name')) - assert_equal(appendpath('/prefix/sub/sub2', '/prefix/sup/sup2/name'), - ajoin('prefix', 'sub', 'sub2', 'sup', 'sup2', 'name')) - assert_equal(appendpath('/prefix/sub/sub2', '/prefix/sub/sup/name'), - ajoin('prefix', 'sub', 'sub2', 'sup', 'name')) - -class TestMinrelpath: - - def test_1(self): - n = lambda path: path.replace('/', sep) - assert_equal(minrelpath(n('aa/bb')), n('aa/bb')) - assert_equal(minrelpath('..'), '..') - assert_equal(minrelpath(n('aa/..')), '') - assert_equal(minrelpath(n('aa/../bb')), 'bb') - assert_equal(minrelpath(n('aa/bb/..')), 'aa') - assert_equal(minrelpath(n('aa/bb/../..')), '') - assert_equal(minrelpath(n('aa/bb/../cc/../dd')), n('aa/dd')) - assert_equal(minrelpath(n('.././..')), n('../..')) - assert_equal(minrelpath(n('aa/bb/.././../dd')), n('dd')) - -class TestGpaths: - - def test_gpaths(self): - local_path = minrelpath(join(dirname(__file__), '..')) - ls = gpaths('command/*.py', local_path) - assert_(join(local_path, 'command', 'build_src.py') in ls, repr(ls)) - f = gpaths('system_info.py', local_path) - assert_(join(local_path, 'system_info.py') == f[0], repr(f)) - -class TestSharedExtension: - - def test_get_shared_lib_extension(self): - import sys - ext = get_shared_lib_extension(is_python_ext=False) - if sys.platform.startswith('linux'): - assert_equal(ext, '.so') - elif sys.platform.startswith('gnukfreebsd'): - assert_equal(ext, '.so') - elif 
sys.platform.startswith('darwin'): - assert_equal(ext, '.dylib') - elif sys.platform.startswith('win'): - assert_equal(ext, '.dll') - # just check for no crash - assert_(get_shared_lib_extension(is_python_ext=True)) - - -@pytest.mark.skipif( - IS_EDITABLE, - reason="`get_info` .ini lookup method incompatible with editable install" -) -def test_installed_npymath_ini(): - # Regression test for gh-7707. If npymath.ini wasn't installed, then this - # will give an error. - info = get_info('npymath') - - assert isinstance(info, dict) - assert "define_macros" in info diff --git a/numpy/distutils/tests/test_npy_pkg_config.py b/numpy/distutils/tests/test_npy_pkg_config.py deleted file mode 100644 index b287ebe2e832..000000000000 --- a/numpy/distutils/tests/test_npy_pkg_config.py +++ /dev/null @@ -1,84 +0,0 @@ -import os - -from numpy.distutils.npy_pkg_config import read_config, parse_flags -from numpy.testing import temppath, assert_ - -simple = """\ -[meta] -Name = foo -Description = foo lib -Version = 0.1 - -[default] -cflags = -I/usr/include -libs = -L/usr/lib -""" -simple_d = {'cflags': '-I/usr/include', 'libflags': '-L/usr/lib', - 'version': '0.1', 'name': 'foo'} - -simple_variable = """\ -[meta] -Name = foo -Description = foo lib -Version = 0.1 - -[variables] -prefix = /foo/bar -libdir = ${prefix}/lib -includedir = ${prefix}/include - -[default] -cflags = -I${includedir} -libs = -L${libdir} -""" -simple_variable_d = {'cflags': '-I/foo/bar/include', 'libflags': '-L/foo/bar/lib', - 'version': '0.1', 'name': 'foo'} - -class TestLibraryInfo: - def test_simple(self): - with temppath('foo.ini') as path: - with open(path, 'w') as f: - f.write(simple) - pkg = os.path.splitext(path)[0] - out = read_config(pkg) - - assert_(out.cflags() == simple_d['cflags']) - assert_(out.libs() == simple_d['libflags']) - assert_(out.name == simple_d['name']) - assert_(out.version == simple_d['version']) - - def test_simple_variable(self): - with temppath('foo.ini') as path: - with open(path, 
'w') as f: - f.write(simple_variable) - pkg = os.path.splitext(path)[0] - out = read_config(pkg) - - assert_(out.cflags() == simple_variable_d['cflags']) - assert_(out.libs() == simple_variable_d['libflags']) - assert_(out.name == simple_variable_d['name']) - assert_(out.version == simple_variable_d['version']) - out.vars['prefix'] = '/Users/david' - assert_(out.cflags() == '-I/Users/david/include') - -class TestParseFlags: - def test_simple_cflags(self): - d = parse_flags("-I/usr/include") - assert_(d['include_dirs'] == ['/usr/include']) - - d = parse_flags("-I/usr/include -DFOO") - assert_(d['include_dirs'] == ['/usr/include']) - assert_(d['macros'] == ['FOO']) - - d = parse_flags("-I /usr/include -DFOO") - assert_(d['include_dirs'] == ['/usr/include']) - assert_(d['macros'] == ['FOO']) - - def test_simple_lflags(self): - d = parse_flags("-L/usr/lib -lfoo -L/usr/lib -lbar") - assert_(d['library_dirs'] == ['/usr/lib', '/usr/lib']) - assert_(d['libraries'] == ['foo', 'bar']) - - d = parse_flags("-L /usr/lib -lfoo -L/usr/lib -lbar") - assert_(d['library_dirs'] == ['/usr/lib', '/usr/lib']) - assert_(d['libraries'] == ['foo', 'bar']) diff --git a/numpy/distutils/tests/test_shell_utils.py b/numpy/distutils/tests/test_shell_utils.py deleted file mode 100644 index 696d38ddd66a..000000000000 --- a/numpy/distutils/tests/test_shell_utils.py +++ /dev/null @@ -1,79 +0,0 @@ -import pytest -import subprocess -import json -import sys - -from numpy.distutils import _shell_utils -from numpy.testing import IS_WASM - -argv_cases = [ - [r'exe'], - [r'path/exe'], - [r'path\exe'], - [r'\\server\path\exe'], - [r'path to/exe'], - [r'path to\exe'], - - [r'exe', '--flag'], - [r'path/exe', '--flag'], - [r'path\exe', '--flag'], - [r'path to/exe', '--flag'], - [r'path to\exe', '--flag'], - - # flags containing literal quotes in their name - [r'path to/exe', '--flag-"quoted"'], - [r'path to\exe', '--flag-"quoted"'], - [r'path to/exe', '"--flag-quoted"'], - [r'path to\exe', '"--flag-quoted"'], 
-] - - -@pytest.fixture(params=[ - _shell_utils.WindowsParser, - _shell_utils.PosixParser -]) -def Parser(request): - return request.param - - -@pytest.fixture -def runner(Parser): - if Parser != _shell_utils.NativeParser: - pytest.skip('Unable to run with non-native parser') - - if Parser == _shell_utils.WindowsParser: - return lambda cmd: subprocess.check_output(cmd) - elif Parser == _shell_utils.PosixParser: - # posix has no non-shell string parsing - return lambda cmd: subprocess.check_output(cmd, shell=True) - else: - raise NotImplementedError - - -@pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess") -@pytest.mark.parametrize('argv', argv_cases) -def test_join_matches_subprocess(Parser, runner, argv): - """ - Test that join produces strings understood by subprocess - """ - # invoke python to return its arguments as json - cmd = [ - sys.executable, '-c', - 'import json, sys; print(json.dumps(sys.argv[1:]))' - ] - joined = Parser.join(cmd + argv) - json_out = runner(joined).decode() - assert json.loads(json_out) == argv - - -@pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess") -@pytest.mark.parametrize('argv', argv_cases) -def test_roundtrip(Parser, argv): - """ - Test that split is the inverse operation of join - """ - try: - joined = Parser.join(argv) - assert argv == Parser.split(joined) - except NotImplementedError: - pytest.skip("Not implemented") diff --git a/numpy/distutils/tests/test_system_info.py b/numpy/distutils/tests/test_system_info.py deleted file mode 100644 index 5887abea76bd..000000000000 --- a/numpy/distutils/tests/test_system_info.py +++ /dev/null @@ -1,334 +0,0 @@ -import os -import shutil -import pytest -from tempfile import mkstemp, mkdtemp -from subprocess import Popen, PIPE -import importlib.metadata -from distutils.errors import DistutilsError - -from numpy.testing import assert_, assert_equal, assert_raises -from numpy.distutils import ccompiler, customized_ccompiler -from numpy.distutils.system_info import 
system_info, ConfigParser, mkl_info -from numpy.distutils.system_info import AliasedOptionError -from numpy.distutils.system_info import default_lib_dirs, default_include_dirs -from numpy.distutils import _shell_utils - - -try: - if importlib.metadata.version('setuptools') >= '60': - # pkg-resources gives deprecation warnings, and there may be more - # issues. We only support setuptools <60 - pytest.skip("setuptools is too new", allow_module_level=True) -except importlib.metadata.PackageNotFoundError: - # we don't require `setuptools`; if it is not found, continue - pass - - -def get_class(name, notfound_action=1): - """ - notfound_action: - 0 - do nothing - 1 - display warning message - 2 - raise error - """ - cl = {'temp1': Temp1Info, - 'temp2': Temp2Info, - 'duplicate_options': DuplicateOptionInfo, - }.get(name.lower(), _system_info) - return cl() - -simple_site = """ -[ALL] -library_dirs = {dir1:s}{pathsep:s}{dir2:s} -libraries = {lib1:s},{lib2:s} -extra_compile_args = -I/fake/directory -I"/path with/spaces" -Os -runtime_library_dirs = {dir1:s} - -[temp1] -library_dirs = {dir1:s} -libraries = {lib1:s} -runtime_library_dirs = {dir1:s} - -[temp2] -library_dirs = {dir2:s} -libraries = {lib2:s} -extra_link_args = -Wl,-rpath={lib2_escaped:s} -rpath = {dir2:s} - -[duplicate_options] -mylib_libs = {lib1:s} -libraries = {lib2:s} -""" -site_cfg = simple_site - -fakelib_c_text = """ -/* This file is generated from numpy/distutils/testing/test_system_info.py */ -#include -void foo(void) { - printf("Hello foo"); -} -void bar(void) { - printf("Hello bar"); -} -""" - -def have_compiler(): - """ Return True if there appears to be an executable compiler - """ - compiler = customized_ccompiler() - try: - cmd = compiler.compiler # Unix compilers - except AttributeError: - try: - if not compiler.initialized: - compiler.initialize() # MSVC is different - except (DistutilsError, ValueError): - return False - cmd = [compiler.cc] - try: - p = Popen(cmd, stdout=PIPE, stderr=PIPE) - 
p.stdout.close() - p.stderr.close() - p.wait() - except OSError: - return False - return True - - -HAVE_COMPILER = have_compiler() - - -class _system_info(system_info): - - def __init__(self, - default_lib_dirs=default_lib_dirs, - default_include_dirs=default_include_dirs, - verbosity=1, - ): - self.__class__.info = {} - self.local_prefixes = [] - defaults = {'library_dirs': '', - 'include_dirs': '', - 'runtime_library_dirs': '', - 'rpath': '', - 'src_dirs': '', - 'search_static_first': "0", - 'extra_compile_args': '', - 'extra_link_args': ''} - self.cp = ConfigParser(defaults) - # We have to parse the config files afterwards - # to have a consistent temporary filepath - - def _check_libs(self, lib_dirs, libs, opt_libs, exts): - """Override _check_libs to return with all dirs """ - info = {'libraries': libs, 'library_dirs': lib_dirs} - return info - - -class Temp1Info(_system_info): - """For testing purposes""" - section = 'temp1' - - -class Temp2Info(_system_info): - """For testing purposes""" - section = 'temp2' - -class DuplicateOptionInfo(_system_info): - """For testing purposes""" - section = 'duplicate_options' - - -class TestSystemInfoReading: - - def setup_method(self): - """ Create the libraries """ - # Create 2 sources and 2 libraries - self._dir1 = mkdtemp() - self._src1 = os.path.join(self._dir1, 'foo.c') - self._lib1 = os.path.join(self._dir1, 'libfoo.so') - self._dir2 = mkdtemp() - self._src2 = os.path.join(self._dir2, 'bar.c') - self._lib2 = os.path.join(self._dir2, 'libbar.so') - # Update local site.cfg - global simple_site, site_cfg - site_cfg = simple_site.format( - dir1=self._dir1, - lib1=self._lib1, - dir2=self._dir2, - lib2=self._lib2, - pathsep=os.pathsep, - lib2_escaped=_shell_utils.NativeParser.join([self._lib2]) - ) - # Write site.cfg - fd, self._sitecfg = mkstemp() - os.close(fd) - with open(self._sitecfg, 'w') as fd: - fd.write(site_cfg) - # Write the sources - with open(self._src1, 'w') as fd: - fd.write(fakelib_c_text) - with 
open(self._src2, 'w') as fd: - fd.write(fakelib_c_text) - # We create all class-instances - - def site_and_parse(c, site_cfg): - c.files = [site_cfg] - c.parse_config_files() - return c - self.c_default = site_and_parse(get_class('default'), self._sitecfg) - self.c_temp1 = site_and_parse(get_class('temp1'), self._sitecfg) - self.c_temp2 = site_and_parse(get_class('temp2'), self._sitecfg) - self.c_dup_options = site_and_parse(get_class('duplicate_options'), - self._sitecfg) - - def teardown_method(self): - # Do each removal separately - try: - shutil.rmtree(self._dir1) - except Exception: - pass - try: - shutil.rmtree(self._dir2) - except Exception: - pass - try: - os.remove(self._sitecfg) - except Exception: - pass - - def test_all(self): - # Read in all information in the ALL block - tsi = self.c_default - assert_equal(tsi.get_lib_dirs(), [self._dir1, self._dir2]) - assert_equal(tsi.get_libraries(), [self._lib1, self._lib2]) - assert_equal(tsi.get_runtime_lib_dirs(), [self._dir1]) - extra = tsi.calc_extra_info() - assert_equal(extra['extra_compile_args'], ['-I/fake/directory', '-I/path with/spaces', '-Os']) - - def test_temp1(self): - # Read in all information in the temp1 block - tsi = self.c_temp1 - assert_equal(tsi.get_lib_dirs(), [self._dir1]) - assert_equal(tsi.get_libraries(), [self._lib1]) - assert_equal(tsi.get_runtime_lib_dirs(), [self._dir1]) - - def test_temp2(self): - # Read in all information in the temp2 block - tsi = self.c_temp2 - assert_equal(tsi.get_lib_dirs(), [self._dir2]) - assert_equal(tsi.get_libraries(), [self._lib2]) - # Now from rpath and not runtime_library_dirs - assert_equal(tsi.get_runtime_lib_dirs(key='rpath'), [self._dir2]) - extra = tsi.calc_extra_info() - assert_equal(extra['extra_link_args'], ['-Wl,-rpath=' + self._lib2]) - - def test_duplicate_options(self): - # Ensure that duplicates are raising an AliasedOptionError - tsi = self.c_dup_options - assert_raises(AliasedOptionError, tsi.get_option_single, "mylib_libs", "libraries") 
- assert_equal(tsi.get_libs("mylib_libs", [self._lib1]), [self._lib1]) - assert_equal(tsi.get_libs("libraries", [self._lib2]), [self._lib2]) - - @pytest.mark.skipif(not HAVE_COMPILER, reason="Missing compiler") - def test_compile1(self): - # Compile source and link the first source - c = customized_ccompiler() - previousDir = os.getcwd() - try: - # Change directory to not screw up directories - os.chdir(self._dir1) - c.compile([os.path.basename(self._src1)], output_dir=self._dir1) - # Ensure that the object exists - assert_(os.path.isfile(self._src1.replace('.c', '.o')) or - os.path.isfile(self._src1.replace('.c', '.obj'))) - finally: - os.chdir(previousDir) - - @pytest.mark.skipif(not HAVE_COMPILER, reason="Missing compiler") - @pytest.mark.skipif('msvc' in repr(ccompiler.new_compiler()), - reason="Fails with MSVC compiler ") - def test_compile2(self): - # Compile source and link the second source - tsi = self.c_temp2 - c = customized_ccompiler() - extra_link_args = tsi.calc_extra_info()['extra_link_args'] - previousDir = os.getcwd() - try: - # Change directory to not screw up directories - os.chdir(self._dir2) - c.compile([os.path.basename(self._src2)], output_dir=self._dir2, - extra_postargs=extra_link_args) - # Ensure that the object exists - assert_(os.path.isfile(self._src2.replace('.c', '.o'))) - finally: - os.chdir(previousDir) - - HAS_MKL = "mkl_rt" in mkl_info().calc_libraries_info().get("libraries", []) - - @pytest.mark.xfail(HAS_MKL, reason=("`[DEFAULT]` override doesn't work if " - "numpy is built with MKL support")) - def test_overrides(self): - previousDir = os.getcwd() - cfg = os.path.join(self._dir1, 'site.cfg') - shutil.copy(self._sitecfg, cfg) - try: - os.chdir(self._dir1) - # Check that the '[ALL]' section does not override - # missing values from other sections - info = mkl_info() - lib_dirs = info.cp['ALL']['library_dirs'].split(os.pathsep) - assert info.get_lib_dirs() != lib_dirs - - # But if we copy the values to a '[mkl]' section the value 
- # is correct - with open(cfg) as fid: - mkl = fid.read().replace('[ALL]', '[mkl]', 1) - with open(cfg, 'w') as fid: - fid.write(mkl) - info = mkl_info() - assert info.get_lib_dirs() == lib_dirs - - # Also, the values will be taken from a section named '[DEFAULT]' - with open(cfg) as fid: - dflt = fid.read().replace('[mkl]', '[DEFAULT]', 1) - with open(cfg, 'w') as fid: - fid.write(dflt) - info = mkl_info() - assert info.get_lib_dirs() == lib_dirs - finally: - os.chdir(previousDir) - - -def test_distutils_parse_env_order(monkeypatch): - from numpy.distutils.system_info import _parse_env_order - env = 'NPY_TESTS_DISTUTILS_PARSE_ENV_ORDER' - - base_order = list('abcdef') - - monkeypatch.setenv(env, 'b,i,e,f') - order, unknown = _parse_env_order(base_order, env) - assert len(order) == 3 - assert order == list('bef') - assert len(unknown) == 1 - - # For when LAPACK/BLAS optimization is disabled - monkeypatch.setenv(env, '') - order, unknown = _parse_env_order(base_order, env) - assert len(order) == 0 - assert len(unknown) == 0 - - for prefix in '^!': - monkeypatch.setenv(env, f'{prefix}b,i,e') - order, unknown = _parse_env_order(base_order, env) - assert len(order) == 4 - assert order == list('acdf') - assert len(unknown) == 1 - - with pytest.raises(ValueError): - monkeypatch.setenv(env, 'b,^e,i') - _parse_env_order(base_order, env) - - with pytest.raises(ValueError): - monkeypatch.setenv(env, '!b,^e,i') - _parse_env_order(base_order, env) diff --git a/numpy/distutils/tests/utilities.py b/numpy/distutils/tests/utilities.py deleted file mode 100644 index 5016a83d2164..000000000000 --- a/numpy/distutils/tests/utilities.py +++ /dev/null @@ -1,90 +0,0 @@ -# Kanged out of numpy.f2py.tests.util for test_build_ext -from numpy.testing import IS_WASM -import textwrap -import shutil -import tempfile -import os -import re -import subprocess -import sys - -# -# Check if compilers are available at all... 
-# - -_compiler_status = None - - -def _get_compiler_status(): - global _compiler_status - if _compiler_status is not None: - return _compiler_status - - _compiler_status = (False, False, False) - if IS_WASM: - # Can't run compiler from inside WASM. - return _compiler_status - - # XXX: this is really ugly. But I don't know how to invoke Distutils - # in a safer way... - code = textwrap.dedent( - f"""\ - import os - import sys - sys.path = {repr(sys.path)} - - def configuration(parent_name='',top_path=None): - global config - from numpy.distutils.misc_util import Configuration - config = Configuration('', parent_name, top_path) - return config - - from numpy.distutils.core import setup - setup(configuration=configuration) - - config_cmd = config.get_config_cmd() - have_c = config_cmd.try_compile('void foo() {{}}') - print('COMPILERS:%%d,%%d,%%d' %% (have_c, - config.have_f77c(), - config.have_f90c())) - sys.exit(99) - """ - ) - code = code % dict(syspath=repr(sys.path)) - - tmpdir = tempfile.mkdtemp() - try: - script = os.path.join(tmpdir, "setup.py") - - with open(script, "w") as f: - f.write(code) - - cmd = [sys.executable, "setup.py", "config"] - p = subprocess.Popen( - cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=tmpdir - ) - out, err = p.communicate() - finally: - shutil.rmtree(tmpdir) - - m = re.search(rb"COMPILERS:(\d+),(\d+),(\d+)", out) - if m: - _compiler_status = ( - bool(int(m.group(1))), - bool(int(m.group(2))), - bool(int(m.group(3))), - ) - # Finished - return _compiler_status - - -def has_c_compiler(): - return _get_compiler_status()[0] - - -def has_f77_compiler(): - return _get_compiler_status()[1] - - -def has_f90_compiler(): - return _get_compiler_status()[2] diff --git a/numpy/distutils/unixccompiler.py b/numpy/distutils/unixccompiler.py deleted file mode 100644 index 4884960fdf22..000000000000 --- a/numpy/distutils/unixccompiler.py +++ /dev/null @@ -1,141 +0,0 @@ -""" -unixccompiler - can handle very long argument lists for ar. 
- -""" -import os -import sys -import subprocess -import shlex - -from distutils.errors import CompileError, DistutilsExecError, LibError -from distutils.unixccompiler import UnixCCompiler -from numpy.distutils.ccompiler import replace_method -from numpy.distutils.misc_util import _commandline_dep_string -from numpy.distutils import log - -# Note that UnixCCompiler._compile appeared in Python 2.3 -def UnixCCompiler__compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts): - """Compile a single source files with a Unix-style compiler.""" - # HP ad-hoc fix, see ticket 1383 - ccomp = self.compiler_so - if ccomp[0] == 'aCC': - # remove flags that will trigger ANSI-C mode for aCC - if '-Ae' in ccomp: - ccomp.remove('-Ae') - if '-Aa' in ccomp: - ccomp.remove('-Aa') - # add flags for (almost) sane C++ handling - ccomp += ['-AA'] - self.compiler_so = ccomp - # ensure OPT environment variable is read - if 'OPT' in os.environ: - # XXX who uses this? - from sysconfig import get_config_vars - opt = shlex.join(shlex.split(os.environ['OPT'])) - gcv_opt = shlex.join(shlex.split(get_config_vars('OPT')[0])) - ccomp_s = shlex.join(self.compiler_so) - if opt not in ccomp_s: - ccomp_s = ccomp_s.replace(gcv_opt, opt) - self.compiler_so = shlex.split(ccomp_s) - llink_s = shlex.join(self.linker_so) - if opt not in llink_s: - self.linker_so = self.linker_so + shlex.split(opt) - - display = '%s: %s' % (os.path.basename(self.compiler_so[0]), src) - - # gcc style automatic dependencies, outputs a makefile (-MF) that lists - # all headers needed by a c file as a side effect of compilation (-MMD) - if getattr(self, '_auto_depends', False): - deps = ['-MMD', '-MF', obj + '.d'] - else: - deps = [] - - try: - self.spawn(self.compiler_so + cc_args + [src, '-o', obj] + deps + - extra_postargs, display = display) - except DistutilsExecError as e: - msg = str(e) - raise CompileError(msg) from None - - # add commandline flags to dependency file - if deps: - # After running the compiler, the 
file created will be in EBCDIC - # but will not be tagged as such. This tags it so the file does not - # have multiple different encodings being written to it - if sys.platform == 'zos': - subprocess.check_output(['chtag', '-tc', 'IBM1047', obj + '.d']) - with open(obj + '.d', 'a') as f: - f.write(_commandline_dep_string(cc_args, extra_postargs, pp_opts)) - -replace_method(UnixCCompiler, '_compile', UnixCCompiler__compile) - - -def UnixCCompiler_create_static_lib(self, objects, output_libname, - output_dir=None, debug=0, target_lang=None): - """ - Build a static library in a separate sub-process. - - Parameters - ---------- - objects : list or tuple of str - List of paths to object files used to build the static library. - output_libname : str - The library name as an absolute or relative (if `output_dir` is used) - path. - output_dir : str, optional - The path to the output directory. Default is None, in which case - the ``output_dir`` attribute of the UnixCCompiler instance. - debug : bool, optional - This parameter is not used. - target_lang : str, optional - This parameter is not used. - - Returns - ------- - None - - """ - objects, output_dir = self._fix_object_args(objects, output_dir) - - output_filename = \ - self.library_filename(output_libname, output_dir=output_dir) - - if self._need_link(objects, output_filename): - try: - # previous .a may be screwed up; best to remove it first - # and recreate. 
- # Also, ar on OS X doesn't handle updating universal archives - os.unlink(output_filename) - except OSError: - pass - self.mkpath(os.path.dirname(output_filename)) - tmp_objects = objects + self.objects - while tmp_objects: - objects = tmp_objects[:50] - tmp_objects = tmp_objects[50:] - display = '%s: adding %d object files to %s' % ( - os.path.basename(self.archiver[0]), - len(objects), output_filename) - self.spawn(self.archiver + [output_filename] + objects, - display = display) - - # Not many Unices required ranlib anymore -- SunOS 4.x is, I - # think the only major Unix that does. Maybe we need some - # platform intelligence here to skip ranlib if it's not - # needed -- or maybe Python's configure script took care of - # it for us, hence the check for leading colon. - if self.ranlib: - display = '%s:@ %s' % (os.path.basename(self.ranlib[0]), - output_filename) - try: - self.spawn(self.ranlib + [output_filename], - display = display) - except DistutilsExecError as e: - msg = str(e) - raise LibError(msg) from None - else: - log.debug("skipping %s (up-to-date)", output_filename) - return - -replace_method(UnixCCompiler, 'create_static_lib', - UnixCCompiler_create_static_lib) diff --git a/numpy/f2py/_backends/__init__.py b/numpy/f2py/_backends/__init__.py index e91393c14be3..beb2bab2384d 100644 --- a/numpy/f2py/_backends/__init__.py +++ b/numpy/f2py/_backends/__init__.py @@ -2,8 +2,5 @@ def f2py_build_generator(name): if name == "meson": from ._meson import MesonBackend return MesonBackend - elif name == "distutils": - from ._distutils import DistutilsBackend - return DistutilsBackend else: raise ValueError(f"Unknown backend: {name}") diff --git a/numpy/f2py/_backends/_distutils.py b/numpy/f2py/_backends/_distutils.py deleted file mode 100644 index 5c8f1092b568..000000000000 --- a/numpy/f2py/_backends/_distutils.py +++ /dev/null @@ -1,76 +0,0 @@ -import os -import shutil -import sys -import warnings - -from numpy.distutils.core import Extension, setup -from 
numpy.distutils.misc_util import dict_append -from numpy.distutils.system_info import get_info -from numpy.exceptions import VisibleDeprecationWarning - -from ._backend import Backend - - -class DistutilsBackend(Backend): - def __init__(sef, *args, **kwargs): - warnings.warn( - "\ndistutils has been deprecated since NumPy 1.26.x\n" - "Use the Meson backend instead, or generate wrappers" - " without -c and use a custom build script", - VisibleDeprecationWarning, - stacklevel=2, - ) - super().__init__(*args, **kwargs) - - def compile(self): - num_info = {} - if num_info: - self.include_dirs.extend(num_info.get("include_dirs", [])) - ext_args = { - "name": self.modulename, - "sources": self.sources, - "include_dirs": self.include_dirs, - "library_dirs": self.library_dirs, - "libraries": self.libraries, - "define_macros": self.define_macros, - "undef_macros": self.undef_macros, - "extra_objects": self.extra_objects, - "f2py_options": self.f2py_flags, - } - - if self.sysinfo_flags: - for n in self.sysinfo_flags: - i = get_info(n) - if not i: - print( - f"No {n!r} resources found" - "in system (try `f2py --help-link`)" - ) - dict_append(ext_args, **i) - - ext = Extension(**ext_args) - - sys.argv = [sys.argv[0]] + self.setup_flags - sys.argv.extend( - [ - "build", - "--build-temp", - self.build_dir, - "--build-base", - self.build_dir, - "--build-platlib", - ".", - "--disable-optimization", - ] - ) - - if self.fc_flags: - sys.argv.extend(["config_fc"] + self.fc_flags) - if self.flib_flags: - sys.argv.extend(["build_ext"] + self.flib_flags) - - setup(ext_modules=[ext]) - - if self.remove_build_dir and os.path.exists(self.build_dir): - print(f"Removing build directory {self.build_dir}") - shutil.rmtree(self.build_dir) diff --git a/numpy/f2py/diagnose.py b/numpy/f2py/diagnose.py index 7eb1697cc787..865cf4a3e6cd 100644 --- a/numpy/f2py/diagnose.py +++ b/numpy/f2py/diagnose.py @@ -36,15 +36,11 @@ def run(): has_f2py2e = 0 try: - import numpy.distutils - has_numpy_distutils = 2 
- except ImportError: - try: - import numpy_distutils - has_numpy_distutils = 1 - except ImportError as e: - print('Failed to import numpy_distutils:', e) - has_numpy_distutils = 0 + import numpy_distutils + has_numpy_distutils = 1 + except ImportError as e: + print('Failed to import numpy_distutils:', e) + has_numpy_distutils = 0 if has_newnumpy: try: @@ -63,14 +59,9 @@ def run(): if has_numpy_distutils: try: - if has_numpy_distutils == 2: - print('Found numpy.distutils version %r in %r' % ( - numpy.distutils.__version__, - numpy.distutils.__file__)) - else: - print('Found numpy_distutils version %r in %r' % ( - numpy_distutils.numpy_distutils_version.numpy_distutils_version, - numpy_distutils.__file__)) + print('Found numpy_distutils version %r in %r' % ( + numpy_distutils.numpy_distutils_version.numpy_distutils_version, + numpy_distutils.__file__)) print('------') except Exception as msg: print('error:', msg) @@ -96,12 +87,8 @@ def run(): 'error:', msg, '(ignore it, build_flib is obsolete for numpy.distutils 0.2.2 and up)') print('------') try: - if has_numpy_distutils == 2: - print('Importing numpy.distutils.fcompiler ...', end=' ') - import numpy.distutils.fcompiler as fcompiler - else: - print('Importing numpy_distutils.fcompiler ...', end=' ') - import numpy_distutils.fcompiler as fcompiler + print('Importing numpy_distutils.fcompiler ...', end=' ') + import numpy_distutils.fcompiler as fcompiler print('ok') print('------') try: @@ -115,24 +102,18 @@ def run(): print('error:', msg) print('------') try: - if has_numpy_distutils == 2: - print('Importing numpy.distutils.cpuinfo ...', end=' ') - from numpy.distutils.cpuinfo import cpuinfo + try: + print( + 'Importing numpy_distutils.command.cpuinfo ...', end=' ') + from numpy_distutils.command.cpuinfo import cpuinfo + print('ok') + print('------') + except Exception as msg: + print('error:', msg, '(ignore it)') + print('Importing numpy_distutils.cpuinfo ...', end=' ') + from numpy_distutils.cpuinfo import 
cpuinfo print('ok') print('------') - else: - try: - print( - 'Importing numpy_distutils.command.cpuinfo ...', end=' ') - from numpy_distutils.command.cpuinfo import cpuinfo - print('ok') - print('------') - except Exception as msg: - print('error:', msg, '(ignore it)') - print('Importing numpy_distutils.cpuinfo ...', end=' ') - from numpy_distutils.cpuinfo import cpuinfo - print('ok') - print('------') cpu = cpuinfo() print('CPU information:', end=' ') for name in dir(cpuinfo): diff --git a/numpy/f2py/f2py2e.py b/numpy/f2py/f2py2e.py index 84a5aa3c20a6..a1cce0fa9a74 100644 --- a/numpy/f2py/f2py2e.py +++ b/numpy/f2py/f2py2e.py @@ -36,7 +36,6 @@ # outmess=sys.stdout.write show = pprint.pprint outmess = auxfuncs.outmess -MESON_ONLY_VER = (sys.version_info >= (3, 12)) __usage__ =\ f"""Usage: @@ -583,7 +582,7 @@ def preparse_sysargv(): sys.argv = [sys.argv[0]] + remaining_argv backend_key = args.backend - if MESON_ONLY_VER and backend_key == 'distutils': + if backend_key == 'distutils': outmess("Cannot use distutils backend with Python>=3.12," " using meson backend instead.\n") backend_key = "meson" @@ -656,36 +655,16 @@ def run_compile(): reg_f77_f90_flags = re.compile(r'--f(77|90)flags=') reg_distutils_flags = re.compile(r'--((f(77|90)exec|opt|arch)=|(debug|noopt|noarch|help-fcompiler))') fc_flags = [_m for _m in sys.argv[1:] if reg_f77_f90_flags.match(_m)] - distutils_flags = [_m for _m in sys.argv[1:] if reg_distutils_flags.match(_m)] - if not (MESON_ONLY_VER or backend_key == 'meson'): - fc_flags.extend(distutils_flags) sys.argv = [_m for _m in sys.argv if _m not in (fc_flags + distutils_flags)] del_list = [] for s in flib_flags: v = '--fcompiler=' if s[:len(v)] == v: - if MESON_ONLY_VER or backend_key == 'meson': - outmess( - "--fcompiler cannot be used with meson," - "set compiler with the FC environment variable\n" - ) - else: - from numpy.distutils import fcompiler - fcompiler.load_all_fcompiler_classes() - allowed_keys = list(fcompiler.fcompiler_class.keys()) 
- nv = ov = s[len(v):].lower() - if ov not in allowed_keys: - vmap = {} # XXX - try: - nv = vmap[ov] - except KeyError: - if ov not in vmap.values(): - print(f'Unknown vendor: "{s[len(v):]}"') - nv = ov - i = flib_flags.index(s) - flib_flags[i] = '--fcompiler=' + nv # noqa: B909 - continue + outmess( + "--fcompiler cannot be used with meson," + "set compiler with the FC environment variable\n" + ) for s in del_list: i = flib_flags.index(s) del flib_flags[i] @@ -775,11 +754,7 @@ def validate_modulename(pyf_files, modulename='untitled'): def main(): if '--help-link' in sys.argv[1:]: sys.argv.remove('--help-link') - if MESON_ONLY_VER: - outmess("Use --dep for meson builds\n") - else: - from numpy.distutils.system_info import show_all - show_all() + outmess("Use --dep for meson builds\n") return if '-c' in sys.argv[1:]: diff --git a/numpy/f2py/tests/test_f2py2e.py b/numpy/f2py/tests/test_f2py2e.py index 959e1527c482..65d33dded209 100644 --- a/numpy/f2py/tests/test_f2py2e.py +++ b/numpy/f2py/tests/test_f2py2e.py @@ -233,38 +233,6 @@ def test_untitled_cli(capfd, hello_world_f90, monkeypatch): assert "untitledmodule.c" in out -@pytest.mark.skipif((platform.system() != 'Linux') or (sys.version_info <= (3, 12)), reason='Compiler and 3.12 required') -def test_no_py312_distutils_fcompiler(capfd, hello_world_f90, monkeypatch): - """Check that no distutils imports are performed on 3.12 - CLI :: --fcompiler --help-link --backend distutils - """ - MNAME = "hi" - foutl = get_io_paths(hello_world_f90, mname=MNAME) - ipath = foutl.f90inp - monkeypatch.setattr( - sys, "argv", f"f2py {ipath} -c --fcompiler=gfortran -m {MNAME}".split() - ) - with util.switchdir(ipath.parent): - compiler_check_f2pycli() - out, _ = capfd.readouterr() - assert "--fcompiler cannot be used with meson" in out - monkeypatch.setattr( - sys, "argv", ["f2py", "--help-link"] - ) - with util.switchdir(ipath.parent): - f2pycli() - out, _ = capfd.readouterr() - assert "Use --dep for meson builds" in out - MNAME = 
"hi2" # Needs to be different for a new -c - monkeypatch.setattr( - sys, "argv", f"f2py {ipath} -c -m {MNAME} --backend distutils".split() - ) - with util.switchdir(ipath.parent): - f2pycli() - out, _ = capfd.readouterr() - assert "Cannot use distutils backend with Python>=3.12" in out - - @pytest.mark.xfail def test_f2py_skip(capfd, retreal_f77, monkeypatch): """Tests that functions can be skipped diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index ed928a5ec7b4..8a89c464b49d 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -6,6 +6,7 @@ import contextlib import gc import importlib.metadata +import importlib.util import operator import os import pathlib @@ -1441,12 +1442,13 @@ def rundocs(filename=None, raise_on_error=True): """ import doctest - from numpy.distutils.misc_util import exec_mod_from_location if filename is None: f = sys._getframe(1) filename = f.f_globals['__file__'] name = os.path.splitext(os.path.basename(filename))[0] - m = exec_mod_from_location(name, filename) + spec = importlib.util.spec_from_file_location(name, filename) + m = importlib.util.module_from_spec(spec) + spec.loader.exec_module(m) tests = doctest.DocTestFinder().find(m) runner = doctest.DocTestRunner(verbose=False) diff --git a/numpy/tests/test_public_api.py b/numpy/tests/test_public_api.py index f6fa8611e181..1e26cb18b60f 100644 --- a/numpy/tests/test_public_api.py +++ b/numpy/tests/test_public_api.py @@ -141,18 +141,6 @@ def test_NPY_NO_EXPORT(): "typing.mypy_plugin", "version", ]] -if sys.version_info < (3, 12): - PUBLIC_MODULES += [ - 'numpy.' 
+ s for s in [ - "distutils", - "distutils.cpuinfo", - "distutils.exec_command", - "distutils.misc_util", - "distutils.log", - "distutils.system_info", - ] - ] - PUBLIC_ALIASED_MODULES = [ "numpy.char", @@ -202,67 +190,6 @@ def test_NPY_NO_EXPORT(): "random.bit_generator", "testing.print_coercion_tables", ]] -if sys.version_info < (3, 12): - PRIVATE_BUT_PRESENT_MODULES += [ - 'numpy.' + s for s in [ - "distutils.armccompiler", - "distutils.fujitsuccompiler", - "distutils.ccompiler", - 'distutils.ccompiler_opt', - "distutils.command", - "distutils.command.autodist", - "distutils.command.bdist_rpm", - "distutils.command.build", - "distutils.command.build_clib", - "distutils.command.build_ext", - "distutils.command.build_py", - "distutils.command.build_scripts", - "distutils.command.build_src", - "distutils.command.config", - "distutils.command.config_compiler", - "distutils.command.develop", - "distutils.command.egg_info", - "distutils.command.install", - "distutils.command.install_clib", - "distutils.command.install_data", - "distutils.command.install_headers", - "distutils.command.sdist", - "distutils.conv_template", - "distutils.core", - "distutils.extension", - "distutils.fcompiler", - "distutils.fcompiler.absoft", - "distutils.fcompiler.arm", - "distutils.fcompiler.compaq", - "distutils.fcompiler.environment", - "distutils.fcompiler.g95", - "distutils.fcompiler.gnu", - "distutils.fcompiler.hpux", - "distutils.fcompiler.ibm", - "distutils.fcompiler.intel", - "distutils.fcompiler.lahey", - "distutils.fcompiler.mips", - "distutils.fcompiler.nag", - "distutils.fcompiler.none", - "distutils.fcompiler.pathf95", - "distutils.fcompiler.pg", - "distutils.fcompiler.nv", - "distutils.fcompiler.sun", - "distutils.fcompiler.vast", - "distutils.fcompiler.fujitsu", - "distutils.from_template", - "distutils.intelccompiler", - "distutils.lib2def", - "distutils.line_endings", - "distutils.mingw32ccompiler", - "distutils.msvccompiler", - "distutils.npy_pkg_config", - 
"distutils.numpy_distribution", - "distutils.pathccompiler", - "distutils.unixccompiler", - ] - ] - def is_unexpected(name): """Check if this needs to be considered.""" @@ -274,10 +201,7 @@ def is_unexpected(name): ) -if sys.version_info >= (3, 12): - SKIP_LIST = [] -else: - SKIP_LIST = ["numpy.distutils.msvc9compiler"] +SKIP_LIST = [] def test_all_modules_are_expected(): @@ -315,12 +239,6 @@ def test_all_modules_are_expected(): 'numpy.matlib.ctypeslib', 'numpy.matlib.ma', ] -if sys.version_info < (3, 12): - SKIP_LIST_2 += [ - 'numpy.distutils.log.sys', - 'numpy.distutils.log.logging', - 'numpy.distutils.log.warnings', - ] def test_all_modules_are_expected_2(): diff --git a/numpy/typing/tests/test_isfile.py b/numpy/typing/tests/test_isfile.py index 0e3157a1e54d..1d91f908f021 100644 --- a/numpy/typing/tests/test_isfile.py +++ b/numpy/typing/tests/test_isfile.py @@ -23,8 +23,6 @@ ROOT / "random" / "__init__.pyi", ROOT / "testing" / "__init__.pyi", ] -if sys.version_info < (3, 12): - FILES += [ROOT / "distutils" / "__init__.pyi"] @pytest.mark.thread_unsafe( diff --git a/pytest.ini b/pytest.ini index b8a1da2b4ec6..532095ab9aa7 100644 --- a/pytest.ini +++ b/pytest.ini @@ -22,11 +22,8 @@ filterwarnings = ignore:The numpy.array_api submodule is still experimental. See NEP 47. 
# ignore matplotlib headless warning for pyplot ignore:Matplotlib is currently using agg, which is a non-GUI backend, so cannot show the figure.:UserWarning -# Ignore DeprecationWarnings from distutils - ignore::DeprecationWarning:.*distutils - ignore:\n\n `numpy.distutils`:DeprecationWarning # Ignore DeprecationWarning from typing.mypy_plugin ignore:`numpy.typing.mypy_plugin` is deprecated:DeprecationWarning # Ignore DeprecationWarning from struct module # see https://github.com/numpy/numpy/issues/28926 - ignore:Due to \'_pack_\', the \ No newline at end of file + ignore:Due to \'_pack_\', the diff --git a/ruff.toml b/ruff.toml index b25a34d45984..4f87ca65c1a1 100644 --- a/ruff.toml +++ b/ruff.toml @@ -1,6 +1,5 @@ extend-exclude = [ "numpy/__config__.py", - "numpy/distutils", "numpy/typing/_char_codes.py", "spin/cmds.py", # Submodules. diff --git a/tools/check_installed_files.py b/tools/check_installed_files.py index 61bc49197d79..fd66b68a43fc 100644 --- a/tools/check_installed_files.py +++ b/tools/check_installed_files.py @@ -84,11 +84,6 @@ def get_files(dir_to_check, kind='test'): relpath = os.path.relpath(path, dir_to_check) files[relpath] = path - if sys.version_info >= (3, 12): - files = { - k: v for k, v in files.items() if not k.startswith('distutils') - } - # ignore python files in vendored pythoncapi-compat submodule files = { k: v for k, v in files.items() if 'pythoncapi-compat' not in k diff --git a/tools/stubtest/allowlist_py311.txt b/tools/stubtest/allowlist_py311.txt index 4413f164f582..e6b2e364230e 100644 --- a/tools/stubtest/allowlist_py311.txt +++ b/tools/stubtest/allowlist_py311.txt @@ -1,3 +1,2 @@ # python == 3.11.* -numpy\.distutils\..* diff --git a/tools/stubtest/allowlist_py312.txt b/tools/stubtest/allowlist_py312.txt index 867b2f1870a3..a0ee0edf2be2 100644 --- a/tools/stubtest/allowlist_py312.txt +++ b/tools/stubtest/allowlist_py312.txt @@ -4,5 +4,3 @@ numpy\.typing\.ArrayLike numpy\.typing\.DTypeLike -# only exists before Python 3.12 
-numpy\.f2py\._backends\._distutils diff --git a/tools/stubtest/mypy.ini b/tools/stubtest/mypy.ini index ded91d7d56f3..0a3bb07b1b94 100644 --- a/tools/stubtest/mypy.ini +++ b/tools/stubtest/mypy.ini @@ -5,7 +5,6 @@ exclude = (?x)( .+\.py$ | _build_utils/ | _core/code_generators/ - | distutils/ ) ) namespace_packages = False From 9ae90b664a2e95dbf6bc0f2d3b2cde256783379e Mon Sep 17 00:00:00 2001 From: mattip Date: Mon, 1 Dec 2025 14:18:49 +0200 Subject: [PATCH 0948/1718] DOC, MAINT: fixes, add release note --- .../upcoming_changes/30340.expired.rst | 1 + doc/source/f2py/buildtools/distutils.rst | 2 +- .../reference/distutils_status_migration.rst | 4 +- doc/source/release/1.16.0-notes.rst | 2 +- doc/source/release/1.18.0-notes.rst | 2 +- doc/source/release/1.20.0-notes.rst | 2 +- numpy/_build_utils/conv_template.py | 44 +++++++++++-------- numpy/_pytesttester.py | 1 - numpy/f2py/__init__.py | 6 --- numpy/f2py/f2py2e.py | 2 +- numpy/typing/tests/test_isfile.py | 1 - 11 files changed, 33 insertions(+), 34 deletions(-) create mode 100644 doc/release/upcoming_changes/30340.expired.rst diff --git a/doc/release/upcoming_changes/30340.expired.rst b/doc/release/upcoming_changes/30340.expired.rst new file mode 100644 index 000000000000..79dd57dde737 --- /dev/null +++ b/doc/release/upcoming_changes/30340.expired.rst @@ -0,0 +1 @@ +* ``numpy.distutils`` has been removed diff --git a/doc/source/f2py/buildtools/distutils.rst b/doc/source/f2py/buildtools/distutils.rst index 98416c8cdef9..bdc94ea8ae53 100644 --- a/doc/source/f2py/buildtools/distutils.rst +++ b/doc/source/f2py/buildtools/distutils.rst @@ -1,7 +1,7 @@ .. _f2py-distutils: ============================= -Using via `numpy.distutils` +Using via ``numpy.distutils`` ============================= .. 
legacy:: diff --git a/doc/source/reference/distutils_status_migration.rst b/doc/source/reference/distutils_status_migration.rst index 2fea390a7a8b..eff13439cf52 100644 --- a/doc/source/reference/distutils_status_migration.rst +++ b/doc/source/reference/distutils_status_migration.rst @@ -3,7 +3,7 @@ Status of ``numpy.distutils`` and migration advice ================================================== -`numpy.distutils` has been removed in NumPy ``1.25.0``. +``numpy.distutils`` was removed in NumPy ``2.5.0``. Migration advice ---------------- @@ -18,7 +18,7 @@ using a well-designed, modern and reliable build system, we recommend: If you have modest needs (only simple Cython/C extensions; no need for Fortran, BLAS/LAPACK, nested ``setup.py`` files, or other features of -``numpy.distutils``) and have been happy with ``numpy.distutils`` so far, you +``numpy.distutils``) and have been happy with ``numpy.distutils``, you can also consider switching to ``setuptools``. Note that most functionality of ``numpy.distutils`` is unlikely to be ported to ``setuptools``. diff --git a/doc/source/release/1.16.0-notes.rst b/doc/source/release/1.16.0-notes.rst index 07e06ca6e043..7a387629fe46 100644 --- a/doc/source/release/1.16.0-notes.rst +++ b/doc/source/release/1.16.0-notes.rst @@ -271,7 +271,7 @@ via the services of shippable.com. Appending to build flags ------------------------ -`numpy.distutils` has always overridden rather than appended to `LDFLAGS` and +``numpy.distutils`` has always overridden rather than appended to `LDFLAGS` and other similar such environment variables for compiling Fortran extensions. Now, if the `NPY_DISTUTILS_APPEND_FLAGS` environment variable is set to 1, the behavior will be appending. 
This applied to: `LDFLAGS`, `F77FLAGS`, diff --git a/doc/source/release/1.18.0-notes.rst b/doc/source/release/1.18.0-notes.rst index a90dbb7a67d9..43d2cdedf4b6 100644 --- a/doc/source/release/1.18.0-notes.rst +++ b/doc/source/release/1.18.0-notes.rst @@ -350,7 +350,7 @@ and load will be addressed in a future release. ``numpy.distutils`` append behavior changed for LDFLAGS and similar ------------------------------------------------------------------- -`numpy.distutils` has always overridden rather than appended to ``LDFLAGS`` and +``numpy.distutils`` has always overridden rather than appended to ``LDFLAGS`` and other similar such environment variables for compiling Fortran extensions. Now the default behavior has changed to appending - which is the expected behavior in most situations. To preserve the old (overwriting) behavior, set the diff --git a/doc/source/release/1.20.0-notes.rst b/doc/source/release/1.20.0-notes.rst index a2276ac5016d..298d417bb0c2 100644 --- a/doc/source/release/1.20.0-notes.rst +++ b/doc/source/release/1.20.0-notes.rst @@ -735,7 +735,7 @@ checking. Negation of user defined BLAS/LAPACK detection order ---------------------------------------------------- -`~numpy.distutils` allows negation of libraries when determining BLAS/LAPACK +``~numpy.distutils`` allows negation of libraries when determining BLAS/LAPACK libraries. This may be used to remove an item from the library resolution phase, i.e. to disallow NetLIB libraries one could do: diff --git a/numpy/_build_utils/conv_template.py b/numpy/_build_utils/conv_template.py index c8933d1d4286..fb57abdf1587 100644 --- a/numpy/_build_utils/conv_template.py +++ b/numpy/_build_utils/conv_template.py @@ -82,8 +82,8 @@ __all__ = ['process_str', 'process_file'] import os -import sys import re +import sys # names for replacement that are already global. global_names = {} @@ -106,10 +106,10 @@ def parse_structure(astr, level): at zero. Returns an empty list if no loops found. 
""" - if level == 0 : + if level == 0: loopbeg = "/**begin repeat" loopend = "/**end repeat**/" - else : + else: loopbeg = "/**begin repeat%d" % level loopend = "/**end repeat%d**/" % level @@ -124,9 +124,9 @@ def parse_structure(astr, level): start2 = astr.find("\n", start2) fini1 = astr.find(loopend, start2) fini2 = astr.find("\n", fini1) - line += astr.count("\n", ind, start2+1) - spanlist.append((start, start2+1, fini1, fini2+1, line)) - line += astr.count("\n", start2+1, fini2) + line += astr.count("\n", ind, start2 + 1) + spanlist.append((start, start2 + 1, fini1, fini2 + 1, line)) + line += astr.count("\n", start2 + 1, fini2) ind = fini2 spanlist.sort() return spanlist @@ -135,10 +135,13 @@ def parse_structure(astr, level): def paren_repl(obj): torep = obj.group(1) numrep = obj.group(2) - return ','.join([torep]*int(numrep)) + return ','.join([torep] * int(numrep)) + parenrep = re.compile(r"\(([^)]*)\)\*(\d+)") plainrep = re.compile(r"([^*]+)\*(\d+)") + + def parse_values(astr): # replaces all occurrences of '(a,b,c)*4' in astr # with 'a,b,c,a,b,c,a,b,c,a,b,c'. 
Empty braces generate @@ -155,7 +158,7 @@ def parse_values(astr): named_re = re.compile(r"#\s*(\w*)\s*=([^#]*)#") exclude_vars_re = re.compile(r"(\w*)=(\w*)") exclude_re = re.compile(":exclude:") -def parse_loop_header(loophead) : +def parse_loop_header(loophead): """Find all named replacements in the header Returns a list of dictionaries, one for each loop iteration, @@ -179,14 +182,13 @@ def parse_loop_header(loophead) : name = rep[0] vals = parse_values(rep[1]) size = len(vals) - if nsub is None : + if nsub is None: nsub = size - elif nsub != size : + elif nsub != size: msg = "Mismatch in number of values, %d != %d\n%s = %s" raise ValueError(msg % (nsub, size, name, vals)) names.append((name, vals)) - # Find any exclude variables excludes = [] @@ -200,30 +202,33 @@ def parse_loop_header(loophead) : # generate list of dictionaries, one for each template iteration dlist = [] - if nsub is None : + if nsub is None: raise ValueError("No substitution variables found") for i in range(nsub): tmp = {name: vals[i] for name, vals in names} dlist.append(tmp) return dlist + replace_re = re.compile(r"@(\w+)@") -def parse_string(astr, env, level, line) : + + +def parse_string(astr, env, level, line): lineno = "#line %d\n" % line # local function for string replacement, uses env def replace(match): name = match.group(1) - try : + try: val = env[name] except KeyError: - msg = 'line %d: no definition of key "%s"'%(line, name) + msg = f'line {line}: no definition of key "{name}"' raise ValueError(msg) from None return val code = [lineno] struct = parse_structure(astr, level) - if struct : + if struct: # recurse over inner loops oldend = 0 newlevel = level + 1 @@ -234,18 +239,18 @@ def replace(match): oldend = sub[3] newline = line + sub[4] code.append(replace_re.sub(replace, pref)) - try : + try: envlist = parse_loop_header(head) except ValueError as e: msg = "line %d: %s" % (newline, e) raise ValueError(msg) - for newenv in envlist : + for newenv in envlist: newenv.update(env) 
newcode = parse_string(text, newenv, newlevel, newline) code.extend(newcode) suff = astr[oldend:] code.append(replace_re.sub(replace, suff)) - else : + else: # replace keys code.append(replace_re.sub(replace, astr)) code.append('\n') @@ -325,5 +330,6 @@ def main(): outfile.write(writestr) + if __name__ == "__main__": main() diff --git a/numpy/_pytesttester.py b/numpy/_pytesttester.py index ecc6a1cda738..cbb4191047ba 100644 --- a/numpy/_pytesttester.py +++ b/numpy/_pytesttester.py @@ -123,7 +123,6 @@ def __call__(self, label='fast', verbose=1, extra_argv=None, True """ - import warnings import pytest diff --git a/numpy/f2py/__init__.py b/numpy/f2py/__init__.py index e34dd99aec1c..f545c9c5fd84 100644 --- a/numpy/f2py/__init__.py +++ b/numpy/f2py/__init__.py @@ -26,12 +26,6 @@ def get_include(): """ Return the directory that contains the ``fortranobject.c`` and ``.h`` files. - .. note:: - - This function is not needed when building an extension with - `numpy.distutils` directly from ``.f`` and/or ``.pyf`` files - in one go. - Python extension modules built with f2py-generated code need to use ``fortranobject.c`` as a source file, and include the ``fortranobject.h`` header. 
This function can be used to obtain the directory containing diff --git a/numpy/f2py/f2py2e.py b/numpy/f2py/f2py2e.py index a1cce0fa9a74..23682957c19a 100644 --- a/numpy/f2py/f2py2e.py +++ b/numpy/f2py/f2py2e.py @@ -655,7 +655,7 @@ def run_compile(): reg_f77_f90_flags = re.compile(r'--f(77|90)flags=') reg_distutils_flags = re.compile(r'--((f(77|90)exec|opt|arch)=|(debug|noopt|noarch|help-fcompiler))') fc_flags = [_m for _m in sys.argv[1:] if reg_f77_f90_flags.match(_m)] - sys.argv = [_m for _m in sys.argv if _m not in (fc_flags + distutils_flags)] + sys.argv = [_m for _m in sys.argv if _m not in (fc_flags + reg_distutils_flags)] del_list = [] for s in flib_flags: diff --git a/numpy/typing/tests/test_isfile.py b/numpy/typing/tests/test_isfile.py index 1d91f908f021..250686a98ee8 100644 --- a/numpy/typing/tests/test_isfile.py +++ b/numpy/typing/tests/test_isfile.py @@ -1,5 +1,4 @@ import os -import sys from pathlib import Path import pytest From e4eced3246450cbff86ac745f1a456563fe6373f Mon Sep 17 00:00:00 2001 From: mattip Date: Mon, 1 Dec 2025 14:20:32 +0200 Subject: [PATCH 0949/1718] TYPE: remove stubs for f2py._backend._distutils DOC, MAINT: fix doc and f2py --- .../f2py/buildtools/distutils-to-meson.rst | 109 ++++-------------- doc/source/f2py/buildtools/distutils.rst | 11 -- doc/source/f2py/buildtools/index.rst | 3 +- numpy/f2py/_backends/__init__.pyi | 2 +- numpy/f2py/_backends/_distutils.pyi | 13 --- numpy/f2py/f2py2e.py | 3 +- numpy/f2py/f2py2e.pyi | 1 - 7 files changed, 25 insertions(+), 117 deletions(-) delete mode 100644 doc/source/f2py/buildtools/distutils.rst delete mode 100644 numpy/f2py/_backends/_distutils.pyi diff --git a/doc/source/f2py/buildtools/distutils-to-meson.rst b/doc/source/f2py/buildtools/distutils-to-meson.rst index 585bfba57246..5248ab3355d6 100644 --- a/doc/source/f2py/buildtools/distutils-to-meson.rst +++ b/doc/source/f2py/buildtools/distutils-to-meson.rst @@ -57,46 +57,21 @@ This is unchanged: 1.2.2 Specify the backend 
^^^^^^^^^^^^^^^^^^^^^^^^^ -.. tab-set:: +.. code-block:: bash - .. tab-item:: Distutils - :sync: distutils + python -m numpy.f2py -c fib.f90 -m fib --backend meson - .. code-block:: bash - - python -m numpy.f2py -c fib.f90 -m fib --backend distutils - - This is the default for Python versions before 3.12. - - .. tab-item:: Meson - :sync: meson - - .. code-block:: bash - - python -m numpy.f2py -c fib.f90 -m fib --backend meson - - This is the only option for Python versions after 3.12. +This is the only option. There used to be a ``distutils`` backend but it was +removed in NumPy2.5. 1.2.3 Pass a compiler name ^^^^^^^^^^^^^^^^^^^^^^^^^^ -.. tab-set:: - - .. tab-item:: Distutils - :sync: distutils - - .. code-block:: bash +.. code-block:: bash - python -m numpy.f2py -c fib.f90 -m fib --backend distutils --fcompiler=gfortran + FC="gfortran" python -m numpy.f2py -c fib.f90 -m fib - .. tab-item:: Meson - :sync: meson - - .. code-block:: bash - - FC="gfortran" python -m numpy.f2py -c fib.f90 -m fib --backend meson - - Native files can also be used. +Native files can also be used. Similarly, ``CC`` can be used in both cases to set the ``C`` compiler. Since the environment variables are generally pretty common across both, so a small @@ -137,73 +112,31 @@ sample is included below. 1.2.4 Dependencies ^^^^^^^^^^^^^^^^^^ -Here, ``meson`` can actually be used to set dependencies more robustly. - -.. tab-set:: - - .. tab-item:: Distutils - :sync: distutils - - .. code-block:: bash - - python -m numpy.f2py -c fib.f90 -m fib --backend distutils -llapack +.. code-block:: bash - Note that this approach in practice is error prone. + python -m numpy.f2py -c fib.f90 -m fib --dep lapack - .. tab-item:: Meson - :sync: meson - - .. code-block:: bash - - python -m numpy.f2py -c fib.f90 -m fib --backend meson --dep lapack - - This maps to ``dependency("lapack")`` and so can be used for a wide variety - of dependencies. 
They can be `customized further `_ - to use CMake or other systems to resolve dependencies. +This maps to ``dependency("lapack")`` and so can be used for a wide variety +of dependencies. They can be `customized further `_ +to use CMake or other systems to resolve dependencies. 1.2.5 Libraries ^^^^^^^^^^^^^^^ -Both ``meson`` and ``distutils`` are capable of linking against libraries. - -.. tab-set:: - - .. tab-item:: Distutils - :sync: distutils - - .. code-block:: bash - - python -m numpy.f2py -c fib.f90 -m fib --backend distutils -lmylib -L/path/to/mylib +``meson`` is capable of linking against libraries. - .. tab-item:: Meson - :sync: meson +.. code-block:: bash - .. code-block:: bash - - python -m numpy.f2py -c fib.f90 -m fib --backend meson -lmylib -L/path/to/mylib + python -m numpy.f2py -c fib.f90 -m fib -lmylib -L/path/to/mylib 1.3 Customizing builds ~~~~~~~~~~~~~~~~~~~~~~ -.. tab-set:: - - .. tab-item:: Distutils - :sync: distutils - - .. code-block:: bash - - python -m numpy.f2py -c fib.f90 -m fib --backend distutils --build-dir blah - - This can be technically integrated with other codes, see :ref:`f2py-distutils`. - - .. tab-item:: Meson - :sync: meson - - .. code-block:: bash +.. code-block:: bash - python -m numpy.f2py -c fib.f90 -m fib --backend meson --build-dir blah + python -m numpy.f2py -c fib.f90 -m fib --build-dir blah - The resulting build can be customized via the - `Meson Build How-To Guide `_. - In fact, the resulting set of files can even be committed directly and used - as a meson subproject in a separate codebase. +The resulting build can be customized via the +`Meson Build How-To Guide `_. +In fact, the resulting set of files can even be committed directly and used +as a meson subproject in a separate codebase. 
diff --git a/doc/source/f2py/buildtools/distutils.rst b/doc/source/f2py/buildtools/distutils.rst deleted file mode 100644 index bdc94ea8ae53..000000000000 --- a/doc/source/f2py/buildtools/distutils.rst +++ /dev/null @@ -1,11 +0,0 @@ -.. _f2py-distutils: - -============================= -Using via ``numpy.distutils`` -============================= - -.. legacy:: - - ``distutils`` has been removed in favor of ``meson`` see - :ref:`distutils-status-migration`. - diff --git a/doc/source/f2py/buildtools/index.rst b/doc/source/f2py/buildtools/index.rst index d5f3876e6dd5..946f82eb57dd 100644 --- a/doc/source/f2py/buildtools/index.rst +++ b/doc/source/f2py/buildtools/index.rst @@ -11,7 +11,7 @@ with ``f2py``. The default build system for ``f2py`` has traditionally been through the enhanced ``numpy.distutils`` module. This module is based on ``distutils`` - which was removed in ``Python 3.12.0`` in **October 2025**. Like the rest of + which was removed in ``NumPy2.5`` in **June 2026**. Like the rest of NumPy and SciPy, ``f2py`` uses ``meson`` now, see :ref:`distutils-status-migration` for some more details. @@ -107,7 +107,6 @@ Build systems .. toctree:: :maxdepth: 2 - distutils meson cmake skbuild diff --git a/numpy/f2py/_backends/__init__.pyi b/numpy/f2py/_backends/__init__.pyi index 43625c68061f..11e3743be541 100644 --- a/numpy/f2py/_backends/__init__.pyi +++ b/numpy/f2py/_backends/__init__.pyi @@ -2,4 +2,4 @@ from typing import Literal as L from ._backend import Backend -def f2py_build_generator(name: L["distutils", "meson"]) -> Backend: ... +def f2py_build_generator(name: L["meson"]) -> Backend: ... 
diff --git a/numpy/f2py/_backends/_distutils.pyi b/numpy/f2py/_backends/_distutils.pyi deleted file mode 100644 index 56bbf7e5b49a..000000000000 --- a/numpy/f2py/_backends/_distutils.pyi +++ /dev/null @@ -1,13 +0,0 @@ -from typing_extensions import deprecated, override - -from ._backend import Backend - -class DistutilsBackend(Backend): - @deprecated( - "distutils has been deprecated since NumPy 1.26.x. Use the Meson backend instead, or generate wrappers without -c and " - "use a custom build script" - ) - # NOTE: the `sef` typo matches runtime - def __init__(sef, *args: object, **kwargs: object) -> None: ... - @override - def compile(self) -> None: ... diff --git a/numpy/f2py/f2py2e.py b/numpy/f2py/f2py2e.py index 23682957c19a..2f73b1924f6d 100644 --- a/numpy/f2py/f2py2e.py +++ b/numpy/f2py/f2py2e.py @@ -655,7 +655,8 @@ def run_compile(): reg_f77_f90_flags = re.compile(r'--f(77|90)flags=') reg_distutils_flags = re.compile(r'--((f(77|90)exec|opt|arch)=|(debug|noopt|noarch|help-fcompiler))') fc_flags = [_m for _m in sys.argv[1:] if reg_f77_f90_flags.match(_m)] - sys.argv = [_m for _m in sys.argv if _m not in (fc_flags + reg_distutils_flags)] + distutils_flags = [_m for _m in sys.argv[1:] if reg_distutils_flags.match(_m)] + sys.argv = [_m for _m in sys.argv if _m not in (fc_flags + distutils_flags)] del_list = [] for s in flib_flags: diff --git a/numpy/f2py/f2py2e.pyi b/numpy/f2py/f2py2e.pyi index 46794e552b41..686898041b9d 100644 --- a/numpy/f2py/f2py2e.pyi +++ b/numpy/f2py/f2py2e.pyi @@ -28,7 +28,6 @@ class _PreparseResult(TypedDict): ### -MESON_ONLY_VER: Final[bool] f2py_version: Final = version numpy_version: Final = version __usage__: Final[str] From 47b8b3ca327ab1c797052ee1c27ee555a1870082 Mon Sep 17 00:00:00 2001 From: mattip Date: Mon, 1 Dec 2025 19:26:24 +0200 Subject: [PATCH 0950/1718] DOC: remove comparison of meson and distutils, remove warning exclusion --- doc/source/conf.py | 16 ---------------- .../f2py/buildtools/distutils-to-meson.rst | 5 +---- 
doc/source/f2py/buildtools/meson.rst | 5 ----- 3 files changed, 1 insertion(+), 25 deletions(-) diff --git a/doc/source/conf.py b/doc/source/conf.py index e26719b05cb0..302f1fd3731c 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -145,24 +145,8 @@ class PyTypeObject(ctypes.Structure): # The reST default role (used for this markup: `text`) to use for all documents. default_role = "autolink" -# List of directories, relative to source directories, that shouldn't be searched -# for source files. -exclude_dirs = [] - -exclude_patterns = [] -suppress_warnings = [] nitpick_ignore = [] -if sys.version_info[:2] >= (3, 12): - suppress_warnings += [ - 'toc.excluded', # Suppress warnings about excluded toctree entries - ] - nitpicky = True - nitpick_ignore += [ - # The first ignore is not captured without nitpicky = True. - ('py:class', 'Extension'), - ] - # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = False diff --git a/doc/source/f2py/buildtools/distutils-to-meson.rst b/doc/source/f2py/buildtools/distutils-to-meson.rst index 5248ab3355d6..8dd504dd9c8f 100644 --- a/doc/source/f2py/buildtools/distutils-to-meson.rst +++ b/doc/source/f2py/buildtools/distutils-to-meson.rst @@ -5,8 +5,7 @@ ------------------------ As per the timeline laid out in :ref:`distutils-status-migration`, -``distutils`` has ceased to be the default build backend for ``f2py``. This page -collects common workflows in both formats. +``distutils`` has been removed. This page collects common workflows. .. note:: @@ -44,8 +43,6 @@ This will not win any awards, but can be a reasonable starting point. 1.2.1 Basic Usage ^^^^^^^^^^^^^^^^^ -This is unchanged: - .. 
code:: bash python -m numpy.f2py -c fib.f90 -m fib diff --git a/doc/source/f2py/buildtools/meson.rst b/doc/source/f2py/buildtools/meson.rst index c17c5d2ddc87..44560bef8c5f 100644 --- a/doc/source/f2py/buildtools/meson.rst +++ b/doc/source/f2py/buildtools/meson.rst @@ -15,11 +15,6 @@ Using via ``meson`` The default build system for ``f2py`` is now ``meson``, see :ref:`distutils-status-migration` for some more details.. -The key advantage gained by leveraging ``meson`` over the techniques described -in :ref:`f2py-distutils` is that this feeds into existing systems and larger -projects with ease. ``meson`` has a rather pythonic syntax which makes it more -comfortable and amenable to extension for ``python`` users. - Fibonacci walkthrough (F77) =========================== From 0248909815caa83878128a7305b78dccb4f4a84e Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Tue, 2 Dec 2025 12:49:22 +0200 Subject: [PATCH 0951/1718] BUG: raise BufferError when creating dlpack with wrong device tuple (#30351) --- numpy/_core/src/multiarray/dlpack.c | 3 ++- numpy/_core/tests/test_dlpack.py | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/numpy/_core/src/multiarray/dlpack.c b/numpy/_core/src/multiarray/dlpack.c index 62cd137daa7c..29e5aecec5d5 100644 --- a/numpy/_core/src/multiarray/dlpack.c +++ b/numpy/_core/src/multiarray/dlpack.c @@ -392,7 +392,8 @@ device_converter(PyObject *obj, DLDevice *result_device) return NPY_SUCCEED; } - PyErr_SetString(PyExc_ValueError, "unsupported device requested"); + /* Must be a BufferError */ + PyErr_SetString(PyExc_BufferError, "unsupported device requested"); return NPY_FAIL; } diff --git a/numpy/_core/tests/test_dlpack.py b/numpy/_core/tests/test_dlpack.py index 89c24032b6c1..e8198ac1823e 100644 --- a/numpy/_core/tests/test_dlpack.py +++ b/numpy/_core/tests/test_dlpack.py @@ -184,7 +184,7 @@ def test_device(self): np.from_dlpack(x, device="cpu") np.from_dlpack(x, device=None) - with pytest.raises(ValueError): + with 
pytest.raises(BufferError): x.__dlpack__(dl_device=(10, 0)) with pytest.raises(ValueError): np.from_dlpack(x, device="gpu") From 69e28adc9a2b379fc7c5d025f797e0803b63bdff Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Tue, 2 Dec 2025 13:42:39 +0200 Subject: [PATCH 0952/1718] fixes from review --- doc/neps/scope.rst | 2 +- doc/source/dev/depending_on_numpy.rst | 3 +- .../f2py/buildtools/distutils-to-meson.rst | 6 +- doc/source/f2py/buildtools/index.rst | 2 +- .../reference/distutils_status_migration.rst | 11 --- numpy/__init__.py | 7 -- numpy/f2py/diagnose.py | 83 ++----------------- numpy/f2py/f2py2e.py | 9 -- numpy/f2py/tests/test_f2py2e.py | 31 +++++++ 9 files changed, 42 insertions(+), 112 deletions(-) diff --git a/doc/neps/scope.rst b/doc/neps/scope.rst index 1d5722700a79..ffa3d8655ad8 100644 --- a/doc/neps/scope.rst +++ b/doc/neps/scope.rst @@ -36,7 +36,7 @@ Here, we describe aspects of N-d array computation that are within scope for Num - NumPy provides some **infrastructure for other packages in the scientific Python ecosystem**: - - numpy.distutils (deprecated and removed, build support for C++, Fortran, + - numpy.distutils (removed in NumPy 2.5.0, was providing build support for C++, Fortran, BLAS/LAPACK, and other relevant libraries for scientific computing) - f2py (generating bindings for Fortran code) - testing utilities (mostly deprecated, pytest does a good job) diff --git a/doc/source/dev/depending_on_numpy.rst b/doc/source/dev/depending_on_numpy.rst index fcdae806fad3..98dc552a779e 100644 --- a/doc/source/dev/depending_on_numpy.rst +++ b/doc/source/dev/depending_on_numpy.rst @@ -141,8 +141,7 @@ for dropping support for old Python and NumPy versions: :ref:`NEP29`. We recommend all packages depending on NumPy to follow the recommendations in NEP 29. -For *run-time dependencies*, specify version bounds using -``install_requires`` in ``setup.py`` (assuming you use ``setuptools`` to build). 
+For *run-time dependencies*, specify version bounds in `pyproject.toml`. Most libraries that rely on NumPy will not need to set an upper version bound: NumPy is careful to preserve backward-compatibility. diff --git a/doc/source/f2py/buildtools/distutils-to-meson.rst b/doc/source/f2py/buildtools/distutils-to-meson.rst index 8dd504dd9c8f..920848b9d0d3 100644 --- a/doc/source/f2py/buildtools/distutils-to-meson.rst +++ b/doc/source/f2py/buildtools/distutils-to-meson.rst @@ -56,17 +56,17 @@ This will not win any awards, but can be a reasonable starting point. .. code-block:: bash - python -m numpy.f2py -c fib.f90 -m fib --backend meson + python -m numpy.f2py -c fib.f90 -m fib This is the only option. There used to be a ``distutils`` backend but it was -removed in NumPy2.5. +removed in NumPy2.5.0. 1.2.3 Pass a compiler name ^^^^^^^^^^^^^^^^^^^^^^^^^^ .. code-block:: bash - FC="gfortran" python -m numpy.f2py -c fib.f90 -m fib + FC=gfortran python -m numpy.f2py -c fib.f90 -m fib Native files can also be used. diff --git a/doc/source/f2py/buildtools/index.rst b/doc/source/f2py/buildtools/index.rst index 946f82eb57dd..671fd5b6d2cf 100644 --- a/doc/source/f2py/buildtools/index.rst +++ b/doc/source/f2py/buildtools/index.rst @@ -11,7 +11,7 @@ with ``f2py``. The default build system for ``f2py`` has traditionally been through the enhanced ``numpy.distutils`` module. This module is based on ``distutils`` - which was removed in ``NumPy2.5`` in **June 2026**. Like the rest of + which was removed in ``NumPy2.5.0`` in **June 2026**. Like the rest of NumPy and SciPy, ``f2py`` uses ``meson`` now, see :ref:`distutils-status-migration` for some more details. 
diff --git a/doc/source/reference/distutils_status_migration.rst b/doc/source/reference/distutils_status_migration.rst index eff13439cf52..e4ca4fedcf81 100644 --- a/doc/source/reference/distutils_status_migration.rst +++ b/doc/source/reference/distutils_status_migration.rst @@ -88,17 +88,6 @@ E.g.,:: For more details, see the `setuptools documentation `__ - -.. _numpy-setuptools-interaction: - -Versioning ``setuptools`` ------------------------------------------------------- - -It is recommended to put an upper bound on your ``setuptools`` -build requirement in ``pyproject.toml`` to avoid future breakage - see -:ref:`for-downstream-package-authors`. - - .. _CMake: https://cmake.org/ .. _Meson: https://mesonbuild.com/ .. _meson-python: https://meson-python.readthedocs.io diff --git a/numpy/__init__.py b/numpy/__init__.py index 42a2e8896d68..5012decc43ab 100644 --- a/numpy/__init__.py +++ b/numpy/__init__.py @@ -744,19 +744,12 @@ def __getattr__(attr): elif attr == "char": import numpy.char as char return char - elif attr == "array_api": - raise AttributeError("`numpy.array_api` is not available from " - "numpy 2.0 onwards", name=None) elif attr == "core": import numpy.core as core return core elif attr == "strings": import numpy.strings as strings return strings - elif attr == "distutils": - raise AttributeError("`numpy.distutils` is not available from " - "numpy 2.5 onwards", name=None) - if attr in __future_scalars__: # And future warnings for those that will change, but also give # the AttributeError diff --git a/numpy/f2py/diagnose.py b/numpy/f2py/diagnose.py index 865cf4a3e6cd..3e2c53b0ec1d 100644 --- a/numpy/f2py/diagnose.py +++ b/numpy/f2py/diagnose.py @@ -23,10 +23,10 @@ def run(): try: import numpy - has_newnumpy = 1 + has_numpy = 1 except ImportError as e: - print('Failed to import new numpy:', e) - has_newnumpy = 0 + print('Failed to import numpy:', e) + has_numpy = 0 try: from numpy.f2py import f2py2e @@ -35,16 +35,9 @@ def run(): print('Failed to import 
f2py2e:', e) has_f2py2e = 0 - try: - import numpy_distutils - has_numpy_distutils = 1 - except ImportError as e: - print('Failed to import numpy_distutils:', e) - has_numpy_distutils = 0 - - if has_newnumpy: + if has_numpy: try: - print(f'Found new numpy version {numpy.__version__!r} in {numpy.__file__}') + print(f'Found numpy version {numpy.__version__!r} in {numpy.__file__}') except Exception as msg: print('error:', msg) print('------') @@ -57,72 +50,6 @@ def run(): print('error:', msg) print('------') - if has_numpy_distutils: - try: - print('Found numpy_distutils version %r in %r' % ( - numpy_distutils.numpy_distutils_version.numpy_distutils_version, - numpy_distutils.__file__)) - print('------') - except Exception as msg: - print('error:', msg) - print('------') - try: - if has_numpy_distutils == 1: - print( - 'Importing numpy_distutils.command.build_flib ...', end=' ') - import numpy_distutils.command.build_flib as build_flib - print('ok') - print('------') - try: - print( - 'Checking availability of supported Fortran compilers:') - for compiler_class in build_flib.all_compilers: - compiler_class(verbose=1).is_available() - print('------') - except Exception as msg: - print('error:', msg) - print('------') - except Exception as msg: - print( - 'error:', msg, '(ignore it, build_flib is obsolete for numpy.distutils 0.2.2 and up)') - print('------') - try: - print('Importing numpy_distutils.fcompiler ...', end=' ') - import numpy_distutils.fcompiler as fcompiler - print('ok') - print('------') - try: - print('Checking availability of supported Fortran compilers:') - fcompiler.show_fcompilers() - print('------') - except Exception as msg: - print('error:', msg) - print('------') - except Exception as msg: - print('error:', msg) - print('------') - try: - try: - print( - 'Importing numpy_distutils.command.cpuinfo ...', end=' ') - from numpy_distutils.command.cpuinfo import cpuinfo - print('ok') - print('------') - except Exception as msg: - print('error:', msg, 
'(ignore it)') - print('Importing numpy_distutils.cpuinfo ...', end=' ') - from numpy_distutils.cpuinfo import cpuinfo - print('ok') - print('------') - cpu = cpuinfo() - print('CPU information:', end=' ') - for name in dir(cpuinfo): - if name[0] == '_' and name[1] != '_' and getattr(cpu, name[1:])(): - print(name[1:], end=' ') - print('------') - except Exception as msg: - print('error:', msg) - print('------') os.chdir(_path) diff --git a/numpy/f2py/f2py2e.py b/numpy/f2py/f2py2e.py index 2f73b1924f6d..eb5a39e088ff 100644 --- a/numpy/f2py/f2py2e.py +++ b/numpy/f2py/f2py2e.py @@ -116,10 +116,6 @@ --include-paths ::... Search include files from the given directories. - --help-link [..] List system resources found by system_info.py. See also - --link- switch below. [..] is optional list - of resources names. E.g. try 'f2py --help-link lapack_opt'. - --f2cmap Load Fortran-to-Python KIND specification from the given file. Default: .f2py_f2cmap in current directory. @@ -753,11 +749,6 @@ def validate_modulename(pyf_files, modulename='untitled'): return modulename def main(): - if '--help-link' in sys.argv[1:]: - sys.argv.remove('--help-link') - outmess("Use --dep for meson builds\n") - return - if '-c' in sys.argv[1:]: run_compile() else: diff --git a/numpy/f2py/tests/test_f2py2e.py b/numpy/f2py/tests/test_f2py2e.py index 65d33dded209..bd7064fd348a 100644 --- a/numpy/f2py/tests/test_f2py2e.py +++ b/numpy/f2py/tests/test_f2py2e.py @@ -232,6 +232,37 @@ def test_untitled_cli(capfd, hello_world_f90, monkeypatch): out, _ = capfd.readouterr() assert "untitledmodule.c" in out +def test_no_distutils_backend(capfd, hello_world_f90, monkeypatch): + """Check that distutils backend and related options fail + CLI :: --fcompiler --help-link --backend distutils + """ + MNAME = "hi" + foutl = get_io_paths(hello_world_f90, mname=MNAME) + ipath = foutl.f90inp + monkeypatch.setattr( + sys, "argv", f"f2py {ipath} -c --fcompiler=gfortran -m {MNAME}".split() + ) + with 
util.switchdir(ipath.parent): + compiler_check_f2pycli() + out, _ = capfd.readouterr() + assert "--fcompiler cannot be used with meson" in out + + monkeypatch.setattr( + sys, "argv", ["f2py", "--help-link"] + ) + with pytest.raises(SystemExit): + f2pycli() + out, _ = capfd.readouterr() + assert "Unknown option --help-link" in out + + monkeypatch.setattr( + sys, "argv", ["f2py", "--backend", "distutils"] + ) + with pytest.raises(SystemExit): + compiler_check_f2pycli() + f2pycli() + out, _ = capfd.readouterr() + assert "'distutils' backend was removed" in out @pytest.mark.xfail def test_f2py_skip(capfd, retreal_f77, monkeypatch): From 54ab2c25b244f87803b7dbe065056e2b04597bfa Mon Sep 17 00:00:00 2001 From: jorenham Date: Mon, 1 Dec 2025 07:56:43 +0100 Subject: [PATCH 0953/1718] MAINT: require python >= 3.12 --- .../check-warnings/msvc-allowed-warnings.txt | 9 ++-- .github/workflows/linux.yml | 41 +++++-------------- .github/workflows/linux_blas.yml | 12 +++--- .github/workflows/linux_simd.yml | 14 +++---- .github/workflows/macos.yml | 2 +- .github/workflows/mypy.yml | 4 +- .github/workflows/stubtest.yml | 2 +- .github/workflows/wheels.yml | 2 +- .github/workflows/windows.yml | 4 +- INSTALL.rst | 4 +- doc/source/building/understanding_meson.rst | 4 +- numpy/_core/tests/examples/cython/setup.py | 2 +- numpy/_core/tests/test_arrayprint.py | 2 - pyproject.toml | 2 +- ruff.toml | 3 +- tools/ci/cirrus_arm.yml | 12 +++--- tools/swig/test/testFarray.py | 1 + 17 files changed, 50 insertions(+), 70 deletions(-) diff --git a/.github/check-warnings/msvc-allowed-warnings.txt b/.github/check-warnings/msvc-allowed-warnings.txt index 7d2c149629ec..3f728ae29cd8 100644 --- a/.github/check-warnings/msvc-allowed-warnings.txt +++ b/.github/check-warnings/msvc-allowed-warnings.txt @@ -17,10 +17,10 @@ ../numpy/linalg/lapack_lite/f2c_s_lapack.c(2865): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data ../numpy/linalg/lapack_lite/f2c_s_lapack.c(2882): warning 
C4244: '=': conversion from 'real' to 'integer', possible loss of data ../numpy/linalg/lapack_lite/f2c_s_lapack.c(2894): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data -numpy/random/_generator.cp311-win_arm64.pyd.p/numpy/random/_generator.pyx.c(26966): warning C4244: 'function': conversion from 'int64_t' to 'double' -numpy/random/_generator.cp311-win_arm64.pyd.p/numpy/random/_generator.pyx.c(39070): warning C4244: 'function': conversion from 'int64_t' to 'double' -numpy/random/_generator.cp311-win32.pyd.p/numpy/random/_generator.pyx.c(26966): warning C4244: 'function': conversion from 'int64_t' to 'double' -numpy/random/_generator.cp311-win32.pyd.p/numpy/random/_generator.pyx.c(39070): warning C4244: 'function': conversion from 'int64_t' to 'double' +numpy/random/_generator.cp312-win_arm64.pyd.p/numpy/random/_generator.pyx.c(26966): warning C4244: 'function': conversion from 'int64_t' to 'double' +numpy/random/_generator.cp312-win_arm64.pyd.p/numpy/random/_generator.pyx.c(39070): warning C4244: 'function': conversion from 'int64_t' to 'double' +numpy/random/_generator.cp312-win32.pyd.p/numpy/random/_generator.pyx.c(26966): warning C4244: 'function': conversion from 'int64_t' to 'double' +numpy/random/_generator.cp312-win32.pyd.p/numpy/random/_generator.pyx.c(39070): warning C4244: 'function': conversion from 'int64_t' to 'double' cl : Command line warning D9025 : overriding '/arch:SSE2' with '/arch:AVX2' cl : Command line warning D9025 : overriding '/arch:SSE2' with '/arch:AVX512' cl : Command line warning D9025 : overriding '/arch:SSE2' with '/arch:AVX2' @@ -28,4 +28,3 @@ D:\a\numpy\numpy\numpy\_core\src\npysort\x86-simd-sort/src/avx512-16bit-qsort.hp D:\a\numpy\numpy\numpy\_core\src\npysort\x86-simd-sort/src/avx512-16bit-qsort.hpp(53): warning C4309: 'argument': truncation of constant value D:\a\numpy\numpy\numpy\_core\src\npysort\x86-simd-sort/src/avx512-16bit-qsort.hpp(65): warning C4309: 'argument': truncation of constant value 
D:\a\numpy\numpy\numpy\_core\src\npysort\x86-simd-sort/src/avx512-16bit-qsort.hpp(139): warning C4556: - diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index a23f1f7649d8..157f2c2054e6 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -44,7 +44,7 @@ jobs: persist-credentials: false - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install linter requirements run: python -m pip install -r requirements/linter_requirements.txt @@ -56,7 +56,7 @@ jobs: - name: Check Python.h is first file included run: | python tools/check_python_h_first.py - + smoke_test: # To enable this job on a fork, comment out: @@ -66,7 +66,7 @@ jobs: MESON_ARGS: "-Dallow-noblas=true -Dcpu-baseline=none -Dcpu-dispatch=none" strategy: matrix: - version: ["3.11", "3.12", "3.13", "3.14", "3.14t"] + version: ["3.12", "3.13", "3.14", "3.14t"] steps: - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: @@ -78,25 +78,6 @@ jobs: python-version: ${{ matrix.version }} - uses: ./.github/meson_actions - pypy: - needs: [smoke_test] - runs-on: ubuntu-latest - if: github.event_name != 'push' - steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - with: - submodules: recursive - fetch-tags: true - persist-credentials: false - - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 - with: - python-version: 'pypy3.11-v7.3.20' - - name: Setup using scipy-openblas - run: | - python -m pip install -r requirements/ci_requirements.txt - spin config-openblas --with-scipy-openblas=32 - - uses: ./.github/meson_actions - debug: needs: [smoke_test] runs-on: ubuntu-24.04 @@ -139,7 +120,7 @@ jobs: persist-credentials: false - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install build and test dependencies 
from PyPI run: | pip install -r requirements/build_requirements.txt @@ -224,7 +205,7 @@ jobs: persist-credentials: false - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install build and benchmarking dependencies run: | sudo apt-get update @@ -263,7 +244,7 @@ jobs: persist-credentials: false - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install gfortran and setup OpenBLAS (sdist build) run: | set -xe @@ -312,7 +293,7 @@ jobs: - name: Set up Python uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install build and test dependencies from PyPI run: | python -m pip install -r requirements/build_requirements.txt @@ -341,7 +322,7 @@ jobs: persist-credentials: false - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: repository: numpy/numpy-release @@ -376,8 +357,8 @@ jobs: python tools/check_installed_files.py $(find ./build-install -path '*/site-packages/numpy') --no-tests - Linux_Python_311_32bit_full: - name: i686, cp311, full + Linux_Python_312_32bit_full: + name: i686, cp312, full needs: [smoke_test] runs-on: ubuntu-latest container: @@ -403,7 +384,7 @@ jobs: - name: build run: | - python3.11 -m venv venv + python3.12 -m venv venv source venv/bin/activate pip install --upgrade pip pip install -r requirements/ci32_requirements.txt diff --git a/.github/workflows/linux_blas.yml b/.github/workflows/linux_blas.yml index 77cb9aaf91fe..6d0ab1a93777 100644 --- a/.github/workflows/linux_blas.yml +++ b/.github/workflows/linux_blas.yml @@ -76,7 +76,7 @@ jobs: persist-credentials: false - uses: 
actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install dependencies run: | @@ -199,7 +199,7 @@ jobs: persist-credentials: false - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install dependencies run: | @@ -227,7 +227,7 @@ jobs: persist-credentials: false - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install dependencies run: | @@ -290,7 +290,7 @@ jobs: persist-credentials: false - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install dependencies run: | @@ -354,7 +354,7 @@ jobs: persist-credentials: false - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install dependencies run: | @@ -391,7 +391,7 @@ jobs: persist-credentials: false - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install dependencies run: | diff --git a/.github/workflows/linux_simd.yml b/.github/workflows/linux_simd.yml index dcc483eaf6df..a94eca64b59f 100644 --- a/.github/workflows/linux_simd.yml +++ b/.github/workflows/linux_simd.yml @@ -65,7 +65,7 @@ jobs: persist-credentials: false - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' - uses: ./.github/meson_actions name: Build/Test @@ -83,7 +83,7 @@ jobs: persist-credentials: false - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install GCC9/10 run: | @@ -129,7 +129,7 @@ jobs: fetch-tags: true - uses: 
actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install dependencies run: | python -m pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt @@ -157,12 +157,12 @@ jobs: - [ "native", "-Dallow-noblas=true -Dcpu-baseline=native -Dcpu-dispatch=none", - "3.11" + "3.12" ] - [ "without avx512", "-Dallow-noblas=true -Dcpu-dispatch=max-x86_v4", - "3.11" + "3.12" ] env: @@ -192,7 +192,7 @@ jobs: persist-credentials: false - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install Intel SDE run: | @@ -242,7 +242,7 @@ jobs: persist-credentials: false - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install Intel SDE run: | diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 2787a7c81d3a..c13df7226a1c 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -118,7 +118,7 @@ jobs: build_runner: - [ macos-15-intel, "macos_x86_64" ] - [ macos-14, "macos_arm64" ] - version: ["3.11", "3.14t"] + version: ["3.12", "3.14t"] steps: - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml index e3c6e7beba13..d8a1e3dbee97 100644 --- a/.github/workflows/mypy.yml +++ b/.github/workflows/mypy.yml @@ -53,8 +53,8 @@ jobs: matrix: os_python: - [macos-latest, '3.14'] - - [ubuntu-latest, '3.12'] - - [windows-latest, '3.11'] + - [ubuntu-latest, '3.13'] + - [windows-latest, '3.12'] steps: - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: diff --git a/.github/workflows/stubtest.yml b/.github/workflows/stubtest.yml index 731d9091ec69..5d2ca7385ed5 100644 --- a/.github/workflows/stubtest.yml +++ b/.github/workflows/stubtest.yml @@ -34,7 
+34,7 @@ jobs: matrix: # TODO: consider including macos and windows os: [ubuntu] - py: ["3.11", "3.14"] + py: ["3.12", "3.14"] steps: - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 24061ae7b014..86d4c35ca8a4 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -42,7 +42,7 @@ jobs: - [macos-14, macosx_arm64, openblas] - [windows-2022, win_amd64, ""] - [windows-11-arm, win_arm64, ""] - python: ["cp311"] + python: ["cp312"] env: IS_32_BIT: ${{ matrix.buildplat[1] == 'win32' }} # used in cibw_test_command.sh diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index a0604fa87aa9..3b6f7dd6ff73 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -92,7 +92,7 @@ jobs: - name: Setup Python uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: - python-version: '3.11' + python-version: '3.12' architecture: ${{ matrix.architecture }} - name: Setup MSVC @@ -133,7 +133,7 @@ jobs: pyver: '3.14' - BLAS: 32 TEST_MODE: fast - pyver: '3.11' + pyver: '3.12' # To enable this job on a fork, comment out: if: github.repository == 'numpy/numpy' diff --git a/INSTALL.rst b/INSTALL.rst index 6e9d2cd242f5..72caf98380b7 100644 --- a/INSTALL.rst +++ b/INSTALL.rst @@ -14,7 +14,7 @@ Prerequisites Building NumPy requires the following installed software: -1) Python__ 3.11.x or newer. +1) Python__ 3.12.x or newer. Please note that the Python development headers also need to be installed, e.g., on Debian/Ubuntu one needs to install both `python3` and @@ -82,7 +82,7 @@ Choosing compilers NumPy needs C and C++ compilers, and for development versions also needs Cython. A Fortran compiler isn't needed to build NumPy itself; the ``numpy.f2py`` tests will be skipped when running the test suite if no Fortran -compiler is available. +compiler is available. 
For more options including selecting compilers, setting custom compiler flags and controlling parallelism, see diff --git a/doc/source/building/understanding_meson.rst b/doc/source/building/understanding_meson.rst index b990ff283271..0c29302c9abb 100644 --- a/doc/source/building/understanding_meson.rst +++ b/doc/source/building/understanding_meson.rst @@ -87,11 +87,11 @@ that's just an arbitrary name we picked here):: meson install -C build -It will then install to ``build-install/lib/python3.11/site-packages/numpy``, +It will then install to ``build-install/lib/python3.12/site-packages/numpy``, which is not on your Python path, so to add it do (*again, this is for learning purposes, using ``PYTHONPATH`` explicitly is typically not the best idea*):: - export PYTHONPATH=$PWD/build-install/lib/python3.11/site-packages/ + export PYTHONPATH=$PWD/build-install/lib/python3.12/site-packages/ Now we should be able to import ``numpy`` and run the tests. Remembering that we need to move out of the root of the repo to ensure we pick up the package diff --git a/numpy/_core/tests/examples/cython/setup.py b/numpy/_core/tests/examples/cython/setup.py index eb57477fc2a1..ba0639ebcf1c 100644 --- a/numpy/_core/tests/examples/cython/setup.py +++ b/numpy/_core/tests/examples/cython/setup.py @@ -4,10 +4,10 @@ """ import os -from distutils.core import setup import Cython from Cython.Build import cythonize +from distutils.core import setup from setuptools.extension import Extension import numpy as np diff --git a/numpy/_core/tests/test_arrayprint.py b/numpy/_core/tests/test_arrayprint.py index 06d1306dd408..3777981724dc 100644 --- a/numpy/_core/tests/test_arrayprint.py +++ b/numpy/_core/tests/test_arrayprint.py @@ -1267,8 +1267,6 @@ def test_scalar_void_float_str(): assert str(scalar) == "(1.0, 2.0)" @pytest.mark.skipif(IS_WASM, reason="wasm doesn't support asyncio") -@pytest.mark.skipif(sys.version_info < (3, 11), - reason="asyncio.barrier was added in Python 3.11") def 
test_printoptions_asyncio_safe(): asyncio = pytest.importorskip("asyncio") diff --git a/pyproject.toml b/pyproject.toml index e7830c5248a1..a459ea2cc2ee 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -13,7 +13,7 @@ authors = [{name = "Travis E. Oliphant et al."}] maintainers = [ {name = "NumPy Developers", email="numpy-discussion@python.org"}, ] -requires-python = ">=3.11" +requires-python = ">=3.12" readme = "README.md" classifiers = [ 'Development Status :: 5 - Production/Stable', diff --git a/ruff.toml b/ruff.toml index 4f87ca65c1a1..8e0081e34622 100644 --- a/ruff.toml +++ b/ruff.toml @@ -36,7 +36,8 @@ extend-select = [ "W", # pycodestyle/warning "PGH", # pygrep-hooks "PLE", # pylint/error - "UP", # pyupgrade + # TODO: re-enable after merging https://github.com/numpy/numpy/pull/30319 + # "UP", # pyupgrade ] ignore = [ # flake8-bugbear diff --git a/tools/ci/cirrus_arm.yml b/tools/ci/cirrus_arm.yml index 81a342f20e4e..026c467f2e09 100644 --- a/tools/ci/cirrus_arm.yml +++ b/tools/ci/cirrus_arm.yml @@ -9,7 +9,7 @@ modified_clone: &MODIFIED_CLONE # it's a PR so clone the main branch then merge the changes from the PR git clone https://x-access-token:${CIRRUS_REPO_CLONE_TOKEN}@github.com/${CIRRUS_REPO_FULL_NAME}.git $CIRRUS_WORKING_DIR git fetch origin pull/$CIRRUS_PR/head:pull/$CIRRUS_PR - + # CIRRUS_BASE_BRANCH will probably be `main` for the majority of the time # However, if you do a PR against a maintenance branch we will want to # merge the PR into the maintenance branch, not main @@ -45,22 +45,22 @@ freebsd_test_task: prepare_env_script: | # Create a venv (the `source` command needs bash, not the default sh shell) chsh -s /usr/local/bin/bash - python3.11 -m venv .venv + python3.12 -m venv .venv source .venv/bin/activate # Minimal build and test requirements - python3.11 -m pip install -U pip - python3.11 -m pip install meson-python Cython pytest hypothesis + python3.12 -m pip install -U pip + python3.12 -m pip install meson-python Cython pytest hypothesis 
build_script: | chsh -s /usr/local/bin/bash source .venv/bin/activate - python3.11 -m pip install . --no-build-isolation -v -Csetup-args="-Dallow-noblas=false" + python3.12 -m pip install . --no-build-isolation -v -Csetup-args="-Dallow-noblas=false" test_script: | chsh -s /usr/local/bin/bash source .venv/bin/activate cd tools - python3.11 -m pytest --pyargs numpy -m "not slow" + python3.12 -m pytest --pyargs numpy -m "not slow" ccache -s on_failure: diff --git a/tools/swig/test/testFarray.py b/tools/swig/test/testFarray.py index 75bf99c054cb..3798029dbe4b 100755 --- a/tools/swig/test/testFarray.py +++ b/tools/swig/test/testFarray.py @@ -2,6 +2,7 @@ import os import sys import unittest + from distutils.util import get_platform import numpy as np From 2582fb09ba0ec0f92efb0b138a48bb9a8643bba8 Mon Sep 17 00:00:00 2001 From: jorenham Date: Mon, 1 Dec 2025 08:24:59 +0100 Subject: [PATCH 0954/1718] MAINT: update allowed MSVC warnings --- .../check-warnings/msvc-allowed-warnings.txt | 33 +++++++------------ 1 file changed, 12 insertions(+), 21 deletions(-) diff --git a/.github/check-warnings/msvc-allowed-warnings.txt b/.github/check-warnings/msvc-allowed-warnings.txt index 3f728ae29cd8..fa5597b18216 100644 --- a/.github/check-warnings/msvc-allowed-warnings.txt +++ b/.github/check-warnings/msvc-allowed-warnings.txt @@ -1,30 +1,21 @@ -../numpy/linalg/lapack_lite/f2c.c(194): warning C4244: 'return': conversion from 'double' to 'integer', possible loss of data -../numpy/linalg/lapack_lite/f2c_c_lapack.c(230): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data -../numpy/linalg/lapack_lite/f2c_c_lapack.c(250): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data +../numpy/_core/src/common/npy_cpu_features.c(451): warning C4098: 'npy__cpu_cpuid': 'void' function returning a value ../numpy/linalg/lapack_lite/f2c_c_lapack.c(1530): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data 
-../numpy/linalg/lapack_lite/f2c_s_lapack.c(1625): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data -../numpy/linalg/lapack_lite/f2c_s_lapack.c(1645): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data -../numpy/linalg/lapack_lite/f2c_s_lapack.c(2865): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data -../numpy/linalg/lapack_lite/f2c_s_lapack.c(2882): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data -../numpy/linalg/lapack_lite/f2c_s_lapack.c(2894): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data -../numpy/linalg/lapack_lite/f2c_config.c(1368): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data -../numpy/linalg/lapack_lite/f2c.c(194): warning C4244: 'return': conversion from 'double' to 'integer', possible loss of data ../numpy/linalg/lapack_lite/f2c_c_lapack.c(230): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data ../numpy/linalg/lapack_lite/f2c_c_lapack.c(250): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data -../numpy/linalg/lapack_lite/f2c_c_lapack.c(1530): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data +../numpy/linalg/lapack_lite/f2c_config.c(1368): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data ../numpy/linalg/lapack_lite/f2c_s_lapack.c(1625): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data ../numpy/linalg/lapack_lite/f2c_s_lapack.c(1645): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data ../numpy/linalg/lapack_lite/f2c_s_lapack.c(2865): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data ../numpy/linalg/lapack_lite/f2c_s_lapack.c(2882): warning C4244: '=': conversion from 'real' to 'integer', possible loss of data ../numpy/linalg/lapack_lite/f2c_s_lapack.c(2894): warning C4244: 
'=': conversion from 'real' to 'integer', possible loss of data -numpy/random/_generator.cp312-win_arm64.pyd.p/numpy/random/_generator.pyx.c(26966): warning C4244: 'function': conversion from 'int64_t' to 'double' -numpy/random/_generator.cp312-win_arm64.pyd.p/numpy/random/_generator.pyx.c(39070): warning C4244: 'function': conversion from 'int64_t' to 'double' -numpy/random/_generator.cp312-win32.pyd.p/numpy/random/_generator.pyx.c(26966): warning C4244: 'function': conversion from 'int64_t' to 'double' -numpy/random/_generator.cp312-win32.pyd.p/numpy/random/_generator.pyx.c(39070): warning C4244: 'function': conversion from 'int64_t' to 'double' -cl : Command line warning D9025 : overriding '/arch:SSE2' with '/arch:AVX2' -cl : Command line warning D9025 : overriding '/arch:SSE2' with '/arch:AVX512' +../numpy/random/src/mt19937/mt19937.c(88): warning C4146: unary minus operator applied to unsigned type, result still unsigned +../numpy/random/src/mt19937/mt19937.c(92): warning C4146: unary minus operator applied to unsigned type, result still unsigned +../numpy/random/src/mt19937/mt19937.c(95): warning C4146: unary minus operator applied to unsigned type, result still unsigned +..\numpy\random\src/pcg64/pcg64.h(342): warning C4146: unary minus operator applied to unsigned type, result still unsigned +C:\a\numpy\numpy\numpy\random\src\pcg64\pcg64.h(342): warning C4146: unary minus operator applied to unsigned type, result still unsigned +D:\a\numpy\numpy\numpy\random\src\pcg64\pcg64.h(342): warning C4146: unary minus operator applied to unsigned type, result still unsigned cl : Command line warning D9025 : overriding '/arch:SSE2' with '/arch:AVX2' -D:\a\numpy\numpy\numpy\_core\src\npysort\x86-simd-sort/src/avx512-16bit-qsort.hpp(52): warning C4309: 'argument': truncation of constant value -D:\a\numpy\numpy\numpy\_core\src\npysort\x86-simd-sort/src/avx512-16bit-qsort.hpp(53): warning C4309: 'argument': truncation of constant value 
-D:\a\numpy\numpy\numpy\_core\src\npysort\x86-simd-sort/src/avx512-16bit-qsort.hpp(65): warning C4309: 'argument': truncation of constant value -D:\a\numpy\numpy\numpy\_core\src\npysort\x86-simd-sort/src/avx512-16bit-qsort.hpp(139): warning C4556: +numpy/random/_generator.cp312-win32.pyd.p/numpy/random/_generator.pyx.c(26290): warning C4244: 'function': conversion from 'int64_t' to 'double', possible loss of data +numpy/random/_generator.cp312-win32.pyd.p/numpy/random/_generator.pyx.c(38314): warning C4244: 'function': conversion from 'int64_t' to 'double', possible loss of data +numpy/random/_generator.cp312-win_arm64.pyd.p/numpy/random/_generator.pyx.c(26290): warning C4244: 'function': conversion from 'int64_t' to 'double', possible loss of data +numpy/random/_generator.cp312-win_arm64.pyd.p/numpy/random/_generator.pyx.c(38314): warning C4244: 'function': conversion from 'int64_t' to 'double', possible loss of data From f7a42fb820da4b5c51a3c4d5394cea305a5cfa68 Mon Sep 17 00:00:00 2001 From: jorenham Date: Mon, 1 Dec 2025 08:30:35 +0100 Subject: [PATCH 0955/1718] CI: install the correct python version on freebsd --- tools/ci/cirrus_arm.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/ci/cirrus_arm.yml b/tools/ci/cirrus_arm.yml index 026c467f2e09..119575b2a7be 100644 --- a/tools/ci/cirrus_arm.yml +++ b/tools/ci/cirrus_arm.yml @@ -32,7 +32,7 @@ freebsd_test_task: install_devtools_script: | pkg install -y git bash ninja ccache blas cblas lapack pkgconf - pkg install -y python311 + pkg install -y python312 <<: *MODIFIED_CLONE From e88edc879bc020e2d99e4a2cce7dadc0a3047a16 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 2 Dec 2025 17:39:03 +0000 Subject: [PATCH 0956/1718] MAINT: Bump actions/checkout from 6.0.0 to 6.0.1 Bumps [actions/checkout](https://github.com/actions/checkout) from 6.0.0 to 6.0.1. 
- [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/1af3b93b6815bc44a9784bd300feb67ff0d1eeb3...8e8c483db84b4bee98b60c0593521ed34d9990e8) --- updated-dependencies: - dependency-name: actions/checkout dependency-version: 6.0.1 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 2 +- .github/workflows/compiler_sanitizers.yml | 4 ++-- .github/workflows/cygwin.yml | 2 +- .github/workflows/dependency-review.yml | 2 +- .github/workflows/emscripten.yml | 2 +- .github/workflows/linux-ppc64le.yml | 2 +- .github/workflows/linux.yml | 22 +++++++++++----------- .github/workflows/linux_blas.yml | 18 +++++++++--------- .github/workflows/linux_qemu.yml | 4 ++-- .github/workflows/linux_simd.yml | 12 ++++++------ .github/workflows/macos.yml | 4 ++-- .github/workflows/mypy.yml | 2 +- .github/workflows/mypy_primer.yml | 2 +- .github/workflows/scorecards.yml | 2 +- .github/workflows/stubtest.yml | 2 +- .github/workflows/wheels.yml | 2 +- .github/workflows/windows.yml | 6 +++--- 17 files changed, 45 insertions(+), 45 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index cbeb3ca07349..2b124d115710 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -41,7 +41,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: persist-credentials: false diff --git a/.github/workflows/compiler_sanitizers.yml b/.github/workflows/compiler_sanitizers.yml index bb185282b083..8ad89759c906 100644 --- a/.github/workflows/compiler_sanitizers.yml +++ b/.github/workflows/compiler_sanitizers.yml @@ -30,7 +30,7 @@ jobs: if: github.repository == 'numpy/numpy' runs-on: 
macos-latest steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true @@ -86,7 +86,7 @@ jobs: options: --shm-size=2g # increase memory for large matrix ops steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - name: Trust working directory and initialize submodules run: | git config --global --add safe.directory /__w/numpy/numpy diff --git a/.github/workflows/cygwin.yml b/.github/workflows/cygwin.yml index 03dc0b41f987..e8f7083336e6 100644 --- a/.github/workflows/cygwin.yml +++ b/.github/workflows/cygwin.yml @@ -22,7 +22,7 @@ jobs: # To enable this workflow on a fork, comment out: if: github.repository == 'numpy/numpy' steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml index 28fe31642faa..a4bec8af6d82 100644 --- a/.github/workflows/dependency-review.yml +++ b/.github/workflows/dependency-review.yml @@ -15,7 +15,7 @@ jobs: runs-on: ubuntu-latest steps: - name: 'Checkout Repository' - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: persist-credentials: false - name: 'Dependency Review' diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index 2b7eb24f9812..099f5f67336b 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -29,7 +29,7 @@ jobs: if: github.repository == 'numpy/numpy' steps: - name: Checkout NumPy - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + uses: 
actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/linux-ppc64le.yml b/.github/workflows/linux-ppc64le.yml index 3817f3ebc0d8..cff7fb1d5b89 100644 --- a/.github/workflows/linux-ppc64le.yml +++ b/.github/workflows/linux-ppc64le.yml @@ -27,7 +27,7 @@ jobs: runs-on: ubuntu-24.04-ppc64le-p10 name: "Native PPC64LE" steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 157f2c2054e6..516f0538572b 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -37,7 +37,7 @@ jobs: runs-on: ubuntu-latest continue-on-error: true steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-depth: 0 @@ -68,7 +68,7 @@ jobs: matrix: version: ["3.12", "3.13", "3.14", "3.14t"] steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true @@ -83,7 +83,7 @@ jobs: runs-on: ubuntu-24.04 if: github.event_name != 'push' steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true @@ -113,7 +113,7 @@ jobs: needs: [smoke_test] runs-on: ubuntu-22.04 steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true @@ -153,7 +153,7 @@ jobs: if: github.repository == 'numpy/numpy' runs-on: ubuntu-24.04-arm 
steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true @@ -198,7 +198,7 @@ jobs: runs-on: ubuntu-latest if: github.event_name != 'push' steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true @@ -237,7 +237,7 @@ jobs: runs-on: ubuntu-latest if: github.event_name != 'push' steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true @@ -277,13 +277,13 @@ jobs: if: github.event_name != 'push' steps: - name: Checkout NumPy - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true persist-credentials: false - name: Checkout array-api-tests - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: repository: data-apis/array-api-tests ref: '3c273cd34d51c64ed893737306d36adab23a94a1' # v2025.05.23 @@ -315,7 +315,7 @@ jobs: runs-on: ubuntu-latest if: github.event_name != 'push' steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true @@ -323,7 +323,7 @@ jobs: - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: '3.12' - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: repository: numpy/numpy-release path: 
numpy-release diff --git a/.github/workflows/linux_blas.yml b/.github/workflows/linux_blas.yml index 6d0ab1a93777..c4e2d55330c4 100644 --- a/.github/workflows/linux_blas.yml +++ b/.github/workflows/linux_blas.yml @@ -69,7 +69,7 @@ jobs: USE_NIGHTLY_OPENBLAS: ${{ matrix.USE_NIGHTLY_OPENBLAS }} name: "Test Linux (${{ matrix.USE_NIGHTLY_OPENBLAS && 'nightly' || 'stable' }} OpenBLAS)" steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true @@ -127,7 +127,7 @@ jobs: run: | dnf install git gcc-gfortran g++ python3-devel openblas-devel -y - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true @@ -162,7 +162,7 @@ jobs: run: | dnf install git gcc-gfortran g++ python3-devel flexiblas-devel -y - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true @@ -192,7 +192,7 @@ jobs: runs-on: ubuntu-latest name: "OpenBLAS with CMake" steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true @@ -220,7 +220,7 @@ jobs: runs-on: ubuntu-latest name: "Debian libblas/liblapack" steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true @@ -256,7 +256,7 @@ jobs: # If it is needed in the future, use install name `pkgconf-pkg-config` zypper install -y git gcc-c++ python3-pip python3-devel blas cblas lapack - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 
v6.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true @@ -283,7 +283,7 @@ jobs: runs-on: ubuntu-latest name: "MKL (LP64, ILP64, SDL)" steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true @@ -347,7 +347,7 @@ jobs: runs-on: ubuntu-latest name: "BLIS" steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true @@ -384,7 +384,7 @@ jobs: runs-on: ubuntu-latest name: "ATLAS" steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/linux_qemu.yml b/.github/workflows/linux_qemu.yml index e4688e50e5bc..268e1d916d1c 100644 --- a/.github/workflows/linux_qemu.yml +++ b/.github/workflows/linux_qemu.yml @@ -95,7 +95,7 @@ jobs: name: "${{ matrix.BUILD_PROP[0] }}" steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true @@ -202,7 +202,7 @@ jobs: name: "${{ matrix.BUILD_PROP[0] }}" steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/linux_simd.yml b/.github/workflows/linux_simd.yml index a94eca64b59f..92e46c8053b8 100644 --- a/.github/workflows/linux_simd.yml +++ b/.github/workflows/linux_simd.yml @@ -58,7 +58,7 @@ jobs: env: MESON_ARGS: "-Dallow-noblas=true -Dcpu-dispatch=none" steps: - - uses: 
actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true @@ -76,7 +76,7 @@ jobs: env: MESON_ARGS: "-Dallow-noblas=true" steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true @@ -123,7 +123,7 @@ jobs: args: "-Dallow-noblas=true -Dcpu-baseline=native -Dcpu-dispatch=none" name: "ARM64 SIMD - ${{ matrix.config.name }}" steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true @@ -170,7 +170,7 @@ jobs: name: "${{ matrix.BUILD_PROP[0] }}" steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true @@ -185,7 +185,7 @@ jobs: needs: [baseline_only] runs-on: ubuntu-24.04 steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true @@ -235,7 +235,7 @@ jobs: needs: [baseline_only] runs-on: ubuntu-24.04 steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index c13df7226a1c..f2af68370b99 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -32,7 +32,7 @@ jobs: python-version: ["3.12"] steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: 
actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true @@ -121,7 +121,7 @@ jobs: version: ["3.12", "3.14t"] steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml index d8a1e3dbee97..1deb5ab82815 100644 --- a/.github/workflows/mypy.yml +++ b/.github/workflows/mypy.yml @@ -56,7 +56,7 @@ jobs: - [ubuntu-latest, '3.13'] - [windows-latest, '3.12'] steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/mypy_primer.yml b/.github/workflows/mypy_primer.yml index 03b471b10c63..040a154d2895 100644 --- a/.github/workflows/mypy_primer.yml +++ b/.github/workflows/mypy_primer.yml @@ -28,7 +28,7 @@ jobs: shard-index: [0] # e.g. 
change this to [0, 1, 2] and --num-shards below to 3 fail-fast: false steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: path: numpy_to_test fetch-depth: 0 diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 4f9bf10985a5..9303e511dde8 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -25,7 +25,7 @@ jobs: steps: - name: "Checkout code" - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v3.1.0 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v3.1.0 with: persist-credentials: false diff --git a/.github/workflows/stubtest.yml b/.github/workflows/stubtest.yml index 5d2ca7385ed5..dcac63be8a0a 100644 --- a/.github/workflows/stubtest.yml +++ b/.github/workflows/stubtest.yml @@ -37,7 +37,7 @@ jobs: py: ["3.12", "3.14"] steps: - - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 86d4c35ca8a4..436e929fe636 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -48,7 +48,7 @@ jobs: IS_32_BIT: ${{ matrix.buildplat[1] == 'win32' }} # used in cibw_test_command.sh steps: - name: Checkout numpy - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: true persist-credentials: false diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 3b6f7dd6ff73..3ef295cc8f5b 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -25,7 +25,7 @@ jobs: if: github.repository == 'numpy/numpy' steps: - name: Checkout - uses: 
actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true @@ -83,7 +83,7 @@ jobs: if: github.repository == 'numpy/numpy' steps: - name: Checkout - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true @@ -139,7 +139,7 @@ jobs: if: github.repository == 'numpy/numpy' steps: - name: Checkout - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true From db92fe804e4b567200de7141bb171604f6883d57 Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 2 Dec 2025 23:24:11 +0100 Subject: [PATCH 0957/1718] MAINT: bump ``ruff`` to ``0.14.7`` --- environment.yml | 2 +- requirements/linter_requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/environment.yml b/environment.yml index c5ee0c381bb3..e27172e8b7b2 100644 --- a/environment.yml +++ b/environment.yml @@ -46,7 +46,7 @@ dependencies: - breathe>4.33.0 # For linting - cython-lint - - ruff=0.14.0 + - ruff=0.14.7 - gitpython # Used in some tests - cffi diff --git a/requirements/linter_requirements.txt b/requirements/linter_requirements.txt index da6bac6f7b84..73eafbaf52a1 100644 --- a/requirements/linter_requirements.txt +++ b/requirements/linter_requirements.txt @@ -1,4 +1,4 @@ # keep in sync with `environment.yml` cython-lint -ruff==0.14.0 +ruff==0.14.7 GitPython>=3.1.30 From 95e36ee856d93a39ab76e675aa5b8a3c78be6447 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Tue, 2 Dec 2025 15:48:42 -0700 Subject: [PATCH 0958/1718] BUG: fix free-threaded races in RandomState --- numpy/_core/tests/test_multithreading.py | 13 ++++++++++++- numpy/random/mtrand.pyx | 15 ++++++++++----- 2 files changed, 22 
insertions(+), 6 deletions(-) diff --git a/numpy/_core/tests/test_multithreading.py b/numpy/_core/tests/test_multithreading.py index 9b10974839a4..44b2c34cd68b 100644 --- a/numpy/_core/tests/test_multithreading.py +++ b/numpy/_core/tests/test_multithreading.py @@ -16,7 +16,7 @@ reason="tests in this module are already explicitly multi-threaded" ) -def test_parallel_randomstate_creation(): +def test_parallel_randomstate(): # if the coercion cache is enabled and not thread-safe, creating # RandomState instances simultaneously leads to a data race def func(seed): @@ -24,6 +24,17 @@ def func(seed): run_threaded(func, 500, pass_count=True) + # seeding and setting state shouldn't race with generating RNG samples + rng = np.random.RandomState() + + def func(seed): + base_rng = np.random.RandomState(seed) + state = base_rng.get_state() + rng.seed(seed) + rng.random() + rng.set_state(state) + + run_threaded(func, 8, pass_count=True) def test_parallel_ufunc_execution(): # if the loop data cache or dispatch cache are not thread-safe diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx index 8e7f437641ca..213d227cdeeb 100644 --- a/numpy/random/mtrand.pyx +++ b/numpy/random/mtrand.pyx @@ -251,8 +251,9 @@ cdef class RandomState: """ if not isinstance(self._bit_generator, _MT19937): raise TypeError('can only re-seed a MT19937 BitGenerator') - self._bit_generator._legacy_seeding(seed) - self._reset_gauss() + with cython.critical_section(self): + self._bit_generator._legacy_seeding(seed) + self._reset_gauss() def get_state(self, legacy=True): """ @@ -381,9 +382,13 @@ cdef class RandomState: st['has_gauss'] = state[3] st['gauss'] = state[4] - self._aug_state.gauss = st.get('gauss', 0.0) - self._aug_state.has_gauss = st.get('has_gauss', 0) - self._bit_generator.state = st + cdef double gauss = st.get('gauss', 0.0) + cdef int has_gauss = st.get('has_gauss', 0) + + with cython.critical_section(self): + self._aug_state.gauss = gauss + self._aug_state.has_gauss = has_gauss + 
self._bit_generator.state = st def random_sample(self, size=None): """ From 6c14af1a934723d6a8c624f1cbd7481d8928e51b Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Tue, 2 Dec 2025 16:07:11 -0700 Subject: [PATCH 0959/1718] MAINT: bump minimum required Cython version --- INSTALL.rst | 2 +- meson.build | 4 ++-- numpy/_core/tests/examples/cython/meson.build | 9 +++------ numpy/_core/tests/examples/limited_api/meson.build | 4 ++-- numpy/_core/tests/test_cython.py | 2 +- numpy/_core/tests/test_limited_api.py | 2 +- numpy/random/_examples/cython/meson.build | 9 +++------ numpy/random/tests/test_extending.py | 2 +- pyproject.toml | 2 +- requirements/build_requirements.txt | 2 +- 10 files changed, 16 insertions(+), 22 deletions(-) diff --git a/INSTALL.rst b/INSTALL.rst index 6e9d2cd242f5..214e2785eb33 100644 --- a/INSTALL.rst +++ b/INSTALL.rst @@ -20,7 +20,7 @@ Building NumPy requires the following installed software: e.g., on Debian/Ubuntu one needs to install both `python3` and `python3-dev`. On Windows and macOS this is normally not an issue. 
-2) Cython >= 3.0.6 +2) Cython >= 3.1.0 3) pytest__ (optional) diff --git a/meson.build b/meson.build index 2cb7ce987ad5..9d398dbd180b 100644 --- a/meson.build +++ b/meson.build @@ -30,8 +30,8 @@ elif cc.get_id() == 'msvc' 'when building with MSVC') endif endif -if not cy.version().version_compare('>=3.0.6') - error('NumPy requires Cython >= 3.0.6') +if not cy.version().version_compare('>=3.1.0') + error('NumPy requires Cython >= 3.1.0') endif py = import('python').find_installation(pure: false) diff --git a/numpy/_core/tests/examples/cython/meson.build b/numpy/_core/tests/examples/cython/meson.build index 8362c339ae73..9afe90103665 100644 --- a/numpy/_core/tests/examples/cython/meson.build +++ b/numpy/_core/tests/examples/cython/meson.build @@ -6,14 +6,11 @@ cc = meson.get_compiler('c') cy = meson.get_compiler('cython') # Keep synced with pyproject.toml -if not cy.version().version_compare('>=3.0.6') - error('tests requires Cython >= 3.0.6') +if not cy.version().version_compare('>=3.1.0') + error('tests requires Cython >= 3.1.0') endif -cython_args = [] -if cy.version().version_compare('>=3.1.0') - cython_args += ['-Xfreethreading_compatible=True'] -endif +cython_args = ['-Xfreethreading_compatible=True'] npy_include_path = run_command(py, [ '-c', diff --git a/numpy/_core/tests/examples/limited_api/meson.build b/numpy/_core/tests/examples/limited_api/meson.build index 65287d8654f5..02496acd1f84 100644 --- a/numpy/_core/tests/examples/limited_api/meson.build +++ b/numpy/_core/tests/examples/limited_api/meson.build @@ -6,8 +6,8 @@ cc = meson.get_compiler('c') cy = meson.get_compiler('cython') # Keep synced with pyproject.toml -if not cy.version().version_compare('>=3.0.6') - error('tests requires Cython >= 3.0.6') +if not cy.version().version_compare('>=3.1.0') + error('tests requires Cython >= 3.1.0') endif npy_include_path = run_command(py, [ diff --git a/numpy/_core/tests/test_cython.py b/numpy/_core/tests/test_cython.py index c405a59e535e..0c00bab0333c 100644 
--- a/numpy/_core/tests/test_cython.py +++ b/numpy/_core/tests/test_cython.py @@ -19,7 +19,7 @@ from numpy._utils import _pep440 # Note: keep in sync with the one in pyproject.toml - required_version = "3.0.6" + required_version = "3.1.0" if _pep440.parse(cython_version) < _pep440.Version(required_version): # too old or wrong cython, skip the test cython = None diff --git a/numpy/_core/tests/test_limited_api.py b/numpy/_core/tests/test_limited_api.py index 984210e53af7..9aa469cd83c8 100644 --- a/numpy/_core/tests/test_limited_api.py +++ b/numpy/_core/tests/test_limited_api.py @@ -17,7 +17,7 @@ from numpy._utils import _pep440 # Note: keep in sync with the one in pyproject.toml - required_version = "3.0.6" + required_version = "3.1.0" if _pep440.parse(cython_version) < _pep440.Version(required_version): # too old or wrong cython, skip the test cython = None diff --git a/numpy/random/_examples/cython/meson.build b/numpy/random/_examples/cython/meson.build index 7aa367d13787..68f30827442a 100644 --- a/numpy/random/_examples/cython/meson.build +++ b/numpy/random/_examples/cython/meson.build @@ -7,14 +7,11 @@ cc = meson.get_compiler('c') cy = meson.get_compiler('cython') # Keep synced with pyproject.toml -if not cy.version().version_compare('>=3.0.6') - error('tests requires Cython >= 3.0.6') +if not cy.version().version_compare('>=3.1.0') + error('tests requires Cython >= 3.1.0') endif -base_cython_args = [] -if cy.version().version_compare('>=3.1.0') - base_cython_args += ['-Xfreethreading_compatible=True'] -endif +base_cython_args = ['-Xfreethreading_compatible=True'] _numpy_abs = run_command(py3, ['-c', 'import os; os.chdir(".."); import numpy; print(os.path.abspath(numpy.get_include() + "../../.."))'], diff --git a/numpy/random/tests/test_extending.py b/numpy/random/tests/test_extending.py index a1e64ecbe343..19f56d9e3a83 100644 --- a/numpy/random/tests/test_extending.py +++ b/numpy/random/tests/test_extending.py @@ -38,7 +38,7 @@ else: from numpy._utils import 
_pep440 # Note: keep in sync with the one in pyproject.toml - required_version = '3.0.6' + required_version = '3.1.0' if _pep440.parse(cython_version) < _pep440.Version(required_version): # too old or wrong cython, skip the test cython = None diff --git a/pyproject.toml b/pyproject.toml index e7830c5248a1..861d40ea1836 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -2,7 +2,7 @@ build-backend = "mesonpy" requires = [ "meson-python>=0.18.0", - "Cython>=3.0.6", # keep in sync with version check in meson.build + "Cython>=3.1.0", # keep in sync with version check in meson.build ] [project] diff --git a/requirements/build_requirements.txt b/requirements/build_requirements.txt index 1f6eb1435cfc..52a062c92ebd 100644 --- a/requirements/build_requirements.txt +++ b/requirements/build_requirements.txt @@ -1,5 +1,5 @@ meson-python>=0.13.1 -Cython>=3.0.6 +Cython>=3.1.0 ninja spin==0.15 build From 258a202c126a9d2a5e34fc5770618b148d93feae Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Tue, 2 Dec 2025 16:47:20 -0700 Subject: [PATCH 0960/1718] MAINT: try with an RLock instead --- numpy/random/bit_generator.pyx | 4 ++-- numpy/random/mtrand.pyx | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/numpy/random/bit_generator.pyx b/numpy/random/bit_generator.pyx index 0bb9552a86ce..01b35a7a621a 100644 --- a/numpy/random/bit_generator.pyx +++ b/numpy/random/bit_generator.pyx @@ -38,7 +38,7 @@ from itertools import cycle import re from secrets import randbits -from threading import Lock +from threading import RLock from cpython.pycapsule cimport PyCapsule_New @@ -522,7 +522,7 @@ cdef class BitGenerator: """ def __init__(self, seed=None): - self.lock = Lock() + self.lock = RLock() self._bitgen.state = 0 if type(self) is BitGenerator: raise NotImplementedError('BitGenerator is a base class and cannot be instantized') diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx index 213d227cdeeb..ae1706eb884b 100644 --- a/numpy/random/mtrand.pyx +++ 
b/numpy/random/mtrand.pyx @@ -251,7 +251,7 @@ cdef class RandomState: """ if not isinstance(self._bit_generator, _MT19937): raise TypeError('can only re-seed a MT19937 BitGenerator') - with cython.critical_section(self): + with self.lock: self._bit_generator._legacy_seeding(seed) self._reset_gauss() @@ -385,7 +385,7 @@ cdef class RandomState: cdef double gauss = st.get('gauss', 0.0) cdef int has_gauss = st.get('has_gauss', 0) - with cython.critical_section(self): + with self.lock: self._aug_state.gauss = gauss self._aug_state.has_gauss = has_gauss self._bit_generator.state = st From 638b4fb4c7678ea2e9dbec7d8d4cf05cc46dfa7b Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Tue, 2 Dec 2025 18:38:12 -0700 Subject: [PATCH 0961/1718] Revert "MAINT: bump minimum required Cython version" This reverts commit 6c14af1a934723d6a8c624f1cbd7481d8928e51b. --- INSTALL.rst | 2 +- meson.build | 4 ++-- numpy/_core/tests/examples/cython/meson.build | 9 ++++++--- numpy/_core/tests/examples/limited_api/meson.build | 4 ++-- numpy/_core/tests/test_cython.py | 2 +- numpy/_core/tests/test_limited_api.py | 2 +- numpy/random/_examples/cython/meson.build | 9 ++++++--- numpy/random/tests/test_extending.py | 2 +- pyproject.toml | 2 +- requirements/build_requirements.txt | 2 +- 10 files changed, 22 insertions(+), 16 deletions(-) diff --git a/INSTALL.rst b/INSTALL.rst index 214e2785eb33..6e9d2cd242f5 100644 --- a/INSTALL.rst +++ b/INSTALL.rst @@ -20,7 +20,7 @@ Building NumPy requires the following installed software: e.g., on Debian/Ubuntu one needs to install both `python3` and `python3-dev`. On Windows and macOS this is normally not an issue. 
-2) Cython >= 3.1.0 +2) Cython >= 3.0.6 3) pytest__ (optional) diff --git a/meson.build b/meson.build index 9d398dbd180b..2cb7ce987ad5 100644 --- a/meson.build +++ b/meson.build @@ -30,8 +30,8 @@ elif cc.get_id() == 'msvc' 'when building with MSVC') endif endif -if not cy.version().version_compare('>=3.1.0') - error('NumPy requires Cython >= 3.1.0') +if not cy.version().version_compare('>=3.0.6') + error('NumPy requires Cython >= 3.0.6') endif py = import('python').find_installation(pure: false) diff --git a/numpy/_core/tests/examples/cython/meson.build b/numpy/_core/tests/examples/cython/meson.build index 9afe90103665..8362c339ae73 100644 --- a/numpy/_core/tests/examples/cython/meson.build +++ b/numpy/_core/tests/examples/cython/meson.build @@ -6,11 +6,14 @@ cc = meson.get_compiler('c') cy = meson.get_compiler('cython') # Keep synced with pyproject.toml -if not cy.version().version_compare('>=3.1.0') - error('tests requires Cython >= 3.1.0') +if not cy.version().version_compare('>=3.0.6') + error('tests requires Cython >= 3.0.6') endif -cython_args = ['-Xfreethreading_compatible=True'] +cython_args = [] +if cy.version().version_compare('>=3.1.0') + cython_args += ['-Xfreethreading_compatible=True'] +endif npy_include_path = run_command(py, [ '-c', diff --git a/numpy/_core/tests/examples/limited_api/meson.build b/numpy/_core/tests/examples/limited_api/meson.build index 02496acd1f84..65287d8654f5 100644 --- a/numpy/_core/tests/examples/limited_api/meson.build +++ b/numpy/_core/tests/examples/limited_api/meson.build @@ -6,8 +6,8 @@ cc = meson.get_compiler('c') cy = meson.get_compiler('cython') # Keep synced with pyproject.toml -if not cy.version().version_compare('>=3.1.0') - error('tests requires Cython >= 3.1.0') +if not cy.version().version_compare('>=3.0.6') + error('tests requires Cython >= 3.0.6') endif npy_include_path = run_command(py, [ diff --git a/numpy/_core/tests/test_cython.py b/numpy/_core/tests/test_cython.py index 0c00bab0333c..c405a59e535e 100644 
--- a/numpy/_core/tests/test_cython.py +++ b/numpy/_core/tests/test_cython.py @@ -19,7 +19,7 @@ from numpy._utils import _pep440 # Note: keep in sync with the one in pyproject.toml - required_version = "3.1.0" + required_version = "3.0.6" if _pep440.parse(cython_version) < _pep440.Version(required_version): # too old or wrong cython, skip the test cython = None diff --git a/numpy/_core/tests/test_limited_api.py b/numpy/_core/tests/test_limited_api.py index 9aa469cd83c8..984210e53af7 100644 --- a/numpy/_core/tests/test_limited_api.py +++ b/numpy/_core/tests/test_limited_api.py @@ -17,7 +17,7 @@ from numpy._utils import _pep440 # Note: keep in sync with the one in pyproject.toml - required_version = "3.1.0" + required_version = "3.0.6" if _pep440.parse(cython_version) < _pep440.Version(required_version): # too old or wrong cython, skip the test cython = None diff --git a/numpy/random/_examples/cython/meson.build b/numpy/random/_examples/cython/meson.build index 68f30827442a..7aa367d13787 100644 --- a/numpy/random/_examples/cython/meson.build +++ b/numpy/random/_examples/cython/meson.build @@ -7,11 +7,14 @@ cc = meson.get_compiler('c') cy = meson.get_compiler('cython') # Keep synced with pyproject.toml -if not cy.version().version_compare('>=3.1.0') - error('tests requires Cython >= 3.1.0') +if not cy.version().version_compare('>=3.0.6') + error('tests requires Cython >= 3.0.6') endif -base_cython_args = ['-Xfreethreading_compatible=True'] +base_cython_args = [] +if cy.version().version_compare('>=3.1.0') + base_cython_args += ['-Xfreethreading_compatible=True'] +endif _numpy_abs = run_command(py3, ['-c', 'import os; os.chdir(".."); import numpy; print(os.path.abspath(numpy.get_include() + "../../.."))'], diff --git a/numpy/random/tests/test_extending.py b/numpy/random/tests/test_extending.py index 19f56d9e3a83..a1e64ecbe343 100644 --- a/numpy/random/tests/test_extending.py +++ b/numpy/random/tests/test_extending.py @@ -38,7 +38,7 @@ else: from numpy._utils import 
_pep440 # Note: keep in sync with the one in pyproject.toml - required_version = '3.1.0' + required_version = '3.0.6' if _pep440.parse(cython_version) < _pep440.Version(required_version): # too old or wrong cython, skip the test cython = None diff --git a/pyproject.toml b/pyproject.toml index 861d40ea1836..e7830c5248a1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -2,7 +2,7 @@ build-backend = "mesonpy" requires = [ "meson-python>=0.18.0", - "Cython>=3.1.0", # keep in sync with version check in meson.build + "Cython>=3.0.6", # keep in sync with version check in meson.build ] [project] diff --git a/requirements/build_requirements.txt b/requirements/build_requirements.txt index 52a062c92ebd..1f6eb1435cfc 100644 --- a/requirements/build_requirements.txt +++ b/requirements/build_requirements.txt @@ -1,5 +1,5 @@ meson-python>=0.13.1 -Cython>=3.1.0 +Cython>=3.0.6 ninja spin==0.15 build From 4a6bb2d843ba271f09a4115fd2c2c9dc82c78820 Mon Sep 17 00:00:00 2001 From: Kader Miyanyedi <48386782+Kadermiyanyedi@users.noreply.github.com> Date: Thu, 4 Dec 2025 00:35:19 +0300 Subject: [PATCH 0962/1718] MAINT: Enable linting with ruff E501 (#30356) --- numpy/_core/tests/test_arrayprint.py | 7 +++--- numpy/_core/tests/test_cpu_dispatcher.py | 6 ++--- numpy/_core/tests/test_dtype.py | 32 ++++++++++++++++++++---- ruff.toml | 3 --- 4 files changed, 34 insertions(+), 14 deletions(-) diff --git a/numpy/_core/tests/test_arrayprint.py b/numpy/_core/tests/test_arrayprint.py index 3777981724dc..e6cbb6f72229 100644 --- a/numpy/_core/tests/test_arrayprint.py +++ b/numpy/_core/tests/test_arrayprint.py @@ -277,11 +277,12 @@ def test_structure_format_mixed(self): # for issue #5692 A = np.zeros(shape=10, dtype=[("A", "M8[s]")]) A[5:].fill(np.datetime64('NaT')) + date_string = '1970-01-01T00:00:00' assert_equal( np.array2string(A), - textwrap.dedent("""\ - [('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) - ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) ('NaT',) 
('NaT',) + textwrap.dedent(f"""\ + [('{date_string}',) ('{date_string}',) ('{date_string}',) + ('{date_string}',) ('{date_string}',) ('NaT',) ('NaT',) ('NaT',) ('NaT',) ('NaT',)]""") ) finally: diff --git a/numpy/_core/tests/test_cpu_dispatcher.py b/numpy/_core/tests/test_cpu_dispatcher.py index 01fd53a3dbd0..04acf13c228d 100644 --- a/numpy/_core/tests/test_cpu_dispatcher.py +++ b/numpy/_core/tests/test_cpu_dispatcher.py @@ -20,9 +20,9 @@ def test_dispatcher(): highest_sfx = "" # no suffix for the baseline all_sfx = [] for feature in reversed(targets): - # skip baseline features, by the default `CCompilerOpt` do not generate separated objects - # for the baseline, just one object combined all of them via 'baseline' option - # within the configuration statements. + # skip baseline features, by the default `CCompilerOpt` do not generate + # separated objects for the baseline, just one object combined all of them + # via 'baseline' option within the configuration statements. if feature in __cpu_baseline__: continue # check compiler and running machine support diff --git a/numpy/_core/tests/test_dtype.py b/numpy/_core/tests/test_dtype.py index 8819262c4e21..13cc84d61d1e 100644 --- a/numpy/_core/tests/test_dtype.py +++ b/numpy/_core/tests/test_dtype.py @@ -1836,7 +1836,12 @@ class Union(ctypes.Union): ] expected = np.dtype({ "names": ['a', 'b', 'c', 'd'], - "formats": ['u1', np.uint16, np.uint32, [('one', 'u1'), ('two', np.uint32)]], + "formats": [ + 'u1', + np.uint16, + np.uint32, + [('one', 'u1'), ('two', np.uint32)], + ], "offsets": [0, 0, 0, 0], "itemsize": ctypes.sizeof(Union) }) @@ -1860,7 +1865,12 @@ class Union(ctypes.Union): ] expected = np.dtype({ "names": ['a', 'b', 'c', 'd'], - "formats": ['u1', np.uint16, np.uint32, [('one', 'u1'), ('two', np.uint32)]], + "formats": [ + 'u1', + np.uint16, + np.uint32, + [('one', 'u1'), ('two', np.uint32)], + ], "offsets": [0, 0, 0, 0], "itemsize": ctypes.sizeof(Union) }) @@ -1892,7 +1902,15 @@ class 
PackedStructure(ctypes.Structure): ('g', ctypes.c_uint8) ] expected = np.dtype({ - "formats": [np.uint8, np.uint16, np.uint8, np.uint16, np.uint32, np.uint32, np.uint8], + "formats": [ + np.uint8, + np.uint16, + np.uint8, + np.uint16, + np.uint32, + np.uint32, + np.uint8, + ], "offsets": [0, 2, 4, 6, 8, 12, 16], "names": ['a', 'b', 'c', 'd', 'e', 'f', 'g'], "itemsize": 18}) @@ -1965,7 +1983,9 @@ def test_pairs(self, pair): class TestUserDType: @pytest.mark.leaks_references(reason="dynamically creates custom dtype.") - @pytest.mark.thread_unsafe(reason="crashes when GIL disabled, dtype setup is thread-unsafe") + @pytest.mark.thread_unsafe( + reason="crashes when GIL disabled, dtype setup is thread-unsafe", + ) def test_custom_structured_dtype(self): class mytype: pass @@ -1986,7 +2006,9 @@ class mytype: del a assert sys.getrefcount(o) == startcount - @pytest.mark.thread_unsafe(reason="crashes when GIL disabled, dtype setup is thread-unsafe") + @pytest.mark.thread_unsafe( + reason="crashes when GIL disabled, dtype setup is thread-unsafe", + ) def test_custom_structured_dtype_errors(self): class mytype: pass diff --git a/ruff.toml b/ruff.toml index 8e0081e34622..c02a90e610e6 100644 --- a/ruff.toml +++ b/ruff.toml @@ -81,10 +81,7 @@ ignore = [ "bench_*.py" = ["B015", "B018"] "test*.py" = ["B015", "B018", "E201", "E714"] -"numpy/_core/tests/test_arrayprint.py" = ["E501"] -"numpy/_core/tests/test_cpu_dispatcher.py" = ["E501"] "numpy/_core/tests/test_cpu_features.py" = ["E501"] -"numpy/_core/tests/test_dtype.py" = ["E501"] "numpy/_core/tests/test_defchararray.py" = ["E501"] "numpy/_core/tests/test_einsum.py" = ["E501"] "numpy/_core/tests/test_multiarray.py" = ["E501"] From e0e4a764f7a329a821f222e8934dc11ec794387e Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Thu, 4 Dec 2025 01:11:04 -0700 Subject: [PATCH 0963/1718] MAINT: Try pinning to Python 3.14.0t in macos tests. 
(#30369) --- .github/workflows/macos.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index f2af68370b99..4fe1576c86e0 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -118,7 +118,7 @@ jobs: build_runner: - [ macos-15-intel, "macos_x86_64" ] - [ macos-14, "macos_arm64" ] - version: ["3.12", "3.14t"] + version: ["3.12", "3.14.0t"] steps: - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 From c6fce1a5d2cfa2e7072696d441469daf46f36203 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 4 Dec 2025 09:17:25 +0100 Subject: [PATCH 0964/1718] CI: bump FreeBSD from 14.2 to 14.3 --- tools/ci/cirrus_arm.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/ci/cirrus_arm.yml b/tools/ci/cirrus_arm.yml index 119575b2a7be..977921d8236d 100644 --- a/tools/ci/cirrus_arm.yml +++ b/tools/ci/cirrus_arm.yml @@ -25,7 +25,7 @@ freebsd_test_task: use_compute_credits: $CIRRUS_USER_COLLABORATOR == 'true' compute_engine_instance: image_project: freebsd-org-cloud-dev - image: family/freebsd-14-2 + image: family/freebsd-14-3 platform: freebsd cpu: 1 memory: 4G From c1ab2e375c3e6ddbde7fcf61f18ca6a8bc3cf437 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 3 Dec 2025 22:20:00 +0100 Subject: [PATCH 0965/1718] TYP: remove unused ``type: ignore`` comments --- numpy/__init__.pyi | 216 +++++++++++++++---------------- numpy/_core/multiarray.pyi | 12 +- numpy/_typing/_nbit_base.pyi | 2 +- numpy/lib/_arrayterator_impl.pyi | 2 +- numpy/lib/_shape_base_impl.pyi | 8 +- numpy/lib/_type_check_impl.pyi | 24 ++-- numpy/lib/_ufunclike_impl.pyi | 4 +- numpy/lib/_user_array_impl.pyi | 10 +- numpy/lib/user_array.pyi | 2 +- numpy/linalg/_linalg.pyi | 6 +- numpy/ma/core.pyi | 141 ++++++++++---------- numpy/ma/extras.pyi | 1 + numpy/ma/mrecords.pyi | 7 +- numpy/matlib.pyi | 2 +- numpy/random/_generator.pyi | 74 +++++------ numpy/random/mtrand.pyi | 130 
+++++++++---------- numpy/testing/__init__.pyi | 2 +- numpy/typing/__init__.pyi | 7 +- tools/stubtest/mypy.ini | 2 +- 19 files changed, 331 insertions(+), 321 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 3c459952d6b4..8ef7c321fa76 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -430,7 +430,7 @@ from numpy.lib._arraysetops_impl import ( unique_values, ) -from numpy.lib._function_base_impl import ( # type: ignore[deprecated] +from numpy.lib._function_base_impl import ( select, piecewise, trim_zeros, @@ -2560,7 +2560,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload # (None) def reshape(self, shape: None, /, *, order: _OrderACF = "C", copy: builtins.bool | None = None) -> Self: ... @overload # (empty_sequence) - def reshape( # type: ignore[overload-overlap] # mypy false positive + def reshape( # mypy false positive self, shape: Sequence[Never], /, @@ -2799,9 +2799,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __matmul__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... @overload - def __matmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap] + def __matmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... @overload - def __matmul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __matmul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... @overload def __matmul__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -2811,11 +2811,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __matmul__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... 
@overload - def __matmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __matmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload - def __matmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + def __matmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload - def __matmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + def __matmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload def __matmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... @overload @@ -2828,9 +2828,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload # signature equivalent to __matmul__ def __rmatmul__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... @overload - def __rmatmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap] + def __rmatmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... @overload - def __rmatmul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __rmatmul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... @overload def __rmatmul__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -2840,11 +2840,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __rmatmul__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... 
@overload - def __rmatmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __rmatmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload - def __rmatmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + def __rmatmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload - def __rmatmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + def __rmatmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload def __rmatmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... @overload @@ -2857,19 +2857,19 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __mod__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ... @overload - def __mod__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] + def __mod__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ... @overload - def __mod__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[overload-overlap] + def __mod__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload - def __mod__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] + def __mod__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... @overload def __mod__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload def __mod__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... 
@overload - def __mod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __mod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload - def __mod__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + def __mod__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload def __mod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload @@ -2882,19 +2882,19 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload # signature equivalent to __mod__ def __rmod__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ... @overload - def __rmod__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] + def __rmod__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ... @overload - def __rmod__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[overload-overlap] + def __rmod__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload - def __rmod__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] + def __rmod__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... @overload def __rmod__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload def __rmod__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __rmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... 
# type: ignore[overload-overlap] + def __rmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload - def __rmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + def __rmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload def __rmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload @@ -2907,19 +2907,19 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __divmod__(self: NDArray[_RealNumberT], rhs: int | np.bool, /) -> _2Tuple[ndarray[_ShapeT_co, dtype[_RealNumberT]]]: ... @overload - def __divmod__(self: NDArray[_RealNumberT], rhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[_RealNumberT]]: ... # type: ignore[overload-overlap] + def __divmod__(self: NDArray[_RealNumberT], rhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[_RealNumberT]]: ... @overload - def __divmod__(self: NDArray[np.bool], rhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[int8]]: ... # type: ignore[overload-overlap] + def __divmod__(self: NDArray[np.bool], rhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[int8]]: ... @overload - def __divmod__(self: NDArray[np.bool], rhs: _ArrayLike[_RealNumberT], /) -> _2Tuple[NDArray[_RealNumberT]]: ... # type: ignore[overload-overlap] + def __divmod__(self: NDArray[np.bool], rhs: _ArrayLike[_RealNumberT], /) -> _2Tuple[NDArray[_RealNumberT]]: ... @overload def __divmod__(self: NDArray[float64], rhs: _ArrayLikeFloat64_co, /) -> _2Tuple[NDArray[float64]]: ... @overload def __divmod__(self: _ArrayFloat64_co, rhs: _ArrayLike[floating[_64Bit]], /) -> _2Tuple[NDArray[float64]]: ... @overload - def __divmod__(self: _ArrayUInt_co, rhs: _ArrayLikeUInt_co, /) -> _2Tuple[NDArray[unsignedinteger]]: ... # type: ignore[overload-overlap] + def __divmod__(self: _ArrayUInt_co, rhs: _ArrayLikeUInt_co, /) -> _2Tuple[NDArray[unsignedinteger]]: ... 
@overload - def __divmod__(self: _ArrayInt_co, rhs: _ArrayLikeInt_co, /) -> _2Tuple[NDArray[signedinteger]]: ... # type: ignore[overload-overlap] + def __divmod__(self: _ArrayInt_co, rhs: _ArrayLikeInt_co, /) -> _2Tuple[NDArray[signedinteger]]: ... @overload def __divmod__(self: _ArrayFloat_co, rhs: _ArrayLikeFloat_co, /) -> _2Tuple[NDArray[floating]]: ... @overload @@ -2928,19 +2928,19 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload # signature equivalent to __divmod__ def __rdivmod__(self: NDArray[_RealNumberT], lhs: int | np.bool, /) -> _2Tuple[ndarray[_ShapeT_co, dtype[_RealNumberT]]]: ... @overload - def __rdivmod__(self: NDArray[_RealNumberT], lhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[_RealNumberT]]: ... # type: ignore[overload-overlap] + def __rdivmod__(self: NDArray[_RealNumberT], lhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[_RealNumberT]]: ... @overload - def __rdivmod__(self: NDArray[np.bool], lhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[int8]]: ... # type: ignore[overload-overlap] + def __rdivmod__(self: NDArray[np.bool], lhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[int8]]: ... @overload - def __rdivmod__(self: NDArray[np.bool], lhs: _ArrayLike[_RealNumberT], /) -> _2Tuple[NDArray[_RealNumberT]]: ... # type: ignore[overload-overlap] + def __rdivmod__(self: NDArray[np.bool], lhs: _ArrayLike[_RealNumberT], /) -> _2Tuple[NDArray[_RealNumberT]]: ... @overload def __rdivmod__(self: NDArray[float64], lhs: _ArrayLikeFloat64_co, /) -> _2Tuple[NDArray[float64]]: ... @overload def __rdivmod__(self: _ArrayFloat64_co, lhs: _ArrayLike[floating[_64Bit]], /) -> _2Tuple[NDArray[float64]]: ... @overload - def __rdivmod__(self: _ArrayUInt_co, lhs: _ArrayLikeUInt_co, /) -> _2Tuple[NDArray[unsignedinteger]]: ... # type: ignore[overload-overlap] + def __rdivmod__(self: _ArrayUInt_co, lhs: _ArrayLikeUInt_co, /) -> _2Tuple[NDArray[unsignedinteger]]: ... 
@overload - def __rdivmod__(self: _ArrayInt_co, lhs: _ArrayLikeInt_co, /) -> _2Tuple[NDArray[signedinteger]]: ... # type: ignore[overload-overlap] + def __rdivmod__(self: _ArrayInt_co, lhs: _ArrayLikeInt_co, /) -> _2Tuple[NDArray[signedinteger]]: ... @overload def __rdivmod__(self: _ArrayFloat_co, lhs: _ArrayLikeFloat_co, /) -> _2Tuple[NDArray[floating]]: ... @overload @@ -2950,11 +2950,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __add__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... @overload - def __add__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __add__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... @overload - def __add__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap] + def __add__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... @overload - def __add__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __add__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... @overload def __add__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -2964,15 +2964,15 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __add__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __add__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __add__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload - def __add__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... 
# type: ignore[overload-overlap] + def __add__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload - def __add__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + def __add__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload - def __add__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... # type: ignore[overload-overlap] + def __add__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... @overload - def __add__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... # type: ignore[overload-overlap] + def __add__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... @overload def __add__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... @overload @@ -2998,11 +2998,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload # signature equivalent to __add__ def __radd__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... @overload - def __radd__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __radd__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... @overload - def __radd__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap] + def __radd__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... @overload - def __radd__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __radd__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... @overload def __radd__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... 
@overload @@ -3012,15 +3012,15 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __radd__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __radd__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __radd__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload - def __radd__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + def __radd__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload - def __radd__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + def __radd__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload - def __radd__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... # type: ignore[overload-overlap] + def __radd__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... @overload - def __radd__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... # type: ignore[overload-overlap] + def __radd__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... @overload def __radd__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... @overload @@ -3046,11 +3046,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __sub__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... @overload - def __sub__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __sub__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... 
@overload def __sub__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ... @overload - def __sub__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __sub__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... @overload def __sub__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -3060,15 +3060,15 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __sub__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __sub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __sub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload - def __sub__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + def __sub__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload - def __sub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + def __sub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload - def __sub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... # type: ignore[overload-overlap] + def __sub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... @overload - def __sub__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... # type: ignore[overload-overlap] + def __sub__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... @overload def __sub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... 
@overload @@ -3084,11 +3084,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __rsub__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... @overload - def __rsub__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __rsub__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... @overload def __rsub__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ... @overload - def __rsub__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __rsub__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... @overload def __rsub__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -3098,15 +3098,15 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __rsub__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __rsub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __rsub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload - def __rsub__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + def __rsub__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload - def __rsub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + def __rsub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload - def __rsub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... 
# type: ignore[overload-overlap] + def __rsub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... @overload - def __rsub__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... # type: ignore[overload-overlap] + def __rsub__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... @overload def __rsub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... @overload @@ -3122,11 +3122,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __mul__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... @overload - def __mul__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __mul__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... @overload - def __mul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap] + def __mul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... @overload - def __mul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __mul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... @overload def __mul__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -3136,11 +3136,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __mul__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __mul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __mul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... 
@overload - def __mul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + def __mul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload - def __mul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + def __mul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload def __mul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... @overload @@ -3164,11 +3164,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload # signature equivalent to __mul__ def __rmul__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... @overload - def __rmul__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __rmul__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... @overload - def __rmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap] + def __rmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... @overload - def __rmul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __rmul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... @overload def __rmul__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -3178,11 +3178,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __rmul__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __rmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... 
# type: ignore[overload-overlap] + def __rmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload - def __rmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + def __rmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload - def __rmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + def __rmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload def __rmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... @overload @@ -3268,19 +3268,19 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __floordiv__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ... @overload - def __floordiv__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] + def __floordiv__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ... @overload - def __floordiv__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[overload-overlap] + def __floordiv__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload - def __floordiv__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] + def __floordiv__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... @overload def __floordiv__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload def __floordiv__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... 
@overload - def __floordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __floordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload - def __floordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + def __floordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload def __floordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload @@ -3298,19 +3298,19 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __rfloordiv__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ... @overload - def __rfloordiv__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] + def __rfloordiv__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ... @overload - def __rfloordiv__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[overload-overlap] + def __rfloordiv__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload - def __rfloordiv__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] + def __rfloordiv__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... @overload def __rfloordiv__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload def __rfloordiv__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __rfloordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... 
# type: ignore[overload-overlap] + def __rfloordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload - def __rfloordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + def __rfloordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload def __rfloordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload @@ -3326,11 +3326,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __pow__(self: NDArray[_NumberT], other: int | np.bool, mod: None = None, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... @overload - def __pow__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __pow__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[_NumberT]: ... @overload - def __pow__(self: NDArray[np.bool], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[int8]: ... # type: ignore[overload-overlap] + def __pow__(self: NDArray[np.bool], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[int8]: ... @overload - def __pow__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], mod: None = None, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __pow__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], mod: None = None, /) -> NDArray[_NumberT]: ... @overload def __pow__(self: NDArray[float64], other: _ArrayLikeFloat64_co, mod: None = None, /) -> NDArray[float64]: ... @overload @@ -3342,11 +3342,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], mod: None = None, / ) -> NDArray[complex128]: ... @overload - def __pow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, mod: None = None, /) -> NDArray[unsignedinteger]: ... 
# type: ignore[overload-overlap] + def __pow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, mod: None = None, /) -> NDArray[unsignedinteger]: ... @overload - def __pow__(self: _ArrayInt_co, other: _ArrayLikeInt_co, mod: None = None, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + def __pow__(self: _ArrayInt_co, other: _ArrayLikeInt_co, mod: None = None, /) -> NDArray[signedinteger]: ... @overload - def __pow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, mod: None = None, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + def __pow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, mod: None = None, /) -> NDArray[floating]: ... @overload def __pow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, mod: None = None, /) -> NDArray[complexfloating]: ... @overload @@ -3360,11 +3360,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __rpow__(self: NDArray[_NumberT], other: int | np.bool, mod: None = None, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... @overload - def __rpow__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __rpow__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[_NumberT]: ... @overload - def __rpow__(self: NDArray[np.bool], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[int8]: ... # type: ignore[overload-overlap] + def __rpow__(self: NDArray[np.bool], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[int8]: ... @overload - def __rpow__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], mod: None = None, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __rpow__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], mod: None = None, /) -> NDArray[_NumberT]: ... @overload def __rpow__(self: NDArray[float64], other: _ArrayLikeFloat64_co, mod: None = None, /) -> NDArray[float64]: ... 
@overload @@ -3376,11 +3376,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], mod: None = None, / ) -> NDArray[complex128]: ... @overload - def __rpow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, mod: None = None, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __rpow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, mod: None = None, /) -> NDArray[unsignedinteger]: ... @overload - def __rpow__(self: _ArrayInt_co, other: _ArrayLikeInt_co, mod: None = None, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + def __rpow__(self: _ArrayInt_co, other: _ArrayLikeInt_co, mod: None = None, /) -> NDArray[signedinteger]: ... @overload - def __rpow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, mod: None = None, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + def __rpow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, mod: None = None, /) -> NDArray[floating]: ... @overload def __rpow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, mod: None = None, /) -> NDArray[complexfloating]: ... @overload @@ -3393,7 +3393,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __lshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload - def __lshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __lshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload def __lshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload @@ -3404,7 +3404,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __rlshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... 
@overload - def __rlshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __rlshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload def __rlshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload @@ -3415,7 +3415,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __rshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload - def __rshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __rshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload def __rshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload @@ -3426,7 +3426,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __rrshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload - def __rrshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __rrshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload def __rrshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload @@ -3437,7 +3437,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __and__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... @overload - def __and__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __and__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload def __and__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... 
@overload @@ -3448,7 +3448,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __rand__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... @overload - def __rand__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __rand__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload def __rand__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload @@ -3459,7 +3459,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __xor__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... @overload - def __xor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __xor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload def __xor__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload @@ -3470,7 +3470,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __rxor__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... @overload - def __rxor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __rxor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload def __rxor__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload @@ -3481,7 +3481,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __or__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... @overload - def __or__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... 
# type: ignore[overload-overlap] + def __or__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload def __or__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload @@ -3492,7 +3492,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __ror__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... @overload - def __ror__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __ror__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload def __ror__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... @overload @@ -5492,7 +5492,7 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] @overload def __rdivmod__(self, x: timedelta64[None], /) -> tuple[int64, timedelta64[None]]: ... # type: ignore[misc] @overload - def __rdivmod__(self: timedelta64[L[0] | None], x: timedelta64, /) -> tuple[int64, timedelta64[None]]: ... # type: ignore[misc] + def __rdivmod__(self: timedelta64[L[0] | None], x: timedelta64, /) -> tuple[int64, timedelta64[None]]: ... @overload def __rdivmod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> tuple[int64, timedelta64[int | None]]: ... # type: ignore[misc] @overload @@ -5686,7 +5686,7 @@ class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]): @overload def __rsub__(self: datetime64[dt.datetime], x: datetime64[int], /) -> timedelta64[int]: ... # type: ignore[misc] @overload - def __rsub__(self: datetime64[dt.datetime], x: datetime64[dt.date], /) -> timedelta64[dt.timedelta]: ... # type: ignore[misc] + def __rsub__(self: datetime64[dt.datetime], x: datetime64[dt.date], /) -> timedelta64[dt.timedelta]: ... @overload def __rsub__(self, x: datetime64[None], /) -> timedelta64[None]: ... 
# type: ignore[misc] @overload diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index 0293d193cbc4..812e0b562b9d 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -811,7 +811,7 @@ def fromstring( ) -> NDArray[Any]: ... @overload -def frompyfunc( # type: ignore[overload-overlap] +def frompyfunc( func: Callable[[Any], _ReturnType], /, nin: L[1], nout: L[1], @@ -819,7 +819,7 @@ def frompyfunc( # type: ignore[overload-overlap] identity: None = None, ) -> _PyFunc_Nin1_Nout1[_ReturnType, None]: ... @overload -def frompyfunc( # type: ignore[overload-overlap] +def frompyfunc( func: Callable[[Any], _ReturnType], /, nin: L[1], nout: L[1], @@ -827,7 +827,7 @@ def frompyfunc( # type: ignore[overload-overlap] identity: _IDType, ) -> _PyFunc_Nin1_Nout1[_ReturnType, _IDType]: ... @overload -def frompyfunc( # type: ignore[overload-overlap] +def frompyfunc( func: Callable[[Any, Any], _ReturnType], /, nin: L[2], nout: L[1], @@ -835,7 +835,7 @@ def frompyfunc( # type: ignore[overload-overlap] identity: None = None, ) -> _PyFunc_Nin2_Nout1[_ReturnType, None]: ... @overload -def frompyfunc( # type: ignore[overload-overlap] +def frompyfunc( func: Callable[[Any, Any], _ReturnType], /, nin: L[2], nout: L[1], @@ -843,7 +843,7 @@ def frompyfunc( # type: ignore[overload-overlap] identity: _IDType, ) -> _PyFunc_Nin2_Nout1[_ReturnType, _IDType]: ... @overload -def frompyfunc( # type: ignore[overload-overlap] +def frompyfunc( func: Callable[..., _ReturnType], /, nin: _Nin, nout: L[1], @@ -851,7 +851,7 @@ def frompyfunc( # type: ignore[overload-overlap] identity: None = None, ) -> _PyFunc_Nin3P_Nout1[_ReturnType, None, _Nin]: ... 
@overload -def frompyfunc( # type: ignore[overload-overlap] +def frompyfunc( func: Callable[..., _ReturnType], /, nin: _Nin, nout: L[1], diff --git a/numpy/_typing/_nbit_base.pyi b/numpy/_typing/_nbit_base.pyi index d88c9f4d9fd9..81810a2caa3b 100644 --- a/numpy/_typing/_nbit_base.pyi +++ b/numpy/_typing/_nbit_base.pyi @@ -15,7 +15,7 @@ from typing_extensions import deprecated class NBitBase: ... @final -class _256Bit(NBitBase): ... +class _256Bit(NBitBase): ... # type: ignore[deprecated] @final class _128Bit(_256Bit): ... diff --git a/numpy/lib/_arrayterator_impl.pyi b/numpy/lib/_arrayterator_impl.pyi index 5fd589a3ac36..c70872f6e753 100644 --- a/numpy/lib/_arrayterator_impl.pyi +++ b/numpy/lib/_arrayterator_impl.pyi @@ -39,7 +39,7 @@ class Arrayterator(np.ndarray[_ShapeT_co, _DTypeT_co]): def __iter__(self) -> Generator[np.ndarray[_AnyShape, _DTypeT_co]]: ... # - @overload # type: ignore[override] + @overload def __array__(self, /, dtype: None = None, copy: bool | None = None) -> np.ndarray[_ShapeT_co, _DTypeT_co]: ... @overload def __array__(self, /, dtype: _DTypeT, copy: bool | None = None) -> np.ndarray[_ShapeT_co, _DTypeT]: ... diff --git a/numpy/lib/_shape_base_impl.pyi b/numpy/lib/_shape_base_impl.pyi index 352f57dd810a..ec4cd2c595ac 100644 --- a/numpy/lib/_shape_base_impl.pyi +++ b/numpy/lib/_shape_base_impl.pyi @@ -210,13 +210,13 @@ def get_array_wrap(*args: _SupportsArrayWrap) -> _ArrayWrap: ... def get_array_wrap(*args: object) -> _ArrayWrap | None: ... @overload -def kron(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co) -> NDArray[np.bool]: ... # type: ignore[misc] +def kron(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co) -> NDArray[np.bool]: ... @overload -def kron(a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co) -> NDArray[unsignedinteger]: ... # type: ignore[misc] +def kron(a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co) -> NDArray[unsignedinteger]: ... @overload -def kron(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co) -> NDArray[signedinteger]: ... 
# type: ignore[misc] +def kron(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co) -> NDArray[signedinteger]: ... @overload -def kron(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co) -> NDArray[floating]: ... # type: ignore[misc] +def kron(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co) -> NDArray[floating]: ... @overload def kron(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... @overload diff --git a/numpy/lib/_type_check_impl.pyi b/numpy/lib/_type_check_impl.pyi index 8b665cd9a400..dec5fc1dfaa4 100644 --- a/numpy/lib/_type_check_impl.pyi +++ b/numpy/lib/_type_check_impl.pyi @@ -64,7 +64,7 @@ def mintypecode(typechars: Iterable[str | ArrayLike], typeset: str | Container[s # @overload -def real(val: _HasReal[_T]) -> _T: ... # type: ignore[overload-overlap] +def real(val: _HasReal[_T]) -> _T: ... @overload def real(val: _ArrayLike[_RealT]) -> NDArray[_RealT]: ... @overload @@ -72,7 +72,7 @@ def real(val: ArrayLike) -> NDArray[Any]: ... # @overload -def imag(val: _HasImag[_T]) -> _T: ... # type: ignore[overload-overlap] +def imag(val: _HasImag[_T]) -> _T: ... @overload def imag(val: _ArrayLike[_RealT]) -> NDArray[_RealT]: ... @overload @@ -142,7 +142,7 @@ def nan_to_num( # NOTE: The [overload-overlap] mypy error is a false positive @overload -def real_if_close(a: _ArrayLike[np.complex64], tol: float = 100) -> NDArray[np.float32 | np.complex64]: ... # type: ignore[overload-overlap] +def real_if_close(a: _ArrayLike[np.complex64], tol: float = 100) -> NDArray[np.float32 | np.complex64]: ... @overload def real_if_close(a: _ArrayLike[np.complex128], tol: float = 100) -> NDArray[np.float64 | np.complex128]: ... @overload @@ -202,41 +202,41 @@ def typename(char: L["O"]) -> L["object"]: ... @overload def common_type() -> type[np.float16]: ... @overload -def common_type(a0: _HasDType[np.float16], /, *ai: _HasDType[np.float16]) -> type[np.float16]: ... 
# type: ignore[overload-overlap] +def common_type(a0: _HasDType[np.float16], /, *ai: _HasDType[np.float16]) -> type[np.float16]: ... @overload -def common_type(a0: _HasDType[np.float32], /, *ai: _HasDType[_FloatMax32]) -> type[np.float32]: ... # type: ignore[overload-overlap] +def common_type(a0: _HasDType[np.float32], /, *ai: _HasDType[_FloatMax32]) -> type[np.float32]: ... @overload -def common_type( # type: ignore[overload-overlap] +def common_type( a0: _HasDType[np.float64 | np.integer], /, *ai: _HasDType[_RealMax64], ) -> type[np.float64]: ... @overload -def common_type( # type: ignore[overload-overlap] +def common_type( a0: _HasDType[np.longdouble], /, *ai: _HasDType[_Real], ) -> type[np.longdouble]: ... @overload -def common_type( # type: ignore[overload-overlap] +def common_type( a0: _HasDType[np.complex64], /, *ai: _HasDType[_InexactMax32], ) -> type[np.complex64]: ... @overload -def common_type( # type: ignore[overload-overlap] +def common_type( a0: _HasDType[np.complex128], /, *ai: _HasDType[_NumberMax64], ) -> type[np.complex128]: ... @overload -def common_type( # type: ignore[overload-overlap] +def common_type( a0: _HasDType[np.clongdouble], /, *ai: _HasDType[np.number], ) -> type[np.clongdouble]: ... @overload -def common_type( # type: ignore[overload-overlap] +def common_type( a0: _HasDType[_FloatMax32], array1: _HasDType[np.float32], /, @@ -257,7 +257,7 @@ def common_type( *ai: _HasDType[_Real], ) -> type[np.longdouble]: ... @overload -def common_type( # type: ignore[overload-overlap] +def common_type( a0: _HasDType[_InexactMax32], array1: _HasDType[np.complex64], /, diff --git a/numpy/lib/_ufunclike_impl.pyi b/numpy/lib/_ufunclike_impl.pyi index 0f00767356e0..c16ce811e28a 100644 --- a/numpy/lib/_ufunclike_impl.pyi +++ b/numpy/lib/_ufunclike_impl.pyi @@ -28,7 +28,7 @@ def fix(x: _ArrayLikeObject_co, out: None = None) -> NDArray[object_]: ... def fix(x: _ArrayLikeFloat_co | _ArrayLikeObject_co, out: _ArrayT) -> _ArrayT: ... 
@overload -def isposinf( # type: ignore[misc] +def isposinf( x: _FloatLike_co, out: None = None, ) -> np.bool: ... @@ -44,7 +44,7 @@ def isposinf( ) -> _ArrayT: ... @overload -def isneginf( # type: ignore[misc] +def isneginf( x: _FloatLike_co, out: None = None, ) -> np.bool: ... diff --git a/numpy/lib/_user_array_impl.pyi b/numpy/lib/_user_array_impl.pyi index 7f364b495450..f2e34eacc5a6 100644 --- a/numpy/lib/_user_array_impl.pyi +++ b/numpy/lib/_user_array_impl.pyi @@ -21,13 +21,13 @@ _ShapeT_co = TypeVar("_ShapeT_co", bound=tuple[int, ...], default=_AnyShape, cov _DTypeT = TypeVar("_DTypeT", bound=np.dtype) _DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=True) -_BoolArrayT = TypeVar("_BoolArrayT", bound=container[Any, np.dtype[np.bool]]) -_IntegralArrayT = TypeVar("_IntegralArrayT", bound=container[Any, np.dtype[np.bool | np.integer | np.object_]]) +_BoolArrayT = TypeVar("_BoolArrayT", bound=container[Any, np.dtype[np.bool]]) # type: ignore[deprecated] +_IntegralArrayT = TypeVar("_IntegralArrayT", bound=container[Any, np.dtype[np.bool | np.integer | np.object_]]) # type: ignore[deprecated] _RealContainerT = TypeVar( "_RealContainerT", - bound=container[Any, np.dtype[np.bool | np.integer | np.floating | np.timedelta64 | np.object_]], + bound=container[Any, np.dtype[np.bool | np.integer | np.floating | np.timedelta64 | np.object_]], # type: ignore[deprecated] ) -_NumericContainerT = TypeVar("_NumericContainerT", bound=container[Any, np.dtype[np.number | np.timedelta64 | np.object_]]) +_NumericContainerT = TypeVar("_NumericContainerT", bound=container[Any, np.dtype[np.number | np.timedelta64 | np.object_]]) # type: ignore[deprecated] _ArrayInt_co: TypeAlias = npt.NDArray[np.integer | np.bool] @@ -112,7 +112,7 @@ class container(Generic[_ShapeT_co, _DTypeT_co]): # keep in sync with np.ndarray @overload - def __abs__(self: container[_ShapeT, np.dtype[np.complex64]], /) -> container[_ShapeT, np.dtype[np.float32]]: ... 
# type: ignore[overload-overlap] + def __abs__(self: container[_ShapeT, np.dtype[np.complex64]], /) -> container[_ShapeT, np.dtype[np.float32]]: ... @overload def __abs__(self: container[_ShapeT, np.dtype[np.complex128]], /) -> container[_ShapeT, np.dtype[np.float64]]: ... @overload diff --git a/numpy/lib/user_array.pyi b/numpy/lib/user_array.pyi index 9b90d893326b..af90126ad6c9 100644 --- a/numpy/lib/user_array.pyi +++ b/numpy/lib/user_array.pyi @@ -1 +1 @@ -from ._user_array_impl import container as container +from ._user_array_impl import container as container # type: ignore[deprecated] diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index 60320b021c71..b2b8ada44419 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -302,9 +302,9 @@ def svd( hermitian: bool = False, ) -> NDArray[floating]: ... -# the ignored `overload-overlap` mypy error below is a false-positive +# @overload -def svdvals( # type: ignore[overload-overlap] +def svdvals( x: _ArrayLike[np.float64 | np.complex128 | np.integer | np.bool] | _NestedSequence[complex], / ) -> NDArray[np.float64]: ... @overload @@ -313,7 +313,7 @@ def svdvals(x: _ArrayLike[np.float32 | np.complex64], /) -> NDArray[np.float32]: def svdvals(x: _ArrayLikeNumber_co, /) -> NDArray[floating]: ... # TODO: Returns a scalar for 2D arrays and -# a `(x.ndim - 2)`` dimensionl array otherwise +# a `(x.ndim - 2)`` dimensional array otherwise def cond(x: _ArrayLikeComplex_co, p: float | L["fro", "nuc"] | None = None) -> Any: ... # TODO: Returns `int` for <2D arrays and `intp` otherwise diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 62dc32c13d97..67d6b3591a4b 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -1,3 +1,4 @@ +# mypy: disable-error-code=no-untyped-def # pyright: reportIncompatibleMethodOverride=false import datetime as dt @@ -1087,7 +1088,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @property def shape(self) -> _ShapeT_co: ... 
- @shape.setter # type: ignore[override] + @shape.setter def shape(self: MaskedArray[_ShapeT, Any], shape: _ShapeT, /) -> None: ... def __setmask__(self, mask: _ArrayLikeBool_co, copy: bool = False) -> None: ... @@ -1177,11 +1178,11 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload # type: ignore[override] def __add__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... @overload - def __add__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + def __add__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... @overload - def __add__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... # type: ignore[overload-overlap] + def __add__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... @overload - def __add__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + def __add__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... @overload def __add__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... @overload @@ -1191,15 +1192,15 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def __add__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... @overload - def __add__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __add__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... @overload - def __add__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... 
# type: ignore[overload-overlap] + def __add__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... @overload - def __add__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + def __add__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... @overload - def __add__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... # type: ignore[overload-overlap] + def __add__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... @overload - def __add__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... # type: ignore[overload-overlap] + def __add__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... @overload def __add__(self: _MaskedArrayTD64_co, other: _ArrayLikeTD64_co, /) -> _MaskedArray[timedelta64]: ... @overload @@ -1225,11 +1226,11 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload # type: ignore[override] # signature equivalent to __add__ def __radd__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... @overload - def __radd__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + def __radd__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... @overload - def __radd__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... # type: ignore[overload-overlap] + def __radd__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... @overload - def __radd__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... 
# type: ignore[overload-overlap] + def __radd__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... @overload def __radd__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... @overload @@ -1239,15 +1240,15 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def __radd__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... @overload - def __radd__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __radd__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... @overload - def __radd__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + def __radd__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... @overload - def __radd__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + def __radd__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... @overload - def __radd__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... # type: ignore[overload-overlap] + def __radd__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... @overload - def __radd__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... # type: ignore[overload-overlap] + def __radd__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... @overload def __radd__(self: _MaskedArrayTD64_co, other: _ArrayLikeTD64_co, /) -> _MaskedArray[timedelta64]: ... 
@overload @@ -1273,11 +1274,11 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload # type: ignore[override] def __sub__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... @overload - def __sub__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + def __sub__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... @overload def __sub__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ... @overload - def __sub__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + def __sub__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... @overload def __sub__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... @overload @@ -1287,15 +1288,15 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def __sub__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... @overload - def __sub__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __sub__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... @overload - def __sub__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + def __sub__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... @overload - def __sub__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + def __sub__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... 
@overload - def __sub__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... # type: ignore[overload-overlap] + def __sub__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... @overload - def __sub__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... # type: ignore[overload-overlap] + def __sub__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... @overload def __sub__(self: _MaskedArrayTD64_co, other: _ArrayLikeTD64_co, /) -> _MaskedArray[timedelta64]: ... @overload @@ -1311,11 +1312,11 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload # type: ignore[override] def __rsub__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... @overload - def __rsub__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + def __rsub__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... @overload def __rsub__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ... @overload - def __rsub__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + def __rsub__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... @overload def __rsub__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... @overload @@ -1325,15 +1326,15 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def __rsub__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... @overload - def __rsub__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... 
# type: ignore[overload-overlap] + def __rsub__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... @overload - def __rsub__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + def __rsub__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... @overload - def __rsub__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + def __rsub__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... @overload - def __rsub__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... # type: ignore[overload-overlap] + def __rsub__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... @overload - def __rsub__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... # type: ignore[overload-overlap] + def __rsub__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... @overload def __rsub__(self: _MaskedArrayTD64_co, other: _ArrayLikeTD64_co, /) -> _MaskedArray[timedelta64]: ... @overload @@ -1349,11 +1350,11 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload # type: ignore[override] def __mul__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... @overload - def __mul__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + def __mul__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... @overload - def __mul__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... 
# type: ignore[overload-overlap] + def __mul__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... @overload - def __mul__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + def __mul__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... @overload def __mul__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... @overload @@ -1363,13 +1364,13 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def __mul__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... @overload - def __mul__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __mul__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... @overload - def __mul__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + def __mul__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... @overload - def __mul__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + def __mul__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... @overload - def __mul__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... # type: ignore[overload-overlap] + def __mul__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... @overload def __mul__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... 
@overload @@ -1391,11 +1392,11 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload # type: ignore[override] # signature equivalent to __mul__ def __rmul__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... @overload - def __rmul__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + def __rmul__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... @overload - def __rmul__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... # type: ignore[overload-overlap] + def __rmul__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... @overload - def __rmul__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + def __rmul__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... @overload def __rmul__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... @overload @@ -1405,13 +1406,13 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def __rmul__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... @overload - def __rmul__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __rmul__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... @overload - def __rmul__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + def __rmul__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... @overload - def __rmul__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... 
# type: ignore[overload-overlap] + def __rmul__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... @overload - def __rmul__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... # type: ignore[overload-overlap] + def __rmul__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... @overload def __rmul__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... @overload @@ -1495,19 +1496,19 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload # type: ignore[override] def __floordiv__(self: _MaskedArray[_RealNumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_RealNumberT]]: ... @overload - def __floordiv__(self: _MaskedArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_RealNumberT]: ... # type: ignore[overload-overlap] + def __floordiv__(self: _MaskedArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_RealNumberT]: ... @overload - def __floordiv__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[int8]: ... # type: ignore[overload-overlap] + def __floordiv__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[int8]: ... @overload - def __floordiv__(self: _MaskedArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> _MaskedArray[_RealNumberT]: ... # type: ignore[overload-overlap] + def __floordiv__(self: _MaskedArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> _MaskedArray[_RealNumberT]: ... @overload def __floordiv__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... @overload def __floordiv__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... @overload - def __floordiv__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... 
# type: ignore[overload-overlap] + def __floordiv__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... @overload - def __floordiv__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + def __floordiv__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... @overload def __floordiv__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... @overload @@ -1525,21 +1526,21 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload # type: ignore[override] def __rfloordiv__(self: _MaskedArray[_RealNumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_RealNumberT]]: ... @overload - def __rfloordiv__(self: _MaskedArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_RealNumberT]: ... # type: ignore[overload-overlap] + def __rfloordiv__(self: _MaskedArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_RealNumberT]: ... @overload - def __rfloordiv__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[int8]: ... # type: ignore[overload-overlap] + def __rfloordiv__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[int8]: ... @overload - def __rfloordiv__(self: _MaskedArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> _MaskedArray[_RealNumberT]: ... # type: ignore[overload-overlap] + def __rfloordiv__(self: _MaskedArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> _MaskedArray[_RealNumberT]: ... @overload def __rfloordiv__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... @overload def __rfloordiv__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... @overload - def __rfloordiv__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... 
# type: ignore[overload-overlap] + def __rfloordiv__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... @overload - def __rfloordiv__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + def __rfloordiv__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... @overload - def __rfloordiv__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + def __rfloordiv__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... @overload def __rfloordiv__(self: _MaskedArray[timedelta64], other: _ArrayLike[timedelta64], /) -> _MaskedArray[int64]: ... @overload @@ -1553,11 +1554,11 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload # type: ignore[override] def __pow__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... @overload - def __pow__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + def __pow__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... @overload - def __pow__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[int8]: ... # type: ignore[overload-overlap] + def __pow__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[int8]: ... @overload - def __pow__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + def __pow__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... @overload def __pow__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... 
@overload @@ -1567,11 +1568,11 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def __pow__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... @overload - def __pow__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __pow__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... @overload - def __pow__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + def __pow__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... @overload - def __pow__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + def __pow__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... @overload def __pow__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... @overload @@ -1585,11 +1586,11 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload # type: ignore[override] def __rpow__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... @overload - def __rpow__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + def __rpow__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... @overload - def __rpow__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[int8]: ... # type: ignore[overload-overlap] + def __rpow__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[int8]: ... @overload - def __rpow__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... 
# type: ignore[overload-overlap] + def __rpow__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... @overload def __rpow__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... @overload @@ -1599,11 +1600,11 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def __rpow__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... @overload - def __rpow__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __rpow__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... @overload - def __rpow__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + def __rpow__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... @overload - def __rpow__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + def __rpow__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... @overload def __rpow__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... @overload @@ -1638,7 +1639,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload # (None) def reshape(self, shape: None, /, *, order: _OrderACF = "C", copy: bool | None = None) -> Self: ... 
@overload # (empty_sequence) - def reshape( # type: ignore[overload-overlap] # mypy false positive + def reshape( # mypy false positive self, shape: Sequence[Never], /, @@ -2284,7 +2285,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # Keep in-sync with np.ma.take @overload # type: ignore[override] - def take( # type: ignore[overload-overlap] + def take( self: _MaskedArray[_ScalarT], indices: _IntLike_co, axis: None = None, diff --git a/numpy/ma/extras.pyi b/numpy/ma/extras.pyi index 70881bd15c8a..580dcd486c28 100644 --- a/numpy/ma/extras.pyi +++ b/numpy/ma/extras.pyi @@ -243,6 +243,7 @@ def diagflat(v: _ArrayLike[_ScalarT], k: int = 0) -> _MArray[_ScalarT]: ... def diagflat(v: ArrayLike, k: int = 0) -> _MArray[Incomplete]: ... # TODO: everything below +# mypy: disable-error-code=no-untyped-def def count_masked(arr, axis=None): ... def masked_all(shape, dtype=float): ... # noqa: PYI014 diff --git a/numpy/ma/mrecords.pyi b/numpy/ma/mrecords.pyi index 737a34ebdb70..f1319d4bf69d 100644 --- a/numpy/ma/mrecords.pyi +++ b/numpy/ma/mrecords.pyi @@ -18,6 +18,9 @@ __all__ = [ _ShapeT_co = TypeVar("_ShapeT_co", bound=tuple[int, ...], default=_AnyShape, covariant=True) _DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=True) +### +# mypy: disable-error-code=no-untyped-def + class MaskedRecords(MaskedArray[_ShapeT_co, _DTypeT_co], Generic[_ShapeT_co, _DTypeT_co]): def __new__( cls, @@ -50,10 +53,10 @@ class MaskedRecords(MaskedArray[_ShapeT_co, _DTypeT_co], Generic[_ShapeT_co, _DT def __setattr__(self, attr, val): ... def __getitem__(self, indx): ... def __setitem__(self, indx, value): ... - def view(self, dtype=None, type=None): ... + def view(self, dtype=None, type=None): ... # type: ignore[override] def harden_mask(self): ... def soften_mask(self): ... - def copy(self): ... + def copy(self): ... # type: ignore[override] def tolist(self, fill_value=None): ... def __reduce__(self): ... 
diff --git a/numpy/matlib.pyi b/numpy/matlib.pyi index d653a5a6cc98..53ac58c85d2e 100644 --- a/numpy/matlib.pyi +++ b/numpy/matlib.pyi @@ -2,7 +2,7 @@ from typing import Any, Literal, TypeAlias, TypeVar, overload import numpy as np import numpy.typing as npt -from numpy import ( # noqa: F401 +from numpy import ( # type: ignore[deprecated] # noqa: F401 False_, ScalarType, True_, diff --git a/numpy/random/_generator.pyi b/numpy/random/_generator.pyi index 1f7c342394e1..9e857fb000ef 100644 --- a/numpy/random/_generator.pyi +++ b/numpy/random/_generator.pyi @@ -66,32 +66,32 @@ class Generator: def spawn(self, n_children: int) -> list[Generator]: ... def bytes(self, length: int) -> bytes: ... @overload - def standard_normal( # type: ignore[misc] + def standard_normal( self, size: None = None, dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ..., out: None = None, ) -> float: ... @overload - def standard_normal( # type: ignore[misc] + def standard_normal( self, size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload - def standard_normal( # type: ignore[misc] + def standard_normal( self, *, out: NDArray[float64] | None = None, ) -> NDArray[float64]: ... @overload - def standard_normal( # type: ignore[misc] + def standard_normal( self, size: _ShapeLike | None = None, dtype: _DTypeLikeFloat32 = ..., out: NDArray[float32] | None = None, ) -> NDArray[float32]: ... @overload - def standard_normal( # type: ignore[misc] + def standard_normal( self, size: _ShapeLike | None = None, dtype: _DTypeLikeFloat64 = ..., @@ -102,7 +102,7 @@ class Generator: @overload def permutation(self, x: ArrayLike, axis: int = 0) -> NDArray[Any]: ... @overload - def standard_exponential( # type: ignore[misc] + def standard_exponential( self, size: None = None, dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ..., @@ -145,7 +145,7 @@ class Generator: out: NDArray[float64] | None = None, ) -> NDArray[float64]: ... 
@overload - def random( # type: ignore[misc] + def random( self, size: None = None, dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ..., @@ -184,7 +184,7 @@ class Generator: a: _FloatLike_co, b: _FloatLike_co, size: None = None, - ) -> float: ... # type: ignore[misc] + ) -> float: ... @overload def beta( self, @@ -193,7 +193,7 @@ class Generator: size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def exponential(self, scale: _FloatLike_co = 1.0, size: None = None) -> float: ... # type: ignore[misc] + def exponential(self, scale: _FloatLike_co = 1.0, size: None = None) -> float: ... @overload def exponential(self, scale: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None) -> NDArray[float64]: ... @@ -523,7 +523,7 @@ class Generator: low: _FloatLike_co = 0.0, high: _FloatLike_co = 1.0, size: None = None, - ) -> float: ... # type: ignore[misc] + ) -> float: ... @overload def uniform( self, @@ -537,7 +537,7 @@ class Generator: loc: _FloatLike_co = 0.0, scale: _FloatLike_co = 1.0, size: None = None, - ) -> float: ... # type: ignore[misc] + ) -> float: ... @overload def normal( self, @@ -546,7 +546,7 @@ class Generator: size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload - def standard_gamma( # type: ignore[misc] + def standard_gamma( self, shape: _FloatLike_co, size: None = None, @@ -585,7 +585,7 @@ class Generator: @overload def gamma( self, shape: _FloatLike_co, scale: _FloatLike_co = 1.0, size: None = None - ) -> float: ... # type: ignore[misc] + ) -> float: ... @overload def gamma( self, @@ -596,7 +596,7 @@ class Generator: @overload def f( self, dfnum: _FloatLike_co, dfden: _FloatLike_co, size: None = None - ) -> float: ... # type: ignore[misc] + ) -> float: ... @overload def f( self, @@ -611,7 +611,7 @@ class Generator: dfden: _FloatLike_co, nonc: _FloatLike_co, size: None = None, - ) -> float: ... # type: ignore[misc] + ) -> float: ... 
@overload def noncentral_f( self, @@ -621,7 +621,7 @@ class Generator: size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload - def chisquare(self, df: _FloatLike_co, size: None = None) -> float: ... # type: ignore[misc] + def chisquare(self, df: _FloatLike_co, size: None = None) -> float: ... @overload def chisquare( self, df: _ArrayLikeFloat_co, size: _ShapeLike | None = None @@ -629,7 +629,7 @@ class Generator: @overload def noncentral_chisquare( self, df: _FloatLike_co, nonc: _FloatLike_co, size: None = None - ) -> float: ... # type: ignore[misc] + ) -> float: ... @overload def noncentral_chisquare( self, @@ -638,7 +638,7 @@ class Generator: size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def standard_t(self, df: _FloatLike_co, size: None = None) -> float: ... # type: ignore[misc] + def standard_t(self, df: _FloatLike_co, size: None = None) -> float: ... @overload def standard_t( self, df: _ArrayLikeFloat_co, size: None = None @@ -650,7 +650,7 @@ class Generator: @overload def vonmises( self, mu: _FloatLike_co, kappa: _FloatLike_co, size: None = None - ) -> float: ... # type: ignore[misc] + ) -> float: ... @overload def vonmises( self, @@ -659,25 +659,25 @@ class Generator: size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def pareto(self, a: _FloatLike_co, size: None = None) -> float: ... # type: ignore[misc] + def pareto(self, a: _FloatLike_co, size: None = None) -> float: ... @overload def pareto( self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def weibull(self, a: _FloatLike_co, size: None = None) -> float: ... # type: ignore[misc] + def weibull(self, a: _FloatLike_co, size: None = None) -> float: ... @overload def weibull( self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def power(self, a: _FloatLike_co, size: None = None) -> float: ... 
# type: ignore[misc] + def power(self, a: _FloatLike_co, size: None = None) -> float: ... @overload def power( self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def standard_cauchy(self, size: None = None) -> float: ... # type: ignore[misc] + def standard_cauchy(self, size: None = None) -> float: ... @overload def standard_cauchy(self, size: _ShapeLike | None = None) -> NDArray[float64]: ... @overload @@ -686,7 +686,7 @@ class Generator: loc: _FloatLike_co = 0.0, scale: _FloatLike_co = 1.0, size: None = None, - ) -> float: ... # type: ignore[misc] + ) -> float: ... @overload def laplace( self, @@ -700,7 +700,7 @@ class Generator: loc: _FloatLike_co = 0.0, scale: _FloatLike_co = 1.0, size: None = None, - ) -> float: ... # type: ignore[misc] + ) -> float: ... @overload def gumbel( self, @@ -714,7 +714,7 @@ class Generator: loc: _FloatLike_co = 0.0, scale: _FloatLike_co = 1.0, size: None = None, - ) -> float: ... # type: ignore[misc] + ) -> float: ... @overload def logistic( self, @@ -728,7 +728,7 @@ class Generator: mean: _FloatLike_co = 0.0, sigma: _FloatLike_co = 1.0, size: None = None, - ) -> float: ... # type: ignore[misc] + ) -> float: ... @overload def lognormal( self, @@ -737,7 +737,7 @@ class Generator: size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload - def rayleigh(self, scale: _FloatLike_co = 1.0, size: None = None) -> float: ... # type: ignore[misc] + def rayleigh(self, scale: _FloatLike_co = 1.0, size: None = None) -> float: ... @overload def rayleigh( self, scale: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None @@ -745,7 +745,7 @@ class Generator: @overload def wald( self, mean: _FloatLike_co, scale: _FloatLike_co, size: None = None - ) -> float: ... # type: ignore[misc] + ) -> float: ... @overload def wald( self, @@ -760,7 +760,7 @@ class Generator: mode: _FloatLike_co, right: _FloatLike_co, size: None = None, - ) -> float: ... # type: ignore[misc] + ) -> float: ... 
@overload def triangular( self, @@ -770,7 +770,7 @@ class Generator: size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload - def binomial(self, n: int, p: _FloatLike_co, size: None = None) -> int: ... # type: ignore[misc] + def binomial(self, n: int, p: _FloatLike_co, size: None = None) -> int: ... @overload def binomial( self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None @@ -778,7 +778,7 @@ class Generator: @overload def negative_binomial( self, n: _FloatLike_co, p: _FloatLike_co, size: None = None - ) -> int: ... # type: ignore[misc] + ) -> int: ... @overload def negative_binomial( self, @@ -787,19 +787,19 @@ class Generator: size: _ShapeLike | None = None ) -> NDArray[int64]: ... @overload - def poisson(self, lam: _FloatLike_co = 1.0, size: None = None) -> int: ... # type: ignore[misc] + def poisson(self, lam: _FloatLike_co = 1.0, size: None = None) -> int: ... @overload def poisson( self, lam: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None ) -> NDArray[int64]: ... @overload - def zipf(self, a: _FloatLike_co, size: None = None) -> int: ... # type: ignore[misc] + def zipf(self, a: _FloatLike_co, size: None = None) -> int: ... @overload def zipf( self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[int64]: ... @overload - def geometric(self, p: _FloatLike_co, size: None = None) -> int: ... # type: ignore[misc] + def geometric(self, p: _FloatLike_co, size: None = None) -> int: ... @overload def geometric( self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None @@ -807,7 +807,7 @@ class Generator: @overload def hypergeometric( self, ngood: int, nbad: int, nsample: int, size: None = None - ) -> int: ... # type: ignore[misc] + ) -> int: ... @overload def hypergeometric( self, @@ -817,7 +817,7 @@ class Generator: size: _ShapeLike | None = None, ) -> NDArray[int64]: ... @overload - def logseries(self, p: _FloatLike_co, size: None = None) -> int: ... 
# type: ignore[misc] + def logseries(self, p: _FloatLike_co, size: None = None) -> int: ... @overload def logseries( self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None diff --git a/numpy/random/mtrand.pyi b/numpy/random/mtrand.pyi index c20d35193d45..c2cbb17aa98d 100644 --- a/numpy/random/mtrand.pyi +++ b/numpy/random/mtrand.pyi @@ -117,15 +117,15 @@ class RandomState: self, state: dict[str, Any] | tuple[str, NDArray[uint32], int, int, float] ) -> None: ... @overload - def random_sample(self, size: None = None) -> float: ... # type: ignore[misc] + def random_sample(self, size: None = None) -> float: ... @overload def random_sample(self, size: _ShapeLike) -> NDArray[float64]: ... @overload - def random(self, size: None = None) -> float: ... # type: ignore[misc] + def random(self, size: None = None) -> float: ... @overload def random(self, size: _ShapeLike) -> NDArray[float64]: ... @overload - def beta(self, a: float, b: float, size: None = None) -> float: ... # type: ignore[misc] + def beta(self, a: float, b: float, size: None = None) -> float: ... @overload def beta( self, @@ -134,29 +134,29 @@ class RandomState: size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def exponential(self, scale: float = 1.0, size: None = None) -> float: ... # type: ignore[misc] + def exponential(self, scale: float = 1.0, size: None = None) -> float: ... @overload def exponential( self, scale: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def standard_exponential(self, size: None = None) -> float: ... # type: ignore[misc] + def standard_exponential(self, size: None = None) -> float: ... @overload def standard_exponential(self, size: _ShapeLike) -> NDArray[float64]: ... @overload - def tomaxint(self, size: None = None) -> int: ... # type: ignore[misc] + def tomaxint(self, size: None = None) -> int: ... 
@overload # Generates long values, but stores it in a 64bit int: def tomaxint(self, size: _ShapeLike) -> NDArray[int64]: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, high: int | None = None, size: None = None, ) -> int: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, high: int | None = None, @@ -164,7 +164,7 @@ class RandomState: dtype: type[bool] = ..., ) -> bool: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, high: int | None = None, @@ -172,7 +172,7 @@ class RandomState: dtype: type[np.bool] = ..., ) -> np.bool: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, high: int | None = None, @@ -180,7 +180,7 @@ class RandomState: dtype: type[int] = ..., ) -> int: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, high: int | None = None, @@ -188,7 +188,7 @@ class RandomState: dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ..., # noqa: E501 ) -> uint8: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, high: int | None = None, @@ -196,7 +196,7 @@ class RandomState: dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ..., # noqa: E501 ) -> uint16: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, high: int | None = None, @@ -204,7 +204,7 @@ class RandomState: dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ..., # noqa: E501 ) -> uint32: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, high: int | None = None, @@ -212,7 +212,7 @@ class RandomState: dtype: dtype[uint] | type[uint] | _UIntCodes | _SupportsDType[dtype[uint]] = ..., # noqa: E501 ) -> uint: ... 
@overload - def randint( # type: ignore[misc] + def randint( self, low: int, high: int | None = None, @@ -220,7 +220,7 @@ class RandomState: dtype: dtype[ulong] | type[ulong] | _ULongCodes | _SupportsDType[dtype[ulong]] = ..., # noqa: E501 ) -> ulong: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, high: int | None = None, @@ -228,7 +228,7 @@ class RandomState: dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ..., # noqa: E501 ) -> uint64: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, high: int | None = None, @@ -236,7 +236,7 @@ class RandomState: dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ..., # noqa: E501 ) -> int8: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, high: int | None = None, @@ -244,7 +244,7 @@ class RandomState: dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ..., # noqa: E501 ) -> int16: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, high: int | None = None, @@ -252,7 +252,7 @@ class RandomState: dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ..., # noqa: E501 ) -> int32: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, high: int | None = None, @@ -260,7 +260,7 @@ class RandomState: dtype: dtype[int_] | type[int_] | _IntCodes | _SupportsDType[dtype[int_]] = ..., # noqa: E501 ) -> int_: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: int, high: int | None = None, @@ -268,7 +268,7 @@ class RandomState: dtype: dtype[long] | type[long] | _LongCodes | _SupportsDType[dtype[long]] = ..., # noqa: E501 ) -> long: ... 
@overload - def randint( # type: ignore[misc] + def randint( self, low: int, high: int | None = None, @@ -276,14 +276,14 @@ class RandomState: dtype: dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] = ..., # noqa: E501 ) -> int64: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, ) -> NDArray[long]: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, @@ -291,7 +291,7 @@ class RandomState: dtype: _DTypeLikeBool = ..., ) -> NDArray[np.bool]: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, @@ -299,7 +299,7 @@ class RandomState: dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ..., # noqa: E501 ) -> NDArray[int8]: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, @@ -307,7 +307,7 @@ class RandomState: dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ..., # noqa: E501 ) -> NDArray[int16]: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, @@ -315,7 +315,7 @@ class RandomState: dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ..., # noqa: E501 ) -> NDArray[int32]: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, @@ -323,7 +323,7 @@ class RandomState: dtype: dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] | None = ..., # noqa: E501 ) -> NDArray[int64]: ... 
@overload - def randint( # type: ignore[misc] + def randint( self, low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, @@ -331,7 +331,7 @@ class RandomState: dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ..., # noqa: E501 ) -> NDArray[uint8]: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, @@ -339,7 +339,7 @@ class RandomState: dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ..., # noqa: E501 ) -> NDArray[uint16]: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, @@ -347,7 +347,7 @@ class RandomState: dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ..., # noqa: E501 ) -> NDArray[uint32]: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, @@ -355,7 +355,7 @@ class RandomState: dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ..., # noqa: E501 ) -> NDArray[uint64]: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, @@ -363,7 +363,7 @@ class RandomState: dtype: dtype[long] | type[int] | type[long] | _LongCodes | _SupportsDType[dtype[long]] = ..., # noqa: E501 ) -> NDArray[long]: ... @overload - def randint( # type: ignore[misc] + def randint( self, low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, @@ -406,7 +406,7 @@ class RandomState: @overload def uniform( self, low: float = 0.0, high: float = 1.0, size: None = None - ) -> float: ... # type: ignore[misc] + ) -> float: ... @overload def uniform( self, @@ -425,7 +425,7 @@ class RandomState: @overload def random_integers( self, low: int, high: int | None = None, size: None = None - ) -> int: ... # type: ignore[misc] + ) -> int: ... 
@overload def random_integers( self, @@ -434,15 +434,15 @@ class RandomState: size: _ShapeLike | None = None, ) -> NDArray[long]: ... @overload - def standard_normal(self, size: None = None) -> float: ... # type: ignore[misc] + def standard_normal(self, size: None = None) -> float: ... @overload - def standard_normal( # type: ignore[misc] + def standard_normal( self, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload def normal( self, loc: float = 0.0, scale: float = 1.0, size: None = None - ) -> float: ... # type: ignore[misc] + ) -> float: ... @overload def normal( self, @@ -451,7 +451,7 @@ class RandomState: size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload - def standard_gamma( # type: ignore[misc] + def standard_gamma( self, shape: float, size: None = None, @@ -463,7 +463,7 @@ class RandomState: size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload - def gamma(self, shape: float, scale: float = 1.0, size: None = None) -> float: ... # type: ignore[misc] + def gamma(self, shape: float, scale: float = 1.0, size: None = None) -> float: ... @overload def gamma( self, @@ -472,7 +472,7 @@ class RandomState: size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload - def f(self, dfnum: float, dfden: float, size: None = None) -> float: ... # type: ignore[misc] + def f(self, dfnum: float, dfden: float, size: None = None) -> float: ... @overload def f( self, @@ -483,7 +483,7 @@ class RandomState: @overload def noncentral_f( self, dfnum: float, dfden: float, nonc: float, size: None = None - ) -> float: ... # type: ignore[misc] + ) -> float: ... @overload def noncentral_f( self, @@ -493,7 +493,7 @@ class RandomState: size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload - def chisquare(self, df: float, size: None = None) -> float: ... # type: ignore[misc] + def chisquare(self, df: float, size: None = None) -> float: ... 
@overload def chisquare( self, df: _ArrayLikeFloat_co, size: _ShapeLike | None = None @@ -501,7 +501,7 @@ class RandomState: @overload def noncentral_chisquare( self, df: float, nonc: float, size: None = None - ) -> float: ... # type: ignore[misc] + ) -> float: ... @overload def noncentral_chisquare( self, @@ -510,7 +510,7 @@ class RandomState: size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def standard_t(self, df: float, size: None = None) -> float: ... # type: ignore[misc] + def standard_t(self, df: float, size: None = None) -> float: ... @overload def standard_t( self, df: _ArrayLikeFloat_co, size: None = None @@ -520,7 +520,7 @@ class RandomState: self, df: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def vonmises(self, mu: float, kappa: float, size: None = None) -> float: ... # type: ignore[misc] + def vonmises(self, mu: float, kappa: float, size: None = None) -> float: ... @overload def vonmises( self, @@ -529,31 +529,31 @@ class RandomState: size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def pareto(self, a: float, size: None = None) -> float: ... # type: ignore[misc] + def pareto(self, a: float, size: None = None) -> float: ... @overload def pareto( self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def weibull(self, a: float, size: None = None) -> float: ... # type: ignore[misc] + def weibull(self, a: float, size: None = None) -> float: ... @overload def weibull( self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def power(self, a: float, size: None = None) -> float: ... # type: ignore[misc] + def power(self, a: float, size: None = None) -> float: ... @overload def power( self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def standard_cauchy(self, size: None = None) -> float: ... 
# type: ignore[misc] + def standard_cauchy(self, size: None = None) -> float: ... @overload def standard_cauchy(self, size: _ShapeLike | None = None) -> NDArray[float64]: ... @overload def laplace( self, loc: float = 0.0, scale: float = 1.0, size: None = None - ) -> float: ... # type: ignore[misc] + ) -> float: ... @overload def laplace( self, @@ -564,7 +564,7 @@ class RandomState: @overload def gumbel( self, loc: float = 0.0, scale: float = 1.0, size: None = None - ) -> float: ... # type: ignore[misc] + ) -> float: ... @overload def gumbel( self, @@ -575,7 +575,7 @@ class RandomState: @overload def logistic( self, loc: float = 0.0, scale: float = 1.0, size: None = None - ) -> float: ... # type: ignore[misc] + ) -> float: ... @overload def logistic( self, @@ -586,7 +586,7 @@ class RandomState: @overload def lognormal( self, mean: float = 0.0, sigma: float = 1.0, size: None = None - ) -> float: ... # type: ignore[misc] + ) -> float: ... @overload def lognormal( self, @@ -595,13 +595,13 @@ class RandomState: size: _ShapeLike | None = None, ) -> NDArray[float64]: ... @overload - def rayleigh(self, scale: float = 1.0, size: None = None) -> float: ... # type: ignore[misc] + def rayleigh(self, scale: float = 1.0, size: None = None) -> float: ... @overload def rayleigh( self, scale: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None ) -> NDArray[float64]: ... @overload - def wald(self, mean: float, scale: float, size: None = None) -> float: ... # type: ignore[misc] + def wald(self, mean: float, scale: float, size: None = None) -> float: ... @overload def wald( self, @@ -612,7 +612,7 @@ class RandomState: @overload def triangular( self, left: float, mode: float, right: float, size: None = None - ) -> float: ... # type: ignore[misc] + ) -> float: ... @overload def triangular( self, @@ -624,7 +624,7 @@ class RandomState: @overload def binomial( self, n: int, p: float, size: None = None - ) -> int: ... # type: ignore[misc] + ) -> int: ... 
@overload def binomial( self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None @@ -632,7 +632,7 @@ class RandomState: @overload def negative_binomial( self, n: float, p: float, size: None = None - ) -> int: ... # type: ignore[misc] + ) -> int: ... @overload def negative_binomial( self, @@ -643,19 +643,19 @@ class RandomState: @overload def poisson( self, lam: float = 1.0, size: None = None - ) -> int: ... # type: ignore[misc] + ) -> int: ... @overload def poisson( self, lam: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None ) -> NDArray[long]: ... @overload - def zipf(self, a: float, size: None = None) -> int: ... # type: ignore[misc] + def zipf(self, a: float, size: None = None) -> int: ... @overload def zipf( self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None ) -> NDArray[long]: ... @overload - def geometric(self, p: float, size: None = None) -> int: ... # type: ignore[misc] + def geometric(self, p: float, size: None = None) -> int: ... @overload def geometric( self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None @@ -663,7 +663,7 @@ class RandomState: @overload def hypergeometric( self, ngood: int, nbad: int, nsample: int, size: None = None - ) -> int: ... # type: ignore[misc] + ) -> int: ... @overload def hypergeometric( self, @@ -673,7 +673,7 @@ class RandomState: size: _ShapeLike | None = None, ) -> NDArray[long]: ... @overload - def logseries(self, p: float, size: None = None) -> int: ... # type: ignore[misc] + def logseries(self, p: float, size: None = None) -> int: ... @overload def logseries( self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None diff --git a/numpy/testing/__init__.pyi b/numpy/testing/__init__.pyi index 684a7c36adec..95db8728ef70 100644 --- a/numpy/testing/__init__.pyi +++ b/numpy/testing/__init__.pyi @@ -2,7 +2,7 @@ from unittest import TestCase from . 
import _private as _private, overrides from ._private import extbuild as extbuild -from ._private.utils import ( +from ._private.utils import ( # type: ignore[deprecated] BLAS_SUPPORTS_FPE, HAS_LAPACK64, HAS_REFCOUNT, diff --git a/numpy/typing/__init__.pyi b/numpy/typing/__init__.pyi index 7a4c7b41079c..5af10da218d9 100644 --- a/numpy/typing/__init__.pyi +++ b/numpy/typing/__init__.pyi @@ -1,3 +1,8 @@ -from numpy._typing import ArrayLike, DTypeLike, NBitBase, NDArray +from numpy._typing import ( # type: ignore[deprecated] + ArrayLike, + DTypeLike, + NBitBase, + NDArray, +) __all__ = ["ArrayLike", "DTypeLike", "NBitBase", "NDArray"] diff --git a/tools/stubtest/mypy.ini b/tools/stubtest/mypy.ini index 0a3bb07b1b94..4c75171acffe 100644 --- a/tools/stubtest/mypy.ini +++ b/tools/stubtest/mypy.ini @@ -9,7 +9,7 @@ exclude = (?x)( ) namespace_packages = False -enable_error_code = ignore-without-code, redundant-expr, truthy-bool +enable_error_code = deprecated, ignore-without-code, redundant-expr, truthy-bool warn_unreachable = False strict = True strict_bytes = True From 1bd4db906b1d05d5f5920330f8a47f3b0eb736d8 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Thu, 4 Dec 2025 14:48:30 +0100 Subject: [PATCH 0966/1718] CI: use Python 3.12 for Qemu tests (#30357) * CI: use Python 3.12 for Qemu tests * CI: use the loong64 uv fork for loongarch64 * CI: remove the ppc64le qemu jobs * CI: `--break-system-packages` when installing uv on loongarch64 * CI: attempt to fix compiling `ninja` in the loongarch64 qemu job * CI: avoid building ninja in the loongarch64 qemu job --- .github/workflows/linux_qemu.yml | 45 ++++++++++++++------------------ 1 file changed, 20 insertions(+), 25 deletions(-) diff --git a/.github/workflows/linux_qemu.yml b/.github/workflows/linux_qemu.yml index 268e1d916d1c..916417ebc513 100644 --- a/.github/workflows/linux_qemu.yml +++ b/.github/workflows/linux_qemu.yml @@ -42,22 +42,6 @@ jobs: fail-fast: false matrix: BUILD_PROP: - - [ - "ppc64le", - 
"powerpc64le-linux-gnu", - "ppc64le/ubuntu:22.04", - "-Dallow-noblas=true", - "test_kind or test_multiarray or test_simd or test_umath or test_ufunc", - "ppc64le" - ] - - [ - "ppc64le - baseline(Power9)", - "powerpc64le-linux-gnu", - "ppc64le/ubuntu:22.04", - "-Dallow-noblas=true -Dcpu-baseline=vsx3", - "test_kind or test_multiarray or test_simd or test_umath or test_ufunc", - "ppc64le" - ] - [ "s390x", "s390x-linux-gnu", @@ -124,7 +108,9 @@ jobs: docker run --platform=linux/${ARCH} --name the_container --interactive \ -v /:/host -v $(pwd):/numpy ${DOCKER_CONTAINER} /bin/bash -c " apt update && - apt install -y cmake git python3 python-is-python3 python3-dev python3-pip && + apt install -y cmake git curl ca-certificates && + curl -LsSf https://astral.sh/uv/install.sh | sh && + export PATH="/root/.local/bin:$PATH" && mkdir -p /lib64 && ln -s /host/lib64/ld-* /lib64/ && ln -s /host/lib/x86_64-linux-gnu /lib/x86_64-linux-gnu && rm -rf /usr/${TOOLCHAIN_NAME} && ln -s /host/usr/${TOOLCHAIN_NAME} /usr/${TOOLCHAIN_NAME} && @@ -140,8 +126,9 @@ jobs: git config --global --add safe.directory /numpy && # No need to build ninja from source, the host ninja is used for the build grep -v ninja /numpy/requirements/build_requirements.txt > /tmp/build_requirements.txt && - python -m pip install -r /tmp/build_requirements.txt && - python -m pip install pytest pytest-xdist hypothesis typing_extensions pytest-timeout && + uv venv --python 3.12 .venv && + source .venv/bin/activate && + uv pip install -r /tmp/build_requirements.txt pytest pytest-xdist hypothesis pytest-timeout rm -f /usr/local/bin/ninja && mkdir -p /usr/local/bin && ln -s /host/usr/bin/ninja /usr/local/bin/ninja " docker commit the_container the_container @@ -157,7 +144,7 @@ jobs: docker run --rm --platform=linux/${ARCH} -e "TERM=xterm-256color" \ -v $(pwd):/numpy -v /:/host the_container \ /bin/script -e -q -c "/bin/bash --noprofile --norc -eo pipefail -c ' - cd /numpy && spin build --clean -- ${MESON_OPTIONS} + source 
.venv/bin/activate && cd /numpy && spin build --clean -- ${MESON_OPTIONS} '" - name: Meson Log @@ -170,7 +157,7 @@ jobs: -v $(pwd):/numpy -v /:/host the_container \ /bin/script -e -q -c "/bin/bash --noprofile --norc -eo pipefail -c ' export F90=/usr/bin/gfortran - cd /numpy && spin test -- --timeout=600 --durations=10 -k \"${RUNTIME_TEST_FILTER}\" + source .venv/bin/activate && cd /numpy && spin test -- --timeout=600 --durations=10 -k \"${RUNTIME_TEST_FILTER}\" '" @@ -232,6 +219,8 @@ jobs: ln -s /host/lib/x86_64-linux-gnu /lib/x86_64-linux-gnu && ln -s /host/usr/${TOOLCHAIN_NAME} /usr/${TOOLCHAIN_NAME} && ln -s /host/usr/lib/gcc-cross/${TOOLCHAIN_NAME} /usr/lib/gcc/${TOOLCHAIN_NAME} && + mkdir -p /usr/libexec/gcc && + rm -rf /usr/libexec/gcc/${TOOLCHAIN_NAME} && ln -s /host/usr/libexec/gcc/${TOOLCHAIN_NAME} /usr/libexec/gcc/${TOOLCHAIN_NAME} && rm -f /usr/bin/gcc && ln -s /host/usr/bin/${TOOLCHAIN_NAME}-gcc-14 /usr/bin/gcc && rm -f /usr/bin/g++ && ln -s /host/usr/bin/${TOOLCHAIN_NAME}-g++-14 /usr/bin/g++ && rm -f /usr/bin/gfortran && ln -s /host/usr/bin/${TOOLCHAIN_NAME}-gfortran-14 /usr/bin/gfortran && @@ -241,8 +230,14 @@ jobs: rm -f /usr/bin/ld.bfd && ln -s /host/usr/bin/${TOOLCHAIN_NAME}-ld.bfd /usr/bin/ld.bfd && rm -f /usr/bin/ninja && ln -s /host/usr/bin/ninja /usr/bin/ninja && git config --global --add safe.directory /numpy && - python -m pip install --break-system-packages -r /numpy/requirements/build_requirements.txt && - python -m pip install --break-system-packages pytest pytest-xdist hypothesis typing_extensions + # No need to build ninja from source, the host ninja is used for the build + grep -v ninja /numpy/requirements/build_requirements.txt > /tmp/build_requirements.txt && + python -m pip install --break-system-packages uv --extra-index-url https://mirrors.loong64.com/pypi/simple && + export PATH="/root/.local/bin:$PATH" && + uv venv --python 3.12 .venv && + source .venv/bin/activate && + uv pip install -r /tmp/build_requirements.txt pytest 
pytest-xdist hypothesis && + rm -f /usr/local/bin/ninja && mkdir -p /usr/local/bin && ln -s /host/usr/bin/ninja /usr/local/bin/ninja " docker commit the_container the_container mkdir -p "~/docker_${TOOLCHAIN_NAME}" @@ -257,7 +252,7 @@ jobs: docker run --rm --platform=linux/${ARCH} -e "TERM=xterm-256color" \ -v $(pwd):/numpy -v /:/host the_container \ /bin/script -e -q -c "/bin/bash --noprofile --norc -eo pipefail -c ' - cd /numpy/ && spin build --clean -- ${MESON_OPTIONS} + source .venv/bin/activate && cd /numpy/ && spin build --clean -- ${MESON_OPTIONS} '" - name: Meson Log @@ -269,5 +264,5 @@ jobs: docker run --rm --platform=linux/${ARCH} -e "TERM=xterm-256color" \ -v $(pwd):/numpy -v /:/host the_container \ /bin/script -e -q -c "/bin/bash --noprofile --norc -eo pipefail -c ' - cd /numpy && spin test -- -k \"${RUNTIME_TEST_FILTER}\" + source .venv/bin/activate && cd /numpy && spin test -- -k \"${RUNTIME_TEST_FILTER}\" '" From eeaf04662e07cc8e2041f3e25bbd3698949a0c02 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 4 Dec 2025 14:43:24 -0700 Subject: [PATCH 0967/1718] MAINT: don't assert RecursionError in monster dtype test (#30375) * MAINT: don't assert RecursionError in monster dtype test * MAINT: use contextlib --- .github/workflows/macos.yml | 2 +- numpy/_core/tests/test_dtype.py | 5 ++++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 4fe1576c86e0..f2af68370b99 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -118,7 +118,7 @@ jobs: build_runner: - [ macos-15-intel, "macos_x86_64" ] - [ macos-14, "macos_arm64" ] - version: ["3.12", "3.14.0t"] + version: ["3.12", "3.14t"] steps: - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 diff --git a/numpy/_core/tests/test_dtype.py b/numpy/_core/tests/test_dtype.py index 13cc84d61d1e..72bcc3e22962 100644 --- a/numpy/_core/tests/test_dtype.py +++ b/numpy/_core/tests/test_dtype.py @@ 
-1,3 +1,4 @@ +import contextlib import ctypes import gc import inspect @@ -991,7 +992,9 @@ def test_tuple_recursion(self): d = np.int32 for i in range(100000): d = (d, (1,)) - with pytest.raises(RecursionError): + # depending on OS and Python version, this might succeed + # see gh-30370 and cpython issue #142253 + with contextlib.suppress(RecursionError): np.dtype(d) @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking") From 8f9a6b2ebe6038bb866ca4a0c0cf4f31729cb50f Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Fri, 5 Dec 2025 01:17:54 +0100 Subject: [PATCH 0968/1718] MAINT, TYP: bump `mypy` to `1.19.0` (#30323) * MAINT, TYP: bump `mypy` to `1.19.0` * TYP: adjust type-tests for mypy 1.19 --- environment.yml | 2 +- numpy/typing/tests/data/reveal/ufunc_config.pyi | 13 +++++++------ requirements/test_requirements.txt | 2 +- 3 files changed, 9 insertions(+), 8 deletions(-) diff --git a/environment.yml b/environment.yml index e27172e8b7b2..67b7cec26754 100644 --- a/environment.yml +++ b/environment.yml @@ -25,7 +25,7 @@ dependencies: - hypothesis # For type annotations - typing_extensions>=4.5.0 - - mypy=1.18.2 + - mypy=1.19.0 - orjson # makes mypy faster # For building docs - sphinx>=4.5.0 diff --git a/numpy/typing/tests/data/reveal/ufunc_config.pyi b/numpy/typing/tests/data/reveal/ufunc_config.pyi index 77c27eb3b4ca..f205b82b4f75 100644 --- a/numpy/typing/tests/data/reveal/ufunc_config.pyi +++ b/numpy/typing/tests/data/reveal/ufunc_config.pyi @@ -5,18 +5,19 @@ from collections.abc import Callable from typing import Any, assert_type import numpy as np +from numpy._core._ufunc_config import _ErrDict def func(a: str, b: int) -> None: ... class Write: def write(self, value: str) -> None: ... 
-assert_type(np.seterr(all=None), np._core._ufunc_config._ErrDict) -assert_type(np.seterr(divide="ignore"), np._core._ufunc_config._ErrDict) -assert_type(np.seterr(over="warn"), np._core._ufunc_config._ErrDict) -assert_type(np.seterr(under="call"), np._core._ufunc_config._ErrDict) -assert_type(np.seterr(invalid="raise"), np._core._ufunc_config._ErrDict) -assert_type(np.geterr(), np._core._ufunc_config._ErrDict) +assert_type(np.seterr(all=None), _ErrDict) +assert_type(np.seterr(divide="ignore"), _ErrDict) +assert_type(np.seterr(over="warn"), _ErrDict) +assert_type(np.seterr(under="call"), _ErrDict) +assert_type(np.seterr(invalid="raise"), _ErrDict) +assert_type(np.geterr(), _ErrDict) assert_type(np.setbufsize(4096), int) assert_type(np.getbufsize(), int) diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index 33b5756b7362..66537dafdade 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -12,7 +12,7 @@ pytest-timeout # For testing types. 
Notes on the restrictions: # - Mypy relies on C API features not present in PyPy # NOTE: Keep mypy in sync with environment.yml -mypy==1.18.2; platform_python_implementation != "PyPy" +mypy==1.19.0; platform_python_implementation != "PyPy" typing_extensions>=4.5.0 # for optional f2py encoding detection charset-normalizer From b9e3ed2e41f99ce146b16d8d1041842c38685430 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Fri, 5 Dec 2025 14:32:11 +0100 Subject: [PATCH 0969/1718] TYP: default to ``datetime64[Any]`` and ``timedelta64[Any]`` (#30364) * TYP: ``timedelta64[Any]`` by default * TYP: ``datetime64[Any]`` by default * TYP: ignore mypy deprecation warnings --- numpy/__init__.pyi | 183 ++++++++++-------- numpy/typing/tests/data/reveal/arithmetic.pyi | 21 +- .../tests/data/reveal/array_constructors.pyi | 10 +- .../typing/tests/data/reveal/arraysetops.pyi | 12 +- .../tests/data/reveal/lib_function_base.pyi | 6 +- numpy/typing/tests/data/reveal/linalg.pyi | 3 +- numpy/typing/tests/data/reveal/mod.pyi | 37 ++-- numpy/typing/tests/data/reveal/multiarray.pyi | 8 +- numpy/typing/tests/data/reveal/numeric.pyi | 8 +- 9 files changed, 163 insertions(+), 125 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 8ef7c321fa76..d837c5b960d6 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -744,7 +744,6 @@ _AnyShapeT = TypeVar( _AnyTD64Item = TypeVar("_AnyTD64Item", dt.timedelta, int, None, dt.timedelta | int | None) _AnyDT64Arg = TypeVar("_AnyDT64Arg", dt.datetime, dt.date, None) _AnyDT64Item = TypeVar("_AnyDT64Item", dt.datetime, dt.date, int, None, dt.date, int | None) -_AnyDate = TypeVar("_AnyDate", dt.date, dt.datetime) _AnyDateOrTime = TypeVar("_AnyDateOrTime", dt.date, dt.datetime, dt.timedelta) ### Type parameters (for internal use only) @@ -809,8 +808,8 @@ _FlexibleItemT_co = TypeVar( covariant=True, ) _CharacterItemT_co = TypeVar("_CharacterItemT_co", bound=_CharLike_co, default=_CharLike_co, covariant=True) -_TD64ItemT_co = 
TypeVar("_TD64ItemT_co", bound=dt.timedelta | int | None, default=dt.timedelta | int | None, covariant=True) -_DT64ItemT_co = TypeVar("_DT64ItemT_co", bound=dt.date | int | None, default=dt.date | int | None, covariant=True) +_TD64ItemT_co = TypeVar("_TD64ItemT_co", bound=dt.timedelta | int | None, default=Any, covariant=True) +_DT64ItemT_co = TypeVar("_DT64ItemT_co", bound=dt.date | int | None, default=Any, covariant=True) _TD64UnitT = TypeVar("_TD64UnitT", bound=_TD64Unit, default=_TD64Unit) _BoolOrIntArrayT = TypeVar("_BoolOrIntArrayT", bound=NDArray[integer | np.bool]) @@ -5402,7 +5401,7 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] /, ) -> timedelta64[dt.timedelta]: ... @overload - def __new__(cls, value: _ConvertibleToTD64, format: _TimeUnitSpec = ..., /) -> Self: ... + def __new__(cls, value: _ConvertibleToTD64, format: _TimeUnitSpec = ..., /) -> timedelta64: ... # inherited at runtime from `signedinteger` def __class_getitem__(cls, type_arg: type | object, /) -> GenericAlias: ... @@ -5416,9 +5415,16 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] def __pos__(self, /) -> Self: ... def __abs__(self, /) -> Self: ... + # + @overload + def __add__(self: timedelta64[Never], x: timedelta64[int | dt.timedelta] | _IntLike_co, /) -> timedelta64: ... @overload def __add__(self: timedelta64[None], x: _TD64Like_co, /) -> timedelta64[None]: ... @overload + def __add__(self: timedelta64[int | dt.timedelta], x: timedelta64[Never], /) -> timedelta64: ... + @overload + def __add__(self, x: timedelta64[None], /) -> timedelta64[None]: ... + @overload def __add__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> timedelta64[int]: ... @overload def __add__(self: timedelta64[int], x: timedelta64, /) -> timedelta64[int | None]: ... 
@@ -5426,118 +5432,115 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] def __add__(self: timedelta64[dt.timedelta], x: _AnyDateOrTime, /) -> _AnyDateOrTime: ... @overload def __add__(self: timedelta64[_AnyTD64Item], x: timedelta64[_AnyTD64Item] | _IntLike_co, /) -> timedelta64[_AnyTD64Item]: ... - @overload - def __add__(self, x: timedelta64[None], /) -> timedelta64[None]: ... # type: ignore[overload-cannot-match] __radd__ = __add__ + # @overload - def __mul__(self: timedelta64[_AnyTD64Item], x: int | np.integer | np.bool, /) -> timedelta64[_AnyTD64Item]: ... - @overload - def __mul__(self: timedelta64[_AnyTD64Item], x: float | np.floating, /) -> timedelta64[_AnyTD64Item | None]: ... + def __sub__(self: timedelta64[Never], b: timedelta64[int | dt.timedelta] | _IntLike_co, /) -> timedelta64: ... @overload - def __mul__(self, x: float | np.floating | np.integer | np.bool, /) -> timedelta64: ... - __rmul__ = __mul__ - + def __sub__(self: timedelta64[None], b: _TD64Like_co, /) -> timedelta64[None]: ... @overload - def __mod__(self, x: timedelta64[L[0] | None], /) -> timedelta64[None]: ... + def __sub__(self: timedelta64[int | dt.timedelta], b: timedelta64[Never], /) -> timedelta64: ... @overload - def __mod__(self: timedelta64[None], x: timedelta64, /) -> timedelta64[None]: ... + def __sub__(self, b: timedelta64[None], /) -> timedelta64[None]: ... @overload - def __mod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> timedelta64[int | None]: ... - @overload - def __mod__(self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], /) -> timedelta64[_AnyTD64Item | None]: ... + def __sub__(self: timedelta64[int], b: timedelta64[int | dt.timedelta], /) -> timedelta64[int]: ... @overload - def __mod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> dt.timedelta: ... + def __sub__(self: timedelta64[int], b: timedelta64, /) -> timedelta64[int | None]: ... 
@overload - def __mod__(self, x: timedelta64[int], /) -> timedelta64[int | None]: ... + def __sub__(self: timedelta64[dt.timedelta], b: dt.timedelta, /) -> dt.timedelta: ... @overload - def __mod__(self, x: timedelta64, /) -> timedelta64: ... + def __sub__(self: timedelta64[_AnyTD64Item], b: timedelta64[_AnyTD64Item] | _IntLike_co, /) -> timedelta64[_AnyTD64Item]: ... - # NOTE: The L[0] makes __mod__ non-commutative, which the first two overloads - # reflect. However, mypy does not seem to like this, so we ignore the errors. + # NOTE: subtraction is not commutative, so __rsub__ differs from __sub__. + # This confuses mypy, so we ignore the [misc] errors it reports. @overload - def __rmod__(self, x: timedelta64[None], /) -> timedelta64[None]: ... # type: ignore[misc] + def __rsub__(self: timedelta64[Never], a: timedelta64[int | dt.timedelta] | _IntLike_co, /) -> timedelta64: ... @overload - def __rmod__(self: timedelta64[L[0] | None], x: timedelta64, /) -> timedelta64[None]: ... + def __rsub__(self: timedelta64[None], a: _TD64Like_co, /) -> timedelta64[None]: ... @overload - def __rmod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> timedelta64[int | None]: ... # type: ignore[misc] + def __rsub__(self: timedelta64[dt.timedelta], a: _AnyDateOrTime, /) -> _AnyDateOrTime: ... @overload - def __rmod__(self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], /) -> timedelta64[_AnyTD64Item | None]: ... # type: ignore[misc] + def __rsub__(self: timedelta64[dt.timedelta], a: timedelta64[_AnyTD64Item], /) -> timedelta64[_AnyTD64Item]: ... @overload - def __rmod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> dt.timedelta: ... + def __rsub__(self: timedelta64[_AnyTD64Item], a: timedelta64[_AnyTD64Item] | _IntLike_co, /) -> timedelta64[_AnyTD64Item]: ... # type: ignore[misc] @overload - def __rmod__(self, x: timedelta64[int], /) -> timedelta64[int | None]: ... 
# type: ignore[misc] + def __rsub__(self, a: timedelta64[None], /) -> timedelta64[None]: ... # type: ignore[overload-cannot-match] @overload - def __rmod__(self, x: timedelta64, /) -> timedelta64: ... # type: ignore[misc] + def __rsub__(self, a: datetime64[None], /) -> datetime64[None]: ... - # keep in sync with __mod__ @overload - def __divmod__(self, x: timedelta64[L[0] | None], /) -> tuple[int64, timedelta64[None]]: ... + def __mul__(self: timedelta64[Never], x: _FloatLike_co, /) -> timedelta64: ... @overload - def __divmod__(self: timedelta64[None], x: timedelta64, /) -> tuple[int64, timedelta64[None]]: ... + def __mul__(self: timedelta64[None], x: _FloatLike_co, /) -> timedelta64[None]: ... @overload - def __divmod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> tuple[int64, timedelta64[int | None]]: ... + def __mul__(self, x: _IntLike_co, /) -> Self: ... @overload - def __divmod__( - self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], / - ) -> tuple[int64, timedelta64[_AnyTD64Item | None]]: ... - @overload - def __divmod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> tuple[int, dt.timedelta]: ... + def __mul__(self, x: float | np.floating, /) -> timedelta64[_TD64ItemT_co | None]: ... @overload - def __divmod__(self, x: timedelta64[int], /) -> tuple[int64, timedelta64[int | None]]: ... - @overload - def __divmod__(self, x: timedelta64, /) -> tuple[int64, timedelta64]: ... + def __mul__(self, x: _FloatLike_co, /) -> timedelta64: ... + __rmul__ = __mul__ - # keep in sync with __rmod__ + # keep in sync with __divmod__ @overload - def __rdivmod__(self, x: timedelta64[None], /) -> tuple[int64, timedelta64[None]]: ... # type: ignore[misc] + def __mod__(self: timedelta64[Never], x: timedelta64[dt.timedelta], /) -> timedelta64: ... @overload - def __rdivmod__(self: timedelta64[L[0] | None], x: timedelta64, /) -> tuple[int64, timedelta64[None]]: ... 
+ def __mod__(self: timedelta64[int | dt.timedelta], x: timedelta64[Never], /) -> timedelta64: ... @overload - def __rdivmod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> tuple[int64, timedelta64[int | None]]: ... # type: ignore[misc] + def __mod__(self, x: timedelta64[L[0] | None], /) -> timedelta64[None]: ... @overload - def __rdivmod__( # type: ignore[misc] - self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], / - ) -> tuple[int64, timedelta64[_AnyTD64Item | None]]: ... + def __mod__(self, x: timedelta64[int], /) -> timedelta64[int | None]: ... @overload - def __rdivmod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> tuple[int, dt.timedelta]: ... + def __mod__(self: timedelta64[None], x: timedelta64, /) -> timedelta64[None]: ... @overload - def __rdivmod__(self, x: timedelta64[int], /) -> tuple[int64, timedelta64[int | None]]: ... # type: ignore[misc] + def __mod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> timedelta64[int | None]: ... @overload - def __rdivmod__(self, x: timedelta64, /) -> tuple[int64, timedelta64]: ... # type: ignore[misc] - + def __mod__(self: timedelta64[dt.timedelta], x: timedelta64[int], /) -> timedelta64[int | None]: ... @overload - def __sub__(self: timedelta64[None], b: _TD64Like_co, /) -> timedelta64[None]: ... + def __mod__(self: timedelta64[dt.timedelta], x: timedelta64[dt.timedelta], /) -> timedelta64[dt.timedelta | None]: ... @overload - def __sub__(self: timedelta64[int], b: timedelta64[int | dt.timedelta], /) -> timedelta64[int]: ... + def __mod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> dt.timedelta: ... @overload - def __sub__(self: timedelta64[int], b: timedelta64, /) -> timedelta64[int | None]: ... + def __mod__(self, x: timedelta64, /) -> timedelta64: ... + + # keep in sync with __rdivmod__ + def __rmod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> dt.timedelta: ... 
+ + # keep in sync with __mod__ @overload - def __sub__(self: timedelta64[dt.timedelta], b: dt.timedelta, /) -> dt.timedelta: ... + def __divmod__( + self: timedelta64[Never], x: timedelta64[Never] | timedelta64[dt.timedelta], / + ) -> tuple[int64, timedelta64]: ... @overload - def __sub__(self: timedelta64[_AnyTD64Item], b: timedelta64[_AnyTD64Item] | _IntLike_co, /) -> timedelta64[_AnyTD64Item]: ... + def __divmod__(self: timedelta64[int | dt.timedelta], x: timedelta64[Never], /) -> tuple[int64, timedelta64]: ... @overload - def __sub__(self, b: timedelta64[None], /) -> timedelta64[None]: ... # type: ignore[overload-cannot-match] - - # NOTE: subtraction is not commutative, so __rsub__ differs from __sub__. - # This confuses mypy, so we ignore the [misc] errors it reports. + def __divmod__(self, x: timedelta64[L[0] | None], /) -> tuple[int64, timedelta64[None]]: ... @overload - def __rsub__(self: timedelta64[None], a: _TD64Like_co, /) -> timedelta64[None]: ... + def __divmod__(self, x: timedelta64[int], /) -> tuple[int64, timedelta64[int | None]]: ... @overload - def __rsub__(self: timedelta64[dt.timedelta], a: _AnyDateOrTime, /) -> _AnyDateOrTime: ... + def __divmod__(self: timedelta64[None], x: timedelta64, /) -> tuple[int64, timedelta64[None]]: ... @overload - def __rsub__(self: timedelta64[dt.timedelta], a: timedelta64[_AnyTD64Item], /) -> timedelta64[_AnyTD64Item]: ... # type: ignore[misc] + def __divmod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> tuple[int64, timedelta64[int | None]]: ... @overload - def __rsub__(self: timedelta64[_AnyTD64Item], a: timedelta64[_AnyTD64Item] | _IntLike_co, /) -> timedelta64[_AnyTD64Item]: ... # type: ignore[misc] + def __divmod__(self: timedelta64[dt.timedelta], x: timedelta64[int], /) -> tuple[int64, timedelta64[int | None]]: ... @overload - def __rsub__(self, a: timedelta64[None], /) -> timedelta64[None]: ... 
# type: ignore[overload-cannot-match] + def __divmod__( + self: timedelta64[dt.timedelta], x: timedelta64[dt.timedelta], / + ) -> tuple[int64, timedelta64[dt.timedelta | None]]: ... + @overload + def __divmod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> tuple[int, dt.timedelta]: ... @overload - def __rsub__(self, a: datetime64[None], /) -> datetime64[None]: ... # type: ignore[misc] + def __divmod__(self, x: timedelta64, /) -> tuple[int64, timedelta64]: ... + # keep in sync with __rmod__ + def __rdivmod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> tuple[int, dt.timedelta]: ... + + @overload + def __truediv__(self, b: timedelta64, /) -> float64: ... @overload def __truediv__(self: timedelta64[dt.timedelta], b: dt.timedelta, /) -> float: ... @overload - def __truediv__(self, b: timedelta64, /) -> float64: ... + def __truediv__(self: timedelta64[Never], b: float | floating | integer, /) -> timedelta64: ... @overload def __truediv__(self: timedelta64[_AnyTD64Item], b: int | integer, /) -> timedelta64[_AnyTD64Item]: ... @overload @@ -5545,24 +5548,26 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] @overload def __truediv__(self, b: float | floating | integer, /) -> timedelta64: ... - @overload - def __rtruediv__(self: timedelta64[dt.timedelta], a: dt.timedelta, /) -> float: ... @overload def __rtruediv__(self, a: timedelta64, /) -> float64: ... + @overload + def __rtruediv__(self: timedelta64[dt.timedelta], a: dt.timedelta, /) -> float: ... + @overload + def __floordiv__(self, b: timedelta64, /) -> int64: ... @overload def __floordiv__(self: timedelta64[dt.timedelta], b: dt.timedelta, /) -> int: ... @overload - def __floordiv__(self, b: timedelta64, /) -> int64: ... + def __floordiv__(self: timedelta64[Never], b: float | floating | integer, /) -> timedelta64: ... @overload def __floordiv__(self: timedelta64[_AnyTD64Item], b: int | integer, /) -> timedelta64[_AnyTD64Item]: ... 
@overload def __floordiv__(self: timedelta64[_AnyTD64Item], b: float | floating, /) -> timedelta64[_AnyTD64Item | None]: ... - @overload - def __rfloordiv__(self: timedelta64[dt.timedelta], a: dt.timedelta, /) -> int: ... @overload def __rfloordiv__(self, a: timedelta64, /) -> int64: ... + @overload + def __rfloordiv__(self: timedelta64[dt.timedelta], a: dt.timedelta, /) -> int: ... # comparison ops @@ -5621,13 +5626,19 @@ class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]): @overload def __new__(cls, value: bytes | str | dt.date | None, format: _TimeUnitSpec = ..., /) -> Self: ... + # def __class_getitem__(cls, type_arg: type | object, /) -> GenericAlias: ... + # + @overload + def __add__(self: datetime64[Never], x: _TD64Like_co, /) -> datetime64: ... @overload - def __add__(self: datetime64[_AnyDT64Item], x: int | integer | np.bool, /) -> datetime64[_AnyDT64Item]: ... + def __add__(self: datetime64[_AnyDT64Item], x: _IntLike_co, /) -> datetime64[_AnyDT64Item]: ... @overload def __add__(self: datetime64[None], x: _TD64Like_co, /) -> datetime64[None]: ... @overload + def __add__(self: datetime64[int | dt.datetime], x: timedelta64[Never], /) -> datetime64: ... + @overload def __add__(self: datetime64[int], x: timedelta64[int | dt.timedelta], /) -> datetime64[int]: ... @overload def __add__(self: datetime64[dt.datetime], x: timedelta64[dt.timedelta], /) -> datetime64[dt.datetime]: ... @@ -5636,15 +5647,20 @@ class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]): @overload def __add__(self: datetime64[dt.date], x: timedelta64[int], /) -> datetime64[int]: ... @overload - def __add__(self, x: datetime64[None], /) -> datetime64[None]: ... + def __add__(self, x: timedelta64[None], /) -> datetime64[None]: ... @overload def __add__(self, x: _TD64Like_co, /) -> datetime64: ... __radd__ = __add__ + # + @overload + def __sub__(self: datetime64[Never], x: _TD64Like_co, /) -> datetime64: ... 
@overload - def __sub__(self: datetime64[_AnyDT64Item], x: int | integer | np.bool, /) -> datetime64[_AnyDT64Item]: ... + def __sub__(self: datetime64[Never], x: datetime64, /) -> timedelta64: ... @overload - def __sub__(self: datetime64[_AnyDate], x: _AnyDate, /) -> dt.timedelta: ... + def __sub__(self: datetime64[_AnyDT64Item], x: _IntLike_co, /) -> datetime64[_AnyDT64Item]: ... + @overload + def __sub__(self: datetime64[dt.date], x: dt.date, /) -> dt.timedelta: ... @overload def __sub__(self: datetime64[None], x: timedelta64, /) -> datetime64[None]: ... @overload @@ -5676,22 +5692,25 @@ class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]): # NOTE: mypy gets confused by the non-commutativity of subtraction here @overload - def __rsub__(self: datetime64[_AnyDT64Item], x: int | integer | np.bool, /) -> datetime64[_AnyDT64Item]: ... + def __rsub__(self: datetime64[Never], x: datetime64, /) -> timedelta64: ... + @overload + def __rsub__(self: datetime64[_AnyDT64Item], x: _IntLike_co, /) -> datetime64[_AnyDT64Item]: ... @overload - def __rsub__(self: datetime64[_AnyDate], x: _AnyDate, /) -> dt.timedelta: ... + def __rsub__(self: datetime64[dt.date], x: dt.date, /) -> dt.timedelta: ... @overload def __rsub__(self: datetime64[None], x: datetime64, /) -> timedelta64[None]: ... @overload def __rsub__(self: datetime64[int], x: datetime64, /) -> timedelta64[int]: ... @overload - def __rsub__(self: datetime64[dt.datetime], x: datetime64[int], /) -> timedelta64[int]: ... # type: ignore[misc] + def __rsub__(self: datetime64[dt.datetime], x: datetime64[int], /) -> timedelta64[int]: ... @overload def __rsub__(self: datetime64[dt.datetime], x: datetime64[dt.date], /) -> timedelta64[dt.timedelta]: ... @overload - def __rsub__(self, x: datetime64[None], /) -> timedelta64[None]: ... # type: ignore[misc] + def __rsub__(self, x: datetime64[None], /) -> timedelta64[None]: ... @overload - def __rsub__(self, x: datetime64, /) -> timedelta64: ... 
# type: ignore[misc] + def __rsub__(self, x: datetime64, /) -> timedelta64: ... + # @overload def __lt__(self, other: datetime64, /) -> bool_: ... @overload diff --git a/numpy/typing/tests/data/reveal/arithmetic.pyi b/numpy/typing/tests/data/reveal/arithmetic.pyi index 491bce43fdae..68fa5b5230a6 100644 --- a/numpy/typing/tests/data/reveal/arithmetic.pyi +++ b/numpy/typing/tests/data/reveal/arithmetic.pyi @@ -406,14 +406,18 @@ assert_type(M8 - M8, np.timedelta64) assert_type(M8 - i, np.datetime64) assert_type(M8 - i8, np.datetime64) -assert_type(M8_none + m8, np.datetime64[None]) assert_type(M8_none + i, np.datetime64[None]) -assert_type(M8_none + i8, np.datetime64[None]) -assert_type(M8_none - M8, np.timedelta64[None]) -assert_type(M8_none - m8, np.datetime64[None]) assert_type(M8_none - i, np.datetime64[None]) + +assert_type(M8_none + i8, np.datetime64[None]) assert_type(M8_none - i8, np.datetime64[None]) +# NOTE: Mypy incorrectly infers `timedelta64[Any]`, but pyright behaves correctly. +assert_type(M8_none + m8, np.datetime64[None]) # type: ignore[assert-type] +assert_type(M8_none - M8, np.timedelta64[None]) # type: ignore[assert-type] +# NOTE: Mypy incorrectly infers `datetime64[Any]`, but pyright behaves correctly. +assert_type(M8_none - m8, np.datetime64[None]) # type: ignore[assert-type] + assert_type(m8 + m8, np.timedelta64) assert_type(m8 + i, np.timedelta64) assert_type(m8 + i8, np.timedelta64) @@ -428,7 +432,8 @@ assert_type(m8 / f4, np.timedelta64) assert_type(m8 / m8, np.float64) assert_type(m8 // m8, np.int64) assert_type(m8 % m8, np.timedelta64) -assert_type(divmod(m8, m8), tuple[np.int64, np.timedelta64]) +# NOTE: Mypy incorrectly infers `tuple[Any, ...]`, but pyright behaves correctly. 
+assert_type(divmod(m8, m8), tuple[np.int64, np.timedelta64]) # type: ignore[assert-type] assert_type(m8_none + m8, np.timedelta64[None]) assert_type(m8_none + i, np.timedelta64[None]) @@ -438,10 +443,12 @@ assert_type(m8_none - i8, np.timedelta64[None]) assert_type(m8_int + i, np.timedelta64[int]) assert_type(m8_int + m8_delta, np.timedelta64[int]) -assert_type(m8_int + m8, np.timedelta64[int | None]) +assert_type(m8_int + m8, np.timedelta64) assert_type(m8_int - i, np.timedelta64[int]) assert_type(m8_int - m8_delta, np.timedelta64[int]) -assert_type(m8_int - m8, np.timedelta64[int | None]) +assert_type(m8_int - m8_int, np.timedelta64[int]) +assert_type(m8_int - m8_none, np.timedelta64[None]) +assert_type(m8_int - m8, np.timedelta64) assert_type(m8_delta + date, dt.date) assert_type(m8_delta + time, dt.datetime) diff --git a/numpy/typing/tests/data/reveal/array_constructors.pyi b/numpy/typing/tests/data/reveal/array_constructors.pyi index ba8fc4db23c9..b7daf3397f9d 100644 --- a/numpy/typing/tests/data/reveal/array_constructors.pyi +++ b/numpy/typing/tests/data/reveal/array_constructors.pyi @@ -117,17 +117,17 @@ assert_type(np.frombuffer(A, dtype="c16"), npt.NDArray[Any]) _x_bool: bool _x_int: int _x_float: float -_x_timedelta: np.timedelta64 -_x_datetime: np.datetime64 +_x_timedelta: np.timedelta64[int] +_x_datetime: np.datetime64[int] assert_type(np.arange(False, True), np.ndarray[tuple[int], np.dtype[np.int_]]) assert_type(np.arange(10), np.ndarray[tuple[int], np.dtype[np.int_]]) assert_type(np.arange(0, 10, step=2), np.ndarray[tuple[int], np.dtype[np.int_]]) assert_type(np.arange(10.0), np.ndarray[tuple[int], np.dtype[np.float64 | Any]]) assert_type(np.arange(0, stop=10.0), np.ndarray[tuple[int], np.dtype[np.float64 | Any]]) -assert_type(np.arange(_x_timedelta), np.ndarray[tuple[int], np.dtype[np.timedelta64[Any]]]) -assert_type(np.arange(0, _x_timedelta), np.ndarray[tuple[int], np.dtype[np.timedelta64[Any]]]) -assert_type(np.arange(_x_datetime, _x_datetime), 
np.ndarray[tuple[int], np.dtype[np.datetime64[Any]]]) +assert_type(np.arange(_x_timedelta), np.ndarray[tuple[int], np.dtype[np.timedelta64]]) +assert_type(np.arange(0, _x_timedelta), np.ndarray[tuple[int], np.dtype[np.timedelta64]]) +assert_type(np.arange(_x_datetime, _x_datetime), np.ndarray[tuple[int], np.dtype[np.datetime64]]) assert_type(np.arange(10, dtype=np.float64), np.ndarray[tuple[int], np.dtype[np.float64]]) assert_type(np.arange(0, 10, step=2, dtype=np.int16), np.ndarray[tuple[int], np.dtype[np.int16]]) assert_type(np.arange(10, dtype=int), np.ndarray[tuple[int], np.dtype[np.int_]]) diff --git a/numpy/typing/tests/data/reveal/arraysetops.pyi b/numpy/typing/tests/data/reveal/arraysetops.pyi index 7e5ca5c5717b..36fc0603dcfd 100644 --- a/numpy/typing/tests/data/reveal/arraysetops.pyi +++ b/numpy/typing/tests/data/reveal/arraysetops.pyi @@ -23,7 +23,8 @@ assert_type(np.ediff1d(AR_O), npt.NDArray[np.object_]) assert_type(np.ediff1d(AR_LIKE_f8, to_begin=[1, 1.5]), npt.NDArray[Any]) assert_type(np.intersect1d(AR_i8, AR_i8), npt.NDArray[np.int64]) -assert_type(np.intersect1d(AR_M, AR_M, assume_unique=True), npt.NDArray[np.datetime64]) +# NOTE: Mypy incorrectly infers `ndarray[Any, Any]`, but pyright behaves correctly. +assert_type(np.intersect1d(AR_M, AR_M, assume_unique=True), npt.NDArray[np.datetime64]) # type: ignore[assert-type] assert_type(np.intersect1d(AR_f8, AR_i8), npt.NDArray[Any]) assert_type( np.intersect1d(AR_f8, AR_f8, return_indices=True), @@ -31,7 +32,8 @@ assert_type( ) assert_type(np.setxor1d(AR_i8, AR_i8), npt.NDArray[np.int64]) -assert_type(np.setxor1d(AR_M, AR_M, assume_unique=True), npt.NDArray[np.datetime64]) +# NOTE: Mypy incorrectly infers `ndarray[Any, Any]`, but pyright behaves correctly. 
+assert_type(np.setxor1d(AR_M, AR_M, assume_unique=True), npt.NDArray[np.datetime64]) # type: ignore[assert-type] assert_type(np.setxor1d(AR_f8, AR_i8), npt.NDArray[Any]) assert_type(np.isin(AR_i8, AR_i8), npt.NDArray[np.bool]) @@ -40,11 +42,13 @@ assert_type(np.isin(AR_f8, AR_i8), npt.NDArray[np.bool]) assert_type(np.isin(AR_f8, AR_LIKE_f8, invert=True), npt.NDArray[np.bool]) assert_type(np.union1d(AR_i8, AR_i8), npt.NDArray[np.int64]) -assert_type(np.union1d(AR_M, AR_M), npt.NDArray[np.datetime64]) +# NOTE: Mypy incorrectly infers `ndarray[Any, Any]`, but pyright behaves correctly. +assert_type(np.union1d(AR_M, AR_M), npt.NDArray[np.datetime64]) # type: ignore[assert-type] assert_type(np.union1d(AR_f8, AR_i8), npt.NDArray[Any]) assert_type(np.setdiff1d(AR_i8, AR_i8), npt.NDArray[np.int64]) -assert_type(np.setdiff1d(AR_M, AR_M, assume_unique=True), npt.NDArray[np.datetime64]) +# NOTE: Mypy incorrectly infers `ndarray[Any, Any]`, but pyright behaves correctly. +assert_type(np.setdiff1d(AR_M, AR_M, assume_unique=True), npt.NDArray[np.datetime64]) # type: ignore[assert-type] assert_type(np.setdiff1d(AR_f8, AR_i8), npt.NDArray[Any]) assert_type(np.unique(AR_f8), npt.NDArray[np.float64]) diff --git a/numpy/typing/tests/data/reveal/lib_function_base.pyi b/numpy/typing/tests/data/reveal/lib_function_base.pyi index 345635d06327..090af934a411 100644 --- a/numpy/typing/tests/data/reveal/lib_function_base.pyi +++ b/numpy/typing/tests/data/reveal/lib_function_base.pyi @@ -270,7 +270,8 @@ assert_type(np.sinc(AR_LIKE_c16), np.ndarray[tuple[int], np.dtype[np.complex128] # median assert_type(np.median(AR_f8, keepdims=False), np.float64) assert_type(np.median(AR_c16, overwrite_input=True), np.complex128) -assert_type(np.median(AR_m), np.timedelta64) +# NOTE: Mypy incorrectly infers `Any`, but pyright behaves correctly. 
+assert_type(np.median(AR_m), np.timedelta64) # type: ignore[assert-type] assert_type(np.median(AR_O), Any) assert_type(np.median(AR_f8, keepdims=True), npt.NDArray[np.float64]) assert_type(np.median(AR_f8, axis=0), npt.NDArray[np.float64]) @@ -332,7 +333,8 @@ assert_type(np.trapezoid(AR_f8), np.float64 | npt.NDArray[np.float64]) assert_type(np.trapezoid(AR_f8, AR_f8), np.float64 | npt.NDArray[np.float64]) assert_type(np.trapezoid(AR_c16), np.complex128 | npt.NDArray[np.complex128]) assert_type(np.trapezoid(AR_c16, AR_c16), np.complex128 | npt.NDArray[np.complex128]) -assert_type(np.trapezoid(AR_m), np.timedelta64 | npt.NDArray[np.timedelta64]) +# NOTE: Mypy incorrectly infers `Any`, but pyright behaves correctly. +assert_type(np.trapezoid(AR_m), np.timedelta64 | npt.NDArray[np.timedelta64]) # type: ignore[assert-type] assert_type(np.trapezoid(AR_O), npt.NDArray[np.object_] | Any) assert_type(np.trapezoid(AR_O, AR_LIKE_f8), npt.NDArray[np.object_] | Any) diff --git a/numpy/typing/tests/data/reveal/linalg.pyi b/numpy/typing/tests/data/reveal/linalg.pyi index 60056516def0..3f73e842aa6c 100644 --- a/numpy/typing/tests/data/reveal/linalg.pyi +++ b/numpy/typing/tests/data/reveal/linalg.pyi @@ -51,7 +51,8 @@ assert_type(np.linalg.outer(AR_f8, AR_f8), npt.NDArray[np.float64]) assert_type(np.linalg.outer(AR_c16, AR_c16), npt.NDArray[np.complex128]) assert_type(np.linalg.outer(AR_b, AR_b), npt.NDArray[np.bool]) assert_type(np.linalg.outer(AR_O, AR_O), npt.NDArray[np.object_]) -assert_type(np.linalg.outer(AR_i8, AR_m), npt.NDArray[np.timedelta64]) +# NOTE: Mypy incorrectly infers `ndarray[Any, Any]`, but pyright behaves correctly. 
+assert_type(np.linalg.outer(AR_i8, AR_m), npt.NDArray[np.timedelta64]) # type: ignore[assert-type] assert_type(np.linalg.qr(AR_i8), QRResult) assert_type(np.linalg.qr(AR_f8), QRResult) diff --git a/numpy/typing/tests/data/reveal/mod.pyi b/numpy/typing/tests/data/reveal/mod.pyi index ef07dc0c8c8a..131e9259b6b5 100644 --- a/numpy/typing/tests/data/reveal/mod.pyi +++ b/numpy/typing/tests/data/reveal/mod.pyi @@ -27,13 +27,16 @@ f: float AR_b: npt.NDArray[np.bool] AR_m: npt.NDArray[np.timedelta64] +# NOTE: the __divmod__ calls are workarounds for https://github.com/microsoft/pyright/issues/9663 + # Time structures assert_type(m % m, np.timedelta64) assert_type(m % m_nat, np.timedelta64[None]) assert_type(m % m_int0, np.timedelta64[None]) assert_type(m % m_int, np.timedelta64[int | None]) -assert_type(m_nat % m, np.timedelta64[None]) +# NOTE: Mypy incorrectly infers `timedelta64[Any]`, but pyright behaves correctly. +assert_type(m_nat % m, np.timedelta64[None]) # type: ignore[assert-type] assert_type(m_int % m_nat, np.timedelta64[None]) assert_type(m_int % m_int0, np.timedelta64[None]) assert_type(m_int % m_int, np.timedelta64[int | None]) @@ -46,20 +49,22 @@ assert_type(m_td % m_td, np.timedelta64[dt.timedelta | None]) assert_type(AR_m % m, npt.NDArray[np.timedelta64]) assert_type(m % AR_m, npt.NDArray[np.timedelta64]) -assert_type(divmod(m, m), tuple[np.int64, np.timedelta64]) -assert_type(divmod(m, m_nat), tuple[np.int64, np.timedelta64[None]]) -assert_type(divmod(m, m_int0), tuple[np.int64, np.timedelta64[None]]) -# workarounds for https://github.com/microsoft/pyright/issues/9663 +# +# NOTE: Mypy incorrectly infers `tuple[Any, ...]`, but pyright behaves correctly. 
+assert_type(m.__divmod__(m), tuple[np.int64, np.timedelta64]) # type: ignore[assert-type] +assert_type(m.__divmod__(m_nat), tuple[np.int64, np.timedelta64[None]]) +assert_type(m.__divmod__(m_int0), tuple[np.int64, np.timedelta64[None]]) assert_type(m.__divmod__(m_int), tuple[np.int64, np.timedelta64[int | None]]) -assert_type(divmod(m_nat, m), tuple[np.int64, np.timedelta64[None]]) -assert_type(divmod(m_int, m_nat), tuple[np.int64, np.timedelta64[None]]) -assert_type(divmod(m_int, m_int0), tuple[np.int64, np.timedelta64[None]]) -assert_type(divmod(m_int, m_int), tuple[np.int64, np.timedelta64[int | None]]) -assert_type(divmod(m_int, m_td), tuple[np.int64, np.timedelta64[int | None]]) -assert_type(divmod(m_td, m_nat), tuple[np.int64, np.timedelta64[None]]) -assert_type(divmod(m_td, m_int0), tuple[np.int64, np.timedelta64[None]]) -assert_type(divmod(m_td, m_int), tuple[np.int64, np.timedelta64[int | None]]) -assert_type(divmod(m_td, m_td), tuple[np.int64, np.timedelta64[dt.timedelta | None]]) +# NOTE: Mypy incorrectly infers `tuple[Any, ...]`, but pyright behaves correctly. 
+assert_type(m_nat.__divmod__(m), tuple[np.int64, np.timedelta64[None]]) # type: ignore[assert-type] +assert_type(m_int.__divmod__(m_nat), tuple[np.int64, np.timedelta64[None]]) +assert_type(m_int.__divmod__(m_int0), tuple[np.int64, np.timedelta64[None]]) +assert_type(m_int.__divmod__(m_int), tuple[np.int64, np.timedelta64[int | None]]) +assert_type(m_int.__divmod__(m_td), tuple[np.int64, np.timedelta64[int | None]]) +assert_type(m_td.__divmod__(m_nat), tuple[np.int64, np.timedelta64[None]]) +assert_type(m_td.__divmod__(m_int0), tuple[np.int64, np.timedelta64[None]]) +assert_type(m_td.__divmod__(m_int), tuple[np.int64, np.timedelta64[int | None]]) +assert_type(m_td.__divmod__(m_td), tuple[np.int64, np.timedelta64[dt.timedelta | None]]) assert_type(divmod(AR_m, m), tuple[npt.NDArray[np.int64], npt.NDArray[np.timedelta64]]) assert_type(divmod(m, AR_m), tuple[npt.NDArray[np.int64], npt.NDArray[np.timedelta64]]) @@ -77,7 +82,6 @@ assert_type(b_ % AR_b, npt.NDArray[np.int8]) assert_type(divmod(b_, b), tuple[np.int8, np.int8]) assert_type(divmod(b_, b_), tuple[np.int8, np.int8]) -# workarounds for https://github.com/microsoft/pyright/issues/9663 assert_type(b_.__divmod__(i), tuple[np.int_, np.int_]) assert_type(b_.__divmod__(f), tuple[np.float64, np.float64]) assert_type(b_.__divmod__(i8), tuple[np.int64, np.int64]) @@ -118,7 +122,6 @@ assert_type(i8 % AR_b, npt.NDArray[np.int64]) assert_type(divmod(i8, b), tuple[np.int64, np.int64]) assert_type(divmod(i8, i4), tuple[np.signedinteger, np.signedinteger]) assert_type(divmod(i8, i8), tuple[np.int64, np.int64]) -# workarounds for https://github.com/microsoft/pyright/issues/9663 assert_type(i8.__divmod__(f), tuple[np.float64, np.float64]) assert_type(i8.__divmod__(f8), tuple[np.float64, np.float64]) assert_type(divmod(i8, f4), tuple[np.floating, np.floating]) @@ -142,7 +145,6 @@ assert_type(divmod(i8, i8), tuple[np.int64, np.int64]) assert_type(divmod(f8, i8), tuple[np.float64, np.float64]) assert_type(divmod(i4, i8), 
tuple[np.signedinteger, np.signedinteger]) assert_type(divmod(i4, i4), tuple[np.int32, np.int32]) -# workarounds for https://github.com/microsoft/pyright/issues/9663 assert_type(f4.__divmod__(i8), tuple[np.floating, np.floating]) assert_type(f4.__divmod__(i4), tuple[np.floating, np.floating]) assert_type(AR_b.__divmod__(i8), tuple[npt.NDArray[np.int64], npt.NDArray[np.int64]]) @@ -172,7 +174,6 @@ assert_type(AR_b % f8, npt.NDArray[np.float64]) assert_type(divmod(b, f8), tuple[np.float64, np.float64]) assert_type(divmod(f8, f8), tuple[np.float64, np.float64]) assert_type(divmod(f4, f4), tuple[np.float32, np.float32]) -# workarounds for https://github.com/microsoft/pyright/issues/9663 assert_type(f8.__rdivmod__(f), tuple[np.float64, np.float64]) assert_type(f8.__rdivmod__(f4), tuple[np.float64, np.float64]) assert_type(AR_b.__divmod__(f8), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) diff --git a/numpy/typing/tests/data/reveal/multiarray.pyi b/numpy/typing/tests/data/reveal/multiarray.pyi index 424f60df27e7..0fe907c0006b 100644 --- a/numpy/typing/tests/data/reveal/multiarray.pyi +++ b/numpy/typing/tests/data/reveal/multiarray.pyi @@ -170,9 +170,10 @@ assert_type(np.busday_count("2011-01", "2011-02"), np.int_) assert_type(np.busday_count(["2011-01"], "2011-02"), npt.NDArray[np.int_]) assert_type(np.busday_count(["2011-01"], date_scalar), npt.NDArray[np.int_]) -assert_type(np.busday_offset(M, m), np.datetime64) +# NOTE: Mypy incorrectly infers `Any`, but pyright behaves correctly. 
+assert_type(np.busday_offset(M, m), np.datetime64) # type: ignore[assert-type] +assert_type(np.busday_offset(M, 5), np.datetime64) # type: ignore[assert-type] assert_type(np.busday_offset(date_scalar, m), np.datetime64) -assert_type(np.busday_offset(M, 5), np.datetime64) assert_type(np.busday_offset(AR_M, m), npt.NDArray[np.datetime64]) assert_type(np.busday_offset(M, timedelta_seq), npt.NDArray[np.datetime64]) assert_type(np.busday_offset("2011-01", "2011-02", roll="forward"), np.datetime64) @@ -182,7 +183,8 @@ assert_type(np.is_busday("2012"), np.bool) assert_type(np.is_busday(date_scalar), np.bool) assert_type(np.is_busday(["2012"]), npt.NDArray[np.bool]) -assert_type(np.datetime_as_string(M), np.str_) +# NOTE: Mypy incorrectly infers `Any`, but pyright behaves correctly. +assert_type(np.datetime_as_string(M), np.str_) # type: ignore[assert-type] assert_type(np.datetime_as_string(AR_M), npt.NDArray[np.str_]) assert_type(np.busdaycalendar(holidays=date_seq), np.busdaycalendar) diff --git a/numpy/typing/tests/data/reveal/numeric.pyi b/numpy/typing/tests/data/reveal/numeric.pyi index 24f97d2d0784..44192fd64331 100644 --- a/numpy/typing/tests/data/reveal/numeric.pyi +++ b/numpy/typing/tests/data/reveal/numeric.pyi @@ -46,6 +46,8 @@ assert_type(np.argwhere(AR_i8), np.ndarray[tuple[int, int], np.dtype[np.intp]]) assert_type(np.flatnonzero(i8), np.ndarray[tuple[int], np.dtype[np.intp]]) assert_type(np.flatnonzero(AR_i8), np.ndarray[tuple[int], np.dtype[np.intp]]) +# NOTE: Mypy incorrectly infers `np.ndarray[Any, Any]` for timedelta64 + # correlate assert_type(np.correlate(AR_i8, AR_i8), np.ndarray[tuple[int], np.dtype[np.int64]]) assert_type(np.correlate(AR_b, AR_b), np.ndarray[tuple[int], np.dtype[np.bool]]) @@ -55,7 +57,7 @@ assert_type(np.correlate(AR_f8, AR_f8), np.ndarray[tuple[int], np.dtype[np.float assert_type(np.correlate(AR_f8, AR_i8), np.ndarray[tuple[int], np.dtype[np.float64 | Any]]) assert_type(np.correlate(AR_c16, AR_c16), np.ndarray[tuple[int], 
np.dtype[np.complex128]]) assert_type(np.correlate(AR_c16, AR_f8), np.ndarray[tuple[int], np.dtype[np.complex128 | Any]]) -assert_type(np.correlate(AR_m, AR_m), np.ndarray[tuple[int], np.dtype[np.timedelta64]]) +assert_type(np.correlate(AR_m, AR_m), np.ndarray[tuple[int], np.dtype[np.timedelta64]]) # type: ignore[assert-type] assert_type(np.correlate(AR_i8, AR_m), np.ndarray[tuple[int], np.dtype[np.timedelta64 | Any]]) assert_type(np.correlate(AR_O, AR_O), np.ndarray[tuple[int], np.dtype[np.object_]]) assert_type(np.correlate(_to_1d_bool, _to_1d_bool), np.ndarray[tuple[int], np.dtype[np.bool]]) @@ -72,7 +74,7 @@ assert_type(np.convolve(AR_f8, AR_f8), np.ndarray[tuple[int], np.dtype[np.float6 assert_type(np.convolve(AR_f8, AR_i8), np.ndarray[tuple[int], np.dtype[np.float64 | Any]]) assert_type(np.convolve(AR_c16, AR_c16), np.ndarray[tuple[int], np.dtype[np.complex128]]) assert_type(np.convolve(AR_c16, AR_f8), np.ndarray[tuple[int], np.dtype[np.complex128 | Any]]) -assert_type(np.convolve(AR_m, AR_m), np.ndarray[tuple[int], np.dtype[np.timedelta64]]) +assert_type(np.convolve(AR_m, AR_m), np.ndarray[tuple[int], np.dtype[np.timedelta64]]) # type: ignore[assert-type] assert_type(np.convolve(AR_i8, AR_m), np.ndarray[tuple[int], np.dtype[np.timedelta64 | Any]]) assert_type(np.convolve(AR_O, AR_O), np.ndarray[tuple[int], np.dtype[np.object_]]) assert_type(np.convolve(_to_1d_bool, _to_1d_bool), np.ndarray[tuple[int], np.dtype[np.bool]]) @@ -89,7 +91,7 @@ assert_type(np.outer(AR_f8, AR_f8), np.ndarray[tuple[int, int], np.dtype[np.floa assert_type(np.outer(AR_f8, AR_i8), np.ndarray[tuple[int, int], np.dtype[np.float64 | Any]]) assert_type(np.outer(AR_c16, AR_c16), np.ndarray[tuple[int, int], np.dtype[np.complex128]]) assert_type(np.outer(AR_c16, AR_f8), np.ndarray[tuple[int, int], np.dtype[np.complex128 | Any]]) -assert_type(np.outer(AR_m, AR_m), np.ndarray[tuple[int, int], np.dtype[np.timedelta64]]) +assert_type(np.outer(AR_m, AR_m), np.ndarray[tuple[int, int], 
np.dtype[np.timedelta64]]) # type: ignore[assert-type] assert_type(np.outer(AR_i8, AR_m), np.ndarray[tuple[int, int], np.dtype[np.timedelta64 | Any]]) assert_type(np.outer(AR_O, AR_O), np.ndarray[tuple[int, int], np.dtype[np.object_]]) assert_type(np.outer(AR_i8, AR_i8, out=_sub_nd_i8), SubClass) From 46600888986c7278189b987c974e73c2ce441c22 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 5 Dec 2025 11:53:34 -0700 Subject: [PATCH 0970/1718] MAINT: Bump github/codeql-action from 4.31.6 to 4.31.7 (#30383) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 4.31.6 to 4.31.7. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/fe4161a26a8629af62121b670040955b330f9af2...cf1bb45a277cb3c205638b2cd5c984db1c46a412) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 4.31.7 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 2b124d115710..d7622b4d6dff 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@fe4161a26a8629af62121b670040955b330f9af2 # v4.31.6 + uses: github/codeql-action/init@cf1bb45a277cb3c205638b2cd5c984db1c46a412 # v4.31.7 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. 
@@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@fe4161a26a8629af62121b670040955b330f9af2 # v4.31.6 + uses: github/codeql-action/autobuild@cf1bb45a277cb3c205638b2cd5c984db1c46a412 # v4.31.7 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@fe4161a26a8629af62121b670040955b330f9af2 # v4.31.6 + uses: github/codeql-action/analyze@cf1bb45a277cb3c205638b2cd5c984db1c46a412 # v4.31.7 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 9303e511dde8..8488e97ef116 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
- name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@fe4161a26a8629af62121b670040955b330f9af2 # v2.1.27 + uses: github/codeql-action/upload-sarif@cf1bb45a277cb3c205638b2cd5c984db1c46a412 # v2.1.27 with: sarif_file: results.sarif From 6a7996c2025b8a191f9f5777a96443d955fab6fb Mon Sep 17 00:00:00 2001 From: partev Date: Fri, 5 Dec 2025 14:26:52 -0500 Subject: [PATCH 0971/1718] DOC: make descriptions of hyperbolic functions consistent (#29297) --- numpy/_core/code_generators/ufunc_docstrings.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/_core/code_generators/ufunc_docstrings.py b/numpy/_core/code_generators/ufunc_docstrings.py index e976be723287..d4707da906b2 100644 --- a/numpy/_core/code_generators/ufunc_docstrings.py +++ b/numpy/_core/code_generators/ufunc_docstrings.py @@ -332,7 +332,7 @@ def add_newdoc(place, name, doc): add_newdoc('numpy._core.umath', 'arcsinh', """ - Inverse hyperbolic sine element-wise. + Inverse hyperbolic sine, element-wise. Parameters ---------- @@ -534,7 +534,7 @@ def add_newdoc(place, name, doc): add_newdoc('numpy._core.umath', 'arctanh', """ - Inverse hyperbolic tangent element-wise. + Inverse hyperbolic tangent, element-wise. Parameters ---------- @@ -4271,7 +4271,7 @@ def add_newdoc(place, name, doc): add_newdoc('numpy._core.umath', 'tanh', """ - Compute hyperbolic tangent element-wise. + Hyperbolic tangent, element-wise. Equivalent to ``np.sinh(x)/np.cosh(x)`` or ``-1j * np.tan(1j*x)``. 
From eade98761b21776b191643994dea21411598f486 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 27 Nov 2025 23:00:27 +0100 Subject: [PATCH 0972/1718] TYP: PEP 695 Python 3.12 syntax --- numpy/__init__.pyi | 1230 +++++++------- numpy/_array_api_info.pyi | 76 +- numpy/_core/_asarray.pyi | 16 +- numpy/_core/_dtype.pyi | 10 +- numpy/_core/_exceptions.pyi | 14 +- numpy/_core/_internal.pyi | 17 +- numpy/_core/_methods.pyi | 4 +- numpy/_core/_type_aliases.pyi | 4 +- numpy/_core/_ufunc_config.pyi | 10 +- numpy/_core/arrayprint.pyi | 22 +- numpy/_core/defchararray.pyi | 22 +- numpy/_core/einsumfunc.pyi | 39 +- numpy/_core/fromnumeric.pyi | 595 ++++--- numpy/_core/function_base.pyi | 48 +- numpy/_core/multiarray.pyi | 384 +++-- numpy/_core/numeric.pyi | 242 ++- numpy/_core/overrides.pyi | 25 +- numpy/_core/records.pyi | 18 +- numpy/_core/shape_base.pyi | 101 +- numpy/_core/strings.pyi | 8 +- numpy/_core/umath.pyi | 6 +- numpy/_typing/__init__.py | 18 +- numpy/_typing/_array_like.py | 106 +- numpy/_typing/_char_codes.py | 122 +- numpy/_typing/_dtype_like.py | 71 +- numpy/_typing/_nbit.py | 24 +- numpy/_typing/_nbit_base.py | 9 +- numpy/_typing/_nbit_base.pyi | 2 +- numpy/_typing/_nested_sequence.py | 12 +- numpy/_typing/_scalars.py | 22 +- numpy/_typing/_shape.py | 8 +- numpy/_typing/_ufunc.pyi | 173 +- numpy/_utils/__init__.pyi | 11 +- numpy/_utils/_inspect.pyi | 15 +- numpy/_utils/_pep440.pyi | 11 +- numpy/ctypeslib/_ctypeslib.pyi | 55 +- numpy/dtypes.pyi | 63 +- numpy/f2py/_backends/_meson.pyi | 3 +- numpy/f2py/auxfuncs.pyi | 21 +- numpy/f2py/cfuncs.pyi | 6 +- numpy/f2py/crackfortran.pyi | 28 +- numpy/f2py/f2py2e.pyi | 8 +- numpy/f2py/rules.pyi | 23 +- numpy/f2py/symbolic.pyi | 39 +- numpy/fft/_helper.pyi | 8 +- numpy/fft/_pocketfft.pyi | 4 +- numpy/lib/_arraypad_impl.pyi | 39 +- numpy/lib/_arraysetops_impl.pyi | 184 +-- numpy/lib/_arrayterator_impl.pyi | 11 +- numpy/lib/_datasource.pyi | 4 +- numpy/lib/_format_impl.pyi | 4 +- numpy/lib/_function_base_impl.pyi | 699 
++++---- numpy/lib/_histograms_impl.pyi | 4 +- numpy/lib/_index_tricks_impl.pyi | 45 +- numpy/lib/_iotools.pyi | 5 +- numpy/lib/_npyio_impl.pyi | 44 +- numpy/lib/_polynomial_impl.pyi | 23 +- numpy/lib/_shape_base_impl.pyi | 99 +- numpy/lib/_stride_tricks_impl.pyi | 24 +- numpy/lib/_twodim_base_impl.pyi | 170 +- numpy/lib/_type_check_impl.pyi | 63 +- numpy/lib/_ufunclike_impl.pyi | 45 +- numpy/lib/_user_array_impl.pyi | 91 +- numpy/lib/_utils_impl.pyi | 12 +- numpy/lib/recfunctions.pyi | 186 +-- numpy/linalg/_linalg.pyi | 25 +- numpy/ma/core.pyi | 1437 ++++++++++------- numpy/ma/extras.pyi | 101 +- numpy/matlib.pyi | 23 +- numpy/matrixlib/defmatrix.pyi | 90 +- numpy/polynomial/_polybase.pyi | 33 +- numpy/polynomial/_polytypes.pyi | 88 +- numpy/polynomial/chebyshev.pyi | 38 +- numpy/polynomial/hermite.pyi | 9 +- numpy/polynomial/hermite_e.pyi | 9 +- numpy/polynomial/polyutils.pyi | 30 +- numpy/random/_common.pyi | 7 +- numpy/random/_generator.pyi | 42 +- numpy/random/_pickle.pyi | 8 +- numpy/random/bit_generator.pyi | 3 +- numpy/testing/_private/utils.pyi | 83 +- numpy/typing/mypy_plugin.py | 4 +- numpy/typing/tests/data/fail/ma.pyi | 11 +- numpy/typing/tests/data/pass/ma.py | 5 +- .../tests/data/reveal/array_constructors.pyi | 21 +- .../typing/tests/data/reveal/bitwise_ops.pyi | 6 +- numpy/typing/tests/data/reveal/char.pyi | 6 +- numpy/typing/tests/data/reveal/chararray.pyi | 6 +- numpy/typing/tests/data/reveal/dtype.pyi | 4 +- numpy/typing/tests/data/reveal/flatiter.pyi | 8 +- numpy/typing/tests/data/reveal/ma.pyi | 20 +- numpy/typing/tests/data/reveal/matrix.pyi | 4 +- numpy/typing/tests/data/reveal/multiarray.pyi | 6 +- .../tests/data/reveal/nbit_base_example.pyi | 7 +- .../data/reveal/ndarray_assignability.pyi | 51 +- .../reveal/ndarray_shape_manipulation.pyi | 8 +- .../tests/data/reveal/polynomial_polybase.pyi | 31 +- .../data/reveal/polynomial_polyutils.pyi | 14 +- .../tests/data/reveal/polynomial_series.pyi | 12 +- numpy/typing/tests/data/reveal/rec.pyi | 4 
+- numpy/typing/tests/data/reveal/scalars.pyi | 18 +- numpy/typing/tests/data/reveal/strings.pyi | 6 +- numpy/typing/tests/data/reveal/testing.pyi | 5 +- .../typing/tests/data/reveal/twodim_base.pyi | 14 +- 104 files changed, 3799 insertions(+), 4003 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index d837c5b960d6..8ca91fd5b007 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1,9 +1,6 @@ # ruff: noqa: I001 import builtins -import sys -import mmap import ctypes as ct -import array as _array import datetime as dt import inspect from abc import abstractmethod @@ -142,6 +139,7 @@ from numpy._typing._extended_precision import ( from numpy._array_api_info import __array_namespace_info__ from collections.abc import ( + Buffer, Callable, Iterable, Iterator, @@ -149,19 +147,6 @@ from collections.abc import ( Sequence, ) -if sys.version_info >= (3, 12): - from collections.abc import Buffer as _SupportsBuffer -else: - _SupportsBuffer: TypeAlias = ( - bytes - | bytearray - | memoryview - | _array.array[Any] - | mmap.mmap - | NDArray[Any] - | generic - ) - from typing import ( Any, ClassVar, @@ -177,7 +162,6 @@ from typing import ( SupportsFloat, SupportsInt, SupportsIndex, - TypeAlias, TypedDict, final, overload, @@ -189,7 +173,8 @@ from typing import ( # library include `typing_extensions` stubs: # https://github.com/python/typeshed/blob/main/stdlib/typing_extensions.pyi from _typeshed import Incomplete, StrOrBytesPath, SupportsFlush, SupportsLenAndGetItem, SupportsWrite -from typing_extensions import CapsuleType, TypeVar, deprecated, override +from typing_extensions import CapsuleType, TypeVar, deprecated +from typing import override from numpy import ( char, @@ -724,80 +709,19 @@ __all__ = [ # noqa: RUF022 "emath", "show_config", "__version__", "__array_namespace_info__", ] # fmt: skip -### Constrained types (for internal use only) -# Only use these for functions; never as generic type parameter. 
- -_AnyStr = TypeVar("_AnyStr", LiteralString, str, bytes) -_AnyShapeT = TypeVar( - "_AnyShapeT", - tuple[()], # 0-d - tuple[int], # 1-d - tuple[int, int], # 2-d - tuple[int, int, int], # 3-d - tuple[int, int, int, int], # 4-d - tuple[int, int, int, int, int], # 5-d - tuple[int, int, int, int, int, int], # 6-d - tuple[int, int, int, int, int, int, int], # 7-d - tuple[int, int, int, int, int, int, int, int], # 8-d - tuple[int, ...], # N-d -) -_AnyTD64Item = TypeVar("_AnyTD64Item", dt.timedelta, int, None, dt.timedelta | int | None) -_AnyDT64Arg = TypeVar("_AnyDT64Arg", dt.datetime, dt.date, None) -_AnyDT64Item = TypeVar("_AnyDT64Item", dt.datetime, dt.date, int, None, dt.date, int | None) -_AnyDateOrTime = TypeVar("_AnyDateOrTime", dt.date, dt.datetime, dt.timedelta) - -### Type parameters (for internal use only) +### Type parameters (with defaults); for internal use only -_T = TypeVar("_T") -_T_co = TypeVar("_T_co", covariant=True) -_T_contra = TypeVar("_T_contra", contravariant=True) -_RealT_co = TypeVar("_RealT_co", covariant=True) -_ImagT_co = TypeVar("_ImagT_co", covariant=True) - -_DTypeT = TypeVar("_DTypeT", bound=dtype) -_DTypeT_co = TypeVar("_DTypeT_co", bound=dtype, default=dtype, covariant=True) -_FlexDTypeT = TypeVar("_FlexDTypeT", bound=dtype[flexible]) - -_ArrayT = TypeVar("_ArrayT", bound=ndarray) _ArrayT_co = TypeVar("_ArrayT_co", bound=ndarray, default=ndarray, covariant=True) -_BoolArrayT = TypeVar("_BoolArrayT", bound=NDArray[np.bool]) -_IntegerArrayT = TypeVar("_IntegerArrayT", bound=NDArray[integer]) -_IntegralArrayT = TypeVar("_IntegralArrayT", bound=NDArray[np.bool | integer | object_]) -_FloatingArrayT = TypeVar("_FloatingArrayT", bound=NDArray[floating]) -_FloatingTimedeltaArrayT = TypeVar("_FloatingTimedeltaArrayT", bound=NDArray[floating | timedelta64]) -_ComplexFloatingArrayT = TypeVar("_ComplexFloatingArrayT", bound=NDArray[complexfloating]) -_InexactArrayT = TypeVar("_InexactArrayT", bound=NDArray[inexact]) -_InexactTimedeltaArrayT = 
TypeVar("_InexactTimedeltaArrayT", bound=NDArray[inexact | timedelta64]) -_NumberArrayT = TypeVar("_NumberArrayT", bound=NDArray[number]) -_NumberCharacterArrayT = TypeVar("_NumberCharacterArrayT", bound=ndarray[Any, dtype[number | character] | dtypes.StringDType]) -_TimedeltaArrayT = TypeVar("_TimedeltaArrayT", bound=NDArray[timedelta64]) -_TimeArrayT = TypeVar("_TimeArrayT", bound=NDArray[datetime64 | timedelta64]) -_ObjectArrayT = TypeVar("_ObjectArrayT", bound=NDArray[object_]) -_BytesArrayT = TypeVar("_BytesArrayT", bound=NDArray[bytes_]) -_StringArrayT = TypeVar("_StringArrayT", bound=ndarray[Any, dtype[str_] | dtypes.StringDType]) -_RealArrayT = TypeVar("_RealArrayT", bound=NDArray[floating | integer | timedelta64 | np.bool | object_]) -_NumericArrayT = TypeVar("_NumericArrayT", bound=NDArray[number | timedelta64 | object_]) - -_ShapeT = TypeVar("_ShapeT", bound=_Shape) -_Shape1T = TypeVar("_Shape1T", bound=tuple[int, *tuple[int, ...]]) _ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True) -_2DShapeT_co = TypeVar("_2DShapeT_co", bound=_2D, default=_2D, covariant=True) -_1NShapeT = TypeVar("_1NShapeT", bound=tuple[L[1], *tuple[L[1], ...]]) # (1,) | (1, 1) | (1, 1, 1) | ... 
- -_ScalarT = TypeVar("_ScalarT", bound=generic) +_DTypeT_co = TypeVar("_DTypeT_co", bound=dtype, default=dtype, covariant=True) _ScalarT_co = TypeVar("_ScalarT_co", bound=generic, default=Any, covariant=True) -_NumberT = TypeVar("_NumberT", bound=number) -_InexactT = TypeVar("_InexactT", bound=inexact) -_RealNumberT = TypeVar("_RealNumberT", bound=floating | integer) -_IntegerT = TypeVar("_IntegerT", bound=integer) -_NonObjectScalarT = TypeVar("_NonObjectScalarT", bound=np.bool | number | flexible | datetime64 | timedelta64) -_NBit = TypeVar("_NBit", bound=NBitBase, default=Any) # pyright: ignore[reportDeprecated] -_NBit1 = TypeVar("_NBit1", bound=NBitBase, default=Any) # pyright: ignore[reportDeprecated] -_NBit2 = TypeVar("_NBit2", bound=NBitBase, default=_NBit1) # pyright: ignore[reportDeprecated] +# intentionally invariant +_NBitT = TypeVar("_NBitT", bound=NBitBase, default=Any) # pyright: ignore[reportDeprecated] +_NBitT1 = TypeVar("_NBitT1", bound=NBitBase, default=Any) # pyright: ignore[reportDeprecated] +_NBitT2 = TypeVar("_NBitT2", bound=NBitBase, default=_NBitT1) # pyright: ignore[reportDeprecated] _ItemT_co = TypeVar("_ItemT_co", default=Any, covariant=True) -_BoolItemT = TypeVar("_BoolItemT", bound=builtins.bool) _BoolItemT_co = TypeVar("_BoolItemT_co", bound=builtins.bool, default=builtins.bool, covariant=True) _NumberItemT_co = TypeVar("_NumberItemT_co", bound=complex, default=int | float | complex, covariant=True) _InexactItemT_co = TypeVar("_InexactItemT_co", bound=complex, default=float | complex, covariant=True) @@ -808,71 +732,74 @@ _FlexibleItemT_co = TypeVar( covariant=True, ) _CharacterItemT_co = TypeVar("_CharacterItemT_co", bound=_CharLike_co, default=_CharLike_co, covariant=True) -_TD64ItemT_co = TypeVar("_TD64ItemT_co", bound=dt.timedelta | int | None, default=Any, covariant=True) -_DT64ItemT_co = TypeVar("_DT64ItemT_co", bound=dt.date | int | None, default=Any, covariant=True) -_TD64UnitT = TypeVar("_TD64UnitT", bound=_TD64Unit, 
default=_TD64Unit) -_BoolOrIntArrayT = TypeVar("_BoolOrIntArrayT", bound=NDArray[integer | np.bool]) +_TD64ItemT_co = TypeVar("_TD64ItemT_co", bound=_TD64Item, default=Any, covariant=True) +_DT64ItemT_co = TypeVar("_DT64ItemT_co", bound=_DT64Item, default=Any, covariant=True) ### Type Aliases (for internal use only) -_Falsy: TypeAlias = L[False, 0] | np.bool[L[False]] -_Truthy: TypeAlias = L[True, 1] | np.bool[L[True]] +type _Falsy = L[False, 0] | np.bool[L[False]] +type _Truthy = L[True, 1] | np.bool[L[True]] + +type _1D = tuple[int] +type _2D = tuple[int, int] +type _2Tuple[T] = tuple[T, T] -_1D: TypeAlias = tuple[int] -_2D: TypeAlias = tuple[int, int] -_2Tuple: TypeAlias = tuple[_T, _T] +type _ArrayUInt_co = NDArray[unsignedinteger | bool_] +type _ArrayInt_co = NDArray[integer | bool_] +type _ArrayFloat64_co = NDArray[floating[_64Bit] | float32 | float16 | integer | bool_] +type _ArrayFloat_co = NDArray[floating | integer | bool_] +type _ArrayComplex128_co = NDArray[number[_64Bit] | number[_32Bit] | float16 | integer | bool_] +type _ArrayComplex_co = NDArray[inexact | integer | bool_] +type _ArrayNumber_co = NDArray[number | bool_] +type _ArrayTD64_co = NDArray[timedelta64 | integer | bool_] -_ArrayUInt_co: TypeAlias = NDArray[unsignedinteger | np.bool] -_ArrayInt_co: TypeAlias = NDArray[integer | np.bool] -_ArrayFloat64_co: TypeAlias = NDArray[floating[_64Bit] | float32 | float16 | integer | np.bool] -_ArrayFloat_co: TypeAlias = NDArray[floating | integer | np.bool] -_ArrayComplex128_co: TypeAlias = NDArray[number[_64Bit] | number[_32Bit] | float16 | integer | np.bool] -_ArrayComplex_co: TypeAlias = NDArray[inexact | integer | np.bool] -_ArrayNumber_co: TypeAlias = NDArray[number | np.bool] -_ArrayTD64_co: TypeAlias = NDArray[timedelta64 | integer | np.bool] +type _ArrayString = ndarray[_AnyShape, dtype[str_] | dtypes.StringDType] +type _ArrayNumeric = NDArray[number | timedelta64 | object_] -_Float64_co: TypeAlias = float | floating[_64Bit] | float32 | float16 
| integer | np.bool -_Complex64_co: TypeAlias = number[_32Bit] | number[_16Bit] | number[_8Bit] | builtins.bool | np.bool -_Complex128_co: TypeAlias = complex | number[_64Bit] | _Complex64_co +type _ScalarNotObject = bool_ | number | flexible | datetime64 | timedelta64 -_ToIndex: TypeAlias = SupportsIndex | slice | EllipsisType | _ArrayLikeInt_co | None -_ToIndices: TypeAlias = _ToIndex | tuple[_ToIndex, ...] +type _Float64_co = float | floating[_64Bit] | float32 | float16 | integer | np.bool +type _Complex64_co = number[_32Bit] | number[_16Bit] | number[_8Bit] | builtins.bool | np.bool +type _Complex128_co = complex | number[_64Bit] | _Complex64_co -_UnsignedIntegerCType: TypeAlias = type[ +type _ToIndex = SupportsIndex | slice | EllipsisType | _ArrayLikeInt_co | None +type _ToIndices = _ToIndex | tuple[_ToIndex, ...] + +type _UnsignedIntegerCType = type[ ct.c_uint8 | ct.c_uint16 | ct.c_uint32 | ct.c_uint64 | ct.c_ushort | ct.c_uint | ct.c_ulong | ct.c_ulonglong | ct.c_size_t | ct.c_void_p ] # fmt: skip -_SignedIntegerCType: TypeAlias = type[ +type _SignedIntegerCType = type[ ct.c_int8 | ct.c_int16 | ct.c_int32 | ct.c_int64 | ct.c_short | ct.c_int | ct.c_long | ct.c_longlong | ct.c_ssize_t ] # fmt: skip -_FloatingCType: TypeAlias = type[ct.c_float | ct.c_double | ct.c_longdouble] -_IntegerCType: TypeAlias = _UnsignedIntegerCType | _SignedIntegerCType +type _FloatingCType = type[ct.c_float | ct.c_double | ct.c_longdouble] +type _IntegerCType = _UnsignedIntegerCType | _SignedIntegerCType # some commonly used builtin types that are known to result in a # `dtype[object_]`, when their *type* is passed to the `dtype` constructor # NOTE: `builtins.object` should not be included here -_BuiltinObjectLike: TypeAlias = ( +type _BuiltinObjectLike = ( slice | Decimal | Fraction | UUID | dt.date | dt.time | dt.timedelta | dt.tzinfo | tuple[Any, ...] | list[Any] | set[Any] | frozenset[Any] | dict[Any, Any] ) # fmt: skip # Introduce an alias for `dtype` to avoid naming conflicts. 
-_dtype: TypeAlias = dtype[_ScalarT] +type _dtype[ScalarT: generic] = dtype[ScalarT] -_ByteOrderChar: TypeAlias = L["<", ">", "=", "|"] +type _ByteOrderChar = L["<", ">", "=", "|"] # can be anything, is case-insensitive, and only the first character matters -_ByteOrder: TypeAlias = L[ +type _ByteOrder = L[ "S", # swap the current order (default) "<", "L", "little", # little-endian ">", "B", "big", # big endian "=", "N", "native", # native order "|", "I", # ignore ] # fmt: skip -_DTypeKind: TypeAlias = L[ +type _DTypeKind = L[ "b", # boolean "i", # signed integer "u", # unsigned integer @@ -886,7 +813,7 @@ _DTypeKind: TypeAlias = L[ "V", # void "T", # unicode-string (variable-width) ] -_DTypeChar: TypeAlias = L[ +type _DTypeChar = L[ "?", # bool "b", # byte "B", # ubyte @@ -915,7 +842,7 @@ _DTypeChar: TypeAlias = L[ "c", # bytes_ (S1) "T", # StringDType ] -_DTypeNum: TypeAlias = L[ +type _DTypeNum = L[ 0, # bool 1, # byte 2, # ubyte @@ -944,35 +871,35 @@ _DTypeNum: TypeAlias = L[ 256, # user-defined 2056, # StringDType ] -_DTypeBuiltinKind: TypeAlias = L[0, 1, 2] +type _DTypeBuiltinKind = L[0, 1, 2] -_ArrayAPIVersion: TypeAlias = L["2021.12", "2022.12", "2023.12", "2024.12"] +type _ArrayAPIVersion = L["2021.12", "2022.12", "2023.12", "2024.12"] -_CastingKind: TypeAlias = L["no", "equiv", "safe", "same_kind", "same_value", "unsafe"] +type _CastingKind = L["no", "equiv", "safe", "same_kind", "same_value", "unsafe"] -_OrderKACF: TypeAlias = L["K", "A", "C", "F"] | None -_OrderACF: TypeAlias = L["A", "C", "F"] | None -_OrderCF: TypeAlias = L["C", "F"] | None # noqa: PYI047 +type _OrderKACF = L["K", "A", "C", "F"] | None +type _OrderACF = L["A", "C", "F"] | None +type _OrderCF = L["C", "F"] | None # noqa: PYI047 -_ModeKind: TypeAlias = L["raise", "wrap", "clip"] -_PartitionKind: TypeAlias = L["introselect"] +type _ModeKind = L["raise", "wrap", "clip"] +type _PartitionKind = L["introselect"] # in practice, only the first case-insensitive character is considered (so e.g. 
# "QuantumSort3000" will be interpreted as quicksort). -_SortKind: TypeAlias = L[ +type _SortKind = L[ "Q", "quick", "quicksort", "M", "merge", "mergesort", "H", "heap", "heapsort", "S", "stable", "stablesort", -] -_SortSide: TypeAlias = L["left", "right"] +] # fmt: skip +type _SortSide = L["left", "right"] -_ConvertibleToInt: TypeAlias = SupportsInt | SupportsIndex | _CharLike_co -_ConvertibleToFloat: TypeAlias = SupportsFloat | SupportsIndex | _CharLike_co -_ConvertibleToComplex: TypeAlias = SupportsComplex | SupportsFloat | SupportsIndex | _CharLike_co -_ConvertibleToTD64: TypeAlias = dt.timedelta | int | _CharLike_co | character | number | timedelta64 | np.bool | None -_ConvertibleToDT64: TypeAlias = dt.date | int | _CharLike_co | character | number | datetime64 | np.bool | None +type _ConvertibleToInt = SupportsInt | SupportsIndex | _CharLike_co +type _ConvertibleToFloat = SupportsFloat | SupportsIndex | _CharLike_co +type _ConvertibleToComplex = SupportsComplex | SupportsFloat | SupportsIndex | _CharLike_co +type _ConvertibleToTD64 = dt.timedelta | int | _CharLike_co | character | number | timedelta64 | np.bool | None +type _ConvertibleToDT64 = dt.date | int | _CharLike_co | character | number | datetime64 | np.bool | None -_NDIterFlagsKind: TypeAlias = L[ +type _NDIterFlagsKind = L[ "buffered", "c_index", "copy_if_overlap", @@ -987,7 +914,7 @@ _NDIterFlagsKind: TypeAlias = L[ "reduce_ok", "zerosize_ok", ] -_NDIterFlagsOp: TypeAlias = L[ +type _NDIterFlagsOp = L[ "aligned", "allocate", "arraymask", @@ -1005,27 +932,30 @@ _NDIterFlagsOp: TypeAlias = L[ "writemasked", ] -_MemMapModeKind: TypeAlias = L[ +type _MemMapModeKind = L[ "readonly", "r", "copyonwrite", "c", "readwrite", "r+", "write", "w+", -] +] # fmt: skip + +type _DT64Item = dt.date | int | None +type _TD64Item = dt.timedelta | int | None -_DT64Date: TypeAlias = _HasDateAttributes | L["TODAY", "today", b"TODAY", b"today"] -_DT64Now: TypeAlias = L["NOW", "now", b"NOW", b"now"] -_NaTValue: TypeAlias = 
L["NAT", "NaT", "nat", b"NAT", b"NaT", b"nat"] - -_MonthUnit: TypeAlias = L["Y", "M", b"Y", b"M"] -_DayUnit: TypeAlias = L["W", "D", b"W", b"D"] -_DateUnit: TypeAlias = L[_MonthUnit, _DayUnit] -_NativeTimeUnit: TypeAlias = L["h", "m", "s", "ms", "us", "μs", b"h", b"m", b"s", b"ms", b"us"] -_IntTimeUnit: TypeAlias = L["ns", "ps", "fs", "as", b"ns", b"ps", b"fs", b"as"] -_TimeUnit: TypeAlias = L[_NativeTimeUnit, _IntTimeUnit] -_NativeTD64Unit: TypeAlias = L[_DayUnit, _NativeTimeUnit] -_IntTD64Unit: TypeAlias = L[_MonthUnit, _IntTimeUnit] -_TD64Unit: TypeAlias = L[_DateUnit, _TimeUnit] -_TimeUnitSpec: TypeAlias = _TD64UnitT | tuple[_TD64UnitT, SupportsIndex] +type _DT64Date = _HasDateAttributes | L["TODAY", "today", b"TODAY", b"today"] +type _DT64Now = L["NOW", "now", b"NOW", b"now"] +type _NaTValue = L["NAT", "NaT", "nat", b"NAT", b"NaT", b"nat"] + +type _MonthUnit = L["Y", "M", b"Y", b"M"] +type _DayUnit = L["W", "D", b"W", b"D"] +type _DateUnit = L[_MonthUnit, _DayUnit] +type _NativeTimeUnit = L["h", "m", "s", "ms", "us", "μs", b"h", b"m", b"s", b"ms", b"us"] +type _IntTimeUnit = L["ns", "ps", "fs", "as", b"ns", b"ps", b"fs", b"as"] +type _TimeUnit = L[_NativeTimeUnit, _IntTimeUnit] +type _NativeTD64Unit = L[_DayUnit, _NativeTimeUnit] +type _IntTD64Unit = L[_MonthUnit, _IntTimeUnit] +type _TD64Unit = L[_DateUnit, _TimeUnit] +type _TimeUnitSpec[UnitT: _TD64Unit] = _TD64Unit | tuple[_TD64Unit, SupportsIndex] ### TypedDict's (for internal use only) @@ -1070,30 +1000,30 @@ class _SupportsFileMethods(SupportsFlush, Protocol): class _SupportsFileMethodsRW(SupportsWrite[bytes], _SupportsFileMethods, Protocol): ... @type_check_only -class _SupportsDLPack(Protocol[_T_contra]): - def __dlpack__(self, /, *, stream: _T_contra | None = None) -> CapsuleType: ... +class _SupportsDLPack[StreamT](Protocol): + def __dlpack__(self, /, *, stream: StreamT | None = None) -> CapsuleType: ... 
@type_check_only -class _HasDType(Protocol[_T_co]): +class _HasDType[DTypeT](Protocol): # DTypeT bound was intentionally left out @property - def dtype(self, /) -> _T_co: ... + def dtype(self, /) -> DTypeT: ... @type_check_only -class _HasRealAndImag(Protocol[_RealT_co, _ImagT_co]): +class _HasRealAndImag[RealT, ImagT](Protocol): @property - def real(self, /) -> _RealT_co: ... + def real(self, /) -> RealT: ... @property - def imag(self, /) -> _ImagT_co: ... + def imag(self, /) -> ImagT: ... @type_check_only -class _HasTypeWithRealAndImag(Protocol[_RealT_co, _ImagT_co]): +class _HasTypeWithRealAndImag[RealT, ImagT](Protocol): @property - def type(self, /) -> type[_HasRealAndImag[_RealT_co, _ImagT_co]]: ... + def type(self, /) -> type[_HasRealAndImag[RealT, ImagT]]: ... @type_check_only -class _HasDTypeWithRealAndImag(Protocol[_RealT_co, _ImagT_co]): +class _HasDTypeWithRealAndImag[RealT, ImagT](Protocol): @property - def dtype(self, /) -> _HasTypeWithRealAndImag[_RealT_co, _ImagT_co]: ... + def dtype(self, /) -> _HasTypeWithRealAndImag[RealT, ImagT]: ... @type_check_only class _HasDateAttributes(Protocol): @@ -1167,7 +1097,7 @@ class _DTypeMeta(type): def _legacy(cls, /) -> bool: ... @final -class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): +class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 names: tuple[builtins.str, ...] | None def __hash__(self) -> int: ... @@ -1183,16 +1113,16 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): ) -> dtype[float64]: ... # Overload for `dtype` instances, scalar types, and instances that have a - # `dtype: dtype[_ScalarT]` attribute + # `dtype: dtype[ScalarT]` attribute @overload - def __new__( + def __new__[ScalarT: generic]( cls, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], align: builtins.bool = False, copy: builtins.bool = False, *, metadata: dict[builtins.str, Any] = ..., - ) -> dtype[_ScalarT]: ... + ) -> dtype[ScalarT]: ... 
# Builtin types # @@ -1588,9 +1518,9 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # NOTE: In the future 1-based multiplications will also yield `flexible` dtypes @overload - def __mul__(self: _DTypeT, value: L[1], /) -> _DTypeT: ... + def __mul__[DTypeT: dtype](self: DTypeT, value: L[1], /) -> DTypeT: ... @overload - def __mul__(self: _FlexDTypeT, value: SupportsIndex, /) -> _FlexDTypeT: ... + def __mul__[FlexibleDTypeT: dtype[flexible]](self: FlexibleDTypeT, value: SupportsIndex, /) -> FlexibleDTypeT: ... @overload def __mul__(self, value: SupportsIndex, /) -> dtype[void]: ... @@ -1598,7 +1528,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # literals as of mypy 0.902. Set the return-type to `dtype` for # now for non-flexible dtypes. @overload - def __rmul__(self: _FlexDTypeT, value: SupportsIndex, /) -> _FlexDTypeT: ... + def __rmul__[FlexibleDTypeT: dtype[flexible]](self: FlexibleDTypeT, value: SupportsIndex, /) -> FlexibleDTypeT: ... @overload def __rmul__(self, value: SupportsIndex, /) -> dtype: ... @@ -1664,38 +1594,38 @@ class flatiter(Generic[_ArrayT_co]): @property def base(self, /) -> _ArrayT_co: ... @property - def coords(self: flatiter[ndarray[_ShapeT]], /) -> _ShapeT: ... + def coords[ShapeT: _Shape](self: flatiter[ndarray[ShapeT]], /) -> ShapeT: ... @property def index(self, /) -> int: ... # iteration def __len__(self, /) -> int: ... def __iter__(self, /) -> Self: ... - def __next__(self: flatiter[NDArray[_ScalarT]], /) -> _ScalarT: ... + def __next__[ScalarT: generic](self: flatiter[NDArray[ScalarT]], /) -> ScalarT: ... # indexing @overload # nd: _[()] def __getitem__(self, key: tuple[()], /) -> _ArrayT_co: ... @overload # 0d; _[] - def __getitem__(self: flatiter[NDArray[_ScalarT]], key: int | integer, /) -> _ScalarT: ... + def __getitem__[ScalarT: generic](self: flatiter[NDArray[ScalarT]], key: int | integer, /) -> ScalarT: ... @overload # 1d; _[[*]], _[:], _[...] 
- def __getitem__( - self: flatiter[ndarray[Any, _DTypeT]], + def __getitem__[DTypeT: dtype]( + self: flatiter[ndarray[Any, DTypeT]], key: list[int] | slice | EllipsisType | flatiter[NDArray[integer]], /, - ) -> ndarray[tuple[int], _DTypeT]: ... + ) -> ndarray[tuple[int], DTypeT]: ... @overload # 2d; _[[*[*]]] - def __getitem__( - self: flatiter[ndarray[Any, _DTypeT]], + def __getitem__[DTypeT: dtype]( + self: flatiter[ndarray[Any, DTypeT]], key: list[list[int]], /, - ) -> ndarray[tuple[int, int], _DTypeT]: ... + ) -> ndarray[tuple[int, int], DTypeT]: ... @overload # ?d - def __getitem__( - self: flatiter[ndarray[Any, _DTypeT]], + def __getitem__[DTypeT: dtype]( + self: flatiter[ndarray[Any, DTypeT]], key: NDArray[integer] | _NestedSequence[int], /, - ) -> ndarray[_AnyShape, _DTypeT]: ... + ) -> ndarray[_AnyShape, DTypeT]: ... # NOTE: `__setitem__` operates via `unsafe` casting rules, and can thus accept any # type accepted by the relevant underlying `np.generic` constructor, which isn't @@ -1704,16 +1634,16 @@ class flatiter(Generic[_ArrayT_co]): # NOTE: `dtype` and `copy` are no-ops at runtime, so we don't support them here to # avoid confusion - def __array__( - self: flatiter[ndarray[Any, _DTypeT]], + def __array__[DTypeT: dtype]( + self: flatiter[ndarray[Any, DTypeT]], dtype: None = None, /, *, copy: None = None, - ) -> ndarray[tuple[int], _DTypeT]: ... + ) -> ndarray[tuple[int], DTypeT]: ... # This returns a flat copy of the underlying array, not of the iterator itself - def copy(self: flatiter[ndarray[Any, _DTypeT]], /) -> ndarray[tuple[int], _DTypeT]: ... + def copy[DTypeT: dtype](self: flatiter[ndarray[Any, DTypeT]], /) -> ndarray[tuple[int], DTypeT]: ... @type_check_only class _ArrayOrScalarCommon: @@ -1802,31 +1732,39 @@ class _ArrayOrScalarCommon: @overload # axis=index, out=None (default) def argmax(self, /, axis: SupportsIndex, out: None = None, *, keepdims: builtins.bool = False) -> Any: ... 
@overload # axis=index, out=ndarray - def argmax(self, /, axis: SupportsIndex | None, out: _BoolOrIntArrayT, *, keepdims: builtins.bool = False) -> _BoolOrIntArrayT: ... + def argmax[OutT: _ArrayInt_co]( + self, /, axis: SupportsIndex | None, out: OutT, *, keepdims: builtins.bool = False + ) -> OutT: ... @overload - def argmax(self, /, axis: SupportsIndex | None = None, *, out: _BoolOrIntArrayT, keepdims: builtins.bool = False) -> _BoolOrIntArrayT: ... + def argmax[OutT: _ArrayInt_co]( + self, /, axis: SupportsIndex | None = None, *, out: OutT, keepdims: builtins.bool = False + ) -> OutT: ... @overload # axis=None (default), out=None (default), keepdims=False (default) def argmin(self, /, axis: None = None, out: None = None, *, keepdims: L[False] = False) -> intp: ... @overload # axis=index, out=None (default) def argmin(self, /, axis: SupportsIndex, out: None = None, *, keepdims: builtins.bool = False) -> Any: ... @overload # axis=index, out=ndarray - def argmin(self, /, axis: SupportsIndex | None, out: _BoolOrIntArrayT, *, keepdims: builtins.bool = False) -> _BoolOrIntArrayT: ... + def argmin[OutT: _ArrayInt_co]( + self, /, axis: SupportsIndex | None, out: OutT, *, keepdims: builtins.bool = False + ) -> OutT: ... @overload - def argmin(self, /, axis: SupportsIndex | None = None, *, out: _BoolOrIntArrayT, keepdims: builtins.bool = False) -> _BoolOrIntArrayT: ... + def argmin[OutT: _ArrayInt_co]( + self, /, axis: SupportsIndex | None = None, *, out: OutT, keepdims: builtins.bool = False + ) -> OutT: ... # Keep in sync with `MaskedArray.round` @overload # out=None (default) def round(self, /, decimals: SupportsIndex = 0, out: None = None) -> Self: ... @overload # out=ndarray - def round(self, /, decimals: SupportsIndex, out: _ArrayT) -> _ArrayT: ... + def round[ArrayT: ndarray](self, /, decimals: SupportsIndex, out: ArrayT) -> ArrayT: ... @overload - def round(self, /, decimals: SupportsIndex = 0, *, out: _ArrayT) -> _ArrayT: ... 
+ def round[ArrayT: ndarray](self, /, decimals: SupportsIndex = 0, *, out: ArrayT) -> ArrayT: ... @overload # out=None (default) def choose(self, /, choices: ArrayLike, out: None = None, mode: _ModeKind = "raise") -> NDArray[Any]: ... @overload # out=ndarray - def choose(self, /, choices: ArrayLike, out: _ArrayT, mode: _ModeKind = "raise") -> _ArrayT: ... + def choose[ArrayT: ndarray](self, /, choices: ArrayLike, out: ArrayT, mode: _ModeKind = "raise") -> ArrayT: ... # TODO: Annotate kwargs with an unpacked `TypedDict` @overload # out: None (default) @@ -1836,36 +1774,42 @@ class _ArrayOrScalarCommon: @overload def clip(self, /, min: None = None, *, max: ArrayLike, out: None = None, **kwargs: Any) -> NDArray[Any]: ... @overload # out: ndarray - def clip(self, /, min: ArrayLike, max: ArrayLike | None, out: _ArrayT, **kwargs: Any) -> _ArrayT: ... + def clip[ArrayT: ndarray](self, /, min: ArrayLike, max: ArrayLike | None, out: ArrayT, **kwargs: Any) -> ArrayT: ... @overload - def clip(self, /, min: ArrayLike, max: ArrayLike | None = None, *, out: _ArrayT, **kwargs: Any) -> _ArrayT: ... + def clip[ArrayT: ndarray](self, /, min: ArrayLike, max: ArrayLike | None = None, *, out: ArrayT, **kwargs: Any) -> ArrayT: ... @overload - def clip(self, /, min: None, max: ArrayLike, out: _ArrayT, **kwargs: Any) -> _ArrayT: ... + def clip[ArrayT: ndarray](self, /, min: None, max: ArrayLike, out: ArrayT, **kwargs: Any) -> ArrayT: ... @overload - def clip(self, /, min: None = None, *, max: ArrayLike, out: _ArrayT, **kwargs: Any) -> _ArrayT: ... + def clip[ArrayT: ndarray](self, /, min: None = None, *, max: ArrayLike, out: ArrayT, **kwargs: Any) -> ArrayT: ... @overload def compress(self, /, condition: _ArrayLikeInt_co, axis: SupportsIndex | None = None, out: None = None) -> NDArray[Any]: ... @overload - def compress(self, /, condition: _ArrayLikeInt_co, axis: SupportsIndex | None, out: _ArrayT) -> _ArrayT: ... 
+ def compress[ArrayT: ndarray](self, /, condition: _ArrayLikeInt_co, axis: SupportsIndex | None, out: ArrayT) -> ArrayT: ... @overload - def compress(self, /, condition: _ArrayLikeInt_co, axis: SupportsIndex | None = None, *, out: _ArrayT) -> _ArrayT: ... + def compress[ArrayT: ndarray]( + self, /, condition: _ArrayLikeInt_co, axis: SupportsIndex | None = None, *, out: ArrayT + ) -> ArrayT: ... # Keep in sync with `MaskedArray.cumprod` @overload # out: None (default) def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> NDArray[Any]: ... @overload # out: ndarray - def cumprod(self, /, axis: SupportsIndex | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... + def cumprod[ArrayT: ndarray](self, /, axis: SupportsIndex | None, dtype: DTypeLike | None, out: ArrayT) -> ArrayT: ... @overload - def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... + def cumprod[ArrayT: ndarray]( + self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: ArrayT + ) -> ArrayT: ... # Keep in sync with `MaskedArray.cumsum` @overload # out: None (default) def cumsum(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> NDArray[Any]: ... @overload # out: ndarray - def cumsum(self, /, axis: SupportsIndex | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... + def cumsum[ArrayT: ndarray](self, /, axis: SupportsIndex | None, dtype: DTypeLike | None, out: ArrayT) -> ArrayT: ... @overload - def cumsum(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... + def cumsum[ArrayT: ndarray]( + self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: ArrayT + ) -> ArrayT: ... @overload def max( @@ -1879,27 +1823,27 @@ class _ArrayOrScalarCommon: where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... 
@overload - def max( + def max[ArrayT: ndarray]( self, /, axis: _ShapeLike | None, - out: _ArrayT, + out: ArrayT, *, keepdims: builtins.bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def max( + def max[ArrayT: ndarray]( self, /, axis: _ShapeLike | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: builtins.bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload def min( @@ -1913,27 +1857,27 @@ class _ArrayOrScalarCommon: where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload - def min( + def min[ArrayT: ndarray]( self, /, axis: _ShapeLike | None, - out: _ArrayT, + out: ArrayT, *, keepdims: builtins.bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def min( + def min[ArrayT: ndarray]( self, /, axis: _ShapeLike | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: builtins.bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload def sum( @@ -1948,29 +1892,29 @@ class _ArrayOrScalarCommon: where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload - def sum( + def sum[ArrayT: ndarray]( self, /, axis: _ShapeLike | None, dtype: DTypeLike | None, - out: _ArrayT, + out: ArrayT, *, keepdims: builtins.bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... 
@overload - def sum( + def sum[ArrayT: ndarray]( self, /, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: builtins.bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload def prod( @@ -1985,29 +1929,29 @@ class _ArrayOrScalarCommon: where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload - def prod( + def prod[ArrayT: ndarray]( self, /, axis: _ShapeLike | None, dtype: DTypeLike | None, - out: _ArrayT, + out: ArrayT, *, keepdims: builtins.bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def prod( + def prod[ArrayT: ndarray]( self, /, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: builtins.bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload def mean( @@ -2020,27 +1964,27 @@ class _ArrayOrScalarCommon: where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload - def mean( + def mean[ArrayT: ndarray]( self, /, axis: _ShapeLike | None, dtype: DTypeLike | None, - out: _ArrayT, + out: ArrayT, *, keepdims: builtins.bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def mean( + def mean[ArrayT: ndarray]( self, /, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: builtins.bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload def std( @@ -2056,31 +2000,31 @@ class _ArrayOrScalarCommon: correction: float | _NoValueType = ..., ) -> Any: ... 
@overload - def std( + def std[ArrayT: ndarray]( self, axis: _ShapeLike | None, dtype: DTypeLike | None, - out: _ArrayT, + out: ArrayT, ddof: float = 0, *, keepdims: builtins.bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeNumber_co | _NoValueType = ..., correction: float | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def std( + def std[ArrayT: ndarray]( self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, + out: ArrayT, ddof: float = 0, keepdims: builtins.bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeNumber_co | _NoValueType = ..., correction: float | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload def var( @@ -2096,31 +2040,31 @@ class _ArrayOrScalarCommon: correction: float | _NoValueType = ..., ) -> Any: ... @overload - def var( + def var[ArrayT: ndarray]( self, axis: _ShapeLike | None, dtype: DTypeLike | None, - out: _ArrayT, + out: ArrayT, ddof: float = 0, *, keepdims: builtins.bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeNumber_co | _NoValueType = ..., correction: float | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def var( + def var[ArrayT: ndarray]( self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, + out: ArrayT, ddof: float = 0, keepdims: builtins.bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeNumber_co | _NoValueType = ..., correction: float | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): __hash__: ClassVar[None] # type: ignore[assignment] # pyright: ignore[reportIncompatibleMethodOverride] @@ -2130,12 +2074,14 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def ndim(self) -> int: ... @property def size(self) -> int: ... 
+ @property - def real(self: _HasDTypeWithRealAndImag[_ScalarT, object], /) -> ndarray[_ShapeT_co, dtype[_ScalarT]]: ... + def real[ScalarT: generic](self: _HasDTypeWithRealAndImag[ScalarT, object], /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @real.setter def real(self, value: ArrayLike, /) -> None: ... + @property - def imag(self: _HasDTypeWithRealAndImag[object, _ScalarT], /) -> ndarray[_ShapeT_co, dtype[_ScalarT]]: ... + def imag[ScalarT: generic](self: _HasDTypeWithRealAndImag[object, ScalarT], /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @imag.setter def imag(self, value: ArrayLike, /) -> None: ... @@ -2143,21 +2089,20 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): cls, shape: _ShapeLike, dtype: DTypeLike | None = ..., - buffer: _SupportsBuffer | None = ..., + buffer: Buffer | None = ..., offset: SupportsIndex = ..., strides: _ShapeLike | None = ..., order: _OrderKACF = ..., ) -> Self: ... - if sys.version_info >= (3, 12): - def __buffer__(self, flags: int, /) -> memoryview: ... + def __buffer__(self, flags: int, /) -> memoryview: ... def __class_getitem__(cls, item: Any, /) -> GenericAlias: ... @overload def __array__(self, dtype: None = None, /, *, copy: builtins.bool | None = None) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __array__(self, dtype: _DTypeT, /, *, copy: builtins.bool | None = None) -> ndarray[_ShapeT_co, _DTypeT]: ... + def __array__[DTypeT: dtype](self, dtype: DTypeT, /, *, copy: builtins.bool | None = None) -> ndarray[_ShapeT_co, DTypeT]: ... def __array_ufunc__( self, @@ -2180,13 +2125,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # grant subclasses a bit more flexibility def __array_finalize__(self, obj: NDArray[Any] | None, /) -> None: ... 
- def __array_wrap__( + def __array_wrap__[ShapeT: _Shape, DTypeT: dtype]( self, - array: ndarray[_ShapeT, _DTypeT], + array: ndarray[ShapeT, DTypeT], context: tuple[ufunc, tuple[Any, ...], int] | None = ..., return_scalar: builtins.bool = ..., /, - ) -> ndarray[_ShapeT, _DTypeT]: ... + ) -> ndarray[ShapeT, DTypeT]: ... # Keep in sync with `MaskedArray.__getitem__` @overload @@ -2270,7 +2215,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def flat(self) -> flatiter[Self]: ... @overload # use the same output type as that of the underlying `generic` - def item(self: NDArray[generic[_T]], i0: SupportsIndex | tuple[SupportsIndex, ...] = ..., /, *args: SupportsIndex) -> _T: ... + def item[T](self: NDArray[generic[T]], i0: SupportsIndex | tuple[SupportsIndex, ...] = ..., /, *args: SupportsIndex) -> T: ... @overload # special casing for `StringDType`, which has no scalar type def item( self: ndarray[Any, dtypes.StringDType], @@ -2281,15 +2226,15 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # keep in sync with `ma.MaskedArray.tolist` @overload # this first overload prevents mypy from over-eagerly selecting `tuple[()]` in case of `_AnyShape` - def tolist(self: ndarray[tuple[Never], dtype[generic[_T]]], /) -> Any: ... + def tolist[T](self: ndarray[tuple[Never], dtype[generic[T]]], /) -> Any: ... @overload - def tolist(self: ndarray[tuple[()], dtype[generic[_T]]], /) -> _T: ... + def tolist[T](self: ndarray[tuple[()], dtype[generic[T]]], /) -> T: ... @overload - def tolist(self: ndarray[tuple[int], dtype[generic[_T]]], /) -> list[_T]: ... + def tolist[T](self: ndarray[tuple[int], dtype[generic[T]]], /) -> list[T]: ... @overload - def tolist(self: ndarray[tuple[int, int], dtype[generic[_T]]], /) -> list[list[_T]]: ... + def tolist[T](self: ndarray[tuple[int, int], dtype[generic[T]]], /) -> list[list[T]]: ... @overload - def tolist(self: ndarray[tuple[int, int, int], dtype[generic[_T]]], /) -> list[list[list[_T]]]: ... 
+ def tolist[T](self: ndarray[tuple[int, int, int], dtype[generic[T]]], /) -> list[list[list[T]]]: ... @overload def tolist(self, /) -> Any: ... @@ -2331,23 +2276,23 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): where: _ArrayLikeBool_co = True, ) -> np.bool | NDArray[np.bool]: ... @overload - def all( + def all[ArrayT: ndarray]( self, axis: int | tuple[int, ...] | None, - out: _ArrayT, + out: ArrayT, keepdims: SupportsIndex = False, *, where: _ArrayLikeBool_co = True, - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def all( + def all[ArrayT: ndarray]( self, axis: int | tuple[int, ...] | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: SupportsIndex = False, where: _ArrayLikeBool_co = True, - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload def any( @@ -2368,23 +2313,23 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): where: _ArrayLikeBool_co = True, ) -> np.bool | NDArray[np.bool]: ... @overload - def any( + def any[ArrayT: ndarray]( self, axis: int | tuple[int, ...] | None, - out: _ArrayT, + out: ArrayT, keepdims: SupportsIndex = False, *, where: _ArrayLikeBool_co = True, - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def any( + def any[ArrayT: ndarray]( self, axis: int | tuple[int, ...] | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: SupportsIndex = False, where: _ArrayLikeBool_co = True, - ) -> _ArrayT: ... + ) -> ArrayT: ... # @overload @@ -2441,7 +2386,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def dot(self, b: ArrayLike, /, out: None = None) -> Any: ... @overload - def dot(self, b: ArrayLike, /, out: _ArrayT) -> _ArrayT: ... + def dot[ArrayT: ndarray](self, b: ArrayLike, /, out: ArrayT) -> ArrayT: ... # `nonzero()` raises for 0d arrays/generics def nonzero(self) -> tuple[ndarray[tuple[int], np.dtype[intp]], ...]: ... @@ -2485,7 +2430,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): out: None = None, ) -> Any: ... 
@overload - def trace( + def trace[ArrayT: ndarray]( self, # >= 2D array /, offset: SupportsIndex = 0, @@ -2493,28 +2438,28 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): axis2: SupportsIndex = 1, dtype: DTypeLike | None = None, *, - out: _ArrayT, - ) -> _ArrayT: ... + out: ArrayT, + ) -> ArrayT: ... @overload - def trace( + def trace[ArrayT: ndarray]( self, # >= 2D array /, offset: SupportsIndex, axis1: SupportsIndex, axis2: SupportsIndex, dtype: DTypeLike | None, - out: _ArrayT, - ) -> _ArrayT: ... + out: ArrayT, + ) -> ArrayT: ... @overload - def take( - self: NDArray[_ScalarT], + def take[ScalarT: generic]( + self: NDArray[ScalarT], indices: _IntLike_co, /, axis: SupportsIndex | None = ..., out: None = None, mode: _ModeKind = ..., - ) -> _ScalarT: ... + ) -> ScalarT: ... @overload def take( self, @@ -2525,24 +2470,24 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): mode: _ModeKind = ..., ) -> ndarray[_AnyShape, _DTypeT_co]: ... @overload - def take( + def take[ArrayT: ndarray]( self, indices: _ArrayLikeInt_co, /, axis: SupportsIndex | None = ..., *, - out: _ArrayT, + out: ArrayT, mode: _ModeKind = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def take( + def take[ArrayT: ndarray]( self, indices: _ArrayLikeInt_co, /, axis: SupportsIndex | None, - out: _ArrayT, + out: ArrayT, mode: _ModeKind = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... # keep in sync with `ma.MaskedArray.repeat` @overload @@ -2568,14 +2513,26 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): copy: builtins.bool | None = None, ) -> ndarray[tuple[()], _DTypeT_co]: ... @overload # (() | (int) | (int, int) | ....) 
# up to 8-d - def reshape( + def reshape[ + AnyShapeT: ( + tuple[()], # 0d + tuple[int], # 1d + tuple[int, int], # 2d + tuple[int, int, int], # 3d + tuple[int, int, int, int], # 4d + tuple[int, int, int, int, int], # 5d + tuple[int, int, int, int, int, int], # 6d + tuple[int, int, int, int, int, int, int], # 7d + tuple[int, int, int, int, int, int, int, int], # 8d + ) + ]( self, - shape: _AnyShapeT, + shape: AnyShapeT, /, *, order: _OrderACF = "C", copy: builtins.bool | None = None, - ) -> ndarray[_AnyShapeT, _DTypeT_co]: ... + ) -> ndarray[AnyShapeT, _DTypeT_co]: ... @overload # (index) def reshape( self, @@ -2638,14 +2595,14 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): ) -> ndarray[_AnyShape, _DTypeT_co]: ... @overload - def astype( + def astype[ScalarT: generic]( self, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], order: _OrderKACF = ..., casting: _CastingKind = ..., subok: builtins.bool = ..., copy: builtins.bool | _CopyMode = ..., - ) -> ndarray[_ShapeT_co, dtype[_ScalarT]]: ... + ) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload def astype( self, @@ -2660,26 +2617,26 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload # () def view(self, /) -> Self: ... @overload # (dtype: T) - def view(self, /, dtype: _DTypeT | _HasDType[_DTypeT]) -> ndarray[_ShapeT_co, _DTypeT]: ... + def view[DTypeT: dtype](self, /, dtype: DTypeT | _HasDType[DTypeT]) -> ndarray[_ShapeT_co, DTypeT]: ... @overload # (dtype: dtype[T]) - def view(self, /, dtype: _DTypeLike[_ScalarT]) -> ndarray[_ShapeT_co, dtype[_ScalarT]]: ... + def view[ScalarT: generic](self, /, dtype: _DTypeLike[ScalarT]) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload # (type: T) - def view(self, /, *, type: type[_ArrayT]) -> _ArrayT: ... + def view[ArrayT: ndarray](self, /, *, type: type[ArrayT]) -> ArrayT: ... @overload # (_: T) - def view(self, /, dtype: type[_ArrayT]) -> _ArrayT: ... 
+ def view[ArrayT: ndarray](self, /, dtype: type[ArrayT]) -> ArrayT: ... @overload # (dtype: ?) def view(self, /, dtype: DTypeLike) -> ndarray[_ShapeT_co, dtype]: ... @overload # (dtype: ?, type: T) - def view(self, /, dtype: DTypeLike, type: type[_ArrayT]) -> _ArrayT: ... + def view[ArrayT: ndarray](self, /, dtype: DTypeLike, type: type[ArrayT]) -> ArrayT: ... def setfield(self, val: ArrayLike, /, dtype: DTypeLike, offset: SupportsIndex = 0) -> None: ... @overload - def getfield(self, /, dtype: _DTypeLike[_ScalarT], offset: SupportsIndex = 0) -> NDArray[_ScalarT]: ... + def getfield[ScalarT: generic](self, /, dtype: _DTypeLike[ScalarT], offset: SupportsIndex = 0) -> NDArray[ScalarT]: ... @overload def getfield(self, /, dtype: DTypeLike, offset: SupportsIndex = 0) -> NDArray[Any]: ... def __index__(self: NDArray[integer], /) -> int: ... - def __complex__(self: NDArray[number | np.bool | object_], /) -> complex: ... + def __complex__(self: NDArray[bool_ | number | object_], /) -> complex: ... def __len__(self) -> int: ... def __contains__(self, value: object, /) -> builtins.bool: ... @@ -2692,11 +2649,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): self: ndarray[tuple[Never, Never, Never, Never, Never, Never, Never, Never, Never], Any], / ) -> Iterator[Any]: ... @overload # == 1-d & dtype[T \ object_] - def __iter__(self: ndarray[tuple[int], dtype[_NonObjectScalarT]], /) -> Iterator[_NonObjectScalarT]: ... + def __iter__[ScalarT: _ScalarNotObject](self: ndarray[tuple[int], dtype[ScalarT]], /) -> Iterator[ScalarT]: ... @overload # == 1-d & StringDType def __iter__(self: ndarray[tuple[int], dtypes.StringDType], /) -> Iterator[str]: ... @overload # >= 2-d - def __iter__(self: ndarray[tuple[int, int, *tuple[int, ...]], _DTypeT], /) -> Iterator[ndarray[_AnyShape, _DTypeT]]: ... + def __iter__[DTypeT: dtype]( + self: ndarray[tuple[int, int, *tuple[int, ...]], DTypeT], / + ) -> Iterator[ndarray[_AnyShape, DTypeT]]: ... 
@overload # ?-d def __iter__(self, /) -> Iterator[Any]: ... @@ -2710,9 +2669,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __lt__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[np.bool]: ... @overload - def __lt__( - self: ndarray[Any, dtype[str_] | dtypes.StringDType], other: _ArrayLikeStr_co | _ArrayLikeString_co, / - ) -> NDArray[np.bool]: ... + def __lt__(self: _ArrayString, other: _ArrayLikeStr_co | _ArrayLikeString_co, /) -> NDArray[np.bool]: ... @overload def __lt__(self: NDArray[object_], other: object, /) -> NDArray[np.bool]: ... @overload @@ -2728,9 +2685,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __le__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[np.bool]: ... @overload - def __le__( - self: ndarray[Any, dtype[str_] | dtypes.StringDType], other: _ArrayLikeStr_co | _ArrayLikeString_co, / - ) -> NDArray[np.bool]: ... + def __le__(self: _ArrayString, other: _ArrayLikeStr_co | _ArrayLikeString_co, /) -> NDArray[np.bool]: ... @overload def __le__(self: NDArray[object_], other: object, /) -> NDArray[np.bool]: ... @overload @@ -2746,9 +2701,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __gt__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[np.bool]: ... @overload - def __gt__( - self: ndarray[Any, dtype[str_] | dtypes.StringDType], other: _ArrayLikeStr_co | _ArrayLikeString_co, / - ) -> NDArray[np.bool]: ... + def __gt__(self: _ArrayString, other: _ArrayLikeStr_co | _ArrayLikeString_co, /) -> NDArray[np.bool]: ... @overload def __gt__(self: NDArray[object_], other: object, /) -> NDArray[np.bool]: ... @overload @@ -2764,9 +2717,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __ge__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[np.bool]: ... 
@overload - def __ge__( - self: ndarray[Any, dtype[str_] | dtypes.StringDType], other: _ArrayLikeStr_co | _ArrayLikeString_co, / - ) -> NDArray[np.bool]: ... + def __ge__(self: _ArrayString, other: _ArrayLikeStr_co | _ArrayLikeString_co, /) -> NDArray[np.bool]: ... @overload def __ge__(self: NDArray[object_], other: object, /) -> NDArray[np.bool]: ... @overload @@ -2776,31 +2727,33 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # TODO: Uncomment once https://github.com/python/mypy/issues/14070 is fixed # @overload - # def __abs__(self: ndarray[_ShapeT, dtypes.Complex64DType], /) -> ndarray[_ShapeT, dtypes.Float32DType]: ... + # def __abs__(self: ndarray[ShapeT, dtypes.Complex64DType], /) -> ndarray[ShapeT, dtypes.Float32DType]: ... # @overload - # def __abs__(self: ndarray[_ShapeT, dtypes.Complex128DType], /) -> ndarray[_ShapeT, dtypes.Float64DType]: ... + # def __abs__(self: ndarray[ShapeT, dtypes.Complex128DType], /) -> ndarray[ShapeT, dtypes.Float64DType]: ... # @overload - # def __abs__(self: ndarray[_ShapeT, dtypes.CLongDoubleDType], /) -> ndarray[_ShapeT, dtypes.LongDoubleDType]: ... + # def __abs__(self: ndarray[ShapeT, dtypes.CLongDoubleDType], /) -> ndarray[ShapeT, dtypes.LongDoubleDType]: ... # @overload - # def __abs__(self: ndarray[_ShapeT, dtype[complex128]], /) -> ndarray[_ShapeT, dtype[float64]]: ... + # def __abs__(self: ndarray[ShapeT, dtype[complex128]], /) -> ndarray[ShapeT, dtype[float64]]: ... @overload - def __abs__(self: ndarray[_ShapeT, dtype[complexfloating[_NBit]]], /) -> ndarray[_ShapeT, dtype[floating[_NBit]]]: ... + def __abs__[ShapeT: _Shape, NBitT: NBitBase]( + self: ndarray[ShapeT, dtype[complexfloating[NBitT]]], / + ) -> ndarray[ShapeT, dtype[floating[NBitT]]]: ... @overload - def __abs__(self: _RealArrayT, /) -> _RealArrayT: ... + def __abs__[ArrayT: NDArray[bool_ | integer | floating | timedelta64 | object_]](self: ArrayT, /) -> ArrayT: ... 
- def __invert__(self: _IntegralArrayT, /) -> _IntegralArrayT: ... # noqa: PYI019 - def __neg__(self: _NumericArrayT, /) -> _NumericArrayT: ... # noqa: PYI019 - def __pos__(self: _NumericArrayT, /) -> _NumericArrayT: ... # noqa: PYI019 + def __invert__[ArrayT: NDArray[bool_ | integer | object_]](self: ArrayT, /) -> ArrayT: ... # noqa: PYI019 + def __neg__[ArrayT: _ArrayNumeric](self: ArrayT, /) -> ArrayT: ... # noqa: PYI019 + def __pos__[ArrayT: _ArrayNumeric](self: ArrayT, /) -> ArrayT: ... # noqa: PYI019 # Binary ops # TODO: Support the "1d @ 1d -> scalar" case @overload - def __matmul__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... + def __matmul__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload def __matmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... @overload - def __matmul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... + def __matmul__[ScalarT: number](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __matmul__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -2825,11 +2778,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __matmul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload # signature equivalent to __matmul__ - def __rmatmul__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... + def __rmatmul__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload def __rmatmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... @overload - def __rmatmul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... + def __rmatmul__[ScalarT: number](self: NDArray[np.bool], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... 
@overload def __rmatmul__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -2854,13 +2807,15 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __rmatmul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __mod__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ... + def __mod__[ScalarT: floating | integer]( + self: NDArray[ScalarT], other: int | np.bool, / + ) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload - def __mod__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ... + def __mod__[ScalarT: floating | integer](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload def __mod__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload - def __mod__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... + def __mod__[ScalarT: floating | integer](self: NDArray[np.bool], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __mod__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -2879,13 +2834,15 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __mod__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload # signature equivalent to __mod__ - def __rmod__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ... + def __rmod__[ScalarT: floating | integer]( + self: NDArray[ScalarT], other: int | np.bool, / + ) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload - def __rmod__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ... + def __rmod__[ScalarT: floating | integer](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... 
@overload def __rmod__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload - def __rmod__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... + def __rmod__[ScalarT: floating | integer](self: NDArray[np.bool], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __rmod__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -2904,13 +2861,19 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __rmod__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __divmod__(self: NDArray[_RealNumberT], rhs: int | np.bool, /) -> _2Tuple[ndarray[_ShapeT_co, dtype[_RealNumberT]]]: ... + def __divmod__[ScalarT: floating | integer]( + self: NDArray[ScalarT], rhs: int | np.bool, / + ) -> _2Tuple[ndarray[_ShapeT_co, dtype[ScalarT]]]: ... @overload - def __divmod__(self: NDArray[_RealNumberT], rhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[_RealNumberT]]: ... + def __divmod__[ScalarT: floating | integer]( + self: NDArray[ScalarT], rhs: _ArrayLikeBool_co, / + ) -> _2Tuple[NDArray[ScalarT]]: ... @overload def __divmod__(self: NDArray[np.bool], rhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[int8]]: ... @overload - def __divmod__(self: NDArray[np.bool], rhs: _ArrayLike[_RealNumberT], /) -> _2Tuple[NDArray[_RealNumberT]]: ... + def __divmod__[ScalarT: floating | integer]( + self: NDArray[np.bool], rhs: _ArrayLike[ScalarT], / + ) -> _2Tuple[NDArray[ScalarT]]: ... @overload def __divmod__(self: NDArray[float64], rhs: _ArrayLikeFloat64_co, /) -> _2Tuple[NDArray[float64]]: ... @overload @@ -2925,13 +2888,19 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __divmod__(self: NDArray[timedelta64], rhs: _ArrayLike[timedelta64], /) -> tuple[NDArray[int64], NDArray[timedelta64]]: ... 
@overload # signature equivalent to __divmod__ - def __rdivmod__(self: NDArray[_RealNumberT], lhs: int | np.bool, /) -> _2Tuple[ndarray[_ShapeT_co, dtype[_RealNumberT]]]: ... + def __rdivmod__[ScalarT: floating | integer]( + self: NDArray[ScalarT], lhs: int | np.bool, / + ) -> _2Tuple[ndarray[_ShapeT_co, dtype[ScalarT]]]: ... @overload - def __rdivmod__(self: NDArray[_RealNumberT], lhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[_RealNumberT]]: ... + def __rdivmod__[ScalarT: floating | integer]( + self: NDArray[ScalarT], lhs: _ArrayLikeBool_co, / + ) -> _2Tuple[NDArray[ScalarT]]: ... @overload def __rdivmod__(self: NDArray[np.bool], lhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[int8]]: ... @overload - def __rdivmod__(self: NDArray[np.bool], lhs: _ArrayLike[_RealNumberT], /) -> _2Tuple[NDArray[_RealNumberT]]: ... + def __rdivmod__[ScalarT: floating | integer]( + self: NDArray[np.bool], lhs: _ArrayLike[ScalarT], / + ) -> _2Tuple[NDArray[ScalarT]]: ... @overload def __rdivmod__(self: NDArray[float64], lhs: _ArrayLikeFloat64_co, /) -> _2Tuple[NDArray[float64]]: ... @overload @@ -2947,13 +2916,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__add__` @overload - def __add__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + def __add__[ScalarT: number](self: NDArray[ScalarT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload - def __add__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... + def __add__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload def __add__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... @overload - def __add__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... + def __add__[ScalarT: number](self: NDArray[np.bool], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... 
@overload def __add__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -2995,13 +2964,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__radd__` @overload # signature equivalent to __add__ - def __radd__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + def __radd__[ScalarT: number](self: NDArray[ScalarT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload - def __radd__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... + def __radd__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload def __radd__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... @overload - def __radd__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... + def __radd__[ScalarT: number](self: NDArray[np.bool], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __radd__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -3043,13 +3012,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__sub__` @overload - def __sub__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + def __sub__[ScalarT: number](self: NDArray[ScalarT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload - def __sub__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... + def __sub__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload def __sub__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ... @overload - def __sub__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... 
+ def __sub__[ScalarT: number](self: NDArray[np.bool], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... # type: ignore[overload-overlap] @overload def __sub__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -3081,13 +3050,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__rsub__` @overload - def __rsub__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + def __rsub__[ScalarT: number](self: NDArray[ScalarT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload - def __rsub__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... + def __rsub__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload def __rsub__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ... @overload - def __rsub__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... + def __rsub__[ScalarT: number](self: NDArray[np.bool], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... # type: ignore[overload-overlap] @overload def __rsub__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -3119,13 +3088,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__mul__` @overload - def __mul__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + def __mul__[ScalarT: number](self: NDArray[ScalarT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload - def __mul__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... + def __mul__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... 
@overload def __mul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... @overload - def __mul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... + def __mul__[ScalarT: number](self: NDArray[np.bool], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __mul__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -3161,13 +3130,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__rmul__` @overload # signature equivalent to __mul__ - def __rmul__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + def __rmul__[ScalarT: number](self: NDArray[ScalarT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload - def __rmul__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... + def __rmul__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload def __rmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... @overload - def __rmul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... + def __rmul__[ScalarT: number](self: NDArray[np.bool], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __rmul__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -3265,13 +3234,15 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__floordiv__` @overload - def __floordiv__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ... + def __floordiv__[ScalarT: integer | floating]( + self: NDArray[ScalarT], other: int | np.bool, / + ) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... 
@overload - def __floordiv__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ... + def __floordiv__[ScalarT: integer | floating](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload def __floordiv__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload - def __floordiv__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... + def __floordiv__[ScalarT: integer | floating](self: NDArray[np.bool], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __floordiv__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -3295,13 +3266,15 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__rfloordiv__` @overload - def __rfloordiv__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ... + def __rfloordiv__[ScalarT: integer | floating]( + self: NDArray[ScalarT], other: int | np.bool, / + ) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload - def __rfloordiv__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ... + def __rfloordiv__[ScalarT: integer | floating](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload def __rfloordiv__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload - def __rfloordiv__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... + def __rfloordiv__[ScalarT: integer | floating](self: NDArray[np.bool], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __rfloordiv__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... 
@overload @@ -3323,13 +3296,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__pow__` @overload - def __pow__(self: NDArray[_NumberT], other: int | np.bool, mod: None = None, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + def __pow__[ScalarT: number](self: NDArray[ScalarT], other: int | np.bool, mod: None = None, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload - def __pow__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[_NumberT]: ... + def __pow__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[ScalarT]: ... @overload def __pow__(self: NDArray[np.bool], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[int8]: ... @overload - def __pow__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], mod: None = None, /) -> NDArray[_NumberT]: ... + def __pow__[ScalarT: number](self: NDArray[np.bool], other: _ArrayLike[ScalarT], mod: None = None, /) -> NDArray[ScalarT]: ... @overload def __pow__(self: NDArray[float64], other: _ArrayLikeFloat64_co, mod: None = None, /) -> NDArray[float64]: ... @overload @@ -3357,13 +3330,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__rpow__` @overload - def __rpow__(self: NDArray[_NumberT], other: int | np.bool, mod: None = None, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + def __rpow__[ScalarT: number](self: NDArray[ScalarT], other: int | np.bool, mod: None = None, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload - def __rpow__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[_NumberT]: ... + def __rpow__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[ScalarT]: ... @overload def __rpow__(self: NDArray[np.bool], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[int8]: ... 
@overload - def __rpow__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], mod: None = None, /) -> NDArray[_NumberT]: ... + def __rpow__[ScalarT: number](self: NDArray[np.bool], other: _ArrayLike[ScalarT], mod: None = None, /) -> NDArray[ScalarT]: ... @overload def __rpow__(self: NDArray[float64], other: _ArrayLikeFloat64_co, mod: None = None, /) -> NDArray[float64]: ... @overload @@ -3510,136 +3483,138 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # += @overload # type: ignore[misc] - def __iadd__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... + def __iadd__[ArrayT: NDArray[bool_]](self: ArrayT, other: _ArrayLikeBool_co, /) -> ArrayT: ... @overload - def __iadd__(self: _ComplexFloatingArrayT, other: _ArrayLikeComplex_co, /) -> _ComplexFloatingArrayT: ... + def __iadd__[ArrayT: NDArray[complexfloating]](self: ArrayT, other: _ArrayLikeComplex_co, /) -> ArrayT: ... @overload - def __iadd__(self: _InexactArrayT, other: _ArrayLikeFloat_co, /) -> _InexactArrayT: ... + def __iadd__[ArrayT: NDArray[inexact]](self: ArrayT, other: _ArrayLikeFloat_co, /) -> ArrayT: ... @overload - def __iadd__(self: _NumberArrayT, other: _ArrayLikeInt_co, /) -> _NumberArrayT: ... + def __iadd__[ArrayT: NDArray[number]](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... @overload - def __iadd__(self: _TimeArrayT, other: _ArrayLikeTD64_co, /) -> _TimeArrayT: ... + def __iadd__[ArrayT: NDArray[datetime64 | timedelta64]](self: ArrayT, other: _ArrayLikeTD64_co, /) -> ArrayT: ... @overload - def __iadd__(self: _BytesArrayT, other: _ArrayLikeBytes_co, /) -> _BytesArrayT: ... + def __iadd__[ArrayT: NDArray[bytes_]](self: ArrayT, other: _ArrayLikeBytes_co, /) -> ArrayT: ... @overload - def __iadd__(self: _StringArrayT, other: _ArrayLikeStr_co | _ArrayLikeString_co, /) -> _StringArrayT: ... + def __iadd__[ArrayT: _ArrayString](self: ArrayT, other: _ArrayLikeStr_co | _ArrayLikeString_co, /) -> ArrayT: ... 
@overload - def __iadd__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + def __iadd__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... # -= @overload # type: ignore[misc] - def __isub__(self: _ComplexFloatingArrayT, other: _ArrayLikeComplex_co, /) -> _ComplexFloatingArrayT: ... + def __isub__[ArrayT: NDArray[complexfloating]](self: ArrayT, other: _ArrayLikeComplex_co, /) -> ArrayT: ... @overload - def __isub__(self: _InexactArrayT, other: _ArrayLikeFloat_co, /) -> _InexactArrayT: ... + def __isub__[ArrayT: NDArray[inexact]](self: ArrayT, other: _ArrayLikeFloat_co, /) -> ArrayT: ... @overload - def __isub__(self: _NumberArrayT, other: _ArrayLikeInt_co, /) -> _NumberArrayT: ... + def __isub__[ArrayT: NDArray[number]](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... @overload - def __isub__(self: _TimeArrayT, other: _ArrayLikeTD64_co, /) -> _TimeArrayT: ... + def __isub__[ArrayT: NDArray[datetime64 | timedelta64]](self: ArrayT, other: _ArrayLikeTD64_co, /) -> ArrayT: ... @overload - def __isub__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + def __isub__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... # *= @overload # type: ignore[misc] - def __imul__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... + def __imul__[ArrayT: NDArray[bool_]](self: ArrayT, other: _ArrayLikeBool_co, /) -> ArrayT: ... + @overload + def __imul__[ArrayT: NDArray[complexfloating]](self: ArrayT, other: _ArrayLikeComplex_co, /) -> ArrayT: ... @overload - def __imul__(self: _ComplexFloatingArrayT, other: _ArrayLikeComplex_co, /) -> _ComplexFloatingArrayT: ... + def __imul__[ArrayT: NDArray[inexact | timedelta64]](self: ArrayT, other: _ArrayLikeFloat_co, /) -> ArrayT: ... @overload - def __imul__(self: _InexactTimedeltaArrayT, other: _ArrayLikeFloat_co, /) -> _InexactTimedeltaArrayT: ... 
+ def __imul__[ArrayT: NDArray[number | character]](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... @overload - def __imul__(self: _NumberCharacterArrayT, other: _ArrayLikeInt_co, /) -> _NumberCharacterArrayT: ... + def __imul__[ArrayT: _ArrayString](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... @overload - def __imul__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + def __imul__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... # @= @overload # type: ignore[misc] - def __imatmul__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... + def __imatmul__[ArrayT: NDArray[bool_]](self: ArrayT, other: _ArrayLikeBool_co, /) -> ArrayT: ... @overload - def __imatmul__(self: _ComplexFloatingArrayT, other: _ArrayLikeComplex_co, /) -> _ComplexFloatingArrayT: ... + def __imatmul__[ArrayT: NDArray[complexfloating]](self: ArrayT, other: _ArrayLikeComplex_co, /) -> ArrayT: ... @overload - def __imatmul__(self: _InexactArrayT, other: _ArrayLikeFloat_co, /) -> _InexactArrayT: ... + def __imatmul__[ArrayT: NDArray[inexact]](self: ArrayT, other: _ArrayLikeFloat_co, /) -> ArrayT: ... @overload - def __imatmul__(self: _NumberArrayT, other: _ArrayLikeInt_co, /) -> _NumberArrayT: ... + def __imatmul__[ArrayT: NDArray[number]](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... @overload - def __imatmul__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + def __imatmul__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... # **= @overload # type: ignore[misc] - def __ipow__(self: _ComplexFloatingArrayT, other: _ArrayLikeComplex_co, /) -> _ComplexFloatingArrayT: ... + def __ipow__[ArrayT: NDArray[complexfloating]](self: ArrayT, other: _ArrayLikeComplex_co, /) -> ArrayT: ... @overload - def __ipow__(self: _InexactArrayT, other: _ArrayLikeFloat_co, /) -> _InexactArrayT: ... + def __ipow__[ArrayT: NDArray[inexact]](self: ArrayT, other: _ArrayLikeFloat_co, /) -> ArrayT: ... 
@overload - def __ipow__(self: _NumberArrayT, other: _ArrayLikeInt_co, /) -> _NumberArrayT: ... + def __ipow__[ArrayT: NDArray[number]](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... @overload - def __ipow__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + def __ipow__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... # /= @overload # type: ignore[misc] - def __itruediv__(self: _ComplexFloatingArrayT, other: _ArrayLikeComplex_co, /) -> _ComplexFloatingArrayT: ... + def __itruediv__[ArrayT: NDArray[complexfloating]](self: ArrayT, other: _ArrayLikeComplex_co, /) -> ArrayT: ... @overload - def __itruediv__(self: _InexactTimedeltaArrayT, other: _ArrayLikeFloat_co, /) -> _InexactTimedeltaArrayT: ... + def __itruediv__[ArrayT: NDArray[inexact | timedelta64]](self: ArrayT, other: _ArrayLikeFloat_co, /) -> ArrayT: ... @overload - def __itruediv__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + def __itruediv__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... # //= # keep in sync with `__imod__` @overload # type: ignore[misc] - def __ifloordiv__(self: _IntegerArrayT, other: _ArrayLikeInt_co, /) -> _IntegerArrayT: ... + def __ifloordiv__[ArrayT: NDArray[integer]](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... @overload - def __ifloordiv__(self: _FloatingTimedeltaArrayT, other: _ArrayLikeFloat_co, /) -> _FloatingTimedeltaArrayT: ... + def __ifloordiv__[ArrayT: NDArray[floating | timedelta64]](self: ArrayT, other: _ArrayLikeFloat_co, /) -> ArrayT: ... @overload - def __ifloordiv__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + def __ifloordiv__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... # %= # keep in sync with `__ifloordiv__` @overload # type: ignore[misc] - def __imod__(self: _IntegerArrayT, other: _ArrayLikeInt_co, /) -> _IntegerArrayT: ... 
+ def __imod__[ArrayT: NDArray[integer]](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... @overload - def __imod__(self: _FloatingArrayT, other: _ArrayLikeFloat_co, /) -> _FloatingArrayT: ... + def __imod__[ArrayT: NDArray[floating]](self: ArrayT, other: _ArrayLikeFloat_co, /) -> ArrayT: ... @overload - def __imod__(self: _TimedeltaArrayT, other: _ArrayLike[timedelta64], /) -> _TimedeltaArrayT: ... + def __imod__[ArrayT: NDArray[timedelta64]](self: ArrayT, other: _ArrayLike[timedelta64], /) -> ArrayT: ... @overload - def __imod__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + def __imod__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... # <<= # keep in sync with `__irshift__` @overload # type: ignore[misc] - def __ilshift__(self: _IntegerArrayT, other: _ArrayLikeInt_co, /) -> _IntegerArrayT: ... + def __ilshift__[ArrayT: NDArray[integer]](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... @overload - def __ilshift__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + def __ilshift__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... # >>= # keep in sync with `__ilshift__` @overload # type: ignore[misc] - def __irshift__(self: _IntegerArrayT, other: _ArrayLikeInt_co, /) -> _IntegerArrayT: ... + def __irshift__[ArrayT: NDArray[integer]](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... @overload - def __irshift__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + def __irshift__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... # &= # keep in sync with `__ixor__` and `__ior__` @overload # type: ignore[misc] - def __iand__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... + def __iand__[ArrayT: NDArray[bool_]](self: ArrayT, other: _ArrayLikeBool_co, /) -> ArrayT: ... @overload - def __iand__(self: _IntegerArrayT, other: _ArrayLikeInt_co, /) -> _IntegerArrayT: ... 
+ def __iand__[ArrayT: NDArray[integer]](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... @overload - def __iand__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + def __iand__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... # ^= # keep in sync with `__iand__` and `__ior__` @overload # type: ignore[misc] - def __ixor__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... + def __ixor__[ArrayT: NDArray[bool_]](self: ArrayT, other: _ArrayLikeBool_co, /) -> ArrayT: ... @overload - def __ixor__(self: _IntegerArrayT, other: _ArrayLikeInt_co, /) -> _IntegerArrayT: ... + def __ixor__[ArrayT: NDArray[integer]](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... @overload - def __ixor__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + def __ixor__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... # |= # keep in sync with `__iand__` and `__ixor__` @overload # type: ignore[misc] - def __ior__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... + def __ior__[ArrayT: NDArray[bool_]](self: ArrayT, other: _ArrayLikeBool_co, /) -> ArrayT: ... @overload - def __ior__(self: _IntegerArrayT, other: _ArrayLikeInt_co, /) -> _IntegerArrayT: ... + def __ior__[ArrayT: NDArray[integer]](self: ArrayT, other: _ArrayLikeInt_co, /) -> ArrayT: ... @overload - def __ior__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + def __ior__[ArrayT: NDArray[object_]](self: ArrayT, other: object, /) -> ArrayT: ... # def __dlpack__( @@ -3667,13 +3642,12 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): @abstractmethod def __new__(cls, /, *args: Any, **kwargs: Any) -> Self: ... - if sys.version_info >= (3, 12): - def __buffer__(self, flags: int, /) -> memoryview: ... + def __buffer__(self, flags: int, /) -> memoryview: ... @overload def __array__(self, dtype: None = None, /) -> ndarray[tuple[()], dtype[Self]]: ... 
@overload - def __array__(self, dtype: _DTypeT, /) -> ndarray[tuple[()], _DTypeT]: ... + def __array__[DTypeT: dtype](self, dtype: DTypeT, /) -> ndarray[tuple[()], DTypeT]: ... # @overload @@ -3699,37 +3673,37 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): # @overload - def __array_wrap__( + def __array_wrap__[ShapeT: _Shape, DTypeT: dtype]( self, - array: ndarray[_ShapeT, _DTypeT], + array: ndarray[ShapeT, DTypeT], context: tuple[ufunc, tuple[object, ...], int] | None, return_scalar: L[False], /, - ) -> ndarray[_ShapeT, _DTypeT]: ... + ) -> ndarray[ShapeT, DTypeT]: ... @overload - def __array_wrap__( + def __array_wrap__[ScalarT: generic]( self, - array: ndarray[tuple[()], dtype[_ScalarT]], + array: ndarray[tuple[()], dtype[ScalarT]], context: tuple[ufunc, tuple[object, ...], int] | None = None, return_scalar: L[True] = True, /, - ) -> _ScalarT: ... + ) -> ScalarT: ... @overload - def __array_wrap__( + def __array_wrap__[ShapeT: tuple[int, *tuple[int, ...]], DTypeT: dtype]( self, - array: ndarray[_Shape1T, _DTypeT], + array: ndarray[ShapeT, DTypeT], context: tuple[ufunc, tuple[object, ...], int] | None = None, return_scalar: L[True] = True, /, - ) -> ndarray[_Shape1T, _DTypeT]: ... + ) -> ndarray[ShapeT, DTypeT]: ... @overload - def __array_wrap__( + def __array_wrap__[ShapeT: _Shape, ScalarT: generic]( self, - array: ndarray[_ShapeT, dtype[_ScalarT]], + array: ndarray[ShapeT, dtype[ScalarT]], context: tuple[ufunc, tuple[object, ...], int] | None = None, return_scalar: L[True] = True, /, - ) -> _ScalarT | ndarray[_ShapeT, dtype[_ScalarT]]: ... + ) -> ScalarT | ndarray[ShapeT, dtype[ScalarT]]: ... @property def base(self) -> None: ... 
@@ -3779,15 +3753,15 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): # @overload - def astype( + def astype[ScalarT: generic]( self, /, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], order: _OrderKACF = "K", casting: _CastingKind = "unsafe", subok: builtins.bool = True, copy: builtins.bool | _CopyMode = True, - ) -> _ScalarT: ... + ) -> ScalarT: ... @overload def astype( self, @@ -3804,12 +3778,12 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): @overload def view(self, type: type[ndarray] = ...) -> Self: ... @overload - def view(self, /, dtype: _DTypeLike[_ScalarT], type: type[ndarray] = ...) -> _ScalarT: ... + def view[ScalarT: generic](self, /, dtype: _DTypeLike[ScalarT], type: type[ndarray] = ...) -> ScalarT: ... @overload def view(self, /, dtype: DTypeLike, type: type[ndarray] = ...) -> Incomplete: ... @overload - def getfield(self, /, dtype: _DTypeLike[_ScalarT], offset: SupportsIndex = 0) -> _ScalarT: ... + def getfield[ScalarT: generic](self, /, dtype: _DTypeLike[ScalarT], offset: SupportsIndex = 0) -> ScalarT: ... @overload def getfield(self, /, dtype: DTypeLike, offset: SupportsIndex = 0) -> Incomplete: ... @@ -3832,30 +3806,30 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): mode: _ModeKind = "raise", ) -> NDArray[Self]: ... @overload - def take( + def take[ArrayT: ndarray]( self, indices: _ArrayLikeInt_co, /, axis: SupportsIndex | None = None, *, - out: _ArrayT, + out: ArrayT, mode: _ModeKind = "raise", - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def take( + def take[ArrayT: ndarray]( self, indices: _ArrayLikeInt_co, /, axis: SupportsIndex | None, - out: _ArrayT, + out: ArrayT, mode: _ModeKind = "raise", - ) -> _ArrayT: ... + ) -> ArrayT: ... def repeat(self, repeats: _ArrayLikeInt_co, /, axis: SupportsIndex | None = None) -> ndarray[tuple[int], dtype[Self]]: ... def flatten(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], dtype[Self]]: ... 
def ravel(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], dtype[Self]]: ... - @overload # (() | []) + @overload # (()) def reshape( self, shape: tuple[()] | list[Never], @@ -3864,15 +3838,15 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): order: _OrderACF = "C", copy: builtins.bool | None = None, ) -> Self: ... - @overload # ((1, *(1, ...))@_ShapeT) - def reshape( + @overload # (ShapeT: (index, ...)) + def reshape[ShapeT: tuple[int, *tuple[int, ...]]]( self, - shape: _1NShapeT, + shape: ShapeT, /, *, order: _OrderACF = "C", copy: builtins.bool | None = None, - ) -> ndarray[_1NShapeT, dtype[Self]]: ... + ) -> ndarray[ShapeT, dtype[Self]]: ... @overload # (Sequence[index, ...]) # not recommended def reshape( self, @@ -3881,7 +3855,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): *, order: _OrderACF = "C", copy: builtins.bool | None = None, - ) -> Self | ndarray[tuple[L[1], ...], dtype[Self]]: ... + ) -> NDArray[Self] | Any: ... @overload # _(index) def reshape( self, @@ -3890,7 +3864,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): *, order: _OrderACF = "C", copy: builtins.bool | None = None, - ) -> ndarray[tuple[L[1]], dtype[Self]]: ... + ) -> ndarray[tuple[int], dtype[Self]]: ... @overload # _(index, index) def reshape( self, @@ -3900,7 +3874,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): *, order: _OrderACF = "C", copy: builtins.bool | None = None, - ) -> ndarray[tuple[L[1], L[1]], dtype[Self]]: ... + ) -> ndarray[tuple[int, int], dtype[Self]]: ... @overload # _(index, index, index) def reshape( self, @@ -3911,7 +3885,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): *, order: _OrderACF = "C", copy: builtins.bool | None = None, - ) -> ndarray[tuple[L[1], L[1], L[1]], dtype[Self]]: ... + ) -> ndarray[tuple[int, int, int], dtype[Self]]: ... 
@overload # _(index, index, index, index) def reshape( self, @@ -3923,7 +3897,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): *, order: _OrderACF = "C", copy: builtins.bool | None = None, - ) -> ndarray[tuple[L[1], L[1], L[1], L[1]], dtype[Self]]: ... + ) -> ndarray[tuple[int, int, int, int], dtype[Self]]: ... @overload # _(index, index, index, index, index, *index) # ndim >= 5 def reshape( self, @@ -3936,7 +3910,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): *sizes6_: SupportsIndex, order: _OrderACF = "C", copy: builtins.bool | None = None, - ) -> ndarray[tuple[L[1], L[1], L[1], L[1], L[1], *tuple[L[1], ...]], dtype[Self]]: ... + ) -> ndarray[tuple[int, int, int, int, int, *tuple[int, ...]], dtype[Self]]: ... def squeeze(self, axis: L[0] | tuple[()] | None = ...) -> Self: ... def transpose(self, axes: tuple[()] | None = ..., /) -> Self: ... @@ -3952,25 +3926,25 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True ) -> np.bool: ... @overload - def all( + def all[ScalarT: generic]( self, /, axis: L[0, -1] | tuple[()] | None, - out: ndarray[tuple[()], dtype[_ScalarT]], + out: ndarray[tuple[()], dtype[ScalarT]], keepdims: SupportsIndex = False, *, where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True, - ) -> _ScalarT: ... + ) -> ScalarT: ... @overload - def all( + def all[ScalarT: generic]( self, /, axis: L[0, -1] | tuple[()] | None = None, *, - out: ndarray[tuple[()], dtype[_ScalarT]], + out: ndarray[tuple[()], dtype[ScalarT]], keepdims: SupportsIndex = False, where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True, - ) -> _ScalarT: ... + ) -> ScalarT: ... @overload def any( @@ -3983,31 +3957,31 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True ) -> np.bool: ... 
@overload - def any( + def any[ScalarT: generic]( self, /, axis: L[0, -1] | tuple[()] | None, - out: ndarray[tuple[()], dtype[_ScalarT]], + out: ndarray[tuple[()], dtype[ScalarT]], keepdims: SupportsIndex = False, *, where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True, - ) -> _ScalarT: ... + ) -> ScalarT: ... @overload - def any( + def any[ScalarT: generic]( self, /, axis: L[0, -1] | tuple[()] | None = None, *, - out: ndarray[tuple[()], dtype[_ScalarT]], + out: ndarray[tuple[()], dtype[ScalarT]], keepdims: SupportsIndex = False, where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True, - ) -> _ScalarT: ... + ) -> ScalarT: ... # Keep `dtype` at the bottom to avoid name conflicts with `np.dtype` @property def dtype(self) -> _dtype[Self]: ... -class number(generic[_NumberItemT_co], Generic[_NBit, _NumberItemT_co]): +class number(generic[_NumberItemT_co], Generic[_NBitT, _NumberItemT_co]): @abstractmethod # `SupportsIndex | str | bytes` equivs `_ConvertibleToInt & _ConvertibleToFloat` def __new__(cls, value: SupportsIndex | str | bytes = 0, /) -> Self: ... def __class_getitem__(cls, item: Any, /) -> GenericAlias: ... @@ -4095,7 +4069,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): def __invert__(self, /) -> np.bool: ... @overload - def __add__(self, other: _NumberT, /) -> _NumberT: ... + def __add__[ScalarT: number](self, other: ScalarT, /) -> ScalarT: ... @overload def __add__(self, other: builtins.bool | bool_, /) -> bool_: ... @overload @@ -4106,7 +4080,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): def __add__(self, other: complex, /) -> complex128: ... @overload - def __radd__(self, other: _NumberT, /) -> _NumberT: ... + def __radd__[ScalarT: number](self, other: ScalarT, /) -> ScalarT: ... @overload def __radd__(self, other: builtins.bool, /) -> bool_: ... 
@overload @@ -4117,7 +4091,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): def __radd__(self, other: complex, /) -> complex128: ... @overload - def __sub__(self, other: _NumberT, /) -> _NumberT: ... + def __sub__[ScalarT: number](self, other: ScalarT, /) -> ScalarT: ... @overload def __sub__(self, other: int, /) -> int_: ... @overload @@ -4126,7 +4100,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): def __sub__(self, other: complex, /) -> complex128: ... @overload - def __rsub__(self, other: _NumberT, /) -> _NumberT: ... + def __rsub__[ScalarT: number](self, other: ScalarT, /) -> ScalarT: ... @overload def __rsub__(self, other: int, /) -> int_: ... @overload @@ -4135,7 +4109,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): def __rsub__(self, other: complex, /) -> complex128: ... @overload - def __mul__(self, other: _NumberT, /) -> _NumberT: ... + def __mul__[ScalarT: number](self, other: ScalarT, /) -> ScalarT: ... @overload def __mul__(self, other: builtins.bool | bool_, /) -> bool_: ... @overload @@ -4146,7 +4120,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): def __mul__(self, other: complex, /) -> complex128: ... @overload - def __rmul__(self, other: _NumberT, /) -> _NumberT: ... + def __rmul__[ScalarT: number](self, other: ScalarT, /) -> ScalarT: ... @overload def __rmul__(self, other: builtins.bool, /) -> bool_: ... @overload @@ -4157,7 +4131,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): def __rmul__(self, other: complex, /) -> complex128: ... @overload - def __pow__(self, other: _NumberT, mod: None = None, /) -> _NumberT: ... + def __pow__[ScalarT: number](self, other: ScalarT, mod: None = None, /) -> ScalarT: ... @overload def __pow__(self, other: builtins.bool | bool_, mod: None = None, /) -> int8: ... @overload @@ -4168,7 +4142,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): def __pow__(self, other: complex, mod: None = None, /) -> complex128: ... 
@overload - def __rpow__(self, other: _NumberT, mod: None = None, /) -> _NumberT: ... + def __rpow__[ScalarT: number](self, other: ScalarT, mod: None = None, /) -> ScalarT: ... @overload def __rpow__(self, other: builtins.bool, mod: None = None, /) -> int8: ... @overload @@ -4179,21 +4153,21 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): def __rpow__(self, other: complex, mod: None = None, /) -> complex128: ... @overload - def __truediv__(self, other: _InexactT, /) -> _InexactT: ... + def __truediv__[ScalarT: inexact](self, other: ScalarT, /) -> ScalarT: ... @overload def __truediv__(self, other: float | integer | bool_, /) -> float64: ... @overload def __truediv__(self, other: complex, /) -> complex128: ... @overload - def __rtruediv__(self, other: _InexactT, /) -> _InexactT: ... + def __rtruediv__[ScalarT: inexact](self, other: ScalarT, /) -> ScalarT: ... @overload def __rtruediv__(self, other: float | integer, /) -> float64: ... @overload def __rtruediv__(self, other: complex, /) -> complex128: ... @overload - def __floordiv__(self, other: _RealNumberT, /) -> _RealNumberT: ... + def __floordiv__[ScalarT: integer | floating](self, other: ScalarT, /) -> ScalarT: ... @overload def __floordiv__(self, other: builtins.bool | bool_, /) -> int8: ... @overload @@ -4202,7 +4176,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): def __floordiv__(self, other: float, /) -> float64: ... @overload - def __rfloordiv__(self, other: _RealNumberT, /) -> _RealNumberT: ... + def __rfloordiv__[ScalarT: integer | floating](self, other: ScalarT, /) -> ScalarT: ... @overload def __rfloordiv__(self, other: builtins.bool, /) -> int8: ... @overload @@ -4212,7 +4186,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): # keep in sync with __floordiv__ @overload - def __mod__(self, other: _RealNumberT, /) -> _RealNumberT: ... + def __mod__[ScalarT: integer | floating](self, other: ScalarT, /) -> ScalarT: ... 
@overload def __mod__(self, other: builtins.bool | bool_, /) -> int8: ... @overload @@ -4222,7 +4196,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): # keep in sync with __rfloordiv__ @overload - def __rmod__(self, other: _RealNumberT, /) -> _RealNumberT: ... + def __rmod__[ScalarT: integer | floating](self, other: ScalarT, /) -> ScalarT: ... @overload def __rmod__(self, other: builtins.bool, /) -> int8: ... @overload @@ -4232,7 +4206,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): # keep in sync with __mod__ @overload - def __divmod__(self, other: _RealNumberT, /) -> _2Tuple[_RealNumberT]: ... + def __divmod__[ScalarT: integer | floating](self, other: ScalarT, /) -> _2Tuple[ScalarT]: ... @overload def __divmod__(self, other: builtins.bool | bool_, /) -> _2Tuple[int8]: ... @overload @@ -4242,7 +4216,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): # keep in sync with __rmod__ @overload - def __rdivmod__(self, other: _RealNumberT, /) -> _2Tuple[_RealNumberT]: ... + def __rdivmod__[ScalarT: integer | floating](self, other: ScalarT, /) -> _2Tuple[ScalarT]: ... @overload def __rdivmod__(self, other: builtins.bool, /) -> _2Tuple[int8]: ... @overload @@ -4251,14 +4225,14 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): def __rdivmod__(self, other: float, /) -> _2Tuple[float64]: ... @overload - def __lshift__(self, other: _IntegerT, /) -> _IntegerT: ... + def __lshift__[ScalarT: integer](self, other: ScalarT, /) -> ScalarT: ... @overload def __lshift__(self, other: builtins.bool | bool_, /) -> int8: ... @overload def __lshift__(self, other: int, /) -> int_: ... @overload - def __rlshift__(self, other: _IntegerT, /) -> _IntegerT: ... + def __rlshift__[ScalarT: integer](self, other: ScalarT, /) -> ScalarT: ... @overload def __rlshift__(self, other: builtins.bool, /) -> int8: ... 
@overload @@ -4266,7 +4240,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): # keep in sync with __lshift__ @overload - def __rshift__(self, other: _IntegerT, /) -> _IntegerT: ... + def __rshift__[ScalarT: integer](self, other: ScalarT, /) -> ScalarT: ... @overload def __rshift__(self, other: builtins.bool | bool_, /) -> int8: ... @overload @@ -4274,7 +4248,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): # keep in sync with __rlshift__ @overload - def __rrshift__(self, other: _IntegerT, /) -> _IntegerT: ... + def __rrshift__[ScalarT: integer](self, other: ScalarT, /) -> ScalarT: ... @overload def __rrshift__(self, other: builtins.bool, /) -> int8: ... @overload @@ -4289,13 +4263,13 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @overload def __and__(self, other: builtins.bool | np.bool, /) -> np.bool: ... @overload - def __and__(self, other: _IntegerT, /) -> _IntegerT: ... + def __and__[ScalarT: integer](self, other: ScalarT, /) -> ScalarT: ... @overload def __and__(self, other: int, /) -> np.bool | intp: ... __rand__ = __and__ @overload - def __xor__(self: np.bool[L[False]], other: _BoolItemT | np.bool[_BoolItemT], /) -> np.bool[_BoolItemT]: ... + def __xor__[ItemT: builtins.bool](self: np.bool[L[False]], other: ItemT | np.bool[ItemT], /) -> np.bool[ItemT]: ... @overload def __xor__(self: np.bool[L[True]], other: L[True] | np.bool[L[True]], /) -> np.bool[L[False]]: ... @overload @@ -4303,7 +4277,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @overload def __xor__(self, other: builtins.bool | np.bool, /) -> np.bool: ... @overload - def __xor__(self, other: _IntegerT, /) -> _IntegerT: ... + def __xor__[ScalarT: integer](self, other: ScalarT, /) -> ScalarT: ... @overload def __xor__(self, other: int, /) -> np.bool | intp: ... __rxor__ = __xor__ @@ -4317,7 +4291,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @overload def __or__(self, other: builtins.bool | np.bool, /) -> np.bool: ... 
@overload - def __or__(self, other: _IntegerT, /) -> _IntegerT: ... + def __or__[ScalarT: integer](self, other: ScalarT, /) -> ScalarT: ... @overload def __or__(self, other: int, /) -> np.bool | intp: ... __ror__ = __or__ @@ -4350,7 +4324,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @overload def __ge__(self, other: _SupportsLE, /) -> bool_: ... -# NOTE: This should _not_ be `Final` or a `TypeAlias` +# NOTE: This should _not_ be `Final[_]`, `_: TypeAlias`, or `type _` bool_ = bool # NOTE: The `object_` constructor returns the passed object, so instances with type @@ -4362,13 +4336,13 @@ class object_(_RealMixin, generic): @overload def __new__(cls, value: None = None, /) -> None: ... # type: ignore[misc] @overload - def __new__(cls, value: _AnyStr, /) -> _AnyStr: ... # type: ignore[misc] + def __new__[AnyStrT: (LiteralString, str, bytes)](cls, value: AnyStrT, /) -> AnyStrT: ... # type: ignore[misc] @overload - def __new__(cls, value: ndarray[_ShapeT, Any], /) -> ndarray[_ShapeT, dtype[Self]]: ... # type: ignore[misc] + def __new__[ShapeT: _Shape](cls, value: ndarray[ShapeT, Any], /) -> ndarray[ShapeT, dtype[Self]]: ... # type: ignore[misc] @overload def __new__(cls, value: SupportsLenAndGetItem[object], /) -> NDArray[Self]: ... # type: ignore[misc] @overload - def __new__(cls, value: _T, /) -> _T: ... # type: ignore[misc] + def __new__[T](cls, value: T, /) -> T: ... # type: ignore[misc] @overload # catch-all def __new__(cls, value: Any = ..., /) -> object | NDArray[Self]: ... # type: ignore[misc] @@ -4376,10 +4350,9 @@ class object_(_RealMixin, generic): def __abs__(self, /) -> object_: ... # this affects NDArray[object_].__abs__ def __call__(self, /, *args: object, **kwargs: object) -> Any: ... - if sys.version_info >= (3, 12): - def __release_buffer__(self, buffer: memoryview, /) -> None: ... + def __release_buffer__(self, buffer: memoryview, /) -> None: ... 
-class integer(_IntegralMixin, _RoundMixin, number[_NBit, int]): +class integer(_IntegralMixin, _RoundMixin, number[_NBitT, int]): @abstractmethod def __new__(cls, value: _ConvertibleToInt = 0, /) -> Self: ... @@ -4419,7 +4392,7 @@ class integer(_IntegralMixin, _RoundMixin, number[_NBit, int]): def __xor__(self, other: _IntLike_co, /) -> integer: ... def __rxor__(self, other: _IntLike_co, /) -> integer: ... -class signedinteger(integer[_NBit]): +class signedinteger(integer[_NBitT]): def __new__(cls, value: _ConvertibleToInt = 0, /) -> Self: ... # arithmetic ops @@ -4657,7 +4630,7 @@ int_ = intp long = signedinteger[_NBitLong] longlong = signedinteger[_NBitLongLong] -class unsignedinteger(integer[_NBit1]): +class unsignedinteger(integer[_NBitT1]): def __new__(cls, value: _ConvertibleToInt = 0, /) -> Self: ... # arithmetic ops @@ -4902,24 +4875,24 @@ class unsignedinteger(integer[_NBit1]): @overload def __ror__(self, other: signedinteger, /) -> signedinteger: ... -uint8: TypeAlias = unsignedinteger[_8Bit] -uint16: TypeAlias = unsignedinteger[_16Bit] -uint32: TypeAlias = unsignedinteger[_32Bit] -uint64: TypeAlias = unsignedinteger[_64Bit] +uint8 = unsignedinteger[_8Bit] +uint16 = unsignedinteger[_16Bit] +uint32 = unsignedinteger[_32Bit] +uint64 = unsignedinteger[_64Bit] -ubyte: TypeAlias = unsignedinteger[_NBitByte] -ushort: TypeAlias = unsignedinteger[_NBitShort] -uintc: TypeAlias = unsignedinteger[_NBitIntC] -uintp: TypeAlias = unsignedinteger[_NBitIntP] -uint: TypeAlias = uintp -ulong: TypeAlias = unsignedinteger[_NBitLong] -ulonglong: TypeAlias = unsignedinteger[_NBitLongLong] +ubyte = unsignedinteger[_NBitByte] +ushort = unsignedinteger[_NBitShort] +uintc = unsignedinteger[_NBitIntC] +uintp = unsignedinteger[_NBitIntP] +uint = uintp +ulong = unsignedinteger[_NBitLong] +ulonglong = unsignedinteger[_NBitLongLong] -class inexact(number[_NBit, _InexactItemT_co], Generic[_NBit, _InexactItemT_co]): +class inexact(number[_NBitT, _InexactItemT_co], Generic[_NBitT, 
_InexactItemT_co]): @abstractmethod def __new__(cls, value: _ConvertibleToFloat | None = 0, /) -> Self: ... -class floating(_RealMixin, _RoundMixin, inexact[_NBit1, float]): +class floating(_RealMixin, _RoundMixin, inexact[_NBitT1, float]): def __new__(cls, value: _ConvertibleToFloat | None = 0, /) -> Self: ... # arithmetic ops @@ -5072,8 +5045,8 @@ class floating(_RealMixin, _RoundMixin, inexact[_NBit1, float]): def is_integer(self, /) -> builtins.bool: ... def as_integer_ratio(self, /) -> tuple[int, int]: ... -float16: TypeAlias = floating[_16Bit] -float32: TypeAlias = floating[_32Bit] +float16 = floating[_16Bit] +float32 = floating[_32Bit] # either a C `double`, `float`, or `longdouble` class float64(floating[_64Bit], float): # type: ignore[misc] @@ -5103,7 +5076,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __add__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... @overload - def __add__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __add__(self, other: complexfloating[_NBitT1, _NBitT2], /) -> complexfloating[_NBitT1 | _64Bit, _NBitT2 | _64Bit]: ... @overload def __add__(self, other: complex, /) -> float64 | complex128: ... @@ -5112,7 +5085,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __radd__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... # type: ignore[misc] @overload - def __radd__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __radd__(self, other: complexfloating[_NBitT1, _NBitT2], /) -> complexfloating[_NBitT1 | _64Bit, _NBitT2 | _64Bit]: ... @overload def __radd__(self, other: complex, /) -> float64 | complex128: ... @@ -5121,7 +5094,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __sub__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... 
@overload - def __sub__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __sub__(self, other: complexfloating[_NBitT1, _NBitT2], /) -> complexfloating[_NBitT1 | _64Bit, _NBitT2 | _64Bit]: ... @overload def __sub__(self, other: complex, /) -> float64 | complex128: ... @@ -5130,7 +5103,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __rsub__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... # type: ignore[misc] @overload - def __rsub__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __rsub__(self, other: complexfloating[_NBitT1, _NBitT2], /) -> complexfloating[_NBitT1 | _64Bit, _NBitT2 | _64Bit]: ... @overload def __rsub__(self, other: complex, /) -> float64 | complex128: ... @@ -5139,7 +5112,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __mul__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... @overload - def __mul__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __mul__(self, other: complexfloating[_NBitT1, _NBitT2], /) -> complexfloating[_NBitT1 | _64Bit, _NBitT2 | _64Bit]: ... @overload def __mul__(self, other: complex, /) -> float64 | complex128: ... @@ -5148,7 +5121,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __rmul__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... # type: ignore[misc] @overload - def __rmul__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __rmul__(self, other: complexfloating[_NBitT1, _NBitT2], /) -> complexfloating[_NBitT1 | _64Bit, _NBitT2 | _64Bit]: ... @overload def __rmul__(self, other: complex, /) -> float64 | complex128: ... 
@@ -5157,7 +5130,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __truediv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... @overload - def __truediv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __truediv__(self, other: complexfloating[_NBitT1, _NBitT2], /) -> complexfloating[_NBitT1 | _64Bit, _NBitT2 | _64Bit]: ... @overload def __truediv__(self, other: complex, /) -> float64 | complex128: ... @@ -5166,7 +5139,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __rtruediv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... # type: ignore[misc] @overload - def __rtruediv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __rtruediv__(self, other: complexfloating[_NBitT1, _NBitT2], /) -> complexfloating[_NBitT1 | _64Bit, _NBitT2 | _64Bit]: ... @overload def __rtruediv__(self, other: complex, /) -> float64 | complex128: ... @@ -5175,7 +5148,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __floordiv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... @overload - def __floordiv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __floordiv__(self, other: complexfloating[_NBitT1, _NBitT2], /) -> complexfloating[_NBitT1 | _64Bit, _NBitT2 | _64Bit]: ... @overload def __floordiv__(self, other: complex, /) -> float64 | complex128: ... @@ -5184,7 +5157,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __rfloordiv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... @overload - def __rfloordiv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... 
+ def __rfloordiv__(self, other: complexfloating[_NBitT1, _NBitT2], /) -> complexfloating[_NBitT1 | _64Bit, _NBitT2 | _64Bit]: ... @overload def __rfloordiv__(self, other: complex, /) -> float64 | complex128: ... @@ -5194,8 +5167,8 @@ class float64(floating[_64Bit], float): # type: ignore[misc] def __pow__(self, other: complexfloating[_64Bit, _64Bit], mod: None = None, /) -> complex128: ... @overload def __pow__( - self, other: complexfloating[_NBit1, _NBit2], mod: None = None, / - ) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + self, other: complexfloating[_NBitT1, _NBitT2], mod: None = None, / + ) -> complexfloating[_NBitT1 | _64Bit, _NBitT2 | _64Bit]: ... @overload def __pow__(self, other: complex, mod: None = None, /) -> float64 | complex128: ... @@ -5205,8 +5178,8 @@ class float64(floating[_64Bit], float): # type: ignore[misc] def __rpow__(self, other: complexfloating[_64Bit, _64Bit], mod: None = None, /) -> complex128: ... # type: ignore[misc] @overload def __rpow__( - self, other: complexfloating[_NBit1, _NBit2], mod: None = None, / - ) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + self, other: complexfloating[_NBitT1, _NBitT2], mod: None = None, / + ) -> complexfloating[_NBitT1 | _64Bit, _NBitT2 | _64Bit]: ... @overload def __rpow__(self, other: complex, mod: None = None, /) -> float64 | complex128: ... @@ -5216,16 +5189,16 @@ class float64(floating[_64Bit], float): # type: ignore[misc] def __divmod__(self, other: _Float64_co, /) -> _2Tuple[float64]: ... def __rdivmod__(self, other: _Float64_co, /) -> _2Tuple[float64]: ... # type: ignore[misc] -half: TypeAlias = float16 -single: TypeAlias = float32 -double: TypeAlias = float64 -longdouble: TypeAlias = floating[_NBitLongDouble] +half = float16 +single = float32 +double = float64 +longdouble = floating[_NBitLongDouble] # The main reason for `complexfloating` having two typevars is cosmetic. 
# It is used to clarify why `complex128`s precision is `_64Bit`, the latter # describing the two 64 bit floats representing its real and imaginary component -class complexfloating(inexact[_NBit1, complex], Generic[_NBit1, _NBit2]): +class complexfloating(inexact[_NBitT1, complex], Generic[_NBitT1, _NBitT2]): @overload def __new__( cls, @@ -5237,91 +5210,101 @@ class complexfloating(inexact[_NBit1, complex], Generic[_NBit1, _NBit2]): def __new__(cls, real: _ConvertibleToComplex | None = 0, /) -> Self: ... @property - def real(self) -> floating[_NBit1]: ... + def real(self) -> floating[_NBitT1]: ... @property - def imag(self) -> floating[_NBit2]: ... + def imag(self) -> floating[_NBitT2]: ... # NOTE: `__complex__` is technically defined in the concrete subtypes def __complex__(self, /) -> complex: ... - def __abs__(self, /) -> floating[_NBit1 | _NBit2]: ... # type: ignore[override] + def __abs__(self, /) -> floating[_NBitT1 | _NBitT2]: ... # type: ignore[override] @overload # type: ignore[override] - def __add__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... + def __add__(self, other: _Complex64_co, /) -> complexfloating[_NBitT1, _NBitT2]: ... @overload - def __add__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... + def __add__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBitT1, _NBitT2] | complex128: ... @overload - def __add__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + def __add__[NBitT: NBitBase](self, other: number[NBitT], /) -> complexfloating[_NBitT1, _NBitT2] | complexfloating[NBitT]: ... @overload # type: ignore[override] - def __radd__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... + def __radd__(self, other: _Complex64_co, /) -> complexfloating[_NBitT1, _NBitT2]: ... @overload - def __radd__(self, other: complex, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... 
+ def __radd__(self, other: complex, /) -> complexfloating[_NBitT1, _NBitT2] | complex128: ... @overload - def __radd__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + def __radd__[NBitT: NBitBase]( + self, other: number[NBitT], / + ) -> complexfloating[_NBitT1, _NBitT2] | complexfloating[NBitT]: ... @overload # type: ignore[override] - def __sub__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... + def __sub__(self, other: _Complex64_co, /) -> complexfloating[_NBitT1, _NBitT2]: ... @overload - def __sub__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... + def __sub__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBitT1, _NBitT2] | complex128: ... @overload - def __sub__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + def __sub__[NBitT: NBitBase](self, other: number[NBitT], /) -> complexfloating[_NBitT1, _NBitT2] | complexfloating[NBitT]: ... @overload # type: ignore[override] - def __rsub__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... + def __rsub__(self, other: _Complex64_co, /) -> complexfloating[_NBitT1, _NBitT2]: ... @overload - def __rsub__(self, other: complex, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... + def __rsub__(self, other: complex, /) -> complexfloating[_NBitT1, _NBitT2] | complex128: ... @overload - def __rsub__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + def __rsub__[NBitT: NBitBase]( + self, other: number[NBitT], / + ) -> complexfloating[_NBitT1, _NBitT2] | complexfloating[NBitT]: ... @overload # type: ignore[override] - def __mul__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... + def __mul__(self, other: _Complex64_co, /) -> complexfloating[_NBitT1, _NBitT2]: ... 
@overload - def __mul__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... + def __mul__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBitT1, _NBitT2] | complex128: ... @overload - def __mul__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + def __mul__[NBitT: NBitBase](self, other: number[NBitT], /) -> complexfloating[_NBitT1, _NBitT2] | complexfloating[NBitT]: ... @overload # type: ignore[override] - def __rmul__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... + def __rmul__(self, other: _Complex64_co, /) -> complexfloating[_NBitT1, _NBitT2]: ... @overload - def __rmul__(self, other: complex, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... + def __rmul__(self, other: complex, /) -> complexfloating[_NBitT1, _NBitT2] | complex128: ... @overload - def __rmul__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + def __rmul__[NBitT: NBitBase]( + self, other: number[NBitT], / + ) -> complexfloating[_NBitT1, _NBitT2] | complexfloating[NBitT]: ... @overload # type: ignore[override] - def __truediv__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... + def __truediv__(self, other: _Complex64_co, /) -> complexfloating[_NBitT1, _NBitT2]: ... @overload - def __truediv__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... + def __truediv__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBitT1, _NBitT2] | complex128: ... @overload - def __truediv__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + def __truediv__[NBitT: NBitBase]( + self, other: number[NBitT], / + ) -> complexfloating[_NBitT1, _NBitT2] | complexfloating[NBitT]: ... 
@overload # type: ignore[override] - def __rtruediv__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... + def __rtruediv__(self, other: _Complex64_co, /) -> complexfloating[_NBitT1, _NBitT2]: ... @overload - def __rtruediv__(self, other: complex, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... + def __rtruediv__(self, other: complex, /) -> complexfloating[_NBitT1, _NBitT2] | complex128: ... @overload - def __rtruediv__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + def __rtruediv__[NBitT: NBitBase]( + self, other: number[NBitT], / + ) -> complexfloating[_NBitT1, _NBitT2] | complexfloating[NBitT]: ... @overload # type: ignore[override] - def __pow__(self, other: _Complex64_co, mod: None = None, /) -> complexfloating[_NBit1, _NBit2]: ... + def __pow__(self, other: _Complex64_co, mod: None = None, /) -> complexfloating[_NBitT1, _NBitT2]: ... @overload def __pow__( self, other: complex | float64 | complex128, mod: None = None, / - ) -> complexfloating[_NBit1, _NBit2] | complex128: ... + ) -> complexfloating[_NBitT1, _NBitT2] | complex128: ... @overload - def __pow__( - self, other: number[_NBit], mod: None = None, / - ) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + def __pow__[NBitT: NBitBase]( + self, other: number[NBitT], mod: None = None, / + ) -> complexfloating[_NBitT1, _NBitT2] | complexfloating[NBitT]: ... @overload # type: ignore[override] - def __rpow__(self, other: _Complex64_co, mod: None = None, /) -> complexfloating[_NBit1, _NBit2]: ... + def __rpow__(self, other: _Complex64_co, mod: None = None, /) -> complexfloating[_NBitT1, _NBitT2]: ... @overload - def __rpow__(self, other: complex, mod: None = None, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... + def __rpow__(self, other: complex, mod: None = None, /) -> complexfloating[_NBitT1, _NBitT2] | complex128: ... 
@overload - def __rpow__( - self, other: number[_NBit], mod: None = None, / - ) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + def __rpow__[NBitT: NBitBase]( + self, other: number[NBitT], mod: None = None, / + ) -> complexfloating[_NBitT1, _NBitT2] | complexfloating[NBitT]: ... -complex64: TypeAlias = complexfloating[_32Bit] +complex64 = complexfloating[_32Bit] class complex128(complexfloating[_64Bit, _64Bit], complex): @property @@ -5342,38 +5325,36 @@ class complex128(complexfloating[_64Bit, _64Bit], complex): @overload # type: ignore[override] def __add__(self, other: _Complex128_co, /) -> complex128: ... @overload - def __add__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __add__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... def __radd__(self, other: _Complex128_co, /) -> complex128: ... # type: ignore[override] @overload # type: ignore[override] def __sub__(self, other: _Complex128_co, /) -> complex128: ... @overload - def __sub__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __sub__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... def __rsub__(self, other: _Complex128_co, /) -> complex128: ... # type: ignore[override] @overload # type: ignore[override] def __mul__(self, other: _Complex128_co, /) -> complex128: ... @overload - def __mul__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __mul__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... def __rmul__(self, other: _Complex128_co, /) -> complex128: ... # type: ignore[override] @overload # type: ignore[override] def __truediv__(self, other: _Complex128_co, /) -> complex128: ... 
@overload - def __truediv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __truediv__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... def __rtruediv__(self, other: _Complex128_co, /) -> complex128: ... # type: ignore[override] @overload # type: ignore[override] def __pow__(self, other: _Complex128_co, mod: None = None, /) -> complex128: ... @overload - def __pow__( - self, other: complexfloating[_NBit1, _NBit2], mod: None = None, / - ) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __pow__[NBitT: NBitBase](self, other: complexfloating[NBitT], mod: None = None, /) -> complexfloating[NBitT | _64Bit]: ... def __rpow__(self, other: _Complex128_co, mod: None = None, /) -> complex128: ... # type: ignore[override] -csingle: TypeAlias = complex64 -cdouble: TypeAlias = complex128 -clongdouble: TypeAlias = complexfloating[_NBitLongDouble] +csingle = complex64 +cdouble = complex128 +clongdouble = complexfloating[_NBitLongDouble] class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co]): @property @@ -5386,7 +5367,7 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] @overload def __new__(cls, /) -> timedelta64[L[0]]: ... @overload - def __new__(cls, value: _NaTValue | None, format: _TimeUnitSpec, /) -> timedelta64[None]: ... + def __new__(cls, value: _NaTValue | None, format: _TimeUnitSpec[_TD64Unit], /) -> timedelta64[None]: ... @overload def __new__(cls, value: L[0], format: _TimeUnitSpec[_IntTD64Unit] = ..., /) -> timedelta64[L[0]]: ... @overload @@ -5401,7 +5382,7 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] /, ) -> timedelta64[dt.timedelta]: ... @overload - def __new__(cls, value: _ConvertibleToTD64, format: _TimeUnitSpec = ..., /) -> timedelta64: ... 
+ def __new__(cls, value: _ConvertibleToTD64, format: _TimeUnitSpec[_TD64Unit] = ..., /) -> timedelta64: ... # inherited at runtime from `signedinteger` def __class_getitem__(cls, type_arg: type | object, /) -> GenericAlias: ... @@ -5429,9 +5410,11 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] @overload def __add__(self: timedelta64[int], x: timedelta64, /) -> timedelta64[int | None]: ... @overload - def __add__(self: timedelta64[dt.timedelta], x: _AnyDateOrTime, /) -> _AnyDateOrTime: ... + def __add__[DateOrTimeT: dt.date | dt.timedelta](self: timedelta64[dt.timedelta], x: DateOrTimeT, /) -> DateOrTimeT: ... @overload - def __add__(self: timedelta64[_AnyTD64Item], x: timedelta64[_AnyTD64Item] | _IntLike_co, /) -> timedelta64[_AnyTD64Item]: ... + def __add__[AnyItemT: (dt.timedelta, int, None)]( + self: timedelta64[AnyItemT], x: timedelta64[AnyItemT] | _IntLike_co, / + ) -> timedelta64[AnyItemT]: ... __radd__ = __add__ # @@ -5450,7 +5433,9 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] @overload def __sub__(self: timedelta64[dt.timedelta], b: dt.timedelta, /) -> dt.timedelta: ... @overload - def __sub__(self: timedelta64[_AnyTD64Item], b: timedelta64[_AnyTD64Item] | _IntLike_co, /) -> timedelta64[_AnyTD64Item]: ... + def __sub__[AnyItemT: (dt.timedelta, int, None)]( + self: timedelta64[AnyItemT], b: timedelta64[AnyItemT] | _IntLike_co, / + ) -> timedelta64[AnyItemT]: ... # NOTE: subtraction is not commutative, so __rsub__ differs from __sub__. # This confuses mypy, so we ignore the [misc] errors it reports. @@ -5459,16 +5444,21 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] @overload def __rsub__(self: timedelta64[None], a: _TD64Like_co, /) -> timedelta64[None]: ... @overload - def __rsub__(self: timedelta64[dt.timedelta], a: _AnyDateOrTime, /) -> _AnyDateOrTime: ... 
+ def __rsub__[AnyDateT: (dt.datetime, dt.date)](self: timedelta64[dt.timedelta], a: AnyDateT, /) -> AnyDateT: ... @overload - def __rsub__(self: timedelta64[dt.timedelta], a: timedelta64[_AnyTD64Item], /) -> timedelta64[_AnyTD64Item]: ... + def __rsub__[AnyItemT: (dt.timedelta, int, None)]( + self: timedelta64[dt.timedelta], a: timedelta64[AnyItemT], / + ) -> timedelta64[AnyItemT]: ... @overload - def __rsub__(self: timedelta64[_AnyTD64Item], a: timedelta64[_AnyTD64Item] | _IntLike_co, /) -> timedelta64[_AnyTD64Item]: ... # type: ignore[misc] + def __rsub__[AnyItemT: (dt.timedelta, int, None)]( # type: ignore[misc] + self: timedelta64[AnyItemT], a: timedelta64[AnyItemT] | _IntLike_co, / + ) -> timedelta64[AnyItemT]: ... @overload def __rsub__(self, a: timedelta64[None], /) -> timedelta64[None]: ... # type: ignore[overload-cannot-match] @overload def __rsub__(self, a: datetime64[None], /) -> datetime64[None]: ... + # @overload def __mul__(self: timedelta64[Never], x: _FloatLike_co, /) -> timedelta64: ... @overload @@ -5542,9 +5532,13 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] @overload def __truediv__(self: timedelta64[Never], b: float | floating | integer, /) -> timedelta64: ... @overload - def __truediv__(self: timedelta64[_AnyTD64Item], b: int | integer, /) -> timedelta64[_AnyTD64Item]: ... + def __truediv__[AnyItemT: (dt.timedelta, int, None)]( + self: timedelta64[AnyItemT], b: int | integer, / + ) -> timedelta64[AnyItemT]: ... @overload - def __truediv__(self: timedelta64[_AnyTD64Item], b: float | floating, /) -> timedelta64[_AnyTD64Item | None]: ... + def __truediv__[AnyItemT: (dt.timedelta, int, None)]( + self: timedelta64[AnyItemT], b: float | floating, / + ) -> timedelta64[AnyItemT | None]: ... @overload def __truediv__(self, b: float | floating | integer, /) -> timedelta64: ... 
@@ -5560,17 +5554,19 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] @overload def __floordiv__(self: timedelta64[Never], b: float | floating | integer, /) -> timedelta64: ... @overload - def __floordiv__(self: timedelta64[_AnyTD64Item], b: int | integer, /) -> timedelta64[_AnyTD64Item]: ... + def __floordiv__[AnyItemT: (dt.timedelta, int, None)]( + self: timedelta64[AnyItemT], b: int | integer, / + ) -> timedelta64[AnyItemT]: ... @overload - def __floordiv__(self: timedelta64[_AnyTD64Item], b: float | floating, /) -> timedelta64[_AnyTD64Item | None]: ... + def __floordiv__[AnyItemT: (dt.timedelta, int, None)]( + self: timedelta64[AnyItemT], b: float | floating, / + ) -> timedelta64[AnyItemT | None]: ... @overload def __rfloordiv__(self, a: timedelta64, /) -> int64: ... @overload def __rfloordiv__(self: timedelta64[dt.timedelta], a: dt.timedelta, /) -> int: ... - # comparison ops - @overload def __lt__(self, other: _TD64Like_co, /) -> bool_: ... @overload @@ -5608,9 +5604,9 @@ class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]): @overload def __new__(cls, value: datetime64[_DT64ItemT_co], /) -> Self: ... @overload - def __new__(cls, value: _AnyDT64Arg, /) -> datetime64[_AnyDT64Arg]: ... + def __new__[AnyItemT: (dt.date, dt.datetime, None)](cls, value: AnyItemT, /) -> datetime64[AnyItemT]: ... @overload - def __new__(cls, value: _NaTValue | None = ..., format: _TimeUnitSpec = ..., /) -> datetime64[None]: ... + def __new__(cls, value: _NaTValue | None = ..., format: _TimeUnitSpec[_TD64Unit] = ..., /) -> datetime64[None]: ... @overload def __new__(cls, value: _DT64Now, format: _TimeUnitSpec[_NativeTimeUnit] = ..., /) -> datetime64[dt.datetime]: ... @overload @@ -5618,13 +5614,13 @@ class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]): @overload def __new__(cls, value: int | bytes | str | dt.date, format: _TimeUnitSpec[_IntTimeUnit], /) -> datetime64[int]: ... 
@overload - def __new__( + def __new__( # type: ignore[overload-cannot-match] cls, value: int | bytes | str | dt.date, format: _TimeUnitSpec[_NativeTimeUnit], / ) -> datetime64[dt.datetime]: ... @overload - def __new__(cls, value: int | bytes | str | dt.date, format: _TimeUnitSpec[_DateUnit], /) -> datetime64[dt.date]: ... + def __new__(cls, value: int | bytes | str | dt.date, format: _TimeUnitSpec[_DateUnit], /) -> datetime64[dt.date]: ... # type: ignore[overload-cannot-match] @overload - def __new__(cls, value: bytes | str | dt.date | None, format: _TimeUnitSpec = ..., /) -> Self: ... + def __new__(cls, value: bytes | str | dt.date | None, format: _TimeUnitSpec[_TD64Unit] = ..., /) -> Self: ... # def __class_getitem__(cls, type_arg: type | object, /) -> GenericAlias: ... @@ -5633,9 +5629,9 @@ class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]): @overload def __add__(self: datetime64[Never], x: _TD64Like_co, /) -> datetime64: ... @overload - def __add__(self: datetime64[_AnyDT64Item], x: _IntLike_co, /) -> datetime64[_AnyDT64Item]: ... + def __add__(self, x: _IntLike_co, /) -> Self: ... @overload - def __add__(self: datetime64[None], x: _TD64Like_co, /) -> datetime64[None]: ... + def __add__(self: datetime64[None], x: timedelta64, /) -> datetime64[None]: ... @overload def __add__(self: datetime64[int | dt.datetime], x: timedelta64[Never], /) -> datetime64: ... @overload @@ -5658,7 +5654,7 @@ class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]): @overload def __sub__(self: datetime64[Never], x: datetime64, /) -> timedelta64: ... @overload - def __sub__(self: datetime64[_AnyDT64Item], x: _IntLike_co, /) -> datetime64[_AnyDT64Item]: ... + def __sub__(self, x: _IntLike_co, /) -> Self: ... @overload def __sub__(self: datetime64[dt.date], x: dt.date, /) -> dt.timedelta: ... 
@overload @@ -5694,7 +5690,7 @@ class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]): @overload def __rsub__(self: datetime64[Never], x: datetime64, /) -> timedelta64: ... @overload - def __rsub__(self: datetime64[_AnyDT64Item], x: _IntLike_co, /) -> datetime64[_AnyDT64Item]: ... + def __rsub__(self, x: _IntLike_co, /) -> Self: ... @overload def __rsub__(self: datetime64[dt.date], x: dt.date, /) -> dt.timedelta: ... @overload @@ -6125,7 +6121,7 @@ class nditer: def value(self) -> tuple[NDArray[Any], ...]: ... class memmap(ndarray[_ShapeT_co, _DTypeT_co]): - __array_priority__: ClassVar[float] + __array_priority__: ClassVar[float] = 100.0 # pyright: ignore[reportIncompatibleMethodOverride] filename: str | None offset: int mode: str @@ -6140,15 +6136,15 @@ class memmap(ndarray[_ShapeT_co, _DTypeT_co]): order: _OrderKACF = "C", ) -> memmap[Any, dtype[uint8]]: ... @overload - def __new__( + def __new__[ScalarT: generic]( subtype, filename: StrOrBytesPath | _SupportsFileMethodsRW, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], mode: _MemMapModeKind = "r+", offset: int = 0, shape: int | tuple[int, ...] | None = None, order: _OrderKACF = "C", - ) -> memmap[Any, dtype[_ScalarT]]: ... + ) -> memmap[Any, dtype[ScalarT]]: ... @overload def __new__( subtype, @@ -6160,9 +6156,9 @@ class memmap(ndarray[_ShapeT_co, _DTypeT_co]): order: _OrderKACF = "C", ) -> memmap[Any, dtype]: ... def __array_finalize__(self, obj: object) -> None: ... - def __array_wrap__( + def __array_wrap__( # type: ignore[override] self, - array: memmap[_ShapeT_co, _DTypeT_co], # type: ignore[override] + array: memmap[_ShapeT_co, _DTypeT_co], context: tuple[ufunc, tuple[Any, ...], int] | None = None, return_scalar: builtins.bool = False, ) -> Any: ... 
@@ -6203,9 +6199,9 @@ class poly1d: __hash__: ClassVar[None] # type: ignore[assignment] # pyright: ignore[reportIncompatibleMethodOverride] @overload - def __array__(self, /, t: None = None, copy: builtins.bool | None = None) -> ndarray[tuple[int], dtype]: ... + def __array__(self, /, t: None = None, copy: builtins.bool | None = None) -> ndarray[tuple[int]]: ... @overload - def __array__(self, /, t: _DTypeT, copy: builtins.bool | None = None) -> ndarray[tuple[int], _DTypeT]: ... + def __array__[DTypeT: dtype](self, /, t: DTypeT, copy: builtins.bool | None = None) -> ndarray[tuple[int], DTypeT]: ... @overload def __call__(self, val: _ScalarLike_co) -> Any: ... diff --git a/numpy/_array_api_info.pyi b/numpy/_array_api_info.pyi index 396125143e92..0bad9c65b137 100644 --- a/numpy/_array_api_info.pyi +++ b/numpy/_array_api_info.pyi @@ -1,18 +1,9 @@ -from typing import ( - Literal, - Never, - TypeAlias, - TypedDict, - TypeVar, - final, - overload, - type_check_only, -) +from typing import Literal, Never, TypedDict, final, overload, type_check_only import numpy as np -_Device: TypeAlias = Literal["cpu"] -_DeviceLike: TypeAlias = _Device | None +type _Device = Literal["cpu"] +type _DeviceLike = _Device | None _Capabilities = TypedDict( "_Capabilities", @@ -32,33 +23,22 @@ _DefaultDTypes = TypedDict( }, ) -_KindBool: TypeAlias = Literal["bool"] -_KindInt: TypeAlias = Literal["signed integer"] -_KindUInt: TypeAlias = Literal["unsigned integer"] -_KindInteger: TypeAlias = Literal["integral"] -_KindFloat: TypeAlias = Literal["real floating"] -_KindComplex: TypeAlias = Literal["complex floating"] -_KindNumber: TypeAlias = Literal["numeric"] -_Kind: TypeAlias = ( - _KindBool - | _KindInt - | _KindUInt - | _KindInteger - | _KindFloat - | _KindComplex - | _KindNumber -) - -_T1 = TypeVar("_T1") -_T2 = TypeVar("_T2") -_T3 = TypeVar("_T3") -_Permute1: TypeAlias = _T1 | tuple[_T1] -_Permute2: TypeAlias = tuple[_T1, _T2] | tuple[_T2, _T1] -_Permute3: TypeAlias = ( - tuple[_T1, _T2, 
_T3] | tuple[_T1, _T3, _T2] - | tuple[_T2, _T1, _T3] | tuple[_T2, _T3, _T1] - | tuple[_T3, _T1, _T2] | tuple[_T3, _T2, _T1] -) +type _KindBool = Literal["bool"] +type _KindInt = Literal["signed integer"] +type _KindUInt = Literal["unsigned integer"] +type _KindInteger = Literal["integral"] +type _KindFloat = Literal["real floating"] +type _KindComplex = Literal["complex floating"] +type _KindNumber = Literal["numeric"] +type _Kind = _KindBool | _KindInt | _KindUInt | _KindInteger | _KindFloat | _KindComplex | _KindNumber + +type _Permute1[T1] = T1 | tuple[T1] +type _Permute2[T1, T2] = tuple[T1, T2] | tuple[T2, T1] +type _Permute3[T1, T2, T3] = ( + tuple[T1, T2, T3] | tuple[T1, T3, T2] + | tuple[T2, T1, T3] | tuple[T2, T3, T1] + | tuple[T3, T1, T2] | tuple[T3, T2, T1] +) # fmt: skip @type_check_only class _DTypesBool(TypedDict): @@ -113,7 +93,7 @@ class _DTypesUnion(TypedDict, total=False): complex64: np.dtype[np.complex64] complex128: np.dtype[np.complex128] -_EmptyDict: TypeAlias = dict[Never, Never] +type _EmptyDict = dict[Never, Never] @final class __array_namespace_info__: @@ -121,11 +101,7 @@ class __array_namespace_info__: def capabilities(self) -> _Capabilities: ... def default_device(self) -> _Device: ... - def default_dtypes( - self, - *, - device: _DeviceLike = None, - ) -> _DefaultDTypes: ... + def default_dtypes(self, *, device: _DeviceLike = None) -> _DefaultDTypes: ... def devices(self) -> list[_Device]: ... @overload @@ -175,20 +151,14 @@ class __array_namespace_info__: self, *, device: _DeviceLike = None, - kind: ( - _Permute1[_KindInteger] - | _Permute2[_KindInt, _KindUInt] - ), + kind: _Permute1[_KindInteger] | _Permute2[_KindInt, _KindUInt], ) -> _DTypesInteger: ... @overload def dtypes( self, *, device: _DeviceLike = None, - kind: ( - _Permute1[_KindNumber] - | _Permute3[_KindInteger, _KindFloat, _KindComplex] - ), + kind: _Permute1[_KindNumber] | _Permute3[_KindInteger, _KindFloat, _KindComplex], ) -> _DTypesNumber: ... 
@overload def dtypes( diff --git a/numpy/_core/_asarray.pyi b/numpy/_core/_asarray.pyi index 6bef69d8e4ea..07adc83fbcff 100644 --- a/numpy/_core/_asarray.pyi +++ b/numpy/_core/_asarray.pyi @@ -1,30 +1,28 @@ from collections.abc import Iterable -from typing import Any, Literal, TypeAlias, TypeVar, overload +from typing import Any, Literal, overload from numpy._typing import DTypeLike, NDArray, _SupportsArrayFunc __all__ = ["require"] -_ArrayT = TypeVar("_ArrayT", bound=NDArray[Any]) - -_Requirements: TypeAlias = Literal[ +type _Requirements = Literal[ "C", "C_CONTIGUOUS", "CONTIGUOUS", "F", "F_CONTIGUOUS", "FORTRAN", "A", "ALIGNED", "W", "WRITEABLE", "O", "OWNDATA" ] -_E: TypeAlias = Literal["E", "ENSUREARRAY"] -_RequirementsWithE: TypeAlias = _Requirements | _E +type _E = Literal["E", "ENSUREARRAY"] +type _RequirementsWithE = _Requirements | _E @overload -def require( - a: _ArrayT, +def require[ArrayT: NDArray[Any]]( + a: ArrayT, dtype: None = None, requirements: _Requirements | Iterable[_Requirements] | None = None, *, like: _SupportsArrayFunc | None = None -) -> _ArrayT: ... +) -> ArrayT: ... @overload def require( a: object, diff --git a/numpy/_core/_dtype.pyi b/numpy/_core/_dtype.pyi index 28adecf4ad2f..4d34ae9efb99 100644 --- a/numpy/_core/_dtype.pyi +++ b/numpy/_core/_dtype.pyi @@ -1,13 +1,11 @@ -from typing import Final, Literal as L, TypeAlias, TypedDict, overload, type_check_only -from typing_extensions import ReadOnly, TypeVar +from typing import Final, Literal as L, TypedDict, overload, type_check_only +from typing_extensions import ReadOnly import numpy as np ### -_T = TypeVar("_T") - -_Name: TypeAlias = L["uint", "int", "complex", "float", "bool", "void", "object", "datetime", "timedelta", "bytes", "str"] +type _Name = L["uint", "int", "complex", "float", "bool", "void", "object", "datetime", "timedelta", "bytes", "str"] @type_check_only class _KindToStemType(TypedDict): @@ -50,7 +48,7 @@ def _name_get(dtype: np.dtype) -> str: ... 
# @overload -def _unpack_field(dtype: np.dtype, offset: int, title: _T) -> tuple[np.dtype, int, _T]: ... +def _unpack_field[T](dtype: np.dtype, offset: int, title: T) -> tuple[np.dtype, int, T]: ... @overload def _unpack_field(dtype: np.dtype, offset: int, title: None = None) -> tuple[np.dtype, int, None]: ... def _aligned_offset(offset: int, alignment: int) -> int: ... diff --git a/numpy/_core/_exceptions.pyi b/numpy/_core/_exceptions.pyi index b340fde3e463..00c1cdbaa575 100644 --- a/numpy/_core/_exceptions.pyi +++ b/numpy/_core/_exceptions.pyi @@ -1,17 +1,11 @@ from collections.abc import Iterable -from typing import Any, Final, TypeVar, overload +from typing import Any, Final, overload import numpy as np from numpy import _CastingKind ### -_T = TypeVar("_T") -_TupleT = TypeVar("_TupleT", bound=tuple[()] | tuple[Any, Any, *tuple[Any, ...]]) -_ExceptionT = TypeVar("_ExceptionT", bound=Exception) - -### - class UFuncTypeError(TypeError): ufunc: Final[np.ufunc] def __init__(self, /, ufunc: np.ufunc) -> None: ... @@ -48,7 +42,7 @@ class _ArrayMemoryError(MemoryError): def _size_to_string(num_bytes: int) -> str: ... @overload -def _unpack_tuple(tup: tuple[_T]) -> _T: ... +def _unpack_tuple[T](tup: tuple[T]) -> T: ... @overload -def _unpack_tuple(tup: _TupleT) -> _TupleT: ... -def _display_as_base(cls: type[_ExceptionT]) -> type[_ExceptionT]: ... +def _unpack_tuple[TupleT: tuple[()] | tuple[Any, Any, *tuple[Any, ...]]](tup: TupleT) -> TupleT: ... +def _display_as_base[ExceptionT: Exception](cls: type[ExceptionT]) -> type[ExceptionT]: ... 
diff --git a/numpy/_core/_internal.pyi b/numpy/_core/_internal.pyi index 6e37022ffd56..179e077629b6 100644 --- a/numpy/_core/_internal.pyi +++ b/numpy/_core/_internal.pyi @@ -8,9 +8,6 @@ import numpy as np import numpy.typing as npt from numpy.ctypeslib import c_intp -_CastT = TypeVar("_CastT", bound=ct._CanCastTo) -_T_co = TypeVar("_T_co", covariant=True) -_CT = TypeVar("_CT", bound=ct._CData) _PT_co = TypeVar("_PT_co", bound=int | None, default=None, covariant=True) ### @@ -43,18 +40,18 @@ class _ctypes(Generic[_PT_co]): def _as_parameter_(self) -> ct.c_void_p: ... # - def data_as(self, /, obj: type[_CastT]) -> _CastT: ... - def shape_as(self, /, obj: type[_CT]) -> ct.Array[_CT]: ... - def strides_as(self, /, obj: type[_CT]) -> ct.Array[_CT]: ... + def data_as[CastT: ct._CanCastTo](self, /, obj: type[CastT]) -> CastT: ... + def shape_as[CT: ct._CData](self, /, obj: type[CT]) -> ct.Array[CT]: ... + def strides_as[CT: ct._CData](self, /, obj: type[CT]) -> ct.Array[CT]: ... -class dummy_ctype(Generic[_T_co]): - _cls: type[_T_co] +class dummy_ctype[T_co]: + _cls: type[T_co] - def __init__(self, /, cls: type[_T_co]) -> None: ... + def __init__(self, /, cls: type[T_co]) -> None: ... def __eq__(self, other: Self, /) -> bool: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] def __ne__(self, other: Self, /) -> bool: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] def __mul__(self, other: object, /) -> Self: ... - def __call__(self, /, *other: object) -> _T_co: ... + def __call__(self, /, *other: object) -> T_co: ... def array_ufunc_errmsg_formatter(dummy: object, ufunc: np.ufunc, method: str, *inputs: object, **kwargs: object) -> str: ... def array_function_errmsg_formatter(public_api: Callable[..., object], types: Iterable[str]) -> str: ... 
diff --git a/numpy/_core/_methods.pyi b/numpy/_core/_methods.pyi index 3c80683f003b..651c78d3530b 100644 --- a/numpy/_core/_methods.pyi +++ b/numpy/_core/_methods.pyi @@ -1,5 +1,5 @@ from collections.abc import Callable -from typing import Any, Concatenate, TypeAlias +from typing import Any, Concatenate import numpy as np @@ -7,7 +7,7 @@ from . import _exceptions as _exceptions ### -_Reduce2: TypeAlias = Callable[Concatenate[object, ...], Any] +type _Reduce2 = Callable[Concatenate[object, ...], Any] ### diff --git a/numpy/_core/_type_aliases.pyi b/numpy/_core/_type_aliases.pyi index e28541cc8987..c7efe989caa5 100644 --- a/numpy/_core/_type_aliases.pyi +++ b/numpy/_core/_type_aliases.pyi @@ -1,5 +1,5 @@ from collections.abc import Collection -from typing import Final, Literal as L, TypeAlias, TypedDict, type_check_only +from typing import Final, Literal as L, TypedDict, type_check_only import numpy as np @@ -35,7 +35,7 @@ class _CNamesDict(TypedDict): c_names_dict: Final[_CNamesDict] -_AbstractTypeName: TypeAlias = L[ +type _AbstractTypeName = L[ "generic", "flexible", "character", diff --git a/numpy/_core/_ufunc_config.pyi b/numpy/_core/_ufunc_config.pyi index f1f0d88fe165..039aa1d51223 100644 --- a/numpy/_core/_ufunc_config.pyi +++ b/numpy/_core/_ufunc_config.pyi @@ -1,7 +1,7 @@ from _typeshed import SupportsWrite from collections.abc import Callable from types import TracebackType -from typing import Any, Final, Literal, TypeAlias, TypedDict, TypeVar, type_check_only +from typing import Any, Final, Literal, TypedDict, type_check_only __all__ = [ "seterr", @@ -13,10 +13,8 @@ __all__ = [ "errstate", ] -_ErrKind: TypeAlias = Literal["ignore", "warn", "raise", "call", "print", "log"] -_ErrCall: TypeAlias = Callable[[str, int], Any] | SupportsWrite[str] - -_CallableT = TypeVar("_CallableT", bound=Callable[..., object]) +type _ErrKind = Literal["ignore", "warn", "raise", "call", "print", "log"] +type _ErrCall = Callable[[str, int], Any] | SupportsWrite[str] 
@type_check_only class _ErrDict(TypedDict): @@ -45,7 +43,7 @@ class errstate: under: _ErrKind | None = None, invalid: _ErrKind | None = None, ) -> None: ... - def __call__(self, /, func: _CallableT) -> _CallableT: ... + def __call__[FuncT: Callable[..., object]](self, /, func: FuncT) -> FuncT: ... def __enter__(self) -> None: ... def __exit__( self, diff --git a/numpy/_core/arrayprint.pyi b/numpy/_core/arrayprint.pyi index 307f844634ca..167cc3f3a097 100644 --- a/numpy/_core/arrayprint.pyi +++ b/numpy/_core/arrayprint.pyi @@ -3,15 +3,7 @@ from collections.abc import Callable # Using a private class is by no means ideal, but it is simply a consequence # of a `contextlib.context` returning an instance of aforementioned class from contextlib import _GeneratorContextManager -from typing import ( - Any, - Final, - Literal, - SupportsIndex, - TypeAlias, - TypedDict, - type_check_only, -) +from typing import Any, Final, Literal, SupportsIndex, TypedDict, type_check_only import numpy as np from numpy._typing import NDArray, _CharLike_co, _FloatLike_co @@ -29,12 +21,12 @@ __all__ = [ ### -_FloatMode: TypeAlias = Literal["fixed", "unique", "maxprec", "maxprec_equal"] -_LegacyNoStyle: TypeAlias = Literal["1.21", "1.25", "2.1", False] -_Legacy: TypeAlias = Literal["1.13", _LegacyNoStyle] -_Sign: TypeAlias = Literal["-", "+", " "] -_Trim: TypeAlias = Literal["k", ".", "0", "-"] -_ReprFunc: TypeAlias = Callable[[NDArray[Any]], str] +type _FloatMode = Literal["fixed", "unique", "maxprec", "maxprec_equal"] +type _LegacyNoStyle = Literal["1.21", "1.25", "2.1", False] +type _Legacy = Literal["1.13", _LegacyNoStyle] +type _Sign = Literal["-", "+", " "] +type _Trim = Literal["k", ".", "0", "-"] +type _ReprFunc = Callable[[NDArray[Any]], str] @type_check_only class _FormatDict(TypedDict, total=False): diff --git a/numpy/_core/defchararray.pyi b/numpy/_core/defchararray.pyi index 406d11ea0eb7..bc587ed846ba 100644 --- a/numpy/_core/defchararray.pyi +++ b/numpy/_core/defchararray.pyi @@ 
-1,13 +1,6 @@ -from typing import ( - Any, - Literal as L, - Self, - SupportsIndex, - SupportsInt, - TypeAlias, - overload, -) -from typing_extensions import Buffer, TypeVar +from collections.abc import Buffer +from typing import Any, Literal as L, Self, SupportsIndex, SupportsInt, overload +from typing_extensions import TypeVar import numpy as np from numpy import _OrderKACF, bytes_, dtype, int_, ndarray, object_, str_ @@ -83,14 +76,13 @@ __all__ = [ ] _ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True) -_CharacterT = TypeVar("_CharacterT", bound=np.character) _CharDTypeT_co = TypeVar("_CharDTypeT_co", bound=dtype[np.character], default=dtype, covariant=True) -_CharArray: TypeAlias = chararray[_AnyShape, dtype[_CharacterT]] +type _CharArray[ScalarT: np.character] = chararray[_AnyShape, dtype[ScalarT]] -_StringDTypeArray: TypeAlias = np.ndarray[_AnyShape, np.dtypes.StringDType] -_StringDTypeOrUnicodeArray: TypeAlias = _StringDTypeArray | NDArray[np.str_] -_StringDTypeSupportsArray: TypeAlias = _SupportsArray[np.dtypes.StringDType] +type _StringDTypeArray = np.ndarray[_AnyShape, np.dtypes.StringDType] +type _StringDTypeOrUnicodeArray = _StringDTypeArray | NDArray[np.str_] +type _StringDTypeSupportsArray = _SupportsArray[np.dtypes.StringDType] class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): @overload diff --git a/numpy/_core/einsumfunc.pyi b/numpy/_core/einsumfunc.pyi index 6d34883e6625..3e42ef6dc238 100644 --- a/numpy/_core/einsumfunc.pyi +++ b/numpy/_core/einsumfunc.pyi @@ -1,8 +1,8 @@ from collections.abc import Sequence -from typing import Any, Literal, TypeAlias, TypeVar, overload +from typing import Any, Literal, overload import numpy as np -from numpy import _OrderKACF, number +from numpy import _OrderKACF from numpy._typing import ( NDArray, _ArrayLikeBool_co, @@ -22,14 +22,9 @@ from numpy._typing import ( __all__ = ["einsum", "einsum_path"] -_ArrayT = TypeVar( - "_ArrayT", - bound=NDArray[np.bool | number], -) - 
-_OptimizeKind: TypeAlias = bool | Literal["greedy", "optimal"] | Sequence[Any] | None -_CastingSafe: TypeAlias = Literal["no", "equiv", "safe", "same_kind"] -_CastingUnsafe: TypeAlias = Literal["unsafe"] +type _OptimizeKind = bool | Literal["greedy", "optimal"] | Sequence[Any] | None +type _CastingSafe = Literal["no", "equiv", "safe", "same_kind"] +type _CastingUnsafe = Literal["unsafe"] # TODO: Properly handle the `casting`-based combinatorics # TODO: We need to evaluate the content `__subscripts` in order @@ -104,27 +99,27 @@ def einsum( optimize: _OptimizeKind = False, ) -> Any: ... @overload -def einsum( +def einsum[OutT: NDArray[np.bool | np.number]]( subscripts: str | _ArrayLikeInt_co, /, *operands: _ArrayLikeComplex_co, - out: _ArrayT, + out: OutT, dtype: _DTypeLikeComplex_co | None = ..., order: _OrderKACF = ..., casting: _CastingSafe = ..., optimize: _OptimizeKind = False, -) -> _ArrayT: ... +) -> OutT: ... @overload -def einsum( +def einsum[OutT: NDArray[np.bool | np.number]]( subscripts: str | _ArrayLikeInt_co, /, *operands: Any, - out: _ArrayT, + out: OutT, casting: _CastingUnsafe, dtype: _DTypeLikeComplex_co | None = ..., order: _OrderKACF = ..., optimize: _OptimizeKind = False, -) -> _ArrayT: ... +) -> OutT: ... @overload def einsum( @@ -149,27 +144,27 @@ def einsum( optimize: _OptimizeKind = False, ) -> Any: ... @overload -def einsum( +def einsum[OutT: NDArray[np.bool | np.number]]( subscripts: str | _ArrayLikeInt_co, /, *operands: _ArrayLikeObject_co, - out: _ArrayT, + out: OutT, dtype: _DTypeLikeObject | None = ..., order: _OrderKACF = ..., casting: _CastingSafe = ..., optimize: _OptimizeKind = False, -) -> _ArrayT: ... +) -> OutT: ... @overload -def einsum( +def einsum[OutT: NDArray[np.bool | np.number]]( subscripts: str | _ArrayLikeInt_co, /, *operands: Any, - out: _ArrayT, + out: OutT, casting: _CastingUnsafe, dtype: _DTypeLikeObject | None = ..., order: _OrderKACF = ..., optimize: _OptimizeKind = False, -) -> _ArrayT: ... +) -> OutT: ... 
# NOTE: `einsum_call` is a hidden kwarg unavailable for public use. # It is therefore excluded from the signatures below. diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index 2a9762240e3d..b929328a9443 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -7,9 +7,7 @@ from typing import ( Never, Protocol, SupportsIndex, - TypeAlias, TypedDict, - TypeVar, Unpack, overload, type_check_only, @@ -17,7 +15,6 @@ from typing import ( import numpy as np from numpy import ( - _AnyShapeT, _CastingKind, _ModeKind, _OrderACF, @@ -28,12 +25,10 @@ from numpy import ( complexfloating, float16, floating, - generic, int64, int_, intp, object_, - timedelta64, uint64, ) from numpy._globals import _NoValueType @@ -57,6 +52,7 @@ from numpy._typing import ( _NestedSequence, _NumberLike_co, _ScalarLike_co, + _Shape, _ShapeLike, ) @@ -107,18 +103,11 @@ __all__ = [ "var", ] -_ScalarT = TypeVar("_ScalarT", bound=generic) -_NumberOrObjectT = TypeVar("_NumberOrObjectT", bound=np.number | np.object_) -_ArrayT = TypeVar("_ArrayT", bound=np.ndarray[Any, Any]) -_ShapeT = TypeVar("_ShapeT", bound=tuple[int, ...]) -_ShapeT_co = TypeVar("_ShapeT_co", bound=tuple[int, ...], covariant=True) -_BoolOrIntArrayT = TypeVar("_BoolOrIntArrayT", bound=NDArray[np.integer | np.bool]) - @type_check_only -class _SupportsShape(Protocol[_ShapeT_co]): +class _SupportsShape[ShapeT_co: _Shape](Protocol): # NOTE: it matters that `self` is positional only @property - def shape(self, /) -> _ShapeT_co: ... + def shape(self, /) -> ShapeT_co: ... @type_check_only class _UFuncKwargs(TypedDict, total=False): @@ -129,20 +118,23 @@ class _UFuncKwargs(TypedDict, total=False): casting: _CastingKind # a "sequence" that isn't a string, bytes, bytearray, or memoryview -_T = TypeVar("_T") -_PyArray: TypeAlias = list[_T] | tuple[_T, ...] +type _PyArray[_T] = list[_T] | tuple[_T, ...] 
# `int` also covers `bool` -_PyScalar: TypeAlias = complex | bytes | str +type _PyScalar = complex | bytes | str + +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] + +### # TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload -def take( - a: _ArrayLike[_ScalarT], +def take[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], indices: _IntLike_co, axis: None = None, out: None = None, mode: _ModeKind = "raise", -) -> _ScalarT: ... +) -> ScalarT: ... @overload def take( a: ArrayLike, @@ -152,13 +144,13 @@ def take( mode: _ModeKind = "raise", ) -> Any: ... @overload -def take( - a: _ArrayLike[_ScalarT], +def take[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], indices: _ArrayLikeInt_co, axis: SupportsIndex | None = None, out: None = None, mode: _ModeKind = "raise", -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def take( a: ArrayLike, @@ -168,50 +160,50 @@ def take( mode: _ModeKind = "raise", ) -> NDArray[Any]: ... @overload -def take( +def take[ArrayT: np.ndarray]( a: ArrayLike, indices: _ArrayLikeInt_co, axis: SupportsIndex | None, - out: _ArrayT, + out: ArrayT, mode: _ModeKind = "raise", -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def take( +def take[ArrayT: np.ndarray]( a: ArrayLike, indices: _ArrayLikeInt_co, axis: SupportsIndex | None = None, *, - out: _ArrayT, + out: ArrayT, mode: _ModeKind = "raise", -) -> _ArrayT: ... +) -> ArrayT: ... -@overload -def reshape( # shape: index - a: _ArrayLike[_ScalarT], +@overload # shape: index +def reshape[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], /, shape: SupportsIndex, order: _OrderACF = "C", *, copy: bool | None = None, -) -> np.ndarray[tuple[int], np.dtype[_ScalarT]]: ... -@overload -def reshape( # shape: (int, ...) @ _AnyShapeT - a: _ArrayLike[_ScalarT], +) -> np.ndarray[tuple[int], np.dtype[ScalarT]]: ... 
+@overload # shape: ~ShapeT +def reshape[ScalarT: np.generic, ShapeT: _Shape]( + a: _ArrayLike[ScalarT], /, - shape: _AnyShapeT, + shape: ShapeT, order: _OrderACF = "C", *, copy: bool | None = None, -) -> np.ndarray[_AnyShapeT, np.dtype[_ScalarT]]: ... +) -> np.ndarray[ShapeT, np.dtype[ScalarT]]: ... @overload # shape: Sequence[index] -def reshape( - a: _ArrayLike[_ScalarT], +def reshape[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], /, shape: Sequence[SupportsIndex], order: _OrderACF = "C", *, copy: bool | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload # shape: index def reshape( a: ArrayLike, @@ -221,15 +213,15 @@ def reshape( *, copy: bool | None = None, ) -> np.ndarray[tuple[int], np.dtype]: ... -@overload -def reshape( # shape: (int, ...) @ _AnyShapeT +@overload # shape: ~ShapeT +def reshape[ShapeT: _Shape]( a: ArrayLike, /, - shape: _AnyShapeT, + shape: ShapeT, order: _OrderACF = "C", *, copy: bool | None = None, -) -> np.ndarray[_AnyShapeT, np.dtype]: ... +) -> np.ndarray[ShapeT, np.dtype]: ... @overload # shape: Sequence[index] def reshape( a: ArrayLike, @@ -248,12 +240,12 @@ def choose( mode: _ModeKind = "raise", ) -> Any: ... @overload -def choose( +def choose[ScalarT: np.generic]( a: _ArrayLikeInt_co, - choices: _ArrayLike[_ScalarT], + choices: _ArrayLike[ScalarT], out: None = None, mode: _ModeKind = "raise", -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def choose( a: _ArrayLikeInt_co, @@ -262,26 +254,26 @@ def choose( mode: _ModeKind = "raise", ) -> NDArray[Any]: ... @overload -def choose( +def choose[ArrayT: np.ndarray]( a: _ArrayLikeInt_co, choices: ArrayLike, - out: _ArrayT, + out: ArrayT, mode: _ModeKind = "raise", -) -> _ArrayT: ... +) -> ArrayT: ... # keep in sync with `ma.core.repeat` @overload -def repeat( - a: _ArrayLike[_ScalarT], +def repeat[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], repeats: _ArrayLikeInt_co, axis: None = None, -) -> np.ndarray[tuple[int], np.dtype[_ScalarT]]: ... 
+) -> np.ndarray[tuple[int], np.dtype[ScalarT]]: ... @overload -def repeat( - a: _ArrayLike[_ScalarT], +def repeat[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], repeats: _ArrayLikeInt_co, axis: SupportsIndex, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def repeat( a: ArrayLike, @@ -305,17 +297,17 @@ def put( # keep in sync with `ndarray.swapaxes` and `ma.core.swapaxes` @overload -def swapaxes(a: _ArrayT, axis1: SupportsIndex, axis2: SupportsIndex) -> _ArrayT: ... +def swapaxes[ArrayT: np.ndarray](a: ArrayT, axis1: SupportsIndex, axis2: SupportsIndex) -> ArrayT: ... @overload -def swapaxes(a: _ArrayLike[_ScalarT], axis1: SupportsIndex, axis2: SupportsIndex) -> NDArray[_ScalarT]: ... +def swapaxes[ScalarT: np.generic](a: _ArrayLike[ScalarT], axis1: SupportsIndex, axis2: SupportsIndex) -> NDArray[ScalarT]: ... @overload def swapaxes(a: ArrayLike, axis1: SupportsIndex, axis2: SupportsIndex) -> NDArray[Any]: ... @overload -def transpose( - a: _ArrayLike[_ScalarT], +def transpose[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], axes: _ShapeLike | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def transpose( a: ArrayLike, @@ -323,19 +315,19 @@ def transpose( ) -> NDArray[Any]: ... @overload -def matrix_transpose(x: _ArrayLike[_ScalarT], /) -> NDArray[_ScalarT]: ... +def matrix_transpose[ScalarT: np.generic](x: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def matrix_transpose(x: ArrayLike, /) -> NDArray[Any]: ... # @overload -def partition( - a: _ArrayLike[_ScalarT], +def partition[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], kth: _ArrayLikeInt, axis: SupportsIndex | None = -1, kind: _PartitionKind = "introselect", order: None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... 
@overload def partition( a: _ArrayLike[np.void], @@ -364,14 +356,14 @@ def argpartition( # @overload -def sort( - a: _ArrayLike[_ScalarT], +def sort[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], axis: SupportsIndex | None = -1, kind: _SortKind | None = None, order: str | Sequence[str] | None = None, *, stable: bool | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def sort( a: ArrayLike, @@ -408,21 +400,21 @@ def argmax( keepdims: bool | _NoValueType = ..., ) -> Any: ... @overload -def argmax( +def argmax[BoolOrIntArrayT: NDArray[np.integer | np.bool]]( a: ArrayLike, axis: SupportsIndex | None, - out: _BoolOrIntArrayT, + out: BoolOrIntArrayT, *, keepdims: bool | _NoValueType = ..., -) -> _BoolOrIntArrayT: ... +) -> BoolOrIntArrayT: ... @overload -def argmax( +def argmax[BoolOrIntArrayT: NDArray[np.integer | np.bool]]( a: ArrayLike, axis: SupportsIndex | None = None, *, - out: _BoolOrIntArrayT, + out: BoolOrIntArrayT, keepdims: bool | _NoValueType = ..., -) -> _BoolOrIntArrayT: ... +) -> BoolOrIntArrayT: ... @overload def argmin( @@ -441,21 +433,21 @@ def argmin( keepdims: bool | _NoValueType = ..., ) -> Any: ... @overload -def argmin( +def argmin[BoolOrIntArrayT: NDArray[np.integer | np.bool]]( a: ArrayLike, axis: SupportsIndex | None, - out: _BoolOrIntArrayT, + out: BoolOrIntArrayT, *, keepdims: bool | _NoValueType = ..., -) -> _BoolOrIntArrayT: ... +) -> BoolOrIntArrayT: ... @overload -def argmin( +def argmin[BoolOrIntArrayT: NDArray[np.integer | np.bool]]( a: ArrayLike, axis: SupportsIndex | None = None, *, - out: _BoolOrIntArrayT, + out: BoolOrIntArrayT, keepdims: bool | _NoValueType = ..., -) -> _BoolOrIntArrayT: ... +) -> BoolOrIntArrayT: ... 
# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload @@ -475,43 +467,36 @@ def searchsorted( # TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload -def resize(a: _ArrayLike[_ScalarT], new_shape: SupportsIndex | tuple[SupportsIndex]) -> np.ndarray[tuple[int], np.dtype[_ScalarT]]: ... +def resize[ScalarT: np.generic](a: _ArrayLike[ScalarT], new_shape: SupportsIndex | tuple[SupportsIndex]) -> _Array1D[ScalarT]: ... @overload -def resize(a: _ArrayLike[_ScalarT], new_shape: _AnyShapeT) -> np.ndarray[_AnyShapeT, np.dtype[_ScalarT]]: ... +def resize[ScalarT: np.generic, ShapeT: _Shape]( + a: _ArrayLike[ScalarT], new_shape: ShapeT +) -> np.ndarray[ShapeT, np.dtype[ScalarT]]: ... @overload -def resize(a: _ArrayLike[_ScalarT], new_shape: _ShapeLike) -> NDArray[_ScalarT]: ... +def resize[ScalarT: np.generic](a: _ArrayLike[ScalarT], new_shape: _ShapeLike) -> NDArray[ScalarT]: ... @overload def resize(a: ArrayLike, new_shape: SupportsIndex | tuple[SupportsIndex]) -> np.ndarray[tuple[int], np.dtype]: ... @overload -def resize(a: ArrayLike, new_shape: _AnyShapeT) -> np.ndarray[_AnyShapeT, np.dtype]: ... +def resize[ShapeT: _Shape](a: ArrayLike, new_shape: ShapeT) -> np.ndarray[ShapeT, np.dtype]: ... @overload def resize(a: ArrayLike, new_shape: _ShapeLike) -> NDArray[Any]: ... # TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload -def squeeze( - a: _ScalarT, - axis: _ShapeLike | None = None, -) -> _ScalarT: ... +def squeeze[ScalarT: np.generic](a: ScalarT, axis: _ShapeLike | None = None) -> ScalarT: ... @overload -def squeeze( - a: _ArrayLike[_ScalarT], - axis: _ShapeLike | None = None, -) -> NDArray[_ScalarT]: ... +def squeeze[ScalarT: np.generic](a: _ArrayLike[ScalarT], axis: _ShapeLike | None = None) -> NDArray[ScalarT]: ... @overload -def squeeze( - a: ArrayLike, - axis: _ShapeLike | None = None, -) -> NDArray[Any]: ... 
+def squeeze(a: ArrayLike, axis: _ShapeLike | None = None) -> NDArray[Any]: ... # keep in sync with `ma.core.diagonal` @overload -def diagonal( - a: _ArrayLike[_ScalarT], +def diagonal[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], offset: SupportsIndex = 0, axis1: SupportsIndex = 0, axis2: SupportsIndex = 1, # >= 2D array -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def diagonal( a: ArrayLike, @@ -531,29 +516,27 @@ def trace( out: None = None, ) -> Any: ... @overload -def trace( +def trace[ArrayT: np.ndarray]( a: ArrayLike, # >= 2D array offset: SupportsIndex, axis1: SupportsIndex, axis2: SupportsIndex, dtype: DTypeLike | None, - out: _ArrayT, -) -> _ArrayT: ... + out: ArrayT, +) -> ArrayT: ... @overload -def trace( +def trace[ArrayT: np.ndarray]( a: ArrayLike, # >= 2D array offset: SupportsIndex = 0, axis1: SupportsIndex = 0, axis2: SupportsIndex = 1, dtype: DTypeLike | None = None, *, - out: _ArrayT, -) -> _ArrayT: ... - -_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] + out: ArrayT, +) -> ArrayT: ... @overload -def ravel(a: _ArrayLike[_ScalarT], order: _OrderKACF = "C") -> _Array1D[_ScalarT]: ... +def ravel[ScalarT: np.generic](a: _ArrayLike[ScalarT], order: _OrderKACF = "C") -> _Array1D[ScalarT]: ... @overload def ravel(a: bytes | _NestedSequence[bytes], order: _OrderKACF = "C") -> _Array1D[np.bytes_]: ... @overload @@ -575,7 +558,7 @@ def nonzero(a: _ArrayLike[Any]) -> tuple[np.ndarray[tuple[int], np.dtype[intp]], @overload def shape(a: _SupportsShape[Never]) -> _AnyShape: ... @overload -def shape(a: _SupportsShape[_ShapeT]) -> _ShapeT: ... +def shape[ShapeT: _Shape](a: _SupportsShape[ShapeT]) -> ShapeT: ... @overload def shape(a: _PyScalar) -> tuple[()]: ... # `collections.abc.Sequence` can't be used hesre, since `bytes` and `str` are @@ -591,12 +574,12 @@ def shape(a: memoryview | bytearray) -> tuple[int]: ... def shape(a: ArrayLike) -> _AnyShape: ... 
@overload -def compress( +def compress[ScalarT: np.generic]( condition: _ArrayLikeBool_co, # 1D bool array - a: _ArrayLike[_ScalarT], + a: _ArrayLike[ScalarT], axis: SupportsIndex | None = None, out: None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def compress( condition: _ArrayLikeBool_co, # 1D bool array @@ -605,25 +588,25 @@ def compress( out: None = None, ) -> NDArray[Any]: ... @overload -def compress( +def compress[ArrayT: np.ndarray]( condition: _ArrayLikeBool_co, # 1D bool array a: ArrayLike, axis: SupportsIndex | None, - out: _ArrayT, -) -> _ArrayT: ... + out: ArrayT, +) -> ArrayT: ... @overload -def compress( +def compress[ArrayT: np.ndarray]( condition: _ArrayLikeBool_co, # 1D bool array a: ArrayLike, axis: SupportsIndex | None = None, *, - out: _ArrayT, -) -> _ArrayT: ... + out: ArrayT, +) -> ArrayT: ... # TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload -def clip( - a: _ScalarT, +def clip[ScalarT: np.generic]( + a: ScalarT, a_min: ArrayLike | _NoValueType | None = ..., a_max: ArrayLike | _NoValueType | None = ..., out: None = None, @@ -632,7 +615,7 @@ def clip( max: ArrayLike | _NoValueType | None = ..., dtype: None = None, **kwargs: Unpack[_UFuncKwargs], -) -> _ScalarT: ... +) -> ScalarT: ... @overload def clip( a: _ScalarLike_co, @@ -646,8 +629,8 @@ def clip( **kwargs: Unpack[_UFuncKwargs], ) -> Any: ... @overload -def clip( - a: _ArrayLike[_ScalarT], +def clip[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], a_min: ArrayLike | _NoValueType | None = ..., a_max: ArrayLike | _NoValueType | None = ..., out: None = None, @@ -656,7 +639,7 @@ def clip( max: ArrayLike | _NoValueType | None = ..., dtype: None = None, **kwargs: Unpack[_UFuncKwargs], -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def clip( a: ArrayLike, @@ -670,29 +653,29 @@ def clip( **kwargs: Unpack[_UFuncKwargs], ) -> NDArray[Any]: ... 
@overload -def clip( +def clip[ArrayT: np.ndarray]( a: ArrayLike, a_min: ArrayLike | None, a_max: ArrayLike | None, - out: _ArrayT, + out: ArrayT, *, min: ArrayLike | _NoValueType | None = ..., max: ArrayLike | _NoValueType | None = ..., dtype: DTypeLike | None = None, **kwargs: Unpack[_UFuncKwargs], -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def clip( +def clip[ArrayT: np.ndarray]( a: ArrayLike, a_min: ArrayLike | _NoValueType | None = ..., a_max: ArrayLike | _NoValueType | None = ..., *, - out: _ArrayT, + out: ArrayT, min: ArrayLike | _NoValueType | None = ..., max: ArrayLike | _NoValueType | None = ..., dtype: DTypeLike | None = None, **kwargs: Unpack[_UFuncKwargs], -) -> _ArrayT: ... +) -> ArrayT: ... @overload def clip( a: ArrayLike, @@ -707,67 +690,67 @@ def clip( ) -> Any: ... @overload -def sum( - a: _ArrayLike[_ScalarT], +def sum[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], axis: None = None, dtype: None = None, out: None = None, keepdims: Literal[False] | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ScalarT: ... +) -> ScalarT: ... @overload -def sum( - a: _ArrayLike[_ScalarT], +def sum[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], axis: None = None, dtype: None = None, out: None = None, keepdims: bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ScalarT | NDArray[_ScalarT]: ... +) -> ScalarT | NDArray[ScalarT]: ... @overload -def sum( +def sum[ScalarT: np.generic]( a: ArrayLike, axis: None, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], out: None = None, keepdims: Literal[False] | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ScalarT: ... +) -> ScalarT: ... 
@overload -def sum( +def sum[ScalarT: np.generic]( a: ArrayLike, axis: None = None, *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], out: None = None, keepdims: Literal[False] | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ScalarT: ... +) -> ScalarT: ... @overload -def sum( +def sum[ScalarT: np.generic]( a: ArrayLike, axis: _ShapeLike | None, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], out: None = None, keepdims: bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ScalarT | NDArray[_ScalarT]: ... +) -> ScalarT | NDArray[ScalarT]: ... @overload -def sum( +def sum[ScalarT: np.generic]( a: ArrayLike, axis: _ShapeLike | None = None, *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], out: None = None, keepdims: bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ScalarT | NDArray[_ScalarT]: ... +) -> ScalarT | NDArray[ScalarT]: ... @overload def sum( a: ArrayLike, @@ -779,26 +762,26 @@ def sum( where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload -def sum( +def sum[ArrayT: np.ndarray]( a: ArrayLike, axis: _ShapeLike | None, dtype: DTypeLike | None, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def sum( +def sum[ArrayT: np.ndarray]( a: ArrayLike, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... 
# keep in sync with `any` @overload @@ -820,23 +803,23 @@ def all( where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Incomplete: ... @overload -def all( +def all[ArrayT: np.ndarray]( a: ArrayLike | None, axis: int | tuple[int, ...] | None, - out: _ArrayT, + out: ArrayT, keepdims: _BoolLike_co | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def all( +def all[ArrayT: np.ndarray]( a: ArrayLike | None, axis: int | tuple[int, ...] | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: _BoolLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... # keep in sync with `all` @overload @@ -858,32 +841,32 @@ def any( where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Incomplete: ... @overload -def any( +def any[ArrayT: np.ndarray]( a: ArrayLike | None, axis: int | tuple[int, ...] | None, - out: _ArrayT, + out: ArrayT, keepdims: _BoolLike_co | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def any( +def any[ArrayT: np.ndarray]( a: ArrayLike | None, axis: int | tuple[int, ...] | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: _BoolLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... # @overload -def cumsum( - a: _ArrayLike[_ScalarT], +def cumsum[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], axis: SupportsIndex | None = None, dtype: None = None, out: None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def cumsum( a: ArrayLike, @@ -892,20 +875,20 @@ def cumsum( out: None = None, ) -> NDArray[Any]: ... @overload -def cumsum( +def cumsum[ScalarT: np.generic]( a: ArrayLike, axis: SupportsIndex | None, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], out: None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... 
@overload -def cumsum( +def cumsum[ScalarT: np.generic]( a: ArrayLike, axis: SupportsIndex | None = None, *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], out: None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def cumsum( a: ArrayLike, @@ -914,31 +897,31 @@ def cumsum( out: None = None, ) -> NDArray[Any]: ... @overload -def cumsum( +def cumsum[ArrayT: np.ndarray]( a: ArrayLike, axis: SupportsIndex | None, dtype: DTypeLike | None, - out: _ArrayT, -) -> _ArrayT: ... + out: ArrayT, +) -> ArrayT: ... @overload -def cumsum( +def cumsum[ArrayT: np.ndarray]( a: ArrayLike, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, -) -> _ArrayT: ... + out: ArrayT, +) -> ArrayT: ... @overload -def cumulative_sum( - x: _ArrayLike[_ScalarT], +def cumulative_sum[ScalarT: np.generic]( + x: _ArrayLike[ScalarT], /, *, axis: SupportsIndex | None = None, dtype: None = None, out: None = None, include_initial: bool = False, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def cumulative_sum( x: ArrayLike, @@ -950,15 +933,15 @@ def cumulative_sum( include_initial: bool = False, ) -> NDArray[Any]: ... @overload -def cumulative_sum( +def cumulative_sum[ScalarT: np.generic]( x: ArrayLike, /, *, axis: SupportsIndex | None = None, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], out: None = None, include_initial: bool = False, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def cumulative_sum( x: ArrayLike, @@ -970,23 +953,23 @@ def cumulative_sum( include_initial: bool = False, ) -> NDArray[Any]: ... @overload -def cumulative_sum( +def cumulative_sum[ArrayT: np.ndarray]( x: ArrayLike, /, *, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, - out: _ArrayT, + out: ArrayT, include_initial: bool = False, -) -> _ArrayT: ... +) -> ArrayT: ... 
@overload -def ptp( - a: _ArrayLike[_ScalarT], +def ptp[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], axis: None = None, out: None = None, keepdims: Literal[False] | _NoValueType = ..., -) -> _ScalarT: ... +) -> ScalarT: ... @overload def ptp( a: ArrayLike, @@ -995,30 +978,30 @@ def ptp( keepdims: bool | _NoValueType = ..., ) -> Any: ... @overload -def ptp( +def ptp[ArrayT: np.ndarray]( a: ArrayLike, axis: _ShapeLike | None, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def ptp( +def ptp[ArrayT: np.ndarray]( a: ArrayLike, axis: _ShapeLike | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def amax( - a: _ArrayLike[_ScalarT], +def amax[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], axis: None = None, out: None = None, keepdims: Literal[False] | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ScalarT: ... +) -> ScalarT: ... @overload def amax( a: ArrayLike, @@ -1029,34 +1012,34 @@ def amax( where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload -def amax( +def amax[ArrayT: np.ndarray]( a: ArrayLike, axis: _ShapeLike | None, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def amax( +def amax[ArrayT: np.ndarray]( a: ArrayLike, axis: _ShapeLike | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... 
@overload -def amin( - a: _ArrayLike[_ScalarT], +def amin[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], axis: None = None, out: None = None, keepdims: Literal[False] | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ScalarT: ... +) -> ScalarT: ... @overload def amin( a: ArrayLike, @@ -1067,24 +1050,24 @@ def amin( where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload -def amin( +def amin[ArrayT: np.ndarray]( a: ArrayLike, axis: _ShapeLike | None, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def amin( +def amin[ArrayT: np.ndarray]( a: ArrayLike, axis: _ShapeLike | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... # TODO: `np.prod()``: For object arrays `initial` does not necessarily # have to be a numerical scalar. @@ -1154,26 +1137,26 @@ def prod( where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload -def prod( +def prod[ScalarT: np.generic]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: None, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], out: None = None, keepdims: Literal[False] | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ScalarT: ... +) -> ScalarT: ... @overload -def prod( +def prod[ScalarT: np.generic]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: None = None, *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], out: None = None, keepdims: Literal[False] | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ScalarT: ... +) -> ScalarT: ... 
@overload def prod( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, @@ -1185,26 +1168,26 @@ def prod( where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload -def prod( +def prod[ArrayT: np.ndarray]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: _ShapeLike | None, dtype: DTypeLike | None, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def prod( +def prod[ArrayT: np.ndarray]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... # TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload @@ -1250,20 +1233,20 @@ def cumprod( out: None = None, ) -> NDArray[object_]: ... @overload -def cumprod( +def cumprod[ScalarT: np.generic]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: SupportsIndex | None, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], out: None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload -def cumprod( +def cumprod[ScalarT: np.generic]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: SupportsIndex | None = None, *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], out: None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def cumprod( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, @@ -1272,20 +1255,20 @@ def cumprod( out: None = None, ) -> NDArray[Any]: ... @overload -def cumprod( +def cumprod[ArrayT: np.ndarray]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: SupportsIndex | None, dtype: DTypeLike | None, - out: _ArrayT, -) -> _ArrayT: ... + out: ArrayT, +) -> ArrayT: ... 
@overload -def cumprod( +def cumprod[ArrayT: np.ndarray]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, -) -> _ArrayT: ... + out: ArrayT, +) -> ArrayT: ... # TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload @@ -1349,15 +1332,15 @@ def cumulative_prod( include_initial: bool = False, ) -> NDArray[object_]: ... @overload -def cumulative_prod( +def cumulative_prod[ScalarT: np.generic]( x: _ArrayLikeComplex_co | _ArrayLikeObject_co, /, *, axis: SupportsIndex | None = None, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], out: None = None, include_initial: bool = False, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def cumulative_prod( x: _ArrayLikeComplex_co | _ArrayLikeObject_co, @@ -1369,15 +1352,15 @@ def cumulative_prod( include_initial: bool = False, ) -> NDArray[Any]: ... @overload -def cumulative_prod( +def cumulative_prod[ArrayT: np.ndarray]( x: _ArrayLikeComplex_co | _ArrayLikeObject_co, /, *, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, - out: _ArrayT, + out: ArrayT, include_initial: bool = False, -) -> _ArrayT: ... +) -> ArrayT: ... def ndim(a: ArrayLike) -> int: ... @@ -1391,11 +1374,11 @@ def around( out: None = None, ) -> float16: ... @overload -def around( - a: _NumberOrObjectT, +def around[NumberOrObjectT: np.number | np.object_]( + a: NumberOrObjectT, decimals: SupportsIndex = 0, out: None = None, -) -> _NumberOrObjectT: ... +) -> NumberOrObjectT: ... @overload def around( a: _ComplexLike_co | object_, @@ -1409,11 +1392,11 @@ def around( out: None = None, ) -> NDArray[float16]: ... @overload -def around( - a: _ArrayLike[_NumberOrObjectT], +def around[NumberOrObjectT: np.number | np.object_]( + a: _ArrayLike[NumberOrObjectT], decimals: SupportsIndex = 0, out: None = None, -) -> NDArray[_NumberOrObjectT]: ... +) -> NDArray[NumberOrObjectT]: ... 
@overload def around( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, @@ -1421,18 +1404,18 @@ def around( out: None = None, ) -> NDArray[Any]: ... @overload -def around( +def around[ArrayT: np.ndarray]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, decimals: SupportsIndex, - out: _ArrayT, -) -> _ArrayT: ... + out: ArrayT, +) -> ArrayT: ... @overload -def around( +def around[ArrayT: np.ndarray]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, decimals: SupportsIndex = 0, *, - out: _ArrayT, -) -> _ArrayT: ... + out: ArrayT, +) -> ArrayT: ... # TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload @@ -1464,77 +1447,77 @@ def mean( keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., -) -> timedelta64: ... +) -> np.timedelta64: ... @overload -def mean( +def mean[ArrayT: np.ndarray]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: _ShapeLike | None, dtype: DTypeLike | None, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def mean( +def mean[ArrayT: np.ndarray]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def mean( +def mean[ScalarT: np.generic]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: None, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], out: None = None, keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ScalarT: ... +) -> ScalarT: ... 
@overload -def mean( +def mean[ScalarT: np.generic]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: None = None, *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], out: None = None, keepdims: Literal[False] | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ScalarT: ... +) -> ScalarT: ... @overload -def mean( +def mean[ScalarT: np.generic]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: _ShapeLike | None, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], out: None, keepdims: Literal[True, 1], *, where: _ArrayLikeBool_co | _NoValueType = ..., -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload -def mean( +def mean[ScalarT: np.generic]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: _ShapeLike | None, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], out: None = None, *, keepdims: bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ScalarT | NDArray[_ScalarT]: ... +) -> ScalarT | NDArray[ScalarT]: ... @overload -def mean( +def mean[ScalarT: np.generic]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: _ShapeLike | None = None, *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], out: None = None, keepdims: bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ScalarT | NDArray[_ScalarT]: ... +) -> ScalarT | NDArray[ScalarT]: ... @overload def mean( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, @@ -1573,10 +1556,10 @@ def std( correction: float | _NoValueType = ..., ) -> Any: ... 
@overload -def std( +def std[ScalarT: np.generic]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: None, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], out: None = None, ddof: float = 0, keepdims: Literal[False] | _NoValueType = ..., @@ -1584,20 +1567,20 @@ def std( where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., correction: float | _NoValueType = ..., -) -> _ScalarT: ... +) -> ScalarT: ... @overload -def std( +def std[ScalarT: np.generic]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: None = None, *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], out: None = None, ddof: float = 0, keepdims: Literal[False] | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., correction: float | _NoValueType = ..., -) -> _ScalarT: ... +) -> ScalarT: ... @overload def std( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, @@ -1612,31 +1595,31 @@ def std( correction: float | _NoValueType = ..., ) -> Any: ... @overload -def std( +def std[ArrayT: np.ndarray]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: _ShapeLike | None, dtype: DTypeLike | None, - out: _ArrayT, + out: ArrayT, ddof: float = 0, keepdims: bool | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., correction: float | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def std( +def std[ArrayT: np.ndarray]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, + out: ArrayT, ddof: float = 0, keepdims: bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., correction: float | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... 
@overload def var( @@ -1665,10 +1648,10 @@ def var( correction: float | _NoValueType = ..., ) -> Any: ... @overload -def var( +def var[ScalarT: np.generic]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: None, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], out: None = None, ddof: float = 0, keepdims: Literal[False] | _NoValueType = ..., @@ -1676,20 +1659,20 @@ def var( where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., correction: float | _NoValueType = ..., -) -> _ScalarT: ... +) -> ScalarT: ... @overload -def var( +def var[ScalarT: np.generic]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: None = None, *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], out: None = None, ddof: float = 0, keepdims: Literal[False] | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., correction: float | _NoValueType = ..., -) -> _ScalarT: ... +) -> ScalarT: ... @overload def var( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, @@ -1704,31 +1687,31 @@ def var( correction: float | _NoValueType = ..., ) -> Any: ... @overload -def var( +def var[ArrayT: np.ndarray]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: _ShapeLike | None, dtype: DTypeLike | None, - out: _ArrayT, + out: ArrayT, ddof: float = 0, keepdims: bool | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., correction: float | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... 
@overload -def var( +def var[ArrayT: np.ndarray]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, + out: ArrayT, ddof: float = 0, keepdims: bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., correction: float | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... max = amax min = amin diff --git a/numpy/_core/function_base.pyi b/numpy/_core/function_base.pyi index 19e1238c4e15..060a44d416ea 100644 --- a/numpy/_core/function_base.pyi +++ b/numpy/_core/function_base.pyi @@ -1,5 +1,5 @@ from _typeshed import Incomplete -from typing import Literal as L, SupportsIndex, TypeAlias, TypeVar, overload +from typing import Literal as L, SupportsIndex, overload import numpy as np from numpy._typing import ( @@ -13,9 +13,9 @@ from numpy._typing._array_like import _DualArrayLike __all__ = ["geomspace", "linspace", "logspace"] -_ScalarT = TypeVar("_ScalarT", bound=np.generic) +type _ToArrayFloat64 = _DualArrayLike[np.dtype[np.float64 | np.integer | np.bool], float] -_ToArrayFloat64: TypeAlias = _DualArrayLike[np.dtype[np.float64 | np.integer | np.bool], float] +### @overload def linspace( @@ -54,29 +54,29 @@ def linspace( device: L["cpu"] | None = None, ) -> NDArray[np.complexfloating]: ... @overload -def linspace( +def linspace[ScalarT: np.generic]( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, num: SupportsIndex, endpoint: bool, retstep: L[False], - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], axis: SupportsIndex = 0, *, device: L["cpu"] | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... 
@overload -def linspace( +def linspace[ScalarT: np.generic]( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, num: SupportsIndex = 50, endpoint: bool = True, retstep: L[False] = False, *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], axis: SupportsIndex = 0, device: L["cpu"] | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def linspace( start: _ArrayLikeComplex_co, @@ -126,17 +126,17 @@ def linspace( device: L["cpu"] | None = None, ) -> tuple[NDArray[np.complexfloating], np.complexfloating]: ... @overload -def linspace( +def linspace[ScalarT: np.generic]( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, num: SupportsIndex = 50, endpoint: bool = True, *, retstep: L[True], - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], axis: SupportsIndex = 0, device: L["cpu"] | None = None, -) -> tuple[NDArray[_ScalarT], _ScalarT]: ... +) -> tuple[NDArray[ScalarT], ScalarT]: ... @overload def linspace( start: _ArrayLikeComplex_co, @@ -181,26 +181,26 @@ def logspace( axis: SupportsIndex = 0, ) -> NDArray[np.complexfloating]: ... @overload -def logspace( +def logspace[ScalarT: np.generic]( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, num: SupportsIndex, endpoint: bool, base: _ArrayLikeComplex_co, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], axis: SupportsIndex = 0, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload -def logspace( +def logspace[ScalarT: np.generic]( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, num: SupportsIndex = 50, endpoint: bool = True, base: _ArrayLikeComplex_co = 10.0, *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], axis: SupportsIndex = 0, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def logspace( start: _ArrayLikeComplex_co, @@ -240,24 +240,24 @@ def geomspace( axis: SupportsIndex = 0, ) -> NDArray[np.complexfloating]: ... 
@overload -def geomspace( +def geomspace[ScalarT: np.generic]( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, num: SupportsIndex, endpoint: bool, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], axis: SupportsIndex = 0, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload -def geomspace( +def geomspace[ScalarT: np.generic]( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, num: SupportsIndex = 50, endpoint: bool = True, *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], axis: SupportsIndex = 0, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def geomspace( start: _ArrayLikeComplex_co, diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index 812e0b562b9d..e516c3cdab72 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -1,7 +1,7 @@ # TODO: Sort out any and all missing functions in this namespace import datetime as dt from _typeshed import Incomplete, StrOrBytesPath, SupportsLenAndGetItem -from collections.abc import Callable, Iterable, Sequence +from collections.abc import Buffer, Callable, Iterable, Sequence from typing import ( Any, ClassVar, @@ -9,8 +9,6 @@ from typing import ( Literal as L, Protocol, SupportsIndex, - TypeAlias, - TypeVar, final, overload, type_check_only, @@ -19,7 +17,6 @@ from typing_extensions import CapsuleType import numpy as np from numpy import ( # type: ignore[attr-defined] # Python >=3.12 - _AnyShapeT, _CastingKind, _CopyMode, _ModeKind, @@ -27,7 +24,6 @@ from numpy import ( # type: ignore[attr-defined] # Python >=3.12 _NDIterFlagsOp, _OrderCF, _OrderKACF, - _SupportsBuffer, _SupportsFileMethods, broadcast, busdaycalendar, @@ -41,7 +37,6 @@ from numpy import ( # type: ignore[attr-defined] # Python >=3.12 float64, floating, from_dlpack, - generic, int_, interp, intp, @@ -183,22 +178,11 @@ __all__ = [ "zeros", ] -_ScalarT = TypeVar("_ScalarT", bound=generic) -_DTypeT = TypeVar("_DTypeT", bound=np.dtype) -_ArrayT = 
TypeVar("_ArrayT", bound=ndarray) -_ArrayT_co = TypeVar("_ArrayT_co", bound=ndarray, covariant=True) -_ShapeT = TypeVar("_ShapeT", bound=_Shape) -# TODO: fix the names of these typevars -_ReturnType = TypeVar("_ReturnType") -_IDType = TypeVar("_IDType") -_Nin = TypeVar("_Nin", bound=int) -_Nout = TypeVar("_Nout", bound=int) - -_Array: TypeAlias = ndarray[_ShapeT, dtype[_ScalarT]] -_Array1D: TypeAlias = ndarray[tuple[int], dtype[_ScalarT]] +type _Array[ShapeT: _Shape, ScalarT: np.generic] = ndarray[ShapeT, dtype[ScalarT]] +type _Array1D[ScalarT: np.generic] = ndarray[tuple[int], dtype[ScalarT]] # Valid time units -_UnitKind: TypeAlias = L[ +type _UnitKind = L[ "Y", "M", "D", @@ -212,7 +196,7 @@ _UnitKind: TypeAlias = L[ "fs", "as", ] -_RollKind: TypeAlias = L[ # `raise` is deliberately excluded +type _RollKind = L[ # `raise` is deliberately excluded "nat", "forward", "following", @@ -222,9 +206,16 @@ _RollKind: TypeAlias = L[ # `raise` is deliberately excluded "modifiedpreceding", ] +type _ArangeScalar = np.integer | np.floating | np.datetime64 | np.timedelta64 + +# The datetime functions perform unsafe casts to `datetime64[D]`, +# so a lot of different argument types are allowed here +type _ToDates = dt.date | _NestedSequence[dt.date] +type _ToDeltas = dt.timedelta | _NestedSequence[dt.timedelta] + @type_check_only -class _SupportsArray(Protocol[_ArrayT_co]): - def __array__(self, /) -> _ArrayT_co: ... +class _SupportsArray[ArrayT_co: np.ndarray](Protocol): + def __array__(self, /) -> ArrayT_co: ... @type_check_only class _ConstructorEmpty(Protocol): @@ -241,27 +232,27 @@ class _ConstructorEmpty(Protocol): like: _SupportsArrayFunc | None = None, ) -> _Array1D[float64]: ... 
@overload - def __call__( + def __call__[DTypeT: np.dtype]( self, /, shape: SupportsIndex, - dtype: _DTypeT | _SupportsDType[_DTypeT], + dtype: DTypeT | _SupportsDType[DTypeT], order: _OrderCF = "C", *, device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, - ) -> ndarray[tuple[int], _DTypeT]: ... + ) -> ndarray[tuple[int], DTypeT]: ... @overload - def __call__( + def __call__[ScalarT: np.generic]( self, /, shape: SupportsIndex, - dtype: type[_ScalarT], + dtype: type[ScalarT], order: _OrderCF = "C", *, device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, - ) -> _Array1D[_ScalarT]: ... + ) -> _Array1D[ScalarT]: ... @overload def __call__( self, @@ -276,49 +267,49 @@ class _ConstructorEmpty(Protocol): # known shape @overload - def __call__( + def __call__[ShapeT: _Shape]( self, /, - shape: _AnyShapeT, + shape: ShapeT, dtype: None = None, order: _OrderCF = "C", *, device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, - ) -> _Array[_AnyShapeT, float64]: ... + ) -> _Array[ShapeT, float64]: ... @overload - def __call__( + def __call__[ShapeT: _Shape, DTypeT: np.dtype]( self, /, - shape: _AnyShapeT, - dtype: _DTypeT | _SupportsDType[_DTypeT], + shape: ShapeT, + dtype: DTypeT | _SupportsDType[DTypeT], order: _OrderCF = "C", *, device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, - ) -> ndarray[_AnyShapeT, _DTypeT]: ... + ) -> ndarray[ShapeT, DTypeT]: ... @overload - def __call__( + def __call__[ShapeT: _Shape, ScalarT: np.generic]( self, /, - shape: _AnyShapeT, - dtype: type[_ScalarT], + shape: ShapeT, + dtype: type[ScalarT], order: _OrderCF = "C", *, device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, - ) -> _Array[_AnyShapeT, _ScalarT]: ... + ) -> _Array[ShapeT, ScalarT]: ... 
@overload - def __call__( + def __call__[ShapeT: _Shape]( self, /, - shape: _AnyShapeT, + shape: ShapeT, dtype: DTypeLike | None = None, order: _OrderCF = "C", *, device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, - ) -> _Array[_AnyShapeT, Incomplete]: ... + ) -> _Array[ShapeT, Incomplete]: ... # unknown shape @overload @@ -332,25 +323,25 @@ class _ConstructorEmpty(Protocol): like: _SupportsArrayFunc | None = None, ) -> NDArray[float64]: ... @overload - def __call__( + def __call__[DTypeT: np.dtype]( self, /, shape: _ShapeLike, - dtype: _DTypeT | _SupportsDType[_DTypeT], + dtype: DTypeT | _SupportsDType[DTypeT], order: _OrderCF = "C", *, device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, - ) -> ndarray[_AnyShape, _DTypeT]: ... + ) -> ndarray[_AnyShape, DTypeT]: ... @overload - def __call__( + def __call__[ScalarT: np.generic]( self, /, shape: _ShapeLike, - dtype: type[_ScalarT], + dtype: type[ScalarT], order: _OrderCF = "C", *, device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, - ) -> NDArray[_ScalarT]: ... + ) -> NDArray[ScalarT]: ... @overload def __call__( self, @@ -391,7 +382,7 @@ set_datetimeparse_function: Final[Callable[..., object]] = ... def get_handler_name(a: NDArray[Any] = ..., /) -> str | None: ... def get_handler_version(a: NDArray[Any] = ..., /) -> int | None: ... def format_longfloat(x: np.longdouble, precision: int) -> str: ... -def scalar(dtype: _DTypeT, object: bytes | object = ...) -> ndarray[tuple[()], _DTypeT]: ... +def scalar[DTypeT: np.dtype](dtype: DTypeT, object: bytes | object = ...) -> ndarray[tuple[()], DTypeT]: ... def set_typeDict(dict_: dict[str, np.dtype], /) -> None: ... typeinfo: Final[dict[str, np.dtype[np.generic]]] = ... @@ -410,8 +401,8 @@ zeros: Final[_ConstructorEmpty] = ... empty: Final[_ConstructorEmpty] = ... 
@overload -def empty_like( - prototype: _ArrayT, +def empty_like[ArrayT: np.ndarray]( + prototype: ArrayT, /, dtype: None = None, order: _OrderKACF = "K", @@ -419,10 +410,10 @@ def empty_like( shape: _ShapeLike | None = None, *, device: L["cpu"] | None = None, -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def empty_like( - prototype: _ArrayLike[_ScalarT], +def empty_like[ScalarT: np.generic]( + prototype: _ArrayLike[ScalarT], /, dtype: None = None, order: _OrderKACF = "K", @@ -430,18 +421,18 @@ def empty_like( shape: _ShapeLike | None = None, *, device: L["cpu"] | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload -def empty_like( +def empty_like[ScalarT: np.generic]( prototype: Incomplete, /, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], order: _OrderKACF = "K", subok: bool = True, shape: _ShapeLike | None = None, *, device: L["cpu"] | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def empty_like( prototype: Incomplete, @@ -455,8 +446,8 @@ def empty_like( ) -> NDArray[Incomplete]: ... @overload -def array( - object: _ArrayT, +def array[ArrayT: np.ndarray]( + object: ArrayT, dtype: None = None, *, copy: bool | _CopyMode | None = True, @@ -465,10 +456,10 @@ def array( ndmin: int = 0, ndmax: int = 0, like: _SupportsArrayFunc | None = None, -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def array( - object: _SupportsArray[_ArrayT], +def array[ArrayT: np.ndarray]( + object: _SupportsArray[ArrayT], dtype: None = None, *, copy: bool | _CopyMode | None = True, @@ -477,10 +468,10 @@ def array( ndmin: L[0] = 0, ndmax: int = 0, like: _SupportsArrayFunc | None = None, -) -> _ArrayT: ... +) -> ArrayT: ... 
@overload -def array( - object: _ArrayLike[_ScalarT], +def array[ScalarT: np.generic]( + object: _ArrayLike[ScalarT], dtype: None = None, *, copy: bool | _CopyMode | None = True, @@ -489,11 +480,11 @@ def array( ndmin: int = 0, ndmax: int = 0, like: _SupportsArrayFunc | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload -def array( +def array[ScalarT: np.generic]( object: Any, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], *, copy: bool | _CopyMode | None = True, order: _OrderKACF = "K", @@ -501,7 +492,7 @@ def array( ndmin: int = 0, ndmax: int = 0, like: _SupportsArrayFunc | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def array( object: Any, @@ -542,25 +533,25 @@ def normalize_axis_index(axis: int, ndim: int, msg_prefix: str | None = None) -> # NOTE: Allow any sequence of array-like objects @overload -def concatenate( - arrays: _ArrayLike[_ScalarT], +def concatenate[ScalarT: np.generic]( + arrays: _ArrayLike[ScalarT], /, axis: SupportsIndex | None = 0, out: None = None, *, dtype: None = None, casting: _CastingKind | None = "same_kind", -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload -def concatenate( +def concatenate[ScalarT: np.generic]( arrays: SupportsLenAndGetItem[ArrayLike], /, axis: SupportsIndex | None = 0, out: None = None, *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], casting: _CastingKind | None = "same_kind", -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def concatenate( arrays: SupportsLenAndGetItem[ArrayLike], @@ -572,25 +563,25 @@ def concatenate( casting: _CastingKind | None = "same_kind", ) -> NDArray[Incomplete]: ... @overload -def concatenate( +def concatenate[OutT: np.ndarray]( arrays: SupportsLenAndGetItem[ArrayLike], /, axis: SupportsIndex | None = 0, *, - out: _ArrayT, + out: OutT, dtype: DTypeLike | None = None, casting: _CastingKind | None = "same_kind", -) -> _ArrayT: ... +) -> OutT: ... 
@overload -def concatenate( +def concatenate[OutT: np.ndarray]( arrays: SupportsLenAndGetItem[ArrayLike], /, axis: SupportsIndex | None, - out: _ArrayT, + out: OutT, *, dtype: DTypeLike | None = None, casting: _CastingKind | None = "same_kind", -) -> _ArrayT: ... +) -> OutT: ... def inner(a: ArrayLike, b: ArrayLike, /) -> Incomplete: ... @@ -609,7 +600,7 @@ def result_type(*arrays_and_dtypes: ArrayLike | DTypeLike | None) -> dtype: ... @overload def dot(a: ArrayLike, b: ArrayLike, out: None = None) -> Incomplete: ... @overload -def dot(a: ArrayLike, b: ArrayLike, out: _ArrayT) -> _ArrayT: ... +def dot[OutT: np.ndarray](a: ArrayLike, b: ArrayLike, out: OutT) -> OutT: ... @overload def vdot(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co, /) -> np.bool: ... @@ -633,7 +624,7 @@ def bincount(x: ArrayLike, /, weights: ArrayLike | None = None, minlength: Suppo def copyto(dst: ndarray, src: ArrayLike, casting: _CastingKind = "same_kind", where: object = True) -> None: ... def putmask(a: ndarray, /, mask: _ArrayLikeBool_co, values: ArrayLike) -> None: ... -_BitOrder: TypeAlias = L["big", "little"] +type _BitOrder = L["big", "little"] @overload def packbits(a: _ArrayLikeInt_co, /, axis: None = None, bitorder: _BitOrder = "big") -> ndarray[tuple[int], dtype[uint8]]: ... @@ -657,32 +648,32 @@ def unpackbits( bitorder: _BitOrder = "big", ) -> NDArray[uint8]: ... -_MaxWork: TypeAlias = L[-1, 0] +type _MaxWork = L[-1, 0] # any two python objects will be accepted, not just `ndarray`s def shares_memory(a: object, b: object, /, max_work: _MaxWork = -1) -> bool: ... def may_share_memory(a: object, b: object, /, max_work: _MaxWork = 0) -> bool: ... @overload -def asarray( - a: _ArrayLike[_ScalarT], +def asarray[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], dtype: None = None, order: _OrderKACF = ..., *, device: L["cpu"] | None = ..., copy: bool | None = ..., like: _SupportsArrayFunc | None = ..., -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... 
@overload -def asarray( +def asarray[ScalarT: np.generic]( a: Any, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], order: _OrderKACF = ..., *, device: L["cpu"] | None = ..., copy: bool | None = ..., like: _SupportsArrayFunc | None = ..., -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def asarray( a: Any, @@ -695,35 +686,35 @@ def asarray( ) -> NDArray[Any]: ... @overload -def asanyarray( - a: _ArrayT, # Preserve subclass-information +def asanyarray[ArrayT: np.ndarray]( + a: ArrayT, # Preserve subclass-information dtype: None = None, order: _OrderKACF = ..., *, device: L["cpu"] | None = ..., copy: bool | None = ..., like: _SupportsArrayFunc | None = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def asanyarray( - a: _ArrayLike[_ScalarT], +def asanyarray[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], dtype: None = None, order: _OrderKACF = ..., *, device: L["cpu"] | None = ..., copy: bool | None = ..., like: _SupportsArrayFunc | None = ..., -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload -def asanyarray( +def asanyarray[ScalarT: np.generic]( a: Any, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], order: _OrderKACF = ..., *, device: L["cpu"] | None = ..., copy: bool | None = ..., like: _SupportsArrayFunc | None = ..., -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def asanyarray( a: Any, @@ -736,19 +727,19 @@ def asanyarray( ) -> NDArray[Any]: ... @overload -def ascontiguousarray( - a: _ArrayLike[_ScalarT], +def ascontiguousarray[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], dtype: None = None, *, like: _SupportsArrayFunc | None = ..., -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload -def ascontiguousarray( +def ascontiguousarray[ScalarT: np.generic]( a: Any, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], *, like: _SupportsArrayFunc | None = ..., -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... 
@overload def ascontiguousarray( a: Any, @@ -758,19 +749,19 @@ def ascontiguousarray( ) -> NDArray[Any]: ... @overload -def asfortranarray( - a: _ArrayLike[_ScalarT], +def asfortranarray[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], dtype: None = None, *, like: _SupportsArrayFunc | None = ..., -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload -def asfortranarray( +def asfortranarray[ScalarT: np.generic]( a: Any, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], *, like: _SupportsArrayFunc | None = ..., -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def asfortranarray( a: Any, @@ -792,14 +783,14 @@ def fromstring( like: _SupportsArrayFunc | None = ..., ) -> NDArray[float64]: ... @overload -def fromstring( +def fromstring[ScalarT: np.generic]( string: str | bytes, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], count: SupportsIndex = ..., *, sep: str, like: _SupportsArrayFunc | None = ..., -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def fromstring( string: str | bytes, @@ -811,69 +802,69 @@ def fromstring( ) -> NDArray[Any]: ... @overload -def frompyfunc( - func: Callable[[Any], _ReturnType], /, +def frompyfunc[ReturnT]( + func: Callable[[Any], ReturnT], /, nin: L[1], nout: L[1], *, identity: None = None, -) -> _PyFunc_Nin1_Nout1[_ReturnType, None]: ... +) -> _PyFunc_Nin1_Nout1[ReturnT, None]: ... @overload -def frompyfunc( - func: Callable[[Any], _ReturnType], /, +def frompyfunc[ReturnT, IdentityT]( + func: Callable[[Any], ReturnT], /, nin: L[1], nout: L[1], *, - identity: _IDType, -) -> _PyFunc_Nin1_Nout1[_ReturnType, _IDType]: ... + identity: IdentityT, +) -> _PyFunc_Nin1_Nout1[ReturnT, IdentityT]: ... @overload -def frompyfunc( - func: Callable[[Any, Any], _ReturnType], /, +def frompyfunc[ReturnT]( + func: Callable[[Any, Any], ReturnT], /, nin: L[2], nout: L[1], *, identity: None = None, -) -> _PyFunc_Nin2_Nout1[_ReturnType, None]: ... 
+) -> _PyFunc_Nin2_Nout1[ReturnT, None]: ... @overload -def frompyfunc( - func: Callable[[Any, Any], _ReturnType], /, +def frompyfunc[ReturnT, IdentityT]( + func: Callable[[Any, Any], ReturnT], /, nin: L[2], nout: L[1], *, - identity: _IDType, -) -> _PyFunc_Nin2_Nout1[_ReturnType, _IDType]: ... + identity: IdentityT, +) -> _PyFunc_Nin2_Nout1[ReturnT, IdentityT]: ... @overload -def frompyfunc( - func: Callable[..., _ReturnType], /, - nin: _Nin, +def frompyfunc[ReturnT, NInT: int]( + func: Callable[..., ReturnT], /, + nin: NInT, nout: L[1], *, identity: None = None, -) -> _PyFunc_Nin3P_Nout1[_ReturnType, None, _Nin]: ... +) -> _PyFunc_Nin3P_Nout1[ReturnT, None, NInT]: ... @overload -def frompyfunc( - func: Callable[..., _ReturnType], /, - nin: _Nin, +def frompyfunc[ReturnT, NInT: int, IdentityT]( + func: Callable[..., ReturnT], /, + nin: NInT, nout: L[1], *, - identity: _IDType, -) -> _PyFunc_Nin3P_Nout1[_ReturnType, _IDType, _Nin]: ... + identity: IdentityT, +) -> _PyFunc_Nin3P_Nout1[ReturnT, IdentityT, NInT]: ... @overload -def frompyfunc( - func: Callable[..., _2PTuple[_ReturnType]], /, - nin: _Nin, - nout: _Nout, +def frompyfunc[ReturnT, NInT: int, NOutT: int]( + func: Callable[..., _2PTuple[ReturnT]], /, + nin: NInT, + nout: NOutT, *, identity: None = None, -) -> _PyFunc_Nin1P_Nout2P[_ReturnType, None, _Nin, _Nout]: ... +) -> _PyFunc_Nin1P_Nout2P[ReturnT, None, NInT, NOutT]: ... @overload -def frompyfunc( - func: Callable[..., _2PTuple[_ReturnType]], /, - nin: _Nin, - nout: _Nout, +def frompyfunc[ReturnT, NInT: int, NOutT: int, IdentityT]( + func: Callable[..., _2PTuple[ReturnT]], /, + nin: NInT, + nout: NOutT, *, - identity: _IDType, -) -> _PyFunc_Nin1P_Nout2P[_ReturnType, _IDType, _Nin, _Nout]: ... + identity: IdentityT, +) -> _PyFunc_Nin1P_Nout2P[ReturnT, IdentityT, NInT, NOutT]: ... @overload def frompyfunc( func: Callable[..., Any], /, @@ -894,15 +885,15 @@ def fromfile( like: _SupportsArrayFunc | None = ..., ) -> NDArray[float64]: ... 
@overload -def fromfile( +def fromfile[ScalarT: np.generic]( file: StrOrBytesPath | _SupportsFileMethods, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], count: SupportsIndex = ..., sep: str = ..., offset: SupportsIndex = ..., *, like: _SupportsArrayFunc | None = ..., -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def fromfile( file: StrOrBytesPath | _SupportsFileMethods, @@ -915,13 +906,13 @@ def fromfile( ) -> NDArray[Any]: ... @overload -def fromiter( +def fromiter[ScalarT: np.generic]( iter: Iterable[Any], - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], count: SupportsIndex = ..., *, like: _SupportsArrayFunc | None = ..., -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def fromiter( iter: Iterable[Any], @@ -933,7 +924,7 @@ def fromiter( @overload def frombuffer( - buffer: _SupportsBuffer, + buffer: Buffer, dtype: None = None, count: SupportsIndex = ..., offset: SupportsIndex = ..., @@ -941,17 +932,17 @@ def frombuffer( like: _SupportsArrayFunc | None = ..., ) -> NDArray[float64]: ... @overload -def frombuffer( - buffer: _SupportsBuffer, - dtype: _DTypeLike[_ScalarT], +def frombuffer[ScalarT: np.generic]( + buffer: Buffer, + dtype: _DTypeLike[ScalarT], count: SupportsIndex = ..., offset: SupportsIndex = ..., *, like: _SupportsArrayFunc | None = ..., -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def frombuffer( - buffer: _SupportsBuffer, + buffer: Buffer, dtype: DTypeLike | None = ..., count: SupportsIndex = ..., offset: SupportsIndex = ..., @@ -959,22 +950,19 @@ def frombuffer( like: _SupportsArrayFunc | None = ..., ) -> NDArray[Any]: ... 
-_ArangeScalar: TypeAlias = np.integer | np.floating | np.datetime64 | np.timedelta64 -_ArangeScalarT = TypeVar("_ArangeScalarT", bound=_ArangeScalar) - # keep in sync with ma.core.arange # NOTE: The `float64 | Any` return types needed to avoid incompatible overlapping overloads @overload # dtype= -def arange( +def arange[ScalarT: _ArangeScalar]( start_or_stop: _ArangeScalar | float, /, stop: _ArangeScalar | float | None = None, step: _ArangeScalar | float | None = 1, *, - dtype: _DTypeLike[_ArangeScalarT], + dtype: _DTypeLike[ScalarT], device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, -) -> _Array1D[_ArangeScalarT]: ... +) -> _Array1D[ScalarT]: ... @overload # (int-like, int-like?, int-like?) def arange( start_or_stop: _IntLike_co, @@ -1056,12 +1044,6 @@ def arange( # def datetime_data(dtype: str | _DTypeLike[datetime64 | timedelta64], /) -> tuple[str, int]: ... -# The datetime functions perform unsafe casts to `datetime64[D]`, -# so a lot of different argument types are allowed here - -_ToDates: TypeAlias = dt.date | _NestedSequence[dt.date] -_ToDeltas: TypeAlias = dt.timedelta | _NestedSequence[dt.timedelta] - @overload def busday_count( begindates: _ScalarLike_co | dt.date, @@ -1081,24 +1063,24 @@ def busday_count( out: None = None, ) -> NDArray[int_]: ... @overload -def busday_count( +def busday_count[OutT: np.ndarray]( begindates: ArrayLike | _ToDates, enddates: ArrayLike | _ToDates, weekmask: ArrayLike = "1111100", holidays: ArrayLike | _ToDates = (), busdaycal: busdaycalendar | None = None, *, - out: _ArrayT, -) -> _ArrayT: ... + out: OutT, +) -> OutT: ... @overload -def busday_count( +def busday_count[OutT: np.ndarray]( begindates: ArrayLike | _ToDates, enddates: ArrayLike | _ToDates, weekmask: ArrayLike, holidays: ArrayLike | _ToDates, busdaycal: busdaycalendar | None, - out: _ArrayT, -) -> _ArrayT: ... + out: OutT, +) -> OutT: ... # `roll="raise"` is (more or less?) 
equivalent to `casting="safe"` @overload @@ -1122,7 +1104,7 @@ def busday_offset( out: None = None, ) -> NDArray[datetime64]: ... @overload -def busday_offset( +def busday_offset[OutT: np.ndarray]( dates: _ArrayLike[datetime64] | _ToDates, offsets: _ArrayLikeTD64_co | _ToDeltas, roll: L["raise"] = "raise", @@ -1130,18 +1112,18 @@ def busday_offset( holidays: ArrayLike | _ToDates | None = None, busdaycal: busdaycalendar | None = None, *, - out: _ArrayT, -) -> _ArrayT: ... + out: OutT, +) -> OutT: ... @overload -def busday_offset( +def busday_offset[OutT: np.ndarray]( dates: _ArrayLike[datetime64] | _ToDates, offsets: _ArrayLikeTD64_co | _ToDeltas, roll: L["raise"], weekmask: ArrayLike, holidays: ArrayLike | _ToDates | None, busdaycal: busdaycalendar | None, - out: _ArrayT, -) -> _ArrayT: ... + out: OutT, +) -> OutT: ... @overload def busday_offset( dates: _ScalarLike_co | dt.date, @@ -1163,7 +1145,7 @@ def busday_offset( out: None = None, ) -> NDArray[datetime64]: ... @overload -def busday_offset( +def busday_offset[OutT: np.ndarray]( dates: ArrayLike | _ToDates, offsets: ArrayLike | _ToDeltas, roll: _RollKind, @@ -1171,18 +1153,18 @@ def busday_offset( holidays: ArrayLike | _ToDates | None = None, busdaycal: busdaycalendar | None = None, *, - out: _ArrayT, -) -> _ArrayT: ... + out: OutT, +) -> OutT: ... @overload -def busday_offset( +def busday_offset[OutT: np.ndarray]( dates: ArrayLike | _ToDates, offsets: ArrayLike | _ToDeltas, roll: _RollKind, weekmask: ArrayLike, holidays: ArrayLike | _ToDates | None, busdaycal: busdaycalendar | None, - out: _ArrayT, -) -> _ArrayT: ... + out: OutT, +) -> OutT: ... @overload def is_busday( @@ -1201,24 +1183,24 @@ def is_busday( out: None = None, ) -> NDArray[np.bool]: ... @overload -def is_busday( +def is_busday[OutT: np.ndarray]( dates: ArrayLike | _ToDates, weekmask: ArrayLike = "1111100", holidays: ArrayLike | _ToDates | None = None, busdaycal: busdaycalendar | None = None, *, - out: _ArrayT, -) -> _ArrayT: ... 
+ out: OutT, +) -> OutT: ... @overload -def is_busday( +def is_busday[OutT: np.ndarray]( dates: ArrayLike | _ToDates, weekmask: ArrayLike, holidays: ArrayLike | _ToDates | None, busdaycal: busdaycalendar | None, - out: _ArrayT, -) -> _ArrayT: ... + out: OutT, +) -> OutT: ... -_TimezoneContext: TypeAlias = L["naive", "UTC", "local"] | dt.tzinfo +type _TimezoneContext = L["naive", "UTC", "local"] | dt.tzinfo @overload def datetime_as_string( @@ -1252,7 +1234,7 @@ def compare_chararrays( def add_docstring(obj: Callable[..., Any], docstring: str, /) -> None: ... -_GetItemKeys: TypeAlias = L[ +type _GetItemKeys = L[ "C", "CONTIGUOUS", "C_CONTIGUOUS", "F", "FORTRAN", "F_CONTIGUOUS", "W", "WRITEABLE", @@ -1265,7 +1247,7 @@ _GetItemKeys: TypeAlias = L[ "FNC", "FORC", ] -_SetItemKeys: TypeAlias = L[ +type _SetItemKeys = L[ "A", "ALIGNED", "W", "WRITEABLE", "X", "WRITEBACKIFCOPY", diff --git a/numpy/_core/numeric.pyi b/numpy/_core/numeric.pyi index 4ad2881cf17e..f7399ef44856 100644 --- a/numpy/_core/numeric.pyi +++ b/numpy/_core/numeric.pyi @@ -7,7 +7,6 @@ from typing import ( Literal as L, SupportsAbs, SupportsIndex, - TypeAlias, TypeGuard, TypeVar, overload, @@ -631,23 +630,6 @@ __all__ = [ "zeros_like", ] -_T = TypeVar("_T") -_ScalarT = TypeVar("_ScalarT", bound=generic) -_NumberObjectT = TypeVar("_NumberObjectT", bound=number | object_) -_NumericScalarT = TypeVar("_NumericScalarT", bound=number | timedelta64 | object_) -_DTypeT = TypeVar("_DTypeT", bound=dtype) -_ArrayT = TypeVar("_ArrayT", bound=np.ndarray[Any, Any]) -_ShapeT = TypeVar("_ShapeT", bound=_Shape) - -_AnyShapeT = TypeVar( - "_AnyShapeT", - tuple[()], - tuple[int], - tuple[int, int], - tuple[int, int, int], - tuple[int, int, int, int], - tuple[int, ...], -) _AnyNumericScalarT = TypeVar( "_AnyNumericScalarT", np.int8, np.int16, np.int32, np.int64, @@ -658,63 +640,63 @@ _AnyNumericScalarT = TypeVar( np.object_, ) -_CorrelateMode: TypeAlias = L["valid", "same", "full"] +type _CorrelateMode = L["valid", "same", 
"full"] -_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] -_Array2D: TypeAlias = np.ndarray[tuple[int, int], np.dtype[_ScalarT]] -_Array3D: TypeAlias = np.ndarray[tuple[int, int, int], np.dtype[_ScalarT]] -_Array4D: TypeAlias = np.ndarray[tuple[int, int, int, int], np.dtype[_ScalarT]] +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] +type _Array2D[ScalarT: np.generic] = np.ndarray[tuple[int, int], np.dtype[ScalarT]] +type _Array3D[ScalarT: np.generic] = np.ndarray[tuple[int, int, int], np.dtype[ScalarT]] +type _Array4D[ScalarT: np.generic] = np.ndarray[tuple[int, int, int, int], np.dtype[ScalarT]] -_Int_co: TypeAlias = np.integer | np.bool -_Float_co: TypeAlias = np.floating | _Int_co -_Number_co: TypeAlias = np.number | np.bool -_TD64_co: TypeAlias = np.timedelta64 | _Int_co +type _Int_co = np.integer | np.bool +type _Float_co = np.floating | _Int_co +type _Number_co = np.number | np.bool +type _TD64_co = np.timedelta64 | _Int_co -_ArrayLike1D: TypeAlias = _SupportsArray[np.dtype[_ScalarT]] | Sequence[_ScalarT] -_ArrayLike1DBool_co: TypeAlias = _SupportsArray[np.dtype[np.bool]] | Sequence[py_bool | np.bool] -_ArrayLike1DInt_co: TypeAlias = _SupportsArray[np.dtype[_Int_co]] | Sequence[int | _Int_co] -_ArrayLike1DFloat_co: TypeAlias = _SupportsArray[np.dtype[_Float_co]] | Sequence[float | _Float_co] -_ArrayLike1DNumber_co: TypeAlias = _SupportsArray[np.dtype[_Number_co]] | Sequence[complex | _Number_co] -_ArrayLike1DTD64_co: TypeAlias = _ArrayLike1D[_TD64_co] -_ArrayLike1DObject_co: TypeAlias = _ArrayLike1D[np.object_] +type _ArrayLike1D[ScalarT: np.generic] = _SupportsArray[np.dtype[ScalarT]] | Sequence[ScalarT] +type _ArrayLike1DBool_co = _SupportsArray[np.dtype[np.bool]] | Sequence[py_bool | np.bool] +type _ArrayLike1DInt_co = _SupportsArray[np.dtype[_Int_co]] | Sequence[int | _Int_co] +type _ArrayLike1DFloat_co = _SupportsArray[np.dtype[_Float_co]] | Sequence[float | _Float_co] +type _ArrayLike1DNumber_co = 
_SupportsArray[np.dtype[_Number_co]] | Sequence[complex | _Number_co] +type _ArrayLike1DTD64_co = _ArrayLike1D[_TD64_co] +type _ArrayLike1DObject_co = _ArrayLike1D[np.object_] -_DTypeLikeInt: TypeAlias = type[int] | _IntCodes -_DTypeLikeFloat64: TypeAlias = type[float] | _Float64Codes | _DoubleCodes -_DTypeLikeComplex128: TypeAlias = type[complex] | _Complex128Codes | _CDoubleCodes +type _DTypeLikeInt = type[int] | _IntCodes +type _DTypeLikeFloat64 = type[float] | _Float64Codes | _DoubleCodes +type _DTypeLikeComplex128 = type[complex] | _Complex128Codes | _CDoubleCodes ### # keep in sync with `ones_like` @overload -def zeros_like( - a: _ArrayT, +def zeros_like[ArrayT: np.ndarray]( + a: ArrayT, dtype: None = None, order: _OrderKACF = "K", subok: L[True] = True, shape: None = None, *, device: L["cpu"] | None = None, -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def zeros_like( - a: _ArrayLike[_ScalarT], +def zeros_like[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], dtype: None = None, order: _OrderKACF = "K", subok: py_bool = True, shape: _ShapeLike | None = None, *, device: L["cpu"] | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload -def zeros_like( +def zeros_like[ScalarT: np.generic]( a: object, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], order: _OrderKACF = "K", subok: py_bool = True, shape: _ShapeLike | None = None, *, device: L["cpu"] | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def zeros_like( a: object, @@ -730,35 +712,35 @@ ones: Final[_ConstructorEmpty] # keep in sync with `zeros_like` @overload -def ones_like( - a: _ArrayT, +def ones_like[ArrayT: np.ndarray]( + a: ArrayT, dtype: None = None, order: _OrderKACF = "K", subok: L[True] = True, shape: None = None, *, device: L["cpu"] | None = None, -) -> _ArrayT: ... +) -> ArrayT: ... 
@overload -def ones_like( - a: _ArrayLike[_ScalarT], +def ones_like[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], dtype: None = None, order: _OrderKACF = "K", subok: py_bool = True, shape: _ShapeLike | None = None, *, device: L["cpu"] | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload -def ones_like( +def ones_like[ScalarT: np.generic]( a: object, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], order: _OrderKACF = "K", subok: py_bool = True, shape: _ShapeLike | None = None, *, device: L["cpu"] | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def ones_like( a: object, @@ -773,35 +755,35 @@ def ones_like( # TODO: Add overloads for bool, int, float, complex, str, bytes, and memoryview # 1-D shape @overload -def full( +def full[ScalarT: np.generic]( shape: SupportsIndex, - fill_value: _ScalarT, + fill_value: ScalarT, dtype: None = None, order: _OrderCF = "C", *, device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, -) -> _Array[tuple[int], _ScalarT]: ... +) -> _Array[tuple[int], ScalarT]: ... @overload -def full( +def full[DTypeT: np.dtype]( shape: SupportsIndex, fill_value: Any, - dtype: _DTypeT | _SupportsDType[_DTypeT], + dtype: DTypeT | _SupportsDType[DTypeT], order: _OrderCF = "C", *, device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, -) -> np.ndarray[tuple[int], _DTypeT]: ... +) -> np.ndarray[tuple[int], DTypeT]: ... @overload -def full( +def full[ScalarT: np.generic]( shape: SupportsIndex, fill_value: Any, - dtype: type[_ScalarT], + dtype: type[ScalarT], order: _OrderCF = "C", *, device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, -) -> _Array[tuple[int], _ScalarT]: ... +) -> _Array[tuple[int], ScalarT]: ... @overload def full( shape: SupportsIndex, @@ -814,76 +796,76 @@ def full( ) -> _Array[tuple[int], Any]: ... 
# known shape @overload -def full( - shape: _AnyShapeT, - fill_value: _ScalarT, +def full[ShapeT: _Shape, ScalarT: np.generic]( + shape: ShapeT, + fill_value: ScalarT, dtype: None = None, order: _OrderCF = "C", *, device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, -) -> _Array[_AnyShapeT, _ScalarT]: ... +) -> _Array[ShapeT, ScalarT]: ... @overload -def full( - shape: _AnyShapeT, +def full[ShapeT: _Shape, DTypeT: np.dtype]( + shape: ShapeT, fill_value: Any, - dtype: _DTypeT | _SupportsDType[_DTypeT], + dtype: DTypeT | _SupportsDType[DTypeT], order: _OrderCF = "C", *, device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, -) -> np.ndarray[_AnyShapeT, _DTypeT]: ... +) -> np.ndarray[ShapeT, DTypeT]: ... @overload -def full( - shape: _AnyShapeT, +def full[ShapeT: _Shape, ScalarT: np.generic]( + shape: ShapeT, fill_value: Any, - dtype: type[_ScalarT], + dtype: type[ScalarT], order: _OrderCF = "C", *, device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, -) -> _Array[_AnyShapeT, _ScalarT]: ... +) -> _Array[ShapeT, ScalarT]: ... @overload -def full( - shape: _AnyShapeT, +def full[ShapeT: _Shape]( + shape: ShapeT, fill_value: Any, dtype: DTypeLike | None = None, order: _OrderCF = "C", *, device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, -) -> _Array[_AnyShapeT, Any]: ... +) -> _Array[ShapeT, Any]: ... # unknown shape @overload -def full( +def full[ScalarT: np.generic]( shape: _ShapeLike, - fill_value: _ScalarT, + fill_value: ScalarT, dtype: None = None, order: _OrderCF = "C", *, device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... 
@overload -def full( +def full[DTypeT: np.dtype]( shape: _ShapeLike, fill_value: Any, - dtype: _DTypeT | _SupportsDType[_DTypeT], + dtype: DTypeT | _SupportsDType[DTypeT], order: _OrderCF = "C", *, device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, -) -> np.ndarray[Any, _DTypeT]: ... +) -> np.ndarray[Any, DTypeT]: ... @overload -def full( +def full[ScalarT: np.generic]( shape: _ShapeLike, fill_value: Any, - dtype: type[_ScalarT], + dtype: type[ScalarT], order: _OrderCF = "C", *, device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def full( shape: _ShapeLike, @@ -896,8 +878,8 @@ def full( ) -> NDArray[Any]: ... @overload -def full_like( - a: _ArrayT, +def full_like[ArrayT: np.ndarray]( + a: ArrayT, fill_value: object, dtype: None = None, order: _OrderKACF = "K", @@ -905,10 +887,10 @@ def full_like( shape: None = None, *, device: L["cpu"] | None = None, -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def full_like( - a: _ArrayLike[_ScalarT], +def full_like[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], fill_value: object, dtype: None = None, order: _OrderKACF = "K", @@ -916,18 +898,18 @@ def full_like( shape: _ShapeLike | None = None, *, device: L["cpu"] | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload -def full_like( +def full_like[ScalarT: np.generic]( a: object, fill_value: object, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], order: _OrderKACF = "K", subok: py_bool = True, shape: _ShapeLike | None = None, *, device: L["cpu"] | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def full_like( a: object, @@ -1015,7 +997,7 @@ def outer(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, out: None = None) -> @overload def outer(a: _ArrayLikeTD64_co, b: _ArrayLikeTD64_co, out: None = None) -> _Array2D[np.timedelta64 | Any]: ... 
@overload -def outer(a: _ArrayLikeNumber_co | _ArrayLikeTD64_co, b: _ArrayLikeNumber_co | _ArrayLikeTD64_co, out: _ArrayT) -> _ArrayT: ... +def outer[ArrayT: np.ndarray](a: _ArrayLikeNumber_co | _ArrayLikeTD64_co, b: _ArrayLikeNumber_co | _ArrayLikeTD64_co, out: ArrayT) -> ArrayT: ... # keep in sync with numpy.linalg._linalg.tensordot (ignoring `/, *`) @overload @@ -1077,15 +1059,15 @@ def cross( # @overload -def roll(a: _ArrayT, shift: _ShapeLike, axis: _ShapeLike | None = None) -> _ArrayT: ... +def roll[ArrayT: np.ndarray](a: ArrayT, shift: _ShapeLike, axis: _ShapeLike | None = None) -> ArrayT: ... @overload -def roll(a: _ArrayLike[_ScalarT], shift: _ShapeLike, axis: _ShapeLike | None = None) -> NDArray[_ScalarT]: ... +def roll[ScalarT: np.generic](a: _ArrayLike[ScalarT], shift: _ShapeLike, axis: _ShapeLike | None = None) -> NDArray[ScalarT]: ... @overload def roll(a: ArrayLike, shift: _ShapeLike, axis: _ShapeLike | None = None) -> NDArray[Any]: ... # -def rollaxis(a: _ArrayT, axis: int, start: int = 0) -> _ArrayT: ... -def moveaxis(a: _ArrayT, source: _ShapeLike, destination: _ShapeLike) -> _ArrayT: ... +def rollaxis[ArrayT: np.ndarray](a: ArrayT, axis: int, start: int = 0) -> ArrayT: ... +def moveaxis[ArrayT: np.ndarray](a: ArrayT, source: _ShapeLike, destination: _ShapeLike) -> ArrayT: ... def normalize_axis_tuple( axis: int | Iterable[int], ndim: int, @@ -1099,7 +1081,7 @@ def indices(dimensions: tuple[()], dtype: type[int] = int, sparse: L[False] = Fa @overload # 0d, dtype=, sparse=True def indices(dimensions: tuple[()], dtype: DTypeLike | None = int, *, sparse: L[True]) -> tuple[()]: ... @overload # 0d, dtype=, sparse=False (default) -def indices(dimensions: tuple[()], dtype: _DTypeLike[_ScalarT], sparse: L[False] = False) -> _Array1D[_ScalarT]: ... +def indices[ScalarT: np.generic](dimensions: tuple[()], dtype: _DTypeLike[ScalarT], sparse: L[False] = False) -> _Array1D[ScalarT]: ... 
@overload # 0d, dtype=, sparse=False (default) def indices(dimensions: tuple[()], dtype: DTypeLike, sparse: L[False] = False) -> _Array1D[Any]: ... @overload # 1d, dtype=int (default), sparse=False (default) @@ -1107,9 +1089,9 @@ def indices(dimensions: tuple[int], dtype: type[int] = int, sparse: L[False] = F @overload # 1d, dtype=int (default), sparse=True def indices(dimensions: tuple[int], dtype: type[int] = int, *, sparse: L[True]) -> tuple[_Array1D[np.intp]]: ... @overload # 1d, dtype=, sparse=False (default) -def indices(dimensions: tuple[int], dtype: _DTypeLike[_ScalarT], sparse: L[False] = False) -> _Array2D[_ScalarT]: ... +def indices[ScalarT: np.generic](dimensions: tuple[int], dtype: _DTypeLike[ScalarT], sparse: L[False] = False) -> _Array2D[ScalarT]: ... @overload # 1d, dtype=, sparse=True -def indices(dimensions: tuple[int], dtype: _DTypeLike[_ScalarT], sparse: L[True]) -> tuple[_Array1D[_ScalarT]]: ... +def indices[ScalarT: np.generic](dimensions: tuple[int], dtype: _DTypeLike[ScalarT], sparse: L[True]) -> tuple[_Array1D[ScalarT]]: ... @overload # 1d, dtype=, sparse=False (default) def indices(dimensions: tuple[int], dtype: DTypeLike, sparse: L[False] = False) -> _Array2D[Any]: ... @overload # 1d, dtype=, sparse=True @@ -1121,11 +1103,11 @@ def indices( dimensions: tuple[int, int], dtype: type[int] = int, *, sparse: L[True] ) -> tuple[_Array2D[np.intp], _Array2D[np.intp]]: ... @overload # 2d, dtype=, sparse=False (default) -def indices(dimensions: tuple[int, int], dtype: _DTypeLike[_ScalarT], sparse: L[False] = False) -> _Array3D[_ScalarT]: ... +def indices[ScalarT: np.generic](dimensions: tuple[int, int], dtype: _DTypeLike[ScalarT], sparse: L[False] = False) -> _Array3D[ScalarT]: ... @overload # 2d, dtype=, sparse=True -def indices( - dimensions: tuple[int, int], dtype: _DTypeLike[_ScalarT], sparse: L[True] -) -> tuple[_Array2D[_ScalarT], _Array2D[_ScalarT]]: ... 
+def indices[ScalarT: np.generic]( + dimensions: tuple[int, int], dtype: _DTypeLike[ScalarT], sparse: L[True] +) -> tuple[_Array2D[ScalarT], _Array2D[ScalarT]]: ... @overload # 2d, dtype=, sparse=False (default) def indices(dimensions: tuple[int, int], dtype: DTypeLike, sparse: L[False] = False) -> _Array3D[Any]: ... @overload # 2d, dtype=, sparse=True @@ -1135,23 +1117,23 @@ def indices(dimensions: Sequence[int], dtype: type[int] = int, sparse: L[False] @overload # ?d, dtype=int (default), sparse=True def indices(dimensions: Sequence[int], dtype: type[int] = int, *, sparse: L[True]) -> tuple[NDArray[np.intp], ...]: ... @overload # ?d, dtype=, sparse=False (default) -def indices(dimensions: Sequence[int], dtype: _DTypeLike[_ScalarT], sparse: L[False] = False) -> NDArray[_ScalarT]: ... +def indices[ScalarT: np.generic](dimensions: Sequence[int], dtype: _DTypeLike[ScalarT], sparse: L[False] = False) -> NDArray[ScalarT]: ... @overload # ?d, dtype=, sparse=True -def indices(dimensions: Sequence[int], dtype: _DTypeLike[_ScalarT], sparse: L[True]) -> tuple[NDArray[_ScalarT], ...]: ... +def indices[ScalarT: np.generic](dimensions: Sequence[int], dtype: _DTypeLike[ScalarT], sparse: L[True]) -> tuple[NDArray[ScalarT], ...]: ... @overload # ?d, dtype=, sparse=False (default) def indices(dimensions: Sequence[int], dtype: DTypeLike, sparse: L[False] = False) -> ndarray: ... @overload # ?d, dtype=, sparse=True def indices(dimensions: Sequence[int], dtype: DTypeLike, sparse: L[True]) -> tuple[ndarray, ...]: ... # -def fromfunction( - function: Callable[..., _T], +def fromfunction[ReturnT]( + function: Callable[..., ReturnT], shape: Sequence[int], *, dtype: DTypeLike | None = float, like: _SupportsArrayFunc | None = None, **kwargs: object, -) -> _T: ... +) -> ReturnT: ... # def isscalar(element: object) -> TypeGuard[generic | complex | str | bytes | memoryview]: ... 
@@ -1164,7 +1146,7 @@ def base_repr(number: SupportsAbs[float], base: float = 2, padding: SupportsInde @overload # dtype: None (default) def identity(n: int, dtype: None = None, *, like: _SupportsArrayFunc | None = None) -> _Array2D[np.float64]: ... @overload # dtype: known scalar type -def identity(n: int, dtype: _DTypeLike[_ScalarT], *, like: _SupportsArrayFunc | None = None) -> _Array2D[_ScalarT]: ... +def identity[ScalarT: np.generic](n: int, dtype: _DTypeLike[ScalarT], *, like: _SupportsArrayFunc | None = None) -> _Array2D[ScalarT]: ... @overload # dtype: like bool def identity(n: int, dtype: _DTypeLikeBool, *, like: _SupportsArrayFunc | None = None) -> _Array2D[np.bool]: ... @overload # dtype: like int_ @@ -1195,21 +1177,21 @@ def isclose( equal_nan: py_bool = False, ) -> np.bool: ... @overload # known shape, same shape or scalar -def isclose( - a: np.ndarray[_ShapeT], - b: np.ndarray[_ShapeT] | _NumberLike_co, +def isclose[ShapeT: _Shape]( + a: np.ndarray[ShapeT], + b: np.ndarray[ShapeT] | _NumberLike_co, rtol: ArrayLike = 1e-5, atol: ArrayLike = 1e-8, equal_nan: py_bool = False, -) -> np.ndarray[_ShapeT, np.dtype[np.bool]]: ... +) -> np.ndarray[ShapeT, np.dtype[np.bool]]: ... @overload # same shape or scalar, known shape -def isclose( - a: np.ndarray[_ShapeT] | _NumberLike_co, - b: np.ndarray[_ShapeT], +def isclose[ShapeT: _Shape]( + a: np.ndarray[ShapeT] | _NumberLike_co, + b: np.ndarray[ShapeT], rtol: ArrayLike = 1e-5, atol: ArrayLike = 1e-8, equal_nan: py_bool = False, -) -> np.ndarray[_ShapeT, np.dtype[np.bool]]: ... +) -> np.ndarray[ShapeT, np.dtype[np.bool]]: ... @overload # 1d sequence, <=1d array-like def isclose( a: Sequence[_NumberLike_co], @@ -1257,20 +1239,20 @@ def array_equiv(a1: ArrayLike, a2: ArrayLike) -> py_bool: ... 
# @overload -def astype( - x: ndarray[_ShapeT], - dtype: _DTypeLike[_ScalarT], +def astype[ShapeT: _Shape, ScalarT: np.generic]( + x: ndarray[ShapeT], + dtype: _DTypeLike[ScalarT], /, *, copy: py_bool = True, device: L["cpu"] | None = None, -) -> ndarray[_ShapeT, dtype[_ScalarT]]: ... +) -> ndarray[ShapeT, dtype[ScalarT]]: ... @overload -def astype( - x: ndarray[_ShapeT], +def astype[ShapeT: _Shape]( + x: ndarray[ShapeT], dtype: DTypeLike | None, /, *, copy: py_bool = True, device: L["cpu"] | None = None, -) -> ndarray[_ShapeT]: ... +) -> ndarray[ShapeT]: ... diff --git a/numpy/_core/overrides.pyi b/numpy/_core/overrides.pyi index 6ef52566d782..627165e98d3d 100644 --- a/numpy/_core/overrides.pyi +++ b/numpy/_core/overrides.pyi @@ -1,13 +1,10 @@ from collections.abc import Callable, Iterable -from typing import Any, Final, NamedTuple, ParamSpec, TypeAlias, TypeVar +from typing import Any, Final, NamedTuple from numpy._utils import set_module as set_module -_T = TypeVar("_T") -_Tss = ParamSpec("_Tss") -_FuncLikeT = TypeVar("_FuncLikeT", bound=type | Callable[..., object]) - -_Dispatcher: TypeAlias = Callable[_Tss, Iterable[object]] +type _FuncLike = type | Callable[..., object] +type _Dispatcher[**_Tss] = Callable[_Tss, Iterable[object]] ### @@ -21,27 +18,27 @@ class ArgSpec(NamedTuple): defaults: tuple[Any, ...] def get_array_function_like_doc(public_api: Callable[..., object], docstring_template: str = "") -> str: ... -def finalize_array_function_like(public_api: _FuncLikeT) -> _FuncLikeT: ... +def finalize_array_function_like[FuncLikeT: _FuncLike](public_api: FuncLikeT) -> FuncLikeT: ... # -def verify_matching_signatures(implementation: Callable[_Tss, object], dispatcher: _Dispatcher[_Tss]) -> None: ... +def verify_matching_signatures[**Tss](implementation: Callable[Tss, object], dispatcher: _Dispatcher[Tss]) -> None: ... 
# NOTE: This actually returns a `_ArrayFunctionDispatcher` callable wrapper object, with # the original wrapped callable stored in the `._implementation` attribute. It checks # for any `__array_function__` of the values of specific arguments that the dispatcher # specifies. Since the dispatcher only returns an iterable of passed array-like args, # this overridable behaviour is impossible to annotate. -def array_function_dispatch( - dispatcher: _Dispatcher[_Tss] | None = None, +def array_function_dispatch[**Tss, FuncLikeT: _FuncLike]( + dispatcher: _Dispatcher[Tss] | None = None, module: str | None = None, verify: bool = True, docs_from_dispatcher: bool = False, -) -> Callable[[_FuncLikeT], _FuncLikeT]: ... +) -> Callable[[FuncLikeT], FuncLikeT]: ... # -def array_function_from_dispatcher( - implementation: Callable[_Tss, _T], +def array_function_from_dispatcher[**Tss, T]( + implementation: Callable[Tss, T], module: str | None = None, verify: bool = True, docs_from_dispatcher: bool = True, -) -> Callable[[_Dispatcher[_Tss]], Callable[_Tss, _T]]: ... +) -> Callable[[_Dispatcher[Tss]], Callable[Tss, T]]: ... 
diff --git a/numpy/_core/records.pyi b/numpy/_core/records.pyi index 2b133630cf35..326a0fe6e476 100644 --- a/numpy/_core/records.pyi +++ b/numpy/_core/records.pyi @@ -1,18 +1,17 @@ # ruff: noqa: ANN401 # pyright: reportSelfClsParameterName=false from _typeshed import Incomplete, StrOrBytesPath -from collections.abc import Iterable, Sequence +from collections.abc import Buffer, Iterable, Sequence from typing import ( Any, ClassVar, Literal, Protocol, SupportsIndex, - TypeAlias, overload, type_check_only, ) -from typing_extensions import Buffer, TypeVar +from typing_extensions import TypeVar import numpy as np from numpy import _ByteOrder, _OrderKACF @@ -39,12 +38,11 @@ __all__ = [ "record", ] -_T = TypeVar("_T") -_ScalarT = TypeVar("_ScalarT", bound=np.generic) +# Explicit covariant type variables are needed because mypy isn't very good at variance inference right now. _DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=True) _ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True) -_RecArray: TypeAlias = recarray[_AnyShape, np.dtype[_ScalarT]] +type _RecArray[_ScalarT: np.generic] = recarray[_AnyShape, np.dtype[_ScalarT]] @type_check_only class _SupportsReadInto(Protocol): @@ -236,8 +234,8 @@ def fromfile( # exported in `numpy.rec` @overload -def array( - obj: _ScalarT | NDArray[_ScalarT], +def array[ScalarT: np.generic]( + obj: ScalarT | NDArray[ScalarT], dtype: None = None, shape: _ShapeLike | None = None, offset: int = 0, @@ -248,7 +246,7 @@ def array( aligned: bool = False, byteorder: None = None, copy: bool = True, -) -> _RecArray[_ScalarT]: ... +) -> _RecArray[ScalarT]: ... @overload def array( obj: ArrayLike, @@ -338,4 +336,4 @@ def array( ) -> _RecArray[record]: ... # exported in `numpy.rec` -def find_duplicate(list: Iterable[_T]) -> list[_T]: ... +def find_duplicate[T](list: Iterable[T]) -> list[T]: ... 
diff --git a/numpy/_core/shape_base.pyi b/numpy/_core/shape_base.pyi index b87cae8a5f0f..b41602ae8d47 100644 --- a/numpy/_core/shape_base.pyi +++ b/numpy/_core/shape_base.pyi @@ -1,7 +1,8 @@ from collections.abc import Sequence -from typing import Any, SupportsIndex, TypeVar, overload +from typing import Any, SupportsIndex, overload -from numpy import _CastingKind, generic +import numpy as np +from numpy import _CastingKind from numpy._typing import ArrayLike, DTypeLike, NDArray, _ArrayLike, _DTypeLike __all__ = [ @@ -15,21 +16,17 @@ __all__ = [ "vstack", ] -_T = TypeVar("_T") -_ScalarT = TypeVar("_ScalarT", bound=generic) -_ScalarT1 = TypeVar("_ScalarT1", bound=generic) -_ScalarT2 = TypeVar("_ScalarT2", bound=generic) -_ArrayT = TypeVar("_ArrayT", bound=NDArray[Any]) - -### - # keep in sync with `numpy.ma.extras.atleast_1d` @overload -def atleast_1d(a0: _ArrayLike[_ScalarT], /) -> NDArray[_ScalarT]: ... +def atleast_1d[ScalarT: np.generic](a0: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload -def atleast_1d(a0: _ArrayLike[_ScalarT1], a1: _ArrayLike[_ScalarT2], /) -> tuple[NDArray[_ScalarT1], NDArray[_ScalarT2]]: ... +def atleast_1d[ScalarT1: np.generic, ScalarT2: np.generic]( + a0: _ArrayLike[ScalarT1], a1: _ArrayLike[ScalarT2], / +) -> tuple[NDArray[ScalarT1], NDArray[ScalarT2]]: ... @overload -def atleast_1d(a0: _ArrayLike[_ScalarT], a1: _ArrayLike[_ScalarT], /, *arys: _ArrayLike[_ScalarT]) -> tuple[NDArray[_ScalarT], ...]: ... +def atleast_1d[ScalarT: np.generic]( + a0: _ArrayLike[ScalarT], a1: _ArrayLike[ScalarT], /, *arys: _ArrayLike[ScalarT] +) -> tuple[NDArray[ScalarT], ...]: ... @overload def atleast_1d(a0: ArrayLike, /) -> NDArray[Any]: ... @overload @@ -39,11 +36,15 @@ def atleast_1d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[NDArray # keep in sync with `numpy.ma.extras.atleast_2d` @overload -def atleast_2d(a0: _ArrayLike[_ScalarT], /) -> NDArray[_ScalarT]: ... 
+def atleast_2d[ScalarT: np.generic](a0: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload -def atleast_2d(a0: _ArrayLike[_ScalarT1], a1: _ArrayLike[_ScalarT2], /) -> tuple[NDArray[_ScalarT1], NDArray[_ScalarT2]]: ... +def atleast_2d[ScalarT1: np.generic, ScalarT2: np.generic]( + a0: _ArrayLike[ScalarT1], a1: _ArrayLike[ScalarT2], / +) -> tuple[NDArray[ScalarT1], NDArray[ScalarT2]]: ... @overload -def atleast_2d(a0: _ArrayLike[_ScalarT], a1: _ArrayLike[_ScalarT], /, *arys: _ArrayLike[_ScalarT]) -> tuple[NDArray[_ScalarT], ...]: ... +def atleast_2d[ScalarT: np.generic]( + a0: _ArrayLike[ScalarT], a1: _ArrayLike[ScalarT], /, *arys: _ArrayLike[ScalarT] +) -> tuple[NDArray[ScalarT], ...]: ... @overload def atleast_2d(a0: ArrayLike, /) -> NDArray[Any]: ... @overload @@ -53,11 +54,15 @@ def atleast_2d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[NDArray # keep in sync with `numpy.ma.extras.atleast_3d` @overload -def atleast_3d(a0: _ArrayLike[_ScalarT], /) -> NDArray[_ScalarT]: ... +def atleast_3d[ScalarT: np.generic](a0: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload -def atleast_3d(a0: _ArrayLike[_ScalarT1], a1: _ArrayLike[_ScalarT2], /) -> tuple[NDArray[_ScalarT1], NDArray[_ScalarT2]]: ... +def atleast_3d[ScalarT1: np.generic, ScalarT2: np.generic]( + a0: _ArrayLike[ScalarT1], a1: _ArrayLike[ScalarT2], / +) -> tuple[NDArray[ScalarT1], NDArray[ScalarT2]]: ... @overload -def atleast_3d(a0: _ArrayLike[_ScalarT], a1: _ArrayLike[_ScalarT], /, *arys: _ArrayLike[_ScalarT]) -> tuple[NDArray[_ScalarT], ...]: ... +def atleast_3d[ScalarT: np.generic]( + a0: _ArrayLike[ScalarT], a1: _ArrayLike[ScalarT], /, *arys: _ArrayLike[ScalarT] +) -> tuple[NDArray[ScalarT], ...]: ... @overload def atleast_3d(a0: ArrayLike, /) -> NDArray[Any]: ... @overload @@ -66,23 +71,23 @@ def atleast_3d(a0: ArrayLike, a1: ArrayLike, /) -> tuple[NDArray[Any], NDArray[A def atleast_3d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[NDArray[Any], ...]: ... 
# used by numpy.lib._shape_base_impl -def _arrays_for_stack_dispatcher(arrays: Sequence[_T]) -> tuple[_T, ...]: ... +def _arrays_for_stack_dispatcher[T](arrays: Sequence[T]) -> tuple[T, ...]: ... # keep in sync with `numpy.ma.extras.vstack` @overload -def vstack( - tup: Sequence[_ArrayLike[_ScalarT]], +def vstack[ScalarT: np.generic]( + tup: Sequence[_ArrayLike[ScalarT]], *, dtype: None = None, casting: _CastingKind = "same_kind" -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload -def vstack( +def vstack[ScalarT: np.generic]( tup: Sequence[ArrayLike], *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], casting: _CastingKind = "same_kind" -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def vstack( tup: Sequence[ArrayLike], @@ -93,19 +98,19 @@ def vstack( # keep in sync with `numpy.ma.extras.hstack` @overload -def hstack( - tup: Sequence[_ArrayLike[_ScalarT]], +def hstack[ScalarT: np.generic]( + tup: Sequence[_ArrayLike[ScalarT]], *, dtype: None = None, casting: _CastingKind = "same_kind" -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload -def hstack( +def hstack[ScalarT: np.generic]( tup: Sequence[ArrayLike], *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], casting: _CastingKind = "same_kind" -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def hstack( tup: Sequence[ArrayLike], @@ -116,23 +121,23 @@ def hstack( # keep in sync with `numpy.ma.extras.stack` @overload -def stack( - arrays: Sequence[_ArrayLike[_ScalarT]], +def stack[ScalarT: np.generic]( + arrays: Sequence[_ArrayLike[ScalarT]], axis: SupportsIndex = 0, out: None = None, *, dtype: None = None, casting: _CastingKind = "same_kind" -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... 
@overload -def stack( +def stack[ScalarT: np.generic]( arrays: Sequence[ArrayLike], axis: SupportsIndex = 0, out: None = None, *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], casting: _CastingKind = "same_kind" -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def stack( arrays: Sequence[ArrayLike], @@ -143,31 +148,31 @@ def stack( casting: _CastingKind = "same_kind" ) -> NDArray[Any]: ... @overload -def stack( +def stack[OutT: np.ndarray]( arrays: Sequence[ArrayLike], axis: SupportsIndex, - out: _ArrayT, + out: OutT, *, dtype: DTypeLike | None = None, casting: _CastingKind = "same_kind", -) -> _ArrayT: ... +) -> OutT: ... @overload -def stack( +def stack[OutT: np.ndarray]( arrays: Sequence[ArrayLike], axis: SupportsIndex = 0, *, - out: _ArrayT, + out: OutT, dtype: DTypeLike | None = None, casting: _CastingKind = "same_kind", -) -> _ArrayT: ... +) -> OutT: ... @overload -def unstack( - array: _ArrayLike[_ScalarT], +def unstack[ScalarT: np.generic]( + array: _ArrayLike[ScalarT], /, *, axis: int = 0, -) -> tuple[NDArray[_ScalarT], ...]: ... +) -> tuple[NDArray[ScalarT], ...]: ... @overload def unstack( array: ArrayLike, @@ -177,6 +182,6 @@ def unstack( ) -> tuple[NDArray[Any], ...]: ... @overload -def block(arrays: _ArrayLike[_ScalarT]) -> NDArray[_ScalarT]: ... +def block[ScalarT: np.generic](arrays: _ArrayLike[ScalarT]) -> NDArray[ScalarT]: ... @overload def block(arrays: ArrayLike) -> NDArray[Any]: ... 
diff --git a/numpy/_core/strings.pyi b/numpy/_core/strings.pyi index 270760412670..475da159f783 100644 --- a/numpy/_core/strings.pyi +++ b/numpy/_core/strings.pyi @@ -1,4 +1,4 @@ -from typing import TypeAlias, overload +from typing import overload import numpy as np from numpy._globals import _NoValueType @@ -62,9 +62,9 @@ __all__ = [ "slice", ] -_StringDTypeArray: TypeAlias = np.ndarray[_AnyShape, np.dtypes.StringDType] -_StringDTypeSupportsArray: TypeAlias = _SupportsArray[np.dtypes.StringDType] -_StringDTypeOrUnicodeArray: TypeAlias = np.ndarray[_AnyShape, np.dtype[np.str_]] | _StringDTypeArray +type _StringDTypeArray = np.ndarray[_AnyShape, np.dtypes.StringDType] +type _StringDTypeSupportsArray = _SupportsArray[np.dtypes.StringDType] +type _StringDTypeOrUnicodeArray = NDArray[np.str_] | _StringDTypeArray @overload def equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... diff --git a/numpy/_core/umath.pyi b/numpy/_core/umath.pyi index faf6fb545d5f..498a0af2c008 100644 --- a/numpy/_core/umath.pyi +++ b/numpy/_core/umath.pyi @@ -1,7 +1,7 @@ import contextvars from _typeshed import SupportsWrite from collections.abc import Callable -from typing import Any, Final, Literal, TypeAlias, TypedDict, Unpack, type_check_only +from typing import Any, Final, Literal, TypedDict, Unpack, type_check_only from typing_extensions import CapsuleType from numpy import ( @@ -206,8 +206,8 @@ __all__ = [ ### -_ErrKind: TypeAlias = Literal["ignore", "warn", "raise", "call", "print", "log"] -_ErrCall: TypeAlias = Callable[[str, int], Any] | SupportsWrite[str] +type _ErrKind = Literal["ignore", "warn", "raise", "call", "print", "log"] +type _ErrCall = Callable[[str, int], Any] | SupportsWrite[str] @type_check_only class _ExtOjbDict(TypedDict, total=False): diff --git a/numpy/_typing/__init__.py b/numpy/_typing/__init__.py index 22eb6e3f30bb..82479a3fcb08 100644 --- a/numpy/_typing/__init__.py +++ b/numpy/_typing/__init__.py @@ -1,8 +1,7 @@ """Private counterpart of ``numpy.typing``.""" 
-import sys - from ._array_like import ( + ArrayLike, NDArray as NDArray, _ArrayLike as _ArrayLike, _ArrayLikeAnyString_co as _ArrayLikeAnyString_co, @@ -85,6 +84,7 @@ # from ._dtype_like import ( + DTypeLike, _DTypeLike as _DTypeLike, _DTypeLikeBool as _DTypeLikeBool, _DTypeLikeBytes as _DTypeLikeBytes, @@ -157,17 +157,3 @@ _UFunc_Nin2_Nout1 as _UFunc_Nin2_Nout1, _UFunc_Nin2_Nout2 as _UFunc_Nin2_Nout2, ) - -# wrapping the public aliases in `TypeAliasType` helps with introspection readability -if sys.version_info >= (3, 12): - from typing import TypeAliasType - - from ._array_like import ArrayLike as _ArrayLikeAlias - from ._dtype_like import DTypeLike as _DTypeLikeAlias - - ArrayLike = TypeAliasType("ArrayLike", _ArrayLikeAlias) - DTypeLike = TypeAliasType("DTypeLike", _DTypeLikeAlias) - -else: - from ._array_like import ArrayLike as ArrayLike - from ._dtype_like import DTypeLike as DTypeLike diff --git a/numpy/_typing/_array_like.py b/numpy/_typing/_array_like.py index 6b071f4a0319..b8cb2c7872c1 100644 --- a/numpy/_typing/_array_like.py +++ b/numpy/_typing/_array_like.py @@ -1,27 +1,18 @@ -import sys -from collections.abc import Callable, Collection, Sequence -from typing import TYPE_CHECKING, Any, Protocol, TypeAlias, TypeVar, runtime_checkable +from collections.abc import Buffer, Callable, Collection, Sequence +from typing import TYPE_CHECKING, Any, Protocol, runtime_checkable import numpy as np -from numpy import dtype - -from ._nbit_base import _32Bit, _64Bit -from ._nested_sequence import _NestedSequence -from ._shape import _AnyShape if TYPE_CHECKING: - StringDType = np.dtypes.StringDType + from numpy.dtypes import StringDType else: - # at runtime outside of type checking importing this from numpy.dtypes - # would lead to a circular import from numpy._core.multiarray import StringDType -_T = TypeVar("_T") -_ScalarT = TypeVar("_ScalarT", bound=np.generic) -_DTypeT = TypeVar("_DTypeT", bound=dtype[Any]) -_DTypeT_co = TypeVar("_DTypeT_co", covariant=True, 
bound=dtype[Any]) +from ._nbit_base import _32Bit, _64Bit +from ._nested_sequence import _NestedSequence +from ._shape import _AnyShape -NDArray: TypeAlias = np.ndarray[_AnyShape, dtype[_ScalarT]] +type NDArray[ScalarT: np.generic] = np.ndarray[_AnyShape, np.dtype[ScalarT]] # The `_SupportsArray` protocol only cares about the default dtype # (i.e. `dtype=None` or no `dtype` parameter at all) of the to-be returned @@ -29,8 +20,8 @@ # Concrete implementations of the protocol are responsible for adding # any and all remaining overloads @runtime_checkable -class _SupportsArray(Protocol[_DTypeT_co]): - def __array__(self) -> np.ndarray[Any, _DTypeT_co]: ... +class _SupportsArray[DTypeT: np.dtype](Protocol): + def __array__(self) -> np.ndarray[Any, DTypeT]: ... @runtime_checkable @@ -46,61 +37,54 @@ def __array_function__( # TODO: Wait until mypy supports recursive objects in combination with typevars -_FiniteNestedSequence: TypeAlias = ( - _T - | Sequence[_T] - | Sequence[Sequence[_T]] - | Sequence[Sequence[Sequence[_T]]] - | Sequence[Sequence[Sequence[Sequence[_T]]]] +type _FiniteNestedSequence[T] = ( + T + | Sequence[T] + | Sequence[Sequence[T]] + | Sequence[Sequence[Sequence[T]]] + | Sequence[Sequence[Sequence[Sequence[T]]]] ) # A subset of `npt.ArrayLike` that can be parametrized w.r.t. `np.generic` -_ArrayLike: TypeAlias = ( - _SupportsArray[dtype[_ScalarT]] - | _NestedSequence[_SupportsArray[dtype[_ScalarT]]] +type _ArrayLike[ScalarT: np.generic] = ( + _SupportsArray[np.dtype[ScalarT]] + | _NestedSequence[_SupportsArray[np.dtype[ScalarT]]] ) # A union representing array-like objects; consists of two typevars: # One representing types that can be parametrized w.r.t. 
`np.dtype` # and another one for the rest -_DualArrayLike: TypeAlias = ( - _SupportsArray[_DTypeT] - | _NestedSequence[_SupportsArray[_DTypeT]] - | _T - | _NestedSequence[_T] +type _DualArrayLike[DTypeT: np.dtype, BuiltinT] = ( + _SupportsArray[DTypeT] + | _NestedSequence[_SupportsArray[DTypeT]] + | BuiltinT + | _NestedSequence[BuiltinT] ) -if sys.version_info >= (3, 12): - from collections.abc import Buffer as _Buffer -else: - @runtime_checkable - class _Buffer(Protocol): - def __buffer__(self, flags: int, /) -> memoryview: ... - -ArrayLike: TypeAlias = _Buffer | _DualArrayLike[dtype[Any], complex | bytes | str] +type ArrayLike = Buffer | _DualArrayLike[np.dtype, complex | bytes | str] # `ArrayLike_co`: array-like objects that can be coerced into `X` # given the casting rules `same_kind` -_ArrayLikeBool_co: TypeAlias = _DualArrayLike[dtype[np.bool], bool] -_ArrayLikeUInt_co: TypeAlias = _DualArrayLike[dtype[np.bool | np.unsignedinteger], bool] -_ArrayLikeInt_co: TypeAlias = _DualArrayLike[dtype[np.bool | np.integer], int] -_ArrayLikeFloat_co: TypeAlias = _DualArrayLike[dtype[np.bool | np.integer | np.floating], float] -_ArrayLikeComplex_co: TypeAlias = _DualArrayLike[dtype[np.bool | np.number], complex] -_ArrayLikeNumber_co: TypeAlias = _ArrayLikeComplex_co -_ArrayLikeTD64_co: TypeAlias = _DualArrayLike[dtype[np.bool | np.integer | np.timedelta64], int] -_ArrayLikeDT64_co: TypeAlias = _ArrayLike[np.datetime64] -_ArrayLikeObject_co: TypeAlias = _ArrayLike[np.object_] - -_ArrayLikeVoid_co: TypeAlias = _ArrayLike[np.void] -_ArrayLikeBytes_co: TypeAlias = _DualArrayLike[dtype[np.bytes_], bytes] -_ArrayLikeStr_co: TypeAlias = _DualArrayLike[dtype[np.str_], str] -_ArrayLikeString_co: TypeAlias = _DualArrayLike[StringDType, str] -_ArrayLikeAnyString_co: TypeAlias = _DualArrayLike[dtype[np.character] | StringDType, bytes | str] - -__Float64_co: TypeAlias = np.floating[_64Bit] | np.float32 | np.float16 | np.integer | np.bool -__Complex128_co: TypeAlias = np.number[_64Bit] 
| np.number[_32Bit] | np.float16 | np.integer | np.bool -_ArrayLikeFloat64_co: TypeAlias = _DualArrayLike[dtype[__Float64_co], float] -_ArrayLikeComplex128_co: TypeAlias = _DualArrayLike[dtype[__Complex128_co], complex] +type _ArrayLikeBool_co = _DualArrayLike[np.dtype[np.bool], bool] +type _ArrayLikeUInt_co = _DualArrayLike[np.dtype[np.bool | np.unsignedinteger], bool] +type _ArrayLikeInt_co = _DualArrayLike[np.dtype[np.bool | np.integer], int] +type _ArrayLikeFloat_co = _DualArrayLike[np.dtype[np.bool | np.integer | np.floating], float] +type _ArrayLikeComplex_co = _DualArrayLike[np.dtype[np.bool | np.number], complex] +type _ArrayLikeNumber_co = _ArrayLikeComplex_co +type _ArrayLikeTD64_co = _DualArrayLike[np.dtype[np.bool | np.integer | np.timedelta64], int] +type _ArrayLikeDT64_co = _ArrayLike[np.datetime64] +type _ArrayLikeObject_co = _ArrayLike[np.object_] + +type _ArrayLikeVoid_co = _ArrayLike[np.void] +type _ArrayLikeBytes_co = _DualArrayLike[np.dtype[np.bytes_], bytes] +type _ArrayLikeStr_co = _DualArrayLike[np.dtype[np.str_], str] +type _ArrayLikeString_co = _DualArrayLike[StringDType, str] +type _ArrayLikeAnyString_co = _DualArrayLike[np.dtype[np.character] | StringDType, bytes | str] + +type __Float64_co = np.floating[_64Bit] | np.float32 | np.float16 | np.integer | np.bool +type __Complex128_co = np.number[_64Bit] | np.number[_32Bit] | np.float16 | np.integer | np.bool +type _ArrayLikeFloat64_co = _DualArrayLike[np.dtype[__Float64_co], float] +type _ArrayLikeComplex128_co = _DualArrayLike[np.dtype[__Complex128_co], complex] # NOTE: This includes `builtins.bool`, but not `numpy.bool`. 
-_ArrayLikeInt: TypeAlias = _DualArrayLike[dtype[np.integer], int] +type _ArrayLikeInt = _DualArrayLike[np.dtype[np.integer], int] diff --git a/numpy/_typing/_char_codes.py b/numpy/_typing/_char_codes.py index 7b6fad228d56..6d1e06dc894c 100644 --- a/numpy/_typing/_char_codes.py +++ b/numpy/_typing/_char_codes.py @@ -1,59 +1,59 @@ from typing import Literal -_BoolCodes = Literal[ +type _BoolCodes = Literal[ "bool", "bool_", "?", "|?", "=?", "?", "b1", "|b1", "=b1", "b1", ] # fmt: skip -_UInt8Codes = Literal["uint8", "u1", "|u1", "=u1", "u1"] -_UInt16Codes = Literal["uint16", "u2", "|u2", "=u2", "u2"] -_UInt32Codes = Literal["uint32", "u4", "|u4", "=u4", "u4"] -_UInt64Codes = Literal["uint64", "u8", "|u8", "=u8", "u8"] - -_Int8Codes = Literal["int8", "i1", "|i1", "=i1", "i1"] -_Int16Codes = Literal["int16", "i2", "|i2", "=i2", "i2"] -_Int32Codes = Literal["int32", "i4", "|i4", "=i4", "i4"] -_Int64Codes = Literal["int64", "i8", "|i8", "=i8", "i8"] - -_Float16Codes = Literal["float16", "f2", "|f2", "=f2", "f2"] -_Float32Codes = Literal["float32", "f4", "|f4", "=f4", "f4"] -_Float64Codes = Literal["float64", "f8", "|f8", "=f8", "f8"] - -_Complex64Codes = Literal["complex64", "c8", "|c8", "=c8", "c8"] -_Complex128Codes = Literal["complex128", "c16", "|c16", "=c16", "c16"] - -_ByteCodes = Literal["byte", "b", "|b", "=b", "b"] -_ShortCodes = Literal["short", "h", "|h", "=h", "h"] -_IntCCodes = Literal["intc", "i", "|i", "=i", "i"] -_IntPCodes = Literal["intp", "int", "int_", "n", "|n", "=n", "n"] -_LongCodes = Literal["long", "l", "|l", "=l", "l"] -_IntCodes = _IntPCodes -_LongLongCodes = Literal["longlong", "q", "|q", "=q", "q"] - -_UByteCodes = Literal["ubyte", "B", "|B", "=B", "B"] -_UShortCodes = Literal["ushort", "H", "|H", "=H", "H"] -_UIntCCodes = Literal["uintc", "I", "|I", "=I", "I"] -_UIntPCodes = Literal["uintp", "uint", "N", "|N", "=N", "N"] -_ULongCodes = Literal["ulong", "L", "|L", "=L", "L"] -_UIntCodes = _UIntPCodes -_ULongLongCodes = Literal["ulonglong", 
"Q", "|Q", "=Q", "Q"] - -_HalfCodes = Literal["half", "e", "|e", "=e", "e"] -_SingleCodes = Literal["single", "f", "|f", "=f", "f"] -_DoubleCodes = Literal["double", "float", "d", "|d", "=d", "d"] -_LongDoubleCodes = Literal["longdouble", "g", "|g", "=g", "g"] - -_CSingleCodes = Literal["csingle", "F", "|F", "=F", "F"] -_CDoubleCodes = Literal["cdouble", "complex", "D", "|D", "=D", "D"] -_CLongDoubleCodes = Literal["clongdouble", "G", "|G", "=G", "G"] - -_StrCodes = Literal["str", "str_", "unicode", "U", "|U", "=U", "U"] -_BytesCodes = Literal["bytes", "bytes_", "S", "|S", "=S", "S"] -_VoidCodes = Literal["void", "V", "|V", "=V", "V"] -_ObjectCodes = Literal["object", "object_", "O", "|O", "=O", "O"] - -_DT64Codes = Literal[ +type _UInt8Codes = Literal["uint8", "u1", "|u1", "=u1", "u1"] +type _UInt16Codes = Literal["uint16", "u2", "|u2", "=u2", "u2"] +type _UInt32Codes = Literal["uint32", "u4", "|u4", "=u4", "u4"] +type _UInt64Codes = Literal["uint64", "u8", "|u8", "=u8", "u8"] + +type _Int8Codes = Literal["int8", "i1", "|i1", "=i1", "i1"] +type _Int16Codes = Literal["int16", "i2", "|i2", "=i2", "i2"] +type _Int32Codes = Literal["int32", "i4", "|i4", "=i4", "i4"] +type _Int64Codes = Literal["int64", "i8", "|i8", "=i8", "i8"] + +type _Float16Codes = Literal["float16", "f2", "|f2", "=f2", "f2"] +type _Float32Codes = Literal["float32", "f4", "|f4", "=f4", "f4"] +type _Float64Codes = Literal["float64", "f8", "|f8", "=f8", "f8"] + +type _Complex64Codes = Literal["complex64", "c8", "|c8", "=c8", "c8"] +type _Complex128Codes = Literal["complex128", "c16", "|c16", "=c16", "c16"] + +type _ByteCodes = Literal["byte", "b", "|b", "=b", "b"] +type _ShortCodes = Literal["short", "h", "|h", "=h", "h"] +type _IntCCodes = Literal["intc", "i", "|i", "=i", "i"] +type _IntPCodes = Literal["intp", "int", "int_", "n", "|n", "=n", "n"] +type _LongCodes = Literal["long", "l", "|l", "=l", "l"] +type _IntCodes = _IntPCodes +type _LongLongCodes = Literal["longlong", "q", "|q", "=q", "q"] + 
+type _UByteCodes = Literal["ubyte", "B", "|B", "=B", "B"] +type _UShortCodes = Literal["ushort", "H", "|H", "=H", "H"] +type _UIntCCodes = Literal["uintc", "I", "|I", "=I", "I"] +type _UIntPCodes = Literal["uintp", "uint", "N", "|N", "=N", "N"] +type _ULongCodes = Literal["ulong", "L", "|L", "=L", "L"] +type _UIntCodes = _UIntPCodes +type _ULongLongCodes = Literal["ulonglong", "Q", "|Q", "=Q", "Q"] + +type _HalfCodes = Literal["half", "e", "|e", "=e", "e"] +type _SingleCodes = Literal["single", "f", "|f", "=f", "f"] +type _DoubleCodes = Literal["double", "float", "d", "|d", "=d", "d"] +type _LongDoubleCodes = Literal["longdouble", "g", "|g", "=g", "g"] + +type _CSingleCodes = Literal["csingle", "F", "|F", "=F", "F"] +type _CDoubleCodes = Literal["cdouble", "complex", "D", "|D", "=D", "D"] +type _CLongDoubleCodes = Literal["clongdouble", "G", "|G", "=G", "G"] + +type _StrCodes = Literal["str", "str_", "unicode", "U", "|U", "=U", "U"] +type _BytesCodes = Literal["bytes", "bytes_", "S", "|S", "=S", "S"] +type _VoidCodes = Literal["void", "V", "|V", "=V", "V"] +type _ObjectCodes = Literal["object", "object_", "O", "|O", "=O", "O"] + +type _DT64Codes = Literal[ "datetime64", "|datetime64", "=datetime64", "datetime64", "datetime64[Y]", "|datetime64[Y]", "=datetime64[Y]", @@ -98,7 +98,7 @@ "M8[fs]", "|M8[fs]", "=M8[fs]", "M8[fs]", "M8[as]", "|M8[as]", "=M8[as]", "M8[as]", ] -_TD64Codes = Literal[ +type _TD64Codes = Literal[ "timedelta64", "|timedelta64", "=timedelta64", "timedelta64", "timedelta64[Y]", "|timedelta64[Y]", "=timedelta64[Y]", @@ -146,7 +146,7 @@ # NOTE: `StringDType' has no scalar type, and therefore has no name that can # be passed to the `dtype` constructor -_StringCodes = Literal["T", "|T", "=T", "T"] +type _StringCodes = Literal["T", "|T", "=T", "T"] # NOTE: Nested literals get flattened and de-duplicated at runtime, which isn't # the case for a `Union` of `Literal`s. 
@@ -154,7 +154,7 @@ # Another advantage of nesting, is that they always have a "flat" # `Literal.__args__`, which is a tuple of *literally* all its literal values. -_UnsignedIntegerCodes = Literal[ +type _UnsignedIntegerCodes = Literal[ _UInt8Codes, _UInt16Codes, _UInt32Codes, @@ -166,7 +166,7 @@ _ULongCodes, _ULongLongCodes, ] -_SignedIntegerCodes = Literal[ +type _SignedIntegerCodes = Literal[ _Int8Codes, _Int16Codes, _Int32Codes, @@ -178,7 +178,7 @@ _LongCodes, _LongLongCodes, ] -_FloatingCodes = Literal[ +type _FloatingCodes = Literal[ _Float16Codes, _Float32Codes, _Float64Codes, @@ -187,21 +187,21 @@ _DoubleCodes, _LongDoubleCodes ] -_ComplexFloatingCodes = Literal[ +type _ComplexFloatingCodes = Literal[ _Complex64Codes, _Complex128Codes, _CSingleCodes, _CDoubleCodes, _CLongDoubleCodes, ] -_IntegerCodes = Literal[_UnsignedIntegerCodes, _SignedIntegerCodes] -_InexactCodes = Literal[_FloatingCodes, _ComplexFloatingCodes] -_NumberCodes = Literal[_IntegerCodes, _InexactCodes] +type _IntegerCodes = Literal[_UnsignedIntegerCodes, _SignedIntegerCodes] +type _InexactCodes = Literal[_FloatingCodes, _ComplexFloatingCodes] +type _NumberCodes = Literal[_IntegerCodes, _InexactCodes] -_CharacterCodes = Literal[_StrCodes, _BytesCodes] -_FlexibleCodes = Literal[_VoidCodes, _CharacterCodes] +type _CharacterCodes = Literal[_StrCodes, _BytesCodes] +type _FlexibleCodes = Literal[_VoidCodes, _CharacterCodes] -_GenericCodes = Literal[ +type _GenericCodes = Literal[ _BoolCodes, _NumberCodes, _FlexibleCodes, diff --git a/numpy/_typing/_dtype_like.py b/numpy/_typing/_dtype_like.py index 34c4bd44f519..09ed1a0084de 100644 --- a/numpy/_typing/_dtype_like.py +++ b/numpy/_typing/_dtype_like.py @@ -1,5 +1,5 @@ -from collections.abc import Sequence # noqa: F811 -from typing import Any, Protocol, TypeAlias, TypedDict, TypeVar +from collections.abc import Sequence +from typing import Any, NotRequired, Protocol, TypedDict, runtime_checkable import numpy as np @@ -18,48 +18,43 @@ _VoidCodes, ) 
-_ScalarT = TypeVar("_ScalarT", bound=np.generic) -_DTypeT = TypeVar("_DTypeT", bound=np.dtype) -_DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, covariant=True) +type _DTypeLikeNested = Any # TODO: wait for support for recursive types -_DTypeLikeNested: TypeAlias = Any # TODO: wait for support for recursive types - -# Mandatory keys -class _DTypeDictBase(TypedDict): +class _DTypeDict(TypedDict): names: Sequence[str] formats: Sequence[_DTypeLikeNested] - - -# Mandatory + optional keys -class _DTypeDict(_DTypeDictBase, total=False): # Only `str` elements are usable as indexing aliases, # but `titles` can in principle accept any object - offsets: Sequence[int] - titles: Sequence[Any] - itemsize: int - aligned: bool + offsets: NotRequired[Sequence[int]] + titles: NotRequired[Sequence[Any]] + itemsize: NotRequired[int] + aligned: NotRequired[bool] -class _HasDType(Protocol[_DTypeT_co]): +# A protocol for anything with the dtype attribute +@runtime_checkable +class _HasDType[DTypeT: np.dtype](Protocol): @property - def dtype(self) -> _DTypeT_co: ... + def dtype(self) -> DTypeT: ... -class _HasNumPyDType(Protocol[_DTypeT_co]): +class _HasNumPyDType[DTypeT: np.dtype](Protocol): @property - def __numpy_dtype__(self, /) -> _DTypeT_co: ... + def __numpy_dtype__(self, /) -> DTypeT: ... -_SupportsDType: TypeAlias = _HasDType[_DTypeT] | _HasNumPyDType[_DTypeT] +type _SupportsDType[DTypeT: np.dtype] = _HasDType[DTypeT] | _HasNumPyDType[DTypeT] # A subset of `npt.DTypeLike` that can be parametrized w.r.t. `np.generic` -_DTypeLike: TypeAlias = type[_ScalarT] | np.dtype[_ScalarT] | _SupportsDType[np.dtype[_ScalarT]] +type _DTypeLike[ScalarT: np.generic] = ( + type[ScalarT] | np.dtype[ScalarT] | _SupportsDType[np.dtype[ScalarT]] +) # Would create a dtype[np.void] -_VoidDTypeLike: TypeAlias = ( +type _VoidDTypeLike = ( # If a tuple, then it can be either: # - (flexible_dtype, itemsize) # - (fixed_dtype, shape) @@ -80,31 +75,29 @@ def __numpy_dtype__(self, /) -> _DTypeT_co: ... 
# Aliases for commonly used dtype-like objects. # Note that the precision of `np.number` subclasses is ignored herein. -_DTypeLikeBool: TypeAlias = type[bool] | _DTypeLike[np.bool] | _BoolCodes -_DTypeLikeInt: TypeAlias = ( - type[int] | _DTypeLike[np.signedinteger] | _SignedIntegerCodes -) -_DTypeLikeUInt: TypeAlias = _DTypeLike[np.unsignedinteger] | _UnsignedIntegerCodes -_DTypeLikeFloat: TypeAlias = type[float] | _DTypeLike[np.floating] | _FloatingCodes -_DTypeLikeComplex: TypeAlias = ( +type _DTypeLikeBool = type[bool] | _DTypeLike[np.bool] | _BoolCodes +type _DTypeLikeInt = type[int] | _DTypeLike[np.signedinteger] | _SignedIntegerCodes +type _DTypeLikeUInt = _DTypeLike[np.unsignedinteger] | _UnsignedIntegerCodes +type _DTypeLikeFloat = type[float] | _DTypeLike[np.floating] | _FloatingCodes +type _DTypeLikeComplex = ( type[complex] | _DTypeLike[np.complexfloating] | _ComplexFloatingCodes ) -_DTypeLikeComplex_co: TypeAlias = ( +type _DTypeLikeComplex_co = ( type[complex] | _DTypeLike[np.bool | np.number] | _BoolCodes | _NumberCodes ) -_DTypeLikeDT64: TypeAlias = _DTypeLike[np.timedelta64] | _TD64Codes -_DTypeLikeTD64: TypeAlias = _DTypeLike[np.datetime64] | _DT64Codes -_DTypeLikeBytes: TypeAlias = type[bytes] | _DTypeLike[np.bytes_] | _BytesCodes -_DTypeLikeStr: TypeAlias = type[str] | _DTypeLike[np.str_] | _StrCodes -_DTypeLikeVoid: TypeAlias = ( +type _DTypeLikeDT64 = _DTypeLike[np.timedelta64] | _TD64Codes +type _DTypeLikeTD64 = _DTypeLike[np.datetime64] | _DT64Codes +type _DTypeLikeBytes = type[bytes] | _DTypeLike[np.bytes_] | _BytesCodes +type _DTypeLikeStr = type[str] | _DTypeLike[np.str_] | _StrCodes +type _DTypeLikeVoid = ( type[memoryview] | _DTypeLike[np.void] | _VoidDTypeLike | _VoidCodes ) -_DTypeLikeObject: TypeAlias = type[object] | _DTypeLike[np.object_] | _ObjectCodes +type _DTypeLikeObject = type[object] | _DTypeLike[np.object_] | _ObjectCodes # Anything that can be coerced into numpy.dtype. 
# Reference: https://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html -DTypeLike: TypeAlias = _DTypeLike[Any] | _VoidDTypeLike | str +type DTypeLike = type | str | np.dtype | _SupportsDType[np.dtype] | _VoidDTypeLike # NOTE: while it is possible to provide the dtype as a dict of # dtype-like objects (e.g. `{'field1': ..., 'field2': ..., ...}`), diff --git a/numpy/_typing/_nbit.py b/numpy/_typing/_nbit.py index 60bce3245c7a..1ad5f017eeb9 100644 --- a/numpy/_typing/_nbit.py +++ b/numpy/_typing/_nbit.py @@ -1,19 +1,17 @@ """A module with the precisions of platform-specific `~numpy.number`s.""" -from typing import TypeAlias - from ._nbit_base import _8Bit, _16Bit, _32Bit, _64Bit, _96Bit, _128Bit # To-be replaced with a `npt.NBitBase` subclass by numpy's mypy plugin -_NBitByte: TypeAlias = _8Bit -_NBitShort: TypeAlias = _16Bit -_NBitIntC: TypeAlias = _32Bit -_NBitIntP: TypeAlias = _32Bit | _64Bit -_NBitInt: TypeAlias = _NBitIntP -_NBitLong: TypeAlias = _32Bit | _64Bit -_NBitLongLong: TypeAlias = _64Bit +type _NBitByte = _8Bit +type _NBitShort = _16Bit +type _NBitIntC = _32Bit +type _NBitIntP = _32Bit | _64Bit +type _NBitInt = _NBitIntP +type _NBitLong = _32Bit | _64Bit +type _NBitLongLong = _64Bit -_NBitHalf: TypeAlias = _16Bit -_NBitSingle: TypeAlias = _32Bit -_NBitDouble: TypeAlias = _64Bit -_NBitLongDouble: TypeAlias = _64Bit | _96Bit | _128Bit +type _NBitHalf = _16Bit +type _NBitSingle = _32Bit +type _NBitDouble = _64Bit +type _NBitLongDouble = _64Bit | _96Bit | _128Bit diff --git a/numpy/_typing/_nbit_base.py b/numpy/_typing/_nbit_base.py index 28d3e63c1769..28a60ecbe00f 100644 --- a/numpy/_typing/_nbit_base.py +++ b/numpy/_typing/_nbit_base.py @@ -30,14 +30,13 @@ class NBitBase: .. 
code-block:: python - >>> from typing import TypeVar, TYPE_CHECKING + >>> from typing import TYPE_CHECKING >>> import numpy as np >>> import numpy.typing as npt - >>> S = TypeVar("S", bound=npt.NBitBase) - >>> T = TypeVar("T", bound=npt.NBitBase) - - >>> def add(a: np.floating[S], b: np.integer[T]) -> np.floating[S | T]: + >>> def add[S: npt.NBitBase, T: npt.NBitBase]( + ... a: np.floating[S], b: np.integer[T] + ... ) -> np.floating[S | T]: ... return a + b >>> a = np.float16() diff --git a/numpy/_typing/_nbit_base.pyi b/numpy/_typing/_nbit_base.pyi index 81810a2caa3b..bd317c896094 100644 --- a/numpy/_typing/_nbit_base.pyi +++ b/numpy/_typing/_nbit_base.pyi @@ -8,7 +8,7 @@ from typing_extensions import deprecated # Deprecated in NumPy 2.3, 2025-05-01 @deprecated( "`NBitBase` is deprecated and will be removed from numpy.typing in the " - "future. Use `@typing.overload` or a `TypeVar` with a scalar-type as upper " + "future. Use `@typing.overload` or a type parameter with a scalar-type as upper " "bound, instead. (deprecated in NumPy 2.3)", ) @final diff --git a/numpy/_typing/_nested_sequence.py b/numpy/_typing/_nested_sequence.py index e3362a9f21fe..13711be397e9 100644 --- a/numpy/_typing/_nested_sequence.py +++ b/numpy/_typing/_nested_sequence.py @@ -1,17 +1,15 @@ """A module containing the `_NestedSequence` protocol.""" -from typing import TYPE_CHECKING, Any, Protocol, TypeVar, runtime_checkable +from typing import TYPE_CHECKING, Any, Protocol, runtime_checkable if TYPE_CHECKING: from collections.abc import Iterator __all__ = ["_NestedSequence"] -_T_co = TypeVar("_T_co", covariant=True) - @runtime_checkable -class _NestedSequence(Protocol[_T_co]): +class _NestedSequence[T](Protocol): """A protocol for representing nested sequences. 
Warning @@ -54,7 +52,7 @@ def __len__(self, /) -> int: """Implement ``len(self)``.""" raise NotImplementedError - def __getitem__(self, index: int, /) -> "_T_co | _NestedSequence[_T_co]": + def __getitem__(self, index: int, /) -> "T | _NestedSequence[T]": """Implement ``self[x]``.""" raise NotImplementedError @@ -62,11 +60,11 @@ def __contains__(self, x: object, /) -> bool: """Implement ``x in self``.""" raise NotImplementedError - def __iter__(self, /) -> "Iterator[_T_co | _NestedSequence[_T_co]]": + def __iter__(self, /) -> "Iterator[T | _NestedSequence[T]]": """Implement ``iter(self)``.""" raise NotImplementedError - def __reversed__(self, /) -> "Iterator[_T_co | _NestedSequence[_T_co]]": + def __reversed__(self, /) -> "Iterator[T | _NestedSequence[T]]": """Implement ``reversed(self)``.""" raise NotImplementedError diff --git a/numpy/_typing/_scalars.py b/numpy/_typing/_scalars.py index b0de66d89aa1..2d36c4961c42 100644 --- a/numpy/_typing/_scalars.py +++ b/numpy/_typing/_scalars.py @@ -1,20 +1,20 @@ -from typing import Any, TypeAlias +from typing import Any import numpy as np # NOTE: `_StrLike_co` and `_BytesLike_co` are pointless, as `np.str_` and # `np.bytes_` are already subclasses of their builtin counterpart -_CharLike_co: TypeAlias = str | bytes +type _CharLike_co = str | bytes # The `Like_co` type-aliases below represent all scalars that can be # coerced into `` (with the casting rule `same_kind`) -_BoolLike_co: TypeAlias = bool | np.bool -_UIntLike_co: TypeAlias = bool | np.unsignedinteger | np.bool -_IntLike_co: TypeAlias = int | np.integer | np.bool -_FloatLike_co: TypeAlias = float | np.floating | np.integer | np.bool -_ComplexLike_co: TypeAlias = complex | np.number | np.bool -_NumberLike_co: TypeAlias = _ComplexLike_co -_TD64Like_co: TypeAlias = int | np.timedelta64 | np.integer | np.bool +type _BoolLike_co = bool | np.bool +type _UIntLike_co = bool | np.unsignedinteger | np.bool +type _IntLike_co = int | np.integer | np.bool +type _FloatLike_co = 
float | np.floating | np.integer | np.bool +type _ComplexLike_co = complex | np.number | np.bool +type _NumberLike_co = _ComplexLike_co +type _TD64Like_co = int | np.timedelta64 | np.integer | np.bool # `_VoidLike_co` is technically not a scalar, but it's close enough -_VoidLike_co: TypeAlias = tuple[Any, ...] | np.void -_ScalarLike_co: TypeAlias = complex | str | bytes | np.generic +type _VoidLike_co = tuple[Any, ...] | np.void +type _ScalarLike_co = complex | str | bytes | np.generic diff --git a/numpy/_typing/_shape.py b/numpy/_typing/_shape.py index e297aef2f554..132943b283c8 100644 --- a/numpy/_typing/_shape.py +++ b/numpy/_typing/_shape.py @@ -1,8 +1,8 @@ from collections.abc import Sequence -from typing import Any, SupportsIndex, TypeAlias +from typing import Any, SupportsIndex -_Shape: TypeAlias = tuple[int, ...] -_AnyShape: TypeAlias = tuple[Any, ...] +type _Shape = tuple[int, ...] +type _AnyShape = tuple[Any, ...] # Anything that can be coerced to a shape tuple -_ShapeLike: TypeAlias = SupportsIndex | Sequence[SupportsIndex] +type _ShapeLike = SupportsIndex | Sequence[SupportsIndex] diff --git a/numpy/_typing/_ufunc.pyi b/numpy/_typing/_ufunc.pyi index 9d3fa0e5335c..b9dc5fd5b975 100644 --- a/numpy/_typing/_ufunc.pyi +++ b/numpy/_typing/_ufunc.pyi @@ -10,16 +10,13 @@ from _typeshed import Incomplete from types import EllipsisType from typing import ( Any, - Generic, Literal, LiteralString, Never, NoReturn, Protocol, SupportsIndex, - TypeAlias, TypedDict, - TypeVar, Unpack, overload, type_check_only, @@ -27,31 +24,19 @@ from typing import ( import numpy as np from numpy import _CastingKind, _OrderKACF, ufunc -from numpy.typing import NDArray -from ._array_like import ArrayLike, _ArrayLikeBool_co, _ArrayLikeInt_co +from ._array_like import ArrayLike, NDArray, _ArrayLikeBool_co, _ArrayLikeInt_co from ._dtype_like import DTypeLike from ._scalars import _ScalarLike_co from ._shape import _ShapeLike -_T = TypeVar("_T") -_2Tuple: TypeAlias = tuple[_T, _T] 
-_3Tuple: TypeAlias = tuple[_T, _T, _T] -_4Tuple: TypeAlias = tuple[_T, _T, _T, _T] +type _2Tuple[T] = tuple[T, T] +type _3Tuple[T] = tuple[T, T, T] +type _4Tuple[T] = tuple[T, T, T, T] -_2PTuple: TypeAlias = tuple[_T, _T, *tuple[_T, ...]] -_3PTuple: TypeAlias = tuple[_T, _T, _T, *tuple[_T, ...]] -_4PTuple: TypeAlias = tuple[_T, _T, _T, _T, *tuple[_T, ...]] - -_NTypes = TypeVar("_NTypes", bound=int, covariant=True) -_IDType = TypeVar("_IDType", covariant=True) -_NameType = TypeVar("_NameType", bound=LiteralString, covariant=True) -_Signature = TypeVar("_Signature", bound=LiteralString, covariant=True) - -_NIn = TypeVar("_NIn", bound=int, covariant=True) -_NOut = TypeVar("_NOut", bound=int, covariant=True) -_ReturnType_co = TypeVar("_ReturnType_co", covariant=True) -_ArrayT = TypeVar("_ArrayT", bound=np.ndarray) +type _2PTuple[T] = tuple[T, T, *tuple[T, ...]] +type _3PTuple[T] = tuple[T, T, T, *tuple[T, ...]] +type _4PTuple[T] = tuple[T, T, T, T, *tuple[T, ...]] @type_check_only class _SupportsArrayUFunc(Protocol): @@ -89,15 +74,15 @@ class _ReduceKwargs(TypedDict, total=False): # pyright: reportIncompatibleMethodOverride=false @type_check_only -class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] +class _UFunc_Nin1_Nout1[NameT: LiteralString, NTypesT: int, IdentT](ufunc): # type: ignore[misc] @property - def __name__(self) -> _NameType: ... + def __name__(self) -> NameT: ... @property - def __qualname__(self) -> _NameType: ... # pyright: ignore[reportIncompatibleVariableOverride] + def __qualname__(self) -> NameT: ... # pyright: ignore[reportIncompatibleVariableOverride] @property - def ntypes(self) -> _NTypes: ... + def ntypes(self) -> NTypesT: ... @property - def identity(self) -> _IDType: ... + def identity(self) -> IdentT: ... @property def nin(self) -> Literal[1]: ... 
@property @@ -158,15 +143,15 @@ class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i def at(self, a: np.ndarray | _SupportsArrayUFunc, indices: _ArrayLikeInt_co, /) -> None: ... # type: ignore[override] @type_check_only -class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] +class _UFunc_Nin2_Nout1[NameT: LiteralString, NTypesT: int, IdentT](ufunc): # type: ignore[misc] @property - def __name__(self) -> _NameType: ... + def __name__(self) -> NameT: ... @property - def __qualname__(self) -> _NameType: ... # pyright: ignore[reportIncompatibleVariableOverride] + def __qualname__(self) -> NameT: ... # pyright: ignore[reportIncompatibleVariableOverride] @property - def ntypes(self) -> _NTypes: ... + def ntypes(self) -> NTypesT: ... @property - def identity(self) -> _IDType: ... + def identity(self) -> IdentT: ... @property def nin(self) -> Literal[2]: ... @property @@ -353,15 +338,15 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i ) -> None: ... @type_check_only -class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] +class _UFunc_Nin1_Nout2[NameT: LiteralString, NTypesT: int, IdentT](ufunc): # type: ignore[misc] @property - def __name__(self) -> _NameType: ... + def __name__(self) -> NameT: ... @property - def __qualname__(self) -> _NameType: ... # pyright: ignore[reportIncompatibleVariableOverride] + def __qualname__(self) -> NameT: ... # pyright: ignore[reportIncompatibleVariableOverride] @property - def ntypes(self) -> _NTypes: ... + def ntypes(self) -> NTypesT: ... @property - def identity(self) -> _IDType: ... + def identity(self) -> IdentT: ... @property def nin(self) -> Literal[1]: ... @property @@ -427,15 +412,15 @@ class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i def at(self, a: Never, indices: Never, /) -> NoReturn: ... 
# type: ignore[override] @type_check_only -class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] +class _UFunc_Nin2_Nout2[NameT: LiteralString, NTypesT: int, IdentT](ufunc): # type: ignore[misc] @property - def __name__(self) -> _NameType: ... + def __name__(self) -> NameT: ... @property - def __qualname__(self) -> _NameType: ... # pyright: ignore[reportIncompatibleVariableOverride] + def __qualname__(self) -> NameT: ... # pyright: ignore[reportIncompatibleVariableOverride] @property - def ntypes(self) -> _NTypes: ... + def ntypes(self) -> NTypesT: ... @property - def identity(self) -> _IDType: ... + def identity(self) -> IdentT: ... @property def nin(self) -> Literal[2]: ... @property @@ -487,15 +472,15 @@ class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i def at(self, a: Never, indices: Never, b: Never, /) -> NoReturn: ... # type: ignore[override] @type_check_only -class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType, _Signature]): # type: ignore[misc] +class _GUFunc_Nin2_Nout1[NameT: LiteralString, NTypesT: int, IdentT, SignatureT: LiteralString](ufunc): # type: ignore[misc] @property - def __name__(self) -> _NameType: ... + def __name__(self) -> NameT: ... @property - def __qualname__(self) -> _NameType: ... # pyright: ignore[reportIncompatibleVariableOverride] + def __qualname__(self) -> NameT: ... # pyright: ignore[reportIncompatibleVariableOverride] @property - def ntypes(self) -> _NTypes: ... + def ntypes(self) -> NTypesT: ... @property - def identity(self) -> _IDType: ... + def identity(self) -> IdentT: ... @property def nin(self) -> Literal[2]: ... @property @@ -503,7 +488,7 @@ class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType, _Signature] @property def nargs(self) -> Literal[3]: ... @property - def signature(self) -> _Signature: ... + def signature(self) -> SignatureT: ... 
# Scalar for 1D array-likes; ndarray otherwise @overload @@ -580,9 +565,9 @@ class _PyFunc_Kwargs_Nargs4P(TypedDict, total=False): signature: str | _4PTuple[DTypeLike] @type_check_only -class _PyFunc_Nin1_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: ignore[misc] +class _PyFunc_Nin1_Nout1[ReturnT, IdentT](ufunc): # type: ignore[misc] @property - def identity(self) -> _IDType: ... + def identity(self) -> IdentT: ... @property def nin(self) -> Literal[1]: ... @property @@ -601,7 +586,7 @@ class _PyFunc_Nin1_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno /, out: EllipsisType | None = None, **kwargs: Unpack[_PyFunc_Kwargs_Nargs2], - ) -> _ReturnType_co: ... + ) -> ReturnT: ... @overload def __call__( self, @@ -609,15 +594,15 @@ class _PyFunc_Nin1_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno /, out: EllipsisType | None = None, **kwargs: Unpack[_PyFunc_Kwargs_Nargs2], - ) -> _ReturnType_co | NDArray[np.object_]: ... + ) -> ReturnT | NDArray[np.object_]: ... @overload - def __call__( + def __call__[OutT: np.ndarray]( self, x1: ArrayLike, /, - out: _ArrayT | tuple[_ArrayT], + out: OutT | tuple[OutT], **kwargs: Unpack[_PyFunc_Kwargs_Nargs2], - ) -> _ArrayT: ... + ) -> OutT: ... @overload def __call__( self, @@ -635,9 +620,9 @@ class _PyFunc_Nin1_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno def at(self, a: np.ndarray | _SupportsArrayUFunc, indices: _ArrayLikeInt_co, /) -> None: ... # type: ignore[override] @type_check_only -class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: ignore[misc] +class _PyFunc_Nin2_Nout1[ReturnT, IdentT](ufunc): # type: ignore[misc] @property - def identity(self) -> _IDType: ... + def identity(self) -> IdentT: ... @property def nin(self) -> Literal[2]: ... @property @@ -657,7 +642,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno /, out: EllipsisType | None = None, **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], - ) -> _ReturnType_co: ... 
+ ) -> ReturnT: ... @overload def __call__( self, @@ -666,16 +651,16 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno /, out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], - ) -> _ReturnType_co | NDArray[np.object_]: ... + ) -> ReturnT | NDArray[np.object_]: ... @overload - def __call__( + def __call__[OutT: np.ndarray]( self, x1: ArrayLike, x2: ArrayLike, /, - out: _ArrayT | tuple[_ArrayT], + out: OutT | tuple[OutT], **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], - ) -> _ArrayT: ... + ) -> OutT: ... @overload def __call__( self, @@ -705,28 +690,28 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno out: EllipsisType | None = None, ) -> NDArray[np.object_]: ... @overload - def accumulate( + def accumulate[OutT: np.ndarray]( self, array: ArrayLike, /, axis: SupportsIndex = 0, dtype: DTypeLike | None = None, *, - out: _ArrayT, - ) -> _ArrayT: ... + out: OutT, + ) -> OutT: ... @overload # type: ignore[override] - def reduce( # out=array + def reduce[OutT: np.ndarray]( # out=array self, array: ArrayLike, /, axis: _ShapeLike | None = 0, dtype: DTypeLike | None = None, *, - out: _ArrayT | tuple[_ArrayT], + out: OutT | tuple[OutT], keepdims: bool = False, **kwargs: Unpack[_ReduceKwargs], - ) -> _ArrayT: ... + ) -> OutT: ... @overload # out=... def reduce( self, @@ -761,10 +746,10 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno out: EllipsisType | None = None, keepdims: bool = False, **kwargs: Unpack[_ReduceKwargs], - ) -> _ReturnType_co | NDArray[np.object_]: ... + ) -> ReturnT | NDArray[np.object_]: ... @overload # type: ignore[override] - def reduceat( + def reduceat[OutT: np.ndarray]( self, array: ArrayLike, /, @@ -772,8 +757,8 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno axis: SupportsIndex = 0, dtype: DTypeLike | None = None, *, - out: _ArrayT | tuple[_ArrayT], - ) -> _ArrayT: ... 
+ out: OutT | tuple[OutT], + ) -> OutT: ... @overload def reduceat( self, @@ -804,7 +789,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno *, out: EllipsisType | None = None, **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], - ) -> _ReturnType_co: ... + ) -> ReturnT: ... @overload def outer( self, @@ -814,17 +799,17 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno *, out: EllipsisType | None = None, **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], - ) -> _ReturnType_co | NDArray[np.object_]: ... + ) -> ReturnT | NDArray[np.object_]: ... @overload - def outer( + def outer[OutT: np.ndarray]( self, A: ArrayLike, B: ArrayLike, /, *, - out: _ArrayT, + out: OutT, **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], - ) -> _ArrayT: ... + ) -> OutT: ... @overload def outer( self, @@ -855,11 +840,11 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno ) -> None: ... @type_check_only -class _PyFunc_Nin3P_Nout1(ufunc, Generic[_ReturnType_co, _IDType, _NIn]): # type: ignore[misc] +class _PyFunc_Nin3P_Nout1[ReturnT, IdentT, NInT: int](ufunc): # type: ignore[misc] @property - def identity(self) -> _IDType: ... + def identity(self) -> IdentT: ... @property - def nin(self) -> _NIn: ... + def nin(self) -> NInT: ... @property def nout(self) -> Literal[1]: ... @property @@ -877,7 +862,7 @@ class _PyFunc_Nin3P_Nout1(ufunc, Generic[_ReturnType_co, _IDType, _NIn]): # typ *xs: _ScalarLike_co, out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs4P], - ) -> _ReturnType_co: ... + ) -> ReturnT: ... @overload def __call__( self, @@ -888,18 +873,18 @@ class _PyFunc_Nin3P_Nout1(ufunc, Generic[_ReturnType_co, _IDType, _NIn]): # typ *xs: ArrayLike, out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs4P], - ) -> _ReturnType_co | NDArray[np.object_]: ... + ) -> ReturnT | NDArray[np.object_]: ... 
@overload - def __call__( + def __call__[OutT: np.ndarray]( self, x1: ArrayLike, x2: ArrayLike, x3: ArrayLike, /, *xs: ArrayLike, - out: _ArrayT | tuple[_ArrayT], + out: OutT | tuple[OutT], **kwargs: Unpack[_PyFunc_Kwargs_Nargs4P], - ) -> _ArrayT: ... + ) -> OutT: ... @overload def __call__( self, @@ -919,13 +904,13 @@ class _PyFunc_Nin3P_Nout1(ufunc, Generic[_ReturnType_co, _IDType, _NIn]): # typ def at(self, a: Never, indices: Never, /, *args: Never) -> NoReturn: ... # type: ignore[override] @type_check_only -class _PyFunc_Nin1P_Nout2P(ufunc, Generic[_ReturnType_co, _IDType, _NIn, _NOut]): # type: ignore[misc] +class _PyFunc_Nin1P_Nout2P[ReturnT, IdentT, NInT: int, NOutT: int](ufunc): # type: ignore[misc] @property - def identity(self) -> _IDType: ... + def identity(self) -> IdentT: ... @property - def nin(self) -> _NIn: ... + def nin(self) -> NInT: ... @property - def nout(self) -> _NOut: ... + def nout(self) -> NOutT: ... @property def ntypes(self) -> Literal[1]: ... @property @@ -939,7 +924,7 @@ class _PyFunc_Nin1P_Nout2P(ufunc, Generic[_ReturnType_co, _IDType, _NIn, _NOut]) *xs: _ScalarLike_co, out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3P], - ) -> _2PTuple[_ReturnType_co]: ... + ) -> _2PTuple[ReturnT]: ... @overload def __call__( self, @@ -948,16 +933,16 @@ class _PyFunc_Nin1P_Nout2P(ufunc, Generic[_ReturnType_co, _IDType, _NIn, _NOut]) *xs: ArrayLike, out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3P], - ) -> _2PTuple[_ReturnType_co | NDArray[np.object_]]: ... + ) -> _2PTuple[ReturnT | NDArray[np.object_]]: ... @overload - def __call__( + def __call__[OutT: np.ndarray]( self, x1: ArrayLike, /, *xs: ArrayLike, - out: _2PTuple[_ArrayT], + out: _2PTuple[OutT], **kwargs: Unpack[_PyFunc_Kwargs_Nargs3P], - ) -> _2PTuple[_ArrayT]: ... + ) -> _2PTuple[OutT]: ... 
@overload def __call__( self, diff --git a/numpy/_utils/__init__.pyi b/numpy/_utils/__init__.pyi index b630777ced99..7a78cabe60f3 100644 --- a/numpy/_utils/__init__.pyi +++ b/numpy/_utils/__init__.pyi @@ -1,14 +1,11 @@ from _typeshed import IdentityFunction from collections.abc import Callable, Iterable -from typing import Protocol, TypeVar, overload, type_check_only +from typing import Protocol, overload, type_check_only from ._convertions import asbytes as asbytes, asunicode as asunicode ### -_T = TypeVar("_T") -_HasModuleT = TypeVar("_HasModuleT", bound=_HasModule) - @type_check_only class _HasModule(Protocol): __module__: str @@ -18,11 +15,11 @@ class _HasModule(Protocol): @overload def set_module(module: None) -> IdentityFunction: ... @overload -def set_module(module: str) -> Callable[[_HasModuleT], _HasModuleT]: ... +def set_module[ModuleT: _HasModule](module: str) -> Callable[[ModuleT], ModuleT]: ... # -def _rename_parameter( +def _rename_parameter[T]( old_names: Iterable[str], new_names: Iterable[str], dep_version: str | None = None, -) -> Callable[[Callable[..., _T]], Callable[..., _T]]: ... +) -> Callable[[Callable[..., T]], Callable[..., T]]: ... diff --git a/numpy/_utils/_inspect.pyi b/numpy/_utils/_inspect.pyi index 40546d2f4497..dd738025b728 100644 --- a/numpy/_utils/_inspect.pyi +++ b/numpy/_utils/_inspect.pyi @@ -1,21 +1,18 @@ import types from _typeshed import SupportsLenAndGetItem from collections.abc import Callable, Mapping -from typing import Any, Final, TypeAlias, TypeVar, overload +from typing import Any, Final, overload from typing_extensions import TypeIs __all__ = ["formatargspec", "getargspec"] ### -_T = TypeVar("_T") -_RT = TypeVar("_RT") +type _StrSeq = SupportsLenAndGetItem[str] +type _NestedSeq[T] = list[T | _NestedSeq[T]] | tuple[T | _NestedSeq[T], ...] -_StrSeq: TypeAlias = SupportsLenAndGetItem[str] -_NestedSeq: TypeAlias = list[_T | _NestedSeq[_T]] | tuple[_T | _NestedSeq[_T], ...] 
- -_JoinFunc: TypeAlias = Callable[[list[_T]], _T] -_FormatFunc: TypeAlias = Callable[[_T], str] +type _JoinFunc[T] = Callable[[list[T]], T] +type _FormatFunc[T] = Callable[[T], str] ### @@ -43,7 +40,7 @@ def joinseq(seq: _StrSeq) -> str: ... @overload def strseq(object: _NestedSeq[str], convert: Callable[[Any], Any], join: _JoinFunc[str] = ...) -> str: ... @overload -def strseq(object: _NestedSeq[_T], convert: Callable[[_T], _RT], join: _JoinFunc[_RT]) -> _RT: ... +def strseq[VT, RT](object: _NestedSeq[VT], convert: Callable[[VT], RT], join: _JoinFunc[RT]) -> RT: ... # def formatargspec( diff --git a/numpy/_utils/_pep440.pyi b/numpy/_utils/_pep440.pyi index 11ae02e57a59..593960274814 100644 --- a/numpy/_utils/_pep440.pyi +++ b/numpy/_utils/_pep440.pyi @@ -7,17 +7,15 @@ from typing import ( Generic, Literal as L, NamedTuple, - TypeVar, final, type_check_only, ) -from typing_extensions import TypeIs +from typing_extensions import TypeIs, TypeVar __all__ = ["VERSION_PATTERN", "InvalidVersion", "LegacyVersion", "Version", "parse"] ### -_CmpKeyT = TypeVar("_CmpKeyT", bound=tuple[object, ...]) _CmpKeyT_co = TypeVar("_CmpKeyT_co", bound=tuple[object, ...], default=tuple[Any, ...], covariant=True) ### @@ -71,7 +69,12 @@ class _BaseVersion(Generic[_CmpKeyT_co]): def __le__(self, other: _BaseVersion, /) -> bool: ... def __ge__(self, other: _BaseVersion, /) -> bool: ... def __gt__(self, other: _BaseVersion, /) -> bool: ... - def _compare(self, /, other: _BaseVersion[_CmpKeyT], method: Callable[[_CmpKeyT_co, _CmpKeyT], bool]) -> bool: ... + def _compare[CmpKeyT: tuple[object, ...]]( + self, + /, + other: _BaseVersion[CmpKeyT], + method: Callable[[_CmpKeyT_co, CmpKeyT], bool], + ) -> bool: ... 
class LegacyVersion(_BaseVersion[tuple[L[-1], tuple[str, ...]]]): _version: Final[str] diff --git a/numpy/ctypeslib/_ctypeslib.pyi b/numpy/ctypeslib/_ctypeslib.pyi index 8881141f8ed5..1d4780aeb422 100644 --- a/numpy/ctypeslib/_ctypeslib.pyi +++ b/numpy/ctypeslib/_ctypeslib.pyi @@ -4,19 +4,16 @@ import ctypes from _typeshed import StrOrBytesPath from collections.abc import Iterable, Sequence from ctypes import c_int64 as _c_intp -from typing import Any, ClassVar, Generic, Literal as L, TypeAlias, TypeVar, overload +from typing import Any, ClassVar, Literal as L, overload import numpy as np from numpy import ( byte, double, - dtype, - generic, intc, long, longdouble, longlong, - ndarray, short, single, ubyte, @@ -54,12 +51,7 @@ from numpy._typing import ( __all__ = ["load_library", "ndpointer", "c_intp", "as_ctypes", "as_array", "as_ctypes_type"] -# TODO: Add a proper `_Shape` bound once we've got variadic typevars -_DTypeT = TypeVar("_DTypeT", bound=dtype) -_DTypeOptionalT = TypeVar("_DTypeOptionalT", bound=dtype | None) -_ScalarT = TypeVar("_ScalarT", bound=generic) - -_FlagsKind: TypeAlias = L[ +type _FlagsKind = L[ "C_CONTIGUOUS", "CONTIGUOUS", "C", "F_CONTIGUOUS", "FORTRAN", "F", "ALIGNED", "A", @@ -68,27 +60,28 @@ _FlagsKind: TypeAlias = L[ "WRITEBACKIFCOPY", "X", ] -# TODO: Add a shape typevar once we have variadic typevars (PEP 646) -class _ndptr(ctypes.c_void_p, Generic[_DTypeOptionalT]): +# TODO: Add a shape type parameter +class _ndptr[OptionalDTypeT: np.dtype | None](ctypes.c_void_p): # In practice these 4 classvars are defined in the dynamic class # returned by `ndpointer` - _dtype_: ClassVar[_DTypeOptionalT] - _shape_: ClassVar[_AnyShape | None] - _ndim_: ClassVar[int | None] - _flags_: ClassVar[list[_FlagsKind] | None] + _dtype_: OptionalDTypeT = ... + _shape_: ClassVar[_AnyShape | None] = ... + _ndim_: ClassVar[int | None] = ... + _flags_: ClassVar[list[_FlagsKind] | None] = ... 
@overload # type: ignore[override] @classmethod - def from_param(cls: type[_ndptr[None]], obj: NDArray[Any]) -> _ctypes[Any]: ... + def from_param(cls: type[_ndptr[None]], obj: np.ndarray) -> _ctypes[Any]: ... @overload @classmethod - def from_param(cls: type[_ndptr[_DTypeT]], obj: ndarray[Any, _DTypeT]) -> _ctypes[Any]: ... + def from_param[DTypeT: np.dtype](cls: type[_ndptr[DTypeT]], obj: np.ndarray[Any, DTypeT]) -> _ctypes[Any]: ... # pyright: ignore[reportIncompatibleMethodOverride] + +class _concrete_ndptr[DTypeT: np.dtype](_ndptr[DTypeT]): + _dtype_: DTypeT = ... + _shape_: ClassVar[_AnyShape] = ... # pyright: ignore[reportIncompatibleVariableOverride] -class _concrete_ndptr(_ndptr[_DTypeT]): - _dtype_: ClassVar[_DTypeT] - _shape_: ClassVar[_AnyShape] @property - def contents(self) -> ndarray[_AnyShape, _DTypeT]: ... + def contents(self) -> np.ndarray[_AnyShape, DTypeT]: ... def load_library(libname: StrOrBytesPath, loader_path: StrOrBytesPath) -> ctypes.CDLL: ... @@ -102,13 +95,13 @@ def ndpointer( flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = None, ) -> type[_ndptr[None]]: ... @overload -def ndpointer( - dtype: _DTypeLike[_ScalarT], +def ndpointer[ScalarT: np.generic]( + dtype: _DTypeLike[ScalarT], ndim: int | None = None, *, shape: _ShapeLike, flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = None, -) -> type[_concrete_ndptr[dtype[_ScalarT]]]: ... +) -> type[_concrete_ndptr[np.dtype[ScalarT]]]: ... @overload def ndpointer( dtype: DTypeLike | None, @@ -116,21 +109,21 @@ def ndpointer( *, shape: _ShapeLike, flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = None, -) -> type[_concrete_ndptr[dtype]]: ... +) -> type[_concrete_ndptr[np.dtype]]: ... 
@overload -def ndpointer( - dtype: _DTypeLike[_ScalarT], +def ndpointer[ScalarT: np.generic]( + dtype: _DTypeLike[ScalarT], ndim: int | None = None, shape: None = None, flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = None, -) -> type[_ndptr[dtype[_ScalarT]]]: ... +) -> type[_ndptr[np.dtype[ScalarT]]]: ... @overload def ndpointer( dtype: DTypeLike | None, ndim: int | None = None, shape: None = None, flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = None, -) -> type[_ndptr[dtype]]: ... +) -> type[_ndptr[np.dtype]]: ... @overload def as_ctypes_type(dtype: _BoolCodes | _DTypeLike[np.bool] | type[ctypes.c_bool]) -> type[ctypes.c_bool]: ... @@ -170,7 +163,7 @@ def as_ctypes_type(dtype: str) -> type[Any]: ... @overload def as_array(obj: ctypes._PointerLike, shape: Sequence[int]) -> NDArray[Any]: ... @overload -def as_array(obj: _ArrayLike[_ScalarT], shape: _ShapeLike | None = None) -> NDArray[_ScalarT]: ... +def as_array[ScalarT: np.generic](obj: _ArrayLike[ScalarT], shape: _ShapeLike | None = None) -> NDArray[ScalarT]: ... @overload def as_array(obj: object, shape: _ShapeLike | None = None) -> NDArray[Any]: ... 
diff --git a/numpy/dtypes.pyi b/numpy/dtypes.pyi index 3e34113edd4f..920e23c85f1f 100644 --- a/numpy/dtypes.pyi +++ b/numpy/dtypes.pyi @@ -7,7 +7,6 @@ from typing import ( Never, NoReturn, Self, - TypeAlias, final, overload, type_check_only, @@ -52,17 +51,20 @@ __all__ = [ # noqa: RUF022 "StringDType", ] -# Helper base classes (typing-only) +# Type parameters + +_ItemSizeT_co = TypeVar("_ItemSizeT_co", bound=int, default=int, covariant=True) +_NaObjectT_co = TypeVar("_NaObjectT_co", default=Never, covariant=True) -_ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, covariant=True) +# Helper base classes (typing-only) @type_check_only -class _SimpleDType(np.dtype[_ScalarT_co], Generic[_ScalarT_co]): # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues] +class _SimpleDType[ScalarT: np.generic](np.dtype[ScalarT]): # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues] names: None # pyright: ignore[reportIncompatibleVariableOverride] def __new__(cls, /) -> Self: ... def __getitem__(self, key: Any, /) -> NoReturn: ... @property - def base(self) -> np.dtype[_ScalarT_co]: ... + def base(self) -> np.dtype[ScalarT]: ... @property def fields(self) -> None: ... @property @@ -77,7 +79,7 @@ class _SimpleDType(np.dtype[_ScalarT_co], Generic[_ScalarT_co]): # type: ignore def subdtype(self) -> None: ... @type_check_only -class _LiteralDType(_SimpleDType[_ScalarT_co], Generic[_ScalarT_co]): # type: ignore[misc] +class _LiteralDType[ScalarT_co: np.generic](_SimpleDType[ScalarT_co]): # type: ignore[misc] @property def flags(self) -> L[0]: ... 
@property @@ -85,21 +87,17 @@ class _LiteralDType(_SimpleDType[_ScalarT_co], Generic[_ScalarT_co]): # type: i # Helper mixins (typing-only): -_KindT_co = TypeVar("_KindT_co", bound=LiteralString, covariant=True) -_CharT_co = TypeVar("_CharT_co", bound=LiteralString, covariant=True) -_NumT_co = TypeVar("_NumT_co", bound=int, covariant=True) - @type_check_only -class _TypeCodes(Generic[_KindT_co, _CharT_co, _NumT_co]): +class _TypeCodes[KindT: LiteralString, CharT: LiteralString, NumT: int]: @final @property - def kind(self) -> _KindT_co: ... + def kind(self) -> KindT: ... @final @property - def char(self) -> _CharT_co: ... + def char(self) -> CharT: ... @final @property - def num(self) -> _NumT_co: ... + def num(self) -> NumT: ... @type_check_only class _NoOrder: @@ -113,17 +111,14 @@ class _NativeOrder: @property def byteorder(self) -> L["="]: ... -_DataSize_co = TypeVar("_DataSize_co", bound=int, covariant=True) -_ItemSize_co = TypeVar("_ItemSize_co", bound=int, covariant=True, default=int) - @type_check_only -class _NBit(Generic[_DataSize_co, _ItemSize_co]): +class _NBit[AlignmentT: int, ItemSizeT: int]: @final @property - def alignment(self) -> _DataSize_co: ... + def alignment(self) -> AlignmentT: ... @final @property - def itemsize(self) -> _ItemSize_co: ... + def itemsize(self) -> ItemSizeT: ... @type_check_only class _8Bit(_NoOrder, _NBit[L[1], L[1]]): ... @@ -238,7 +233,7 @@ class UInt64DType( # type: ignore[misc] def str(self) -> L["u8"]: ... 
# Standard C-named version/alias:
-# NOTE: Don't make these `Final`: it will break stubtest
+# NOTE: Don't make these `Final[_]` or a `type _`: it will break stubtest
 ByteDType = Int8DType
 UByteDType = UInt8DType
 ShortDType = Int16DType
@@ -426,11 +421,11 @@ class ObjectDType(  # type: ignore[misc]
 class BytesDType(  # type: ignore[misc]
     _TypeCodes[L["S"], L["S"], L[18]],
     _NoOrder,
-    _NBit[L[1], _ItemSize_co],
+    _NBit[L[1], _ItemSizeT_co],
     _SimpleDType[np.bytes_],
-    Generic[_ItemSize_co],
+    Generic[_ItemSizeT_co],
 ):
-    def __new__(cls, size: _ItemSize_co, /) -> BytesDType[_ItemSize_co]: ...
+    def __new__[ItemSizeT: int](cls, size: ItemSizeT, /) -> BytesDType[ItemSizeT]: ...
     @property
     def hasobject(self) -> L[False]: ...
     @property
@@ -442,11 +437,11 @@ class BytesDType(  # type: ignore[misc]
 class StrDType(  # type: ignore[misc]
     _TypeCodes[L["U"], L["U"], L[19]],
     _NativeOrder,
-    _NBit[L[4], _ItemSize_co],
+    _NBit[L[4], _ItemSizeT_co],
     _SimpleDType[np.str_],
-    Generic[_ItemSize_co],
+    Generic[_ItemSizeT_co],
 ):
-    def __new__(cls, size: _ItemSize_co, /) -> StrDType[_ItemSize_co]: ...
+    def __new__[ItemSizeT: int](cls, size: ItemSizeT, /) -> StrDType[ItemSizeT]: ...
     @property
     def hasobject(self) -> L[False]: ...
     @property
@@ -458,12 +453,12 @@ class StrDType(  # type: ignore[misc]
 class VoidDType(  # type: ignore[misc]
     _TypeCodes[L["V"], L["V"], L[20]],
     _NoOrder,
-    _NBit[L[1], _ItemSize_co],
+    _NBit[L[1], _ItemSizeT_co],
     np.dtype[np.void],  # pyright: ignore[reportGeneralTypeIssues]
-    Generic[_ItemSize_co],
+    Generic[_ItemSizeT_co],
 ):
     # NOTE: `VoidDType(...)` raises a `TypeError` at the moment
-    def __new__(cls, length: _ItemSize_co, /) -> NoReturn: ...
+    def __new__(cls, length: _ItemSizeT_co, /) -> NoReturn: ...
     @property
     def base(self) -> Self: ...
@property @@ -483,9 +478,9 @@ class VoidDType( # type: ignore[misc] # Other: -_DateUnit: TypeAlias = L["Y", "M", "W", "D"] -_TimeUnit: TypeAlias = L["h", "m", "s", "ms", "us", "ns", "ps", "fs", "as"] -_DateTimeUnit: TypeAlias = _DateUnit | _TimeUnit +type _DateUnit = L["Y", "M", "W", "D"] +type _TimeUnit = L["h", "m", "s", "ms", "us", "ns", "ps", "fs", "as"] +type _DateTimeUnit = _DateUnit | _TimeUnit @final class DateTime64DType( # type: ignore[misc] @@ -577,8 +572,6 @@ class TimeDelta64DType( # type: ignore[misc] "m8[as]", ]: ... -_NaObjectT_co = TypeVar("_NaObjectT_co", default=Never, covariant=True) - @final class StringDType( # type: ignore[misc] _TypeCodes[L["T"], L["T"], L[2056]], diff --git a/numpy/f2py/_backends/_meson.pyi b/numpy/f2py/_backends/_meson.pyi index 5c85c61586fc..55ff9f7ae78d 100644 --- a/numpy/f2py/_backends/_meson.pyi +++ b/numpy/f2py/_backends/_meson.pyi @@ -1,7 +1,6 @@ from collections.abc import Callable from pathlib import Path -from typing import Final, Literal as L -from typing_extensions import override +from typing import Final, Literal as L, override from ._backend import Backend diff --git a/numpy/f2py/auxfuncs.pyi b/numpy/f2py/auxfuncs.pyi index 32e381cf9a1c..fbf0ad764aae 100644 --- a/numpy/f2py/auxfuncs.pyi +++ b/numpy/f2py/auxfuncs.pyi @@ -1,7 +1,7 @@ from _typeshed import FileDescriptorOrPath from collections.abc import Callable, Mapping from pprint import pprint as show -from typing import Any, Final, Literal as L, Never, TypeAlias, TypeVar, overload +from typing import Any, Final, Literal as L, Never, overload from .cfuncs import errmess @@ -106,15 +106,12 @@ __all__ = [ ### -_VT = TypeVar("_VT") -_RT = TypeVar("_RT") +type _Var = Mapping[str, list[str]] +type _ROut = Mapping[str, str] +type _F2CMap = Mapping[str, Mapping[str, str]] -_Var: TypeAlias = Mapping[str, list[str]] -_ROut: TypeAlias = Mapping[str, str] -_F2CMap: TypeAlias = Mapping[str, Mapping[str, str]] - -_Bool: TypeAlias = bool | L[0, 1] -_Intent: TypeAlias = L[ 
+type _Bool = bool | L[0, 1] +type _Intent = L[ "INTENT_IN", "INTENT_OUT", "INTENT_INOUT", @@ -140,9 +137,9 @@ class throw_error: def __call__(self, /, var: _Var) -> Never: ... # raises F2PYError # -def l_and(*f: tuple[str, Callable[[_VT], _RT]]) -> Callable[[_VT], _RT]: ... -def l_or(*f: tuple[str, Callable[[_VT], _RT]]) -> Callable[[_VT], _RT]: ... -def l_not(f: tuple[str, Callable[[_VT], _RT]]) -> Callable[[_VT], _RT]: ... +def l_and[VT, RT](*f: tuple[str, Callable[[VT], RT]]) -> Callable[[VT], RT]: ... +def l_or[VT, RT](*f: tuple[str, Callable[[VT], RT]]) -> Callable[[VT], RT]: ... +def l_not[VT, RT](f: tuple[str, Callable[[VT], RT]]) -> Callable[[VT], RT]: ... # def outmess(t: str) -> None: ... diff --git a/numpy/f2py/cfuncs.pyi b/numpy/f2py/cfuncs.pyi index 5887177752c3..2187368797a4 100644 --- a/numpy/f2py/cfuncs.pyi +++ b/numpy/f2py/cfuncs.pyi @@ -1,11 +1,11 @@ -from typing import Final, TypeAlias +from typing import Final from .__version__ import version ### -_NeedListDict: TypeAlias = dict[str, list[str]] -_NeedDict: TypeAlias = dict[str, str] +type _NeedListDict = dict[str, list[str]] +type _NeedDict = dict[str, str] ### diff --git a/numpy/f2py/crackfortran.pyi b/numpy/f2py/crackfortran.pyi index 742d358916a2..09213e156636 100644 --- a/numpy/f2py/crackfortran.pyi +++ b/numpy/f2py/crackfortran.pyi @@ -1,28 +1,16 @@ import re from _typeshed import StrOrBytesPath, StrPath from collections.abc import Callable, Iterable, Mapping -from typing import ( - IO, - Any, - Concatenate, - Final, - Literal as L, - Never, - ParamSpec, - TypeAlias, - overload, -) +from typing import IO, Any, Concatenate, Final, Literal as L, Never, overload from .__version__ import version from .auxfuncs import isintent_dict as isintent_dict ### -_Tss = ParamSpec("_Tss") - -_VisitResult: TypeAlias = list[Any] | dict[str, Any] | None -_VisitItem: TypeAlias = tuple[str | None, _VisitResult] -_VisitFunc: TypeAlias = Callable[Concatenate[_VisitItem, list[_VisitItem], _VisitResult, _Tss], 
_VisitItem | None] +type _VisitResult = list[Any] | dict[str, Any] | None +type _VisitItem = tuple[str | None, _VisitResult] +type _VisitFunc[**Tss] = Callable[Concatenate[_VisitItem, list[_VisitItem], _VisitResult, Tss], _VisitItem | None] ### @@ -243,13 +231,13 @@ def crackfortran(files: StrOrBytesPath | Iterable[StrOrBytesPath]) -> list[dict[ def crack2fortran(block: Mapping[str, Any]) -> str: ... # -def traverse( +def traverse[**Tss]( obj: tuple[str | None, _VisitResult], - visit: _VisitFunc[_Tss], + visit: _VisitFunc[Tss], parents: list[tuple[str | None, _VisitResult]] = [], result: list[Any] | dict[str, Any] | None = None, - *args: _Tss.args, - **kwargs: _Tss.kwargs, + *args: Tss.args, + **kwargs: Tss.kwargs, ) -> _VisitItem | _VisitResult: ... # diff --git a/numpy/f2py/f2py2e.pyi b/numpy/f2py/f2py2e.pyi index 686898041b9d..4dd6a9f73ec3 100644 --- a/numpy/f2py/f2py2e.pyi +++ b/numpy/f2py/f2py2e.pyi @@ -2,17 +2,13 @@ import argparse import pprint from collections.abc import Hashable, Iterable, Mapping, MutableMapping, Sequence from types import ModuleType -from typing import Any, Final, NotRequired, TypedDict, type_check_only -from typing_extensions import TypeVar, override +from typing import Any, Final, NotRequired, TypedDict, override, type_check_only from .__version__ import version from .auxfuncs import _Bool, outmess as outmess ### -_KT = TypeVar("_KT", bound=Hashable) -_VT = TypeVar("_VT") - @type_check_only class _F2PyDict(TypedDict): csrc: list[str] @@ -54,7 +50,7 @@ def main() -> None: ... def scaninputline(inputline: Iterable[str]) -> tuple[list[str], dict[str, _Bool]]: ... def callcrackfortran(files: list[str], options: dict[str, bool]) -> list[dict[str, Any]]: ... def buildmodules(lst: Iterable[Mapping[str, object]]) -> dict[str, dict[str, Any]]: ... -def dict_append(d_out: MutableMapping[_KT, _VT], d_in: Mapping[_KT, _VT]) -> None: ... +def dict_append[KT: Hashable, VT](d_out: MutableMapping[KT, VT], d_in: Mapping[KT, VT]) -> None: ... 
def filter_files( prefix: str, suffix: str, diff --git a/numpy/f2py/rules.pyi b/numpy/f2py/rules.pyi index 30439f6b8351..c45d42289363 100644 --- a/numpy/f2py/rules.pyi +++ b/numpy/f2py/rules.pyi @@ -1,17 +1,12 @@ from collections.abc import Callable, Iterable, Mapping -from typing import Any, Final, Literal as L, TypeAlias -from typing_extensions import TypeVar +from typing import Any, Final, Literal as L from .__version__ import version from .auxfuncs import _Bool, _Var -### - -_VT = TypeVar("_VT", default=str) - -_Predicate: TypeAlias = Callable[[_Var], _Bool] -_RuleDict: TypeAlias = dict[str, _VT] -_DefDict: TypeAlias = dict[_Predicate, _VT] +type _Predicate = Callable[[_Var], _Bool] +type _RuleDict[VT] = dict[str, VT] +type _DefDict[VT] = dict[_Predicate, VT] ### @@ -24,9 +19,9 @@ sepdict: Final[dict[str, str]] = ... generationtime: Final[int] = ... typedef_need_dict: Final[_DefDict[str]] = ... -module_rules: Final[_RuleDict[str | list[str] | _RuleDict]] = ... -routine_rules: Final[_RuleDict[str | list[str] | _DefDict | _RuleDict]] = ... -defmod_rules: Final[list[_RuleDict[str | _DefDict]]] = ... +module_rules: Final[_RuleDict[str | list[str] | _RuleDict[str]]] = ... +routine_rules: Final[_RuleDict[str | list[str] | _DefDict[str] | _RuleDict[str]]] = ... +defmod_rules: Final[list[_RuleDict[str | _DefDict[str]]]] = ... rout_rules: Final[list[_RuleDict[str | Any]]] = ... aux_rules: Final[list[_RuleDict[str | Any]]] = ... arg_rules: Final[list[_RuleDict[str | Any]]] = ... @@ -34,8 +29,8 @@ check_rules: Final[list[_RuleDict[str | Any]]] = ... stnd: Final[dict[L[1, 2, 3, 4, 5, 6, 7, 8, 9, 0], L["st", "nd", "rd", "th"]]] = ... -def buildmodule(m: Mapping[str, str | Any], um: Iterable[Mapping[str, str | Any]]) -> _RuleDict: ... -def buildapi(rout: Mapping[str, str]) -> tuple[_RuleDict, str]: ... +def buildmodule(m: Mapping[str, str | Any], um: Iterable[Mapping[str, str | Any]]) -> _RuleDict[str]: ... 
+def buildapi(rout: Mapping[str, str]) -> tuple[_RuleDict[str], str]: ... # namespace pollution k: str diff --git a/numpy/f2py/symbolic.pyi b/numpy/f2py/symbolic.pyi index 06be2bb16044..542dde12791f 100644 --- a/numpy/f2py/symbolic.pyi +++ b/numpy/f2py/symbolic.pyi @@ -1,28 +1,25 @@ from collections.abc import Callable, Mapping from enum import Enum -from typing import Any, Generic, Literal as L, ParamSpec, Self, TypeAlias, overload +from typing import Any, Generic, Literal as L, Self, overload from typing_extensions import TypeVar __all__ = ["Expr"] ### -_Tss = ParamSpec("_Tss") -_ExprT = TypeVar("_ExprT", bound=Expr) -_ExprT1 = TypeVar("_ExprT1", bound=Expr) -_ExprT2 = TypeVar("_ExprT2", bound=Expr) +# Explicit covariance is required here due to the inexpressible read-only attributes. _OpT_co = TypeVar("_OpT_co", bound=Op, default=Op, covariant=True) _LanguageT_co = TypeVar("_LanguageT_co", bound=Language, default=Language, covariant=True) _DataT_co = TypeVar("_DataT_co", default=Any, covariant=True) _LeftT_co = TypeVar("_LeftT_co", default=Any, covariant=True) _RightT_co = TypeVar("_RightT_co", default=Any, covariant=True) -_RelCOrPy: TypeAlias = L["==", "!=", "<", "<=", ">", ">="] -_RelFortran: TypeAlias = L[".eq.", ".ne.", ".lt.", ".le.", ".gt.", ".ge."] +type _RelCOrPy = L["==", "!=", "<", "<=", ">", ">="] +type _RelFortran = L[".eq.", ".ne.", ".lt.", ".le.", ".gt.", ".ge."] -_ToExpr: TypeAlias = Expr | complex | str -_ToExprN: TypeAlias = _ToExpr | tuple[_ToExprN, ...] -_NestedString: TypeAlias = str | tuple[_NestedString, ...] | list[_NestedString] +type _ToExpr = Expr | complex | str +type _ToExprN = _ToExpr | tuple[_ToExprN, ...] +type _NestedString = str | tuple[_NestedString, ...] | list[_NestedString] ### @@ -97,8 +94,8 @@ class Precedence(Enum): NONE = 100 class Expr(Generic[_OpT_co, _DataT_co]): - op: _OpT_co - data: _DataT_co + op: _OpT_co # read-only + data: _DataT_co # read-only @staticmethod def parse(s: str, language: Language = ...) 
-> Expr: ... @@ -149,7 +146,7 @@ class Expr(Generic[_OpT_co, _DataT_co]): # @overload - def __getitem__(self, index: _ExprT | tuple[_ExprT], /) -> Expr[L[Op.INDEXING], tuple[Self, _ExprT]]: ... + def __getitem__[ExprT: Expr](self, index: ExprT | tuple[ExprT], /) -> Expr[L[Op.INDEXING], tuple[Self, ExprT]]: ... @overload def __getitem__(self, index: _ToExpr | tuple[_ToExpr], /) -> Expr[L[Op.INDEXING], tuple[Self, Expr]]: ... @@ -158,9 +155,9 @@ class Expr(Generic[_OpT_co, _DataT_co]): # @overload - def traverse(self, /, visit: Callable[_Tss, None], *args: _Tss.args, **kwargs: _Tss.kwargs) -> Expr: ... + def traverse[**Tss](self, /, visit: Callable[Tss, None], *args: Tss.args, **kwargs: Tss.kwargs) -> Expr: ... @overload - def traverse(self, /, visit: Callable[_Tss, _ExprT], *args: _Tss.args, **kwargs: _Tss.kwargs) -> _ExprT: ... + def traverse[**Tss, ExprT: Expr](self, /, visit: Callable[Tss, ExprT], *args: Tss.args, **kwargs: Tss.kwargs) -> ExprT: ... # def contains(self, /, other: Expr) -> bool: ... @@ -176,23 +173,23 @@ class Expr(Generic[_OpT_co, _DataT_co]): def tostring(self, /, parent_precedence: Precedence = ..., language: Language = ...) -> str: ... class _Pair(Generic[_LeftT_co, _RightT_co]): - left: _LeftT_co - right: _RightT_co + left: _LeftT_co # read-only + right: _RightT_co # read-only def __init__(self, /, left: _LeftT_co, right: _RightT_co) -> None: ... # @overload - def substitute(self: _Pair[_ExprT1, _ExprT2], /, symbols_map: Mapping[Expr, Expr]) -> _Pair[Expr, Expr]: ... + def substitute[ExprT: Expr](self: _Pair[ExprT, ExprT], /, symbols_map: Mapping[Expr, Expr]) -> _Pair[Expr, Expr]: ... @overload - def substitute(self: _Pair[_ExprT1, object], /, symbols_map: Mapping[Expr, Expr]) -> _Pair[Expr, Any]: ... + def substitute[ExprT: Expr](self: _Pair[ExprT, object], /, symbols_map: Mapping[Expr, Expr]) -> _Pair[Expr, Any]: ... @overload - def substitute(self: _Pair[object, _ExprT2], /, symbols_map: Mapping[Expr, Expr]) -> _Pair[Any, Expr]: ... 
+ def substitute[ExprT: Expr](self: _Pair[object, ExprT], /, symbols_map: Mapping[Expr, Expr]) -> _Pair[Any, Expr]: ... @overload def substitute(self, /, symbols_map: Mapping[Expr, Expr]) -> _Pair: ... class _FromStringWorker(Generic[_LanguageT_co]): - language: _LanguageT_co + language: _LanguageT_co # read-only original: str | None quotes_map: dict[str, str] diff --git a/numpy/fft/_helper.pyi b/numpy/fft/_helper.pyi index 1ea451ec2eb1..8f1a3c7bab89 100644 --- a/numpy/fft/_helper.pyi +++ b/numpy/fft/_helper.pyi @@ -1,4 +1,4 @@ -from typing import Any, Final, Literal as L, TypeVar, overload +from typing import Any, Final, Literal as L, overload from numpy import complexfloating, floating, generic, integer from numpy._typing import ( @@ -12,8 +12,6 @@ from numpy._typing import ( __all__ = ["fftfreq", "fftshift", "ifftshift", "rfftfreq"] -_ScalarT = TypeVar("_ScalarT", bound=generic) - ### integer_types: Final[tuple[type[int], type[integer]]] = ... @@ -21,13 +19,13 @@ integer_types: Final[tuple[type[int], type[integer]]] = ... ### @overload -def fftshift(x: _ArrayLike[_ScalarT], axes: _ShapeLike | None = None) -> NDArray[_ScalarT]: ... +def fftshift[ScalarT: generic](x: _ArrayLike[ScalarT], axes: _ShapeLike | None = None) -> NDArray[ScalarT]: ... @overload def fftshift(x: ArrayLike, axes: _ShapeLike | None = None) -> NDArray[Any]: ... # @overload -def ifftshift(x: _ArrayLike[_ScalarT], axes: _ShapeLike | None = None) -> NDArray[_ScalarT]: ... +def ifftshift[ScalarT: generic](x: _ArrayLike[ScalarT], axes: _ShapeLike | None = None) -> NDArray[ScalarT]: ... @overload def ifftshift(x: ArrayLike, axes: _ShapeLike | None = None) -> NDArray[Any]: ... 
diff --git a/numpy/fft/_pocketfft.pyi b/numpy/fft/_pocketfft.pyi index 3234c64ed169..d34404edb149 100644 --- a/numpy/fft/_pocketfft.pyi +++ b/numpy/fft/_pocketfft.pyi @@ -1,5 +1,5 @@ from collections.abc import Sequence -from typing import Literal as L, TypeAlias +from typing import Literal as L from numpy import complex128, float64 from numpy._typing import ArrayLike, NDArray, _ArrayLikeNumber_co @@ -21,7 +21,7 @@ __all__ = [ "ifftn", ] -_NormKind: TypeAlias = L["backward", "ortho", "forward"] | None +type _NormKind = L["backward", "ortho", "forward"] | None def fft( a: ArrayLike, diff --git a/numpy/lib/_arraypad_impl.pyi b/numpy/lib/_arraypad_impl.pyi index e7aacea43254..da7c89859d86 100644 --- a/numpy/lib/_arraypad_impl.pyi +++ b/numpy/lib/_arraypad_impl.pyi @@ -1,20 +1,10 @@ -from typing import ( - Any, - Literal as L, - Protocol, - TypeAlias, - TypeVar, - overload, - type_check_only, -) +from typing import Any, Literal as L, Protocol, overload, type_check_only -from numpy import generic +import numpy as np from numpy._typing import ArrayLike, NDArray, _ArrayLike, _ArrayLikeInt __all__ = ["pad"] -_ScalarT = TypeVar("_ScalarT", bound=generic) - @type_check_only class _ModeFunc(Protocol): def __call__( @@ -26,7 +16,7 @@ class _ModeFunc(Protocol): /, ) -> None: ... -_ModeKind: TypeAlias = L[ +type _ModeKind = L[ "constant", "edge", "linear_ramp", @@ -40,19 +30,22 @@ _ModeKind: TypeAlias = L[ "empty", ] -# TODO: In practice each keyword argument is exclusive to one or more -# specific modes. Consider adding more overloads to express this in the future. - -_PadWidth: TypeAlias = ( +type _PadWidth = ( _ArrayLikeInt | dict[int, int] | dict[int, tuple[int, int]] | dict[int, int | tuple[int, int]] ) + +### + +# TODO: In practice each keyword argument is exclusive to one or more +# specific modes. Consider adding more overloads to express this in the future. 
+ # Expand `**kwargs` into explicit keyword-only arguments @overload -def pad( - array: _ArrayLike[_ScalarT], +def pad[ScalarT: np.generic]( + array: _ArrayLike[ScalarT], pad_width: _PadWidth, mode: _ModeKind = "constant", *, @@ -60,7 +53,7 @@ def pad( constant_values: ArrayLike = 0, end_values: ArrayLike = 0, reflect_type: L["odd", "even"] = "even", -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def pad( array: ArrayLike, @@ -73,12 +66,12 @@ def pad( reflect_type: L["odd", "even"] = "even", ) -> NDArray[Any]: ... @overload -def pad( - array: _ArrayLike[_ScalarT], +def pad[ScalarT: np.generic]( + array: _ArrayLike[ScalarT], pad_width: _PadWidth, mode: _ModeFunc, **kwargs: Any, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def pad( array: ArrayLike, diff --git a/numpy/lib/_arraysetops_impl.pyi b/numpy/lib/_arraysetops_impl.pyi index da687da03dde..b0b918bd3e23 100644 --- a/numpy/lib/_arraysetops_impl.pyi +++ b/numpy/lib/_arraysetops_impl.pyi @@ -1,13 +1,4 @@ -from typing import ( - Any, - Generic, - Literal as L, - NamedTuple, - SupportsIndex, - TypeAlias, - overload, -) -from typing_extensions import TypeVar +from typing import Any, Literal as L, NamedTuple, SupportsIndex, TypeVar, overload import numpy as np from numpy._typing import ( @@ -32,16 +23,13 @@ __all__ = [ "unique_values", ] -_ScalarT = TypeVar("_ScalarT", bound=np.generic) -_NumericT = TypeVar("_NumericT", bound=np.number | np.timedelta64 | np.object_) - # Explicitly set all allowed values to prevent accidental castings to # abstract dtypes (their common super-type). # Only relevant if two or more arguments are parametrized, (e.g. 
`setdiff1d`) # which could result in, for example, `int64` and `float64`producing a # `number[_64Bit]` array -_EitherSCT = TypeVar( - "_EitherSCT", +_AnyScalarT = TypeVar( + "_AnyScalarT", np.bool, np.int8, np.int16, np.int32, np.int64, np.intp, np.uint8, np.uint16, np.uint32, np.uint64, np.uintp, @@ -52,23 +40,23 @@ _EitherSCT = TypeVar( np.integer, np.floating, np.complexfloating, np.character, ) # fmt: skip -_AnyArray: TypeAlias = NDArray[Any] -_IntArray: TypeAlias = NDArray[np.intp] +type _NumericScalar = np.number | np.timedelta64 | np.object_ +type _IntArray = NDArray[np.intp] ### -class UniqueAllResult(NamedTuple, Generic[_ScalarT]): - values: NDArray[_ScalarT] +class UniqueAllResult[ScalarT: np.generic](NamedTuple): + values: NDArray[ScalarT] indices: _IntArray inverse_indices: _IntArray counts: _IntArray -class UniqueCountsResult(NamedTuple, Generic[_ScalarT]): - values: NDArray[_ScalarT] +class UniqueCountsResult[ScalarT: np.generic](NamedTuple): + values: NDArray[ScalarT] counts: _IntArray -class UniqueInverseResult(NamedTuple, Generic[_ScalarT]): - values: NDArray[_ScalarT] +class UniqueInverseResult[ScalarT: np.generic](NamedTuple): + values: NDArray[ScalarT] inverse_indices: _IntArray # @@ -79,11 +67,11 @@ def ediff1d( to_begin: ArrayLike | None = None, ) -> NDArray[np.int8]: ... @overload -def ediff1d( - ary: _ArrayLike[_NumericT], +def ediff1d[NumericT: _NumericScalar]( + ary: _ArrayLike[NumericT], to_end: ArrayLike | None = None, to_begin: ArrayLike | None = None, -) -> NDArray[_NumericT]: ... +) -> NDArray[NumericT]: ... @overload def ediff1d( ary: _ArrayLike[np.datetime64[Any]], @@ -95,12 +83,12 @@ def ediff1d( ary: _ArrayLikeNumber_co, to_end: ArrayLike | None = None, to_begin: ArrayLike | None = None, -) -> _AnyArray: ... +) -> np.ndarray: ... 
# @overload # known scalar-type, FFF -def unique( - ar: _ArrayLike[_ScalarT], +def unique[ScalarT: np.generic]( + ar: _ArrayLike[ScalarT], return_index: L[False] = False, return_inverse: L[False] = False, return_counts: L[False] = False, @@ -108,7 +96,7 @@ def unique( *, equal_nan: bool = True, sorted: bool = True, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload # unknown scalar-type, FFF def unique( ar: ArrayLike, @@ -119,10 +107,10 @@ def unique( *, equal_nan: bool = True, sorted: bool = True, -) -> _AnyArray: ... +) -> np.ndarray: ... @overload # known scalar-type, TFF -def unique( - ar: _ArrayLike[_ScalarT], +def unique[ScalarT: np.generic]( + ar: _ArrayLike[ScalarT], return_index: L[True], return_inverse: L[False] = False, return_counts: L[False] = False, @@ -130,7 +118,7 @@ def unique( *, equal_nan: bool = True, sorted: bool = True, -) -> tuple[NDArray[_ScalarT], _IntArray]: ... +) -> tuple[NDArray[ScalarT], _IntArray]: ... @overload # unknown scalar-type, TFF def unique( ar: ArrayLike, @@ -141,10 +129,10 @@ def unique( *, equal_nan: bool = True, sorted: bool = True, -) -> tuple[_AnyArray, _IntArray]: ... +) -> tuple[np.ndarray, _IntArray]: ... @overload # known scalar-type, FTF (positional) -def unique( - ar: _ArrayLike[_ScalarT], +def unique[ScalarT: np.generic]( + ar: _ArrayLike[ScalarT], return_index: L[False], return_inverse: L[True], return_counts: L[False] = False, @@ -152,10 +140,10 @@ def unique( *, equal_nan: bool = True, sorted: bool = True, -) -> tuple[NDArray[_ScalarT], _IntArray]: ... +) -> tuple[NDArray[ScalarT], _IntArray]: ... @overload # known scalar-type, FTF (keyword) -def unique( - ar: _ArrayLike[_ScalarT], +def unique[ScalarT: np.generic]( + ar: _ArrayLike[ScalarT], return_index: L[False] = False, *, return_inverse: L[True], @@ -163,7 +151,7 @@ def unique( axis: SupportsIndex | None = None, equal_nan: bool = True, sorted: bool = True, -) -> tuple[NDArray[_ScalarT], _IntArray]: ... 
+) -> tuple[NDArray[ScalarT], _IntArray]: ... @overload # unknown scalar-type, FTF (positional) def unique( ar: ArrayLike, @@ -174,7 +162,7 @@ def unique( *, equal_nan: bool = True, sorted: bool = True, -) -> tuple[_AnyArray, _IntArray]: ... +) -> tuple[np.ndarray, _IntArray]: ... @overload # unknown scalar-type, FTF (keyword) def unique( ar: ArrayLike, @@ -185,10 +173,10 @@ def unique( axis: SupportsIndex | None = None, equal_nan: bool = True, sorted: bool = True, -) -> tuple[_AnyArray, _IntArray]: ... +) -> tuple[np.ndarray, _IntArray]: ... @overload # known scalar-type, FFT (positional) -def unique( - ar: _ArrayLike[_ScalarT], +def unique[ScalarT: np.generic]( + ar: _ArrayLike[ScalarT], return_index: L[False], return_inverse: L[False], return_counts: L[True], @@ -196,10 +184,10 @@ def unique( *, equal_nan: bool = True, sorted: bool = True, -) -> tuple[NDArray[_ScalarT], _IntArray]: ... +) -> tuple[NDArray[ScalarT], _IntArray]: ... @overload # known scalar-type, FFT (keyword) -def unique( - ar: _ArrayLike[_ScalarT], +def unique[ScalarT: np.generic]( + ar: _ArrayLike[ScalarT], return_index: L[False] = False, return_inverse: L[False] = False, *, @@ -207,7 +195,7 @@ def unique( axis: SupportsIndex | None = None, equal_nan: bool = True, sorted: bool = True, -) -> tuple[NDArray[_ScalarT], _IntArray]: ... +) -> tuple[NDArray[ScalarT], _IntArray]: ... @overload # unknown scalar-type, FFT (positional) def unique( ar: ArrayLike, @@ -218,7 +206,7 @@ def unique( *, equal_nan: bool = True, sorted: bool = True, -) -> tuple[_AnyArray, _IntArray]: ... +) -> tuple[np.ndarray, _IntArray]: ... @overload # unknown scalar-type, FFT (keyword) def unique( ar: ArrayLike, @@ -229,10 +217,10 @@ def unique( axis: SupportsIndex | None = None, equal_nan: bool = True, sorted: bool = True, -) -> tuple[_AnyArray, _IntArray]: ... +) -> tuple[np.ndarray, _IntArray]: ... 
@overload # known scalar-type, TTF -def unique( - ar: _ArrayLike[_ScalarT], +def unique[ScalarT: np.generic]( + ar: _ArrayLike[ScalarT], return_index: L[True], return_inverse: L[True], return_counts: L[False] = False, @@ -240,7 +228,7 @@ def unique( *, equal_nan: bool = True, sorted: bool = True, -) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray]: ... +) -> tuple[NDArray[ScalarT], _IntArray, _IntArray]: ... @overload # unknown scalar-type, TTF def unique( ar: ArrayLike, @@ -251,10 +239,10 @@ def unique( *, equal_nan: bool = True, sorted: bool = True, -) -> tuple[_AnyArray, _IntArray, _IntArray]: ... +) -> tuple[np.ndarray, _IntArray, _IntArray]: ... @overload # known scalar-type, TFT (positional) -def unique( - ar: _ArrayLike[_ScalarT], +def unique[ScalarT: np.generic]( + ar: _ArrayLike[ScalarT], return_index: L[True], return_inverse: L[False], return_counts: L[True], @@ -262,10 +250,10 @@ def unique( *, equal_nan: bool = True, sorted: bool = True, -) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray]: ... +) -> tuple[NDArray[ScalarT], _IntArray, _IntArray]: ... @overload # known scalar-type, TFT (keyword) -def unique( - ar: _ArrayLike[_ScalarT], +def unique[ScalarT: np.generic]( + ar: _ArrayLike[ScalarT], return_index: L[True], return_inverse: L[False] = False, *, @@ -273,7 +261,7 @@ def unique( axis: SupportsIndex | None = None, equal_nan: bool = True, sorted: bool = True, -) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray]: ... +) -> tuple[NDArray[ScalarT], _IntArray, _IntArray]: ... @overload # unknown scalar-type, TFT (positional) def unique( ar: ArrayLike, @@ -284,7 +272,7 @@ def unique( *, equal_nan: bool = True, sorted: bool = True, -) -> tuple[_AnyArray, _IntArray, _IntArray]: ... +) -> tuple[np.ndarray, _IntArray, _IntArray]: ... 
@overload # unknown scalar-type, TFT (keyword) def unique( ar: ArrayLike, @@ -295,10 +283,10 @@ def unique( axis: SupportsIndex | None = None, equal_nan: bool = True, sorted: bool = True, -) -> tuple[_AnyArray, _IntArray, _IntArray]: ... +) -> tuple[np.ndarray, _IntArray, _IntArray]: ... @overload # known scalar-type, FTT (positional) -def unique( - ar: _ArrayLike[_ScalarT], +def unique[ScalarT: np.generic]( + ar: _ArrayLike[ScalarT], return_index: L[False], return_inverse: L[True], return_counts: L[True], @@ -306,10 +294,10 @@ def unique( *, equal_nan: bool = True, sorted: bool = True, -) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray]: ... +) -> tuple[NDArray[ScalarT], _IntArray, _IntArray]: ... @overload # known scalar-type, FTT (keyword) -def unique( - ar: _ArrayLike[_ScalarT], +def unique[ScalarT: np.generic]( + ar: _ArrayLike[ScalarT], return_index: L[False] = False, *, return_inverse: L[True], @@ -317,7 +305,7 @@ def unique( axis: SupportsIndex | None = None, equal_nan: bool = True, sorted: bool = True, -) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray]: ... +) -> tuple[NDArray[ScalarT], _IntArray, _IntArray]: ... @overload # unknown scalar-type, FTT (positional) def unique( ar: ArrayLike, @@ -328,7 +316,7 @@ def unique( *, equal_nan: bool = True, sorted: bool = True, -) -> tuple[_AnyArray, _IntArray, _IntArray]: ... +) -> tuple[np.ndarray, _IntArray, _IntArray]: ... @overload # unknown scalar-type, FTT (keyword) def unique( ar: ArrayLike, @@ -339,10 +327,10 @@ def unique( axis: SupportsIndex | None = None, equal_nan: bool = True, sorted: bool = True, -) -> tuple[_AnyArray, _IntArray, _IntArray]: ... +) -> tuple[np.ndarray, _IntArray, _IntArray]: ... 
@overload # known scalar-type, TTT -def unique( - ar: _ArrayLike[_ScalarT], +def unique[ScalarT: np.generic]( + ar: _ArrayLike[ScalarT], return_index: L[True], return_inverse: L[True], return_counts: L[True], @@ -350,7 +338,7 @@ def unique( *, equal_nan: bool = True, sorted: bool = True, -) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray, _IntArray]: ... +) -> tuple[NDArray[ScalarT], _IntArray, _IntArray, _IntArray]: ... @overload # unknown scalar-type, TTT def unique( ar: ArrayLike, @@ -361,69 +349,69 @@ def unique( *, equal_nan: bool = True, sorted: bool = True, -) -> tuple[_AnyArray, _IntArray, _IntArray, _IntArray]: ... +) -> tuple[np.ndarray, _IntArray, _IntArray, _IntArray]: ... # @overload -def unique_all(x: _ArrayLike[_ScalarT]) -> UniqueAllResult[_ScalarT]: ... +def unique_all[ScalarT: np.generic](x: _ArrayLike[ScalarT]) -> UniqueAllResult[ScalarT]: ... @overload def unique_all(x: ArrayLike) -> UniqueAllResult[Any]: ... # @overload -def unique_counts(x: _ArrayLike[_ScalarT]) -> UniqueCountsResult[_ScalarT]: ... +def unique_counts[ScalarT: np.generic](x: _ArrayLike[ScalarT]) -> UniqueCountsResult[ScalarT]: ... @overload def unique_counts(x: ArrayLike) -> UniqueCountsResult[Any]: ... # @overload -def unique_inverse(x: _ArrayLike[_ScalarT]) -> UniqueInverseResult[_ScalarT]: ... +def unique_inverse[ScalarT: np.generic](x: _ArrayLike[ScalarT]) -> UniqueInverseResult[ScalarT]: ... @overload def unique_inverse(x: ArrayLike) -> UniqueInverseResult[Any]: ... # @overload -def unique_values(x: _ArrayLike[_ScalarT]) -> NDArray[_ScalarT]: ... +def unique_values[ScalarT: np.generic](x: _ArrayLike[ScalarT]) -> NDArray[ScalarT]: ... @overload -def unique_values(x: ArrayLike) -> _AnyArray: ... +def unique_values(x: ArrayLike) -> np.ndarray: ... 
# @overload # known scalar-type, return_indices=False (default) def intersect1d( - ar1: _ArrayLike[_EitherSCT], - ar2: _ArrayLike[_EitherSCT], + ar1: _ArrayLike[_AnyScalarT], + ar2: _ArrayLike[_AnyScalarT], assume_unique: bool = False, return_indices: L[False] = False, -) -> NDArray[_EitherSCT]: ... +) -> NDArray[_AnyScalarT]: ... @overload # known scalar-type, return_indices=True (positional) def intersect1d( - ar1: _ArrayLike[_EitherSCT], - ar2: _ArrayLike[_EitherSCT], + ar1: _ArrayLike[_AnyScalarT], + ar2: _ArrayLike[_AnyScalarT], assume_unique: bool, return_indices: L[True], -) -> tuple[NDArray[_EitherSCT], _IntArray, _IntArray]: ... +) -> tuple[NDArray[_AnyScalarT], _IntArray, _IntArray]: ... @overload # known scalar-type, return_indices=True (keyword) def intersect1d( - ar1: _ArrayLike[_EitherSCT], - ar2: _ArrayLike[_EitherSCT], + ar1: _ArrayLike[_AnyScalarT], + ar2: _ArrayLike[_AnyScalarT], assume_unique: bool = False, *, return_indices: L[True], -) -> tuple[NDArray[_EitherSCT], _IntArray, _IntArray]: ... +) -> tuple[NDArray[_AnyScalarT], _IntArray, _IntArray]: ... @overload # unknown scalar-type, return_indices=False (default) def intersect1d( ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = False, return_indices: L[False] = False, -) -> _AnyArray: ... +) -> np.ndarray: ... @overload # unknown scalar-type, return_indices=True (positional) def intersect1d( ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool, return_indices: L[True], -) -> tuple[_AnyArray, _IntArray, _IntArray]: ... +) -> tuple[np.ndarray, _IntArray, _IntArray]: ... @overload # unknown scalar-type, return_indices=True (keyword) def intersect1d( ar1: ArrayLike, @@ -431,25 +419,25 @@ def intersect1d( assume_unique: bool = False, *, return_indices: L[True], -) -> tuple[_AnyArray, _IntArray, _IntArray]: ... +) -> tuple[np.ndarray, _IntArray, _IntArray]: ... 
# @overload -def setxor1d(ar1: _ArrayLike[_EitherSCT], ar2: _ArrayLike[_EitherSCT], assume_unique: bool = False) -> NDArray[_EitherSCT]: ... +def setxor1d(ar1: _ArrayLike[_AnyScalarT], ar2: _ArrayLike[_AnyScalarT], assume_unique: bool = False) -> NDArray[_AnyScalarT]: ... @overload -def setxor1d(ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = False) -> _AnyArray: ... +def setxor1d(ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = False) -> np.ndarray: ... # @overload -def union1d(ar1: _ArrayLike[_EitherSCT], ar2: _ArrayLike[_EitherSCT]) -> NDArray[_EitherSCT]: ... +def union1d(ar1: _ArrayLike[_AnyScalarT], ar2: _ArrayLike[_AnyScalarT]) -> NDArray[_AnyScalarT]: ... @overload -def union1d(ar1: ArrayLike, ar2: ArrayLike) -> _AnyArray: ... +def union1d(ar1: ArrayLike, ar2: ArrayLike) -> np.ndarray: ... # @overload -def setdiff1d(ar1: _ArrayLike[_EitherSCT], ar2: _ArrayLike[_EitherSCT], assume_unique: bool = False) -> NDArray[_EitherSCT]: ... +def setdiff1d(ar1: _ArrayLike[_AnyScalarT], ar2: _ArrayLike[_AnyScalarT], assume_unique: bool = False) -> NDArray[_AnyScalarT]: ... @overload -def setdiff1d(ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = False) -> _AnyArray: ... +def setdiff1d(ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = False) -> np.ndarray: ... 
# def isin( diff --git a/numpy/lib/_arrayterator_impl.pyi b/numpy/lib/_arrayterator_impl.pyi index c70872f6e753..a1a4428885fd 100644 --- a/numpy/lib/_arrayterator_impl.pyi +++ b/numpy/lib/_arrayterator_impl.pyi @@ -2,7 +2,7 @@ from collections.abc import Generator from types import EllipsisType -from typing import Any, Final, TypeAlias, overload +from typing import Any, Final, overload from typing_extensions import TypeVar import numpy as np @@ -10,12 +10,11 @@ from numpy._typing import _AnyShape, _Shape __all__ = ["Arrayterator"] +# Type parameter default syntax (PEP 696) requires Python 3.13+ _ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True) -_DTypeT = TypeVar("_DTypeT", bound=np.dtype) _DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=True) -_ScalarT = TypeVar("_ScalarT", bound=np.generic) -_AnyIndex: TypeAlias = EllipsisType | int | slice | tuple[EllipsisType | int | slice, ...] +type _AnyIndex = EllipsisType | int | slice | tuple[EllipsisType | int | slice, ...] # NOTE: In reality `Arrayterator` does not actually inherit from `ndarray`, # but its ``__getattr__` method does wrap around the former and thus has @@ -31,7 +30,7 @@ class Arrayterator(np.ndarray[_ShapeT_co, _DTypeT_co]): @property # type: ignore[misc] def shape(self) -> _ShapeT_co: ... @property - def flat(self: Arrayterator[Any, np.dtype[_ScalarT]]) -> Generator[_ScalarT]: ... # type: ignore[override] + def flat[ScalarT: np.generic](self: Arrayterator[Any, np.dtype[ScalarT]]) -> Generator[ScalarT]: ... # type: ignore[override] # def __init__(self, /, var: np.ndarray[_ShapeT_co, _DTypeT_co], buf_size: int | None = None) -> None: ... @@ -42,4 +41,4 @@ class Arrayterator(np.ndarray[_ShapeT_co, _DTypeT_co]): @overload def __array__(self, /, dtype: None = None, copy: bool | None = None) -> np.ndarray[_ShapeT_co, _DTypeT_co]: ... 
@overload - def __array__(self, /, dtype: _DTypeT, copy: bool | None = None) -> np.ndarray[_ShapeT_co, _DTypeT]: ... + def __array__[DTypeT: np.dtype](self, /, dtype: DTypeT, copy: bool | None = None) -> np.ndarray[_ShapeT_co, DTypeT]: ... diff --git a/numpy/lib/_datasource.pyi b/numpy/lib/_datasource.pyi index dba0434a5fab..33af9cf1b197 100644 --- a/numpy/lib/_datasource.pyi +++ b/numpy/lib/_datasource.pyi @@ -1,8 +1,8 @@ from _typeshed import OpenBinaryMode, OpenTextMode from pathlib import Path -from typing import IO, Any, TypeAlias +from typing import IO, Any -_Mode: TypeAlias = OpenBinaryMode | OpenTextMode +type _Mode = OpenBinaryMode | OpenTextMode ### diff --git a/numpy/lib/_format_impl.pyi b/numpy/lib/_format_impl.pyi index b45df02796d7..f8b9a7ab88a9 100644 --- a/numpy/lib/_format_impl.pyi +++ b/numpy/lib/_format_impl.pyi @@ -1,6 +1,6 @@ import os from _typeshed import SupportsRead, SupportsWrite -from typing import Any, BinaryIO, Final, TypeAlias, TypeGuard +from typing import Any, BinaryIO, Final, TypeGuard import numpy as np import numpy.typing as npt @@ -8,7 +8,7 @@ from numpy.lib._utils_impl import drop_metadata as drop_metadata __all__: list[str] = [] -_DTypeDescr: TypeAlias = list[tuple[str, str]] | list[tuple[str, str, tuple[int, ...]]] +type _DTypeDescr = list[tuple[str, str]] | list[tuple[str, str, tuple[int, ...]]] ### diff --git a/numpy/lib/_function_base_impl.pyi b/numpy/lib/_function_base_impl.pyi index d68918560b69..769c321b9988 100644 --- a/numpy/lib/_function_base_impl.pyi +++ b/numpy/lib/_function_base_impl.pyi @@ -5,15 +5,13 @@ from typing import ( Concatenate, Literal as L, Never, - ParamSpec, Protocol, SupportsIndex, SupportsInt, - TypeAlias, overload, type_check_only, ) -from typing_extensions import TypeIs, TypeVar +from typing_extensions import TypeIs import numpy as np from numpy import _OrderKACF @@ -36,6 +34,7 @@ from numpy._typing import ( _NestedSequence as _SeqND, _NumberLike_co, _ScalarLike_co, + _Shape, _ShapeLike, 
_SupportsArray, ) @@ -80,39 +79,15 @@ __all__ = [ "quantile", ] -_T = TypeVar("_T") -_T_co = TypeVar("_T_co", covariant=True) -# The `{}ss` suffix refers to the PEP 695 (Python 3.12) `ParamSpec` syntax, `**P`. -_Tss = ParamSpec("_Tss") +type _ArrayLike1D[ScalarT: np.generic] = _SupportsArray[np.dtype[ScalarT]] | Sequence[ScalarT] -_ScalarT = TypeVar("_ScalarT", bound=np.generic) -_ScalarT1 = TypeVar("_ScalarT1", bound=np.generic) -_ScalarT2 = TypeVar("_ScalarT2", bound=np.generic) -_FloatingT = TypeVar("_FloatingT", bound=np.floating) -_InexactT = TypeVar("_InexactT", bound=np.inexact) -_InexactTimeT = TypeVar("_InexactTimeT", bound=np.inexact | np.timedelta64) -_InexactDateTimeT = TypeVar("_InexactDateTimeT", bound=np.inexact | np.timedelta64 | np.datetime64) -_ScalarNumericT = TypeVar("_ScalarNumericT", bound=np.inexact | np.timedelta64 | np.object_) -_AnyDoubleT = TypeVar("_AnyDoubleT", bound=np.float64 | np.longdouble | np.complex128 | np.clongdouble) - -_ArrayT = TypeVar("_ArrayT", bound=np.ndarray) -_ArrayFloatingT = TypeVar("_ArrayFloatingT", bound=NDArray[np.floating]) -_ArrayFloatObjT = TypeVar("_ArrayFloatObjT", bound=NDArray[np.floating | np.object_]) -_ArrayComplexT = TypeVar("_ArrayComplexT", bound=NDArray[np.complexfloating]) -_ArrayInexactT = TypeVar("_ArrayInexactT", bound=NDArray[np.inexact]) -_ArrayNumericT = TypeVar("_ArrayNumericT", bound=NDArray[np.inexact | np.timedelta64 | np.object_]) - -_ArrayLike1D: TypeAlias = _SupportsArray[np.dtype[_ScalarT]] | Sequence[_ScalarT] - -_ShapeT = TypeVar("_ShapeT", bound=tuple[int, ...]) - -_integer_co: TypeAlias = np.integer | np.bool -_float64_co: TypeAlias = np.float64 | _integer_co -_floating_co: TypeAlias = np.floating | _integer_co +type _integer_co = np.integer | np.bool +type _float64_co = np.float64 | _integer_co +type _floating_co = np.floating | _integer_co # non-trivial scalar-types that will become `complex128` in `sort_complex()`, # i.e. 
all numeric scalar types except for `[u]int{8,16} | longdouble` -_SortsToComplex128: TypeAlias = ( +type _SortsToComplex128 = ( np.bool | np.int32 | np.uint32 @@ -124,33 +99,37 @@ _SortsToComplex128: TypeAlias = ( | np.timedelta64 | np.object_ ) +type _ScalarNumeric = np.inexact | np.timedelta64 | np.object_ +type _InexactDouble = np.float64 | np.longdouble | np.complex128 | np.clongdouble -_Array: TypeAlias = np.ndarray[_ShapeT, np.dtype[_ScalarT]] -_Array0D: TypeAlias = np.ndarray[tuple[()], np.dtype[_ScalarT]] -_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] -_Array2D: TypeAlias = np.ndarray[tuple[int, int], np.dtype[_ScalarT]] -_Array3D: TypeAlias = np.ndarray[tuple[int, int, int], np.dtype[_ScalarT]] -_ArrayMax2D: TypeAlias = np.ndarray[tuple[int] | tuple[int, int], np.dtype[_ScalarT]] +type _Array[ShapeT: _Shape, ScalarT: np.generic] = np.ndarray[ShapeT, np.dtype[ScalarT]] +type _Array0D[ScalarT: np.generic] = np.ndarray[tuple[()], np.dtype[ScalarT]] +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] +type _Array2D[ScalarT: np.generic] = np.ndarray[tuple[int, int], np.dtype[ScalarT]] +type _Array3D[ScalarT: np.generic] = np.ndarray[tuple[int, int, int], np.dtype[ScalarT]] +type _ArrayMax2D[ScalarT: np.generic] = np.ndarray[tuple[int] | tuple[int, int], np.dtype[ScalarT]] # workaround for mypy and pyright not following the typing spec for overloads -_ArrayNoD: TypeAlias = np.ndarray[tuple[Never, Never, Never, Never], np.dtype[_ScalarT]] +type _ArrayNoD[ScalarT: np.generic] = np.ndarray[tuple[Never, Never, Never, Never], np.dtype[ScalarT]] -_Seq1D: TypeAlias = Sequence[_T] -_Seq2D: TypeAlias = Sequence[Sequence[_T]] -_Seq3D: TypeAlias = Sequence[Sequence[Sequence[_T]]] -_ListSeqND: TypeAlias = list[_T] | _SeqND[list[_T]] +type _Seq1D[T] = Sequence[T] +type _Seq2D[T] = Sequence[Sequence[T]] +type _Seq3D[T] = Sequence[Sequence[Sequence[T]]] +type _ListSeqND[T] = list[T] | _SeqND[list[T]] -_Tuple2: TypeAlias = 
tuple[_T, _T] -_Tuple3: TypeAlias = tuple[_T, _T, _T] -_Tuple4: TypeAlias = tuple[_T, _T, _T, _T] +type _Tuple2[T] = tuple[T, T] +type _Tuple3[T] = tuple[T, T, T] +type _Tuple4[T] = tuple[T, T, T, T] -_Mesh1: TypeAlias = tuple[_Array1D[_ScalarT]] -_Mesh2: TypeAlias = tuple[_Array2D[_ScalarT], _Array2D[_ScalarT1]] -_Mesh3: TypeAlias = tuple[_Array3D[_ScalarT], _Array3D[_ScalarT1], _Array3D[_ScalarT2]] +type _Mesh1[ScalarT: np.generic] = tuple[_Array1D[ScalarT]] +type _Mesh2[ScalarT: np.generic, ScalarT1: np.generic] = tuple[_Array2D[ScalarT], _Array2D[ScalarT1]] +type _Mesh3[ScalarT: np.generic, ScalarT1: np.generic, ScalarT2: np.generic] = tuple[ + _Array3D[ScalarT], _Array3D[ScalarT1], _Array3D[ScalarT2] +] -_IndexLike: TypeAlias = slice | _ArrayLikeInt_co +type _IndexLike = slice | _ArrayLikeInt_co -_Indexing: TypeAlias = L["ij", "xy"] -_InterpolationMethod = L[ +type _Indexing = L["ij", "xy"] +type _InterpolationMethod = L[ "inverted_cdf", "averaged_inverted_cdf", "closest_observation", @@ -168,24 +147,24 @@ _InterpolationMethod = L[ # The resulting value will be used as `y[cond] = func(vals, *args, **kw)`, so in can # return any (usually 1d) array-like or scalar-like compatible with the input. -_PiecewiseFunction: TypeAlias = Callable[Concatenate[NDArray[_ScalarT], _Tss], ArrayLike] -_PiecewiseFunctions: TypeAlias = _SizedIterable[_PiecewiseFunction[_ScalarT, _Tss] | _ScalarLike_co] +type _PiecewiseFunction[ScalarT: np.generic, **Tss] = Callable[Concatenate[NDArray[ScalarT], Tss], ArrayLike] +type _PiecewiseFunctions[ScalarT: np.generic, **Tss] = _SizedIterable[_PiecewiseFunction[ScalarT, Tss] | _ScalarLike_co] @type_check_only -class _TrimZerosSequence(Protocol[_T_co]): +class _TrimZerosSequence[T](Protocol): def __len__(self, /) -> int: ... @overload def __getitem__(self, key: int, /) -> object: ... @overload - def __getitem__(self, key: slice, /) -> _T_co: ... + def __getitem__(self, key: slice, /) -> T: ... 
@type_check_only -class _SupportsRMulFloat(Protocol[_T_co]): - def __rmul__(self, other: float, /) -> _T_co: ... +class _SupportsRMulFloat[T](Protocol): + def __rmul__(self, other: float, /) -> T: ... @type_check_only -class _SizedIterable(Protocol[_T_co]): - def __iter__(self) -> Iterable[_T_co]: ... +class _SizedIterable[T](Protocol): + def __iter__(self) -> Iterable[T]: ... def __len__(self) -> int: ... ### @@ -212,18 +191,18 @@ class vectorize: def __call__(self, /, *args: Incomplete, **kwargs: Incomplete) -> Incomplete: ... @overload -def rot90(m: _ArrayT, k: int = 1, axes: tuple[int, int] = (0, 1)) -> _ArrayT: ... +def rot90[ArrayT: np.ndarray](m: ArrayT, k: int = 1, axes: tuple[int, int] = (0, 1)) -> ArrayT: ... @overload -def rot90(m: _ArrayLike[_ScalarT], k: int = 1, axes: tuple[int, int] = (0, 1)) -> NDArray[_ScalarT]: ... +def rot90[ScalarT: np.generic](m: _ArrayLike[ScalarT], k: int = 1, axes: tuple[int, int] = (0, 1)) -> NDArray[ScalarT]: ... @overload def rot90(m: ArrayLike, k: int = 1, axes: tuple[int, int] = (0, 1)) -> NDArray[Incomplete]: ... # NOTE: Technically `flip` also accept scalars, but that has no effect and complicates # the overloads significantly, so we ignore that case here. @overload -def flip(m: _ArrayT, axis: int | tuple[int, ...] | None = None) -> _ArrayT: ... +def flip[ArrayT: np.ndarray](m: ArrayT, axis: int | tuple[int, ...] | None = None) -> ArrayT: ... @overload -def flip(m: _ArrayLike[_ScalarT], axis: int | tuple[int, ...] | None = None) -> NDArray[_ScalarT]: ... +def flip[ScalarT: np.generic](m: _ArrayLike[ScalarT], axis: int | tuple[int, ...] | None = None) -> NDArray[ScalarT]: ... @overload def flip(m: ArrayLike, axis: int | tuple[int, ...] | None = None) -> NDArray[Incomplete]: ... @@ -235,77 +214,77 @@ def iterable(y: object) -> TypeIs[Iterable[Any]]: ... # NOTE: This assumes that if `keepdims=True` the input is at least 1d, and will # therefore always return an array. 
@overload # inexact array, keepdims=True -def average( - a: _ArrayInexactT, +def average[ArrayT: NDArray[np.inexact]]( + a: ArrayT, axis: int | tuple[int, ...] | None = None, weights: _ArrayLikeNumber_co | None = None, returned: L[False] = False, *, keepdims: L[True], -) -> _ArrayInexactT: ... +) -> ArrayT: ... @overload # inexact array, returned=True keepdims=True -def average( - a: _ArrayInexactT, +def average[ArrayT: NDArray[np.inexact]]( + a: ArrayT, axis: int | tuple[int, ...] | None = None, weights: _ArrayLikeNumber_co | None = None, *, returned: L[True], keepdims: L[True], -) -> _Tuple2[_ArrayInexactT]: ... +) -> _Tuple2[ArrayT]: ... @overload # inexact array-like, axis=None -def average( - a: _ArrayLike[_InexactT], +def average[ScalarT: np.inexact]( + a: _ArrayLike[ScalarT], axis: None = None, weights: _ArrayLikeNumber_co | None = None, returned: L[False] = False, *, keepdims: L[False] | _NoValueType = ..., -) -> _InexactT: ... +) -> ScalarT: ... @overload # inexact array-like, axis= -def average( - a: _ArrayLike[_InexactT], +def average[ScalarT: np.inexact]( + a: _ArrayLike[ScalarT], axis: int | tuple[int, ...], weights: _ArrayLikeNumber_co | None = None, returned: L[False] = False, *, keepdims: L[False] | _NoValueType = ..., -) -> NDArray[_InexactT]: ... +) -> NDArray[ScalarT]: ... @overload # inexact array-like, keepdims=True -def average( - a: _ArrayLike[_InexactT], +def average[ScalarT: np.inexact]( + a: _ArrayLike[ScalarT], axis: int | tuple[int, ...] | None = None, weights: _ArrayLikeNumber_co | None = None, returned: L[False] = False, *, keepdims: L[True], -) -> NDArray[_InexactT]: ... +) -> NDArray[ScalarT]: ... @overload # inexact array-like, axis=None, returned=True -def average( - a: _ArrayLike[_InexactT], +def average[ScalarT: np.inexact]( + a: _ArrayLike[ScalarT], axis: None = None, weights: _ArrayLikeNumber_co | None = None, *, returned: L[True], keepdims: L[False] | _NoValueType = ..., -) -> _Tuple2[_InexactT]: ... 
+) -> _Tuple2[ScalarT]: ... @overload # inexact array-like, axis=, returned=True -def average( - a: _ArrayLike[_InexactT], +def average[ScalarT: np.inexact]( + a: _ArrayLike[ScalarT], axis: int | tuple[int, ...], weights: _ArrayLikeNumber_co | None = None, *, returned: L[True], keepdims: L[False] | _NoValueType = ..., -) -> _Tuple2[NDArray[_InexactT]]: ... +) -> _Tuple2[NDArray[ScalarT]]: ... @overload # inexact array-like, returned=True, keepdims=True -def average( - a: _ArrayLike[_InexactT], +def average[ScalarT: np.inexact]( + a: _ArrayLike[ScalarT], axis: int | tuple[int, ...] | None = None, weights: _ArrayLikeNumber_co | None = None, *, returned: L[True], keepdims: L[True], -) -> _Tuple2[NDArray[_InexactT]]: ... +) -> _Tuple2[NDArray[ScalarT]]: ... @overload # bool or integer array-like, axis=None def average( a: _SeqND[float] | _ArrayLikeInt_co, @@ -471,15 +450,19 @@ def average( # @overload -def asarray_chkfinite(a: _ArrayT, dtype: None = None, order: _OrderKACF = None) -> _ArrayT: ... +def asarray_chkfinite[ArrayT: np.ndarray](a: ArrayT, dtype: None = None, order: _OrderKACF = None) -> ArrayT: ... @overload -def asarray_chkfinite( - a: np.ndarray[_ShapeT], dtype: _DTypeLike[_ScalarT], order: _OrderKACF = None -) -> _Array[_ShapeT, _ScalarT]: ... +def asarray_chkfinite[ShapeT: _Shape, ScalarT: np.generic]( + a: np.ndarray[ShapeT], dtype: _DTypeLike[ScalarT], order: _OrderKACF = None +) -> _Array[ShapeT, ScalarT]: ... @overload -def asarray_chkfinite(a: _ArrayLike[_ScalarT], dtype: None = None, order: _OrderKACF = None) -> NDArray[_ScalarT]: ... +def asarray_chkfinite[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], dtype: None = None, order: _OrderKACF = None +) -> NDArray[ScalarT]: ... @overload -def asarray_chkfinite(a: object, dtype: _DTypeLike[_ScalarT], order: _OrderKACF = None) -> NDArray[_ScalarT]: ... +def asarray_chkfinite[ScalarT: np.generic]( + a: object, dtype: _DTypeLike[ScalarT], order: _OrderKACF = None +) -> NDArray[ScalarT]: ... 
@overload def asarray_chkfinite(a: object, dtype: DTypeLike | None = None, order: _OrderKACF = None) -> NDArray[Incomplete]: ... @@ -488,33 +471,33 @@ def asarray_chkfinite(a: object, dtype: DTypeLike | None = None, order: _OrderKA # practice anything that `np.array(condlist, dtype=bool)` accepts will work, i.e. any # array-like. @overload -def piecewise( - x: _Array[_ShapeT, _ScalarT], +def piecewise[ShapeT: _Shape, ScalarT: np.generic, **Tss]( + x: _Array[ShapeT, ScalarT], condlist: ArrayLike, - funclist: _PiecewiseFunctions[Any, _Tss], - *args: _Tss.args, - **kw: _Tss.kwargs, -) -> _Array[_ShapeT, _ScalarT]: ... + funclist: _PiecewiseFunctions[Any, Tss], + *args: Tss.args, + **kw: Tss.kwargs, +) -> _Array[ShapeT, ScalarT]: ... @overload -def piecewise( - x: _ArrayLike[_ScalarT], +def piecewise[ScalarT: np.generic, **Tss]( + x: _ArrayLike[ScalarT], condlist: ArrayLike, - funclist: _PiecewiseFunctions[Any, _Tss], - *args: _Tss.args, - **kw: _Tss.kwargs, -) -> NDArray[_ScalarT]: ... + funclist: _PiecewiseFunctions[Any, Tss], + *args: Tss.args, + **kw: Tss.kwargs, +) -> NDArray[ScalarT]: ... @overload -def piecewise( +def piecewise[ScalarT: np.generic, **Tss]( x: ArrayLike, condlist: ArrayLike, - funclist: _PiecewiseFunctions[_ScalarT, _Tss], - *args: _Tss.args, - **kw: _Tss.kwargs, -) -> NDArray[_ScalarT]: ... + funclist: _PiecewiseFunctions[ScalarT, Tss], + *args: Tss.args, + **kw: Tss.kwargs, +) -> NDArray[ScalarT]: ... # NOTE: condition is usually boolean, but anything with zero/non-zero semantics works @overload -def extract(condition: ArrayLike, arr: _ArrayLike[_ScalarT]) -> _Array1D[_ScalarT]: ... +def extract[ScalarT: np.generic](condition: ArrayLike, arr: _ArrayLike[ScalarT]) -> _Array1D[ScalarT]: ... @overload def extract(condition: ArrayLike, arr: _SeqND[bool]) -> _Array1D[np.bool]: ... @overload @@ -533,17 +516,17 @@ def extract(condition: ArrayLike, arr: ArrayLike) -> _Array1D[Incomplete]: ... 
# NOTE: unlike `extract`, passing non-boolean conditions for `condlist` will raise an # error at runtime @overload -def select( +def select[ArrayT: np.ndarray]( condlist: _SizedIterable[_ArrayLikeBool_co], - choicelist: Sequence[_ArrayT], + choicelist: Sequence[ArrayT], default: _ScalarLike_co = 0, -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def select( +def select[ScalarT: np.generic]( condlist: _SizedIterable[_ArrayLikeBool_co], - choicelist: Sequence[_ArrayLike[_ScalarT]] | NDArray[_ScalarT], + choicelist: Sequence[_ArrayLike[ScalarT]] | NDArray[ScalarT], default: _ScalarLike_co = 0, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def select( condlist: _SizedIterable[_ArrayLikeBool_co], @@ -553,44 +536,44 @@ def select( # keep roughly in sync with `ma.core.copy` @overload -def copy(a: _ArrayT, order: _OrderKACF, subok: L[True]) -> _ArrayT: ... +def copy[ArrayT: np.ndarray](a: ArrayT, order: _OrderKACF, subok: L[True]) -> ArrayT: ... @overload -def copy(a: _ArrayT, order: _OrderKACF = "K", *, subok: L[True]) -> _ArrayT: ... +def copy[ArrayT: np.ndarray](a: ArrayT, order: _OrderKACF = "K", *, subok: L[True]) -> ArrayT: ... @overload -def copy(a: _ArrayLike[_ScalarT], order: _OrderKACF = "K", subok: L[False] = False) -> NDArray[_ScalarT]: ... +def copy[ScalarT: np.generic](a: _ArrayLike[ScalarT], order: _OrderKACF = "K", subok: L[False] = False) -> NDArray[ScalarT]: ... @overload def copy(a: ArrayLike, order: _OrderKACF = "K", subok: L[False] = False) -> NDArray[Incomplete]: ... # @overload # ?d, known inexact scalar-type -def gradient( - f: _ArrayNoD[_InexactTimeT], +def gradient[ScalarT: np.inexact | np.timedelta64]( + f: _ArrayNoD[ScalarT], *varargs: _ArrayLikeNumber_co, axis: _ShapeLike | None = None, edge_order: L[1, 2] = 1, # `| Any` instead of ` | tuple` is returned to avoid several mypy_primer errors -) -> _Array1D[_InexactTimeT] | Any: ... +) -> _Array1D[ScalarT] | Any: ... 
@overload # 1d, known inexact scalar-type -def gradient( - f: _Array1D[_InexactTimeT], +def gradient[ScalarT: np.inexact | np.timedelta64]( + f: _Array1D[ScalarT], *varargs: _ArrayLikeNumber_co, axis: _ShapeLike | None = None, edge_order: L[1, 2] = 1, -) -> _Array1D[_InexactTimeT]: ... +) -> _Array1D[ScalarT]: ... @overload # 2d, known inexact scalar-type -def gradient( - f: _Array2D[_InexactTimeT], +def gradient[ScalarT: np.inexact | np.timedelta64]( + f: _Array2D[ScalarT], *varargs: _ArrayLikeNumber_co, axis: _ShapeLike | None = None, edge_order: L[1, 2] = 1, -) -> _Mesh2[_InexactTimeT, _InexactTimeT]: ... +) -> _Mesh2[ScalarT, ScalarT]: ... @overload # 3d, known inexact scalar-type -def gradient( - f: _Array3D[_InexactTimeT], +def gradient[ScalarT: np.inexact | np.timedelta64]( + f: _Array3D[ScalarT], *varargs: _ArrayLikeNumber_co, axis: _ShapeLike | None = None, edge_order: L[1, 2] = 1, -) -> _Mesh3[_InexactTimeT, _InexactTimeT, _InexactTimeT]: ... +) -> _Mesh3[ScalarT, ScalarT, ScalarT]: ... @overload # ?d, datetime64 scalar-type def gradient( f: _ArrayNoD[np.datetime64], @@ -671,37 +654,37 @@ def gradient( # @overload # n == 0; return input unchanged -def diff( - a: _T, +def diff[T]( + a: T, n: L[0], axis: SupportsIndex = -1, prepend: ArrayLike | _NoValueType = ..., # = _NoValue append: ArrayLike | _NoValueType = ..., # = _NoValue -) -> _T: ... +) -> T: ... @overload # known array-type -def diff( - a: _ArrayNumericT, +def diff[ArrayT: NDArray[_ScalarNumeric]]( + a: ArrayT, n: int = 1, axis: SupportsIndex = -1, prepend: ArrayLike | _NoValueType = ..., append: ArrayLike | _NoValueType = ..., -) -> _ArrayNumericT: ... +) -> ArrayT: ... @overload # known shape, datetime64 -def diff( - a: _Array[_ShapeT, np.datetime64], +def diff[ShapeT: _Shape]( + a: _Array[ShapeT, np.datetime64], n: int = 1, axis: SupportsIndex = -1, prepend: ArrayLike | _NoValueType = ..., append: ArrayLike | _NoValueType = ..., -) -> _Array[_ShapeT, np.timedelta64]: ... 
+) -> _Array[ShapeT, np.timedelta64]: ... @overload # unknown shape, known scalar-type -def diff( - a: _ArrayLike[_ScalarNumericT], +def diff[ScalarT: _ScalarNumeric]( + a: _ArrayLike[ScalarT], n: int = 1, axis: SupportsIndex = -1, prepend: ArrayLike | _NoValueType = ..., append: ArrayLike | _NoValueType = ..., -) -> NDArray[_ScalarNumericT]: ... +) -> NDArray[ScalarT]: ... @overload # unknown shape, datetime64 def diff( a: _ArrayLike[np.datetime64], @@ -787,23 +770,23 @@ def interp( period: _FloatLike_co | None = None, ) -> np.complex128: ... @overload # float array -def interp( - x: _Array[_ShapeT, _floating_co], +def interp[ShapeT: _Shape]( + x: _Array[ShapeT, _floating_co], xp: _ArrayLikeFloat_co, fp: _ArrayLikeFloat_co, left: _FloatLike_co | None = None, right: _FloatLike_co | None = None, period: _FloatLike_co | None = None, -) -> _Array[_ShapeT, np.float64]: ... +) -> _Array[ShapeT, np.float64]: ... @overload # complex array -def interp( - x: _Array[_ShapeT, _floating_co], +def interp[ShapeT: _Shape]( + x: _Array[ShapeT, _floating_co], xp: _ArrayLikeFloat_co, fp: _ArrayLike1D[np.complexfloating] | list[complex], left: _NumberLike_co | None = None, right: _NumberLike_co | None = None, period: _FloatLike_co | None = None, -) -> _Array[_ShapeT, np.complex128]: ... +) -> _Array[ShapeT, np.complex128]: ... @overload # float sequence def interp( x: _Seq1D[_FloatLike_co], @@ -870,7 +853,7 @@ def interp( # @overload # 0d T: floating -> 0d T -def angle(z: _FloatingT, deg: bool = False) -> _FloatingT: ... +def angle[FloatingT: np.floating](z: FloatingT, deg: bool = False) -> FloatingT: ... @overload # 0d complex | float | ~integer -> 0d float64 def angle(z: complex | _integer_co, deg: bool = False) -> np.float64: ... @overload # 0d complex64 -> 0d float32 @@ -878,13 +861,13 @@ def angle(z: np.complex64, deg: bool = False) -> np.float32: ... @overload # 0d clongdouble -> 0d longdouble def angle(z: np.clongdouble, deg: bool = False) -> np.longdouble: ... 
@overload # T: nd floating -> T -def angle(z: _ArrayFloatingT, deg: bool = False) -> _ArrayFloatingT: ... +def angle[ArrayFloatingT: NDArray[np.floating]](z: ArrayFloatingT, deg: bool = False) -> ArrayFloatingT: ... @overload # nd T: complex128 | ~integer -> nd float64 -def angle(z: _Array[_ShapeT, np.complex128 | _integer_co], deg: bool = False) -> _Array[_ShapeT, np.float64]: ... +def angle[ShapeT: _Shape](z: _Array[ShapeT, np.complex128 | _integer_co], deg: bool = False) -> _Array[ShapeT, np.float64]: ... @overload # nd T: complex64 -> nd float32 -def angle(z: _Array[_ShapeT, np.complex64], deg: bool = False) -> _Array[_ShapeT, np.float32]: ... +def angle[ShapeT: _Shape](z: _Array[ShapeT, np.complex64], deg: bool = False) -> _Array[ShapeT, np.float32]: ... @overload # nd T: clongdouble -> nd longdouble -def angle(z: _Array[_ShapeT, np.clongdouble], deg: bool = False) -> _Array[_ShapeT, np.longdouble]: ... +def angle[ShapeT: _Shape](z: _Array[ShapeT, np.clongdouble], deg: bool = False) -> _Array[ShapeT, np.longdouble]: ... @overload # 1d complex -> 1d float64 def angle(z: _Seq1D[complex], deg: bool = False) -> _Array1D[np.float64]: ... @overload # 2d complex -> 2d float64 @@ -896,21 +879,21 @@ def angle(z: _ArrayLikeComplex_co, deg: bool = False) -> NDArray[np.floating] | # @overload # known array-type -def unwrap( - p: _ArrayFloatObjT, +def unwrap[ArrayT: NDArray[np.floating | np.object_]]( + p: ArrayT, discont: float | None = None, axis: int = -1, *, period: float = ..., # = τ -) -> _ArrayFloatObjT: ... +) -> ArrayT: ... @overload # known shape, float64 -def unwrap( - p: _Array[_ShapeT, _float64_co], +def unwrap[ShapeT: _Shape]( + p: _Array[ShapeT, _float64_co], discont: float | None = None, axis: int = -1, *, period: float = ..., # = τ -) -> _Array[_ShapeT, np.float64]: ... +) -> _Array[ShapeT, np.float64]: ... 
@overload # 1d float64-like def unwrap( p: _Seq1D[float | _float64_co], @@ -954,28 +937,28 @@ def unwrap( # @overload -def sort_complex(a: _ArrayComplexT) -> _ArrayComplexT: ... +def sort_complex[ArrayT: NDArray[np.complexfloating]](a: ArrayT) -> ArrayT: ... @overload # complex64, shape known -def sort_complex(a: _Array[_ShapeT, np.int8 | np.uint8 | np.int16 | np.uint16]) -> _Array[_ShapeT, np.complex64]: ... +def sort_complex[ShapeT: _Shape](a: _Array[ShapeT, np.int8 | np.uint8 | np.int16 | np.uint16]) -> _Array[ShapeT, np.complex64]: ... @overload # complex64, shape unknown def sort_complex(a: _ArrayLike[np.int8 | np.uint8 | np.int16 | np.uint16]) -> NDArray[np.complex64]: ... @overload # complex128, shape known -def sort_complex(a: _Array[_ShapeT, _SortsToComplex128]) -> _Array[_ShapeT, np.complex128]: ... +def sort_complex[ShapeT: _Shape](a: _Array[ShapeT, _SortsToComplex128]) -> _Array[ShapeT, np.complex128]: ... @overload # complex128, shape unknown def sort_complex(a: _ArrayLike[_SortsToComplex128]) -> NDArray[np.complex128]: ... @overload # clongdouble, shape known -def sort_complex(a: _Array[_ShapeT, np.longdouble]) -> _Array[_ShapeT, np.clongdouble]: ... +def sort_complex[ShapeT: _Shape](a: _Array[ShapeT, np.longdouble]) -> _Array[ShapeT, np.clongdouble]: ... @overload # clongdouble, shape unknown def sort_complex(a: _ArrayLike[np.longdouble]) -> NDArray[np.clongdouble]: ... # -def trim_zeros(filt: _TrimZerosSequence[_T], trim: L["f", "b", "fb", "bf"] = "fb", axis: _ShapeLike | None = None) -> _T: ... +def trim_zeros[T](filt: _TrimZerosSequence[T], trim: L["f", "b", "fb", "bf"] = "fb", axis: _ShapeLike | None = None) -> T: ... # NOTE: keep in sync with `corrcoef` @overload # ?d, known inexact scalar-type >=64 precision, y=. 
-def cov( - m: _ArrayLike[_AnyDoubleT], - y: _ArrayLike[_AnyDoubleT], +def cov[ScalarT: _InexactDouble]( + m: _ArrayLike[ScalarT], + y: _ArrayLike[ScalarT], rowvar: bool = True, bias: bool = False, ddof: SupportsIndex | SupportsInt | None = None, @@ -983,10 +966,10 @@ def cov( aweights: _ArrayLikeFloat_co | None = None, *, dtype: None = None, -) -> _Array2D[_AnyDoubleT]: ... +) -> _Array2D[ScalarT]: ... @overload # ?d, known inexact scalar-type >=64 precision, y=None -> 0d or 2d -def cov( - m: _ArrayNoD[_AnyDoubleT], +def cov[ScalarT: _InexactDouble]( + m: _ArrayNoD[ScalarT], y: None = None, rowvar: bool = True, bias: bool = False, @@ -994,11 +977,11 @@ def cov( fweights: _ArrayLikeInt_co | None = None, aweights: _ArrayLikeFloat_co | None = None, *, - dtype: _DTypeLike[_AnyDoubleT] | None = None, -) -> NDArray[_AnyDoubleT]: ... + dtype: _DTypeLike[ScalarT] | None = None, +) -> NDArray[ScalarT]: ... @overload # 1d, known inexact scalar-type >=64 precision, y=None -def cov( - m: _Array1D[_AnyDoubleT], +def cov[ScalarT: _InexactDouble]( + m: _Array1D[ScalarT], y: None = None, rowvar: bool = True, bias: bool = False, @@ -1006,11 +989,11 @@ def cov( fweights: _ArrayLikeInt_co | None = None, aweights: _ArrayLikeFloat_co | None = None, *, - dtype: _DTypeLike[_AnyDoubleT] | None = None, -) -> _Array0D[_AnyDoubleT]: ... + dtype: _DTypeLike[ScalarT] | None = None, +) -> _Array0D[ScalarT]: ... @overload # nd, known inexact scalar-type >=64 precision, y=None -> 0d or 2d -def cov( - m: _ArrayLike[_AnyDoubleT], +def cov[ScalarT: _InexactDouble]( + m: _ArrayLike[ScalarT], y: None = None, rowvar: bool = True, bias: bool = False, @@ -1018,8 +1001,8 @@ def cov( fweights: _ArrayLikeInt_co | None = None, aweights: _ArrayLikeFloat_co | None = None, *, - dtype: _DTypeLike[_AnyDoubleT] | None = None, -) -> NDArray[_AnyDoubleT]: ... + dtype: _DTypeLike[ScalarT] | None = None, +) -> NDArray[ScalarT]: ... 
@overload # nd, casts to float64, y= def cov( m: NDArray[np.float32 | np.float16 | _integer_co] | _Seq1D[float] | _Seq2D[float], @@ -1105,7 +1088,7 @@ def cov( dtype: _DTypeLike[np.complex128] | None = None, ) -> NDArray[np.complex128]: ... @overload # 1d complex-like, y=None, dtype= -def cov( +def cov[ScalarT: np.generic]( m: _Seq1D[_ComplexLike_co], y: None = None, rowvar: bool = True, @@ -1114,10 +1097,10 @@ def cov( fweights: _ArrayLikeInt_co | None = None, aweights: _ArrayLikeFloat_co | None = None, *, - dtype: _DTypeLike[_ScalarT], -) -> _Array0D[_ScalarT]: ... + dtype: _DTypeLike[ScalarT], +) -> _Array0D[ScalarT]: ... @overload # nd complex-like, y=, dtype= -def cov( +def cov[ScalarT: np.generic]( m: _ArrayLikeComplex_co, y: _ArrayLikeComplex_co, rowvar: bool = True, @@ -1126,10 +1109,10 @@ def cov( fweights: _ArrayLikeInt_co | None = None, aweights: _ArrayLikeFloat_co | None = None, *, - dtype: _DTypeLike[_ScalarT], -) -> _Array2D[_ScalarT]: ... + dtype: _DTypeLike[ScalarT], +) -> _Array2D[ScalarT]: ... @overload # nd complex-like, y=None, dtype= -> 0d or 2d -def cov( +def cov[ScalarT: np.generic]( m: _ArrayLikeComplex_co, y: None = None, rowvar: bool = True, @@ -1138,8 +1121,8 @@ def cov( fweights: _ArrayLikeInt_co | None = None, aweights: _ArrayLikeFloat_co | None = None, *, - dtype: _DTypeLike[_ScalarT], -) -> NDArray[_ScalarT]: ... + dtype: _DTypeLike[ScalarT], +) -> NDArray[ScalarT]: ... @overload # nd complex-like, y=, dtype=? def cov( m: _ArrayLikeComplex_co, @@ -1182,37 +1165,37 @@ def cov( # This differs from `cov`, which returns 0-D arrays instead of scalars in such cases. # NOTE: keep in sync with `cov` @overload # ?d, known inexact scalar-type >=64 precision, y=. -def corrcoef( - x: _ArrayLike[_AnyDoubleT], - y: _ArrayLike[_AnyDoubleT], +def corrcoef[ScalarT: _InexactDouble]( + x: _ArrayLike[ScalarT], + y: _ArrayLike[ScalarT], rowvar: bool = True, *, - dtype: _DTypeLike[_AnyDoubleT] | None = None, -) -> _Array2D[_AnyDoubleT]: ... 
+ dtype: _DTypeLike[ScalarT] | None = None, +) -> _Array2D[ScalarT]: ... @overload # ?d, known inexact scalar-type >=64 precision, y=None -def corrcoef( - x: _ArrayNoD[_AnyDoubleT], +def corrcoef[ScalarT: _InexactDouble]( + x: _ArrayNoD[ScalarT], y: None = None, rowvar: bool = True, *, - dtype: _DTypeLike[_AnyDoubleT] | None = None, -) -> _Array2D[_AnyDoubleT] | _AnyDoubleT: ... + dtype: _DTypeLike[ScalarT] | None = None, +) -> _Array2D[ScalarT] | ScalarT: ... @overload # 1d, known inexact scalar-type >=64 precision, y=None -def corrcoef( - x: _Array1D[_AnyDoubleT], +def corrcoef[ScalarT: _InexactDouble]( + x: _Array1D[ScalarT], y: None = None, rowvar: bool = True, *, - dtype: _DTypeLike[_AnyDoubleT] | None = None, -) -> _AnyDoubleT: ... + dtype: _DTypeLike[ScalarT] | None = None, +) -> ScalarT: ... @overload # nd, known inexact scalar-type >=64 precision, y=None -def corrcoef( - x: _ArrayLike[_AnyDoubleT], +def corrcoef[ScalarT: _InexactDouble]( + x: _ArrayLike[ScalarT], y: None = None, rowvar: bool = True, *, - dtype: _DTypeLike[_AnyDoubleT] | None = None, -) -> _Array2D[_AnyDoubleT] | _AnyDoubleT: ... + dtype: _DTypeLike[ScalarT] | None = None, +) -> _Array2D[ScalarT] | ScalarT: ... @overload # nd, casts to float64, y= def corrcoef( x: NDArray[np.float32 | np.float16 | _integer_co] | _Seq1D[float] | _Seq2D[float], @@ -1270,29 +1253,29 @@ def corrcoef( dtype: _DTypeLike[np.complex128] | None = None, ) -> _Array2D[np.complex128] | np.complex128: ... @overload # 1d complex-like, y=None, dtype= -def corrcoef( +def corrcoef[ScalarT: np.generic]( x: _Seq1D[_ComplexLike_co], y: None = None, rowvar: bool = True, *, - dtype: _DTypeLike[_ScalarT], -) -> _ScalarT: ... + dtype: _DTypeLike[ScalarT], +) -> ScalarT: ... @overload # nd complex-like, y=, dtype= -def corrcoef( +def corrcoef[ScalarT: np.generic]( x: _ArrayLikeComplex_co, y: _ArrayLikeComplex_co, rowvar: bool = True, *, - dtype: _DTypeLike[_ScalarT], -) -> _Array2D[_ScalarT]: ... 
+ dtype: _DTypeLike[ScalarT], +) -> _Array2D[ScalarT]: ... @overload # nd complex-like, y=None, dtype= -def corrcoef( +def corrcoef[ScalarT: np.generic]( x: _ArrayLikeComplex_co, y: None = None, rowvar: bool = True, *, - dtype: _DTypeLike[_ScalarT], -) -> _Array2D[_ScalarT] | _ScalarT: ... + dtype: _DTypeLike[ScalarT], +) -> _Array2D[ScalarT] | ScalarT: ... @overload # nd complex-like, y=, dtype=? def corrcoef( x: _ArrayLikeComplex_co, @@ -1327,7 +1310,7 @@ def kaiser(M: _FloatLike_co, beta: _FloatLike_co) -> _Array1D[np.float64]: ... # @overload -def i0(x: _Array[_ShapeT, np.floating | np.integer]) -> _Array[_ShapeT, np.float64]: ... +def i0[ShapeT: _Shape](x: _Array[ShapeT, np.floating | np.integer]) -> _Array[ShapeT, np.float64]: ... @overload def i0(x: _FloatLike_co) -> _Array0D[np.float64]: ... @overload @@ -1341,15 +1324,15 @@ def i0(x: _ArrayLikeFloat_co) -> NDArray[np.float64]: ... # @overload -def sinc(x: _InexactT) -> _InexactT: ... +def sinc[ScalarT: np.inexact](x: ScalarT) -> ScalarT: ... @overload def sinc(x: float | _float64_co) -> np.float64: ... @overload def sinc(x: complex) -> np.complex128 | Any: ... @overload -def sinc(x: _ArrayInexactT) -> _ArrayInexactT: ... +def sinc[ArrayT: NDArray[np.inexact]](x: ArrayT) -> ArrayT: ... @overload -def sinc(x: _Array[_ShapeT, _integer_co]) -> _Array[_ShapeT, np.float64]: ... +def sinc[ShapeT: _Shape](x: _Array[ShapeT, _integer_co]) -> _Array[ShapeT, np.float64]: ... @overload def sinc(x: _Seq1D[float]) -> _Array1D[np.float64]: ... @overload @@ -1370,13 +1353,13 @@ def sinc(x: _ArrayLikeComplex_co) -> np.ndarray | Any: ... # NOTE: We assume that `axis` is only provided for >=1-D arrays because for <1-D arrays # it has no effect, and would complicate the overloads significantly. 
@overload # known scalar-type, keepdims=False (default) -def median( - a: _ArrayLike[_InexactTimeT], +def median[ScalarT: np.inexact | np.timedelta64]( + a: _ArrayLike[ScalarT], axis: None = None, out: None = None, overwrite_input: bool = False, keepdims: L[False] = False, -) -> _InexactTimeT: ... +) -> ScalarT: ... @overload # float array-like, keepdims=False (default) def median( a: _ArrayLikeInt_co | _SeqND[float] | float, @@ -1402,31 +1385,31 @@ def median( keepdims: L[False] = False, ) -> np.complex128 | Any: ... @overload # known array-type, keepdims=True -def median( - a: _ArrayNumericT, +def median[ArrayT: NDArray[_ScalarNumeric]]( + a: ArrayT, axis: _ShapeLike | None = None, out: None = None, overwrite_input: bool = False, *, keepdims: L[True], -) -> _ArrayNumericT: ... +) -> ArrayT: ... @overload # known scalar-type, keepdims=True -def median( - a: _ArrayLike[_ScalarNumericT], +def median[ScalarT: _ScalarNumeric]( + a: _ArrayLike[ScalarT], axis: _ShapeLike | None = None, out: None = None, overwrite_input: bool = False, *, keepdims: L[True], -) -> NDArray[_ScalarNumericT]: ... +) -> NDArray[ScalarT]: ... @overload # known scalar-type, axis= -def median( - a: _ArrayLike[_ScalarNumericT], +def median[ScalarT: _ScalarNumeric]( + a: _ArrayLike[ScalarT], axis: _ShapeLike, out: None = None, overwrite_input: bool = False, keepdims: bool = False, -) -> NDArray[_ScalarNumericT]: ... +) -> NDArray[ScalarT]: ... @overload # float array-like, keepdims=True def median( a: _SeqND[float], @@ -1462,22 +1445,22 @@ def median( keepdims: bool = False, ) -> NDArray[np.complex128]: ... @overload # out= (keyword) -def median( +def median[ArrayT: np.ndarray]( a: _ArrayLikeComplex_co | _ArrayLike[np.timedelta64 | np.object_], axis: _ShapeLike | None = None, *, - out: _ArrayT, + out: ArrayT, overwrite_input: bool = False, keepdims: bool = False, -) -> _ArrayT: ... +) -> ArrayT: ... 
@overload # out= (positional) -def median( +def median[ArrayT: np.ndarray]( a: _ArrayLikeComplex_co | _ArrayLike[np.timedelta64 | np.object_], axis: _ShapeLike | None, - out: _ArrayT, + out: ArrayT, overwrite_input: bool = False, keepdims: bool = False, -) -> _ArrayT: ... +) -> ArrayT: ... @overload # fallback def median( a: _ArrayLikeComplex_co | _ArrayLike[np.timedelta64 | np.object_], @@ -1489,8 +1472,8 @@ def median( # NOTE: keep in sync with `quantile` @overload # inexact, scalar, axis=None -def percentile( - a: _ArrayLike[_InexactDateTimeT], +def percentile[ScalarT: np.inexact | np.timedelta64 | np.datetime64]( + a: _ArrayLike[ScalarT], q: _FloatLike_co, axis: None = None, out: None = None, @@ -1499,10 +1482,10 @@ def percentile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> _InexactDateTimeT: ... +) -> ScalarT: ... @overload # inexact, scalar, axis= -def percentile( - a: _ArrayLike[_InexactDateTimeT], +def percentile[ScalarT: np.inexact | np.timedelta64 | np.datetime64]( + a: _ArrayLike[ScalarT], q: _FloatLike_co, axis: _ShapeLike, out: None = None, @@ -1511,10 +1494,10 @@ def percentile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> NDArray[_InexactDateTimeT]: ... +) -> NDArray[ScalarT]: ... @overload # inexact, scalar, keepdims=True -def percentile( - a: _ArrayLike[_InexactDateTimeT], +def percentile[ScalarT: np.inexact | np.timedelta64 | np.datetime64]( + a: _ArrayLike[ScalarT], q: _FloatLike_co, axis: _ShapeLike | None = None, out: None = None, @@ -1523,11 +1506,11 @@ def percentile( *, keepdims: L[True], weights: _ArrayLikeFloat_co | None = None, -) -> NDArray[_InexactDateTimeT]: ... +) -> NDArray[ScalarT]: ... 
@overload # inexact, array, axis=None -def percentile( - a: _ArrayLike[_InexactDateTimeT], - q: _Array[_ShapeT, _floating_co], +def percentile[ScalarT: np.inexact | np.timedelta64 | np.datetime64, ShapeT: _Shape]( + a: _ArrayLike[ScalarT], + q: _Array[ShapeT, _floating_co], axis: None = None, out: None = None, overwrite_input: bool = False, @@ -1535,10 +1518,10 @@ def percentile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> _Array[_ShapeT, _InexactDateTimeT]: ... +) -> _Array[ShapeT, ScalarT]: ... @overload # inexact, array-like -def percentile( - a: _ArrayLike[_InexactDateTimeT], +def percentile[ScalarT: np.inexact | np.timedelta64 | np.datetime64]( + a: _ArrayLike[ScalarT], q: NDArray[_floating_co] | _SeqND[_FloatLike_co], axis: _ShapeLike | None = None, out: None = None, @@ -1547,7 +1530,7 @@ def percentile( keepdims: bool = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> NDArray[_InexactDateTimeT]: ... +) -> NDArray[ScalarT]: ... @overload # float, scalar, axis=None def percentile( a: _SeqND[float] | _ArrayLikeInt_co, @@ -1585,9 +1568,9 @@ def percentile( weights: _ArrayLikeFloat_co | None = None, ) -> NDArray[np.float64]: ... @overload # float, array, axis=None -def percentile( +def percentile[ShapeT: _Shape]( a: _SeqND[float] | _ArrayLikeInt_co, - q: _Array[_ShapeT, _floating_co], + q: _Array[ShapeT, _floating_co], axis: None = None, out: None = None, overwrite_input: bool = False, @@ -1595,7 +1578,7 @@ def percentile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> _Array[_ShapeT, np.float64]: ... +) -> _Array[ShapeT, np.float64]: ... @overload # float, array-like def percentile( a: _SeqND[float] | _ArrayLikeInt_co, @@ -1645,9 +1628,9 @@ def percentile( weights: _ArrayLikeFloat_co | None = None, ) -> NDArray[np.complex128]: ... 
@overload # complex, array, axis=None -def percentile( +def percentile[ShapeT: _Shape]( a: _ListSeqND[complex], - q: _Array[_ShapeT, _floating_co], + q: _Array[ShapeT, _floating_co], axis: None = None, out: None = None, overwrite_input: bool = False, @@ -1655,7 +1638,7 @@ def percentile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> _Array[_ShapeT, np.complex128]: ... +) -> _Array[ShapeT, np.complex128]: ... @overload # complex, array-like def percentile( a: _ListSeqND[complex], @@ -1705,9 +1688,9 @@ def percentile( weights: _ArrayLikeFloat_co | None = None, ) -> NDArray[np.object_]: ... @overload # object_, array, axis=None -def percentile( +def percentile[ShapeT: _Shape]( a: _ArrayLikeObject_co, - q: _Array[_ShapeT, _floating_co], + q: _Array[ShapeT, _floating_co], axis: None = None, out: None = None, overwrite_input: bool = False, @@ -1715,7 +1698,7 @@ def percentile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> _Array[_ShapeT, np.object_]: ... +) -> _Array[ShapeT, np.object_]: ... @overload # object_, array-like def percentile( a: _ArrayLikeObject_co, @@ -1729,29 +1712,29 @@ def percentile( weights: _ArrayLikeFloat_co | None = None, ) -> NDArray[np.object_]: ... @overload # out= (keyword) -def percentile( +def percentile[ArrayT: np.ndarray]( a: ArrayLike, q: _ArrayLikeFloat_co, axis: _ShapeLike | None, - out: _ArrayT, + out: ArrayT, overwrite_input: bool = False, method: _InterpolationMethod = "linear", keepdims: bool = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> _ArrayT: ... +) -> ArrayT: ... @overload # out= (positional) -def percentile( +def percentile[ArrayT: np.ndarray]( a: ArrayLike, q: _ArrayLikeFloat_co, axis: _ShapeLike | None = None, *, - out: _ArrayT, + out: ArrayT, overwrite_input: bool = False, method: _InterpolationMethod = "linear", keepdims: bool = False, weights: _ArrayLikeFloat_co | None = None, -) -> _ArrayT: ... +) -> ArrayT: ... 
@overload # fallback def percentile( a: _ArrayLikeNumber_co | _ArrayLikeObject_co, @@ -1767,8 +1750,8 @@ def percentile( # NOTE: keep in sync with `percentile` @overload # inexact, scalar, axis=None -def quantile( - a: _ArrayLike[_InexactDateTimeT], +def quantile[ScalarT: np.inexact | np.timedelta64 | np.datetime64]( + a: _ArrayLike[ScalarT], q: _FloatLike_co, axis: None = None, out: None = None, @@ -1777,10 +1760,10 @@ def quantile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> _InexactDateTimeT: ... +) -> ScalarT: ... @overload # inexact, scalar, axis= -def quantile( - a: _ArrayLike[_InexactDateTimeT], +def quantile[ScalarT: np.inexact | np.timedelta64 | np.datetime64]( + a: _ArrayLike[ScalarT], q: _FloatLike_co, axis: _ShapeLike, out: None = None, @@ -1789,10 +1772,10 @@ def quantile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> NDArray[_InexactDateTimeT]: ... +) -> NDArray[ScalarT]: ... @overload # inexact, scalar, keepdims=True -def quantile( - a: _ArrayLike[_InexactDateTimeT], +def quantile[ScalarT: np.inexact | np.timedelta64 | np.datetime64]( + a: _ArrayLike[ScalarT], q: _FloatLike_co, axis: _ShapeLike | None = None, out: None = None, @@ -1801,11 +1784,11 @@ def quantile( *, keepdims: L[True], weights: _ArrayLikeFloat_co | None = None, -) -> NDArray[_InexactDateTimeT]: ... +) -> NDArray[ScalarT]: ... @overload # inexact, array, axis=None -def quantile( - a: _ArrayLike[_InexactDateTimeT], - q: _Array[_ShapeT, _floating_co], +def quantile[ScalarT: np.inexact | np.timedelta64 | np.datetime64, ShapeT: _Shape]( + a: _ArrayLike[ScalarT], + q: _Array[ShapeT, _floating_co], axis: None = None, out: None = None, overwrite_input: bool = False, @@ -1813,10 +1796,10 @@ def quantile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> _Array[_ShapeT, _InexactDateTimeT]: ... +) -> _Array[ShapeT, ScalarT]: ... 
@overload # inexact, array-like -def quantile( - a: _ArrayLike[_InexactDateTimeT], +def quantile[ScalarT: np.inexact | np.timedelta64 | np.datetime64]( + a: _ArrayLike[ScalarT], q: NDArray[_floating_co] | _SeqND[_FloatLike_co], axis: _ShapeLike | None = None, out: None = None, @@ -1825,7 +1808,7 @@ def quantile( keepdims: bool = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> NDArray[_InexactDateTimeT]: ... +) -> NDArray[ScalarT]: ... @overload # float, scalar, axis=None def quantile( a: _SeqND[float] | _ArrayLikeInt_co, @@ -1863,9 +1846,9 @@ def quantile( weights: _ArrayLikeFloat_co | None = None, ) -> NDArray[np.float64]: ... @overload # float, array, axis=None -def quantile( +def quantile[ShapeT: _Shape]( a: _SeqND[float] | _ArrayLikeInt_co, - q: _Array[_ShapeT, _floating_co], + q: _Array[ShapeT, _floating_co], axis: None = None, out: None = None, overwrite_input: bool = False, @@ -1873,7 +1856,7 @@ def quantile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> _Array[_ShapeT, np.float64]: ... +) -> _Array[ShapeT, np.float64]: ... @overload # float, array-like def quantile( a: _SeqND[float] | _ArrayLikeInt_co, @@ -1923,9 +1906,9 @@ def quantile( weights: _ArrayLikeFloat_co | None = None, ) -> NDArray[np.complex128]: ... @overload # complex, array, axis=None -def quantile( +def quantile[ShapeT: _Shape]( a: _ListSeqND[complex], - q: _Array[_ShapeT, _floating_co], + q: _Array[ShapeT, _floating_co], axis: None = None, out: None = None, overwrite_input: bool = False, @@ -1933,7 +1916,7 @@ def quantile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> _Array[_ShapeT, np.complex128]: ... +) -> _Array[ShapeT, np.complex128]: ... @overload # complex, array-like def quantile( a: _ListSeqND[complex], @@ -1983,9 +1966,9 @@ def quantile( weights: _ArrayLikeFloat_co | None = None, ) -> NDArray[np.object_]: ... 
@overload # object_, array, axis=None -def quantile( +def quantile[ShapeT: _Shape]( a: _ArrayLikeObject_co, - q: _Array[_ShapeT, _floating_co], + q: _Array[ShapeT, _floating_co], axis: None = None, out: None = None, overwrite_input: bool = False, @@ -1993,7 +1976,7 @@ def quantile( keepdims: L[False] = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> _Array[_ShapeT, np.object_]: ... +) -> _Array[ShapeT, np.object_]: ... @overload # object_, array-like def quantile( a: _ArrayLikeObject_co, @@ -2007,29 +1990,29 @@ def quantile( weights: _ArrayLikeFloat_co | None = None, ) -> NDArray[np.object_]: ... @overload # out= (keyword) -def quantile( +def quantile[ArrayT: np.ndarray]( a: ArrayLike, q: _ArrayLikeFloat_co, axis: _ShapeLike | None, - out: _ArrayT, + out: ArrayT, overwrite_input: bool = False, method: _InterpolationMethod = "linear", keepdims: bool = False, *, weights: _ArrayLikeFloat_co | None = None, -) -> _ArrayT: ... +) -> ArrayT: ... @overload # out= (positional) -def quantile( +def quantile[ArrayT: np.ndarray]( a: ArrayLike, q: _ArrayLikeFloat_co, axis: _ShapeLike | None = None, *, - out: _ArrayT, + out: ArrayT, overwrite_input: bool = False, method: _InterpolationMethod = "linear", keepdims: bool = False, weights: _ArrayLikeFloat_co | None = None, -) -> _ArrayT: ... +) -> ArrayT: ... @overload # fallback def quantile( a: _ArrayLikeNumber_co | _ArrayLikeObject_co, @@ -2045,12 +2028,12 @@ def quantile( # @overload # ?d, known inexact/timedelta64 scalar-type -def trapezoid( - y: _ArrayNoD[_InexactTimeT], - x: _ArrayLike[_InexactTimeT] | _ArrayLikeFloat_co | None = None, +def trapezoid[ScalarT: np.inexact | np.timedelta64]( + y: _ArrayNoD[ScalarT], + x: _ArrayLike[ScalarT] | _ArrayLikeFloat_co | None = None, dx: float = 1.0, axis: SupportsIndex = -1, -) -> NDArray[_InexactTimeT] | _InexactTimeT: ... +) -> NDArray[ScalarT] | ScalarT: ... 
@overload # ?d, casts to float64 def trapezoid( y: _ArrayNoD[_integer_co], @@ -2059,12 +2042,12 @@ def trapezoid( axis: SupportsIndex = -1, ) -> NDArray[np.float64] | np.float64: ... @overload # strict 1d, known inexact/timedelta64 scalar-type -def trapezoid( - y: _Array1D[_InexactTimeT], - x: _Array1D[_InexactTimeT] | _Seq1D[float] | None = None, +def trapezoid[ScalarT: np.inexact | np.timedelta64]( + y: _Array1D[ScalarT], + x: _Array1D[ScalarT] | _Seq1D[float] | None = None, dx: float = 1.0, axis: SupportsIndex = -1, -) -> _InexactTimeT: ... +) -> ScalarT: ... @overload # strict 1d, casts to float64 def trapezoid( y: _Array1D[_float64_co] | _Seq1D[float], @@ -2087,12 +2070,12 @@ def trapezoid( axis: SupportsIndex = -1, ) -> np.complex128: ... @overload # strict 2d, known inexact/timedelta64 scalar-type -def trapezoid( - y: _Array2D[_InexactTimeT], - x: _ArrayMax2D[_InexactTimeT] | _Seq2D[float] | _Seq1D[float] | None = None, +def trapezoid[ScalarT: np.inexact | np.timedelta64]( + y: _Array2D[ScalarT], + x: _ArrayMax2D[ScalarT] | _Seq2D[float] | _Seq1D[float] | None = None, dx: float = 1.0, axis: SupportsIndex = -1, -) -> _InexactTimeT: ... +) -> ScalarT: ... @overload # strict 2d, casts to float64 def trapezoid( y: _Array2D[_float64_co] | _Seq2D[float], @@ -2115,12 +2098,12 @@ def trapezoid( axis: SupportsIndex = -1, ) -> np.complex128: ... @overload -def trapezoid( - y: _ArrayLike[_InexactTimeT], - x: _ArrayLike[_InexactTimeT] | _ArrayLikeInt_co | None = None, +def trapezoid[ScalarT: np.inexact | np.timedelta64]( + y: _ArrayLike[ScalarT], + x: _ArrayLike[ScalarT] | _ArrayLikeInt_co | None = None, dx: complex = 1.0, axis: SupportsIndex = -1, -) -> NDArray[_InexactTimeT] | _InexactTimeT: ... +) -> NDArray[ScalarT] | ScalarT: ... @overload def trapezoid( y: _ArrayLike[_float64_co], @@ -2150,12 +2133,12 @@ def trapezoid( axis: SupportsIndex = -1, ) -> NDArray[np.object_] | Any: ... 
@overload -def trapezoid( - y: _Seq1D[_SupportsRMulFloat[_T]], - x: _Seq1D[_SupportsRMulFloat[_T] | _T] | None = None, +def trapezoid[T]( + y: _Seq1D[_SupportsRMulFloat[T]], + x: _Seq1D[_SupportsRMulFloat[T] | T] | None = None, dx: complex = 1.0, axis: SupportsIndex = -1, -) -> _T: ... +) -> T: ... @overload def trapezoid( y: _ArrayLikeComplex_co | _ArrayLike[np.timedelta64 | np.object_], @@ -2168,14 +2151,14 @@ def trapezoid( @overload # 0d def meshgrid(*, copy: bool = True, sparse: bool = False, indexing: _Indexing = "xy") -> tuple[()]: ... @overload # 1d, known scalar-type -def meshgrid( - x1: _ArrayLike[_ScalarT], +def meshgrid[ScalarT: np.generic]( + x1: _ArrayLike[ScalarT], /, *, copy: bool = True, sparse: bool = False, indexing: _Indexing = "xy", -) -> _Mesh1[_ScalarT]: ... +) -> _Mesh1[ScalarT]: ... @overload # 1d, unknown scalar-type def meshgrid( x1: ArrayLike, @@ -2186,35 +2169,35 @@ def meshgrid( indexing: _Indexing = "xy", ) -> _Mesh1[Any]: ... @overload # 2d, known scalar-types -def meshgrid( - x1: _ArrayLike[_ScalarT], - x2: _ArrayLike[_ScalarT1], +def meshgrid[ScalarT1: np.generic, ScalarT2: np.generic]( + x1: _ArrayLike[ScalarT1], + x2: _ArrayLike[ScalarT2], /, *, copy: bool = True, sparse: bool = False, indexing: _Indexing = "xy", -) -> _Mesh2[_ScalarT, _ScalarT1]: ... +) -> _Mesh2[ScalarT1, ScalarT2]: ... @overload # 2d, known/unknown scalar-types -def meshgrid( - x1: _ArrayLike[_ScalarT], +def meshgrid[ScalarT: np.generic]( + x1: _ArrayLike[ScalarT], x2: ArrayLike, /, *, copy: bool = True, sparse: bool = False, indexing: _Indexing = "xy", -) -> _Mesh2[_ScalarT, Any]: ... +) -> _Mesh2[ScalarT, Any]: ... @overload # 2d, unknown/known scalar-types -def meshgrid( +def meshgrid[ScalarT: np.generic]( x1: ArrayLike, - x2: _ArrayLike[_ScalarT], + x2: _ArrayLike[ScalarT], /, *, copy: bool = True, sparse: bool = False, indexing: _Indexing = "xy", -) -> _Mesh2[Any, _ScalarT]: ... +) -> _Mesh2[Any, ScalarT]: ... 
@overload # 2d, unknown scalar-types def meshgrid( x1: ArrayLike, @@ -2226,16 +2209,16 @@ def meshgrid( indexing: _Indexing = "xy", ) -> _Mesh2[Any, Any]: ... @overload # 3d, known scalar-types -def meshgrid( - x1: _ArrayLike[_ScalarT], - x2: _ArrayLike[_ScalarT1], - x3: _ArrayLike[_ScalarT2], +def meshgrid[ScalarT1: np.generic, ScalarT2: np.generic, ScalarT3: np.generic]( + x1: _ArrayLike[ScalarT1], + x2: _ArrayLike[ScalarT2], + x3: _ArrayLike[ScalarT3], /, *, copy: bool = True, sparse: bool = False, indexing: _Indexing = "xy", -) -> _Mesh3[_ScalarT, _ScalarT1, _ScalarT2]: ... +) -> _Mesh3[ScalarT1, ScalarT2, ScalarT3]: ... @overload # 3d, unknown scalar-types def meshgrid( x1: ArrayLike, @@ -2248,12 +2231,12 @@ def meshgrid( indexing: _Indexing = "xy", ) -> _Mesh3[Any, Any, Any]: ... @overload # ?d, known scalar-types -def meshgrid( - *xi: _ArrayLike[_ScalarT], +def meshgrid[ScalarT: np.generic]( + *xi: _ArrayLike[ScalarT], copy: bool = True, sparse: bool = False, indexing: _Indexing = "xy", -) -> tuple[NDArray[_ScalarT], ...]: ... +) -> tuple[NDArray[ScalarT], ...]: ... @overload # ?d, unknown scalar-types def meshgrid( *xi: ArrayLike, @@ -2267,11 +2250,11 @@ def place(arr: np.ndarray, mask: ConvertibleToInt | Sequence[ConvertibleToInt], # keep in sync with `insert` @overload # known scalar-type, axis=None (default) -def delete(arr: _ArrayLike[_ScalarT], obj: _IndexLike, axis: None = None) -> _Array1D[_ScalarT]: ... +def delete[ScalarT: np.generic](arr: _ArrayLike[ScalarT], obj: _IndexLike, axis: None = None) -> _Array1D[ScalarT]: ... @overload # known array-type, axis specified -def delete(arr: _ArrayT, obj: _IndexLike, axis: SupportsIndex) -> _ArrayT: ... +def delete[ArrayT: np.ndarray](arr: ArrayT, obj: _IndexLike, axis: SupportsIndex) -> ArrayT: ... @overload # known scalar-type, axis specified -def delete(arr: _ArrayLike[_ScalarT], obj: _IndexLike, axis: SupportsIndex) -> NDArray[_ScalarT]: ... 
+def delete[ScalarT: np.generic](arr: _ArrayLike[ScalarT], obj: _IndexLike, axis: SupportsIndex) -> NDArray[ScalarT]: ... @overload # known scalar-type, axis=None (default) def delete(arr: ArrayLike, obj: _IndexLike, axis: None = None) -> _Array1D[Any]: ... @overload # unknown scalar-type, axis specified @@ -2279,11 +2262,11 @@ def delete(arr: ArrayLike, obj: _IndexLike, axis: SupportsIndex) -> NDArray[Any] # keep in sync with `delete` @overload # known scalar-type, axis=None (default) -def insert(arr: _ArrayLike[_ScalarT], obj: _IndexLike, values: ArrayLike, axis: None = None) -> _Array1D[_ScalarT]: ... +def insert[ScalarT: np.generic](arr: _ArrayLike[ScalarT], obj: _IndexLike, values: ArrayLike, axis: None = None) -> _Array1D[ScalarT]: ... @overload # known array-type, axis specified -def insert(arr: _ArrayT, obj: _IndexLike, values: ArrayLike, axis: SupportsIndex) -> _ArrayT: ... +def insert[ArrayT: np.ndarray](arr: ArrayT, obj: _IndexLike, values: ArrayLike, axis: SupportsIndex) -> ArrayT: ... @overload # known scalar-type, axis specified -def insert(arr: _ArrayLike[_ScalarT], obj: _IndexLike, values: ArrayLike, axis: SupportsIndex) -> NDArray[_ScalarT]: ... +def insert[ScalarT: np.generic](arr: _ArrayLike[ScalarT], obj: _IndexLike, values: ArrayLike, axis: SupportsIndex) -> NDArray[ScalarT]: ... @overload # known scalar-type, axis=None (default) def insert(arr: ArrayLike, obj: _IndexLike, values: ArrayLike, axis: None = None) -> _Array1D[Any]: ... @overload # unknown scalar-type, axis specified @@ -2291,27 +2274,27 @@ def insert(arr: ArrayLike, obj: _IndexLike, values: ArrayLike, axis: SupportsInd # @overload # known array type, axis specified -def append(arr: _ArrayT, values: _ArrayT, axis: SupportsIndex) -> _ArrayT: ... +def append[ArrayT: np.ndarray](arr: ArrayT, values: ArrayT, axis: SupportsIndex) -> ArrayT: ... 
@overload # 1d, known scalar type, axis specified -def append(arr: _Seq1D[_ScalarT], values: _Seq1D[_ScalarT], axis: SupportsIndex) -> _Array1D[_ScalarT]: ... +def append[ScalarT: np.generic](arr: _Seq1D[ScalarT], values: _Seq1D[ScalarT], axis: SupportsIndex) -> _Array1D[ScalarT]: ... @overload # 2d, known scalar type, axis specified -def append(arr: _Seq2D[_ScalarT], values: _Seq2D[_ScalarT], axis: SupportsIndex) -> _Array2D[_ScalarT]: ... +def append[ScalarT: np.generic](arr: _Seq2D[ScalarT], values: _Seq2D[ScalarT], axis: SupportsIndex) -> _Array2D[ScalarT]: ... @overload # 3d, known scalar type, axis specified -def append(arr: _Seq3D[_ScalarT], values: _Seq3D[_ScalarT], axis: SupportsIndex) -> _Array3D[_ScalarT]: ... +def append[ScalarT: np.generic](arr: _Seq3D[ScalarT], values: _Seq3D[ScalarT], axis: SupportsIndex) -> _Array3D[ScalarT]: ... @overload # ?d, known scalar type, axis specified -def append(arr: _SeqND[_ScalarT], values: _SeqND[_ScalarT], axis: SupportsIndex) -> NDArray[_ScalarT]: ... +def append[ScalarT: np.generic](arr: _SeqND[ScalarT], values: _SeqND[ScalarT], axis: SupportsIndex) -> NDArray[ScalarT]: ... @overload # ?d, unknown scalar type, axis specified def append(arr: np.ndarray | _SeqND[_ScalarLike_co], values: _SeqND[_ScalarLike_co], axis: SupportsIndex) -> np.ndarray: ... @overload # known scalar type, axis=None -def append(arr: _ArrayLike[_ScalarT], values: _ArrayLike[_ScalarT], axis: None = None) -> _Array1D[_ScalarT]: ... +def append[ScalarT: np.generic](arr: _ArrayLike[ScalarT], values: _ArrayLike[ScalarT], axis: None = None) -> _Array1D[ScalarT]: ... @overload # unknown scalar type, axis=None def append(arr: ArrayLike, values: ArrayLike, axis: None = None) -> _Array1D[Any]: ... # @overload -def digitize( - x: _Array[_ShapeT, np.floating | np.integer], bins: _ArrayLikeFloat_co, right: bool = False -) -> _Array[_ShapeT, np.int_]: ... 
+def digitize[ShapeT: _Shape]( + x: _Array[ShapeT, np.floating | np.integer], bins: _ArrayLikeFloat_co, right: bool = False +) -> _Array[ShapeT, np.int_]: ... @overload def digitize(x: _FloatLike_co, bins: _ArrayLikeFloat_co, right: bool = False) -> np.int_: ... @overload diff --git a/numpy/lib/_histograms_impl.pyi b/numpy/lib/_histograms_impl.pyi index 72a31dcedc1f..0c4c673ef063 100644 --- a/numpy/lib/_histograms_impl.pyi +++ b/numpy/lib/_histograms_impl.pyi @@ -1,11 +1,11 @@ from collections.abc import Sequence -from typing import Any, Literal as L, SupportsIndex, TypeAlias +from typing import Any, Literal as L, SupportsIndex from numpy._typing import ArrayLike, NDArray __all__ = ["histogram", "histogramdd", "histogram_bin_edges"] -_BinKind: TypeAlias = L[ +type _BinKind = L[ "stone", "auto", "doane", diff --git a/numpy/lib/_index_tricks_impl.pyi b/numpy/lib/_index_tricks_impl.pyi index ff316f566993..97930196ecfd 100644 --- a/numpy/lib/_index_tricks_impl.pyi +++ b/numpy/lib/_index_tricks_impl.pyi @@ -48,14 +48,8 @@ __all__ = [ # noqa: RUF022 ### -_T = TypeVar("_T") -_TupleT = TypeVar("_TupleT", bound=tuple[Any, ...]) -_ArrayT = TypeVar("_ArrayT", bound=NDArray[Any]) -_DTypeT = TypeVar("_DTypeT", bound=np.dtype) -_ScalarT = TypeVar("_ScalarT", bound=np.generic) _ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, default=Any, covariant=True) _BoolT_co = TypeVar("_BoolT_co", bound=bool, default=bool, covariant=True) - _AxisT_co = TypeVar("_AxisT_co", bound=int, default=L[0], covariant=True) _MatrixT_co = TypeVar("_MatrixT_co", bound=bool, default=L[False], covariant=True) _NDMinT_co = TypeVar("_NDMinT_co", bound=int, default=L[1], covariant=True) @@ -63,9 +57,12 @@ _Trans1DT_co = TypeVar("_Trans1DT_co", bound=int, default=L[-1], covariant=True) ### -class ndenumerate(Generic[_ScalarT_co]): +class ndenumerate(Generic[_ScalarT_co]): # noqa: UP046 @overload - def __init__(self: ndenumerate[_ScalarT], arr: _FiniteNestedSequence[_SupportsArray[np.dtype[_ScalarT]]]) -> 
None: ... + def __init__[ScalarT: np.generic]( + self: ndenumerate[ScalarT], + arr: _FiniteNestedSequence[_SupportsArray[np.dtype[ScalarT]]], + ) -> None: ... @overload def __init__(self: ndenumerate[np.str_], arr: str | _NestedSequence[str]) -> None: ... @overload @@ -156,26 +153,26 @@ class AxisConcatenator(Generic[_AxisT_co, _MatrixT_co, _NDMinT_co, _Trans1DT_co] # Keep in sync with _core.multiarray.concatenate @staticmethod @overload - def concatenate( - arrays: _ArrayLike[_ScalarT], + def concatenate[ScalarT: np.generic]( + arrays: _ArrayLike[ScalarT], /, axis: SupportsIndex | None = 0, out: None = None, *, dtype: None = None, casting: _CastingKind | None = "same_kind", - ) -> NDArray[_ScalarT]: ... + ) -> NDArray[ScalarT]: ... @staticmethod @overload - def concatenate( + def concatenate[ScalarT: np.generic]( arrays: SupportsLenAndGetItem[ArrayLike], /, axis: SupportsIndex | None = 0, out: None = None, *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], casting: _CastingKind | None = "same_kind", - ) -> NDArray[_ScalarT]: ... + ) -> NDArray[ScalarT]: ... @staticmethod @overload def concatenate( @@ -189,26 +186,26 @@ class AxisConcatenator(Generic[_AxisT_co, _MatrixT_co, _NDMinT_co, _Trans1DT_co] ) -> NDArray[Incomplete]: ... @staticmethod @overload - def concatenate( + def concatenate[OutT: np.ndarray]( arrays: SupportsLenAndGetItem[ArrayLike], /, axis: SupportsIndex | None = 0, *, - out: _ArrayT, + out: OutT, dtype: DTypeLike | None = None, casting: _CastingKind | None = "same_kind", - ) -> _ArrayT: ... + ) -> OutT: ... @staticmethod @overload - def concatenate( + def concatenate[OutT: np.ndarray]( arrays: SupportsLenAndGetItem[ArrayLike], /, axis: SupportsIndex | None, - out: _ArrayT, + out: OutT, *, dtype: DTypeLike | None = None, casting: _CastingKind | None = "same_kind", - ) -> _ArrayT: ... + ) -> OutT: ... 
@final class RClass(AxisConcatenator[L[0], L[False], L[1], L[-1]]): @@ -228,14 +225,14 @@ class IndexExpression(Generic[_BoolT_co]): maketuple: _BoolT_co def __init__(self, maketuple: _BoolT_co) -> None: ... @overload - def __getitem__(self, item: _TupleT) -> _TupleT: ... + def __getitem__[TupleT: tuple[Any, ...]](self, item: TupleT) -> TupleT: ... @overload - def __getitem__(self: IndexExpression[L[True]], item: _T) -> tuple[_T]: ... + def __getitem__[T](self: IndexExpression[L[True]], item: T) -> tuple[T]: ... @overload - def __getitem__(self: IndexExpression[L[False]], item: _T) -> _T: ... + def __getitem__[T](self: IndexExpression[L[False]], item: T) -> T: ... @overload -def ix_(*args: _FiniteNestedSequence[_HasDType[_DTypeT]]) -> tuple[np.ndarray[_AnyShape, _DTypeT], ...]: ... +def ix_[DTypeT: np.dtype](*args: _FiniteNestedSequence[_HasDType[DTypeT]]) -> tuple[np.ndarray[_AnyShape, DTypeT], ...]: ... @overload def ix_(*args: str | _NestedSequence[str]) -> tuple[NDArray[np.str_], ...]: ... @overload diff --git a/numpy/lib/_iotools.pyi b/numpy/lib/_iotools.pyi index 3bf41fc0cdde..7baca9c78045 100644 --- a/numpy/lib/_iotools.pyi +++ b/numpy/lib/_iotools.pyi @@ -5,7 +5,6 @@ from typing import ( Final, Literal, TypedDict, - TypeVar, Unpack, overload, type_check_only, @@ -15,8 +14,6 @@ import numpy as np import numpy.typing as npt from numpy._typing._dtype_like import _DTypeLikeNested -_T = TypeVar("_T") - @type_check_only class _NameValidatorKwargs(TypedDict, total=False): excludelist: Iterable[str] | None @@ -46,7 +43,7 @@ class LineSplitter: encoding: str | None = None, ) -> None: ... def __call__(self, /, line: str | bytes) -> list[str]: ... - def autostrip(self, /, method: Callable[[_T], Iterable[str]]) -> Callable[[_T], list[str]]: ... + def autostrip[T](self, /, method: Callable[[T], Iterable[str]]) -> Callable[[T], list[str]]: ... class NameValidator: defaultexcludelist: ClassVar[Sequence[str]] = ... 
diff --git a/numpy/lib/_npyio_impl.pyi b/numpy/lib/_npyio_impl.pyi index 349edd06f57b..f13ee2e7e967 100644 --- a/numpy/lib/_npyio_impl.pyi +++ b/numpy/lib/_npyio_impl.pyi @@ -13,15 +13,14 @@ from typing import ( IO, Any, ClassVar, - Generic, Literal as L, Protocol, Self, - TypeAlias, overload, + override, type_check_only, ) -from typing_extensions import TypeVar, override +from typing_extensions import TypeVar import numpy as np from numpy._core.multiarray import packbits, unpackbits @@ -43,23 +42,20 @@ __all__ = [ "unpackbits", ] -_T = TypeVar("_T") -_T_co = TypeVar("_T_co", covariant=True) -_ScalarT = TypeVar("_ScalarT", bound=np.generic) _ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, default=Any, covariant=True) -_FName: TypeAlias = StrPath | Iterable[str] | Iterable[bytes] -_FNameRead: TypeAlias = StrPath | SupportsRead[str] | SupportsRead[bytes] -_FNameWriteBytes: TypeAlias = StrPath | SupportsWrite[bytes] -_FNameWrite: TypeAlias = _FNameWriteBytes | SupportsWrite[str] +type _FName = StrPath | Iterable[str] | Iterable[bytes] +type _FNameRead = StrPath | SupportsRead[str] | SupportsRead[bytes] +type _FNameWriteBytes = StrPath | SupportsWrite[bytes] +type _FNameWrite = _FNameWriteBytes | SupportsWrite[str] @type_check_only -class _SupportsReadSeek(SupportsRead[_T_co], Protocol[_T_co]): +class _SupportsReadSeek[T](SupportsRead[T], Protocol): def seek(self, offset: int, whence: int, /) -> object: ... -class BagObj(Generic[_T_co]): - def __init__(self, /, obj: SupportsKeysAndGetItem[str, _T_co]) -> None: ... - def __getattribute__(self, key: str, /) -> _T_co: ... +class BagObj[T]: + def __init__(self, /, obj: SupportsKeysAndGetItem[str, T]) -> None: ... + def __getattribute__(self, key: str, /) -> T: ... def __dir__(self) -> list[str]: ... class NpzFile(Mapping[str, NDArray[_ScalarT_co]]): @@ -98,7 +94,7 @@ class NpzFile(Mapping[str, NDArray[_ScalarT_co]]): @overload def get(self, key: str, default: None = None, /) -> NDArray[_ScalarT_co] | None: ... 
@overload - def get(self, key: str, default: NDArray[_ScalarT_co] | _T, /) -> NDArray[_ScalarT_co] | _T: ... # pyright: ignore[reportIncompatibleMethodOverride] + def get[T](self, key: str, default: NDArray[_ScalarT_co] | T, /) -> NDArray[_ScalarT_co] | T: ... # pyright: ignore[reportIncompatibleMethodOverride] # def close(self) -> None: ... @@ -139,9 +135,9 @@ def loadtxt( like: _SupportsArrayFunc | None = None, ) -> NDArray[np.float64]: ... @overload -def loadtxt( +def loadtxt[ScalarT: np.generic]( fname: _FName, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], comments: str | Sequence[str] | None = "#", delimiter: str | None = None, converters: Mapping[int | str, Callable[[str], Any]] | Callable[[str], Any] | None = None, @@ -154,7 +150,7 @@ def loadtxt( *, quotechar: str | None = None, like: _SupportsArrayFunc | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def loadtxt( fname: _FName, @@ -186,12 +182,12 @@ def savetxt( ) -> None: ... @overload -def fromregex( +def fromregex[ScalarT: np.generic]( file: _FNameRead, regexp: str | bytes | Pattern[Any], - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], encoding: str | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def fromregex( file: _FNameRead, @@ -230,9 +226,9 @@ def genfromtxt( like: _SupportsArrayFunc | None = None, ) -> NDArray[Any]: ... @overload -def genfromtxt( +def genfromtxt[ScalarT: np.generic]( fname: _FName, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], comments: str = "#", delimiter: str | int | Iterable[int] | None = None, skip_header: int = 0, @@ -257,7 +253,7 @@ def genfromtxt( *, ndmin: L[0, 1, 2] = 0, like: _SupportsArrayFunc | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... 
@overload def genfromtxt( fname: _FName, diff --git a/numpy/lib/_polynomial_impl.pyi b/numpy/lib/_polynomial_impl.pyi index 9c02a7f867c5..4899b868071c 100644 --- a/numpy/lib/_polynomial_impl.pyi +++ b/numpy/lib/_polynomial_impl.pyi @@ -1,13 +1,4 @@ -from typing import ( - Any, - Literal as L, - NoReturn, - SupportsIndex, - SupportsInt, - TypeAlias, - TypeVar, - overload, -) +from typing import Any, Literal as L, NoReturn, SupportsIndex, SupportsInt, overload import numpy as np from numpy import ( @@ -33,16 +24,10 @@ from numpy._typing import ( _ArrayLikeUInt_co, ) -_T = TypeVar("_T") +type _2Tup[T] = tuple[T, T] +type _5Tup[T] = tuple[T, NDArray[float64], NDArray[int32], NDArray[float64], NDArray[float64]] -_2Tup: TypeAlias = tuple[_T, _T] -_5Tup: TypeAlias = tuple[ - _T, - NDArray[float64], - NDArray[int32], - NDArray[float64], - NDArray[float64], -] +### __all__ = [ "poly", diff --git a/numpy/lib/_shape_base_impl.pyi b/numpy/lib/_shape_base_impl.pyi index ec4cd2c595ac..8037a01ac998 100644 --- a/numpy/lib/_shape_base_impl.pyi +++ b/numpy/lib/_shape_base_impl.pyi @@ -1,14 +1,5 @@ from collections.abc import Callable, Sequence -from typing import ( - Any, - Concatenate, - ParamSpec, - Protocol, - SupportsIndex, - TypeVar, - overload, - type_check_only, -) +from typing import Any, Concatenate, Protocol, SupportsIndex, overload, type_check_only from typing_extensions import deprecated import numpy as np @@ -16,7 +7,6 @@ from numpy import ( _CastingKind, complexfloating, floating, - generic, integer, object_, signedinteger, @@ -55,9 +45,6 @@ __all__ = [ "put_along_axis", ] -_P = ParamSpec("_P") -_ScalarT = TypeVar("_ScalarT", bound=generic) - # Signature of `__array_wrap__` @type_check_only class _ArrayWrap(Protocol): @@ -76,52 +63,46 @@ class _SupportsArrayWrap(Protocol): ### -def take_along_axis( - arr: _ScalarT | NDArray[_ScalarT], +def take_along_axis[ScalarT: np.generic]( + arr: ScalarT | NDArray[ScalarT], indices: NDArray[integer], axis: int | None = -1, -) -> 
NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... -def put_along_axis( - arr: NDArray[_ScalarT], +def put_along_axis[ScalarT: np.generic]( + arr: NDArray[ScalarT], indices: NDArray[integer], values: ArrayLike, axis: int | None, ) -> None: ... @overload -def apply_along_axis( - func1d: Callable[Concatenate[NDArray[Any], _P], _ArrayLike[_ScalarT]], +def apply_along_axis[**Tss, ScalarT: np.generic]( + func1d: Callable[Concatenate[np.ndarray, Tss], _ArrayLike[ScalarT]], axis: SupportsIndex, arr: ArrayLike, - *args: _P.args, - **kwargs: _P.kwargs, -) -> NDArray[_ScalarT]: ... + *args: Tss.args, + **kwargs: Tss.kwargs, +) -> NDArray[ScalarT]: ... @overload -def apply_along_axis( - func1d: Callable[Concatenate[NDArray[Any], _P], Any], +def apply_along_axis[**Tss]( + func1d: Callable[Concatenate[NDArray[Any], Tss], Any], axis: SupportsIndex, arr: ArrayLike, - *args: _P.args, - **kwargs: _P.kwargs, + *args: Tss.args, + **kwargs: Tss.kwargs, ) -> NDArray[Any]: ... -def apply_over_axes( - func: Callable[[NDArray[Any], int], NDArray[_ScalarT]], +def apply_over_axes[ScalarT: np.generic]( + func: Callable[[np.ndarray, int], NDArray[ScalarT]], a: ArrayLike, axes: int | Sequence[int], -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload -def expand_dims( - a: _ArrayLike[_ScalarT], - axis: _ShapeLike, -) -> NDArray[_ScalarT]: ... +def expand_dims[ScalarT: np.generic](a: _ArrayLike[ScalarT], axis: _ShapeLike) -> NDArray[ScalarT]: ... @overload -def expand_dims( - a: ArrayLike, - axis: _ShapeLike, -) -> NDArray[Any]: ... +def expand_dims(a: ArrayLike, axis: _ShapeLike) -> NDArray[Any]: ... # Deprecated in NumPy 2.0, 2023-08-18 @deprecated("`row_stack` alias is deprecated. Use `np.vstack` directly.") @@ -134,22 +115,22 @@ def row_stack( # keep in sync with `numpy.ma.extras.column_stack` @overload -def column_stack(tup: Sequence[_ArrayLike[_ScalarT]]) -> NDArray[_ScalarT]: ... 
+def column_stack[ScalarT: np.generic](tup: Sequence[_ArrayLike[ScalarT]]) -> NDArray[ScalarT]: ... @overload def column_stack(tup: Sequence[ArrayLike]) -> NDArray[Any]: ... # keep in sync with `numpy.ma.extras.dstack` @overload -def dstack(tup: Sequence[_ArrayLike[_ScalarT]]) -> NDArray[_ScalarT]: ... +def dstack[ScalarT: np.generic](tup: Sequence[_ArrayLike[ScalarT]]) -> NDArray[ScalarT]: ... @overload def dstack(tup: Sequence[ArrayLike]) -> NDArray[Any]: ... @overload -def array_split( - ary: _ArrayLike[_ScalarT], +def array_split[ScalarT: np.generic]( + ary: _ArrayLike[ScalarT], indices_or_sections: _ShapeLike, axis: SupportsIndex = 0, -) -> list[NDArray[_ScalarT]]: ... +) -> list[NDArray[ScalarT]]: ... @overload def array_split( ary: ArrayLike, @@ -158,11 +139,11 @@ def array_split( ) -> list[NDArray[Any]]: ... @overload -def split( - ary: _ArrayLike[_ScalarT], +def split[ScalarT: np.generic]( + ary: _ArrayLike[ScalarT], indices_or_sections: _ShapeLike, axis: SupportsIndex = 0, -) -> list[NDArray[_ScalarT]]: ... +) -> list[NDArray[ScalarT]]: ... @overload def split( ary: ArrayLike, @@ -172,10 +153,10 @@ def split( # keep in sync with `numpy.ma.extras.hsplit` @overload -def hsplit( - ary: _ArrayLike[_ScalarT], +def hsplit[ScalarT: np.generic]( + ary: _ArrayLike[ScalarT], indices_or_sections: _ShapeLike, -) -> list[NDArray[_ScalarT]]: ... +) -> list[NDArray[ScalarT]]: ... @overload def hsplit( ary: ArrayLike, @@ -183,10 +164,10 @@ def hsplit( ) -> list[NDArray[Any]]: ... @overload -def vsplit( - ary: _ArrayLike[_ScalarT], +def vsplit[ScalarT: np.generic]( + ary: _ArrayLike[ScalarT], indices_or_sections: _ShapeLike, -) -> list[NDArray[_ScalarT]]: ... +) -> list[NDArray[ScalarT]]: ... @overload def vsplit( ary: ArrayLike, @@ -194,10 +175,10 @@ def vsplit( ) -> list[NDArray[Any]]: ... @overload -def dsplit( - ary: _ArrayLike[_ScalarT], +def dsplit[ScalarT: np.generic]( + ary: _ArrayLike[ScalarT], indices_or_sections: _ShapeLike, -) -> list[NDArray[_ScalarT]]: ... 
+) -> list[NDArray[ScalarT]]: ... @overload def dsplit( ary: ArrayLike, @@ -225,10 +206,10 @@ def kron(a: _ArrayLikeObject_co, b: Any) -> NDArray[object_]: ... def kron(a: Any, b: _ArrayLikeObject_co) -> NDArray[object_]: ... @overload -def tile( - A: _ArrayLike[_ScalarT], +def tile[ScalarT: np.generic]( + A: _ArrayLike[ScalarT], reps: int | Sequence[int], -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def tile( A: ArrayLike, diff --git a/numpy/lib/_stride_tricks_impl.pyi b/numpy/lib/_stride_tricks_impl.pyi index 008f2d544414..77b9d60b9d7f 100644 --- a/numpy/lib/_stride_tricks_impl.pyi +++ b/numpy/lib/_stride_tricks_impl.pyi @@ -1,13 +1,11 @@ from collections.abc import Iterable -from typing import Any, SupportsIndex, TypeVar, overload +from typing import Any, SupportsIndex, overload -from numpy import generic +import numpy as np from numpy._typing import ArrayLike, NDArray, _AnyShape, _ArrayLike, _ShapeLike __all__ = ["broadcast_to", "broadcast_arrays", "broadcast_shapes"] -_ScalarT = TypeVar("_ScalarT", bound=generic) - class DummyArray: __array_interface__: dict[str, Any] base: NDArray[Any] | None @@ -18,13 +16,13 @@ class DummyArray: ) -> None: ... @overload -def as_strided( - x: _ArrayLike[_ScalarT], +def as_strided[ScalarT: np.generic]( + x: _ArrayLike[ScalarT], shape: Iterable[int] | None = None, strides: Iterable[int] | None = None, subok: bool = False, writeable: bool = True, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def as_strided( x: ArrayLike, @@ -35,14 +33,14 @@ def as_strided( ) -> NDArray[Any]: ... @overload -def sliding_window_view( - x: _ArrayLike[_ScalarT], +def sliding_window_view[ScalarT: np.generic]( + x: _ArrayLike[ScalarT], window_shape: int | Iterable[int], axis: SupportsIndex | None = None, *, subok: bool = False, writeable: bool = False, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... 
@overload def sliding_window_view( x: ArrayLike, @@ -54,11 +52,11 @@ def sliding_window_view( ) -> NDArray[Any]: ... @overload -def broadcast_to( - array: _ArrayLike[_ScalarT], +def broadcast_to[ScalarT: np.generic]( + array: _ArrayLike[ScalarT], shape: int | Iterable[int], subok: bool = False, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def broadcast_to( array: ArrayLike, diff --git a/numpy/lib/_twodim_base_impl.pyi b/numpy/lib/_twodim_base_impl.pyi index 58582119429a..63f5f4cdc9c0 100644 --- a/numpy/lib/_twodim_base_impl.pyi +++ b/numpy/lib/_twodim_base_impl.pyi @@ -1,15 +1,6 @@ from _typeshed import Incomplete from collections.abc import Callable, Sequence -from typing import ( - Any, - Literal as L, - Never, - Protocol, - TypeAlias, - TypeVar, - overload, - type_check_only, -) +from typing import Any, Literal as L, Never, Protocol, overload, type_check_only import numpy as np from numpy import _OrderCF @@ -45,37 +36,28 @@ __all__ = [ ### -_T = TypeVar("_T") -_ArrayT = TypeVar("_ArrayT", bound=np.ndarray) -_ScalarT = TypeVar("_ScalarT", bound=np.generic) -_ComplexT = TypeVar("_ComplexT", bound=np.complexfloating) -_InexactT = TypeVar("_InexactT", bound=np.inexact) -_NumberT = TypeVar("_NumberT", bound=np.number) -_NumberObjectT = TypeVar("_NumberObjectT", bound=np.number | np.object_) -_NumberCoT = TypeVar("_NumberCoT", bound=_Number_co) - -_Int_co: TypeAlias = np.integer | np.bool -_Float_co: TypeAlias = np.floating | _Int_co -_Number_co: TypeAlias = np.number | np.bool +type _Int_co = np.integer | np.bool +type _Float_co = np.floating | _Int_co +type _Number_co = np.number | np.bool -_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] -_Array2D: TypeAlias = np.ndarray[tuple[int, int], np.dtype[_ScalarT]] +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] +type _Array2D[ScalarT: np.generic] = np.ndarray[tuple[int, int], np.dtype[ScalarT]] # Workaround for mypy's and pyright's lack of compliance 
with the typing spec for # overloads for gradual types. This works because only `Any` and `Never` are assignable # to `Never`. -_ArrayNoD: TypeAlias = np.ndarray[tuple[Never] | tuple[Never, Never], np.dtype[_ScalarT]] +type _ArrayNoD[ScalarT: np.generic] = np.ndarray[tuple[Never] | tuple[Never, Never], np.dtype[ScalarT]] -_ArrayLike1D: TypeAlias = _SupportsArray[np.dtype[_ScalarT]] | Sequence[_ScalarT] -_ArrayLike1DInt_co: TypeAlias = _SupportsArray[np.dtype[_Int_co]] | Sequence[int | _Int_co] -_ArrayLike1DFloat_co: TypeAlias = _SupportsArray[np.dtype[_Float_co]] | Sequence[float | _Float_co] -_ArrayLike2DFloat_co: TypeAlias = _SupportsArray[np.dtype[_Float_co]] | Sequence[_ArrayLike1DFloat_co] -_ArrayLike1DNumber_co: TypeAlias = _SupportsArray[np.dtype[_Number_co]] | Sequence[complex | _Number_co] +type _ArrayLike1D[ScalarT: np.generic] = _SupportsArray[np.dtype[ScalarT]] | Sequence[ScalarT] +type _ArrayLike1DInt_co = _SupportsArray[np.dtype[_Int_co]] | Sequence[int | _Int_co] +type _ArrayLike1DFloat_co = _SupportsArray[np.dtype[_Float_co]] | Sequence[float | _Float_co] +type _ArrayLike2DFloat_co = _SupportsArray[np.dtype[_Float_co]] | Sequence[_ArrayLike1DFloat_co] +type _ArrayLike1DNumber_co = _SupportsArray[np.dtype[_Number_co]] | Sequence[complex | _Number_co] # The returned arrays dtype must be compatible with `np.equal` -_MaskFunc: TypeAlias = Callable[[NDArray[np.int_], _T], NDArray[_Number_co | np.timedelta64 | np.datetime64 | np.object_]] +type _MaskFunc[_T] = Callable[[NDArray[np.int_], _T], NDArray[_Number_co | np.timedelta64 | np.datetime64 | np.object_]] -_Indices2D: TypeAlias = tuple[_Array1D[np.intp], _Array1D[np.intp]] -_Histogram2D: TypeAlias = tuple[_Array1D[np.float64], _Array1D[_ScalarT], _Array1D[_ScalarT]] +type _Indices2D = tuple[_Array1D[np.intp], _Array1D[np.intp]] +type _Histogram2D[ScalarT: np.generic] = tuple[_Array1D[np.float64], _Array1D[ScalarT], _Array1D[ScalarT]] @type_check_only class _HasShapeAndNDim(Protocol): @@ -88,17 +70,17 
@@ class _HasShapeAndNDim(Protocol): # keep in sync with `flipud` @overload -def fliplr(m: _ArrayT) -> _ArrayT: ... +def fliplr[ArrayT: np.ndarray](m: ArrayT) -> ArrayT: ... @overload -def fliplr(m: _ArrayLike[_ScalarT]) -> NDArray[_ScalarT]: ... +def fliplr[ScalarT: np.generic](m: _ArrayLike[ScalarT]) -> NDArray[ScalarT]: ... @overload def fliplr(m: ArrayLike) -> NDArray[Any]: ... # keep in sync with `fliplr` @overload -def flipud(m: _ArrayT) -> _ArrayT: ... +def flipud[ArrayT: np.ndarray](m: ArrayT) -> ArrayT: ... @overload -def flipud(m: _ArrayLike[_ScalarT]) -> NDArray[_ScalarT]: ... +def flipud[ScalarT: np.generic](m: _ArrayLike[ScalarT]) -> NDArray[ScalarT]: ... @overload def flipud(m: ArrayLike) -> NDArray[Any]: ... @@ -115,27 +97,27 @@ def eye( like: _SupportsArrayFunc | None = None, ) -> _Array2D[np.float64]: ... @overload -def eye( +def eye[ScalarT: np.generic]( N: int, M: int | None, k: int, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], order: _OrderCF = "C", *, device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, -) -> _Array2D[_ScalarT]: ... +) -> _Array2D[ScalarT]: ... @overload -def eye( +def eye[ScalarT: np.generic]( N: int, M: int | None = None, k: int = 0, *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], order: _OrderCF = "C", device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, -) -> _Array2D[_ScalarT]: ... +) -> _Array2D[ScalarT]: ... @overload def eye( N: int, @@ -150,23 +132,23 @@ def eye( # @overload -def diag(v: _ArrayNoD[_ScalarT] | Sequence[Sequence[_ScalarT]], k: int = 0) -> NDArray[_ScalarT]: ... +def diag[ScalarT: np.generic](v: _ArrayNoD[ScalarT] | Sequence[Sequence[ScalarT]], k: int = 0) -> NDArray[ScalarT]: ... @overload -def diag(v: _Array2D[_ScalarT] | Sequence[Sequence[_ScalarT]], k: int = 0) -> _Array1D[_ScalarT]: ... +def diag[ScalarT: np.generic](v: _Array2D[ScalarT] | Sequence[Sequence[ScalarT]], k: int = 0) -> _Array1D[ScalarT]: ... 
@overload -def diag(v: _Array1D[_ScalarT] | Sequence[_ScalarT], k: int = 0) -> _Array2D[_ScalarT]: ... +def diag[ScalarT: np.generic](v: _Array1D[ScalarT] | Sequence[ScalarT], k: int = 0) -> _Array2D[ScalarT]: ... @overload def diag(v: Sequence[Sequence[_ScalarLike_co]], k: int = 0) -> _Array1D[Incomplete]: ... @overload def diag(v: Sequence[_ScalarLike_co], k: int = 0) -> _Array2D[Incomplete]: ... @overload -def diag(v: _ArrayLike[_ScalarT], k: int = 0) -> NDArray[_ScalarT]: ... +def diag[ScalarT: np.generic](v: _ArrayLike[ScalarT], k: int = 0) -> NDArray[ScalarT]: ... @overload def diag(v: ArrayLike, k: int = 0) -> NDArray[Incomplete]: ... # keep in sync with `numpy.ma.extras.diagflat` @overload -def diagflat(v: _ArrayLike[_ScalarT], k: int = 0) -> _Array2D[_ScalarT]: ... +def diagflat[ScalarT: np.generic](v: _ArrayLike[ScalarT], k: int = 0) -> _Array2D[ScalarT]: ... @overload def diagflat(v: ArrayLike, k: int = 0) -> _Array2D[Incomplete]: ... @@ -181,23 +163,23 @@ def tri( like: _SupportsArrayFunc | None = None ) -> _Array2D[np.float64]: ... @overload -def tri( +def tri[ScalarT: np.generic]( N: int, M: int | None, k: int, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], *, like: _SupportsArrayFunc | None = None -) -> _Array2D[_ScalarT]: ... +) -> _Array2D[ScalarT]: ... @overload -def tri( +def tri[ScalarT: np.generic]( N: int, M: int | None = None, k: int = 0, *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], like: _SupportsArrayFunc | None = None -) -> _Array2D[_ScalarT]: ... +) -> _Array2D[ScalarT]: ... @overload def tri( N: int, @@ -210,23 +192,23 @@ def tri( # keep in sync with `triu` @overload -def tril(m: _ArrayT, k: int = 0) -> _ArrayT: ... +def tril[ArrayT: np.ndarray](m: ArrayT, k: int = 0) -> ArrayT: ... @overload -def tril(m: _ArrayLike[_ScalarT], k: int = 0) -> NDArray[_ScalarT]: ... +def tril[ScalarT: np.generic](m: _ArrayLike[ScalarT], k: int = 0) -> NDArray[ScalarT]: ... 
@overload def tril(m: ArrayLike, k: int = 0) -> NDArray[Any]: ... # keep in sync with `tril` @overload -def triu(m: _ArrayT, k: int = 0) -> _ArrayT: ... +def triu[ArrayT: np.ndarray](m: ArrayT, k: int = 0) -> ArrayT: ... @overload -def triu(m: _ArrayLike[_ScalarT], k: int = 0) -> NDArray[_ScalarT]: ... +def triu[ScalarT: np.generic](m: _ArrayLike[ScalarT], k: int = 0) -> NDArray[ScalarT]: ... @overload def triu(m: ArrayLike, k: int = 0) -> NDArray[Any]: ... # we use `list` (invariant) instead of `Sequence` (covariant) to avoid overlap @overload -def vander(x: _ArrayLike1D[_NumberObjectT], N: int | None = None, increasing: bool = False) -> _Array2D[_NumberObjectT]: ... +def vander[ScalarT: np.number | np.object_](x: _ArrayLike1D[ScalarT], N: int | None = None, increasing: bool = False) -> _Array2D[ScalarT]: ... @overload def vander(x: _ArrayLike1D[np.bool] | list[int], N: int | None = None, increasing: bool = False) -> _Array2D[np.int_]: ... @overload @@ -238,41 +220,41 @@ def vander(x: Sequence[_NumberLike_co], N: int | None = None, increasing: bool = # @overload -def histogram2d( - x: _ArrayLike1D[_ComplexT], - y: _ArrayLike1D[_ComplexT | _Float_co], +def histogram2d[ScalarT: np.complexfloating]( + x: _ArrayLike1D[ScalarT], + y: _ArrayLike1D[ScalarT | _Float_co], bins: int | Sequence[int] = 10, range: _ArrayLike2DFloat_co | None = None, density: bool | None = None, weights: _ArrayLike1DFloat_co | None = None, -) -> _Histogram2D[_ComplexT]: ... +) -> _Histogram2D[ScalarT]: ... @overload -def histogram2d( - x: _ArrayLike1D[_ComplexT | _Float_co], - y: _ArrayLike1D[_ComplexT], +def histogram2d[ScalarT: np.complexfloating]( + x: _ArrayLike1D[ScalarT | _Float_co], + y: _ArrayLike1D[ScalarT], bins: int | Sequence[int] = 10, range: _ArrayLike2DFloat_co | None = None, density: bool | None = None, weights: _ArrayLike1DFloat_co | None = None, -) -> _Histogram2D[_ComplexT]: ... +) -> _Histogram2D[ScalarT]: ... 
@overload -def histogram2d( - x: _ArrayLike1D[_InexactT], - y: _ArrayLike1D[_InexactT | _Int_co], +def histogram2d[ScalarT: np.inexact]( + x: _ArrayLike1D[ScalarT], + y: _ArrayLike1D[ScalarT | _Int_co], bins: int | Sequence[int] = 10, range: _ArrayLike2DFloat_co | None = None, density: bool | None = None, weights: _ArrayLike1DFloat_co | None = None, -) -> _Histogram2D[_InexactT]: ... +) -> _Histogram2D[ScalarT]: ... @overload -def histogram2d( - x: _ArrayLike1D[_InexactT | _Int_co], - y: _ArrayLike1D[_InexactT], +def histogram2d[ScalarT: np.inexact]( + x: _ArrayLike1D[ScalarT | _Int_co], + y: _ArrayLike1D[ScalarT], bins: int | Sequence[int] = 10, range: _ArrayLike2DFloat_co | None = None, density: bool | None = None, weights: _ArrayLike1DFloat_co | None = None, -) -> _Histogram2D[_InexactT]: ... +) -> _Histogram2D[ScalarT]: ... @overload def histogram2d( x: _ArrayLike1DInt_co | Sequence[float], @@ -292,41 +274,41 @@ def histogram2d( weights: _ArrayLike1DFloat_co | None = None, ) -> _Histogram2D[np.complex128 | Any]: ... @overload -def histogram2d( +def histogram2d[ScalarT: _Number_co]( x: _ArrayLike1DNumber_co, y: _ArrayLike1DNumber_co, - bins: _ArrayLike1D[_NumberCoT] | Sequence[_ArrayLike1D[_NumberCoT]], + bins: _ArrayLike1D[ScalarT] | Sequence[_ArrayLike1D[ScalarT]], range: _ArrayLike2DFloat_co | None = None, density: bool | None = None, weights: _ArrayLike1DFloat_co | None = None, -) -> _Histogram2D[_NumberCoT]: ... +) -> _Histogram2D[ScalarT]: ... @overload -def histogram2d( - x: _ArrayLike1D[_InexactT], - y: _ArrayLike1D[_InexactT], - bins: Sequence[_ArrayLike1D[_NumberCoT] | int], +def histogram2d[ScalarT: np.inexact, BinsScalarT: _Number_co]( + x: _ArrayLike1D[ScalarT], + y: _ArrayLike1D[ScalarT], + bins: Sequence[_ArrayLike1D[BinsScalarT] | int], range: _ArrayLike2DFloat_co | None = None, density: bool | None = None, weights: _ArrayLike1DFloat_co | None = None, -) -> _Histogram2D[_InexactT | _NumberCoT]: ... +) -> _Histogram2D[ScalarT | BinsScalarT]: ... 
@overload -def histogram2d( - x: _ArrayLike1D[_InexactT], - y: _ArrayLike1D[_InexactT], +def histogram2d[ScalarT: np.inexact]( + x: _ArrayLike1D[ScalarT], + y: _ArrayLike1D[ScalarT], bins: Sequence[_ArrayLike1DNumber_co | int], range: _ArrayLike2DFloat_co | None = None, density: bool | None = None, weights: _ArrayLike1DFloat_co | None = None, -) -> _Histogram2D[_InexactT | Any]: ... +) -> _Histogram2D[ScalarT | Any]: ... @overload -def histogram2d( +def histogram2d[ScalarT: _Number_co]( x: _ArrayLike1DInt_co | Sequence[float], y: _ArrayLike1DInt_co | Sequence[float], - bins: Sequence[_ArrayLike1D[_NumberCoT] | int], + bins: Sequence[_ArrayLike1D[ScalarT] | int], range: _ArrayLike2DFloat_co | None = None, density: bool | None = None, weights: _ArrayLike1DFloat_co | None = None, -) -> _Histogram2D[np.float64 | _NumberCoT]: ... +) -> _Histogram2D[np.float64 | ScalarT]: ... @overload def histogram2d( x: _ArrayLike1DInt_co | Sequence[float], @@ -337,14 +319,14 @@ def histogram2d( weights: _ArrayLike1DFloat_co | None = None, ) -> _Histogram2D[np.float64 | Any]: ... @overload -def histogram2d( +def histogram2d[ScalarT: _Number_co]( x: Sequence[complex], y: Sequence[complex], - bins: Sequence[_ArrayLike1D[_NumberCoT] | int], + bins: Sequence[_ArrayLike1D[ScalarT] | int], range: _ArrayLike2DFloat_co | None = None, density: bool | None = None, weights: _ArrayLike1DFloat_co | None = None, -) -> _Histogram2D[np.complex128 | _NumberCoT]: ... +) -> _Histogram2D[np.complex128 | ScalarT]: ... @overload def histogram2d( x: Sequence[complex], @@ -397,7 +379,7 @@ def histogram2d( @overload def mask_indices(n: int, mask_func: _MaskFunc[int], k: int = 0) -> _Indices2D: ... @overload -def mask_indices(n: int, mask_func: _MaskFunc[_T], k: _T) -> _Indices2D: ... +def mask_indices[T](n: int, mask_func: _MaskFunc[T], k: T) -> _Indices2D: ... # def tril_indices(n: int, k: int = 0, m: int | None = None) -> _Indices2D: ... 
diff --git a/numpy/lib/_type_check_impl.pyi b/numpy/lib/_type_check_impl.pyi index dec5fc1dfaa4..b75f010d3550 100644 --- a/numpy/lib/_type_check_impl.pyi +++ b/numpy/lib/_type_check_impl.pyi @@ -1,7 +1,6 @@ from _typeshed import Incomplete from collections.abc import Container, Iterable -from typing import Any, Literal as L, Protocol, TypeAlias, overload, type_check_only -from typing_extensions import TypeVar +from typing import Any, Literal as L, Protocol, overload, type_check_only import numpy as np from numpy._typing import ( @@ -30,33 +29,28 @@ __all__ = [ "typename", ] -_T = TypeVar("_T") -_T_co = TypeVar("_T_co", covariant=True) -_ScalarT = TypeVar("_ScalarT", bound=np.generic) -_ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, covariant=True) -_RealT = TypeVar("_RealT", bound=np.floating | np.integer | np.bool) - -_FloatMax32: TypeAlias = np.float32 | np.float16 -_ComplexMax128: TypeAlias = np.complex128 | np.complex64 -_RealMax64: TypeAlias = np.float64 | np.float32 | np.float16 | np.integer -_Real: TypeAlias = np.floating | np.integer -_InexactMax32: TypeAlias = np.inexact[_32Bit] | np.float16 -_NumberMax64: TypeAlias = np.number[_64Bit] | np.number[_32Bit] | np.number[_16Bit] | np.integer +type _FloatMax32 = np.float32 | np.float16 +type _ComplexMax128 = np.complex128 | np.complex64 +type _RealMax64 = np.float64 | np.float32 | np.float16 | np.integer +type _Real = np.floating | np.integer +type _ToReal = _Real | np.bool +type _InexactMax32 = np.inexact[_32Bit] | np.float16 +type _NumberMax64 = np.number[_64Bit] | np.number[_32Bit] | np.number[_16Bit] | np.integer @type_check_only -class _HasReal(Protocol[_T_co]): +class _HasReal[T](Protocol): @property - def real(self, /) -> _T_co: ... + def real(self, /) -> T: ... @type_check_only -class _HasImag(Protocol[_T_co]): +class _HasImag[T](Protocol): @property - def imag(self, /) -> _T_co: ... + def imag(self, /) -> T: ... 
@type_check_only -class _HasDType(Protocol[_ScalarT_co]): +class _HasDType[ScalarT: np.generic](Protocol): @property - def dtype(self, /) -> np.dtype[_ScalarT_co]: ... + def dtype(self, /) -> np.dtype[ScalarT]: ... ### @@ -64,17 +58,17 @@ def mintypecode(typechars: Iterable[str | ArrayLike], typeset: str | Container[s # @overload -def real(val: _HasReal[_T]) -> _T: ... +def real[T](val: _HasReal[T]) -> T: ... @overload -def real(val: _ArrayLike[_RealT]) -> NDArray[_RealT]: ... +def real[RealT: _ToReal](val: _ArrayLike[RealT]) -> NDArray[RealT]: ... @overload def real(val: ArrayLike) -> NDArray[Any]: ... # @overload -def imag(val: _HasImag[_T]) -> _T: ... +def imag[T](val: _HasImag[T]) -> T: ... @overload -def imag(val: _ArrayLike[_RealT]) -> NDArray[_RealT]: ... +def imag[RealT: _ToReal](val: _ArrayLike[RealT]) -> NDArray[RealT]: ... @overload def imag(val: ArrayLike) -> NDArray[Any]: ... @@ -100,29 +94,29 @@ def isrealobj(x: _HasDType[Any] | ArrayLike) -> bool: ... # @overload -def nan_to_num( - x: _ScalarT, +def nan_to_num[ScalarT: np.generic]( + x: ScalarT, copy: bool = True, nan: float = 0.0, posinf: float | None = None, neginf: float | None = None, -) -> _ScalarT: ... +) -> ScalarT: ... @overload -def nan_to_num( - x: NDArray[_ScalarT] | _NestedSequence[_ArrayLike[_ScalarT]], +def nan_to_num[ScalarT: np.generic]( + x: NDArray[ScalarT] | _NestedSequence[_ArrayLike[ScalarT]], copy: bool = True, nan: float = 0.0, posinf: float | None = None, neginf: float | None = None, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload -def nan_to_num( - x: _SupportsArray[np.dtype[_ScalarT]], +def nan_to_num[ScalarT: np.generic]( + x: _SupportsArray[np.dtype[ScalarT]], copy: bool = True, nan: float = 0.0, posinf: float | None = None, neginf: float | None = None, -) -> _ScalarT | NDArray[_ScalarT]: ... +) -> ScalarT | NDArray[ScalarT]: ... 
 @overload
 def nan_to_num(
     x: _NestedSequence[ArrayLike],
@@ -143,12 +137,12 @@ def nan_to_num(
 # NOTE: The [overload-overlap] mypy error is a false positive
 @overload
 def real_if_close(a: _ArrayLike[np.complex64], tol: float = 100) -> NDArray[np.float32 | np.complex64]: ...
 @overload
 def real_if_close(a: _ArrayLike[np.complex128], tol: float = 100) -> NDArray[np.float64 | np.complex128]: ...
 @overload
 def real_if_close(a: _ArrayLike[np.clongdouble], tol: float = 100) -> NDArray[np.longdouble | np.clongdouble]: ...
 @overload
-def real_if_close(a: _ArrayLike[_RealT], tol: float = 100) -> NDArray[_RealT]: ...
+def real_if_close[RealT: _ToReal](a: _ArrayLike[RealT], tol: float = 100) -> NDArray[RealT]: ...
 @overload
 def real_if_close(a: ArrayLike, tol: float = 100) -> NDArray[Any]: ...
 
diff --git a/numpy/lib/_ufunclike_impl.pyi b/numpy/lib/_ufunclike_impl.pyi
index c16ce811e28a..d48557a7b5d7 100644
--- a/numpy/lib/_ufunclike_impl.pyi
+++ b/numpy/lib/_ufunclike_impl.pyi
@@ -1,8 +1,7 @@
-from typing import Any, TypeVar, overload
+from typing import overload
 from typing_extensions import deprecated
 
 import numpy as np
-from numpy import floating, object_
 from numpy._typing import (
     NDArray,
     _ArrayLikeFloat_co,
@@ -12,49 +11,31 @@ from numpy._typing import (
 
 __all__ = ["fix", "isneginf", "isposinf"]
 
-_ArrayT = TypeVar("_ArrayT", bound=NDArray[Any])
-
 @overload
 @deprecated("np.fix will be deprecated in NumPy 2.5 in favor of np.trunc", category=PendingDeprecationWarning)
-def fix(x: _FloatLike_co, out: None = None) -> floating: ...
+def fix(x: _FloatLike_co, out: None = None) -> np.floating: ...
 @overload
 @deprecated("np.fix will be deprecated in NumPy 2.5 in favor of np.trunc", category=PendingDeprecationWarning)
-def fix(x: _ArrayLikeFloat_co, out: None = None) -> NDArray[floating]: ...
+def fix(x: _ArrayLikeFloat_co, out: None = None) -> NDArray[np.floating]: ...
@overload @deprecated("np.fix will be deprecated in NumPy 2.5 in favor of np.trunc", category=PendingDeprecationWarning) -def fix(x: _ArrayLikeObject_co, out: None = None) -> NDArray[object_]: ... +def fix(x: _ArrayLikeObject_co, out: None = None) -> NDArray[np.object_]: ... @overload @deprecated("np.fix will be deprecated in NumPy 2.5 in favor of np.trunc", category=PendingDeprecationWarning) -def fix(x: _ArrayLikeFloat_co | _ArrayLikeObject_co, out: _ArrayT) -> _ArrayT: ... +def fix[ArrayT: np.ndarray](x: _ArrayLikeFloat_co | _ArrayLikeObject_co, out: ArrayT) -> ArrayT: ... +# @overload -def isposinf( - x: _FloatLike_co, - out: None = None, -) -> np.bool: ... +def isposinf(x: _FloatLike_co, out: None = None) -> np.bool: ... @overload -def isposinf( - x: _ArrayLikeFloat_co, - out: None = None, -) -> NDArray[np.bool]: ... +def isposinf(x: _ArrayLikeFloat_co, out: None = None) -> NDArray[np.bool]: ... @overload -def isposinf( - x: _ArrayLikeFloat_co, - out: _ArrayT, -) -> _ArrayT: ... +def isposinf[ArrayT: np.ndarray](x: _ArrayLikeFloat_co, out: ArrayT) -> ArrayT: ... +# @overload -def isneginf( - x: _FloatLike_co, - out: None = None, -) -> np.bool: ... +def isneginf(x: _FloatLike_co, out: None = None) -> np.bool: ... @overload -def isneginf( - x: _ArrayLikeFloat_co, - out: None = None, -) -> NDArray[np.bool]: ... +def isneginf(x: _ArrayLikeFloat_co, out: None = None) -> NDArray[np.bool]: ... @overload -def isneginf( - x: _ArrayLikeFloat_co, - out: _ArrayT, -) -> _ArrayT: ... +def isneginf[ArrayT: np.ndarray](x: _ArrayLikeFloat_co, out: ArrayT) -> ArrayT: ... 
diff --git a/numpy/lib/_user_array_impl.pyi b/numpy/lib/_user_array_impl.pyi index f2e34eacc5a6..661dc97f224c 100644 --- a/numpy/lib/_user_array_impl.pyi +++ b/numpy/lib/_user_array_impl.pyi @@ -1,7 +1,7 @@ from _typeshed import Incomplete from types import EllipsisType -from typing import Any, Generic, Self, SupportsIndex, TypeAlias, overload -from typing_extensions import TypeVar, deprecated, override +from typing import Any, Generic, Self, SupportsIndex, overload, override +from typing_extensions import TypeVar, deprecated import numpy as np import numpy.typing as npt @@ -11,30 +11,25 @@ from numpy._typing import ( _ArrayLikeBool_co, _ArrayLikeInt_co, _DTypeLike, + _Shape, ) ### -_ScalarT = TypeVar("_ScalarT", bound=np.generic) -_ShapeT = TypeVar("_ShapeT", bound=tuple[int, ...]) _ShapeT_co = TypeVar("_ShapeT_co", bound=tuple[int, ...], default=_AnyShape, covariant=True) -_DTypeT = TypeVar("_DTypeT", bound=np.dtype) _DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=True) -_BoolArrayT = TypeVar("_BoolArrayT", bound=container[Any, np.dtype[np.bool]]) # type: ignore[deprecated] -_IntegralArrayT = TypeVar("_IntegralArrayT", bound=container[Any, np.dtype[np.bool | np.integer | np.object_]]) # type: ignore[deprecated] -_RealContainerT = TypeVar( - "_RealContainerT", - bound=container[Any, np.dtype[np.bool | np.integer | np.floating | np.timedelta64 | np.object_]], # type: ignore[deprecated] -) -_NumericContainerT = TypeVar("_NumericContainerT", bound=container[Any, np.dtype[np.number | np.timedelta64 | np.object_]]) # type: ignore[deprecated] +type _ArrayInt_co = npt.NDArray[np.integer | np.bool] -_ArrayInt_co: TypeAlias = npt.NDArray[np.integer | np.bool] +type _BoolContainer = container[Any, np.dtype[np.bool]] +type _IntegralContainer = container[Any, np.dtype[np.bool | np.integer | np.object_]] +type _RealContainer = container[Any, np.dtype[np.bool | np.integer | np.floating | np.timedelta64 | np.object_]] +type _NumericContainer = 
container[Any, np.dtype[np.number | np.timedelta64 | np.object_]] -_ToIndexSlice: TypeAlias = slice | EllipsisType | _ArrayInt_co | None -_ToIndexSlices: TypeAlias = _ToIndexSlice | tuple[_ToIndexSlice, ...] -_ToIndex: TypeAlias = SupportsIndex | _ToIndexSlice -_ToIndices: TypeAlias = _ToIndex | tuple[_ToIndex, ...] +type _ToIndexSlice = slice | EllipsisType | _ArrayInt_co | None +type _ToIndexSlices = _ToIndexSlice | tuple[_ToIndexSlice, ...] +type _ToIndex = SupportsIndex | _ToIndexSlice +type _ToIndices = _ToIndex | tuple[_ToIndex, ...] ### # pyright: reportDeprecated = false @@ -52,19 +47,19 @@ class container(Generic[_ShapeT_co, _DTypeT_co]): copy: bool = True, ) -> None: ... @overload - def __init__( - self: container[Any, np.dtype[_ScalarT]], + def __init__[ScalarT: np.generic]( + self: container[Any, np.dtype[ScalarT]], /, - data: _ArrayLike[_ScalarT], + data: _ArrayLike[ScalarT], dtype: None = None, copy: bool = True, ) -> None: ... @overload - def __init__( - self: container[Any, np.dtype[_ScalarT]], + def __init__[ScalarT: np.generic]( + self: container[Any, np.dtype[ScalarT]], /, data: npt.ArrayLike, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], copy: bool = True, ) -> None: ... @overload @@ -112,20 +107,28 @@ class container(Generic[_ShapeT_co, _DTypeT_co]): # keep in sync with np.ndarray @overload - def __abs__(self: container[_ShapeT, np.dtype[np.complex64]], /) -> container[_ShapeT, np.dtype[np.float32]]: ... + def __abs__[ShapeT: _Shape]( + self: container[ShapeT, np.dtype[np.complex64]], / + ) -> container[ShapeT, np.dtype[np.float32]]: ... @overload - def __abs__(self: container[_ShapeT, np.dtype[np.complex128]], /) -> container[_ShapeT, np.dtype[np.float64]]: ... + def __abs__[ShapeT: _Shape]( + self: container[ShapeT, np.dtype[np.complex128]], / + ) -> container[ShapeT, np.dtype[np.float64]]: ... @overload - def __abs__(self: container[_ShapeT, np.dtype[np.complex192]], /) -> container[_ShapeT, np.dtype[np.float96]]: ... 
+ def __abs__[ShapeT: _Shape]( + self: container[ShapeT, np.dtype[np.complex192]], / + ) -> container[ShapeT, np.dtype[np.float96]]: ... @overload - def __abs__(self: container[_ShapeT, np.dtype[np.complex256]], /) -> container[_ShapeT, np.dtype[np.float128]]: ... + def __abs__[ShapeT: _Shape]( + self: container[ShapeT, np.dtype[np.complex256]], / + ) -> container[ShapeT, np.dtype[np.float128]]: ... @overload - def __abs__(self: _RealContainerT, /) -> _RealContainerT: ... + def __abs__[ContainerT: _RealContainer](self: ContainerT, /) -> ContainerT: ... # - def __neg__(self: _NumericContainerT, /) -> _NumericContainerT: ... # noqa: PYI019 - def __pos__(self: _NumericContainerT, /) -> _NumericContainerT: ... # noqa: PYI019 - def __invert__(self: _IntegralArrayT, /) -> _IntegralArrayT: ... # noqa: PYI019 + def __neg__[ContainerT: _NumericContainer](self: ContainerT, /) -> ContainerT: ... # noqa: PYI019 + def __pos__[ContainerT: _NumericContainer](self: ContainerT, /) -> ContainerT: ... # noqa: PYI019 + def __invert__[ContainerT: _IntegralContainer](self: ContainerT, /) -> ContainerT: ... # noqa: PYI019 # TODO(jorenham): complete these binary ops @@ -170,40 +173,34 @@ class container(Generic[_ShapeT_co, _DTypeT_co]): # @overload - def __and__( - self: container[Any, np.dtype[np.bool]], other: _ArrayLikeBool_co, / - ) -> container[_AnyShape, np.dtype[np.bool]]: ... + def __and__(self: _BoolContainer, other: _ArrayLikeBool_co, /) -> container[_AnyShape, np.dtype[np.bool]]: ... @overload def __and__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.bool | np.integer]]: ... __rand__ = __and__ @overload - def __iand__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... + def __iand__[ContainerT: _BoolContainer](self: ContainerT, other: _ArrayLikeBool_co, /) -> ContainerT: ... @overload def __iand__(self, other: _ArrayLikeInt_co, /) -> Self: ... 
# @overload - def __xor__( - self: container[Any, np.dtype[np.bool]], other: _ArrayLikeBool_co, / - ) -> container[_AnyShape, np.dtype[np.bool]]: ... + def __xor__(self: _BoolContainer, other: _ArrayLikeBool_co, /) -> container[_AnyShape, np.dtype[np.bool]]: ... @overload def __xor__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.bool | np.integer]]: ... __rxor__ = __xor__ @overload - def __ixor__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... + def __ixor__[ContainerT: _BoolContainer](self: ContainerT, other: _ArrayLikeBool_co, /) -> ContainerT: ... @overload def __ixor__(self, other: _ArrayLikeInt_co, /) -> Self: ... # @overload - def __or__( - self: container[Any, np.dtype[np.bool]], other: _ArrayLikeBool_co, / - ) -> container[_AnyShape, np.dtype[np.bool]]: ... + def __or__(self: _BoolContainer, other: _ArrayLikeBool_co, /) -> container[_AnyShape, np.dtype[np.bool]]: ... @overload def __or__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.bool | np.integer]]: ... __ror__ = __or__ @overload - def __ior__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... + def __ior__[ContainerT: _BoolContainer](self: ContainerT, other: _ArrayLikeBool_co, /) -> ContainerT: ... @overload def __ior__(self, other: _ArrayLikeInt_co, /) -> Self: ... @@ -211,16 +208,18 @@ class container(Generic[_ShapeT_co, _DTypeT_co]): @overload def __array__(self, /, t: None = None) -> np.ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __array__(self, /, t: _DTypeT) -> np.ndarray[_ShapeT_co, _DTypeT]: ... + def __array__[DTypeT: np.dtype](self, /, t: DTypeT) -> np.ndarray[_ShapeT_co, DTypeT]: ... # @overload def __array_wrap__(self, arg0: npt.ArrayLike, /) -> container[_ShapeT_co, _DTypeT_co]: ... @overload - def __array_wrap__(self, a: np.ndarray[_ShapeT, _DTypeT], c: Any = ..., s: Any = ..., /) -> container[_ShapeT, _DTypeT]: ... 
+ def __array_wrap__[ShapeT: _Shape, DTypeT: np.dtype]( + self, a: np.ndarray[ShapeT, DTypeT], c: Any = ..., s: Any = ..., / + ) -> container[ShapeT, DTypeT]: ... # def copy(self, /) -> Self: ... def tobytes(self, /) -> bytes: ... def byteswap(self, /) -> Self: ... - def astype(self, /, typecode: _DTypeLike[_ScalarT]) -> container[_ShapeT_co, np.dtype[_ScalarT]]: ... + def astype[ScalarT: np.generic](self, /, typecode: _DTypeLike[ScalarT]) -> container[_ShapeT_co, np.dtype[ScalarT]]: ... diff --git a/numpy/lib/_utils_impl.pyi b/numpy/lib/_utils_impl.pyi index e73ba659a31c..87fbc3aa5c4c 100644 --- a/numpy/lib/_utils_impl.pyi +++ b/numpy/lib/_utils_impl.pyi @@ -1,22 +1,18 @@ from _typeshed import SupportsWrite from typing import LiteralString -from typing_extensions import TypeVar import numpy as np __all__ = ["get_include", "info", "show_runtime"] -_ScalarOrArrayT = TypeVar("_ScalarOrArrayT", bound=np.generic | np.ndarray) -_DTypeT = TypeVar("_DTypeT", bound=np.dtype) - -### - def get_include() -> LiteralString: ... def show_runtime() -> None: ... def info( object: object = None, maxwidth: int = 76, output: SupportsWrite[str] | None = None, toplevel: str = "numpy" ) -> None: ... -def drop_metadata(dtype: _DTypeT, /) -> _DTypeT: ... +def drop_metadata[DTypeT: np.dtype](dtype: DTypeT, /) -> DTypeT: ... # used internally by `lib._function_base_impl._median` -def _median_nancheck(data: np.ndarray, result: _ScalarOrArrayT, axis: int) -> _ScalarOrArrayT: ... +def _median_nancheck[ScalarOrArrayT: np.generic | np.ndarray]( + data: np.ndarray, result: ScalarOrArrayT, axis: int +) -> ScalarOrArrayT: ... 
diff --git a/numpy/lib/recfunctions.pyi b/numpy/lib/recfunctions.pyi index 33713cf16331..3ba63bdb91dd 100644 --- a/numpy/lib/recfunctions.pyi +++ b/numpy/lib/recfunctions.pyi @@ -1,11 +1,11 @@ from _typeshed import Incomplete from collections.abc import Callable, Iterable, Mapping, Sequence -from typing import Any, Literal, TypeAlias, overload -from typing_extensions import TypeVar +from typing import Any, Literal, overload import numpy as np import numpy.typing as npt -from numpy._typing import _AnyShape, _DTypeLike, _DTypeLikeVoid +from numpy import _CastingKind +from numpy._typing import _AnyShape, _DTypeLike, _DTypeLikeVoid, _Shape from numpy.ma.mrecords import MaskedRecords __all__ = [ @@ -32,26 +32,18 @@ __all__ = [ "unstructured_to_structured", ] -_T = TypeVar("_T") -_ShapeT = TypeVar("_ShapeT", bound=tuple[int, ...]) -_ScalarT = TypeVar("_ScalarT", bound=np.generic) -_DTypeT = TypeVar("_DTypeT", bound=np.dtype) -_ArrayT = TypeVar("_ArrayT", bound=npt.NDArray[Any]) -_VoidArrayT = TypeVar("_VoidArrayT", bound=npt.NDArray[np.void]) -_NonVoidDTypeT = TypeVar("_NonVoidDTypeT", bound=_NonVoidDType) +type _OneOrMany[T] = T | Iterable[T] +type _BuiltinSequence[T] = tuple[T, ...] | list[T] -_OneOrMany: TypeAlias = _T | Iterable[_T] -_BuiltinSequence: TypeAlias = tuple[_T, ...] | list[_T] +type _NestedNames = tuple[str | _NestedNames, ...] +type _NonVoid = np.bool | np.number | np.character | np.datetime64 | np.timedelta64 | np.object_ +type _NonVoidDType = np.dtype[_NonVoid] | np.dtypes.StringDType -_NestedNames: TypeAlias = tuple[str | _NestedNames, ...] -_NonVoid: TypeAlias = np.bool | np.number | np.character | np.datetime64 | np.timedelta64 | np.object_ -_NonVoidDType: TypeAlias = np.dtype[_NonVoid] | np.dtypes.StringDType - -_JoinType: TypeAlias = Literal["inner", "outer", "leftouter"] +type _JoinType = Literal["inner", "outer", "leftouter"] ### -def recursive_fill_fields(input: npt.NDArray[np.void], output: _VoidArrayT) -> _VoidArrayT: ... 
+def recursive_fill_fields[VoidArrayT: npt.NDArray[np.void]](input: npt.NDArray[np.void], output: VoidArrayT) -> VoidArrayT: ... # def get_names(adtype: np.dtype[np.void]) -> _NestedNames: ... @@ -59,7 +51,7 @@ def get_names_flat(adtype: np.dtype[np.void]) -> tuple[str, ...]: ... # @overload -def flatten_descr(ndtype: _NonVoidDTypeT) -> tuple[tuple[Literal[""], _NonVoidDTypeT]]: ... +def flatten_descr[NonVoidDTypeT: _NonVoidDType](ndtype: NonVoidDTypeT) -> tuple[tuple[Literal[""], NonVoidDTypeT]]: ... @overload def flatten_descr(ndtype: np.dtype[np.void]) -> tuple[tuple[str, np.dtype]]: ... @@ -72,13 +64,13 @@ def get_fieldstructure( # @overload -def merge_arrays( - seqarrays: Sequence[np.ndarray[_ShapeT, np.dtype]] | np.ndarray[_ShapeT, np.dtype], +def merge_arrays[ShapeT: _Shape]( + seqarrays: Sequence[np.ndarray[ShapeT, np.dtype]] | np.ndarray[ShapeT, np.dtype], fill_value: float = -1, flatten: bool = False, usemask: bool = False, asrecarray: bool = False, -) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... +) -> np.recarray[ShapeT, np.dtype[np.void]]: ... @overload def merge_arrays( seqarrays: Sequence[npt.ArrayLike] | np.void, @@ -90,64 +82,64 @@ def merge_arrays( # @overload -def drop_fields( - base: np.ndarray[_ShapeT, np.dtype[np.void]], +def drop_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], drop_names: str | Iterable[str], usemask: bool = True, asrecarray: Literal[False] = False, -) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... +) -> np.ndarray[ShapeT, np.dtype[np.void]]: ... @overload -def drop_fields( - base: np.ndarray[_ShapeT, np.dtype[np.void]], +def drop_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], drop_names: str | Iterable[str], usemask: bool, asrecarray: Literal[True], -) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... +) -> np.recarray[ShapeT, np.dtype[np.void]]: ... 
@overload -def drop_fields( - base: np.ndarray[_ShapeT, np.dtype[np.void]], +def drop_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], drop_names: str | Iterable[str], usemask: bool = True, *, asrecarray: Literal[True], -) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... +) -> np.recarray[ShapeT, np.dtype[np.void]]: ... # @overload -def rename_fields( - base: MaskedRecords[_ShapeT, np.dtype[np.void]], +def rename_fields[ShapeT: _Shape]( + base: MaskedRecords[ShapeT, np.dtype[np.void]], namemapper: Mapping[str, str], -) -> MaskedRecords[_ShapeT, np.dtype[np.void]]: ... +) -> MaskedRecords[ShapeT, np.dtype[np.void]]: ... @overload -def rename_fields( - base: np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], +def rename_fields[ShapeT: _Shape]( + base: np.ma.MaskedArray[ShapeT, np.dtype[np.void]], namemapper: Mapping[str, str], -) -> np.ma.MaskedArray[_ShapeT, np.dtype[np.void]]: ... +) -> np.ma.MaskedArray[ShapeT, np.dtype[np.void]]: ... @overload -def rename_fields( - base: np.recarray[_ShapeT, np.dtype[np.void]], +def rename_fields[ShapeT: _Shape]( + base: np.recarray[ShapeT, np.dtype[np.void]], namemapper: Mapping[str, str], -) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... +) -> np.recarray[ShapeT, np.dtype[np.void]]: ... @overload -def rename_fields( - base: np.ndarray[_ShapeT, np.dtype[np.void]], +def rename_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], namemapper: Mapping[str, str], -) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... +) -> np.ndarray[ShapeT, np.dtype[np.void]]: ... # @overload -def append_fields( - base: np.ndarray[_ShapeT, np.dtype[np.void]], +def append_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], names: _OneOrMany[str], data: _OneOrMany[npt.NDArray[Any]], dtypes: _BuiltinSequence[np.dtype] | None, fill_value: int, usemask: Literal[False], asrecarray: Literal[False] = False, -) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... +) -> np.ndarray[ShapeT, np.dtype[np.void]]: ... 
@overload -def append_fields( - base: np.ndarray[_ShapeT, np.dtype[np.void]], +def append_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], names: _OneOrMany[str], data: _OneOrMany[npt.NDArray[Any]], dtypes: _BuiltinSequence[np.dtype] | None = None, @@ -155,20 +147,20 @@ def append_fields( *, usemask: Literal[False], asrecarray: Literal[False] = False, -) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... +) -> np.ndarray[ShapeT, np.dtype[np.void]]: ... @overload -def append_fields( - base: np.ndarray[_ShapeT, np.dtype[np.void]], +def append_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], names: _OneOrMany[str], data: _OneOrMany[npt.NDArray[Any]], dtypes: _BuiltinSequence[np.dtype] | None, fill_value: int, usemask: Literal[False], asrecarray: Literal[True], -) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... +) -> np.recarray[ShapeT, np.dtype[np.void]]: ... @overload -def append_fields( - base: np.ndarray[_ShapeT, np.dtype[np.void]], +def append_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], names: _OneOrMany[str], data: _OneOrMany[npt.NDArray[Any]], dtypes: _BuiltinSequence[np.dtype] | None = None, @@ -176,30 +168,30 @@ def append_fields( *, usemask: Literal[False], asrecarray: Literal[True], -) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... +) -> np.recarray[ShapeT, np.dtype[np.void]]: ... @overload -def append_fields( - base: np.ndarray[_ShapeT, np.dtype[np.void]], +def append_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], names: _OneOrMany[str], data: _OneOrMany[npt.NDArray[Any]], dtypes: _BuiltinSequence[np.dtype] | None = None, fill_value: int = -1, usemask: Literal[True] = True, asrecarray: Literal[False] = False, -) -> np.ma.MaskedArray[_ShapeT, np.dtype[np.void]]: ... +) -> np.ma.MaskedArray[ShapeT, np.dtype[np.void]]: ... 
@overload -def append_fields( - base: np.ndarray[_ShapeT, np.dtype[np.void]], +def append_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], names: _OneOrMany[str], data: _OneOrMany[npt.NDArray[Any]], dtypes: _BuiltinSequence[np.dtype] | None, fill_value: int, usemask: Literal[True], asrecarray: Literal[True], -) -> MaskedRecords[_ShapeT, np.dtype[np.void]]: ... +) -> MaskedRecords[ShapeT, np.dtype[np.void]]: ... @overload -def append_fields( - base: np.ndarray[_ShapeT, np.dtype[np.void]], +def append_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], names: _OneOrMany[str], data: _OneOrMany[npt.NDArray[Any]], dtypes: _BuiltinSequence[np.dtype] | None = None, @@ -207,46 +199,46 @@ def append_fields( usemask: Literal[True] = True, *, asrecarray: Literal[True], -) -> MaskedRecords[_ShapeT, np.dtype[np.void]]: ... +) -> MaskedRecords[ShapeT, np.dtype[np.void]]: ... # -def rec_drop_fields( - base: np.ndarray[_ShapeT, np.dtype[np.void]], +def rec_drop_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], drop_names: str | Iterable[str], -) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... +) -> np.recarray[ShapeT, np.dtype[np.void]]: ... # -def rec_append_fields( - base: np.ndarray[_ShapeT, np.dtype[np.void]], +def rec_append_fields[ShapeT: _Shape]( + base: np.ndarray[ShapeT, np.dtype[np.void]], names: _OneOrMany[str], data: _OneOrMany[npt.NDArray[Any]], dtypes: _BuiltinSequence[np.dtype] | None = None, -) -> np.ma.MaskedArray[_ShapeT, np.dtype[np.void]]: ... +) -> np.ma.MaskedArray[ShapeT, np.dtype[np.void]]: ... # TODO(jorenham): Stop passing `void` directly once structured dtypes are implemented, # e.g. using a `TypeVar` with constraints. # https://github.com/numpy/numtype/issues/92 @overload -def repack_fields(a: _DTypeT, align: bool = False, recurse: bool = False) -> _DTypeT: ... +def repack_fields[DTypeT: np.dtype](a: DTypeT, align: bool = False, recurse: bool = False) -> DTypeT: ... 
@overload -def repack_fields(a: _ScalarT, align: bool = False, recurse: bool = False) -> _ScalarT: ... +def repack_fields[ScalarT: np.generic](a: ScalarT, align: bool = False, recurse: bool = False) -> ScalarT: ... @overload -def repack_fields(a: _ArrayT, align: bool = False, recurse: bool = False) -> _ArrayT: ... +def repack_fields[ArrayT: np.ndarray](a: ArrayT, align: bool = False, recurse: bool = False) -> ArrayT: ... # TODO(jorenham): Attempt shape-typing (return type has ndim == arr.ndim + 1) @overload -def structured_to_unstructured( +def structured_to_unstructured[ScalarT: np.generic]( arr: npt.NDArray[np.void], - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], copy: bool = False, - casting: np._CastingKind = "unsafe", -) -> npt.NDArray[_ScalarT]: ... + casting: _CastingKind = "unsafe", +) -> npt.NDArray[ScalarT]: ... @overload def structured_to_unstructured( arr: npt.NDArray[np.void], dtype: npt.DTypeLike | None = None, copy: bool = False, - casting: np._CastingKind = "unsafe", + casting: _CastingKind = "unsafe", ) -> npt.NDArray[Any]: ... # @@ -280,29 +272,29 @@ def unstructured_to_structured( ) -> npt.NDArray[np.void]: ... # -def apply_along_fields( - func: Callable[[np.ndarray[_ShapeT, Any]], npt.NDArray[Any]], - arr: np.ndarray[_ShapeT, np.dtype[np.void]], -) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... +def apply_along_fields[ShapeT: _Shape]( + func: Callable[[np.ndarray[ShapeT]], np.ndarray], + arr: np.ndarray[ShapeT, np.dtype[np.void]], +) -> np.ndarray[ShapeT, np.dtype[np.void]]: ... # def assign_fields_by_name(dst: npt.NDArray[np.void], src: npt.NDArray[np.void], zero_unassigned: bool = True) -> None: ... # -def require_fields( - array: np.ndarray[_ShapeT, np.dtype[np.void]], +def require_fields[ShapeT: _Shape]( + array: np.ndarray[ShapeT, np.dtype[np.void]], required_dtype: _DTypeLikeVoid, -) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... +) -> np.ndarray[ShapeT, np.dtype[np.void]]: ... 
# TODO(jorenham): Attempt shape-typing @overload -def stack_arrays( - arrays: _ArrayT, +def stack_arrays[ArrayT: np.ndarray]( + arrays: ArrayT, defaults: Mapping[str, object] | None = None, usemask: bool = True, asrecarray: bool = False, autoconvert: bool = False, -) -> _ArrayT: ... +) -> ArrayT: ... @overload def stack_arrays( arrays: Sequence[npt.NDArray[Any]], @@ -357,27 +349,27 @@ def stack_arrays( # @overload -def find_duplicates( - a: np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], +def find_duplicates[ShapeT: _Shape]( + a: np.ma.MaskedArray[ShapeT, np.dtype[np.void]], key: str | None = None, ignoremask: bool = True, return_index: Literal[False] = False, -) -> np.ma.MaskedArray[_ShapeT, np.dtype[np.void]]: ... +) -> np.ma.MaskedArray[ShapeT, np.dtype[np.void]]: ... @overload -def find_duplicates( - a: np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], +def find_duplicates[ShapeT: _Shape]( + a: np.ma.MaskedArray[ShapeT, np.dtype[np.void]], key: str | None, ignoremask: bool, return_index: Literal[True], -) -> tuple[np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], np.ndarray[_ShapeT, np.dtype[np.int_]]]: ... +) -> tuple[np.ma.MaskedArray[ShapeT, np.dtype[np.void]], np.ndarray[ShapeT, np.dtype[np.int_]]]: ... @overload -def find_duplicates( - a: np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], +def find_duplicates[ShapeT: _Shape]( + a: np.ma.MaskedArray[ShapeT, np.dtype[np.void]], key: str | None = None, ignoremask: bool = True, *, return_index: Literal[True], -) -> tuple[np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], np.ndarray[_ShapeT, np.dtype[np.int_]]]: ... +) -> tuple[np.ma.MaskedArray[ShapeT, np.dtype[np.void]], np.ndarray[ShapeT, np.dtype[np.int_]]]: ... 
# @overload diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index b2b8ada44419..6cbd3b43a07a 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -6,8 +6,6 @@ from typing import ( Never, SupportsIndex, SupportsInt, - TypeAlias, - TypeVar, overload, ) @@ -79,10 +77,7 @@ __all__ = [ "vecdot", ] -_NumberT = TypeVar("_NumberT", bound=np.number) -_NumericScalarT = TypeVar("_NumericScalarT", bound=np.number | np.timedelta64 | np.object_) - -_ModeKind: TypeAlias = L["reduced", "complete", "r", "raw"] +type _ModeKind = L["reduced", "complete", "r", "raw"] ### @@ -102,7 +97,7 @@ class QRResult(NamedTuple): class SlogdetResult(NamedTuple): # TODO: `sign` and `logabsdet` are scalars for input 2D arrays and - # a `(x.ndim - 2)`` dimensionl arrays otherwise + # a `(x.ndim - 2)`` dimensional arrays otherwise sign: Any logabsdet: Any @@ -188,7 +183,7 @@ def outer(x1: _ArrayLike[Never], x2: _ArrayLike[Never], /) -> NDArray[Any]: ... @overload def outer(x1: _ArrayLikeBool_co, x2: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... @overload -def outer(x1: _ArrayLike[_NumberT], x2: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... +def outer[ScalarT: np.number](x1: _ArrayLike[ScalarT], x2: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def outer(x1: _ArrayLikeUInt_co, x2: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload @@ -351,11 +346,11 @@ def pinv( ) -> NDArray[complexfloating]: ... # TODO: Returns a 2-tuple of scalars for 2D arrays and -# a 2-tuple of `(a.ndim - 2)`` dimensionl arrays otherwise +# a 2-tuple of `(a.ndim - 2)`` dimensional arrays otherwise def slogdet(a: _ArrayLikeComplex_co) -> SlogdetResult: ... # TODO: Returns a 2-tuple of scalars for 2D arrays and -# a 2-tuple of `(a.ndim - 2)`` dimensionl arrays otherwise +# a 2-tuple of `(a.ndim - 2)`` dimensional arrays otherwise def det(a: _ArrayLikeComplex_co) -> Any: ... 
@overload @@ -441,13 +436,13 @@ def vector_norm( # keep in sync with numpy._core.numeric.tensordot (ignoring `/, *`) @overload -def tensordot( - a: _ArrayLike[_NumericScalarT], - b: _ArrayLike[_NumericScalarT], +def tensordot[ScalarT: np.number | np.timedelta64 | np.object_]( + a: _ArrayLike[ScalarT], + b: _ArrayLike[ScalarT], /, *, axes: int | tuple[_ShapeLike, _ShapeLike] = 2, -) -> NDArray[_NumericScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def tensordot( a: _ArrayLikeBool_co, @@ -537,7 +532,7 @@ def cross( ) -> NDArray[complexfloating]: ... @overload -def matmul(x1: _ArrayLike[_NumberT], x2: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... +def matmul[ScalarT: np.number](x1: _ArrayLike[ScalarT], x2: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def matmul(x1: _ArrayLikeUInt_co, x2: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 67d6b3591a4b..a5e8e41b7709 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -4,7 +4,7 @@ import datetime as dt import types from _typeshed import Incomplete -from collections.abc import Callable, Sequence +from collections.abc import Buffer, Callable, Sequence from typing import ( Any, Concatenate, @@ -18,16 +18,15 @@ from typing import ( SupportsFloat, SupportsIndex, SupportsInt, - TypeAlias, Unpack, final, overload, + override, ) -from typing_extensions import Buffer, ParamSpec, TypeIs, TypeVar, override +from typing_extensions import TypeIs, TypeVar import numpy as np from numpy import ( - _AnyShapeT, _HasDType, _HasDTypeWithRealAndImag, _ModeKind, @@ -41,16 +40,11 @@ from numpy import ( amin, bool_, bytes_, - character, complex128, complexfloating, datetime64, dtype, - dtypes, expand_dims, - flexible, - float16, - float32, float64, floating, generic, @@ -66,9 +60,7 @@ from numpy import ( signedinteger, str_, timedelta64, - ufunc, unsignedinteger, - void, ) from numpy._core.fromnumeric import _UFuncKwargs # type-check only from 
numpy._globals import _NoValueType @@ -292,59 +284,44 @@ __all__ = [ "zeros_like", ] -_ShapeT = TypeVar("_ShapeT", bound=_Shape) -_ShapeOrAnyT = TypeVar("_ShapeOrAnyT", bound=_Shape, default=_AnyShape) _ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True) -_DTypeT = TypeVar("_DTypeT", bound=dtype) -_DTypeT_co = TypeVar("_DTypeT_co", bound=dtype, default=dtype, covariant=True) -_ArrayT = TypeVar("_ArrayT", bound=ndarray[Any, Any]) -_MArrayT = TypeVar("_MArrayT", bound=MaskedArray[Any, Any]) -_ScalarT = TypeVar("_ScalarT", bound=generic) -_ScalarT_co = TypeVar("_ScalarT_co", bound=generic, covariant=True) -_NumberT = TypeVar("_NumberT", bound=number) -_RealNumberT = TypeVar("_RealNumberT", bound=floating | integer) -_ArangeScalarT = TypeVar("_ArangeScalarT", bound=_ArangeScalar) -_UFuncT_co = TypeVar( - "_UFuncT_co", - # the `| Callable` simplifies self-binding to the ufunc's callable signature - bound=np.ufunc | Callable[..., object], - default=np.ufunc, - covariant=True, -) -_Pss = ParamSpec("_Pss") -_T = TypeVar("_T") +_DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=True) +# the additional `Callable[...]` bound simplifies self-binding to the ufunc's callable signature +_UFuncT_co = TypeVar("_UFuncT_co", bound=np.ufunc | Callable[..., object], default=np.ufunc, covariant=True) + +type _RealNumber = np.floating | np.integer -_Ignored: TypeAlias = object +type _Ignored = object # A subset of `MaskedArray` that can be parametrized w.r.t. 
`np.generic` -_MaskedArray: TypeAlias = MaskedArray[_AnyShape, dtype[_ScalarT]] -_Masked1D: TypeAlias = MaskedArray[tuple[int], dtype[_ScalarT]] - -_MaskedArrayUInt_co: TypeAlias = _MaskedArray[unsignedinteger | np.bool] -_MaskedArrayInt_co: TypeAlias = _MaskedArray[integer | np.bool] -_MaskedArrayFloat64_co: TypeAlias = _MaskedArray[floating[_64Bit] | float32 | float16 | integer | np.bool] -_MaskedArrayFloat_co: TypeAlias = _MaskedArray[floating | integer | np.bool] -_MaskedArrayComplex128_co: TypeAlias = _MaskedArray[number[_64Bit] | number[_32Bit] | float16 | integer | np.bool] -_MaskedArrayComplex_co: TypeAlias = _MaskedArray[inexact | integer | np.bool] -_MaskedArrayNumber_co: TypeAlias = _MaskedArray[number | np.bool] -_MaskedArrayTD64_co: TypeAlias = _MaskedArray[timedelta64 | integer | np.bool] - -_ArrayInt_co: TypeAlias = NDArray[integer | bool_] -_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] - -_ConvertibleToInt: TypeAlias = SupportsInt | SupportsIndex | _CharLike_co -_ConvertibleToFloat: TypeAlias = SupportsFloat | SupportsIndex | _CharLike_co -_ConvertibleToComplex: TypeAlias = SupportsComplex | SupportsFloat | SupportsIndex | _CharLike_co -_ConvertibleToTD64: TypeAlias = dt.timedelta | int | _CharLike_co | character | number | timedelta64 | np.bool | None -_ConvertibleToDT64: TypeAlias = dt.date | int | _CharLike_co | character | number | datetime64 | np.bool | None -_ArangeScalar: TypeAlias = floating | integer | datetime64 | timedelta64 - -_NoMaskType: TypeAlias = np.bool_[Literal[False]] # type of `np.False_` -_MaskArray: TypeAlias = np.ndarray[_ShapeOrAnyT, np.dtype[np.bool_]] - -_FillValue: TypeAlias = complex | None # int | float | complex | None -_FillValueCallable: TypeAlias = Callable[[np.dtype | ArrayLike], _FillValue] -_DomainCallable: TypeAlias = Callable[..., NDArray[np.bool_]] +type _MaskedArray[ScalarT: np.generic] = MaskedArray[_AnyShape, np.dtype[ScalarT]] +type _Masked1D[ScalarT: np.generic] = 
MaskedArray[tuple[int], np.dtype[ScalarT]] + +type _MaskedArrayUInt_co = _MaskedArray[np.unsignedinteger | np.bool] +type _MaskedArrayInt_co = _MaskedArray[np.integer | np.bool] +type _MaskedArrayFloat64_co = _MaskedArray[np.floating[_64Bit] | np.float32 | np.float16 | np.integer | np.bool] +type _MaskedArrayFloat_co = _MaskedArray[np.floating | np.integer | np.bool] +type _MaskedArrayComplex128_co = _MaskedArray[np.number[_64Bit] | np.number[_32Bit] | np.float16 | np.integer | np.bool] +type _MaskedArrayComplex_co = _MaskedArray[np.inexact | np.integer | np.bool] +type _MaskedArrayNumber_co = _MaskedArray[np.number | np.bool] +type _MaskedArrayTD64_co = _MaskedArray[np.timedelta64 | np.integer | np.bool] + +type _ArrayInt_co = NDArray[np.integer | np.bool] +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] + +type _ConvertibleToInt = SupportsInt | SupportsIndex | _CharLike_co +type _ConvertibleToFloat = SupportsFloat | SupportsIndex | _CharLike_co +type _ConvertibleToComplex = SupportsComplex | SupportsFloat | SupportsIndex | _CharLike_co +type _ConvertibleToTD64 = dt.timedelta | int | _CharLike_co | np.character | np.number | np.timedelta64 | np.bool | None +type _ConvertibleToDT64 = dt.date | int | _CharLike_co | np.character | np.number | np.datetime64 | np.bool | None +type _ArangeScalar = _RealNumber | np.datetime64 | np.timedelta64 + +type _NoMaskType = np.bool_[Literal[False]] # type of `np.False_` +type _MaskArray[ShapeT: _Shape] = np.ndarray[ShapeT, np.dtype[np.bool]] + +type _FillValue = complex | None # int | float | complex | None +type _FillValueCallable = Callable[[np.dtype | ArrayLike], _FillValue] +type _DomainCallable = Callable[..., NDArray[np.bool]] ### @@ -374,13 +351,13 @@ class _MaskedUnaryOperation(_MaskedUFunc[_UFuncT_co], Generic[_UFuncT_co]): # https://github.com/microsoft/pyright/issues/10849 # https://github.com/microsoft/pyright/issues/10899 # https://github.com/microsoft/pyright/issues/11049 - def 
__call__( - self: _MaskedUnaryOperation[Callable[Concatenate[Any, _Pss], _T]], + def __call__[**Tss, T]( + self: _MaskedUnaryOperation[Callable[Concatenate[Any, Tss], T]], /, a: ArrayLike, - *args: _Pss.args, - **kwargs: _Pss.kwargs, - ) -> _T: ... + *args: Tss.args, + **kwargs: Tss.kwargs, + ) -> T: ... # not generic at runtime class _MaskedBinaryOperation(_MaskedUFunc[_UFuncT_co], Generic[_UFuncT_co]): @@ -390,14 +367,14 @@ class _MaskedBinaryOperation(_MaskedUFunc[_UFuncT_co], Generic[_UFuncT_co]): def __init__(self, /, mbfunc: _UFuncT_co, fillx: _FillValue = 0, filly: _FillValue = 0) -> None: ... # NOTE: See the comment in `_MaskedUnaryOperation.__call__` - def __call__( - self: _MaskedBinaryOperation[Callable[Concatenate[Any, Any, _Pss], _T]], + def __call__[**Tss, T]( + self: _MaskedBinaryOperation[Callable[Concatenate[Any, Any, Tss], T]], /, a: ArrayLike, b: ArrayLike, - *args: _Pss.args, - **kwargs: _Pss.kwargs, - ) -> _T: ... + *args: Tss.args, + **kwargs: Tss.kwargs, + ) -> T: ... # NOTE: We cannot meaningfully annotate the return (d)types of these methods until # the signatures of the corresponding `numpy.ufunc` methods are specified. @@ -421,14 +398,14 @@ class _DomainedBinaryOperation(_MaskedUFunc[_UFuncT_co], Generic[_UFuncT_co]): ) -> None: ... # NOTE: See the comment in `_MaskedUnaryOperation.__call__` - def __call__( - self: _DomainedBinaryOperation[Callable[Concatenate[Any, Any, _Pss], _T]], + def __call__[**Tss, T]( + self: _DomainedBinaryOperation[Callable[Concatenate[Any, Any, Tss], T]], /, a: ArrayLike, b: ArrayLike, - *args: _Pss.args, - **kwargs: _Pss.kwargs, - ) -> _T: ... + *args: Tss.args, + **kwargs: Tss.kwargs, + ) -> T: ... 
# not generic at runtime class _extrema_operation(_MaskedUFunc[_UFuncT_co], Generic[_UFuncT_co]): @@ -446,7 +423,7 @@ class _extrema_operation(_MaskedUFunc[_UFuncT_co], Generic[_UFuncT_co]): # NOTE: This class is only used internally for `maximum` and `minimum`, so we are # able to annotate the `__call__` method specifically for those two functions. @overload - def __call__(self, /, a: _ArrayLike[_ScalarT], b: _ArrayLike[_ScalarT]) -> _MaskedArray[_ScalarT]: ... + def __call__[ScalarT: np.generic](self, /, a: _ArrayLike[ScalarT], b: _ArrayLike[ScalarT]) -> _MaskedArray[ScalarT]: ... @overload def __call__(self, /, a: ArrayLike, b: ArrayLike) -> _MaskedArray[Incomplete]: ... @@ -530,7 +507,7 @@ def maximum_fill_value(obj: object) -> Any: ... # @overload # returns `a.fill_value` if `a` is a `MaskedArray` -def get_fill_value(a: _MaskedArray[_ScalarT]) -> _ScalarT: ... +def get_fill_value[ScalarT: np.generic](a: _MaskedArray[ScalarT]) -> ScalarT: ... @overload # otherwise returns `default_fill_value(a)` def get_fill_value(a: object) -> Any: ... @@ -540,33 +517,36 @@ def set_fill_value(a: MaskedArray, fill_value: _ScalarLike_co) -> None: ... # the return type depends on the *values* of `a` and `b` (which cannot be known # statically), which is why we need to return an awkward `_ | None` @overload -def common_fill_value(a: _MaskedArray[_ScalarT], b: MaskedArray) -> _ScalarT | None: ... +def common_fill_value[ScalarT: np.generic](a: _MaskedArray[ScalarT], b: MaskedArray) -> ScalarT | None: ... @overload def common_fill_value(a: object, b: object) -> Any: ... # keep in sync with `fix_invalid`, but return `ndarray` instead of `MaskedArray` @overload -def filled(a: ndarray[_ShapeT, _DTypeT], fill_value: _ScalarLike_co | None = None) -> ndarray[_ShapeT, _DTypeT]: ... +def filled[ShapeT: _Shape, DTypeT: np.dtype]( + a: ndarray[ShapeT, DTypeT], + fill_value: _ScalarLike_co | None = None, +) -> ndarray[ShapeT, DTypeT]: ... 
@overload -def filled(a: _ArrayLike[_ScalarT], fill_value: _ScalarLike_co | None = None) -> NDArray[_ScalarT]: ... +def filled[ScalarT: np.generic](a: _ArrayLike[ScalarT], fill_value: _ScalarLike_co | None = None) -> NDArray[ScalarT]: ... @overload def filled(a: ArrayLike, fill_value: _ScalarLike_co | None = None) -> NDArray[Incomplete]: ... # keep in sync with `filled`, but return `MaskedArray` instead of `ndarray` @overload -def fix_invalid( - a: np.ndarray[_ShapeT, _DTypeT], +def fix_invalid[ShapeT: _Shape, DTypeT: np.dtype]( + a: np.ndarray[ShapeT, DTypeT], mask: _ArrayLikeBool_co = nomask, copy: bool = True, fill_value: _ScalarLike_co | None = None, -) -> MaskedArray[_ShapeT, _DTypeT]: ... +) -> MaskedArray[ShapeT, DTypeT]: ... @overload -def fix_invalid( - a: _ArrayLike[_ScalarT], +def fix_invalid[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], mask: _ArrayLikeBool_co = nomask, copy: bool = True, fill_value: _ScalarLike_co | None = None, -) -> _MaskedArray[_ScalarT]: ... +) -> _MaskedArray[ScalarT]: ... @overload def fix_invalid( a: ArrayLike, @@ -580,9 +560,12 @@ def get_masked_subclass(*arrays: object) -> type[MaskedArray]: ... # @overload -def getdata(a: np.ndarray[_ShapeT, _DTypeT], subok: bool = True) -> np.ndarray[_ShapeT, _DTypeT]: ... +def getdata[ShapeT: _Shape, DTypeT: np.dtype]( + a: np.ndarray[ShapeT, DTypeT], + subok: bool = True, +) -> np.ndarray[ShapeT, DTypeT]: ... @overload -def getdata(a: _ArrayLike[_ScalarT], subok: bool = True) -> NDArray[_ScalarT]: ... +def getdata[ScalarT: np.generic](a: _ArrayLike[ScalarT], subok: bool = True) -> NDArray[ScalarT]: ... @overload def getdata(a: ArrayLike, subok: bool = True) -> NDArray[Incomplete]: ... @@ -592,9 +575,9 @@ get_data = getdata @overload def getmask(a: _ScalarLike_co) -> _NoMaskType: ... @overload -def getmask(a: MaskedArray[_ShapeT, Any]) -> _MaskArray[_ShapeT] | _NoMaskType: ... +def getmask[ShapeT: _Shape](a: MaskedArray[ShapeT, Any]) -> _MaskArray[ShapeT] | _NoMaskType: ... 
@overload -def getmask(a: ArrayLike) -> _MaskArray | _NoMaskType: ... +def getmask(a: ArrayLike) -> _MaskArray[_AnyShape] | _NoMaskType: ... get_mask = getmask @@ -602,7 +585,7 @@ get_mask = getmask @overload def getmaskarray(arr: _ScalarLike_co) -> _MaskArray[tuple[()]]: ... @overload -def getmaskarray(arr: np.ndarray[_ShapeT, Any]) -> _MaskArray[_ShapeT]: ... +def getmaskarray[ShapeT: _Shape](arr: np.ndarray[ShapeT, Any]) -> _MaskArray[ShapeT]: ... # It's sufficient for `m` to have dtype with type: `type[np.bool_]`, # which isn't necessarily a ndarray. Please open an issue if this causes issues. @@ -623,35 +606,35 @@ def make_mask( dtype: _DTypeLikeBool = ..., ) -> _NoMaskType: ... @overload # m: ndarray, shrink=True (default), dtype: bool-like (default) -def make_mask( - m: np.ndarray[_ShapeT], +def make_mask[ShapeT: _Shape]( + m: np.ndarray[ShapeT], copy: bool = False, shrink: Literal[True] = True, dtype: _DTypeLikeBool = ..., -) -> _MaskArray[_ShapeT] | _NoMaskType: ... +) -> _MaskArray[ShapeT] | _NoMaskType: ... @overload # m: ndarray, shrink=False (kwarg), dtype: bool-like (default) -def make_mask( - m: np.ndarray[_ShapeT], +def make_mask[ShapeT: _Shape]( + m: np.ndarray[ShapeT], copy: bool = False, *, shrink: Literal[False], dtype: _DTypeLikeBool = ..., -) -> _MaskArray[_ShapeT]: ... +) -> _MaskArray[ShapeT]: ... @overload # m: ndarray, dtype: void-like -def make_mask( - m: np.ndarray[_ShapeT], +def make_mask[ShapeT: _Shape]( + m: np.ndarray[ShapeT], copy: bool = False, shrink: bool = True, *, dtype: _DTypeLikeVoid, -) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... +) -> np.ndarray[ShapeT, np.dtype[np.void]]: ... @overload # m: array-like, shrink=True (default), dtype: bool-like (default) def make_mask( m: ArrayLike, copy: bool = False, shrink: Literal[True] = True, dtype: _DTypeLikeBool = ..., -) -> _MaskArray | _NoMaskType: ... +) -> _MaskArray[_AnyShape] | _NoMaskType: ... 
@overload # m: array-like, shrink=False (kwarg), dtype: bool-like (default) def make_mask( m: ArrayLike, @@ -659,7 +642,7 @@ def make_mask( *, shrink: Literal[False], dtype: _DTypeLikeBool = ..., -) -> _MaskArray: ... +) -> _MaskArray[_AnyShape]: ... @overload # m: array-like, dtype: void-like def make_mask( m: ArrayLike, @@ -679,11 +662,11 @@ def make_mask( # @overload # known shape, dtype: unstructured (default) -def make_mask_none(newshape: _ShapeT, dtype: np.dtype | type | str | None = None) -> _MaskArray[_ShapeT]: ... +def make_mask_none[ShapeT: _Shape](newshape: ShapeT, dtype: np.dtype | type | str | None = None) -> _MaskArray[ShapeT]: ... @overload # known shape, dtype: structured -def make_mask_none(newshape: _ShapeT, dtype: _VoidDTypeLike) -> np.ndarray[_ShapeT, dtype[np.void]]: ... +def make_mask_none[ShapeT: _Shape](newshape: ShapeT, dtype: _VoidDTypeLike) -> np.ndarray[ShapeT, dtype[np.void]]: ... @overload # unknown shape, dtype: unstructured (default) -def make_mask_none(newshape: _ShapeLike, dtype: np.dtype | type | str | None = None) -> _MaskArray: ... +def make_mask_none(newshape: _ShapeLike, dtype: np.dtype | type | str | None = None) -> _MaskArray[_AnyShape]: ... @overload # unknown shape, dtype: structured def make_mask_none(newshape: _ShapeLike, dtype: _VoidDTypeLike) -> NDArray[np.void]: ... @@ -719,141 +702,206 @@ def mask_or( shrink: Literal[False], ) -> _MaskArray[tuple[()]]: ... @overload # ndarray, ndarray | nomask, shrink=True (default) -def mask_or( - m1: np.ndarray[_ShapeT, np.dtype[_ScalarT]], - m2: np.ndarray[_ShapeT, np.dtype[_ScalarT]] | _NoMaskType | Literal[False], +def mask_or[ShapeT: _Shape, ScalarT: np.generic]( + m1: np.ndarray[ShapeT, np.dtype[ScalarT]], + m2: np.ndarray[ShapeT, np.dtype[ScalarT]] | _NoMaskType | Literal[False], copy: bool = False, shrink: Literal[True] = True, -) -> _MaskArray[_ShapeT] | _NoMaskType: ... +) -> _MaskArray[ShapeT] | _NoMaskType: ... 
@overload # ndarray, ndarray | nomask, shrink=False (kwarg) -def mask_or( - m1: np.ndarray[_ShapeT, np.dtype[_ScalarT]], - m2: np.ndarray[_ShapeT, np.dtype[_ScalarT]] | _NoMaskType | Literal[False], +def mask_or[ShapeT: _Shape, ScalarT: np.generic]( + m1: np.ndarray[ShapeT, np.dtype[ScalarT]], + m2: np.ndarray[ShapeT, np.dtype[ScalarT]] | _NoMaskType | Literal[False], copy: bool = False, *, shrink: Literal[False], -) -> _MaskArray[_ShapeT]: ... +) -> _MaskArray[ShapeT]: ... @overload # ndarray | nomask, ndarray, shrink=True (default) -def mask_or( - m1: np.ndarray[_ShapeT, np.dtype[_ScalarT]] | _NoMaskType | Literal[False], - m2: np.ndarray[_ShapeT, np.dtype[_ScalarT]], +def mask_or[ShapeT: _Shape, ScalarT: np.generic]( + m1: np.ndarray[ShapeT, np.dtype[ScalarT]] | _NoMaskType | Literal[False], + m2: np.ndarray[ShapeT, np.dtype[ScalarT]], copy: bool = False, shrink: Literal[True] = True, -) -> _MaskArray[_ShapeT] | _NoMaskType: ... +) -> _MaskArray[ShapeT] | _NoMaskType: ... @overload # ndarray | nomask, ndarray, shrink=False (kwarg) -def mask_or( - m1: np.ndarray[_ShapeT, np.dtype[_ScalarT]] | _NoMaskType | Literal[False], - m2: np.ndarray[_ShapeT, np.dtype[_ScalarT]], +def mask_or[ShapeT: _Shape, ScalarT: np.generic]( + m1: np.ndarray[ShapeT, np.dtype[ScalarT]] | _NoMaskType | Literal[False], + m2: np.ndarray[ShapeT, np.dtype[ScalarT]], copy: bool = False, *, shrink: Literal[False], -) -> _MaskArray[_ShapeT]: ... +) -> _MaskArray[ShapeT]: ... # @overload -def flatten_mask(mask: np.ndarray[_ShapeT]) -> _MaskArray[_ShapeT]: ... +def flatten_mask[ShapeT: _Shape](mask: np.ndarray[ShapeT]) -> _MaskArray[ShapeT]: ... @overload -def flatten_mask(mask: ArrayLike) -> _MaskArray: ... +def flatten_mask(mask: ArrayLike) -> _MaskArray[_AnyShape]: ... # NOTE: we currently don't know the field types of `void` dtypes, so it's not possible # to know the output dtype of the returned array. 
@overload -def flatten_structured_array(a: MaskedArray[_ShapeT, np.dtype[np.void]]) -> MaskedArray[_ShapeT]: ... +def flatten_structured_array[ShapeT: _Shape](a: MaskedArray[ShapeT, np.dtype[np.void]]) -> MaskedArray[ShapeT]: ... @overload -def flatten_structured_array(a: np.ndarray[_ShapeT, np.dtype[np.void]]) -> np.ndarray[_ShapeT]: ... +def flatten_structured_array[ShapeT: _Shape](a: np.ndarray[ShapeT, np.dtype[np.void]]) -> np.ndarray[ShapeT]: ... @overload # for some reason this accepts unstructured array-likes, hence this fallback overload def flatten_structured_array(a: ArrayLike) -> np.ndarray: ... # keep in sync with other the `masked_*` functions @overload # known array with known shape and dtype -def masked_invalid(a: ndarray[_ShapeT, _DTypeT], copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ... +def masked_invalid[ShapeT: _Shape, DTypeT: np.dtype]( + a: ndarray[ShapeT, DTypeT], + copy: bool = True, +) -> MaskedArray[ShapeT, DTypeT]: ... @overload # array-like of known scalar-type -def masked_invalid(a: _ArrayLike[_ScalarT], copy: bool = True) -> _MaskedArray[_ScalarT]: ... +def masked_invalid[ScalarT: np.generic](a: _ArrayLike[ScalarT], copy: bool = True) -> _MaskedArray[ScalarT]: ... @overload # unknown array-like def masked_invalid(a: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ... # keep in sync with other the `masked_*` functions @overload # array-like of known scalar-type -def masked_where( - condition: _ArrayLikeBool_co, a: ndarray[_ShapeT, _DTypeT], copy: bool = True -) -> MaskedArray[_ShapeT, _DTypeT]: ... +def masked_where[ShapeT: _Shape, DTypeT: np.dtype]( + condition: _ArrayLikeBool_co, + a: ndarray[ShapeT, DTypeT], + copy: bool = True, +) -> MaskedArray[ShapeT, DTypeT]: ... @overload # array-like of known scalar-type -def masked_where(condition: _ArrayLikeBool_co, a: _ArrayLike[_ScalarT], copy: bool = True) -> _MaskedArray[_ScalarT]: ... 
+def masked_where[ScalarT: np.generic]( + condition: _ArrayLikeBool_co, + a: _ArrayLike[ScalarT], + copy: bool = True, +) -> _MaskedArray[ScalarT]: ... @overload # unknown array-like def masked_where(condition: _ArrayLikeBool_co, a: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ... # keep in sync with other the `masked_*` functions @overload # known array with known shape and dtype -def masked_greater(x: ndarray[_ShapeT, _DTypeT], value: ArrayLike, copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ... +def masked_greater[ShapeT: _Shape, DTypeT: np.dtype]( + x: ndarray[ShapeT, DTypeT], + value: ArrayLike, + copy: bool = True, +) -> MaskedArray[ShapeT, DTypeT]: ... @overload # array-like of known scalar-type -def masked_greater(x: _ArrayLike[_ScalarT], value: ArrayLike, copy: bool = True) -> _MaskedArray[_ScalarT]: ... +def masked_greater[ScalarT: np.generic](x: _ArrayLike[ScalarT], value: ArrayLike, copy: bool = True) -> _MaskedArray[ScalarT]: ... @overload # unknown array-like def masked_greater(x: ArrayLike, value: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ... # keep in sync with other the `masked_*` functions @overload # known array with known shape and dtype -def masked_greater_equal(x: ndarray[_ShapeT, _DTypeT], value: ArrayLike, copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ... +def masked_greater_equal[ShapeT: _Shape, DTypeT: np.dtype]( + x: ndarray[ShapeT, DTypeT], + value: ArrayLike, + copy: bool = True, +) -> MaskedArray[ShapeT, DTypeT]: ... @overload # array-like of known scalar-type -def masked_greater_equal(x: _ArrayLike[_ScalarT], value: ArrayLike, copy: bool = True) -> _MaskedArray[_ScalarT]: ... +def masked_greater_equal[ScalarT: np.generic]( + x: _ArrayLike[ScalarT], + value: ArrayLike, + copy: bool = True, +) -> _MaskedArray[ScalarT]: ... @overload # unknown array-like def masked_greater_equal(x: ArrayLike, value: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ... 
# keep in sync with other the `masked_*` functions @overload # known array with known shape and dtype -def masked_less(x: ndarray[_ShapeT, _DTypeT], value: ArrayLike, copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ... +def masked_less[ShapeT: _Shape, DTypeT: np.dtype]( + x: ndarray[ShapeT, DTypeT], + value: ArrayLike, + copy: bool = True, +) -> MaskedArray[ShapeT, DTypeT]: ... @overload # array-like of known scalar-type -def masked_less(x: _ArrayLike[_ScalarT], value: ArrayLike, copy: bool = True) -> _MaskedArray[_ScalarT]: ... +def masked_less[ScalarT: np.generic](x: _ArrayLike[ScalarT], value: ArrayLike, copy: bool = True) -> _MaskedArray[ScalarT]: ... @overload # unknown array-like def masked_less(x: ArrayLike, value: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ... # keep in sync with other the `masked_*` functions @overload # known array with known shape and dtype -def masked_less_equal(x: ndarray[_ShapeT, _DTypeT], value: ArrayLike, copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ... +def masked_less_equal[ShapeT: _Shape, DTypeT: np.dtype]( + x: ndarray[ShapeT, DTypeT], + value: ArrayLike, + copy: bool = True, +) -> MaskedArray[ShapeT, DTypeT]: ... @overload # array-like of known scalar-type -def masked_less_equal(x: _ArrayLike[_ScalarT], value: ArrayLike, copy: bool = True) -> _MaskedArray[_ScalarT]: ... +def masked_less_equal[ScalarT: np.generic]( + x: _ArrayLike[ScalarT], + value: ArrayLike, + copy: bool = True, +) -> _MaskedArray[ScalarT]: ... @overload # unknown array-like def masked_less_equal(x: ArrayLike, value: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ... # keep in sync with other the `masked_*` functions @overload # known array with known shape and dtype -def masked_not_equal(x: ndarray[_ShapeT, _DTypeT], value: ArrayLike, copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ... 
+def masked_not_equal[ShapeT: _Shape, DTypeT: np.dtype]( + x: ndarray[ShapeT, DTypeT], + value: ArrayLike, + copy: bool = True, +) -> MaskedArray[ShapeT, DTypeT]: ... @overload # array-like of known scalar-type -def masked_not_equal(x: _ArrayLike[_ScalarT], value: ArrayLike, copy: bool = True) -> _MaskedArray[_ScalarT]: ... +def masked_not_equal[ScalarT: np.generic]( + x: _ArrayLike[ScalarT], + value: ArrayLike, + copy: bool = True, +) -> _MaskedArray[ScalarT]: ... @overload # unknown array-like def masked_not_equal(x: ArrayLike, value: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ... # keep in sync with other the `masked_*` functions @overload # known array with known shape and dtype -def masked_equal(x: ndarray[_ShapeT, _DTypeT], value: ArrayLike, copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ... +def masked_equal[ShapeT: _Shape, DTypeT: np.dtype]( + x: ndarray[ShapeT, DTypeT], + value: ArrayLike, + copy: bool = True, +) -> MaskedArray[ShapeT, DTypeT]: ... @overload # array-like of known scalar-type -def masked_equal(x: _ArrayLike[_ScalarT], value: ArrayLike, copy: bool = True) -> _MaskedArray[_ScalarT]: ... +def masked_equal[ScalarT: np.generic](x: _ArrayLike[ScalarT], value: ArrayLike, copy: bool = True) -> _MaskedArray[ScalarT]: ... @overload # unknown array-like def masked_equal(x: ArrayLike, value: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ... # keep in sync with other the `masked_*` functions @overload # known array with known shape and dtype -def masked_inside(x: ndarray[_ShapeT, _DTypeT], v1: ArrayLike, v2: ArrayLike, copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ... +def masked_inside[ShapeT: _Shape, DTypeT: np.dtype]( + x: ndarray[ShapeT, DTypeT], + v1: ArrayLike, + v2: ArrayLike, + copy: bool = True, +) -> MaskedArray[ShapeT, DTypeT]: ... @overload # array-like of known scalar-type -def masked_inside(x: _ArrayLike[_ScalarT], v1: ArrayLike, v2: ArrayLike, copy: bool = True) -> _MaskedArray[_ScalarT]: ... 
+def masked_inside[ScalarT: np.generic]( + x: _ArrayLike[ScalarT], + v1: ArrayLike, + v2: ArrayLike, + copy: bool = True, +) -> _MaskedArray[ScalarT]: ... @overload # unknown array-like def masked_inside(x: ArrayLike, v1: ArrayLike, v2: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ... # keep in sync with other the `masked_*` functions @overload # known array with known shape and dtype -def masked_outside(x: ndarray[_ShapeT, _DTypeT], v1: ArrayLike, v2: ArrayLike, copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ... +def masked_outside[ShapeT: _Shape, DTypeT: np.dtype]( + x: ndarray[ShapeT, DTypeT], + v1: ArrayLike, + v2: ArrayLike, + copy: bool = True, +) -> MaskedArray[ShapeT, DTypeT]: ... @overload # array-like of known scalar-type -def masked_outside(x: _ArrayLike[_ScalarT], v1: ArrayLike, v2: ArrayLike, copy: bool = True) -> _MaskedArray[_ScalarT]: ... +def masked_outside[ScalarT: np.generic]( + x: _ArrayLike[ScalarT], + v1: ArrayLike, + v2: ArrayLike, + copy: bool = True, +) -> _MaskedArray[ScalarT]: ... @overload # unknown array-like def masked_outside(x: ArrayLike, v1: ArrayLike, v2: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ... # only intended for object arrays, so we assume that's how it's always used in practice @overload -def masked_object( - x: np.ndarray[_ShapeT, np.dtype[np.object_]], +def masked_object[ShapeT: _Shape]( + x: np.ndarray[ShapeT, np.dtype[np.object_]], value: object, copy: bool = True, shrink: bool = True, -) -> MaskedArray[_ShapeT, np.dtype[np.object_]]: ... +) -> MaskedArray[ShapeT, np.dtype[np.object_]]: ... 
@overload def masked_object( x: _ArrayLikeObject_co, @@ -864,23 +912,23 @@ def masked_object( # keep roughly in sync with `filled` @overload -def masked_values( - x: np.ndarray[_ShapeT, _DTypeT], +def masked_values[ShapeT: _Shape, DTypeT: np.dtype]( + x: np.ndarray[ShapeT, DTypeT], value: _ScalarLike_co, rtol: float = 1e-5, atol: float = 1e-8, copy: bool = True, - shrink: bool = True -) -> MaskedArray[_ShapeT, _DTypeT]: ... + shrink: bool = True, +) -> MaskedArray[ShapeT, DTypeT]: ... @overload -def masked_values( - x: _ArrayLike[_ScalarT], +def masked_values[ScalarT: np.generic]( + x: _ArrayLike[ScalarT], value: _ScalarLike_co, rtol: float = 1e-5, atol: float = 1e-8, copy: bool = True, - shrink: bool = True -) -> _MaskedArray[_ScalarT]: ... + shrink: bool = True, +) -> _MaskedArray[ScalarT]: ... @overload def masked_values( x: ArrayLike, @@ -888,7 +936,7 @@ def masked_values( rtol: float = 1e-5, atol: float = 1e-8, copy: bool = True, - shrink: bool = True + shrink: bool = True, ) -> _MaskedArray[Incomplete]: ... # TODO: Support non-boolean mask dtypes, such as `np.void`. This will require adding an @@ -914,7 +962,7 @@ class MaskedIterator(Generic[_ShapeT_co, _DTypeT_co]): # Similar to `ndarray.__setitem__` but without the `void` case. @overload # flexible | object_ | bool def __setitem__( - self: MaskedIterator[Any, dtype[flexible | object_ | np.bool] | dtypes.StringDType], + self: MaskedIterator[Any, dtype[np.flexible | object_ | np.bool] | np.dtypes.StringDType], index: _ToIndices, value: object, /, @@ -958,15 +1006,15 @@ class MaskedIterator(Generic[_ShapeT_co, _DTypeT_co]): def __setitem__(self, index: _ToIndices, value: ArrayLike, /) -> None: ... # TODO: Returns `mvoid[(), _DTypeT_co]` for masks with `np.void` dtype. - def __next__(self: MaskedIterator[Any, np.dtype[_ScalarT]]) -> _ScalarT: ... + def __next__[ScalarT: np.generic](self: MaskedIterator[Any, np.dtype[ScalarT]]) -> ScalarT: ... 
class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): __array_priority__: Final[Literal[15]] = 15 @overload - def __new__( + def __new__[ScalarT: np.generic]( cls, - data: _ArrayLike[_ScalarT], + data: _ArrayLike[ScalarT], mask: _ArrayLikeBool_co = nomask, dtype: None = None, copy: bool = False, @@ -977,13 +1025,13 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): hard_mask: bool | None = None, shrink: bool = True, order: _OrderKACF | None = None, - ) -> _MaskedArray[_ScalarT]: ... + ) -> _MaskedArray[ScalarT]: ... @overload - def __new__( + def __new__[ScalarT: np.generic]( cls, data: object, mask: _ArrayLikeBool_co, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], copy: bool = False, subok: bool = True, ndmin: int = 0, @@ -992,14 +1040,14 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): hard_mask: bool | None = None, shrink: bool = True, order: _OrderKACF | None = None, - ) -> _MaskedArray[_ScalarT]: ... + ) -> _MaskedArray[ScalarT]: ... @overload - def __new__( + def __new__[ScalarT: np.generic]( cls, data: object, mask: _ArrayLikeBool_co = nomask, *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], copy: bool = False, subok: bool = True, ndmin: int = 0, @@ -1008,7 +1056,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): hard_mask: bool | None = None, shrink: bool = True, order: _OrderKACF | None = None, - ) -> _MaskedArray[_ScalarT]: ... + ) -> _MaskedArray[ScalarT]: ... @overload def __new__( cls, @@ -1025,44 +1073,56 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): order: _OrderKACF | None = None, ) -> _MaskedArray[Any]: ... - def __array_wrap__( + def __array_wrap__[ShapeT: _Shape, DTypeT: np.dtype]( self, - obj: ndarray[_ShapeT, _DTypeT], - context: tuple[ufunc, tuple[Any, ...], int] | None = None, + obj: ndarray[ShapeT, DTypeT], + context: tuple[np.ufunc, tuple[Any, ...], int] | None = None, return_scalar: bool = False, - ) -> MaskedArray[_ShapeT, _DTypeT]: ... + ) -> MaskedArray[ShapeT, DTypeT]: ... 
@overload # type: ignore[override] # () def view(self, /, dtype: None = None, type: None = None, fill_value: _ScalarLike_co | None = None) -> Self: ... @overload # (dtype: DTypeT) - def view( + def view[DTypeT: np.dtype]( self, /, - dtype: _DTypeT | _HasDType[_DTypeT], + dtype: DTypeT | _HasDType[DTypeT], type: None = None, - fill_value: _ScalarLike_co | None = None - ) -> MaskedArray[_ShapeT_co, _DTypeT]: ... + fill_value: _ScalarLike_co | None = None, + ) -> MaskedArray[_ShapeT_co, DTypeT]: ... @overload # (dtype: dtype[ScalarT]) - def view( + def view[ScalarT: np.generic]( self, /, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], type: None = None, - fill_value: _ScalarLike_co | None = None - ) -> MaskedArray[_ShapeT_co, dtype[_ScalarT]]: ... + fill_value: _ScalarLike_co | None = None, + ) -> MaskedArray[_ShapeT_co, dtype[ScalarT]]: ... @overload # ([dtype: _, ]*, type: ArrayT) - def view( + def view[ArrayT: np.ndarray]( self, /, dtype: DTypeLike | None = None, *, - type: type[_ArrayT], - fill_value: _ScalarLike_co | None = None - ) -> _ArrayT: ... + type: type[ArrayT], + fill_value: _ScalarLike_co | None = None, + ) -> ArrayT: ... @overload # (dtype: _, type: ArrayT) - def view(self, /, dtype: DTypeLike | None, type: type[_ArrayT], fill_value: _ScalarLike_co | None = None) -> _ArrayT: ... + def view[ArrayT: np.ndarray]( + self, + /, + dtype: DTypeLike | None, + type: type[ArrayT], + fill_value: _ScalarLike_co | None = None, + ) -> ArrayT: ... @overload # (dtype: ArrayT, /) - def view(self, /, dtype: type[_ArrayT], type: None = None, fill_value: _ScalarLike_co | None = None) -> _ArrayT: ... + def view[ArrayT: np.ndarray]( + self, + /, + dtype: type[ArrayT], + type: None = None, + fill_value: _ScalarLike_co | None = None, + ) -> ArrayT: ... @overload # (dtype: ?) def view( self, @@ -1071,8 +1131,8 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # overlaps with previous overloads. 
dtype: _VoidDTypeLike | str | None, type: None = None, - fill_value: _ScalarLike_co | None = None - ) -> MaskedArray[_ShapeT_co, dtype]: ... + fill_value: _ScalarLike_co | None = None, + ) -> MaskedArray[_ShapeT_co, np.dtype]: ... # Keep in sync with `ndarray.__getitem__` @overload @@ -1082,14 +1142,14 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def __getitem__(self, key: _ToIndices, /) -> MaskedArray[_AnyShape, _DTypeT_co]: ... @overload - def __getitem__(self: _MaskedArray[void], indx: str, /) -> MaskedArray[_ShapeT_co, dtype]: ... + def __getitem__(self: _MaskedArray[np.void], indx: str, /) -> MaskedArray[_ShapeT_co]: ... @overload - def __getitem__(self: _MaskedArray[void], indx: list[str], /) -> MaskedArray[_ShapeT_co, dtype[void]]: ... + def __getitem__(self: _MaskedArray[np.void], indx: list[str], /) -> MaskedArray[_ShapeT_co, dtype[np.void]]: ... @property def shape(self) -> _ShapeT_co: ... - @shape.setter - def shape(self: MaskedArray[_ShapeT, Any], shape: _ShapeT, /) -> None: ... + @shape.setter # type: ignore[override] + def shape[ShapeT: _Shape](self: MaskedArray[ShapeT, Any], shape: ShapeT, /) -> None: ... def __setmask__(self, mask: _ArrayLikeBool_co, copy: bool = False) -> None: ... @property @@ -1123,11 +1183,11 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def flat(self, value: ArrayLike, /) -> None: ... @property - def fill_value(self: _MaskedArray[_ScalarT]) -> _ScalarT: ... + def fill_value[ScalarT: np.generic](self: _MaskedArray[ScalarT]) -> ScalarT: ... @fill_value.setter def fill_value(self, value: _ScalarLike_co | None = None, /) -> None: ... - def get_fill_value(self: _MaskedArray[_ScalarT]) -> _ScalarT: ... + def get_fill_value[ScalarT: np.generic](self: _MaskedArray[ScalarT]) -> ScalarT: ... def set_fill_value(self, /, value: _ScalarLike_co | None = None) -> None: ... def filled(self, /, fill_value: _ScalarLike_co | None = None) -> ndarray[_ShapeT_co, _DTypeT_co]: ... 
@@ -1135,33 +1195,33 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # keep roughly in sync with `ma.core.compress`, but swap the first two arguments @overload # type: ignore[override] - def compress( + def compress[ArrayT: np.ndarray]( self, condition: _ArrayLikeBool_co, axis: _ShapeLike | None, - out: _ArrayT - ) -> _ArrayT: ... + out: ArrayT, + ) -> ArrayT: ... @overload - def compress( + def compress[ArrayT: np.ndarray]( self, condition: _ArrayLikeBool_co, axis: _ShapeLike | None = None, *, - out: _ArrayT - ) -> _ArrayT: ... + out: ArrayT, + ) -> ArrayT: ... @overload def compress( self, condition: _ArrayLikeBool_co, axis: None = None, - out: None = None + out: None = None, ) -> MaskedArray[tuple[int], _DTypeT_co]: ... @overload def compress( self, condition: _ArrayLikeBool_co, axis: _ShapeLike | None = None, - out: None = None + out: None = None, ) -> MaskedArray[_AnyShape, _DTypeT_co]: ... # TODO: How to deal with the non-commutative nature of `==` and `!=`? @@ -1176,13 +1236,17 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # Keep in sync with `ndarray.__add__` @overload # type: ignore[override] - def __add__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + def __add__[ScalarT: np.number]( + self: _MaskedArray[ScalarT], + other: int | np.bool, + /, + ) -> MaskedArray[_ShapeT_co, dtype[ScalarT]]: ... @overload - def __add__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... + def __add__[ScalarT: np.number](self: _MaskedArray[ScalarT], other: _ArrayLikeBool_co, /) -> _MaskedArray[ScalarT]: ... @overload def __add__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... @overload - def __add__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... + def __add__[ScalarT: np.number](self: _MaskedArray[np.bool], other: _ArrayLike[ScalarT], /) -> _MaskedArray[ScalarT]: ... 
@overload def __add__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... @overload @@ -1213,10 +1277,10 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def __add__(self: _MaskedArray[str_], other: _ArrayLikeStr_co, /) -> _MaskedArray[str_]: ... @overload def __add__( - self: MaskedArray[Any, dtypes.StringDType], + self: MaskedArray[Any, np.dtypes.StringDType], other: _ArrayLikeStr_co | _ArrayLikeString_co, /, - ) -> MaskedArray[_AnyShape, dtypes.StringDType]: ... + ) -> MaskedArray[_AnyShape, np.dtypes.StringDType]: ... @overload def __add__(self: _MaskedArray[object_], other: Any, /) -> Any: ... @overload @@ -1224,13 +1288,17 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # Keep in sync with `ndarray.__radd__` @overload # type: ignore[override] # signature equivalent to __add__ - def __radd__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + def __radd__[ScalarT: np.number]( + self: _MaskedArray[ScalarT], + other: int | np.bool, + /, + ) -> MaskedArray[_ShapeT_co, dtype[ScalarT]]: ... @overload - def __radd__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... + def __radd__[ScalarT: np.number](self: _MaskedArray[ScalarT], other: _ArrayLikeBool_co, /) -> _MaskedArray[ScalarT]: ... @overload def __radd__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... @overload - def __radd__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... + def __radd__[ScalarT: np.number](self: _MaskedArray[np.bool], other: _ArrayLike[ScalarT], /) -> _MaskedArray[ScalarT]: ... @overload def __radd__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... @overload @@ -1261,10 +1329,10 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def __radd__(self: _MaskedArray[str_], other: _ArrayLikeStr_co, /) -> _MaskedArray[str_]: ... 
@overload def __radd__( - self: MaskedArray[Any, dtypes.StringDType], + self: MaskedArray[Any, np.dtypes.StringDType], other: _ArrayLikeStr_co | _ArrayLikeString_co, /, - ) -> MaskedArray[_AnyShape, dtypes.StringDType]: ... + ) -> MaskedArray[_AnyShape, np.dtypes.StringDType]: ... @overload def __radd__(self: _MaskedArray[object_], other: Any, /) -> Any: ... @overload @@ -1272,13 +1340,17 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # Keep in sync with `ndarray.__sub__` @overload # type: ignore[override] - def __sub__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + def __sub__[ScalarT: np.number]( + self: _MaskedArray[ScalarT], + other: int | np.bool, + /, + ) -> MaskedArray[_ShapeT_co, dtype[ScalarT]]: ... @overload - def __sub__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... + def __sub__[ScalarT: np.number](self: _MaskedArray[ScalarT], other: _ArrayLikeBool_co, /) -> _MaskedArray[ScalarT]: ... @overload def __sub__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ... @overload - def __sub__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... + def __sub__[ScalarT: np.number](self: _MaskedArray[np.bool], other: _ArrayLike[ScalarT], /) -> _MaskedArray[ScalarT]: ... @overload def __sub__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... @overload @@ -1310,13 +1382,17 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # Keep in sync with `ndarray.__rsub__` @overload # type: ignore[override] - def __rsub__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + def __rsub__[ScalarT: np.number]( + self: _MaskedArray[ScalarT], + other: int | np.bool, + /, + ) -> MaskedArray[_ShapeT_co, dtype[ScalarT]]: ... 
@overload - def __rsub__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... + def __rsub__[ScalarT: np.number](self: _MaskedArray[ScalarT], other: _ArrayLikeBool_co, /) -> _MaskedArray[ScalarT]: ... @overload def __rsub__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ... @overload - def __rsub__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... + def __rsub__[ScalarT: np.number](self: _MaskedArray[np.bool], other: _ArrayLike[ScalarT], /) -> _MaskedArray[ScalarT]: ... @overload def __rsub__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... @overload @@ -1348,13 +1424,17 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # Keep in sync with `ndarray.__mul__` @overload # type: ignore[override] - def __mul__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + def __mul__[ScalarT: np.number]( + self: _MaskedArray[ScalarT], + other: int | np.bool, + /, + ) -> MaskedArray[_ShapeT_co, dtype[ScalarT]]: ... @overload - def __mul__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... + def __mul__[ScalarT: np.number](self: _MaskedArray[ScalarT], other: _ArrayLikeBool_co, /) -> _MaskedArray[ScalarT]: ... @overload def __mul__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... @overload - def __mul__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... + def __mul__[ScalarT: np.number](self: _MaskedArray[np.bool], other: _ArrayLike[ScalarT], /) -> _MaskedArray[ScalarT]: ... @overload def __mul__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... 
@overload @@ -1379,7 +1459,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def __mul__(self: _MaskedArrayFloat_co, other: _ArrayLike[timedelta64], /) -> _MaskedArray[timedelta64]: ... @overload def __mul__( - self: MaskedArray[Any, dtype[character] | dtypes.StringDType], + self: MaskedArray[Any, dtype[np.character] | np.dtypes.StringDType], other: _ArrayLikeInt, /, ) -> MaskedArray[tuple[Any, ...], _DTypeT_co]: ... @@ -1390,13 +1470,17 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # Keep in sync with `ndarray.__rmul__` @overload # type: ignore[override] # signature equivalent to __mul__ - def __rmul__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + def __rmul__[ScalarT: np.number]( + self: _MaskedArray[ScalarT], + other: int | np.bool, + /, + ) -> MaskedArray[_ShapeT_co, dtype[ScalarT]]: ... @overload - def __rmul__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... + def __rmul__[ScalarT: np.number](self: _MaskedArray[ScalarT], other: _ArrayLikeBool_co, /) -> _MaskedArray[ScalarT]: ... @overload def __rmul__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... @overload - def __rmul__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... + def __rmul__[ScalarT: np.number](self: _MaskedArray[np.bool], other: _ArrayLike[ScalarT], /) -> _MaskedArray[ScalarT]: ... @overload def __rmul__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... @overload @@ -1421,7 +1505,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def __rmul__(self: _MaskedArrayFloat_co, other: _ArrayLike[timedelta64], /) -> _MaskedArray[timedelta64]: ... 
@overload def __rmul__( - self: MaskedArray[Any, dtype[character] | dtypes.StringDType], + self: MaskedArray[Any, dtype[np.character] | np.dtypes.StringDType], other: _ArrayLikeInt, /, ) -> MaskedArray[tuple[Any, ...], _DTypeT_co]: ... @@ -1494,13 +1578,21 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # Keep in sync with `ndarray.__floordiv__` @overload # type: ignore[override] - def __floordiv__(self: _MaskedArray[_RealNumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_RealNumberT]]: ... + def __floordiv__[ScalarT: _RealNumber]( + self: _MaskedArray[ScalarT], + other: int | np.bool, + /, + ) -> MaskedArray[_ShapeT_co, dtype[ScalarT]]: ... @overload - def __floordiv__(self: _MaskedArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_RealNumberT]: ... + def __floordiv__[ScalarT: _RealNumber](self: _MaskedArray[ScalarT], other: _ArrayLikeBool_co, /) -> _MaskedArray[ScalarT]: ... @overload def __floordiv__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[int8]: ... @overload - def __floordiv__(self: _MaskedArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> _MaskedArray[_RealNumberT]: ... + def __floordiv__[ScalarT: _RealNumber]( + self: _MaskedArray[np.bool], + other: _ArrayLike[ScalarT], + /, + ) -> _MaskedArray[ScalarT]: ... @overload def __floordiv__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... @overload @@ -1524,13 +1616,25 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # Keep in sync with `ndarray.__rfloordiv__` @overload # type: ignore[override] - def __rfloordiv__(self: _MaskedArray[_RealNumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_RealNumberT]]: ... + def __rfloordiv__[ScalarT: _RealNumber]( + self: _MaskedArray[ScalarT], + other: int | np.bool, + /, + ) -> MaskedArray[_ShapeT_co, dtype[ScalarT]]: ... 
@overload - def __rfloordiv__(self: _MaskedArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_RealNumberT]: ... + def __rfloordiv__[ScalarT: _RealNumber]( + self: _MaskedArray[ScalarT], + other: _ArrayLikeBool_co, + /, + ) -> _MaskedArray[ScalarT]: ... @overload def __rfloordiv__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[int8]: ... @overload - def __rfloordiv__(self: _MaskedArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> _MaskedArray[_RealNumberT]: ... + def __rfloordiv__[ScalarT: _RealNumber]( + self: _MaskedArray[np.bool], + other: _ArrayLike[ScalarT], + /, + ) -> _MaskedArray[ScalarT]: ... @overload def __rfloordiv__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... @overload @@ -1552,13 +1656,17 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # Keep in sync with `ndarray.__pow__` (minus the `mod` parameter) @overload # type: ignore[override] - def __pow__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + def __pow__[ScalarT: np.number]( + self: _MaskedArray[ScalarT], + other: int | np.bool, + /, + ) -> MaskedArray[_ShapeT_co, dtype[ScalarT]]: ... @overload - def __pow__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... + def __pow__[ScalarT: np.number](self: _MaskedArray[ScalarT], other: _ArrayLikeBool_co, /) -> _MaskedArray[ScalarT]: ... @overload def __pow__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[int8]: ... @overload - def __pow__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... + def __pow__[ScalarT: np.number](self: _MaskedArray[np.bool], other: _ArrayLike[ScalarT], /) -> _MaskedArray[ScalarT]: ... @overload def __pow__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... 
@overload @@ -1584,13 +1692,17 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # Keep in sync with `ndarray.__rpow__` (minus the `mod` parameter) @overload # type: ignore[override] - def __rpow__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + def __rpow__[ScalarT: np.number]( + self: _MaskedArray[ScalarT], + other: int | np.bool, + /, + ) -> MaskedArray[_ShapeT_co, dtype[ScalarT]]: ... @overload - def __rpow__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... + def __rpow__[ScalarT: np.number](self: _MaskedArray[ScalarT], other: _ArrayLikeBool_co, /) -> _MaskedArray[ScalarT]: ... @overload def __rpow__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[int8]: ... @overload - def __rpow__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... + def __rpow__[ScalarT: np.number](self: _MaskedArray[np.bool], other: _ArrayLike[ScalarT], /) -> _MaskedArray[ScalarT]: ... @overload def __rpow__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... @overload @@ -1616,13 +1728,25 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # @property # type: ignore[misc] - def imag(self: _HasDTypeWithRealAndImag[object, _ScalarT], /) -> MaskedArray[_ShapeT_co, dtype[_ScalarT]]: ... # type: ignore[override] - def get_imag(self: _HasDTypeWithRealAndImag[object, _ScalarT], /) -> MaskedArray[_ShapeT_co, dtype[_ScalarT]]: ... + def imag[ScalarT: np.generic]( # type: ignore[override] + self: _HasDTypeWithRealAndImag[object, ScalarT], + /, + ) -> MaskedArray[_ShapeT_co, dtype[ScalarT]]: ... + def get_imag[ScalarT: np.generic]( + self: _HasDTypeWithRealAndImag[object, ScalarT], + /, + ) -> MaskedArray[_ShapeT_co, dtype[ScalarT]]: ... # @property # type: ignore[misc] - def real(self: _HasDTypeWithRealAndImag[_ScalarT, object], /) -> MaskedArray[_ShapeT_co, dtype[_ScalarT]]: ... 
# type: ignore[override] - def get_real(self: _HasDTypeWithRealAndImag[_ScalarT, object], /) -> MaskedArray[_ShapeT_co, dtype[_ScalarT]]: ... + def real[ScalarT: np.generic]( # type: ignore[override] + self: _HasDTypeWithRealAndImag[ScalarT, object], + /, + ) -> MaskedArray[_ShapeT_co, dtype[ScalarT]]: ... + def get_real[ScalarT: np.generic]( + self: _HasDTypeWithRealAndImag[ScalarT, object], + /, + ) -> MaskedArray[_ShapeT_co, dtype[ScalarT]]: ... # keep in sync with `np.ma.count` @overload @@ -1639,7 +1763,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload # (None) def reshape(self, shape: None, /, *, order: _OrderACF = "C", copy: bool | None = None) -> Self: ... @overload # (empty_sequence) - def reshape( # mypy false positive + def reshape( self, shape: Sequence[Never], /, @@ -1648,14 +1772,14 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): copy: bool | None = None, ) -> MaskedArray[tuple[()], _DTypeT_co]: ... @overload # (() | (int) | (int, int) | ....) # up to 8-d - def reshape( + def reshape[ShapeT: _Shape]( self, - shape: _AnyShapeT, + shape: ShapeT, /, *, order: _OrderACF = "C", copy: bool | None = None, - ) -> MaskedArray[_AnyShapeT, _DTypeT_co]: ... + ) -> MaskedArray[ShapeT, _DTypeT_co]: ... @overload # (index) def reshape( self, @@ -1753,20 +1877,20 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): keepdims: bool | _NoValueType = ..., ) -> bool_ | _MaskedArray[bool_]: ... @overload - def all( + def all[ArrayT: np.ndarray]( self, axis: _ShapeLike | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def all( + def all[ArrayT: np.ndarray]( self, axis: _ShapeLike | None, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... 
# Keep in sync with `ma.core.any` @overload # type: ignore[override] @@ -1799,20 +1923,20 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): keepdims: bool | _NoValueType = ..., ) -> bool_ | _MaskedArray[bool_]: ... @overload - def any( + def any[ArrayT: np.ndarray]( self, axis: _ShapeLike | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def any( + def any[ArrayT: np.ndarray]( self, axis: _ShapeLike | None, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... # Keep in sync with `ndarray.trace` and `ma.core.trace` @overload @@ -1825,30 +1949,30 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): out: None = None, ) -> Any: ... @overload - def trace( + def trace[ArrayT: np.ndarray]( self, # >= 2D MaskedArray offset: SupportsIndex = 0, axis1: SupportsIndex = 0, axis2: SupportsIndex = 1, dtype: DTypeLike | None = None, *, - out: _ArrayT, - ) -> _ArrayT: ... + out: ArrayT, + ) -> ArrayT: ... @overload - def trace( + def trace[ArrayT: np.ndarray]( self, # >= 2D MaskedArray offset: SupportsIndex, axis1: SupportsIndex, axis2: SupportsIndex, dtype: DTypeLike | None, - out: _ArrayT, - ) -> _ArrayT: ... + out: ArrayT, + ) -> ArrayT: ... # This differs from `ndarray.dot`, in that 1D dot 1D returns a 0D array. @overload def dot(self, b: ArrayLike, out: None = None, strict: bool = False) -> _MaskedArray[Any]: ... @overload - def dot(self, b: ArrayLike, out: _ArrayT, strict: bool = False) -> _ArrayT: ... + def dot[ArrayT: np.ndarray](self, b: ArrayLike, out: ArrayT, strict: bool = False) -> ArrayT: ... # Keep in sync with `ma.core.sum` @overload # type: ignore[override] @@ -1861,32 +1985,39 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): keepdims: bool | _NoValueType = ..., ) -> Any: ... 
@overload - def sum( + def sum[ArrayT: np.ndarray]( self, /, axis: _ShapeLike | None, dtype: DTypeLike | None, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def sum( + def sum[ArrayT: np.ndarray]( self, /, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... # Keep in sync with `ndarray.cumsum` and `ma.core.cumsum` @overload # out: None (default) - def cumsum(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> _MaskedArray[Any]: ... + def cumsum(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> MaskedArray: ... @overload # out: ndarray - def cumsum(self, /, axis: SupportsIndex | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... + def cumsum[ArrayT: np.ndarray](self, /, axis: SupportsIndex | None, dtype: DTypeLike | None, out: ArrayT) -> ArrayT: ... @overload - def cumsum(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... + def cumsum[ArrayT: np.ndarray]( + self, + /, + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + ) -> ArrayT: ... # Keep in sync with `ma.core.prod` @overload # type: ignore[override] @@ -1899,34 +2030,41 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): keepdims: bool | _NoValueType = ..., ) -> Any: ... @overload - def prod( + def prod[ArrayT: np.ndarray]( self, /, axis: _ShapeLike | None, dtype: DTypeLike | None, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def prod( + def prod[ArrayT: np.ndarray]( self, /, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... 
product = prod # Keep in sync with `ndarray.cumprod` and `ma.core.cumprod` @overload # out: None (default) - def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> _MaskedArray[Any]: ... + def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> MaskedArray: ... @overload # out: ndarray - def cumprod(self, /, axis: SupportsIndex | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... + def cumprod[ArrayT: np.ndarray](self, /, axis: SupportsIndex | None, dtype: DTypeLike | None, out: ArrayT) -> ArrayT: ... @overload - def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... + def cumprod[ArrayT: np.ndarray]( + self, + /, + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + ) -> ArrayT: ... # Keep in sync with `ma.core.mean` @overload # type: ignore[override] @@ -1938,24 +2076,24 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): keepdims: bool | _NoValueType = ..., ) -> Any: ... @overload - def mean( + def mean[ArrayT: np.ndarray]( self, /, axis: _ShapeLike | None, dtype: DTypeLike | None, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def mean( + def mean[ArrayT: np.ndarray]( self, /, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... # keep roughly in sync with `ma.core.anom` @overload @@ -1977,26 +2115,26 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): mean: _ArrayLikeNumber_co | _NoValueType = ..., ) -> Any: ... 
@overload - def var( + def var[ArrayT: np.ndarray]( self, axis: _ShapeLike | None, dtype: DTypeLike | None, - out: _ArrayT, + out: ArrayT, ddof: float = 0, keepdims: bool | _NoValueType = ..., mean: _ArrayLikeNumber_co | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def var( + def var[ArrayT: np.ndarray]( self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, + out: ArrayT, ddof: float = 0, keepdims: bool | _NoValueType = ..., mean: _ArrayLikeNumber_co | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... # keep in sync with `var` and `ma.core.std` @overload # type: ignore[override] @@ -2010,34 +2148,34 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): mean: _ArrayLikeNumber_co | _NoValueType = ..., ) -> Any: ... @overload - def std( + def std[ArrayT: np.ndarray]( self, axis: _ShapeLike | None, dtype: DTypeLike | None, - out: _ArrayT, + out: ArrayT, ddof: float = 0, keepdims: bool | _NoValueType = ..., mean: _ArrayLikeNumber_co | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def std( + def std[ArrayT: np.ndarray]( self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, + out: ArrayT, ddof: float = 0, keepdims: bool | _NoValueType = ..., mean: _ArrayLikeNumber_co | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... # Keep in sync with `ndarray.round` @overload # out=None (default) def round(self, /, decimals: SupportsIndex = 0, out: None = None) -> Self: ... @overload # out=ndarray - def round(self, /, decimals: SupportsIndex, out: _ArrayT) -> _ArrayT: ... + def round[ArrayT: np.ndarray](self, /, decimals: SupportsIndex, out: ArrayT) -> ArrayT: ... @overload - def round(self, /, decimals: SupportsIndex = 0, *, out: _ArrayT) -> _ArrayT: ... + def round[ArrayT: np.ndarray](self, /, decimals: SupportsIndex = 0, *, out: ArrayT) -> ArrayT: ... 
def argsort( # type: ignore[override] self, @@ -2070,23 +2208,23 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): keepdims: bool | _NoValueType = ..., ) -> Any: ... @overload - def argmin( + def argmin[ArrayT: np.ndarray]( self, axis: SupportsIndex | None = None, fill_value: _ScalarLike_co | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def argmin( + def argmin[ArrayT: np.ndarray]( self, axis: SupportsIndex | None, fill_value: _ScalarLike_co | None, - out: _ArrayT, + out: ArrayT, *, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... # Keep in-sync with np.ma.argmax @overload # type: ignore[override] @@ -2108,23 +2246,23 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): keepdims: bool | _NoValueType = ..., ) -> Any: ... @overload - def argmax( + def argmax[ArrayT: np.ndarray]( self, axis: SupportsIndex | None = None, fill_value: _ScalarLike_co | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def argmax( + def argmax[ArrayT: np.ndarray]( self, axis: SupportsIndex | None, fill_value: _ScalarLike_co | None, - out: _ArrayT, + out: ArrayT, *, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... # def sort( # type: ignore[override] @@ -2140,13 +2278,13 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # @overload # type: ignore[override] - def min( - self: _MaskedArray[_ScalarT], + def min[ScalarT: np.generic]( + self: _MaskedArray[ScalarT], axis: None = None, out: None = None, fill_value: _ScalarLike_co | None = None, keepdims: Literal[False] | _NoValueType = ..., - ) -> _ScalarT: ... + ) -> ScalarT: ... @overload def min( self, @@ -2156,32 +2294,32 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): keepdims: bool | _NoValueType = ... ) -> Any: ... 
@overload - def min( + def min[ArrayT: np.ndarray]( self, axis: _ShapeLike | None, - out: _ArrayT, + out: ArrayT, fill_value: _ScalarLike_co | None = None, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def min( + def min[ArrayT: np.ndarray]( self, axis: _ShapeLike | None = None, *, - out: _ArrayT, + out: ArrayT, fill_value: _ScalarLike_co | None = None, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... # @overload # type: ignore[override] - def max( - self: _MaskedArray[_ScalarT], + def max[ScalarT: np.generic]( + self: _MaskedArray[ScalarT], axis: None = None, out: None = None, fill_value: _ScalarLike_co | None = None, keepdims: Literal[False] | _NoValueType = ..., - ) -> _ScalarT: ... + ) -> ScalarT: ... @overload def max( self, @@ -2191,32 +2329,32 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): keepdims: bool | _NoValueType = ... ) -> Any: ... @overload - def max( + def max[ArrayT: np.ndarray]( self, axis: _ShapeLike | None, - out: _ArrayT, + out: ArrayT, fill_value: _ScalarLike_co | None = None, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def max( + def max[ArrayT: np.ndarray]( self, axis: _ShapeLike | None = None, *, - out: _ArrayT, + out: ArrayT, fill_value: _ScalarLike_co | None = None, keepdims: bool | _NoValueType = ..., - ) -> _ArrayT: ... + ) -> ArrayT: ... # @overload - def ptp( - self: _MaskedArray[_ScalarT], + def ptp[ScalarT: np.generic]( + self: _MaskedArray[ScalarT], axis: None = None, out: None = None, fill_value: _ScalarLike_co | None = None, keepdims: Literal[False] = False, - ) -> _ScalarT: ... + ) -> ScalarT: ... @overload def ptp( self, @@ -2226,22 +2364,22 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): keepdims: bool = False, ) -> Any: ... 
@overload - def ptp( + def ptp[ArrayT: np.ndarray]( self, axis: _ShapeLike | None, - out: _ArrayT, + out: ArrayT, fill_value: _ScalarLike_co | None = None, keepdims: bool = False, - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def ptp( + def ptp[ArrayT: np.ndarray]( self, axis: _ShapeLike | None = None, *, - out: _ArrayT, + out: ArrayT, fill_value: _ScalarLike_co | None = None, keepdims: bool = False, - ) -> _ArrayT: ... + ) -> ArrayT: ... # @overload @@ -2285,38 +2423,38 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # Keep in-sync with np.ma.take @overload # type: ignore[override] - def take( - self: _MaskedArray[_ScalarT], + def take[ScalarT: np.generic]( + self: _MaskedArray[ScalarT], indices: _IntLike_co, axis: None = None, out: None = None, mode: _ModeKind = "raise" - ) -> _ScalarT: ... + ) -> ScalarT: ... @overload - def take( - self: _MaskedArray[_ScalarT], + def take[ScalarT: np.generic]( + self: _MaskedArray[ScalarT], indices: _ArrayLikeInt_co, axis: SupportsIndex | None = None, out: None = None, mode: _ModeKind = "raise", - ) -> _MaskedArray[_ScalarT]: ... + ) -> _MaskedArray[ScalarT]: ... @overload - def take( + def take[ArrayT: np.ndarray]( self, indices: _ArrayLikeInt_co, axis: SupportsIndex | None, - out: _ArrayT, + out: ArrayT, mode: _ModeKind = "raise", - ) -> _ArrayT: ... + ) -> ArrayT: ... @overload - def take( + def take[ArrayT: np.ndarray]( self, indices: _ArrayLikeInt_co, axis: SupportsIndex | None = None, *, - out: _ArrayT, + out: ArrayT, mode: _ModeKind = "raise", - ) -> _ArrayT: ... + ) -> ArrayT: ... # keep in sync with `ndarray.diagonal` @override @@ -2370,19 +2508,23 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # keep in sync with `ndarray.tolist` @override @overload - def tolist(self: MaskedArray[tuple[Never], dtype[generic[_T]]], /, fill_value: _ScalarLike_co | None = None) -> Any: ... + def tolist[T](self: MaskedArray[tuple[Never], dtype[generic[T]]], /, fill_value: _ScalarLike_co | None = None) -> Any: ... 
@overload - def tolist(self: MaskedArray[tuple[()], dtype[generic[_T]]], /, fill_value: _ScalarLike_co | None = None) -> _T: ... + def tolist[T](self: MaskedArray[tuple[()], dtype[generic[T]]], /, fill_value: _ScalarLike_co | None = None) -> T: ... @overload - def tolist(self: MaskedArray[tuple[int], dtype[generic[_T]]], /, fill_value: _ScalarLike_co | None = None) -> list[_T]: ... + def tolist[T](self: _Masked1D[np.generic[T]], /, fill_value: _ScalarLike_co | None = None) -> list[T]: ... @overload - def tolist( - self: MaskedArray[tuple[int, int], dtype[generic[_T]]], /, fill_value: _ScalarLike_co | None = None - ) -> list[list[_T]]: ... + def tolist[T]( + self: MaskedArray[tuple[int, int], dtype[generic[T]]], + /, + fill_value: _ScalarLike_co | None = None, + ) -> list[list[T]]: ... @overload - def tolist( - self: MaskedArray[tuple[int, int, int], dtype[generic[_T]]], /, fill_value: _ScalarLike_co | None = None - ) -> list[list[list[_T]]]: ... + def tolist[T]( + self: MaskedArray[tuple[int, int, int], dtype[generic[T]]], + /, + fill_value: _ScalarLike_co | None = None, + ) -> list[list[list[T]]]: ... @overload def tolist(self, /, fill_value: _ScalarLike_co | None = None) -> Any: ... @@ -2398,7 +2540,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @property def dtype(self) -> _DTypeT_co: ... @dtype.setter - def dtype(self: MaskedArray[_AnyShape, _DTypeT], dtype: _DTypeT, /) -> None: ... + def dtype[DTypeT: np.dtype](self: MaskedArray[_AnyShape, DTypeT], dtype: DTypeT, /) -> None: ... class mvoid(MaskedArray[_ShapeT_co, _DTypeT_co]): def __new__( @@ -2447,12 +2589,12 @@ class MaskedConstant(MaskedArray[tuple[()], dtype[float64]]): masked: Final[MaskedConstant] = ... masked_singleton: Final[MaskedConstant] = ... 
-masked_array: TypeAlias = MaskedArray +type masked_array = MaskedArray # keep in sync with `MaskedArray.__new__` @overload -def array( - data: _ArrayLike[_ScalarT], +def array[ScalarT: np.generic]( + data: _ArrayLike[ScalarT], dtype: None = None, copy: bool = False, order: _OrderKACF | None = None, @@ -2463,11 +2605,11 @@ def array( shrink: bool = True, subok: bool = True, ndmin: int = 0, -) -> _MaskedArray[_ScalarT]: ... +) -> _MaskedArray[ScalarT]: ... @overload -def array( +def array[ScalarT: np.generic]( data: object, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], copy: bool = False, order: _OrderKACF | None = None, mask: _ArrayLikeBool_co = nomask, @@ -2477,9 +2619,9 @@ def array( shrink: bool = True, subok: bool = True, ndmin: int = 0, -) -> _MaskedArray[_ScalarT]: ... +) -> _MaskedArray[ScalarT]: ... @overload -def array( +def array[ScalarT: np.generic]( data: object, dtype: DTypeLike | None = None, copy: bool = False, @@ -2491,37 +2633,61 @@ def array( shrink: bool = True, subok: bool = True, ndmin: int = 0, -) -> _MaskedArray[_ScalarT]: ... +) -> _MaskedArray[ScalarT]: ... # keep in sync with `array` @overload -def asarray(a: _ArrayLike[_ScalarT], dtype: None = None, order: _OrderKACF | None = None) -> _MaskedArray[_ScalarT]: ... +def asarray[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + dtype: None = None, + order: _OrderKACF | None = None, +) -> _MaskedArray[ScalarT]: ... @overload -def asarray(a: object, dtype: _DTypeLike[_ScalarT], order: _OrderKACF | None = None) -> _MaskedArray[_ScalarT]: ... +def asarray[ScalarT: np.generic]( + a: object, + dtype: _DTypeLike[ScalarT], + order: _OrderKACF | None = None, +) -> _MaskedArray[ScalarT]: ... @overload -def asarray(a: object, dtype: DTypeLike | None = None, order: _OrderKACF | None = None) -> _MaskedArray[_ScalarT]: ... +def asarray[ScalarT: np.generic]( + a: object, + dtype: DTypeLike | None = None, + order: _OrderKACF | None = None, +) -> _MaskedArray[ScalarT]: ... 
# keep in sync with `asarray` (but note the additional first overload) @overload -def asanyarray(a: _MArrayT, dtype: None = None, order: _OrderKACF | None = None) -> _MArrayT: ... +def asanyarray[MArrayT: MaskedArray](a: MArrayT, dtype: None = None, order: _OrderKACF | None = None) -> MArrayT: ... @overload -def asanyarray(a: _ArrayLike[_ScalarT], dtype: None = None, order: _OrderKACF | None = None) -> _MaskedArray[_ScalarT]: ... +def asanyarray[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + dtype: None = None, + order: _OrderKACF | None = None, +) -> _MaskedArray[ScalarT]: ... @overload -def asanyarray(a: object, dtype: _DTypeLike[_ScalarT], order: _OrderKACF | None = None) -> _MaskedArray[_ScalarT]: ... +def asanyarray[ScalarT: np.generic]( + a: object, + dtype: _DTypeLike[ScalarT], + order: _OrderKACF | None = None, +) -> _MaskedArray[ScalarT]: ... @overload -def asanyarray(a: object, dtype: DTypeLike | None = None, order: _OrderKACF | None = None) -> _MaskedArray[_ScalarT]: ... +def asanyarray[ScalarT: np.generic]( + a: object, + dtype: DTypeLike | None = None, + order: _OrderKACF | None = None, +) -> _MaskedArray[ScalarT]: ... # def is_masked(x: object) -> bool: ... @overload -def min( - obj: _ArrayLike[_ScalarT], +def min[ScalarT: np.generic]( + obj: _ArrayLike[ScalarT], axis: None = None, out: None = None, fill_value: _ScalarLike_co | None = None, keepdims: Literal[False] | _NoValueType = ..., -) -> _ScalarT: ... +) -> ScalarT: ... @overload def min( obj: ArrayLike, @@ -2531,31 +2697,31 @@ def min( keepdims: bool | _NoValueType = ... ) -> Any: ... @overload -def min( +def min[ArrayT: np.ndarray]( obj: ArrayLike, axis: _ShapeLike | None, - out: _ArrayT, + out: ArrayT, fill_value: _ScalarLike_co | None = None, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... 
@overload -def min( +def min[ArrayT: np.ndarray]( obj: ArrayLike, axis: _ShapeLike | None = None, *, - out: _ArrayT, + out: ArrayT, fill_value: _ScalarLike_co | None = None, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def max( - obj: _ArrayLike[_ScalarT], +def max[ScalarT: np.generic]( + obj: _ArrayLike[ScalarT], axis: None = None, out: None = None, fill_value: _ScalarLike_co | None = None, keepdims: Literal[False] | _NoValueType = ..., -) -> _ScalarT: ... +) -> ScalarT: ... @overload def max( obj: ArrayLike, @@ -2565,31 +2731,31 @@ def max( keepdims: bool | _NoValueType = ... ) -> Any: ... @overload -def max( +def max[ArrayT: np.ndarray]( obj: ArrayLike, axis: _ShapeLike | None, - out: _ArrayT, + out: ArrayT, fill_value: _ScalarLike_co | None = None, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def max( +def max[ArrayT: np.ndarray]( obj: ArrayLike, axis: _ShapeLike | None = None, *, - out: _ArrayT, + out: ArrayT, fill_value: _ScalarLike_co | None = None, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def ptp( - obj: _ArrayLike[_ScalarT], +def ptp[ScalarT: np.generic]( + obj: _ArrayLike[ScalarT], axis: None = None, out: None = None, fill_value: _ScalarLike_co | None = None, keepdims: Literal[False] | _NoValueType = ..., -) -> _ScalarT: ... +) -> ScalarT: ... @overload def ptp( obj: ArrayLike, @@ -2599,22 +2765,22 @@ def ptp( keepdims: bool | _NoValueType = ... ) -> Any: ... @overload -def ptp( +def ptp[ArrayT: np.ndarray]( obj: ArrayLike, axis: _ShapeLike | None, - out: _ArrayT, + out: ArrayT, fill_value: _ScalarLike_co | None = None, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def ptp( +def ptp[ArrayT: np.ndarray]( obj: ArrayLike, axis: _ShapeLike | None = None, *, - out: _ArrayT, + out: ArrayT, fill_value: _ScalarLike_co | None = None, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... 
+) -> ArrayT: ... # we cannot meaningfully annotate `frommethod` further, because the callable signature # of the return type fully depends on the *value* of `methodname` and `reversed` in @@ -2625,44 +2791,47 @@ def _frommethod(methodname: str, reversed: bool = False) -> types.FunctionType: # since their use-cases are specific to masks, they only accept `MaskedArray` inputs. # keep in sync with `MaskedArray.harden_mask` -def harden_mask(a: _MArrayT) -> _MArrayT: ... +def harden_mask[MArrayT: MaskedArray](a: MArrayT) -> MArrayT: ... # keep in sync with `MaskedArray.soften_mask` -def soften_mask(a: _MArrayT) -> _MArrayT: ... +def soften_mask[MArrayT: MaskedArray](a: MArrayT) -> MArrayT: ... # keep in sync with `MaskedArray.shrink_mask` -def shrink_mask(a: _MArrayT) -> _MArrayT: ... +def shrink_mask[MArrayT: MaskedArray](a: MArrayT) -> MArrayT: ... # keep in sync with `MaskedArray.ids` def ids(a: ArrayLike) -> tuple[int, int]: ... # keep in sync with `ndarray.nonzero` -def nonzero(a: ArrayLike) -> tuple[ndarray[tuple[int], np.dtype[intp]], ...]: ... +def nonzero(a: ArrayLike) -> tuple[_Array1D[np.intp], ...]: ... # keep first overload in sync with `MaskedArray.ravel` @overload -def ravel(a: np.ndarray[Any, _DTypeT], order: _OrderKACF = "C") -> MaskedArray[tuple[int], _DTypeT]: ... +def ravel[DTypeT: np.dtype](a: np.ndarray[Any, DTypeT], order: _OrderKACF = "C") -> MaskedArray[tuple[int], DTypeT]: ... @overload -def ravel(a: _ArrayLike[_ScalarT], order: _OrderKACF = "C") -> MaskedArray[tuple[int], np.dtype[_ScalarT]]: ... +def ravel[ScalarT: np.generic](a: _ArrayLike[ScalarT], order: _OrderKACF = "C") -> _Masked1D[ScalarT]: ... @overload def ravel(a: ArrayLike, order: _OrderKACF = "C") -> MaskedArray[tuple[int], _DTypeT_co]: ... # keep roughly in sync with `lib._function_base_impl.copy` @overload -def copy(a: _MArrayT, order: _OrderKACF = "C") -> _MArrayT: ... +def copy[MArrayT: MaskedArray](a: MArrayT, order: _OrderKACF = "C") -> MArrayT: ... 
@overload -def copy(a: np.ndarray[_ShapeT, _DTypeT], order: _OrderKACF = "C") -> MaskedArray[_ShapeT, _DTypeT]: ... +def copy[ShapeT: _Shape, DTypeT: np.dtype]( + a: np.ndarray[ShapeT, DTypeT], + order: _OrderKACF = "C", +) -> MaskedArray[ShapeT, DTypeT]: ... @overload -def copy(a: _ArrayLike[_ScalarT], order: _OrderKACF = "C") -> _MaskedArray[_ScalarT]: ... +def copy[ScalarT: np.generic](a: _ArrayLike[ScalarT], order: _OrderKACF = "C") -> _MaskedArray[ScalarT]: ... @overload def copy(a: ArrayLike, order: _OrderKACF = "C") -> _MaskedArray[Incomplete]: ... # keep in sync with `_core.fromnumeric.diagonal` @overload -def diagonal( - a: _ArrayLike[_ScalarT], +def diagonal[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], offset: SupportsIndex = 0, axis1: SupportsIndex = 0, axis2: SupportsIndex = 1, -) -> NDArray[_ScalarT]: ... +) -> NDArray[ScalarT]: ... @overload def diagonal( a: ArrayLike, @@ -2673,32 +2842,49 @@ def diagonal( # keep in sync with `_core.fromnumeric.repeat` @overload -def repeat(a: _ArrayLike[_ScalarT], repeats: _ArrayLikeInt_co, axis: None = None) -> MaskedArray[tuple[int], dtype[_ScalarT]]: ... +def repeat[ScalarT: np.generic](a: _ArrayLike[ScalarT], repeats: _ArrayLikeInt_co, axis: None = None) -> _Masked1D[ScalarT]: ... @overload -def repeat(a: _ArrayLike[_ScalarT], repeats: _ArrayLikeInt_co, axis: SupportsIndex) -> _MaskedArray[_ScalarT]: ... +def repeat[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + repeats: _ArrayLikeInt_co, + axis: SupportsIndex, +) -> _MaskedArray[ScalarT]: ... @overload -def repeat(a: ArrayLike, repeats: _ArrayLikeInt_co, axis: None = None) -> MaskedArray[tuple[int], dtype[Incomplete]]: ... +def repeat(a: ArrayLike, repeats: _ArrayLikeInt_co, axis: None = None) -> _Masked1D[Incomplete]: ... @overload def repeat(a: ArrayLike, repeats: _ArrayLikeInt_co, axis: SupportsIndex) -> _MaskedArray[Incomplete]: ... 
# keep in sync with `_core.fromnumeric.swapaxes` @overload -def swapaxes(a: _MArrayT, axis1: SupportsIndex, axis2: SupportsIndex) -> _MArrayT: ... +def swapaxes[MArrayT: MaskedArray](a: MArrayT, axis1: SupportsIndex, axis2: SupportsIndex) -> MArrayT: ... @overload -def swapaxes(a: _ArrayLike[_ScalarT], axis1: SupportsIndex, axis2: SupportsIndex) -> _MaskedArray[_ScalarT]: ... +def swapaxes[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + axis1: SupportsIndex, + axis2: SupportsIndex, +) -> _MaskedArray[ScalarT]: ... @overload def swapaxes(a: ArrayLike, axis1: SupportsIndex, axis2: SupportsIndex) -> _MaskedArray[Incomplete]: ... # NOTE: The `MaskedArray.anom` definition is specific to `MaskedArray`, so we need # additional overloads to cover the array-like input here. @overload # a: MaskedArray, dtype=None -def anom(a: _MArrayT, axis: SupportsIndex | None = None, dtype: None = None) -> _MArrayT: ... +def anom[MArrayT: MaskedArray](a: MArrayT, axis: SupportsIndex | None = None, dtype: None = None) -> MArrayT: ... @overload # a: array-like, dtype=None -def anom(a: _ArrayLike[_ScalarT], axis: SupportsIndex | None = None, dtype: None = None) -> _MaskedArray[_ScalarT]: ... +def anom[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + axis: SupportsIndex | None = None, + dtype: None = None, +) -> _MaskedArray[ScalarT]: ... @overload # a: unknown array-like, dtype: dtype-like (positional) -def anom(a: ArrayLike, axis: SupportsIndex | None, dtype: _DTypeLike[_ScalarT]) -> _MaskedArray[_ScalarT]: ... +def anom[ScalarT: np.generic](a: ArrayLike, axis: SupportsIndex | None, dtype: _DTypeLike[ScalarT]) -> _MaskedArray[ScalarT]: ... @overload # a: unknown array-like, dtype: dtype-like (keyword) -def anom(a: ArrayLike, axis: SupportsIndex | None = None, *, dtype: _DTypeLike[_ScalarT]) -> _MaskedArray[_ScalarT]: ... +def anom[ScalarT: np.generic]( + a: ArrayLike, + axis: SupportsIndex | None = None, + *, + dtype: _DTypeLike[ScalarT], +) -> _MaskedArray[ScalarT]: ... 
@overload # a: unknown array-like, dtype: unknown dtype-like (positional) def anom(a: ArrayLike, axis: SupportsIndex | None, dtype: DTypeLike) -> _MaskedArray[Incomplete]: ... @overload # a: unknown array-like, dtype: unknown dtype-like (keyword) @@ -2715,12 +2901,26 @@ def all(a: ArrayLike, axis: _ShapeLike | None, out: None, keepdims: Literal[True def all(a: ArrayLike, axis: _ShapeLike | None = None, out: None = None, *, keepdims: Literal[True]) -> _MaskedArray[np.bool]: ... @overload def all( - a: ArrayLike, axis: _ShapeLike | None = None, out: None = None, keepdims: bool | _NoValueType = ... + a: ArrayLike, + axis: _ShapeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., ) -> np.bool | _MaskedArray[np.bool]: ... @overload -def all(a: ArrayLike, axis: _ShapeLike | None, out: _ArrayT, keepdims: bool | _NoValueType = ...) -> _ArrayT: ... +def all[ArrayT: np.ndarray]( + a: ArrayLike, + axis: _ShapeLike | None, + out: ArrayT, + keepdims: bool | _NoValueType = ..., +) -> ArrayT: ... @overload -def all(a: ArrayLike, axis: _ShapeLike | None = None, *, out: _ArrayT, keepdims: bool | _NoValueType = ...) -> _ArrayT: ... +def all[ArrayT: np.ndarray]( + a: ArrayLike, + axis: _ShapeLike | None = None, + *, + out: ArrayT, + keepdims: bool | _NoValueType = ..., +) -> ArrayT: ... # Keep in sync with `all` and `MaskedArray.any` @overload @@ -2731,53 +2931,99 @@ def any(a: ArrayLike, axis: _ShapeLike | None, out: None, keepdims: Literal[True def any(a: ArrayLike, axis: _ShapeLike | None = None, out: None = None, *, keepdims: Literal[True]) -> _MaskedArray[np.bool]: ... @overload def any( - a: ArrayLike, axis: _ShapeLike | None = None, out: None = None, keepdims: bool | _NoValueType = ... + a: ArrayLike, + axis: _ShapeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., ) -> np.bool | _MaskedArray[np.bool]: ... @overload -def any(a: ArrayLike, axis: _ShapeLike | None, out: _ArrayT, keepdims: bool | _NoValueType = ...) 
-> _ArrayT: ... +def any[ArrayT: np.ndarray]( + a: ArrayLike, + axis: _ShapeLike | None, + out: ArrayT, + keepdims: bool | _NoValueType = ..., +) -> ArrayT: ... @overload -def any(a: ArrayLike, axis: _ShapeLike | None = None, *, out: _ArrayT, keepdims: bool | _NoValueType = ...) -> _ArrayT: ... +def any[ArrayT: np.ndarray]( + a: ArrayLike, + axis: _ShapeLike | None = None, + *, + out: ArrayT, keepdims: bool | _NoValueType = ..., +) -> ArrayT: ... # NOTE: The `MaskedArray.compress` definition uses its `DTypeT_co` type parameter, # which wouldn't work here for array-like inputs, so we need additional overloads. @overload -def compress( - condition: _ArrayLikeBool_co, a: _ArrayLike[_ScalarT], axis: None = None, out: None = None -) -> MaskedArray[tuple[int], np.dtype[_ScalarT]]: ... +def compress[ScalarT: np.generic]( + condition: _ArrayLikeBool_co, + a: _ArrayLike[ScalarT], + axis: None = None, + out: None = None, +) -> _Masked1D[ScalarT]: ... @overload -def compress( - condition: _ArrayLikeBool_co, a: _ArrayLike[_ScalarT], axis: _ShapeLike | None = None, out: None = None -) -> MaskedArray[_AnyShape, np.dtype[_ScalarT]]: ... +def compress[ScalarT: np.generic]( + condition: _ArrayLikeBool_co, + a: _ArrayLike[ScalarT], + axis: _ShapeLike | None = None, + out: None = None, +) -> _MaskedArray[ScalarT]: ... @overload -def compress(condition: _ArrayLikeBool_co, a: ArrayLike, axis: None = None, out: None = None) -> MaskedArray[tuple[int]]: ... +def compress(condition: _ArrayLikeBool_co, a: ArrayLike, axis: None = None, out: None = None) -> _Masked1D[Incomplete]: ... @overload def compress( - condition: _ArrayLikeBool_co, a: ArrayLike, axis: _ShapeLike | None = None, out: None = None + condition: _ArrayLikeBool_co, + a: ArrayLike, + axis: _ShapeLike | None = None, + out: None = None, ) -> _MaskedArray[Incomplete]: ... @overload -def compress(condition: _ArrayLikeBool_co, a: ArrayLike, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ... 
+def compress[ArrayT: np.ndarray](condition: _ArrayLikeBool_co, a: ArrayLike, axis: _ShapeLike | None, out: ArrayT) -> ArrayT: ... @overload -def compress(condition: _ArrayLikeBool_co, a: ArrayLike, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... +def compress[ArrayT: np.ndarray]( + condition: _ArrayLikeBool_co, + a: ArrayLike, + axis: _ShapeLike | None = None, + *, + out: ArrayT, +) -> ArrayT: ... # Keep in sync with `cumprod` and `MaskedArray.cumsum` @overload # out: None (default) def cumsum( - a: ArrayLike, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None + a: ArrayLike, + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, + out: None = None, ) -> _MaskedArray[Incomplete]: ... @overload # out: ndarray (positional) -def cumsum(a: ArrayLike, axis: SupportsIndex | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... +def cumsum[ArrayT: np.ndarray](a: ArrayLike, axis: SupportsIndex | None, dtype: DTypeLike | None, out: ArrayT) -> ArrayT: ... @overload # out: ndarray (kwarg) -def cumsum(a: ArrayLike, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... +def cumsum[ArrayT: np.ndarray]( + a: ArrayLike, + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, +) -> ArrayT: ... # Keep in sync with `cumsum` and `MaskedArray.cumsum` @overload # out: None (default) def cumprod( - a: ArrayLike, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None + a: ArrayLike, + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, + out: None = None, ) -> _MaskedArray[Incomplete]: ... @overload # out: ndarray (positional) -def cumprod(a: ArrayLike, axis: SupportsIndex | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... +def cumprod[ArrayT: np.ndarray](a: ArrayLike, axis: SupportsIndex | None, dtype: DTypeLike | None, out: ArrayT) -> ArrayT: ... 
@overload # out: ndarray (kwarg) -def cumprod(a: ArrayLike, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... +def cumprod[ArrayT: np.ndarray]( + a: ArrayLike, + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, +) -> ArrayT: ... # Keep in sync with `sum`, `prod`, `product`, and `MaskedArray.mean` @overload @@ -2789,22 +3035,22 @@ def mean( keepdims: bool | _NoValueType = ..., ) -> Incomplete: ... @overload -def mean( +def mean[ArrayT: np.ndarray]( a: ArrayLike, axis: _ShapeLike | None, dtype: DTypeLike | None, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def mean( +def mean[ArrayT: np.ndarray]( a: ArrayLike, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... # Keep in sync with `mean`, `prod`, `product`, and `MaskedArray.sum` @overload @@ -2816,22 +3062,22 @@ def sum( keepdims: bool | _NoValueType = ..., ) -> Incomplete: ... @overload -def sum( +def sum[ArrayT: np.ndarray]( a: ArrayLike, axis: _ShapeLike | None, dtype: DTypeLike | None, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def sum( +def sum[ArrayT: np.ndarray]( a: ArrayLike, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... # Keep in sync with `product` and `MaskedArray.prod` @overload @@ -2843,22 +3089,22 @@ def prod( keepdims: bool | _NoValueType = ..., ) -> Incomplete: ... @overload -def prod( +def prod[ArrayT: np.ndarray]( a: ArrayLike, axis: _ShapeLike | None, dtype: DTypeLike | None, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... 
@overload -def prod( +def prod[ArrayT: np.ndarray]( a: ArrayLike, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... # Keep in sync with `prod` and `MaskedArray.prod` @overload @@ -2870,22 +3116,22 @@ def product( keepdims: bool | _NoValueType = ..., ) -> Incomplete: ... @overload -def product( +def product[ArrayT: np.ndarray]( a: ArrayLike, axis: _ShapeLike | None, dtype: DTypeLike | None, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def product( +def product[ArrayT: np.ndarray]( a: ArrayLike, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... # Keep in sync with `MaskedArray.trace` and `_core.fromnumeric.trace` @overload @@ -2898,24 +3144,24 @@ def trace( out: None = None, ) -> Incomplete: ... @overload -def trace( +def trace[ArrayT: np.ndarray]( a: ArrayLike, offset: SupportsIndex, axis1: SupportsIndex, axis2: SupportsIndex, dtype: DTypeLike | None, - out: _ArrayT, -) -> _ArrayT: ... + out: ArrayT, +) -> ArrayT: ... @overload -def trace( +def trace[ArrayT: np.ndarray]( a: ArrayLike, offset: SupportsIndex = 0, axis1: SupportsIndex = 0, axis2: SupportsIndex = 1, dtype: DTypeLike | None = None, *, - out: _ArrayT, -) -> _ArrayT: ... + out: ArrayT, +) -> ArrayT: ... # keep in sync with `std` and `MaskedArray.var` @overload @@ -2929,26 +3175,26 @@ def std( mean: _ArrayLikeNumber_co | _NoValueType = ..., ) -> Incomplete: ... @overload -def std( +def std[ArrayT: np.ndarray]( a: ArrayLike, axis: _ShapeLike | None, dtype: DTypeLike | None, - out: _ArrayT, + out: ArrayT, ddof: float = 0, keepdims: bool | _NoValueType = ..., mean: _ArrayLikeNumber_co | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... 
@overload -def std( +def std[ArrayT: np.ndarray]( a: ArrayLike, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, + out: ArrayT, ddof: float = 0, keepdims: bool | _NoValueType = ..., mean: _ArrayLikeNumber_co | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... # keep in sync with `std` and `MaskedArray.var` @overload @@ -2962,26 +3208,26 @@ def var( mean: _ArrayLikeNumber_co | _NoValueType = ..., ) -> Incomplete: ... @overload -def var( +def var[ArrayT: np.ndarray]( a: ArrayLike, axis: _ShapeLike | None, dtype: DTypeLike | None, - out: _ArrayT, + out: ArrayT, ddof: float = 0, keepdims: bool | _NoValueType = ..., mean: _ArrayLikeNumber_co | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def var( +def var[ArrayT: np.ndarray]( a: ArrayLike, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, - out: _ArrayT, + out: ArrayT, ddof: float = 0, keepdims: bool | _NoValueType = ..., mean: _ArrayLikeNumber_co | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... # (a, b) minimum: _extrema_operation = ... @@ -3017,23 +3263,23 @@ def argmin( keepdims: bool | _NoValueType = ..., ) -> Any: ... @overload -def argmin( +def argmin[ArrayT: np.ndarray]( a: ArrayLike, axis: SupportsIndex | None = None, fill_value: _ScalarLike_co | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def argmin( +def argmin[ArrayT: np.ndarray]( a: ArrayLike, axis: SupportsIndex | None, fill_value: _ScalarLike_co | None, - out: _ArrayT, + out: ArrayT, *, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... # keep in sync with `argmin` @overload @@ -3055,40 +3301,40 @@ def argmax( keepdims: bool | _NoValueType = ..., ) -> Any: ... 
@overload -def argmax( +def argmax[ArrayT: np.ndarray]( a: ArrayLike, axis: SupportsIndex | None = None, fill_value: _ScalarLike_co | None = None, *, - out: _ArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def argmax( +def argmax[ArrayT: np.ndarray]( a: ArrayLike, axis: SupportsIndex | None, fill_value: _ScalarLike_co | None, - out: _ArrayT, + out: ArrayT, *, keepdims: bool | _NoValueType = ..., -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def take( - a: _ArrayLike[_ScalarT], +def take[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], indices: _IntLike_co, axis: None = None, out: None = None, - mode: _ModeKind = "raise" -) -> _ScalarT: ... + mode: _ModeKind = "raise", +) -> ScalarT: ... @overload -def take( - a: _ArrayLike[_ScalarT], +def take[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], indices: _ArrayLikeInt_co, axis: SupportsIndex | None = None, out: None = None, mode: _ModeKind = "raise", -) -> _MaskedArray[_ScalarT]: ... +) -> _MaskedArray[ScalarT]: ... @overload def take( a: ArrayLike, @@ -3106,28 +3352,29 @@ def take( mode: _ModeKind = "raise", ) -> _MaskedArray[Any]: ... @overload -def take( +def take[ArrayT: np.ndarray]( a: ArrayLike, indices: _ArrayLikeInt_co, axis: SupportsIndex | None, - out: _ArrayT, + out: ArrayT, mode: _ModeKind = "raise", -) -> _ArrayT: ... +) -> ArrayT: ... @overload -def take( +def take[ArrayT: np.ndarray]( a: ArrayLike, indices: _ArrayLikeInt_co, axis: SupportsIndex | None = None, *, - out: _ArrayT, + out: ArrayT, mode: _ModeKind = "raise", -) -> _ArrayT: ... +) -> ArrayT: ... def power(a, b, third=None): ... def argsort(a, axis=..., kind=None, order=None, endwith=True, fill_value=None, *, stable=None): ... 
+ @overload -def sort( - a: _ArrayT, +def sort[ArrayT: np.ndarray]( + a: ArrayT, axis: SupportsIndex = -1, kind: _SortKind | None = None, order: str | Sequence[str] | None = None, @@ -3135,7 +3382,7 @@ def sort( fill_value: _ScalarLike_co | None = None, *, stable: Literal[False] | None = None, -) -> _ArrayT: ... +) -> ArrayT: ... @overload def sort( a: ArrayLike, @@ -3147,10 +3394,12 @@ def sort( *, stable: Literal[False] | None = None, ) -> NDArray[Any]: ... + @overload -def compressed(x: _ArrayLike[_ScalarT_co]) -> _Array1D[_ScalarT_co]: ... +def compressed[ScalarT: np.generic](x: _ArrayLike[ScalarT]) -> _Array1D[ScalarT]: ... @overload def compressed(x: ArrayLike) -> _Array1D[Any]: ... + def concatenate(arrays, axis=0): ... def diag(v, k=0): ... def left_shift(a, n): ... @@ -3197,18 +3446,18 @@ def _convert2ma( # keep in sync with `_core.multiarray.arange` @overload # dtype= -def arange( +def arange[ScalarT: _ArangeScalar]( start_or_stop: _ArangeScalar | float, /, stop: _ArangeScalar | float | None = None, step: _ArangeScalar | float | None = 1, *, - dtype: _DTypeLike[_ArangeScalarT], + dtype: _DTypeLike[ScalarT], device: Literal["cpu"] | None = None, like: _SupportsArrayFunc | None = None, fill_value: _FillValue | None = None, hardmask: bool = False, -) -> _Masked1D[_ArangeScalarT]: ... +) -> _Masked1D[ScalarT]: ... @overload # (int-like, int-like?, int-like?) def arange( start_or_stop: _IntLike_co, @@ -3303,8 +3552,8 @@ def arange( # based on `_core.fromnumeric.clip` @overload -def clip( - a: _ScalarT, +def clip[ScalarT: np.generic]( + a: ScalarT, a_min: ArrayLike | _NoValueType | None = ..., a_max: ArrayLike | _NoValueType | None = ..., out: None = None, @@ -3315,10 +3564,10 @@ def clip( hardmask: bool = False, dtype: None = None, **kwargs: Unpack[_UFuncKwargs], -) -> _ScalarT: ... +) -> ScalarT: ... 
@overload -def clip( - a: NDArray[_ScalarT], +def clip[ScalarT: np.generic]( + a: NDArray[ScalarT], a_min: ArrayLike | _NoValueType | None = ..., a_max: ArrayLike | _NoValueType | None = ..., out: None = None, @@ -3329,13 +3578,13 @@ def clip( hardmask: bool = False, dtype: None = None, **kwargs: Unpack[_UFuncKwargs], -) -> _MaskedArray[_ScalarT]: ... +) -> _MaskedArray[ScalarT]: ... @overload -def clip( +def clip[MArrayT: MaskedArray]( a: ArrayLike, a_min: ArrayLike | None, a_max: ArrayLike | None, - out: _MArrayT, + out: MArrayT, *, min: ArrayLike | _NoValueType | None = ..., max: ArrayLike | _NoValueType | None = ..., @@ -3343,21 +3592,21 @@ def clip( hardmask: bool = False, dtype: DTypeLike | None = None, **kwargs: Unpack[_UFuncKwargs], -) -> _MArrayT: ... +) -> MArrayT: ... @overload -def clip( +def clip[MArrayT: MaskedArray]( a: ArrayLike, a_min: ArrayLike | _NoValueType | None = ..., a_max: ArrayLike | _NoValueType | None = ..., *, - out: _MArrayT, + out: MArrayT, min: ArrayLike | _NoValueType | None = ..., max: ArrayLike | _NoValueType | None = ..., fill_value: _FillValue | None = None, hardmask: bool = False, dtype: DTypeLike | None = None, **kwargs: Unpack[_UFuncKwargs], -) -> _MArrayT: ... +) -> MArrayT: ... @overload def clip( a: ArrayLike, @@ -3384,29 +3633,29 @@ def empty( like: _SupportsArrayFunc | None = None, fill_value: _FillValue | None = None, hardmask: bool = False, -) -> MaskedArray[tuple[int], np.dtype[np.float64]]: ... +) -> _Masked1D[np.float64]: ... @overload -def empty( +def empty[DTypeT: np.dtype]( shape: SupportsIndex, - dtype: _DTypeT | _SupportsDType[_DTypeT], + dtype: DTypeT | _SupportsDType[DTypeT], order: _OrderCF = "C", *, device: Literal["cpu"] | None = None, like: _SupportsArrayFunc | None = None, fill_value: _FillValue | None = None, hardmask: bool = False, -) -> MaskedArray[tuple[int], _DTypeT]: ... +) -> MaskedArray[tuple[int], DTypeT]: ... 
@overload -def empty( +def empty[ScalarT: np.generic]( shape: SupportsIndex, - dtype: type[_ScalarT], + dtype: type[ScalarT], order: _OrderCF = "C", *, device: Literal["cpu"] | None = None, like: _SupportsArrayFunc | None = None, fill_value: _FillValue | None = None, hardmask: bool = False, -) -> MaskedArray[tuple[int], np.dtype[_ScalarT]]: ... +) -> _Masked1D[ScalarT]: ... @overload def empty( shape: SupportsIndex, @@ -3417,10 +3666,10 @@ def empty( like: _SupportsArrayFunc | None = None, fill_value: _FillValue | None = None, hardmask: bool = False, -) -> MaskedArray[tuple[int]]: ... +) -> _Masked1D[Any]: ... @overload # known shape -def empty( - shape: _AnyShapeT, +def empty[ShapeT: _Shape]( + shape: ShapeT, dtype: None = None, order: _OrderCF = "C", *, @@ -3428,32 +3677,32 @@ def empty( like: _SupportsArrayFunc | None = None, fill_value: _FillValue | None = None, hardmask: bool = False, -) -> MaskedArray[_AnyShapeT, np.dtype[np.float64]]: ... +) -> MaskedArray[ShapeT, np.dtype[np.float64]]: ... @overload -def empty( - shape: _AnyShapeT, - dtype: _DTypeT | _SupportsDType[_DTypeT], +def empty[ShapeT: _Shape, DTypeT: np.dtype]( + shape: ShapeT, + dtype: DTypeT | _SupportsDType[DTypeT], order: _OrderCF = "C", *, device: Literal["cpu"] | None = None, like: _SupportsArrayFunc | None = None, fill_value: _FillValue | None = None, hardmask: bool = False, -) -> MaskedArray[_AnyShapeT, _DTypeT]: ... +) -> MaskedArray[ShapeT, DTypeT]: ... @overload -def empty( - shape: _AnyShapeT, - dtype: type[_ScalarT], +def empty[ShapeT: _Shape, ScalarT: np.generic]( + shape: ShapeT, + dtype: type[ScalarT], order: _OrderCF = "C", *, device: Literal["cpu"] | None = None, like: _SupportsArrayFunc | None = None, fill_value: _FillValue | None = None, hardmask: bool = False, -) -> MaskedArray[_AnyShapeT, np.dtype[_ScalarT]]: ... +) -> MaskedArray[ShapeT, np.dtype[ScalarT]]: ... 
@overload -def empty( - shape: _AnyShapeT, +def empty[ShapeT: _Shape]( + shape: ShapeT, dtype: DTypeLike | None = None, order: _OrderCF = "C", *, @@ -3461,9 +3710,9 @@ def empty( like: _SupportsArrayFunc | None = None, fill_value: _FillValue | None = None, hardmask: bool = False, -) -> MaskedArray[_AnyShapeT]: ... +) -> MaskedArray[ShapeT]: ... @overload # unknown shape -def empty( +def empty[ShapeT: _Shape]( shape: _ShapeLike, dtype: None = None, order: _OrderCF = "C", @@ -3474,27 +3723,27 @@ def empty( hardmask: bool = False, ) -> _MaskedArray[np.float64]: ... @overload -def empty( +def empty[DTypeT: np.dtype]( shape: _ShapeLike, - dtype: _DTypeT | _SupportsDType[_DTypeT], + dtype: DTypeT | _SupportsDType[DTypeT], order: _OrderCF = "C", *, device: Literal["cpu"] | None = None, like: _SupportsArrayFunc | None = None, fill_value: _FillValue | None = None, hardmask: bool = False, -) -> MaskedArray[_AnyShape, _DTypeT]: ... +) -> MaskedArray[_AnyShape, DTypeT]: ... @overload -def empty( +def empty[ScalarT: np.generic]( shape: _ShapeLike, - dtype: type[_ScalarT], + dtype: type[ScalarT], order: _OrderCF = "C", *, device: Literal["cpu"] | None = None, like: _SupportsArrayFunc | None = None, fill_value: _FillValue | None = None, hardmask: bool = False, -) -> _MaskedArray[_ScalarT]: ... +) -> _MaskedArray[ScalarT]: ... @overload def empty( shape: _ShapeLike, @@ -3508,8 +3757,8 @@ def empty( # keep in sync with `_core.multiarray.empty_like` @overload -def empty_like( - a: _MArrayT, +def empty_like[MArrayT: MaskedArray]( + a: MArrayT, /, dtype: None = None, order: _OrderKACF = "K", @@ -3517,10 +3766,10 @@ def empty_like( shape: _ShapeLike | None = None, *, device: Literal["cpu"] | None = None, -) -> _MArrayT: ... +) -> MArrayT: ... 
@overload -def empty_like( - a: _ArrayLike[_ScalarT], +def empty_like[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], /, dtype: None = None, order: _OrderKACF = "K", @@ -3528,18 +3777,18 @@ def empty_like( shape: _ShapeLike | None = None, *, device: Literal["cpu"] | None = None, -) -> _MaskedArray[_ScalarT]: ... +) -> _MaskedArray[ScalarT]: ... @overload -def empty_like( +def empty_like[ScalarT: np.generic]( a: Incomplete, /, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], order: _OrderKACF = "K", subok: bool = True, shape: _ShapeLike | None = None, *, device: Literal["cpu"] | None = None, -) -> _MaskedArray[_ScalarT]: ... +) -> _MaskedArray[ScalarT]: ... @overload def empty_like( a: Incomplete, @@ -3571,14 +3820,14 @@ def frombuffer( like: _SupportsArrayFunc | None = None, ) -> _MaskedArray[np.float64]: ... @overload -def frombuffer( +def frombuffer[ScalarT: np.generic]( buffer: Buffer, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], count: SupportsIndex = -1, offset: SupportsIndex = 0, *, like: _SupportsArrayFunc | None = None, -) -> _MaskedArray[_ScalarT]: ... +) -> _MaskedArray[ScalarT]: ... @overload def frombuffer( buffer: Buffer, @@ -3590,14 +3839,14 @@ def frombuffer( ) -> _MaskedArray[Incomplete]: ... # keep roughly in sync with `_core.numeric.fromfunction` -def fromfunction( - function: Callable[..., np.ndarray[_ShapeT, _DTypeT]], +def fromfunction[ShapeT: _Shape, DTypeT: np.dtype]( + function: Callable[..., np.ndarray[ShapeT, DTypeT]], shape: Sequence[int], *, dtype: DTypeLike | None = float, like: _SupportsArrayFunc | None = None, **kwargs: object, -) -> MaskedArray[_ShapeT, _DTypeT]: ... +) -> MaskedArray[ShapeT, DTypeT]: ... # keep roughly in sync with `_core.numeric.identity` @overload @@ -3610,14 +3859,14 @@ def identity( hardmask: bool = False, ) -> MaskedArray[tuple[int, int], np.dtype[np.float64]]: ... 
@overload -def identity( +def identity[ScalarT: np.generic]( n: int, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], *, like: _SupportsArrayFunc | None = None, fill_value: _FillValue | None = None, hardmask: bool = False, -) -> MaskedArray[tuple[int, int], np.dtype[_ScalarT]]: ... +) -> MaskedArray[tuple[int, int], np.dtype[ScalarT]]: ... @overload def identity( n: int, @@ -3657,23 +3906,23 @@ def indices( hardmask: bool = False, ) -> tuple[_MaskedArray[np.intp], ...]: ... @overload -def indices( +def indices[ScalarT: np.generic]( dimensions: Sequence[int], - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], sparse: Literal[False] = False, *, fill_value: _FillValue | None = None, hardmask: bool = False, -) -> _MaskedArray[_ScalarT]: ... +) -> _MaskedArray[ScalarT]: ... @overload -def indices( +def indices[ScalarT: np.generic]( dimensions: Sequence[int], - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], sparse: Literal[True], *, fill_value: _FillValue | None = None, hardmask: bool = False, -) -> tuple[_MaskedArray[_ScalarT], ...]: ... +) -> tuple[_MaskedArray[ScalarT], ...]: ... @overload def indices( dimensions: Sequence[int], @@ -3704,13 +3953,13 @@ def indices( # keep roughly in sync with `_core.fromnumeric.squeeze` @overload -def squeeze( - a: _ArrayLike[_ScalarT], +def squeeze[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], axis: _ShapeLike | None = None, *, fill_value: _FillValue | None = None, hardmask: bool = False, -) -> _MaskedArray[_ScalarT]: ... +) -> _MaskedArray[ScalarT]: ... 
@overload def squeeze( a: ArrayLike, diff --git a/numpy/ma/extras.pyi b/numpy/ma/extras.pyi index 580dcd486c28..83f3bee761ce 100644 --- a/numpy/ma/extras.pyi +++ b/numpy/ma/extras.pyi @@ -1,6 +1,6 @@ from _typeshed import Incomplete from collections.abc import Sequence -from typing import SupportsIndex, TypeAlias, TypeVar, overload +from typing import SupportsIndex, overload import numpy as np from numpy import _CastingKind @@ -66,24 +66,21 @@ __all__ = [ "vstack", ] -_ScalarT = TypeVar("_ScalarT", bound=np.generic) -_ScalarT1 = TypeVar("_ScalarT1", bound=np.generic) -_ScalarT2 = TypeVar("_ScalarT2", bound=np.generic) -_MArrayT = TypeVar("_MArrayT", bound=MaskedArray) - -_MArray: TypeAlias = MaskedArray[_AnyShape, np.dtype[_ScalarT]] +type _MArray[ScalarT: np.generic] = MaskedArray[_AnyShape, np.dtype[ScalarT]] ### # keep in sync with `numpy._core.shape_base.atleast_1d` @overload -def atleast_1d(a0: _ArrayLike[_ScalarT], /) -> _MArray[_ScalarT]: ... +def atleast_1d[ScalarT: np.generic](a0: _ArrayLike[ScalarT], /) -> _MArray[ScalarT]: ... @overload -def atleast_1d(a0: _ArrayLike[_ScalarT1], a1: _ArrayLike[_ScalarT2], /) -> tuple[_MArray[_ScalarT1], _MArray[_ScalarT2]]: ... +def atleast_1d[ScalarT1: np.generic, ScalarT2: np.generic]( + a0: _ArrayLike[ScalarT1], a1: _ArrayLike[ScalarT2], / +) -> tuple[_MArray[ScalarT1], _MArray[ScalarT2]]: ... @overload -def atleast_1d( - a0: _ArrayLike[_ScalarT], a1: _ArrayLike[_ScalarT], /, *arys: _ArrayLike[_ScalarT] -) -> tuple[_MArray[_ScalarT], ...]: ... +def atleast_1d[ScalarT: np.generic]( + a0: _ArrayLike[ScalarT], a1: _ArrayLike[ScalarT], /, *arys: _ArrayLike[ScalarT] +) -> tuple[_MArray[ScalarT], ...]: ... @overload def atleast_1d(a0: ArrayLike, /) -> _MArray[Incomplete]: ... @overload @@ -93,13 +90,15 @@ def atleast_1d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[_MArray # keep in sync with `numpy._core.shape_base.atleast_2d` @overload -def atleast_2d(a0: _ArrayLike[_ScalarT], /) -> _MArray[_ScalarT]: ... 
+def atleast_2d[ScalarT: np.generic](a0: _ArrayLike[ScalarT], /) -> _MArray[ScalarT]: ... @overload -def atleast_2d(a0: _ArrayLike[_ScalarT1], a1: _ArrayLike[_ScalarT2], /) -> tuple[_MArray[_ScalarT1], _MArray[_ScalarT2]]: ... +def atleast_2d[ScalarT1: np.generic, ScalarT2: np.generic]( + a0: _ArrayLike[ScalarT1], a1: _ArrayLike[ScalarT2], / +) -> tuple[_MArray[ScalarT1], _MArray[ScalarT2]]: ... @overload -def atleast_2d( - a0: _ArrayLike[_ScalarT], a1: _ArrayLike[_ScalarT], /, *arys: _ArrayLike[_ScalarT] -) -> tuple[_MArray[_ScalarT], ...]: ... +def atleast_2d[ScalarT: np.generic]( + a0: _ArrayLike[ScalarT], a1: _ArrayLike[ScalarT], /, *arys: _ArrayLike[ScalarT] +) -> tuple[_MArray[ScalarT], ...]: ... @overload def atleast_2d(a0: ArrayLike, /) -> _MArray[Incomplete]: ... @overload @@ -109,13 +108,15 @@ def atleast_2d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[_MArray # keep in sync with `numpy._core.shape_base.atleast_2d` @overload -def atleast_3d(a0: _ArrayLike[_ScalarT], /) -> _MArray[_ScalarT]: ... +def atleast_3d[ScalarT: np.generic](a0: _ArrayLike[ScalarT], /) -> _MArray[ScalarT]: ... @overload -def atleast_3d(a0: _ArrayLike[_ScalarT1], a1: _ArrayLike[_ScalarT2], /) -> tuple[_MArray[_ScalarT1], _MArray[_ScalarT2]]: ... +def atleast_3d[ScalarT1: np.generic, ScalarT2: np.generic]( + a0: _ArrayLike[ScalarT1], a1: _ArrayLike[ScalarT2], / +) -> tuple[_MArray[ScalarT1], _MArray[ScalarT2]]: ... @overload -def atleast_3d( - a0: _ArrayLike[_ScalarT], a1: _ArrayLike[_ScalarT], /, *arys: _ArrayLike[_ScalarT] -) -> tuple[_MArray[_ScalarT], ...]: ... +def atleast_3d[ScalarT: np.generic]( + a0: _ArrayLike[ScalarT], a1: _ArrayLike[ScalarT], /, *arys: _ArrayLike[ScalarT] +) -> tuple[_MArray[ScalarT], ...]: ... @overload def atleast_3d(a0: ArrayLike, /) -> _MArray[Incomplete]: ... 
@overload @@ -125,19 +126,19 @@ def atleast_3d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[_MArray # keep in sync with `numpy._core.shape_base.vstack` @overload -def vstack( - tup: Sequence[_ArrayLike[_ScalarT]], +def vstack[ScalarT: np.generic]( + tup: Sequence[_ArrayLike[ScalarT]], *, dtype: None = None, casting: _CastingKind = "same_kind" -) -> _MArray[_ScalarT]: ... +) -> _MArray[ScalarT]: ... @overload -def vstack( +def vstack[ScalarT: np.generic]( tup: Sequence[ArrayLike], *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], casting: _CastingKind = "same_kind" -) -> _MArray[_ScalarT]: ... +) -> _MArray[ScalarT]: ... @overload def vstack( tup: Sequence[ArrayLike], @@ -150,19 +151,19 @@ row_stack = vstack # keep in sync with `numpy._core.shape_base.hstack` @overload -def hstack( - tup: Sequence[_ArrayLike[_ScalarT]], +def hstack[ScalarT: np.generic]( + tup: Sequence[_ArrayLike[ScalarT]], *, dtype: None = None, casting: _CastingKind = "same_kind" -) -> _MArray[_ScalarT]: ... +) -> _MArray[ScalarT]: ... @overload -def hstack( +def hstack[ScalarT: np.generic]( tup: Sequence[ArrayLike], *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], casting: _CastingKind = "same_kind" -) -> _MArray[_ScalarT]: ... +) -> _MArray[ScalarT]: ... @overload def hstack( tup: Sequence[ArrayLike], @@ -173,35 +174,35 @@ def hstack( # keep in sync with `numpy._core.shape_base_impl.column_stack` @overload -def column_stack(tup: Sequence[_ArrayLike[_ScalarT]]) -> _MArray[_ScalarT]: ... +def column_stack[ScalarT: np.generic](tup: Sequence[_ArrayLike[ScalarT]]) -> _MArray[ScalarT]: ... @overload def column_stack(tup: Sequence[ArrayLike]) -> _MArray[Incomplete]: ... # keep in sync with `numpy._core.shape_base_impl.dstack` @overload -def dstack(tup: Sequence[_ArrayLike[_ScalarT]]) -> _MArray[_ScalarT]: ... +def dstack[ScalarT: np.generic](tup: Sequence[_ArrayLike[ScalarT]]) -> _MArray[ScalarT]: ... 
@overload def dstack(tup: Sequence[ArrayLike]) -> _MArray[Incomplete]: ... # keep in sync with `numpy._core.shape_base.stack` @overload -def stack( - arrays: Sequence[_ArrayLike[_ScalarT]], +def stack[ScalarT: np.generic]( + arrays: Sequence[_ArrayLike[ScalarT]], axis: SupportsIndex = 0, out: None = None, *, dtype: None = None, casting: _CastingKind = "same_kind" -) -> _MArray[_ScalarT]: ... +) -> _MArray[ScalarT]: ... @overload -def stack( +def stack[ScalarT: np.generic]( arrays: Sequence[ArrayLike], axis: SupportsIndex = 0, out: None = None, *, - dtype: _DTypeLike[_ScalarT], + dtype: _DTypeLike[ScalarT], casting: _CastingKind = "same_kind" -) -> _MArray[_ScalarT]: ... +) -> _MArray[ScalarT]: ... @overload def stack( arrays: Sequence[ArrayLike], @@ -212,33 +213,33 @@ def stack( casting: _CastingKind = "same_kind" ) -> _MArray[Incomplete]: ... @overload -def stack( +def stack[MArrayT: MaskedArray]( arrays: Sequence[ArrayLike], axis: SupportsIndex, - out: _MArrayT, + out: MArrayT, *, dtype: DTypeLike | None = None, casting: _CastingKind = "same_kind", -) -> _MArrayT: ... +) -> MArrayT: ... @overload -def stack( +def stack[MArrayT: MaskedArray]( arrays: Sequence[ArrayLike], axis: SupportsIndex = 0, *, - out: _MArrayT, + out: MArrayT, dtype: DTypeLike | None = None, casting: _CastingKind = "same_kind", -) -> _MArrayT: ... +) -> MArrayT: ... # keep in sync with `numpy._core.shape_base_impl.hsplit` @overload -def hsplit(ary: _ArrayLike[_ScalarT], indices_or_sections: _ShapeLike) -> list[_MArray[_ScalarT]]: ... +def hsplit[ScalarT: np.generic](ary: _ArrayLike[ScalarT], indices_or_sections: _ShapeLike) -> list[_MArray[ScalarT]]: ... @overload def hsplit(ary: ArrayLike, indices_or_sections: _ShapeLike) -> list[_MArray[Incomplete]]: ... # keep in sync with `numpy._core.twodim_base_impl.hsplit` @overload -def diagflat(v: _ArrayLike[_ScalarT], k: int = 0) -> _MArray[_ScalarT]: ... +def diagflat[ScalarT: np.generic](v: _ArrayLike[ScalarT], k: int = 0) -> _MArray[ScalarT]: ... 
@overload def diagflat(v: ArrayLike, k: int = 0) -> _MArray[Incomplete]: ... diff --git a/numpy/matlib.pyi b/numpy/matlib.pyi index 53ac58c85d2e..f446dbf1c4b9 100644 --- a/numpy/matlib.pyi +++ b/numpy/matlib.pyi @@ -1,4 +1,4 @@ -from typing import Any, Literal, TypeAlias, TypeVar, overload +from typing import Any, Literal, overload import numpy as np import numpy.typing as npt @@ -505,9 +505,8 @@ __all__ += np.__all__ ### -_ScalarT = TypeVar("_ScalarT", bound=np.generic) -_Matrix: TypeAlias = np.matrix[tuple[int, int], np.dtype[_ScalarT]] -_Order: TypeAlias = Literal["C", "F"] +type _Matrix[ScalarT: np.generic] = np.matrix[tuple[int, int], np.dtype[ScalarT]] +type _Order = Literal["C", "F"] ### @@ -515,7 +514,7 @@ _Order: TypeAlias = Literal["C", "F"] @overload def empty(shape: int | tuple[int, int], dtype: None = None, order: _Order = "C") -> _Matrix[np.float64]: ... @overload -def empty(shape: int | tuple[int, int], dtype: _DTypeLike[_ScalarT], order: _Order = "C") -> _Matrix[_ScalarT]: ... +def empty[ScalarT: np.generic](shape: int | tuple[int, int], dtype: _DTypeLike[ScalarT], order: _Order = "C") -> _Matrix[ScalarT]: ... @overload def empty(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C") -> _Matrix[Any]: ... @@ -523,7 +522,7 @@ def empty(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C @overload def ones(shape: int | tuple[int, int], dtype: None = None, order: _Order = "C") -> _Matrix[np.float64]: ... @overload -def ones(shape: int | tuple[int, int], dtype: _DTypeLike[_ScalarT], order: _Order = "C") -> _Matrix[_ScalarT]: ... +def ones[ScalarT: np.generic](shape: int | tuple[int, int], dtype: _DTypeLike[ScalarT], order: _Order = "C") -> _Matrix[ScalarT]: ... @overload def ones(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C") -> _Matrix[Any]: ... 
@@ -531,7 +530,7 @@ def ones(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C" @overload def zeros(shape: int | tuple[int, int], dtype: None = None, order: _Order = "C") -> _Matrix[np.float64]: ... @overload -def zeros(shape: int | tuple[int, int], dtype: _DTypeLike[_ScalarT], order: _Order = "C") -> _Matrix[_ScalarT]: ... +def zeros[ScalarT: np.generic](shape: int | tuple[int, int], dtype: _DTypeLike[ScalarT], order: _Order = "C") -> _Matrix[ScalarT]: ... @overload def zeros(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C") -> _Matrix[Any]: ... @@ -539,7 +538,7 @@ def zeros(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C @overload def identity(n: int, dtype: None = None) -> _Matrix[np.float64]: ... @overload -def identity(n: int, dtype: _DTypeLike[_ScalarT]) -> _Matrix[_ScalarT]: ... +def identity[ScalarT: np.generic](n: int, dtype: _DTypeLike[ScalarT]) -> _Matrix[ScalarT]: ... @overload def identity(n: int, dtype: npt.DTypeLike | None = None) -> _Matrix[Any]: ... @@ -553,9 +552,9 @@ def eye( order: _Order = "C", ) -> _Matrix[np.float64]: ... @overload -def eye(n: int, M: int | None, k: int, dtype: _DTypeLike[_ScalarT], order: _Order = "C") -> _Matrix[_ScalarT]: ... +def eye[ScalarT: np.generic](n: int, M: int | None, k: int, dtype: _DTypeLike[ScalarT], order: _Order = "C") -> _Matrix[ScalarT]: ... @overload -def eye(n: int, M: int | None = None, k: int = 0, *, dtype: _DTypeLike[_ScalarT], order: _Order = "C") -> _Matrix[_ScalarT]: ... +def eye[ScalarT: np.generic](n: int, M: int | None = None, k: int = 0, *, dtype: _DTypeLike[ScalarT], order: _Order = "C") -> _Matrix[ScalarT]: ... @overload def eye(n: int, M: int | None = None, k: int = 0, dtype: npt.DTypeLike | None = ..., order: _Order = "C") -> _Matrix[Any]: ... @@ -573,8 +572,8 @@ def randn(arg: int, /, *args: int) -> _Matrix[np.float64]: ... # @overload -def repmat(a: _Matrix[_ScalarT], m: int, n: int) -> _Matrix[_ScalarT]: ... 
+def repmat[ScalarT: np.generic](a: _Matrix[ScalarT], m: int, n: int) -> _Matrix[ScalarT]: ... @overload -def repmat(a: _ArrayLike[_ScalarT], m: int, n: int) -> npt.NDArray[_ScalarT]: ... +def repmat[ScalarT: np.generic](a: _ArrayLike[ScalarT], m: int, n: int) -> npt.NDArray[ScalarT]: ... @overload def repmat(a: npt.ArrayLike, m: int, n: int) -> npt.NDArray[Any]: ... diff --git a/numpy/matrixlib/defmatrix.pyi b/numpy/matrixlib/defmatrix.pyi index 40c747d1ae3d..55b9d795078c 100644 --- a/numpy/matrixlib/defmatrix.pyi +++ b/numpy/matrixlib/defmatrix.pyi @@ -1,7 +1,7 @@ from _typeshed import Incomplete from collections.abc import Mapping, Sequence from types import EllipsisType -from typing import Any, ClassVar, Literal as L, Self, SupportsIndex, TypeAlias, overload +from typing import Any, ClassVar, Literal as L, Self, SupportsIndex, overload from typing_extensions import TypeVar import numpy as np @@ -17,17 +17,15 @@ from numpy._typing import ( __all__ = ["asmatrix", "bmat", "matrix"] -_T = TypeVar("_T") -_ArrayT = TypeVar("_ArrayT", bound=np.ndarray) -_BoolOrIntArrayT = TypeVar("_BoolOrIntArrayT", bound=NDArray[np.integer | np.bool]) -_ScalarT = TypeVar("_ScalarT", bound=np.generic) _ShapeT_co = TypeVar("_ShapeT_co", bound=_2D, default=_2D, covariant=True) _DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=True) -_2D: TypeAlias = tuple[int, int] -_Matrix: TypeAlias = matrix[_2D, np.dtype[_ScalarT]] -_ToIndex1: TypeAlias = slice | EllipsisType | NDArray[np.integer | np.bool] | _NestedSequence[int] | None -_ToIndex2: TypeAlias = tuple[_ToIndex1, _ToIndex1 | SupportsIndex] | tuple[_ToIndex1 | SupportsIndex, _ToIndex1] +type _2D = tuple[int, int] +type _Matrix[ScalarT: np.generic] = matrix[_2D, np.dtype[ScalarT]] +type _ToIndex1 = slice | EllipsisType | NDArray[np.integer | np.bool] | _NestedSequence[int] | None +type _ToIndex2 = tuple[_ToIndex1, _ToIndex1 | SupportsIndex] | tuple[_ToIndex1 | SupportsIndex, _ToIndex1] + +### class 
matrix(np.ndarray[_ShapeT_co, _DTypeT_co]): __array_priority__: ClassVar[float] = 10.0 # pyright: ignore[reportIncompatibleMethodOverride] @@ -57,7 +55,7 @@ class matrix(np.ndarray[_ShapeT_co, _DTypeT_co]): # def __pow__(self, other: ArrayLike, /) -> _Matrix[Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] - def __rpow__(self, other: ArrayLike, /) -> _Matrix[Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def __rpow__(self, other: ArrayLike, /) -> _Matrix[Incomplete]: ... # type: ignore[override] # keep in sync with `prod` and `mean` @overload # type: ignore[override] @@ -65,9 +63,9 @@ class matrix(np.ndarray[_ShapeT_co, _DTypeT_co]): @overload def sum(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None) -> _Matrix[Incomplete]: ... @overload - def sum(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... + def sum[OutT: np.ndarray](self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: OutT) -> OutT: ... @overload - def sum(self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + def sum[OutT: np.ndarray](self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: OutT) -> OutT: ... # pyright: ignore[reportIncompatibleMethodOverride] # keep in sync with `sum` and `mean` @overload # type: ignore[override] @@ -75,9 +73,9 @@ class matrix(np.ndarray[_ShapeT_co, _DTypeT_co]): @overload def prod(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None) -> _Matrix[Incomplete]: ... @overload - def prod(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... + def prod[OutT: np.ndarray](self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: OutT) -> OutT: ... 
@overload - def prod(self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + def prod[OutT: np.ndarray](self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: OutT) -> OutT: ... # pyright: ignore[reportIncompatibleMethodOverride] # keep in sync with `sum` and `prod` @overload # type: ignore[override] @@ -85,9 +83,9 @@ class matrix(np.ndarray[_ShapeT_co, _DTypeT_co]): @overload def mean(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None) -> _Matrix[Incomplete]: ... @overload - def mean(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... + def mean[OutT: np.ndarray](self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: OutT) -> OutT: ... @overload - def mean(self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + def mean[OutT: np.ndarray](self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: OutT) -> OutT: ... # pyright: ignore[reportIncompatibleMethodOverride] # keep in sync with `var` @overload # type: ignore[override] @@ -95,11 +93,11 @@ class matrix(np.ndarray[_ShapeT_co, _DTypeT_co]): @overload def std(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None, ddof: float = 0) -> _Matrix[Incomplete]: ... @overload - def std(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT, ddof: float = 0) -> _ArrayT: ... + def std[OutT: np.ndarray](self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: OutT, ddof: float = 0) -> OutT: ... @overload - def std( # pyright: ignore[reportIncompatibleMethodOverride] - self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT, ddof: float = 0 - ) -> _ArrayT: ... 
+ def std[OutT: np.ndarray]( # pyright: ignore[reportIncompatibleMethodOverride] + self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: OutT, ddof: float = 0 + ) -> OutT: ... # keep in sync with `std` @overload # type: ignore[override] @@ -107,11 +105,11 @@ class matrix(np.ndarray[_ShapeT_co, _DTypeT_co]): @overload def var(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None, ddof: float = 0) -> _Matrix[Incomplete]: ... @overload - def var(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT, ddof: float = 0) -> _ArrayT: ... + def var[OutT: np.ndarray](self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: OutT, ddof: float = 0) -> OutT: ... @overload - def var( # pyright: ignore[reportIncompatibleMethodOverride] - self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT, ddof: float = 0 - ) -> _ArrayT: ... + def var[OutT: np.ndarray]( # pyright: ignore[reportIncompatibleMethodOverride] + self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: OutT, ddof: float = 0 + ) -> OutT: ... # keep in sync with `all` @overload # type: ignore[override] @@ -119,9 +117,9 @@ class matrix(np.ndarray[_ShapeT_co, _DTypeT_co]): @overload def any(self, axis: _ShapeLike, out: None = None) -> _Matrix[np.bool]: ... @overload - def any(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ... + def any[OutT: np.ndarray](self, axis: _ShapeLike | None, out: OutT) -> OutT: ... @overload - def any(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + def any[OutT: np.ndarray](self, axis: _ShapeLike | None = None, *, out: OutT) -> OutT: ... 
# pyright: ignore[reportIncompatibleMethodOverride] # keep in sync with `any` @overload # type: ignore[override] @@ -129,70 +127,70 @@ class matrix(np.ndarray[_ShapeT_co, _DTypeT_co]): @overload def all(self, axis: _ShapeLike, out: None = None) -> _Matrix[np.bool]: ... @overload - def all(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ... + def all[OutT: np.ndarray](self, axis: _ShapeLike | None, out: OutT) -> OutT: ... @overload - def all(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + def all[OutT: np.ndarray](self, axis: _ShapeLike | None = None, *, out: OutT) -> OutT: ... # pyright: ignore[reportIncompatibleMethodOverride] # keep in sync with `min` and `ptp` @overload # type: ignore[override] - def max(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> _ScalarT: ... + def max[ScalarT: np.generic](self: NDArray[ScalarT], axis: None = None, out: None = None) -> ScalarT: ... @overload def max(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, _DTypeT_co]: ... @overload - def max(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ... + def max[OutT: np.ndarray](self, axis: _ShapeLike | None, out: OutT) -> OutT: ... @overload - def max(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + def max[OutT: np.ndarray](self, axis: _ShapeLike | None = None, *, out: OutT) -> OutT: ... # pyright: ignore[reportIncompatibleMethodOverride] # keep in sync with `max` and `ptp` @overload # type: ignore[override] - def min(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> _ScalarT: ... + def min[ScalarT: np.generic](self: NDArray[ScalarT], axis: None = None, out: None = None) -> ScalarT: ... @overload def min(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, _DTypeT_co]: ... @overload - def min(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ... 
+ def min[OutT: np.ndarray](self, axis: _ShapeLike | None, out: OutT) -> OutT: ... @overload - def min(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + def min[OutT: np.ndarray](self, axis: _ShapeLike | None = None, *, out: OutT) -> OutT: ... # pyright: ignore[reportIncompatibleMethodOverride] # keep in sync with `max` and `min` @overload - def ptp(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> _ScalarT: ... + def ptp[ScalarT: np.generic](self: NDArray[ScalarT], axis: None = None, out: None = None) -> ScalarT: ... @overload def ptp(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, _DTypeT_co]: ... @overload - def ptp(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ... + def ptp[OutT: np.ndarray](self, axis: _ShapeLike | None, out: OutT) -> OutT: ... @overload - def ptp(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + def ptp[OutT: np.ndarray](self, axis: _ShapeLike | None = None, *, out: OutT) -> OutT: ... # pyright: ignore[reportIncompatibleMethodOverride] # keep in sync with `argmin` @overload # type: ignore[override] - def argmax(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> np.intp: ... + def argmax[ScalarT: np.generic](self: NDArray[ScalarT], axis: None = None, out: None = None) -> np.intp: ... @overload def argmax(self, axis: _ShapeLike, out: None = None) -> _Matrix[np.intp]: ... @overload - def argmax(self, axis: _ShapeLike | None, out: _BoolOrIntArrayT) -> _BoolOrIntArrayT: ... + def argmax[OutT: NDArray[np.integer | np.bool]](self, axis: _ShapeLike | None, out: OutT) -> OutT: ... @overload - def argmax(self, axis: _ShapeLike | None = None, *, out: _BoolOrIntArrayT) -> _BoolOrIntArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + def argmax[OutT: NDArray[np.integer | np.bool]](self, axis: _ShapeLike | None = None, *, out: OutT) -> OutT: ... 
# pyright: ignore[reportIncompatibleMethodOverride] # keep in sync with `argmax` @overload # type: ignore[override] - def argmin(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> np.intp: ... + def argmin[ScalarT: np.generic](self: NDArray[ScalarT], axis: None = None, out: None = None) -> np.intp: ... @overload def argmin(self, axis: _ShapeLike, out: None = None) -> _Matrix[np.intp]: ... @overload - def argmin(self, axis: _ShapeLike | None, out: _BoolOrIntArrayT) -> _BoolOrIntArrayT: ... + def argmin[OutT: NDArray[np.integer | np.bool]](self, axis: _ShapeLike | None, out: OutT) -> OutT: ... @overload - def argmin(self, axis: _ShapeLike | None = None, *, out: _BoolOrIntArrayT) -> _BoolOrIntArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + def argmin[OutT: NDArray[np.integer | np.bool]](self, axis: _ShapeLike | None = None, *, out: OutT) -> OutT: ... # pyright: ignore[reportIncompatibleMethodOverride] # the second overload handles the (rare) case that the matrix is not 2-d @overload - def tolist(self: _Matrix[np.generic[_T]]) -> list[list[_T]]: ... # pyright: ignore[reportIncompatibleMethodOverride] + def tolist[T](self: _Matrix[np.generic[T]]) -> list[list[T]]: ... # pyright: ignore[reportIncompatibleMethodOverride] @overload def tolist(self) -> Incomplete: ... # pyright: ignore[reportIncompatibleMethodOverride] # these three methods will at least return a `2-d` array of shape (1, n) def squeeze(self, /, axis: _ShapeLike | None = None) -> matrix[_2D, _DTypeT_co]: ... - def ravel(self, /, order: L["K", "A", "C", "F"] | None = "C") -> matrix[_2D, _DTypeT_co]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] - def flatten(self, /, order: L["K", "A", "C", "F"] | None = "C") -> matrix[_2D, _DTypeT_co]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def ravel(self, /, order: L["K", "A", "C", "F"] | None = "C") -> matrix[_2D, _DTypeT_co]: ... 
# type: ignore[override] + def flatten(self, /, order: L["K", "A", "C", "F"] | None = "C") -> matrix[_2D, _DTypeT_co]: ... # type: ignore[override] # matrix.T is inherited from _ScalarOrArrayCommon def getT(self) -> Self: ... diff --git a/numpy/polynomial/_polybase.pyi b/numpy/polynomial/_polybase.pyi index b16b06c8a734..2fdfd24db7a9 100644 --- a/numpy/polynomial/_polybase.pyi +++ b/numpy/polynomial/_polybase.pyi @@ -1,16 +1,7 @@ import abc import decimal from collections.abc import Iterator, Sequence -from typing import ( - Any, - ClassVar, - Generic, - Literal, - Self, - SupportsIndex, - TypeAlias, - overload, -) +from typing import Any, ClassVar, Generic, Literal, Self, SupportsIndex, overload from typing_extensions import TypeIs, TypeVar import numpy as np @@ -38,10 +29,12 @@ from ._polytypes import ( __all__ = ["ABCPolyBase"] _NameT_co = TypeVar("_NameT_co", bound=str | None, default=str | None, covariant=True) -_PolyT = TypeVar("_PolyT", bound=ABCPolyBase) -_AnyOther: TypeAlias = ABCPolyBase | _CoefLike_co | _SeriesLikeCoef_co -class ABCPolyBase(Generic[_NameT_co], abc.ABC): +type _AnyOther = ABCPolyBase | _CoefLike_co | _SeriesLikeCoef_co + +### + +class ABCPolyBase(Generic[_NameT_co], abc.ABC): # noqa: UP046 __hash__: ClassVar[None] = None # type: ignore[assignment] # pyright: ignore[reportIncompatibleMethodOverride] __array_ufunc__: ClassVar[None] = None maxpower: ClassVar[Literal[100]] = 100 @@ -76,7 +69,7 @@ class ABCPolyBase(Generic[_NameT_co], abc.ABC): # @overload - def __call__(self, /, arg: _PolyT) -> _PolyT: ... + def __call__[PolyT: ABCPolyBase](self, /, arg: PolyT) -> PolyT: ... @overload def __call__(self, /, arg: _FloatLike_co | decimal.Decimal) -> np.float64 | Any: ... 
@overload @@ -134,22 +127,22 @@ class ABCPolyBase(Generic[_NameT_co], abc.ABC): # @overload - def convert( + def convert[PolyT: ABCPolyBase]( self, /, domain: _SeriesLikeCoef_co | None, - kind: type[_PolyT], + kind: type[PolyT], window: _SeriesLikeCoef_co | None = None, - ) -> _PolyT: ... + ) -> PolyT: ... @overload - def convert( + def convert[PolyT: ABCPolyBase]( self, /, domain: _SeriesLikeCoef_co | None = None, *, - kind: type[_PolyT], + kind: type[PolyT], window: _SeriesLikeCoef_co | None = None, - ) -> _PolyT: ... + ) -> PolyT: ... @overload def convert( self, diff --git a/numpy/polynomial/_polytypes.pyi b/numpy/polynomial/_polytypes.pyi index b5a603b6ca85..46d17ac6353c 100644 --- a/numpy/polynomial/_polytypes.pyi +++ b/numpy/polynomial/_polytypes.pyi @@ -1,5 +1,3 @@ -# ruff: noqa: PYI046 - from collections.abc import Sequence from typing import ( Any, @@ -9,8 +7,6 @@ from typing import ( Self, SupportsIndex, SupportsInt, - TypeAlias, - TypeVar, overload, type_check_only, ) @@ -19,75 +15,75 @@ import numpy as np import numpy.typing as npt from numpy._typing import ( _ArrayLikeComplex_co, - # array-likes _ArrayLikeFloat_co, _ArrayLikeNumber_co, _ComplexLike_co, _FloatLike_co, - # scalar-likes _IntLike_co, _NestedSequence, _NumberLike_co, _SupportsArray, ) -_T = TypeVar("_T") -_T_contra = TypeVar("_T_contra", contravariant=True) -_ScalarT = TypeVar("_ScalarT", bound=np.number | np.bool | np.object_) - # compatible with e.g. int, float, complex, Decimal, Fraction, and ABCPolyBase @type_check_only -class _SupportsCoefOps(Protocol[_T_contra]): +class _SupportsCoefOps[T](Protocol): def __eq__(self, x: object, /) -> bool: ... def __ne__(self, x: object, /) -> bool: ... def __neg__(self, /) -> Self: ... def __pos__(self, /) -> Self: ... - def __add__(self, x: _T_contra, /) -> Self: ... - def __sub__(self, x: _T_contra, /) -> Self: ... - def __mul__(self, x: _T_contra, /) -> Self: ... - def __pow__(self, x: _T_contra, /) -> Self | float: ... 
- def __radd__(self, x: _T_contra, /) -> Self: ... - def __rsub__(self, x: _T_contra, /) -> Self: ... - def __rmul__(self, x: _T_contra, /) -> Self: ... + def __add__(self, x: T, /) -> Self: ... + def __sub__(self, x: T, /) -> Self: ... + def __mul__(self, x: T, /) -> Self: ... + def __pow__(self, x: T, /) -> Self | float: ... + def __radd__(self, x: T, /) -> Self: ... + def __rsub__(self, x: T, /) -> Self: ... + def __rmul__(self, x: T, /) -> Self: ... -_Series: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] +type _PolyScalar = np.bool | np.number | np.object_ -_FloatSeries: TypeAlias = _Series[np.floating] -_ComplexSeries: TypeAlias = _Series[np.complexfloating] -_ObjectSeries: TypeAlias = _Series[np.object_] -_CoefSeries: TypeAlias = _Series[np.inexact | np.object_] +type _Series[ScalarT: _PolyScalar] = np.ndarray[tuple[int], np.dtype[ScalarT]] -_FloatArray: TypeAlias = npt.NDArray[np.floating] -_ComplexArray: TypeAlias = npt.NDArray[np.complexfloating] -_ObjectArray: TypeAlias = npt.NDArray[np.object_] -_CoefArray: TypeAlias = npt.NDArray[np.inexact | np.object_] +type _FloatSeries = _Series[np.floating] +type _ComplexSeries = _Series[np.complexfloating] +type _ObjectSeries = _Series[np.object_] +type _CoefSeries = _Series[np.inexact | np.object_] -_Tuple2: TypeAlias = tuple[_T, _T] -_Array1: TypeAlias = np.ndarray[tuple[Literal[1]], np.dtype[_ScalarT]] -_Array2: TypeAlias = np.ndarray[tuple[Literal[2]], np.dtype[_ScalarT]] +type _FloatArray = npt.NDArray[np.floating] +type _ComplexArray = npt.NDArray[np.complexfloating] +type _ObjectArray = npt.NDArray[np.object_] +type _CoefArray = npt.NDArray[np.inexact | np.object_] -_AnyInt: TypeAlias = SupportsInt | SupportsIndex +type _Tuple2[_T] = tuple[_T, _T] +type _Array1[ScalarT: _PolyScalar] = np.ndarray[tuple[Literal[1]], np.dtype[ScalarT]] +type _Array2[ScalarT: _PolyScalar] = np.ndarray[tuple[Literal[2]], np.dtype[ScalarT]] -_CoefObjectLike_co: TypeAlias = np.object_ | _SupportsCoefOps[Any] 
-_CoefLike_co: TypeAlias = _NumberLike_co | _CoefObjectLike_co +type _AnyInt = SupportsInt | SupportsIndex + +type _CoefObjectLike_co = np.object_ | _SupportsCoefOps[Any] +type _CoefLike_co = _NumberLike_co | _CoefObjectLike_co # The term "series" is used here to refer to 1-d arrays of numeric scalars. -_SeriesLikeBool_co: TypeAlias = _SupportsArray[np.dtype[np.bool]] | Sequence[bool | np.bool] -_SeriesLikeInt_co: TypeAlias = _SupportsArray[np.dtype[np.integer | np.bool]] | Sequence[_IntLike_co] -_SeriesLikeFloat_co: TypeAlias = _SupportsArray[np.dtype[np.floating | np.integer | np.bool]] | Sequence[_FloatLike_co] -_SeriesLikeComplex_co: TypeAlias = _SupportsArray[np.dtype[np.number | np.bool]] | Sequence[_ComplexLike_co] -_SeriesLikeObject_co: TypeAlias = _SupportsArray[np.dtype[np.object_]] | Sequence[_CoefObjectLike_co] -_SeriesLikeCoef_co: TypeAlias = _SupportsArray[np.dtype[np.number | np.bool | np.object_]] | Sequence[_CoefLike_co] +type _SeriesLikeBool_co = _SupportsArray[np.dtype[np.bool]] | Sequence[bool | np.bool] +type _SeriesLikeInt_co = _SupportsArray[np.dtype[np.integer | np.bool]] | Sequence[_IntLike_co] +type _SeriesLikeFloat_co = _SupportsArray[np.dtype[np.floating | np.integer | np.bool]] | Sequence[_FloatLike_co] +type _SeriesLikeComplex_co = _SupportsArray[np.dtype[np.number | np.bool]] | Sequence[_ComplexLike_co] +type _SeriesLikeObject_co = _SupportsArray[np.dtype[np.object_]] | Sequence[_CoefObjectLike_co] +type _SeriesLikeCoef_co = _SupportsArray[np.dtype[_PolyScalar]] | Sequence[_CoefLike_co] + +type _ArrayLikeCoefObject_co = _CoefObjectLike_co | _SeriesLikeObject_co | _NestedSequence[_SeriesLikeObject_co] +type _ArrayLikeCoef_co = npt.NDArray[_PolyScalar] | _ArrayLikeNumber_co | _ArrayLikeCoefObject_co -_ArrayLikeCoefObject_co: TypeAlias = _CoefObjectLike_co | _SeriesLikeObject_co | _NestedSequence[_SeriesLikeObject_co] -_ArrayLikeCoef_co: TypeAlias = npt.NDArray[np.number | np.bool | np.object_] | _ArrayLikeNumber_co | 
_ArrayLikeCoefObject_co +type _Line[ScalarT: _PolyScalar] = np.ndarray[tuple[int], np.dtype[ScalarT]] +type _Companion[ScalarT: _PolyScalar] = np.ndarray[tuple[int, int], np.dtype[ScalarT]] -_Line: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] +type _AnyDegrees = Sequence[SupportsIndex] +type _FullFitResult = Sequence[np.inexact | np.int32] @type_check_only class _FuncLine(Protocol): @overload - def __call__(self, /, off: _ScalarT, scl: _ScalarT) -> _Line[_ScalarT]: ... + def __call__[ScalarT: _PolyScalar](self, /, off: ScalarT, scl: ScalarT) -> _Line[ScalarT]: ... @overload def __call__(self, /, off: int, scl: int) -> _Line[np.int_]: ... @overload @@ -308,8 +304,6 @@ class _FuncVander(Protocol): @overload def __call__(self, /, x: npt.ArrayLike, deg: SupportsIndex) -> _CoefArray: ... -_AnyDegrees: TypeAlias = Sequence[SupportsIndex] - @type_check_only class _FuncVander2D(Protocol): @overload @@ -360,8 +354,6 @@ class _FuncVander3D(Protocol): deg: _AnyDegrees, ) -> _CoefArray: ... -_FullFitResult: TypeAlias = Sequence[np.inexact | np.int32] - @type_check_only class _FuncFit(Protocol): @overload @@ -476,8 +468,6 @@ class _FuncRoots(Protocol): @overload def __call__(self, /, c: _SeriesLikeCoef_co) -> _ObjectSeries: ... 
-_Companion: TypeAlias = np.ndarray[tuple[int, int], np.dtype[_ScalarT]] - @type_check_only class _FuncCompanion(Protocol): @overload diff --git a/numpy/polynomial/chebyshev.pyi b/numpy/polynomial/chebyshev.pyi index 1cfb27829b2e..157b0e5d0f46 100644 --- a/numpy/polynomial/chebyshev.pyi +++ b/numpy/polynomial/chebyshev.pyi @@ -1,15 +1,6 @@ from _typeshed import ConvertibleToInt from collections.abc import Callable, Iterable -from typing import ( - Any, - ClassVar, - Concatenate, - Final, - Literal as L, - Self, - TypeVar, - overload, -) +from typing import Any, ClassVar, Concatenate, Final, Literal as L, Self, overload import numpy as np import numpy.typing as npt @@ -81,15 +72,14 @@ __all__ = [ "chebinterpolate", ] -_NumberOrObjectT = TypeVar("_NumberOrObjectT", bound=np.number | np.object_) -_CoefScalarT = TypeVar("_CoefScalarT", bound=np.number | np.bool | np.object_) +### -def _cseries_to_zseries(c: npt.NDArray[_NumberOrObjectT]) -> _Series[_NumberOrObjectT]: ... -def _zseries_to_cseries(zs: npt.NDArray[_NumberOrObjectT]) -> _Series[_NumberOrObjectT]: ... -def _zseries_mul(z1: npt.NDArray[_NumberOrObjectT], z2: npt.NDArray[_NumberOrObjectT]) -> _Series[_NumberOrObjectT]: ... -def _zseries_div(z1: npt.NDArray[_NumberOrObjectT], z2: npt.NDArray[_NumberOrObjectT]) -> _Series[_NumberOrObjectT]: ... -def _zseries_der(zs: npt.NDArray[_NumberOrObjectT]) -> _Series[_NumberOrObjectT]: ... -def _zseries_int(zs: npt.NDArray[_NumberOrObjectT]) -> _Series[_NumberOrObjectT]: ... +def _cseries_to_zseries[ScalarT: np.number | np.object_](c: npt.NDArray[ScalarT]) -> _Series[ScalarT]: ... +def _zseries_to_cseries[ScalarT: np.number | np.object_](zs: npt.NDArray[ScalarT]) -> _Series[ScalarT]: ... +def _zseries_mul[ScalarT: np.number | np.object_](z1: npt.NDArray[ScalarT], z2: npt.NDArray[ScalarT]) -> _Series[ScalarT]: ... +def _zseries_div[ScalarT: np.number | np.object_](z1: npt.NDArray[ScalarT], z2: npt.NDArray[ScalarT]) -> _Series[ScalarT]: ... 
+def _zseries_der[ScalarT: np.number | np.object_](zs: npt.NDArray[ScalarT]) -> _Series[ScalarT]: ... +def _zseries_int[ScalarT: np.number | np.object_](zs: npt.NDArray[ScalarT]) -> _Series[ScalarT]: ... poly2cheb: Final[_FuncPoly2Ortho] = ... cheb2poly: Final[_FuncUnOp] = ... @@ -133,17 +123,17 @@ def chebinterpolate( args: tuple[()] = (), ) -> npt.NDArray[np.float64 | np.complex128 | np.object_]: ... @overload -def chebinterpolate( - func: Callable[[npt.NDArray[np.float64]], _CoefScalarT], +def chebinterpolate[CoefScalarT: np.number | np.bool | np.object_]( + func: Callable[[npt.NDArray[np.float64]], CoefScalarT], deg: _IntLike_co, args: tuple[()] = (), -) -> npt.NDArray[_CoefScalarT]: ... +) -> npt.NDArray[CoefScalarT]: ... @overload -def chebinterpolate( - func: Callable[Concatenate[npt.NDArray[np.float64], ...], _CoefScalarT], +def chebinterpolate[CoefScalarT: np.number | np.bool | np.object_]( + func: Callable[Concatenate[npt.NDArray[np.float64], ...], CoefScalarT], deg: _IntLike_co, args: Iterable[Any], -) -> npt.NDArray[_CoefScalarT]: ... +) -> npt.NDArray[CoefScalarT]: ... class Chebyshev(ABCPolyBase[L["T"]]): basis_name: ClassVar[L["T"]] = "T" # pyright: ignore[reportIncompatibleMethodOverride] diff --git a/numpy/polynomial/hermite.pyi b/numpy/polynomial/hermite.pyi index 04b8238735dd..60f4af5a1fd7 100644 --- a/numpy/polynomial/hermite.pyi +++ b/numpy/polynomial/hermite.pyi @@ -1,4 +1,4 @@ -from typing import Any, ClassVar, Final, Literal as L, TypeVar +from typing import Any, ClassVar, Final, Literal as L import numpy as np from numpy._typing import _Shape @@ -63,8 +63,6 @@ __all__ = [ "hermweight", ] -_ShapeT = TypeVar("_ShapeT", bound=_Shape) - poly2herm: Final[_FuncPoly2Ortho] = ... herm2poly: Final[_FuncUnOp] = ... @@ -95,7 +93,10 @@ hermfit: Final[_FuncFit] = ... hermcompanion: Final[_FuncCompanion] = ... hermroots: Final[_FuncRoots] = ... 
-def _normed_hermite_n(x: np.ndarray[_ShapeT, np.dtype[np.float64]], n: int) -> np.ndarray[_ShapeT, np.dtype[np.float64]]: ... +def _normed_hermite_n[ShapeT: _Shape]( + x: np.ndarray[ShapeT, np.dtype[np.float64]], + n: int, +) -> np.ndarray[ShapeT, np.dtype[np.float64]]: ... hermgauss: Final[_FuncGauss] = ... hermweight: Final[_FuncWeight] = ... diff --git a/numpy/polynomial/hermite_e.pyi b/numpy/polynomial/hermite_e.pyi index b996de52c6da..6997c8a381ef 100644 --- a/numpy/polynomial/hermite_e.pyi +++ b/numpy/polynomial/hermite_e.pyi @@ -1,4 +1,4 @@ -from typing import Any, ClassVar, Final, Literal as L, TypeVar +from typing import Any, ClassVar, Final, Literal as L import numpy as np from numpy._typing import _Shape @@ -63,8 +63,6 @@ __all__ = [ "hermeweight", ] -_ShapeT = TypeVar("_ShapeT", bound=_Shape) - poly2herme: Final[_FuncPoly2Ortho] = ... herme2poly: Final[_FuncUnOp] = ... @@ -95,7 +93,10 @@ hermefit: Final[_FuncFit] = ... hermecompanion: Final[_FuncCompanion] = ... hermeroots: Final[_FuncRoots] = ... -def _normed_hermite_e_n(x: np.ndarray[_ShapeT, np.dtype[np.float64]], n: int) -> np.ndarray[_ShapeT, np.dtype[np.float64]]: ... +def _normed_hermite_e_n[ShapeT: _Shape]( + x: np.ndarray[ShapeT, np.dtype[np.float64]], + n: int, +) -> np.ndarray[ShapeT, np.dtype[np.float64]]: ... hermegauss: Final[_FuncGauss] = ... hermeweight: Final[_FuncWeight] = ... 
diff --git a/numpy/polynomial/polyutils.pyi b/numpy/polynomial/polyutils.pyi index 79ca317c12b0..fbaaf7d22880 100644 --- a/numpy/polynomial/polyutils.pyi +++ b/numpy/polynomial/polyutils.pyi @@ -1,14 +1,5 @@ from collections.abc import Callable, Iterable, Sequence -from typing import ( - Final, - Literal, - Protocol, - SupportsIndex, - TypeAlias, - TypeVar, - overload, - type_check_only, -) +from typing import Final, Literal, Protocol, SupportsIndex, overload, type_check_only import numpy as np import numpy.typing as npt @@ -44,16 +35,13 @@ from ._polytypes import ( __all__ = ["as_series", "format_float", "getdomain", "mapdomain", "mapparms", "trimcoef", "trimseq"] -_T = TypeVar("_T") -_SeqT = TypeVar("_SeqT", bound=_CoefArray | Sequence[_CoefLike_co]) - -_AnyLineF: TypeAlias = Callable[[float, float], _CoefArray] -_AnyMulF: TypeAlias = Callable[[np.ndarray | list[int], np.ndarray], _CoefArray] -_AnyVanderF: TypeAlias = Callable[[np.ndarray, int], _CoefArray] +type _AnyLineF = Callable[[float, float], _CoefArray] +type _AnyMulF = Callable[[np.ndarray | list[int], np.ndarray], _CoefArray] +type _AnyVanderF = Callable[[np.ndarray, int], _CoefArray] @type_check_only -class _ValFunc(Protocol[_T]): - def __call__(self, x: np.ndarray, c: _T, /, *, tensor: bool = True) -> _T: ... +class _ValFunc[T](Protocol): + def __call__(self, x: np.ndarray, c: T, /, *, tensor: bool = True) -> T: ... ### @@ -77,7 +65,7 @@ def as_series(alist: Iterable[_SeriesLikeComplex_co | complex], trim: bool = Tru def as_series(alist: Iterable[_SeriesLikeCoef_co | object], trim: bool = True) -> list[_ObjectSeries]: ... # -def trimseq(seq: _SeqT) -> _SeqT: ... +def trimseq[SeqT: _CoefArray | Sequence[_CoefLike_co]](seq: SeqT) -> SeqT: ... # @overload @@ -219,10 +207,10 @@ def _fromroots(line_f: _AnyLineF, mul_f: _AnyMulF, roots: _SeriesLikeObject_co) def _fromroots(line_f: _AnyLineF, mul_f: _AnyMulF, roots: _SeriesLikeCoef_co) -> _CoefSeries: ... 
# keep in sync with `_gridnd` -def _valnd(val_f: _ValFunc[_T], c: _T, *args: npt.ArrayLike) -> _T: ... +def _valnd[T](val_f: _ValFunc[T], c: T, *args: npt.ArrayLike) -> T: ... # keep in sync with `_valnd` -def _gridnd(val_f: _ValFunc[_T], c: _T, *args: npt.ArrayLike) -> _T: ... +def _gridnd[T](val_f: _ValFunc[T], c: T, *args: npt.ArrayLike) -> T: ... # keep in sync with `_polytypes._FuncBinOp` @overload diff --git a/numpy/random/_common.pyi b/numpy/random/_common.pyi index b667fd1c82eb..417387612014 100644 --- a/numpy/random/_common.pyi +++ b/numpy/random/_common.pyi @@ -1,11 +1,12 @@ +from _typeshed import Incomplete from collections.abc import Callable -from typing import Any, NamedTuple, TypeAlias +from typing import NamedTuple import numpy as np -__all__: list[str] = ["interface"] +__all__ = ["interface"] -_CDataVoidPointer: TypeAlias = Any +type _CDataVoidPointer = Incomplete # currently not expressible class interface(NamedTuple): state_address: int diff --git a/numpy/random/_generator.pyi b/numpy/random/_generator.pyi index 9e857fb000ef..9b0e5c331b57 100644 --- a/numpy/random/_generator.pyi +++ b/numpy/random/_generator.pyi @@ -1,8 +1,8 @@ from collections.abc import Callable, MutableSequence -from typing import Any, Literal, TypeAlias, TypeVar, overload +from typing import Any, Literal, overload import numpy as np -from numpy import dtype, float32, float64, int64 +from numpy import float32, float64, int64 from numpy._typing import ( ArrayLike, DTypeLike, @@ -23,7 +23,6 @@ from numpy._typing import ( _IntPCodes, _ShapeLike, _SingleCodes, - _SupportsDType, _UInt8Codes, _UInt16Codes, _UInt32Codes, @@ -32,24 +31,10 @@ from numpy._typing import ( ) from numpy.random import BitGenerator, RandomState, SeedSequence -_IntegerT = TypeVar("_IntegerT", bound=np.integer) +type _DTypeLikeFloat32 = _DTypeLike[np.float32] | _Float32Codes | _SingleCodes +type _DTypeLikeFloat64 = type[float] | _DTypeLike[np.float64] | _Float64Codes | _DoubleCodes -_DTypeLikeFloat32: 
TypeAlias = ( - dtype[float32] - | _SupportsDType[dtype[float32]] - | type[float32] - | _Float32Codes - | _SingleCodes -) - -_DTypeLikeFloat64: TypeAlias = ( - dtype[float64] - | _SupportsDType[dtype[float64]] - | type[float] - | type[float64] - | _Float64Codes - | _DoubleCodes -) +### class Generator: def __init__(self, bit_generator: BitGenerator) -> None: ... @@ -57,10 +42,7 @@ class Generator: def __str__(self) -> str: ... def __getstate__(self) -> None: ... def __setstate__(self, state: dict[str, Any] | None) -> None: ... - def __reduce__(self) -> tuple[ - Callable[[BitGenerator], Generator], - tuple[BitGenerator], - None]: ... + def __reduce__(self) -> tuple[Callable[[BitGenerator], Generator], tuple[BitGenerator], None]: ... @property def bit_generator(self) -> BitGenerator: ... def spawn(self, n_children: int) -> list[Generator]: ... @@ -238,15 +220,15 @@ class Generator: endpoint: bool = False, ) -> np.bool: ... @overload - def integers( + def integers[ScalarT: np.integer]( self, low: int, high: int | None = None, size: None = None, *, - dtype: _DTypeLike[_IntegerT], + dtype: _DTypeLike[ScalarT], endpoint: bool = False, - ) -> _IntegerT: ... + ) -> ScalarT: ... @overload def integers( self, @@ -267,15 +249,15 @@ class Generator: endpoint: bool = False, ) -> NDArray[np.bool]: ... @overload - def integers( + def integers[ScalarT: np.integer]( self, low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, *, - dtype: _DTypeLike[_IntegerT], + dtype: _DTypeLike[ScalarT], endpoint: bool = False, - ) -> NDArray[_IntegerT]: ... + ) -> NDArray[ScalarT]: ... 
@overload def integers( self, diff --git a/numpy/random/_pickle.pyi b/numpy/random/_pickle.pyi index b8b1b7bcf63b..b0aa143801ba 100644 --- a/numpy/random/_pickle.pyi +++ b/numpy/random/_pickle.pyi @@ -1,5 +1,5 @@ from collections.abc import Callable -from typing import Final, Literal, TypedDict, TypeVar, overload, type_check_only +from typing import Final, Literal, TypedDict, overload, type_check_only from numpy.random._generator import Generator from numpy.random._mt19937 import MT19937 @@ -9,8 +9,6 @@ from numpy.random._sfc64 import SFC64 from numpy.random.bit_generator import BitGenerator from numpy.random.mtrand import RandomState -_T = TypeVar("_T", bound=BitGenerator) - @type_check_only class _BitGenerators(TypedDict): MT19937: type[MT19937] @@ -19,6 +17,8 @@ class _BitGenerators(TypedDict): Philox: type[Philox] SFC64: type[SFC64] +### + BitGenerators: Final[_BitGenerators] = ... @overload @@ -32,7 +32,7 @@ def __bit_generator_ctor(bit_generator: Literal["Philox"]) -> Philox: ... @overload def __bit_generator_ctor(bit_generator: Literal["SFC64"]) -> SFC64: ... @overload -def __bit_generator_ctor(bit_generator: type[_T]) -> _T: ... +def __bit_generator_ctor[BitGeneratorT: BitGenerator](bit_generator: type[BitGeneratorT]) -> BitGeneratorT: ... 
def __generator_ctor( bit_generator_name: str | type[BitGenerator] | BitGenerator = "MT19937", bit_generator_ctor: Callable[[str | type[BitGenerator]], BitGenerator] = ..., diff --git a/numpy/random/bit_generator.pyi b/numpy/random/bit_generator.pyi index ee4499dee1f3..3c2069aba408 100644 --- a/numpy/random/bit_generator.pyi +++ b/numpy/random/bit_generator.pyi @@ -8,7 +8,6 @@ from typing import ( Literal, NamedTuple, Self, - TypeAlias, TypedDict, overload, type_check_only, @@ -29,7 +28,7 @@ __all__ = ["BitGenerator", "SeedSequence"] ### -_DTypeLikeUint_: TypeAlias = _DTypeLike[np.uint32 | np.uint64] | _UInt32Codes | _UInt64Codes +type _DTypeLikeUint_ = _DTypeLike[np.uint32 | np.uint64] | _UInt32Codes | _UInt64Codes @type_check_only class _SeedSeqState(TypedDict): diff --git a/numpy/testing/_private/utils.pyi b/numpy/testing/_private/utils.pyi index 016bbecf4604..5cb7f746380d 100644 --- a/numpy/testing/_private/utils.pyi +++ b/numpy/testing/_private/utils.pyi @@ -16,11 +16,8 @@ from typing import ( Generic, Literal as L, NoReturn, - ParamSpec, Self, SupportsIndex, - TypeAlias, - TypeVarTuple, overload, type_check_only, ) @@ -91,25 +88,20 @@ __all__ = [ # noqa: RUF022 ### -_T = TypeVar("_T") -_Ts = TypeVarTuple("_Ts") -_Tss = ParamSpec("_Tss") -_ET = TypeVar("_ET", bound=BaseException, default=BaseException) -_FT = TypeVar("_FT", bound=Callable[..., Any]) _W_co = TypeVar("_W_co", bound=_WarnLog | None, default=_WarnLog | None, covariant=True) -_StrLike: TypeAlias = str | bytes -_RegexLike: TypeAlias = _StrLike | Pattern[Any] -_NumericArrayLike: TypeAlias = _ArrayLikeNumber_co | _ArrayLikeObject_co +type _StrLike = str | bytes +type _RegexLike = _StrLike | Pattern[Any] +type _NumericArrayLike = _ArrayLikeNumber_co | _ArrayLikeObject_co -_ExceptionSpec: TypeAlias = type[_ET] | tuple[type[_ET], ...] 
-_WarningSpec: TypeAlias = type[Warning] -_WarnLog: TypeAlias = list[warnings.WarningMessage] -_ToModules: TypeAlias = Iterable[types.ModuleType] +type _ExceptionSpec[ExceptionT: BaseException] = type[ExceptionT] | tuple[type[ExceptionT], ...] +type _WarningSpec = type[Warning] +type _WarnLog = list[warnings.WarningMessage] +type _ToModules = Iterable[types.ModuleType] # Must return a bool or an ndarray/generic type that is supported by `np.logical_and.reduce` -_ComparisonFunc: TypeAlias = Callable[ - [NDArray[Any], NDArray[Any]], +type _ComparisonFunc = Callable[ + [np.ndarray, np.ndarray], bool | np.bool | np.number | NDArray[np.bool | np.number | np.object_], ] @@ -158,7 +150,7 @@ class suppress_warnings: def __init__(self, /, forwarding_rule: L["always", "module", "once", "location"] = "always") -> None: ... def __enter__(self) -> Self: ... def __exit__(self, cls: type[BaseException] | None, exc: BaseException | None, tb: types.TracebackType | None, /) -> None: ... - def __call__(self, /, func: _FT) -> _FT: ... + def __call__[FuncT: Callable[..., Any]](self, /, func: FuncT) -> FuncT: ... # def filter(self, /, category: type[Warning] = ..., message: str = "", module: types.ModuleType | None = None) -> None: ... @@ -289,36 +281,36 @@ def assert_string_equal(actual: str, desired: str) -> None: ... # @overload -def assert_raises( - exception_class: _ExceptionSpec[_ET], +def assert_raises[ExceptionT: BaseException]( + exception_class: _ExceptionSpec[ExceptionT], /, *, msg: str | None = None, -) -> unittest.case._AssertRaisesContext[_ET]: ... +) -> unittest.case._AssertRaisesContext[ExceptionT]: ... @overload -def assert_raises( - exception_class: _ExceptionSpec, - callable: Callable[_Tss, Any], +def assert_raises[**Tss]( + exception_class: _ExceptionSpec[BaseException], + callable: Callable[Tss, Any], /, - *args: _Tss.args, - **kwargs: _Tss.kwargs, + *args: Tss.args, + **kwargs: Tss.kwargs, ) -> None: ... 
# @overload -def assert_raises_regex( - exception_class: _ExceptionSpec[_ET], +def assert_raises_regex[ExceptionT: BaseException]( + exception_class: _ExceptionSpec[ExceptionT], expected_regexp: _RegexLike, *, msg: str | None = None, -) -> unittest.case._AssertRaisesContext[_ET]: ... +) -> unittest.case._AssertRaisesContext[ExceptionT]: ... @overload -def assert_raises_regex( - exception_class: _ExceptionSpec, +def assert_raises_regex[**Tss]( + exception_class: _ExceptionSpec[BaseException], expected_regexp: _RegexLike, - callable: Callable[_Tss, Any], - *args: _Tss.args, - **kwargs: _Tss.kwargs, + callable: Callable[Tss, Any], + *args: Tss.args, + **kwargs: Tss.kwargs, ) -> None: ... # @@ -368,19 +360,24 @@ def assert_array_max_ulp( def assert_warns(warning_class: _WarningSpec) -> _GeneratorContextManager[None]: ... @overload @deprecated("Please use warnings.catch_warnings or pytest.warns instead") -def assert_warns(warning_class: _WarningSpec, func: Callable[_Tss, _T], *args: _Tss.args, **kwargs: _Tss.kwargs) -> _T: ... +def assert_warns[**Tss, ReturnT]( + warning_class: _WarningSpec, + func: Callable[Tss, ReturnT], + *args: Tss.args, + **kwargs: Tss.kwargs, +) -> ReturnT: ... # @overload def assert_no_warnings() -> _GeneratorContextManager[None]: ... @overload -def assert_no_warnings(func: Callable[_Tss, _T], /, *args: _Tss.args, **kwargs: _Tss.kwargs) -> _T: ... +def assert_no_warnings[**Tss, ReturnT](func: Callable[Tss, ReturnT], /, *args: Tss.args, **kwargs: Tss.kwargs) -> ReturnT: ... # @overload def assert_no_gc_cycles() -> _GeneratorContextManager[None]: ... @overload -def assert_no_gc_cycles(func: Callable[_Tss, Any], /, *args: _Tss.args, **kwargs: _Tss.kwargs) -> None: ... +def assert_no_gc_cycles[**Tss](func: Callable[Tss, Any], /, *args: Tss.args, **kwargs: Tss.kwargs) -> None: ... ### @@ -479,23 +476,23 @@ def run_threaded( prepare_args: None = None, ) -> None: ... 
@overload -def run_threaded( - func: Callable[[*_Ts], None], +def run_threaded[*Ts]( + func: Callable[[*Ts], None], max_workers: int, pass_count: bool, pass_barrier: bool, outer_iterations: int, - prepare_args: tuple[*_Ts], + prepare_args: tuple[*Ts], ) -> None: ... @overload -def run_threaded( - func: Callable[[*_Ts], None], +def run_threaded[*Ts]( + func: Callable[[*Ts], None], max_workers: int = 8, pass_count: bool = False, pass_barrier: bool = False, outer_iterations: int = 1, *, - prepare_args: tuple[*_Ts], + prepare_args: tuple[*Ts], ) -> None: ... # diff --git a/numpy/typing/mypy_plugin.py b/numpy/typing/mypy_plugin.py index fb78eb077c44..21aca2bc69ef 100644 --- a/numpy/typing/mypy_plugin.py +++ b/numpy/typing/mypy_plugin.py @@ -39,7 +39,7 @@ """ from collections.abc import Callable, Iterable -from typing import TYPE_CHECKING, Final, TypeAlias, cast +from typing import TYPE_CHECKING, Final, cast import numpy as np @@ -115,7 +115,7 @@ def plugin(version: str) -> type: else: - _HookFunc: TypeAlias = Callable[[AnalyzeTypeContext], mypy.types.Type] + type _HookFunc = Callable[[AnalyzeTypeContext], mypy.types.Type] def _hook(ctx: AnalyzeTypeContext) -> mypy.types.Type: """Replace a type-alias with a concrete ``NBitBase`` subclass.""" diff --git a/numpy/typing/tests/data/fail/ma.pyi b/numpy/typing/tests/data/fail/ma.pyi index 084ae971bdd0..06159f6e979e 100644 --- a/numpy/typing/tests/data/fail/ma.pyi +++ b/numpy/typing/tests/data/fail/ma.pyi @@ -1,16 +1,13 @@ -from typing import TypeAlias, TypeVar - import numpy as np import numpy.typing as npt from numpy._typing import _AnyShape -_ScalarT = TypeVar("_ScalarT", bound=np.generic) -MaskedArray: TypeAlias = np.ma.MaskedArray[_AnyShape, np.dtype[_ScalarT]] +type _MArray[ScalarT: np.generic] = np.ma.MaskedArray[_AnyShape, np.dtype[ScalarT]] MAR_1d_f8: np.ma.MaskedArray[tuple[int], np.dtype[np.float64]] -MAR_b: MaskedArray[np.bool] -MAR_c: MaskedArray[np.complex128] -MAR_td64: MaskedArray[np.timedelta64] +MAR_b: 
_MArray[np.bool] +MAR_c: _MArray[np.complex128] +MAR_td64: _MArray[np.timedelta64] AR_b: npt.NDArray[np.bool] diff --git a/numpy/typing/tests/data/pass/ma.py b/numpy/typing/tests/data/pass/ma.py index 3ccea66861eb..b5d6b0d38ef0 100644 --- a/numpy/typing/tests/data/pass/ma.py +++ b/numpy/typing/tests/data/pass/ma.py @@ -1,12 +1,11 @@ import datetime as dt -from typing import Any, TypeAlias, TypeVar, cast +from typing import Any, cast import numpy as np import numpy.typing as npt from numpy._typing import _Shape -_ScalarT = TypeVar("_ScalarT", bound=np.generic) -MaskedArray: TypeAlias = np.ma.MaskedArray[_Shape, np.dtype[_ScalarT]] +type MaskedArray[ScalarT: np.generic] = np.ma.MaskedArray[_Shape, np.dtype[_ScalarT]] # mypy: disable-error-code=no-untyped-call diff --git a/numpy/typing/tests/data/reveal/array_constructors.pyi b/numpy/typing/tests/data/reveal/array_constructors.pyi index b7daf3397f9d..36440fca9487 100644 --- a/numpy/typing/tests/data/reveal/array_constructors.pyi +++ b/numpy/typing/tests/data/reveal/array_constructors.pyi @@ -1,17 +1,15 @@ -import sys from collections import deque from pathlib import Path -from typing import Any, Generic, TypeVar, assert_type +from typing import Any, assert_type import numpy as np import numpy.typing as npt +from numpy._typing import _AnyShape -_ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, covariant=True) +class SubClass[ScalarT: np.generic](np.ndarray[_AnyShape, np.dtype[ScalarT]]): ... -class SubClass(npt.NDArray[_ScalarT_co]): ... - -class IntoSubClass(Generic[_ScalarT_co]): - def __array__(self) -> SubClass[_ScalarT_co]: ... +class IntoSubClass[ScalarT: np.generic]: + def __array__(self) -> SubClass[ScalarT]: ... 
i8: np.int64 @@ -268,10 +266,9 @@ assert_type(np.stack([A, A], out=B), SubClass[np.float64]) assert_type(np.block([[A, A], [A, A]]), npt.NDArray[Any]) # pyright correctly infers this as NDArray[float64] assert_type(np.block(C), npt.NDArray[Any]) -if sys.version_info >= (3, 12): - from collections.abc import Buffer +from collections.abc import Buffer - def create_array(obj: npt.ArrayLike) -> npt.NDArray[Any]: ... +def create_array(obj: npt.ArrayLike) -> npt.NDArray[Any]: ... - buffer: Buffer - assert_type(create_array(buffer), npt.NDArray[Any]) +buffer: Buffer +assert_type(create_array(buffer), npt.NDArray[Any]) diff --git a/numpy/typing/tests/data/reveal/bitwise_ops.pyi b/numpy/typing/tests/data/reveal/bitwise_ops.pyi index 49986bd5d12c..809f77d9736d 100644 --- a/numpy/typing/tests/data/reveal/bitwise_ops.pyi +++ b/numpy/typing/tests/data/reveal/bitwise_ops.pyi @@ -1,10 +1,10 @@ -from typing import Literal as L, TypeAlias, assert_type +from typing import Literal as L, assert_type import numpy as np import numpy.typing as npt -FalseType: TypeAlias = L[False] -TrueType: TypeAlias = L[True] +type FalseType = L[False] +type TrueType = L[True] i4: np.int32 i8: np.int64 diff --git a/numpy/typing/tests/data/reveal/char.pyi b/numpy/typing/tests/data/reveal/char.pyi index 2fba2feae385..b83ecc62221f 100644 --- a/numpy/typing/tests/data/reveal/char.pyi +++ b/numpy/typing/tests/data/reveal/char.pyi @@ -1,11 +1,11 @@ -from typing import TypeAlias, assert_type +from typing import assert_type import numpy as np import numpy._typing as np_t import numpy.typing as npt -AR_T_alias: TypeAlias = np.ndarray[np_t._AnyShape, np.dtypes.StringDType] -AR_TU_alias: TypeAlias = AR_T_alias | npt.NDArray[np.str_] +type AR_T_alias = np.ndarray[np_t._AnyShape, np.dtypes.StringDType] +type AR_TU_alias = AR_T_alias | npt.NDArray[np.str_] AR_U: npt.NDArray[np.str_] AR_S: npt.NDArray[np.bytes_] diff --git a/numpy/typing/tests/data/reveal/chararray.pyi b/numpy/typing/tests/data/reveal/chararray.pyi 
index 5c3dc85038db..b93826922662 100644 --- a/numpy/typing/tests/data/reveal/chararray.pyi +++ b/numpy/typing/tests/data/reveal/chararray.pyi @@ -1,10 +1,10 @@ -from typing import Any, TypeAlias, assert_type +from typing import Any, assert_type import numpy as np import numpy.typing as npt -_BytesCharArray: TypeAlias = np.char.chararray[tuple[Any, ...], np.dtype[np.bytes_]] -_StrCharArray: TypeAlias = np.char.chararray[tuple[Any, ...], np.dtype[np.str_]] +type _BytesCharArray = np.char.chararray[tuple[Any, ...], np.dtype[np.bytes_]] +type _StrCharArray = np.char.chararray[tuple[Any, ...], np.dtype[np.str_]] AR_U: _StrCharArray AR_S: _BytesCharArray diff --git a/numpy/typing/tests/data/reveal/dtype.pyi b/numpy/typing/tests/data/reveal/dtype.pyi index 7f670b3be51c..f8f939d4609f 100644 --- a/numpy/typing/tests/data/reveal/dtype.pyi +++ b/numpy/typing/tests/data/reveal/dtype.pyi @@ -2,13 +2,13 @@ import ctypes as ct import datetime as dt from decimal import Decimal from fractions import Fraction -from typing import Any, Literal, LiteralString, TypeAlias, assert_type +from typing import Any, Literal, LiteralString, assert_type import numpy as np from numpy.dtypes import StringDType # a combination of likely `object` dtype-like candidates (no `_co`) -_PyObjectLike: TypeAlias = Decimal | Fraction | dt.datetime | dt.timedelta +type _PyObjectLike = Decimal | Fraction | dt.datetime | dt.timedelta dtype_U: np.dtype[np.str_] dtype_V: np.dtype[np.void] diff --git a/numpy/typing/tests/data/reveal/flatiter.pyi b/numpy/typing/tests/data/reveal/flatiter.pyi index 98d61a6d3428..4907f8464cf2 100644 --- a/numpy/typing/tests/data/reveal/flatiter.pyi +++ b/numpy/typing/tests/data/reveal/flatiter.pyi @@ -1,10 +1,10 @@ -from typing import Any, TypeAlias, assert_type +from typing import Any, assert_type import numpy as np -_ArrayND: TypeAlias = np.ndarray[tuple[Any, ...], np.dtypes.StrDType] -_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtypes.BytesDType] -_Array2D: TypeAlias = 
np.ndarray[tuple[int, int], np.dtypes.Int8DType] +type _ArrayND = np.ndarray[tuple[Any, ...], np.dtypes.StrDType] +type _Array1D = np.ndarray[tuple[int], np.dtypes.BytesDType] +type _Array2D = np.ndarray[tuple[int, int], np.dtypes.Int8DType] _a_nd: np.flatiter[_ArrayND] _a_1d: np.flatiter[_Array1D] diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index 8eef32ddd593..a94d278e87f4 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -1,22 +1,20 @@ -from typing import Any, Generic, Literal, NoReturn, TypeAlias, TypeVar, assert_type +from typing import Any, Literal, NoReturn, assert_type import numpy as np -from numpy import dtype, generic from numpy._typing import NDArray, _AnyShape -_ScalarT = TypeVar("_ScalarT", bound=generic) -_ScalarT_co = TypeVar("_ScalarT_co", bound=generic, covariant=True) +type MaskedArray[ScalarT: np.generic] = np.ma.MaskedArray[_AnyShape, np.dtype[ScalarT]] +type _NoMaskType = np.bool[Literal[False]] +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] -MaskedArray: TypeAlias = np.ma.MaskedArray[_AnyShape, dtype[_ScalarT]] -_NoMaskType: TypeAlias = np.bool[Literal[False]] -_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] +### -class MaskedArraySubclass(MaskedArray[_ScalarT_co]): ... +class MaskedArraySubclass[ScalarT: np.generic](np.ma.MaskedArray[_AnyShape, np.dtype[ScalarT]]): ... -class IntoMaskedArraySubClass(Generic[_ScalarT_co]): - def __array__(self) -> MaskedArraySubclass[_ScalarT_co]: ... +class IntoMaskedArraySubClass[ScalarT: np.generic]: + def __array__(self) -> MaskedArraySubclass[ScalarT]: ... 
-MaskedArraySubclassC: TypeAlias = MaskedArraySubclass[np.complex128] +type MaskedArraySubclassC = MaskedArraySubclass[np.complex128] AR_b: NDArray[np.bool] AR_f4: NDArray[np.float32] diff --git a/numpy/typing/tests/data/reveal/matrix.pyi b/numpy/typing/tests/data/reveal/matrix.pyi index 3a32b3d394f0..b76760d547b9 100644 --- a/numpy/typing/tests/data/reveal/matrix.pyi +++ b/numpy/typing/tests/data/reveal/matrix.pyi @@ -1,9 +1,9 @@ -from typing import Any, TypeAlias, assert_type +from typing import Any, assert_type import numpy as np import numpy.typing as npt -_Shape2D: TypeAlias = tuple[int, int] +type _Shape2D = tuple[int, int] mat: np.matrix[_Shape2D, np.dtype[np.int64]] ar_f8: npt.NDArray[np.float64] diff --git a/numpy/typing/tests/data/reveal/multiarray.pyi b/numpy/typing/tests/data/reveal/multiarray.pyi index 0fe907c0006b..ada8f7777696 100644 --- a/numpy/typing/tests/data/reveal/multiarray.pyi +++ b/numpy/typing/tests/data/reveal/multiarray.pyi @@ -1,12 +1,10 @@ import datetime as dt -from typing import Any, Literal, TypeVar, assert_type +from typing import Any, Literal, assert_type import numpy as np import numpy.typing as npt -_ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, covariant=True) - -class SubClass(npt.NDArray[_ScalarT_co]): ... +class SubClass[ScalarT: np.generic](np.ndarray[tuple[Any, ...], np.dtype[ScalarT]]): ... 
subclass: SubClass[np.float64] diff --git a/numpy/typing/tests/data/reveal/nbit_base_example.pyi b/numpy/typing/tests/data/reveal/nbit_base_example.pyi index 66470b95bf15..c6e931eaca84 100644 --- a/numpy/typing/tests/data/reveal/nbit_base_example.pyi +++ b/numpy/typing/tests/data/reveal/nbit_base_example.pyi @@ -1,13 +1,10 @@ -from typing import TypeVar, assert_type +from typing import assert_type import numpy as np import numpy.typing as npt from numpy._typing import _32Bit, _64Bit -T1 = TypeVar("T1", bound=npt.NBitBase) # type: ignore[deprecated] # pyright: ignore[reportDeprecated] -T2 = TypeVar("T2", bound=npt.NBitBase) # type: ignore[deprecated] # pyright: ignore[reportDeprecated] - -def add(a: np.floating[T1], b: np.integer[T2]) -> np.floating[T1 | T2]: ... +def add[T1: npt.NBitBase, T2: npt.NBitBase](a: np.floating[T1], b: np.integer[T2]) -> np.floating[T1 | T2]: ... # type: ignore[deprecated] i8: np.int64 i4: np.int32 diff --git a/numpy/typing/tests/data/reveal/ndarray_assignability.pyi b/numpy/typing/tests/data/reveal/ndarray_assignability.pyi index feaccf28f578..e8ccc573d642 100644 --- a/numpy/typing/tests/data/reveal/ndarray_assignability.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_assignability.pyi @@ -1,39 +1,36 @@ -from typing import Any, Protocol, TypeAlias, TypeVar, assert_type +from typing import Any, Protocol, assert_type import numpy as np from numpy._typing import _64Bit -_T = TypeVar("_T") -_T_co = TypeVar("_T_co", covariant=True) +class CanAbs[T](Protocol): + def __abs__(self, /) -> T: ... -class CanAbs(Protocol[_T_co]): - def __abs__(self, /) -> _T_co: ... +class CanInvert[T](Protocol): + def __invert__(self, /) -> T: ... -class CanInvert(Protocol[_T_co]): - def __invert__(self, /) -> _T_co: ... +class CanNeg[T](Protocol): + def __neg__(self, /) -> T: ... -class CanNeg(Protocol[_T_co]): - def __neg__(self, /) -> _T_co: ... +class CanPos[T](Protocol): + def __pos__(self, /) -> T: ... 
-class CanPos(Protocol[_T_co]): - def __pos__(self, /) -> _T_co: ... +def do_abs[T](x: CanAbs[T]) -> T: ... +def do_invert[T](x: CanInvert[T]) -> T: ... +def do_neg[T](x: CanNeg[T]) -> T: ... +def do_pos[T](x: CanPos[T]) -> T: ... -def do_abs(x: CanAbs[_T]) -> _T: ... -def do_invert(x: CanInvert[_T]) -> _T: ... -def do_neg(x: CanNeg[_T]) -> _T: ... -def do_pos(x: CanPos[_T]) -> _T: ... - -_Bool_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.bool]] -_UInt8_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.uint8]] -_Int16_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.int16]] -_LongLong_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.longlong]] -_Float32_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.float32]] -_Float64_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.float64]] -_LongDouble_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.longdouble]] -_Complex64_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complex64]] -_Complex128_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complex128]] -_CLongDouble_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.clongdouble]] -_Void_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.void]] +type _Bool_1d = np.ndarray[tuple[int], np.dtype[np.bool]] +type _UInt8_1d = np.ndarray[tuple[int], np.dtype[np.uint8]] +type _Int16_1d = np.ndarray[tuple[int], np.dtype[np.int16]] +type _LongLong_1d = np.ndarray[tuple[int], np.dtype[np.longlong]] +type _Float32_1d = np.ndarray[tuple[int], np.dtype[np.float32]] +type _Float64_1d = np.ndarray[tuple[int], np.dtype[np.float64]] +type _LongDouble_1d = np.ndarray[tuple[int], np.dtype[np.longdouble]] +type _Complex64_1d = np.ndarray[tuple[int], np.dtype[np.complex64]] +type _Complex128_1d = np.ndarray[tuple[int], np.dtype[np.complex128]] +type _CLongDouble_1d = np.ndarray[tuple[int], np.dtype[np.clongdouble]] +type _Void_1d = np.ndarray[tuple[int], np.dtype[np.void]] b1_1d: _Bool_1d u1_1d: _UInt8_1d diff --git 
a/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi b/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi index 0ce599a40310..6bbe057ff5b7 100644 --- a/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi @@ -1,11 +1,11 @@ -from typing import TypeAlias, assert_type +from typing import assert_type import numpy as np import numpy.typing as npt -_ArrayND: TypeAlias = npt.NDArray[np.int64] -_Array2D: TypeAlias = np.ndarray[tuple[int, int], np.dtype[np.int8]] -_Array3D: TypeAlias = np.ndarray[tuple[int, int, int], np.dtype[np.bool]] +type _ArrayND = npt.NDArray[np.int64] +type _Array2D = np.ndarray[tuple[int, int], np.dtype[np.int8]] +type _Array3D = np.ndarray[tuple[int, int, int], np.dtype[np.bool]] _nd: _ArrayND _2d: _Array2D diff --git a/numpy/typing/tests/data/reveal/polynomial_polybase.pyi b/numpy/typing/tests/data/reveal/polynomial_polybase.pyi index 4c4899ad6308..faba91273c91 100644 --- a/numpy/typing/tests/data/reveal/polynomial_polybase.pyi +++ b/numpy/typing/tests/data/reveal/polynomial_polybase.pyi @@ -1,30 +1,29 @@ from collections.abc import Sequence from decimal import Decimal -from typing import Any, Literal as L, TypeAlias, TypeVar, assert_type +from typing import Any, Literal as L, assert_type import numpy as np import numpy.polynomial as npp import numpy.typing as npt -_Ar_x: TypeAlias = npt.NDArray[np.inexact | np.object_] -_Ar_f: TypeAlias = npt.NDArray[np.floating] -_Ar_c: TypeAlias = npt.NDArray[np.complexfloating] -_Ar_O: TypeAlias = npt.NDArray[np.object_] +type _Ar_x = npt.NDArray[np.inexact | np.object_] +type _Ar_f = npt.NDArray[np.floating] +type _Ar_c = npt.NDArray[np.complexfloating] +type _Ar_O = npt.NDArray[np.object_] -_Ar_x_n: TypeAlias = np.ndarray[tuple[int], np.dtype[np.inexact | np.object_]] -_Ar_f_n: TypeAlias = np.ndarray[tuple[int], np.dtype[np.floating]] -_Ar_c_n: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complexfloating]] 
-_Ar_O_n: TypeAlias = np.ndarray[tuple[int], np.dtype[np.object_]] +type _Ar_x_n = np.ndarray[tuple[int], np.dtype[np.inexact | np.object_]] +type _Ar_f_n = np.ndarray[tuple[int], np.dtype[np.floating]] +type _Ar_c_n = np.ndarray[tuple[int], np.dtype[np.complexfloating]] +type _Ar_O_n = np.ndarray[tuple[int], np.dtype[np.object_]] -_Ar_x_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.float64 | Any]] -_Ar_f_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.floating]] -_Ar_c_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.complexfloating]] -_Ar_O_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.object_]] +type _Ar_x_2 = np.ndarray[tuple[L[2]], np.dtype[np.float64 | Any]] +type _Ar_f_2 = np.ndarray[tuple[L[2]], np.dtype[np.floating]] +type _Ar_c_2 = np.ndarray[tuple[L[2]], np.dtype[np.complexfloating]] +type _Ar_O_2 = np.ndarray[tuple[L[2]], np.dtype[np.object_]] -_ScalarT = TypeVar("_ScalarT", bound=np.generic) -_Ar_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] +type _Ar_1d[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] -_BasisName: TypeAlias = L["X"] +type _BasisName = L["X"] SC_i: np.int_ SC_i_co: int | np.int_ diff --git a/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi b/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi index 07d6c9d1af65..9c5aff1117dc 100644 --- a/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi +++ b/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi @@ -1,20 +1,20 @@ from collections.abc import Sequence from decimal import Decimal from fractions import Fraction -from typing import Literal as L, TypeAlias, assert_type +from typing import Literal as L, assert_type import numpy as np import numpy.polynomial.polyutils as pu import numpy.typing as npt from numpy.polynomial._polytypes import _Tuple2 -_ArrFloat1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.floating]] -_ArrComplex1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complexfloating]] -_ArrObject1D: TypeAlias = 
np.ndarray[tuple[int], np.dtype[np.object_]] +type _ArrFloat1D = np.ndarray[tuple[int], np.dtype[np.floating]] +type _ArrComplex1D = np.ndarray[tuple[int], np.dtype[np.complexfloating]] +type _ArrObject1D = np.ndarray[tuple[int], np.dtype[np.object_]] -_ArrFloat1D_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.float64]] -_ArrComplex1D_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.complex128]] -_ArrObject1D_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.object_]] +type _ArrFloat1D_2 = np.ndarray[tuple[L[2]], np.dtype[np.float64]] +type _ArrComplex1D_2 = np.ndarray[tuple[L[2]], np.dtype[np.complex128]] +type _ArrObject1D_2 = np.ndarray[tuple[L[2]], np.dtype[np.object_]] num_int: int num_float: float diff --git a/numpy/typing/tests/data/reveal/polynomial_series.pyi b/numpy/typing/tests/data/reveal/polynomial_series.pyi index 3188ad9a1239..b87ba4fb2677 100644 --- a/numpy/typing/tests/data/reveal/polynomial_series.pyi +++ b/numpy/typing/tests/data/reveal/polynomial_series.pyi @@ -1,15 +1,15 @@ from collections.abc import Sequence -from typing import Any, TypeAlias, assert_type +from typing import Any, assert_type import numpy as np import numpy.polynomial as npp import numpy.typing as npt -_ArrFloat1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.floating]] -_ArrFloat1D64: TypeAlias = np.ndarray[tuple[int], np.dtype[np.float64]] -_ArrComplex1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complexfloating]] -_ArrComplex1D128: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complex128]] -_ArrObject1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.object_]] +type _ArrFloat1D = np.ndarray[tuple[int], np.dtype[np.floating]] +type _ArrFloat1D64 = np.ndarray[tuple[int], np.dtype[np.float64]] +type _ArrComplex1D = np.ndarray[tuple[int], np.dtype[np.complexfloating]] +type _ArrComplex1D128 = np.ndarray[tuple[int], np.dtype[np.complex128]] +type _ArrObject1D = np.ndarray[tuple[int], np.dtype[np.object_]] AR_b: npt.NDArray[np.bool] AR_u4: npt.NDArray[np.uint32] 
diff --git a/numpy/typing/tests/data/reveal/rec.pyi b/numpy/typing/tests/data/reveal/rec.pyi index aacf217e4207..da66ab003078 100644 --- a/numpy/typing/tests/data/reveal/rec.pyi +++ b/numpy/typing/tests/data/reveal/rec.pyi @@ -1,10 +1,10 @@ import io -from typing import Any, TypeAlias, assert_type +from typing import Any, assert_type import numpy as np import numpy.typing as npt -_RecArray: TypeAlias = np.recarray[tuple[Any, ...], np.dtype[np.record]] +type _RecArray = np.recarray[tuple[Any, ...], np.dtype[np.record]] AR_i8: npt.NDArray[np.int64] REC_AR_V: _RecArray diff --git a/numpy/typing/tests/data/reveal/scalars.pyi b/numpy/typing/tests/data/reveal/scalars.pyi index ab926baa7f15..c56c8e88092c 100644 --- a/numpy/typing/tests/data/reveal/scalars.pyi +++ b/numpy/typing/tests/data/reveal/scalars.pyi @@ -1,9 +1,7 @@ -from typing import Any, Literal, TypeAlias, assert_type +from typing import Any, Literal, assert_type import numpy as np -_1: TypeAlias = Literal[1] - b: np.bool u8: np.uint64 i8: np.int64 @@ -198,17 +196,17 @@ assert_type(S.flatten(), np.ndarray[tuple[int], np.dtype[np.bytes_]]) assert_type(b.reshape(()), np.bool) assert_type(i8.reshape([]), np.int64) -assert_type(b.reshape(1), np.ndarray[tuple[_1], np.dtype[np.bool]]) -assert_type(i8.reshape(-1), np.ndarray[tuple[_1], np.dtype[np.int64]]) -assert_type(u8.reshape(1, 1), np.ndarray[tuple[_1, _1], np.dtype[np.uint64]]) -assert_type(f8.reshape(1, -1), np.ndarray[tuple[_1, _1], np.dtype[np.float64]]) -assert_type(c16.reshape(1, 1, 1), np.ndarray[tuple[_1, _1, _1], np.dtype[np.complex128]]) -assert_type(U.reshape(1, 1, 1, 1), np.ndarray[tuple[_1, _1, _1, _1], np.dtype[np.str_]]) +assert_type(b.reshape(1), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(i8.reshape(-1), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(u8.reshape(1, 1), np.ndarray[tuple[int, int], np.dtype[np.uint64]]) +assert_type(f8.reshape(1, -1), np.ndarray[tuple[int, int], np.dtype[np.float64]]) 
+assert_type(c16.reshape(1, 1, 1), np.ndarray[tuple[int, int, int], np.dtype[np.complex128]]) +assert_type(U.reshape(1, 1, 1, 1), np.ndarray[tuple[int, int, int, int], np.dtype[np.str_]]) assert_type( S.reshape(1, 1, 1, 1, 1), np.ndarray[ # len(shape) >= 5 - tuple[_1, _1, _1, _1, _1, *tuple[_1, ...]], + tuple[int, int, int, int, int, *tuple[int, ...]], np.dtype[np.bytes_], ], ) diff --git a/numpy/typing/tests/data/reveal/strings.pyi b/numpy/typing/tests/data/reveal/strings.pyi index 18bd252d5ff9..166481d80922 100644 --- a/numpy/typing/tests/data/reveal/strings.pyi +++ b/numpy/typing/tests/data/reveal/strings.pyi @@ -1,11 +1,11 @@ -from typing import TypeAlias, assert_type +from typing import assert_type import numpy as np import numpy._typing as np_t import numpy.typing as npt -AR_T_alias: TypeAlias = np.ndarray[np_t._AnyShape, np.dtypes.StringDType] -AR_TU_alias: TypeAlias = AR_T_alias | npt.NDArray[np.str_] +type AR_T_alias = np.ndarray[np_t._AnyShape, np.dtypes.StringDType] +type AR_TU_alias = AR_T_alias | npt.NDArray[np.str_] AR_U: npt.NDArray[np.str_] AR_S: npt.NDArray[np.bytes_] diff --git a/numpy/typing/tests/data/reveal/testing.pyi b/numpy/typing/tests/data/reveal/testing.pyi index 34fbc5feeb41..583ca60f90a7 100644 --- a/numpy/typing/tests/data/reveal/testing.pyi +++ b/numpy/typing/tests/data/reveal/testing.pyi @@ -6,7 +6,7 @@ import unittest import warnings from collections.abc import Callable from pathlib import Path -from typing import Any, TypeVar, assert_type +from typing import Any, assert_type import numpy as np import numpy.typing as npt @@ -16,7 +16,6 @@ AR_i8: npt.NDArray[np.int64] bool_obj: bool suppress_obj: np.testing.suppress_warnings # type: ignore[deprecated] # pyright: ignore[reportDeprecated] -FT = TypeVar("FT", bound=Callable[..., Any]) def func() -> int: ... @@ -148,7 +147,7 @@ assert_type(np.testing.assert_raises_regex(RuntimeWarning, re.compile(b"test"), class Test: ... 
-def decorate(a: FT) -> FT: +def decorate[FT: Callable[..., Any]](a: FT) -> FT: return a assert_type(np.testing.decorate_methods(Test, decorate), None) diff --git a/numpy/typing/tests/data/reveal/twodim_base.pyi b/numpy/typing/tests/data/reveal/twodim_base.pyi index 30d4f408f1a9..8cafe729a943 100644 --- a/numpy/typing/tests/data/reveal/twodim_base.pyi +++ b/numpy/typing/tests/data/reveal/twodim_base.pyi @@ -1,15 +1,13 @@ -from typing import Any, TypeAlias, TypeVar, assert_type, type_check_only +from typing import Any, assert_type, type_check_only import numpy as np import numpy.typing as npt -_ScalarT = TypeVar("_ScalarT", bound=np.generic) +type _1D = tuple[int] +type _2D = tuple[int, int] +type _ND = tuple[Any, ...] -_1D: TypeAlias = tuple[int] -_2D: TypeAlias = tuple[int, int] -_ND: TypeAlias = tuple[Any, ...] - -_Indices2D: TypeAlias = tuple[ +type _Indices2D = tuple[ np.ndarray[_1D, np.dtype[np.intp]], np.ndarray[_1D, np.dtype[np.intp]], ] @@ -33,7 +31,7 @@ _to_1d_f64: list[float] _to_1d_c128: list[complex] @type_check_only -def func1(ar: npt.NDArray[_ScalarT], a: int) -> npt.NDArray[_ScalarT]: ... +def func1[ScalarT: np.generic](ar: npt.NDArray[ScalarT], a: int) -> npt.NDArray[ScalarT]: ... @type_check_only def func2(ar: npt.NDArray[np.number], a: str) -> npt.NDArray[np.float64]: ... 
From 4fa8035e37bcc8e6c92ee69c0a97721cebec7411 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 28 Nov 2025 04:06:20 +0100 Subject: [PATCH 0973/1718] MAINT: Remove p311 special-casing in ``spin stubtest`` --- .spin/cmds.py | 8 ++------ tools/stubtest/allowlist.txt | 8 ++++++++ tools/stubtest/allowlist_py311.txt | 2 -- tools/stubtest/allowlist_py312.txt | 6 ------ 4 files changed, 10 insertions(+), 14 deletions(-) delete mode 100644 tools/stubtest/allowlist_py311.txt delete mode 100644 tools/stubtest/allowlist_py312.txt diff --git a/.spin/cmds.py b/.spin/cmds.py index ea62717e4f78..e522108a5107 100644 --- a/.spin/cmds.py +++ b/.spin/cmds.py @@ -565,17 +565,13 @@ def stubtest(*, concise: bool, build_dir: str) -> None: stubtest_dir = curdir.parent / 'tools' / 'stubtest' mypy_config = stubtest_dir / 'mypy.ini' - allowlists = [stubtest_dir / 'allowlist.txt'] - if sys.version_info < (3, 12): - allowlists.append(stubtest_dir / 'allowlist_py311.txt') - else: - allowlists.append(stubtest_dir / 'allowlist_py312.txt') + allowlist = stubtest_dir / 'allowlist.txt' cmd = [ 'stubtest', '--ignore-disjoint-bases', f'--mypy-config-file={mypy_config}', - *(f'--allowlist={allowlist}' for allowlist in allowlists), + f'--allowlist={allowlist}', ] if concise: cmd.append('--concise') diff --git a/tools/stubtest/allowlist.txt b/tools/stubtest/allowlist.txt index 30f4cd120cc9..eab246271dd1 100644 --- a/tools/stubtest/allowlist.txt +++ b/tools/stubtest/allowlist.txt @@ -1,3 +1,6 @@ +# TODO: remove once distutils is gone +numpy\.f2py\._backends\._distutils + # intentional type-check-only deviations from runtime numpy\._typing.* numpy\.polynomial\._polytypes @@ -59,6 +62,11 @@ numpy\.core\.shape_base.* numpy\.core\.umath.* numpy\.typing\.mypy_plugin +# false positive "... 
is not a Union" errors +numpy\.typing\.ArrayLike +numpy\.typing\.DTypeLike +numpy\.typing\.NDArray + # ufuncs, see https://github.com/python/mypy/issues/20223 numpy\.(\w+\.)*abs numpy\.(\w+\.)*absolute diff --git a/tools/stubtest/allowlist_py311.txt b/tools/stubtest/allowlist_py311.txt deleted file mode 100644 index e6b2e364230e..000000000000 --- a/tools/stubtest/allowlist_py311.txt +++ /dev/null @@ -1,2 +0,0 @@ -# python == 3.11.* - diff --git a/tools/stubtest/allowlist_py312.txt b/tools/stubtest/allowlist_py312.txt deleted file mode 100644 index a0ee0edf2be2..000000000000 --- a/tools/stubtest/allowlist_py312.txt +++ /dev/null @@ -1,6 +0,0 @@ -# python >= 3.12 - -# false positive "... is not a Union" errors -numpy\.typing\.ArrayLike -numpy\.typing\.DTypeLike - From 582922b56e99b64fd62424159e3da1bbfc8dd82a Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 28 Nov 2025 04:11:14 +0100 Subject: [PATCH 0974/1718] MAINT: remove ``typing_extensions`` as test dependency --- .github/workflows/linux_blas.yml | 2 +- environment.yml | 5 ++--- requirements/test_requirements.txt | 1 - 3 files changed, 3 insertions(+), 5 deletions(-) diff --git a/.github/workflows/linux_blas.yml b/.github/workflows/linux_blas.yml index c4e2d55330c4..88c5322dd754 100644 --- a/.github/workflows/linux_blas.yml +++ b/.github/workflows/linux_blas.yml @@ -274,7 +274,7 @@ jobs: run: | # do not use test_requirements.txt, it includes coverage which requires # sqlite3, which is not available on OpenSUSE python - pip install --break-system-packages pytest pytest-xdist hypothesis typing_extensions pytest-timeout + pip install --break-system-packages pytest pytest-xdist hypothesis pytest-timeout spin test -j auto -- numpy/linalg --timeout=600 --durations=10 diff --git a/environment.yml b/environment.yml index 67b7cec26754..307dd4631012 100644 --- a/environment.yml +++ b/environment.yml @@ -7,7 +7,7 @@ name: numpy-dev channels: - conda-forge dependencies: - - python=3.12 # need to pin to avoid issues with 
builds + - python=3.12 # need to pin to avoid issues with builds - cython>=3.0 - compilers - openblas @@ -24,9 +24,8 @@ dependencies: - pytest-xdist - hypothesis # For type annotations - - typing_extensions>=4.5.0 - mypy=1.19.0 - - orjson # makes mypy faster + - orjson # makes mypy faster # For building docs - sphinx>=4.5.0 - sphinx-copybutton diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index 66537dafdade..2e2f679cee72 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -13,7 +13,6 @@ pytest-timeout # - Mypy relies on C API features not present in PyPy # NOTE: Keep mypy in sync with environment.yml mypy==1.19.0; platform_python_implementation != "PyPy" -typing_extensions>=4.5.0 # for optional f2py encoding detection charset-normalizer tzdata From 75570dd993271ee90e2e8c4ee5b230a70aa369fb Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 28 Nov 2025 04:28:02 +0100 Subject: [PATCH 0975/1718] STY: appease ruff --- doc/source/conf.py | 2 -- numpy/_pytesttester.py | 1 - numpy/tests/test_public_api.py | 7 +++---- 3 files changed, 3 insertions(+), 7 deletions(-) diff --git a/doc/source/conf.py b/doc/source/conf.py index 302f1fd3731c..111a631d8ea1 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -145,8 +145,6 @@ class PyTypeObject(ctypes.Structure): # The reST default role (used for this markup: `text`) to use for all documents. default_role = "autolink" -nitpick_ignore = [] - # If true, '()' will be appended to :func: etc. cross-reference text. 
add_function_parentheses = False diff --git a/numpy/_pytesttester.py b/numpy/_pytesttester.py index cbb4191047ba..25f5300a74ac 100644 --- a/numpy/_pytesttester.py +++ b/numpy/_pytesttester.py @@ -123,7 +123,6 @@ def __call__(self, label='fast', verbose=1, extra_argv=None, True """ - import pytest module = sys.modules[self.module_name] diff --git a/numpy/tests/test_public_api.py b/numpy/tests/test_public_api.py index 1e26cb18b60f..3ccba81ebaff 100644 --- a/numpy/tests/test_public_api.py +++ b/numpy/tests/test_public_api.py @@ -142,6 +142,7 @@ def test_NPY_NO_EXPORT(): "version", ]] + PUBLIC_ALIASED_MODULES = [ "numpy.char", "numpy.emath", @@ -191,6 +192,7 @@ def test_NPY_NO_EXPORT(): "testing.print_coercion_tables", ]] + def is_unexpected(name): """Check if this needs to be considered.""" return ( @@ -201,9 +203,6 @@ def is_unexpected(name): ) -SKIP_LIST = [] - - def test_all_modules_are_expected(): """ Test that we don't add anything that looks like a new public module by @@ -214,7 +213,7 @@ def test_all_modules_are_expected(): for _, modname, ispkg in pkgutil.walk_packages(path=np.__path__, prefix=np.__name__ + '.', onerror=None): - if is_unexpected(modname) and modname not in SKIP_LIST: + if is_unexpected(modname): # We have a name that is new. If that's on purpose, add it to # PUBLIC_MODULES. We don't expect to have to add anything to # PRIVATE_BUT_PRESENT_MODULES. Use an underscore in the name! 
From 915c058a7bd211c5de04e43a61109983f600b9a5 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 28 Nov 2025 04:45:37 +0100 Subject: [PATCH 0976/1718] TYP: address new mypy test failures --- numpy/_core/fromnumeric.pyi | 41 +++++++++++-------- numpy/_typing/__init__.py | 4 +- .../tests/data/pass/array_constructors.py | 3 +- numpy/typing/tests/data/pass/array_like.py | 8 +--- numpy/typing/tests/data/pass/ma.py | 2 +- numpy/typing/tests/data/pass/ndarray_misc.py | 4 +- .../typing/tests/data/reveal/fromnumeric.pyi | 4 +- .../typing/tests/data/reveal/ndarray_misc.pyi | 6 +-- numpy/typing/tests/data/reveal/numeric.pyi | 2 +- numpy/typing/tests/test_runtime.py | 35 +++++++--------- 10 files changed, 51 insertions(+), 58 deletions(-) diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index b929328a9443..ce08335ac256 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -122,7 +122,13 @@ type _PyArray[_T] = list[_T] | tuple[_T, ...] # `int` also covers `bool` type _PyScalar = complex | bytes | str -type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] +type _0D = tuple[()] +type _1D = tuple[int] +type _2D = tuple[int, int] +type _3D = tuple[int, int, int] +type _4D = tuple[int, int, int, int] + +type _Array1D[ScalarT: np.generic] = np.ndarray[_1D, np.dtype[ScalarT]] ### @@ -185,7 +191,7 @@ def reshape[ScalarT: np.generic]( order: _OrderACF = "C", *, copy: bool | None = None, -) -> np.ndarray[tuple[int], np.dtype[ScalarT]]: ... +) -> _Array1D[ScalarT]: ... @overload # shape: ~ShapeT def reshape[ScalarT: np.generic, ShapeT: _Shape]( a: _ArrayLike[ScalarT], @@ -212,7 +218,7 @@ def reshape( order: _OrderACF = "C", *, copy: bool | None = None, -) -> np.ndarray[tuple[int], np.dtype]: ... +) -> np.ndarray[_1D]: ... 
@overload # shape: ~ShapeT def reshape[ShapeT: _Shape]( a: ArrayLike, @@ -221,7 +227,7 @@ def reshape[ShapeT: _Shape]( order: _OrderACF = "C", *, copy: bool | None = None, -) -> np.ndarray[ShapeT, np.dtype]: ... +) -> np.ndarray[ShapeT]: ... @overload # shape: Sequence[index] def reshape( a: ArrayLike, @@ -267,7 +273,7 @@ def repeat[ScalarT: np.generic]( a: _ArrayLike[ScalarT], repeats: _ArrayLikeInt_co, axis: None = None, -) -> np.ndarray[tuple[int], np.dtype[ScalarT]]: ... +) -> _Array1D[ScalarT]: ... @overload def repeat[ScalarT: np.generic]( a: _ArrayLike[ScalarT], @@ -279,7 +285,7 @@ def repeat( a: ArrayLike, repeats: _ArrayLikeInt_co, axis: None = None, -) -> np.ndarray[tuple[int], np.dtype[Any]]: ... +) -> _Array1D[Any]: ... @overload def repeat( a: ArrayLike, @@ -465,19 +471,20 @@ def searchsorted( sorter: _ArrayLikeInt_co | None = None, # 1D int array ) -> NDArray[intp]: ... -# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 +# @overload def resize[ScalarT: np.generic](a: _ArrayLike[ScalarT], new_shape: SupportsIndex | tuple[SupportsIndex]) -> _Array1D[ScalarT]: ... @overload -def resize[ScalarT: np.generic, ShapeT: _Shape]( - a: _ArrayLike[ScalarT], new_shape: ShapeT -) -> np.ndarray[ShapeT, np.dtype[ScalarT]]: ... +def resize[ScalarT: np.generic, AnyShapeT: (_0D, _1D, _2D, _3D, _4D)]( + a: _ArrayLike[ScalarT], + new_shape: AnyShapeT, +) -> np.ndarray[AnyShapeT, np.dtype[ScalarT]]: ... @overload def resize[ScalarT: np.generic](a: _ArrayLike[ScalarT], new_shape: _ShapeLike) -> NDArray[ScalarT]: ... @overload -def resize(a: ArrayLike, new_shape: SupportsIndex | tuple[SupportsIndex]) -> np.ndarray[tuple[int], np.dtype]: ... +def resize(a: ArrayLike, new_shape: SupportsIndex | tuple[SupportsIndex]) -> np.ndarray[_1D]: ... @overload -def resize[ShapeT: _Shape](a: ArrayLike, new_shape: ShapeT) -> np.ndarray[ShapeT, np.dtype]: ... 
+def resize[AnyShapeT: (_0D, _1D, _2D, _3D, _4D)](a: ArrayLike, new_shape: AnyShapeT) -> np.ndarray[AnyShapeT]: ... @overload def resize(a: ArrayLike, new_shape: _ShapeLike) -> NDArray[Any]: ... @@ -550,9 +557,9 @@ def ravel(a: float | _NestedSequence[float], order: _OrderKACF = "C") -> _Array1 @overload def ravel(a: complex | _NestedSequence[complex], order: _OrderKACF = "C") -> _Array1D[np.complex128 | Any]: ... @overload -def ravel(a: ArrayLike, order: _OrderKACF = "C") -> np.ndarray[tuple[int], np.dtype]: ... +def ravel(a: ArrayLike, order: _OrderKACF = "C") -> np.ndarray[_1D]: ... -def nonzero(a: _ArrayLike[Any]) -> tuple[np.ndarray[tuple[int], np.dtype[intp]], ...]: ... +def nonzero(a: _ArrayLike[Any]) -> tuple[_Array1D[np.intp], ...]: ... # this prevents `Any` from being returned with Pyright @overload @@ -564,12 +571,12 @@ def shape(a: _PyScalar) -> tuple[()]: ... # `collections.abc.Sequence` can't be used hesre, since `bytes` and `str` are # subtypes of it, which would make the return types incompatible. @overload -def shape(a: _PyArray[_PyScalar]) -> tuple[int]: ... +def shape(a: _PyArray[_PyScalar]) -> _1D: ... @overload -def shape(a: _PyArray[_PyArray[_PyScalar]]) -> tuple[int, int]: ... +def shape(a: _PyArray[_PyArray[_PyScalar]]) -> _2D: ... # this overload will be skipped by typecheckers that don't support PEP 688 @overload -def shape(a: memoryview | bytearray) -> tuple[int]: ... +def shape(a: memoryview | bytearray) -> _1D: ... @overload def shape(a: ArrayLike) -> _AnyShape: ... 
diff --git a/numpy/_typing/__init__.py b/numpy/_typing/__init__.py index 82479a3fcb08..d6b6105e79b8 100644 --- a/numpy/_typing/__init__.py +++ b/numpy/_typing/__init__.py @@ -1,7 +1,7 @@ """Private counterpart of ``numpy.typing``.""" from ._array_like import ( - ArrayLike, + ArrayLike as ArrayLike, NDArray as NDArray, _ArrayLike as _ArrayLike, _ArrayLikeAnyString_co as _ArrayLikeAnyString_co, @@ -84,7 +84,7 @@ # from ._dtype_like import ( - DTypeLike, + DTypeLike as DTypeLike, _DTypeLike as _DTypeLike, _DTypeLikeBool as _DTypeLikeBool, _DTypeLikeBytes as _DTypeLikeBytes, diff --git a/numpy/typing/tests/data/pass/array_constructors.py b/numpy/typing/tests/data/pass/array_constructors.py index d91d257cb17c..27cbffa06a5c 100644 --- a/numpy/typing/tests/data/pass/array_constructors.py +++ b/numpy/typing/tests/data/pass/array_constructors.py @@ -1,7 +1,6 @@ from typing import Any import numpy as np -import numpy.typing as npt class Index: @@ -9,7 +8,7 @@ def __index__(self) -> int: return 0 -class SubClass(npt.NDArray[np.float64]): +class SubClass(np.ndarray[tuple[Any, ...], np.dtype[np.float64]]): pass diff --git a/numpy/typing/tests/data/pass/array_like.py b/numpy/typing/tests/data/pass/array_like.py index f922beae34ce..f1e09b03a4ec 100644 --- a/numpy/typing/tests/data/pass/array_like.py +++ b/numpy/typing/tests/data/pass/array_like.py @@ -1,11 +1,5 @@ -from __future__ import annotations - -from typing import TYPE_CHECKING - import numpy as np - -if TYPE_CHECKING: - from numpy._typing import ArrayLike, NDArray, _SupportsArray +from numpy._typing import ArrayLike, NDArray, _SupportsArray x1: ArrayLike = True x2: ArrayLike = 5 diff --git a/numpy/typing/tests/data/pass/ma.py b/numpy/typing/tests/data/pass/ma.py index b5d6b0d38ef0..72cbc5d9b98e 100644 --- a/numpy/typing/tests/data/pass/ma.py +++ b/numpy/typing/tests/data/pass/ma.py @@ -5,7 +5,7 @@ import numpy.typing as npt from numpy._typing import _Shape -type MaskedArray[ScalarT: np.generic] = np.ma.MaskedArray[_Shape, 
np.dtype[_ScalarT]] +type MaskedArray[ScalarT: np.generic] = np.ma.MaskedArray[_Shape, np.dtype[ScalarT]] # mypy: disable-error-code=no-untyped-call diff --git a/numpy/typing/tests/data/pass/ndarray_misc.py b/numpy/typing/tests/data/pass/ndarray_misc.py index 40c84d8641bd..d50becb20ee4 100644 --- a/numpy/typing/tests/data/pass/ndarray_misc.py +++ b/numpy/typing/tests/data/pass/ndarray_misc.py @@ -15,8 +15,8 @@ import numpy.typing as npt -class SubClass(npt.NDArray[np.float64]): ... -class IntSubClass(npt.NDArray[np.intp]): ... +class SubClass(np.ndarray[tuple[Any, ...], np.dtype[np.float64]]): ... +class IntSubClass(np.ndarray[tuple[Any, ...], np.dtype[np.intp]]): ... i4 = np.int32(1) diff --git a/numpy/typing/tests/data/reveal/fromnumeric.pyi b/numpy/typing/tests/data/reveal/fromnumeric.pyi index 58ea2c5f8732..26de2c2b6e37 100644 --- a/numpy/typing/tests/data/reveal/fromnumeric.pyi +++ b/numpy/typing/tests/data/reveal/fromnumeric.pyi @@ -5,7 +5,7 @@ from typing import Any, assert_type import numpy as np import numpy.typing as npt -class NDArraySubclass(npt.NDArray[np.complex128]): ... +class NDArraySubclass(np.ndarray[tuple[Any, ...], np.dtype[np.complex128]]): ... AR_b: npt.NDArray[np.bool] AR_f4: npt.NDArray[np.float32] @@ -25,7 +25,7 @@ i8: np.int64 f: float # integer‑dtype subclass for argmin/argmax -class NDArrayIntSubclass(npt.NDArray[np.intp]): ... +class NDArrayIntSubclass(np.ndarray[tuple[Any, ...], np.dtype[np.intp]]): ... AR_sub_i: NDArrayIntSubclass assert_type(np.take(b, 0), np.bool) diff --git a/numpy/typing/tests/data/reveal/ndarray_misc.pyi b/numpy/typing/tests/data/reveal/ndarray_misc.pyi index 2972a58c328f..be0666f95fcb 100644 --- a/numpy/typing/tests/data/reveal/ndarray_misc.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_misc.pyi @@ -16,7 +16,7 @@ from typing_extensions import CapsuleType import numpy as np import numpy.typing as npt -class SubClass(npt.NDArray[np.object_]): ... 
+class SubClass(np.ndarray[tuple[Any, ...], np.dtype[np.object_]]): ... f8: np.float64 i8: np.int64 @@ -62,12 +62,12 @@ assert_type(AR_f8.any(out=B), SubClass) assert_type(f8.argmax(), np.intp) assert_type(AR_f8.argmax(), np.intp) assert_type(AR_f8.argmax(axis=0), Any) -assert_type(AR_f8.argmax(out=AR_i8), npt.NDArray[np.intp]) +assert_type(AR_f8.argmax(out=AR_i8), npt.NDArray[np.int64]) assert_type(f8.argmin(), np.intp) assert_type(AR_f8.argmin(), np.intp) assert_type(AR_f8.argmin(axis=0), Any) -assert_type(AR_f8.argmin(out=AR_i8), npt.NDArray[np.intp]) +assert_type(AR_f8.argmin(out=AR_i8), npt.NDArray[np.int64]) assert_type(f8.argsort(), npt.NDArray[np.intp]) assert_type(AR_f8.argsort(), npt.NDArray[np.intp]) diff --git a/numpy/typing/tests/data/reveal/numeric.pyi b/numpy/typing/tests/data/reveal/numeric.pyi index 44192fd64331..7b3abc2d6761 100644 --- a/numpy/typing/tests/data/reveal/numeric.pyi +++ b/numpy/typing/tests/data/reveal/numeric.pyi @@ -10,7 +10,7 @@ from typing import Any, assert_type import numpy as np import numpy.typing as npt -class SubClass(npt.NDArray[np.int64]): ... +class SubClass(np.ndarray[tuple[Any, ...], np.dtype[np.int64]]): ... i8: np.int64 diff --git a/numpy/typing/tests/test_runtime.py b/numpy/typing/tests/test_runtime.py index 462fe4eabdc0..9db74c8ddc28 100644 --- a/numpy/typing/tests/test_runtime.py +++ b/numpy/typing/tests/test_runtime.py @@ -3,7 +3,8 @@ from typing import ( Any, NamedTuple, - Union, # pyright: ignore[reportDeprecated] + Self, + TypeAliasType, get_args, get_origin, get_type_hints, @@ -17,31 +18,23 @@ class TypeTup(NamedTuple): - typ: type - args: tuple[type, ...] - origin: type | None + typ: type # type expression + args: tuple[type, ...] # generic type parameters or arguments + origin: type | None # e.g. 
`UnionType` or `GenericAlias` + @classmethod + def from_type_alias(cls, alias: TypeAliasType, /) -> Self: + # PEP 695 `type _ = ...` aliases wrap the type expression as a + # `types.TypeAliasType` instance with a `__value__` attribute. + tp = alias.__value__ + return cls(typ=tp, args=get_args(tp), origin=get_origin(tp)) -def _flatten_type_alias(t: Any) -> Any: - # "flattens" a TypeAliasType to its underlying type alias - return getattr(t, "__value__", t) - - -NDArrayTup = TypeTup(npt.NDArray, npt.NDArray.__args__, np.ndarray) TYPES = { - "ArrayLike": TypeTup( - _flatten_type_alias(npt.ArrayLike), - _flatten_type_alias(npt.ArrayLike).__args__, - Union, - ), - "DTypeLike": TypeTup( - _flatten_type_alias(npt.DTypeLike), - _flatten_type_alias(npt.DTypeLike).__args__, - Union, - ), + "ArrayLike": TypeTup.from_type_alias(npt.ArrayLike), + "DTypeLike": TypeTup.from_type_alias(npt.DTypeLike), "NBitBase": TypeTup(npt.NBitBase, (), None), # type: ignore[deprecated] # pyright: ignore[reportDeprecated] - "NDArray": NDArrayTup, + "NDArray": TypeTup.from_type_alias(npt.NDArray), } From 18b128a149501397e771ea180212bd44bd060252 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 28 Nov 2025 17:22:13 +0100 Subject: [PATCH 0977/1718] TYP: fix mypy_primer errors in `timedelta64` --- numpy/__init__.pyi | 64 +++++++++++++++++++++++----------------------- 1 file changed, 32 insertions(+), 32 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 8ca91fd5b007..b8ede6c2a85a 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -165,6 +165,7 @@ from typing import ( TypedDict, final, overload, + override, type_check_only, ) @@ -174,7 +175,6 @@ from typing import ( # https://github.com/python/typeshed/blob/main/stdlib/typing_extensions.pyi from _typeshed import Incomplete, StrOrBytesPath, SupportsFlush, SupportsLenAndGetItem, SupportsWrite from typing_extensions import CapsuleType, TypeVar, deprecated -from typing import override from numpy import ( char, @@ -2636,7 
+2636,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def getfield(self, /, dtype: DTypeLike, offset: SupportsIndex = 0) -> NDArray[Any]: ... def __index__(self: NDArray[integer], /) -> int: ... - def __complex__(self: NDArray[bool_ | number | object_], /) -> complex: ... + def __complex__(self: NDArray[number | np.bool | object_], /) -> complex: ... def __len__(self) -> int: ... def __contains__(self, value: object, /) -> builtins.bool: ... @@ -2727,13 +2727,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # TODO: Uncomment once https://github.com/python/mypy/issues/14070 is fixed # @overload - # def __abs__(self: ndarray[ShapeT, dtypes.Complex64DType], /) -> ndarray[ShapeT, dtypes.Float32DType]: ... + # def __abs__[ShapeT: _Shape](self: ndarray[ShapeT, dtypes.Complex64DType], /) -> ndarray[ShapeT, dtypes.Float32DType]: ... # @overload - # def __abs__(self: ndarray[ShapeT, dtypes.Complex128DType], /) -> ndarray[ShapeT, dtypes.Float64DType]: ... + # def __abs__[ShapeT: _Shape](self: ndarray[ShapeT, dtypes.Complex128DType], /) -> ndarray[ShapeT, dtypes.Float64DType]: ... # @overload - # def __abs__(self: ndarray[ShapeT, dtypes.CLongDoubleDType], /) -> ndarray[ShapeT, dtypes.LongDoubleDType]: ... + # def __abs__[ShapeT: _Shape](self: ndarray[ShapeT, dtypes.CLongDoubleDType], /) -> ndarray[ShapeT, dtypes.LongDoubleDType]: ... # @overload - # def __abs__(self: ndarray[ShapeT, dtype[complex128]], /) -> ndarray[ShapeT, dtype[float64]]: ... + # def __abs__[ShapeT: _Shape](self: ndarray[ShapeT, dtype[complex128]], /) -> ndarray[ShapeT, dtype[float64]]: ... @overload def __abs__[ShapeT: _Shape, NBitT: NBitBase]( self: ndarray[ShapeT, dtype[complexfloating[NBitT]]], / @@ -2753,7 +2753,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __matmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... 
@overload - def __matmul__[ScalarT: number](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... + def __matmul__[ScalarT: number](self: NDArray[np.bool], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __matmul__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -3018,7 +3018,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __sub__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ... @overload - def __sub__[ScalarT: number](self: NDArray[np.bool], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... # type: ignore[overload-overlap] + def __sub__[ScalarT: number](self: NDArray[np.bool], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __sub__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -3056,7 +3056,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __rsub__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ... @overload - def __rsub__[ScalarT: number](self: NDArray[np.bool], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... # type: ignore[overload-overlap] + def __rsub__[ScalarT: number](self: NDArray[np.bool], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __rsub__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -4630,7 +4630,7 @@ int_ = intp long = signedinteger[_NBitLong] longlong = signedinteger[_NBitLongLong] -class unsignedinteger(integer[_NBitT1]): +class unsignedinteger(integer[_NBitT]): def __new__(cls, value: _ConvertibleToInt = 0, /) -> Self: ... # arithmetic ops @@ -4892,7 +4892,7 @@ class inexact(number[_NBitT, _InexactItemT_co], Generic[_NBitT, _InexactItemT_co @abstractmethod def __new__(cls, value: _ConvertibleToFloat | None = 0, /) -> Self: ... 
-class floating(_RealMixin, _RoundMixin, inexact[_NBitT1, float]): +class floating(_RealMixin, _RoundMixin, inexact[_NBitT, float]): def __new__(cls, value: _ConvertibleToFloat | None = 0, /) -> Self: ... # arithmetic ops @@ -5076,7 +5076,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __add__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... @overload - def __add__(self, other: complexfloating[_NBitT1, _NBitT2], /) -> complexfloating[_NBitT1 | _64Bit, _NBitT2 | _64Bit]: ... + def __add__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... @overload def __add__(self, other: complex, /) -> float64 | complex128: ... @@ -5085,7 +5085,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __radd__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... # type: ignore[misc] @overload - def __radd__(self, other: complexfloating[_NBitT1, _NBitT2], /) -> complexfloating[_NBitT1 | _64Bit, _NBitT2 | _64Bit]: ... + def __radd__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... @overload def __radd__(self, other: complex, /) -> float64 | complex128: ... @@ -5094,7 +5094,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __sub__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... @overload - def __sub__(self, other: complexfloating[_NBitT1, _NBitT2], /) -> complexfloating[_NBitT1 | _64Bit, _NBitT2 | _64Bit]: ... + def __sub__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... @overload def __sub__(self, other: complex, /) -> float64 | complex128: ... @@ -5103,7 +5103,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __rsub__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... 
# type: ignore[misc] @overload - def __rsub__(self, other: complexfloating[_NBitT1, _NBitT2], /) -> complexfloating[_NBitT1 | _64Bit, _NBitT2 | _64Bit]: ... + def __rsub__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... @overload def __rsub__(self, other: complex, /) -> float64 | complex128: ... @@ -5112,7 +5112,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __mul__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... @overload - def __mul__(self, other: complexfloating[_NBitT1, _NBitT2], /) -> complexfloating[_NBitT1 | _64Bit, _NBitT2 | _64Bit]: ... + def __mul__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... @overload def __mul__(self, other: complex, /) -> float64 | complex128: ... @@ -5121,7 +5121,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __rmul__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... # type: ignore[misc] @overload - def __rmul__(self, other: complexfloating[_NBitT1, _NBitT2], /) -> complexfloating[_NBitT1 | _64Bit, _NBitT2 | _64Bit]: ... + def __rmul__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... @overload def __rmul__(self, other: complex, /) -> float64 | complex128: ... @@ -5130,7 +5130,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __truediv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... @overload - def __truediv__(self, other: complexfloating[_NBitT1, _NBitT2], /) -> complexfloating[_NBitT1 | _64Bit, _NBitT2 | _64Bit]: ... + def __truediv__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... @overload def __truediv__(self, other: complex, /) -> float64 | complex128: ... 
@@ -5139,7 +5139,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __rtruediv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... # type: ignore[misc] @overload - def __rtruediv__(self, other: complexfloating[_NBitT1, _NBitT2], /) -> complexfloating[_NBitT1 | _64Bit, _NBitT2 | _64Bit]: ... + def __rtruediv__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... @overload def __rtruediv__(self, other: complex, /) -> float64 | complex128: ... @@ -5148,7 +5148,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __floordiv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... @overload - def __floordiv__(self, other: complexfloating[_NBitT1, _NBitT2], /) -> complexfloating[_NBitT1 | _64Bit, _NBitT2 | _64Bit]: ... + def __floordiv__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... @overload def __floordiv__(self, other: complex, /) -> float64 | complex128: ... @@ -5157,7 +5157,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __rfloordiv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... @overload - def __rfloordiv__(self, other: complexfloating[_NBitT1, _NBitT2], /) -> complexfloating[_NBitT1 | _64Bit, _NBitT2 | _64Bit]: ... + def __rfloordiv__[NBitT: NBitBase](self, other: complexfloating[NBitT], /) -> complexfloating[NBitT | _64Bit]: ... @overload def __rfloordiv__(self, other: complex, /) -> float64 | complex128: ... @@ -5166,9 +5166,7 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __pow__(self, other: complexfloating[_64Bit, _64Bit], mod: None = None, /) -> complex128: ... @overload - def __pow__( - self, other: complexfloating[_NBitT1, _NBitT2], mod: None = None, / - ) -> complexfloating[_NBitT1 | _64Bit, _NBitT2 | _64Bit]: ... 
+ def __pow__[NBitT: NBitBase](self, other: complexfloating[NBitT], mod: None = None, /) -> complexfloating[NBitT | _64Bit]: ... @overload def __pow__(self, other: complex, mod: None = None, /) -> float64 | complex128: ... @@ -5177,9 +5175,9 @@ class float64(floating[_64Bit], float): # type: ignore[misc] @overload def __rpow__(self, other: complexfloating[_64Bit, _64Bit], mod: None = None, /) -> complex128: ... # type: ignore[misc] @overload - def __rpow__( - self, other: complexfloating[_NBitT1, _NBitT2], mod: None = None, / - ) -> complexfloating[_NBitT1 | _64Bit, _NBitT2 | _64Bit]: ... + def __rpow__[NBitT: NBitBase]( + self, other: complexfloating[NBitT], mod: None = None, / + ) -> complexfloating[NBitT | _64Bit]: ... @overload def __rpow__(self, other: complex, mod: None = None, /) -> float64 | complex128: ... @@ -5410,9 +5408,11 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] @overload def __add__(self: timedelta64[int], x: timedelta64, /) -> timedelta64[int | None]: ... @overload - def __add__[DateOrTimeT: dt.date | dt.timedelta](self: timedelta64[dt.timedelta], x: DateOrTimeT, /) -> DateOrTimeT: ... + def __add__[AnyDateOrTimeT: (dt.datetime, dt.date, dt.timedelta)]( + self: timedelta64[dt.timedelta], x: AnyDateOrTimeT, / + ) -> AnyDateOrTimeT: ... @overload - def __add__[AnyItemT: (dt.timedelta, int, None)]( + def __add__[AnyItemT: (dt.timedelta, int, None, _TD64Item)]( self: timedelta64[AnyItemT], x: timedelta64[AnyItemT] | _IntLike_co, / ) -> timedelta64[AnyItemT]: ... __radd__ = __add__ @@ -5433,7 +5433,7 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] @overload def __sub__(self: timedelta64[dt.timedelta], b: dt.timedelta, /) -> dt.timedelta: ... @overload - def __sub__[AnyItemT: (dt.timedelta, int, None)]( + def __sub__[AnyItemT: (dt.timedelta, int, None, _TD64Item)]( self: timedelta64[AnyItemT], b: timedelta64[AnyItemT] | _IntLike_co, / ) -> timedelta64[AnyItemT]: ... 
@@ -5450,7 +5450,7 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] self: timedelta64[dt.timedelta], a: timedelta64[AnyItemT], / ) -> timedelta64[AnyItemT]: ... @overload - def __rsub__[AnyItemT: (dt.timedelta, int, None)]( # type: ignore[misc] + def __rsub__[AnyItemT: (dt.timedelta, int, None, _TD64Item)]( # type: ignore[misc] self: timedelta64[AnyItemT], a: timedelta64[AnyItemT] | _IntLike_co, / ) -> timedelta64[AnyItemT]: ... @overload @@ -5604,7 +5604,7 @@ class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]): @overload def __new__(cls, value: datetime64[_DT64ItemT_co], /) -> Self: ... @overload - def __new__[AnyItemT: (dt.date, dt.datetime, None)](cls, value: AnyItemT, /) -> datetime64[AnyItemT]: ... + def __new__[AnyItemT: (dt.datetime, dt.date, None)](cls, value: AnyItemT, /) -> datetime64[AnyItemT]: ... @overload def __new__(cls, value: _NaTValue | None = ..., format: _TimeUnitSpec[_TD64Unit] = ..., /) -> datetime64[None]: ... 
@overload From 288bc507f14166eac3a47ba157c3f5a5d84c6c30 Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 2 Dec 2025 17:28:20 +0100 Subject: [PATCH 0978/1718] MAINT: remove unused stubtest allowlist entry --- tools/stubtest/allowlist.txt | 3 --- 1 file changed, 3 deletions(-) diff --git a/tools/stubtest/allowlist.txt b/tools/stubtest/allowlist.txt index eab246271dd1..58110d66046e 100644 --- a/tools/stubtest/allowlist.txt +++ b/tools/stubtest/allowlist.txt @@ -1,6 +1,3 @@ -# TODO: remove once distutils is gone -numpy\.f2py\._backends\._distutils - # intentional type-check-only deviations from runtime numpy\._typing.* numpy\.polynomial\._polytypes From 94845770d2db2296b4bc7c674f184a0fe1e25582 Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 2 Dec 2025 18:39:08 +0100 Subject: [PATCH 0979/1718] STY: re-enable ruff/pyupgrade --- ruff.toml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/ruff.toml b/ruff.toml index c02a90e610e6..ebbb29283622 100644 --- a/ruff.toml +++ b/ruff.toml @@ -36,8 +36,7 @@ extend-select = [ "W", # pycodestyle/warning "PGH", # pygrep-hooks "PLE", # pylint/error - # TODO: re-enable after merging https://github.com/numpy/numpy/pull/30319 - # "UP", # pyupgrade + "UP", # pyupgrade ] ignore = [ # flake8-bugbear From dcf055eb7c6cdbb5a93f4a4ec6071c22ffc3eb29 Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 2 Dec 2025 18:51:42 +0100 Subject: [PATCH 0980/1718] MAINT: post-rebase correction --- doc/source/conf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/conf.py b/doc/source/conf.py index 111a631d8ea1..238a7d11f8a4 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -608,7 +608,7 @@ class NumPyLexer(CLexer): breathe_default_members = ("members", "undoc-members", "protected-members") # See https://github.com/breathe-doc/breathe/issues/696 -nitpick_ignore += [ +nitpick_ignore = [ ('c:identifier', 'FILE'), ('c:identifier', 'size_t'), ('c:identifier', 'PyHeapTypeObject'), From 
b688a081dbcc35fe759dd49e609264425f2763fd Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 4 Dec 2025 15:28:35 +0100 Subject: [PATCH 0981/1718] DOC, TST, TYP: update `NDArray` doctest --- numpy/_typing/_add_docstring.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/_typing/_add_docstring.py b/numpy/_typing/_add_docstring.py index 5330a6b3b715..883b890a1a16 100644 --- a/numpy/_typing/_add_docstring.py +++ b/numpy/_typing/_add_docstring.py @@ -137,10 +137,10 @@ def _parse_docstrings() -> str: >>> import numpy.typing as npt >>> print(npt.NDArray) - numpy.ndarray[tuple[typing.Any, ...], numpy.dtype[~_ScalarT]] + NDArray >>> print(npt.NDArray[np.float64]) - numpy.ndarray[tuple[typing.Any, ...], numpy.dtype[numpy.float64]] + NDArray[numpy.float64] >>> NDArrayInt = npt.NDArray[np.int_] >>> a: NDArrayInt = np.arange(10) From 688cd5c17690faddbb39252e137d46e37c5ab30e Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 5 Dec 2025 11:08:52 +0100 Subject: [PATCH 0982/1718] TYP: post-rebase corrections --- numpy/lib/_type_check_impl.pyi | 1 - numpy/lib/_user_array_impl.pyi | 8 ++++---- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/numpy/lib/_type_check_impl.pyi b/numpy/lib/_type_check_impl.pyi index b75f010d3550..dbef0ca87280 100644 --- a/numpy/lib/_type_check_impl.pyi +++ b/numpy/lib/_type_check_impl.pyi @@ -137,7 +137,6 @@ def nan_to_num( # NOTE: The [overload-overlap] mypy error is a false positive @overload def real_if_close(a: _ArrayLike[np.complex64], tol: float = 100) -> NDArray[np.float32 | np.complex64]: ... -def real_if_close(a: _ArrayLike[np.complex64], tol: float = 100) -> NDArray[np.float32 | np.complex64]: ... @overload def real_if_close(a: _ArrayLike[np.complex128], tol: float = 100) -> NDArray[np.float64 | np.complex128]: ... 
@overload diff --git a/numpy/lib/_user_array_impl.pyi b/numpy/lib/_user_array_impl.pyi index 661dc97f224c..4a6dfffbea92 100644 --- a/numpy/lib/_user_array_impl.pyi +++ b/numpy/lib/_user_array_impl.pyi @@ -21,10 +21,10 @@ _DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=T type _ArrayInt_co = npt.NDArray[np.integer | np.bool] -type _BoolContainer = container[Any, np.dtype[np.bool]] -type _IntegralContainer = container[Any, np.dtype[np.bool | np.integer | np.object_]] -type _RealContainer = container[Any, np.dtype[np.bool | np.integer | np.floating | np.timedelta64 | np.object_]] -type _NumericContainer = container[Any, np.dtype[np.number | np.timedelta64 | np.object_]] +type _BoolContainer = container[Any, np.dtype[np.bool]] # type: ignore[deprecated] +type _IntegralContainer = container[Any, np.dtype[np.bool | np.integer | np.object_]] # type: ignore[deprecated] +type _RealContainer = container[Any, np.dtype[np.bool | np.integer | np.floating | np.timedelta64 | np.object_]] # type: ignore[deprecated] +type _NumericContainer = container[Any, np.dtype[np.number | np.timedelta64 | np.object_]] # type: ignore[deprecated] type _ToIndexSlice = slice | EllipsisType | _ArrayInt_co | None type _ToIndexSlices = _ToIndexSlice | tuple[_ToIndexSlice, ...] From f6d4862ffc9ff8f1992199e52b3ee1e427f43b75 Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Tue, 11 Nov 2025 00:45:51 -0500 Subject: [PATCH 0983/1718] BUG: Change return type of sign(timedelta64) to float64. Closes gh-8463. Closes gh-29496. 
--- numpy/_core/code_generators/generate_umath.py | 6 ++-- numpy/_core/src/umath/loops.c.src | 7 +++- numpy/_core/src/umath/ufunc_type_resolution.c | 25 ++++++++++++++ numpy/_core/src/umath/ufunc_type_resolution.h | 7 ++++ numpy/_core/tests/test_datetime.py | 34 +++++++++++-------- 5 files changed, 61 insertions(+), 18 deletions(-) diff --git a/numpy/_core/code_generators/generate_umath.py b/numpy/_core/code_generators/generate_umath.py index f5d8530bbc58..4b8f74bd52b9 100644 --- a/numpy/_core/code_generators/generate_umath.py +++ b/numpy/_core/code_generators/generate_umath.py @@ -526,8 +526,10 @@ def english_upper(s): 'sign': Ufunc(1, 1, None, docstrings.get('numpy._core.umath.sign'), - 'PyUFunc_SimpleUniformOperationTypeResolver', - TD(nobool_or_datetime, dispatch=[('loops_autovec', ints)]), + 'PyUFunc_SignTypeResolver', + TD(ints + flts, dispatch=[('loops_autovec', ints)]), + TD(timedeltaonly, out='d'), + TD(cmplx + O), ), 'greater': Ufunc(2, 1, None, diff --git a/numpy/_core/src/umath/loops.c.src b/numpy/_core/src/umath/loops.c.src index 3928d2a0d0c4..1ad9cab4666e 100644 --- a/numpy/_core/src/umath/loops.c.src +++ b/numpy/_core/src/umath/loops.c.src @@ -688,7 +688,12 @@ TIMEDELTA_sign(char **args, npy_intp const *dimensions, npy_intp const *steps, v { UNARY_LOOP { const npy_timedelta in1 = *(npy_timedelta *)ip1; - *((npy_timedelta *)op1) = in1 > 0 ? 1 : (in1 < 0 ? -1 : 0); + if (in1 == NPY_DATETIME_NAT) { + *((npy_double *)op1) = NPY_NAN; + } + else { + *((npy_double *)op1) = in1 > 0 ? 1.0 : (in1 < 0 ? -1.0 : 0.0); + } } } diff --git a/numpy/_core/src/umath/ufunc_type_resolution.c b/numpy/_core/src/umath/ufunc_type_resolution.c index f5a203719b54..4562071d782e 100644 --- a/numpy/_core/src/umath/ufunc_type_resolution.c +++ b/numpy/_core/src/umath/ufunc_type_resolution.c @@ -429,6 +429,31 @@ PyUFunc_NegativeTypeResolver(PyUFuncObject *ufunc, return ret; } +/* + * This function applies special type resolution rules for the 'sign' ufunc. 
+ * 'sign' converts timedelta64 to float64, so isn't covered by the simple + * unary type resolution. + * + * Returns 0 on success, -1 on error. + */ +NPY_NO_EXPORT int +PyUFunc_SignTypeResolver(PyUFuncObject *ufunc, + NPY_CASTING casting, + PyArrayObject **operands, + PyObject *type_tup, + PyArray_Descr **out_dtypes) +{ + if (PyArray_DESCR(operands[0])->type_num == NPY_TIMEDELTA) { + out_dtypes[0] = NPY_DT_CALL_ensure_canonical(PyArray_DESCR(operands[0])); + out_dtypes[1] = PyArray_DescrFromType(NPY_DOUBLE); + return 0; + } + else { + return PyUFunc_SimpleUniformOperationTypeResolver(ufunc, casting, + operands, type_tup, out_dtypes); + } +} + /* * The ones_like function shouldn't really be a ufunc, but while it diff --git a/numpy/_core/src/umath/ufunc_type_resolution.h b/numpy/_core/src/umath/ufunc_type_resolution.h index 9e812e97d6fe..531e9267afa2 100644 --- a/numpy/_core/src/umath/ufunc_type_resolution.h +++ b/numpy/_core/src/umath/ufunc_type_resolution.h @@ -19,6 +19,13 @@ PyUFunc_NegativeTypeResolver(PyUFuncObject *ufunc, PyObject *type_tup, PyArray_Descr **out_dtypes); +NPY_NO_EXPORT int +PyUFunc_SignTypeResolver(PyUFuncObject *ufunc, + NPY_CASTING casting, + PyArrayObject **operands, + PyObject *type_tup, + PyArray_Descr **out_dtypes); + NPY_NO_EXPORT int PyUFunc_OnesLikeTypeResolver(PyUFuncObject *ufunc, NPY_CASTING casting, diff --git a/numpy/_core/tests/test_datetime.py b/numpy/_core/tests/test_datetime.py index c7b11149ed43..df8d6d48d387 100644 --- a/numpy/_core/tests/test_datetime.py +++ b/numpy/_core/tests/test_datetime.py @@ -1080,21 +1080,18 @@ def test_datetime_like(self): assert_equal(np.zeros_like(b).dtype, b.dtype) assert_equal(np.empty_like(b).dtype, b.dtype) - def test_datetime_unary(self): - for tda, tdb, tdzero, tdone, tdmone in \ + def test_timedelta64_unary(self): + for tda, tdb, tdzero in \ [ # One-dimensional arrays (np.array([3], dtype='m8[D]'), np.array([-3], dtype='m8[D]'), - np.array([0], dtype='m8[D]'), - np.array([1], 
dtype='m8[D]'), - np.array([-1], dtype='m8[D]')), + np.array([0], dtype='m8[D]')), # NumPy scalars (np.timedelta64(3, '[D]'), np.timedelta64(-3, '[D]'), - np.timedelta64(0, '[D]'), - np.timedelta64(1, '[D]'), - np.timedelta64(-1, '[D]'))]: + np.timedelta64(0, '[D]')), + ]: # negative ufunc assert_equal(-tdb, tda) assert_equal((-tdb).dtype, tda.dtype) @@ -1112,13 +1109,20 @@ def test_datetime_unary(self): assert_equal(np.absolute(tdb).dtype, tda.dtype) # sign ufunc - assert_equal(np.sign(tda), tdone) - assert_equal(np.sign(tdb), tdmone) - assert_equal(np.sign(tdzero), tdzero) - assert_equal(np.sign(tda).dtype, tda.dtype) - - # The ufuncs always produce native-endian results - assert_ + assert_equal(np.sign(tda), np.ones_like(tda, dtype=np.float64), + strict=True) + assert_equal(np.sign(tdb), -np.ones_like(tdb, dtype=np.float64), + strict=True) + assert_equal(np.sign(tdzero), np.zeros_like(tdzero, dtype=np.float64), + strict=True) + + def test_timedelta64_sign_nat(self): + x = np.array([np.timedelta64(-123, 's'), + np.timedelta64(0, 's'), + np.timedelta64(88, 's'), + np.timedelta64('NaT', 's')]) + s = np.sign(x) + assert_equal(s, np.array([-1.0, 0.0, 1.0, np.nan]), strict=True) def test_datetime_add(self): for dta, dtb, dtc, dtnat, tda, tdb, tdc in \ From d8c4393e512a69a557dba22a79fa8733a7bb57dd Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Fri, 5 Dec 2025 21:28:50 -0500 Subject: [PATCH 0984/1718] MAINT: Add missing checks for failure of NPY_DT_CALL_ensure_canonical() --- numpy/_core/src/umath/ufunc_type_resolution.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/numpy/_core/src/umath/ufunc_type_resolution.c b/numpy/_core/src/umath/ufunc_type_resolution.c index 4562071d782e..eaea560e9b98 100644 --- a/numpy/_core/src/umath/ufunc_type_resolution.c +++ b/numpy/_core/src/umath/ufunc_type_resolution.c @@ -445,6 +445,9 @@ PyUFunc_SignTypeResolver(PyUFuncObject *ufunc, { if (PyArray_DESCR(operands[0])->type_num == NPY_TIMEDELTA) { out_dtypes[0] = 
NPY_DT_CALL_ensure_canonical(PyArray_DESCR(operands[0])); + if (out_dtypes[0] == NULL) { + return -1; + } out_dtypes[1] = PyArray_DescrFromType(NPY_DOUBLE); return 0; } @@ -609,6 +612,9 @@ PyUFunc_SimpleUniformOperationTypeResolver( descr = PyArray_DESCR(operands[0]); } out_dtypes[0] = NPY_DT_CALL_ensure_canonical(descr); + if (out_dtypes[0] == NULL) { + return -1; + } } /* All types are the same - copy the first one to the rest */ @@ -675,6 +681,9 @@ PyUFunc_IsNaTTypeResolver(PyUFuncObject *ufunc, } out_dtypes[0] = NPY_DT_CALL_ensure_canonical(PyArray_DESCR(operands[0])); + if (out_dtypes[0] == NULL) { + return -1; + } out_dtypes[1] = PyArray_DescrFromType(NPY_BOOL); return 0; @@ -694,6 +703,9 @@ PyUFunc_IsFiniteTypeResolver(PyUFuncObject *ufunc, } out_dtypes[0] = NPY_DT_CALL_ensure_canonical(PyArray_DESCR(operands[0])); + if (out_dtypes[0] == NULL) { + return -1; + } out_dtypes[1] = PyArray_DescrFromType(NPY_BOOL); return 0; From f1904d5fd4629dcbf60b3d5c314e3d1b3c4bcb46 Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Sat, 6 Dec 2025 08:56:29 -0500 Subject: [PATCH 0985/1718] TST: Add a test of sign(nat) for scalar nat. 
--- numpy/_core/tests/test_datetime.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/numpy/_core/tests/test_datetime.py b/numpy/_core/tests/test_datetime.py index df8d6d48d387..bd809b83d24c 100644 --- a/numpy/_core/tests/test_datetime.py +++ b/numpy/_core/tests/test_datetime.py @@ -1124,6 +1124,10 @@ def test_timedelta64_sign_nat(self): s = np.sign(x) assert_equal(s, np.array([-1.0, 0.0, 1.0, np.nan]), strict=True) + def test_timedelta64_sign_nat_scalar(self): + nat = np.timedelta64('nat', 'm') + assert_equal(np.sign(nat), np.nan) + def test_datetime_add(self): for dta, dtb, dtc, dtnat, tda, tdb, tdc in \ [ From 60bf05c38a1059cef0d897a83c626bad702ae1ba Mon Sep 17 00:00:00 2001 From: Karan Singh Date: Mon, 8 Dec 2025 01:50:06 +0530 Subject: [PATCH 0986/1718] DOC: Clarify `np.negative` * DOC: Improve examples for np.negative and np.positive * DOC: Improve examples for np.negative * DOC: clarify unary examples [skip actions][skip cirrus] --- numpy/_core/code_generators/ufunc_docstrings.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/code_generators/ufunc_docstrings.py b/numpy/_core/code_generators/ufunc_docstrings.py index d4707da906b2..fe07047a1758 100644 --- a/numpy/_core/code_generators/ufunc_docstrings.py +++ b/numpy/_core/code_generators/ufunc_docstrings.py @@ -3174,7 +3174,7 @@ def add_newdoc(place, name, doc): add_newdoc('numpy._core.umath', 'negative', """ - Numerical negative, element-wise. + Numerical negation, element-wise. 
Parameters ---------- From 2fd0b59716e55038977a522cb6c50126afa80737 Mon Sep 17 00:00:00 2001 From: star1327p Date: Sun, 7 Dec 2025 15:40:30 -0800 Subject: [PATCH 0987/1718] DOC: Correct wording usage such as a/an --- doc/neps/roadmap.rst | 2 +- doc/source/reference/routines.ma.rst | 2 +- doc/source/user/basics.interoperability.rst | 6 +++--- numpy/_core/einsumfunc.py | 2 +- numpy/_core/src/multiarray/convert_datatype.c | 4 ++-- numpy/_core/src/multiarray/ctors.c | 2 +- numpy/_core/src/umath/matmul.c.src | 2 +- numpy/_core/tests/examples/cython/checks.pyx | 2 +- numpy/_core/tests/test_memmap.py | 2 +- numpy/lib/_function_base_impl.py | 2 +- numpy/lib/_iotools.py | 2 +- numpy/lib/recfunctions.py | 8 ++++---- numpy/linalg/_linalg.py | 2 +- numpy/ma/core.py | 12 ++++++------ numpy/typing/__init__.py | 2 +- 15 files changed, 26 insertions(+), 26 deletions(-) diff --git a/doc/neps/roadmap.rst b/doc/neps/roadmap.rst index 01cd21158be0..828b173ff21b 100644 --- a/doc/neps/roadmap.rst +++ b/doc/neps/roadmap.rst @@ -214,7 +214,7 @@ Maintenance - ``numpy.ma`` is still in poor shape and under-maintained. It needs to be improved, ideas include: - - Rewrite masked arrays to not be a ndarray subclass -- maybe in a separate project? + - Rewrite masked arrays to not be an ndarray subclass -- maybe in a separate project? - MaskedArray as a duck-array type, and/or - dtypes that support missing values diff --git a/doc/source/reference/routines.ma.rst b/doc/source/reference/routines.ma.rst index 2b1b5dac1710..0004f2264976 100644 --- a/doc/source/reference/routines.ma.rst +++ b/doc/source/reference/routines.ma.rst @@ -247,7 +247,7 @@ Conversion operations ma.masked_where -> to a ndarray +> to an ndarray ~~~~~~~~~~~~~~ .. 
autosummary:: :toctree: generated/ diff --git a/doc/source/user/basics.interoperability.rst b/doc/source/user/basics.interoperability.rst index b1c115ff1de0..e001244e67b4 100644 --- a/doc/source/user/basics.interoperability.rst +++ b/doc/source/user/basics.interoperability.rst @@ -216,7 +216,7 @@ The ``__array_ufunc__`` protocol A :ref:`universal function (or ufunc for short) ` is a “vectorized” wrapper for a function that takes a fixed number of specific inputs and produces a fixed number of specific outputs. The output of the ufunc (and -its methods) is not necessarily a ndarray, if not all input arguments are +its methods) is not necessarily an ndarray, if not all input arguments are ndarrays. Indeed, if any input defines an ``__array_ufunc__`` method, control will be passed completely to that function, i.e., the ufunc is overridden. The ``__array_ufunc__`` method defined on that (non-ndarray) object has access to @@ -286,10 +286,10 @@ Consider the following: >>> type(ser) pandas.core.series.Series -Now, ``ser`` is **not** a ndarray, but because it +Now, ``ser`` is **not** an ndarray, but because it `implements the __array_ufunc__ protocol `__, -we can apply ufuncs to it as if it were a ndarray: +we can apply ufuncs to it as if it were an ndarray: >>> np.exp(ser) 0 2.718282 diff --git a/numpy/_core/einsumfunc.py b/numpy/_core/einsumfunc.py index 9461994f5795..3a04b02b9c93 100644 --- a/numpy/_core/einsumfunc.py +++ b/numpy/_core/einsumfunc.py @@ -401,7 +401,7 @@ def _greedy_path(input_sets, output_set, idx_dict, memory_limit): if result is not None: known_contractions.append(result) - # If we do not have a inner contraction, rescan pairs + # If we do not have an inner contraction, rescan pairs # including outer products if len(known_contractions) == 0: diff --git a/numpy/_core/src/multiarray/convert_datatype.c b/numpy/_core/src/multiarray/convert_datatype.c index dbab8b4253d8..9bc9efcc15ee 100644 --- a/numpy/_core/src/multiarray/convert_datatype.c +++ 
b/numpy/_core/src/multiarray/convert_datatype.c @@ -1817,7 +1817,7 @@ PyArray_Zero(PyArrayObject *arr) /* XXX this is dangerous, the caller probably is not aware that zeroval is actually a static PyObject* In the best case they will only use it as-is, but - if they simply memcpy it into a ndarray without using + if they simply memcpy it into an ndarray without using setitem(), refcount errors will occur */ memcpy(zeroval, &npy_static_pydata.zero_obj, sizeof(PyObject *)); @@ -1856,7 +1856,7 @@ PyArray_One(PyArrayObject *arr) /* XXX this is dangerous, the caller probably is not aware that oneval is actually a static PyObject* In the best case they will only use it as-is, but - if they simply memcpy it into a ndarray without using + if they simply memcpy it into an ndarray without using setitem(), refcount errors will occur */ memcpy(oneval, &npy_static_pydata.one_obj, sizeof(PyObject *)); diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index 3a43e1bd983b..bee865b494c7 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -1331,7 +1331,7 @@ _array_from_buffer_3118(PyObject *memoryview) PyExc_RuntimeError, "For the given ctypes object, neither the item size " "computed from the PEP 3118 buffer format nor from " - "converting the type to a np.dtype matched the actual " + "converting the type to an np.dtype matched the actual " "size. This is a bug both in python and numpy"); Py_DECREF(descr); return NULL; diff --git a/numpy/_core/src/umath/matmul.c.src b/numpy/_core/src/umath/matmul.c.src index 11e014acec7f..95d23995e630 100644 --- a/numpy/_core/src/umath/matmul.c.src +++ b/numpy/_core/src/umath/matmul.c.src @@ -517,7 +517,7 @@ NPY_NO_EXPORT void void *ip1=args[0], *ip2=args[1], *op=args[2]; #if @USEBLAS@ && defined(HAVE_CBLAS) /* - * TODO: refactor this out to a inner_loop_selector, in + * TODO: refactor this out to an inner_loop_selector, in * PyUFunc_MatmulLoopSelector. 
But that call does not have access to * n, m, p and strides. */ diff --git a/numpy/_core/tests/examples/cython/checks.pyx b/numpy/_core/tests/examples/cython/checks.pyx index f0f427d2167f..bfb2de95fa59 100644 --- a/numpy/_core/tests/examples/cython/checks.pyx +++ b/numpy/_core/tests/examples/cython/checks.pyx @@ -142,7 +142,7 @@ def get_dtype_flags(cnp.dtype dtype): cdef cnp.NpyIter* npyiter_from_nditer_obj(object it): - """A function to create a NpyIter struct from a nditer object. + """A function to create a NpyIter struct from an nditer object. This function is only meant for testing purposes and only extracts the necessary info from nditer to test the functionality of NpyIter methods diff --git a/numpy/_core/tests/test_memmap.py b/numpy/_core/tests/test_memmap.py index 8e2aa0a507b1..18ef2aad4d78 100644 --- a/numpy/_core/tests/test_memmap.py +++ b/numpy/_core/tests/test_memmap.py @@ -205,7 +205,7 @@ class MemmapSubClass(memmap): fp[:] = self.data # We keep previous behavior for subclasses of memmap, i.e. the - # ufunc and __getitem__ output is never turned into a ndarray + # ufunc and __getitem__ output is never turned into an ndarray assert_(sum(fp, axis=0).__class__ is MemmapSubClass) assert_(sum(fp).__class__ is MemmapSubClass) assert_(fp[1:, :-1].__class__ is MemmapSubClass) diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 87a834850fda..70634aa7e795 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -81,7 +81,7 @@ # When the sample contains exactly the percentile wanted, the virtual_index is # an integer to the index of this element. 
# When the percentile wanted is in between two elements, the virtual_index -# is made of a integer part (a.k.a 'i' or 'left') and a fractional part +# is made of an integer part (a.k.a 'i' or 'left') and a fractional part # (a.k.a 'g' or 'gamma') # # Each method in _QuantileMethods has two properties diff --git a/numpy/lib/_iotools.py b/numpy/lib/_iotools.py index 3586b41de86c..959a59e7480a 100644 --- a/numpy/lib/_iotools.py +++ b/numpy/lib/_iotools.py @@ -607,7 +607,7 @@ def __init__(self, dtype_or_func=None, default=None, missing_values=None, self.default = default or False dtype = np.dtype('bool') else: - # Is the input a np.dtype ? + # Is the input an np.dtype ? try: self.func = None dtype = np.dtype(dtype_or_func) diff --git a/numpy/lib/recfunctions.py b/numpy/lib/recfunctions.py index c8a6dd818e96..6b044ca07d61 100644 --- a/numpy/lib/recfunctions.py +++ b/numpy/lib/recfunctions.py @@ -326,7 +326,7 @@ def _izip_records(seqarrays, fill_value=None, flatten=True): def _fix_output(output, usemask=True, asrecarray=False): """ - Private function: return a recarray, a ndarray, a MaskedArray + Private function: return a recarray, an ndarray, a MaskedArray or a MaskedRecords depending on the input parameters """ if not isinstance(output, ma.MaskedArray): @@ -1334,7 +1334,7 @@ def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False, Dictionary mapping field names to the corresponding default values. usemask : {True, False}, optional Whether to return a MaskedArray (or MaskedRecords is - `asrecarray==True`) or a ndarray. + `asrecarray==True`) or an ndarray. asrecarray : {False, True}, optional Whether to return a recarray (or MaskedRecords if `usemask==True`) or just a flexible-type ndarray. @@ -1514,7 +1514,7 @@ def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2', Dictionary mapping field names to the corresponding default values. 
usemask : {True, False}, optional Whether to return a MaskedArray (or MaskedRecords is - `asrecarray==True`) or a ndarray. + `asrecarray==True`) or an ndarray. asrecarray : {False, True}, optional Whether to return a recarray (or MaskedRecords if `usemask==True`) or just a flexible-type ndarray. @@ -1667,7 +1667,7 @@ def rec_join(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2', defaults=None): """ Join arrays `r1` and `r2` on keys. - Alternative to join_by, that always returns a np.recarray. + Alternative to join_by, that always returns an np.recarray. See Also -------- diff --git a/numpy/linalg/_linalg.py b/numpy/linalg/_linalg.py index 6884c9b7ef8d..cfd0800aec0a 100644 --- a/numpy/linalg/_linalg.py +++ b/numpy/linalg/_linalg.py @@ -3004,7 +3004,7 @@ def _multi_dot_three(A, B, C, out=None): def _multi_dot_matrix_chain_order(arrays, return_costs=False): """ - Return a np.array that encodes the optimal order of multiplications. + Return an np.array that encodes the optimal order of multiplications. The optimal order array is then used by `_multi_dot()` to do the multiplication. diff --git a/numpy/ma/core.py b/numpy/ma/core.py index e0ead317d9f6..e107b2c6ab7e 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -710,12 +710,12 @@ def getdata(a, subok=True): Return the data of a masked array as an ndarray. Return the data of `a` (if any) as an ndarray if `a` is a ``MaskedArray``, - else return `a` as a ndarray or subclass (depending on `subok`) if not. + else return `a` as an ndarray or subclass (depending on `subok`) if not. Parameters ---------- a : array_like - Input ``MaskedArray``, alternatively a ndarray or a subclass thereof. + Input ``MaskedArray``, alternatively an ndarray or a subclass thereof. subok : bool Whether to force the output to be a `pure` ndarray (False) or to return a subclass of ndarray if appropriate (True, default). 
@@ -3152,7 +3152,7 @@ def __array_wrap__(self, obj, context=None, return_scalar=False): # Get the domain mask domain = ufunc_domain.get(func) if domain is not None: - # Take the domain, and make sure it's a ndarray + # Take the domain, and make sure it's an ndarray with np.errstate(divide='ignore', invalid='ignore'): # The result may be masked for two (unary) domains. # That can't really be right as some domains drop @@ -3552,7 +3552,7 @@ def __setmask__(self, mask, copy=False): mask = mask.astype(mdtype) # Mask is a sequence else: - # Make sure the new mask is a ndarray with the proper dtype + # Make sure the new mask is an ndarray with the proper dtype try: copy = None if not copy else True mask = np.array(mask, copy=copy, dtype=mdtype) @@ -3597,7 +3597,7 @@ def mask(self, value): def recordmask(self): """ Get or set the mask of the array if it has no named fields. For - structured arrays, returns a ndarray of booleans where entries are + structured arrays, returns an ndarray of booleans where entries are ``True`` if **all** the fields are masked, ``False`` otherwise: >>> x = np.ma.array([(1, 1), (2, 2), (3, 3), (4, 4), (5, 5)], @@ -6553,7 +6553,7 @@ def __new__(self, data, mask=nomask, dtype=None, fill_value=None, @property def _data(self): - # Make sure that the _data part is a np.void + # Make sure that the _data part is an np.void return super()._data[()] def __getitem__(self, indx): diff --git a/numpy/typing/__init__.py b/numpy/typing/__init__.py index ef4c0885257b..1a90f9e0c212 100644 --- a/numpy/typing/__init__.py +++ b/numpy/typing/__init__.py @@ -93,7 +93,7 @@ Number precision ~~~~~~~~~~~~~~~~ -The precision of `numpy.number` subclasses is treated as a invariant generic +The precision of `numpy.number` subclasses is treated as an invariant generic parameter (see :class:`~NBitBase`), simplifying the annotating of processes involving precision-based casting. 
From bab2cb21ee08c0fd3ac8ab79b24629885b88bfe5 Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Mon, 8 Dec 2025 03:46:14 -0500 Subject: [PATCH 0988/1718] MAINT: Use RAII objects in unique.cpp to ensure safe resource management. (#30329) Use RAII scoped locking for string locking and GIL release in unique and fix a few smaller things. --- * BENCH: Add UniqueIntegers benchmark. * Remove incorrect call of Py_DECREF(res_obj). * Don't use std::invoke. * Simplify code in the unique_* functions--no need for a lambda. * BUG: Add missing Py_DECREF(arr) in array__unique_hash(). * BUG: Add missing Py_XDECREF(arr) if npy_parse_args() fails in array__unique_hash(). Unlikely to be needed, but might as well be thorough. --- benchmarks/benchmarks/bench_lib.py | 47 ++++ numpy/_core/src/common/raii_utils.hpp | 9 +- numpy/_core/src/multiarray/unique.cpp | 373 +++++++++++--------------- 3 files changed, 217 insertions(+), 212 deletions(-) diff --git a/benchmarks/benchmarks/bench_lib.py b/benchmarks/benchmarks/bench_lib.py index 33e2871fc727..11d454ae41bf 100644 --- a/benchmarks/benchmarks/bench_lib.py +++ b/benchmarks/benchmarks/bench_lib.py @@ -2,6 +2,8 @@ import string +from asv_runner.benchmarks.mark import SkipNotImplemented + import numpy as np from .common import Benchmark @@ -189,6 +191,51 @@ def time_unique_all(self, array_size, percent_nans, return_inverse=True, return_counts=True) +class UniqueIntegers(Benchmark): + """Benchmark for np.unique with integer dtypes.""" + + param_names = ["array_size", "num_unique_values", "dtype"] + params = [ + # sizes of the 1D arrays + [200, 100000, 1000000], + # number of unique values in arrays + [25, 125, 5000, 50000, 250000], + # dtypes of the arrays + [np.uint8, np.int16, np.uint32, np.int64], + ] + + def setup(self, array_size, num_unique_values, dtype): + unique_array = np.arange(num_unique_values, dtype=dtype) + base_array = np.resize(unique_array, array_size) + rng = np.random.default_rng(121263137472525314065) + 
rng.shuffle(base_array) + self.arr = base_array + + def time_unique_values(self, array_size, num_unique_values, dtype): + if num_unique_values >= np.iinfo(dtype).max or num_unique_values > array_size: + raise SkipNotImplemented("skipped") + np.unique(self.arr, return_index=False, + return_inverse=False, return_counts=False) + + def time_unique_counts(self, array_size, num_unique_values, dtype): + if num_unique_values >= np.iinfo(dtype).max or num_unique_values > array_size: + raise SkipNotImplemented("skipped") + np.unique(self.arr, return_index=False, + return_inverse=False, return_counts=True,) + + def time_unique_inverse(self, array_size, num_unique_values, dtype): + if num_unique_values >= np.iinfo(dtype).max or num_unique_values > array_size: + raise SkipNotImplemented("skipped") + np.unique(self.arr, return_index=False, + return_inverse=True, return_counts=False) + + def time_unique_all(self, array_size, num_unique_values, dtype): + if num_unique_values >= np.iinfo(dtype).max or num_unique_values > array_size: + raise SkipNotImplemented("skipped") + np.unique(self.arr, return_index=True, + return_inverse=True, return_counts=True) + + class Isin(Benchmark): """Benchmarks for `numpy.isin`.""" diff --git a/numpy/_core/src/common/raii_utils.hpp b/numpy/_core/src/common/raii_utils.hpp index e92d0eae9269..1049e97387f0 100644 --- a/numpy/_core/src/common/raii_utils.hpp +++ b/numpy/_core/src/common/raii_utils.hpp @@ -126,9 +126,11 @@ class SaveThreadState // // Instead of // +// Py_INCREF(descr); // npy_string_allocator *allocator = NpyString_acquire_allocator(descr); // [code that uses allocator] // NpyString_release_allocator(allocator); +// Py_DECREF(descr); // // use // @@ -139,16 +141,19 @@ class SaveThreadState // class NpyStringAcquireAllocator { + PyArray_StringDTypeObject *_descr; npy_string_allocator *_allocator; public: - NpyStringAcquireAllocator(PyArray_StringDTypeObject *descr) { - _allocator = NpyString_acquire_allocator(descr); + 
NpyStringAcquireAllocator(PyArray_StringDTypeObject *descr) : _descr(descr) { + Py_INCREF(_descr); + _allocator = NpyString_acquire_allocator(_descr); } ~NpyStringAcquireAllocator() { NpyString_release_allocator(_allocator); + Py_DECREF(_descr); } NpyStringAcquireAllocator(const NpyStringAcquireAllocator&) = delete; diff --git a/numpy/_core/src/multiarray/unique.cpp b/numpy/_core/src/multiarray/unique.cpp index 77083c04c519..8fc5b580961e 100644 --- a/numpy/_core/src/multiarray/unique.cpp +++ b/numpy/_core/src/multiarray/unique.cpp @@ -1,13 +1,13 @@ #define NPY_NO_DEPRECATED_API NPY_API_VERSION #define _MULTIARRAYMODULE -#define HASH_TABLE_INITIAL_BUCKETS 1024 #include #include #include #include #include +#include #include #include #include @@ -15,6 +15,7 @@ #include #include "numpy/arrayobject.h" #include "gil_utils.h" +#include "raii_utils.hpp" extern "C" { #include "fnv.h" #include "npy_argparse.h" @@ -22,18 +23,42 @@ extern "C" { #include "numpy/halffloat.h" } -// This is to use RAII pattern to handle cpp exceptions while avoiding memory leaks. -// Adapted from https://stackoverflow.com/a/25510879/2536294 -template -struct FinalAction { - FinalAction(F f) : clean_{f} {} - ~FinalAction() { clean_(); } - private: - F clean_; -}; -template -FinalAction finally(F f) { - return FinalAction(f); +// HASH_TABLE_INITIAL_BUCKETS is the reserve hashset capacity used in the +// std::unordered_set instances in the various unique_* functions. +// We use min(input_size, HASH_TABLE_INITIAL_BUCKETS) as the initial bucket +// count: +// - Reserving for all elements (isize) may over-allocate when there are few +// unique values. +// - Using a moderate upper bound HASH_TABLE_INITIAL_BUCKETS(1024) keeps +// memory usage reasonable (4 KiB for pointers). +// See https://github.com/numpy/numpy/pull/28767#discussion_r2064267631 +const npy_intp HASH_TABLE_INITIAL_BUCKETS = 1024; + +// +// Create a 1-d array with the given length that has the same +// dtype as the input `arr`. 
+// +static inline PyArrayObject * +empty_array_like(PyArrayObject *arr, npy_intp length) +{ + PyArray_Descr *descr = PyArray_DESCR(arr); + Py_INCREF(descr); + + // Create the output array. + PyArrayObject *res_obj = + reinterpret_cast( + PyArray_NewFromDescr( + &PyArray_Type, + descr, + 1, // ndim + &length, // shape + NULL, // strides + NULL, // data + NPY_ARRAY_WRITEABLE, // flags + NULL // obj + ) + ); + return res_obj; } template @@ -183,19 +208,10 @@ static PyObject* unique_numeric(PyArrayObject *self, npy_bool equal_nan) { /* - * Returns a new NumPy array containing the unique values of the input array of numeric (integer or complex). - * This function uses hashing to identify uniqueness efficiently. - */ - NPY_ALLOW_C_API_DEF; - NPY_ALLOW_C_API; - PyArray_Descr *descr = PyArray_DESCR(self); - Py_INCREF(descr); - NPY_DISABLE_C_API; - - PyThreadState *_save1 = PyEval_SaveThread(); - - // number of elements in the input array - npy_intp isize = PyArray_SIZE(self); + * Returns a new NumPy array containing the unique values of the input + * array of numeric (integer or complex). + * This function uses hashing to identify uniqueness efficiently. + */ auto hash = [equal_nan](const T *value) -> size_t { return hash_func(value, equal_nan); @@ -204,55 +220,38 @@ unique_numeric(PyArrayObject *self, npy_bool equal_nan) return equal_func(lhs, rhs, equal_nan); }; - // Reserve hashset capacity in advance to minimize reallocations and collisions. - // We use min(isize, HASH_TABLE_INITIAL_BUCKETS) as the initial bucket count: - // - Reserving for all elements (isize) may over-allocate when there are few unique values. - // - Using a moderate upper bound HASH_TABLE_INITIAL_BUCKETS(1024) keeps memory usage reasonable (4 KiB for pointers). 
- // See discussion: https://github.com/numpy/numpy/pull/28767#discussion_r2064267631 - std::unordered_set hashset( - std::min(isize, (npy_intp)HASH_TABLE_INITIAL_BUCKETS), hash, equal - ); - - // Input array is one-dimensional, enabling efficient iteration using strides. - char *idata = PyArray_BYTES(self); - npy_intp istride = PyArray_STRIDES(self)[0]; - for (npy_intp i = 0; i < isize; i++, idata += istride) { - hashset.insert((T *)idata); - } + using set_type = std::unordered_set; + + // number of elements in the input array + npy_intp isize = PyArray_SIZE(self); + set_type hashset(std::min(isize, HASH_TABLE_INITIAL_BUCKETS), hash, equal); - npy_intp length = hashset.size(); - - PyEval_RestoreThread(_save1); - NPY_ALLOW_C_API; - PyObject *res_obj = PyArray_NewFromDescr( - &PyArray_Type, - descr, - 1, // ndim - &length, // shape - NULL, // strides - NULL, // data - // This flag is needed to be able to call .sort on it. - NPY_ARRAY_WRITEABLE, // flags - NULL // obj - ); + { + np::raii::SaveThreadState save_thread_state{}; + char *idata = PyArray_BYTES(self); + npy_intp istride = PyArray_STRIDES(self)[0]; + for (npy_intp i = 0; i < isize; i++, idata += istride) { + hashset.insert(reinterpret_cast(idata)); + } + } + + PyArrayObject *res_obj = empty_array_like(self, hashset.size()); if (res_obj == NULL) { return NULL; } - NPY_DISABLE_C_API; - PyThreadState *_save2 = PyEval_SaveThread(); - auto save2_dealloc = finally([&]() { - PyEval_RestoreThread(_save2); - }); - - char *odata = PyArray_BYTES((PyArrayObject *)res_obj); - npy_intp ostride = PyArray_STRIDES((PyArrayObject *)res_obj)[0]; - // Output array is one-dimensional, enabling efficient iteration using strides. 
- for (auto it = hashset.begin(); it != hashset.end(); it++, odata += ostride) { - copy_func(odata, *it); + + { + np::raii::SaveThreadState save_thread_state{}; + + char *odata = PyArray_BYTES(res_obj); + npy_intp ostride = PyArray_STRIDES(res_obj)[0]; + for (auto it = hashset.begin(); it != hashset.end(); it++, odata += ostride) { + copy_func(odata, *it); + } } - return res_obj; + return reinterpret_cast(res_obj); } template @@ -260,23 +259,16 @@ static PyObject* unique_string(PyArrayObject *self, npy_bool equal_nan) { /* - * Returns a new NumPy array containing the unique values of the input array of fixed size strings. - * This function uses hashing to identify uniqueness efficiently. - */ - NPY_ALLOW_C_API_DEF; - NPY_ALLOW_C_API; - PyArray_Descr *descr = PyArray_DESCR(self); - Py_INCREF(descr); - NPY_DISABLE_C_API; - - PyThreadState *_save1 = PyEval_SaveThread(); - - // number of elements in the input array - npy_intp isize = PyArray_SIZE(self); + * Returns a new NumPy array containing the unique values of the input + * array of fixed size strings. + * This function uses hashing to identify uniqueness efficiently. + */ + PyArray_Descr *descr = PyArray_DESCR(self); // variables for the string npy_intp itemsize = descr->elsize; npy_intp num_chars = itemsize / sizeof(T); + auto hash = [num_chars](const T *value) -> size_t { return npy_fnv1a(value, num_chars * sizeof(T)); }; @@ -284,77 +276,48 @@ unique_string(PyArrayObject *self, npy_bool equal_nan) return std::memcmp(lhs, rhs, itemsize) == 0; }; - // Reserve hashset capacity in advance to minimize reallocations and collisions. - // We use min(isize, HASH_TABLE_INITIAL_BUCKETS) as the initial bucket count: - // - Reserving for all elements (isize) may over-allocate when there are few unique values. - // - Using a moderate upper bound HASH_TABLE_INITIAL_BUCKETS(1024) keeps memory usage reasonable (4 KiB for pointers). 
- // See discussion: https://github.com/numpy/numpy/pull/28767#discussion_r2064267631 - std::unordered_set hashset( - std::min(isize, (npy_intp)HASH_TABLE_INITIAL_BUCKETS), hash, equal - ); - - // Input array is one-dimensional, enabling efficient iteration using strides. - char *idata = PyArray_BYTES(self); - npy_intp istride = PyArray_STRIDES(self)[0]; - for (npy_intp i = 0; i < isize; i++, idata += istride) { - hashset.insert((T *)idata); - } + using set_type = std::unordered_set; + + // number of elements in the input array + npy_intp isize = PyArray_SIZE(self); + set_type hashset(std::min(isize, HASH_TABLE_INITIAL_BUCKETS), hash, equal); - npy_intp length = hashset.size(); - - PyEval_RestoreThread(_save1); - NPY_ALLOW_C_API; - PyObject *res_obj = PyArray_NewFromDescr( - &PyArray_Type, - descr, - 1, // ndim - &length, // shape - NULL, // strides - NULL, // data - // This flag is needed to be able to call .sort on it. - NPY_ARRAY_WRITEABLE, // flags - NULL // obj - ); + { + np::raii::SaveThreadState save_thread_state{}; + char *idata = PyArray_BYTES(self); + npy_intp istride = PyArray_STRIDES(self)[0]; + for (npy_intp i = 0; i < isize; i++, idata += istride) { + hashset.insert(reinterpret_cast(idata)); + } + } + + PyArrayObject *res_obj = empty_array_like(self, hashset.size()); if (res_obj == NULL) { return NULL; } - NPY_DISABLE_C_API; - PyThreadState *_save2 = PyEval_SaveThread(); - auto save2_dealloc = finally([&]() { - PyEval_RestoreThread(_save2); - }); - - char *odata = PyArray_BYTES((PyArrayObject *)res_obj); - npy_intp ostride = PyArray_STRIDES((PyArrayObject *)res_obj)[0]; - // Output array is one-dimensional, enabling efficient iteration using strides. 
- for (auto it = hashset.begin(); it != hashset.end(); it++, odata += ostride) { - std::memcpy(odata, *it, itemsize); + + { + np::raii::SaveThreadState save_thread_state{}; + + char *odata = PyArray_BYTES(res_obj); + npy_intp ostride = PyArray_STRIDES(res_obj)[0]; + for (auto it = hashset.begin(); it != hashset.end(); it++, odata += ostride) { + std::memcpy(odata, *it, itemsize); + } } - return res_obj; + return reinterpret_cast(res_obj); } static PyObject* unique_vstring(PyArrayObject *self, npy_bool equal_nan) { /* - * Returns a new NumPy array containing the unique values of the input array. - * This function uses hashing to identify uniqueness efficiently. - */ - NPY_ALLOW_C_API_DEF; - NPY_ALLOW_C_API; - PyArray_Descr *descr = PyArray_DESCR(self); - Py_INCREF(descr); - NPY_DISABLE_C_API; - - PyThreadState *_save1 = PyEval_SaveThread(); - - // number of elements in the input array - npy_intp isize = PyArray_SIZE(self); + * Returns a new NumPy array containing the unique values of the input array. + * This function uses hashing to identify uniqueness efficiently. + */ - // variables for the vstring - npy_string_allocator *in_allocator = NpyString_acquire_allocator((PyArray_StringDTypeObject *)descr); auto hash = [equal_nan](const npy_static_string *value) -> size_t { if (value->buf == NULL) { if (equal_nan) { @@ -382,83 +345,70 @@ unique_vstring(PyArrayObject *self, npy_bool equal_nan) return std::memcmp(lhs->buf, rhs->buf, lhs->size) == 0; }; - // Reserve hashset capacity in advance to minimize reallocations and collisions. - // We use min(isize, HASH_TABLE_INITIAL_BUCKETS) as the initial bucket count: - // - Reserving for all elements (isize) may over-allocate when there are few unique values. - // - Using a moderate upper bound HASH_TABLE_INITIAL_BUCKETS(1024) keeps memory usage reasonable (4 KiB for pointers). 
- // See discussion: https://github.com/numpy/numpy/pull/28767#discussion_r2064267631 - std::unordered_set hashset( - std::min(isize, (npy_intp)HASH_TABLE_INITIAL_BUCKETS), hash, equal - ); - - // Input array is one-dimensional, enabling efficient iteration using strides. - char *idata = PyArray_BYTES(self); - npy_intp istride = PyArray_STRIDES(self)[0]; - // unpacked_strings need to be allocated outside of the loop because of the lifetime problem. + npy_intp isize = PyArray_SIZE(self); + // unpacked_strings must live as long as hashset because hashset points + // to values in this vector. std::vector unpacked_strings(isize, {0, NULL}); - for (npy_intp i = 0; i < isize; i++, idata += istride) { - npy_packed_static_string *packed_string = (npy_packed_static_string *)idata; - int is_null = NpyString_load(in_allocator, packed_string, &unpacked_strings[i]); - if (is_null == -1) { - npy_gil_error(PyExc_RuntimeError, - "Failed to load string from packed static string. "); - return NULL; + + using set_type = std::unordered_set; + set_type hashset(std::min(isize, HASH_TABLE_INITIAL_BUCKETS), hash, equal); + + { + PyArray_StringDTypeObject *descr = + reinterpret_cast(PyArray_DESCR(self)); + np::raii::NpyStringAcquireAllocator alloc(descr); + np::raii::SaveThreadState save_thread_state{}; + + char *idata = PyArray_BYTES(self); + npy_intp istride = PyArray_STRIDES(self)[0]; + + for (npy_intp i = 0; i < isize; i++, idata += istride) { + npy_packed_static_string *packed_string = + reinterpret_cast(idata); + int is_null = NpyString_load(alloc.allocator(), packed_string, + &unpacked_strings[i]); + if (is_null == -1) { + // Unexpected error. Throw a C++ exception that will be caught + // by the caller of unique_vstring() and converted into a Python + // RuntimeError. 
+ throw std::runtime_error("Failed to load string from packed " + "static string."); + } + hashset.insert(&unpacked_strings[i]); } - hashset.insert(&unpacked_strings[i]); } - NpyString_release_allocator(in_allocator); - - npy_intp length = hashset.size(); - - PyEval_RestoreThread(_save1); - NPY_ALLOW_C_API; - PyObject *res_obj = PyArray_NewFromDescr( - &PyArray_Type, - descr, - 1, // ndim - &length, // shape - NULL, // strides - NULL, // data - // This flag is needed to be able to call .sort on it. - NPY_ARRAY_WRITEABLE, // flags - NULL // obj - ); + PyArrayObject *res_obj = empty_array_like(self, hashset.size()); if (res_obj == NULL) { return NULL; } - PyArray_Descr *res_descr = PyArray_DESCR((PyArrayObject *)res_obj); - Py_INCREF(res_descr); - NPY_DISABLE_C_API; - - PyThreadState *_save2 = PyEval_SaveThread(); - auto save2_dealloc = finally([&]() { - PyEval_RestoreThread(_save2); - }); - - npy_string_allocator *out_allocator = NpyString_acquire_allocator((PyArray_StringDTypeObject *)res_descr); - auto out_allocator_dealloc = finally([&]() { - NpyString_release_allocator(out_allocator); - }); - - char *odata = PyArray_BYTES((PyArrayObject *)res_obj); - npy_intp ostride = PyArray_STRIDES((PyArrayObject *)res_obj)[0]; - // Output array is one-dimensional, enabling efficient iteration using strides. 
- for (auto it = hashset.begin(); it != hashset.end(); it++, odata += ostride) { - npy_packed_static_string *packed_string = (npy_packed_static_string *)odata; - int pack_status = 0; - if ((*it)->buf == NULL) { - pack_status = NpyString_pack_null(out_allocator, packed_string); - } else { - pack_status = NpyString_pack(out_allocator, packed_string, (*it)->buf, (*it)->size); - } - if (pack_status == -1) { - // string packing failed - return NULL; + + { + PyArray_StringDTypeObject *res_descr = + reinterpret_cast(PyArray_DESCR(res_obj)); + np::raii::NpyStringAcquireAllocator alloc(res_descr); + np::raii::SaveThreadState save_thread_state{}; + + char *odata = PyArray_BYTES(res_obj); + npy_intp ostride = PyArray_STRIDES(res_obj)[0]; + for (auto it = hashset.begin(); it != hashset.end(); it++, odata += ostride) { + npy_packed_static_string *packed_string = + reinterpret_cast(odata); + int pack_status = 0; + if ((*it)->buf == NULL) { + pack_status = NpyString_pack_null(alloc.allocator(), packed_string); + } else { + pack_status = NpyString_pack(alloc.allocator(), packed_string, + (*it)->buf, (*it)->size); + } + if (pack_status == -1) { + // string packing failed + return NULL; + } } } - - return res_obj; + return reinterpret_cast(res_obj); } @@ -549,24 +499,27 @@ array__unique_hash(PyObject *NPY_UNUSED(module), NULL, NULL, NULL ) < 0 ) { + Py_XDECREF(arr); return NULL; } + PyObject *result = NULL; try { auto type = PyArray_TYPE(arr); // we only support data types present in our unique_funcs map if (unique_funcs.find(type) == unique_funcs.end()) { Py_RETURN_NOTIMPLEMENTED; } - - return unique_funcs[type](arr, equal_nan); + result = unique_funcs[type](arr, equal_nan); } catch (const std::bad_alloc &e) { PyErr_NoMemory(); - return NULL; + result = NULL; } catch (const std::exception &e) { PyErr_SetString(PyExc_RuntimeError, e.what()); - return NULL; + result = NULL; } + Py_DECREF(arr); + return result; } From 7eec5e2977871dfbfd363a828d5f11131b46d953 Mon Sep 17 00:00:00 2001 
From: Arthur Lacote Date: Mon, 8 Dec 2025 09:55:27 +0100 Subject: [PATCH 0989/1718] BUG: fix reduction issue in weighted quantile (#30070) * TST: Test weighted quantile with multiple axes reduction * FIX: Fix weighted quantile reduction by handling weights in _ureduce --- numpy/lib/_function_base_impl.py | 20 ++++++++++++++------ numpy/lib/tests/test_function_base.py | 11 +++++++++++ 2 files changed, 25 insertions(+), 6 deletions(-) diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 87a834850fda..e407598d62c6 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -3857,13 +3857,21 @@ def _ureduce(a, func, keepdims=False, **kwargs): if len(axis) == 1: kwargs['axis'] = axis[0] else: - keep = set(range(nd)) - set(axis) + keep = sorted(set(range(nd)) - set(axis)) nkeep = len(keep) - # swap axis that should not be reduced to front - for i, s in enumerate(sorted(keep)): - a = a.swapaxes(i, s) - # merge reduced axis - a = a.reshape(a.shape[:nkeep] + (-1,)) + + def reshape_arr(a): + # move axis that should not be reduced to front + a = np.moveaxis(a, keep, range(nkeep)) + # merge reduced axis + return a.reshape(a.shape[:nkeep] + (-1,)) + + a = reshape_arr(a) + + weights = kwargs.get("weights") + if weights is not None: + kwargs["weights"] = reshape_arr(weights) + kwargs['axis'] = -1 elif keepdims and out is not None: index_out = (0, ) * nd diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 5b8b0adbdd0d..412f06d07e20 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -4228,6 +4228,17 @@ def test_quantile_with_weights_and_axis(self, method): ) assert_allclose(q, q_res) + # axis is a tuple of all axes + q = np.quantile(y, alpha, weights=w, method=method, axis=(0, 1, 2)) + q_res = np.quantile(y, alpha, weights=w, method=method, axis=None) + assert_allclose(q, q_res) + + q = np.quantile(y, alpha, weights=w, 
method=method, axis=(1, 2)) + q_res = np.zeros(shape=(2,)) + for i in range(2): + q_res[i] = np.quantile(y[i], alpha, weights=w[i], method=method) + assert_allclose(q, q_res) + @pytest.mark.parametrize("method", methods_supporting_weights) def test_quantile_weights_min_max(self, method): # Test weighted quantile at 0 and 1 with leading and trailing zero From 13f849dc9a49104c7fc8c6cd521323f3990b3b95 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Mon, 8 Dec 2025 18:31:23 +0100 Subject: [PATCH 0990/1718] TYP: ``random.Generator``: many fixes and improvements (#30391) --- numpy/random/_generator.pyi | 1364 ++++++++++----------- numpy/typing/tests/data/reveal/random.pyi | 415 ++----- 2 files changed, 751 insertions(+), 1028 deletions(-) diff --git a/numpy/random/_generator.pyi b/numpy/random/_generator.pyi index 9b0e5c331b57..f3f911b98ce7 100644 --- a/numpy/random/_generator.pyi +++ b/numpy/random/_generator.pyi @@ -1,255 +1,516 @@ from collections.abc import Callable, MutableSequence -from typing import Any, Literal, overload +from typing import Any, Literal, Self, overload import numpy as np -from numpy import float32, float64, int64 from numpy._typing import ( ArrayLike, DTypeLike, NDArray, + _ArrayLike, _ArrayLikeFloat_co, _ArrayLikeInt_co, - _BoolCodes, _DoubleCodes, _DTypeLike, - _DTypeLikeBool, _Float32Codes, _Float64Codes, _FloatLike_co, - _Int8Codes, - _Int16Codes, - _Int32Codes, _Int64Codes, - _IntPCodes, + _NestedSequence, _ShapeLike, _SingleCodes, - _UInt8Codes, - _UInt16Codes, - _UInt32Codes, - _UInt64Codes, - _UIntPCodes, ) -from numpy.random import BitGenerator, RandomState, SeedSequence -type _DTypeLikeFloat32 = _DTypeLike[np.float32] | _Float32Codes | _SingleCodes -type _DTypeLikeFloat64 = type[float] | _DTypeLike[np.float64] | _Float64Codes | _DoubleCodes +from .bit_generator import BitGenerator, SeedSequence +from .mtrand import RandomState + +type _ArrayF32 = NDArray[np.float32] +type _ArrayF64 = NDArray[np.float64] + +type _DTypeLikeI64 = 
_DTypeLike[np.int64] | _Int64Codes +type _DTypeLikeF32 = _DTypeLike[np.float32] | _Float32Codes | _SingleCodes +type _DTypeLikeF64 = type[float] | _DTypeLike[np.float64] | _Float64Codes | _DoubleCodes +# we use `str` to avoid type-checker performance issues because of the many `Literal` variants +type _DTypeLikeFloat = type[float] | _DTypeLike[np.float32 | np.float64] | str + +# Similar to `_ArrayLike{}_co`, but rejects scalars +type _NDArrayLikeInt = NDArray[np.generic[int]] | _NestedSequence[int] +type _NDArrayLikeFloat = NDArray[np.generic[float]] | _NestedSequence[float] + +type _MethodExp = Literal["zig", "inv"] ### class Generator: def __init__(self, bit_generator: BitGenerator) -> None: ... - def __repr__(self) -> str: ... - def __str__(self) -> str: ... - def __getstate__(self) -> None: ... def __setstate__(self, state: dict[str, Any] | None) -> None: ... def __reduce__(self) -> tuple[Callable[[BitGenerator], Generator], tuple[BitGenerator], None]: ... + + # @property def bit_generator(self) -> BitGenerator: ... - def spawn(self, n_children: int) -> list[Generator]: ... + def spawn(self, n_children: int) -> list[Self]: ... def bytes(self, length: int) -> bytes: ... + + # continuous distributions + + # @overload - def standard_normal( - self, - size: None = None, - dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ..., - out: None = None, - ) -> float: ... - @overload - def standard_normal( - self, - size: _ShapeLike | None = None, - ) -> NDArray[float64]: ... - @overload - def standard_normal( - self, - *, - out: NDArray[float64] | None = None, - ) -> NDArray[float64]: ... - @overload - def standard_normal( - self, - size: _ShapeLike | None = None, - dtype: _DTypeLikeFloat32 = ..., - out: NDArray[float32] | None = None, - ) -> NDArray[float32]: ... - @overload - def standard_normal( - self, - size: _ShapeLike | None = None, - dtype: _DTypeLikeFloat64 = ..., - out: NDArray[float64] | None = None, - ) -> NDArray[float64]: ... 
- @overload - def permutation(self, x: int, axis: int = 0) -> NDArray[int64]: ... - @overload - def permutation(self, x: ArrayLike, axis: int = 0) -> NDArray[Any]: ... + def standard_cauchy(self, size: None = None) -> float: ... @overload + def standard_cauchy(self, size: _ShapeLike) -> _ArrayF64: ... + + # + @overload # size=None (default); NOTE: dtype is ignored + def random(self, size: None = None, dtype: _DTypeLikeFloat = ..., out: None = None) -> float: ... + @overload # size=, dtype=f64 (default) + def random(self, size: _ShapeLike, dtype: _DTypeLikeF64 = ..., out: None = None) -> _ArrayF64: ... + @overload # size=, dtype=f32 + def random(self, size: _ShapeLike, dtype: _DTypeLikeF32, out: None = None) -> _ArrayF32: ... + @overload # out: f64 array (keyword) + def random[ArrayT: _ArrayF64](self, size: _ShapeLike | None = None, dtype: _DTypeLikeF64 = ..., *, out: ArrayT) -> ArrayT: ... + @overload # dtype: f32 (keyword), out: f64 array + def random[ArrayT: _ArrayF32](self, size: _ShapeLike | None = None, *, dtype: _DTypeLikeF32, out: ArrayT) -> ArrayT: ... + @overload # out: f64 array (positional) + def random[ArrayT: _ArrayF64](self, size: _ShapeLike | None, dtype: _DTypeLikeF64, out: ArrayT) -> ArrayT: ... + @overload # dtype: f32 (positional), out: f32 array + def random[ArrayT: _ArrayF32](self, size: _ShapeLike | None, dtype: _DTypeLikeF32, out: ArrayT) -> ArrayT: ... + + # + @overload # size=None (default); NOTE: dtype is ignored + def standard_normal(self, size: None = None, dtype: _DTypeLikeFloat = ..., out: None = None) -> float: ... + @overload # size=, dtype: f64 (default) + def standard_normal(self, size: _ShapeLike, dtype: _DTypeLikeF64 = ..., out: None = None) -> _ArrayF64: ... + @overload # size=, dtype: f32 + def standard_normal(self, size: _ShapeLike, dtype: _DTypeLikeF32, *, out: None = None) -> _ArrayF32: ... 
+ @overload # dtype: f64 (default), out: f64 array (keyword) + def standard_normal[ArrayT: _ArrayF64]( + self, size: _ShapeLike | None = None, dtype: _DTypeLikeF64 = ..., *, out: ArrayT + ) -> ArrayT: ... + @overload # dtype: f32 (keyword), out: f32 array + def standard_normal[ArrayT: _ArrayF32]( + self, size: _ShapeLike | None = None, *, dtype: _DTypeLikeF32, out: ArrayT + ) -> ArrayT: ... + @overload # dtype: f32 (positional), out: f32 array + def standard_normal[ArrayT: _ArrayF32](self, size: _ShapeLike | None, dtype: _DTypeLikeF32, out: ArrayT) -> ArrayT: ... + + # + @overload # size=None (default); NOTE: dtype is ignored def standard_exponential( - self, - size: None = None, - dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ..., - method: Literal["zig", "inv"] = "zig", - out: None = None, + self, size: None = None, dtype: _DTypeLikeFloat = ..., method: _MethodExp = "zig", out: None = None ) -> float: ... - @overload - def standard_exponential( - self, - size: _ShapeLike | None = None, - ) -> NDArray[float64]: ... - @overload - def standard_exponential( - self, - *, - out: NDArray[float64] | None = None, - ) -> NDArray[float64]: ... - @overload - def standard_exponential( - self, - size: _ShapeLike | None = None, - *, - method: Literal["zig", "inv"] = "zig", - out: NDArray[float64] | None = None, - ) -> NDArray[float64]: ... - @overload + @overload # size=, dtype: f64 (default) def standard_exponential( - self, - size: _ShapeLike | None = None, - dtype: _DTypeLikeFloat32 = ..., - method: Literal["zig", "inv"] = "zig", - out: NDArray[float32] | None = None, - ) -> NDArray[float32]: ... - @overload + self, size: _ShapeLike, dtype: _DTypeLikeF64 = ..., method: _MethodExp = "zig", out: None = None + ) -> _ArrayF64: ... 
+ @overload # size=, dtype: f32 (default) def standard_exponential( - self, - size: _ShapeLike | None = None, - dtype: _DTypeLikeFloat64 = ..., - method: Literal["zig", "inv"] = "zig", - out: NDArray[float64] | None = None, - ) -> NDArray[float64]: ... - @overload - def random( - self, - size: None = None, - dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ..., - out: None = None, - ) -> float: ... - @overload - def random( - self, - *, - out: NDArray[float64] | None = None, - ) -> NDArray[float64]: ... - @overload - def random( - self, - size: _ShapeLike | None = None, - *, - out: NDArray[float64] | None = None, - ) -> NDArray[float64]: ... - @overload - def random( - self, - size: _ShapeLike | None = None, - dtype: _DTypeLikeFloat32 = ..., - out: NDArray[float32] | None = None, - ) -> NDArray[float32]: ... - @overload - def random( - self, - size: _ShapeLike | None = None, - dtype: _DTypeLikeFloat64 = ..., - out: NDArray[float64] | None = None, - ) -> NDArray[float64]: ... - @overload - def beta( - self, - a: _FloatLike_co, - b: _FloatLike_co, - size: None = None, + self, size: _ShapeLike, dtype: _DTypeLikeF32, method: _MethodExp = "zig", out: None = None + ) -> _ArrayF32: ... + @overload # dtype: f64 (default), out: f64 array (keyword) + def standard_exponential[ArrayT: _ArrayF64]( + self, size: _ShapeLike | None = None, dtype: _DTypeLikeF64 = ..., method: _MethodExp = "zig", *, out: ArrayT + ) -> ArrayT: ... + @overload # dtype: f32 (keyword), out: f32 array + def standard_exponential[ArrayT: _ArrayF32]( + self, size: _ShapeLike | None = None, *, dtype: _DTypeLikeF32, method: _MethodExp = "zig", out: ArrayT + ) -> ArrayT: ... + @overload # dtype: f32 (positional), out: f32 array (keyword) + def standard_exponential[ArrayT: _ArrayF32]( + self, size: _ShapeLike | None, dtype: _DTypeLikeF32, method: _MethodExp = "zig", *, out: ArrayT + ) -> ArrayT: ... 
+ + # + @overload # 0d, size=None (default); NOTE: dtype is ignored + def standard_gamma( + self, shape: _FloatLike_co, size: None = None, dtype: _DTypeLikeFloat = ..., out: None = None ) -> float: ... - @overload - def beta( - self, - a: _ArrayLikeFloat_co, - b: _ArrayLikeFloat_co, - size: _ShapeLike | None = None - ) -> NDArray[float64]: ... - @overload - def exponential(self, scale: _FloatLike_co = 1.0, size: None = None) -> float: ... - @overload - def exponential(self, scale: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None) -> NDArray[float64]: ... + @overload # >0d, dtype: f64 (default) + def standard_gamma( + self, shape: _NDArrayLikeFloat, size: None = None, dtype: _DTypeLikeF64 = ..., out: None = None + ) -> _ArrayF64: ... + @overload # >0d, dtype: f32 (keyword) + def standard_gamma( + self, shape: _NDArrayLikeFloat, size: None = None, *, dtype: _DTypeLikeF32, out: None = None + ) -> _ArrayF32: ... + @overload # >=0d, dtype: f64 (default) + def standard_gamma( + self, shape: _ArrayLikeFloat_co, size: None = None, dtype: _DTypeLikeF64 = ..., out: None = None + ) -> _ArrayF64 | Any: ... + @overload # >=0d, dtype: f32 (keyword) + def standard_gamma( + self, shape: _ArrayLikeFloat_co, size: None = None, *, dtype: _DTypeLikeF32, out: None = None + ) -> _ArrayF32 | Any: ... + @overload # >=0d, size=, dtype: f64 (default) + def standard_gamma( + self, shape: _ArrayLikeFloat_co, size: _ShapeLike, dtype: _DTypeLikeF64 = ..., out: None = None + ) -> _ArrayF64: ... + @overload # >=0d, size=, dtype: f32 + def standard_gamma( + self, shape: _ArrayLikeFloat_co, size: _ShapeLike, dtype: _DTypeLikeF32, *, out: None = None + ) -> _ArrayF32: ... + @overload # >=0d, dtype: f64 (default), out: f64 array (keyword) + def standard_gamma[ArrayT: _ArrayF64]( + self, shape: _ArrayLikeFloat_co, size: _ShapeLike | None = None, dtype: _DTypeLikeF64 = ..., *, out: ArrayT + ) -> ArrayT: ... 
+ @overload # >=0d, dtype: f32 (keyword), out: f32 array + def standard_gamma[ArrayT: _ArrayF32]( + self, shape: _ArrayLikeFloat_co, size: _ShapeLike | None = None, *, dtype: _DTypeLikeF32, out: ArrayT + ) -> ArrayT: ... + @overload # >=0d, dtype: f32 (positional), out: f32 array + def standard_gamma[ArrayT: _ArrayF32]( + self, shape: _ArrayLikeFloat_co, size: _ShapeLike | None, dtype: _DTypeLikeF32, out: ArrayT + ) -> ArrayT: ... # - @overload + @overload # 0d + def power(self, /, a: _FloatLike_co, size: None = None) -> float: ... + @overload # size= + def power(self, /, a: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >0d + def power(self, /, a: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >=0d + def power(self, /, a: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d + def pareto(self, /, a: _FloatLike_co, size: None = None) -> float: ... + @overload # size= + def pareto(self, /, a: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >0d + def pareto(self, /, a: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >=0d + def pareto(self, /, a: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d + def weibull(self, /, a: _FloatLike_co, size: None = None) -> float: ... + @overload # size= + def weibull(self, /, a: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >0d + def weibull(self, /, a: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >=0d + def weibull(self, /, a: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d + def standard_t(self, /, df: _FloatLike_co, size: None = None) -> float: ... + @overload # size= + def standard_t(self, /, df: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >0d + def standard_t(self, /, df: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... 
+ @overload # >=0d + def standard_t(self, /, df: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d + def chisquare(self, /, df: _FloatLike_co, size: None = None) -> float: ... + @overload # size= + def chisquare(self, /, df: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >0d + def chisquare(self, /, df: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >=0d + def chisquare(self, /, df: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d (default) + def exponential(self, /, scale: _FloatLike_co = 1.0, size: None = None) -> float: ... + @overload # size= (keyword) + def exponential(self, /, scale: _ArrayLikeFloat_co = 1.0, *, size: _ShapeLike) -> _ArrayF64: ... + @overload # size= (positional) + def exponential(self, /, scale: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >0d + def exponential(self, /, scale: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >=0d + def exponential(self, /, scale: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d (default) + def rayleigh(self, /, scale: _FloatLike_co = 1.0, size: None = None) -> float: ... + @overload # size= (keyword) + def rayleigh(self, /, scale: _ArrayLikeFloat_co = 1.0, *, size: _ShapeLike) -> _ArrayF64: ... + @overload # size= (positional) + def rayleigh(self, /, scale: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >0d + def rayleigh(self, /, scale: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >=0d + def rayleigh(self, /, scale: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d, 0d + def noncentral_chisquare(self, /, df: _FloatLike_co, nonc: _FloatLike_co, size: None = None) -> float: ... + @overload # size= + def noncentral_chisquare(self, /, df: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... 
+ @overload # >=0d, >0d + def noncentral_chisquare(self, /, df: _ArrayLikeFloat_co, nonc: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >0d, >=0d + def noncentral_chisquare(self, /, df: _NDArrayLikeFloat, nonc: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d + def noncentral_chisquare(self, /, df: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d, 0d + def f(self, /, dfnum: _FloatLike_co, dfden: _FloatLike_co, size: None = None) -> float: ... + @overload # size= + def f(self, /, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d + def f(self, /, dfnum: _ArrayLikeFloat_co, dfden: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >0d, >=0d + def f(self, /, dfnum: _NDArrayLikeFloat, dfden: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d (fallback) + def f(self, /, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d, 0d + def vonmises(self, /, mu: _FloatLike_co, kappa: _FloatLike_co, size: None = None) -> float: ... + @overload # size= + def vonmises(self, /, mu: _ArrayLikeFloat_co, kappa: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d + def vonmises(self, /, mu: _ArrayLikeFloat_co, kappa: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >0d, >=0d + def vonmises(self, /, mu: _NDArrayLikeFloat, kappa: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d (fallback) + def vonmises(self, /, mu: _ArrayLikeFloat_co, kappa: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d, 0d + def wald(self, /, mean: _FloatLike_co, scale: _FloatLike_co, size: None = None) -> float: ... 
+ @overload # size= + def wald(self, /, mean: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d + def wald(self, /, mean: _ArrayLikeFloat_co, scale: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >0d, >=0d + def wald(self, /, mean: _NDArrayLikeFloat, scale: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d (fallback) + def wald(self, /, mean: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d, 0d + def beta(self, /, a: _FloatLike_co, b: _FloatLike_co, size: None = None) -> float: ... + @overload # size= + def beta(self, /, a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d + def beta(self, /, a: _ArrayLikeFloat_co, b: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >0d, >=0d + def beta(self, /, a: _NDArrayLikeFloat, b: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d (fallback) + def beta(self, /, a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d, 0d (default) + def gamma(self, /, shape: _FloatLike_co, scale: _FloatLike_co = 1.0, size: None = None) -> float: ... + @overload # size= (positional) + def gamma(self, /, shape: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # size= (keyword) + def gamma(self, /, shape: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co = 1.0, *, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d + def gamma(self, /, shape: _ArrayLikeFloat_co, scale: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >0d, >=0d + def gamma(self, /, shape: _NDArrayLikeFloat, scale: _ArrayLikeFloat_co = 1.0, size: None = None) -> _ArrayF64: ... 
+ @overload # >=0d, >=0d (fallback) + def gamma(self, /, shape: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co = 1.0, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d (default), 0d (default) + def uniform(self, /, low: _FloatLike_co = 0.0, high: _FloatLike_co = 1.0, size: None = None) -> float: ... + @overload # >=0d, >=0d, size= (positional) + def uniform(self, /, low: _ArrayLikeFloat_co, high: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d (positional) + def uniform(self, /, low: _ArrayLikeFloat_co, high: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d, size= (keyword) + def uniform(self, /, low: _ArrayLikeFloat_co = 0.0, high: _ArrayLikeFloat_co = 1.0, *, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d (keyword) + def uniform(self, /, low: _ArrayLikeFloat_co = 0.0, *, high: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >0d, >=0d + def uniform(self, /, low: _NDArrayLikeFloat, high: _ArrayLikeFloat_co = 1.0, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d (fallback) + def uniform(self, /, low: _ArrayLikeFloat_co = 0.0, high: _ArrayLikeFloat_co = 1.0, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d (default), 0d (default) + def normal(self, /, loc: _FloatLike_co = 0.0, scale: _FloatLike_co = 1.0, size: None = None) -> float: ... + @overload # size= (positional) + def normal(self, /, loc: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d (positional) + def normal(self, /, loc: _ArrayLikeFloat_co, scale: _NDArrayLikeFloat, size: None) -> _ArrayF64: ... + @overload # size= (keyword) + def normal(self, /, loc: _ArrayLikeFloat_co = 0.0, scale: _ArrayLikeFloat_co = 1.0, *, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d (keyword) + def normal(self, /, loc: _ArrayLikeFloat_co = 0.0, *, scale: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... 
+ @overload # >0d, >=0d + def normal(self, /, loc: _NDArrayLikeFloat, scale: _ArrayLikeFloat_co = 1.0, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d + def normal(self, /, loc: _ArrayLikeFloat_co = 0.0, scale: _ArrayLikeFloat_co = 1.0, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d (default), 0d (default) + def gumbel(self, /, loc: _FloatLike_co = 0.0, scale: _FloatLike_co = 1.0, size: None = None) -> float: ... + @overload # size= (positional) + def gumbel(self, /, loc: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d (positional) + def gumbel(self, /, loc: _ArrayLikeFloat_co, scale: _NDArrayLikeFloat, size: None) -> _ArrayF64: ... + @overload # size= (keyword) + def gumbel(self, /, loc: _ArrayLikeFloat_co = 0.0, scale: _ArrayLikeFloat_co = 1.0, *, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d (keyword) + def gumbel(self, /, loc: _ArrayLikeFloat_co = 0.0, *, scale: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >0d, >=0d + def gumbel(self, /, loc: _NDArrayLikeFloat, scale: _ArrayLikeFloat_co = 1.0, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d + def gumbel(self, /, loc: _ArrayLikeFloat_co = 0.0, scale: _ArrayLikeFloat_co = 1.0, size: None = None) -> _ArrayF64 | Any: ... + + # + @overload # 0d (default), 0d (default) + def logistic(self, /, loc: _FloatLike_co = 0.0, scale: _FloatLike_co = 1.0, size: None = None) -> float: ... + @overload # size= (positional) + def logistic(self, /, loc: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d (positional) + def logistic(self, /, loc: _ArrayLikeFloat_co, scale: _NDArrayLikeFloat, size: None) -> _ArrayF64: ... + @overload # size= (keyword) + def logistic(self, /, loc: _ArrayLikeFloat_co = 0.0, scale: _ArrayLikeFloat_co = 1.0, *, size: _ShapeLike) -> _ArrayF64: ... 
+ @overload # >=0d, >0d (keyword) + def logistic(self, /, loc: _ArrayLikeFloat_co = 0.0, *, scale: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >0d, >=0d + def logistic(self, /, loc: _NDArrayLikeFloat, scale: _ArrayLikeFloat_co = 1.0, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d + def logistic( + self, /, loc: _ArrayLikeFloat_co = 0.0, scale: _ArrayLikeFloat_co = 1.0, size: None = None + ) -> _ArrayF64 | Any: ... + + # + @overload # 0d (default), 0d (default) + def laplace(self, /, loc: _FloatLike_co = 0.0, scale: _FloatLike_co = 1.0, size: None = None) -> float: ... + @overload # size= (positional) + def laplace(self, /, loc: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d (positional) + def laplace(self, /, loc: _ArrayLikeFloat_co, scale: _NDArrayLikeFloat, size: None) -> _ArrayF64: ... + @overload # size= (keyword) + def laplace(self, /, loc: _ArrayLikeFloat_co = 0.0, scale: _ArrayLikeFloat_co = 1.0, *, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d (keyword) + def laplace(self, /, loc: _ArrayLikeFloat_co = 0.0, *, scale: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >0d, >=0d + def laplace(self, /, loc: _NDArrayLikeFloat, scale: _ArrayLikeFloat_co = 1.0, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d + def laplace( + self, /, loc: _ArrayLikeFloat_co = 0.0, scale: _ArrayLikeFloat_co = 1.0, size: None = None + ) -> _ArrayF64 | Any: ... + + # + @overload # 0d (default), 0d (default) + def lognormal(self, /, mean: _FloatLike_co = 0.0, sigma: _FloatLike_co = 1.0, size: None = None) -> float: ... + @overload # size= (positional) + def lognormal(self, /, mean: _ArrayLikeFloat_co, sigma: _ArrayLikeFloat_co, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d (positional) + def lognormal(self, /, mean: _ArrayLikeFloat_co, sigma: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... 
+ @overload # size= (keyword) + def lognormal(self, /, mean: _ArrayLikeFloat_co = 0.0, sigma: _ArrayLikeFloat_co = 1.0, *, size: _ShapeLike) -> _ArrayF64: ... + @overload # >=0d, >0d (keyword) + def lognormal(self, /, mean: _ArrayLikeFloat_co = 0.0, *, sigma: _NDArrayLikeFloat, size: None = None) -> _ArrayF64: ... + @overload # >0d, >=0d + def lognormal(self, /, mean: _NDArrayLikeFloat, sigma: _ArrayLikeFloat_co = 1.0, size: None = None) -> _ArrayF64: ... + @overload # >=0d, >=0d + def lognormal( + self, /, mean: _ArrayLikeFloat_co = 0.0, sigma: _ArrayLikeFloat_co = 1.0, size: None = None + ) -> _ArrayF64 | Any: ... + + # + @overload # 0d, 0d, 0d + def triangular(self, /, left: _FloatLike_co, mode: _FloatLike_co, right: _FloatLike_co, size: None = None) -> float: ... + @overload # >=0d, >=0d, >=0d, size= + def triangular( + self, /, left: _ArrayLikeFloat_co, mode: _ArrayLikeFloat_co, right: _ArrayLikeFloat_co, size: _ShapeLike + ) -> _ArrayF64: ... + @overload # >=0d, >=0d, >0d + def triangular( + self, /, left: _ArrayLikeFloat_co, mode: _ArrayLikeFloat_co, right: _NDArrayLikeFloat, size: None = None + ) -> _ArrayF64: ... + @overload # >=0d, >0d, >=0d + def triangular( + self, /, left: _ArrayLikeFloat_co, mode: _NDArrayLikeFloat, right: _ArrayLikeFloat_co, size: None = None + ) -> _ArrayF64: ... + @overload # >0d, >=0d, >=0d + def triangular( + self, /, left: _NDArrayLikeFloat, mode: _ArrayLikeFloat_co, right: _ArrayLikeFloat_co, size: None = None + ) -> _ArrayF64: ... + @overload # >=0d, >=0d, >=0d (fallback) + def triangular( + self, /, left: _ArrayLikeFloat_co, mode: _ArrayLikeFloat_co, right: _ArrayLikeFloat_co, size: None = None + ) -> _ArrayF64 | Any: ... + + # + @overload # 0d, 0d, 0d + def noncentral_f(self, /, dfnum: _FloatLike_co, dfden: _FloatLike_co, nonc: _FloatLike_co, size: None = None) -> float: ... 
+ @overload # >=0d, >=0d, >=0d, size= + def noncentral_f( + self, /, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: _ShapeLike + ) -> _ArrayF64: ... + @overload # >=0d, >=0d, >0d + def noncentral_f( + self, /, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, nonc: _NDArrayLikeFloat, size: None = None + ) -> _ArrayF64: ... + @overload # >=0d, >0d, >=0d + def noncentral_f( + self, /, dfnum: _ArrayLikeFloat_co, dfden: _NDArrayLikeFloat, nonc: _ArrayLikeFloat_co, size: None = None + ) -> _ArrayF64: ... + @overload # >0d, >=0d, >=0d + def noncentral_f( + self, /, dfnum: _NDArrayLikeFloat, dfden: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: None = None + ) -> _ArrayF64: ... + @overload # >=0d, >=0d, >=0d (fallback) + def noncentral_f( + self, /, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: None = None + ) -> _ArrayF64 | Any: ... + + ### + # discrete + + # + @overload # 0d bool | int + def integers[AnyIntT: (bool, int)]( + self, low: int, high: int | None = None, size: None = None, *, dtype: type[AnyIntT], endpoint: bool = False + ) -> AnyIntT: ... + @overload # 0d integer dtype + def integers[ScalarT: np.integer | np.bool]( + self, low: int, high: int | None = None, size: None = None, *, dtype: _DTypeLike[ScalarT], endpoint: bool = False + ) -> ScalarT: ... + @overload # 0d int64 (default) def integers( - self, - low: int, - high: int | None = None, - size: None = None, - dtype: _DTypeLike[np.int64] | _Int64Codes = ..., - endpoint: bool = False, + self, low: int, high: int | None = None, size: None = None, dtype: _DTypeLikeI64 = ..., endpoint: bool = False ) -> np.int64: ... - @overload - def integers( - self, - low: int, - high: int | None = None, - size: None = None, - *, - dtype: type[bool], - endpoint: bool = False, - ) -> bool: ... 
- @overload - def integers( - self, - low: int, - high: int | None = None, - size: None = None, - *, - dtype: type[int], - endpoint: bool = False, - ) -> int: ... - @overload + @overload # 0d unknown def integers( + self, low: int, high: int | None = None, size: None = None, dtype: DTypeLike | None = ..., endpoint: bool = False + ) -> Any: ... + @overload # integer dtype, size= + def integers[ScalarT: np.integer | np.bool]( self, - low: int, - high: int | None = None, - size: None = None, - *, - dtype: _DTypeLike[np.bool] | _BoolCodes, - endpoint: bool = False, - ) -> np.bool: ... - @overload - def integers[ScalarT: np.integer]( - self, - low: int, - high: int | None = None, - size: None = None, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, *, + size: _ShapeLike, dtype: _DTypeLike[ScalarT], endpoint: bool = False, - ) -> ScalarT: ... - @overload + ) -> NDArray[ScalarT]: ... + @overload # int64 (default), size= def integers( self, low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, - size: _ShapeLike | None = None, - dtype: _DTypeLike[np.int64] | _Int64Codes = ..., + *, + size: _ShapeLike, + dtype: _DTypeLikeI64 = ..., endpoint: bool = False, ) -> NDArray[np.int64]: ... - @overload + @overload # unknown, size= def integers( self, low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, - size: _ShapeLike | None = None, *, - dtype: _DTypeLikeBool, + size: _ShapeLike, + dtype: DTypeLike | None = ..., endpoint: bool = False, - ) -> NDArray[np.bool]: ... - @overload - def integers[ScalarT: np.integer]( + ) -> np.ndarray: ... + @overload # >=0d, integer dtype + def integers[ScalarT: np.integer | np.bool]( self, low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, @@ -257,231 +518,202 @@ class Generator: *, dtype: _DTypeLike[ScalarT], endpoint: bool = False, - ) -> NDArray[ScalarT]: ... 
- @overload - def integers( - self, - low: int, - high: int | None = None, - size: None = None, - *, - dtype: _Int8Codes, - endpoint: bool = False, - ) -> np.int8: ... - @overload + ) -> NDArray[ScalarT] | Any: ... + @overload # >=0d, int64 (default) def integers( self, low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, - *, - dtype: _Int8Codes, - endpoint: bool = False, - ) -> NDArray[np.int8]: ... - @overload - def integers( - self, - low: int, - high: int | None = None, - size: None = None, - *, - dtype: _UInt8Codes, + dtype: _DTypeLikeI64 = ..., endpoint: bool = False, - ) -> np.uint8: ... - @overload + ) -> NDArray[np.int64] | Any: ... + @overload # >=0d, unknown def integers( self, low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, - *, - dtype: _UInt8Codes, + dtype: DTypeLike | None = ..., endpoint: bool = False, - ) -> NDArray[np.uint8]: ... - @overload - def integers( + ) -> np.ndarray | Any: ... + + # + @overload # 0d + def zipf(self, /, a: _FloatLike_co, size: None = None) -> int: ... + @overload # size= + def zipf(self, /, a: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.int64]: ... + @overload # >0d + def zipf(self, /, a: _NDArrayLikeFloat, size: None = None) -> NDArray[np.int64]: ... + @overload # >=0d + def zipf(self, /, a: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.int64] | Any: ... + + # + @overload # 0d + def geometric(self, /, p: _FloatLike_co, size: None = None) -> int: ... + @overload # size= + def geometric(self, /, p: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.int64]: ... + @overload # >0d + def geometric(self, /, p: _NDArrayLikeFloat, size: None = None) -> NDArray[np.int64]: ... + @overload # >=0d + def geometric(self, /, p: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.int64] | Any: ... + + # + @overload # 0d + def logseries(self, /, p: _FloatLike_co, size: None = None) -> int: ... 
+ @overload # size= + def logseries(self, /, p: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.int64]: ... + @overload # >0d + def logseries(self, /, p: _NDArrayLikeFloat, size: None = None) -> NDArray[np.int64]: ... + @overload # >=0d + def logseries(self, /, p: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.int64] | Any: ... + + # + @overload # 0d (default) + def poisson(self, /, lam: _FloatLike_co = 1.0, size: None = None) -> int: ... + @overload # size= (keyword) + def poisson(self, /, lam: _ArrayLikeFloat_co = 1.0, *, size: _ShapeLike) -> NDArray[np.int64]: ... + @overload # size= (positional) + def poisson(self, /, lam: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.int64]: ... + @overload # >0d + def poisson(self, /, lam: _NDArrayLikeFloat, size: None = None) -> NDArray[np.int64]: ... + @overload # >=0d + def poisson(self, /, lam: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.int64] | Any: ... + + # + @overload # 0d, 0d + def binomial(self, /, n: int, p: _FloatLike_co, size: None = None) -> int: ... + @overload # size= + def binomial(self, /, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.int64]: ... + @overload # >=0d, >0d + def binomial(self, /, n: _ArrayLikeInt_co, p: _NDArrayLikeFloat, size: None = None) -> NDArray[np.int64]: ... + @overload # >0d, >=0d + def binomial(self, /, n: _NDArrayLikeInt, p: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.int64]: ... + @overload # >=0d, >=0d + def binomial(self, /, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.int64] | Any: ... + + # + @overload # 0d, 0d + def negative_binomial(self, /, n: _FloatLike_co, p: _FloatLike_co, size: None = None) -> int: ... + @overload # size= + def negative_binomial(self, /, n: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.int64]: ... 
+ @overload # >=0d, >0d + def negative_binomial(self, /, n: _ArrayLikeFloat_co, p: _NDArrayLikeFloat, size: None = None) -> NDArray[np.int64]: ... + @overload # >0d, >=0d + def negative_binomial(self, /, n: _NDArrayLikeFloat, p: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.int64]: ... + @overload # >=0d, >=0d + def negative_binomial( + self, /, n: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co, size: None = None + ) -> NDArray[np.int64] | Any: ... + + # + @overload # 0d, 0d, 0d + def hypergeometric(self, /, ngood: int, nbad: int, nsample: int, size: None = None) -> int: ... + @overload # size= + def hypergeometric( + self, /, ngood: _ArrayLikeInt_co, nbad: _ArrayLikeInt_co, nsample: _ArrayLikeInt_co, size: _ShapeLike + ) -> NDArray[np.int64]: ... + @overload # >=0d, >=0d, >0d + def hypergeometric( + self, /, ngood: _ArrayLikeInt_co, nbad: _ArrayLikeInt_co, nsample: _NDArrayLikeInt, size: None = None + ) -> NDArray[np.int64] | Any: ... + @overload # >=0d, >0d, >=0d + def hypergeometric( + self, /, ngood: _ArrayLikeInt_co, nbad: _NDArrayLikeInt, nsample: _ArrayLikeInt_co, size: None = None + ) -> NDArray[np.int64] | Any: ... + @overload # >0d, >=0d, >=0d + def hypergeometric( + self, /, ngood: _NDArrayLikeInt, nbad: _ArrayLikeInt_co, nsample: _ArrayLikeInt_co, size: None = None + ) -> NDArray[np.int64] | Any: ... + @overload # >=0d, >=0d, >=0d + def hypergeometric( + self, /, ngood: _ArrayLikeInt_co, nbad: _ArrayLikeInt_co, nsample: _ArrayLikeInt_co, size: None = None + ) -> NDArray[np.int64] | Any: ... + + ### + # multivariate + + # + def dirichlet(self, /, alpha: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> _ArrayF64: ... 
+ + # + def multivariate_normal( self, - low: int, - high: int | None = None, - size: None = None, + /, + mean: _ArrayLikeFloat_co, + cov: _ArrayLikeFloat_co, + size: _ShapeLike | None = None, + check_valid: Literal["warn", "raise", "ignore"] = "warn", + tol: float = 1e-8, *, - dtype: _Int16Codes, - endpoint: bool = False, - ) -> np.int16: ... - @overload - def integers( + method: Literal["svd", "eigh", "cholesky"] = "svd", + ) -> _ArrayF64: ... + + # + def multinomial( + self, /, n: _ArrayLikeInt_co, pvals: _ArrayLikeFloat_co, size: _ShapeLike | None = None + ) -> NDArray[np.int64]: ... + + # + def multivariate_hypergeometric( self, - low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = None, + /, + colors: _ArrayLikeInt_co, + nsample: int, size: _ShapeLike | None = None, - *, - dtype: _Int16Codes, - endpoint: bool = False, - ) -> NDArray[np.int16]: ... + method: Literal["marginals", "count"] = "marginals", + ) -> NDArray[np.int64]: ... + + ### + # resampling + + # axis must be 0 for MutableSequence @overload - def integers( - self, - low: int, - high: int | None = None, - size: None = None, - *, - dtype: _UInt16Codes, - endpoint: bool = False, - ) -> np.uint16: ... - @overload - def integers( - self, - low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = None, - size: _ShapeLike | None = None, - *, - dtype: _UInt16Codes, - endpoint: bool = False, - ) -> NDArray[np.uint16]: ... - @overload - def integers( - self, - low: int, - high: int | None = None, - size: None = None, - *, - dtype: _Int32Codes, - endpoint: bool = False, - ) -> np.int32: ... - @overload - def integers( - self, - low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = None, - size: _ShapeLike | None = None, - *, - dtype: _Int32Codes, - endpoint: bool = False, - ) -> NDArray[np.int32]: ... - @overload - def integers( - self, - low: int, - high: int | None = None, - size: None = None, - *, - dtype: _UInt32Codes, - endpoint: bool = False, - ) -> np.uint32: ... 
- @overload - def integers( - self, - low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = None, - size: _ShapeLike | None = None, - *, - dtype: _UInt32Codes, - endpoint: bool = False, - ) -> NDArray[np.uint32]: ... - @overload - def integers( - self, - low: int, - high: int | None = None, - size: None = None, - *, - dtype: _UInt64Codes, - endpoint: bool = False, - ) -> np.uint64: ... - @overload - def integers( - self, - low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = None, - size: _ShapeLike | None = None, - *, - dtype: _UInt64Codes, - endpoint: bool = False, - ) -> NDArray[np.uint64]: ... + def shuffle(self, /, x: np.ndarray, axis: int = 0) -> None: ... @overload - def integers( - self, - low: int, - high: int | None = None, - size: None = None, - *, - dtype: _IntPCodes, - endpoint: bool = False, - ) -> np.intp: ... + def shuffle(self, /, x: MutableSequence[Any], axis: Literal[0] = 0) -> None: ... + + # @overload - def integers( - self, - low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = None, - size: _ShapeLike | None = None, - *, - dtype: _IntPCodes, - endpoint: bool = False, - ) -> NDArray[np.intp]: ... + def permutation(self, /, x: int, axis: int = 0) -> NDArray[np.int64]: ... @overload - def integers( - self, - low: int, - high: int | None = None, - size: None = None, - *, - dtype: _UIntPCodes, - endpoint: bool = False, - ) -> np.uintp: ... + def permutation(self, /, x: ArrayLike, axis: int = 0) -> np.ndarray: ... + + # @overload - def integers( - self, - low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = None, - size: _ShapeLike | None = None, - *, - dtype: _UIntPCodes, - endpoint: bool = False, - ) -> NDArray[np.uintp]: ... + def permuted[ArrayT: np.ndarray](self, /, x: ArrayT, *, axis: int | None = None, out: None = None) -> ArrayT: ... @overload - def integers( - self, - low: int, - high: int | None = None, - size: None = None, - dtype: DTypeLike | None = ..., - endpoint: bool = False, - ) -> Any: ... 
+ def permuted(self, /, x: ArrayLike, *, axis: int | None = None, out: None = None) -> np.ndarray: ... @overload - def integers( - self, - low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = None, - size: _ShapeLike | None = None, - dtype: DTypeLike | None = ..., - endpoint: bool = False, - ) -> NDArray[Any]: ... + def permuted[ArrayT: np.ndarray](self, /, x: ArrayLike, *, axis: int | None = None, out: ArrayT) -> ArrayT: ... - # TODO: Use a TypeVar _T here to get away from Any output? - # Should be int->NDArray[int64], ArrayLike[_T] -> _T | NDArray[Any] - @overload + # + @overload # >=0d int, size=None (default) def choice( self, - a: int, + /, + a: int | _NestedSequence[int], size: None = None, replace: bool = True, p: _ArrayLikeFloat_co | None = None, axis: int = 0, shuffle: bool = True, ) -> int: ... - @overload - def choice( + @overload # >=0d known, size=None (default) + def choice[ScalarT: np.generic]( self, - a: int, - size: _ShapeLike | None = None, + /, + a: _ArrayLike[ScalarT], + size: None = None, replace: bool = True, p: _ArrayLikeFloat_co | None = None, axis: int = 0, shuffle: bool = True, - ) -> NDArray[int64]: ... - @overload + ) -> ScalarT: ... + @overload # >=0d unknown, size=None (default) def choice( self, + /, a: ArrayLike, size: None = None, replace: bool = True, @@ -489,356 +721,38 @@ class Generator: axis: int = 0, shuffle: bool = True, ) -> Any: ... - @overload + @overload # >=0d int, size= def choice( self, - a: ArrayLike, - size: _ShapeLike | None = None, + /, + a: int | _NestedSequence[int], + size: _ShapeLike, replace: bool = True, p: _ArrayLikeFloat_co | None = None, axis: int = 0, shuffle: bool = True, - ) -> NDArray[Any]: ... - @overload - def uniform( - self, - low: _FloatLike_co = 0.0, - high: _FloatLike_co = 1.0, - size: None = None, - ) -> float: ... - @overload - def uniform( - self, - low: _ArrayLikeFloat_co = 0.0, - high: _ArrayLikeFloat_co = 1.0, - size: _ShapeLike | None = None, - ) -> NDArray[float64]: ... 
- @overload - def normal( - self, - loc: _FloatLike_co = 0.0, - scale: _FloatLike_co = 1.0, - size: None = None, - ) -> float: ... - @overload - def normal( - self, - loc: _ArrayLikeFloat_co = 0.0, - scale: _ArrayLikeFloat_co = 1.0, - size: _ShapeLike | None = None, - ) -> NDArray[float64]: ... - @overload - def standard_gamma( - self, - shape: _FloatLike_co, - size: None = None, - dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ..., - out: None = None, - ) -> float: ... - @overload - def standard_gamma( - self, - shape: _ArrayLikeFloat_co, - size: _ShapeLike | None = None, - ) -> NDArray[float64]: ... - @overload - def standard_gamma( - self, - shape: _ArrayLikeFloat_co, - *, - out: NDArray[float64] | None = None, - ) -> NDArray[float64]: ... - @overload - def standard_gamma( - self, - shape: _ArrayLikeFloat_co, - size: _ShapeLike | None = None, - dtype: _DTypeLikeFloat32 = ..., - out: NDArray[float32] | None = None, - ) -> NDArray[float32]: ... - @overload - def standard_gamma( - self, - shape: _ArrayLikeFloat_co, - size: _ShapeLike | None = None, - dtype: _DTypeLikeFloat64 = ..., - out: NDArray[float64] | None = None, - ) -> NDArray[float64]: ... - @overload - def gamma( - self, shape: _FloatLike_co, scale: _FloatLike_co = 1.0, size: None = None - ) -> float: ... - @overload - def gamma( - self, - shape: _ArrayLikeFloat_co, - scale: _ArrayLikeFloat_co = 1.0, - size: _ShapeLike | None = None, - ) -> NDArray[float64]: ... - @overload - def f( - self, dfnum: _FloatLike_co, dfden: _FloatLike_co, size: None = None - ) -> float: ... - @overload - def f( - self, - dfnum: _ArrayLikeFloat_co, - dfden: _ArrayLikeFloat_co, - size: _ShapeLike | None = None - ) -> NDArray[float64]: ... - @overload - def noncentral_f( - self, - dfnum: _FloatLike_co, - dfden: _FloatLike_co, - nonc: _FloatLike_co, - size: None = None, - ) -> float: ... 
- @overload - def noncentral_f( - self, - dfnum: _ArrayLikeFloat_co, - dfden: _ArrayLikeFloat_co, - nonc: _ArrayLikeFloat_co, - size: _ShapeLike | None = None, - ) -> NDArray[float64]: ... - @overload - def chisquare(self, df: _FloatLike_co, size: None = None) -> float: ... - @overload - def chisquare( - self, df: _ArrayLikeFloat_co, size: _ShapeLike | None = None - ) -> NDArray[float64]: ... - @overload - def noncentral_chisquare( - self, df: _FloatLike_co, nonc: _FloatLike_co, size: None = None - ) -> float: ... - @overload - def noncentral_chisquare( - self, - df: _ArrayLikeFloat_co, - nonc: _ArrayLikeFloat_co, - size: _ShapeLike | None = None - ) -> NDArray[float64]: ... - @overload - def standard_t(self, df: _FloatLike_co, size: None = None) -> float: ... - @overload - def standard_t( - self, df: _ArrayLikeFloat_co, size: None = None - ) -> NDArray[float64]: ... - @overload - def standard_t( - self, df: _ArrayLikeFloat_co, size: _ShapeLike | None = None - ) -> NDArray[float64]: ... - @overload - def vonmises( - self, mu: _FloatLike_co, kappa: _FloatLike_co, size: None = None - ) -> float: ... - @overload - def vonmises( - self, - mu: _ArrayLikeFloat_co, - kappa: _ArrayLikeFloat_co, - size: _ShapeLike | None = None - ) -> NDArray[float64]: ... - @overload - def pareto(self, a: _FloatLike_co, size: None = None) -> float: ... - @overload - def pareto( - self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None - ) -> NDArray[float64]: ... - @overload - def weibull(self, a: _FloatLike_co, size: None = None) -> float: ... - @overload - def weibull( - self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None - ) -> NDArray[float64]: ... - @overload - def power(self, a: _FloatLike_co, size: None = None) -> float: ... - @overload - def power( - self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None - ) -> NDArray[float64]: ... - @overload - def standard_cauchy(self, size: None = None) -> float: ... 
- @overload - def standard_cauchy(self, size: _ShapeLike | None = None) -> NDArray[float64]: ... - @overload - def laplace( - self, - loc: _FloatLike_co = 0.0, - scale: _FloatLike_co = 1.0, - size: None = None, - ) -> float: ... - @overload - def laplace( - self, - loc: _ArrayLikeFloat_co = 0.0, - scale: _ArrayLikeFloat_co = 1.0, - size: _ShapeLike | None = None, - ) -> NDArray[float64]: ... - @overload - def gumbel( - self, - loc: _FloatLike_co = 0.0, - scale: _FloatLike_co = 1.0, - size: None = None, - ) -> float: ... - @overload - def gumbel( - self, - loc: _ArrayLikeFloat_co = 0.0, - scale: _ArrayLikeFloat_co = 1.0, - size: _ShapeLike | None = None, - ) -> NDArray[float64]: ... - @overload - def logistic( - self, - loc: _FloatLike_co = 0.0, - scale: _FloatLike_co = 1.0, - size: None = None, - ) -> float: ... - @overload - def logistic( - self, - loc: _ArrayLikeFloat_co = 0.0, - scale: _ArrayLikeFloat_co = 1.0, - size: _ShapeLike | None = None, - ) -> NDArray[float64]: ... - @overload - def lognormal( - self, - mean: _FloatLike_co = 0.0, - sigma: _FloatLike_co = 1.0, - size: None = None, - ) -> float: ... - @overload - def lognormal( - self, - mean: _ArrayLikeFloat_co = 0.0, - sigma: _ArrayLikeFloat_co = 1.0, - size: _ShapeLike | None = None, - ) -> NDArray[float64]: ... - @overload - def rayleigh(self, scale: _FloatLike_co = 1.0, size: None = None) -> float: ... - @overload - def rayleigh( - self, scale: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None - ) -> NDArray[float64]: ... - @overload - def wald( - self, mean: _FloatLike_co, scale: _FloatLike_co, size: None = None - ) -> float: ... - @overload - def wald( - self, - mean: _ArrayLikeFloat_co, - scale: _ArrayLikeFloat_co, - size: _ShapeLike | None = None - ) -> NDArray[float64]: ... - @overload - def triangular( - self, - left: _FloatLike_co, - mode: _FloatLike_co, - right: _FloatLike_co, - size: None = None, - ) -> float: ... 
- @overload - def triangular( - self, - left: _ArrayLikeFloat_co, - mode: _ArrayLikeFloat_co, - right: _ArrayLikeFloat_co, - size: _ShapeLike | None = None, - ) -> NDArray[float64]: ... - @overload - def binomial(self, n: int, p: _FloatLike_co, size: None = None) -> int: ... - @overload - def binomial( - self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None - ) -> NDArray[int64]: ... - @overload - def negative_binomial( - self, n: _FloatLike_co, p: _FloatLike_co, size: None = None - ) -> int: ... - @overload - def negative_binomial( - self, - n: _ArrayLikeFloat_co, - p: _ArrayLikeFloat_co, - size: _ShapeLike | None = None - ) -> NDArray[int64]: ... - @overload - def poisson(self, lam: _FloatLike_co = 1.0, size: None = None) -> int: ... - @overload - def poisson( - self, lam: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None - ) -> NDArray[int64]: ... - @overload - def zipf(self, a: _FloatLike_co, size: None = None) -> int: ... - @overload - def zipf( - self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None - ) -> NDArray[int64]: ... - @overload - def geometric(self, p: _FloatLike_co, size: None = None) -> int: ... - @overload - def geometric( - self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None - ) -> NDArray[int64]: ... - @overload - def hypergeometric( - self, ngood: int, nbad: int, nsample: int, size: None = None - ) -> int: ... - @overload - def hypergeometric( - self, - ngood: _ArrayLikeInt_co, - nbad: _ArrayLikeInt_co, - nsample: _ArrayLikeInt_co, - size: _ShapeLike | None = None, - ) -> NDArray[int64]: ... - @overload - def logseries(self, p: _FloatLike_co, size: None = None) -> int: ... - @overload - def logseries( - self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None - ) -> NDArray[int64]: ... - def multivariate_normal( + ) -> NDArray[np.int64]: ... 
+ @overload # >=0d known, size= + def choice[ScalarT: np.generic]( self, - mean: _ArrayLikeFloat_co, - cov: _ArrayLikeFloat_co, - size: _ShapeLike | None = None, - check_valid: Literal["warn", "raise", "ignore"] = "warn", - tol: float = 1e-8, - *, - method: Literal["svd", "eigh", "cholesky"] = "svd", - ) -> NDArray[float64]: ... - def multinomial( - self, n: _ArrayLikeInt_co, - pvals: _ArrayLikeFloat_co, - size: _ShapeLike | None = None - ) -> NDArray[int64]: ... - def multivariate_hypergeometric( + /, + a: _ArrayLike[ScalarT], + size: _ShapeLike, + replace: bool = True, + p: _ArrayLikeFloat_co | None = None, + axis: int = 0, + shuffle: bool = True, + ) -> NDArray[ScalarT]: ... + @overload # >=0d unknown, size= + def choice( self, - colors: _ArrayLikeInt_co, - nsample: int, - size: _ShapeLike | None = None, - method: Literal["marginals", "count"] = "marginals", - ) -> NDArray[int64]: ... - def dirichlet( - self, alpha: _ArrayLikeFloat_co, size: _ShapeLike | None = None - ) -> NDArray[float64]: ... - def permuted( - self, x: ArrayLike, *, axis: int | None = None, out: NDArray[Any] | None = None - ) -> NDArray[Any]: ... - - # axis must be 0 for MutableSequence - @overload - def shuffle(self, /, x: np.ndarray, axis: int = 0) -> None: ... - @overload - def shuffle(self, /, x: MutableSequence[Any], axis: Literal[0] = 0) -> None: ... + /, + a: ArrayLike, + size: _ShapeLike, + replace: bool = True, + p: _ArrayLikeFloat_co | None = None, + axis: int = 0, + shuffle: bool = True, + ) -> np.ndarray: ... -def default_rng( - seed: _ArrayLikeInt_co | SeedSequence | BitGenerator | Generator | RandomState | None = None -) -> Generator: ... +def default_rng(seed: _ArrayLikeInt_co | SeedSequence | BitGenerator | Generator | RandomState | None = None) -> Generator: ... 
diff --git a/numpy/typing/tests/data/reveal/random.pyi b/numpy/typing/tests/data/reveal/random.pyi index e188eb02893f..4d00ef0d99aa 100644 --- a/numpy/typing/tests/data/reveal/random.pyi +++ b/numpy/typing/tests/data/reveal/random.pyi @@ -349,11 +349,11 @@ assert_type(def_gen.gumbel(0.5, 0.5), float) assert_type(def_gen.gumbel(0.5, 0.5, size=None), float) assert_type(def_gen.gumbel(0.5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(def_gen.gumbel(D_arr_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(def_gen.gumbel(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.gumbel(0.5, D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(def_gen.gumbel(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(def_gen.gumbel(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) assert_type(def_gen.gumbel(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(def_gen.gumbel(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.gumbel(0.5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(def_gen.gumbel(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) assert_type(def_gen.gumbel(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) assert_type(def_gen.gumbel(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) @@ -363,11 +363,11 @@ assert_type(def_gen.laplace(0.5, 0.5), float) assert_type(def_gen.laplace(0.5, 0.5, size=None), float) assert_type(def_gen.laplace(0.5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(def_gen.laplace(D_arr_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(def_gen.laplace(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.laplace(0.5, D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(def_gen.laplace(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(def_gen.laplace(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) assert_type(def_gen.laplace(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(def_gen.laplace(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) 
+assert_type(def_gen.laplace(0.5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(def_gen.laplace(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) assert_type(def_gen.laplace(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) assert_type(def_gen.laplace(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) @@ -377,11 +377,11 @@ assert_type(def_gen.logistic(0.5, 0.5), float) assert_type(def_gen.logistic(0.5, 0.5, size=None), float) assert_type(def_gen.logistic(0.5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(def_gen.logistic(D_arr_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(def_gen.logistic(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.logistic(0.5, D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(def_gen.logistic(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(def_gen.logistic(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) assert_type(def_gen.logistic(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(def_gen.logistic(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.logistic(0.5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(def_gen.logistic(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) assert_type(def_gen.logistic(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) assert_type(def_gen.logistic(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) @@ -419,11 +419,11 @@ assert_type(def_gen.normal(0.5, 0.5), float) assert_type(def_gen.normal(0.5, 0.5, size=None), float) assert_type(def_gen.normal(0.5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(def_gen.normal(D_arr_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(def_gen.normal(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.normal(0.5, D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(def_gen.normal(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(def_gen.normal(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) assert_type(def_gen.normal(D_arr_like_0p5, 
0.5), npt.NDArray[np.float64]) -assert_type(def_gen.normal(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.normal(0.5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(def_gen.normal(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) assert_type(def_gen.normal(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) assert_type(def_gen.normal(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) @@ -488,14 +488,14 @@ assert_type(def_gen.negative_binomial(I_arr_like_10, D_arr_like_0p5, size=1), np assert_type(def_gen.hypergeometric(20, 20, 10), int) assert_type(def_gen.hypergeometric(20, 20, 10, size=None), int) assert_type(def_gen.hypergeometric(20, 20, 10, size=1), npt.NDArray[np.int64]) -assert_type(def_gen.hypergeometric(I_arr_20, 20, 10), npt.NDArray[np.int64]) -assert_type(def_gen.hypergeometric(20, I_arr_20, 10), npt.NDArray[np.int64]) +assert_type(def_gen.hypergeometric(I_arr_20, 20, 10), npt.NDArray[np.int64] | Any) +assert_type(def_gen.hypergeometric(20, I_arr_20, 10), npt.NDArray[np.int64] | Any) assert_type(def_gen.hypergeometric(I_arr_20, 20, I_arr_like_10, size=1), npt.NDArray[np.int64]) assert_type(def_gen.hypergeometric(20, I_arr_20, 10, size=1), npt.NDArray[np.int64]) -assert_type(def_gen.hypergeometric(I_arr_like_20, 20, I_arr_10), npt.NDArray[np.int64]) -assert_type(def_gen.hypergeometric(20, I_arr_like_20, 10), npt.NDArray[np.int64]) -assert_type(def_gen.hypergeometric(I_arr_20, I_arr_20, 10), npt.NDArray[np.int64]) -assert_type(def_gen.hypergeometric(I_arr_like_20, I_arr_like_20, 10), npt.NDArray[np.int64]) +assert_type(def_gen.hypergeometric(I_arr_like_20, 20, I_arr_10), npt.NDArray[np.int64] | Any) +assert_type(def_gen.hypergeometric(20, I_arr_like_20, 10), npt.NDArray[np.int64] | Any) +assert_type(def_gen.hypergeometric(I_arr_20, I_arr_20, 10), npt.NDArray[np.int64] | Any) +assert_type(def_gen.hypergeometric(I_arr_like_20, I_arr_like_20, 10), npt.NDArray[np.int64] | Any) 
assert_type(def_gen.hypergeometric(I_arr_20, I_arr_20, I_arr_10, size=1), npt.NDArray[np.int64]) assert_type(def_gen.hypergeometric(I_arr_like_20, I_arr_like_20, I_arr_like_10, size=1), npt.NDArray[np.int64]) @@ -503,8 +503,8 @@ I_int64_100: npt.NDArray[np.int64] = np.array([100], dtype=np.int64) assert_type(def_gen.integers(0, 100), np.int64) assert_type(def_gen.integers(100), np.int64) -assert_type(def_gen.integers([100]), npt.NDArray[np.int64]) -assert_type(def_gen.integers(0, [100]), npt.NDArray[np.int64]) +assert_type(def_gen.integers([100]), npt.NDArray[np.int64] | Any) +assert_type(def_gen.integers(0, [100]), npt.NDArray[np.int64] | Any) I_bool_low: npt.NDArray[np.bool] = np.array([0], dtype=np.bool) I_bool_low_like: list[int] = [0] @@ -515,107 +515,59 @@ assert_type(def_gen.integers(2, dtype=bool), bool) assert_type(def_gen.integers(0, 2, dtype=bool), bool) assert_type(def_gen.integers(1, dtype=bool, endpoint=True), bool) assert_type(def_gen.integers(0, 1, dtype=bool, endpoint=True), bool) -assert_type(def_gen.integers(I_bool_low_like, 1, dtype=bool, endpoint=True), npt.NDArray[np.bool]) -assert_type(def_gen.integers(I_bool_high_open, dtype=bool), npt.NDArray[np.bool]) -assert_type(def_gen.integers(I_bool_low, I_bool_high_open, dtype=bool), npt.NDArray[np.bool]) -assert_type(def_gen.integers(0, I_bool_high_open, dtype=bool), npt.NDArray[np.bool]) -assert_type(def_gen.integers(I_bool_high_closed, dtype=bool, endpoint=True), npt.NDArray[np.bool]) -assert_type(def_gen.integers(I_bool_low, I_bool_high_closed, dtype=bool, endpoint=True), npt.NDArray[np.bool]) -assert_type(def_gen.integers(0, I_bool_high_closed, dtype=bool, endpoint=True), npt.NDArray[np.bool]) +assert_type(def_gen.integers(I_bool_low_like, 1, dtype=bool, endpoint=True), npt.NDArray[Any] | Any) +assert_type(def_gen.integers(I_bool_high_open, dtype=bool), npt.NDArray[Any] | Any) +assert_type(def_gen.integers(I_bool_low, I_bool_high_open, dtype=bool), npt.NDArray[Any] | Any) 
+assert_type(def_gen.integers(0, I_bool_high_open, dtype=bool), npt.NDArray[Any] | Any) +assert_type(def_gen.integers(I_bool_high_closed, dtype=bool, endpoint=True), npt.NDArray[Any] | Any) +assert_type(def_gen.integers(I_bool_low, I_bool_high_closed, dtype=bool, endpoint=True), npt.NDArray[Any] | Any) +assert_type(def_gen.integers(0, I_bool_high_closed, dtype=bool, endpoint=True), npt.NDArray[Any] | Any) assert_type(def_gen.integers(2, dtype=np.bool), np.bool) assert_type(def_gen.integers(0, 2, dtype=np.bool), np.bool) assert_type(def_gen.integers(1, dtype=np.bool, endpoint=True), np.bool) assert_type(def_gen.integers(0, 1, dtype=np.bool, endpoint=True), np.bool) -assert_type(def_gen.integers(I_bool_low_like, 1, dtype=np.bool, endpoint=True), npt.NDArray[np.bool]) -assert_type(def_gen.integers(I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool]) -assert_type(def_gen.integers(I_bool_low, I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool]) -assert_type(def_gen.integers(0, I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool]) -assert_type(def_gen.integers(I_bool_high_closed, dtype=np.bool, endpoint=True), npt.NDArray[np.bool]) -assert_type(def_gen.integers(I_bool_low, I_bool_high_closed, dtype=np.bool, endpoint=True), npt.NDArray[np.bool]) -assert_type(def_gen.integers(0, I_bool_high_closed, dtype=np.bool, endpoint=True), npt.NDArray[np.bool]) +assert_type(def_gen.integers(I_bool_low_like, 1, dtype=np.bool, endpoint=True), npt.NDArray[np.bool] | Any) +assert_type(def_gen.integers(I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool] | Any) +assert_type(def_gen.integers(I_bool_low, I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool] | Any) +assert_type(def_gen.integers(0, I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool] | Any) +assert_type(def_gen.integers(I_bool_high_closed, dtype=np.bool, endpoint=True), npt.NDArray[np.bool] | Any) +assert_type(def_gen.integers(I_bool_low, I_bool_high_closed, dtype=np.bool, endpoint=True), npt.NDArray[np.bool] | 
Any) +assert_type(def_gen.integers(0, I_bool_high_closed, dtype=np.bool, endpoint=True), npt.NDArray[np.bool] | Any) I_u1_low: npt.NDArray[np.uint8] = np.array([0], dtype=np.uint8) I_u1_low_like: list[int] = [0] I_u1_high_open: npt.NDArray[np.uint8] = np.array([255], dtype=np.uint8) I_u1_high_closed: npt.NDArray[np.uint8] = np.array([255], dtype=np.uint8) -assert_type(def_gen.integers(256, dtype="u1"), np.uint8) -assert_type(def_gen.integers(0, 256, dtype="u1"), np.uint8) -assert_type(def_gen.integers(255, dtype="u1", endpoint=True), np.uint8) -assert_type(def_gen.integers(0, 255, dtype="u1", endpoint=True), np.uint8) -assert_type(def_gen.integers(I_u1_low_like, 255, dtype="u1", endpoint=True), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(I_u1_low, I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(0, I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(I_u1_high_closed, dtype="u1", endpoint=True), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(I_u1_low, I_u1_high_closed, dtype="u1", endpoint=True), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(0, I_u1_high_closed, dtype="u1", endpoint=True), npt.NDArray[np.uint8]) - -assert_type(def_gen.integers(256, dtype="uint8"), np.uint8) -assert_type(def_gen.integers(0, 256, dtype="uint8"), np.uint8) -assert_type(def_gen.integers(255, dtype="uint8", endpoint=True), np.uint8) -assert_type(def_gen.integers(0, 255, dtype="uint8", endpoint=True), np.uint8) -assert_type(def_gen.integers(I_u1_low_like, 255, dtype="uint8", endpoint=True), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(I_u1_low, I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(0, I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(I_u1_high_closed, 
dtype="uint8", endpoint=True), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(I_u1_low, I_u1_high_closed, dtype="uint8", endpoint=True), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(0, I_u1_high_closed, dtype="uint8", endpoint=True), npt.NDArray[np.uint8]) - assert_type(def_gen.integers(256, dtype=np.uint8), np.uint8) assert_type(def_gen.integers(0, 256, dtype=np.uint8), np.uint8) assert_type(def_gen.integers(255, dtype=np.uint8, endpoint=True), np.uint8) assert_type(def_gen.integers(0, 255, dtype=np.uint8, endpoint=True), np.uint8) -assert_type(def_gen.integers(I_u1_low_like, 255, dtype=np.uint8, endpoint=True), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(I_u1_low, I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(0, I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(I_u1_high_closed, dtype=np.uint8, endpoint=True), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(I_u1_low, I_u1_high_closed, dtype=np.uint8, endpoint=True), npt.NDArray[np.uint8]) -assert_type(def_gen.integers(0, I_u1_high_closed, dtype=np.uint8, endpoint=True), npt.NDArray[np.uint8]) +assert_type(def_gen.integers(I_u1_low_like, 255, dtype=np.uint8, endpoint=True), npt.NDArray[np.uint8] | Any) +assert_type(def_gen.integers(I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8] | Any) +assert_type(def_gen.integers(I_u1_low, I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8] | Any) +assert_type(def_gen.integers(0, I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8] | Any) +assert_type(def_gen.integers(I_u1_high_closed, dtype=np.uint8, endpoint=True), npt.NDArray[np.uint8] | Any) +assert_type(def_gen.integers(I_u1_low, I_u1_high_closed, dtype=np.uint8, endpoint=True), npt.NDArray[np.uint8] | Any) +assert_type(def_gen.integers(0, I_u1_high_closed, dtype=np.uint8, endpoint=True), npt.NDArray[np.uint8] | Any) 
I_u2_low: npt.NDArray[np.uint16] = np.array([0], dtype=np.uint16) I_u2_low_like: list[int] = [0] I_u2_high_open: npt.NDArray[np.uint16] = np.array([65535], dtype=np.uint16) I_u2_high_closed: npt.NDArray[np.uint16] = np.array([65535], dtype=np.uint16) -assert_type(def_gen.integers(65536, dtype="u2"), np.uint16) -assert_type(def_gen.integers(0, 65536, dtype="u2"), np.uint16) -assert_type(def_gen.integers(65535, dtype="u2", endpoint=True), np.uint16) -assert_type(def_gen.integers(0, 65535, dtype="u2", endpoint=True), np.uint16) -assert_type(def_gen.integers(I_u2_low_like, 65535, dtype="u2", endpoint=True), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(I_u2_low, I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(0, I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(I_u2_high_closed, dtype="u2", endpoint=True), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(I_u2_low, I_u2_high_closed, dtype="u2", endpoint=True), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(0, I_u2_high_closed, dtype="u2", endpoint=True), npt.NDArray[np.uint16]) - -assert_type(def_gen.integers(65536, dtype="uint16"), np.uint16) -assert_type(def_gen.integers(0, 65536, dtype="uint16"), np.uint16) -assert_type(def_gen.integers(65535, dtype="uint16", endpoint=True), np.uint16) -assert_type(def_gen.integers(0, 65535, dtype="uint16", endpoint=True), np.uint16) -assert_type(def_gen.integers(I_u2_low_like, 65535, dtype="uint16", endpoint=True), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(I_u2_low, I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(0, I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(I_u2_high_closed, dtype="uint16", endpoint=True), npt.NDArray[np.uint16]) 
-assert_type(def_gen.integers(I_u2_low, I_u2_high_closed, dtype="uint16", endpoint=True), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(0, I_u2_high_closed, dtype="uint16", endpoint=True), npt.NDArray[np.uint16]) - assert_type(def_gen.integers(65536, dtype=np.uint16), np.uint16) assert_type(def_gen.integers(0, 65536, dtype=np.uint16), np.uint16) assert_type(def_gen.integers(65535, dtype=np.uint16, endpoint=True), np.uint16) assert_type(def_gen.integers(0, 65535, dtype=np.uint16, endpoint=True), np.uint16) -assert_type(def_gen.integers(I_u2_low_like, 65535, dtype=np.uint16, endpoint=True), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(I_u2_low, I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(0, I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(I_u2_high_closed, dtype=np.uint16, endpoint=True), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(I_u2_low, I_u2_high_closed, dtype=np.uint16, endpoint=True), npt.NDArray[np.uint16]) -assert_type(def_gen.integers(0, I_u2_high_closed, dtype=np.uint16, endpoint=True), npt.NDArray[np.uint16]) +assert_type(def_gen.integers(I_u2_low_like, 65535, dtype=np.uint16, endpoint=True), npt.NDArray[np.uint16] | Any) +assert_type(def_gen.integers(I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16] | Any) +assert_type(def_gen.integers(I_u2_low, I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16] | Any) +assert_type(def_gen.integers(0, I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16] | Any) +assert_type(def_gen.integers(I_u2_high_closed, dtype=np.uint16, endpoint=True), npt.NDArray[np.uint16] | Any) +assert_type(def_gen.integers(I_u2_low, I_u2_high_closed, dtype=np.uint16, endpoint=True), npt.NDArray[np.uint16] | Any) +assert_type(def_gen.integers(0, I_u2_high_closed, dtype=np.uint16, endpoint=True), npt.NDArray[np.uint16] | Any) I_u4_low: 
npt.NDArray[np.uint32] = np.array([0], dtype=np.uint32) I_u4_low_like: list[int] = [0] @@ -626,266 +578,122 @@ assert_type(def_gen.integers(4294967296, dtype=np.int_), np.int_) assert_type(def_gen.integers(0, 4294967296, dtype=np.int_), np.int_) assert_type(def_gen.integers(4294967295, dtype=np.int_, endpoint=True), np.int_) assert_type(def_gen.integers(0, 4294967295, dtype=np.int_, endpoint=True), np.int_) -assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.int_, endpoint=True), npt.NDArray[np.int_]) -assert_type(def_gen.integers(I_u4_high_open, dtype=np.int_), npt.NDArray[np.int_]) -assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.int_), npt.NDArray[np.int_]) -assert_type(def_gen.integers(0, I_u4_high_open, dtype=np.int_), npt.NDArray[np.int_]) -assert_type(def_gen.integers(I_u4_high_closed, dtype=np.int_, endpoint=True), npt.NDArray[np.int_]) -assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.int_, endpoint=True), npt.NDArray[np.int_]) -assert_type(def_gen.integers(0, I_u4_high_closed, dtype=np.int_, endpoint=True), npt.NDArray[np.int_]) - -assert_type(def_gen.integers(4294967296, dtype="u4"), np.uint32) -assert_type(def_gen.integers(0, 4294967296, dtype="u4"), np.uint32) -assert_type(def_gen.integers(4294967295, dtype="u4", endpoint=True), np.uint32) -assert_type(def_gen.integers(0, 4294967295, dtype="u4", endpoint=True), np.uint32) -assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype="u4", endpoint=True), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(0, I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(I_u4_high_closed, dtype="u4", endpoint=True), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype="u4", endpoint=True), npt.NDArray[np.uint32]) 
-assert_type(def_gen.integers(0, I_u4_high_closed, dtype="u4", endpoint=True), npt.NDArray[np.uint32]) - -assert_type(def_gen.integers(4294967296, dtype="uint32"), np.uint32) -assert_type(def_gen.integers(0, 4294967296, dtype="uint32"), np.uint32) -assert_type(def_gen.integers(4294967295, dtype="uint32", endpoint=True), np.uint32) -assert_type(def_gen.integers(0, 4294967295, dtype="uint32", endpoint=True), np.uint32) -assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype="uint32", endpoint=True), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(0, I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(I_u4_high_closed, dtype="uint32", endpoint=True), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype="uint32", endpoint=True), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(0, I_u4_high_closed, dtype="uint32", endpoint=True), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.int_, endpoint=True), npt.NDArray[np.int_] | Any) +assert_type(def_gen.integers(I_u4_high_open, dtype=np.int_), npt.NDArray[np.int_] | Any) +assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.int_), npt.NDArray[np.int_] | Any) +assert_type(def_gen.integers(0, I_u4_high_open, dtype=np.int_), npt.NDArray[np.int_] | Any) +assert_type(def_gen.integers(I_u4_high_closed, dtype=np.int_, endpoint=True), npt.NDArray[np.int_] | Any) +assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.int_, endpoint=True), npt.NDArray[np.int_] | Any) +assert_type(def_gen.integers(0, I_u4_high_closed, dtype=np.int_, endpoint=True), npt.NDArray[np.int_] | Any) assert_type(def_gen.integers(4294967296, dtype=np.uint32), np.uint32) assert_type(def_gen.integers(0, 4294967296, dtype=np.uint32), 
np.uint32) assert_type(def_gen.integers(4294967295, dtype=np.uint32, endpoint=True), np.uint32) assert_type(def_gen.integers(0, 4294967295, dtype=np.uint32, endpoint=True), np.uint32) -assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(0, I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(I_u4_high_closed, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32]) -assert_type(def_gen.integers(0, I_u4_high_closed, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32] | Any) +assert_type(def_gen.integers(I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32] | Any) +assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32] | Any) +assert_type(def_gen.integers(0, I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32] | Any) +assert_type(def_gen.integers(I_u4_high_closed, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32] | Any) +assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32] | Any) +assert_type(def_gen.integers(0, I_u4_high_closed, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32] | Any) assert_type(def_gen.integers(4294967296, dtype=np.uint), np.uint) assert_type(def_gen.integers(0, 4294967296, dtype=np.uint), np.uint) assert_type(def_gen.integers(4294967295, dtype=np.uint, endpoint=True), np.uint) assert_type(def_gen.integers(0, 4294967295, dtype=np.uint, endpoint=True), np.uint) 
-assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.uint, endpoint=True), npt.NDArray[np.uint]) -assert_type(def_gen.integers(I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint]) -assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint]) -assert_type(def_gen.integers(0, I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint]) -assert_type(def_gen.integers(I_u4_high_closed, dtype=np.uint, endpoint=True), npt.NDArray[np.uint]) -assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.uint, endpoint=True), npt.NDArray[np.uint]) -assert_type(def_gen.integers(0, I_u4_high_closed, dtype=np.uint, endpoint=True), npt.NDArray[np.uint]) +assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.uint, endpoint=True), npt.NDArray[np.uint] | Any) +assert_type(def_gen.integers(I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint] | Any) +assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint] | Any) +assert_type(def_gen.integers(0, I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint] | Any) +assert_type(def_gen.integers(I_u4_high_closed, dtype=np.uint, endpoint=True), npt.NDArray[np.uint] | Any) +assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.uint, endpoint=True), npt.NDArray[np.uint] | Any) +assert_type(def_gen.integers(0, I_u4_high_closed, dtype=np.uint, endpoint=True), npt.NDArray[np.uint] | Any) I_u8_low: npt.NDArray[np.uint64] = np.array([0], dtype=np.uint64) I_u8_low_like: list[int] = [0] I_u8_high_open: npt.NDArray[np.uint64] = np.array([18446744073709551615], dtype=np.uint64) I_u8_high_closed: npt.NDArray[np.uint64] = np.array([18446744073709551615], dtype=np.uint64) -assert_type(def_gen.integers(18446744073709551616, dtype="u8"), np.uint64) -assert_type(def_gen.integers(0, 18446744073709551616, dtype="u8"), np.uint64) -assert_type(def_gen.integers(18446744073709551615, dtype="u8", endpoint=True), np.uint64) -assert_type(def_gen.integers(0, 
18446744073709551615, dtype="u8", endpoint=True), np.uint64) -assert_type(def_gen.integers(I_u8_low_like, 18446744073709551615, dtype="u8", endpoint=True), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(I_u8_low, I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(0, I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(I_u8_high_closed, dtype="u8", endpoint=True), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(I_u8_low, I_u8_high_closed, dtype="u8", endpoint=True), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(0, I_u8_high_closed, dtype="u8", endpoint=True), npt.NDArray[np.uint64]) - -assert_type(def_gen.integers(18446744073709551616, dtype="uint64"), np.uint64) -assert_type(def_gen.integers(0, 18446744073709551616, dtype="uint64"), np.uint64) -assert_type(def_gen.integers(18446744073709551615, dtype="uint64", endpoint=True), np.uint64) -assert_type(def_gen.integers(0, 18446744073709551615, dtype="uint64", endpoint=True), np.uint64) -assert_type(def_gen.integers(I_u8_low_like, 18446744073709551615, dtype="uint64", endpoint=True), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(I_u8_low, I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(0, I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(I_u8_high_closed, dtype="uint64", endpoint=True), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(I_u8_low, I_u8_high_closed, dtype="uint64", endpoint=True), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(0, I_u8_high_closed, dtype="uint64", endpoint=True), npt.NDArray[np.uint64]) - assert_type(def_gen.integers(18446744073709551616, dtype=np.uint64), np.uint64) assert_type(def_gen.integers(0, 18446744073709551616, dtype=np.uint64), np.uint64) 
assert_type(def_gen.integers(18446744073709551615, dtype=np.uint64, endpoint=True), np.uint64) assert_type(def_gen.integers(0, 18446744073709551615, dtype=np.uint64, endpoint=True), np.uint64) -assert_type(def_gen.integers(I_u8_low_like, 18446744073709551615, dtype=np.uint64, endpoint=True), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(I_u8_low, I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(0, I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(I_u8_high_closed, dtype=np.uint64, endpoint=True), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(I_u8_low, I_u8_high_closed, dtype=np.uint64, endpoint=True), npt.NDArray[np.uint64]) -assert_type(def_gen.integers(0, I_u8_high_closed, dtype=np.uint64, endpoint=True), npt.NDArray[np.uint64]) +assert_type(def_gen.integers(I_u8_low_like, 18446744073709551615, dtype=np.uint64, endpoint=True), npt.NDArray[np.uint64] | Any) +assert_type(def_gen.integers(I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64] | Any) +assert_type(def_gen.integers(I_u8_low, I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64] | Any) +assert_type(def_gen.integers(0, I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64] | Any) +assert_type(def_gen.integers(I_u8_high_closed, dtype=np.uint64, endpoint=True), npt.NDArray[np.uint64] | Any) +assert_type(def_gen.integers(I_u8_low, I_u8_high_closed, dtype=np.uint64, endpoint=True), npt.NDArray[np.uint64] | Any) +assert_type(def_gen.integers(0, I_u8_high_closed, dtype=np.uint64, endpoint=True), npt.NDArray[np.uint64] | Any) I_i1_low: npt.NDArray[np.int8] = np.array([-128], dtype=np.int8) I_i1_low_like: list[int] = [-128] I_i1_high_open: npt.NDArray[np.int8] = np.array([127], dtype=np.int8) I_i1_high_closed: npt.NDArray[np.int8] = np.array([127], dtype=np.int8) -assert_type(def_gen.integers(128, dtype="i1"), np.int8) 
-assert_type(def_gen.integers(-128, 128, dtype="i1"), np.int8) -assert_type(def_gen.integers(127, dtype="i1", endpoint=True), np.int8) -assert_type(def_gen.integers(-128, 127, dtype="i1", endpoint=True), np.int8) -assert_type(def_gen.integers(I_i1_low_like, 127, dtype="i1", endpoint=True), npt.NDArray[np.int8]) -assert_type(def_gen.integers(I_i1_high_open, dtype="i1"), npt.NDArray[np.int8]) -assert_type(def_gen.integers(I_i1_low, I_i1_high_open, dtype="i1"), npt.NDArray[np.int8]) -assert_type(def_gen.integers(-128, I_i1_high_open, dtype="i1"), npt.NDArray[np.int8]) -assert_type(def_gen.integers(I_i1_high_closed, dtype="i1", endpoint=True), npt.NDArray[np.int8]) -assert_type(def_gen.integers(I_i1_low, I_i1_high_closed, dtype="i1", endpoint=True), npt.NDArray[np.int8]) -assert_type(def_gen.integers(-128, I_i1_high_closed, dtype="i1", endpoint=True), npt.NDArray[np.int8]) - -assert_type(def_gen.integers(128, dtype="int8"), np.int8) -assert_type(def_gen.integers(-128, 128, dtype="int8"), np.int8) -assert_type(def_gen.integers(127, dtype="int8", endpoint=True), np.int8) -assert_type(def_gen.integers(-128, 127, dtype="int8", endpoint=True), np.int8) -assert_type(def_gen.integers(I_i1_low_like, 127, dtype="int8", endpoint=True), npt.NDArray[np.int8]) -assert_type(def_gen.integers(I_i1_high_open, dtype="int8"), npt.NDArray[np.int8]) -assert_type(def_gen.integers(I_i1_low, I_i1_high_open, dtype="int8"), npt.NDArray[np.int8]) -assert_type(def_gen.integers(-128, I_i1_high_open, dtype="int8"), npt.NDArray[np.int8]) -assert_type(def_gen.integers(I_i1_high_closed, dtype="int8", endpoint=True), npt.NDArray[np.int8]) -assert_type(def_gen.integers(I_i1_low, I_i1_high_closed, dtype="int8", endpoint=True), npt.NDArray[np.int8]) -assert_type(def_gen.integers(-128, I_i1_high_closed, dtype="int8", endpoint=True), npt.NDArray[np.int8]) - assert_type(def_gen.integers(128, dtype=np.int8), np.int8) assert_type(def_gen.integers(-128, 128, dtype=np.int8), np.int8) 
assert_type(def_gen.integers(127, dtype=np.int8, endpoint=True), np.int8) assert_type(def_gen.integers(-128, 127, dtype=np.int8, endpoint=True), np.int8) -assert_type(def_gen.integers(I_i1_low_like, 127, dtype=np.int8, endpoint=True), npt.NDArray[np.int8]) -assert_type(def_gen.integers(I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8]) -assert_type(def_gen.integers(I_i1_low, I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8]) -assert_type(def_gen.integers(-128, I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8]) -assert_type(def_gen.integers(I_i1_high_closed, dtype=np.int8, endpoint=True), npt.NDArray[np.int8]) -assert_type(def_gen.integers(I_i1_low, I_i1_high_closed, dtype=np.int8, endpoint=True), npt.NDArray[np.int8]) -assert_type(def_gen.integers(-128, I_i1_high_closed, dtype=np.int8, endpoint=True), npt.NDArray[np.int8]) +assert_type(def_gen.integers(I_i1_low_like, 127, dtype=np.int8, endpoint=True), npt.NDArray[np.int8] | Any) +assert_type(def_gen.integers(I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8] | Any) +assert_type(def_gen.integers(I_i1_low, I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8] | Any) +assert_type(def_gen.integers(-128, I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8] | Any) +assert_type(def_gen.integers(I_i1_high_closed, dtype=np.int8, endpoint=True), npt.NDArray[np.int8] | Any) +assert_type(def_gen.integers(I_i1_low, I_i1_high_closed, dtype=np.int8, endpoint=True), npt.NDArray[np.int8] | Any) +assert_type(def_gen.integers(-128, I_i1_high_closed, dtype=np.int8, endpoint=True), npt.NDArray[np.int8] | Any) I_i2_low: npt.NDArray[np.int16] = np.array([-32768], dtype=np.int16) I_i2_low_like: list[int] = [-32768] I_i2_high_open: npt.NDArray[np.int16] = np.array([32767], dtype=np.int16) I_i2_high_closed: npt.NDArray[np.int16] = np.array([32767], dtype=np.int16) -assert_type(def_gen.integers(32768, dtype="i2"), np.int16) -assert_type(def_gen.integers(-32768, 32768, dtype="i2"), np.int16) -assert_type(def_gen.integers(32767, 
dtype="i2", endpoint=True), np.int16) -assert_type(def_gen.integers(-32768, 32767, dtype="i2", endpoint=True), np.int16) -assert_type(def_gen.integers(I_i2_low_like, 32767, dtype="i2", endpoint=True), npt.NDArray[np.int16]) -assert_type(def_gen.integers(I_i2_high_open, dtype="i2"), npt.NDArray[np.int16]) -assert_type(def_gen.integers(I_i2_low, I_i2_high_open, dtype="i2"), npt.NDArray[np.int16]) -assert_type(def_gen.integers(-32768, I_i2_high_open, dtype="i2"), npt.NDArray[np.int16]) -assert_type(def_gen.integers(I_i2_high_closed, dtype="i2", endpoint=True), npt.NDArray[np.int16]) -assert_type(def_gen.integers(I_i2_low, I_i2_high_closed, dtype="i2", endpoint=True), npt.NDArray[np.int16]) -assert_type(def_gen.integers(-32768, I_i2_high_closed, dtype="i2", endpoint=True), npt.NDArray[np.int16]) - -assert_type(def_gen.integers(32768, dtype="int16"), np.int16) -assert_type(def_gen.integers(-32768, 32768, dtype="int16"), np.int16) -assert_type(def_gen.integers(32767, dtype="int16", endpoint=True), np.int16) -assert_type(def_gen.integers(-32768, 32767, dtype="int16", endpoint=True), np.int16) -assert_type(def_gen.integers(I_i2_low_like, 32767, dtype="int16", endpoint=True), npt.NDArray[np.int16]) -assert_type(def_gen.integers(I_i2_high_open, dtype="int16"), npt.NDArray[np.int16]) -assert_type(def_gen.integers(I_i2_low, I_i2_high_open, dtype="int16"), npt.NDArray[np.int16]) -assert_type(def_gen.integers(-32768, I_i2_high_open, dtype="int16"), npt.NDArray[np.int16]) -assert_type(def_gen.integers(I_i2_high_closed, dtype="int16", endpoint=True), npt.NDArray[np.int16]) -assert_type(def_gen.integers(I_i2_low, I_i2_high_closed, dtype="int16", endpoint=True), npt.NDArray[np.int16]) -assert_type(def_gen.integers(-32768, I_i2_high_closed, dtype="int16", endpoint=True), npt.NDArray[np.int16]) - assert_type(def_gen.integers(32768, dtype=np.int16), np.int16) assert_type(def_gen.integers(-32768, 32768, dtype=np.int16), np.int16) assert_type(def_gen.integers(32767, dtype=np.int16, 
endpoint=True), np.int16) assert_type(def_gen.integers(-32768, 32767, dtype=np.int16, endpoint=True), np.int16) -assert_type(def_gen.integers(I_i2_low_like, 32767, dtype=np.int16, endpoint=True), npt.NDArray[np.int16]) -assert_type(def_gen.integers(I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16]) -assert_type(def_gen.integers(I_i2_low, I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16]) -assert_type(def_gen.integers(-32768, I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16]) -assert_type(def_gen.integers(I_i2_high_closed, dtype=np.int16, endpoint=True), npt.NDArray[np.int16]) -assert_type(def_gen.integers(I_i2_low, I_i2_high_closed, dtype=np.int16, endpoint=True), npt.NDArray[np.int16]) -assert_type(def_gen.integers(-32768, I_i2_high_closed, dtype=np.int16, endpoint=True), npt.NDArray[np.int16]) +assert_type(def_gen.integers(I_i2_low_like, 32767, dtype=np.int16, endpoint=True), npt.NDArray[np.int16] | Any) +assert_type(def_gen.integers(I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16] | Any) +assert_type(def_gen.integers(I_i2_low, I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16] | Any) +assert_type(def_gen.integers(-32768, I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16] | Any) +assert_type(def_gen.integers(I_i2_high_closed, dtype=np.int16, endpoint=True), npt.NDArray[np.int16] | Any) +assert_type(def_gen.integers(I_i2_low, I_i2_high_closed, dtype=np.int16, endpoint=True), npt.NDArray[np.int16] | Any) +assert_type(def_gen.integers(-32768, I_i2_high_closed, dtype=np.int16, endpoint=True), npt.NDArray[np.int16] | Any) I_i4_low: npt.NDArray[np.int32] = np.array([-2147483648], dtype=np.int32) I_i4_low_like: list[int] = [-2147483648] I_i4_high_open: npt.NDArray[np.int32] = np.array([2147483647], dtype=np.int32) I_i4_high_closed: npt.NDArray[np.int32] = np.array([2147483647], dtype=np.int32) -assert_type(def_gen.integers(2147483648, dtype="i4"), np.int32) -assert_type(def_gen.integers(-2147483648, 2147483648, dtype="i4"), np.int32) 
-assert_type(def_gen.integers(2147483647, dtype="i4", endpoint=True), np.int32) -assert_type(def_gen.integers(-2147483648, 2147483647, dtype="i4", endpoint=True), np.int32) -assert_type(def_gen.integers(I_i4_low_like, 2147483647, dtype="i4", endpoint=True), npt.NDArray[np.int32]) -assert_type(def_gen.integers(I_i4_high_open, dtype="i4"), npt.NDArray[np.int32]) -assert_type(def_gen.integers(I_i4_low, I_i4_high_open, dtype="i4"), npt.NDArray[np.int32]) -assert_type(def_gen.integers(-2147483648, I_i4_high_open, dtype="i4"), npt.NDArray[np.int32]) -assert_type(def_gen.integers(I_i4_high_closed, dtype="i4", endpoint=True), npt.NDArray[np.int32]) -assert_type(def_gen.integers(I_i4_low, I_i4_high_closed, dtype="i4", endpoint=True), npt.NDArray[np.int32]) -assert_type(def_gen.integers(-2147483648, I_i4_high_closed, dtype="i4", endpoint=True), npt.NDArray[np.int32]) - -assert_type(def_gen.integers(2147483648, dtype="int32"), np.int32) -assert_type(def_gen.integers(-2147483648, 2147483648, dtype="int32"), np.int32) -assert_type(def_gen.integers(2147483647, dtype="int32", endpoint=True), np.int32) -assert_type(def_gen.integers(-2147483648, 2147483647, dtype="int32", endpoint=True), np.int32) -assert_type(def_gen.integers(I_i4_low_like, 2147483647, dtype="int32", endpoint=True), npt.NDArray[np.int32]) -assert_type(def_gen.integers(I_i4_high_open, dtype="int32"), npt.NDArray[np.int32]) -assert_type(def_gen.integers(I_i4_low, I_i4_high_open, dtype="int32"), npt.NDArray[np.int32]) -assert_type(def_gen.integers(-2147483648, I_i4_high_open, dtype="int32"), npt.NDArray[np.int32]) -assert_type(def_gen.integers(I_i4_high_closed, dtype="int32", endpoint=True), npt.NDArray[np.int32]) -assert_type(def_gen.integers(I_i4_low, I_i4_high_closed, dtype="int32", endpoint=True), npt.NDArray[np.int32]) -assert_type(def_gen.integers(-2147483648, I_i4_high_closed, dtype="int32", endpoint=True), npt.NDArray[np.int32]) - assert_type(def_gen.integers(2147483648, dtype=np.int32), np.int32) 
assert_type(def_gen.integers(-2147483648, 2147483648, dtype=np.int32), np.int32) assert_type(def_gen.integers(2147483647, dtype=np.int32, endpoint=True), np.int32) assert_type(def_gen.integers(-2147483648, 2147483647, dtype=np.int32, endpoint=True), np.int32) -assert_type(def_gen.integers(I_i4_low_like, 2147483647, dtype=np.int32, endpoint=True), npt.NDArray[np.int32]) -assert_type(def_gen.integers(I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32]) -assert_type(def_gen.integers(I_i4_low, I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32]) -assert_type(def_gen.integers(-2147483648, I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32]) -assert_type(def_gen.integers(I_i4_high_closed, dtype=np.int32, endpoint=True), npt.NDArray[np.int32]) -assert_type(def_gen.integers(I_i4_low, I_i4_high_closed, dtype=np.int32, endpoint=True), npt.NDArray[np.int32]) -assert_type(def_gen.integers(-2147483648, I_i4_high_closed, dtype=np.int32, endpoint=True), npt.NDArray[np.int32]) +assert_type(def_gen.integers(I_i4_low_like, 2147483647, dtype=np.int32, endpoint=True), npt.NDArray[np.int32] | Any) +assert_type(def_gen.integers(I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32] | Any) +assert_type(def_gen.integers(I_i4_low, I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32] | Any) +assert_type(def_gen.integers(-2147483648, I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32] | Any) +assert_type(def_gen.integers(I_i4_high_closed, dtype=np.int32, endpoint=True), npt.NDArray[np.int32] | Any) +assert_type(def_gen.integers(I_i4_low, I_i4_high_closed, dtype=np.int32, endpoint=True), npt.NDArray[np.int32] | Any) +assert_type(def_gen.integers(-2147483648, I_i4_high_closed, dtype=np.int32, endpoint=True), npt.NDArray[np.int32] | Any) I_i8_low: npt.NDArray[np.int64] = np.array([-9223372036854775808], dtype=np.int64) I_i8_low_like: list[int] = [-9223372036854775808] I_i8_high_open: npt.NDArray[np.int64] = np.array([9223372036854775807], dtype=np.int64) I_i8_high_closed: 
npt.NDArray[np.int64] = np.array([9223372036854775807], dtype=np.int64) -assert_type(def_gen.integers(9223372036854775808, dtype="i8"), np.int64) -assert_type(def_gen.integers(-9223372036854775808, 9223372036854775808, dtype="i8"), np.int64) -assert_type(def_gen.integers(9223372036854775807, dtype="i8", endpoint=True), np.int64) -assert_type(def_gen.integers(-9223372036854775808, 9223372036854775807, dtype="i8", endpoint=True), np.int64) -assert_type(def_gen.integers(I_i8_low_like, 9223372036854775807, dtype="i8", endpoint=True), npt.NDArray[np.int64]) -assert_type(def_gen.integers(I_i8_high_open, dtype="i8"), npt.NDArray[np.int64]) -assert_type(def_gen.integers(I_i8_low, I_i8_high_open, dtype="i8"), npt.NDArray[np.int64]) -assert_type(def_gen.integers(-9223372036854775808, I_i8_high_open, dtype="i8"), npt.NDArray[np.int64]) -assert_type(def_gen.integers(I_i8_high_closed, dtype="i8", endpoint=True), npt.NDArray[np.int64]) -assert_type(def_gen.integers(I_i8_low, I_i8_high_closed, dtype="i8", endpoint=True), npt.NDArray[np.int64]) -assert_type(def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype="i8", endpoint=True), npt.NDArray[np.int64]) - -assert_type(def_gen.integers(9223372036854775808, dtype="int64"), np.int64) -assert_type(def_gen.integers(-9223372036854775808, 9223372036854775808, dtype="int64"), np.int64) -assert_type(def_gen.integers(9223372036854775807, dtype="int64", endpoint=True), np.int64) -assert_type(def_gen.integers(-9223372036854775808, 9223372036854775807, dtype="int64", endpoint=True), np.int64) -assert_type(def_gen.integers(I_i8_low_like, 9223372036854775807, dtype="int64", endpoint=True), npt.NDArray[np.int64]) -assert_type(def_gen.integers(I_i8_high_open, dtype="int64"), npt.NDArray[np.int64]) -assert_type(def_gen.integers(I_i8_low, I_i8_high_open, dtype="int64"), npt.NDArray[np.int64]) -assert_type(def_gen.integers(-9223372036854775808, I_i8_high_open, dtype="int64"), npt.NDArray[np.int64]) 
-assert_type(def_gen.integers(I_i8_high_closed, dtype="int64", endpoint=True), npt.NDArray[np.int64]) -assert_type(def_gen.integers(I_i8_low, I_i8_high_closed, dtype="int64", endpoint=True), npt.NDArray[np.int64]) -assert_type(def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype="int64", endpoint=True), npt.NDArray[np.int64]) - assert_type(def_gen.integers(9223372036854775808, dtype=np.int64), np.int64) assert_type(def_gen.integers(-9223372036854775808, 9223372036854775808, dtype=np.int64), np.int64) assert_type(def_gen.integers(9223372036854775807, dtype=np.int64, endpoint=True), np.int64) assert_type(def_gen.integers(-9223372036854775808, 9223372036854775807, dtype=np.int64, endpoint=True), np.int64) -assert_type(def_gen.integers(I_i8_low_like, 9223372036854775807, dtype=np.int64, endpoint=True), npt.NDArray[np.int64]) -assert_type(def_gen.integers(I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(def_gen.integers(I_i8_low, I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(def_gen.integers(-9223372036854775808, I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(def_gen.integers(I_i8_high_closed, dtype=np.int64, endpoint=True), npt.NDArray[np.int64]) -assert_type(def_gen.integers(I_i8_low, I_i8_high_closed, dtype=np.int64, endpoint=True), npt.NDArray[np.int64]) -assert_type(def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype=np.int64, endpoint=True), npt.NDArray[np.int64]) +assert_type(def_gen.integers(I_i8_low_like, 9223372036854775807, dtype=np.int64, endpoint=True), npt.NDArray[np.int64] | Any) +assert_type(def_gen.integers(I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64] | Any) +assert_type(def_gen.integers(I_i8_low, I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64] | Any) +assert_type(def_gen.integers(-9223372036854775808, I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64] | Any) +assert_type(def_gen.integers(I_i8_high_closed, dtype=np.int64, endpoint=True), 
npt.NDArray[np.int64] | Any) +assert_type(def_gen.integers(I_i8_low, I_i8_high_closed, dtype=np.int64, endpoint=True), npt.NDArray[np.int64] | Any) +assert_type(def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype=np.int64, endpoint=True), npt.NDArray[np.int64] | Any) assert_type(def_gen.bit_generator, np.random.BitGenerator) @@ -897,11 +705,12 @@ assert_type(def_gen.choice(5, 3, replace=True), npt.NDArray[np.int64]) assert_type(def_gen.choice(5, 3, p=[1 / 5] * 5), npt.NDArray[np.int64]) assert_type(def_gen.choice(5, 3, p=[1 / 5] * 5, replace=False), npt.NDArray[np.int64]) -assert_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"]), Any) -assert_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3), npt.NDArray[Any]) -assert_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, p=[1 / 4] * 4), npt.NDArray[Any]) -assert_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=True), npt.NDArray[Any]) -assert_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=False, p=np.array([1 / 8, 1 / 8, 1 / 2, 1 / 4])), npt.NDArray[Any]) +str_list: list[str] +assert_type(def_gen.choice(str_list), Any) +assert_type(def_gen.choice(str_list, 3), npt.NDArray[Any]) +assert_type(def_gen.choice(str_list, 3, p=[1 / 4] * 4), npt.NDArray[Any]) +assert_type(def_gen.choice(str_list, 3, replace=True), npt.NDArray[Any]) +assert_type(def_gen.choice(str_list, 3, replace=False, p=np.array([1 / 8, 1 / 8, 1 / 2, 1 / 4])), npt.NDArray[Any]) assert_type(def_gen.dirichlet([0.5, 0.5]), npt.NDArray[np.float64]) assert_type(def_gen.dirichlet(np.array([0.5, 0.5])), npt.NDArray[np.float64]) @@ -929,13 +738,13 @@ assert_type(def_gen.permutation(10), npt.NDArray[np.int64]) assert_type(def_gen.permutation([1, 2, 3, 4]), npt.NDArray[Any]) assert_type(def_gen.permutation(np.array([1, 2, 3, 4])), npt.NDArray[Any]) assert_type(def_gen.permutation(D_2D, axis=1), npt.NDArray[Any]) -assert_type(def_gen.permuted(D_2D), 
npt.NDArray[Any]) +assert_type(def_gen.permuted(D_2D), npt.NDArray[np.float64]) assert_type(def_gen.permuted(D_2D_like), npt.NDArray[Any]) -assert_type(def_gen.permuted(D_2D, axis=1), npt.NDArray[Any]) -assert_type(def_gen.permuted(D_2D, out=D_2D), npt.NDArray[Any]) -assert_type(def_gen.permuted(D_2D_like, out=D_2D), npt.NDArray[Any]) -assert_type(def_gen.permuted(D_2D_like, out=D_2D), npt.NDArray[Any]) -assert_type(def_gen.permuted(D_2D, axis=1, out=D_2D), npt.NDArray[Any]) +assert_type(def_gen.permuted(D_2D, axis=1), npt.NDArray[np.float64]) +assert_type(def_gen.permuted(D_2D, out=D_2D), npt.NDArray[np.float64]) +assert_type(def_gen.permuted(D_2D_like, out=D_2D), npt.NDArray[np.float64]) +assert_type(def_gen.permuted(D_2D_like, out=D_2D), npt.NDArray[np.float64]) +assert_type(def_gen.permuted(D_2D, axis=1, out=D_2D), npt.NDArray[np.float64]) assert_type(def_gen.shuffle(np.arange(10)), None) assert_type(def_gen.shuffle([1, 2, 3, 4, 5]), None) From 16d30474e5e6fce5b8c3b98aa5e55c4631b96368 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 Dec 2025 19:21:25 +0100 Subject: [PATCH 0991/1718] MAINT: Bump astral-sh/setup-uv from 7.1.4 to 7.1.5 (#30395) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/mypy.yml | 2 +- .github/workflows/stubtest.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml index 1deb5ab82815..ac87ccacd846 100644 --- a/.github/workflows/mypy.yml +++ b/.github/workflows/mypy.yml @@ -61,7 +61,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: astral-sh/setup-uv@1e862dfacbd1d6d858c55d9b792c756523627244 # v7.1.4 + - uses: astral-sh/setup-uv@ed21f2f24f8dd64503750218de024bcf64c7250a # v7.1.5 with: python-version: ${{ matrix.os_python[1] }} activate-environment: true diff --git a/.github/workflows/stubtest.yml 
b/.github/workflows/stubtest.yml index dcac63be8a0a..c91954403b14 100644 --- a/.github/workflows/stubtest.yml +++ b/.github/workflows/stubtest.yml @@ -43,7 +43,7 @@ jobs: fetch-tags: true persist-credentials: false - - uses: astral-sh/setup-uv@1e862dfacbd1d6d858c55d9b792c756523627244 # v7.1.4 + - uses: astral-sh/setup-uv@ed21f2f24f8dd64503750218de024bcf64c7250a # v7.1.5 with: python-version: ${{ matrix.py }} activate-environment: true From e113b65964643a2393edfb987b003e9b174d7a79 Mon Sep 17 00:00:00 2001 From: Sayed Awad Date: Mon, 8 Dec 2025 20:39:42 +0200 Subject: [PATCH 0992/1718] SIMD, BLD: Fix Highway target attribute build failure on ppc64 (#30392) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit On GCC < 13, compiler target attributes cause a build failure with the `power9/vsx3` feature enabled: ``` from ../numpy/_core/src/umath/loops_trigonometric.dispatch.cpp:5: hwy/ops/ppc_vsx-inl.h: In function ‘np::simd::{anonymous}::Vec np::simd::{anonymous}::LoadN(const TLane*, size_t) [with TLane = float]’: hwy/ops/ppc_vsx-inl.h:1200:19: error: inlining failed in call to ‘always_inline’ ‘hwy::N_PPC10::VFromD 1200 | HWY_API VFromD LoadN(D d, const T* HWY_RESTRICT p, ``` To fix this issue, a new Highway option `HWY_DISABLE_ATTR` has been introduced in Highway to explicitly disable compiler target attributes, as we rely on a source-based CPU dispatcher. Therefore, this patch updates to the latest Highway main. This patch also extends the native CI ppc64 build to include Clang, and sets testing against Python 3.12. 
--- .github/workflows/linux-ppc64le.yml | 26 +++++++++++++++++++++----- meson_cpu/ppc64/meson.build | 6 ++---- numpy/_core/src/highway | 2 +- pyproject.toml | 2 +- 4 files changed, 25 insertions(+), 11 deletions(-) diff --git a/.github/workflows/linux-ppc64le.yml b/.github/workflows/linux-ppc64le.yml index cff7fb1d5b89..7e75786b0bea 100644 --- a/.github/workflows/linux-ppc64le.yml +++ b/.github/workflows/linux-ppc64le.yml @@ -25,26 +25,42 @@ jobs: # For more details, see: https://github.com/numpy/numpy/issues/29125 if: github.repository == 'numpy/numpy' runs-on: ubuntu-24.04-ppc64le-p10 - name: "Native PPC64LE" + + strategy: + fail-fast: false + matrix: + config: + - name: "GCC" + args: "-Dallow-noblas=false" + - name: "clang" + args: "-Dallow-noblas=false" + + name: "${{ matrix.config.name }}" steps: - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true - persist-credentials: false - + - name: Install dependencies run: | sudo apt update - sudo apt install -y python3 python3-pip python3-dev ninja-build gfortran \ + sudo apt install -y python3.12 python3-pip python3-dev ninja-build gfortran \ build-essential libopenblas-dev liblapack-dev pkg-config pip install --upgrade pip pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt echo "/home/runner/.local/bin" >> $GITHUB_PATH + - name: Install clang + if: matrix.config.name == 'clang' + run: | + sudo apt install -y clang + export CC=clang + export CXX=clang++ + - name: Meson Build run: | - spin build -- -Dallow-noblas=false + spin build -- ${{ matrix.config.args }} - name: Meson Log if: always() diff --git a/meson_cpu/ppc64/meson.build b/meson_cpu/ppc64/meson.build index 8f2e8373c77c..58690d1fa80a 100644 --- a/meson_cpu/ppc64/meson.build +++ b/meson_cpu/ppc64/meson.build @@ -3,15 +3,13 @@ mod_features = import('features') compiler_id = meson.get_compiler('c').get_id() VSX = mod_features.new( - 'VSX', 1, args: '-mvsx', + 
'VSX', 1, args: ['-mvsx', '-DHWY_COMPILE_ONLY_STATIC', '-DHWY_DISABLE_ATTR'] + + (compiler_id == 'clang' ? ['-maltivec'] : []), test_code: files(source_root + '/numpy/_core/src/_simd/checks/cpu_vsx.c')[0], extra_tests: { 'VSX_ASM': files(source_root + '/numpy/_core/src/_simd/checks/extra_vsx_asm.c')[0] } ) -if compiler_id == 'clang' - VSX.update(args: ['-mvsx', '-maltivec']) -endif VSX2 = mod_features.new( 'VSX2', 2, implies: VSX, args: {'val': '-mcpu=power8', 'match': '.*vsx'}, detect: {'val': 'VSX2', 'match': 'VSX'}, diff --git a/numpy/_core/src/highway b/numpy/_core/src/highway index 37c08e5528f6..ee36c8371293 160000 --- a/numpy/_core/src/highway +++ b/numpy/_core/src/highway @@ -1 +1 @@ -Subproject commit 37c08e5528f63ead9c7e4fd99ba454c1b1a3e3f7 +Subproject commit ee36c837129310be19c17c9108c6dc3f6ae06942 diff --git a/pyproject.toml b/pyproject.toml index a459ea2cc2ee..084ba993072e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -53,7 +53,7 @@ license-files = [ 'LICENSE.txt', # BSD-3-Clause 'numpy/_core/include/numpy/libdivide/LICENSE.txt', # Zlib 'numpy/_core/src/common/pythoncapi-compat/COPYING', # 0BSD - 'numpy/_core/src/highway/LICENSE-BSD3', # BSD-3-Clause + 'numpy/_core/src/highway/LICENSE', # Dual-licensed: Apache 2.0 or BSD 3-Clause 'numpy/_core/src/multiarray/dragon4_LICENSE.txt', # MIT 'numpy/_core/src/npysort/x86-simd-sort/LICENSE.md', # BSD-3-Clause 'numpy/_core/src/umath/svml/LICENSE', # BSD-3-Clause From cee14ff2a69c02fabc73e0809ce95cb03dc92567 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Mon, 8 Dec 2025 21:42:42 +0100 Subject: [PATCH 0993/1718] DOC: prefer passing scalar types to ``dtype=`` in examples (#30394) * DOC: prefer scalar types for ``dtype=`` * DOC: revert doc change in `mtrand.pyx` Co-authored-by: Charles Harris --------- Co-authored-by: Charles Harris --- doc/neps/nep-0050-scalar-promotion.rst | 4 +- doc/neps/nep-0055-string_dtype.rst | 4 +- doc/source/reference/arrays.classes.rst | 6 +- 
doc/source/reference/arrays.datetime.rst | 4 +- doc/source/reference/arrays.dtypes.rst | 2 +- doc/source/reference/arrays.promotion.rst | 6 +- doc/source/reference/c-api/array.rst | 6 +- doc/source/reference/thread_safety.rst | 6 +- doc/source/user/absolute_beginners.rst | 2 +- doc/source/user/basics.creation.rst | 48 +++++----- doc/source/user/basics.io.genfromtxt.rst | 24 ++--- doc/source/user/basics.subclassing.rst | 4 +- doc/source/user/basics.ufuncs.rst | 6 +- doc/source/user/c-info.ufunc-tutorial.rst | 2 +- doc/source/user/quickstart.rst | 8 +- numpy/_core/_add_newdocs.py | 90 +++++++++---------- .../_core/code_generators/ufunc_docstrings.py | 28 +++--- numpy/_core/fromnumeric.py | 16 ++-- numpy/_core/function_base.py | 6 +- numpy/_core/memmap.py | 12 +-- numpy/_core/multiarray.py | 8 +- numpy/_core/numeric.py | 28 +++--- numpy/_core/numerictypes.py | 2 +- numpy/ctypeslib/_ctypeslib.py | 2 +- numpy/doc/ufuncs.py | 2 +- numpy/fft/_helper.py | 4 +- numpy/fft/_pocketfft.py | 4 +- numpy/lib/_array_utils_impl.py | 2 +- numpy/lib/_function_base_impl.py | 6 +- numpy/lib/_index_tricks_impl.py | 2 +- numpy/lib/_npyio_impl.py | 2 +- numpy/lib/_twodim_base_impl.py | 4 +- numpy/lib/_type_check_impl.py | 10 +-- numpy/linalg/_linalg.py | 4 +- numpy/ma/core.py | 6 +- numpy/ma/extras.py | 6 +- numpy/matlib.py | 6 +- numpy/matrixlib/defmatrix.py | 6 +- numpy/random/_generator.pyx | 2 +- 39 files changed, 194 insertions(+), 196 deletions(-) diff --git a/doc/neps/nep-0050-scalar-promotion.rst b/doc/neps/nep-0050-scalar-promotion.rst index aa04dd2c740e..974f6691d363 100644 --- a/doc/neps/nep-0050-scalar-promotion.rst +++ b/doc/neps/nep-0050-scalar-promotion.rst @@ -509,9 +509,9 @@ will be ignored. 
This means, that operations will never silently use the The user will have to write one of:: np.array([3]) + np.array(2**100) - np.array([3]) + np.array(2**100, dtype=object) + np.array([3]) + np.array(2**100, dtype=np.object_) -As such implicit conversion to ``object`` should be rare and the work-around +As such implicit conversion to ``object_`` should be rare and the work-around is clear, we expect that the backwards compatibility concerns are fairly small. diff --git a/doc/neps/nep-0055-string_dtype.rst b/doc/neps/nep-0055-string_dtype.rst index 7e29e1425e8c..555052fa16f7 100644 --- a/doc/neps/nep-0055-string_dtype.rst +++ b/doc/neps/nep-0055-string_dtype.rst @@ -224,7 +224,7 @@ to fixed-width unicode arrays:: In [3]: data = [str(i) * 10 for i in range(100_000)] - In [4]: %timeit arr_object = np.array(data, dtype=object) + In [4]: %timeit arr_object = np.array(data, dtype=np.object_) 3.15 ms ± 74.4 µs per loop (mean ± std. dev. of 7 runs, 100 loops each) In [5]: %timeit arr_stringdtype = np.array(data, dtype=StringDType()) @@ -242,7 +242,7 @@ for strings, the string loading performance of ``StringDType`` should improve. String operations have similar performance:: - In [7]: %timeit np.array([s.capitalize() for s in data], dtype=object) + In [7]: %timeit np.array([s.capitalize() for s in data], dtype=np.object_) 31.6 ms ± 728 µs per loop (mean ± std. dev. 
of 7 runs, 10 loops each) In [8]: %timeit np.char.capitalize(arr_stringdtype) diff --git a/doc/source/reference/arrays.classes.rst b/doc/source/reference/arrays.classes.rst index 80821c2c08fa..755a13ff7252 100644 --- a/doc/source/reference/arrays.classes.rst +++ b/doc/source/reference/arrays.classes.rst @@ -479,16 +479,16 @@ Example: >>> import numpy as np - >>> a = np.memmap('newfile.dat', dtype=float, mode='w+', shape=1000) + >>> a = np.memmap('newfile.dat', dtype=np.float64, mode='w+', shape=1000) >>> a[10] = 10.0 >>> a[30] = 30.0 >>> del a - >>> b = np.fromfile('newfile.dat', dtype=float) + >>> b = np.fromfile('newfile.dat', dtype=np.float64) >>> print(b[10], b[30]) 10.0 30.0 - >>> a = np.memmap('newfile.dat', dtype=float) + >>> a = np.memmap('newfile.dat', dtype=np.float64) >>> print(a[10], a[30]) 10.0 30.0 diff --git a/doc/source/reference/arrays.datetime.rst b/doc/source/reference/arrays.datetime.rst index c0ec6a572c83..9cb7f59db78b 100644 --- a/doc/source/reference/arrays.datetime.rst +++ b/doc/source/reference/arrays.datetime.rst @@ -124,10 +124,10 @@ datetime type with generic units. >>> import numpy as np - >>> np.array(['2007-07-13', '2006-01-13', '2010-08-13'], dtype='datetime64') + >>> np.array(['2007-07-13', '2006-01-13', '2010-08-13'], dtype=np.datetime64) array(['2007-07-13', '2006-01-13', '2010-08-13'], dtype='datetime64[D]') - >>> np.array(['2001-01-01T12:00', '2002-02-03T13:56:03.172'], dtype='datetime64') + >>> np.array(['2001-01-01T12:00', '2002-02-03T13:56:03.172'], dtype=np.datetime64) array(['2001-01-01T12:00:00.000', '2002-02-03T13:56:03.172'], dtype='datetime64[ms]') diff --git a/doc/source/reference/arrays.dtypes.rst b/doc/source/reference/arrays.dtypes.rst index fcb3e122e6de..262c22655c76 100644 --- a/doc/source/reference/arrays.dtypes.rst +++ b/doc/source/reference/arrays.dtypes.rst @@ -561,7 +561,7 @@ This equivalence can only be handled through ``==``, not through ``is``. 
>>> import numpy as np - >>> a = np.array([1, 2], dtype=float) + >>> a = np.array([1, 2], dtype=np.float64) >>> a.dtype == np.dtype(np.float64) True >>> a.dtype == np.float64 diff --git a/doc/source/reference/arrays.promotion.rst b/doc/source/reference/arrays.promotion.rst index d2dead0ce7b5..32e503383217 100644 --- a/doc/source/reference/arrays.promotion.rst +++ b/doc/source/reference/arrays.promotion.rst @@ -79,10 +79,10 @@ their precision when determining the result dtype. This is often convenient. For instance, when working with arrays of a low precision dtype, it is usually desirable for simple operations with Python scalars to preserve the dtype. - >>> arr_float32 = np.array([1, 2.5, 2.1], dtype="float32") + >>> arr_float32 = np.array([1, 2.5, 2.1], dtype=np.float32) >>> arr_float32 + 10.0 # undesirable to promote to float64 array([11. , 12.5, 12.1], dtype=float32) - >>> arr_int16 = np.array([3, 5, 7], dtype="int16") + >>> arr_int16 = np.array([3, 5, 7], dtype=np.int16) >>> arr_int16 + 10 # undesirable to promote to int64 array([13, 15, 17], dtype=int16) @@ -130,7 +130,7 @@ overflows: ... RuntimeWarning: overflow encountered in scalar add Note that NumPy warns when overflows occur for scalars, but not for arrays; -e.g., ``np.array(100, dtype="uint8") + 100`` will *not* warn. +e.g., ``np.array(100, dtype=np.uint8) + 100`` will *not* warn. Numerical promotion ------------------- diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst index d07a00ebde73..885bdb17181e 100644 --- a/doc/source/reference/c-api/array.rst +++ b/doc/source/reference/c-api/array.rst @@ -1786,9 +1786,9 @@ the functions that must be implemented for each slot. - ``0.0`` is the default for ``sum([])``. But ``-0.0`` is the correct identity otherwise as it preserves the sign for ``sum([-0.0])``. - We use no identity for object, but return the default of ``0`` and - ``1`` for the empty ``sum([], dtype=object)`` and - ``prod([], dtype=object)``. 
- This allows ``np.sum(np.array(["a", "b"], dtype=object))`` to work. + ``1`` for the empty ``sum([], dtype=np.object_)`` and + ``prod([], dtype=np.object_)``. + This allows ``np.sum(np.array(["a", "b"], dtype=np.object_))`` to work. - ``-inf`` or ``INT_MIN`` for ``max`` is an identity, but at least ``INT_MIN`` not a good *default* when there are no items. diff --git a/doc/source/reference/thread_safety.rst b/doc/source/reference/thread_safety.rst index 84590bfac39c..6b6b9b0ea054 100644 --- a/doc/source/reference/thread_safety.rst +++ b/doc/source/reference/thread_safety.rst @@ -29,8 +29,8 @@ locking if you need to mutation and multithreading. Note that operations that *do not* release the GIL will see no performance gains from use of the `threading` module, and instead might be better served with -`multiprocessing`. In particular, operations on arrays with ``dtype=object`` do -not release the GIL. +`multiprocessing`. In particular, operations on arrays with ``dtype=np.object_`` +do not release the GIL. Free-threaded Python -------------------- @@ -47,5 +47,5 @@ Because free-threaded Python does not have a global interpreter lock to serialize access to Python objects, there are more opportunities for threads to mutate shared state and create thread safety issues. In addition to the limitations about locking of the ndarray object noted above, this also means -that arrays with ``dtype=object`` are not protected by the GIL, creating data +that arrays with ``dtype=np.object_`` are not protected by the GIL, creating data races for python objects that are not possible outside free-threaded python. diff --git a/doc/source/user/absolute_beginners.rst b/doc/source/user/absolute_beginners.rst index 6909db8cb7e2..f1007db45acc 100644 --- a/doc/source/user/absolute_beginners.rst +++ b/doc/source/user/absolute_beginners.rst @@ -779,7 +779,7 @@ You can add the arrays together with the plus sign. 
:: >>> data = np.array([1, 2]) - >>> ones = np.ones(2, dtype=int) + >>> ones = np.ones(2, dtype=np.int_) >>> data + ones array([2, 3]) diff --git a/doc/source/user/basics.creation.rst b/doc/source/user/basics.creation.rst index 1a7707ee69c9..19fa737d5f8d 100644 --- a/doc/source/user/basics.creation.rst +++ b/doc/source/user/basics.creation.rst @@ -20,7 +20,7 @@ There are 6 general mechanisms for creating arrays: 6) Use of special library functions (e.g., random) You can use these methods to create ndarrays or :ref:`structured_arrays`. -This document will cover general methods for ndarray creation. +This document will cover general methods for ndarray creation. 1) Converting Python sequences to NumPy arrays ============================================== @@ -29,8 +29,8 @@ NumPy arrays can be defined using Python sequences such as lists and tuples. Lists and tuples are defined using ``[...]`` and ``(...)``, respectively. Lists and tuples can define ndarray creation: -* a list of numbers will create a 1D array, -* a list of lists will create a 2D array, +* a list of numbers will create a 1D array, +* a list of lists will create a 2D array, * further nested lists will create higher-dimensional arrays. In general, any array object is called an **ndarray** in NumPy. :: @@ -72,7 +72,7 @@ results, for example:: Notice when you perform operations with two arrays of the same ``dtype``: ``uint32``, the resulting array is the same type. When you -perform operations with different ``dtype``, NumPy will +perform operations with different ``dtype``, NumPy will assign a new type that satisfies all of the array elements involved in the computation, here ``uint32`` and ``int32`` can both be represented in as ``int64``. @@ -88,7 +88,7 @@ you create the array. .. 40 functions seems like a small number, but the routines.array-creation - has ~47. I'm sure there are more. + has ~47. I'm sure there are more. 
NumPy has over 40 built-in functions for creating arrays as laid out in the :ref:`Array creation routines `. @@ -104,7 +104,7 @@ dimension of the array they create: The 1D array creation functions e.g. :func:`numpy.linspace` and :func:`numpy.arange` generally need at least two inputs, ``start`` and -``stop``. +``stop``. :func:`numpy.arange` creates arrays with regularly incrementing values. Check the documentation for complete information and examples. A few @@ -113,7 +113,7 @@ examples are shown:: >>> import numpy as np >>> np.arange(10) array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) - >>> np.arange(2, 10, dtype=float) + >>> np.arange(2, 10, dtype=np.float64) array([2., 3., 4., 5., 6., 7., 8., 9.]) >>> np.arange(2, 3, 0.1) array([2. , 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9]) @@ -121,8 +121,8 @@ examples are shown:: Note: best practice for :func:`numpy.arange` is to use integer start, end, and step values. There are some subtleties regarding ``dtype``. In the second example, the ``dtype`` is defined. In the third example, the array is -``dtype=float`` to accommodate the step size of ``0.1``. Due to roundoff error, -the ``stop`` value is sometimes included. +``dtype=np.float64`` to accommodate the step size of ``0.1``. Due to roundoff error, +the ``stop`` value is sometimes included. :func:`numpy.linspace` will create arrays with a specified number of elements, and spaced equally between the specified beginning and end values. For @@ -140,7 +140,7 @@ number of elements and the starting and end point. The previous ------------------------------- The 2D array creation functions e.g. :func:`numpy.eye`, :func:`numpy.diag`, and :func:`numpy.vander` -define properties of special matrices represented as 2D arrays. +define properties of special matrices represented as 2D arrays. ``np.eye(n, m)`` defines a 2D identity matrix. 
The elements where i=j (row index and column index are equal) are 1 and the rest are 0, as such:: @@ -159,7 +159,7 @@ and the rest are 0, as such:: the diagonal *or* if given a 2D array returns a 1D array that is only the diagonal elements. The two array creation functions can be helpful while doing linear algebra, as such:: - + >>> import numpy as np >>> np.diag([1, 2, 3]) array([[1, 0, 0], @@ -197,7 +197,7 @@ routine is helpful in generating linear least squares models, as such:: [ 8, 4, 2, 1], [27, 9, 3, 1], [64, 16, 4, 1]]) - + 3 - general ndarray creation functions -------------------------------------- @@ -205,20 +205,20 @@ The ndarray creation functions e.g. :func:`numpy.ones`, :func:`numpy.zeros`, and :meth:`~numpy.random.Generator.random` define arrays based upon the desired shape. The ndarray creation functions can create arrays with any dimension by specifying how many dimensions -and length along that dimension in a tuple or list. +and length along that dimension in a tuple or list. :func:`numpy.zeros` will create an array filled with 0 values with the specified shape. The default dtype is ``float64``:: >>> import numpy as np >>> np.zeros((2, 3)) - array([[0., 0., 0.], + array([[0., 0., 0.], [0., 0., 0.]]) >>> np.zeros((2, 3, 2)) array([[[0., 0.], [0., 0.], [0., 0.]], - + [[0., 0.], [0., 0.], [0., 0.]]]) @@ -228,7 +228,7 @@ specified shape. 
The default dtype is ``float64``:: >>> import numpy as np >>> np.ones((2, 3)) - array([[1., 1., 1.], + array([[1., 1., 1.], [1., 1., 1.]]) >>> np.ones((2, 3, 2)) array([[[1., 1.], @@ -265,11 +265,11 @@ dimension:: >>> import numpy as np >>> np.indices((3,3)) - array([[[0, 0, 0], - [1, 1, 1], - [2, 2, 2]], - [[0, 1, 2], - [0, 1, 2], + array([[[0, 0, 0], + [1, 1, 1], + [2, 2, 2]], + [[0, 1, 2], + [0, 1, 2], [0, 1, 2]]]) This is particularly useful for evaluating functions of multiple dimensions on @@ -322,7 +322,7 @@ arrays into a 4-by-4 array using ``block``:: [ 0., 0., 0., -4.]]) Other routines use similar syntax to join ndarrays. Check the -routine's documentation for further examples and syntax. +routine's documentation for further examples and syntax. 4) Reading arrays from disk, either from standard or custom formats =================================================================== @@ -330,7 +330,7 @@ routine's documentation for further examples and syntax. This is the most common case of large array creation. The details depend greatly on the format of data on disk. This section gives general pointers on how to handle various formats. For more detailed examples of IO look at -:ref:`How to Read and Write files `. +:ref:`How to Read and Write files `. Standard binary formats ----------------------- @@ -397,4 +397,4 @@ knowledge to interface with C or C++. NumPy is the fundamental library for array containers in the Python Scientific Computing stack. Many Python libraries, including SciPy, Pandas, and OpenCV, use NumPy ndarrays as the common format for data exchange, These libraries can create, -operate on, and work with NumPy arrays. +operate on, and work with NumPy arrays. 
diff --git a/doc/source/user/basics.io.genfromtxt.rst b/doc/source/user/basics.io.genfromtxt.rst index d5b6bba8f28d..2a1523ba209b 100644 --- a/doc/source/user/basics.io.genfromtxt.rst +++ b/doc/source/user/basics.io.genfromtxt.rst @@ -201,16 +201,16 @@ The main way to control how the sequences of strings we have read from the file are converted to other types is to set the ``dtype`` argument. Acceptable values for this argument are: -* a single type, such as ``dtype=float``. +* a single type, such as ``dtype=np.float64``. The output will be 2D with the given dtype, unless a name has been associated with each column with the use of the ``names`` argument - (see below). Note that ``dtype=float`` is the default for + (see below). Note that ``dtype=np.float64`` is the default for :func:`~numpy.genfromtxt`. -* a sequence of types, such as ``dtype=(int, float, float)``. +* a sequence of types, such as ``dtype=(np.int_, np.float64, np.float64)``. * a comma-separated string, such as ``dtype="i4,f8,|U3"``. * a dictionary with two keys ``'names'`` and ``'formats'``. * a sequence of tuples ``(name, type)``, such as - ``dtype=[('A', int), ('B', float)]``. + ``dtype=[('A', np.int_), ('B', np.float64)]``. * an existing :class:`numpy.dtype` object. * the special value ``None``. In that case, the type of the columns will be determined from the data @@ -243,7 +243,7 @@ each column. 
A first possibility is to use an explicit structured dtype, as mentioned previously:: >>> data = StringIO("1 2 3\n 4 5 6") - >>> np.genfromtxt(data, dtype=[(_, int) for _ in "abc"]) + >>> np.genfromtxt(data, dtype=[(_, np.int_) for _ in "abc"]) array([(1, 2, 3), (4, 5, 6)], dtype=[('a', '>> data = StringIO("1 2 3\n 4 5 6") - >>> ndtype=[('a',int), ('b', float), ('c', int)] + >>> ndtype=[('a', np.int_), ('b', np.float64), ('c', np.int_)] >>> names = ["A", "B", "C"] >>> np.genfromtxt(data, names=names, dtype=ndtype) array([(1, 2., 3), (4, 5., 6)], @@ -289,7 +289,7 @@ with the standard NumPy default of ``"f%i"``, yielding names like ``f0``, ``f1`` and so forth:: >>> data = StringIO("1 2 3\n 4 5 6") - >>> np.genfromtxt(data, dtype=(int, float, int)) + >>> np.genfromtxt(data, dtype=(np.int_, np.float64, np.int_)) array([(1, 2., 3), (4, 5., 6)], dtype=[('f0', '>> data = StringIO("1 2 3\n 4 5 6") - >>> np.genfromtxt(data, dtype=(int, float, int), names="a") + >>> np.genfromtxt(data, dtype=(np.int_, np.float64, np.int_), names="a") array([(1, 2., 3), (4, 5., 6)], dtype=[('a', '>> data = StringIO("1 2 3\n 4 5 6") - >>> np.genfromtxt(data, dtype=(int, float, int), defaultfmt="var_%02i") + >>> np.genfromtxt(data, dtype=(np.int_, np.float64, np.int_), defaultfmt="var_%02i") array([(1, 2., 3), (4, 5., 6)], dtype=[('var_00', '>> data = "N/A, 2, 3\n4, ,???" >>> kwargs = dict(delimiter=",", - ... dtype=int, + ... dtype=np.int_, ... names="a,b,c", ... missing_values={0:"N/A", 'b':" ", 2:"???"}, ... 
filling_values={0:0, 'b':0, 2:-999}) diff --git a/doc/source/user/basics.subclassing.rst b/doc/source/user/basics.subclassing.rst index 2a369aaae17c..202561a958a8 100644 --- a/doc/source/user/basics.subclassing.rst +++ b/doc/source/user/basics.subclassing.rst @@ -346,7 +346,7 @@ Simple example - adding an extra attribute to ndarray class InfoArray(np.ndarray): - def __new__(subtype, shape, dtype=float, buffer=None, offset=0, + def __new__(subtype, shape, dtype=np.float64, buffer=None, offset=0, strides=None, order=None, info=None): # Create the ndarray instance of our type, given the usual # ndarray input arguments. This will call the standard @@ -779,5 +779,3 @@ your function's signature should accept ``**kwargs``. For example: This object is now compatible with ``np.sum`` again because any extraneous arguments (i.e. keywords that are not ``axis`` or ``dtype``) will be hidden away in the ``**unused_kwargs`` parameter. - - diff --git a/doc/source/user/basics.ufuncs.rst b/doc/source/user/basics.ufuncs.rst index 773fe86c21d2..8607c2abda9b 100644 --- a/doc/source/user/basics.ufuncs.rst +++ b/doc/source/user/basics.ufuncs.rst @@ -76,7 +76,7 @@ an integer (or Boolean) data-type and smaller than the size of the >>> x.dtype dtype('int64') - >>> np.multiply.reduce(x, dtype=float) + >>> np.multiply.reduce(x, dtype=np.float64) array([ 0., 28., 80.]) Finally, the *out* keyword allows you to @@ -84,10 +84,10 @@ provide an output array (or a tuple of output arrays for multi-output ufuncs). If *out* is given, the *dtype* argument is only used for the internal computations. 
Considering ``x`` from the previous example:: - >>> y = np.zeros(3, dtype=int) + >>> y = np.zeros(3, dtype=np.int_) >>> y array([0, 0, 0]) - >>> np.multiply.reduce(x, dtype=float, out=y) + >>> np.multiply.reduce(x, dtype=np.float64, out=y) array([ 0, 28, 80]) Ufuncs also have a fifth method, :func:`numpy.ufunc.at`, that allows in place diff --git a/doc/source/user/c-info.ufunc-tutorial.rst b/doc/source/user/c-info.ufunc-tutorial.rst index 09daa95b7875..e5773f8232b8 100644 --- a/doc/source/user/c-info.ufunc-tutorial.rst +++ b/doc/source/user/c-info.ufunc-tutorial.rst @@ -608,7 +608,7 @@ After the above has been installed, it can be imported and used as follows. >>> import npufunc >>> npufunc.logit(0.5) np.float64(0.0) ->>> a = np.linspace(0, 1, 5, dtype="f4") +>>> a = np.linspace(0, 1, 5, dtype=np.float32) >>> npufunc.logit(a) :1: RuntimeWarning: divide by zero encountered in logit array([ -inf, -1.0986123, 0. , 1.0986123, inf], diff --git a/doc/source/user/quickstart.rst b/doc/source/user/quickstart.rst index ef4a0467c706..1208bd1a6347 100644 --- a/doc/source/user/quickstart.rst +++ b/doc/source/user/quickstart.rst @@ -163,7 +163,7 @@ The type of the array can also be explicitly specified at creation time: :: - >>> c = np.array([[1, 2], [3, 4]], dtype=complex) + >>> c = np.array([[1, 2], [3, 4]], dtype=np.complex128) >>> c array([[1.+0.j, 2.+0.j], [3.+0.j, 4.+0.j]]) @@ -346,7 +346,7 @@ existing array rather than create a new one. :: >>> rg = np.random.default_rng(1) # create instance of default random number generator - >>> a = np.ones((2, 3), dtype=int) + >>> a = np.ones((2, 3), dtype=np.int_) >>> b = rg.random((2, 3)) >>> a *= 3 >>> a @@ -535,7 +535,7 @@ are given in a tuple separated by commas:: >>> def f(x, y): ... return 10 * x + y ... - >>> b = np.fromfunction(f, (5, 4), dtype=int) + >>> b = np.fromfunction(f, (5, 4), dtype=np.int_) >>> b array([[ 0, 1, 2, 3], [10, 11, 12, 13], @@ -1256,7 +1256,7 @@ set `__: ... A, B = np.meshgrid(x, y) ... C = A + B*1j ... 
z = np.zeros_like(C) - ... divtime = maxit + np.zeros(z.shape, dtype=int) + ... divtime = maxit + np.zeros(z.shape, dtype=np.int_) ... ... for i in range(maxit): ... z = z**2 + C diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index eded752b2721..b37014b6a648 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -418,11 +418,11 @@ original data when the :meth:`~object.__exit__` function is called but not before: - >>> a = np.arange(6, dtype='i4')[::-2] + >>> a = np.arange(6, dtype=np.int32)[::-2] >>> with np.nditer(a, [], ... [['writeonly', 'updateifcopy']], ... casting='unsafe', - ... op_dtypes=[np.dtype('f4')]) as i: + ... op_dtypes=[np.dtype(np.float32)]) as i: ... x = i.operands[0] ... x[:] = [-1, -2, -3] ... # a still unchanged here @@ -939,7 +939,7 @@ ``NPY_MAXDIMS``). Setting ``ndmax`` stops recursion at the specified depth, preserving deeper nested structures as objects instead of promoting them to - higher-dimensional arrays. In this case, ``dtype=object`` is required. + higher-dimensional arrays. In this case, ``dtype=np.object_`` is required. .. 
versionadded:: 2.4.0 ${ARRAY_FUNCTION_LIKE} @@ -994,7 +994,7 @@ Type provided: - >>> np.array([1, 2, 3], dtype=complex) + >>> np.array([1, 2, 3], dtype=np.complex128) array([ 1.+0.j, 2.+0.j, 3.+0.j]) Data-type consisting of more than one element: @@ -1015,14 +1015,14 @@ Limiting the maximum dimensions with ``ndmax``: - >>> a = np.array([[1, 2], [3, 4]], dtype=object, ndmax=2) + >>> a = np.array([[1, 2], [3, 4]], dtype=np.object_, ndmax=2) >>> a array([[1, 2], [3, 4]], dtype=object) >>> a.shape (2, 2) - >>> b = np.array([[1, 2], [3, 4]], dtype=object, ndmax=1) + >>> b = np.array([[1, 2], [3, 4]], dtype=np.object_, ndmax=1) >>> b array([list([1, 2]), list([3, 4])], dtype=object) >>> b.shape @@ -1389,7 +1389,7 @@ array([[ -9.74499359e+001, 6.69583040e-309], [ 2.13182611e-314, 3.06959433e-309]]) #uninitialized - >>> np.empty([2, 2], dtype=int) + >>> np.empty([2, 2], dtype=np.int_) array([[-1073741821, -1067949133], [ 496041986, 19249760]]) #uninitialized @@ -1455,7 +1455,7 @@ >>> np.zeros(5) array([ 0., 0., 0., 0., 0.]) - >>> np.zeros((5,), dtype=int) + >>> np.zeros((5,), dtype=np.int_) array([0, 0, 0, 0, 0]) >>> np.zeros((2, 1)) @@ -1487,7 +1487,7 @@ fromstring(string, dtype=None, count=-1, *, sep, like=None) -- - fromstring(string, dtype=float, count=-1, *, sep, like=None) + fromstring(string, dtype=np.float64, count=-1, *, sep, like=None) A new 1-D array initialized from text data in a string. 
@@ -1538,9 +1538,9 @@ Examples -------- >>> import numpy as np - >>> np.fromstring('1 2', dtype=int, sep=' ') + >>> np.fromstring('1 2', dtype=np.int_, sep=' ') array([1, 2]) - >>> np.fromstring('1, 2', dtype=int, sep=',') + >>> np.fromstring('1, 2', dtype=np.int_, sep=',') array([1, 2]) """) @@ -1649,7 +1649,7 @@ fromfile(file, dtype=None, count=-1, sep='', offset=0, *, like=None) -- - fromfile(file, dtype=float, count=-1, sep='', offset=0, *, like=None) + fromfile(file, dtype=np.float64, count=-1, sep='', offset=0, *, like=None) Construct an array from data in a text or binary file. @@ -1737,7 +1737,7 @@ frombuffer(buffer, dtype=None, count=-1, offset=0, *, like=None) -- - frombuffer(buffer, dtype=float, count=-1, offset=0, *, like=None) + frombuffer(buffer, dtype=np.float64, count=-1, offset=0, *, like=None) Interpret a buffer as a 1-dimensional array. @@ -1770,7 +1770,7 @@ If the buffer has data that is not in machine byte-order, this should be specified as part of the data-type, e.g.:: - >>> dt = np.dtype(int) + >>> dt = np.dtype(np.int_) >>> dt = dt.newbyteorder('>') >>> np.frombuffer(buf, dtype=dt) # doctest: +SKIP @@ -1925,9 +1925,9 @@ `start` is much larger than `step`. This can lead to unexpected behaviour. For example:: - >>> np.arange(0, 5, 0.5, dtype=int) + >>> np.arange(0, 5, 0.5, dtype=np.int_) array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) - >>> np.arange(-3, 3, 0.5, dtype=int) + >>> np.arange(-3, 3, 0.5, dtype=np.int_) array([-3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8]) In such cases, the use of `numpy.linspace` should be preferred. 
@@ -2030,16 +2030,16 @@ Examples -------- >>> import numpy as np - >>> np.promote_types('f4', 'f8') + >>> np.promote_types(np.float32, np.float64) dtype('float64') - >>> np.promote_types('i8', 'f4') + >>> np.promote_types(np.int64, np.float32) dtype('float64') >>> np.promote_types('>i8', '>> np.promote_types('i4', 'S8') + >>> np.promote_types(np.int32, 'S8') dtype('S11') An example of a non-associative case: @@ -2376,7 +2376,7 @@ ndarray(shape, dtype=None, buffer=None, offset=0, strides=None, order=None) -- - ndarray(shape, dtype=float, buffer=None, offset=0, strides=None, order=None) + ndarray(shape, dtype=np.float64, buffer=None, offset=0, strides=None, order=None) An array object represents a multidimensional, homogeneous array of fixed-size items. An associated data-type object describes the @@ -2485,7 +2485,7 @@ First mode, `buffer` is None: >>> import numpy as np - >>> np.ndarray(shape=(2,2), dtype=float, order='F') + >>> np.ndarray(shape=(2,2), dtype=np.float64, order='F') array([[0.0e+000, 0.0e+000], # random [ nan, 2.5e-323]]) @@ -2493,7 +2493,7 @@ >>> np.ndarray((2,), buffer=np.array([1,2,3]), ... offset=np.int_().itemsize, - ... dtype=int) # offset = 1*itemsize, i.e. skip first element + ... dtype=np.int_) # offset = 1*itemsize, i.e. skip first element array([2, 3]) """) @@ -3573,15 +3573,15 @@ def _array_method_doc(name: str, params: str, doc: str) -> None: >>> x array([1. , 2. , 2.5]) - >>> x.astype(int) + >>> x.astype(np.int_) array([1, 2, 2]) - >>> x.astype(int, casting="same_value") + >>> x.astype(np.int_, casting="same_value") Traceback (most recent call last): ... ValueError: could not cast 'same_value' double to long - >>> x[:2].astype(int, casting="same_value") + >>> x[:2].astype(np.int_, casting="same_value") array([1, 2]) """) @@ -3751,12 +3751,12 @@ def _array_method_doc(name: str, params: str, doc: str) -> None: >>> y.flags['C_CONTIGUOUS'] True - For arrays containing Python objects (e.g. 
dtype=object), + For arrays containing Python objects (e.g. dtype=np.object_), the copy is a shallow one. The new array will contain the same object which may lead to surprises if that object can be modified (is mutable): - >>> a = np.array([1, 'm', [2, 3, 4]], dtype=object) + >>> a = np.array([1, 'm', [2, 3, 4]], dtype=np.object_) >>> b = a.copy() >>> b[2][0] = 10 >>> a @@ -3766,7 +3766,7 @@ def _array_method_doc(name: str, params: str, doc: str) -> None: use `copy.deepcopy`: >>> import copy - >>> a = np.array([1, 'm', [2, 3, 4]], dtype=object) + >>> a = np.array([1, 'm', [2, 3, 4]], dtype=np.object_) >>> c = copy.deepcopy(a) >>> c[2][0] = 10 >>> c @@ -3868,7 +3868,7 @@ def _array_method_doc(name: str, params: str, doc: str) -> None: to a single array element. The following is a rare example where this distinction is important: - >>> a = np.array([None, None], dtype=object) + >>> a = np.array([None, None], dtype=np.object_) >>> a[0] = np.array(3) >>> a array([array(3), None], dtype=object) @@ -4020,7 +4020,7 @@ def _array_method_doc(name: str, params: str, doc: str) -> None: For an array with object dtype, elements are returned as-is. - >>> a = np.array([np.int64(1)], dtype=object) + >>> a = np.array([np.int64(1)], dtype=np.object_) >>> a.item() #return np.int64 np.int64(1) """) @@ -4780,7 +4780,7 @@ def _array_method_doc(name: str, params: str, doc: str) -> None: .. note:: Passing None for ``dtype`` is different from omitting the parameter, since the former invokes ``dtype(None)`` which is an alias for - ``dtype('float64')``. + ``dtype(np.float64)``. Parameters ---------- @@ -5867,8 +5867,8 @@ def _array_method_doc(name: str, params: str, doc: str) -> None: This API requires passing dtypes, define them for convenience: >>> import numpy as np - >>> int32 = np.dtype("int32") - >>> float32 = np.dtype("float32") + >>> int32 = np.dtype(np.int32) + >>> float32 = np.dtype(np.float32) The typical ufunc call does not pass an output dtype. 
`numpy.add` has two inputs and one output, so leave the output as ``None`` (not provided): @@ -6095,11 +6095,11 @@ def _array_method_doc(name: str, params: str, doc: str) -> None: -------- >>> import numpy as np - >>> x = np.dtype('i4') + >>> x = np.dtype(np.int32) >>> x.alignment 4 - >>> x = np.dtype(float) + >>> x = np.dtype(np.float64) >>> x.alignment 8 @@ -6124,11 +6124,11 @@ def _array_method_doc(name: str, params: str, doc: str) -> None: -------- >>> import numpy as np - >>> dt = np.dtype('i2') + >>> dt = np.dtype(np.int16) >>> dt.byteorder '=' >>> # endian is not relevant for 8 bit numbers - >>> np.dtype('i1').byteorder + >>> np.dtype(np.int8).byteorder '|' >>> # or ASCII strings >>> np.dtype('S2').byteorder @@ -6274,13 +6274,13 @@ def _array_method_doc(name: str, params: str, doc: str) -> None: -------- >>> import numpy as np - >>> dt = np.dtype('i2') + >>> dt = np.dtype(np.int16) >>> dt.isbuiltin 1 - >>> dt = np.dtype('f8') + >>> dt = np.dtype(np.float64) >>> dt.isbuiltin 1 - >>> dt = np.dtype([('field1', 'f8')]) + >>> dt = np.dtype([('field1', np.float64)]) >>> dt.isbuiltin 0 @@ -6348,13 +6348,13 @@ def _array_method_doc(name: str, params: str, doc: str) -> None: -------- >>> import numpy as np - >>> dt = np.dtype('i4') + >>> dt = np.dtype(np.int32) >>> dt.kind 'i' - >>> dt = np.dtype('f8') + >>> dt = np.dtype(np.float64) >>> dt.kind 'f' - >>> dt = np.dtype([('field1', 'f8')]) + >>> dt = np.dtype([('field1', np.float64)]) >>> dt.kind 'V' @@ -6520,7 +6520,7 @@ def _array_method_doc(name: str, params: str, doc: str) -> None: >>> x.subdtype (dtype('float32'), (8,)) - >>> x = np.dtype('i2') + >>> x = np.dtype(np.int16) >>> x.subdtype >>> @@ -6542,7 +6542,7 @@ def _array_method_doc(name: str, params: str, doc: str) -> None: >>> x.base dtype('float32') - >>> x = np.dtype('i2') + >>> x = np.dtype(np.int16) >>> x.base dtype('int16') diff --git a/numpy/_core/code_generators/ufunc_docstrings.py b/numpy/_core/code_generators/ufunc_docstrings.py index 
fe07047a1758..1630a9d6f136 100644 --- a/numpy/_core/code_generators/ufunc_docstrings.py +++ b/numpy/_core/code_generators/ufunc_docstrings.py @@ -917,7 +917,7 @@ def add_newdoc(place, name, doc): array([ 1.00000000e+00, 6.12303177e-17, -1.00000000e+00]) >>> >>> # Example of providing the optional output parameter - >>> out1 = np.array([0], dtype='d') + >>> out1 = np.array([0], dtype=np.float64) >>> out2 = np.cos([0.1], out1) >>> out2 is out1 True @@ -1145,7 +1145,7 @@ def add_newdoc(place, name, doc): ------- out : ndarray or scalar Output array, element-wise comparison of `x1` and `x2`. - Typically of type bool, unless ``dtype=object`` is passed. + Typically of type bool, unless ``dtype=np.object_`` is passed. $OUT_SCALAR_2 See Also @@ -1506,7 +1506,7 @@ def add_newdoc(place, name, doc): ------- out : ndarray or scalar Output array, element-wise comparison of `x1` and `x2`. - Typically of type bool, unless ``dtype=object`` is passed. + Typically of type bool, unless ``dtype=np.object_`` is passed. $OUT_SCALAR_2 @@ -1545,7 +1545,7 @@ def add_newdoc(place, name, doc): ------- out : bool or ndarray of bool Output array, element-wise comparison of `x1` and `x2`. - Typically of type bool, unless ``dtype=object`` is passed. + Typically of type bool, unless ``dtype=np.object_`` is passed. $OUT_SCALAR_2 See Also @@ -1957,7 +1957,7 @@ def add_newdoc(place, name, doc): ------- out : ndarray or scalar Output array, element-wise comparison of `x1` and `x2`. - Typically of type bool, unless ``dtype=object`` is passed. + Typically of type bool, unless ``dtype=np.object_`` is passed. $OUT_SCALAR_2 See Also @@ -1994,7 +1994,7 @@ def add_newdoc(place, name, doc): ------- out : ndarray or scalar Output array, element-wise comparison of `x1` and `x2`. - Typically of type bool, unless ``dtype=object`` is passed. + Typically of type bool, unless ``dtype=np.object_`` is passed. 
$OUT_SCALAR_2 See Also @@ -3255,7 +3255,7 @@ def add_newdoc(place, name, doc): ------- out : ndarray or scalar Output array, element-wise comparison of `x1` and `x2`. - Typically of type bool, unless ``dtype=object`` is passed. + Typically of type bool, unless ``dtype=np.object_`` is passed. $OUT_SCALAR_2 See Also @@ -3373,9 +3373,9 @@ def add_newdoc(place, name, doc): >>> p array([nan, nan]) - To get complex results, give the argument ``dtype=complex``. + To get complex results, give the argument ``dtype=np.complex128``. - >>> np.power(x3, 1.5, dtype=complex) + >>> np.power(x3, 1.5, dtype=np.complex128) array([-1.83697020e-16-1.j, -1.46957616e-15-8.j]) """) @@ -3452,9 +3452,9 @@ def add_newdoc(place, name, doc): >>> p array([nan, nan]) - To get complex results, give the argument ``dtype=complex``. + To get complex results, give the argument ``dtype=np.complex128``. - >>> np.float_power(x3, 1.5, dtype=complex) + >>> np.float_power(x3, 1.5, dtype=np.complex128) array([-1.83697020e-16-1.j, -1.46957616e-15-8.j]) """) @@ -4055,7 +4055,7 @@ def add_newdoc(place, name, doc): >>> # Discrepancy due to vagaries of floating point arithmetic. 
>>> # Example of providing the optional output parameter - >>> out1 = np.array([0], dtype='d') + >>> out1 = np.array([0], dtype=np.float64) >>> out2 = np.sinh([0.1], out1) >>> out2 is out1 True @@ -4256,7 +4256,7 @@ def add_newdoc(place, name, doc): >>> >>> # Example of providing the optional output parameter illustrating >>> # that what is returned is a reference to said parameter - >>> out1 = np.array([0], dtype='d') + >>> out1 = np.array([0], dtype=np.float64) >>> out2 = np.cos([0.1], out1) >>> out2 is out1 True @@ -4309,7 +4309,7 @@ def add_newdoc(place, name, doc): >>> # Example of providing the optional output parameter illustrating >>> # that what is returned is a reference to said parameter - >>> out1 = np.array([0], dtype='d') + >>> out1 = np.array([0], dtype=np.float64) >>> out2 = np.tanh([0.1], out1) >>> out2 is out1 True diff --git a/numpy/_core/fromnumeric.py b/numpy/_core/fromnumeric.py index 33fb9ec4b39f..dd94b4d0bed9 100644 --- a/numpy/_core/fromnumeric.py +++ b/numpy/_core/fromnumeric.py @@ -2026,7 +2026,7 @@ def nonzero(a): Notes ----- While the nonzero values can be obtained with ``a[nonzero(a)]``, it is - recommended to use ``x[x.astype(bool)]`` or ``x[x != 0]`` instead, which + recommended to use ``x[x.astype(np.bool)]`` or ``x[x != 0]`` instead, which will correctly handle 0-d arrays. Examples @@ -2385,7 +2385,7 @@ def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, more precise approach to summation. Especially when summing a large number of lower precision floating point numbers, such as ``float32``, numerical errors can become significant. - In such cases it can be advisable to use `dtype="float64"` to use a higher + In such cases it can be advisable to use `dtype=np.float64` to use a higher precision for the output. Examples @@ -2721,7 +2721,7 @@ def cumulative_prod(x, /, *, axis=None, dtype=None, out=None, ... 
# total product 1*2*3 = 6 array([1, 2, 6]) >>> a = np.array([1, 2, 3, 4, 5, 6]) - >>> np.cumulative_prod(a, dtype=float) # specify type of output + >>> np.cumulative_prod(a, dtype=np.float64) # specify type of output array([ 1., 2., 6., 24., 120., 720.]) The cumulative product for each column (i.e., over the rows) of ``b``: @@ -2808,7 +2808,7 @@ def cumulative_sum(x, /, *, axis=None, dtype=None, out=None, array([1, 2, 3, 4, 5, 6]) >>> np.cumulative_sum(a) array([ 1, 3, 6, 10, 15, 21]) - >>> np.cumulative_sum(a, dtype=float) # specifies type of output value(s) + >>> np.cumulative_sum(a, dtype=np.float64) # specifies type of output value(s) array([ 1., 3., 6., 10., 15., 21.]) >>> b = np.array([[1, 2, 3], [4, 5, 6]]) @@ -2892,7 +2892,7 @@ def cumsum(a, axis=None, dtype=None, out=None): [4, 5, 6]]) >>> np.cumsum(a) array([ 1, 3, 6, 10, 15, 21]) - >>> np.cumsum(a, dtype=float) # specifies type of output value(s) + >>> np.cumsum(a, dtype=np.float64) # specifies type of output value(s) array([ 1., 3., 6., 10., 15., 21.]) >>> np.cumsum(a,axis=0) # sum over rows for each of the 3 columns @@ -3096,7 +3096,7 @@ def max(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, array([1, 3]) >>> np.max(a, where=[False, True], initial=-1, axis=0) array([-1, 3]) - >>> b = np.arange(5, dtype=float) + >>> b = np.arange(5, dtype=np.float64) >>> b[2] = np.nan >>> np.max(b) np.float64(nan) @@ -3235,7 +3235,7 @@ def min(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, >>> np.min(a, where=[False, True], initial=10, axis=0) array([10, 1]) - >>> b = np.arange(5, dtype=float) + >>> b = np.arange(5, dtype=np.float64) >>> b[2] = np.nan >>> np.min(b) np.float64(nan) @@ -3456,7 +3456,7 @@ def cumprod(a, axis=None, dtype=None, out=None): ... 
# total product 1*2*3 = 6 array([1, 2, 6]) >>> a = np.array([[1, 2, 3], [4, 5, 6]]) - >>> np.cumprod(a, dtype=float) # specify type of output + >>> np.cumprod(a, dtype=np.float64) # specify type of output array([ 1., 2., 6., 24., 120., 720.]) The cumulative product for each column (i.e., over the rows) of `a`: diff --git a/numpy/_core/function_base.py b/numpy/_core/function_base.py index 7fcc6b4f770a..b01ba108d2c4 100644 --- a/numpy/_core/function_base.py +++ b/numpy/_core/function_base.py @@ -38,7 +38,7 @@ def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, .. versionchanged:: 1.20.0 Values are rounded towards ``-inf`` instead of ``0`` when an integer ``dtype`` is specified. The old behavior can - still be obtained with ``np.linspace(start, stop, num).astype(int)`` + still be obtained with ``np.linspace(start, stop, num).astype(np.int_)`` Parameters ---------- @@ -375,9 +375,9 @@ def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0): Note that the above may not produce exact integers: - >>> np.geomspace(1, 256, num=9, dtype=int) + >>> np.geomspace(1, 256, num=9, dtype=np.int_) array([ 1, 2, 4, 7, 16, 32, 63, 127, 256]) - >>> np.around(np.geomspace(1, 256, num=9)).astype(int) + >>> np.around(np.geomspace(1, 256, num=9)).astype(np.int_) array([ 1, 2, 4, 8, 16, 32, 64, 128, 256]) Negative, decreasing, and complex inputs are allowed: diff --git a/numpy/_core/memmap.py b/numpy/_core/memmap.py index 8cfa7f94a8da..e0b638c6f976 100644 --- a/numpy/_core/memmap.py +++ b/numpy/_core/memmap.py @@ -130,7 +130,7 @@ class memmap(ndarray): Examples -------- >>> import numpy as np - >>> data = np.arange(12, dtype='float32') + >>> data = np.arange(12, dtype=np.float32) >>> data.resize((3,4)) This example uses a temporary file so that doctest doesn't write @@ -142,7 +142,7 @@ class memmap(ndarray): Create a memmap with dtype and shape that matches our data: - >>> fp = np.memmap(filename, dtype='float32', mode='w+', shape=(3,4)) + >>> fp = 
np.memmap(filename, dtype=np.float32, mode='w+', shape=(3,4)) >>> fp memmap([[0., 0., 0., 0.], [0., 0., 0., 0.], @@ -165,7 +165,7 @@ class memmap(ndarray): Load the memmap and verify data was stored: - >>> newfp = np.memmap(filename, dtype='float32', mode='r', shape=(3,4)) + >>> newfp = np.memmap(filename, dtype=np.float32, mode='r', shape=(3,4)) >>> newfp memmap([[ 0., 1., 2., 3.], [ 4., 5., 6., 7.], @@ -173,13 +173,13 @@ class memmap(ndarray): Read-only memmap: - >>> fpr = np.memmap(filename, dtype='float32', mode='r', shape=(3,4)) + >>> fpr = np.memmap(filename, dtype=np.float32, mode='r', shape=(3,4)) >>> fpr.flags.writeable False Copy-on-write memmap: - >>> fpc = np.memmap(filename, dtype='float32', mode='c', shape=(3,4)) + >>> fpc = np.memmap(filename, dtype=np.float32, mode='c', shape=(3,4)) >>> fpc.flags.writeable True @@ -205,7 +205,7 @@ class memmap(ndarray): Offset into a memmap: - >>> fpo = np.memmap(filename, dtype='float32', mode='r', offset=16) + >>> fpo = np.memmap(filename, dtype=np.float32, mode='r', offset=16) >>> fpo memmap([ 4., 5., 6., 7., 8., 9., 10., 11.], dtype=float32) diff --git a/numpy/_core/multiarray.py b/numpy/_core/multiarray.py index dd5a66a8785f..54d240c89e3e 100644 --- a/numpy/_core/multiarray.py +++ b/numpy/_core/multiarray.py @@ -700,7 +700,7 @@ def min_scalar_type(a, /): >>> np.min_scalar_type(1e50) dtype('float64') - >>> np.min_scalar_type(np.arange(4,dtype='f8')) + >>> np.min_scalar_type(np.arange(4, dtype=np.float64)) dtype('float64') """ @@ -732,10 +732,10 @@ def result_type(*arrays_and_dtypes): Examples -------- >>> import numpy as np - >>> np.result_type(3, np.arange(7, dtype='i1')) + >>> np.result_type(3, np.arange(7, dtype=np.int8)) dtype('int8') - >>> np.result_type('i4', 'c8') + >>> np.result_type(np.int32, np.complex64) dtype('complex128') >>> np.result_type(3.0, -2) @@ -961,7 +961,7 @@ def bincount(x, /, weights=None, minlength=0): The input array needs to be of integer dtype, otherwise a TypeError is raised: - >>> 
np.bincount(np.arange(5, dtype=float)) + >>> np.bincount(np.arange(5, dtype=np.float64)) Traceback (most recent call last): ... TypeError: Cannot cast array data from dtype('float64') to dtype('int64') diff --git a/numpy/_core/numeric.py b/numpy/_core/numeric.py index 11527c9de442..d4e1685501d7 100644 --- a/numpy/_core/numeric.py +++ b/numpy/_core/numeric.py @@ -151,7 +151,7 @@ def zeros_like( array([[0, 0, 0], [0, 0, 0]]) - >>> y = np.arange(3, dtype=float) + >>> y = np.arange(3, dtype=np.float64) >>> y array([0., 1., 2.]) >>> np.zeros_like(y) @@ -211,7 +211,7 @@ def ones(shape, dtype=None, order='C', *, device=None, like=None): >>> np.ones(5) array([1., 1., 1., 1., 1.]) - >>> np.ones((5,), dtype=int) + >>> np.ones((5,), dtype=np.int_) array([1, 1, 1, 1, 1]) >>> np.ones((2, 1)) @@ -300,7 +300,7 @@ def ones_like( array([[1, 1, 1], [1, 1, 1]]) - >>> y = np.arange(3, dtype=float) + >>> y = np.arange(3, dtype=np.float64) >>> y array([0., 1., 2.]) >>> np.ones_like(y) @@ -448,21 +448,21 @@ def full_like( Examples -------- >>> import numpy as np - >>> x = np.arange(6, dtype=int) + >>> x = np.arange(6, dtype=np.int_) >>> np.full_like(x, 1) array([1, 1, 1, 1, 1, 1]) >>> np.full_like(x, 0.1) array([0, 0, 0, 0, 0, 0]) - >>> np.full_like(x, 0.1, dtype=np.double) + >>> np.full_like(x, 0.1, dtype=np.float64) array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1]) - >>> np.full_like(x, np.nan, dtype=np.double) + >>> np.full_like(x, np.nan, dtype=np.float64) array([nan, nan, nan, nan, nan, nan]) - >>> y = np.arange(6, dtype=np.double) + >>> y = np.arange(6, dtype=np.float64) >>> np.full_like(y, 0.1) array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1]) - >>> y = np.zeros([2, 2, 3], dtype=int) + >>> y = np.zeros([2, 2, 3], dtype=np.int_) >>> np.full_like(y, [0, 0, 255]) array([[[ 0, 0, 255], [ 0, 0, 255]], @@ -982,7 +982,7 @@ def outer(a, b, out=None): An example using a "vector" of letters: - >>> x = np.array(['a', 'b', 'c'], dtype=object) + >>> x = np.array(['a', 'b', 'c'], dtype=np.object_) >>> np.outer(x, [1, 
2, 3]) array([['a', 'aa', 'aaa'], ['b', 'bb', 'bbb'], @@ -1107,7 +1107,7 @@ def tensordot(a, b, axes=2): An extended example taking advantage of the overloading of + and \\*: >>> a = np.array(range(1, 9)).reshape((2, 2, 2)) - >>> A = np.array(('a', 'b', 'c', 'd'), dtype=object) + >>> A = np.array(('a', 'b', 'c', 'd'), dtype=np.object_) >>> A = A.reshape((2, 2)) >>> a; A array([[[1, 2], @@ -1922,20 +1922,20 @@ def fromfunction(function, shape, *, dtype=float, like=None, **kwargs): Examples -------- >>> import numpy as np - >>> np.fromfunction(lambda i, j: i, (2, 2), dtype=float) + >>> np.fromfunction(lambda i, j: i, (2, 2), dtype=np.float64) array([[0., 0.], [1., 1.]]) - >>> np.fromfunction(lambda i, j: j, (2, 2), dtype=float) + >>> np.fromfunction(lambda i, j: j, (2, 2), dtype=np.float64) array([[0., 1.], [0., 1.]]) - >>> np.fromfunction(lambda i, j: i == j, (3, 3), dtype=int) + >>> np.fromfunction(lambda i, j: i == j, (3, 3), dtype=np.int_) array([[ True, False, False], [False, True, False], [False, False, True]]) - >>> np.fromfunction(lambda i, j: i + j, (3, 3), dtype=int) + >>> np.fromfunction(lambda i, j: i + j, (3, 3), dtype=np.int_) array([[0, 1, 2], [1, 2, 3], [2, 3, 4]]) diff --git a/numpy/_core/numerictypes.py b/numpy/_core/numerictypes.py index 265ad4f8eb1f..70570c8c0f39 100644 --- a/numpy/_core/numerictypes.py +++ b/numpy/_core/numerictypes.py @@ -220,7 +220,7 @@ def issctype(rep): Strings are also a scalar type: - >>> issctype(np.dtype('str')) + >>> issctype(np.dtype(np.str_)) True """ diff --git a/numpy/ctypeslib/_ctypeslib.py b/numpy/ctypeslib/_ctypeslib.py index 9255603cd5d0..a18e11810418 100644 --- a/numpy/ctypeslib/_ctypeslib.py +++ b/numpy/ctypeslib/_ctypeslib.py @@ -502,7 +502,7 @@ def as_ctypes_type(dtype): -------- Converting a simple dtype: - >>> dt = np.dtype('int8') + >>> dt = np.dtype(np.int8) >>> ctype = np.ctypeslib.as_ctypes_type(dt) >>> ctype diff --git a/numpy/doc/ufuncs.py b/numpy/doc/ufuncs.py index 7324168e1dc8..f97e9ff3f80c 100644 
--- a/numpy/doc/ufuncs.py +++ b/numpy/doc/ufuncs.py @@ -113,7 +113,7 @@ output argument is used, the ufunc still returns a reference to the result. >>> x = np.arange(2) - >>> np.add(np.arange(2, dtype=float), np.arange(2, dtype=float), x, + >>> np.add(np.arange(2, dtype=np.float64), np.arange(2, dtype=np.float64), x, ... casting='unsafe') array([0, 2]) >>> x diff --git a/numpy/fft/_helper.py b/numpy/fft/_helper.py index 77adeac9207f..b3598534bcdf 100644 --- a/numpy/fft/_helper.py +++ b/numpy/fft/_helper.py @@ -156,7 +156,7 @@ def fftfreq(n, d=1.0, device=None): Examples -------- >>> import numpy as np - >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5], dtype=float) + >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5], dtype=np.float64) >>> fourier = np.fft.fft(signal) >>> n = signal.size >>> timestep = 0.1 @@ -215,7 +215,7 @@ def rfftfreq(n, d=1.0, device=None): Examples -------- >>> import numpy as np - >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5, -3, 4], dtype=float) + >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5, -3, 4], dtype=np.float64) >>> fourier = np.fft.rfft(signal) >>> n = signal.size >>> sample_rate = 100 diff --git a/numpy/fft/_pocketfft.py b/numpy/fft/_pocketfft.py index 1ce7c76b8636..93f96c9a10b6 100644 --- a/numpy/fft/_pocketfft.py +++ b/numpy/fft/_pocketfft.py @@ -302,7 +302,7 @@ def ifft(a, n=None, axis=-1, norm=None, out=None): >>> import matplotlib.pyplot as plt >>> t = np.arange(400) - >>> n = np.zeros((400,), dtype=complex) + >>> n = np.zeros((400,), dtype=np.complex128) >>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,))) >>> s = np.fft.ifft(n) >>> plt.plot(t, s.real, label='real') @@ -1005,7 +1005,7 @@ def ifftn(a, s=None, axes=None, norm=None, out=None): Create and plot an image with band-limited frequency content: >>> import matplotlib.pyplot as plt - >>> n = np.zeros((200,200), dtype=complex) + >>> n = np.zeros((200,200), dtype=np.complex128) >>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20))) >>> im = 
np.fft.ifftn(n).real >>> plt.imshow(im) diff --git a/numpy/lib/_array_utils_impl.py b/numpy/lib/_array_utils_impl.py index c3996e1f2b92..25d78c1eb6a6 100644 --- a/numpy/lib/_array_utils_impl.py +++ b/numpy/lib/_array_utils_impl.py @@ -30,7 +30,7 @@ def byte_bounds(a): Examples -------- >>> import numpy as np - >>> I = np.eye(2, dtype='f'); I.dtype + >>> I = np.eye(2, dtype=np.float32); I.dtype dtype('float32') >>> low, high = np.lib.array_utils.byte_bounds(I) >>> high - low == I.size*I.itemsize diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index e407598d62c6..3e0005079104 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -654,7 +654,7 @@ class ndarray is returned. ``asarray_chkfinite`` is identical to ``asarray``. >>> a = [1, 2] - >>> np.asarray_chkfinite(a, dtype=float) + >>> np.asarray_chkfinite(a, dtype=np.float64) array([1., 2.]) Raises ValueError if array_like contains Nans or Infs. @@ -5239,7 +5239,7 @@ def delete(arr, obj, axis=None): Often it is preferable to use a boolean mask. For example: >>> arr = np.arange(12) + 1 - >>> mask = np.ones(len(arr), dtype=bool) + >>> mask = np.ones(len(arr), dtype=np.bool) >>> mask[[0,2,4]] = False >>> result = arr[mask,...] 
@@ -5622,7 +5622,7 @@ def append(arr, values, axis=None): the array at index 0 has 2 dimension(s) and the array at index 1 has 1 dimension(s) - >>> a = np.array([1, 2], dtype=int) + >>> a = np.array([1, 2], dtype=np.int_) >>> c = np.append(a, []) >>> c array([1., 2.]) diff --git a/numpy/lib/_index_tricks_impl.py b/numpy/lib/_index_tricks_impl.py index 40fef85b1853..5ee60d0fceaf 100644 --- a/numpy/lib/_index_tricks_impl.py +++ b/numpy/lib/_index_tricks_impl.py @@ -977,7 +977,7 @@ def diag_indices(n, ndim=2): And use it to set the diagonal of an array of zeros to 1: - >>> a = np.zeros((2, 2, 2), dtype=int) + >>> a = np.zeros((2, 2, 2), dtype=np.int_) >>> a[d3] = 1 >>> a array([[[1, 0], diff --git a/numpy/lib/_npyio_impl.py b/numpy/lib/_npyio_impl.py index 72e746f19eba..0e135917cd52 100644 --- a/numpy/lib/_npyio_impl.py +++ b/numpy/lib/_npyio_impl.py @@ -1343,7 +1343,7 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None, single escaped character: >>> s = StringIO('"Hello, my name is ""Monty""!"') - >>> np.loadtxt(s, dtype="U", delimiter=",", quotechar='"') + >>> np.loadtxt(s, dtype=np.str_, delimiter=",", quotechar='"') array('Hello, my name is "Monty"!', dtype='>> import numpy as np - >>> np.eye(2, dtype=int) + >>> np.eye(2, dtype=np.int_) array([[1, 0], [0, 1]]) >>> np.eye(3, k=1) @@ -418,7 +418,7 @@ def tri(N, M=None, k=0, dtype=float, *, like=None): Examples -------- >>> import numpy as np - >>> np.tri(3, 5, 2, dtype=int) + >>> np.tri(3, 5, 2, dtype=np.int_) array([[1, 1, 1, 0, 0], [1, 1, 1, 1, 0], [1, 1, 1, 1, 1]]) diff --git a/numpy/lib/_type_check_impl.py b/numpy/lib/_type_check_impl.py index 584088cdc21d..37192043513f 100644 --- a/numpy/lib/_type_check_impl.py +++ b/numpy/lib/_type_check_impl.py @@ -240,26 +240,26 @@ def isreal(x): Examples -------- >>> import numpy as np - >>> a = np.array([1+1j, 1+0j, 4.5, 3, 2, 2j], dtype=complex) + >>> a = np.array([1+1j, 1+0j, 4.5, 3, 2, 2j], dtype=np.complex128) >>> np.isreal(a) array([False, True, True, 
True, True, False]) The function does not work on string arrays. - >>> a = np.array([2j, "a"], dtype="U") + >>> a = np.array([2j, "a"], dtype=np.str_) >>> np.isreal(a) # Warns about non-elementwise comparison False - Returns True for all elements in input array of ``dtype=object`` even if + Returns True for all elements in input array of ``dtype=np.object_`` even if any of the elements is complex. - >>> a = np.array([1, "2", 3+4j], dtype=object) + >>> a = np.array([1, "2", 3+4j], dtype=np.object_) >>> np.isreal(a) array([ True, True, True]) isreal should not be used with object arrays - >>> a = np.array([1+2j, 2+1j], dtype=object) + >>> a = np.array([1+2j, 2+1j], dtype=np.object_) >>> np.isreal(a) array([ True, True]) diff --git a/numpy/linalg/_linalg.py b/numpy/linalg/_linalg.py index 6884c9b7ef8d..00e485346577 100644 --- a/numpy/linalg/_linalg.py +++ b/numpy/linalg/_linalg.py @@ -938,7 +938,7 @@ def outer(x1, x2, /): An example using a "vector" of letters: - >>> x = np.array(['a', 'b', 'c'], dtype=object) + >>> x = np.array(['a', 'b', 'c'], dtype=np.object_) >>> np.linalg.outer(x, [1, 2, 3]) array([['a', 'aa', 'aaa'], ['b', 'bb', 'bbb'], @@ -1764,7 +1764,7 @@ def svd(a, full_matrices=True, compute_uv=True, hermitian=False): ((9, 9), (6,), (6, 6)) >>> np.allclose(a, np.dot(U[:, :6] * S, Vh)) True - >>> smat = np.zeros((9, 6), dtype=complex) + >>> smat = np.zeros((9, 6), dtype=np.complex128) >>> smat[:6, :6] = np.diag(S) >>> np.allclose(a, np.dot(U, np.dot(smat, Vh))) True diff --git a/numpy/ma/core.py b/numpy/ma/core.py index e0ead317d9f6..f6d7b0a8c8e5 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -2277,7 +2277,7 @@ def masked_object(x, value, copy=True, shrink=True): -------- >>> import numpy as np >>> import numpy.ma as ma - >>> food = np.array(['green_eggs', 'ham'], dtype=object) + >>> food = np.array(['green_eggs', 'ham'], dtype=np.object_) >>> # don't eat spoiled food >>> eat = ma.masked_object(food, 'green_eggs') >>> eat @@ -2286,7 +2286,7 @@ def 
masked_object(x, value, copy=True, shrink=True): fill_value='green_eggs', dtype=object) >>> # plain ol` ham is boring - >>> fresh_food = np.array(['cheese', 'ham', 'pineapple'], dtype=object) + >>> fresh_food = np.array(['cheese', 'ham', 'pineapple'], dtype=np.object_) >>> eat = ma.masked_object(fresh_food, 'green_eggs') >>> eat masked_array(data=['cheese', 'ham', 'pineapple'], @@ -2403,7 +2403,7 @@ def masked_invalid(a, copy=True): -------- >>> import numpy as np >>> import numpy.ma as ma - >>> a = np.arange(5, dtype=float) + >>> a = np.arange(5, dtype=np.float64) >>> a[2] = np.nan >>> a[3] = np.inf >>> a diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py index 7387d4f9beb7..769c38fdc900 100644 --- a/numpy/ma/extras.py +++ b/numpy/ma/extras.py @@ -1078,7 +1078,7 @@ def mask_rowcols(a, axis=None): Examples -------- >>> import numpy as np - >>> a = np.zeros((3, 3), dtype=int) + >>> a = np.zeros((3, 3), dtype=np.int_) >>> a[1, 1] = 1 >>> a array([[0, 0, 0], @@ -1135,7 +1135,7 @@ def mask_rows(a, axis=np._NoValue): Examples -------- >>> import numpy as np - >>> a = np.zeros((3, 3), dtype=int) + >>> a = np.zeros((3, 3), dtype=np.int_) >>> a[1, 1] = 1 >>> a array([[0, 0, 0], @@ -1186,7 +1186,7 @@ def mask_cols(a, axis=np._NoValue): Examples -------- >>> import numpy as np - >>> a = np.zeros((3, 3), dtype=int) + >>> a = np.zeros((3, 3), dtype=np.int_) >>> a[1, 1] = 1 >>> a array([[0, 0, 0], diff --git a/numpy/matlib.py b/numpy/matlib.py index f27d503cdbca..151cb6b369b4 100644 --- a/numpy/matlib.py +++ b/numpy/matlib.py @@ -56,7 +56,7 @@ def empty(shape, dtype=None, order='C'): >>> np.matlib.empty((2, 2)) # filled with random data matrix([[ 6.76425276e-320, 9.79033856e-307], # random [ 7.39337286e-309, 3.22135945e-309]]) - >>> np.matlib.empty((2, 2), dtype=int) + >>> np.matlib.empty((2, 2), dtype=np.int_) matrix([[ 6600475, 0], # random [ 6586976, 22740995]]) @@ -177,7 +177,7 @@ def identity(n, dtype=None): Examples -------- >>> import numpy.matlib - >>> 
np.matlib.identity(3, dtype=int) + >>> np.matlib.identity(3, dtype=np.int_) matrix([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) @@ -222,7 +222,7 @@ def eye(n, M=None, k=0, dtype=float, order='C'): Examples -------- >>> import numpy.matlib - >>> np.matlib.eye(3, k=1, dtype=float) + >>> np.matlib.eye(3, k=1, dtype=np.float64) matrix([[0., 1., 0.], [0., 0., 1.], [0., 0., 0.]]) diff --git a/numpy/matrixlib/defmatrix.py b/numpy/matrixlib/defmatrix.py index 39b9a935500e..d706e09ed947 100644 --- a/numpy/matrixlib/defmatrix.py +++ b/numpy/matrixlib/defmatrix.py @@ -315,11 +315,11 @@ def sum(self, axis=None, dtype=None, out=None): >>> x.sum(axis=1) matrix([[3], [7]]) - >>> x.sum(axis=1, dtype='float') + >>> x.sum(axis=1, dtype=np.float64) matrix([[3.], [7.]]) - >>> out = np.zeros((2, 1), dtype='float') - >>> x.sum(axis=1, dtype='float', out=np.asmatrix(out)) + >>> out = np.zeros((2, 1), dtype=np.float64) + >>> x.sum(axis=1, dtype=np.float64, out=np.asmatrix(out)) matrix([[3.], [7.]]) diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index ed9b6a3d12a8..35794bdfca6a 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -793,7 +793,7 @@ cdef class Generator: than the optimized sampler even if each element of ``p`` is 1 / len(a). ``p`` must sum to 1 when cast to ``float64``. To ensure this, you may wish - to normalize using ``p = p / np.sum(p, dtype=float)``. + to normalize using ``p = p / np.sum(p, dtype=np.float64)``. When passing ``a`` as an integer type and ``size`` is not specified, the return type is a native Python ``int``. 
From 7bd9dbc31c51b5e710428a24a73245edc32ccf68 Mon Sep 17 00:00:00 2001 From: Sayed Awad Date: Mon, 8 Dec 2025 22:44:20 +0200 Subject: [PATCH 0994/1718] BENCH: Add `--cpu-affinity` option to set CPU affinity for running the benchmark (#30393) --- .spin/cmds.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/.spin/cmds.py b/.spin/cmds.py index e522108a5107..5c4d5e90f6d7 100644 --- a/.spin/cmds.py +++ b/.spin/cmds.py @@ -397,6 +397,11 @@ def lint(ctx, fix): help="The factor above or below which a benchmark result is " "considered reportable. This is passed on to the asv command." ) +@click.option( + '--cpu-affinity', default=None, multiple=False, + help="Set CPU affinity for running the benchmark, in format: 0 or 0,1,2 or 0-3." + "Default: not set" +) @click.argument( 'commits', metavar='', required=False, @@ -404,7 +409,8 @@ def lint(ctx, fix): ) @meson.build_dir_option @click.pass_context -def bench(ctx, tests, compare, verbose, quick, factor, commits, build_dir): +def bench(ctx, tests, compare, verbose, quick, factor, cpu_affinity, + commits, build_dir): """🏋 Run benchmarks. 
\b @@ -447,6 +453,9 @@ def bench(ctx, tests, compare, verbose, quick, factor, commits, build_dir): if quick: bench_args = ['--quick'] + bench_args + if cpu_affinity: + bench_args += ['--cpu-affinity', cpu_affinity] + if not compare: # No comparison requested; we build and benchmark the current version From b70fc7747edacb5644d32f8d1d22259d24a3167b Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Tue, 9 Dec 2025 02:08:34 +0100 Subject: [PATCH 0995/1718] TYP: simplified char code literals, ``ctypeslib``, and ``dtype.__new__`` (#30396) * TYP: simplfied dtype char code literals * TYP: remove duplicate and redundant uses of char code literals * TYP: rewrite `ctypeslib` in terms of fixed-width types * TYP: `dtype.__new__` cleanup and complex `ctypes` support * TYP: add missing `type ` keyword --- numpy/__init__.pyi | 119 ++++++---- numpy/_core/numeric.pyi | 10 +- numpy/_typing/__init__.py | 11 - numpy/_typing/_char_codes.py | 227 +++++++------------ numpy/ctypeslib/_ctypeslib.pyi | 187 ++++++++------- numpy/random/_generator.pyi | 6 +- numpy/random/mtrand.pyi | 8 +- numpy/typing/tests/data/reveal/ctypeslib.pyi | 103 +++++---- numpy/typing/tests/data/reveal/dtype.pyi | 8 +- 9 files changed, 313 insertions(+), 366 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index b8ede6c2a85a..aad324f54c4d 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -3,6 +3,7 @@ import builtins import ctypes as ct import datetime as dt import inspect +import sys from abc import abstractmethod from types import EllipsisType, ModuleType, TracebackType, MappingProxyType, GenericAlias from decimal import Decimal @@ -87,24 +88,15 @@ from numpy._typing import ( # type: ignore[deprecated] _Float64Codes, _Complex64Codes, _Complex128Codes, - _ByteCodes, - _ShortCodes, _IntCCodes, _IntPCodes, _LongCodes, _LongLongCodes, - _UByteCodes, - _UShortCodes, _UIntCCodes, _UIntPCodes, _ULongCodes, _ULongLongCodes, - _HalfCodes, - _SingleCodes, - _DoubleCodes, _LongDoubleCodes, - 
_CSingleCodes, - _CDoubleCodes, _CLongDoubleCodes, _DT64Codes, _TD64Codes, @@ -1105,7 +1097,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 @overload def __new__( cls, - dtype: type[float64 | ct.c_double] | _Float64Codes | _DoubleCodes | None, + dtype: type[float64 | ct.c_double] | _Float64Codes | None, align: builtins.bool = False, copy: builtins.bool = False, *, @@ -1151,7 +1143,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 copy: builtins.bool = False, *, metadata: dict[str, Any] = ..., - ) -> dtype[int_ | np.bool]: ... + ) -> dtype[int_ | Any]: ... @overload def __new__( cls, @@ -1160,7 +1152,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 copy: builtins.bool = False, *, metadata: dict[str, Any] = ..., - ) -> dtype[float64 | int_ | np.bool]: ... + ) -> dtype[float64 | Any]: ... @overload def __new__( cls, @@ -1169,7 +1161,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 copy: builtins.bool = False, *, metadata: dict[str, Any] = ..., - ) -> dtype[complex128 | float64 | int_ | np.bool]: ... + ) -> dtype[complex128 | Any]: ... @overload def __new__( cls, @@ -1219,7 +1211,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 @overload def __new__( cls, - dtype: _UInt8Codes | _UByteCodes | type[ct.c_uint8 | ct.c_ubyte], + dtype: _UInt8Codes | type[ct.c_uint8], align: builtins.bool = False, copy: builtins.bool = False, *, @@ -1228,7 +1220,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 @overload def __new__( cls, - dtype: _UInt16Codes | _UShortCodes | type[ct.c_uint16 | ct.c_ushort], + dtype: _UInt16Codes | type[ct.c_uint16 | ct.c_ushort], align: builtins.bool = False, copy: builtins.bool = False, *, @@ -1269,13 +1261,13 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 copy: builtins.bool = False, *, metadata: dict[builtins.str, Any] = ..., - ) -> dtype[ulong]: ... 
+ ) -> dtype[uint32 | uint64]: ... # `signedinteger` string-based representations and ctypes @overload def __new__( cls, - dtype: _Int8Codes | _ByteCodes | type[ct.c_int8 | ct.c_byte], + dtype: _Int8Codes | type[ct.c_int8], align: builtins.bool = False, copy: builtins.bool = False, *, @@ -1284,7 +1276,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 @overload def __new__( cls, - dtype: _Int16Codes | _ShortCodes | type[ct.c_int16 | ct.c_short], + dtype: _Int16Codes | type[ct.c_int16 | ct.c_short], align: builtins.bool = False, copy: builtins.bool = False, *, @@ -1325,13 +1317,13 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 copy: builtins.bool = False, *, metadata: dict[builtins.str, Any] = ..., - ) -> dtype[long]: ... + ) -> dtype[int32 | int64]: ... # `floating` string-based representations and ctypes @overload def __new__( cls, - dtype: _Float16Codes | _HalfCodes, + dtype: _Float16Codes, align: builtins.bool = False, copy: builtins.bool = False, *, @@ -1340,7 +1332,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 @overload def __new__( cls, - dtype: _Float32Codes | _SingleCodes, + dtype: _Float32Codes | type[ct.c_float], align: builtins.bool = False, copy: builtins.bool = False, *, @@ -1357,34 +1349,63 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 metadata: dict[builtins.str, Any] = ..., ) -> dtype[longdouble]: ... - # `complexfloating` string-based representations - @overload - def __new__( - cls, - dtype: _Complex64Codes | _CSingleCodes, - align: builtins.bool = False, - copy: builtins.bool = False, - *, - metadata: dict[builtins.str, Any] = ..., - ) -> dtype[complex64]: ... - @overload - def __new__( - cls, - dtype: _Complex128Codes | _CDoubleCodes, - align: builtins.bool = False, - copy: builtins.bool = False, - *, - metadata: dict[builtins.str, Any] = ..., - ) -> dtype[complex128]: ... 
- @overload - def __new__( - cls, - dtype: _CLongDoubleCodes, - align: builtins.bool = False, - copy: builtins.bool = False, - *, - metadata: dict[builtins.str, Any] = ..., - ) -> dtype[clongdouble]: ... + # `complexfloating` string-based representations and ctypes + if sys.version_info >= (3, 14) and sys.platform != "win32": + @overload + def __new__( + cls, + dtype: _Complex64Codes | type[ct.c_float_complex], + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[complex64]: ... + @overload + def __new__( + cls, + dtype: _Complex128Codes | type[ct.c_double_complex], + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[complex128]: ... + @overload + def __new__( + cls, + dtype: _CLongDoubleCodes | type[ct.c_longdouble_complex], + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[clongdouble]: ... + else: + @overload + def __new__( + cls, + dtype: _Complex64Codes, + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[complex64]: ... + @overload + def __new__( + cls, + dtype: _Complex128Codes, + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[complex128]: ... + @overload + def __new__( + cls, + dtype: _CLongDoubleCodes, + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[clongdouble]: ... 
# Miscellaneous string-based representations and ctypes @overload diff --git a/numpy/_core/numeric.pyi b/numpy/_core/numeric.pyi index f7399ef44856..ddf0bfa31977 100644 --- a/numpy/_core/numeric.pyi +++ b/numpy/_core/numeric.pyi @@ -36,13 +36,11 @@ from numpy._typing import ( _ArrayLikeInt_co, _ArrayLikeNumber_co, _ArrayLikeTD64_co, - _CDoubleCodes, _Complex128Codes, - _DoubleCodes, _DTypeLike, _DTypeLikeBool, _Float64Codes, - _IntCodes, + _IntPCodes, _NestedSequence, _NumberLike_co, _ScalarLike_co, @@ -660,9 +658,9 @@ type _ArrayLike1DNumber_co = _SupportsArray[np.dtype[_Number_co]] | Sequence[com type _ArrayLike1DTD64_co = _ArrayLike1D[_TD64_co] type _ArrayLike1DObject_co = _ArrayLike1D[np.object_] -type _DTypeLikeInt = type[int] | _IntCodes -type _DTypeLikeFloat64 = type[float] | _Float64Codes | _DoubleCodes -type _DTypeLikeComplex128 = type[complex] | _Complex128Codes | _CDoubleCodes +type _DTypeLikeInt = type[int] | _IntPCodes +type _DTypeLikeFloat64 = type[float] | _Float64Codes +type _DTypeLikeComplex128 = type[complex] | _Complex128Codes ### diff --git a/numpy/_typing/__init__.py b/numpy/_typing/__init__.py index d6b6105e79b8..4de797bd4e37 100644 --- a/numpy/_typing/__init__.py +++ b/numpy/_typing/__init__.py @@ -29,16 +29,12 @@ # from ._char_codes import ( _BoolCodes as _BoolCodes, - _ByteCodes as _ByteCodes, _BytesCodes as _BytesCodes, - _CDoubleCodes as _CDoubleCodes, _CharacterCodes as _CharacterCodes, _CLongDoubleCodes as _CLongDoubleCodes, _Complex64Codes as _Complex64Codes, _Complex128Codes as _Complex128Codes, _ComplexFloatingCodes as _ComplexFloatingCodes, - _CSingleCodes as _CSingleCodes, - _DoubleCodes as _DoubleCodes, _DT64Codes as _DT64Codes, _FlexibleCodes as _FlexibleCodes, _Float16Codes as _Float16Codes, @@ -46,14 +42,12 @@ _Float64Codes as _Float64Codes, _FloatingCodes as _FloatingCodes, _GenericCodes as _GenericCodes, - _HalfCodes as _HalfCodes, _InexactCodes as _InexactCodes, _Int8Codes as _Int8Codes, _Int16Codes as _Int16Codes, 
_Int32Codes as _Int32Codes, _Int64Codes as _Int64Codes, _IntCCodes as _IntCCodes, - _IntCodes as _IntCodes, _IntegerCodes as _IntegerCodes, _IntPCodes as _IntPCodes, _LongCodes as _LongCodes, @@ -61,24 +55,19 @@ _LongLongCodes as _LongLongCodes, _NumberCodes as _NumberCodes, _ObjectCodes as _ObjectCodes, - _ShortCodes as _ShortCodes, _SignedIntegerCodes as _SignedIntegerCodes, - _SingleCodes as _SingleCodes, _StrCodes as _StrCodes, _StringCodes as _StringCodes, _TD64Codes as _TD64Codes, - _UByteCodes as _UByteCodes, _UInt8Codes as _UInt8Codes, _UInt16Codes as _UInt16Codes, _UInt32Codes as _UInt32Codes, _UInt64Codes as _UInt64Codes, _UIntCCodes as _UIntCCodes, - _UIntCodes as _UIntCodes, _UIntPCodes as _UIntPCodes, _ULongCodes as _ULongCodes, _ULongLongCodes as _ULongLongCodes, _UnsignedIntegerCodes as _UnsignedIntegerCodes, - _UShortCodes as _UShortCodes, _VoidCodes as _VoidCodes, ) diff --git a/numpy/_typing/_char_codes.py b/numpy/_typing/_char_codes.py index 6d1e06dc894c..518f9b473e4a 100644 --- a/numpy/_typing/_char_codes.py +++ b/numpy/_typing/_char_codes.py @@ -1,51 +1,43 @@ from typing import Literal -type _BoolCodes = Literal[ - "bool", "bool_", - "?", "|?", "=?", "?", - "b1", "|b1", "=b1", "b1", -] # fmt: skip - -type _UInt8Codes = Literal["uint8", "u1", "|u1", "=u1", "u1"] -type _UInt16Codes = Literal["uint16", "u2", "|u2", "=u2", "u2"] -type _UInt32Codes = Literal["uint32", "u4", "|u4", "=u4", "u4"] -type _UInt64Codes = Literal["uint64", "u8", "|u8", "=u8", "u8"] +type _BoolCodes = Literal["bool", "bool_", "?", "b1", "|b1", "=b1", "b1"] -type _Int8Codes = Literal["int8", "i1", "|i1", "=i1", "i1"] -type _Int16Codes = Literal["int16", "i2", "|i2", "=i2", "i2"] +type _Int8Codes = Literal["int8", "byte", "b", "i1", "|i1", "=i1", "i1"] +type _Int16Codes = Literal["int16", "short", "h", "i2", "|i2", "=i2", "i2"] type _Int32Codes = Literal["int32", "i4", "|i4", "=i4", "i4"] type _Int64Codes = Literal["int64", "i8", "|i8", "=i8", "i8"] -type _Float16Codes = 
Literal["float16", "f2", "|f2", "=f2", "f2"] -type _Float32Codes = Literal["float32", "f4", "|f4", "=f4", "f4"] -type _Float64Codes = Literal["float64", "f8", "|f8", "=f8", "f8"] - -type _Complex64Codes = Literal["complex64", "c8", "|c8", "=c8", "c8"] -type _Complex128Codes = Literal["complex128", "c16", "|c16", "=c16", "c16"] +type _UInt8Codes = Literal["uint8", "ubyte", "B", "u1", "|u1", "=u1", "u1"] +type _UInt16Codes = Literal["uint16", "ushort", "H", "u2", "|u2", "=u2", "u2"] +type _UInt32Codes = Literal["uint32", "u4", "|u4", "=u4", "u4"] +type _UInt64Codes = Literal["uint64", "u8", "|u8", "=u8", "u8"] -type _ByteCodes = Literal["byte", "b", "|b", "=b", "b"] -type _ShortCodes = Literal["short", "h", "|h", "=h", "h"] type _IntCCodes = Literal["intc", "i", "|i", "=i", "i"] -type _IntPCodes = Literal["intp", "int", "int_", "n", "|n", "=n", "n"] type _LongCodes = Literal["long", "l", "|l", "=l", "l"] -type _IntCodes = _IntPCodes type _LongLongCodes = Literal["longlong", "q", "|q", "=q", "q"] +type _IntPCodes = Literal["intp", "int", "int_", "n", "|n", "=n", "n"] -type _UByteCodes = Literal["ubyte", "B", "|B", "=B", "B"] -type _UShortCodes = Literal["ushort", "H", "|H", "=H", "H"] type _UIntCCodes = Literal["uintc", "I", "|I", "=I", "I"] -type _UIntPCodes = Literal["uintp", "uint", "N", "|N", "=N", "N"] type _ULongCodes = Literal["ulong", "L", "|L", "=L", "L"] -type _UIntCodes = _UIntPCodes type _ULongLongCodes = Literal["ulonglong", "Q", "|Q", "=Q", "Q"] +type _UIntPCodes = Literal["uintp", "uint", "N", "|N", "=N", "N"] + +type _Float16Codes = Literal["float16", "half", "e", "f2", "|f2", "=f2", "f2"] +type _Float32Codes = Literal["float32", "single", "f", "f4", "|f4", "=f4", "f4"] +type _Float64Codes = Literal[ + "float64", "float", "double", "d", "f8", "|f8", "=f8", "f8" +] -type _HalfCodes = Literal["half", "e", "|e", "=e", "e"] -type _SingleCodes = Literal["single", "f", "|f", "=f", "f"] -type _DoubleCodes = Literal["double", "float", "d", "|d", "=d", "d"] 
type _LongDoubleCodes = Literal["longdouble", "g", "|g", "=g", "g"] -type _CSingleCodes = Literal["csingle", "F", "|F", "=F", "F"] -type _CDoubleCodes = Literal["cdouble", "complex", "D", "|D", "=D", "D"] +type _Complex64Codes = Literal[ + "complex64", "csingle", "F", "c8", "|c8", "=c8", "c8" +] + +type _Complex128Codes = Literal[ + "complex128", "complex", "cdouble", "D", "c16", "|c16", "=c16", "c16" +] + type _CLongDoubleCodes = Literal["clongdouble", "G", "|G", "=G", "G"] type _StrCodes = Literal["str", "str_", "unicode", "U", "|U", "=U", "U"] @@ -53,97 +45,57 @@ type _VoidCodes = Literal["void", "V", "|V", "=V", "V"] type _ObjectCodes = Literal["object", "object_", "O", "|O", "=O", "O"] +# datetime64 +type _DT64Codes_any = Literal["datetime64", "M", "M8", "|M8", "=M8", "M8"] +type _DT64Codes_date = Literal[ + "datetime64[Y]", "M8[Y]", "|M8[Y]", "=M8[Y]", "M8[Y]", + "datetime64[M]", "M8[M]", "|M8[M]", "=M8[M]", "M8[M]", + "datetime64[W]", "M8[W]", "|M8[W]", "=M8[W]", "M8[W]", + "datetime64[D]", "M8[D]", "|M8[D]", "=M8[D]", "M8[D]", +] # fmt: skip +type _DT64Codes_datetime = Literal[ + "datetime64[h]", "M8[h]", "|M8[h]", "=M8[h]", "M8[h]", + "datetime64[m]", "M8[m]", "|M8[m]", "=M8[m]", "M8[m]", + "datetime64[s]", "M8[s]", "|M8[s]", "=M8[s]", "M8[s]", + "datetime64[ms]", "M8[ms]", "|M8[ms]", "=M8[ms]", "M8[ms]", + "datetime64[us]", "M8[us]", "|M8[us]", "=M8[us]", "M8[us]", + "datetime64[μs]", "M8[μs]", "|M8[μs]", "=M8[μs]", "M8[μs]", +] # fmt: skip +type _DT64Codes_int = Literal[ + "datetime64[ns]", "M8[ns]", "|M8[ns]", "=M8[ns]", "M8[ns]", + "datetime64[ps]", "M8[ps]", "|M8[ps]", "=M8[ps]", "M8[ps]", + "datetime64[fs]", "M8[fs]", "|M8[fs]", "=M8[fs]", "M8[fs]", + "datetime64[as]", "M8[as]", "|M8[as]", "=M8[as]", "M8[as]", +] # fmt: skip type _DT64Codes = Literal[ - "datetime64", "|datetime64", "=datetime64", - "datetime64", - "datetime64[Y]", "|datetime64[Y]", "=datetime64[Y]", - "datetime64[Y]", - "datetime64[M]", "|datetime64[M]", "=datetime64[M]", - 
"datetime64[M]", - "datetime64[W]", "|datetime64[W]", "=datetime64[W]", - "datetime64[W]", - "datetime64[D]", "|datetime64[D]", "=datetime64[D]", - "datetime64[D]", - "datetime64[h]", "|datetime64[h]", "=datetime64[h]", - "datetime64[h]", - "datetime64[m]", "|datetime64[m]", "=datetime64[m]", - "datetime64[m]", - "datetime64[s]", "|datetime64[s]", "=datetime64[s]", - "datetime64[s]", - "datetime64[ms]", "|datetime64[ms]", "=datetime64[ms]", - "datetime64[ms]", - "datetime64[us]", "|datetime64[us]", "=datetime64[us]", - "datetime64[us]", - "datetime64[ns]", "|datetime64[ns]", "=datetime64[ns]", - "datetime64[ns]", - "datetime64[ps]", "|datetime64[ps]", "=datetime64[ps]", - "datetime64[ps]", - "datetime64[fs]", "|datetime64[fs]", "=datetime64[fs]", - "datetime64[fs]", - "datetime64[as]", "|datetime64[as]", "=datetime64[as]", - "datetime64[as]", - "M", "|M", "=M", "M", - "M8", "|M8", "=M8", "M8", - "M8[Y]", "|M8[Y]", "=M8[Y]", "M8[Y]", - "M8[M]", "|M8[M]", "=M8[M]", "M8[M]", - "M8[W]", "|M8[W]", "=M8[W]", "M8[W]", - "M8[D]", "|M8[D]", "=M8[D]", "M8[D]", - "M8[h]", "|M8[h]", "=M8[h]", "M8[h]", - "M8[m]", "|M8[m]", "=M8[m]", "M8[m]", - "M8[s]", "|M8[s]", "=M8[s]", "M8[s]", - "M8[ms]", "|M8[ms]", "=M8[ms]", "M8[ms]", - "M8[us]", "|M8[us]", "=M8[us]", "M8[us]", - "M8[ns]", "|M8[ns]", "=M8[ns]", "M8[ns]", - "M8[ps]", "|M8[ps]", "=M8[ps]", "M8[ps]", - "M8[fs]", "|M8[fs]", "=M8[fs]", "M8[fs]", - "M8[as]", "|M8[as]", "=M8[as]", "M8[as]", -] -type _TD64Codes = Literal[ - "timedelta64", "|timedelta64", "=timedelta64", - "timedelta64", - "timedelta64[Y]", "|timedelta64[Y]", "=timedelta64[Y]", - "timedelta64[Y]", - "timedelta64[M]", "|timedelta64[M]", "=timedelta64[M]", - "timedelta64[M]", - "timedelta64[W]", "|timedelta64[W]", "=timedelta64[W]", - "timedelta64[W]", - "timedelta64[D]", "|timedelta64[D]", "=timedelta64[D]", - "timedelta64[D]", - "timedelta64[h]", "|timedelta64[h]", "=timedelta64[h]", - "timedelta64[h]", - "timedelta64[m]", "|timedelta64[m]", "=timedelta64[m]", - 
"timedelta64[m]", - "timedelta64[s]", "|timedelta64[s]", "=timedelta64[s]", - "timedelta64[s]", - "timedelta64[ms]", "|timedelta64[ms]", "=timedelta64[ms]", - "timedelta64[ms]", - "timedelta64[us]", "|timedelta64[us]", "=timedelta64[us]", - "timedelta64[us]", - "timedelta64[ns]", "|timedelta64[ns]", "=timedelta64[ns]", - "timedelta64[ns]", - "timedelta64[ps]", "|timedelta64[ps]", "=timedelta64[ps]", - "timedelta64[ps]", - "timedelta64[fs]", "|timedelta64[fs]", "=timedelta64[fs]", - "timedelta64[fs]", - "timedelta64[as]", "|timedelta64[as]", "=timedelta64[as]", - "timedelta64[as]", - "m", "|m", "=m", "m", - "m8", "|m8", "=m8", "m8", - "m8[Y]", "|m8[Y]", "=m8[Y]", "m8[Y]", - "m8[M]", "|m8[M]", "=m8[M]", "m8[M]", - "m8[W]", "|m8[W]", "=m8[W]", "m8[W]", - "m8[D]", "|m8[D]", "=m8[D]", "m8[D]", - "m8[h]", "|m8[h]", "=m8[h]", "m8[h]", - "m8[m]", "|m8[m]", "=m8[m]", "m8[m]", - "m8[s]", "|m8[s]", "=m8[s]", "m8[s]", - "m8[ms]", "|m8[ms]", "=m8[ms]", "m8[ms]", - "m8[us]", "|m8[us]", "=m8[us]", "m8[us]", - "m8[ns]", "|m8[ns]", "=m8[ns]", "m8[ns]", - "m8[ps]", "|m8[ps]", "=m8[ps]", "m8[ps]", - "m8[fs]", "|m8[fs]", "=m8[fs]", "m8[fs]", - "m8[as]", "|m8[as]", "=m8[as]", "m8[as]", + _DT64Codes_any, + _DT64Codes_date, + _DT64Codes_datetime, + _DT64Codes_int, ] +# timedelta64 +type _TD64Codes_any = Literal["timedelta64", "m", "m8", "|m8", "=m8", "m8"] +type _TD64Codes_int = Literal[ + "timedelta64[Y]", "m8[Y]", "|m8[Y]", "=m8[Y]", "m8[Y]", + "timedelta64[M]", "m8[M]", "|m8[M]", "=m8[M]", "m8[M]", + "timedelta64[ns]", "m8[ns]", "|m8[ns]", "=m8[ns]", "m8[ns]", + "timedelta64[ps]", "m8[ps]", "|m8[ps]", "=m8[ps]", "m8[ps]", + "timedelta64[fs]", "m8[fs]", "|m8[fs]", "=m8[fs]", "m8[fs]", + "timedelta64[as]", "m8[as]", "|m8[as]", "=m8[as]", "m8[as]", +] # fmt: skip +type _TD64Codes_timedelta = Literal[ + "timedelta64[W]", "m8[W]", "|m8[W]", "=m8[W]", "m8[W]", + "timedelta64[D]", "m8[D]", "|m8[D]", "=m8[D]", "m8[D]", + "timedelta64[h]", "m8[h]", "|m8[h]", "=m8[h]", "m8[h]", + 
"timedelta64[m]", "m8[m]", "|m8[m]", "=m8[m]", "m8[m]", + "timedelta64[s]", "m8[s]", "|m8[s]", "=m8[s]", "m8[s]", + "timedelta64[ms]", "m8[ms]", "|m8[ms]", "=m8[ms]", "m8[ms]", + "timedelta64[us]", "m8[us]", "|m8[us]", "=m8[us]", "m8[us]", + "timedelta64[μs]", "m8[μs]", "|m8[μs]", "=m8[μs]", "m8[μs]", +] # fmt: skip +type _TD64Codes = Literal[_TD64Codes_any, _TD64Codes_int, _TD64Codes_timedelta] + # NOTE: `StringDType' has no scalar type, and therefore has no name that can # be passed to the `dtype` constructor type _StringCodes = Literal["T", "|T", "=T", "T"] @@ -154,52 +106,43 @@ # Another advantage of nesting, is that they always have a "flat" # `Literal.__args__`, which is a tuple of *literally* all its literal values. -type _UnsignedIntegerCodes = Literal[ - _UInt8Codes, - _UInt16Codes, - _UInt32Codes, - _UInt64Codes, - _UIntCodes, - _UByteCodes, - _UShortCodes, - _UIntCCodes, - _ULongCodes, - _ULongLongCodes, -] type _SignedIntegerCodes = Literal[ _Int8Codes, _Int16Codes, _Int32Codes, _Int64Codes, - _IntCodes, - _ByteCodes, - _ShortCodes, _IntCCodes, _LongCodes, _LongLongCodes, + _IntPCodes, +] +type _UnsignedIntegerCodes = Literal[ + _UInt8Codes, + _UInt16Codes, + _UInt32Codes, + _UInt64Codes, + _UIntCCodes, + _ULongCodes, + _ULongLongCodes, + _UIntPCodes, ] type _FloatingCodes = Literal[ _Float16Codes, _Float32Codes, _Float64Codes, - _HalfCodes, - _SingleCodes, - _DoubleCodes, - _LongDoubleCodes + _LongDoubleCodes, ] type _ComplexFloatingCodes = Literal[ _Complex64Codes, _Complex128Codes, - _CSingleCodes, - _CDoubleCodes, _CLongDoubleCodes, ] type _IntegerCodes = Literal[_UnsignedIntegerCodes, _SignedIntegerCodes] type _InexactCodes = Literal[_FloatingCodes, _ComplexFloatingCodes] type _NumberCodes = Literal[_IntegerCodes, _InexactCodes] -type _CharacterCodes = Literal[_StrCodes, _BytesCodes] -type _FlexibleCodes = Literal[_VoidCodes, _CharacterCodes] +type _CharacterCodes = Literal[_BytesCodes, _StrCodes] +type _FlexibleCodes = Literal[_CharacterCodes, 
_VoidCodes] type _GenericCodes = Literal[ _BoolCodes, diff --git a/numpy/ctypeslib/_ctypeslib.pyi b/numpy/ctypeslib/_ctypeslib.pyi index 1d4780aeb422..2e88d7d9464f 100644 --- a/numpy/ctypeslib/_ctypeslib.pyi +++ b/numpy/ctypeslib/_ctypeslib.pyi @@ -1,28 +1,9 @@ -# NOTE: Numpy's mypy plugin is used for importing the correct -# platform-specific `ctypes._SimpleCData[int]` sub-type -import ctypes +import ctypes as ct from _typeshed import StrOrBytesPath from collections.abc import Iterable, Sequence -from ctypes import c_int64 as _c_intp from typing import Any, ClassVar, Literal as L, overload import numpy as np -from numpy import ( - byte, - double, - intc, - long, - longdouble, - longlong, - short, - single, - ubyte, - uintc, - ulong, - ulonglong, - ushort, - void, -) from numpy._core._internal import _ctypes from numpy._core.multiarray import flagsobj from numpy._typing import ( @@ -31,21 +12,27 @@ from numpy._typing import ( _AnyShape, _ArrayLike, _BoolCodes, - _ByteCodes, - _DoubleCodes, _DTypeLike, + _Float32Codes, + _Float64Codes, + _Int8Codes, + _Int16Codes, + _Int32Codes, + _Int64Codes, _IntCCodes, + _IntPCodes, _LongCodes, _LongDoubleCodes, _LongLongCodes, _ShapeLike, - _ShortCodes, - _SingleCodes, - _UByteCodes, + _UInt8Codes, + _UInt16Codes, + _UInt32Codes, + _UInt64Codes, _UIntCCodes, + _UIntPCodes, _ULongCodes, _ULongLongCodes, - _UShortCodes, _VoidDTypeLike, ) @@ -61,7 +48,7 @@ type _FlagsKind = L[ ] # TODO: Add a shape type parameter -class _ndptr[OptionalDTypeT: np.dtype | None](ctypes.c_void_p): +class _ndptr[OptionalDTypeT: np.dtype | None](ct.c_void_p): # In practice these 4 classvars are defined in the dynamic class # returned by `ndpointer` _dtype_: OptionalDTypeT = ... @@ -83,10 +70,11 @@ class _concrete_ndptr[DTypeT: np.dtype](_ndptr[DTypeT]): @property def contents(self) -> np.ndarray[_AnyShape, DTypeT]: ... -def load_library(libname: StrOrBytesPath, loader_path: StrOrBytesPath) -> ctypes.CDLL: ... 
+def load_library(libname: StrOrBytesPath, loader_path: StrOrBytesPath) -> ct.CDLL: ... -c_intp = _c_intp +c_intp = ct.c_int64 # most platforms are 64-bit nowadays +# @overload def ndpointer( dtype: None = None, @@ -125,105 +113,110 @@ def ndpointer( flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = None, ) -> type[_ndptr[np.dtype]]: ... -@overload -def as_ctypes_type(dtype: _BoolCodes | _DTypeLike[np.bool] | type[ctypes.c_bool]) -> type[ctypes.c_bool]: ... -@overload -def as_ctypes_type(dtype: _ByteCodes | _DTypeLike[byte] | type[ctypes.c_byte]) -> type[ctypes.c_byte]: ... -@overload -def as_ctypes_type(dtype: _ShortCodes | _DTypeLike[short] | type[ctypes.c_short]) -> type[ctypes.c_short]: ... -@overload -def as_ctypes_type(dtype: _IntCCodes | _DTypeLike[intc] | type[ctypes.c_int]) -> type[ctypes.c_int]: ... -@overload -def as_ctypes_type(dtype: _LongCodes | _DTypeLike[long] | type[ctypes.c_long]) -> type[ctypes.c_long]: ... -@overload -def as_ctypes_type(dtype: type[int]) -> type[c_intp]: ... -@overload -def as_ctypes_type(dtype: _LongLongCodes | _DTypeLike[longlong] | type[ctypes.c_longlong]) -> type[ctypes.c_longlong]: ... -@overload -def as_ctypes_type(dtype: _UByteCodes | _DTypeLike[ubyte] | type[ctypes.c_ubyte]) -> type[ctypes.c_ubyte]: ... -@overload -def as_ctypes_type(dtype: _UShortCodes | _DTypeLike[ushort] | type[ctypes.c_ushort]) -> type[ctypes.c_ushort]: ... -@overload -def as_ctypes_type(dtype: _UIntCCodes | _DTypeLike[uintc] | type[ctypes.c_uint]) -> type[ctypes.c_uint]: ... -@overload -def as_ctypes_type(dtype: _ULongCodes | _DTypeLike[ulong] | type[ctypes.c_ulong]) -> type[ctypes.c_ulong]: ... -@overload -def as_ctypes_type(dtype: _ULongLongCodes | _DTypeLike[ulonglong] | type[ctypes.c_ulonglong]) -> type[ctypes.c_ulonglong]: ... -@overload -def as_ctypes_type(dtype: _SingleCodes | _DTypeLike[single] | type[ctypes.c_float]) -> type[ctypes.c_float]: ... 
-@overload -def as_ctypes_type(dtype: _DoubleCodes | _DTypeLike[double] | type[float | ctypes.c_double]) -> type[ctypes.c_double]: ... -@overload -def as_ctypes_type(dtype: _LongDoubleCodes | _DTypeLike[longdouble] | type[ctypes.c_longdouble]) -> type[ctypes.c_longdouble]: ... -@overload -def as_ctypes_type(dtype: _VoidDTypeLike) -> type[Any]: ... # `ctypes.Union` or `ctypes.Structure` -@overload +# +@overload # bool +def as_ctypes_type(dtype: _BoolCodes | _DTypeLike[np.bool] | type[ct.c_bool]) -> type[ct.c_bool]: ... +@overload # int8 +def as_ctypes_type(dtype: _Int8Codes | _DTypeLike[np.int8] | type[ct.c_int8]) -> type[ct.c_int8]: ... +@overload # int16 +def as_ctypes_type(dtype: _Int16Codes | _DTypeLike[np.int16] | type[ct.c_int16]) -> type[ct.c_int16]: ... +@overload # int32 +def as_ctypes_type(dtype: _Int32Codes | _DTypeLike[np.int32] | type[ct.c_int32]) -> type[ct.c_int32]: ... +@overload # int64 +def as_ctypes_type(dtype: _Int64Codes | _DTypeLike[np.int64] | type[ct.c_int64]) -> type[ct.c_int64]: ... +@overload # intc +def as_ctypes_type(dtype: _IntCCodes | type[ct.c_int]) -> type[ct.c_int]: ... +@overload # long +def as_ctypes_type(dtype: _LongCodes | type[ct.c_long]) -> type[ct.c_long]: ... +@overload # longlong +def as_ctypes_type(dtype: _LongLongCodes | type[ct.c_longlong]) -> type[ct.c_longlong]: ... +@overload # intp +def as_ctypes_type(dtype: _IntPCodes | type[ct.c_ssize_t] | type[int]) -> type[ct.c_ssize_t]: ... +@overload # uint8 +def as_ctypes_type(dtype: _UInt8Codes | _DTypeLike[np.uint8] | type[ct.c_uint8]) -> type[ct.c_uint8]: ... +@overload # uint16 +def as_ctypes_type(dtype: _UInt16Codes | _DTypeLike[np.uint16] | type[ct.c_uint16]) -> type[ct.c_uint16]: ... +@overload # uint32 +def as_ctypes_type(dtype: _UInt32Codes | _DTypeLike[np.uint32] | type[ct.c_uint32]) -> type[ct.c_uint32]: ... +@overload # uint64 +def as_ctypes_type(dtype: _UInt64Codes | _DTypeLike[np.uint64] | type[ct.c_uint64]) -> type[ct.c_uint64]: ... 
+@overload # uintc +def as_ctypes_type(dtype: _UIntCCodes | type[ct.c_uint]) -> type[ct.c_uint]: ... +@overload # ulong +def as_ctypes_type(dtype: _ULongCodes | type[ct.c_ulong]) -> type[ct.c_ulong]: ... +@overload # ulonglong +def as_ctypes_type(dtype: _ULongLongCodes | type[ct.c_ulonglong]) -> type[ct.c_ulonglong]: ... +@overload # uintp +def as_ctypes_type(dtype: _UIntPCodes | type[ct.c_size_t]) -> type[ct.c_size_t]: ... +@overload # float32 +def as_ctypes_type(dtype: _Float32Codes | _DTypeLike[np.float32] | type[ct.c_float]) -> type[ct.c_float]: ... +@overload # float64 +def as_ctypes_type(dtype: _Float64Codes | _DTypeLike[np.float64] | type[float | ct.c_double]) -> type[ct.c_double]: ... +@overload # longdouble +def as_ctypes_type(dtype: _LongDoubleCodes | _DTypeLike[np.longdouble] | type[ct.c_longdouble]) -> type[ct.c_longdouble]: ... +@overload # void +def as_ctypes_type(dtype: _VoidDTypeLike) -> type[Any]: ... # `ct.Union` or `ct.Structure` +@overload # fallback def as_ctypes_type(dtype: str) -> type[Any]: ... +# @overload -def as_array(obj: ctypes._PointerLike, shape: Sequence[int]) -> NDArray[Any]: ... +def as_array(obj: ct._PointerLike, shape: Sequence[int]) -> NDArray[Any]: ... @overload def as_array[ScalarT: np.generic](obj: _ArrayLike[ScalarT], shape: _ShapeLike | None = None) -> NDArray[ScalarT]: ... @overload def as_array(obj: object, shape: _ShapeLike | None = None) -> NDArray[Any]: ... +# @overload -def as_ctypes(obj: np.bool) -> ctypes.c_bool: ... -@overload -def as_ctypes(obj: byte) -> ctypes.c_byte: ... -@overload -def as_ctypes(obj: short) -> ctypes.c_short: ... -@overload -def as_ctypes(obj: intc) -> ctypes.c_int: ... -@overload -def as_ctypes(obj: long) -> ctypes.c_long: ... +def as_ctypes(obj: np.bool) -> ct.c_bool: ... @overload -def as_ctypes(obj: longlong) -> ctypes.c_longlong: ... # type: ignore[overload-cannot-match] +def as_ctypes(obj: np.int8) -> ct.c_int8: ... @overload -def as_ctypes(obj: ubyte) -> ctypes.c_ubyte: ... 
+def as_ctypes(obj: np.int16) -> ct.c_int16: ... @overload -def as_ctypes(obj: ushort) -> ctypes.c_ushort: ... +def as_ctypes(obj: np.int32) -> ct.c_int32: ... @overload -def as_ctypes(obj: uintc) -> ctypes.c_uint: ... +def as_ctypes(obj: np.int64) -> ct.c_int64: ... @overload -def as_ctypes(obj: ulong) -> ctypes.c_ulong: ... +def as_ctypes(obj: np.uint8) -> ct.c_uint8: ... @overload -def as_ctypes(obj: ulonglong) -> ctypes.c_ulonglong: ... # type: ignore[overload-cannot-match] +def as_ctypes(obj: np.uint16) -> ct.c_uint16: ... @overload -def as_ctypes(obj: single) -> ctypes.c_float: ... +def as_ctypes(obj: np.uint32) -> ct.c_uint32: ... @overload -def as_ctypes(obj: double) -> ctypes.c_double: ... +def as_ctypes(obj: np.uint64) -> ct.c_uint64: ... @overload -def as_ctypes(obj: longdouble) -> ctypes.c_longdouble: ... +def as_ctypes(obj: np.float32) -> ct.c_float: ... @overload -def as_ctypes(obj: void) -> Any: ... # `ctypes.Union` or `ctypes.Structure` +def as_ctypes(obj: np.float64) -> ct.c_double: ... @overload -def as_ctypes(obj: NDArray[np.bool]) -> ctypes.Array[ctypes.c_bool]: ... +def as_ctypes(obj: np.longdouble) -> ct.c_longdouble: ... @overload -def as_ctypes(obj: NDArray[byte]) -> ctypes.Array[ctypes.c_byte]: ... +def as_ctypes(obj: np.void) -> Any: ... # `ct.Union` or `ct.Structure` @overload -def as_ctypes(obj: NDArray[short]) -> ctypes.Array[ctypes.c_short]: ... +def as_ctypes(obj: NDArray[np.bool]) -> ct.Array[ct.c_bool]: ... @overload -def as_ctypes(obj: NDArray[intc]) -> ctypes.Array[ctypes.c_int]: ... +def as_ctypes(obj: NDArray[np.int8]) -> ct.Array[ct.c_int8]: ... @overload -def as_ctypes(obj: NDArray[long]) -> ctypes.Array[ctypes.c_long]: ... +def as_ctypes(obj: NDArray[np.int16]) -> ct.Array[ct.c_int16]: ... @overload -def as_ctypes(obj: NDArray[longlong]) -> ctypes.Array[ctypes.c_longlong]: ... # type: ignore[overload-cannot-match] +def as_ctypes(obj: NDArray[np.int32]) -> ct.Array[ct.c_int32]: ... 
@overload -def as_ctypes(obj: NDArray[ubyte]) -> ctypes.Array[ctypes.c_ubyte]: ... +def as_ctypes(obj: NDArray[np.int64]) -> ct.Array[ct.c_int64]: ... @overload -def as_ctypes(obj: NDArray[ushort]) -> ctypes.Array[ctypes.c_ushort]: ... +def as_ctypes(obj: NDArray[np.uint8]) -> ct.Array[ct.c_uint8]: ... @overload -def as_ctypes(obj: NDArray[uintc]) -> ctypes.Array[ctypes.c_uint]: ... +def as_ctypes(obj: NDArray[np.uint16]) -> ct.Array[ct.c_uint16]: ... @overload -def as_ctypes(obj: NDArray[ulong]) -> ctypes.Array[ctypes.c_ulong]: ... +def as_ctypes(obj: NDArray[np.uint32]) -> ct.Array[ct.c_uint32]: ... @overload -def as_ctypes(obj: NDArray[ulonglong]) -> ctypes.Array[ctypes.c_ulonglong]: ... # type: ignore[overload-cannot-match] +def as_ctypes(obj: NDArray[np.uint64]) -> ct.Array[ct.c_uint64]: ... @overload -def as_ctypes(obj: NDArray[single]) -> ctypes.Array[ctypes.c_float]: ... +def as_ctypes(obj: NDArray[np.float32]) -> ct.Array[ct.c_float]: ... @overload -def as_ctypes(obj: NDArray[double]) -> ctypes.Array[ctypes.c_double]: ... +def as_ctypes(obj: NDArray[np.float64]) -> ct.Array[ct.c_double]: ... @overload -def as_ctypes(obj: NDArray[longdouble]) -> ctypes.Array[ctypes.c_longdouble]: ... +def as_ctypes(obj: NDArray[np.longdouble]) -> ct.Array[ct.c_longdouble]: ... @overload -def as_ctypes(obj: NDArray[void]) -> ctypes.Array[Any]: ... # `ctypes.Union` or `ctypes.Structure` +def as_ctypes(obj: NDArray[np.void]) -> ct.Array[Any]: ... 
# `ct.Union` or `ct.Structure` diff --git a/numpy/random/_generator.pyi b/numpy/random/_generator.pyi index f3f911b98ce7..f3fb9bb7baf5 100644 --- a/numpy/random/_generator.pyi +++ b/numpy/random/_generator.pyi @@ -9,7 +9,6 @@ from numpy._typing import ( _ArrayLike, _ArrayLikeFloat_co, _ArrayLikeInt_co, - _DoubleCodes, _DTypeLike, _Float32Codes, _Float64Codes, @@ -17,7 +16,6 @@ from numpy._typing import ( _Int64Codes, _NestedSequence, _ShapeLike, - _SingleCodes, ) from .bit_generator import BitGenerator, SeedSequence @@ -27,8 +25,8 @@ type _ArrayF32 = NDArray[np.float32] type _ArrayF64 = NDArray[np.float64] type _DTypeLikeI64 = _DTypeLike[np.int64] | _Int64Codes -type _DTypeLikeF32 = _DTypeLike[np.float32] | _Float32Codes | _SingleCodes -type _DTypeLikeF64 = type[float] | _DTypeLike[np.float64] | _Float64Codes | _DoubleCodes +type _DTypeLikeF32 = _DTypeLike[np.float32] | _Float32Codes +type _DTypeLikeF64 = type[float] | _DTypeLike[np.float64] | _Float64Codes # we use `str` to avoid type-checker performance issues because of the many `Literal` variants type _DTypeLikeFloat = type[float] | _DTypeLike[np.float32 | np.float64] | str diff --git a/numpy/random/mtrand.pyi b/numpy/random/mtrand.pyi index c2cbb17aa98d..7a654971f19b 100644 --- a/numpy/random/mtrand.pyi +++ b/numpy/random/mtrand.pyi @@ -29,7 +29,7 @@ from numpy._typing import ( _Int16Codes, _Int32Codes, _Int64Codes, - _IntCodes, + _IntPCodes, _LongCodes, _ShapeLike, _SupportsDType, @@ -37,7 +37,7 @@ from numpy._typing import ( _UInt16Codes, _UInt32Codes, _UInt64Codes, - _UIntCodes, + _UIntPCodes, _ULongCodes, ) from numpy.random.bit_generator import BitGenerator @@ -209,7 +209,7 @@ class RandomState: low: int, high: int | None = None, size: None = None, - dtype: dtype[uint] | type[uint] | _UIntCodes | _SupportsDType[dtype[uint]] = ..., # noqa: E501 + dtype: dtype[uint] | type[uint] | _UIntPCodes | _SupportsDType[dtype[uint]] = ..., # noqa: E501 ) -> uint: ... 
@overload def randint( @@ -257,7 +257,7 @@ class RandomState: low: int, high: int | None = None, size: None = None, - dtype: dtype[int_] | type[int_] | _IntCodes | _SupportsDType[dtype[int_]] = ..., # noqa: E501 + dtype: dtype[int_] | type[int_] | _IntPCodes | _SupportsDType[dtype[int_]] = ..., # noqa: E501 ) -> int_: ... @overload def randint( diff --git a/numpy/typing/tests/data/reveal/ctypeslib.pyi b/numpy/typing/tests/data/reveal/ctypeslib.pyi index 0564d725cf62..e3558925e4d0 100644 --- a/numpy/typing/tests/data/reveal/ctypeslib.pyi +++ b/numpy/typing/tests/data/reveal/ctypeslib.pyi @@ -6,19 +6,17 @@ import numpy.typing as npt from numpy import ctypeslib AR_bool: npt.NDArray[np.bool] -AR_ubyte: npt.NDArray[np.ubyte] -AR_ushort: npt.NDArray[np.ushort] -AR_uintc: npt.NDArray[np.uintc] -AR_ulong: npt.NDArray[np.ulong] -AR_ulonglong: npt.NDArray[np.ulonglong] -AR_byte: npt.NDArray[np.byte] -AR_short: npt.NDArray[np.short] -AR_intc: npt.NDArray[np.intc] -AR_long: npt.NDArray[np.long] -AR_longlong: npt.NDArray[np.longlong] -AR_single: npt.NDArray[np.single] -AR_double: npt.NDArray[np.double] -AR_longdouble: npt.NDArray[np.longdouble] +AR_i8: npt.NDArray[np.int8] +AR_u8: npt.NDArray[np.uint8] +AR_i16: npt.NDArray[np.int16] +AR_u16: npt.NDArray[np.uint16] +AR_i32: npt.NDArray[np.int32] +AR_u32: npt.NDArray[np.uint32] +AR_i64: npt.NDArray[np.int64] +AR_u64: npt.NDArray[np.uint64] +AR_f32: npt.NDArray[np.float32] +AR_f64: npt.NDArray[np.float64] +AR_f80: npt.NDArray[np.longdouble] AR_void: npt.NDArray[np.void] pointer: ct._Pointer[Any] @@ -33,49 +31,56 @@ assert_type(np.ctypeslib.ndpointer(np.int64, shape=(10, 3)), type[ctypeslib._con assert_type(np.ctypeslib.ndpointer(int, shape=(1,)), type[np.ctypeslib._concrete_ndptr[np.dtype]]) assert_type(np.ctypeslib.as_ctypes_type(np.bool), type[ct.c_bool]) -assert_type(np.ctypeslib.as_ctypes_type(np.ubyte), type[ct.c_ubyte]) -assert_type(np.ctypeslib.as_ctypes_type(np.ushort), type[ct.c_ushort]) 
-assert_type(np.ctypeslib.as_ctypes_type(np.uintc), type[ct.c_uint]) -assert_type(np.ctypeslib.as_ctypes_type(np.byte), type[ct.c_byte]) -assert_type(np.ctypeslib.as_ctypes_type(np.short), type[ct.c_short]) -assert_type(np.ctypeslib.as_ctypes_type(np.intc), type[ct.c_int]) -assert_type(np.ctypeslib.as_ctypes_type(np.single), type[ct.c_float]) -assert_type(np.ctypeslib.as_ctypes_type(np.double), type[ct.c_double]) -assert_type(np.ctypeslib.as_ctypes_type(ct.c_double), type[ct.c_double]) +assert_type(np.ctypeslib.as_ctypes_type(np.int8), type[ct.c_int8]) +assert_type(np.ctypeslib.as_ctypes_type(np.uint8), type[ct.c_uint8]) +assert_type(np.ctypeslib.as_ctypes_type(np.int16), type[ct.c_int16]) +assert_type(np.ctypeslib.as_ctypes_type(np.uint16), type[ct.c_uint16]) +assert_type(np.ctypeslib.as_ctypes_type(np.int32), type[ct.c_int32]) +assert_type(np.ctypeslib.as_ctypes_type(np.uint32), type[ct.c_uint32]) +assert_type(np.ctypeslib.as_ctypes_type(np.int64), type[ct.c_int64]) +assert_type(np.ctypeslib.as_ctypes_type(np.uint64), type[ct.c_uint64]) +assert_type(np.ctypeslib.as_ctypes_type(np.float32), type[ct.c_float]) +assert_type(np.ctypeslib.as_ctypes_type(np.float64), type[ct.c_double]) +assert_type(np.ctypeslib.as_ctypes_type(np.longdouble), type[ct.c_longdouble]) +assert_type(np.ctypeslib.as_ctypes_type("?"), type[ct.c_bool]) +assert_type(np.ctypeslib.as_ctypes_type("intp"), type[ct.c_ssize_t]) assert_type(np.ctypeslib.as_ctypes_type("q"), type[ct.c_longlong]) +assert_type(np.ctypeslib.as_ctypes_type("i8"), type[ct.c_int64]) +assert_type(np.ctypeslib.as_ctypes_type("f8"), type[ct.c_double]) assert_type(np.ctypeslib.as_ctypes_type([("i8", np.int64), ("f8", np.float64)]), type[Any]) -assert_type(np.ctypeslib.as_ctypes_type("i8"), type[Any]) -assert_type(np.ctypeslib.as_ctypes_type("f8"), type[Any]) assert_type(np.ctypeslib.as_ctypes(AR_bool.take(0)), ct.c_bool) -assert_type(np.ctypeslib.as_ctypes(AR_ubyte.take(0)), ct.c_ubyte) 
-assert_type(np.ctypeslib.as_ctypes(AR_ushort.take(0)), ct.c_ushort) -assert_type(np.ctypeslib.as_ctypes(AR_uintc.take(0)), ct.c_uint) +assert_type(np.ctypeslib.as_ctypes(AR_u8.take(0)), ct.c_uint8) +assert_type(np.ctypeslib.as_ctypes(AR_u16.take(0)), ct.c_uint16) +assert_type(np.ctypeslib.as_ctypes(AR_u32.take(0)), ct.c_uint32) -assert_type(np.ctypeslib.as_ctypes(AR_byte.take(0)), ct.c_byte) -assert_type(np.ctypeslib.as_ctypes(AR_short.take(0)), ct.c_short) -assert_type(np.ctypeslib.as_ctypes(AR_intc.take(0)), ct.c_int) -assert_type(np.ctypeslib.as_ctypes(AR_single.take(0)), ct.c_float) -assert_type(np.ctypeslib.as_ctypes(AR_double.take(0)), ct.c_double) -assert_type(np.ctypeslib.as_ctypes(AR_void.take(0)), Any) +assert_type(np.ctypeslib.as_ctypes(np.bool()), ct.c_bool) +assert_type(np.ctypeslib.as_ctypes(np.int8()), ct.c_int8) +assert_type(np.ctypeslib.as_ctypes(np.uint8()), ct.c_uint8) +assert_type(np.ctypeslib.as_ctypes(np.int16()), ct.c_int16) +assert_type(np.ctypeslib.as_ctypes(np.uint16()), ct.c_uint16) +assert_type(np.ctypeslib.as_ctypes(np.int32()), ct.c_int32) +assert_type(np.ctypeslib.as_ctypes(np.uint32()), ct.c_uint32) +assert_type(np.ctypeslib.as_ctypes(np.int64()), ct.c_int64) +assert_type(np.ctypeslib.as_ctypes(np.uint64()), ct.c_uint64) +assert_type(np.ctypeslib.as_ctypes(np.float32()), ct.c_float) +assert_type(np.ctypeslib.as_ctypes(np.float64()), ct.c_double) +assert_type(np.ctypeslib.as_ctypes(np.longdouble()), ct.c_longdouble) +assert_type(np.ctypeslib.as_ctypes(np.void(b"")), Any) assert_type(np.ctypeslib.as_ctypes(AR_bool), ct.Array[ct.c_bool]) -assert_type(np.ctypeslib.as_ctypes(AR_ubyte), ct.Array[ct.c_ubyte]) -assert_type(np.ctypeslib.as_ctypes(AR_ushort), ct.Array[ct.c_ushort]) -assert_type(np.ctypeslib.as_ctypes(AR_uintc), ct.Array[ct.c_uint]) -assert_type(np.ctypeslib.as_ctypes(AR_byte), ct.Array[ct.c_byte]) -assert_type(np.ctypeslib.as_ctypes(AR_short), ct.Array[ct.c_short]) -assert_type(np.ctypeslib.as_ctypes(AR_intc), 
ct.Array[ct.c_int]) -assert_type(np.ctypeslib.as_ctypes(AR_single), ct.Array[ct.c_float]) -assert_type(np.ctypeslib.as_ctypes(AR_double), ct.Array[ct.c_double]) +assert_type(np.ctypeslib.as_ctypes(AR_i8), ct.Array[ct.c_int8]) +assert_type(np.ctypeslib.as_ctypes(AR_u8), ct.Array[ct.c_uint8]) +assert_type(np.ctypeslib.as_ctypes(AR_i16), ct.Array[ct.c_int16]) +assert_type(np.ctypeslib.as_ctypes(AR_u16), ct.Array[ct.c_uint16]) +assert_type(np.ctypeslib.as_ctypes(AR_i32), ct.Array[ct.c_int32]) +assert_type(np.ctypeslib.as_ctypes(AR_u32), ct.Array[ct.c_uint32]) +assert_type(np.ctypeslib.as_ctypes(AR_i64), ct.Array[ct.c_int64]) +assert_type(np.ctypeslib.as_ctypes(AR_u64), ct.Array[ct.c_uint64]) +assert_type(np.ctypeslib.as_ctypes(AR_f32), ct.Array[ct.c_float]) +assert_type(np.ctypeslib.as_ctypes(AR_f64), ct.Array[ct.c_double]) +assert_type(np.ctypeslib.as_ctypes(AR_f80), ct.Array[ct.c_longdouble]) assert_type(np.ctypeslib.as_ctypes(AR_void), ct.Array[Any]) -assert_type(np.ctypeslib.as_array(AR_ubyte), npt.NDArray[np.ubyte]) +assert_type(np.ctypeslib.as_array(AR_u8), npt.NDArray[np.ubyte]) assert_type(np.ctypeslib.as_array(1), npt.NDArray[Any]) assert_type(np.ctypeslib.as_array(pointer), npt.NDArray[Any]) - -assert_type(np.ctypeslib.as_ctypes_type(np.long), type[ct.c_long]) -assert_type(np.ctypeslib.as_ctypes_type(np.ulong), type[ct.c_ulong]) -assert_type(np.ctypeslib.as_ctypes(AR_ulong), ct.Array[ct.c_ulong]) -assert_type(np.ctypeslib.as_ctypes(AR_long), ct.Array[ct.c_long]) -assert_type(np.ctypeslib.as_ctypes(AR_long.take(0)), ct.c_long) -assert_type(np.ctypeslib.as_ctypes(AR_ulong.take(0)), ct.c_ulong) diff --git a/numpy/typing/tests/data/reveal/dtype.pyi b/numpy/typing/tests/data/reveal/dtype.pyi index f8f939d4609f..48e9d54b7951 100644 --- a/numpy/typing/tests/data/reveal/dtype.pyi +++ b/numpy/typing/tests/data/reveal/dtype.pyi @@ -44,9 +44,9 @@ assert_type(np.dtype("str"), np.dtype[np.str_]) # Python types assert_type(np.dtype(bool), np.dtype[np.bool]) 
-assert_type(np.dtype(int), np.dtype[np.int_ | np.bool]) -assert_type(np.dtype(float), np.dtype[np.float64 | np.int_ | np.bool]) -assert_type(np.dtype(complex), np.dtype[np.complex128 | np.float64 | np.int_ | np.bool]) +assert_type(np.dtype(int), np.dtype[np.int_ | Any]) +assert_type(np.dtype(float), np.dtype[np.float64 | Any]) +assert_type(np.dtype(complex), np.dtype[np.complex128 | Any]) assert_type(np.dtype(py_object), np.dtype[np.object_]) assert_type(np.dtype(str), np.dtype[np.str_]) assert_type(np.dtype(bytes), np.dtype[np.bytes_]) @@ -64,7 +64,7 @@ assert_type(np.dtype(Fraction), np.dtype[np.object_]) assert_type(np.dtype("?"), np.dtype[np.bool]) assert_type(np.dtype("|b1"), np.dtype[np.bool]) assert_type(np.dtype("u1"), np.dtype[np.uint8]) -assert_type(np.dtype("l"), np.dtype[np.long]) +assert_type(np.dtype("l"), np.dtype[np.int32 | np.int64]) assert_type(np.dtype("longlong"), np.dtype[np.longlong]) assert_type(np.dtype(">g"), np.dtype[np.longdouble]) assert_type(np.dtype(cs_integer), np.dtype[np.integer]) From a48fb8562cce7e564f50f84f3717621b08edf490 Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Tue, 9 Dec 2025 04:20:42 -0500 Subject: [PATCH 0996/1718] BUG: Add missing return status check of NpyIter_EnableExternalLoop(). 
(#30404) --- numpy/_core/src/multiarray/nditer_pywrap.c | 5 ++++- numpy/_core/tests/test_nditer.py | 7 +++++++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/numpy/_core/src/multiarray/nditer_pywrap.c b/numpy/_core/src/multiarray/nditer_pywrap.c index a0e1b09a584c..992bc013af3a 100644 --- a/numpy/_core/src/multiarray/nditer_pywrap.c +++ b/numpy/_core/src/multiarray/nditer_pywrap.c @@ -1423,7 +1423,10 @@ npyiter_enable_external_loop( return NULL; } - NpyIter_EnableExternalLoop(self->iter); + if (NpyIter_EnableExternalLoop(self->iter) != NPY_SUCCEED) { + return NULL; + } + /* EnableExternalLoop invalidates cached values */ if (npyiter_cache_values(self) < 0) { return NULL; diff --git a/numpy/_core/tests/test_nditer.py b/numpy/_core/tests/test_nditer.py index 943c25cdaa13..d2fc69a03b5f 100644 --- a/numpy/_core/tests/test_nditer.py +++ b/numpy/_core/tests/test_nditer.py @@ -3206,6 +3206,13 @@ def test_iter_too_large_with_multiindex(): with assert_raises(ValueError): _multiarray_tests.test_nditer_too_large(arrays, i * 2 + 1, mode) + +def test_invalid_call_of_enable_external_loop(): + with pytest.raises(ValueError, + match='Iterator flag EXTERNAL_LOOP cannot be used'): + np.nditer(([[1], [2]], [3, 4]), ['multi_index']).enable_external_loop() + + def test_writebacks(): a = np.arange(6, dtype='f4') au = a.byteswap() From 3a3dc5e5d12da326d518ae080bd977a8874108de Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 9 Dec 2025 15:33:50 +0100 Subject: [PATCH 0997/1718] TYP: `linalg`: improved `*inv` and `*vals` return types --- numpy/linalg/_linalg.pyi | 229 +++++++++++++--------- numpy/typing/tests/data/fail/linalg.pyi | 19 +- numpy/typing/tests/data/reveal/linalg.pyi | 32 +-- 3 files changed, 163 insertions(+), 117 deletions(-) diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index 6cbd3b43a07a..00135bc5d15b 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -11,7 +11,6 @@ from typing import ( import numpy as np from numpy 
import ( - complex128, complexfloating, float64, floating, @@ -23,7 +22,7 @@ from numpy import ( vecdot, ) from numpy._core.fromnumeric import matrix_transpose -from numpy._globals import _NoValueType +from numpy._globals import _NoValue, _NoValueType from numpy._typing import ( ArrayLike, DTypeLike, @@ -33,7 +32,6 @@ from numpy._typing import ( _ArrayLikeComplex_co, _ArrayLikeFloat_co, _ArrayLikeInt_co, - _ArrayLikeNumber_co, _ArrayLikeObject_co, _ArrayLikeTD64_co, _ArrayLikeUInt_co, @@ -78,34 +76,49 @@ __all__ = [ ] type _ModeKind = L["reduced", "complete", "r", "raw"] +type _SideKind = L["L", "U", "l", "u"] + +type _inexact64 = np.float32 | np.complex64 + +# anything that safe-casts (from floating) into float64/complex128 +type _ToArrayF64 = _ArrayLike[np.float64 | np.integer | np.bool] | _NestedSequence[float] +type _ToArrayC128 = _ArrayLike[np.complex128 | np.float64 | np.integer | np.bool] | _NestedSequence[complex] +# the invariant `list` type avoids overlap with `_IntoArrayF64` +type _AsArrayC128 = _ArrayLike[np.complex128] | list[complex] | _NestedSequence[list[complex]] ### fortran_int = np.intc +# TODO: generic class EigResult(NamedTuple): eigenvalues: NDArray[Any] eigenvectors: NDArray[Any] +# TODO: generic class EighResult(NamedTuple): eigenvalues: NDArray[Any] eigenvectors: NDArray[Any] +# TODO: generic class QRResult(NamedTuple): Q: NDArray[Any] R: NDArray[Any] +# TODO: generic class SlogdetResult(NamedTuple): # TODO: `sign` and `logabsdet` are scalars for input 2D arrays and # a `(x.ndim - 2)`` dimensional arrays otherwise sign: Any logabsdet: Any +# TODO: generic class SVDResult(NamedTuple): U: NDArray[Any] S: NDArray[Any] Vh: NDArray[Any] +# TODO: narrow return types @overload def tensorsolve( a: _ArrayLikeInt_co, @@ -125,6 +138,7 @@ def tensorsolve( axes: Iterable[int] | None = None, ) -> NDArray[complexfloating]: ... 
+# TODO: narrow return types @overload def solve( a: _ArrayLikeInt_co, @@ -141,28 +155,69 @@ def solve( b: _ArrayLikeComplex_co, ) -> NDArray[complexfloating]: ... -@overload -def tensorinv( - a: _ArrayLikeInt_co, - ind: int = 2, -) -> NDArray[float64]: ... -@overload -def tensorinv( - a: _ArrayLikeFloat_co, - ind: int = 2, -) -> NDArray[floating]: ... -@overload -def tensorinv( +# keep in sync with the other inverse functions and cholesky +@overload # inexact32 array-likes +def tensorinv[ScalarT: _inexact64](a: _ArrayLike[ScalarT], ind: int = 2) -> NDArray[ScalarT]: ... +@overload # +float64 array-likes +def tensorinv(a: _ToArrayF64, ind: int = 2) -> NDArray[np.float64]: ... +@overload # ~complex128 array-likes +def tensorinv(a: _AsArrayC128, ind: int = 2) -> NDArray[np.complex128]: ... +@overload # fallback +def tensorinv(a: _ArrayLikeComplex_co, ind: int = 2) -> np.ndarray: ... + +# keep in sync with the other inverse functions and cholesky +@overload # inexact32 array-likes +def inv[ScalarT: _inexact64](a: _ArrayLike[ScalarT]) -> NDArray[ScalarT]: ... +@overload # +float64 array-likes +def inv(a: _ToArrayF64) -> NDArray[np.float64]: ... +@overload # ~complex128 array-likes +def inv(a: _AsArrayC128) -> NDArray[np.complex128]: ... +@overload # fallback +def inv(a: _ArrayLikeComplex_co) -> np.ndarray: ... + +# keep in sync with the other inverse functions and cholesky +@overload # inexact32 array-likes +def pinv[ScalarT: _inexact64]( + a: _ArrayLike[ScalarT], + rcond: _ArrayLikeFloat_co | None = None, + hermitian: bool = False, + *, + rtol: _ArrayLikeFloat_co | _NoValueType = _NoValue, +) -> NDArray[ScalarT]: ... +@overload # +float64 array-likes +def pinv( + a: _ToArrayF64, + rcond: _ArrayLikeFloat_co | None = None, + hermitian: bool = False, + *, + rtol: _ArrayLikeFloat_co | _NoValueType = _NoValue, +) -> NDArray[np.float64]: ... 
+@overload # ~complex128 array-likes +def pinv( + a: _AsArrayC128, + rcond: _ArrayLikeFloat_co | None = None, + hermitian: bool = False, + *, + rtol: _ArrayLikeFloat_co | _NoValueType = _NoValue, +) -> NDArray[np.complex128]: ... +@overload # fallback +def pinv( a: _ArrayLikeComplex_co, - ind: int = 2, -) -> NDArray[complexfloating]: ... + rcond: _ArrayLikeFloat_co | None = None, + hermitian: bool = False, + *, + rtol: _ArrayLikeFloat_co | _NoValueType = _NoValue, +) -> NDArray[Any]: ... -@overload -def inv(a: _ArrayLikeInt_co) -> NDArray[float64]: ... -@overload -def inv(a: _ArrayLikeFloat_co) -> NDArray[floating]: ... -@overload -def inv(a: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... +# keep in sync with the inverse functions +@overload # inexact32 array-likes +def cholesky[ScalarT: _inexact64](a: _ArrayLike[ScalarT], /, *, upper: bool = False) -> NDArray[ScalarT]: ... +@overload # +float64 array-likes +def cholesky(a: _ToArrayF64, /, *, upper: bool = False) -> NDArray[np.float64]: ... +@overload # ~complex128 array-likes +def cholesky(a: _AsArrayC128, /, *, upper: bool = False) -> NDArray[np.complex128]: ... +@overload # fallback +def cholesky(a: _ArrayLikeComplex_co, /, *, upper: bool = False) -> np.ndarray: ... # TODO: The supported input and output dtypes are dependent on the value of `n`. # For example: `n < 0` always casts integer types to float64 @@ -171,13 +226,7 @@ def matrix_power( n: SupportsIndex, ) -> NDArray[Any]: ... -@overload -def cholesky(a: _ArrayLikeInt_co, /, *, upper: bool = False) -> NDArray[float64]: ... -@overload -def cholesky(a: _ArrayLikeFloat_co, /, *, upper: bool = False) -> NDArray[floating]: ... -@overload -def cholesky(a: _ArrayLikeComplex_co, /, *, upper: bool = False) -> NDArray[complexfloating]: ... - +# TODO: narrow return types @overload def outer(x1: _ArrayLike[Never], x2: _ArrayLike[Never], /) -> NDArray[Any]: ... @overload @@ -203,6 +252,7 @@ def outer( /, ) -> NDArray[Any]: ... 
+# TODO: narrow return types @overload def qr(a: _ArrayLikeInt_co, mode: _ModeKind = "reduced") -> QRResult: ... @overload @@ -210,18 +260,7 @@ def qr(a: _ArrayLikeFloat_co, mode: _ModeKind = "reduced") -> QRResult: ... @overload def qr(a: _ArrayLikeComplex_co, mode: _ModeKind = "reduced") -> QRResult: ... -@overload -def eigvals(a: _ArrayLikeInt_co) -> NDArray[float64] | NDArray[complex128]: ... -@overload -def eigvals(a: _ArrayLikeFloat_co) -> NDArray[floating] | NDArray[complexfloating]: ... -@overload -def eigvals(a: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... - -@overload -def eigvalsh(a: _ArrayLikeInt_co, UPLO: L["L", "U", "l", "u"] = "L") -> NDArray[float64]: ... -@overload -def eigvalsh(a: _ArrayLikeComplex_co, UPLO: L["L", "U", "l", "u"] = "L") -> NDArray[floating]: ... - +# TODO: narrow return types @overload def eig(a: _ArrayLikeInt_co) -> EigResult: ... @overload @@ -229,22 +268,15 @@ def eig(a: _ArrayLikeFloat_co) -> EigResult: ... @overload def eig(a: _ArrayLikeComplex_co) -> EigResult: ... +# TODO: narrow return types @overload -def eigh( - a: _ArrayLikeInt_co, - UPLO: L["L", "U", "l", "u"] = "L", -) -> EighResult: ... +def eigh(a: _ArrayLikeInt_co, UPLO: _SideKind = "L") -> EighResult: ... @overload -def eigh( - a: _ArrayLikeFloat_co, - UPLO: L["L", "U", "l", "u"] = "L", -) -> EighResult: ... +def eigh(a: _ArrayLikeFloat_co, UPLO: _SideKind = "L") -> EighResult: ... @overload -def eigh( - a: _ArrayLikeComplex_co, - UPLO: L["L", "U", "l", "u"] = "L", -) -> EighResult: ... +def eigh(a: _ArrayLikeComplex_co, UPLO: _SideKind = "L") -> EighResult: ... +# TODO: narrow return types @overload def svd( a: _ArrayLikeInt_co, @@ -297,15 +329,41 @@ def svd( hermitian: bool = False, ) -> NDArray[floating]: ... -# -@overload -def svdvals( - x: _ArrayLike[np.float64 | np.complex128 | np.integer | np.bool] | _NestedSequence[complex], / -) -> NDArray[np.float64]: ... 
-@overload -def svdvals(x: _ArrayLike[np.float32 | np.complex64], /) -> NDArray[np.float32]: ... -@overload -def svdvals(x: _ArrayLikeNumber_co, /) -> NDArray[floating]: ... +# NOTE: for real input the output dtype (floating/complexfloating) depends on the specific values +@overload # abstract `complexfloating` (excluding concrete types) +def eigvals(a: NDArray[np.complexfloating[Never]]) -> NDArray[np.complexfloating]: ... +@overload # abstract `inexact` and `floating` (excluding concrete types) +def eigvals(a: NDArray[np.inexact[Never]]) -> np.ndarray: ... +@overload # ~complex128 +def eigvals(a: _AsArrayC128) -> NDArray[np.complex128]: ... +@overload # +float64 +def eigvals(a: _ToArrayF64) -> NDArray[np.complex128] | NDArray[np.float64]: ... +@overload # ~complex64 +def eigvals(a: _ArrayLike[np.complex64]) -> NDArray[np.complex64]: ... +@overload # ~float32 +def eigvals(a: _ArrayLike[np.float32]) -> NDArray[np.complex64] | NDArray[np.float32]: ... +@overload # fallback +def eigvals(a: _ArrayLikeComplex_co) -> np.ndarray: ... + +# keep in sync with svdvals +@overload # abstract `inexact` (excluding concrete types) +def eigvalsh(a: NDArray[np.inexact[Never]], UPLO: _SideKind = "L") -> NDArray[np.floating]: ... +@overload # ~inexact32 +def eigvalsh(a: _ArrayLike[_inexact64], UPLO: _SideKind = "L") -> NDArray[np.float32]: ... +@overload # +complex128 +def eigvalsh(a: _ToArrayC128, UPLO: _SideKind = "L") -> NDArray[np.float64]: ... +@overload # fallback +def eigvalsh(a: _ArrayLikeComplex_co, UPLO: _SideKind = "L") -> NDArray[np.floating]: ... + +# keep in sync with eigvalsh +@overload # abstract `inexact` (excluding concrete types) +def svdvals(a: NDArray[np.inexact[Never]], /) -> NDArray[np.floating]: ... +@overload # ~inexact32 +def svdvals(x: _ArrayLike[_inexact64], /) -> NDArray[np.float32]: ... +@overload # +complex128 +def svdvals(x: _ToArrayC128, /) -> NDArray[np.float64]: ... 
+@overload # fallback +def svdvals(a: _ArrayLikeComplex_co, /) -> NDArray[np.floating]: ... # TODO: Returns a scalar for 2D arrays and # a `(x.ndim - 2)`` dimensional array otherwise @@ -320,31 +378,6 @@ def matrix_rank( rtol: _ArrayLikeFloat_co | None = None, ) -> Any: ... -@overload -def pinv( - a: _ArrayLikeInt_co, - rcond: _ArrayLikeFloat_co | None = None, - hermitian: bool = False, - *, - rtol: _ArrayLikeFloat_co | _NoValueType = ..., -) -> NDArray[float64]: ... -@overload -def pinv( - a: _ArrayLikeFloat_co, - rcond: _ArrayLikeFloat_co | None = None, - hermitian: bool = False, - *, - rtol: _ArrayLikeFloat_co | _NoValueType = ..., -) -> NDArray[floating]: ... -@overload -def pinv( - a: _ArrayLikeComplex_co, - rcond: _ArrayLikeFloat_co | None = None, - hermitian: bool = False, - *, - rtol: _ArrayLikeFloat_co | _NoValueType = ..., -) -> NDArray[complexfloating]: ... - # TODO: Returns a 2-tuple of scalars for 2D arrays and # a 2-tuple of `(a.ndim - 2)`` dimensional arrays otherwise def slogdet(a: _ArrayLikeComplex_co) -> SlogdetResult: ... @@ -353,28 +386,36 @@ def slogdet(a: _ArrayLikeComplex_co) -> SlogdetResult: ... # a 2-tuple of `(a.ndim - 2)`` dimensional arrays otherwise def det(a: _ArrayLikeComplex_co) -> Any: ... +# TODO: narrow return types @overload -def lstsq(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, rcond: float | None = None) -> tuple[ +def lstsq( + a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, rcond: float | None = None +) -> tuple[ NDArray[float64], NDArray[float64], int32, NDArray[float64], ]: ... @overload -def lstsq(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, rcond: float | None = None) -> tuple[ +def lstsq( + a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, rcond: float | None = None +) -> tuple[ NDArray[floating], NDArray[floating], int32, NDArray[floating], ]: ... 
@overload -def lstsq(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, rcond: float | None = None) -> tuple[ +def lstsq( + a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, rcond: float | None = None +) -> tuple[ NDArray[complexfloating], NDArray[floating], int32, NDArray[floating], ]: ... +# TODO: narrow return types @overload def norm( x: ArrayLike, @@ -398,6 +439,7 @@ def norm( keepdims: bool = False, ) -> Any: ... +# TODO: narrow return types @overload def matrix_norm( x: ArrayLike, @@ -415,6 +457,7 @@ def matrix_norm( keepdims: bool = False, ) -> Any: ... +# TODO: narrow return types @overload def vector_norm( x: ArrayLike, @@ -483,6 +526,7 @@ def multi_dot( out: NDArray[Any] | None = None, ) -> Any: ... +# TODO: narrow return types def diagonal( x: ArrayLike, # >= 2D array /, @@ -490,6 +534,7 @@ def diagonal( offset: SupportsIndex = 0, ) -> NDArray[Any]: ... +# TODO: narrow return types def trace( x: ArrayLike, # >= 2D array /, @@ -498,6 +543,7 @@ def trace( dtype: DTypeLike | None = None, ) -> Any: ... +# TODO: narrow return types @overload def cross( x1: _ArrayLikeUInt_co, @@ -531,6 +577,7 @@ def cross( axis: int = -1, ) -> NDArray[complexfloating]: ... +# TODO: narrow return types @overload def matmul[ScalarT: np.number](x1: _ArrayLike[ScalarT], x2: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... 
@overload diff --git a/numpy/typing/tests/data/fail/linalg.pyi b/numpy/typing/tests/data/fail/linalg.pyi index 78aceb235f8d..b05327b7436d 100644 --- a/numpy/typing/tests/data/fail/linalg.pyi +++ b/numpy/typing/tests/data/fail/linalg.pyi @@ -9,14 +9,13 @@ np.linalg.tensorsolve(AR_O, AR_O) # type: ignore[arg-type] np.linalg.solve(AR_O, AR_O) # type: ignore[arg-type] -np.linalg.tensorinv(AR_O) # type: ignore[arg-type] - -np.linalg.inv(AR_O) # type: ignore[arg-type] +np.linalg.tensorinv(AR_O) # type: ignore[type-var] +np.linalg.inv(AR_O) # type: ignore[type-var] +np.linalg.pinv(AR_O) # type: ignore[type-var] +np.linalg.cholesky(AR_O) # type: ignore[type-var] np.linalg.matrix_power(AR_M, 5) # type: ignore[arg-type] -np.linalg.cholesky(AR_O) # type: ignore[arg-type] - np.linalg.qr(AR_O) # type: ignore[arg-type] np.linalg.qr(AR_f8, mode="bob") # type: ignore[call-overload] @@ -25,6 +24,10 @@ np.linalg.eigvals(AR_O) # type: ignore[arg-type] np.linalg.eigvalsh(AR_O) # type: ignore[arg-type] np.linalg.eigvalsh(AR_O, UPLO="bob") # type: ignore[call-overload] +np.linalg.svdvals(AR_O) # type: ignore[arg-type] +np.linalg.svdvals(AR_M) # type: ignore[arg-type] +np.linalg.svdvals(x=AR_f8) # type: ignore[call-overload] + np.linalg.eig(AR_O) # type: ignore[arg-type] np.linalg.eigh(AR_O) # type: ignore[arg-type] @@ -32,17 +35,11 @@ np.linalg.eigh(AR_O, UPLO="bob") # type: ignore[call-overload] np.linalg.svd(AR_O) # type: ignore[arg-type] -np.linalg.svdvals(AR_O) # type: ignore[arg-type] -np.linalg.svdvals(AR_M) # type: ignore[arg-type] -np.linalg.svdvals(x=AR_f8) # type: ignore[call-overload] - np.linalg.cond(AR_O) # type: ignore[arg-type] np.linalg.cond(AR_f8, p="bob") # type: ignore[arg-type] np.linalg.matrix_rank(AR_O) # type: ignore[arg-type] -np.linalg.pinv(AR_O) # type: ignore[arg-type] - np.linalg.slogdet(AR_O) # type: ignore[arg-type] np.linalg.det(AR_O) # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/reveal/linalg.pyi b/numpy/typing/tests/data/reveal/linalg.pyi 
index 3f73e842aa6c..947f0c08f9ef 100644 --- a/numpy/typing/tests/data/reveal/linalg.pyi +++ b/numpy/typing/tests/data/reveal/linalg.pyi @@ -10,7 +10,9 @@ from numpy.linalg._linalg import ( SVDResult, ) +int_list_2d: list[list[int]] float_list_2d: list[list[float]] +complex_list_2d: list[list[complex]] AR_i8: npt.NDArray[np.int64] AR_f4: npt.NDArray[np.float32] AR_f8: npt.NDArray[np.float64] @@ -30,12 +32,12 @@ assert_type(np.linalg.solve(AR_i8, AR_f8), npt.NDArray[np.floating]) assert_type(np.linalg.solve(AR_c16, AR_f8), npt.NDArray[np.complexfloating]) assert_type(np.linalg.tensorinv(AR_i8), npt.NDArray[np.float64]) -assert_type(np.linalg.tensorinv(AR_f8), npt.NDArray[np.floating]) -assert_type(np.linalg.tensorinv(AR_c16), npt.NDArray[np.complexfloating]) +assert_type(np.linalg.tensorinv(AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.tensorinv(AR_c16), npt.NDArray[np.complex128]) assert_type(np.linalg.inv(AR_i8), npt.NDArray[np.float64]) -assert_type(np.linalg.inv(AR_f8), npt.NDArray[np.floating]) -assert_type(np.linalg.inv(AR_c16), npt.NDArray[np.complexfloating]) +assert_type(np.linalg.inv(AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.inv(AR_c16), npt.NDArray[np.complex128]) assert_type(np.linalg.matrix_power(AR_i8, -1), npt.NDArray[Any]) assert_type(np.linalg.matrix_power(AR_f8, 0), npt.NDArray[Any]) @@ -43,8 +45,8 @@ assert_type(np.linalg.matrix_power(AR_c16, 1), npt.NDArray[Any]) assert_type(np.linalg.matrix_power(AR_O, 2), npt.NDArray[Any]) assert_type(np.linalg.cholesky(AR_i8), npt.NDArray[np.float64]) -assert_type(np.linalg.cholesky(AR_f8), npt.NDArray[np.floating]) -assert_type(np.linalg.cholesky(AR_c16), npt.NDArray[np.complexfloating]) +assert_type(np.linalg.cholesky(AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.cholesky(AR_c16), npt.NDArray[np.complex128]) assert_type(np.linalg.outer(AR_i8, AR_i8), npt.NDArray[np.int64]) assert_type(np.linalg.outer(AR_f8, AR_f8), npt.NDArray[np.float64]) @@ -59,12 +61,12 @@ 
assert_type(np.linalg.qr(AR_f8), QRResult) assert_type(np.linalg.qr(AR_c16), QRResult) assert_type(np.linalg.eigvals(AR_i8), npt.NDArray[np.float64] | npt.NDArray[np.complex128]) -assert_type(np.linalg.eigvals(AR_f8), npt.NDArray[np.floating] | npt.NDArray[np.complexfloating]) -assert_type(np.linalg.eigvals(AR_c16), npt.NDArray[np.complexfloating]) +assert_type(np.linalg.eigvals(AR_f8), npt.NDArray[np.float64] | npt.NDArray[np.complex128]) +assert_type(np.linalg.eigvals(AR_c16), npt.NDArray[np.complex128]) assert_type(np.linalg.eigvalsh(AR_i8), npt.NDArray[np.float64]) -assert_type(np.linalg.eigvalsh(AR_f8), npt.NDArray[np.floating]) -assert_type(np.linalg.eigvalsh(AR_c16), npt.NDArray[np.floating]) +assert_type(np.linalg.eigvalsh(AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.eigvalsh(AR_c16), npt.NDArray[np.float64]) assert_type(np.linalg.eig(AR_i8), EigResult) assert_type(np.linalg.eig(AR_f8), EigResult) @@ -89,9 +91,9 @@ assert_type(np.linalg.svdvals(AR_f4), npt.NDArray[np.float32]) assert_type(np.linalg.svdvals(AR_c8), npt.NDArray[np.float32]) assert_type(np.linalg.svdvals(AR_f8), npt.NDArray[np.float64]) assert_type(np.linalg.svdvals(AR_c16), npt.NDArray[np.float64]) -assert_type(np.linalg.svdvals([[1, 2], [3, 4]]), npt.NDArray[np.float64]) -assert_type(np.linalg.svdvals([[1.0, 2.0], [3.0, 4.0]]), npt.NDArray[np.float64]) -assert_type(np.linalg.svdvals([[1j, 2j], [3j, 4j]]), npt.NDArray[np.float64]) +assert_type(np.linalg.svdvals(int_list_2d), npt.NDArray[np.float64]) +assert_type(np.linalg.svdvals(float_list_2d), npt.NDArray[np.float64]) +assert_type(np.linalg.svdvals(complex_list_2d), npt.NDArray[np.float64]) assert_type(np.linalg.cond(AR_i8), Any) assert_type(np.linalg.cond(AR_f8), Any) @@ -102,8 +104,8 @@ assert_type(np.linalg.matrix_rank(AR_f8), Any) assert_type(np.linalg.matrix_rank(AR_c16), Any) assert_type(np.linalg.pinv(AR_i8), npt.NDArray[np.float64]) -assert_type(np.linalg.pinv(AR_f8), npt.NDArray[np.floating]) 
-assert_type(np.linalg.pinv(AR_c16), npt.NDArray[np.complexfloating]) +assert_type(np.linalg.pinv(AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.pinv(AR_c16), npt.NDArray[np.complex128]) assert_type(np.linalg.slogdet(AR_i8), SlogdetResult) assert_type(np.linalg.slogdet(AR_f8), SlogdetResult) From 4d249878bdb51a38c1fc6c28751644be9ad3ff27 Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 9 Dec 2025 16:06:52 +0100 Subject: [PATCH 0998/1718] TYP: `linalg`: improved `matrix_power` --- numpy/linalg/_linalg.pyi | 33 ++++++++++++++++++----- numpy/typing/tests/data/reveal/linalg.pyi | 9 ++++--- 2 files changed, 31 insertions(+), 11 deletions(-) diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index 00135bc5d15b..be25e8714e67 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -83,9 +83,14 @@ type _inexact64 = np.float32 | np.complex64 # anything that safe-casts (from floating) into float64/complex128 type _ToArrayF64 = _ArrayLike[np.float64 | np.integer | np.bool] | _NestedSequence[float] type _ToArrayC128 = _ArrayLike[np.complex128 | np.float64 | np.integer | np.bool] | _NestedSequence[complex] -# the invariant `list` type avoids overlap with `_IntoArrayF64` +# the invariant `list` type avoids overlap with bool, int, etc +type _AsArrayInt = _ArrayLike[np.int_] | list[int] | _NestedSequence[list[int]] +type _AsArrayF64 = _ArrayLike[np.float64] | list[float] | _NestedSequence[list[float]] type _AsArrayC128 = _ArrayLike[np.complex128] | list[complex] | _NestedSequence[list[complex]] +type _PosInt = L[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] +type _NegInt = L[-1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16] + ### fortran_int = np.intc @@ -156,6 +161,7 @@ def solve( ) -> NDArray[complexfloating]: ... 
# keep in sync with the other inverse functions and cholesky +# TODO: transparent shape types @overload # inexact32 array-likes def tensorinv[ScalarT: _inexact64](a: _ArrayLike[ScalarT], ind: int = 2) -> NDArray[ScalarT]: ... @overload # +float64 array-likes @@ -176,6 +182,7 @@ def inv(a: _AsArrayC128) -> NDArray[np.complex128]: ... def inv(a: _ArrayLikeComplex_co) -> np.ndarray: ... # keep in sync with the other inverse functions and cholesky +# TODO: transparent shape types @overload # inexact32 array-likes def pinv[ScalarT: _inexact64]( a: _ArrayLike[ScalarT], @@ -210,6 +217,7 @@ def pinv( ) -> NDArray[Any]: ... # keep in sync with the inverse functions +# TODO: transparent shape types @overload # inexact32 array-likes def cholesky[ScalarT: _inexact64](a: _ArrayLike[ScalarT], /, *, upper: bool = False) -> NDArray[ScalarT]: ... @overload # +float64 array-likes @@ -219,12 +227,23 @@ def cholesky(a: _AsArrayC128, /, *, upper: bool = False) -> NDArray[np.complex12 @overload # fallback def cholesky(a: _ArrayLikeComplex_co, /, *, upper: bool = False) -> np.ndarray: ... -# TODO: The supported input and output dtypes are dependent on the value of `n`. -# For example: `n < 0` always casts integer types to float64 -def matrix_power( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - n: SupportsIndex, -) -> NDArray[Any]: ... +# NOTE: Technically this also accepts boolean array-likes, but that case is not very useful, so we skip it. +# If you have a use case for it, please open an issue. +# TODO: transparent shape types +@overload # +int, n ≥ 0 +def matrix_power(a: _NestedSequence[int], n: _PosInt) -> NDArray[np.int_]: ... +@overload # +integer | ~object, n ≥ 0 +def matrix_power[ScalarT: np.integer | np.object_](a: _ArrayLike[ScalarT], n: _PosInt) -> NDArray[ScalarT]: ... +@overload # +float64, n < 0 +def matrix_power(a: _ToArrayF64, n: _NegInt) -> NDArray[np.float64]: ... +@overload # ~float64 +def matrix_power(a: _AsArrayF64, n: SupportsIndex) -> NDArray[np.float64]: ... 
+@overload # ~complex128 +def matrix_power(a: _AsArrayC128, n: SupportsIndex) -> NDArray[np.complex128]: ... +@overload # ~inexact32 +def matrix_power[ScalarT: _inexact64](a: _ArrayLike[ScalarT], n: SupportsIndex) -> NDArray[ScalarT]: ... +@overload # fallback +def matrix_power(a: _ArrayLikeComplex_co | _ArrayLikeObject_co, n: SupportsIndex) -> np.ndarray: ... # TODO: narrow return types @overload diff --git a/numpy/typing/tests/data/reveal/linalg.pyi b/numpy/typing/tests/data/reveal/linalg.pyi index 947f0c08f9ef..1c046912819b 100644 --- a/numpy/typing/tests/data/reveal/linalg.pyi +++ b/numpy/typing/tests/data/reveal/linalg.pyi @@ -39,10 +39,11 @@ assert_type(np.linalg.inv(AR_i8), npt.NDArray[np.float64]) assert_type(np.linalg.inv(AR_f8), npt.NDArray[np.float64]) assert_type(np.linalg.inv(AR_c16), npt.NDArray[np.complex128]) -assert_type(np.linalg.matrix_power(AR_i8, -1), npt.NDArray[Any]) -assert_type(np.linalg.matrix_power(AR_f8, 0), npt.NDArray[Any]) -assert_type(np.linalg.matrix_power(AR_c16, 1), npt.NDArray[Any]) -assert_type(np.linalg.matrix_power(AR_O, 2), npt.NDArray[Any]) +assert_type(np.linalg.matrix_power(AR_i8, -1), npt.NDArray[np.float64]) +assert_type(np.linalg.matrix_power(AR_i8, 1), npt.NDArray[np.int64]) +assert_type(np.linalg.matrix_power(AR_f8, 0), npt.NDArray[np.float64]) +assert_type(np.linalg.matrix_power(AR_c16, 1), npt.NDArray[np.complex128]) +assert_type(np.linalg.matrix_power(AR_O, 2), npt.NDArray[np.object_]) assert_type(np.linalg.cholesky(AR_i8), npt.NDArray[np.float64]) assert_type(np.linalg.cholesky(AR_f8), npt.NDArray[np.float64]) From 5b2ec65bac624cefb8fbcd0be147f051d25ea0e7 Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 9 Dec 2025 16:34:00 +0100 Subject: [PATCH 0999/1718] TYP: `linalg`: improved `eigh` return types --- numpy/linalg/_linalg.pyi | 99 ++++++++++++----------- numpy/typing/tests/data/reveal/linalg.pyi | 13 ++- 2 files changed, 64 insertions(+), 48 deletions(-) diff --git a/numpy/linalg/_linalg.pyi 
b/numpy/linalg/_linalg.pyi index be25e8714e67..ff4b0254db63 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -1,6 +1,7 @@ from collections.abc import Iterable from typing import ( Any, + Generic, Literal as L, NamedTuple, Never, @@ -8,6 +9,7 @@ from typing import ( SupportsInt, overload, ) +from typing_extensions import TypeVar import numpy as np from numpy import ( @@ -78,7 +80,7 @@ __all__ = [ type _ModeKind = L["reduced", "complete", "r", "raw"] type _SideKind = L["L", "U", "l", "u"] -type _inexact64 = np.float32 | np.complex64 +type _inexact32 = np.float32 | np.complex64 # anything that safe-casts (from floating) into float64/complex128 type _ToArrayF64 = _ArrayLike[np.float64 | np.integer | np.bool] | _NestedSequence[float] @@ -91,24 +93,27 @@ type _AsArrayC128 = _ArrayLike[np.complex128] | list[complex] | _NestedSequence[ type _PosInt = L[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] type _NegInt = L[-1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16] +_FloatingT_co = TypeVar("_FloatingT_co", bound=np.floating, default=Any, covariant=True) +_InexactT_co = TypeVar("_InexactT_co", bound=np.inexact, default=Any, covariant=True) + ### fortran_int = np.intc -# TODO: generic -class EigResult(NamedTuple): - eigenvalues: NDArray[Any] - eigenvectors: NDArray[Any] +# NOTE: only generic when `typing.TYPE_CHECKING` +class EigResult(NamedTuple, Generic[_InexactT_co]): + eigenvalues: NDArray[_InexactT_co] + eigenvectors: NDArray[_InexactT_co] -# TODO: generic -class EighResult(NamedTuple): - eigenvalues: NDArray[Any] - eigenvectors: NDArray[Any] +# NOTE: only generic when `typing.TYPE_CHECKING` +class EighResult(NamedTuple, Generic[_FloatingT_co, _InexactT_co]): + eigenvalues: NDArray[_FloatingT_co] + eigenvectors: NDArray[_InexactT_co] -# TODO: generic -class QRResult(NamedTuple): - Q: NDArray[Any] - R: NDArray[Any] +# NOTE: only generic when `typing.TYPE_CHECKING` +class QRResult(NamedTuple, Generic[_InexactT_co]): + 
Q: NDArray[_InexactT_co] + R: NDArray[_InexactT_co] # TODO: generic class SlogdetResult(NamedTuple): @@ -162,36 +167,36 @@ def solve( # keep in sync with the other inverse functions and cholesky # TODO: transparent shape types -@overload # inexact32 array-likes -def tensorinv[ScalarT: _inexact64](a: _ArrayLike[ScalarT], ind: int = 2) -> NDArray[ScalarT]: ... -@overload # +float64 array-likes +@overload # inexact32 +def tensorinv[ScalarT: _inexact32](a: _ArrayLike[ScalarT], ind: int = 2) -> NDArray[ScalarT]: ... +@overload # +float64 def tensorinv(a: _ToArrayF64, ind: int = 2) -> NDArray[np.float64]: ... -@overload # ~complex128 array-likes +@overload # ~complex128 def tensorinv(a: _AsArrayC128, ind: int = 2) -> NDArray[np.complex128]: ... @overload # fallback def tensorinv(a: _ArrayLikeComplex_co, ind: int = 2) -> np.ndarray: ... # keep in sync with the other inverse functions and cholesky -@overload # inexact32 array-likes -def inv[ScalarT: _inexact64](a: _ArrayLike[ScalarT]) -> NDArray[ScalarT]: ... -@overload # +float64 array-likes +@overload # inexact32 +def inv[ScalarT: _inexact32](a: _ArrayLike[ScalarT]) -> NDArray[ScalarT]: ... +@overload # +float64 def inv(a: _ToArrayF64) -> NDArray[np.float64]: ... -@overload # ~complex128 array-likes +@overload # ~complex128 def inv(a: _AsArrayC128) -> NDArray[np.complex128]: ... @overload # fallback def inv(a: _ArrayLikeComplex_co) -> np.ndarray: ... # keep in sync with the other inverse functions and cholesky # TODO: transparent shape types -@overload # inexact32 array-likes -def pinv[ScalarT: _inexact64]( +@overload # inexact32 +def pinv[ScalarT: _inexact32]( a: _ArrayLike[ScalarT], rcond: _ArrayLikeFloat_co | None = None, hermitian: bool = False, *, rtol: _ArrayLikeFloat_co | _NoValueType = _NoValue, ) -> NDArray[ScalarT]: ... 
-@overload # +float64 array-likes +@overload # +float64 def pinv( a: _ToArrayF64, rcond: _ArrayLikeFloat_co | None = None, @@ -199,7 +204,7 @@ def pinv( *, rtol: _ArrayLikeFloat_co | _NoValueType = _NoValue, ) -> NDArray[np.float64]: ... -@overload # ~complex128 array-likes +@overload # ~complex128 def pinv( a: _AsArrayC128, rcond: _ArrayLikeFloat_co | None = None, @@ -218,11 +223,11 @@ def pinv( # keep in sync with the inverse functions # TODO: transparent shape types -@overload # inexact32 array-likes -def cholesky[ScalarT: _inexact64](a: _ArrayLike[ScalarT], /, *, upper: bool = False) -> NDArray[ScalarT]: ... -@overload # +float64 array-likes +@overload # inexact32 +def cholesky[ScalarT: _inexact32](a: _ArrayLike[ScalarT], /, *, upper: bool = False) -> NDArray[ScalarT]: ... +@overload # +float64 def cholesky(a: _ToArrayF64, /, *, upper: bool = False) -> NDArray[np.float64]: ... -@overload # ~complex128 array-likes +@overload # ~complex128 def cholesky(a: _AsArrayC128, /, *, upper: bool = False) -> NDArray[np.complex128]: ... @overload # fallback def cholesky(a: _ArrayLikeComplex_co, /, *, upper: bool = False) -> np.ndarray: ... @@ -241,7 +246,7 @@ def matrix_power(a: _AsArrayF64, n: SupportsIndex) -> NDArray[np.float64]: ... @overload # ~complex128 def matrix_power(a: _AsArrayC128, n: SupportsIndex) -> NDArray[np.complex128]: ... @overload # ~inexact32 -def matrix_power[ScalarT: _inexact64](a: _ArrayLike[ScalarT], n: SupportsIndex) -> NDArray[ScalarT]: ... +def matrix_power[ScalarT: _inexact32](a: _ArrayLike[ScalarT], n: SupportsIndex) -> NDArray[ScalarT]: ... @overload # fallback def matrix_power(a: _ArrayLikeComplex_co | _ArrayLikeObject_co, n: SupportsIndex) -> np.ndarray: ... @@ -271,14 +276,6 @@ def outer( /, ) -> NDArray[Any]: ... -# TODO: narrow return types -@overload -def qr(a: _ArrayLikeInt_co, mode: _ModeKind = "reduced") -> QRResult: ... -@overload -def qr(a: _ArrayLikeFloat_co, mode: _ModeKind = "reduced") -> QRResult: ... 
-@overload -def qr(a: _ArrayLikeComplex_co, mode: _ModeKind = "reduced") -> QRResult: ... - # TODO: narrow return types @overload def eig(a: _ArrayLikeInt_co) -> EigResult: ... @@ -287,13 +284,25 @@ def eig(a: _ArrayLikeFloat_co) -> EigResult: ... @overload def eig(a: _ArrayLikeComplex_co) -> EigResult: ... +# +@overload # workaround for microsoft/pyright#10232 +def eigh(a: NDArray[Never], UPLO: _SideKind = "L") -> EighResult: ... +@overload # ~inexact32 +def eigh[ScalarT: _inexact32](a: _ArrayLike[ScalarT], UPLO: _SideKind = "L") -> EighResult[np.float32, ScalarT]: ... +@overload # +float64 +def eigh(a: _ToArrayF64, UPLO: _SideKind = "L") -> EighResult[np.float64, np.float64]: ... +@overload # ~complex128 +def eigh(a: _AsArrayC128, UPLO: _SideKind = "L") -> EighResult[np.float64, np.complex128]: ... +@overload # fallback +def eigh(a: _ArrayLikeComplex_co, UPLO: _SideKind = "L") -> EighResult: ... + # TODO: narrow return types @overload -def eigh(a: _ArrayLikeInt_co, UPLO: _SideKind = "L") -> EighResult: ... +def qr(a: _ArrayLikeInt_co, mode: _ModeKind = "reduced") -> QRResult: ... @overload -def eigh(a: _ArrayLikeFloat_co, UPLO: _SideKind = "L") -> EighResult: ... +def qr(a: _ArrayLikeFloat_co, mode: _ModeKind = "reduced") -> QRResult: ... @overload -def eigh(a: _ArrayLikeComplex_co, UPLO: _SideKind = "L") -> EighResult: ... +def qr(a: _ArrayLikeComplex_co, mode: _ModeKind = "reduced") -> QRResult: ... # TODO: narrow return types @overload @@ -324,14 +333,14 @@ def svd( *, compute_uv: L[False], hermitian: bool = False, -) -> NDArray[float64]: ... +) -> NDArray[np.float64]: ... @overload def svd( a: _ArrayLikeInt_co, full_matrices: bool, compute_uv: L[False], hermitian: bool = False, -) -> NDArray[float64]: ... +) -> NDArray[np.float64]: ... @overload def svd( a: _ArrayLikeComplex_co, @@ -368,7 +377,7 @@ def eigvals(a: _ArrayLikeComplex_co) -> np.ndarray: ... 
@overload # abstract `inexact` (excluding concrete types) def eigvalsh(a: NDArray[np.inexact[Never]], UPLO: _SideKind = "L") -> NDArray[np.floating]: ... @overload # ~inexact32 -def eigvalsh(a: _ArrayLike[_inexact64], UPLO: _SideKind = "L") -> NDArray[np.float32]: ... +def eigvalsh(a: _ArrayLike[_inexact32], UPLO: _SideKind = "L") -> NDArray[np.float32]: ... @overload # +complex128 def eigvalsh(a: _ToArrayC128, UPLO: _SideKind = "L") -> NDArray[np.float64]: ... @overload # fallback @@ -378,7 +387,7 @@ def eigvalsh(a: _ArrayLikeComplex_co, UPLO: _SideKind = "L") -> NDArray[np.float @overload # abstract `inexact` (excluding concrete types) def svdvals(a: NDArray[np.inexact[Never]], /) -> NDArray[np.floating]: ... @overload # ~inexact32 -def svdvals(x: _ArrayLike[_inexact64], /) -> NDArray[np.float32]: ... +def svdvals(x: _ArrayLike[_inexact32], /) -> NDArray[np.float32]: ... @overload # +complex128 def svdvals(x: _ToArrayC128, /) -> NDArray[np.float64]: ... @overload # fallback diff --git a/numpy/typing/tests/data/reveal/linalg.pyi b/numpy/typing/tests/data/reveal/linalg.pyi index 1c046912819b..71c02c6b7500 100644 --- a/numpy/typing/tests/data/reveal/linalg.pyi +++ b/numpy/typing/tests/data/reveal/linalg.pyi @@ -13,6 +13,8 @@ from numpy.linalg._linalg import ( int_list_2d: list[list[int]] float_list_2d: list[list[float]] complex_list_2d: list[list[complex]] + +AR_any: np.ndarray AR_i8: npt.NDArray[np.int64] AR_f4: npt.NDArray[np.float32] AR_f8: npt.NDArray[np.float64] @@ -73,9 +75,14 @@ assert_type(np.linalg.eig(AR_i8), EigResult) assert_type(np.linalg.eig(AR_f8), EigResult) assert_type(np.linalg.eig(AR_c16), EigResult) -assert_type(np.linalg.eigh(AR_i8), EighResult) -assert_type(np.linalg.eigh(AR_f8), EighResult) -assert_type(np.linalg.eigh(AR_c16), EighResult) +assert_type(np.linalg.eigh(AR_i8), EighResult[np.float64, np.float64]) +assert_type(np.linalg.eigh(AR_f4), EighResult[np.float32, np.float32]) +assert_type(np.linalg.eigh(AR_f8), EighResult[np.float64, 
np.float64]) +assert_type(np.linalg.eigh(AR_c8), EighResult[np.float32, np.complex64]) +assert_type(np.linalg.eigh(AR_c16), EighResult[np.float64, np.complex128]) +# mypy is being silly again here: +# > Expression is of type "EighResult[Any, Any]", not "EighResult[Any, Any]" +assert_type(np.linalg.eigh(AR_any), EighResult[Any, Any]) # type: ignore[assert-type] assert_type(np.linalg.svd(AR_i8), SVDResult) assert_type(np.linalg.svd(AR_f8), SVDResult) From 2ce9c89507d54b1994f7be80f8277bfe29ee91b2 Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 9 Dec 2025 16:56:13 +0100 Subject: [PATCH 1000/1718] TYP: `linalg`: improved `eig` return types --- numpy/linalg/_linalg.pyi | 20 ++++++++++++-------- numpy/typing/tests/data/reveal/linalg.pyi | 15 ++++++++++++--- 2 files changed, 24 insertions(+), 11 deletions(-) diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index ff4b0254db63..645c9f8bfc2d 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -276,12 +276,18 @@ def outer( /, ) -> NDArray[Any]: ... -# TODO: narrow return types -@overload -def eig(a: _ArrayLikeInt_co) -> EigResult: ... -@overload -def eig(a: _ArrayLikeFloat_co) -> EigResult: ... -@overload +# NOTE: for real input the output dtype (floating/complexfloating) depends on the specific values +@overload # abstract `inexact` and `floating` (excluding concrete types) +def eig(a: NDArray[np.inexact[Never]]) -> EigResult: ... +@overload # ~complex128 +def eig(a: _AsArrayC128) -> EigResult[np.complex128]: ... +@overload # +float64 +def eig(a: _ToArrayF64) -> EigResult[np.complex128] | EigResult[np.float64]: ... +@overload # ~complex64 +def eig(a: _ArrayLike[np.complex64]) -> EigResult[np.complex64]: ... +@overload # ~float32 +def eig(a: _ArrayLike[np.float32]) -> EigResult[np.complex64] | EigResult[np.float32]: ... +@overload # fallback def eig(a: _ArrayLikeComplex_co) -> EigResult: ... # @@ -358,8 +364,6 @@ def svd( ) -> NDArray[floating]: ... 
# NOTE: for real input the output dtype (floating/complexfloating) depends on the specific values -@overload # abstract `complexfloating` (excluding concrete types) -def eigvals(a: NDArray[np.complexfloating[Never]]) -> NDArray[np.complexfloating]: ... @overload # abstract `inexact` and `floating` (excluding concrete types) def eigvals(a: NDArray[np.inexact[Never]]) -> np.ndarray: ... @overload # ~complex128 diff --git a/numpy/typing/tests/data/reveal/linalg.pyi b/numpy/typing/tests/data/reveal/linalg.pyi index 71c02c6b7500..76eebd15470a 100644 --- a/numpy/typing/tests/data/reveal/linalg.pyi +++ b/numpy/typing/tests/data/reveal/linalg.pyi @@ -16,8 +16,10 @@ complex_list_2d: list[list[complex]] AR_any: np.ndarray AR_i8: npt.NDArray[np.int64] +AR_f_: npt.NDArray[np.floating] AR_f4: npt.NDArray[np.float32] AR_f8: npt.NDArray[np.float64] +AR_c_: npt.NDArray[np.complexfloating] AR_c8: npt.NDArray[np.complex64] AR_c16: npt.NDArray[np.complex128] AR_O: npt.NDArray[np.object_] @@ -71,9 +73,16 @@ assert_type(np.linalg.eigvalsh(AR_i8), npt.NDArray[np.float64]) assert_type(np.linalg.eigvalsh(AR_f8), npt.NDArray[np.float64]) assert_type(np.linalg.eigvalsh(AR_c16), npt.NDArray[np.float64]) -assert_type(np.linalg.eig(AR_i8), EigResult) -assert_type(np.linalg.eig(AR_f8), EigResult) -assert_type(np.linalg.eig(AR_c16), EigResult) +assert_type(np.linalg.eig(AR_i8), EigResult[np.float64] | EigResult[np.complex128]) +assert_type(np.linalg.eig(AR_f4), EigResult[np.float32] | EigResult[np.complex64]) +assert_type(np.linalg.eig(AR_f8), EigResult[np.float64] | EigResult[np.complex128]) +assert_type(np.linalg.eig(AR_c8), EigResult[np.complex64]) +assert_type(np.linalg.eig(AR_c16), EigResult[np.complex128]) +# mypy is being silly again here: +# > Expression is of type "EigResult[Any]", not "EigResult[Any]" +assert_type(np.linalg.eig(AR_f_), EigResult) # type: ignore[assert-type] +assert_type(np.linalg.eig(AR_c_), EigResult) # type: ignore[assert-type] +assert_type(np.linalg.eig(AR_any), 
EigResult) # type: ignore[assert-type] assert_type(np.linalg.eigh(AR_i8), EighResult[np.float64, np.float64]) assert_type(np.linalg.eigh(AR_f4), EighResult[np.float32, np.float32]) From 6a0f1e016f92d6b3ae23a09b0893c8f460f81c1f Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 9 Dec 2025 17:25:38 +0100 Subject: [PATCH 1001/1718] TYP: `linalg`: improved `qr` return types --- numpy/linalg/_linalg.pyi | 34 ++++++++++++++++++----- numpy/typing/tests/data/fail/linalg.pyi | 16 +++++------ numpy/typing/tests/data/reveal/linalg.pyi | 30 +++++++++++++++----- 3 files changed, 58 insertions(+), 22 deletions(-) diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index 645c9f8bfc2d..1c51277fe3a8 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -93,6 +93,8 @@ type _AsArrayC128 = _ArrayLike[np.complex128] | list[complex] | _NestedSequence[ type _PosInt = L[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] type _NegInt = L[-1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16] +type _tuple2[T] = tuple[T, T] + _FloatingT_co = TypeVar("_FloatingT_co", bound=np.floating, default=Any, covariant=True) _InexactT_co = TypeVar("_InexactT_co", bound=np.inexact, default=Any, covariant=True) @@ -302,13 +304,31 @@ def eigh(a: _AsArrayC128, UPLO: _SideKind = "L") -> EighResult[np.float64, np.co @overload # fallback def eigh(a: _ArrayLikeComplex_co, UPLO: _SideKind = "L") -> EighResult: ... -# TODO: narrow return types -@overload -def qr(a: _ArrayLikeInt_co, mode: _ModeKind = "reduced") -> QRResult: ... -@overload -def qr(a: _ArrayLikeFloat_co, mode: _ModeKind = "reduced") -> QRResult: ... -@overload -def qr(a: _ArrayLikeComplex_co, mode: _ModeKind = "reduced") -> QRResult: ... +# +@overload # ~inexact32, reduced|complete +def qr[ScalarT: _inexact32](a: _ArrayLike[ScalarT], mode: L["reduced", "complete"] = "reduced") -> QRResult[ScalarT]: ... 
+@overload # +float64, reduced|complete +def qr(a: _ToArrayF64, mode: L["reduced", "complete"] = "reduced") -> QRResult[np.float64]: ... +@overload # ~complex128, reduced|complete +def qr(a: _AsArrayC128, mode: L["reduced", "complete"] = "reduced") -> QRResult[np.complex128]: ... +@overload # fallback, reduced|complete +def qr(a: _ArrayLikeComplex_co, mode: L["reduced", "complete"] = "reduced") -> QRResult: ... +@overload # ~inexact32, r +def qr[ScalarT: _inexact32](a: _ArrayLike[ScalarT], mode: L["r"]) -> NDArray[ScalarT]: ... +@overload # +float64, r +def qr(a: _ToArrayF64, mode: L["r"]) -> NDArray[np.float64]: ... +@overload # ~complex128, r +def qr(a: _AsArrayC128, mode: L["r"]) -> NDArray[np.complex128]: ... +@overload # fallback, r +def qr(a: _ArrayLikeComplex_co, mode: L["r"]) -> np.ndarray: ... +@overload # ~inexact32, raw +def qr[ScalarT: _inexact32](a: _ArrayLike[ScalarT], mode: L["raw"]) -> _tuple2[NDArray[ScalarT]]: ... +@overload # +float64, raw +def qr(a: _ToArrayF64, mode: L["raw"]) -> _tuple2[NDArray[np.float64]]: ... +@overload # ~complex128, raw +def qr(a: _AsArrayC128, mode: L["raw"]) -> _tuple2[NDArray[np.complex128]]: ... +@overload # fallback, raw +def qr(a: _ArrayLikeComplex_co, mode: L["raw"]) -> _tuple2[np.ndarray]: ... 
# TODO: narrow return types @overload diff --git a/numpy/typing/tests/data/fail/linalg.pyi b/numpy/typing/tests/data/fail/linalg.pyi index b05327b7436d..426c80555d9b 100644 --- a/numpy/typing/tests/data/fail/linalg.pyi +++ b/numpy/typing/tests/data/fail/linalg.pyi @@ -16,9 +16,16 @@ np.linalg.cholesky(AR_O) # type: ignore[type-var] np.linalg.matrix_power(AR_M, 5) # type: ignore[arg-type] -np.linalg.qr(AR_O) # type: ignore[arg-type] +np.linalg.eig(AR_O) # type: ignore[arg-type] + +np.linalg.eigh(AR_O) # type: ignore[arg-type] +np.linalg.eigh(AR_O, UPLO="bob") # type: ignore[call-overload] + +np.linalg.qr(AR_O) # type: ignore[type-var] np.linalg.qr(AR_f8, mode="bob") # type: ignore[call-overload] +np.linalg.svd(AR_O) # type: ignore[arg-type] + np.linalg.eigvals(AR_O) # type: ignore[arg-type] np.linalg.eigvalsh(AR_O) # type: ignore[arg-type] @@ -28,13 +35,6 @@ np.linalg.svdvals(AR_O) # type: ignore[arg-type] np.linalg.svdvals(AR_M) # type: ignore[arg-type] np.linalg.svdvals(x=AR_f8) # type: ignore[call-overload] -np.linalg.eig(AR_O) # type: ignore[arg-type] - -np.linalg.eigh(AR_O) # type: ignore[arg-type] -np.linalg.eigh(AR_O, UPLO="bob") # type: ignore[call-overload] - -np.linalg.svd(AR_O) # type: ignore[arg-type] - np.linalg.cond(AR_O) # type: ignore[arg-type] np.linalg.cond(AR_f8, p="bob") # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/reveal/linalg.pyi b/numpy/typing/tests/data/reveal/linalg.pyi index 76eebd15470a..0538b45ddc71 100644 --- a/numpy/typing/tests/data/reveal/linalg.pyi +++ b/numpy/typing/tests/data/reveal/linalg.pyi @@ -61,9 +61,27 @@ assert_type(np.linalg.outer(AR_O, AR_O), npt.NDArray[np.object_]) # NOTE: Mypy incorrectly infers `ndarray[Any, Any]`, but pyright behaves correctly. 
assert_type(np.linalg.outer(AR_i8, AR_m), npt.NDArray[np.timedelta64]) # type: ignore[assert-type] -assert_type(np.linalg.qr(AR_i8), QRResult) -assert_type(np.linalg.qr(AR_f8), QRResult) -assert_type(np.linalg.qr(AR_c16), QRResult) +assert_type(np.linalg.qr(AR_i8), QRResult[np.float64]) +assert_type(np.linalg.qr(AR_i8, "r"), npt.NDArray[np.float64]) +assert_type(np.linalg.qr(AR_i8, "raw"), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) +assert_type(np.linalg.qr(AR_f4), QRResult[np.float32]) +assert_type(np.linalg.qr(AR_f4, "r"), npt.NDArray[np.float32]) +assert_type(np.linalg.qr(AR_f4, "raw"), tuple[npt.NDArray[np.float32], npt.NDArray[np.float32]]) +assert_type(np.linalg.qr(AR_f8), QRResult[np.float64]) +assert_type(np.linalg.qr(AR_f8, "r"), npt.NDArray[np.float64]) +assert_type(np.linalg.qr(AR_f8, "raw"), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) +assert_type(np.linalg.qr(AR_c8), QRResult[np.complex64]) +assert_type(np.linalg.qr(AR_c8, "r"), npt.NDArray[np.complex64]) +assert_type(np.linalg.qr(AR_c8, "raw"), tuple[npt.NDArray[np.complex64], npt.NDArray[np.complex64]]) +assert_type(np.linalg.qr(AR_c16), QRResult[np.complex128]) +assert_type(np.linalg.qr(AR_c16, "r"), npt.NDArray[np.complex128]) +assert_type(np.linalg.qr(AR_c16, "raw"), tuple[npt.NDArray[np.complex128], npt.NDArray[np.complex128]]) +# Mypy bug: `Expression is of type "QRResult[Any]", not "QRResult[Any]"` +assert_type(np.linalg.qr(AR_any), QRResult[Any]) # type: ignore[assert-type] +# Mypy bug: `Expression is of type "ndarray[Any, Any]", not "ndarray[tuple[Any, ...], dtype[Any]]"` +assert_type(np.linalg.qr(AR_any, "r"), np.ndarray) # type: ignore[assert-type] +# Mypy bug: `Expression is of type "tuple[Any, ...]", <--snip-->"` +assert_type(np.linalg.qr(AR_any, "raw"), tuple[np.ndarray, np.ndarray]) # type: ignore[assert-type] assert_type(np.linalg.eigvals(AR_i8), npt.NDArray[np.float64] | npt.NDArray[np.complex128]) assert_type(np.linalg.eigvals(AR_f8), 
npt.NDArray[np.float64] | npt.NDArray[np.complex128]) @@ -78,8 +96,7 @@ assert_type(np.linalg.eig(AR_f4), EigResult[np.float32] | EigResult[np.complex64 assert_type(np.linalg.eig(AR_f8), EigResult[np.float64] | EigResult[np.complex128]) assert_type(np.linalg.eig(AR_c8), EigResult[np.complex64]) assert_type(np.linalg.eig(AR_c16), EigResult[np.complex128]) -# mypy is being silly again here: -# > Expression is of type "EigResult[Any]", not "EigResult[Any]" +# Mypy bug: `Expression is of type "EigResult[Any]", not "EigResult[Any]"` assert_type(np.linalg.eig(AR_f_), EigResult) # type: ignore[assert-type] assert_type(np.linalg.eig(AR_c_), EigResult) # type: ignore[assert-type] assert_type(np.linalg.eig(AR_any), EigResult) # type: ignore[assert-type] @@ -89,8 +106,7 @@ assert_type(np.linalg.eigh(AR_f4), EighResult[np.float32, np.float32]) assert_type(np.linalg.eigh(AR_f8), EighResult[np.float64, np.float64]) assert_type(np.linalg.eigh(AR_c8), EighResult[np.float32, np.complex64]) assert_type(np.linalg.eigh(AR_c16), EighResult[np.float64, np.complex128]) -# mypy is being silly again here: -# > Expression is of type "EighResult[Any, Any]", not "EighResult[Any, Any]" +# Mypy bug: `Expression is of type "EighResult[Any, Any]", not "EighResult[Any, Any]"` assert_type(np.linalg.eigh(AR_any), EighResult[Any, Any]) # type: ignore[assert-type] assert_type(np.linalg.svd(AR_i8), SVDResult) From 3f2b499c64703d7a1c14558db514035427d69f7b Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 9 Dec 2025 17:50:48 +0100 Subject: [PATCH 1002/1718] TYP: `linalg`: improved `svd` return types --- numpy/linalg/_linalg.pyi | 125 +++++++++------------- numpy/typing/tests/data/reveal/linalg.pyi | 38 ++++--- 2 files changed, 78 insertions(+), 85 deletions(-) diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index 1c51277fe3a8..e5293b6c965d 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -102,34 +102,30 @@ _InexactT_co = TypeVar("_InexactT_co", bound=np.inexact, 
default=Any, covariant= fortran_int = np.intc -# NOTE: only generic when `typing.TYPE_CHECKING` +# NOTE: These names tuples are only generic when `typing.TYPE_CHECKING`. + class EigResult(NamedTuple, Generic[_InexactT_co]): eigenvalues: NDArray[_InexactT_co] eigenvectors: NDArray[_InexactT_co] -# NOTE: only generic when `typing.TYPE_CHECKING` class EighResult(NamedTuple, Generic[_FloatingT_co, _InexactT_co]): eigenvalues: NDArray[_FloatingT_co] eigenvectors: NDArray[_InexactT_co] -# NOTE: only generic when `typing.TYPE_CHECKING` class QRResult(NamedTuple, Generic[_InexactT_co]): Q: NDArray[_InexactT_co] R: NDArray[_InexactT_co] -# TODO: generic +class SVDResult(NamedTuple, Generic[_FloatingT_co, _InexactT_co]): + U: NDArray[_InexactT_co] + S: NDArray[_FloatingT_co] + Vh: NDArray[_InexactT_co] + +# TODO: Make generic class SlogdetResult(NamedTuple): - # TODO: `sign` and `logabsdet` are scalars for input 2D arrays and - # a `(x.ndim - 2)`` dimensional arrays otherwise sign: Any logabsdet: Any -# TODO: generic -class SVDResult(NamedTuple): - U: NDArray[Any] - S: NDArray[Any] - Vh: NDArray[Any] - # TODO: narrow return types @overload def tensorsolve( @@ -307,81 +303,66 @@ def eigh(a: _ArrayLikeComplex_co, UPLO: _SideKind = "L") -> EighResult: ... # @overload # ~inexact32, reduced|complete def qr[ScalarT: _inexact32](a: _ArrayLike[ScalarT], mode: L["reduced", "complete"] = "reduced") -> QRResult[ScalarT]: ... -@overload # +float64, reduced|complete -def qr(a: _ToArrayF64, mode: L["reduced", "complete"] = "reduced") -> QRResult[np.float64]: ... -@overload # ~complex128, reduced|complete -def qr(a: _AsArrayC128, mode: L["reduced", "complete"] = "reduced") -> QRResult[np.complex128]: ... -@overload # fallback, reduced|complete -def qr(a: _ArrayLikeComplex_co, mode: L["reduced", "complete"] = "reduced") -> QRResult: ... @overload # ~inexact32, r def qr[ScalarT: _inexact32](a: _ArrayLike[ScalarT], mode: L["r"]) -> NDArray[ScalarT]: ... 
-@overload # +float64, r -def qr(a: _ToArrayF64, mode: L["r"]) -> NDArray[np.float64]: ... -@overload # ~complex128, r -def qr(a: _AsArrayC128, mode: L["r"]) -> NDArray[np.complex128]: ... -@overload # fallback, r -def qr(a: _ArrayLikeComplex_co, mode: L["r"]) -> np.ndarray: ... @overload # ~inexact32, raw def qr[ScalarT: _inexact32](a: _ArrayLike[ScalarT], mode: L["raw"]) -> _tuple2[NDArray[ScalarT]]: ... +@overload # +float64, reduced|complete +def qr(a: _ToArrayF64, mode: L["reduced", "complete"] = "reduced") -> QRResult[np.float64]: ... +@overload # +float64, r +def qr(a: _ToArrayF64, mode: L["r"]) -> NDArray[np.float64]: ... @overload # +float64, raw def qr(a: _ToArrayF64, mode: L["raw"]) -> _tuple2[NDArray[np.float64]]: ... +@overload # ~complex128, reduced|complete +def qr(a: _AsArrayC128, mode: L["reduced", "complete"] = "reduced") -> QRResult[np.complex128]: ... +@overload # ~complex128, r +def qr(a: _AsArrayC128, mode: L["r"]) -> NDArray[np.complex128]: ... @overload # ~complex128, raw def qr(a: _AsArrayC128, mode: L["raw"]) -> _tuple2[NDArray[np.complex128]]: ... +@overload # fallback, reduced|complete +def qr(a: _ArrayLikeComplex_co, mode: L["reduced", "complete"] = "reduced") -> QRResult: ... +@overload # fallback, r +def qr(a: _ArrayLikeComplex_co, mode: L["r"]) -> np.ndarray: ... @overload # fallback, raw def qr(a: _ArrayLikeComplex_co, mode: L["raw"]) -> _tuple2[np.ndarray]: ... -# TODO: narrow return types -@overload -def svd( - a: _ArrayLikeInt_co, - full_matrices: bool = True, - compute_uv: L[True] = True, - hermitian: bool = False, -) -> SVDResult: ... -@overload +# +@overload # workaround for microsoft/pyright#10232, compute_uv=True (default) +def svd(a: NDArray[Never], full_matrices: bool = True, compute_uv: L[True] = True, hermitian: bool = False) -> SVDResult: ... 
+@overload # workaround for microsoft/pyright#10232, compute_uv=False (positional) +def svd(a: NDArray[Never], full_matrices: bool, compute_uv: L[False], hermitian: bool = False) -> np.ndarray: ... +@overload # workaround for microsoft/pyright#10232, compute_uv=False (keyword) +def svd(a: NDArray[Never], full_matrices: bool = True, *, compute_uv: L[False], hermitian: bool = False) -> np.ndarray: ... +@overload # ~inexact32, compute_uv=True (default) +def svd[ScalarT: _inexact32]( + a: _ArrayLike[ScalarT], full_matrices: bool = True, compute_uv: L[True] = True, hermitian: bool = False +) -> SVDResult[np.float32, ScalarT]: ... +@overload # ~inexact32, compute_uv=False (positional) +def svd(a: _ArrayLike[_inexact32], full_matrices: bool, compute_uv: L[False], hermitian: bool = False) -> NDArray[np.float32]: ... +@overload # ~inexact32, compute_uv=False (keyword) def svd( - a: _ArrayLikeFloat_co, - full_matrices: bool = True, - compute_uv: L[True] = True, - hermitian: bool = False, -) -> SVDResult: ... -@overload + a: _ArrayLike[_inexact32], full_matrices: bool = True, *, compute_uv: L[False], hermitian: bool = False +) -> NDArray[np.float32]: ... +@overload # +float64, compute_uv=True (default) def svd( - a: _ArrayLikeComplex_co, - full_matrices: bool = True, - compute_uv: L[True] = True, - hermitian: bool = False, -) -> SVDResult: ... -@overload + a: _ToArrayF64, full_matrices: bool = True, compute_uv: L[True] = True, hermitian: bool = False +) -> SVDResult[np.float64, np.float64]: ... +@overload # ~complex128, compute_uv=True (default) def svd( - a: _ArrayLikeInt_co, - full_matrices: bool = True, - *, - compute_uv: L[False], - hermitian: bool = False, -) -> NDArray[np.float64]: ... -@overload + a: _AsArrayC128, full_matrices: bool = True, compute_uv: L[True] = True, hermitian: bool = False +) -> SVDResult[np.float64, np.complex128]: ... 
+@overload # +float64 | ~complex128, compute_uv=False (positional) +def svd(a: _ToArrayC128, full_matrices: bool, compute_uv: L[False], hermitian: bool = False) -> NDArray[np.float64]: ... +@overload # +float64 | ~complex128, compute_uv=False (keyword) +def svd(a: _ToArrayC128, full_matrices: bool = True, *, compute_uv: L[False], hermitian: bool = False) -> NDArray[np.float64]: ... +@overload # fallback, compute_uv=True (default) def svd( - a: _ArrayLikeInt_co, - full_matrices: bool, - compute_uv: L[False], - hermitian: bool = False, -) -> NDArray[np.float64]: ... -@overload -def svd( - a: _ArrayLikeComplex_co, - full_matrices: bool = True, - *, - compute_uv: L[False], - hermitian: bool = False, -) -> NDArray[floating]: ... -@overload -def svd( - a: _ArrayLikeComplex_co, - full_matrices: bool, - compute_uv: L[False], - hermitian: bool = False, -) -> NDArray[floating]: ... + a: _ArrayLikeComplex_co, full_matrices: bool = True, compute_uv: L[True] = True, hermitian: bool = False +) -> SVDResult: ... +@overload # fallback, compute_uv=False (positional) +def svd(a: _ArrayLikeComplex_co, full_matrices: bool, compute_uv: L[False], hermitian: bool = False) -> np.ndarray: ... +@overload # fallback, compute_uv=False (keyword) +def svd(a: _ArrayLikeComplex_co, full_matrices: bool = True, *, compute_uv: L[False], hermitian: bool = False) -> np.ndarray: ... 
# NOTE: for real input the output dtype (floating/complexfloating) depends on the specific values @overload # abstract `inexact` and `floating` (excluding concrete types) diff --git a/numpy/typing/tests/data/reveal/linalg.pyi b/numpy/typing/tests/data/reveal/linalg.pyi index 0538b45ddc71..5401e01aad31 100644 --- a/numpy/typing/tests/data/reveal/linalg.pyi +++ b/numpy/typing/tests/data/reveal/linalg.pyi @@ -79,9 +79,9 @@ assert_type(np.linalg.qr(AR_c16, "raw"), tuple[npt.NDArray[np.complex128], npt.N # Mypy bug: `Expression is of type "QRResult[Any]", not "QRResult[Any]"` assert_type(np.linalg.qr(AR_any), QRResult[Any]) # type: ignore[assert-type] # Mypy bug: `Expression is of type "ndarray[Any, Any]", not "ndarray[tuple[Any, ...], dtype[Any]]"` -assert_type(np.linalg.qr(AR_any, "r"), np.ndarray) # type: ignore[assert-type] +assert_type(np.linalg.qr(AR_any, "r"), npt.NDArray[Any]) # type: ignore[assert-type] # Mypy bug: `Expression is of type "tuple[Any, ...]", <--snip-->"` -assert_type(np.linalg.qr(AR_any, "raw"), tuple[np.ndarray, np.ndarray]) # type: ignore[assert-type] +assert_type(np.linalg.qr(AR_any, "raw"), tuple[npt.NDArray[Any], npt.NDArray[Any]]) # type: ignore[assert-type] assert_type(np.linalg.eigvals(AR_i8), npt.NDArray[np.float64] | npt.NDArray[np.complex128]) assert_type(np.linalg.eigvals(AR_f8), npt.NDArray[np.float64] | npt.NDArray[np.complex128]) @@ -97,9 +97,9 @@ assert_type(np.linalg.eig(AR_f8), EigResult[np.float64] | EigResult[np.complex12 assert_type(np.linalg.eig(AR_c8), EigResult[np.complex64]) assert_type(np.linalg.eig(AR_c16), EigResult[np.complex128]) # Mypy bug: `Expression is of type "EigResult[Any]", not "EigResult[Any]"` -assert_type(np.linalg.eig(AR_f_), EigResult) # type: ignore[assert-type] -assert_type(np.linalg.eig(AR_c_), EigResult) # type: ignore[assert-type] -assert_type(np.linalg.eig(AR_any), EigResult) # type: ignore[assert-type] +assert_type(np.linalg.eig(AR_f_), EigResult[Any]) # type: ignore[assert-type] 
+assert_type(np.linalg.eig(AR_c_), EigResult[Any]) # type: ignore[assert-type] +assert_type(np.linalg.eig(AR_any), EigResult[Any]) # type: ignore[assert-type] assert_type(np.linalg.eigh(AR_i8), EighResult[np.float64, np.float64]) assert_type(np.linalg.eigh(AR_f4), EighResult[np.float32, np.float32]) @@ -109,20 +109,32 @@ assert_type(np.linalg.eigh(AR_c16), EighResult[np.float64, np.complex128]) # Mypy bug: `Expression is of type "EighResult[Any, Any]", not "EighResult[Any, Any]"` assert_type(np.linalg.eigh(AR_any), EighResult[Any, Any]) # type: ignore[assert-type] -assert_type(np.linalg.svd(AR_i8), SVDResult) -assert_type(np.linalg.svd(AR_f8), SVDResult) -assert_type(np.linalg.svd(AR_c16), SVDResult) +assert_type(np.linalg.svd(AR_i8), SVDResult[np.float64, np.float64]) assert_type(np.linalg.svd(AR_i8, compute_uv=False), npt.NDArray[np.float64]) -assert_type(np.linalg.svd(AR_i8, True, False), npt.NDArray[np.float64]) -assert_type(np.linalg.svd(AR_f8, compute_uv=False), npt.NDArray[np.floating]) -assert_type(np.linalg.svd(AR_c16, compute_uv=False), npt.NDArray[np.floating]) -assert_type(np.linalg.svd(AR_c16, True, False), npt.NDArray[np.floating]) +assert_type(np.linalg.svd(AR_f4), SVDResult[np.float32, np.float32]) +assert_type(np.linalg.svd(AR_f4, compute_uv=False), npt.NDArray[np.float32]) +assert_type(np.linalg.svd(AR_f8), SVDResult[np.float64, np.float64]) +assert_type(np.linalg.svd(AR_f8, compute_uv=False), npt.NDArray[np.float64]) +assert_type(np.linalg.svd(AR_c8), SVDResult[np.float32, np.complex64]) +assert_type(np.linalg.svd(AR_c8, compute_uv=False), npt.NDArray[np.float32]) +assert_type(np.linalg.svd(AR_c16), SVDResult[np.float64, np.complex128]) +assert_type(np.linalg.svd(AR_c16, compute_uv=False), npt.NDArray[np.float64]) +assert_type(np.linalg.svd(int_list_2d), SVDResult[np.float64, np.float64]) +assert_type(np.linalg.svd(int_list_2d, compute_uv=False), npt.NDArray[np.float64]) +assert_type(np.linalg.svd(float_list_2d), SVDResult[np.float64, 
np.float64]) +assert_type(np.linalg.svd(float_list_2d, compute_uv=False), npt.NDArray[np.float64]) +assert_type(np.linalg.svd(complex_list_2d), SVDResult[np.float64, np.complex128]) +assert_type(np.linalg.svd(complex_list_2d, compute_uv=False), npt.NDArray[np.float64]) +# Mypy bug: `Expression is of type "SVDResult[Any, Any]", not "SVDResult[Any, Any]"` +assert_type(np.linalg.svd(AR_any), SVDResult[Any]) # type: ignore[assert-type] +# Mypy bug: `Expression is of type "ndarray[Any, Any]", not "ndarray[tuple[Any, ...], dtype[Any]]"` +assert_type(np.linalg.svd(AR_any, compute_uv=False), npt.NDArray[Any]) # type: ignore[assert-type] assert_type(np.linalg.svdvals(AR_b), npt.NDArray[np.float64]) assert_type(np.linalg.svdvals(AR_i8), npt.NDArray[np.float64]) assert_type(np.linalg.svdvals(AR_f4), npt.NDArray[np.float32]) -assert_type(np.linalg.svdvals(AR_c8), npt.NDArray[np.float32]) assert_type(np.linalg.svdvals(AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.svdvals(AR_c8), npt.NDArray[np.float32]) assert_type(np.linalg.svdvals(AR_c16), npt.NDArray[np.float64]) assert_type(np.linalg.svdvals(int_list_2d), npt.NDArray[np.float64]) assert_type(np.linalg.svdvals(float_list_2d), npt.NDArray[np.float64]) From 86c7f6e86467169a578db8676f45ceaa7110aaea Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 9 Dec 2025 17:55:18 +0100 Subject: [PATCH 1003/1718] TYP: `linalg`: optionally generic `SlogdetResult` --- numpy/linalg/_linalg.pyi | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index e5293b6c965d..c5ac21745c31 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -96,13 +96,15 @@ type _NegInt = L[-1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -1 type _tuple2[T] = tuple[T, T] _FloatingT_co = TypeVar("_FloatingT_co", bound=np.floating, default=Any, covariant=True) +_FloatingOrArrayT_co = TypeVar("_FloatingOrArrayT_co", bound=np.floating | 
NDArray[np.floating], default=Any, covariant=True) _InexactT_co = TypeVar("_InexactT_co", bound=np.inexact, default=Any, covariant=True) +_InexactOrArrayT_co = TypeVar("_InexactOrArrayT_co", bound=np.inexact | NDArray[np.inexact], default=Any, covariant=True) ### fortran_int = np.intc -# NOTE: These names tuples are only generic when `typing.TYPE_CHECKING`. +# NOTE: These named tuple types are only generic when `typing.TYPE_CHECKING` class EigResult(NamedTuple, Generic[_InexactT_co]): eigenvalues: NDArray[_InexactT_co] @@ -121,10 +123,9 @@ class SVDResult(NamedTuple, Generic[_FloatingT_co, _InexactT_co]): S: NDArray[_FloatingT_co] Vh: NDArray[_InexactT_co] -# TODO: Make generic -class SlogdetResult(NamedTuple): - sign: Any - logabsdet: Any +class SlogdetResult(NamedTuple, Generic[_FloatingOrArrayT_co, _InexactOrArrayT_co]): + sign: _FloatingOrArrayT_co + logabsdet: _InexactOrArrayT_co # TODO: narrow return types @overload @@ -398,10 +399,6 @@ def svdvals(x: _ToArrayC128, /) -> NDArray[np.float64]: ... @overload # fallback def svdvals(a: _ArrayLikeComplex_co, /) -> NDArray[np.floating]: ... -# TODO: Returns a scalar for 2D arrays and -# a `(x.ndim - 2)`` dimensional array otherwise -def cond(x: _ArrayLikeComplex_co, p: float | L["fro", "nuc"] | None = None) -> Any: ... - # TODO: Returns `int` for <2D arrays and `intp` otherwise def matrix_rank( A: _ArrayLikeComplex_co, @@ -411,6 +408,10 @@ def matrix_rank( rtol: _ArrayLikeFloat_co | None = None, ) -> Any: ... +# TODO: Returns a scalar for 2D arrays and +# a `(x.ndim - 2)`` dimensional array otherwise +def cond(x: _ArrayLikeComplex_co, p: float | L["fro", "nuc"] | None = None) -> Any: ... + # TODO: Returns a 2-tuple of scalars for 2D arrays and # a 2-tuple of `(a.ndim - 2)`` dimensional arrays otherwise def slogdet(a: _ArrayLikeComplex_co) -> SlogdetResult: ... 
From f1805ba20cc805c6da8436003f691b699f9dff5c Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 9 Dec 2025 18:23:23 +0100 Subject: [PATCH 1004/1718] TYP: `linalg`: improved `matrix_rank` return (shape-)types --- numpy/linalg/_linalg.pyi | 55 ++++++++++++++++++++--- numpy/typing/tests/data/reveal/linalg.pyi | 27 +++++++++-- 2 files changed, 72 insertions(+), 10 deletions(-) diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index c5ac21745c31..73602fcdb29d 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -1,4 +1,4 @@ -from collections.abc import Iterable +from collections.abc import Iterable, Sequence from typing import ( Any, Generic, @@ -77,8 +77,10 @@ __all__ = [ "vecdot", ] -type _ModeKind = L["reduced", "complete", "r", "raw"] -type _SideKind = L["L", "U", "l", "u"] +type _AtMost1D = tuple[()] | tuple[int] +type _AtLeast4D = tuple[int, int, int, int, *tuple[int, ...]] + +type _tuple2[T] = tuple[T, T] type _inexact32 = np.float32 | np.complex64 @@ -86,15 +88,13 @@ type _inexact32 = np.float32 | np.complex64 type _ToArrayF64 = _ArrayLike[np.float64 | np.integer | np.bool] | _NestedSequence[float] type _ToArrayC128 = _ArrayLike[np.complex128 | np.float64 | np.integer | np.bool] | _NestedSequence[complex] # the invariant `list` type avoids overlap with bool, int, etc -type _AsArrayInt = _ArrayLike[np.int_] | list[int] | _NestedSequence[list[int]] type _AsArrayF64 = _ArrayLike[np.float64] | list[float] | _NestedSequence[list[float]] type _AsArrayC128 = _ArrayLike[np.complex128] | list[complex] | _NestedSequence[list[complex]] +type _SideKind = L["L", "U", "l", "u"] type _PosInt = L[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] type _NegInt = L[-1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16] -type _tuple2[T] = tuple[T, T] - _FloatingT_co = TypeVar("_FloatingT_co", bound=np.floating, default=Any, covariant=True) _FloatingOrArrayT_co = TypeVar("_FloatingOrArrayT_co", bound=np.floating | 
NDArray[np.floating], default=Any, covariant=True) _InexactT_co = TypeVar("_InexactT_co", bound=np.inexact, default=Any, covariant=True) @@ -399,7 +399,48 @@ def svdvals(x: _ToArrayC128, /) -> NDArray[np.float64]: ... @overload # fallback def svdvals(a: _ArrayLikeComplex_co, /) -> NDArray[np.floating]: ... -# TODO: Returns `int` for <2D arrays and `intp` otherwise +# +@overload # workaround for microsoft/pyright#10232 +def matrix_rank( + A: np.ndarray[tuple[Never, ...], np.dtype[np.number]], + tol: _ArrayLikeFloat_co | None = None, + hermitian: bool = False, + *, + rtol: _ArrayLikeFloat_co | None = None, +) -> Any: ... +@overload # <2d +def matrix_rank( + A: complex | np.number | Sequence[complex | np.number] | np.ndarray[_AtMost1D, np.dtype[np.number]], + tol: _ArrayLikeFloat_co | None = None, + hermitian: bool = False, + *, + rtol: _ArrayLikeFloat_co | None = None, +) -> L[0, 1]: ... +@overload # =2d +def matrix_rank( + A: Sequence[Sequence[complex | np.number]] | np.ndarray[tuple[int, int], np.dtype[np.number]], + tol: _ArrayLikeFloat_co | None = None, + hermitian: bool = False, + *, + rtol: _ArrayLikeFloat_co | None = None, +) -> np.int_: ... +@overload # =3d +def matrix_rank( + A: Sequence[Sequence[Sequence[complex | np.number]]] | np.ndarray[tuple[int, int, int], np.dtype[np.number]], + tol: _ArrayLikeFloat_co | None = None, + hermitian: bool = False, + *, + rtol: _ArrayLikeFloat_co | None = None, +) -> np.ndarray[tuple[int], np.dtype[np.int_]]: ... +@overload # ≥4d +def matrix_rank( + A: Sequence[Sequence[Sequence[_NestedSequence[complex | np.number]]]] | np.ndarray[_AtLeast4D, np.dtype[np.number]], + tol: _ArrayLikeFloat_co | None = None, + hermitian: bool = False, + *, + rtol: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.int_]: ... 
+@overload # ?d def matrix_rank( A: _ArrayLikeComplex_co, tol: _ArrayLikeFloat_co | None = None, diff --git a/numpy/typing/tests/data/reveal/linalg.pyi b/numpy/typing/tests/data/reveal/linalg.pyi index 5401e01aad31..7ffb8df594ad 100644 --- a/numpy/typing/tests/data/reveal/linalg.pyi +++ b/numpy/typing/tests/data/reveal/linalg.pyi @@ -1,4 +1,4 @@ -from typing import Any, assert_type +from typing import Any, Literal, assert_type import numpy as np import numpy.typing as npt @@ -11,15 +11,18 @@ from numpy.linalg._linalg import ( ) int_list_2d: list[list[int]] +float_list_1d: list[float] float_list_2d: list[list[float]] +float_list_3d: list[list[list[float]]] +float_list_4d: list[list[list[list[float]]]] complex_list_2d: list[list[complex]] AR_any: np.ndarray -AR_i8: npt.NDArray[np.int64] AR_f_: npt.NDArray[np.floating] +AR_c_: npt.NDArray[np.complexfloating] +AR_i8: npt.NDArray[np.int64] AR_f4: npt.NDArray[np.float32] AR_f8: npt.NDArray[np.float64] -AR_c_: npt.NDArray[np.complexfloating] AR_c8: npt.NDArray[np.complex64] AR_c16: npt.NDArray[np.complex128] AR_O: npt.NDArray[np.object_] @@ -27,6 +30,15 @@ AR_m: npt.NDArray[np.timedelta64] AR_S: npt.NDArray[np.str_] AR_b: npt.NDArray[np.bool] +SC_f8: np.float64 +AR_f8_0d: np.ndarray[tuple[()], np.dtype[np.float64]] +AR_f8_1d: np.ndarray[tuple[int], np.dtype[np.float64]] +AR_f8_2d: np.ndarray[tuple[int, int], np.dtype[np.float64]] +AR_f8_3d: np.ndarray[tuple[int, int, int], np.dtype[np.float64]] +AR_f8_4d: np.ndarray[tuple[int, int, int, int], np.dtype[np.float64]] + +### + assert_type(np.linalg.tensorsolve(AR_i8, AR_i8), npt.NDArray[np.float64]) assert_type(np.linalg.tensorsolve(AR_i8, AR_f8), npt.NDArray[np.floating]) assert_type(np.linalg.tensorsolve(AR_c16, AR_f8), npt.NDArray[np.complexfloating]) @@ -147,6 +159,15 @@ assert_type(np.linalg.cond(AR_c16), Any) assert_type(np.linalg.matrix_rank(AR_i8), Any) assert_type(np.linalg.matrix_rank(AR_f8), Any) assert_type(np.linalg.matrix_rank(AR_c16), Any) 
+assert_type(np.linalg.matrix_rank(SC_f8), Literal[0, 1]) +assert_type(np.linalg.matrix_rank(AR_f8_1d), Literal[0, 1]) +assert_type(np.linalg.matrix_rank(float_list_1d), Literal[0, 1]) +assert_type(np.linalg.matrix_rank(AR_f8_2d), np.int_) +assert_type(np.linalg.matrix_rank(float_list_2d), np.int_) +assert_type(np.linalg.matrix_rank(AR_f8_3d), np.ndarray[tuple[int], np.dtype[np.int_]]) +assert_type(np.linalg.matrix_rank(float_list_3d), np.ndarray[tuple[int], np.dtype[np.int_]]) +assert_type(np.linalg.matrix_rank(AR_f8_4d), npt.NDArray[np.int_]) +assert_type(np.linalg.matrix_rank(float_list_4d), npt.NDArray[np.int_]) assert_type(np.linalg.pinv(AR_i8), npt.NDArray[np.float64]) assert_type(np.linalg.pinv(AR_f8), npt.NDArray[np.float64]) From eb2b304fe8a355d8f4d980667af0aec73bb6c958 Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 9 Dec 2025 18:46:53 +0100 Subject: [PATCH 1005/1718] TYP: `linalg`: improved `cond` return types (shape-type-dependent) --- numpy/linalg/_linalg.pyi | 37 ++++++++++++++++++----- numpy/typing/tests/data/fail/linalg.pyi | 2 +- numpy/typing/tests/data/reveal/linalg.pyi | 21 ++++++++----- 3 files changed, 44 insertions(+), 16 deletions(-) diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index 73602fcdb29d..46927bc7b044 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -78,19 +78,26 @@ __all__ = [ ] type _AtMost1D = tuple[()] | tuple[int] +type _AtLeast3D = tuple[int, int, int, *tuple[int, ...]] type _AtLeast4D = tuple[int, int, int, int, *tuple[int, ...]] +type _JustAnyShape = tuple[Never, ...] 
# workaround for microsoft/pyright#10232 type _tuple2[T] = tuple[T, T] type _inexact32 = np.float32 | np.complex64 +type _to_inexact64 = np.complex128 | np.float64 | np.integer | np.bool # anything that safe-casts (from floating) into float64/complex128 type _ToArrayF64 = _ArrayLike[np.float64 | np.integer | np.bool] | _NestedSequence[float] -type _ToArrayC128 = _ArrayLike[np.complex128 | np.float64 | np.integer | np.bool] | _NestedSequence[complex] +type _ToArrayC128 = _ArrayLike[_to_inexact64] | _NestedSequence[complex] # the invariant `list` type avoids overlap with bool, int, etc type _AsArrayF64 = _ArrayLike[np.float64] | list[float] | _NestedSequence[list[float]] type _AsArrayC128 = _ArrayLike[np.complex128] | list[complex] | _NestedSequence[list[complex]] +type _ToArrayC128_2d = np.ndarray[tuple[int, int], np.dtype[_to_inexact64]] | Sequence[Sequence[complex]] +type _ToArrayC128_3nd = np.ndarray[_AtLeast3D, np.dtype[_to_inexact64]] | Sequence[Sequence[_NestedSequence[complex]]] + +type _OrderKind = L[1, -1, 2, -2, "fro", "nuc"] | float # only accepts `-inf` and `inf` as `float` type _SideKind = L["L", "U", "l", "u"] type _PosInt = L[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] type _NegInt = L[-1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16] @@ -410,7 +417,7 @@ def matrix_rank( ) -> Any: ... @overload # <2d def matrix_rank( - A: complex | np.number | Sequence[complex | np.number] | np.ndarray[_AtMost1D, np.dtype[np.number]], + A: complex | Sequence[complex] | np.ndarray[_AtMost1D, np.dtype[np.number]], tol: _ArrayLikeFloat_co | None = None, hermitian: bool = False, *, @@ -418,7 +425,7 @@ def matrix_rank( ) -> L[0, 1]: ... 
@overload # =2d def matrix_rank( - A: Sequence[Sequence[complex | np.number]] | np.ndarray[tuple[int, int], np.dtype[np.number]], + A: Sequence[Sequence[complex]] | np.ndarray[tuple[int, int], np.dtype[np.number]], tol: _ArrayLikeFloat_co | None = None, hermitian: bool = False, *, @@ -426,7 +433,7 @@ def matrix_rank( ) -> np.int_: ... @overload # =3d def matrix_rank( - A: Sequence[Sequence[Sequence[complex | np.number]]] | np.ndarray[tuple[int, int, int], np.dtype[np.number]], + A: Sequence[Sequence[Sequence[complex]]] | np.ndarray[tuple[int, int, int], np.dtype[np.number]], tol: _ArrayLikeFloat_co | None = None, hermitian: bool = False, *, @@ -434,7 +441,7 @@ def matrix_rank( ) -> np.ndarray[tuple[int], np.dtype[np.int_]]: ... @overload # ≥4d def matrix_rank( - A: Sequence[Sequence[Sequence[_NestedSequence[complex | np.number]]]] | np.ndarray[_AtLeast4D, np.dtype[np.number]], + A: Sequence[Sequence[Sequence[_NestedSequence[complex]]]] | np.ndarray[_AtLeast4D, np.dtype[np.number]], tol: _ArrayLikeFloat_co | None = None, hermitian: bool = False, *, @@ -449,9 +456,23 @@ def matrix_rank( rtol: _ArrayLikeFloat_co | None = None, ) -> Any: ... -# TODO: Returns a scalar for 2D arrays and -# a `(x.ndim - 2)`` dimensional array otherwise -def cond(x: _ArrayLikeComplex_co, p: float | L["fro", "nuc"] | None = None) -> Any: ... +# +@overload # workaround for microsoft/pyright#10232 +def cond(x: np.ndarray[_JustAnyShape, np.dtype[np.number]], p: _OrderKind | None = None) -> Any: ... +@overload # 2d ~inexact32 +def cond(x: np.ndarray[tuple[int, int], np.dtype[_inexact32]], p: _OrderKind | None = None) -> np.float32: ... +@overload # 2d +inexact64 +def cond(x: _ToArrayC128_2d, p: _OrderKind | None = None) -> np.float64: ... +@overload # 2d ~number +def cond(x: np.ndarray[tuple[int, int], np.dtype[np.number]], p: _OrderKind | None = None) -> np.floating: ... 
+@overload # >2d ~inexact32 +def cond(x: np.ndarray[_AtLeast3D, np.dtype[_inexact32]], p: _OrderKind | None = None) -> NDArray[np.float32]: ... +@overload # >2d +inexact64 +def cond(x: _ToArrayC128_3nd, p: _OrderKind | None = None) -> NDArray[np.float64]: ... +@overload # >2d ~number +def cond(x: np.ndarray[_AtLeast3D, np.dtype[np.number]], p: _OrderKind | None = None) -> NDArray[np.floating]: ... +@overload # fallback +def cond(x: _ArrayLikeComplex_co, p: _OrderKind | None = None) -> Any: ... # TODO: Returns a 2-tuple of scalars for 2D arrays and # a 2-tuple of `(a.ndim - 2)`` dimensional arrays otherwise diff --git a/numpy/typing/tests/data/fail/linalg.pyi b/numpy/typing/tests/data/fail/linalg.pyi index 426c80555d9b..ec79af1d90e0 100644 --- a/numpy/typing/tests/data/fail/linalg.pyi +++ b/numpy/typing/tests/data/fail/linalg.pyi @@ -36,7 +36,7 @@ np.linalg.svdvals(AR_M) # type: ignore[arg-type] np.linalg.svdvals(x=AR_f8) # type: ignore[call-overload] np.linalg.cond(AR_O) # type: ignore[arg-type] -np.linalg.cond(AR_f8, p="bob") # type: ignore[arg-type] +np.linalg.cond(AR_f8, p="bob") # type: ignore[call-overload] np.linalg.matrix_rank(AR_O) # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/reveal/linalg.pyi b/numpy/typing/tests/data/reveal/linalg.pyi index 7ffb8df594ad..c8c281cee77c 100644 --- a/numpy/typing/tests/data/reveal/linalg.pyi +++ b/numpy/typing/tests/data/reveal/linalg.pyi @@ -37,6 +37,9 @@ AR_f8_2d: np.ndarray[tuple[int, int], np.dtype[np.float64]] AR_f8_3d: np.ndarray[tuple[int, int, int], np.dtype[np.float64]] AR_f8_4d: np.ndarray[tuple[int, int, int, int], np.dtype[np.float64]] +AR_f4_2d: np.ndarray[tuple[int, int], np.dtype[np.float32]] +AR_f4_3d: np.ndarray[tuple[int, int, int], np.dtype[np.float32]] + ### assert_type(np.linalg.tensorsolve(AR_i8, AR_i8), npt.NDArray[np.float64]) @@ -55,6 +58,10 @@ assert_type(np.linalg.inv(AR_i8), npt.NDArray[np.float64]) assert_type(np.linalg.inv(AR_f8), npt.NDArray[np.float64]) 
assert_type(np.linalg.inv(AR_c16), npt.NDArray[np.complex128]) +assert_type(np.linalg.pinv(AR_i8), npt.NDArray[np.float64]) +assert_type(np.linalg.pinv(AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.pinv(AR_c16), npt.NDArray[np.complex128]) + assert_type(np.linalg.matrix_power(AR_i8, -1), npt.NDArray[np.float64]) assert_type(np.linalg.matrix_power(AR_i8, 1), npt.NDArray[np.int64]) assert_type(np.linalg.matrix_power(AR_f8, 0), npt.NDArray[np.float64]) @@ -152,10 +159,6 @@ assert_type(np.linalg.svdvals(int_list_2d), npt.NDArray[np.float64]) assert_type(np.linalg.svdvals(float_list_2d), npt.NDArray[np.float64]) assert_type(np.linalg.svdvals(complex_list_2d), npt.NDArray[np.float64]) -assert_type(np.linalg.cond(AR_i8), Any) -assert_type(np.linalg.cond(AR_f8), Any) -assert_type(np.linalg.cond(AR_c16), Any) - assert_type(np.linalg.matrix_rank(AR_i8), Any) assert_type(np.linalg.matrix_rank(AR_f8), Any) assert_type(np.linalg.matrix_rank(AR_c16), Any) @@ -169,9 +172,13 @@ assert_type(np.linalg.matrix_rank(float_list_3d), np.ndarray[tuple[int], np.dtyp assert_type(np.linalg.matrix_rank(AR_f8_4d), npt.NDArray[np.int_]) assert_type(np.linalg.matrix_rank(float_list_4d), npt.NDArray[np.int_]) -assert_type(np.linalg.pinv(AR_i8), npt.NDArray[np.float64]) -assert_type(np.linalg.pinv(AR_f8), npt.NDArray[np.float64]) -assert_type(np.linalg.pinv(AR_c16), npt.NDArray[np.complex128]) +assert_type(np.linalg.cond(AR_i8), Any) +assert_type(np.linalg.cond(AR_f8), Any) +assert_type(np.linalg.cond(AR_c16), Any) +assert_type(np.linalg.cond(AR_f4_2d), np.float32) +assert_type(np.linalg.cond(AR_f8_2d), np.float64) +assert_type(np.linalg.cond(AR_f4_3d), npt.NDArray[np.float32]) +assert_type(np.linalg.cond(AR_f8_3d), npt.NDArray[np.float64]) assert_type(np.linalg.slogdet(AR_i8), SlogdetResult) assert_type(np.linalg.slogdet(AR_f8), SlogdetResult) From 884494b757ed91153cad58ca7440e344f388cc64 Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 9 Dec 2025 19:08:11 +0100 Subject: [PATCH 
1006/1718] TYP: `linalg`: improved `det` and `slogdet` return types (shape-type-dependent) --- numpy/linalg/_linalg.pyi | 62 +++++++++++++++++------ numpy/typing/tests/data/fail/linalg.pyi | 5 +- numpy/typing/tests/data/reveal/linalg.pyi | 13 +++++ 3 files changed, 62 insertions(+), 18 deletions(-) diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index 46927bc7b044..f54ed43662d6 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -85,17 +85,23 @@ type _JustAnyShape = tuple[Never, ...] # workaround for microsoft/pyright#10232 type _tuple2[T] = tuple[T, T] type _inexact32 = np.float32 | np.complex64 -type _to_inexact64 = np.complex128 | np.float64 | np.integer | np.bool +type _to_float64 = np.float64 | np.integer | np.bool +type _to_inexact64 = np.complex128 | _to_float64 + +type _Array2D[ScalarT: np.generic] = np.ndarray[tuple[int, int], np.dtype[ScalarT]] +type _Array3ND[ScalarT: np.generic] = np.ndarray[_AtLeast3D, np.dtype[ScalarT]] # anything that safe-casts (from floating) into float64/complex128 -type _ToArrayF64 = _ArrayLike[np.float64 | np.integer | np.bool] | _NestedSequence[float] +type _ToArrayF64 = _ArrayLike[_to_float64] | _NestedSequence[float] type _ToArrayC128 = _ArrayLike[_to_inexact64] | _NestedSequence[complex] # the invariant `list` type avoids overlap with bool, int, etc type _AsArrayF64 = _ArrayLike[np.float64] | list[float] | _NestedSequence[list[float]] type _AsArrayC128 = _ArrayLike[np.complex128] | list[complex] | _NestedSequence[list[complex]] -type _ToArrayC128_2d = np.ndarray[tuple[int, int], np.dtype[_to_inexact64]] | Sequence[Sequence[complex]] -type _ToArrayC128_3nd = np.ndarray[_AtLeast3D, np.dtype[_to_inexact64]] | Sequence[Sequence[_NestedSequence[complex]]] +type _ToArrayF64_2d = _Array2D[_to_float64] | Sequence[Sequence[float]] +type _ToArrayF64_3nd = _Array3ND[_to_float64] | Sequence[Sequence[_NestedSequence[float]]] +type _ToArrayC128_2d = _Array2D[_to_inexact64] | Sequence[Sequence[complex]] 
+type _ToArrayC128_3nd = _Array3ND[_to_inexact64] | Sequence[Sequence[_NestedSequence[complex]]] type _OrderKind = L[1, -1, 2, -2, "fro", "nuc"] | float # only accepts `-inf` and `inf` as `float` type _SideKind = L["L", "U", "l", "u"] @@ -172,7 +178,6 @@ def solve( ) -> NDArray[complexfloating]: ... # keep in sync with the other inverse functions and cholesky -# TODO: transparent shape types @overload # inexact32 def tensorinv[ScalarT: _inexact32](a: _ArrayLike[ScalarT], ind: int = 2) -> NDArray[ScalarT]: ... @overload # +float64 @@ -193,7 +198,6 @@ def inv(a: _AsArrayC128) -> NDArray[np.complex128]: ... def inv(a: _ArrayLikeComplex_co) -> np.ndarray: ... # keep in sync with the other inverse functions and cholesky -# TODO: transparent shape types @overload # inexact32 def pinv[ScalarT: _inexact32]( a: _ArrayLike[ScalarT], @@ -228,7 +232,6 @@ def pinv( ) -> NDArray[Any]: ... # keep in sync with the inverse functions -# TODO: transparent shape types @overload # inexact32 def cholesky[ScalarT: _inexact32](a: _ArrayLike[ScalarT], /, *, upper: bool = False) -> NDArray[ScalarT]: ... @overload # +float64 @@ -240,7 +243,6 @@ def cholesky(a: _ArrayLikeComplex_co, /, *, upper: bool = False) -> np.ndarray: # NOTE: Technically this also accepts boolean array-likes, but that case is not very useful, so we skip it. # If you have a use case for it, please open an issue. -# TODO: transparent shape types @overload # +int, n ≥ 0 def matrix_power(a: _NestedSequence[int], n: _PosInt) -> NDArray[np.int_]: ... @overload # +integer | ~object, n ≥ 0 @@ -425,7 +427,7 @@ def matrix_rank( ) -> L[0, 1]: ... 
@overload # =2d def matrix_rank( - A: Sequence[Sequence[complex]] | np.ndarray[tuple[int, int], np.dtype[np.number]], + A: Sequence[Sequence[complex]] | _Array2D[np.number], tol: _ArrayLikeFloat_co | None = None, hermitian: bool = False, *, @@ -460,11 +462,11 @@ def matrix_rank( @overload # workaround for microsoft/pyright#10232 def cond(x: np.ndarray[_JustAnyShape, np.dtype[np.number]], p: _OrderKind | None = None) -> Any: ... @overload # 2d ~inexact32 -def cond(x: np.ndarray[tuple[int, int], np.dtype[_inexact32]], p: _OrderKind | None = None) -> np.float32: ... +def cond(x: _Array2D[_inexact32], p: _OrderKind | None = None) -> np.float32: ... @overload # 2d +inexact64 def cond(x: _ToArrayC128_2d, p: _OrderKind | None = None) -> np.float64: ... @overload # 2d ~number -def cond(x: np.ndarray[tuple[int, int], np.dtype[np.number]], p: _OrderKind | None = None) -> np.floating: ... +def cond(x: _Array2D[np.number], p: _OrderKind | None = None) -> np.floating: ... @overload # >2d ~inexact32 def cond(x: np.ndarray[_AtLeast3D, np.dtype[_inexact32]], p: _OrderKind | None = None) -> NDArray[np.float32]: ... @overload # >2d +inexact64 @@ -474,12 +476,42 @@ def cond(x: np.ndarray[_AtLeast3D, np.dtype[np.number]], p: _OrderKind | None = @overload # fallback def cond(x: _ArrayLikeComplex_co, p: _OrderKind | None = None) -> Any: ... -# TODO: Returns a 2-tuple of scalars for 2D arrays and -# a 2-tuple of `(a.ndim - 2)`` dimensional arrays otherwise +# keep in sync with `det` +@overload # workaround for microsoft/pyright#10232 +def slogdet(a: np.ndarray[_JustAnyShape, np.dtype[np.number]]) -> SlogdetResult: ... +@overload # 2d ~inexact32 +def slogdet[ScalarT: _inexact32](a: _Array2D[ScalarT]) -> SlogdetResult[np.float32, ScalarT]: ... +@overload # >2d ~inexact32 +def slogdet[ScalarT: _inexact32](a: _Array3ND[ScalarT]) -> SlogdetResult[NDArray[np.float32], NDArray[ScalarT]]: ... 
+@overload # 2d +float64 +def slogdet(a: _Array2D[_to_float64]) -> SlogdetResult[np.float64, np.float64]: ... +@overload # >2d +float64 +def slogdet(a: _Array3ND[_to_float64]) -> SlogdetResult[NDArray[np.float64], NDArray[np.float64]]: ... +@overload # 2d ~complex128 +def slogdet(a: _Array2D[np.complex128] | Sequence[list[complex]]) -> SlogdetResult[np.float64, np.complex128]: ... +@overload # >2d ~complex128 +def slogdet( + a: _Array3ND[np.complex128] | _NestedSequence[Sequence[list[complex]]] +) -> SlogdetResult[NDArray[np.float64], NDArray[np.complex128]]: ... +@overload # fallback def slogdet(a: _ArrayLikeComplex_co) -> SlogdetResult: ... -# TODO: Returns a 2-tuple of scalars for 2D arrays and -# a 2-tuple of `(a.ndim - 2)`` dimensional arrays otherwise +# keep in sync with `slogdet` +@overload # workaround for microsoft/pyright#10232 +def det(a: np.ndarray[_JustAnyShape, np.dtype[np.number]]) -> Any: ... +@overload # 2d ~inexact32 +def det[ScalarT: _inexact32](a: _Array2D[ScalarT]) -> ScalarT: ... +@overload # >2d ~inexact32 +def det[ScalarT: _inexact32](a: _Array3ND[ScalarT]) -> NDArray[ScalarT]: ... +@overload # 2d +float64 +def det(a: _Array2D[_to_float64]) -> np.float64: ... +@overload # >2d +float64 +def det(a: _Array3ND[_to_float64]) -> NDArray[np.float64]: ... +@overload # 2d ~complex128 +def det(a: _Array2D[np.complex128] | Sequence[list[complex]]) -> np.complex128: ... +@overload # >2d ~complex128 +def det(a: _Array3ND[np.complex128] | _NestedSequence[Sequence[list[complex]]]) -> NDArray[np.complex128]: ... +@overload # fallback def det(a: _ArrayLikeComplex_co) -> Any: ... 
# TODO: narrow return types diff --git a/numpy/typing/tests/data/fail/linalg.pyi b/numpy/typing/tests/data/fail/linalg.pyi index ec79af1d90e0..eda82c48c85a 100644 --- a/numpy/typing/tests/data/fail/linalg.pyi +++ b/numpy/typing/tests/data/fail/linalg.pyi @@ -35,13 +35,12 @@ np.linalg.svdvals(AR_O) # type: ignore[arg-type] np.linalg.svdvals(AR_M) # type: ignore[arg-type] np.linalg.svdvals(x=AR_f8) # type: ignore[call-overload] +np.linalg.matrix_rank(AR_O) # type: ignore[arg-type] + np.linalg.cond(AR_O) # type: ignore[arg-type] np.linalg.cond(AR_f8, p="bob") # type: ignore[call-overload] -np.linalg.matrix_rank(AR_O) # type: ignore[arg-type] - np.linalg.slogdet(AR_O) # type: ignore[arg-type] - np.linalg.det(AR_O) # type: ignore[arg-type] np.linalg.norm(AR_f8, ord="bob") # type: ignore[call-overload] diff --git a/numpy/typing/tests/data/reveal/linalg.pyi b/numpy/typing/tests/data/reveal/linalg.pyi index c8c281cee77c..d638166df386 100644 --- a/numpy/typing/tests/data/reveal/linalg.pyi +++ b/numpy/typing/tests/data/reveal/linalg.pyi @@ -16,6 +16,7 @@ float_list_2d: list[list[float]] float_list_3d: list[list[list[float]]] float_list_4d: list[list[list[list[float]]]] complex_list_2d: list[list[complex]] +complex_list_3d: list[list[list[complex]]] AR_any: np.ndarray AR_f_: npt.NDArray[np.floating] @@ -183,10 +184,22 @@ assert_type(np.linalg.cond(AR_f8_3d), npt.NDArray[np.float64]) assert_type(np.linalg.slogdet(AR_i8), SlogdetResult) assert_type(np.linalg.slogdet(AR_f8), SlogdetResult) assert_type(np.linalg.slogdet(AR_c16), SlogdetResult) +assert_type(np.linalg.slogdet(AR_f4_2d), SlogdetResult[np.float32, np.float32]) +assert_type(np.linalg.slogdet(AR_f8_2d), SlogdetResult[np.float64, np.float64]) +assert_type(np.linalg.slogdet(AR_f4_3d), SlogdetResult[npt.NDArray[np.float32], npt.NDArray[np.float32]]) +assert_type(np.linalg.slogdet(AR_f8_3d), SlogdetResult[npt.NDArray[np.float64], npt.NDArray[np.float64]]) +assert_type(np.linalg.slogdet(complex_list_2d), 
SlogdetResult[np.float64, np.complex128]) +assert_type(np.linalg.slogdet(complex_list_3d), SlogdetResult[npt.NDArray[np.float64], npt.NDArray[np.complex128]]) assert_type(np.linalg.det(AR_i8), Any) assert_type(np.linalg.det(AR_f8), Any) assert_type(np.linalg.det(AR_c16), Any) +assert_type(np.linalg.det(AR_f4_2d), np.float32) +assert_type(np.linalg.det(AR_f8_2d), np.float64) +assert_type(np.linalg.det(AR_f4_3d), npt.NDArray[np.float32]) +assert_type(np.linalg.det(AR_f8_3d), npt.NDArray[np.float64]) +assert_type(np.linalg.det(complex_list_2d), np.complex128) +assert_type(np.linalg.det(complex_list_3d), npt.NDArray[np.complex128]) assert_type(np.linalg.lstsq(AR_i8, AR_i8), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64], np.int32, npt.NDArray[np.float64]]) assert_type(np.linalg.lstsq(AR_i8, AR_f8), tuple[npt.NDArray[np.floating], npt.NDArray[np.floating], np.int32, npt.NDArray[np.floating]]) From 6715fe9c65be186df907a347ff25db20dcc3fb1c Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 9 Dec 2025 19:53:23 +0100 Subject: [PATCH 1007/1718] TYP: apply copilot's nits --- numpy/linalg/_linalg.pyi | 10 +++++----- numpy/typing/tests/data/reveal/linalg.pyi | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index f54ed43662d6..38926e84d0a6 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -105,7 +105,7 @@ type _ToArrayC128_3nd = _Array3ND[_to_inexact64] | Sequence[Sequence[_NestedSequ type _OrderKind = L[1, -1, 2, -2, "fro", "nuc"] | float # only accepts `-inf` and `inf` as `float` type _SideKind = L["L", "U", "l", "u"] -type _PosInt = L[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] +type _NonNegInt = L[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] type _NegInt = L[-1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16] _FloatingT_co = TypeVar("_FloatingT_co", bound=np.floating, default=Any, covariant=True) @@ -244,9 +244,9 @@ def 
cholesky(a: _ArrayLikeComplex_co, /, *, upper: bool = False) -> np.ndarray: # NOTE: Technically this also accepts boolean array-likes, but that case is not very useful, so we skip it. # If you have a use case for it, please open an issue. @overload # +int, n ≥ 0 -def matrix_power(a: _NestedSequence[int], n: _PosInt) -> NDArray[np.int_]: ... +def matrix_power(a: _NestedSequence[int], n: _NonNegInt) -> NDArray[np.int_]: ... @overload # +integer | ~object, n ≥ 0 -def matrix_power[ScalarT: np.integer | np.object_](a: _ArrayLike[ScalarT], n: _PosInt) -> NDArray[ScalarT]: ... +def matrix_power[ScalarT: np.integer | np.object_](a: _ArrayLike[ScalarT], n: _NonNegInt) -> NDArray[ScalarT]: ... @overload # +float64, n < 0 def matrix_power(a: _ToArrayF64, n: _NegInt) -> NDArray[np.float64]: ... @overload # ~float64 @@ -402,9 +402,9 @@ def eigvalsh(a: _ArrayLikeComplex_co, UPLO: _SideKind = "L") -> NDArray[np.float @overload # abstract `inexact` (excluding concrete types) def svdvals(a: NDArray[np.inexact[Never]], /) -> NDArray[np.floating]: ... @overload # ~inexact32 -def svdvals(x: _ArrayLike[_inexact32], /) -> NDArray[np.float32]: ... +def svdvals(a: _ArrayLike[_inexact32], /) -> NDArray[np.float32]: ... @overload # +complex128 -def svdvals(x: _ToArrayC128, /) -> NDArray[np.float64]: ... +def svdvals(a: _ToArrayC128, /) -> NDArray[np.float64]: ... @overload # fallback def svdvals(a: _ArrayLikeComplex_co, /) -> NDArray[np.floating]: ... 
diff --git a/numpy/typing/tests/data/reveal/linalg.pyi b/numpy/typing/tests/data/reveal/linalg.pyi index d638166df386..a40aa84d9c05 100644 --- a/numpy/typing/tests/data/reveal/linalg.pyi +++ b/numpy/typing/tests/data/reveal/linalg.pyi @@ -146,7 +146,7 @@ assert_type(np.linalg.svd(float_list_2d, compute_uv=False), npt.NDArray[np.float assert_type(np.linalg.svd(complex_list_2d), SVDResult[np.float64, np.complex128]) assert_type(np.linalg.svd(complex_list_2d, compute_uv=False), npt.NDArray[np.float64]) # Mypy bug: `Expression is of type "SVDResult[Any, Any]", not "SVDResult[Any, Any]"` -assert_type(np.linalg.svd(AR_any), SVDResult[Any]) # type: ignore[assert-type] +assert_type(np.linalg.svd(AR_any), SVDResult[Any, Any]) # type: ignore[assert-type] # Mypy bug: `Expression is of type "ndarray[Any, Any]", not "ndarray[tuple[Any, ...], dtype[Any]]"` assert_type(np.linalg.svd(AR_any, compute_uv=False), npt.NDArray[Any]) # type: ignore[assert-type] From a64d310e17f67df3f7ef0383362c944eb8003e17 Mon Sep 17 00:00:00 2001 From: Maanas Arora Date: Wed, 10 Dec 2025 03:34:23 -0500 Subject: [PATCH 1008/1718] ENH,REF: New-style generic sorting ArrayMethod loops (#30328) * ENH,REF: New arraymethod-style default sorting loops * ENH: Implement single default sorting loops in npy_sort.c * STYLE: Add missing newline --- numpy/_core/meson.build | 1 + numpy/_core/src/common/npy_sort.c | 67 +++++++++++++++++++ numpy/_core/src/common/npy_sort.h.src | 13 ++++ numpy/_core/src/multiarray/item_selection.c | 2 + .../_core/src/multiarray/stringdtype/dtype.c | 20 ++---- 5 files changed, 89 insertions(+), 14 deletions(-) create mode 100644 numpy/_core/src/common/npy_sort.c diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index b5695c8f3cde..aa4da9c11146 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -1121,6 +1121,7 @@ src_multiarray_umath_common = [ 'src/common/numpyos.c', 'src/common/npy_cpu_features.c', 'src/common/npy_cpu_dispatch.c', + 
'src/common/npy_sort.c', src_file.process('src/common/templ_common.h.src') ] if have_blas diff --git a/numpy/_core/src/common/npy_sort.c b/numpy/_core/src/common/npy_sort.c new file mode 100644 index 000000000000..632962e884dd --- /dev/null +++ b/numpy/_core/src/common/npy_sort.c @@ -0,0 +1,67 @@ +#include +#include +#include +#include "npy_sort.h" +#include "dtypemeta.h" + +#ifdef __cplusplus +extern "C" { +#endif + +NPY_NO_EXPORT int +npy_default_sort_loop(PyArrayMethod_Context *context, + char *const *data, const npy_intp *dimensions, const npy_intp *strides, + NpyAuxData *transferdata) +{ + PyArray_CompareFunc *cmp = (PyArray_CompareFunc *)context->method->static_data; + + PyArrayMethod_SortParameters *sort_params = + (PyArrayMethod_SortParameters *)context->parameters; + PyArray_SortImpl *sort_func = NULL; + + switch (sort_params->flags) { + case NPY_SORT_DEFAULT: + sort_func = npy_quicksort_impl; + break; + case NPY_SORT_STABLE: + sort_func = npy_mergesort_impl; + break; + default: + PyErr_SetString(PyExc_ValueError, "Invalid sort kind"); + return -1; + } + + return sort_func(data[0], dimensions[0], context, + context->descriptors[0]->elsize, cmp); +} + +NPY_NO_EXPORT int +npy_default_argsort_loop(PyArrayMethod_Context *context, + char *const *data, const npy_intp *dimensions, const npy_intp *strides, + NpyAuxData *transferdata) +{ + PyArray_CompareFunc *cmp = (PyArray_CompareFunc *)context->method->static_data; + + PyArrayMethod_SortParameters *sort_params = + (PyArrayMethod_SortParameters *)context->parameters; + PyArray_ArgSortImpl *argsort_func = NULL; + + switch (sort_params->flags) { + case NPY_SORT_DEFAULT: + argsort_func = npy_aquicksort_impl; + break; + case NPY_SORT_STABLE: + argsort_func = npy_amergesort_impl; + break; + default: + PyErr_SetString(PyExc_ValueError, "Invalid sort kind"); + return -1; + } + + return argsort_func(data[0], (npy_intp *)data[1], dimensions[0], context, + context->descriptors[0]->elsize, cmp); +} + +#ifdef __cplusplus +} 
+#endif diff --git a/numpy/_core/src/common/npy_sort.h.src b/numpy/_core/src/common/npy_sort.h.src index 1f82b07659f4..95d6f9d1ee70 100644 --- a/numpy/_core/src/common/npy_sort.h.src +++ b/numpy/_core/src/common/npy_sort.h.src @@ -5,6 +5,7 @@ #include #include #include +#include #define NPY_ENOMEM 1 #define NPY_ECOMP 2 @@ -107,6 +108,18 @@ NPY_NO_EXPORT int npy_aheapsort(void *vec, npy_intp *ind, npy_intp cnt, void *ar NPY_NO_EXPORT int npy_amergesort(void *vec, npy_intp *ind, npy_intp cnt, void *arr); NPY_NO_EXPORT int npy_atimsort(void *vec, npy_intp *ind, npy_intp cnt, void *arr); +/* + ***************************************************************************** + ** NEW-STYLE GENERIC SORT ** + ***************************************************************************** + */ + +NPY_NO_EXPORT int npy_default_sort_loop(PyArrayMethod_Context *context, + char *const *data, const npy_intp *dimensions, const npy_intp *strides, + NpyAuxData *transferdata); +NPY_NO_EXPORT int npy_default_argsort_loop(PyArrayMethod_Context *context, + char *const *data, const npy_intp *dimensions, const npy_intp *strides, + NpyAuxData *transferdata); /* ***************************************************************************** diff --git a/numpy/_core/src/multiarray/item_selection.c b/numpy/_core/src/multiarray/item_selection.c index f9d753f9e7ba..ff100c3d9d5d 100644 --- a/numpy/_core/src/multiarray/item_selection.c +++ b/numpy/_core/src/multiarray/item_selection.c @@ -3179,6 +3179,7 @@ PyArray_Sort(PyArrayObject *op, int axis, NPY_SORTKIND flags) } context.descriptors = loop_descrs; context.parameters = &sort_params; + context.method = sort_method; // Arrays are always contiguous for sorting npy_intp strides[2] = {loop_descrs[0]->elsize, loop_descrs[1]->elsize}; @@ -3290,6 +3291,7 @@ PyArray_ArgSort(PyArrayObject *op, int axis, NPY_SORTKIND flags) } context.descriptors = loop_descrs; context.parameters = &sort_params; + context.method = argsort_method; // Arrays are always 
contiguous for sorting npy_intp strides[2] = {loop_descrs[0]->elsize, loop_descrs[1]->elsize}; diff --git a/numpy/_core/src/multiarray/stringdtype/dtype.c b/numpy/_core/src/multiarray/stringdtype/dtype.c index 85514ef15df6..77216d25d46f 100644 --- a/numpy/_core/src/multiarray/stringdtype/dtype.c +++ b/numpy/_core/src/multiarray/stringdtype/dtype.c @@ -692,14 +692,9 @@ stringdtype_wrap_sort_loop( { PyArray_StringDTypeObject *sdescr = (PyArray_StringDTypeObject *)context->descriptors[0]; - PyArray_SortImpl *sort_loop = - ((PyArrayMethod_SortParameters *)context->parameters)->flags - == NPY_SORT_STABLE ? &npy_mergesort_impl : &npy_quicksort_impl; npy_string_allocator *allocator = NpyString_acquire_allocator(sdescr); - int ret = sort_loop( - data[0], dimensions[0], context, - context->descriptors[0]->elsize, &_sort_compare); + int ret = npy_default_sort_loop(context, data, dimensions, strides, transferdata); NpyString_release_allocator(allocator); return ret; } @@ -737,14 +732,9 @@ stringdtype_wrap_argsort_loop( { PyArray_StringDTypeObject *sdescr = (PyArray_StringDTypeObject *)context->descriptors[0]; - PyArray_ArgSortImpl *argsort_loop = - ((PyArrayMethod_SortParameters *)context->parameters) - ->flags == NPY_SORT_STABLE ? 
&npy_amergesort_impl : &npy_aquicksort_impl; npy_string_allocator *allocator = NpyString_acquire_allocator(sdescr); - int ret = argsort_loop( - data[0], (npy_intp *)data[1], dimensions[0], context, - context->descriptors[0]->elsize, &_sort_compare); + int ret = npy_default_argsort_loop(context, data, dimensions, strides, transferdata); NpyString_release_allocator(allocator); return ret; } @@ -965,9 +955,10 @@ init_stringdtype_sorts(void) PyArray_DTypeMeta *stringdtype = &PyArray_StringDType; PyArray_DTypeMeta *sort_dtypes[2] = {stringdtype, stringdtype}; - PyType_Slot sort_slots[3] = { + PyType_Slot sort_slots[4] = { {NPY_METH_resolve_descriptors, &stringdtype_sort_resolve_descriptors}, {NPY_METH_get_loop, &stringdtype_get_sort_loop}, + {_NPY_METH_static_data, &_sort_compare}, {0, NULL} }; PyArrayMethod_Spec sort_spec = { @@ -989,8 +980,9 @@ init_stringdtype_sorts(void) Py_DECREF(sort_method); PyArray_DTypeMeta *argsort_dtypes[2] = {stringdtype, &PyArray_IntpDType}; - PyType_Slot argsort_slots[2] = { + PyType_Slot argsort_slots[3] = { {NPY_METH_get_loop, &stringdtype_get_argsort_loop}, + {_NPY_METH_static_data, &_sort_compare}, {0, NULL} }; PyArrayMethod_Spec argsort_spec = { From 5b62b47946c342f777c55ef445e00e76693a01f3 Mon Sep 17 00:00:00 2001 From: Kumar Aditya Date: Wed, 10 Dec 2025 20:03:38 +0530 Subject: [PATCH 1009/1718] TST: skip deep recursion tests on TSAN and ASAN builds (#30410) --- numpy/_core/tests/test_dtype.py | 12 ++++-------- numpy/_core/tests/test_multiarray.py | 13 ++++++------- numpy/_core/tests/test_regression.py | 13 +++++++------ numpy/testing/_private/utils.py | 27 +++++++++++++++++++++++++++ 4 files changed, 44 insertions(+), 21 deletions(-) diff --git a/numpy/_core/tests/test_dtype.py b/numpy/_core/tests/test_dtype.py index 72bcc3e22962..b1f965d5164b 100644 --- a/numpy/_core/tests/test_dtype.py +++ b/numpy/_core/tests/test_dtype.py @@ -21,13 +21,12 @@ HAS_REFCOUNT, IS_64BIT, IS_PYPY, - IS_PYSTON, - IS_WASM, assert_, assert_array_equal, 
assert_equal, assert_raises, ) +from numpy.testing._private.utils import requires_deep_recursion def assert_dtype_equal(a, b): @@ -978,16 +977,14 @@ def test1(self): ('yi', np.dtype((a, (3, 2))))]) assert_dtype_equal(c, d) - @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking") - @pytest.mark.skipif(IS_WASM, reason="Pyodide/WASM has limited stack size") + @requires_deep_recursion def test_list_recursion(self): l = [] l.append(('f', l)) with pytest.raises(RecursionError): np.dtype(l) - @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking") - @pytest.mark.skipif(IS_WASM, reason="Pyodide/WASM has limited stack size") + @requires_deep_recursion def test_tuple_recursion(self): d = np.int32 for i in range(100000): @@ -997,8 +994,7 @@ def test_tuple_recursion(self): with contextlib.suppress(RecursionError): np.dtype(d) - @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking") - @pytest.mark.skipif(IS_WASM, reason="Pyodide/WASM has limited stack size") + @requires_deep_recursion def test_dict_recursion(self): d = {"names": ['self'], "formats": [None], "offsets": [0]} d['formats'][0] = d diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index aa2dee8d58bb..11365d1d1efb 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -38,7 +38,6 @@ HAS_REFCOUNT, IS_64BIT, IS_PYPY, - IS_PYSTON, IS_WASM, assert_, assert_allclose, @@ -55,7 +54,11 @@ runstring, temppath, ) -from numpy.testing._private.utils import _no_tracing, requires_memory +from numpy.testing._private.utils import ( + _no_tracing, + requires_deep_recursion, + requires_memory, +) def assert_arg_sorted(arr, arg): @@ -9349,6 +9352,7 @@ def test_to_bool_scalar(self): assert_equal(bool(np.array([True])), True) assert_equal(bool(np.array([[42]])), True) + @requires_deep_recursion def test_to_bool_scalar_not_convertible(self): class NotConvertible: @@ -9357,11 +9361,6 @@ def 
__bool__(self): assert_raises(NotImplementedError, bool, np.array(NotConvertible())) assert_raises(NotImplementedError, bool, np.array([NotConvertible()])) - if IS_PYSTON: - pytest.skip("Pyston disables recursion checking") - if IS_WASM: - pytest.skip("Pyodide/WASM has limited stack size") - self_containing = np.array([None]) self_containing[0] = self_containing diff --git a/numpy/_core/tests/test_regression.py b/numpy/_core/tests/test_regression.py index dc457b2d5fc1..8844c461211a 100644 --- a/numpy/_core/tests/test_regression.py +++ b/numpy/_core/tests/test_regression.py @@ -18,7 +18,6 @@ HAS_REFCOUNT, IS_64BIT, IS_PYPY, - IS_PYSTON, IS_WASM, _assert_valid_refcount, assert_, @@ -29,7 +28,11 @@ assert_raises, assert_raises_regex, ) -from numpy.testing._private.utils import _no_tracing, requires_memory +from numpy.testing._private.utils import ( + _no_tracing, + requires_deep_recursion, + requires_memory, +) class TestRegression: @@ -1777,8 +1780,7 @@ def test_reduce_contiguous(self): assert_(a.flags.f_contiguous) assert_(b.flags.c_contiguous) - @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking") - @pytest.mark.skipif(IS_WASM, reason="Pyodide/WASM has limited stack size") + @requires_deep_recursion def test_object_array_self_reference(self): # Object arrays with references to themselves can cause problems a = np.array(0, dtype=object) @@ -1787,8 +1789,7 @@ def test_object_array_self_reference(self): assert_raises(RecursionError, float, a) a[()] = None - @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking") - @pytest.mark.skipif(IS_WASM, reason="Pyodide/WASM has limited stack size") + @requires_deep_recursion def test_object_array_circular_reference(self): # Test the same for a circular reference. 
a = np.array(0, dtype=object) diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index 8a89c464b49d..87d9f0394fb3 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -2830,3 +2830,30 @@ def run_threaded(func, max_workers=8, pass_count=False, barrier.abort() for f in futures: f.result() + + +def requires_deep_recursion(func): + """Decorator to skip test if deep recursion is not supported.""" + import pytest + + @wraps(func) + def wrapper(*args, **kwargs): + if IS_PYSTON: + pytest.skip("Pyston disables recursion checking") + if IS_WASM: + pytest.skip("WASM has limited stack size") + cflags = sysconfig.get_config_var('CFLAGS') or '' + config_args = sysconfig.get_config_var('CONFIG_ARGS') or '' + address_sanitizer = ( + '-fsanitize=address' in cflags or + '--with-address-sanitizer' in config_args + ) + thread_sanitizer = ( + '-fsanitize=thread' in cflags or + '--with-thread-sanitizer' in config_args + ) + if address_sanitizer or thread_sanitizer: + pytest.skip("AddressSanitizer and ThreadSanitizer do not support " + "deep recursion") + return func(*args, **kwargs) + return wrapper From 5ab88be054a9a522ade872c82938e9e8d6206190 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Wed, 10 Dec 2025 08:59:17 +0100 Subject: [PATCH 1010/1718] MAINT: clean up `setuptools` installs and `setup.py`-related leftovers No longer used for anything after we dropped Python 3.11 support. There are a few example `setup.py` files in the repo, but it turns out we are not testing those (which is fine). 
--- .github/workflows/linux.yml | 1 + numpy/_build_utils/__init__.py | 22 ---------------------- numpy/_core/include/numpy/numpyconfig.h | 6 ------ numpy/tests/test_scripts.py | 5 +---- requirements/setuptools_requirement.txt | 2 -- requirements/test_requirements.txt | 9 ++------- 6 files changed, 4 insertions(+), 41 deletions(-) delete mode 100644 numpy/_build_utils/__init__.py delete mode 100644 requirements/setuptools_requirement.txt diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 516f0538572b..c42e3e95a5d7 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -269,6 +269,7 @@ jobs: run: | sudo apt update sudo apt install make swig + pip install setuptools make -C tools/swig/test test array_api_tests: diff --git a/numpy/_build_utils/__init__.py b/numpy/_build_utils/__init__.py deleted file mode 100644 index 10b282d8d9ee..000000000000 --- a/numpy/_build_utils/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# Don't use the deprecated NumPy C API. Define this to a fixed version -# instead of NPY_API_VERSION in order not to break compilation for -# released SciPy versions when NumPy introduces a new deprecation. 
Use -# in setup.py:: -# -# config.add_extension('_name', sources=['source_fname'], **numpy_nodepr_api) -# -numpy_nodepr_api = { - "define_macros": [("NPY_NO_DEPRECATED_API", "NPY_1_9_API_VERSION")] -} - - -def import_file(folder, module_name): - """Import a file directly, avoiding importing scipy""" - import importlib - import pathlib - - fname = pathlib.Path(folder) / f'{module_name}.py' - spec = importlib.util.spec_from_file_location(module_name, str(fname)) - module = importlib.util.module_from_spec(spec) - spec.loader.exec_module(module) - return module diff --git a/numpy/_core/include/numpy/numpyconfig.h b/numpy/_core/include/numpy/numpyconfig.h index 0bd934e37a8d..40b5f1454d67 100644 --- a/numpy/_core/include/numpy/numpyconfig.h +++ b/numpy/_core/include/numpy/numpyconfig.h @@ -7,12 +7,6 @@ * On Mac OS X, because there is only one configuration stage for all the archs * in universal builds, any macro which depends on the arch needs to be * hardcoded. - * - * Note that distutils/pip will attempt a universal2 build when Python itself - * is built as universal2, hence this hardcoding is needed even if we do not - * support universal2 wheels anymore (see gh-22796). - * This code block can be removed after we have dropped the setup.py based - * build completely. */ #ifdef __APPLE__ #undef NPY_SIZEOF_LONG diff --git a/numpy/tests/test_scripts.py b/numpy/tests/test_scripts.py index 01b743941cf2..e5f0a07436c8 100644 --- a/numpy/tests/test_scripts.py +++ b/numpy/tests/test_scripts.py @@ -5,15 +5,13 @@ import os import subprocess import sys -from os.path import dirname, isfile, join as pathjoin +from os.path import dirname import pytest import numpy as np from numpy.testing import IS_WASM, assert_equal -is_inplace = isfile(pathjoin(dirname(np.__file__), '..', 'setup.py')) - def find_f2py_commands(): if sys.platform == 'win32': @@ -33,7 +31,6 @@ def find_f2py_commands(): return ['f2py', 'f2py' + major, 'f2py' + major + '.' 
+ minor] -@pytest.mark.skipif(is_inplace, reason="Cannot test f2py command inplace") @pytest.mark.xfail(reason="Test is unreliable") @pytest.mark.parametrize('f2py_cmd', find_f2py_commands()) def test_f2py(f2py_cmd): diff --git a/requirements/setuptools_requirement.txt b/requirements/setuptools_requirement.txt deleted file mode 100644 index 21f900d46078..000000000000 --- a/requirements/setuptools_requirement.txt +++ /dev/null @@ -1,2 +0,0 @@ -setuptools==65.5.1 ; python_version < '3.12' -setuptools ; python_version >= '3.12' diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index 2e2f679cee72..3094d4eb8470 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -1,7 +1,4 @@ Cython -wheel==0.38.1 -setuptools==65.5.1 ; python_version < '3.12' -setuptools ; python_version >= '3.12' hypothesis==6.142.2 pytest==7.4.0 pytest-cov==4.1.0 @@ -9,10 +6,8 @@ meson ninja; sys_platform != "emscripten" pytest-xdist pytest-timeout -# For testing types. Notes on the restrictions: -# - Mypy relies on C API features not present in PyPy -# NOTE: Keep mypy in sync with environment.yml -mypy==1.19.0; platform_python_implementation != "PyPy" +# For testing types +mypy==1.19.0 # for optional f2py encoding detection charset-normalizer tzdata From 2df152d2a2410eac37542ed5f4f414e3aa99c1a4 Mon Sep 17 00:00:00 2001 From: Ian Hunt-Isaak Date: Thu, 11 Dec 2025 05:50:02 -0500 Subject: [PATCH 1011/1718] DOC: crosslinking and explanation for (g)ufuncs (#30384) * DOC: crosslinking and explanation for (g)ufuncs Added more crosslinks between gufunc and ufunc pages to increase discoverability. Expanded example of basics.ufuncs, and corrected statement about the number of methods ufuncs possess. 
* fix doctest * simplify cross linking * Apply suggestion from @mattip Co-authored-by: Matti Picus * Extend "scalar" to "single-element scalar" * Extend "scalar" to "single-element scalar" --------- Co-authored-by: Matti Picus --- .../reference/c-api/generalized-ufuncs.rst | 2 + doc/source/reference/ufuncs.rst | 4 ++ doc/source/user/basics.ufuncs.rst | 43 +++++++++++++++++-- 3 files changed, 46 insertions(+), 3 deletions(-) diff --git a/doc/source/reference/c-api/generalized-ufuncs.rst b/doc/source/reference/c-api/generalized-ufuncs.rst index 755ab2141cbd..b8a37e98b81e 100644 --- a/doc/source/reference/c-api/generalized-ufuncs.rst +++ b/doc/source/reference/c-api/generalized-ufuncs.rst @@ -4,6 +4,8 @@ Generalized universal function API ================================== +.. seealso:: :ref:`ufuncs` + There is a general need for looping over not only functions on scalars but also over functions on vectors (or arrays). This concept is realized in NumPy by generalizing the universal functions diff --git a/doc/source/reference/ufuncs.rst b/doc/source/reference/ufuncs.rst index 0c675718818b..cac15b66cf14 100644 --- a/doc/source/reference/ufuncs.rst +++ b/doc/source/reference/ufuncs.rst @@ -19,6 +19,10 @@ that takes a fixed number of specific inputs and produces a fixed number of specific outputs. For detailed information on universal functions, see :ref:`ufuncs-basics`. + +There are also :ref:`generalized ufuncs ` which +are functions over vectors (or arrays) instead of only single-element scalars. + :class:`ufunc` ============== diff --git a/doc/source/user/basics.ufuncs.rst b/doc/source/user/basics.ufuncs.rst index 8607c2abda9b..5c91ab6c0168 100644 --- a/doc/source/user/basics.ufuncs.rst +++ b/doc/source/user/basics.ufuncs.rst @@ -18,6 +18,25 @@ is, a ufunc is a ":term:`vectorized `" wrapper for a function that takes a fixed number of specific inputs and produces a fixed number of specific outputs. 
+There are also :ref:`generalized ufuncs ` which +are functions over vectors (or arrays) instead of single-element scalars. +For example, :func:`numpy.add` is a ufunc that operates element-by-element, +while :func:`numpy.matmul` is a gufunc that operates on vectors/matrices:: + + >>> a = np.arange(6).reshape(3, 2) + >>> a + array([[0, 1], + [2, 3], + [4, 5]]) + >>> np.add(a, a) # element-wise addition + array([[ 0, 2], + [ 4, 6], + [ 8, 10]]) + >>> np.matmul(a, a.T) # matrix multiplication (3x2) @ (2x3) -> (3x3) + array([[ 1, 3, 5], + [ 3, 13, 23], + [ 5, 23, 41]]) + In NumPy, universal functions are instances of the :class:`numpy.ufunc` class. Many of the built-in functions are implemented in compiled C code. The basic ufuncs operate on scalars, but @@ -35,12 +54,30 @@ One can also produce custom :class:`numpy.ufunc` instances using the Ufunc methods ============= -All ufuncs have four methods. They can be found at -:ref:`ufuncs.methods`. However, these methods only make sense on scalar -ufuncs that take two input arguments and return one output argument. +All ufuncs have 5 methods. 4 reduce-like methods +(:meth:`~numpy.ufunc.reduce`, :meth:`~numpy.ufunc.accumulate`, +:meth:`~numpy.ufunc.reduceat`, :meth:`~numpy.ufunc.outer`) and one +for inplace operations (:meth:`~numpy.ufunc.at`). +See :ref:`ufuncs.methods` for more. However, these methods only make sense on +ufuncs that take two input arguments and return one output argument (so-called +"scalar" ufuncs since the inner loop operates on a single scalar value). Attempting to call these methods on other ufuncs will cause a :exc:`ValueError`. +For example, :func:`numpy.add` takes two inputs and returns one output, +so its methods work:: + + >>> np.add.reduce([1, 2, 3]) + 6 + +But :func:`numpy.divmod` returns two outputs (quotient and remainder), +so calling its methods raises an error:: + + >>> np.divmod.reduce([1, 2, 3]) + Traceback (most recent call last): + ... 
+ ValueError: reduce only supported for functions returning a single value + The reduce-like methods all take an *axis* keyword, a *dtype* keyword, and an *out* keyword, and the arrays must all have dimension >= 1. The *axis* keyword specifies the axis of the array over which the reduction From bd7e23aafe5747ff36ea3b3e286e2282f5f09e14 Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Thu, 11 Dec 2025 17:09:44 +0200 Subject: [PATCH 1012/1718] MAINT: remove some old work-arounds and test skips for PyPy (#30417) * MAINT: remove some old work-arounds and test skips for PyPy * typos * lint --- .../_core/src/multiarray/stringdtype/dtype.c | 7 ----- numpy/_core/tests/test_array_coercion.py | 3 +- numpy/_core/tests/test_mem_policy.py | 6 ---- numpy/_core/tests/test_multiarray.py | 6 +--- numpy/_core/tests/test_regression.py | 2 -- numpy/_core/tests/test_strings.py | 29 ++++--------------- numpy/lib/tests/test_format.py | 2 -- numpy/lib/tests/test_io.py | 6 ---- numpy/lib/tests/test_loadtxt.py | 22 +------------- 9 files changed, 9 insertions(+), 74 deletions(-) diff --git a/numpy/_core/src/multiarray/stringdtype/dtype.c b/numpy/_core/src/multiarray/stringdtype/dtype.c index 77216d25d46f..7abf4b9303af 100644 --- a/numpy/_core/src/multiarray/stringdtype/dtype.c +++ b/numpy/_core/src/multiarray/stringdtype/dtype.c @@ -399,14 +399,7 @@ stringdtype_getitem(PyArray_StringDTypeObject *descr, char **dataptr) } } else { -#ifndef PYPY_VERSION val_obj = PyUnicode_FromStringAndSize(sdata.buf, sdata.size); -#else - // work around pypy issue #4046, can delete this when the fix is in - // a released version of pypy - val_obj = PyUnicode_FromStringAndSize( - sdata.buf == NULL ? 
"" : sdata.buf, sdata.size); -#endif if (val_obj == NULL) { goto fail; } diff --git a/numpy/_core/tests/test_array_coercion.py b/numpy/_core/tests/test_array_coercion.py index ba28e886282f..9c8c4a09cfc9 100644 --- a/numpy/_core/tests/test_array_coercion.py +++ b/numpy/_core/tests/test_array_coercion.py @@ -12,7 +12,7 @@ import numpy as np import numpy._core._multiarray_umath as ncu from numpy._core._rational_tests import rational -from numpy.testing import IS_64BIT, IS_PYPY, assert_array_equal +from numpy.testing import IS_64BIT, assert_array_equal def arraylikes(): @@ -282,7 +282,6 @@ def test_scalar_coercion(self, scalar): assert_array_equal(arr, arr3) assert_array_equal(arr, arr4) - @pytest.mark.xfail(IS_PYPY, reason="`int(np.complex128(3))` fails on PyPy") @pytest.mark.filterwarnings("ignore::numpy.exceptions.ComplexWarning") @pytest.mark.parametrize("cast_to", scalar_instances()) def test_scalar_coercion_same_as_cast_and_assignment(self, cast_to): diff --git a/numpy/_core/tests/test_mem_policy.py b/numpy/_core/tests/test_mem_policy.py index 313d3efe779a..720ea1aa91b8 100644 --- a/numpy/_core/tests/test_mem_policy.py +++ b/numpy/_core/tests/test_mem_policy.py @@ -343,9 +343,6 @@ async def async_test_context_locality(get_module): def test_context_locality(get_module): - if (sys.implementation.name == 'pypy' - and sys.pypy_version_info[:3] < (7, 3, 6)): - pytest.skip('no context-locality support in PyPy < 7.3.6') asyncio.run(async_test_context_locality(get_module)) @@ -411,9 +408,6 @@ def test_new_policy(get_module): assert np._core.multiarray.get_handler_name(c) == orig_policy_name -@pytest.mark.xfail(sys.implementation.name == "pypy", - reason=("bad interaction between getenv and " - "os.environ inside pytest")) @pytest.mark.parametrize("policy", ["0", "1", None]) @pytest.mark.thread_unsafe(reason="modifies environment variables") def test_switch_owner(get_module, policy): diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py 
index 11365d1d1efb..62f3bd4a77c4 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -4017,7 +4017,7 @@ def test_inplace(self): # - defer if other has __array_ufunc__ and it is None # or other is not a subclass and has higher array priority # - else, call ufunc - @pytest.mark.xfail(IS_PYPY, reason="Bug in pypy3.{9, 10}-v7.3.13, #24862") + @pytest.mark.xfail(IS_PYPY, reason="Bug in pypy, #24862") def test_ufunc_binop_interaction(self): # Python method name (without underscores) # -> (numpy ufunc, has_in_place_version, preferred_dtype) @@ -5928,7 +5928,6 @@ def test_fromfile_offset(self, tmp_path, param_filename): np.fromfile, tmp_filename, dtype=x.dtype, sep=",", offset=1) - @pytest.mark.skipif(IS_PYPY, reason="bug in PyPy's PyNumber_AsSsize_t") def test_fromfile_bad_dup(self, tmp_path, param_filename, monkeypatch): def dup_str(fd): return 'abc' @@ -9824,9 +9823,6 @@ def test_1d_format(self): assert_raises(TypeError, '{:30}'.format, a) -from numpy.testing import IS_PYPY - - class TestCTypes: def test_ctypes_is_available(self): diff --git a/numpy/_core/tests/test_regression.py b/numpy/_core/tests/test_regression.py index 8844c461211a..66b462df9784 100644 --- a/numpy/_core/tests/test_regression.py +++ b/numpy/_core/tests/test_regression.py @@ -2291,8 +2291,6 @@ def test_reshape_size_overflow(self): new_shape = (2, 7, 7, 43826197) assert_raises(ValueError, a.reshape, new_shape) - @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") def test_invalid_structured_dtypes(self): # gh-2865 # mapping python objects to other dtypes diff --git a/numpy/_core/tests/test_strings.py b/numpy/_core/tests/test_strings.py index b36d1db76b20..d8d23d47b5b8 100644 --- a/numpy/_core/tests/test_strings.py +++ b/numpy/_core/tests/test_strings.py @@ -5,7 +5,7 @@ import numpy as np from numpy._core._exceptions import _UFuncNoLoopError -from numpy.testing import IS_PYPY, assert_array_equal, 
assert_raises +from numpy.testing import assert_array_equal, assert_raises from numpy.testing._private.utils import requires_memory COMPARISONS = [ @@ -19,8 +19,6 @@ MAX = np.iinfo(np.int64).max -IS_PYPY_LT_7_3_16 = IS_PYPY and sys.implementation.version < (7, 3, 16) - @pytest.mark.parametrize(["op", "ufunc", "sym"], COMPARISONS) def test_mixed_string_comparison_ufuncs_fail(op, ufunc, sym): arr_string = np.array(["a", "b"], dtype="S") @@ -1123,10 +1121,7 @@ def test_replace_unicode(self, buf, old, new, count, res, dt): '\U0001D7F6', '\U00011066', '\U000104A0', - pytest.param('\U0001F107', marks=pytest.mark.xfail( - sys.platform == 'win32' and IS_PYPY_LT_7_3_16, - reason="PYPY bug in Py_UNICODE_ISALNUM", - strict=True)), + '\U0001F107', ]) def test_isalnum_unicode(self, in_, dt): in_ = np.array(in_, dtype=dt) @@ -1140,10 +1135,7 @@ def test_isalnum_unicode(self, in_, dt): ('\U0001F40D', False), ('\U0001F46F', False), ('\u2177', True), - pytest.param('\U00010429', True, marks=pytest.mark.xfail( - sys.platform == 'win32' and IS_PYPY_LT_7_3_16, - reason="PYPY bug in Py_UNICODE_ISLOWER", - strict=True)), + ('\U00010429', True), ('\U0001044E', True), ]) def test_islower_unicode(self, in_, out, dt): @@ -1158,10 +1150,7 @@ def test_islower_unicode(self, in_, out, dt): ('\U0001F40D', False), ('\U0001F46F', False), ('\u2177', False), - pytest.param('\U00010429', False, marks=pytest.mark.xfail( - sys.platform == 'win32' and IS_PYPY_LT_7_3_16, - reason="PYPY bug in Py_UNICODE_ISUPPER", - strict=True)), + ('\U00010429', False), ('\U0001044E', False), ]) def test_isupper_unicode(self, in_, out, dt): @@ -1171,15 +1160,9 @@ def test_isupper_unicode(self, in_, out, dt): @pytest.mark.parametrize("in_,out", [ ('\u1FFc', True), ('Greek \u1FFcitlecases ...', True), - pytest.param('\U00010401\U00010429', True, marks=pytest.mark.xfail( - sys.platform == 'win32' and IS_PYPY_LT_7_3_16, - reason="PYPY bug in Py_UNICODE_ISISTITLE", - strict=True)), + ('\U00010401\U00010429', True), 
('\U00010427\U0001044E', True), - pytest.param('\U00010429', False, marks=pytest.mark.xfail( - sys.platform == 'win32' and IS_PYPY_LT_7_3_16, - reason="PYPY bug in Py_UNICODE_ISISTITLE", - strict=True)), + ('\U00010429', False), ('\U0001044E', False), ('\U0001F40D', False), ('\U0001F46F', False), diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py index c70e3d5ebd43..52994f13bd05 100644 --- a/numpy/lib/tests/test_format.py +++ b/numpy/lib/tests/test_format.py @@ -1035,8 +1035,6 @@ def test_header_growth_axis(): float, np.dtype({'names': ['c'], 'formats': [np.dtype(int, metadata={})]}) ]}), ]) -@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") def test_metadata_dtype(dt): # gh-14142 arr = np.ones(10, dtype=dt) diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py index 5ba634b9f612..4051e203dacf 100644 --- a/numpy/lib/tests/test_io.py +++ b/numpy/lib/tests/test_io.py @@ -843,8 +843,6 @@ def test_comments_multiple(self): a = np.array([[1, 2, 3], [4, 5, 6]], int) assert_array_equal(x, a) - @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") def test_comments_multi_chars(self): c = TextIO() c.write('/* comment\n1,2,3,5\n') @@ -1061,8 +1059,6 @@ def test_from_float_hex(self): c, dtype=dt, converters=float.fromhex, encoding="latin1") assert_equal(res, tgt, err_msg=f"{dt}") - @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") def test_default_float_converter_no_default_hex_conversion(self): """ Ensure that fromhex is only used for values with the correct prefix and @@ -1073,8 +1069,6 @@ def test_default_float_converter_no_default_hex_conversion(self): match=".*convert string 'a' to float64 at row 0, column 1"): np.loadtxt(c) - @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") def 
test_default_float_converter_exception(self): """ Ensure that the exception message raised during failed floating point diff --git a/numpy/lib/tests/test_loadtxt.py b/numpy/lib/tests/test_loadtxt.py index 7a4ed17e7f07..a164bf38f189 100644 --- a/numpy/lib/tests/test_loadtxt.py +++ b/numpy/lib/tests/test_loadtxt.py @@ -13,7 +13,7 @@ import numpy as np from numpy.ma.testutils import assert_equal -from numpy.testing import HAS_REFCOUNT, IS_PYPY, assert_array_equal +from numpy.testing import HAS_REFCOUNT, assert_array_equal def test_scientific_notation(): @@ -204,8 +204,6 @@ def test_maxrows_no_blank_lines(dtype): assert_equal(res, np.array([["1.5", "2.5"], ["3.0", "4.0"]], dtype=dtype)) -@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") @pytest.mark.parametrize("dtype", (np.dtype("f8"), np.dtype("i2"))) def test_exception_message_bad_values(dtype): txt = StringIO("1,2\n3,XXX\n5,6") @@ -393,8 +391,6 @@ def test_bool(): assert_array_equal(res.view(np.uint8), [[1, 0], [1, 1]]) -@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"]) @pytest.mark.filterwarnings("error:.*integer via a float.*:DeprecationWarning") def test_integer_signs(dtype): @@ -411,8 +407,6 @@ def test_integer_signs(dtype): np.loadtxt([f"{sign}2\n"], dtype=dtype) -@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"]) @pytest.mark.filterwarnings("error:.*integer via a float.*:DeprecationWarning") def test_implicit_cast_float_to_int_fails(dtype): @@ -483,8 +477,6 @@ def conv(x): assert sys.getrefcount(sentinel) == 2 -@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") def test_character_not_bytes_compatible(): """Test exception when a character 
cannot be encoded as 'S'.""" data = StringIO("–") # == \u2013 @@ -502,8 +494,6 @@ def test_invalid_converter(conv): np.loadtxt(StringIO("1 2\n3 4"), converters=conv) -@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") def test_converters_dict_raises_non_integer_key(): with pytest.raises(TypeError, match="keys of the converters dict"): np.loadtxt(StringIO("1 2\n3 4"), converters={"a": int}) @@ -569,8 +559,6 @@ def test_quote_support_default(): assert_array_equal(res, expected) -@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") def test_quotechar_multichar_error(): txt = StringIO("1,2\n3,4") msg = r".*must be a single unicode character or None" @@ -730,8 +718,6 @@ def test_unicode_whitespace_stripping_complex(dtype): assert_array_equal(res, np.array([[1, 2 + 3j, 4 + 5j, 6 - 7j, 8j, 9j]] * 2)) -@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") @pytest.mark.parametrize("dtype", "FD") @pytest.mark.parametrize("field", ["1 +2j", "1+ 2j", "1+2 j", "1+-+3", "(1j", "(1", "(1+2j", "1+2j)"]) @@ -740,8 +726,6 @@ def test_bad_complex(dtype, field): np.loadtxt([field + "\n"], dtype=dtype, delimiter=",") -@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"] + "efgdFDG" + "?") def test_nul_character_error(dtype): @@ -753,8 +737,6 @@ def test_nul_character_error(dtype): np.loadtxt(["1\000"], dtype=dtype, delimiter=",", quotechar='"') -@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"] + "efgdFDG" + "?") def test_no_thousands_support(dtype): @@ -1022,8 +1004,6 @@ def test_str_dtype_unit_discovery_with_converter(): assert_equal(a, expected) 
-@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") def test_control_character_empty(): with pytest.raises(TypeError, match="Text reading control character must"): np.loadtxt(StringIO("1 2 3"), delimiter="") From 067f8b0d397d1cb1f4146b41771d4d3ea26236cd Mon Sep 17 00:00:00 2001 From: M Bussonnier Date: Thu, 11 Dec 2025 17:42:18 +0100 Subject: [PATCH 1013/1718] DOC: Improve cross-links in thread safety documentation (#30373) * DOC: Improve cross-links in thread safety documentation Those are mostly cosmetic changes and extra cross-links to improve the documentation. In particular I'd like to use this page as an example of good top-level documentation for threading/freethreading in the py-threading-guide https://py-free-threading.github.io/ This also does a number of updates to the misc.rst file which was basically too indented, and missing a bunch of links. I think many of the info misc.rst on how to interop with C/C++ could be removed, and likely all the information on seterr/geterr moved to routines.err.rst but I don't want to include this moves as part of this PR as it's likely more subject to discussion. There are minor updates in conf.py to avoid 1 sphinx warnings, and I know the thread safety backlink in geterr/seterr/...etc, are a bit overkill, but as numpy will be used as an example and in general is referred to as one of the core scientific Python project I thinks it's OK go go a bit further than usual. 
[skip actions] [skip cirrus] * typo --- doc/neps/conf.py | 6 +- doc/source/conf.py | 2 +- doc/source/reference/c-api/array.rst | 2 + .../reference/random/multithreading.rst | 3 + doc/source/reference/routines.err.rst | 72 +++++++ doc/source/reference/routines.io.rst | 7 + doc/source/reference/thread_safety.rst | 49 ++++- doc/source/user/misc.rst | 197 ++---------------- numpy/_core/_ufunc_config.py | 24 +++ numpy/_core/arrayprint.py | 7 + 10 files changed, 178 insertions(+), 191 deletions(-) diff --git a/doc/neps/conf.py b/doc/neps/conf.py index 33faaf17ff64..056002135dbd 100644 --- a/doc/neps/conf.py +++ b/doc/neps/conf.py @@ -39,10 +39,10 @@ templates_path = ['../source/_templates/'] # The suffix(es) of source filenames. -# You can specify multiple suffix as a list of string: +# You can specify multiple suffix as a dict mapping suffixes to parsers: # -# source_suffix = ['.rst', '.md'] -source_suffix = '.rst' +# source_suffix = {'.rst': 'restructuredtext', '.md': 'markdown'} +source_suffix = {'.rst': 'restructuredtext'} # The master toctree document. master_doc = 'content' diff --git a/doc/source/conf.py b/doc/source/conf.py index 238a7d11f8a4..f6e7fc57bde7 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -114,7 +114,7 @@ class PyTypeObject(ctypes.Structure): templates_path = ['_templates'] # The suffix of source filenames. -source_suffix = '.rst' +source_suffix = {'.rst': 'restructuredtext'} # General substitutions. project = 'NumPy' diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst index 885bdb17181e..d28e535f9428 100644 --- a/doc/source/reference/c-api/array.rst +++ b/doc/source/reference/c-api/array.rst @@ -4272,6 +4272,8 @@ Memory management Returns 0 if nothing was done, -1 on error, and 1 if action was taken. +.. 
_array.ndarray.capi.threading: + Threading support ~~~~~~~~~~~~~~~~~ diff --git a/doc/source/reference/random/multithreading.rst b/doc/source/reference/random/multithreading.rst index 73d2fc9ee5ad..28e045f10dc0 100644 --- a/doc/source/reference/random/multithreading.rst +++ b/doc/source/reference/random/multithreading.rst @@ -9,6 +9,9 @@ well-behaved (writable and aligned). Under normal circumstances, arrays created using the common constructors such as :meth:`numpy.empty` will satisfy these requirements. +.. seealso:: + :ref:`thread_safety` for general information about thread safety in NumPy. + This example makes use of :mod:`concurrent.futures` to fill an array using multiple threads. Threads are long-lived so that repeated calls do not require any additional overheads from thread creation. diff --git a/doc/source/reference/routines.err.rst b/doc/source/reference/routines.err.rst index 5272073a3b00..f46634793fa3 100644 --- a/doc/source/reference/routines.err.rst +++ b/doc/source/reference/routines.err.rst @@ -1,8 +1,80 @@ +.. _fp_error_handling: + Floating point error handling ============================= .. currentmodule:: numpy +Error handling settings are stored in :py:mod:`python:contextvars` +allowing different threads or async tasks to have independent configurations. +For more information, see :ref:`thread_safety`. + +.. _misc-error-handling: + +How numpy handles numerical exceptions +-------------------------------------- + +The default is to ``'warn'`` for ``invalid``, ``divide``, and ``overflow`` +and ``'ignore'`` for ``underflow``. But this can be changed, and it can be +set individually for different kinds of exceptions. The different behaviors +are: + +- ``'ignore'`` : Take no action when the exception occurs. +- ``'warn'`` : Print a :py:exc:`RuntimeWarning` (via the Python :py:mod:`warnings` module). +- ``'raise'`` : Raise a :py:exc:`FloatingPointError`. +- ``'call'`` : Call a specified function. 
+- ``'print'`` : Print a warning directly to ``stdout``. +- ``'log'`` : Record error in a Log object. + +These behaviors can be set for all kinds of errors or specific ones: + +- ``all`` : apply to all numeric exceptions +- ``invalid`` : when NaNs are generated +- ``divide`` : divide by zero (for integers as well!) +- ``overflow`` : floating point overflows +- ``underflow`` : floating point underflows + +Note that integer divide-by-zero is handled by the same machinery. + +The error handling mode can be configured using the :func:`numpy.errstate` +context manager. + +Examples +-------- + +:: + + >>> with np.errstate(all='warn'): + ... np.zeros(5, dtype=np.float32) / 0.0 + :2: RuntimeWarning: invalid value encountered in divide + array([nan, nan, nan, nan, nan], dtype=float32) + +:: + + >>> with np.errstate(under='ignore'): + ... np.array([1.e-100])**10 + array([0.]) + +:: + + >>> with np.errstate(invalid='raise'): + ... np.sqrt(np.array([-1.])) + ... + Traceback (most recent call last): + File "", line 2, in + np.sqrt(np.array([-1.])) + ~~~~~~~^^^^^^^^^^^^^^^^^ + FloatingPointError: invalid value encountered in sqrt + +:: + + >>> def errorhandler(errstr, errflag): + ... print("saw stupid error!") + >>> with np.errstate(call=errorhandler, all='call'): + ... np.zeros(5, dtype=np.int32) / 0 + saw stupid error! + array([nan, nan, nan, nan, nan]) + Setting and getting error handling ---------------------------------- diff --git a/doc/source/reference/routines.io.rst b/doc/source/reference/routines.io.rst index 2b8dd98f36a4..ccd4467af545 100644 --- a/doc/source/reference/routines.io.rst +++ b/doc/source/reference/routines.io.rst @@ -59,8 +59,15 @@ Memory mapping files memmap lib.format.open_memmap +.. _text_formatting_options: + Text formatting options ----------------------- + +Text formatting settings are maintained in a :py:mod:`context variable `, +allowing different threads or async tasks to have independent configurations. +For more information, see :ref:`thread_safety`.
+ .. autosummary:: :toctree: generated/ diff --git a/doc/source/reference/thread_safety.rst b/doc/source/reference/thread_safety.rst index 6b6b9b0ea054..b07419259690 100644 --- a/doc/source/reference/thread_safety.rst +++ b/doc/source/reference/thread_safety.rst @@ -5,7 +5,7 @@ Thread Safety ************* NumPy supports use in a multithreaded context via the `threading` module in the -standard library. Many NumPy operations release the :term:`python:GIL`, so unlike many +standard library. Many NumPy operations release the :term:`python:GIL`, so unlike many situations in Python, it is possible to improve parallel performance by exploiting multithreaded parallelism in Python. @@ -22,7 +22,7 @@ are not reproducible, let alone correct. It is also possible to crash the Python interpreter by, for example, resizing an array while another thread is reading from it to compute a ufunc operation. -In the future, we may add locking to :class:`~numpy.ndarray` to make writing multithreaded +In the future, we may add locking to :class:`~numpy.ndarray` to make writing multithreaded algorithms using NumPy arrays safer, but for now we suggest focusing on read-only access of arrays that are shared between threads, or adding your own locking if you need to mutation and multithreading. @@ -32,6 +32,18 @@ from use of the `threading` module, and instead might be better served with `multiprocessing`. In particular, operations on arrays with ``dtype=np.object_`` do not release the GIL. +Context-local state +------------------- + +NumPy maintains some state for ufuncs on a context-local basis, which means each +thread in a multithreaded program or task in an asyncio program has its own +independent configuration of the `numpy.errstate` (see +:doc:`/reference/routines.err`), and of :ref:`text_formatting_options`. + +You can update state stored in a context variable by entering a context manager. +As soon as the context manager exits, the state will be reset to its value +before entering the context manager.
+ Free-threaded Python -------------------- @@ -40,12 +52,27 @@ Free-threaded Python Starting with NumPy 2.1 and CPython 3.13, NumPy also has experimental support for python runtimes with the GIL disabled. See https://py-free-threading.github.io for more information about installing and -using free-threaded Python, as well as information about supporting it in -libraries that depend on NumPy. - -Because free-threaded Python does not have a global interpreter lock to -serialize access to Python objects, there are more opportunities for threads to -mutate shared state and create thread safety issues. In addition to the -limitations about locking of the ndarray object noted above, this also means -that arrays with ``dtype=np.object_`` are not protected by the GIL, creating data -races for python objects that are not possible outside free-threaded python. +using :py:term:`free-threaded ` Python, as well as +information about supporting it in libraries that depend on NumPy. + +Because free-threaded Python does not have a +global interpreter lock to serialize access to Python objects, there are more +opportunities for threads to mutate shared state and create thread safety +issues. In addition to the limitations about locking of the +:class:`~numpy.ndarray` object noted above, this also means that arrays with +``dtype=np.object_`` are not protected by the GIL, creating data races for python +objects that are not possible outside free-threaded python. + +C-API Threading Support +----------------------- + +For developers writing C extensions that interact with NumPy, several parts of +the :doc:`C-API array documentation ` provide detailed +information about multithreading considerations. + +See Also +-------- + +* :doc:`/reference/random/multithreading` - Practical example of using NumPy's + random number generators in a multithreaded context with + :mod:`concurrent.futures`. 
diff --git a/doc/source/user/misc.rst b/doc/source/user/misc.rst index 6d652e3ca67f..a882afa37afd 100644 --- a/doc/source/user/misc.rst +++ b/doc/source/user/misc.rst @@ -7,7 +7,7 @@ Miscellaneous IEEE 754 floating point special values -------------------------------------- -Special values defined in numpy: nan, inf, +Special values defined in numpy: :data:`~numpy.nan`, :data:`~numpy.inf` NaNs can be used as a poor-man's mask (if you don't care what the original value was) @@ -17,29 +17,39 @@ Note: cannot use equality to test NaNs. E.g.: :: >>> myarr = np.array([1., 0., np.nan, 3.]) >>> np.nonzero(myarr == np.nan) (array([], dtype=int64),) + +:: + >>> np.nan == np.nan # is always False! Use special numpy functions instead. False + +:: + >>> myarr[myarr == np.nan] = 0. # doesn't work >>> myarr array([ 1., 0., nan, 3.]) + +:: + >>> myarr[np.isnan(myarr)] = 0. # use this instead find >>> myarr array([1., 0., 0., 3.]) -Other related special value functions: :: +Other related special value functions: - isinf(): True if value is inf - isfinite(): True if not nan or inf - nan_to_num(): Map nan to 0, inf to max float, -inf to min float +- :func:`~numpy.isnan` - True if value is nan +- :func:`~numpy.isinf` - True if value is inf +- :func:`~numpy.isfinite` - True if not nan or inf +- :func:`~numpy.nan_to_num` - Map nan to 0, inf to max float, -inf to min float The following corresponds to the usual functions except that nans are excluded -from the results: :: +from the results: - nansum() - nanmax() - nanmin() - nanargmax() - nanargmin() +- :func:`~numpy.nansum` +- :func:`~numpy.nanmax` +- :func:`~numpy.nanmin` +- :func:`~numpy.nanargmax` +- :func:`~numpy.nanargmin` >>> x = np.arange(10.) >>> x[3] = np.nan @@ -47,168 +57,3 @@ from the results: :: nan >>> np.nansum(x) 42.0 - -How numpy handles numerical exceptions --------------------------------------- - -The default is to ``'warn'`` for ``invalid``, ``divide``, and ``overflow`` -and ``'ignore'`` for ``underflow``. 
But this can be changed, and it can be -set individually for different kinds of exceptions. The different behaviors -are: - - - 'ignore' : Take no action when the exception occurs. - - 'warn' : Print a `RuntimeWarning` (via the Python `warnings` module). - - 'raise' : Raise a `FloatingPointError`. - - 'call' : Call a function specified using the `seterrcall` function. - - 'print' : Print a warning directly to ``stdout``. - - 'log' : Record error in a Log object specified by `seterrcall`. - -These behaviors can be set for all kinds of errors or specific ones: - - - all : apply to all numeric exceptions - - invalid : when NaNs are generated - - divide : divide by zero (for integers as well!) - - overflow : floating point overflows - - underflow : floating point underflows - -Note that integer divide-by-zero is handled by the same machinery. -These behaviors are set on a per-thread basis. - -Examples --------- - -:: - - >>> oldsettings = np.seterr(all='warn') - >>> np.zeros(5,dtype=np.float32)/0. - Traceback (most recent call last): - ... - RuntimeWarning: invalid value encountered in divide - >>> j = np.seterr(under='ignore') - >>> np.array([1.e-100])**10 - array([0.]) - >>> j = np.seterr(invalid='raise') - >>> np.sqrt(np.array([-1.])) - Traceback (most recent call last): - ... - FloatingPointError: invalid value encountered in sqrt - >>> def errorhandler(errstr, errflag): - ... print("saw stupid error!") - >>> np.seterrcall(errorhandler) - >>> j = np.seterr(all='call') - >>> np.zeros(5, dtype=np.int32)/0 - saw stupid error! - array([nan, nan, nan, nan, nan]) - >>> j = np.seterr(**oldsettings) # restore previous - ... # error-handling settings - -Interfacing to C ----------------- -Only a survey of the choices. Little detail on how each works. - -1) Bare metal, wrap your own C-code manually. 
- - - Plusses: - - - Efficient - - No dependencies on other tools - - - Minuses: - - - Lots of learning overhead: - - - need to learn basics of Python C API - - need to learn basics of numpy C API - - need to learn how to handle reference counting and love it. - - - Reference counting often difficult to get right. - - - getting it wrong leads to memory leaks, and worse, segfaults - -2) Cython - - - Plusses: - - - avoid learning C API's - - no dealing with reference counting - - can code in pseudo python and generate C code - - can also interface to existing C code - - should shield you from changes to Python C api - - has become the de-facto standard within the scientific Python community - - fast indexing support for arrays - - - Minuses: - - - Can write code in non-standard form which may become obsolete - - Not as flexible as manual wrapping - -3) ctypes - - - Plusses: - - - part of Python standard library - - good for interfacing to existing shareable libraries, particularly - Windows DLLs - - avoids API/reference counting issues - - good numpy support: arrays have all these in their ctypes - attribute: :: - - a.ctypes.data - a.ctypes.data_as - a.ctypes.shape - a.ctypes.shape_as - a.ctypes.strides - a.ctypes.strides_as - - - Minuses: - - - can't use for writing code to be turned into C extensions, only a wrapper - tool. 
- -4) SWIG (automatic wrapper generator) - - - Plusses: - - - around a long time - - multiple scripting language support - - C++ support - - Good for wrapping large (many functions) existing C libraries - - - Minuses: - - - generates lots of code between Python and the C code - - can cause performance problems that are nearly impossible to optimize - out - - interface files can be hard to write - - doesn't necessarily avoid reference counting issues or needing to know - API's - -5) Psyco - - - Plusses: - - - Turns pure python into efficient machine code through jit-like - optimizations - - very fast when it optimizes well - - - Minuses: - - - Only on intel (windows?) - - Doesn't do much for numpy? - -Interfacing to Fortran: ------------------------ -The clear choice to wrap Fortran code is -`f2py `_. - -Pyfort is an older alternative, but not supported any longer. -Fwrap is a newer project that looked promising but isn't being developed any -longer. - -Interfacing to C++: -------------------- - 1) Cython - 2) CXX - 3) Boost.python - 4) SWIG - 5) SIP (used mainly in PyQT) diff --git a/numpy/_core/_ufunc_config.py b/numpy/_core/_ufunc_config.py index b16147c18ee6..6a7476670d95 100644 --- a/numpy/_core/_ufunc_config.py +++ b/numpy/_core/_ufunc_config.py @@ -57,6 +57,7 @@ def seterr(all=None, divide=None, over=None, under=None, invalid=None): seterrcall : Set a callback function for the 'call' mode. geterr, geterrcall, errstate + Notes ----- The floating-point exceptions are defined in the IEEE 754 standard [1]_: @@ -68,6 +69,8 @@ def seterr(all=None, divide=None, over=None, under=None, invalid=None): - Invalid operation: result is not an expressible number, typically indicates that a NaN was produced. + **Concurrency note:** see :ref:`fp_error_handling` + .. [1] https://en.wikipedia.org/wiki/IEEE_754 Examples @@ -127,6 +130,8 @@ def geterr(): For complete documentation of the types of floating-point exceptions and treatment options, see `seterr`. 
+ **Concurrency note:** see :doc:`/reference/routines.err` + Examples -------- >>> import numpy as np @@ -172,6 +177,10 @@ def setbufsize(size): bufsize : int Previous size of ufunc buffer in bytes. + Notes + ----- + **Concurrency note:** see :doc:`/reference/routines.err` + Examples -------- When exiting a `numpy.errstate` context manager the bufsize is restored: @@ -205,6 +214,12 @@ def getbufsize(): getbufsize : int Size of ufunc buffer in bytes. + Notes + ----- + + **Concurrency note:** see :doc:`/reference/routines.err` + + Examples -------- >>> import numpy as np @@ -256,6 +271,11 @@ def seterrcall(func): -------- seterr, geterr, geterrcall + Notes + ----- + + **Concurrency note:** see :doc:`/reference/routines.err` + Examples -------- Callback upon error: @@ -331,6 +351,8 @@ def geterrcall(): For complete documentation of the types of floating-point exceptions and treatment options, see `seterr`. + **Concurrency note:** see :ref:`fp_error_handling` + Examples -------- >>> import numpy as np @@ -399,6 +421,8 @@ class errstate: For complete documentation of the types of floating-point exceptions and treatment options, see `seterr`. + **Concurrency note:** see :ref:`fp_error_handling` + Examples -------- >>> import numpy as np diff --git a/numpy/_core/arrayprint.py b/numpy/_core/arrayprint.py index 8d576d9e1d56..96c17285bb3d 100644 --- a/numpy/_core/arrayprint.py +++ b/numpy/_core/arrayprint.py @@ -248,6 +248,7 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None, -------- get_printoptions, printoptions, array2string + Notes ----- @@ -255,6 +256,8 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None, * Use `printoptions` as a context manager to set the values temporarily. * These print options apply only to NumPy ndarrays, not to scalars. 
+ **Concurrency note:** see :ref:`text_formatting_options` + Examples -------- Floating point precision can be set: @@ -357,6 +360,8 @@ def get_printoptions(): ----- These print options apply only to NumPy ndarrays, not to scalars. + **Concurrency note:** see :ref:`text_formatting_options` + See Also -------- set_printoptions, printoptions @@ -419,6 +424,8 @@ def printoptions(*args, **kwargs): ----- These print options apply only to NumPy ndarrays, not to scalars. + **Concurrency note:** see :ref:`text_formatting_options` + """ token = _set_printoptions(*args, **kwargs) From 753dd1f2709c1b6bf3c0c0b084031da16fd1c920 Mon Sep 17 00:00:00 2001 From: Kaiyuan Yang <1079700998@qq.com> Date: Fri, 12 Dec 2025 03:27:27 +0800 Subject: [PATCH 1014/1718] BUG: fix double evaluation in PyArrayScalar_RETURN_BOOL_FROM_LONG (#30418) * BUG: fix double evaluation in PyArrayScalar_RETURN_BOOL_FROM_LONG Closes #30389 * fix: lint errors and wasm import problems --- numpy/_core/include/numpy/arrayscalars.h | 8 ++-- numpy/_core/tests/test_multiprocessing.py | 51 +++++++++++++++++++++++ 2 files changed, 56 insertions(+), 3 deletions(-) create mode 100644 numpy/_core/tests/test_multiprocessing.py diff --git a/numpy/_core/include/numpy/arrayscalars.h b/numpy/_core/include/numpy/arrayscalars.h index ff048061f70a..46bc58cc2a35 100644 --- a/numpy/_core/include/numpy/arrayscalars.h +++ b/numpy/_core/include/numpy/arrayscalars.h @@ -173,9 +173,11 @@ typedef struct { #define PyArrayScalar_True ((PyObject *)(&(_PyArrayScalar_BoolValues[1]))) #define PyArrayScalar_FromLong(i) \ ((PyObject *)(&(_PyArrayScalar_BoolValues[((i)!=0)]))) -#define PyArrayScalar_RETURN_BOOL_FROM_LONG(i) \ - return Py_INCREF(PyArrayScalar_FromLong(i)), \ - PyArrayScalar_FromLong(i) +#define PyArrayScalar_RETURN_BOOL_FROM_LONG(i) do { \ + PyObject *obj = PyArrayScalar_FromLong(i); \ + Py_INCREF(obj); \ + return obj; \ +} while (0) #define PyArrayScalar_RETURN_FALSE \ return Py_INCREF(PyArrayScalar_False), \ PyArrayScalar_False 
diff --git a/numpy/_core/tests/test_multiprocessing.py b/numpy/_core/tests/test_multiprocessing.py new file mode 100644 index 000000000000..2c5c2fcfb8ed --- /dev/null +++ b/numpy/_core/tests/test_multiprocessing.py @@ -0,0 +1,51 @@ +import pytest + +import numpy as np +from numpy.testing import IS_WASM + +pytestmark = pytest.mark.thread_unsafe( + reason="tests in this module are explicitly multi-processed" +) + +def bool_array_writer(shm_name, n): + # writer routine for test_read_write_bool_array + import time + from multiprocessing import shared_memory + shm = shared_memory.SharedMemory(name=shm_name) + arr = np.ndarray(n, dtype=np.bool_, buffer=shm.buf) + for i in range(n): + arr[i] = True + time.sleep(0.00001) + +def bool_array_reader(shm_name, n): + # reader routine for test_read_write_bool_array + from multiprocessing import shared_memory + shm = shared_memory.SharedMemory(name=shm_name) + arr = np.ndarray(n, dtype=np.bool_, buffer=shm.buf) + for i in range(n): + while not arr[i]: + pass + +@pytest.mark.skipif(IS_WASM, + reason="WASM does not support _posixshmem") +def test_read_write_bool_array(): + # See: gh-30389 + # + # Prior to Python 3.13, boolean scalar singletons (np.True_ / np.False_) were + # regular reference-counted objects. Due to the double evaluation in + # PyArrayScalar_RETURN_BOOL_FROM_LONG, concurrent reads and writes of a + # boolean array could corrupt their refcounts, potentially causing a crash + # (e.g., `free(): invalid pointer`). + # + # This test creates a multi-process race between a writer and a reader to + # ensure that NumPy does not exhibit such failures.
+ from concurrent.futures import ProcessPoolExecutor + from multiprocessing import shared_memory + n = 10000 + shm = shared_memory.SharedMemory(create=True, size=n) + with ProcessPoolExecutor(max_workers=2) as executor: + f_writer = executor.submit(bool_array_writer, shm.name, n) + f_reader = executor.submit(bool_array_reader, shm.name, n) + shm.unlink() + f_writer.result() + f_reader.result() From 54840fb112db7b4cfa0b8dddf180407a01b8da2c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 12 Dec 2025 17:05:47 +0000 Subject: [PATCH 1015/1718] MAINT: Bump github/codeql-action from 4.31.7 to 4.31.8 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 4.31.7 to 4.31.8. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/cf1bb45a277cb3c205638b2cd5c984db1c46a412...1b168cd39490f61582a9beae412bb7057a6b2c4e) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 4.31.8 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index d7622b4d6dff..aaa14b37588a 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@cf1bb45a277cb3c205638b2cd5c984db1c46a412 # v4.31.7 + uses: github/codeql-action/init@1b168cd39490f61582a9beae412bb7057a6b2c4e # v4.31.8 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. 
@@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@cf1bb45a277cb3c205638b2cd5c984db1c46a412 # v4.31.7 + uses: github/codeql-action/autobuild@1b168cd39490f61582a9beae412bb7057a6b2c4e # v4.31.8 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@cf1bb45a277cb3c205638b2cd5c984db1c46a412 # v4.31.7 + uses: github/codeql-action/analyze@1b168cd39490f61582a9beae412bb7057a6b2c4e # v4.31.8 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 8488e97ef116..140d92d43e61 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@cf1bb45a277cb3c205638b2cd5c984db1c46a412 # v2.1.27 + uses: github/codeql-action/upload-sarif@1b168cd39490f61582a9beae412bb7057a6b2c4e # v2.1.27 with: sarif_file: results.sarif From d601b6cbf93ff8f6316a8f2c30ed35059f8a2808 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 12 Dec 2025 17:05:52 +0000 Subject: [PATCH 1016/1718] MAINT: Bump actions/cache from 4.3.0 to 5.0.1 Bumps [actions/cache](https://github.com/actions/cache) from 4.3.0 to 5.0.1. 
- [Release notes](https://github.com/actions/cache/releases) - [Changelog](https://github.com/actions/cache/blob/main/RELEASES.md) - [Commits](https://github.com/actions/cache/compare/0057852bfaa89a56745cba8c7296529d2fc39830...9255dc7a253b0ccc959486e2bca901246202afeb) --- updated-dependencies: - dependency-name: actions/cache dependency-version: 5.0.1 dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- .github/workflows/linux_qemu.yml | 4 ++-- .github/workflows/macos.yml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/linux_qemu.yml b/.github/workflows/linux_qemu.yml index 916417ebc513..f82bf1aa2626 100644 --- a/.github/workflows/linux_qemu.yml +++ b/.github/workflows/linux_qemu.yml @@ -96,7 +96,7 @@ jobs: sudo apt install -y ninja-build gcc-${TOOLCHAIN_NAME} g++-${TOOLCHAIN_NAME} gfortran-${TOOLCHAIN_NAME} - name: Cache docker container - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0 + uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1 id: container-cache with: path: ~/docker_${{ matrix.BUILD_PROP[1] }} @@ -204,7 +204,7 @@ jobs: sudo apt install -y ninja-build gcc-14-${TOOLCHAIN_NAME} g++-14-${TOOLCHAIN_NAME} gfortran-14-${TOOLCHAIN_NAME} - name: Cache docker container - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0 + uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1 id: container-cache with: path: ~/docker_${{ matrix.BUILD_PROP[1] }} diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index f2af68370b99..15d3cf947222 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -49,7 +49,7 @@ jobs: echo "today=$(/bin/date -u '+%Y%m%d')" >> $GITHUB_OUTPUT - name: Setup compiler cache - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0 + uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1 id: cache-ccache 
with: path: ${{ steps.prep-ccache.outputs.dir }} @@ -73,7 +73,7 @@ jobs: # ensure we re-solve once a day (since we don't lock versions). Could be # replaced by a conda-lock based approach in the future. - name: Cache conda environment - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0 + uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1 env: # Increase this value to reset cache if environment.yml has not changed CACHE_NUMBER: 1 From 0fe53eacf0aa1b5c931661d68b8668e34874cd98 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 13 Dec 2025 23:26:55 +0100 Subject: [PATCH 1017/1718] TYP: avoid using `np.` in `__init__.pyi` --- numpy/__init__.pyi | 275 ++++++++++++++++++++++----------------------- 1 file changed, 137 insertions(+), 138 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index aad324f54c4d..af13ddd20cce 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -10,7 +10,6 @@ from decimal import Decimal from fractions import Fraction from uuid import UUID -import numpy as np from numpy.__config__ import show as show_config from numpy._pytesttester import PytestTester from numpy._core._internal import _ctypes @@ -729,8 +728,8 @@ _DT64ItemT_co = TypeVar("_DT64ItemT_co", bound=_DT64Item, default=Any, covariant ### Type Aliases (for internal use only) -type _Falsy = L[False, 0] | np.bool[L[False]] -type _Truthy = L[True, 1] | np.bool[L[True]] +type _Falsy = L[False, 0] | bool_[L[False]] +type _Truthy = L[True, 1] | bool_[L[True]] type _1D = tuple[int] type _2D = tuple[int, int] @@ -750,8 +749,8 @@ type _ArrayNumeric = NDArray[number | timedelta64 | object_] type _ScalarNotObject = bool_ | number | flexible | datetime64 | timedelta64 -type _Float64_co = float | floating[_64Bit] | float32 | float16 | integer | np.bool -type _Complex64_co = number[_32Bit] | number[_16Bit] | number[_8Bit] | builtins.bool | np.bool +type _Float64_co = float | floating[_64Bit] | float32 | float16 | integer | bool_ +type 
_Complex64_co = number[_32Bit] | number[_16Bit] | number[_8Bit] | builtins.bool | bool_ type _Complex128_co = complex | number[_64Bit] | _Complex64_co type _ToIndex = SupportsIndex | slice | EllipsisType | _ArrayLikeInt_co | None @@ -888,8 +887,8 @@ type _SortSide = L["left", "right"] type _ConvertibleToInt = SupportsInt | SupportsIndex | _CharLike_co type _ConvertibleToFloat = SupportsFloat | SupportsIndex | _CharLike_co type _ConvertibleToComplex = SupportsComplex | SupportsFloat | SupportsIndex | _CharLike_co -type _ConvertibleToTD64 = dt.timedelta | int | _CharLike_co | character | number | timedelta64 | np.bool | None -type _ConvertibleToDT64 = dt.date | int | _CharLike_co | character | number | datetime64 | np.bool | None +type _ConvertibleToTD64 = dt.timedelta | int | _CharLike_co | character | number | timedelta64 | bool_ | None +type _ConvertibleToDT64 = dt.date | int | _CharLike_co | character | number | datetime64 | bool_ | None type _NDIterFlagsKind = L[ "buffered", @@ -1063,8 +1062,8 @@ pi: Final[float] = ... inf: Final[float] = ... nan: Final[float] = ... little_endian: Final[builtins.bool] = ... -False_: Final[np.bool[L[False]]] = ... -True_: Final[np.bool[L[True]]] = ... +False_: Final[bool_[L[False]]] = ... +True_: Final[bool_[L[True]]] = ... newaxis: Final[None] = None # not in __all__ @@ -1129,12 +1128,12 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 @overload def __new__( cls, - dtype: type[builtins.bool | np.bool | ct.c_bool] | _BoolCodes, + dtype: type[builtins.bool | bool_ | ct.c_bool] | _BoolCodes, align: builtins.bool = False, copy: builtins.bool = False, *, metadata: dict[str, Any] = ..., - ) -> dtype[np.bool]: ... + ) -> dtype[bool_]: ... @overload def __new__( cls, @@ -1732,7 +1731,7 @@ class _ArrayOrScalarCommon: SupportsIndex, # version _ShapeLike, # Shape _DTypeT_co, # DType - np.bool, # F-continuous + bool_, # F-continuous bytes | list[Any], # Data ], /) -> None: ... 
@@ -2168,7 +2167,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload # flexible | object_ | bool def __setitem__( - self: ndarray[Any, dtype[flexible | object_ | np.bool] | dtypes.StringDType], + self: ndarray[Any, dtype[flexible | object_ | bool_] | dtypes.StringDType], key: _ToIndices, value: object, /, @@ -2286,7 +2285,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): keepdims: L[False, 0] = False, *, where: _ArrayLikeBool_co = True - ) -> np.bool: ... + ) -> bool_: ... @overload def all( self, @@ -2295,7 +2294,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): keepdims: SupportsIndex = False, *, where: _ArrayLikeBool_co = True, - ) -> np.bool | NDArray[np.bool]: ... + ) -> bool_ | NDArray[bool_]: ... @overload def all[ArrayT: ndarray]( self, @@ -2323,7 +2322,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): keepdims: L[False, 0] = False, *, where: _ArrayLikeBool_co = True - ) -> np.bool: ... + ) -> bool_: ... @overload def any( self, @@ -2332,7 +2331,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): keepdims: SupportsIndex = False, *, where: _ArrayLikeBool_co = True, - ) -> np.bool | NDArray[np.bool]: ... + ) -> bool_ | NDArray[bool_]: ... @overload def any[ArrayT: ndarray]( self, @@ -2410,7 +2409,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def dot[ArrayT: ndarray](self, b: ArrayLike, /, out: ArrayT) -> ArrayT: ... # `nonzero()` raises for 0d arrays/generics - def nonzero(self) -> tuple[ndarray[tuple[int], np.dtype[intp]], ...]: ... + def nonzero(self) -> tuple[ndarray[tuple[int], dtype[intp]], ...]: ... @overload def searchsorted( @@ -2657,7 +2656,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def getfield(self, /, dtype: DTypeLike, offset: SupportsIndex = 0) -> NDArray[Any]: ... def __index__(self: NDArray[integer], /) -> int: ... 
- def __complex__(self: NDArray[number | np.bool | object_], /) -> complex: ... + def __complex__(self: NDArray[number | bool_ | object_], /) -> complex: ... def __len__(self) -> int: ... def __contains__(self, value: object, /) -> builtins.bool: ... @@ -2682,67 +2681,67 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # @overload - def __lt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[np.bool]: ... + def __lt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[bool_]: ... @overload - def __lt__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[np.bool]: ... + def __lt__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[bool_]: ... @overload - def __lt__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[np.bool]: ... + def __lt__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[bool_]: ... @overload - def __lt__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[np.bool]: ... + def __lt__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[bool_]: ... @overload - def __lt__(self: _ArrayString, other: _ArrayLikeStr_co | _ArrayLikeString_co, /) -> NDArray[np.bool]: ... + def __lt__(self: _ArrayString, other: _ArrayLikeStr_co | _ArrayLikeString_co, /) -> NDArray[bool_]: ... @overload - def __lt__(self: NDArray[object_], other: object, /) -> NDArray[np.bool]: ... + def __lt__(self: NDArray[object_], other: object, /) -> NDArray[bool_]: ... @overload - def __lt__(self, other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ... + def __lt__(self, other: _ArrayLikeObject_co, /) -> NDArray[bool_]: ... # @overload - def __le__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[np.bool]: ... + def __le__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[bool_]: ... @overload - def __le__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[np.bool]: ... 
+ def __le__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[bool_]: ... @overload - def __le__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[np.bool]: ... + def __le__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[bool_]: ... @overload - def __le__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[np.bool]: ... + def __le__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[bool_]: ... @overload - def __le__(self: _ArrayString, other: _ArrayLikeStr_co | _ArrayLikeString_co, /) -> NDArray[np.bool]: ... + def __le__(self: _ArrayString, other: _ArrayLikeStr_co | _ArrayLikeString_co, /) -> NDArray[bool_]: ... @overload - def __le__(self: NDArray[object_], other: object, /) -> NDArray[np.bool]: ... + def __le__(self: NDArray[object_], other: object, /) -> NDArray[bool_]: ... @overload - def __le__(self, other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ... + def __le__(self, other: _ArrayLikeObject_co, /) -> NDArray[bool_]: ... # @overload - def __gt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[np.bool]: ... + def __gt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[bool_]: ... @overload - def __gt__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[np.bool]: ... + def __gt__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[bool_]: ... @overload - def __gt__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[np.bool]: ... + def __gt__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[bool_]: ... @overload - def __gt__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[np.bool]: ... + def __gt__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[bool_]: ... @overload - def __gt__(self: _ArrayString, other: _ArrayLikeStr_co | _ArrayLikeString_co, /) -> NDArray[np.bool]: ... 
+ def __gt__(self: _ArrayString, other: _ArrayLikeStr_co | _ArrayLikeString_co, /) -> NDArray[bool_]: ... @overload - def __gt__(self: NDArray[object_], other: object, /) -> NDArray[np.bool]: ... + def __gt__(self: NDArray[object_], other: object, /) -> NDArray[bool_]: ... @overload - def __gt__(self, other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ... + def __gt__(self, other: _ArrayLikeObject_co, /) -> NDArray[bool_]: ... # @overload - def __ge__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[np.bool]: ... + def __ge__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[bool_]: ... @overload - def __ge__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[np.bool]: ... + def __ge__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[bool_]: ... @overload - def __ge__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[np.bool]: ... + def __ge__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[bool_]: ... @overload - def __ge__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[np.bool]: ... + def __ge__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[bool_]: ... @overload - def __ge__(self: _ArrayString, other: _ArrayLikeStr_co | _ArrayLikeString_co, /) -> NDArray[np.bool]: ... + def __ge__(self: _ArrayString, other: _ArrayLikeStr_co | _ArrayLikeString_co, /) -> NDArray[bool_]: ... @overload - def __ge__(self: NDArray[object_], other: object, /) -> NDArray[np.bool]: ... + def __ge__(self: NDArray[object_], other: object, /) -> NDArray[bool_]: ... @overload - def __ge__(self, other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ... + def __ge__(self, other: _ArrayLikeObject_co, /) -> NDArray[bool_]: ... # Unary ops @@ -2772,9 +2771,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __matmul__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... 
@overload - def __matmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... + def __matmul__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[bool_]: ... @overload - def __matmul__[ScalarT: number](self: NDArray[np.bool], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... + def __matmul__[ScalarT: number](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __matmul__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -2801,9 +2800,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload # signature equivalent to __matmul__ def __rmatmul__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload - def __rmatmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... + def __rmatmul__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[bool_]: ... @overload - def __rmatmul__[ScalarT: number](self: NDArray[np.bool], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... + def __rmatmul__[ScalarT: number](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __rmatmul__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -2829,14 +2828,14 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __mod__[ScalarT: floating | integer]( - self: NDArray[ScalarT], other: int | np.bool, / + self: NDArray[ScalarT], other: int | bool_, / ) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload def __mod__[ScalarT: floating | integer](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload - def __mod__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... + def __mod__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... 
@overload - def __mod__[ScalarT: floating | integer](self: NDArray[np.bool], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... + def __mod__[ScalarT: floating | integer](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __mod__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -2856,14 +2855,14 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload # signature equivalent to __mod__ def __rmod__[ScalarT: floating | integer]( - self: NDArray[ScalarT], other: int | np.bool, / + self: NDArray[ScalarT], other: int | bool_, / ) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload def __rmod__[ScalarT: floating | integer](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload - def __rmod__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... + def __rmod__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload - def __rmod__[ScalarT: floating | integer](self: NDArray[np.bool], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... + def __rmod__[ScalarT: floating | integer](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __rmod__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -2883,17 +2882,17 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __divmod__[ScalarT: floating | integer]( - self: NDArray[ScalarT], rhs: int | np.bool, / + self: NDArray[ScalarT], rhs: int | bool_, / ) -> _2Tuple[ndarray[_ShapeT_co, dtype[ScalarT]]]: ... @overload def __divmod__[ScalarT: floating | integer]( self: NDArray[ScalarT], rhs: _ArrayLikeBool_co, / ) -> _2Tuple[NDArray[ScalarT]]: ... @overload - def __divmod__(self: NDArray[np.bool], rhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[int8]]: ... 
+ def __divmod__(self: NDArray[bool_], rhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[int8]]: ... @overload def __divmod__[ScalarT: floating | integer]( - self: NDArray[np.bool], rhs: _ArrayLike[ScalarT], / + self: NDArray[bool_], rhs: _ArrayLike[ScalarT], / ) -> _2Tuple[NDArray[ScalarT]]: ... @overload def __divmod__(self: NDArray[float64], rhs: _ArrayLikeFloat64_co, /) -> _2Tuple[NDArray[float64]]: ... @@ -2910,17 +2909,17 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload # signature equivalent to __divmod__ def __rdivmod__[ScalarT: floating | integer]( - self: NDArray[ScalarT], lhs: int | np.bool, / + self: NDArray[ScalarT], lhs: int | bool_, / ) -> _2Tuple[ndarray[_ShapeT_co, dtype[ScalarT]]]: ... @overload def __rdivmod__[ScalarT: floating | integer]( self: NDArray[ScalarT], lhs: _ArrayLikeBool_co, / ) -> _2Tuple[NDArray[ScalarT]]: ... @overload - def __rdivmod__(self: NDArray[np.bool], lhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[int8]]: ... + def __rdivmod__(self: NDArray[bool_], lhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[int8]]: ... @overload def __rdivmod__[ScalarT: floating | integer]( - self: NDArray[np.bool], lhs: _ArrayLike[ScalarT], / + self: NDArray[bool_], lhs: _ArrayLike[ScalarT], / ) -> _2Tuple[NDArray[ScalarT]]: ... @overload def __rdivmod__(self: NDArray[float64], lhs: _ArrayLikeFloat64_co, /) -> _2Tuple[NDArray[float64]]: ... @@ -2937,13 +2936,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__add__` @overload - def __add__[ScalarT: number](self: NDArray[ScalarT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... + def __add__[ScalarT: number](self: NDArray[ScalarT], other: int | bool_, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload def __add__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... 
@overload - def __add__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... + def __add__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[bool_]: ... @overload - def __add__[ScalarT: number](self: NDArray[np.bool], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... + def __add__[ScalarT: number](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __add__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -2985,13 +2984,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__radd__` @overload # signature equivalent to __add__ - def __radd__[ScalarT: number](self: NDArray[ScalarT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... + def __radd__[ScalarT: number](self: NDArray[ScalarT], other: int | bool_, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload def __radd__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload - def __radd__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... + def __radd__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[bool_]: ... @overload - def __radd__[ScalarT: number](self: NDArray[np.bool], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... + def __radd__[ScalarT: number](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __radd__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -3033,13 +3032,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__sub__` @overload - def __sub__[ScalarT: number](self: NDArray[ScalarT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... 
+ def __sub__[ScalarT: number](self: NDArray[ScalarT], other: int | bool_, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload def __sub__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload - def __sub__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ... + def __sub__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NoReturn: ... @overload - def __sub__[ScalarT: number](self: NDArray[np.bool], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... + def __sub__[ScalarT: number](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __sub__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -3071,13 +3070,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__rsub__` @overload - def __rsub__[ScalarT: number](self: NDArray[ScalarT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... + def __rsub__[ScalarT: number](self: NDArray[ScalarT], other: int | bool_, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload def __rsub__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload - def __rsub__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ... + def __rsub__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NoReturn: ... @overload - def __rsub__[ScalarT: number](self: NDArray[np.bool], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... + def __rsub__[ScalarT: number](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __rsub__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... 
@overload @@ -3109,13 +3108,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__mul__` @overload - def __mul__[ScalarT: number](self: NDArray[ScalarT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... + def __mul__[ScalarT: number](self: NDArray[ScalarT], other: int | bool_, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload def __mul__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload - def __mul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... + def __mul__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[bool_]: ... @overload - def __mul__[ScalarT: number](self: NDArray[np.bool], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... + def __mul__[ScalarT: number](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __mul__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -3151,13 +3150,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__rmul__` @overload # signature equivalent to __mul__ - def __rmul__[ScalarT: number](self: NDArray[ScalarT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... + def __rmul__[ScalarT: number](self: NDArray[ScalarT], other: int | bool_, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload def __rmul__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload - def __rmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... + def __rmul__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[bool_]: ... @overload - def __rmul__[ScalarT: number](self: NDArray[np.bool], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... 
+ def __rmul__[ScalarT: number](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __rmul__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -3256,14 +3255,14 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__floordiv__` @overload def __floordiv__[ScalarT: integer | floating]( - self: NDArray[ScalarT], other: int | np.bool, / + self: NDArray[ScalarT], other: int | bool_, / ) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload def __floordiv__[ScalarT: integer | floating](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload - def __floordiv__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... + def __floordiv__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload - def __floordiv__[ScalarT: integer | floating](self: NDArray[np.bool], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... + def __floordiv__[ScalarT: integer | floating](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __floordiv__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -3288,14 +3287,14 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__rfloordiv__` @overload def __rfloordiv__[ScalarT: integer | floating]( - self: NDArray[ScalarT], other: int | np.bool, / + self: NDArray[ScalarT], other: int | bool_, / ) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload def __rfloordiv__[ScalarT: integer | floating](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload - def __rfloordiv__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... + def __rfloordiv__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... 
@overload - def __rfloordiv__[ScalarT: integer | floating](self: NDArray[np.bool], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... + def __rfloordiv__[ScalarT: integer | floating](self: NDArray[bool_], other: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def __rfloordiv__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload @@ -3317,13 +3316,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__pow__` @overload - def __pow__[ScalarT: number](self: NDArray[ScalarT], other: int | np.bool, mod: None = None, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... + def __pow__[ScalarT: number](self: NDArray[ScalarT], other: int | bool_, mod: None = None, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload def __pow__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[ScalarT]: ... @overload - def __pow__(self: NDArray[np.bool], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[int8]: ... + def __pow__(self: NDArray[bool_], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[int8]: ... @overload - def __pow__[ScalarT: number](self: NDArray[np.bool], other: _ArrayLike[ScalarT], mod: None = None, /) -> NDArray[ScalarT]: ... + def __pow__[ScalarT: number](self: NDArray[bool_], other: _ArrayLike[ScalarT], mod: None = None, /) -> NDArray[ScalarT]: ... @overload def __pow__(self: NDArray[float64], other: _ArrayLikeFloat64_co, mod: None = None, /) -> NDArray[float64]: ... @overload @@ -3351,13 +3350,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__rpow__` @overload - def __rpow__[ScalarT: number](self: NDArray[ScalarT], other: int | np.bool, mod: None = None, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... + def __rpow__[ScalarT: number](self: NDArray[ScalarT], other: int | bool_, mod: None = None, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... 
@overload def __rpow__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[ScalarT]: ... @overload - def __rpow__(self: NDArray[np.bool], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[int8]: ... + def __rpow__(self: NDArray[bool_], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[int8]: ... @overload - def __rpow__[ScalarT: number](self: NDArray[np.bool], other: _ArrayLike[ScalarT], mod: None = None, /) -> NDArray[ScalarT]: ... + def __rpow__[ScalarT: number](self: NDArray[bool_], other: _ArrayLike[ScalarT], mod: None = None, /) -> NDArray[ScalarT]: ... @overload def __rpow__(self: NDArray[float64], other: _ArrayLikeFloat64_co, mod: None = None, /) -> NDArray[float64]: ... @overload @@ -3384,7 +3383,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __rpow__(self: NDArray[Any], other: _ArrayLikeObject_co, mod: None = None, /) -> Any: ... @overload - def __lshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... + def __lshift__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload def __lshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload @@ -3395,7 +3394,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __lshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __rlshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... + def __rlshift__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload def __rlshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload @@ -3406,7 +3405,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __rlshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... 
@overload - def __rshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... + def __rshift__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload def __rshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload @@ -3417,7 +3416,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __rshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __rrshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... + def __rrshift__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... @overload def __rrshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload @@ -3428,7 +3427,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __rrshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __and__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... + def __and__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[bool_]: ... @overload def __and__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload @@ -3439,7 +3438,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __and__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __rand__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... + def __rand__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[bool_]: ... @overload def __rand__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload @@ -3450,7 +3449,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __rand__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... 
@overload - def __xor__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... + def __xor__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[bool_]: ... @overload def __xor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload @@ -3461,7 +3460,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __xor__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __rxor__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... + def __rxor__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[bool_]: ... @overload def __rxor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload @@ -3472,7 +3471,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __rxor__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __or__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... + def __or__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[bool_]: ... @overload def __or__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload @@ -3483,7 +3482,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __or__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __ror__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... + def __ror__(self: NDArray[bool_], other: _ArrayLikeBool_co, /) -> NDArray[bool_]: ... @overload def __ror__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... @overload @@ -3944,8 +3943,8 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): out: None = None, keepdims: SupportsIndex = False, *, - where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True - ) -> np.bool: ... 
+ where: builtins.bool | bool_ | ndarray[tuple[()], dtype[bool_]] = True + ) -> bool_: ... @overload def all[ScalarT: generic]( self, @@ -3954,7 +3953,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): out: ndarray[tuple[()], dtype[ScalarT]], keepdims: SupportsIndex = False, *, - where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True, + where: builtins.bool | bool_ | ndarray[tuple[()], dtype[bool_]] = True, ) -> ScalarT: ... @overload def all[ScalarT: generic]( @@ -3964,7 +3963,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): *, out: ndarray[tuple[()], dtype[ScalarT]], keepdims: SupportsIndex = False, - where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True, + where: builtins.bool | bool_ | ndarray[tuple[()], dtype[bool_]] = True, ) -> ScalarT: ... @overload @@ -3975,8 +3974,8 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): out: None = None, keepdims: SupportsIndex = False, *, - where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True - ) -> np.bool: ... + where: builtins.bool | bool_ | ndarray[tuple[()], dtype[bool_]] = True + ) -> bool_: ... @overload def any[ScalarT: generic]( self, @@ -3985,7 +3984,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): out: ndarray[tuple[()], dtype[ScalarT]], keepdims: SupportsIndex = False, *, - where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True, + where: builtins.bool | bool_ | ndarray[tuple[()], dtype[bool_]] = True, ) -> ScalarT: ... @overload def any[ScalarT: generic]( @@ -3995,7 +3994,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): *, out: ndarray[tuple[()], dtype[ScalarT]], keepdims: SupportsIndex = False, - where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True, + where: builtins.bool | bool_ | ndarray[tuple[()], dtype[bool_]] = True, ) -> ScalarT: ... 
# Keep `dtype` at the bottom to avoid name conflicts with `np.dtype` @@ -4058,36 +4057,36 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @property def real(self) -> Self: ... @property - def imag(self) -> np.bool[L[False]]: ... + def imag(self) -> bool_[L[False]]: ... @overload # mypy bug workaround: https://github.com/numpy/numpy/issues/29245 - def __new__(cls, value: Never, /) -> np.bool[builtins.bool]: ... + def __new__(cls, value: Never, /) -> bool_[builtins.bool]: ... @overload - def __new__(cls, value: _Falsy = ..., /) -> np.bool[L[False]]: ... + def __new__(cls, value: _Falsy = ..., /) -> bool_[L[False]]: ... @overload - def __new__(cls, value: _Truthy, /) -> np.bool[L[True]]: ... + def __new__(cls, value: _Truthy, /) -> bool_[L[True]]: ... @overload - def __new__(cls, value: object, /) -> np.bool[builtins.bool]: ... + def __new__(cls, value: object, /) -> bool_[builtins.bool]: ... def __class_getitem__(cls, type_arg: type | object, /) -> GenericAlias: ... def __bool__(self, /) -> _BoolItemT_co: ... @overload - def __int__(self: np.bool[L[False]], /) -> L[0]: ... + def __int__(self: bool_[L[False]], /) -> L[0]: ... @overload - def __int__(self: np.bool[L[True]], /) -> L[1]: ... + def __int__(self: bool_[L[True]], /) -> L[1]: ... @overload def __int__(self, /) -> L[0, 1]: ... def __abs__(self) -> Self: ... @overload - def __invert__(self: np.bool[L[False]], /) -> np.bool[L[True]]: ... + def __invert__(self: bool_[L[False]], /) -> bool_[L[True]]: ... @overload - def __invert__(self: np.bool[L[True]], /) -> np.bool[L[False]]: ... + def __invert__(self: bool_[L[True]], /) -> bool_[L[False]]: ... @overload - def __invert__(self, /) -> np.bool: ... + def __invert__(self, /) -> bool_: ... @overload def __add__[ScalarT: number](self, other: ScalarT, /) -> ScalarT: ... @@ -4276,45 +4275,45 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): def __rrshift__(self, other: int, /) -> int_: ... 
@overload - def __and__(self: np.bool[L[False]], other: builtins.bool | np.bool, /) -> np.bool[L[False]]: ... + def __and__(self: bool_[L[False]], other: builtins.bool | bool_, /) -> bool_[L[False]]: ... @overload - def __and__(self, other: L[False] | np.bool[L[False]], /) -> np.bool[L[False]]: ... + def __and__(self, other: L[False] | bool_[L[False]], /) -> bool_[L[False]]: ... @overload - def __and__(self, other: L[True] | np.bool[L[True]], /) -> Self: ... + def __and__(self, other: L[True] | bool_[L[True]], /) -> Self: ... @overload - def __and__(self, other: builtins.bool | np.bool, /) -> np.bool: ... + def __and__(self, other: builtins.bool | bool_, /) -> bool_: ... @overload def __and__[ScalarT: integer](self, other: ScalarT, /) -> ScalarT: ... @overload - def __and__(self, other: int, /) -> np.bool | intp: ... + def __and__(self, other: int, /) -> bool_ | intp: ... __rand__ = __and__ @overload - def __xor__[ItemT: builtins.bool](self: np.bool[L[False]], other: ItemT | np.bool[ItemT], /) -> np.bool[ItemT]: ... + def __xor__[ItemT: builtins.bool](self: bool_[L[False]], other: ItemT | bool_[ItemT], /) -> bool_[ItemT]: ... @overload - def __xor__(self: np.bool[L[True]], other: L[True] | np.bool[L[True]], /) -> np.bool[L[False]]: ... + def __xor__(self: bool_[L[True]], other: L[True] | bool_[L[True]], /) -> bool_[L[False]]: ... @overload - def __xor__(self, other: L[False] | np.bool[L[False]], /) -> Self: ... + def __xor__(self, other: L[False] | bool_[L[False]], /) -> Self: ... @overload - def __xor__(self, other: builtins.bool | np.bool, /) -> np.bool: ... + def __xor__(self, other: builtins.bool | bool_, /) -> bool_: ... @overload def __xor__[ScalarT: integer](self, other: ScalarT, /) -> ScalarT: ... @overload - def __xor__(self, other: int, /) -> np.bool | intp: ... + def __xor__(self, other: int, /) -> bool_ | intp: ... __rxor__ = __xor__ @overload - def __or__(self: np.bool[L[True]], other: builtins.bool | np.bool, /) -> np.bool[L[True]]: ... 
+ def __or__(self: bool_[L[True]], other: builtins.bool | bool_, /) -> bool_[L[True]]: ... @overload - def __or__(self, other: L[False] | np.bool[L[False]], /) -> Self: ... + def __or__(self, other: L[False] | bool_[L[False]], /) -> Self: ... @overload - def __or__(self, other: L[True] | np.bool[L[True]], /) -> np.bool[L[True]]: ... + def __or__(self, other: L[True] | bool_[L[True]], /) -> bool_[L[True]]: ... @overload - def __or__(self, other: builtins.bool | np.bool, /) -> np.bool: ... + def __or__(self, other: builtins.bool | bool_, /) -> bool_: ... @overload def __or__[ScalarT: integer](self, other: ScalarT, /) -> ScalarT: ... @overload - def __or__(self, other: int, /) -> np.bool | intp: ... + def __or__(self, other: int, /) -> bool_ | intp: ... __ror__ = __or__ @overload @@ -5487,7 +5486,7 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] @overload def __mul__(self, x: _IntLike_co, /) -> Self: ... @overload - def __mul__(self, x: float | np.floating, /) -> timedelta64[_TD64ItemT_co | None]: ... + def __mul__(self, x: float | floating, /) -> timedelta64[_TD64ItemT_co | None]: ... @overload def __mul__(self, x: _FloatLike_co, /) -> timedelta64: ... __rmul__ = __mul__ @@ -6265,4 +6264,4 @@ def from_dlpack( *, device: L["cpu"] | None = None, copy: builtins.bool | None = None, -) -> NDArray[number | np.bool]: ... +) -> NDArray[number | bool_]: ... 
From d1bd5371258a5391295e901b0adbf5b8c5669c58 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 13 Dec 2025 23:29:35 +0100 Subject: [PATCH 1018/1718] TYP: alias `builtins.bool` as `py_bool` in `__init__.pyi` --- numpy/__init__.pyi | 425 +++++++++++++++++++++++---------------------- 1 file changed, 213 insertions(+), 212 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index af13ddd20cce..bae953b7194e 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -5,9 +5,10 @@ import datetime as dt import inspect import sys from abc import abstractmethod -from types import EllipsisType, ModuleType, TracebackType, MappingProxyType, GenericAlias +from builtins import bool as py_bool from decimal import Decimal from fractions import Fraction +from types import EllipsisType, ModuleType, TracebackType, MappingProxyType, GenericAlias from uuid import UUID from numpy.__config__ import show as show_config @@ -713,7 +714,7 @@ _NBitT1 = TypeVar("_NBitT1", bound=NBitBase, default=Any) # pyright: ignore[rep _NBitT2 = TypeVar("_NBitT2", bound=NBitBase, default=_NBitT1) # pyright: ignore[reportDeprecated] _ItemT_co = TypeVar("_ItemT_co", default=Any, covariant=True) -_BoolItemT_co = TypeVar("_BoolItemT_co", bound=builtins.bool, default=builtins.bool, covariant=True) +_BoolItemT_co = TypeVar("_BoolItemT_co", bound=py_bool, default=py_bool, covariant=True) _NumberItemT_co = TypeVar("_NumberItemT_co", bound=complex, default=int | float | complex, covariant=True) _InexactItemT_co = TypeVar("_InexactItemT_co", bound=complex, default=float | complex, covariant=True) _FlexibleItemT_co = TypeVar( @@ -750,7 +751,7 @@ type _ArrayNumeric = NDArray[number | timedelta64 | object_] type _ScalarNotObject = bool_ | number | flexible | datetime64 | timedelta64 type _Float64_co = float | floating[_64Bit] | float32 | float16 | integer | bool_ -type _Complex64_co = number[_32Bit] | number[_16Bit] | number[_8Bit] | builtins.bool | bool_ +type _Complex64_co = number[_32Bit] | 
number[_16Bit] | number[_8Bit] | py_bool | bool_ type _Complex128_co = complex | number[_64Bit] | _Complex64_co type _ToIndex = SupportsIndex | slice | EllipsisType | _ArrayLikeInt_co | None @@ -1061,7 +1062,7 @@ euler_gamma: Final[float] = ... pi: Final[float] = ... inf: Final[float] = ... nan: Final[float] = ... -little_endian: Final[builtins.bool] = ... +little_endian: Final[py_bool] = ... False_: Final[bool_[L[False]]] = ... True_: Final[bool_[L[True]]] = ... newaxis: Final[None] = None @@ -1097,8 +1098,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: type[float64 | ct.c_double] | _Float64Codes | None, - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ... ) -> dtype[float64]: ... @@ -1109,8 +1110,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__[ScalarT: generic]( cls, dtype: _DTypeLike[ScalarT], - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[ScalarT]: ... @@ -1128,18 +1129,18 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 @overload def __new__( cls, - dtype: type[builtins.bool | bool_ | ct.c_bool] | _BoolCodes, - align: builtins.bool = False, - copy: builtins.bool = False, + dtype: type[py_bool | bool_ | ct.c_bool] | _BoolCodes, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[str, Any] = ..., ) -> dtype[bool_]: ... @overload def __new__( cls, - dtype: type[int], # also accepts `type[builtins.bool]` - align: builtins.bool = False, - copy: builtins.bool = False, + dtype: type[int], # also accepts `type[py_bool]` + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[str, Any] = ..., ) -> dtype[int_ | Any]: ... 
@@ -1147,8 +1148,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: type[float], # also accepts `type[int | bool]` - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[str, Any] = ..., ) -> dtype[float64 | Any]: ... @@ -1156,8 +1157,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: type[complex], # also accepts `type[float | int | bool]` - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[str, Any] = ..., ) -> dtype[complex128 | Any]: ... @@ -1165,8 +1166,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: type[bytes | ct.c_char] | _BytesCodes, - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[str, Any] = ..., ) -> dtype[bytes_]: ... @@ -1174,8 +1175,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: type[str] | _StrCodes, - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[str, Any] = ..., ) -> dtype[str_]: ... @@ -1189,8 +1190,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: type[void | memoryview] | _VoidDTypeLike | _VoidCodes, - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[str, Any] = ..., ) -> dtype[void]: ... 
@@ -1200,8 +1201,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: type[object_ | _BuiltinObjectLike | ct.py_object[Any]] | _ObjectCodes, - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[str, Any] = ..., ) -> dtype[object_]: ... @@ -1211,8 +1212,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: _UInt8Codes | type[ct.c_uint8], - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[uint8]: ... @@ -1220,8 +1221,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: _UInt16Codes | type[ct.c_uint16 | ct.c_ushort], - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[uint16]: ... @@ -1229,8 +1230,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: _UInt32Codes | _UIntCCodes | type[ct.c_uint32 | ct.c_uint], - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[uint32]: ... @@ -1238,8 +1239,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: _UInt64Codes | _ULongLongCodes | type[ct.c_uint64 | ct.c_ulonglong], - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[uint64]: ... 
@@ -1247,8 +1248,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: _UIntPCodes | type[ct.c_void_p | ct.c_size_t], - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[uintp]: ... @@ -1256,8 +1257,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: _ULongCodes | type[ct.c_ulong], - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[uint32 | uint64]: ... @@ -1267,8 +1268,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: _Int8Codes | type[ct.c_int8], - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[int8]: ... @@ -1276,8 +1277,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: _Int16Codes | type[ct.c_int16 | ct.c_short], - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[int16]: ... @@ -1285,8 +1286,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: _Int32Codes | _IntCCodes | type[ct.c_int32 | ct.c_int], - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[int32]: ... 
@@ -1294,8 +1295,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: _Int64Codes | _LongLongCodes | type[ct.c_int64 | ct.c_longlong], - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[int64]: ... @@ -1303,8 +1304,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: _IntPCodes | type[intp | ct.c_ssize_t], - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[intp]: ... @@ -1312,8 +1313,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: _LongCodes | type[ct.c_long], - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[int32 | int64]: ... @@ -1323,8 +1324,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: _Float16Codes, - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[float16]: ... @@ -1332,8 +1333,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: _Float32Codes | type[ct.c_float], - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[float32]: ... 
@@ -1342,8 +1343,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: _LongDoubleCodes | type[ct.c_longdouble], - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[longdouble]: ... @@ -1354,8 +1355,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: _Complex64Codes | type[ct.c_float_complex], - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[complex64]: ... @@ -1363,8 +1364,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: _Complex128Codes | type[ct.c_double_complex], - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[complex128]: ... @@ -1372,8 +1373,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: _CLongDoubleCodes | type[ct.c_longdouble_complex], - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[clongdouble]: ... @@ -1382,8 +1383,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: _Complex64Codes, - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[complex64]: ... 
@@ -1391,8 +1392,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: _Complex128Codes, - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[complex128]: ... @@ -1400,8 +1401,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: _CLongDoubleCodes, - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[clongdouble]: ... @@ -1411,8 +1412,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: _TD64Codes, - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[timedelta64]: ... @@ -1420,8 +1421,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: _DT64Codes, - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[datetime64]: ... @@ -1431,8 +1432,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: dtypes.StringDType | _StringCodes, - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtypes.StringDType: ... @@ -1442,8 +1443,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: _UnsignedIntegerCodes | _UnsignedIntegerCType, - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[unsignedinteger]: ... 
@@ -1451,8 +1452,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: _SignedIntegerCodes | _SignedIntegerCType, - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[signedinteger]: ... @@ -1460,8 +1461,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: _IntegerCodes | _IntegerCType, - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[integer]: ... @@ -1469,8 +1470,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: _FloatingCodes | _FloatingCType, - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[floating]: ... @@ -1478,8 +1479,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: _ComplexFloatingCodes, - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[complexfloating]: ... @@ -1487,8 +1488,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: _InexactCodes | _FloatingCType, - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[inexact]: ... 
@@ -1496,8 +1497,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: _CharacterCodes | type[bytes | builtins.str | ct.c_char], - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[str, Any] = ..., ) -> dtype[character]: ... @@ -1507,8 +1508,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: builtins.str, - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype: ... @@ -1523,8 +1524,8 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 def __new__( cls, dtype: type[object], - align: builtins.bool = False, - copy: builtins.bool = False, + align: py_bool = False, + copy: py_bool = False, *, metadata: dict[builtins.str, Any] = ..., ) -> dtype[object_ | Any]: ... @@ -1552,16 +1553,16 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 @overload def __rmul__(self, value: SupportsIndex, /) -> dtype: ... - def __gt__(self, other: DTypeLike | None, /) -> builtins.bool: ... - def __ge__(self, other: DTypeLike | None, /) -> builtins.bool: ... - def __lt__(self, other: DTypeLike | None, /) -> builtins.bool: ... - def __le__(self, other: DTypeLike | None, /) -> builtins.bool: ... + def __gt__(self, other: DTypeLike | None, /) -> py_bool: ... + def __ge__(self, other: DTypeLike | None, /) -> py_bool: ... + def __lt__(self, other: DTypeLike | None, /) -> py_bool: ... + def __le__(self, other: DTypeLike | None, /) -> py_bool: ... # Explicitly defined `__eq__` and `__ne__` to get around mypy's # `strict_equality` option; even though their signatures are # identical to their `object`-based counterpart - def __eq__(self, other: Any, /) -> builtins.bool: ... - def __ne__(self, other: Any, /) -> builtins.bool: ... 
+ def __eq__(self, other: Any, /) -> py_bool: ... + def __ne__(self, other: Any, /) -> py_bool: ... @property def alignment(self) -> int: ... @@ -1578,13 +1579,13 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 @property def flags(self) -> int: ... @property - def hasobject(self) -> builtins.bool: ... + def hasobject(self) -> py_bool: ... @property def isbuiltin(self) -> _DTypeBuiltinKind: ... @property - def isnative(self) -> builtins.bool: ... + def isnative(self) -> py_bool: ... @property - def isalignedstruct(self) -> builtins.bool: ... + def isalignedstruct(self) -> py_bool: ... @property def itemsize(self) -> int: ... @property @@ -1686,7 +1687,7 @@ class _ArrayOrScalarCommon: @property def device(self) -> L["cpu"]: ... - def __bool__(self, /) -> builtins.bool: ... + def __bool__(self, /) -> py_bool: ... def __int__(self, /) -> int: ... def __float__(self, /) -> float: ... def __copy__(self) -> Self: ... @@ -1715,9 +1716,9 @@ class _ArrayOrScalarCommon: self, /, *, - write: builtins.bool | None = None, - align: builtins.bool | None = None, - uic: builtins.bool | None = None, + write: py_bool | None = None, + align: py_bool | None = None, + uic: py_bool | None = None, ) -> None: ... @property @@ -1744,33 +1745,33 @@ class _ArrayOrScalarCommon: kind: _SortKind | None = ..., order: str | Sequence[str] | None = ..., *, - stable: builtins.bool | None = ..., + stable: py_bool | None = ..., ) -> NDArray[intp]: ... @overload # axis=None (default), out=None (default), keepdims=False (default) def argmax(self, /, axis: None = None, out: None = None, *, keepdims: L[False] = False) -> intp: ... @overload # axis=index, out=None (default) - def argmax(self, /, axis: SupportsIndex, out: None = None, *, keepdims: builtins.bool = False) -> Any: ... + def argmax(self, /, axis: SupportsIndex, out: None = None, *, keepdims: py_bool = False) -> Any: ... 
@overload # axis=index, out=ndarray def argmax[OutT: _ArrayInt_co]( - self, /, axis: SupportsIndex | None, out: OutT, *, keepdims: builtins.bool = False + self, /, axis: SupportsIndex | None, out: OutT, *, keepdims: py_bool = False ) -> OutT: ... @overload def argmax[OutT: _ArrayInt_co]( - self, /, axis: SupportsIndex | None = None, *, out: OutT, keepdims: builtins.bool = False + self, /, axis: SupportsIndex | None = None, *, out: OutT, keepdims: py_bool = False ) -> OutT: ... @overload # axis=None (default), out=None (default), keepdims=False (default) def argmin(self, /, axis: None = None, out: None = None, *, keepdims: L[False] = False) -> intp: ... @overload # axis=index, out=None (default) - def argmin(self, /, axis: SupportsIndex, out: None = None, *, keepdims: builtins.bool = False) -> Any: ... + def argmin(self, /, axis: SupportsIndex, out: None = None, *, keepdims: py_bool = False) -> Any: ... @overload # axis=index, out=ndarray def argmin[OutT: _ArrayInt_co]( - self, /, axis: SupportsIndex | None, out: OutT, *, keepdims: builtins.bool = False + self, /, axis: SupportsIndex | None, out: OutT, *, keepdims: py_bool = False ) -> OutT: ... @overload def argmin[OutT: _ArrayInt_co]( - self, /, axis: SupportsIndex | None = None, *, out: OutT, keepdims: builtins.bool = False + self, /, axis: SupportsIndex | None = None, *, out: OutT, keepdims: py_bool = False ) -> OutT: ... # Keep in sync with `MaskedArray.round` @@ -1838,7 +1839,7 @@ class _ArrayOrScalarCommon: axis: _ShapeLike | None = None, out: None = None, *, - keepdims: builtins.bool | _NoValueType = ..., + keepdims: py_bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... 
@@ -1849,7 +1850,7 @@ class _ArrayOrScalarCommon: axis: _ShapeLike | None, out: ArrayT, *, - keepdims: builtins.bool | _NoValueType = ..., + keepdims: py_bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., ) -> ArrayT: ... @@ -1860,7 +1861,7 @@ class _ArrayOrScalarCommon: axis: _ShapeLike | None = None, *, out: ArrayT, - keepdims: builtins.bool | _NoValueType = ..., + keepdims: py_bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., ) -> ArrayT: ... @@ -1872,7 +1873,7 @@ class _ArrayOrScalarCommon: axis: _ShapeLike | None = None, out: None = None, *, - keepdims: builtins.bool | _NoValueType = ..., + keepdims: py_bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @@ -1883,7 +1884,7 @@ class _ArrayOrScalarCommon: axis: _ShapeLike | None, out: ArrayT, *, - keepdims: builtins.bool | _NoValueType = ..., + keepdims: py_bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., ) -> ArrayT: ... @@ -1894,7 +1895,7 @@ class _ArrayOrScalarCommon: axis: _ShapeLike | None = None, *, out: ArrayT, - keepdims: builtins.bool | _NoValueType = ..., + keepdims: py_bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., ) -> ArrayT: ... @@ -1907,7 +1908,7 @@ class _ArrayOrScalarCommon: dtype: DTypeLike | None = None, out: None = None, *, - keepdims: builtins.bool | _NoValueType = ..., + keepdims: py_bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... 
@@ -1919,7 +1920,7 @@ class _ArrayOrScalarCommon: dtype: DTypeLike | None, out: ArrayT, *, - keepdims: builtins.bool | _NoValueType = ..., + keepdims: py_bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., ) -> ArrayT: ... @@ -1931,7 +1932,7 @@ class _ArrayOrScalarCommon: dtype: DTypeLike | None = None, *, out: ArrayT, - keepdims: builtins.bool | _NoValueType = ..., + keepdims: py_bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., ) -> ArrayT: ... @@ -1944,7 +1945,7 @@ class _ArrayOrScalarCommon: dtype: DTypeLike | None = None, out: None = None, *, - keepdims: builtins.bool | _NoValueType = ..., + keepdims: py_bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @@ -1956,7 +1957,7 @@ class _ArrayOrScalarCommon: dtype: DTypeLike | None, out: ArrayT, *, - keepdims: builtins.bool | _NoValueType = ..., + keepdims: py_bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., ) -> ArrayT: ... @@ -1968,7 +1969,7 @@ class _ArrayOrScalarCommon: dtype: DTypeLike | None = None, *, out: ArrayT, - keepdims: builtins.bool | _NoValueType = ..., + keepdims: py_bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., ) -> ArrayT: ... @@ -1980,7 +1981,7 @@ class _ArrayOrScalarCommon: dtype: DTypeLike | None = None, out: None = None, *, - keepdims: builtins.bool | _NoValueType = ..., + keepdims: py_bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload @@ -1991,7 +1992,7 @@ class _ArrayOrScalarCommon: dtype: DTypeLike | None, out: ArrayT, *, - keepdims: builtins.bool | _NoValueType = ..., + keepdims: py_bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., ) -> ArrayT: ... 
@overload @@ -2002,7 +2003,7 @@ class _ArrayOrScalarCommon: dtype: DTypeLike | None = None, *, out: ArrayT, - keepdims: builtins.bool | _NoValueType = ..., + keepdims: py_bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., ) -> ArrayT: ... @@ -2014,7 +2015,7 @@ class _ArrayOrScalarCommon: out: None = None, ddof: float = 0, *, - keepdims: builtins.bool | _NoValueType = ..., + keepdims: py_bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeNumber_co | _NoValueType = ..., correction: float | _NoValueType = ..., @@ -2027,7 +2028,7 @@ class _ArrayOrScalarCommon: out: ArrayT, ddof: float = 0, *, - keepdims: builtins.bool | _NoValueType = ..., + keepdims: py_bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeNumber_co | _NoValueType = ..., correction: float | _NoValueType = ..., @@ -2040,7 +2041,7 @@ class _ArrayOrScalarCommon: *, out: ArrayT, ddof: float = 0, - keepdims: builtins.bool | _NoValueType = ..., + keepdims: py_bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeNumber_co | _NoValueType = ..., correction: float | _NoValueType = ..., @@ -2054,7 +2055,7 @@ class _ArrayOrScalarCommon: out: None = None, ddof: float = 0, *, - keepdims: builtins.bool | _NoValueType = ..., + keepdims: py_bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeNumber_co | _NoValueType = ..., correction: float | _NoValueType = ..., @@ -2067,7 +2068,7 @@ class _ArrayOrScalarCommon: out: ArrayT, ddof: float = 0, *, - keepdims: builtins.bool | _NoValueType = ..., + keepdims: py_bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeNumber_co | _NoValueType = ..., correction: float | _NoValueType = ..., @@ -2080,7 +2081,7 @@ class _ArrayOrScalarCommon: *, out: ArrayT, ddof: float = 0, - keepdims: builtins.bool | _NoValueType = ..., + keepdims: py_bool | _NoValueType = ..., where: 
_ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeNumber_co | _NoValueType = ..., correction: float | _NoValueType = ..., @@ -2120,9 +2121,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __class_getitem__(cls, item: Any, /) -> GenericAlias: ... @overload - def __array__(self, dtype: None = None, /, *, copy: builtins.bool | None = None) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __array__(self, dtype: None = None, /, *, copy: py_bool | None = None) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __array__[DTypeT: dtype](self, dtype: DTypeT, /, *, copy: builtins.bool | None = None) -> ndarray[_ShapeT_co, DTypeT]: ... + def __array__[DTypeT: dtype](self, dtype: DTypeT, /, *, copy: py_bool | None = None) -> ndarray[_ShapeT_co, DTypeT]: ... def __array_ufunc__( self, @@ -2149,7 +2150,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): self, array: ndarray[ShapeT, DTypeT], context: tuple[ufunc, tuple[Any, ...], int] | None = ..., - return_scalar: builtins.bool = ..., + return_scalar: py_bool = ..., /, ) -> ndarray[ShapeT, DTypeT]: ... @@ -2230,7 +2231,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def strides(self, value: _ShapeLike) -> None: ... # - def byteswap(self, inplace: builtins.bool = ...) -> Self: ... + def byteswap(self, inplace: py_bool = ...) -> Self: ... @property def flat(self) -> flatiter[Self]: ... @@ -2259,9 +2260,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def tolist(self, /) -> Any: ... @overload - def resize(self, new_shape: _ShapeLike, /, *, refcheck: builtins.bool = True) -> None: ... + def resize(self, new_shape: _ShapeLike, /, *, refcheck: py_bool = True) -> None: ... @overload - def resize(self, /, *new_shape: SupportsIndex, refcheck: builtins.bool = True) -> None: ... + def resize(self, /, *new_shape: SupportsIndex, refcheck: py_bool = True) -> None: ... 
# keep in sync with `ma.MaskedArray.squeeze` def squeeze( @@ -2435,7 +2436,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): kind: _SortKind | None = None, order: str | Sequence[str] | None = None, *, - stable: builtins.bool | None = None, + stable: py_bool | None = None, ) -> None: ... # Keep in sync with `MaskedArray.trace` @@ -2522,7 +2523,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.reshape` # NOTE: reshape also accepts negative integers, so we can't use integer literals @overload # (None) - def reshape(self, shape: None, /, *, order: _OrderACF = "C", copy: builtins.bool | None = None) -> Self: ... + def reshape(self, shape: None, /, *, order: _OrderACF = "C", copy: py_bool | None = None) -> Self: ... @overload # (empty_sequence) def reshape( # mypy false positive self, @@ -2530,7 +2531,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): /, *, order: _OrderACF = "C", - copy: builtins.bool | None = None, + copy: py_bool | None = None, ) -> ndarray[tuple[()], _DTypeT_co]: ... @overload # (() | (int) | (int, int) | ....) # up to 8-d def reshape[ @@ -2551,7 +2552,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): /, *, order: _OrderACF = "C", - copy: builtins.bool | None = None, + copy: py_bool | None = None, ) -> ndarray[AnyShapeT, _DTypeT_co]: ... @overload # (index) def reshape( @@ -2560,7 +2561,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): /, *, order: _OrderACF = "C", - copy: builtins.bool | None = None, + copy: py_bool | None = None, ) -> ndarray[tuple[int], _DTypeT_co]: ... @overload # (index, index) def reshape( @@ -2570,7 +2571,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): /, *, order: _OrderACF = "C", - copy: builtins.bool | None = None, + copy: py_bool | None = None, ) -> ndarray[tuple[int, int], _DTypeT_co]: ... 
@overload # (index, index, index) def reshape( @@ -2581,7 +2582,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): /, *, order: _OrderACF = "C", - copy: builtins.bool | None = None, + copy: py_bool | None = None, ) -> ndarray[tuple[int, int, int], _DTypeT_co]: ... @overload # (index, index, index, index) def reshape( @@ -2593,7 +2594,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): /, *, order: _OrderACF = "C", - copy: builtins.bool | None = None, + copy: py_bool | None = None, ) -> ndarray[tuple[int, int, int, int], _DTypeT_co]: ... @overload # (int, *(index, ...)) def reshape( @@ -2602,7 +2603,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): /, *shape: SupportsIndex, order: _OrderACF = "C", - copy: builtins.bool | None = None, + copy: py_bool | None = None, ) -> ndarray[_AnyShape, _DTypeT_co]: ... @overload # (sequence[index]) def reshape( @@ -2611,7 +2612,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): /, *, order: _OrderACF = "C", - copy: builtins.bool | None = None, + copy: py_bool | None = None, ) -> ndarray[_AnyShape, _DTypeT_co]: ... @overload @@ -2620,8 +2621,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): dtype: _DTypeLike[ScalarT], order: _OrderKACF = ..., casting: _CastingKind = ..., - subok: builtins.bool = ..., - copy: builtins.bool | _CopyMode = ..., + subok: py_bool = ..., + copy: py_bool | _CopyMode = ..., ) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... @overload def astype( @@ -2629,8 +2630,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): dtype: DTypeLike | None, order: _OrderKACF = ..., casting: _CastingKind = ..., - subok: builtins.bool = ..., - copy: builtins.bool | _CopyMode = ..., + subok: py_bool = ..., + copy: py_bool | _CopyMode = ..., ) -> ndarray[_ShapeT_co, dtype]: ... 
# @@ -2659,7 +2660,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __complex__(self: NDArray[number | bool_ | object_], /) -> complex: ... def __len__(self) -> int: ... - def __contains__(self, value: object, /) -> builtins.bool: ... + def __contains__(self, value: object, /) -> py_bool: ... # NOTE: This weird `Never` tuple works around a strange mypy issue where it assigns # `tuple[int]` to `tuple[Never]` or `tuple[int, int]` to `tuple[Never, Never]`. @@ -3644,7 +3645,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): stream: int | Any | None = None, max_version: tuple[int, int] | None = None, dl_device: tuple[int, int] | None = None, - copy: builtins.bool | None = None, + copy: py_bool | None = None, ) -> CapsuleType: ... def __dlpack_device__(self, /) -> tuple[L[1], L[0]]: ... @@ -3764,9 +3765,9 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): # NOTE: this wont't raise, but won't do anything either @overload - def resize(self, /, *, refcheck: builtins.bool = True) -> None: ... + def resize(self, /, *, refcheck: py_bool = True) -> None: ... @overload - def resize(self, new_shape: L[0, -1] | tuple[L[0, -1]] | tuple[()], /, *, refcheck: builtins.bool = True) -> None: ... + def resize(self, new_shape: L[0, -1] | tuple[L[0, -1]] | tuple[()], /, *, refcheck: py_bool = True) -> None: ... # def byteswap(self, /, inplace: L[False] = False) -> Self: ... @@ -3779,8 +3780,8 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): dtype: _DTypeLike[ScalarT], order: _OrderKACF = "K", casting: _CastingKind = "unsafe", - subok: builtins.bool = True, - copy: builtins.bool | _CopyMode = True, + subok: py_bool = True, + copy: py_bool | _CopyMode = True, ) -> ScalarT: ... 
@overload def astype( @@ -3789,8 +3790,8 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): dtype: DTypeLike | None, order: _OrderKACF = "K", casting: _CastingKind = "unsafe", - subok: builtins.bool = True, - copy: builtins.bool | _CopyMode = True, + subok: py_bool = True, + copy: py_bool | _CopyMode = True, ) -> Incomplete: ... # NOTE: `view` will perform a 0D->scalar cast, @@ -3856,7 +3857,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): /, *, order: _OrderACF = "C", - copy: builtins.bool | None = None, + copy: py_bool | None = None, ) -> Self: ... @overload # (ShapeT: (index, ...)) def reshape[ShapeT: tuple[int, *tuple[int, ...]]]( @@ -3865,7 +3866,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): /, *, order: _OrderACF = "C", - copy: builtins.bool | None = None, + copy: py_bool | None = None, ) -> ndarray[ShapeT, dtype[Self]]: ... @overload # (Sequence[index, ...]) # not recommended def reshape( @@ -3874,7 +3875,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): /, *, order: _OrderACF = "C", - copy: builtins.bool | None = None, + copy: py_bool | None = None, ) -> NDArray[Self] | Any: ... @overload # _(index) def reshape( @@ -3883,7 +3884,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): /, *, order: _OrderACF = "C", - copy: builtins.bool | None = None, + copy: py_bool | None = None, ) -> ndarray[tuple[int], dtype[Self]]: ... @overload # _(index, index) def reshape( @@ -3893,7 +3894,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): /, *, order: _OrderACF = "C", - copy: builtins.bool | None = None, + copy: py_bool | None = None, ) -> ndarray[tuple[int, int], dtype[Self]]: ... @overload # _(index, index, index) def reshape( @@ -3904,7 +3905,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): /, *, order: _OrderACF = "C", - copy: builtins.bool | None = None, + copy: py_bool | None = None, ) -> ndarray[tuple[int, int, int], dtype[Self]]: ... 
@overload # _(index, index, index, index) def reshape( @@ -3916,7 +3917,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): /, *, order: _OrderACF = "C", - copy: builtins.bool | None = None, + copy: py_bool | None = None, ) -> ndarray[tuple[int, int, int, int], dtype[Self]]: ... @overload # _(index, index, index, index, index, *index) # ndim >= 5 def reshape( @@ -3929,7 +3930,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): /, *sizes6_: SupportsIndex, order: _OrderACF = "C", - copy: builtins.bool | None = None, + copy: py_bool | None = None, ) -> ndarray[tuple[int, int, int, int, int, *tuple[int, ...]], dtype[Self]]: ... def squeeze(self, axis: L[0] | tuple[()] | None = ...) -> Self: ... @@ -3943,7 +3944,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): out: None = None, keepdims: SupportsIndex = False, *, - where: builtins.bool | bool_ | ndarray[tuple[()], dtype[bool_]] = True + where: py_bool | bool_ | ndarray[tuple[()], dtype[bool_]] = True ) -> bool_: ... @overload def all[ScalarT: generic]( @@ -3953,7 +3954,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): out: ndarray[tuple[()], dtype[ScalarT]], keepdims: SupportsIndex = False, *, - where: builtins.bool | bool_ | ndarray[tuple[()], dtype[bool_]] = True, + where: py_bool | bool_ | ndarray[tuple[()], dtype[bool_]] = True, ) -> ScalarT: ... @overload def all[ScalarT: generic]( @@ -3963,7 +3964,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): *, out: ndarray[tuple[()], dtype[ScalarT]], keepdims: SupportsIndex = False, - where: builtins.bool | bool_ | ndarray[tuple[()], dtype[bool_]] = True, + where: py_bool | bool_ | ndarray[tuple[()], dtype[bool_]] = True, ) -> ScalarT: ... 
@overload @@ -3974,7 +3975,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): out: None = None, keepdims: SupportsIndex = False, *, - where: builtins.bool | bool_ | ndarray[tuple[()], dtype[bool_]] = True + where: py_bool | bool_ | ndarray[tuple[()], dtype[bool_]] = True ) -> bool_: ... @overload def any[ScalarT: generic]( @@ -3984,7 +3985,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): out: ndarray[tuple[()], dtype[ScalarT]], keepdims: SupportsIndex = False, *, - where: builtins.bool | bool_ | ndarray[tuple[()], dtype[bool_]] = True, + where: py_bool | bool_ | ndarray[tuple[()], dtype[bool_]] = True, ) -> ScalarT: ... @overload def any[ScalarT: generic]( @@ -3994,7 +3995,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): *, out: ndarray[tuple[()], dtype[ScalarT]], keepdims: SupportsIndex = False, - where: builtins.bool | bool_ | ndarray[tuple[()], dtype[bool_]] = True, + where: py_bool | bool_ | ndarray[tuple[()], dtype[bool_]] = True, ) -> ScalarT: ... # Keep `dtype` at the bottom to avoid name conflicts with `np.dtype` @@ -4060,13 +4061,13 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): def imag(self) -> bool_[L[False]]: ... @overload # mypy bug workaround: https://github.com/numpy/numpy/issues/29245 - def __new__(cls, value: Never, /) -> bool_[builtins.bool]: ... + def __new__(cls, value: Never, /) -> bool_[py_bool]: ... @overload def __new__(cls, value: _Falsy = ..., /) -> bool_[L[False]]: ... @overload def __new__(cls, value: _Truthy, /) -> bool_[L[True]]: ... @overload - def __new__(cls, value: object, /) -> bool_[builtins.bool]: ... + def __new__(cls, value: object, /) -> bool_[py_bool]: ... def __class_getitem__(cls, type_arg: type | object, /) -> GenericAlias: ... @@ -4091,7 +4092,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @overload def __add__[ScalarT: number](self, other: ScalarT, /) -> ScalarT: ... @overload - def __add__(self, other: builtins.bool | bool_, /) -> bool_: ... 
+ def __add__(self, other: py_bool | bool_, /) -> bool_: ... @overload def __add__(self, other: int, /) -> int_: ... @overload @@ -4102,7 +4103,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @overload def __radd__[ScalarT: number](self, other: ScalarT, /) -> ScalarT: ... @overload - def __radd__(self, other: builtins.bool, /) -> bool_: ... + def __radd__(self, other: py_bool, /) -> bool_: ... @overload def __radd__(self, other: int, /) -> int_: ... @overload @@ -4131,7 +4132,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @overload def __mul__[ScalarT: number](self, other: ScalarT, /) -> ScalarT: ... @overload - def __mul__(self, other: builtins.bool | bool_, /) -> bool_: ... + def __mul__(self, other: py_bool | bool_, /) -> bool_: ... @overload def __mul__(self, other: int, /) -> int_: ... @overload @@ -4142,7 +4143,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @overload def __rmul__[ScalarT: number](self, other: ScalarT, /) -> ScalarT: ... @overload - def __rmul__(self, other: builtins.bool, /) -> bool_: ... + def __rmul__(self, other: py_bool, /) -> bool_: ... @overload def __rmul__(self, other: int, /) -> int_: ... @overload @@ -4153,7 +4154,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @overload def __pow__[ScalarT: number](self, other: ScalarT, mod: None = None, /) -> ScalarT: ... @overload - def __pow__(self, other: builtins.bool | bool_, mod: None = None, /) -> int8: ... + def __pow__(self, other: py_bool | bool_, mod: None = None, /) -> int8: ... @overload def __pow__(self, other: int, mod: None = None, /) -> int_: ... @overload @@ -4164,7 +4165,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @overload def __rpow__[ScalarT: number](self, other: ScalarT, mod: None = None, /) -> ScalarT: ... @overload - def __rpow__(self, other: builtins.bool, mod: None = None, /) -> int8: ... + def __rpow__(self, other: py_bool, mod: None = None, /) -> int8: ... 
@overload def __rpow__(self, other: int, mod: None = None, /) -> int_: ... @overload @@ -4189,7 +4190,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @overload def __floordiv__[ScalarT: integer | floating](self, other: ScalarT, /) -> ScalarT: ... @overload - def __floordiv__(self, other: builtins.bool | bool_, /) -> int8: ... + def __floordiv__(self, other: py_bool | bool_, /) -> int8: ... @overload def __floordiv__(self, other: int, /) -> int_: ... @overload @@ -4198,7 +4199,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @overload def __rfloordiv__[ScalarT: integer | floating](self, other: ScalarT, /) -> ScalarT: ... @overload - def __rfloordiv__(self, other: builtins.bool, /) -> int8: ... + def __rfloordiv__(self, other: py_bool, /) -> int8: ... @overload def __rfloordiv__(self, other: int, /) -> int_: ... @overload @@ -4208,7 +4209,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @overload def __mod__[ScalarT: integer | floating](self, other: ScalarT, /) -> ScalarT: ... @overload - def __mod__(self, other: builtins.bool | bool_, /) -> int8: ... + def __mod__(self, other: py_bool | bool_, /) -> int8: ... @overload def __mod__(self, other: int, /) -> int_: ... @overload @@ -4218,7 +4219,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @overload def __rmod__[ScalarT: integer | floating](self, other: ScalarT, /) -> ScalarT: ... @overload - def __rmod__(self, other: builtins.bool, /) -> int8: ... + def __rmod__(self, other: py_bool, /) -> int8: ... @overload def __rmod__(self, other: int, /) -> int_: ... @overload @@ -4228,7 +4229,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @overload def __divmod__[ScalarT: integer | floating](self, other: ScalarT, /) -> _2Tuple[ScalarT]: ... @overload - def __divmod__(self, other: builtins.bool | bool_, /) -> _2Tuple[int8]: ... + def __divmod__(self, other: py_bool | bool_, /) -> _2Tuple[int8]: ... 
@overload def __divmod__(self, other: int, /) -> _2Tuple[int_]: ... @overload @@ -4238,7 +4239,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @overload def __rdivmod__[ScalarT: integer | floating](self, other: ScalarT, /) -> _2Tuple[ScalarT]: ... @overload - def __rdivmod__(self, other: builtins.bool, /) -> _2Tuple[int8]: ... + def __rdivmod__(self, other: py_bool, /) -> _2Tuple[int8]: ... @overload def __rdivmod__(self, other: int, /) -> _2Tuple[int_]: ... @overload @@ -4247,14 +4248,14 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @overload def __lshift__[ScalarT: integer](self, other: ScalarT, /) -> ScalarT: ... @overload - def __lshift__(self, other: builtins.bool | bool_, /) -> int8: ... + def __lshift__(self, other: py_bool | bool_, /) -> int8: ... @overload def __lshift__(self, other: int, /) -> int_: ... @overload def __rlshift__[ScalarT: integer](self, other: ScalarT, /) -> ScalarT: ... @overload - def __rlshift__(self, other: builtins.bool, /) -> int8: ... + def __rlshift__(self, other: py_bool, /) -> int8: ... @overload def __rlshift__(self, other: int, /) -> int_: ... @@ -4262,7 +4263,7 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @overload def __rshift__[ScalarT: integer](self, other: ScalarT, /) -> ScalarT: ... @overload - def __rshift__(self, other: builtins.bool | bool_, /) -> int8: ... + def __rshift__(self, other: py_bool | bool_, /) -> int8: ... @overload def __rshift__(self, other: int, /) -> int_: ... @@ -4270,18 +4271,18 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @overload def __rrshift__[ScalarT: integer](self, other: ScalarT, /) -> ScalarT: ... @overload - def __rrshift__(self, other: builtins.bool, /) -> int8: ... + def __rrshift__(self, other: py_bool, /) -> int8: ... @overload def __rrshift__(self, other: int, /) -> int_: ... @overload - def __and__(self: bool_[L[False]], other: builtins.bool | bool_, /) -> bool_[L[False]]: ... 
+ def __and__(self: bool_[L[False]], other: py_bool | bool_, /) -> bool_[L[False]]: ... @overload def __and__(self, other: L[False] | bool_[L[False]], /) -> bool_[L[False]]: ... @overload def __and__(self, other: L[True] | bool_[L[True]], /) -> Self: ... @overload - def __and__(self, other: builtins.bool | bool_, /) -> bool_: ... + def __and__(self, other: py_bool | bool_, /) -> bool_: ... @overload def __and__[ScalarT: integer](self, other: ScalarT, /) -> ScalarT: ... @overload @@ -4289,13 +4290,13 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): __rand__ = __and__ @overload - def __xor__[ItemT: builtins.bool](self: bool_[L[False]], other: ItemT | bool_[ItemT], /) -> bool_[ItemT]: ... + def __xor__[ItemT: py_bool](self: bool_[L[False]], other: ItemT | bool_[ItemT], /) -> bool_[ItemT]: ... @overload def __xor__(self: bool_[L[True]], other: L[True] | bool_[L[True]], /) -> bool_[L[False]]: ... @overload def __xor__(self, other: L[False] | bool_[L[False]], /) -> Self: ... @overload - def __xor__(self, other: builtins.bool | bool_, /) -> bool_: ... + def __xor__(self, other: py_bool | bool_, /) -> bool_: ... @overload def __xor__[ScalarT: integer](self, other: ScalarT, /) -> ScalarT: ... @overload @@ -4303,13 +4304,13 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): __rxor__ = __xor__ @overload - def __or__(self: bool_[L[True]], other: builtins.bool | bool_, /) -> bool_[L[True]]: ... + def __or__(self: bool_[L[True]], other: py_bool | bool_, /) -> bool_[L[True]]: ... @overload def __or__(self, other: L[False] | bool_[L[False]], /) -> Self: ... @overload def __or__(self, other: L[True] | bool_[L[True]], /) -> bool_[L[True]]: ... @overload - def __or__(self, other: builtins.bool | bool_, /) -> bool_: ... + def __or__(self, other: py_bool | bool_, /) -> bool_: ... @overload def __or__[ScalarT: integer](self, other: ScalarT, /) -> ScalarT: ... 
@overload @@ -5062,7 +5063,7 @@ class floating(_RealMixin, _RoundMixin, inexact[_NBitT, float]): def __rdivmod__(self, other: float, /) -> _2Tuple[Self]: ... # NOTE: `is_integer` and `as_integer_ratio` are technically defined in the concrete subtypes - def is_integer(self, /) -> builtins.bool: ... + def is_integer(self, /) -> py_bool: ... def as_integer_ratio(self, /) -> tuple[int, int]: ... float16 = floating[_16Bit] @@ -5899,7 +5900,7 @@ class ufunc: *, signature: tuple[dtype | None, ...] | None = None, casting: _CastingKind | None = None, - reduction: builtins.bool = False, + reduction: py_bool = False, ) -> tuple[dtype, ...]: ... # Parameters: `__name__`, `ntypes` and `identity` @@ -6101,24 +6102,24 @@ class nditer: def copy(self) -> nditer: ... def debug_print(self) -> None: ... def enable_external_loop(self) -> None: ... - def iternext(self) -> builtins.bool: ... + def iternext(self) -> py_bool: ... def remove_axis(self, i: SupportsIndex, /) -> None: ... def remove_multi_index(self) -> None: ... def reset(self) -> None: ... @property def dtypes(self) -> tuple[dtype, ...]: ... @property - def finished(self) -> builtins.bool: ... + def finished(self) -> py_bool: ... @property - def has_delayed_bufalloc(self) -> builtins.bool: ... + def has_delayed_bufalloc(self) -> py_bool: ... @property - def has_index(self) -> builtins.bool: ... + def has_index(self) -> py_bool: ... @property - def has_multi_index(self) -> builtins.bool: ... + def has_multi_index(self) -> py_bool: ... @property def index(self) -> int: ... @property - def iterationneedsapi(self) -> builtins.bool: ... + def iterationneedsapi(self) -> py_bool: ... @property def iterindex(self) -> int: ... @property @@ -6180,7 +6181,7 @@ class memmap(ndarray[_ShapeT_co, _DTypeT_co]): self, array: memmap[_ShapeT_co, _DTypeT_co], context: tuple[ufunc, tuple[Any, ...], int] | None = None, - return_scalar: builtins.bool = False, + return_scalar: py_bool = False, ) -> Any: ... def flush(self) -> None: ... 
@@ -6219,9 +6220,9 @@ class poly1d: __hash__: ClassVar[None] # type: ignore[assignment] # pyright: ignore[reportIncompatibleMethodOverride] @overload - def __array__(self, /, t: None = None, copy: builtins.bool | None = None) -> ndarray[tuple[int]]: ... + def __array__(self, /, t: None = None, copy: py_bool | None = None) -> ndarray[tuple[int]]: ... @overload - def __array__[DTypeT: dtype](self, /, t: DTypeT, copy: builtins.bool | None = None) -> ndarray[tuple[int], DTypeT]: ... + def __array__[DTypeT: dtype](self, /, t: DTypeT, copy: py_bool | None = None) -> ndarray[tuple[int], DTypeT]: ... @overload def __call__(self, val: _ScalarLike_co) -> Any: ... @@ -6233,7 +6234,7 @@ class poly1d: def __init__( self, c_or_r: ArrayLike, - r: builtins.bool = False, + r: py_bool = False, variable: str | None = None, ) -> None: ... def __len__(self) -> int: ... @@ -6263,5 +6264,5 @@ def from_dlpack( /, *, device: L["cpu"] | None = None, - copy: builtins.bool | None = None, + copy: py_bool | None = None, ) -> NDArray[number | bool_]: ... From 81571e7a88d35f0ac4cc179258ecfa5675223ed6 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 13 Dec 2025 23:31:38 +0100 Subject: [PATCH 1019/1718] TYP: prefer `str` over `builtins.str` in `__init__.pyi` --- numpy/__init__.pyi | 83 +++++++++++++++++++++++----------------------- 1 file changed, 41 insertions(+), 42 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index bae953b7194e..4128dd3cb9ec 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1,5 +1,4 @@ # ruff: noqa: I001 -import builtins import ctypes as ct import datetime as dt import inspect @@ -1090,7 +1089,7 @@ class _DTypeMeta(type): @final class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 - names: tuple[builtins.str, ...] | None + names: tuple[str, ...] | None def __hash__(self) -> int: ... 
# `None` results in the default dtype @@ -1101,7 +1100,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ... + metadata: dict[str, Any] = ... ) -> dtype[float64]: ... # Overload for `dtype` instances, scalar types, and instances that have a @@ -1113,7 +1112,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[ScalarT]: ... # Builtin types @@ -1215,7 +1214,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[uint8]: ... @overload def __new__( @@ -1224,7 +1223,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[uint16]: ... @overload def __new__( @@ -1233,7 +1232,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[uint32]: ... @overload def __new__( @@ -1242,7 +1241,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[uint64]: ... @overload def __new__( @@ -1251,7 +1250,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[uintp]: ... 
@overload def __new__( @@ -1260,7 +1259,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[uint32 | uint64]: ... # `signedinteger` string-based representations and ctypes @@ -1271,7 +1270,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[int8]: ... @overload def __new__( @@ -1280,7 +1279,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[int16]: ... @overload def __new__( @@ -1289,7 +1288,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[int32]: ... @overload def __new__( @@ -1298,7 +1297,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[int64]: ... @overload def __new__( @@ -1307,7 +1306,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[intp]: ... @overload def __new__( @@ -1316,7 +1315,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[int32 | int64]: ... 
# `floating` string-based representations and ctypes @@ -1327,7 +1326,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[float16]: ... @overload def __new__( @@ -1336,7 +1335,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[float32]: ... # float64 codes are covered by overload 1 @overload @@ -1346,7 +1345,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[longdouble]: ... # `complexfloating` string-based representations and ctypes @@ -1358,7 +1357,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[complex64]: ... @overload def __new__( @@ -1367,7 +1366,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[complex128]: ... @overload def __new__( @@ -1376,7 +1375,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[clongdouble]: ... else: @overload @@ -1386,7 +1385,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[complex64]: ... 
@overload def __new__( @@ -1395,7 +1394,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[complex128]: ... @overload def __new__( @@ -1404,7 +1403,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[clongdouble]: ... # Miscellaneous string-based representations and ctypes @@ -1415,7 +1414,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[timedelta64]: ... @overload def __new__( @@ -1424,7 +1423,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[datetime64]: ... # `StringDType` requires special treatment because it has no scalar type @@ -1435,7 +1434,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtypes.StringDType: ... # Combined char-codes and ctypes, analogous to the scalar-type hierarchy @@ -1446,7 +1445,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[unsignedinteger]: ... 
@overload def __new__( @@ -1455,7 +1454,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[signedinteger]: ... @overload def __new__( @@ -1464,7 +1463,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[integer]: ... @overload def __new__( @@ -1473,7 +1472,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[floating]: ... @overload def __new__( @@ -1482,7 +1481,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[complexfloating]: ... @overload def __new__( @@ -1491,12 +1490,12 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[inexact]: ... @overload def __new__( cls, - dtype: _CharacterCodes | type[bytes | builtins.str | ct.c_char], + dtype: _CharacterCodes | type[bytes | str | ct.c_char], align: py_bool = False, copy: py_bool = False, *, @@ -1507,11 +1506,11 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 @overload def __new__( cls, - dtype: builtins.str, + dtype: str, align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype: ... 
# Catch-all overload for object-likes @@ -1527,15 +1526,15 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[object_ | Any]: ... def __class_getitem__(cls, item: Any, /) -> GenericAlias: ... @overload - def __getitem__(self: dtype[void], key: list[builtins.str], /) -> dtype[void]: ... + def __getitem__(self: dtype[void], key: list[str], /) -> dtype[void]: ... @overload - def __getitem__(self: dtype[void], key: builtins.str | SupportsIndex, /) -> dtype: ... + def __getitem__(self: dtype[void], key: str | SupportsIndex, /) -> dtype: ... # NOTE: In the future 1-based multiplications will also yield `flexible` dtypes @overload @@ -1591,7 +1590,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 @property def kind(self) -> _DTypeKind: ... @property - def metadata(self) -> MappingProxyType[builtins.str, Any] | None: ... + def metadata(self) -> MappingProxyType[str, Any] | None: ... @property def name(self) -> LiteralString: ... @property @@ -1726,7 +1725,7 @@ class _ArrayOrScalarCommon: @property def __array_priority__(self) -> float: ... @property - def __array_struct__(self) -> CapsuleType: ... # builtins.PyCapsule + def __array_struct__(self) -> CapsuleType: ... def __array_namespace__(self, /, *, api_version: _ArrayAPIVersion | None = None) -> ModuleType: ... 
def __setstate__(self, state: tuple[ SupportsIndex, # version From 510fea4b61d9ee369f4f33bf5dfe750ca8bd418a Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 14 Dec 2025 00:31:16 +0100 Subject: [PATCH 1020/1718] TYP: ``linalg.diagonal``: shape-typing and transparent dtypes --- numpy/linalg/_linalg.pyi | 47 +++++++++++++++++++---- numpy/typing/tests/data/reveal/linalg.pyi | 14 +++++++ 2 files changed, 54 insertions(+), 7 deletions(-) diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index 38926e84d0a6..766c8f2468a5 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -5,9 +5,11 @@ from typing import ( Literal as L, NamedTuple, Never, + Protocol, SupportsIndex, SupportsInt, overload, + type_check_only, ) from typing_extensions import TypeVar @@ -29,6 +31,7 @@ from numpy._typing import ( ArrayLike, DTypeLike, NDArray, + _AnyShape, _ArrayLike, _ArrayLikeBool_co, _ArrayLikeComplex_co, @@ -38,6 +41,7 @@ from numpy._typing import ( _ArrayLikeTD64_co, _ArrayLikeUInt_co, _NestedSequence, + _Shape, _ShapeLike, ) from numpy.linalg import LinAlgError @@ -113,6 +117,11 @@ _FloatingOrArrayT_co = TypeVar("_FloatingOrArrayT_co", bound=np.floating | NDArr _InexactT_co = TypeVar("_InexactT_co", bound=np.inexact, default=Any, covariant=True) _InexactOrArrayT_co = TypeVar("_InexactOrArrayT_co", bound=np.inexact | NDArray[np.inexact], default=Any, covariant=True) +# shape-typed variant of numpy._typing._SupportsArray +@type_check_only +class _SupportsArray[ShapeT: _Shape, DTypeT: np.dtype](Protocol): + def __array__(self, /) -> np.ndarray[ShapeT, DTypeT]: ... + ### fortran_int = np.intc @@ -654,13 +663,37 @@ def multi_dot( out: NDArray[Any] | None = None, ) -> Any: ... -# TODO: narrow return types -def diagonal( - x: ArrayLike, # >= 2D array - /, - *, - offset: SupportsIndex = 0, -) -> NDArray[Any]: ... 
+# +@overload # workaround for microsoft/pyright#10232 +def diagonal[DTypeT: np.dtype]( + x: _SupportsArray[_JustAnyShape, DTypeT], /, *, offset: SupportsIndex = 0 +) -> np.ndarray[_AnyShape, DTypeT]: ... +@overload # 2d, known dtype +def diagonal[DTypeT: np.dtype]( + x: _SupportsArray[tuple[int, int], DTypeT], /, *, offset: SupportsIndex = 0 +) -> np.ndarray[tuple[int], DTypeT]: ... +@overload # 3d, known dtype +def diagonal[DTypeT: np.dtype]( + x: _SupportsArray[tuple[int, int, int], DTypeT], /, *, offset: SupportsIndex = 0 +) -> np.ndarray[tuple[int, int], DTypeT]: ... +@overload # 4d, known dtype +def diagonal[DTypeT: np.dtype]( + x: _SupportsArray[tuple[int, int, int, int], DTypeT], /, *, offset: SupportsIndex = 0 +) -> np.ndarray[tuple[int, int, int], DTypeT]: ... +@overload # nd like ~bool +def diagonal(x: _NestedSequence[list[bool]], /, *, offset: SupportsIndex = 0) -> NDArray[np.bool]: ... +@overload # nd like ~int +def diagonal(x: _NestedSequence[list[int]], /, *, offset: SupportsIndex = 0) -> NDArray[np.int_]: ... +@overload # nd like ~float +def diagonal(x: _NestedSequence[list[float]], /, *, offset: SupportsIndex = 0) -> NDArray[np.float64]: ... +@overload # nd like ~complex +def diagonal(x: _NestedSequence[list[complex]], /, *, offset: SupportsIndex = 0) -> NDArray[np.complex128]: ... +@overload # nd like ~bytes +def diagonal(x: _NestedSequence[list[bytes]], /, *, offset: SupportsIndex = 0) -> NDArray[np.bytes_]: ... +@overload # nd like ~str +def diagonal(x: _NestedSequence[list[str]], /, *, offset: SupportsIndex = 0) -> NDArray[np.str_]: ... +@overload # fallback +def diagonal(x: ArrayLike, /, *, offset: SupportsIndex = 0) -> np.ndarray: ... 
# TODO: narrow return types def trace( diff --git a/numpy/typing/tests/data/reveal/linalg.pyi b/numpy/typing/tests/data/reveal/linalg.pyi index a40aa84d9c05..744f1edbaefa 100644 --- a/numpy/typing/tests/data/reveal/linalg.pyi +++ b/numpy/typing/tests/data/reveal/linalg.pyi @@ -10,6 +10,7 @@ from numpy.linalg._linalg import ( SVDResult, ) +bool_list_2d: list[list[bool]] int_list_2d: list[list[int]] float_list_1d: list[float] float_list_2d: list[list[float]] @@ -17,6 +18,8 @@ float_list_3d: list[list[list[float]]] float_list_4d: list[list[list[list[float]]]] complex_list_2d: list[list[complex]] complex_list_3d: list[list[list[complex]]] +bytes_list_2d: list[list[bytes]] +str_list_2d: list[list[str]] AR_any: np.ndarray AR_f_: npt.NDArray[np.floating] @@ -234,6 +237,17 @@ assert_type(np.linalg.multi_dot([AR_f8, AR_c16]), Any) assert_type(np.linalg.multi_dot([AR_O, AR_O]), Any) assert_type(np.linalg.multi_dot([AR_m, AR_m]), Any) +assert_type(np.linalg.diagonal(AR_any), np.ndarray) +assert_type(np.linalg.diagonal(AR_f4), npt.NDArray[np.float32]) +assert_type(np.linalg.diagonal(AR_f4_2d), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.linalg.diagonal(AR_f8_2d), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.linalg.diagonal(bool_list_2d), npt.NDArray[np.bool]) +assert_type(np.linalg.diagonal(int_list_2d), npt.NDArray[np.int_]) +assert_type(np.linalg.diagonal(float_list_2d), npt.NDArray[np.float64]) +assert_type(np.linalg.diagonal(complex_list_2d), npt.NDArray[np.complex128]) +assert_type(np.linalg.diagonal(bytes_list_2d), npt.NDArray[np.bytes_]) +assert_type(np.linalg.diagonal(str_list_2d), npt.NDArray[np.str_]) + assert_type(np.linalg.cross(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) assert_type(np.linalg.cross(AR_f8, AR_f8), npt.NDArray[np.floating]) assert_type(np.linalg.cross(AR_c16, AR_c16), npt.NDArray[np.complexfloating]) From f8082890fb032b0a77d8dbc3f0a90f7956785210 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 14 Dec 2025 
00:36:09 +0100 Subject: [PATCH 1021/1718] TYP: disable type-test on mypy due to a bug --- numpy/typing/tests/data/reveal/linalg.pyi | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/numpy/typing/tests/data/reveal/linalg.pyi b/numpy/typing/tests/data/reveal/linalg.pyi index 744f1edbaefa..194c525871d5 100644 --- a/numpy/typing/tests/data/reveal/linalg.pyi +++ b/numpy/typing/tests/data/reveal/linalg.pyi @@ -237,7 +237,8 @@ assert_type(np.linalg.multi_dot([AR_f8, AR_c16]), Any) assert_type(np.linalg.multi_dot([AR_O, AR_O]), Any) assert_type(np.linalg.multi_dot([AR_m, AR_m]), Any) -assert_type(np.linalg.diagonal(AR_any), np.ndarray) +# Mypy incorrectly inferts `ndarray[Any, Any]`, but pyright behaves correctly. +assert_type(np.linalg.diagonal(AR_any), np.ndarray) # type: ignore[assert-type] assert_type(np.linalg.diagonal(AR_f4), npt.NDArray[np.float32]) assert_type(np.linalg.diagonal(AR_f4_2d), np.ndarray[tuple[int], np.dtype[np.float32]]) assert_type(np.linalg.diagonal(AR_f8_2d), np.ndarray[tuple[int], np.dtype[np.float64]]) From 29ad5a11a798150d03d801ccf9e6e75e499c246f Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 14 Dec 2025 14:29:00 +0100 Subject: [PATCH 1022/1718] TYP: ``linalg.trace``: shape-typing and transparent dtypes --- numpy/linalg/_linalg.pyi | 71 +++++++++++++++++++++-- numpy/typing/tests/data/reveal/linalg.pyi | 13 +++++ 2 files changed, 80 insertions(+), 4 deletions(-) diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index 766c8f2468a5..4e0d075dc181 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -40,6 +40,8 @@ from numpy._typing import ( _ArrayLikeObject_co, _ArrayLikeTD64_co, _ArrayLikeUInt_co, + _ComplexLike_co, + _DTypeLike, _NestedSequence, _Shape, _ShapeLike, @@ -82,6 +84,7 @@ __all__ = [ ] type _AtMost1D = tuple[()] | tuple[int] +type _AtLeast2D = tuple[int, int, *tuple[int, ...]] type _AtLeast3D = tuple[int, int, int, *tuple[int, ...]] type _AtLeast4D = tuple[int, int, int, int, 
*tuple[int, ...]] type _JustAnyShape = tuple[Never, ...] # workaround for microsoft/pyright#10232 @@ -91,6 +94,7 @@ type _tuple2[T] = tuple[T, T] type _inexact32 = np.float32 | np.complex64 type _to_float64 = np.float64 | np.integer | np.bool type _to_inexact64 = np.complex128 | _to_float64 +type _to_complex = np.number | np.bool type _Array2D[ScalarT: np.generic] = np.ndarray[tuple[int, int], np.dtype[ScalarT]] type _Array3ND[ScalarT: np.generic] = np.ndarray[_AtLeast3D, np.dtype[ScalarT]] @@ -695,14 +699,73 @@ def diagonal(x: _NestedSequence[list[str]], /, *, offset: SupportsIndex = 0) -> @overload # fallback def diagonal(x: ArrayLike, /, *, offset: SupportsIndex = 0) -> np.ndarray: ... -# TODO: narrow return types +# +@overload # workaround for microsoft/pyright#10232 def trace( - x: ArrayLike, # >= 2D array + x: _SupportsArray[_JustAnyShape, np.dtype[_to_complex]], /, *, offset: SupportsIndex = 0, dtype: DTypeLike | None = None +) -> Any: ... +@overload # 2d known dtype, dtype=None +def trace[ScalarT: _to_complex]( + x: _SupportsArray[tuple[int, int], np.dtype[ScalarT]], /, *, offset: SupportsIndex = 0, dtype: None = None +) -> ScalarT: ... +@overload # 2d, dtype= +def trace[ScalarT: _to_complex]( + x: _SupportsArray[tuple[int, int], np.dtype[_to_complex]] | Sequence[Sequence[_ComplexLike_co]], /, *, offset: SupportsIndex = 0, - dtype: DTypeLike | None = None, -) -> Any: ... + dtype: _DTypeLike[ScalarT], +) -> ScalarT: ... +@overload # 2d bool +def trace(x: Sequence[Sequence[bool]], /, *, offset: SupportsIndex = 0, dtype: None = None) -> np.bool: ... +@overload # 2d int +def trace(x: Sequence[list[int]], /, *, offset: SupportsIndex = 0, dtype: None = None) -> np.int_: ... +@overload # 2d float +def trace(x: Sequence[list[float]], /, *, offset: SupportsIndex = 0, dtype: None = None) -> np.float64: ... +@overload # 2d complex +def trace(x: Sequence[list[complex]], /, *, offset: SupportsIndex = 0, dtype: None = None) -> np.complex128: ... 
+@overload # 3d known dtype, dtype=None +def trace[DTypeT: np.dtype[_to_complex]]( + x: _SupportsArray[tuple[int, int, int], DTypeT], /, *, offset: SupportsIndex = 0, dtype: None = None +) -> np.ndarray[tuple[int], DTypeT]: ... +@overload # 3d, dtype= +def trace[ScalarT: _to_complex]( + x: _SupportsArray[tuple[int, int, int], np.dtype[_to_complex]] | Sequence[Sequence[Sequence[_ComplexLike_co]]], + /, + *, + offset: SupportsIndex = 0, + dtype: _DTypeLike[ScalarT], +) -> np.ndarray[tuple[int], np.dtype[ScalarT]]: ... +@overload # 3d+ known dtype, dtype=None +def trace[DTypeT: np.dtype[_to_complex]]( + x: _SupportsArray[_AtLeast3D, DTypeT], /, *, offset: SupportsIndex = 0, dtype: None = None +) -> np.ndarray[tuple[int, *tuple[Any, ...]], DTypeT]: ... +@overload # 3d+, dtype= +def trace[ScalarT: _to_complex]( + x: _SupportsArray[_AtLeast3D, np.dtype[_to_complex]] | _NestedSequence[Sequence[Sequence[_ComplexLike_co]]], + /, + *, + offset: SupportsIndex = 0, + dtype: _DTypeLike[ScalarT], +) -> np.ndarray[tuple[int, *tuple[Any, ...]], np.dtype[ScalarT]]: ... +@overload # 3d+ bool +def trace( + x: _NestedSequence[Sequence[Sequence[bool]]], /, *, offset: SupportsIndex = 0, dtype: None = None +) -> NDArray[np.bool]: ... +@overload # 3d+ int +def trace( + x: _NestedSequence[Sequence[list[int]]], /, *, offset: SupportsIndex = 0, dtype: None = None +) -> NDArray[np.int_]: ... +@overload # 3d+ float +def trace( + x: _NestedSequence[Sequence[list[float]]], /, *, offset: SupportsIndex = 0, dtype: None = None +) -> NDArray[np.float64]: ... +@overload # 3d+ complex +def trace( + x: _NestedSequence[Sequence[list[complex]]], /, *, offset: SupportsIndex = 0, dtype: None = None +) -> NDArray[np.complex128]: ... +@overload # fallback +def trace(x: _ArrayLikeComplex_co, /, *, offset: SupportsIndex = 0, dtype: DTypeLike | None = None) -> Any: ... 
# TODO: narrow return types @overload diff --git a/numpy/typing/tests/data/reveal/linalg.pyi b/numpy/typing/tests/data/reveal/linalg.pyi index 194c525871d5..e782b8e24281 100644 --- a/numpy/typing/tests/data/reveal/linalg.pyi +++ b/numpy/typing/tests/data/reveal/linalg.pyi @@ -249,6 +249,19 @@ assert_type(np.linalg.diagonal(complex_list_2d), npt.NDArray[np.complex128]) assert_type(np.linalg.diagonal(bytes_list_2d), npt.NDArray[np.bytes_]) assert_type(np.linalg.diagonal(str_list_2d), npt.NDArray[np.str_]) +assert_type(np.linalg.trace(AR_any), Any) +assert_type(np.linalg.trace(AR_f4), Any) +assert_type(np.linalg.trace(AR_f4_2d), np.float32) +assert_type(np.linalg.trace(AR_f8_2d), np.float64) +assert_type(np.linalg.trace(AR_f4_3d), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.linalg.trace(AR_f8_3d), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.linalg.trace(AR_f8_4d), np.ndarray[tuple[int, *tuple[Any, ...]], np.dtype[np.float64]]) +assert_type(np.linalg.trace(bool_list_2d), np.bool) +assert_type(np.linalg.trace(int_list_2d), np.int_) +assert_type(np.linalg.trace(float_list_2d), np.float64) +assert_type(np.linalg.trace(complex_list_2d), np.complex128) +assert_type(np.linalg.trace(float_list_3d), npt.NDArray[np.float64]) + assert_type(np.linalg.cross(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) assert_type(np.linalg.cross(AR_f8, AR_f8), npt.NDArray[np.floating]) assert_type(np.linalg.cross(AR_c16, AR_c16), npt.NDArray[np.complexfloating]) From b70fc2ef39e007ea6a5393a3dc12c2e1a46b189c Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Sun, 14 Dec 2025 14:35:17 +0100 Subject: [PATCH 1023/1718] TYP: fix typo Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- numpy/typing/tests/data/reveal/linalg.pyi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/typing/tests/data/reveal/linalg.pyi b/numpy/typing/tests/data/reveal/linalg.pyi index e782b8e24281..81f8ec7c61d9 100644 --- 
a/numpy/typing/tests/data/reveal/linalg.pyi +++ b/numpy/typing/tests/data/reveal/linalg.pyi @@ -237,7 +237,7 @@ assert_type(np.linalg.multi_dot([AR_f8, AR_c16]), Any) assert_type(np.linalg.multi_dot([AR_O, AR_O]), Any) assert_type(np.linalg.multi_dot([AR_m, AR_m]), Any) -# Mypy incorrectly inferts `ndarray[Any, Any]`, but pyright behaves correctly. +# Mypy incorrectly infers `ndarray[Any, Any]`, but pyright behaves correctly. assert_type(np.linalg.diagonal(AR_any), np.ndarray) # type: ignore[assert-type] assert_type(np.linalg.diagonal(AR_f4), npt.NDArray[np.float32]) assert_type(np.linalg.diagonal(AR_f4_2d), np.ndarray[tuple[int], np.dtype[np.float32]]) From ffd9fd28574028ab1fa323c596f9ed7eddf12da2 Mon Sep 17 00:00:00 2001 From: Kumar Aditya Date: Mon, 15 Dec 2025 01:02:24 +0530 Subject: [PATCH 1024/1718] fix more data races in mtrand.pyx (#30426) --- numpy/random/mtrand.pyx | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx index ae1706eb884b..c69284d0df9a 100644 --- a/numpy/random/mtrand.pyx +++ b/numpy/random/mtrand.pyx @@ -222,12 +222,13 @@ cdef class RandomState: "be instantized.") self._bitgen = ( PyCapsule_GetPointer(capsule, name))[0] self._aug_state.bit_generator = &self._bitgen - self._reset_gauss() self.lock = bit_generator.lock + self._reset_gauss() cdef _reset_gauss(self): - self._aug_state.has_gauss = 0 - self._aug_state.gauss = 0.0 + with self.lock: + self._aug_state.has_gauss = 0 + self._aug_state.gauss = 0.0 def seed(self, seed=None): """ @@ -301,8 +302,9 @@ cdef class RandomState: 'MT19937 BitGenerator. 
To silence this warning, ' 'set `legacy` to False.', RuntimeWarning) legacy = False - st['has_gauss'] = self._aug_state.has_gauss - st['gauss'] = self._aug_state.gauss + with self.lock: + st['has_gauss'] = self._aug_state.has_gauss + st['gauss'] = self._aug_state.gauss if legacy and not isinstance(self._bit_generator, _MT19937): raise ValueError( "legacy can only be True when the underlying bitgenerator is " From 072a8ee00f87a216517accfc480d73168561c79d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 15 Dec 2025 17:21:21 +0000 Subject: [PATCH 1025/1718] MAINT: Bump actions/upload-artifact from 5.0.0 to 6.0.0 Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 5.0.0 to 6.0.0. - [Release notes](https://github.com/actions/upload-artifact/releases) - [Commits](https://github.com/actions/upload-artifact/compare/330a01c490aca151604b8cf639adc76d48f6c5d4...b7c566a772e6b6bfb58ed0dc250532a479d7789f) --- updated-dependencies: - dependency-name: actions/upload-artifact dependency-version: 6.0.0 dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] --- .github/workflows/cygwin.yml | 2 +- .github/workflows/mypy_primer.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- .github/workflows/wheels.yml | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/cygwin.yml b/.github/workflows/cygwin.yml index e8f7083336e6..a8bef06a5f5c 100644 --- a/.github/workflows/cygwin.yml +++ b/.github/workflows/cygwin.yml @@ -67,7 +67,7 @@ jobs: cd tools /usr/bin/python3.9 -m pytest --pyargs numpy -n2 -m "not slow" - name: Upload wheel if tests fail - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 + uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 if: failure() with: name: numpy-cygwin-wheel diff --git a/.github/workflows/mypy_primer.yml b/.github/workflows/mypy_primer.yml index 040a154d2895..b0b57cb93d7b 100644 --- a/.github/workflows/mypy_primer.yml +++ b/.github/workflows/mypy_primer.yml @@ -74,7 +74,7 @@ jobs: run: | echo ${{ github.event.pull_request.number }} | tee pr_number.txt - name: Upload mypy_primer diff + PR number - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 + uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 if: ${{ matrix.shard-index == 0 }} with: name: mypy_primer_diffs-${{ matrix.shard-index }} @@ -82,7 +82,7 @@ jobs: diff_${{ matrix.shard-index }}.txt pr_number.txt - name: Upload mypy_primer diff - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 + uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 if: ${{ matrix.shard-index != 0 }} with: name: mypy_primer_diffs-${{ matrix.shard-index }} @@ -96,7 +96,7 @@ jobs: contents: read steps: - name: Merge artifacts - uses: actions/upload-artifact/merge@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 + uses: actions/upload-artifact/merge@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 with: name: mypy_primer_diffs 
pattern: mypy_primer_diffs-* diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 140d92d43e61..421790b4ae1e 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -42,7 +42,7 @@ jobs: # Upload the results as artifacts (optional). Commenting out will disable # uploads of run results in SARIF format to the repository Actions tab. - name: "Upload artifact" - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 + uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 with: name: SARIF file path: results.sarif diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 436e929fe636..27ee88d80677 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -102,7 +102,7 @@ jobs: env: CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} - - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 + - uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 with: name: ${{ matrix.python }}-${{ matrix.buildplat[1] }}-${{ matrix.buildplat[2] }} path: ./wheelhouse/*.whl From 908219cd6dd75c0650e97657a54f61a17428be8c Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Mon, 15 Dec 2025 11:12:14 -0700 Subject: [PATCH 1026/1718] MAINT: Update msvc-allowed-warnings.txt Line numbers have changed in numpy/random/_generator.pyx --- .github/check-warnings/msvc-allowed-warnings.txt | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/check-warnings/msvc-allowed-warnings.txt b/.github/check-warnings/msvc-allowed-warnings.txt index fa5597b18216..4cc6e6ab2124 100644 --- a/.github/check-warnings/msvc-allowed-warnings.txt +++ b/.github/check-warnings/msvc-allowed-warnings.txt @@ -15,7 +15,7 @@ C:\a\numpy\numpy\numpy\random\src\pcg64\pcg64.h(342): warning C4146: unary minus operator applied to unsigned type, result still unsigned 
D:\a\numpy\numpy\numpy\random\src\pcg64\pcg64.h(342): warning C4146: unary minus operator applied to unsigned type, result still unsigned cl : Command line warning D9025 : overriding '/arch:SSE2' with '/arch:AVX2' -numpy/random/_generator.cp312-win32.pyd.p/numpy/random/_generator.pyx.c(26290): warning C4244: 'function': conversion from 'int64_t' to 'double', possible loss of data -numpy/random/_generator.cp312-win32.pyd.p/numpy/random/_generator.pyx.c(38314): warning C4244: 'function': conversion from 'int64_t' to 'double', possible loss of data -numpy/random/_generator.cp312-win_arm64.pyd.p/numpy/random/_generator.pyx.c(26290): warning C4244: 'function': conversion from 'int64_t' to 'double', possible loss of data -numpy/random/_generator.cp312-win_arm64.pyd.p/numpy/random/_generator.pyx.c(38314): warning C4244: 'function': conversion from 'int64_t' to 'double', possible loss of data +numpy/random/_generator.cp312-win32.pyd.p/numpy/random/_generator.pyx.c(26329): warning C4244: 'function': conversion from 'int64_t' to 'double', possible loss of data +numpy/random/_generator.cp312-win32.pyd.p/numpy/random/_generator.pyx.c(38353): warning C4244: 'function': conversion from 'int64_t' to 'double', possible loss of data +numpy/random/_generator.cp312-win_arm64.pyd.p/numpy/random/_generator.pyx.c(26329): warning C4244: 'function': conversion from 'int64_t' to 'double', possible loss of data +numpy/random/_generator.cp312-win_arm64.pyd.p/numpy/random/_generator.pyx.c(38353): warning C4244: 'function': conversion from 'int64_t' to 'double', possible loss of data From 3a84feb5731514fd8343543f24b171f581b3a5c6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 15 Dec 2025 12:33:17 -0700 Subject: [PATCH 1027/1718] MAINT: Bump astral-sh/setup-uv from 7.1.5 to 7.1.6 (#30436) Bumps [astral-sh/setup-uv](https://github.com/astral-sh/setup-uv) from 7.1.5 to 7.1.6. 
- [Release notes](https://github.com/astral-sh/setup-uv/releases) - [Commits](https://github.com/astral-sh/setup-uv/compare/ed21f2f24f8dd64503750218de024bcf64c7250a...681c641aba71e4a1c380be3ab5e12ad51f415867) --- updated-dependencies: - dependency-name: astral-sh/setup-uv dependency-version: 7.1.6 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/mypy.yml | 2 +- .github/workflows/stubtest.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml index ac87ccacd846..41f421ce3889 100644 --- a/.github/workflows/mypy.yml +++ b/.github/workflows/mypy.yml @@ -61,7 +61,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: astral-sh/setup-uv@ed21f2f24f8dd64503750218de024bcf64c7250a # v7.1.5 + - uses: astral-sh/setup-uv@681c641aba71e4a1c380be3ab5e12ad51f415867 # v7.1.6 with: python-version: ${{ matrix.os_python[1] }} activate-environment: true diff --git a/.github/workflows/stubtest.yml b/.github/workflows/stubtest.yml index c91954403b14..526471f799c7 100644 --- a/.github/workflows/stubtest.yml +++ b/.github/workflows/stubtest.yml @@ -43,7 +43,7 @@ jobs: fetch-tags: true persist-credentials: false - - uses: astral-sh/setup-uv@ed21f2f24f8dd64503750218de024bcf64c7250a # v7.1.5 + - uses: astral-sh/setup-uv@681c641aba71e4a1c380be3ab5e12ad51f415867 # v7.1.6 with: python-version: ${{ matrix.py }} activate-environment: true From e2649ab111ffa634f2314ed37809e7361db5c1fb Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Tue, 16 Dec 2025 16:18:02 +0100 Subject: [PATCH 1028/1718] TYP: ``linalg.lstsq``: shape-typing and transparent dtypes (#30433) --- numpy/linalg/_linalg.pyi | 87 ++++++++++++++++------- numpy/typing/tests/data/reveal/linalg.pyi | 59 ++++++++++++++- 2 files changed, 116 insertions(+), 30 
deletions(-) diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index 4e0d075dc181..c52e8e9b0f44 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -18,7 +18,6 @@ from numpy import ( complexfloating, float64, floating, - int32, object_, signedinteger, timedelta64, @@ -116,6 +115,13 @@ type _SideKind = L["L", "U", "l", "u"] type _NonNegInt = L[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] type _NegInt = L[-1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16] +type _LstSqResult[ShapeT: tuple[int, ...], InexactT: np.inexact, FloatingT: np.floating] = tuple[ + np.ndarray[ShapeT, np.dtype[InexactT]], # least-squares solution + np.ndarray[tuple[int], np.dtype[FloatingT]], # residuals + np.int32, # rank + np.ndarray[tuple[int], np.dtype[FloatingT]], # singular values +] + _FloatingT_co = TypeVar("_FloatingT_co", bound=np.floating, default=Any, covariant=True) _FloatingOrArrayT_co = TypeVar("_FloatingOrArrayT_co", bound=np.floating | NDArray[np.floating], default=Any, covariant=True) _InexactT_co = TypeVar("_InexactT_co", bound=np.inexact, default=Any, covariant=True) @@ -527,34 +533,61 @@ def det(a: _Array3ND[np.complex128] | _NestedSequence[Sequence[list[complex]]]) @overload # fallback def det(a: _ArrayLikeComplex_co) -> Any: ... -# TODO: narrow return types -@overload -def lstsq( - a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, rcond: float | None = None -) -> tuple[ - NDArray[float64], - NDArray[float64], - int32, - NDArray[float64], -]: ... -@overload +# +@overload # +float64, ~float64, known shape +def lstsq[ShapeT: tuple[int] | tuple[int, int]]( + a: _SupportsArray[tuple[int, int], np.dtype[_to_float64]] | Sequence[Sequence[float]], + b: _SupportsArray[ShapeT, np.dtype[np.floating | np.integer]], + rcond: float | None = None, +) -> _LstSqResult[ShapeT, np.float64, np.float64]: ... 
+@overload # ~float64, +float64, known shape +def lstsq[ShapeT: tuple[int] | tuple[int, int]]( + a: _SupportsArray[tuple[int, int], np.dtype[np.floating | np.integer]] | Sequence[Sequence[float]], + b: _SupportsArray[ShapeT, np.dtype[_to_float64]], + rcond: float | None = None, +) -> _LstSqResult[ShapeT, np.float64, np.float64]: ... +@overload # +complex128, ~complex128, known shape +def lstsq[ShapeT: tuple[int] | tuple[int, int]]( + a: _SupportsArray[tuple[int, int], np.dtype[np.number]] | Sequence[Sequence[complex]], + b: _SupportsArray[ShapeT, np.dtype[np.complex128]], + rcond: float | None = None, +) -> _LstSqResult[ShapeT, np.complex128, np.float64]: ... +@overload # ~complex128, +complex128, known shape +def lstsq[ShapeT: tuple[int] | tuple[int, int]]( + a: _SupportsArray[tuple[int, int], np.dtype[np.complex128]] | Sequence[list[complex]], + b: _SupportsArray[ShapeT, np.dtype[np.number]], + rcond: float | None = None, +) -> _LstSqResult[ShapeT, np.complex128, np.float64]: ... +@overload # ~float32, ~float32, known shape +def lstsq[ShapeT: tuple[int] | tuple[int, int]]( + a: _SupportsArray[tuple[int, int], np.dtype[np.float32]], + b: _SupportsArray[ShapeT, np.dtype[np.float32]], + rcond: float | None = None, +) -> _LstSqResult[ShapeT, np.float32, np.float32]: ... +@overload # +complex64, ~complex64, known shape +def lstsq[ShapeT: tuple[int] | tuple[int, int]]( + a: _SupportsArray[tuple[int, int], np.dtype[np.complex64 | np.float32]], + b: _SupportsArray[ShapeT, np.dtype[np.complex64]], + rcond: float | None = None, +) -> _LstSqResult[ShapeT, np.complex64, np.float32]: ... +@overload # ~complex64, +complex64, known shape +def lstsq[ShapeT: tuple[int] | tuple[int, int]]( + a: _SupportsArray[tuple[int, int], np.dtype[np.complex64]], + b: _SupportsArray[ShapeT, np.dtype[np.complex64 | np.float32]], + rcond: float | None = None, +) -> _LstSqResult[ShapeT, np.complex64, np.float32]: ... 
+@overload # +float64, +float64, unknown shape def lstsq( - a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, rcond: float | None = None -) -> tuple[ - NDArray[floating], - NDArray[floating], - int32, - NDArray[floating], -]: ... -@overload + a: _ArrayLikeFloat_co, + b: _ArrayLikeFloat_co, + rcond: float | None = None, +) -> _LstSqResult[_AnyShape, np.float64 | Any, np.float64 | Any]: ... +@overload # +complex128, +complex128, unknown shape def lstsq( - a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, rcond: float | None = None -) -> tuple[ - NDArray[complexfloating], - NDArray[floating], - int32, - NDArray[floating], -]: ... + a: _ArrayLikeComplex_co, + b: _ArrayLikeComplex_co, + rcond: float | None = None, +) -> _LstSqResult[_AnyShape, np.complex128 | Any, np.float64 | Any]: ... # TODO: narrow return types @overload diff --git a/numpy/typing/tests/data/reveal/linalg.pyi b/numpy/typing/tests/data/reveal/linalg.pyi index 81f8ec7c61d9..1e8098939d43 100644 --- a/numpy/typing/tests/data/reveal/linalg.pyi +++ b/numpy/typing/tests/data/reveal/linalg.pyi @@ -10,6 +10,9 @@ from numpy.linalg._linalg import ( SVDResult, ) +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] +type _Array2D[ScalarT: np.generic] = np.ndarray[tuple[int, int], np.dtype[ScalarT]] + bool_list_2d: list[list[bool]] int_list_2d: list[list[int]] float_list_1d: list[float] @@ -41,6 +44,7 @@ AR_f8_2d: np.ndarray[tuple[int, int], np.dtype[np.float64]] AR_f8_3d: np.ndarray[tuple[int, int, int], np.dtype[np.float64]] AR_f8_4d: np.ndarray[tuple[int, int, int, int], np.dtype[np.float64]] +AR_f4_1d: np.ndarray[tuple[int], np.dtype[np.float32]] AR_f4_2d: np.ndarray[tuple[int, int], np.dtype[np.float32]] AR_f4_3d: np.ndarray[tuple[int, int, int], np.dtype[np.float32]] @@ -204,9 +208,58 @@ assert_type(np.linalg.det(AR_f8_3d), npt.NDArray[np.float64]) assert_type(np.linalg.det(complex_list_2d), np.complex128) assert_type(np.linalg.det(complex_list_3d), npt.NDArray[np.complex128]) 
-assert_type(np.linalg.lstsq(AR_i8, AR_i8), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64], np.int32, npt.NDArray[np.float64]]) -assert_type(np.linalg.lstsq(AR_i8, AR_f8), tuple[npt.NDArray[np.floating], npt.NDArray[np.floating], np.int32, npt.NDArray[np.floating]]) -assert_type(np.linalg.lstsq(AR_f8, AR_c16), tuple[npt.NDArray[np.complexfloating], npt.NDArray[np.floating], np.int32, npt.NDArray[np.floating]]) +assert_type( + np.linalg.lstsq(AR_i8, AR_i8), + tuple[npt.NDArray[np.float64], _Array1D[np.float64], np.int32, _Array1D[np.float64]], +) +assert_type( + np.linalg.lstsq(AR_f4, AR_f4), + tuple[npt.NDArray[np.float32], _Array1D[np.float32], np.int32, _Array1D[np.float32]], +) +assert_type( + np.linalg.lstsq(AR_i8, AR_f8), + tuple[npt.NDArray[np.float64], _Array1D[np.float64], np.int32, _Array1D[np.float64]], +) +assert_type( + np.linalg.lstsq(AR_f4, AR_f8), + tuple[npt.NDArray[np.float64], _Array1D[np.float64], np.int32, _Array1D[np.float64]], +) +assert_type( + np.linalg.lstsq(AR_f8, AR_i8), + tuple[npt.NDArray[np.float64], _Array1D[np.float64], np.int32, _Array1D[np.float64]], +) +assert_type( + np.linalg.lstsq(AR_f8, AR_f4), + tuple[npt.NDArray[np.float64], _Array1D[np.float64], np.int32, _Array1D[np.float64]], +) +assert_type( + np.linalg.lstsq(AR_c8, AR_c8), + tuple[npt.NDArray[np.complex64], _Array1D[np.float32], np.int32, _Array1D[np.float32]], +) +assert_type( + np.linalg.lstsq(AR_c8, AR_c16), + tuple[npt.NDArray[np.complex128], _Array1D[np.float64], np.int32, _Array1D[np.float64]], +) +assert_type( + np.linalg.lstsq(AR_c16, AR_c8), + tuple[npt.NDArray[np.complex128], _Array1D[np.float64], np.int32, _Array1D[np.float64]], +) +assert_type( + np.linalg.lstsq(AR_f8, AR_f8_1d), + tuple[_Array1D[np.float64], _Array1D[np.float64], np.int32, _Array1D[np.float64]], +) +assert_type( + np.linalg.lstsq(AR_f4, AR_f4_1d), + tuple[_Array1D[np.float32], _Array1D[np.float32], np.int32, _Array1D[np.float32]], +) +assert_type( + np.linalg.lstsq(AR_f8, AR_f8_2d), 
+ tuple[_Array2D[np.float64], _Array1D[np.float64], np.int32, _Array1D[np.float64]], +) +assert_type( + np.linalg.lstsq(AR_f4, AR_f4_2d), + tuple[_Array2D[np.float32], _Array1D[np.float32], np.int32, _Array1D[np.float32]], +) assert_type(np.linalg.norm(AR_i8), np.floating) assert_type(np.linalg.norm(AR_f8), np.floating) From 0c0d0970227c0e1c26b8279599b6958313688e1b Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Tue, 16 Dec 2025 16:19:17 +0100 Subject: [PATCH 1029/1718] TYP: ``linalg.solve`` and ``tensorsolve``: improved return dtype specialization (#30434) --- numpy/linalg/_linalg.pyi | 75 ++++++++++++----------- numpy/typing/tests/data/reveal/linalg.pyi | 14 +++-- 2 files changed, 49 insertions(+), 40 deletions(-) diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index c52e8e9b0f44..af876d911826 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -159,42 +159,45 @@ class SlogdetResult(NamedTuple, Generic[_FloatingOrArrayT_co, _InexactOrArrayT_c sign: _FloatingOrArrayT_co logabsdet: _InexactOrArrayT_co -# TODO: narrow return types -@overload -def tensorsolve( - a: _ArrayLikeInt_co, - b: _ArrayLikeInt_co, - axes: Iterable[int] | None = None, -) -> NDArray[float64]: ... -@overload -def tensorsolve( - a: _ArrayLikeFloat_co, - b: _ArrayLikeFloat_co, - axes: Iterable[int] | None = None, -) -> NDArray[floating]: ... -@overload -def tensorsolve( - a: _ArrayLikeComplex_co, - b: _ArrayLikeComplex_co, - axes: Iterable[int] | None = None, -) -> NDArray[complexfloating]: ... - -# TODO: narrow return types -@overload -def solve( - a: _ArrayLikeInt_co, - b: _ArrayLikeInt_co, -) -> NDArray[float64]: ... -@overload -def solve( - a: _ArrayLikeFloat_co, - b: _ArrayLikeFloat_co, -) -> NDArray[floating]: ... -@overload -def solve( - a: _ArrayLikeComplex_co, - b: _ArrayLikeComplex_co, -) -> NDArray[complexfloating]: ... 
+# keep in sync with `solve` +@overload # ~float64, +float64 +def tensorsolve(a: _ToArrayF64, b: _ArrayLikeFloat_co, axes: Iterable[int] | None = None) -> NDArray[np.float64]: ... +@overload # +float64, ~float64 +def tensorsolve(a: _ArrayLikeFloat_co, b: _ToArrayF64, axes: Iterable[int] | None = None) -> NDArray[np.float64]: ... +@overload # ~float32, ~float32 +def tensorsolve(a: _ArrayLike[np.float32], b: _ArrayLike[np.float32], axes: Iterable[int] | None = None) -> NDArray[np.float32]: ... +@overload # +float, +float +def tensorsolve(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, axes: Iterable[int] | None = None) -> NDArray[np.float64 | Any]: ... +@overload # ~complex128, +complex128 +def tensorsolve(a: _AsArrayC128, b: _ArrayLikeComplex_co, axes: Iterable[int] | None = None) -> NDArray[np.complex128]: ... +@overload # +complex128, ~complex128 +def tensorsolve(a: _ArrayLikeComplex_co, b: _AsArrayC128, axes: Iterable[int] | None = None) -> NDArray[np.complex128]: ... +@overload # ~complex64, +complex64 +def tensorsolve(a: _ArrayLike[np.complex64], b: _ArrayLike[np.complex64 | np.float32], axes: Iterable[int] | None = None) -> NDArray[np.complex64]: ... +@overload # +complex64, ~complex64 +def tensorsolve(a: _ArrayLike[np.complex64 | np.float32], b: _ArrayLike[np.complex64], axes: Iterable[int] | None = None) -> NDArray[np.complex64]: ... +@overload # +complex, +complex +def tensorsolve(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, axes: Iterable[int] | None = None) -> NDArray[np.complex128 | Any]: ... + +# keep in sync with `tensorsolve` +@overload # ~float64, +float64 +def solve(a: _ToArrayF64, b: _ArrayLikeFloat_co) -> NDArray[np.float64]: ... +@overload # +float64, ~float64 +def solve(a: _ArrayLikeFloat_co, b: _ToArrayF64) -> NDArray[np.float64]: ... +@overload # ~float32, ~float32 +def solve(a: _ArrayLike[np.float32], b: _ArrayLike[np.float32]) -> NDArray[np.float32]: ... 
+@overload # +float, +float +def solve(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co) -> NDArray[np.float64 | Any]: ... +@overload # ~complex128, +complex128 +def solve(a: _AsArrayC128, b: _ArrayLikeComplex_co) -> NDArray[np.complex128]: ... +@overload # +complex128, ~complex128 +def solve(a: _ArrayLikeComplex_co, b: _AsArrayC128) -> NDArray[np.complex128]: ... +@overload # ~complex64, +complex64 +def solve(a: _ArrayLike[np.complex64], b: _ArrayLike[np.complex64 | np.float32]) -> NDArray[np.complex64]: ... +@overload # +complex64, ~complex64 +def solve(a: _ArrayLike[np.complex64 | np.float32], b: _ArrayLike[np.complex64]) -> NDArray[np.complex64]: ... +@overload # +complex, +complex +def solve(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co) -> NDArray[np.complex128 | Any]: ... # keep in sync with the other inverse functions and cholesky @overload # inexact32 diff --git a/numpy/typing/tests/data/reveal/linalg.pyi b/numpy/typing/tests/data/reveal/linalg.pyi index 1e8098939d43..7bafc58789ff 100644 --- a/numpy/typing/tests/data/reveal/linalg.pyi +++ b/numpy/typing/tests/data/reveal/linalg.pyi @@ -51,12 +51,18 @@ AR_f4_3d: np.ndarray[tuple[int, int, int], np.dtype[np.float32]] ### assert_type(np.linalg.tensorsolve(AR_i8, AR_i8), npt.NDArray[np.float64]) -assert_type(np.linalg.tensorsolve(AR_i8, AR_f8), npt.NDArray[np.floating]) -assert_type(np.linalg.tensorsolve(AR_c16, AR_f8), npt.NDArray[np.complexfloating]) +assert_type(np.linalg.tensorsolve(AR_i8, AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.tensorsolve(AR_f4, AR_f4), npt.NDArray[np.float32]) +assert_type(np.linalg.tensorsolve(AR_c16, AR_f8), npt.NDArray[np.complex128]) +assert_type(np.linalg.tensorsolve(AR_c8, AR_f4), npt.NDArray[np.complex64]) +assert_type(np.linalg.tensorsolve(AR_f4, AR_c8), npt.NDArray[np.complex64]) assert_type(np.linalg.solve(AR_i8, AR_i8), npt.NDArray[np.float64]) -assert_type(np.linalg.solve(AR_i8, AR_f8), npt.NDArray[np.floating]) -assert_type(np.linalg.solve(AR_c16, AR_f8), 
npt.NDArray[np.complexfloating]) +assert_type(np.linalg.solve(AR_i8, AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.solve(AR_f4, AR_f4), npt.NDArray[np.float32]) +assert_type(np.linalg.solve(AR_c16, AR_f8), npt.NDArray[np.complex128]) +assert_type(np.linalg.solve(AR_c8, AR_f4), npt.NDArray[np.complex64]) +assert_type(np.linalg.solve(AR_f4, AR_c8), npt.NDArray[np.complex64]) assert_type(np.linalg.tensorinv(AR_i8), npt.NDArray[np.float64]) assert_type(np.linalg.tensorinv(AR_f8), npt.NDArray[np.float64]) From f159cb3c2dc0c4713a46d7009b569451e30e279a Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Tue, 16 Dec 2025 16:21:28 +0100 Subject: [PATCH 1030/1718] MAINT: bump mypy from 1.19.0 to 1.19.1 (#30444) --- environment.yml | 2 +- requirements/test_requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/environment.yml b/environment.yml index 307dd4631012..774d6c0209ac 100644 --- a/environment.yml +++ b/environment.yml @@ -24,7 +24,7 @@ dependencies: - pytest-xdist - hypothesis # For type annotations - - mypy=1.19.0 + - mypy=1.19.1 - orjson # makes mypy faster # For building docs - sphinx>=4.5.0 diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index 3094d4eb8470..e3b17f0fc856 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -7,7 +7,7 @@ ninja; sys_platform != "emscripten" pytest-xdist pytest-timeout # For testing types -mypy==1.19.0 +mypy==1.19.1 # for optional f2py encoding detection charset-normalizer tzdata From 15f239479148e6c9e274afda01f9f3d49f9bd2e3 Mon Sep 17 00:00:00 2001 From: Justin Kunimune Date: Tue, 16 Dec 2025 11:50:33 -0500 Subject: [PATCH 1031/1718] DOC: Update a docstring in _npyio_impl.py The docstring for `genfromtxt` didn't make it clear when the output would be a structured array. 
It kind of made it sound like it was only when `names` is a sequence or a string, but in reality it's whenever `dtype` is structured _or_ `names` is not None. Now it is stated how the structured outputs behave and when the output is and is not structured. --- numpy/lib/_npyio_impl.py | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/numpy/lib/_npyio_impl.py b/numpy/lib/_npyio_impl.py index 0e135917cd52..d68b0e538236 100644 --- a/numpy/lib/_npyio_impl.py +++ b/numpy/lib/_npyio_impl.py @@ -1758,8 +1758,10 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, in a list or produced by a generator are treated as lines. dtype : dtype, optional Data type of the resulting array. - If None, the dtypes will be determined by the contents of each - column, individually. + If a structured dtype, the output array will be 1D and structured where + each field corresponds to one column. If None, the output array will be + structured, and the dtype of each field will be determined by the + contents of each column, individually. comments : str, optional The character used to indicate the start of a comment. All the characters occurring on a line after a comment are discarded. @@ -1788,13 +1790,15 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, Which columns to read, with 0 being the first. For example, ``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns. names : {None, True, str, sequence}, optional - If `names` is True, the field names are read from the first line after - the first `skip_header` lines. This line can optionally be preceded - by a comment delimiter. Any content before the comment delimiter is - discarded. If `names` is a sequence or a single-string of - comma-separated names, the names will be used to define the field - names in a structured dtype. If `names` is None, the names of the - dtype fields will be used, if any.
+ If `names` is True, the output will be a structured array whose field + names are read from the first line after the first `skip_header` lines. + This line can optionally be preceded by a comment delimiter. Any content + before the comment delimiter is discarded. + If `names` is a sequence or a single-string of comma-separated names, + the output is a structured array whose field names are taken from + `names`. + If `names` is None, the output is structured only if `dtype` is + structured, in which case the field names are taken from `dtype`. excludelist : sequence, optional A list of names to exclude. This list is appended to the default list ['return','file','print']. Excluded names are appended with an From e934bb97b7c6f07c61fde2e11631c63700014bda Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Tue, 16 Dec 2025 19:55:15 +0100 Subject: [PATCH 1032/1718] TYP: ``linalg.outer``: shape-typing and transparent dtypes (#30443) --- numpy/linalg/_linalg.pyi | 95 ++++++++++++++--------- numpy/typing/tests/data/reveal/linalg.pyi | 22 ++++-- 2 files changed, 71 insertions(+), 46 deletions(-) diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index af876d911826..cd2653009e89 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -14,16 +14,7 @@ from typing import ( from typing_extensions import TypeVar import numpy as np -from numpy import ( - complexfloating, - float64, - floating, - object_, - signedinteger, - timedelta64, - unsignedinteger, - vecdot, -) +from numpy import complexfloating, floating, signedinteger, unsignedinteger, vecdot from numpy._core.fromnumeric import matrix_transpose from numpy._globals import _NoValue, _NoValueType from numpy._typing import ( @@ -91,13 +82,17 @@ type _JustAnyShape = tuple[Never, ...] 
# workaround for microsoft/pyright#10232 type _tuple2[T] = tuple[T, T] type _inexact32 = np.float32 | np.complex64 -type _to_float64 = np.float64 | np.integer | np.bool +type _to_integer = np.integer | np.bool +type _to_timedelta64 = np.timedelta64 | _to_integer +type _to_float64 = np.float64 | _to_integer type _to_inexact64 = np.complex128 | _to_float64 type _to_complex = np.number | np.bool type _Array2D[ScalarT: np.generic] = np.ndarray[tuple[int, int], np.dtype[ScalarT]] type _Array3ND[ScalarT: np.generic] = np.ndarray[_AtLeast3D, np.dtype[ScalarT]] +type _ToArray1D[ScalarT: np.generic] = _SupportsArray[tuple[int], np.dtype[ScalarT]] + # anything that safe-casts (from floating) into float64/complex128 type _ToArrayF64 = _ArrayLike[_to_float64] | _NestedSequence[float] type _ToArrayC128 = _ArrayLike[_to_inexact64] | _NestedSequence[complex] @@ -280,32 +275,6 @@ def matrix_power[ScalarT: _inexact32](a: _ArrayLike[ScalarT], n: SupportsIndex) @overload # fallback def matrix_power(a: _ArrayLikeComplex_co | _ArrayLikeObject_co, n: SupportsIndex) -> np.ndarray: ... -# TODO: narrow return types -@overload -def outer(x1: _ArrayLike[Never], x2: _ArrayLike[Never], /) -> NDArray[Any]: ... -@overload -def outer(x1: _ArrayLikeBool_co, x2: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... -@overload -def outer[ScalarT: np.number](x1: _ArrayLike[ScalarT], x2: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... -@overload -def outer(x1: _ArrayLikeUInt_co, x2: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... -@overload -def outer(x1: _ArrayLikeInt_co, x2: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... -@overload -def outer(x1: _ArrayLikeFloat_co, x2: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... -@overload -def outer(x1: _ArrayLikeComplex_co, x2: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... -@overload -def outer(x1: _ArrayLikeTD64_co, x2: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... 
-@overload -def outer(x1: _ArrayLikeObject_co, x2: _ArrayLikeObject_co, /) -> NDArray[object_]: ... -@overload -def outer( - x1: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, - x2: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, - /, -) -> NDArray[Any]: ... - # NOTE: for real input the output dtype (floating/complexfloating) depends on the specific values @overload # abstract `inexact` and `floating` (excluding concrete types) def eig(a: NDArray[np.inexact[Never]]) -> EigResult: ... @@ -670,7 +639,7 @@ def tensordot( /, *, axes: int | tuple[_ShapeLike, _ShapeLike] = 2, -) -> NDArray[np.bool_]: ... +) -> NDArray[np.bool]: ... @overload def tensordot( a: _ArrayLikeInt_co, @@ -803,6 +772,56 @@ def trace( @overload # fallback def trace(x: _ArrayLikeComplex_co, /, *, offset: SupportsIndex = 0, dtype: DTypeLike | None = None) -> Any: ... +# +@overload # workaround for microsoft/pyright#10232 +def outer(x1: _ToArray1D[Never], x2: _ToArray1D[Never], /) -> np.ndarray[tuple[int, int]]: ... +@overload # +bool, +bool +def outer( + x1: _ToArray1D[np.bool] | Sequence[bool], x2: _ToArray1D[np.bool] | Sequence[bool], / +) -> np.ndarray[tuple[int, int], np.dtype[np.bool]]: ... +@overload # ~int64, +int64 +def outer( + x1: _ToArray1D[np.int64] | list[int], x2: _ToArray1D[_to_integer] | Sequence[int], / +) -> np.ndarray[tuple[int, int], np.dtype[np.int64]]: ... +@overload # +int64, ~int64 +def outer( + x1: _ToArray1D[_to_integer] | Sequence[int], x2: _ToArray1D[np.int64] | list[int], / +) -> np.ndarray[tuple[int, int], np.dtype[np.int64]]: ... +@overload # ~timedelta64, +timedelta64 +def outer( + x1: _ToArray1D[np.timedelta64] | Sequence[np.timedelta64], x2: _ToArray1D[_to_timedelta64] | Sequence[_to_timedelta64], / +) -> np.ndarray[tuple[int, int], np.dtype[np.timedelta64]]: ... 
+@overload # +timedelta64, ~timedelta64 +def outer( + x1: _ToArray1D[_to_timedelta64] | Sequence[_to_timedelta64], x2: _ToArray1D[np.timedelta64] | Sequence[np.timedelta64], / +) -> np.ndarray[tuple[int, int], np.dtype[np.timedelta64]]: ... +@overload # ~float64, +float64 +def outer( + x1: _ToArray1D[np.float64] | list[float], x2: _ToArray1D[_to_float64] | Sequence[float], / +) -> np.ndarray[tuple[int, int], np.dtype[np.float64]]: ... +@overload # +float64, ~float64 +def outer( + x1: _ToArray1D[_to_float64] | Sequence[float], x2: _ToArray1D[np.float64] | list[float], / +) -> np.ndarray[tuple[int, int], np.dtype[np.float64]]: ... +@overload # ~complex128, +complex128 +def outer( + x1: _ToArray1D[np.complex128] | list[complex], x2: _ToArray1D[_to_complex] | Sequence[complex], / +) -> np.ndarray[tuple[int, int], np.dtype[np.complex128]]: ... +@overload # +complex128, ~complex128 +def outer( + x1: _ToArray1D[_to_complex] | Sequence[complex], x2: _ToArray1D[np.complex128] | list[complex], / +) -> np.ndarray[tuple[int, int], np.dtype[np.complex128]]: ... +@overload # ~ScalarT, ~ScalarT +def outer[ScalarT: np.number | np.object_]( + x1: _ToArray1D[ScalarT] | Sequence[ScalarT], x2: _ToArray1D[ScalarT] | Sequence[ScalarT], / +) -> np.ndarray[tuple[int, int], np.dtype[ScalarT]]: ... +@overload # fallback +def outer( + x1: _ToArray1D[_to_complex] | Sequence[complex | _to_complex], + x2: _ToArray1D[_to_complex] | Sequence[complex | _to_complex], + /, +) -> np.ndarray[tuple[int, int]]: ... 
+ # TODO: narrow return types @overload def cross( diff --git a/numpy/typing/tests/data/reveal/linalg.pyi b/numpy/typing/tests/data/reveal/linalg.pyi index 7bafc58789ff..ede41f2862c2 100644 --- a/numpy/typing/tests/data/reveal/linalg.pyi +++ b/numpy/typing/tests/data/reveal/linalg.pyi @@ -13,12 +13,15 @@ from numpy.linalg._linalg import ( type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] type _Array2D[ScalarT: np.generic] = np.ndarray[tuple[int, int], np.dtype[ScalarT]] +bool_list_1d: list[bool] bool_list_2d: list[list[bool]] +int_list_1d: list[int] int_list_2d: list[list[int]] float_list_1d: list[float] float_list_2d: list[list[float]] float_list_3d: list[list[list[float]]] float_list_4d: list[list[list[list[float]]]] +complex_list_1d: list[complex] complex_list_2d: list[list[complex]] complex_list_3d: list[list[list[complex]]] bytes_list_2d: list[list[bytes]] @@ -86,14 +89,6 @@ assert_type(np.linalg.cholesky(AR_i8), npt.NDArray[np.float64]) assert_type(np.linalg.cholesky(AR_f8), npt.NDArray[np.float64]) assert_type(np.linalg.cholesky(AR_c16), npt.NDArray[np.complex128]) -assert_type(np.linalg.outer(AR_i8, AR_i8), npt.NDArray[np.int64]) -assert_type(np.linalg.outer(AR_f8, AR_f8), npt.NDArray[np.float64]) -assert_type(np.linalg.outer(AR_c16, AR_c16), npt.NDArray[np.complex128]) -assert_type(np.linalg.outer(AR_b, AR_b), npt.NDArray[np.bool]) -assert_type(np.linalg.outer(AR_O, AR_O), npt.NDArray[np.object_]) -# NOTE: Mypy incorrectly infers `ndarray[Any, Any]`, but pyright behaves correctly. 
-assert_type(np.linalg.outer(AR_i8, AR_m), npt.NDArray[np.timedelta64]) # type: ignore[assert-type] - assert_type(np.linalg.qr(AR_i8), QRResult[np.float64]) assert_type(np.linalg.qr(AR_i8, "r"), npt.NDArray[np.float64]) assert_type(np.linalg.qr(AR_i8, "raw"), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) @@ -321,6 +316,17 @@ assert_type(np.linalg.trace(float_list_2d), np.float64) assert_type(np.linalg.trace(complex_list_2d), np.complex128) assert_type(np.linalg.trace(float_list_3d), npt.NDArray[np.float64]) +assert_type(np.linalg.outer(bool_list_1d, bool_list_1d), np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(np.linalg.outer(int_list_1d, int_list_1d), np.ndarray[tuple[int, int], np.dtype[np.int64]]) +assert_type(np.linalg.outer(float_list_1d, float_list_1d), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.linalg.outer(complex_list_1d, complex_list_1d), np.ndarray[tuple[int, int], np.dtype[np.complex128]]) +assert_type(np.linalg.outer(AR_i8, AR_i8), np.ndarray[tuple[int, int], np.dtype[np.int64]]) +assert_type(np.linalg.outer(AR_f8, AR_f8), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.linalg.outer(AR_c16, AR_c16), np.ndarray[tuple[int, int], np.dtype[np.complex128]]) +assert_type(np.linalg.outer(AR_b, AR_b), np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(np.linalg.outer(AR_O, AR_O), np.ndarray[tuple[int, int], np.dtype[np.object_]]) +assert_type(np.linalg.outer(AR_i8, AR_m), np.ndarray[tuple[int, int], np.dtype[np.timedelta64]]) + assert_type(np.linalg.cross(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) assert_type(np.linalg.cross(AR_f8, AR_f8), npt.NDArray[np.floating]) assert_type(np.linalg.cross(AR_c16, AR_c16), npt.NDArray[np.complexfloating]) From a561ff88efc23d0fa28a49f10384729fed2efb05 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 16 Dec 2025 12:35:46 -0700 Subject: [PATCH 1033/1718] MAINT: Bump 
egor-tensin/cleanup-path from 4.0.1 to 4.0.2 (#30448) Bumps [egor-tensin/cleanup-path](https://github.com/egor-tensin/cleanup-path) from 4.0.1 to 4.0.2. - [Release notes](https://github.com/egor-tensin/cleanup-path/releases) - [Commits](https://github.com/egor-tensin/cleanup-path/compare/f04bc953e6823bf491cc0bdcff959c630db1b458...64ef0b5036b30ce7845058a1d7a8d0830db39b94) --- updated-dependencies: - dependency-name: egor-tensin/cleanup-path dependency-version: 4.0.2 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/cygwin.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/cygwin.yml b/.github/workflows/cygwin.yml index a8bef06a5f5c..4a2ea1308422 100644 --- a/.github/workflows/cygwin.yml +++ b/.github/workflows/cygwin.yml @@ -37,7 +37,7 @@ jobs: python-setuptools-wheel liblapack-devel liblapack0 gcc-fortran gcc-g++ git dash cmake ninja - name: Set Windows PATH - uses: egor-tensin/cleanup-path@f04bc953e6823bf491cc0bdcff959c630db1b458 # v4.0.1 + uses: egor-tensin/cleanup-path@64ef0b5036b30ce7845058a1d7a8d0830db39b94 # v4.0.2 with: dirs: 'C:\tools\cygwin\bin;C:\tools\cygwin\lib\lapack' - name: Verify that bash is Cygwin bash From f0abef4e11092355bb1f389983016f5326f15f21 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Tue, 16 Dec 2025 21:27:44 +0100 Subject: [PATCH 1034/1718] TYP: ``linalg.*norm``: shape-typing and transparent dtypes (#30446) * TYP: ``linalg.norm``: shape-typing and transparent dtypes * TYP: ``linalg.matrix_norm``: shape-typing and transparent dtypes * TYP: ``linalg.vector_norm``: shape-typing and transparent dtypes --- numpy/linalg/_linalg.pyi | 319 +++++++++++++++++++--- numpy/typing/tests/data/reveal/linalg.pyi | 102 +++++-- 2 files changed, 368 insertions(+), 53 deletions(-) diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index 
cd2653009e89..010a378ea2a7 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -7,7 +7,6 @@ from typing import ( Never, Protocol, SupportsIndex, - SupportsInt, overload, type_check_only, ) @@ -80,17 +79,30 @@ type _AtLeast4D = tuple[int, int, int, int, *tuple[int, ...]] type _JustAnyShape = tuple[Never, ...] # workaround for microsoft/pyright#10232 type _tuple2[T] = tuple[T, T] +type _Ax2 = SupportsIndex | _tuple2[SupportsIndex] type _inexact32 = np.float32 | np.complex64 +type _inexact80 = np.longdouble | np.clongdouble type _to_integer = np.integer | np.bool type _to_timedelta64 = np.timedelta64 | _to_integer type _to_float64 = np.float64 | _to_integer type _to_inexact64 = np.complex128 | _to_float64 +type _to_inexact64_unsafe = _to_inexact64 | np.datetime64 | np.timedelta64 | np.character type _to_complex = np.number | np.bool type _Array2D[ScalarT: np.generic] = np.ndarray[tuple[int, int], np.dtype[ScalarT]] type _Array3ND[ScalarT: np.generic] = np.ndarray[_AtLeast3D, np.dtype[ScalarT]] +# ==2d +type _ArrayLike2D[ScalarT: np.generic] = _SupportsArray[tuple[int, int], np.dtype[ScalarT]] | Sequence[Sequence[ScalarT]] +# >=2d +type _ArrayLike2ND[ScalarT: np.generic] = _SupportsArray[_AtLeast2D, np.dtype[ScalarT]] | _NestedSequence[Sequence[ScalarT]] +# >=3d +type _ArrayLike3ND[ScalarT: np.generic] = ( + _SupportsArray[_AtLeast3D, np.dtype[ScalarT]] + | _NestedSequence[Sequence[Sequence[ScalarT]]] +) + type _ToArray1D[ScalarT: np.generic] = _SupportsArray[tuple[int], np.dtype[ScalarT]] # anything that safe-casts (from floating) into float64/complex128 @@ -561,67 +573,300 @@ def lstsq( rcond: float | None = None, ) -> _LstSqResult[_AnyShape, np.complex128 | Any, np.float64 | Any]: ... -# TODO: narrow return types -@overload +# NOTE: This assumes that `axis` is only passed if `x` is >1d, and that `keepdims` is never passed positionally. 
+# keep in sync with `vector_norm` +@overload # +inexact64 (unsafe casting), axis=None, keepdims=False def norm( - x: ArrayLike, - ord: float | L["fro", "nuc"] | None = None, + x: _ArrayLike[_to_inexact64_unsafe] | _NestedSequence[complex], + ord: _OrderKind | None = None, axis: None = None, keepdims: L[False] = False, -) -> floating: ... -@overload +) -> np.float64: ... +@overload # +inexact64 (unsafe casting), axis= (positional), keepdims=False def norm( - x: ArrayLike, - ord: float | L["fro", "nuc"] | None, - axis: SupportsInt | SupportsIndex | tuple[int, ...] | None, - keepdims: bool = False, -) -> Any: ... -@overload + x: _ArrayLike[_to_inexact64_unsafe] | _NestedSequence[complex], + ord: _OrderKind | None, + axis: _Ax2, + keepdims: L[False] = False, +) -> NDArray[np.float64]: ... +@overload # +inexact64 (unsafe casting), axis= (keyword), keepdims=False def norm( - x: ArrayLike, - ord: float | L["fro", "nuc"] | None = None, + x: _ArrayLike[_to_inexact64_unsafe] | _NestedSequence[complex], + ord: _OrderKind | None = None, *, - axis: SupportsInt | SupportsIndex | tuple[int, ...] | None, - keepdims: bool = False, -) -> Any: ... + axis: _Ax2, + keepdims: L[False] = False, +) -> NDArray[np.float64]: ... +@overload # +inexact64 (unsafe casting), shape known, keepdims=True +def norm[ShapeT: _Shape]( + x: _SupportsArray[ShapeT, np.dtype[_to_inexact64_unsafe]], + ord: _OrderKind | None = None, + axis: _Ax2 | None = None, + *, + keepdims: L[True], +) -> np.ndarray[ShapeT, np.dtype[np.float64]]: ... +@overload # +inexact64 (unsafe casting), shape unknown, keepdims=True +def norm( + x: _ArrayLike[_to_inexact64_unsafe] | _NestedSequence[complex], + ord: _OrderKind | None = None, + axis: _Ax2 | None = None, + *, + keepdims: L[True], +) -> NDArray[np.float64]: ... +@overload # ~float16, axis=None, keepdims=False +def norm( + x: _ArrayLike[np.float16], ord: _OrderKind | None = None, axis: None = None, keepdims: L[False] = False +) -> np.float16: ... 
+@overload # ~float16, axis= (positional), keepdims=False +def norm( + x: _ArrayLike[np.float16], ord: _OrderKind | None, axis: _Ax2, keepdims: L[False] = False +) -> NDArray[np.float16]: ... +@overload # ~float16, axis= (keyword), keepdims=False +def norm( + x: _ArrayLike[np.float16], ord: _OrderKind | None = None, *, axis: _Ax2, keepdims: L[False] = False +) -> NDArray[np.float16]: ... +@overload # ~float16, shape known, keepdims=True +def norm[ShapeT: _Shape]( + x: _SupportsArray[ShapeT, np.dtype[np.float16]], ord: _OrderKind | None = None, axis: _Ax2 | None = None, *, keepdims: L[True] +) -> np.ndarray[ShapeT, np.dtype[np.float16]]: ... +@overload # ~float16, shape unknown, keepdims=True +def norm( + x: _ArrayLike[np.float16], ord: _OrderKind | None = None, axis: _Ax2 | None = None, *, keepdims: L[True] +) -> NDArray[np.float16]: ... +@overload # ~inexact32, axis=None, keepdims=False +def norm( + x: _ArrayLike[_inexact32], ord: _OrderKind | None = None, axis: None = None, keepdims: L[False] = False +) -> np.float32: ... +@overload # ~inexact32, axis= (positional), keepdims=False +def norm( + x: _ArrayLike[_inexact32], ord: _OrderKind | None, axis: _Ax2, keepdims: L[False] = False +) -> NDArray[np.float32]: ... +@overload # ~inexact32, axis= (keyword), keepdims=False +def norm( + x: _ArrayLike[_inexact32], ord: _OrderKind | None = None, *, axis: _Ax2, keepdims: L[False] = False +) -> NDArray[np.float32]: ... +@overload # ~inexact32, shape known, keepdims=True +def norm[ShapeT: _Shape]( + x: _SupportsArray[ShapeT, np.dtype[_inexact32]], ord: _OrderKind | None = None, axis: _Ax2 | None = None, *, keepdims: L[True] +) -> np.ndarray[ShapeT, np.dtype[np.float32]]: ... +@overload # ~inexact32, shape unknown, keepdims=True +def norm( + x: _ArrayLike[_inexact32], ord: _OrderKind | None = None, axis: _Ax2 | None = None, *, keepdims: L[True] +) -> NDArray[np.float32]: ... 
+@overload # ~inexact80, axis=None, keepdims=False +def norm( + x: _ArrayLike[_inexact80], ord: _OrderKind | None = None, axis: None = None, keepdims: L[False] = False +) -> np.longdouble: ... +@overload # ~inexact80, axis= (positional), keepdims=False +def norm( + x: _ArrayLike[_inexact80], ord: _OrderKind | None, axis: _Ax2, keepdims: L[False] = False +) -> NDArray[np.longdouble]: ... +@overload # ~inexact80, axis= (keyword), keepdims=False +def norm( + x: _ArrayLike[_inexact80], ord: _OrderKind | None = None, *, axis: _Ax2, keepdims: L[False] = False +) -> NDArray[np.longdouble]: ... +@overload # ~inexact80, shape known, keepdims=True +def norm[ShapeT: _Shape]( + x: _SupportsArray[ShapeT, np.dtype[_inexact80]], ord: _OrderKind | None = None, axis: _Ax2 | None = None, *, keepdims: L[True] +) -> np.ndarray[ShapeT, np.dtype[np.longdouble]]: ... +@overload # ~inexact80, shape unknown, keepdims=True +def norm( + x: _ArrayLike[_inexact80], ord: _OrderKind | None = None, axis: _Ax2 | None = None, *, keepdims: L[True] +) -> NDArray[np.longdouble]: ... +@overload # fallback +def norm(x: ArrayLike, ord: _OrderKind | None = None, axis: _Ax2 | None = None, keepdims: bool = False) -> Any: ... -# TODO: narrow return types -@overload +# +@overload # +inexact64 (unsafe casting), ?d, keepdims=False def matrix_norm( - x: ArrayLike, + x: _SupportsArray[_JustAnyShape, np.dtype[_to_inexact64_unsafe]], /, *, - ord: float | L["fro", "nuc"] | None = "fro", + ord: _OrderKind | None = "fro", keepdims: L[False] = False, -) -> floating: ... -@overload +) -> NDArray[np.float64] | Any: ... +@overload # +inexact64 (unsafe casting), 2d, keepdims=False def matrix_norm( - x: ArrayLike, + x: _ArrayLike2D[_to_inexact64_unsafe] | Sequence[Sequence[complex]], /, *, - ord: float | L["fro", "nuc"] | None = "fro", - keepdims: bool = False, -) -> Any: ... + ord: _OrderKind | None = "fro", + keepdims: L[False] = False, +) -> np.float64: ... 
+@overload # +inexact64 (unsafe casting), >2d, keepdims=False +def matrix_norm( + x: _ArrayLike3ND[_to_inexact64_unsafe] | _NestedSequence[Sequence[Sequence[complex]]], + /, + *, + ord: _OrderKind | None = "fro", + keepdims: L[False] = False, +) -> NDArray[np.float64]: ... +@overload # +inexact64 (unsafe casting), shape known, keepdims=True +def matrix_norm[ShapeT: _Shape]( + x: _SupportsArray[ShapeT, np.dtype[_to_inexact64_unsafe]], + /, + *, + ord: _OrderKind | None = "fro", + keepdims: L[True], +) -> np.ndarray[ShapeT, np.dtype[np.float64]]: ... +@overload # +inexact64 (unsafe casting), ?d, keepdims=True +def matrix_norm( + x: _ArrayLike2ND[_to_inexact64_unsafe] | _NestedSequence[Sequence[complex]], + /, + *, + ord: _OrderKind | None = "fro", + keepdims: L[True], +) -> NDArray[np.float64]: ... +@overload # ~float16, ?d, keepdims=False +def matrix_norm( + x: _SupportsArray[_JustAnyShape, np.dtype[np.float16]], /, *, ord: _OrderKind | None = None, keepdims: L[False] = False +) -> NDArray[np.float16] | Any: ... +@overload # ~float16, 2d, keepdims=False +def matrix_norm( + x: _ArrayLike2D[np.float16], /, *, ord: _OrderKind | None = None, keepdims: L[False] = False +) -> np.float16: ... +@overload # ~float16, >2d, keepdims=False +def matrix_norm( + x: _ArrayLike3ND[np.float16], /, *, ord: _OrderKind | None = None, keepdims: L[False] = False +) -> NDArray[np.float16]: ... +@overload # ~float16, shape known, keepdims=True +def matrix_norm[ShapeT: _Shape]( + x: _SupportsArray[ShapeT, np.dtype[np.float16]], /, *, ord: _OrderKind | None = None, keepdims: L[True] +) -> np.ndarray[ShapeT, np.dtype[np.float16]]: ... +@overload # ~float16, ?d, keepdims=True +def matrix_norm( + x: _ArrayLike2ND[np.float16], /, *, ord: _OrderKind | None = None, keepdims: L[True] +) -> NDArray[np.float16]: ... 
+@overload # ~inexact32, ?d, keepdims=False +def matrix_norm( + x: _SupportsArray[_JustAnyShape, np.dtype[_inexact32]], /, *, ord: _OrderKind | None = None, keepdims: L[False] = False +) -> NDArray[np.float32] | Any: ... +@overload # ~inexact32, 2d, keepdims=False +def matrix_norm( + x: _ArrayLike2D[_inexact32], /, *, ord: _OrderKind | None = None, keepdims: L[False] = False +) -> np.float32: ... +@overload # ~inexact32, >2d, keepdims=False +def matrix_norm( + x: _ArrayLike3ND[_inexact32], /, *, ord: _OrderKind | None = None, keepdims: L[False] = False +) -> NDArray[np.float32]: ... +@overload # ~inexact32, shape known, keepdims=True +def matrix_norm[ShapeT: _Shape]( + x: _SupportsArray[ShapeT, np.dtype[_inexact32]], /, *, ord: _OrderKind | None = None, keepdims: L[True] +) -> np.ndarray[ShapeT, np.dtype[np.float32]]: ... +@overload # ~inexact32, ?d, keepdims=True +def matrix_norm( + x: _ArrayLike2ND[_inexact32], /, *, ord: _OrderKind | None = None, keepdims: L[True] +) -> NDArray[np.float32]: ... +@overload # ~inexact80, ?d, keepdims=False +def matrix_norm( + x: _SupportsArray[_JustAnyShape, np.dtype[_inexact80]], /, *, ord: _OrderKind | None = None, keepdims: L[False] = False +) -> NDArray[np.longdouble] | Any: ... +@overload # ~inexact80, 2d, keepdims=False +def matrix_norm( + x: _ArrayLike2D[_inexact80], /, *, ord: _OrderKind | None = None, keepdims: L[False] = False +) -> np.longdouble: ... +@overload # ~inexact80, >2d, keepdims=False +def matrix_norm( + x: _ArrayLike3ND[_inexact80], /, *, ord: _OrderKind | None = None, keepdims: L[False] = False +) -> NDArray[np.longdouble]: ... +@overload # ~inexact80, shape known, keepdims=True +def matrix_norm[ShapeT: _Shape]( + x: _SupportsArray[ShapeT, np.dtype[_inexact80]], /, *, ord: _OrderKind | None = None, keepdims: L[True] +) -> np.ndarray[ShapeT, np.dtype[np.longdouble]]: ... 
+@overload # ~inexact80, ?d, keepdims=True +def matrix_norm( + x: _ArrayLike2ND[_inexact80], /, *, ord: _OrderKind | None = None, keepdims: L[True] +) -> NDArray[np.longdouble]: ... +@overload # fallback +def matrix_norm(x: ArrayLike, /, *, ord: _OrderKind | None = None, keepdims: bool = False) -> Any: ... -# TODO: narrow return types -@overload +# keep in sync with `norm` +@overload # +inexact64 (unsafe casting), axis=None, keepdims=False def vector_norm( - x: ArrayLike, + x: _ArrayLike[_to_inexact64_unsafe] | _NestedSequence[complex], /, *, + keepdims: L[False] = False, axis: None = None, ord: float | None = 2, +) -> np.float64: ... +@overload # +inexact64 (unsafe casting), axis=, keepdims=False +def vector_norm( + x: _ArrayLike[_to_inexact64_unsafe] | _NestedSequence[complex], + /, + *, + axis: _Ax2, keepdims: L[False] = False, -) -> floating: ... -@overload + ord: float | None = 2, +) -> NDArray[np.float64]: ... +@overload # +inexact64 (unsafe casting), shape known, keepdims=True +def vector_norm[ShapeT: _Shape]( + x: _SupportsArray[ShapeT, np.dtype[_to_inexact64_unsafe]], + /, + *, + axis: _Ax2 | None = None, + keepdims: L[True], + ord: float | None = 2, +) -> np.ndarray[ShapeT, np.dtype[np.float64]]: ... +@overload # +inexact64 (unsafe casting), shape unknown, keepdims=True def vector_norm( - x: ArrayLike, + x: _ArrayLike[_to_inexact64_unsafe] | _NestedSequence[complex], /, *, - axis: SupportsInt | SupportsIndex | tuple[int, ...], + axis: _Ax2 | None = None, + keepdims: L[True], ord: float | None = 2, - keepdims: bool = False, -) -> Any: ... +) -> NDArray[np.float64]: ... +@overload # ~float16, axis=None, keepdims=False +def vector_norm( + x: _ArrayLike[np.float16], /, *, axis: None = None, keepdims: L[False] = False, ord: float | None = 2 +) -> np.float16: ... +@overload # ~float16, axis= keepdims=False +def vector_norm( + x: _ArrayLike[np.float16], /, *, axis: _Ax2, keepdims: L[False] = False, ord: float | None = 2 +) -> NDArray[np.float16]: ... 
+@overload # ~float16, shape known, keepdims=True +def vector_norm[ShapeT: _Shape]( + x: _SupportsArray[ShapeT, np.dtype[np.float16]], /, *, axis: _Ax2 | None = None, keepdims: L[True], ord: float | None = 2 +) -> np.ndarray[ShapeT, np.dtype[np.float16]]: ... +@overload # ~float16, shape unknown, keepdims=True +def vector_norm( + x: _ArrayLike[np.float16], /, *, axis: _Ax2 | None = None, keepdims: L[True], ord: float | None = 2 +) -> NDArray[np.float16]: ... +@overload # ~inexact32, axis=None, keepdims=False +def vector_norm( + x: _ArrayLike[_inexact32], /, *, axis: None = None, keepdims: L[False] = False, ord: float | None = 2 +) -> np.float32: ... +@overload # ~inexact32, axis= keepdims=False +def vector_norm( + x: _ArrayLike[_inexact32], /, *, axis: _Ax2, keepdims: L[False] = False, ord: float | None = 2 +) -> NDArray[np.float32]: ... +@overload # ~inexact32, shape known, keepdims=True +def vector_norm[ShapeT: _Shape]( + x: _SupportsArray[ShapeT, np.dtype[_inexact32]], /, *, axis: _Ax2 | None = None, keepdims: L[True], ord: float | None = 2 +) -> np.ndarray[ShapeT, np.dtype[np.float32]]: ... +@overload # ~inexact32, shape unknown, keepdims=True +def vector_norm( + x: _ArrayLike[_inexact32], /, *, axis: _Ax2 | None = None, keepdims: L[True], ord: float | None = 2 +) -> NDArray[np.float32]: ... +@overload # ~inexact80, axis=None, keepdims=False +def vector_norm( + x: _ArrayLike[_inexact80], /, *, axis: None = None, keepdims: L[False] = False, ord: float | None = 2 +) -> np.longdouble: ... +@overload # ~inexact80, axis=, keepdims=False +def vector_norm( + x: _ArrayLike[_inexact80], /, *, axis: _Ax2, keepdims: L[False] = False, ord: float | None = 2 +) -> NDArray[np.longdouble]: ... +@overload # ~inexact80, shape known, keepdims=True +def vector_norm[ShapeT: _Shape]( + x: _SupportsArray[ShapeT, np.dtype[_inexact80]], /, *, axis: _Ax2 | None = None, keepdims: L[True], ord: float | None = 2 +) -> np.ndarray[ShapeT, np.dtype[np.longdouble]]: ... 
+@overload # ~inexact80, shape unknown, keepdims=True +def vector_norm( + x: _ArrayLike[_inexact80], /, *, axis: _Ax2 | None = None, keepdims: L[True], ord: float | None = 2 +) -> NDArray[np.longdouble]: ... +@overload # fallback +def vector_norm(x: ArrayLike, /, *, axis: _Ax2 | None = None, keepdims: bool = False, ord: float | None = 2) -> Any: ... # keep in sync with numpy._core.numeric.tensordot (ignoring `/, *`) @overload diff --git a/numpy/typing/tests/data/reveal/linalg.pyi b/numpy/typing/tests/data/reveal/linalg.pyi index ede41f2862c2..914dff2ab703 100644 --- a/numpy/typing/tests/data/reveal/linalg.pyi +++ b/numpy/typing/tests/data/reveal/linalg.pyi @@ -31,13 +31,18 @@ AR_any: np.ndarray AR_f_: npt.NDArray[np.floating] AR_c_: npt.NDArray[np.complexfloating] AR_i8: npt.NDArray[np.int64] +AR_f2: npt.NDArray[np.float16] AR_f4: npt.NDArray[np.float32] AR_f8: npt.NDArray[np.float64] +AR_f10: npt.NDArray[np.longdouble] AR_c8: npt.NDArray[np.complex64] AR_c16: npt.NDArray[np.complex128] +AR_c20: npt.NDArray[np.clongdouble] AR_O: npt.NDArray[np.object_] +AR_M: npt.NDArray[np.datetime64] AR_m: npt.NDArray[np.timedelta64] -AR_S: npt.NDArray[np.str_] +AR_S: npt.NDArray[np.bytes_] +AR_U: npt.NDArray[np.str_] AR_b: npt.NDArray[np.bool] SC_f8: np.float64 @@ -47,9 +52,12 @@ AR_f8_2d: np.ndarray[tuple[int, int], np.dtype[np.float64]] AR_f8_3d: np.ndarray[tuple[int, int, int], np.dtype[np.float64]] AR_f8_4d: np.ndarray[tuple[int, int, int, int], np.dtype[np.float64]] +AR_f2_2d: np.ndarray[tuple[int, int], np.dtype[np.float16]] AR_f4_1d: np.ndarray[tuple[int], np.dtype[np.float32]] AR_f4_2d: np.ndarray[tuple[int, int], np.dtype[np.float32]] AR_f4_3d: np.ndarray[tuple[int, int, int], np.dtype[np.float32]] +AR_f10_2d: np.ndarray[tuple[int, int], np.dtype[np.longdouble]] +AR_f10_3d: np.ndarray[tuple[int, int, int], np.dtype[np.longdouble]] ### @@ -262,21 +270,83 @@ assert_type( tuple[_Array2D[np.float32], _Array1D[np.float32], np.int32, _Array1D[np.float32]], ) 
-assert_type(np.linalg.norm(AR_i8), np.floating) -assert_type(np.linalg.norm(AR_f8), np.floating) -assert_type(np.linalg.norm(AR_c16), np.floating) -assert_type(np.linalg.norm(AR_S), np.floating) -assert_type(np.linalg.norm(AR_f8, axis=0), Any) - -assert_type(np.linalg.matrix_norm(AR_i8), np.floating) -assert_type(np.linalg.matrix_norm(AR_f8), np.floating) -assert_type(np.linalg.matrix_norm(AR_c16), np.floating) -assert_type(np.linalg.matrix_norm(AR_S), np.floating) - -assert_type(np.linalg.vector_norm(AR_i8), np.floating) -assert_type(np.linalg.vector_norm(AR_f8), np.floating) -assert_type(np.linalg.vector_norm(AR_c16), np.floating) -assert_type(np.linalg.vector_norm(AR_S), np.floating) +assert_type(np.linalg.norm(AR_i8), np.float64) +assert_type(np.linalg.norm(AR_f8), np.float64) +assert_type(np.linalg.norm(AR_c16), np.float64) +# Mypy incorrectly infers `Any` for datetime64 and timedelta64, but pyright behaves correctly. +assert_type(np.linalg.norm(AR_M), np.float64) # type: ignore[assert-type] +assert_type(np.linalg.norm(AR_m), np.float64) # type: ignore[assert-type] +assert_type(np.linalg.norm(AR_U), np.float64) +assert_type(np.linalg.norm(AR_S), np.float64) +assert_type(np.linalg.norm(AR_f8, 0, 1), npt.NDArray[np.float64]) +assert_type(np.linalg.norm(AR_f8, axis=0), npt.NDArray[np.float64]) +assert_type(np.linalg.norm(AR_f8, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.linalg.norm(AR_f8_2d, keepdims=True), _Array2D[np.float64]) +assert_type(np.linalg.norm(AR_f2), np.float16) +assert_type(np.linalg.norm(AR_f2, 0, 1), npt.NDArray[np.float16]) +assert_type(np.linalg.norm(AR_f2, axis=1), npt.NDArray[np.float16]) +assert_type(np.linalg.norm(AR_f2, keepdims=True), npt.NDArray[np.float16]) +assert_type(np.linalg.norm(AR_f2_2d, keepdims=True), _Array2D[np.float16]) +assert_type(np.linalg.norm(AR_f4), np.float32) +assert_type(np.linalg.norm(AR_c8), np.float32) +assert_type(np.linalg.norm(AR_f4, 0, 1), npt.NDArray[np.float32]) 
+assert_type(np.linalg.norm(AR_f4, axis=1), npt.NDArray[np.float32]) +assert_type(np.linalg.norm(AR_f4, keepdims=True), npt.NDArray[np.float32]) +assert_type(np.linalg.norm(AR_f4_2d, keepdims=True), _Array2D[np.float32]) +assert_type(np.linalg.norm(AR_f10), np.longdouble) +assert_type(np.linalg.norm(AR_c20), np.longdouble) +assert_type(np.linalg.norm(AR_f10, 0, 1), npt.NDArray[np.longdouble]) +assert_type(np.linalg.norm(AR_f10, axis=1), npt.NDArray[np.longdouble]) +assert_type(np.linalg.norm(AR_f10, keepdims=True), npt.NDArray[np.longdouble]) +assert_type(np.linalg.norm(AR_f10_2d, keepdims=True), _Array2D[np.longdouble]) + +assert_type(np.linalg.matrix_norm(AR_i8), npt.NDArray[np.float64] | Any) +assert_type(np.linalg.matrix_norm(AR_f8), npt.NDArray[np.float64] | Any) +assert_type(np.linalg.matrix_norm(AR_c16), npt.NDArray[np.float64] | Any) +assert_type(np.linalg.matrix_norm(AR_U), npt.NDArray[np.float64] | Any) +assert_type(np.linalg.matrix_norm(AR_S), npt.NDArray[np.float64] | Any) +assert_type(np.linalg.matrix_norm(AR_f8_2d), np.float64) +assert_type(np.linalg.matrix_norm(AR_f8_3d), npt.NDArray[np.float64]) +assert_type(np.linalg.matrix_norm(AR_f8_2d, keepdims=True), _Array2D[np.float64]) +assert_type(np.linalg.matrix_norm(AR_f4), npt.NDArray[np.float32] | Any) +assert_type(np.linalg.matrix_norm(AR_c8), npt.NDArray[np.float32] | Any) +assert_type(np.linalg.matrix_norm(AR_f4_2d), np.float32) +assert_type(np.linalg.matrix_norm(AR_f4_3d), npt.NDArray[np.float32]) +assert_type(np.linalg.matrix_norm(AR_f4_2d, keepdims=True), _Array2D[np.float32]) +assert_type(np.linalg.matrix_norm(AR_f10), npt.NDArray[np.longdouble] | Any) +assert_type(np.linalg.matrix_norm(AR_c20), npt.NDArray[np.longdouble] | Any) +assert_type(np.linalg.matrix_norm(AR_f10_2d), np.longdouble) +assert_type(np.linalg.matrix_norm(AR_f10_3d), npt.NDArray[np.longdouble]) +assert_type(np.linalg.matrix_norm(AR_f10_2d, keepdims=True), _Array2D[np.longdouble]) 
+assert_type(np.linalg.matrix_norm(complex_list_2d), np.float64) +assert_type(np.linalg.matrix_norm(complex_list_3d), npt.NDArray[np.float64]) +assert_type(np.linalg.matrix_norm(complex_list_2d, keepdims=True), npt.NDArray[np.float64]) + +assert_type(np.linalg.vector_norm(AR_i8), np.float64) +assert_type(np.linalg.vector_norm(AR_f8), np.float64) +assert_type(np.linalg.vector_norm(AR_c16), np.float64) +# Mypy incorrectly infers `Any` for datetime64 and timedelta64, but pyright behaves correctly. +assert_type(np.linalg.vector_norm(AR_M), np.float64) # type: ignore[assert-type] +assert_type(np.linalg.vector_norm(AR_m), np.float64) # type: ignore[assert-type] +assert_type(np.linalg.vector_norm(AR_U), np.float64) +assert_type(np.linalg.vector_norm(AR_S), np.float64) +assert_type(np.linalg.vector_norm(AR_f8, axis=0), npt.NDArray[np.float64]) +assert_type(np.linalg.vector_norm(AR_f8, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.linalg.vector_norm(AR_f8_2d, keepdims=True), _Array2D[np.float64]) +assert_type(np.linalg.vector_norm(AR_f2), np.float16) +assert_type(np.linalg.vector_norm(AR_f2, axis=1), npt.NDArray[np.float16]) +assert_type(np.linalg.vector_norm(AR_f2, keepdims=True), npt.NDArray[np.float16]) +assert_type(np.linalg.vector_norm(AR_f2_2d, keepdims=True), _Array2D[np.float16]) +assert_type(np.linalg.vector_norm(AR_f4), np.float32) +assert_type(np.linalg.vector_norm(AR_c8), np.float32) +assert_type(np.linalg.vector_norm(AR_f4, axis=1), npt.NDArray[np.float32]) +assert_type(np.linalg.vector_norm(AR_f4, keepdims=True), npt.NDArray[np.float32]) +assert_type(np.linalg.vector_norm(AR_f4_2d, keepdims=True), _Array2D[np.float32]) +assert_type(np.linalg.vector_norm(AR_f10), np.longdouble) +assert_type(np.linalg.vector_norm(AR_c20), np.longdouble) +assert_type(np.linalg.vector_norm(AR_f10, axis=1), npt.NDArray[np.longdouble]) +assert_type(np.linalg.vector_norm(AR_f10, keepdims=True), npt.NDArray[np.longdouble]) +assert_type(np.linalg.vector_norm(AR_f10_2d, 
keepdims=True), _Array2D[np.longdouble]) assert_type(np.linalg.tensordot(AR_b, AR_b), npt.NDArray[np.bool]) assert_type(np.linalg.tensordot(AR_i8, AR_i8), npt.NDArray[np.int64]) From 797563009bbcefeacaa0e1afbeb82ed55b175b31 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 17 Dec 2025 16:10:04 +0100 Subject: [PATCH 1035/1718] CI: mypy_primer workaround for Pandas' fixed py311 mypy config (#30455) --- .github/workflows/mypy_primer.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/mypy_primer.yml b/.github/workflows/mypy_primer.yml index b0b57cb93d7b..b95cab9d71d4 100644 --- a/.github/workflows/mypy_primer.yml +++ b/.github/workflows/mypy_primer.yml @@ -65,6 +65,7 @@ jobs: --known-dependency-selector numpy \ --old-prepend-path numpy_base --new-prepend-path numpy_to_test \ --num-shards 1 --shard-index ${{ matrix.shard-index }} \ + --additional-flags="--python-version=3.12" \ --debug \ --output concise \ | tee diff_${{ matrix.shard-index }}.txt From e2421814a45bad4db6a1b6df2c3806eae02a1fd8 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 17 Dec 2025 16:12:05 +0100 Subject: [PATCH 1036/1718] TYP: restore ``generic.__hash__`` (#30456) --- numpy/__init__.pyi | 14 ++++++++++++++ numpy/typing/tests/data/pass/ndarray_misc.py | 5 +++++ tools/stubtest/allowlist.txt | 2 ++ 3 files changed, 21 insertions(+) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 4128dd3cb9ec..b344eb7613ad 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -3662,6 +3662,12 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): @abstractmethod def __new__(cls, /, *args: Any, **kwargs: Any) -> Self: ... + # NOTE: Technically this doesn't exist at runtime, but it is unlikely to lead to + # type-unsafe situations (the abstract scalar types cannot be instantiated + # themselves) and is convenient to have, so we include it regardless. See + # https://github.com/numpy/numpy/issues/30445 for use-cases and discussion. 
+ def __hash__(self, /) -> int: ... + def __buffer__(self, flags: int, /) -> memoryview: ... @overload @@ -5807,6 +5813,10 @@ class bytes_(character[bytes], bytes): # type: ignore[misc] @overload def __new__(cls, value: str, /, encoding: str, errors: str = "strict") -> Self: ... + # + @override + def __hash__(self, /) -> int: ... + # def __bytes__(self, /) -> bytes: ... @@ -5816,6 +5826,10 @@ class str_(character[str], str): # type: ignore[misc] @overload def __new__(cls, value: bytes, /, encoding: str, errors: str = "strict") -> Self: ... + # + @override + def __hash__(self, /) -> int: ... + # See `numpy._typing._ufunc` for more concrete nin-/nout-specific stubs @final class ufunc: diff --git a/numpy/typing/tests/data/pass/ndarray_misc.py b/numpy/typing/tests/data/pass/ndarray_misc.py index d50becb20ee4..0a9302a2a116 100644 --- a/numpy/typing/tests/data/pass/ndarray_misc.py +++ b/numpy/typing/tests/data/pass/ndarray_misc.py @@ -9,6 +9,7 @@ from __future__ import annotations import operator +from collections.abc import Hashable from typing import Any, cast import numpy as np @@ -192,3 +193,7 @@ class IntSubClass(np.ndarray[tuple[Any, ...], np.dtype[np.intp]]): ... 
A_void: npt.NDArray[np.void] = np.empty(3, [("yop", float), ("yap", float)]) A_void["yop"] = A_float[:, 0] A_void["yap"] = A_float[:, 1] + +# regression test for https://github.com/numpy/numpy/issues/30445 +def f(x: np.generic) -> Hashable: + return x diff --git a/tools/stubtest/allowlist.txt b/tools/stubtest/allowlist.txt index 58110d66046e..e8a746c3a925 100644 --- a/tools/stubtest/allowlist.txt +++ b/tools/stubtest/allowlist.txt @@ -39,6 +39,8 @@ numpy\.(\w+\.)*integer\.bit_count numpy\.(\w+\.)*floating\.as_integer_ratio numpy\.(\w+\.)*floating\.is_integer numpy\.(\w+\.)*complexfloating\.__complex__ +# https://github.com/numpy/numpy/issues/30445#issuecomment-3665484402 +numpy\.(\w+\.)*generic\.__hash__ # intentionally missing deprecated module stubs numpy\.core\._dtype From 14670df928db43105d7b2ef5e8ce35ee8e3c5dc3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 17 Dec 2025 10:55:12 -0700 Subject: [PATCH 1037/1718] MAINT: Bump github/codeql-action from 4.31.8 to 4.31.9 (#30458) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 4.31.8 to 4.31.9. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/1b168cd39490f61582a9beae412bb7057a6b2c4e...5d4e8d1aca955e8d8589aabd499c5cae939e33c7) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 4.31.9 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index aaa14b37588a..c7b935c56595 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@1b168cd39490f61582a9beae412bb7057a6b2c4e # v4.31.8 + uses: github/codeql-action/init@5d4e8d1aca955e8d8589aabd499c5cae939e33c7 # v4.31.9 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@1b168cd39490f61582a9beae412bb7057a6b2c4e # v4.31.8 + uses: github/codeql-action/autobuild@5d4e8d1aca955e8d8589aabd499c5cae939e33c7 # v4.31.9 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@1b168cd39490f61582a9beae412bb7057a6b2c4e # v4.31.8 + uses: github/codeql-action/analyze@5d4e8d1aca955e8d8589aabd499c5cae939e33c7 # v4.31.9 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 421790b4ae1e..a1e9a7aca6eb 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
- name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@1b168cd39490f61582a9beae412bb7057a6b2c4e # v2.1.27 + uses: github/codeql-action/upload-sarif@5d4e8d1aca955e8d8589aabd499c5cae939e33c7 # v2.1.27 with: sarif_file: results.sarif From 62a49250514d8f3922f5934aebe3a37dc6b619b2 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 17 Dec 2025 18:57:40 +0100 Subject: [PATCH 1038/1718] CI: have runtime jobs ignore ``tools/stubtest/**`` (#30457) --- .github/workflows/compiler_sanitizers.yml | 1 + .github/workflows/cygwin.yml | 1 + .github/workflows/emscripten.yml | 1 + .github/workflows/linux-ppc64le.yml | 7 ++++--- .github/workflows/linux.yml | 1 + .github/workflows/linux_blas.yml | 1 + .github/workflows/linux_qemu.yml | 1 + .github/workflows/linux_simd.yml | 1 + .github/workflows/macos.yml | 1 + .github/workflows/wheels.yml | 1 + .github/workflows/windows.yml | 1 + 11 files changed, 14 insertions(+), 3 deletions(-) diff --git a/.github/workflows/compiler_sanitizers.yml b/.github/workflows/compiler_sanitizers.yml index 8ad89759c906..9db9313c34e3 100644 --- a/.github/workflows/compiler_sanitizers.yml +++ b/.github/workflows/compiler_sanitizers.yml @@ -12,6 +12,7 @@ on: - '**.pyi' - '**.md' - '**.rst' + - 'tools/stubtest/**' defaults: run: diff --git a/.github/workflows/cygwin.yml b/.github/workflows/cygwin.yml index 4a2ea1308422..a0a914c0204d 100644 --- a/.github/workflows/cygwin.yml +++ b/.github/workflows/cygwin.yml @@ -8,6 +8,7 @@ on: - '**.pyi' - '**.md' - '**.rst' + - 'tools/stubtest/**' concurrency: group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index 099f5f67336b..4684537a9e8f 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -9,6 +9,7 @@ on: - '**.pyi' - '**.md' - '**.rst' + - 'tools/stubtest/**' permissions: contents: read # to fetch code (actions/checkout) diff --git 
a/.github/workflows/linux-ppc64le.yml b/.github/workflows/linux-ppc64le.yml index 7e75786b0bea..c12165287a65 100644 --- a/.github/workflows/linux-ppc64le.yml +++ b/.github/workflows/linux-ppc64le.yml @@ -9,6 +9,7 @@ on: - '**.pyi' - '**.md' - '**.rst' + - 'tools/stubtest/**' workflow_dispatch: permissions: @@ -25,7 +26,7 @@ jobs: # For more details, see: https://github.com/numpy/numpy/issues/29125 if: github.repository == 'numpy/numpy' runs-on: ubuntu-24.04-ppc64le-p10 - + strategy: fail-fast: false matrix: @@ -34,14 +35,14 @@ jobs: args: "-Dallow-noblas=false" - name: "clang" args: "-Dallow-noblas=false" - + name: "${{ matrix.config.name }}" steps: - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: submodules: recursive fetch-tags: true - + - name: Install dependencies run: | sudo apt update diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index c42e3e95a5d7..fed1fba04712 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -18,6 +18,7 @@ on: - '**.pyi' - '**.md' - '**.rst' + - 'tools/stubtest/**' defaults: run: diff --git a/.github/workflows/linux_blas.yml b/.github/workflows/linux_blas.yml index 88c5322dd754..26d98fe813d7 100644 --- a/.github/workflows/linux_blas.yml +++ b/.github/workflows/linux_blas.yml @@ -44,6 +44,7 @@ on: - '**.pyi' - '**.md' - '**.rst' + - 'tools/stubtest/**' defaults: run: diff --git a/.github/workflows/linux_qemu.yml b/.github/workflows/linux_qemu.yml index f82bf1aa2626..21a0aa722319 100644 --- a/.github/workflows/linux_qemu.yml +++ b/.github/workflows/linux_qemu.yml @@ -18,6 +18,7 @@ on: - '**.pyi' - '**.md' - '**.rst' + - 'tools/stubtest/**' workflow_dispatch: defaults: diff --git a/.github/workflows/linux_simd.yml b/.github/workflows/linux_simd.yml index 92e46c8053b8..46feb4abb3e4 100644 --- a/.github/workflows/linux_simd.yml +++ b/.github/workflows/linux_simd.yml @@ -35,6 +35,7 @@ on: - '**.pyi' - '**.md' - '**.rst' + - 'tools/stubtest/**' defaults: run: 
diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 15d3cf947222..3a5a2dc4792f 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -9,6 +9,7 @@ on: - '**.pyi' - '**.md' - '**.rst' + - 'tools/stubtest/**' permissions: contents: read # to fetch code (actions/checkout) diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 27ee88d80677..f9882f6fcbc3 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -13,6 +13,7 @@ on: - '**.pyi' - '**.md' - '**.rst' + - 'tools/stubtest/**' workflow_dispatch: concurrency: diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 3ef295cc8f5b..cebb42fbdf08 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -9,6 +9,7 @@ on: - '**.pyi' - '**.md' - '**.rst' + - 'tools/stubtest/**' concurrency: group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} From e8e909f249484a5233362d4087dda52028febadc Mon Sep 17 00:00:00 2001 From: Lucas Colley Date: Wed, 17 Dec 2025 18:44:16 +0000 Subject: [PATCH 1039/1718] ENH: Pixi package definitions for downstream development (#30381) Add Pixi package definitions for downstream development --- .github/workflows/pixi-packages.yml | 39 +++++++++++++++++++ .../upcoming_changes/30381.new_feature.rst | 11 ++++++ pixi-packages/README.md | 37 ++++++++++++++++++ pixi-packages/asan/pixi.toml | 26 +++++++++++++ pixi-packages/default/pixi.toml | 21 ++++++++++ 5 files changed, 134 insertions(+) create mode 100644 .github/workflows/pixi-packages.yml create mode 100644 doc/release/upcoming_changes/30381.new_feature.rst create mode 100644 pixi-packages/README.md create mode 100644 pixi-packages/asan/pixi.toml create mode 100644 pixi-packages/default/pixi.toml diff --git a/.github/workflows/pixi-packages.yml b/.github/workflows/pixi-packages.yml new file mode 100644 index 000000000000..7c4c704282fd --- /dev/null +++ b/.github/workflows/pixi-packages.yml @@ 
-0,0 +1,39 @@ +name: Pixi packages tests + +on: + pull_request: + branches: + - main + - maintenance/** + paths-ignore: + - '**.pyi' + - '**.md' + - '**.rst' + +permissions: + contents: read # to fetch code (actions/checkout) + +jobs: + build_packages: + name: Build Pixi packages + runs-on: ${{ matrix.runs-on }} + strategy: + fail-fast: false + matrix: + runs-on: [ubuntu-latest, macos-15] + package_variant: [asan, default] + if: github.repository == 'numpy/numpy' + steps: + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + with: + submodules: recursive + fetch-tags: true + persist-credentials: false + + - uses: prefix-dev/setup-pixi@ba3bb36eb2066252b2363392b7739741bb777659 # v0.8.1 + with: + pixi-version: v0.60.0 + run-install: false + + - name: Build + run: pixi build --path="pixi-packages/${{ matrix.package_variant }}" diff --git a/doc/release/upcoming_changes/30381.new_feature.rst b/doc/release/upcoming_changes/30381.new_feature.rst new file mode 100644 index 000000000000..8dc3ce34e3bc --- /dev/null +++ b/doc/release/upcoming_changes/30381.new_feature.rst @@ -0,0 +1,11 @@ +Pixi package definitions +------------------------ +Pixi package definitions have been added for different kinds +of from-source builds of NumPy. These can be used in +downstream Pixi workspaces via the ``pixi-build`` feature. + +Definitions for both ``default`` and AddressSanitizer-instrumented +(``asan``) builds are available in the source code under the +``pixi-packages/`` directory. + +``linux-64`` and ``osx-arm64`` platforms are supported. diff --git a/pixi-packages/README.md b/pixi-packages/README.md new file mode 100644 index 000000000000..202635cffa1f --- /dev/null +++ b/pixi-packages/README.md @@ -0,0 +1,37 @@ +# CPython Pixi packages + +This directory contains definitions for [Pixi packages](https://pixi.sh/latest/reference/pixi_manifest/#the-package-section) +which can be built from the NumPy source code. 
+ +Downstream developers can make use of these packages by adding them as Git dependencies in a +[Pixi workspace](https://pixi.sh/latest/first_workspace/), like: + +```toml +[dependencies] +numpy = { git = "https://github.com/numpy/numpy", subdirectory = "pixi-packages/asan" } +``` + +This is particularly useful when developers need to build NumPy from source +(for example, for an ASan-instrumented build), as it does not require any manual +clone or build steps. Instead, Pixi will automatically handle both the build +and installation of the package. + +See https://github.com/scipy/scipy/pull/24066 for a full example of downstream use. + +Each package definition is contained in a subdirectory. +Currently defined package variants: + +- `default` +- `asan`: ASan-instrumented build with `-Db_sanitize=address` + +## Maintenance + +- Keep host dependency requirements up to date +- For dependencies on upstream CPython Pixi packages, keep the git revision at a compatible version + +## Opportunities for future improvement + +- More package variants (such as TSan, UBSan) +- Support for Windows +- Using a single `pixi.toml` for all package variants is blocked on https://github.com/prefix-dev/pixi/issues/2813 +- Consider pinning dependency versions to guard against upstream breakages over time diff --git a/pixi-packages/asan/pixi.toml b/pixi-packages/asan/pixi.toml new file mode 100644 index 000000000000..19a6a6553806 --- /dev/null +++ b/pixi-packages/asan/pixi.toml @@ -0,0 +1,26 @@ +[workspace] +channels = ["https://prefix.dev/conda-forge"] +platforms = ["osx-arm64", "linux-64"] +preview = ["pixi-build"] + +[package.build] +source.path = "../.." 
+ +[package.build.backend] +name = "pixi-build-python" +version = "*" + +[package.build.config] +extra-input-globs = ["**/*.c.src"] +compilers = ["c", "cxx"] +env.ASAN_OPTIONS = "detect_leaks=0:symbolize=1:strict_init_order=true:allocator_may_return_null=1:use_sigaltstack=0" +extra-args = ["-Csetup-args=-Db_sanitize=address", "-Csetup-args=-Dbuildtype=debug"] + +[package.host-dependencies] +python.git = "https://github.com/python/cpython" +python.subdirectory = "Tools/pixi-packages/asan" +python.rev = "8bb5b6e8ce61da41b5affd5eb12d9cc46b5af448" + +meson-python = "*" +cython = "*" +uv = "*" # used to invoke the wheel build diff --git a/pixi-packages/default/pixi.toml b/pixi-packages/default/pixi.toml new file mode 100644 index 000000000000..0b5d53e41ef2 --- /dev/null +++ b/pixi-packages/default/pixi.toml @@ -0,0 +1,21 @@ +[workspace] +channels = ["https://prefix.dev/conda-forge"] +platforms = ["osx-arm64", "linux-64"] +preview = ["pixi-build"] + +[package.build] +source.path = "../.." + +[package.build.backend] +name = "pixi-build-python" +version = "*" + +[package.build.config] +extra-input-globs = ["**/*.c.src"] +compilers = ["c", "cxx"] + +[package.host-dependencies] +python = "*" +meson-python = "*" +cython = "*" +uv = "*" # used to invoke the wheel build From d113a222db22c8e4e4865f5aeded55234d0a0b5a Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 17 Dec 2025 22:17:32 +0100 Subject: [PATCH 1040/1718] DEP: remove ``_core.numerictypes.maximum_sctype`` (#30462) --- .../upcoming_changes/30462.expired.rst | 1 + numpy/_core/numerictypes.py | 64 ------------------- numpy/_core/tests/test_deprecations.py | 2 - numpy/_core/tests/test_numerictypes.py | 34 +--------- 4 files changed, 2 insertions(+), 99 deletions(-) create mode 100644 doc/release/upcoming_changes/30462.expired.rst diff --git a/doc/release/upcoming_changes/30462.expired.rst b/doc/release/upcoming_changes/30462.expired.rst new file mode 100644 index 000000000000..ee8b62796640 --- /dev/null +++ 
b/doc/release/upcoming_changes/30462.expired.rst @@ -0,0 +1 @@ +* ``numpy._core.numerictypes.maximum_sctype`` has been removed (deprecated since 2.0) diff --git a/numpy/_core/numerictypes.py b/numpy/_core/numerictypes.py index 70570c8c0f39..bd3764f11b84 100644 --- a/numpy/_core/numerictypes.py +++ b/numpy/_core/numerictypes.py @@ -77,7 +77,6 @@ """ import numbers -import warnings from numpy._utils import set_module @@ -106,7 +105,6 @@ # as numerictypes.bool, etc. from builtins import bool, bytes, complex, float, int, object, str # noqa: F401, UP029 -from ._dtype import _kind_name from ._string_helpers import ( # noqa: F401 LOWER_TABLE, UPPER_TABLE, @@ -125,68 +123,6 @@ 'complex64', 'complex128', 'complex192', 'complex256', 'object'] -@set_module('numpy') -def maximum_sctype(t): - """ - Return the scalar type of highest precision of the same kind as the input. - - .. deprecated:: 2.0 - Use an explicit dtype like int64 or float64 instead. - - Parameters - ---------- - t : dtype or dtype specifier - The input data type. This can be a `dtype` object or an object that - is convertible to a `dtype`. - - Returns - ------- - out : dtype - The highest precision data type of the same kind (`dtype.kind`) as `t`. - - See Also - -------- - obj2sctype, mintypecode, sctype2char - dtype - - Examples - -------- - >>> from numpy._core.numerictypes import maximum_sctype - >>> maximum_sctype(int) - - >>> maximum_sctype(np.uint8) - - >>> maximum_sctype(complex) - # may vary - - >>> maximum_sctype(str) - - - >>> maximum_sctype('i2') - - >>> maximum_sctype('f4') - # may vary - - """ - - # Deprecated in NumPy 2.0, 2023-07-11 - warnings.warn( - "`maximum_sctype` is deprecated. Use an explicit dtype like int64 " - "or float64 instead. 
(deprecated in NumPy 2.0)", - DeprecationWarning, - stacklevel=2 - ) - - g = obj2sctype(t) - if g is None: - return t - t = g - base = _kind_name(dtype(t)) - if base in sctypes: - return sctypes[base][-1] - else: - return t - @set_module('numpy') def issctype(rep): diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index 7cb1fee9b890..1b93b57e90f3 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -275,7 +275,6 @@ class TestLibImports(_DeprecationTestCase): # Deprecated in Numpy 1.26.0, 2023-09 def test_lib_functions_deprecation_call(self): from numpy import row_stack - from numpy._core.numerictypes import maximum_sctype from numpy.lib._npyio_impl import recfromcsv, recfromtxt from numpy.lib._shape_base_impl import get_array_wrap from numpy.lib._utils_impl import safe_eval @@ -289,7 +288,6 @@ def test_lib_functions_deprecation_call(self): self.assert_deprecated(lambda: recfromtxt(data_gen(), **kwargs)) self.assert_deprecated(get_array_wrap) - self.assert_deprecated(lambda: maximum_sctype(int)) self.assert_deprecated(lambda: row_stack([[]])) self.assert_deprecated(lambda: np.chararray) diff --git a/numpy/_core/tests/test_numerictypes.py b/numpy/_core/tests/test_numerictypes.py index 5763a964c41d..618e668dd53d 100644 --- a/numpy/_core/tests/test_numerictypes.py +++ b/numpy/_core/tests/test_numerictypes.py @@ -5,7 +5,7 @@ import numpy as np import numpy._core.numerictypes as nt -from numpy._core.numerictypes import issctype, maximum_sctype, sctype2char, sctypes +from numpy._core.numerictypes import issctype, sctype2char, sctypes from numpy.testing import ( IS_PYPY, assert_, @@ -496,38 +496,6 @@ def test_ulong(self): assert np.dtype(np.ulong).itemsize == np.dtype(np.long).itemsize -@pytest.mark.filterwarnings("ignore:.*maximum_sctype.*:DeprecationWarning") -class TestMaximumSctype: - - # note that parametrizing with sctype['int'] and similar would skip types - # with the same 
size (gh-11923) - - @pytest.mark.parametrize( - 't', [np.byte, np.short, np.intc, np.long, np.longlong] - ) - def test_int(self, t): - assert_equal(maximum_sctype(t), np._core.sctypes['int'][-1]) - - @pytest.mark.parametrize( - 't', [np.ubyte, np.ushort, np.uintc, np.ulong, np.ulonglong] - ) - def test_uint(self, t): - assert_equal(maximum_sctype(t), np._core.sctypes['uint'][-1]) - - @pytest.mark.parametrize('t', [np.half, np.single, np.double, np.longdouble]) - def test_float(self, t): - assert_equal(maximum_sctype(t), np._core.sctypes['float'][-1]) - - @pytest.mark.parametrize('t', [np.csingle, np.cdouble, np.clongdouble]) - def test_complex(self, t): - assert_equal(maximum_sctype(t), np._core.sctypes['complex'][-1]) - - @pytest.mark.parametrize('t', [np.bool, np.object_, np.str_, np.bytes_, - np.void]) - def test_other(self, t): - assert_equal(maximum_sctype(t), t) - - class Test_sctype2char: # This function is old enough that we're really just documenting the quirks # at this point. 
From 843a82ff8ba80ca08c4db921c2552b452f25f0e3 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 17 Dec 2025 22:56:22 +0100 Subject: [PATCH 1041/1718] DEP: expire deprecation for 2d ``cross`` (#30461) --- .../upcoming_changes/30461.expired.rst | 1 + numpy/_core/numeric.py | 124 +++++------------- numpy/_core/tests/test_numeric.py | 55 +------- numpy/linalg/_linalg.py | 8 -- 4 files changed, 38 insertions(+), 150 deletions(-) create mode 100644 doc/release/upcoming_changes/30461.expired.rst diff --git a/doc/release/upcoming_changes/30461.expired.rst b/doc/release/upcoming_changes/30461.expired.rst new file mode 100644 index 000000000000..e9d05eda1b7b --- /dev/null +++ b/doc/release/upcoming_changes/30461.expired.rst @@ -0,0 +1 @@ +* ``numpy.cross`` no longer supports 2-dimensional vectors (deprecated since 2.0) diff --git a/numpy/_core/numeric.py b/numpy/_core/numeric.py index d4e1685501d7..b79cc03c1320 100644 --- a/numpy/_core/numeric.py +++ b/numpy/_core/numeric.py @@ -1559,10 +1559,7 @@ def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None): The cross product of `a` and `b` in :math:`R^3` is a vector perpendicular to both `a` and `b`. If `a` and `b` are arrays of vectors, the vectors are defined by the last axis of `a` and `b` by default, and these axes - can have dimensions 2 or 3. Where the dimension of either `a` or `b` is - 2, the third component of the input vector is assumed to be zero and the - cross product calculated accordingly. In cases where both input vectors - have dimension 2, the z-component of the cross product is returned. + must have 3 dimensions. Parameters ---------- @@ -1575,9 +1572,7 @@ def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None): axisb : int, optional Axis of `b` that defines the vector(s). By default, the last axis. axisc : int, optional - Axis of `c` containing the cross product vector(s). Ignored if - both input vectors have dimension 2, as the return is scalar. - By default, the last axis. 
+ Axis of `c` containing the cross product vector(s). By default, the last axis. axis : int, optional If defined, the axis of `a`, `b` and `c` that defines the vector(s) and cross product(s). Overrides `axisa`, `axisb` and `axisc`. @@ -1590,27 +1585,19 @@ def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None): Raises ------ ValueError - When the dimension of the vector(s) in `a` and/or `b` does not - equal 2 or 3. + When the dimension of the vector(s) in `a` or `b` does not equal 3. See Also -------- inner : Inner product outer : Outer product. - linalg.cross : An Array API compatible variation of ``np.cross``, - which accepts (arrays of) 3-element vectors only. + linalg.cross : An Array API compatible variation of ``np.cross``. ix_ : Construct index arrays. Notes ----- Supports full broadcasting of the inputs. - Dimension-2 input arrays were deprecated in 2.0.0. If you do need this - functionality, you can use:: - - def cross2d(x, y): - return x[..., 0] * y[..., 1] - x[..., 1] * y[..., 0] - Examples -------- Vector cross-product. @@ -1623,13 +1610,6 @@ def cross2d(x, y): One vector with dimension 2. - >>> x = [1, 2] - >>> y = [4, 5, 6] - >>> np.cross(x, y) - array([12, -6, -3]) - - Equivalently: - >>> x = [1, 2, 0] >>> y = [4, 5, 6] >>> np.cross(x, y) @@ -1637,10 +1617,10 @@ def cross2d(x, y): Both vectors with dimension 2. - >>> x = [1,2] - >>> y = [4,5] + >>> x = [1, 2, 0] + >>> y = [4, 5, 0] >>> np.cross(x, y) - array(-3) + array([0, 0, -3]) Multiple vector cross-products. Note that the direction of the cross product vector is defined by the *right-hand rule*. 
@@ -1687,24 +1667,16 @@ def cross2d(x, y): # Move working axis to the end of the shape a = moveaxis(a, axisa, -1) b = moveaxis(b, axisb, -1) - msg = ("incompatible dimensions for cross product\n" - "(dimension must be 2 or 3)") - if a.shape[-1] not in (2, 3) or b.shape[-1] not in (2, 3): - raise ValueError(msg) - if a.shape[-1] == 2 or b.shape[-1] == 2: - # Deprecated in NumPy 2.0, 2023-09-26 - warnings.warn( - "Arrays of 2-dimensional vectors are deprecated. Use arrays of " - "3-dimensional vectors instead. (deprecated in NumPy 2.0)", - DeprecationWarning, stacklevel=2 + if a.shape[-1] != 3 or b.shape[-1] != 3: + raise ValueError( + f"Both input arrays must be (arrays of) 3-dimensional vectors, " + f"but they are {a.shape[-1]} and {b.shape[-1]} dimensional instead." ) # Create the output array - shape = broadcast(a[..., 0], b[..., 0]).shape - if a.shape[-1] == 3 or b.shape[-1] == 3: - shape += (3,) - # Check axisc is within bounds - axisc = normalize_axis_index(axisc, len(shape), msg_prefix='axisc') + shape = *broadcast(a[..., 0], b[..., 0]).shape, 3 + # Check axisc is within bounds + axisc = normalize_axis_index(axisc, len(shape), msg_prefix='axisc') dtype = promote_types(a.dtype, b.dtype) cp = empty(shape, dtype) @@ -1715,58 +1687,26 @@ def cross2d(x, y): # create local aliases for readability a0 = a[..., 0] a1 = a[..., 1] - if a.shape[-1] == 3: - a2 = a[..., 2] + a2 = a[..., 2] b0 = b[..., 0] b1 = b[..., 1] - if b.shape[-1] == 3: - b2 = b[..., 2] - if cp.ndim != 0 and cp.shape[-1] == 3: - cp0 = cp[..., 0] - cp1 = cp[..., 1] - cp2 = cp[..., 2] - - if a.shape[-1] == 2: - if b.shape[-1] == 2: - # a0 * b1 - a1 * b0 - multiply(a0, b1, out=cp) - cp -= a1 * b0 - return cp - else: - assert b.shape[-1] == 3 - # cp0 = a1 * b2 - 0 (a2 = 0) - # cp1 = 0 - a0 * b2 (a2 = 0) - # cp2 = a0 * b1 - a1 * b0 - multiply(a1, b2, out=cp0) - multiply(a0, b2, out=cp1) - negative(cp1, out=cp1) - multiply(a0, b1, out=cp2) - cp2 -= a1 * b0 - else: - assert a.shape[-1] == 3 - if b.shape[-1] 
== 3: - # cp0 = a1 * b2 - a2 * b1 - # cp1 = a2 * b0 - a0 * b2 - # cp2 = a0 * b1 - a1 * b0 - multiply(a1, b2, out=cp0) - tmp = np.multiply(a2, b1, out=...) - cp0 -= tmp - multiply(a2, b0, out=cp1) - multiply(a0, b2, out=tmp) - cp1 -= tmp - multiply(a0, b1, out=cp2) - multiply(a1, b0, out=tmp) - cp2 -= tmp - else: - assert b.shape[-1] == 2 - # cp0 = 0 - a2 * b1 (b2 = 0) - # cp1 = a2 * b0 - 0 (b2 = 0) - # cp2 = a0 * b1 - a1 * b0 - multiply(a2, b1, out=cp0) - negative(cp0, out=cp0) - multiply(a2, b0, out=cp1) - multiply(a0, b1, out=cp2) - cp2 -= a1 * b0 + b2 = b[..., 2] + cp0 = cp[..., 0] + cp1 = cp[..., 1] + cp2 = cp[..., 2] + + # cp0 = a1 * b2 - a2 * b1 + # cp1 = a2 * b0 - a0 * b2 + # cp2 = a0 * b1 - a1 * b0 + multiply(a1, b2, out=cp0) + tmp = np.multiply(a2, b1, out=...) + cp0 -= tmp + multiply(a2, b0, out=cp1) + multiply(a0, b2, out=tmp) + cp1 -= tmp + multiply(a0, b1, out=cp2) + multiply(a1, b0, out=tmp) + cp2 -= tmp return moveaxis(cp, -1, axisc) diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py index 9e71b7c6b1b8..15869f522f73 100644 --- a/numpy/_core/tests/test_numeric.py +++ b/numpy/_core/tests/test_numeric.py @@ -3941,29 +3941,16 @@ def test_array_likes(self): class TestCross: - @pytest.mark.filterwarnings( - "ignore:.*2-dimensional vectors.*:DeprecationWarning" - ) def test_2x2(self): u = [1, 2] v = [3, 4] - z = -2 - cp = np.cross(u, v) - assert_equal(cp, z) - cp = np.cross(v, u) - assert_equal(cp, -z) + assert_raises(ValueError, np.cross, u, v) - @pytest.mark.filterwarnings( - "ignore:.*2-dimensional vectors.*:DeprecationWarning" - ) def test_2x3(self): u = [1, 2] v = [3, 4, 5] - z = np.array([10, -5, -2]) - cp = np.cross(u, v) - assert_equal(cp, z) - cp = np.cross(v, u) - assert_equal(cp, -z) + assert_raises(ValueError, np.cross, u, v) + assert_raises(ValueError, np.cross, v, u) def test_3x3(self): u = [1, 2, 3] @@ -3974,32 +3961,7 @@ def test_3x3(self): cp = np.cross(v, u) assert_equal(cp, -z) - 
@pytest.mark.filterwarnings( - "ignore:.*2-dimensional vectors.*:DeprecationWarning" - ) def test_broadcasting(self): - # Ticket #2624 (Trac #2032) - u = np.tile([1, 2], (11, 1)) - v = np.tile([3, 4], (11, 1)) - z = -2 - assert_equal(np.cross(u, v), z) - assert_equal(np.cross(v, u), -z) - assert_equal(np.cross(u, u), 0) - - u = np.tile([1, 2], (11, 1)).T - v = np.tile([3, 4, 5], (11, 1)) - z = np.tile([10, -5, -2], (11, 1)) - assert_equal(np.cross(u, v, axisa=0), z) - assert_equal(np.cross(v, u.T), -z) - assert_equal(np.cross(v, v), 0) - - u = np.tile([1, 2, 3], (11, 1)).T - v = np.tile([3, 4], (11, 1)).T - z = np.tile([-12, 9, -2], (11, 1)) - assert_equal(np.cross(u, v, axisa=0, axisb=0), z) - assert_equal(np.cross(v.T, u.T), -z) - assert_equal(np.cross(u.T, u.T), 0) - u = np.tile([1, 2, 3], (5, 1)) v = np.tile([4, 5, 6], (5, 1)).T z = np.tile([-3, 6, -3], (5, 1)) @@ -4007,27 +3969,20 @@ def test_broadcasting(self): assert_equal(np.cross(v.T, u), -z) assert_equal(np.cross(u, u), 0) - @pytest.mark.filterwarnings( - "ignore:.*2-dimensional vectors.*:DeprecationWarning" - ) def test_broadcasting_shapes(self): u = np.ones((2, 1, 3)) v = np.ones((5, 3)) assert_equal(np.cross(u, v).shape, (2, 5, 3)) u = np.ones((10, 3, 5)) - v = np.ones((2, 5)) + v = np.ones((3, 5)) assert_equal(np.cross(u, v, axisa=1, axisb=0).shape, (10, 5, 3)) assert_raises(AxisError, np.cross, u, v, axisa=1, axisb=2) assert_raises(AxisError, np.cross, u, v, axisa=3, axisb=0) u = np.ones((10, 3, 5, 7)) - v = np.ones((5, 7, 2)) + v = np.ones((5, 7, 3)) assert_equal(np.cross(u, v, axisa=1, axisc=2).shape, (10, 5, 3, 7)) assert_raises(AxisError, np.cross, u, v, axisa=-5, axisb=2) assert_raises(AxisError, np.cross, u, v, axisa=1, axisb=-4) - # gh-5885 - u = np.ones((3, 4, 2)) - for axisc in range(-2, 2): - assert_equal(np.cross(u, u, axisc=axisc).shape, (3, 4)) def test_uint8_int32_mixed_dtypes(self): # regression test for gh-19138 diff --git a/numpy/linalg/_linalg.py b/numpy/linalg/_linalg.py index 
00e485346577..1889946f9879 100644 --- a/numpy/linalg/_linalg.py +++ b/numpy/linalg/_linalg.py @@ -3305,14 +3305,6 @@ def cross(x1, x2, /, *, axis=-1): """ x1 = asanyarray(x1) x2 = asanyarray(x2) - - if x1.shape[axis] != 3 or x2.shape[axis] != 3: - raise ValueError( - "Both input arrays must be (arrays of) 3-dimensional vectors, " - f"but they are {x1.shape[axis]} and {x2.shape[axis]} " - "dimensional instead." - ) - return _core_cross(x1, x2, axis=axis) From 162cf00b89f1ba6716e3b6ebe5ffaa04e02ec99c Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Thu, 18 Dec 2025 00:45:57 +0100 Subject: [PATCH 1042/1718] TYP: ``linalg``: code cleanup (#30454) --- numpy/linalg/_linalg.pyi | 358 +++++++++------------- numpy/typing/tests/data/reveal/linalg.pyi | 51 +-- 2 files changed, 164 insertions(+), 245 deletions(-) diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index 010a378ea2a7..6a10786753c4 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -13,7 +13,7 @@ from typing import ( from typing_extensions import TypeVar import numpy as np -from numpy import complexfloating, floating, signedinteger, unsignedinteger, vecdot +from numpy import vecdot from numpy._core.fromnumeric import matrix_transpose from numpy._globals import _NoValue, _NoValueType from numpy._typing import ( @@ -29,7 +29,6 @@ from numpy._typing import ( _ArrayLikeObject_co, _ArrayLikeTD64_co, _ArrayLikeUInt_co, - _ComplexLike_co, _DTypeLike, _NestedSequence, _Shape, @@ -90,43 +89,54 @@ type _to_inexact64 = np.complex128 | _to_float64 type _to_inexact64_unsafe = _to_inexact64 | np.datetime64 | np.timedelta64 | np.character type _to_complex = np.number | np.bool +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] type _Array2D[ScalarT: np.generic] = np.ndarray[tuple[int, int], np.dtype[ScalarT]] type _Array3ND[ScalarT: np.generic] = np.ndarray[_AtLeast3D, np.dtype[ScalarT]] -# ==2d -type _ArrayLike2D[ScalarT: np.generic] = 
_SupportsArray[tuple[int, int], np.dtype[ScalarT]] | Sequence[Sequence[ScalarT]] -# >=2d -type _ArrayLike2ND[ScalarT: np.generic] = _SupportsArray[_AtLeast2D, np.dtype[ScalarT]] | _NestedSequence[Sequence[ScalarT]] -# >=3d -type _ArrayLike3ND[ScalarT: np.generic] = ( - _SupportsArray[_AtLeast3D, np.dtype[ScalarT]] - | _NestedSequence[Sequence[Sequence[ScalarT]]] -) +type _Sequence2D[T] = Sequence[Sequence[T]] +type _Sequence3D[T] = Sequence[_Sequence2D[T]] +type _Sequence2ND[T] = _NestedSequence[Sequence[T]] +type _Sequence3ND[T] = _NestedSequence[_Sequence2D[T]] +type _Sequence4ND[T] = _NestedSequence[_Sequence3D[T]] -type _ToArray1D[ScalarT: np.generic] = _SupportsArray[tuple[int], np.dtype[ScalarT]] +type _ArrayLike1D[ScalarT: np.generic] = _SupportsArray[tuple[int], np.dtype[ScalarT]] | Sequence[ScalarT] # ==1d +type _ArrayLike2D[ScalarT: np.generic] = _SupportsArray[tuple[int, int], np.dtype[ScalarT]] | _Sequence2D[ScalarT] # ==2d +type _ArrayLike3D[ScalarT: np.generic] = _SupportsArray[tuple[int, int, int], np.dtype[ScalarT]] | _Sequence3D[ScalarT] # ==3d +type _ArrayLike2ND[ScalarT: np.generic] = _SupportsArray[_AtLeast2D, np.dtype[ScalarT]] | _Sequence2ND[ScalarT] # >=2d +type _ArrayLike3ND[ScalarT: np.generic] = _SupportsArray[_AtLeast3D, np.dtype[ScalarT]] | _Sequence3ND[ScalarT] # >=3d +type _ArrayLike4ND[ScalarT: np.generic] = _SupportsArray[_AtLeast4D, np.dtype[ScalarT]] | _Sequence4ND[ScalarT] # >=3d -# anything that safe-casts (from floating) into float64/complex128 +# safe-castable array-likes +type _ToArrayBool_1d = _ArrayLike1D[np.bool_] | Sequence[bool] +type _ToArrayInt_1d = _ArrayLike1D[_to_integer] | Sequence[int] type _ToArrayF64 = _ArrayLike[_to_float64] | _NestedSequence[float] +type _ToArrayF64_1d = _ArrayLike1D[_to_float64] | Sequence[float] +type _ToArrayF64_2d = _ArrayLike2D[_to_float64] | _Sequence2D[float] +type _ToArrayF64_3nd = _ArrayLike3ND[_to_float64] | _Sequence3ND[float] type _ToArrayC128 = _ArrayLike[_to_inexact64] | 
_NestedSequence[complex] +type _ToArrayC128_3nd = _ArrayLike3ND[_to_inexact64] | _Sequence3ND[complex] +type _ToArrayComplex_1d = _ArrayLike1D[_to_complex] | Sequence[complex] +type _ToArrayComplex_2d = _ArrayLike2D[_to_complex] | _Sequence2D[complex] +type _ToArrayComplex_3d = _ArrayLike3D[_to_complex] | _Sequence3D[complex] # the invariant `list` type avoids overlap with bool, int, etc +type _AsArrayI64_1d = _ArrayLike1D[np.int64] | list[int] type _AsArrayF64 = _ArrayLike[np.float64] | list[float] | _NestedSequence[list[float]] +type _AsArrayF64_1d = _ArrayLike1D[np.float64] | list[float] type _AsArrayC128 = _ArrayLike[np.complex128] | list[complex] | _NestedSequence[list[complex]] - -type _ToArrayF64_2d = _Array2D[_to_float64] | Sequence[Sequence[float]] -type _ToArrayF64_3nd = _Array3ND[_to_float64] | Sequence[Sequence[_NestedSequence[float]]] -type _ToArrayC128_2d = _Array2D[_to_inexact64] | Sequence[Sequence[complex]] -type _ToArrayC128_3nd = _Array3ND[_to_inexact64] | Sequence[Sequence[_NestedSequence[complex]]] +type _AsArrayC128_1d = _ArrayLike1D[np.complex128] | list[complex] +type _AsArrayC128_2d = _ArrayLike2D[np.complex128] | Sequence[list[complex]] +type _AsArrayC128_3nd = _ArrayLike3ND[np.complex128] | _Sequence2ND[list[complex]] type _OrderKind = L[1, -1, 2, -2, "fro", "nuc"] | float # only accepts `-inf` and `inf` as `float` type _SideKind = L["L", "U", "l", "u"] type _NonNegInt = L[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] type _NegInt = L[-1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16] -type _LstSqResult[ShapeT: tuple[int, ...], InexactT: np.inexact, FloatingT: np.floating] = tuple[ +type _LstSqResult[ShapeT: _Shape, InexactT: np.inexact, FloatingT: np.floating] = tuple[ np.ndarray[ShapeT, np.dtype[InexactT]], # least-squares solution - np.ndarray[tuple[int], np.dtype[FloatingT]], # residuals + _Array1D[FloatingT], # residuals np.int32, # rank - np.ndarray[tuple[int], np.dtype[FloatingT]], # singular values 
+ _Array1D[FloatingT], # singular values ] _FloatingT_co = TypeVar("_FloatingT_co", bound=np.floating, default=Any, covariant=True) @@ -172,7 +182,9 @@ def tensorsolve(a: _ToArrayF64, b: _ArrayLikeFloat_co, axes: Iterable[int] | Non @overload # +float64, ~float64 def tensorsolve(a: _ArrayLikeFloat_co, b: _ToArrayF64, axes: Iterable[int] | None = None) -> NDArray[np.float64]: ... @overload # ~float32, ~float32 -def tensorsolve(a: _ArrayLike[np.float32], b: _ArrayLike[np.float32], axes: Iterable[int] | None = None) -> NDArray[np.float32]: ... +def tensorsolve( + a: _ArrayLike[np.float32], b: _ArrayLike[np.float32], axes: Iterable[int] | None = None +) -> NDArray[np.float32]: ... @overload # +float, +float def tensorsolve(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, axes: Iterable[int] | None = None) -> NDArray[np.float64 | Any]: ... @overload # ~complex128, +complex128 @@ -180,11 +192,17 @@ def tensorsolve(a: _AsArrayC128, b: _ArrayLikeComplex_co, axes: Iterable[int] | @overload # +complex128, ~complex128 def tensorsolve(a: _ArrayLikeComplex_co, b: _AsArrayC128, axes: Iterable[int] | None = None) -> NDArray[np.complex128]: ... @overload # ~complex64, +complex64 -def tensorsolve(a: _ArrayLike[np.complex64], b: _ArrayLike[np.complex64 | np.float32], axes: Iterable[int] | None = None) -> NDArray[np.complex64]: ... +def tensorsolve( + a: _ArrayLike[np.complex64], b: _ArrayLike[_inexact32], axes: Iterable[int] | None = None +) -> NDArray[np.complex64]: ... @overload # +complex64, ~complex64 -def tensorsolve(a: _ArrayLike[np.complex64 | np.float32], b: _ArrayLike[np.complex64], axes: Iterable[int] | None = None) -> NDArray[np.complex64]: ... +def tensorsolve( + a: _ArrayLike[_inexact32], b: _ArrayLike[np.complex64], axes: Iterable[int] | None = None +) -> NDArray[np.complex64]: ... @overload # +complex, +complex -def tensorsolve(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, axes: Iterable[int] | None = None) -> NDArray[np.complex128 | Any]: ... 
+def tensorsolve( + a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, axes: Iterable[int] | None = None +) -> NDArray[np.complex128 | Any]: ... # keep in sync with `tensorsolve` @overload # ~float64, +float64 @@ -200,9 +218,9 @@ def solve(a: _AsArrayC128, b: _ArrayLikeComplex_co) -> NDArray[np.complex128]: . @overload # +complex128, ~complex128 def solve(a: _ArrayLikeComplex_co, b: _AsArrayC128) -> NDArray[np.complex128]: ... @overload # ~complex64, +complex64 -def solve(a: _ArrayLike[np.complex64], b: _ArrayLike[np.complex64 | np.float32]) -> NDArray[np.complex64]: ... +def solve(a: _ArrayLike[np.complex64], b: _ArrayLike[_inexact32]) -> NDArray[np.complex64]: ... @overload # +complex64, ~complex64 -def solve(a: _ArrayLike[np.complex64 | np.float32], b: _ArrayLike[np.complex64]) -> NDArray[np.complex64]: ... +def solve(a: _ArrayLike[_inexact32], b: _ArrayLike[np.complex64]) -> NDArray[np.complex64]: ... @overload # +complex, +complex def solve(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co) -> NDArray[np.complex128 | Any]: ... @@ -414,7 +432,7 @@ def svdvals(a: _ArrayLikeComplex_co, /) -> NDArray[np.floating]: ... # @overload # workaround for microsoft/pyright#10232 def matrix_rank( - A: np.ndarray[tuple[Never, ...], np.dtype[np.number]], + A: np.ndarray[_JustAnyShape, np.dtype[_to_complex]], tol: _ArrayLikeFloat_co | None = None, hermitian: bool = False, *, @@ -422,7 +440,7 @@ def matrix_rank( ) -> Any: ... @overload # <2d def matrix_rank( - A: complex | Sequence[complex] | np.ndarray[_AtMost1D, np.dtype[np.number]], + A: _SupportsArray[_AtMost1D, np.dtype[_to_complex]] | Sequence[complex | _to_complex] | complex | _to_complex, tol: _ArrayLikeFloat_co | None = None, hermitian: bool = False, *, @@ -430,7 +448,7 @@ def matrix_rank( ) -> L[0, 1]: ... 
@overload # =2d def matrix_rank( - A: Sequence[Sequence[complex]] | _Array2D[np.number], + A: _ToArrayComplex_2d, tol: _ArrayLikeFloat_co | None = None, hermitian: bool = False, *, @@ -438,15 +456,15 @@ def matrix_rank( ) -> np.int_: ... @overload # =3d def matrix_rank( - A: Sequence[Sequence[Sequence[complex]]] | np.ndarray[tuple[int, int, int], np.dtype[np.number]], + A: _ToArrayComplex_3d, tol: _ArrayLikeFloat_co | None = None, hermitian: bool = False, *, rtol: _ArrayLikeFloat_co | None = None, -) -> np.ndarray[tuple[int], np.dtype[np.int_]]: ... +) -> _Array1D[np.int_]: ... @overload # ≥4d def matrix_rank( - A: Sequence[Sequence[Sequence[_NestedSequence[complex]]]] | np.ndarray[_AtLeast4D, np.dtype[np.number]], + A: _ArrayLike4ND[_to_complex] | _Sequence4ND[complex], tol: _ArrayLikeFloat_co | None = None, hermitian: bool = False, *, @@ -463,114 +481,98 @@ def matrix_rank( # @overload # workaround for microsoft/pyright#10232 -def cond(x: np.ndarray[_JustAnyShape, np.dtype[np.number]], p: _OrderKind | None = None) -> Any: ... +def cond(x: np.ndarray[_JustAnyShape, np.dtype[_to_complex]], p: _OrderKind | None = None) -> Any: ... @overload # 2d ~inexact32 -def cond(x: _Array2D[_inexact32], p: _OrderKind | None = None) -> np.float32: ... +def cond(x: _ArrayLike2D[_inexact32], p: _OrderKind | None = None) -> np.float32: ... @overload # 2d +inexact64 -def cond(x: _ToArrayC128_2d, p: _OrderKind | None = None) -> np.float64: ... +def cond(x: _ArrayLike2D[_to_inexact64] | _Sequence2D[complex], p: _OrderKind | None = None) -> np.float64: ... @overload # 2d ~number -def cond(x: _Array2D[np.number], p: _OrderKind | None = None) -> np.floating: ... +def cond(x: _ArrayLike2D[_to_complex], p: _OrderKind | None = None) -> np.floating: ... @overload # >2d ~inexact32 -def cond(x: np.ndarray[_AtLeast3D, np.dtype[_inexact32]], p: _OrderKind | None = None) -> NDArray[np.float32]: ... +def cond(x: _ArrayLike3ND[_inexact32], p: _OrderKind | None = None) -> NDArray[np.float32]: ... 
@overload # >2d +inexact64 def cond(x: _ToArrayC128_3nd, p: _OrderKind | None = None) -> NDArray[np.float64]: ... @overload # >2d ~number -def cond(x: np.ndarray[_AtLeast3D, np.dtype[np.number]], p: _OrderKind | None = None) -> NDArray[np.floating]: ... +def cond(x: _ArrayLike3ND[_to_complex], p: _OrderKind | None = None) -> NDArray[np.floating]: ... @overload # fallback def cond(x: _ArrayLikeComplex_co, p: _OrderKind | None = None) -> Any: ... # keep in sync with `det` @overload # workaround for microsoft/pyright#10232 -def slogdet(a: np.ndarray[_JustAnyShape, np.dtype[np.number]]) -> SlogdetResult: ... +def slogdet(a: np.ndarray[_JustAnyShape, np.dtype[_to_complex]]) -> SlogdetResult: ... @overload # 2d ~inexact32 -def slogdet[ScalarT: _inexact32](a: _Array2D[ScalarT]) -> SlogdetResult[np.float32, ScalarT]: ... +def slogdet[ScalarT: _inexact32](a: _ArrayLike2D[ScalarT]) -> SlogdetResult[np.float32, ScalarT]: ... @overload # >2d ~inexact32 -def slogdet[ScalarT: _inexact32](a: _Array3ND[ScalarT]) -> SlogdetResult[NDArray[np.float32], NDArray[ScalarT]]: ... +def slogdet[ScalarT: _inexact32](a: _ArrayLike3ND[ScalarT]) -> SlogdetResult[NDArray[np.float32], NDArray[ScalarT]]: ... @overload # 2d +float64 -def slogdet(a: _Array2D[_to_float64]) -> SlogdetResult[np.float64, np.float64]: ... +def slogdet(a: _ArrayLike2D[_to_float64]) -> SlogdetResult[np.float64, np.float64]: ... @overload # >2d +float64 -def slogdet(a: _Array3ND[_to_float64]) -> SlogdetResult[NDArray[np.float64], NDArray[np.float64]]: ... +def slogdet(a: _ArrayLike3ND[_to_float64]) -> SlogdetResult[NDArray[np.float64], NDArray[np.float64]]: ... @overload # 2d ~complex128 -def slogdet(a: _Array2D[np.complex128] | Sequence[list[complex]]) -> SlogdetResult[np.float64, np.complex128]: ... +def slogdet(a: _AsArrayC128_2d) -> SlogdetResult[np.float64, np.complex128]: ... 
@overload # >2d ~complex128 -def slogdet( - a: _Array3ND[np.complex128] | _NestedSequence[Sequence[list[complex]]] -) -> SlogdetResult[NDArray[np.float64], NDArray[np.complex128]]: ... +def slogdet(a: _AsArrayC128_3nd) -> SlogdetResult[NDArray[np.float64], NDArray[np.complex128]]: ... @overload # fallback def slogdet(a: _ArrayLikeComplex_co) -> SlogdetResult: ... # keep in sync with `slogdet` @overload # workaround for microsoft/pyright#10232 -def det(a: np.ndarray[_JustAnyShape, np.dtype[np.number]]) -> Any: ... +def det(a: np.ndarray[_JustAnyShape, np.dtype[_to_complex]]) -> Any: ... @overload # 2d ~inexact32 -def det[ScalarT: _inexact32](a: _Array2D[ScalarT]) -> ScalarT: ... +def det[ScalarT: _inexact32](a: _ArrayLike2D[ScalarT]) -> ScalarT: ... @overload # >2d ~inexact32 -def det[ScalarT: _inexact32](a: _Array3ND[ScalarT]) -> NDArray[ScalarT]: ... +def det[ScalarT: _inexact32](a: _ArrayLike3ND[ScalarT]) -> NDArray[ScalarT]: ... @overload # 2d +float64 -def det(a: _Array2D[_to_float64]) -> np.float64: ... +def det(a: _ArrayLike2D[_to_float64]) -> np.float64: ... @overload # >2d +float64 -def det(a: _Array3ND[_to_float64]) -> NDArray[np.float64]: ... +def det(a: _ArrayLike3ND[_to_float64]) -> NDArray[np.float64]: ... @overload # 2d ~complex128 -def det(a: _Array2D[np.complex128] | Sequence[list[complex]]) -> np.complex128: ... +def det(a: _AsArrayC128_2d) -> np.complex128: ... @overload # >2d ~complex128 -def det(a: _Array3ND[np.complex128] | _NestedSequence[Sequence[list[complex]]]) -> NDArray[np.complex128]: ... +def det(a: _AsArrayC128_3nd) -> NDArray[np.complex128]: ... @overload # fallback def det(a: _ArrayLikeComplex_co) -> Any: ... 
# @overload # +float64, ~float64, known shape def lstsq[ShapeT: tuple[int] | tuple[int, int]]( - a: _SupportsArray[tuple[int, int], np.dtype[_to_float64]] | Sequence[Sequence[float]], - b: _SupportsArray[ShapeT, np.dtype[np.floating | np.integer]], + a: _ArrayLike2D[_to_float64] | _Sequence2D[float], + b: _SupportsArray[ShapeT, np.dtype[np.floating | _to_integer]], rcond: float | None = None, ) -> _LstSqResult[ShapeT, np.float64, np.float64]: ... @overload # ~float64, +float64, known shape def lstsq[ShapeT: tuple[int] | tuple[int, int]]( - a: _SupportsArray[tuple[int, int], np.dtype[np.floating | np.integer]] | Sequence[Sequence[float]], + a: _ArrayLike2D[np.floating | _to_integer] | _Sequence2D[float], b: _SupportsArray[ShapeT, np.dtype[_to_float64]], rcond: float | None = None, ) -> _LstSqResult[ShapeT, np.float64, np.float64]: ... @overload # +complex128, ~complex128, known shape def lstsq[ShapeT: tuple[int] | tuple[int, int]]( - a: _SupportsArray[tuple[int, int], np.dtype[np.number]] | Sequence[Sequence[complex]], - b: _SupportsArray[ShapeT, np.dtype[np.complex128]], - rcond: float | None = None, + a: _ToArrayComplex_2d, b: _SupportsArray[ShapeT, np.dtype[np.complex128]], rcond: float | None = None ) -> _LstSqResult[ShapeT, np.complex128, np.float64]: ... @overload # ~complex128, +complex128, known shape def lstsq[ShapeT: tuple[int] | tuple[int, int]]( - a: _SupportsArray[tuple[int, int], np.dtype[np.complex128]] | Sequence[list[complex]], - b: _SupportsArray[ShapeT, np.dtype[np.number]], - rcond: float | None = None, + a: _AsArrayC128_2d, b: _SupportsArray[ShapeT, np.dtype[_to_complex]], rcond: float | None = None ) -> _LstSqResult[ShapeT, np.complex128, np.float64]: ... 
@overload # ~float32, ~float32, known shape def lstsq[ShapeT: tuple[int] | tuple[int, int]]( - a: _SupportsArray[tuple[int, int], np.dtype[np.float32]], - b: _SupportsArray[ShapeT, np.dtype[np.float32]], - rcond: float | None = None, + a: _ArrayLike2D[np.float32], b: _SupportsArray[ShapeT, np.dtype[np.float32]], rcond: float | None = None ) -> _LstSqResult[ShapeT, np.float32, np.float32]: ... @overload # +complex64, ~complex64, known shape def lstsq[ShapeT: tuple[int] | tuple[int, int]]( - a: _SupportsArray[tuple[int, int], np.dtype[np.complex64 | np.float32]], - b: _SupportsArray[ShapeT, np.dtype[np.complex64]], - rcond: float | None = None, + a: _ArrayLike2D[_inexact32], b: _SupportsArray[ShapeT, np.dtype[np.complex64]], rcond: float | None = None ) -> _LstSqResult[ShapeT, np.complex64, np.float32]: ... @overload # ~complex64, +complex64, known shape def lstsq[ShapeT: tuple[int] | tuple[int, int]]( - a: _SupportsArray[tuple[int, int], np.dtype[np.complex64]], - b: _SupportsArray[ShapeT, np.dtype[np.complex64 | np.float32]], - rcond: float | None = None, + a: _ArrayLike2D[np.complex64], b: _SupportsArray[ShapeT, np.dtype[_inexact32]], rcond: float | None = None ) -> _LstSqResult[ShapeT, np.complex64, np.float32]: ... @overload # +float64, +float64, unknown shape def lstsq( - a: _ArrayLikeFloat_co, - b: _ArrayLikeFloat_co, - rcond: float | None = None, + a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, rcond: float | None = None ) -> _LstSqResult[_AnyShape, np.float64 | Any, np.float64 | Any]: ... @overload # +complex128, +complex128, unknown shape def lstsq( - a: _ArrayLikeComplex_co, - b: _ArrayLikeComplex_co, - rcond: float | None = None, + a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, rcond: float | None = None ) -> _LstSqResult[_AnyShape, np.complex128 | Any, np.float64 | Any]: ... # NOTE: This assumes that `axis` is only passed if `x` is >1d, and that `keepdims` is never passed positionally. 
@@ -618,9 +620,7 @@ def norm( x: _ArrayLike[np.float16], ord: _OrderKind | None = None, axis: None = None, keepdims: L[False] = False ) -> np.float16: ... @overload # ~float16, axis= (positional), keepdims=False -def norm( - x: _ArrayLike[np.float16], ord: _OrderKind | None, axis: _Ax2, keepdims: L[False] = False -) -> NDArray[np.float16]: ... +def norm(x: _ArrayLike[np.float16], ord: _OrderKind | None, axis: _Ax2, keepdims: L[False] = False) -> NDArray[np.float16]: ... @overload # ~float16, axis= (keyword), keepdims=False def norm( x: _ArrayLike[np.float16], ord: _OrderKind | None = None, *, axis: _Ax2, keepdims: L[False] = False @@ -638,9 +638,7 @@ def norm( x: _ArrayLike[_inexact32], ord: _OrderKind | None = None, axis: None = None, keepdims: L[False] = False ) -> np.float32: ... @overload # ~inexact32, axis= (positional), keepdims=False -def norm( - x: _ArrayLike[_inexact32], ord: _OrderKind | None, axis: _Ax2, keepdims: L[False] = False -) -> NDArray[np.float32]: ... +def norm(x: _ArrayLike[_inexact32], ord: _OrderKind | None, axis: _Ax2, keepdims: L[False] = False) -> NDArray[np.float32]: ... @overload # ~inexact32, axis= (keyword), keepdims=False def norm( x: _ArrayLike[_inexact32], ord: _OrderKind | None = None, *, axis: _Ax2, keepdims: L[False] = False @@ -658,9 +656,7 @@ def norm( x: _ArrayLike[_inexact80], ord: _OrderKind | None = None, axis: None = None, keepdims: L[False] = False ) -> np.longdouble: ... @overload # ~inexact80, axis= (positional), keepdims=False -def norm( - x: _ArrayLike[_inexact80], ord: _OrderKind | None, axis: _Ax2, keepdims: L[False] = False -) -> NDArray[np.longdouble]: ... +def norm(x: _ArrayLike[_inexact80], ord: _OrderKind | None, axis: _Ax2, keepdims: L[False] = False) -> NDArray[np.longdouble]: ... 
@overload # ~inexact80, axis= (keyword), keepdims=False def norm( x: _ArrayLike[_inexact80], ord: _OrderKind | None = None, *, axis: _Ax2, keepdims: L[False] = False @@ -687,7 +683,7 @@ def matrix_norm( ) -> NDArray[np.float64] | Any: ... @overload # +inexact64 (unsafe casting), 2d, keepdims=False def matrix_norm( - x: _ArrayLike2D[_to_inexact64_unsafe] | Sequence[Sequence[complex]], + x: _ArrayLike2D[_to_inexact64_unsafe] | _Sequence2D[complex], /, *, ord: _OrderKind | None = "fro", @@ -695,7 +691,7 @@ def matrix_norm( ) -> np.float64: ... @overload # +inexact64 (unsafe casting), >2d, keepdims=False def matrix_norm( - x: _ArrayLike3ND[_to_inexact64_unsafe] | _NestedSequence[Sequence[Sequence[complex]]], + x: _ArrayLike3ND[_to_inexact64_unsafe] | _Sequence3D[complex], /, *, ord: _OrderKind | None = "fro", @@ -711,74 +707,62 @@ def matrix_norm[ShapeT: _Shape]( ) -> np.ndarray[ShapeT, np.dtype[np.float64]]: ... @overload # +inexact64 (unsafe casting), ?d, keepdims=True def matrix_norm( - x: _ArrayLike2ND[_to_inexact64_unsafe] | _NestedSequence[Sequence[complex]], - /, - *, - ord: _OrderKind | None = "fro", - keepdims: L[True], + x: _ArrayLike2ND[_to_inexact64_unsafe] | _Sequence2ND[complex], /, *, ord: _OrderKind | None = "fro", keepdims: L[True] ) -> NDArray[np.float64]: ... @overload # ~float16, ?d, keepdims=False def matrix_norm( - x: _SupportsArray[_JustAnyShape, np.dtype[np.float16]], /, *, ord: _OrderKind | None = None, keepdims: L[False] = False + x: _SupportsArray[_JustAnyShape, np.dtype[np.float16]], /, *, ord: _OrderKind | None = "fro", keepdims: L[False] = False ) -> NDArray[np.float16] | Any: ... @overload # ~float16, 2d, keepdims=False -def matrix_norm( - x: _ArrayLike2D[np.float16], /, *, ord: _OrderKind | None = None, keepdims: L[False] = False -) -> np.float16: ... +def matrix_norm(x: _ArrayLike2D[np.float16], /, *, ord: _OrderKind | None = "fro", keepdims: L[False] = False) -> np.float16: ... 
@overload # ~float16, >2d, keepdims=False def matrix_norm( - x: _ArrayLike3ND[np.float16], /, *, ord: _OrderKind | None = None, keepdims: L[False] = False + x: _ArrayLike3ND[np.float16], /, *, ord: _OrderKind | None = "fro", keepdims: L[False] = False ) -> NDArray[np.float16]: ... @overload # ~float16, shape known, keepdims=True def matrix_norm[ShapeT: _Shape]( - x: _SupportsArray[ShapeT, np.dtype[np.float16]], /, *, ord: _OrderKind | None = None, keepdims: L[True] + x: _SupportsArray[ShapeT, np.dtype[np.float16]], /, *, ord: _OrderKind | None = "fro", keepdims: L[True] ) -> np.ndarray[ShapeT, np.dtype[np.float16]]: ... @overload # ~float16, ?d, keepdims=True -def matrix_norm( - x: _ArrayLike2ND[np.float16], /, *, ord: _OrderKind | None = None, keepdims: L[True] -) -> NDArray[np.float16]: ... +def matrix_norm(x: _ArrayLike2ND[np.float16], /, *, ord: _OrderKind | None = "fro", keepdims: L[True]) -> NDArray[np.float16]: ... @overload # ~inexact32, ?d, keepdims=False def matrix_norm( - x: _SupportsArray[_JustAnyShape, np.dtype[_inexact32]], /, *, ord: _OrderKind | None = None, keepdims: L[False] = False + x: _SupportsArray[_JustAnyShape, np.dtype[_inexact32]], /, *, ord: _OrderKind | None = "fro", keepdims: L[False] = False ) -> NDArray[np.float32] | Any: ... @overload # ~inexact32, 2d, keepdims=False -def matrix_norm( - x: _ArrayLike2D[_inexact32], /, *, ord: _OrderKind | None = None, keepdims: L[False] = False -) -> np.float32: ... +def matrix_norm(x: _ArrayLike2D[_inexact32], /, *, ord: _OrderKind | None = "fro", keepdims: L[False] = False) -> np.float32: ... @overload # ~inexact32, >2d, keepdims=False def matrix_norm( - x: _ArrayLike3ND[_inexact32], /, *, ord: _OrderKind | None = None, keepdims: L[False] = False + x: _ArrayLike3ND[_inexact32], /, *, ord: _OrderKind | None = "fro", keepdims: L[False] = False ) -> NDArray[np.float32]: ... 
@overload # ~inexact32, shape known, keepdims=True def matrix_norm[ShapeT: _Shape]( - x: _SupportsArray[ShapeT, np.dtype[_inexact32]], /, *, ord: _OrderKind | None = None, keepdims: L[True] + x: _SupportsArray[ShapeT, np.dtype[_inexact32]], /, *, ord: _OrderKind | None = "fro", keepdims: L[True] ) -> np.ndarray[ShapeT, np.dtype[np.float32]]: ... @overload # ~inexact32, ?d, keepdims=True -def matrix_norm( - x: _ArrayLike2ND[_inexact32], /, *, ord: _OrderKind | None = None, keepdims: L[True] -) -> NDArray[np.float32]: ... +def matrix_norm(x: _ArrayLike2ND[_inexact32], /, *, ord: _OrderKind | None = "fro", keepdims: L[True]) -> NDArray[np.float32]: ... @overload # ~inexact80, ?d, keepdims=False def matrix_norm( - x: _SupportsArray[_JustAnyShape, np.dtype[_inexact80]], /, *, ord: _OrderKind | None = None, keepdims: L[False] = False + x: _SupportsArray[_JustAnyShape, np.dtype[_inexact80]], /, *, ord: _OrderKind | None = "fro", keepdims: L[False] = False ) -> NDArray[np.longdouble] | Any: ... @overload # ~inexact80, 2d, keepdims=False def matrix_norm( - x: _ArrayLike2D[_inexact80], /, *, ord: _OrderKind | None = None, keepdims: L[False] = False + x: _ArrayLike2D[_inexact80], /, *, ord: _OrderKind | None = "fro", keepdims: L[False] = False ) -> np.longdouble: ... @overload # ~inexact80, >2d, keepdims=False def matrix_norm( - x: _ArrayLike3ND[_inexact80], /, *, ord: _OrderKind | None = None, keepdims: L[False] = False + x: _ArrayLike3ND[_inexact80], /, *, ord: _OrderKind | None = "fro", keepdims: L[False] = False ) -> NDArray[np.longdouble]: ... @overload # ~inexact80, shape known, keepdims=True def matrix_norm[ShapeT: _Shape]( - x: _SupportsArray[ShapeT, np.dtype[_inexact80]], /, *, ord: _OrderKind | None = None, keepdims: L[True] + x: _SupportsArray[ShapeT, np.dtype[_inexact80]], /, *, ord: _OrderKind | None = "fro", keepdims: L[True] ) -> np.ndarray[ShapeT, np.dtype[np.longdouble]]: ... 
@overload # ~inexact80, ?d, keepdims=True def matrix_norm( - x: _ArrayLike2ND[_inexact80], /, *, ord: _OrderKind | None = None, keepdims: L[True] + x: _ArrayLike2ND[_inexact80], /, *, ord: _OrderKind | None = "fro", keepdims: L[True] ) -> NDArray[np.longdouble]: ... @overload # fallback -def matrix_norm(x: ArrayLike, /, *, ord: _OrderKind | None = None, keepdims: bool = False) -> Any: ... +def matrix_norm(x: ArrayLike, /, *, ord: _OrderKind | None = "fro", keepdims: bool = False) -> Any: ... # keep in sync with `norm` @overload # +inexact64 (unsafe casting), axis=None, keepdims=False @@ -871,43 +855,23 @@ def vector_norm(x: ArrayLike, /, *, axis: _Ax2 | None = None, keepdims: bool = F # keep in sync with numpy._core.numeric.tensordot (ignoring `/, *`) @overload def tensordot[ScalarT: np.number | np.timedelta64 | np.object_]( - a: _ArrayLike[ScalarT], - b: _ArrayLike[ScalarT], - /, - *, - axes: int | tuple[_ShapeLike, _ShapeLike] = 2, + a: _ArrayLike[ScalarT], b: _ArrayLike[ScalarT], /, *, axes: int | tuple[_ShapeLike, _ShapeLike] = 2 ) -> NDArray[ScalarT]: ... @overload def tensordot( - a: _ArrayLikeBool_co, - b: _ArrayLikeBool_co, - /, - *, - axes: int | tuple[_ShapeLike, _ShapeLike] = 2, + a: _ArrayLikeBool_co, b: _ArrayLikeBool_co, /, *, axes: int | tuple[_ShapeLike, _ShapeLike] = 2 ) -> NDArray[np.bool]: ... @overload def tensordot( - a: _ArrayLikeInt_co, - b: _ArrayLikeInt_co, - /, - *, - axes: int | tuple[_ShapeLike, _ShapeLike] = 2, + a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, /, *, axes: int | tuple[_ShapeLike, _ShapeLike] = 2 ) -> NDArray[np.int_ | Any]: ... @overload def tensordot( - a: _ArrayLikeFloat_co, - b: _ArrayLikeFloat_co, - /, - *, - axes: int | tuple[_ShapeLike, _ShapeLike] = 2, + a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, /, *, axes: int | tuple[_ShapeLike, _ShapeLike] = 2 ) -> NDArray[np.float64 | Any]: ... 
@overload def tensordot( - a: _ArrayLikeComplex_co, - b: _ArrayLikeComplex_co, - /, - *, - axes: int | tuple[_ShapeLike, _ShapeLike] = 2, + a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, /, *, axes: int | tuple[_ShapeLike, _ShapeLike] = 2 ) -> NDArray[np.complex128 | Any]: ... # TODO: Returns a scalar or array @@ -955,19 +919,13 @@ def trace( x: _SupportsArray[_JustAnyShape, np.dtype[_to_complex]], /, *, offset: SupportsIndex = 0, dtype: DTypeLike | None = None ) -> Any: ... @overload # 2d known dtype, dtype=None -def trace[ScalarT: _to_complex]( - x: _SupportsArray[tuple[int, int], np.dtype[ScalarT]], /, *, offset: SupportsIndex = 0, dtype: None = None -) -> ScalarT: ... +def trace[ScalarT: _to_complex](x: _ArrayLike2D[ScalarT], /, *, offset: SupportsIndex = 0, dtype: None = None) -> ScalarT: ... @overload # 2d, dtype= def trace[ScalarT: _to_complex]( - x: _SupportsArray[tuple[int, int], np.dtype[_to_complex]] | Sequence[Sequence[_ComplexLike_co]], - /, - *, - offset: SupportsIndex = 0, - dtype: _DTypeLike[ScalarT], + x: _ToArrayComplex_2d, /, *, offset: SupportsIndex = 0, dtype: _DTypeLike[ScalarT] ) -> ScalarT: ... @overload # 2d bool -def trace(x: Sequence[Sequence[bool]], /, *, offset: SupportsIndex = 0, dtype: None = None) -> np.bool: ... +def trace(x: _Sequence2D[bool], /, *, offset: SupportsIndex = 0, dtype: None = None) -> np.bool: ... @overload # 2d int def trace(x: Sequence[list[int]], /, *, offset: SupportsIndex = 0, dtype: None = None) -> np.int_: ... @overload # 2d float @@ -980,92 +938,52 @@ def trace[DTypeT: np.dtype[_to_complex]]( ) -> np.ndarray[tuple[int], DTypeT]: ... @overload # 3d, dtype= def trace[ScalarT: _to_complex]( - x: _SupportsArray[tuple[int, int, int], np.dtype[_to_complex]] | Sequence[Sequence[Sequence[_ComplexLike_co]]], - /, - *, - offset: SupportsIndex = 0, - dtype: _DTypeLike[ScalarT], -) -> np.ndarray[tuple[int], np.dtype[ScalarT]]: ... 
+ x: _ToArrayComplex_3d, /, *, offset: SupportsIndex = 0, dtype: _DTypeLike[ScalarT] +) -> _Array1D[ScalarT]: ... @overload # 3d+ known dtype, dtype=None def trace[DTypeT: np.dtype[_to_complex]]( x: _SupportsArray[_AtLeast3D, DTypeT], /, *, offset: SupportsIndex = 0, dtype: None = None ) -> np.ndarray[tuple[int, *tuple[Any, ...]], DTypeT]: ... @overload # 3d+, dtype= def trace[ScalarT: _to_complex]( - x: _SupportsArray[_AtLeast3D, np.dtype[_to_complex]] | _NestedSequence[Sequence[Sequence[_ComplexLike_co]]], - /, - *, - offset: SupportsIndex = 0, - dtype: _DTypeLike[ScalarT], + x: _ArrayLike3ND[_to_complex] | _Sequence3ND[complex], /, *, offset: SupportsIndex = 0, dtype: _DTypeLike[ScalarT] ) -> np.ndarray[tuple[int, *tuple[Any, ...]], np.dtype[ScalarT]]: ... @overload # 3d+ bool -def trace( - x: _NestedSequence[Sequence[Sequence[bool]]], /, *, offset: SupportsIndex = 0, dtype: None = None -) -> NDArray[np.bool]: ... +def trace(x: _Sequence3ND[bool], /, *, offset: SupportsIndex = 0, dtype: None = None) -> NDArray[np.bool]: ... @overload # 3d+ int -def trace( - x: _NestedSequence[Sequence[list[int]]], /, *, offset: SupportsIndex = 0, dtype: None = None -) -> NDArray[np.int_]: ... +def trace(x: _Sequence2ND[list[int]], /, *, offset: SupportsIndex = 0, dtype: None = None) -> NDArray[np.int_]: ... @overload # 3d+ float -def trace( - x: _NestedSequence[Sequence[list[float]]], /, *, offset: SupportsIndex = 0, dtype: None = None -) -> NDArray[np.float64]: ... +def trace(x: _Sequence2ND[list[float]], /, *, offset: SupportsIndex = 0, dtype: None = None) -> NDArray[np.float64]: ... @overload # 3d+ complex -def trace( - x: _NestedSequence[Sequence[list[complex]]], /, *, offset: SupportsIndex = 0, dtype: None = None -) -> NDArray[np.complex128]: ... +def trace(x: _Sequence2ND[list[complex]], /, *, offset: SupportsIndex = 0, dtype: None = None) -> NDArray[np.complex128]: ... 
@overload # fallback def trace(x: _ArrayLikeComplex_co, /, *, offset: SupportsIndex = 0, dtype: DTypeLike | None = None) -> Any: ... # @overload # workaround for microsoft/pyright#10232 -def outer(x1: _ToArray1D[Never], x2: _ToArray1D[Never], /) -> np.ndarray[tuple[int, int]]: ... +def outer(x1: NDArray[Never], x2: NDArray[Never], /) -> _Array2D[Any]: ... @overload # +bool, +bool -def outer( - x1: _ToArray1D[np.bool] | Sequence[bool], x2: _ToArray1D[np.bool] | Sequence[bool], / -) -> np.ndarray[tuple[int, int], np.dtype[np.bool]]: ... +def outer(x1: _ToArrayBool_1d, x2: _ToArrayBool_1d, /) -> _Array2D[np.bool]: ... @overload # ~int64, +int64 -def outer( - x1: _ToArray1D[np.int64] | list[int], x2: _ToArray1D[_to_integer] | Sequence[int], / -) -> np.ndarray[tuple[int, int], np.dtype[np.int64]]: ... +def outer(x1: _AsArrayI64_1d, x2: _ToArrayInt_1d, /) -> _Array2D[np.int64]: ... @overload # +int64, ~int64 -def outer( - x1: _ToArray1D[_to_integer] | Sequence[int], x2: _ToArray1D[np.int64] | list[int], / -) -> np.ndarray[tuple[int, int], np.dtype[np.int64]]: ... +def outer(x1: _ToArrayInt_1d, x2: _AsArrayI64_1d, /) -> _Array2D[np.int64]: ... @overload # ~timedelta64, +timedelta64 -def outer( - x1: _ToArray1D[np.timedelta64] | Sequence[np.timedelta64], x2: _ToArray1D[_to_timedelta64] | Sequence[_to_timedelta64], / -) -> np.ndarray[tuple[int, int], np.dtype[np.timedelta64]]: ... +def outer(x1: _ArrayLike1D[np.timedelta64], x2: _ArrayLike1D[_to_timedelta64], /) -> _Array2D[np.timedelta64]: ... @overload # +timedelta64, ~timedelta64 -def outer( - x1: _ToArray1D[_to_timedelta64] | Sequence[_to_timedelta64], x2: _ToArray1D[np.timedelta64] | Sequence[np.timedelta64], / -) -> np.ndarray[tuple[int, int], np.dtype[np.timedelta64]]: ... +def outer(x1: _ArrayLike1D[_to_timedelta64], x2: _ArrayLike1D[np.timedelta64], /) -> _Array2D[np.timedelta64]: ... 
@overload # ~float64, +float64 -def outer( - x1: _ToArray1D[np.float64] | list[float], x2: _ToArray1D[_to_float64] | Sequence[float], / -) -> np.ndarray[tuple[int, int], np.dtype[np.float64]]: ... +def outer(x1: _AsArrayF64_1d, x2: _ToArrayF64_1d, /) -> _Array2D[np.float64]: ... @overload # +float64, ~float64 -def outer( - x1: _ToArray1D[_to_float64] | Sequence[float], x2: _ToArray1D[np.float64] | list[float], / -) -> np.ndarray[tuple[int, int], np.dtype[np.float64]]: ... +def outer(x1: _ToArrayF64_1d, x2: _AsArrayF64_1d, /) -> _Array2D[np.float64]: ... @overload # ~complex128, +complex128 -def outer( - x1: _ToArray1D[np.complex128] | list[complex], x2: _ToArray1D[_to_complex] | Sequence[complex], / -) -> np.ndarray[tuple[int, int], np.dtype[np.complex128]]: ... +def outer(x1: _AsArrayC128_1d, x2: _ToArrayComplex_1d, /) -> _Array2D[np.complex128]: ... @overload # +complex128, ~complex128 -def outer( - x1: _ToArray1D[_to_complex] | Sequence[complex], x2: _ToArray1D[np.complex128] | list[complex], / -) -> np.ndarray[tuple[int, int], np.dtype[np.complex128]]: ... +def outer(x1: _ToArrayComplex_1d, x2: _AsArrayC128_1d, /) -> _Array2D[np.complex128]: ... @overload # ~ScalarT, ~ScalarT -def outer[ScalarT: np.number | np.object_]( - x1: _ToArray1D[ScalarT] | Sequence[ScalarT], x2: _ToArray1D[ScalarT] | Sequence[ScalarT], / -) -> np.ndarray[tuple[int, int], np.dtype[ScalarT]]: ... +def outer[ScalarT: np.number | np.object_](x1: _ArrayLike1D[ScalarT], x2: _ArrayLike1D[ScalarT], /) -> _Array2D[ScalarT]: ... @overload # fallback -def outer( - x1: _ToArray1D[_to_complex] | Sequence[complex | _to_complex], - x2: _ToArray1D[_to_complex] | Sequence[complex | _to_complex], - /, -) -> np.ndarray[tuple[int, int]]: ... +def outer(x1: _ToArrayComplex_1d, x2: _ToArrayComplex_1d, /) -> _Array2D[Any]: ... # TODO: narrow return types @overload @@ -1075,7 +993,7 @@ def cross( /, *, axis: int = -1, -) -> NDArray[unsignedinteger]: ... +) -> NDArray[np.unsignedinteger]: ... 
@overload def cross( x1: _ArrayLikeInt_co, @@ -1083,7 +1001,7 @@ def cross( /, *, axis: int = -1, -) -> NDArray[signedinteger]: ... +) -> NDArray[np.signedinteger]: ... @overload def cross( x1: _ArrayLikeFloat_co, @@ -1091,7 +1009,7 @@ def cross( /, *, axis: int = -1, -) -> NDArray[floating]: ... +) -> NDArray[np.floating]: ... @overload def cross( x1: _ArrayLikeComplex_co, @@ -1099,16 +1017,16 @@ def cross( /, *, axis: int = -1, -) -> NDArray[complexfloating]: ... +) -> NDArray[np.complexfloating]: ... # TODO: narrow return types @overload def matmul[ScalarT: np.number](x1: _ArrayLike[ScalarT], x2: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload -def matmul(x1: _ArrayLikeUInt_co, x2: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... +def matmul(x1: _ArrayLikeUInt_co, x2: _ArrayLikeUInt_co, /) -> NDArray[np.unsignedinteger]: ... @overload -def matmul(x1: _ArrayLikeInt_co, x2: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... +def matmul(x1: _ArrayLikeInt_co, x2: _ArrayLikeInt_co, /) -> NDArray[np.signedinteger]: ... @overload -def matmul(x1: _ArrayLikeFloat_co, x2: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... +def matmul(x1: _ArrayLikeFloat_co, x2: _ArrayLikeFloat_co, /) -> NDArray[np.floating]: ... @overload -def matmul(x1: _ArrayLikeComplex_co, x2: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... +def matmul(x1: _ArrayLikeComplex_co, x2: _ArrayLikeComplex_co, /) -> NDArray[np.complexfloating]: ... 
diff --git a/numpy/typing/tests/data/reveal/linalg.pyi b/numpy/typing/tests/data/reveal/linalg.pyi index 914dff2ab703..2d36b27b8859 100644 --- a/numpy/typing/tests/data/reveal/linalg.pyi +++ b/numpy/typing/tests/data/reveal/linalg.pyi @@ -12,6 +12,7 @@ from numpy.linalg._linalg import ( type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] type _Array2D[ScalarT: np.generic] = np.ndarray[tuple[int, int], np.dtype[ScalarT]] +type _Array3D[ScalarT: np.generic] = np.ndarray[tuple[int, int, int], np.dtype[ScalarT]] bool_list_1d: list[bool] bool_list_2d: list[list[bool]] @@ -47,17 +48,17 @@ AR_b: npt.NDArray[np.bool] SC_f8: np.float64 AR_f8_0d: np.ndarray[tuple[()], np.dtype[np.float64]] -AR_f8_1d: np.ndarray[tuple[int], np.dtype[np.float64]] -AR_f8_2d: np.ndarray[tuple[int, int], np.dtype[np.float64]] -AR_f8_3d: np.ndarray[tuple[int, int, int], np.dtype[np.float64]] +AR_f8_1d: _Array1D[np.float64] +AR_f8_2d: _Array2D[np.float64] +AR_f8_3d: _Array3D[np.float64] AR_f8_4d: np.ndarray[tuple[int, int, int, int], np.dtype[np.float64]] -AR_f2_2d: np.ndarray[tuple[int, int], np.dtype[np.float16]] -AR_f4_1d: np.ndarray[tuple[int], np.dtype[np.float32]] -AR_f4_2d: np.ndarray[tuple[int, int], np.dtype[np.float32]] -AR_f4_3d: np.ndarray[tuple[int, int, int], np.dtype[np.float32]] -AR_f10_2d: np.ndarray[tuple[int, int], np.dtype[np.longdouble]] -AR_f10_3d: np.ndarray[tuple[int, int, int], np.dtype[np.longdouble]] +AR_f2_2d: _Array2D[np.float16] +AR_f4_1d: _Array1D[np.float32] +AR_f4_2d: _Array2D[np.float32] +AR_f4_3d: _Array3D[np.float32] +AR_f10_2d: _Array2D[np.longdouble] +AR_f10_3d: _Array3D[np.longdouble] ### @@ -184,8 +185,8 @@ assert_type(np.linalg.matrix_rank(AR_f8_1d), Literal[0, 1]) assert_type(np.linalg.matrix_rank(float_list_1d), Literal[0, 1]) assert_type(np.linalg.matrix_rank(AR_f8_2d), np.int_) assert_type(np.linalg.matrix_rank(float_list_2d), np.int_) -assert_type(np.linalg.matrix_rank(AR_f8_3d), np.ndarray[tuple[int], np.dtype[np.int_]]) 
-assert_type(np.linalg.matrix_rank(float_list_3d), np.ndarray[tuple[int], np.dtype[np.int_]]) +assert_type(np.linalg.matrix_rank(AR_f8_3d), _Array1D[np.int_]) +assert_type(np.linalg.matrix_rank(float_list_3d), _Array1D[np.int_]) assert_type(np.linalg.matrix_rank(AR_f8_4d), npt.NDArray[np.int_]) assert_type(np.linalg.matrix_rank(float_list_4d), npt.NDArray[np.int_]) @@ -364,8 +365,8 @@ assert_type(np.linalg.multi_dot([AR_m, AR_m]), Any) # Mypy incorrectly infers `ndarray[Any, Any]`, but pyright behaves correctly. assert_type(np.linalg.diagonal(AR_any), np.ndarray) # type: ignore[assert-type] assert_type(np.linalg.diagonal(AR_f4), npt.NDArray[np.float32]) -assert_type(np.linalg.diagonal(AR_f4_2d), np.ndarray[tuple[int], np.dtype[np.float32]]) -assert_type(np.linalg.diagonal(AR_f8_2d), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.linalg.diagonal(AR_f4_2d), _Array1D[np.float32]) +assert_type(np.linalg.diagonal(AR_f8_2d), _Array1D[np.float64]) assert_type(np.linalg.diagonal(bool_list_2d), npt.NDArray[np.bool]) assert_type(np.linalg.diagonal(int_list_2d), npt.NDArray[np.int_]) assert_type(np.linalg.diagonal(float_list_2d), npt.NDArray[np.float64]) @@ -377,8 +378,8 @@ assert_type(np.linalg.trace(AR_any), Any) assert_type(np.linalg.trace(AR_f4), Any) assert_type(np.linalg.trace(AR_f4_2d), np.float32) assert_type(np.linalg.trace(AR_f8_2d), np.float64) -assert_type(np.linalg.trace(AR_f4_3d), np.ndarray[tuple[int], np.dtype[np.float32]]) -assert_type(np.linalg.trace(AR_f8_3d), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.linalg.trace(AR_f4_3d), _Array1D[np.float32]) +assert_type(np.linalg.trace(AR_f8_3d), _Array1D[np.float64]) assert_type(np.linalg.trace(AR_f8_4d), np.ndarray[tuple[int, *tuple[Any, ...]], np.dtype[np.float64]]) assert_type(np.linalg.trace(bool_list_2d), np.bool) assert_type(np.linalg.trace(int_list_2d), np.int_) @@ -386,16 +387,16 @@ assert_type(np.linalg.trace(float_list_2d), np.float64) 
assert_type(np.linalg.trace(complex_list_2d), np.complex128) assert_type(np.linalg.trace(float_list_3d), npt.NDArray[np.float64]) -assert_type(np.linalg.outer(bool_list_1d, bool_list_1d), np.ndarray[tuple[int, int], np.dtype[np.bool]]) -assert_type(np.linalg.outer(int_list_1d, int_list_1d), np.ndarray[tuple[int, int], np.dtype[np.int64]]) -assert_type(np.linalg.outer(float_list_1d, float_list_1d), np.ndarray[tuple[int, int], np.dtype[np.float64]]) -assert_type(np.linalg.outer(complex_list_1d, complex_list_1d), np.ndarray[tuple[int, int], np.dtype[np.complex128]]) -assert_type(np.linalg.outer(AR_i8, AR_i8), np.ndarray[tuple[int, int], np.dtype[np.int64]]) -assert_type(np.linalg.outer(AR_f8, AR_f8), np.ndarray[tuple[int, int], np.dtype[np.float64]]) -assert_type(np.linalg.outer(AR_c16, AR_c16), np.ndarray[tuple[int, int], np.dtype[np.complex128]]) -assert_type(np.linalg.outer(AR_b, AR_b), np.ndarray[tuple[int, int], np.dtype[np.bool]]) -assert_type(np.linalg.outer(AR_O, AR_O), np.ndarray[tuple[int, int], np.dtype[np.object_]]) -assert_type(np.linalg.outer(AR_i8, AR_m), np.ndarray[tuple[int, int], np.dtype[np.timedelta64]]) +assert_type(np.linalg.outer(bool_list_1d, bool_list_1d), _Array2D[np.bool]) +assert_type(np.linalg.outer(int_list_1d, int_list_1d), _Array2D[np.int64]) +assert_type(np.linalg.outer(float_list_1d, float_list_1d), _Array2D[np.float64]) +assert_type(np.linalg.outer(complex_list_1d, complex_list_1d), _Array2D[np.complex128]) +assert_type(np.linalg.outer(AR_i8, AR_i8), _Array2D[np.int64]) +assert_type(np.linalg.outer(AR_f8, AR_f8), _Array2D[np.float64]) +assert_type(np.linalg.outer(AR_c16, AR_c16), _Array2D[np.complex128]) +assert_type(np.linalg.outer(AR_b, AR_b), _Array2D[np.bool]) +assert_type(np.linalg.outer(AR_O, AR_O), _Array2D[np.object_]) +assert_type(np.linalg.outer(AR_i8, AR_m), _Array2D[np.timedelta64]) assert_type(np.linalg.cross(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) assert_type(np.linalg.cross(AR_f8, AR_f8), 
npt.NDArray[np.floating]) From abdb7bf5e7c61cc715f6b85bebb44a41f6078078 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Thu, 18 Dec 2025 00:48:08 +0100 Subject: [PATCH 1043/1718] DEP: expire deprecations in ``lib._shape_base_impl`` (#30463) * DEP: remove ``lib._shape_base_impl.get_array_wrap`` * DEP: remove ``row_stack`` * MAINT: Update 30463.expired.rst --------- Co-authored-by: Charles Harris --- .../upcoming_changes/30463.expired.rst | 2 + numpy/__init__.py | 1 - numpy/__init__.pyi | 5 +- numpy/_core/tests/test_deprecations.py | 6 +-- numpy/lib/_shape_base_impl.py | 46 +------------------ numpy/lib/_shape_base_impl.pyi | 18 -------- numpy/matlib.pyi | 1 - tools/functions_missing_types.py | 1 - 8 files changed, 7 insertions(+), 73 deletions(-) create mode 100644 doc/release/upcoming_changes/30463.expired.rst diff --git a/doc/release/upcoming_changes/30463.expired.rst b/doc/release/upcoming_changes/30463.expired.rst new file mode 100644 index 000000000000..232448966104 --- /dev/null +++ b/doc/release/upcoming_changes/30463.expired.rst @@ -0,0 +1,2 @@ +* ``numpy.row_stack`` has been removed in favor of ``numpy.vstack`` (deprecated since 2.0). +* ``get_array_wrap`` has been removed (deprecated since 2.0). 
diff --git a/numpy/__init__.py b/numpy/__init__.py index 5012decc43ab..668f203d4b0d 100644 --- a/numpy/__init__.py +++ b/numpy/__init__.py @@ -575,7 +575,6 @@ hsplit, kron, put_along_axis, - row_stack, split, take_along_axis, tile, diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index b344eb7613ad..ddd91cb191c6 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -509,9 +509,8 @@ from numpy.lib._polynomial_impl import ( polyfit, ) -from numpy.lib._shape_base_impl import ( # type: ignore[deprecated] +from numpy.lib._shape_base_impl import ( column_stack, - row_stack, dstack, array_split, split, @@ -670,7 +669,7 @@ __all__ = [ # noqa: RUF022 # lib._shape_base_impl.__all__ "column_stack", "dstack", "array_split", "split", "hsplit", "vsplit", "dsplit", "apply_over_axes", "expand_dims", "apply_along_axis", "kron", "tile", - "take_along_axis", "put_along_axis", "row_stack", + "take_along_axis", "put_along_axis", # lib._type_check_impl.__all__ "iscomplexobj", "isrealobj", "imag", "iscomplex", "isreal", "nan_to_num", "real", "real_if_close", "typename", "mintypecode", "common_type", diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index 1b93b57e90f3..669014d754ef 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -274,9 +274,8 @@ def test_deprecated_np_lib_math(self): class TestLibImports(_DeprecationTestCase): # Deprecated in Numpy 1.26.0, 2023-09 def test_lib_functions_deprecation_call(self): - from numpy import row_stack + from numpy.lib._npyio_impl import recfromcsv, recfromtxt - from numpy.lib._shape_base_impl import get_array_wrap from numpy.lib._utils_impl import safe_eval from numpy.lib.tests.test_io import TextIO @@ -287,9 +286,6 @@ def test_lib_functions_deprecation_call(self): self.assert_deprecated(lambda: recfromcsv(data_gen())) self.assert_deprecated(lambda: recfromtxt(data_gen(), **kwargs)) - self.assert_deprecated(get_array_wrap) - - 
self.assert_deprecated(lambda: row_stack([[]])) self.assert_deprecated(lambda: np.chararray) diff --git a/numpy/lib/_shape_base_impl.py b/numpy/lib/_shape_base_impl.py index c9e0fd316e04..454d3f5d2c26 100644 --- a/numpy/lib/_shape_base_impl.py +++ b/numpy/lib/_shape_base_impl.py @@ -1,9 +1,8 @@ import functools -import warnings import numpy as np import numpy._core.numeric as _nx -from numpy._core import atleast_3d, overrides, vstack +from numpy._core import atleast_3d, overrides from numpy._core._multiarray_umath import _array_converter from numpy._core.fromnumeric import reshape, transpose from numpy._core.multiarray import normalize_axis_index @@ -15,13 +14,12 @@ zeros, zeros_like, ) -from numpy._core.overrides import set_module from numpy._core.shape_base import _arrays_for_stack_dispatcher from numpy.lib._index_tricks_impl import ndindex from numpy.matrixlib.defmatrix import matrix # this raises all the right alarm bells __all__ = [ - 'column_stack', 'row_stack', 'dstack', 'array_split', 'split', + 'column_stack', 'dstack', 'array_split', 'split', 'hsplit', 'vsplit', 'dsplit', 'apply_over_axes', 'expand_dims', 'apply_along_axis', 'kron', 'tile', 'take_along_axis', 'put_along_axis' @@ -602,22 +600,6 @@ def expand_dims(a, axis): return a.reshape(shape) -# NOTE: Remove once deprecation period passes -@set_module("numpy") -def row_stack(tup, *, dtype=None, casting="same_kind"): - # Deprecated in NumPy 2.0, 2023-08-18 - warnings.warn( - "`row_stack` alias is deprecated. " - "Use `np.vstack` directly.", - DeprecationWarning, - stacklevel=2 - ) - return vstack(tup, dtype=dtype, casting=casting) - - -row_stack.__doc__ = vstack.__doc__ - - def _column_stack_dispatcher(tup): return _arrays_for_stack_dispatcher(tup) @@ -1049,30 +1031,6 @@ def dsplit(ary, indices_or_sections): return split(ary, indices_or_sections, 2) -def get_array_wrap(*args): - """Find the wrapper for the array with the highest priority. - - In case of ties, leftmost wins. 
If no wrapper is found, return None. - - .. deprecated:: 2.0 - """ - - # Deprecated in NumPy 2.0, 2023-07-11 - warnings.warn( - "`get_array_wrap` is deprecated. " - "(deprecated in NumPy 2.0)", - DeprecationWarning, - stacklevel=2 - ) - - wrappers = sorted((getattr(x, '__array_priority__', 0), -i, - x.__array_wrap__) for i, x in enumerate(args) - if hasattr(x, '__array_wrap__')) - if wrappers: - return wrappers[-1][-1] - return None - - def _kron_dispatcher(a, b): return (a, b) diff --git a/numpy/lib/_shape_base_impl.pyi b/numpy/lib/_shape_base_impl.pyi index 8037a01ac998..d65855741f89 100644 --- a/numpy/lib/_shape_base_impl.pyi +++ b/numpy/lib/_shape_base_impl.pyi @@ -1,10 +1,8 @@ from collections.abc import Callable, Sequence from typing import Any, Concatenate, Protocol, SupportsIndex, overload, type_check_only -from typing_extensions import deprecated import numpy as np from numpy import ( - _CastingKind, complexfloating, floating, integer, @@ -15,7 +13,6 @@ from numpy import ( ) from numpy._typing import ( ArrayLike, - DTypeLike, NDArray, _ArrayLike, _ArrayLikeBool_co, @@ -29,7 +26,6 @@ from numpy._typing import ( __all__ = [ "column_stack", - "row_stack", "dstack", "array_split", "split", @@ -104,15 +100,6 @@ def expand_dims[ScalarT: np.generic](a: _ArrayLike[ScalarT], axis: _ShapeLike) - @overload def expand_dims(a: ArrayLike, axis: _ShapeLike) -> NDArray[Any]: ... -# Deprecated in NumPy 2.0, 2023-08-18 -@deprecated("`row_stack` alias is deprecated. Use `np.vstack` directly.") -def row_stack( - tup: Sequence[ArrayLike], - *, - dtype: DTypeLike | None = None, - casting: _CastingKind = "same_kind", -) -> NDArray[Any]: ... - # keep in sync with `numpy.ma.extras.column_stack` @overload def column_stack[ScalarT: np.generic](tup: Sequence[_ArrayLike[ScalarT]]) -> NDArray[ScalarT]: ... @@ -185,11 +172,6 @@ def dsplit( indices_or_sections: _ShapeLike, ) -> list[NDArray[Any]]: ... -@overload -def get_array_wrap(*args: _SupportsArrayWrap) -> _ArrayWrap: ... 
-@overload -def get_array_wrap(*args: object) -> _ArrayWrap | None: ... - @overload def kron(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co) -> NDArray[np.bool]: ... @overload diff --git a/numpy/matlib.pyi b/numpy/matlib.pyi index f446dbf1c4b9..3eee0f441e8d 100644 --- a/numpy/matlib.pyi +++ b/numpy/matlib.pyi @@ -396,7 +396,6 @@ from numpy import ( # type: ignore[deprecated] # noqa: F401 roots, rot90, round, - row_stack, s_, save, savetxt, diff --git a/tools/functions_missing_types.py b/tools/functions_missing_types.py index 8149a0106575..33c5d2381fcb 100755 --- a/tools/functions_missing_types.py +++ b/tools/functions_missing_types.py @@ -33,7 +33,6 @@ # Accidentally public, deprecated, or shouldn't be used "Tester", "_core", - "get_array_wrap", "int_asbuffer", "numarray", "oldnumeric", From 627f537b35cd7a5cae6b9cb64db15e12a038eec6 Mon Sep 17 00:00:00 2001 From: janosh Date: Wed, 17 Dec 2025 21:17:19 -0800 Subject: [PATCH 1044/1718] TYP: preserve type for split functions via structural Protocol Add overloads for array_split, split, hsplit, vsplit, and dsplit that preserve the input type when the input implements _SupportsSplitOps. The Protocol requires: - shape: tuple[int, ...] - for ary.shape[axis] access - ndim: int - for dimensional checks in hsplit/vsplit/dsplit - swapaxes(axis1, axis2) -> Self - for axis manipulation - __getitem__(key) -> Self - for slicing Uses PEP 695 syntax and adds tests with a duck-typed SplitableArray class that implements the protocol. 
Closes #23005 --- numpy/lib/_shape_base_impl.pyi | 50 ++++++++++++++++++- numpy/typing/tests/data/reveal/shape_base.pyi | 16 +++++- 2 files changed, 64 insertions(+), 2 deletions(-) diff --git a/numpy/lib/_shape_base_impl.pyi b/numpy/lib/_shape_base_impl.pyi index d65855741f89..4a32b5e656ea 100644 --- a/numpy/lib/_shape_base_impl.pyi +++ b/numpy/lib/_shape_base_impl.pyi @@ -1,5 +1,13 @@ from collections.abc import Callable, Sequence -from typing import Any, Concatenate, Protocol, SupportsIndex, overload, type_check_only +from typing import ( + Any, + Concatenate, + Protocol, + Self, + SupportsIndex, + overload, + type_check_only, +) import numpy as np from numpy import ( @@ -57,6 +65,19 @@ class _SupportsArrayWrap(Protocol): @property def __array_wrap__(self) -> _ArrayWrap: ... +# Protocol for array-like objects that preserve their type through split operations. +# Requires shape (with __getitem__ for axis access), ndim for dimensional checks +# in hsplit/vsplit/dsplit, swapaxes for axis manipulation, and __getitem__ for +# slicing. Examples: pandas DataFrame, xarray DataArray. +@type_check_only +class _SupportsSplitOps(Protocol): + @property + def shape(self) -> tuple[int, ...]: ... + @property + def ndim(self) -> int: ... + def swapaxes(self, axis1: SupportsIndex, axis2: SupportsIndex, /) -> Self: ... + def __getitem__(self, key: Any, /) -> Self: ... + ### def take_along_axis[ScalarT: np.generic]( @@ -112,6 +133,12 @@ def dstack[ScalarT: np.generic](tup: Sequence[_ArrayLike[ScalarT]]) -> NDArray[S @overload def dstack(tup: Sequence[ArrayLike]) -> NDArray[Any]: ... +@overload +def array_split[SplitableT: _SupportsSplitOps]( + ary: SplitableT, + indices_or_sections: _ShapeLike, + axis: SupportsIndex = 0, +) -> list[SplitableT]: ... @overload def array_split[ScalarT: np.generic]( ary: _ArrayLike[ScalarT], @@ -125,6 +152,12 @@ def array_split( axis: SupportsIndex = 0, ) -> list[NDArray[Any]]: ... 
+@overload +def split[SplitableT: _SupportsSplitOps]( + ary: SplitableT, + indices_or_sections: _ShapeLike, + axis: SupportsIndex = 0, +) -> list[SplitableT]: ... @overload def split[ScalarT: np.generic]( ary: _ArrayLike[ScalarT], @@ -140,6 +173,11 @@ def split( # keep in sync with `numpy.ma.extras.hsplit` @overload +def hsplit[SplitableT: _SupportsSplitOps]( + ary: SplitableT, + indices_or_sections: _ShapeLike, +) -> list[SplitableT]: ... +@overload def hsplit[ScalarT: np.generic]( ary: _ArrayLike[ScalarT], indices_or_sections: _ShapeLike, @@ -150,6 +188,11 @@ def hsplit( indices_or_sections: _ShapeLike, ) -> list[NDArray[Any]]: ... +@overload +def vsplit[SplitableT: _SupportsSplitOps]( + ary: SplitableT, + indices_or_sections: _ShapeLike, +) -> list[SplitableT]: ... @overload def vsplit[ScalarT: np.generic]( ary: _ArrayLike[ScalarT], @@ -161,6 +204,11 @@ def vsplit( indices_or_sections: _ShapeLike, ) -> list[NDArray[Any]]: ... +@overload +def dsplit[SplitableT: _SupportsSplitOps]( + ary: SplitableT, + indices_or_sections: _ShapeLike, +) -> list[SplitableT]: ... @overload def dsplit[ScalarT: np.generic]( ary: _ArrayLike[ScalarT], diff --git a/numpy/typing/tests/data/reveal/shape_base.pyi b/numpy/typing/tests/data/reveal/shape_base.pyi index e409a53bcef9..c9171804ab26 100644 --- a/numpy/typing/tests/data/reveal/shape_base.pyi +++ b/numpy/typing/tests/data/reveal/shape_base.pyi @@ -1,4 +1,4 @@ -from typing import Any, assert_type +from typing import Any, Self, SupportsIndex, assert_type import numpy as np import numpy.typing as npt @@ -12,6 +12,15 @@ AR_f8: npt.NDArray[np.float64] AR_LIKE_f8: list[float] +# Duck-typed class implementing _SupportsSplitOps protocol for testing +class _SplitableArray: + shape: tuple[int, ...] + ndim: int + def swapaxes(self, axis1: SupportsIndex, axis2: SupportsIndex, /) -> Self: ... + def __getitem__(self, key: Any, /) -> Self: ... 
+ +splitable: _SplitableArray + assert_type(np.take_along_axis(AR_f8, AR_i8, axis=1), npt.NDArray[np.float64]) assert_type(np.take_along_axis(f8, AR_i8, axis=None), npt.NDArray[np.float64]) @@ -28,18 +37,23 @@ assert_type(np.dstack([AR_LIKE_f8]), npt.NDArray[Any]) assert_type(np.array_split(AR_i8, [3, 5, 6, 10]), list[npt.NDArray[np.int64]]) assert_type(np.array_split(AR_LIKE_f8, [3, 5, 6, 10]), list[npt.NDArray[Any]]) +assert_type(np.array_split(splitable, 2), list[_SplitableArray]) assert_type(np.split(AR_i8, [3, 5, 6, 10]), list[npt.NDArray[np.int64]]) assert_type(np.split(AR_LIKE_f8, [3, 5, 6, 10]), list[npt.NDArray[Any]]) +assert_type(np.split(splitable, 2), list[_SplitableArray]) assert_type(np.hsplit(AR_i8, [3, 5, 6, 10]), list[npt.NDArray[np.int64]]) assert_type(np.hsplit(AR_LIKE_f8, [3, 5, 6, 10]), list[npt.NDArray[Any]]) +assert_type(np.hsplit(splitable, 2), list[_SplitableArray]) assert_type(np.vsplit(AR_i8, [3, 5, 6, 10]), list[npt.NDArray[np.int64]]) assert_type(np.vsplit(AR_LIKE_f8, [3, 5, 6, 10]), list[npt.NDArray[Any]]) +assert_type(np.vsplit(splitable, 2), list[_SplitableArray]) assert_type(np.dsplit(AR_i8, [3, 5, 6, 10]), list[npt.NDArray[np.int64]]) assert_type(np.dsplit(AR_LIKE_f8, [3, 5, 6, 10]), list[npt.NDArray[Any]]) +assert_type(np.dsplit(splitable, 2), list[_SplitableArray]) assert_type(np.kron(AR_b, AR_b), npt.NDArray[np.bool]) assert_type(np.kron(AR_b, AR_i8), npt.NDArray[np.signedinteger]) From 1993653ca6162b8b7bca2b13cf84a8c2bfce3e49 Mon Sep 17 00:00:00 2001 From: Lucas Colley Date: Thu, 18 Dec 2025 12:34:44 +0000 Subject: [PATCH 1045/1718] DOC: pixi-packages: fix README typo --- pixi-packages/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pixi-packages/README.md b/pixi-packages/README.md index 202635cffa1f..9f1fed5fdb2c 100644 --- a/pixi-packages/README.md +++ b/pixi-packages/README.md @@ -1,4 +1,4 @@ -# CPython Pixi packages +# NumPy Pixi packages This directory contains definitions for [Pixi 
packages](https://pixi.sh/latest/reference/pixi_manifest/#the-package-section) which can be built from the NumPy source code. From 73b9feca8321f45dd420803ab1d2f8dd2627557f Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Thu, 18 Dec 2025 18:26:54 +0100 Subject: [PATCH 1046/1718] DEP: remove ``lib._npyio_impl.recfrom{txt,csv}`` (#30467) --- .../upcoming_changes/30467.expired.rst | 1 + numpy/_core/tests/test_deprecations.py | 9 -- numpy/lib/_npyio_impl.py | 96 ---------------- numpy/lib/_npyio_impl.pyi | 11 -- numpy/lib/tests/test_io.py | 107 +----------------- 5 files changed, 6 insertions(+), 218 deletions(-) create mode 100644 doc/release/upcoming_changes/30467.expired.rst diff --git a/doc/release/upcoming_changes/30467.expired.rst b/doc/release/upcoming_changes/30467.expired.rst new file mode 100644 index 000000000000..3474787b2e1f --- /dev/null +++ b/doc/release/upcoming_changes/30467.expired.rst @@ -0,0 +1 @@ +* ``recfromtxt`` and ``recfromcsv`` have been removed from ``numpy.lib._npyio`` in favor of ``numpy.genfromtxt`` (deprecated since 2.0). 
diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index 669014d754ef..2560719d7688 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -274,18 +274,9 @@ def test_deprecated_np_lib_math(self): class TestLibImports(_DeprecationTestCase): # Deprecated in Numpy 1.26.0, 2023-09 def test_lib_functions_deprecation_call(self): - - from numpy.lib._npyio_impl import recfromcsv, recfromtxt from numpy.lib._utils_impl import safe_eval - from numpy.lib.tests.test_io import TextIO self.assert_deprecated(lambda: safe_eval("None")) - - data_gen = lambda: TextIO('A,B\n0,1\n2,3') - kwargs = {'delimiter': ",", 'missing_values': "N/A", 'names': True} - self.assert_deprecated(lambda: recfromcsv(data_gen())) - self.assert_deprecated(lambda: recfromtxt(data_gen(), **kwargs)) - self.assert_deprecated(lambda: np.chararray) diff --git a/numpy/lib/_npyio_impl.py b/numpy/lib/_npyio_impl.py index 0e135917cd52..5467b51de6c2 100644 --- a/numpy/lib/_npyio_impl.py +++ b/numpy/lib/_npyio_impl.py @@ -2485,99 +2485,3 @@ def encode_unicode_cols(row_tup): _genfromtxt_with_like = array_function_dispatch()(genfromtxt) - - -def recfromtxt(fname, **kwargs): - """ - Load ASCII data from a file and return it in a record array. - - If ``usemask=False`` a standard `recarray` is returned, - if ``usemask=True`` a MaskedRecords array is returned. - - .. deprecated:: 2.0 - Use `numpy.genfromtxt` instead. - - Parameters - ---------- - fname, kwargs : For a description of input parameters, see `genfromtxt`. - - See Also - -------- - numpy.genfromtxt : generic function - - Notes - ----- - By default, `dtype` is None, which means that the data-type of the output - array will be determined from the data. - - """ - - # Deprecated in NumPy 2.0, 2023-07-11 - warnings.warn( - "`recfromtxt` is deprecated, " - "use `numpy.genfromtxt` instead." 
- "(deprecated in NumPy 2.0)", - DeprecationWarning, - stacklevel=2 - ) - - kwargs.setdefault("dtype", None) - usemask = kwargs.get('usemask', False) - output = genfromtxt(fname, **kwargs) - if usemask: - from numpy.ma.mrecords import MaskedRecords - output = output.view(MaskedRecords) - else: - output = output.view(np.recarray) - return output - - -def recfromcsv(fname, **kwargs): - """ - Load ASCII data stored in a comma-separated file. - - The returned array is a record array (if ``usemask=False``, see - `recarray`) or a masked record array (if ``usemask=True``, - see `ma.mrecords.MaskedRecords`). - - .. deprecated:: 2.0 - Use `numpy.genfromtxt` with comma as `delimiter` instead. - - Parameters - ---------- - fname, kwargs : For a description of input parameters, see `genfromtxt`. - - See Also - -------- - numpy.genfromtxt : generic function to load ASCII data. - - Notes - ----- - By default, `dtype` is None, which means that the data-type of the output - array will be determined from the data. - - """ - - # Deprecated in NumPy 2.0, 2023-07-11 - warnings.warn( - "`recfromcsv` is deprecated, " - "use `numpy.genfromtxt` with comma as `delimiter` instead. " - "(deprecated in NumPy 2.0)", - DeprecationWarning, - stacklevel=2 - ) - - # Set default kwargs for genfromtxt as relevant to csv import. 
- kwargs.setdefault("case_sensitive", "lower") - kwargs.setdefault("names", True) - kwargs.setdefault("delimiter", ",") - kwargs.setdefault("dtype", None) - output = genfromtxt(fname, **kwargs) - - usemask = kwargs.get("usemask", False) - if usemask: - from numpy.ma.mrecords import MaskedRecords - output = output.view(MaskedRecords) - else: - output = output.view(np.recarray) - return output diff --git a/numpy/lib/_npyio_impl.pyi b/numpy/lib/_npyio_impl.pyi index f13ee2e7e967..10be679f9c74 100644 --- a/numpy/lib/_npyio_impl.pyi +++ b/numpy/lib/_npyio_impl.pyi @@ -25,7 +25,6 @@ from typing_extensions import TypeVar import numpy as np from numpy._core.multiarray import packbits, unpackbits from numpy._typing import ArrayLike, DTypeLike, NDArray, _DTypeLike, _SupportsArrayFunc -from numpy.ma.mrecords import MaskedRecords from ._datasource import DataSource as DataSource @@ -283,13 +282,3 @@ def genfromtxt( ndmin: L[0, 1, 2] = 0, like: _SupportsArrayFunc | None = None, ) -> NDArray[Any]: ... - -@overload -def recfromtxt(fname: _FName, *, usemask: L[False] = False, **kwargs: object) -> np.recarray[Any, np.dtype[np.record]]: ... -@overload -def recfromtxt(fname: _FName, *, usemask: L[True], **kwargs: object) -> MaskedRecords[Any, np.dtype[np.void]]: ... - -@overload -def recfromcsv(fname: _FName, *, usemask: L[False] = False, **kwargs: object) -> np.recarray[Any, np.dtype[np.record]]: ... -@overload -def recfromcsv(fname: _FName, *, usemask: L[True], **kwargs: object) -> MaskedRecords[Any, np.dtype[np.void]]: ... 
diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py index 4051e203dacf..03b585443710 100644 --- a/numpy/lib/tests/test_io.py +++ b/numpy/lib/tests/test_io.py @@ -23,7 +23,6 @@ from numpy.exceptions import VisibleDeprecationWarning from numpy.lib import _npyio_impl from numpy.lib._iotools import ConversionWarning, ConverterError -from numpy.lib._npyio_impl import recfromcsv, recfromtxt from numpy.ma.testutils import assert_equal from numpy.testing import ( HAS_REFCOUNT, @@ -1682,21 +1681,20 @@ def test_dtype_with_converters(self): control = np.array([2009., 23., 46],) assert_equal(test, control) - @pytest.mark.filterwarnings("ignore:.*recfromcsv.*:DeprecationWarning") def test_dtype_with_converters_and_usecols(self): dstr = "1,5,-1,1:1\n2,8,-1,1:n\n3,3,-2,m:n\n" dmap = {'1:1': 0, '1:n': 1, 'm:1': 2, 'm:n': 3} dtyp = [('e1', 'i4'), ('e2', 'i4'), ('e3', 'i2'), ('n', 'i1')] conv = {0: int, 1: int, 2: int, 3: lambda r: dmap[r.decode()]} - test = recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',', - names=None, converters=conv, encoding="bytes") + test = np.genfromtxt(TextIO(dstr,), dtype=dtyp, delimiter=',', + names=None, converters=conv, encoding="bytes") control = np.rec.array([(1, 5, -1, 0), (2, 8, -1, 1), (3, 3, -2, 3)], dtype=dtyp) assert_equal(test, control) dtyp = [('e1', 'i4'), ('e2', 'i4'), ('n', 'i1')] - test = recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',', - usecols=(0, 1, 3), names=None, converters=conv, - encoding="bytes") + test = np.genfromtxt(TextIO(dstr,), dtype=dtyp, delimiter=',', + usecols=(0, 1, 3), names=None, converters=conv, + encoding="bytes") control = np.rec.array([(1, 5, 0), (2, 8, 1), (3, 3, 3)], dtype=dtyp) assert_equal(test, control) @@ -2333,69 +2331,6 @@ def test_utf8_file_nodtype_unicode(self): dtype=np.str_) assert_array_equal(test, ctl) - @pytest.mark.filterwarnings("ignore:.*recfromtxt.*:DeprecationWarning") - def test_recfromtxt(self): - # - data = TextIO('A,B\n0,1\n2,3') - kwargs = {"delimiter": ",", 
"missing_values": "N/A", "names": True} - test = recfromtxt(data, **kwargs) - control = np.array([(0, 1), (2, 3)], - dtype=[('A', int), ('B', int)]) - assert_(isinstance(test, np.recarray)) - assert_equal(test, control) - # - data = TextIO('A,B\n0,1\n2,N/A') - test = recfromtxt(data, dtype=None, usemask=True, **kwargs) - control = ma.array([(0, 1), (2, -1)], - mask=[(False, False), (False, True)], - dtype=[('A', int), ('B', int)]) - assert_equal(test, control) - assert_equal(test.mask, control.mask) - assert_equal(test.A, [0, 2]) - - @pytest.mark.filterwarnings("ignore:.*recfromcsv.*:DeprecationWarning") - def test_recfromcsv(self): - # - data = TextIO('A,B\n0,1\n2,3') - kwargs = {"missing_values": "N/A", "names": True, "case_sensitive": True, - "encoding": "bytes"} - test = recfromcsv(data, dtype=None, **kwargs) - control = np.array([(0, 1), (2, 3)], - dtype=[('A', int), ('B', int)]) - assert_(isinstance(test, np.recarray)) - assert_equal(test, control) - # - data = TextIO('A,B\n0,1\n2,N/A') - test = recfromcsv(data, dtype=None, usemask=True, **kwargs) - control = ma.array([(0, 1), (2, -1)], - mask=[(False, False), (False, True)], - dtype=[('A', int), ('B', int)]) - assert_equal(test, control) - assert_equal(test.mask, control.mask) - assert_equal(test.A, [0, 2]) - # - data = TextIO('A,B\n0,1\n2,3') - test = recfromcsv(data, missing_values='N/A',) - control = np.array([(0, 1), (2, 3)], - dtype=[('a', int), ('b', int)]) - assert_(isinstance(test, np.recarray)) - assert_equal(test, control) - # - data = TextIO('A,B\n0,1\n2,3') - dtype = [('a', int), ('b', float)] - test = recfromcsv(data, missing_values='N/A', dtype=dtype) - control = np.array([(0, 1), (2, 3)], - dtype=dtype) - assert_(isinstance(test, np.recarray)) - assert_equal(test, control) - - # gh-10394 - data = TextIO('color\n"red"\n"blue"') - test = recfromcsv(data, converters={0: lambda x: x.strip('\"')}) - control = np.array([('red',), ('blue',)], dtype=[('color', (str, 4))]) - assert_equal(test.dtype, 
control.dtype) - assert_equal(test, control) - def test_max_rows(self): # Test the `max_rows` keyword argument. data = '1 2\n3 4\n5 6\n7 8\n9 10\n' @@ -2654,38 +2589,6 @@ def test_genfromtxt(self, filename_type): data = np.genfromtxt(path) assert_array_equal(a, data) - @pytest.mark.parametrize("filename_type", [Path, str]) - @pytest.mark.filterwarnings("ignore:.*recfromtxt.*:DeprecationWarning") - def test_recfromtxt(self, filename_type): - with temppath(suffix='.txt') as path: - path = filename_type(path) - with open(path, 'w') as f: - f.write('A,B\n0,1\n2,3') - - kwargs = {"delimiter": ",", "missing_values": "N/A", "names": True} - test = recfromtxt(path, **kwargs) - control = np.array([(0, 1), (2, 3)], - dtype=[('A', int), ('B', int)]) - assert_(isinstance(test, np.recarray)) - assert_equal(test, control) - - @pytest.mark.parametrize("filename_type", [Path, str]) - @pytest.mark.filterwarnings("ignore:.*recfromcsv.*:DeprecationWarning") - def test_recfromcsv(self, filename_type): - with temppath(suffix='.txt') as path: - path = filename_type(path) - with open(path, 'w') as f: - f.write('A,B\n0,1\n2,3') - - kwargs = { - "missing_values": "N/A", "names": True, "case_sensitive": True - } - test = recfromcsv(path, dtype=None, **kwargs) - control = np.array([(0, 1), (2, 3)], - dtype=[('A', int), ('B', int)]) - assert_(isinstance(test, np.recarray)) - assert_equal(test, control) - def test_gzip_load(): a = np.random.random((5, 5)) From 71c08c9caa24b58cd13cf9687deae4900130167e Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Thu, 18 Dec 2025 18:34:03 +0100 Subject: [PATCH 1047/1718] DEP: expire deprecation for ``finfo(None)`` (#30460) * DEP: expire deprecation for ``finfo(None)`` * DEP: remove ``finfo(None)`` deprecation test * DEP/TST: ensure that `finfo(None)` raises * MAINT: `finfo`: check `None` before cache lookup --- doc/release/upcoming_changes/30460.expired.rst | 1 + numpy/_core/getlimits.py | 13 +++---------- numpy/_core/tests/test_deprecations.py | 6 
------ numpy/_core/tests/test_getlimits.py | 3 +++ 4 files changed, 7 insertions(+), 16 deletions(-) create mode 100644 doc/release/upcoming_changes/30460.expired.rst diff --git a/doc/release/upcoming_changes/30460.expired.rst b/doc/release/upcoming_changes/30460.expired.rst new file mode 100644 index 000000000000..5fb6bf470866 --- /dev/null +++ b/doc/release/upcoming_changes/30460.expired.rst @@ -0,0 +1 @@ +* Passing ``None`` as dtype to ``np.finfo`` will now raise a ``TypeError`` (deprecated since 1.25) diff --git a/numpy/_core/getlimits.py b/numpy/_core/getlimits.py index 3c03d81165fb..e821048c5da1 100644 --- a/numpy/_core/getlimits.py +++ b/numpy/_core/getlimits.py @@ -5,7 +5,6 @@ import math import types -import warnings from functools import cached_property from numpy._utils import set_module @@ -176,6 +175,9 @@ class finfo: __class_getitem__ = classmethod(types.GenericAlias) def __new__(cls, dtype): + if dtype is None: + raise TypeError("dtype must not be None") + try: obj = cls._finfo_cache.get(dtype) # most common path if obj is not None: @@ -183,15 +185,6 @@ def __new__(cls, dtype): except TypeError: pass - if dtype is None: - # Deprecated in NumPy 1.25, 2023-01-16 - warnings.warn( - "finfo() dtype cannot be None. This behavior will " - "raise an error in the future. 
(Deprecated in NumPy 1.25)", - DeprecationWarning, - stacklevel=2 - ) - try: dtype = numeric.dtype(dtype) except TypeError: diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index 2560719d7688..22d58c8ada96 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -260,12 +260,6 @@ def test_attributeerror_includes_info(self, name): getattr(np, name) -class TestDeprecatedFinfo(_DeprecationTestCase): - # Deprecated in NumPy 1.25, 2023-01-16 - def test_deprecated_none(self): - self.assert_deprecated(np.finfo, args=(None,)) - - class TestMathAlias(_DeprecationTestCase): def test_deprecated_np_lib_math(self): self.assert_deprecated(lambda: np.lib.math) diff --git a/numpy/_core/tests/test_getlimits.py b/numpy/_core/tests/test_getlimits.py index 4e911b89e89f..786fd1d494e4 100644 --- a/numpy/_core/tests/test_getlimits.py +++ b/numpy/_core/tests/test_getlimits.py @@ -83,6 +83,9 @@ class NonHashableWithDtype: x = NonHashableWithDtype() assert np.finfo(x) == np.finfo(x.dtype) + def test_no_none_sense(self): + assert_raises(TypeError, finfo, None) + class TestIinfo: def test_basic(self): From 0cf86f726d7288702866b3ca2af2b5d8ac128292 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Thu, 18 Dec 2025 18:36:34 +0100 Subject: [PATCH 1048/1718] TYP: ``linalg.cross``: improved dtype specialization (#30466) --- numpy/linalg/_linalg.pyi | 101 +++++++++++++++++----- numpy/typing/tests/data/reveal/linalg.pyi | 16 +++- 2 files changed, 93 insertions(+), 24 deletions(-) diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index 6a10786753c4..dd508514cf52 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -98,9 +98,14 @@ type _Sequence3D[T] = Sequence[_Sequence2D[T]] type _Sequence2ND[T] = _NestedSequence[Sequence[T]] type _Sequence3ND[T] = _NestedSequence[_Sequence2D[T]] type _Sequence4ND[T] = _NestedSequence[_Sequence3D[T]] +type _Sequence0D1D[T] = T | Sequence[T] 
+type _Sequence1D2D[T] = Sequence[T] | _Sequence2D[T] type _ArrayLike1D[ScalarT: np.generic] = _SupportsArray[tuple[int], np.dtype[ScalarT]] | Sequence[ScalarT] # ==1d type _ArrayLike2D[ScalarT: np.generic] = _SupportsArray[tuple[int, int], np.dtype[ScalarT]] | _Sequence2D[ScalarT] # ==2d +type _ArrayLike1D2D[ScalarT: np.generic] = ( # 1d or 2d + _SupportsArray[tuple[int] | tuple[int, int], np.dtype[ScalarT]] | _Sequence1D2D[ScalarT] +) type _ArrayLike3D[ScalarT: np.generic] = _SupportsArray[tuple[int, int, int], np.dtype[ScalarT]] | _Sequence3D[ScalarT] # ==3d type _ArrayLike2ND[ScalarT: np.generic] = _SupportsArray[_AtLeast2D, np.dtype[ScalarT]] | _Sequence2ND[ScalarT] # >=2d type _ArrayLike3ND[ScalarT: np.generic] = _SupportsArray[_AtLeast3D, np.dtype[ScalarT]] | _Sequence3ND[ScalarT] # >=3d @@ -119,6 +124,7 @@ type _ToArrayComplex_1d = _ArrayLike1D[_to_complex] | Sequence[complex] type _ToArrayComplex_2d = _ArrayLike2D[_to_complex] | _Sequence2D[complex] type _ToArrayComplex_3d = _ArrayLike3D[_to_complex] | _Sequence3D[complex] # the invariant `list` type avoids overlap with bool, int, etc +type _AsArrayI64 = _ArrayLike[np.int64] | list[int] | _NestedSequence[list[int]] type _AsArrayI64_1d = _ArrayLike1D[np.int64] | list[int] type _AsArrayF64 = _ArrayLike[np.float64] | list[float] | _NestedSequence[list[float]] type _AsArrayF64_1d = _ArrayLike1D[np.float64] | list[float] @@ -985,39 +991,92 @@ def outer[ScalarT: np.number | np.object_](x1: _ArrayLike1D[ScalarT], x2: _Array @overload # fallback def outer(x1: _ToArrayComplex_1d, x2: _ToArrayComplex_1d, /) -> _Array2D[Any]: ... 
-# TODO: narrow return types -@overload +# +@overload # ~T, ~T (we use constraints instead of a `: np.number` bound to prevent joins/unions) +def cross[ + AnyScalarT: ( # int64, float64, and complex128 are handled separately + np.int8, np.uint8, np.int16, np.uint16, np.int32, np.uint32, np.uint64, + np.float16, np.float32, np.longdouble, np.complex64, np.clongdouble, + ), +]( + x1: _ArrayLike1D2D[AnyScalarT], + x2: _ArrayLike1D2D[AnyScalarT], + /, + *, + axis: SupportsIndex = -1, +) -> NDArray[AnyScalarT]: ... # fmt: skip +@overload # ~int64, +int64 def cross( - x1: _ArrayLikeUInt_co, - x2: _ArrayLikeUInt_co, + x1: _ArrayLike1D2D[np.int64] | _Sequence1D2D[int], + x2: _ArrayLike1D2D[np.integer] | _Sequence1D2D[int], /, *, - axis: int = -1, -) -> NDArray[np.unsignedinteger]: ... -@overload + axis: SupportsIndex = -1, +) -> NDArray[np.int64]: ... +@overload # +int64, ~int64 def cross( - x1: _ArrayLikeInt_co, - x2: _ArrayLikeInt_co, + x1: _ArrayLike1D2D[np.integer], + x2: _ArrayLike1D2D[np.int64], /, *, - axis: int = -1, -) -> NDArray[np.signedinteger]: ... -@overload + axis: SupportsIndex = -1, +) -> NDArray[np.int64]: ... +@overload # ~float64, +float64 def cross( - x1: _ArrayLikeFloat_co, - x2: _ArrayLikeFloat_co, + x1: _ArrayLike1D2D[np.float64] | _Sequence0D1D[list[float]], + x2: _ArrayLike1D2D[np.floating | np.integer] | _Sequence1D2D[float], /, *, - axis: int = -1, -) -> NDArray[np.floating]: ... -@overload + axis: SupportsIndex = -1, +) -> NDArray[np.float64]: ... +@overload # +float64, ~float64 def cross( - x1: _ArrayLikeComplex_co, - x2: _ArrayLikeComplex_co, + x1: _ArrayLike1D2D[np.floating | np.integer] | _Sequence1D2D[float], + x2: _ArrayLike1D2D[np.float64] | _Sequence0D1D[list[float]], /, *, - axis: int = -1, -) -> NDArray[np.complexfloating]: ... + axis: SupportsIndex = -1, +) -> NDArray[np.float64]: ... 
+@overload # ~complex128, +complex128 +def cross( + x1: _ArrayLike1D2D[np.complex128] | _Sequence0D1D[list[complex]], + x2: _ArrayLike1D2D[np.number] | _Sequence1D2D[complex], + /, + *, + axis: SupportsIndex = -1, +) -> NDArray[np.complex128]: ... +@overload # +complex128, ~complex128 +def cross( + x1: _ArrayLike1D2D[np.number] | _Sequence1D2D[complex], + x2: _ArrayLike1D2D[np.complex128] | _Sequence0D1D[list[complex]], + /, + *, + axis: SupportsIndex = -1, +) -> NDArray[np.complex128]: ... +@overload # ~object_, +object_ +def cross( + x1: _SupportsArray[tuple[int] | tuple[int, int], np.dtype[np.object_]], + x2: _ArrayLike1D2D[np.number | np.object_] | _Sequence1D2D[complex], + /, + *, + axis: SupportsIndex = -1, +) -> NDArray[np.object_]: ... +@overload # +object_, ~object_ +def cross( + x1: _ArrayLike1D2D[np.number | np.object_] | _Sequence1D2D[complex], + x2: _SupportsArray[tuple[int] | tuple[int, int], np.dtype[np.object_]], + /, + *, + axis: SupportsIndex = -1, +) -> NDArray[np.object_]: ... +@overload # fallback +def cross[ScalarT: np.number]( + x1: _ArrayLike1D2D[ScalarT], + x2: _ArrayLike1D2D[ScalarT], + /, + *, + axis: SupportsIndex = -1, +) -> NDArray[ScalarT]: ... 
# TODO: narrow return types @overload diff --git a/numpy/typing/tests/data/reveal/linalg.pyi b/numpy/typing/tests/data/reveal/linalg.pyi index 2d36b27b8859..607e18047b81 100644 --- a/numpy/typing/tests/data/reveal/linalg.pyi +++ b/numpy/typing/tests/data/reveal/linalg.pyi @@ -398,9 +398,19 @@ assert_type(np.linalg.outer(AR_b, AR_b), _Array2D[np.bool]) assert_type(np.linalg.outer(AR_O, AR_O), _Array2D[np.object_]) assert_type(np.linalg.outer(AR_i8, AR_m), _Array2D[np.timedelta64]) -assert_type(np.linalg.cross(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) -assert_type(np.linalg.cross(AR_f8, AR_f8), npt.NDArray[np.floating]) -assert_type(np.linalg.cross(AR_c16, AR_c16), npt.NDArray[np.complexfloating]) +assert_type(np.linalg.cross(int_list_1d, int_list_1d), npt.NDArray[np.int64]) +assert_type(np.linalg.cross(float_list_1d, int_list_1d), npt.NDArray[np.float64]) +assert_type(np.linalg.cross(float_list_1d, complex_list_1d), npt.NDArray[np.complex128]) +assert_type(np.linalg.cross(AR_i8, AR_i8), npt.NDArray[np.int64]) +assert_type(np.linalg.cross(AR_f8, AR_i8), npt.NDArray[np.float64]) +assert_type(np.linalg.cross(AR_f2, AR_f2), npt.NDArray[np.float16]) +assert_type(np.linalg.cross(AR_f8, AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.cross(AR_c16, AR_i8), npt.NDArray[np.complex128]) +assert_type(np.linalg.cross(AR_c16, AR_f8), npt.NDArray[np.complex128]) +assert_type(np.linalg.cross(AR_c16, AR_c16), npt.NDArray[np.complex128]) +assert_type(np.linalg.cross(AR_O, AR_f8), npt.NDArray[np.object_]) +assert_type(np.linalg.cross(AR_f8, AR_O), npt.NDArray[np.object_]) +assert_type(np.linalg.cross(AR_O, AR_O), npt.NDArray[np.object_]) assert_type(np.linalg.matmul(AR_i8, AR_i8), npt.NDArray[np.int64]) assert_type(np.linalg.matmul(AR_f8, AR_f8), npt.NDArray[np.float64]) From 11b4586e90e00c78a35e3940e9392fea8ce1efbb Mon Sep 17 00:00:00 2001 From: janosh Date: Thu, 18 Dec 2025 10:21:41 -0800 Subject: [PATCH 1049/1718] use int instead of SupportsIndex for swapaxes params As 
suggested in review: axis parameters are in contravariant position, so using int is more general and allows implementations that accept int or broader types. Also: clean up comment, fix import formatting. --- numpy/lib/_shape_base_impl.pyi | 7 +++---- numpy/typing/tests/data/reveal/shape_base.pyi | 4 ++-- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/numpy/lib/_shape_base_impl.pyi b/numpy/lib/_shape_base_impl.pyi index 4a32b5e656ea..280f70a508cb 100644 --- a/numpy/lib/_shape_base_impl.pyi +++ b/numpy/lib/_shape_base_impl.pyi @@ -66,16 +66,15 @@ class _SupportsArrayWrap(Protocol): def __array_wrap__(self) -> _ArrayWrap: ... # Protocol for array-like objects that preserve their type through split operations. -# Requires shape (with __getitem__ for axis access), ndim for dimensional checks -# in hsplit/vsplit/dsplit, swapaxes for axis manipulation, and __getitem__ for -# slicing. Examples: pandas DataFrame, xarray DataArray. +# Requires shape for size, ndim for dimensional checks in hsplit/vsplit/dsplit, +# swapaxes for axis manipulation, and __getitem__ for slicing. @type_check_only class _SupportsSplitOps(Protocol): @property def shape(self) -> tuple[int, ...]: ... @property def ndim(self) -> int: ... - def swapaxes(self, axis1: SupportsIndex, axis2: SupportsIndex, /) -> Self: ... + def swapaxes(self, axis1: int, axis2: int, /) -> Self: ... def __getitem__(self, key: Any, /) -> Self: ... ### diff --git a/numpy/typing/tests/data/reveal/shape_base.pyi b/numpy/typing/tests/data/reveal/shape_base.pyi index c9171804ab26..ce033e97d070 100644 --- a/numpy/typing/tests/data/reveal/shape_base.pyi +++ b/numpy/typing/tests/data/reveal/shape_base.pyi @@ -1,4 +1,4 @@ -from typing import Any, Self, SupportsIndex, assert_type +from typing import Any, Self, assert_type import numpy as np import numpy.typing as npt @@ -16,7 +16,7 @@ AR_LIKE_f8: list[float] class _SplitableArray: shape: tuple[int, ...] 
ndim: int - def swapaxes(self, axis1: SupportsIndex, axis2: SupportsIndex, /) -> Self: ... + def swapaxes(self, axis1: int, axis2: int, /) -> Self: ... def __getitem__(self, key: Any, /) -> Self: ... splitable: _SplitableArray From 7dad5a13b405b9897103255951e1c967a4516ebe Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 18 Dec 2025 12:12:45 -0700 Subject: [PATCH 1050/1718] MAINT: Bump prefix-dev/setup-pixi from 0.8.1 to 0.9.3 (#30473) Bumps [prefix-dev/setup-pixi](https://github.com/prefix-dev/setup-pixi) from 0.8.1 to 0.9.3. - [Release notes](https://github.com/prefix-dev/setup-pixi/releases) - [Commits](https://github.com/prefix-dev/setup-pixi/compare/ba3bb36eb2066252b2363392b7739741bb777659...82d477f15f3a381dbcc8adc1206ce643fe110fb7) --- updated-dependencies: - dependency-name: prefix-dev/setup-pixi dependency-version: 0.9.3 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/pixi-packages.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pixi-packages.yml b/.github/workflows/pixi-packages.yml index 7c4c704282fd..400af28084e9 100644 --- a/.github/workflows/pixi-packages.yml +++ b/.github/workflows/pixi-packages.yml @@ -30,7 +30,7 @@ jobs: fetch-tags: true persist-credentials: false - - uses: prefix-dev/setup-pixi@ba3bb36eb2066252b2363392b7739741bb777659 # v0.8.1 + - uses: prefix-dev/setup-pixi@82d477f15f3a381dbcc8adc1206ce643fe110fb7 # v0.9.3 with: pixi-version: v0.60.0 run-install: false From 00f111d500a2e650a0baacea7638425d91d94637 Mon Sep 17 00:00:00 2001 From: Vineet Kumar <108144301+whyvineet@users.noreply.github.com> Date: Fri, 19 Dec 2025 00:43:29 +0530 Subject: [PATCH 1051/1718] DEP: remove unused configuration files for npymath and mlib (#30472) --- numpy/_core/meson.build | 27 
--------------------------- numpy/_core/mlib.ini.in | 12 ------------ numpy/_core/npymath.ini.in | 20 -------------------- 3 files changed, 59 deletions(-) delete mode 100644 numpy/_core/mlib.ini.in delete mode 100644 numpy/_core/npymath.ini.in diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index aa4da9c11146..0b3626d91965 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -605,33 +605,6 @@ npymath_lib = static_library('npymath', gnu_symbol_visibility: 'hidden', ) -dir_separator = '/' -if build_machine.system() == 'windows' - dir_separator = '\\' -endif -configure_file( - input: 'npymath.ini.in', - output: 'npymath.ini', - configuration: configuration_data({ - 'pkgname' : 'numpy._core', - 'sep' : dir_separator, - }), - install: true, - install_dir: np_dir / '_core/lib/npy-pkg-config', - install_tag: 'devel' -) -configure_file( - input: 'mlib.ini.in', - output: 'mlib.ini', - configuration: configuration_data({ - 'posix_mathlib' : mlib_linkflag, - 'msvc_mathlib' : 'm.lib', - }), - install: true, - install_dir: np_dir / '_core/lib/npy-pkg-config', - install_tag: 'devel' -) - if false # This doesn't quite work (yet), it assumes we'll install headers under # include/, and trying to add the correct path with `extra_cflags` runs into diff --git a/numpy/_core/mlib.ini.in b/numpy/_core/mlib.ini.in deleted file mode 100644 index badaa2ae9de4..000000000000 --- a/numpy/_core/mlib.ini.in +++ /dev/null @@ -1,12 +0,0 @@ -[meta] -Name = mlib -Description = Math library used with this version of numpy -Version = 1.0 - -[default] -Libs=@posix_mathlib@ -Cflags= - -[msvc] -Libs=@msvc_mathlib@ -Cflags= diff --git a/numpy/_core/npymath.ini.in b/numpy/_core/npymath.ini.in deleted file mode 100644 index a233b8f3bfa9..000000000000 --- a/numpy/_core/npymath.ini.in +++ /dev/null @@ -1,20 +0,0 @@ -[meta] -Name=npymath -Description=Portable, core math library implementing C99 standard -Version=0.1 - -[variables] -pkgname=@pkgname@ -prefix=${pkgdir} 
-libdir=${prefix}@sep@lib -includedir=${prefix}@sep@include - -[default] -Libs=-L${libdir} -lnpymath -Cflags=-I${includedir} -Requires=mlib - -[msvc] -Libs=/LIBPATH:${libdir} npymath.lib -Cflags=/INCLUDE:${includedir} -Requires=mlib From d366070c5e4d79fb1505340b89fb9cec33e6e860 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Thu, 18 Dec 2025 20:15:38 +0100 Subject: [PATCH 1052/1718] DEP: expire deprecations in ``lib._utils_impl`` (#30464) * DEP: remove `lib._utils_impl.deprecate[_with_doc]` * DEP: remove `lib._utils_impl.safe_eval` --- numpy/_core/tests/test_deprecations.py | 3 - numpy/_expired_attrs_2_0.py | 4 +- numpy/lib/_utils_impl.py | 243 ------------------------- tools/functions_missing_types.py | 1 - 4 files changed, 2 insertions(+), 249 deletions(-) diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index 22d58c8ada96..2dd14d9e00de 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -268,9 +268,6 @@ def test_deprecated_np_lib_math(self): class TestLibImports(_DeprecationTestCase): # Deprecated in Numpy 1.26.0, 2023-09 def test_lib_functions_deprecation_call(self): - from numpy.lib._utils_impl import safe_eval - - self.assert_deprecated(lambda: safe_eval("None")) self.assert_deprecated(lambda: np.chararray) diff --git a/numpy/_expired_attrs_2_0.py b/numpy/_expired_attrs_2_0.py index 2eebf95bc558..be9a84a3a310 100644 --- a/numpy/_expired_attrs_2_0.py +++ b/numpy/_expired_attrs_2_0.py @@ -58,9 +58,9 @@ "recfromcsv": "Use `np.genfromtxt` with comma delimiter instead.", "recfromtxt": "Use `np.genfromtxt` instead.", "deprecate": "Emit `DeprecationWarning` with `warnings.warn` directly, " - "or use `typing.deprecated`.", + "or use `warnings.deprecated`.", "deprecate_with_doc": "Emit `DeprecationWarning` with `warnings.warn` " - "directly, or use `typing.deprecated`.", + "directly, or use `warnings.deprecated`.", "find_common_type": "Use `numpy.promote_types` or 
`numpy.result_type` instead. " "To achieve semantics for the `scalar_types` argument, use " diff --git a/numpy/lib/_utils_impl.py b/numpy/lib/_utils_impl.py index 164aa4ee3d8c..6531d9631145 100644 --- a/numpy/lib/_utils_impl.py +++ b/numpy/lib/_utils_impl.py @@ -1,10 +1,7 @@ -import functools import os import platform import sys -import textwrap import types -import warnings import numpy as np from numpy._core import ndarray @@ -123,73 +120,6 @@ def get_include(): return d -class _Deprecate: - """ - Decorator class to deprecate old functions. - - Refer to `deprecate` for details. - - See Also - -------- - deprecate - - """ - - def __init__(self, old_name=None, new_name=None, message=None): - self.old_name = old_name - self.new_name = new_name - self.message = message - - def __call__(self, func, *args, **kwargs): - """ - Decorator call. Refer to ``decorate``. - - """ - old_name = self.old_name - new_name = self.new_name - message = self.message - - if old_name is None: - old_name = func.__name__ - if new_name is None: - depdoc = f"`{old_name}` is deprecated!" - else: - depdoc = f"`{old_name}` is deprecated, use `{new_name}` instead!" - - if message is not None: - depdoc += "\n" + message - - @functools.wraps(func) - def newfunc(*args, **kwds): - warnings.warn(depdoc, DeprecationWarning, stacklevel=2) - return func(*args, **kwds) - - newfunc.__name__ = old_name - doc = func.__doc__ - if doc is None: - doc = depdoc - else: - lines = doc.expandtabs().split('\n') - indent = _get_indent(lines[1:]) - if lines[0].lstrip(): - # Indent the original first line to let inspect.cleandoc() - # dedent the docstring despite the deprecation notice. - doc = indent * ' ' + doc - else: - # Remove the same leading blank lines as cleandoc() would. 
- skip = len(lines[0]) + 1 - for line in lines[1:]: - if len(line) > indent: - break - skip += len(line) + 1 - doc = doc[skip:] - depdoc = textwrap.indent(depdoc, ' ' * indent) - doc = f'{depdoc}\n\n{doc}' - newfunc.__doc__ = doc - - return newfunc - - def _get_indent(lines): """ Determines the leading whitespace that could be removed from all the lines. @@ -204,112 +134,6 @@ def _get_indent(lines): return indent -def deprecate(*args, **kwargs): - """ - Issues a DeprecationWarning, adds warning to `old_name`'s - docstring, rebinds ``old_name.__name__`` and returns the new - function object. - - This function may also be used as a decorator. - - .. deprecated:: 2.0 - Use `~warnings.warn` with :exc:`DeprecationWarning` instead. - - Parameters - ---------- - func : function - The function to be deprecated. - old_name : str, optional - The name of the function to be deprecated. Default is None, in - which case the name of `func` is used. - new_name : str, optional - The new name for the function. Default is None, in which case the - deprecation message is that `old_name` is deprecated. If given, the - deprecation message is that `old_name` is deprecated and `new_name` - should be used instead. - message : str, optional - Additional explanation of the deprecation. Displayed in the - docstring after the warning. - - Returns - ------- - old_func : function - The deprecated function. - - Examples - -------- - Note that ``olduint`` returns a value after printing Deprecation - Warning: - - >>> olduint = np.lib.utils.deprecate(np.uint) - DeprecationWarning: `uint64` is deprecated! # may vary - >>> olduint(6) - 6 - - """ - # Deprecate may be run as a function or as a decorator - # If run as a function, we initialise the decorator class - # and execute its __call__ method. - - # Deprecated in NumPy 2.0, 2023-07-11 - warnings.warn( - "`deprecate` is deprecated, " - "use `warn` with `DeprecationWarning` instead. 
" - "(deprecated in NumPy 2.0)", - DeprecationWarning, - stacklevel=2 - ) - - if args: - fn = args[0] - args = args[1:] - - return _Deprecate(*args, **kwargs)(fn) - else: - return _Deprecate(*args, **kwargs) - - -def deprecate_with_doc(msg): - """ - Deprecates a function and includes the deprecation in its docstring. - - .. deprecated:: 2.0 - Use `~warnings.warn` with :exc:`DeprecationWarning` instead. - - This function is used as a decorator. It returns an object that can be - used to issue a DeprecationWarning, by passing the to-be decorated - function as argument, this adds warning to the to-be decorated function's - docstring and returns the new function object. - - See Also - -------- - deprecate : Decorate a function such that it issues a - :exc:`DeprecationWarning` - - Parameters - ---------- - msg : str - Additional explanation of the deprecation. Displayed in the - docstring after the warning. - - Returns - ------- - obj : object - - """ - - # Deprecated in NumPy 2.0, 2023-07-11 - warnings.warn( - "`deprecate` is deprecated, " - "use `warn` with `DeprecationWarning` instead. " - "(deprecated in NumPy 2.0)", - DeprecationWarning, - stacklevel=2 - ) - - return _Deprecate(message=msg) - - #----------------------------------------------------------------------------- @@ -580,73 +404,6 @@ def info(object=None, maxwidth=76, output=None, toplevel='numpy'): print(inspect.getdoc(object), file=output) -def safe_eval(source): - """ - Protected string evaluation. - - .. deprecated:: 2.0 - Use `ast.literal_eval` instead. - - Evaluate a string containing a Python literal expression without - allowing the execution of arbitrary non-literal code. - - .. warning:: - - This function is identical to :py:meth:`ast.literal_eval` and - has the same security implications. It may not always be safe - to evaluate large input strings. - - Parameters - ---------- - source : str - The string to evaluate. - - Returns - ------- - obj : object - The result of evaluating `source`. 
- - Raises - ------ - SyntaxError - If the code has invalid Python syntax, or if it contains - non-literal code. - - Examples - -------- - >>> np.safe_eval('1') - 1 - >>> np.safe_eval('[1, 2, 3]') - [1, 2, 3] - >>> np.safe_eval('{"foo": ("bar", 10.0)}') - {'foo': ('bar', 10.0)} - - >>> np.safe_eval('import os') - Traceback (most recent call last): - ... - SyntaxError: invalid syntax - - >>> np.safe_eval('open("/home/user/.ssh/id_dsa").read()') - Traceback (most recent call last): - ... - ValueError: malformed node or string: <_ast.Call object at 0x...> - - """ - - # Deprecated in NumPy 2.0, 2023-07-11 - warnings.warn( - "`safe_eval` is deprecated. Use `ast.literal_eval` instead. " - "Be aware of security implications, such as memory exhaustion " - "based attacks (deprecated in NumPy 2.0)", - DeprecationWarning, - stacklevel=2 - ) - - # Local import to speed up numpy's import time. - import ast - return ast.literal_eval(source) - - def _median_nancheck(data, result, axis): """ Utility function to check median result from data for NaN values at the end diff --git a/tools/functions_missing_types.py b/tools/functions_missing_types.py index 33c5d2381fcb..9362c1478bd0 100755 --- a/tools/functions_missing_types.py +++ b/tools/functions_missing_types.py @@ -36,7 +36,6 @@ "int_asbuffer", "numarray", "oldnumeric", - "safe_eval", "test", "typeDict", # Builtins From 399ac53ddcfbe58d43456181bf8cb0763fd688e0 Mon Sep 17 00:00:00 2001 From: Cameron <96146940+camriddell@users.noreply.github.com> Date: Thu, 18 Dec 2025 11:19:40 -0800 Subject: [PATCH 1053/1718] MAINT: remove duplicate byte decode in loadtxt (#30471) numpy/lib/_npyio_impl.py::loadtxt had repeated code for decoding the delimiter[bytes]. This appears to have been a mistake and these lines can be safely removed. 
--- numpy/lib/_npyio_impl.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/numpy/lib/_npyio_impl.py b/numpy/lib/_npyio_impl.py index 5467b51de6c2..ad64ca18144e 100644 --- a/numpy/lib/_npyio_impl.py +++ b/numpy/lib/_npyio_impl.py @@ -1365,9 +1365,6 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None, max_rows=max_rows ) - if isinstance(delimiter, bytes): - delimiter.decode("latin1") - if dtype is None: dtype = np.float64 From ccaf198024f42148d334ea1c28e736a9fd3bee81 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Thu, 18 Dec 2025 23:53:25 +0100 Subject: [PATCH 1054/1718] TYP: ``linalg.matmul``: shape-typing and improved dtype specialization (#30474) --- numpy/linalg/_linalg.pyi | 162 ++++++++++++++++++---- numpy/typing/tests/data/reveal/linalg.pyi | 95 ++++++++++++- 2 files changed, 228 insertions(+), 29 deletions(-) diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index dd508514cf52..4d7182863d20 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -28,7 +28,6 @@ from numpy._typing import ( _ArrayLikeInt_co, _ArrayLikeObject_co, _ArrayLikeTD64_co, - _ArrayLikeUInt_co, _DTypeLike, _NestedSequence, _Shape, @@ -72,6 +71,7 @@ __all__ = [ ] type _AtMost1D = tuple[()] | tuple[int] +type _AtLeast1D = tuple[int, *tuple[int, ...]] type _AtLeast2D = tuple[int, int, *tuple[int, ...]] type _AtLeast3D = tuple[int, int, int, *tuple[int, ...]] type _AtLeast4D = tuple[int, int, int, int, *tuple[int, ...]] @@ -88,6 +88,8 @@ type _to_float64 = np.float64 | _to_integer type _to_inexact64 = np.complex128 | _to_float64 type _to_inexact64_unsafe = _to_inexact64 | np.datetime64 | np.timedelta64 | np.character type _to_complex = np.number | np.bool +type _to_float64_co = np.float64 | np.float32 | np.float16 | _to_integer +type _to_complex128_co = np.complex128 | np.complex64 | _to_float64_co type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] type _Array2D[ScalarT: np.generic] = np.ndarray[tuple[int, 
int], np.dtype[ScalarT]] @@ -107,30 +109,45 @@ type _ArrayLike1D2D[ScalarT: np.generic] = ( # 1d or 2d _SupportsArray[tuple[int] | tuple[int, int], np.dtype[ScalarT]] | _Sequence1D2D[ScalarT] ) type _ArrayLike3D[ScalarT: np.generic] = _SupportsArray[tuple[int, int, int], np.dtype[ScalarT]] | _Sequence3D[ScalarT] # ==3d +type _ArrayLike1ND[ScalarT: np.generic] = _SupportsArray[_AtLeast1D, np.dtype[ScalarT]] | _NestedSequence[ScalarT] # >=1d type _ArrayLike2ND[ScalarT: np.generic] = _SupportsArray[_AtLeast2D, np.dtype[ScalarT]] | _Sequence2ND[ScalarT] # >=2d type _ArrayLike3ND[ScalarT: np.generic] = _SupportsArray[_AtLeast3D, np.dtype[ScalarT]] | _Sequence3ND[ScalarT] # >=3d type _ArrayLike4ND[ScalarT: np.generic] = _SupportsArray[_AtLeast4D, np.dtype[ScalarT]] | _Sequence4ND[ScalarT] # >=3d # safe-castable array-likes type _ToArrayBool_1d = _ArrayLike1D[np.bool_] | Sequence[bool] +type _ToArrayBool_1nd = _ArrayLike1ND[np.bool_] | _NestedSequence[bool] +type _ToArrayBool_2nd = _ArrayLike2ND[np.bool_] | _Sequence2ND[bool] type _ToArrayInt_1d = _ArrayLike1D[_to_integer] | Sequence[int] +type _ToArrayInt_1nd = _ArrayLike1ND[_to_integer] | _NestedSequence[int] +type _ToArrayInt_2nd = _ArrayLike2ND[_to_integer] | _Sequence2ND[int] type _ToArrayF64 = _ArrayLike[_to_float64] | _NestedSequence[float] -type _ToArrayF64_1d = _ArrayLike1D[_to_float64] | Sequence[float] -type _ToArrayF64_2d = _ArrayLike2D[_to_float64] | _Sequence2D[float] -type _ToArrayF64_3nd = _ArrayLike3ND[_to_float64] | _Sequence3ND[float] +type _ToArrayF64_1d = _ArrayLike1D[_to_float64_co] | Sequence[float] +type _ToArrayF64_1nd = _ArrayLike1ND[_to_float64_co] | _NestedSequence[float] +type _ToArrayF64_2nd = _ArrayLike2ND[_to_float64_co] | _Sequence2ND[float] type _ToArrayC128 = _ArrayLike[_to_inexact64] | _NestedSequence[complex] -type _ToArrayC128_3nd = _ArrayLike3ND[_to_inexact64] | _Sequence3ND[complex] +type _ToArrayC128_1d = _ArrayLike1D[_to_complex128_co] | Sequence[complex] +type _ToArrayC128_1nd = 
_ArrayLike1ND[_to_complex128_co] | _NestedSequence[complex] +type _ToArrayC128_2nd = _ArrayLike2ND[_to_complex128_co] | _Sequence2ND[complex] type _ToArrayComplex_1d = _ArrayLike1D[_to_complex] | Sequence[complex] type _ToArrayComplex_2d = _ArrayLike2D[_to_complex] | _Sequence2D[complex] type _ToArrayComplex_3d = _ArrayLike3D[_to_complex] | _Sequence3D[complex] +type _ToArrayComplex_1nd = _ArrayLike1ND[_to_complex] | _NestedSequence[complex] +type _ToArrayComplex_2nd = _ArrayLike2ND[_to_complex] | _Sequence2ND[complex] # the invariant `list` type avoids overlap with bool, int, etc type _AsArrayI64 = _ArrayLike[np.int64] | list[int] | _NestedSequence[list[int]] type _AsArrayI64_1d = _ArrayLike1D[np.int64] | list[int] +type _AsArrayI64_1nd = _ArrayLike1ND[np.int64] | list[int] | _NestedSequence[list[int]] +type _AsArrayI64_2nd = _ArrayLike2ND[np.int64] | _NestedSequence[list[int]] type _AsArrayF64 = _ArrayLike[np.float64] | list[float] | _NestedSequence[list[float]] type _AsArrayF64_1d = _ArrayLike1D[np.float64] | list[float] +type _AsArrayF64_1nd = _ArrayLike1ND[np.float64] | list[float] | _NestedSequence[list[float]] +type _AsArrayF64_2nd = _ArrayLike2ND[np.float64] | _NestedSequence[list[float]] type _AsArrayC128 = _ArrayLike[np.complex128] | list[complex] | _NestedSequence[list[complex]] type _AsArrayC128_1d = _ArrayLike1D[np.complex128] | list[complex] type _AsArrayC128_2d = _ArrayLike2D[np.complex128] | Sequence[list[complex]] +type _AsArrayC128_1nd = _ArrayLike1ND[np.complex128] | list[complex] | _NestedSequence[list[complex]] +type _AsArrayC128_2nd = _ArrayLike2ND[np.complex128] | _NestedSequence[list[complex]] type _AsArrayC128_3nd = _ArrayLike3ND[np.complex128] | _Sequence2ND[list[complex]] type _OrderKind = L[1, -1, 2, -2, "fro", "nuc"] | float # only accepts `-inf` and `inf` as `float` @@ -497,7 +514,7 @@ def cond(x: _ArrayLike2D[_to_complex], p: _OrderKind | None = None) -> np.floati @overload # >2d ~inexact32 def cond(x: _ArrayLike3ND[_inexact32], p: 
_OrderKind | None = None) -> NDArray[np.float32]: ... @overload # >2d +inexact64 -def cond(x: _ToArrayC128_3nd, p: _OrderKind | None = None) -> NDArray[np.float64]: ... +def cond(x: _ArrayLike3ND[_to_inexact64] | _Sequence3ND[complex], p: _OrderKind | None = None) -> NDArray[np.float64]: ... @overload # >2d ~number def cond(x: _ArrayLike3ND[_to_complex], p: _OrderKind | None = None) -> NDArray[np.floating]: ... @overload # fallback @@ -991,20 +1008,22 @@ def outer[ScalarT: np.number | np.object_](x1: _ArrayLike1D[ScalarT], x2: _Array @overload # fallback def outer(x1: _ToArrayComplex_1d, x2: _ToArrayComplex_1d, /) -> _Array2D[Any]: ... +# note that this doesn't include bool, int_, float64, and complex128, as those require special-casing overloads +_AnyScalarT = TypeVar( + "_AnyScalarT", + np.int8, np.uint8, np.int16, np.uint16, np.int32, np.uint32, np.uint64, + np.float16, np.float32, np.longdouble, np.complex64, np.clongdouble, +) # fmt: skip + # @overload # ~T, ~T (we use constraints instead of a `: np.number` bound to prevent joins/unions) -def cross[ - AnyScalarT: ( # int64, float64, and complex128 are handled separately - np.int8, np.uint8, np.int16, np.uint16, np.int32, np.uint32, np.uint64, - np.float16, np.float32, np.longdouble, np.complex64, np.clongdouble, - ), -]( - x1: _ArrayLike1D2D[AnyScalarT], - x2: _ArrayLike1D2D[AnyScalarT], +def cross( + x1: _ArrayLike1D2D[_AnyScalarT], + x2: _ArrayLike1D2D[_AnyScalarT], /, *, axis: SupportsIndex = -1, -) -> NDArray[AnyScalarT]: ... # fmt: skip +) -> NDArray[_AnyScalarT]: ... # fmt: skip @overload # ~int64, +int64 def cross( x1: _ArrayLike1D2D[np.int64] | _Sequence1D2D[int], @@ -1078,14 +1097,105 @@ def cross[ScalarT: np.number]( axis: SupportsIndex = -1, ) -> NDArray[ScalarT]: ... -# TODO: narrow return types -@overload -def matmul[ScalarT: np.number](x1: _ArrayLike[ScalarT], x2: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... 
-@overload -def matmul(x1: _ArrayLikeUInt_co, x2: _ArrayLikeUInt_co, /) -> NDArray[np.unsignedinteger]: ... -@overload -def matmul(x1: _ArrayLikeInt_co, x2: _ArrayLikeInt_co, /) -> NDArray[np.signedinteger]: ... -@overload -def matmul(x1: _ArrayLikeFloat_co, x2: _ArrayLikeFloat_co, /) -> NDArray[np.floating]: ... -@overload -def matmul(x1: _ArrayLikeComplex_co, x2: _ArrayLikeComplex_co, /) -> NDArray[np.complexfloating]: ... +# These overloads can be grouped into three parts: +# - 16 overloads as workaround for microsoft/pyright#10232 +# - 9 overloads for the scalar cases (both args 1d) +# - 18 overloads for the non-scalar cases (at least one arg >1d) +@overload # ?d ~T, 1d ~T +def matmul( + x1: _SupportsArray[_JustAnyShape, np.dtype[_AnyScalarT]], x2: _ArrayLike1D[_AnyScalarT], / +) -> NDArray[_AnyScalarT] | Any: ... +@overload # 1d ~T, ?d ~T +def matmul( + x1: _ArrayLike1D[_AnyScalarT], x2: _SupportsArray[_JustAnyShape, np.dtype[_AnyScalarT]], / +) -> NDArray[_AnyScalarT] | Any: ... +@overload # ?d bool, 1d bool +def matmul(x1: _SupportsArray[_JustAnyShape, np.dtype[np.bool]], x2: _ToArrayBool_1d, /) -> NDArray[np.bool] | Any: ... +@overload # 1d bool, ?d bool +def matmul(x1: _ToArrayBool_1d, x2: _SupportsArray[_JustAnyShape, np.dtype[np.bool]], /) -> NDArray[np.bool] | Any: ... +@overload # ?d ~int, 1d +int +def matmul(x1: _SupportsArray[_JustAnyShape, np.dtype[np.int64]], x2: _ToArrayInt_1d, /) -> NDArray[np.int64] | Any: ... +@overload # 1d +int, ?d ~int +def matmul(x1: _ToArrayInt_1d, x2: _SupportsArray[_JustAnyShape, np.dtype[np.int64]], /) -> NDArray[np.int64] | Any: ... +@overload # ?d +int, 1d ~int +def matmul(x1: _SupportsArray[_JustAnyShape, np.dtype[_to_integer]], x2: _AsArrayI64_1d, /) -> NDArray[np.int64] | Any: ... +@overload # 1d ~int, ?d +int +def matmul(x1: _AsArrayI64_1d, x2: _SupportsArray[_JustAnyShape, np.dtype[_to_integer]], /) -> NDArray[np.int64] | Any: ... 
+@overload # ?d ~float64, 1d +float64 +def matmul(x1: _SupportsArray[_JustAnyShape, np.dtype[np.float64]], x2: _ToArrayF64_1d, /) -> NDArray[np.float64] | Any: ... +@overload # 1d +float64, ?d ~float64 +def matmul(x1: _ToArrayF64_1d, x2: _SupportsArray[_JustAnyShape, np.dtype[np.float64]], /) -> NDArray[np.float64] | Any: ... +@overload # ?d +float64, 1d ~float64 +def matmul(x1: _SupportsArray[_JustAnyShape, np.dtype[_to_float64]], x2: _AsArrayF64_1d, /) -> NDArray[np.float64] | Any: ... +@overload # 1d ~float64, ?d +float64 +def matmul(x1: _AsArrayF64_1d, x2: _SupportsArray[_JustAnyShape, np.dtype[_to_float64]], /) -> NDArray[np.float64] | Any: ... +@overload # ?d ~complex128, 1d +complex128 +def matmul( + x1: _SupportsArray[_JustAnyShape, np.dtype[np.complex128]], x2: _ToArrayC128_1d, / +) -> NDArray[np.complex128] | Any: ... +@overload # 1d +complex128, ?d ~complex128 +def matmul( + x1: _ToArrayC128_1d, x2: _SupportsArray[_JustAnyShape, np.dtype[np.complex128]], / +) -> NDArray[np.complex128] | Any: ... +@overload # ?d +complex128, 1d ~complex128 +def matmul( + x1: _SupportsArray[_JustAnyShape, np.dtype[_to_complex128_co]], x2: _AsArrayC128_1d, / +) -> NDArray[np.complex128] | Any: ... +@overload # 1d ~complex128, ?d +complex128 +def matmul( + x1: _AsArrayC128_1d, x2: _SupportsArray[_JustAnyShape, np.dtype[_to_complex128_co]], / +) -> NDArray[np.complex128] | Any: ... # end workaround +@overload # 1d ~T, 1d ~T +def matmul(x1: _ArrayLike1D[_AnyScalarT], x2: _ArrayLike1D[_AnyScalarT], /) -> _AnyScalarT: ... +@overload # 1d +bool, 1d +bool +def matmul(x1: _ToArrayBool_1d, x2: _ToArrayBool_1d, /) -> np.bool: ... +@overload # 1d ~int, 1d +int +def matmul(x1: _AsArrayI64_1d, x2: _ToArrayInt_1d, /) -> np.int64: ... +@overload # 1d +int, 1d ~int +def matmul(x1: _ToArrayInt_1d, x2: _AsArrayI64_1d, /) -> np.int64: ... +@overload # 1d ~float64, 1d +float64 +def matmul(x1: _AsArrayF64_1d, x2: _ToArrayF64_1d, /) -> np.float64: ... 
+@overload # 1d +float64, 1d ~float64 +def matmul(x1: _ToArrayF64_1d, x2: _AsArrayF64_1d, /) -> np.float64: ... +@overload # 1d ~complex128, 1d +complex128 +def matmul(x1: _AsArrayC128_1d, x2: _ToArrayComplex_1d, /) -> np.complex128: ... +@overload # 1d +complex128, 1d ~complex128 +def matmul(x1: _ToArrayComplex_1d, x2: _AsArrayC128_1d, /) -> np.complex128: ... +@overload # 1d fallback, 1d fallback +def matmul(x1: _ToArrayComplex_1d, x2: _ToArrayComplex_1d, /) -> Any: ... # end 1d x 1d +@overload # >=1d ~T, >=2d ~T +def matmul(x1: _ArrayLike1ND[_AnyScalarT], x2: _ArrayLike2ND[_AnyScalarT], /) -> NDArray[_AnyScalarT]: ... +@overload # >=2d ~T, >=1d ~T +def matmul(x1: _ArrayLike2ND[_AnyScalarT], x2: _ArrayLike1ND[_AnyScalarT], /) -> NDArray[_AnyScalarT]: ... +@overload # >=1d +bool, >=2d +bool +def matmul(x1: _ToArrayBool_1nd, x2: _ToArrayBool_2nd, /) -> NDArray[np.bool]: ... +@overload # >=2d +bool, >=1d +bool +def matmul(x1: _ToArrayBool_2nd, x2: _ToArrayBool_1nd, /) -> NDArray[np.bool]: ... +@overload # >=1d ~int, >=2d +int +def matmul(x1: _AsArrayI64_1nd, x2: _ToArrayInt_2nd, /) -> NDArray[np.int64]: ... +@overload # >=2d ~int, >=1d +int +def matmul(x1: _AsArrayI64_2nd, x2: _ToArrayInt_1nd, /) -> NDArray[np.int64]: ... +@overload # >=1d +int, >=2d ~int +def matmul(x1: _ToArrayInt_1nd, x2: _AsArrayI64_2nd, /) -> NDArray[np.int64]: ... +@overload # >=2d +int, >=1d ~int +def matmul(x1: _ToArrayInt_2nd, x2: _AsArrayI64_1nd, /) -> NDArray[np.int64]: ... +@overload # >=1d ~float64, >=2d +float64 +def matmul(x1: _AsArrayF64_1nd, x2: _ToArrayF64_2nd, /) -> NDArray[np.float64]: ... +@overload # >=2d ~float64, >=1d +float64 +def matmul(x1: _AsArrayF64_2nd, x2: _ToArrayF64_1nd, /) -> NDArray[np.float64]: ... +@overload # >=1d +float64, >=2d ~float64 +def matmul(x1: _ToArrayF64_1nd, x2: _AsArrayF64_2nd, /) -> NDArray[np.float64]: ... +@overload # >=2d +float64, >=1d ~float64 +def matmul(x1: _ToArrayF64_2nd, x2: _AsArrayF64_1nd, /) -> NDArray[np.float64]: ... 
+@overload # >=1d ~complex128, >=2d +complex128 +def matmul(x1: _AsArrayC128_1nd, x2: _ToArrayC128_2nd, /) -> NDArray[np.complex128]: ... +@overload # >=2d ~complex128, >=1d +complex128 +def matmul(x1: _AsArrayC128_2nd, x2: _ToArrayC128_1nd, /) -> NDArray[np.complex128]: ... +@overload # >=1d +complex128, >=2d ~complex128 +def matmul(x1: _ToArrayC128_1nd, x2: _AsArrayC128_2nd, /) -> NDArray[np.complex128]: ... +@overload # >=2d +complex128, >=1d ~complex128 +def matmul(x1: _ToArrayC128_2nd, x2: _AsArrayC128_1nd, /) -> NDArray[np.complex128]: ... +@overload # >=1d fallback, >=2d fallback +def matmul(x1: _ToArrayComplex_1nd, x2: _ToArrayComplex_2nd, /) -> NDArray[Any]: ... +@overload # >=2d fallback, >=1d fallback +def matmul(x1: _ToArrayComplex_2nd, x2: _ToArrayComplex_1nd, /) -> NDArray[Any]: ... diff --git a/numpy/typing/tests/data/reveal/linalg.pyi b/numpy/typing/tests/data/reveal/linalg.pyi index 607e18047b81..3726f642b913 100644 --- a/numpy/typing/tests/data/reveal/linalg.pyi +++ b/numpy/typing/tests/data/reveal/linalg.pyi @@ -46,6 +46,12 @@ AR_S: npt.NDArray[np.bytes_] AR_U: npt.NDArray[np.str_] AR_b: npt.NDArray[np.bool] +AR_b_1d: np.ndarray[tuple[int], np.dtype[np.bool]] +AR_b_2d: np.ndarray[tuple[int, int], np.dtype[np.bool]] + +AR_i8_1d: np.ndarray[tuple[int], np.dtype[np.int64]] +AR_i8_2d: np.ndarray[tuple[int, int], np.dtype[np.int64]] + SC_f8: np.float64 AR_f8_0d: np.ndarray[tuple[()], np.dtype[np.float64]] AR_f8_1d: _Array1D[np.float64] @@ -60,6 +66,9 @@ AR_f4_3d: _Array3D[np.float32] AR_f10_2d: _Array2D[np.longdouble] AR_f10_3d: _Array3D[np.longdouble] +AR_c16_1d: np.ndarray[tuple[int], np.dtype[np.complex128]] +AR_c16_2d: np.ndarray[tuple[int, int], np.dtype[np.complex128]] + ### assert_type(np.linalg.tensorsolve(AR_i8, AR_i8), npt.NDArray[np.float64]) @@ -412,6 +421,86 @@ assert_type(np.linalg.cross(AR_O, AR_f8), npt.NDArray[np.object_]) assert_type(np.linalg.cross(AR_f8, AR_O), npt.NDArray[np.object_]) assert_type(np.linalg.cross(AR_O, AR_O), 
npt.NDArray[np.object_]) -assert_type(np.linalg.matmul(AR_i8, AR_i8), npt.NDArray[np.int64]) -assert_type(np.linalg.matmul(AR_f8, AR_f8), npt.NDArray[np.float64]) -assert_type(np.linalg.matmul(AR_c16, AR_c16), npt.NDArray[np.complex128]) +assert_type(np.linalg.matmul(AR_b, AR_b), npt.NDArray[np.bool] | Any) +assert_type(np.linalg.matmul(AR_i8, AR_b), npt.NDArray[np.int64] | Any) +assert_type(np.linalg.matmul(AR_b, AR_i8), npt.NDArray[np.int64] | Any) +assert_type(np.linalg.matmul(AR_i8, AR_i8), npt.NDArray[np.int64] | Any) +assert_type(np.linalg.matmul(AR_f4, AR_f4), npt.NDArray[np.float32] | Any) +assert_type(np.linalg.matmul(AR_f8, AR_i8), npt.NDArray[np.float64] | Any) +assert_type(np.linalg.matmul(AR_i8, AR_f8), npt.NDArray[np.float64] | Any) +assert_type(np.linalg.matmul(AR_f8, AR_f8), npt.NDArray[np.float64] | Any) +assert_type(np.linalg.matmul(AR_c16, AR_i8), npt.NDArray[np.complex128] | Any) +assert_type(np.linalg.matmul(AR_f8, AR_c16), npt.NDArray[np.complex128] | Any) +assert_type(np.linalg.matmul(AR_c16, AR_c16), npt.NDArray[np.complex128] | Any) +# same as the block above, but for the 1d x 1d case +assert_type(np.linalg.matmul(AR_b_1d, AR_b_1d), np.bool) +assert_type(np.linalg.matmul(AR_i8_1d, AR_b_1d), np.int64) +assert_type(np.linalg.matmul(AR_b_1d, AR_i8_1d), np.int64) +assert_type(np.linalg.matmul(AR_i8_1d, AR_i8_1d), np.int64) +assert_type(np.linalg.matmul(AR_f4_1d, AR_f4_1d), np.float32) +assert_type(np.linalg.matmul(AR_f8_1d, AR_i8_1d), np.float64) +assert_type(np.linalg.matmul(AR_i8_1d, AR_f8_1d), np.float64) +assert_type(np.linalg.matmul(AR_f8_1d, AR_f8_1d), np.float64) +assert_type(np.linalg.matmul(AR_c16_1d, AR_i8_1d), np.complex128) +assert_type(np.linalg.matmul(AR_f8_1d, AR_c16_1d), np.complex128) +assert_type(np.linalg.matmul(AR_c16_1d, AR_c16_1d), np.complex128) +# 1d x 2d +assert_type(np.linalg.matmul(AR_b_1d, AR_b_2d), npt.NDArray[np.bool]) +assert_type(np.linalg.matmul(AR_i8_1d, AR_b_2d), npt.NDArray[np.int64]) 
+assert_type(np.linalg.matmul(AR_b_1d, AR_i8_2d), npt.NDArray[np.int64]) +assert_type(np.linalg.matmul(AR_i8_1d, AR_i8_2d), npt.NDArray[np.int64]) +assert_type(np.linalg.matmul(AR_f4_1d, AR_f4_2d), npt.NDArray[np.float32]) +assert_type(np.linalg.matmul(AR_f8_1d, AR_i8_2d), npt.NDArray[np.float64]) +assert_type(np.linalg.matmul(AR_i8_1d, AR_f8_2d), npt.NDArray[np.float64]) +assert_type(np.linalg.matmul(AR_f8_1d, AR_f8_2d), npt.NDArray[np.float64]) +assert_type(np.linalg.matmul(AR_c16_1d, AR_i8_2d), npt.NDArray[np.complex128]) +assert_type(np.linalg.matmul(AR_f8_1d, AR_c16_2d), npt.NDArray[np.complex128]) +assert_type(np.linalg.matmul(AR_c16_1d, AR_c16_2d), npt.NDArray[np.complex128]) +# 1d x ?d +assert_type(np.linalg.matmul(AR_b_1d, AR_b), npt.NDArray[np.bool] | Any) +assert_type(np.linalg.matmul(AR_i8_1d, AR_b), npt.NDArray[np.int64] | Any) +assert_type(np.linalg.matmul(AR_b_1d, AR_i8), npt.NDArray[np.int64] | Any) +assert_type(np.linalg.matmul(AR_i8_1d, AR_i8), npt.NDArray[np.int64] | Any) +assert_type(np.linalg.matmul(AR_f4_1d, AR_f4), npt.NDArray[np.float32] | Any) +assert_type(np.linalg.matmul(AR_f8_1d, AR_i8), npt.NDArray[np.float64] | Any) +assert_type(np.linalg.matmul(AR_i8_1d, AR_f8), npt.NDArray[np.float64] | Any) +assert_type(np.linalg.matmul(AR_f8_1d, AR_f8), npt.NDArray[np.float64] | Any) +assert_type(np.linalg.matmul(AR_c16_1d, AR_i8), npt.NDArray[np.complex128] | Any) +assert_type(np.linalg.matmul(AR_f8_1d, AR_c16), npt.NDArray[np.complex128] | Any) +assert_type(np.linalg.matmul(AR_c16_1d, AR_c16), npt.NDArray[np.complex128] | Any) +# 2d x 1d +assert_type(np.linalg.matmul(AR_b_2d, AR_b_1d), npt.NDArray[np.bool]) +assert_type(np.linalg.matmul(AR_i8_2d, AR_b_1d), npt.NDArray[np.int64]) +assert_type(np.linalg.matmul(AR_b_2d, AR_i8_1d), npt.NDArray[np.int64]) +assert_type(np.linalg.matmul(AR_i8_2d, AR_i8_1d), npt.NDArray[np.int64]) +assert_type(np.linalg.matmul(AR_f4_2d, AR_f4_1d), npt.NDArray[np.float32]) +assert_type(np.linalg.matmul(AR_f8_2d, 
AR_i8_1d), npt.NDArray[np.float64]) +assert_type(np.linalg.matmul(AR_i8_2d, AR_f8_1d), npt.NDArray[np.float64]) +assert_type(np.linalg.matmul(AR_f8_2d, AR_f8_1d), npt.NDArray[np.float64]) +assert_type(np.linalg.matmul(AR_c16_2d, AR_i8_1d), npt.NDArray[np.complex128]) +assert_type(np.linalg.matmul(AR_f8_2d, AR_c16_1d), npt.NDArray[np.complex128]) +assert_type(np.linalg.matmul(AR_c16_2d, AR_c16_1d), npt.NDArray[np.complex128]) +# 2d x ?d +assert_type(np.linalg.matmul(AR_b_2d, AR_b), npt.NDArray[np.bool]) +assert_type(np.linalg.matmul(AR_i8_2d, AR_b), npt.NDArray[np.int64]) +assert_type(np.linalg.matmul(AR_b_2d, AR_i8), npt.NDArray[np.int64]) +assert_type(np.linalg.matmul(AR_i8_2d, AR_i8), npt.NDArray[np.int64]) +assert_type(np.linalg.matmul(AR_f4_2d, AR_f4), npt.NDArray[np.float32]) +assert_type(np.linalg.matmul(AR_f8_2d, AR_i8), npt.NDArray[np.float64]) +assert_type(np.linalg.matmul(AR_i8_2d, AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.matmul(AR_f8_2d, AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.matmul(AR_c16_2d, AR_i8), npt.NDArray[np.complex128]) +assert_type(np.linalg.matmul(AR_f8_2d, AR_c16), npt.NDArray[np.complex128]) +assert_type(np.linalg.matmul(AR_c16_2d, AR_c16), npt.NDArray[np.complex128]) +# ?d x 1d +assert_type(np.linalg.matmul(AR_b, AR_b_1d), npt.NDArray[np.bool] | Any) +assert_type(np.linalg.matmul(AR_i8, AR_b_1d), npt.NDArray[np.int64] | Any) +assert_type(np.linalg.matmul(AR_b, AR_i8_1d), npt.NDArray[np.int64] | Any) +assert_type(np.linalg.matmul(AR_i8, AR_i8_1d), npt.NDArray[np.int64] | Any) +assert_type(np.linalg.matmul(AR_f4, AR_f4_1d), npt.NDArray[np.float32] | Any) +assert_type(np.linalg.matmul(AR_f8, AR_i8_1d), npt.NDArray[np.float64] | Any) +assert_type(np.linalg.matmul(AR_i8, AR_f8_1d), npt.NDArray[np.float64] | Any) +assert_type(np.linalg.matmul(AR_f8, AR_f8_1d), npt.NDArray[np.float64] | Any) +assert_type(np.linalg.matmul(AR_c16, AR_i8_1d), npt.NDArray[np.complex128] | Any) +assert_type(np.linalg.matmul(AR_f8, 
AR_c16_1d), npt.NDArray[np.complex128] | Any) +assert_type(np.linalg.matmul(AR_c16, AR_c16_1d), npt.NDArray[np.complex128] | Any) From be8d1c62dc185e6ae4cd100d0a0423a95888126c Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Fri, 19 Dec 2025 14:22:23 +0100 Subject: [PATCH 1055/1718] DOC: move ``linalg.outer`` from "Decompositions" to "Matrix and vector products" (#30481) --- numpy/linalg/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/linalg/__init__.py b/numpy/linalg/__init__.py index cc482cfc9579..4e4c65758fa7 100644 --- a/numpy/linalg/__init__.py +++ b/numpy/linalg/__init__.py @@ -31,12 +31,12 @@ matrix_power tensordot matmul + outer Decompositions -------------- cholesky - outer qr svd svdvals From 6e003341f7af5f1fa9a36b2553e33e91274cf07b Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Fri, 19 Dec 2025 17:06:51 +0100 Subject: [PATCH 1056/1718] DOC: new "typing" changelog category (#30483) --- doc/release/upcoming_changes/README.rst | 2 +- pyproject.toml | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/doc/release/upcoming_changes/README.rst b/doc/release/upcoming_changes/README.rst index 51ccd7690eff..c1b9a91dd3c1 100644 --- a/doc/release/upcoming_changes/README.rst +++ b/doc/release/upcoming_changes/README.rst @@ -24,6 +24,7 @@ Each file should be named like ``..rst``, where * ``improvement``: General improvements and edge-case changes which are not new features or compatibility related. * ``performance``: Performance changes that should not affect other behaviour. +* ``typing``: Improvements and changes related to static typing. * ``change``: Other changes * ``highlight``: Adds a highlight bullet point to use as a possibly highlight of the release. @@ -59,4 +60,3 @@ will look in the final release notes. This README was adapted from the pytest changelog readme under the terms of the MIT licence. 
- diff --git a/pyproject.toml b/pyproject.toml index 084ba993072e..07189ddbe592 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -173,6 +173,11 @@ tracker = "https://github.com/numpy/numpy/issues" name = "Performance improvements and changes" showcontent = true + [[tool.towncrier.type]] + directory = "typing" + name = "Typing improvements and changes" + showcontent = true + [[tool.towncrier.type]] directory = "change" name = "Changes" From d184d31e900354418d01ebae82c4ad0b5c7872cf Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Fri, 19 Dec 2025 18:38:18 +0100 Subject: [PATCH 1057/1718] TYP/DOC: release note for the ``linalg`` typing improvements (#30480) Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- doc/release/upcoming_changes/30480.typing.rst | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 doc/release/upcoming_changes/30480.typing.rst diff --git a/doc/release/upcoming_changes/30480.typing.rst b/doc/release/upcoming_changes/30480.typing.rst new file mode 100644 index 000000000000..fa27a8ecbe37 --- /dev/null +++ b/doc/release/upcoming_changes/30480.typing.rst @@ -0,0 +1,8 @@ +``numpy.linalg`` typing improvements and preliminary shape-typing support +------------------------------------------------------------------------- +Input and output dtypes for ``numpy.linalg`` functions are now more precise. Several of these +functions also gain preliminary shape-typing support while remaining backward compatible. +For example, the return type of ``numpy.linalg.matmul`` now depends on the shape-type of its inputs, +or fall back to the backward-compatible return type if the shape-types are unknown at type-checking +time. Because of limitations in Python's type system and current type-checkers, shape-typing cannot +cover every situation and is often only implemented for the most common lower-rank cases. 
From da9b7a6bf53ee9a60643fd87211d88c436c8db64 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Fri, 19 Dec 2025 18:40:28 +0100 Subject: [PATCH 1058/1718] DOC: Make the issue template clearer about what "context" means (#30482) Co-authored-by: Joren Hammudoglu --- .github/ISSUE_TEMPLATE/bug-report.yml | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug-report.yml b/.github/ISSUE_TEMPLATE/bug-report.yml index b237d52424ac..bb88ed20b8ba 100644 --- a/.github/ISSUE_TEMPLATE/bug-report.yml +++ b/.github/ISSUE_TEMPLATE/bug-report.yml @@ -64,10 +64,12 @@ body: - type: textarea attributes: - label: "Context for the issue:" + label: "How does this issue affect you or how did you find it:" description: | - Please explain how this issue affects your work or why it should be prioritized. + Please explain how this issue concretely affects you or others. + Especially if it does not impact you how did you find it? + (If an issue has no concrete impact this is also helpful to know.) placeholder: | - << your explanation here >> + << description of how the issue affects you >> validations: required: false From f3e720aa6114b0cf4a5d63051ff21cfde0dd0cd0 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Fri, 19 Dec 2025 18:42:30 +0100 Subject: [PATCH 1059/1718] TYP: ``linalg.multi_dot``: improved dtype specialization (#30479) --- numpy/linalg/_linalg.pyi | 33 ++++++++++++++++++----- numpy/typing/tests/data/fail/linalg.pyi | 2 +- numpy/typing/tests/data/reveal/linalg.pyi | 13 +++++---- 3 files changed, 36 insertions(+), 12 deletions(-) diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index 4d7182863d20..6c78cb10ba2b 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -897,12 +897,33 @@ def tensordot( a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, /, *, axes: int | tuple[_ShapeLike, _ShapeLike] = 2 ) -> NDArray[np.complex128 | Any]: ... 
-# TODO: Returns a scalar or array -def multi_dot( - arrays: Iterable[_ArrayLikeComplex_co | _ArrayLikeObject_co | _ArrayLikeTD64_co], - *, - out: NDArray[Any] | None = None, -) -> Any: ... +# +@overload +def multi_dot[ArrayT: np.ndarray]( + arrays: Iterable[_ArrayLikeComplex_co | _ArrayLikeObject_co | _ArrayLikeTD64_co], *, out: ArrayT, +) -> ArrayT: ... +@overload +def multi_dot[ + AnyScalarT: ( + np.int8, np.uint8, np.int16, np.uint16, np.int32, np.uint32, np.int64, np.uint64, + np.float16, np.float32, np.float64, np.longdouble, np.complex64, np.complex128, np.clongdouble, + np.object_, np.timedelta64, + ), +](arrays: Sequence[_ArrayLike[AnyScalarT]], *, out: None = None) -> NDArray[AnyScalarT]: ... +@overload +def multi_dot(arrays: Sequence[_ArrayLikeBool_co], *, out: None = None) -> NDArray[np.bool]: ... +@overload +def multi_dot(arrays: Sequence[_ArrayLikeInt_co], *, out: None = None) -> NDArray[np.int64 | Any]: ... +@overload +def multi_dot(arrays: Sequence[_ArrayLikeFloat_co], *, out: None = None) -> NDArray[np.float64 | Any]: ... +@overload +def multi_dot(arrays: Sequence[_ArrayLikeComplex_co], *, out: None = None) -> NDArray[np.complex128 | Any]: ... +@overload +def multi_dot(arrays: Sequence[_ArrayLikeTD64_co], *, out: None = None) -> NDArray[np.timedelta64 | Any]: ... +@overload +def multi_dot[ScalarT: np.number | np.object_ | np.timedelta64]( + arrays: Sequence[_ArrayLike[ScalarT]], *, out: None = None +) -> NDArray[ScalarT]: ... 
# @overload # workaround for microsoft/pyright#10232 diff --git a/numpy/typing/tests/data/fail/linalg.pyi b/numpy/typing/tests/data/fail/linalg.pyi index eda82c48c85a..c59238eab878 100644 --- a/numpy/typing/tests/data/fail/linalg.pyi +++ b/numpy/typing/tests/data/fail/linalg.pyi @@ -45,4 +45,4 @@ np.linalg.det(AR_O) # type: ignore[arg-type] np.linalg.norm(AR_f8, ord="bob") # type: ignore[call-overload] -np.linalg.multi_dot([AR_M]) # type: ignore[list-item] +np.linalg.multi_dot([AR_M]) # type: ignore[type-var] diff --git a/numpy/typing/tests/data/reveal/linalg.pyi b/numpy/typing/tests/data/reveal/linalg.pyi index 3726f642b913..f6de644bd7da 100644 --- a/numpy/typing/tests/data/reveal/linalg.pyi +++ b/numpy/typing/tests/data/reveal/linalg.pyi @@ -365,11 +365,14 @@ assert_type(np.linalg.tensordot(AR_c16, AR_c16), npt.NDArray[np.complex128]) assert_type(np.linalg.tensordot(AR_m, AR_m), npt.NDArray[np.timedelta64]) assert_type(np.linalg.tensordot(AR_O, AR_O), npt.NDArray[np.object_]) -assert_type(np.linalg.multi_dot([AR_i8, AR_i8]), Any) -assert_type(np.linalg.multi_dot([AR_i8, AR_f8]), Any) -assert_type(np.linalg.multi_dot([AR_f8, AR_c16]), Any) -assert_type(np.linalg.multi_dot([AR_O, AR_O]), Any) -assert_type(np.linalg.multi_dot([AR_m, AR_m]), Any) +assert_type(np.linalg.multi_dot([AR_i8, AR_i8]), npt.NDArray[np.int64]) +assert_type(np.linalg.multi_dot([AR_f8, AR_f8]), npt.NDArray[np.float64]) +assert_type(np.linalg.multi_dot([AR_c16, AR_c16]), npt.NDArray[np.complex128]) +assert_type(np.linalg.multi_dot([AR_O, AR_O]), npt.NDArray[np.object_]) +# Mypy incorrectly infers `ndarray[Any, Any]`, but pyright behaves correctly. 
+assert_type(np.linalg.multi_dot([AR_i8, AR_f8]), npt.NDArray[np.float64 | Any]) # type: ignore[assert-type] +assert_type(np.linalg.multi_dot([AR_f8, AR_c16]), npt.NDArray[np.complex128 | Any]) # type: ignore[assert-type] +assert_type(np.linalg.multi_dot([AR_m, AR_m]), npt.NDArray[np.timedelta64]) # type: ignore[assert-type] # Mypy incorrectly infers `ndarray[Any, Any]`, but pyright behaves correctly. assert_type(np.linalg.diagonal(AR_any), np.ndarray) # type: ignore[assert-type] From 9d7f4977acba32063dc5ef461a4545f5ead65bb3 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 19 Dec 2025 18:43:29 +0100 Subject: [PATCH 1060/1718] TYP: ``argpartition`` shape-typing --- numpy/__init__.pyi | 33 ++++++++--- numpy/_core/fromnumeric.pyi | 56 ++++++++++++++++++- numpy/ma/core.pyi | 33 ++++++++--- numpy/typing/tests/data/fail/fromnumeric.pyi | 7 ++- numpy/typing/tests/data/fail/ma.pyi | 6 +- .../typing/tests/data/reveal/fromnumeric.pyi | 5 ++ numpy/typing/tests/data/reveal/ma.pyi | 5 +- .../typing/tests/data/reveal/ndarray_misc.pyi | 2 + 8 files changed, 123 insertions(+), 24 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index ddd91cb191c6..71984361e760 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2370,25 +2370,44 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): order: str | Sequence[str] | None = None, ) -> None: ... - # - @overload + # keep in sync with `ma.core.MaskedArray.argpartition` + # keep roughly in sync with `_core.fromnumeric.argpartition` + @overload # axis: None def argpartition( self, kth: _ArrayLikeInt, /, - axis: SupportsIndex | None = -1, + axis: None, kind: _PartitionKind = "introselect", order: None = None, - ) -> NDArray[intp]: ... - @overload + ) -> ndarray[tuple[int], dtype[intp]]: ... 
+ @overload # axis: index (default) + def argpartition( + self, + kth: _ArrayLikeInt, + /, + axis: SupportsIndex = -1, + kind: _PartitionKind = "introselect", + order: None = None, + ) -> ndarray[_ShapeT_co, dtype[intp]]: ... + @overload # void, axis: None def argpartition( self: NDArray[void], kth: _ArrayLikeInt, /, - axis: SupportsIndex | None = -1, + axis: None, kind: _PartitionKind = "introselect", order: str | Sequence[str] | None = None, - ) -> NDArray[intp]: ... + ) -> ndarray[tuple[int], dtype[intp]]: ... + @overload # void, axis: index (default) + def argpartition( + self: NDArray[void], + kth: _ArrayLikeInt, + /, + axis: SupportsIndex = -1, + kind: _PartitionKind = "introselect", + order: str | Sequence[str] | None = None, + ) -> ndarray[_ShapeT_co, dtype[intp]]: ... # keep in sync with `ma.MaskedArray.diagonal` def diagonal( diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index ce08335ac256..0d5b26131146 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -54,6 +54,7 @@ from numpy._typing import ( _ScalarLike_co, _Shape, _ShapeLike, + _SupportsArray, ) __all__ = [ @@ -351,14 +352,63 @@ def partition( order: str | Sequence[str] | None = None, ) -> NDArray[Any]: ... -# +# keep roughly in sync with `ndarray.argpartition` +@overload # axis: None def argpartition( a: ArrayLike, kth: _ArrayLikeInt, - axis: SupportsIndex | None = -1, + axis: None, + kind: _PartitionKind = "introselect", + order: None = None, +) -> np.ndarray[tuple[int], np.dtype[np.intp]]: ... +@overload # known shape, axis: index (default) +def argpartition[ShapeT: _Shape]( + a: np.ndarray[ShapeT], + kth: _ArrayLikeInt, + axis: SupportsIndex = -1, + kind: _PartitionKind = "introselect", + order: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.intp]]: ... 
+@overload # 1d array-like, axis: index (default) +def argpartition( + a: Sequence[np.generic | complex], + kth: _ArrayLikeInt, + axis: SupportsIndex = -1, + kind: _PartitionKind = "introselect", + order: None = None, +) -> np.ndarray[tuple[int], np.dtype[np.intp]]: ... +@overload # 2d array-like, axis: index (default) +def argpartition( + a: Sequence[Sequence[np.generic | complex]], + kth: _ArrayLikeInt, + axis: SupportsIndex = -1, + kind: _PartitionKind = "introselect", + order: None = None, +) -> np.ndarray[tuple[int, int], np.dtype[np.intp]]: ... +@overload # ?d array-like, axis: index (default) +def argpartition( + a: ArrayLike, + kth: _ArrayLikeInt, + axis: SupportsIndex = -1, + kind: _PartitionKind = "introselect", + order: None = None, +) -> NDArray[np.intp]: ... +@overload # void, axis: None +def argpartition( + a: _SupportsArray[np.dtype[np.void]], + kth: _ArrayLikeInt, + axis: None, kind: _PartitionKind = "introselect", order: str | Sequence[str] | None = None, -) -> NDArray[intp]: ... +) -> np.ndarray[tuple[int], np.dtype[intp]]: ... +@overload # void, axis: index (default) +def argpartition[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.void]], + kth: _ArrayLikeInt, + axis: SupportsIndex = -1, + kind: _PartitionKind = "introselect", + order: str | Sequence[str] | None = None, +) -> np.ndarray[ShapeT, np.dtype[np.intp]]: ... # @overload diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index a5e8e41b7709..36766932fa14 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -2401,25 +2401,44 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): order: str | Sequence[str] | None = None, ) -> None: ... - # - @overload + # keep in sync with ndarray.argpartition + @override + @overload # axis: None def argpartition( self, + kth: _ArrayLikeInt, /, + axis: None, + kind: _PartitionKind = "introselect", + order: None = None, + ) -> MaskedArray[tuple[int], dtype[intp]]: ... 
+ @overload # axis: index (default) + def argpartition( + self, kth: _ArrayLikeInt, - axis: SupportsIndex | None = -1, + /, + axis: SupportsIndex = -1, kind: _PartitionKind = "introselect", order: None = None, - ) -> _MaskedArray[intp]: ... - @overload + ) -> MaskedArray[_ShapeT_co, dtype[intp]]: ... + @overload # void, axis: None def argpartition( self: _MaskedArray[np.void], + kth: _ArrayLikeInt, /, + axis: None, + kind: _PartitionKind = "introselect", + order: str | Sequence[str] | None = None, + ) -> MaskedArray[tuple[int], dtype[intp]]: ... + @overload # void, axis: index (default) + def argpartition( + self: _MaskedArray[np.void], kth: _ArrayLikeInt, - axis: SupportsIndex | None = -1, + /, + axis: SupportsIndex = -1, kind: _PartitionKind = "introselect", order: str | Sequence[str] | None = None, - ) -> _MaskedArray[intp]: ... + ) -> MaskedArray[_ShapeT_co, dtype[intp]]: ... # Keep in-sync with np.ma.take @overload # type: ignore[override] diff --git a/numpy/typing/tests/data/fail/fromnumeric.pyi b/numpy/typing/tests/data/fail/fromnumeric.pyi index 92b0cb366207..c3f060679089 100644 --- a/numpy/typing/tests/data/fail/fromnumeric.pyi +++ b/numpy/typing/tests/data/fail/fromnumeric.pyi @@ -36,10 +36,11 @@ np.partition(a, 0, axis="bob") # type: ignore[call-overload] np.partition(A, 0, kind="bob") # type: ignore[call-overload] np.partition(A, 0, order=range(5)) # type: ignore[arg-type] -np.argpartition(a, None) # type: ignore[arg-type] -np.argpartition(a, 0, axis="bob") # type: ignore[arg-type] -np.argpartition(A, 0, kind="bob") # type: ignore[arg-type] +np.argpartition(a, None) # type: ignore[call-overload] +np.argpartition(a, 0, axis="bob") # type: ignore[call-overload] +np.argpartition(A, 0, kind="bob") # type: ignore[call-overload] np.argpartition(A, 0, order=range(5)) # type: ignore[arg-type] +np.argpartition(AR_f4, 0, order="a") # type: ignore[arg-type] np.sort(A, axis="bob") # type: ignore[call-overload] np.sort(A, kind="bob") # type: ignore[call-overload] 
diff --git a/numpy/typing/tests/data/fail/ma.pyi b/numpy/typing/tests/data/fail/ma.pyi index 06159f6e979e..59698264e7fd 100644 --- a/numpy/typing/tests/data/fail/ma.pyi +++ b/numpy/typing/tests/data/fail/ma.pyi @@ -104,9 +104,9 @@ MAR_1d_f8.partition(order=lambda: "cabbage") # type: ignore[arg-type, call-arg] MAR_1d_f8.partition(AR_b) # type: ignore[arg-type] MAR_1d_f8.argpartition(["cabbage"]) # type: ignore[arg-type] -MAR_1d_f8.argpartition(axis=(0, 1)) # type: ignore[arg-type, call-arg] -MAR_1d_f8.argpartition(kind="cabbage") # type: ignore[arg-type, call-arg] -MAR_1d_f8.argpartition(order=lambda: "cabbage") # type: ignore[arg-type, call-arg] +MAR_1d_f8.argpartition(axis=(0, 1)) # type: ignore[call-overload] +MAR_1d_f8.argpartition(kind="cabbage") # type: ignore[call-overload] +MAR_1d_f8.argpartition(order=lambda: "cabbage") # type: ignore[call-overload] MAR_1d_f8.argpartition(AR_b) # type: ignore[arg-type] np.ma.ndim(lambda: "lambda") # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/reveal/fromnumeric.pyi b/numpy/typing/tests/data/reveal/fromnumeric.pyi index 26de2c2b6e37..4474f375b716 100644 --- a/numpy/typing/tests/data/reveal/fromnumeric.pyi +++ b/numpy/typing/tests/data/reveal/fromnumeric.pyi @@ -80,6 +80,11 @@ assert_type(np.argpartition(f4, 0), npt.NDArray[np.intp]) assert_type(np.argpartition(f, 0), npt.NDArray[np.intp]) assert_type(np.argpartition(AR_b, 0), npt.NDArray[np.intp]) assert_type(np.argpartition(AR_f4, 0), npt.NDArray[np.intp]) +assert_type(np.argpartition(b, 0, axis=None), np.ndarray[tuple[int], np.dtype[np.intp]]) +assert_type(np.argpartition(f4, 0, axis=None), np.ndarray[tuple[int], np.dtype[np.intp]]) +assert_type(np.argpartition(f, 0, axis=None), np.ndarray[tuple[int], np.dtype[np.intp]]) +assert_type(np.argpartition(AR_b, 0, axis=None), np.ndarray[tuple[int], np.dtype[np.intp]]) +assert_type(np.argpartition(AR_f4, 0, axis=None), np.ndarray[tuple[int], np.dtype[np.intp]]) assert_type(np.sort([2, 1], 0), npt.NDArray[Any]) 
assert_type(np.sort(AR_b, 0), npt.NDArray[np.bool]) diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index a94d278e87f4..944983b97fa1 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -217,7 +217,10 @@ assert_type(MAR_f4.partition(1), None) assert_type(MAR_V.partition(1, axis=0, kind="introselect", order="K"), None) assert_type(MAR_f4.argpartition(1), MaskedArray[np.intp]) -assert_type(MAR_1d.argpartition(1, axis=0, kind="introselect", order="K"), MaskedArray[np.intp]) +assert_type( + MAR_1d.argpartition(1, axis=0, kind="introselect", order="K"), + np.ma.MaskedArray[tuple[int], np.dtype[np.intp]], +) assert_type(np.ma.ndim(f4), int) assert_type(np.ma.ndim(MAR_b), int) diff --git a/numpy/typing/tests/data/reveal/ndarray_misc.pyi b/numpy/typing/tests/data/reveal/ndarray_misc.pyi index be0666f95fcb..fa2c6020919f 100644 --- a/numpy/typing/tests/data/reveal/ndarray_misc.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_misc.pyi @@ -161,7 +161,9 @@ assert_type(AR_f8.var(axis=0), Any) assert_type(AR_f8.var(keepdims=True), Any) assert_type(AR_f8.var(out=B), SubClass) +assert_type(AR_f8.argpartition(0), npt.NDArray[np.intp]) assert_type(AR_f8.argpartition([0]), npt.NDArray[np.intp]) +assert_type(AR_f8.argpartition(0, axis=None), np.ndarray[tuple[int], np.dtype[np.intp]]) assert_type(AR_f8.diagonal(), npt.NDArray[np.float64]) From e6290b911dec13b6de5a7d6a736f12175eeb7332 Mon Sep 17 00:00:00 2001 From: stratakis Date: Fri, 19 Dec 2025 18:43:45 +0100 Subject: [PATCH 1061/1718] MAINT: fix array size declarations in string_partition_resolve_descriptors (#30475) --- numpy/_core/src/umath/string_ufuncs.cpp | 6 +++--- numpy/_core/src/umath/stringdtype_ufuncs.cpp | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/numpy/_core/src/umath/string_ufuncs.cpp b/numpy/_core/src/umath/string_ufuncs.cpp index 9b3d86c25301..4a65227cdfa6 100644 --- 
a/numpy/_core/src/umath/string_ufuncs.cpp +++ b/numpy/_core/src/umath/string_ufuncs.cpp @@ -1125,9 +1125,9 @@ string_partition_promoter(PyObject *NPY_UNUSED(ufunc), static NPY_CASTING string_partition_resolve_descriptors( PyArrayMethodObject *self, - PyArray_DTypeMeta *const NPY_UNUSED(dtypes[3]), - PyArray_Descr *const given_descrs[3], - PyArray_Descr *loop_descrs[3], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[6]), + PyArray_Descr *const given_descrs[6], + PyArray_Descr *loop_descrs[6], npy_intp *NPY_UNUSED(view_offset)) { if (!given_descrs[3] || !given_descrs[4] || !given_descrs[5]) { diff --git a/numpy/_core/src/umath/stringdtype_ufuncs.cpp b/numpy/_core/src/umath/stringdtype_ufuncs.cpp index ebc10586bf8b..8e9c3ddbe40c 100644 --- a/numpy/_core/src/umath/stringdtype_ufuncs.cpp +++ b/numpy/_core/src/umath/stringdtype_ufuncs.cpp @@ -1928,9 +1928,9 @@ zfill_strided_loop(PyArrayMethod_Context *context, static NPY_CASTING string_partition_resolve_descriptors( PyArrayMethodObject *self, - PyArray_DTypeMeta *const NPY_UNUSED(dtypes[3]), - PyArray_Descr *const given_descrs[3], - PyArray_Descr *loop_descrs[3], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[5]), + PyArray_Descr *const given_descrs[5], + PyArray_Descr *loop_descrs[5], npy_intp *NPY_UNUSED(view_offset)) { if (given_descrs[2] || given_descrs[3] || given_descrs[4]) { From c59095fba8e6aee50aec49bacc740611ceb8feca Mon Sep 17 00:00:00 2001 From: div Date: Fri, 19 Dec 2025 23:43:59 +0530 Subject: [PATCH 1062/1718] ENH: Return rank 0 for empty matrices in matrix_rank (#30422) This PR fixes a bug in np.linalg.matrix_rank where empty matrices (previously with 0 rows or 0 columns) would raise a ValueError due to attempting a reduction operation on a zero-size array. 
--- numpy/linalg/_linalg.py | 3 ++- numpy/linalg/tests/test_linalg.py | 19 +++++++++++++++++++ 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/numpy/linalg/_linalg.py b/numpy/linalg/_linalg.py index 1889946f9879..b07ea5873d9f 100644 --- a/numpy/linalg/_linalg.py +++ b/numpy/linalg/_linalg.py @@ -2130,6 +2130,7 @@ def matrix_rank(A, tol=None, hermitian=False, *, rtol=None): A = asarray(A) if A.ndim < 2: return int(not all(A == 0)) + S = svd(A, compute_uv=False, hermitian=hermitian) if tol is None: @@ -2137,7 +2138,7 @@ def matrix_rank(A, tol=None, hermitian=False, *, rtol=None): rtol = max(A.shape[-2:]) * finfo(S.dtype).eps else: rtol = asarray(rtol)[..., newaxis] - tol = S.max(axis=-1, keepdims=True) * rtol + tol = S.max(axis=-1, keepdims=True, initial=0) * rtol else: tol = asarray(tol)[..., newaxis] diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py index b3744024fd88..9c06e04a5ec9 100644 --- a/numpy/linalg/tests/test_linalg.py +++ b/numpy/linalg/tests/test_linalg.py @@ -2440,3 +2440,22 @@ def test_vector_norm_empty(): assert_equal(np.linalg.vector_norm(x, ord=1), 0) assert_equal(np.linalg.vector_norm(x, ord=2), 0) assert_equal(np.linalg.vector_norm(x, ord=np.inf), 0) + +def test_empty_matrix_rank(): + assert_equal(matrix_rank(np.zeros((0, 0))), 0) + assert_equal(matrix_rank(np.zeros((0, 5))), 0) + assert_equal(matrix_rank(np.zeros((5, 0))), 0) + + result = matrix_rank(np.zeros((0, 5, 5))) + assert_equal(result.shape, (0,)) + assert_equal(result.dtype, np.intp) + + result = matrix_rank(np.zeros((3, 0, 5))) + assert_equal(result, np.array([0, 0, 0])) + + result = matrix_rank(np.zeros((2, 5, 0))) + assert_equal(result, np.array([0, 0])) + + result = matrix_rank(np.zeros((2, 3, 0, 4))) + assert_equal(result.shape, (2, 3)) + assert_equal(result, np.zeros((2, 3), dtype=np.intp)) From 515696580670e527454fb3c1a7679a96e88a7de9 Mon Sep 17 00:00:00 2001 From: riku-sakamoto <46015196+riku-sakamoto@users.noreply.github.com> Date: 
Sat, 20 Dec 2025 03:42:44 +0900 Subject: [PATCH 1063/1718] ENH: Change `assert_allclose` to accept `numpy.timedelta64` as `atol` (#30386) --- numpy/_core/tests/test_datetime.py | 11 +++++++++++ numpy/testing/_private/utils.py | 8 ++++++-- numpy/testing/_private/utils.pyi | 2 +- 3 files changed, 18 insertions(+), 3 deletions(-) diff --git a/numpy/_core/tests/test_datetime.py b/numpy/_core/tests/test_datetime.py index c7b11149ed43..5ca4a5bb6c62 100644 --- a/numpy/_core/tests/test_datetime.py +++ b/numpy/_core/tests/test_datetime.py @@ -2748,6 +2748,17 @@ def test_true_divide_object_by_timedelta( results = inputs / divisor assert_array_equal(results, expected) + @pytest.mark.parametrize( + "atol", [np.timedelta64(1, "s"), np.timedelta64(1, "ms")] + ) + def test_assert_all_close_with_timedelta_atol( + self, atol: np.timedelta64 | datetime.timedelta + ): + # gh-30382 + a = np.array([1, 2], dtype="m8[s]") + b = np.array([3, 4], dtype="m8[s]") + with pytest.raises(AssertionError): + np.testing.assert_allclose(a, b, atol=atol) class TestDateTimeData: diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index 87d9f0394fb3..fde610b05460 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -1687,7 +1687,7 @@ def assert_allclose(actual, desired, rtol=1e-7, atol=0, equal_nan=True, Array desired. rtol : float, optional Relative tolerance. - atol : float, optional + atol : float | np.timedelta64, optional Absolute tolerance. equal_nan : bool, optional. If True, NaNs will compare equal. 
@@ -1766,7 +1766,11 @@ def compare(x, y): equal_nan=equal_nan) actual, desired = np.asanyarray(actual), np.asanyarray(desired) - header = f'Not equal to tolerance rtol={rtol:g}, atol={atol:g}' + if isinstance(atol, np.timedelta64): + atol_str = str(atol) + else: + atol_str = f"{atol:g}" + header = f'Not equal to tolerance rtol={rtol:g}, atol={atol_str}' assert_array_compare(compare, actual, desired, err_msg=str(err_msg), verbose=verbose, header=header, equal_nan=equal_nan, strict=strict) diff --git a/numpy/testing/_private/utils.pyi b/numpy/testing/_private/utils.pyi index 5cb7f746380d..c408a51200a1 100644 --- a/numpy/testing/_private/utils.pyi +++ b/numpy/testing/_private/utils.pyi @@ -319,7 +319,7 @@ def assert_allclose( actual: _ArrayLikeTD64_co, desired: _ArrayLikeTD64_co, rtol: float = 1e-7, - atol: float = 0, + atol: float | np.timedelta64 = 0, equal_nan: bool = True, err_msg: object = "", verbose: bool = True, From f6440be7b8eec4a6481832f15f6730d984d78ef0 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sat, 20 Dec 2025 13:32:01 -0700 Subject: [PATCH 1064/1718] MAINT: Update main after 2.4.0 release. 
(#30491) --- .mailmap | 42 +- doc/changelog/2.4.0-changelog.rst | 828 +++++++++++++++++++++++++++++ doc/source/release/2.4.0-notes.rst | 719 ++++++++++++++++++++++++- 3 files changed, 1582 insertions(+), 7 deletions(-) create mode 100644 doc/changelog/2.4.0-changelog.rst diff --git a/.mailmap b/.mailmap index 18cfb272618f..1a906d065f47 100644 --- a/.mailmap +++ b/.mailmap @@ -18,6 +18,7 @@ !LSchroefl <65246829+LSchroefl@users.noreply.github.com> !Lbogula !Lisa <34400837+lyzlisa@users.noreply.github.com> +!MyUserNameWasTakenLinux !Patrick <39380924+xamm@users.noreply.github.com> !Scian <65375075+hoony6134@users.noreply.github.com> !Searchingdays @@ -34,9 +35,11 @@ !h-vetinari !h6197627 <44726212+h6197627@users.noreply.github.com> !hutauf +!ianlv <168640168+ianlv@users.noreply.github.com> !jbCodeHub !juztamau5 !karl3wm +!kostayScr <11485271+kostayScr@users.noreply.github.com> !legoffant <58195095+legoffant@users.noreply.github.com> !liang3zy22 <35164941+liang3zy22@users.noreply.github.com> !luzpaz @@ -49,6 +52,7 @@ !mykykh <49101849+mykykh@users.noreply.github.com> !nullSoup <34267803+nullSoup@users.noreply.github.com> !ogidig5 <82846833+ogidig5@users.noreply.github.com> +!olivier !partev !pkubaj !pmvz @@ -69,10 +73,13 @@ !yan-wyb !yetanothercheer Aaron Baecker -Adrin Jalali Abhishek Kumar Abhishek Kumar <142383124+abhishek-iitmadras@users.noreply.github.com> +Abhishek Tiwari <27881020+Abhi210@users.noreply.github.com> Abraham Medina +Adrin Jalali +Akhil Kannan +Akhil Kannan <143798318+Alverok@users.noreply.github.com> Arun Kota Arun Kota Arun Kota Aarthi Agurusa @@ -94,6 +101,7 @@ Aerik Pawson <45904740+aerikpawson@users.noreply.github.com> Ahmet Can Solak Amrit Krishnan Amrit Krishnan +Ankit Ahlawat Alban Desmaison Albert Jornet Puig Alberto Rubiales @@ -121,6 +129,7 @@ Alok Singhal Alok Singhal Alyssa Quek Andrea Bianchi Andrea Bianchi andrea-bia +Anik Chand <161185149+anikchand461@users.noreply.github.com> Ankit Dwivedi Ankit Dwivedi Ankur Singh @@ -137,6 +146,7 
@@ Andreas Schwab Andrei Kucharavy Andrej Zhilenkov Andrew Lawson +Aniket Singh Yadav <148300120+Aniketsy@users.noreply.github.com> Anirudh Subramanian Anne Archibald Anne Archibald @@ -216,6 +226,7 @@ Chris Navarro <24905907+lvllvl@users.noreply.github.com Chris Vavaliaris Christian Barbia Christian Clauss +Christine P. Chai Christopher Dahlin Christopher Hanley Christoph Buchner @@ -253,6 +264,7 @@ David Pitchford David Prosin Davide Dal Bosco <62077652+davidedalbosco@users.noreply.github.com> Dawid Zych +Dennis Van de Vorst <87502756+dvorst@users.noreply.github.com> Dennis Zollo Derek Homeier Derek Homeier @@ -260,6 +272,7 @@ Derek Homeier Devin Shanahan Daval Parmar <53395856+DhavalParmar61@users.noreply.github.com> +Diego Atencia <53157128+alektebel@users.noreply.github.com> Digya Acharya Dima Pasechnik Dima Pasechnik @@ -271,6 +284,7 @@ D.J. Ramones <50655786+djramones@users.noreply.github.com> Dmitriy Shalyga Dmitry Belov Dustan Levenstein <43019642+dustanlevenstein@users.noreply.github.com> +Diya Singh Dylan Cutler Ed Schofield Egor Zindy @@ -313,6 +327,9 @@ Greg Young Greg Young Gregory R. Lee Gregory R. 
Lee +Gubaydullin Danis +Gubaydullin Danis <96629796+DanisNone@users.noreply.github.com> +Guido Imperiale Guo Ci guoci Guo Shuai Gyeongjae Choi @@ -385,6 +402,7 @@ Jessé Pires Jessi J Zhao <35235453+jessijzhao@users.noreply.github.com> Jhong-Ken Chen (陳仲肯) Jhong-Ken Chen (陳仲肯) <37182101+kennychenfs@users.noreply.github.com> +Jingu Kang Jiuding Tan (谭九鼎) <109224573@qq.com> Johann Faouzi Johann Rohwer @@ -431,6 +449,7 @@ Karan Dhir Karel Planken <71339309+kplanken@users.noreply.github.com> Karthik Gangula <56480632+gangula-karthik@users.noreply.github.com> Karthik Kaiplody +Khelf Mohamed Keller Meier Kenny Huynh Kevin Granados @@ -442,6 +461,7 @@ Khaled Ben Abdallah Okuda Kiko Correoso kikocorreoso Kiko Correoso kikocorreoso Kira Prokopenko +Koki Watanabe Konrad Kapp Kristoffer Pedersen Kristoffer Pedersen @@ -545,6 +565,9 @@ Mircea Akos Bruma <35742861+Mitchell-Faas@users.noreply.github.com> Mohammed Abdul Rahman Mohammed Abdul Rahman <130785777+that-ar-guy@users.noreply.github.com> +Muhammad Maaz +Muhammad Maaz <76714503+mmaaz-git@users.noreply.github.com> +Mohammed Zuhaib <56065368+zuhu2195@users.noreply.github.com> Mohaned Qunaibit Muhammad Kasim Muhammed Muhsin @@ -585,6 +608,7 @@ Panos Mavrogiorgos Pantelis Antonoudiou Pantelis Antonoudiou Pat Miller patmiller +Paul Caprioli Paul Ivanov Paul Ivanov Paul Jacobson @@ -601,6 +625,7 @@ Peter J Cock Peter Kämpf Peyton Murray Phil Elson +Phoenix Studio <59125767+phoenixstudiodz@users.noreply.github.com> Filipe Laíns Pierre GM Pierre GM pierregm @@ -612,14 +637,18 @@ Prathmesh Shirsat <55539563+Fayyr@users.noreply.github.com> Prithvi Singh Prithvi Singh <42640176+prithvitewatia@users.noreply.github.com> Przemyslaw Bartosik +Raghuveer Devulapalli Raghuveer Devulapalli -Raghuveer Devulapalli <44766858+r-devulap@users.noreply.github.com> +Raghuveer Devulapalli <447668+r-devulap@users.noreply.github.com> Rajas Rade lkdmttg7 Rakesh Vasudevan +Ralf Bürkle <214435818+polaris-3@users.noreply.github.com> Ralf Gommers Ralf 
Gommers rgommers Rehas Sachdeva Richard Howe <45905457+rmhowe425@users.noreply.github.com> +Riku Sakamoto +Riku Sakamoto <46015196+riku-sakamoto@users.noreply.github.com> Ritta Narita Riya Sharma Rob Timpe @@ -635,6 +664,7 @@ Roman Yurchak Ronan Lamy Ronan Lamy Rostan Tabet Roy Jacobson +Rupesh Sharma <206439536+Rupeshhsharma@users.noreply.github.com> Russell Hewett Ryan Blakemore Ryan Polley @@ -661,6 +691,7 @@ Schrijvers Luc Sean Cheah Sean Cheah <67928790+thalassemia@users.noreply.github.com> Sebastian Berg +Sebastian Berg Sebastian Schleehauf Serge Guelton Sergei Vorfolomeev <39548292+vorfol@users.noreply.github.com> @@ -670,6 +701,7 @@ Shubham Gupta Shubham Gupta <63910248+shubham11941140@users.noreply.github.com> Shekhar Prasad Rajak Shen Zhou +Shirong Wang Shreya Singh Shota Kawabuchi Siavash Eliasi @@ -696,6 +728,8 @@ Stuart Archibald Stuart Archibald SUMIT SRIMANI <2301109104@ptuniv.edu.in SuryaChand P +Swayam Singh +Swayam Singh Sylvain Ferriol Takanori Hirano Theodoros Nikolaou @@ -704,6 +738,7 @@ Talha Mohsin <131553190+talhabm@users.noreply.github.com Thomas A Caswell Thomas Kluyver Thomas Orgis +Timileyin Daso Tim Cera Tim Teichmann Tim Teichmann <44259103+tteichmann@users.noreply.github.com> @@ -720,6 +755,7 @@ Travis Oliphant Travis Oliphant Travis Oliphant Vahid Tavanashad <120411540+vtavana@users.noreply.github.com> +Varad Raj Singh Valentin Haenel Valentin Haenel Vardhaman Kalloli <83634399+cyai@users.noreply.github.com> @@ -745,6 +781,8 @@ Wim Glenn Wojtek Ruszczewski Wojciech Rzadkowski <33913808+wrzadkow@users.noreply.github.com> Xiangyi Wang +Xiaoyu +Xiaoyu Yamada Fuyuka Yang Hau Yang Hau diff --git a/doc/changelog/2.4.0-changelog.rst b/doc/changelog/2.4.0-changelog.rst new file mode 100644 index 000000000000..472811f6be62 --- /dev/null +++ b/doc/changelog/2.4.0-changelog.rst @@ -0,0 +1,828 @@ + +Contributors +============ + +A total of 142 people contributed to this release. 
People with a "+" by their +names contributed a patch for the first time. + +* !DWesl +* !MyUserNameWasTakenLinux + +* !ianlv + +* !kostayScr + +* !olivier + +* Aadya Chinubhai + +* Aaron Kollasch + +* Abdu Zoghbi + +* Abhishek Kumar +* Abhishek Tiwari + +* Adam Turner + +* Akhil Kannan + +* Aleksandr A. Voyt + +* Amelia Thurdekoos + +* Andrew Nelson +* Angus Gibson + +* Anik Chand + +* Aniket Singh Yadav + +* Ankit Ahlawat + +* Arthur Lacote + +* Ben Woodruff +* Bernard Roesler + +* Brad Smith + +* Britney Whittington + +* Carlos Martin +* Charles Harris +* Charlie Lin + +* Chris Navarro +* Christian Barbia + +* Christian Bourjau + +* Christine P. Chai +* Christopher Sidebottom +* Clément Robert +* Copilot + +* Dan Raviv + +* Daniel Bertalan + +* David Seifert + +* Dennis Van de Vorst + +* Developer-Ecosystem-Engineering +* Diego Atencia + +* Dillon Niederhut +* Dimitri Papadopoulos Orfanos +* Diya Singh + +* Evgeni Burovski +* Faizan-Ul Huda + +* François Rozet +* Germán Godoy Gutierrez + +* Gubaydullin Danis + +* Guido Imperiale +* Hamza Meel + +* Hannah Aizenman +* Henry Schreiner +* Hunter Hogan + +* Iason Krommydas + +* Inessa Pawson +* Jake VanderPlas +* Jingu Kang + +* Joe Rickerby + +* Johnnie Gray + +* Jonathan Reimer + +* Joren Hammudoglu +* Kaiyuan Yang + +* Kelvin Li + +* Khelf Mohamed + +* Koki Watanabe + +* Kumar Aditya + +* Leonardo Paredes + +* Lucas Colley +* Lysandros Nikolaou +* Maanas Arora +* Marc Redemske + +* Marco Barbosa +* Marco Edward Gorelli +* Mark Ryan +* Marten van Kerkwijk +* Maryanne Wachter +* Mateusz Sokół +* Matthias Bussonnier +* Matti Picus +* Melissa Weber Mendonça +* Michael Davidsaver +* Michael Siebert +* Michał Górny +* Mohammed Abdul Rahman +* Mohammed Zuhaib + +* Mohit Deoli + +* Moritz Groß + +* Mugundan Selvanayagam +* Muhammad Maaz + +* Mukulika Pahari +* Nathan Goldbaum +* Nicholas Bidler + +* Paresh Joshi + +* Parsa Shemirani + +* Paul Caprioli + +* Phoenix Studio + +* Pieter Eendebak +* Rafael Laboissière + +* 
Raghuveer Devulapalli +* Ralf Bürkle + +* Ralf Gommers +* Richard Smythe + +* Riku Sakamoto + +* Rohit Goswami +* Ross Barnowski +* Rupesh Sharma + +* Sachin Shah + +* Samruddhi Baviskar + +* Sandeep Gupta + +* Sandro + +* Sanjay Kumar Sakamuri Kamalakar + +* Sarang Joshi + +* Sayed Awad +* Sebastian Berg +* Sergey Fedorov + +* Shirong Wang + +* Shyok Mutsuddi + +* Simola Nayak + +* Stan Ulbrych +* Steven Hur + +* Swayam Singh + +* T.Yamada + +* Tim Hoffmann +* Timileyin Daso + +* Tobias Markus + +* Tontonio3 + +* Toshaksha + +* Trey Cole + +* Tyler Reddy +* Varad Raj Singh + +* Veit Heller + +* Vineet Kumar + +* Wang Yang (杨旺) +* Warren Weckesser +* William Pursell + +* Xiaoyu + +* Yasir Ashfaq + +* Yuki K +* Yuvraj Pradhan +* Zebreus + +* Zhi Li + +* dependabot[bot] + +Pull requests merged +==================== + +A total of 673 pull requests were merged for this release. + +* `#23513 `__: ENH: speed up einsum with optimize using batched matmul +* `#24501 `__: DOC: add description of dtype b1 in arrays.dtypes.rst +* `#25245 `__: ENH: Enable native half-precision scalar conversion operations... 
+* `#28147 `__: DOC: Fix ambiguity in polyfit description +* `#28158 `__: DOC: Update CONTRIBUTING.rst +* `#28590 `__: ENH: Use array indexing preparation routines for flatiter objects +* `#28595 `__: BUG: quantile should error when weights are all zeros +* `#28622 `__: ENH, SIMD: Initial implementation of Highway wrapper +* `#28767 `__: ENH: np.unique: support hash based unique for string dtype +* `#28826 `__: DOC: Add flat examples to argmax and argmin +* `#28896 `__: ENH: Modulate dispatched x86 CPU features +* `#28925 `__: DEP: Deprecate setting the strides attribute of a numpy array +* `#28955 `__: MNT: Update windows-2019 to windows-2022[wheel build] +* `#28970 `__: MNT: Enforce ruff/Perflint rules (PERF) +* `#28979 `__: DOC: improves np.fromfile file description (#28840) +* `#28983 `__: MAINT: Options to catch more issues reported by pytest +* `#28985 `__: MNT: constant string arrays instead of pointers in C +* `#28996 `__: ENH: add __array_function__ protocol in polynomial +* `#29007 `__: CI: update cibuildwheel to 3.0.0b1 and enable cp314 and cp314t... +* `#29012 `__: TYP: Type ``MaskedArray.__{add,radd,sub,rsub}__`` +* `#29019 `__: BEG, MAINT: Begin NumPy 2.4.0 development. 
+* `#29022 `__: MAINT: Convert multiarray to multi-phase init (PEP 489) +* `#29028 `__: MAINT: Convert pocketfft_umath to multi-phase init (PEP 489) +* `#29032 `__: BUG: Fix workflow bug +* `#29034 `__: BUG: Avoid compile errors in f2py modules +* `#29036 `__: DOC: Expand/clean up extension module import error +* `#29039 `__: BLD: bump OpenBLAS version, use OpenBLAS for win-arm64 +* `#29040 `__: BUG: Fix f2py derived types in modules +* `#29041 `__: BUG: Fix cache use regression +* `#29048 `__: TYP: annotate ``strings.slice`` +* `#29050 `__: TYP: remove expired ``tostring`` methods +* `#29051 `__: MNT: use pypy 3.11 nightly which has a fix for ctypeslib +* `#29052 `__: ENH: show warning when np.maximum receives more than 2 inputs +* `#29053 `__: BLD: allow targeting webassembly without emscripten +* `#29057 `__: TYP: fix invalid overload definition in ``_core.defchararray.add`` +* `#29058 `__: TYP: fill in some of the missing annotations in the stubs +* `#29060 `__: BUG: add bounds-checking to in-place string multiply +* `#29061 `__: BLD: use sonoma image on Cirrus for wheel build +* `#29066 `__: DOC: fix typo in documentation of vecmat +* `#29068 `__: MAINT: Enforce ruff E501 +* `#29078 `__: CI: clean up cibuildwheel config a bit +* `#29080 `__: CI: bump to cibuildwheel 3.0.0b4 +* `#29083 `__: MAINT: Avoid use of deprecated _PyDict_GetItemStringWithError... 
+* `#29084 `__: BENCH: Increase array sizes for ufunc and sort benchmarks +* `#29085 `__: MAINT: Bump ``scipy-doctest`` to 1.8 +* `#29088 `__: MAINT: Add ``build-\*`` directories to ``.gitignore`` +* `#29091 `__: BUG: f2py: thread-safe forcomb +* `#29092 `__: TYP: fix ``NDArray[integer]`` inplace operator mypy issue +* `#29093 `__: MAINT: Bump ossf/scorecard-action from 2.4.1 to 2.4.2 +* `#29094 `__: BUG: remove ``NPY_ALIGNMENT_REQUIRED`` +* `#29095 `__: MAINT: bump ``mypy`` to ``1.16.0`` +* `#29097 `__: TYP: run mypy in strict mode +* `#29098 `__: PERF: Make NpzFile member existence checks constant-time +* `#29105 `__: BUG: Allow np.percentile to operate on float16 data +* `#29106 `__: DOC: Fix some incorrect reST markups +* `#29111 `__: MAINT: fix SPDX license expressions for LAPACK, GCC runtime libs +* `#29112 `__: ENH: Improve error message in numpy.testing.assert_array_compare +* `#29115 `__: MAINT: cleanup from finalized concatenate deprecation +* `#29119 `__: DOC: remove very outdated info on ATLAS +* `#29120 `__: TYP: minor ufunc alias fixes in ``__init__.pyi`` +* `#29121 `__: MAINT: Bump github/codeql-action from 3.28.18 to 3.28.19 +* `#29122 `__: DOC: fix typo in Numpy's module structure +* `#29128 `__: MAINT: Bump conda-incubator/setup-miniconda from 3.1.1 to 3.2.0 +* `#29129 `__: ENH: add a casting option 'same_value' and use it in np.astype +* `#29133 `__: MAINT: Bump mamba-org/setup-micromamba from 2.0.4 to 2.0.5 +* `#29137 `__: BUG: make round consistently return a copy +* `#29141 `__: MAINT: Update main after 2.3.0 release. +* `#29142 `__: TYP: update typing stubs for ``_pyinstaller/hook-numpy.py`` +* `#29143 `__: DOC: Document assertion comparison behavior between scalar and... 
+* `#29145 `__: TYP: add missing ``numpy.lib`` exports +* `#29146 `__: TYP: fix minor ``f2py`` stub inconsistencies +* `#29147 `__: BUG: Missing array-api ``capabilities()`` key +* `#29148 `__: TST: migrating from pytz to zoneinfo + tzdata (where needed) +* `#29149 `__: CI: Bump ``array-api-tests`` to ``v2025.05.23`` +* `#29154 `__: DOC: Remove version switcher colors +* `#29155 `__: TYP: ``double = float64`` and ``cdouble = complex128`` +* `#29156 `__: CI: Run mypy with Python 3.13 +* `#29158 `__: DOC: tweak release walkthrough for numpy.org news blurb +* `#29160 `__: DOC: Suppress distutils doc build warnings for python 3.12+ +* `#29165 `__: ENH: Use itertools.product for ndindex to improve performance +* `#29166 `__: TYP: Fix missing ``_core.numeric`` (re-)exports +* `#29168 `__: TYP: Simplified ``dtype.__new__`` overloads +* `#29169 `__: TYP: ``out=...`` in ufuncs +* `#29170 `__: TYP: ``numpy._NoValue`` +* `#29171 `__: TYP: Accept dispatcher function with optional returns in ``_core.overrides`` +* `#29175 `__: TYP: Fix invalid inline annotations in ``lib._function_base_impl`` +* `#29176 `__: TYP: ``any(None)`` and ``all(None)`` +* `#29177 `__: TYP: ``lib._iotools`` annotation improvements +* `#29179 `__: BUG: fix matmul with transposed out arg +* `#29180 `__: MAINT: Bump pypa/cibuildwheel from 3.0.0b4 to 3.0.0 +* `#29181 `__: TYP: fix and improve ``numpy.lib._utils_impl`` +* `#29183 `__: STY: ruff/isort config tweaks +* `#29184 `__: TYP: fix ``ravel_multi_index`` false rejections +* `#29185 `__: STY: ruff/isort config tweaks - episode 2 +* `#29186 `__: MAINT: bump ``ruff`` to ``0.11.13`` +* `#29187 `__: TYP: add ``__all__`` in ``numpy._core.__init__`` +* `#29188 `__: MAINT: strides comparison performance fix, compare discussion... +* `#29195 `__: MAINT: Bump github/codeql-action from 3.28.19 to 3.29.0 +* `#29196 `__: BUG: Revert ``np.vectorize`` casting to legacy behavior +* `#29197 `__: TST: additional tests for matmul with non-contiguous input and... 
+* `#29204 `__: TYP: fix ``ndarray.__array__`` annotation for ``copy`` +* `#29208 `__: ENH: improve Timsort with powersort merge-policy +* `#29210 `__: BUG: fix linting +* `#29212 `__: CI: Add native ``ppc64le`` CI job using GitHub Actions +* `#29215 `__: CI: Add support for building NumPy with LLVM for Win-ARM64 +* `#29216 `__: MAINT: Fix some undef warnings +* `#29218 `__: TYP: Workaround for a mypy issue in ``ndarray.__iter__`` +* `#29219 `__: MAINT: bump ``mypy`` to ``1.16.1`` +* `#29220 `__: MAINT: bump ``ruff`` to ``0.12.0`` +* `#29221 `__: ENH: Detect CPU features on OpenBSD ARM and PowerPC64 +* `#29223 `__: BUG: Address interaction between SME and FPSR +* `#29224 `__: ENH: Detect CPU features on FreeBSD / OpenBSD RISC-V64. +* `#29227 `__: TYP: Support iteration of ``StringDType`` arrays +* `#29230 `__: BUG: avoid negating unsigned integers in resize implementation +* `#29231 `__: BUG: Enforce integer limitation in concatenate +* `#29232 `__: TST: Fix test that uses uninitialized memory +* `#29240 `__: ENH: Let numpy.size accept multiple axes. +* `#29248 `__: TYP: Work around a mypy issue with bool arrays +* `#29250 `__: MAINT: Enable linting with ruff E501 +* `#29252 `__: DOC: Fix some markup errors +* `#29254 `__: DOC: Clarify dtype argument for __array__ in custom container... +* `#29257 `__: MAINT: Update main after 2.3.1 release. +* `#29265 `__: TYP: Type ``MaskedArray.__{mul,rmul}__`` +* `#29269 `__: BUG: fix fencepost error in StringDType internals +* `#29271 `__: TYP: Add overloads for ``MaskedArray.__{div,rdiv,floordiv,rfloordiv}__`` +* `#29272 `__: MAINT: Fix ``I001`` ruff error on main +* `#29273 `__: ENH: Extend numpy.pad to handle pad_width dictionary argument. +* `#29275 `__: DOC: avoid searching some directories for doxygen-commented source... +* `#29277 `__: TYP: Add type annotations for ``MaskedArray.__{pow,rpow}__`` +* `#29278 `__: TYP: fix overloads where ``out: _ArrayT`` was typed as being... 
+* `#29281 `__: BUG: Include python-including headers first +* `#29285 `__: MAINT: Bump github/codeql-action from 3.29.0 to 3.29.1 +* `#29286 `__: BUG: handle case in mapiter where descriptors might get replaced +* `#29289 `__: BUG: Fix macro redefinition +* `#29290 `__: BUG: Fix version check in blas_utils.c +* `#29291 `__: MAINT: Enable linting with ruff E501 +* `#29296 `__: MAINT: Bump github/codeql-action from 3.29.1 to 3.29.2 +* `#29300 `__: MAINT: Enable linting with ruff E501 +* `#29301 `__: DEP: Give a visible warning when ``align=`` to dtype is a non-bool +* `#29302 `__: DOCS: Remove incorrect "Returns" section from ``MaskedArray.sort`` +* `#29303 `__: TYP: Add shape typing to return values of ``np.nonzero`` and... +* `#29305 `__: TYP: add explicit types for np.quantile +* `#29306 `__: DOC: remove redundant words +* `#29307 `__: TYP: Type ``MaskedArray.{trace,round,cumsum,cumprod}`` +* `#29308 `__: Fix incorrect grammar in TypeError message for ufunc argument... +* `#29309 `__: TYP: Type ``MaskedArray.dot`` and ``MaskedArray.anom`` +* `#29310 `__: TYP: rename ``_T`` to ``_ScalarT`` in ``matlib.pyi`` for consistency +* `#29311 `__: DOCS: Fix rendering of ``MaskedArray.anom`` ``dtype`` +* `#29312 `__: BLD: remove unused github workflow +* `#29313 `__: BUG: Allow reading non-npy files in npz and add test +* `#29314 `__: MAINT: Replace setting of array shape by reshape operation +* `#29316 `__: MAINT: remove out-of-date comment +* `#29318 `__: BUG: Fix np.testing utils failing for masked scalar vs. scalar... +* `#29320 `__: DOC: Fix spelling +* `#29321 `__: MNT: Cleanup infs handling in np.testing assertion utilities +* `#29322 `__: MAINT: remove internal uses of assert_warns and suppress_warnings +* `#29325 `__: DOC: Clarify assert_allclose differences vs. allclose +* `#29327 `__: MAINT: Rename nep-0049.rst. +* `#29329 `__: BLD: update ``highway`` submodule to latest master +* `#29331 `__: TYP: ``svd`` overload incorrectly noted ``Literal[False]`` to... 
+* `#29332 `__: TYP: Allow passing ``dtype=None`` to ``trace`` +* `#29333 `__: BUG: Always return a real dtype from linalg.cond (gh-18304) +* `#29334 `__: MAINT: Bump pypa/cibuildwheel from 3.0.0 to 3.0.1 +* `#29335 `__: DOC: vectorize with signature doesn't pre-call function +* `#29338 `__: API,BUG: Fix scalar handling in array-interface allowing NULL... +* `#29340 `__: TYP: correct default value of ``unicode`` in ``chararray.__new__``... +* `#29341 `__: TST: Avoid uninitialized values in test +* `#29343 `__: DOC: Add missing ``self`` in ``__array_ufunc__`` signature +* `#29347 `__: DOC: Fix NEP 49 Resolution Link Formatting (part of #29328) +* `#29351 `__: BLD: print long double format used +* `#29356 `__: BUG: fix test_npy_uintp_type_enum +* `#29358 `__: BUG: Fix reference leakage for output arrays in reduction functions +* `#29362 `__: DOC: specify that ``numpy.nan_to_num`` supports array like arguments +* `#29364 `__: TST: refactor typing check for @ +* `#29368 `__: BUG: avoid segmentation fault in ``string_expandtabs_length_promoter`` +* `#29369 `__: BUG: fix casting issue in center, ljust, rjust, and zfill +* `#29370 `__: ENH: Allow subscript access for ``np.bool`` by adding ``__class_getitem__`` +* `#29371 `__: MNT: add linter for thread-unsafe C API uses +* `#29372 `__: BUG: Fix np.unique with axis=0 and 1D input not collapsing NaNs... +* `#29374 `__: DEV: remove "packages" from ``.gitignore`` +* `#29375 `__: STY: Fix typo in npy_cpu_dispatch.c +* `#29377 `__: TYP: Fix overloads in ``np.char.array`` and ``np.char.asarray``... +* `#29380 `__: BUG: Fix repeatability issues in test suite +* `#29381 `__: TYP: Type ``MaskedArray.{sum,std,var,mean,prod}`` +* `#29383 `__: TYP: Type ``MaskedArray.view`` +* `#29385 `__: BLD: Add sw_64 support +* `#29386 `__: DOC: Fix ``PyArrayMapIterObject`` document +* `#29387 `__: DOC: document ``mean`` parameter in ``ndarray.std`` and ``ndarray.var``... 
+* `#29390 `__: DOC: better differentiate arrays in dstack docstring +* `#29392 `__: BUG: Any dtype should call ``square`` on ``arr \*\* 2`` +* `#29394 `__: ENH: avoid thread safety issues around uses of ``PySequence_Fast`` +* `#29396 `__: ENH: Show unit information in repr for datetime64("NaT") +* `#29401 `__: TYP: Type ``MaskedArray.resize``\ , wrap ``NoReturn`` tests in... +* `#29402 `__: DOC: Correct more ndarray defaults +* `#29403 `__: MAINT: remove unnecessary ``kwargs`` update in ``MaskedArray.reshape`` +* `#29404 `__: TYP: Type ``MaskedArray.reshape`` +* `#29405 `__: MAINT/BUG: Followups for PySequence_Fast locking +* `#29406 `__: MAINT: Bump github/codeql-action from 3.29.2 to 3.29.3 +* `#29407 `__: MAINT: use a stable pypy release in CI +* `#29411 `__: BUG: fix datetime/timedelta hash memory leak +* `#29418 `__: TYP: Type ``MaskedArray.__deepcopy__`` and ``MaskedArray.argsort`` +* `#29419 `__: DOC: Fix index name in notes for np.take +* `#29423 `__: BUG: allow ``MaskedArray.fill_value`` be a string when ``dtype=StringDType`` +* `#29426 `__: MAINT: Bump github/codeql-action from 3.29.3 to 3.29.4 +* `#29427 `__: DOC: Remove outdated ``numpy.exceptions`` compatibility note. +* `#29428 `__: TYP: Add test which hits ``np.array`` constructor overload with... +* `#29431 `__: ENH: Enable RVV acceleration for auto-vectorization in RISC-V +* `#29432 `__: MAINT: Bump larsoner/circleci-artifacts-redirector-action from... +* `#29433 `__: MAINT: Bump pypa/cibuildwheel from 3.0.1 to 3.1.0 +* `#29435 `__: BUG: Fix test_configtool_pkgconfigdir to resolve PKG_CONFIG_DIR... +* `#29436 `__: BLD: allow targeting webassembly without emscripten +* `#29441 `__: MAINT: Update main after 2.3.2 release. +* `#29444 `__: MAINT: Add Python 3.14 to classifier. +* `#29445 `__: DOC: Update RELEASE_WALKTHROUGH.rst +* `#29450 `__: DOC: Clarify that ``numpy.printoptions`` applies only to ``ndarray``\... 
+* `#29456 `__: MAINT: Replace pavement.py +* `#29457 `__: TYP: Type ``MaskedArray.__new__`` +* `#29459 `__: BLD: provide explicit control over cpu-baseline detection +* `#29466 `__: TYP: Type ``MaskedArray.flat`` +* `#29467 `__: TYP: Type ``MaskedArray.recordmask`` +* `#29468 `__: TYP: Type ``MaskedArray.fill_value`` +* `#29470 `__: TYP: Remove ``MaskedArray.__reduce__``\ , and punt on ``MaskedArray.__{eq,ne}__```... +* `#29471 `__: MAINT: Bump pypa/cibuildwheel from 3.1.0 to 3.1.2 +* `#29472 `__: TYP: Type ``MaskedArray.__getitem__`` +* `#29478 `__: TYP: Type ``MaskedArray.__setitem__`` +* `#29479 `__: MAINT: Do not exclude ``typing/tests/data`` from ruff +* `#29480 `__: TYP: Type ``MaskedArray.compress`` +* `#29481 `__: MAINT: Add .file entry to all .s SVML files +* `#29483 `__: TYP: Type ``MaskedArray.__array_finalize__`` and ``MaskedArray.__array_wrap__`` +* `#29484 `__: MAINT: Bump github/codeql-action from 3.29.4 to 3.29.5 +* `#29487 `__: CI: Add UBSAN CI jobs for macOS arm64 and Linux x86-64 +* `#29489 `__: DOC: Add 'See Also' refs for sign, copysign and signbit. +* `#29493 `__: MAINT: bump ``mypy`` to ``1.17.1`` +* `#29495 `__: MAINT: Bump pypa/cibuildwheel from 3.1.2 to 3.1.3 +* `#29502 `__: DOC: Add narrative documentation for printing NumPy arrays +* `#29504 `__: ENH: Use extern C in arraytypes.h.src file for cpp files +* `#29505 `__: BUG: Casting from one timedelta64 to another didn't handle NAT. +* `#29509 `__: BLD: update vendored Meson to 1.8.3 +* `#29512 `__: TST: don't explicitly specify -j in TSAN build +* `#29513 `__: MAINT: Bump hypothesis to 6.137.1 +* `#29514 `__: DOC: Add 'today' string to datetime64 documentation +* `#29521 `__: MAINT: Bump actions/download-artifact from 4.3.0 to 5.0.0 +* `#29522 `__: BUG: random: Fix handling of very small p in Generator.binomial. 
+* `#29526 `__: TYP: Type ``MaskedIterator`` +* `#29529 `__: MAINT: Bump actions/cache from 4.2.3 to 4.2.4 +* `#29530 `__: MAINT: Bump github/codeql-action from 3.29.5 to 3.29.6 +* `#29531 `__: TYP: Type default values in stubs in ``numpy/ma`` +* `#29532 `__: DOC:Clarify build compatibility to dev depending page +* `#29533 `__: MAINT: Bump github/codeql-action from 3.29.6 to 3.29.8 +* `#29534 `__: DOC: Add 'now' string to datetime64 documentation +* `#29535 `__: BLD: update licensing metadata to use PEP 639 +* `#29537 `__: ENH: np.unique: support hash based unique for complex dtype +* `#29539 `__: BUG: left bit shift undefined behavior +* `#29540 `__: CI: run some wheel build jobs by default, and clean up the rest +* `#29541 `__: STY: fix typo in dtypemeta.c [skip azp][skip actions] +* `#29542 `__: MAINT: Bump actions/checkout from 4.2.2 to 5.0.0 +* `#29546 `__: fix: File exists error on macOS when running spin lint +* `#29548 `__: MAINT: Use double quotes (ruff rule ``Q``\ ) (only on ``.pyi``... +* `#29550 `__: DEP: Deprecate NumPy warning control utilities +* `#29551 `__: BUG: resolve invalid grep with env neutral script +* `#29553 `__: MAINT: Bump github/codeql-action from 3.29.8 to 3.29.9 +* `#29555 `__: BUG: Fix metadata not roundtripping when pickling datetime +* `#29557 `__: DOC: Document datetime and timedelta to python's object type... +* `#29564 `__: TYP: use ``TypeAliasType`` for ``ArrayLike`` and ``DTypeLike``... 
+* `#29565 `__: STY: ruff rule name comments +* `#29569 `__: ENH: Add ndmax parameter to np.array to control recursion depth +* `#29572 `__: ENH: enable processing object file for f2py meson backend +* `#29574 `__: TYP: add ``ndmax`` parameter to ``np.array`` +* `#29579 `__: BLD: wire up ``ASIMDDP`` feature to ``ARM_FEATURES`` +* `#29582 `__: DOC: Add link to homepage in doc landing page +* `#29585 `__: TST: update link and version for Intel SDE download +* `#29586 `__: TYP: add ``sorted`` kwarg to ``unique`` +* `#29588 `__: DOC: Make the image credit author link clickable +* `#29589 `__: MAINT: Bump actions/dependency-review-action from 4.7.1 to 4.7.2 +* `#29590 `__: MAINT: Bump github/codeql-action from 3.29.9 to 3.29.10 +* `#29594 `__: TYP: Add defaults to ``numpy/core`` and ``numpy/__init__.py`` +* `#29596 `__: TST: Replace xunit setup with methods +* `#29598 `__: BUG: fix for evaluation of random_f and random_standard_cauchy... +* `#29601 `__: DOC: fix for f2py migrating-to-meson page +* `#29602 `__: MAINT: Bump pypa/cibuildwheel from 3.1.3 to 3.1.4 +* `#29604 `__: DOC: Fix typo in tril_indices and triu_indices docstrings +* `#29605 `__: TST: Replace xunit setup with methods +* `#29607 `__: TST: Enable unit tests for RISC-V CPU dispatcher utilities +* `#29608 `__: TYP: ndarray.fill() takes no keyword arguments +* `#29609 `__: BUG: fix negative samples generated by Wald distribution +* `#29611 `__: CI: more specific mypy_primer ``on:`` paths +* `#29612 `__: CI: replace comment-hider action in mypy_primer workflow +* `#29615 `__: MAINT: Bump github/codeql-action from 3.29.10 to 3.29.11 +* `#29616 `__: TST: Replace xunit setup with methods +* `#29617 `__: DOC: Correct a few formatting issues +* `#29618 `__: MAINT: fix typo in cmds.py +* `#29621 `__: ENH: Extend coverage for benchmark of np.unique +* `#29628 `__: TST: Replace xunit setup with methods +* `#29629 `__: TYP: replace scalar type ``__init__`` with ``__new__`` +* `#29630 `__: TYP: fix slightly incorrect 
``memoryview`` type argument in ``ScalarType`` +* `#29631 `__: TYP: Make ``datetime64`` a generic type at runtime +* `#29633 `__: TYP: add missing ``_NoValue`` annotations in ``_core.fromnumeric`` +* `#29634 `__: TYP: Add missing defaults to stubs +* `#29636 `__: MAINT: Bump actions/dependency-review-action from 4.7.2 to 4.7.3 +* `#29641 `__: TST: Replace xunit setup with methods +* `#29642 `__: ENH: Add extended sorting APIs +* `#29646 `__: DOC: Fix typo in basics.strings.rst +* `#29648 `__: TST: delete global env_setup fixture +* `#29649 `__: BUG: avoid thread-unsafe refcount check in temp elision +* `#29653 `__: MAINT: Bump github/codeql-action from 3.29.11 to 3.30.0 +* `#29654 `__: MAINT: Add Linux Foundation Health Badge to README +* `#29655 `__: DOC: clarify numpy.asarray, numpy.asanyarray, numpy.asarray_chkfinite... +* `#29656 `__: ENH: Improve performance of numpy scalar __copy__ and __deepcopy__ +* `#29657 `__: TST: Replace xunit setup with methods +* `#29658 `__: MAINT: Optimize the logical implementation for RISC-V based on... 
+* `#29662 `__: BLD: Add missing include +* `#29665 `__: BUG: use correct input dtype in flatiter indexed assignment +* `#29666 `__: TST: Replace xunit setup with methods +* `#29667 `__: MAINT: Bump mamba-org/setup-micromamba from 2.0.5 to 2.0.6 +* `#29669 `__: MAINT: Bump actions/github-script from 7.0.1 to 8.0.0 +* `#29670 `__: MAINT: Bump actions/setup-python from 5.6.0 to 6.0.0 +* `#29671 `__: TST: Replace test_smoke xunit setup with methods +* `#29678 `__: BUG, BLD: Correct regex for ppc64 VSX3/VSX4 feature detection +* `#29686 `__: MAINT: Bump github/codeql-action from 3.30.0 to 3.30.1 +* `#29692 `__: TST: Replace test_deprecations setup/teardown with context manager +* `#29693 `__: TST: xfail test_kind::test_quad_precision on AIX/PPC +* `#29695 `__: TYP: fix ``np.bool`` method declarations +* `#29697 `__: BUG: Correct ambiguous logic for ``s390x`` CPU feature detection +* `#29704 `__: BLD: Add missing +* `#29706 `__: TYP: fix ``np.number`` and ``np.\*integer`` method declaration +* `#29713 `__: MAINT: update spin to 0.14 in requirements files +* `#29714 `__: TST: update test_regression::test_gph25784 +* `#29715 `__: BUG: Fix ``dtype`` refcount in ``__array__`` +* `#29716 `__: BUG: standardize 'Mean of empty slice' inconsistent message #29711 +* `#29718 `__: TST: not to include the LONGDOUBLE test on AIX +* `#29723 `__: MAINT: Bump github/codeql-action from 3.30.1 to 3.30.2 +* `#29726 `__: MAINT: Update main after 2.3.3 release. +* `#29729 `__: TST: Fix np.random thread test failures +* `#29730 `__: BLD: enable x86-simd-sort to build on KNL with -mavx512f +* `#29732 `__: DOC: update documentation on how to prepare and do a release +* `#29733 `__: TYP: fix method declarations in ``floating``\ , ``timedelta64``\... 
+* `#29734 `__: TYP: fix ``ndarray.strides`` decorator order +* `#29735 `__: MAINT: Bump actions/checkout from 4.2.2 to 5.0.0 +* `#29736 `__: TYP: sort out some of the ``# type: ignore`` comments in ``__init__.pyi`` +* `#29737 `__: ENH, API: New sorting slots for DType API +* `#29739 `__: TYP: Remove ``None`` from definition of ``DTypeLike`` type alias +* `#29740 `__: TST: disable overflow exception test of numpy.power on AIX +* `#29741 `__: MAINT: Bump github/codeql-action from 3.30.2 to 3.30.3 +* `#29743 `__: MAINT: delete unused variables in unary logical dispatch +* `#29744 `__: TST: Simplify and clarify StringDType testing support utilities +* `#29745 `__: BUG: Fix max_depth validation condition in PyArray_FromAny_int +* `#29749 `__: TYP: mypy 1.18.1 +* `#29750 `__: BLD: change file extension for libnpymath on win-arm64 from .a... +* `#29751 `__: ENH: implement powersort merge-policy for argsort +* `#29753 `__: DOC: Fix typo in absolute_beginners.rst +* `#29754 `__: MAINT: pin asv<0.6.5 +* `#29755 `__: DOC: Clarify description of diagonal covariance in multivariate_normal... +* `#29757 `__: DOC: add dev docs on C debuggers and compiler sanitizers +* `#29760 `__: DOC: Improve documentation for f2py and Meson usage, add ufunc... +* `#29761 `__: BUG: Stable ``ScalarType`` ordering +* `#29764 `__: MAINT: Bump larsoner/circleci-artifacts-redirector-action from... 
+* `#29767 `__: DOC: add another mention of 'same_value' +* `#29768 `__: BUG: Fix pocketfft umath strides for powerpc compatibility +* `#29773 `__: DOC: Correct a typo in Troubleshooting guidelines +* `#29774 `__: BUG: np.setbufsize should raise ValueError for negative input +* `#29775 `__: DOC: Link cross references in numpy documentation +* `#29776 `__: TYP: fix and improve ``{f,i}info`` stubs in ``_core.getlimits`` +* `#29777 `__: BLD: Upgrade spin requirement to version 0.15 +* `#29780 `__: BUG: Fix assert in nditer buffer setup +* `#29794 `__: MAINT: Bump pypa/cibuildwheel from 3.1.4 to 3.2.0 +* `#29796 `__: TST: clarify logic in float_alias_names test +* `#29802 `__: MAINT: Remove xfail and deprecation filter from a test. +* `#29803 `__: DOC: Improve the formatting of Random Generator documentation +* `#29806 `__: MAINT: Bump actions/cache from 4.2.4 to 4.3.0 +* `#29811 `__: BUG: linalg: emit a MemoryError on a malloc failure +* `#29812 `__: BLD: refactor to avoid 'unused function' warnings' +* `#29813 `__: ENH: add warning when calling ufunc with 'where' and without... +* `#29814 `__: MAINT: Bump github/codeql-action from 3.30.3 to 3.30.4 +* `#29815 `__: DOC: Update docstring for ``count_nonzero`` +* `#29816 `__: TST: Mark thread-unsafe tests +* `#29817 `__: MAINT: Bump actions/dependency-review-action from 4.7.3 to 4.8.0 +* `#29819 `__: ENH: Add fast path in ufuncs for numerical scalars. +* `#29823 `__: ENH: cleanup warning generation and unmark xfailed tests +* `#29831 `__: MAINT: Bump github/codeql-action from 3.30.4 to 3.30.5 +* `#29832 `__: MAINT: Bump int128/hide-comment-action from 1.43.0 to 1.44.0 +* `#29833 `__: TST: Pin pyparsing to avoid matplotlib errors. +* `#29836 `__: ENH, FEAT: Reorganize finfo and add new constant slot +* `#29837 `__: ENH: speedup numpy.quantile when weights are provided +* `#29841 `__: DEP: Raise ``TypeError`` on attempt to convert array with ``ndim``... 
+* `#29842 `__: TYP: Fix ``generic.__new__`` return type +* `#29843 `__: TYP: remove unused ``# type: ignore``\ s +* `#29844 `__: TYP: fix ``testing.assert_warns`` decorator order +* `#29845 `__: TYP: Add missing ``rtol`` kwarg to ``linalg.pinv`` +* `#29846 `__: TYP: Fix signatures of ``linalg.matmul`` and ``linalg.outer`` +* `#29847 `__: TYP: Fix incompatible defaults in ``polyfit``\ , ``histogram``\... +* `#29848 `__: MAINT, TYP: bump ``mypy`` to ``1.18.2`` +* `#29849 `__: CI: Use ``uv`` instead of ``pip`` in the mypy workflow +* `#29852 `__: DOC: Add a few missing commas in math operations +* `#29854 `__: MAINT: Bump ossf/scorecard-action from 2.4.2 to 2.4.3 +* `#29856 `__: CI: Try to fix loongarch64 CI +* `#29858 `__: TST: Make temporary file usage thread safe +* `#29861 `__: MAINT: Add Cython linter to spin +* `#29862 `__: TYP: Improved ``ndarray`` augmented assignment operators +* `#29866 `__: MAINT: Bump github/codeql-action from 3.30.5 to 3.30.6 +* `#29867 `__: TST: Fix misc thread unsafe data races +* `#29868 `__: BLD: Fix MSVC warnings and add CI check with allowlist +* `#29869 `__: DOC: Add warning and examples for sliding_window_view +* `#29872 `__: ENH: Set DLPack tensor ``shape`` and ``strides`` to NULL iff... +* `#29875 `__: PERF: Intern strings used to build global tuples. +* `#29880 `__: MAINT: Rewrite setitem to use the new API (mostly) +* `#29882 `__: DOC: Remove unused arrays from the structured dtype ufunc example. 
+* `#29883 `__: DOC: Add note on meson buildtype for debug builds +* `#29885 `__: MAINT: Simplify string arena growth strategy +* `#29886 `__: CI: macos-13 --> macos-15-intel +* `#29889 `__: DOC: Documentation related finfo refactors and new slot addition +* `#29891 `__: MAINT: Bump github/codeql-action from 3.30.6 to 4.30.7 +* `#29892 `__: DOC: Add Plausible analytics to the NumPy documentation +* `#29893 `__: BUG: Fix INT_MIN % -1 to return 0 for all signed integer types +* `#29895 `__: MAINT: Bump astral-sh/setup-uv from 6.8.0 to 7.0.0 +* `#29897 `__: BUG: Fixup float16 conversion error path and add tests +* `#29899 `__: BUG: Ensure backwards compatibility for patching finfo +* `#29900 `__: ENH: Add registration for sorting loops using new ufunc convenience... +* `#29901 `__: TYP: add missing ``__slots__`` +* `#29902 `__: TYP: wrong argument defaults in ``testing._private`` +* `#29903 `__: TYP: fix incorrect ``ma.sort`` arg default for ``stable`` +* `#29904 `__: MAINT: bump ``ruff`` from ``0.12.0`` to ``0.14.0`` +* `#29905 `__: TYP: Parameters with missing default value +* `#29906 `__: TST: do not use matplotlib 3.10.6 +* `#29908 `__: BUG: Fix float16-sort failures on 32-bit x86 MSVC +* `#29909 `__: DEP: finalize deprecation of numpy/fft.helpers and numpy.linalg.linalg... +* `#29912 `__: DOC: Add URL to valgrind tool in Advanced Debugging Page +* `#29914 `__: TYP: minor fixes related to ``errstate`` +* `#29915 `__: TYP: move ``matrix`` from ``__init__.pyi`` to ``matrixlib/defmatrix.pyi`` +* `#29917 `__: Fix memory leak in import_array() +* `#29919 `__: TST: use requirements/test_requirements across CI +* `#29924 `__: MAINT: Bump github/codeql-action from 4.30.7 to 4.30.8 +* `#29925 `__: MAINT: Avoid assumptions about how memory is allocated +* `#29927 `__: TST: Add unit test for RISC-V CPU features +* `#29930 `__: DOC: Completed and fixed PR #29578 +* `#29931 `__: ENH: In spec registration, allow looking up ufuncs in any module. 
+* `#29934 `__: CI: Use POWER10 GHA runner for NumPy test jobs +* `#29935 `__: CI: Run mypy on Python 3.14 and ignore more paths +* `#29936 `__: MAINT: Bump astral-sh/setup-uv from 7.0.0 to 7.1.0 +* `#29937 `__: MAINT: Bump pypa/cibuildwheel from 3.2.0 to 3.2.1 +* `#29938 `__: MAINT: Bump int128/hide-comment-action from 1.44.0 to 1.46.0 +* `#29939 `__: MAINT: Bump actions/dependency-review-action from 4.8.0 to 4.8.1 +* `#29942 `__: TST: Convert mixed_types_structured to method +* `#29944 `__: BUG: Fix np.strings.slice if stop=None or start and stop >= len +* `#29947 `__: BUG: support axis sequence in ``np.trim_zeros`` +* `#29948 `__: STY: rename ``@classmethod`` arg to ``cls`` +* `#29951 `__: MAINT: replace use of ``asanyarray`` with ``out=...`` to keep... +* `#29952 `__: TYP: add ``__class_getitem__`` to ``bool`` and ``datetime64`` +* `#29954 `__: TYP: fix inconsistent ``float64.__getformat__`` stub +* `#29956 `__: TYP: fix return type annotation for normalize_axis_tuple utility +* `#29957 `__: TST: Remove recwarn from tests +* `#29958 `__: TYP: Fix inconsistent ``__all__`` stubs +* `#29959 `__: TYP: stub ``numpy.ma.testutils`` +* `#29960 `__: DOC: fix formatting in ``np.percentile`` docstring +* `#29961 `__: TYP: update the ``finfo`` stubs +* `#29962 `__: MAINT: remove obsolete ``generic.tostring`` method descriptor... 
+* `#29963 `__: MAINT: Remove removed array methods +* `#29964 `__: TYP: add missing ``generic`` methods +* `#29965 `__: TYP: mark ``flexible`` as ``@final`` +* `#29966 `__: TYP: minor fixes in ``__pow__`` methods +* `#29967 `__: TYP: improved ``busdaycalendar`` annotations +* `#29968 `__: TYP: missing ``vectorize`` default argument +* `#29969 `__: TYP: fix stubtest errors in ``lib._function_base_impl`` +* `#29970 `__: BUG: Fix resize when it contains references +* `#29971 `__: TYP: update ``ScalarType`` type +* `#29972 `__: TYP: expand ``TypedDict`` kwargs in ``full`` to appease stubtest +* `#29973 `__: DEP: Remove deprecated ``interpolation`` parameter from quantile/percentile +* `#29976 `__: TYP: fix ``random.Generator.shuffle`` input type +* `#29978 `__: DEP: remove ``in1d`` +* `#29980 `__: DEP: remove ``ndindex.ndincr`` (deprecated since 1.20) +* `#29981 `__: TYP: change ``ndenumerate.__new__`` into ``__init__`` +* `#29982 `__: TYP: change ``nditer.__new__`` into ``__init__`` and tighten... +* `#29983 `__: TYP: minor fixes and improvements in ``record`` and ``recarray`` +* `#29984 `__: DEP: remove the ``fix_imports`` parameter from ``save()`` +* `#29985 `__: MAINT: Remove ``_core.MachAr`` remnants +* `#29986 `__: DEP: Remove ``ndarray.ctypes.get_\*`` methods (deprecated since... +* `#29988 `__: MAINT: remove remnants of ``linalg.linalg`` and ``fft.helper`` +* `#29989 `__: BUG: Fix np.strings.slice if start > stop +* `#29991 `__: TYP: some minor fixes for the constants in ``_core.multiarray`` +* `#29992 `__: DOC: update SIMD build options to cover riscv64 +* `#29993 `__: MAINT: avoid namespace pollution in ``_core._type_aliases`` +* `#29994 `__: DEP: remove the ``newshape`` parameter from ``reshape()`` +* `#29996 `__: MAINT: Update main after the NumPy 2.3.4 release. 
+* `#29997 `__: MAINT: remove deprecated in numpy/lib/_function_base_impl.py +* `#29998 `__: MAINT: Update write_release.py +* `#29999 `__: TYP: fix ``char.startswith`` signature +* `#30000 `__: ENH: Add ``stable`` kwarg to ``chararray.argsort`` +* `#30001 `__: TYP: fix ``ndarray.sort(stable=True)`` +* `#30002 `__: TYP: inconsistent ``strings.slice`` default argument for ``stop`` +* `#30003 `__: TYP: remove implicit re-export in ``_core._exceptions`` +* `#30004 `__: TYP: stub ``MesonTemplate.objects_substitution()`` in ``f2py._backends._meson`` +* `#30005 `__: CI, TST: Enable parallel threads testing in macOS CI job +* `#30006 `__: TYP: fix stubtest errors in ``numpy.lib.\*`` +* `#30007 `__: MAINT: Remove ``NDArrayOperatorsMixin.um`` class attribute ``umath``... +* `#30008 `__: DOC: Add concrete Meson build example for NumPy C ufunc extension +* `#30009 `__: TYP: update ``corrcoef`` signature +* `#30011 `__: TYP: ``linalg.svdvals``\ : fix inconsistent signature and add... +* `#30012 `__: MAINT: remove confusing parameter default for ``shape`` in ``reshape`` +* `#30013 `__: TYP: ``linalg.tensordot``\ : fix inconsistent signature and simplify... +* `#30014 `__: TYP: stub ``linalg.lapack_lite.LapackError`` +* `#30015 `__: MAINT: Bump github/codeql-action from 4.30.8 to 4.30.9 +* `#30018 `__: TYP: fix stubtest errors in ``numpy.ma`` +* `#30019 `__: MAINT, TST: Increase tolerance in fft test. +* `#30020 `__: DOC: Correct typos in numpy API documentation +* `#30021 `__: DEP: Remove ``delimitor`` kwarg from ``ma.mrecords.fromtextfile`` +* `#30030 `__: MAINT: Bump astral-sh/setup-uv from 7.1.0 to 7.1.1 +* `#30031 `__: TYP: fix stubtest errors in ``numpy.polynomial.\*`` +* `#30032 `__: TYP: ``testing.check_support_sve``\ : fix inconsistent parameter... 
+* `#30033 `__: TYP: fix stubtest error in ``numpy.typing`` +* `#30034 `__: TYP: Add type annotations for ASIMD, NEON, and RVV targets +* `#30035 `__: DEV: add a ``spin stubtest`` command +* `#30036 `__: TYP: restore abstract scalar type constructor parameters +* `#30039 `__: DEV: Set correct ``PYTHONPATH`` in ``spin stubtest`` +* `#30040 `__: DOC: Clarify signed vs unsigned ``intptr_t`` vs ``uintptr_t``... +* `#30043 `__: CI, TYP: stubtest +* `#30044 `__: MAINT: bump ``hypothesis`` to ``6.142.2`` +* `#30045 `__: DEV: separate stubtest allowlist for py312+ +* `#30049 `__: BLD: update scipy-openblas, use -Dpkg_config_path +* `#30050 `__: CI: Skip test runs if all changes are docs or stubs +* `#30051 `__: CI: Python 3.14 stable +* `#30052 `__: TYP, STY: ``polynomial``\ : reformat the stubs +* `#30053 `__: TYP: Type-checking the stubs +* `#30054 `__: BUG: allow division between object-dtype arrays and timedelta... +* `#30055 `__: TYP: Annotate ``ma.array``\ , ``ma.asarray``\ , and ``ma.asanyarray`` +* `#30057 `__: DOC: Remove nonexistent ``order`` parameter docs of ``ma.asanyarray`` +* `#30058 `__: BUG: fix int left shift UB in CPU feature detection +* `#30060 `__: TYP: ``polynomial.polyutils``\ : fix callable type signatures +* `#30061 `__: CI, TYP: Fix stubtest CI failures on py311 +* `#30064 `__: TST: Remove unnecessary test__datasource thread-unsafe markers +* `#30065 `__: TYP: ``polynomial``\ : Simplify ``chebpts{1,2}`` function stubs +* `#30067 `__: TYP: ``numpy.ma``\ : Annotate 27 functions related to masks and... +* `#30068 `__: MAINT: remove deprecated ``style`` argument and deprecations... +* `#30071 `__: BUG: avoid negating INT_MIN in PyArray_Round implementation +* `#30073 `__: DOC: Correct a typo: an 1d -> a 1d +* `#30074 `__: DOC: Fix a couple typos in generalized-ufuncs.rst. +* `#30077 `__: BUG: prefer passing a pointer to the helper function to avoid... 
+* `#30080 `__: MAINT: Bump actions/upload-artifact from 4.6.2 to 5.0.0 +* `#30081 `__: MAINT: Bump github/codeql-action from 4.30.9 to 4.31.0 +* `#30082 `__: MAINT: Bump astral-sh/setup-uv from 7.1.1 to 7.1.2 +* `#30083 `__: DOC: Fix the first small 'process_core_dims()' example. +* `#30084 `__: TYP: ``numpy.ma``\ : Annotate the callable wrapper classes +* `#30091 `__: BUG, TYP: Fix ``ma.core._frommethod`` function signatures +* `#30093 `__: DOC: Correct grammatical usage like a/an +* `#30097 `__: CI: Update ARM job (armhf_test) to use Ubuntu 24.04 +* `#30099 `__: BUG, TYP: Fix ``ma.core._convert2ma`` function signatures +* `#30100 `__: BLD: use blobless checkout on CircleCI +* `#30101 `__: TST: Add thread-safe testing guidelines +* `#30102 `__: ENH: Make FPE blas check a runtime check for all apple arm systems +* `#30104 `__: BUG, TYP: ufunc method signatures +* `#30106 `__: MAINT: Bump github/codeql-action from 4.31.0 to 4.31.2 +* `#30108 `__: TYP: shape-type-aware ``swapaxes`` +* `#30111 `__: DOC: Add a plot to the 'unwrap' docstring. +* `#30114 `__: BUG, TYP: ``ndarray`` method runtime signatures +* `#30118 `__: CI: disable flaky ubuntu UBsan CI job +* `#30121 `__: BUG, TYP: scalar-type constructor runtime signatures +* `#30124 `__: BUG, TYP: ``flatiter`` method runtime signatures, and better... +* `#30125 `__: BUG: Fix handling by ``unique`` of signed zero in complex types. +* `#30126 `__: BUG: ``nditer`` runtime signatures +* `#30127 `__: DOC: remove outdated notes on how to build against numpy in conda-forge +* `#30128 `__: BUG: Avoid compilation error of wrapper file generated with SWIG... +* `#30132 `__: BLD: use scipy-openblas 0.3.30.7 +* `#30137 `__: BUG: ``broadcast`` runtime signatures +* `#30138 `__: BUG: array construction function runtime signatures +* `#30139 `__: MAINT,BUG: make later arguments in array2string keyword only. +* `#30140 `__: BUG, DOC, TYP: ``empty`` and ``zeros`` runtime signatures, and... 
+* `#30141 `__: MAINT: fix math markup (\times -> \\times) in numpy.linalg.multidot... +* `#30142 `__: MAINT: Migrate einsum.c.src to C++ (einsum.cpp) +* `#30143 `__: BUG, TYP: ``_core.multiarray.\*`` function runtime signatures +* `#30147 `__: BUG, TYP: add the remaining ``_core.multiarray`` function runtime... +* `#30148 `__: DOC: Fix Returns section formatting in linalg.qr and linalg.svd +* `#30149 `__: MAINT: Not show signature in git_version +* `#30153 `__: BUG: decref on error in PyArray_NewFromDescr (#30152) +* `#30154 `__: BUG: update requires to requirements in numpy.multiarray see... +* `#30155 `__: BUG, DOC: ``ndarray`` dunder method runtime signatures and missing... +* `#30160 `__: TYP: fix an invalid default value for ``array``\ 's ``ndmax``... +* `#30161 `__: ENH: Run SWIG unit tests in CI action +* `#30163 `__: ENH: Add ``order`` parameter to ``np.ma.asanyarray`` +* `#30164 `__: BUG: ``numpy.random.\*`` class runtime signatures +* `#30165 `__: MAINT: some ``numpy.polynomial.\*`` namespace pollution cleanup +* `#30166 `__: CI: add check for numpy-release version of scipy-openblas +* `#30168 `__: TYP, DEP: ``numpy.fix`` pending deprecation +* `#30169 `__: BUG: ``np.dtype`` and ``np.dtypes.\*`` runtime signatures +* `#30170 `__: ENH: Reduce compute time for ``tobytes`` in non-contiguos paths +* `#30175 `__: ENH: Updates for the ``spin bench`` command. +* `#30176 `__: BUG: Fix check of PyMem_Calloc return value. +* `#30179 `__: MAINT,API: Introduce __numpy_dtype__ and fix dtype attribute... +* `#30183 `__: DOC: Corrected grammatical issues in code comments +* `#30190 `__: MAINT: ``ma.asanyarray``\ : use ``order=None`` as default +* `#30191 `__: MAINT: Bump int128/hide-comment-action from 1.46.0 to 1.47.0 +* `#30193 `__: BUG, DOC: ``np.generic`` missing method runtime signatures and... +* `#30196 `__: DOC: Fix some broken refs and Typos. 
+* `#30197 `__: ENH,MAINT: rewrite np.fix to use np.trunc internally +* `#30199 `__: DOC: update result_type docs to link to promotion rules +* `#30201 `__: ENH: Detect Fortran vs C order in array_assign_boolean_subscript +* `#30202 `__: MAINT: Bump actions/dependency-review-action from 4.8.1 to 4.8.2 +* `#30203 `__: MAINT: Bump astral-sh/setup-uv from 7.1.2 to 7.1.3 +* `#30206 `__: DOC: an Mercurial -> a Mercurial +* `#30208 `__: DOC: Release notes for the runtime signature changes +* `#30209 `__: MAINT: Bump pypa/cibuildwheel from 3.2.1 to 3.3.0 +* `#30211 `__: ENH: ``ufunc.__signature__`` +* `#30213 `__: DOC: fix links for newly rebuilt numpy-tutorials site +* `#30214 `__: BUG: Fix build on s390x with clang +* `#30219 `__: MAINT: Bump github/codeql-action from 4.31.2 to 4.31.3 +* `#30221 `__: TYP: Annotate remaining ``ma.MaskedArray`` methods +* `#30222 `__: CI: remove (mainly windows) jobs from Azure pipelines +* `#30223 `__: STY: fix ``ma.MaskedAArray.tolist`` docstring indentation +* `#30224 `__: TYP: ``ravel``\ : less awkward return types +* `#30226 `__: TYP: stub ``ma.core.get_masked_subclass`` +* `#30227 `__: CI: fixes https://github.com/numpy/numpy/security/code-scanning/364 +* `#30228 `__: BUG: fix data race in ``wrapping_auxdata_freelist`` by making... +* `#30229 `__: ENH, TYP: transparent ``ma.extras._fromnxfunction`` runtime signatures +* `#30231 `__: TYP: Shape-typing in ``lib._twodim_base_impl`` +* `#30232 `__: CI: removes azure pipelines +* `#30233 `__: TYP: ``_core.numeric``\ : shape-typing and fixed overlapping... 
+* `#30234 `__: BUG: fix data race in ``PyArray_DescrHash`` +* `#30235 `__: MAINT: undo change to ``fromstring`` text signature for 2.4.0 +* `#30239 `__: DOC: Correct an equation error in ``numpy.random.Generator.pareto`` +* `#30242 `__: BUG: fix einsum ``optimize=True`` parsing error +* `#30243 `__: BUG: Add missing ``PyErr_Occurred()`` check to fast-path +* `#30246 `__: TYP: ``lib._function_base_impl``\ : many typing improvements +* `#30247 `__: DOC: Update wording in numpy.coremath +* `#30248 `__: DOC: remove mention of 'skip azp' since we no longer use azure +* `#30252 `__: MAINT: Bump actions/checkout from 5.0.0 to 5.0.1 +* `#30253 `__: MAINT: Bump github/codeql-action from 4.31.3 to 4.31.4 +* `#30255 `__: BUG: always ignore FPE when Accelerate is the BLAS backend +* `#30256 `__: CI: update ``paths-ignore`` for mypy and wheels workflows +* `#30259 `__: TST: mark tests which call ``gc.collect()`` as thread unsafe +* `#30261 `__: TYP: fix shape-type of structured array fields +* `#30263 `__: TST: scalar fast path multithreaded test +* `#30266 `__: ENH: New-style sorting for StringDType +* `#30270 `__: ENH: Use descriptor rather than custom ``tp_getattro`` +* `#30271 `__: TST: Join threads in ``test_printoptions_thread_safety`` +* `#30273 `__: MAINT: Bump actions/checkout from 5.0.1 to 6.0.0 +* `#30276 `__: MAINT: Bump astral-sh/setup-uv from 7.1.3 to 7.1.4 +* `#30277 `__: BUG: Fix misleading ValueError in convolve on empty inputs due... 
+* `#30278 `__: BUG: fix np.resize refchk on python 3.14 +* `#30279 `__: MAINT: refactor unary temporary elision check +* `#30282 `__: DEP, TYP: ``ndarray.shape`` setter pending deprecation +* `#30284 `__: DEP: deprecate ``numpy.lib.user_array.container`` +* `#30286 `__: MAINT: add ``matmul`` to ``_core.umath.__all__`` +* `#30288 `__: MAINT: Bump github/codeql-action from 4.31.4 to 4.31.5 +* `#30289 `__: TYP: ``_core.overrides.set_module`` implicit re-export +* `#30290 `__: TYP: move the ``normalize_axis_\*`` function definitions from... +* `#30291 `__: TYP: ``lib._function_base_impl._quantile_ureduce_func`` inline... +* `#30293 `__: TYP: move ``vectorize`` stubs to ``lib._function_base_impl`` +* `#30294 `__: TYP: ``_core.\*``\ : stubs for some private functions and constants +* `#30295 `__: MAINT: remove ``lib._shape_base_impl._replace_zero_by_x_arrays`` +* `#30296 `__: TYP: ``lib.\*``\ : stubs for some private functions used by ``_function_base_imp``... +* `#30297 `__: MAINT: ``broadcast_shapes``\ : update presumed ``NPY_MAXARGS``... +* `#30300 `__: BUG: Fix RecursionError and raise ValueError for unmatched parentheses +* `#30303 `__: MAINT: Bump actions/setup-python from 6.0.0 to 6.1.0 +* `#30310 `__: MAINT: avoid unused variable warnings in dtype tests +* `#30312 `__: MAINT: Implement some RAII classes and use them in stringdtype/casts.cpp +* `#30313 `__: DOC: record a data -> record a data point +* `#30314 `__: BUG: Fix descriptor changes related build/parse value issues... +* `#30318 `__: DOC: Fix duplicate ``import pytest`` in testing documentation... 
+* `#30321 `__: TYP: ``__numpy_dtype__`` +* `#30324 `__: TYP: ``ndenumerate`` generic type parameter default +* `#30325 `__: DOC, TYP: Expand the 2.3 ``numpy.typing`` deprecation docs +* `#30326 `__: TYP: ``ma.mrecords.MaskedRecords`` generic type parameter defaults +* `#30327 `__: TYP: ``_core._umath_tests`` module stubs +* `#30347 `__: REL: Prepare for the NumPy 2.4.0rc1 release +* `#30377 `__: MAINT: don't assert RecursionError in monster dtype test (#30375) +* `#30378 `__: CI: bump FreeBSD from 14.2 to 14.3 +* `#30398 `__: MAINT: Use RAII objects in unique.cpp to ensure safe resource... +* `#30399 `__: BUG: raise BufferError when creating dlpack with wrong device... +* `#30400 `__: BUG: fix free-threaded races in RandomState +* `#30401 `__: BUG: fix reduction issue in weighted quantile (#30070) +* `#30403 `__: SIMD, BLD: Fix Highway target attribute build failure on ppc64... +* `#30408 `__: BUG: Add missing return status check of NpyIter_EnableExternalLoop()... +* `#30419 `__: DOC: Improve cross-links in thread safety documentation (#30373) +* `#30420 `__: BUG: fix double evaluation in PyArrayScalar_RETURN_BOOL_FROM_LONG... +* `#30432 `__: BUG: fix remaining data races in mtrand.pyx (#30426) +* `#30459 `__: TYP: restore ``generic.__hash__`` (#30456) diff --git a/doc/source/release/2.4.0-notes.rst b/doc/source/release/2.4.0-notes.rst index 29a7e5ce6073..b6afff63f5f1 100644 --- a/doc/source/release/2.4.0-notes.rst +++ b/doc/source/release/2.4.0-notes.rst @@ -4,16 +4,725 @@ NumPy 2.4.0 Release Notes ========================== +The NumPy 2.4.0 release continues the work to improve free threaded Python +support, user dtypes implementation, and annotations. There are many expired +deprecations and bug fixes as well. 
+
+This release supports Python versions 3.11-3.14.
+
 Highlights
 ==========
 
-*We'll choose highlights for this release near the end of the release cycle.*
+Apart from annotations and the ``same_value`` kwarg, the 2.4 highlights are
+mostly of interest to downstream developers. They should help in implementing
+new user dtypes.
+
+* Many annotation improvements. In particular, runtime signature introspection.
+
+* New ``casting`` kwarg ``'same_value'`` for casting by value.
+
+* New ``PyUFunc_AddLoopsFromSpecs`` function that can be used to add user sort
+  loops using the ``ArrayMethod`` API.
+
+* New ``__numpy_dtype__`` protocol.
+
+Deprecations
+============
+
+Setting the ``strides`` attribute is deprecated
+-----------------------------------------------
+Setting the ``strides`` attribute is now deprecated since mutating
+an array is unsafe if an array is shared, especially by multiple
+threads. As an alternative, you can create a new view (no copy) via:
+
+* ``np.lib.stride_tricks.sliding_window_view`` if applicable,
+* ``np.lib.stride_tricks.as_strided`` for the general case,
+* or the ``np.ndarray`` constructor (``buffer`` is the original array) for a
+  light-weight version.
+
+(`gh-28925 <https://github.com/numpy/numpy/pull/28925>`__)
+
+Positional ``out`` argument to ``np.maximum``, ``np.minimum`` is deprecated
+---------------------------------------------------------------------------
+Passing the output array ``out`` positionally to ``numpy.maximum`` and
+``numpy.minimum`` is deprecated. For example, ``np.maximum(a, b, c)`` will emit
+a deprecation warning, since ``c`` is treated as the output buffer rather than
+a third input.
+
+Always pass the output with the keyword form, e.g. ``np.maximum(a, b,
+out=c)``. This makes intent clear and simplifies type annotations.
+
+(`gh-29052 <https://github.com/numpy/numpy/pull/29052>`__)
+
+``align=`` must be passed as boolean to ``np.dtype()``
+------------------------------------------------------
+When creating a new ``dtype`` a ``VisibleDeprecationWarning`` will be given if
+``align=`` is not a boolean. 
This is mainly to prevent accidentally passing a +subarray align flag where it has no effect, such as ``np.dtype("f8", 3)`` +instead of ``np.dtype(("f8", 3))``. We strongly suggest to always pass +``align=`` as a keyword argument. + +(`gh-29301 `__) + +Assertion and warning control utilities are deprecated +------------------------------------------------------ +``np.testing.assert_warns`` and ``np.testing.suppress_warnings`` are +deprecated. Use ``warnings.catch_warnings``, ``warnings.filterwarnings``, +``pytest.warns``, or ``pytest.filterwarnings`` instead. + +(`gh-29550 `__) + +``np.fix`` is pending deprecation +--------------------------------- +The ``numpy.fix`` function will be deprecated in a future release. It is +recommended to use ``numpy.trunc`` instead, as it provides the same +functionality of truncating decimal values to their integer parts. Static type +checkers might already report a warning for the use of ``numpy.fix``. + +(`gh-30168 `__) + +in-place modification of ``ndarray.shape`` is pending deprecation +----------------------------------------------------------------- +Setting the ``ndarray.shape`` attribute directly will be deprecated in a future +release. Instead of modifying the shape in place, it is recommended to use the +``numpy.reshape`` function. Static type checkers might already report a +warning for assignments to ``ndarray.shape``. + +(`gh-30282 `__) + +Deprecation of ``numpy.lib.user_array.container`` +------------------------------------------------- +The ``numpy.lib.user_array.container`` class is deprecated and will be removed +in a future version. + +(`gh-30284 `__) + + +Expired deprecations +==================== + +Removed deprecated ``MachAr`` runtime discovery mechanism. 
+---------------------------------------------------------- + +(`gh-29836 `__) + +Raise ``TypeError`` on attempt to convert array with ``ndim > 0`` to scalar +--------------------------------------------------------------------------- +Conversion of an array with ``ndim > 0`` to a scalar was deprecated in NumPy +1.25. Now, attempting to do so raises ``TypeError``. Ensure you extract a +single element from your array before performing this operation. + +(`gh-29841 `__) + +Removed numpy.linalg.linalg and numpy.fft.helper +------------------------------------------------ +The following were deprecated in NumPy 2.0 and have been moved to private +modules: + +* ``numpy.linalg.linalg`` + Use ``numpy.linalg`` instead. + +* ``numpy.fft.helper`` + Use ``numpy.fft`` instead. + +(`gh-29909 `__) + +Removed ``interpolation`` parameter from quantile and percentile functions +-------------------------------------------------------------------------- +The ``interpolation`` parameter was deprecated in NumPy 1.22.0 and has been +removed from the following functions: + +* ``numpy.percentile`` +* ``numpy.nanpercentile`` +* ``numpy.quantile`` +* ``numpy.nanquantile`` + +Use the ``method`` parameter instead. + +(`gh-29973 `__) + +Removed ``numpy.in1d`` +---------------------- +``numpy.in1d`` has been deprecated since NumPy 2.0 and is now removed in favor of ``numpy.isin``. + +(`gh-29978 `__) + +Removed ``numpy.ndindex.ndincr()`` +---------------------------------- +The ``ndindex.ndincr()`` method has been deprecated since NumPy 1.20 and is now +removed; use ``next(ndindex)`` instead. + +(`gh-29980 `__) + +Removed ``fix_imports`` parameter from ``numpy.save`` +----------------------------------------------------- +The ``fix_imports`` parameter was deprecated in NumPy 2.1.0 and is now removed. +This flag has been ignored since NumPy 1.17 and was only needed to support +loading files in Python 2 that were written in Python 3. 
+ +(`gh-29984 `__) + +Removal of four undocumented ``ndarray.ctypes`` methods +------------------------------------------------------- +Four undocumented methods of the ``ndarray.ctypes`` object have been removed: + +* ``_ctypes.get_data()`` (use ``_ctypes.data`` instead) +* ``_ctypes.get_shape()`` (use ``_ctypes.shape`` instead) +* ``_ctypes.get_strides()`` (use ``_ctypes.strides`` instead) +* ``_ctypes.get_as_parameter()`` (use ``_ctypes._as_parameter_`` instead) + +These methods have been deprecated since NumPy 1.21. + +(`gh-29986 `__) + +Removed ``newshape`` parameter from ``numpy.reshape`` +----------------------------------------------------- +The ``newshape`` parameter was deprecated in NumPy 2.1.0 and has been +removed from ``numpy.reshape``. Pass it positionally or use ``shape=`` +on newer NumPy versions. + +(`gh-29994 `__) + +Removal of deprecated functions and arguments +--------------------------------------------- +The following long-deprecated APIs have been removed: + +* ``numpy.trapz`` — deprecated since NumPy 2.0 (2023-08-18). Use ``numpy.trapezoid`` or + ``scipy.integrate`` functions instead. + +* ``disp`` function — deprecated from 2.0 release and no longer functional. Use + your own printing function instead. + +* ``bias`` and ``ddof`` arguments in ``numpy.corrcoef`` — these had no effect + since NumPy 1.10. + +(`gh-29997 `__) + +Removed ``delimitor`` parameter from ``numpy.ma.mrecords.fromtextfile()`` +------------------------------------------------------------------------- +The ``delimitor`` parameter was deprecated in NumPy 1.22.0 and has been +removed from ``numpy.ma.mrecords.fromtextfile()``. Use ``delimiter`` instead. + +(`gh-30021 `__) + +``numpy.array2string`` and ``numpy.sum`` deprecations finalized +--------------------------------------------------------------- +The following long-deprecated APIs have been removed or converted to errors: + +* The ``style`` parameter has been removed from ``numpy.array2string``. 
+  This argument had no effect since NumPy 1.14.0. Any arguments following
+  it, such as ``formatter``, have now been made keyword-only.
+
+* Calling ``np.sum(generator)`` directly on a generator object now raises a
+  ``TypeError``. This behavior was deprecated in NumPy 1.15.0. Use
+  ``np.sum(np.fromiter(generator))`` or the Python ``sum`` builtin instead.
+
+(`gh-30068 <https://github.com/numpy/numpy/pull/30068>`__)
+
+
+Compatibility notes
+===================
+
+* NumPy's C extension modules have begun to use multi-phase initialisation, as
+  defined by PEP 489. As part of this, a new explicit check has been added that
+  each such module is only imported once per Python process. This comes with
+  the side-effect that deleting ``numpy`` from ``sys.modules`` and re-importing
+  it will now fail with an ``ImportError``. This has always been unsafe, with
+  unexpected side-effects, though did not previously raise an error.
+
+  (`gh-29030 <https://github.com/numpy/numpy/pull/29030>`__)
+
+* ``numpy.round`` now always returns a copy. Previously, it returned a view
+  for integer inputs for ``decimals >= 0`` and a copy in all other cases.
+  This change brings ``round`` in line with ``ceil``, ``floor`` and ``trunc``.
+
+  (`gh-29137 <https://github.com/numpy/numpy/pull/29137>`__)
+
+* Type-checkers will no longer accept calls to ``numpy.arange`` with
+  ``start`` as a keyword argument. This was done for compatibility with
+  the Array API standard. At runtime it is still possible to use
+  ``numpy.arange`` with ``start`` as a keyword argument.
+
+  (`gh-30147 <https://github.com/numpy/numpy/pull/30147>`__)
+
+* The macro ``NPY_ALIGNMENT_REQUIRED`` has been removed. The macro was defined
+  in the ``npy_cpu.h`` file, so might be regarded as semi-public. As it turns
+  out, with modern compilers and hardware it is almost always the case that
+  alignment is required, so NumPy no longer uses the macro. It is unlikely
+  anyone uses it, but you might want to compile with the ``-Wundef`` flag or
+  equivalent to be sure. 
+ + (`gh-29094 `__) + + +C API changes +============= + +The NPY_SORTKIND enum has been enhanced with new variables +---------------------------------------------------------- +This is of interest if you are using ``PyArray_Sort`` or ``PyArray_ArgSort``. +We have changed the semantics of the old names in the ``NPY_SORTKIND`` enum and +added new ones. The changes are backward compatible, and no recompilation is +needed. The new names of interest are: + +* ``NPY_SORT_DEFAULT`` -- default sort (same value as ``NPY_QUICKSORT``) +* ``NPY_SORT_STABLE`` -- the sort must be stable (same value as ``NPY_MERGESORT``) +* ``NPY_SORT_DESCENDING`` -- the sort must be descending + +The semantic change is that ``NPY_HEAPSORT`` is mapped to ``NPY_QUICKSORT`` when used. +Note that ``NPY_SORT_DESCENDING`` is not yet implemented. + +(`gh-29642 `__) + +New ``NPY_DT_get_constant`` slot for DType constant retrieval +------------------------------------------------------------- +A new slot ``NPY_DT_get_constant`` has been added to the DType API, allowing +dtype implementations to provide constant values such as machine limits and +special values. The slot function has the signature:: + + int get_constant(PyArray_Descr *descr, int constant_id, void *ptr) + +It returns 1 on success, 0 if the constant is not available, or -1 on error. +The function is always called with the GIL held and may write to unaligned memory. + +Integer constants (marked with the ``1 << 16`` bit) return ``npy_intp`` values, +while floating-point constants return values of the dtype's native type. + +Implementing this can be used by user DTypes to provide ``numpy.finfo`` values. + +(`gh-29836 `__) + +A new ``PyUFunc_AddLoopsFromSpecs`` convenience function has been added to the C API. +------------------------------------------------------------------------------------- +This function allows adding multiple ufunc loops from their specs in one call +using a NULL-terminated array of ``PyUFunc_LoopSlot`` structs. 
It allows +registering sorting and argsorting loops using the new ArrayMethod API. + +(`gh-29900 `__) + + +New Features +============ + +* Let ``np.size`` accept multiple axes. + + (`gh-29240 `__) + +* Extend ``numpy.pad`` to accept a dictionary for the ``pad_width`` argument. + + (`gh-29273 `__) + +``'same_value'`` for casting by value +------------------------------------- +The ``casting`` kwarg now has a ``'same_value'`` option that checks the actual +values can be round-trip cast without changing value. Currently it is only +implemented in ``ndarray.astype``. This will raise a ``ValueError`` if any of the +values in the array would change as a result of the cast, including rounding of +floats or overflowing of ints. + +(`gh-29129 `__) + +``StringDType`` fill_value support in ``numpy.ma.MaskedArray`` +-------------------------------------------------------------- +Masked arrays now accept and preserve a Python ``str`` as their ``fill_value`` +when using the variable‑width ``StringDType`` (kind ``'T'``), including through +slicing and views. The default is ``'N/A'`` and may be overridden by any valid +string. This fixes issue `gh‑29421 `__ +and was implemented in pull request `gh‑29423 `__. + +(`gh-29423 `__) + +``ndmax`` option for ``numpy.array`` +------------------------------------ +The ``ndmax`` option is now available for ``numpy.array``. +It explicitly limits the maximum number of dimensions created from nested sequences. + +This is particularly useful when creating arrays of list-like objects with ``dtype=object``. +By default, NumPy recurses through all nesting levels to create the highest possible +dimensional array, but this behavior may not be desired when the intent is to preserve +nested structures as objects. The ``ndmax`` parameter provides explicit control over +this recursion depth. + +.. 
code-block:: python + + # Default behavior: Creates a 2D array + >>> a = np.array([[1, 2], [3, 4]], dtype=object) + >>> a + array([[1, 2], + [3, 4]], dtype=object) + >>> a.shape + (2, 2) + + # With ndmax=1: Creates a 1D array + >>> b = np.array([[1, 2], [3, 4]], dtype=object, ndmax=1) + >>> b + array([list([1, 2]), list([3, 4])], dtype=object) + >>> b.shape + (2,) + +(`gh-29569 `__) + +Warning emitted when using ``where`` without ``out`` +---------------------------------------------------- +Ufuncs called with a ``where`` mask and without an ``out`` positional or kwarg will +now emit a warning. This usage tends to trip up users who expect some value in +output locations where the mask is ``False`` (the ufunc will not touch those +locations). The warning can be suppressed by using ``out=None``. + +(`gh-29813 `__) + +DType sorting and argsorting supports the ArrayMethod API +--------------------------------------------------------- +User-defined dtypes can now implement custom sorting and argsorting using the +``ArrayMethod`` API. This mechanism can be used in place of the +``PyArray_ArrFuncs`` slots which may be deprecated in the future. + +The sorting and argsorting methods are registered by passing the arraymethod +specs that implement the operations to the new ``PyUFunc_AddLoopsFromSpecs`` +function. See the ``ArrayMethod`` API documentation for details. + +(`gh-29900 `__) + +New ``__numpy_dtype__`` protocol +-------------------------------- +NumPy now has a new ``__numpy_dtype__`` protocol. NumPy will check +for this attribute when converting to a NumPy dtype via ``np.dtype(obj)`` +or any ``dtype=`` argument. + +Downstream projects are encouraged to implement this for all dtype like +objects which may previously have used a ``.dtype`` attribute that returned +a NumPy dtype. +We expect to deprecate ``.dtype`` in the future to prevent interpreting +array-like objects with a ``.dtype`` attribute as a dtype. 
+If you wish you can implement ``__numpy_dtype__`` to ensure an earlier +warning or error (``.dtype`` is ignored if this is found). + +(`gh-30179 `__) + + +Improvements +============ + +Fix ``flatiter`` indexing edge cases +------------------------------------ + +The ``flatiter`` object now shares the same index preparation logic as +``ndarray``, ensuring consistent behavior and fixing several issues where +invalid indices were previously accepted or misinterpreted. + +Key fixes and improvements: + +* Stricter index validation + + - Boolean non-array indices like ``arr.flat[[True, True]]`` were + incorrectly treated as ``arr.flat[np.array([1, 1], dtype=int)]``. + They now raise an index error. Note that indices that match the + iterator's shape are expected to not raise in the future and be + handled as regular boolean indices. Use ``np.asarray()`` if + you want to match that behavior. + - Float non-array indices were also cast to integer and incorrectly + treated as ``arr.flat[np.array([1.0, 1.0], dtype=int)]``. This is now + deprecated and will be removed in a future version. + - 0-dimensional boolean indices like ``arr.flat[True]`` are also + deprecated and will be removed in a future version. + +* Consistent error types: + + Certain invalid ``flatiter`` indices that previously raised ``ValueError`` + now correctly raise ``IndexError``, aligning with ``ndarray`` behavior. + +* Improved error messages: + + The error message for unsupported index operations now provides more + specific details, including explicitly listing the valid index types, + instead of the generic ``IndexError: unsupported index operation``. 
+ +(`gh-28590 `__) + +Improved error handling in ``np.quantile`` +------------------------------------------ +`np.quantile` now raises errors if: + +* All weights are zero +* At least one weight is ``np.nan`` +* At least one weight is ``np.inf`` + +(`gh-28595 `__) + +Improved error message for ``assert_array_compare`` +--------------------------------------------------- +The error message generated by ``assert_array_compare`` which is used by functions +like ``assert_allclose``, ``assert_array_less`` etc. now also includes information +about the indices at which the assertion fails. + +(`gh-29112 `__) + +Show unit information in ``__repr__`` for ``datetime64("NaT")`` +------------------------------------------------------------------ +When a ``datetime64`` object is "Not a Time" (NaT), its ``__repr__`` method now +includes the time unit of the datetime64 type. This makes it consistent with +the behavior of a ``timedelta64`` object. + +(`gh-29396 `__) + +Performance increase for scalar calculations +-------------------------------------------- +The speed of calculations on scalars has been improved by about a factor 6 for +ufuncs that take only one input (like ``np.sin(scalar)``), reducing the speed +difference from their ``math`` equivalents from a factor 19 to 3 (the speed +for arrays is left unchanged). + +(`gh-29819 `__) + +``numpy.finfo`` Refactor +------------------------ +The ``numpy.finfo`` class has been completely refactored to obtain floating-point +constants directly from C compiler macros rather than deriving them at runtime. +This provides better accuracy, platform compatibility and corrected +several attribute calculations: + +* Constants like ``eps``, ``min``, ``max``, ``smallest_normal``, and + ``smallest_subnormal`` now come directly from standard C macros (``FLT_EPSILON``, + ``DBL_MIN``, etc.), ensuring platform-correct values. + +* The deprecated ``MachAr`` runtime discovery mechanism has been removed. 
+
+* Derived attributes have been corrected to match standard definitions:
+  ``machep`` and ``negep`` now use ``int(log2(eps))``; ``nexp`` accounts for
+  all exponent patterns; ``nmant`` excludes the implicit bit; and ``minexp``
+  follows the C standard definition.
+
+* ``longdouble`` constants, specifically ``smallest_normal``, now follow the
+  C standard definitions for the respective platform.
+
+* Special handling added for PowerPC's IBM double-double format.
+
+* New test suite added in ``test_finfo.py`` to validate all
+  ``finfo`` properties against expected machine arithmetic values for
+  float16, float32, and float64 types.
+
+(`gh-29836 <https://github.com/numpy/numpy/pull/29836>`__)
+
+Multiple axes are now supported in ``numpy.trim_zeros``
+-------------------------------------------------------
+The ``axis`` argument of ``numpy.trim_zeros`` now accepts a sequence; for example
+``np.trim_zeros(x, axis=(0, 1))`` will trim the zeros from a multi-dimensional
+array ``x`` along axes 0 and 1. This fixes issue
+`gh-29945 <https://github.com/numpy/numpy/issues/29945>`__ and was implemented
+in pull request `gh-29947 <https://github.com/numpy/numpy/pull/29947>`__.
+
+(`gh-29947 <https://github.com/numpy/numpy/pull/29947>`__)
+
+Runtime signature introspection support has been significantly improved
+-----------------------------------------------------------------------
+Many NumPy functions, classes, and methods that previously raised
+``ValueError`` when passed to ``inspect.signature()`` now return meaningful
+signatures. This improves support for runtime type checking, IDE autocomplete,
+documentation generation, and runtime introspection capabilities across the
+NumPy API. 
+ +Over three hundred classes and functions have been updated in total, including, +but not limited to, core classes such as ``ndarray``, ``generic``, ``dtype``, +``ufunc``, ``broadcast``, ``nditer``, etc., most methods of ``ndarray`` and +scalar types, array constructor functions (``array``, ``empty``, ``arange``, +``fromiter``, etc.), all ``ufuncs``, and many other commonly used functions, +including ``dot``, ``concat``, ``where``, ``bincount``, ``can_cast``, and +numerous others. + +(`gh-30208 `__) + + +Performance improvements and changes +==================================== + +Performance improvements to ``np.unique`` for string dtypes +----------------------------------------------------------- +The hash-based algorithm for unique extraction provides an order-of-magnitude +speedup on large string arrays. In an internal benchmark with about 1 billion +string elements, the hash-based np.unique completed in roughly 33.5 seconds, +compared to 498 seconds with the sort-based method – about 15× faster for +unsorted unique operations on strings. This improvement greatly reduces the +time to find unique values in very large string datasets. + +(`gh-28767 `__) + +Rewrite of ``np.ndindex`` using ``itertools.product`` +----------------------------------------------------- +The ``numpy.ndindex`` function now uses ``itertools.product`` internally, +providing significant improvements in performance for large iteration spaces, +while maintaining the original behavior and interface. For example, for an +array of shape (50, 60, 90) the NumPy ``ndindex`` benchmark improves +performance by a factor 5.2. + +(`gh-29165 `__) + +Performance improvements to ``np.unique`` for complex dtypes +------------------------------------------------------------ +The hash-based algorithm for unique extraction now also supports +complex dtypes, offering noticeable performance gains. 
+ +In our benchmarks on complex128 arrays with 200,000 elements, +the hash-based approach was about 1.4–1.5× faster +than the sort-based baseline when there were 20% of unique values, +and about 5× faster when there were 0.2% of unique values. + +(`gh-29537 `__) + + +Changes +======= + +* Multiplication between a string and integer now raises OverflowError instead + of MemoryError if the result of the multiplication would create a string that + is too large to be represented. This follows Python's behavior. + + (`gh-29060 `__) + +* The accuracy of ``np.quantile`` and ``np.percentile`` for 16- and 32-bit + floating point input data has been improved. + + (`gh-29105 `__) + +``unique_values`` for string dtypes may return unsorted data +------------------------------------------------------------ +np.unique now supports hash‐based duplicate removal for string dtypes. +This enhancement extends the hash-table algorithm to byte strings ('S'), +Unicode strings ('U'), and the experimental string dtype ('T', StringDType). +As a result, calling np.unique() on an array of strings will use +the faster hash-based method to obtain unique values. +Note that this hash-based method does not guarantee that the returned unique values will be sorted. +This also works for StringDType arrays containing None (missing values) +when using equal_nan=True (treating missing values as equal). + +(`gh-28767 `__) + +Modulate dispatched x86 CPU features +------------------------------------ +**IMPORTANT**: The default setting for ``cpu-baseline`` on x86 has been raised +to ``x86-64-v2`` microarchitecture. This can be changed to none during build +time to support older CPUs, though SIMD optimizations for pre-2009 processors +are no longer maintained. + +NumPy has reorganized x86 CPU features into microarchitecture-based groups +instead of individual features, aligning with Linux distribution standards and +Google Highway requirements. 
+ +Key changes: + +* Replaced individual x86 features with microarchitecture levels: ``X86_V2``, + ``X86_V3``, and ``X86_V4`` +* Raised the baseline to ``X86_V2`` +* Improved ``-`` operator behavior to properly exclude successor features that + imply the excluded feature +* Added meson redirections for removed feature names to maintain backward + compatibility +* Removed compiler compatibility workarounds for partial feature support (e.g., + AVX512 without mask operations) +* Removed legacy AMD features (XOP, FMA4) and discontinued Intel Xeon Phi + support + +New Feature Group Hierarchy: + +.. list-table:: + :header-rows: 1 + :align: left + + * - Name + - Implies + - Includes + * - ``X86_V2`` + - + - ``SSE`` ``SSE2`` ``SSE3`` ``SSSE3`` ``SSE4_1`` ``SSE4_2`` ``POPCNT`` ``CX16`` ``LAHF`` + * - ``X86_V3`` + - ``X86_V2`` + - ``AVX`` ``AVX2`` ``FMA3`` ``BMI`` ``BMI2`` ``LZCNT`` ``F16C`` ``MOVBE`` + * - ``X86_V4`` + - ``X86_V3`` + - ``AVX512F`` ``AVX512CD`` ``AVX512VL`` ``AVX512BW`` ``AVX512DQ`` + * - ``AVX512_ICL`` + - ``X86_V4`` + - ``AVX512VBMI`` ``AVX512VBMI2`` ``AVX512VNNI`` ``AVX512BITALG`` ``AVX512VPOPCNTDQ`` ``AVX512IFMA`` ``VAES`` ``GFNI`` ``VPCLMULQDQ`` + * - ``AVX512_SPR`` + - ``AVX512_ICL`` + - ``AVX512FP16`` + + +These groups correspond to CPU generations: + +- ``X86_V2``: x86-64-v2 microarchitectures (CPUs since 2009) +- ``X86_V3``: x86-64-v3 microarchitectures (CPUs since 2015) +- ``X86_V4``: x86-64-v4 microarchitectures (AVX-512 capable CPUs) +- ``AVX512_ICL``: Intel Ice Lake and similar CPUs +- ``AVX512_SPR``: Intel Sapphire Rapids and newer CPUs + +.. note:: + On 32-bit x86, ``cx16`` is excluded from ``X86_V2``. + +Documentation has been updated with details on using these new feature groups +with the current meson build system. 
+ +(`gh-28896 `__) + +Fix bug in ``matmul`` for non-contiguous out kwarg parameter +------------------------------------------------------------ +In some cases, if ``out`` was non-contiguous, ``np.matmul`` would cause memory +corruption or a c-level assert. This was new to v2.3.0 and fixed in v2.3.1. + +(`gh-29179 `__) + +``__array_interface__`` with NULL pointer changed +------------------------------------------------- +The array interface now accepts NULL pointers (NumPy will do its own dummy +allocation, though). Previously, these incorrectly triggered an undocumented +scalar path. In the unlikely event that the scalar path was actually desired, +you can (for now) achieve the previous behavior via the correct scalar path by +not providing a ``data`` field at all. + +(`gh-29338 `__) + +``unique_values`` for complex dtypes may return unsorted data +------------------------------------------------------------- +np.unique now supports hash‐based duplicate removal for complex dtypes. This +enhancement extends the hash‐table algorithm to all complex types ('c'), and +their extended precision variants. The hash‐based method provides faster +extraction of unique values but does not guarantee that the result will be +sorted. + +(`gh-29537 `__) + +Sorting ``kind='heapsort'`` now maps to ``kind='quicksort'`` +------------------------------------------------------------ +It is unlikely that this change will be noticed, but if you do see a change in +execution time or unstable argsort order, that is likely the cause. Please let +us know if there is a performance regression. Congratulate us if it is improved +:) + +(`gh-29642 `__) + +``numpy.typing.DTypeLike`` no longer accepts ``None`` +----------------------------------------------------- +The type alias ``numpy.typing.DTypeLike`` no longer accepts ``None``. Instead of + +.. code-block:: python + + dtype: DTypeLike = None + +it should now be + +.. code-block:: python + + dtype: DTypeLike | None = None + +instead. 
+(`gh-29739 `__) -.. if release snippets have been incorporated already, uncomment the follow - line (leave the `.. include:: directive) +The ``npymath`` and ``npyrandom`` libraries now have a ``.lib`` rather than a +``.a`` file extension on win-arm64, for compatibility for building with MSVC +and ``setuptools``. Please note that using these static libraries is +discouraged and for existing projects using it, it's best to use it with a +matching compiler toolchain, which is ``clang-cl`` on Windows on Arm. -.. **Content from release note snippets in doc/release/upcoming_changes:** +(`gh-29750 `__) -.. include:: notes-towncrier.rst From 79c222e55b1e329bc0a241db8eac64d7f4d3131a Mon Sep 17 00:00:00 2001 From: "Christine P. Chai" Date: Sat, 20 Dec 2025 18:31:13 -0800 Subject: [PATCH 1065/1718] Update doc/source/reference/routines.ma.rst [skip azp][skip cirrus][skip actions] Co-authored-by: Sebastian Berg --- doc/source/reference/routines.ma.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/reference/routines.ma.rst b/doc/source/reference/routines.ma.rst index 0004f2264976..c29ccc4a5f24 100644 --- a/doc/source/reference/routines.ma.rst +++ b/doc/source/reference/routines.ma.rst @@ -248,7 +248,7 @@ Conversion operations > to an ndarray -~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~ .. 
autosummary:: :toctree: generated/ From 49e92dae7ef4a410ba8287ce6524d05fb825c864 Mon Sep 17 00:00:00 2001 From: Raghuveer Devulapalli Date: Mon, 22 Dec 2025 21:36:57 +0530 Subject: [PATCH 1066/1718] MNT: use if constexpr for compile-time branch selection --- numpy/_core/src/npysort/quicksort.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/_core/src/npysort/quicksort.cpp b/numpy/_core/src/npysort/quicksort.cpp index 2f5adde17b64..3371c02aef49 100644 --- a/numpy/_core/src/npysort/quicksort.cpp +++ b/numpy/_core/src/npysort/quicksort.cpp @@ -79,7 +79,7 @@ inline bool quicksort_dispatch(T *start, npy_intp num) #if !defined(__CYGWIN__) using TF = typename np::meta::FixedWidth::Type; void (*dispfunc)(TF*, intptr_t) = nullptr; - if (sizeof(T) == sizeof(uint16_t)) { + if constexpr (sizeof(T) == sizeof(uint16_t)) { #if defined(NPY_CPU_AMD64) || defined(NPY_CPU_X86) // x86 32-bit and 64-bit #include "x86_simd_qsort_16bit.dispatch.h" NPY_CPU_DISPATCH_CALL_XB(dispfunc = np::qsort_simd::template QSort, ); @@ -88,7 +88,7 @@ inline bool quicksort_dispatch(T *start, npy_intp num) NPY_CPU_DISPATCH_CALL_XB(dispfunc = np::highway::qsort_simd::template QSort, ); #endif } - else if (sizeof(T) == sizeof(uint32_t) || sizeof(T) == sizeof(uint64_t)) { + else if constexpr (sizeof(T) == sizeof(uint32_t) || sizeof(T) == sizeof(uint64_t)) { #if defined(NPY_CPU_AMD64) || defined(NPY_CPU_X86) // x86 32-bit and 64-bit #include "x86_simd_qsort.dispatch.h" NPY_CPU_DISPATCH_CALL_XB(dispfunc = np::qsort_simd::template QSort, ); From bf31f291c5be88a1f42856ca2f470c53064fd81e Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Mon, 22 Dec 2025 17:20:19 +0100 Subject: [PATCH 1067/1718] REV: Revert part of #30164 (#30500) --- .github/check-warnings/msvc-allowed-warnings.txt | 8 ++++---- numpy/random/bit_generator.pxd | 5 +++++ 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/.github/check-warnings/msvc-allowed-warnings.txt 
b/.github/check-warnings/msvc-allowed-warnings.txt index 4cc6e6ab2124..e3da83492e16 100644 --- a/.github/check-warnings/msvc-allowed-warnings.txt +++ b/.github/check-warnings/msvc-allowed-warnings.txt @@ -15,7 +15,7 @@ C:\a\numpy\numpy\numpy\random\src\pcg64\pcg64.h(342): warning C4146: unary minus operator applied to unsigned type, result still unsigned D:\a\numpy\numpy\numpy\random\src\pcg64\pcg64.h(342): warning C4146: unary minus operator applied to unsigned type, result still unsigned cl : Command line warning D9025 : overriding '/arch:SSE2' with '/arch:AVX2' -numpy/random/_generator.cp312-win32.pyd.p/numpy/random/_generator.pyx.c(26329): warning C4244: 'function': conversion from 'int64_t' to 'double', possible loss of data -numpy/random/_generator.cp312-win32.pyd.p/numpy/random/_generator.pyx.c(38353): warning C4244: 'function': conversion from 'int64_t' to 'double', possible loss of data -numpy/random/_generator.cp312-win_arm64.pyd.p/numpy/random/_generator.pyx.c(26329): warning C4244: 'function': conversion from 'int64_t' to 'double', possible loss of data -numpy/random/_generator.cp312-win_arm64.pyd.p/numpy/random/_generator.pyx.c(38353): warning C4244: 'function': conversion from 'int64_t' to 'double', possible loss of data +numpy/random/_generator.cp312-win32.pyd.p/numpy/random/_generator.pyx.c(26345): warning C4244: 'function': conversion from 'int64_t' to 'double', possible loss of data +numpy/random/_generator.cp312-win32.pyd.p/numpy/random/_generator.pyx.c(38369): warning C4244: 'function': conversion from 'int64_t' to 'double', possible loss of data +numpy/random/_generator.cp312-win_arm64.pyd.p/numpy/random/_generator.pyx.c(26345): warning C4244: 'function': conversion from 'int64_t' to 'double', possible loss of data +numpy/random/_generator.cp312-win_arm64.pyd.p/numpy/random/_generator.pyx.c(38369): warning C4244: 'function': conversion from 'int64_t' to 'double', possible loss of data diff --git a/numpy/random/bit_generator.pxd 
b/numpy/random/bit_generator.pxd index dbaab4721fec..4ba18f17ecb2 100644 --- a/numpy/random/bit_generator.pxd +++ b/numpy/random/bit_generator.pxd @@ -33,3 +33,8 @@ cdef class SeedSequence(): cdef class SeedlessSeedSequence: pass + +# NOTE: This has no implementation and should not be used. It purely exists for +# backwards compatibility, see https://github.com/scipy/scipy/issues/24215. +cdef class SeedlessSequence: + pass From d6f040a0d81d93624e7fb204bc361b2929239fab Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Mon, 22 Dec 2025 18:03:30 +0100 Subject: [PATCH 1068/1718] TYP: ``numpy.select``: allow passing array-like ``default`` (#30501) --- numpy/lib/_function_base_impl.pyi | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/lib/_function_base_impl.pyi b/numpy/lib/_function_base_impl.pyi index 769c321b9988..7a361ff1f876 100644 --- a/numpy/lib/_function_base_impl.pyi +++ b/numpy/lib/_function_base_impl.pyi @@ -519,19 +519,19 @@ def extract(condition: ArrayLike, arr: ArrayLike) -> _Array1D[Incomplete]: ... def select[ArrayT: np.ndarray]( condlist: _SizedIterable[_ArrayLikeBool_co], choicelist: Sequence[ArrayT], - default: _ScalarLike_co = 0, + default: ArrayLike = 0, ) -> ArrayT: ... @overload def select[ScalarT: np.generic]( condlist: _SizedIterable[_ArrayLikeBool_co], choicelist: Sequence[_ArrayLike[ScalarT]] | NDArray[ScalarT], - default: _ScalarLike_co = 0, + default: ArrayLike = 0, ) -> NDArray[ScalarT]: ... @overload def select( condlist: _SizedIterable[_ArrayLikeBool_co], choicelist: Sequence[ArrayLike], - default: _ScalarLike_co = 0, + default: ArrayLike = 0, ) -> np.ndarray: ... 
# keep roughly in sync with `ma.core.copy` From 3befe85e461dd7ee49f8480177745aea552e28a7 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Mon, 22 Dec 2025 18:04:31 +0100 Subject: [PATCH 1069/1718] DOC: ``numpy.select``: fix ``default`` parameter docstring (#30498) --- numpy/lib/_function_base_impl.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 3e0005079104..97bdbf2d048d 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -823,7 +823,7 @@ def select(condlist, choicelist, default=0): choicelist : list of ndarrays The list of arrays from which the output elements are taken. It has to be of the same length as `condlist`. - default : scalar, optional + default : array_like, optional The element inserted in `output` when all conditions evaluate to False. Returns From c2393a54c266f80e3a14b583f4ed841602e1a2cc Mon Sep 17 00:00:00 2001 From: nlegendree Date: Fri, 28 Nov 2025 18:36:28 +0100 Subject: [PATCH 1070/1718] DOC: rework 'Writing custom array containers' guide Migrate the __array__() DiagonalArray example from basics.dispatch.rst to arrays.classes.rst where it documents the class.__array__ interface. Reduce basics.dispatch.rst, removing redundant content already covered in basics.interoperability.rst while preserving unique testing utilities documentation. Update cross-reference in basics.interoperability.rst to point to the new example location. 
See #29258 --- doc/source/reference/arrays.classes.rst | 67 +++++ doc/source/user/basics.dispatch.rst | 300 +------------------- doc/source/user/basics.interoperability.rst | 2 +- 3 files changed, 74 insertions(+), 295 deletions(-) diff --git a/doc/source/reference/arrays.classes.rst b/doc/source/reference/arrays.classes.rst index 755a13ff7252..5641a78aa42e 100644 --- a/doc/source/reference/arrays.classes.rst +++ b/doc/source/reference/arrays.classes.rst @@ -338,6 +338,73 @@ NumPy provides several hooks that classes can customize: `, results will *not* be written to the object returned by :func:`__array__`. This practice will return ``TypeError``. + **Example** + + To get a feel for writing custom array containers, we'll begin with a simple + example that has rather narrow utility but illustrates the concepts involved. + + >>> import numpy as np + >>> class DiagonalArray: + ... def __init__(self, N, value): + ... self._N = N + ... self._i = value + ... def __repr__(self): + ... return f"{self.__class__.__name__}(N={self._N}, value={self._i})" + ... def __array__(self, dtype=None, copy=None): + ... if copy is False: + ... raise ValueError( + ... "`copy=False` isn't supported. A copy is always created." + ... ) + ... return self._i * np.eye(self._N, dtype=dtype) + + Our custom array can be instantiated like: + + >>> arr = DiagonalArray(5, 1) + >>> arr + DiagonalArray(N=5, value=1) + + We can convert to a numpy array using :func:`numpy.array` or + :func:`numpy.asarray`, which will call its ``__array__`` method to obtain a + standard ``numpy.ndarray``. + + >>> np.asarray(arr) + array([[1., 0., 0., 0., 0.], + [0., 1., 0., 0., 0.], + [0., 0., 1., 0., 0.], + [0., 0., 0., 1., 0.], + [0., 0., 0., 0., 1.]]) + + The ``__array__`` method can optionally accept a `dtype` argument. If provided, + this argument specifies the desired data type for the resulting NumPy array. + Your implementation should attempt to convert the data to this `dtype` + if possible. 
If the conversion is not supported, it's generally best + to fall back to a default type or raise a `TypeError` or `ValueError`. + + Here's an example demonstrating its use with `dtype` specification: + + >>> np.asarray(arr, dtype=np.float32) + array([[1., 0., 0., 0., 0.], + [0., 1., 0., 0., 0.], + [0., 0., 1., 0., 0.], + [0., 0., 0., 1., 0.], + [0., 0., 0., 0., 1.]], dtype=float32) + + If we operate on ``arr`` with a numpy function, numpy will again use the + ``__array__`` interface to convert it to an array and then apply the function + in the usual way. + + >>> np.multiply(arr, 2) + array([[2., 0., 0., 0., 0.], + [0., 2., 0., 0., 0.], + [0., 0., 2., 0., 0.], + [0., 0., 0., 2., 0.], + [0., 0., 0., 0., 2.]]) + + Notice that the return type is a standard ``numpy.ndarray``. + + >>> type(np.multiply(arr, 2)) + + .. _matrix-objects: Matrix objects diff --git a/doc/source/user/basics.dispatch.rst b/doc/source/user/basics.dispatch.rst index 117d60f85467..8140517903c3 100644 --- a/doc/source/user/basics.dispatch.rst +++ b/doc/source/user/basics.dispatch.rst @@ -12,301 +12,13 @@ arrays, an N-dimensional array distributed across multiple nodes, and `cupy `_ arrays, an N-dimensional array on a GPU. -To get a feel for writing custom array containers, we'll begin with a simple -example that has rather narrow utility but illustrates the concepts involved. +For comprehensive documentation on writing custom array containers, please see: ->>> import numpy as np ->>> class DiagonalArray: -... def __init__(self, N, value): -... self._N = N -... self._i = value -... def __repr__(self): -... return f"{self.__class__.__name__}(N={self._N}, value={self._i})" -... def __array__(self, dtype=None, copy=None): -... if copy is False: -... raise ValueError( -... "`copy=False` isn't supported. A copy is always created." -... ) -... 
return self._i * np.eye(self._N, dtype=dtype) - -Our custom array can be instantiated like: - ->>> arr = DiagonalArray(5, 1) ->>> arr -DiagonalArray(N=5, value=1) - -We can convert to a numpy array using :func:`numpy.array` or -:func:`numpy.asarray`, which will call its ``__array__`` method to obtain a -standard ``numpy.ndarray``. - ->>> np.asarray(arr) -array([[1., 0., 0., 0., 0.], - [0., 1., 0., 0., 0.], - [0., 0., 1., 0., 0.], - [0., 0., 0., 1., 0.], - [0., 0., 0., 0., 1.]]) - -The ``__array__`` method can optionally accept a `dtype` argument. If provided, -this argument specifies the desired data type for the resulting NumPy array. -Your implementation should attempt to convert the data to this `dtype` -if possible. If the conversion is not supported, it's generally best -to fall back to a default type or raise a `TypeError` or `ValueError`. - -Here's an example demonstrating its use with `dtype` specification: - ->>> np.asarray(arr, dtype=np.float32) -array([[1., 0., 0., 0., 0.], - [0., 1., 0., 0., 0.], - [0., 0., 1., 0., 0.], - [0., 0., 0., 1., 0.], - [0., 0., 0., 0., 1.]], dtype=float32) - -If we operate on ``arr`` with a numpy function, numpy will again use the -``__array__`` interface to convert it to an array and then apply the function -in the usual way. - ->>> np.multiply(arr, 2) -array([[2., 0., 0., 0., 0.], - [0., 2., 0., 0., 0.], - [0., 0., 2., 0., 0.], - [0., 0., 0., 2., 0.], - [0., 0., 0., 0., 2.]]) - - -Notice that the return type is a standard ``numpy.ndarray``. - ->>> type(np.multiply(arr, 2)) - - -How can we pass our custom array type through this function? Numpy allows a -class to indicate that it would like to handle computations in a custom-defined -way through the interfaces ``__array_ufunc__`` and ``__array_function__``. Let's -take one at a time, starting with ``__array_ufunc__``. This method covers -:ref:`ufuncs`, a class of functions that includes, for example, -:func:`numpy.multiply` and :func:`numpy.sin`. 
- -The ``__array_ufunc__`` receives: - -- ``ufunc``, a function like ``numpy.multiply`` -- ``method``, a string, differentiating between ``numpy.multiply(...)`` and - variants like ``numpy.multiply.outer``, ``numpy.multiply.accumulate``, and so - on. For the common case, ``numpy.multiply(...)``, ``method == '__call__'``. -- ``inputs``, which could be a mixture of different types -- ``kwargs``, keyword arguments passed to the function - -For this example we will only handle the method ``__call__`` - ->>> from numbers import Number ->>> class DiagonalArray: -... def __init__(self, N, value): -... self._N = N -... self._i = value -... def __repr__(self): -... return f"{self.__class__.__name__}(N={self._N}, value={self._i})" -... def __array__(self, dtype=None, copy=None): -... if copy is False: -... raise ValueError( -... "`copy=False` isn't supported. A copy is always created." -... ) -... return self._i * np.eye(self._N, dtype=dtype) -... def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): -... if method == '__call__': -... N = None -... scalars = [] -... for input in inputs: -... if isinstance(input, Number): -... scalars.append(input) -... elif isinstance(input, self.__class__): -... scalars.append(input._i) -... if N is not None: -... if N != input._N: -... raise TypeError("inconsistent sizes") -... else: -... N = input._N -... else: -... return NotImplemented -... return self.__class__(N, ufunc(*scalars, **kwargs)) -... else: -... return NotImplemented - -Now our custom array type passes through numpy functions. - ->>> arr = DiagonalArray(5, 1) ->>> np.multiply(arr, 3) -DiagonalArray(N=5, value=3) ->>> np.add(arr, 3) -DiagonalArray(N=5, value=4) ->>> np.sin(arr) -DiagonalArray(N=5, value=0.8414709848078965) - -At this point ``arr + 3`` does not work. - ->>> arr + 3 -Traceback (most recent call last): -... 
-TypeError: unsupported operand type(s) for +: 'DiagonalArray' and 'int' - -To support it, we need to define the Python interfaces ``__add__``, ``__lt__``, -and so on to dispatch to the corresponding ufunc. We can achieve this -conveniently by inheriting from the mixin -:class:`~numpy.lib.mixins.NDArrayOperatorsMixin`. - ->>> import numpy.lib.mixins ->>> class DiagonalArray(numpy.lib.mixins.NDArrayOperatorsMixin): -... def __init__(self, N, value): -... self._N = N -... self._i = value -... def __repr__(self): -... return f"{self.__class__.__name__}(N={self._N}, value={self._i})" -... def __array__(self, dtype=None, copy=None): -... if copy is False: -... raise ValueError( -... "`copy=False` isn't supported. A copy is always created." -... ) -... return self._i * np.eye(self._N, dtype=dtype) -... def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): -... if method == '__call__': -... N = None -... scalars = [] -... for input in inputs: -... if isinstance(input, Number): -... scalars.append(input) -... elif isinstance(input, self.__class__): -... scalars.append(input._i) -... if N is not None: -... if N != input._N: -... raise TypeError("inconsistent sizes") -... else: -... N = input._N -... else: -... return NotImplemented -... return self.__class__(N, ufunc(*scalars, **kwargs)) -... else: -... return NotImplemented - ->>> arr = DiagonalArray(5, 1) ->>> arr + 3 -DiagonalArray(N=5, value=4) ->>> arr > 0 -DiagonalArray(N=5, value=True) - -Now let's tackle ``__array_function__``. We'll create dict that maps numpy -functions to our custom variants. - ->>> HANDLED_FUNCTIONS = {} ->>> class DiagonalArray(numpy.lib.mixins.NDArrayOperatorsMixin): -... def __init__(self, N, value): -... self._N = N -... self._i = value -... def __repr__(self): -... return f"{self.__class__.__name__}(N={self._N}, value={self._i})" -... def __array__(self, dtype=None, copy=None): -... if copy is False: -... raise ValueError( -... "`copy=False` isn't supported. 
A copy is always created." -... ) -... return self._i * np.eye(self._N, dtype=dtype) -... def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): -... if method == '__call__': -... N = None -... scalars = [] -... for input in inputs: -... # In this case we accept only scalar numbers or DiagonalArrays. -... if isinstance(input, Number): -... scalars.append(input) -... elif isinstance(input, self.__class__): -... scalars.append(input._i) -... if N is not None: -... if N != input._N: -... raise TypeError("inconsistent sizes") -... else: -... N = input._N -... else: -... return NotImplemented -... return self.__class__(N, ufunc(*scalars, **kwargs)) -... else: -... return NotImplemented -... def __array_function__(self, func, types, args, kwargs): -... if func not in HANDLED_FUNCTIONS: -... return NotImplemented -... # Note: this allows subclasses that don't override -... # __array_function__ to handle DiagonalArray objects. -... if not all(issubclass(t, self.__class__) for t in types): -... return NotImplemented -... return HANDLED_FUNCTIONS[func](*args, **kwargs) -... - -A convenient pattern is to define a decorator ``implements`` that can be used -to add functions to ``HANDLED_FUNCTIONS``. - ->>> def implements(np_function): -... "Register an __array_function__ implementation for DiagonalArray objects." -... def decorator(func): -... HANDLED_FUNCTIONS[np_function] = func -... return func -... return decorator -... - -Now we write implementations of numpy functions for ``DiagonalArray``. -For completeness, to support the usage ``arr.sum()`` add a method ``sum`` that -calls ``numpy.sum(self)``, and the same for ``mean``. - ->>> @implements(np.sum) -... def sum(arr): -... "Implementation of np.sum for DiagonalArray objects" -... return arr._i * arr._N -... ->>> @implements(np.mean) -... def mean(arr): -... "Implementation of np.mean for DiagonalArray objects" -... return arr._i / arr._N -... 
->>> arr = DiagonalArray(5, 1) ->>> np.sum(arr) -5 ->>> np.mean(arr) -0.2 - -If the user tries to use any numpy functions not included in -``HANDLED_FUNCTIONS``, a ``TypeError`` will be raised by numpy, indicating that -this operation is not supported. For example, concatenating two -``DiagonalArrays`` does not produce another diagonal array, so it is not -supported. - ->>> np.concatenate([arr, arr]) -Traceback (most recent call last): -... -TypeError: no implementation found for 'numpy.concatenate' on types that implement __array_function__: [] - -Additionally, our implementations of ``sum`` and ``mean`` do not accept the -optional arguments that numpy's implementation does. - ->>> np.sum(arr, axis=0) -Traceback (most recent call last): -... -TypeError: sum() got an unexpected keyword argument 'axis' - - -The user always has the option of converting to a normal ``numpy.ndarray`` with -:func:`numpy.asarray` and using standard numpy from there. - ->>> np.concatenate([np.asarray(arr), np.asarray(arr)]) -array([[1., 0., 0., 0., 0.], - [0., 1., 0., 0., 0.], - [0., 0., 1., 0., 0.], - [0., 0., 0., 1., 0.], - [0., 0., 0., 0., 1.], - [1., 0., 0., 0., 0.], - [0., 1., 0., 0., 0.], - [0., 0., 1., 0., 0.], - [0., 0., 0., 1., 0.], - [0., 0., 0., 0., 1.]]) - - -The implementation of ``DiagonalArray`` in this example only handles the -``np.sum`` and ``np.mean`` functions for brevity. Many other functions in the -Numpy API are also available to wrap and a full-fledged custom array container -can explicitly support all functions that Numpy makes available to wrap. 
+- :ref:`Interoperability with NumPy ` - the main guide + covering ``__array_ufunc__`` and ``__array_function__`` protocols +- :ref:`Special attributes and methods ` - see + ``class.__array__()`` for documentation and example implementing the + ``__array__()`` method Numpy provides some utilities to aid testing of custom array containers that implement the ``__array_ufunc__`` and ``__array_function__`` protocols in the diff --git a/doc/source/user/basics.interoperability.rst b/doc/source/user/basics.interoperability.rst index b1c115ff1de0..9b37f0b0926d 100644 --- a/doc/source/user/basics.interoperability.rst +++ b/doc/source/user/basics.interoperability.rst @@ -150,7 +150,7 @@ If a class implements the old signature ``__array__(self)``, for ``np.array(a)`` a warning will be raised saying that ``dtype`` and ``copy`` arguments are missing. To see an example of a custom array implementation including the use of -``__array__()``, see :ref:`basics.dispatch`. +``__array__()``, see :ref:`special-attributes-and-methods`. The DLPack Protocol ~~~~~~~~~~~~~~~~~~~ From f0ea275f97aea20d21f75b00ba8ce89defebf917 Mon Sep 17 00:00:00 2001 From: nlegendree Date: Tue, 23 Dec 2025 17:00:37 +0100 Subject: [PATCH 1071/1718] DOC: address review comments on array containers guide Simplify verbose text in DiagonalArray example and add link to class.__array__() documentation as suggested. 
--- doc/source/reference/arrays.classes.rst | 11 ++--------- doc/source/user/basics.interoperability.rst | 5 +---- 2 files changed, 3 insertions(+), 13 deletions(-) diff --git a/doc/source/reference/arrays.classes.rst b/doc/source/reference/arrays.classes.rst index 5641a78aa42e..df64e9e5d042 100644 --- a/doc/source/reference/arrays.classes.rst +++ b/doc/source/reference/arrays.classes.rst @@ -340,8 +340,7 @@ NumPy provides several hooks that classes can customize: **Example** - To get a feel for writing custom array containers, we'll begin with a simple - example that has rather narrow utility but illustrates the concepts involved. + Use ``__array__`` to create a diagonal array of fixed size and value: >>> import numpy as np >>> class DiagonalArray: @@ -374,13 +373,7 @@ NumPy provides several hooks that classes can customize: [0., 0., 0., 1., 0.], [0., 0., 0., 0., 1.]]) - The ``__array__`` method can optionally accept a `dtype` argument. If provided, - this argument specifies the desired data type for the resulting NumPy array. - Your implementation should attempt to convert the data to this `dtype` - if possible. If the conversion is not supported, it's generally best - to fall back to a default type or raise a `TypeError` or `ValueError`. 
- - Here's an example demonstrating its use with `dtype` specification: + Using ``dtype`` should return an appropriate ndarray or raise an error: >>> np.asarray(arr, dtype=np.float32) array([[1., 0., 0., 0., 0.], diff --git a/doc/source/user/basics.interoperability.rst b/doc/source/user/basics.interoperability.rst index 9b37f0b0926d..984a05b4e8bc 100644 --- a/doc/source/user/basics.interoperability.rst +++ b/doc/source/user/basics.interoperability.rst @@ -130,7 +130,7 @@ We can check that ``arr`` and ``new_arr`` share the same data buffer: The ``__array__()`` method ~~~~~~~~~~~~~~~~~~~~~~~~~~ -The ``__array__()`` method ensures that any NumPy-like object (an array, any +The `__array__() <../reference/arrays.classes.html#numpy.class.\_\_array\_\_>`__ method ensures that any NumPy-like object (an array, any object exposing the array interface, an object whose ``__array__()`` method returns an array or any nested sequence) that implements it can be used as a NumPy array. If possible, this will mean using ``__array__()`` to create a NumPy @@ -149,9 +149,6 @@ is needed. If a class implements the old signature ``__array__(self)``, for ``np.array(a)`` a warning will be raised saying that ``dtype`` and ``copy`` arguments are missing. -To see an example of a custom array implementation including the use of -``__array__()``, see :ref:`special-attributes-and-methods`. 
- The DLPack Protocol ~~~~~~~~~~~~~~~~~~~ From 7e6f6069948bacfa5a63f414acfc4e9c9b51db17 Mon Sep 17 00:00:00 2001 From: Kumar Aditya Date: Wed, 24 Dec 2025 20:40:17 +0530 Subject: [PATCH 1072/1718] ENH: use c11 atomics for `numpy` (#30489) --- .../upcoming_changes/30489.compatibility.rst | 5 + meson.build | 3 +- numpy/_core/src/common/npy_argparse.c | 9 +- numpy/_core/src/common/npy_atomic.h | 123 ------------------ numpy/_core/src/common/npy_import.c | 28 +++- numpy/_core/src/common/npy_import.h | 50 ++----- numpy/_core/src/multiarray/hashdescr.c | 7 +- 7 files changed, 57 insertions(+), 168 deletions(-) create mode 100644 doc/release/upcoming_changes/30489.compatibility.rst delete mode 100644 numpy/_core/src/common/npy_atomic.h diff --git a/doc/release/upcoming_changes/30489.compatibility.rst b/doc/release/upcoming_changes/30489.compatibility.rst new file mode 100644 index 000000000000..6eb1387fab6b --- /dev/null +++ b/doc/release/upcoming_changes/30489.compatibility.rst @@ -0,0 +1,5 @@ +MSVC support +------------ +NumPy now requires minimum MSVC 19.35 toolchain version on +Windows platforms. This corresponds to Visual Studio 2022 +version 17.5 Preview 2 or newer. 
\ No newline at end of file diff --git a/meson.build b/meson.build index 2cb7ce987ad5..a72c5bb02734 100644 --- a/meson.build +++ b/meson.build @@ -25,10 +25,11 @@ if cc.get_id() == 'gcc' error('NumPy requires GCC >= 9.3') endif elif cc.get_id() == 'msvc' - if not cc.version().version_compare('>=19.20') + if not cc.version().version_compare('>=19.35') error('NumPy requires at least vc142 (default with Visual Studio 2019) ' + \ 'when building with MSVC') endif + add_project_arguments('/experimental:c11atomics', language: 'c') endif if not cy.version().version_compare('>=3.0.6') error('NumPy requires Cython >= 3.0.6') diff --git a/numpy/_core/src/common/npy_argparse.c b/numpy/_core/src/common/npy_argparse.c index aa011be9c585..ea15ec68026b 100644 --- a/numpy/_core/src/common/npy_argparse.c +++ b/numpy/_core/src/common/npy_argparse.c @@ -1,13 +1,14 @@ #define NPY_NO_DEPRECATED_API NPY_API_VERSION #define _MULTIARRAYMODULE + #define PY_SSIZE_T_CLEAN #include +#include #include "numpy/ndarraytypes.h" #include "numpy/npy_2_compat.h" #include "npy_argparse.h" -#include "npy_atomic.h" #include "npy_import.h" #include "arrayfunction_override.h" @@ -299,9 +300,9 @@ _npy_parse_arguments(const char *funcname, /* ... is NULL, NULL, NULL terminated: name, converter, value */ ...) 
{ - if (!npy_atomic_load_uint8(&cache->initialized)) { + if (!atomic_load_explicit((_Atomic(uint8_t) *)&cache->initialized, memory_order_acquire)) { LOCK_ARGPARSE_MUTEX; - if (!npy_atomic_load_uint8(&cache->initialized)) { + if (!atomic_load_explicit((_Atomic(uint8_t) *)&cache->initialized, memory_order_acquire)) { va_list va; va_start(va, kwnames); int res = initialize_keywords(funcname, cache, va); @@ -310,7 +311,7 @@ _npy_parse_arguments(const char *funcname, UNLOCK_ARGPARSE_MUTEX; return -1; } - npy_atomic_store_uint8(&cache->initialized, 1); + atomic_store_explicit((_Atomic(uint8_t) *)&cache->initialized, 1, memory_order_release); } UNLOCK_ARGPARSE_MUTEX; } diff --git a/numpy/_core/src/common/npy_atomic.h b/numpy/_core/src/common/npy_atomic.h deleted file mode 100644 index 61a31acc13e0..000000000000 --- a/numpy/_core/src/common/npy_atomic.h +++ /dev/null @@ -1,123 +0,0 @@ -/* - * Provides wrappers around C11 standard library atomics and MSVC intrinsics - * to provide basic atomic load and store functionality. 
This is based on - * code in CPython's pyatomic.h, pyatomic_std.h, and pyatomic_msc.h - */ - -#ifndef NUMPY_CORE_SRC_COMMON_NPY_ATOMIC_H_ -#define NUMPY_CORE_SRC_COMMON_NPY_ATOMIC_H_ - -#include "numpy/npy_common.h" - -#ifdef __cplusplus - extern "C++" { - #include - } - #define _NPY_USING_STD using namespace std - #define _Atomic(tp) atomic - #define STDC_ATOMICS -#elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L \ - && !defined(__STDC_NO_ATOMICS__) - #include - #include - #define _NPY_USING_STD - #define STDC_ATOMICS -#elif _MSC_VER - #include - #define MSC_ATOMICS - #if !defined(_M_X64) && !defined(_M_IX86) && !defined(_M_ARM64) - #error "Unsupported MSVC build configuration, neither x86 or ARM" - #endif -#elif defined(__GNUC__) && (__GNUC__ > 4) - #define GCC_ATOMICS -#elif defined(__clang__) - #if __has_builtin(__atomic_load) - #define GCC_ATOMICS - #endif -#else - #error "no supported atomic implementation for this platform/compiler" -#endif - - -static inline npy_uint8 -npy_atomic_load_uint8(const npy_uint8 *obj) { -#ifdef STDC_ATOMICS - _NPY_USING_STD; - return (npy_uint8)atomic_load((const _Atomic(uint8_t)*)obj); -#elif defined(MSC_ATOMICS) -#if defined(_M_X64) || defined(_M_IX86) - return *(volatile npy_uint8 *)obj; -#else // defined(_M_ARM64) - return (npy_uint8)__ldar8((unsigned __int8 volatile *)obj); -#endif -#elif defined(GCC_ATOMICS) - return __atomic_load_n(obj, __ATOMIC_SEQ_CST); -#endif -} - -static inline void* -npy_atomic_load_ptr(const void *obj) { -#ifdef STDC_ATOMICS - _NPY_USING_STD; - return atomic_load((const _Atomic(void *)*)obj); -#elif defined(MSC_ATOMICS) -#if SIZEOF_VOID_P == 8 -#if defined(_M_X64) || defined(_M_IX86) - return (void *)*(volatile uint64_t *)obj; -#elif defined(_M_ARM64) - return (void *)__ldar64((unsigned __int64 volatile *)obj); -#endif -#else -#if defined(_M_X64) || defined(_M_IX86) - return (void *)*(volatile uint32_t *)obj; -#elif defined(_M_ARM64) - return (void *)__ldar32((unsigned __int32 volatile 
*)obj); -#endif -#endif -#elif defined(GCC_ATOMICS) - return (void *)__atomic_load_n((void * const *)obj, __ATOMIC_SEQ_CST); -#endif -} - -static inline npy_hash_t -npy_atomic_load_hash_t(const npy_hash_t *obj) { - assert(sizeof(npy_hash_t) == sizeof(void *)); - return (npy_hash_t)npy_atomic_load_ptr((const void *)obj); -} - -static inline void -npy_atomic_store_uint8(npy_uint8 *obj, npy_uint8 value) { -#ifdef STDC_ATOMICS - _NPY_USING_STD; - atomic_store((_Atomic(uint8_t)*)obj, value); -#elif defined(MSC_ATOMICS) - _InterlockedExchange8((volatile char *)obj, (char)value); -#elif defined(GCC_ATOMICS) - __atomic_store_n(obj, value, __ATOMIC_SEQ_CST); -#endif -} - -static inline void -npy_atomic_store_ptr(void *obj, void *value) -{ -#ifdef STDC_ATOMICS - _NPY_USING_STD; - atomic_store((_Atomic(void *)*)obj, value); -#elif defined(MSC_ATOMICS) - _InterlockedExchangePointer((void * volatile *)obj, (void *)value); -#elif defined(GCC_ATOMICS) - __atomic_store_n((void **)obj, value, __ATOMIC_SEQ_CST); -#endif -} - -static inline void -npy_atomic_store_hash_t(npy_hash_t *obj, npy_hash_t value) { - assert(sizeof(npy_hash_t) == sizeof(void *)); - npy_atomic_store_ptr((void *)obj, (void *)value); -} - -#undef MSC_ATOMICS -#undef STDC_ATOMICS -#undef GCC_ATOMICS - -#endif // NUMPY_CORE_SRC_COMMON_NPY_NPY_ATOMIC_H_ diff --git a/numpy/_core/src/common/npy_import.c b/numpy/_core/src/common/npy_import.c index a0308ff3e4c7..534d7b34020b 100644 --- a/numpy/_core/src/common/npy_import.c +++ b/numpy/_core/src/common/npy_import.c @@ -3,7 +3,7 @@ #include "numpy/ndarraytypes.h" #include "npy_import.h" -#include "npy_atomic.h" +#include NPY_VISIBILITY_HIDDEN npy_runtime_imports_struct npy_runtime_imports; @@ -60,3 +60,29 @@ npy_import_entry_point(const char *entry_point) { } return result; } + + +NPY_NO_EXPORT int +npy_cache_import_runtime(const char *module, const char *attr, PyObject **obj) { + if (!atomic_load_explicit((_Atomic(PyObject *) *)obj, memory_order_acquire)) { + PyObject* 
value = npy_import(module, attr); + if (value == NULL) { + return -1; + } +#if PY_VERSION_HEX < 0x30d00b3 + PyThread_acquire_lock(npy_runtime_imports.import_mutex, WAIT_LOCK); +#else + PyMutex_Lock(&npy_runtime_imports.import_mutex); +#endif + if (!atomic_load_explicit((_Atomic(PyObject *) *)obj, memory_order_acquire)) { + atomic_store_explicit((_Atomic(PyObject *) *)obj, Py_NewRef(value), memory_order_release); + } +#if PY_VERSION_HEX < 0x30d00b3 + PyThread_release_lock(npy_runtime_imports.import_mutex); +#else + PyMutex_Unlock(&npy_runtime_imports.import_mutex); +#endif + Py_DECREF(value); + } + return 0; +} diff --git a/numpy/_core/src/common/npy_import.h b/numpy/_core/src/common/npy_import.h index fec1b22f3975..9eab510726aa 100644 --- a/numpy/_core/src/common/npy_import.h +++ b/numpy/_core/src/common/npy_import.h @@ -4,7 +4,6 @@ #include #include "numpy/npy_common.h" -#include "npy_atomic.h" #ifdef __cplusplus extern "C" { @@ -78,6 +77,19 @@ npy_import(const char *module, const char *attr) return ret; } +NPY_NO_EXPORT int +init_import_mutex(void); + +/*! \brief Import a Python object from an entry point string. + + * The name should be of the form "(module ':')? (object '.')* attr". + * If no module is present, it is assumed to be "numpy". + * On error, returns NULL. + */ +NPY_NO_EXPORT PyObject* +npy_import_entry_point(const char *entry_point); + + /*! \brief Fetch and cache Python object at runtime. * * Import a Python function and cache it for use. The function checks if @@ -91,42 +103,8 @@ npy_import(const char *module, const char *attr) * @param attr module attribute to cache. * @param obj Storage location for imported function. 
*/ -static inline int -npy_cache_import_runtime(const char *module, const char *attr, PyObject **obj) { - if (!npy_atomic_load_ptr(obj)) { - PyObject* value = npy_import(module, attr); - if (value == NULL) { - return -1; - } -#if PY_VERSION_HEX < 0x30d00b3 - PyThread_acquire_lock(npy_runtime_imports.import_mutex, WAIT_LOCK); -#else - PyMutex_Lock(&npy_runtime_imports.import_mutex); -#endif - if (!npy_atomic_load_ptr(obj)) { - npy_atomic_store_ptr(obj, Py_NewRef(value)); - } -#if PY_VERSION_HEX < 0x30d00b3 - PyThread_release_lock(npy_runtime_imports.import_mutex); -#else - PyMutex_Unlock(&npy_runtime_imports.import_mutex); -#endif - Py_DECREF(value); - } - return 0; -} - NPY_NO_EXPORT int -init_import_mutex(void); - -/*! \brief Import a Python object from an entry point string. - - * The name should be of the form "(module ':')? (object '.')* attr". - * If no module is present, it is assumed to be "numpy". - * On error, returns NULL. - */ -NPY_NO_EXPORT PyObject* -npy_import_entry_point(const char *entry_point); +npy_cache_import_runtime(const char *module, const char *attr, PyObject **obj); #ifdef __cplusplus } diff --git a/numpy/_core/src/multiarray/hashdescr.c b/numpy/_core/src/multiarray/hashdescr.c index 853e247e0b74..be203eb197c3 100644 --- a/numpy/_core/src/multiarray/hashdescr.c +++ b/numpy/_core/src/multiarray/hashdescr.c @@ -1,12 +1,13 @@ #define NPY_NO_DEPRECATED_API NPY_API_VERSION #define _MULTIARRAYMODULE + #define PY_SSIZE_T_CLEAN #include +#include #include -#include "npy_atomic.h" #include "npy_config.h" @@ -303,14 +304,14 @@ PyArray_DescrHash(PyObject* odescr) } descr = (PyArray_Descr*)odescr; - hash = npy_atomic_load_hash_t(&descr->hash); + hash = atomic_load_explicit((_Atomic(npy_hash_t) *)&descr->hash, memory_order_relaxed); if (hash == -1) { hash = _PyArray_DescrHashImp(descr); if (hash == -1) { return -1; } - npy_atomic_store_hash_t(&descr->hash, hash); + atomic_store_explicit((_Atomic(npy_hash_t) *)&descr->hash, hash, memory_order_relaxed); } 
return hash; From e05a6dcbba65e0e46d61888f60d4d5b3c283cb2b Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 24 Dec 2025 18:49:56 +0100 Subject: [PATCH 1073/1718] BUG: Fix leak in flat assignment iterator Both the value array and iterator were missing the DECREF. Since the iterator is a temporary object, I don't really think it is particularly helpful to try to test. Rather, it would be good to try to test with ASAN semi-regularly (or I use `pytest-valgrind` again, which works great but is just very very slow). Closes gh-30508 --- numpy/_core/src/multiarray/iterators.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/numpy/_core/src/multiarray/iterators.c b/numpy/_core/src/multiarray/iterators.c index 1b4ed59fbfe0..bf77abcb547e 100644 --- a/numpy/_core/src/multiarray/iterators.c +++ b/numpy/_core/src/multiarray/iterators.c @@ -764,6 +764,8 @@ iter_ass_subscript(PyArrayIterObject *self, PyObject *ind, PyObject *val) npy_index_info indices[NPY_MAXDIMS * 2 + 1]; PyArray_Descr *dtype = PyArray_DESCR(self->ao); + PyArrayObject *arrval = NULL; + PyArrayIterObject *val_it = NULL; npy_intp dtype_size = dtype->elsize; NPY_cast_info cast_info = {.func = NULL}; @@ -830,12 +832,12 @@ iter_ass_subscript(PyArrayIterObject *self, PyObject *ind, PyObject *val) } Py_INCREF(dtype); - PyArrayObject *arrval = (PyArrayObject *)PyArray_FromAny(val, dtype, 0, 0, + arrval = (PyArrayObject *)PyArray_FromAny(val, dtype, 0, 0, NPY_ARRAY_FORCECAST, NULL); if (arrval == NULL) { goto finish; } - PyArrayIterObject *val_it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)arrval); + val_it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)arrval); if (val_it == NULL) { goto finish; } @@ -908,6 +910,8 @@ iter_ass_subscript(PyArrayIterObject *self, PyObject *ind, PyObject *val) for (int i = 0; i < index_num; i++) { Py_XDECREF(indices[i].object); } + Py_XDECREF(val_it); + Py_XDECREF(arrval); return ret; } From d0de6a1a90bb7740cc410144795b203720c82028 Mon Sep 17 
00:00:00 2001 From: Nathan Goldbaum Date: Thu, 25 Dec 2025 00:11:30 -0700 Subject: [PATCH 1074/1718] BUG: fix heap overflow in fixed-width string multiply (#30511) --- numpy/_core/src/umath/string_ufuncs.cpp | 10 +++++++--- numpy/_core/tests/test_strings.py | 8 ++++++++ 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/numpy/_core/src/umath/string_ufuncs.cpp b/numpy/_core/src/umath/string_ufuncs.cpp index 4a65227cdfa6..18ed4534ea04 100644 --- a/numpy/_core/src/umath/string_ufuncs.cpp +++ b/numpy/_core/src/umath/string_ufuncs.cpp @@ -176,9 +176,14 @@ string_multiply(Buffer buf1, npy_int64 reps, Buffer out) return 0; } + size_t width = out.buffer_width(); + // we know this is positive + size_t reps_ = (size_t)reps; + if (len1 == 1) { - out.buffer_memset(*buf1, reps); - out.buffer_fill_with_zeros_after_index(reps); + size_t end_index = reps_ > width ? width : reps_; + out.buffer_memset(*buf1, end_index); + out.buffer_fill_with_zeros_after_index(end_index); return 0; } @@ -188,7 +193,6 @@ string_multiply(Buffer buf1, npy_int64 reps, Buffer out) } size_t pad = 0; - size_t width = out.buffer_width(); if (width < newlen) { reps = width / len1; pad = width % len1; diff --git a/numpy/_core/tests/test_strings.py b/numpy/_core/tests/test_strings.py index d8d23d47b5b8..5a4b0a6a7f32 100644 --- a/numpy/_core/tests/test_strings.py +++ b/numpy/_core/tests/test_strings.py @@ -192,6 +192,14 @@ def test_large_string_cast(): a.astype("U") +@pytest.mark.parametrize("dt", ["S1", "U1"]) +def test_in_place_mutiply_no_overflow(dt): + # see gh-30495 + a = np.array("a", dtype=dt) + a *= 20 + assert_array_equal(a, np.array("a", dtype=dt)) + + @pytest.mark.parametrize("dt", ["S", "U", "T"]) class TestMethods: From 9bf73f29d6d7f608e55dfcbc8613cb739e388051 Mon Sep 17 00:00:00 2001 From: Alejandro Candioti Date: Thu, 25 Dec 2025 15:46:57 -0800 Subject: [PATCH 1075/1718] ENH: optimizing np.searchsorted and adding benchmarks --- benchmarks/benchmarks/bench_searchsorted.py | 29 +++++ 
numpy/_core/src/npysort/binsearch.cpp | 118 +++++++++++++++----- 2 files changed, 117 insertions(+), 30 deletions(-) create mode 100644 benchmarks/benchmarks/bench_searchsorted.py diff --git a/benchmarks/benchmarks/bench_searchsorted.py b/benchmarks/benchmarks/bench_searchsorted.py new file mode 100644 index 000000000000..24c62aad6247 --- /dev/null +++ b/benchmarks/benchmarks/bench_searchsorted.py @@ -0,0 +1,29 @@ +import numpy as np +from .common import Benchmark + +class SearchSortedInt64(Benchmark): + # Benchmark for np.searchsorted with int64 arrays + params = [ + # 1B u64 is 8gb + [100, 10_000, 1_000_000, 1_000_000_000], # array sizes + [1, 2, 100, 100_000], # number of query elements + ['ordered', 'random'], # query order + [42, 18122022] + ] + param_names = ['array_size', 'n_queries', 'query_order', 'seed'] + + def setup(self, array_size, n_queries, query_order, seed): + self.arr = np.arange(array_size, dtype=np.int64) + + rng = np.random.default_rng(seed) + + low = -array_size // 10 + high = array_size + array_size // 10 + self.queries = rng.integers(low, high, size=n_queries, dtype=np.int64) + + # Generate queries + if query_order == 'ordered': + self.queries.sort() + + def time_searchsorted(self, array_size, n_queries, query_order, seed): + np.searchsorted(self.arr, self.queries) diff --git a/numpy/_core/src/npysort/binsearch.cpp b/numpy/_core/src/npysort/binsearch.cpp index f3f091e99fca..3d4565df6db3 100644 --- a/numpy/_core/src/npysort/binsearch.cpp +++ b/numpy/_core/src/npysort/binsearch.cpp @@ -64,43 +64,101 @@ binsearch(const char *arr, const char *key, char *ret, npy_intp arr_len, { using T = typename Tag::type; auto cmp = side_to_cmp::value; - npy_intp min_idx = 0; - npy_intp max_idx = arr_len; - T last_key_val; - if (key_len == 0) { + // Let's handle this corner case first: if length is 0 we return all 0s + if (arr_len <= 0) { + for (npy_intp i = 0; i < key_len; ++i) { + *(npy_intp *)(ret + i * ret_str) = 0; + } return; } - last_key_val = *(const 
T *)key; - for (; key_len > 0; key_len--, key += key_str, ret += ret_str) { - const T key_val = *(const T *)key; - /* - * Updating only one of the indices based on the previous key - * gives the search a big boost when keys are sorted, but slightly - * slows down things for purely random ones. - */ - if (cmp(last_key_val, key_val)) { - max_idx = arr_len; - } - else { - min_idx = 0; - max_idx = (max_idx < arr_len) ? (max_idx + 1) : arr_len; - } + /* + In this binary search, the candidates are in the range [base, base+length] + and on each iteration we pick a pivot at the mid-point of the range to + compare against the key being search. Depending on the comparison result, + we adjust the base index and halve the length of the interval.. + + To batch multiple queries, we do one pivot pass for each different length + for all keys, storing intermediate values of base for every key. To avoid + consuming extra memory, we use the ret array to store intermediate values + of each base until they become the final result in the last step. + + There are two benefits of this approach: + + 1. Cache locallity of pivots. In early iterations each key is compared + against the same set of pivots. For example, in the first iteration all + keys are compared against the median. In the second iteration, all keys + end up being compared against 1st and 3rd quartiles. + + 2. Independent calculations for out-of-order execution. In the single-key + version, step i+1 depends on computation of step i. Meaning that step i+1 + must wait for step i to complete before proceeding. When batching multiple + keys, we compute each step for all keys before continuing on the next step. + All the computations at a given step are independent across different keys. + Meaning that the CPU can execute multiple keys out-of-order in parallel. 
+ + Invariant: + - cmp(arr[i], key_val) == true for all i < base + - cmp(arr[i], key_val) == false for all i >= base + length + + The insertion index candidates are i in range [base, base+length] and + on each iteration we shrink the range into either + [base, ceil(length / 2)] + or + [base + floor(length / 2), ceil(length / 2)] + + Optimization: we unroll the first iteration for the following reasons: + 1. ret is not initialized with the bases, so we save |keys| writes + by not having to intialize it with 0s. + 2. By assuming the initial base for every key is 0, we also save + |keys| reads. + 3. In the first iteration, all elements are compared against the + median. So we can store it in a variable and use it for all keys. + + This initial block replaces the initialization loop: + + for (npy_intp i = 0; i < key_len; ++i) { + *(npy_intp *)(ret + i * ret_str) = 0; + } - last_key_val = key_val; + Note that when arr_len = 1, then half is 0 so the following block + initializes the array as with 0s. 
+ */ + npy_intp half = arr_len >> 1; + arr_len -= half; // length -> ceil(length / 2) - while (min_idx < max_idx) { - const npy_intp mid_idx = min_idx + ((max_idx - min_idx) >> 1); - const T mid_val = *(const T *)(arr + mid_idx * arr_str); - if (cmp(mid_val, key_val)) { - min_idx = mid_idx + 1; - } - else { - max_idx = mid_idx; - } + // We write 0 to explicitly refer to base + const T mid_val = *(const T *)(arr + (0 + half) * arr_str); + + for (npy_intp i = 0; i < key_len; ++i) { + const T key_val = *(const T *)(key + i * key_str); + *(npy_intp *)(ret + i * ret_str) = cmp(mid_val, key_val) * half; + } + + while (arr_len > 1) { + npy_intp half = arr_len >> 1; + arr_len -= half; // length -> ceil(length / 2) + + for (npy_intp i = 0; i < key_len; ++i) { + npy_intp &base = *(npy_intp *)(ret + i * ret_str); + const T mid_val = *(const T *)(arr + (base + half) * arr_str); + const T key_val = *(const T *)(key + i * key_str); + base += cmp(mid_val, key_val) * half; } - *(npy_intp *)ret = min_idx; + } + + /* + At this point arr_len == 1, so the candidates are in [base, base + 1]. 
+ + We have two options: + If cmp(arr[base], key_val) == true, insertion index is base + 1 + Otherwise the insertion order is just base + */ + for (npy_intp i = 0; i < key_len; ++i) { + npy_intp &base = *(npy_intp *)(ret + i * ret_str); + const T key_val = *(const T *)(key + i * key_str); + base += cmp(*(const T *)(arr + base * arr_str), key_val); } } From 4929ee8f8ca5895920a3aecc2123e541fb5c3211 Mon Sep 17 00:00:00 2001 From: Alejandro Candioti Date: Thu, 25 Dec 2025 18:32:01 -0800 Subject: [PATCH 1076/1718] Applying linter --- benchmarks/benchmarks/bench_searchsorted.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/benchmarks/benchmarks/bench_searchsorted.py b/benchmarks/benchmarks/bench_searchsorted.py index 24c62aad6247..5704bc563ff8 100644 --- a/benchmarks/benchmarks/bench_searchsorted.py +++ b/benchmarks/benchmarks/bench_searchsorted.py @@ -1,6 +1,8 @@ import numpy as np + from .common import Benchmark + class SearchSortedInt64(Benchmark): # Benchmark for np.searchsorted with int64 arrays params = [ From 3680121921df3b6a2156b4978bfecb8e4fc90699 Mon Sep 17 00:00:00 2001 From: Anirudh-kasthuri Date: Fri, 26 Dec 2025 12:54:48 +0530 Subject: [PATCH 1077/1718] Fixed-formatting --- doc/source/reference/random/index.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/reference/random/index.rst b/doc/source/reference/random/index.rst index f59a2182052b..6da0a8c4e0a0 100644 --- a/doc/source/reference/random/index.rst +++ b/doc/source/reference/random/index.rst @@ -168,9 +168,9 @@ Features Parallel Applications Multithreaded Generation - new-or-different + New or Different Comparing Performance - c-api + C API Examples of using Numba, Cython, CFFI Original Source of the Generator and BitGenerators From e6e056a9df4b11d80c069fbd400afb5a6e96a8da Mon Sep 17 00:00:00 2001 From: Kumar Aditya Date: Fri, 26 Dec 2025 22:06:47 +0530 Subject: [PATCH 1078/1718] CI: update debug CI to use more recent Python version (#30518) --- 
.github/workflows/linux.yml | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index fed1fba04712..15d3ef45e327 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -90,22 +90,20 @@ jobs: fetch-tags: true persist-credentials: false - name: Install debug Python + uses: deadsnakes/action@e640ac8743173a67cca4d7d77cd837e514bf98e8 # v3.2.0 + with: + python-version: '3.14' + debug: true + - name: Build and install NumPy run: | - sudo apt-get update - sudo apt-get install python3-dbg ninja-build - - name: Build NumPy and install into venv - run: | - python3-dbg -m venv venv - source venv/bin/activate + python --version pip install -U pip pip install . -v -Csetup-args=-Dbuildtype=debug -Csetup-args=-Dallow-noblas=true - name: Install test dependencies run: | - source venv/bin/activate pip install -r requirements/test_requirements.txt - name: Run test suite run: | - source venv/bin/activate cd tools pytest --timeout=600 --durations=10 --pyargs numpy -m "not slow" From 1883d84250008daf7aaf770fc62a2722301cb05c Mon Sep 17 00:00:00 2001 From: Marten van Kerkwijk Date: Fri, 26 Dec 2025 14:05:46 -0500 Subject: [PATCH 1079/1718] BUG: Ensure summed weights returned by np.average always are correct class (#30522) --- numpy/lib/_function_base_impl.py | 2 +- numpy/lib/tests/test_function_base.py | 8 ++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 97bdbf2d048d..a9458378266c 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -594,7 +594,7 @@ def average(a, axis=None, weights=None, returned=False, *, if returned: if scl.shape != avg_as_array.shape: - scl = np.broadcast_to(scl, avg_as_array.shape).copy() + scl = np.broadcast_to(scl, avg_as_array.shape, subok=True).copy() return avg, scl else: return avg diff --git 
a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 412f06d07e20..f586813208e9 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -497,6 +497,14 @@ class subclass(np.ndarray): assert_equal(type(np.average(a)), subclass) assert_equal(type(np.average(a, weights=w)), subclass) + # Ensure a possibly returned sum of weights is correct too. + ra, rw = np.average(a, weights=w, returned=True) + assert_equal(type(ra), subclass) + assert_equal(type(rw), subclass) + # Even if it needs to be broadcast. + ra, rw = np.average(a, weights=w[0], axis=1, returned=True) + assert_equal(type(ra), subclass) + assert_equal(type(rw), subclass) def test_upcasting(self): typs = [('i4', 'i4', 'f8'), ('i4', 'f4', 'f8'), ('f4', 'i4', 'f8'), From 9bcdb275eacf53a70f3c1c6305b7b1fabc548d94 Mon Sep 17 00:00:00 2001 From: Bill Tompkins <204918+BillTompkins@users.noreply.github.com> Date: Fri, 26 Dec 2025 21:50:19 -0500 Subject: [PATCH 1080/1718] TYP: Fix return type of histogram2d Closes #30525 --- numpy/lib/_twodim_base_impl.pyi | 2 +- .../typing/tests/data/reveal/twodim_base.pyi | 24 +++++++++---------- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/numpy/lib/_twodim_base_impl.pyi b/numpy/lib/_twodim_base_impl.pyi index 63f5f4cdc9c0..de0d69670993 100644 --- a/numpy/lib/_twodim_base_impl.pyi +++ b/numpy/lib/_twodim_base_impl.pyi @@ -57,7 +57,7 @@ type _ArrayLike1DNumber_co = _SupportsArray[np.dtype[_Number_co]] | Sequence[com type _MaskFunc[_T] = Callable[[NDArray[np.int_], _T], NDArray[_Number_co | np.timedelta64 | np.datetime64 | np.object_]] type _Indices2D = tuple[_Array1D[np.intp], _Array1D[np.intp]] -type _Histogram2D[ScalarT: np.generic] = tuple[_Array1D[np.float64], _Array1D[ScalarT], _Array1D[ScalarT]] +type _Histogram2D[ScalarT: np.generic] = tuple[_Array2D[np.float64], _Array1D[ScalarT], _Array1D[ScalarT]] @type_check_only class _HasShapeAndNDim(Protocol): diff --git 
a/numpy/typing/tests/data/reveal/twodim_base.pyi b/numpy/typing/tests/data/reveal/twodim_base.pyi index 8cafe729a943..d8c45afe44ab 100644 --- a/numpy/typing/tests/data/reveal/twodim_base.pyi +++ b/numpy/typing/tests/data/reveal/twodim_base.pyi @@ -108,7 +108,7 @@ assert_type(np.vander(_nd_obj), np.ndarray[_2D, np.dtype[np.object_]]) assert_type( np.histogram2d(_to_1d_f64, _to_1d_f64), tuple[ - np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_2D, np.dtype[np.float64]], np.ndarray[_1D, np.dtype[np.float64]], np.ndarray[_1D, np.dtype[np.float64]], ], @@ -116,7 +116,7 @@ assert_type( assert_type( np.histogram2d(_to_1d_c128, _to_1d_c128), tuple[ - np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_2D, np.dtype[np.float64]], np.ndarray[_1D, np.dtype[np.complex128 | Any]], np.ndarray[_1D, np.dtype[np.complex128 | Any]], ], @@ -124,7 +124,7 @@ assert_type( assert_type( np.histogram2d(_nd_i64, _nd_bool), tuple[ - np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_2D, np.dtype[np.float64]], np.ndarray[_1D, np.dtype[np.float64]], np.ndarray[_1D, np.dtype[np.float64]], ], @@ -132,7 +132,7 @@ assert_type( assert_type( np.histogram2d(_nd_f64, _nd_i64), tuple[ - np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_2D, np.dtype[np.float64]], np.ndarray[_1D, np.dtype[np.float64]], np.ndarray[_1D, np.dtype[np.float64]], ], @@ -140,7 +140,7 @@ assert_type( assert_type( np.histogram2d(_nd_i64, _nd_f64), tuple[ - np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_2D, np.dtype[np.float64]], np.ndarray[_1D, np.dtype[np.float64]], np.ndarray[_1D, np.dtype[np.float64]], ], @@ -148,7 +148,7 @@ assert_type( assert_type( np.histogram2d(_nd_f64, _nd_c128, weights=_to_1d_bool), tuple[ - np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_2D, np.dtype[np.float64]], np.ndarray[_1D, np.dtype[np.complex128]], np.ndarray[_1D, np.dtype[np.complex128]], ], @@ -156,7 +156,7 @@ assert_type( assert_type( np.histogram2d(_nd_f64, _nd_c128, bins=8), tuple[ - np.ndarray[_1D, np.dtype[np.float64]], 
+ np.ndarray[_2D, np.dtype[np.float64]], np.ndarray[_1D, np.dtype[np.complex128]], np.ndarray[_1D, np.dtype[np.complex128]], ], @@ -164,7 +164,7 @@ assert_type( assert_type( np.histogram2d(_nd_c128, _nd_f64, bins=(8, 5)), tuple[ - np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_2D, np.dtype[np.float64]], np.ndarray[_1D, np.dtype[np.complex128]], np.ndarray[_1D, np.dtype[np.complex128]], ], @@ -172,7 +172,7 @@ assert_type( assert_type( np.histogram2d(_nd_c128, _nd_i64, bins=_nd_u64), tuple[ - np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_2D, np.dtype[np.float64]], np.ndarray[_1D, np.dtype[np.uint64]], np.ndarray[_1D, np.dtype[np.uint64]], ], @@ -180,7 +180,7 @@ assert_type( assert_type( np.histogram2d(_nd_c128, _nd_c128, bins=(_nd_u64, _nd_u64)), tuple[ - np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_2D, np.dtype[np.float64]], np.ndarray[_1D, np.dtype[np.uint64]], np.ndarray[_1D, np.dtype[np.uint64]], ], @@ -188,7 +188,7 @@ assert_type( assert_type( np.histogram2d(_nd_c128, _nd_c128, bins=(_nd_bool, 8)), tuple[ - np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_2D, np.dtype[np.float64]], np.ndarray[_1D, np.dtype[np.complex128 | np.bool]], np.ndarray[_1D, np.dtype[np.complex128 | np.bool]], ], @@ -196,7 +196,7 @@ assert_type( assert_type( np.histogram2d(_nd_c128, _nd_c128, bins=(_to_1d_f64, 8)), tuple[ - np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_2D, np.dtype[np.float64]], np.ndarray[_1D, np.dtype[np.complex128 | Any]], np.ndarray[_1D, np.dtype[np.complex128 | Any]], ], From 5fa9c5838e835d3c880964594ad1a1c85b24509e Mon Sep 17 00:00:00 2001 From: Alejandro Candioti Date: Sat, 27 Dec 2025 15:46:58 -0800 Subject: [PATCH 1081/1718] Addressing @eendebakpt comments --- numpy/_core/src/npysort/binsearch.cpp | 51 +++++++++++++-------------- 1 file changed, 25 insertions(+), 26 deletions(-) diff --git a/numpy/_core/src/npysort/binsearch.cpp b/numpy/_core/src/npysort/binsearch.cpp index 3d4565df6db3..6815a9992548 100644 --- 
a/numpy/_core/src/npysort/binsearch.cpp +++ b/numpy/_core/src/npysort/binsearch.cpp @@ -74,15 +74,16 @@ binsearch(const char *arr, const char *key, char *ret, npy_intp arr_len, } /* - In this binary search, the candidates are in the range [base, base+length] - and on each iteration we pick a pivot at the mid-point of the range to - compare against the key being search. Depending on the comparison result, - we adjust the base index and halve the length of the interval.. + In this binary search implementation, the candidate insertion indices for + the jth key are in the range [base_j, base_j+length] and on each iteration + we pick a pivot at the mid-point of the range to compare against the jth + key. Depending on the comparison result, we adjust the base_j and halve + the length of the interval. To batch multiple queries, we do one pivot pass for each different length - for all keys, storing intermediate values of base for every key. To avoid - consuming extra memory, we use the ret array to store intermediate values - of each base until they become the final result in the last step. + for all keys, storing intermediate values of base_j for every key_j. To + avoid consuming extra memory, we use the ret array to store intermediate + values of each base until they become the final result in the last step. There are two benefits of this approach: @@ -94,9 +95,10 @@ binsearch(const char *arr, const char *key, char *ret, npy_intp arr_len, 2. Independent calculations for out-of-order execution. In the single-key version, step i+1 depends on computation of step i. Meaning that step i+1 must wait for step i to complete before proceeding. When batching multiple - keys, we compute each step for all keys before continuing on the next step. - All the computations at a given step are independent across different keys. - Meaning that the CPU can execute multiple keys out-of-order in parallel. + keys, we compute each step for all keys before continuing on the next + step. 
All the computations at a given step are independent across + different keys. Meaning that the CPU can execute multiple keys + out-of-order in parallel. Invariant: - cmp(arr[i], key_val) == true for all i < base @@ -116,29 +118,25 @@ binsearch(const char *arr, const char *key, char *ret, npy_intp arr_len, 3. In the first iteration, all elements are compared against the median. So we can store it in a variable and use it for all keys. - This initial block replaces the initialization loop: - - for (npy_intp i = 0; i < key_len; ++i) { - *(npy_intp *)(ret + i * ret_str) = 0; - } - - Note that when arr_len = 1, then half is 0 so the following block - initializes the array as with 0s. + This initial block replaces the initialization loop that is used for the + arr_len==0 case. Note that when arr_len = 1, then half is 0 so the + following block initializes the array with 0s. */ - npy_intp half = arr_len >> 1; - arr_len -= half; // length -> ceil(length / 2) + npy_intp interval_length = arr_len; + npy_intp half = interval_length >> 1; + interval_length -= half; // length -> ceil(length / 2) - // We write 0 to explicitly refer to base - const T mid_val = *(const T *)(arr + (0 + half) * arr_str); + npy_intp base = 0; + const T mid_val = *(const T *)(arr + (base + half) * arr_str); for (npy_intp i = 0; i < key_len; ++i) { const T key_val = *(const T *)(key + i * key_str); *(npy_intp *)(ret + i * ret_str) = cmp(mid_val, key_val) * half; } - while (arr_len > 1) { - npy_intp half = arr_len >> 1; - arr_len -= half; // length -> ceil(length / 2) + while (interval_length > 1) { + npy_intp half = interval_length >> 1; + interval_length -= half; // length -> ceil(length / 2) for (npy_intp i = 0; i < key_len; ++i) { npy_intp &base = *(npy_intp *)(ret + i * ret_str); @@ -149,7 +147,8 @@ binsearch(const char *arr, const char *key, char *ret, npy_intp arr_len, } /* - At this point arr_len == 1, so the candidates are in [base, base + 1]. 
+ At this point interval_length == 1, so the candidates are in interval + [base, base + 1]. We have two options: If cmp(arr[base], key_val) == true, insertion index is base + 1 From 2abbbfb98af201f47025a7d4b2078b7678edb05f Mon Sep 17 00:00:00 2001 From: DanielDerefaka Date: Sun, 28 Dec 2025 01:09:18 +0100 Subject: [PATCH 1082/1718] DOC: Add missing import in assert_array_equal docstring example Fixes #30509 The interactive example for `np.testing.assert_array_equal` was missing the `import numpy as np` statement, causing a `NameError: name 'np' is not defined` when running on the web docs. --- numpy/testing/_private/utils.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index fde610b05460..527885308dd2 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -1055,6 +1055,8 @@ def assert_array_equal(actual, desired, err_msg='', verbose=True, *, Examples -------- + >>> import numpy as np + The first assert does not raise an exception: >>> np.testing.assert_array_equal([1.0,2.33333,np.nan], From 5e7cc155b9baa8222518776e28c5ddb6596cb343 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 28 Dec 2025 10:25:34 +0100 Subject: [PATCH 1083/1718] TYP/STY: ``_core.defchararray`` formatting --- numpy/_core/defchararray.pyi | 726 +++++++++-------------------------- 1 file changed, 176 insertions(+), 550 deletions(-) diff --git a/numpy/_core/defchararray.pyi b/numpy/_core/defchararray.pyi index bc587ed846ba..5df01e30f0fd 100644 --- a/numpy/_core/defchararray.pyi +++ b/numpy/_core/defchararray.pyi @@ -84,6 +84,8 @@ type _StringDTypeArray = np.ndarray[_AnyShape, np.dtypes.StringDType] type _StringDTypeOrUnicodeArray = _StringDTypeArray | NDArray[np.str_] type _StringDTypeSupportsArray = _SupportsArray[np.dtypes.StringDType] +### + class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): @overload def __new__( @@ -120,388 +122,203 @@ class chararray(ndarray[_ShapeT_co, 
_CharDTypeT_co]): order: _OrderKACF = "C", ) -> _CharArray[str_]: ... + # def __array_finalize__(self, obj: object) -> None: ... - def __mul__(self, other: i_co) -> chararray[_AnyShape, _CharDTypeT_co]: ... # type: ignore[override] - def __rmul__(self, other: i_co) -> chararray[_AnyShape, _CharDTypeT_co]: ... # type: ignore[override] - def __mod__(self, i: Any) -> chararray[_AnyShape, _CharDTypeT_co]: ... # type: ignore[override] + # @overload # type: ignore[override] - def __eq__( - self: _CharArray[str_], - other: U_co, - ) -> NDArray[np.bool]: ... + def __eq__(self: _CharArray[str_], other: U_co, /) -> NDArray[np.bool]: ... @overload - def __eq__( - self: _CharArray[bytes_], - other: S_co, - ) -> NDArray[np.bool]: ... + def __eq__(self: _CharArray[bytes_], other: S_co, /) -> NDArray[np.bool]: ... + # @overload # type: ignore[override] - def __ne__( - self: _CharArray[str_], - other: U_co, - ) -> NDArray[np.bool]: ... + def __ne__(self: _CharArray[str_], other: U_co, /) -> NDArray[np.bool]: ... @overload - def __ne__( - self: _CharArray[bytes_], - other: S_co, - ) -> NDArray[np.bool]: ... + def __ne__(self: _CharArray[bytes_], other: S_co, /) -> NDArray[np.bool]: ... + # @overload # type: ignore[override] - def __ge__( - self: _CharArray[str_], - other: U_co, - ) -> NDArray[np.bool]: ... + def __ge__(self: _CharArray[str_], other: U_co, /) -> NDArray[np.bool]: ... @overload - def __ge__( - self: _CharArray[bytes_], - other: S_co, - ) -> NDArray[np.bool]: ... + def __ge__(self: _CharArray[bytes_], other: S_co, /) -> NDArray[np.bool]: ... # pyright: ignore[reportIncompatibleMethodOverride] + # @overload # type: ignore[override] - def __le__( - self: _CharArray[str_], - other: U_co, - ) -> NDArray[np.bool]: ... + def __le__(self: _CharArray[str_], other: U_co, /) -> NDArray[np.bool]: ... @overload - def __le__( - self: _CharArray[bytes_], - other: S_co, - ) -> NDArray[np.bool]: ... + def __le__(self: _CharArray[bytes_], other: S_co, /) -> NDArray[np.bool]: ... 
# pyright: ignore[reportIncompatibleMethodOverride] + # @overload # type: ignore[override] - def __gt__( - self: _CharArray[str_], - other: U_co, - ) -> NDArray[np.bool]: ... + def __gt__(self: _CharArray[str_], other: U_co, /) -> NDArray[np.bool]: ... @overload - def __gt__( - self: _CharArray[bytes_], - other: S_co, - ) -> NDArray[np.bool]: ... + def __gt__(self: _CharArray[bytes_], other: S_co, /) -> NDArray[np.bool]: ... # pyright: ignore[reportIncompatibleMethodOverride] + # @overload # type: ignore[override] - def __lt__( - self: _CharArray[str_], - other: U_co, - ) -> NDArray[np.bool]: ... + def __lt__(self: _CharArray[str_], other: U_co, /) -> NDArray[np.bool]: ... @overload - def __lt__( - self: _CharArray[bytes_], - other: S_co, - ) -> NDArray[np.bool]: ... + def __lt__(self: _CharArray[bytes_], other: S_co, /) -> NDArray[np.bool]: ... # pyright: ignore[reportIncompatibleMethodOverride] + # @overload # type: ignore[override] - def __add__( - self: _CharArray[str_], - other: U_co, - ) -> _CharArray[str_]: ... + def __add__(self: _CharArray[str_], other: U_co, /) -> _CharArray[str_]: ... @overload - def __add__( - self: _CharArray[bytes_], - other: S_co, - ) -> _CharArray[bytes_]: ... + def __add__(self: _CharArray[bytes_], other: S_co, /) -> _CharArray[bytes_]: ... # pyright: ignore[reportIncompatibleMethodOverride] + # @overload # type: ignore[override] - def __radd__( - self: _CharArray[str_], - other: U_co, - ) -> _CharArray[str_]: ... + def __radd__(self: _CharArray[str_], other: U_co, /) -> _CharArray[str_]: ... @overload - def __radd__( - self: _CharArray[bytes_], - other: S_co, - ) -> _CharArray[bytes_]: ... + def __radd__(self: _CharArray[bytes_], other: S_co, /) -> _CharArray[bytes_]: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # + def __mul__(self, other: i_co) -> chararray[_AnyShape, _CharDTypeT_co]: ... # type: ignore[override] + def __rmul__(self, other: i_co) -> chararray[_AnyShape, _CharDTypeT_co]: ... 
# type: ignore[override] + # + def __mod__(self, i: Any) -> chararray[_AnyShape, _CharDTypeT_co]: ... # type: ignore[override] + + # + def decode(self: _CharArray[bytes_], encoding: str | None = None, errors: str | None = None) -> _CharArray[str_]: ... + def encode(self: _CharArray[str_], encoding: str | None = None, errors: str | None = None) -> _CharArray[bytes_]: ... + + # @overload - def center( - self: _CharArray[str_], - width: i_co, - fillchar: U_co = " ", - ) -> _CharArray[str_]: ... + def center(self: _CharArray[str_], width: i_co, fillchar: U_co = " ") -> _CharArray[str_]: ... @overload - def center( - self: _CharArray[bytes_], - width: i_co, - fillchar: str | S_co = " ", - ) -> _CharArray[bytes_]: ... + def center(self: _CharArray[bytes_], width: i_co, fillchar: str | S_co = " ") -> _CharArray[bytes_]: ... + # @overload - def count( - self: _CharArray[str_], - sub: U_co, - start: i_co = 0, - end: i_co | None = None, - ) -> NDArray[int_]: ... + def count(self: _CharArray[str_], sub: U_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... @overload - def count( - self: _CharArray[bytes_], - sub: S_co, - start: i_co = 0, - end: i_co | None = None, - ) -> NDArray[int_]: ... - - def decode( - self: _CharArray[bytes_], - encoding: str | None = None, - errors: str | None = None, - ) -> _CharArray[str_]: ... - - def encode( - self: _CharArray[str_], - encoding: str | None = None, - errors: str | None = None, - ) -> _CharArray[bytes_]: ... + def count(self: _CharArray[bytes_], sub: S_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... + # @overload - def endswith( - self: _CharArray[str_], - suffix: U_co, - start: i_co = 0, - end: i_co | None = None, - ) -> NDArray[np.bool]: ... + def endswith(self: _CharArray[str_], suffix: U_co, start: i_co = 0, end: i_co | None = None) -> NDArray[np.bool]: ... 
@overload - def endswith( - self: _CharArray[bytes_], - suffix: S_co, - start: i_co = 0, - end: i_co | None = None, - ) -> NDArray[np.bool]: ... - - def expandtabs( - self, - tabsize: i_co = 8, - ) -> Self: ... + def endswith(self: _CharArray[bytes_], suffix: S_co, start: i_co = 0, end: i_co | None = None) -> NDArray[np.bool]: ... + + # + def expandtabs(self, tabsize: i_co = 8) -> Self: ... + # @overload - def find( - self: _CharArray[str_], - sub: U_co, - start: i_co = 0, - end: i_co | None = None, - ) -> NDArray[int_]: ... + def find(self: _CharArray[str_], sub: U_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... @overload - def find( - self: _CharArray[bytes_], - sub: S_co, - start: i_co = 0, - end: i_co | None = None, - ) -> NDArray[int_]: ... + def find(self: _CharArray[bytes_], sub: S_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... + # @overload - def index( - self: _CharArray[str_], - sub: U_co, - start: i_co = 0, - end: i_co | None = None, - ) -> NDArray[int_]: ... + def index(self: _CharArray[str_], sub: U_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... @overload - def index( - self: _CharArray[bytes_], - sub: S_co, - start: i_co = 0, - end: i_co | None = None, - ) -> NDArray[int_]: ... + def index(self: _CharArray[bytes_], sub: S_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... + # @overload - def join( - self: _CharArray[str_], - seq: U_co, - ) -> _CharArray[str_]: ... + def join(self: _CharArray[str_], seq: U_co) -> _CharArray[str_]: ... @overload - def join( - self: _CharArray[bytes_], - seq: S_co, - ) -> _CharArray[bytes_]: ... + def join(self: _CharArray[bytes_], seq: S_co) -> _CharArray[bytes_]: ... + # @overload - def ljust( - self: _CharArray[str_], - width: i_co, - fillchar: U_co = " ", - ) -> _CharArray[str_]: ... + def ljust(self: _CharArray[str_], width: i_co, fillchar: U_co = " ") -> _CharArray[str_]: ... 
@overload - def ljust( - self: _CharArray[bytes_], - width: i_co, - fillchar: str | S_co = " ", - ) -> _CharArray[bytes_]: ... + def ljust(self: _CharArray[bytes_], width: i_co, fillchar: str | S_co = " ") -> _CharArray[bytes_]: ... + # @overload - def lstrip( - self: _CharArray[str_], - chars: U_co | None = None, - ) -> _CharArray[str_]: ... + def lstrip(self: _CharArray[str_], chars: U_co | None = None) -> _CharArray[str_]: ... @overload - def lstrip( - self: _CharArray[bytes_], - chars: S_co | None = None, - ) -> _CharArray[bytes_]: ... + def lstrip(self: _CharArray[bytes_], chars: S_co | None = None) -> _CharArray[bytes_]: ... + # @overload # type: ignore[override] - def partition( - self: _CharArray[str_], - sep: U_co, - ) -> _CharArray[str_]: ... + def partition(self: _CharArray[str_], sep: U_co) -> _CharArray[str_]: ... @overload - def partition( - self: _CharArray[bytes_], - sep: S_co, - ) -> _CharArray[bytes_]: ... + def partition(self: _CharArray[bytes_], sep: S_co) -> _CharArray[bytes_]: ... # pyright: ignore[reportIncompatibleMethodOverride] + # @overload - def replace( - self: _CharArray[str_], - old: U_co, - new: U_co, - count: i_co | None = None, - ) -> _CharArray[str_]: ... + def replace(self: _CharArray[str_], old: U_co, new: U_co, count: i_co | None = None) -> _CharArray[str_]: ... @overload - def replace( - self: _CharArray[bytes_], - old: S_co, - new: S_co, - count: i_co | None = None, - ) -> _CharArray[bytes_]: ... + def replace(self: _CharArray[bytes_], old: S_co, new: S_co, count: i_co | None = None) -> _CharArray[bytes_]: ... + # @overload - def rfind( - self: _CharArray[str_], - sub: U_co, - start: i_co = 0, - end: i_co | None = None, - ) -> NDArray[int_]: ... + def rfind(self: _CharArray[str_], sub: U_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... @overload - def rfind( - self: _CharArray[bytes_], - sub: S_co, - start: i_co = 0, - end: i_co | None = None, - ) -> NDArray[int_]: ... 
+ def rfind(self: _CharArray[bytes_], sub: S_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... + # @overload - def rindex( - self: _CharArray[str_], - sub: U_co, - start: i_co = 0, - end: i_co | None = None, - ) -> NDArray[int_]: ... + def rindex(self: _CharArray[str_], sub: U_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... @overload - def rindex( - self: _CharArray[bytes_], - sub: S_co, - start: i_co = 0, - end: i_co | None = None, - ) -> NDArray[int_]: ... + def rindex(self: _CharArray[bytes_], sub: S_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... + # @overload - def rjust( - self: _CharArray[str_], - width: i_co, - fillchar: U_co = " ", - ) -> _CharArray[str_]: ... + def rjust(self: _CharArray[str_], width: i_co, fillchar: U_co = " ") -> _CharArray[str_]: ... @overload - def rjust( - self: _CharArray[bytes_], - width: i_co, - fillchar: str | S_co = " ", - ) -> _CharArray[bytes_]: ... + def rjust(self: _CharArray[bytes_], width: i_co, fillchar: str | S_co = " ") -> _CharArray[bytes_]: ... + # @overload - def rpartition( - self: _CharArray[str_], - sep: U_co, - ) -> _CharArray[str_]: ... + def rpartition(self: _CharArray[str_], sep: U_co) -> _CharArray[str_]: ... @overload - def rpartition( - self: _CharArray[bytes_], - sep: S_co, - ) -> _CharArray[bytes_]: ... + def rpartition(self: _CharArray[bytes_], sep: S_co) -> _CharArray[bytes_]: ... + # @overload - def rsplit( - self: _CharArray[str_], - sep: U_co | None = None, - maxsplit: i_co | None = None, - ) -> NDArray[object_]: ... + def rsplit(self: _CharArray[str_], sep: U_co | None = None, maxsplit: i_co | None = None) -> NDArray[object_]: ... @overload - def rsplit( - self: _CharArray[bytes_], - sep: S_co | None = None, - maxsplit: i_co | None = None, - ) -> NDArray[object_]: ... + def rsplit(self: _CharArray[bytes_], sep: S_co | None = None, maxsplit: i_co | None = None) -> NDArray[object_]: ... 
+ # @overload - def rstrip( - self: _CharArray[str_], - chars: U_co | None = None, - ) -> _CharArray[str_]: ... + def rstrip(self: _CharArray[str_], chars: U_co | None = None) -> _CharArray[str_]: ... @overload - def rstrip( - self: _CharArray[bytes_], - chars: S_co | None = None, - ) -> _CharArray[bytes_]: ... + def rstrip(self: _CharArray[bytes_], chars: S_co | None = None) -> _CharArray[bytes_]: ... + # @overload - def split( - self: _CharArray[str_], - sep: U_co | None = None, - maxsplit: i_co | None = None, - ) -> NDArray[object_]: ... + def split(self: _CharArray[str_], sep: U_co | None = None, maxsplit: i_co | None = None) -> NDArray[object_]: ... @overload - def split( - self: _CharArray[bytes_], - sep: S_co | None = None, - maxsplit: i_co | None = None, - ) -> NDArray[object_]: ... + def split(self: _CharArray[bytes_], sep: S_co | None = None, maxsplit: i_co | None = None) -> NDArray[object_]: ... + # def splitlines(self, keepends: b_co | None = None) -> NDArray[object_]: ... + # @overload - def startswith( - self: _CharArray[str_], - prefix: U_co, - start: i_co = 0, - end: i_co | None = None, - ) -> NDArray[np.bool]: ... + def startswith(self: _CharArray[str_], prefix: U_co, start: i_co = 0, end: i_co | None = None) -> NDArray[np.bool]: ... @overload - def startswith( - self: _CharArray[bytes_], - prefix: S_co, - start: i_co = 0, - end: i_co | None = None, - ) -> NDArray[np.bool]: ... + def startswith(self: _CharArray[bytes_], prefix: S_co, start: i_co = 0, end: i_co | None = None) -> NDArray[np.bool]: ... + # @overload - def strip( - self: _CharArray[str_], - chars: U_co | None = None, - ) -> _CharArray[str_]: ... + def strip(self: _CharArray[str_], chars: U_co | None = None) -> _CharArray[str_]: ... @overload - def strip( - self: _CharArray[bytes_], - chars: S_co | None = None, - ) -> _CharArray[bytes_]: ... + def strip(self: _CharArray[bytes_], chars: S_co | None = None) -> _CharArray[bytes_]: ... 
+ # @overload - def translate( - self: _CharArray[str_], - table: U_co, - deletechars: U_co | None = None, - ) -> _CharArray[str_]: ... + def translate(self: _CharArray[str_], table: U_co, deletechars: U_co | None = None) -> _CharArray[str_]: ... @overload - def translate( - self: _CharArray[bytes_], - table: S_co, - deletechars: S_co | None = None, - ) -> _CharArray[bytes_]: ... + def translate(self: _CharArray[bytes_], table: S_co, deletechars: S_co | None = None) -> _CharArray[bytes_]: ... + # def zfill(self, width: i_co) -> Self: ... def capitalize(self) -> Self: ... def title(self) -> Self: ... def swapcase(self) -> Self: ... def lower(self) -> Self: ... def upper(self) -> Self: ... + + # def isalnum(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ... def isalpha(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ... def isdigit(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ... @@ -600,16 +417,8 @@ def center(a: _StringDTypeSupportsArray, width: i_co, fillchar: str | _StringDTy @overload def center(a: T_co, width: i_co, fillchar: T_co = " ") -> _StringDTypeOrUnicodeArray: ... -def decode( - a: S_co, - encoding: str | None = None, - errors: str | None = None, -) -> NDArray[str_]: ... -def encode( - a: U_co | T_co, - encoding: str | None = None, - errors: str | None = None, -) -> NDArray[bytes_]: ... +def decode(a: S_co, encoding: str | None = None, errors: str | None = None) -> NDArray[str_]: ... +def encode(a: U_co | T_co, encoding: str | None = None, errors: str | None = None) -> NDArray[bytes_]: ... @overload def expandtabs(a: U_co, tabsize: i_co = 8) -> NDArray[str_]: ... @@ -666,58 +475,24 @@ def partition(a: _StringDTypeSupportsArray, sep: _StringDTypeSupportsArray) -> _ def partition(a: T_co, sep: T_co) -> _StringDTypeOrUnicodeArray: ... @overload -def replace( - a: U_co, - old: U_co, - new: U_co, - count: i_co | None = -1, -) -> NDArray[str_]: ... +def replace(a: U_co, old: U_co, new: U_co, count: i_co | None = -1) -> NDArray[str_]: ... 
@overload -def replace( - a: S_co, - old: S_co, - new: S_co, - count: i_co | None = -1, -) -> NDArray[bytes_]: ... +def replace(a: S_co, old: S_co, new: S_co, count: i_co | None = -1) -> NDArray[bytes_]: ... @overload def replace( - a: _StringDTypeSupportsArray, - old: _StringDTypeSupportsArray, - new: _StringDTypeSupportsArray, - count: i_co = -1, + a: _StringDTypeSupportsArray, old: _StringDTypeSupportsArray, new: _StringDTypeSupportsArray, count: i_co = -1 ) -> _StringDTypeArray: ... @overload -def replace( - a: T_co, - old: T_co, - new: T_co, - count: i_co = -1, -) -> _StringDTypeOrUnicodeArray: ... - -@overload -def rjust( - a: U_co, - width: i_co, - fillchar: U_co = " ", -) -> NDArray[str_]: ... -@overload -def rjust( - a: S_co, - width: i_co, - fillchar: str | S_co = " ", -) -> NDArray[bytes_]: ... -@overload -def rjust( - a: _StringDTypeSupportsArray, - width: i_co, - fillchar: str | _StringDTypeSupportsArray = " ", -) -> _StringDTypeArray: ... +def replace(a: T_co, old: T_co, new: T_co, count: i_co = -1) -> _StringDTypeOrUnicodeArray: ... + +@overload +def rjust(a: U_co, width: i_co, fillchar: U_co = " ") -> NDArray[str_]: ... +@overload +def rjust(a: S_co, width: i_co, fillchar: str | S_co = " ") -> NDArray[bytes_]: ... +@overload +def rjust(a: _StringDTypeSupportsArray, width: i_co, fillchar: str | _StringDTypeSupportsArray = " ") -> _StringDTypeArray: ... @overload -def rjust( - a: T_co, - width: i_co, - fillchar: T_co = " ", -) -> _StringDTypeOrUnicodeArray: ... +def rjust(a: T_co, width: i_co, fillchar: T_co = " ") -> _StringDTypeOrUnicodeArray: ... @overload def rpartition(a: U_co, sep: U_co) -> NDArray[str_]: ... @@ -729,29 +504,15 @@ def rpartition(a: _StringDTypeSupportsArray, sep: _StringDTypeSupportsArray) -> def rpartition(a: T_co, sep: T_co) -> _StringDTypeOrUnicodeArray: ... @overload -def rsplit( - a: U_co, - sep: U_co | None = None, - maxsplit: i_co | None = None, -) -> NDArray[object_]: ... 
+def rsplit(a: U_co, sep: U_co | None = None, maxsplit: i_co | None = None) -> NDArray[object_]: ... @overload -def rsplit( - a: S_co, - sep: S_co | None = None, - maxsplit: i_co | None = None, -) -> NDArray[object_]: ... +def rsplit(a: S_co, sep: S_co | None = None, maxsplit: i_co | None = None) -> NDArray[object_]: ... @overload def rsplit( - a: _StringDTypeSupportsArray, - sep: _StringDTypeSupportsArray | None = None, - maxsplit: i_co | None = None, + a: _StringDTypeSupportsArray, sep: _StringDTypeSupportsArray | None = None, maxsplit: i_co | None = None ) -> NDArray[object_]: ... @overload -def rsplit( - a: T_co, - sep: T_co | None = None, - maxsplit: i_co | None = None, -) -> NDArray[object_]: ... +def rsplit(a: T_co, sep: T_co | None = None, maxsplit: i_co | None = None) -> NDArray[object_]: ... @overload def rstrip(a: U_co, chars: U_co | None = None) -> NDArray[str_]: ... @@ -763,29 +524,15 @@ def rstrip(a: _StringDTypeSupportsArray, chars: _StringDTypeSupportsArray | None def rstrip(a: T_co, chars: T_co | None = None) -> _StringDTypeOrUnicodeArray: ... @overload -def split( - a: U_co, - sep: U_co | None = None, - maxsplit: i_co | None = None, -) -> NDArray[object_]: ... +def split(a: U_co, sep: U_co | None = None, maxsplit: i_co | None = None) -> NDArray[object_]: ... @overload -def split( - a: S_co, - sep: S_co | None = None, - maxsplit: i_co | None = None, -) -> NDArray[object_]: ... +def split(a: S_co, sep: S_co | None = None, maxsplit: i_co | None = None) -> NDArray[object_]: ... @overload def split( - a: _StringDTypeSupportsArray, - sep: _StringDTypeSupportsArray | None = None, - maxsplit: i_co | None = None, + a: _StringDTypeSupportsArray, sep: _StringDTypeSupportsArray | None = None, maxsplit: i_co | None = None ) -> NDArray[object_]: ... @overload -def split( - a: T_co, - sep: T_co | None = None, - maxsplit: i_co | None = None, -) -> NDArray[object_]: ... 
+def split(a: T_co, sep: T_co | None = None, maxsplit: i_co | None = None) -> NDArray[object_]: ... def splitlines(a: UST_co, keepends: b_co | None = None) -> NDArray[np.object_]: ... @@ -817,29 +564,13 @@ def title(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ... def title(a: T_co) -> _StringDTypeOrUnicodeArray: ... @overload -def translate( - a: U_co, - table: str, - deletechars: str | None = None, -) -> NDArray[str_]: ... +def translate(a: U_co, table: str, deletechars: str | None = None) -> NDArray[str_]: ... @overload -def translate( - a: S_co, - table: str, - deletechars: str | None = None, -) -> NDArray[bytes_]: ... +def translate(a: S_co, table: str, deletechars: str | None = None) -> NDArray[bytes_]: ... @overload -def translate( - a: _StringDTypeSupportsArray, - table: str, - deletechars: str | None = None, -) -> _StringDTypeArray: ... +def translate(a: _StringDTypeSupportsArray, table: str, deletechars: str | None = None) -> _StringDTypeArray: ... @overload -def translate( - a: T_co, - table: str, - deletechars: str | None = None, -) -> _StringDTypeOrUnicodeArray: ... +def translate(a: T_co, table: str, deletechars: str | None = None) -> _StringDTypeOrUnicodeArray: ... @overload def upper(a: U_co) -> NDArray[str_]: ... @@ -861,92 +592,32 @@ def zfill(a: T_co, width: i_co) -> _StringDTypeOrUnicodeArray: ... # String information @overload -def count( - a: U_co, - sub: U_co, - start: i_co = 0, - end: i_co | None = None, -) -> NDArray[int_]: ... -@overload -def count( - a: S_co, - sub: S_co, - start: i_co = 0, - end: i_co | None = None, -) -> NDArray[int_]: ... -@overload -def count( - a: T_co, - sub: T_co, - start: i_co = 0, - end: i_co | None = None, -) -> NDArray[np.int_]: ... - -@overload -def endswith( - a: U_co, - suffix: U_co, - start: i_co = 0, - end: i_co | None = None, -) -> NDArray[np.bool]: ... -@overload -def endswith( - a: S_co, - suffix: S_co, - start: i_co = 0, - end: i_co | None = None, -) -> NDArray[np.bool]: ... 
-@overload -def endswith( - a: T_co, - suffix: T_co, - start: i_co = 0, - end: i_co | None = None, -) -> NDArray[np.bool]: ... - -@overload -def find( - a: U_co, - sub: U_co, - start: i_co = 0, - end: i_co | None = None, -) -> NDArray[int_]: ... -@overload -def find( - a: S_co, - sub: S_co, - start: i_co = 0, - end: i_co | None = None, -) -> NDArray[int_]: ... -@overload -def find( - a: T_co, - sub: T_co, - start: i_co = 0, - end: i_co | None = None, -) -> NDArray[np.int_]: ... - -@overload -def index( - a: U_co, - sub: U_co, - start: i_co = 0, - end: i_co | None = None, -) -> NDArray[int_]: ... -@overload -def index( - a: S_co, - sub: S_co, - start: i_co = 0, - end: i_co | None = None, -) -> NDArray[int_]: ... -@overload -def index( - a: T_co, - sub: T_co, - start: i_co = 0, - end: i_co | None = None, -) -> NDArray[np.int_]: ... +def count(a: U_co, sub: U_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... +@overload +def count(a: S_co, sub: S_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... +@overload +def count(a: T_co, sub: T_co, start: i_co = 0, end: i_co | None = None) -> NDArray[np.int_]: ... + +@overload +def endswith(a: U_co, suffix: U_co, start: i_co = 0, end: i_co | None = None) -> NDArray[np.bool]: ... +@overload +def endswith(a: S_co, suffix: S_co, start: i_co = 0, end: i_co | None = None) -> NDArray[np.bool]: ... +@overload +def endswith(a: T_co, suffix: T_co, start: i_co = 0, end: i_co | None = None) -> NDArray[np.bool]: ... + +@overload +def find(a: U_co, sub: U_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... +@overload +def find(a: S_co, sub: S_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... +@overload +def find(a: T_co, sub: T_co, start: i_co = 0, end: i_co | None = None) -> NDArray[np.int_]: ... + +@overload +def index(a: U_co, sub: U_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... 
+@overload +def index(a: S_co, sub: S_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... +@overload +def index(a: T_co, sub: T_co, start: i_co = 0, end: i_co | None = None) -> NDArray[np.int_]: ... def isalpha(a: UST_co) -> NDArray[np.bool]: ... def isalnum(a: UST_co) -> NDArray[np.bool]: ... @@ -959,70 +630,25 @@ def istitle(a: UST_co) -> NDArray[np.bool]: ... def isupper(a: UST_co) -> NDArray[np.bool]: ... @overload -def rfind( - a: U_co, - sub: U_co, - start: i_co = 0, - end: i_co | None = None, -) -> NDArray[int_]: ... -@overload -def rfind( - a: S_co, - sub: S_co, - start: i_co = 0, - end: i_co | None = None, -) -> NDArray[int_]: ... -@overload -def rfind( - a: T_co, - sub: T_co, - start: i_co = 0, - end: i_co | None = None, -) -> NDArray[np.int_]: ... - -@overload -def rindex( - a: U_co, - sub: U_co, - start: i_co = 0, - end: i_co | None = None, -) -> NDArray[int_]: ... -@overload -def rindex( - a: S_co, - sub: S_co, - start: i_co = 0, - end: i_co | None = None, -) -> NDArray[int_]: ... -@overload -def rindex( - a: T_co, - sub: T_co, - start: i_co = 0, - end: i_co | None = None, -) -> NDArray[np.int_]: ... - -@overload -def startswith( - a: U_co, - prefix: U_co, - start: i_co = 0, - end: i_co | None = None, -) -> NDArray[np.bool]: ... -@overload -def startswith( - a: S_co, - prefix: S_co, - start: i_co = 0, - end: i_co | None = None, -) -> NDArray[np.bool]: ... -@overload -def startswith( - a: T_co, - prefix: T_co, - start: i_co = 0, - end: i_co | None = None, -) -> NDArray[np.bool]: ... +def rfind(a: U_co, sub: U_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... +@overload +def rfind(a: S_co, sub: S_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... +@overload +def rfind(a: T_co, sub: T_co, start: i_co = 0, end: i_co | None = None) -> NDArray[np.int_]: ... + +@overload +def rindex(a: U_co, sub: U_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... 
+@overload +def rindex(a: S_co, sub: S_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... +@overload +def rindex(a: T_co, sub: T_co, start: i_co = 0, end: i_co | None = None) -> NDArray[np.int_]: ... + +@overload +def startswith(a: U_co, prefix: U_co, start: i_co = 0, end: i_co | None = None) -> NDArray[np.bool]: ... +@overload +def startswith(a: S_co, prefix: S_co, start: i_co = 0, end: i_co | None = None) -> NDArray[np.bool]: ... +@overload +def startswith(a: T_co, prefix: T_co, start: i_co = 0, end: i_co | None = None) -> NDArray[np.bool]: ... def str_len(A: UST_co) -> NDArray[int_]: ... From def4bc6189c8fb9bdae71183d47935ca61efe913 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 28 Dec 2025 16:29:49 +0100 Subject: [PATCH 1084/1718] TYP: ``{lin,log,geom}space`` shape-typing --- numpy/_core/function_base.pyi | 186 +++++++++++++++++- .../tests/data/reveal/array_constructors.pyi | 26 +-- 2 files changed, 191 insertions(+), 21 deletions(-) diff --git a/numpy/_core/function_base.pyi b/numpy/_core/function_base.pyi index 060a44d416ea..7fa9d07481ea 100644 --- a/numpy/_core/function_base.pyi +++ b/numpy/_core/function_base.pyi @@ -1,5 +1,5 @@ from _typeshed import Incomplete -from typing import Literal as L, SupportsIndex, overload +from typing import Any, Literal as L, SupportsIndex, overload import numpy as np from numpy._typing import ( @@ -7,16 +7,31 @@ from numpy._typing import ( NDArray, _ArrayLikeComplex_co, _ArrayLikeFloat_co, + _ComplexLike_co, _DTypeLike, ) from numpy._typing._array_like import _DualArrayLike __all__ = ["geomspace", "linspace", "logspace"] +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] +type _ToFloat64 = float | np.integer | np.bool # `np.float64` is assignable to `float` type _ToArrayFloat64 = _DualArrayLike[np.dtype[np.float64 | np.integer | np.bool], float] ### +@overload +def linspace( + start: _ToFloat64, + stop: _ToFloat64, + num: SupportsIndex = 50, + endpoint: bool = True, + retstep: 
L[False] = False, + dtype: None = None, + axis: SupportsIndex = 0, + *, + device: L["cpu"] | None = None, +) -> _Array1D[np.float64]: ... @overload def linspace( start: _ToArrayFloat64, @@ -40,7 +55,19 @@ def linspace( axis: SupportsIndex = 0, *, device: L["cpu"] | None = None, -) -> NDArray[np.floating]: ... +) -> NDArray[np.float64 | Any]: ... +@overload +def linspace( + start: complex, + stop: complex, + num: SupportsIndex = 50, + endpoint: bool = True, + retstep: L[False] = False, + dtype: None = None, + axis: SupportsIndex = 0, + *, + device: L["cpu"] | None = None, +) -> _Array1D[np.complex128 | Any]: ... @overload def linspace( start: _ArrayLikeComplex_co, @@ -52,7 +79,31 @@ def linspace( axis: SupportsIndex = 0, *, device: L["cpu"] | None = None, -) -> NDArray[np.complexfloating]: ... +) -> NDArray[np.complex128 | Any]: ... +@overload +def linspace[ScalarT: np.generic]( + start: _ComplexLike_co, + stop: _ComplexLike_co, + num: SupportsIndex, + endpoint: bool, + retstep: L[False], + dtype: _DTypeLike[ScalarT], + axis: SupportsIndex = 0, + *, + device: L["cpu"] | None = None, +) -> _Array1D[ScalarT]: ... +@overload +def linspace[ScalarT: np.generic]( + start: _ComplexLike_co, + stop: _ComplexLike_co, + num: SupportsIndex = 50, + endpoint: bool = True, + retstep: L[False] = False, + *, + dtype: _DTypeLike[ScalarT], + axis: SupportsIndex = 0, + device: L["cpu"] | None = None, +) -> _Array1D[ScalarT]: ... @overload def linspace[ScalarT: np.generic]( start: _ArrayLikeComplex_co, @@ -90,6 +141,18 @@ def linspace( device: L["cpu"] | None = None, ) -> NDArray[Incomplete]: ... @overload +def linspace( + start: _ToFloat64, + stop: _ToFloat64, + num: SupportsIndex = 50, + endpoint: bool = True, + *, + retstep: L[True], + dtype: None = None, + axis: SupportsIndex = 0, + device: L["cpu"] | None = None, +) -> tuple[_Array1D[np.float64], np.float64]: ... 
+@overload def linspace( start: _ToArrayFloat64, stop: _ToArrayFloat64, @@ -112,7 +175,19 @@ def linspace( dtype: None = None, axis: SupportsIndex = 0, device: L["cpu"] | None = None, -) -> tuple[NDArray[np.floating], np.floating]: ... +) -> tuple[NDArray[np.float64 | Any], np.float64 | Any]: ... +@overload +def linspace( + start: complex, + stop: complex, + num: SupportsIndex = 50, + endpoint: bool = True, + *, + retstep: L[True], + dtype: None = None, + axis: SupportsIndex = 0, + device: L["cpu"] | None = None, +) -> tuple[_Array1D[np.complex128 | Any], np.complex128 | Any]: ... @overload def linspace( start: _ArrayLikeComplex_co, @@ -124,7 +199,19 @@ def linspace( dtype: None = None, axis: SupportsIndex = 0, device: L["cpu"] | None = None, -) -> tuple[NDArray[np.complexfloating], np.complexfloating]: ... +) -> tuple[NDArray[np.complex128 | Any], np.complex128 | Any]: ... +@overload +def linspace[ScalarT: np.generic]( + start: _ComplexLike_co, + stop: _ComplexLike_co, + num: SupportsIndex = 50, + endpoint: bool = True, + *, + retstep: L[True], + dtype: _DTypeLike[ScalarT], + axis: SupportsIndex = 0, + device: L["cpu"] | None = None, +) -> tuple[_Array1D[ScalarT], ScalarT]: ... @overload def linspace[ScalarT: np.generic]( start: _ArrayLikeComplex_co, @@ -150,6 +237,17 @@ def linspace( device: L["cpu"] | None = None, ) -> tuple[NDArray[Incomplete], Incomplete]: ... +# +@overload +def logspace( + start: _ToFloat64, + stop: _ToFloat64, + num: SupportsIndex = 50, + endpoint: bool = True, + base: _ToFloat64 = 10.0, + dtype: None = None, + axis: SupportsIndex = 0, +) -> _Array1D[np.float64]: ... @overload def logspace( start: _ToArrayFloat64, @@ -169,7 +267,17 @@ def logspace( base: _ArrayLikeFloat_co = 10.0, dtype: None = None, axis: SupportsIndex = 0, -) -> NDArray[np.floating]: ... +) -> NDArray[np.float64 | Any]: ... 
+@overload +def logspace( + start: complex, + stop: complex, + num: SupportsIndex = 50, + endpoint: bool = True, + base: complex = 10.0, + dtype: None = None, + axis: SupportsIndex = 0, +) -> _Array1D[np.complex128 | Any]: ... @overload def logspace( start: _ArrayLikeComplex_co, @@ -179,7 +287,28 @@ def logspace( base: _ArrayLikeComplex_co = 10.0, dtype: None = None, axis: SupportsIndex = 0, -) -> NDArray[np.complexfloating]: ... +) -> NDArray[np.complex128 | Any]: ... +@overload +def logspace[ScalarT: np.generic]( + start: _ComplexLike_co, + stop: _ComplexLike_co, + num: SupportsIndex, + endpoint: bool, + base: _ComplexLike_co, + dtype: _DTypeLike[ScalarT], + axis: SupportsIndex = 0, +) -> _Array1D[ScalarT]: ... +@overload +def logspace[ScalarT: np.generic]( + start: _ComplexLike_co, + stop: _ComplexLike_co, + num: SupportsIndex = 50, + endpoint: bool = True, + base: _ArrayLikeComplex_co = 10.0, + *, + dtype: _DTypeLike[ScalarT], + axis: SupportsIndex = 0, +) -> _Array1D[ScalarT]: ... @overload def logspace[ScalarT: np.generic]( start: _ArrayLikeComplex_co, @@ -212,6 +341,16 @@ def logspace( axis: SupportsIndex = 0, ) -> NDArray[Incomplete]: ... +# +@overload +def geomspace( + start: _ToFloat64, + stop: _ToFloat64, + num: SupportsIndex = 50, + endpoint: bool = True, + dtype: None = None, + axis: SupportsIndex = 0, +) -> _Array1D[np.float64]: ... @overload def geomspace( start: _ToArrayFloat64, @@ -229,7 +368,16 @@ def geomspace( endpoint: bool = True, dtype: None = None, axis: SupportsIndex = 0, -) -> NDArray[np.floating]: ... +) -> NDArray[np.float64 | Any]: ... +@overload +def geomspace( + start: complex, + stop: complex, + num: SupportsIndex = 50, + endpoint: bool = True, + dtype: None = None, + axis: SupportsIndex = 0, +) -> _Array1D[np.complex128 | Any]: ... @overload def geomspace( start: _ArrayLikeComplex_co, @@ -238,7 +386,26 @@ def geomspace( endpoint: bool = True, dtype: None = None, axis: SupportsIndex = 0, -) -> NDArray[np.complexfloating]: ... 
+) -> NDArray[np.complex128 | Any]: ... +@overload +def geomspace[ScalarT: np.generic]( + start: _ComplexLike_co, + stop: _ComplexLike_co, + num: SupportsIndex, + endpoint: bool, + dtype: _DTypeLike[ScalarT], + axis: SupportsIndex = 0, +) -> _Array1D[ScalarT]: ... +@overload +def geomspace[ScalarT: np.generic]( + start: _ComplexLike_co, + stop: _ComplexLike_co, + num: SupportsIndex = 50, + endpoint: bool = True, + *, + dtype: _DTypeLike[ScalarT], + axis: SupportsIndex = 0, +) -> _Array1D[ScalarT]: ... @overload def geomspace[ScalarT: np.generic]( start: _ArrayLikeComplex_co, @@ -268,6 +435,7 @@ def geomspace( axis: SupportsIndex = 0, ) -> NDArray[Incomplete]: ... +# def add_newdoc( place: str, obj: str, diff --git a/numpy/typing/tests/data/reveal/array_constructors.pyi b/numpy/typing/tests/data/reveal/array_constructors.pyi index 36440fca9487..23101e224ea3 100644 --- a/numpy/typing/tests/data/reveal/array_constructors.pyi +++ b/numpy/typing/tests/data/reveal/array_constructors.pyi @@ -6,6 +6,8 @@ import numpy as np import numpy.typing as npt from numpy._typing import _AnyShape +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] + class SubClass[ScalarT: np.generic](np.ndarray[_AnyShape, np.dtype[ScalarT]]): ... 
class IntoSubClass[ScalarT: np.generic]: @@ -143,23 +145,23 @@ assert_type(np.require(B, requirements="W"), SubClass[np.float64]) assert_type(np.require(B, requirements="A"), SubClass[np.float64]) assert_type(np.require(C), npt.NDArray[Any]) -assert_type(np.linspace(0, 10), npt.NDArray[np.float64]) -assert_type(np.linspace(0, 10j), npt.NDArray[np.complexfloating]) -assert_type(np.linspace(0, 10, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.linspace(0, 10), _Array1D[np.float64]) +assert_type(np.linspace(0, 10j), _Array1D[np.complex128 | Any]) +assert_type(np.linspace(0, 10, dtype=np.int64), _Array1D[np.int64]) assert_type(np.linspace(0, 10, dtype=int), npt.NDArray[Any]) -assert_type(np.linspace(0, 10, retstep=True), tuple[npt.NDArray[np.float64], np.float64]) -assert_type(np.linspace(0j, 10, retstep=True), tuple[npt.NDArray[np.complexfloating], np.complexfloating]) -assert_type(np.linspace(0, 10, retstep=True, dtype=np.int64), tuple[npt.NDArray[np.int64], np.int64]) +assert_type(np.linspace(0, 10, retstep=True), tuple[_Array1D[np.float64], np.float64]) +assert_type(np.linspace(0j, 10, retstep=True), tuple[_Array1D[np.complex128 | Any], np.complex128 | Any]) +assert_type(np.linspace(0, 10, retstep=True, dtype=np.int64), tuple[_Array1D[np.int64], np.int64]) assert_type(np.linspace(0j, 10, retstep=True, dtype=int), tuple[npt.NDArray[Any], Any]) -assert_type(np.logspace(0, 10), npt.NDArray[np.float64]) -assert_type(np.logspace(0, 10j), npt.NDArray[np.complexfloating]) -assert_type(np.logspace(0, 10, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.logspace(0, 10), _Array1D[np.float64]) +assert_type(np.logspace(0, 10j), _Array1D[np.complex128 | Any]) +assert_type(np.logspace(0, 10, dtype=np.int64), _Array1D[np.int64]) assert_type(np.logspace(0, 10, dtype=int), npt.NDArray[Any]) -assert_type(np.geomspace(0, 10), npt.NDArray[np.float64]) -assert_type(np.geomspace(0, 10j), npt.NDArray[np.complexfloating]) -assert_type(np.geomspace(0, 10, dtype=np.int64), 
npt.NDArray[np.int64]) +assert_type(np.geomspace(0, 10), _Array1D[np.float64]) +assert_type(np.geomspace(0, 10j), _Array1D[np.complex128 | Any]) +assert_type(np.geomspace(0, 10, dtype=np.int64), _Array1D[np.int64]) assert_type(np.geomspace(0, 10, dtype=int), npt.NDArray[Any]) assert_type(np.zeros_like(A), npt.NDArray[np.float64]) From 0740fa12854f0d70133a67e7710e9837de4032dc Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 28 Dec 2025 17:06:48 +0100 Subject: [PATCH 1085/1718] TYP: ``{lin,log,geom}space`` fix overload order to avoid overlap --- numpy/_core/function_base.pyi | 218 +++++++++++++++++----------------- 1 file changed, 109 insertions(+), 109 deletions(-) diff --git a/numpy/_core/function_base.pyi b/numpy/_core/function_base.pyi index 7fa9d07481ea..982a169bcbd5 100644 --- a/numpy/_core/function_base.pyi +++ b/numpy/_core/function_base.pyi @@ -34,8 +34,8 @@ def linspace( ) -> _Array1D[np.float64]: ... @overload def linspace( - start: _ToArrayFloat64, - stop: _ToArrayFloat64, + start: complex, + stop: complex, num: SupportsIndex = 50, endpoint: bool = True, retstep: L[False] = False, @@ -43,23 +43,35 @@ def linspace( axis: SupportsIndex = 0, *, device: L["cpu"] | None = None, -) -> NDArray[np.float64]: ... +) -> _Array1D[np.complex128 | Any]: ... @overload -def linspace( - start: _ArrayLikeFloat_co, - stop: _ArrayLikeFloat_co, +def linspace[ScalarT: np.generic]( + start: _ComplexLike_co, + stop: _ComplexLike_co, + num: SupportsIndex, + endpoint: bool, + retstep: L[False], + dtype: _DTypeLike[ScalarT], + axis: SupportsIndex = 0, + *, + device: L["cpu"] | None = None, +) -> _Array1D[ScalarT]: ... +@overload +def linspace[ScalarT: np.generic]( + start: _ComplexLike_co, + stop: _ComplexLike_co, num: SupportsIndex = 50, endpoint: bool = True, retstep: L[False] = False, - dtype: None = None, - axis: SupportsIndex = 0, *, + dtype: _DTypeLike[ScalarT], + axis: SupportsIndex = 0, device: L["cpu"] | None = None, -) -> NDArray[np.float64 | Any]: ... 
+) -> _Array1D[ScalarT]: ... @overload def linspace( - start: complex, - stop: complex, + start: _ToArrayFloat64, + stop: _ToArrayFloat64, num: SupportsIndex = 50, endpoint: bool = True, retstep: L[False] = False, @@ -67,11 +79,11 @@ def linspace( axis: SupportsIndex = 0, *, device: L["cpu"] | None = None, -) -> _Array1D[np.complex128 | Any]: ... +) -> NDArray[np.float64]: ... @overload def linspace( - start: _ArrayLikeComplex_co, - stop: _ArrayLikeComplex_co, + start: _ArrayLikeFloat_co, + stop: _ArrayLikeFloat_co, num: SupportsIndex = 50, endpoint: bool = True, retstep: L[False] = False, @@ -79,31 +91,19 @@ def linspace( axis: SupportsIndex = 0, *, device: L["cpu"] | None = None, -) -> NDArray[np.complex128 | Any]: ... -@overload -def linspace[ScalarT: np.generic]( - start: _ComplexLike_co, - stop: _ComplexLike_co, - num: SupportsIndex, - endpoint: bool, - retstep: L[False], - dtype: _DTypeLike[ScalarT], - axis: SupportsIndex = 0, - *, - device: L["cpu"] | None = None, -) -> _Array1D[ScalarT]: ... +) -> NDArray[np.float64 | Any]: ... @overload -def linspace[ScalarT: np.generic]( - start: _ComplexLike_co, - stop: _ComplexLike_co, +def linspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, num: SupportsIndex = 50, endpoint: bool = True, retstep: L[False] = False, - *, - dtype: _DTypeLike[ScalarT], + dtype: None = None, axis: SupportsIndex = 0, + *, device: L["cpu"] | None = None, -) -> _Array1D[ScalarT]: ... +) -> NDArray[np.complex128 | Any]: ... @overload def linspace[ScalarT: np.generic]( start: _ArrayLikeComplex_co, @@ -154,8 +154,8 @@ def linspace( ) -> tuple[_Array1D[np.float64], np.float64]: ... @overload def linspace( - start: _ToArrayFloat64, - stop: _ToArrayFloat64, + start: complex, + stop: complex, num: SupportsIndex = 50, endpoint: bool = True, *, @@ -163,23 +163,23 @@ def linspace( dtype: None = None, axis: SupportsIndex = 0, device: L["cpu"] | None = None, -) -> tuple[NDArray[np.float64], np.float64]: ... 
+) -> tuple[_Array1D[np.complex128 | Any], np.complex128 | Any]: ... @overload -def linspace( - start: _ArrayLikeFloat_co, - stop: _ArrayLikeFloat_co, +def linspace[ScalarT: np.generic]( + start: _ComplexLike_co, + stop: _ComplexLike_co, num: SupportsIndex = 50, endpoint: bool = True, *, retstep: L[True], - dtype: None = None, + dtype: _DTypeLike[ScalarT], axis: SupportsIndex = 0, device: L["cpu"] | None = None, -) -> tuple[NDArray[np.float64 | Any], np.float64 | Any]: ... +) -> tuple[_Array1D[ScalarT], ScalarT]: ... @overload def linspace( - start: complex, - stop: complex, + start: _ToArrayFloat64, + stop: _ToArrayFloat64, num: SupportsIndex = 50, endpoint: bool = True, *, @@ -187,11 +187,11 @@ def linspace( dtype: None = None, axis: SupportsIndex = 0, device: L["cpu"] | None = None, -) -> tuple[_Array1D[np.complex128 | Any], np.complex128 | Any]: ... +) -> tuple[NDArray[np.float64], np.float64]: ... @overload def linspace( - start: _ArrayLikeComplex_co, - stop: _ArrayLikeComplex_co, + start: _ArrayLikeFloat_co, + stop: _ArrayLikeFloat_co, num: SupportsIndex = 50, endpoint: bool = True, *, @@ -199,19 +199,19 @@ def linspace( dtype: None = None, axis: SupportsIndex = 0, device: L["cpu"] | None = None, -) -> tuple[NDArray[np.complex128 | Any], np.complex128 | Any]: ... +) -> tuple[NDArray[np.float64 | Any], np.float64 | Any]: ... @overload -def linspace[ScalarT: np.generic]( - start: _ComplexLike_co, - stop: _ComplexLike_co, +def linspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, num: SupportsIndex = 50, endpoint: bool = True, *, retstep: L[True], - dtype: _DTypeLike[ScalarT], + dtype: None = None, axis: SupportsIndex = 0, device: L["cpu"] | None = None, -) -> tuple[_Array1D[ScalarT], ScalarT]: ... +) -> tuple[NDArray[np.complex128 | Any], np.complex128 | Any]: ... @overload def linspace[ScalarT: np.generic]( start: _ArrayLikeComplex_co, @@ -249,26 +249,6 @@ def logspace( axis: SupportsIndex = 0, ) -> _Array1D[np.float64]: ... 
@overload -def logspace( - start: _ToArrayFloat64, - stop: _ToArrayFloat64, - num: SupportsIndex = 50, - endpoint: bool = True, - base: _ToArrayFloat64 = 10.0, - dtype: None = None, - axis: SupportsIndex = 0, -) -> NDArray[np.float64]: ... -@overload -def logspace( - start: _ArrayLikeFloat_co, - stop: _ArrayLikeFloat_co, - num: SupportsIndex = 50, - endpoint: bool = True, - base: _ArrayLikeFloat_co = 10.0, - dtype: None = None, - axis: SupportsIndex = 0, -) -> NDArray[np.float64 | Any]: ... -@overload def logspace( start: complex, stop: complex, @@ -279,16 +259,6 @@ def logspace( axis: SupportsIndex = 0, ) -> _Array1D[np.complex128 | Any]: ... @overload -def logspace( - start: _ArrayLikeComplex_co, - stop: _ArrayLikeComplex_co, - num: SupportsIndex = 50, - endpoint: bool = True, - base: _ArrayLikeComplex_co = 10.0, - dtype: None = None, - axis: SupportsIndex = 0, -) -> NDArray[np.complex128 | Any]: ... -@overload def logspace[ScalarT: np.generic]( start: _ComplexLike_co, stop: _ComplexLike_co, @@ -310,6 +280,36 @@ def logspace[ScalarT: np.generic]( axis: SupportsIndex = 0, ) -> _Array1D[ScalarT]: ... @overload +def logspace( + start: _ToArrayFloat64, + stop: _ToArrayFloat64, + num: SupportsIndex = 50, + endpoint: bool = True, + base: _ToArrayFloat64 = 10.0, + dtype: None = None, + axis: SupportsIndex = 0, +) -> NDArray[np.float64]: ... +@overload +def logspace( + start: _ArrayLikeFloat_co, + stop: _ArrayLikeFloat_co, + num: SupportsIndex = 50, + endpoint: bool = True, + base: _ArrayLikeFloat_co = 10.0, + dtype: None = None, + axis: SupportsIndex = 0, +) -> NDArray[np.float64 | Any]: ... +@overload +def logspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = 50, + endpoint: bool = True, + base: _ArrayLikeComplex_co = 10.0, + dtype: None = None, + axis: SupportsIndex = 0, +) -> NDArray[np.complex128 | Any]: ... 
+@overload def logspace[ScalarT: np.generic]( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, @@ -352,24 +352,6 @@ def geomspace( axis: SupportsIndex = 0, ) -> _Array1D[np.float64]: ... @overload -def geomspace( - start: _ToArrayFloat64, - stop: _ToArrayFloat64, - num: SupportsIndex = 50, - endpoint: bool = True, - dtype: None = None, - axis: SupportsIndex = 0, -) -> NDArray[np.float64]: ... -@overload -def geomspace( - start: _ArrayLikeFloat_co, - stop: _ArrayLikeFloat_co, - num: SupportsIndex = 50, - endpoint: bool = True, - dtype: None = None, - axis: SupportsIndex = 0, -) -> NDArray[np.float64 | Any]: ... -@overload def geomspace( start: complex, stop: complex, @@ -379,15 +361,6 @@ def geomspace( axis: SupportsIndex = 0, ) -> _Array1D[np.complex128 | Any]: ... @overload -def geomspace( - start: _ArrayLikeComplex_co, - stop: _ArrayLikeComplex_co, - num: SupportsIndex = 50, - endpoint: bool = True, - dtype: None = None, - axis: SupportsIndex = 0, -) -> NDArray[np.complex128 | Any]: ... -@overload def geomspace[ScalarT: np.generic]( start: _ComplexLike_co, stop: _ComplexLike_co, @@ -407,6 +380,33 @@ def geomspace[ScalarT: np.generic]( axis: SupportsIndex = 0, ) -> _Array1D[ScalarT]: ... @overload +def geomspace( + start: _ToArrayFloat64, + stop: _ToArrayFloat64, + num: SupportsIndex = 50, + endpoint: bool = True, + dtype: None = None, + axis: SupportsIndex = 0, +) -> NDArray[np.float64]: ... +@overload +def geomspace( + start: _ArrayLikeFloat_co, + stop: _ArrayLikeFloat_co, + num: SupportsIndex = 50, + endpoint: bool = True, + dtype: None = None, + axis: SupportsIndex = 0, +) -> NDArray[np.float64 | Any]: ... +@overload +def geomspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = 50, + endpoint: bool = True, + dtype: None = None, + axis: SupportsIndex = 0, +) -> NDArray[np.complex128 | Any]: ... 
+@overload def geomspace[ScalarT: np.generic]( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, From 07cd9feb36c6f3301ea3e473e41af9469c4514ab Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 28 Dec 2025 18:38:42 +0100 Subject: [PATCH 1086/1718] TYP: move ``memmap`` stubs to ``_core.memmap`` --- numpy/__init__.pyi | 56 +------------- numpy/_core/memmap.pyi | 93 ++++++++++++++++++++++- numpy/typing/tests/data/reveal/memmap.pyi | 14 ++-- 3 files changed, 101 insertions(+), 62 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 71984361e760..61d2b103db92 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -289,7 +289,7 @@ from numpy._core.getlimits import ( finfo, iinfo, ) - +from numpy._core.memmap import memmap from numpy._core.multiarray import ( array, empty_like, @@ -922,13 +922,6 @@ type _NDIterFlagsOp = L[ "writemasked", ] -type _MemMapModeKind = L[ - "readonly", "r", - "copyonwrite", "c", - "readwrite", "r+", - "write", "w+", -] # fmt: skip - type _DT64Item = dt.date | int | None type _TD64Item = dt.timedelta | int | None @@ -986,9 +979,6 @@ class _SupportsFileMethods(SupportsFlush, Protocol): def tell(self) -> SupportsIndex: ... def seek(self, offset: int, whence: int, /) -> object: ... -@type_check_only -class _SupportsFileMethodsRW(SupportsWrite[bytes], _SupportsFileMethods, Protocol): ... - @type_check_only class _SupportsDLPack[StreamT](Protocol): def __dlpack__(self, /, *, stream: StreamT | None = None) -> CapsuleType: ... @@ -6172,50 +6162,6 @@ class nditer: @property def value(self) -> tuple[NDArray[Any], ...]: ... -class memmap(ndarray[_ShapeT_co, _DTypeT_co]): - __array_priority__: ClassVar[float] = 100.0 # pyright: ignore[reportIncompatibleMethodOverride] - filename: str | None - offset: int - mode: str - @overload - def __new__( - subtype, - filename: StrOrBytesPath | _SupportsFileMethodsRW, - dtype: type[uint8] = ..., - mode: _MemMapModeKind = "r+", - offset: int = 0, - shape: int | tuple[int, ...] 
| None = None, - order: _OrderKACF = "C", - ) -> memmap[Any, dtype[uint8]]: ... - @overload - def __new__[ScalarT: generic]( - subtype, - filename: StrOrBytesPath | _SupportsFileMethodsRW, - dtype: _DTypeLike[ScalarT], - mode: _MemMapModeKind = "r+", - offset: int = 0, - shape: int | tuple[int, ...] | None = None, - order: _OrderKACF = "C", - ) -> memmap[Any, dtype[ScalarT]]: ... - @overload - def __new__( - subtype, - filename: StrOrBytesPath | _SupportsFileMethodsRW, - dtype: DTypeLike, - mode: _MemMapModeKind = "r+", - offset: int = 0, - shape: int | tuple[int, ...] | None = None, - order: _OrderKACF = "C", - ) -> memmap[Any, dtype]: ... - def __array_finalize__(self, obj: object) -> None: ... - def __array_wrap__( # type: ignore[override] - self, - array: memmap[_ShapeT_co, _DTypeT_co], - context: tuple[ufunc, tuple[Any, ...], int] | None = None, - return_scalar: py_bool = False, - ) -> Any: ... - def flush(self) -> None: ... - class poly1d: @property def variable(self) -> LiteralString: ... 
diff --git a/numpy/_core/memmap.pyi b/numpy/_core/memmap.pyi index 0b31328404fb..5e940097e1f3 100644 --- a/numpy/_core/memmap.pyi +++ b/numpy/_core/memmap.pyi @@ -1,3 +1,94 @@ -from numpy import memmap +from _typeshed import StrOrBytesPath, SupportsWrite +from typing import ( + Any, + ClassVar, + Final, + Literal, + Protocol, + Self, + overload, + override, + type_check_only, +) +from typing_extensions import TypeVar + +import numpy as np +from numpy import _OrderKACF, _SupportsFileMethods +from numpy._typing import DTypeLike, _AnyShape, _DTypeLike, _Shape __all__ = ["memmap"] + +_ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True) +_DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype[Any], default=np.dtype[Any], covariant=True) + +type _Mode = Literal["r", "c", "r+", "w+"] +type _ToMode = Literal[_Mode, "readonly", "copyonwrite", "readwrite", "write"] + +@type_check_only +class _SupportsFileMethodsRW(SupportsWrite[bytes], _SupportsFileMethods, Protocol): ... + +### + +class memmap(np.ndarray[_ShapeT_co, _DTypeT_co]): + __module__: Literal["numpy"] = "numpy" + __array_priority__: ClassVar[float] = 100.0 # pyright: ignore[reportIncompatibleMethodOverride] + + filename: Final[str | None] + offset: Final[int] + mode: Final[_Mode] + + @overload + def __new__[ScalarT: np.generic]( + subtype, # pyright: ignore[reportSelfClsParameterName] + filename: StrOrBytesPath | _SupportsFileMethodsRW, + dtype: _DTypeT_co, + mode: _ToMode = "r+", + offset: int = 0, + shape: int | tuple[int, ...] | None = None, + order: _OrderKACF = "C", + ) -> Self: ... + @overload + def __new__( + subtype, # pyright: ignore[reportSelfClsParameterName] + filename: StrOrBytesPath | _SupportsFileMethodsRW, + dtype: type[np.uint8] = ..., + mode: _ToMode = "r+", + offset: int = 0, + shape: int | tuple[int, ...] | None = None, + order: _OrderKACF = "C", + ) -> memmap[_AnyShape, np.dtype[np.uint8]]: ... 
+ @overload + def __new__[ScalarT: np.generic]( + subtype, # pyright: ignore[reportSelfClsParameterName] + filename: StrOrBytesPath | _SupportsFileMethodsRW, + dtype: _DTypeLike[ScalarT], + mode: _ToMode = "r+", + offset: int = 0, + shape: int | tuple[int, ...] | None = None, + order: _OrderKACF = "C", + ) -> memmap[_AnyShape, np.dtype[ScalarT]]: ... + @overload + def __new__( + subtype, # pyright: ignore[reportSelfClsParameterName] + filename: StrOrBytesPath | _SupportsFileMethodsRW, + dtype: DTypeLike, + mode: _ToMode = "r+", + offset: int = 0, + shape: int | tuple[int, ...] | None = None, + order: _OrderKACF = "C", + ) -> memmap: ... + + # + @override + def __array_finalize__(self, obj: object, /) -> None: ... + @override + def __array_wrap__( # type: ignore[override] + self, + /, + array: memmap[_ShapeT_co, _DTypeT_co], + context: tuple[np.ufunc, tuple[Any, ...], int] | None = None, + return_scalar: bool = False, + ) -> Any: ... + + # + def flush(self) -> None: ... diff --git a/numpy/typing/tests/data/reveal/memmap.pyi b/numpy/typing/tests/data/reveal/memmap.pyi index f3e20ed2d5e7..0babdeefb6f1 100644 --- a/numpy/typing/tests/data/reveal/memmap.pyi +++ b/numpy/typing/tests/data/reveal/memmap.pyi @@ -1,19 +1,21 @@ -from typing import Any, assert_type +from typing import Any, Literal, assert_type import numpy as np -memmap_obj: np.memmap[Any, np.dtype[np.str_]] +type _Memmap[ScalarT: np.generic] = np.memmap[tuple[Any, ...], np.dtype[ScalarT]] + +memmap_obj: _Memmap[np.str_] assert_type(np.memmap.__array_priority__, float) assert_type(memmap_obj.__array_priority__, float) assert_type(memmap_obj.filename, str | None) assert_type(memmap_obj.offset, int) -assert_type(memmap_obj.mode, str) +assert_type(memmap_obj.mode, Literal["r", "r+", "w+", "c"]) assert_type(memmap_obj.flush(), None) -assert_type(np.memmap("file.txt", offset=5), np.memmap[Any, np.dtype[np.uint8]]) -assert_type(np.memmap(b"file.txt", dtype=np.float64, shape=(10, 3)), np.memmap[Any, 
np.dtype[np.float64]]) +assert_type(np.memmap("file.txt", offset=5), _Memmap[np.uint8]) +assert_type(np.memmap(b"file.txt", dtype=np.float64, shape=(10, 3)), _Memmap[np.float64]) with open("file.txt", "rb") as f: - assert_type(np.memmap(f, dtype=float, order="K"), np.memmap[Any, np.dtype]) + assert_type(np.memmap(f, dtype=float, order="K"), np.memmap) assert_type(memmap_obj.__array_finalize__(object()), None) From dc9fdd7d927339ebfbd46abdca00055362797485 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 28 Dec 2025 19:45:36 +0100 Subject: [PATCH 1087/1718] TYP: inline declarations for ``empty``, ``zeros``, and ``ones`` --- numpy/_core/multiarray.pyi | 360 +++++++++++++++--------- numpy/_core/numeric.pyi | 113 +++++++- numpy/typing/tests/data/pass/literal.py | 8 +- 3 files changed, 334 insertions(+), 147 deletions(-) diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index e516c3cdab72..4a7f57092cab 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -1,4 +1,3 @@ -# TODO: Sort out any and all missing functions in this namespace import datetime as dt from _typeshed import Incomplete, StrOrBytesPath, SupportsLenAndGetItem from collections.abc import Buffer, Callable, Iterable, Sequence @@ -217,143 +216,6 @@ type _ToDeltas = dt.timedelta | _NestedSequence[dt.timedelta] class _SupportsArray[ArrayT_co: np.ndarray](Protocol): def __array__(self, /) -> ArrayT_co: ... -@type_check_only -class _ConstructorEmpty(Protocol): - # 1-D shape - @overload - def __call__( - self, - /, - shape: SupportsIndex, - dtype: None = None, - order: _OrderCF = "C", - *, - device: L["cpu"] | None = None, - like: _SupportsArrayFunc | None = None, - ) -> _Array1D[float64]: ... - @overload - def __call__[DTypeT: np.dtype]( - self, - /, - shape: SupportsIndex, - dtype: DTypeT | _SupportsDType[DTypeT], - order: _OrderCF = "C", - *, - device: L["cpu"] | None = None, - like: _SupportsArrayFunc | None = None, - ) -> ndarray[tuple[int], DTypeT]: ... 
- @overload - def __call__[ScalarT: np.generic]( - self, - /, - shape: SupportsIndex, - dtype: type[ScalarT], - order: _OrderCF = "C", - *, - device: L["cpu"] | None = None, - like: _SupportsArrayFunc | None = None, - ) -> _Array1D[ScalarT]: ... - @overload - def __call__( - self, - /, - shape: SupportsIndex, - dtype: DTypeLike | None = None, - order: _OrderCF = "C", - *, - device: L["cpu"] | None = None, - like: _SupportsArrayFunc | None = None, - ) -> _Array1D[Incomplete]: ... - - # known shape - @overload - def __call__[ShapeT: _Shape]( - self, - /, - shape: ShapeT, - dtype: None = None, - order: _OrderCF = "C", - *, - device: L["cpu"] | None = None, - like: _SupportsArrayFunc | None = None, - ) -> _Array[ShapeT, float64]: ... - @overload - def __call__[ShapeT: _Shape, DTypeT: np.dtype]( - self, - /, - shape: ShapeT, - dtype: DTypeT | _SupportsDType[DTypeT], - order: _OrderCF = "C", - *, - device: L["cpu"] | None = None, - like: _SupportsArrayFunc | None = None, - ) -> ndarray[ShapeT, DTypeT]: ... - @overload - def __call__[ShapeT: _Shape, ScalarT: np.generic]( - self, - /, - shape: ShapeT, - dtype: type[ScalarT], - order: _OrderCF = "C", - *, - device: L["cpu"] | None = None, - like: _SupportsArrayFunc | None = None, - ) -> _Array[ShapeT, ScalarT]: ... - @overload - def __call__[ShapeT: _Shape]( - self, - /, - shape: ShapeT, - dtype: DTypeLike | None = None, - order: _OrderCF = "C", - *, - device: L["cpu"] | None = None, - like: _SupportsArrayFunc | None = None, - ) -> _Array[ShapeT, Incomplete]: ... - - # unknown shape - @overload - def __call__( - self, /, - shape: _ShapeLike, - dtype: None = None, - order: _OrderCF = "C", - *, - device: L["cpu"] | None = None, - like: _SupportsArrayFunc | None = None, - ) -> NDArray[float64]: ... 
- @overload - def __call__[DTypeT: np.dtype]( - self, /, - shape: _ShapeLike, - dtype: DTypeT | _SupportsDType[DTypeT], - order: _OrderCF = "C", - *, - device: L["cpu"] | None = None, - like: _SupportsArrayFunc | None = None, - ) -> ndarray[_AnyShape, DTypeT]: ... - @overload - def __call__[ScalarT: np.generic]( - self, /, - shape: _ShapeLike, - dtype: type[ScalarT], - order: _OrderCF = "C", - *, - device: L["cpu"] | None = None, - like: _SupportsArrayFunc | None = None, - ) -> NDArray[ScalarT]: ... - @overload - def __call__( - self, - /, - shape: _ShapeLike, - dtype: DTypeLike | None = None, - order: _OrderCF = "C", - *, - device: L["cpu"] | None = None, - like: _SupportsArrayFunc | None = None, - ) -> NDArray[Incomplete]: ... - # using `Final` or `TypeAlias` will break stubtest error = Exception @@ -397,9 +259,227 @@ MAY_SHARE_BOUNDS: Final = 0 MAY_SHARE_EXACT: Final = -1 tracemalloc_domain: Final = 389_047 -zeros: Final[_ConstructorEmpty] = ... -empty: Final[_ConstructorEmpty] = ... +# keep in sync with zeros (below) and ones (`_core/numeric.pyi`) +@overload # 1d, float64 default +def empty( + shape: SupportsIndex, + dtype: None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.float64]: ... +@overload # 1d, specific dtype +def empty[DTypeT: np.dtype]( + shape: SupportsIndex, + dtype: DTypeT | _SupportsDType[DTypeT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> ndarray[tuple[int], DTypeT]: ... +@overload # 1d, specific scalar type +def empty[ScalarT: np.generic]( + shape: SupportsIndex, + dtype: type[ScalarT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[ScalarT]: ... 
+@overload # 1d, unknown dtype +def empty( + shape: SupportsIndex, + dtype: DTypeLike | None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[Incomplete]: ... +@overload # known shape, float64 default +def empty[ShapeT: _Shape]( + shape: ShapeT, + dtype: None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array[ShapeT, float64]: ... +@overload # known shape, specific dtype +def empty[ShapeT: _Shape, DTypeT: np.dtype]( + shape: ShapeT, + dtype: DTypeT | _SupportsDType[DTypeT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> ndarray[ShapeT, DTypeT]: ... +@overload # known shape, specific scalar type +def empty[ShapeT: _Shape, ScalarT: np.generic]( + shape: ShapeT, + dtype: type[ScalarT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array[ShapeT, ScalarT]: ... +@overload # known shape, unknown dtype +def empty[ShapeT: _Shape]( + shape: ShapeT, + dtype: DTypeLike | None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array[ShapeT, Incomplete]: ... +@overload # unknown shape, float64 default +def empty( + shape: _ShapeLike, + dtype: None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> NDArray[float64]: ... +@overload # unknown shape, specific dtype +def empty[DTypeT: np.dtype]( + shape: _ShapeLike, + dtype: DTypeT | _SupportsDType[DTypeT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> ndarray[_AnyShape, DTypeT]: ... 
+@overload # unknown shape, specific scalar type +def empty[ScalarT: np.generic]( + shape: _ShapeLike, + dtype: type[ScalarT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> NDArray[ScalarT]: ... +@overload # unknown shape, unknown dtype +def empty( + shape: _ShapeLike, + dtype: DTypeLike | None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> NDArray[Incomplete]: ... + +# keep in sync with empty (above) and ones (`_core/numeric.pyi`) +@overload # 1d, float64 default +def zeros( + shape: SupportsIndex, + dtype: None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.float64]: ... +@overload # 1d, specific dtype +def zeros[DTypeT: np.dtype]( + shape: SupportsIndex, + dtype: DTypeT | _SupportsDType[DTypeT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> ndarray[tuple[int], DTypeT]: ... +@overload # 1d, specific scalar type +def zeros[ScalarT: np.generic]( + shape: SupportsIndex, + dtype: type[ScalarT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[ScalarT]: ... +@overload # 1d, unknown dtype +def zeros( + shape: SupportsIndex, + dtype: DTypeLike | None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[Incomplete]: ... +@overload # known shape, float64 default +def zeros[ShapeT: _Shape]( + shape: ShapeT, + dtype: None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array[ShapeT, float64]: ... 
+@overload # known shape, specific dtype +def zeros[ShapeT: _Shape, DTypeT: np.dtype]( + shape: ShapeT, + dtype: DTypeT | _SupportsDType[DTypeT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> ndarray[ShapeT, DTypeT]: ... +@overload # known shape, specific scalar type +def zeros[ShapeT: _Shape, ScalarT: np.generic]( + shape: ShapeT, + dtype: type[ScalarT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array[ShapeT, ScalarT]: ... +@overload # known shape, unknown dtype +def zeros[ShapeT: _Shape]( + shape: ShapeT, + dtype: DTypeLike | None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array[ShapeT, Incomplete]: ... +@overload # unknown shape, float64 default +def zeros( + shape: _ShapeLike, + dtype: None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> NDArray[float64]: ... +@overload # unknown shape, specific dtype +def zeros[DTypeT: np.dtype]( + shape: _ShapeLike, + dtype: DTypeT | _SupportsDType[DTypeT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> ndarray[_AnyShape, DTypeT]: ... +@overload # unknown shape, specific scalar type +def zeros[ScalarT: np.generic]( + shape: _ShapeLike, + dtype: type[ScalarT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> NDArray[ScalarT]: ... +@overload # unknown shape, unknown dtype +def zeros( + shape: _ShapeLike, + dtype: DTypeLike | None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> NDArray[Incomplete]: ... 
+# @overload def empty_like[ArrayT: np.ndarray]( prototype: ArrayT, diff --git a/numpy/_core/numeric.pyi b/numpy/_core/numeric.pyi index ddf0bfa31977..a577b8563c6d 100644 --- a/numpy/_core/numeric.pyi +++ b/numpy/_core/numeric.pyi @@ -3,7 +3,6 @@ from builtins import bool as py_bool from collections.abc import Callable, Iterable, Sequence from typing import ( Any, - Final, Literal as L, SupportsAbs, SupportsIndex, @@ -29,6 +28,7 @@ from numpy._typing import ( ArrayLike, DTypeLike, NDArray, + _AnyShape, _ArrayLike, _ArrayLikeBool_co, _ArrayLikeComplex_co, @@ -127,7 +127,6 @@ from .multiarray import ( RAISE as RAISE, WRAP as WRAP, _Array, - _ConstructorEmpty, arange, array, asanyarray, @@ -706,7 +705,115 @@ def zeros_like( device: L["cpu"] | None = None, ) -> NDArray[Any]: ... -ones: Final[_ConstructorEmpty] +# keep in sync with empty and zeros (`_core/multiarray.pyi`) +@overload # 1d, float64 default +def ones( + shape: SupportsIndex, + dtype: None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.float64]: ... +@overload # 1d, specific dtype +def ones[DTypeT: np.dtype]( + shape: SupportsIndex, + dtype: DTypeT | _SupportsDType[DTypeT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> ndarray[tuple[int], DTypeT]: ... +@overload # 1d, specific scalar type +def ones[ScalarT: np.generic]( + shape: SupportsIndex, + dtype: type[ScalarT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[ScalarT]: ... +@overload # 1d, unknown dtype +def ones( + shape: SupportsIndex, + dtype: DTypeLike | None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[Incomplete]: ... 
+@overload # known shape, float64 default +def ones[ShapeT: _Shape]( + shape: ShapeT, + dtype: None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array[ShapeT, float64]: ... +@overload # known shape, specific dtype +def ones[ShapeT: _Shape, DTypeT: np.dtype]( + shape: ShapeT, + dtype: DTypeT | _SupportsDType[DTypeT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> ndarray[ShapeT, DTypeT]: ... +@overload # known shape, specific scalar type +def ones[ShapeT: _Shape, ScalarT: np.generic]( + shape: ShapeT, + dtype: type[ScalarT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array[ShapeT, ScalarT]: ... +@overload # known shape, unknown dtype +def ones[ShapeT: _Shape]( + shape: ShapeT, + dtype: DTypeLike | None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array[ShapeT, Incomplete]: ... +@overload # unknown shape, float64 default +def ones( + shape: _ShapeLike, + dtype: None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> NDArray[float64]: ... +@overload # unknown shape, specific dtype +def ones[DTypeT: np.dtype]( + shape: _ShapeLike, + dtype: DTypeT | _SupportsDType[DTypeT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> ndarray[_AnyShape, DTypeT]: ... +@overload # unknown shape, specific scalar type +def ones[ScalarT: np.generic]( + shape: _ShapeLike, + dtype: type[ScalarT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> NDArray[ScalarT]: ... 
+@overload # unknown shape, unknown dtype +def ones( + shape: _ShapeLike, + dtype: DTypeLike | None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> NDArray[Incomplete]: ... # keep in sync with `zeros_like` @overload diff --git a/numpy/typing/tests/data/pass/literal.py b/numpy/typing/tests/data/pass/literal.py index f1e0cb2a69d3..c6d86baf7397 100644 --- a/numpy/typing/tests/data/pass/literal.py +++ b/numpy/typing/tests/data/pass/literal.py @@ -25,11 +25,11 @@ (KACF, AR.flatten), (KACF, AR.ravel), (KACF, partial(np.array, 1)), - # NOTE: __call__ is needed due to mypy bugs (#17620, #17631) + # NOTE: __call__ is needed due to python/mypy#17620 (KACF, partial(np.ndarray.__call__, 1)), - (CF, partial(np.zeros.__call__, 1)), - (CF, partial(np.ones.__call__, 1)), - (CF, partial(np.empty.__call__, 1)), + (CF, partial(np.zeros, 1)), + (CF, partial(np.ones, 1)), + (CF, partial(np.empty, 1)), (CF, partial(np.full, 1, 1)), (KACF, partial(np.zeros_like, AR)), (KACF, partial(np.ones_like, AR)), From b4f22cf00138640baaa60fa127a2a50059c48152 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 28 Dec 2025 20:11:26 +0100 Subject: [PATCH 1088/1718] TYP: ``bincount`` shape-typing --- numpy/_core/multiarray.pyi | 15 ++++++++------- numpy/typing/tests/data/reveal/multiarray.pyi | 2 +- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index e516c3cdab72..c6dfe3c2698e 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -213,6 +213,9 @@ type _ArangeScalar = np.integer | np.floating | np.datetime64 | np.timedelta64 type _ToDates = dt.date | _NestedSequence[dt.date] type _ToDeltas = dt.timedelta | _NestedSequence[dt.timedelta] +type _BitOrder = L["big", "little"] +type _MaxWork = L[-1, 0] + @type_check_only class _SupportsArray[ArrayT_co: np.ndarray](Protocol): def __array__(self, /) -> ArrayT_co: ... 
@@ -619,15 +622,15 @@ def vdot(a: _ArrayLikeObject_co, b: object, /) -> Any: ... @overload def vdot(a: object, b: _ArrayLikeObject_co, /) -> Any: ... -def bincount(x: ArrayLike, /, weights: ArrayLike | None = None, minlength: SupportsIndex = 0) -> NDArray[intp]: ... +# +def bincount(x: ArrayLike, /, weights: ArrayLike | None = None, minlength: SupportsIndex = 0) -> _Array1D[intp]: ... +# def copyto(dst: ndarray, src: ArrayLike, casting: _CastingKind = "same_kind", where: object = True) -> None: ... def putmask(a: ndarray, /, mask: _ArrayLikeBool_co, values: ArrayLike) -> None: ... -type _BitOrder = L["big", "little"] - @overload -def packbits(a: _ArrayLikeInt_co, /, axis: None = None, bitorder: _BitOrder = "big") -> ndarray[tuple[int], dtype[uint8]]: ... +def packbits(a: _ArrayLikeInt_co, /, axis: None = None, bitorder: _BitOrder = "big") -> _Array1D[uint8]: ... @overload def packbits(a: _ArrayLikeInt_co, /, axis: SupportsIndex, bitorder: _BitOrder = "big") -> NDArray[uint8]: ... @@ -638,7 +641,7 @@ def unpackbits( axis: None = None, count: SupportsIndex | None = None, bitorder: _BitOrder = "big", -) -> ndarray[tuple[int], dtype[uint8]]: ... +) -> _Array1D[uint8]: ... @overload def unpackbits( a: _ArrayLike[uint8], @@ -648,8 +651,6 @@ def unpackbits( bitorder: _BitOrder = "big", ) -> NDArray[uint8]: ... -type _MaxWork = L[-1, 0] - # any two python objects will be accepted, not just `ndarray`s def shares_memory(a: object, b: object, /, max_work: _MaxWork = -1) -> bool: ... def may_share_memory(a: object, b: object, /, max_work: _MaxWork = 0) -> bool: ... 
diff --git a/numpy/typing/tests/data/reveal/multiarray.pyi b/numpy/typing/tests/data/reveal/multiarray.pyi index ada8f7777696..fefb6e2fbb5d 100644 --- a/numpy/typing/tests/data/reveal/multiarray.pyi +++ b/numpy/typing/tests/data/reveal/multiarray.pyi @@ -86,7 +86,7 @@ assert_type(np.vdot(AR_LIKE_f, AR_i8), np.floating) assert_type(np.vdot(AR_u1, 1), np.signedinteger) assert_type(np.vdot(1.5j, 1), np.complexfloating) -assert_type(np.bincount(AR_i8), npt.NDArray[np.intp]) +assert_type(np.bincount(AR_i8), np.ndarray[tuple[int], np.dtype[np.intp]]) assert_type(np.copyto(AR_f8, [1., 1.5, 1.6]), None) From 030962b9db2ddcdb3f33b7f391651f5917437d03 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 28 Dec 2025 21:50:56 +0100 Subject: [PATCH 1089/1718] TYP: ``from{buffer,string,file}`` shape-typing --- numpy/_core/multiarray.pyi | 53 ++++++++++--------- .../tests/data/reveal/array_constructors.pyi | 32 +++++------ 2 files changed, 43 insertions(+), 42 deletions(-) diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index ab02dfab09e0..27fcc851475b 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -858,30 +858,31 @@ def promote_types(__type1: DTypeLike, __type2: DTypeLike) -> dtype: ... def fromstring( string: str | bytes, dtype: None = None, - count: SupportsIndex = ..., + count: SupportsIndex = -1, *, sep: str, - like: _SupportsArrayFunc | None = ..., -) -> NDArray[float64]: ... + like: _SupportsArrayFunc | None = None, +) -> _Array1D[float64]: ... @overload def fromstring[ScalarT: np.generic]( string: str | bytes, dtype: _DTypeLike[ScalarT], - count: SupportsIndex = ..., + count: SupportsIndex = -1, *, sep: str, - like: _SupportsArrayFunc | None = ..., -) -> NDArray[ScalarT]: ... + like: _SupportsArrayFunc | None = None, +) -> _Array1D[ScalarT]: ... 
@overload def fromstring( string: str | bytes, - dtype: DTypeLike | None = ..., - count: SupportsIndex = ..., + dtype: DTypeLike | None = None, + count: SupportsIndex = -1, *, sep: str, - like: _SupportsArrayFunc | None = ..., -) -> NDArray[Any]: ... + like: _SupportsArrayFunc | None = None, +) -> _Array1D[Any]: ... +# @overload def frompyfunc[ReturnT]( func: Callable[[Any], ReturnT], /, @@ -964,7 +965,7 @@ def fromfile( offset: SupportsIndex = ..., *, like: _SupportsArrayFunc | None = ..., -) -> NDArray[float64]: ... +) -> _Array1D[float64]: ... @overload def fromfile[ScalarT: np.generic]( file: StrOrBytesPath | _SupportsFileMethods, @@ -974,7 +975,7 @@ def fromfile[ScalarT: np.generic]( offset: SupportsIndex = ..., *, like: _SupportsArrayFunc | None = ..., -) -> NDArray[ScalarT]: ... +) -> _Array1D[ScalarT]: ... @overload def fromfile( file: StrOrBytesPath | _SupportsFileMethods, @@ -984,7 +985,7 @@ def fromfile( offset: SupportsIndex = ..., *, like: _SupportsArrayFunc | None = ..., -) -> NDArray[Any]: ... +) -> _Array1D[Any]: ... @overload def fromiter[ScalarT: np.generic]( @@ -1007,29 +1008,29 @@ def fromiter( def frombuffer( buffer: Buffer, dtype: None = None, - count: SupportsIndex = ..., - offset: SupportsIndex = ..., + count: SupportsIndex = -1, + offset: SupportsIndex = 0, *, - like: _SupportsArrayFunc | None = ..., -) -> NDArray[float64]: ... + like: _SupportsArrayFunc | None = None, +) -> _Array1D[float64]: ... @overload def frombuffer[ScalarT: np.generic]( buffer: Buffer, dtype: _DTypeLike[ScalarT], - count: SupportsIndex = ..., - offset: SupportsIndex = ..., + count: SupportsIndex = -1, + offset: SupportsIndex = 0, *, - like: _SupportsArrayFunc | None = ..., -) -> NDArray[ScalarT]: ... + like: _SupportsArrayFunc | None = None, +) -> _Array1D[ScalarT]: ... 
@overload def frombuffer( buffer: Buffer, - dtype: DTypeLike | None = ..., - count: SupportsIndex = ..., - offset: SupportsIndex = ..., + dtype: DTypeLike | None = None, + count: SupportsIndex = -1, + offset: SupportsIndex = 0, *, - like: _SupportsArrayFunc | None = ..., -) -> NDArray[Any]: ... + like: _SupportsArrayFunc | None = None, +) -> _Array1D[Any]: ... # keep in sync with ma.core.arange # NOTE: The `float64 | Any` return types needed to avoid incompatible overlapping overloads diff --git a/numpy/typing/tests/data/reveal/array_constructors.pyi b/numpy/typing/tests/data/reveal/array_constructors.pyi index 23101e224ea3..d17793044d37 100644 --- a/numpy/typing/tests/data/reveal/array_constructors.pyi +++ b/numpy/typing/tests/data/reveal/array_constructors.pyi @@ -92,27 +92,27 @@ assert_type(np.asfortranarray([1, 1.0]), npt.NDArray[Any]) assert_type(np.asfortranarray(A, dtype=np.int64), npt.NDArray[np.int64]) assert_type(np.asfortranarray(A, dtype="c16"), npt.NDArray[Any]) -assert_type(np.fromstring("1 1 1", sep=" "), npt.NDArray[np.float64]) -assert_type(np.fromstring(b"1 1 1", sep=" "), npt.NDArray[np.float64]) -assert_type(np.fromstring("1 1 1", dtype=np.int64, sep=" "), npt.NDArray[np.int64]) -assert_type(np.fromstring(b"1 1 1", dtype=np.int64, sep=" "), npt.NDArray[np.int64]) -assert_type(np.fromstring("1 1 1", dtype="c16", sep=" "), npt.NDArray[Any]) -assert_type(np.fromstring(b"1 1 1", dtype="c16", sep=" "), npt.NDArray[Any]) - -assert_type(np.fromfile("test.txt", sep=" "), npt.NDArray[np.float64]) -assert_type(np.fromfile("test.txt", dtype=np.int64, sep=" "), npt.NDArray[np.int64]) -assert_type(np.fromfile("test.txt", dtype="c16", sep=" "), npt.NDArray[Any]) +assert_type(np.fromstring("1 1 1", sep=" "), _Array1D[np.float64]) +assert_type(np.fromstring(b"1 1 1", sep=" "), _Array1D[np.float64]) +assert_type(np.fromstring("1 1 1", dtype=np.int64, sep=" "), _Array1D[np.int64]) +assert_type(np.fromstring(b"1 1 1", dtype=np.int64, sep=" "), _Array1D[np.int64]) 
+assert_type(np.fromstring("1 1 1", dtype="c16", sep=" "), _Array1D[Any]) +assert_type(np.fromstring(b"1 1 1", dtype="c16", sep=" "), _Array1D[Any]) + +assert_type(np.fromfile("test.txt", sep=" "), _Array1D[np.float64]) +assert_type(np.fromfile("test.txt", dtype=np.int64, sep=" "), _Array1D[np.int64]) +assert_type(np.fromfile("test.txt", dtype="c16", sep=" "), _Array1D[Any]) with open("test.txt") as f: - assert_type(np.fromfile(f, sep=" "), npt.NDArray[np.float64]) - assert_type(np.fromfile(b"test.txt", sep=" "), npt.NDArray[np.float64]) - assert_type(np.fromfile(Path("test.txt"), sep=" "), npt.NDArray[np.float64]) + assert_type(np.fromfile(f, sep=" "), _Array1D[np.float64]) + assert_type(np.fromfile(b"test.txt", sep=" "), _Array1D[np.float64]) + assert_type(np.fromfile(Path("test.txt"), sep=" "), _Array1D[np.float64]) assert_type(np.fromiter("12345", np.float64), npt.NDArray[np.float64]) assert_type(np.fromiter("12345", float), npt.NDArray[Any]) -assert_type(np.frombuffer(A), npt.NDArray[np.float64]) -assert_type(np.frombuffer(A, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(np.frombuffer(A, dtype="c16"), npt.NDArray[Any]) +assert_type(np.frombuffer(A), _Array1D[np.float64]) +assert_type(np.frombuffer(A, dtype=np.int64), _Array1D[np.int64]) +assert_type(np.frombuffer(A, dtype="c16"), _Array1D[Any]) _x_bool: bool _x_int: int From a20f72df9f499cdf7892815053b886d3720a5dea Mon Sep 17 00:00:00 2001 From: Alejandro Candioti Date: Sun, 28 Dec 2025 20:25:14 -0800 Subject: [PATCH 1090/1718] Apply suggestions from code review Co-authored-by: Pieter Eendebak --- benchmarks/benchmarks/bench_searchsorted.py | 2 +- numpy/_core/src/npysort/binsearch.cpp | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/benchmarks/benchmarks/bench_searchsorted.py b/benchmarks/benchmarks/bench_searchsorted.py index 5704bc563ff8..d56fd6f1142a 100644 --- a/benchmarks/benchmarks/bench_searchsorted.py +++ b/benchmarks/benchmarks/bench_searchsorted.py @@ -10,7 +10,7 @@ 
class SearchSortedInt64(Benchmark): [100, 10_000, 1_000_000, 1_000_000_000], # array sizes [1, 2, 100, 100_000], # number of query elements ['ordered', 'random'], # query order - [42, 18122022] + [42, 18122022], # seed ] param_names = ['array_size', 'n_queries', 'query_order', 'seed'] diff --git a/numpy/_core/src/npysort/binsearch.cpp b/numpy/_core/src/npysort/binsearch.cpp index 6815a9992548..b7104f8137ad 100644 --- a/numpy/_core/src/npysort/binsearch.cpp +++ b/numpy/_core/src/npysort/binsearch.cpp @@ -104,7 +104,7 @@ binsearch(const char *arr, const char *key, char *ret, npy_intp arr_len, - cmp(arr[i], key_val) == true for all i < base - cmp(arr[i], key_val) == false for all i >= base + length - The insertion index candidates are i in range [base, base+length] and + The insertion index candidates are in range [base, base+length] and on each iteration we shrink the range into either [base, ceil(length / 2)] or @@ -147,8 +147,8 @@ binsearch(const char *arr, const char *key, char *ret, npy_intp arr_len, } /* - At this point interval_length == 1, so the candidates are in interval - [base, base + 1]. + At this point interval_length == 1, so the candidates are in the + interval [base, base + 1]. 
We have two options: If cmp(arr[base], key_val) == true, insertion index is base + 1 From 55ba5936343bdce3844e8c00a5ee81a2dc6071cd Mon Sep 17 00:00:00 2001 From: Alejandro Candioti Date: Sun, 28 Dec 2025 20:48:36 -0800 Subject: [PATCH 1091/1718] Improving code comments --- numpy/_core/src/npysort/binsearch.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/npysort/binsearch.cpp b/numpy/_core/src/npysort/binsearch.cpp index b7104f8137ad..0f8559ca5ef2 100644 --- a/numpy/_core/src/npysort/binsearch.cpp +++ b/numpy/_core/src/npysort/binsearch.cpp @@ -65,7 +65,7 @@ binsearch(const char *arr, const char *key, char *ret, npy_intp arr_len, using T = typename Tag::type; auto cmp = side_to_cmp::value; - // Let's handle this corner case first: if length is 0 we return all 0s + // If the array length is 0 we return all 0s if (arr_len <= 0) { for (npy_intp i = 0; i < key_len; ++i) { *(npy_intp *)(ret + i * ret_str) = 0; From da986faf00917f7646892a5d140b7543c1860858 Mon Sep 17 00:00:00 2001 From: jorenham Date: Mon, 29 Dec 2025 15:25:32 +0100 Subject: [PATCH 1092/1718] TYP: ``Unique*Result`` shape-typing --- numpy/lib/_arraysetops_impl.pyi | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/numpy/lib/_arraysetops_impl.pyi b/numpy/lib/_arraysetops_impl.pyi index b0b918bd3e23..8bb35cecee7a 100644 --- a/numpy/lib/_arraysetops_impl.pyi +++ b/numpy/lib/_arraysetops_impl.pyi @@ -42,22 +42,23 @@ _AnyScalarT = TypeVar( type _NumericScalar = np.number | np.timedelta64 | np.object_ type _IntArray = NDArray[np.intp] +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] ### class UniqueAllResult[ScalarT: np.generic](NamedTuple): - values: NDArray[ScalarT] - indices: _IntArray + values: _Array1D[ScalarT] + indices: _Array1D[np.intp] inverse_indices: _IntArray - counts: _IntArray + counts: _Array1D[np.intp] class UniqueCountsResult[ScalarT: np.generic](NamedTuple): - values: NDArray[ScalarT] - counts: 
_IntArray + values: _Array1D[ScalarT] + counts: _Array1D[np.intp] class UniqueInverseResult[ScalarT: np.generic](NamedTuple): - values: NDArray[ScalarT] - inverse_indices: _IntArray + values: _Array1D[ScalarT] + inverse_indices: NDArray[np.intp] # @overload From b4b5da5e9b086cd661a5cfff88000c235fff5e40 Mon Sep 17 00:00:00 2001 From: jorenham Date: Mon, 29 Dec 2025 15:25:53 +0100 Subject: [PATCH 1093/1718] TYP: ``ediff1d`` shape-typing --- numpy/lib/_arraysetops_impl.pyi | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/numpy/lib/_arraysetops_impl.pyi b/numpy/lib/_arraysetops_impl.pyi index 8bb35cecee7a..81ba0682a18b 100644 --- a/numpy/lib/_arraysetops_impl.pyi +++ b/numpy/lib/_arraysetops_impl.pyi @@ -1,3 +1,4 @@ +from _typeshed import Incomplete from typing import Any, Literal as L, NamedTuple, SupportsIndex, TypeVar, overload import numpy as np @@ -66,25 +67,25 @@ def ediff1d( ary: _ArrayLikeBool_co, to_end: ArrayLike | None = None, to_begin: ArrayLike | None = None, -) -> NDArray[np.int8]: ... +) -> _Array1D[np.int8]: ... @overload def ediff1d[NumericT: _NumericScalar]( ary: _ArrayLike[NumericT], to_end: ArrayLike | None = None, to_begin: ArrayLike | None = None, -) -> NDArray[NumericT]: ... +) -> _Array1D[NumericT]: ... @overload def ediff1d( ary: _ArrayLike[np.datetime64[Any]], to_end: ArrayLike | None = None, to_begin: ArrayLike | None = None, -) -> NDArray[np.timedelta64]: ... +) -> _Array1D[np.timedelta64]: ... @overload def ediff1d( ary: _ArrayLikeNumber_co, to_end: ArrayLike | None = None, to_begin: ArrayLike | None = None, -) -> np.ndarray: ... +) -> _Array1D[Incomplete]: ... 
# @overload # known scalar-type, FFF From e26c1fa6404e43f0695aeba7d945294b3e39d5c7 Mon Sep 17 00:00:00 2001 From: jorenham Date: Mon, 29 Dec 2025 15:26:38 +0100 Subject: [PATCH 1094/1718] TYP: ``unique_values`` shape-typing --- numpy/lib/_arraysetops_impl.pyi | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/lib/_arraysetops_impl.pyi b/numpy/lib/_arraysetops_impl.pyi index 81ba0682a18b..094f8a5e7c83 100644 --- a/numpy/lib/_arraysetops_impl.pyi +++ b/numpy/lib/_arraysetops_impl.pyi @@ -373,9 +373,9 @@ def unique_inverse(x: ArrayLike) -> UniqueInverseResult[Any]: ... # @overload -def unique_values[ScalarT: np.generic](x: _ArrayLike[ScalarT]) -> NDArray[ScalarT]: ... +def unique_values[ScalarT: np.generic](x: _ArrayLike[ScalarT]) -> _Array1D[ScalarT]: ... @overload -def unique_values(x: ArrayLike) -> np.ndarray: ... +def unique_values(x: ArrayLike) -> _Array1D[Incomplete]: ... # @overload # known scalar-type, return_indices=False (default) From 4e2181546de7650ddffe8c5800e81b78edd965ad Mon Sep 17 00:00:00 2001 From: jorenham Date: Mon, 29 Dec 2025 15:29:03 +0100 Subject: [PATCH 1095/1718] TYP: ``intersect1d`` shape-typing --- numpy/lib/_arraysetops_impl.pyi | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/numpy/lib/_arraysetops_impl.pyi b/numpy/lib/_arraysetops_impl.pyi index 094f8a5e7c83..74ed31049d4b 100644 --- a/numpy/lib/_arraysetops_impl.pyi +++ b/numpy/lib/_arraysetops_impl.pyi @@ -45,6 +45,8 @@ type _NumericScalar = np.number | np.timedelta64 | np.object_ type _IntArray = NDArray[np.intp] type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] +type _IntersectResult[ScalarT: np.generic] = tuple[_Array1D[ScalarT], _Array1D[np.intp], _Array1D[np.intp]] + ### class UniqueAllResult[ScalarT: np.generic](NamedTuple): @@ -384,14 +386,14 @@ def intersect1d( ar2: _ArrayLike[_AnyScalarT], assume_unique: bool = False, return_indices: L[False] = False, -) -> NDArray[_AnyScalarT]: ... 
+) -> _Array1D[_AnyScalarT]: ... @overload # known scalar-type, return_indices=True (positional) def intersect1d( ar1: _ArrayLike[_AnyScalarT], ar2: _ArrayLike[_AnyScalarT], assume_unique: bool, return_indices: L[True], -) -> tuple[NDArray[_AnyScalarT], _IntArray, _IntArray]: ... +) -> _IntersectResult[_AnyScalarT]: ... @overload # known scalar-type, return_indices=True (keyword) def intersect1d( ar1: _ArrayLike[_AnyScalarT], @@ -399,21 +401,21 @@ def intersect1d( assume_unique: bool = False, *, return_indices: L[True], -) -> tuple[NDArray[_AnyScalarT], _IntArray, _IntArray]: ... +) -> _IntersectResult[_AnyScalarT]: ... @overload # unknown scalar-type, return_indices=False (default) def intersect1d( ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = False, return_indices: L[False] = False, -) -> np.ndarray: ... +) -> _Array1D[Incomplete]: ... @overload # unknown scalar-type, return_indices=True (positional) def intersect1d( ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool, return_indices: L[True], -) -> tuple[np.ndarray, _IntArray, _IntArray]: ... +) -> _IntersectResult[Incomplete]: ... @overload # unknown scalar-type, return_indices=True (keyword) def intersect1d( ar1: ArrayLike, @@ -421,7 +423,7 @@ def intersect1d( assume_unique: bool = False, *, return_indices: L[True], -) -> tuple[np.ndarray, _IntArray, _IntArray]: ... +) -> _IntersectResult[Incomplete]: ... # @overload From 44959253265902be894b4fafbd53001984bdaa35 Mon Sep 17 00:00:00 2001 From: jorenham Date: Mon, 29 Dec 2025 15:29:33 +0100 Subject: [PATCH 1096/1718] TYP: ``union1d`` shape-typing --- numpy/lib/_arraysetops_impl.pyi | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/lib/_arraysetops_impl.pyi b/numpy/lib/_arraysetops_impl.pyi index 74ed31049d4b..cae13e0e9810 100644 --- a/numpy/lib/_arraysetops_impl.pyi +++ b/numpy/lib/_arraysetops_impl.pyi @@ -433,9 +433,9 @@ def setxor1d(ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = False) -> np. 
# @overload -def union1d(ar1: _ArrayLike[_AnyScalarT], ar2: _ArrayLike[_AnyScalarT]) -> NDArray[_AnyScalarT]: ... +def union1d(ar1: _ArrayLike[_AnyScalarT], ar2: _ArrayLike[_AnyScalarT]) -> _Array1D[_AnyScalarT]: ... @overload -def union1d(ar1: ArrayLike, ar2: ArrayLike) -> np.ndarray: ... +def union1d(ar1: ArrayLike, ar2: ArrayLike) -> _Array1D[Incomplete]: ... # @overload From bc5007c19551a0d72c0e70247db0edb885e66edc Mon Sep 17 00:00:00 2001 From: jorenham Date: Mon, 29 Dec 2025 15:29:58 +0100 Subject: [PATCH 1097/1718] TYP: ``setxor1d`` shape-typing --- numpy/lib/_arraysetops_impl.pyi | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/lib/_arraysetops_impl.pyi b/numpy/lib/_arraysetops_impl.pyi index cae13e0e9810..9095b3f564eb 100644 --- a/numpy/lib/_arraysetops_impl.pyi +++ b/numpy/lib/_arraysetops_impl.pyi @@ -427,9 +427,9 @@ def intersect1d( # @overload -def setxor1d(ar1: _ArrayLike[_AnyScalarT], ar2: _ArrayLike[_AnyScalarT], assume_unique: bool = False) -> NDArray[_AnyScalarT]: ... +def setxor1d(ar1: _ArrayLike[_AnyScalarT], ar2: _ArrayLike[_AnyScalarT], assume_unique: bool = False) -> _Array1D[_AnyScalarT]: ... @overload -def setxor1d(ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = False) -> np.ndarray: ... +def setxor1d(ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = False) -> _Array1D[Incomplete]: ... # @overload From 17fb14c70add5e38a4fc74d1969c8d533125d7ee Mon Sep 17 00:00:00 2001 From: jorenham Date: Mon, 29 Dec 2025 15:30:16 +0100 Subject: [PATCH 1098/1718] TYP: ``setdiff1d`` shape-typing --- numpy/lib/_arraysetops_impl.pyi | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/lib/_arraysetops_impl.pyi b/numpy/lib/_arraysetops_impl.pyi index 9095b3f564eb..b91f8dee04ef 100644 --- a/numpy/lib/_arraysetops_impl.pyi +++ b/numpy/lib/_arraysetops_impl.pyi @@ -439,9 +439,9 @@ def union1d(ar1: ArrayLike, ar2: ArrayLike) -> _Array1D[Incomplete]: ... 
# @overload -def setdiff1d(ar1: _ArrayLike[_AnyScalarT], ar2: _ArrayLike[_AnyScalarT], assume_unique: bool = False) -> NDArray[_AnyScalarT]: ... +def setdiff1d(ar1: _ArrayLike[_AnyScalarT], ar2: _ArrayLike[_AnyScalarT], assume_unique: bool = False) -> _Array1D[_AnyScalarT]: ... @overload -def setdiff1d(ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = False) -> np.ndarray: ... +def setdiff1d(ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = False) -> _Array1D[Incomplete]: ... # def isin( From d842af9ee69420f7220a134363453fd88b231696 Mon Sep 17 00:00:00 2001 From: jorenham Date: Mon, 29 Dec 2025 15:52:41 +0100 Subject: [PATCH 1099/1718] TYP: update set ops type-tests --- .../typing/tests/data/reveal/arraysetops.pyi | 40 ++++++++++--------- 1 file changed, 21 insertions(+), 19 deletions(-) diff --git a/numpy/typing/tests/data/reveal/arraysetops.pyi b/numpy/typing/tests/data/reveal/arraysetops.pyi index 36fc0603dcfd..ee74eedd61c7 100644 --- a/numpy/typing/tests/data/reveal/arraysetops.pyi +++ b/numpy/typing/tests/data/reveal/arraysetops.pyi @@ -8,6 +8,8 @@ from numpy.lib._arraysetops_impl import ( UniqueInverseResult, ) +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] + AR_b: npt.NDArray[np.bool] AR_i8: npt.NDArray[np.int64] AR_f8: npt.NDArray[np.float64] @@ -16,40 +18,40 @@ AR_O: npt.NDArray[np.object_] AR_LIKE_f8: list[float] -assert_type(np.ediff1d(AR_b), npt.NDArray[np.int8]) -assert_type(np.ediff1d(AR_i8, to_end=[1, 2, 3]), npt.NDArray[np.int64]) -assert_type(np.ediff1d(AR_M), npt.NDArray[np.timedelta64]) -assert_type(np.ediff1d(AR_O), npt.NDArray[np.object_]) -assert_type(np.ediff1d(AR_LIKE_f8, to_begin=[1, 1.5]), npt.NDArray[Any]) +assert_type(np.ediff1d(AR_b), _Array1D[np.int8]) +assert_type(np.ediff1d(AR_i8, to_end=[1, 2, 3]), _Array1D[np.int64]) +assert_type(np.ediff1d(AR_M), _Array1D[np.timedelta64]) +assert_type(np.ediff1d(AR_O), _Array1D[np.object_]) +assert_type(np.ediff1d(AR_LIKE_f8, to_begin=[1, 1.5]), 
_Array1D[Any]) -assert_type(np.intersect1d(AR_i8, AR_i8), npt.NDArray[np.int64]) +assert_type(np.intersect1d(AR_i8, AR_i8), _Array1D[np.int64]) # NOTE: Mypy incorrectly infers `ndarray[Any, Any]`, but pyright behaves correctly. -assert_type(np.intersect1d(AR_M, AR_M, assume_unique=True), npt.NDArray[np.datetime64]) # type: ignore[assert-type] -assert_type(np.intersect1d(AR_f8, AR_i8), npt.NDArray[Any]) +assert_type(np.intersect1d(AR_M, AR_M, assume_unique=True), _Array1D[np.datetime64]) # type: ignore[assert-type] +assert_type(np.intersect1d(AR_f8, AR_i8), _Array1D[Any]) assert_type( np.intersect1d(AR_f8, AR_f8, return_indices=True), - tuple[npt.NDArray[np.float64], npt.NDArray[np.intp], npt.NDArray[np.intp]], + tuple[_Array1D[np.float64], _Array1D[np.intp], _Array1D[np.intp]], ) -assert_type(np.setxor1d(AR_i8, AR_i8), npt.NDArray[np.int64]) +assert_type(np.setxor1d(AR_i8, AR_i8), _Array1D[np.int64]) # NOTE: Mypy incorrectly infers `ndarray[Any, Any]`, but pyright behaves correctly. -assert_type(np.setxor1d(AR_M, AR_M, assume_unique=True), npt.NDArray[np.datetime64]) # type: ignore[assert-type] -assert_type(np.setxor1d(AR_f8, AR_i8), npt.NDArray[Any]) +assert_type(np.setxor1d(AR_M, AR_M, assume_unique=True), _Array1D[np.datetime64]) # type: ignore[assert-type] +assert_type(np.setxor1d(AR_f8, AR_i8), _Array1D[Any]) assert_type(np.isin(AR_i8, AR_i8), npt.NDArray[np.bool]) assert_type(np.isin(AR_M, AR_M, assume_unique=True), npt.NDArray[np.bool]) assert_type(np.isin(AR_f8, AR_i8), npt.NDArray[np.bool]) assert_type(np.isin(AR_f8, AR_LIKE_f8, invert=True), npt.NDArray[np.bool]) -assert_type(np.union1d(AR_i8, AR_i8), npt.NDArray[np.int64]) +assert_type(np.union1d(AR_i8, AR_i8), _Array1D[np.int64]) # NOTE: Mypy incorrectly infers `ndarray[Any, Any]`, but pyright behaves correctly. 
-assert_type(np.union1d(AR_M, AR_M), npt.NDArray[np.datetime64]) # type: ignore[assert-type] -assert_type(np.union1d(AR_f8, AR_i8), npt.NDArray[Any]) +assert_type(np.union1d(AR_M, AR_M), _Array1D[np.datetime64]) # type: ignore[assert-type] +assert_type(np.union1d(AR_f8, AR_i8), _Array1D[Any]) -assert_type(np.setdiff1d(AR_i8, AR_i8), npt.NDArray[np.int64]) +assert_type(np.setdiff1d(AR_i8, AR_i8), _Array1D[np.int64]) # NOTE: Mypy incorrectly infers `ndarray[Any, Any]`, but pyright behaves correctly. assert_type(np.setdiff1d(AR_M, AR_M, assume_unique=True), npt.NDArray[np.datetime64]) # type: ignore[assert-type] -assert_type(np.setdiff1d(AR_f8, AR_i8), npt.NDArray[Any]) +assert_type(np.setdiff1d(AR_f8, AR_i8), _Array1D[Any]) assert_type(np.unique(AR_f8), npt.NDArray[np.float64]) assert_type(np.unique(AR_LIKE_f8, axis=0), npt.NDArray[Any]) @@ -74,5 +76,5 @@ assert_type(np.unique_counts(AR_f8), UniqueCountsResult[np.float64]) assert_type(np.unique_counts(AR_LIKE_f8), UniqueCountsResult[Any]) assert_type(np.unique_inverse(AR_f8), UniqueInverseResult[np.float64]) assert_type(np.unique_inverse(AR_LIKE_f8), UniqueInverseResult[Any]) -assert_type(np.unique_values(AR_f8), npt.NDArray[np.float64]) -assert_type(np.unique_values(AR_LIKE_f8), npt.NDArray[Any]) +assert_type(np.unique_values(AR_f8), _Array1D[np.float64]) +assert_type(np.unique_values(AR_LIKE_f8), _Array1D[Any]) From e11d5e374d88a89265bc9611e73c46d7df72b70f Mon Sep 17 00:00:00 2001 From: Kumar Aditya Date: Mon, 29 Dec 2025 20:30:12 +0530 Subject: [PATCH 1100/1718] TST: do not share arrays across threads in `test_ufunc_at_scalar_value_fastpath` (#30541) --- numpy/_core/tests/test_ufunc.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/numpy/_core/tests/test_ufunc.py b/numpy/_core/tests/test_ufunc.py index 09d01eab8186..55ed0d881002 100644 --- a/numpy/_core/tests/test_ufunc.py +++ b/numpy/_core/tests/test_ufunc.py @@ -2261,14 +2261,14 @@ def test_cast_index_fastpath(self): 
np.add.at(arr, index, values) assert arr[0] == len(values) - @pytest.mark.parametrize("value", [ - np.ones(1), np.ones(()), np.float64(1.), 1.]) - def test_ufunc_at_scalar_value_fastpath(self, value): - arr = np.zeros(1000) - # index must be cast, which may be buffered in chunks: - index = np.repeat(np.arange(1000), 2) - np.add.at(arr, index, value) - assert_array_equal(arr, np.full_like(arr, 2 * value)) + def test_ufunc_at_scalar_value_fastpath(self): + values = [np.ones(1), np.ones(()), np.float64(1.), 1.] + for value in values: + arr = np.zeros(1000) + # index must be cast, which may be buffered in chunks: + index = np.repeat(np.arange(1000), 2) + np.add.at(arr, index, value) + assert_array_equal(arr, np.full_like(arr, 2 * value)) def test_ufunc_at_multiD(self): a = np.arange(9).reshape(3, 3) From eb8017130fe6b5a324fca6d01fdbaddb75358225 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 29 Dec 2025 17:05:57 +0000 Subject: [PATCH 1101/1718] MAINT: Bump egor-tensin/setup-cygwin from 4.0.1 to 4.0.2 Bumps [egor-tensin/setup-cygwin](https://github.com/egor-tensin/setup-cygwin) from 4.0.1 to 4.0.2. - [Release notes](https://github.com/egor-tensin/setup-cygwin/releases) - [Commits](https://github.com/egor-tensin/setup-cygwin/compare/d2c752bab416d4b0662591bd366fc2686297c82d...fca9069f92361187d4abfaa5d8a7490e435d8349) --- updated-dependencies: - dependency-name: egor-tensin/setup-cygwin dependency-version: 4.0.2 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- .github/workflows/cygwin.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/cygwin.yml b/.github/workflows/cygwin.yml index a0a914c0204d..eebb6de405ab 100644 --- a/.github/workflows/cygwin.yml +++ b/.github/workflows/cygwin.yml @@ -29,7 +29,7 @@ jobs: fetch-tags: true persist-credentials: false - name: Install Cygwin - uses: egor-tensin/setup-cygwin@d2c752bab416d4b0662591bd366fc2686297c82d # v4 + uses: egor-tensin/setup-cygwin@fca9069f92361187d4abfaa5d8a7490e435d8349 # v4 with: platform: x86_64 install-dir: 'C:\tools\cygwin' From 0f41523d6ca2098d70aec5d452f6dd4446980597 Mon Sep 17 00:00:00 2001 From: jorenham Date: Mon, 29 Dec 2025 20:17:29 +0100 Subject: [PATCH 1102/1718] TYP: ``histogram_bin_edges``: shape-typing and dtype specialization --- numpy/lib/_histograms_impl.pyi | 62 ++++++++++++++----- numpy/typing/tests/data/reveal/histograms.pyi | 24 ++++++- 2 files changed, 68 insertions(+), 18 deletions(-) diff --git a/numpy/lib/_histograms_impl.pyi b/numpy/lib/_histograms_impl.pyi index 0c4c673ef063..ef125ca59698 100644 --- a/numpy/lib/_histograms_impl.pyi +++ b/numpy/lib/_histograms_impl.pyi @@ -1,28 +1,60 @@ +from _typeshed import Incomplete from collections.abc import Sequence -from typing import Any, Literal as L, SupportsIndex +from typing import Any, Literal as L, SupportsIndex, overload -from numpy._typing import ArrayLike, NDArray +import numpy as np +from numpy._typing import ( + ArrayLike, + NDArray, + _ArrayLike, + _ArrayLikeInt_co, + _ArrayLikeNumber_co, + _ArrayLikeObject_co, + _NestedSequence, +) __all__ = ["histogram", "histogramdd", "histogram_bin_edges"] -type _BinKind = L[ - "stone", - "auto", - "doane", - "fd", - "rice", - "scott", - "sqrt", - "sturges", -] +### +type _BinKind = L["auto", "fd", "doane", "scott", "stone", "rice", "sturges", "sqrt"] + +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] +type _WeightsLike = _ArrayLikeNumber_co | 
_ArrayLikeObject_co + +### + +# NOTE: The return type can also be complex or `object_`, not only floating like the docstring suggests. +@overload # +float64 def histogram_bin_edges( - a: ArrayLike, + a: _ArrayLikeInt_co | _NestedSequence[float], bins: _BinKind | SupportsIndex | ArrayLike = 10, range: tuple[float, float] | None = None, - weights: ArrayLike | None = None, -) -> NDArray[Any]: ... + weights: _WeightsLike | None = None, +) -> _Array1D[np.float64]: ... +@overload # ~complex +def histogram_bin_edges( + a: list[complex] | _NestedSequence[list[complex]], + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: tuple[float, float] | None = None, + weights: _WeightsLike | None = None, +) -> _Array1D[np.complex128]: ... +@overload # ~inexact | object_ +def histogram_bin_edges[ScalarT: np.inexact | np.object_]( + a: _ArrayLike[ScalarT], + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: tuple[float, float] | None = None, + weights: _WeightsLike | None = None, +) -> _Array1D[ScalarT]: ... +@overload # fallback +def histogram_bin_edges( + a: _ArrayLikeNumber_co, + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: tuple[float, float] | None = None, + weights: _WeightsLike | None = None, +) -> _Array1D[Incomplete]: ... 
+# def histogram( a: ArrayLike, bins: _BinKind | SupportsIndex | ArrayLike = 10, diff --git a/numpy/typing/tests/data/reveal/histograms.pyi b/numpy/typing/tests/data/reveal/histograms.pyi index c1c63d59cb88..564996c2377d 100644 --- a/numpy/typing/tests/data/reveal/histograms.pyi +++ b/numpy/typing/tests/data/reveal/histograms.pyi @@ -3,12 +3,30 @@ from typing import Any, assert_type import numpy as np import numpy.typing as npt +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] + AR_i8: npt.NDArray[np.int64] +AR_f4: npt.NDArray[np.float32] AR_f8: npt.NDArray[np.float64] +AR_c8: npt.NDArray[np.complex64] +AR_c16: npt.NDArray[np.complex128] + +list_i: list[int] +list_f: list[float] +list_c: list[complex] + +### -assert_type(np.histogram_bin_edges(AR_i8, bins="auto"), npt.NDArray[Any]) -assert_type(np.histogram_bin_edges(AR_i8, bins="rice", range=(0, 3)), npt.NDArray[Any]) -assert_type(np.histogram_bin_edges(AR_i8, bins="scott", weights=AR_f8), npt.NDArray[Any]) +assert_type(np.histogram_bin_edges(AR_i8, bins="auto"), _Array1D[np.float64]) +assert_type(np.histogram_bin_edges(AR_i8, bins="rice", range=(0, 3)), _Array1D[np.float64]) +assert_type(np.histogram_bin_edges(AR_i8, bins="scott", weights=AR_f8), _Array1D[np.float64]) +assert_type(np.histogram_bin_edges(AR_f4), _Array1D[np.float32]) +assert_type(np.histogram_bin_edges(AR_f8), _Array1D[np.float64]) +assert_type(np.histogram_bin_edges(AR_c8), _Array1D[np.complex64]) +assert_type(np.histogram_bin_edges(AR_c16), _Array1D[np.complex128]) +assert_type(np.histogram_bin_edges(list_i), _Array1D[np.float64]) +assert_type(np.histogram_bin_edges(list_f), _Array1D[np.float64]) +assert_type(np.histogram_bin_edges(list_c), _Array1D[np.complex128]) assert_type(np.histogram(AR_i8, bins="auto"), tuple[npt.NDArray[Any], npt.NDArray[Any]]) assert_type(np.histogram(AR_i8, bins="rice", range=(0, 3)), tuple[npt.NDArray[Any], npt.NDArray[Any]]) From 27d55845f030ec0626735f8bb9352ffd836758dc Mon Sep 17 
00:00:00 2001 From: jorenham Date: Mon, 29 Dec 2025 21:15:44 +0100 Subject: [PATCH 1103/1718] TYP: ``histogram``: shape-typing and dtype specialization (20 overloads) --- numpy/lib/_histograms_impl.pyi | 213 ++++++++++++++++-- numpy/typing/tests/data/reveal/histograms.pyi | 33 ++- 2 files changed, 223 insertions(+), 23 deletions(-) diff --git a/numpy/lib/_histograms_impl.pyi b/numpy/lib/_histograms_impl.pyi index ef125ca59698..dc7baa18a4d1 100644 --- a/numpy/lib/_histograms_impl.pyi +++ b/numpy/lib/_histograms_impl.pyi @@ -7,8 +7,9 @@ from numpy._typing import ( ArrayLike, NDArray, _ArrayLike, + _ArrayLikeComplex_co, + _ArrayLikeFloat_co, _ArrayLikeInt_co, - _ArrayLikeNumber_co, _ArrayLikeObject_co, _NestedSequence, ) @@ -19,54 +20,228 @@ __all__ = ["histogram", "histogramdd", "histogram_bin_edges"] type _BinKind = L["auto", "fd", "doane", "scott", "stone", "rice", "sturges", "sqrt"] +type _Range = tuple[float, float] +type _NestedList[T] = list[T] | _NestedSequence[list[T]] + +type _WeightsLike = _ArrayLikeComplex_co | _ArrayLikeObject_co type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] -type _WeightsLike = _ArrayLikeNumber_co | _ArrayLikeObject_co +type _HistogramResult[HistT: np.generic, EdgeT: np.generic] = tuple[_Array1D[HistT], _Array1D[EdgeT]] ### # NOTE: The return type can also be complex or `object_`, not only floating like the docstring suggests. -@overload # +float64 +@overload # dtype +float64 def histogram_bin_edges( a: _ArrayLikeInt_co | _NestedSequence[float], bins: _BinKind | SupportsIndex | ArrayLike = 10, - range: tuple[float, float] | None = None, + range: _Range | None = None, weights: _WeightsLike | None = None, ) -> _Array1D[np.float64]: ... 
-@overload # ~complex +@overload # dtype ~complex def histogram_bin_edges( - a: list[complex] | _NestedSequence[list[complex]], + a: _NestedList[complex], bins: _BinKind | SupportsIndex | ArrayLike = 10, - range: tuple[float, float] | None = None, + range: _Range | None = None, weights: _WeightsLike | None = None, ) -> _Array1D[np.complex128]: ... -@overload # ~inexact | object_ +@overload # dtype known def histogram_bin_edges[ScalarT: np.inexact | np.object_]( a: _ArrayLike[ScalarT], bins: _BinKind | SupportsIndex | ArrayLike = 10, - range: tuple[float, float] | None = None, + range: _Range | None = None, weights: _WeightsLike | None = None, ) -> _Array1D[ScalarT]: ... -@overload # fallback +@overload # dtype unknown def histogram_bin_edges( - a: _ArrayLikeNumber_co, + a: _ArrayLikeComplex_co, bins: _BinKind | SupportsIndex | ArrayLike = 10, - range: tuple[float, float] | None = None, + range: _Range | None = None, weights: _WeightsLike | None = None, ) -> _Array1D[Incomplete]: ... -# +# There are 4 groups of 2 + 3 overloads (2 for density=True, 3 for density=False) = 20 in total +@overload # a: +float64, density: True (keyword), weights: +float | None (default) def histogram( - a: ArrayLike, + a: _ArrayLikeInt_co | _NestedSequence[float], bins: _BinKind | SupportsIndex | ArrayLike = 10, - range: tuple[float, float] | None = None, - density: bool | None = None, - weights: ArrayLike | None = None, -) -> tuple[NDArray[Any], NDArray[Any]]: ... + range: _Range | None = None, + *, + density: L[True], + weights: _ArrayLikeFloat_co | None = None, +) -> _HistogramResult[np.float64, np.float64]: ... +@overload # a: +float64, density: True (keyword), weights: +complex +def histogram( + a: _ArrayLikeInt_co | _NestedSequence[float], + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + *, + density: L[True], + weights: _ArrayLike[np.complexfloating] | _NestedList[complex], +) -> _HistogramResult[np.complex128, np.float64]: ... 
+@overload # a: +float64, density: False (default), weights: ~int | None (default) +def histogram( + a: _ArrayLikeInt_co | _NestedSequence[float], + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + density: L[False] | None = None, + weights: _NestedSequence[int] | None = None, +) -> _HistogramResult[np.intp, np.float64]: ... +@overload # a: +float64, density: False (default), weights: known (keyword) +def histogram[WeightsT: np.bool | np.number | np.timedelta64]( + a: _ArrayLikeInt_co | _NestedSequence[float], + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + density: L[False] | None = None, + *, + weights: _ArrayLike[WeightsT], +) -> _HistogramResult[WeightsT, np.float64]: ... +@overload # a: +float64, density: False (default), weights: unknown (keyword) +def histogram( + a: _ArrayLikeInt_co | _NestedSequence[float], + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + density: L[False] | None = None, + *, + weights: _WeightsLike, +) -> _HistogramResult[Incomplete, np.float64]: ... +@overload # a: ~complex, density: True (keyword), weights: +float | None (default) +def histogram( + a: _NestedList[complex], + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + *, + density: L[True], + weights: _ArrayLikeFloat_co | None = None, +) -> _HistogramResult[np.float64, np.complex128]: ... +@overload # a: ~complex, density: True (keyword), weights: +complex +def histogram( + a: _NestedList[complex], + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + *, + density: L[True], + weights: _ArrayLike[np.complexfloating] | _NestedList[complex], +) -> _HistogramResult[np.complex128, np.complex128]: ... 
+@overload # a: ~complex, density: False (default), weights: ~int | None (default) +def histogram( + a: _NestedList[complex], + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + density: L[False] | None = None, + weights: _NestedSequence[int] | None = None, +) -> _HistogramResult[np.intp, np.complex128]: ... +@overload # a: ~complex, density: False (default), weights: known (keyword) +def histogram[WeightsT: np.bool | np.number | np.timedelta64]( + a: _NestedList[complex], + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + density: L[False] | None = None, + *, + weights: _ArrayLike[WeightsT], +) -> _HistogramResult[WeightsT, np.complex128]: ... +@overload # a: ~complex, density: False (default), weights: unknown (keyword) +def histogram( + a: _NestedList[complex], + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + density: L[False] | None = None, + *, + weights: _WeightsLike, +) -> _HistogramResult[Incomplete, np.complex128]: ... +@overload # a: known, density: True (keyword), weights: +float | None (default) +def histogram[ScalarT: np.inexact | np.object_]( + a: _ArrayLike[ScalarT], + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + *, + density: L[True], + weights: _ArrayLikeFloat_co | None = None, +) -> _HistogramResult[np.float64, ScalarT]: ... +@overload # a: known, density: True (keyword), weights: +complex +def histogram[ScalarT: np.inexact | np.object_]( + a: _ArrayLike[ScalarT], + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + *, + density: L[True], + weights: _ArrayLike[np.complexfloating] | _NestedList[complex], +) -> _HistogramResult[np.complex128, ScalarT]: ... 
+@overload # a: known, density: False (default), weights: ~int | None (default) +def histogram[ScalarT: np.inexact | np.object_]( + a: _ArrayLike[ScalarT], + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + density: L[False] | None = None, + weights: _NestedSequence[int] | None = None, +) -> _HistogramResult[np.intp, ScalarT]: ... +@overload # a: known, density: False (default), weights: known (keyword) +def histogram[ScalarT: np.inexact | np.object_, WeightsT: np.bool | np.number | np.timedelta64]( + a: _ArrayLike[ScalarT], + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + density: L[False] | None = None, + *, + weights: _ArrayLike[WeightsT], +) -> _HistogramResult[WeightsT, ScalarT]: ... +@overload # a: known, density: False (default), weights: unknown (keyword) +def histogram[ScalarT: np.inexact | np.object_]( + a: _ArrayLike[ScalarT], + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + density: L[False] | None = None, + *, + weights: _WeightsLike, +) -> _HistogramResult[Incomplete, ScalarT]: ... +@overload # a: unknown, density: True (keyword), weights: +float | None (default) +def histogram( + a: _ArrayLikeComplex_co, + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + *, + density: L[True], + weights: _ArrayLikeFloat_co | None = None, +) -> _HistogramResult[np.float64, Incomplete]: ... +@overload # a: unknown, density: True (keyword), weights: +complex +def histogram( + a: _ArrayLikeComplex_co, + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + *, + density: L[True], + weights: _ArrayLike[np.complexfloating] | _NestedList[complex], +) -> _HistogramResult[np.complex128, Incomplete]: ... 
+@overload # a: unknown, density: False (default), weights: int | None (default) +def histogram( + a: _ArrayLikeComplex_co, + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + density: L[False] | None = None, + weights: _NestedSequence[int] | None = None, +) -> _HistogramResult[np.intp, Incomplete]: ... +@overload # a: unknown, density: False (default), weights: known (keyword) +def histogram[WeightsT: np.bool | np.number | np.timedelta64]( + a: _ArrayLikeComplex_co, + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + density: L[False] | None = None, + *, + weights: _ArrayLike[WeightsT], +) -> _HistogramResult[WeightsT, Incomplete]: ... +@overload # a: unknown, density: False (default), weights: unknown (keyword) +def histogram( + a: _ArrayLikeComplex_co, + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: _Range | None = None, + density: L[False] | None = None, + *, + weights: _WeightsLike, +) -> _HistogramResult[Incomplete, Incomplete]: ... +# def histogramdd( sample: ArrayLike, bins: SupportsIndex | ArrayLike = 10, - range: Sequence[tuple[float, float]] | None = None, + range: Sequence[_Range] | None = None, density: bool | None = None, weights: ArrayLike | None = None, ) -> tuple[NDArray[Any], tuple[NDArray[Any], ...]]: ... 
diff --git a/numpy/typing/tests/data/reveal/histograms.pyi b/numpy/typing/tests/data/reveal/histograms.pyi index 564996c2377d..0bbc03981cb8 100644 --- a/numpy/typing/tests/data/reveal/histograms.pyi +++ b/numpy/typing/tests/data/reveal/histograms.pyi @@ -5,6 +5,7 @@ import numpy.typing as npt type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] +AR_i4: npt.NDArray[np.int32] AR_i8: npt.NDArray[np.int64] AR_f4: npt.NDArray[np.float32] AR_f8: npt.NDArray[np.float64] @@ -28,10 +29,34 @@ assert_type(np.histogram_bin_edges(list_i), _Array1D[np.float64]) assert_type(np.histogram_bin_edges(list_f), _Array1D[np.float64]) assert_type(np.histogram_bin_edges(list_c), _Array1D[np.complex128]) -assert_type(np.histogram(AR_i8, bins="auto"), tuple[npt.NDArray[Any], npt.NDArray[Any]]) -assert_type(np.histogram(AR_i8, bins="rice", range=(0, 3)), tuple[npt.NDArray[Any], npt.NDArray[Any]]) -assert_type(np.histogram(AR_i8, bins="scott", weights=AR_f8), tuple[npt.NDArray[Any], npt.NDArray[Any]]) -assert_type(np.histogram(AR_f8, bins=1, density=True), tuple[npt.NDArray[Any], npt.NDArray[Any]]) +assert_type(np.histogram(AR_i8, bins="auto"), tuple[_Array1D[np.intp], _Array1D[np.float64]]) +assert_type(np.histogram(AR_i8, bins="rice", range=(0, 3)), tuple[_Array1D[np.intp], _Array1D[np.float64]]) +assert_type(np.histogram(AR_i8, bins="scott", weights=AR_f8), tuple[_Array1D[np.float64], _Array1D[np.float64]]) +assert_type(np.histogram(AR_f8, bins=1, density=True), tuple[_Array1D[np.float64], _Array1D[np.float64]]) +assert_type(np.histogram(AR_f4), tuple[_Array1D[np.intp], _Array1D[np.float32]]) +assert_type(np.histogram(AR_f8), tuple[_Array1D[np.intp], _Array1D[np.float64]]) +assert_type(np.histogram(AR_c8), tuple[_Array1D[np.intp], _Array1D[np.complex64]]) +assert_type(np.histogram(AR_c16), tuple[_Array1D[np.intp], _Array1D[np.complex128]]) +assert_type(np.histogram(list_i), tuple[_Array1D[np.intp], _Array1D[np.float64]]) +assert_type(np.histogram(list_f), 
tuple[_Array1D[np.intp], _Array1D[np.float64]]) +assert_type(np.histogram(list_c), tuple[_Array1D[np.intp], _Array1D[np.complex128]]) +assert_type(np.histogram(AR_f4, density=True), tuple[_Array1D[np.float64], _Array1D[np.float32]]) +assert_type(np.histogram(AR_f4, density=True, weights=AR_i4), tuple[_Array1D[np.float64], _Array1D[np.float32]]) +assert_type(np.histogram(AR_f4, density=True, weights=AR_f4), tuple[_Array1D[np.float64], _Array1D[np.float32]]) +assert_type(np.histogram(AR_f4, density=True, weights=AR_f8), tuple[_Array1D[np.float64], _Array1D[np.float32]]) +assert_type(np.histogram(AR_f4, density=True, weights=AR_c8), tuple[_Array1D[np.complex128], _Array1D[np.float32]]) +assert_type(np.histogram(AR_f4, density=True, weights=AR_c16), tuple[_Array1D[np.complex128], _Array1D[np.float32]]) +assert_type(np.histogram(AR_f4, density=True, weights=list_i), tuple[_Array1D[np.float64], _Array1D[np.float32]]) +assert_type(np.histogram(AR_f4, density=True, weights=list_f), tuple[_Array1D[np.float64], _Array1D[np.float32]]) +assert_type(np.histogram(AR_f4, density=True, weights=list_c), tuple[_Array1D[np.complex128], _Array1D[np.float32]]) +assert_type(np.histogram(AR_f4, weights=AR_i4), tuple[_Array1D[np.int32], _Array1D[np.float32]]) +assert_type(np.histogram(AR_f4, weights=AR_f4), tuple[_Array1D[np.float32], _Array1D[np.float32]]) +assert_type(np.histogram(AR_f4, weights=AR_f8), tuple[_Array1D[np.float64], _Array1D[np.float32]]) +assert_type(np.histogram(AR_f4, weights=AR_c8), tuple[_Array1D[np.complex64], _Array1D[np.float32]]) +assert_type(np.histogram(AR_f4, weights=AR_c16), tuple[_Array1D[np.complex128], _Array1D[np.float32]]) +assert_type(np.histogram(AR_f4, weights=list_i), tuple[_Array1D[np.intp], _Array1D[np.float32]]) +assert_type(np.histogram(AR_f4, weights=list_f), tuple[_Array1D[Any], _Array1D[np.float32]]) +assert_type(np.histogram(AR_f4, weights=list_c), tuple[_Array1D[Any], _Array1D[np.float32]]) assert_type(np.histogramdd(AR_i8, bins=[1]), 
tuple[npt.NDArray[Any], tuple[npt.NDArray[Any], ...]]) From 324dc87349ae35ad907cf5e31c4a173711b8c381 Mon Sep 17 00:00:00 2001 From: jorenham Date: Mon, 29 Dec 2025 21:27:48 +0100 Subject: [PATCH 1104/1718] TYP: ``histogramdd``: shape-typing and dtype specialization --- numpy/lib/_histograms_impl.pyi | 34 ++++++++++++++++--- numpy/typing/tests/data/reveal/histograms.pyi | 17 +++++----- 2 files changed, 39 insertions(+), 12 deletions(-) diff --git a/numpy/lib/_histograms_impl.pyi b/numpy/lib/_histograms_impl.pyi index dc7baa18a4d1..9f9bdb25aa6c 100644 --- a/numpy/lib/_histograms_impl.pyi +++ b/numpy/lib/_histograms_impl.pyi @@ -8,6 +8,7 @@ from numpy._typing import ( NDArray, _ArrayLike, _ArrayLikeComplex_co, + _ArrayLikeFloat64_co, _ArrayLikeFloat_co, _ArrayLikeInt_co, _ArrayLikeObject_co, @@ -237,11 +238,36 @@ def histogram( weights: _WeightsLike, ) -> _HistogramResult[Incomplete, Incomplete]: ... -# +# unlike `histogram`, `weights` must be safe-castable to f64 +@overload # dtype +float64 +def histogramdd( + sample: _ArrayLikeInt_co | _NestedSequence[float] | _ArrayLikeObject_co, + bins: SupportsIndex | ArrayLike = 10, + range: Sequence[_Range] | None = None, + density: bool | None = None, + weights: _ArrayLikeFloat64_co | None = None, +) -> tuple[NDArray[np.float64], tuple[_Array1D[np.float64], ...]]: ... +@overload # dtype ~complex +def histogramdd( + sample: _NestedList[complex], + bins: SupportsIndex | ArrayLike = 10, + range: Sequence[_Range] | None = None, + density: bool | None = None, + weights: _ArrayLikeFloat64_co | None = None, +) -> tuple[NDArray[np.float64], tuple[_Array1D[np.complex128], ...]]: ... +@overload # dtype known +def histogramdd[ScalarT: np.inexact]( + sample: _ArrayLike[ScalarT], + bins: SupportsIndex | ArrayLike = 10, + range: Sequence[_Range] | None = None, + density: bool | None = None, + weights: _ArrayLikeFloat64_co | None = None, +) -> tuple[NDArray[np.float64], tuple[_Array1D[ScalarT], ...]]: ... 
+@overload # dtype unknown def histogramdd( - sample: ArrayLike, + sample: _ArrayLikeComplex_co, bins: SupportsIndex | ArrayLike = 10, range: Sequence[_Range] | None = None, density: bool | None = None, - weights: ArrayLike | None = None, -) -> tuple[NDArray[Any], tuple[NDArray[Any], ...]]: ... + weights: _ArrayLikeFloat64_co | None = None, +) -> tuple[NDArray[np.float64], tuple[_Array1D[Any], ...]]: ... diff --git a/numpy/typing/tests/data/reveal/histograms.pyi b/numpy/typing/tests/data/reveal/histograms.pyi index 0bbc03981cb8..278961247698 100644 --- a/numpy/typing/tests/data/reveal/histograms.pyi +++ b/numpy/typing/tests/data/reveal/histograms.pyi @@ -58,11 +58,12 @@ assert_type(np.histogram(AR_f4, weights=list_i), tuple[_Array1D[np.intp], _Array assert_type(np.histogram(AR_f4, weights=list_f), tuple[_Array1D[Any], _Array1D[np.float32]]) assert_type(np.histogram(AR_f4, weights=list_c), tuple[_Array1D[Any], _Array1D[np.float32]]) -assert_type(np.histogramdd(AR_i8, bins=[1]), - tuple[npt.NDArray[Any], tuple[npt.NDArray[Any], ...]]) -assert_type(np.histogramdd(AR_i8, range=[(0, 3)]), - tuple[npt.NDArray[Any], tuple[npt.NDArray[Any], ...]]) -assert_type(np.histogramdd(AR_i8, weights=AR_f8), - tuple[npt.NDArray[Any], tuple[npt.NDArray[Any], ...]]) -assert_type(np.histogramdd(AR_f8, density=True), - tuple[npt.NDArray[Any], tuple[npt.NDArray[Any], ...]]) +assert_type(np.histogramdd(AR_i8, bins=[1]), tuple[npt.NDArray[np.float64], tuple[_Array1D[np.float64], ...]]) +assert_type(np.histogramdd(AR_i8, range=[(0, 3)]), tuple[npt.NDArray[np.float64], tuple[_Array1D[np.float64], ...]]) +assert_type(np.histogramdd(AR_i8, weights=AR_f8), tuple[npt.NDArray[np.float64], tuple[_Array1D[np.float64], ...]]) +assert_type(np.histogramdd(AR_f8, density=True), tuple[npt.NDArray[np.float64], tuple[_Array1D[np.float64], ...]]) +assert_type(np.histogramdd(AR_i4), tuple[npt.NDArray[np.float64], tuple[_Array1D[np.float64], ...]]) +assert_type(np.histogramdd(AR_i8), 
tuple[npt.NDArray[np.float64], tuple[_Array1D[np.float64], ...]]) +assert_type(np.histogramdd(AR_f4), tuple[npt.NDArray[np.float64], tuple[_Array1D[np.float32], ...]]) +assert_type(np.histogramdd(AR_c8), tuple[npt.NDArray[np.float64], tuple[_Array1D[np.complex64], ...]]) +assert_type(np.histogramdd(AR_c16), tuple[npt.NDArray[np.float64], tuple[_Array1D[np.complex128], ...]]) From ee79296887c4aee460d299e3632d8bbebded88b6 Mon Sep 17 00:00:00 2001 From: jorenham Date: Mon, 29 Dec 2025 23:11:00 +0100 Subject: [PATCH 1105/1718] TYP/STY: ``lib._shape_base_impl`` formatting --- numpy/lib/_shape_base_impl.pyi | 115 +++++++++++---------------------- 1 file changed, 38 insertions(+), 77 deletions(-) diff --git a/numpy/lib/_shape_base_impl.pyi b/numpy/lib/_shape_base_impl.pyi index 280f70a508cb..25cf94be3927 100644 --- a/numpy/lib/_shape_base_impl.pyi +++ b/numpy/lib/_shape_base_impl.pyi @@ -1,3 +1,4 @@ +from _typeshed import Incomplete from collections.abc import Callable, Sequence from typing import ( Any, @@ -10,15 +11,6 @@ from typing import ( ) import numpy as np -from numpy import ( - complexfloating, - floating, - integer, - object_, - signedinteger, - ufunc, - unsignedinteger, -) from numpy._typing import ( ArrayLike, NDArray, @@ -55,7 +47,7 @@ class _ArrayWrap(Protocol): def __call__( self, array: NDArray[Any], - context: tuple[ufunc, tuple[Any, ...], int] | None = ..., + context: tuple[np.ufunc, tuple[Any, ...], int] | None = ..., return_scalar: bool = ..., /, ) -> Any: ... @@ -81,17 +73,19 @@ class _SupportsSplitOps(Protocol): def take_along_axis[ScalarT: np.generic]( arr: ScalarT | NDArray[ScalarT], - indices: NDArray[integer], + indices: NDArray[np.integer], axis: int | None = -1, ) -> NDArray[ScalarT]: ... +# def put_along_axis[ScalarT: np.generic]( arr: NDArray[ScalarT], - indices: NDArray[integer], + indices: NDArray[np.integer], values: ArrayLike, axis: int | None, ) -> None: ... 
+# @overload def apply_along_axis[**Tss, ScalarT: np.generic]( func1d: Callable[Concatenate[np.ndarray, Tss], _ArrayLike[ScalarT]], @@ -102,36 +96,39 @@ def apply_along_axis[**Tss, ScalarT: np.generic]( ) -> NDArray[ScalarT]: ... @overload def apply_along_axis[**Tss]( - func1d: Callable[Concatenate[NDArray[Any], Tss], Any], + func1d: Callable[Concatenate[np.ndarray, Tss], Any], axis: SupportsIndex, arr: ArrayLike, *args: Tss.args, **kwargs: Tss.kwargs, ) -> NDArray[Any]: ... +# def apply_over_axes[ScalarT: np.generic]( func: Callable[[np.ndarray, int], NDArray[ScalarT]], a: ArrayLike, - axes: int | Sequence[int], + axes: _ShapeLike, ) -> NDArray[ScalarT]: ... +# @overload def expand_dims[ScalarT: np.generic](a: _ArrayLike[ScalarT], axis: _ShapeLike) -> NDArray[ScalarT]: ... @overload -def expand_dims(a: ArrayLike, axis: _ShapeLike) -> NDArray[Any]: ... +def expand_dims(a: ArrayLike, axis: _ShapeLike) -> NDArray[Incomplete]: ... # keep in sync with `numpy.ma.extras.column_stack` @overload def column_stack[ScalarT: np.generic](tup: Sequence[_ArrayLike[ScalarT]]) -> NDArray[ScalarT]: ... @overload -def column_stack(tup: Sequence[ArrayLike]) -> NDArray[Any]: ... +def column_stack(tup: Sequence[ArrayLike]) -> NDArray[Incomplete]: ... # keep in sync with `numpy.ma.extras.dstack` @overload def dstack[ScalarT: np.generic](tup: Sequence[_ArrayLike[ScalarT]]) -> NDArray[ScalarT]: ... @overload -def dstack(tup: Sequence[ArrayLike]) -> NDArray[Any]: ... +def dstack(tup: Sequence[ArrayLike]) -> NDArray[Incomplete]: ... +# @overload def array_split[SplitableT: _SupportsSplitOps]( ary: SplitableT, @@ -145,12 +142,9 @@ def array_split[ScalarT: np.generic]( axis: SupportsIndex = 0, ) -> list[NDArray[ScalarT]]: ... @overload -def array_split( - ary: ArrayLike, - indices_or_sections: _ShapeLike, - axis: SupportsIndex = 0, -) -> list[NDArray[Any]]: ... +def array_split(ary: ArrayLike, indices_or_sections: _ShapeLike, axis: SupportsIndex = 0) -> list[NDArray[Incomplete]]: ... 
+# @overload def split[SplitableT: _SupportsSplitOps]( ary: SplitableT, @@ -164,83 +158,50 @@ def split[ScalarT: np.generic]( axis: SupportsIndex = 0, ) -> list[NDArray[ScalarT]]: ... @overload -def split( - ary: ArrayLike, - indices_or_sections: _ShapeLike, - axis: SupportsIndex = 0, -) -> list[NDArray[Any]]: ... +def split(ary: ArrayLike, indices_or_sections: _ShapeLike, axis: SupportsIndex = 0) -> list[NDArray[Incomplete]]: ... # keep in sync with `numpy.ma.extras.hsplit` @overload -def hsplit[SplitableT: _SupportsSplitOps]( - ary: SplitableT, - indices_or_sections: _ShapeLike, -) -> list[SplitableT]: ... +def hsplit[SplitableT: _SupportsSplitOps](ary: SplitableT, indices_or_sections: _ShapeLike) -> list[SplitableT]: ... @overload -def hsplit[ScalarT: np.generic]( - ary: _ArrayLike[ScalarT], - indices_or_sections: _ShapeLike, -) -> list[NDArray[ScalarT]]: ... +def hsplit[ScalarT: np.generic](ary: _ArrayLike[ScalarT], indices_or_sections: _ShapeLike) -> list[NDArray[ScalarT]]: ... @overload -def hsplit( - ary: ArrayLike, - indices_or_sections: _ShapeLike, -) -> list[NDArray[Any]]: ... +def hsplit(ary: ArrayLike, indices_or_sections: _ShapeLike) -> list[NDArray[Incomplete]]: ... +# @overload -def vsplit[SplitableT: _SupportsSplitOps]( - ary: SplitableT, - indices_or_sections: _ShapeLike, -) -> list[SplitableT]: ... +def vsplit[SplitableT: _SupportsSplitOps](ary: SplitableT, indices_or_sections: _ShapeLike) -> list[SplitableT]: ... @overload -def vsplit[ScalarT: np.generic]( - ary: _ArrayLike[ScalarT], - indices_or_sections: _ShapeLike, -) -> list[NDArray[ScalarT]]: ... +def vsplit[ScalarT: np.generic](ary: _ArrayLike[ScalarT], indices_or_sections: _ShapeLike) -> list[NDArray[ScalarT]]: ... @overload -def vsplit( - ary: ArrayLike, - indices_or_sections: _ShapeLike, -) -> list[NDArray[Any]]: ... +def vsplit(ary: ArrayLike, indices_or_sections: _ShapeLike) -> list[NDArray[Incomplete]]: ... 
+# @overload -def dsplit[SplitableT: _SupportsSplitOps]( - ary: SplitableT, - indices_or_sections: _ShapeLike, -) -> list[SplitableT]: ... +def dsplit[SplitableT: _SupportsSplitOps](ary: SplitableT, indices_or_sections: _ShapeLike) -> list[SplitableT]: ... @overload -def dsplit[ScalarT: np.generic]( - ary: _ArrayLike[ScalarT], - indices_or_sections: _ShapeLike, -) -> list[NDArray[ScalarT]]: ... +def dsplit[ScalarT: np.generic](ary: _ArrayLike[ScalarT], indices_or_sections: _ShapeLike) -> list[NDArray[ScalarT]]: ... @overload -def dsplit( - ary: ArrayLike, - indices_or_sections: _ShapeLike, -) -> list[NDArray[Any]]: ... +def dsplit(ary: ArrayLike, indices_or_sections: _ShapeLike) -> list[NDArray[Incomplete]]: ... +# @overload def kron(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co) -> NDArray[np.bool]: ... @overload -def kron(a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co) -> NDArray[unsignedinteger]: ... +def kron(a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co) -> NDArray[np.unsignedinteger]: ... @overload -def kron(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co) -> NDArray[signedinteger]: ... +def kron(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co) -> NDArray[np.signedinteger]: ... @overload -def kron(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co) -> NDArray[floating]: ... +def kron(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co) -> NDArray[np.floating]: ... @overload -def kron(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... +def kron(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co) -> NDArray[np.complexfloating]: ... @overload -def kron(a: _ArrayLikeObject_co, b: Any) -> NDArray[object_]: ... +def kron(a: _ArrayLikeObject_co, b: object) -> NDArray[np.object_]: ... @overload -def kron(a: Any, b: _ArrayLikeObject_co) -> NDArray[object_]: ... +def kron(a: object, b: _ArrayLikeObject_co) -> NDArray[np.object_]: ... +# @overload -def tile[ScalarT: np.generic]( - A: _ArrayLike[ScalarT], - reps: int | Sequence[int], -) -> NDArray[ScalarT]: ... 
+def tile[ScalarT: np.generic](A: _ArrayLike[ScalarT], reps: int | Sequence[int]) -> NDArray[ScalarT]: ... @overload -def tile( - A: ArrayLike, - reps: int | Sequence[int], -) -> NDArray[Any]: ... +def tile(A: ArrayLike, reps: int | Sequence[int]) -> NDArray[Incomplete]: ... From d46d9ec1cea5c7d2424e19a625a51412435f2449 Mon Sep 17 00:00:00 2001 From: Mohataseem Khan Date: Tue, 30 Dec 2025 10:32:54 +0530 Subject: [PATCH 1106/1718] BLD: update descriptions of build options in meson.options (#30317) Co-authored-by: Ralf Gommers --- meson.options | 66 +++++++++++++++++++++++++++++---------------------- 1 file changed, 37 insertions(+), 29 deletions(-) diff --git a/meson.options b/meson.options index e7011a3b2f2e..8ba7c4b79e03 100644 --- a/meson.options +++ b/meson.options @@ -1,45 +1,53 @@ +# BLAS / LAPACK selection option('blas', type: 'string', value: 'auto', - description: 'Option for BLAS library selection. By default, try to find any in the order given by `blas-order`') + description: 'BLAS library to use (default: autodetect based on `blas-order`)') option('lapack', type: 'string', value: 'auto', - description: 'Option for LAPACK library selection. By default, try to find any in the order given by `lapack-order`') + description: 'LAPACK library to use (default: autodetect based on `lapack-order`)') option('allow-noblas', type: 'boolean', value: true, - description: 'If set to true, allow building with (slow!) internal fallback routines') + description: 'Allow building with (slow!) internal fallback routines if no BLAS library is found') option('blas-order', type: 'array', value: ['auto'], - description: 'Order of BLAS libraries to search for. E.g.: mkl,openblas,blis,blas') + description: 'Preferred search order for BLAS libraries (e.g., mkl, openblas, blis, blas)') option('lapack-order', type: 'array', value: ['auto'], - description: 'Order of LAPACK libraries to search for. 
E.g.: mkl,openblas,lapack') + description: 'Preferred search order for LAPACK libraries (e.g., mkl, openblas, lapack)') option('use-ilp64', type: 'boolean', value: false, - description: 'Use ILP64 (64-bit integer) BLAS and LAPACK interfaces') + description: 'Use ILP64 (64-bit integer) BLAS/LAPACK interfaces') option('blas-symbol-suffix', type: 'string', value: 'auto', - description: 'BLAS and LAPACK symbol suffix to use, if any') -option('mkl-threading', type: 'string', value: 'auto', - description: 'MKL threading method, one of: `seq`, `iomp`, `gomp`, `tbb`') -option('disable-svml', type: 'boolean', value: false, - description: 'Disable building against SVML') -option('disable-highway', type: 'boolean', value: false, - description: 'Disables SIMD-optimized operations related to Google Highway') -option('disable-intel-sort', type: 'boolean', value: false, - description: 'Disables SIMD-optimized operations related to Intel x86-simd-sort') + description: 'Symbol suffix for BLAS/LAPACK symbols (if any)') +option('mkl-threading', type: 'combo', value: 'auto', + choices: ['auto', 'seq', 'iomp', 'gomp', 'tbb'], + description: 'Threading backend for MKL') + +# Threading & parallelism option('disable-threading', type: 'boolean', value: false, description: 'Disable threading support (see `NPY_ALLOW_THREADS` docs)') option('enable-openmp', type: 'boolean', value: false, - description: 'Enable building NumPy with openmp support') + description: 'Enable compilation with OpenMP support') + +# CPU optimization / SIMD option('disable-optimization', type: 'boolean', value: false, - description: 'Disable CPU optimized code (dispatch,simd,unroll...)') + description: 'Disable all CPU optimizations (dispatch, SIMD, loop unrolling)') +option('disable-svml', type: 'boolean', value: false, + description: 'Disable use of Intel SVML') +option('disable-highway', type: 'boolean', value: false, + description: 'Disable SIMD-optimized operations related to Google Highway') 
+option('disable-intel-sort', type: 'boolean', value: false, + description: 'Disable SIMD-optimized operations related to Intel x86-simd-sort') option('cpu-baseline', type: 'string', value: 'min', - description: 'Minimal set of required CPU features') + description: 'Minimal set of required CPU features') option('cpu-baseline-detect', type: 'feature', value: 'auto', - description: 'Detect CPU baseline from the compiler flags') + description: 'Detect CPU baseline from the compiler flags') option('cpu-dispatch', type: 'string', value: 'max', - description: 'Dispatched set of additional CPU features') + description: 'Additional CPU features to dispatch to (beyond baseline)') + +# SIMD test options option('test-simd', type: 'array', - value: [ - 'BASELINE', 'X86_V2', 'X86_V3', 'X86_V4', - 'VSX', 'VSX2', 'VSX3', 'VSX4', - 'NEON', 'ASIMD', - 'VX', 'VXE', 'VXE2', - 'LSX', - ], - description: 'Specify a list of CPU features to be tested against NumPy SIMD interface') + value: [ + 'BASELINE', 'X86_V2', 'X86_V3', 'X86_V4', + 'VSX', 'VSX2', 'VSX3', 'VSX4', + 'NEON', 'ASIMD', + 'VX', 'VXE', 'VXE2', + 'LSX', + ], + description: 'CPU SIMD feature sets to be tested by the NumPy SIMD test module') option('test-simd-args', type: 'string', value: '', - description: 'Extra args to be passed to the `_simd` module that is used for testing the NumPy SIMD interface') + description: 'Extra arguments passed to the internal `_simd` test module') From ab639acc43f2f0e63332064b1bbb5b435fa7adea Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Tue, 30 Dec 2025 10:23:32 +0100 Subject: [PATCH 1107/1718] DOC: add a page on CPU/SIMD support under "Building from source" Implements a suggestion made in issue 30492. 
[skip actions] [skip azp] --- doc/source/building/cpu_simd.rst | 26 +++++++++++++++++++++ doc/source/building/index.rst | 3 ++- doc/source/reference/simd/build-options.rst | 2 ++ 3 files changed, 30 insertions(+), 1 deletion(-) create mode 100644 doc/source/building/cpu_simd.rst diff --git a/doc/source/building/cpu_simd.rst b/doc/source/building/cpu_simd.rst new file mode 100644 index 000000000000..40922fb63c53 --- /dev/null +++ b/doc/source/building/cpu_simd.rst @@ -0,0 +1,26 @@ +CPU support & SIMD +================== + +NumPy supports a wide range of platforms and CPUs, and includes a significant +amount of code optimized for specific CPUs. By default, NumPy targets a +baseline with the minimum required SIMD instruction sets that are needed +(e.g., SSE4.2 on x86-64 CPUs) and uses dynamic dispatch to use newer instruction +sets (e.g., AVX2 and AVX512 on x86-64) when those are detected at runtime. + +There are a number of build options that can be used to modify that behavior. +The default build settings are chosen for both portability and performance, and +should be reasonably close to optimal for creating redistributable binaries as +well as local installs. That said, there are reasons one may want to change the +default behavior, for example to obtain smaller binaries, to install on very old +hardware, to work around bugs, or for testing. + +To detect and uses all CPU features available on your local machine:: + + $ python -m pip install . -Csetup-args=-Dcpu-baseline="native" -Csetup-args=-Dcpu-dispatch="none" + +To use a lower baseline without any SIMD optimizations, useful for very old CPUs:: + + $ python -m pip install . -Csetup-args=-Dcpu-baseline="none" + +For more usage scenarios and more in-depth information about NumPy's SIMD support, +see :ref:`cpu-build-options`. 
diff --git a/doc/source/building/index.rst b/doc/source/building/index.rst index d027ecb0ee8f..2a89fefde4f9 100644 --- a/doc/source/building/index.rst +++ b/doc/source/building/index.rst @@ -237,7 +237,7 @@ your system. First, install Microsoft Visual Studio - the 2022 Community Edition will work (see the `Visual Studio download site `__). - Ensure that you have installed necessary Visual Studio components for building NumPy + Ensure that you have installed necessary Visual Studio components for building NumPy on WoA from `here `__. To use the flang compiler for Windows on ARM64, install Latest LLVM @@ -515,6 +515,7 @@ Customizing builds compilers_and_options blas_lapack + cpu_simd cross_compilation redistributable_binaries diff --git a/doc/source/reference/simd/build-options.rst b/doc/source/reference/simd/build-options.rst index 229a9ebbae0a..8532ee307fa1 100644 --- a/doc/source/reference/simd/build-options.rst +++ b/doc/source/reference/simd/build-options.rst @@ -1,3 +1,5 @@ +.. _cpu-build-options: + ***************** CPU Build Options ***************** From 738a581c621f61b3f9d70e33ebcd72ab2dc99f8b Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Tue, 30 Dec 2025 10:48:22 -0700 Subject: [PATCH 1108/1718] MAINT: Update cpu_simd.rst Replace "uses" by "use". --- doc/source/building/cpu_simd.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/building/cpu_simd.rst b/doc/source/building/cpu_simd.rst index 40922fb63c53..f99a68b0c28a 100644 --- a/doc/source/building/cpu_simd.rst +++ b/doc/source/building/cpu_simd.rst @@ -14,7 +14,7 @@ well as local installs. That said, there are reasons one may want to change the default behavior, for example to obtain smaller binaries, to install on very old hardware, to work around bugs, or for testing. -To detect and uses all CPU features available on your local machine:: +To detect and use all CPU features available on your local machine:: $ python -m pip install . 
-Csetup-args=-Dcpu-baseline="native" -Csetup-args=-Dcpu-dispatch="none" From a6d53e00ae773167670eb22f63293534e97c1ff4 Mon Sep 17 00:00:00 2001 From: Harish E Date: Wed, 31 Dec 2025 00:53:17 +0530 Subject: [PATCH 1109/1718] MAINT: Remove obsolete Doxygen tags from config Removed obsolete configuration tags from doc/source/doxyfile to fix build warnings. These tags are no longer supported in the version of Doxygen currently used by the build system. Fixes build warnings reported in #28694. --- doc/source/doxyfile | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/doc/source/doxyfile b/doc/source/doxyfile index ea45b9578309..3d85fb645488 100644 --- a/doc/source/doxyfile +++ b/doc/source/doxyfile @@ -11,7 +11,6 @@ OUTPUT_DIRECTORY = @ROOT_DIR/doc/build/doxygen CREATE_SUBDIRS = NO ALLOW_UNICODE_NAMES = NO OUTPUT_LANGUAGE = English -OUTPUT_TEXT_DIRECTION = None BRIEF_MEMBER_DESC = YES REPEAT_BRIEF = YES ABBREVIATE_BRIEF = "The $name class" \ @@ -145,15 +144,11 @@ REFERENCES_LINK_SOURCE = YES SOURCE_TOOLTIPS = YES USE_HTAGS = NO VERBATIM_HEADERS = YES -CLANG_ASSISTED_PARSING = NO -CLANG_OPTIONS = -CLANG_DATABASE_PATH = #--------------------------------------------------------------------------- # Configuration options related to the alphabetical class index #--------------------------------------------------------------------------- ALPHABETICAL_INDEX = YES COLS_IN_ALPHA_INDEX = 5 -IGNORE_PREFIX = #--------------------------------------------------------------------------- # Configuration options related to the HTML output #--------------------------------------------------------------------------- @@ -168,7 +163,6 @@ HTML_EXTRA_FILES = HTML_COLORSTYLE_HUE = 220 HTML_COLORSTYLE_SAT = 100 HTML_COLORSTYLE_GAMMA = 80 -HTML_TIMESTAMP = NO HTML_DYNAMIC_MENUS = YES HTML_DYNAMIC_SECTIONS = NO HTML_INDEX_NUM_ENTRIES = 100 @@ -201,7 +195,6 @@ TREEVIEW_WIDTH = 250 EXT_LINKS_IN_WINDOW = NO HTML_FORMULA_FORMAT = png FORMULA_FONTSIZE = 10 -FORMULA_TRANSPARENT = YES 
FORMULA_MACROFILE = USE_MATHJAX = NO MATHJAX_FORMAT = HTML-CSS @@ -234,9 +227,7 @@ PDF_HYPERLINKS = YES USE_PDFLATEX = YES LATEX_BATCHMODE = NO LATEX_HIDE_INDICES = NO -LATEX_SOURCE_CODE = NO LATEX_BIB_STYLE = plain -LATEX_TIMESTAMP = NO LATEX_EMOJI_DIRECTORY = #--------------------------------------------------------------------------- # Configuration options related to the RTF output @@ -247,7 +238,6 @@ COMPACT_RTF = NO RTF_HYPERLINKS = NO RTF_STYLESHEET_FILE = RTF_EXTENSIONS_FILE = -RTF_SOURCE_CODE = NO #--------------------------------------------------------------------------- # Configuration options related to the man page output #--------------------------------------------------------------------------- @@ -268,7 +258,6 @@ XML_NS_MEMB_FILE_SCOPE = NO #--------------------------------------------------------------------------- GENERATE_DOCBOOK = NO DOCBOOK_OUTPUT = docbook -DOCBOOK_PROGRAMLISTING = NO #--------------------------------------------------------------------------- # Configuration options for the AutoGen Definitions output #--------------------------------------------------------------------------- @@ -303,13 +292,10 @@ EXTERNAL_PAGES = YES #--------------------------------------------------------------------------- # Configuration options related to the dot tool #--------------------------------------------------------------------------- -CLASS_DIAGRAMS = YES DIA_PATH = HIDE_UNDOC_RELATIONS = YES HAVE_DOT = NO DOT_NUM_THREADS = 0 -DOT_FONTNAME = Helvetica -DOT_FONTSIZE = 10 DOT_FONTPATH = CLASS_GRAPH = YES COLLABORATION_GRAPH = YES @@ -334,7 +320,6 @@ PLANTUML_CFG_FILE = PLANTUML_INCLUDE_PATH = DOT_GRAPH_MAX_NODES = 50 MAX_DOT_GRAPH_DEPTH = 0 -DOT_TRANSPARENT = NO DOT_MULTI_TARGETS = NO GENERATE_LEGEND = YES DOT_CLEANUP = YES From 41e8f7658fc53da2210876a6bc9cc0a97d537f9d Mon Sep 17 00:00:00 2001 From: "Christine P. 
Chai" Date: Tue, 30 Dec 2025 21:58:21 -0800 Subject: [PATCH 1110/1718] DOC: Add reference to Ziggurat method of Marsaglia and Tsang [skip actions][skip azp][skip cirrus] --- numpy/random/_generator.pyx | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index 35794bdfca6a..41f6673623f6 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -539,7 +539,7 @@ cdef class Generator: Byteorder must be native. The default value is np.float64. method : str, optional Either 'inv' or 'zig'. 'inv' uses the default inverse CDF method. - 'zig' uses the much faster Ziggurat method of Marsaglia and Tsang. + 'zig' uses the much faster Ziggurat method of Marsaglia and Tsang [1]_. out : ndarray, optional Alternative output array in which to place the result. If size is not None, it must have the same shape as the provided size and must match the type of @@ -550,6 +550,12 @@ cdef class Generator: out : float or ndarray Drawn samples. + References + ---------- + .. [1] Marsaglia, G. and Tsang, W. W. (2000). The Ziggurat method for + generating random variables. Journal of Statistical Software, 5, 1-7. 
+ https://doi.org/10.18637/jss.v005.i08 + + Examples -------- Output a 3x8000 array: From 75c9fde92b3ec46cc7342ee15700fd2d16cf4cd6 Mon Sep 17 00:00:00 2001 From: Alejandro Candioti Date: Tue, 30 Dec 2025 22:42:03 -0800 Subject: [PATCH 1111/1718] Adding release notes --- doc/release/upcoming_changes/30517.performance.rst | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 doc/release/upcoming_changes/30517.performance.rst diff --git a/doc/release/upcoming_changes/30517.performance.rst b/doc/release/upcoming_changes/30517.performance.rst new file mode 100644 index 000000000000..df15498f7470 --- /dev/null +++ b/doc/release/upcoming_changes/30517.performance.rst @@ -0,0 +1,8 @@ +Improved performance of ``numpy.searchsorted`` +---------------------------------------------- +The C++ binary search implementation used by ``numpy.searchsorted`` now has +much better performance when searching for multiple keys. The new +implementation batches binary search steps across all keys to leverage cache +locality and out-of-order execution. Benchmarks show the new implementation can +be up to 20 times faster for hundreds of thousands of keys while single-key +performance remains comparable to previous versions. 
\ No newline at end of file From bb43e33cc44ff5e6b4b8d39812ea8bdd316d9187 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Wed, 31 Dec 2025 16:14:15 +0100 Subject: [PATCH 1112/1718] BLD: update vendored Meson to 1.9.2 --- vendored-meson/meson | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vendored-meson/meson b/vendored-meson/meson index e72c717199fa..5d5a3d478da1 160000 --- a/vendored-meson/meson +++ b/vendored-meson/meson @@ -1 +1 @@ -Subproject commit e72c717199fa18d34020c7c97f9de3f388c5e055 +Subproject commit 5d5a3d478da115c812be77afa651db2492d52171 From c5769f5c7a077bf05437ba1d0ba08405b1129061 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 31 Dec 2025 17:05:24 +0000 Subject: [PATCH 1113/1718] MAINT: Bump int128/hide-comment-action from 1.48.0 to 1.49.0 Bumps [int128/hide-comment-action](https://github.com/int128/hide-comment-action) from 1.48.0 to 1.49.0. - [Release notes](https://github.com/int128/hide-comment-action/releases) - [Commits](https://github.com/int128/hide-comment-action/compare/9cdf7fd49089308931b20966baee90f4aadb9f6e...d56df214f3902ffb330ca629acf5051a4e22aee2) --- updated-dependencies: - dependency-name: int128/hide-comment-action dependency-version: 1.49.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- .github/workflows/mypy_primer_comment.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/mypy_primer_comment.yml b/.github/workflows/mypy_primer_comment.yml index 13eda8c230b3..b29f36dbf7e7 100644 --- a/.github/workflows/mypy_primer_comment.yml +++ b/.github/workflows/mypy_primer_comment.yml @@ -49,7 +49,7 @@ jobs: return parseInt(fs.readFileSync("pr_number.txt", { encoding: "utf8" })) - name: Hide old comments - uses: int128/hide-comment-action@9cdf7fd49089308931b20966baee90f4aadb9f6e # v1.48.0 + uses: int128/hide-comment-action@d56df214f3902ffb330ca629acf5051a4e22aee2 # v1.49.0 with: token: ${{ secrets.GITHUB_TOKEN }} issue-number: ${{ steps.get-pr-number.outputs.result }} From bdc0af2e7be18b51db7bd35a30755256f7c391e5 Mon Sep 17 00:00:00 2001 From: Ivan Perez Avellaneda <80877217+iperezav@users.noreply.github.com> Date: Wed, 31 Dec 2025 21:21:15 -0500 Subject: [PATCH 1114/1718] Fix formatting and punctuation in docstrings Hi I fixed a couple of grammar typos in the docstrings of gradient and _percentile. Best, Ivan --- numpy/lib/_function_base_impl.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index a9458378266c..7d4d86b20d50 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -1017,7 +1017,7 @@ def gradient(f, *varargs, axis=None, edge_order=1): Spacing between f values. Default unitary spacing for all dimensions. Spacing can be specified using: - 1. single scalar to specify a sample distance for all dimensions. + 1. Single scalar to specify a sample distance for all dimensions. 2. N scalars to specify a constant sample distance for each dimension. i.e. `dx`, `dy`, `dz`, ... 3. 
N arrays to specify the coordinates of the values along each @@ -1033,7 +1033,7 @@ def gradient(f, *varargs, axis=None, edge_order=1): Gradient is calculated using N-th order accurate differences at the boundaries. Default: 1. axis : None or int or tuple of ints, optional - Gradient is calculated only along the given axis or axes + Gradient is calculated only along the given axis or axes. The default (axis = None) is to calculate the gradient for all the axes of the input array. axis may be negative, in which case it counts from the last to the first axis. @@ -4715,8 +4715,8 @@ def _quantile( ) -> np.ndarray: """ Private function that doesn't support extended axis or keepdims. - These methods are extended to this function using _ureduce - See nanpercentile for parameter usage + These methods are extended to this function using _ureduce. + See nanpercentile for parameter usage. It computes the quantiles of the array for the given axis. A linear interpolation is performed based on the `method`. 
From 493898c09ab6bc8e4e1bd8d164f28acb9ab6a7bc Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 1 Jan 2026 17:34:02 +0100 Subject: [PATCH 1115/1718] MAINT: consistent ``__new__`` parameter naming --- numpy/_core/defchararray.py | 6 +++--- numpy/_core/defchararray.pyi | 6 +++--- numpy/_core/memmap.py | 4 ++-- numpy/_core/memmap.pyi | 8 ++++---- numpy/_core/records.py | 8 +++----- numpy/_core/records.pyi | 4 ++-- numpy/_core/tests/test_umath.py | 4 ++-- numpy/ma/core.py | 4 ++-- numpy/ma/core.pyi | 2 +- numpy/matrixlib/defmatrix.py | 8 +++----- numpy/matrixlib/defmatrix.pyi | 7 +------ 11 files changed, 26 insertions(+), 35 deletions(-) diff --git a/numpy/_core/defchararray.py b/numpy/_core/defchararray.py index 1a8750507f41..e88c1e10d12f 100644 --- a/numpy/_core/defchararray.py +++ b/numpy/_core/defchararray.py @@ -543,7 +543,7 @@ class adds the following functionality: [b'abc', b'abc', b'abc']], dtype='|S5') """ - def __new__(subtype, shape, itemsize=1, unicode=False, buffer=None, + def __new__(cls, shape, itemsize=1, unicode=False, buffer=None, offset=0, strides=None, order='C'): if unicode: dtype = str_ @@ -563,10 +563,10 @@ def __new__(subtype, shape, itemsize=1, unicode=False, buffer=None, filler = None if buffer is None: - self = ndarray.__new__(subtype, shape, (dtype, itemsize), + self = ndarray.__new__(cls, shape, (dtype, itemsize), order=order) else: - self = ndarray.__new__(subtype, shape, (dtype, itemsize), + self = ndarray.__new__(cls, shape, (dtype, itemsize), buffer=buffer, offset=offset, strides=strides, order=order) diff --git a/numpy/_core/defchararray.pyi b/numpy/_core/defchararray.pyi index 5df01e30f0fd..713cfea0e05e 100644 --- a/numpy/_core/defchararray.pyi +++ b/numpy/_core/defchararray.pyi @@ -89,7 +89,7 @@ type _StringDTypeSupportsArray = _SupportsArray[np.dtypes.StringDType] class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): @overload def __new__( - subtype, + cls, shape: _ShapeLike, itemsize: SupportsIndex | SupportsInt = 1, unicode: 
L[False] = False, @@ -100,7 +100,7 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): ) -> _CharArray[bytes_]: ... @overload def __new__( - subtype, + cls, shape: _ShapeLike, itemsize: SupportsIndex | SupportsInt, unicode: L[True], @@ -111,7 +111,7 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): ) -> _CharArray[str_]: ... @overload def __new__( - subtype, + cls, shape: _ShapeLike, itemsize: SupportsIndex | SupportsInt = 1, *, diff --git a/numpy/_core/memmap.py b/numpy/_core/memmap.py index e0b638c6f976..2632933f583b 100644 --- a/numpy/_core/memmap.py +++ b/numpy/_core/memmap.py @@ -213,7 +213,7 @@ class memmap(ndarray): __array_priority__ = -100.0 - def __new__(subtype, filename, dtype=uint8, mode='r+', offset=0, + def __new__(cls, filename, dtype=uint8, mode='r+', offset=0, shape=None, order='C'): # Import here to minimize 'import numpy' overhead import mmap @@ -290,7 +290,7 @@ def __new__(subtype, filename, dtype=uint8, mode='r+', offset=0, array_offset = offset - start mm = mmap.mmap(fid.fileno(), bytes, access=acc, offset=start) - self = ndarray.__new__(subtype, shape, dtype=descr, buffer=mm, + self = ndarray.__new__(cls, shape, dtype=descr, buffer=mm, offset=array_offset, order=order) self._mmap = mm self.offset = offset diff --git a/numpy/_core/memmap.pyi b/numpy/_core/memmap.pyi index 5e940097e1f3..ebc750f849a8 100644 --- a/numpy/_core/memmap.pyi +++ b/numpy/_core/memmap.pyi @@ -39,7 +39,7 @@ class memmap(np.ndarray[_ShapeT_co, _DTypeT_co]): @overload def __new__[ScalarT: np.generic]( - subtype, # pyright: ignore[reportSelfClsParameterName] + cls, filename: StrOrBytesPath | _SupportsFileMethodsRW, dtype: _DTypeT_co, mode: _ToMode = "r+", @@ -49,7 +49,7 @@ class memmap(np.ndarray[_ShapeT_co, _DTypeT_co]): ) -> Self: ... 
@overload def __new__( - subtype, # pyright: ignore[reportSelfClsParameterName] + cls, filename: StrOrBytesPath | _SupportsFileMethodsRW, dtype: type[np.uint8] = ..., mode: _ToMode = "r+", @@ -59,7 +59,7 @@ class memmap(np.ndarray[_ShapeT_co, _DTypeT_co]): ) -> memmap[_AnyShape, np.dtype[np.uint8]]: ... @overload def __new__[ScalarT: np.generic]( - subtype, # pyright: ignore[reportSelfClsParameterName] + cls, filename: StrOrBytesPath | _SupportsFileMethodsRW, dtype: _DTypeLike[ScalarT], mode: _ToMode = "r+", @@ -69,7 +69,7 @@ class memmap(np.ndarray[_ShapeT_co, _DTypeT_co]): ) -> memmap[_AnyShape, np.dtype[ScalarT]]: ... @overload def __new__( - subtype, # pyright: ignore[reportSelfClsParameterName] + cls, filename: StrOrBytesPath | _SupportsFileMethodsRW, dtype: DTypeLike, mode: _ToMode = "r+", diff --git a/numpy/_core/records.py b/numpy/_core/records.py index 9a6af16e3b23..65d5a0b85f2b 100644 --- a/numpy/_core/records.py +++ b/numpy/_core/records.py @@ -383,7 +383,7 @@ class recarray(ndarray): """ - def __new__(subtype, shape, dtype=None, buf=None, offset=0, strides=None, + def __new__(cls, shape, dtype=None, buf=None, offset=0, strides=None, formats=None, names=None, titles=None, byteorder=None, aligned=False, order='C'): @@ -395,12 +395,10 @@ def __new__(subtype, shape, dtype=None, buf=None, offset=0, strides=None, ).dtype if buf is None: - self = ndarray.__new__( - subtype, shape, (record, descr), order=order - ) + self = ndarray.__new__(cls, shape, (record, descr), order=order) else: self = ndarray.__new__( - subtype, shape, (record, descr), buffer=buf, + cls, shape, (record, descr), buffer=buf, offset=offset, strides=strides, order=order ) return self diff --git a/numpy/_core/records.pyi b/numpy/_core/records.pyi index 326a0fe6e476..87a2e7616d00 100644 --- a/numpy/_core/records.pyi +++ b/numpy/_core/records.pyi @@ -75,7 +75,7 @@ class recarray(np.ndarray[_ShapeT_co, _DTypeT_co]): @overload def __new__( - subtype, + cls, shape: _ShapeLike, dtype: None = None, 
buf: Buffer | None = None, @@ -91,7 +91,7 @@ class recarray(np.ndarray[_ShapeT_co, _DTypeT_co]): ) -> _RecArray[record]: ... @overload def __new__( - subtype, + cls, shape: _ShapeLike, dtype: DTypeLike | None, buf: Buffer | None = None, diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py index 40b815f88984..b8c46d636870 100644 --- a/numpy/_core/tests/test_umath.py +++ b/numpy/_core/tests/test_umath.py @@ -4482,8 +4482,8 @@ class TestSubclass: def test_subclass_op(self): class simple(np.ndarray): - def __new__(subtype, shape): - self = np.ndarray.__new__(subtype, shape, dtype=object) + def __new__(cls, shape): + self = np.ndarray.__new__(cls, shape, dtype=object) self.fill(0) return self diff --git a/numpy/ma/core.py b/numpy/ma/core.py index f6d7b0a8c8e5..13ad3bbf3751 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -6530,11 +6530,11 @@ class mvoid(MaskedArray): Fake a 'void' object to use for masked array with structured dtypes. """ - def __new__(self, data, mask=nomask, dtype=None, fill_value=None, + def __new__(cls, data, mask=nomask, dtype=None, fill_value=None, hardmask=False, copy=False, subok=True): copy = None if not copy else True _data = np.array(data, copy=copy, subok=subok, dtype=dtype) - _data = _data.view(self) + _data = _data.view(cls) _data._hardmask = hardmask if mask is not nomask: if isinstance(mask, np.void): diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 36766932fa14..1e10150106b7 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -2563,7 +2563,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): class mvoid(MaskedArray[_ShapeT_co, _DTypeT_co]): def __new__( - self, # pyright: ignore[reportSelfClsParameterName] + cls, data, mask=..., dtype=..., diff --git a/numpy/matrixlib/defmatrix.py b/numpy/matrixlib/defmatrix.py index d706e09ed947..9da6bcae2b64 100644 --- a/numpy/matrixlib/defmatrix.py +++ b/numpy/matrixlib/defmatrix.py @@ -116,7 +116,7 @@ class matrix(N.ndarray): """ __array_priority__ 
= 10.0 - def __new__(subtype, data, dtype=None, copy=True): + def __new__(cls, data, dtype=None, copy=True): warnings.warn('the matrix subclass is not the recommended way to ' 'represent matrices or deal with linear algebra (see ' 'https://docs.scipy.org/doc/numpy/user/' @@ -136,7 +136,7 @@ def __new__(subtype, data, dtype=None, copy=True): intype = data.dtype else: intype = N.dtype(dtype) - new = data.view(subtype) + new = data.view(cls) if intype != data.dtype: return new.astype(intype) if copy: @@ -166,9 +166,7 @@ def __new__(subtype, data, dtype=None, copy=True): if not (order or arr.flags.contiguous): arr = arr.copy() - ret = N.ndarray.__new__(subtype, shape, arr.dtype, - buffer=arr, - order=order) + ret = N.ndarray.__new__(cls, shape, arr.dtype, buffer=arr, order=order) return ret def __array_finalize__(self, obj): diff --git a/numpy/matrixlib/defmatrix.pyi b/numpy/matrixlib/defmatrix.pyi index 55b9d795078c..5ec4b6687755 100644 --- a/numpy/matrixlib/defmatrix.pyi +++ b/numpy/matrixlib/defmatrix.pyi @@ -30,12 +30,7 @@ type _ToIndex2 = tuple[_ToIndex1, _ToIndex1 | SupportsIndex] | tuple[_ToIndex1 | class matrix(np.ndarray[_ShapeT_co, _DTypeT_co]): __array_priority__: ClassVar[float] = 10.0 # pyright: ignore[reportIncompatibleMethodOverride] - def __new__( - subtype, # pyright: ignore[reportSelfClsParameterName] - data: ArrayLike, - dtype: DTypeLike | None = None, - copy: bool = True, - ) -> _Matrix[Incomplete]: ... + def __new__(cls, data: ArrayLike, dtype: DTypeLike | None = None, copy: bool = True) -> _Matrix[Incomplete]: ... 
# @overload # type: ignore[override] From 6f0c7e7afea4fd800e1c632486db7293af19e0da Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 1 Jan 2026 18:38:20 +0100 Subject: [PATCH 1116/1718] TYP: ``ma.concatenate`` --- numpy/ma/core.pyi | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 36766932fa14..8ebdc8710c9b 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -3,7 +3,7 @@ import datetime as dt import types -from _typeshed import Incomplete +from _typeshed import Incomplete, SupportsLenAndGetItem from collections.abc import Buffer, Callable, Sequence from typing import ( Any, @@ -3419,7 +3419,13 @@ def compressed[ScalarT: np.generic](x: _ArrayLike[ScalarT]) -> _Array1D[ScalarT] @overload def compressed(x: ArrayLike) -> _Array1D[Any]: ... -def concatenate(arrays, axis=0): ... +# +@overload +def concatenate[ScalarT: np.generic](arrays: _ArrayLike[ScalarT], axis: SupportsIndex | None = 0) -> _MaskedArray[ScalarT]: ... +@overload +def concatenate(arrays: SupportsLenAndGetItem[ArrayLike], axis: SupportsIndex | None = 0) -> _MaskedArray[Incomplete]: ... + +# def diag(v, k=0): ... def left_shift(a, n): ... def right_shift(a, n): ... From 5bc05157d9514b08fa23546770aa74b0320a40c7 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 1 Jan 2026 18:42:32 +0100 Subject: [PATCH 1117/1718] TYP: ``ma.diag`` --- numpy/ma/core.pyi | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 8ebdc8710c9b..c375f0021a26 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -296,6 +296,7 @@ type _Ignored = object # A subset of `MaskedArray` that can be parametrized w.r.t. 
`np.generic` type _MaskedArray[ScalarT: np.generic] = MaskedArray[_AnyShape, np.dtype[ScalarT]] type _Masked1D[ScalarT: np.generic] = MaskedArray[tuple[int], np.dtype[ScalarT]] +type _Masked2D[ScalarT: np.generic] = MaskedArray[tuple[int, int], np.dtype[ScalarT]] type _MaskedArrayUInt_co = _MaskedArray[np.unsignedinteger | np.bool] type _MaskedArrayInt_co = _MaskedArray[np.integer | np.bool] @@ -308,6 +309,9 @@ type _MaskedArrayTD64_co = _MaskedArray[np.timedelta64 | np.integer | np.bool] type _ArrayInt_co = NDArray[np.integer | np.bool] type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] +type _Array2D[ScalarT: np.generic] = np.ndarray[tuple[int, int], np.dtype[ScalarT]] +# Workaround for https://github.com/microsoft/pyright/issues/10232 +type _ArrayNoD[ScalarT: np.generic] = np.ndarray[tuple[Never] | tuple[Never, Never], np.dtype[ScalarT]] type _ConvertibleToInt = SupportsInt | SupportsIndex | _CharLike_co type _ConvertibleToFloat = SupportsFloat | SupportsIndex | _CharLike_co @@ -3425,8 +3429,23 @@ def concatenate[ScalarT: np.generic](arrays: _ArrayLike[ScalarT], axis: Supports @overload def concatenate(arrays: SupportsLenAndGetItem[ArrayLike], axis: SupportsIndex | None = 0) -> _MaskedArray[Incomplete]: ... +# keep in sync with `diag` and `lib._twodim_base_impl.diag` +@overload +def diag[ScalarT: np.generic](v: _ArrayNoD[ScalarT] | Sequence[Sequence[ScalarT]], k: int = 0) -> _MaskedArray[ScalarT]: ... +@overload +def diag[ScalarT: np.generic](v: _Array2D[ScalarT] | Sequence[Sequence[ScalarT]], k: int = 0) -> _Masked1D[ScalarT]: ... +@overload +def diag[ScalarT: np.generic](v: _Array1D[ScalarT] | Sequence[ScalarT], k: int = 0) -> _Masked2D[ScalarT]: ... +@overload +def diag(v: Sequence[Sequence[_ScalarLike_co]], k: int = 0) -> _Masked1D[Incomplete]: ... +@overload +def diag(v: Sequence[_ScalarLike_co], k: int = 0) -> _Masked2D[Incomplete]: ... 
+@overload +def diag[ScalarT: np.generic](v: _ArrayLike[ScalarT], k: int = 0) -> _MaskedArray[ScalarT]: ... +@overload +def diag(v: ArrayLike, k: int = 0) -> _MaskedArray[Incomplete]: ... + # -def diag(v, k=0): ... def left_shift(a, n): ... def right_shift(a, n): ... def put(a: NDArray[Any], indices: _ArrayLikeInt_co, values: ArrayLike, mode: _ModeKind = "raise") -> None: ... From e2c59bed80446a8148ee249a462ec72265a9ce39 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 1 Jan 2026 18:56:46 +0100 Subject: [PATCH 1118/1718] TYP: ``ma.{left,right}_shift`` --- numpy/ma/core.pyi | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index c375f0021a26..c1bc2e66afd0 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -3445,9 +3445,27 @@ def diag[ScalarT: np.generic](v: _ArrayLike[ScalarT], k: int = 0) -> _MaskedArra @overload def diag(v: ArrayLike, k: int = 0) -> _MaskedArray[Incomplete]: ... +# keep in sync with `right_shift` +@overload +def left_shift[ShapeT: _Shape, ScalarT: np.bool | np.integer | np.object_]( + a: ndarray[ShapeT, np.dtype[ScalarT]], n: int +) -> MaskedArray[ShapeT, np.dtype[ScalarT]]: ... +@overload +def left_shift[ScalarT: np.bool | np.integer | np.object_](a: _ArrayLike[ScalarT], n: int) -> _MaskedArray[ScalarT]: ... +@overload +def left_shift(a: _ArrayLikeInt_co, n: int) -> _MaskedArray[Incomplete]: ... + +# keep in sync with `left_shift` +@overload +def right_shift[ShapeT: _Shape, ScalarT: np.bool | np.integer | np.object_]( + a: ndarray[ShapeT, np.dtype[ScalarT]], n: int +) -> MaskedArray[ShapeT, np.dtype[ScalarT]]: ... +@overload +def right_shift[ScalarT: np.bool | np.integer | np.object_](a: _ArrayLike[ScalarT], n: int) -> _MaskedArray[ScalarT]: ... +@overload +def right_shift(a: _ArrayLikeInt_co, n: int) -> _MaskedArray[Incomplete]: ... + # -def left_shift(a, n): ... -def right_shift(a, n): ... 
def put(a: NDArray[Any], indices: _ArrayLikeInt_co, values: ArrayLike, mode: _ModeKind = "raise") -> None: ... def putmask(a: NDArray[Any], mask: _ArrayLikeBool_co, values: ArrayLike) -> None: ... def transpose(a, axes=None): ... From 1156e484f59a8fb21dd4e23e9d807f84a39ad23f Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 1 Jan 2026 18:59:03 +0100 Subject: [PATCH 1119/1718] TYP: ``ma.put`` --- numpy/ma/core.pyi | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index c1bc2e66afd0..d88fa562cc6c 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -3465,8 +3465,10 @@ def right_shift[ScalarT: np.bool | np.integer | np.object_](a: _ArrayLike[Scalar @overload def right_shift(a: _ArrayLikeInt_co, n: int) -> _MaskedArray[Incomplete]: ... +# keep in sync with `_core.fromnumeric.put` +def put(a: np.ndarray, indices: _ArrayLikeInt_co, values: ArrayLike, mode: _ModeKind = "raise") -> None: ... + # -def put(a: NDArray[Any], indices: _ArrayLikeInt_co, values: ArrayLike, mode: _ModeKind = "raise") -> None: ... def putmask(a: NDArray[Any], mask: _ArrayLikeBool_co, values: ArrayLike) -> None: ... def transpose(a, axes=None): ... def reshape(a, new_shape, order="C"): ... From d518eb2479a4580eb2bfa5bc2eae0e6748b5dd7e Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 1 Jan 2026 19:51:17 +0100 Subject: [PATCH 1120/1718] TYP: ``ma.putmask`` --- numpy/ma/core.pyi | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index d88fa562cc6c..2914570b1bd3 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -3469,7 +3469,9 @@ def right_shift(a: _ArrayLikeInt_co, n: int) -> _MaskedArray[Incomplete]: ... def put(a: np.ndarray, indices: _ArrayLikeInt_co, values: ArrayLike, mode: _ModeKind = "raise") -> None: ... # -def putmask(a: NDArray[Any], mask: _ArrayLikeBool_co, values: ArrayLike) -> None: ... 
+def putmask(a: np.ndarray, mask: _ArrayLikeBool_co, values: ArrayLike) -> None: ... + +# def transpose(a, axes=None): ... def reshape(a, new_shape, order="C"): ... def resize(x, new_shape): ... From 268bbc9ac3973c8bc410cb28db68bc8cc50a3130 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 1 Jan 2026 19:56:38 +0100 Subject: [PATCH 1121/1718] TYP: ``ma.transpose`` --- numpy/ma/core.pyi | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 2914570b1bd3..ae8b1488de6a 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -3471,8 +3471,13 @@ def put(a: np.ndarray, indices: _ArrayLikeInt_co, values: ArrayLike, mode: _Mode # def putmask(a: np.ndarray, mask: _ArrayLikeBool_co, values: ArrayLike) -> None: ... +# keep in sync with `_core.fromnumeric.transpose` +@overload +def transpose[ScalarT: np.generic](a: _ArrayLike[ScalarT], axes: _ShapeLike | None = None) -> _MaskedArray[ScalarT]: ... +@overload +def transpose(a: ArrayLike, axes: _ShapeLike | None = None) -> _MaskedArray[Incomplete]: ... + # -def transpose(a, axes=None): ... def reshape(a, new_shape, order="C"): ... def resize(x, new_shape): ... def ndim(obj: ArrayLike) -> int: ... From 36fe6f84115e855a6e0158e35a6a021d1184cef1 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 1 Jan 2026 20:03:19 +0100 Subject: [PATCH 1122/1718] TYP: ``ma.reshape`` --- numpy/ma/core.pyi | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index ae8b1488de6a..2292b14fb2e4 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -3477,8 +3477,27 @@ def transpose[ScalarT: np.generic](a: _ArrayLike[ScalarT], axes: _ShapeLike | No @overload def transpose(a: ArrayLike, axes: _ShapeLike | None = None) -> _MaskedArray[Incomplete]: ... 
+# keep in sync with `_core.fromnumeric.reshape` +@overload # shape: index +def reshape[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], new_shape: SupportsIndex, order: _OrderACF = "C" +) -> _Masked1D[ScalarT]: ... +@overload # shape: ~ShapeT +def reshape[ScalarT: np.generic, ShapeT: _Shape]( + a: _ArrayLike[ScalarT], new_shape: ShapeT, order: _OrderACF = "C" +) -> MaskedArray[ShapeT, np.dtype[ScalarT]]: ... +@overload # shape: Sequence[index] +def reshape[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], new_shape: Sequence[SupportsIndex], order: _OrderACF = "C" +) -> _MaskedArray[ScalarT]: ... +@overload # shape: index +def reshape(a: ArrayLike, new_shape: SupportsIndex, order: _OrderACF = "C") -> _Masked1D[Incomplete]: ... +@overload # shape: ~ShapeT +def reshape[ShapeT: _Shape](a: ArrayLike, new_shape: ShapeT, order: _OrderACF = "C") -> MaskedArray[ShapeT]: ... +@overload # shape: Sequence[index] +def reshape(a: ArrayLike, new_shape: Sequence[SupportsIndex], order: _OrderACF = "C") -> _MaskedArray[Incomplete]: ... + # -def reshape(a, new_shape, order="C"): ... def resize(x, new_shape): ... def ndim(obj: ArrayLike) -> int: ... def shape(obj): ... From c375c8c501608e6b7a44eca290dfb67bcf51e584 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 1 Jan 2026 20:09:34 +0100 Subject: [PATCH 1123/1718] TYP: ``ma.resize`` --- numpy/ma/core.pyi | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 2292b14fb2e4..ff2b8e173661 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -3497,8 +3497,25 @@ def reshape[ShapeT: _Shape](a: ArrayLike, new_shape: ShapeT, order: _OrderACF = @overload # shape: Sequence[index] def reshape(a: ArrayLike, new_shape: Sequence[SupportsIndex], order: _OrderACF = "C") -> _MaskedArray[Incomplete]: ... 
+# keep in sync with `_core.fromnumeric.resize` +@overload +def resize[ScalarT: np.generic]( + x: _ArrayLike[ScalarT], new_shape: SupportsIndex | tuple[SupportsIndex] +) -> _Masked1D[ScalarT]: ... +@overload +def resize[ScalarT: np.generic, ShapeT: _Shape]( + x: _ArrayLike[ScalarT], new_shape: ShapeT +) -> MaskedArray[ShapeT, np.dtype[ScalarT]]: ... +@overload +def resize[ScalarT: np.generic](x: _ArrayLike[ScalarT], new_shape: _ShapeLike) -> _MaskedArray[ScalarT]: ... +@overload +def resize(x: ArrayLike, new_shape: SupportsIndex | tuple[SupportsIndex]) -> _Masked1D[Incomplete]: ... +@overload +def resize[ShapeT: _Shape](x: ArrayLike, new_shape: ShapeT) -> MaskedArray[ShapeT]: ... +@overload +def resize(x: ArrayLike, new_shape: _ShapeLike) -> _MaskedArray[Incomplete]: ... + # -def resize(x, new_shape): ... def ndim(obj: ArrayLike) -> int: ... def shape(obj): ... def size(obj: ArrayLike, axis: SupportsIndex | None = None) -> int: ... From 1b20f6095af73dbbbe8783ca994b4dd427e3f143 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 1 Jan 2026 20:15:44 +0100 Subject: [PATCH 1124/1718] TYP: ``ma.shape`` --- numpy/ma/core.pyi | 29 ++++++++++++++++++++++++++++- 1 file changed, 28 insertions(+), 1 deletion(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index ff2b8e173661..ab49de5e81dc 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -13,6 +13,7 @@ from typing import ( Literal, Never, NoReturn, + Protocol, Self, SupportsComplex, SupportsFloat, @@ -22,6 +23,7 @@ from typing import ( final, overload, override, + type_check_only, ) from typing_extensions import TypeIs, TypeVar @@ -327,6 +329,14 @@ type _FillValue = complex | None # int | float | complex | None type _FillValueCallable = Callable[[np.dtype | ArrayLike], _FillValue] type _DomainCallable = Callable[..., NDArray[np.bool]] +type _PyArray[T] = list[T] | tuple[T, ...] 
+type _PyScalar = complex | bytes | str + +@type_check_only +class _HasShape[ShapeT_co: _Shape](Protocol): + @property + def shape(self, /) -> ShapeT_co: ... + ### MaskType = np.bool_ @@ -3517,7 +3527,24 @@ def resize(x: ArrayLike, new_shape: _ShapeLike) -> _MaskedArray[Incomplete]: ... # def ndim(obj: ArrayLike) -> int: ... -def shape(obj): ... + +# # keep in sync with `_core.fromnumeric.shape` +@overload # this prevents `Any` from being returned with Pyright +def shape(obj: _HasShape[Never]) -> _AnyShape: ... +@overload +def shape[ShapeT: _Shape](obj: _HasShape[ShapeT]) -> ShapeT: ... +@overload +def shape(obj: _PyScalar) -> tuple[()]: ... +@overload # `collections.abc.Sequence` can't be used because `bytes` and `str` are assignable to it +def shape(obj: _PyArray[_PyScalar]) -> tuple[int]: ... +@overload +def shape(obj: _PyArray[_PyArray[_PyScalar]]) -> tuple[int, int]: ... +@overload # requires PEP 688 support +def shape(obj: memoryview | bytearray) -> tuple[int]: ... +@overload +def shape(obj: ArrayLike) -> _AnyShape: ... + +# def size(obj: ArrayLike, axis: SupportsIndex | None = None) -> int: ... def diff(a, /, n=1, axis=-1, prepend=..., append=...): ... def where(condition, x=..., y=...): ... From db422c3a7e924a206fbf83671864ef7e0089f894 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 1 Jan 2026 20:23:08 +0100 Subject: [PATCH 1125/1718] TYP: ``ma.diff`` --- numpy/lib/_function_base_impl.pyi | 2 +- numpy/ma/core.pyi | 104 +++++++++++++++++++++++++++++- 2 files changed, 104 insertions(+), 2 deletions(-) diff --git a/numpy/lib/_function_base_impl.pyi b/numpy/lib/_function_base_impl.pyi index 7a361ff1f876..b5e02aad3c94 100644 --- a/numpy/lib/_function_base_impl.pyi +++ b/numpy/lib/_function_base_impl.pyi @@ -652,7 +652,7 @@ def gradient( edge_order: L[1, 2] = 1, ) -> Incomplete: ... 
-# +# keep in sync with `ma.core.diff` @overload # n == 0; return input unchanged def diff[T]( a: T, diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index ab49de5e81dc..0db911ec50ed 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -3546,7 +3546,109 @@ def shape(obj: ArrayLike) -> _AnyShape: ... # def size(obj: ArrayLike, axis: SupportsIndex | None = None) -> int: ... -def diff(a, /, n=1, axis=-1, prepend=..., append=...): ... + +# keep in sync with `lib._function_base_impl.diff` +@overload # known array-type +def diff[MArrayT: _MaskedArray[np.inexact | np.timedelta64 | np.object_]]( + a: MArrayT, + /, + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> MArrayT: ... +@overload # known shape, datetime64 +def diff[ShapeT: _Shape]( + a: MaskedArray[ShapeT, np.dtype[np.datetime64]], + /, + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> MaskedArray[ShapeT, np.dtype[np.timedelta64]]: ... +@overload # unknown shape, known scalar-type +def diff[ScalarT: np.inexact | np.timedelta64 | np.object_]( + a: _ArrayLike[ScalarT], + /, + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> _MaskedArray[ScalarT]: ... +@overload # unknown shape, datetime64 +def diff( + a: _ArrayLike[np.datetime64], + /, + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> _MaskedArray[np.timedelta64]: ... +@overload # 1d int +def diff( + a: Sequence[int], + /, + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> _Masked1D[np.int_]: ... 
+@overload # 2d int +def diff( + a: Sequence[Sequence[int]], + /, + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> _Masked2D[np.int_]: ... +@overload # 1d float (the `list` avoids overlap with the `int` overloads) +def diff( + a: list[float], + /, + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> _Masked1D[np.float64]: ... +@overload # 2d float +def diff( + a: Sequence[list[float]], + /, + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> _Masked2D[np.float64]: ... +@overload # 1d complex (the `list` avoids overlap with the `int` overloads) +def diff( + a: list[complex], + /, + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> _Masked1D[np.complex128]: ... +@overload # 2d complex +def diff( + a: Sequence[list[complex]], + /, + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> _Masked2D[np.complex128]: ... +@overload # unknown shape, unknown scalar-type +def diff( + a: ArrayLike, + /, + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> _MaskedArray[Incomplete]: ... + +# def where(condition, x=..., y=...): ... def choose(indices, choices, out=None, mode="raise"): ... def round_(a, decimals=0, out=None): ... 
From fb0061b2c64958babc58fa4028a92aa9ee6ee832 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 1 Jan 2026 20:25:20 +0100 Subject: [PATCH 1126/1718] TYP: ``ma.where`` --- numpy/_core/multiarray.pyi | 1 + numpy/ma/core.pyi | 7 ++++++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index 27fcc851475b..ca5ba0ed2dcd 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -668,6 +668,7 @@ def concatenate[OutT: np.ndarray]( def inner(a: ArrayLike, b: ArrayLike, /) -> Incomplete: ... +# keep in sync with `ma.core.numeric` @overload def where(condition: ArrayLike, x: None = None, y: None = None, /) -> tuple[NDArray[intp], ...]: ... @overload diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 0db911ec50ed..f58fd4323ec1 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -3648,8 +3648,13 @@ def diff( append: ArrayLike | _NoValueType = ..., ) -> _MaskedArray[Incomplete]: ... +# keep in sync with `_core.multiarray.where` +@overload +def where(condition: ArrayLike, x: _NoValueType = ..., y: _NoValueType = ..., /) -> tuple[_MaskedArray[np.intp], ...]: ... +@overload +def where(condition: ArrayLike, x: ArrayLike, y: ArrayLike, /) -> _MaskedArray[Incomplete]: ... + # -def where(condition, x=..., y=...): ... def choose(indices, choices, out=None, mode="raise"): ... def round_(a, decimals=0, out=None): ... 
round = round_ From 624775f3cfd397c191565e6531caf161afff563b Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 1 Jan 2026 20:30:36 +0100 Subject: [PATCH 1127/1718] TYP: ``ma.choose`` --- numpy/_core/fromnumeric.pyi | 7 +++++-- numpy/ma/core.pyi | 35 ++++++++++++++++++++++++++++++++--- 2 files changed, 37 insertions(+), 5 deletions(-) diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index 0d5b26131146..08cce57adde7 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -184,6 +184,7 @@ def take[ArrayT: np.ndarray]( mode: _ModeKind = "raise", ) -> ArrayT: ... +# keep in sync with `ma.core.reshape` @overload # shape: index def reshape[ScalarT: np.generic]( a: _ArrayLike[ScalarT], @@ -239,6 +240,7 @@ def reshape( copy: bool | None = None, ) -> NDArray[Any]: ... +# keep in sync with `ma.core.choose` @overload def choose( a: _IntLike_co, @@ -294,7 +296,7 @@ def repeat( axis: SupportsIndex, ) -> NDArray[Any]: ... -# +# keep in sync with `ma.core.put` def put( a: NDArray[Any], ind: _ArrayLikeInt_co, @@ -310,6 +312,7 @@ def swapaxes[ScalarT: np.generic](a: _ArrayLike[ScalarT], axis1: SupportsIndex, @overload def swapaxes(a: ArrayLike, axis1: SupportsIndex, axis2: SupportsIndex) -> NDArray[Any]: ... +# keep in sync with `ma.core.transpose` @overload def transpose[ScalarT: np.generic]( a: _ArrayLike[ScalarT], @@ -521,7 +524,7 @@ def searchsorted( sorter: _ArrayLikeInt_co | None = None, # 1D int array ) -> NDArray[intp]: ... -# +# keep in sync with `ma.core.resize` @overload def resize[ScalarT: np.generic](a: _ArrayLike[ScalarT], new_shape: SupportsIndex | tuple[SupportsIndex]) -> _Array1D[ScalarT]: ... 
@overload diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index f58fd4323ec1..8beb85491a7c 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -3650,12 +3650,41 @@ def diff( # keep in sync with `_core.multiarray.where` @overload -def where(condition: ArrayLike, x: _NoValueType = ..., y: _NoValueType = ..., /) -> tuple[_MaskedArray[np.intp], ...]: ... +def where(condition: ArrayLike, x: _NoValueType = ..., y: _NoValueType = ...) -> tuple[_MaskedArray[np.intp], ...]: ... @overload -def where(condition: ArrayLike, x: ArrayLike, y: ArrayLike, /) -> _MaskedArray[Incomplete]: ... +def where(condition: ArrayLike, x: ArrayLike, y: ArrayLike) -> _MaskedArray[Incomplete]: ... + +# keep in sync with `_core.fromnumeric.choose` +@overload +def choose( + indices: _IntLike_co, + choices: ArrayLike, + out: None = None, + mode: _ModeKind = "raise", +) -> Any: ... +@overload +def choose[ScalarT: np.generic]( + indices: _ArrayLikeInt_co, + choices: _ArrayLike[ScalarT], + out: None = None, + mode: _ModeKind = "raise", +) -> _MaskedArray[ScalarT]: ... +@overload +def choose( + indices: _ArrayLikeInt_co, + choices: ArrayLike, + out: None = None, + mode: _ModeKind = "raise", +) -> _MaskedArray[Incomplete]: ... +@overload +def choose[ArrayT: np.ndarray]( + indices: _ArrayLikeInt_co, + choices: ArrayLike, + out: ArrayT, + mode: _ModeKind = "raise", +) -> ArrayT: ... # -def choose(indices, choices, out=None, mode="raise"): ... def round_(a, decimals=0, out=None): ... round = round_ From a7ce4341ef90d32fe1fb21492c25f6ec5bbdee5d Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 1 Jan 2026 20:35:15 +0100 Subject: [PATCH 1128/1718] TYP: ``ma.round`` --- numpy/ma/core.pyi | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 8beb85491a7c..3c07d6ba0820 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -3685,7 +3685,17 @@ def choose[ArrayT: np.ndarray]( ) -> ArrayT: ... 
# -def round_(a, decimals=0, out=None): ... +@overload # a: masked_array, out: None (default) +def round_[MArray: MaskedArray](a: MArray, decimals: int = 0, out: None = None) -> MArray: ... +@overload # a: known array-like, out: None (default) +def round_[ScalarT: np.number](a: _ArrayLike[ScalarT], decimals: int = 0, out: None = None) -> _MaskedArray[ScalarT]: ... +@overload # a: unknown array-like, out: None (default) +def round_(a: _ArrayLikeNumber_co, decimals: int = 0, out: None = None) -> _MaskedArray[Incomplete]: ... +@overload # out: ndarray (positional) +def round_[ArrayT: np.ndarray](a: ArrayLike, decimals: int, out: ArrayT) -> ArrayT: ... +@overload # out: ndarray (keyword) +def round_[ArrayT: np.ndarray](a: ArrayLike, decimals: int = 0, *, out: ArrayT) -> ArrayT: ... + round = round_ def inner(a, b): ... From 8a1cb6df7f2b93eb6fbaae9ebfee59ea2b11c3da Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 1 Jan 2026 20:39:21 +0100 Subject: [PATCH 1129/1718] TYP: ``ma.inner`` --- numpy/_core/multiarray.pyi | 1 + numpy/ma/core.pyi | 5 ++++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index ca5ba0ed2dcd..341ad27cb822 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -666,6 +666,7 @@ def concatenate[OutT: np.ndarray]( casting: _CastingKind | None = "same_kind", ) -> OutT: ... +# keep in sync with `ma.core.inner` def inner(a: ArrayLike, b: ArrayLike, /) -> Incomplete: ... # keep in sync with `ma.core.numeric` diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 3c07d6ba0820..2b190fa63199 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -3698,9 +3698,12 @@ def round_[ArrayT: np.ndarray](a: ArrayLike, decimals: int = 0, *, out: ArrayT) round = round_ -def inner(a, b): ... +# keep in sync with `_core.multiarray.inner` +def inner(a: ArrayLike, b: ArrayLike) -> Incomplete: ... + innerproduct = inner +# def outer(a, b): ... 
outerproduct = outer From 1fdf03be1f4a363c3af3eb2bcbc25264b9ebbed2 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 1 Jan 2026 20:46:04 +0100 Subject: [PATCH 1130/1718] TYP: ``ma.outer`` --- numpy/_core/numeric.pyi | 3 ++- numpy/ma/core.pyi | 26 ++++++++++++++++++++++++-- 2 files changed, 26 insertions(+), 3 deletions(-) diff --git a/numpy/_core/numeric.pyi b/numpy/_core/numeric.pyi index a577b8563c6d..052cc48ae861 100644 --- a/numpy/_core/numeric.pyi +++ b/numpy/_core/numeric.pyi @@ -1086,7 +1086,8 @@ def convolve( a: _ArrayLike1DTD64_co, v: _ArrayLike1DTD64_co, mode: _CorrelateMode = "valid" ) -> _Array1D[np.timedelta64 | Any]: ... -# keep roughly in sync with `convolve` and `correlate`, but for 2-D output and an additional `out` overload +# keep roughly in sync with `convolve` and `correlate`, but for 2-D output and an additional `out` overload, +# and also keep in sync with `ma.core.outer` (minus `out`) @overload def outer( a: _ArrayLike[_AnyNumericScalarT], b: _ArrayLike[_AnyNumericScalarT], out: None = None diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 2b190fa63199..1405fdefe0ef 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -291,6 +291,16 @@ _DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=T # the additional `Callable[...]` bound simplifies self-binding to the ufunc's callable signature _UFuncT_co = TypeVar("_UFuncT_co", bound=np.ufunc | Callable[..., object], default=np.ufunc, covariant=True) +_AnyNumericScalarT = TypeVar( + "_AnyNumericScalarT", + np.int8, np.int16, np.int32, np.int64, + np.uint8, np.uint16, np.uint32, np.uint64, + np.float16, np.float32, np.float64, np.longdouble, + np.complex64, np.complex128, np.clongdouble, + np.timedelta64, + np.object_, +) # fmt: skip + type _RealNumber = np.floating | np.integer type _Ignored = object @@ -3703,8 +3713,20 @@ def inner(a: ArrayLike, b: ArrayLike) -> Incomplete: ... innerproduct = inner -# -def outer(a, b): ... 
+# keep in sync with `_core.numeric.outer` +@overload +def outer(a: _ArrayLike[_AnyNumericScalarT], b: _ArrayLike[_AnyNumericScalarT]) -> _Masked2D[_AnyNumericScalarT]: ... +@overload +def outer(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co) -> _Masked2D[np.bool]: ... +@overload +def outer(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co) -> _Masked2D[np.int_ | Any]: ... +@overload +def outer(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co) -> _Masked2D[np.float64 | Any]: ... +@overload +def outer(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co) -> _Masked2D[np.complex128 | Any]: ... +@overload +def outer(a: _ArrayLikeTD64_co, b: _ArrayLikeTD64_co) -> _Masked2D[np.timedelta64 | Any]: ... + outerproduct = outer def correlate(a, v, mode="valid", propagate_mask=True): ... From 180aaf586c7150ad4d7b77243dd49a2f91a65432 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 1 Jan 2026 20:53:01 +0100 Subject: [PATCH 1131/1718] TYP: ``ma.correlate`` and ``ma.convolve`` --- numpy/_core/numeric.pyi | 2 +- numpy/ma/core.pyi | 93 +++++++++++++++++++++++++++++++++++++++-- 2 files changed, 91 insertions(+), 4 deletions(-) diff --git a/numpy/_core/numeric.pyi b/numpy/_core/numeric.pyi index 052cc48ae861..2a69e2922253 100644 --- a/numpy/_core/numeric.pyi +++ b/numpy/_core/numeric.pyi @@ -1046,7 +1046,7 @@ def isfortran(a: ndarray | generic) -> py_bool: ... def argwhere(a: ArrayLike) -> _Array2D[np.intp]: ... def flatnonzero(a: ArrayLike) -> _Array1D[np.intp]: ... -# keep in sync with `convolve` +# keep in sync with `convolve` and `ma.core.correlate` @overload def correlate( a: _ArrayLike1D[_AnyNumericScalarT], v: _ArrayLike1D[_AnyNumericScalarT], mode: _CorrelateMode = "valid" diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 1405fdefe0ef..0e686776ee57 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -342,6 +342,8 @@ type _DomainCallable = Callable[..., NDArray[np.bool]] type _PyArray[T] = list[T] | tuple[T, ...] 
type _PyScalar = complex | bytes | str +type _CorrelateMode = Literal["valid", "same", "full"] + @type_check_only class _HasShape[ShapeT_co: _Shape](Protocol): @property @@ -3729,11 +3731,96 @@ def outer(a: _ArrayLikeTD64_co, b: _ArrayLikeTD64_co) -> _Masked2D[np.timedelta6 outerproduct = outer -def correlate(a, v, mode="valid", propagate_mask=True): ... -def convolve(a, v, mode="full", propagate_mask=True): ... +# keep in sync with `convolve` and `_core.numeric.correlate` +@overload +def correlate( + a: _ArrayLike[_AnyNumericScalarT], + v: _ArrayLike[_AnyNumericScalarT], + mode: _CorrelateMode = "valid", + propagate_mask: bool = True, +) -> _Array1D[_AnyNumericScalarT]: ... +@overload +def correlate( + a: _ArrayLikeBool_co, + v: _ArrayLikeBool_co, + mode: _CorrelateMode = "valid", + propagate_mask: bool = True, +) -> _Array1D[np.bool]: ... +@overload +def correlate( + a: _ArrayLikeInt_co, + v: _ArrayLikeInt_co, + mode: _CorrelateMode = "valid", + propagate_mask: bool = True, +) -> _Array1D[np.int_ | Any]: ... +@overload +def correlate( + a: _ArrayLikeFloat_co, + v: _ArrayLikeFloat_co, + mode: _CorrelateMode = "valid", + propagate_mask: bool = True, +) -> _Array1D[np.float64 | Any]: ... +@overload +def correlate( + a: _ArrayLikeNumber_co, + v: _ArrayLikeNumber_co, + mode: _CorrelateMode = "valid", + propagate_mask: bool = True, +) -> _Array1D[np.complex128 | Any]: ... +@overload +def correlate( + a: _ArrayLikeTD64_co, + v: _ArrayLikeTD64_co, + mode: _CorrelateMode = "valid", + propagate_mask: bool = True, +) -> _Array1D[np.timedelta64 | Any]: ... + +# keep in sync with `correlate` and `_core.numeric.convolve` +@overload +def convolve( + a: _ArrayLike[_AnyNumericScalarT], + v: _ArrayLike[_AnyNumericScalarT], + mode: _CorrelateMode = "full", + propagate_mask: bool = True, +) -> _Array1D[_AnyNumericScalarT]: ... 
+@overload +def convolve( + a: _ArrayLikeBool_co, + v: _ArrayLikeBool_co, + mode: _CorrelateMode = "full", + propagate_mask: bool = True, +) -> _Array1D[np.bool]: ... +@overload +def convolve( + a: _ArrayLikeInt_co, + v: _ArrayLikeInt_co, + mode: _CorrelateMode = "full", + propagate_mask: bool = True, +) -> _Array1D[np.int_ | Any]: ... +@overload +def convolve( + a: _ArrayLikeFloat_co, + v: _ArrayLikeFloat_co, + mode: _CorrelateMode = "full", + propagate_mask: bool = True, +) -> _Array1D[np.float64 | Any]: ... +@overload +def convolve( + a: _ArrayLikeNumber_co, + v: _ArrayLikeNumber_co, + mode: _CorrelateMode = "full", + propagate_mask: bool = True, +) -> _Array1D[np.complex128 | Any]: ... +@overload +def convolve( + a: _ArrayLikeTD64_co, + v: _ArrayLikeTD64_co, + mode: _CorrelateMode = "full", + propagate_mask: bool = True, +) -> _Array1D[np.timedelta64 | Any]: ... +# def allequal(a: ArrayLike, b: ArrayLike, fill_value: bool = True) -> bool: ... - def allclose(a: ArrayLike, b: ArrayLike, masked_equal: bool = True, rtol: float = 1e-5, atol: float = 1e-8) -> bool: ... def fromflex(fxarray): ... From 5f4f24bae00853789b32cf57cfc796c7c1b47246 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 1 Jan 2026 20:55:43 +0100 Subject: [PATCH 1132/1718] TYP: ``ma.fromflex`` --- numpy/ma/core.pyi | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 0e686776ee57..883954f05d07 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -3823,8 +3823,10 @@ def convolve( def allequal(a: ArrayLike, b: ArrayLike, fill_value: bool = True) -> bool: ... def allclose(a: ArrayLike, b: ArrayLike, masked_equal: bool = True, rtol: float = 1e-5, atol: float = 1e-8) -> bool: ... -def fromflex(fxarray): ... +# +def fromflex[ShapeT: _Shape](fxarray: np.ndarray[ShapeT, np.dtype[np.void]]) -> MaskedArray[ShapeT, np.dtype[Incomplete]]: ... +# def append(a, b, axis=None): ... def dot(a, b, strict=False, out=None): ... 
From 7606890fb957baa8fee6068023cd67fc0259af6f Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 1 Jan 2026 21:07:02 +0100 Subject: [PATCH 1133/1718] TYP: ``ma.append`` --- numpy/lib/_function_base_impl.pyi | 2 +- numpy/ma/core.pyi | 79 ++++++++++++++++++++++++++----- 2 files changed, 67 insertions(+), 14 deletions(-) diff --git a/numpy/lib/_function_base_impl.pyi b/numpy/lib/_function_base_impl.pyi index b5e02aad3c94..5f9e65d1a4a2 100644 --- a/numpy/lib/_function_base_impl.pyi +++ b/numpy/lib/_function_base_impl.pyi @@ -2272,7 +2272,7 @@ def insert(arr: ArrayLike, obj: _IndexLike, values: ArrayLike, axis: None = None @overload # unknown scalar-type, axis specified def insert(arr: ArrayLike, obj: _IndexLike, values: ArrayLike, axis: SupportsIndex) -> NDArray[Any]: ... -# +# keep in sync with `ma.core.append` @overload # known array type, axis specified def append[ArrayT: np.ndarray](arr: ArrayT, values: ArrayT, axis: SupportsIndex) -> ArrayT: ... @overload # 1d, known scalar type, axis specified diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 883954f05d07..adbb11e259f3 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -309,6 +309,7 @@ type _Ignored = object type _MaskedArray[ScalarT: np.generic] = MaskedArray[_AnyShape, np.dtype[ScalarT]] type _Masked1D[ScalarT: np.generic] = MaskedArray[tuple[int], np.dtype[ScalarT]] type _Masked2D[ScalarT: np.generic] = MaskedArray[tuple[int, int], np.dtype[ScalarT]] +type _Masked3D[ScalarT: np.generic] = MaskedArray[tuple[int, int, int], np.dtype[ScalarT]] type _MaskedArrayUInt_co = _MaskedArray[np.unsignedinteger | np.bool] type _MaskedArrayInt_co = _MaskedArray[np.integer | np.bool] @@ -342,6 +343,9 @@ type _DomainCallable = Callable[..., NDArray[np.bool]] type _PyArray[T] = list[T] | tuple[T, ...] 
type _PyScalar = complex | bytes | str +type _Seq2D[T] = Sequence[Sequence[T]] +type _Seq3D[T] = Sequence[_Seq2D[T]] + type _CorrelateMode = Literal["valid", "same", "full"] @type_check_only @@ -3738,42 +3742,42 @@ def correlate( v: _ArrayLike[_AnyNumericScalarT], mode: _CorrelateMode = "valid", propagate_mask: bool = True, -) -> _Array1D[_AnyNumericScalarT]: ... +) -> _Masked1D[_AnyNumericScalarT]: ... @overload def correlate( a: _ArrayLikeBool_co, v: _ArrayLikeBool_co, mode: _CorrelateMode = "valid", propagate_mask: bool = True, -) -> _Array1D[np.bool]: ... +) -> _Masked1D[np.bool]: ... @overload def correlate( a: _ArrayLikeInt_co, v: _ArrayLikeInt_co, mode: _CorrelateMode = "valid", propagate_mask: bool = True, -) -> _Array1D[np.int_ | Any]: ... +) -> _Masked1D[np.int_ | Any]: ... @overload def correlate( a: _ArrayLikeFloat_co, v: _ArrayLikeFloat_co, mode: _CorrelateMode = "valid", propagate_mask: bool = True, -) -> _Array1D[np.float64 | Any]: ... +) -> _Masked1D[np.float64 | Any]: ... @overload def correlate( a: _ArrayLikeNumber_co, v: _ArrayLikeNumber_co, mode: _CorrelateMode = "valid", propagate_mask: bool = True, -) -> _Array1D[np.complex128 | Any]: ... +) -> _Masked1D[np.complex128 | Any]: ... @overload def correlate( a: _ArrayLikeTD64_co, v: _ArrayLikeTD64_co, mode: _CorrelateMode = "valid", propagate_mask: bool = True, -) -> _Array1D[np.timedelta64 | Any]: ... +) -> _Masked1D[np.timedelta64 | Any]: ... # keep in sync with `correlate` and `_core.numeric.convolve` @overload @@ -3782,42 +3786,42 @@ def convolve( v: _ArrayLike[_AnyNumericScalarT], mode: _CorrelateMode = "full", propagate_mask: bool = True, -) -> _Array1D[_AnyNumericScalarT]: ... +) -> _Masked1D[_AnyNumericScalarT]: ... @overload def convolve( a: _ArrayLikeBool_co, v: _ArrayLikeBool_co, mode: _CorrelateMode = "full", propagate_mask: bool = True, -) -> _Array1D[np.bool]: ... +) -> _Masked1D[np.bool]: ... 
@overload def convolve( a: _ArrayLikeInt_co, v: _ArrayLikeInt_co, mode: _CorrelateMode = "full", propagate_mask: bool = True, -) -> _Array1D[np.int_ | Any]: ... +) -> _Masked1D[np.int_ | Any]: ... @overload def convolve( a: _ArrayLikeFloat_co, v: _ArrayLikeFloat_co, mode: _CorrelateMode = "full", propagate_mask: bool = True, -) -> _Array1D[np.float64 | Any]: ... +) -> _Masked1D[np.float64 | Any]: ... @overload def convolve( a: _ArrayLikeNumber_co, v: _ArrayLikeNumber_co, mode: _CorrelateMode = "full", propagate_mask: bool = True, -) -> _Array1D[np.complex128 | Any]: ... +) -> _Masked1D[np.complex128 | Any]: ... @overload def convolve( a: _ArrayLikeTD64_co, v: _ArrayLikeTD64_co, mode: _CorrelateMode = "full", propagate_mask: bool = True, -) -> _Array1D[np.timedelta64 | Any]: ... +) -> _Masked1D[np.timedelta64 | Any]: ... # def allequal(a: ArrayLike, b: ArrayLike, fill_value: bool = True) -> bool: ... @@ -3826,8 +3830,57 @@ def allclose(a: ArrayLike, b: ArrayLike, masked_equal: bool = True, rtol: float # def fromflex[ShapeT: _Shape](fxarray: np.ndarray[ShapeT, np.dtype[np.void]]) -> MaskedArray[ShapeT, np.dtype[Incomplete]]: ... +# keep in sync with `lib._function_base_impl.append` +@overload # known array type, axis specified +def append[MArrayT: MaskedArray]( + a: MArrayT, + b: MArrayT, + axis: SupportsIndex, +) -> MArrayT: ... +@overload # 1d, known scalar type, axis specified +def append[ScalarT: np.generic]( + a: Sequence[ScalarT], + b: Sequence[ScalarT], + axis: SupportsIndex, +) -> _Masked1D[ScalarT]: ... +@overload # 2d, known scalar type, axis specified +def append[ScalarT: np.generic]( + a: _Seq2D[ScalarT], + b: _Seq2D[ScalarT], + axis: SupportsIndex, +) -> _Masked2D[ScalarT]: ... +@overload # 3d, known scalar type, axis specified +def append[ScalarT: np.generic]( + a: _Seq3D[ScalarT], + b: _Seq3D[ScalarT], + axis: SupportsIndex, +) -> _Masked3D[ScalarT]: ... 
+@overload # ?d, known scalar type, axis specified +def append[ScalarT: np.generic]( + a: _NestedSequence[ScalarT], + b: _NestedSequence[ScalarT], + axis: SupportsIndex, +) -> _MaskedArray[ScalarT]: ... +@overload # ?d, unknown scalar type, axis specified +def append( + a: np.ndarray | _NestedSequence[_ScalarLike_co], + b: _NestedSequence[_ScalarLike_co], + axis: SupportsIndex, +) -> _MaskedArray[Incomplete]: ... +@overload # known scalar type, axis=None +def append[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + b: _ArrayLike[ScalarT], + axis: None = None, +) -> _Masked1D[ScalarT]: ... +@overload # unknown scalar type, axis=None +def append( + a: ArrayLike, + b: ArrayLike, + axis: None = None, +) -> _Masked1D[Incomplete]: ... + # -def append(a, b, axis=None): ... def dot(a, b, strict=False, out=None): ... # internal wrapper functions for the functions below From 1e9d3732393d7895e64ba91b4807e266dcb7ee97 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 1 Jan 2026 21:09:37 +0100 Subject: [PATCH 1134/1718] TYP: ``ma.dot`` --- numpy/_core/multiarray.pyi | 1 + numpy/ma/core.pyi | 7 +++++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index 341ad27cb822..1ea923180522 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -682,6 +682,7 @@ def can_cast(from_: ArrayLike | DTypeLike, to: DTypeLike, casting: _CastingKind def min_scalar_type(a: ArrayLike, /) -> dtype: ... def result_type(*arrays_and_dtypes: ArrayLike | DTypeLike | None) -> dtype: ... +# keep in sync with `ma.core.dot` @overload def dot(a: ArrayLike, b: ArrayLike, out: None = None) -> Incomplete: ... @overload diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index adbb11e259f3..8cf876f9d0f4 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -3880,8 +3880,11 @@ def append( axis: None = None, ) -> _Masked1D[Incomplete]: ... -# -def dot(a, b, strict=False, out=None): ... 
+# keep in sync with `_core.multiarray.dot` +@overload +def dot(a: ArrayLike, b: ArrayLike, strict: bool = False, out: None = None) -> Incomplete: ... +@overload +def dot[OutT: np.ndarray](a: ArrayLike, b: ArrayLike, strict: bool = False, *, out: OutT) -> OutT: ... # internal wrapper functions for the functions below def _convert2ma( From 63a7822ec6910379bd4c1456b863127b0736f5d7 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 1 Jan 2026 21:20:11 +0100 Subject: [PATCH 1135/1718] TYP: ``ma.argsort`` --- numpy/ma/core.pyi | 69 +++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 67 insertions(+), 2 deletions(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 8cf876f9d0f4..9cd40569c44e 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -25,7 +25,7 @@ from typing import ( override, type_check_only, ) -from typing_extensions import TypeIs, TypeVar +from typing_extensions import TypeIs, TypeVar, deprecated import numpy as np from numpy import ( @@ -3418,9 +3418,73 @@ def take[ArrayT: np.ndarray]( mode: _ModeKind = "raise", ) -> ArrayT: ... +# def power(a, b, third=None): ... -def argsort(a, axis=..., kind=None, order=None, endwith=True, fill_value=None, *, stable=None): ... +# +@overload # axis: (deprecated) +@deprecated( + "In the future the default for argsort will be axis=-1, not the current None, to match its documentation and np.argsort. " + "Explicitly pass -1 or None to silence this warning.", + category=MaskedArrayFutureWarning, + stacklevel=2, +) +def argsort( + a: ArrayLike, + axis: _NoValueType = ..., + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, + endwith: bool | None = True, + fill_value: _ScalarLike_co | None = None, + *, + stable: bool | None = None, +) -> _Array1D[np.intp]: ... 
+@overload # MaskedArray, axis: None +def argsort( + a: MaskedArray, + axis: None, + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, + endwith: bool | None = True, + fill_value: _ScalarLike_co | None = None, + *, + stable: bool | None = None, +) -> _Masked1D[np.intp]: ... +@overload # MaskedArray, axis: int-like +def argsort( + a: MaskedArray, + axis: SupportsIndex, + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, + endwith: bool | None = True, + fill_value: _ScalarLike_co | None = None, + *, + stable: bool | None = None, +) -> _MaskedArray[np.intp]: ... +@overload # array-like, axis: None +def argsort( + a: ArrayLike, + axis: None, + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, + endwith: bool | None = True, + fill_value: _ScalarLike_co | None = None, + *, + stable: bool | None = None, +) -> _Array1D[np.intp]: ... +@overload # array-like, axis: int-like +def argsort( + a: ArrayLike, + axis: SupportsIndex, + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, + endwith: bool | None = True, + fill_value: _ScalarLike_co | None = None, + *, + stable: bool | None = None, +) -> NDArray[np.intp]: ... + +# @overload def sort[ArrayT: np.ndarray]( a: ArrayT, @@ -3444,6 +3508,7 @@ def sort( stable: Literal[False] | None = None, ) -> NDArray[Any]: ... +# @overload def compressed[ScalarT: np.generic](x: _ArrayLike[ScalarT]) -> _Array1D[ScalarT]: ... 
@overload From bbe65e8eca05da2abd6f33da16d04d75a048c03e Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 1 Jan 2026 21:21:09 +0100 Subject: [PATCH 1136/1718] TYP: ``ma.sort``: accept `axis=None` --- numpy/ma/core.pyi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 9cd40569c44e..e513a0c18117 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -3499,7 +3499,7 @@ def sort[ArrayT: np.ndarray]( @overload def sort( a: ArrayLike, - axis: SupportsIndex = -1, + axis: SupportsIndex | None = -1, kind: _SortKind | None = None, order: str | Sequence[str] | None = None, endwith: bool | None = True, From 997caa2cd9314b8e925c4812b06dde99990da309 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 1 Jan 2026 21:23:01 +0100 Subject: [PATCH 1137/1718] TYP: ``ma.power`` (kinda) --- numpy/ma/core.pyi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index e513a0c18117..67d7a6cca0c4 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -3419,7 +3419,7 @@ def take[ArrayT: np.ndarray]( ) -> ArrayT: ... # -def power(a, b, third=None): ... +def power(a: ArrayLike, b: ArrayLike, third: None = None) -> _MaskedArray[Incomplete]: ... # @overload # axis: (deprecated) From 5dad46bebd1786c606f9d2ebba56506723796aa0 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Thu, 1 Jan 2026 21:34:49 +0100 Subject: [PATCH 1138/1718] TYP: fix typos in comments Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- numpy/_core/multiarray.pyi | 2 +- numpy/ma/core.pyi | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index 1ea923180522..029494c2e4e7 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -669,7 +669,7 @@ def concatenate[OutT: np.ndarray]( # keep in sync with `ma.core.inner` def inner(a: ArrayLike, b: ArrayLike, /) -> Incomplete: ... 
-# keep in sync with `ma.core.numeric` +# keep in sync with `ma.core.where` @overload def where(condition: ArrayLike, x: None = None, y: None = None, /) -> tuple[NDArray[intp], ...]: ... @overload diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 67d7a6cca0c4..3699a7e37173 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -3609,7 +3609,7 @@ def resize(x: ArrayLike, new_shape: _ShapeLike) -> _MaskedArray[Incomplete]: ... # def ndim(obj: ArrayLike) -> int: ... -# # keep in sync with `_core.fromnumeric.shape` +# keep in sync with `_core.fromnumeric.shape` @overload # this prevents `Any` from being returned with Pyright def shape(obj: _HasShape[Never]) -> _AnyShape: ... @overload From 00f0a1f8964d298cd911c4e0e3c5ef797106528f Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 1 Jan 2026 23:23:55 +0100 Subject: [PATCH 1139/1718] TYP: ``ma.mrecords.MaskedRecords`` stubs --- numpy/ma/mrecords.pyi | 129 ++++++++++++++++++++++++++++-------------- 1 file changed, 85 insertions(+), 44 deletions(-) diff --git a/numpy/ma/mrecords.pyi b/numpy/ma/mrecords.pyi index f1319d4bf69d..7b285d4fab8c 100644 --- a/numpy/ma/mrecords.pyi +++ b/numpy/ma/mrecords.pyi @@ -1,64 +1,105 @@ -from typing import Any, Generic +from _typeshed import Incomplete +from collections.abc import Buffer, Sequence +from typing import Any, Generic, Self, SupportsIndex, overload, override from typing_extensions import TypeVar import numpy as np -from numpy._typing import _AnyShape +from numpy import _ByteOrder, _ToIndices +from numpy._typing import ( + DTypeLike, + _AnyShape, + _ArrayLikeBool_co, + _DTypeLike, + _HasDType, + _ScalarLike_co, + _Shape, + _ShapeLike, + _VoidDTypeLike, +) from .core import MaskedArray -__all__ = [ - "MaskedRecords", - "mrecarray", - "fromarrays", - "fromrecords", - "fromtextfile", - "addfield", -] +__all__ = ["MaskedRecords", "mrecarray", "fromarrays", "fromrecords", "fromtextfile", "addfield"] -_ShapeT_co = TypeVar("_ShapeT_co", bound=tuple[int, ...], 
default=_AnyShape, covariant=True) +### + +_ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True) _DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=True) +type _Ignored = object + ### # mypy: disable-error-code=no-untyped-def class MaskedRecords(MaskedArray[_ShapeT_co, _DTypeT_co], Generic[_ShapeT_co, _DTypeT_co]): - def __new__( - cls, - shape, - dtype=..., - buf=..., - offset=..., - strides=..., - formats=..., - names=..., - titles=..., - byteorder=..., - aligned=..., - mask=..., - hard_mask=..., - fill_value=..., - keep_mask=..., - copy=..., - **options, - ): ... _mask: Any _fill_value: Any + + def __new__( + cls, + shape: _ShapeLike, + dtype: DTypeLike | None = None, + buf: Buffer | None = None, + offset: SupportsIndex = 0, + strides: _ShapeLike | None = None, + formats: DTypeLike | None = None, + names: str | Sequence[str] | None = None, + titles: str | Sequence[str] | None = None, + byteorder: _ByteOrder | None = None, + aligned: bool = False, + mask: _ArrayLikeBool_co = ..., + hard_mask: bool = False, + fill_value: _ScalarLike_co | None = None, + keep_mask: bool = True, + copy: bool = False, + **options: _Ignored, + ) -> Self: ... + + # @property - def _data(self): ... + @override + def _data(self, /) -> np.recarray[_ShapeT_co, _DTypeT_co]: ... @property - def _fieldmask(self): ... - def __array_finalize__(self, obj): ... - def __len__(self): ... - def __getattribute__(self, attr): ... - def __setattr__(self, attr, val): ... - def __getitem__(self, indx): ... - def __setitem__(self, indx, value): ... - def view(self, dtype=None, type=None): ... # type: ignore[override] - def harden_mask(self): ... - def soften_mask(self): ... - def copy(self): ... # type: ignore[override] - def tolist(self, fill_value=None): ... - def __reduce__(self): ... + def _fieldmask(self, /) -> np.ndarray[_ShapeT_co, np.dtype[np.bool]] | np.bool: ... + + # + @override + def __array_finalize__(self, obj: np.ndarray) -> None: ... 
# type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + @override + def __getitem__(self, indx: str | _ToIndices, /) -> Incomplete: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + @override + def __setitem__(self, indx: str | _ToIndices, value: Incomplete, /) -> None: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + + # unlike `MaskedArray`, these two methods don't return `Self` + @override + def harden_mask(self) -> None: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + @override + def soften_mask(self) -> None: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + + # keep in sync with `MaskedArray.view`, but without the `fill_value` + @override # type: ignore[override] + @overload # () + def view(self, /, dtype: None = None, type: None = None) -> Self: ... + @overload # (dtype: DTypeT) + def view[DTypeT: np.dtype]( + self, /, dtype: DTypeT | _HasDType[DTypeT], type: None = None + ) -> MaskedRecords[_ShapeT_co, DTypeT]: ... + @overload # (dtype: dtype[ScalarT]) + def view[ScalarT: np.generic]( + self, /, dtype: _DTypeLike[ScalarT], type: None = None + ) -> MaskedRecords[_ShapeT_co, np.dtype[ScalarT]]: ... + @overload # ([dtype: _, ]*, type: ArrayT) + def view[ArrayT: np.ndarray](self, /, dtype: DTypeLike | None = None, *, type: type[ArrayT]) -> ArrayT: ... + @overload # (dtype: _, type: ArrayT) + def view[ArrayT: np.ndarray](self, /, dtype: DTypeLike | None, type: type[ArrayT]) -> ArrayT: ... + @overload # (dtype: ArrayT, /) + def view[ArrayT: np.ndarray](self, /, dtype: type[ArrayT], type: None = None) -> ArrayT: ... + @overload # (dtype: ) + def view(self, /, dtype: _VoidDTypeLike | str | None, type: None = None) -> MaskedRecords[_ShapeT_co, np.dtype]: ... 
# pyright: ignore[reportIncompatibleMethodOverride] + + # unlike `MaskedArray` and `ndarray`, this `copy` method has no `order` parameter + @override + def copy(self, /) -> Self: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] mrecarray = MaskedRecords From 2774e23f818f4f35d9f58f5064db3bf653a09999 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 1 Jan 2026 22:07:28 +0100 Subject: [PATCH 1140/1718] TYP: ``ma.core.mvoid`` annotations --- numpy/ma/core.pyi | 38 ++++++++++++++++++++++---------------- 1 file changed, 22 insertions(+), 16 deletions(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 47e6e1a1fdf8..10234e51ea2b 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -1,10 +1,9 @@ -# mypy: disable-error-code=no-untyped-def # pyright: reportIncompatibleMethodOverride=false import datetime as dt import types from _typeshed import Incomplete, SupportsLenAndGetItem -from collections.abc import Buffer, Callable, Sequence +from collections.abc import Buffer, Callable, Iterator, Sequence from typing import ( Any, Concatenate, @@ -2594,20 +2593,27 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): class mvoid(MaskedArray[_ShapeT_co, _DTypeT_co]): def __new__( cls, - data, - mask=..., - dtype=..., - fill_value=..., - hardmask=..., - copy=..., - subok=..., - ): ... - def __getitem__(self, indx): ... - def __setitem__(self, indx, value): ... - def __iter__(self): ... - def __len__(self): ... - def filled(self, fill_value=None): ... - def tolist(self): ... # type: ignore[override] + /, + data: ArrayLike, + mask: _ArrayLikeBool_co = nomask, + dtype: DTypeLike | None = None, + fill_value: _FillValue = None, + hardmask: bool = False, + copy: bool = False, + subok: bool = True, + ) -> Self: ... + @override + def __getitem__(self, indx: _ToIndices, /) -> Incomplete: ... # type: ignore[override] + @override + def __setitem__(self, indx: _ToIndices, value: ArrayLike, /) -> None: ... 
# type: ignore[override] + @override + def __iter__[ScalarT: np.generic](self: mvoid[Any, np.dtype[ScalarT]], /) -> Iterator[MaskedConstant | ScalarT]: ... + @override + def __len__(self, /) -> int: ... + @override + def filled(self, /, fill_value: _ScalarLike_co | None = None) -> Self | np.void: ... # type: ignore[override] + @override # list or tuple + def tolist(self) -> Sequence[Incomplete]: ... # type: ignore[override] def isMaskedArray(x: object) -> TypeIs[MaskedArray]: ... def isarray(x: object) -> TypeIs[MaskedArray]: ... # alias to isMaskedArray From 5e66117708d449027c3367e6ff166482956d85c4 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 1 Jan 2026 23:36:25 +0100 Subject: [PATCH 1141/1718] TYP: ``ma.mrecords.fromarrays`` --- numpy/ma/mrecords.pyi | 103 ++++++++++++++++++++++++++++++++++++------ 1 file changed, 90 insertions(+), 13 deletions(-) diff --git a/numpy/ma/mrecords.pyi b/numpy/ma/mrecords.pyi index 7b285d4fab8c..bb2afe6283ec 100644 --- a/numpy/ma/mrecords.pyi +++ b/numpy/ma/mrecords.pyi @@ -6,6 +6,7 @@ from typing_extensions import TypeVar import numpy as np from numpy import _ByteOrder, _ToIndices from numpy._typing import ( + ArrayLike, DTypeLike, _AnyShape, _ArrayLikeBool_co, @@ -27,6 +28,7 @@ _ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=Tr _DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=True) type _Ignored = object +type _Names = str | Sequence[str] ### # mypy: disable-error-code=no-untyped-def @@ -43,8 +45,8 @@ class MaskedRecords(MaskedArray[_ShapeT_co, _DTypeT_co], Generic[_ShapeT_co, _DT offset: SupportsIndex = 0, strides: _ShapeLike | None = None, formats: DTypeLike | None = None, - names: str | Sequence[str] | None = None, - titles: str | Sequence[str] | None = None, + names: _Names | None = None, + titles: _Names | None = None, byteorder: _ByteOrder | None = None, aligned: bool = False, mask: _ArrayLikeBool_co = ..., @@ -103,18 +105,93 @@ class 
MaskedRecords(MaskedArray[_ShapeT_co, _DTypeT_co], Generic[_ShapeT_co, _DT mrecarray = MaskedRecords +@overload # known dtype, known shape +def fromarrays[DTypeT: np.dtype, ShapeT: _Shape]( + arraylist: Sequence[ArrayLike], + dtype: DTypeT | _HasDType[DTypeT], + shape: ShapeT, + formats: DTypeLike | None = None, + names: _Names | None = None, + titles: _Names | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + fill_value: _ScalarLike_co | None = None, +) -> MaskedRecords[ShapeT, DTypeT]: ... +@overload # known dtype, unknown shape +def fromarrays[DTypeT: np.dtype]( + arraylist: Sequence[ArrayLike], + dtype: DTypeT | _HasDType[DTypeT], + shape: _ShapeLike | None = None, + formats: DTypeLike | None = None, + names: _Names | None = None, + titles: _Names | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + fill_value: _ScalarLike_co | None = None, +) -> MaskedRecords[_AnyShape, DTypeT]: ... +@overload # known scalar-type, known shape +def fromarrays[ScalarT: np.generic, ShapeT: _Shape]( + arraylist: Sequence[ArrayLike], + dtype: _DTypeLike[ScalarT], + shape: ShapeT, + formats: DTypeLike | None = None, + names: _Names | None = None, + titles: _Names | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + fill_value: _ScalarLike_co | None = None, +) -> MaskedRecords[ShapeT, np.dtype[ScalarT]]: ... +@overload # known scalar-type, unknown shape +def fromarrays[ScalarT: np.generic]( + arraylist: Sequence[ArrayLike], + dtype: _DTypeLike[ScalarT], + shape: _ShapeLike | None = None, + formats: DTypeLike | None = None, + names: _Names | None = None, + titles: _Names | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + fill_value: _ScalarLike_co | None = None, +) -> MaskedRecords[_AnyShape, np.dtype[ScalarT]]: ... 
+@overload # unknown dtype, known shape (positional) +def fromarrays[ShapeT: _Shape]( + arraylist: Sequence[ArrayLike], + dtype: DTypeLike | None, + shape: ShapeT, + formats: DTypeLike | None = None, + names: _Names | None = None, + titles: _Names | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + fill_value: _ScalarLike_co | None = None, +) -> MaskedRecords[ShapeT]: ... +@overload # unknown dtype, known shape (keyword) +def fromarrays[ShapeT: _Shape]( + arraylist: Sequence[ArrayLike], + dtype: DTypeLike | None = None, + *, + shape: ShapeT, + formats: DTypeLike | None = None, + names: _Names | None = None, + titles: _Names | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + fill_value: _ScalarLike_co | None = None, +) -> MaskedRecords[ShapeT]: ... +@overload # unknown dtype, unknown shape def fromarrays( - arraylist, - dtype=None, - shape=None, - formats=None, - names=None, - titles=None, - aligned=False, - byteorder=None, - fill_value=None, -): ... - + arraylist: Sequence[ArrayLike], + dtype: DTypeLike | None = None, + shape: _ShapeLike | None = None, + formats: DTypeLike | None = None, + names: _Names | None = None, + titles: _Names | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + fill_value: _ScalarLike_co | None = None, +) -> MaskedRecords: ... + +# def fromrecords( reclist, dtype=None, From b5730c3eb3c264af0ff00647ea5eae0b9be4f4dd Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 1 Jan 2026 23:39:45 +0100 Subject: [PATCH 1142/1718] TYP: ``ma.mrecords.addfield`` --- numpy/ma/mrecords.pyi | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/numpy/ma/mrecords.pyi b/numpy/ma/mrecords.pyi index bb2afe6283ec..a8910dd8e541 100644 --- a/numpy/ma/mrecords.pyi +++ b/numpy/ma/mrecords.pyi @@ -214,4 +214,9 @@ def fromtextfile( vartypes=None, ): ... -def addfield(mrecord, newfield, newfieldname=None): ... 
+# +def addfield[ShapeT: _Shape]( + mrecord: MaskedRecords[ShapeT], + newfield: ArrayLike, + newfieldname: str | None = None, +) -> np.recarray[ShapeT, np.dtype[np.void]]: ... From 465aad24d93ae42f0fcd77cefd5ef88e8d854a1a Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 1 Jan 2026 23:43:27 +0100 Subject: [PATCH 1143/1718] TYP: ``ma.mrecords.fromtextfile`` --- numpy/ma/mrecords.pyi | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/numpy/ma/mrecords.pyi b/numpy/ma/mrecords.pyi index a8910dd8e541..f347c6df2dba 100644 --- a/numpy/ma/mrecords.pyi +++ b/numpy/ma/mrecords.pyi @@ -1,4 +1,4 @@ -from _typeshed import Incomplete +from _typeshed import Incomplete, StrPath, SupportsReadline from collections.abc import Buffer, Sequence from typing import Any, Generic, Self, SupportsIndex, overload, override from typing_extensions import TypeVar @@ -205,14 +205,15 @@ def fromrecords( mask=..., ): ... +# def fromtextfile( - fname, - delimiter=None, - commentchar="#", - missingchar="", - varnames=None, - vartypes=None, -): ... + fname: StrPath | SupportsReadline[str], + delimiter: str | None = None, + commentchar: str = "#", + missingchar: str = "", + varnames: Sequence[str] | None = None, + vartypes: Sequence[DTypeLike] | None = None, +) -> np.recarray[tuple[int], np.dtype[np.void]]: ... 
# def addfield[ShapeT: _Shape]( From 9b41bd2e92503ca2d34df5dffa7f853fd3af8b6d Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 1 Jan 2026 23:46:17 +0100 Subject: [PATCH 1144/1718] TYP: ``ma.mrecords.openfile`` (undocumented) --- numpy/ma/mrecords.pyi | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/numpy/ma/mrecords.pyi b/numpy/ma/mrecords.pyi index f347c6df2dba..fe28ecd1cdac 100644 --- a/numpy/ma/mrecords.pyi +++ b/numpy/ma/mrecords.pyi @@ -1,6 +1,6 @@ from _typeshed import Incomplete, StrPath, SupportsReadline from collections.abc import Buffer, Sequence -from typing import Any, Generic, Self, SupportsIndex, overload, override +from typing import IO, Any, Generic, Self, SupportsIndex, overload, override from typing_extensions import TypeVar import numpy as np @@ -205,6 +205,12 @@ def fromrecords( mask=..., ): ... +# undocumented +@overload +def openfile(fname: StrPath) -> IO[str]: ... +@overload +def openfile[FileT: SupportsReadline[str]](fname: FileT) -> FileT: ... + # def fromtextfile( fname: StrPath | SupportsReadline[str], From 2d42db6a026ac2c1d30eee321dee80eb64a2f973 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 1 Jan 2026 23:52:11 +0100 Subject: [PATCH 1145/1718] TYP: ``ma.mrecords.fromrecords`` --- numpy/ma/mrecords.pyi | 102 +++++++++++++++++++++++++++++++++++++----- 1 file changed, 91 insertions(+), 11 deletions(-) diff --git a/numpy/ma/mrecords.pyi b/numpy/ma/mrecords.pyi index fe28ecd1cdac..d538ad1a28f7 100644 --- a/numpy/ma/mrecords.pyi +++ b/numpy/ma/mrecords.pyi @@ -192,18 +192,98 @@ def fromarrays( ) -> MaskedRecords: ... 
# +@overload # known dtype, known shape +def fromrecords[DTypeT: np.dtype, ShapeT: _Shape]( + reclist: ArrayLike, + dtype: DTypeT, + shape: ShapeT, + formats: DTypeLike | None = None, + names: _Names | None = None, + titles: _Names | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + fill_value: _ScalarLike_co | None = None, + mask: _ArrayLikeBool_co = ..., +) -> MaskedRecords[ShapeT, DTypeT]: ... +@overload # known dtype, unknown shape +def fromrecords[DTypeT: np.dtype]( + reclist: ArrayLike, + dtype: DTypeT, + shape: _ShapeLike | None = None, + formats: DTypeLike | None = None, + names: _Names | None = None, + titles: _Names | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + fill_value: _ScalarLike_co | None = None, + mask: _ArrayLikeBool_co = ..., +) -> MaskedRecords[_AnyShape, DTypeT]: ... +@overload # known scalar-type, known shape +def fromrecords[ScalarT: np.generic, ShapeT: _Shape]( + reclist: ArrayLike, + dtype: _DTypeLike[ScalarT], + shape: ShapeT, + formats: DTypeLike | None = None, + names: _Names | None = None, + titles: _Names | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + fill_value: _ScalarLike_co | None = None, + mask: _ArrayLikeBool_co = ..., +) -> MaskedRecords[ShapeT, np.dtype[ScalarT]]: ... +@overload # known scalar-type, unknown shape +def fromrecords[ScalarT: np.generic]( + reclist: ArrayLike, + dtype: _DTypeLike[ScalarT], + shape: _ShapeLike | None = None, + formats: DTypeLike | None = None, + names: _Names | None = None, + titles: _Names | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + fill_value: _ScalarLike_co | None = None, + mask: _ArrayLikeBool_co = ..., +) -> MaskedRecords[_AnyShape, np.dtype[ScalarT]]: ... 
+@overload # unknown dtype, known shape (positional) +def fromrecords[ScalarT: np.generic, ShapeT: _Shape]( + reclist: ArrayLike, + dtype: DTypeLike | None, + shape: ShapeT, + formats: DTypeLike | None = None, + names: _Names | None = None, + titles: _Names | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + fill_value: _ScalarLike_co | None = None, + mask: _ArrayLikeBool_co = ..., +) -> MaskedRecords[ShapeT, np.dtype[Incomplete]]: ... +@overload # unknown dtype, known shape (keyword) +def fromrecords[ScalarT: np.generic, ShapeT: _Shape]( + reclist: ArrayLike, + dtype: DTypeLike | None = None, + *, + shape: ShapeT, + formats: DTypeLike | None = None, + names: _Names | None = None, + titles: _Names | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + fill_value: _ScalarLike_co | None = None, + mask: _ArrayLikeBool_co = ..., +) -> MaskedRecords[ShapeT, np.dtype[Incomplete]]: ... +@overload # unknown dtype, unknown shape def fromrecords( - reclist, - dtype=None, - shape=None, - formats=None, - names=None, - titles=None, - aligned=False, - byteorder=None, - fill_value=None, - mask=..., -): ... + reclist: ArrayLike, + dtype: DTypeLike | None = None, + shape: _ShapeLike | None = None, + formats: DTypeLike | None = None, + names: _Names | None = None, + titles: _Names | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + fill_value: _ScalarLike_co | None = None, + mask: _ArrayLikeBool_co = ..., +) -> MaskedRecords[_AnyShape, np.dtype[Incomplete]]: ... 
# undocumented @overload From 06719886c154ca59d22cb2f318186355bc0dcb74 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 2 Jan 2026 00:01:46 +0100 Subject: [PATCH 1146/1718] TYP: ``ma.mrecords`` review suggestions --- numpy/ma/mrecords.pyi | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/ma/mrecords.pyi b/numpy/ma/mrecords.pyi index d538ad1a28f7..597551d1bf7d 100644 --- a/numpy/ma/mrecords.pyi +++ b/numpy/ma/mrecords.pyi @@ -245,7 +245,7 @@ def fromrecords[ScalarT: np.generic]( mask: _ArrayLikeBool_co = ..., ) -> MaskedRecords[_AnyShape, np.dtype[ScalarT]]: ... @overload # unknown dtype, known shape (positional) -def fromrecords[ScalarT: np.generic, ShapeT: _Shape]( +def fromrecords[ShapeT: _Shape]( reclist: ArrayLike, dtype: DTypeLike | None, shape: ShapeT, @@ -258,7 +258,7 @@ def fromrecords[ScalarT: np.generic, ShapeT: _Shape]( mask: _ArrayLikeBool_co = ..., ) -> MaskedRecords[ShapeT, np.dtype[Incomplete]]: ... @overload # unknown dtype, known shape (keyword) -def fromrecords[ScalarT: np.generic, ShapeT: _Shape]( +def fromrecords[ShapeT: _Shape]( reclist: ArrayLike, dtype: DTypeLike | None = None, *, @@ -299,7 +299,7 @@ def fromtextfile( missingchar: str = "", varnames: Sequence[str] | None = None, vartypes: Sequence[DTypeLike] | None = None, -) -> np.recarray[tuple[int], np.dtype[np.void]]: ... +) -> MaskedRecords[tuple[int], np.dtype[np.void]]: ... # def addfield[ShapeT: _Shape]( From 43fae6a1e2142fd81b08f5f101106ef0ce3a2791 Mon Sep 17 00:00:00 2001 From: Harish E Date: Fri, 2 Jan 2026 12:20:41 +0530 Subject: [PATCH 1147/1718] MAINT: Update Doxygen version in config header Updated file header to match the current Doxygen version (1.13.2) used in the build system. 
--- doc/source/doxyfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/doxyfile b/doc/source/doxyfile index 3d85fb645488..60ab1058dbba 100644 --- a/doc/source/doxyfile +++ b/doc/source/doxyfile @@ -1,4 +1,4 @@ -# Doxyfile 1.8.18 +# Doxyfile 1.13.2 #--------------------------------------------------------------------------- # Project related configuration options #--------------------------------------------------------------------------- From 6ae82e77bc18e8b8d9b1fc46856e36f6ca7f5b88 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 2 Jan 2026 12:16:49 +0100 Subject: [PATCH 1148/1718] TYP: ``ma.extras.count_masked`` --- numpy/ma/extras.pyi | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/numpy/ma/extras.pyi b/numpy/ma/extras.pyi index 83f3bee761ce..e012df2efbab 100644 --- a/numpy/ma/extras.pyi +++ b/numpy/ma/extras.pyi @@ -7,6 +7,7 @@ from numpy import _CastingKind from numpy._typing import ( ArrayLike, DTypeLike, + NDArray, _AnyShape, _ArrayLike, _DTypeLike, @@ -243,10 +244,11 @@ def diagflat[ScalarT: np.generic](v: _ArrayLike[ScalarT], k: int = 0) -> _MArray @overload def diagflat(v: ArrayLike, k: int = 0) -> _MArray[Incomplete]: ... +def count_masked(arr: ArrayLike, axis: SupportsIndex | None = None) -> NDArray[np.intp]: ... + # TODO: everything below # mypy: disable-error-code=no-untyped-def -def count_masked(arr, axis=None): ... def masked_all(shape, dtype=float): ... # noqa: PYI014 def masked_all_like(arr): ... 
From b5d14add60eac9134d0d2df5847f983b9b85c4d4 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 2 Jan 2026 12:17:21 +0100 Subject: [PATCH 1149/1718] TYP: ``ma.extras.masked_all`` --- numpy/ma/extras.pyi | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/numpy/ma/extras.pyi b/numpy/ma/extras.pyi index e012df2efbab..05bd46090590 100644 --- a/numpy/ma/extras.pyi +++ b/numpy/ma/extras.pyi @@ -244,14 +244,19 @@ def diagflat[ScalarT: np.generic](v: _ArrayLike[ScalarT], k: int = 0) -> _MArray @overload def diagflat(v: ArrayLike, k: int = 0) -> _MArray[Incomplete]: ... +# def count_masked(arr: ArrayLike, axis: SupportsIndex | None = None) -> NDArray[np.intp]: ... +# +@overload +def masked_all[ScalarT: np.generic](shape: _ShapeLike, dtype: _DTypeLike[ScalarT]) -> _MArray[ScalarT]: ... +@overload +def masked_all(shape: _ShapeLike, dtype: DTypeLike = float) -> _MArray[Incomplete]: ... + # TODO: everything below # mypy: disable-error-code=no-untyped-def -def masked_all(shape, dtype=float): ... # noqa: PYI014 def masked_all_like(arr): ... - def apply_along_axis(func1d, axis, arr, *args, **kwargs): ... def apply_over_axes(func, a, axes): ... def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): ... From fe8e4f54fb3f8c0ed559993ee6d92e6c74cf4c13 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 2 Jan 2026 12:18:48 +0100 Subject: [PATCH 1150/1718] TYP: ``ma.extras.masked_all_like`` --- numpy/ma/extras.pyi | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/numpy/ma/extras.pyi b/numpy/ma/extras.pyi index 05bd46090590..fbc41c54e74c 100644 --- a/numpy/ma/extras.pyi +++ b/numpy/ma/extras.pyi @@ -253,10 +253,15 @@ def masked_all[ScalarT: np.generic](shape: _ShapeLike, dtype: _DTypeLike[ScalarT @overload def masked_all(shape: _ShapeLike, dtype: DTypeLike = float) -> _MArray[Incomplete]: ... +# +@overload +def masked_all_like[ScalarT: np.generic](arr: _ArrayLike[ScalarT]) -> _MArray[ScalarT]: ... 
+@overload +def masked_all_like(arr: ArrayLike) -> _MArray[Incomplete]: ... + # TODO: everything below # mypy: disable-error-code=no-untyped-def -def masked_all_like(arr): ... def apply_along_axis(func1d, axis, arr, *args, **kwargs): ... def apply_over_axes(func, a, axes): ... def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): ... From 7e9e3c814fa0d9789522ccc68d9dcfc176532b91 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 2 Jan 2026 12:25:20 +0100 Subject: [PATCH 1151/1718] TYP: ``ma.extras.apply_along_axis`` --- numpy/ma/extras.pyi | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/numpy/ma/extras.pyi b/numpy/ma/extras.pyi index fbc41c54e74c..49488322183f 100644 --- a/numpy/ma/extras.pyi +++ b/numpy/ma/extras.pyi @@ -1,6 +1,6 @@ from _typeshed import Incomplete -from collections.abc import Sequence -from typing import SupportsIndex, overload +from collections.abc import Callable, Sequence +from typing import Concatenate, SupportsIndex, overload import numpy as np from numpy import _CastingKind @@ -259,10 +259,18 @@ def masked_all_like[ScalarT: np.generic](arr: _ArrayLike[ScalarT]) -> _MArray[Sc @overload def masked_all_like(arr: ArrayLike) -> _MArray[Incomplete]: ... +# +def apply_along_axis[**Tss]( + func1d: Callable[Concatenate[MaskedArray, Tss], ArrayLike], + axis: SupportsIndex, + arr: ArrayLike, + *args: Tss.args, + **kwargs: Tss.kwargs, +) -> _MArray[Incomplete]: ... + # TODO: everything below # mypy: disable-error-code=no-untyped-def -def apply_along_axis(func1d, axis, arr, *args, **kwargs): ... def apply_over_axes(func, a, axes): ... def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): ... def compress_nd(x, axis=None): ... 
From 24e9a3eaf1bda940c571144a3db621f6571951a0 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 2 Jan 2026 12:29:22 +0100 Subject: [PATCH 1152/1718] TYP: ``ma.extras.apply_over_axes`` --- numpy/ma/extras.pyi | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/numpy/ma/extras.pyi b/numpy/ma/extras.pyi index 49488322183f..74ddc220a3d1 100644 --- a/numpy/ma/extras.pyi +++ b/numpy/ma/extras.pyi @@ -268,10 +268,23 @@ def apply_along_axis[**Tss]( **kwargs: Tss.kwargs, ) -> _MArray[Incomplete]: ... +# +@overload +def apply_over_axes[ScalarT: np.generic]( + func: Callable[[MaskedArray, int], _ArrayLike[ScalarT]], + a: np.ndarray, + axes: _ShapeLike, +) -> _MArray[ScalarT]: ... +@overload +def apply_over_axes( + func: Callable[[MaskedArray, int], ArrayLike], + a: np.ndarray, + axes: _ShapeLike, +) -> _MArray[Incomplete]: ... + # TODO: everything below # mypy: disable-error-code=no-untyped-def -def apply_over_axes(func, a, axes): ... def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): ... def compress_nd(x, axis=None): ... def compress_rowcols(x, axis=None): ... 
From 8886cb9d4970c6d4df13b7eb6778b40849a0a076 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 2 Jan 2026 16:28:37 +0100 Subject: [PATCH 1153/1718] TYP: ``ma.extras.median`` --- numpy/ma/extras.pyi | 128 +++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 126 insertions(+), 2 deletions(-) diff --git a/numpy/ma/extras.pyi b/numpy/ma/extras.pyi index 74ddc220a3d1..7168cc5d7eef 100644 --- a/numpy/ma/extras.pyi +++ b/numpy/ma/extras.pyi @@ -1,6 +1,6 @@ from _typeshed import Incomplete from collections.abc import Callable, Sequence -from typing import Concatenate, SupportsIndex, overload +from typing import Any, Concatenate, Literal as L, SupportsIndex, overload import numpy as np from numpy import _CastingKind @@ -10,7 +10,10 @@ from numpy._typing import ( NDArray, _AnyShape, _ArrayLike, + _ArrayLikeComplex_co, + _ArrayLikeInt_co, _DTypeLike, + _NestedSequence, _ShapeLike, ) from numpy.lib._function_base_impl import average @@ -69,6 +72,9 @@ __all__ = [ type _MArray[ScalarT: np.generic] = MaskedArray[_AnyShape, np.dtype[ScalarT]] +type _ScalarNumeric = np.inexact | np.timedelta64 | np.object_ +type _ListSeqND[T] = list[T] | _NestedSequence[list[T]] + ### # keep in sync with `numpy._core.shape_base.atleast_1d` @@ -282,10 +288,128 @@ def apply_over_axes( axes: _ShapeLike, ) -> _MArray[Incomplete]: ... +# keep in sync with `lib._function_base_impl.median` +@overload # known scalar-type, keepdims=False (default) +def median[ScalarT: np.inexact | np.timedelta64]( + a: _ArrayLike[ScalarT], + axis: None = None, + out: None = None, + overwrite_input: bool = False, + keepdims: L[False] = False, +) -> ScalarT: ... +@overload # float array-like, keepdims=False (default) +def median( + a: _ArrayLikeInt_co | _NestedSequence[float] | float, + axis: None = None, + out: None = None, + overwrite_input: bool = False, + keepdims: L[False] = False, +) -> np.float64: ... 
+@overload # complex array-like, keepdims=False (default) +def median( + a: _ListSeqND[complex], + axis: None = None, + out: None = None, + overwrite_input: bool = False, + keepdims: L[False] = False, +) -> np.complex128: ... +@overload # complex scalar, keepdims=False (default) +def median( + a: complex, + axis: None = None, + out: None = None, + overwrite_input: bool = False, + keepdims: L[False] = False, +) -> np.complex128 | Any: ... +@overload # known array-type, keepdims=True +def median[ArrayT: NDArray[_ScalarNumeric]]( + a: ArrayT, + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + *, + keepdims: L[True], +) -> ArrayT: ... +@overload # known scalar-type, keepdims=True +def median[ScalarT: _ScalarNumeric]( + a: _ArrayLike[ScalarT], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + *, + keepdims: L[True], +) -> _MArray[ScalarT]: ... +@overload # known scalar-type, axis= +def median[ScalarT: _ScalarNumeric]( + a: _ArrayLike[ScalarT], + axis: _ShapeLike, + out: None = None, + overwrite_input: bool = False, + keepdims: bool = False, +) -> _MArray[ScalarT]: ... +@overload # float array-like, keepdims=True +def median( + a: _NestedSequence[float], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + *, + keepdims: L[True], +) -> _MArray[np.float64]: ... +@overload # float array-like, axis= +def median( + a: _NestedSequence[float], + axis: _ShapeLike, + out: None = None, + overwrite_input: bool = False, + keepdims: bool = False, +) -> _MArray[np.float64]: ... +@overload # complex array-like, keepdims=True +def median( + a: _ListSeqND[complex], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + *, + keepdims: L[True], +) -> _MArray[np.complex128]: ... 
+@overload # complex array-like, axis= +def median( + a: _ListSeqND[complex], + axis: _ShapeLike, + out: None = None, + overwrite_input: bool = False, + keepdims: bool = False, +) -> _MArray[np.complex128]: ... +@overload # out= (keyword) +def median[ArrayT: np.ndarray]( + a: _ArrayLikeComplex_co | _ArrayLike[np.timedelta64 | np.object_], + axis: _ShapeLike | None = None, + *, + out: ArrayT, + overwrite_input: bool = False, + keepdims: bool = False, +) -> ArrayT: ... +@overload # out= (positional) +def median[ArrayT: np.ndarray]( + a: _ArrayLikeComplex_co | _ArrayLike[np.timedelta64 | np.object_], + axis: _ShapeLike | None, + out: ArrayT, + overwrite_input: bool = False, + keepdims: bool = False, +) -> ArrayT: ... +@overload # fallback +def median( + a: _ArrayLikeComplex_co | _ArrayLike[np.timedelta64 | np.object_], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + keepdims: bool = False, +) -> Incomplete: ... + # TODO: everything below # mypy: disable-error-code=no-untyped-def -def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): ... def compress_nd(x, axis=None): ... def compress_rowcols(x, axis=None): ... def compress_rows(a): ... From ad3c1ce4a668f20dd5acde6f03a61655a9c91ed9 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 2 Jan 2026 16:48:07 +0100 Subject: [PATCH 1154/1718] TYP: ``ma.extras.compress_nd`` --- numpy/ma/extras.pyi | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/numpy/ma/extras.pyi b/numpy/ma/extras.pyi index 7168cc5d7eef..171341c218e8 100644 --- a/numpy/ma/extras.pyi +++ b/numpy/ma/extras.pyi @@ -407,10 +407,15 @@ def median( keepdims: bool = False, ) -> Incomplete: ... +# +@overload +def compress_nd[ScalarT: np.generic](x: _ArrayLike[ScalarT], axis: _ShapeLike | None = None) -> NDArray[ScalarT]: ... +@overload +def compress_nd(x: ArrayLike, axis: _ShapeLike | None = None) -> NDArray[Incomplete]: ... 
+ # TODO: everything below # mypy: disable-error-code=no-untyped-def -def compress_nd(x, axis=None): ... def compress_rowcols(x, axis=None): ... def compress_rows(a): ... def compress_cols(a): ... From c960a8547cb5e6de8c1fc7e2089cb23ea2b4a24f Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 2 Jan 2026 16:52:58 +0100 Subject: [PATCH 1155/1718] TYP: ``ma.extras.compress_rowcols`` --- numpy/ma/extras.pyi | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/numpy/ma/extras.pyi b/numpy/ma/extras.pyi index 171341c218e8..e841a8870b1b 100644 --- a/numpy/ma/extras.pyi +++ b/numpy/ma/extras.pyi @@ -71,6 +71,7 @@ __all__ = [ ] type _MArray[ScalarT: np.generic] = MaskedArray[_AnyShape, np.dtype[ScalarT]] +type _Array2D[ScalarT: np.generic] = np.ndarray[tuple[int, int], np.dtype[ScalarT]] type _ScalarNumeric = np.inexact | np.timedelta64 | np.object_ type _ListSeqND[T] = list[T] | _NestedSequence[list[T]] @@ -413,10 +414,15 @@ def compress_nd[ScalarT: np.generic](x: _ArrayLike[ScalarT], axis: _ShapeLike | @overload def compress_nd(x: ArrayLike, axis: _ShapeLike | None = None) -> NDArray[Incomplete]: ... +# +@overload +def compress_rowcols[ScalarT: np.generic](x: _ArrayLike[ScalarT], axis: int | None = None) -> _Array2D[ScalarT]: ... +@overload +def compress_rowcols(x: ArrayLike, axis: int | None = None) -> _Array2D[Incomplete]: ... + # TODO: everything below # mypy: disable-error-code=no-untyped-def -def compress_rowcols(x, axis=None): ... def compress_rows(a): ... def compress_cols(a): ... def mask_rows(a, axis=...): ... 
From dc9659831501af96400341cb152f2266dca32490 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 2 Jan 2026 16:54:31 +0100 Subject: [PATCH 1156/1718] TYP: ``ma.extras.compress_rows`` --- numpy/ma/extras.pyi | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/numpy/ma/extras.pyi b/numpy/ma/extras.pyi index e841a8870b1b..44ed8ae302da 100644 --- a/numpy/ma/extras.pyi +++ b/numpy/ma/extras.pyi @@ -420,10 +420,15 @@ def compress_rowcols[ScalarT: np.generic](x: _ArrayLike[ScalarT], axis: int | No @overload def compress_rowcols(x: ArrayLike, axis: int | None = None) -> _Array2D[Incomplete]: ... +# +@overload +def compress_rows[ScalarT: np.generic](x: _ArrayLike[ScalarT]) -> _Array2D[ScalarT]: ... +@overload +def compress_rows(x: ArrayLike) -> _Array2D[Incomplete]: ... + # TODO: everything below # mypy: disable-error-code=no-untyped-def -def compress_rows(a): ... def compress_cols(a): ... def mask_rows(a, axis=...): ... def mask_cols(a, axis=...): ... From 8d11f3f2f21058df42836d68350cbbf3bf226512 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 2 Jan 2026 18:19:41 +0100 Subject: [PATCH 1157/1718] TYP: ``ma.extras.compress_cols`` --- numpy/ma/extras.pyi | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/numpy/ma/extras.pyi b/numpy/ma/extras.pyi index 44ed8ae302da..6dda3bab97d2 100644 --- a/numpy/ma/extras.pyi +++ b/numpy/ma/extras.pyi @@ -422,14 +422,19 @@ def compress_rowcols(x: ArrayLike, axis: int | None = None) -> _Array2D[Incomple # @overload -def compress_rows[ScalarT: np.generic](x: _ArrayLike[ScalarT]) -> _Array2D[ScalarT]: ... +def compress_rows[ScalarT: np.generic](a: _ArrayLike[ScalarT]) -> _Array2D[ScalarT]: ... @overload -def compress_rows(x: ArrayLike) -> _Array2D[Incomplete]: ... +def compress_rows(a: ArrayLike) -> _Array2D[Incomplete]: ... + +# +@overload +def compress_cols[ScalarT: np.generic](a: _ArrayLike[ScalarT]) -> _Array2D[ScalarT]: ... 
+@overload +def compress_cols(a: ArrayLike) -> _Array2D[Incomplete]: ... # TODO: everything below # mypy: disable-error-code=no-untyped-def -def compress_cols(a): ... def mask_rows(a, axis=...): ... def mask_cols(a, axis=...): ... def ediff1d(arr, to_end=None, to_begin=None): ... From b75ed19714a68bc52b8d6249e97cfa40bfff54e2 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 2 Jan 2026 18:29:47 +0100 Subject: [PATCH 1158/1718] TYP: ``ma.extras.mask_{rowcols,rows,cols}`` --- numpy/ma/extras.pyi | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/numpy/ma/extras.pyi b/numpy/ma/extras.pyi index 6dda3bab97d2..6c1895c4f74f 100644 --- a/numpy/ma/extras.pyi +++ b/numpy/ma/extras.pyi @@ -4,6 +4,7 @@ from typing import Any, Concatenate, Literal as L, SupportsIndex, overload import numpy as np from numpy import _CastingKind +from numpy._globals import _NoValueType from numpy._typing import ( ArrayLike, DTypeLike, @@ -432,11 +433,14 @@ def compress_cols[ScalarT: np.generic](a: _ArrayLike[ScalarT]) -> _Array2D[Scala @overload def compress_cols(a: ArrayLike) -> _Array2D[Incomplete]: ... +# +def mask_rowcols(a: ArrayLike, axis: SupportsIndex | None = None) -> _MArray[Incomplete]: ... +def mask_rows(a: ArrayLike, axis: _NoValueType = ...) -> _MArray[Incomplete]: ... +def mask_cols(a: ArrayLike, axis: _NoValueType = ...) -> _MArray[Incomplete]: ... + # TODO: everything below # mypy: disable-error-code=no-untyped-def -def mask_rows(a, axis=...): ... -def mask_cols(a, axis=...): ... def ediff1d(arr, to_end=None, to_begin=None): ... def unique(ar1, return_index=False, return_inverse=False): ... def intersect1d(ar1, ar2, assume_unique=False): ... @@ -472,6 +476,3 @@ def clump_unmasked(a): ... def clump_masked(a): ... def vander(x, n=None): ... def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): ... - -# -def mask_rowcols(a: Incomplete, axis: Incomplete | None = None) -> MaskedArray[Incomplete, np.dtype[Incomplete]]: ... 
From 0d85f41c1b40fa39741869472e800ec9bad28a91 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 2 Jan 2026 18:43:32 +0100 Subject: [PATCH 1159/1718] TYP: ``ma.extras.ediff1d`` --- numpy/lib/_arraysetops_impl.pyi | 2 +- numpy/ma/extras.pyi | 32 +++++++++++++++++++++++++++++--- 2 files changed, 30 insertions(+), 4 deletions(-) diff --git a/numpy/lib/_arraysetops_impl.pyi b/numpy/lib/_arraysetops_impl.pyi index b91f8dee04ef..4290a79f1dcd 100644 --- a/numpy/lib/_arraysetops_impl.pyi +++ b/numpy/lib/_arraysetops_impl.pyi @@ -63,7 +63,7 @@ class UniqueInverseResult[ScalarT: np.generic](NamedTuple): values: _Array1D[ScalarT] inverse_indices: NDArray[np.intp] -# +# keep in sync with `ma.extras.ediff1d` @overload def ediff1d( ary: _ArrayLikeBool_co, diff --git a/numpy/ma/extras.pyi b/numpy/ma/extras.pyi index 6c1895c4f74f..5be45d8d727b 100644 --- a/numpy/ma/extras.pyi +++ b/numpy/ma/extras.pyi @@ -11,6 +11,7 @@ from numpy._typing import ( NDArray, _AnyShape, _ArrayLike, + _ArrayLikeBool_co, _ArrayLikeComplex_co, _ArrayLikeInt_co, _DTypeLike, @@ -72,6 +73,7 @@ __all__ = [ ] type _MArray[ScalarT: np.generic] = MaskedArray[_AnyShape, np.dtype[ScalarT]] +type _MArray1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] type _Array2D[ScalarT: np.generic] = np.ndarray[tuple[int, int], np.dtype[ScalarT]] type _ScalarNumeric = np.inexact | np.timedelta64 | np.object_ @@ -332,7 +334,7 @@ def median[ArrayT: NDArray[_ScalarNumeric]]( *, keepdims: L[True], ) -> ArrayT: ... -@overload # known scalar-type, keepdims=True +@overload # known scalar-type, keepdims=True_ArrayLikeNumber_co def median[ScalarT: _ScalarNumeric]( a: _ArrayLike[ScalarT], axis: _ShapeLike | None = None, @@ -438,10 +440,35 @@ def mask_rowcols(a: ArrayLike, axis: SupportsIndex | None = None) -> _MArray[Inc def mask_rows(a: ArrayLike, axis: _NoValueType = ...) -> _MArray[Incomplete]: ... def mask_cols(a: ArrayLike, axis: _NoValueType = ...) -> _MArray[Incomplete]: ... 
+# keep in sync with `lib._arraysetops_impl.ediff1d` +@overload +def ediff1d( + arr: _ArrayLikeBool_co, + to_end: ArrayLike | None = None, + to_begin: ArrayLike | None = None, +) -> _MArray1D[np.int8]: ... +@overload +def ediff1d[NumericT: _ScalarNumeric]( + arr: _ArrayLike[NumericT], + to_end: ArrayLike | None = None, + to_begin: ArrayLike | None = None, +) -> _MArray1D[NumericT]: ... +@overload +def ediff1d( + arr: _ArrayLike[np.datetime64[Any]], + to_end: ArrayLike | None = None, + to_begin: ArrayLike | None = None, +) -> _MArray1D[np.timedelta64]: ... +@overload +def ediff1d( + arr: _ArrayLikeComplex_co, + to_end: ArrayLike | None = None, + to_begin: ArrayLike | None = None, +) -> _MArray1D[Incomplete]: ... + # TODO: everything below # mypy: disable-error-code=no-untyped-def -def ediff1d(arr, to_end=None, to_begin=None): ... def unique(ar1, return_index=False, return_inverse=False): ... def intersect1d(ar1, ar2, assume_unique=False): ... def setxor1d(ar1, ar2, assume_unique=False): ... @@ -454,7 +481,6 @@ def corrcoef(x, y=None, rowvar=True, allow_masked=True): ... class MAxisConcatenator(AxisConcatenator): __slots__ = () - @staticmethod def concatenate(arrays: Incomplete, axis: int = 0) -> Incomplete: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] @classmethod From 19fabe8250ef3df7dfdbb9f5a49c284f49aa1809 Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Fri, 2 Jan 2026 19:49:54 +0100 Subject: [PATCH 1160/1718] DEP: Deprecate ndarray.resize, first just in typing (#30181) Also ensures that we do not use it anywhere in our own code. 
Co-authored-by: Joren Hammudoglu --- doc/release/upcoming_changes/30181.deprecation.rst | 6 ++++++ numpy/__init__.pyi | 4 ++++ numpy/_core/memmap.py | 3 +-- numpy/_core/tests/test_memmap.py | 3 +-- .../tests/data/pass/ndarray_shape_manipulation.py | 12 ++++++------ 5 files changed, 18 insertions(+), 10 deletions(-) create mode 100644 doc/release/upcoming_changes/30181.deprecation.rst diff --git a/doc/release/upcoming_changes/30181.deprecation.rst b/doc/release/upcoming_changes/30181.deprecation.rst new file mode 100644 index 000000000000..c9ca61dd67f9 --- /dev/null +++ b/doc/release/upcoming_changes/30181.deprecation.rst @@ -0,0 +1,6 @@ +Resizing a Numpy array inplace is deprecated +-------------------------------------------- +Resizing a Numpy array inplace is deprecated since mutating +an array is unsafe if an array is shared, especially by multiple +threads. As an alternative, you can create a resized array via ``np.resize``. + diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 61d2b103db92..5fc769f7e65d 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2248,8 +2248,10 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def tolist(self, /) -> Any: ... @overload + @deprecated("Resizing a NumPy array inplace has been deprecated in NumPy 2.4") def resize(self, new_shape: _ShapeLike, /, *, refcheck: py_bool = True) -> None: ... @overload + @deprecated("Resizing a NumPy array inplace has been deprecated in NumPy 2.4") def resize(self, /, *new_shape: SupportsIndex, refcheck: py_bool = True) -> None: ... # keep in sync with `ma.MaskedArray.squeeze` @@ -3778,8 +3780,10 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): # NOTE: this wont't raise, but won't do anything either @overload + @deprecated("Resizing a NumPy generic inplace has been deprecated in NumPy 2.5") def resize(self, /, *, refcheck: py_bool = True) -> None: ... 
@overload + @deprecated("Resizing a NumPy generic inplace has been deprecated in NumPy 2.5") def resize(self, new_shape: L[0, -1] | tuple[L[0, -1]] | tuple[()], /, *, refcheck: py_bool = True) -> None: ... # diff --git a/numpy/_core/memmap.py b/numpy/_core/memmap.py index 2632933f583b..89a36808e6f1 100644 --- a/numpy/_core/memmap.py +++ b/numpy/_core/memmap.py @@ -130,8 +130,7 @@ class memmap(ndarray): Examples -------- >>> import numpy as np - >>> data = np.arange(12, dtype=np.float32) - >>> data.resize((3,4)) + >>> data = np.arange(12, dtype=np.float32).reshape((3, 4)) This example uses a temporary file so that doctest doesn't write files to your directory. You would use a 'normal' filename. diff --git a/numpy/_core/tests/test_memmap.py b/numpy/_core/tests/test_memmap.py index 8e2aa0a507b1..c18a0201d789 100644 --- a/numpy/_core/tests/test_memmap.py +++ b/numpy/_core/tests/test_memmap.py @@ -36,8 +36,7 @@ def setup_method(self): self.tmpfp = NamedTemporaryFile(prefix='mmap') self.shape = (3, 4) self.dtype = 'float32' - self.data = arange(12, dtype=self.dtype) - self.data.resize(self.shape) + self.data = arange(12, dtype=self.dtype).reshape(self.shape) def teardown_method(self): self.tmpfp.close() diff --git a/numpy/typing/tests/data/pass/ndarray_shape_manipulation.py b/numpy/typing/tests/data/pass/ndarray_shape_manipulation.py index 0ca3dff392e1..1458339bf6ae 100644 --- a/numpy/typing/tests/data/pass/ndarray_shape_manipulation.py +++ b/numpy/typing/tests/data/pass/ndarray_shape_manipulation.py @@ -11,13 +11,13 @@ nd1.reshape(4, order="C") # resize -nd1.resize() -nd1.resize(4) -nd1.resize(2, 2) -nd1.resize((2, 2)) +nd1.resize() # type: ignore[deprecated] +nd1.resize(4) # type: ignore[deprecated] +nd1.resize(2, 2) # type: ignore[deprecated] +nd1.resize((2, 2)) # type: ignore[deprecated] -nd1.resize((2, 2), refcheck=True) -nd1.resize(4, refcheck=True) +nd1.resize((2, 2), refcheck=True) # type: ignore[deprecated] +nd1.resize(4, refcheck=True) # type: 
ignore[deprecated] nd2 = np.array([[1, 2], [3, 4]]) From c8fde8e87267a3b8d3dd142de4b06d036d2728ca Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 3 Jan 2026 12:16:48 +0100 Subject: [PATCH 1161/1718] TYP: ``ma.extras.unique`` --- numpy/ma/extras.pyi | 66 ++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 65 insertions(+), 1 deletion(-) diff --git a/numpy/ma/extras.pyi b/numpy/ma/extras.pyi index 5be45d8d727b..c8522e59941d 100644 --- a/numpy/ma/extras.pyi +++ b/numpy/ma/extras.pyi @@ -76,6 +76,7 @@ type _MArray[ScalarT: np.generic] = MaskedArray[_AnyShape, np.dtype[ScalarT]] type _MArray1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] type _Array2D[ScalarT: np.generic] = np.ndarray[tuple[int, int], np.dtype[ScalarT]] +type _IntArray = NDArray[np.intp] type _ScalarNumeric = np.inexact | np.timedelta64 | np.object_ type _ListSeqND[T] = list[T] | _NestedSequence[list[T]] @@ -466,10 +467,73 @@ def ediff1d( to_begin: ArrayLike | None = None, ) -> _MArray1D[Incomplete]: ... +# keep in sync with `lib._arraysetops_impl.unique`, minus `return_counts` +@overload # known scalar-type, FF +def unique[ScalarT: np.generic]( + ar1: _ArrayLike[ScalarT], + return_index: L[False] = False, + return_inverse: L[False] = False, +) -> NDArray[ScalarT]: ... +@overload # unknown scalar-type, FF +def unique( + ar1: ArrayLike, + return_index: L[False] = False, + return_inverse: L[False] = False, +) -> np.ndarray: ... +@overload # known scalar-type, TF +def unique[ScalarT: np.generic]( + ar1: _ArrayLike[ScalarT], + return_index: L[True], + return_inverse: L[False] = False, +) -> tuple[NDArray[ScalarT], _IntArray]: ... +@overload # unknown scalar-type, TFF +def unique( + ar1: ArrayLike, + return_index: L[True], + return_inverse: L[False] = False, +) -> tuple[np.ndarray, _IntArray]: ... 
+@overload # known scalar-type, FT (positional) +def unique[ScalarT: np.generic]( + ar1: _ArrayLike[ScalarT], + return_index: L[False], + return_inverse: L[True], +) -> tuple[NDArray[ScalarT], _IntArray]: ... +@overload # known scalar-type, FT (keyword) +def unique[ScalarT: np.generic]( + ar1: _ArrayLike[ScalarT], + return_index: L[False] = False, + *, + return_inverse: L[True], +) -> tuple[NDArray[ScalarT], _IntArray]: ... +@overload # unknown scalar-type, FT (positional) +def unique( + ar1: ArrayLike, + return_index: L[False], + return_inverse: L[True], +) -> tuple[np.ndarray, _IntArray]: ... +@overload # unknown scalar-type, FT (keyword) +def unique( + ar1: ArrayLike, + return_index: L[False] = False, + *, + return_inverse: L[True], +) -> tuple[np.ndarray, _IntArray]: ... +@overload # known scalar-type, TT +def unique[ScalarT: np.generic]( + ar1: _ArrayLike[ScalarT], + return_index: L[True], + return_inverse: L[True], +) -> tuple[NDArray[ScalarT], _IntArray, _IntArray]: ... +@overload # unknown scalar-type, TT +def unique( + ar1: ArrayLike, + return_index: L[True], + return_inverse: L[True], +) -> tuple[np.ndarray, _IntArray, _IntArray]: ... + # TODO: everything below # mypy: disable-error-code=no-untyped-def -def unique(ar1, return_index=False, return_inverse=False): ... def intersect1d(ar1, ar2, assume_unique=False): ... def setxor1d(ar1, ar2, assume_unique=False): ... def in1d(ar1, ar2, assume_unique=False, invert=False): ... 
From e4308d1ae202feb562757d3ad3361671072b06a1 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 3 Jan 2026 12:34:30 +0100 Subject: [PATCH 1162/1718] TYP: ``ma.extras.intersect1d`` --- numpy/ma/extras.pyi | 53 +++++++++++++++++++++++++++++++++------------ 1 file changed, 39 insertions(+), 14 deletions(-) diff --git a/numpy/ma/extras.pyi b/numpy/ma/extras.pyi index c8522e59941d..1e6186a4a3f5 100644 --- a/numpy/ma/extras.pyi +++ b/numpy/ma/extras.pyi @@ -1,6 +1,7 @@ from _typeshed import Incomplete +from ast import In from collections.abc import Callable, Sequence -from typing import Any, Concatenate, Literal as L, SupportsIndex, overload +from typing import Any, Concatenate, Literal as L, SupportsIndex, TypeVar, overload import numpy as np from numpy import _CastingKind @@ -80,6 +81,23 @@ type _IntArray = NDArray[np.intp] type _ScalarNumeric = np.inexact | np.timedelta64 | np.object_ type _ListSeqND[T] = list[T] | _NestedSequence[list[T]] +# Explicitly set all allowed values to prevent accidental castings to +# abstract dtypes (their common super-type). +# Only relevant if two or more arguments are parametrized, (e.g. `setdiff1d`) +# which could result in, for example, `int64` and `float64`producing a +# `number[_64Bit]` array +_AnyScalarT = TypeVar( + "_AnyScalarT", + np.bool, + np.int8, np.int16, np.int32, np.int64, np.intp, + np.uint8, np.uint16, np.uint32, np.uint64, np.uintp, + np.float16, np.float32, np.float64, np.longdouble, + np.complex64, np.complex128, np.clongdouble, + np.timedelta64, np.datetime64, + np.bytes_, np.str_, np.void, np.object_, + np.integer, np.floating, np.complexfloating, np.character, +) # fmt: skip + ### # keep in sync with `numpy._core.shape_base.atleast_1d` @@ -327,7 +345,7 @@ def median( keepdims: L[False] = False, ) -> np.complex128 | Any: ... 
@overload # known array-type, keepdims=True -def median[ArrayT: NDArray[_ScalarNumeric]]( +def median[ArrayT: _MArray[_ScalarNumeric]]( a: ArrayT, axis: _ShapeLike | None = None, out: None = None, @@ -335,7 +353,7 @@ def median[ArrayT: NDArray[_ScalarNumeric]]( *, keepdims: L[True], ) -> ArrayT: ... -@overload # known scalar-type, keepdims=True_ArrayLikeNumber_co +@overload # known scalar-type, keepdims=True def median[ScalarT: _ScalarNumeric]( a: _ArrayLike[ScalarT], axis: _ShapeLike | None = None, @@ -473,68 +491,75 @@ def unique[ScalarT: np.generic]( ar1: _ArrayLike[ScalarT], return_index: L[False] = False, return_inverse: L[False] = False, -) -> NDArray[ScalarT]: ... +) -> _MArray[ScalarT]: ... @overload # unknown scalar-type, FF def unique( ar1: ArrayLike, return_index: L[False] = False, return_inverse: L[False] = False, -) -> np.ndarray: ... +) -> _MArray[Incomplete]: ... @overload # known scalar-type, TF def unique[ScalarT: np.generic]( ar1: _ArrayLike[ScalarT], return_index: L[True], return_inverse: L[False] = False, -) -> tuple[NDArray[ScalarT], _IntArray]: ... +) -> tuple[_MArray[ScalarT], _IntArray]: ... @overload # unknown scalar-type, TFF def unique( ar1: ArrayLike, return_index: L[True], return_inverse: L[False] = False, -) -> tuple[np.ndarray, _IntArray]: ... +) -> tuple[_MArray[Incomplete], _IntArray]: ... @overload # known scalar-type, FT (positional) def unique[ScalarT: np.generic]( ar1: _ArrayLike[ScalarT], return_index: L[False], return_inverse: L[True], -) -> tuple[NDArray[ScalarT], _IntArray]: ... +) -> tuple[_MArray[ScalarT], _IntArray]: ... @overload # known scalar-type, FT (keyword) def unique[ScalarT: np.generic]( ar1: _ArrayLike[ScalarT], return_index: L[False] = False, *, return_inverse: L[True], -) -> tuple[NDArray[ScalarT], _IntArray]: ... +) -> tuple[_MArray[ScalarT], _IntArray]: ... 
@overload # unknown scalar-type, FT (positional) def unique( ar1: ArrayLike, return_index: L[False], return_inverse: L[True], -) -> tuple[np.ndarray, _IntArray]: ... +) -> tuple[_MArray[Incomplete], _IntArray]: ... @overload # unknown scalar-type, FT (keyword) def unique( ar1: ArrayLike, return_index: L[False] = False, *, return_inverse: L[True], -) -> tuple[np.ndarray, _IntArray]: ... +) -> tuple[_MArray[Incomplete], _IntArray]: ... @overload # known scalar-type, TT def unique[ScalarT: np.generic]( ar1: _ArrayLike[ScalarT], return_index: L[True], return_inverse: L[True], -) -> tuple[NDArray[ScalarT], _IntArray, _IntArray]: ... +) -> tuple[_MArray[ScalarT], _IntArray, _IntArray]: ... @overload # unknown scalar-type, TT def unique( ar1: ArrayLike, return_index: L[True], return_inverse: L[True], -) -> tuple[np.ndarray, _IntArray, _IntArray]: ... +) -> tuple[_MArray[Incomplete], _IntArray, _IntArray]: ... + +# keep in sync with `lib._arraysetops_impl.intersect1d` +@overload # known scalar-type, return_indices=False (default) +def intersect1d( + ar1: _ArrayLike[_AnyScalarT], ar2: _ArrayLike[_AnyScalarT], assume_unique: bool = False +) -> _MArray1D[_AnyScalarT]: ... +@overload # unknown scalar-type, return_indices=False (default) +def intersect1d(ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = False) -> _MArray1D[Incomplete]: ... # TODO: everything below # mypy: disable-error-code=no-untyped-def -def intersect1d(ar1, ar2, assume_unique=False): ... def setxor1d(ar1, ar2, assume_unique=False): ... def in1d(ar1, ar2, assume_unique=False, invert=False): ... def isin(element, test_elements, assume_unique=False, invert=False): ... 
From e05c81e10a78e1b30e93051d16a4d69c0145c591 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 3 Jan 2026 12:38:09 +0100 Subject: [PATCH 1163/1718] TYP: ``ma.extras.setxor1d`` --- numpy/ma/extras.pyi | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/numpy/ma/extras.pyi b/numpy/ma/extras.pyi index 1e6186a4a3f5..dd96d974c9cc 100644 --- a/numpy/ma/extras.pyi +++ b/numpy/ma/extras.pyi @@ -557,14 +557,21 @@ def intersect1d( @overload # unknown scalar-type, return_indices=False (default) def intersect1d(ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = False) -> _MArray1D[Incomplete]: ... +# keep in sync with `lib._arraysetops_impl.setxor1d` +@overload +def setxor1d( + ar1: _ArrayLike[_AnyScalarT], ar2: _ArrayLike[_AnyScalarT], assume_unique: bool = False +) -> _MArray1D[_AnyScalarT]: ... +@overload +def setxor1d(ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = False) -> _MArray1D[Incomplete]: ... + # TODO: everything below # mypy: disable-error-code=no-untyped-def -def setxor1d(ar1, ar2, assume_unique=False): ... -def in1d(ar1, ar2, assume_unique=False, invert=False): ... -def isin(element, test_elements, assume_unique=False, invert=False): ... def union1d(ar1, ar2): ... def setdiff1d(ar1, ar2, assume_unique=False): ... +def in1d(ar1, ar2, assume_unique=False, invert=False): ... +def isin(element, test_elements, assume_unique=False, invert=False): ... def cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None): ... def corrcoef(x, y=None, rowvar=True, allow_masked=True): ... 
From 3267ff961976a75c0d40ba064fa62f795f39c2b4 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 3 Jan 2026 12:39:35 +0100 Subject: [PATCH 1164/1718] TYP: ``ma.extras.setdiff1d`` --- numpy/ma/extras.pyi | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/numpy/ma/extras.pyi b/numpy/ma/extras.pyi index dd96d974c9cc..6fcde6b053ed 100644 --- a/numpy/ma/extras.pyi +++ b/numpy/ma/extras.pyi @@ -565,11 +565,23 @@ def setxor1d( @overload def setxor1d(ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = False) -> _MArray1D[Incomplete]: ... +# keep in sync with `lib._arraysetops_impl.union1d` +@overload +def union1d(ar1: _ArrayLike[_AnyScalarT], ar2: _ArrayLike[_AnyScalarT]) -> _MArray1D[_AnyScalarT]: ... +@overload +def union1d(ar1: ArrayLike, ar2: ArrayLike) -> _MArray1D[Incomplete]: ... + +# keep in sync with `lib._arraysetops_impl.setdiff1d` +@overload +def setdiff1d( + ar1: _ArrayLike[_AnyScalarT], ar2: _ArrayLike[_AnyScalarT], assume_unique: bool = False +) -> _MArray1D[_AnyScalarT]: ... +@overload +def setdiff1d(ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = False) -> _MArray1D[Incomplete]: ... + # TODO: everything below # mypy: disable-error-code=no-untyped-def -def union1d(ar1, ar2): ... -def setdiff1d(ar1, ar2, assume_unique=False): ... def in1d(ar1, ar2, assume_unique=False, invert=False): ... def isin(element, test_elements, assume_unique=False, invert=False): ... def cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None): ... def corrcoef(x, y=None, rowvar=True, allow_masked=True): ... 
From 4d34beca203cb57f50e8b4f56dc25f3a291808d5 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 3 Jan 2026 12:48:23 +0100 Subject: [PATCH 1165/1718] TYP: ``ma.extras.isin`` and ``ma.extras.in1d`` --- numpy/ma/extras.pyi | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/numpy/ma/extras.pyi b/numpy/ma/extras.pyi index 6fcde6b053ed..fe3579e660f2 100644 --- a/numpy/ma/extras.pyi +++ b/numpy/ma/extras.pyi @@ -579,11 +579,17 @@ def setdiff1d( @overload def setdiff1d(ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = False) -> _MArray1D[Incomplete]: ... +# +def in1d(ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = False, invert: bool = False) -> _MArray1D[np.bool]: ... + +# keep in sync with `lib._arraysetops_impl.isin` +def isin( + element: ArrayLike, test_elements: ArrayLike, assume_unique: bool = False, invert: bool = False +) -> _MArray[np.bool]: ... + # TODO: everything below # mypy: disable-error-code=no-untyped-def -def in1d(ar1, ar2, assume_unique=False, invert=False): ... -def isin(element, test_elements, assume_unique=False, invert=False): ... def cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None): ... def corrcoef(x, y=None, rowvar=True, allow_masked=True): ... 
From dbc83ca69fe84701082468d27a20ce91a7907097 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 3 Jan 2026 13:07:23 +0100 Subject: [PATCH 1166/1718] TYP: ``ma.extras.cov`` --- numpy/ma/extras.pyi | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/numpy/ma/extras.pyi b/numpy/ma/extras.pyi index fe3579e660f2..aa4bcd69f5cc 100644 --- a/numpy/ma/extras.pyi +++ b/numpy/ma/extras.pyi @@ -79,6 +79,7 @@ type _Array2D[ScalarT: np.generic] = np.ndarray[tuple[int, int], np.dtype[Scalar type _IntArray = NDArray[np.intp] type _ScalarNumeric = np.inexact | np.timedelta64 | np.object_ +type _InexactDouble = np.float64 | np.longdouble | np.complex128 | np.clongdouble type _ListSeqND[T] = list[T] | _NestedSequence[list[T]] # Explicitly set all allowed values to prevent accidental castings to @@ -587,10 +588,19 @@ def isin( element: ArrayLike, test_elements: ArrayLike, assume_unique: bool = False, invert: bool = False ) -> _MArray[np.bool]: ... +# +def cov( + x: ArrayLike, + y: ArrayLike | None = None, + rowvar: bool = True, + bias: bool = False, + allow_masked: bool = True, + ddof: int | None = None +) -> _MArray[Incomplete]: ... + # TODO: everything below # mypy: disable-error-code=no-untyped-def -def cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None): ... def corrcoef(x, y=None, rowvar=True, allow_masked=True): ... class MAxisConcatenator(AxisConcatenator): From 74c62b56fa6a8bcd0a3468041a64224c8e662c36 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 3 Jan 2026 13:12:23 +0100 Subject: [PATCH 1167/1718] TYP: ``ma.extras.corrcoef`` --- numpy/ma/extras.pyi | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/numpy/ma/extras.pyi b/numpy/ma/extras.pyi index aa4bcd69f5cc..b69209b9e0d2 100644 --- a/numpy/ma/extras.pyi +++ b/numpy/ma/extras.pyi @@ -588,7 +588,7 @@ def isin( element: ArrayLike, test_elements: ArrayLike, assume_unique: bool = False, invert: bool = False ) -> _MArray[np.bool]: ... 
-# +# keep in sync with `corrcoef` def cov( x: ArrayLike, y: ArrayLike | None = None, @@ -598,11 +598,12 @@ def cov( ddof: int | None = None ) -> _MArray[Incomplete]: ... +# keep in sync with `cov` +def corrcoef(x: ArrayLike, y: ArrayLike | None = None, rowvar: bool = True, allow_masked: bool = True) -> _MArray[Incomplete]: ... + # TODO: everything below # mypy: disable-error-code=no-untyped-def -def corrcoef(x, y=None, rowvar=True, allow_masked=True): ... - class MAxisConcatenator(AxisConcatenator): __slots__ = () @staticmethod From 58c0bfad9bf81fb3c2cac71540b2cc6e29705220 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 3 Jan 2026 13:13:24 +0100 Subject: [PATCH 1168/1718] TYP: ``ma.extras.mr_`` --- numpy/ma/extras.pyi | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/numpy/ma/extras.pyi b/numpy/ma/extras.pyi index b69209b9e0d2..2f8ae8e236dc 100644 --- a/numpy/ma/extras.pyi +++ b/numpy/ma/extras.pyi @@ -1,7 +1,7 @@ from _typeshed import Incomplete from ast import In from collections.abc import Callable, Sequence -from typing import Any, Concatenate, Literal as L, SupportsIndex, TypeVar, overload +from typing import Any, Concatenate, Final, Literal as L, SupportsIndex, TypeVar, overload import numpy as np from numpy import _CastingKind @@ -613,10 +613,9 @@ class MAxisConcatenator(AxisConcatenator): class mr_class(MAxisConcatenator): __slots__ = () - def __init__(self) -> None: ... -mr_: mr_class +mr_: Final[mr_class] = ... def ndenumerate(a, compressed=True): ... def flatnotmasked_edges(a): ... 
From 696a11327e55fc40e0ded267ef0375c48d484077 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 3 Jan 2026 13:18:03 +0100 Subject: [PATCH 1169/1718] TYP: ``ma.extras.MAxisConcatenator.concatenate`` --- numpy/ma/extras.pyi | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/numpy/ma/extras.pyi b/numpy/ma/extras.pyi index 2f8ae8e236dc..a2c2fc097aaa 100644 --- a/numpy/ma/extras.pyi +++ b/numpy/ma/extras.pyi @@ -1,7 +1,8 @@ -from _typeshed import Incomplete +from _typeshed import Incomplete, SupportsLenAndGetItem from ast import In from collections.abc import Callable, Sequence from typing import Any, Concatenate, Final, Literal as L, SupportsIndex, TypeVar, overload +from typing_extensions import override import numpy as np from numpy import _CastingKind @@ -606,8 +607,17 @@ def corrcoef(x: ArrayLike, y: ArrayLike | None = None, rowvar: bool = True, allo class MAxisConcatenator(AxisConcatenator): __slots__ = () + + # keep in sync with `ma.core.concatenate` + @override # type: ignore[override] + @overload + @staticmethod + def concatenate[ScalarT: np.generic](arrays: _ArrayLike[ScalarT], axis: SupportsIndex | None = 0) -> _MArray[ScalarT]: ... + @overload @staticmethod - def concatenate(arrays: Incomplete, axis: int = 0) -> Incomplete: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def concatenate(arrays: SupportsLenAndGetItem[ArrayLike], axis: SupportsIndex | None = 0) -> _MArray[Incomplete]: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # @classmethod def makemat(cls, arr: Incomplete) -> Incomplete: ... 
# type: ignore[override] # pyright: ignore[reportIncompatibleVariableOverride] From 0dc48c0640bbb2fde7d72db8b2694bc2156b8d18 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 3 Jan 2026 13:20:47 +0100 Subject: [PATCH 1170/1718] TYP: ``ma.extras.MAxisConcatenator.makemat`` --- numpy/ma/extras.pyi | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/numpy/ma/extras.pyi b/numpy/ma/extras.pyi index a2c2fc097aaa..689c11f3012f 100644 --- a/numpy/ma/extras.pyi +++ b/numpy/ma/extras.pyi @@ -618,8 +618,9 @@ class MAxisConcatenator(AxisConcatenator): def concatenate(arrays: SupportsLenAndGetItem[ArrayLike], axis: SupportsIndex | None = 0) -> _MArray[Incomplete]: ... # pyright: ignore[reportIncompatibleMethodOverride] # + @override @classmethod - def makemat(cls, arr: Incomplete) -> Incomplete: ... # type: ignore[override] # pyright: ignore[reportIncompatibleVariableOverride] + def makemat(cls, /, arr: ArrayLike) -> _MArray[Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleVariableOverride] class mr_class(MAxisConcatenator): __slots__ = () From 2fcdd73a2f570a46644b1baf08fa7d4fac40212e Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 3 Jan 2026 13:23:15 +0100 Subject: [PATCH 1171/1718] TYP: ``ma.extras.ndenumerate`` --- numpy/ma/extras.pyi | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/numpy/ma/extras.pyi b/numpy/ma/extras.pyi index 689c11f3012f..8ef6e11b7699 100644 --- a/numpy/ma/extras.pyi +++ b/numpy/ma/extras.pyi @@ -1,6 +1,6 @@ from _typeshed import Incomplete, SupportsLenAndGetItem from ast import In -from collections.abc import Callable, Sequence +from collections.abc import Callable, Iterator, Sequence from typing import Any, Concatenate, Final, Literal as L, SupportsIndex, TypeVar, overload from typing_extensions import override @@ -602,8 +602,6 @@ def cov( # keep in sync with `cov` def corrcoef(x: ArrayLike, y: ArrayLike | None = None, rowvar: bool = True, allow_masked: bool = True) -> 
_MArray[Incomplete]: ... -# TODO: everything below -# mypy: disable-error-code=no-untyped-def class MAxisConcatenator(AxisConcatenator): __slots__ = () @@ -628,7 +626,12 @@ class mr_class(MAxisConcatenator): mr_: Final[mr_class] = ... -def ndenumerate(a, compressed=True): ... +# +def ndenumerate(a: ArrayLike, compressed: bool = True) -> Iterator[tuple[_AnyShape, Incomplete]]: ... + +# TODO: everything below +# mypy: disable-error-code=no-untyped-def + def flatnotmasked_edges(a): ... def notmasked_edges(a, axis=None): ... def flatnotmasked_contiguous(a): ... From 21085821ee821fc417d6ae9fb8696f2642fdfc1e Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 3 Jan 2026 13:31:56 +0100 Subject: [PATCH 1172/1718] TYP: ``ma.extras.flatnotmasked_edges`` --- numpy/ma/extras.pyi | 38 ++++++++++++++++++++++++++++++++------ 1 file changed, 32 insertions(+), 6 deletions(-) diff --git a/numpy/ma/extras.pyi b/numpy/ma/extras.pyi index 8ef6e11b7699..875223793789 100644 --- a/numpy/ma/extras.pyi +++ b/numpy/ma/extras.pyi @@ -1,8 +1,15 @@ from _typeshed import Incomplete, SupportsLenAndGetItem -from ast import In from collections.abc import Callable, Iterator, Sequence -from typing import Any, Concatenate, Final, Literal as L, SupportsIndex, TypeVar, overload -from typing_extensions import override +from typing import ( + Any, + Concatenate, + Final, + Literal as L, + SupportsIndex, + TypeVar, + overload, + override, +) import numpy as np from numpy import _CastingKind @@ -18,6 +25,7 @@ from numpy._typing import ( _ArrayLikeInt_co, _DTypeLike, _NestedSequence, + _Shape, _ShapeLike, ) from numpy.lib._function_base_impl import average @@ -602,7 +610,6 @@ def cov( # keep in sync with `cov` def corrcoef(x: ArrayLike, y: ArrayLike | None = None, rowvar: bool = True, allow_masked: bool = True) -> _MArray[Incomplete]: ... - class MAxisConcatenator(AxisConcatenator): __slots__ = () @@ -627,12 +634,31 @@ class mr_class(MAxisConcatenator): mr_: Final[mr_class] = ... 
# -def ndenumerate(a: ArrayLike, compressed: bool = True) -> Iterator[tuple[_AnyShape, Incomplete]]: ... +@overload +def ndenumerate[ShapeT: _Shape, ScalarT: np.generic]( + a: np.ndarray[ShapeT, np.dtype[ScalarT]], + compressed: bool = True, +) -> Iterator[tuple[ShapeT, ScalarT]]: ... +@overload +def ndenumerate[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + compressed: bool = True, +) -> Iterator[tuple[_AnyShape, ScalarT]]: ... +@overload +def ndenumerate( + a: ArrayLike, + compressed: bool = True, +) -> Iterator[tuple[_AnyShape, Incomplete]]: ... + +# +@overload +def flatnotmasked_edges[ScalarT: np.generic](a: _ArrayLike[ScalarT]) -> _MArray1D[ScalarT] | None: ... +@overload +def flatnotmasked_edges(a: ArrayLike) -> _MArray1D[Incomplete] | None: ... # TODO: everything below # mypy: disable-error-code=no-untyped-def -def flatnotmasked_edges(a): ... def notmasked_edges(a, axis=None): ... def flatnotmasked_contiguous(a): ... def notmasked_contiguous(a, axis=None): ... From f7fb3ac0a96c6c8639c6d3e77cbe2fbddc4b95ba Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 3 Jan 2026 13:38:12 +0100 Subject: [PATCH 1173/1718] TYP: ``ma.extras.notmasked_edges`` --- numpy/ma/extras.pyi | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/numpy/ma/extras.pyi b/numpy/ma/extras.pyi index 875223793789..810079ade77a 100644 --- a/numpy/ma/extras.pyi +++ b/numpy/ma/extras.pyi @@ -656,10 +656,17 @@ def flatnotmasked_edges[ScalarT: np.generic](a: _ArrayLike[ScalarT]) -> _MArray1 @overload def flatnotmasked_edges(a: ArrayLike) -> _MArray1D[Incomplete] | None: ... +# +@overload +def notmasked_edges[ScalarT: np.generic](a: _ArrayLike[ScalarT], axis: None = None) -> _MArray1D[ScalarT] | None: ... +@overload +def notmasked_edges(a: ArrayLike, axis: None = None) -> _MArray1D[Incomplete] | None: ... +@overload +def notmasked_edges(a: ArrayLike, axis: SupportsIndex) -> Incomplete: ... 
+ # TODO: everything below # mypy: disable-error-code=no-untyped-def -def notmasked_edges(a, axis=None): ... def flatnotmasked_contiguous(a): ... def notmasked_contiguous(a, axis=None): ... def clump_unmasked(a): ... From e121a8401563c82b6d9a9775a1cbe10967eb206f Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 3 Jan 2026 13:43:46 +0100 Subject: [PATCH 1174/1718] TYP: ``ma.extras.flatnotmasked_contiguous`` --- numpy/ma/extras.pyi | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/numpy/ma/extras.pyi b/numpy/ma/extras.pyi index 810079ade77a..d482c56b9ee0 100644 --- a/numpy/ma/extras.pyi +++ b/numpy/ma/extras.pyi @@ -664,10 +664,12 @@ def notmasked_edges(a: ArrayLike, axis: None = None) -> _MArray1D[Incomplete] | @overload def notmasked_edges(a: ArrayLike, axis: SupportsIndex) -> Incomplete: ... +# +def flatnotmasked_contiguous(a: ArrayLike) -> Sequence[slice[int, int, None]]: ... + # TODO: everything below # mypy: disable-error-code=no-untyped-def -def flatnotmasked_contiguous(a): ... def notmasked_contiguous(a, axis=None): ... def clump_unmasked(a): ... def clump_masked(a): ... From c3fc19bbf0a3e52955d55d05dd875321a6d3c696 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 3 Jan 2026 13:47:24 +0100 Subject: [PATCH 1175/1718] TYP: ``ma.extras.notmasked_contiguous`` --- numpy/ma/extras.pyi | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/numpy/ma/extras.pyi b/numpy/ma/extras.pyi index d482c56b9ee0..6499747f5b02 100644 --- a/numpy/ma/extras.pyi +++ b/numpy/ma/extras.pyi @@ -665,12 +665,17 @@ def notmasked_edges(a: ArrayLike, axis: None = None) -> _MArray1D[Incomplete] | def notmasked_edges(a: ArrayLike, axis: SupportsIndex) -> Incomplete: ... # -def flatnotmasked_contiguous(a: ArrayLike) -> Sequence[slice[int, int, None]]: ... +def flatnotmasked_contiguous(a: ArrayLike) -> list[slice[int, int, None]]: ... + +# +@overload +def notmasked_contiguous(a: ArrayLike, axis: None = None) -> list[slice[int, int, None]]: ... 
+@overload +def notmasked_contiguous(a: ArrayLike, axis: SupportsIndex) -> list[Incomplete]: ... # TODO: everything below # mypy: disable-error-code=no-untyped-def -def notmasked_contiguous(a, axis=None): ... def clump_unmasked(a): ... def clump_masked(a): ... def vander(x, n=None): ... From 5d2421ff675c81552c98e98fe561f9b734abf329 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 3 Jan 2026 13:48:26 +0100 Subject: [PATCH 1176/1718] TYP: ``ma.extras.clump_unmasked`` --- numpy/ma/extras.pyi | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/numpy/ma/extras.pyi b/numpy/ma/extras.pyi index 6499747f5b02..ba4550388c25 100644 --- a/numpy/ma/extras.pyi +++ b/numpy/ma/extras.pyi @@ -673,10 +673,12 @@ def notmasked_contiguous(a: ArrayLike, axis: None = None) -> list[slice[int, int @overload def notmasked_contiguous(a: ArrayLike, axis: SupportsIndex) -> list[Incomplete]: ... +# +def clump_unmasked(a: np.ndarray) -> list[slice[int, int, None]]: ... + # TODO: everything below # mypy: disable-error-code=no-untyped-def -def clump_unmasked(a): ... def clump_masked(a): ... def vander(x, n=None): ... def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): ... From fdc917e44ea26f15f83c51e6e05e66ef55f87a2f Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 3 Jan 2026 14:04:43 +0100 Subject: [PATCH 1177/1718] TYP: ``ma.extras.vander`` --- numpy/lib/_twodim_base_impl.pyi | 1 + numpy/ma/extras.pyi | 18 ++++++++++++++++-- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/numpy/lib/_twodim_base_impl.pyi b/numpy/lib/_twodim_base_impl.pyi index de0d69670993..af8e3d72c4d8 100644 --- a/numpy/lib/_twodim_base_impl.pyi +++ b/numpy/lib/_twodim_base_impl.pyi @@ -207,6 +207,7 @@ def triu[ScalarT: np.generic](m: _ArrayLike[ScalarT], k: int = 0) -> NDArray[Sca def triu(m: ArrayLike, k: int = 0) -> NDArray[Any]: ... 
# we use `list` (invariant) instead of `Sequence` (covariant) to avoid overlap +# keep in sync with `ma.extras.vander` @overload def vander[ScalarT: np.number | np.object_](x: _ArrayLike1D[ScalarT], N: int | None = None, increasing: bool = False) -> _Array2D[ScalarT]: ... @overload diff --git a/numpy/ma/extras.pyi b/numpy/ma/extras.pyi index ba4550388c25..c8767d79a765 100644 --- a/numpy/ma/extras.pyi +++ b/numpy/ma/extras.pyi @@ -25,6 +25,7 @@ from numpy._typing import ( _ArrayLikeInt_co, _DTypeLike, _NestedSequence, + _NumberLike_co, _Shape, _ShapeLike, ) @@ -84,6 +85,7 @@ __all__ = [ type _MArray[ScalarT: np.generic] = MaskedArray[_AnyShape, np.dtype[ScalarT]] type _MArray1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] +type _MArray2D[ScalarT: np.generic] = np.ndarray[tuple[int, int], np.dtype[ScalarT]] type _Array2D[ScalarT: np.generic] = np.ndarray[tuple[int, int], np.dtype[ScalarT]] type _IntArray = NDArray[np.intp] @@ -674,11 +676,23 @@ def notmasked_contiguous(a: ArrayLike, axis: None = None) -> list[slice[int, int def notmasked_contiguous(a: ArrayLike, axis: SupportsIndex) -> list[Incomplete]: ... # +def _ezclump(mask: np.ndarray) -> list[slice[int, int, None]]: ... # undocumented def clump_unmasked(a: np.ndarray) -> list[slice[int, int, None]]: ... +def clump_masked(a: np.ndarray) -> list[slice[int, int, None]]: ... + +# keep in sync with `lib._twodim_base_impl.vander` +@overload +def vander[ScalarT: np.number | np.object_](x: _ArrayLike[ScalarT], n: int | None = None) -> _MArray2D[ScalarT]: ... +@overload +def vander(x: _ArrayLike[np.bool] | list[int], n: int | None = None) -> _MArray2D[np.int_]: ... +@overload +def vander(x: list[float], n: int | None = None) -> _MArray2D[np.float64]: ... +@overload +def vander(x: list[complex], n: int | None = None) -> _MArray2D[np.complex128]: ... +@overload # fallback +def vander(x: Sequence[_NumberLike_co], n: int | None = None) -> _MArray2D[Any]: ... 
# TODO: everything below # mypy: disable-error-code=no-untyped-def -def clump_masked(a): ... -def vander(x, n=None): ... def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): ... From b32e803117d2b7c613608d0ee7af14722ffc7b90 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 3 Jan 2026 14:20:39 +0100 Subject: [PATCH 1178/1718] TYP: ``ma.extras.polyfit`` --- numpy/ma/extras.pyi | 74 ++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 70 insertions(+), 4 deletions(-) diff --git a/numpy/ma/extras.pyi b/numpy/ma/extras.pyi index c8767d79a765..7203ece3f86b 100644 --- a/numpy/ma/extras.pyi +++ b/numpy/ma/extras.pyi @@ -22,6 +22,7 @@ from numpy._typing import ( _ArrayLike, _ArrayLikeBool_co, _ArrayLikeComplex_co, + _ArrayLikeFloat_co, _ArrayLikeInt_co, _DTypeLike, _NestedSequence, @@ -93,6 +94,10 @@ type _ScalarNumeric = np.inexact | np.timedelta64 | np.object_ type _InexactDouble = np.float64 | np.longdouble | np.complex128 | np.clongdouble type _ListSeqND[T] = list[T] | _NestedSequence[list[T]] +# helper aliases for polyfit +type _2Tup[T] = tuple[T, T] +type _5Tup[T] = tuple[T, NDArray[np.float64], NDArray[np.int32], NDArray[np.float64], NDArray[np.float64]] + # Explicitly set all allowed values to prevent accidental castings to # abstract dtypes (their common super-type). # Only relevant if two or more arguments are parametrized, (e.g. `setdiff1d`) @@ -692,7 +697,68 @@ def vander(x: list[complex], n: int | None = None) -> _MArray2D[np.complex128]: @overload # fallback def vander(x: Sequence[_NumberLike_co], n: int | None = None) -> _MArray2D[Any]: ... -# TODO: everything below -# mypy: disable-error-code=no-untyped-def - -def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): ... 
+# keep roughly in sync with `lib._polynomial_impl.polyfit` +@overload # float dtype, cov: False (default) +def polyfit( + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, + deg: int, + rcond: bool | None = None, + full: L[False] = False, + w: _ArrayLikeFloat_co | None = None, + cov: L[False] = False +) -> NDArray[np.float64]: ... +@overload # float dtype, cov: True | "unscaled" (keyword) +def polyfit( + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, + deg: int, + rcond: bool | None = None, + full: L[False] = False, + w: _ArrayLikeFloat_co | None = None, + *, + cov: L[True, "unscaled"], +) -> _2Tup[NDArray[np.float64]]: ... +@overload # float dtype, full: True (keyword) +def polyfit( + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, + deg: int, + rcond: bool | None = None, + *, + full: L[True], + w: _ArrayLikeFloat_co | None = None, + cov: bool | L["unscaled"] = False, +) -> _5Tup[NDArray[np.float64]]: ... +@overload # complex dtype, cov: False (default) +def polyfit( + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + deg: int, + rcond: bool | None = None, + full: L[False] = False, + w: _ArrayLikeFloat_co | None = None, + cov: L[False] = False +) -> NDArray[Incomplete]: ... +@overload # complex dtype, cov: True | "unscaled" (keyword) +def polyfit( + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + deg: int, + rcond: bool | None = None, + full: L[False] = False, + w: _ArrayLikeFloat_co | None = None, + *, + cov: L[True, "unscaled"], +) -> _2Tup[NDArray[np.complex128 | Any]]: ... +@overload # complex dtype, full: True (keyword) +def polyfit( + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + deg: int, + rcond: bool | None = None, + *, + full: L[True], + w: _ArrayLikeFloat_co | None = None, + cov: bool | L["unscaled"] = False, +) -> _5Tup[NDArray[np.complex128 | Any]]: ... 
From 13dca89becbe56c31df59a3441df865444f2eb21 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 3 Jan 2026 14:43:29 +0100 Subject: [PATCH 1179/1718] TYP: ``ma.extras`` review suggestions --- numpy/ma/extras.pyi | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/numpy/ma/extras.pyi b/numpy/ma/extras.pyi index 7203ece3f86b..f564fc13e9a0 100644 --- a/numpy/ma/extras.pyi +++ b/numpy/ma/extras.pyi @@ -85,8 +85,9 @@ __all__ = [ ] type _MArray[ScalarT: np.generic] = MaskedArray[_AnyShape, np.dtype[ScalarT]] -type _MArray1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] -type _MArray2D[ScalarT: np.generic] = np.ndarray[tuple[int, int], np.dtype[ScalarT]] +type _MArray1D[ScalarT: np.generic] = MaskedArray[tuple[int], np.dtype[ScalarT]] +type _MArray2D[ScalarT: np.generic] = MaskedArray[tuple[int, int], np.dtype[ScalarT]] +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] type _Array2D[ScalarT: np.generic] = np.ndarray[tuple[int, int], np.dtype[ScalarT]] type _IntArray = NDArray[np.intp] @@ -101,7 +102,7 @@ type _5Tup[T] = tuple[T, NDArray[np.float64], NDArray[np.int32], NDArray[np.floa # Explicitly set all allowed values to prevent accidental castings to # abstract dtypes (their common super-type). # Only relevant if two or more arguments are parametrized, (e.g. `setdiff1d`) -# which could result in, for example, `int64` and `float64`producing a +# which could result in, for example, `int64` and `float64` producing a # `number[_64Bit]` array _AnyScalarT = TypeVar( "_AnyScalarT", @@ -659,15 +660,15 @@ def ndenumerate( # @overload -def flatnotmasked_edges[ScalarT: np.generic](a: _ArrayLike[ScalarT]) -> _MArray1D[ScalarT] | None: ... +def flatnotmasked_edges[ScalarT: np.generic](a: _ArrayLike[ScalarT]) -> _Array1D[ScalarT] | None: ... @overload -def flatnotmasked_edges(a: ArrayLike) -> _MArray1D[Incomplete] | None: ... 
+def flatnotmasked_edges(a: ArrayLike) -> _Array1D[Incomplete] | None: ... # @overload -def notmasked_edges[ScalarT: np.generic](a: _ArrayLike[ScalarT], axis: None = None) -> _MArray1D[ScalarT] | None: ... +def notmasked_edges[ScalarT: np.generic](a: _ArrayLike[ScalarT], axis: None = None) -> _Array1D[ScalarT] | None: ... @overload -def notmasked_edges(a: ArrayLike, axis: None = None) -> _MArray1D[Incomplete] | None: ... +def notmasked_edges(a: ArrayLike, axis: None = None) -> _Array1D[Incomplete] | None: ... @overload def notmasked_edges(a: ArrayLike, axis: SupportsIndex) -> Incomplete: ... @@ -687,15 +688,15 @@ def clump_masked(a: np.ndarray) -> list[slice[int, int, None]]: ... # keep in sync with `lib._twodim_base_impl.vander` @overload -def vander[ScalarT: np.number | np.object_](x: _ArrayLike[ScalarT], n: int | None = None) -> _MArray2D[ScalarT]: ... +def vander[ScalarT: np.number | np.object_](x: _ArrayLike[ScalarT], n: int | None = None) -> _Array2D[ScalarT]: ... @overload -def vander(x: _ArrayLike[np.bool] | list[int], n: int | None = None) -> _MArray2D[np.int_]: ... +def vander(x: _ArrayLike[np.bool] | list[int], n: int | None = None) -> _Array2D[np.int_]: ... @overload -def vander(x: list[float], n: int | None = None) -> _MArray2D[np.float64]: ... +def vander(x: list[float], n: int | None = None) -> _Array2D[np.float64]: ... @overload -def vander(x: list[complex], n: int | None = None) -> _MArray2D[np.complex128]: ... +def vander(x: list[complex], n: int | None = None) -> _Array2D[np.complex128]: ... @overload # fallback -def vander(x: Sequence[_NumberLike_co], n: int | None = None) -> _MArray2D[Any]: ... +def vander(x: Sequence[_NumberLike_co], n: int | None = None) -> _Array2D[Any]: ... 
# keep roughly in sync with `lib._polynomial_impl.polyfit` @overload # float dtype, cov: False (default) From a2d95e690d4511d22710a9e843618d3b67f3f1a8 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 3 Jan 2026 15:21:09 +0100 Subject: [PATCH 1180/1718] DOC: update typing roadmap --- doc/neps/roadmap.rst | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/doc/neps/roadmap.rst b/doc/neps/roadmap.rst index 01cd21158be0..b724c4b0eacf 100644 --- a/doc/neps/roadmap.rst +++ b/doc/neps/roadmap.rst @@ -110,8 +110,7 @@ User experience Type annotations ```````````````` -Type annotations for most NumPy functionality is complete (although some -submodules like ``numpy.ma`` are missing return types), so users can use tools +Type annotations for NumPy functionality is complete, so users can use tools like `mypy`_ to type check their code and IDEs can improve their support for NumPy. Improving those type annotations, for example to support annotating array shapes (see `gh-16544 `__), From d15ee45ccd6465e5b02d551f94556c9fcdaf5036 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 3 Jan 2026 15:34:53 +0100 Subject: [PATCH 1181/1718] DOC: relnotes for the ``numpy.ma`` typing improvements --- doc/release/upcoming_changes/30566.typing.rst | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 doc/release/upcoming_changes/30566.typing.rst diff --git a/doc/release/upcoming_changes/30566.typing.rst b/doc/release/upcoming_changes/30566.typing.rst new file mode 100644 index 000000000000..fd9aabf85b6f --- /dev/null +++ b/doc/release/upcoming_changes/30566.typing.rst @@ -0,0 +1,5 @@ +``numpy.ma`` typing annotations +------------------------------- +The ``numpy.ma`` module is now fully covered by typing annotations. +This includes annotations for masked arrays, masks, and various functions and methods. +With this, NumPy has achieved 100% typing coverage across all its submodules. 
From 3f4fb222eea25dcff0b340ae6363bd53287e9401 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Sat, 3 Jan 2026 15:47:14 +0100 Subject: [PATCH 1182/1718] DOC: apply suggestions from code review Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- doc/neps/roadmap.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/neps/roadmap.rst b/doc/neps/roadmap.rst index b724c4b0eacf..55845b1913d4 100644 --- a/doc/neps/roadmap.rst +++ b/doc/neps/roadmap.rst @@ -110,7 +110,7 @@ User experience Type annotations ```````````````` -Type annotations for NumPy functionality is complete, so users can use tools +Type annotations for NumPy functionality are complete, so users can use tools like `mypy`_ to type check their code and IDEs can improve their support for NumPy. Improving those type annotations, for example to support annotating array shapes (see `gh-16544 `__), From 9526cde0ac964858dfb261f5ce25d6787500d05e Mon Sep 17 00:00:00 2001 From: Kadermiyanyedi Date: Sat, 3 Jan 2026 23:43:16 +0300 Subject: [PATCH 1183/1718] MAINT: Enable linting with ruff E501 --- numpy/_core/code_generators/generate_umath.py | 14 ++++++++++---- numpy/_core/tests/test_numerictypes.py | 18 +++++++++++------- ruff.toml | 2 -- 3 files changed, 21 insertions(+), 13 deletions(-) diff --git a/numpy/_core/code_generators/generate_umath.py b/numpy/_core/code_generators/generate_umath.py index f5d8530bbc58..02a3e27c99f2 100644 --- a/numpy/_core/code_generators/generate_umath.py +++ b/numpy/_core/code_generators/generate_umath.py @@ -57,7 +57,8 @@ class TypeDescription: If astype['x'] is 'y', uses PyUFunc_x_x_As_y_y/PyUFunc_xx_x_As_yy_y instead of PyUFunc_x_x/PyUFunc_xx_x. cfunc_alias : str or none, optional - Appended to inner loop C function name, e.g., FLOAT_{cfunc_alias}. See make_arrays. + Appended to inner loop C function name, e.g., FLOAT_{cfunc_alias}. + See make_arrays. 
NOTE: it doesn't support 'astype' dispatch : str or None, optional Dispatch-able source name without its extension '.dispatch.c' that @@ -1483,10 +1484,15 @@ def make_arrays(funcdict): #include "{dname}.dispatch.h" """)) for (ufunc_name, func_idx, cfunc_name, inout) in funcs: - code2list.append(textwrap.dedent(f"""\ + call_text = ( + f"NPY_CPU_DISPATCH_CALL_XB(" + f"{ufunc_name}_functions[{func_idx}] = {cfunc_name});" + ) + text = f"""\ NPY_CPU_DISPATCH_TRACE("{ufunc_name}", "{''.join(inout)}"); - NPY_CPU_DISPATCH_CALL_XB({ufunc_name}_functions[{func_idx}] = {cfunc_name}); - """)) + {call_text} + """ + code2list.append(textwrap.dedent(text)) return "\n".join(code1list), "\n".join(code2list) def make_ufuncs(funcdict): diff --git a/numpy/_core/tests/test_numerictypes.py b/numpy/_core/tests/test_numerictypes.py index 618e668dd53d..c47d86c39135 100644 --- a/numpy/_core/tests/test_numerictypes.py +++ b/numpy/_core/tests/test_numerictypes.py @@ -66,9 +66,9 @@ ('z', 'u1')] NbufferT = [ - # x Info color info y z - # value y2 Info2 name z2 Name Value - # name value y3 z3 + # depth1: x Info color info y z + # depth2: value y2 Info2 name z2 Name Value + # depth3: name value y3 z3 ([3, 2], (6j, 6., (b'nn', [6j, 4j], [6., 4.], [1, 2]), b'NN', True), b'cc', ('NN', 6j), [[6., 4.], [6., 4.]], 8), ([4, 3], (7j, 7., (b'oo', [7j, 5j], [7., 5.], [2, 1]), b'OO', False), @@ -543,10 +543,14 @@ def test_issctype(rep, expected): assert_equal(actual, expected) -@pytest.mark.skipif(sys.flags.optimize > 1, - reason="no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1") -@pytest.mark.xfail(IS_PYPY, - reason="PyPy cannot modify tp_doc after PyType_Ready") +@pytest.mark.skipif( + sys.flags.optimize > 1, + reason="no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1", +) +@pytest.mark.xfail( + IS_PYPY, + reason="PyPy cannot modify tp_doc after PyType_Ready", +) class TestDocStrings: def test_platform_dependent_aliases(self): if np.int64 is np.int_: diff 
--git a/ruff.toml b/ruff.toml index ebbb29283622..f740fcad9ad5 100644 --- a/ruff.toml +++ b/ruff.toml @@ -86,11 +86,9 @@ ignore = [ "numpy/_core/tests/test_multiarray.py" = ["E501"] "numpy/_core/tests/test_nditer*py" = ["E501"] "numpy/_core/tests/test_umath.py" = ["E501"] -"numpy/_core/tests/test_numerictypes.py" = ["E501"] "numpy/_core/tests/test_regression.py" = ["E501"] "numpy/_core/_add_newdocs.py" = ["E501"] "numpy/_core/_add_newdocs_scalars.py" = ["E501"] -"numpy/_core/code_generators/generate_umath.py" = ["E501"] "numpy/lib/tests/test_format.py" = ["E501"] "numpy/linalg/tests/test_linalg.py" = ["E501"] "numpy/f2py/*py" = ["E501"] From 3edecae8762b555f9b4d9414352fd519f6f9075e Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Sun, 4 Jan 2026 15:36:37 +0100 Subject: [PATCH 1184/1718] BUG: Set correct deprecation version for ndarray.resize (#30569) --- numpy/__init__.pyi | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 5fc769f7e65d..05f9f11f8c56 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2248,10 +2248,10 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def tolist(self, /) -> Any: ... @overload - @deprecated("Resizing a NumPy array inplace has been deprecated in NumPy 2.4") + @deprecated("Resizing a NumPy array inplace has been deprecated in NumPy 2.5") def resize(self, new_shape: _ShapeLike, /, *, refcheck: py_bool = True) -> None: ... @overload - @deprecated("Resizing a NumPy array inplace has been deprecated in NumPy 2.4") + @deprecated("Resizing a NumPy array inplace has been deprecated in NumPy 2.5") def resize(self, /, *new_shape: SupportsIndex, refcheck: py_bool = True) -> None: ... 
# keep in sync with `ma.MaskedArray.squeeze` From 317113d1d35cafe7b267175981dcef37be746439 Mon Sep 17 00:00:00 2001 From: Kadermiyanyedi Date: Sun, 4 Jan 2026 18:31:15 +0300 Subject: [PATCH 1185/1718] MAINT: Enable linting with ruff E501 --- numpy/_core/tests/test_defchararray.py | 10 +- numpy/_core/tests/test_einsum.py | 29 ++-- numpy/_core/tests/test_multiarray.py | 181 +++++++++++++++++++------ numpy/_core/tests/test_regression.py | 24 +++- ruff.toml | 4 - 5 files changed, 186 insertions(+), 62 deletions(-) diff --git a/numpy/_core/tests/test_defchararray.py b/numpy/_core/tests/test_defchararray.py index e98632b62829..7200b9c97094 100644 --- a/numpy/_core/tests/test_defchararray.py +++ b/numpy/_core/tests/test_defchararray.py @@ -309,12 +309,18 @@ def test_islower(self): def test_isspace(self): A = self.A() assert_(issubclass(A.isspace().dtype.type, np.bool)) - assert_array_equal(A.isspace(), [[False, False], [False, False], [False, False]]) + assert_array_equal( + A.isspace(), + [[False, False], [False, False], [False, False]], + ) def test_istitle(self): A = self.A() assert_(issubclass(A.istitle().dtype.type, np.bool)) - assert_array_equal(A.istitle(), [[False, False], [False, False], [False, False]]) + assert_array_equal( + A.istitle(), + [[False, False], [False, False], [False, False]], + ) def test_isupper(self): A = self.A() diff --git a/numpy/_core/tests/test_einsum.py b/numpy/_core/tests/test_einsum.py index 375ef03c1dd7..dd6ce7d7aea1 100644 --- a/numpy/_core/tests/test_einsum.py +++ b/numpy/_core/tests/test_einsum.py @@ -778,15 +778,21 @@ def __mul__(self, other): assert np.einsum("i,j", objMult, objMult) == 42 def test_subscript_range(self): - # Issue #7741, make sure that all letters of Latin alphabet (both uppercase & lowercase) can be used - # when creating a subscript from arrays + # Issue #7741, make sure that all letters of Latin alphabet + # (both uppercase & lowercase) can be used when creating a subscript from arrays a = np.ones((2, 3)) b = 
np.ones((3, 4)) np.einsum(a, [0, 20], b, [20, 2], [0, 2], optimize=False) np.einsum(a, [0, 27], b, [27, 2], [0, 2], optimize=False) np.einsum(a, [0, 51], b, [51, 2], [0, 2], optimize=False) - assert_raises(ValueError, lambda: np.einsum(a, [0, 52], b, [52, 2], [0, 2], optimize=False)) - assert_raises(ValueError, lambda: np.einsum(a, [-1, 5], b, [5, 2], [-1, 2], optimize=False)) + assert_raises( + ValueError, + lambda: np.einsum(a, [0, 52], b, [52, 2], [0, 2], optimize=False), + ) + assert_raises( + ValueError, + lambda: np.einsum(a, [-1, 5], b, [5, 2], [-1, 2], optimize=False), + ) def test_einsum_broadcast(self): # Issue #2455 change in handling ellipsis @@ -802,7 +808,8 @@ def test_einsum_broadcast(self): for opt in [True, False]: assert_equal(np.einsum('ij...,j...->ij...', A, B, optimize=opt), ref) assert_equal(np.einsum('ij...,...j->ij...', A, B, optimize=opt), ref) - assert_equal(np.einsum('ij...,j->ij...', A, B, optimize=opt), ref) # used to raise error + # used to raise error + assert_equal(np.einsum('ij...,j->ij...', A, B, optimize=opt), ref) A = np.arange(12).reshape((4, 3)) B = np.arange(6).reshape((3, 2)) @@ -810,8 +817,9 @@ def test_einsum_broadcast(self): for opt in [True, False]: assert_equal(np.einsum('ik...,k...->i...', A, B, optimize=opt), ref) assert_equal(np.einsum('ik...,...kj->i...j', A, B, optimize=opt), ref) - assert_equal(np.einsum('...k,kj', A, B, optimize=opt), ref) # used to raise error - assert_equal(np.einsum('ik,k...->i...', A, B, optimize=opt), ref) # used to raise error + # used to raise error + assert_equal(np.einsum('...k,kj', A, B, optimize=opt), ref) + assert_equal(np.einsum('ik,k...->i...', A, B, optimize=opt), ref) dims = [2, 3, 4, 5] a = np.arange(np.prod(dims)).reshape(dims) @@ -819,16 +827,17 @@ def test_einsum_broadcast(self): ref = np.einsum('ijkl,k->ijl', a, v, optimize=False) for opt in [True, False]: assert_equal(np.einsum('ijkl,k', a, v, optimize=opt), ref) - assert_equal(np.einsum('...kl,k', a, v, optimize=opt), ref) # 
used to raise error assert_equal(np.einsum('...kl,k...', a, v, optimize=opt), ref) + # used to raise error + assert_equal(np.einsum('...kl,k', a, v, optimize=opt), ref) J, K, M = 160, 160, 120 A = np.arange(J * K * M).reshape(1, 1, 1, J, K, M) B = np.arange(J * K * M * 3).reshape(J, K, M, 3) ref = np.einsum('...lmn,...lmno->...o', A, B, optimize=False) for opt in [True, False]: - assert_equal(np.einsum('...lmn,lmno->...o', A, B, - optimize=opt), ref) # used to raise error + # used to raise error + assert_equal(np.einsum("...lmn,lmno->...o", A, B, optimize=opt), ref) def test_einsum_fixedstridebug(self): # Issue #4485 obscure einsum bug diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 62f3bd4a77c4..84d28b1cd098 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -1346,9 +1346,11 @@ def test_ndmax_greater_than_actual_dims(self): def test_ndmax_less_than_actual_dims(self): data = [[[1], [2]], [[3], [4]]] - with pytest.raises(ValueError, - match="setting an array element with a sequence. " - "The requested array would exceed the maximum number of dimension of 2."): + with pytest.raises( + ValueError, + match="setting an array element with a sequence. 
" + "The requested array would exceed the maximum number of dimension of 2.", + ): np.array(data, ndmax=2) def test_ndmax_is_zero(self): @@ -4559,9 +4561,13 @@ def test_test_zero_rank(self): class TestPickling: - @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL >= 5, - reason=('this tests the error messages when trying to' - 'protocol 5 although it is not available')) + @pytest.mark.skipif( + pickle.HIGHEST_PROTOCOL >= 5, + reason=( + "this tests the error messages when trying toprotocol 5 " + "although it is not available" + ), + ) def test_correct_protocol5_error_message(self): array = np.arange(10) @@ -4586,8 +4592,10 @@ def test_record_array_with_object_dtype(self): assert_equal(arr_without_object.dtype, depickled_arr_without_object.dtype) - @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5, - reason="requires pickle protocol 5") + @pytest.mark.skipif( + pickle.HIGHEST_PROTOCOL < 5, + reason="requires pickle protocol 5", + ) def test_f_contiguous_array(self): f_contiguous_array = np.array([[1, 2, 3], [4, 5, 6]], order='F') buffers = [] @@ -4604,11 +4612,23 @@ def test_f_contiguous_array(self): assert_equal(f_contiguous_array, depickled_f_contiguous_array) - @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5, reason="requires pickle protocol 5") - @pytest.mark.parametrize('transposed_contiguous_array', - [np.random.default_rng(42).random((2, 3, 4)).transpose((1, 0, 2)), - np.random.default_rng(42).random((2, 3, 4, 5)).transpose((1, 3, 0, 2))] + - [np.random.default_rng(42).random(np.arange(2, 7)).transpose(np.random.permutation(5)) for _ in range(3)]) + @pytest.mark.skipif( + pickle.HIGHEST_PROTOCOL < 5, + reason="requires pickle protocol 5", + ) + @pytest.mark.parametrize( + "transposed_contiguous_array", + [ + np.random.default_rng(42).random((2, 3, 4)).transpose((1, 0, 2)), + np.random.default_rng(42).random((2, 3, 4, 5)).transpose((1, 3, 0, 2)), + ] + + [ + np.random.default_rng(42) + .random(np.arange(2, 7)) + .transpose(np.random.permutation(5)) + for _ in range(3) 
+ ], + ) def test_transposed_contiguous_array(self, transposed_contiguous_array): buffers = [] # When using pickle protocol 5, arrays which can be transposed to c_contiguous @@ -4623,7 +4643,10 @@ def test_transposed_contiguous_array(self, transposed_contiguous_array): assert_equal(transposed_contiguous_array, depickled_transposed_contiguous_array) - @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5, reason="requires pickle protocol 5") + @pytest.mark.skipif( + pickle.HIGHEST_PROTOCOL < 5, + reason="requires pickle protocol 5", + ) def test_load_legacy_pkl_protocol5(self): # legacy byte strs are dumped in 2.2.1 c_contiguous_dumped = b'\x80\x05\x95\x90\x00\x00\x00\x00\x00\x00\x00\x8c\x13numpy._core.numeric\x94\x8c\x0b_frombuffer\x94\x93\x94(\x96\x18\x00\x00\x00\x00\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x94\x8c\x05numpy\x94\x8c\x05dtype\x94\x93\x94\x8c\x02u1\x94\x89\x88\x87\x94R\x94(K\x03\x8c\x01|\x94NNNJ\xff\xff\xff\xffJ\xff\xff\xff\xffK\x00t\x94bK\x03K\x04K\x02\x87\x94\x8c\x01C\x94t\x94R\x94.' 
# noqa: E501 @@ -4648,9 +4671,14 @@ def test_non_contiguous_array(self): # using any protocol buffers = [] for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + buffer_callback = buffers.append if proto >= 5 else None depickled_non_contiguous_array = pickle.loads( - pickle.dumps(non_contiguous_array, protocol=proto, - buffer_callback=buffers.append if proto >= 5 else None)) + pickle.dumps( + non_contiguous_array, + protocol=proto, + buffer_callback=buffer_callback, + ) + ) assert_equal(len(buffers), 0) assert_equal(non_contiguous_array, depickled_non_contiguous_array) @@ -4683,58 +4711,111 @@ def _loads(self, obj): # version 0 pickles, using protocol=2 to pickle # version 0 doesn't have a version field @pytest.mark.filterwarnings( - "ignore:.*align should be passed:numpy.exceptions.VisibleDeprecationWarning") + "ignore:.*align should be passed:numpy.exceptions.VisibleDeprecationWarning", + ) def test_version0_int8(self): - s = b"\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb." + s = ( + b"\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\n" + b"ndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\n" + b"dtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff" + b"\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb." 
+ ) a = np.array([1, 2, 3, 4], dtype=np.int8) p = self._loads(s) assert_equal(a, p) @pytest.mark.filterwarnings( - "ignore:.*align should be passed:numpy.exceptions.VisibleDeprecationWarning") + "ignore:.*align should be passed:numpy.exceptions.VisibleDeprecationWarning", + ) def test_version0_float32(self): - s = b"\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01 Date: Sun, 4 Jan 2026 20:01:44 +0100 Subject: [PATCH 1186/1718] TYP: ``numpy.core._internal`` (deprecated) --- numpy/core/_internal.pyi | 1 + 1 file changed, 1 insertion(+) create mode 100644 numpy/core/_internal.pyi diff --git a/numpy/core/_internal.pyi b/numpy/core/_internal.pyi new file mode 100644 index 000000000000..449f01c97af7 --- /dev/null +++ b/numpy/core/_internal.pyi @@ -0,0 +1 @@ +# deprecated module From 15603c4a71e254d1a774b086e49fba0ad3bfac13 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 4 Jan 2026 20:01:53 +0100 Subject: [PATCH 1187/1718] TYP: ``numpy.core.arrayprint`` (deprecated) --- numpy/core/arrayprint.pyi | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 numpy/core/arrayprint.pyi diff --git a/numpy/core/arrayprint.pyi b/numpy/core/arrayprint.pyi new file mode 100644 index 000000000000..c4e5c5e5cc44 --- /dev/null +++ b/numpy/core/arrayprint.pyi @@ -0,0 +1,4 @@ +# deprecated module + +from numpy._core.arrayprint import * +from numpy._core.arrayprint import __all__ as __all__ From c27174e5cffee40a81590eb384188bc2166c1721 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 4 Jan 2026 20:01:59 +0100 Subject: [PATCH 1188/1718] TYP: ``numpy.core.defchararray`` (deprecated) --- numpy/core/defchararray.pyi | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 numpy/core/defchararray.pyi diff --git a/numpy/core/defchararray.pyi b/numpy/core/defchararray.pyi new file mode 100644 index 000000000000..4a2f369c1f7d --- /dev/null +++ b/numpy/core/defchararray.pyi @@ 
-0,0 +1,4 @@ +# deprecated module + +from numpy._core.defchararray import * +from numpy._core.defchararray import __all__ as __all__ From d30876598b236000621556f05b22f07b8d001399 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 4 Jan 2026 20:02:09 +0100 Subject: [PATCH 1189/1718] TYP: ``numpy.core.einsumfunc`` (deprecated) --- numpy/core/einsumfunc.pyi | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 numpy/core/einsumfunc.pyi diff --git a/numpy/core/einsumfunc.pyi b/numpy/core/einsumfunc.pyi new file mode 100644 index 000000000000..476c79bc2006 --- /dev/null +++ b/numpy/core/einsumfunc.pyi @@ -0,0 +1,4 @@ +# deprecated module + +from numpy._core.einsumfunc import * +from numpy._core.einsumfunc import __all__ as __all__ From 6c56bfd86e8daa868ca12666639ee8dd7c3c940a Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 4 Jan 2026 20:02:19 +0100 Subject: [PATCH 1190/1718] TYP: ``numpy.core.fromnumeric`` (deprecated) --- numpy/core/fromnumeric.pyi | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 numpy/core/fromnumeric.pyi diff --git a/numpy/core/fromnumeric.pyi b/numpy/core/fromnumeric.pyi new file mode 100644 index 000000000000..8e5ac5b765f5 --- /dev/null +++ b/numpy/core/fromnumeric.pyi @@ -0,0 +1,4 @@ +# deprecated module + +from numpy._core.fromnumeric import * +from numpy._core.fromnumeric import __all__ as __all__ From 5d0b6b7edad7908b686c95f8f62a91e0eaf33490 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 4 Jan 2026 20:02:40 +0100 Subject: [PATCH 1191/1718] TYP: ``numpy.core.function_base`` (deprecated) --- numpy/core/function_base.pyi | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 numpy/core/function_base.pyi diff --git a/numpy/core/function_base.pyi b/numpy/core/function_base.pyi new file mode 100644 index 000000000000..fa041a9d3d60 --- /dev/null +++ b/numpy/core/function_base.pyi @@ -0,0 +1,4 @@ +# deprecated module + +from numpy._core.function_base import * +from numpy._core.function_base import __all__ as __all__ From 
2c21389e1937f2b3cb38a04cf1bf4bded0de3efb Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 4 Jan 2026 20:02:46 +0100 Subject: [PATCH 1192/1718] TYP: ``numpy.core.getlimits`` (deprecated) --- numpy/core/getlimits.pyi | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 numpy/core/getlimits.pyi diff --git a/numpy/core/getlimits.pyi b/numpy/core/getlimits.pyi new file mode 100644 index 000000000000..91a9dec49d42 --- /dev/null +++ b/numpy/core/getlimits.pyi @@ -0,0 +1,4 @@ +# deprecated module + +from numpy._core.getlimits import * +from numpy._core.getlimits import __all__ as __all__ From 704dd140802f112fbecb1753800b82307c7f8f0d Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 4 Jan 2026 20:03:00 +0100 Subject: [PATCH 1193/1718] TYP: ``numpy.core.multiarray`` (deprecated) --- numpy/core/multiarray.pyi | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 numpy/core/multiarray.pyi diff --git a/numpy/core/multiarray.pyi b/numpy/core/multiarray.pyi new file mode 100644 index 000000000000..d58f20dcc4c8 --- /dev/null +++ b/numpy/core/multiarray.pyi @@ -0,0 +1,4 @@ +# deprecated module + +from numpy._core.multiarray import * +from numpy._core.multiarray import __all__ as __all__ From eabc1c8200932396d44713c922e35f9088f85f2d Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 4 Jan 2026 20:03:09 +0100 Subject: [PATCH 1194/1718] TYP: ``numpy.core.numeric`` (deprecated) --- numpy/core/numeric.pyi | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 numpy/core/numeric.pyi diff --git a/numpy/core/numeric.pyi b/numpy/core/numeric.pyi new file mode 100644 index 000000000000..dbb936364c46 --- /dev/null +++ b/numpy/core/numeric.pyi @@ -0,0 +1,4 @@ +# deprecated module + +from numpy._core.numeric import * +from numpy._core.numeric import __all__ as __all__ From a223f2908266e2d81c8ea77f41d8a5e49b2ffe10 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 4 Jan 2026 20:03:15 +0100 Subject: [PATCH 1195/1718] TYP: ``numpy.core.numerictypes`` (deprecated) --- 
numpy/core/numerictypes.pyi | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 numpy/core/numerictypes.pyi diff --git a/numpy/core/numerictypes.pyi b/numpy/core/numerictypes.pyi new file mode 100644 index 000000000000..5251eae02b6a --- /dev/null +++ b/numpy/core/numerictypes.pyi @@ -0,0 +1,4 @@ +# deprecated module + +from numpy._core.numerictypes import * +from numpy._core.numerictypes import __all__ as __all__ From cef5e2b6c73f251969e95f18abb86e736efa0d07 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 4 Jan 2026 20:03:21 +0100 Subject: [PATCH 1196/1718] TYP: ``numpy.core.records`` (deprecated) --- numpy/core/records.pyi | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 numpy/core/records.pyi diff --git a/numpy/core/records.pyi b/numpy/core/records.pyi new file mode 100644 index 000000000000..f6672b47ba6a --- /dev/null +++ b/numpy/core/records.pyi @@ -0,0 +1,4 @@ +# deprecated module + +from numpy._core.records import * +from numpy._core.records import __all__ as __all__ From 54747100d5d75c6d810a13b6cc8dfbd905775fd6 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 4 Jan 2026 20:03:29 +0100 Subject: [PATCH 1197/1718] TYP: ``numpy.core.shape_base`` (deprecated) --- numpy/core/shape_base.pyi | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 numpy/core/shape_base.pyi diff --git a/numpy/core/shape_base.pyi b/numpy/core/shape_base.pyi new file mode 100644 index 000000000000..0d4d077d7e64 --- /dev/null +++ b/numpy/core/shape_base.pyi @@ -0,0 +1,4 @@ +# deprecated module + +from numpy._core.shape_base import * +from numpy._core.shape_base import __all__ as __all__ From d4ca25f63ca455a48336a6c73f8805692cb8f5fa Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 4 Jan 2026 20:03:35 +0100 Subject: [PATCH 1198/1718] TYP: ``numpy.core.umath`` (deprecated) --- numpy/core/umath.pyi | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 numpy/core/umath.pyi diff --git a/numpy/core/umath.pyi b/numpy/core/umath.pyi new file mode 100644 index 
000000000000..b32fc9b11d8f --- /dev/null +++ b/numpy/core/umath.pyi @@ -0,0 +1,4 @@ +# deprecated module + +from numpy._core.umath import * +from numpy._core.umath import __all__ as __all__ From 28aefdf0762e8c986bdc51c77a7473c7b6133416 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 4 Jan 2026 20:03:47 +0100 Subject: [PATCH 1199/1718] TYP: ``numpy.core.__all__`` (deprecated) --- numpy/core/__init__.pyi | 45 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) diff --git a/numpy/core/__init__.pyi b/numpy/core/__init__.pyi index e69de29bb2d1..cecacb907939 100644 --- a/numpy/core/__init__.pyi +++ b/numpy/core/__init__.pyi @@ -0,0 +1,45 @@ +# deprecated module + +from types import ModuleType + +from . import ( + _dtype, + _dtype_ctypes, + _internal, + arrayprint, + defchararray, + einsumfunc, + fromnumeric, + function_base, + getlimits, + multiarray, + numeric, + numerictypes, + overrides, + records, + shape_base, + umath, +) + +__all__ = [ + "_dtype", + "_dtype_ctypes", + "_internal", + "_multiarray_umath", + "arrayprint", + "defchararray", + "einsumfunc", + "fromnumeric", + "function_base", + "getlimits", + "multiarray", + "numeric", + "numerictypes", + "overrides", + "records", + "shape_base", + "umath", +] + +# `numpy._core._multiarray_umath` has no stubs, so there's nothing to re-export +_multiarray_umath: ModuleType From 19d5a61fe7811557d959a001f34ae799b3567815 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 4 Jan 2026 20:04:06 +0100 Subject: [PATCH 1200/1718] TYP: remove unused stubtest allowlist entries --- tools/stubtest/allowlist.txt | 18 +----------------- 1 file changed, 1 insertion(+), 17 deletions(-) diff --git a/tools/stubtest/allowlist.txt b/tools/stubtest/allowlist.txt index e8a746c3a925..bafd2c795f88 100644 --- a/tools/stubtest/allowlist.txt +++ b/tools/stubtest/allowlist.txt @@ -16,7 +16,7 @@ numpy\._core\._simd\.\w+ # these are always either float96/complex192 or float128/complex256 numpy\.__all__ -numpy\._?core\.__all__ 
+numpy\._core\.__all__ numpy\._?core\.numeric\.__all__ numpy\._?core\.numerictypes\.__all__ numpy\.matlib\.__all__ @@ -43,22 +43,6 @@ numpy\.(\w+\.)*complexfloating\.__complex__ numpy\.(\w+\.)*generic\.__hash__ # intentionally missing deprecated module stubs -numpy\.core\._dtype -numpy\.core\._dtype_ctypes -numpy\.core\._internal -numpy\.core\._multiarray_umath.* -numpy\.core\.arrayprint.* -numpy\.core\.defchararray.* -numpy\.core\.einsumfunc.* -numpy\.core\.fromnumeric.* -numpy\.core\.function_base.* -numpy\.core\.getlimits.* -numpy\.core\.multiarray.* -numpy\.core\.numeric.* -numpy\.core\.overrides -numpy\.core\.records.* -numpy\.core\.shape_base.* -numpy\.core\.umath.* numpy\.typing\.mypy_plugin # false positive "... is not a Union" errors From 51057494114769333df313a171024185c7a8e382 Mon Sep 17 00:00:00 2001 From: Alejandro Candioti Date: Sun, 4 Jan 2026 11:37:26 -0800 Subject: [PATCH 1201/1718] With sorter --- numpy/_core/src/npysort/binsearch.cpp | 72 ++++++++++++++------------- 1 file changed, 38 insertions(+), 34 deletions(-) diff --git a/numpy/_core/src/npysort/binsearch.cpp b/numpy/_core/src/npysort/binsearch.cpp index 0f8559ca5ef2..99e2d1771907 100644 --- a/numpy/_core/src/npysort/binsearch.cpp +++ b/numpy/_core/src/npysort/binsearch.cpp @@ -170,51 +170,55 @@ argbinsearch(const char *arr, const char *key, const char *sort, char *ret, { using T = typename Tag::type; auto cmp = side_to_cmp::value; - npy_intp min_idx = 0; - npy_intp max_idx = arr_len; - T last_key_val; - if (key_len == 0) { + // If the array length is 0 we return all 0s + if (arr_len <= 0) { + for (npy_intp i = 0; i < key_len; ++i) { + *(npy_intp *)(ret + i * ret_str) = 0; + } return 0; } - last_key_val = *(const T *)key; - for (; key_len > 0; key_len--, key += key_str, ret += ret_str) { - const T key_val = *(const T *)key; - /* - * Updating only one of the indices based on the previous key - * gives the search a big boost when keys are sorted, but slightly - * slows down things for purely 
random ones. - */ - if (cmp(last_key_val, key_val)) { - max_idx = arr_len; - } - else { - min_idx = 0; - max_idx = (max_idx < arr_len) ? (max_idx + 1) : arr_len; - } + npy_intp interval_length = arr_len; + npy_intp half = interval_length >> 1; + interval_length -= half; // length -> ceil(length / 2) - last_key_val = key_val; + npy_intp base = 0; + npy_intp mid_idx = *(npy_intp *)(sort + (base + half) * sort_str); + if (mid_idx < 0 || mid_idx >= arr_len) { + return -1; + } + const T mid_val = *(const T *)(arr + mid_idx * arr_str); - while (min_idx < max_idx) { - const npy_intp mid_idx = min_idx + ((max_idx - min_idx) >> 1); - const npy_intp sort_idx = *(npy_intp *)(sort + mid_idx * sort_str); - T mid_val; + for (npy_intp i = 0; i < key_len; ++i) { + const T key_val = *(const T *)(key + i * key_str); + *(npy_intp *)(ret + i * ret_str) = cmp(mid_val, key_val) * half; + } - if (sort_idx < 0 || sort_idx >= arr_len) { + while (interval_length > 1) { + npy_intp half = interval_length >> 1; + interval_length -= half; // length -> ceil(length / 2) + + for (npy_intp i = 0; i < key_len; ++i) { + npy_intp &base = *(npy_intp *)(ret + i * ret_str); + npy_intp mid_idx = *(npy_intp *)(sort + (base + half) * sort_str); + if (mid_idx < 0 || mid_idx >= arr_len) { return -1; } + const T mid_val = *(const T *)(arr + mid_idx * arr_str); + const T key_val = *(const T *)(key + i * key_str); + base += cmp(mid_val, key_val) * half; + } + } - mid_val = *(const T *)(arr + sort_idx * arr_str); - - if (cmp(mid_val, key_val)) { - min_idx = mid_idx + 1; - } - else { - max_idx = mid_idx; - } + for (npy_intp i = 0; i < key_len; ++i) { + npy_intp &base = *(npy_intp *)(ret + i * ret_str); + npy_intp mid_idx = *(npy_intp *)(sort + base * sort_str); + if (mid_idx < 0 || mid_idx >= arr_len) { + return -1; } - *(npy_intp *)ret = min_idx; + const T key_val = *(const T *)(key + i * key_str); + base += cmp(*(const T *)(arr + mid_idx * arr_str), key_val); } return 0; } From 
0ac9a005689369c8c305599a8d4543f27362a1e0 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Mon, 5 Jan 2026 10:29:25 +0100 Subject: [PATCH 1202/1718] DOC: update security policy to be more explicit about bad assumptions (#30562) --- README.md | 2 +- doc/source/reference/security.rst | 26 +++++++++++++++++++------- 2 files changed, 20 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 7bf1e13346ce..344631bc5601 100644 --- a/README.md +++ b/README.md @@ -26,7 +26,7 @@ NumPy is the fundamental package for scientific computing with Python. - **Source code:** https://github.com/numpy/numpy - **Contributing:** https://numpy.org/devdocs/dev/index.html - **Bug reports:** https://github.com/numpy/numpy/issues -- **Report a security vulnerability:** https://tidelift.com/docs/security +- **Report a security vulnerability:** https://github.com/numpy/numpy/security/policy (via Tidelift) It provides: diff --git a/doc/source/reference/security.rst b/doc/source/reference/security.rst index 6d8ba75b9d26..a0de8f9e0146 100644 --- a/doc/source/reference/security.rst +++ b/doc/source/reference/security.rst @@ -3,8 +3,6 @@ NumPy security Security issues can be reported privately as described in the project README and when opening a `new issue on the issue tracker `_. -The `Python security reporting guidelines `_ -are a good resource and its notes apply also to NumPy. NumPy's maintainers are not security experts. However, we are conscientious about security and experts of both the NumPy codebase and how it's used. @@ -14,17 +12,31 @@ A security advisory we are not aware of beforehand can lead to a lot of work for all involved parties. -Advice for using NumPy on untrusted data ----------------------------------------- +Important +--------- +NumPy is not designed to be exposed directly to untrusted users. A user who can freely execute NumPy (or Python) functions must be considered -to have the same privilege as the process/Python interpreter. 
+to have the same privileges as the process/Python interpreter. + +If one can already execute Python code, there are far worse things one can do +than use all available CPU cycles, or provoke a symptom of a bug in Code like +use-after-free or a segfault. Therefore, while such issues may be bugs, they +are not security issues. + +Before reporting a security issue, please consider and describe the attack +vector in detail - and in particular whether that attack vector assumes being +able to freely execute NumPy functions. + + +Advice for using NumPy on untrusted data +---------------------------------------- -That said, NumPy should be generally safe to use on *data* provided by +NumPy should be generally safe to use on *data* provided by unprivileged users and read through safe API functions (e.g. loaded from a text file or ``.npy`` file without pickle support). Malicious *values* or *data sizes* should never lead to privilege escalation. -Note that the above refers to array data. We do not currently consider for +Note that the above refers to *array data*. We do not currently consider for example ``f2py`` to be safe: it is typically used to compile a program that is then run. Any ``f2py`` invocation must thus use the same privilege as the later execution. 
From 83e2486dfa97023b1b993dc365073f30a1874c13 Mon Sep 17 00:00:00 2001 From: Kumar Aditya Date: Mon, 5 Jan 2026 15:03:40 +0530 Subject: [PATCH 1203/1718] ENH: enable alloc cache on free-threading builds (#30499) --- numpy/_core/code_generators/genapi.py | 2 +- numpy/_core/meson.build | 2 +- .../src/multiarray/{alloc.c => alloc.cpp} | 34 +++++++++++++++---- numpy/_core/src/multiarray/alloc.h | 7 ++++ numpy/_core/src/multiarray/multiarraymodule.h | 7 ++++ 5 files changed, 43 insertions(+), 9 deletions(-) rename numpy/_core/src/multiarray/{alloc.c => alloc.cpp} (95%) diff --git a/numpy/_core/code_generators/genapi.py b/numpy/_core/code_generators/genapi.py index 1087d176816b..bcd86dadfe88 100644 --- a/numpy/_core/code_generators/genapi.py +++ b/numpy/_core/code_generators/genapi.py @@ -37,7 +37,7 @@ def get_processor(): __docformat__ = 'restructuredtext' # The files under src/ that are scanned for API functions -API_FILES = [join('multiarray', 'alloc.c'), +API_FILES = [join('multiarray', 'alloc.cpp'), join('multiarray', 'abstractdtypes.c'), join('multiarray', 'arrayfunction_override.c'), join('multiarray', 'array_api_standard.c'), diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index 0b3626d91965..7cab1b9fd167 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -1106,7 +1106,7 @@ endif src_multiarray = multiarray_gen_headers + [ 'src/multiarray/abstractdtypes.c', - 'src/multiarray/alloc.c', + 'src/multiarray/alloc.cpp', 'src/multiarray/arrayobject.c', 'src/multiarray/array_coercion.c', 'src/multiarray/array_converter.c', diff --git a/numpy/_core/src/multiarray/alloc.c b/numpy/_core/src/multiarray/alloc.cpp similarity index 95% rename from numpy/_core/src/multiarray/alloc.c rename to numpy/_core/src/multiarray/alloc.cpp index 8061feed24e5..cd125c796168 100644 --- a/numpy/_core/src/multiarray/alloc.c +++ b/numpy/_core/src/multiarray/alloc.cpp @@ -1,5 +1,6 @@ #define NPY_NO_DEPRECATED_API NPY_API_VERSION #define _MULTIARRAYMODULE +extern 
"C" { #define PY_SSIZE_T_CLEAN #include @@ -27,12 +28,11 @@ #endif #endif -/* Do not enable the alloc cache if the GIL is disabled, or if ASAN or MSAN - * instrumentation is enabled. The cache makes ASAN use-after-free or MSAN + +/* Do not enable the alloc cache if ASAN or MSAN instrumentation is enabled. + * The cache makes ASAN use-after-free or MSAN * use-of-uninitialized-memory warnings less useful. */ -#ifdef Py_GIL_DISABLED -# define USE_ALLOC_CACHE 0 -#elif defined(__has_feature) +#if defined(__has_feature) # if __has_feature(address_sanitizer) || __has_feature(memory_sanitizer) # define USE_ALLOC_CACHE 0 # endif @@ -50,8 +50,26 @@ typedef struct { npy_uintp available; /* number of cached pointers */ void * ptrs[NCACHE]; } cache_bucket; -static cache_bucket datacache[NBUCKETS]; -static cache_bucket dimcache[NBUCKETS_DIM]; + +static NPY_TLS cache_bucket datacache[NBUCKETS]; +static NPY_TLS cache_bucket dimcache[NBUCKETS_DIM]; + +typedef struct cache_destructor { + ~cache_destructor() { + for (npy_uint i = 0; i < NBUCKETS; ++i) { + while (datacache[i].available > 0) { + free(datacache[i].ptrs[--datacache[i].available]); + } + } + for (npy_uint i = 0; i < NBUCKETS_DIM; ++i) { + while (dimcache[i].available > 0) { + PyArray_free(dimcache[i].ptrs[--dimcache[i].available]); + } + } + } +} cache_destructor; + +static NPY_TLS cache_destructor tls_cache_destructor; /* * This function tells whether NumPy attempts to call `madvise` with @@ -595,3 +613,5 @@ _Npy_MallocWithOverflowCheck(npy_intp size, npy_intp elsize) } return PyMem_MALLOC(total_size); } + +} /* extern "C" */ diff --git a/numpy/_core/src/multiarray/alloc.h b/numpy/_core/src/multiarray/alloc.h index bef6407a28a3..c7c0f6d2154e 100644 --- a/numpy/_core/src/multiarray/alloc.h +++ b/numpy/_core/src/multiarray/alloc.h @@ -1,6 +1,9 @@ #ifndef NUMPY_CORE_SRC_MULTIARRAY_ALLOC_H_ #define NUMPY_CORE_SRC_MULTIARRAY_ALLOC_H_ +#ifdef __cplusplus +extern "C" { +#endif #define NPY_NO_DEPRECATED_API NPY_API_VERSION 
#define _MULTIARRAYMODULE #include "numpy/ndarraytypes.h" @@ -116,4 +119,8 @@ _npy_free_workspace(void *buf, void *static_buf) #define npy_free_workspace(NAME) \ _npy_free_workspace(NAME, NAME##_static) +#ifdef __cplusplus +} /* extern "C" */ +#endif + #endif /* NUMPY_CORE_SRC_MULTIARRAY_ALLOC_H_ */ diff --git a/numpy/_core/src/multiarray/multiarraymodule.h b/numpy/_core/src/multiarray/multiarraymodule.h index de234a8495d3..32decb4284fe 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.h +++ b/numpy/_core/src/multiarray/multiarraymodule.h @@ -1,6 +1,10 @@ #ifndef NUMPY_CORE_SRC_MULTIARRAY_MULTIARRAYMODULE_H_ #define NUMPY_CORE_SRC_MULTIARRAY_MULTIARRAYMODULE_H_ +#ifdef __cplusplus +extern "C" { +#endif + /* * A struct storing thread-unsafe global state for the _multiarray_umath * module. We should refactor so the global state is thread-safe, @@ -84,5 +88,8 @@ NPY_VISIBILITY_HIDDEN extern npy_thread_unsafe_state_struct npy_thread_unsafe_st NPY_NO_EXPORT int get_legacy_print_mode(void); +#ifdef __cplusplus +} +#endif #endif /* NUMPY_CORE_SRC_MULTIARRAY_MULTIARRAYMODULE_H_ */ From 3057e2dde6e075213e97b22cc79d0d283c692f9d Mon Sep 17 00:00:00 2001 From: Nandika P T Date: Mon, 5 Jan 2026 17:26:36 +0530 Subject: [PATCH 1204/1718] DOC: Minor refine castbuf and DECREF explanation in ufuncs.rst (#30560) --- doc/ufuncs.rst | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/doc/ufuncs.rst b/doc/ufuncs.rst index 077195fa59b7..4344ff9ab121 100644 --- a/doc/ufuncs.rst +++ b/doc/ufuncs.rst @@ -78,10 +78,11 @@ If there are object arrays involved then loop->obj gets set to 1. Then there ar loop, then "remainder" DECREF's are needed). Outputs: - - castbuf contains a new reference as the result of the function call. This - gets converted to the type of interest and. This new reference in castbuf - will be DECREF'd by later calls to the function. Thus, only after the - inner most loop do we need to DECREF the remaining references in castbuf. 
+ - castbuf contains a new reference as the result of the function call. + This is converted to the type of interest, and this new reference + in castbuf will be DECREF'd (its reference count decreased) by + later calls to the function. Thus, only after the innermost loop + finishes do we need to DECREF the remaining references in castbuf. 2) The loop function is of a different type: From 7cb5a0786ebd598a27afbfb11dda3dad664ad1fb Mon Sep 17 00:00:00 2001 From: jorenham Date: Mon, 5 Jan 2026 15:43:04 +0100 Subject: [PATCH 1205/1718] TYP: move ``poly1d`` stubs from ``__init__.pyi`` to ``lib/_polynomial_impl.pyi`` --- numpy/__init__.pyi | 75 +---------------------- numpy/lib/_polynomial_impl.pyi | 109 ++++++++++++++++++++++++++++++++- 2 files changed, 108 insertions(+), 76 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 05f9f11f8c56..0f6cdef8d020 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -506,6 +506,7 @@ from numpy.lib._polynomial_impl import ( polymul, polydiv, polyval, + poly1d, polyfit, ) @@ -6166,80 +6167,6 @@ class nditer: @property def value(self) -> tuple[NDArray[Any], ...]: ... -class poly1d: - @property - def variable(self) -> LiteralString: ... - @property - def order(self) -> int: ... - @property - def o(self) -> int: ... - @property - def roots(self) -> NDArray[Any]: ... - @property - def r(self) -> NDArray[Any]: ... - - @property - def coeffs(self) -> NDArray[Any]: ... - @coeffs.setter - def coeffs(self, value: NDArray[Any]) -> None: ... - - @property - def c(self) -> NDArray[Any]: ... - @c.setter - def c(self, value: NDArray[Any]) -> None: ... - - @property - def coef(self) -> NDArray[Any]: ... - @coef.setter - def coef(self, value: NDArray[Any]) -> None: ... - - @property - def coefficients(self) -> NDArray[Any]: ... - @coefficients.setter - def coefficients(self, value: NDArray[Any]) -> None: ... 
- - __hash__: ClassVar[None] # type: ignore[assignment] # pyright: ignore[reportIncompatibleMethodOverride] - - @overload - def __array__(self, /, t: None = None, copy: py_bool | None = None) -> ndarray[tuple[int]]: ... - @overload - def __array__[DTypeT: dtype](self, /, t: DTypeT, copy: py_bool | None = None) -> ndarray[tuple[int], DTypeT]: ... - - @overload - def __call__(self, val: _ScalarLike_co) -> Any: ... - @overload - def __call__(self, val: poly1d) -> poly1d: ... - @overload - def __call__(self, val: ArrayLike) -> NDArray[Any]: ... - - def __init__( - self, - c_or_r: ArrayLike, - r: py_bool = False, - variable: str | None = None, - ) -> None: ... - def __len__(self) -> int: ... - def __neg__(self) -> poly1d: ... - def __pos__(self) -> poly1d: ... - def __mul__(self, other: ArrayLike, /) -> poly1d: ... - def __rmul__(self, other: ArrayLike, /) -> poly1d: ... - def __add__(self, other: ArrayLike, /) -> poly1d: ... - def __radd__(self, other: ArrayLike, /) -> poly1d: ... - def __pow__(self, val: _FloatLike_co, /) -> poly1d: ... # Integral floats are accepted - def __sub__(self, other: ArrayLike, /) -> poly1d: ... - def __rsub__(self, other: ArrayLike, /) -> poly1d: ... - def __truediv__(self, other: ArrayLike, /) -> poly1d: ... - def __rtruediv__(self, other: ArrayLike, /) -> poly1d: ... - def __getitem__(self, val: int, /) -> Any: ... - def __setitem__(self, key: int, val: Any, /) -> None: ... - def __iter__(self) -> Iterator[Any]: ... - def deriv(self, m: SupportsInt | SupportsIndex = 1) -> poly1d: ... - def integ( - self, - m: SupportsInt | SupportsIndex = 1, - k: _ArrayLikeComplex_co | _ArrayLikeObject_co | None = 0, - ) -> poly1d: ... 
- def from_dlpack( x: _SupportsDLPack[None], /, diff --git a/numpy/lib/_polynomial_impl.pyi b/numpy/lib/_polynomial_impl.pyi index 4899b868071c..a88581f2228f 100644 --- a/numpy/lib/_polynomial_impl.pyi +++ b/numpy/lib/_polynomial_impl.pyi @@ -1,4 +1,15 @@ -from typing import Any, Literal as L, NoReturn, SupportsIndex, SupportsInt, overload +from _typeshed import ConvertibleToInt, Incomplete +from collections.abc import Iterator +from typing import ( + Any, + ClassVar, + Literal as L, + NoReturn, + Self, + SupportsIndex, + SupportsInt, + overload, +) import numpy as np from numpy import ( @@ -9,7 +20,6 @@ from numpy import ( int32, int64, object_, - poly1d, signedinteger, unsignedinteger, ) @@ -22,6 +32,9 @@ from numpy._typing import ( _ArrayLikeInt_co, _ArrayLikeObject_co, _ArrayLikeUInt_co, + _FloatLike_co, + _NestedSequence, + _ScalarLike_co, ) type _2Tup[T] = tuple[T, T] @@ -43,6 +56,98 @@ __all__ = [ "polyfit", ] +class poly1d: + __module__: L["numpy"] = "numpy" + + __hash__: ClassVar[None] # type: ignore[assignment] # pyright: ignore[reportIncompatibleMethodOverride] + + @property + def variable(self) -> str: ... + @property + def order(self) -> int: ... + @property + def o(self) -> int: ... + @property + def roots(self) -> NDArray[Incomplete]: ... + @property + def r(self) -> NDArray[Incomplete]: ... + + # + @property + def coeffs(self) -> NDArray[Incomplete]: ... + @coeffs.setter + def coeffs(self, value: NDArray[Incomplete], /) -> None: ... + + # + @property + def c(self) -> NDArray[Any]: ... + @c.setter + def c(self, value: NDArray[Incomplete], /) -> None: ... + + # + @property + def coef(self) -> NDArray[Incomplete]: ... + @coef.setter + def coef(self, value: NDArray[Incomplete], /) -> None: ... + + # + @property + def coefficients(self) -> NDArray[Incomplete]: ... + @coefficients.setter + def coefficients(self, value: NDArray[Incomplete], /) -> None: ... 
+ + # + def __init__(self, /, c_or_r: ArrayLike, r: bool = False, variable: str | None = None) -> None: ... + + # + @overload + def __array__(self, /, t: None = None, copy: bool | None = None) -> np.ndarray[tuple[int], np.dtype[Incomplete]]: ... + @overload + def __array__[DTypeT: np.dtype](self, /, t: DTypeT, copy: bool | None = None) -> np.ndarray[tuple[int], DTypeT]: ... + + # + @overload + def __call__(self, /, val: _ScalarLike_co) -> Incomplete: ... + @overload + def __call__(self, /, val: poly1d) -> Self: ... + @overload + def __call__(self, /, val: NDArray[Incomplete] | _NestedSequence[_ScalarLike_co]) -> NDArray[Incomplete]: ... + + # + def __len__(self) -> int: ... + def __iter__(self) -> Iterator[Incomplete]: ... + + # + def __getitem__(self, val: int, /) -> Incomplete: ... + def __setitem__(self, key: int, val: Incomplete, /) -> None: ... + + def __neg__(self) -> Self: ... + def __pos__(self) -> Self: ... + + # + def __add__(self, other: ArrayLike, /) -> Self: ... + def __radd__(self, other: ArrayLike, /) -> Self: ... + + # + def __sub__(self, other: ArrayLike, /) -> Self: ... + def __rsub__(self, other: ArrayLike, /) -> Self: ... + + # + def __mul__(self, other: ArrayLike, /) -> Self: ... + def __rmul__(self, other: ArrayLike, /) -> Self: ... + + # + def __pow__(self, val: _FloatLike_co, /) -> Self: ... # Integral floats are accepted + + # + def __truediv__(self, other: ArrayLike, /) -> Self: ... + def __rtruediv__(self, other: ArrayLike, /) -> Self: ... + + # + def deriv(self, /, m: ConvertibleToInt = 1) -> Self: ... + def integ(self, /, m: ConvertibleToInt = 1, k: _ArrayLikeComplex_co | _ArrayLikeObject_co | None = 0) -> poly1d: ... + +# def poly(seq_of_zeros: ArrayLike) -> NDArray[floating]: ... # Returns either a float or complex array depending on the input values. 
From be4e3cb385e68c0b544f526dc8abc4573e5b444c Mon Sep 17 00:00:00 2001 From: jorenham Date: Mon, 5 Jan 2026 17:24:47 +0100 Subject: [PATCH 1206/1718] TYP: move ``busdaycalendar`` stubs from ``__init__.pyi`` to ``_core/multiarray.pyi`` --- numpy/__init__.pyi | 14 +------------- numpy/_core/multiarray.pyi | 18 +++++++++++++++++- 2 files changed, 18 insertions(+), 14 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 05f9f11f8c56..37515e8fb26f 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -316,6 +316,7 @@ from numpy._core.multiarray import ( ascontiguousarray, asfortranarray, arange, + busdaycalendar, busday_count, busday_offset, datetime_as_string, @@ -6063,19 +6064,6 @@ class broadcast: def __iter__(self) -> Self: ... def reset(self) -> None: ... -@final -class busdaycalendar: - def __init__( - self, - /, - weekmask: str | Sequence[int | bool_ | integer] | _SupportsArray[dtype[bool_ | integer]] = "1111100", - holidays: Sequence[dt.date | datetime64] | _SupportsArray[dtype[datetime64]] | None = None, - ) -> None: ... - @property - def weekmask(self) -> ndarray[tuple[int], dtype[bool_]]: ... - @property - def holidays(self) -> ndarray[tuple[int], dtype[datetime64[dt.date]]]: ... - @final class nditer: @overload diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index 029494c2e4e7..33452492b001 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -25,7 +25,6 @@ from numpy import ( # type: ignore[attr-defined] # Python >=3.12 _OrderKACF, _SupportsFileMethods, broadcast, - busdaycalendar, complexfloating, correlate, count_nonzero, @@ -1129,6 +1128,23 @@ def arange( # def datetime_data(dtype: str | _DTypeLike[datetime64 | timedelta64], /) -> tuple[str, int]: ... 
+# +@final +class busdaycalendar: + __module__: ClassVar[L["numpy"]] = "numpy" # type: ignore[misc] # pyright: ignore[reportIncompatibleVariableOverride] + + def __init__( + self, + /, + weekmask: str | Sequence[_IntLike_co] | _SupportsArray[NDArray[np.bool | np.integer]] = "1111100", + holidays: Sequence[dt.date | np.datetime64[dt.date]] | _SupportsArray[NDArray[np.datetime64[dt.date]]] | None = None, + ) -> None: ... + @property + def weekmask(self) -> _Array1D[np.bool]: ... + @property + def holidays(self) -> _Array1D[np.datetime64[dt.date]]: ... + +# @overload def busday_count( begindates: _ScalarLike_co | dt.date, From 455d4d0aba1f14128009a49113213549ca4c853a Mon Sep 17 00:00:00 2001 From: jorenham Date: Mon, 5 Jan 2026 22:10:07 +0100 Subject: [PATCH 1207/1718] TYP: gradual generic scalar item type parameter defaults --- numpy/__init__.pyi | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 37515e8fb26f..b542a4a6f75e 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -714,15 +714,10 @@ _NBitT2 = TypeVar("_NBitT2", bound=NBitBase, default=_NBitT1) # pyright: ignore _ItemT_co = TypeVar("_ItemT_co", default=Any, covariant=True) _BoolItemT_co = TypeVar("_BoolItemT_co", bound=py_bool, default=py_bool, covariant=True) -_NumberItemT_co = TypeVar("_NumberItemT_co", bound=complex, default=int | float | complex, covariant=True) -_InexactItemT_co = TypeVar("_InexactItemT_co", bound=complex, default=float | complex, covariant=True) -_FlexibleItemT_co = TypeVar( - "_FlexibleItemT_co", - bound=_CharLike_co | tuple[Any, ...], - default=_CharLike_co | tuple[Any, ...], - covariant=True, -) -_CharacterItemT_co = TypeVar("_CharacterItemT_co", bound=_CharLike_co, default=_CharLike_co, covariant=True) +_NumberItemT_co = TypeVar("_NumberItemT_co", bound=complex, default=Any, covariant=True) # either int, float, or complex +_InexactItemT_co = TypeVar("_InexactItemT_co", bound=complex, default=Any, 
covariant=True) # either float or complex +_FlexibleItemT_co = TypeVar("_FlexibleItemT_co", bound=bytes | str | tuple[Any, ...], default=Any, covariant=True) +_CharacterItemT_co = TypeVar("_CharacterItemT_co", bound=bytes | str, default=Any, covariant=True) _TD64ItemT_co = TypeVar("_TD64ItemT_co", bound=_TD64Item, default=Any, covariant=True) _DT64ItemT_co = TypeVar("_DT64ItemT_co", bound=_DT64Item, default=Any, covariant=True) From 2a94668d377a6ab1d1fc5dde5a6f2f8571430a3e Mon Sep 17 00:00:00 2001 From: Alejandro Candioti Date: Mon, 5 Jan 2026 13:37:01 -0800 Subject: [PATCH 1208/1718] Adding sorter benchmarks --- benchmarks/benchmarks/bench_searchsorted.py | 30 ++++++++++++--------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/benchmarks/benchmarks/bench_searchsorted.py b/benchmarks/benchmarks/bench_searchsorted.py index d56fd6f1142a..fe1dcd9d3169 100644 --- a/benchmarks/benchmarks/bench_searchsorted.py +++ b/benchmarks/benchmarks/bench_searchsorted.py @@ -4,28 +4,32 @@ class SearchSortedInt64(Benchmark): - # Benchmark for np.searchsorted with int64 arrays params = [ - # 1B u64 is 8gb - [100, 10_000, 1_000_000, 1_000_000_000], # array sizes - [1, 2, 100, 100_000], # number of query elements - ['ordered', 'random'], # query order - [42, 18122022], # seed + [100, 10_000, 1_000_000, 100_000_000], # array sizes + [1, 10, 100_000], # number of query elements + ['ordered', 'random'], # query order + [False, True], # use sorted + [42, 18122022], # seed ] - param_names = ['array_size', 'n_queries', 'query_order', 'seed'] + param_names = ['array_size', 'n_queries', 'query_order', 'use_sorter', 'seed'] - def setup(self, array_size, n_queries, query_order, seed): - self.arr = np.arange(array_size, dtype=np.int64) + def setup(self, array_size, n_queries, query_order, use_sorter, seed): + self.arr = np.arange(array_size, dtype=np.int32) rng = np.random.default_rng(seed) low = -array_size // 10 high = array_size + array_size // 10 - self.queries = 
rng.integers(low, high, size=n_queries, dtype=np.int64) - # Generate queries + self.queries = rng.integers(low, high, size=n_queries, dtype=np.int32) if query_order == 'ordered': self.queries.sort() - def time_searchsorted(self, array_size, n_queries, query_order, seed): - np.searchsorted(self.arr, self.queries) + if use_sorter: + rng.shuffle(self.arr) + self.sorter = self.arr.argsort() + else: + self.sorter = None + + def time_searchsorted(self, array_size, n_queries, query_order, use_sorter, seed): + np.searchsorted(self.arr, self.queries, sorter=self.sorter) From 45f62b648321afabf8378093cc4d8c3f8950c426 Mon Sep 17 00:00:00 2001 From: jorenham Date: Mon, 5 Jan 2026 17:52:18 +0100 Subject: [PATCH 1209/1718] TYP: move ``flatiter`` stubs from ``__init__.pyi`` to ``_core/multiarray.pyi`` --- numpy/__init__.pyi | 59 +-------------------------------- numpy/_core/multiarray.pyi | 68 ++++++++++++++++++++++++++++++++++++-- 2 files changed, 67 insertions(+), 60 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 2c0d3d524973..3906c74c851c 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -328,6 +328,7 @@ from numpy._core.multiarray import ( promote_types, fromstring, frompyfunc, + flatiter, nested_iters, flagsobj, ) @@ -1598,64 +1599,6 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 @property def type(self) -> type[_ScalarT_co]: ... -@final -class flatiter(Generic[_ArrayT_co]): - __hash__: ClassVar[None] = None # type: ignore[assignment] # pyright: ignore[reportIncompatibleMethodOverride] - - @property - def base(self, /) -> _ArrayT_co: ... - @property - def coords[ShapeT: _Shape](self: flatiter[ndarray[ShapeT]], /) -> ShapeT: ... - @property - def index(self, /) -> int: ... - - # iteration - def __len__(self, /) -> int: ... - def __iter__(self, /) -> Self: ... - def __next__[ScalarT: generic](self: flatiter[NDArray[ScalarT]], /) -> ScalarT: ... 
- - # indexing - @overload # nd: _[()] - def __getitem__(self, key: tuple[()], /) -> _ArrayT_co: ... - @overload # 0d; _[] - def __getitem__[ScalarT: generic](self: flatiter[NDArray[ScalarT]], key: int | integer, /) -> ScalarT: ... - @overload # 1d; _[[*]], _[:], _[...] - def __getitem__[DTypeT: dtype]( - self: flatiter[ndarray[Any, DTypeT]], - key: list[int] | slice | EllipsisType | flatiter[NDArray[integer]], - /, - ) -> ndarray[tuple[int], DTypeT]: ... - @overload # 2d; _[[*[*]]] - def __getitem__[DTypeT: dtype]( - self: flatiter[ndarray[Any, DTypeT]], - key: list[list[int]], - /, - ) -> ndarray[tuple[int, int], DTypeT]: ... - @overload # ?d - def __getitem__[DTypeT: dtype]( - self: flatiter[ndarray[Any, DTypeT]], - key: NDArray[integer] | _NestedSequence[int], - /, - ) -> ndarray[_AnyShape, DTypeT]: ... - - # NOTE: `__setitem__` operates via `unsafe` casting rules, and can thus accept any - # type accepted by the relevant underlying `np.generic` constructor, which isn't - # known statically. So we cannot meaningfully annotate the value parameter. - def __setitem__(self, key: slice | EllipsisType | _ArrayLikeInt, val: object, /) -> None: ... - - # NOTE: `dtype` and `copy` are no-ops at runtime, so we don't support them here to - # avoid confusion - def __array__[DTypeT: dtype]( - self: flatiter[ndarray[Any, DTypeT]], - dtype: None = None, - /, - *, - copy: None = None, - ) -> ndarray[tuple[int], DTypeT]: ... - - # This returns a flat copy of the underlying array, not of the iterator itself - def copy[DTypeT: dtype](self: flatiter[ndarray[Any, DTypeT]], /) -> ndarray[tuple[int], DTypeT]: ... 
- @type_check_only class _ArrayOrScalarCommon: @property diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index 33452492b001..4ddfbbe36087 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -1,18 +1,21 @@ import datetime as dt from _typeshed import Incomplete, StrOrBytesPath, SupportsLenAndGetItem from collections.abc import Buffer, Callable, Iterable, Sequence +from types import EllipsisType from typing import ( Any, ClassVar, Final, + Generic, Literal as L, Protocol, + Self, SupportsIndex, final, overload, type_check_only, ) -from typing_extensions import CapsuleType +from typing_extensions import CapsuleType, TypeVar import numpy as np from numpy import ( # type: ignore[attr-defined] # Python >=3.12 @@ -31,7 +34,6 @@ from numpy import ( # type: ignore[attr-defined] # Python >=3.12 datetime64, dtype, einsum as c_einsum, - flatiter, float64, floating, from_dlpack, @@ -60,6 +62,7 @@ from numpy._typing import ( _ArrayLikeComplex_co, _ArrayLikeDT64_co, _ArrayLikeFloat_co, + _ArrayLikeInt, _ArrayLikeInt_co, _ArrayLikeObject_co, _ArrayLikeStr_co, @@ -176,6 +179,8 @@ __all__ = [ "zeros", ] +_ArrayT_co = TypeVar("_ArrayT_co", bound=np.ndarray, default=np.ndarray, covariant=True) + type _Array[ShapeT: _Shape, ScalarT: np.generic] = ndarray[ShapeT, dtype[ScalarT]] type _Array1D[ScalarT: np.generic] = ndarray[tuple[int], dtype[ScalarT]] @@ -1387,6 +1392,65 @@ class flagsobj: def __getitem__(self, key: _GetItemKeys) -> bool: ... def __setitem__(self, key: _SetItemKeys, value: bool) -> None: ... +@final +class flatiter(Generic[_ArrayT_co]): + __module__: ClassVar[L["numpy"]] = "numpy" # type: ignore[misc] # pyright: ignore[reportIncompatibleVariableOverride] + __hash__: ClassVar[None] = None # type: ignore[assignment] # pyright: ignore[reportIncompatibleMethodOverride] + + @property + def base(self, /) -> _ArrayT_co: ... + @property + def coords[ShapeT: _Shape](self: flatiter[np.ndarray[ShapeT]], /) -> ShapeT: ... 
+ @property + def index(self, /) -> int: ... + + # iteration + def __len__(self, /) -> int: ... + def __iter__(self, /) -> Self: ... + def __next__[ScalarT: np.generic](self: flatiter[NDArray[ScalarT]], /) -> ScalarT: ... + + # indexing + @overload # nd: _[()] + def __getitem__(self, key: tuple[()], /) -> _ArrayT_co: ... + @overload # 0d; _[] + def __getitem__[ScalarT: np.generic](self: flatiter[NDArray[ScalarT]], key: int | np.integer, /) -> ScalarT: ... + @overload # 1d; _[[*]], _[:], _[...] + def __getitem__[DTypeT: dtype]( + self: flatiter[np.ndarray[Any, DTypeT]], + key: list[int] | slice | EllipsisType | flatiter[NDArray[np.integer]], + /, + ) -> ndarray[tuple[int], DTypeT]: ... + @overload # 2d; _[[*[*]]] + def __getitem__[DTypeT: dtype]( + self: flatiter[np.ndarray[Any, DTypeT]], + key: list[list[int]], + /, + ) -> ndarray[tuple[int, int], DTypeT]: ... + @overload # ?d + def __getitem__[DTypeT: dtype]( + self: flatiter[np.ndarray[Any, DTypeT]], + key: NDArray[np.integer] | _NestedSequence[int], + /, + ) -> ndarray[_AnyShape, DTypeT]: ... + + # NOTE: `__setitem__` operates via `unsafe` casting rules, and can thus accept any + # type accepted by the relevant underlying `np.generic` constructor, which isn't + # known statically. So we cannot meaningfully annotate the value parameter. + def __setitem__(self, key: slice | EllipsisType | _ArrayLikeInt, val: object, /) -> None: ... + + # NOTE: `dtype` and `copy` are no-ops at runtime, so we don't support them here to + # avoid confusion + def __array__[DTypeT: dtype]( + self: flatiter[np.ndarray[Any, DTypeT]], + dtype: None = None, + /, + *, + copy: None = None, + ) -> ndarray[tuple[int], DTypeT]: ... + + # This returns a flat copy of the underlying array, not of the iterator itself + def copy[DTypeT: dtype](self: flatiter[np.ndarray[Any, DTypeT]], /) -> ndarray[tuple[int], DTypeT]: ... 
+ def nested_iters( op: ArrayLike | Sequence[ArrayLike], axes: Sequence[Sequence[SupportsIndex]], From 715a579e62b8c8b668c8180eed004e8ba160a546 Mon Sep 17 00:00:00 2001 From: jorenham Date: Mon, 5 Jan 2026 20:27:16 +0100 Subject: [PATCH 1210/1718] TYP: move ``nditer`` stubs from ``__init__.pyi`` to ``_core/multiarray.pyi`` --- numpy/__init__.pyi | 91 +-------------------------------- numpy/_core/multiarray.pyi | 101 ++++++++++++++++++++++++++++++++++++- 2 files changed, 100 insertions(+), 92 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 3906c74c851c..064f01f3e708 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -329,6 +329,7 @@ from numpy._core.multiarray import ( fromstring, frompyfunc, flatiter, + nditer, nested_iters, flagsobj, ) @@ -6008,96 +6009,6 @@ class broadcast: def __iter__(self) -> Self: ... def reset(self) -> None: ... -@final -class nditer: - @overload - def __init__( - self, - /, - op: ArrayLike, - flags: Sequence[_NDIterFlagsKind] | None = None, - op_flags: Sequence[_NDIterFlagsOp] | None = None, - op_dtypes: DTypeLike | None = None, - order: _OrderKACF = "K", - casting: _CastingKind = "safe", - op_axes: Sequence[SupportsIndex] | None = None, - itershape: _ShapeLike | None = None, - buffersize: SupportsIndex = 0, - ) -> None: ... - @overload - def __init__( - self, - /, - op: Sequence[ArrayLike | None], - flags: Sequence[_NDIterFlagsKind] | None = None, - op_flags: Sequence[Sequence[_NDIterFlagsOp]] | None = None, - op_dtypes: Sequence[DTypeLike | None] | None = None, - order: _OrderKACF = "K", - casting: _CastingKind = "safe", - op_axes: Sequence[Sequence[SupportsIndex]] | None = None, - itershape: _ShapeLike | None = None, - buffersize: SupportsIndex = 0, - ) -> None: ... - - def __enter__(self) -> nditer: ... - def __exit__( - self, - exc_type: type[BaseException] | None, - exc_value: BaseException | None, - traceback: TracebackType | None, - ) -> None: ... - def __iter__(self) -> nditer: ... 
- def __next__(self) -> tuple[NDArray[Any], ...]: ... - def __len__(self) -> int: ... - def __copy__(self) -> nditer: ... - @overload - def __getitem__(self, index: SupportsIndex) -> NDArray[Any]: ... - @overload - def __getitem__(self, index: slice) -> tuple[NDArray[Any], ...]: ... - def __setitem__(self, index: slice | SupportsIndex, value: ArrayLike) -> None: ... - def close(self) -> None: ... - def copy(self) -> nditer: ... - def debug_print(self) -> None: ... - def enable_external_loop(self) -> None: ... - def iternext(self) -> py_bool: ... - def remove_axis(self, i: SupportsIndex, /) -> None: ... - def remove_multi_index(self) -> None: ... - def reset(self) -> None: ... - @property - def dtypes(self) -> tuple[dtype, ...]: ... - @property - def finished(self) -> py_bool: ... - @property - def has_delayed_bufalloc(self) -> py_bool: ... - @property - def has_index(self) -> py_bool: ... - @property - def has_multi_index(self) -> py_bool: ... - @property - def index(self) -> int: ... - @property - def iterationneedsapi(self) -> py_bool: ... - @property - def iterindex(self) -> int: ... - @property - def iterrange(self) -> tuple[int, ...]: ... - @property - def itersize(self) -> int: ... - @property - def itviews(self) -> tuple[NDArray[Any], ...]: ... - @property - def multi_index(self) -> tuple[int, ...]: ... - @property - def ndim(self) -> int: ... - @property - def nop(self) -> int: ... - @property - def operands(self) -> tuple[NDArray[Any], ...]: ... - @property - def shape(self) -> tuple[int, ...]: ... - @property - def value(self) -> tuple[NDArray[Any], ...]: ... 
- def from_dlpack( x: _SupportsDLPack[None], /, diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index 4ddfbbe36087..8e41ac23579b 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -1,7 +1,7 @@ import datetime as dt from _typeshed import Incomplete, StrOrBytesPath, SupportsLenAndGetItem from collections.abc import Buffer, Callable, Iterable, Sequence -from types import EllipsisType +from types import EllipsisType, TracebackType from typing import ( Any, ClassVar, @@ -42,7 +42,6 @@ from numpy import ( # type: ignore[attr-defined] # Python >=3.12 intp, matmul, ndarray, - nditer, signedinteger, str_, timedelta64, @@ -1451,6 +1450,104 @@ class flatiter(Generic[_ArrayT_co]): # This returns a flat copy of the underlying array, not of the iterator itself def copy[DTypeT: dtype](self: flatiter[np.ndarray[Any, DTypeT]], /) -> ndarray[tuple[int], DTypeT]: ... +@final +class nditer: + __module__: ClassVar[L["numpy"]] = "numpy" # type: ignore[misc] # pyright: ignore[reportIncompatibleVariableOverride] + + @overload + def __init__( + self, + /, + op: ArrayLike, + flags: Sequence[_NDIterFlagsKind] | None = None, + op_flags: Sequence[_NDIterFlagsOp] | None = None, + op_dtypes: DTypeLike | None = None, + order: _OrderKACF = "K", + casting: _CastingKind = "safe", + op_axes: Sequence[SupportsIndex] | None = None, + itershape: _ShapeLike | None = None, + buffersize: SupportsIndex = 0, + ) -> None: ... + @overload + def __init__( + self, + /, + op: Sequence[ArrayLike | None], + flags: Sequence[_NDIterFlagsKind] | None = None, + op_flags: Sequence[Sequence[_NDIterFlagsOp]] | None = None, + op_dtypes: Sequence[DTypeLike | None] | None = None, + order: _OrderKACF = "K", + casting: _CastingKind = "safe", + op_axes: Sequence[Sequence[SupportsIndex]] | None = None, + itershape: _ShapeLike | None = None, + buffersize: SupportsIndex = 0, + ) -> None: ... + + # + def __enter__(self, /) -> nditer: ... 
+ def __exit__(self, cls: type[BaseException] | None, exc: BaseException | None, tb: TracebackType | None, /) -> None: ... + + # + def __iter__(self) -> nditer: ... + def __next__(self) -> tuple[NDArray[Incomplete], ...]: ... + def __len__(self) -> int: ... + + # + @overload + def __getitem__(self, index: SupportsIndex) -> NDArray[Incomplete]: ... + @overload + def __getitem__(self, index: slice) -> tuple[NDArray[Incomplete], ...]: ... + def __setitem__(self, index: slice | SupportsIndex, value: ArrayLike) -> None: ... + + # + def __copy__(self) -> Self: ... + def copy(self) -> Self: ... + + # + def close(self) -> None: ... + def debug_print(self) -> None: ... + def enable_external_loop(self) -> None: ... + def iternext(self) -> bool: ... + def remove_axis(self, i: SupportsIndex, /) -> None: ... + def remove_multi_index(self) -> None: ... + def reset(self) -> None: ... + + # + @property + def dtypes(self) -> tuple[np.dtype[Incomplete], ...]: ... + @property + def finished(self) -> bool: ... + @property + def has_delayed_bufalloc(self) -> bool: ... + @property + def has_index(self) -> bool: ... + @property + def has_multi_index(self) -> bool: ... + @property + def index(self) -> int: ... + @property + def iterationneedsapi(self) -> bool: ... + @property + def iterindex(self) -> int: ... + @property + def iterrange(self) -> tuple[int, ...]: ... + @property + def itersize(self) -> int: ... + @property + def itviews(self) -> tuple[NDArray[Incomplete], ...]: ... + @property + def multi_index(self) -> tuple[int, ...]: ... + @property + def ndim(self) -> int: ... + @property + def nop(self) -> int: ... + @property + def operands(self) -> tuple[NDArray[Incomplete], ...]: ... + @property + def shape(self) -> tuple[int, ...]: ... + @property + def value(self) -> tuple[NDArray[Incomplete], ...]: ... 
+ def nested_iters( op: ArrayLike | Sequence[ArrayLike], axes: Sequence[Sequence[SupportsIndex]], From 7f804860112f31d92771c97201246b9c425f4b79 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Tue, 6 Jan 2026 02:45:51 -0700 Subject: [PATCH 1211/1718] MAINT: avoid passing ints to random functions that take double (#30580) This avoids type unsafe function calls in cython random. --- numpy/random/_common.pxd | 5 ++++- numpy/random/_common.pyx | 7 ++++--- numpy/random/_generator.pyx | 2 +- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/numpy/random/_common.pxd b/numpy/random/_common.pxd index 0de4456d778f..7b6ae56bfe12 100644 --- a/numpy/random/_common.pxd +++ b/numpy/random/_common.pxd @@ -26,12 +26,15 @@ cdef enum ConstraintType: LEGACY_CONS_NON_NEGATIVE_INBOUNDS_LONG ctypedef ConstraintType constraint_type +ctypedef fused double_or_int64: + double + int64_t cdef object benchmark(bitgen_t *bitgen, object lock, Py_ssize_t cnt, object method) cdef object random_raw(bitgen_t *bitgen, object lock, object size, object output) cdef object prepare_cffi(bitgen_t *bitgen) cdef object prepare_ctypes(bitgen_t *bitgen) -cdef int check_constraint(double val, object name, constraint_type cons) except -1 +cdef int check_constraint(double_or_int64 val, object name, constraint_type cons) except -1 cdef int check_array_constraint(np.ndarray val, object name, constraint_type cons) except -1 cdef extern from "include/aligned_malloc.h": diff --git a/numpy/random/_common.pyx b/numpy/random/_common.pyx index 1fc2f7a02e11..22e0b028e703 100644 --- a/numpy/random/_common.pyx +++ b/numpy/random/_common.pyx @@ -425,12 +425,13 @@ cdef int check_array_constraint(np.ndarray val, object name, constraint_type con return 0 -cdef int check_constraint(double val, object name, constraint_type cons) except -1: +cdef int check_constraint(double_or_int64 val, object name, constraint_type cons) except -1: if cons == CONS_NON_NEGATIVE: - if not isnan(val) and signbit(val): + if 
((double_or_int64 is double and not isnan(val) and signbit(val)) or + (double_or_int64 is int64_t and val < 0)): raise ValueError(f"{name} < 0") elif cons == CONS_POSITIVE or cons == CONS_POSITIVE_NOT_NAN: - if cons == CONS_POSITIVE_NOT_NAN and isnan(val): + if cons == CONS_POSITIVE_NOT_NAN and double_or_int64 is double and isnan(val): raise ValueError(f"{name} must not be NaN") elif val <= 0: raise ValueError(f"{name} <= 0") diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index 41f6673623f6..27cf0e16e70f 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -955,7 +955,7 @@ cdef class Generator: cutoff = 20 if pop_size_i > 10000 and (size_i > (pop_size_i // cutoff)): # Tail shuffle size elements - idx = np.PyArray_Arange(0, pop_size_i, 1, np.NPY_INT64) + idx = np.arange(0, pop_size_i, dtype=np.int64) idx_data = (idx).data with self.lock, nogil: _shuffle_int(&self._bitgen, pop_size_i, From 94d9ae22aebb0970ea5c4b5159c44e3f7292ab2c Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Tue, 6 Jan 2026 11:51:12 +0100 Subject: [PATCH 1212/1718] MAINT: Towards removing `SeedSequence` (#30584) --- numpy/random/bit_generator.pxd | 5 ----- numpy/random/bit_generator.pyx | 5 +++++ 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/numpy/random/bit_generator.pxd b/numpy/random/bit_generator.pxd index 4ba18f17ecb2..dbaab4721fec 100644 --- a/numpy/random/bit_generator.pxd +++ b/numpy/random/bit_generator.pxd @@ -33,8 +33,3 @@ cdef class SeedSequence(): cdef class SeedlessSeedSequence: pass - -# NOTE: This has no implementation and should not be used. It purely exists for -# backwards compatibility, see https://github.com/scipy/scipy/issues/24215. 
-cdef class SeedlessSequence: - pass diff --git a/numpy/random/bit_generator.pyx b/numpy/random/bit_generator.pyx index 01b35a7a621a..919fccdd7bb6 100644 --- a/numpy/random/bit_generator.pyx +++ b/numpy/random/bit_generator.pyx @@ -709,3 +709,8 @@ cdef class BitGenerator: if self._cffi is None: self._cffi = prepare_cffi(&self._bitgen) return self._cffi + +# NOTE: This has no implementation and should not be used. It purely exists for +# backwards compatibility, see https://github.com/scipy/scipy/issues/24215. +cdef class SeedlessSequence: + pass From ada8e4da0d0d249df048f7922ce78402432a3cc0 Mon Sep 17 00:00:00 2001 From: Alejandro Candioti Date: Tue, 6 Jan 2026 03:39:20 -0800 Subject: [PATCH 1213/1718] Fix benchmark name --- benchmarks/benchmarks/bench_searchsorted.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmarks/benchmarks/bench_searchsorted.py b/benchmarks/benchmarks/bench_searchsorted.py index fe1dcd9d3169..720c5ac7a597 100644 --- a/benchmarks/benchmarks/bench_searchsorted.py +++ b/benchmarks/benchmarks/bench_searchsorted.py @@ -3,7 +3,7 @@ from .common import Benchmark -class SearchSortedInt64(Benchmark): +class SearchSorted(Benchmark): params = [ [100, 10_000, 1_000_000, 100_000_000], # array sizes [1, 10, 100_000], # number of query elements From 9d48ad437b2d3585040fc370ce8bf442422b12ac Mon Sep 17 00:00:00 2001 From: Alexander Shadchin Date: Tue, 6 Jan 2026 14:41:22 +0300 Subject: [PATCH 1214/1718] BLD: Avoiding conflict with pygit2 for static build (#30585) Need to use `static` for local functions and struct definitions. 
--- numpy/_core/src/umath/_rational_tests.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/umath/_rational_tests.c b/numpy/_core/src/umath/_rational_tests.c index d257bc22d051..c00c8468b651 100644 --- a/numpy/_core/src/umath/_rational_tests.c +++ b/numpy/_core/src/umath/_rational_tests.c @@ -1097,7 +1097,7 @@ rational_ufunc_test_add_rationals(char** args, npy_intp const *dimensions, } -PyMethodDef module_methods[] = { +static PyMethodDef module_methods[] = { {0} /* sentinel */ }; From 6804472730b00dcd0eb0b0096dd53f6adc6c473f Mon Sep 17 00:00:00 2001 From: Kumar Aditya Date: Tue, 6 Jan 2026 18:21:49 +0530 Subject: [PATCH 1215/1718] MAINT: cleanup global state of multiarray (#30573) This PR cleans up the global state in multiarray, the imports are cached in npy_runtime_imports_struct so the PyObjects fields in npy_thread_unsafe_state are redundant. Also renames it to npy_global_state as the leftover state is thread safe and initialized on module import. Co-authored-by: Nathan Goldbaum --- numpy/_core/src/multiarray/alloc.cpp | 8 +-- numpy/_core/src/multiarray/arrayobject.c | 2 +- numpy/_core/src/multiarray/multiarraymodule.c | 22 +++---- numpy/_core/src/multiarray/multiarraymodule.h | 65 +++---------------- numpy/_core/src/umath/_scaled_float_dtype.c | 4 +- 5 files changed, 27 insertions(+), 74 deletions(-) diff --git a/numpy/_core/src/multiarray/alloc.cpp b/numpy/_core/src/multiarray/alloc.cpp index cd125c796168..8684cb1b71b7 100644 --- a/numpy/_core/src/multiarray/alloc.cpp +++ b/numpy/_core/src/multiarray/alloc.cpp @@ -82,7 +82,7 @@ NPY_NO_EXPORT PyObject * _get_madvise_hugepage(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args)) { #ifdef NPY_OS_LINUX - if (npy_thread_unsafe_state.madvise_hugepage) { + if (npy_global_state.madvise_hugepage) { Py_RETURN_TRUE; } #endif @@ -100,12 +100,12 @@ _get_madvise_hugepage(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args)) NPY_NO_EXPORT PyObject * _set_madvise_hugepage(PyObject 
*NPY_UNUSED(self), PyObject *enabled_obj) { - int was_enabled = npy_thread_unsafe_state.madvise_hugepage; + int was_enabled = npy_global_state.madvise_hugepage; int enabled = PyObject_IsTrue(enabled_obj); if (enabled < 0) { return NULL; } - npy_thread_unsafe_state.madvise_hugepage = enabled; + npy_global_state.madvise_hugepage = enabled; if (was_enabled) { Py_RETURN_TRUE; } @@ -118,7 +118,7 @@ indicate_hugepages(void *p, size_t size) { #ifdef NPY_OS_LINUX /* allow kernel allocating huge pages for large arrays */ if (NPY_UNLIKELY(size >= ((1u<<22u))) && - npy_thread_unsafe_state.madvise_hugepage) { + npy_global_state.madvise_hugepage) { npy_uintp offset = 4096u - (npy_uintp)p % (4096u); npy_uintp length = size - offset; /** diff --git a/numpy/_core/src/multiarray/arrayobject.c b/numpy/_core/src/multiarray/arrayobject.c index 6f520fd6abbb..4156bd4179cf 100644 --- a/numpy/_core/src/multiarray/arrayobject.c +++ b/numpy/_core/src/multiarray/arrayobject.c @@ -427,7 +427,7 @@ array_dealloc(PyArrayObject *self) } } if (fa->mem_handler == NULL) { - if (npy_thread_unsafe_state.warn_if_no_mem_policy) { + if (npy_global_state.warn_if_no_mem_policy) { char const *msg = "Trying to dealloc data, but a memory policy " "is not set. If you take ownership of the data, you must " "set a base owning the data (e.g. 
a PyCapsule)."; diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index 8bede253a22f..e775760617e5 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -4383,7 +4383,7 @@ _populate_finfo_constants(PyObject *NPY_UNUSED(self), PyObject *args) buffer_data = PyArray_BYTES(buffer_array); npy_intp elsize = PyArray_DESCR(buffer_array)->elsize; - for (int i = 0; i < n_finfo_constants; i++) + for (int i = 0; i < n_finfo_constants; i++) { PyObject *value_obj; if (!finfo_constants[i].is_int) { @@ -4436,8 +4436,8 @@ _set_numpy_warn_if_no_mem_policy(PyObject *NPY_UNUSED(self), PyObject *arg) if (res < 0) { return NULL; } - int old_value = npy_thread_unsafe_state.warn_if_no_mem_policy; - npy_thread_unsafe_state.warn_if_no_mem_policy = res; + int old_value = npy_global_state.warn_if_no_mem_policy; + npy_global_state.warn_if_no_mem_policy = res; if (old_value) { Py_RETURN_TRUE; } @@ -4483,11 +4483,11 @@ _reload_guard(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args)) { return NULL; } /* No need to give the other warning in a sub-interpreter as well... */ - npy_thread_unsafe_state.reload_guard_initialized = 1; + npy_global_state.reload_guard_initialized = 1; Py_RETURN_NONE; } #endif - if (npy_thread_unsafe_state.reload_guard_initialized) { + if (npy_global_state.reload_guard_initialized) { if (PyErr_WarnEx(PyExc_UserWarning, "The NumPy module was reloaded (imported a second time). 
" "This can in some cases result in small but subtle issues " @@ -4495,7 +4495,7 @@ _reload_guard(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args)) { return NULL; } } - npy_thread_unsafe_state.reload_guard_initialized = 1; + npy_global_state.reload_guard_initialized = 1; Py_RETURN_NONE; } @@ -4886,16 +4886,16 @@ set_flaginfo(PyObject *d) } // static variables are automatically zero-initialized -NPY_VISIBILITY_HIDDEN npy_thread_unsafe_state_struct npy_thread_unsafe_state; +NPY_VISIBILITY_HIDDEN npy_global_state_struct npy_global_state; static int -initialize_thread_unsafe_state(void) { +initialize_global_state(void) { char *env = getenv("NUMPY_WARN_IF_NO_MEM_POLICY"); if ((env != NULL) && (strncmp(env, "1", 1) == 0)) { - npy_thread_unsafe_state.warn_if_no_mem_policy = 1; + npy_global_state.warn_if_no_mem_policy = 1; } else { - npy_thread_unsafe_state.warn_if_no_mem_policy = 0; + npy_global_state.warn_if_no_mem_policy = 0; } return 0; @@ -4954,7 +4954,7 @@ _multiarray_umath_exec(PyObject *m) { return -1; } - if (initialize_thread_unsafe_state() < 0) { + if (initialize_global_state() < 0) { return -1; } diff --git a/numpy/_core/src/multiarray/multiarraymodule.h b/numpy/_core/src/multiarray/multiarraymodule.h index 32decb4284fe..4ce211f4339b 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.h +++ b/numpy/_core/src/multiarray/multiarraymodule.h @@ -6,60 +6,14 @@ extern "C" { #endif /* - * A struct storing thread-unsafe global state for the _multiarray_umath - * module. We should refactor so the global state is thread-safe, - * e.g. by adding locking. + * A struct storing global state for the _multiarray_umath + * module. The state is initialized when the module is imported + * so no locking is necessary to access it. + * + * These globals will need to move to per-module state to + * support reloading or subinterpreters. */ -typedef struct npy_thread_unsafe_state_struct { - /* - * Cached references to objects obtained via an import. 
All of these are - * can be initialized at any time by npy_cache_import. - * - * Currently these are not initialized in a thread-safe manner but the - * failure mode is a reference leak for references to imported immortal - * modules so it will never lead to a crash unless users are doing something - * janky that we don't support like reloading. - * - * TODO: maybe make each entry a struct that looks like: - * - * struct { - * atomic_int initialized; - * PyObject *value; - * } - * - * so the initialization is thread-safe and the only possible lock - * contention happens before the cache is initialized, not on every single - * read. - */ - PyObject *_add_dtype_helper; - PyObject *_all; - PyObject *_amax; - PyObject *_amin; - PyObject *_any; - PyObject *array_function_errmsg_formatter; - PyObject *array_ufunc_errmsg_formatter; - PyObject *_clip; - PyObject *_commastring; - PyObject *_convert_to_stringdtype_kwargs; - PyObject *_default_array_repr; - PyObject *_default_array_str; - PyObject *_dump; - PyObject *_dumps; - PyObject *_getfield_is_safe; - PyObject *internal_gcd_func; - PyObject *_mean; - PyObject *NO_NEP50_WARNING; - PyObject *npy_ctypes_check; - PyObject *numpy_matrix; - PyObject *_prod; - PyObject *_promote_fields; - PyObject *_std; - PyObject *_sum; - PyObject *_ufunc_doc_signature_formatter; - PyObject *_var; - PyObject *_view_is_safe; - PyObject *_void_scalar_to_string; - +typedef struct npy_global_state_struct { /* * Used to test the internal-only scaled float test dtype */ @@ -80,11 +34,10 @@ typedef struct npy_thread_unsafe_state_struct { * if there is no memory policy set */ int warn_if_no_mem_policy; - -} npy_thread_unsafe_state_struct; +} npy_global_state_struct; -NPY_VISIBILITY_HIDDEN extern npy_thread_unsafe_state_struct npy_thread_unsafe_state; +NPY_VISIBILITY_HIDDEN extern npy_global_state_struct npy_global_state; NPY_NO_EXPORT int get_legacy_print_mode(void); diff --git a/numpy/_core/src/umath/_scaled_float_dtype.c 
b/numpy/_core/src/umath/_scaled_float_dtype.c index 020e903b5fc8..9bf318d97b10 100644 --- a/numpy/_core/src/umath/_scaled_float_dtype.c +++ b/numpy/_core/src/umath/_scaled_float_dtype.c @@ -1077,7 +1077,7 @@ sfloat_init_ufuncs(void) { NPY_NO_EXPORT PyObject * get_sfloat_dtype(PyObject *NPY_UNUSED(mod), PyObject *NPY_UNUSED(args)) { - if (npy_thread_unsafe_state.get_sfloat_dtype_initialized) { + if (npy_global_state.get_sfloat_dtype_initialized) { Py_INCREF(&PyArray_SFloatDType); return (PyObject *)&PyArray_SFloatDType; } @@ -1106,6 +1106,6 @@ get_sfloat_dtype(PyObject *NPY_UNUSED(mod), PyObject *NPY_UNUSED(args)) return NULL; } - npy_thread_unsafe_state.get_sfloat_dtype_initialized = NPY_TRUE; + npy_global_state.get_sfloat_dtype_initialized = NPY_TRUE; return (PyObject *)&PyArray_SFloatDType; } From efc4e78cb1407b800763533f6b70c450333ff925 Mon Sep 17 00:00:00 2001 From: Marco Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Tue, 6 Jan 2026 15:18:44 +0000 Subject: [PATCH 1216/1718] update --- .github/workflows/mypy.yml | 2 +- numpy/typing/mypy_plugin.py | 4 ++-- ...pyright_cov.py => pyright_completeness.py} | 19 ++++++++----------- 3 files changed, 11 insertions(+), 14 deletions(-) rename tools/{pyright_cov.py => pyright_completeness.py} (81%) diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml index 95aed336ed97..3a477b25e76f 100644 --- a/.github/workflows/mypy.yml +++ b/.github/workflows/mypy.yml @@ -86,4 +86,4 @@ jobs: # Pyright reports different percentages on different platforms if: runner.os == 'Linux' run: | - spin run python tools/pyright_cov.py --verifytypes numpy --ignoreexternal --fail-under 80 --exclude-like '*.tests.*' + spin run python tools/pyright_completeness.py --verifytypes numpy --ignoreexternal --exclude-like '*.tests.*' '*.conftest.*' diff --git a/numpy/typing/mypy_plugin.py b/numpy/typing/mypy_plugin.py index 21aca2bc69ef..04014a9e867b 100644 --- a/numpy/typing/mypy_plugin.py +++ b/numpy/typing/mypy_plugin.py @@ 
-108,10 +108,10 @@ def _get_c_intp_name() -> str: from mypy.nodes import ImportFrom, MypyFile, Statement from mypy.plugin import AnalyzeTypeContext, Plugin -except ModuleNotFoundError as e: +except ModuleNotFoundError as _exc: def plugin(version: str) -> type: - raise e + raise _exc else: diff --git a/tools/pyright_cov.py b/tools/pyright_completeness.py similarity index 81% rename from tools/pyright_cov.py rename to tools/pyright_completeness.py index c38d65fff7e8..a19d3e7427c0 100644 --- a/tools/pyright_cov.py +++ b/tools/pyright_completeness.py @@ -4,14 +4,11 @@ Example usage: - spin run python tools/pyright_cov.py --verifytypes numpy --ignoreexternal \ - --fail-under 80 --exclude-like '*.tests.*' + spin run python tools/pyright_completeness.py --verifytypes numpy --ignoreexternal \ + --fail-under 80 --exclude-like '*.tests.*' '*.conftest.*' We use `--ignoreexternal` to avoid "partially unknown" reports coming from the stdlib `numbers` module, see https://github.com/microsoft/pyright/discussions/9911. - -It might be possible to replace this with `basedpyright` -https://github.com/DetachHead/basedpyright/issues/125 in the future. 
""" from __future__ import annotations @@ -34,6 +31,7 @@ def main(argv: Sequence[str] | None = None) -> int: parser.add_argument( "--exclude-like", required=False, + nargs='*', type=str, help="Exclude symbols whose names matches this glob pattern", ) @@ -47,7 +45,7 @@ def main(argv: Sequence[str] | None = None) -> int: def run_pyright_with_coverage( pyright_args: list[str], cov_fail_under: float, - exclude_like: str | None, + exclude_like: Sequence[str], ) -> int: result = subprocess.run( ["pyright", *pyright_args], capture_output=True, text=True @@ -60,10 +58,10 @@ def run_pyright_with_coverage( sys.stderr.write(result.stderr) return 1 - if exclude_like is not None: + if exclude_like: symbols = data["typeCompleteness"]["symbols"] matched_symbols = [ - x for x in symbols if not fnmatch.fnmatch(x["name"], exclude_like) + x for x in symbols if not any(fnmatch.fnmatch(x["name"], pattern) for pattern in exclude_like) and x['isExported'] ] cov_percent = ( @@ -73,16 +71,15 @@ def run_pyright_with_coverage( cov_percent = data["typeCompleteness"]["completenessScore"] * 100 sys.stderr.write(result.stderr) - sys.stdout.write(result.stdout) if cov_percent < cov_fail_under: sys.stdout.write( f"Coverage {cov_percent:.1f}% is below minimum required " - f"{cov_fail_under:.1f}%" + f"{cov_fail_under:.1f}%\n" ) return 1 sys.stdout.write( f"Coverage {cov_percent:.1f}% is at or above minimum required " - f"{cov_fail_under:.1f}%" + f"{cov_fail_under:.1f}%\n" ) return 0 From 170d1af958a5fdbba2c4cafc57f0d6b2743ddfe7 Mon Sep 17 00:00:00 2001 From: Marco Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Tue, 6 Jan 2026 15:19:49 +0000 Subject: [PATCH 1217/1718] post-merge fixup --- tools/pyright_completeness.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/tools/pyright_completeness.py b/tools/pyright_completeness.py index a19d3e7427c0..29db4d68b46e 100644 --- a/tools/pyright_completeness.py +++ b/tools/pyright_completeness.py @@ -10,6 +10,7 @@ 
We use `--ignoreexternal` to avoid "partially unknown" reports coming from the stdlib `numbers` module, see https://github.com/microsoft/pyright/discussions/9911. """ + from __future__ import annotations import argparse @@ -31,7 +32,7 @@ def main(argv: Sequence[str] | None = None) -> int: parser.add_argument( "--exclude-like", required=False, - nargs='*', + nargs="*", type=str, help="Exclude symbols whose names matches this glob pattern", ) @@ -47,9 +48,7 @@ def run_pyright_with_coverage( cov_fail_under: float, exclude_like: Sequence[str], ) -> int: - result = subprocess.run( - ["pyright", *pyright_args], capture_output=True, text=True - ) + result = subprocess.run(["pyright", *pyright_args], capture_output=True, text=True) try: data = json.loads(result.stdout) @@ -61,8 +60,10 @@ def run_pyright_with_coverage( if exclude_like: symbols = data["typeCompleteness"]["symbols"] matched_symbols = [ - x for x in symbols if not any(fnmatch.fnmatch(x["name"], pattern) for pattern in exclude_like) - and x['isExported'] + x + for x in symbols + if not any(fnmatch.fnmatch(x["name"], pattern) for pattern in exclude_like) + and x["isExported"] ] cov_percent = ( sum(x["isTypeKnown"] for x in matched_symbols) / len(matched_symbols) * 100 From b64172347a16e7013be2eecc7ce4c592f206742b Mon Sep 17 00:00:00 2001 From: Marco Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Tue, 6 Jan 2026 15:21:34 +0000 Subject: [PATCH 1218/1718] typo, remove unnecessary future annotations import --- .github/workflows/mypy.yml | 2 +- tools/pyright_completeness.py | 3 --- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml index 3a477b25e76f..f11e15c5ee2b 100644 --- a/.github/workflows/mypy.yml +++ b/.github/workflows/mypy.yml @@ -82,7 +82,7 @@ jobs: - name: Run Mypy run: | spin mypy - - name: Check Pyright's type compelteness is above 80% + - name: Check Pyright's type completeness is above 80% # Pyright reports different 
percentages on different platforms if: runner.os == 'Linux' run: | diff --git a/tools/pyright_completeness.py b/tools/pyright_completeness.py index 29db4d68b46e..a6640aa740ad 100644 --- a/tools/pyright_completeness.py +++ b/tools/pyright_completeness.py @@ -10,9 +10,6 @@ We use `--ignoreexternal` to avoid "partially unknown" reports coming from the stdlib `numbers` module, see https://github.com/microsoft/pyright/discussions/9911. """ - -from __future__ import annotations - import argparse import fnmatch import json From 5444684de716fe7dacab0fb8f41f6e99e807e132 Mon Sep 17 00:00:00 2001 From: Marco Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Tue, 6 Jan 2026 16:04:04 +0000 Subject: [PATCH 1219/1718] typo, try windows again --- .github/workflows/mypy.yml | 4 +--- tools/pyright_completeness.py | 19 +++++-------------- 2 files changed, 6 insertions(+), 17 deletions(-) diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml index f11e15c5ee2b..4c489e7fd49f 100644 --- a/.github/workflows/mypy.yml +++ b/.github/workflows/mypy.yml @@ -82,8 +82,6 @@ jobs: - name: Run Mypy run: | spin mypy - - name: Check Pyright's type completeness is above 80% - # Pyright reports different percentages on different platforms - if: runner.os == 'Linux' + - name: Check Pyright's type completeness is at least 100% run: | spin run python tools/pyright_completeness.py --verifytypes numpy --ignoreexternal --exclude-like '*.tests.*' '*.conftest.*' diff --git a/tools/pyright_completeness.py b/tools/pyright_completeness.py index a6640aa740ad..a921abcc1e6f 100644 --- a/tools/pyright_completeness.py +++ b/tools/pyright_completeness.py @@ -5,7 +5,7 @@ Example usage: spin run python tools/pyright_completeness.py --verifytypes numpy --ignoreexternal \ - --fail-under 80 --exclude-like '*.tests.*' '*.conftest.*' + --exclude-like '*.tests.*' '*.conftest.*' We use `--ignoreexternal` to avoid "partially unknown" reports coming from the stdlib `numbers` module, see 
https://github.com/microsoft/pyright/discussions/9911. @@ -20,12 +20,6 @@ def main(argv: Sequence[str] | None = None) -> int: parser = argparse.ArgumentParser() - parser.add_argument( - "--fail-under", - type=float, - default=100.0, - help="Fail if coverage is below this percentage", - ) parser.add_argument( "--exclude-like", required=False, @@ -37,12 +31,11 @@ def main(argv: Sequence[str] | None = None) -> int: pyright_args = list(unknownargs) if "--outputjson" not in pyright_args: pyright_args.append("--outputjson") - return run_pyright_with_coverage(pyright_args, args.fail_under, args.exclude_like) + return run_pyright_with_coverage(pyright_args, args.exclude_like) def run_pyright_with_coverage( pyright_args: list[str], - cov_fail_under: float, exclude_like: Sequence[str], ) -> int: result = subprocess.run(["pyright", *pyright_args], capture_output=True, text=True) @@ -69,15 +62,13 @@ def run_pyright_with_coverage( cov_percent = data["typeCompleteness"]["completenessScore"] * 100 sys.stderr.write(result.stderr) - if cov_percent < cov_fail_under: + if cov_percent < 100: sys.stdout.write( - f"Coverage {cov_percent:.1f}% is below minimum required " - f"{cov_fail_under:.1f}%\n" + f"Coverage {cov_percent:.1f}% is below minimum required 100%" ) return 1 sys.stdout.write( - f"Coverage {cov_percent:.1f}% is at or above minimum required " - f"{cov_fail_under:.1f}%\n" + f"Coverage {cov_percent:.1f}% is at or above minimum required 100%" ) return 0 From ead9d8554a25015e4b99e5cd90516ddf29524f61 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 6 Jan 2026 17:07:03 +0000 Subject: [PATCH 1220/1718] MAINT: Bump pypa/cibuildwheel from 3.3.0 to 3.3.1 Bumps [pypa/cibuildwheel](https://github.com/pypa/cibuildwheel) from 3.3.0 to 3.3.1. 
- [Release notes](https://github.com/pypa/cibuildwheel/releases) - [Changelog](https://github.com/pypa/cibuildwheel/blob/main/docs/changelog.md) - [Commits](https://github.com/pypa/cibuildwheel/compare/63fd63b352a9a8bdcc24791c9dbee952ee9a8abc...298ed2fb2c105540f5ed055e8a6ad78d82dd3a7e) --- updated-dependencies: - dependency-name: pypa/cibuildwheel dependency-version: 3.3.1 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/emscripten.yml | 2 +- .github/workflows/wheels.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index 4684537a9e8f..d1c730f5a732 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -36,7 +36,7 @@ jobs: fetch-tags: true persist-credentials: false - - uses: pypa/cibuildwheel@63fd63b352a9a8bdcc24791c9dbee952ee9a8abc # v3.3.0 + - uses: pypa/cibuildwheel@298ed2fb2c105540f5ed055e8a6ad78d82dd3a7e # v3.3.1 env: CIBW_PLATFORM: pyodide CIBW_BUILD: cp312-* diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index f9882f6fcbc3..bb15b79da4e8 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -99,7 +99,7 @@ jobs: fi - name: Build wheels - uses: pypa/cibuildwheel@63fd63b352a9a8bdcc24791c9dbee952ee9a8abc # v3.3.0 + uses: pypa/cibuildwheel@298ed2fb2c105540f5ed055e8a6ad78d82dd3a7e # v3.3.1 env: CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} From 472373d61dc4fd83f1d6efe5e2ef0613d0cf1204 Mon Sep 17 00:00:00 2001 From: Kumar Aditya Date: Tue, 6 Jan 2026 22:51:54 +0530 Subject: [PATCH 1221/1718] TST: expose identity hashtable as capsule for easier testing (#30575) --- .../src/multiarray/_multiarray_tests.c.src | 164 +++++++++++------- numpy/_core/tests/test_hashtable.py | 55 +++--- 2 files changed, 139 insertions(+), 80 deletions(-) diff --git a/numpy/_core/src/multiarray/_multiarray_tests.c.src 
b/numpy/_core/src/multiarray/_multiarray_tests.c.src index b79908e1d5e4..6f6d4bf8940d 100644 --- a/numpy/_core/src/multiarray/_multiarray_tests.c.src +++ b/numpy/_core/src/multiarray/_multiarray_tests.c.src @@ -923,89 +923,131 @@ get_all_cast_information(PyObject *NPY_UNUSED(mod), PyObject *NPY_UNUSED(args)) return NULL; } +static void +identity_cache_destructor(PyObject *capsule) +{ + PyArrayIdentityHash *tb = (PyArrayIdentityHash *)PyCapsule_GetPointer(capsule, "PyArrayIdentityHash"); + assert(tb != NULL); + PyArrayIdentityHash_Dealloc(tb); +} /* - * Helper to test the identity cache, takes a list of values and adds - * all to the cache except the last key/value pair. The last value is - * ignored, instead the last key is looked up. - * None is returned, if the key is not found. - * If `replace` is True, duplicate entries are ignored when adding to the - * hashtable. + * Create an identity hash table with the given key length and return it + * as a capsule. */ static PyObject * -identityhash_tester(PyObject *NPY_UNUSED(mod), - PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) +create_identity_hash(PyObject *NPY_UNUSED(self), PyObject *args) +{ + int key_len; + + if (!PyArg_ParseTuple(args, "i", &key_len)) { + return NULL; + } + + if (key_len < 1 || key_len >= NPY_MAXARGS) { + PyErr_SetString(PyExc_ValueError, "must have 1 to max-args keys."); + return NULL; + } + + PyArrayIdentityHash *tb = PyArrayIdentityHash_New(key_len); + if (tb == NULL) { + return NULL; + } + + PyObject *capsule = PyCapsule_New((void *)tb, "PyArrayIdentityHash", + identity_cache_destructor); + if (capsule == NULL) { + PyArrayIdentityHash_Dealloc(tb); + return NULL; + } + + return capsule; +} + +/* + * Set item in identity hash table provided as capsule and key as tuple. + * If replace is False and the key already exists, RuntimeError is raised else + * the value is replaced. 
+ */ +static PyObject * +identity_hash_set_item(PyObject *NPY_UNUSED(self), + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { NPY_PREPARE_ARGPARSER; - int key_len; - int replace; + PyObject *capsule, *key_tuple, *value; + int replace = 0; PyObject *replace_obj = Py_False; - PyObject *sequence; - PyObject *result = NULL; - if (npy_parse_arguments("identityhash_tester", args, len_args, kwnames, - "key_len", &PyArray_PythonPyIntFromInt, &key_len, - "sequence", NULL, &sequence, + if (npy_parse_arguments("identity_hash_set_item", args, len_args, kwnames, + "capsule", NULL, &capsule, + "key", NULL, &key_tuple, + "value", NULL, &value, "|replace", NULL, &replace_obj, NULL, NULL, NULL) < 0) { return NULL; } + + if (!PyCapsule_IsValid(capsule, "PyArrayIdentityHash")) { + PyErr_SetString(PyExc_TypeError, + "First argument must be a valid PyArrayIdentityHash capsule."); + return NULL; + } + replace = PyObject_IsTrue(replace_obj); if (error_converting(replace)) { return NULL; } - if (key_len < 1 || key_len >= NPY_MAXARGS) { - PyErr_SetString(PyExc_ValueError, "must have 1 to max-args keys."); + PyArrayIdentityHash *tb = (PyArrayIdentityHash *)PyCapsule_GetPointer(capsule, "PyArrayIdentityHash"); + assert(tb != NULL); + + if (!PyTuple_CheckExact(key_tuple) || PyTuple_GET_SIZE(key_tuple) != tb->key_len) { + PyErr_Format(PyExc_TypeError, + "key must be a tuple of length %d", tb->key_len); return NULL; } - PyArrayIdentityHash *tb = PyArrayIdentityHash_New(key_len); - if (tb == NULL) { + + if (PyArrayIdentityHash_SetItem(tb, &PyTuple_GET_ITEM(key_tuple, 0), value, replace) < 0) { return NULL; } - /* Replace the sequence with a guaranteed fast-sequence */ - sequence = PySequence_Fast(sequence, "converting sequence."); // noqa: borrowed-ref OK - if (sequence == NULL) { - goto finish; + Py_RETURN_NONE; +} + + +/* + * Get item from identity hash table provided as capsule and key as tuple. 
+ */ +static PyObject * +identity_hash_get_item(PyObject *NPY_UNUSED(self), PyObject *args) +{ + PyObject *capsule, *key_tuple; + + if (!PyArg_ParseTuple(args, "OO", &capsule, &key_tuple)) { + return NULL; } - Py_ssize_t length = PySequence_Fast_GET_SIZE(sequence); - for (Py_ssize_t i = 0; i < length; i++) { - PyObject *key_val = PySequence_Fast_GET_ITEM(sequence, i); - if (!PyTuple_CheckExact(key_val) || PyTuple_GET_SIZE(key_val) != 2) { - PyErr_SetString(PyExc_TypeError, "bad key-value pair."); - goto finish; - } - PyObject *key = PyTuple_GET_ITEM(key_val, 0); - PyObject *value = PyTuple_GET_ITEM(key_val, 1); - if (!PyTuple_CheckExact(key) || PyTuple_GET_SIZE(key) != key_len) { - PyErr_SetString(PyExc_TypeError, "bad key tuple."); - goto finish; - } + if (!PyCapsule_IsValid(capsule, "PyArrayIdentityHash")) { + PyErr_SetString(PyExc_TypeError, + "First argument must be a valid PyArrayIdentityHash capsule."); + return NULL; + } - PyObject *keys[NPY_MAXARGS]; - for (int j = 0; j < key_len; j++) { - keys[j] = PyTuple_GET_ITEM(key, j); - } - if (i != length - 1) { - if (PyArrayIdentityHash_SetItem(tb, keys, value, replace) < 0) { - goto finish; - } - } - else { - result = PyArrayIdentityHash_GetItem(tb, keys); - if (result == NULL) { - result = Py_None; - } - Py_INCREF(result); - } + PyArrayIdentityHash *tb = (PyArrayIdentityHash *)PyCapsule_GetPointer(capsule, "PyArrayIdentityHash"); + assert(tb != NULL); + + if (!PyTuple_CheckExact(key_tuple) || PyTuple_GET_SIZE(key_tuple) != tb->key_len) { + PyErr_Format(PyExc_TypeError, + "key must be a tuple of length %d", tb->key_len); + return NULL; } - finish: - Py_DECREF(sequence); - PyArrayIdentityHash_Dealloc(tb); + PyObject *result = PyArrayIdentityHash_GetItem(tb, &PyTuple_GET_ITEM(key_tuple, 0)); + if (result == NULL) { + Py_RETURN_NONE; + } + Py_INCREF(result); return result; } @@ -2309,9 +2351,15 @@ static PyMethodDef Multiarray_TestsMethods[] = { "Return a list with info on all available casts. 
Some of the info" "may differ for an actual cast if it uses value-based casting " "(flexible types)."}, - {"identityhash_tester", - (PyCFunction)identityhash_tester, - METH_KEYWORDS | METH_FASTCALL, NULL}, + {"create_identity_hash", + create_identity_hash, + METH_VARARGS, "Create a new PyArrayIdentityHash wrapped in a PyCapsule."}, + {"identity_hash_set_item", + (PyCFunction)identity_hash_set_item, + METH_KEYWORDS | METH_FASTCALL, "Set an item in a PyArrayIdentityHash capsule."}, + {"identity_hash_get_item", + identity_hash_get_item, + METH_VARARGS, "Get an item from a PyArrayIdentityHash capsule."}, {"array_indexing", array_indexing, METH_VARARGS, NULL}, diff --git a/numpy/_core/tests/test_hashtable.py b/numpy/_core/tests/test_hashtable.py index 25a7158aaf6f..19c3392e86d8 100644 --- a/numpy/_core/tests/test_hashtable.py +++ b/numpy/_core/tests/test_hashtable.py @@ -1,36 +1,47 @@ -import random - import pytest -from numpy._core._multiarray_tests import identityhash_tester +from numpy._core._multiarray_tests import ( + create_identity_hash, + identity_hash_get_item, + identity_hash_set_item, +) @pytest.mark.parametrize("key_length", [1, 3, 6]) @pytest.mark.parametrize("length", [1, 16, 2000]) -def test_identity_hashtable(key_length, length): - # use a 30 object pool for everything (duplicates will happen) - pool = [object() for i in range(20)] +def test_identity_hashtable_get_set(key_length, length): + # no collisions expected keys_vals = [] for i in range(length): - keys = tuple(random.choices(pool, k=key_length)) - keys_vals.append((keys, random.choice(pool))) + keys = tuple(object() for _ in range(key_length)) + keys_vals.append((keys, object())) + + ht = create_identity_hash(key_length) - dictionary = dict(keys_vals) + for i in range(length): + key, value = keys_vals[i] + identity_hash_set_item(ht, key, value) - # add a random item at the end: - keys_vals.append(random.choice(keys_vals)) - # the expected one could be different with duplicates: - expected = 
dictionary[keys_vals[-1][0]] + for key, value in keys_vals: + got = identity_hash_get_item(ht, key) + assert got is value - res = identityhash_tester(key_length, keys_vals, replace=True) - assert res is expected - if length == 1: - return +@pytest.mark.parametrize("key_length", [1, 3, 6]) +def test_identity_hashtable_replace(key_length): + ht = create_identity_hash(key_length) + + key = tuple(object() for _ in range(key_length)) + val1 = object() + val2 = object() + + identity_hash_set_item(ht, key, val1) + got = identity_hash_get_item(ht, key) + assert got is val1 - # add a new item with a key that is already used and a new value, this - # should error if replace is False, see gh-26690 - new_key = (keys_vals[1][0], object()) - keys_vals[0] = new_key with pytest.raises(RuntimeError): - identityhash_tester(key_length, keys_vals) + identity_hash_set_item(ht, key, val2) + + identity_hash_set_item(ht, key, val2, replace=True) + got = identity_hash_get_item(ht, key) + assert got is val2 From 38bad87c0cddad523d417bd2d8b9862b7920dab5 Mon Sep 17 00:00:00 2001 From: Marco Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Wed, 7 Jan 2026 10:37:22 +0000 Subject: [PATCH 1222/1718] ruff format compat, use basedpyright, simplify error message and conditional --- requirements/test_requirements.txt | 2 +- tools/pyright_completeness.py | 25 +++++++++++++------------ 2 files changed, 14 insertions(+), 13 deletions(-) diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index 784deac871b6..a482f882f1da 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -8,7 +8,7 @@ pytest-xdist pytest-timeout # For testing types mypy==1.19.1 -pyright +basedpyright # for optional f2py encoding detection charset-normalizer tzdata diff --git a/tools/pyright_completeness.py b/tools/pyright_completeness.py index a921abcc1e6f..4938b8ccad81 100644 --- a/tools/pyright_completeness.py +++ b/tools/pyright_completeness.py @@ 
-10,6 +10,7 @@ We use `--ignoreexternal` to avoid "partially unknown" reports coming from the stdlib `numbers` module, see https://github.com/microsoft/pyright/discussions/9911. """ + import argparse import fnmatch import json @@ -38,7 +39,9 @@ def run_pyright_with_coverage( pyright_args: list[str], exclude_like: Sequence[str], ) -> int: - result = subprocess.run(["pyright", *pyright_args], capture_output=True, text=True) + result = subprocess.run( + ["basedpyright", *pyright_args], capture_output=True, text=True + ) try: data = json.loads(result.stdout) @@ -55,21 +58,19 @@ def run_pyright_with_coverage( if not any(fnmatch.fnmatch(x["name"], pattern) for pattern in exclude_like) and x["isExported"] ] - cov_percent = ( - sum(x["isTypeKnown"] for x in matched_symbols) / len(matched_symbols) * 100 - ) + covered = sum(x["isTypeKnown"] for x in matched_symbols) / len(matched_symbols) else: - cov_percent = data["typeCompleteness"]["completenessScore"] * 100 + covered = data["typeCompleteness"]["completenessScore"] + sys.stderr.write(result.stderr) + if covered < 1: + sys.stdout.write(f"Coverage {covered:.1%} is below minimum required 100%") + return 1 sys.stderr.write(result.stderr) - if cov_percent < 100: - sys.stdout.write( - f"Coverage {cov_percent:.1f}% is below minimum required 100%" - ) + if covered < 100: + sys.stdout.write(f"Coverage {covered:.1f}% is below minimum required 100%") return 1 - sys.stdout.write( - f"Coverage {cov_percent:.1f}% is at or above minimum required 100%" - ) + sys.stdout.write("Coverage is at 100%") return 0 From 62d3661302251ed9ff89c325c3a16b55a7fedae0 Mon Sep 17 00:00:00 2001 From: Marco Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Wed, 7 Jan 2026 10:56:37 +0000 Subject: [PATCH 1223/1718] fixup --- tools/pyright_completeness.py | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/tools/pyright_completeness.py b/tools/pyright_completeness.py index 4938b8ccad81..e660368e5184 100644 --- 
a/tools/pyright_completeness.py +++ b/tools/pyright_completeness.py @@ -63,14 +63,9 @@ def run_pyright_with_coverage( covered = data["typeCompleteness"]["completenessScore"] sys.stderr.write(result.stderr) if covered < 1: - sys.stdout.write(f"Coverage {covered:.1%} is below minimum required 100%") + sys.stdout.write(f"Coverage {covered:.1%} is below minimum required 100%\n") return 1 - - sys.stderr.write(result.stderr) - if covered < 100: - sys.stdout.write(f"Coverage {covered:.1f}% is below minimum required 100%") - return 1 - sys.stdout.write("Coverage is at 100%") + sys.stdout.write("Coverage is at 100%\n") return 0 From 2ac159b91c46047b6aefc39e4fc43d131fe333da Mon Sep 17 00:00:00 2001 From: Marco Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Wed, 7 Jan 2026 11:09:54 +0000 Subject: [PATCH 1224/1718] back to pyright --- requirements/test_requirements.txt | 2 +- tools/pyright_completeness.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index a482f882f1da..784deac871b6 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -8,7 +8,7 @@ pytest-xdist pytest-timeout # For testing types mypy==1.19.1 -basedpyright +pyright # for optional f2py encoding detection charset-normalizer tzdata diff --git a/tools/pyright_completeness.py b/tools/pyright_completeness.py index e660368e5184..b861328c8657 100644 --- a/tools/pyright_completeness.py +++ b/tools/pyright_completeness.py @@ -40,7 +40,7 @@ def run_pyright_with_coverage( exclude_like: Sequence[str], ) -> int: result = subprocess.run( - ["basedpyright", *pyright_args], capture_output=True, text=True + ["pyright", *pyright_args], capture_output=True, text=True ) try: From f1cb7c5ee489f2b3a063869a60e14b9ebcb28537 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 7 Jan 2026 15:43:57 +0100 Subject: [PATCH 1225/1718] BUG: Fix result type change and make weak q safe Defer 
cast to input type to the end of the computation by passing through the `weak_q` information. This fixes gh-30586, but I am not sure it is great to backport. Although, compared to the other PR it should just align the nan-functions fully and otherwise revert anything that isn't just a precision fix. --- numpy/lib/_function_base_impl.py | 38 +++++++++++++++++--------------- numpy/lib/_nanfunctions_impl.py | 27 ++++++++++++++--------- 2 files changed, 36 insertions(+), 29 deletions(-) diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 3e0005079104..5e0c7b206948 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -4227,6 +4227,7 @@ def percentile(a, if a.dtype.kind == "c": raise TypeError("a must be an array of real numbers") + weak_q = type(q) in (int, float) # use weak promotion for final result type q = np.true_divide(q, 100, out=...) if not _quantile_is_valid(q): raise ValueError("Percentiles must be in the range [0, 100]") @@ -4243,7 +4244,7 @@ def percentile(a, raise ValueError("Weights must be non-negative.") return _quantile_unchecked( - a, q, axis, out, overwrite_input, method, keepdims, weights) + a, q, axis, out, overwrite_input, method, keepdims, weights, weak_q) def _quantile_dispatcher(a, q, axis=None, out=None, overwrite_input=None, @@ -4475,6 +4476,7 @@ def quantile(a, if a.dtype.kind == "c": raise TypeError("a must be an array of real numbers") + weak_q = type(q) in (int, float) # use weak promotion for final result type q = np.asanyarray(q) if not _quantile_is_valid(q): @@ -4492,7 +4494,7 @@ def quantile(a, raise ValueError("Weights must be non-negative.") return _quantile_unchecked( - a, q, axis, out, overwrite_input, method, keepdims, weights) + a, q, axis, out, overwrite_input, method, keepdims, weights, weak_q) def _quantile_unchecked(a, @@ -4502,7 +4504,8 @@ def _quantile_unchecked(a, overwrite_input=False, method="linear", keepdims=False, - weights=None): + weights=None, + 
weak_q=False): """Assumes that q is in [0, 1], and is an ndarray""" return _ureduce(a, func=_quantile_ureduce_func, @@ -4512,7 +4515,8 @@ def _quantile_unchecked(a, axis=axis, out=out, overwrite_input=overwrite_input, - method=method) + method=method, + weak_q=weak_q) def _quantile_is_valid(q): @@ -4551,7 +4555,7 @@ def _compute_virtual_index(n, quantiles, alpha: float, beta: float): ) - 1 -def _get_gamma(virtual_indexes, previous_indexes, method, dtype): +def _get_gamma(virtual_indexes, previous_indexes, method): """ Compute gamma (a.k.a 'm' or 'weight') for the linear interpolation of quantiles. @@ -4572,7 +4576,7 @@ def _get_gamma(virtual_indexes, previous_indexes, method, dtype): gamma = method["fix_gamma"](gamma, virtual_indexes) # Ensure both that we have an array, and that we keep the dtype # (which may have been matched to the input array). - return np.asanyarray(gamma, dtype=dtype) + return np.asanyarray(gamma) def _lerp(a, b, t, out=None): @@ -4640,6 +4644,7 @@ def _quantile_ureduce_func( out: np.ndarray | None = None, overwrite_input: bool = False, method: str = "linear", + weak_q: bool = False, ) -> np.ndarray: if q.ndim > 2: # The code below works fine for nd, but it might not have useful @@ -4666,7 +4671,8 @@ def _quantile_ureduce_func( axis=axis, method=method, out=out, - weights=wgt) + weights=wgt, + weak_q=weak_q) return result @@ -4712,6 +4718,7 @@ def _quantile( method: str = "linear", out: np.ndarray | None = None, weights: "np.typing.ArrayLike | None" = None, + weak_q: bool = False, ) -> np.ndarray: """ Private function that doesn't support extended axis or keepdims. 
@@ -4790,18 +4797,13 @@ def _quantile( previous = arr[previous_indexes] next = arr[next_indexes] # --- Linear interpolation - if arr.dtype.kind in "iu": - gtype = None - elif arr.dtype.kind == "f": - # make sure the return value matches the input array type - gtype = arr.dtype - else: - gtype = virtual_indexes.dtype - gamma = _get_gamma(virtual_indexes, previous_indexes, - method_props, gtype) - result_shape = virtual_indexes.shape + (1,) * (arr.ndim - 1) - gamma = gamma.reshape(result_shape) + method_props) + if weak_q: + gamma = float(gamma) + else: + result_shape = virtual_indexes.shape + (1,) * (arr.ndim - 1) + gamma = gamma.reshape(result_shape) result = _lerp(previous, next, gamma, diff --git a/numpy/lib/_nanfunctions_impl.py b/numpy/lib/_nanfunctions_impl.py index f030d74c5c11..d5a01a35f372 100644 --- a/numpy/lib/_nanfunctions_impl.py +++ b/numpy/lib/_nanfunctions_impl.py @@ -1377,7 +1377,8 @@ def nanpercentile( if a.dtype.kind == "c": raise TypeError("a must be an array of real numbers") - q = np.true_divide(q, a.dtype.type(100) if a.dtype.kind == "f" else 100, out=...) + weak_q = type(q) in (int, float) # use weak promotion for final result type + q = np.true_divide(q, 100, out=...) if not fnb._quantile_is_valid(q): raise ValueError("Percentiles must be in the range [0, 100]") @@ -1393,7 +1394,7 @@ def nanpercentile( raise ValueError("Weights must be non-negative.") return _nanquantile_unchecked( - a, q, axis, out, overwrite_input, method, keepdims, weights) + a, q, axis, out, overwrite_input, method, keepdims, weights, weak_q) def _nanquantile_dispatcher(a, q, axis=None, out=None, overwrite_input=None, @@ -1552,11 +1553,8 @@ def nanquantile( if a.dtype.kind == "c": raise TypeError("a must be an array of real numbers") - # Use dtype of array if possible (e.g., if q is a python int or float). 
- if isinstance(q, (int, float)) and a.dtype.kind == "f": - q = np.asanyarray(q, dtype=a.dtype) - else: - q = np.asanyarray(q) + weak_q = type(q) in (int, float) # use weak promotion for final result type + q = np.asanyarray(q) if not fnb._quantile_is_valid(q): raise ValueError("Quantiles must be in the range [0, 1]") @@ -1573,7 +1571,7 @@ def nanquantile( raise ValueError("Weights must be non-negative.") return _nanquantile_unchecked( - a, q, axis, out, overwrite_input, method, keepdims, weights) + a, q, axis, out, overwrite_input, method, keepdims, weights, weak_q) def _nanquantile_unchecked( @@ -1585,6 +1583,7 @@ def _nanquantile_unchecked( method="linear", keepdims=np._NoValue, weights=None, + weak_q=False, ): """Assumes that q is in [0, 1], and is an ndarray""" # apply_along_axis in _nanpercentile doesn't handle empty arrays well, @@ -1599,7 +1598,8 @@ def _nanquantile_unchecked( axis=axis, out=out, overwrite_input=overwrite_input, - method=method) + method=method, + weak_q=weak_q) def _nanquantile_ureduce_func( @@ -1610,6 +1610,7 @@ def _nanquantile_ureduce_func( out=None, overwrite_input: bool = False, method="linear", + weak_q=False, ): """ Private function that doesn't support extended axis or keepdims. @@ -1619,11 +1620,12 @@ def _nanquantile_ureduce_func( if axis is None or a.ndim == 1: part = a.ravel() wgt = None if weights is None else weights.ravel() - result = _nanquantile_1d(part, q, overwrite_input, method, weights=wgt) + result = _nanquantile_1d(part, q, overwrite_input, method, + weights=wgt, weak_q=weak_q) # Note that this code could try to fill in `out` right away elif weights is None: result = np.apply_along_axis(_nanquantile_1d, axis, a, q, - overwrite_input, method, weights) + overwrite_input, method, weights, weak_q) # apply_along_axis fills in collapsed axis with results. # Move those axes to the beginning to match percentile's # convention. 
@@ -1647,6 +1649,7 @@ def _nanquantile_ureduce_func( result[(...,) + ii] = _nanquantile_1d( a[ii], q, weights=weights[ii], overwrite_input=overwrite_input, method=method, + weak_q=weak_q, ) # This path dealt with `out` already... return result @@ -1658,6 +1661,7 @@ def _nanquantile_ureduce_func( def _nanquantile_1d( arr1d, q, overwrite_input=False, method="linear", weights=None, + weak_q=False, ): """ Private function for rank 1 arrays. Compute quantile ignoring NaNs. @@ -1676,6 +1680,7 @@ def _nanquantile_1d( overwrite_input=overwrite_input, method=method, weights=weights, + weak_q=weak_q, ) From 479ebf1102afebe0928296b473eee1ff78a07588 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 7 Jan 2026 16:19:24 +0100 Subject: [PATCH 1226/1718] TST: Add test showing that q promotes (for "interpolating ones") --- numpy/lib/tests/test_function_base.py | 37 +++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 412f06d07e20..a40d61a0d48d 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -3283,6 +3283,11 @@ def test_period(self): 'median_unbiased', 'normal_unbiased', 'nearest', 'lower', 'higher', 'midpoint'] +# Note: Technically, averaged_inverted_cdf and midpoint are not interpolated. +# but NumPy doesn't currently make a difference (at least w.r.t. to promotion). 
+interpolating_quantile_methods = [ + 'averaged_inverted_cdf', 'interpolated_inverted_cdf', 'hazen', 'weibull', + 'linear', 'median_unbiased', 'normal_unbiased', 'midpoint'] methods_supporting_weights = ["inverted_cdf"] @@ -3913,6 +3918,22 @@ def test_percentile_gh_29003_Fraction(self): assert z == one assert np.array(z).dtype == a.dtype + @pytest.mark.parametrize("method", interpolating_quantile_methods) + @pytest.mark.parametrize("q", [50, 10.0]) + def test_q_weak_promotion(self, method, q): + a = np.array([1, 2, 3, 4, 5], dtype=np.float32) + value = np.percentile(a, q, method=method) + assert value.dtype == np.float32 + + @pytest.mark.parametrize("method", interpolating_quantile_methods) + def test_q_strong_promotion(self, method): + # For interpolating methods, the dtype should be float64, for + # discrete ones the original int8. (technically, mid-point has no + # reason to take into account `q`, but does so anyway.) + a = np.array([1, 2, 3, 4, 5], dtype=np.float32) + value = np.percentile(a, np.float64(50), method=method) + assert value.dtype == np.float64 + class TestQuantile: # most of this is already tested by TestPercentile @@ -4335,6 +4356,22 @@ def test_float16_gh_29003(self): assert value == q * 50_000 assert value.dtype == np.float16 + @pytest.mark.parametrize("method", interpolating_quantile_methods) + @pytest.mark.parametrize("q", [0.5, 1]) + def test_q_weak_promotion(self, method, q): + a = np.array([1, 2, 3, 4, 5], dtype=np.float32) + value = np.quantile(a, q, method=method) + assert value.dtype == np.float32 + + @pytest.mark.parametrize("method", interpolating_quantile_methods) + def test_q_strong_promotion(self, method): + # For interpolating methods, the dtype should be float64, for + # discrete ones the original int8. (technically, mid-point has no + # reason to take into account `q`, but does so anyway.) 
+ a = np.array([1, 2, 3, 4, 5], dtype=np.float32) + value = np.quantile(a, np.float64(0.5), method=method) + assert value.dtype == np.float64 + class TestLerp: @hypothesis.given(t0=st.floats(allow_nan=False, allow_infinity=False, From 4c5b66614ef1573db1c216c628b7938be4b4e76f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 7 Jan 2026 17:54:28 +0000 Subject: [PATCH 1227/1718] MAINT: Bump astral-sh/setup-uv from 7.1.6 to 7.2.0 Bumps [astral-sh/setup-uv](https://github.com/astral-sh/setup-uv) from 7.1.6 to 7.2.0. - [Release notes](https://github.com/astral-sh/setup-uv/releases) - [Commits](https://github.com/astral-sh/setup-uv/compare/681c641aba71e4a1c380be3ab5e12ad51f415867...61cb8a9741eeb8a550a1b8544337180c0fc8476b) --- updated-dependencies: - dependency-name: astral-sh/setup-uv dependency-version: 7.2.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/mypy.yml | 2 +- .github/workflows/stubtest.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml index 4c489e7fd49f..da311699482d 100644 --- a/.github/workflows/mypy.yml +++ b/.github/workflows/mypy.yml @@ -61,7 +61,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: astral-sh/setup-uv@681c641aba71e4a1c380be3ab5e12ad51f415867 # v7.1.6 + - uses: astral-sh/setup-uv@61cb8a9741eeb8a550a1b8544337180c0fc8476b # v7.2.0 with: python-version: ${{ matrix.os_python[1] }} activate-environment: true diff --git a/.github/workflows/stubtest.yml b/.github/workflows/stubtest.yml index 526471f799c7..f3673b11b7bd 100644 --- a/.github/workflows/stubtest.yml +++ b/.github/workflows/stubtest.yml @@ -43,7 +43,7 @@ jobs: fetch-tags: true persist-credentials: false - - uses: astral-sh/setup-uv@681c641aba71e4a1c380be3ab5e12ad51f415867 # v7.1.6 + - uses: 
astral-sh/setup-uv@61cb8a9741eeb8a550a1b8544337180c0fc8476b # v7.2.0 with: python-version: ${{ matrix.py }} activate-environment: true From 8dd2e426160ff68a93d19ef00faf41cab68ab246 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 7 Jan 2026 22:10:44 +0100 Subject: [PATCH 1228/1718] TST: remove ``python<3.12`` tests --- numpy/_core/tests/test_dtype.py | 110 --------------------------- numpy/_core/tests/test_regression.py | 16 ---- numpy/lib/tests/test_format.py | 10 --- 3 files changed, 136 deletions(-) diff --git a/numpy/_core/tests/test_dtype.py b/numpy/_core/tests/test_dtype.py index b1f965d5164b..a77b59261c06 100644 --- a/numpy/_core/tests/test_dtype.py +++ b/numpy/_core/tests/test_dtype.py @@ -1,6 +1,5 @@ import contextlib import ctypes -import gc import inspect import operator import pickle @@ -822,115 +821,6 @@ def iter_struct_object_dtypes(): yield pytest.param(dt, p, 12, obj, id="") -@pytest.mark.skipif( - sys.version_info >= (3, 12), - reason="Python 3.12 has immortal refcounts, this test will no longer " - "work. See gh-23986" -) -@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") -class TestStructuredObjectRefcounting: - """These tests cover various uses of complicated structured types which - include objects and thus require reference counting. - """ - @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'], - iter_struct_object_dtypes()) - @pytest.mark.parametrize(["creation_func", "creation_obj"], [ - pytest.param(np.empty, None, - # None is probably used for too many things - marks=pytest.mark.skip("unreliable due to python's behaviour")), - (np.ones, 1), - (np.zeros, 0)]) - def test_structured_object_create_delete(self, dt, pat, count, singleton, - creation_func, creation_obj): - """Structured object reference counting in creation and deletion""" - # The test assumes that 0, 1, and None are singletons. 
- gc.collect() - before = sys.getrefcount(creation_obj) - arr = creation_func(3, dt) - - now = sys.getrefcount(creation_obj) - assert now - before == count * 3 - del arr - now = sys.getrefcount(creation_obj) - assert now == before - - @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'], - iter_struct_object_dtypes()) - def test_structured_object_item_setting(self, dt, pat, count, singleton): - """Structured object reference counting for simple item setting""" - one = 1 - - gc.collect() - before = sys.getrefcount(singleton) - arr = np.array([pat] * 3, dt) - assert sys.getrefcount(singleton) - before == count * 3 - # Fill with `1` and check that it was replaced correctly: - before2 = sys.getrefcount(one) - arr[...] = one - after2 = sys.getrefcount(one) - assert after2 - before2 == count * 3 - del arr - gc.collect() - assert sys.getrefcount(one) == before2 - assert sys.getrefcount(singleton) == before - - @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'], - iter_struct_object_dtypes()) - @pytest.mark.parametrize( - ['shape', 'index', 'items_changed'], - [((3,), ([0, 2],), 2), - ((3, 2), ([0, 2], slice(None)), 4), - ((3, 2), ([0, 2], [1]), 2), - ((3,), ([True, False, True]), 2)]) - def test_structured_object_indexing(self, shape, index, items_changed, - dt, pat, count, singleton): - """Structured object reference counting for advanced indexing.""" - # Use two small negative values (should be singletons, but less likely - # to run into race-conditions). This failed in some threaded envs - # When using 0 and 1. If it fails again, should remove all explicit - # checks, and rely on `pytest-leaks` reference count checker only. 
- val0 = -4 - val1 = -5 - - arr = np.full(shape, val0, dt) - - gc.collect() - before_val0 = sys.getrefcount(val0) - before_val1 = sys.getrefcount(val1) - # Test item getting: - part = arr[index] - after_val0 = sys.getrefcount(val0) - assert after_val0 - before_val0 == count * items_changed - del part - # Test item setting: - arr[index] = val1 - gc.collect() - after_val0 = sys.getrefcount(val0) - after_val1 = sys.getrefcount(val1) - assert before_val0 - after_val0 == count * items_changed - assert after_val1 - before_val1 == count * items_changed - - @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'], - iter_struct_object_dtypes()) - def test_structured_object_take_and_repeat(self, dt, pat, count, singleton): - """Structured object reference counting for specialized functions. - The older functions such as take and repeat use different code paths - then item setting (when writing this). - """ - indices = [0, 1] - - arr = np.array([pat] * 3, dt) - gc.collect() - before = sys.getrefcount(singleton) - res = arr.take(indices) - after = sys.getrefcount(singleton) - assert after - before == count * 2 - new = res.repeat(10) - gc.collect() - after_repeat = sys.getrefcount(singleton) - assert after_repeat - after == count * 2 * 10 - - class TestStructuredDtypeSparseFields: """Tests subarray fields which contain sparse dtypes so that not all memory is used by the dtype work. Such dtype's should diff --git a/numpy/_core/tests/test_regression.py b/numpy/_core/tests/test_regression.py index 7cd3cdec91d9..60e19a638531 100644 --- a/numpy/_core/tests/test_regression.py +++ b/numpy/_core/tests/test_regression.py @@ -1462,22 +1462,6 @@ def test_structured_arrays_with_objects1(self): x[x.nonzero()] = x.ravel()[:1] assert_(x[0, 1] == x[0, 0]) - @pytest.mark.skipif( - sys.version_info >= (3, 12), - reason="Python 3.12 has immortal refcounts, this test no longer works." 
- ) - @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") - def test_structured_arrays_with_objects2(self): - # Ticket #1299 second test - stra = 'aaaa' - strb = 'bbbb' - numb = sys.getrefcount(strb) - numa = sys.getrefcount(stra) - x = np.array([[(0, stra), (1, strb)]], 'i8,O') - x[x.nonzero()] = x.ravel()[:1] - assert_(sys.getrefcount(strb) == numb) - assert_(sys.getrefcount(stra) == numa + 2) - def test_duplicate_title_and_name(self): # Ticket #1254 dtspec = [(('a', 'a'), 'i'), ('b', 'i')] diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py index 52994f13bd05..7bbded153725 100644 --- a/numpy/lib/tests/test_format.py +++ b/numpy/lib/tests/test_format.py @@ -552,16 +552,6 @@ def test_load_padded_dtype(tmpdir, dt): assert_array_equal(arr, arr1) -@pytest.mark.skipif(sys.version_info >= (3, 12), reason="see gh-23988") -@pytest.mark.xfail(IS_WASM, reason="Emscripten NODEFS has a buggy dup") -def test_python2_python3_interoperability(): - fname = 'win64python2.npy' - path = os.path.join(os.path.dirname(__file__), 'data', fname) - with pytest.warns(UserWarning, match="Reading.*this warning\\."): - data = np.load(path) - assert_array_equal(data, np.ones(2)) - - @pytest.mark.filterwarnings( "ignore:.*align should be passed:numpy.exceptions.VisibleDeprecationWarning") def test_pickle_python2_python3(): From 5bb51070a19a56008adc1529b6e5f121aa4dae35 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 7 Jan 2026 22:24:59 +0100 Subject: [PATCH 1229/1718] MAINT: consistent ``sys.version_info`` checks --- numpy/__init__.pyi | 14 +++++++------- numpy/testing/_private/utils.py | 6 +++--- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index d0e53f3ac621..a3e13730eb11 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1337,11 +1337,11 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 ) -> dtype[longdouble]: ... 
# `complexfloating` string-based representations and ctypes - if sys.version_info >= (3, 14) and sys.platform != "win32": + if sys.version_info < (3, 14) or sys.platform == "win32": @overload def __new__( cls, - dtype: _Complex64Codes | type[ct.c_float_complex], + dtype: _Complex64Codes, align: py_bool = False, copy: py_bool = False, *, @@ -1350,7 +1350,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 @overload def __new__( cls, - dtype: _Complex128Codes | type[ct.c_double_complex], + dtype: _Complex128Codes, align: py_bool = False, copy: py_bool = False, *, @@ -1359,7 +1359,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 @overload def __new__( cls, - dtype: _CLongDoubleCodes | type[ct.c_longdouble_complex], + dtype: _CLongDoubleCodes, align: py_bool = False, copy: py_bool = False, *, @@ -1369,7 +1369,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 @overload def __new__( cls, - dtype: _Complex64Codes, + dtype: _Complex64Codes | type[ct.c_float_complex], align: py_bool = False, copy: py_bool = False, *, @@ -1378,7 +1378,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 @overload def __new__( cls, - dtype: _Complex128Codes, + dtype: _Complex128Codes | type[ct.c_double_complex], align: py_bool = False, copy: py_bool = False, *, @@ -1387,7 +1387,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 @overload def __new__( cls, - dtype: _CLongDoubleCodes, + dtype: _CLongDoubleCodes | type[ct.c_longdouble_complex], align: py_bool = False, copy: py_bool = False, *, diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index 527885308dd2..2ec9dc8dea2d 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -64,9 +64,7 @@ class KnownFailureException(Exception): else: IS_INSTALLED = True try: - if sys.version_info >= (3, 13): - IS_EDITABLE = np_dist.origin.dir_info.editable - else: + if 
sys.version_info < (3, 13): # Backport importlib.metadata.Distribution.origin import json # noqa: E401 import types @@ -75,6 +73,8 @@ class KnownFailureException(Exception): object_hook=lambda data: types.SimpleNamespace(**data), ) IS_EDITABLE = origin.dir_info.editable + else: + IS_EDITABLE = np_dist.origin.dir_info.editable except AttributeError: IS_EDITABLE = False From 6a165ab3bede1a6ac070af5be32e50241a48e1e6 Mon Sep 17 00:00:00 2001 From: Alejandro Candioti Date: Wed, 7 Jan 2026 14:48:46 -0800 Subject: [PATCH 1230/1718] Fix typo Co-authored-by: Pieter Eendebak --- benchmarks/benchmarks/bench_searchsorted.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmarks/benchmarks/bench_searchsorted.py b/benchmarks/benchmarks/bench_searchsorted.py index 720c5ac7a597..86cc625161d8 100644 --- a/benchmarks/benchmarks/bench_searchsorted.py +++ b/benchmarks/benchmarks/bench_searchsorted.py @@ -8,7 +8,7 @@ class SearchSorted(Benchmark): [100, 10_000, 1_000_000, 100_000_000], # array sizes [1, 10, 100_000], # number of query elements ['ordered', 'random'], # query order - [False, True], # use sorted + [False, True], # use sorter [42, 18122022], # seed ] param_names = ['array_size', 'n_queries', 'query_order', 'use_sorter', 'seed'] From cd20e7c08150de8e8bb2da85bb2c69b52a6dea48 Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Thu, 8 Jan 2026 10:21:48 +0100 Subject: [PATCH 1231/1718] DEP: Deprecate setting the shape attribute of a numpy array (#29536) Shape setting is generally unsafe, it may mutate arrays that are expected to be not mutated (in this way) and could lead to crashes in threaded environments (free-threading or not). As metadata, we prefer this to never be modified. Historically, one reason to use shape setting was to enforce no-copy, nowadays `arr.reshape(copy=False)` works for this, hopefully reducing the pressure here. 
There is still a private `._set_shape()` function now and if downstream/user notice this, we could make the error/DeprecationWarning point to that as a work-around that will stay indefinitely but with documentation explaining that it is a bad idea almost always (and the underscore discouraging it also). Co-authored-by: Sebastian Berg --- .../upcoming_changes/29536.deprecation.rst | 11 +++++++ doc/source/user/basics.copies.rst | 16 ---------- doc/source/user/basics.indexing.rst | 2 +- numpy/__init__.pyi | 2 +- numpy/_core/_add_newdocs.py | 16 +--------- numpy/_core/getlimits.py | 6 ++-- numpy/_core/records.py | 2 +- numpy/_core/src/multiarray/getset.c | 29 ++++++++++++----- numpy/_core/src/multiarray/getset.h | 2 ++ numpy/_core/src/multiarray/methods.c | 15 +++++++++ numpy/_core/tests/test_deprecations.py | 3 ++ numpy/_core/tests/test_item_selection.py | 4 +-- numpy/_core/tests/test_multiarray.py | 8 +++-- numpy/_core/tests/test_records.py | 2 +- numpy/_core/tests/test_regression.py | 9 +++--- numpy/_core/tests/test_ufunc.py | 2 +- numpy/_core/tests/test_umath.py | 6 ++-- numpy/lib/_function_base_impl.py | 4 +-- numpy/linalg/_linalg.py | 3 +- numpy/linalg/tests/test_linalg.py | 4 +-- numpy/ma/core.py | 31 +++++++++---------- numpy/ma/mrecords.py | 2 +- numpy/ma/tests/test_core.py | 20 ++++++------ numpy/ma/tests/test_old_ma.py | 10 +++--- numpy/matrixlib/defmatrix.py | 10 +++--- numpy/matrixlib/tests/test_defmatrix.py | 6 ++-- numpy/random/_generator.pyx | 5 ++- numpy/random/mtrand.pyx | 5 ++- 28 files changed, 125 insertions(+), 110 deletions(-) create mode 100644 doc/release/upcoming_changes/29536.deprecation.rst diff --git a/doc/release/upcoming_changes/29536.deprecation.rst b/doc/release/upcoming_changes/29536.deprecation.rst new file mode 100644 index 000000000000..7367a135cdd1 --- /dev/null +++ b/doc/release/upcoming_changes/29536.deprecation.rst @@ -0,0 +1,11 @@ +Setting the ``shape`` attribute is deprecated +--------------------------------------------- 
+Setting the shape attribute is now deprecated since mutating +an array is unsafe if an array is shared, especially by multiple +threads. As an alternative, you can create a new view via +`np.reshape` or `np.ndarray.reshape`. For example: ``x = np.arange(15); x = np.reshape(x, (3, 5))``. +To ensure no copy is made from the data, one can use ``np.reshape(..., copy=False)``. + +Directly setting the shape on an array is discouraged, but for cases where it is difficult to work +around, e.g., in ``__array_finalize__`` possible with the private method `np.ndarray._set_shape`. + diff --git a/doc/source/user/basics.copies.rst b/doc/source/user/basics.copies.rst index 6d8e78488e7e..c0dbc8e8fb51 100644 --- a/doc/source/user/basics.copies.rst +++ b/doc/source/user/basics.copies.rst @@ -106,22 +106,6 @@ otherwise. In most cases, the strides can be modified to reshape the array with a view. However, in some cases where the array becomes non-contiguous (perhaps after a :meth:`.ndarray.transpose` operation), the reshaping cannot be done by modifying strides and requires a copy. -In these cases, we can raise an error by assigning the new shape to the -shape attribute of the array. For example:: - - >>> import numpy as np - >>> x = np.ones((2, 3)) - >>> y = x.T # makes the array non-contiguous - >>> y - array([[1., 1.], - [1., 1.], - [1., 1.]]) - >>> z = y.view() - >>> z.shape = 6 - Traceback (most recent call last): - ... - AttributeError: Incompatible shape for in-place modification. Use - `.reshape()` to make a copy with the desired shape. Taking the example of another operation, :func:`numpy.ravel` returns a contiguous flattened view of the array wherever possible. On the other hand, diff --git a/doc/source/user/basics.indexing.rst b/doc/source/user/basics.indexing.rst index 7481468fe6db..51d126f8183b 100644 --- a/doc/source/user/basics.indexing.rst +++ b/doc/source/user/basics.indexing.rst @@ -54,7 +54,7 @@ and accepts negative indices for indexing from the end of the array. 
:: It is not necessary to separate each dimension's index into its own set of square brackets. :: - >>> x.shape = (2, 5) # now x is 2-dimensional + >>> x = x.reshape((2, 5)) # now x is 2-dimensional >>> x[1, 3] 8 >>> x[1, -1] diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index a3e13730eb11..39d80c4f172c 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2149,7 +2149,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @property def shape(self) -> _ShapeT_co: ... @shape.setter - @deprecated("In-place shape modification will be deprecated in NumPy 2.5.", category=PendingDeprecationWarning) + @deprecated("In-place shape modification has been deprecated in NumPy 2.5.") def shape(self, value: _ShapeLike) -> None: ... # diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index b37014b6a648..0e3cee869fc9 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -2885,7 +2885,7 @@ .. warning:: - Setting ``arr.shape`` is discouraged and may be deprecated in the + Setting ``arr.shape`` is deprecated and may be removed in the future. Using `ndarray.reshape` is the preferred approach. Examples @@ -2897,20 +2897,6 @@ >>> y = np.zeros((2, 3, 4)) >>> y.shape (2, 3, 4) - >>> y.shape = (3, 8) - >>> y - array([[ 0., 0., 0., 0., 0., 0., 0., 0.], - [ 0., 0., 0., 0., 0., 0., 0., 0.], - [ 0., 0., 0., 0., 0., 0., 0., 0.]]) - >>> y.shape = (3, 6) - Traceback (most recent call last): - File "", line 1, in - ValueError: cannot reshape array of size 24 into shape (3,6) - >>> np.zeros((4,2))[::2].shape = (-1,) - Traceback (most recent call last): - File "", line 1, in - AttributeError: Incompatible shape for in-place modification. Use - `.reshape()` to make a copy with the desired shape. 
See Also -------- diff --git a/numpy/_core/getlimits.py b/numpy/_core/getlimits.py index e821048c5da1..7d2b9966fcd7 100644 --- a/numpy/_core/getlimits.py +++ b/numpy/_core/getlimits.py @@ -16,16 +16,14 @@ def _fr0(a): """fix rank-0 --> rank-1""" if a.ndim == 0: - a = a.copy() - a.shape = (1,) + a = a.reshape((1,)) return a def _fr1(a): """fix rank > 0 --> rank-0""" if a.size == 1: - a = a.copy() - a.shape = () + a = a.reshape(()) return a diff --git a/numpy/_core/records.py b/numpy/_core/records.py index 65d5a0b85f2b..a022fde116cd 100644 --- a/numpy/_core/records.py +++ b/numpy/_core/records.py @@ -740,7 +740,7 @@ def fromrecords(recList, dtype=None, shape=None, formats=None, names=None, return _array else: if shape is not None and retval.shape != shape: - retval.shape = shape + retval = retval.reshape(shape) res = retval.view(recarray) diff --git a/numpy/_core/src/multiarray/getset.c b/numpy/_core/src/multiarray/getset.c index 1aff38476d50..1df9af5d0031 100644 --- a/numpy/_core/src/multiarray/getset.c +++ b/numpy/_core/src/multiarray/getset.c @@ -49,17 +49,13 @@ array_shape_get(PyArrayObject *self, void *NPY_UNUSED(ignored)) } -static int -array_shape_set(PyArrayObject *self, PyObject *val, void* NPY_UNUSED(ignored)) +NPY_NO_EXPORT int +array_shape_set_internal(PyArrayObject *self, PyObject *val) { int nd; PyArrayObject *ret; + assert(val); - if (val == NULL) { - PyErr_SetString(PyExc_AttributeError, - "Cannot delete array shape"); - return -1; - } /* Assumes C-order */ ret = (PyArrayObject *)PyArray_Reshape(self, val); if (ret == NULL) { @@ -106,6 +102,25 @@ array_shape_set(PyArrayObject *self, PyObject *val, void* NPY_UNUSED(ignored)) return 0; } +static int +array_shape_set(PyArrayObject *self, PyObject *val, void* NPY_UNUSED(ignored)) +{ + if (val == NULL) { + PyErr_SetString(PyExc_AttributeError, + "Cannot delete array shape"); + return -1; + } + + /* Deprecated NumPy 2.5, 2026-01-05 */ + if (DEPRECATE("Setting the shape on a NumPy array has been deprecated" 
+ " in NumPy 2.5.\nAs an alternative, you can create a new" + " view using np.reshape (with copy=False if needed)." + ) < 0 ) { + return -1; + } + + return array_shape_set_internal(self, val); +} static PyObject * array_strides_get(PyArrayObject *self, void *NPY_UNUSED(ignored)) diff --git a/numpy/_core/src/multiarray/getset.h b/numpy/_core/src/multiarray/getset.h index a95c98020a18..5436efaa325b 100644 --- a/numpy/_core/src/multiarray/getset.h +++ b/numpy/_core/src/multiarray/getset.h @@ -3,4 +3,6 @@ extern NPY_NO_EXPORT PyGetSetDef array_getsetlist[]; +NPY_NO_EXPORT int array_shape_set_internal(PyArrayObject *self, PyObject *val); + #endif /* NUMPY_CORE_SRC_MULTIARRAY_GETSET_H_ */ diff --git a/numpy/_core/src/multiarray/methods.c b/numpy/_core/src/multiarray/methods.c index 6dcc349dcd03..24972fec8975 100644 --- a/numpy/_core/src/multiarray/methods.c +++ b/numpy/_core/src/multiarray/methods.c @@ -25,6 +25,7 @@ #include "dtypemeta.h" #include "item_selection.h" #include "conversion_utils.h" +#include "getset.h" #include "shape.h" #include "strfuncs.h" #include "array_assign.h" @@ -2875,6 +2876,16 @@ array_class_getitem(PyObject *cls, PyObject *args) return Py_GenericAlias(cls, args); } +static PyObject* array__set_shape(PyObject *self, PyObject *args) +{ + int r = array_shape_set_internal((PyArrayObject *)self, args); + + if (r < 0) { + return NULL; + } + Py_RETURN_NONE; +} + NPY_NO_EXPORT PyMethodDef array_methods[] = { /* for subtypes */ @@ -3099,6 +3110,10 @@ NPY_NO_EXPORT PyMethodDef array_methods[] = { (PyCFunction)array_dlpack_device, METH_NOARGS, NULL}, + // For deprecation of ndarray setters + {"_set_shape", + (PyCFunction)array__set_shape, + METH_O, NULL}, // For Array API compatibility {"__array_namespace__", (PyCFunction)array_array_namespace, diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index 2dd14d9e00de..8bd406c1b0ff 100644 --- a/numpy/_core/tests/test_deprecations.py +++ 
b/numpy/_core/tests/test_deprecations.py @@ -320,6 +320,9 @@ def test_deprecated_strides_set(self): x = np.eye(2) self.assert_deprecated(setattr, args=(x, 'strides', x.strides)) + def test_deprecated_shape_set(self): + x = np.eye(2) + self.assert_deprecated(setattr, args=(x, "shape", (4, 1))) class TestDeprecatedDTypeParenthesizedRepeatCount(_DeprecationTestCase): message = "Passing in a parenthesized single number" diff --git a/numpy/_core/tests/test_item_selection.py b/numpy/_core/tests/test_item_selection.py index 79fb82dde591..0e08b7cfd8e0 100644 --- a/numpy/_core/tests/test_item_selection.py +++ b/numpy/_core/tests/test_item_selection.py @@ -29,8 +29,8 @@ def test_simple(self): tresult = list(ta.T.copy()) for index_array in index_arrays: if index_array.size != 0: - tresult[0].shape = (2,) + index_array.shape - tresult[1].shape = (2,) + index_array.shape + tresult[0] = tresult[0].reshape((2,) + index_array.shape) + tresult[1] = tresult[1].reshape((2,) + index_array.shape) for mode in modes: for index in indices: real_index = real_indices[mode][index] diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 84d28b1cd098..371cd6bae765 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -326,9 +326,13 @@ def test_attributes(self): assert_equal(one.shape, (10,)) assert_equal(two.shape, (4, 5)) assert_equal(three.shape, (2, 5, 6)) - three.shape = (10, 3, 2) + with warnings.catch_warnings(): # gh-28901 + warnings.filterwarnings('ignore', category=DeprecationWarning) + three.shape = (10, 3, 2) assert_equal(three.shape, (10, 3, 2)) - three.shape = (2, 5, 6) + with warnings.catch_warnings(): # gh-28901 + warnings.filterwarnings('ignore', category=DeprecationWarning) + three.shape = (2, 5, 6) assert_equal(one.strides, (one.itemsize,)) num = two.itemsize assert_equal(two.strides, (5 * num, num)) diff --git a/numpy/_core/tests/test_records.py b/numpy/_core/tests/test_records.py index 
7ed6ea7687ff..1fbaa0024f38 100644 --- a/numpy/_core/tests/test_records.py +++ b/numpy/_core/tests/test_records.py @@ -269,7 +269,7 @@ def test_recarray_conflict_fields(self): ra.mean = [1.1, 2.2, 3.3] assert_array_almost_equal(ra['mean'], [1.1, 2.2, 3.3]) assert_(type(ra.mean) is type(ra.var)) - ra.shape = (1, 3) + ra = ra.reshape((1, 3)) assert_(ra.shape == (1, 3)) ra.shape = ['A', 'B', 'C'] assert_array_equal(ra['shape'], [['A', 'B', 'C']]) diff --git a/numpy/_core/tests/test_regression.py b/numpy/_core/tests/test_regression.py index 60e19a638531..f16935bc5dec 100644 --- a/numpy/_core/tests/test_regression.py +++ b/numpy/_core/tests/test_regression.py @@ -112,7 +112,8 @@ def test_noncontiguous_fill(self): def rs(): b.shape = (10,) - assert_raises(AttributeError, rs) + with pytest.warns(DeprecationWarning): # gh-29536 + assert_raises(AttributeError, rs) def test_bool(self): # Ticket #60 @@ -653,7 +654,8 @@ def test_reshape_zero_strides(self): def test_reshape_zero_size(self): # GitHub Issue #2700, setting shape failed for 0-sized arrays a = np.ones((0, 2)) - a.shape = (-1, 2) + with pytest.warns(DeprecationWarning): + a.shape = (-1, 2) def test_reshape_trailing_ones_strides(self): # GitHub issue gh-2949, bad strides for trailing ones of new shape @@ -1569,8 +1571,7 @@ class Subclass(np.ndarray): @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") def test_take_refcount(self): # ticket #939 - a = np.arange(16, dtype=float) - a.shape = (4, 4) + a = np.arange(16, dtype=float).reshape((4, 4)) lut = np.ones((5 + 3, 4), float) rgba = np.empty(shape=a.shape + (4,), dtype=lut.dtype) c1 = sys.getrefcount(rgba) diff --git a/numpy/_core/tests/test_ufunc.py b/numpy/_core/tests/test_ufunc.py index 55ed0d881002..c93873e96610 100644 --- a/numpy/_core/tests/test_ufunc.py +++ b/numpy/_core/tests/test_ufunc.py @@ -1765,7 +1765,7 @@ def identityless_reduce_arrs(): # Not contiguous and not aligned a = np.empty((3 * 4 * 5 * 8 + 1,), dtype='i1') a = 
a[1:].view(dtype='f8') - a.shape = (3, 4, 5) + a = a.reshape((3, 4, 5)) a = a[1:, 1:, 1:] yield a diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py index b8c46d636870..2407a0267ed6 100644 --- a/numpy/_core/tests/test_umath.py +++ b/numpy/_core/tests/test_umath.py @@ -4842,18 +4842,18 @@ class BadArr1(np.ndarray): def __array_finalize__(self, obj): # The outer call reshapes to 3 dims, try to do a bad reshape. if self.ndim == 3: - self.shape = self.shape + (1,) + self._set_shape(self.shape + (1,)) class BadArr2(np.ndarray): def __array_finalize__(self, obj): if isinstance(obj, BadArr2): # outer inserts 1-sized dims. In that case disturb them. if self.shape[-1] == 1: - self.shape = self.shape[::-1] + self._set_shape(self.shape[::-1]) for cls in [BadArr1, BadArr2]: arr = np.ones((2, 3)).view(cls) - with assert_raises(TypeError) as a: + with pytest.raises(TypeError): # The first array gets reshaped (not the second one) np.add.outer(arr, [1, 2]) diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 7d4d86b20d50..df47ad165745 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -5183,8 +5183,8 @@ def meshgrid(*xi, copy=True, sparse=False, indexing='xy'): if indexing == 'xy' and ndim > 1: # switch first and second axis - output[0].shape = (1, -1) + s0[2:] - output[1].shape = (-1, 1) + s0[2:] + output[0] = output[0].reshape((1, -1) + s0[2:]) + output[1] = output[1].reshape((-1, 1) + s0[2:]) if not sparse: # Return the full N-D matrix (not only the 1-D vector) diff --git a/numpy/linalg/_linalg.py b/numpy/linalg/_linalg.py index b07ea5873d9f..d11699bd1c5e 100644 --- a/numpy/linalg/_linalg.py +++ b/numpy/linalg/_linalg.py @@ -352,8 +352,7 @@ def tensorsolve(a, b, axes=None): a = a.reshape(prod, prod) b = b.ravel() res = wrap(solve(a, b)) - res.shape = oldshape - return res + return res.reshape(oldshape) def _solve_dispatcher(a, b): diff --git a/numpy/linalg/tests/test_linalg.py 
b/numpy/linalg/tests/test_linalg.py index 9c06e04a5ec9..f93e2bdeeb6c 100644 --- a/numpy/linalg/tests/test_linalg.py +++ b/numpy/linalg/tests/test_linalg.py @@ -1000,8 +1000,8 @@ def do(self, a, b, tags): np.asarray(abs(np.dot(a, x) - b)) ** 2).sum(axis=0) expect_resids = np.asarray(expect_resids) if np.asarray(b).ndim == 1: - expect_resids.shape = (1,) - assert_equal(residuals.shape, expect_resids.shape) + expect_resids = expect_resids.reshape((1,)) + assert_equal(residuals.shape, expect_resids.shape) else: expect_resids = np.array([]).view(type(x)) assert_almost_equal(residuals, expect_resids) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 13ad3bbf3751..01d9c5557f8e 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -1105,8 +1105,7 @@ def reduce(self, target, axis=0, dtype=None): if t.shape == (): t = t.reshape(1) if m is not nomask: - m = make_mask(m, copy=True) - m.shape = (1,) + m = make_mask(m, copy=True).reshape((1,)) if m is nomask: tr = self.f.reduce(t, axis) @@ -2597,7 +2596,7 @@ def flatten_sequence(iterable): if len(inishape) > 1: newshape = list(out.shape) newshape[0] = inishape - out.shape = tuple(flatten_sequence(newshape)) + out = out.reshape(tuple(flatten_sequence(newshape))) return out @@ -2713,8 +2712,7 @@ def __getitem__(self, indx): _mask = self.maskiter.__getitem__(indx) if isinstance(_mask, ndarray): # set shape to match that of data; this is needed for matrices - _mask.shape = result.shape - result._mask = _mask + result._mask = _mask.reshape(result.shape) elif isinstance(_mask, np.void): return mvoid(result, mask=_mask, hardmask=self.ma._hardmask) elif _mask: # Just a scalar, masked @@ -2941,7 +2939,7 @@ def __new__(cls, data=None, mask=nomask, dtype=None, copy=False, # the shapes were the same, so we can at least # avoid that path if data._mask.shape != data.shape: - data._mask.shape = data.shape + data._mask = data._mask.reshape(data.shape) else: # Case 2. : With a mask in input. 
# If mask is boolean, create an array of True or False @@ -3116,7 +3114,7 @@ def __array_finalize__(self, obj): # Finalize the mask if self._mask is not nomask: try: - self._mask.shape = self.shape + self._mask = self._mask.reshape(self.shape) except ValueError: self._mask = nomask except (TypeError, AttributeError): @@ -3491,7 +3489,7 @@ def dtype(self, dtype): # Try to reset the shape of the mask (if we don't have a void). # This raises a ValueError if the dtype change won't work. try: - self._mask.shape = self.shape + self._mask = self._mask.reshape(self.shape) except (AttributeError, TypeError): pass @@ -3505,7 +3503,7 @@ def shape(self, shape): # Cannot use self._mask, since it may not (yet) exist when a # masked matrix sets the shape. if getmask(self) is not nomask: - self._mask.shape = self.shape + self._mask = self._mask.reshape(self.shape) def __setmask__(self, mask, copy=False): """ @@ -3574,7 +3572,7 @@ def __setmask__(self, mask, copy=False): current_mask.flat = mask # Reshape if needed if current_mask.shape: - current_mask.shape = self.shape + self._mask = current_mask.reshape(self.shape) return _set_mask = __setmask__ @@ -4779,8 +4777,9 @@ def reshape(self, *s, **kwargs): Notes ----- - The reshaping operation cannot guarantee that a copy will not be made, - to modify the shape in place, use ``a.shape = s`` + By default, the reshaping operation will make a copy if a view + with different strides is not possible. To ensure a view, + pass ``copy=False``. 
Examples -------- @@ -5710,7 +5709,7 @@ def argmin(self, axis=None, fill_value=None, out=None, *, -------- >>> import numpy as np >>> x = np.ma.array(np.arange(4), mask=[1,1,0,0]) - >>> x.shape = (2,2) + >>> x = x.reshape((2,2)) >>> x masked_array( data=[[--, --], @@ -6345,7 +6344,7 @@ def tolist(self, fill_value=None): inishape = self.shape result = np.array(self._data.ravel(), dtype=object) result[_mask.ravel()] = None - result.shape = inishape + result = result.reshape(inishape) return result.tolist() def tobytes(self, fill_value=None, order='C'): @@ -8247,9 +8246,9 @@ def inner(a, b): fa = filled(a, 0) fb = filled(b, 0) if fa.ndim == 0: - fa.shape = (1,) + fa = fa.reshape((1,)) if fb.ndim == 0: - fb.shape = (1,) + fb = fb.reshape((1,)) return np.inner(fa, fb).view(MaskedArray) diff --git a/numpy/ma/mrecords.py b/numpy/ma/mrecords.py index bb4a2707fec1..d35bb9b79925 100644 --- a/numpy/ma/mrecords.py +++ b/numpy/ma/mrecords.py @@ -385,7 +385,7 @@ def view(self, dtype=None, type=None): if (getattr(output, '_mask', ma.nomask) is not ma.nomask): mdtype = ma.make_mask_descr(output.dtype) output._mask = self._mask.view(mdtype, np.ndarray) - output._mask.shape = output.shape + output._mask = output._mask.reshape(output.shape) return output def harden_mask(self): diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index a082f8aa7450..fc10472faacd 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -1371,8 +1371,7 @@ def test_minmax_reduce(self): def test_minmax_funcs_with_output(self): # Tests the min/max functions with explicit outputs mask = np.random.rand(12).round() - xm = array(np.random.uniform(0, 10, 12), mask=mask) - xm.shape = (3, 4) + xm = array(np.random.uniform(0, 10, 12), mask=mask).reshape((3, 4)) for funcname in ('min', 'max'): # Initialize npfunc = getattr(np, funcname) @@ -1394,7 +1393,7 @@ def test_minmax_funcs_with_output(self): def test_minmax_methods(self): # Additional tests on max/min xm = 
self._create_data()[5] - xm.shape = (xm.size,) + xm = xm.reshape((xm.size,)) assert_equal(xm.max(), 10) assert_(xm[0].max() is masked) assert_(xm[0].max(0) is masked) @@ -1506,7 +1505,10 @@ def test_addsumprod(self): assert_equal(np.prod(x, 0), product(x, 0)) assert_equal(np.prod(filled(xm, 1), axis=0), product(xm, axis=0)) s = (3, 4) - x.shape = y.shape = xm.shape = ym.shape = s + x = x.reshape(s) + y = y.reshape(s) + xm = xm.reshape(s) + ym = ym.reshape(s) if len(s) > 1: assert_equal(np.concatenate((x, y), 1), concatenate((xm, ym), 1)) assert_equal(np.add.reduce(x, 1), add.reduce(x, 1)) @@ -3633,7 +3635,7 @@ def test_ravel(self): assert_equal(a.ravel()._mask, [0, 0, 0, 0]) # Test that the fill_value is preserved a.fill_value = -99 - a.shape = (2, 2) + a = a.reshape((2, 2)) ar = a.ravel() assert_equal(ar._mask, [0, 0, 0, 0]) assert_equal(ar._data, [1, 2, 3, 4]) @@ -3907,7 +3909,7 @@ def test_tolist(self): assert_(xlist[1] is None) assert_(xlist[-2] is None) # ... on 2D - x.shape = (3, 4) + x = x.reshape((3, 4)) xlist = x.tolist() ctrl = [[0, None, 2, 3], [4, 5, 6, 7], [8, 9, None, 11]] assert_equal(xlist[0], [0, None, 2, 3]) @@ -5060,8 +5062,7 @@ def test_on_ndarray(self): def test_compress(self): # Test compress function on ndarray and masked array # Address Github #2495. 
- arr = np.arange(8) - arr.shape = 4, 2 + arr = np.arange(8).reshape(4, 2) cond = np.array([True, False, True, True]) control = arr[[0, 2, 3]] test = np.ma.compress(cond, arr, axis=0) @@ -5875,7 +5876,8 @@ def test_fieldless_void(): def test_mask_shape_assignment_does_not_break_masked(): a = np.ma.masked b = np.ma.array(1, mask=a.mask) - b.shape = (1,) + with pytest.warns(DeprecationWarning): # gh-29492 + b.shape = (1,) assert_equal(a.mask.shape, ()) diff --git a/numpy/ma/tests/test_old_ma.py b/numpy/ma/tests/test_old_ma.py index fcf02fa2dccb..2866cc0d6fb0 100644 --- a/numpy/ma/tests/test_old_ma.py +++ b/numpy/ma/tests/test_old_ma.py @@ -130,11 +130,11 @@ def test_testBasic1d(self): def test_testBasic2d(self, s): # Test of basic array creation and properties in 2 dimensions. x, y, _, m1, _, xm, ym, _, _, xf, s = self._create_data() - x.shape = s - y.shape = s - xm.shape = s - ym.shape = s - xf.shape = s + x = x.reshape(s) + y = y.reshape(s) + xm = xm.reshape(s) + ym = ym.reshape(s) + xf = xf.reshape(s) assert_(not isMaskedArray(x)) assert_(isMaskedArray(xm)) diff --git a/numpy/matrixlib/defmatrix.py b/numpy/matrixlib/defmatrix.py index 9da6bcae2b64..2e63d50cb3a6 100644 --- a/numpy/matrixlib/defmatrix.py +++ b/numpy/matrixlib/defmatrix.py @@ -180,16 +180,16 @@ def __array_finalize__(self, obj): newshape = tuple(x for x in self.shape if x > 1) ndim = len(newshape) if ndim == 2: - self.shape = newshape + self._set_shape(newshape) return elif (ndim > 2): raise ValueError("shape too large to be a matrix.") else: newshape = self.shape if ndim == 0: - self.shape = (1, 1) + self._set_shape((1, 1)) elif ndim == 1: - self.shape = (1, newshape[0]) + self._set_shape((1, newshape[0])) return def __getitem__(self, index): @@ -213,9 +213,9 @@ def __getitem__(self, index): except Exception: n = 0 if n > 1 and isscalar(index[1]): - out.shape = (sh, 1) + out = out.reshape((sh, 1)) else: - out.shape = (1, sh) + out = out.reshape((1, sh)) return out def __mul__(self, other): diff 
--git a/numpy/matrixlib/tests/test_defmatrix.py b/numpy/matrixlib/tests/test_defmatrix.py index a0e868f5fe2c..b538973726b5 100644 --- a/numpy/matrixlib/tests/test_defmatrix.py +++ b/numpy/matrixlib/tests/test_defmatrix.py @@ -373,15 +373,13 @@ def test_row_column_indexing(self): assert_array_equal(x[:, 1], [[0], [1]]) def test_boolean_indexing(self): - A = np.arange(6) - A.shape = (3, 2) + A = np.arange(6).reshape((3, 2)) x = asmatrix(A) assert_array_equal(x[:, np.array([True, False])], x[:, 0]) assert_array_equal(x[np.array([True, False, False]), :], x[0, :]) def test_list_indexing(self): - A = np.arange(6) - A.shape = (3, 2) + A = np.arange(6).reshape((3, 2)) x = asmatrix(A) assert_array_equal(x[:, [1, 0]], x[:, ::-1]) assert_array_equal(x[[2, 1, 0], :], x[::-1, :]) diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index 27cf0e16e70f..e3692c5b9904 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -988,7 +988,7 @@ cdef class Generator: idx_data[j - pop_size_i + size_i] = j if shuffle: _shuffle_int(&self._bitgen, size_i, 1, idx_data) - idx.shape = shape + idx = idx.reshape(shape) if is_scalar and isinstance(idx, np.ndarray): # In most cases a scalar will have been made an array @@ -3952,8 +3952,7 @@ cdef class Generator: _factor = u * np.sqrt(s) x = mean + x @ _factor.T - x.shape = tuple(final_shape) - return x + return x.reshape(tuple(final_shape)) def multinomial(self, object n, object pvals, size=None): """ diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx index c69284d0df9a..a74b4cf6941b 100644 --- a/numpy/random/mtrand.pyx +++ b/numpy/random/mtrand.pyx @@ -1049,7 +1049,7 @@ cdef class RandomState: idx = found else: idx = self.permutation(pop_size)[:size] - idx.shape = shape + idx = idx.reshape(shape) if is_scalar and isinstance(idx, np.ndarray): # In most cases a scalar will have been made an array @@ -4268,8 +4268,7 @@ cdef class RandomState: x = np.dot(x, np.sqrt(s)[:, None] * v) x += mean - 
x.shape = tuple(final_shape) - return x + return x.reshape(tuple(final_shape)) def multinomial(self, long n, object pvals, size=None): """ From f1fdcff232ac6ecbd539cb61e949d356857ad1e6 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Thu, 8 Jan 2026 13:00:18 +0100 Subject: [PATCH 1232/1718] DEP: remove `numpy.chararray` (#30604) Finalize the deprecation of `numpy.chararray` note that `numpy.char.chararray` is still available. --- doc/release/upcoming_changes/30604.expired.rst | 1 + numpy/__init__.py | 8 -------- numpy/_core/tests/test_deprecations.py | 6 ------ 3 files changed, 1 insertion(+), 14 deletions(-) create mode 100644 doc/release/upcoming_changes/30604.expired.rst diff --git a/doc/release/upcoming_changes/30604.expired.rst b/doc/release/upcoming_changes/30604.expired.rst new file mode 100644 index 000000000000..50cab89f3c3a --- /dev/null +++ b/doc/release/upcoming_changes/30604.expired.rst @@ -0,0 +1 @@ +* The ``numpy.chararray`` re-export of ``numpy.char.chararray`` has been removed (deprecated since 2.0). diff --git a/numpy/__init__.py b/numpy/__init__.py index 668f203d4b0d..3f2652a306fd 100644 --- a/numpy/__init__.py +++ b/numpy/__init__.py @@ -766,14 +766,6 @@ def __getattr__(attr): name=None ) - if attr == "chararray": - warnings.warn( - "`np.chararray` is deprecated and will be removed from " - "the main namespace in the future. 
Use an array with a string " - "or bytes dtype instead.", DeprecationWarning, stacklevel=2) - import numpy.char as char - return char.chararray - raise AttributeError(f"module {__name__!r} has no attribute {attr!r}") def __dir__(): diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index 8bd406c1b0ff..2853a5052b59 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -265,12 +265,6 @@ def test_deprecated_np_lib_math(self): self.assert_deprecated(lambda: np.lib.math) -class TestLibImports(_DeprecationTestCase): - # Deprecated in Numpy 1.26.0, 2023-09 - def test_lib_functions_deprecation_call(self): - self.assert_deprecated(lambda: np.chararray) - - class TestDeprecatedDTypeAliases(_DeprecationTestCase): def _check_for_warning(self, func): From 41d21add7914d81cc140884d8e0b426795b3e049 Mon Sep 17 00:00:00 2001 From: Justin Kunimune Date: Thu, 8 Jan 2026 10:56:51 -0500 Subject: [PATCH 1233/1718] clarify genfromtxt behavior when dtype=None As copilot pointed out, calling genfromtxt when dtype=None does not actually necessarily produce a structured array; if all columns have the same dtype then it will produce a 1D or 2D non-structured array just like if dtype is primitive. --- numpy/lib/_npyio_impl.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/numpy/lib/_npyio_impl.py b/numpy/lib/_npyio_impl.py index d68b0e538236..3a5e6d903ff7 100644 --- a/numpy/lib/_npyio_impl.py +++ b/numpy/lib/_npyio_impl.py @@ -1759,9 +1759,10 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, dtype : dtype, optional Data type of the resulting array. If a structured dtype, the output array will be 1D and structured where - each field corresponds to one column. If None, the output array will be - structured, and the dtype of each field will be determined by the - contents of each column, individually. + each field corresponds to one column. 
+ If None, the dtype of each column will be inferred automatically, and + the output array will be structured only if either the dtypes are not + all the same or if `names` is not None. comments : str, optional The character used to indicate the start of a comment. All the characters occurring on a line after a comment are discarded. From 1dd1b4455bec15e5c43976f9c6c5c4423e567a43 Mon Sep 17 00:00:00 2001 From: Justin Kunimune Date: Thu, 8 Jan 2026 10:59:57 -0500 Subject: [PATCH 1234/1718] fix grammar it's unclear to me if this actually matters since the hyphen was present in the previous version, but changing it seems easier than arguing with Copilot. --- numpy/lib/_npyio_impl.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/lib/_npyio_impl.py b/numpy/lib/_npyio_impl.py index 3a5e6d903ff7..d525f5ecbbd4 100644 --- a/numpy/lib/_npyio_impl.py +++ b/numpy/lib/_npyio_impl.py @@ -1795,7 +1795,7 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, names are read from the first line after the first `skip_header` lines. This line can optionally be preceded by a comment delimiter. Any content before the comment delimiter is discarded. - If `names` is a sequence or a single-string of comma-separated names, + If `names` is a sequence or a single string of comma-separated names, the output is a structured array whose field names are taken from `names`. 
If `names` is None, the output is structured only if `dtype` is From e111fb7c550fa396f24354cdf9d06144948c49af Mon Sep 17 00:00:00 2001 From: Aniket Singh Yadav Date: Thu, 8 Jan 2026 22:50:22 +0530 Subject: [PATCH 1235/1718] fix:recarray, masked_array and datetime break fill value calculation --- numpy/ma/core.py | 11 ++++++++--- numpy/ma/tests/test_core.py | 19 +++++++++++++++++-- 2 files changed, 25 insertions(+), 5 deletions(-) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 53d29fc9b22c..896f312a24f9 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -20,7 +20,7 @@ """ import builtins -import datetime +import datetime as dt import functools import inspect import operator @@ -242,11 +242,16 @@ def _recursive_fill_value(dtype, f): field_dtype = dtype[name] val = _recursive_fill_value(field_dtype, f) if np.issubdtype(field_dtype, np.datetime64): - if isinstance(val, (datetime.date, datetime.datetime)): + if isinstance(val, dt.date): val = np.datetime64(val) + val = np.array(val) elif isinstance(val, (int, np.integer)): val = np.array(val).astype(field_dtype) - vals.append(np.array(val)) + else: + val = np.array(val) + else: + val = np.array(val) + vals.append(val) return np.array(tuple(vals), dtype=dtype)[()] # decay to void scalar from 0d elif dtype.subdtype: subtype, shape = dtype.subdtype diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index 4a903df0cfbe..514cff1c4ec0 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -2278,6 +2278,23 @@ def test_fill_value_datetime_structured(self): res = np.ma.minimum_fill_value(ma) assert isinstance(res['foo'], np.datetime64) + def test_fill_value_datetime_structured_datetime(self): + # gh-29818 + rec = np.array([(dt.datetime(2025, 4, 1, 12, 0),)], + dtype=[('foo', ' Date: Thu, 8 Jan 2026 23:12:26 +0530 Subject: [PATCH 1236/1718] fix:recarray, masked_array and datetime break fill value calculation --- numpy/ma/tests/test_core.py | 3 ++- 1 file changed, 2 
insertions(+), 1 deletion(-) diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index 514cff1c4ec0..5eb925d951b4 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -2280,7 +2280,7 @@ def test_fill_value_datetime_structured(self): def test_fill_value_datetime_structured_datetime(self): # gh-29818 - rec = np.array([(dt.datetime(2025, 4, 1, 12, 0),)], + rec = np.array([(dt.datetime(2025, 4, 1, 12, 0),)], dtype=[('foo', ' Date: Thu, 8 Jan 2026 21:27:55 +0100 Subject: [PATCH 1237/1718] TST: remove broken & outdated deprecation tests --- numpy/_core/tests/test_deprecations.py | 45 -------------------------- 1 file changed, 45 deletions(-) diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index 2853a5052b59..1710492e233a 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -145,51 +145,6 @@ def test_bincount_bad_list(self, badlist): self.assert_deprecated(lambda: np.bincount(badlist)) -class BuiltInRoundComplexDType(_DeprecationTestCase): - # 2020-03-31 1.19.0 - deprecated_types = [np.csingle, np.cdouble, np.clongdouble] - not_deprecated_types = [ - np.int8, np.int16, np.int32, np.int64, - np.uint8, np.uint16, np.uint32, np.uint64, - np.float16, np.float32, np.float64, - ] - - def test_deprecated(self): - for scalar_type in self.deprecated_types: - scalar = scalar_type(0) - self.assert_deprecated(round, args=(scalar,)) - self.assert_deprecated(round, args=(scalar, 0)) - self.assert_deprecated(round, args=(scalar,), kwargs={'ndigits': 0}) - - def test_not_deprecated(self): - for scalar_type in self.not_deprecated_types: - scalar = scalar_type(0) - self.assert_not_deprecated(round, args=(scalar,)) - self.assert_not_deprecated(round, args=(scalar, 0)) - self.assert_not_deprecated(round, args=(scalar,), kwargs={'ndigits': 0}) - - -class FlatteningConcatenateUnsafeCast(_DeprecationTestCase): - # NumPy 1.20, 2020-09-03 - message = "concatenate 
with `axis=None` will use same-kind casting" - - def test_deprecated(self): - self.assert_deprecated(np.concatenate, - args=(([0.], [1.]),), - kwargs={'axis': None, 'out': np.empty(2, dtype=np.int64)}) - - def test_not_deprecated(self): - self.assert_not_deprecated(np.concatenate, - args=(([0.], [1.]),), - kwargs={'axis': None, 'out': np.empty(2, dtype=np.int64), - 'casting': "unsafe"}) - - with assert_raises(TypeError): - # Tests should notice if the deprecation warning is given first... - np.concatenate(([0.], [1.]), out=np.empty(2, dtype=np.int64), - casting="same_kind") - - class TestCtypesGetter(_DeprecationTestCase): ctypes = np.array([1]).ctypes From 267bb1af77941e0979588ead2f73c3b60a628ebc Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 8 Jan 2026 21:50:15 +0100 Subject: [PATCH 1238/1718] DEP: remove ``lib.math`` (deprecated since 1.25) --- doc/release/upcoming_changes/30612.expired.rst | 1 + numpy/_core/tests/test_deprecations.py | 5 ----- numpy/lib/__init__.py | 9 +-------- numpy/tests/test_public_api.py | 1 - 4 files changed, 2 insertions(+), 14 deletions(-) create mode 100644 doc/release/upcoming_changes/30612.expired.rst diff --git a/doc/release/upcoming_changes/30612.expired.rst b/doc/release/upcoming_changes/30612.expired.rst new file mode 100644 index 000000000000..1e29d3c96d53 --- /dev/null +++ b/doc/release/upcoming_changes/30612.expired.rst @@ -0,0 +1 @@ +* The ``numpy.lib.math`` alias for the standard library ``math`` module has been removed (deprecated since 1.25). 
diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index 2853a5052b59..7a36f3877876 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -260,11 +260,6 @@ def test_attributeerror_includes_info(self, name): getattr(np, name) -class TestMathAlias(_DeprecationTestCase): - def test_deprecated_np_lib_math(self): - self.assert_deprecated(lambda: np.lib.math) - - class TestDeprecatedDTypeAliases(_DeprecationTestCase): def _check_for_warning(self, func): diff --git a/numpy/lib/__init__.py b/numpy/lib/__init__.py index a248d048f0ec..e14827b5de37 100644 --- a/numpy/lib/__init__.py +++ b/numpy/lib/__init__.py @@ -60,16 +60,9 @@ def __getattr__(attr): # Warn for deprecated/removed aliases - import math import warnings - if attr == "math": - warnings.warn( - "`np.lib.math` is a deprecated alias for the standard library " - "`math` module (Deprecated Numpy 1.25). Replace usages of " - "`numpy.lib.math` with `math`", DeprecationWarning, stacklevel=2) - return math - elif attr == "emath": + if attr == "emath": raise AttributeError( "numpy.lib.emath was an alias for emath module that was removed " "in NumPy 2.0. 
Replace usages of numpy.lib.emath with " diff --git a/numpy/tests/test_public_api.py b/numpy/tests/test_public_api.py index 3ccba81ebaff..5565fad6cb7f 100644 --- a/numpy/tests/test_public_api.py +++ b/numpy/tests/test_public_api.py @@ -226,7 +226,6 @@ def test_all_modules_are_expected(): # Stuff that clearly shouldn't be in the API and is detected by the next test # below SKIP_LIST_2 = [ - 'numpy.lib.math', 'numpy.matlib.char', 'numpy.matlib.rec', 'numpy.matlib.emath', From e4abcad7811614c6f336fcba84bb481ec0faa7ef Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 8 Jan 2026 22:21:02 +0100 Subject: [PATCH 1239/1718] DEP: remove ``'a'`` dtype alias (deprecated since 2.0) --- .../upcoming_changes/30613.expired.rst | 1 + numpy/__init__.pyi | 1 - numpy/_core/_type_aliases.py | 1 - numpy/_core/include/numpy/ndarraytypes.h | 1 - numpy/_core/src/multiarray/conversion_utils.c | 14 ------------- numpy/_core/src/multiarray/convert_datatype.c | 1 - numpy/_core/src/multiarray/descriptor.c | 15 -------------- numpy/_core/tests/test_deprecations.py | 20 +++++-------------- numpy/conftest.py | 1 - 9 files changed, 6 insertions(+), 49 deletions(-) create mode 100644 doc/release/upcoming_changes/30613.expired.rst diff --git a/doc/release/upcoming_changes/30613.expired.rst b/doc/release/upcoming_changes/30613.expired.rst new file mode 100644 index 000000000000..89610f3577e6 --- /dev/null +++ b/doc/release/upcoming_changes/30613.expired.rst @@ -0,0 +1 @@ +* Data type alias ``'a'`` was removed in favor of ``'S'`` (deprecated since 2.0). 
diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 39d80c4f172c..210edd322ad8 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -823,7 +823,6 @@ type _DTypeChar = L[ "G", # clongdouble "O", # object "S", # bytes_ (S0) - "a", # bytes_ (deprecated) "U", # str_ "V", # void "M", # datetime64 diff --git a/numpy/_core/_type_aliases.py b/numpy/_core/_type_aliases.py index 51c8e6ca2677..943955705083 100644 --- a/numpy/_core/_type_aliases.py +++ b/numpy/_core/_type_aliases.py @@ -72,7 +72,6 @@ "complex": "complex128", "object": "object_", "bytes": "bytes_", - "a": "bytes_", "int": "int_", "str": "str_", "unicode": "str_", diff --git a/numpy/_core/include/numpy/ndarraytypes.h b/numpy/_core/include/numpy/ndarraytypes.h index f740788f3720..6afdcf821a6b 100644 --- a/numpy/_core/include/numpy/ndarraytypes.h +++ b/numpy/_core/include/numpy/ndarraytypes.h @@ -127,7 +127,6 @@ enum NPY_TYPECHAR { NPY_CLONGDOUBLELTR = 'G', NPY_OBJECTLTR = 'O', NPY_STRINGLTR = 'S', - NPY_DEPRECATED_STRINGLTR2 = 'a', NPY_UNICODELTR = 'U', NPY_VOIDLTR = 'V', NPY_DATETIMELTR = 'M', diff --git a/numpy/_core/src/multiarray/conversion_utils.c b/numpy/_core/src/multiarray/conversion_utils.c index 164aa2e4c8b4..41405beef9a8 100644 --- a/numpy/_core/src/multiarray/conversion_utils.c +++ b/numpy/_core/src/multiarray/conversion_utils.c @@ -1332,20 +1332,6 @@ PyArray_TypestrConvert(int itemsize, int gentype) newtype = NPY_STRING; break; - case NPY_DEPRECATED_STRINGLTR2: - { - /* - * raise a deprecation warning, which might be an exception - * if warnings are errors, so leave newtype unset in that - * case - */ - int ret = DEPRECATE("Data type alias 'a' was deprecated in NumPy 2.0. 
" - "Use the 'S' alias instead."); - if (ret == 0) { - newtype = NPY_STRING; - } - break; - } case NPY_UNICODELTR: newtype = NPY_UNICODE; break; diff --git a/numpy/_core/src/multiarray/convert_datatype.c b/numpy/_core/src/multiarray/convert_datatype.c index dbab8b4253d8..33162ac377e5 100644 --- a/numpy/_core/src/multiarray/convert_datatype.c +++ b/numpy/_core/src/multiarray/convert_datatype.c @@ -669,7 +669,6 @@ dtype_kind_to_ordering(char kind) return 5; /* String kind */ case 'S': - case 'a': return 6; /* Unicode kind */ case 'U': diff --git a/numpy/_core/src/multiarray/descriptor.c b/numpy/_core/src/multiarray/descriptor.c index 1fc5b76d1f00..f2fb313cf442 100644 --- a/numpy/_core/src/multiarray/descriptor.c +++ b/numpy/_core/src/multiarray/descriptor.c @@ -1829,14 +1829,6 @@ _convert_from_str(PyObject *obj, int align) check_num = NPY_STRING; break; - case NPY_DEPRECATED_STRINGLTR2: - if (DEPRECATE("Data type alias 'a' was deprecated in NumPy 2.0. " - "Use the 'S' alias instead.") < 0) { - return NULL; - } - check_num = NPY_STRING; - break; - /* * When specifying length of UNICODE * the number of characters is given to match @@ -1907,13 +1899,6 @@ _convert_from_str(PyObject *obj, int align) goto fail; } - if (strcmp(type, "a") == 0) { - if (DEPRECATE("Data type alias 'a' was deprecated in NumPy 2.0. " - "Use the 'S' alias instead.") < 0) { - return NULL; - } - } - /* * Probably only ever dispatches to `_convert_from_type`, but who * knows what users are injecting into `np.typeDict`. 
diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index 2853a5052b59..0d9338b47158 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -266,21 +266,11 @@ def test_deprecated_np_lib_math(self): class TestDeprecatedDTypeAliases(_DeprecationTestCase): - - def _check_for_warning(self, func): - with pytest.warns(DeprecationWarning, - match="alias 'a' was deprecated in NumPy 2.0") as w: - func() - assert len(w) == 1 - - def test_a_dtype_alias(self): - for dtype in ["a", "a10"]: - f = lambda: np.dtype(dtype) - self._check_for_warning(f) - self.assert_deprecated(f) - f = lambda: np.array(["hello", "world"]).astype("a10") - self._check_for_warning(f) - self.assert_deprecated(f) + @pytest.mark.parametrize("dtype_code", ["a", "a10"]) + def test_a_dtype_alias(self, dtype_code: str): + # Deprecated in 2.0, removed in 2.5, 2025-12 + with pytest.raises(TypeError): + np.dtype(dtype_code) class TestDeprecatedArrayWrap(_DeprecationTestCase): diff --git a/numpy/conftest.py b/numpy/conftest.py index c3c96ef3bc39..ad256b56200c 100644 --- a/numpy/conftest.py +++ b/numpy/conftest.py @@ -183,7 +183,6 @@ def warnings_errors_and_rng(test=None): "numpy.core", "Importing from numpy.matlib", "This function is deprecated.", # random_integers - "Data type alias 'a'", # numpy.rec.fromfile "Arrays of 2-dimensional vectors", # matlib.cross "NumPy warning suppression and assertion utilities are deprecated." 
] From d4b645490c91a6ed2759ef1424f7025e5efa743b Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 8 Jan 2026 22:42:42 +0100 Subject: [PATCH 1240/1718] DEP: update doctests to use 'S5' instead of 'a5' --- numpy/_core/records.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/numpy/_core/records.py b/numpy/_core/records.py index a022fde116cd..8ad0f1fb07a3 100644 --- a/numpy/_core/records.py +++ b/numpy/_core/records.py @@ -106,10 +106,10 @@ class format_parser: titles will simply not appear. If `names` is empty, default field names will be used. - >>> np.rec.format_parser(['f8', 'i4', 'a5'], ['col1', 'col2', 'col3'], + >>> np.rec.format_parser(['f8', 'i4', 'S5'], ['col1', 'col2', 'col3'], ... []).dtype dtype([('col1', '>> np.rec.format_parser(['>> np.rec.format_parser(['>> from tempfile import TemporaryFile - >>> a = np.empty(10,dtype='f8,i4,a5') + >>> a = np.empty(10,dtype='f8,i4,S5') >>> a[5] = (0.5,10,'abcde') >>> >>> fd=TemporaryFile() @@ -869,7 +869,7 @@ def fromfile(fd, dtype=None, shape=None, offset=0, formats=None, >>> a.tofile(fd) >>> >>> _ = fd.seek(0) - >>> r=np.rec.fromfile(fd, formats='f8,i4,a5', shape=10, + >>> r=np.rec.fromfile(fd, formats='f8,i4,S5', shape=10, ... 
byteorder='<') >>> print(r[5]) (0.5, 10, b'abcde') From b7a31aeaa53444385a063f65d02fb952a50596d7 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 8 Jan 2026 23:10:31 +0100 Subject: [PATCH 1241/1718] DEP: remove ``_add_newdoc_ufunc`` (deprecated since 2.2) --- .../upcoming_changes/30614.expired.rst | 1 + numpy/_core/_add_newdocs.py | 26 ---------- numpy/_core/src/common/umathmodule.h | 1 - numpy/_core/src/multiarray/multiarraymodule.c | 2 - numpy/_core/src/umath/umathmodule.c | 47 ------------------- numpy/_core/tests/test_deprecations.py | 20 -------- numpy/_core/tests/test_umath.py | 10 ---- numpy/_core/umath.py | 1 - 8 files changed, 1 insertion(+), 107 deletions(-) create mode 100644 doc/release/upcoming_changes/30614.expired.rst diff --git a/doc/release/upcoming_changes/30614.expired.rst b/doc/release/upcoming_changes/30614.expired.rst new file mode 100644 index 000000000000..e0d95d2a75fc --- /dev/null +++ b/doc/release/upcoming_changes/30614.expired.rst @@ -0,0 +1 @@ +* ``_add_newdoc_ufunc(ufunc, newdoc)`` has been removed in favor of ``ufunc.__doc__ = newdoc`` (deprecated in 2.2) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index 0e3cee869fc9..9a0534cdf2fc 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -4954,32 +4954,6 @@ def _array_method_doc(name: str, params: str, doc: str) -> None: raise a TypeError """) -add_newdoc('numpy._core.umath', '_add_newdoc_ufunc', - """ - add_ufunc_docstring(ufunc, new_docstring) - - Replace the docstring for a ufunc with new_docstring. - This method will only work if the current docstring for - the ufunc is NULL. (At the C level, i.e. when ufunc->doc is NULL.) - - Parameters - ---------- - ufunc : numpy.ufunc - A ufunc whose current doc is NULL. - new_docstring : string - The new docstring for the ufunc. - - Notes - ----- - This method allocates memory for new_docstring on - the heap. 
Technically this creates a memory leak, since this - memory will not be reclaimed until the end of the program - even if the ufunc itself is removed. However this will only - be a problem if the user is repeatedly creating ufuncs with - no documentation, adding documentation via add_newdoc_ufunc, - and then throwing away the ufunc. - """) - add_newdoc('numpy._core.multiarray', 'get_handler_name', """ get_handler_name(a: ndarray) -> str | None diff --git a/numpy/_core/src/common/umathmodule.h b/numpy/_core/src/common/umathmodule.h index 73d853341cda..9fc693685e70 100644 --- a/numpy/_core/src/common/umathmodule.h +++ b/numpy/_core/src/common/umathmodule.h @@ -9,7 +9,6 @@ NPY_NO_EXPORT PyObject * get_sfloat_dtype(PyObject *NPY_UNUSED(mod), PyObject *NPY_UNUSED(args)); -PyObject * add_newdoc_ufunc(PyObject *NPY_UNUSED(dummy), PyObject *args); PyObject * ufunc_frompyfunc(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *NPY_UNUSED(kwds)); diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index e775760617e5..2c45508e9e66 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -4698,8 +4698,6 @@ static struct PyMethodDef array_module_methods[] = { {"_set_numpy_warn_if_no_mem_policy", (PyCFunction)_set_numpy_warn_if_no_mem_policy, METH_O, "Change the warn if no mem policy flag for testing."}, - {"_add_newdoc_ufunc", (PyCFunction)add_newdoc_ufunc, - METH_VARARGS, NULL}, {"_get_sfloat_dtype", get_sfloat_dtype, METH_NOARGS, NULL}, {"_get_madvise_hugepage", (PyCFunction)_get_madvise_hugepage, diff --git a/numpy/_core/src/umath/umathmodule.c b/numpy/_core/src/umath/umathmodule.c index 3efb02bd4a49..eac1283b95ff 100644 --- a/numpy/_core/src/umath/umathmodule.c +++ b/numpy/_core/src/umath/umathmodule.c @@ -163,53 +163,6 @@ ufunc_frompyfunc(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) { return (PyObject *)self; } -/* docstring in 
numpy.add_newdocs.py */ -PyObject * -add_newdoc_ufunc(PyObject *NPY_UNUSED(dummy), PyObject *args) -{ - - /* 2024-11-12, NumPy 2.2 */ - if (DEPRECATE("_add_newdoc_ufunc is deprecated. " - "Use `ufunc.__doc__ = newdoc` instead.") < 0) { - return NULL; - } - - PyUFuncObject *ufunc; - PyObject *str; - if (!PyArg_ParseTuple(args, "O!O!:_add_newdoc_ufunc", &PyUFunc_Type, &ufunc, - &PyUnicode_Type, &str)) { - return NULL; - } - if (ufunc->doc != NULL) { - PyErr_SetString(PyExc_ValueError, - "Cannot change docstring of ufunc with non-NULL docstring"); - return NULL; - } - - PyObject *tmp = PyUnicode_AsUTF8String(str); - if (tmp == NULL) { - return NULL; - } - char *docstr = PyBytes_AS_STRING(tmp); - - /* - * This introduces a memory leak, as the memory allocated for the doc - * will not be freed even if the ufunc itself is deleted. In practice - * this should not be a problem since the user would have to - * repeatedly create, document, and throw away ufuncs. - */ - char *newdocstr = malloc(strlen(docstr) + 1); - if (!newdocstr) { - Py_DECREF(tmp); - return PyErr_NoMemory(); - } - strcpy(newdocstr, docstr); - ufunc->doc = newdocstr; - - Py_DECREF(tmp); - Py_RETURN_NONE; -} - /* ***************************************************************************** diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index 2853a5052b59..29d31e5d75cc 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -326,26 +326,6 @@ def test_parenthesized_repeat_count(self, string): self.assert_deprecated(np.dtype, args=(string,)) -class TestAddNewdocUFunc(_DeprecationTestCase): - # Deprecated in Numpy 2.2, 2024-11 - @pytest.mark.thread_unsafe( - reason="modifies and checks docstring which is global state" - ) - def test_deprecated(self): - doc = struct_ufunc.add_triplet.__doc__ - # gh-26718 - # This test mutates the C-level docstring pointer for add_triplet, - # which is permanent once set. 
Skip when re-running tests. - if doc is not None and "new docs" in doc: - pytest.skip("Cannot retest deprecation, otherwise ValueError: " - "Cannot change docstring of ufunc with non-NULL docstring") - self.assert_deprecated( - lambda: np._core.umath._add_newdoc_ufunc( - struct_ufunc.add_triplet, "new docs" - ) - ) - - class TestDTypeAlignBool(_VisibleDeprecationTestCase): # Deprecated in Numpy 2.4, 2025-07 # NOTE: As you can see, finalizing this deprecation breaks some (very) old diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py index 2407a0267ed6..f7a4dae20bf5 100644 --- a/numpy/_core/tests/test_umath.py +++ b/numpy/_core/tests/test_umath.py @@ -4942,16 +4942,6 @@ def func(): ncu.add_docstring(func, "different docstring") -class TestAdd_newdoc_ufunc: - @pytest.mark.filterwarnings("ignore:_add_newdoc_ufunc:DeprecationWarning") - def test_ufunc_arg(self): - assert_raises(TypeError, ncu._add_newdoc_ufunc, 2, "blah") - assert_raises(ValueError, ncu._add_newdoc_ufunc, np.add, "blah") - - @pytest.mark.filterwarnings("ignore:_add_newdoc_ufunc:DeprecationWarning") - def test_string_arg(self): - assert_raises(TypeError, ncu._add_newdoc_ufunc, np.add, 3) - class TestHypotErrorMessages: def test_hypot_error_message_single_arg(self): with pytest.raises(TypeError, match="hypot\\(\\) takes .* but 1 was given"): diff --git a/numpy/_core/umath.py b/numpy/_core/umath.py index cc4b8a1238f0..0f37d41749df 100644 --- a/numpy/_core/umath.py +++ b/numpy/_core/umath.py @@ -17,7 +17,6 @@ # These imports are needed for the strip & replace implementations from ._multiarray_umath import ( _UFUNC_API, - _add_newdoc_ufunc, _center, _expandtabs, _expandtabs_length, From 4438d2b59deec6a412e9b161321bfdd77e0b1eab Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 8 Jan 2026 23:13:50 +0100 Subject: [PATCH 1242/1718] STY: remove unused import --- numpy/_core/tests/test_deprecations.py | 1 - 1 file changed, 1 deletion(-) diff --git 
a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index 29d31e5d75cc..e37a45eeb92e 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -9,7 +9,6 @@ import pytest import numpy as np -import numpy._core._struct_ufunc_tests as struct_ufunc from numpy._core._multiarray_tests import fromstring_null_term_c_api # noqa: F401 from numpy.testing import assert_raises From eb9fbb162a43c48188e61ac40c81d519410efd42 Mon Sep 17 00:00:00 2001 From: Aniket Singh Yadav Date: Fri, 9 Jan 2026 09:50:06 +0530 Subject: [PATCH 1243/1718] fix:recarray, masked_array and datetime break fill value calculation --- numpy/ma/core.py | 1 - 1 file changed, 1 deletion(-) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 896f312a24f9..49ee486907cc 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -236,7 +236,6 @@ def _recursive_fill_value(dtype, f): # We wrap into `array` here, which ensures we use NumPy cast rules # for integer casts, this allows the use of 99999 as a fill value # for int8. - # TODO: This is probably a mess, but should best preserve behavior? 
vals = [] for name in dtype.names: field_dtype = dtype[name] From 55b8a77cb3c709c66f83146ca0049edc4a678a07 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 8 Jan 2026 20:54:22 +0100 Subject: [PATCH 1244/1718] DEP: ``bincount``: disallow non-integer input --- doc/release/upcoming_changes/30610.expired.rst | 1 + numpy/_core/src/multiarray/compiled_base.c | 15 +-------------- numpy/_core/tests/test_deprecations.py | 8 -------- numpy/lib/tests/test_function_base.py | 5 +++++ 4 files changed, 7 insertions(+), 22 deletions(-) create mode 100644 doc/release/upcoming_changes/30610.expired.rst diff --git a/doc/release/upcoming_changes/30610.expired.rst b/doc/release/upcoming_changes/30610.expired.rst new file mode 100644 index 000000000000..eb806c954b16 --- /dev/null +++ b/doc/release/upcoming_changes/30610.expired.rst @@ -0,0 +1 @@ +* ``bincount`` now raises a ``TypeError`` for non-integer inputs (deprecated since 2.1). diff --git a/numpy/_core/src/multiarray/compiled_base.c b/numpy/_core/src/multiarray/compiled_base.c index e6a45554555f..23e922d470d0 100644 --- a/numpy/_core/src/multiarray/compiled_base.c +++ b/numpy/_core/src/multiarray/compiled_base.c @@ -158,20 +158,7 @@ arr_bincount(PyObject *NPY_UNUSED(self), PyObject *const *args, lst = (PyArrayObject *)PyArray_FromAny((PyObject *)tmp1, local_dtype, 1, 1, flags, NULL); Py_DECREF(tmp1); if (lst == NULL) { - /* Failed converting to NPY_INTP. */ - if (PyErr_ExceptionMatches(PyExc_TypeError)) { - PyErr_Clear(); - /* Deprecated 2024-08-02, NumPy 2.1 */ - if (DEPRECATE("Non-integer input passed to bincount. In a " - "future version of NumPy, this will be an " - "error. (Deprecated NumPy 2.1)") < 0) { - goto fail; - } - } - else { - /* Failure was not a TypeError. 
*/ - goto fail; - } + goto fail; } } else { diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index ce9edad6ddb0..3fad93a8f3b1 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -137,14 +137,6 @@ def foo(): test_case_instance.assert_deprecated(foo) -class TestBincount(_DeprecationTestCase): - # 2024-07-29, 2.1.0 - @pytest.mark.parametrize('badlist', [[0.5, 1.2, 1.5], - ['0', '1', '1']]) - def test_bincount_bad_list(self, badlist): - self.assert_deprecated(lambda: np.bincount(badlist)) - - class TestCtypesGetter(_DeprecationTestCase): ctypes = np.array([1]).ctypes diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index f586813208e9..200aa7e02f00 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -3065,6 +3065,11 @@ def test_error_not_1d(self, vals): with assert_raises(ValueError): np.bincount(vals) + @pytest.mark.parametrize("vals", [[1.0], [1j], ["1"], [b"1"]]) + def test_error_not_int(self, vals): + with assert_raises(TypeError): + np.bincount(vals) + @pytest.mark.parametrize("dt", np.typecodes["AllInteger"]) def test_gh_28354(self, dt): a = np.array([0, 1, 1, 3, 2, 1, 7], dtype=dt) From 1d5eb192cf6d9709153320dfc54e46648e0c75c3 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 8 Jan 2026 20:55:40 +0100 Subject: [PATCH 1245/1718] TYP: ``bincount`` only accept int array-likes --- numpy/_core/multiarray.pyi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index 8e41ac23579b..cc2ff92ead16 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -709,7 +709,7 @@ def vdot(a: _ArrayLikeObject_co, b: object, /) -> Any: ... def vdot(a: object, b: _ArrayLikeObject_co, /) -> Any: ... # -def bincount(x: ArrayLike, /, weights: ArrayLike | None = None, minlength: SupportsIndex = 0) -> _Array1D[intp]: ... 
+def bincount(x: _ArrayLikeInt_co, /, weights: ArrayLike | None = None, minlength: SupportsIndex = 0) -> _Array1D[intp]: ... # def copyto(dst: ndarray, src: ArrayLike, casting: _CastingKind = "same_kind", where: object = True) -> None: ... From 5c8b029343146113b7ecfe022f4d954c19ff1ae3 Mon Sep 17 00:00:00 2001 From: Kumar Aditya Date: Fri, 9 Jan 2026 16:55:06 +0530 Subject: [PATCH 1246/1718] Merge pull request #30609 from kumaraditya303/fast-ci CI: build numpy with 2 jobs for faster sanitizers CI --- .github/workflows/compiler_sanitizers.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/compiler_sanitizers.yml b/.github/workflows/compiler_sanitizers.yml index 9db9313c34e3..962646122e17 100644 --- a/.github/workflows/compiler_sanitizers.yml +++ b/.github/workflows/compiler_sanitizers.yml @@ -69,7 +69,7 @@ jobs: pip uninstall -y pytest-xdist - name: Build run: - python -m spin build -- -Db_sanitize=address,undefined -Db_lundef=false + python -m spin build -j2 -- -Db_sanitize=address,undefined -Db_lundef=false - name: Test run: | # pass -s to pytest to see ASAN errors and warnings, otherwise pytest captures them @@ -99,7 +99,7 @@ jobs: run: pip install -U spin - name: Build NumPy with ThreadSanitizer - run: python -m spin build -- -Db_sanitize=thread + run: python -m spin build -j2 -- -Db_sanitize=thread - name: Run tests under prebuilt TSAN container run: | From 4b70393e294d9d5c2b1b5b69d6a5b806c12b310c Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 9 Jan 2026 14:56:16 +0100 Subject: [PATCH 1247/1718] CI/TYP: use basedpyright for type-completeness checking --- .github/workflows/mypy.yml | 10 +++++++--- requirements/test_requirements.txt | 1 - tools/pyright_completeness.py | 6 +++++- 3 files changed, 12 insertions(+), 5 deletions(-) diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml index da311699482d..1e37b0e777ab 100644 --- a/.github/workflows/mypy.yml +++ b/.github/workflows/mypy.yml @@ -76,12 +76,16 @@ 
jobs: -r requirements/build_requirements.txt -r requirements/test_requirements.txt orjson + basedpyright - name: Build run: | spin build -j2 -- -Dallow-noblas=true -Ddisable-optimization=true --vsenv - name: Run Mypy run: | spin mypy - - name: Check Pyright's type completeness is at least 100% - run: | - spin run python tools/pyright_completeness.py --verifytypes numpy --ignoreexternal --exclude-like '*.tests.*' '*.conftest.*' + - name: Check basedpyright's type completeness is at least 100% + run: >- + spin run python tools/pyright_completeness.py + --verifytypes numpy + --ignoreexternal + --exclude-like '*.tests.*' '*.conftest.*' diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index 784deac871b6..e3b17f0fc856 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -8,7 +8,6 @@ pytest-xdist pytest-timeout # For testing types mypy==1.19.1 -pyright # for optional f2py encoding detection charset-normalizer tzdata diff --git a/tools/pyright_completeness.py b/tools/pyright_completeness.py index b861328c8657..f1c52913a9c5 100644 --- a/tools/pyright_completeness.py +++ b/tools/pyright_completeness.py @@ -2,6 +2,8 @@ Run PyRight's `--verifytypes` and check that its reported type completeness is above a minimum threshold. +Requires `basedpyright` to be installed in the environment. 
+ Example usage: spin run python tools/pyright_completeness.py --verifytypes numpy --ignoreexternal \ @@ -40,7 +42,9 @@ def run_pyright_with_coverage( exclude_like: Sequence[str], ) -> int: result = subprocess.run( - ["pyright", *pyright_args], capture_output=True, text=True + ["basedpyright", *pyright_args], + capture_output=True, + text=True, ) try: From f254f1251f85de2a6d13c2432222e4fa8292fbb2 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 9 Jan 2026 07:18:12 -0700 Subject: [PATCH 1248/1718] ENH: use more fine-grained critical sections in array coercion internals (#30514) --- numpy/_core/src/common/npy_pycompat.h | 2 +- numpy/_core/src/multiarray/array_coercion.c | 22 +++++-- numpy/_core/src/multiarray/ctors.c | 73 +++++++++++++-------- numpy/_core/src/multiarray/iterators.c | 4 +- numpy/_core/tests/test_multithreading.py | 58 +++++++++++----- 5 files changed, 108 insertions(+), 51 deletions(-) diff --git a/numpy/_core/src/common/npy_pycompat.h b/numpy/_core/src/common/npy_pycompat.h index 605833a511b7..52d44b17283a 100644 --- a/numpy/_core/src/common/npy_pycompat.h +++ b/numpy/_core/src/common/npy_pycompat.h @@ -34,7 +34,7 @@ } \ } #else -#define NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(original) { +#define NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(original) { do { (void)(original); } while (0) #define NPY_END_CRITICAL_SECTION_SEQUENCE_FAST() } #endif diff --git a/numpy/_core/src/multiarray/array_coercion.c b/numpy/_core/src/multiarray/array_coercion.c index 2de639611bf6..3c5373e7f28e 100644 --- a/numpy/_core/src/multiarray/array_coercion.c +++ b/numpy/_core/src/multiarray/array_coercion.c @@ -1159,6 +1159,10 @@ PyArray_DiscoverDTypeAndShape_Recursive( return -1; } + int ret = -1; + + NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(obj); + npy_intp size = PySequence_Fast_GET_SIZE(seq); PyObject **objects = PySequence_Fast_ITEMS(seq); @@ -1166,17 +1170,19 @@ PyArray_DiscoverDTypeAndShape_Recursive( out_shape, 1, &size, NPY_TRUE, flags) < 0) { /* But do 
update, if there this is a ragged case */ *flags |= FOUND_RAGGED_ARRAY; - return max_dims; + ret = max_dims; + goto finish; } if (size == 0) { /* If the sequence is empty, this must be the last dimension */ *flags |= MAX_DIMS_WAS_REACHED; - return curr_dims + 1; + ret = curr_dims + 1; + goto finish; } /* Allow keyboard interrupts. See gh issue 18117. */ if (PyErr_CheckSignals() < 0) { - return -1; + goto finish; } /* @@ -1196,10 +1202,16 @@ PyArray_DiscoverDTypeAndShape_Recursive( flags, copy); if (max_dims < 0) { - return -1; + goto finish; } } - return max_dims; + ret = max_dims; + + finish:; + + NPY_END_CRITICAL_SECTION_SEQUENCE_FAST(); + + return ret; } diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index 3a43e1bd983b..91a3db2d6e5f 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -493,33 +493,49 @@ NPY_NO_EXPORT int PyArray_AssignFromCache_Recursive( PyArrayObject *self, const int ndim, coercion_cache_obj **cache) { + int ret = -1; /* Consume first cache element by extracting information and freeing it */ PyObject *obj = (*cache)->arr_or_sequence; Py_INCREF(obj); - npy_bool sequence = (*cache)->sequence; + npy_bool is_sequence = (*cache)->sequence; + /* + If it is a sequence, this object is the argument to PySequence_Fast, e.g. 
+ the iterable that the user wants to coerce into an array + */ + PyObject *orig_seq = (*cache)->converted_obj; + /* Owned reference to an item in the sequence */ + PyObject *item_pyvalue = NULL; int depth = (*cache)->depth; *cache = npy_unlink_coercion_cache(*cache); - /* The element is either a sequence, or an array */ - if (!sequence) { + /* The element is either a sequence or an array */ + if (!is_sequence) { /* Straight forward array assignment */ assert(PyArray_Check(obj)); if (PyArray_CopyInto(self, (PyArrayObject *)obj) < 0) { - goto fail; + goto finish; } } else { assert(depth != ndim); - npy_intp length = PySequence_Length(obj); - if (length != PyArray_DIMS(self)[0]) { - PyErr_SetString(PyExc_RuntimeError, - "Inconsistent object during array creation? " - "Content of sequences changed (length inconsistent)."); - goto fail; - } - - for (npy_intp i = 0; i < length; i++) { - PyObject *value = PySequence_Fast_GET_ITEM(obj, i); + npy_intp orig_length = PyArray_DIMS(self)[0]; + int err = 1; + NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(orig_seq); + for (npy_intp i = 0; i < orig_length; i++) { + // this macro takes *the argument* of PySequence_Fast, which is orig_seq; + // not the object returned by PySequence_Fast, which is a proxy object + // with its own per-object PyMutex lock. + // We want to lock the list object exposed to users, not the proxy. + npy_intp length = PySequence_Fast_GET_SIZE(obj); + if (length != orig_length) { + PyErr_SetString(PyExc_RuntimeError, + "Inconsistent object during array creation? 
" + "Content of sequences changed (length inconsistent)."); + goto finish_critical_section; + } + else { + Py_XSETREF(item_pyvalue, Py_NewRef(PySequence_Fast_GET_ITEM(obj, i))); + } if (ndim == depth + 1) { /* @@ -532,11 +548,11 @@ PyArray_AssignFromCache_Recursive( */ char *item; item = (PyArray_BYTES(self) + i * PyArray_STRIDES(self)[0]); - if (PyArray_Pack(PyArray_DESCR(self), item, value) < 0) { - goto fail; + if (PyArray_Pack(PyArray_DESCR(self), item, item_pyvalue) < 0) { + goto finish_critical_section; } /* If this was an array(-like) we still need to unlike int: */ - if (*cache != NULL && (*cache)->converted_obj == value) { + if (*cache != NULL && (*cache)->converted_obj == item_pyvalue) { *cache = npy_unlink_coercion_cache(*cache); } } @@ -544,22 +560,30 @@ PyArray_AssignFromCache_Recursive( PyArrayObject *view; view = (PyArrayObject *)array_item_asarray(self, i); if (view == NULL) { - goto fail; + goto finish_critical_section; } if (PyArray_AssignFromCache_Recursive(view, ndim, cache) < 0) { Py_DECREF(view); - goto fail; + goto finish_critical_section; } Py_DECREF(view); } } + err = 0; + finish_critical_section:; + + NPY_END_CRITICAL_SECTION_SEQUENCE_FAST(); + if (err) { + goto finish; + } + } - Py_DECREF(obj); - return 0; + ret = 0; - fail: + finish:; + Py_XDECREF(item_pyvalue); Py_DECREF(obj); - return -1; + return ret; } @@ -1571,8 +1595,6 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, copy = 1; } - Py_BEGIN_CRITICAL_SECTION(op); - ndim = PyArray_DiscoverDTypeAndShape( op, max_depth, dims, &cache, in_DType, in_descr, &dtype, copy, &was_copied_by__array__); @@ -1741,7 +1763,6 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, cleanup:; Py_XDECREF(dtype); - Py_END_CRITICAL_SECTION(); return (PyObject *)ret; } diff --git a/numpy/_core/src/multiarray/iterators.c b/numpy/_core/src/multiarray/iterators.c index bf77abcb547e..ae4797f59a86 100644 --- a/numpy/_core/src/multiarray/iterators.c +++ b/numpy/_core/src/multiarray/iterators.c 
@@ -1354,7 +1354,7 @@ arraymultiter_new(PyTypeObject *NPY_UNUSED(subtype), PyObject *args, if (fast_seq == NULL) { return NULL; } - NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(args) + NPY_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(args); n = PySequence_Fast_GET_SIZE(fast_seq); if (n > NPY_MAXARGS) { ret = multiiter_wrong_number_of_args(); @@ -1362,7 +1362,7 @@ arraymultiter_new(PyTypeObject *NPY_UNUSED(subtype), PyObject *args, ret = multiiter_new_impl(n, PySequence_Fast_ITEMS(fast_seq)); } Py_DECREF(fast_seq); - NPY_END_CRITICAL_SECTION_SEQUENCE_FAST() + NPY_END_CRITICAL_SECTION_SEQUENCE_FAST(); return ret; } diff --git a/numpy/_core/tests/test_multithreading.py b/numpy/_core/tests/test_multithreading.py index 44b2c34cd68b..83484fb4c131 100644 --- a/numpy/_core/tests/test_multithreading.py +++ b/numpy/_core/tests/test_multithreading.py @@ -308,27 +308,33 @@ def func(index): # These are all implemented using PySequence_Fast, which needs locking to be safe def np_broadcast(arrs): - for i in range(100): + for i in range(50): np.broadcast(arrs) def create_array(arrs): - for i in range(100): + for i in range(50): np.array(arrs) def create_nditer(arrs): - for i in range(1000): + for i in range(50): np.nditer(arrs) -@pytest.mark.parametrize("kernel", (np_broadcast, create_array, create_nditer)) -def test_arg_locking(kernel): - # should complete without failing or generating an error about an array size - # changing - b = threading.Barrier(5) +@pytest.mark.parametrize( + "kernel, outcome", + ( + (np_broadcast, "error"), + (create_array, "error"), + (create_nditer, "success"), + ), +) +def test_arg_locking(kernel, outcome): + # should complete without triggering races but may error + done = 0 - arrs = [] + arrs = [np.array([1, 2, 3]) for _ in range(1000)] - def read_arrs(): + def read_arrs(b): nonlocal done b.wait() try: @@ -336,7 +342,7 @@ def read_arrs(): finally: done += 1 - def mutate_list(): + def contract_and_expand_list(b): b.wait() while done < 4: if len(arrs) > 10: @@ 
-344,10 +350,28 @@ def mutate_list(): elif len(arrs) <= 10: arrs.extend([np.array([1, 2, 3]) for _ in range(1000)]) - arrs = [np.array([1, 2, 3]) for _ in range(1000)] - - tasks = [threading.Thread(target=read_arrs) for _ in range(4)] - tasks.append(threading.Thread(target=mutate_list)) + def replace_list_items(b): + b.wait() + rng = np.random.RandomState() + rng.seed(0x4d3d3d3) + while done < 4: + data = rng.randint(0, 1000, size=4) + arrs[data[0]] = data[1:] - [t.start() for t in tasks] - [t.join() for t in tasks] + for mutation_func in (replace_list_items, contract_and_expand_list): + b = threading.Barrier(5) + try: + with concurrent.futures.ThreadPoolExecutor(max_workers=5) as tpe: + tasks = [tpe.submit(read_arrs, b) for _ in range(4)] + tasks.append(tpe.submit(mutation_func, b)) + for t in tasks: + t.result() + except RuntimeError as e: + if outcome == "success": + raise + assert "Inconsistent object during array creation?" in str(e) + msg = "replace_list_items should not raise errors" + assert mutation_func is contract_and_expand_list, msg + finally: + if len(tasks) < 5: + b.abort() From 1f355fcc38642d69ac5b8e722615aaafd2e2c723 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 10 Jan 2026 19:33:43 +0100 Subject: [PATCH 1249/1718] TYP: ``arange``: accept datetime strings --- numpy/_core/multiarray.pyi | 16 ++++++++++-- numpy/ma/core.pyi | 17 ++++++++++-- .../tests/data/reveal/array_constructors.pyi | 26 ++++++++++--------- 3 files changed, 43 insertions(+), 16 deletions(-) diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index cc2ff92ead16..35111cf42225 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -67,6 +67,7 @@ from numpy._typing import ( _ArrayLikeStr_co, _ArrayLikeTD64_co, _ArrayLikeUInt_co, + _DT64Codes, _DTypeLike, _FloatLike_co, _IntLike_co, @@ -1117,11 +1118,22 @@ def arange( device: L["cpu"] | None = None, like: _SupportsArrayFunc | None = None, ) -> _Array1D[np.datetime64[Incomplete]]: ... 
+@overload # (str, str, timedelta-like, dtype=dt64-like) (requires both start and stop) +def arange( + start_or_stop: str, + /, + stop: str, + step: _TD64Like_co | None = 1, + *, + dtype: _DTypeLike[np.datetime64] | _DT64Codes, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.datetime64[Incomplete]]: ... @overload # dtype= def arange( - start_or_stop: _ArangeScalar | float, + start_or_stop: _ArangeScalar | float | str, /, - stop: _ArangeScalar | float | None = None, + stop: _ArangeScalar | float | str | None = None, step: _ArangeScalar | float | None = 1, *, dtype: DTypeLike | None = None, diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 10234e51ea2b..4dd3bbfbb098 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -89,6 +89,7 @@ from numpy._typing import ( _ArrayLikeTD64_co, _ArrayLikeUInt_co, _CharLike_co, + _DT64Codes, _DTypeLike, _DTypeLikeBool, _DTypeLikeVoid, @@ -4057,11 +4058,23 @@ def arange( fill_value: _FillValue | None = None, hardmask: bool = False, ) -> _Masked1D[np.datetime64[Incomplete]]: ... +@overload # (str, str, timedelta-like, dtype=dt64-like) (requires both start and stop) +def arange( + start_or_stop: str, + /, + stop: str, + step: _TD64Like_co | None = 1, + *, + dtype: _DTypeLike[np.datetime64] | _DT64Codes, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _Masked1D[np.datetime64[Incomplete]]: ... 
@overload # dtype= def arange( - start_or_stop: _ArangeScalar | float, + start_or_stop: _ArangeScalar | float | str, /, - stop: _ArangeScalar | float | None = None, + stop: _ArangeScalar | float | str | None = None, step: _ArangeScalar | float | None = 1, *, dtype: DTypeLike | None = None, diff --git a/numpy/typing/tests/data/reveal/array_constructors.pyi b/numpy/typing/tests/data/reveal/array_constructors.pyi index d17793044d37..ffe0834e3309 100644 --- a/numpy/typing/tests/data/reveal/array_constructors.pyi +++ b/numpy/typing/tests/data/reveal/array_constructors.pyi @@ -120,18 +120,20 @@ _x_float: float _x_timedelta: np.timedelta64[int] _x_datetime: np.datetime64[int] -assert_type(np.arange(False, True), np.ndarray[tuple[int], np.dtype[np.int_]]) -assert_type(np.arange(10), np.ndarray[tuple[int], np.dtype[np.int_]]) -assert_type(np.arange(0, 10, step=2), np.ndarray[tuple[int], np.dtype[np.int_]]) -assert_type(np.arange(10.0), np.ndarray[tuple[int], np.dtype[np.float64 | Any]]) -assert_type(np.arange(0, stop=10.0), np.ndarray[tuple[int], np.dtype[np.float64 | Any]]) -assert_type(np.arange(_x_timedelta), np.ndarray[tuple[int], np.dtype[np.timedelta64]]) -assert_type(np.arange(0, _x_timedelta), np.ndarray[tuple[int], np.dtype[np.timedelta64]]) -assert_type(np.arange(_x_datetime, _x_datetime), np.ndarray[tuple[int], np.dtype[np.datetime64]]) -assert_type(np.arange(10, dtype=np.float64), np.ndarray[tuple[int], np.dtype[np.float64]]) -assert_type(np.arange(0, 10, step=2, dtype=np.int16), np.ndarray[tuple[int], np.dtype[np.int16]]) -assert_type(np.arange(10, dtype=int), np.ndarray[tuple[int], np.dtype[np.int_]]) -assert_type(np.arange(0, 10, dtype="f8"), np.ndarray[tuple[int], np.dtype]) +assert_type(np.arange(False, True), _Array1D[np.int_]) +assert_type(np.arange(10), _Array1D[np.int_]) +assert_type(np.arange(0, 10, step=2), _Array1D[np.int_]) +assert_type(np.arange(10.0), _Array1D[np.float64 | Any]) +assert_type(np.arange(0, stop=10.0), _Array1D[np.float64 | Any]) 
+assert_type(np.arange(_x_timedelta), _Array1D[np.timedelta64]) +assert_type(np.arange(0, _x_timedelta), _Array1D[np.timedelta64]) +assert_type(np.arange(_x_datetime, _x_datetime), _Array1D[np.datetime64]) +assert_type(np.arange(10, dtype=np.float64), _Array1D[np.float64]) +assert_type(np.arange(0, 10, step=2, dtype=np.int16), _Array1D[np.int16]) +assert_type(np.arange(10, dtype=int), _Array1D[np.int_]) +assert_type(np.arange(0, 10, dtype="f8"), _Array1D[Any]) +# https://github.com/numpy/numpy/issues/30628 +assert_type(np.arange("2025-12-20", "2025-12-23", dtype="datetime64[D]"), _Array1D[np.datetime64]) assert_type(np.require(A), npt.NDArray[np.float64]) assert_type(np.require(B), SubClass[np.float64]) From 77ea81df4cfbfdcbe6846a07fdced220141c59af Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sat, 10 Jan 2026 11:45:13 -0700 Subject: [PATCH 1250/1718] MAINT: Update main after 2.4.0 release. - Forward port 2.4.1-notes.rst - Forward port 2.4.1-changelog.rst - Update release.rst - Update RELEASE_WALKTHROUGH.rst [skip cirrus] [skip actions] --- doc/RELEASE_WALKTHROUGH.rst | 10 +++--- doc/changelog/2.4.1-changelog.rst | 37 +++++++++++++++++++++ doc/source/release.rst | 1 + doc/source/release/2.4.1-notes.rst | 52 ++++++++++++++++++++++++++++++ 4 files changed, 96 insertions(+), 4 deletions(-) create mode 100644 doc/changelog/2.4.1-changelog.rst create mode 100644 doc/source/release/2.4.1-notes.rst diff --git a/doc/RELEASE_WALKTHROUGH.rst b/doc/RELEASE_WALKTHROUGH.rst index c8c1f129c0b2..9ed9a20df894 100644 --- a/doc/RELEASE_WALKTHROUGH.rst +++ b/doc/RELEASE_WALKTHROUGH.rst @@ -312,10 +312,12 @@ This assumes that you have forked ``_:: - For all releases, go to the bottom of the page and add a one line link. Look to the previous links for example. -- For the ``*.0`` release in a cycle, add a new section at the top with a short - description of the new features and point the news link to it. 
-- Edit the newsHeader and date fields at the top of news.md -- Also edit the buttonText on line 14 in content/en/config.yaml +- For the ``*.0`` release in a cycle: + + - Add a new section at the top with a short description of the new + features and point the news link to it. + - Edit the newsHeader and date fields at the top of news.md + - Also edit the buttonText on line 14 in content/en/config.yaml commit and push:: diff --git a/doc/changelog/2.4.1-changelog.rst b/doc/changelog/2.4.1-changelog.rst new file mode 100644 index 000000000000..3cf0d8ad0ec5 --- /dev/null +++ b/doc/changelog/2.4.1-changelog.rst @@ -0,0 +1,37 @@ + +Contributors +============ + +A total of 9 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Alexander Shadchin +* Bill Tompkins + +* Charles Harris +* Joren Hammudoglu +* Marten van Kerkwijk +* Nathan Goldbaum +* Raghuveer Devulapalli +* Ralf Gommers +* Sebastian Berg + +Pull requests merged +==================== + +A total of 15 pull requests were merged for this release. + +* `#30490 `__: MAINT: Prepare 2.4.x for further development +* `#30503 `__: DOC: ``numpy.select``\ : fix ``default`` parameter docstring... +* `#30504 `__: REV: Revert part of #30164 (#30500) +* `#30506 `__: TYP: ``numpy.select``\ : allow passing array-like ``default``... +* `#30507 `__: MNT: use if constexpr for compile-time branch selection +* `#30513 `__: BUG: Fix leak in flat assignment iterator +* `#30516 `__: BUG: fix heap overflow in fixed-width string multiply (#30511) +* `#30523 `__: BUG: Ensure summed weights returned by np.average always are... +* `#30527 `__: TYP: Fix return type of histogram2d +* `#30594 `__: MAINT: avoid passing ints to random functions that take double... 
+* `#30595 `__: BLD: Avoiding conflict with pygit2 for static build +* `#30596 `__: MAINT: Fix msvccompiler missing error on FreeBSD +* `#30608 `__: BLD: update vendored Meson to 1.9.2 +* `#30620 `__: ENH: use more fine-grained critical sections in array coercion... +* `#30623 `__: BUG: Undo result type change of quantile/percentile but keep... diff --git a/doc/source/release.rst b/doc/source/release.rst index 5842fa9fc61a..36e01cac6e1b 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -6,6 +6,7 @@ Release notes :maxdepth: 2 2.5.0 + 2.4.1 2.4.0 2.3.5 2.3.4 diff --git a/doc/source/release/2.4.1-notes.rst b/doc/source/release/2.4.1-notes.rst new file mode 100644 index 000000000000..c033a070bd73 --- /dev/null +++ b/doc/source/release/2.4.1-notes.rst @@ -0,0 +1,52 @@ +.. currentmodule:: numpy + +========================= +NumPy 2.4.1 Release Notes +========================= + +The NumPy 2.4.1 is a patch release that fixes bugs discovered after the +2.4.0 release. In particular, the typo `SeedlessSequence` is preserved to +enable wheels using the random Cython API and built against NumPy < 2.4.0 +to run without errors. + +This release supports Python versions 3.11-3.14 + + +Contributors +============ + +A total of 9 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Alexander Shadchin +* Bill Tompkins + +* Charles Harris +* Joren Hammudoglu +* Marten van Kerkwijk +* Nathan Goldbaum +* Raghuveer Devulapalli +* Ralf Gommers +* Sebastian Berg + + +Pull requests merged +==================== + +A total of 15 pull requests were merged for this release. + +* `#30490 `__: MAINT: Prepare 2.4.x for further development +* `#30503 `__: DOC: ``numpy.select``\ : fix ``default`` parameter docstring... +* `#30504 `__: REV: Revert part of #30164 (#30500) +* `#30506 `__: TYP: ``numpy.select``\ : allow passing array-like ``default``... 
+* `#30507 `__: MNT: use if constexpr for compile-time branch selection +* `#30513 `__: BUG: Fix leak in flat assignment iterator +* `#30516 `__: BUG: fix heap overflow in fixed-width string multiply (#30511) +* `#30523 `__: BUG: Ensure summed weights returned by np.average always are... +* `#30527 `__: TYP: Fix return type of histogram2d +* `#30594 `__: MAINT: avoid passing ints to random functions that take double... +* `#30595 `__: BLD: Avoiding conflict with pygit2 for static build +* `#30596 `__: MAINT: Fix msvccompiler missing error on FreeBSD +* `#30608 `__: BLD: update vendored Meson to 1.9.2 +* `#30620 `__: ENH: use more fine-grained critical sections in array coercion... +* `#30623 `__: BUG: Undo result type change of quantile/percentile but keep... + From c87b96a277bf2a20450f4a79653dfd2b8f5e35bf Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 10 Jan 2026 20:56:26 +0100 Subject: [PATCH 1251/1718] TYP: ``dtype``: unit-based ``datetime64`` specialization --- numpy/__init__.pyi | 48 ++++++++++++++++++++---- numpy/typing/tests/data/reveal/dtype.pyi | 17 +++++++++ 2 files changed, 58 insertions(+), 7 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 210edd322ad8..1fc0d320c249 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -97,7 +97,6 @@ from numpy._typing import ( # type: ignore[deprecated] _ULongLongCodes, _LongDoubleCodes, _CLongDoubleCodes, - _DT64Codes, _TD64Codes, _StrCodes, _BytesCodes, @@ -118,6 +117,12 @@ from numpy._typing import ( # type: ignore[deprecated] _UFunc_Nin2_Nout2, _GUFunc_Nin2_Nout1, ) +from numpy._typing._char_codes import ( + _DT64Codes_any, + _DT64Codes_date, + _DT64Codes_datetime, + _DT64Codes_int, +) # NOTE: Numpy's mypy plugin is used for removing the types unavailable to the specific platform from numpy._typing._extended_precision import ( @@ -1393,26 +1398,55 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 metadata: dict[str, Any] = ..., ) -> dtype[clongdouble]: ... 
- # Miscellaneous string-based representations and ctypes - @overload + # datetime64 + @overload # datetime64[{Y,M,W,D}] def __new__( cls, - dtype: _TD64Codes, + dtype: _DT64Codes_date, align: py_bool = False, copy: py_bool = False, *, metadata: dict[str, Any] = ..., - ) -> dtype[timedelta64]: ... - @overload + ) -> dtype[datetime64[dt.date]]: ... + @overload # datetime64[{h,m,s,ms,us}] + def __new__( + cls, + dtype: _DT64Codes_datetime, + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[str, Any] = ..., + ) -> dtype[datetime64[dt.datetime]]: ... + @overload # datetime64[{ns,ps,fs,as}] + def __new__( + cls, + dtype: _DT64Codes_int, + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[str, Any] = ..., + ) -> dtype[datetime64[int]]: ... + @overload # datetime64[?] def __new__( cls, - dtype: _DT64Codes, + dtype: _DT64Codes_any, align: py_bool = False, copy: py_bool = False, *, metadata: dict[str, Any] = ..., ) -> dtype[datetime64]: ... + # timedelta64 + @overload # timedelta64[?] + def __new__( + cls, + dtype: _TD64Codes, + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[str, Any] = ..., + ) -> dtype[timedelta64]: ... 
+ # `StringDType` requires special treatment because it has no scalar type @overload def __new__( diff --git a/numpy/typing/tests/data/reveal/dtype.pyi b/numpy/typing/tests/data/reveal/dtype.pyi index 48e9d54b7951..15ea47a6433b 100644 --- a/numpy/typing/tests/data/reveal/dtype.pyi +++ b/numpy/typing/tests/data/reveal/dtype.pyi @@ -68,6 +68,23 @@ assert_type(np.dtype("l"), np.dtype[np.int32 | np.int64]) assert_type(np.dtype("longlong"), np.dtype[np.longlong]) assert_type(np.dtype(">g"), np.dtype[np.longdouble]) assert_type(np.dtype(cs_integer), np.dtype[np.integer]) +# char-codes - datetime64 +assert_type(np.dtype("datetime64[Y]"), np.dtype[np.datetime64[dt.date]]) +assert_type(np.dtype("datetime64[M]"), np.dtype[np.datetime64[dt.date]]) +assert_type(np.dtype("datetime64[W]"), np.dtype[np.datetime64[dt.date]]) +assert_type(np.dtype("datetime64[D]"), np.dtype[np.datetime64[dt.date]]) +assert_type(np.dtype("datetime64[h]"), np.dtype[np.datetime64[dt.datetime]]) +assert_type(np.dtype("datetime64[m]"), np.dtype[np.datetime64[dt.datetime]]) +assert_type(np.dtype("datetime64[s]"), np.dtype[np.datetime64[dt.datetime]]) +assert_type(np.dtype("datetime64[ms]"), np.dtype[np.datetime64[dt.datetime]]) +assert_type(np.dtype("datetime64[us]"), np.dtype[np.datetime64[dt.datetime]]) +assert_type(np.dtype("datetime64[ns]"), np.dtype[np.datetime64[int]]) +assert_type(np.dtype("datetime64[ps]"), np.dtype[np.datetime64[int]]) +assert_type(np.dtype("datetime64[fs]"), np.dtype[np.datetime64[int]]) +assert_type(np.dtype("datetime64[as]"), np.dtype[np.datetime64[int]]) +assert_type(np.dtype("datetime64"), np.dtype[np.datetime64]) +assert_type(np.dtype("M8"), np.dtype[np.datetime64]) +assert_type(np.dtype("M"), np.dtype[np.datetime64]) # ctypes assert_type(np.dtype(ct.c_double), np.dtype[np.float64]) # see numpy/numpy#29155 From 78215f912946c44c9bfb2ab1223befd5e997b919 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 10 Jan 2026 21:02:35 +0100 Subject: [PATCH 1252/1718] TYP: ``dtype``: 
unit-based ``timedelta64`` specialization --- numpy/__init__.pyi | 23 ++++++++++++++++++++++- numpy/typing/tests/data/reveal/dtype.pyi | 17 +++++++++++++++++ 2 files changed, 39 insertions(+), 1 deletion(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 1fc0d320c249..0ac83275f15e 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -122,6 +122,9 @@ from numpy._typing._char_codes import ( _DT64Codes_date, _DT64Codes_datetime, _DT64Codes_int, + _TD64Codes_any, + _TD64Codes_int, + _TD64Codes_timedelta, ) # NOTE: Numpy's mypy plugin is used for removing the types unavailable to the specific platform @@ -1437,10 +1440,28 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 ) -> dtype[datetime64]: ... # timedelta64 + @overload # timedelta64[{W,D,h,m,s,ms,us}] + def __new__( + cls, + dtype: _TD64Codes_timedelta, + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[str, Any] = ..., + ) -> dtype[timedelta64[dt.timedelta]]: ... + @overload # timedelta64[{Y,M,ns,ps,fs,as}] + def __new__( + cls, + dtype: _TD64Codes_int, + align: py_bool = False, + copy: py_bool = False, + *, + metadata: dict[str, Any] = ..., + ) -> dtype[timedelta64[int]]: ... @overload # timedelta64[?] 
def __new__( cls, - dtype: _TD64Codes, + dtype: _TD64Codes_any, align: py_bool = False, copy: py_bool = False, *, diff --git a/numpy/typing/tests/data/reveal/dtype.pyi b/numpy/typing/tests/data/reveal/dtype.pyi index 15ea47a6433b..c8c9e393f76e 100644 --- a/numpy/typing/tests/data/reveal/dtype.pyi +++ b/numpy/typing/tests/data/reveal/dtype.pyi @@ -85,6 +85,23 @@ assert_type(np.dtype("datetime64[as]"), np.dtype[np.datetime64[int]]) assert_type(np.dtype("datetime64"), np.dtype[np.datetime64]) assert_type(np.dtype("M8"), np.dtype[np.datetime64]) assert_type(np.dtype("M"), np.dtype[np.datetime64]) +# char-codes - timedelta64 +assert_type(np.dtype("timedelta64[Y]"), np.dtype[np.timedelta64[int]]) +assert_type(np.dtype("timedelta64[M]"), np.dtype[np.timedelta64[int]]) +assert_type(np.dtype("timedelta64[W]"), np.dtype[np.timedelta64[dt.timedelta]]) +assert_type(np.dtype("timedelta64[D]"), np.dtype[np.timedelta64[dt.timedelta]]) +assert_type(np.dtype("timedelta64[h]"), np.dtype[np.timedelta64[dt.timedelta]]) +assert_type(np.dtype("timedelta64[m]"), np.dtype[np.timedelta64[dt.timedelta]]) +assert_type(np.dtype("timedelta64[s]"), np.dtype[np.timedelta64[dt.timedelta]]) +assert_type(np.dtype("timedelta64[ms]"), np.dtype[np.timedelta64[dt.timedelta]]) +assert_type(np.dtype("timedelta64[us]"), np.dtype[np.timedelta64[dt.timedelta]]) +assert_type(np.dtype("timedelta64[ns]"), np.dtype[np.timedelta64[int]]) +assert_type(np.dtype("timedelta64[ps]"), np.dtype[np.timedelta64[int]]) +assert_type(np.dtype("timedelta64[fs]"), np.dtype[np.timedelta64[int]]) +assert_type(np.dtype("timedelta64[as]"), np.dtype[np.timedelta64[int]]) +assert_type(np.dtype("timedelta64"), np.dtype[np.timedelta64]) +assert_type(np.dtype("m8"), np.dtype[np.timedelta64]) +assert_type(np.dtype("m"), np.dtype[np.timedelta64]) # ctypes assert_type(np.dtype(ct.c_double), np.dtype[np.float64]) # see numpy/numpy#29155 From e161c28abf292aed3624959826e455969a9d160d Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 10 Jan 
2026 22:57:44 +0100 Subject: [PATCH 1253/1718] TYP: remove unused imports in ``__init__.pyi`` --- numpy/__init__.pyi | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 210edd322ad8..983920b3b6d6 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -7,7 +7,7 @@ from abc import abstractmethod from builtins import bool as py_bool from decimal import Decimal from fractions import Fraction -from types import EllipsisType, ModuleType, TracebackType, MappingProxyType, GenericAlias +from types import EllipsisType, ModuleType, MappingProxyType, GenericAlias from uuid import UUID from numpy.__config__ import show as show_config @@ -18,7 +18,6 @@ from numpy._typing import ( # type: ignore[deprecated] # Arrays ArrayLike, NDArray, - _SupportsArray, _NestedSequence, _ArrayLike, _ArrayLikeBool_co, From 53a2c42b0b3f51a72c702c7688ba7bbf5650ad3a Mon Sep 17 00:00:00 2001 From: "Christine P. Chai" Date: Sat, 10 Jan 2026 19:22:04 -0800 Subject: [PATCH 1254/1718] DOC: Use "a np.type" instead of "an np.type" [skip azp][skip cirrus][skip actions] --- numpy/_core/src/multiarray/ctors.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index 0330d8c20619..91a3db2d6e5f 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -1355,7 +1355,7 @@ _array_from_buffer_3118(PyObject *memoryview) PyExc_RuntimeError, "For the given ctypes object, neither the item size " "computed from the PEP 3118 buffer format nor from " - "converting the type to an np.dtype matched the actual " + "converting the type to a np.dtype matched the actual " "size. This is a bug both in python and numpy"); Py_DECREF(descr); return NULL; From cc03950b4d3cb191efd0ac91a48c08dd0d53cbc1 Mon Sep 17 00:00:00 2001 From: "Christine P. 
Chai" Date: Sat, 10 Jan 2026 19:23:14 -0800 Subject: [PATCH 1255/1718] DOC: Change back to "a np.type" [skip azp][skip cirrus][skip actions] --- numpy/lib/_iotools.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/lib/_iotools.py b/numpy/lib/_iotools.py index 959a59e7480a..3586b41de86c 100644 --- a/numpy/lib/_iotools.py +++ b/numpy/lib/_iotools.py @@ -607,7 +607,7 @@ def __init__(self, dtype_or_func=None, default=None, missing_values=None, self.default = default or False dtype = np.dtype('bool') else: - # Is the input an np.dtype ? + # Is the input a np.dtype ? try: self.func = None dtype = np.dtype(dtype_or_func) From 7ddffc98c221cb74854a8f2ed6881ab40434a5e8 Mon Sep 17 00:00:00 2001 From: "Christine P. Chai" Date: Sat, 10 Jan 2026 19:24:11 -0800 Subject: [PATCH 1256/1718] DOC: Use "a np.recarray" because np is pronounced as "numpy" [skip azp][skip cirrus][skip actions] --- numpy/lib/recfunctions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/lib/recfunctions.py b/numpy/lib/recfunctions.py index 6b044ca07d61..d466d5560a4c 100644 --- a/numpy/lib/recfunctions.py +++ b/numpy/lib/recfunctions.py @@ -1667,7 +1667,7 @@ def rec_join(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2', defaults=None): """ Join arrays `r1` and `r2` on keys. - Alternative to join_by, that always returns an np.recarray. + Alternative to join_by, that always returns a np.recarray. See Also -------- From 1b785bfaf75128ad461303d7d9313f05412dce49 Mon Sep 17 00:00:00 2001 From: "Christine P. 
Chai" Date: Sat, 10 Jan 2026 19:25:07 -0800 Subject: [PATCH 1257/1718] DOC: Use "a np.array" instead [skip azp][skip cirrus][skip actions] --- numpy/linalg/_linalg.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/linalg/_linalg.py b/numpy/linalg/_linalg.py index a702caebf55f..d11699bd1c5e 100644 --- a/numpy/linalg/_linalg.py +++ b/numpy/linalg/_linalg.py @@ -3004,7 +3004,7 @@ def _multi_dot_three(A, B, C, out=None): def _multi_dot_matrix_chain_order(arrays, return_costs=False): """ - Return an np.array that encodes the optimal order of multiplications. + Return a np.array that encodes the optimal order of multiplications. The optimal order array is then used by `_multi_dot()` to do the multiplication. From 156724c4f23478139df7e195eb88708e4bf9adbb Mon Sep 17 00:00:00 2001 From: "Christine P. Chai" Date: Sat, 10 Jan 2026 19:25:48 -0800 Subject: [PATCH 1258/1718] DOC: Change back to "a np.void" [skip azp][skip cirrus][skip actions] --- numpy/ma/core.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 8fcd65af2e5c..196462720cd8 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -6564,7 +6564,7 @@ def __new__(cls, data, mask=nomask, dtype=None, fill_value=None, @property def _data(self): - # Make sure that the _data part is an np.void + # Make sure that the _data part is a np.void return super()._data[()] def __getitem__(self, indx): From 76e90f8e06a06908a83e8e61cea9c5afe3fe7650 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sun, 11 Jan 2026 19:38:40 +0100 Subject: [PATCH 1259/1718] MAINT: avoid possible race condition by not touching `os.environ` on import We need to set the `OPENBLAS_MAIN_FREE` environment variable before the OpenBLAS initialization runs, however we don't need to touch `os.environ` for that. Deleting a key from `os.environ` can lead to a race condition when numpy is imported in parallel in multiple threads, while `os.unsetenv` always succeeds. 
See gh-30627 for context. Also clean up `GOTOBLAS_MAIN_FREE`, that's a deprecated env var that has the same function as `OPENBLAS_MAIN_FREE`. There won't be any GotoBLAS or early OpenBLAS versions in use anymore that only have the GOTOBLAS flavor. Closes gh-30627 Closes gh-21223 --- numpy/_core/__init__.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/numpy/_core/__init__.py b/numpy/_core/__init__.py index 18b250f9972b..ede50aaeefc3 100644 --- a/numpy/_core/__init__.py +++ b/numpy/_core/__init__.py @@ -13,9 +13,11 @@ # disables OpenBLAS affinity setting of the main thread that limits # python threads or processes to one core env_added = [] -for envkey in ['OPENBLAS_MAIN_FREE', 'GOTOBLAS_MAIN_FREE']: +for envkey in ['OPENBLAS_MAIN_FREE']: if envkey not in os.environ: - os.environ[envkey] = '1' + # Note: using `putenv` (and `unsetenv` further down) instead of updating + # `os.environ` on purpose to avoid a race condition, see gh-30627. + os.putenv(envkey, '1') env_added.append(envkey) try: @@ -83,7 +85,7 @@ raise ImportError(msg) from exc finally: for envkey in env_added: - del os.environ[envkey] + os.unsetenv(envkey) del envkey del env_added del os From 73713be05817fa505cb11a2a66672fc1007f6b17 Mon Sep 17 00:00:00 2001 From: Aniket Singh Yadav Date: Mon, 12 Jan 2026 12:53:28 +0530 Subject: [PATCH 1260/1718] Update legacy pareto documentation to match Pareto II --- numpy/random/mtrand.pyx | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx index a74b4cf6941b..987d3edf159f 100644 --- a/numpy/random/mtrand.pyx +++ b/numpy/random/mtrand.pyx @@ -2421,11 +2421,14 @@ cdef class RandomState: Notes ----- - The probability density for the Pareto distribution is + The probability density for the Pareto II distribution is - .. math:: p(x) = \\frac{am^a}{x^{a+1}} + .. math:: p(x) = \\frac{a}{(x+1)^{a+1}} , x \ge 0 - where :math:`a` is the shape and :math:`m` the scale. 
+ where :math:`a > 0` is the shape. + + The Pareto II distribution is a shifted and scaled version of the + Pareto I distribution, which can be found in `scipy.stats.pareto`. The Pareto distribution, named after the Italian economist Vilfredo Pareto, is a power law probability distribution From c25b7cf90ad60158cc850fd84e91bcacf660f3bb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 12 Jan 2026 21:53:38 +0000 Subject: [PATCH 1261/1718] MAINT: Bump github/codeql-action from 4.31.9 to 4.31.10 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 4.31.9 to 4.31.10. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/5d4e8d1aca955e8d8589aabd499c5cae939e33c7...cdefb33c0f6224e58673d9004f47f7cb3e328b89) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 4.31.10 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index c7b935c56595..99c7afcabec7 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@5d4e8d1aca955e8d8589aabd499c5cae939e33c7 # v4.31.9 + uses: github/codeql-action/init@cdefb33c0f6224e58673d9004f47f7cb3e328b89 # v4.31.10 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 
# If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@5d4e8d1aca955e8d8589aabd499c5cae939e33c7 # v4.31.9 + uses: github/codeql-action/autobuild@cdefb33c0f6224e58673d9004f47f7cb3e328b89 # v4.31.10 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@5d4e8d1aca955e8d8589aabd499c5cae939e33c7 # v4.31.9 + uses: github/codeql-action/analyze@cdefb33c0f6224e58673d9004f47f7cb3e328b89 # v4.31.10 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index a1e9a7aca6eb..02a4614c2177 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@5d4e8d1aca955e8d8589aabd499c5cae939e33c7 # v2.1.27 + uses: github/codeql-action/upload-sarif@cdefb33c0f6224e58673d9004f47f7cb3e328b89 # v2.1.27 with: sarif_file: results.sarif From 4420e94a8f60715f5ab2829f16c04b875143f15c Mon Sep 17 00:00:00 2001 From: skyvanguard Date: Mon, 12 Jan 2026 19:35:07 -0300 Subject: [PATCH 1262/1718] DEP: Deprecate numpy.fix in favor of numpy.trunc Deprecate `numpy.fix` as it provides identical functionality to `numpy.trunc` but is slower and is not part of the Array API standard. - Add DeprecationWarning to numpy.fix() - Update docstring with .. 
deprecated:: directive - Add tests to verify deprecation warning is emitted - Update existing tests to suppress deprecation warnings - Add release note about deprecation Closes #30096 --- .../upcoming_changes/30096.deprecation.rst | 6 ++ numpy/lib/_ufunclike_impl.py | 13 +++ numpy/lib/tests/test_ufunclike.py | 79 +++++++++++++------ 3 files changed, 73 insertions(+), 25 deletions(-) create mode 100644 doc/release/upcoming_changes/30096.deprecation.rst diff --git a/doc/release/upcoming_changes/30096.deprecation.rst b/doc/release/upcoming_changes/30096.deprecation.rst new file mode 100644 index 000000000000..41219eca7e94 --- /dev/null +++ b/doc/release/upcoming_changes/30096.deprecation.rst @@ -0,0 +1,6 @@ +``numpy.fix`` is deprecated +--------------------------- + +`numpy.fix` is deprecated. Use `numpy.trunc` instead, which is faster +and follows the Array API standard. Both functions provide identical +functionality: rounding array elements towards zero. diff --git a/numpy/lib/_ufunclike_impl.py b/numpy/lib/_ufunclike_impl.py index 569840697d81..93771ecbabc3 100644 --- a/numpy/lib/_ufunclike_impl.py +++ b/numpy/lib/_ufunclike_impl.py @@ -5,6 +5,8 @@ """ __all__ = ['fix', 'isneginf', 'isposinf'] +import warnings + import numpy._core.numeric as nx from numpy._core.overrides import array_function_dispatch @@ -18,6 +20,10 @@ def fix(x, out=None): """ Round to nearest integer towards zero. + .. deprecated:: 2.3 + `numpy.fix` is deprecated. Use `numpy.trunc` instead, + which is faster and follows the Array API standard. + Round an array of floats element-wise to nearest integer towards zero. The rounded values have the same data-type as the input. @@ -56,6 +62,13 @@ def fix(x, out=None): array([ 2., 2., -2., -2.]) """ + # Deprecated in NumPy 2.3, 2025-01-12 + warnings.warn( + "numpy.fix is deprecated. 
Use numpy.trunc instead, " + "which is faster and follows the Array API standard.", + DeprecationWarning, + stacklevel=2, + ) return nx.trunc(x, out=out) diff --git a/numpy/lib/tests/test_ufunclike.py b/numpy/lib/tests/test_ufunclike.py index b4257ebf9191..2af24fc5eccd 100644 --- a/numpy/lib/tests/test_ufunclike.py +++ b/numpy/lib/tests/test_ufunclike.py @@ -1,3 +1,7 @@ +import warnings + +import pytest + import numpy as np from numpy import fix, isneginf, isposinf from numpy.testing import assert_, assert_array_equal, assert_equal, assert_raises @@ -40,12 +44,14 @@ def test_fix(self): out = np.zeros(a.shape, float) tgt = np.array([[1., 1., 1., 1.], [-1., -1., -1., -1.]]) - res = fix(a) - assert_equal(res, tgt) - res = fix(a, out) - assert_equal(res, tgt) - assert_equal(out, tgt) - assert_equal(fix(3.14), 3) + with warnings.catch_warnings(): + warnings.simplefilter("ignore", DeprecationWarning) + res = fix(a) + assert_equal(res, tgt) + res = fix(a, out) + assert_equal(res, tgt) + assert_equal(out, tgt) + assert_equal(fix(3.14), 3) def test_fix_with_subclass(self): class MyArray(np.ndarray): @@ -67,17 +73,19 @@ def __array_finalize__(self, obj): a = np.array([1.1, -1.1]) m = MyArray(a, metadata='foo') - f = fix(m) - assert_array_equal(f, np.array([1, -1])) - assert_(isinstance(f, MyArray)) - assert_equal(f.metadata, 'foo') - - # check 0d arrays don't decay to scalars - m0d = m[0, ...] - m0d.metadata = 'bar' - f0d = fix(m0d) - assert_(isinstance(f0d, MyArray)) - assert_equal(f0d.metadata, 'bar') + with warnings.catch_warnings(): + warnings.simplefilter("ignore", DeprecationWarning) + f = fix(m) + assert_array_equal(f, np.array([1, -1])) + assert_(isinstance(f, MyArray)) + assert_equal(f.metadata, 'foo') + + # check 0d arrays don't decay to scalars + m0d = m[0, ...] 
+ m0d.metadata = 'bar' + f0d = fix(m0d) + assert_(isinstance(f0d, MyArray)) + assert_equal(f0d.metadata, 'bar') def test_scalar(self): x = np.inf @@ -87,11 +95,32 @@ def test_scalar(self): assert_equal(type(actual), type(expected)) x = -3.4 - actual = np.fix(x) - expected = np.float64(-3.0) - assert_equal(actual, expected) - assert_equal(type(actual), type(expected)) - - out = np.array(0.0) - actual = np.fix(x, out=out) - assert_(actual is out) + with warnings.catch_warnings(): + warnings.simplefilter("ignore", DeprecationWarning) + actual = np.fix(x) + expected = np.float64(-3.0) + assert_equal(actual, expected) + assert_equal(type(actual), type(expected)) + + out = np.array(0.0) + actual = np.fix(x, out=out) + assert_(actual is out) + + +class TestFixDeprecation: + """Test that numpy.fix emits a DeprecationWarning.""" + + def test_fix_emits_deprecation_warning(self): + a = np.array([1.5, 2.7, -1.5, -2.7]) + with pytest.warns(DeprecationWarning, match="numpy.fix is deprecated"): + fix(a) + + def test_fix_scalar_emits_deprecation_warning(self): + with pytest.warns(DeprecationWarning, match="numpy.fix is deprecated"): + fix(3.14) + + def test_fix_with_out_emits_deprecation_warning(self): + a = np.array([1.5, 2.7]) + out = np.zeros(a.shape) + with pytest.warns(DeprecationWarning, match="numpy.fix is deprecated"): + fix(a, out=out) From e3d853098b181bc6616f16524ef71349cdbf1660 Mon Sep 17 00:00:00 2001 From: skyvanguard Date: Mon, 12 Jan 2026 19:38:22 -0300 Subject: [PATCH 1263/1718] DOC: Rename release note file to match PR number --- .../{30096.deprecation.rst => 30644.deprecation.rst} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename doc/release/upcoming_changes/{30096.deprecation.rst => 30644.deprecation.rst} (100%) diff --git a/doc/release/upcoming_changes/30096.deprecation.rst b/doc/release/upcoming_changes/30644.deprecation.rst similarity index 100% rename from doc/release/upcoming_changes/30096.deprecation.rst rename to 
doc/release/upcoming_changes/30644.deprecation.rst From 2598457547a2d6d218f82da0d1e597d9d76b3a52 Mon Sep 17 00:00:00 2001 From: skyvanguard Date: Mon, 12 Jan 2026 20:07:16 -0300 Subject: [PATCH 1264/1718] TST: Add fix deprecation warning to doctest filter --- numpy/conftest.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/numpy/conftest.py b/numpy/conftest.py index ad256b56200c..90da68cfc2d5 100644 --- a/numpy/conftest.py +++ b/numpy/conftest.py @@ -184,7 +184,8 @@ def warnings_errors_and_rng(test=None): "Importing from numpy.matlib", "This function is deprecated.", # random_integers "Arrays of 2-dimensional vectors", # matlib.cross - "NumPy warning suppression and assertion utilities are deprecated." + "NumPy warning suppression and assertion utilities are deprecated.", + "numpy.fix is deprecated", # fix -> trunc ] msg = "|".join(msgs) From 5455dd412c611fec031b4b8a0ecb407214a94a05 Mon Sep 17 00:00:00 2001 From: skyvanguard Date: Mon, 12 Jan 2026 20:23:33 -0300 Subject: [PATCH 1265/1718] TST: Suppress fix deprecation warning in typing runtime test --- numpy/typing/tests/data/pass/ufunclike.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/numpy/typing/tests/data/pass/ufunclike.py b/numpy/typing/tests/data/pass/ufunclike.py index 7e556d10bef7..9d306e385001 100644 --- a/numpy/typing/tests/data/pass/ufunclike.py +++ b/numpy/typing/tests/data/pass/ufunclike.py @@ -1,9 +1,13 @@ from __future__ import annotations +import warnings from typing import Any import numpy as np +# Suppress DeprecationWarning for np.fix during runtime testing +warnings.filterwarnings("ignore", "numpy.fix is deprecated", DeprecationWarning) + class Object: def __ceil__(self) -> Object: From 36fff00f72deab1414abdacfd92cb74abee868cb Mon Sep 17 00:00:00 2001 From: skyvanguard Date: Mon, 12 Jan 2026 20:48:32 -0300 Subject: [PATCH 1266/1718] TYP: Remove category=PendingDeprecationWarning from fix stubs --- numpy/lib/_ufunclike_impl.pyi | 8 ++++---- 1 file changed, 4 
insertions(+), 4 deletions(-) diff --git a/numpy/lib/_ufunclike_impl.pyi b/numpy/lib/_ufunclike_impl.pyi index d48557a7b5d7..4145ff205e1b 100644 --- a/numpy/lib/_ufunclike_impl.pyi +++ b/numpy/lib/_ufunclike_impl.pyi @@ -12,16 +12,16 @@ from numpy._typing import ( __all__ = ["fix", "isneginf", "isposinf"] @overload -@deprecated("np.fix will be deprecated in NumPy 2.5 in favor of np.trunc", category=PendingDeprecationWarning) +@deprecated("numpy.fix is deprecated. Use numpy.trunc instead.") def fix(x: _FloatLike_co, out: None = None) -> np.floating: ... @overload -@deprecated("np.fix will be deprecated in NumPy 2.5 in favor of np.trunc", category=PendingDeprecationWarning) +@deprecated("numpy.fix is deprecated. Use numpy.trunc instead.") def fix(x: _ArrayLikeFloat_co, out: None = None) -> NDArray[np.floating]: ... @overload -@deprecated("np.fix will be deprecated in NumPy 2.5 in favor of np.trunc", category=PendingDeprecationWarning) +@deprecated("numpy.fix is deprecated. Use numpy.trunc instead.") def fix(x: _ArrayLikeObject_co, out: None = None) -> NDArray[np.object_]: ... @overload -@deprecated("np.fix will be deprecated in NumPy 2.5 in favor of np.trunc", category=PendingDeprecationWarning) +@deprecated("numpy.fix is deprecated. Use numpy.trunc instead.") def fix[ArrayT: np.ndarray](x: _ArrayLikeFloat_co | _ArrayLikeObject_co, out: ArrayT) -> ArrayT: ... # From 6db14a737c90815db7817ff369799006a4f7a94f Mon Sep 17 00:00:00 2001 From: skyvanguard Date: Mon, 12 Jan 2026 20:51:19 -0300 Subject: [PATCH 1267/1718] DOC: Fix deprecation version (2.5) and year (2026) --- numpy/lib/_ufunclike_impl.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/lib/_ufunclike_impl.py b/numpy/lib/_ufunclike_impl.py index 93771ecbabc3..0f503d03a556 100644 --- a/numpy/lib/_ufunclike_impl.py +++ b/numpy/lib/_ufunclike_impl.py @@ -20,7 +20,7 @@ def fix(x, out=None): """ Round to nearest integer towards zero. - .. deprecated:: 2.3 + .. 
deprecated:: 2.5 `numpy.fix` is deprecated. Use `numpy.trunc` instead, which is faster and follows the Array API standard. @@ -62,7 +62,7 @@ def fix(x, out=None): array([ 2., 2., -2., -2.]) """ - # Deprecated in NumPy 2.3, 2025-01-12 + # Deprecated in NumPy 2.5, 2026-01-12 warnings.warn( "numpy.fix is deprecated. Use numpy.trunc instead, " "which is faster and follows the Array API standard.", From ae63bac6886e3a982e4f6acbdbe1f639318a64e0 Mon Sep 17 00:00:00 2001 From: skyvanguard Date: Mon, 12 Jan 2026 20:53:51 -0300 Subject: [PATCH 1268/1718] TST: Use @pytest.mark.filterwarnings instead of warnings.catch_warnings --- numpy/lib/tests/test_ufunclike.py | 63 ++++++++++++++----------------- 1 file changed, 29 insertions(+), 34 deletions(-) diff --git a/numpy/lib/tests/test_ufunclike.py b/numpy/lib/tests/test_ufunclike.py index 2af24fc5eccd..8452a913c98c 100644 --- a/numpy/lib/tests/test_ufunclike.py +++ b/numpy/lib/tests/test_ufunclike.py @@ -1,5 +1,3 @@ -import warnings - import pytest import numpy as np @@ -39,20 +37,20 @@ def test_isneginf(self): with assert_raises(TypeError): isneginf(a) + @pytest.mark.filterwarnings("ignore:numpy.fix is deprecated:DeprecationWarning") def test_fix(self): a = np.array([[1.0, 1.1, 1.5, 1.8], [-1.0, -1.1, -1.5, -1.8]]) out = np.zeros(a.shape, float) tgt = np.array([[1., 1., 1., 1.], [-1., -1., -1., -1.]]) - with warnings.catch_warnings(): - warnings.simplefilter("ignore", DeprecationWarning) - res = fix(a) - assert_equal(res, tgt) - res = fix(a, out) - assert_equal(res, tgt) - assert_equal(out, tgt) - assert_equal(fix(3.14), 3) + res = fix(a) + assert_equal(res, tgt) + res = fix(a, out) + assert_equal(res, tgt) + assert_equal(out, tgt) + assert_equal(fix(3.14), 3) + @pytest.mark.filterwarnings("ignore:numpy.fix is deprecated:DeprecationWarning") def test_fix_with_subclass(self): class MyArray(np.ndarray): def __new__(cls, data, metadata=None): @@ -73,20 +71,19 @@ def __array_finalize__(self, obj): a = np.array([1.1, -1.1]) m = 
MyArray(a, metadata='foo') - with warnings.catch_warnings(): - warnings.simplefilter("ignore", DeprecationWarning) - f = fix(m) - assert_array_equal(f, np.array([1, -1])) - assert_(isinstance(f, MyArray)) - assert_equal(f.metadata, 'foo') - - # check 0d arrays don't decay to scalars - m0d = m[0, ...] - m0d.metadata = 'bar' - f0d = fix(m0d) - assert_(isinstance(f0d, MyArray)) - assert_equal(f0d.metadata, 'bar') - + f = fix(m) + assert_array_equal(f, np.array([1, -1])) + assert_(isinstance(f, MyArray)) + assert_equal(f.metadata, 'foo') + + # check 0d arrays don't decay to scalars + m0d = m[0, ...] + m0d.metadata = 'bar' + f0d = fix(m0d) + assert_(isinstance(f0d, MyArray)) + assert_equal(f0d.metadata, 'bar') + + @pytest.mark.filterwarnings("ignore:numpy.fix is deprecated:DeprecationWarning") def test_scalar(self): x = np.inf actual = np.isposinf(x) @@ -95,16 +92,14 @@ def test_scalar(self): assert_equal(type(actual), type(expected)) x = -3.4 - with warnings.catch_warnings(): - warnings.simplefilter("ignore", DeprecationWarning) - actual = np.fix(x) - expected = np.float64(-3.0) - assert_equal(actual, expected) - assert_equal(type(actual), type(expected)) - - out = np.array(0.0) - actual = np.fix(x, out=out) - assert_(actual is out) + actual = np.fix(x) + expected = np.float64(-3.0) + assert_equal(actual, expected) + assert_equal(type(actual), type(expected)) + + out = np.array(0.0) + actual = np.fix(x, out=out) + assert_(actual is out) class TestFixDeprecation: From 8ecf22f62e976c398261f803475a0e9e9230ddc7 Mon Sep 17 00:00:00 2001 From: skyvanguard Date: Tue, 13 Jan 2026 08:56:17 -0300 Subject: [PATCH 1269/1718] Use @pytest.mark.filterwarnings for fix deprecation in typing tests Move the deprecation warning filter from the test data file to the actual test function. This ensures the filter follows NumPy testing conventions and doesn't use warnings.filterwarnings in a non-test file. 
--- numpy/typing/tests/data/pass/ufunclike.py | 4 ---- numpy/typing/tests/test_typing.py | 1 + 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/numpy/typing/tests/data/pass/ufunclike.py b/numpy/typing/tests/data/pass/ufunclike.py index 9d306e385001..7e556d10bef7 100644 --- a/numpy/typing/tests/data/pass/ufunclike.py +++ b/numpy/typing/tests/data/pass/ufunclike.py @@ -1,13 +1,9 @@ from __future__ import annotations -import warnings from typing import Any import numpy as np -# Suppress DeprecationWarning for np.fix during runtime testing -warnings.filterwarnings("ignore", "numpy.fix is deprecated", DeprecationWarning) - class Object: def __ceil__(self) -> Object: diff --git a/numpy/typing/tests/test_typing.py b/numpy/typing/tests/test_typing.py index ca4cf37fec3b..cc99b634d6e0 100644 --- a/numpy/typing/tests/test_typing.py +++ b/numpy/typing/tests/test_typing.py @@ -189,6 +189,7 @@ def test_reveal(path: str) -> None: @pytest.mark.slow @pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed") +@pytest.mark.filterwarnings("ignore:numpy.fix is deprecated:DeprecationWarning") @pytest.mark.parametrize("path", get_test_cases(PASS_DIR)) def test_code_runs(path: str) -> None: """Validate that the code in `path` properly during runtime.""" From a90ef57574c501a780fe834123b20fcea1329f90 Mon Sep 17 00:00:00 2001 From: Kumar Aditya Date: Wed, 14 Jan 2026 04:36:49 +0530 Subject: [PATCH 1270/1718] ENH: lock-free hashtable implementation for ufunc dispatching (#30593) --- .github/workflows/compiler_sanitizers.yml | 2 +- numpy/_core/meson.build | 4 +- numpy/_core/src/common/npy_hashtable.c | 311 ++++++++++++++++++ numpy/_core/src/common/npy_hashtable.cpp | 259 --------------- numpy/_core/src/common/npy_hashtable.h | 20 +- .../src/multiarray/_multiarray_tests.c.src | 39 +-- numpy/_core/src/umath/dispatching.cpp | 86 ++--- numpy/_core/src/umath/ufunc_object.c | 9 - numpy/_core/tests/test_hashtable.py | 133 +++++++- 9 files changed, 478 insertions(+), 385 deletions(-) 
create mode 100644 numpy/_core/src/common/npy_hashtable.c delete mode 100644 numpy/_core/src/common/npy_hashtable.cpp diff --git a/.github/workflows/compiler_sanitizers.yml b/.github/workflows/compiler_sanitizers.yml index 962646122e17..146fb5d15b34 100644 --- a/.github/workflows/compiler_sanitizers.yml +++ b/.github/workflows/compiler_sanitizers.yml @@ -106,5 +106,5 @@ jobs: export TSAN_OPTIONS="halt_on_error=0:allocator_may_return_null=1:suppressions=$GITHUB_WORKSPACE/tools/ci/tsan_suppressions.txt" echo "TSAN_OPTIONS=$TSAN_OPTIONS" python -m spin test \ - `find numpy -name "test*.py" | xargs grep -l "import threading" | tr '\n' ' '` \ + `find numpy -name "test*.py" | xargs grep -E -l "import threading|ThreadPoolExecutor" | tr '\n' ' '` \ -- -v -s --timeout=600 --durations=10 diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index 7cab1b9fd167..5b78cc307be0 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -732,7 +732,7 @@ py.extension_module('_multiarray_tests', src_file.process('src/multiarray/_multiarray_tests.c.src'), 'src/common/mem_overlap.c', 'src/common/npy_argparse.c', - 'src/common/npy_hashtable.cpp', + 'src/common/npy_hashtable.c', 'src/common/npy_import.c', src_file.process('src/common/templ_common.h.src') ], @@ -1087,7 +1087,7 @@ src_multiarray_umath_common = [ 'src/common/gil_utils.c', 'src/common/mem_overlap.c', 'src/common/npy_argparse.c', - 'src/common/npy_hashtable.cpp', + 'src/common/npy_hashtable.c', 'src/common/npy_import.c', 'src/common/npy_longdouble.c', 'src/common/ufunc_override.c', diff --git a/numpy/_core/src/common/npy_hashtable.c b/numpy/_core/src/common/npy_hashtable.c new file mode 100644 index 000000000000..535dbca842db --- /dev/null +++ b/numpy/_core/src/common/npy_hashtable.c @@ -0,0 +1,311 @@ +/* Lock-free hash table implementation for identity based keys + * (C arrays of pointers) used for ufunc dispatching cache. 
+ * + * This cache does not do any reference counting of the stored objects, + * and the stored pointers must remain valid while in the cache. + * The cache entries cannot be changed or deleted once added, only new + * entries can be added. It is thread safe and lock-free for reading, and + * uses a mutex for writing (adding new entries). See below for the details + * of thread safety. + * + * The actual hash table is stored in the `buckets` struct which contains + * a flexible array member for the keys and values. It avoids multiple + * atomic operations as resizing the hash table only requires a single atomic + * store to swap in the new buckets pointer. + * + * Thread safety notes for free-threading builds: + * - Reading from the cache (getting items) is lock-free and thread safe. + * The reader reads the current `buckets` pointer using an atomic load + * with memory_order_acquire order. This ensures that the reader + * synchronizes with any concurrent writers that may be resizing the cache. + * The value of item is then read using an atomic load with memory_order_acquire + * order so that it sees the key written by the writer before the value. + * + * - Writing to the cache (adding new items) uses ``tb->mutex`` mutex to + * ensure only one thread writes at a time. The new items are added + * concurrently with readers and synchronized using atomic operations. + * The key is stored first (using memcpy), and then the value is stored + * using an atomic store with memory_order_release order so that + * the store of key is visible to readers that see the value. + * + * - Resizing the cache uses the same mutex to ensure only one thread + * resizes at a time. The new larger cache is built while holding the + * mutex, and then swapped in using an atomic operation. Because + * readers can be reading from the old cache while the new one is + * swapped in, the old cache is not freed immediately. 
Instead, it is + * kept in a linked list of old caches using the `prev` pointer in the + * `buckets` struct. The old caches are only freed when the identity + * hash table is deallocated, ensuring that no readers are using them + * anymore. + */ + +#include "npy_hashtable.h" + +#include "templ_common.h" +#include + +// It is defined here instead of header to avoid flexible array member warning in C++. +struct buckets { + struct buckets *prev; /* linked list of old buckets */ + npy_intp size; /* current size */ + npy_intp nelem; /* number of elements */ + PyObject *array[]; /* array of keys and values */ +}; + +#if SIZEOF_PY_UHASH_T > 4 +#define _NpyHASH_XXPRIME_1 ((Py_uhash_t)11400714785074694791ULL) +#define _NpyHASH_XXPRIME_2 ((Py_uhash_t)14029467366897019727ULL) +#define _NpyHASH_XXPRIME_5 ((Py_uhash_t)2870177450012600261ULL) +#define _NpyHASH_XXROTATE(x) ((x << 31) | (x >> 33)) /* Rotate left 31 bits */ +#else +#define _NpyHASH_XXPRIME_1 ((Py_uhash_t)2654435761UL) +#define _NpyHASH_XXPRIME_2 ((Py_uhash_t)2246822519UL) +#define _NpyHASH_XXPRIME_5 ((Py_uhash_t)374761393UL) +#define _NpyHASH_XXROTATE(x) ((x << 13) | (x >> 19)) /* Rotate left 13 bits */ +#endif + +#ifdef Py_GIL_DISABLED +#define FT_ATOMIC_LOAD_PTR_ACQUIRE(ptr) \ + atomic_load_explicit((_Atomic(void *) *)&(ptr), memory_order_acquire) +#define FT_ATOMIC_STORE_PTR_RELEASE(ptr, val) \ + atomic_store_explicit((_Atomic(void *) *)&(ptr), (void *)(val), memory_order_release) +#else +#define FT_ATOMIC_LOAD_PTR_ACQUIRE(ptr) (ptr) +#define FT_ATOMIC_STORE_PTR_RELEASE(ptr, val) (ptr) = (val) +#endif + +/* + * This hashing function is basically the Python tuple hash with the type + * identity hash inlined. The tuple hash itself is a reduced version of xxHash. + * + * Users cannot control pointers, so we do not have to worry about DoS attacks? 
+ */ +static inline Py_hash_t +identity_list_hash(PyObject *const *v, int len) +{ + Py_uhash_t acc = _NpyHASH_XXPRIME_5; + for (int i = 0; i < len; i++) { + /* + * Lane is the single item hash, which for us is the rotated pointer. + * Identical to the python type hash (pointers end with 0s normally). + */ + size_t y = (size_t)v[i]; + Py_uhash_t lane = (y >> 4) | (y << (8 * SIZEOF_VOID_P - 4)); + acc += lane * _NpyHASH_XXPRIME_2; + acc = _NpyHASH_XXROTATE(acc); + acc *= _NpyHASH_XXPRIME_1; + } + return acc; +} +#undef _NpyHASH_XXPRIME_1 +#undef _NpyHASH_XXPRIME_2 +#undef _NpyHASH_XXPRIME_5 +#undef _NpyHASH_XXROTATE + + +static inline PyObject ** +find_item_buckets(struct buckets *buckets, int key_len, PyObject *const *key) +{ + Py_hash_t hash = identity_list_hash(key, key_len); + npy_uintp perturb = (npy_uintp)hash; + npy_intp mask = buckets->size - 1; + npy_intp bucket = (npy_intp)hash & mask; + + while (1) { + PyObject **item = &(buckets->array[bucket * (key_len + 1)]); + PyObject *value = FT_ATOMIC_LOAD_PTR_ACQUIRE(item[0]); + if (value == NULL) { + /* The item is not in the cache; return the empty bucket */ + return item; + } + if (memcmp(item+1, key, key_len * sizeof(PyObject *)) == 0) { + /* This is a match, so return the item/bucket */ + return item; + } + /* Hash collision, perturb like Python (must happen rarely!) 
*/ + perturb >>= 5; /* Python uses the macro PERTURB_SHIFT == 5 */ + bucket = mask & (bucket * 5 + perturb + 1); + } +} + + +static inline PyObject ** +find_item(PyArrayIdentityHash const *tb, PyObject *const *key) +{ + struct buckets *buckets = FT_ATOMIC_LOAD_PTR_ACQUIRE(tb->buckets); + return find_item_buckets(buckets, tb->key_len, key); +} + + +NPY_NO_EXPORT PyArrayIdentityHash * +PyArrayIdentityHash_New(int key_len) +{ + PyArrayIdentityHash *res = (PyArrayIdentityHash *)PyMem_Malloc(sizeof(PyArrayIdentityHash)); + if (res == NULL) { + PyErr_NoMemory(); + return NULL; + } + + assert(key_len > 0); + res->key_len = key_len; + + npy_intp initial_size = 4; /* Start with a size of 4 */ + + res->buckets = PyMem_Calloc(1, sizeof(struct buckets) + + initial_size * (key_len + 1) * sizeof(PyObject *)); + if (res->buckets == NULL) { + PyErr_NoMemory(); + PyMem_Free(res); + return NULL; + } + res->buckets->prev = NULL; + res->buckets->size = initial_size; + res->buckets->nelem = 0; + +#ifdef Py_GIL_DISABLED + res->mutex = (PyMutex){0}; +#endif + return res; +} + + +NPY_NO_EXPORT void +PyArrayIdentityHash_Dealloc(PyArrayIdentityHash *tb) +{ + struct buckets *b = tb->buckets; +#ifdef Py_GIL_DISABLED + // free all old buckets + while (b != NULL) { + struct buckets *prev = b->prev; + PyMem_Free(b); + b = prev; + } +#else + assert(b->prev == NULL); + PyMem_Free(b); +#endif + PyMem_Free(tb); +} + + +static int +_resize_if_necessary(PyArrayIdentityHash *tb) +{ +#ifdef Py_GIL_DISABLED + assert(PyMutex_IsLocked(&tb->mutex)); +#endif + struct buckets *old_buckets = tb->buckets; + int key_len = tb->key_len; + npy_intp new_size, prev_size = old_buckets->size; + assert(prev_size > 0); + + if ((old_buckets->nelem + 1) * 2 > prev_size) { + /* Double in size */ + new_size = prev_size * 2; + } + else { + new_size = prev_size; + while ((old_buckets->nelem + 8) * 2 < new_size / 2) { + /* + * Should possibly be improved. However, we assume that we + * almost never shrink. 
Still if we do, do not shrink as much + * as possible to avoid growing right away. + */ + new_size /= 2; + } + assert(new_size >= 4); + } + if (new_size == prev_size) { + return 0; + } + + npy_intp alloc_size; + if (npy_mul_sizes_with_overflow(&alloc_size, new_size, key_len + 1)) { + return -1; + } + struct buckets *new_buckets = (struct buckets *)PyMem_Calloc( + 1, sizeof(struct buckets) + alloc_size * sizeof(PyObject *)); + if (new_buckets == NULL) { + PyErr_NoMemory(); + return -1; + } + new_buckets->size = new_size; + new_buckets->nelem = 0; + for (npy_intp i = 0; i < prev_size; i++) { + PyObject **item = &old_buckets->array[i * (key_len + 1)]; + if (item[0] != NULL) { + PyObject **tb_item = find_item_buckets(new_buckets, key_len, item + 1); + memcpy(tb_item+1, item+1, key_len * sizeof(PyObject *)); + new_buckets->nelem++; + tb_item[0] = item[0]; + } + } +#ifdef Py_GIL_DISABLED + new_buckets->prev = old_buckets; +#else + PyMem_Free(old_buckets); +#endif + FT_ATOMIC_STORE_PTR_RELEASE(tb->buckets, new_buckets); + return 0; +} + + +/** + * Set an item in the identity hash table if it does not already exist. + * If it does exist, return the existing item. + * + * @param tb The mapping. + * @param key The key, must be a C-array of pointers of the length + * corresponding to the mapping. + * @param value Normally a Python object, no reference counting is done + * and it should not be NULL. + * @param result The resulting value, either the existing one or the + * newly added value. + * @returns 0 on success, -1 with a MemoryError set on failure. 
+ */ +static inline int +PyArrayIdentityHash_SetItemDefaultLockHeld(PyArrayIdentityHash *tb, + PyObject *const *key, PyObject *default_value, PyObject **result) +{ +#ifdef Py_GIL_DISABLED + assert(PyMutex_IsLocked(&tb->mutex)); +#endif + assert(default_value != NULL); + if (_resize_if_necessary(tb) < 0) { + return -1; + } + + PyObject **tb_item = find_item(tb, key); + if (tb_item[0] == NULL) { + memcpy(tb_item+1, key, tb->key_len * sizeof(PyObject *)); + tb->buckets->nelem++; + FT_ATOMIC_STORE_PTR_RELEASE(tb_item[0], default_value); + *result = default_value; + } else { + *result = tb_item[0]; + } + + return 0; +} + +NPY_NO_EXPORT int +PyArrayIdentityHash_SetItemDefault(PyArrayIdentityHash *tb, + PyObject *const *key, PyObject *default_value, PyObject **result) +{ +#ifdef Py_GIL_DISABLED + PyMutex_Lock(&tb->mutex); +#endif + int ret = PyArrayIdentityHash_SetItemDefaultLockHeld(tb, key, default_value, result); +#ifdef Py_GIL_DISABLED + PyMutex_Unlock(&tb->mutex); +#endif + return ret; +} + + +NPY_NO_EXPORT PyObject * +PyArrayIdentityHash_GetItem(PyArrayIdentityHash *tb, PyObject *const *key) +{ + PyObject **tb_item = find_item(tb, key); + return FT_ATOMIC_LOAD_PTR_ACQUIRE(tb_item[0]); +} diff --git a/numpy/_core/src/common/npy_hashtable.cpp b/numpy/_core/src/common/npy_hashtable.cpp deleted file mode 100644 index 27e014ca00e0..000000000000 --- a/numpy/_core/src/common/npy_hashtable.cpp +++ /dev/null @@ -1,259 +0,0 @@ -/* - * This functionality is designed specifically for the ufunc machinery to - * dispatch based on multiple DTypes. Since this is designed to be used - * as purely a cache, it currently does no reference counting. - * Even though this is a cache, there is currently no maximum size. It may - * make sense to limit the size, or count collisions: If too many collisions - * occur, we could grow the cache, otherwise, just replace an old item that - * was presumably not used for a long time. 
- * - * If a different part of NumPy requires a custom hashtable, the code should - * be reused with care since specializing it more for the ufunc dispatching - * case is likely desired. - */ - -#include "npy_hashtable.h" - -#include -#include - -#include "templ_common.h" -#include - - - -#if SIZEOF_PY_UHASH_T > 4 -#define _NpyHASH_XXPRIME_1 ((Py_uhash_t)11400714785074694791ULL) -#define _NpyHASH_XXPRIME_2 ((Py_uhash_t)14029467366897019727ULL) -#define _NpyHASH_XXPRIME_5 ((Py_uhash_t)2870177450012600261ULL) -#define _NpyHASH_XXROTATE(x) ((x << 31) | (x >> 33)) /* Rotate left 31 bits */ -#else -#define _NpyHASH_XXPRIME_1 ((Py_uhash_t)2654435761UL) -#define _NpyHASH_XXPRIME_2 ((Py_uhash_t)2246822519UL) -#define _NpyHASH_XXPRIME_5 ((Py_uhash_t)374761393UL) -#define _NpyHASH_XXROTATE(x) ((x << 13) | (x >> 19)) /* Rotate left 13 bits */ -#endif - -/* - * This hashing function is basically the Python tuple hash with the type - * identity hash inlined. The tuple hash itself is a reduced version of xxHash. - * - * Users cannot control pointers, so we do not have to worry about DoS attacks? - */ -static inline Py_hash_t -identity_list_hash(PyObject *const *v, int len) -{ - Py_uhash_t acc = _NpyHASH_XXPRIME_5; - for (int i = 0; i < len; i++) { - /* - * Lane is the single item hash, which for us is the rotated pointer. - * Identical to the python type hash (pointers end with 0s normally). 
- */ - size_t y = (size_t)v[i]; - Py_uhash_t lane = (y >> 4) | (y << (8 * SIZEOF_VOID_P - 4)); - acc += lane * _NpyHASH_XXPRIME_2; - acc = _NpyHASH_XXROTATE(acc); - acc *= _NpyHASH_XXPRIME_1; - } - return acc; -} -#undef _NpyHASH_XXPRIME_1 -#undef _NpyHASH_XXPRIME_2 -#undef _NpyHASH_XXPRIME_5 -#undef _NpyHASH_XXROTATE - - -static inline PyObject ** -find_item(PyArrayIdentityHash const *tb, PyObject *const *key) -{ - Py_hash_t hash = identity_list_hash(key, tb->key_len); - npy_uintp perturb = (npy_uintp)hash; - npy_intp bucket; - npy_intp mask = tb->size - 1 ; - PyObject **item; - - bucket = (npy_intp)hash & mask; - while (1) { - item = &(tb->buckets[bucket * (tb->key_len + 1)]); - - if (item[0] == NULL) { - /* The item is not in the cache; return the empty bucket */ - return item; - } - if (memcmp(item+1, key, tb->key_len * sizeof(PyObject *)) == 0) { - /* This is a match, so return the item/bucket */ - return item; - } - /* Hash collision, perturb like Python (must happen rarely!) */ - perturb >>= 5; /* Python uses the macro PERTURB_SHIFT == 5 */ - bucket = mask & (bucket * 5 + perturb + 1); - } -} - - -NPY_NO_EXPORT PyArrayIdentityHash * -PyArrayIdentityHash_New(int key_len) -{ - PyArrayIdentityHash *res = (PyArrayIdentityHash *)PyMem_Malloc(sizeof(PyArrayIdentityHash)); - if (res == NULL) { - PyErr_NoMemory(); - return NULL; - } - - assert(key_len > 0); - res->key_len = key_len; - res->size = 4; /* Start with a size of 4 */ - res->nelem = 0; - - res->buckets = (PyObject **)PyMem_Calloc(4 * (key_len + 1), sizeof(PyObject *)); - if (res->buckets == NULL) { - PyErr_NoMemory(); - PyMem_Free(res); - return NULL; - } - -#ifdef Py_GIL_DISABLED - res->mutex = new(std::nothrow) std::shared_mutex(); - if (res->mutex == nullptr) { - PyErr_NoMemory(); - PyMem_Free(res); - return NULL; - } -#endif - return res; -} - - -NPY_NO_EXPORT void -PyArrayIdentityHash_Dealloc(PyArrayIdentityHash *tb) -{ - PyMem_Free(tb->buckets); -#ifdef Py_GIL_DISABLED - delete (std::shared_mutex 
*)tb->mutex; -#endif - PyMem_Free(tb); -} - - -static int -_resize_if_necessary(PyArrayIdentityHash *tb) -{ - npy_intp new_size, prev_size = tb->size; - PyObject **old_table = tb->buckets; - assert(prev_size > 0); - - if ((tb->nelem + 1) * 2 > prev_size) { - /* Double in size */ - new_size = prev_size * 2; - } - else { - new_size = prev_size; - while ((tb->nelem + 8) * 2 < new_size / 2) { - /* - * Should possibly be improved. However, we assume that we - * almost never shrink. Still if we do, do not shrink as much - * as possible to avoid growing right away. - */ - new_size /= 2; - } - assert(new_size >= 4); - } - if (new_size == prev_size) { - return 0; - } - - npy_intp alloc_size; - if (npy_mul_sizes_with_overflow(&alloc_size, new_size, tb->key_len + 1)) { - return -1; - } - tb->buckets = (PyObject **)PyMem_Calloc(alloc_size, sizeof(PyObject *)); - if (tb->buckets == NULL) { - tb->buckets = old_table; - PyErr_NoMemory(); - return -1; - } - - tb->size = new_size; - for (npy_intp i = 0; i < prev_size; i++) { - PyObject **item = &old_table[i * (tb->key_len + 1)]; - if (item[0] != NULL) { - PyObject **tb_item = find_item(tb, item + 1); - tb_item[0] = item[0]; - memcpy(tb_item+1, item+1, tb->key_len * sizeof(PyObject *)); - } - } - PyMem_Free(old_table); - return 0; -} - - -/** - * Add an item to the identity cache. The storage location must not change - * unless the cache is cleared. - * - * @param tb The mapping. - * @param key The key, must be a C-array of pointers of the length - * corresponding to the mapping. - * @param value Normally a Python object, no reference counting is done. - * use NULL to clear an item. If the item does not exist, no - * action is performed for NULL. - * @param replace If 1, allow replacements. If replace is 0 an error is raised - * if the stored value is different from the value to be cached. If the - * value to be cached is identical to the stored value, the value to be - * cached is ignored and no error is raised. 
- * @returns 0 on success, -1 with a MemoryError or RuntimeError (if an item - * is added which is already in the cache and replace is 0). The - * caller should avoid the RuntimeError. - */ -NPY_NO_EXPORT int -PyArrayIdentityHash_SetItem(PyArrayIdentityHash *tb, - PyObject *const *key, PyObject *value, int replace) -{ - if (value != NULL && _resize_if_necessary(tb) < 0) { - /* Shrink, only if a new value is added. */ - return -1; - } - - PyObject **tb_item = find_item(tb, key); - if (value != NULL) { - if (tb_item[0] != NULL && tb_item[0] != value && !replace) { - PyErr_SetString(PyExc_RuntimeError, - "Identity cache already includes an item with this key."); - return -1; - } - tb_item[0] = value; - memcpy(tb_item+1, key, tb->key_len * sizeof(PyObject *)); - tb->nelem += 1; - } - else { - /* Clear the bucket -- just the value should be enough though. */ - memset(tb_item, 0, (tb->key_len + 1) * sizeof(PyObject *)); - } - - return 0; -} - - -NPY_NO_EXPORT PyObject * -PyArrayIdentityHash_GetItem(PyArrayIdentityHash *tb, PyObject *const *key) -{ - PyObject *res = find_item(tb, key)[0]; - return res; -} - -#ifdef Py_GIL_DISABLED - -NPY_NO_EXPORT PyObject * -PyArrayIdentityHash_GetItemWithLock(PyArrayIdentityHash *tb, PyObject *const *key) -{ - PyObject *res; - std::shared_mutex *mutex = (std::shared_mutex *)tb->mutex; - NPY_BEGIN_ALLOW_THREADS - mutex->lock_shared(); - NPY_END_ALLOW_THREADS - res = find_item(tb, key)[0]; - mutex->unlock_shared(); - return res; -} - -#endif // Py_GIL_DISABLED diff --git a/numpy/_core/src/common/npy_hashtable.h b/numpy/_core/src/common/npy_hashtable.h index 02acc12d3191..a369ba1ba59b 100644 --- a/numpy/_core/src/common/npy_hashtable.h +++ b/numpy/_core/src/common/npy_hashtable.h @@ -11,26 +11,20 @@ extern "C" { #endif +struct buckets; + typedef struct { - int key_len; /* number of identities used */ - /* Buckets stores: val1, key1[0], key1[1], ..., val2, key2[0], ... 
*/ - PyObject **buckets; - npy_intp size; /* current size */ - npy_intp nelem; /* number of elements */ + int key_len; /* number of identities used */ + struct buckets *buckets; /* current buckets */ #ifdef Py_GIL_DISABLED - void *mutex; /* std::shared_mutex, prevents races to fill the cache */ + PyMutex mutex; #endif } PyArrayIdentityHash; NPY_NO_EXPORT int -PyArrayIdentityHash_SetItem(PyArrayIdentityHash *tb, - PyObject *const *key, PyObject *value, int replace); - -#ifdef Py_GIL_DISABLED -NPY_NO_EXPORT PyObject * -PyArrayIdentityHash_GetItemWithLock(PyArrayIdentityHash *tb, PyObject *const *key); -#endif // Py_GIL_DISABLED +PyArrayIdentityHash_SetItemDefault(PyArrayIdentityHash *tb, + PyObject *const *key, PyObject *default_value, PyObject **result); NPY_NO_EXPORT PyObject * PyArrayIdentityHash_GetItem(PyArrayIdentityHash *tb, PyObject *const *key); diff --git a/numpy/_core/src/multiarray/_multiarray_tests.c.src b/numpy/_core/src/multiarray/_multiarray_tests.c.src index 6f6d4bf8940d..9aaf0013958a 100644 --- a/numpy/_core/src/multiarray/_multiarray_tests.c.src +++ b/numpy/_core/src/multiarray/_multiarray_tests.c.src @@ -965,26 +965,15 @@ create_identity_hash(PyObject *NPY_UNUSED(self), PyObject *args) } /* - * Set item in identity hash table provided as capsule and key as tuple. - * If replace is False and the key already exists, RuntimeError is raised else - * the value is replaced. + * Set default item in identity hash table provided as capsule and key as tuple. + * If the key is already present, return the existing value else set to value and + * return that. 
*/ static PyObject * -identity_hash_set_item(PyObject *NPY_UNUSED(self), - PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) +identity_hash_set_item_default(PyObject *NPY_UNUSED(self), PyObject *args) { - NPY_PREPARE_ARGPARSER; - PyObject *capsule, *key_tuple, *value; - int replace = 0; - PyObject *replace_obj = Py_False; - - if (npy_parse_arguments("identity_hash_set_item", args, len_args, kwnames, - "capsule", NULL, &capsule, - "key", NULL, &key_tuple, - "value", NULL, &value, - "|replace", NULL, &replace_obj, - NULL, NULL, NULL) < 0) { + if (!PyArg_ParseTuple(args, "OOO", &capsule, &key_tuple, &value)) { return NULL; } @@ -994,11 +983,6 @@ identity_hash_set_item(PyObject *NPY_UNUSED(self), return NULL; } - replace = PyObject_IsTrue(replace_obj); - if (error_converting(replace)) { - return NULL; - } - PyArrayIdentityHash *tb = (PyArrayIdentityHash *)PyCapsule_GetPointer(capsule, "PyArrayIdentityHash"); assert(tb != NULL); @@ -1008,11 +992,12 @@ identity_hash_set_item(PyObject *NPY_UNUSED(self), return NULL; } - if (PyArrayIdentityHash_SetItem(tb, &PyTuple_GET_ITEM(key_tuple, 0), value, replace) < 0) { + PyObject *result = NULL; + if (PyArrayIdentityHash_SetItemDefault(tb, &PyTuple_GET_ITEM(key_tuple, 0), value, &result) < 0) { return NULL; } - - Py_RETURN_NONE; + Py_INCREF(result); + return result; } @@ -2354,9 +2339,9 @@ static PyMethodDef Multiarray_TestsMethods[] = { {"create_identity_hash", create_identity_hash, METH_VARARGS, "Create a new PyArrayIdentityHash wrapped in a PyCapsule."}, - {"identity_hash_set_item", - (PyCFunction)identity_hash_set_item, - METH_KEYWORDS | METH_FASTCALL, "Set an item in a PyArrayIdentityHash capsule."}, + {"identity_hash_set_item_default", + (PyCFunction)identity_hash_set_item_default, + METH_VARARGS, "Set a default item in a PyArrayIdentityHash capsule."}, {"identity_hash_get_item", identity_hash_get_item, METH_VARARGS, "Get an item from a PyArrayIdentityHash capsule."}, diff --git 
a/numpy/_core/src/umath/dispatching.cpp b/numpy/_core/src/umath/dispatching.cpp index db5698d8a819..cf2ce657b426 100644 --- a/numpy/_core/src/umath/dispatching.cpp +++ b/numpy/_core/src/umath/dispatching.cpp @@ -42,9 +42,6 @@ #include #include -#include -#include - #include "numpy/ndarraytypes.h" #include "numpy/npy_3kcompat.h" #include "npy_import.h" @@ -826,6 +823,13 @@ add_and_return_legacy_wrapping_ufunc_loop(PyUFuncObject *ufunc, * to use for a ufunc. This function may recurse with `do_legacy_fallback` * set to False. * + * The result is cached in the ufunc's dispatch cache for faster lookup next time. + * It is possible that multiple threads call this function at the same time, and + * there is cache miss, in that case all threads will do the full resolution, however + * only one will store the result in the cache (the others get the stored result). + * This is ensured by `PyArrayIdentityHash_SetItemDefault` which only sets the item + * if it is not already set otherwise returning the existing value. + * * If value-based promotion is necessary, this is handled ahead of time by * `promote_and_get_ufuncimpl`. */ @@ -868,12 +872,13 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc, * Found the ArrayMethod and NOT promoter. Before returning it * add it to the cache for faster lookup in the future. 
*/ - if (PyArrayIdentityHash_SetItem( + PyObject *result = NULL; + if (PyArrayIdentityHash_SetItemDefault( (PyArrayIdentityHash *)ufunc->_dispatch_cache, - (PyObject **)op_dtypes, info, 0) < 0) { + (PyObject **)op_dtypes, info, &result) < 0) { return NULL; } - return info; + return result; } } @@ -891,12 +896,13 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc, } else if (info != NULL) { /* Add result to the cache using the original types: */ - if (PyArrayIdentityHash_SetItem( + PyObject *result = NULL; + if (PyArrayIdentityHash_SetItemDefault( (PyArrayIdentityHash *)ufunc->_dispatch_cache, - (PyObject **)op_dtypes, info, 0) < 0) { + (PyObject **)op_dtypes, info, &result) < 0) { return NULL; } - return info; + return result; } } @@ -958,52 +964,20 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc, Py_XDECREF(new_op_dtypes[i]); } - /* Add this to the cache using the original types: */ - if (cacheable && PyArrayIdentityHash_SetItem( - (PyArrayIdentityHash *)ufunc->_dispatch_cache, - (PyObject **)op_dtypes, info, 0) < 0) { + if (info == NULL) { return NULL; } - return info; -} - -#ifdef Py_GIL_DISABLED -/* - * Fast path for promote_and_get_info_and_ufuncimpl. 
- * Acquires a read lock to check for a cache hit and then - * only acquires a write lock on a cache miss to fill the cache - */ -static inline PyObject * -promote_and_get_info_and_ufuncimpl_with_locking( - PyUFuncObject *ufunc, - PyArrayObject *const ops[], - PyArray_DTypeMeta *signature[], - PyArray_DTypeMeta *op_dtypes[], - npy_bool legacy_promotion_is_possible) -{ - std::shared_mutex *mutex = ((std::shared_mutex *)((PyArrayIdentityHash *)ufunc->_dispatch_cache)->mutex); - PyObject *info = PyArrayIdentityHash_GetItemWithLock( - (PyArrayIdentityHash *)ufunc->_dispatch_cache, - (PyObject **)op_dtypes); - - if (info != NULL && PyObject_TypeCheck( - PyTuple_GET_ITEM(info, 1), &PyArrayMethod_Type)) { - /* Found the ArrayMethod and NOT a promoter: return it */ - return info; + if (cacheable) { + PyObject *result = NULL; + /* Add this to the cache using the original types: */ + if (PyArrayIdentityHash_SetItemDefault((PyArrayIdentityHash *)ufunc->_dispatch_cache, + (PyObject **)op_dtypes, info, &result) < 0) { + return NULL; + } + return result; } - - // cache miss, need to acquire a write lock and recursively calculate the - // correct dispatch resolution - NPY_BEGIN_ALLOW_THREADS - mutex->lock(); - NPY_END_ALLOW_THREADS - info = promote_and_get_info_and_ufuncimpl(ufunc, - ops, signature, op_dtypes, legacy_promotion_is_possible); - mutex->unlock(); - return info; } -#endif /** * The central entry-point for the promotion and dispatching machinery. @@ -1093,20 +1067,8 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc, } } - /* - * We hold the GIL here, so on the GIL-enabled build the GIL prevents - * races to fill the promotion cache. - * - * On the free-threaded build we need to set up our own locking to prevent - * races to fill the promotion cache. 
- */ -#ifdef Py_GIL_DISABLED - PyObject *info = promote_and_get_info_and_ufuncimpl_with_locking(ufunc, - ops, signature, op_dtypes, legacy_promotion_is_possible); -#else PyObject *info = promote_and_get_info_and_ufuncimpl(ufunc, ops, signature, op_dtypes, legacy_promotion_is_possible); -#endif if (info == NULL) { goto handle_error; diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index 63ac438eabc4..fdeb7efc0fb6 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -4337,18 +4337,9 @@ try_trivial_scalar_call( // Try getting info from the (private) cache. Fall back if not found, // so that the the dtype gets registered and things will work next time. PyArray_DTypeMeta *op_dtypes[2] = {NPY_DTYPE(dt), NULL}; -#ifdef Py_GIL_DISABLED - // Other threads may be in the process of filling the dispatch cache, - // so we need to acquire the free-threading-specific dispatch cache mutex - // before reading the cache - PyObject *info = PyArrayIdentityHash_GetItemWithLock( // borrowed reference. - (PyArrayIdentityHash *)ufunc->_dispatch_cache, - (PyObject **)op_dtypes); -#else PyObject *info = PyArrayIdentityHash_GetItem( // borrowed reference. 
(PyArrayIdentityHash *)ufunc->_dispatch_cache, (PyObject **)op_dtypes); -#endif if (info == NULL) { goto bail; } diff --git a/numpy/_core/tests/test_hashtable.py b/numpy/_core/tests/test_hashtable.py index 19c3392e86d8..6c9631b8ebfb 100644 --- a/numpy/_core/tests/test_hashtable.py +++ b/numpy/_core/tests/test_hashtable.py @@ -1,10 +1,15 @@ +import random +from collections import defaultdict +from concurrent.futures import ThreadPoolExecutor + import pytest from numpy._core._multiarray_tests import ( create_identity_hash, identity_hash_get_item, - identity_hash_set_item, + identity_hash_set_item_default, ) +from numpy.testing import IS_WASM @pytest.mark.parametrize("key_length", [1, 3, 6]) @@ -20,28 +25,132 @@ def test_identity_hashtable_get_set(key_length, length): for i in range(length): key, value = keys_vals[i] - identity_hash_set_item(ht, key, value) + assert identity_hash_set_item_default(ht, key, value) is value for key, value in keys_vals: got = identity_hash_get_item(ht, key) assert got is value - +@pytest.mark.skipif(IS_WASM, reason="wasm doesn't have support for threads") @pytest.mark.parametrize("key_length", [1, 3, 6]) -def test_identity_hashtable_replace(key_length): +def test_identity_hashtable_default_thread_safety(key_length): ht = create_identity_hash(key_length) key = tuple(object() for _ in range(key_length)) val1 = object() val2 = object() - identity_hash_set_item(ht, key, val1) - got = identity_hash_get_item(ht, key) - assert got is val1 + got1 = identity_hash_set_item_default(ht, key, val1) + assert got1 is val1 + + def thread_func(val): + return identity_hash_set_item_default(ht, key, val) + + with ThreadPoolExecutor(max_workers=8) as executor: + futures = [executor.submit(thread_func, val2) for _ in range(8)] + results = [f.result() for f in futures] + + assert all(r is val1 for r in results) + + +@pytest.mark.skipif(IS_WASM, reason="wasm doesn't have support for threads") +@pytest.mark.parametrize("key_length", [1, 3, 6]) +def 
test_identity_hashtable_set_thread_safety(key_length): + ht = create_identity_hash(key_length) + + key = tuple(object() for _ in range(key_length)) + val1 = object() + + def thread_func(val): + return identity_hash_set_item_default(ht, key, val) + + with ThreadPoolExecutor(max_workers=8) as executor: + futures = [executor.submit(thread_func, val1) for _ in range(100)] + results = [f.result() for f in futures] + + assert all(r is val1 for r in results) + +@pytest.mark.skipif(IS_WASM, reason="wasm doesn't have support for threads") +@pytest.mark.parametrize("key_length", [1, 3, 6]) +def test_identity_hashtable_get_thread_safety(key_length): + ht = create_identity_hash(key_length) + key = tuple(object() for _ in range(key_length)) + value = object() + identity_hash_set_item_default(ht, key, value) + + def thread_func(): + return identity_hash_get_item(ht, key) + + with ThreadPoolExecutor(max_workers=8) as executor: + futures = [executor.submit(thread_func) for _ in range(100)] + results = [f.result() for f in futures] + + assert all(r is value for r in results) + +@pytest.mark.skipif(IS_WASM, reason="wasm doesn't have support for threads") +@pytest.mark.parametrize("key_length", [1, 3, 6]) +@pytest.mark.parametrize("length", [1 << 4, 1 << 8, 1 << 12]) +def test_identity_hashtable_get_set_concurrent(key_length, length): + ht = create_identity_hash(key_length) + keys_vals = [] + for i in range(length): + keys = tuple(object() for _ in range(key_length)) + keys_vals.append((keys, object())) + + def set_item(kv): + key, value = kv + got = identity_hash_set_item_default(ht, key, value) + assert got is value + + def get_item(kv): + key, value = kv + got = identity_hash_get_item(ht, key) + assert got is None or got is value + + with ThreadPoolExecutor(max_workers=8) as executor: + futures = [] + for kv in keys_vals: + futures.append(executor.submit(set_item, kv)) + futures.append(executor.submit(get_item, kv)) + for future in futures: + future.result() + 
+@pytest.mark.skipif(IS_WASM, reason="wasm doesn't have support for threads") +@pytest.mark.parametrize("key_length", [3, 6, 10]) +@pytest.mark.parametrize("length", [1 << 4, 1 << 8, 1 << 12]) +def test_identity_hashtable_get_set_concurrent_collisions(key_length, length): + ht = create_identity_hash(key_length) + base_key = tuple(object() for _ in range(key_length - 1)) + keys_vals = defaultdict(list) + for i in range(length): + keys = base_key + (random.choice(base_key), ) + keys_vals[keys].append(object()) + + set_item_results = defaultdict(set) + + def set_item(kv): + key, values = kv + value = random.choice(values) + got = identity_hash_set_item_default(ht, key, value) + set_item_results[key].add(got) + + get_item_results = defaultdict(set) + + def get_item(kv): + key, values = kv + got = identity_hash_get_item(ht, key) + get_item_results[key].add(got) - with pytest.raises(RuntimeError): - identity_hash_set_item(ht, key, val2) + with ThreadPoolExecutor(max_workers=8) as executor: + futures = [] + for keys, values in keys_vals.items(): + futures.append(executor.submit(set_item, (keys, values))) + futures.append(executor.submit(get_item, (keys, values))) + for future in futures: + future.result() - identity_hash_set_item(ht, key, val2, replace=True) - got = identity_hash_get_item(ht, key) - assert got is val2 + for key in keys_vals.keys(): + assert len(set_item_results[key]) == 1 + set_item_value = set_item_results[key].pop() + for r in get_item_results[key]: + assert r is None or r is set_item_value From 50cf749761491cd1008aecfe91b988cdf78bc654 Mon Sep 17 00:00:00 2001 From: Iason Krommydas Date: Wed, 14 Jan 2026 13:41:18 +0100 Subject: [PATCH 1271/1718] BUG: unrelated error raised when a dtype's `__setstate__` is called with an invalid state tuple size (#30647) Raise a simple error rather than trying to correctly raise a more precise on in __setstate__ for some bad inputs. 
--- numpy/_core/src/multiarray/descriptor.c | 15 ++++++--------- numpy/_core/tests/test_dtype.py | 23 +++++++++++++++++++++++ 2 files changed, 29 insertions(+), 9 deletions(-) diff --git a/numpy/_core/src/multiarray/descriptor.c b/numpy/_core/src/multiarray/descriptor.c index f2fb313cf442..0d1bf6b89e1a 100644 --- a/numpy/_core/src/multiarray/descriptor.c +++ b/numpy/_core/src/multiarray/descriptor.c @@ -844,7 +844,7 @@ _try_convert_from_inherit_tuple(PyArray_Descr *type, PyObject *newobj) return (PyArray_Descr *)Py_NotImplemented; } if (!PyDataType_ISLEGACY(type) || !PyDataType_ISLEGACY(conv)) { - /* + /* * This specification should probably be never supported, but * certainly not for new-style DTypes. */ @@ -1951,7 +1951,7 @@ NPY_NO_EXPORT PyArray_Descr * PyArray_DescrNew(PyArray_Descr *base_descr) { if (!PyDataType_ISLEGACY(base_descr)) { - /* + /* * The main use of this function is mutating strings, so probably * disallowing this is fine in practice. */ @@ -2917,13 +2917,10 @@ arraydescr_setstate(_PyArray_LegacyDescr *self, PyObject *args) } break; default: - /* raise an error */ - if (PyTuple_GET_SIZE(PyTuple_GET_ITEM(args,0)) > 5) { - version = PyLong_AsLong(PyTuple_GET_ITEM(args, 0)); - } - else { - version = -1; - } + PyErr_SetString(PyExc_ValueError, + "Invalid state while unpickling. 
Is the pickle corrupted " + "or created with a newer NumPy version?"); + return NULL; } /* diff --git a/numpy/_core/tests/test_dtype.py b/numpy/_core/tests/test_dtype.py index a77b59261c06..da499f487dfb 100644 --- a/numpy/_core/tests/test_dtype.py +++ b/numpy/_core/tests/test_dtype.py @@ -1341,6 +1341,29 @@ def test_pickle_dtype(self, dt): assert roundtrip_dt == dt assert hash(roundtrip_dt) == pre_pickle_hash + @pytest.mark.parametrize('dt', [ + np.dtype([('a', 'i4'), ('b', 'f8')]), + np.dtype('i4, i1', align=True), + ]) + def test_setstate_invalid_tuple_size(self, dt): + # gh-30476 + valid_state = dt.__reduce__()[2] + dt.__setstate__(valid_state) + + for size in [1, 2, 3, 4]: + with pytest.raises( + ValueError, match="Invalid state while unpickling" + ): + dt.__setstate__(valid_state[:size]) + + min_extra = 10 - len(valid_state) + for extra in range(min_extra, min_extra + 5): + extended = valid_state + (None,) * extra + with pytest.raises( + ValueError, match="Invalid state while unpickling" + ): + dt.__setstate__(extended) + class TestPromotion: """Test cases related to more complex DType promotions. 
Further promotion From 23cb680c9ca4695799ed373ab47624dc6b409fb6 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 14 Jan 2026 14:12:13 +0100 Subject: [PATCH 1272/1718] MAINT: bump ``pytest-run-parallel`` to ``0.8.2`` (#30649) --- .github/workflows/macos.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 3a5a2dc4792f..188a1c34e65c 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -154,5 +154,5 @@ jobs: - name: Test in multiple threads if: ${{ matrix.version == '3.14t' && matrix.build_runner[0] == 'macos-14' }} run: | - pip install pytest-run-parallel==0.7.0 + pip install pytest-run-parallel==0.8.2 spin test -p 4 -- --timeout=600 --durations=10 From 8e3a4224c0ac43bc7d5f123fd8146e8e0bc062c0 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 7 Jan 2026 21:35:20 +0100 Subject: [PATCH 1273/1718] DEP: deprecate ``chararray`` --- .../upcoming_changes/30605.deprecation.rst | 1 + numpy/_core/tests/test_deprecations.py | 5 ++++ numpy/char/__init__.py | 27 ++++++++++++++++++- 3 files changed, 32 insertions(+), 1 deletion(-) create mode 100644 doc/release/upcoming_changes/30605.deprecation.rst diff --git a/doc/release/upcoming_changes/30605.deprecation.rst b/doc/release/upcoming_changes/30605.deprecation.rst new file mode 100644 index 000000000000..062160f210ef --- /dev/null +++ b/doc/release/upcoming_changes/30605.deprecation.rst @@ -0,0 +1 @@ +* ``numpy.char.chararray`` is deprecated. Use an ``ndarray`` with a string or bytes dtype instead. 
diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index 811c00bd3f6a..6c52aea84e5d 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -206,6 +206,11 @@ def test_attributeerror_includes_info(self, name): getattr(np, name) +class TestCharArray(_DeprecationTestCase): + def test_deprecated_chararray(self): + self.assert_deprecated(lambda: np.char.chararray) + + class TestDeprecatedDTypeAliases(_DeprecationTestCase): @pytest.mark.parametrize("dtype_code", ["a", "a10"]) def test_a_dtype_alias(self, dtype_code: str): diff --git a/numpy/char/__init__.py b/numpy/char/__init__.py index d98d38c1d6af..7c4bb43fe137 100644 --- a/numpy/char/__init__.py +++ b/numpy/char/__init__.py @@ -1,2 +1,27 @@ -from numpy._core.defchararray import * from numpy._core.defchararray import __all__, __doc__ + + +def __getattr__(name: str): + if name == "chararray": + # Deprecated in NumPy 2.5, 2026-01-07 + import warnings + + warnings.warn( + ( + "The chararray class is deprecated and will be removed in a future " + "release. Use an ndarray with a string or bytes dtype instead." 
+ ), + DeprecationWarning, + stacklevel=2, + ) + + from numpy._core.defchararray import chararray + + return chararray + + import numpy._core.defchararray as char + + if (export := getattr(char, name, None)) is not None: + return export + + raise AttributeError(f"module {__name__!r} has no attribute {name!r}") From d9712a79cf7ac85ac3cf452af643f8f7cb99cec0 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 7 Jan 2026 21:37:48 +0100 Subject: [PATCH 1274/1718] TYP: mark ``chararray`` as ```@deprecated`` --- numpy/_core/defchararray.pyi | 8 ++++++-- numpy/char/__init__.pyi | 2 +- numpy/typing/tests/data/fail/chararray.pyi | 4 ++-- numpy/typing/tests/data/reveal/char.pyi | 3 +++ numpy/typing/tests/data/reveal/chararray.pyi | 4 ++-- numpy/typing/tests/data/reveal/lib_function_base.pyi | 6 +++--- 6 files changed, 17 insertions(+), 10 deletions(-) diff --git a/numpy/_core/defchararray.pyi b/numpy/_core/defchararray.pyi index 713cfea0e05e..b339b50c37aa 100644 --- a/numpy/_core/defchararray.pyi +++ b/numpy/_core/defchararray.pyi @@ -1,6 +1,6 @@ from collections.abc import Buffer from typing import Any, Literal as L, Self, SupportsIndex, SupportsInt, overload -from typing_extensions import TypeVar +from typing_extensions import TypeVar, deprecated import numpy as np from numpy import _OrderKACF, bytes_, dtype, int_, ndarray, object_, str_ @@ -78,7 +78,7 @@ __all__ = [ _ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True) _CharDTypeT_co = TypeVar("_CharDTypeT_co", bound=dtype[np.character], default=dtype, covariant=True) -type _CharArray[ScalarT: np.character] = chararray[_AnyShape, dtype[ScalarT]] +type _CharArray[ScalarT: np.character] = chararray[_AnyShape, dtype[ScalarT]] # type: ignore[deprecated] type _StringDTypeArray = np.ndarray[_AnyShape, np.dtypes.StringDType] type _StringDTypeOrUnicodeArray = _StringDTypeArray | NDArray[np.str_] @@ -86,6 +86,10 @@ type _StringDTypeSupportsArray = _SupportsArray[np.dtypes.StringDType] ### +@deprecated( 
+ "The chararray class is deprecated and will be removed in a future release. " + "Use an ndarray with a string or bytes dtype instead." +) class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): @overload def __new__( diff --git a/numpy/char/__init__.pyi b/numpy/char/__init__.pyi index e151f20e5f38..11dd885e50f0 100644 --- a/numpy/char/__init__.pyi +++ b/numpy/char/__init__.pyi @@ -1,4 +1,4 @@ -from numpy._core.defchararray import ( +from numpy._core.defchararray import ( # type: ignore[deprecated] add, array, asarray, diff --git a/numpy/typing/tests/data/fail/chararray.pyi b/numpy/typing/tests/data/fail/chararray.pyi index 589895510227..806ec5a0d303 100644 --- a/numpy/typing/tests/data/fail/chararray.pyi +++ b/numpy/typing/tests/data/fail/chararray.pyi @@ -2,8 +2,8 @@ from typing import Any import numpy as np -AR_U: np.char.chararray[tuple[Any, ...], np.dtype[np.str_]] -AR_S: np.char.chararray[tuple[Any, ...], np.dtype[np.bytes_]] +AR_U: np.char.chararray[tuple[Any, ...], np.dtype[np.str_]] # type: ignore[deprecated] +AR_S: np.char.chararray[tuple[Any, ...], np.dtype[np.bytes_]] # type: ignore[deprecated] AR_S.encode() # type: ignore[misc] AR_U.decode() # type: ignore[misc] diff --git a/numpy/typing/tests/data/reveal/char.pyi b/numpy/typing/tests/data/reveal/char.pyi index b83ecc62221f..b6866a6d9f96 100644 --- a/numpy/typing/tests/data/reveal/char.pyi +++ b/numpy/typing/tests/data/reveal/char.pyi @@ -204,6 +204,9 @@ assert_type(np.char.translate(AR_U, ""), npt.NDArray[np.str_]) assert_type(np.char.translate(AR_S, ""), npt.NDArray[np.bytes_]) assert_type(np.char.translate(AR_T, ""), AR_T_alias) +# mypy: disable-error-code="deprecated" +# pyright: reportDeprecated=false + assert_type(np.char.array(AR_U), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) assert_type(np.char.array(AR_S, order="K"), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) assert_type(np.char.array("bob", copy=True), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) diff --git 
a/numpy/typing/tests/data/reveal/chararray.pyi b/numpy/typing/tests/data/reveal/chararray.pyi index b93826922662..771276e41110 100644 --- a/numpy/typing/tests/data/reveal/chararray.pyi +++ b/numpy/typing/tests/data/reveal/chararray.pyi @@ -3,8 +3,8 @@ from typing import Any, assert_type import numpy as np import numpy.typing as npt -type _BytesCharArray = np.char.chararray[tuple[Any, ...], np.dtype[np.bytes_]] -type _StrCharArray = np.char.chararray[tuple[Any, ...], np.dtype[np.str_]] +type _BytesCharArray = np.char.chararray[tuple[Any, ...], np.dtype[np.bytes_]] # type: ignore[deprecated] +type _StrCharArray = np.char.chararray[tuple[Any, ...], np.dtype[np.str_]] # type: ignore[deprecated] AR_U: _StrCharArray AR_S: _BytesCharArray diff --git a/numpy/typing/tests/data/reveal/lib_function_base.pyi b/numpy/typing/tests/data/reveal/lib_function_base.pyi index 090af934a411..bab43106d1c5 100644 --- a/numpy/typing/tests/data/reveal/lib_function_base.pyi +++ b/numpy/typing/tests/data/reveal/lib_function_base.pyi @@ -26,7 +26,7 @@ AR_M: npt.NDArray[np.datetime64] AR_O: npt.NDArray[np.object_] AR_b: npt.NDArray[np.bool] AR_U: npt.NDArray[np.str_] -CHAR_AR_U: np.char.chararray[tuple[Any, ...], np.dtype[np.str_]] +CHAR_AR_U: np.char.chararray[tuple[Any, ...], np.dtype[np.str_]] # type: ignore[deprecated] AR_f8_1d: np.ndarray[tuple[int], np.dtype[np.float64]] AR_f8_2d: np.ndarray[tuple[int, int], np.dtype[np.float64]] @@ -130,8 +130,8 @@ assert_type(np.place(AR_f8, mask=AR_i8, vals=5.0), None) # copy assert_type(np.copy(AR_LIKE_f8), np.ndarray) assert_type(np.copy(AR_U), npt.NDArray[np.str_]) -assert_type(np.copy(CHAR_AR_U, "K", subok=True), np.char.chararray[tuple[Any, ...], np.dtype[np.str_]]) -assert_type(np.copy(CHAR_AR_U, subok=True), np.char.chararray[tuple[Any, ...], np.dtype[np.str_]]) +assert_type(np.copy(CHAR_AR_U, "K", subok=True), np.char.chararray[tuple[Any, ...], np.dtype[np.str_]]) # type: ignore[deprecated] +assert_type(np.copy(CHAR_AR_U, subok=True), 
np.char.chararray[tuple[Any, ...], np.dtype[np.str_]]) # type: ignore[deprecated] # pyright correctly infers `NDArray[str_]` here assert_type(np.copy(CHAR_AR_U), np.ndarray[Any, Any]) # pyright: ignore[reportAssertTypeFailure] From 6e25bee650e3186ae1bce4f8cd90c415c2c4de9c Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 7 Jan 2026 22:04:07 +0100 Subject: [PATCH 1275/1718] TST: fix failing ``chararray`` tests caused by a ``DeprecationWarning`` --- numpy/_core/tests/test_arraymethod.py | 7 ++++--- numpy/_core/tests/test_defchararray.py | 14 ++++++++++++++ numpy/_core/tests/test_regression.py | 1 + numpy/lib/tests/test_arraysetops.py | 1 + 4 files changed, 20 insertions(+), 3 deletions(-) diff --git a/numpy/_core/tests/test_arraymethod.py b/numpy/_core/tests/test_arraymethod.py index 5b3d51585718..4400fccf32e8 100644 --- a/numpy/_core/tests/test_arraymethod.py +++ b/numpy/_core/tests/test_arraymethod.py @@ -11,6 +11,9 @@ import numpy as np from numpy._core._multiarray_umath import _get_castingimpl as get_castingimpl +# accessing `numpy.char.charray` will issue a deprecation warning +from numpy._core.defchararray import chararray + class TestResolveDescriptors: # Test mainly error paths of the resolve_descriptors function, @@ -62,9 +65,7 @@ def test_invalid_arguments(self, args, error): @pytest.mark.parametrize( - "cls", [ - np.ndarray, np.recarray, np.char.chararray, np.matrix, np.memmap - ] + "cls", [np.ndarray, np.recarray, chararray, np.matrix, np.memmap] ) class TestClassGetItem: def test_class_getitem(self, cls: type[np.ndarray]) -> None: diff --git a/numpy/_core/tests/test_defchararray.py b/numpy/_core/tests/test_defchararray.py index 7200b9c97094..643293580708 100644 --- a/numpy/_core/tests/test_defchararray.py +++ b/numpy/_core/tests/test_defchararray.py @@ -13,6 +13,11 @@ kw_unicode_true = {'unicode': True} # make 2to3 work properly kw_unicode_false = {'unicode': False} +ignore_charray_deprecation = pytest.mark.filterwarnings( + r"ignore:\w+ chararray 
\w+:DeprecationWarning" +) + + class TestBasic: def test_from_object_array(self): A = np.array([['abc', 2], @@ -134,6 +139,7 @@ def fail(): assert_raises(ValueError, fail) +@ignore_charray_deprecation class TestWhitespace: def test1(self): A = np.array([['abc ', '123 '], @@ -147,12 +153,14 @@ def test1(self): assert_(not np.any(A < B)) assert_(not np.any(A != B)) +@ignore_charray_deprecation class TestChar: def test_it(self): A = np.array('abc1', dtype='c').view(np.char.chararray) assert_equal(A.shape, (4,)) assert_equal(A.upper()[:2].tobytes(), b'AB') +@ignore_charray_deprecation class TestComparisons: def A(self): return np.array([['abc', 'abcc', '123'], @@ -199,6 +207,7 @@ def test_type(self): assert_(isinstance(out1, np.ndarray)) assert_(isinstance(out2, np.ndarray)) +@ignore_charray_deprecation class TestComparisonsMixed1(TestComparisons): """Ticket #1276""" @@ -207,6 +216,7 @@ def B(self): [['efg', 'efg', '123 '], ['051', 'efgg', 'tuv']], np.str_).view(np.char.chararray) +@ignore_charray_deprecation class TestComparisonsMixed2(TestComparisons): """Ticket #1276""" @@ -215,6 +225,7 @@ def A(self): [['abc', 'abcc', '123'], ['789', 'abc', 'xyz']], np.str_).view(np.char.chararray) +@ignore_charray_deprecation class TestInformation: def A(self): return np.array([[' abc ', ''], @@ -356,6 +367,7 @@ def fail(): assert_raises(TypeError, fail) +@ignore_charray_deprecation class TestMethods: def A(self): return np.array([[' abc ', ''], @@ -691,6 +703,7 @@ def fail(): assert_array_equal(B.isdecimal(), [ [False, False], [True, False], [False, False]]) +@ignore_charray_deprecation class TestOperations: def A(self): return np.array([['abc', '123'], @@ -856,6 +869,7 @@ def test_replace(self): assert_equal(np.char.replace('Python is good', 'good', 'great'), 'Python is great') +@ignore_charray_deprecation def test_empty_indexing(): """Regression test for ticket 1948.""" # Check that indexing a chararray with an empty list/array returns an diff --git 
a/numpy/_core/tests/test_regression.py b/numpy/_core/tests/test_regression.py index f16935bc5dec..32d61f8de935 100644 --- a/numpy/_core/tests/test_regression.py +++ b/numpy/_core/tests/test_regression.py @@ -35,6 +35,7 @@ ) +@pytest.mark.filterwarnings(r"ignore:\w+ chararray \w+:DeprecationWarning") class TestRegression: def test_invalid_round(self): # Ticket #3 diff --git a/numpy/lib/tests/test_arraysetops.py b/numpy/lib/tests/test_arraysetops.py index 4e8d503427de..d6bd45da36f1 100644 --- a/numpy/lib/tests/test_arraysetops.py +++ b/numpy/lib/tests/test_arraysetops.py @@ -695,6 +695,7 @@ def get_types(self): types.append('timedelta64[D]') return types + @pytest.mark.filterwarnings(r"ignore:\w+ chararray \w+:DeprecationWarning") def test_unique_1d(self): a = [5, 7, 1, 2, 1, 5, 7] * 10 From 9b9a167d617dfe255df4b74cd343f049333a7036 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 8 Jan 2026 11:48:48 +0100 Subject: [PATCH 1276/1718] DOC/DEP: update docs to reflect deprecation of ``chararray`` --- doc/neps/nep-0055-string_dtype.rst | 4 +--- doc/source/reference/arrays.classes.rst | 4 ++++ doc/source/reference/routines.char.rst | 5 ++--- numpy/_core/defchararray.py | 4 ++++ 4 files changed, 11 insertions(+), 6 deletions(-) diff --git a/doc/neps/nep-0055-string_dtype.rst b/doc/neps/nep-0055-string_dtype.rst index 555052fa16f7..28dc9572ed6a 100644 --- a/doc/neps/nep-0055-string_dtype.rst +++ b/doc/neps/nep-0055-string_dtype.rst @@ -51,9 +51,7 @@ needs and then changes in the Python ecosystem. Support for strings was added to NumPy to support users of the NumArray ``chararray`` type. Remnants of this are still visible in the NumPy API: string-related functionality lives in ``np.char``, to support the -``np.char.chararray`` class. This class is not formally deprecated, but has a -had comment in the module docstring suggesting to use string dtypes instead -since NumPy 1.4. +``np.char.chararray`` class, which was deprecated in NumPy 2.5. 
NumPy's ``bytes_`` DType was originally used to represent the Python 2 ``str`` type before Python 3 support was added to NumPy. The bytes DType makes the most diff --git a/doc/source/reference/arrays.classes.rst b/doc/source/reference/arrays.classes.rst index df64e9e5d042..d38043661a52 100644 --- a/doc/source/reference/arrays.classes.rst +++ b/doc/source/reference/arrays.classes.rst @@ -568,6 +568,10 @@ Character arrays (:mod:`numpy.char`) `dtype` `object_`, `bytes_` or `str_`, and use the free functions in the `numpy.char` module for fast vectorized string operations. +.. deprecated:: 2.5 + ``numpy.char.chararray`` is deprecated. Use an ``ndarray`` with a string or + bytes dtype instead. + These are enhanced arrays of either :class:`str_` type or :class:`bytes_` type. These arrays inherit from the :class:`ndarray`, but specially-define the operations ``+``, ``*``, diff --git a/doc/source/reference/routines.char.rst b/doc/source/reference/routines.char.rst index 92c605071e50..7dfb2e6a18e3 100644 --- a/doc/source/reference/routines.char.rst +++ b/doc/source/reference/routines.char.rst @@ -9,9 +9,8 @@ Legacy fixed-width string functionality .. legacy:: - The string operations in this module, as well as the `numpy.char.chararray` - class, are planned to be deprecated in the future. Use `numpy.strings` - instead. + The string operations in this module are planned to be deprecated in the future, and + the `numpy.char.chararray` class is deprecated in NumPy 2.5. Use `numpy.strings` instead. The `numpy.char` module provides a set of vectorized string operations for arrays of type `numpy.str_` or `numpy.bytes_`. For example diff --git a/numpy/_core/defchararray.py b/numpy/_core/defchararray.py index e88c1e10d12f..4f1edfb824de 100644 --- a/numpy/_core/defchararray.py +++ b/numpy/_core/defchararray.py @@ -409,6 +409,10 @@ class chararray(ndarray): Provides a convenient view on arrays of string and unicode values. + .. deprecated:: 2.5 + ``chararray`` is deprecated. 
Use an ``ndarray`` with a string or + bytes dtype instead. + .. note:: The `chararray` class exists for backwards compatibility with Numarray, it is not recommended for new development. Starting from numpy From 796b7f9df2b06a10eefab714817cacd80580967e Mon Sep 17 00:00:00 2001 From: Nandika32 Date: Wed, 14 Jan 2026 19:15:25 +0530 Subject: [PATCH 1277/1718] DOC: fix grammar and explain changelog tool --- doc/RELEASE_WALKTHROUGH.rst | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/doc/RELEASE_WALKTHROUGH.rst b/doc/RELEASE_WALKTHROUGH.rst index c8c1f129c0b2..54cc72a3e069 100644 --- a/doc/RELEASE_WALKTHROUGH.rst +++ b/doc/RELEASE_WALKTHROUGH.rst @@ -97,7 +97,9 @@ make sure that the release notes have an entry in the ``release.rst`` file:: Generate the changelog ---------------------- -The changelog is generated using the changelog tool:: +The changelog is generated using the changelog tool (``spin changelog``), +which collects merged pull requests and formats them into a release-ready +changelog:: $ spin changelog $GITHUB v2.3.0..maintenance/2.4.x > doc/changelog/2.4.0-changelog.rst @@ -120,7 +122,7 @@ run ``spin notes``, which will incorporate the snippets into the $ spin notes $ gvim doc/source/release/notes-towncrier.rst doc/source/release/2.4.0-notes.rst -Once the ``notes-towncrier`` contents has been incorporated into release note +Once the ``notes-towncrier`` contents have been incorporated into release notes the ``.. include:: notes-towncrier.rst`` directive can be removed. The notes will always need some fixups, the introduction will need to be written, and significant changes should be called out. For patch releases the changelog text @@ -185,7 +187,7 @@ If you need to delete the tag due to error:: Go to the ``numpy-release`` repository in your browser and manually trigger the workflow on the ``maintenance/2.4.x`` branch using the ``Run workflow`` button in ``actions``. 
Make sure that the upload target is ``pypi`` in the -*evironment* dropdown. the wheels take about 1 hour to build, but sometimes +*environment* dropdown. The wheels take about 1 hour to build, but sometimes GitHub is very slow. If some wheel builds fail for unrelated reasons, you can re-run them as normal in the GitHub Actions UI with ``re-run failed``. After the wheels are built review the results, checking that the number of artifacts From 9088ad1b92045162e438ea8565c4b6291052df8c Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 14 Jan 2026 15:29:14 +0100 Subject: [PATCH 1278/1718] MAINT: add custom ``__dir__`` in ``numpy.char`` --- numpy/char/__init__.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/numpy/char/__init__.py b/numpy/char/__init__.py index 7c4bb43fe137..dd6c36c84451 100644 --- a/numpy/char/__init__.py +++ b/numpy/char/__init__.py @@ -25,3 +25,9 @@ def __getattr__(name: str): return export raise AttributeError(f"module {__name__!r} has no attribute {name!r}") + + +def __dir__() -> list[str]: + import numpy._core.defchararray as char + + return dir(char) From 6c213b4766c7e28d53eddf533ae8e58cd15a51ff Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 14 Jan 2026 15:39:05 +0100 Subject: [PATCH 1279/1718] TST: ignore some additional deprecation warnings for ``chararray`` --- numpy/tests/test_public_api.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/numpy/tests/test_public_api.py b/numpy/tests/test_public_api.py index 5565fad6cb7f..a5cc63fe3a19 100644 --- a/numpy/tests/test_public_api.py +++ b/numpy/tests/test_public_api.py @@ -466,6 +466,7 @@ def test_core_shims_coherence(): assert member is getattr(core, member_name) +@pytest.mark.filterwarnings(r"ignore:\w+ chararray \w+:DeprecationWarning") def test_functions_single_location(): """ Check that each public function is available from one location only.
@@ -676,6 +677,7 @@ def _check_correct_qualname_and_module(obj) -> bool: ) +@pytest.mark.filterwarnings(r"ignore:\w+ chararray \w+:DeprecationWarning") def test___qualname___and___module___attribute(): # NumPy messes with module and name/qualname attributes, but any object # should be discoverable based on its module and qualname, so test that. From 27a4f8f0d3febe4b2e66cde14c1baccdadfe5965 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 14 Jan 2026 16:06:20 +0100 Subject: [PATCH 1280/1718] DOC: work around intersphinx reference warnings for `np.str_` and `np._bytes` --- numpy/_core/defchararray.py | 2 +- numpy/_core/strings.py | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/numpy/_core/defchararray.py b/numpy/_core/defchararray.py index 4f1edfb824de..61274eb1a4a0 100644 --- a/numpy/_core/defchararray.py +++ b/numpy/_core/defchararray.py @@ -274,7 +274,7 @@ def multiply(a, i): Parameters ---------- - a : array_like, with `np.bytes_` or `np.str_` dtype + a : array_like, with ``bytes_`` or ``str_`` dtype i : array_like, with any integer dtype diff --git a/numpy/_core/strings.py b/numpy/_core/strings.py index e9fa7f58e3ea..db72d6819ba1 100644 --- a/numpy/_core/strings.py +++ b/numpy/_core/strings.py @@ -224,7 +224,7 @@ def mod(a, values): Parameters ---------- - a : array_like, with `np.bytes_` or `np.str_` dtype + a : array_like, with ``bytes_`` or ``str_`` dtype values : array_like of values These values will be element-wise interpolated into the string. @@ -263,7 +263,7 @@ def find(a, sub, start=0, end=None): ---------- a : array_like, with ``StringDType``, ``bytes_`` or ``str_`` dtype - sub : array_like, with `np.bytes_` or `np.str_` dtype + sub : array_like, with ``bytes_`` or ``str_`` dtype The substring to search for. 
start, end : array_like, with any integer dtype @@ -375,9 +375,9 @@ def rindex(a, sub, start=0, end=None): Parameters ---------- - a : array-like, with `np.bytes_` or `np.str_` dtype + a : array-like, with ``bytes_`` or ``str_`` dtype - sub : array-like, with `np.bytes_` or `np.str_` dtype + sub : array-like, with ``bytes_`` or ``str_`` dtype start, end : array-like, with any integer dtype, optional @@ -1689,7 +1689,7 @@ def translate(a, table, deletechars=None): Parameters ---------- - a : array-like, with `np.bytes_` or `np.str_` dtype + a : array-like, with ``bytes_`` or ``str_`` dtype table : str of length 256 From 3d792981bdf4f3bec046be9e8e574ed9004a1510 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 14 Jan 2026 16:30:40 +0100 Subject: [PATCH 1281/1718] DOC/TST: ignore ``chararray`` warning in doctests --- numpy/conftest.py | 1 + 1 file changed, 1 insertion(+) diff --git a/numpy/conftest.py b/numpy/conftest.py index 90da68cfc2d5..7880e0cf1ac3 100644 --- a/numpy/conftest.py +++ b/numpy/conftest.py @@ -186,6 +186,7 @@ def warnings_errors_and_rng(test=None): "Arrays of 2-dimensional vectors", # matlib.cross "NumPy warning suppression and assertion utilities are deprecated.", "numpy.fix is deprecated", # fix -> trunc + "The chararray class is deprecated", # char.chararray ] msg = "|".join(msgs) From b59642d989dca3551d9be44fc2c523f4a9941352 Mon Sep 17 00:00:00 2001 From: Teja Muthyala Date: Wed, 14 Jan 2026 14:37:07 -0500 Subject: [PATCH 1282/1718] BUG: Raise ValueError when spawn() called with negative n_children Fixes #30577 This PR adds validation to reject negative n_children arguments in all spawn() methods which previously caused erratic behavior: * SeedSequence.spawn() * SeedlessSeedSequence.spawn() * BitGenerator.spawn() * Generator.spawn() Changes: Added if n_children < 0: raise ValueError("n_children must be non-negative") to * SeedSequence.spawn() * SeedlessSeedSequence.spawn() * BitGenerator.spawn() * Generator.spawn() Added 
test_spawn_negative_n_children() test to verify all spawn methods correctly reject negative arguments Test and results: Ran spin test -t numpy/random/tests/test_direct.py::test_spawn_negative_n_children -v ==================================== test session starts ==================================== platform darwin -- Python 3.13.5, pytest-7.4.0, pluggy-1.6.0 -- /Users/tejamuthyala/dev/numpy/venv/bin/python3.13 cachedir: .pytest_cache hypothesis profile 'np.test() profile' -> database=None, deadline=None, print_blob=True, derandomize=True, suppress_health_check=(HealthCheck.data_too_large, HealthCheck.filter_too_much, HealthCheck.too_slow, HealthCheck.large_base_example, HealthCheck.function_scoped_fixture, HealthCheck.differing_executors, HealthCheck.nested_given) rootdir: /Users/tejamuthyala/dev/numpy configfile: pytest.ini plugins: xdist-3.8.0, timeout-2.4.0, cov-4.1.0, hypothesis-6.142.2 collected 1 item numpy/random/tests/test_direct.py::test_spawn_negative_n_children PASSED [100%] ===================================== 1 passed in 1.09s ===================================== --- numpy/random/_generator.pyx | 2 ++ numpy/random/bit_generator.pyx | 7 +++++++ numpy/random/tests/test_direct.py | 25 +++++++++++++++++++++++++ 3 files changed, 34 insertions(+) diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index e3692c5b9904..6623d347a4cf 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -294,6 +294,8 @@ cdef class Generator: >>> nested_spawn = child_rng1.spawn(20) """ + if n_children < 0: + raise ValueError("n_children must be non-negative") return [type(self)(g) for g in self._bit_generator.spawn(n_children)] def random(self, size=None, dtype=np.float64, out=None): diff --git a/numpy/random/bit_generator.pyx b/numpy/random/bit_generator.pyx index 919fccdd7bb6..676f95e5ad70 100644 --- a/numpy/random/bit_generator.pyx +++ b/numpy/random/bit_generator.pyx @@ -241,6 +241,8 @@ cdef class SeedlessSeedSequence: raise 
NotImplementedError('seedless SeedSequences cannot generate state') def spawn(self, n_children): + if n_children < 0: + raise ValueError("n_children must be non-negative") return [self] * n_children @@ -476,6 +478,9 @@ cdef class SeedSequence: """ cdef uint32_t i + if n_children < 0: + raise ValueError("n_children must be non-negative") + seqs = [] for i in range(self.n_children_spawned, self.n_children_spawned + n_children): @@ -626,6 +631,8 @@ cdef class BitGenerator: Equivalent method on the generator and seed sequence. """ + if n_children < 0: + raise ValueError("n_children must be non-negative") if not isinstance(self._seed_seq, ISpawnableSeedSequence): raise TypeError( "The underlying SeedSequence does not implement spawning.") diff --git a/numpy/random/tests/test_direct.py b/numpy/random/tests/test_direct.py index 9916f8ad3440..bdd2ee7d633e 100644 --- a/numpy/random/tests/test_direct.py +++ b/numpy/random/tests/test_direct.py @@ -183,6 +183,31 @@ def test_generator_spawning(): assert new_rngs[0].uniform() != new_rngs[1].uniform() +def test_spawn_negative_n_children(): + """Test that spawn raises ValueError for negative n_children.""" + from numpy.random.bit_generator import SeedlessSeedSequence + + rng = np.random.default_rng(42) + seq = rng.bit_generator.seed_seq + + # Test SeedSequence.spawn + with pytest.raises(ValueError, match="n_children must be non-negative"): + seq.spawn(-1) + + # Test SeedlessSeedSequence.spawn + seedless = SeedlessSeedSequence() + with pytest.raises(ValueError, match="n_children must be non-negative"): + seedless.spawn(-1) + + # Test BitGenerator.spawn + with pytest.raises(ValueError, match="n_children must be non-negative"): + rng.bit_generator.spawn(-1) + + # Test Generator.spawn + with pytest.raises(ValueError, match="n_children must be non-negative"): + rng.spawn(-1) + + def test_non_spawnable(): from numpy.random.bit_generator import ISeedSequence From 4ac65b867d5dfffa63c6f03c03f4aceea366c498 Mon Sep 17 00:00:00 2001 From: 
"dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 16 Jan 2026 17:53:22 +0000 Subject: [PATCH 1283/1718] MAINT: Bump actions/cache from 5.0.1 to 5.0.2 Bumps [actions/cache](https://github.com/actions/cache) from 5.0.1 to 5.0.2. - [Release notes](https://github.com/actions/cache/releases) - [Changelog](https://github.com/actions/cache/blob/main/RELEASES.md) - [Commits](https://github.com/actions/cache/compare/9255dc7a253b0ccc959486e2bca901246202afeb...8b402f58fbc84540c8b491a91e594a4576fec3d7) --- updated-dependencies: - dependency-name: actions/cache dependency-version: 5.0.2 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/linux_qemu.yml | 4 ++-- .github/workflows/macos.yml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/linux_qemu.yml b/.github/workflows/linux_qemu.yml index 21a0aa722319..318f5591c2ac 100644 --- a/.github/workflows/linux_qemu.yml +++ b/.github/workflows/linux_qemu.yml @@ -97,7 +97,7 @@ jobs: sudo apt install -y ninja-build gcc-${TOOLCHAIN_NAME} g++-${TOOLCHAIN_NAME} gfortran-${TOOLCHAIN_NAME} - name: Cache docker container - uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1 + uses: actions/cache@8b402f58fbc84540c8b491a91e594a4576fec3d7 # v5.0.2 id: container-cache with: path: ~/docker_${{ matrix.BUILD_PROP[1] }} @@ -205,7 +205,7 @@ jobs: sudo apt install -y ninja-build gcc-14-${TOOLCHAIN_NAME} g++-14-${TOOLCHAIN_NAME} gfortran-14-${TOOLCHAIN_NAME} - name: Cache docker container - uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1 + uses: actions/cache@8b402f58fbc84540c8b491a91e594a4576fec3d7 # v5.0.2 id: container-cache with: path: ~/docker_${{ matrix.BUILD_PROP[1] }} diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 188a1c34e65c..a7d18daec4b5 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -50,7 
+50,7 @@ jobs: echo "today=$(/bin/date -u '+%Y%m%d')" >> $GITHUB_OUTPUT - name: Setup compiler cache - uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1 + uses: actions/cache@8b402f58fbc84540c8b491a91e594a4576fec3d7 # v5.0.2 id: cache-ccache with: path: ${{ steps.prep-ccache.outputs.dir }} @@ -74,7 +74,7 @@ jobs: # ensure we re-solve once a day (since we don't lock versions). Could be # replaced by a conda-lock based approach in the future. - name: Cache conda environment - uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1 + uses: actions/cache@8b402f58fbc84540c8b491a91e594a4576fec3d7 # v5.0.2 env: # Increase this value to reset cache if environment.yml has not changed CACHE_NUMBER: 1 From 4e2d3f0cdca877aa7e9eca9f089fa4076cda1eee Mon Sep 17 00:00:00 2001 From: Kumar Aditya Date: Sat, 17 Jan 2026 04:42:59 +0530 Subject: [PATCH 1284/1718] BUG: fix time-of-check vs time-of-use bug in npy_hashtable.c (#30662) --- numpy/_core/src/common/npy_hashtable.c | 47 +++++++++++--------------- 1 file changed, 20 insertions(+), 27 deletions(-) diff --git a/numpy/_core/src/common/npy_hashtable.c b/numpy/_core/src/common/npy_hashtable.c index 535dbca842db..5086fd26af69 100644 --- a/numpy/_core/src/common/npy_hashtable.c +++ b/numpy/_core/src/common/npy_hashtable.c @@ -104,7 +104,8 @@ identity_list_hash(PyObject *const *v, int len) static inline PyObject ** -find_item_buckets(struct buckets *buckets, int key_len, PyObject *const *key) +find_item_buckets(struct buckets *buckets, int key_len, PyObject *const *key, + PyObject **pvalue) { Py_hash_t hash = identity_list_hash(key, key_len); npy_uintp perturb = (npy_uintp)hash; @@ -113,8 +114,11 @@ find_item_buckets(struct buckets *buckets, int key_len, PyObject *const *key) while (1) { PyObject **item = &(buckets->array[bucket * (key_len + 1)]); - PyObject *value = FT_ATOMIC_LOAD_PTR_ACQUIRE(item[0]); - if (value == NULL) { + PyObject *val = FT_ATOMIC_LOAD_PTR_ACQUIRE(item[0]); + if (pvalue != NULL) 
{ + *pvalue = val; + } + if (val == NULL) { /* The item is not in the cache; return the empty bucket */ return item; } @@ -130,10 +134,10 @@ find_item_buckets(struct buckets *buckets, int key_len, PyObject *const *key) static inline PyObject ** -find_item(PyArrayIdentityHash const *tb, PyObject *const *key) +find_item(PyArrayIdentityHash const *tb, PyObject *const *key, PyObject **pvalue) { struct buckets *buckets = FT_ATOMIC_LOAD_PTR_ACQUIRE(tb->buckets); - return find_item_buckets(buckets, tb->key_len, key); + return find_item_buckets(buckets, tb->key_len, key, pvalue); } @@ -196,29 +200,17 @@ _resize_if_necessary(PyArrayIdentityHash *tb) #endif struct buckets *old_buckets = tb->buckets; int key_len = tb->key_len; - npy_intp new_size, prev_size = old_buckets->size; + npy_intp prev_size = old_buckets->size; assert(prev_size > 0); - if ((old_buckets->nelem + 1) * 2 > prev_size) { - /* Double in size */ - new_size = prev_size * 2; - } - else { - new_size = prev_size; - while ((old_buckets->nelem + 8) * 2 < new_size / 2) { - /* - * Should possibly be improved. However, we assume that we - * almost never shrink. Still if we do, do not shrink as much - * as possible to avoid growing right away. 
- */ - new_size /= 2; - } - assert(new_size >= 4); - } - if (new_size == prev_size) { + if ((old_buckets->nelem + 1) * 2 <= old_buckets->size) { + /* No resize necessary if load factor is not more than 0.5 */ return 0; } + /* Double in size */ + npy_intp new_size = old_buckets->size * 2; + npy_intp alloc_size; if (npy_mul_sizes_with_overflow(&alloc_size, new_size, key_len + 1)) { return -1; @@ -234,7 +226,7 @@ _resize_if_necessary(PyArrayIdentityHash *tb) for (npy_intp i = 0; i < prev_size; i++) { PyObject **item = &old_buckets->array[i * (key_len + 1)]; if (item[0] != NULL) { - PyObject **tb_item = find_item_buckets(new_buckets, key_len, item + 1); + PyObject **tb_item = find_item_buckets(new_buckets, key_len, item + 1, NULL); memcpy(tb_item+1, item+1, key_len * sizeof(PyObject *)); new_buckets->nelem++; tb_item[0] = item[0]; @@ -275,7 +267,7 @@ PyArrayIdentityHash_SetItemDefaultLockHeld(PyArrayIdentityHash *tb, return -1; } - PyObject **tb_item = find_item(tb, key); + PyObject **tb_item = find_item(tb, key, NULL); if (tb_item[0] == NULL) { memcpy(tb_item+1, key, tb->key_len * sizeof(PyObject *)); tb->buckets->nelem++; @@ -306,6 +298,7 @@ PyArrayIdentityHash_SetItemDefault(PyArrayIdentityHash *tb, NPY_NO_EXPORT PyObject * PyArrayIdentityHash_GetItem(PyArrayIdentityHash *tb, PyObject *const *key) { - PyObject **tb_item = find_item(tb, key); - return FT_ATOMIC_LOAD_PTR_ACQUIRE(tb_item[0]); + PyObject *value = NULL; + find_item(tb, key, &value); + return value; } From db3cac3c226447a036d16f3a34e3f1ed4427fb18 Mon Sep 17 00:00:00 2001 From: Nandika32 Date: Sun, 18 Jan 2026 17:25:45 +0530 Subject: [PATCH 1285/1718] DOC: explain spin utility in testing guidelines --- doc/TESTS.rst | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/doc/TESTS.rst b/doc/TESTS.rst index 803625e727ae..640e0489620c 100644 --- a/doc/TESTS.rst +++ b/doc/TESTS.rst @@ -64,7 +64,11 @@ Running tests from the command line ----------------------------------- If you want to build 
NumPy in order to work on NumPy itself, use the ``spin`` -utility. To run NumPy's full test suite:: +utility. The spin utility is NumPy’s developer command-line tool used when +working with a local source checkout of NumPy. It provides convenient wrappers +around common development tasks such as building NumPy, running tests, and +checking documentation, ensuring the correct build environment is used. +To run NumPy's full test suite:: $ spin test -m full From 0a84d4cd1af5a54bad35b0ea9c90cdb1fb187922 Mon Sep 17 00:00:00 2001 From: Nandika32 Date: Sun, 18 Jan 2026 17:47:24 +0530 Subject: [PATCH 1286/1718] DOC:Explain spin-utility in TESTS.rst --- doc/TESTS.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/TESTS.rst b/doc/TESTS.rst index 640e0489620c..d98c78e4bed2 100644 --- a/doc/TESTS.rst +++ b/doc/TESTS.rst @@ -66,7 +66,7 @@ Running tests from the command line If you want to build NumPy in order to work on NumPy itself, use the ``spin`` utility. The spin utility is NumPy’s developer command-line tool used when working with a local source checkout of NumPy. It provides convenient wrappers -around common development tasks such as building NumPy, running tests, and +around common development tasks such as building NumPy, running tests, and checking documentation, ensuring the correct build environment is used. To run NumPy's full test suite:: From 756761e672fba0cf26425d7da6ea4b3873f5d76e Mon Sep 17 00:00:00 2001 From: Nandika32 Date: Mon, 19 Jan 2026 17:12:12 +0530 Subject: [PATCH 1287/1718] DOC:add top level description of spin to contributing guide --- CONTRIBUTING.rst | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 0919790c65d1..6f7a984ac314 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -16,3 +16,17 @@ to all interactions, including issues and PRs. For more, please read https://www.numpy.org/devdocs/dev/index.html Thank you for contributing, and happy coding!
+ +Spin: NumPy’s developer tool +---------------------------- + +NumPy uses a command-line tool called ``spin`` to support common development +tasks such as running tests, building documentation, and managing other +developer workflows. + +The ``spin`` tool provides a consistent interface for contributors working on +NumPy itself, wrapping multiple underlying tools and configurations into a +single command that follows NumPy’s development conventions. + +Most contributors will interact with ``spin`` when running tests locally, +building the documentation, or preparing changes for review. From 9a12196453c863a33b046d331e9e8d71304d4bf0 Mon Sep 17 00:00:00 2001 From: Daniel Tang Date: Mon, 19 Jan 2026 08:25:16 -0500 Subject: [PATCH 1288/1718] DEV: make `spin mypy` run `spin build` For consistency with `spin test`. I wondered why `spin mypy` had been ignoring my changes and printing stale results. --- .spin/cmds.py | 1 + 1 file changed, 1 insertion(+) diff --git a/.spin/cmds.py b/.spin/cmds.py index 5c4d5e90f6d7..1f5b0d78611d 100644 --- a/.spin/cmds.py +++ b/.spin/cmds.py @@ -544,6 +544,7 @@ def ipython(*, ipython_args, build_dir): def mypy(ctx): """🦆 Run Mypy tests for NumPy """ + ctx.invoke(build) env = os.environ env['NPY_RUN_MYPY_IN_TESTSUITE'] = '1' ctx.params['pytest_args'] = [os.path.join('numpy', 'typing')] From 900a7881e1df7a897685455450aaec3e8256160c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 19 Jan 2026 19:33:09 +0000 Subject: [PATCH 1289/1718] MAINT: Bump int128/hide-comment-action from 1.49.0 to 1.50.0 Bumps [int128/hide-comment-action](https://github.com/int128/hide-comment-action) from 1.49.0 to 1.50.0. 
- [Release notes](https://github.com/int128/hide-comment-action/releases) - [Commits](https://github.com/int128/hide-comment-action/compare/d56df214f3902ffb330ca629acf5051a4e22aee2...a218e276fb47d0d526ee989fe02e935a5095417b) --- updated-dependencies: - dependency-name: int128/hide-comment-action dependency-version: 1.50.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/mypy_primer_comment.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/mypy_primer_comment.yml b/.github/workflows/mypy_primer_comment.yml index b29f36dbf7e7..7a83cdb53d88 100644 --- a/.github/workflows/mypy_primer_comment.yml +++ b/.github/workflows/mypy_primer_comment.yml @@ -49,7 +49,7 @@ jobs: return parseInt(fs.readFileSync("pr_number.txt", { encoding: "utf8" })) - name: Hide old comments - uses: int128/hide-comment-action@d56df214f3902ffb330ca629acf5051a4e22aee2 # v1.49.0 + uses: int128/hide-comment-action@a218e276fb47d0d526ee989fe02e935a5095417b # v1.50.0 with: token: ${{ secrets.GITHUB_TOKEN }} issue-number: ${{ steps.get-pr-number.outputs.result }} From 09cde51b8e803d28082759e3c46ef809864dae82 Mon Sep 17 00:00:00 2001 From: Alejandro Candioti Date: Mon, 19 Jan 2026 16:36:50 -0800 Subject: [PATCH 1290/1718] Improving binsearch explanation --- numpy/_core/src/npysort/binsearch.cpp | 33 ++++++++++++++++++--------- 1 file changed, 22 insertions(+), 11 deletions(-) diff --git a/numpy/_core/src/npysort/binsearch.cpp b/numpy/_core/src/npysort/binsearch.cpp index 99e2d1771907..3ec4fecef0c6 100644 --- a/numpy/_core/src/npysort/binsearch.cpp +++ b/numpy/_core/src/npysort/binsearch.cpp @@ -75,15 +75,16 @@ binsearch(const char *arr, const char *key, char *ret, npy_intp arr_len, /* In this binary search implementation, the candidate insertion indices for - the jth key are in the range [base_j, base_j+length] and on each iteration - we pick a pivot at the mid-point of the range to 
compare against the jth - key. Depending on the comparison result, we adjust the base_j and halve - the length of the interval. + the j-th key are in the range [base_j, base_j+length] and on each + iteration we pick a pivot at the mid-point of the range to compare against + the j-th key. Depending on the comparison result, we adjust the base_j and + halve the length of the interval. - To batch multiple queries, we do one pivot pass for each different length - for all keys, storing intermediate values of base_j for every key_j. To - avoid consuming extra memory, we use the ret array to store intermediate - values of each base until they become the final result in the last step. + To batch multiple queries, we process all bases with a fixed length. The + length is halved on each iteration of an outer loop and all bases are + updated in an inner loop. To avoid consuming extra memory, we use the + result array to store intermediate values of each base until they become + the final result in the last step. There are two benefits of this approach: @@ -100,9 +101,13 @@ binsearch(const char *arr, const char *key, char *ret, npy_intp arr_len, different keys. Meaning that the CPU can execute multiple keys out-of-order in parallel. - Invariant: - - cmp(arr[i], key_val) == true for all i < base - - cmp(arr[i], key_val) == false for all i >= base + length + Invariant (for every j): + - cmp(arr[i], key_val_j) == true for all i < base_j + - cmp(arr[i], key_val_j) == false for all i >= base_j + length + + where cmp(a, b) operator depends on side input: + - For side = "left", cmp operator is < + - For side = "right", cmp operator is <= The insertion index candidates are in range [base, base+length] and on each iteration we shrink the range into either @@ -110,6 +115,12 @@ binsearch(const char *arr, const char *key, char *ret, npy_intp arr_len, or [base + floor(length / 2), ceil(length / 2)] + The outer loop terminates when length = 1. 
At that point, for each j + the insertion order is either base_j or base_j + 1. An additional + comparison is required to determine which of the two values. + If cmp(arr[base_j], key_val_j) == true, insertion index is base_j + 1. + Otherwise the insertion order is base_j. + Optimization: we unroll the first iteration for the following reasons: 1. ret is not initialized with the bases, so we save |keys| writes by not having to intialize it with 0s. From fba740c0a0f7ba9ff70be71a506bd6dd4a7efa65 Mon Sep 17 00:00:00 2001 From: Kumar Aditya Date: Tue, 20 Jan 2026 16:12:32 +0530 Subject: [PATCH 1291/1718] BUG: fix alloc tls destruction on gcc (#30673) Co-authored-by: Sebastian Berg --- numpy/_core/src/multiarray/alloc.cpp | 26 +++++++++++++++++++++++--- 1 file changed, 23 insertions(+), 3 deletions(-) diff --git a/numpy/_core/src/multiarray/alloc.cpp b/numpy/_core/src/multiarray/alloc.cpp index 8684cb1b71b7..e2b632616a6f 100644 --- a/numpy/_core/src/multiarray/alloc.cpp +++ b/numpy/_core/src/multiarray/alloc.cpp @@ -51,10 +51,27 @@ typedef struct { void * ptrs[NCACHE]; } cache_bucket; -static NPY_TLS cache_bucket datacache[NBUCKETS]; -static NPY_TLS cache_bucket dimcache[NBUCKETS_DIM]; - +static NPY_TLS cache_bucket _datacache[NBUCKETS]; +static NPY_TLS cache_bucket _dimcache[NBUCKETS_DIM]; + +// See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=61991 +// gcc has a bug where if the thread local variable +// is unused then in some cases it's destructor may not get +// called at thread exit. So to workaround this, we access the +// datacache and dimcache through this struct so that +// cache_destructor gets initialized and used, ensuring that +// the destructor gets called properly at thread exit. +// The datacache and dimcache are not embedded in this struct +// because that would make this struct very large and certain +// platforms like armhf can crash while allocating that large +// TLS block. 
typedef struct cache_destructor { + cache_bucket *dimcache; + cache_bucket *datacache; + cache_destructor() { + dimcache = &_dimcache[0]; + datacache = &_datacache[0]; + } ~cache_destructor() { for (npy_uint i = 0; i < NBUCKETS; ++i) { while (datacache[i].available > 0) { @@ -71,6 +88,9 @@ typedef struct cache_destructor { static NPY_TLS cache_destructor tls_cache_destructor; +#define datacache tls_cache_destructor.datacache +#define dimcache tls_cache_destructor.dimcache + /* * This function tells whether NumPy attempts to call `madvise` with * `MADV_HUGEPAGE`. `madvise` is only ever used on linux, so the value From 90f6cc1b9ea852058e9fd1debce88173fd958b60 Mon Sep 17 00:00:00 2001 From: Daniel Tang Date: Tue, 20 Jan 2026 12:39:34 -0500 Subject: [PATCH 1292/1718] DOC: __array_namespace__info__: set_module not __module__ (#30679) --- numpy/_array_api_info.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/_array_api_info.py b/numpy/_array_api_info.py index 067e38798718..41adb835433d 100644 --- a/numpy/_array_api_info.py +++ b/numpy/_array_api_info.py @@ -24,8 +24,10 @@ uint32, uint64, ) +from numpy._utils import set_module +@set_module('numpy') class __array_namespace_info__: """ Get the array API inspection namespace for NumPy. @@ -58,8 +60,6 @@ class __array_namespace_info__: """ - __module__ = 'numpy' - def capabilities(self): """ Return a dictionary of array API library capabilities. From f7f1aae1a53f3be599e1af6e0f28ed629f561754 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 20 Jan 2026 17:53:23 +0000 Subject: [PATCH 1293/1718] MAINT: Bump conda-incubator/setup-miniconda from 3.2.0 to 3.3.0 Bumps [conda-incubator/setup-miniconda](https://github.com/conda-incubator/setup-miniconda) from 3.2.0 to 3.3.0. 
- [Release notes](https://github.com/conda-incubator/setup-miniconda/releases) - [Changelog](https://github.com/conda-incubator/setup-miniconda/blob/main/CHANGELOG.md) - [Commits](https://github.com/conda-incubator/setup-miniconda/compare/835234971496cad1653abb28a638a281cf32541f...fc2d68f6413eb2d87b895e92f8584b5b94a10167) --- updated-dependencies: - dependency-name: conda-incubator/setup-miniconda dependency-version: 3.3.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/macos.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index a7d18daec4b5..940bf06e68a4 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -59,7 +59,7 @@ jobs: ${{ github.workflow }}-${{ matrix.python-version }}-ccache-macos- - name: Setup Miniforge - uses: conda-incubator/setup-miniconda@835234971496cad1653abb28a638a281cf32541f # v3.2.0 + uses: conda-incubator/setup-miniconda@fc2d68f6413eb2d87b895e92f8584b5b94a10167 # v3.3.0 with: python-version: ${{ matrix.python-version }} channels: conda-forge From 4567f0bc3cd1e8486c575faac40f9b09f454f2de Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Tue, 20 Jan 2026 11:06:28 -0700 Subject: [PATCH 1294/1718] MAINT: update pins in requirements files --- requirements/build_requirements.txt | 2 +- requirements/ci32_requirements.txt | 2 +- requirements/ci_requirements.txt | 2 +- requirements/doc_requirements.txt | 4 ++-- requirements/linter_requirements.txt | 2 +- requirements/test_requirements.txt | 6 +++--- 6 files changed, 9 insertions(+), 9 deletions(-) diff --git a/requirements/build_requirements.txt b/requirements/build_requirements.txt index 1f6eb1435cfc..18db99508a09 100644 --- a/requirements/build_requirements.txt +++ b/requirements/build_requirements.txt @@ -1,5 +1,5 @@ meson-python>=0.13.1 Cython>=3.0.6 ninja -spin==0.15 +spin build diff --git 
a/requirements/ci32_requirements.txt b/requirements/ci32_requirements.txt index 824934787e10..743266e528ab 100644 --- a/requirements/ci32_requirements.txt +++ b/requirements/ci32_requirements.txt @@ -1,3 +1,3 @@ -spin==0.15 +spin # Keep this in sync with ci_requirements.txt scipy-openblas32==0.3.30.0.7 diff --git a/requirements/ci_requirements.txt b/requirements/ci_requirements.txt index 37e685fef0cc..66d1866f30c5 100644 --- a/requirements/ci_requirements.txt +++ b/requirements/ci_requirements.txt @@ -1,4 +1,4 @@ -spin==0.15 +spin # Keep this in sync with ci32_requirements.txt scipy-openblas32==0.3.30.0.7 scipy-openblas64==0.3.30.0.7 diff --git a/requirements/doc_requirements.txt b/requirements/doc_requirements.txt index b8f5cb2bd8fd..f7a347df135f 100644 --- a/requirements/doc_requirements.txt +++ b/requirements/doc_requirements.txt @@ -1,6 +1,6 @@ # doxygen required, use apt-get or dnf -sphinx==7.2.6 -numpydoc==1.4 +sphinx==9.1.0 +numpydoc==1.10.0 pydata-sphinx-theme>=0.15.2 sphinx-copybutton sphinx-design diff --git a/requirements/linter_requirements.txt b/requirements/linter_requirements.txt index 73eafbaf52a1..497bd07f403c 100644 --- a/requirements/linter_requirements.txt +++ b/requirements/linter_requirements.txt @@ -1,4 +1,4 @@ # keep in sync with `environment.yml` cython-lint -ruff==0.14.7 +ruff==0.14.13 GitPython>=3.1.30 diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index e3b17f0fc856..dd036f5e1267 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -1,7 +1,7 @@ Cython -hypothesis==6.142.2 -pytest==7.4.0 -pytest-cov==4.1.0 +hypothesis==6.150.2 +pytest==9.0.2 +pytest-cov==7.0.0 meson ninja; sys_platform != "emscripten" pytest-xdist From 950cee2ac6fa66680c3490a4212cbaaf6d82acb8 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Tue, 20 Jan 2026 11:18:28 -0700 Subject: [PATCH 1295/1718] MAINT: fix linter errors --- numpy/_core/tests/test_stringdtype.py | 2 +- numpy/f2py/rules.py | 
12 ++++++------ numpy/polynomial/tests/test_printing.py | 13 +++++++------ numpy/testing/_private/utils.py | 4 ++-- 4 files changed, 16 insertions(+), 15 deletions(-) diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index 492894087aa9..e8bc7c886d4e 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -1457,7 +1457,7 @@ def test_unary(string_array, unicode_array, function_name): "strip", "lstrip", "rstrip", - "replace" + "replace", "zfill", ] diff --git a/numpy/f2py/rules.py b/numpy/f2py/rules.py index 68c49e60028e..77b703017537 100644 --- a/numpy/f2py/rules.py +++ b/numpy/f2py/rules.py @@ -1015,8 +1015,8 @@ 'frompyobj': [{hasinitvalue: ' if (#varname#_capi==Py_None) {#varname#.r = #init.r#, #varname#.i = #init.i#;} else'}, {l_and(isoptional, l_not(hasinitvalue)) : ' if (#varname#_capi != Py_None)'}, - ' f2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#");' - '\n if (f2py_success) {'], + (' f2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#");' + '\n if (f2py_success) {')], 'cleanupfrompyobj': ' } /*if (f2py_success) of #varname# frompyobj*/', 'need': ['#ctype#_from_pyobj'], '_check': l_and(iscomplex, isintent_nothide), @@ -1057,13 +1057,13 @@ {l_and(isintent_out, l_not(isintent_c)): 'STRINGPADN'}], '_check': isstring }, { # Common - 'frompyobj': [ + 'frompyobj': [( """\ slen(#varname#) = #elsize#; f2py_success = #ctype#_from_pyobj(&#varname#,&slen(#varname#),#init#,""" """#varname#_capi,\"#ctype#_from_pyobj failed in converting #nth#""" """`#varname#\' of #pyname# to C #ctype#\"); - if (f2py_success) {""", + if (f2py_success) {"""), # The trailing null value for Fortran is blank. 
{l_not(isintent_c): " STRINGPADN(#varname#, slen(#varname#), '\\0', ' ');"}, @@ -1201,8 +1201,8 @@ if (f2py_success) {"""]}, ], 'cleanupfrompyobj': [ # note that this list will be reversed - ' } ' - '/* if (capi_#varname#_as_array == NULL) ... else of #varname# */', + (' } ' + '/* if (capi_#varname#_as_array == NULL) ... else of #varname# */'), {l_not(l_or(isintent_out, isintent_hide)): """\ if((PyObject *)capi_#varname#_as_array!=#varname#_capi) { Py_XDECREF(capi_#varname#_as_array); }"""}, diff --git a/numpy/polynomial/tests/test_printing.py b/numpy/polynomial/tests/test_printing.py index f7d0131c94a9..b41e73a28a01 100644 --- a/numpy/polynomial/tests/test_printing.py +++ b/numpy/polynomial/tests/test_printing.py @@ -491,12 +491,13 @@ def test_numeric_object_coefficients(self): '1.23 + 0.12 x + (1.23e-02) x**2 + (1.23e-03) x**3', '1.235 + 0.123 x + (1.235e-02) x**2 + (1.235e-03) x**3', '1.2346 + 0.1235 x + 0.0123 x**2 + (1.2346e-03) x**3 + (1.2346e-04) x**4', - '1.23457 + 0.12346 x + 0.01235 x**2 + (1.23457e-03) x**3 + ' - '(1.23457e-04) x**4', - '1.234568 + 0.123457 x + 0.012346 x**2 + 0.001235 x**3 + ' - '(1.234568e-04) x**4 + (1.234568e-05) x**5', - '1.2345679 + 0.1234568 x + 0.0123457 x**2 + 0.0012346 x**3 + ' - '(1.2345679e-04) x**4 + (1.2345679e-05) x**5') + ('1.23457 + 0.12346 x + 0.01235 x**2 + (1.23457e-03) x**3 + ' + '(1.23457e-04) x**4'), + ('1.234568 + 0.123457 x + 0.012346 x**2 + 0.001235 x**3 + ' + '(1.234568e-04) x**4 + (1.234568e-05) x**5'), + ('1.2345679 + 0.1234568 x + 0.0123457 x**2 + 0.0012346 x**3 + ' + '(1.2345679e-04) x**4 + (1.2345679e-05) x**5') +) class TestPrintOptions: """ diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index 2ec9dc8dea2d..2919332eb54e 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -905,8 +905,8 @@ def assert_same_inf_values(x, y, infs_mask): n_mismatch = reduced.size - reduced.sum(dtype=intp) n_elements = flagged.size if flagged.ndim != 0 else 
reduced.size percent_mismatch = 100 * n_mismatch / n_elements - remarks = [f'Mismatched elements: {n_mismatch} / {n_elements} ' - f'({percent_mismatch:.3g}%)'] + remarks = [(f'Mismatched elements: {n_mismatch} / {n_elements} ' + f'({percent_mismatch:.3g}%)')] if invalids.ndim != 0: if flagged.ndim > 0: positions = np.argwhere(np.asarray(~flagged))[invalids] From fc2bea5609ebe804f22833808f6d288b1b4f8aaa Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Tue, 20 Jan 2026 11:45:20 -0700 Subject: [PATCH 1296/1718] MAINT: revert changes to docs pins --- requirements/doc_requirements.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements/doc_requirements.txt b/requirements/doc_requirements.txt index f7a347df135f..b8f5cb2bd8fd 100644 --- a/requirements/doc_requirements.txt +++ b/requirements/doc_requirements.txt @@ -1,6 +1,6 @@ # doxygen required, use apt-get or dnf -sphinx==9.1.0 -numpydoc==1.10.0 +sphinx==7.2.6 +numpydoc==1.4 pydata-sphinx-theme>=0.15.2 sphinx-copybutton sphinx-design From 86a74fbf6866b90761b7ba54496fa29d4438a292 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Tue, 20 Jan 2026 11:54:28 -0700 Subject: [PATCH 1297/1718] MAINT: Run dependabot on the requirements files If this works, we may want to run on the tools files as well. 
--- .github/dependabot.yml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 171f3019883a..71f04fb1a8a1 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -10,3 +10,12 @@ updates: - "03 - Maintenance" ignore: - dependency-name: "bus1/cabuild" +updates: + - package-ecosystem: pip + directory: /requirements + schedule: + interval: daily + commit-message: + prefix: "MAINT" + labels: + - "03 - Maintenance" From a5818d9880c6ad9ecb4c0d7faf2578b338819476 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Tue, 20 Jan 2026 12:05:15 -0700 Subject: [PATCH 1298/1718] TST: skip deep recursion tests on 32 bit --- numpy/testing/_private/utils.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index 2919332eb54e..c7966360d3f5 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -2848,6 +2848,8 @@ def wrapper(*args, **kwargs): pytest.skip("Pyston disables recursion checking") if IS_WASM: pytest.skip("WASM has limited stack size") + if not IS_64BIT: + pytest.skip("32 bit Python has limited stack size") cflags = sysconfig.get_config_var('CFLAGS') or '' config_args = sysconfig.get_config_var('CONFIG_ARGS') or '' address_sanitizer = ( From 4ca820dbc30a1fe00851ab111724c52dfb820375 Mon Sep 17 00:00:00 2001 From: Vikram Kumar Date: Wed, 21 Jan 2026 00:56:33 +0530 Subject: [PATCH 1299/1718] BUG: validate contraction axes in tensordot (#30521) --- numpy/_core/numeric.py | 15 ++++++++++++++- numpy/_core/tests/test_numeric.py | 6 ++++++ 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/numpy/_core/numeric.py b/numpy/_core/numeric.py index b79cc03c1320..fd9737a3fe72 100644 --- a/numpy/_core/numeric.py +++ b/numpy/_core/numeric.py @@ -1022,7 +1022,8 @@ def tensordot(a, b, axes=2): * (2,) array_like Or, a list of axes to be summed over, first sequence applying to `a`, second to `b`. 
Both elements array_like must be of the same length. - + Each axis may appear at most once; repeated axes are not allowed. + For example, ``axes=([1, 1], [0, 0])`` is invalid. Returns ------- output : ndarray @@ -1053,6 +1054,13 @@ def tensordot(a, b, axes=2): first in both sequences, the second axis second, and so forth. The calculation can be referred to ``numpy.einsum``. + For example, if ``a.shape == (2, 3, 4)`` and ``b.shape == (3, 4, 5)``, + then ``axes=([1, 2], [0, 1])`` sums over the ``(3, 4)`` dimensions of + both arrays and produces an output of shape ``(2, 5)``. + + Each summation axis corresponds to a distinct contraction index; repeating + an axis (for example ``axes=([1, 1], [0, 0])``) is invalid. + The shape of the result consists of the non-contracted axes of the first tensor, followed by the non-contracted axes of the second. @@ -1170,6 +1178,11 @@ def tensordot(a, b, axes=2): axes_b = [axes_b] nb = 1 + if len(set(axes_a)) != len(axes_a): + raise ValueError("duplicate axes are not allowed in tensordot") + if len(set(axes_b)) != len(axes_b): + raise ValueError("duplicate axes are not allowed in tensordot") + a, b = asarray(a), asarray(b) as_ = a.shape nda = a.ndim diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py index 15869f522f73..664b7aef257d 100644 --- a/numpy/_core/tests/test_numeric.py +++ b/numpy/_core/tests/test_numeric.py @@ -4209,6 +4209,12 @@ def test_raise(self): class TestTensordot: + def test_rejects_duplicate_axes(self): + a = np.ones((2, 3, 3)) + b = np.ones((3, 3, 4)) + with pytest.raises(ValueError): + np.tensordot(a, b, axes=([1, 1], [0, 0])) + def test_zero_dimension(self): # Test resolution to issue #5663 a = np.ndarray((3, 0)) From 754cb2ca3da89375a606f9e3d9e39bdeeeb10d46 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Tue, 20 Jan 2026 13:49:53 -0700 Subject: [PATCH 1300/1718] BUG: fix free-threaded PyObject layout in replace_scalar_type_names helper (#30686) --- doc/source/conf.py | 21 
+++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/doc/source/conf.py b/doc/source/conf.py index f6e7fc57bde7..77fde4920f9f 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -2,6 +2,7 @@ import os import re import sys +import sysconfig from datetime import datetime from docutils import nodes @@ -17,6 +18,9 @@ # must be kept alive to hold the patched names _name_cache = {} +FREE_THREADED_BUILD = sysconfig.get_config_var('Py_GIL_DISABLED') + + def replace_scalar_type_names(): """ Rename numpy types to use the canonical names to make sphinx behave """ import ctypes @@ -30,10 +34,19 @@ class PyObject(ctypes.Structure): class PyTypeObject(ctypes.Structure): pass - PyObject._fields_ = [ - ('ob_refcnt', Py_ssize_t), - ('ob_type', ctypes.POINTER(PyTypeObject)), - ] + if not FREE_THREADED_BUILD: + PyObject._fields_ = [ + ('ob_refcnt', Py_ssize_t), + ('ob_type', ctypes.POINTER(PyTypeObject)), + ] + else: + # As of Python 3.14 + PyObject._fields_ = [ + ('ob_refcnt_full', ctypes.c_int64), + # an anonymous struct that we don't try to model + ('__private', ctypes.c_int64), + ('ob_type', ctypes.POINTER(PyTypeObject)), + ] PyTypeObject._fields_ = [ # varhead From 066e7d24bd0e6042a3d226f7b0d9ace929983b8e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 20 Jan 2026 21:02:35 +0000 Subject: [PATCH 1301/1718] MAINT: Bump pytest from 7.4.0 to 9.0.2 in /requirements Bumps [pytest](https://github.com/pytest-dev/pytest) from 7.4.0 to 9.0.2. - [Release notes](https://github.com/pytest-dev/pytest/releases) - [Changelog](https://github.com/pytest-dev/pytest/blob/main/CHANGELOG.rst) - [Commits](https://github.com/pytest-dev/pytest/compare/7.4.0...9.0.2) --- updated-dependencies: - dependency-name: pytest dependency-version: 9.0.2 dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] --- requirements/emscripten_test_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/emscripten_test_requirements.txt b/requirements/emscripten_test_requirements.txt index 019a69da687a..209d812c44c2 100644 --- a/requirements/emscripten_test_requirements.txt +++ b/requirements/emscripten_test_requirements.txt @@ -1,4 +1,4 @@ hypothesis==6.81.1 -pytest==7.4.0 +pytest==9.0.2 tzdata pytest-xdist From 5411dfc38370c33606164d587a957344d15c05b5 Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Wed, 21 Jan 2026 08:23:41 +1100 Subject: [PATCH 1302/1718] MAINT: update OpenBLAS --- requirements/ci32_requirements.txt | 2 +- requirements/ci_requirements.txt | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/requirements/ci32_requirements.txt b/requirements/ci32_requirements.txt index 743266e528ab..6f396a0d6a06 100644 --- a/requirements/ci32_requirements.txt +++ b/requirements/ci32_requirements.txt @@ -1,3 +1,3 @@ spin # Keep this in sync with ci_requirements.txt -scipy-openblas32==0.3.30.0.7 +scipy-openblas32==0.3.31.22.0 diff --git a/requirements/ci_requirements.txt b/requirements/ci_requirements.txt index 66d1866f30c5..0b57a13dff06 100644 --- a/requirements/ci_requirements.txt +++ b/requirements/ci_requirements.txt @@ -1,4 +1,4 @@ spin # Keep this in sync with ci32_requirements.txt -scipy-openblas32==0.3.30.0.7 -scipy-openblas64==0.3.30.0.7 +scipy-openblas32==0.3.31.22.0 +scipy-openblas64==0.3.31.22.0 From f78a62d470cc32e3a110b2564ff327bb6ad58969 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Tue, 20 Jan 2026 14:00:35 -0700 Subject: [PATCH 1303/1718] MAINT: Exclude some dependencies from dependabot. 
- dependency-name: "scipy-openblas32" - dependency-name: "scipy-openblas64" - dependency-name: "jupyterlite-pyodide-kernel" - dependency-name: "sphinx" [skip actions] [skip azp] [skip cirrus] --- .github/dependabot.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 71f04fb1a8a1..4a5d738e6613 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -19,3 +19,8 @@ updates: prefix: "MAINT" labels: - "03 - Maintenance" + ignore: + - dependency-name: "scipy-openblas32" + - dependency-name: "scipy-openblas64" + - dependency-name: "jupyterlite-pyodide-kernel" + - dependency-name: "sphinx" From 5e9a8acc22adb31d59943cfaaa5f10d33e5f9367 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 20 Jan 2026 23:50:06 +0000 Subject: [PATCH 1304/1718] MAINT: Bump numpydoc from 1.4 to 1.10.0 in /requirements Bumps [numpydoc](https://github.com/numpy/numpydoc) from 1.4 to 1.10.0. - [Release notes](https://github.com/numpy/numpydoc/releases) - [Changelog](https://github.com/numpy/numpydoc/blob/main/RELEASE.rst) - [Commits](https://github.com/numpy/numpydoc/compare/v1.4.0...v1.10.0) --- updated-dependencies: - dependency-name: numpydoc dependency-version: 1.10.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- requirements/doc_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/doc_requirements.txt b/requirements/doc_requirements.txt index b8f5cb2bd8fd..ea75103117a3 100644 --- a/requirements/doc_requirements.txt +++ b/requirements/doc_requirements.txt @@ -1,6 +1,6 @@ # doxygen required, use apt-get or dnf sphinx==7.2.6 -numpydoc==1.4 +numpydoc==1.10.0 pydata-sphinx-theme>=0.15.2 sphinx-copybutton sphinx-design From 7e0b2f8905b2d6a14439c78b2e47562a9dcfff54 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 20 Jan 2026 23:50:21 +0000 Subject: [PATCH 1305/1718] MAINT: Bump hypothesis from 6.81.1 to 6.150.2 in /requirements Bumps [hypothesis](https://github.com/HypothesisWorks/hypothesis) from 6.81.1 to 6.150.2. - [Release notes](https://github.com/HypothesisWorks/hypothesis/releases) - [Commits](https://github.com/HypothesisWorks/hypothesis/compare/hypothesis-python-6.81.1...hypothesis-python-6.150.2) --- updated-dependencies: - dependency-name: hypothesis dependency-version: 6.150.2 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- requirements/emscripten_test_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/emscripten_test_requirements.txt b/requirements/emscripten_test_requirements.txt index 209d812c44c2..062ac0f1e7f5 100644 --- a/requirements/emscripten_test_requirements.txt +++ b/requirements/emscripten_test_requirements.txt @@ -1,4 +1,4 @@ -hypothesis==6.81.1 +hypothesis==6.150.2 pytest==9.0.2 tzdata pytest-xdist From 4d050125e1a6fc0eb0ed6da72d7d0e3e77c3fec1 Mon Sep 17 00:00:00 2001 From: Wei Bo Gao Date: Wed, 21 Jan 2026 13:31:17 +0800 Subject: [PATCH 1306/1718] MAINT: Fix typo 'paramterize' in test_multiarray.py --- numpy/_core/tests/test_multiarray.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 62f3bd4a77c4..017d7a29f1ce 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -5681,7 +5681,7 @@ def _create_data(self): def param_filename(self, request): # This fixtures returns string or path_obj # so that every test doesn't need to have the - # paramterize marker. + # parametrize marker. 
return request.param def test_nofile(self): From 9d2bed22fa7462f8de83038aaba833dd426a4fd4 Mon Sep 17 00:00:00 2001 From: Nandika P T Date: Wed, 21 Jan 2026 18:13:54 +0530 Subject: [PATCH 1307/1718] Update CONTRIBUTING.rst Co-authored-by: Matti Picus --- CONTRIBUTING.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 6f7a984ac314..c7fbc48a36e7 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -21,7 +21,8 @@ Spin: NumPy’s developer tool ---------------------------- NumPy uses a command-line tool called ``spin`` to support common development -tasks such as running tests, building documentation, and managing other +tasks such as building from source, running tests, building documentation, +and managing other developer workflows. The ``spin`` tool provides a consistent interface for contributors working on From 7e90513d9c7c09de14f139a9b3cac224f8c94c70 Mon Sep 17 00:00:00 2001 From: Nandika P T Date: Wed, 21 Jan 2026 18:14:34 +0530 Subject: [PATCH 1308/1718] Update CONTRIBUTING.rst Co-authored-by: Matti Picus --- CONTRIBUTING.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index c7fbc48a36e7..71af6fcf93e0 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -17,6 +17,8 @@ For more, please read https://www.numpy.org/devdocs/dev/index.html Thank you for contributing, and happy coding! +.. 
_spin_tool: + Spin: NumPy’s developer tool ---------------------------- From 633fe3ffe61dc9528be99fb822aaece332b6de4a Mon Sep 17 00:00:00 2001 From: Nandika P T Date: Wed, 21 Jan 2026 18:15:27 +0530 Subject: [PATCH 1309/1718] Update CONTRIBUTING.rst Co-authored-by: Matti Picus --- CONTRIBUTING.rst | 2 -- 1 file changed, 2 deletions(-) diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 71af6fcf93e0..ef3a67552ed0 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -31,5 +31,3 @@ The ``spin`` tool provides a consistent interface for contributors working on NumPy itself, wrapping multiple underlying tools and configurations into a single command that follows NumPy’s development conventions. -Most contributors will interact with ``spin`` when running tests locally, -building the documentation, or preparing changes for review. From 0f17a89ce8c0f344c37f4bb066d4d71391705955 Mon Sep 17 00:00:00 2001 From: Nandika P T Date: Wed, 21 Jan 2026 18:30:09 +0530 Subject: [PATCH 1310/1718] Revise spin utility description in TESTS.rst Updated reference to the spin utility in the documentation. --- doc/TESTS.rst | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/doc/TESTS.rst b/doc/TESTS.rst index d98c78e4bed2..f0cd063687fd 100644 --- a/doc/TESTS.rst +++ b/doc/TESTS.rst @@ -63,11 +63,9 @@ example, the ``_core`` module, use the following:: Running tests from the command line ----------------------------------- -If you want to build NumPy in order to work on NumPy itself, use the ``spin`` -utility.The spin utility is NumPy’s developer command-line tool used when -working with a local source checkout of NumPy. It provides convenient wrappers -around common development tasks such as building NumPy, running tests, and -checking documentation, ensuring the correct build environment is used. +If you want to build NumPy in order to work on NumPy itself, use the +:ref:`spin utility `. 
+ To run NumPy's full test suite:: $ spin test -m full From 2f0c8985675a2fa5e29068d0ac57bb028df938f0 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Wed, 21 Jan 2026 17:59:14 +0100 Subject: [PATCH 1311/1718] TST: fix limited API example in tests for latest Cython Cython 3.3 will require a minimum target of 3.9 for the limited API. There is no reason for us to still test 3.6/3.7 Remove `#define Py_LIMITED_API` from a C source file, that's not a good habit and it requires manually keeping the version in sync between the C file and the meson.build file. Also fix warnings like these: ``` WARNING: Project specifies no minimum version but uses features which were added in versions: * 1.3.0: {'limited_api arg in python.extension_module'} ``` by requiring a recent Meson version. --- numpy/_core/tests/examples/limited_api/limited_api1.c | 2 -- numpy/_core/tests/examples/limited_api/meson.build | 10 +++++++--- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/numpy/_core/tests/examples/limited_api/limited_api1.c b/numpy/_core/tests/examples/limited_api/limited_api1.c index 3dbf5698f1d4..115a3f3a6835 100644 --- a/numpy/_core/tests/examples/limited_api/limited_api1.c +++ b/numpy/_core/tests/examples/limited_api/limited_api1.c @@ -1,5 +1,3 @@ -#define Py_LIMITED_API 0x03060000 - #include #include #include diff --git a/numpy/_core/tests/examples/limited_api/meson.build b/numpy/_core/tests/examples/limited_api/meson.build index 65287d8654f5..2348b0856d0f 100644 --- a/numpy/_core/tests/examples/limited_api/meson.build +++ b/numpy/_core/tests/examples/limited_api/meson.build @@ -1,4 +1,8 @@ -project('checks', 'c', 'cython') +project( + 'checks', + 'c', 'cython', + meson_version: '>=1.8.3', +) py = import('python').find_installation(pure: false) @@ -31,7 +35,7 @@ py.extension_module( '-DNPY_NO_DEPRECATED_API=NPY_1_21_API_VERSION', ], include_directories: [npy_include_path], - limited_api: '3.6', + limited_api: '3.9', ) py.extension_module( @@ -55,5 +59,5 @@ 
py.extension_module( '-DCYTHON_LIMITED_API=1', ], include_directories: [npy_include_path], - limited_api: '3.7', + limited_api: '3.9', ) From 91029c17e00165c39d03cb17877c62c4887f66ee Mon Sep 17 00:00:00 2001 From: Iason Krommydas Date: Wed, 21 Jan 2026 18:42:31 +0100 Subject: [PATCH 1312/1718] ENH: enable `Py_TPFLAGS_SEQUENCE` for the ndarray object (#30653) Co-authored-by: Nathan Goldbaum --- .github/workflows/macos.yml | 2 +- .../upcoming_changes/30653.new_feature.rst | 6 +++ doc/source/reference/arrays.ndarray.rst | 24 ++++++++++ numpy/_core/src/multiarray/arrayobject.c | 2 +- numpy/_core/tests/test_memmap.py | 48 +++++++++++++++++++ numpy/_core/tests/test_multiarray.py | 42 ++++++++++++++++ numpy/_core/tests/test_records.py | 48 +++++++++++++++++++ numpy/ma/tests/test_core.py | 45 +++++++++++++++++ numpy/matrixlib/tests/test_defmatrix.py | 22 +++++++++ 9 files changed, 237 insertions(+), 2 deletions(-) create mode 100644 doc/release/upcoming_changes/30653.new_feature.rst diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 940bf06e68a4..689e775b6aa3 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -155,4 +155,4 @@ jobs: if: ${{ matrix.version == '3.14t' && matrix.build_runner[0] == 'macos-14' }} run: | pip install pytest-run-parallel==0.8.2 - spin test -p 4 -- --timeout=600 --durations=10 + spin test -p 4 -- -sv --timeout=600 --durations=10 diff --git a/doc/release/upcoming_changes/30653.new_feature.rst b/doc/release/upcoming_changes/30653.new_feature.rst new file mode 100644 index 000000000000..0b79fd25fc77 --- /dev/null +++ b/doc/release/upcoming_changes/30653.new_feature.rst @@ -0,0 +1,6 @@ +``numpy.ndarray`` now supports structural pattern matching +---------------------------------------------------------- +`numpy.ndarray` and its subclasses now have the ``Py_TPFLAGS_SEQUENCE`` flag +set, enabling structural pattern matching (PEP 634) with ``match``/``case`` +statements. 
This also enables Cython to optimize integer indexing operations. +See :ref:`arrays.ndarray.pattern-matching` for details. diff --git a/doc/source/reference/arrays.ndarray.rst b/doc/source/reference/arrays.ndarray.rst index 4dca5b541a38..17922bbb7a44 100644 --- a/doc/source/reference/arrays.ndarray.rst +++ b/doc/source/reference/arrays.ndarray.rst @@ -618,3 +618,27 @@ Utility method for typing: :toctree: generated/ ndarray.__class_getitem__ + +.. _arrays.ndarray.pattern-matching: + +Structural pattern matching +=========================== + +Arrays support :pep:`structural pattern matching <634>`. The array is matched +as a sequence, so you can unpack arrays along the first dimension in +``match``/``case`` statements:: + + >>> arr = np.array([[1, 2], [3, 4]]) + >>> match arr: + ... case [row1, row2]: + ... print(f"row1={row1}, row2={row2}") + row1=[1 2], row2=[3 4] + +Nested patterns work too, matching inner dimensions:: + + >>> match arr: + ... case [[a, b], [c, d]]: + ... print(f"a={a}, b={b}, c={c}, d={d}") + a=1, b=2, c=3, d=4 + +All ndarray subclasses inherit this behavior. 
diff --git a/numpy/_core/src/multiarray/arrayobject.c b/numpy/_core/src/multiarray/arrayobject.c index 4156bd4179cf..fe2ec25558c5 100644 --- a/numpy/_core/src/multiarray/arrayobject.c +++ b/numpy/_core/src/multiarray/arrayobject.c @@ -1232,7 +1232,7 @@ NPY_NO_EXPORT PyTypeObject PyArray_Type = { .tp_as_mapping = &array_as_mapping, .tp_str = (reprfunc)array_str, .tp_as_buffer = &array_as_buffer, - .tp_flags =(Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE), + .tp_flags = (Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_SEQUENCE), .tp_richcompare = (richcmpfunc)array_richcompare, .tp_weaklistoffset = offsetof(PyArrayObject_fields, weakreflist), diff --git a/numpy/_core/tests/test_memmap.py b/numpy/_core/tests/test_memmap.py index 572c15a98507..40222b62020a 100644 --- a/numpy/_core/tests/test_memmap.py +++ b/numpy/_core/tests/test_memmap.py @@ -245,3 +245,51 @@ def test_shape_type(self): memmap(self.tmpfp, shape=self.shape, mode='w+') memmap(self.tmpfp, shape=list(self.shape), mode='w+') memmap(self.tmpfp, shape=asarray(self.shape), mode='w+') + + +class TestPatternMatching: + """Tests for structural pattern matching support (PEP 634).""" + + def test_match_sequence_pattern_1d(self): + with NamedTemporaryFile() as f: + arr = memmap(f, dtype='int64', mode='w+', shape=(3,)) + arr[:] = [1, 2, 3] + match arr: + case [a, b, c]: + assert a == 1 + assert b == 2 + assert c == 3 + case _: + raise AssertionError("1D memmap did not match sequence pattern") + + def test_match_sequence_pattern_2d(self): + with NamedTemporaryFile() as f: + arr = memmap(f, dtype='int64', mode='w+', shape=(2, 2)) + arr[:] = [[1, 2], [3, 4]] + match arr: + case [row1, row2]: + assert_array_equal(row1, [1, 2]) + assert_array_equal(row2, [3, 4]) + case _: + raise AssertionError("2D memmap did not match sequence pattern") + + def test_match_sequence_pattern_3d(self): + with NamedTemporaryFile() as f: + arr = memmap(f, dtype='int64', mode='w+', shape=(2, 2, 2)) + arr[:] = [[[1, 2], [3, 4]], [[5, 6], [7, 
8]]] + # outer matching + match arr: + case [plane1, plane2]: + assert_array_equal(plane1, [[1, 2], [3, 4]]) + assert_array_equal(plane2, [[5, 6], [7, 8]]) + case _: + raise AssertionError("3D memmap did not match sequence pattern") + # inner matching + match arr: + case [[row1, row2], [row3, row4]]: + assert_array_equal(row1, [1, 2]) + assert_array_equal(row2, [3, 4]) + assert_array_equal(row3, [5, 6]) + assert_array_equal(row4, [7, 8]) + case _: + raise AssertionError("3D memmap did not match sequence pattern") diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 471bfb0b6897..b393a21cd839 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -11107,3 +11107,45 @@ def test_add_newdoc_function_signature(self, func, parameter_names): sig = inspect.signature(func) assert sig.parameters assert tuple(sig.parameters) == parameter_names + + +class TestPatternMatching: + """Tests for structural pattern matching support (PEP 634).""" + + def test_match_sequence_pattern_1d(self): + arr = np.array([1, 2, 3]) + match arr: + case [a, b, c]: + assert a == 1 + assert b == 2 + assert c == 3 + case _: + raise AssertionError("1D ndarray did not match sequence pattern") + + def test_match_sequence_pattern_2d(self): + arr = np.array([[1, 2], [3, 4]]) + match arr: + case [row1, row2]: + assert_array_equal(row1, [1, 2]) + assert_array_equal(row2, [3, 4]) + case _: + raise AssertionError("2D ndarray did not match sequence pattern") + + def test_match_sequence_pattern_3d(self): + arr = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) + # outer matching + match arr: + case [plane1, plane2]: + assert_array_equal(plane1, [[1, 2], [3, 4]]) + assert_array_equal(plane2, [[5, 6], [7, 8]]) + case _: + raise AssertionError("3D ndarray did not match sequence pattern") + # inner matching + match arr: + case [[row1, row2], [row3, row4]]: + assert_array_equal(row1, [1, 2]) + assert_array_equal(row2, [3, 4]) + 
assert_array_equal(row3, [5, 6]) + assert_array_equal(row4, [7, 8]) + case _: + raise AssertionError("3D ndarray did not match sequence pattern") diff --git a/numpy/_core/tests/test_records.py b/numpy/_core/tests/test_records.py index 1fbaa0024f38..80f76a865eda 100644 --- a/numpy/_core/tests/test_records.py +++ b/numpy/_core/tests/test_records.py @@ -542,3 +542,51 @@ def test_find_duplicate(): l3 = [2, 2, 1, 4, 1, 6, 2, 3] assert_(np.rec.find_duplicate(l3) == [2, 1]) + + +class TestPatternMatching: + """Tests for structural pattern matching support (PEP 634).""" + + def test_match_sequence_pattern_1d(self): + dt = np.dtype([('x', 'i4'), ('y', 'f8')]) + arr = np.array([(1, 1.5), (2, 2.5), (3, 3.5)], dtype=dt).view(np.recarray) + match arr: + case [a, b, c]: + assert a.x == 1 and a.y == 1.5 + assert b.x == 2 and b.y == 2.5 + assert c.x == 3 and c.y == 3.5 + case _: + raise AssertionError("1D recarray did not match sequence pattern") + + def test_match_sequence_pattern_2d(self): + dt = np.dtype([('x', 'i4'), ('y', 'f8')]) + arr = np.array([[(1, 1.5), (2, 2.5)], [(3, 3.5), (4, 4.5)]], + dtype=dt).view(np.recarray) + match arr: + case [row1, row2]: + assert_array_equal(row1.x, [1, 2]) + assert_array_equal(row2.x, [3, 4]) + case _: + raise AssertionError("2D recarray did not match sequence pattern") + + def test_match_sequence_pattern_3d(self): + dt = np.dtype([('x', 'i4'), ('y', 'f8')]) + arr = np.array([[[(1, 1.5), (2, 2.5)], [(3, 3.5), (4, 4.5)]], + [[(5, 5.5), (6, 6.5)], [(7, 7.5), (8, 8.5)]]], + dtype=dt).view(np.recarray) + # outer matching + match arr: + case [plane1, plane2]: + assert_array_equal(plane1.x, [[1, 2], [3, 4]]) + assert_array_equal(plane2.x, [[5, 6], [7, 8]]) + case _: + raise AssertionError("3D recarray did not match sequence pattern") + # inner matching + match arr: + case [[row1, row2], [row3, row4]]: + assert_array_equal(row1.x, [1, 2]) + assert_array_equal(row2.x, [3, 4]) + assert_array_equal(row3.x, [5, 6]) + assert_array_equal(row4.x, [7, 8]) 
+ case _: + raise AssertionError("3D recarray did not match sequence pattern") diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index ba880461ee1a..067fce2f0777 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -6035,3 +6035,48 @@ def test_frommethod_signature(fn, signature): def test_convert2ma_signature(fn, signature): assert str(inspect.signature(fn)) == signature assert fn.__module__ == 'numpy.ma.core' + + +class TestPatternMatching: + """Tests for structural pattern matching support (PEP 634).""" + + def test_match_sequence_pattern_1d(self): + arr = array([1, 2, 3], mask=[0, 1, 0]) + match arr: + case [a, b, c]: + assert a == 1 + assert b is masked + assert c == 3 + case _: + raise AssertionError("1D MaskedArray did not match sequence pattern") + + def test_match_sequence_pattern_2d(self): + arr = array([[1, 2], [3, 4]], mask=[[0, 1], [1, 0]]) + match arr: + case [row1, row2]: + assert_array_equal(row1, array([1, 2], mask=[0, 1])) + assert_array_equal(row2, array([3, 4], mask=[1, 0])) + case _: + raise AssertionError("2D MaskedArray did not match sequence pattern") + + def test_match_sequence_pattern_3d(self): + arr = array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], + mask=[[[0, 1], [1, 0]], [[1, 0], [0, 1]]]) + # outer matching + match arr: + case [plane1, plane2]: + assert_array_equal(plane1, array([[1, 2], [3, 4]], + mask=[[0, 1], [1, 0]])) + assert_array_equal(plane2, array([[5, 6], [7, 8]], + mask=[[1, 0], [0, 1]])) + case _: + raise AssertionError("3D MaskedArray did not match sequence pattern") + # inner matching + match arr: + case [[row1, row2], [row3, row4]]: + assert_array_equal(row1, array([1, 2], mask=[0, 1])) + assert_array_equal(row2, array([3, 4], mask=[1, 0])) + assert_array_equal(row3, array([5, 6], mask=[1, 0])) + assert_array_equal(row4, array([7, 8], mask=[0, 1])) + case _: + raise AssertionError("3D MaskedArray did not match sequence pattern") diff --git 
a/numpy/matrixlib/tests/test_defmatrix.py b/numpy/matrixlib/tests/test_defmatrix.py index b538973726b5..93154c3c2207 100644 --- a/numpy/matrixlib/tests/test_defmatrix.py +++ b/numpy/matrixlib/tests/test_defmatrix.py @@ -451,3 +451,25 @@ def test_expand_dims_matrix(self): expanded = np.expand_dims(a, axis=1) assert_equal(expanded.ndim, 3) assert_(not isinstance(expanded, np.matrix)) + + +class TestPatternMatching: + """Tests for structural pattern matching support (PEP 634).""" + + def test_match_sequence_pattern_2d(self): + # matrix is always 2D, so rows are (1, N) matrices not 1D arrays + arr = matrix([[1, 2], [3, 4]]) + # outer matching + match arr: + case [row1, row2]: + assert_array_equal(row1, [[1, 2]]) + assert_array_equal(row2, [[3, 4]]) + case _: + raise AssertionError("2D matrix did not match sequence pattern") + # inner matching - rows are still 2D matrices, not scalars + match arr: + case [[a], [b]]: + assert_array_equal(a, [[1, 2]]) + assert_array_equal(b, [[3, 4]]) + case _: + raise AssertionError("2D matrix did not match sequence pattern") From 8c2d84be56e34f86dc0a142f75825451d8f46268 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Wed, 21 Jan 2026 15:38:30 -0700 Subject: [PATCH 1313/1718] MAINT: replace ob_type access with Py_TYPE in PyArray_CheckExact --- numpy/_core/include/numpy/ndarrayobject.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/include/numpy/ndarrayobject.h b/numpy/_core/include/numpy/ndarrayobject.h index f06bafe5b52a..6bfc40fa6e2f 100644 --- a/numpy/_core/include/numpy/ndarrayobject.h +++ b/numpy/_core/include/numpy/ndarrayobject.h @@ -32,7 +32,7 @@ extern "C" { #define PyArray_DescrCheck(op) PyObject_TypeCheck(op, &PyArrayDescr_Type) #define PyArray_Check(op) PyObject_TypeCheck(op, &PyArray_Type) -#define PyArray_CheckExact(op) (((PyObject*)(op))->ob_type == &PyArray_Type) +#define PyArray_CheckExact(op) (Py_TYPE((PyObject*)(op)) == &PyArray_Type) #define PyArray_HasArrayInterfaceType(op, type, 
context, out) \ ((((out)=PyArray_FromStructInterface(op)) != Py_NotImplemented) || \ From 70d898b94bfc43ed57e80d560f6a95873713d9f6 Mon Sep 17 00:00:00 2001 From: Nandika P T Date: Thu, 22 Jan 2026 13:23:05 +0530 Subject: [PATCH 1314/1718] Remove 'spin' tool section from CONTRIBUTING.rst Removed section about the 'spin' developer tool from the CONTRIBUTING document. --- CONTRIBUTING.rst | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index ef3a67552ed0..15e8f7546cc2 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -17,17 +17,3 @@ For more, please read https://www.numpy.org/devdocs/dev/index.html Thank you for contributing, and happy coding! -.. _spin_tool: - -Spin: NumPy’s developer tool ----------------------------- - -NumPy uses a command-line tool called ``spin`` to support common development -tasks such as building from source, running tests, building documentation, -and managing other -developer workflows. - -The ``spin`` tool provides a consistent interface for contributors working on -NumPy itself, wrapping multiple underlying tools and configurations into a -single command that follows NumPy’s development conventions. 
- From cd9e49dd094f296fd712ea8648f1c65f1f4e47c2 Mon Sep 17 00:00:00 2001 From: Nandika32 Date: Thu, 22 Jan 2026 13:45:41 +0530 Subject: [PATCH 1315/1718] DOC: add top-level spin utility documentation --- doc/source/dev/index.rst | 1 + doc/source/dev/spin.rst | 29 +++++++++++++++++++++++++++++ 2 files changed, 30 insertions(+) create mode 100644 doc/source/dev/spin.rst diff --git a/doc/source/dev/index.rst b/doc/source/dev/index.rst index 50dac45e475a..f1ac71e3ba5c 100644 --- a/doc/source/dev/index.rst +++ b/doc/source/dev/index.rst @@ -253,6 +253,7 @@ The rest of the story :maxdepth: 2 development_environment + spin howto_build_docs development_workflow development_advanced_debugging diff --git a/doc/source/dev/spin.rst b/doc/source/dev/spin.rst new file mode 100644 index 000000000000..bc70aa8df8bc --- /dev/null +++ b/doc/source/dev/spin.rst @@ -0,0 +1,29 @@ +.. _spin_tool: + +Spin: NumPy’s developer tool +---------------------------- + +NumPy uses a command-line tool called ``spin`` to support common development +tasks such as building from source, running tests, building documentation, +and managing other +developer workflows. + +The ``spin`` tool provides a consistent interface for contributors working on +NumPy itself, wrapping multiple underlying tools and configurations into a +single command that follows NumPy’s development conventions. 
+ +Running the full test suite:: + + $ spin test -m full + +Running a subset of tests:: + + $ spin test -t numpy/_core/tests + +Running tests with coverage:: + + $ spin test --coverage + +Building the documentation:: + + $ spin docs \ No newline at end of file From f733002b8d85af2bff0336009562ff4f1d0be74e Mon Sep 17 00:00:00 2001 From: Nandika32 Date: Thu, 22 Jan 2026 14:24:29 +0530 Subject: [PATCH 1316/1718] Add spin.rst documentation as requested --- doc/source/dev/spin.rst | 1 - 1 file changed, 1 deletion(-) diff --git a/doc/source/dev/spin.rst b/doc/source/dev/spin.rst index bc70aa8df8bc..c1a00c337c30 100644 --- a/doc/source/dev/spin.rst +++ b/doc/source/dev/spin.rst @@ -11,7 +11,6 @@ developer workflows. The ``spin`` tool provides a consistent interface for contributors working on NumPy itself, wrapping multiple underlying tools and configurations into a single command that follows NumPy’s development conventions. - Running the full test suite:: $ spin test -m full From 4a38b4706aaeb9600300a46b394c202329a95513 Mon Sep 17 00:00:00 2001 From: antareepsarkar Date: Thu, 22 Jan 2026 16:08:07 +0530 Subject: [PATCH 1317/1718] BUG: fix meshgrid return type --- numpy/lib/_function_base_impl.py | 3 +++ numpy/lib/tests/test_function_base.py | 3 +++ 2 files changed, 6 insertions(+) diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index ef51f9622085..734319227c69 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -5195,6 +5195,9 @@ def meshgrid(*xi, copy=True, sparse=False, indexing='xy'): if copy: output = tuple(x.copy() for x in output) + if sparse and copy == False: + return tuple(output) + return output diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index a9edb80c97da..7f6ae2a7f9bf 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -2799,8 +2799,11 @@ def test_indexing(self): def test_sparse(self): [X, Y] = 
meshgrid([1, 2, 3], [4, 5, 6, 7], sparse=True) + [A, B] = meshgrid([1, 2, 3, 4], [4, 5, 6], sparse=True, copy=False) assert_array_equal(X, np.array([[1, 2, 3]])) assert_array_equal(Y, np.array([[4], [5], [6], [7]])) + assert_array_equal(A, np.array([[1, 2, 3, 4]])) + assert_array_equal(B, np.array([[4], [5], [6]])) def test_invalid_arguments(self): # Test that meshgrid complains about invalid arguments From 49d9ca0a8f697e0e203c7f018cae0386be975816 Mon Sep 17 00:00:00 2001 From: Antareep Sarkar Date: Thu, 22 Jan 2026 16:50:34 +0530 Subject: [PATCH 1318/1718] DOC: add release note for 30707 Update documentation to reflect that `meshgrid` now consistently returns a tuple. --- doc/release/upcoming_changes/30707.change.rst | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 doc/release/upcoming_changes/30707.change.rst diff --git a/doc/release/upcoming_changes/30707.change.rst b/doc/release/upcoming_changes/30707.change.rst new file mode 100644 index 000000000000..a99a5749a8df --- /dev/null +++ b/doc/release/upcoming_changes/30707.change.rst @@ -0,0 +1,4 @@ +``meshgrid`` now always returns a tuple +--------------------------------------------- +``np.meshgrid`` previously used to return a list when ``sparse`` was true and ``copy`` was false. +Now, it always returns a tuple regardless of the arguments. 
From 070954a2f15a77d393b7903053a6f9728a8d07d1 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 22 Jan 2026 16:33:33 +0100 Subject: [PATCH 1319/1718] BUG: Fix some bugs found via valgrind (#30680) --- numpy/_core/src/multiarray/item_selection.c | 4 ++-- numpy/_core/src/multiarray/mapping.c | 21 +++++++++--------- numpy/_core/src/multiarray/multiarraymodule.c | 2 ++ numpy/_core/src/multiarray/refcount.c | 4 +++- numpy/_core/src/multiarray/scalartypes.c.src | 2 +- numpy/_core/src/multiarray/unique.cpp | 6 +++-- numpy/_core/src/umath/ufunc_object.c | 5 +++-- numpy/_core/tests/examples/cython/checks.pyx | 1 + numpy/_core/tests/test_arrayobject.py | 22 ++++++++++++++++++- 9 files changed, 48 insertions(+), 19 deletions(-) diff --git a/numpy/_core/src/multiarray/item_selection.c b/numpy/_core/src/multiarray/item_selection.c index ff100c3d9d5d..4751db9b4705 100644 --- a/numpy/_core/src/multiarray/item_selection.c +++ b/numpy/_core/src/multiarray/item_selection.c @@ -1371,7 +1371,6 @@ _new_sortlike(PyArrayObject *op, int axis, PyArray_SortFunc *sort, if (needcopy) { PyArray_ClearBuffer(odescr, buffer, elsize, N, 1); PyDataMem_UserFREE(buffer, N * elsize, mem_handler); - Py_DECREF(odescr); } if (ret < 0 && !PyErr_Occurred()) { /* Out of memory during sorting or buffer creation */ @@ -1384,6 +1383,7 @@ _new_sortlike(PyArrayObject *op, int axis, PyArray_SortFunc *sort, if (PyErr_Occurred() && ret == 0) { ret = -1; } + Py_XDECREF(odescr); Py_DECREF(it); Py_DECREF(mem_handler); NPY_cast_info_xfree(&to_cast_info); @@ -1594,7 +1594,6 @@ _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort, if (needcopy) { PyArray_ClearBuffer(odescr, valbuffer, elsize, N, 1); PyDataMem_UserFREE(valbuffer, N * elsize, mem_handler); - Py_DECREF(odescr); } PyDataMem_UserFREE(idxbuffer, N * sizeof(npy_intp), mem_handler); if (ret < 0) { @@ -1605,6 +1604,7 @@ _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort, Py_XDECREF(rop); rop = NULL; } + 
Py_XDECREF(odescr); Py_XDECREF(it); Py_XDECREF(rit); Py_DECREF(mem_handler); diff --git a/numpy/_core/src/multiarray/mapping.c b/numpy/_core/src/multiarray/mapping.c index d6128f74621a..5a8ec64664ac 100644 --- a/numpy/_core/src/multiarray/mapping.c +++ b/numpy/_core/src/multiarray/mapping.c @@ -270,7 +270,7 @@ prepare_index_noarray(int array_ndims, npy_intp *array_dims, PyObject *index, npy_intp n; PyObject *obj = NULL; - PyArrayObject *arr; + PyArrayObject *arr = NULL; // free'd on error use Py_CLEAR to decref. int index_type = 0; int ellipsis_pos = -1; @@ -489,6 +489,7 @@ prepare_index_noarray(int array_ndims, npy_intp *array_dims, PyObject *index, index_type = HAS_BOOL; indices[curr_idx].type = HAS_BOOL; indices[curr_idx].object = (PyObject *)arr; + arr = NULL; // Reference moved, clean up for error path. /* keep track anyway, just to be complete */ used_ndim = array_ndims; @@ -523,7 +524,7 @@ prepare_index_noarray(int array_ndims, npy_intp *array_dims, PyObject *index, indices[curr_idx].value = n; indices[curr_idx].object = PyArray_Zeros(1, &n, PyArray_DescrFromType(NPY_INTP), 0); - Py_DECREF(arr); + Py_CLEAR(arr); if (indices[curr_idx].object == NULL) { goto failed_building_indices; @@ -541,7 +542,6 @@ prepare_index_noarray(int array_ndims, npy_intp *array_dims, PyObject *index, n = _nonzero_indices((PyObject *)arr, nonzero_result); if (n < 0) { - Py_DECREF(arr); goto failed_building_indices; } @@ -552,7 +552,6 @@ prepare_index_noarray(int array_ndims, npy_intp *array_dims, PyObject *index, for (i=0; i < n; i++) { Py_DECREF(nonzero_result[i]); } - Py_DECREF(arr); goto failed_building_indices; } @@ -566,7 +565,7 @@ prepare_index_noarray(int array_ndims, npy_intp *array_dims, PyObject *index, used_ndim += 1; curr_idx += 1; } - Py_DECREF(arr); + Py_CLEAR(arr); /* All added indices have 1 dimension */ if (fancy_ndim < 1) { @@ -587,7 +586,7 @@ prepare_index_noarray(int array_ndims, npy_intp *array_dims, PyObject *index, */ npy_intp ind = 
PyArray_PyIntAsIntp((PyObject *)arr); - Py_DECREF(arr); + Py_CLEAR(arr); if (error_converting(ind)) { goto failed_building_indices; } @@ -603,15 +602,17 @@ prepare_index_noarray(int array_ndims, npy_intp *array_dims, PyObject *index, } } + if (fancy_ndim < PyArray_NDIM(arr)) { + fancy_ndim = PyArray_NDIM(arr); + } + index_type |= HAS_FANCY; indices[curr_idx].type = HAS_FANCY; indices[curr_idx].value = -1; indices[curr_idx].object = (PyObject *)arr; + arr = NULL; // Reference moved, clean up for error path. used_ndim += 1; - if (fancy_ndim < PyArray_NDIM(arr)) { - fancy_ndim = PyArray_NDIM(arr); - } curr_idx += 1; continue; } @@ -632,7 +633,6 @@ prepare_index_noarray(int array_ndims, npy_intp *array_dims, PyObject *index, is_flatiter_object ? "" : ", numpy.newaxis (`None`)" ); } - Py_DECREF(arr); goto failed_building_indices; } @@ -760,6 +760,7 @@ prepare_index_noarray(int array_ndims, npy_intp *array_dims, PyObject *index, return index_type; failed_building_indices: + Py_XDECREF(arr); for (i=0; i < curr_idx; i++) { Py_XDECREF(indices[i].object); } diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index 2c45508e9e66..668da32add18 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -2338,6 +2338,7 @@ array_fromstring(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *keywds if (sep == NULL || strlen(sep) == 0) { PyErr_SetString(PyExc_ValueError, "The binary mode of fromstring is removed, use frombuffer instead"); + Py_XDECREF(descr); return NULL; } return PyArray_FromString(data, (npy_intp)s, descr, (npy_intp)nin, sep); @@ -2406,6 +2407,7 @@ array_fromfile(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *keywds) } if (npy_fseek(fp, offset, SEEK_CUR) != 0) { PyErr_SetFromErrno(PyExc_OSError); + Py_XDECREF(type); goto cleanup; } if (type == NULL) { diff --git a/numpy/_core/src/multiarray/refcount.c b/numpy/_core/src/multiarray/refcount.c 
index ac70f38f39a5..9c5a15da03f5 100644 --- a/numpy/_core/src/multiarray/refcount.c +++ b/numpy/_core/src/multiarray/refcount.c @@ -124,7 +124,7 @@ PyArray_ZeroContiguousBuffer( * and only arrays which own their memory should clear it. */ int aligned = PyArray_ISALIGNED(arr); - if (PyArray_ISCONTIGUOUS(arr)) { + if (PyArray_ISCONTIGUOUS(arr) || PyArray_IS_F_CONTIGUOUS(arr)) { return PyArray_ClearBuffer( descr, PyArray_BYTES(arr), descr->elsize, PyArray_SIZE(arr), aligned); @@ -152,10 +152,12 @@ PyArray_ZeroContiguousBuffer( /* Process the innermost dimension */ if (clear_info.func(NULL, clear_info.descr, data_it, inner_shape, inner_stride, clear_info.auxdata) < 0) { + NPY_traverse_info_xfree(&clear_info); return -1; } } NPY_RAW_ITER_ONE_NEXT(idim, ndim, coord, shape_it, data_it, strides_it); + NPY_traverse_info_xfree(&clear_info); return 0; } diff --git a/numpy/_core/src/multiarray/scalartypes.c.src b/numpy/_core/src/multiarray/scalartypes.c.src index 2d63dd6e3602..48ad4ab540a9 100644 --- a/numpy/_core/src/multiarray/scalartypes.c.src +++ b/numpy/_core/src/multiarray/scalartypes.c.src @@ -955,7 +955,7 @@ datetimetype_repr(PyObject *self) ret = PyUnicode_FromFormat("numpy.datetime64('%s')", iso); } } - + Py_DECREF(meta); } else { PyObject *meta = metastr_to_unicode(&scal->obmeta, 1); diff --git a/numpy/_core/src/multiarray/unique.cpp b/numpy/_core/src/multiarray/unique.cpp index 8fc5b580961e..675e0226d14f 100644 --- a/numpy/_core/src/multiarray/unique.cpp +++ b/numpy/_core/src/multiarray/unique.cpp @@ -508,9 +508,11 @@ array__unique_hash(PyObject *NPY_UNUSED(module), auto type = PyArray_TYPE(arr); // we only support data types present in our unique_funcs map if (unique_funcs.find(type) == unique_funcs.end()) { - Py_RETURN_NOTIMPLEMENTED; + result = Py_NewRef(Py_NotImplemented); + } + else { + result = unique_funcs[type](arr, equal_nan); } - result = unique_funcs[type](arr, equal_nan); } catch (const std::bad_alloc &e) { PyErr_NoMemory(); diff --git 
a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index fdeb7efc0fb6..c4c5907e4cda 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -2738,8 +2738,9 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, else { fixed_strides[0] = PyArray_STRIDES(op[0])[axis]; fixed_strides[1] = PyArray_STRIDES(op[1])[axis]; - fixed_strides[2] = fixed_strides[0]; } + // First argument is also passed as output (e.g. see dataptr below). + fixed_strides[2] = fixed_strides[0]; NPY_ARRAYMETHOD_FLAGS flags = 0; @@ -4515,7 +4516,7 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, "use the `out` keyword argument instead. If you hoped to work with " "more than 2 inputs, combine them into a single array and get the extrema " "for the relevant axis.") < 0) { - return NULL; + goto fail; } } diff --git a/numpy/_core/tests/examples/cython/checks.pyx b/numpy/_core/tests/examples/cython/checks.pyx index bfb2de95fa59..6dcce1c2606d 100644 --- a/numpy/_core/tests/examples/cython/checks.pyx +++ b/numpy/_core/tests/examples/cython/checks.pyx @@ -248,6 +248,7 @@ def test_get_multi_index_iter_next(it: "nditer", cnp.ndarray[cnp.float64_t, ndim cnp.NpyIter_GetGetMultiIndex(cit, NULL) cdef cnp.NpyIter_IterNextFunc _iternext = \ cnp.NpyIter_GetIterNext(cit, NULL) + cnp.NpyIter_Deallocate(cit) return 1 diff --git a/numpy/_core/tests/test_arrayobject.py b/numpy/_core/tests/test_arrayobject.py index ffa1ba001776..f4e268b377b3 100644 --- a/numpy/_core/tests/test_arrayobject.py +++ b/numpy/_core/tests/test_arrayobject.py @@ -1,7 +1,9 @@ +import sys + import pytest import numpy as np -from numpy.testing import assert_array_equal +from numpy.testing import HAS_REFCOUNT, assert_array_equal def test_matrix_transpose_raises_error_for_1d(): @@ -73,3 +75,21 @@ def test_array_wrap(subclass_self, subclass_arr): # Non 0-D array can't be converted to scalar, so we ignore that arr1d = np.array([3], 
dtype=np.int8).view(subclass_arr) assert type(arr.__array_wrap__(arr1d, None, True)) is type(arr) + + +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +def test_cleanup_with_refs_non_contig(): + # Regression test, leaked the dtype (but also good for rest) + dtype = np.dtype("O,i") + obj = object() + expected_ref_dtype = sys.getrefcount(dtype) + expected_ref_obj = sys.getrefcount(obj) + proto = np.full((3, 4, 5, 6, 7), np.array((obj, 2), dtype=dtype)) + # Give array a non-trivial order to exercise more cleanup paths. + arr = proto.transpose((2, 0, 3, 1, 4)).copy("K") + del proto, arr + + actual_ref_dtype = sys.getrefcount(dtype) + actual_ref_obj = sys.getrefcount(obj) + assert actual_ref_dtype == expected_ref_dtype + assert actual_ref_obj == expected_ref_obj From 7e03e788fbe23f9eb4d356b11bbe99ef0ca9bc4d Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 22 Jan 2026 18:43:42 +0100 Subject: [PATCH 1320/1718] TYP: remove unused `# noqa: E501` comments --- numpy/random/mtrand.pyi | 46 ++++++++++++++++++++--------------------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/numpy/random/mtrand.pyi b/numpy/random/mtrand.pyi index 7a654971f19b..72954dd2f1bd 100644 --- a/numpy/random/mtrand.pyi +++ b/numpy/random/mtrand.pyi @@ -105,7 +105,7 @@ class RandomState: def __str__(self) -> str: ... def __getstate__(self) -> dict[str, Any]: ... def __setstate__(self, state: dict[str, Any]) -> None: ... - def __reduce__(self) -> tuple[Callable[[BitGenerator], RandomState], tuple[BitGenerator], dict[str, Any]]: ... # noqa: E501 + def __reduce__(self) -> tuple[Callable[[BitGenerator], RandomState], tuple[BitGenerator], dict[str, Any]]: ... def seed(self, seed: _ArrayLikeFloat_co | None = None) -> None: ... @overload def get_state(self, legacy: Literal[False] = False) -> dict[str, Any]: ... 
@@ -185,7 +185,7 @@ class RandomState: low: int, high: int | None = None, size: None = None, - dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ..., # noqa: E501 + dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ..., ) -> uint8: ... @overload def randint( @@ -193,7 +193,7 @@ class RandomState: low: int, high: int | None = None, size: None = None, - dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ..., # noqa: E501 + dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ..., ) -> uint16: ... @overload def randint( @@ -201,7 +201,7 @@ class RandomState: low: int, high: int | None = None, size: None = None, - dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ..., # noqa: E501 + dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ..., ) -> uint32: ... @overload def randint( @@ -209,7 +209,7 @@ class RandomState: low: int, high: int | None = None, size: None = None, - dtype: dtype[uint] | type[uint] | _UIntPCodes | _SupportsDType[dtype[uint]] = ..., # noqa: E501 + dtype: dtype[uint] | type[uint] | _UIntPCodes | _SupportsDType[dtype[uint]] = ..., ) -> uint: ... @overload def randint( @@ -217,7 +217,7 @@ class RandomState: low: int, high: int | None = None, size: None = None, - dtype: dtype[ulong] | type[ulong] | _ULongCodes | _SupportsDType[dtype[ulong]] = ..., # noqa: E501 + dtype: dtype[ulong] | type[ulong] | _ULongCodes | _SupportsDType[dtype[ulong]] = ..., ) -> ulong: ... @overload def randint( @@ -225,7 +225,7 @@ class RandomState: low: int, high: int | None = None, size: None = None, - dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ..., # noqa: E501 + dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ..., ) -> uint64: ... 
@overload def randint( @@ -233,7 +233,7 @@ class RandomState: low: int, high: int | None = None, size: None = None, - dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ..., # noqa: E501 + dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ..., ) -> int8: ... @overload def randint( @@ -241,7 +241,7 @@ class RandomState: low: int, high: int | None = None, size: None = None, - dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ..., # noqa: E501 + dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ..., ) -> int16: ... @overload def randint( @@ -249,7 +249,7 @@ class RandomState: low: int, high: int | None = None, size: None = None, - dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ..., # noqa: E501 + dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ..., ) -> int32: ... @overload def randint( @@ -257,7 +257,7 @@ class RandomState: low: int, high: int | None = None, size: None = None, - dtype: dtype[int_] | type[int_] | _IntPCodes | _SupportsDType[dtype[int_]] = ..., # noqa: E501 + dtype: dtype[int_] | type[int_] | _IntPCodes | _SupportsDType[dtype[int_]] = ..., ) -> int_: ... @overload def randint( @@ -265,7 +265,7 @@ class RandomState: low: int, high: int | None = None, size: None = None, - dtype: dtype[long] | type[long] | _LongCodes | _SupportsDType[dtype[long]] = ..., # noqa: E501 + dtype: dtype[long] | type[long] | _LongCodes | _SupportsDType[dtype[long]] = ..., ) -> long: ... @overload def randint( @@ -273,7 +273,7 @@ class RandomState: low: int, high: int | None = None, size: None = None, - dtype: dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] = ..., # noqa: E501 + dtype: dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] = ..., ) -> int64: ... 
@overload def randint( @@ -296,7 +296,7 @@ class RandomState: low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, - dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ..., # noqa: E501 + dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ..., ) -> NDArray[int8]: ... @overload def randint( @@ -304,7 +304,7 @@ class RandomState: low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, - dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ..., # noqa: E501 + dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ..., ) -> NDArray[int16]: ... @overload def randint( @@ -312,7 +312,7 @@ class RandomState: low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, - dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ..., # noqa: E501 + dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ..., ) -> NDArray[int32]: ... @overload def randint( @@ -320,7 +320,7 @@ class RandomState: low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, - dtype: dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] | None = ..., # noqa: E501 + dtype: dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] | None = ..., ) -> NDArray[int64]: ... @overload def randint( @@ -328,7 +328,7 @@ class RandomState: low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, - dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ..., # noqa: E501 + dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ..., ) -> NDArray[uint8]: ... 
@overload def randint( @@ -336,7 +336,7 @@ class RandomState: low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, - dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ..., # noqa: E501 + dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ..., ) -> NDArray[uint16]: ... @overload def randint( @@ -344,7 +344,7 @@ class RandomState: low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, - dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ..., # noqa: E501 + dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ..., ) -> NDArray[uint32]: ... @overload def randint( @@ -352,7 +352,7 @@ class RandomState: low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, - dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ..., # noqa: E501 + dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ..., ) -> NDArray[uint64]: ... @overload def randint( @@ -360,7 +360,7 @@ class RandomState: low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, - dtype: dtype[long] | type[int] | type[long] | _LongCodes | _SupportsDType[dtype[long]] = ..., # noqa: E501 + dtype: dtype[long] | type[int] | type[long] | _LongCodes | _SupportsDType[dtype[long]] = ..., ) -> NDArray[long]: ... @overload def randint( @@ -368,7 +368,7 @@ class RandomState: low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, - dtype: dtype[ulong] | type[ulong] | _ULongCodes | _SupportsDType[dtype[ulong]] = ..., # noqa: E501 + dtype: dtype[ulong] | type[ulong] | _ULongCodes | _SupportsDType[dtype[ulong]] = ..., ) -> NDArray[ulong]: ... def bytes(self, length: int) -> builtins.bytes: ... 
@overload From 2402c94491de656bb4c829e1d37b11d490f23a9b Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 22 Jan 2026 18:52:14 +0100 Subject: [PATCH 1321/1718] TYP: `random.mtrand`: simplify dtype-like aliases --- numpy/random/mtrand.pyi | 56 ++++++++++++++++++----------------------- 1 file changed, 25 insertions(+), 31 deletions(-) diff --git a/numpy/random/mtrand.pyi b/numpy/random/mtrand.pyi index 72954dd2f1bd..9f6abe229330 100644 --- a/numpy/random/mtrand.pyi +++ b/numpy/random/mtrand.pyi @@ -4,7 +4,6 @@ from typing import Any, Literal, overload import numpy as np from numpy import ( - dtype, float64, int8, int16, @@ -24,6 +23,7 @@ from numpy._typing import ( NDArray, _ArrayLikeFloat_co, _ArrayLikeInt_co, + _DTypeLike, _DTypeLikeBool, _Int8Codes, _Int16Codes, @@ -32,7 +32,6 @@ from numpy._typing import ( _IntPCodes, _LongCodes, _ShapeLike, - _SupportsDType, _UInt8Codes, _UInt16Codes, _UInt32Codes, @@ -149,12 +148,7 @@ class RandomState: # Generates long values, but stores it in a 64bit int: def tomaxint(self, size: _ShapeLike) -> NDArray[int64]: ... @overload - def randint( - self, - low: int, - high: int | None = None, - size: None = None, - ) -> int: ... + def randint(self, low: int, high: int | None = None, size: None = None) -> int: ... @overload def randint( self, @@ -185,7 +179,7 @@ class RandomState: low: int, high: int | None = None, size: None = None, - dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ..., + dtype: _DTypeLike[np.uint8] | _UInt8Codes = ..., ) -> uint8: ... @overload def randint( @@ -193,7 +187,7 @@ class RandomState: low: int, high: int | None = None, size: None = None, - dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ..., + dtype: _DTypeLike[np.uint16] | _UInt16Codes = ..., ) -> uint16: ... 
@overload def randint( @@ -201,7 +195,7 @@ class RandomState: low: int, high: int | None = None, size: None = None, - dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ..., + dtype: _DTypeLike[np.uint32] | _UInt32Codes = ..., ) -> uint32: ... @overload def randint( @@ -209,7 +203,7 @@ class RandomState: low: int, high: int | None = None, size: None = None, - dtype: dtype[uint] | type[uint] | _UIntPCodes | _SupportsDType[dtype[uint]] = ..., + dtype: _DTypeLike[np.uintp] | _UIntPCodes = ..., ) -> uint: ... @overload def randint( @@ -217,7 +211,7 @@ class RandomState: low: int, high: int | None = None, size: None = None, - dtype: dtype[ulong] | type[ulong] | _ULongCodes | _SupportsDType[dtype[ulong]] = ..., + dtype: _DTypeLike[np.ulong] | _ULongCodes = ..., ) -> ulong: ... @overload def randint( @@ -225,7 +219,7 @@ class RandomState: low: int, high: int | None = None, size: None = None, - dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ..., + dtype: _DTypeLike[np.uint64] | _UInt64Codes = ..., ) -> uint64: ... @overload def randint( @@ -233,7 +227,7 @@ class RandomState: low: int, high: int | None = None, size: None = None, - dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ..., + dtype: _DTypeLike[np.int8] | _Int8Codes = ..., ) -> int8: ... @overload def randint( @@ -241,7 +235,7 @@ class RandomState: low: int, high: int | None = None, size: None = None, - dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ..., + dtype: _DTypeLike[np.int16] | _Int16Codes = ..., ) -> int16: ... @overload def randint( @@ -249,7 +243,7 @@ class RandomState: low: int, high: int | None = None, size: None = None, - dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ..., + dtype: _DTypeLike[np.int32] | _Int32Codes = ..., ) -> int32: ... 
@overload def randint( @@ -257,7 +251,7 @@ class RandomState: low: int, high: int | None = None, size: None = None, - dtype: dtype[int_] | type[int_] | _IntPCodes | _SupportsDType[dtype[int_]] = ..., + dtype: _DTypeLike[np.intp] | _IntPCodes = ..., ) -> int_: ... @overload def randint( @@ -265,7 +259,7 @@ class RandomState: low: int, high: int | None = None, size: None = None, - dtype: dtype[long] | type[long] | _LongCodes | _SupportsDType[dtype[long]] = ..., + dtype: _DTypeLike[np.long] | _LongCodes = ..., ) -> long: ... @overload def randint( @@ -273,7 +267,7 @@ class RandomState: low: int, high: int | None = None, size: None = None, - dtype: dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] = ..., + dtype: _DTypeLike[np.int64] | _Int64Codes = ..., ) -> int64: ... @overload def randint( @@ -296,7 +290,7 @@ class RandomState: low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, - dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ..., + dtype: _DTypeLike[np.int8] | _Int8Codes = ..., ) -> NDArray[int8]: ... @overload def randint( @@ -304,7 +298,7 @@ class RandomState: low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, - dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ..., + dtype: _DTypeLike[np.int16] | _Int16Codes = ..., ) -> NDArray[int16]: ... @overload def randint( @@ -312,7 +306,7 @@ class RandomState: low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, - dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ..., + dtype: _DTypeLike[np.int32] | _Int32Codes = ..., ) -> NDArray[int32]: ... 
@overload def randint( @@ -320,7 +314,7 @@ class RandomState: low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, - dtype: dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] | None = ..., + dtype: _DTypeLike[np.int64] | _Int64Codes = ..., ) -> NDArray[int64]: ... @overload def randint( @@ -328,7 +322,7 @@ class RandomState: low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, - dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ..., + dtype: _DTypeLike[np.uint8] = ..., ) -> NDArray[uint8]: ... @overload def randint( @@ -336,7 +330,7 @@ class RandomState: low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, - dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ..., + dtype: _DTypeLike[np.uint16] | _UInt16Codes = ..., ) -> NDArray[uint16]: ... @overload def randint( @@ -344,7 +338,7 @@ class RandomState: low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, - dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ..., + dtype: _DTypeLike[np.uint32] | _UInt32Codes = ..., ) -> NDArray[uint32]: ... @overload def randint( @@ -352,7 +346,7 @@ class RandomState: low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, - dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ..., + dtype: _DTypeLike[np.uint64] | _UInt64Codes = ..., ) -> NDArray[uint64]: ... @overload def randint( @@ -360,7 +354,7 @@ class RandomState: low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, - dtype: dtype[long] | type[int] | type[long] | _LongCodes | _SupportsDType[dtype[long]] = ..., + dtype: _DTypeLike[np.long] | _LongCodes = ..., ) -> NDArray[long]: ... 
@overload def randint( @@ -368,9 +362,9 @@ class RandomState: low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, - dtype: dtype[ulong] | type[ulong] | _ULongCodes | _SupportsDType[dtype[ulong]] = ..., + dtype: _DTypeLike[np.ulong] | _ULongCodes = ..., ) -> NDArray[ulong]: ... - def bytes(self, length: int) -> builtins.bytes: ... + def bytes(self, length: int) -> bytes: ... @overload def choice( self, From c36c81df1b9d40a87120f18864f8d35ffe5926d6 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 22 Jan 2026 18:57:53 +0100 Subject: [PATCH 1322/1718] TYP/STY: `random.mtrand`: reformat in a ruff-compatible manner --- numpy/random/mtrand.pyi | 305 +++++++++++++++++++++++++--------------- 1 file changed, 190 insertions(+), 115 deletions(-) diff --git a/numpy/random/mtrand.pyi b/numpy/random/mtrand.pyi index 9f6abe229330..1e0210915d2d 100644 --- a/numpy/random/mtrand.pyi +++ b/numpy/random/mtrand.pyi @@ -1,4 +1,4 @@ -import builtins +from builtins import bytes as py_bytes from collections.abc import Callable from typing import Any, Literal, overload @@ -105,50 +105,63 @@ class RandomState: def __getstate__(self) -> dict[str, Any]: ... def __setstate__(self, state: dict[str, Any]) -> None: ... def __reduce__(self) -> tuple[Callable[[BitGenerator], RandomState], tuple[BitGenerator], dict[str, Any]]: ... + + # def seed(self, seed: _ArrayLikeFloat_co | None = None) -> None: ... + + # @overload def get_state(self, legacy: Literal[False] = False) -> dict[str, Any]: ... @overload - def get_state( - self, legacy: Literal[True] = True - ) -> dict[str, Any] | tuple[str, NDArray[uint32], int, int, float]: ... - def set_state( - self, state: dict[str, Any] | tuple[str, NDArray[uint32], int, int, float] - ) -> None: ... + def get_state(self, legacy: Literal[True] = True) -> dict[str, Any] | tuple[str, NDArray[uint32], int, int, float]: ... 
+ + # + def set_state(self, state: dict[str, Any] | tuple[str, NDArray[uint32], int, int, float]) -> None: ... + + # @overload def random_sample(self, size: None = None) -> float: ... @overload def random_sample(self, size: _ShapeLike) -> NDArray[float64]: ... + + # @overload def random(self, size: None = None) -> float: ... @overload def random(self, size: _ShapeLike) -> NDArray[float64]: ... + + # @overload def beta(self, a: float, b: float, size: None = None) -> float: ... @overload - def beta( - self, - a: _ArrayLikeFloat_co, - b: _ArrayLikeFloat_co, - size: _ShapeLike | None = None - ) -> NDArray[float64]: ... + def beta(self, a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[float64]: ... + + # @overload def exponential(self, scale: float = 1.0, size: None = None) -> float: ... @overload - def exponential( - self, scale: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None - ) -> NDArray[float64]: ... + def exponential(self, scale: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None) -> NDArray[float64]: ... + + # @overload def standard_exponential(self, size: None = None) -> float: ... @overload def standard_exponential(self, size: _ShapeLike) -> NDArray[float64]: ... + + # @overload def tomaxint(self, size: None = None) -> int: ... - @overload - # Generates long values, but stores it in a 64bit int: + @overload # Generates long values, but stores it in a 64bit int: def tomaxint(self, size: _ShapeLike) -> NDArray[int64]: ... + + # @overload - def randint(self, low: int, high: int | None = None, size: None = None) -> int: ... + def randint( + self, + low: int, + high: int | None = None, + size: None = None, + ) -> int: ... @overload def randint( self, @@ -364,7 +377,11 @@ class RandomState: size: _ShapeLike | None = None, dtype: _DTypeLike[np.ulong] | _ULongCodes = ..., ) -> NDArray[ulong]: ... - def bytes(self, length: int) -> bytes: ... + + # + def bytes(self, length: int) -> py_bytes: ... 
+ + # @overload def choice( self, @@ -397,9 +414,14 @@ class RandomState: replace: bool = True, p: _ArrayLikeFloat_co | None = None, ) -> NDArray[Any]: ... + + # @overload def uniform( - self, low: float = 0.0, high: float = 1.0, size: None = None + self, + low: float = 0.0, + high: float = 1.0, + size: None = None, ) -> float: ... @overload def uniform( @@ -408,17 +430,26 @@ class RandomState: high: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None, ) -> NDArray[float64]: ... + + # @overload def rand(self) -> float: ... @overload def rand(self, *args: int) -> NDArray[float64]: ... + + # @overload def randn(self) -> float: ... @overload def randn(self, *args: int) -> NDArray[float64]: ... + + # @overload def random_integers( - self, low: int, high: int | None = None, size: None = None + self, + low: int, + high: int | None = None, + size: None = None, ) -> int: ... @overload def random_integers( @@ -427,15 +458,20 @@ class RandomState: high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, ) -> NDArray[long]: ... + + # @overload def standard_normal(self, size: None = None) -> float: ... @overload - def standard_normal( - self, size: _ShapeLike | None = None - ) -> NDArray[float64]: ... + def standard_normal(self, size: _ShapeLike | None = None) -> NDArray[float64]: ... + + # @overload def normal( - self, loc: float = 0.0, scale: float = 1.0, size: None = None + self, + loc: float = 0.0, + scale: float = 1.0, + size: None = None, ) -> float: ... @overload def normal( @@ -444,39 +480,43 @@ class RandomState: scale: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None, ) -> NDArray[float64]: ... + + # + @overload + def standard_gamma(self, shape: float, size: None = None) -> float: ... @overload - def standard_gamma( + def standard_gamma(self, shape: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[float64]: ... + + # + @overload + def gamma( self, shape: float, + scale: float = 1.0, size: None = None, ) -> float: ... 
@overload - def standard_gamma( - self, - shape: _ArrayLikeFloat_co, - size: _ShapeLike | None = None, - ) -> NDArray[float64]: ... - @overload - def gamma(self, shape: float, scale: float = 1.0, size: None = None) -> float: ... - @overload def gamma( self, shape: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None, ) -> NDArray[float64]: ... + + # @overload def f(self, dfnum: float, dfden: float, size: None = None) -> float: ... @overload - def f( - self, - dfnum: _ArrayLikeFloat_co, - dfden: _ArrayLikeFloat_co, - size: _ShapeLike | None = None - ) -> NDArray[float64]: ... + def f(self, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[float64]: ... + + # @overload def noncentral_f( - self, dfnum: float, dfden: float, nonc: float, size: None = None + self, + dfnum: float, + dfden: float, + nonc: float, + size: None = None, ) -> float: ... @overload def noncentral_f( @@ -486,67 +526,74 @@ class RandomState: nonc: _ArrayLikeFloat_co, size: _ShapeLike | None = None, ) -> NDArray[float64]: ... + + # @overload def chisquare(self, df: float, size: None = None) -> float: ... @overload - def chisquare( - self, df: _ArrayLikeFloat_co, size: _ShapeLike | None = None - ) -> NDArray[float64]: ... + def chisquare(self, df: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[float64]: ... + + # @overload def noncentral_chisquare( - self, df: float, nonc: float, size: None = None + self, + df: float, + nonc: float, + size: None = None, ) -> float: ... @overload def noncentral_chisquare( self, df: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, - size: _ShapeLike | None = None + size: _ShapeLike | None = None, ) -> NDArray[float64]: ... + + # @overload def standard_t(self, df: float, size: None = None) -> float: ... @overload - def standard_t( - self, df: _ArrayLikeFloat_co, size: None = None - ) -> NDArray[float64]: ... 
+ def standard_t(self, df: _ArrayLikeFloat_co, size: None = None) -> NDArray[float64]: ... @overload - def standard_t( - self, df: _ArrayLikeFloat_co, size: _ShapeLike | None = None - ) -> NDArray[float64]: ... + def standard_t(self, df: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[float64]: ... + + # @overload def vonmises(self, mu: float, kappa: float, size: None = None) -> float: ... @overload - def vonmises( - self, - mu: _ArrayLikeFloat_co, - kappa: _ArrayLikeFloat_co, - size: _ShapeLike | None = None - ) -> NDArray[float64]: ... + def vonmises(self, mu: _ArrayLikeFloat_co, kappa: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[float64]: ... + + # @overload def pareto(self, a: float, size: None = None) -> float: ... @overload - def pareto( - self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None - ) -> NDArray[float64]: ... + def pareto(self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[float64]: ... + + # @overload def weibull(self, a: float, size: None = None) -> float: ... @overload - def weibull( - self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None - ) -> NDArray[float64]: ... + def weibull(self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[float64]: ... + + # @overload def power(self, a: float, size: None = None) -> float: ... @overload - def power( - self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None - ) -> NDArray[float64]: ... + def power(self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[float64]: ... + + # @overload def standard_cauchy(self, size: None = None) -> float: ... @overload def standard_cauchy(self, size: _ShapeLike | None = None) -> NDArray[float64]: ... + + # @overload def laplace( - self, loc: float = 0.0, scale: float = 1.0, size: None = None + self, + loc: float = 0.0, + scale: float = 1.0, + size: None = None, ) -> float: ... 
@overload def laplace( @@ -555,9 +602,14 @@ class RandomState: scale: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None, ) -> NDArray[float64]: ... + + # @overload def gumbel( - self, loc: float = 0.0, scale: float = 1.0, size: None = None + self, + loc: float = 0.0, + scale: float = 1.0, + size: None = None, ) -> float: ... @overload def gumbel( @@ -566,9 +618,14 @@ class RandomState: scale: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None, ) -> NDArray[float64]: ... + + # @overload def logistic( - self, loc: float = 0.0, scale: float = 1.0, size: None = None + self, + loc: float = 0.0, + scale: float = 1.0, + size: None = None, ) -> float: ... @overload def logistic( @@ -577,9 +634,14 @@ class RandomState: scale: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None, ) -> NDArray[float64]: ... + + # @overload def lognormal( - self, mean: float = 0.0, sigma: float = 1.0, size: None = None + self, + mean: float = 0.0, + sigma: float = 1.0, + size: None = None, ) -> float: ... @overload def lognormal( @@ -588,24 +650,27 @@ class RandomState: sigma: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None, ) -> NDArray[float64]: ... + + # @overload def rayleigh(self, scale: float = 1.0, size: None = None) -> float: ... @overload - def rayleigh( - self, scale: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None - ) -> NDArray[float64]: ... + def rayleigh(self, scale: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None) -> NDArray[float64]: ... + + # @overload def wald(self, mean: float, scale: float, size: None = None) -> float: ... @overload - def wald( - self, - mean: _ArrayLikeFloat_co, - scale: _ArrayLikeFloat_co, - size: _ShapeLike | None = None - ) -> NDArray[float64]: ... + def wald(self, mean: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[float64]: ... 
+ + # @overload def triangular( - self, left: float, mode: float, right: float, size: None = None + self, + left: float, + mode: float, + right: float, + size: None = None, ) -> float: ... @overload def triangular( @@ -615,48 +680,55 @@ class RandomState: right: _ArrayLikeFloat_co, size: _ShapeLike | None = None, ) -> NDArray[float64]: ... + + # @overload - def binomial( - self, n: int, p: float, size: None = None - ) -> int: ... + def binomial(self, n: int, p: float, size: None = None) -> int: ... @overload - def binomial( - self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None - ) -> NDArray[long]: ... + def binomial(self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[long]: ... + + # @overload def negative_binomial( - self, n: float, p: float, size: None = None + self, + n: float, + p: float, + size: None = None, ) -> int: ... @overload def negative_binomial( self, n: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co, - size: _ShapeLike | None = None + size: _ShapeLike | None = None, ) -> NDArray[long]: ... + + # @overload - def poisson( - self, lam: float = 1.0, size: None = None - ) -> int: ... + def poisson(self, lam: float = 1.0, size: None = None) -> int: ... @overload - def poisson( - self, lam: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None - ) -> NDArray[long]: ... + def poisson(self, lam: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None) -> NDArray[long]: ... + + # @overload def zipf(self, a: float, size: None = None) -> int: ... @overload - def zipf( - self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None - ) -> NDArray[long]: ... + def zipf(self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[long]: ... + + # @overload def geometric(self, p: float, size: None = None) -> int: ... @overload - def geometric( - self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None - ) -> NDArray[long]: ... 
+ def geometric(self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[long]: ... + + # @overload def hypergeometric( - self, ngood: int, nbad: int, nsample: int, size: None = None + self, + ngood: int, + nbad: int, + nsample: int, + size: None = None, ) -> int: ... @overload def hypergeometric( @@ -666,12 +738,14 @@ class RandomState: nsample: _ArrayLikeInt_co, size: _ShapeLike | None = None, ) -> NDArray[long]: ... + + # @overload def logseries(self, p: float, size: None = None) -> int: ... @overload - def logseries( - self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None - ) -> NDArray[long]: ... + def logseries(self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[long]: ... + + # def multivariate_normal( self, mean: _ArrayLikeFloat_co, @@ -680,15 +754,17 @@ class RandomState: check_valid: Literal["warn", "raise", "ignore"] = "warn", tol: float = 1e-8, ) -> NDArray[float64]: ... - def multinomial( - self, n: _ArrayLikeInt_co, - pvals: _ArrayLikeFloat_co, - size: _ShapeLike | None = None - ) -> NDArray[long]: ... - def dirichlet( - self, alpha: _ArrayLikeFloat_co, size: _ShapeLike | None = None - ) -> NDArray[float64]: ... + + # + def multinomial(self, n: _ArrayLikeInt_co, pvals: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[long]: ... + + # + def dirichlet(self, alpha: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[float64]: ... + + # def shuffle(self, x: ArrayLike) -> None: ... + + # @overload def permutation(self, x: int) -> NDArray[long]: ... @overload @@ -749,5 +825,4 @@ sample = _rand.random_sample ranf = _rand.random_sample def set_bit_generator(bitgen: BitGenerator) -> None: ... - def get_bit_generator() -> BitGenerator: ... 
From 476a5002e7fbdc4a7d4d7d73ef81d3fae89af541 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 22 Jan 2026 19:07:36 +0100 Subject: [PATCH 1323/1718] TYP: `RandomState.randint`: fix incorrect `dtype` defaults --- numpy/random/mtrand.pyi | 128 ++++++++++++++++++++-------------------- 1 file changed, 65 insertions(+), 63 deletions(-) diff --git a/numpy/random/mtrand.pyi b/numpy/random/mtrand.pyi index 1e0210915d2d..790322c33805 100644 --- a/numpy/random/mtrand.pyi +++ b/numpy/random/mtrand.pyi @@ -99,7 +99,8 @@ __all__ = [ class RandomState: _bit_generator: BitGenerator - def __init__(self, seed: _ArrayLikeInt_co | BitGenerator | None = ...) -> None: ... + + def __init__(self, seed: _ArrayLikeInt_co | BitGenerator | None = None) -> None: ... def __repr__(self) -> str: ... def __str__(self) -> str: ... def __getstate__(self) -> dict[str, Any]: ... @@ -161,6 +162,7 @@ class RandomState: low: int, high: int | None = None, size: None = None, + dtype: type[int] = int, ) -> int: ... @overload def randint( @@ -168,15 +170,8 @@ class RandomState: low: int, high: int | None = None, size: None = None, - dtype: type[bool] = ..., - ) -> bool: ... - @overload - def randint( - self, - low: int, - high: int | None = None, - size: None = None, - dtype: type[np.bool] = ..., + *, + dtype: type[np.bool], ) -> np.bool: ... @overload def randint( @@ -184,118 +179,115 @@ class RandomState: low: int, high: int | None = None, size: None = None, - dtype: type[int] = ..., - ) -> int: ... - @overload - def randint( - self, - low: int, - high: int | None = None, - size: None = None, - dtype: _DTypeLike[np.uint8] | _UInt8Codes = ..., - ) -> uint8: ... + *, + dtype: _DTypeLike[np.int8] | _Int8Codes, + ) -> int8: ... @overload def randint( self, low: int, high: int | None = None, size: None = None, - dtype: _DTypeLike[np.uint16] | _UInt16Codes = ..., - ) -> uint16: ... + *, + dtype: _DTypeLike[np.int16] | _Int16Codes, + ) -> int16: ... 
@overload def randint( self, low: int, high: int | None = None, size: None = None, - dtype: _DTypeLike[np.uint32] | _UInt32Codes = ..., - ) -> uint32: ... + *, + dtype: _DTypeLike[np.int32] | _Int32Codes, + ) -> int32: ... @overload def randint( self, low: int, high: int | None = None, size: None = None, - dtype: _DTypeLike[np.uintp] | _UIntPCodes = ..., - ) -> uint: ... + *, + dtype: type[int] | _DTypeLike[np.int_] | _IntPCodes = int, + ) -> int_: ... @overload def randint( self, low: int, high: int | None = None, size: None = None, - dtype: _DTypeLike[np.ulong] | _ULongCodes = ..., - ) -> ulong: ... + *, + dtype: _DTypeLike[np.int64] | _Int64Codes, + ) -> int64: ... @overload def randint( self, - low: int, - high: int | None = None, - size: None = None, - dtype: _DTypeLike[np.uint64] | _UInt64Codes = ..., - ) -> uint64: ... + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + dtype: type[int] | _DTypeLike[np.int_] | _IntPCodes = int, + ) -> NDArray[np.int_]: ... @overload def randint( self, low: int, high: int | None = None, size: None = None, - dtype: _DTypeLike[np.int8] | _Int8Codes = ..., - ) -> int8: ... + *, + dtype: _DTypeLike[np.uint8] | _UInt8Codes, + ) -> uint8: ... @overload def randint( self, low: int, high: int | None = None, size: None = None, - dtype: _DTypeLike[np.int16] | _Int16Codes = ..., - ) -> int16: ... + *, + dtype: _DTypeLike[np.uint16] | _UInt16Codes, + ) -> uint16: ... @overload def randint( self, low: int, high: int | None = None, size: None = None, - dtype: _DTypeLike[np.int32] | _Int32Codes = ..., - ) -> int32: ... + *, + dtype: _DTypeLike[np.uint32] | _UInt32Codes, + ) -> uint32: ... @overload def randint( self, low: int, high: int | None = None, size: None = None, - dtype: _DTypeLike[np.intp] | _IntPCodes = ..., - ) -> int_: ... + *, + dtype: _DTypeLike[np.uintp] | _UIntPCodes, + ) -> uint: ... 
@overload def randint( self, low: int, high: int | None = None, size: None = None, - dtype: _DTypeLike[np.long] | _LongCodes = ..., - ) -> long: ... + *, + dtype: _DTypeLike[np.ulong] | _ULongCodes, + ) -> ulong: ... @overload def randint( self, low: int, high: int | None = None, size: None = None, - dtype: _DTypeLike[np.int64] | _Int64Codes = ..., - ) -> int64: ... - @overload - def randint( - self, - low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = None, - size: _ShapeLike | None = None, - ) -> NDArray[long]: ... + *, + dtype: _DTypeLike[np.uint64] | _UInt64Codes, + ) -> uint64: ... @overload def randint( self, low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, - dtype: _DTypeLikeBool = ..., + *, + dtype: _DTypeLikeBool, ) -> NDArray[np.bool]: ... @overload def randint( @@ -303,7 +295,8 @@ class RandomState: low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, - dtype: _DTypeLike[np.int8] | _Int8Codes = ..., + *, + dtype: _DTypeLike[np.int8] | _Int8Codes, ) -> NDArray[int8]: ... @overload def randint( @@ -311,7 +304,8 @@ class RandomState: low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, - dtype: _DTypeLike[np.int16] | _Int16Codes = ..., + *, + dtype: _DTypeLike[np.int16] | _Int16Codes, ) -> NDArray[int16]: ... @overload def randint( @@ -319,7 +313,8 @@ class RandomState: low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, - dtype: _DTypeLike[np.int32] | _Int32Codes = ..., + *, + dtype: _DTypeLike[np.int32] | _Int32Codes, ) -> NDArray[int32]: ... @overload def randint( @@ -327,7 +322,8 @@ class RandomState: low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, - dtype: _DTypeLike[np.int64] | _Int64Codes = ..., + *, + dtype: _DTypeLike[np.int64] | _Int64Codes, ) -> NDArray[int64]: ... 
@overload def randint( @@ -335,7 +331,8 @@ class RandomState: low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, - dtype: _DTypeLike[np.uint8] = ..., + *, + dtype: _DTypeLike[np.uint8] | _UInt8Codes, ) -> NDArray[uint8]: ... @overload def randint( @@ -343,7 +340,8 @@ class RandomState: low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, - dtype: _DTypeLike[np.uint16] | _UInt16Codes = ..., + *, + dtype: _DTypeLike[np.uint16] | _UInt16Codes, ) -> NDArray[uint16]: ... @overload def randint( @@ -351,7 +349,8 @@ class RandomState: low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, - dtype: _DTypeLike[np.uint32] | _UInt32Codes = ..., + *, + dtype: _DTypeLike[np.uint32] | _UInt32Codes, ) -> NDArray[uint32]: ... @overload def randint( @@ -359,7 +358,8 @@ class RandomState: low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, - dtype: _DTypeLike[np.uint64] | _UInt64Codes = ..., + *, + dtype: _DTypeLike[np.uint64] | _UInt64Codes, ) -> NDArray[uint64]: ... @overload def randint( @@ -367,7 +367,8 @@ class RandomState: low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, - dtype: _DTypeLike[np.long] | _LongCodes = ..., + *, + dtype: _DTypeLike[np.long] | _LongCodes, ) -> NDArray[long]: ... @overload def randint( @@ -375,7 +376,8 @@ class RandomState: low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, - dtype: _DTypeLike[np.ulong] | _ULongCodes = ..., + *, + dtype: _DTypeLike[np.ulong] | _ULongCodes, ) -> NDArray[ulong]: ... 
# From f2cab886ee65504566cd0635648f7809566a7401 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 22 Jan 2026 19:11:04 +0100 Subject: [PATCH 1324/1718] TYP: `RandomState.randint`: remove unlikely overloads --- numpy/random/mtrand.pyi | 46 +++++------------------------------------ 1 file changed, 5 insertions(+), 41 deletions(-) diff --git a/numpy/random/mtrand.pyi b/numpy/random/mtrand.pyi index 790322c33805..bf060082b3b0 100644 --- a/numpy/random/mtrand.pyi +++ b/numpy/random/mtrand.pyi @@ -24,7 +24,7 @@ from numpy._typing import ( _ArrayLikeFloat_co, _ArrayLikeInt_co, _DTypeLike, - _DTypeLikeBool, + _DTypeLikeInt, _Int8Codes, _Int16Codes, _Int32Codes, @@ -165,15 +165,6 @@ class RandomState: dtype: type[int] = int, ) -> int: ... @overload - def randint( - self, - low: int, - high: int | None = None, - size: None = None, - *, - dtype: type[np.bool], - ) -> np.bool: ... - @overload def randint( self, low: int, @@ -219,14 +210,6 @@ class RandomState: dtype: _DTypeLike[np.int64] | _Int64Codes, ) -> int64: ... @overload - def randint( - self, - low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = None, - size: _ShapeLike | None = None, - dtype: type[int] | _DTypeLike[np.int_] | _IntPCodes = int, - ) -> NDArray[np.int_]: ... - @overload def randint( self, low: int, @@ -263,15 +246,6 @@ class RandomState: dtype: _DTypeLike[np.uintp] | _UIntPCodes, ) -> uint: ... @overload - def randint( - self, - low: int, - high: int | None = None, - size: None = None, - *, - dtype: _DTypeLike[np.ulong] | _ULongCodes, - ) -> ulong: ... - @overload def randint( self, low: int, @@ -286,9 +260,8 @@ class RandomState: low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, - *, - dtype: _DTypeLikeBool, - ) -> NDArray[np.bool]: ... + dtype: type[int] | _DTypeLike[np.int_] | _IntPCodes = int, + ) -> NDArray[np.int_]: ... 
@overload def randint( self, @@ -368,17 +341,8 @@ class RandomState: high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, *, - dtype: _DTypeLike[np.long] | _LongCodes, - ) -> NDArray[long]: ... - @overload - def randint( - self, - low: _ArrayLikeInt_co, - high: _ArrayLikeInt_co | None = None, - size: _ShapeLike | None = None, - *, - dtype: _DTypeLike[np.ulong] | _ULongCodes, - ) -> NDArray[ulong]: ... + dtype: _DTypeLikeInt, + ) -> NDArray[Any]: ... # def bytes(self, length: int) -> py_bytes: ... From 7e20e20fabb7318d321a7bc29bcd43c391fd3fcc Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 22 Jan 2026 19:12:17 +0100 Subject: [PATCH 1325/1718] BUG: Fixup the quantile promotion fixup Sorry, I incorrectly reverted a line and made things maybe worse :(. Basically it now *always* returned float64 (at least for most methods). I am surprised that we had not tests for this at all. I suppose this needs backporting, which isn't great, but... --- numpy/lib/_function_base_impl.py | 2 +- numpy/lib/tests/test_function_base.py | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index ef51f9622085..0dd7f5308ed2 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -4576,7 +4576,7 @@ def _get_gamma(virtual_indexes, previous_indexes, method): gamma = method["fix_gamma"](gamma, virtual_indexes) # Ensure both that we have an array, and that we keep the dtype # (which may have been matched to the input array). 
- return np.asanyarray(gamma) + return np.asanyarray(gamma, dtype=virtual_indexes.dtype) def _lerp(a, b, t, out=None): diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index a9edb80c97da..1a661376a3b9 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -3946,6 +3946,9 @@ def test_q_strong_promotion(self, method): a = np.array([1, 2, 3, 4, 5], dtype=np.float32) value = np.percentile(a, np.float64(50), method=method) assert value.dtype == np.float64 + # Check that we don't do accidental promotion either: + value = np.percentile(a, np.float32(50), method=method) + assert value.dtype == np.float32 class TestQuantile: @@ -4384,6 +4387,9 @@ def test_q_strong_promotion(self, method): a = np.array([1, 2, 3, 4, 5], dtype=np.float32) value = np.quantile(a, np.float64(0.5), method=method) assert value.dtype == np.float64 + # Check that we don't do accidental promotion either: + value = np.quantile(a, np.float32(0.5), method=method) + assert value.dtype == np.float32 class TestLerp: From 051a6c7e0c3d9d2da04810bd76a633418d56c9d3 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 22 Jan 2026 19:16:27 +0100 Subject: [PATCH 1326/1718] TYP/STY: `random.mtrand`: avoid `from numpy import ...` --- numpy/random/mtrand.pyi | 145 ++++++++++++++++++---------------------- 1 file changed, 64 insertions(+), 81 deletions(-) diff --git a/numpy/random/mtrand.pyi b/numpy/random/mtrand.pyi index bf060082b3b0..fc30e230c247 100644 --- a/numpy/random/mtrand.pyi +++ b/numpy/random/mtrand.pyi @@ -3,21 +3,6 @@ from collections.abc import Callable from typing import Any, Literal, overload import numpy as np -from numpy import ( - float64, - int8, - int16, - int32, - int64, - int_, - long, - uint, - uint8, - uint16, - uint32, - uint64, - ulong, -) from numpy._typing import ( ArrayLike, NDArray, @@ -30,14 +15,12 @@ from numpy._typing import ( _Int32Codes, _Int64Codes, _IntPCodes, - _LongCodes, _ShapeLike, _UInt8Codes, 
_UInt16Codes, _UInt32Codes, _UInt64Codes, _UIntPCodes, - _ULongCodes, ) from numpy.random.bit_generator import BitGenerator @@ -114,46 +97,46 @@ class RandomState: @overload def get_state(self, legacy: Literal[False] = False) -> dict[str, Any]: ... @overload - def get_state(self, legacy: Literal[True] = True) -> dict[str, Any] | tuple[str, NDArray[uint32], int, int, float]: ... + def get_state(self, legacy: Literal[True] = True) -> dict[str, Any] | tuple[str, NDArray[np.uint32], int, int, float]: ... # - def set_state(self, state: dict[str, Any] | tuple[str, NDArray[uint32], int, int, float]) -> None: ... + def set_state(self, state: dict[str, Any] | tuple[str, NDArray[np.uint32], int, int, float]) -> None: ... # @overload def random_sample(self, size: None = None) -> float: ... @overload - def random_sample(self, size: _ShapeLike) -> NDArray[float64]: ... + def random_sample(self, size: _ShapeLike) -> NDArray[np.float64]: ... # @overload def random(self, size: None = None) -> float: ... @overload - def random(self, size: _ShapeLike) -> NDArray[float64]: ... + def random(self, size: _ShapeLike) -> NDArray[np.float64]: ... # @overload def beta(self, a: float, b: float, size: None = None) -> float: ... @overload - def beta(self, a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[float64]: ... + def beta(self, a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[np.float64]: ... # @overload def exponential(self, scale: float = 1.0, size: None = None) -> float: ... @overload - def exponential(self, scale: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None) -> NDArray[float64]: ... + def exponential(self, scale: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None) -> NDArray[np.float64]: ... # @overload def standard_exponential(self, size: None = None) -> float: ... @overload - def standard_exponential(self, size: _ShapeLike) -> NDArray[float64]: ... 
+ def standard_exponential(self, size: _ShapeLike) -> NDArray[np.float64]: ... # @overload def tomaxint(self, size: None = None) -> int: ... @overload # Generates long values, but stores it in a 64bit int: - def tomaxint(self, size: _ShapeLike) -> NDArray[int64]: ... + def tomaxint(self, size: _ShapeLike) -> NDArray[np.int64]: ... # @overload @@ -172,7 +155,7 @@ class RandomState: size: None = None, *, dtype: _DTypeLike[np.int8] | _Int8Codes, - ) -> int8: ... + ) -> np.int8: ... @overload def randint( self, @@ -181,7 +164,7 @@ class RandomState: size: None = None, *, dtype: _DTypeLike[np.int16] | _Int16Codes, - ) -> int16: ... + ) -> np.int16: ... @overload def randint( self, @@ -190,7 +173,7 @@ class RandomState: size: None = None, *, dtype: _DTypeLike[np.int32] | _Int32Codes, - ) -> int32: ... + ) -> np.int32: ... @overload def randint( self, @@ -199,7 +182,7 @@ class RandomState: size: None = None, *, dtype: type[int] | _DTypeLike[np.int_] | _IntPCodes = int, - ) -> int_: ... + ) -> np.int_: ... @overload def randint( self, @@ -208,7 +191,7 @@ class RandomState: size: None = None, *, dtype: _DTypeLike[np.int64] | _Int64Codes, - ) -> int64: ... + ) -> np.int64: ... @overload def randint( self, @@ -217,7 +200,7 @@ class RandomState: size: None = None, *, dtype: _DTypeLike[np.uint8] | _UInt8Codes, - ) -> uint8: ... + ) -> np.uint8: ... @overload def randint( self, @@ -226,7 +209,7 @@ class RandomState: size: None = None, *, dtype: _DTypeLike[np.uint16] | _UInt16Codes, - ) -> uint16: ... + ) -> np.uint16: ... @overload def randint( self, @@ -235,7 +218,7 @@ class RandomState: size: None = None, *, dtype: _DTypeLike[np.uint32] | _UInt32Codes, - ) -> uint32: ... + ) -> np.uint32: ... @overload def randint( self, @@ -244,7 +227,7 @@ class RandomState: size: None = None, *, dtype: _DTypeLike[np.uintp] | _UIntPCodes, - ) -> uint: ... + ) -> np.uint: ... 
@overload def randint( self, @@ -253,7 +236,7 @@ class RandomState: size: None = None, *, dtype: _DTypeLike[np.uint64] | _UInt64Codes, - ) -> uint64: ... + ) -> np.uint64: ... @overload def randint( self, @@ -270,7 +253,7 @@ class RandomState: size: _ShapeLike | None = None, *, dtype: _DTypeLike[np.int8] | _Int8Codes, - ) -> NDArray[int8]: ... + ) -> NDArray[np.int8]: ... @overload def randint( self, @@ -279,7 +262,7 @@ class RandomState: size: _ShapeLike | None = None, *, dtype: _DTypeLike[np.int16] | _Int16Codes, - ) -> NDArray[int16]: ... + ) -> NDArray[np.int16]: ... @overload def randint( self, @@ -288,7 +271,7 @@ class RandomState: size: _ShapeLike | None = None, *, dtype: _DTypeLike[np.int32] | _Int32Codes, - ) -> NDArray[int32]: ... + ) -> NDArray[np.int32]: ... @overload def randint( self, @@ -297,7 +280,7 @@ class RandomState: size: _ShapeLike | None = None, *, dtype: _DTypeLike[np.int64] | _Int64Codes, - ) -> NDArray[int64]: ... + ) -> NDArray[np.int64]: ... @overload def randint( self, @@ -306,7 +289,7 @@ class RandomState: size: _ShapeLike | None = None, *, dtype: _DTypeLike[np.uint8] | _UInt8Codes, - ) -> NDArray[uint8]: ... + ) -> NDArray[np.uint8]: ... @overload def randint( self, @@ -315,7 +298,7 @@ class RandomState: size: _ShapeLike | None = None, *, dtype: _DTypeLike[np.uint16] | _UInt16Codes, - ) -> NDArray[uint16]: ... + ) -> NDArray[np.uint16]: ... @overload def randint( self, @@ -324,7 +307,7 @@ class RandomState: size: _ShapeLike | None = None, *, dtype: _DTypeLike[np.uint32] | _UInt32Codes, - ) -> NDArray[uint32]: ... + ) -> NDArray[np.uint32]: ... @overload def randint( self, @@ -333,7 +316,7 @@ class RandomState: size: _ShapeLike | None = None, *, dtype: _DTypeLike[np.uint64] | _UInt64Codes, - ) -> NDArray[uint64]: ... + ) -> NDArray[np.uint64]: ... @overload def randint( self, @@ -363,7 +346,7 @@ class RandomState: size: _ShapeLike | None = None, replace: bool = True, p: _ArrayLikeFloat_co | None = None, - ) -> NDArray[long]: ... 
+ ) -> NDArray[np.long]: ... @overload def choice( self, @@ -395,19 +378,19 @@ class RandomState: low: _ArrayLikeFloat_co = 0.0, high: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None, - ) -> NDArray[float64]: ... + ) -> NDArray[np.float64]: ... # @overload def rand(self) -> float: ... @overload - def rand(self, *args: int) -> NDArray[float64]: ... + def rand(self, *args: int) -> NDArray[np.float64]: ... # @overload def randn(self) -> float: ... @overload - def randn(self, *args: int) -> NDArray[float64]: ... + def randn(self, *args: int) -> NDArray[np.float64]: ... # @overload @@ -423,13 +406,13 @@ class RandomState: low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, - ) -> NDArray[long]: ... + ) -> NDArray[np.long]: ... # @overload def standard_normal(self, size: None = None) -> float: ... @overload - def standard_normal(self, size: _ShapeLike | None = None) -> NDArray[float64]: ... + def standard_normal(self, size: _ShapeLike | None = None) -> NDArray[np.float64]: ... # @overload @@ -445,13 +428,13 @@ class RandomState: loc: _ArrayLikeFloat_co = 0.0, scale: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None, - ) -> NDArray[float64]: ... + ) -> NDArray[np.float64]: ... # @overload def standard_gamma(self, shape: float, size: None = None) -> float: ... @overload - def standard_gamma(self, shape: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[float64]: ... + def standard_gamma(self, shape: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[np.float64]: ... # @overload @@ -467,13 +450,13 @@ class RandomState: shape: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None, - ) -> NDArray[float64]: ... + ) -> NDArray[np.float64]: ... # @overload def f(self, dfnum: float, dfden: float, size: None = None) -> float: ... @overload - def f(self, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[float64]: ... 
+ def f(self, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[np.float64]: ... # @overload @@ -491,13 +474,13 @@ class RandomState: dfden: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: _ShapeLike | None = None, - ) -> NDArray[float64]: ... + ) -> NDArray[np.float64]: ... # @overload def chisquare(self, df: float, size: None = None) -> float: ... @overload - def chisquare(self, df: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[float64]: ... + def chisquare(self, df: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[np.float64]: ... # @overload @@ -513,45 +496,45 @@ class RandomState: df: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: _ShapeLike | None = None, - ) -> NDArray[float64]: ... + ) -> NDArray[np.float64]: ... # @overload def standard_t(self, df: float, size: None = None) -> float: ... @overload - def standard_t(self, df: _ArrayLikeFloat_co, size: None = None) -> NDArray[float64]: ... + def standard_t(self, df: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.float64]: ... @overload - def standard_t(self, df: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[float64]: ... + def standard_t(self, df: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[np.float64]: ... # @overload def vonmises(self, mu: float, kappa: float, size: None = None) -> float: ... @overload - def vonmises(self, mu: _ArrayLikeFloat_co, kappa: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[float64]: ... + def vonmises(self, mu: _ArrayLikeFloat_co, kappa: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[np.float64]: ... # @overload def pareto(self, a: float, size: None = None) -> float: ... @overload - def pareto(self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[float64]: ... + def pareto(self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[np.float64]: ... 
# @overload def weibull(self, a: float, size: None = None) -> float: ... @overload - def weibull(self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[float64]: ... + def weibull(self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[np.float64]: ... # @overload def power(self, a: float, size: None = None) -> float: ... @overload - def power(self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[float64]: ... + def power(self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[np.float64]: ... # @overload def standard_cauchy(self, size: None = None) -> float: ... @overload - def standard_cauchy(self, size: _ShapeLike | None = None) -> NDArray[float64]: ... + def standard_cauchy(self, size: _ShapeLike | None = None) -> NDArray[np.float64]: ... # @overload @@ -567,7 +550,7 @@ class RandomState: loc: _ArrayLikeFloat_co = 0.0, scale: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None, - ) -> NDArray[float64]: ... + ) -> NDArray[np.float64]: ... # @overload @@ -583,7 +566,7 @@ class RandomState: loc: _ArrayLikeFloat_co = 0.0, scale: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None, - ) -> NDArray[float64]: ... + ) -> NDArray[np.float64]: ... # @overload @@ -599,7 +582,7 @@ class RandomState: loc: _ArrayLikeFloat_co = 0.0, scale: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None, - ) -> NDArray[float64]: ... + ) -> NDArray[np.float64]: ... # @overload @@ -615,19 +598,19 @@ class RandomState: mean: _ArrayLikeFloat_co = 0.0, sigma: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None, - ) -> NDArray[float64]: ... + ) -> NDArray[np.float64]: ... # @overload def rayleigh(self, scale: float = 1.0, size: None = None) -> float: ... @overload - def rayleigh(self, scale: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None) -> NDArray[float64]: ... + def rayleigh(self, scale: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None) -> NDArray[np.float64]: ... 
# @overload def wald(self, mean: float, scale: float, size: None = None) -> float: ... @overload - def wald(self, mean: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[float64]: ... + def wald(self, mean: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[np.float64]: ... # @overload @@ -645,13 +628,13 @@ class RandomState: mode: _ArrayLikeFloat_co, right: _ArrayLikeFloat_co, size: _ShapeLike | None = None, - ) -> NDArray[float64]: ... + ) -> NDArray[np.float64]: ... # @overload def binomial(self, n: int, p: float, size: None = None) -> int: ... @overload - def binomial(self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[long]: ... + def binomial(self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[np.long]: ... # @overload @@ -667,25 +650,25 @@ class RandomState: n: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None, - ) -> NDArray[long]: ... + ) -> NDArray[np.long]: ... # @overload def poisson(self, lam: float = 1.0, size: None = None) -> int: ... @overload - def poisson(self, lam: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None) -> NDArray[long]: ... + def poisson(self, lam: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None) -> NDArray[np.long]: ... # @overload def zipf(self, a: float, size: None = None) -> int: ... @overload - def zipf(self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[long]: ... + def zipf(self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[np.long]: ... # @overload def geometric(self, p: float, size: None = None) -> int: ... @overload - def geometric(self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[long]: ... + def geometric(self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[np.long]: ... 
# @overload @@ -703,13 +686,13 @@ class RandomState: nbad: _ArrayLikeInt_co, nsample: _ArrayLikeInt_co, size: _ShapeLike | None = None, - ) -> NDArray[long]: ... + ) -> NDArray[np.long]: ... # @overload def logseries(self, p: float, size: None = None) -> int: ... @overload - def logseries(self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[long]: ... + def logseries(self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[np.long]: ... # def multivariate_normal( @@ -719,20 +702,20 @@ class RandomState: size: _ShapeLike | None = None, check_valid: Literal["warn", "raise", "ignore"] = "warn", tol: float = 1e-8, - ) -> NDArray[float64]: ... + ) -> NDArray[np.float64]: ... # - def multinomial(self, n: _ArrayLikeInt_co, pvals: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[long]: ... + def multinomial(self, n: _ArrayLikeInt_co, pvals: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[np.long]: ... # - def dirichlet(self, alpha: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[float64]: ... + def dirichlet(self, alpha: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[np.float64]: ... # def shuffle(self, x: ArrayLike) -> None: ... # @overload - def permutation(self, x: int) -> NDArray[long]: ... + def permutation(self, x: int) -> NDArray[np.long]: ... @overload def permutation(self, x: ArrayLike) -> NDArray[Any]: ... 
From 30db985a90b7cf8490824bf0a75ba43f367eead7 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 22 Jan 2026 19:36:22 +0100 Subject: [PATCH 1327/1718] TYP/STY: `random.mtrand`: fix overlapping overloads --- numpy/random/mtrand.pyi | 329 +++++++++++++++++++++++++++++++--------- 1 file changed, 255 insertions(+), 74 deletions(-) diff --git a/numpy/random/mtrand.pyi b/numpy/random/mtrand.pyi index fc30e230c247..bf4b5e652253 100644 --- a/numpy/random/mtrand.pyi +++ b/numpy/random/mtrand.pyi @@ -118,13 +118,19 @@ class RandomState: @overload def beta(self, a: float, b: float, size: None = None) -> float: ... @overload - def beta(self, a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[np.float64]: ... + def beta(self, a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.float64]: ... + @overload + def beta(self, a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.float64] | Any: ... # @overload def exponential(self, scale: float = 1.0, size: None = None) -> float: ... @overload - def exponential(self, scale: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None) -> NDArray[np.float64]: ... + def exponential(self, scale: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.float64]: ... + @overload + def exponential(self, scale: _ArrayLikeFloat_co = 1.0, *, size: _ShapeLike) -> NDArray[np.float64]: ... + @overload + def exponential(self, scale: _ArrayLikeFloat_co = 1.0, size: None = None) -> NDArray[np.float64] | Any: ... # @overload @@ -181,8 +187,8 @@ class RandomState: high: int | None = None, size: None = None, *, - dtype: type[int] | _DTypeLike[np.int_] | _IntPCodes = int, - ) -> np.int_: ... + dtype: _DTypeLike[np.int64] | _Int64Codes, + ) -> np.int64: ... @overload def randint( self, @@ -190,8 +196,8 @@ class RandomState: high: int | None = None, size: None = None, *, - dtype: _DTypeLike[np.int64] | _Int64Codes, - ) -> np.int64: ... 
+ dtype: _DTypeLike[np.int_] | _IntPCodes, + ) -> np.int_: ... @overload def randint( self, @@ -226,8 +232,8 @@ class RandomState: high: int | None = None, size: None = None, *, - dtype: _DTypeLike[np.uintp] | _UIntPCodes, - ) -> np.uint: ... + dtype: _DTypeLike[np.uint64] | _UInt64Codes, + ) -> np.uint64: ... @overload def randint( self, @@ -235,16 +241,17 @@ class RandomState: high: int | None = None, size: None = None, *, - dtype: _DTypeLike[np.uint64] | _UInt64Codes, - ) -> np.uint64: ... + dtype: _DTypeLike[np.uintp] | _UIntPCodes, + ) -> np.uint: ... @overload def randint( self, low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, - dtype: type[int] | _DTypeLike[np.int_] | _IntPCodes = int, - ) -> NDArray[np.int_]: ... + *, + dtype: _DTypeLike[np.int8] | _Int8Codes, + ) -> NDArray[np.int8] | Any: ... @overload def randint( self, @@ -252,8 +259,8 @@ class RandomState: high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, *, - dtype: _DTypeLike[np.int8] | _Int8Codes, - ) -> NDArray[np.int8]: ... + dtype: _DTypeLike[np.int16] | _Int16Codes, + ) -> NDArray[np.int16] | Any: ... @overload def randint( self, @@ -261,8 +268,8 @@ class RandomState: high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, *, - dtype: _DTypeLike[np.int16] | _Int16Codes, - ) -> NDArray[np.int16]: ... + dtype: _DTypeLike[np.int32] | _Int32Codes, + ) -> NDArray[np.int32] | Any: ... @overload def randint( self, @@ -270,17 +277,16 @@ class RandomState: high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, *, - dtype: _DTypeLike[np.int32] | _Int32Codes, - ) -> NDArray[np.int32]: ... + dtype: _DTypeLike[np.int64] | _Int64Codes, + ) -> NDArray[np.int64] | Any: ... @overload def randint( self, low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, size: _ShapeLike | None = None, - *, - dtype: _DTypeLike[np.int64] | _Int64Codes, - ) -> NDArray[np.int64]: ... 
+ dtype: type[int] | _DTypeLike[np.int_] | _IntPCodes = int, + ) -> NDArray[np.int_] | Any: ... @overload def randint( self, @@ -289,7 +295,7 @@ class RandomState: size: _ShapeLike | None = None, *, dtype: _DTypeLike[np.uint8] | _UInt8Codes, - ) -> NDArray[np.uint8]: ... + ) -> NDArray[np.uint8] | Any: ... @overload def randint( self, @@ -298,7 +304,7 @@ class RandomState: size: _ShapeLike | None = None, *, dtype: _DTypeLike[np.uint16] | _UInt16Codes, - ) -> NDArray[np.uint16]: ... + ) -> NDArray[np.uint16] | Any: ... @overload def randint( self, @@ -307,7 +313,7 @@ class RandomState: size: _ShapeLike | None = None, *, dtype: _DTypeLike[np.uint32] | _UInt32Codes, - ) -> NDArray[np.uint32]: ... + ) -> NDArray[np.uint32] | Any: ... @overload def randint( self, @@ -316,7 +322,7 @@ class RandomState: size: _ShapeLike | None = None, *, dtype: _DTypeLike[np.uint64] | _UInt64Codes, - ) -> NDArray[np.uint64]: ... + ) -> NDArray[np.uint64] | Any: ... @overload def randint( self, @@ -325,7 +331,7 @@ class RandomState: size: _ShapeLike | None = None, *, dtype: _DTypeLikeInt, - ) -> NDArray[Any]: ... + ) -> NDArray[Any] | Any: ... # def bytes(self, length: int) -> py_bytes: ... @@ -343,7 +349,7 @@ class RandomState: def choice( self, a: int, - size: _ShapeLike | None = None, + size: _ShapeLike, replace: bool = True, p: _ArrayLikeFloat_co | None = None, ) -> NDArray[np.long]: ... @@ -359,7 +365,7 @@ class RandomState: def choice( self, a: ArrayLike, - size: _ShapeLike | None = None, + size: _ShapeLike, replace: bool = True, p: _ArrayLikeFloat_co | None = None, ) -> NDArray[Any]: ... @@ -373,24 +379,39 @@ class RandomState: size: None = None, ) -> float: ... @overload + def uniform( + self, + low: _ArrayLikeFloat_co, + high: _ArrayLikeFloat_co, + size: _ShapeLike, + ) -> NDArray[np.float64]: ... + @overload def uniform( self, low: _ArrayLikeFloat_co = 0.0, high: _ArrayLikeFloat_co = 1.0, - size: _ShapeLike | None = None, + *, + size: _ShapeLike, ) -> NDArray[np.float64]: ... 
+ @overload + def uniform( + self, + low: _ArrayLikeFloat_co = 0.0, + high: _ArrayLikeFloat_co = 1.0, + size: None = None, + ) -> NDArray[np.float64] | Any: ... # @overload - def rand(self) -> float: ... + def rand(self, /) -> float: ... @overload - def rand(self, *args: int) -> NDArray[np.float64]: ... + def rand(self, arg0: int, /, *args: int) -> NDArray[np.float64]: ... # @overload - def randn(self) -> float: ... + def randn(self, /) -> float: ... @overload - def randn(self, *args: int) -> NDArray[np.float64]: ... + def randn(self, arg0: int, /, *args: int) -> NDArray[np.float64]: ... # @overload @@ -401,18 +422,33 @@ class RandomState: size: None = None, ) -> int: ... @overload + def random_integers( + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None, + size: _ShapeLike, + ) -> NDArray[np.long]: ... + @overload def random_integers( self, low: _ArrayLikeInt_co, high: _ArrayLikeInt_co | None = None, - size: _ShapeLike | None = None, + *, + size: _ShapeLike, ) -> NDArray[np.long]: ... + @overload + def random_integers( + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: None = None, + ) -> NDArray[np.long] | Any: ... # @overload def standard_normal(self, size: None = None) -> float: ... @overload - def standard_normal(self, size: _ShapeLike | None = None) -> NDArray[np.float64]: ... + def standard_normal(self, size: _ShapeLike) -> NDArray[np.float64]: ... # @overload @@ -423,18 +459,35 @@ class RandomState: size: None = None, ) -> float: ... @overload + def normal( + self, + loc: _ArrayLikeFloat_co, + scale: _ArrayLikeFloat_co, + size: _ShapeLike, + ) -> NDArray[np.float64]: ... + @overload def normal( self, loc: _ArrayLikeFloat_co = 0.0, scale: _ArrayLikeFloat_co = 1.0, - size: _ShapeLike | None = None, + *, + size: _ShapeLike, ) -> NDArray[np.float64]: ... + @overload + def normal( + self, + loc: _ArrayLikeFloat_co = 0.0, + scale: _ArrayLikeFloat_co = 1.0, + size: None = None, + ) -> NDArray[np.float64] | Any: ... 
# @overload def standard_gamma(self, shape: float, size: None = None) -> float: ... @overload - def standard_gamma(self, shape: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[np.float64]: ... + def standard_gamma(self, shape: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.float64]: ... + @overload + def standard_gamma(self, shape: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.float64] | Any: ... # @overload @@ -445,18 +498,35 @@ class RandomState: size: None = None, ) -> float: ... @overload + def gamma( + self, + shape: _ArrayLikeFloat_co, + scale: _ArrayLikeFloat_co, + size: _ShapeLike, + ) -> NDArray[np.float64]: ... + @overload def gamma( self, shape: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co = 1.0, - size: _ShapeLike | None = None, + *, + size: _ShapeLike, ) -> NDArray[np.float64]: ... + @overload + def gamma( + self, + shape: _ArrayLikeFloat_co, + scale: _ArrayLikeFloat_co = 1.0, + size: None = None, + ) -> NDArray[np.float64] | Any: ... # @overload def f(self, dfnum: float, dfden: float, size: None = None) -> float: ... @overload - def f(self, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[np.float64]: ... + def f(self, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.float64]: ... + @overload + def f(self, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.float64] | Any: ... # @overload @@ -473,14 +543,24 @@ class RandomState: dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, - size: _ShapeLike | None = None, + size: _ShapeLike, ) -> NDArray[np.float64]: ... + @overload + def noncentral_f( + self, + dfnum: _ArrayLikeFloat_co, + dfden: _ArrayLikeFloat_co, + nonc: _ArrayLikeFloat_co, + size: None = None, + ) -> NDArray[np.float64] | Any: ... # @overload def chisquare(self, df: float, size: None = None) -> float: ... 
@overload - def chisquare(self, df: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[np.float64]: ... + def chisquare(self, df: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.float64]: ... + @overload + def chisquare(self, df: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.float64] | Any: ... # @overload @@ -495,46 +575,61 @@ class RandomState: self, df: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, - size: _ShapeLike | None = None, + size: _ShapeLike, ) -> NDArray[np.float64]: ... + @overload + def noncentral_chisquare( + self, + df: _ArrayLikeFloat_co, + nonc: _ArrayLikeFloat_co, + size: None = None, + ) -> NDArray[np.float64] | Any: ... # @overload def standard_t(self, df: float, size: None = None) -> float: ... @overload - def standard_t(self, df: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.float64]: ... + def standard_t(self, df: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.float64]: ... @overload - def standard_t(self, df: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[np.float64]: ... + def standard_t(self, df: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.float64] | Any: ... # @overload def vonmises(self, mu: float, kappa: float, size: None = None) -> float: ... @overload - def vonmises(self, mu: _ArrayLikeFloat_co, kappa: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[np.float64]: ... + def vonmises(self, mu: _ArrayLikeFloat_co, kappa: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.float64]: ... + @overload + def vonmises(self, mu: _ArrayLikeFloat_co, kappa: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.float64] | Any: ... # @overload def pareto(self, a: float, size: None = None) -> float: ... @overload - def pareto(self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[np.float64]: ... + def pareto(self, a: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.float64]: ... 
+ @overload + def pareto(self, a: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.float64] | Any: ... # @overload def weibull(self, a: float, size: None = None) -> float: ... @overload - def weibull(self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[np.float64]: ... + def weibull(self, a: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.float64]: ... + @overload + def weibull(self, a: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.float64] | Any: ... # @overload def power(self, a: float, size: None = None) -> float: ... @overload - def power(self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[np.float64]: ... + def power(self, a: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.float64]: ... + @overload + def power(self, a: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.float64] | Any: ... # @overload def standard_cauchy(self, size: None = None) -> float: ... @overload - def standard_cauchy(self, size: _ShapeLike | None = None) -> NDArray[np.float64]: ... + def standard_cauchy(self, size: _ShapeLike) -> NDArray[np.float64]: ... # @overload @@ -545,12 +640,27 @@ class RandomState: size: None = None, ) -> float: ... @overload + def laplace( + self, + loc: _ArrayLikeFloat_co, + scale: _ArrayLikeFloat_co, + size: _ShapeLike, + ) -> NDArray[np.float64]: ... + @overload def laplace( self, loc: _ArrayLikeFloat_co = 0.0, scale: _ArrayLikeFloat_co = 1.0, - size: _ShapeLike | None = None, + *, + size: _ShapeLike, ) -> NDArray[np.float64]: ... + @overload + def laplace( + self, + loc: _ArrayLikeFloat_co = 0.0, + scale: _ArrayLikeFloat_co = 1.0, + size: None = None, + ) -> NDArray[np.float64] | Any: ... # @overload @@ -561,12 +671,27 @@ class RandomState: size: None = None, ) -> float: ... @overload + def gumbel( + self, + loc: _ArrayLikeFloat_co, + scale: _ArrayLikeFloat_co, + size: _ShapeLike, + ) -> NDArray[np.float64]: ... 
+ @overload def gumbel( self, loc: _ArrayLikeFloat_co = 0.0, scale: _ArrayLikeFloat_co = 1.0, - size: _ShapeLike | None = None, + *, + size: _ShapeLike, ) -> NDArray[np.float64]: ... + @overload + def gumbel( + self, + loc: _ArrayLikeFloat_co = 0.0, + scale: _ArrayLikeFloat_co = 1.0, + size: None = None, + ) -> NDArray[np.float64] | Any: ... # @overload @@ -577,12 +702,27 @@ class RandomState: size: None = None, ) -> float: ... @overload + def logistic( + self, + loc: _ArrayLikeFloat_co, + scale: _ArrayLikeFloat_co, + size: _ShapeLike, + ) -> NDArray[np.float64]: ... + @overload def logistic( self, loc: _ArrayLikeFloat_co = 0.0, scale: _ArrayLikeFloat_co = 1.0, - size: _ShapeLike | None = None, + *, + size: _ShapeLike, ) -> NDArray[np.float64]: ... + @overload + def logistic( + self, + loc: _ArrayLikeFloat_co = 0.0, + scale: _ArrayLikeFloat_co = 1.0, + size: None = None, + ) -> NDArray[np.float64] | Any: ... # @overload @@ -593,24 +733,45 @@ class RandomState: size: None = None, ) -> float: ... @overload + def lognormal( + self, + mean: _ArrayLikeFloat_co, + sigma: _ArrayLikeFloat_co, + size: _ShapeLike, + ) -> NDArray[np.float64]: ... + @overload def lognormal( self, mean: _ArrayLikeFloat_co = 0.0, sigma: _ArrayLikeFloat_co = 1.0, - size: _ShapeLike | None = None, + *, + size: _ShapeLike, ) -> NDArray[np.float64]: ... + @overload + def lognormal( + self, + mean: _ArrayLikeFloat_co = 0.0, + sigma: _ArrayLikeFloat_co = 1.0, + size: None = None, + ) -> NDArray[np.float64] | Any: ... # @overload def rayleigh(self, scale: float = 1.0, size: None = None) -> float: ... @overload - def rayleigh(self, scale: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None) -> NDArray[np.float64]: ... + def rayleigh(self, scale: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.float64]: ... + @overload + def rayleigh(self, scale: _ArrayLikeFloat_co = 1.0, *, size: _ShapeLike) -> NDArray[np.float64]: ... 
+ @overload + def rayleigh(self, scale: _ArrayLikeFloat_co = 1.0, size: None = None) -> NDArray[np.float64] | Any: ... # @overload def wald(self, mean: float, scale: float, size: None = None) -> float: ... @overload - def wald(self, mean: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[np.float64]: ... + def wald(self, mean: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.float64]: ... + @overload + def wald(self, mean: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.float64] | Any: ... # @overload @@ -627,48 +788,58 @@ class RandomState: left: _ArrayLikeFloat_co, mode: _ArrayLikeFloat_co, right: _ArrayLikeFloat_co, - size: _ShapeLike | None = None, + size: _ShapeLike, ) -> NDArray[np.float64]: ... + @overload + def triangular( + self, + left: _ArrayLikeFloat_co, + mode: _ArrayLikeFloat_co, + right: _ArrayLikeFloat_co, + size: None = None, + ) -> NDArray[np.float64] | Any: ... # @overload def binomial(self, n: int, p: float, size: None = None) -> int: ... @overload - def binomial(self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[np.long]: ... + def binomial(self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.long]: ... + @overload + def binomial(self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.long] | Any: ... # @overload - def negative_binomial( - self, - n: float, - p: float, - size: None = None, - ) -> int: ... + def negative_binomial(self, n: float, p: float, size: None = None) -> int: ... @overload - def negative_binomial( - self, - n: _ArrayLikeFloat_co, - p: _ArrayLikeFloat_co, - size: _ShapeLike | None = None, - ) -> NDArray[np.long]: ... + def negative_binomial(self, n: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.long]: ... 
+ @overload + def negative_binomial(self, n: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.long] | Any: ... # @overload def poisson(self, lam: float = 1.0, size: None = None) -> int: ... @overload - def poisson(self, lam: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None) -> NDArray[np.long]: ... + def poisson(self, lam: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.long]: ... + @overload + def poisson(self, lam: _ArrayLikeFloat_co = 1.0, *, size: _ShapeLike) -> NDArray[np.long]: ... + @overload + def poisson(self, lam: _ArrayLikeFloat_co = 1.0, size: None = None) -> NDArray[np.long] | Any: ... # @overload def zipf(self, a: float, size: None = None) -> int: ... @overload - def zipf(self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[np.long]: ... + def zipf(self, a: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.long]: ... + @overload + def zipf(self, a: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.long] | Any: ... # @overload def geometric(self, p: float, size: None = None) -> int: ... @overload - def geometric(self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[np.long]: ... + def geometric(self, p: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.long]: ... + @overload + def geometric(self, p: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.long] | Any: ... # @overload @@ -685,14 +856,24 @@ class RandomState: ngood: _ArrayLikeInt_co, nbad: _ArrayLikeInt_co, nsample: _ArrayLikeInt_co, - size: _ShapeLike | None = None, + size: _ShapeLike, ) -> NDArray[np.long]: ... + @overload + def hypergeometric( + self, + ngood: _ArrayLikeInt_co, + nbad: _ArrayLikeInt_co, + nsample: _ArrayLikeInt_co, + size: None = None, + ) -> NDArray[np.long] | Any: ... # @overload def logseries(self, p: float, size: None = None) -> int: ... @overload - def logseries(self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None) -> NDArray[np.long]: ... 
+ def logseries(self, p: _ArrayLikeFloat_co, size: _ShapeLike) -> NDArray[np.long]: ... + @overload + def logseries(self, p: _ArrayLikeFloat_co, size: None = None) -> NDArray[np.long] | Any: ... # def multivariate_normal( From 203ab06d4af08f164b59c64ea6d9ebfb54a8c746 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 22 Jan 2026 19:55:12 +0100 Subject: [PATCH 1328/1718] TYP: `RandomState.randint`: restore boolean dtype overloads --- numpy/random/mtrand.pyi | 40 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 39 insertions(+), 1 deletion(-) diff --git a/numpy/random/mtrand.pyi b/numpy/random/mtrand.pyi index bf4b5e652253..066a56545f23 100644 --- a/numpy/random/mtrand.pyi +++ b/numpy/random/mtrand.pyi @@ -8,7 +8,9 @@ from numpy._typing import ( NDArray, _ArrayLikeFloat_co, _ArrayLikeInt_co, + _BoolCodes, _DTypeLike, + _DTypeLikeBool, _DTypeLikeInt, _Int8Codes, _Int16Codes, @@ -146,6 +148,15 @@ class RandomState: # @overload + def randint( + self, + low: int, + high: int | None = None, + size: None = None, + *, + dtype: type[bool], + ) -> bool: ... + @overload def randint( self, low: int, @@ -154,6 +165,15 @@ class RandomState: dtype: type[int] = int, ) -> int: ... @overload + def randint( + self, + low: int, + high: int | None = None, + size: None = None, + *, + dtype: _DTypeLike[np.bool] | _BoolCodes, + ) -> np.bool: ... + @overload def randint( self, low: int, @@ -244,6 +264,24 @@ class RandomState: dtype: _DTypeLike[np.uintp] | _UIntPCodes, ) -> np.uint: ... @overload + def randint( + self, + low: int, + high: int | None = None, + size: None = None, + *, + dtype: _DTypeLikeInt, + ) -> np.integer | Any: ... + @overload + def randint( + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _DTypeLikeBool, + ) -> NDArray[np.bool] | Any: ... 
+ @overload def randint( self, low: _ArrayLikeInt_co, @@ -331,7 +369,7 @@ class RandomState: size: _ShapeLike | None = None, *, dtype: _DTypeLikeInt, - ) -> NDArray[Any] | Any: ... + ) -> NDArray[np.integer] | Any: ... # def bytes(self, length: int) -> py_bytes: ... From 9f65aa0b6c76cbf07a029d42d4b9d4eaefab5c60 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 22 Jan 2026 19:55:54 +0100 Subject: [PATCH 1329/1718] TYP: update type-tests for `RandomState` --- numpy/typing/tests/data/reveal/random.pyi | 425 +++++++++++----------- 1 file changed, 213 insertions(+), 212 deletions(-) diff --git a/numpy/typing/tests/data/reveal/random.pyi b/numpy/typing/tests/data/reveal/random.pyi index 4d00ef0d99aa..72f8c62f79e0 100644 --- a/numpy/typing/tests/data/reveal/random.pyi +++ b/numpy/typing/tests/data/reveal/random.pyi @@ -777,510 +777,511 @@ assert_type(random_st.standard_exponential(size=1), npt.NDArray[np.float64]) assert_type(random_st.zipf(1.5), int) assert_type(random_st.zipf(1.5, size=None), int) assert_type(random_st.zipf(1.5, size=1), npt.NDArray[np.long]) -assert_type(random_st.zipf(D_arr_1p5), npt.NDArray[np.long]) +assert_type(random_st.zipf(D_arr_1p5), npt.NDArray[np.long] | Any) assert_type(random_st.zipf(D_arr_1p5, size=1), npt.NDArray[np.long]) -assert_type(random_st.zipf(D_arr_like_1p5), npt.NDArray[np.long]) +assert_type(random_st.zipf(D_arr_like_1p5), npt.NDArray[np.long] | Any) assert_type(random_st.zipf(D_arr_like_1p5, size=1), npt.NDArray[np.long]) assert_type(random_st.weibull(0.5), float) assert_type(random_st.weibull(0.5, size=None), float) assert_type(random_st.weibull(0.5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.weibull(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.weibull(D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.weibull(D_arr_0p5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.weibull(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.weibull(D_arr_like_0p5), 
npt.NDArray[np.float64] | Any) assert_type(random_st.weibull(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.standard_t(0.5), float) assert_type(random_st.standard_t(0.5, size=None), float) assert_type(random_st.standard_t(0.5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.standard_t(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.standard_t(D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.standard_t(D_arr_0p5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.standard_t(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.standard_t(D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.standard_t(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.poisson(0.5), int) assert_type(random_st.poisson(0.5, size=None), int) assert_type(random_st.poisson(0.5, size=1), npt.NDArray[np.long]) -assert_type(random_st.poisson(D_arr_0p5), npt.NDArray[np.long]) +assert_type(random_st.poisson(D_arr_0p5), npt.NDArray[np.long] | Any) assert_type(random_st.poisson(D_arr_0p5, size=1), npt.NDArray[np.long]) -assert_type(random_st.poisson(D_arr_like_0p5), npt.NDArray[np.long]) +assert_type(random_st.poisson(D_arr_like_0p5), npt.NDArray[np.long] | Any) assert_type(random_st.poisson(D_arr_like_0p5, size=1), npt.NDArray[np.long]) assert_type(random_st.power(0.5), float) assert_type(random_st.power(0.5, size=None), float) assert_type(random_st.power(0.5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.power(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.power(D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.power(D_arr_0p5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.power(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.power(D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.power(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.pareto(0.5), float) 
assert_type(random_st.pareto(0.5, size=None), float) assert_type(random_st.pareto(0.5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.pareto(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.pareto(D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.pareto(D_arr_0p5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.pareto(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.pareto(D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.pareto(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.chisquare(0.5), float) assert_type(random_st.chisquare(0.5, size=None), float) assert_type(random_st.chisquare(0.5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.chisquare(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.chisquare(D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.chisquare(D_arr_0p5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.chisquare(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.chisquare(D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.chisquare(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.exponential(0.5), float) assert_type(random_st.exponential(0.5, size=None), float) assert_type(random_st.exponential(0.5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.exponential(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.exponential(D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.exponential(D_arr_0p5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.exponential(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.exponential(D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.exponential(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.geometric(0.5), int) assert_type(random_st.geometric(0.5, size=None), int) assert_type(random_st.geometric(0.5, size=1), 
npt.NDArray[np.long]) -assert_type(random_st.geometric(D_arr_0p5), npt.NDArray[np.long]) +assert_type(random_st.geometric(D_arr_0p5), npt.NDArray[np.long] | Any) assert_type(random_st.geometric(D_arr_0p5, size=1), npt.NDArray[np.long]) -assert_type(random_st.geometric(D_arr_like_0p5), npt.NDArray[np.long]) +assert_type(random_st.geometric(D_arr_like_0p5), npt.NDArray[np.long] | Any) assert_type(random_st.geometric(D_arr_like_0p5, size=1), npt.NDArray[np.long]) assert_type(random_st.logseries(0.5), int) assert_type(random_st.logseries(0.5, size=None), int) assert_type(random_st.logseries(0.5, size=1), npt.NDArray[np.long]) -assert_type(random_st.logseries(D_arr_0p5), npt.NDArray[np.long]) +assert_type(random_st.logseries(D_arr_0p5), npt.NDArray[np.long] | Any) assert_type(random_st.logseries(D_arr_0p5, size=1), npt.NDArray[np.long]) -assert_type(random_st.logseries(D_arr_like_0p5), npt.NDArray[np.long]) +assert_type(random_st.logseries(D_arr_like_0p5), npt.NDArray[np.long] | Any) assert_type(random_st.logseries(D_arr_like_0p5, size=1), npt.NDArray[np.long]) assert_type(random_st.rayleigh(0.5), float) assert_type(random_st.rayleigh(0.5, size=None), float) assert_type(random_st.rayleigh(0.5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.rayleigh(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.rayleigh(D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.rayleigh(D_arr_0p5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.rayleigh(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.rayleigh(D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.rayleigh(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.standard_gamma(0.5), float) assert_type(random_st.standard_gamma(0.5, size=None), float) assert_type(random_st.standard_gamma(0.5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.standard_gamma(D_arr_0p5), npt.NDArray[np.float64]) 
+assert_type(random_st.standard_gamma(D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.standard_gamma(D_arr_0p5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.standard_gamma(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.standard_gamma(D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.standard_gamma(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.standard_gamma(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.vonmises(0.5, 0.5), float) assert_type(random_st.vonmises(0.5, 0.5, size=None), float) assert_type(random_st.vonmises(0.5, 0.5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.vonmises(D_arr_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.vonmises(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.vonmises(D_arr_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.vonmises(0.5, D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.vonmises(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(random_st.vonmises(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.vonmises(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.vonmises(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) -assert_type(random_st.vonmises(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) -assert_type(random_st.vonmises(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.vonmises(D_arr_like_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.vonmises(0.5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.vonmises(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.vonmises(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.vonmises(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.vonmises(D_arr_like_0p5, D_arr_like_0p5, size=1), 
npt.NDArray[np.float64]) assert_type(random_st.wald(0.5, 0.5), float) assert_type(random_st.wald(0.5, 0.5, size=None), float) assert_type(random_st.wald(0.5, 0.5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.wald(D_arr_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.wald(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.wald(D_arr_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.wald(0.5, D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.wald(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(random_st.wald(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.wald(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.wald(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) -assert_type(random_st.wald(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) -assert_type(random_st.wald(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.wald(D_arr_like_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.wald(0.5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.wald(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.wald(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.wald(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.wald(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.uniform(0.5, 0.5), float) assert_type(random_st.uniform(0.5, 0.5, size=None), float) assert_type(random_st.uniform(0.5, 0.5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.uniform(D_arr_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.uniform(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.uniform(D_arr_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.uniform(0.5, D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.uniform(D_arr_0p5, 0.5, 
size=1), npt.NDArray[np.float64]) assert_type(random_st.uniform(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.uniform(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.uniform(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) -assert_type(random_st.uniform(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) -assert_type(random_st.uniform(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.uniform(D_arr_like_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.uniform(0.5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.uniform(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.uniform(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.uniform(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.uniform(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.beta(0.5, 0.5), float) assert_type(random_st.beta(0.5, 0.5, size=None), float) assert_type(random_st.beta(0.5, 0.5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.beta(D_arr_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.beta(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.beta(D_arr_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.beta(0.5, D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.beta(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(random_st.beta(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.beta(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.beta(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) -assert_type(random_st.beta(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) -assert_type(random_st.beta(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.beta(D_arr_like_0p5, 0.5), npt.NDArray[np.float64] | Any) 
+assert_type(random_st.beta(0.5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.beta(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.beta(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.beta(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.beta(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.f(0.5, 0.5), float) assert_type(random_st.f(0.5, 0.5, size=None), float) assert_type(random_st.f(0.5, 0.5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.f(D_arr_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.f(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.f(D_arr_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.f(0.5, D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.f(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(random_st.f(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.f(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.f(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) -assert_type(random_st.f(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) -assert_type(random_st.f(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.f(D_arr_like_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.f(0.5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.f(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.f(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.f(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.f(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.gamma(0.5, 0.5), float) assert_type(random_st.gamma(0.5, 0.5, size=None), float) assert_type(random_st.gamma(0.5, 0.5, size=1), npt.NDArray[np.float64]) 
-assert_type(random_st.gamma(D_arr_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.gamma(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.gamma(D_arr_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.gamma(0.5, D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.gamma(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(random_st.gamma(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.gamma(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.gamma(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) -assert_type(random_st.gamma(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) -assert_type(random_st.gamma(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.gamma(D_arr_like_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.gamma(0.5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.gamma(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.gamma(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.gamma(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.gamma(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.gumbel(0.5, 0.5), float) assert_type(random_st.gumbel(0.5, 0.5, size=None), float) assert_type(random_st.gumbel(0.5, 0.5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.gumbel(D_arr_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.gumbel(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.gumbel(D_arr_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.gumbel(0.5, D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.gumbel(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(random_st.gumbel(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.gumbel(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) 
-assert_type(random_st.gumbel(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) -assert_type(random_st.gumbel(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) -assert_type(random_st.gumbel(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.gumbel(D_arr_like_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.gumbel(0.5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.gumbel(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.gumbel(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.gumbel(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.gumbel(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.laplace(0.5, 0.5), float) assert_type(random_st.laplace(0.5, 0.5, size=None), float) assert_type(random_st.laplace(0.5, 0.5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.laplace(D_arr_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.laplace(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.laplace(D_arr_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.laplace(0.5, D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.laplace(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(random_st.laplace(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.laplace(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.laplace(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) -assert_type(random_st.laplace(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) -assert_type(random_st.laplace(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.laplace(D_arr_like_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.laplace(0.5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.laplace(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64] | Any) 
+assert_type(random_st.laplace(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.laplace(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.laplace(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.logistic(0.5, 0.5), float) assert_type(random_st.logistic(0.5, 0.5, size=None), float) assert_type(random_st.logistic(0.5, 0.5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.logistic(D_arr_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.logistic(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.logistic(D_arr_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.logistic(0.5, D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.logistic(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(random_st.logistic(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.logistic(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.logistic(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) -assert_type(random_st.logistic(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) -assert_type(random_st.logistic(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.logistic(D_arr_like_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.logistic(0.5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.logistic(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.logistic(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.logistic(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.logistic(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.lognormal(0.5, 0.5), float) assert_type(random_st.lognormal(0.5, 0.5, size=None), float) assert_type(random_st.lognormal(0.5, 0.5, size=1), npt.NDArray[np.float64]) 
-assert_type(random_st.lognormal(D_arr_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.lognormal(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.lognormal(D_arr_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.lognormal(0.5, D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.lognormal(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(random_st.lognormal(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.lognormal(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.lognormal(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) -assert_type(random_st.lognormal(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) -assert_type(random_st.lognormal(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.lognormal(D_arr_like_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.lognormal(0.5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.lognormal(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.lognormal(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.lognormal(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.lognormal(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.noncentral_chisquare(0.5, 0.5), float) assert_type(random_st.noncentral_chisquare(0.5, 0.5, size=None), float) assert_type(random_st.noncentral_chisquare(0.5, 0.5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.noncentral_chisquare(D_arr_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.noncentral_chisquare(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_chisquare(D_arr_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.noncentral_chisquare(0.5, D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.noncentral_chisquare(D_arr_0p5, 0.5, size=1), 
npt.NDArray[np.float64]) assert_type(random_st.noncentral_chisquare(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.noncentral_chisquare(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.noncentral_chisquare(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) -assert_type(random_st.noncentral_chisquare(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) -assert_type(random_st.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_chisquare(D_arr_like_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.noncentral_chisquare(0.5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.noncentral_chisquare(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.noncentral_chisquare(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.normal(0.5, 0.5), float) assert_type(random_st.normal(0.5, 0.5, size=None), float) assert_type(random_st.normal(0.5, 0.5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.normal(D_arr_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.normal(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.normal(D_arr_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.normal(0.5, D_arr_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.normal(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) assert_type(random_st.normal(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) -assert_type(random_st.normal(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) -assert_type(random_st.normal(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) -assert_type(random_st.normal(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) 
-assert_type(random_st.normal(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.normal(D_arr_like_0p5, 0.5), npt.NDArray[np.float64] | Any) +assert_type(random_st.normal(0.5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.normal(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64] | Any) +assert_type(random_st.normal(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64] | Any) assert_type(random_st.normal(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.normal(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) assert_type(random_st.triangular(0.1, 0.5, 0.9), float) assert_type(random_st.triangular(0.1, 0.5, 0.9, size=None), float) assert_type(random_st.triangular(0.1, 0.5, 0.9, size=1), npt.NDArray[np.float64]) -assert_type(random_st.triangular(D_arr_0p1, 0.5, 0.9), npt.NDArray[np.float64]) -assert_type(random_st.triangular(0.1, D_arr_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(random_st.triangular(D_arr_0p1, 0.5, 0.9), npt.NDArray[np.float64] | Any) +assert_type(random_st.triangular(0.1, D_arr_0p5, 0.9), npt.NDArray[np.float64] | Any) assert_type(random_st.triangular(D_arr_0p1, 0.5, D_arr_like_0p9, size=1), npt.NDArray[np.float64]) assert_type(random_st.triangular(0.1, D_arr_0p5, 0.9, size=1), npt.NDArray[np.float64]) -assert_type(random_st.triangular(D_arr_like_0p1, 0.5, D_arr_0p9), npt.NDArray[np.float64]) -assert_type(random_st.triangular(0.5, D_arr_like_0p5, 0.9), npt.NDArray[np.float64]) -assert_type(random_st.triangular(D_arr_0p1, D_arr_0p5, 0.9), npt.NDArray[np.float64]) -assert_type(random_st.triangular(D_arr_like_0p1, D_arr_like_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(random_st.triangular(D_arr_like_0p1, 0.5, D_arr_0p9), npt.NDArray[np.float64] | Any) +assert_type(random_st.triangular(0.5, D_arr_like_0p5, 0.9), npt.NDArray[np.float64] | Any) +assert_type(random_st.triangular(D_arr_0p1, D_arr_0p5, 0.9), npt.NDArray[np.float64] | Any) 
+assert_type(random_st.triangular(D_arr_like_0p1, D_arr_like_0p5, 0.9), npt.NDArray[np.float64] | Any) assert_type(random_st.triangular(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1), npt.NDArray[np.float64]) assert_type(random_st.triangular(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1), npt.NDArray[np.float64]) assert_type(random_st.noncentral_f(0.1, 0.5, 0.9), float) assert_type(random_st.noncentral_f(0.1, 0.5, 0.9, size=None), float) assert_type(random_st.noncentral_f(0.1, 0.5, 0.9, size=1), npt.NDArray[np.float64]) -assert_type(random_st.noncentral_f(D_arr_0p1, 0.5, 0.9), npt.NDArray[np.float64]) -assert_type(random_st.noncentral_f(0.1, D_arr_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_f(D_arr_0p1, 0.5, 0.9), npt.NDArray[np.float64] | Any) +assert_type(random_st.noncentral_f(0.1, D_arr_0p5, 0.9), npt.NDArray[np.float64] | Any) assert_type(random_st.noncentral_f(D_arr_0p1, 0.5, D_arr_like_0p9, size=1), npt.NDArray[np.float64]) assert_type(random_st.noncentral_f(0.1, D_arr_0p5, 0.9, size=1), npt.NDArray[np.float64]) -assert_type(random_st.noncentral_f(D_arr_like_0p1, 0.5, D_arr_0p9), npt.NDArray[np.float64]) -assert_type(random_st.noncentral_f(0.5, D_arr_like_0p5, 0.9), npt.NDArray[np.float64]) -assert_type(random_st.noncentral_f(D_arr_0p1, D_arr_0p5, 0.9), npt.NDArray[np.float64]) -assert_type(random_st.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_f(D_arr_like_0p1, 0.5, D_arr_0p9), npt.NDArray[np.float64] | Any) +assert_type(random_st.noncentral_f(0.5, D_arr_like_0p5, 0.9), npt.NDArray[np.float64] | Any) +assert_type(random_st.noncentral_f(D_arr_0p1, D_arr_0p5, 0.9), npt.NDArray[np.float64] | Any) +assert_type(random_st.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, 0.9), npt.NDArray[np.float64] | Any) assert_type(random_st.noncentral_f(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1), npt.NDArray[np.float64]) assert_type(random_st.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, 
D_arr_like_0p9, size=1), npt.NDArray[np.float64]) assert_type(random_st.binomial(10, 0.5), int) assert_type(random_st.binomial(10, 0.5, size=None), int) assert_type(random_st.binomial(10, 0.5, size=1), npt.NDArray[np.long]) -assert_type(random_st.binomial(I_arr_10, 0.5), npt.NDArray[np.long]) -assert_type(random_st.binomial(10, D_arr_0p5), npt.NDArray[np.long]) +assert_type(random_st.binomial(I_arr_10, 0.5), npt.NDArray[np.long] | Any) +assert_type(random_st.binomial(10, D_arr_0p5), npt.NDArray[np.long] | Any) assert_type(random_st.binomial(I_arr_10, 0.5, size=1), npt.NDArray[np.long]) assert_type(random_st.binomial(10, D_arr_0p5, size=1), npt.NDArray[np.long]) -assert_type(random_st.binomial(I_arr_like_10, 0.5), npt.NDArray[np.long]) -assert_type(random_st.binomial(10, D_arr_like_0p5), npt.NDArray[np.long]) -assert_type(random_st.binomial(I_arr_10, D_arr_0p5), npt.NDArray[np.long]) -assert_type(random_st.binomial(I_arr_like_10, D_arr_like_0p5), npt.NDArray[np.long]) +assert_type(random_st.binomial(I_arr_like_10, 0.5), npt.NDArray[np.long] | Any) +assert_type(random_st.binomial(10, D_arr_like_0p5), npt.NDArray[np.long] | Any) +assert_type(random_st.binomial(I_arr_10, D_arr_0p5), npt.NDArray[np.long] | Any) +assert_type(random_st.binomial(I_arr_like_10, D_arr_like_0p5), npt.NDArray[np.long] | Any) assert_type(random_st.binomial(I_arr_10, D_arr_0p5, size=1), npt.NDArray[np.long]) assert_type(random_st.binomial(I_arr_like_10, D_arr_like_0p5, size=1), npt.NDArray[np.long]) assert_type(random_st.negative_binomial(10, 0.5), int) assert_type(random_st.negative_binomial(10, 0.5, size=None), int) assert_type(random_st.negative_binomial(10, 0.5, size=1), npt.NDArray[np.long]) -assert_type(random_st.negative_binomial(I_arr_10, 0.5), npt.NDArray[np.long]) -assert_type(random_st.negative_binomial(10, D_arr_0p5), npt.NDArray[np.long]) +assert_type(random_st.negative_binomial(I_arr_10, 0.5), npt.NDArray[np.long] | Any) +assert_type(random_st.negative_binomial(10, D_arr_0p5), 
npt.NDArray[np.long] | Any) assert_type(random_st.negative_binomial(I_arr_10, 0.5, size=1), npt.NDArray[np.long]) assert_type(random_st.negative_binomial(10, D_arr_0p5, size=1), npt.NDArray[np.long]) -assert_type(random_st.negative_binomial(I_arr_like_10, 0.5), npt.NDArray[np.long]) -assert_type(random_st.negative_binomial(10, D_arr_like_0p5), npt.NDArray[np.long]) -assert_type(random_st.negative_binomial(I_arr_10, D_arr_0p5), npt.NDArray[np.long]) -assert_type(random_st.negative_binomial(I_arr_like_10, D_arr_like_0p5), npt.NDArray[np.long]) +assert_type(random_st.negative_binomial(I_arr_like_10, 0.5), npt.NDArray[np.long] | Any) +assert_type(random_st.negative_binomial(10, D_arr_like_0p5), npt.NDArray[np.long] | Any) +assert_type(random_st.negative_binomial(I_arr_10, D_arr_0p5), npt.NDArray[np.long] | Any) +assert_type(random_st.negative_binomial(I_arr_like_10, D_arr_like_0p5), npt.NDArray[np.long] | Any) +assert_type(random_st.negative_binomial(I_arr_like_10, D_arr_like_0p5), npt.NDArray[np.long] | Any) assert_type(random_st.negative_binomial(I_arr_10, D_arr_0p5, size=1), npt.NDArray[np.long]) assert_type(random_st.negative_binomial(I_arr_like_10, D_arr_like_0p5, size=1), npt.NDArray[np.long]) assert_type(random_st.hypergeometric(20, 20, 10), int) assert_type(random_st.hypergeometric(20, 20, 10, size=None), int) assert_type(random_st.hypergeometric(20, 20, 10, size=1), npt.NDArray[np.long]) -assert_type(random_st.hypergeometric(I_arr_20, 20, 10), npt.NDArray[np.long]) -assert_type(random_st.hypergeometric(20, I_arr_20, 10), npt.NDArray[np.long]) +assert_type(random_st.hypergeometric(I_arr_20, 20, 10), npt.NDArray[np.long] | Any) +assert_type(random_st.hypergeometric(20, I_arr_20, 10), npt.NDArray[np.long] | Any) assert_type(random_st.hypergeometric(I_arr_20, 20, I_arr_like_10, size=1), npt.NDArray[np.long]) assert_type(random_st.hypergeometric(20, I_arr_20, 10, size=1), npt.NDArray[np.long]) -assert_type(random_st.hypergeometric(I_arr_like_20, 20, I_arr_10), 
npt.NDArray[np.long]) -assert_type(random_st.hypergeometric(20, I_arr_like_20, 10), npt.NDArray[np.long]) -assert_type(random_st.hypergeometric(I_arr_20, I_arr_20, 10), npt.NDArray[np.long]) -assert_type(random_st.hypergeometric(I_arr_like_20, I_arr_like_20, 10), npt.NDArray[np.long]) +assert_type(random_st.hypergeometric(I_arr_like_20, 20, I_arr_10), npt.NDArray[np.long] | Any) +assert_type(random_st.hypergeometric(20, I_arr_like_20, 10), npt.NDArray[np.long] | Any) +assert_type(random_st.hypergeometric(I_arr_20, I_arr_20, 10), npt.NDArray[np.long] | Any) +assert_type(random_st.hypergeometric(I_arr_like_20, I_arr_like_20, 10), npt.NDArray[np.long] | Any) assert_type(random_st.hypergeometric(I_arr_20, I_arr_20, I_arr_10, size=1), npt.NDArray[np.long]) assert_type(random_st.hypergeometric(I_arr_like_20, I_arr_like_20, I_arr_like_10, size=1), npt.NDArray[np.long]) assert_type(random_st.randint(0, 100), int) assert_type(random_st.randint(100), int) -assert_type(random_st.randint([100]), npt.NDArray[np.long]) -assert_type(random_st.randint(0, [100]), npt.NDArray[np.long]) +assert_type(random_st.randint([100]), npt.NDArray[np.long] | Any) +assert_type(random_st.randint(0, [100]), npt.NDArray[np.long] | Any) assert_type(random_st.randint(2, dtype=bool), bool) assert_type(random_st.randint(0, 2, dtype=bool), bool) -assert_type(random_st.randint(I_bool_high_open, dtype=bool), npt.NDArray[np.bool]) -assert_type(random_st.randint(I_bool_low, I_bool_high_open, dtype=bool), npt.NDArray[np.bool]) -assert_type(random_st.randint(0, I_bool_high_open, dtype=bool), npt.NDArray[np.bool]) +assert_type(random_st.randint(I_bool_high_open, dtype=bool), npt.NDArray[np.bool] | Any) +assert_type(random_st.randint(I_bool_low, I_bool_high_open, dtype=bool), npt.NDArray[np.bool] | Any) +assert_type(random_st.randint(0, I_bool_high_open, dtype=bool), npt.NDArray[np.bool] | Any) assert_type(random_st.randint(2, dtype=np.bool), np.bool) assert_type(random_st.randint(0, 2, dtype=np.bool), np.bool) 
-assert_type(random_st.randint(I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool]) -assert_type(random_st.randint(I_bool_low, I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool]) -assert_type(random_st.randint(0, I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool]) +assert_type(random_st.randint(I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool] | Any) +assert_type(random_st.randint(I_bool_low, I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool] | Any) +assert_type(random_st.randint(0, I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool] | Any) assert_type(random_st.randint(256, dtype="u1"), np.uint8) assert_type(random_st.randint(0, 256, dtype="u1"), np.uint8) -assert_type(random_st.randint(I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8]) -assert_type(random_st.randint(I_u1_low, I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8]) -assert_type(random_st.randint(0, I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8]) +assert_type(random_st.randint(I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8] | Any) +assert_type(random_st.randint(I_u1_low, I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8] | Any) +assert_type(random_st.randint(0, I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8] | Any) assert_type(random_st.randint(256, dtype="uint8"), np.uint8) assert_type(random_st.randint(0, 256, dtype="uint8"), np.uint8) -assert_type(random_st.randint(I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8]) -assert_type(random_st.randint(I_u1_low, I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8]) -assert_type(random_st.randint(0, I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8]) +assert_type(random_st.randint(I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8] | Any) +assert_type(random_st.randint(I_u1_low, I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8] | Any) +assert_type(random_st.randint(0, I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8] | Any) assert_type(random_st.randint(256, dtype=np.uint8), np.uint8) 
assert_type(random_st.randint(0, 256, dtype=np.uint8), np.uint8) -assert_type(random_st.randint(I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8]) -assert_type(random_st.randint(I_u1_low, I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8]) -assert_type(random_st.randint(0, I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8]) +assert_type(random_st.randint(I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8] | Any) +assert_type(random_st.randint(I_u1_low, I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8] | Any) +assert_type(random_st.randint(0, I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8] | Any) assert_type(random_st.randint(65536, dtype="u2"), np.uint16) assert_type(random_st.randint(0, 65536, dtype="u2"), np.uint16) -assert_type(random_st.randint(I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16]) -assert_type(random_st.randint(I_u2_low, I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16]) -assert_type(random_st.randint(0, I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16]) +assert_type(random_st.randint(I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16] | Any) +assert_type(random_st.randint(I_u2_low, I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16] | Any) +assert_type(random_st.randint(0, I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16] | Any) assert_type(random_st.randint(65536, dtype="uint16"), np.uint16) assert_type(random_st.randint(0, 65536, dtype="uint16"), np.uint16) -assert_type(random_st.randint(I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16]) -assert_type(random_st.randint(I_u2_low, I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16]) -assert_type(random_st.randint(0, I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16]) +assert_type(random_st.randint(I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16] | Any) +assert_type(random_st.randint(I_u2_low, I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16] | Any) +assert_type(random_st.randint(0, I_u2_high_open, dtype="uint16"), 
npt.NDArray[np.uint16] | Any) assert_type(random_st.randint(65536, dtype=np.uint16), np.uint16) assert_type(random_st.randint(0, 65536, dtype=np.uint16), np.uint16) -assert_type(random_st.randint(I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16]) -assert_type(random_st.randint(I_u2_low, I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16]) -assert_type(random_st.randint(0, I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16]) +assert_type(random_st.randint(I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16] | Any) +assert_type(random_st.randint(I_u2_low, I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16] | Any) +assert_type(random_st.randint(0, I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16] | Any) assert_type(random_st.randint(4294967296, dtype="u4"), np.uint32) assert_type(random_st.randint(0, 4294967296, dtype="u4"), np.uint32) -assert_type(random_st.randint(I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32]) -assert_type(random_st.randint(I_u4_low, I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32]) -assert_type(random_st.randint(0, I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32]) +assert_type(random_st.randint(I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32] | Any) +assert_type(random_st.randint(I_u4_low, I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32] | Any) +assert_type(random_st.randint(0, I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32] | Any) assert_type(random_st.randint(4294967296, dtype="uint32"), np.uint32) assert_type(random_st.randint(0, 4294967296, dtype="uint32"), np.uint32) -assert_type(random_st.randint(I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32]) -assert_type(random_st.randint(I_u4_low, I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32]) -assert_type(random_st.randint(0, I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32]) +assert_type(random_st.randint(I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32] | Any) +assert_type(random_st.randint(I_u4_low, I_u4_high_open, 
dtype="uint32"), npt.NDArray[np.uint32] | Any) +assert_type(random_st.randint(0, I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32] | Any) assert_type(random_st.randint(4294967296, dtype=np.uint32), np.uint32) assert_type(random_st.randint(0, 4294967296, dtype=np.uint32), np.uint32) -assert_type(random_st.randint(I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32]) -assert_type(random_st.randint(I_u4_low, I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32]) -assert_type(random_st.randint(0, I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32]) +assert_type(random_st.randint(I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32] | Any) +assert_type(random_st.randint(I_u4_low, I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32] | Any) +assert_type(random_st.randint(0, I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32] | Any) assert_type(random_st.randint(4294967296, dtype=np.uint), np.uint) assert_type(random_st.randint(0, 4294967296, dtype=np.uint), np.uint) -assert_type(random_st.randint(I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint]) -assert_type(random_st.randint(I_u4_low, I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint]) -assert_type(random_st.randint(0, I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint]) +assert_type(random_st.randint(I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint] | Any) +assert_type(random_st.randint(I_u4_low, I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint] | Any) +assert_type(random_st.randint(0, I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint] | Any) assert_type(random_st.randint(18446744073709551616, dtype="u8"), np.uint64) assert_type(random_st.randint(0, 18446744073709551616, dtype="u8"), np.uint64) -assert_type(random_st.randint(I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64]) -assert_type(random_st.randint(I_u8_low, I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64]) -assert_type(random_st.randint(0, I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64]) 
+assert_type(random_st.randint(I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64] | Any) +assert_type(random_st.randint(I_u8_low, I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64] | Any) +assert_type(random_st.randint(0, I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64] | Any) assert_type(random_st.randint(18446744073709551616, dtype="uint64"), np.uint64) assert_type(random_st.randint(0, 18446744073709551616, dtype="uint64"), np.uint64) -assert_type(random_st.randint(I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64]) -assert_type(random_st.randint(I_u8_low, I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64]) -assert_type(random_st.randint(0, I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64]) +assert_type(random_st.randint(I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64] | Any) +assert_type(random_st.randint(I_u8_low, I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64] | Any) +assert_type(random_st.randint(0, I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64] | Any) assert_type(random_st.randint(18446744073709551616, dtype=np.uint64), np.uint64) assert_type(random_st.randint(0, 18446744073709551616, dtype=np.uint64), np.uint64) -assert_type(random_st.randint(I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64]) -assert_type(random_st.randint(I_u8_low, I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64]) -assert_type(random_st.randint(0, I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64]) +assert_type(random_st.randint(I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64] | Any) +assert_type(random_st.randint(I_u8_low, I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64] | Any) +assert_type(random_st.randint(0, I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64] | Any) assert_type(random_st.randint(128, dtype="i1"), np.int8) assert_type(random_st.randint(-128, 128, dtype="i1"), np.int8) -assert_type(random_st.randint(I_i1_high_open, dtype="i1"), npt.NDArray[np.int8]) 
-assert_type(random_st.randint(I_i1_low, I_i1_high_open, dtype="i1"), npt.NDArray[np.int8]) -assert_type(random_st.randint(-128, I_i1_high_open, dtype="i1"), npt.NDArray[np.int8]) +assert_type(random_st.randint(I_i1_high_open, dtype="i1"), npt.NDArray[np.int8] | Any) +assert_type(random_st.randint(I_i1_low, I_i1_high_open, dtype="i1"), npt.NDArray[np.int8] | Any) +assert_type(random_st.randint(-128, I_i1_high_open, dtype="i1"), npt.NDArray[np.int8] | Any) assert_type(random_st.randint(128, dtype="int8"), np.int8) assert_type(random_st.randint(-128, 128, dtype="int8"), np.int8) -assert_type(random_st.randint(I_i1_high_open, dtype="int8"), npt.NDArray[np.int8]) -assert_type(random_st.randint(I_i1_low, I_i1_high_open, dtype="int8"), npt.NDArray[np.int8]) -assert_type(random_st.randint(-128, I_i1_high_open, dtype="int8"), npt.NDArray[np.int8]) +assert_type(random_st.randint(I_i1_high_open, dtype="int8"), npt.NDArray[np.int8] | Any) +assert_type(random_st.randint(I_i1_low, I_i1_high_open, dtype="int8"), npt.NDArray[np.int8] | Any) +assert_type(random_st.randint(-128, I_i1_high_open, dtype="int8"), npt.NDArray[np.int8] | Any) assert_type(random_st.randint(128, dtype=np.int8), np.int8) assert_type(random_st.randint(-128, 128, dtype=np.int8), np.int8) -assert_type(random_st.randint(I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8]) -assert_type(random_st.randint(I_i1_low, I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8]) -assert_type(random_st.randint(-128, I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8]) +assert_type(random_st.randint(I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8] | Any) +assert_type(random_st.randint(I_i1_low, I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8] | Any) +assert_type(random_st.randint(-128, I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8] | Any) assert_type(random_st.randint(32768, dtype="i2"), np.int16) assert_type(random_st.randint(-32768, 32768, dtype="i2"), np.int16) -assert_type(random_st.randint(I_i2_high_open, 
dtype="i2"), npt.NDArray[np.int16]) -assert_type(random_st.randint(I_i2_low, I_i2_high_open, dtype="i2"), npt.NDArray[np.int16]) -assert_type(random_st.randint(-32768, I_i2_high_open, dtype="i2"), npt.NDArray[np.int16]) +assert_type(random_st.randint(I_i2_high_open, dtype="i2"), npt.NDArray[np.int16] | Any) +assert_type(random_st.randint(I_i2_low, I_i2_high_open, dtype="i2"), npt.NDArray[np.int16] | Any) +assert_type(random_st.randint(-32768, I_i2_high_open, dtype="i2"), npt.NDArray[np.int16] | Any) assert_type(random_st.randint(32768, dtype="int16"), np.int16) assert_type(random_st.randint(-32768, 32768, dtype="int16"), np.int16) -assert_type(random_st.randint(I_i2_high_open, dtype="int16"), npt.NDArray[np.int16]) -assert_type(random_st.randint(I_i2_low, I_i2_high_open, dtype="int16"), npt.NDArray[np.int16]) -assert_type(random_st.randint(-32768, I_i2_high_open, dtype="int16"), npt.NDArray[np.int16]) +assert_type(random_st.randint(I_i2_high_open, dtype="int16"), npt.NDArray[np.int16] | Any) +assert_type(random_st.randint(I_i2_low, I_i2_high_open, dtype="int16"), npt.NDArray[np.int16] | Any) +assert_type(random_st.randint(-32768, I_i2_high_open, dtype="int16"), npt.NDArray[np.int16] | Any) assert_type(random_st.randint(32768, dtype=np.int16), np.int16) assert_type(random_st.randint(-32768, 32768, dtype=np.int16), np.int16) -assert_type(random_st.randint(I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16]) -assert_type(random_st.randint(I_i2_low, I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16]) -assert_type(random_st.randint(-32768, I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16]) +assert_type(random_st.randint(I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16] | Any) +assert_type(random_st.randint(I_i2_low, I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16] | Any) +assert_type(random_st.randint(-32768, I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16] | Any) assert_type(random_st.randint(2147483648, dtype="i4"), np.int32) 
assert_type(random_st.randint(-2147483648, 2147483648, dtype="i4"), np.int32) -assert_type(random_st.randint(I_i4_high_open, dtype="i4"), npt.NDArray[np.int32]) -assert_type(random_st.randint(I_i4_low, I_i4_high_open, dtype="i4"), npt.NDArray[np.int32]) -assert_type(random_st.randint(-2147483648, I_i4_high_open, dtype="i4"), npt.NDArray[np.int32]) +assert_type(random_st.randint(I_i4_high_open, dtype="i4"), npt.NDArray[np.int32] | Any) +assert_type(random_st.randint(I_i4_low, I_i4_high_open, dtype="i4"), npt.NDArray[np.int32] | Any) +assert_type(random_st.randint(-2147483648, I_i4_high_open, dtype="i4"), npt.NDArray[np.int32] | Any) assert_type(random_st.randint(2147483648, dtype="int32"), np.int32) assert_type(random_st.randint(-2147483648, 2147483648, dtype="int32"), np.int32) -assert_type(random_st.randint(I_i4_high_open, dtype="int32"), npt.NDArray[np.int32]) -assert_type(random_st.randint(I_i4_low, I_i4_high_open, dtype="int32"), npt.NDArray[np.int32]) -assert_type(random_st.randint(-2147483648, I_i4_high_open, dtype="int32"), npt.NDArray[np.int32]) +assert_type(random_st.randint(I_i4_high_open, dtype="int32"), npt.NDArray[np.int32] | Any) +assert_type(random_st.randint(I_i4_low, I_i4_high_open, dtype="int32"), npt.NDArray[np.int32] | Any) +assert_type(random_st.randint(-2147483648, I_i4_high_open, dtype="int32"), npt.NDArray[np.int32] | Any) assert_type(random_st.randint(2147483648, dtype=np.int32), np.int32) assert_type(random_st.randint(-2147483648, 2147483648, dtype=np.int32), np.int32) -assert_type(random_st.randint(I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32]) -assert_type(random_st.randint(I_i4_low, I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32]) -assert_type(random_st.randint(-2147483648, I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32]) +assert_type(random_st.randint(I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32] | Any) +assert_type(random_st.randint(I_i4_low, I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32] | Any) 
+assert_type(random_st.randint(-2147483648, I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32] | Any) assert_type(random_st.randint(2147483648, dtype=np.int_), np.int_) assert_type(random_st.randint(-2147483648, 2147483648, dtype=np.int_), np.int_) -assert_type(random_st.randint(I_i4_high_open, dtype=np.int_), npt.NDArray[np.int_]) -assert_type(random_st.randint(I_i4_low, I_i4_high_open, dtype=np.int_), npt.NDArray[np.int_]) -assert_type(random_st.randint(-2147483648, I_i4_high_open, dtype=np.int_), npt.NDArray[np.int_]) +assert_type(random_st.randint(I_i4_high_open, dtype=np.int_), npt.NDArray[np.int_] | Any) +assert_type(random_st.randint(I_i4_low, I_i4_high_open, dtype=np.int_), npt.NDArray[np.int_] | Any) +assert_type(random_st.randint(-2147483648, I_i4_high_open, dtype=np.int_), npt.NDArray[np.int_] | Any) assert_type(random_st.randint(9223372036854775808, dtype="i8"), np.int64) assert_type(random_st.randint(-9223372036854775808, 9223372036854775808, dtype="i8"), np.int64) -assert_type(random_st.randint(I_i8_high_open, dtype="i8"), npt.NDArray[np.int64]) -assert_type(random_st.randint(I_i8_low, I_i8_high_open, dtype="i8"), npt.NDArray[np.int64]) -assert_type(random_st.randint(-9223372036854775808, I_i8_high_open, dtype="i8"), npt.NDArray[np.int64]) +assert_type(random_st.randint(I_i8_high_open, dtype="i8"), npt.NDArray[np.int64] | Any) +assert_type(random_st.randint(I_i8_low, I_i8_high_open, dtype="i8"), npt.NDArray[np.int64] | Any) +assert_type(random_st.randint(-9223372036854775808, I_i8_high_open, dtype="i8"), npt.NDArray[np.int64] | Any) assert_type(random_st.randint(9223372036854775808, dtype="int64"), np.int64) assert_type(random_st.randint(-9223372036854775808, 9223372036854775808, dtype="int64"), np.int64) -assert_type(random_st.randint(I_i8_high_open, dtype="int64"), npt.NDArray[np.int64]) -assert_type(random_st.randint(I_i8_low, I_i8_high_open, dtype="int64"), npt.NDArray[np.int64]) -assert_type(random_st.randint(-9223372036854775808, 
I_i8_high_open, dtype="int64"), npt.NDArray[np.int64]) +assert_type(random_st.randint(I_i8_high_open, dtype="int64"), npt.NDArray[np.int64] | Any) +assert_type(random_st.randint(I_i8_low, I_i8_high_open, dtype="int64"), npt.NDArray[np.int64] | Any) +assert_type(random_st.randint(-9223372036854775808, I_i8_high_open, dtype="int64"), npt.NDArray[np.int64] | Any) assert_type(random_st.randint(9223372036854775808, dtype=np.int64), np.int64) assert_type(random_st.randint(-9223372036854775808, 9223372036854775808, dtype=np.int64), np.int64) -assert_type(random_st.randint(I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(random_st.randint(I_i8_low, I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64]) -assert_type(random_st.randint(-9223372036854775808, I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(random_st.randint(I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64] | Any) +assert_type(random_st.randint(I_i8_low, I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64] | Any) +assert_type(random_st.randint(-9223372036854775808, I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64] | Any) assert_type(random_st._bit_generator, np.random.BitGenerator) From 63727ab30f384c83ee7e3246e890d9f8ad071d3f Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 22 Jan 2026 22:00:32 +0100 Subject: [PATCH 1330/1718] TYP: `fft._helper`: don't `from numpy import ...` --- numpy/fft/_helper.pyi | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/numpy/fft/_helper.pyi b/numpy/fft/_helper.pyi index 8f1a3c7bab89..9babeca5a56f 100644 --- a/numpy/fft/_helper.pyi +++ b/numpy/fft/_helper.pyi @@ -1,6 +1,6 @@ from typing import Any, Final, Literal as L, overload -from numpy import complexfloating, floating, generic, integer +import numpy as np from numpy._typing import ( ArrayLike, NDArray, @@ -14,29 +14,29 @@ __all__ = ["fftfreq", "fftshift", "ifftshift", "rfftfreq"] ### -integer_types: Final[tuple[type[int], type[integer]]] = 
... +integer_types: Final[tuple[type[int], type[np.integer]]] = ... ### @overload -def fftshift[ScalarT: generic](x: _ArrayLike[ScalarT], axes: _ShapeLike | None = None) -> NDArray[ScalarT]: ... +def fftshift[ScalarT: np.generic](x: _ArrayLike[ScalarT], axes: _ShapeLike | None = None) -> NDArray[ScalarT]: ... @overload def fftshift(x: ArrayLike, axes: _ShapeLike | None = None) -> NDArray[Any]: ... # @overload -def ifftshift[ScalarT: generic](x: _ArrayLike[ScalarT], axes: _ShapeLike | None = None) -> NDArray[ScalarT]: ... +def ifftshift[ScalarT: np.generic](x: _ArrayLike[ScalarT], axes: _ShapeLike | None = None) -> NDArray[ScalarT]: ... @overload def ifftshift(x: ArrayLike, axes: _ShapeLike | None = None) -> NDArray[Any]: ... # @overload -def fftfreq(n: int | integer, d: _ArrayLikeFloat_co = 1.0, device: L["cpu"] | None = None) -> NDArray[floating]: ... +def fftfreq(n: int | np.integer, d: _ArrayLikeFloat_co = 1.0, device: L["cpu"] | None = None) -> NDArray[np.floating]: ... @overload -def fftfreq(n: int | integer, d: _ArrayLikeComplex_co = 1.0, device: L["cpu"] | None = None) -> NDArray[complexfloating]: ... +def fftfreq(n: int | np.integer, d: _ArrayLikeComplex_co = 1.0, device: L["cpu"] | None = None) -> NDArray[np.complexfloating]: ... # @overload -def rfftfreq(n: int | integer, d: _ArrayLikeFloat_co = 1.0, device: L["cpu"] | None = None) -> NDArray[floating]: ... +def rfftfreq(n: int | np.integer, d: _ArrayLikeFloat_co = 1.0, device: L["cpu"] | None = None) -> NDArray[np.floating]: ... @overload -def rfftfreq(n: int | integer, d: _ArrayLikeComplex_co = 1.0, device: L["cpu"] | None = None) -> NDArray[complexfloating]: ... +def rfftfreq(n: int | np.integer, d: _ArrayLikeComplex_co = 1.0, device: L["cpu"] | None = None) -> NDArray[np.complexfloating]: ... 
From b56ec9fec7e334b3263d4d12dae61ef367105710 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 22 Jan 2026 22:49:42 +0100 Subject: [PATCH 1331/1718] TYP: `fft.[r]fftfreq` shape-typing and precise dtypes --- numpy/fft/_helper.pyi | 130 ++++++++++++++++++---- numpy/typing/tests/data/reveal/fft.pyi | 142 +++++++++++++++++++------ 2 files changed, 221 insertions(+), 51 deletions(-) diff --git a/numpy/fft/_helper.pyi b/numpy/fft/_helper.pyi index 9babeca5a56f..05603a2046c3 100644 --- a/numpy/fft/_helper.pyi +++ b/numpy/fft/_helper.pyi @@ -1,23 +1,27 @@ from typing import Any, Final, Literal as L, overload import numpy as np -from numpy._typing import ( - ArrayLike, - NDArray, - _ArrayLike, - _ArrayLikeComplex_co, - _ArrayLikeFloat_co, - _ShapeLike, -) +from numpy._typing import ArrayLike, NDArray, _ArrayLike, _Shape, _ShapeLike __all__ = ["fftfreq", "fftshift", "ifftshift", "rfftfreq"] ### -integer_types: Final[tuple[type[int], type[np.integer]]] = ... +type _Device = L["cpu"] + +type _IntLike = int | np.integer + +type _AsFloat64 = np.float64 | np.float32 | np.float16 | np.integer | np.bool +type _AsComplex128 = np.complex128 | np.complex64 +type _Inexact80 = np.longdouble | np.clongdouble + +type _Array[ShapeT: _Shape, ScalarT: np.generic] = np.ndarray[ShapeT, np.dtype[ScalarT]] +type _1D = tuple[int] ### +integer_types: Final[tuple[type[int], type[np.integer]]] = ... + @overload def fftshift[ScalarT: np.generic](x: _ArrayLike[ScalarT], axes: _ShapeLike | None = None) -> NDArray[ScalarT]: ... @overload @@ -29,14 +33,102 @@ def ifftshift[ScalarT: np.generic](x: _ArrayLike[ScalarT], axes: _ShapeLike | No @overload def ifftshift(x: ArrayLike, axes: _ShapeLike | None = None) -> NDArray[Any]: ... -# -@overload -def fftfreq(n: int | np.integer, d: _ArrayLikeFloat_co = 1.0, device: L["cpu"] | None = None) -> NDArray[np.floating]: ... 
-@overload -def fftfreq(n: int | np.integer, d: _ArrayLikeComplex_co = 1.0, device: L["cpu"] | None = None) -> NDArray[np.complexfloating]: ... +# keep in sync with `rfftfreq` below +@overload # 0d +f64 (default) +def fftfreq( + n: _IntLike, + d: _AsFloat64 | float = 1.0, + device: _Device | None = None, +) -> _Array[_1D, np.float64]: ... +@overload # 0d c64 | c128 +def fftfreq( + n: _IntLike, + d: _AsComplex128, + device: _Device | None = None, +) -> _Array[_1D, np.complex128]: ... +@overload # 0d +complex +def fftfreq( + n: _IntLike, + d: complex, + device: _Device | None = None, +) -> _Array[_1D, np.complex128 | Any]: ... +@overload # 0d T: f80 | c160 +def fftfreq[ScalarT: _Inexact80]( + n: _IntLike, + d: ScalarT, + device: _Device | None = None, +) -> _Array[_1D, ScalarT]: ... +@overload # nd +f64 +def fftfreq[ShapeT: _Shape]( + n: _IntLike, + d: _Array[ShapeT, _AsFloat64], + device: _Device | None = None, +) -> _Array[ShapeT, np.float64]: ... +@overload # nd c64 | c128 +def fftfreq[ShapeT: _Shape]( + n: _IntLike, + d: _Array[ShapeT, _AsComplex128], + device: _Device | None = None, +) -> _Array[ShapeT, np.complex128]: ... +@overload # nd T: f80 | c160 +def fftfreq[ShapeT: _Shape, LongDoubleT: _Inexact80]( + n: _IntLike, + d: _Array[ShapeT, LongDoubleT], + device: _Device | None = None, +) -> _Array[ShapeT, LongDoubleT]: ... +@overload # nd +complex (fallback) +def fftfreq[ShapeT: _Shape]( + n: _IntLike, + d: _Array[ShapeT, np.number | np.bool], + device: _Device | None = None, +) -> _Array[ShapeT, Any]: ... -# -@overload -def rfftfreq(n: int | np.integer, d: _ArrayLikeFloat_co = 1.0, device: L["cpu"] | None = None) -> NDArray[np.floating]: ... -@overload -def rfftfreq(n: int | np.integer, d: _ArrayLikeComplex_co = 1.0, device: L["cpu"] | None = None) -> NDArray[np.complexfloating]: ... 
+# keep in sync with `fftfreq` above +@overload # 0d +f64 (default) +def rfftfreq( + n: _IntLike, + d: _AsFloat64 | float = 1.0, + device: _Device | None = None, +) -> _Array[_1D, np.float64]: ... +@overload # 0d c64 | c128 +def rfftfreq( + n: _IntLike, + d: _AsComplex128, + device: _Device | None = None, +) -> _Array[_1D, np.complex128]: ... +@overload # 0d +complex +def rfftfreq( + n: _IntLike, + d: complex, + device: _Device | None = None, +) -> _Array[_1D, np.complex128 | Any]: ... +@overload # 0d T: f80 | c160 +def rfftfreq[LongDoubleT: _Inexact80]( + n: _IntLike, + d: LongDoubleT, + device: _Device | None = None, +) -> _Array[_1D, LongDoubleT]: ... +@overload # nd +f64 +def rfftfreq[ShapeT: _Shape]( + n: _IntLike, + d: _Array[ShapeT, _AsFloat64], + device: _Device | None = None, +) -> _Array[ShapeT, np.float64]: ... +@overload # nd c64 | c128 +def rfftfreq[ShapeT: _Shape]( + n: _IntLike, + d: _Array[ShapeT, _AsComplex128], + device: _Device | None = None, +) -> _Array[ShapeT, np.complex128]: ... +@overload # nd T: f80 | c160 +def rfftfreq[ShapeT: _Shape, LongDoubleT: _Inexact80]( + n: _IntLike, + d: _Array[ShapeT, LongDoubleT], + device: _Device | None = None, +) -> _Array[ShapeT, LongDoubleT]: ... +@overload # nd +complex (fallback) +def rfftfreq[ShapeT: _Shape]( + n: _IntLike, + d: _Array[ShapeT, np.number | np.bool], + device: _Device | None = None, +) -> _Array[ShapeT, Any]: ... 
diff --git a/numpy/typing/tests/data/reveal/fft.pyi b/numpy/typing/tests/data/reveal/fft.pyi index dacd2b89777c..38a3d2f8e692 100644 --- a/numpy/typing/tests/data/reveal/fft.pyi +++ b/numpy/typing/tests/data/reveal/fft.pyi @@ -3,35 +3,113 @@ from typing import Any, assert_type import numpy as np import numpy.typing as npt -AR_f8: npt.NDArray[np.float64] -AR_c16: npt.NDArray[np.complex128] -AR_LIKE_f8: list[float] - -assert_type(np.fft.fftshift(AR_f8), npt.NDArray[np.float64]) -assert_type(np.fft.fftshift(AR_LIKE_f8, axes=0), npt.NDArray[Any]) - -assert_type(np.fft.ifftshift(AR_f8), npt.NDArray[np.float64]) -assert_type(np.fft.ifftshift(AR_LIKE_f8, axes=0), npt.NDArray[Any]) - -assert_type(np.fft.fftfreq(5, AR_f8), npt.NDArray[np.floating]) -assert_type(np.fft.fftfreq(np.int64(), AR_c16), npt.NDArray[np.complexfloating]) - -assert_type(np.fft.fftfreq(5, AR_f8), npt.NDArray[np.floating]) -assert_type(np.fft.fftfreq(np.int64(), AR_c16), npt.NDArray[np.complexfloating]) - -assert_type(np.fft.fft(AR_f8), npt.NDArray[np.complex128]) -assert_type(np.fft.ifft(AR_f8, axis=1), npt.NDArray[np.complex128]) -assert_type(np.fft.rfft(AR_f8, n=None), npt.NDArray[np.complex128]) -assert_type(np.fft.irfft(AR_f8, norm="ortho"), npt.NDArray[np.float64]) -assert_type(np.fft.hfft(AR_f8, n=2), npt.NDArray[np.float64]) -assert_type(np.fft.ihfft(AR_f8), npt.NDArray[np.complex128]) - -assert_type(np.fft.fftn(AR_f8), npt.NDArray[np.complex128]) -assert_type(np.fft.ifftn(AR_f8), npt.NDArray[np.complex128]) -assert_type(np.fft.rfftn(AR_f8), npt.NDArray[np.complex128]) -assert_type(np.fft.irfftn(AR_f8), npt.NDArray[np.float64]) - -assert_type(np.fft.rfft2(AR_f8), npt.NDArray[np.complex128]) -assert_type(np.fft.ifft2(AR_f8), npt.NDArray[np.complex128]) -assert_type(np.fft.fft2(AR_f8), npt.NDArray[np.complex128]) -assert_type(np.fft.irfft2(AR_f8), npt.NDArray[np.float64]) +### + +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] +type _Array2D[ScalarT: np.generic] = 
np.ndarray[tuple[int, int], np.dtype[ScalarT]] + +_f64_nd: npt.NDArray[np.float64] +_c128_nd: npt.NDArray[np.complex128] +_py_float_1d: list[float] + +_i64: np.int64 +_f32: np.float16 +_f80: np.longdouble +_c64: np.complex64 +_c160: np.clongdouble + +_i64_2d: _Array2D[np.int64] +_f32_2d: _Array2D[np.float16] +_f80_2d: _Array2D[np.longdouble] +_c64_2d: _Array2D[np.complex64] +_c160_2d: _Array2D[np.clongdouble] + +_i64_nd: npt.NDArray[np.int64] +_f32_nd: npt.NDArray[np.float16] +_f80_nd: npt.NDArray[np.longdouble] +_c64_nd: npt.NDArray[np.complex64] +_c160_nd: npt.NDArray[np.clongdouble] + +### + +# fftshift + +assert_type(np.fft.fftshift(_f64_nd), npt.NDArray[np.float64]) +assert_type(np.fft.fftshift(_py_float_1d, axes=0), npt.NDArray[Any]) + +# ifftshift + +assert_type(np.fft.ifftshift(_f64_nd), npt.NDArray[np.float64]) +assert_type(np.fft.ifftshift(_py_float_1d, axes=0), npt.NDArray[Any]) + +# fftfreq + +assert_type(np.fft.fftfreq(5), _Array1D[np.float64]) +assert_type(np.fft.fftfreq(5, True), _Array1D[np.float64]) +assert_type(np.fft.fftfreq(5, 1), _Array1D[np.float64]) +assert_type(np.fft.fftfreq(5, 1.0), _Array1D[np.float64]) +assert_type(np.fft.fftfreq(5, 1j), _Array1D[np.complex128 | Any]) + +assert_type(np.fft.fftfreq(5, _i64), _Array1D[np.float64]) +assert_type(np.fft.fftfreq(5, _f32), _Array1D[np.float64]) +assert_type(np.fft.fftfreq(5, _f80), _Array1D[np.longdouble]) +assert_type(np.fft.fftfreq(5, _c64), _Array1D[np.complex128]) +assert_type(np.fft.fftfreq(5, _c160), _Array1D[np.clongdouble]) + +assert_type(np.fft.fftfreq(5, _i64_2d), _Array2D[np.float64]) +assert_type(np.fft.fftfreq(5, _f32_2d), _Array2D[np.float64]) +assert_type(np.fft.fftfreq(5, _f80_2d), _Array2D[np.longdouble]) +assert_type(np.fft.fftfreq(5, _c64_2d), _Array2D[np.complex128]) +assert_type(np.fft.fftfreq(5, _c160_2d), _Array2D[np.clongdouble]) + +assert_type(np.fft.fftfreq(5, _i64_nd), npt.NDArray[np.float64]) +assert_type(np.fft.fftfreq(5, _f32_nd), npt.NDArray[np.float64]) 
+assert_type(np.fft.fftfreq(5, _f80_nd), npt.NDArray[np.longdouble]) +assert_type(np.fft.fftfreq(5, _c64_nd), npt.NDArray[np.complex128]) +assert_type(np.fft.fftfreq(5, _c160_nd), npt.NDArray[np.clongdouble]) + +# rfftfreq (same as fftfreq) + +assert_type(np.fft.rfftfreq(5), _Array1D[np.float64]) +assert_type(np.fft.rfftfreq(5, True), _Array1D[np.float64]) +assert_type(np.fft.rfftfreq(5, 1), _Array1D[np.float64]) +assert_type(np.fft.rfftfreq(5, 1.0), _Array1D[np.float64]) +assert_type(np.fft.rfftfreq(5, 1j), _Array1D[np.complex128 | Any]) + +assert_type(np.fft.rfftfreq(5, _i64), _Array1D[np.float64]) +assert_type(np.fft.rfftfreq(5, _f32), _Array1D[np.float64]) +assert_type(np.fft.rfftfreq(5, _f80), _Array1D[np.longdouble]) +assert_type(np.fft.rfftfreq(5, _c64), _Array1D[np.complex128]) +assert_type(np.fft.rfftfreq(5, _c160), _Array1D[np.clongdouble]) + +assert_type(np.fft.rfftfreq(5, _i64_2d), _Array2D[np.float64]) +assert_type(np.fft.rfftfreq(5, _f32_2d), _Array2D[np.float64]) +assert_type(np.fft.rfftfreq(5, _f80_2d), _Array2D[np.longdouble]) +assert_type(np.fft.rfftfreq(5, _c64_2d), _Array2D[np.complex128]) +assert_type(np.fft.rfftfreq(5, _c160_2d), _Array2D[np.clongdouble]) + +assert_type(np.fft.rfftfreq(5, _i64_nd), npt.NDArray[np.float64]) +assert_type(np.fft.rfftfreq(5, _f32_nd), npt.NDArray[np.float64]) +assert_type(np.fft.rfftfreq(5, _f80_nd), npt.NDArray[np.longdouble]) +assert_type(np.fft.rfftfreq(5, _c64_nd), npt.NDArray[np.complex128]) +assert_type(np.fft.rfftfreq(5, _c160_nd), npt.NDArray[np.clongdouble]) +... 
+ +# the other fft functions + +assert_type(np.fft.fft(_f64_nd), npt.NDArray[np.complex128]) +assert_type(np.fft.ifft(_f64_nd, axis=1), npt.NDArray[np.complex128]) +assert_type(np.fft.rfft(_f64_nd, n=None), npt.NDArray[np.complex128]) +assert_type(np.fft.irfft(_f64_nd, norm="ortho"), npt.NDArray[np.float64]) +assert_type(np.fft.hfft(_f64_nd, n=2), npt.NDArray[np.float64]) +assert_type(np.fft.ihfft(_f64_nd), npt.NDArray[np.complex128]) + +assert_type(np.fft.fftn(_f64_nd), npt.NDArray[np.complex128]) +assert_type(np.fft.ifftn(_f64_nd), npt.NDArray[np.complex128]) +assert_type(np.fft.rfftn(_f64_nd), npt.NDArray[np.complex128]) +assert_type(np.fft.irfftn(_f64_nd), npt.NDArray[np.float64]) + +assert_type(np.fft.rfft2(_f64_nd), npt.NDArray[np.complex128]) +assert_type(np.fft.ifft2(_f64_nd), npt.NDArray[np.complex128]) +assert_type(np.fft.fft2(_f64_nd), npt.NDArray[np.complex128]) +assert_type(np.fft.irfft2(_f64_nd), npt.NDArray[np.float64]) From 43cf743bad5bd264d9830df7eb0dedab329cc550 Mon Sep 17 00:00:00 2001 From: Antareep Sarkar Date: Fri, 23 Jan 2026 15:35:18 +0530 Subject: [PATCH 1332/1718] BUG: apply suggestions Co-authored-by: Joren Hammudoglu --- doc/release/upcoming_changes/30707.change.rst | 2 +- numpy/lib/_function_base_impl.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/release/upcoming_changes/30707.change.rst b/doc/release/upcoming_changes/30707.change.rst index a99a5749a8df..7f3846b6de97 100644 --- a/doc/release/upcoming_changes/30707.change.rst +++ b/doc/release/upcoming_changes/30707.change.rst @@ -1,4 +1,4 @@ ``meshgrid`` now always returns a tuple ---------------------------------------------- +--------------------------------------- ``np.meshgrid`` previously used to return a list when ``sparse`` was true and ``copy`` was false. Now, it always returns a tuple regardless of the arguments. 
diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 734319227c69..dffd7a5bf865 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -5195,7 +5195,7 @@ def meshgrid(*xi, copy=True, sparse=False, indexing='xy'): if copy: output = tuple(x.copy() for x in output) - if sparse and copy == False: + if sparse and not copy: return tuple(output) return output From 77964f2c79ec1ad1a98fcb8fe5c33ef2773d9c43 Mon Sep 17 00:00:00 2001 From: antareepsarkar Date: Fri, 23 Jan 2026 16:23:59 +0530 Subject: [PATCH 1333/1718] add tests to confirm tuple --- numpy/lib/tests/test_function_base.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 7f6ae2a7f9bf..acd663511a7e 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -2799,11 +2799,17 @@ def test_indexing(self): def test_sparse(self): [X, Y] = meshgrid([1, 2, 3], [4, 5, 6, 7], sparse=True) - [A, B] = meshgrid([1, 2, 3, 4], [4, 5, 6], sparse=True, copy=False) assert_array_equal(X, np.array([[1, 2, 3]])) assert_array_equal(Y, np.array([[4], [5], [6], [7]])) - assert_array_equal(A, np.array([[1, 2, 3, 4]])) - assert_array_equal(B, np.array([[4], [5], [6]])) + + def test_always_tuple(self): + [A, B] = meshgrid([1, 2], [3, 4], sparse=True, copy=False) + C = (np.array([[1, 2]])) + D = (np.array([[3], [4]])) + assert_array_equal(A, C) + assert_array_equal(B, D) + assert_equal(type(A), type(C)) + assert_equal(type(B), type(D)) def test_invalid_arguments(self): # Test that meshgrid complains about invalid arguments From 5fdbaa83288535c7bec9540b5513e015701eb157 Mon Sep 17 00:00:00 2001 From: antareepsarkar Date: Fri, 23 Jan 2026 16:51:53 +0530 Subject: [PATCH 1334/1718] add tests to confirm tuple --- numpy/lib/tests/test_function_base.py | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git 
a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index acd663511a7e..9efae73bb215 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -2803,13 +2803,10 @@ def test_sparse(self): assert_array_equal(Y, np.array([[4], [5], [6], [7]])) def test_always_tuple(self): - [A, B] = meshgrid([1, 2], [3, 4], sparse=True, copy=False) - C = (np.array([[1, 2]])) - D = (np.array([[3], [4]])) - assert_array_equal(A, C) - assert_array_equal(B, D) - assert_equal(type(A), type(C)) - assert_equal(type(B), type(D)) + A = meshgrid([1, 2, 3], [4, 5, 6, 7], sparse=True, copy=False) + B = meshgrid([], sparse=True, copy=False) + assert_equal(isinstance(A, tuple), True) + assert_equal(isinstance(B, tuple), True) def test_invalid_arguments(self): # Test that meshgrid complains about invalid arguments From 5197c4ceb0a0e7f9449502d3173a43a383fb9ee2 Mon Sep 17 00:00:00 2001 From: Antareep Sarkar Date: Fri, 23 Jan 2026 16:57:49 +0530 Subject: [PATCH 1335/1718] apply suggestion on tests Co-authored-by: Joren Hammudoglu --- numpy/lib/tests/test_function_base.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 9efae73bb215..f390f47b4d0e 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -2805,8 +2805,8 @@ def test_sparse(self): def test_always_tuple(self): A = meshgrid([1, 2, 3], [4, 5, 6, 7], sparse=True, copy=False) B = meshgrid([], sparse=True, copy=False) - assert_equal(isinstance(A, tuple), True) - assert_equal(isinstance(B, tuple), True) + assert isinstance(A, tuple) + assert isinstance(B, tuple) def test_invalid_arguments(self): # Test that meshgrid complains about invalid arguments From 933eff524413115bd0e65e72ff0bd55b29ac1e4b Mon Sep 17 00:00:00 2001 From: Wei Bo Gao Date: Fri, 23 Jan 2026 21:30:41 +0800 Subject: [PATCH 1336/1718] MAINT: Fix E721 (type comparison) 
linter errors This removes the E721 ignore from ruff.toml and fixes the resulting type comparison errors by replacing '==' with 'is' or 'isinstance'. --- numpy/_core/_dtype.py | 2 +- numpy/_core/tests/test_casting_unittests.py | 4 ++-- numpy/_core/tests/test_dtype.py | 2 +- numpy/_core/tests/test_nep50_promotions.py | 2 +- numpy/_core/tests/test_numeric.py | 2 +- numpy/_core/tests/test_scalarmath.py | 9 ++++----- numpy/_core/tests/test_umath.py | 20 ++++++++++---------- numpy/lib/_polynomial_impl.py | 4 +--- numpy/lib/tests/test_arraysetops.py | 17 +++++++++-------- numpy/lib/tests/test_io.py | 6 +++--- numpy/linalg/tests/test_linalg.py | 6 +++--- numpy/random/tests/test_smoke.py | 8 ++++---- ruff.toml | 1 - 13 files changed, 40 insertions(+), 43 deletions(-) diff --git a/numpy/_core/_dtype.py b/numpy/_core/_dtype.py index 6a8a091b269c..58cfbbf8724a 100644 --- a/numpy/_core/_dtype.py +++ b/numpy/_core/_dtype.py @@ -124,7 +124,7 @@ def _scalar_str(dtype, short): else: return "'%sU%d'" % (byteorder, dtype.itemsize / 4) - elif dtype.type == str: + elif dtype.type is str: return "'T'" elif not type(dtype)._legacy: diff --git a/numpy/_core/tests/test_casting_unittests.py b/numpy/_core/tests/test_casting_unittests.py index 5f643f8045ba..c45f41028b7b 100644 --- a/numpy/_core/tests/test_casting_unittests.py +++ b/numpy/_core/tests/test_casting_unittests.py @@ -274,8 +274,8 @@ def test_simple_cancast(self, from_Dt): for to_dt in [to_Dt(), to_Dt().newbyteorder()]: casting, (from_res, to_res), view_off = ( cast._resolve_descriptors((from_dt, to_dt))) - assert type(from_res) == from_Dt - assert type(to_res) == to_Dt + assert type(from_res) is from_Dt + assert type(to_res) is to_Dt if view_off is not None: # If a view is acceptable, this is "no" casting # and byte order must be matching. 
diff --git a/numpy/_core/tests/test_dtype.py b/numpy/_core/tests/test_dtype.py index da499f487dfb..f1503592cb8e 100644 --- a/numpy/_core/tests/test_dtype.py +++ b/numpy/_core/tests/test_dtype.py @@ -778,7 +778,7 @@ def test_subarray_cast_copies(self): arr = np.ones(3, dtype=[("f", "i", 3)]) cast = arr.astype(object) for fields in cast: - assert type(fields) == tuple and len(fields) == 1 + assert type(fields) is tuple and len(fields) == 1 subarr = fields[0] assert subarr.base is None assert subarr.flags.owndata diff --git a/numpy/_core/tests/test_nep50_promotions.py b/numpy/_core/tests/test_nep50_promotions.py index 8d9d9e63ce38..abd99a92f24b 100644 --- a/numpy/_core/tests/test_nep50_promotions.py +++ b/numpy/_core/tests/test_nep50_promotions.py @@ -127,7 +127,7 @@ def test_nep50_complex_promotion(): with pytest.warns(RuntimeWarning, match=".*overflow"): res = np.complex64(3) + complex(2**300) - assert type(res) == np.complex64 + assert type(res) is np.complex64 def test_nep50_integer_conversion_errors(): diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py index 664b7aef257d..45e238b27901 100644 --- a/numpy/_core/tests/test_numeric.py +++ b/numpy/_core/tests/test_numeric.py @@ -3581,7 +3581,7 @@ def test_dtype_str_bytes(self, likefunc, dtype): b = a[:, ::2] # Ensure b is not contiguous. 
kwargs = {'fill_value': ''} if likefunc == np.full_like else {} result = likefunc(b, dtype=dtype, **kwargs) - if dtype == str: + if dtype is str: assert result.strides == (16, 4) else: # dtype is bytes diff --git a/numpy/_core/tests/test_scalarmath.py b/numpy/_core/tests/test_scalarmath.py index bfbc9a54cbfe..c14d0b22af9d 100644 --- a/numpy/_core/tests/test_scalarmath.py +++ b/numpy/_core/tests/test_scalarmath.py @@ -1055,7 +1055,7 @@ def rop_func(self, other): # inheritance has to override, or this is correctly lost: res = op(myf_simple1(1), myf_simple2(2)) - assert type(res) == sctype or type(res) == np.bool + assert type(res) is sctype or type(res) is np.bool assert op(myf_simple1(1), myf_simple2(2)) == op(1, 2) # inherited # Two independent subclasses do not really define an order. This could @@ -1092,7 +1092,7 @@ def rop_func(self, other): assert op(myt(1), np.float64(2)) == __op__ assert op(np.float64(1), myt(2)) == __rop__ - if op in {operator.mod, operator.floordiv} and subtype == complex: + if op in {operator.mod, operator.floordiv} and subtype is complex: return # module is not support for complex. Do not test. if __rop__ == __op__: @@ -1106,12 +1106,11 @@ def rop_func(self, other): res = op(myt(1), np.float16(2)) expected = op(behaves_like(1), np.float16(2)) assert res == expected - assert type(res) == type(expected) + assert type(res) is type(expected) res = op(np.float32(2), myt(1)) expected = op(np.float32(2), behaves_like(1)) assert res == expected - assert type(res) == type(expected) - + assert type(res) is type(expected) # Same check for longdouble (compare via dtype to accept float64 when # longdouble has the identical size), which is currently not perfectly # consistent. 
diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py index f7a4dae20bf5..385bae606cb2 100644 --- a/numpy/_core/tests/test_umath.py +++ b/numpy/_core/tests/test_umath.py @@ -217,25 +217,25 @@ def __array_wrap__(self, arr, context=None, return_scalar=False): if subok: assert_(isinstance(r, ArrayWrap)) else: - assert_(type(r) == np.ndarray) + assert_(type(r) is np.ndarray) r = np.add(a, 2, None, subok=subok) if subok: assert_(isinstance(r, ArrayWrap)) else: - assert_(type(r) == np.ndarray) + assert_(type(r) is np.ndarray) r = np.add(a, 2, out=None, subok=subok) if subok: assert_(isinstance(r, ArrayWrap)) else: - assert_(type(r) == np.ndarray) + assert_(type(r) is np.ndarray) r = np.add(a, 2, out=(None,), subok=subok) if subok: assert_(isinstance(r, ArrayWrap)) else: - assert_(type(r) == np.ndarray) + assert_(type(r) is np.ndarray) d = ArrayWrap([5.7]) o1 = np.empty((1,)) @@ -245,31 +245,31 @@ def __array_wrap__(self, arr, context=None, return_scalar=False): if subok: assert_(isinstance(r2, ArrayWrap)) else: - assert_(type(r2) == np.ndarray) + assert_(type(r2) is np.ndarray) r1, r2 = np.frexp(d, o1, None, subok=subok) if subok: assert_(isinstance(r2, ArrayWrap)) else: - assert_(type(r2) == np.ndarray) + assert_(type(r2) is np.ndarray) r1, r2 = np.frexp(d, None, o2, subok=subok) if subok: assert_(isinstance(r1, ArrayWrap)) else: - assert_(type(r1) == np.ndarray) + assert_(type(r1) is np.ndarray) r1, r2 = np.frexp(d, out=(o1, None), subok=subok) if subok: assert_(isinstance(r2, ArrayWrap)) else: - assert_(type(r2) == np.ndarray) + assert_(type(r2) is np.ndarray) r1, r2 = np.frexp(d, out=(None, o2), subok=subok) if subok: assert_(isinstance(r1, ArrayWrap)) else: - assert_(type(r1) == np.ndarray) + assert_(type(r1) is np.ndarray) with assert_raises(TypeError): # Out argument must be tuple, since there are multiple outputs. 
@@ -3702,7 +3702,7 @@ def _unwrap(self, objs): for obj in objs: if isinstance(obj, cls): obj = np.array(obj) - elif type(obj) != np.ndarray: + elif type(obj) is not np.ndarray: return NotImplemented result.append(obj) return result diff --git a/numpy/lib/_polynomial_impl.py b/numpy/lib/_polynomial_impl.py index e9d2d5d23fc6..9b2ff399baa5 100644 --- a/numpy/lib/_polynomial_impl.py +++ b/numpy/lib/_polynomial_impl.py @@ -5,7 +5,6 @@ __all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd', 'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d', 'polyfit'] - import functools import re import warnings @@ -141,8 +140,7 @@ def poly(seq_of_zeros): seq_of_zeros = eigvals(seq_of_zeros) elif len(sh) == 1: dt = seq_of_zeros.dtype - # Let object arrays slip through, e.g. for arbitrary precision - if dt != object: + if dt.type is not NX.object_: seq_of_zeros = seq_of_zeros.astype(mintypecode(dt.char)) else: raise ValueError("input must be 1d or non-empty square 2d array.") diff --git a/numpy/lib/tests/test_arraysetops.py b/numpy/lib/tests/test_arraysetops.py index 4e8d503427de..92c2bae39f30 100644 --- a/numpy/lib/tests/test_arraysetops.py +++ b/numpy/lib/tests/test_arraysetops.py @@ -637,46 +637,46 @@ def check_all(self, a, b, i1, i2, c, dt): msg = base_msg.format('values', dt) v = unique(a) assert_array_equal(v, b, msg) - assert type(v) == type(b) + assert type(v) is type(b) msg = base_msg.format('return_index', dt) v, j = unique(a, True, False, False) assert_array_equal(v, b, msg) assert_array_equal(j, i1, msg) - assert type(v) == type(b) + assert type(v) is type(b) msg = base_msg.format('return_inverse', dt) v, j = unique(a, False, True, False) assert_array_equal(v, b, msg) assert_array_equal(j, i2, msg) - assert type(v) == type(b) + assert type(v) is type(b) msg = base_msg.format('return_counts', dt) v, j = unique(a, False, False, True) assert_array_equal(v, b, msg) assert_array_equal(j, c, msg) - assert type(v) == type(b) + assert type(v) is type(b) msg = 
base_msg.format('return_index and return_inverse', dt) v, j1, j2 = unique(a, True, True, False) assert_array_equal(v, b, msg) assert_array_equal(j1, i1, msg) assert_array_equal(j2, i2, msg) - assert type(v) == type(b) + assert type(v) is type(b) msg = base_msg.format('return_index and return_counts', dt) v, j1, j2 = unique(a, True, False, True) assert_array_equal(v, b, msg) assert_array_equal(j1, i1, msg) assert_array_equal(j2, c, msg) - assert type(v) == type(b) + assert type(v) is type(b) msg = base_msg.format('return_inverse and return_counts', dt) v, j1, j2 = unique(a, False, True, True) assert_array_equal(v, b, msg) assert_array_equal(j1, i2, msg) assert_array_equal(j2, c, msg) - assert type(v) == type(b) + assert type(v) is type(b) msg = base_msg.format(('return_index, return_inverse ' 'and return_counts'), dt) @@ -685,9 +685,10 @@ def check_all(self, a, b, i1, i2, c, dt): assert_array_equal(j1, i1, msg) assert_array_equal(j2, i2, msg) assert_array_equal(j3, c, msg) - assert type(v) == type(b) + assert type(v) is type(b) def get_types(self): + types = [] types.extend(np.typecodes['AllInteger']) types.extend(np.typecodes['AllFloat']) diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py index 03b585443710..b54265338e06 100644 --- a/numpy/lib/tests/test_io.py +++ b/numpy/lib/tests/test_io.py @@ -2441,9 +2441,9 @@ def test_auto_dtype_largeint(self): assert_equal(test.dtype.names, ['f0', 'f1', 'f2']) - assert_(test.dtype['f0'] == float) - assert_(test.dtype['f1'] == np.int64) - assert_(test.dtype['f2'] == np.int_) + assert_(test.dtype['f0'].type is np.float64) + assert_(test.dtype['f1'].type is np.int64) + assert_(test.dtype['f2'].type is np.int_) assert_allclose(test['f0'], 73786976294838206464.) 
assert_equal(test['f1'], 17179869184) diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py index f93e2bdeeb6c..2c4cc2891ab5 100644 --- a/numpy/linalg/tests/test_linalg.py +++ b/numpy/linalg/tests/test_linalg.py @@ -1091,7 +1091,7 @@ def tz(M): for mat in self.rshft_all: tz(mat.astype(dt)) - if dt != object: + if np.dtype(dt).type is not np.object_: tz(self.stacked.astype(dt)) def test_power_is_one(self, dt): @@ -1102,7 +1102,7 @@ def tz(mat): for mat in self.rshft_all: tz(mat.astype(dt)) - if dt != object: + if np.dtype(dt).type is not np.object_: tz(self.stacked.astype(dt)) def test_power_is_two(self, dt): @@ -1114,7 +1114,7 @@ def tz(mat): for mat in self.rshft_all: tz(mat.astype(dt)) - if dt != object: + if np.dtype(dt).type is not np.object_: tz(self.stacked.astype(dt)) def test_power_is_minus_one(self, dt): diff --git a/numpy/random/tests/test_smoke.py b/numpy/random/tests/test_smoke.py index 5353a72a1174..b30fddabd43e 100644 --- a/numpy/random/tests/test_smoke.py +++ b/numpy/random/tests/test_smoke.py @@ -63,8 +63,8 @@ def comp_state(state1, state2): if isinstance(state1, dict): for key in state1: identical &= comp_state(state1[key], state2[key]) - elif type(state1) != type(state2): - identical &= type(state1) == type(state2) + elif type(state1) is not type(state2): + identical &= type(state1) is type(state2) elif (isinstance(state1, (list, tuple, np.ndarray)) and isinstance( state2, (list, tuple, np.ndarray))): for s1, s2 in zip(state1, state2): @@ -486,13 +486,13 @@ def test_pickle(self): rg = self._create_rng().rg pick = pickle.dumps(rg) unpick = pickle.loads(pick) - assert_(type(rg) == type(unpick)) + assert_(type(rg) is type(unpick)) assert_(comp_state(rg.bit_generator.state, unpick.bit_generator.state)) pick = pickle.dumps(rg) unpick = pickle.loads(pick) - assert_(type(rg) == type(unpick)) + assert_(type(rg) is type(unpick)) assert_(comp_state(rg.bit_generator.state, unpick.bit_generator.state)) diff --git a/ruff.toml 
b/ruff.toml index b4522910582e..08468d3741f1 100644 --- a/ruff.toml +++ b/ruff.toml @@ -62,7 +62,6 @@ ignore = [ "E302", # TODO: Expected 2 blank lines, found 1 "E402", # Module level import not at top of file "E712", # Avoid equality comparisons to `True` or `False` - "E721", # TODO: Use `is` and `is not` for type comparisons, or `isinstance()` for isinstance check "E731", # Do not assign a `lambda` expression, use a `def` "E741", # Ambiguous variable name # pyflakes From 55dd3a1676661978c3683c8d4396560dee38451c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 23 Jan 2026 18:00:11 +0000 Subject: [PATCH 1337/1718] MAINT: Bump ruff from 0.14.13 to 0.14.14 in /requirements Bumps [ruff](https://github.com/astral-sh/ruff) from 0.14.13 to 0.14.14. - [Release notes](https://github.com/astral-sh/ruff/releases) - [Changelog](https://github.com/astral-sh/ruff/blob/main/CHANGELOG.md) - [Commits](https://github.com/astral-sh/ruff/compare/0.14.13...0.14.14) --- updated-dependencies: - dependency-name: ruff dependency-version: 0.14.14 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- requirements/linter_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/linter_requirements.txt b/requirements/linter_requirements.txt index 497bd07f403c..8dfd5d05e304 100644 --- a/requirements/linter_requirements.txt +++ b/requirements/linter_requirements.txt @@ -1,4 +1,4 @@ # keep in sync with `environment.yml` cython-lint -ruff==0.14.13 +ruff==0.14.14 GitPython>=3.1.30 From 0de52d85639799b2e6bce9fec1582991b76f990c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 23 Jan 2026 18:00:22 +0000 Subject: [PATCH 1338/1718] MAINT: Bump hypothesis from 6.150.2 to 6.150.3 in /requirements Bumps [hypothesis](https://github.com/HypothesisWorks/hypothesis) from 6.150.2 to 6.150.3. - [Release notes](https://github.com/HypothesisWorks/hypothesis/releases) - [Commits](https://github.com/HypothesisWorks/hypothesis/compare/hypothesis-python-6.150.2...hypothesis-python-6.150.3) --- updated-dependencies: - dependency-name: hypothesis dependency-version: 6.150.3 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- requirements/emscripten_test_requirements.txt | 2 +- requirements/test_requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements/emscripten_test_requirements.txt b/requirements/emscripten_test_requirements.txt index 062ac0f1e7f5..3d70750e1506 100644 --- a/requirements/emscripten_test_requirements.txt +++ b/requirements/emscripten_test_requirements.txt @@ -1,4 +1,4 @@ -hypothesis==6.150.2 +hypothesis==6.150.3 pytest==9.0.2 tzdata pytest-xdist diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index dd036f5e1267..2fc6698c0a5f 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -1,5 +1,5 @@ Cython -hypothesis==6.150.2 +hypothesis==6.150.3 pytest==9.0.2 pytest-cov==7.0.0 meson From b31abc35c59ed614d690b42b5cdcd0e2f4a4b11b Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 24 Jan 2026 18:33:34 +0100 Subject: [PATCH 1339/1718] TYP: remove `_typing._FiniteNestedSequence` --- numpy/_typing/__init__.py | 1 - numpy/_typing/_array_like.py | 11 +---------- numpy/lib/_index_tricks_impl.pyi | 7 ++++--- 3 files changed, 5 insertions(+), 14 deletions(-) diff --git a/numpy/_typing/__init__.py b/numpy/_typing/__init__.py index 4de797bd4e37..f69368a95e10 100644 --- a/numpy/_typing/__init__.py +++ b/numpy/_typing/__init__.py @@ -21,7 +21,6 @@ _ArrayLikeTD64_co as _ArrayLikeTD64_co, _ArrayLikeUInt_co as _ArrayLikeUInt_co, _ArrayLikeVoid_co as _ArrayLikeVoid_co, - _FiniteNestedSequence as _FiniteNestedSequence, _SupportsArray as _SupportsArray, _SupportsArrayFunc as _SupportsArrayFunc, ) diff --git a/numpy/_typing/_array_like.py b/numpy/_typing/_array_like.py index b8cb2c7872c1..8bd6f4b2806e 100644 --- a/numpy/_typing/_array_like.py +++ b/numpy/_typing/_array_like.py @@ -1,4 +1,4 @@ -from collections.abc import Buffer, Callable, Collection, Sequence +from collections.abc import Buffer, Callable, Collection from typing import TYPE_CHECKING, 
Any, Protocol, runtime_checkable import numpy as np @@ -36,15 +36,6 @@ def __array_function__( ) -> object: ... -# TODO: Wait until mypy supports recursive objects in combination with typevars -type _FiniteNestedSequence[T] = ( - T - | Sequence[T] - | Sequence[Sequence[T]] - | Sequence[Sequence[Sequence[T]]] - | Sequence[Sequence[Sequence[Sequence[T]]]] -) - # A subset of `npt.ArrayLike` that can be parametrized w.r.t. `np.generic` type _ArrayLike[ScalarT: np.generic] = ( _SupportsArray[np.dtype[ScalarT]] diff --git a/numpy/lib/_index_tricks_impl.pyi b/numpy/lib/_index_tricks_impl.pyi index 97930196ecfd..2b21e754d5bc 100644 --- a/numpy/lib/_index_tricks_impl.pyi +++ b/numpy/lib/_index_tricks_impl.pyi @@ -23,7 +23,6 @@ from numpy._typing import ( _AnyShape, _ArrayLike, _DTypeLike, - _FiniteNestedSequence, _HasDType, _NestedSequence, _SupportsArray, @@ -61,7 +60,7 @@ class ndenumerate(Generic[_ScalarT_co]): # noqa: UP046 @overload def __init__[ScalarT: np.generic]( self: ndenumerate[ScalarT], - arr: _FiniteNestedSequence[_SupportsArray[np.dtype[ScalarT]]], + arr: _NestedSequence[_SupportsArray[np.dtype[ScalarT]]] | _SupportsArray[np.dtype[ScalarT]], ) -> None: ... @overload def __init__(self: ndenumerate[np.str_], arr: str | _NestedSequence[str]) -> None: ... @@ -232,7 +231,9 @@ class IndexExpression(Generic[_BoolT_co]): def __getitem__[T](self: IndexExpression[L[False]], item: T) -> T: ... @overload -def ix_[DTypeT: np.dtype](*args: _FiniteNestedSequence[_HasDType[DTypeT]]) -> tuple[np.ndarray[_AnyShape, DTypeT], ...]: ... +def ix_[DTypeT: np.dtype]( + *args: _NestedSequence[_HasDType[DTypeT]] | _HasDType[DTypeT] +) -> tuple[np.ndarray[_AnyShape, DTypeT], ...]: ... @overload def ix_(*args: str | _NestedSequence[str]) -> tuple[NDArray[np.str_], ...]: ... 
@overload From f590d096927d9165979d3a7fb3b72a07364344f2 Mon Sep 17 00:00:00 2001 From: Kader Miyanyedi <48386782+Kadermiyanyedi@users.noreply.github.com> Date: Mon, 26 Jan 2026 13:18:50 +0300 Subject: [PATCH 1340/1718] MAINT: Enable linting with ruff E501 (#30630) Co-authored-by: Matti Picus Co-authored-by: Joren Hammudoglu --- numpy/_core/tests/test_cpu_features.py | 25 ++- numpy/_core/tests/test_nditer.py | 196 +++++++++++++--- numpy/_core/tests/test_umath.py | 299 ++++++++++++++++++++----- numpy/_typing/_array_like.py | 19 +- ruff.toml | 6 - 5 files changed, 441 insertions(+), 104 deletions(-) diff --git a/numpy/_core/tests/test_cpu_features.py b/numpy/_core/tests/test_cpu_features.py index 431fcb40b324..c95886752949 100644 --- a/numpy/_core/tests/test_cpu_features.py +++ b/numpy/_core/tests/test_cpu_features.py @@ -79,7 +79,11 @@ def test_features(self): self.load_flags() for gname, features in self.features_groups.items(): test_features = [self.cpu_have(f) for f in features] - assert_features_equal(__cpu_features__.get(gname), all(test_features), gname) + assert_features_equal( + __cpu_features__.get(gname), + all(test_features), + gname, + ) for feature_name in self.features: cpu_have = self.cpu_have(feature_name) @@ -124,7 +128,9 @@ def load_flags_auxv(self): " therefore this test class cannot be properly executed." 
), ) -@pytest.mark.thread_unsafe(reason="setup & tmp_path_factory threads-unsafe, modifies environment variables") +@pytest.mark.thread_unsafe( + reason="setup & tmp_path_factory threads-unsafe, modifies environment variables", +) class TestEnvPrivation: cwd = pathlib.Path(__file__).parent.resolve() env = os.environ.copy() @@ -336,7 +342,8 @@ def test_impossible_feature_enable(self): machine = platform.machine() is_x86 = re.match(r"^(amd64|x86|i386|i686)", machine, re.IGNORECASE) @pytest.mark.skipif( - not (is_linux or is_cygwin) or not is_x86, reason="Only for Linux and x86" + not (is_linux or is_cygwin) or not is_x86, + reason="Only for Linux and x86", ) class Test_X86_Features(AbstractTest): features = [] @@ -359,7 +366,9 @@ class Test_X86_Features(AbstractTest): "AVX512VBMI2", "AVX512BITALG", "AVX512VPOPCNTDQ", "VAES", "VPCLMULQDQ", "GFNI" ] - features_groups["AVX512_SPR"] = features_groups["AVX512_ICL"] + ["AVX512FP16", "AVX512BF16"] + features_groups["AVX512_SPR"] = ( + features_groups["AVX512_ICL"] + ["AVX512FP16", "AVX512BF16"] + ) features_map = { "SSE3": "PNI", "SSE41": "SSE4_1", "SSE42": "SSE4_2", "FMA3": "FMA", @@ -420,14 +429,18 @@ def load_flags(self): else: self.features_map = { # ELF auxiliary vector and /proc/cpuinfo on Linux kernel(armv8 aarch32) - # doesn't provide information about ASIMD, so we assume that ASIMD is supported + # doesn't provide information about ASIMD + # so we assume that ASIMD is supported # if the kernel reports any one of the following ARM8 features. 
"ASIMD": ("AES", "SHA1", "SHA2", "PMULL", "CRC32") } is_loongarch = re.match(r"^(loongarch)", machine, re.IGNORECASE) -@pytest.mark.skipif(not is_linux or not is_loongarch, reason="Only for Linux and LoongArch") +@pytest.mark.skipif( + not is_linux or not is_loongarch, + reason="Only for Linux and LoongArch", +) class Test_LOONGARCH_Features(AbstractTest): features = ["LSX"] diff --git a/numpy/_core/tests/test_nditer.py b/numpy/_core/tests/test_nditer.py index d2fc69a03b5f..775086353501 100644 --- a/numpy/_core/tests/test_nditer.py +++ b/numpy/_core/tests/test_nditer.py @@ -262,43 +262,164 @@ def test_iter_best_order_multi_index_3d(): a = arange(12) # 3D C-order i = nditer(a.reshape(2, 3, 2), ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), - [(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (0, 2, 0), (0, 2, 1), - (1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1), (1, 2, 0), (1, 2, 1)]) + assert_equal( + iter_multi_index(i), + [ + (0, 0, 0), + (0, 0, 1), + (0, 1, 0), + (0, 1, 1), + (0, 2, 0), + (0, 2, 1), + (1, 0, 0), + (1, 0, 1), + (1, 1, 0), + (1, 1, 1), + (1, 2, 0), + (1, 2, 1), + ], + ) # 3D Fortran-order i = nditer(a.reshape(2, 3, 2).copy(order='F'), ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), - [(0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 2, 0), (1, 2, 0), - (0, 0, 1), (1, 0, 1), (0, 1, 1), (1, 1, 1), (0, 2, 1), (1, 2, 1)]) + assert_equal( + iter_multi_index(i), + [ + (0, 0, 0), + (1, 0, 0), + (0, 1, 0), + (1, 1, 0), + (0, 2, 0), + (1, 2, 0), + (0, 0, 1), + (1, 0, 1), + (0, 1, 1), + (1, 1, 1), + (0, 2, 1), + (1, 2, 1), + ], + ) # 3D reversed C-order i = nditer(a.reshape(2, 3, 2)[::-1], ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), - [(1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1), (1, 2, 0), (1, 2, 1), - (0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (0, 2, 0), (0, 2, 1)]) + assert_equal( + iter_multi_index(i), + [ + (1, 0, 0), + (1, 0, 1), + (1, 1, 0), + (1, 1, 1), + (1, 2, 0), + (1, 2, 1), + (0, 0, 0), + 
(0, 0, 1), + (0, 1, 0), + (0, 1, 1), + (0, 2, 0), + (0, 2, 1), + ], + ) i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), - [(0, 2, 0), (0, 2, 1), (0, 1, 0), (0, 1, 1), (0, 0, 0), (0, 0, 1), - (1, 2, 0), (1, 2, 1), (1, 1, 0), (1, 1, 1), (1, 0, 0), (1, 0, 1)]) + assert_equal( + iter_multi_index(i), + [ + (0, 2, 0), + (0, 2, 1), + (0, 1, 0), + (0, 1, 1), + (0, 0, 0), + (0, 0, 1), + (1, 2, 0), + (1, 2, 1), + (1, 1, 0), + (1, 1, 1), + (1, 0, 0), + (1, 0, 1), + ], + ) i = nditer(a.reshape(2, 3, 2)[:, :, ::-1], ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), - [(0, 0, 1), (0, 0, 0), (0, 1, 1), (0, 1, 0), (0, 2, 1), (0, 2, 0), - (1, 0, 1), (1, 0, 0), (1, 1, 1), (1, 1, 0), (1, 2, 1), (1, 2, 0)]) + assert_equal( + iter_multi_index(i), + [ + (0, 0, 1), + (0, 0, 0), + (0, 1, 1), + (0, 1, 0), + (0, 2, 1), + (0, 2, 0), + (1, 0, 1), + (1, 0, 0), + (1, 1, 1), + (1, 1, 0), + (1, 2, 1), + (1, 2, 0), + ], + ) # 3D reversed Fortran-order - i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1], - ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), - [(1, 0, 0), (0, 0, 0), (1, 1, 0), (0, 1, 0), (1, 2, 0), (0, 2, 0), - (1, 0, 1), (0, 0, 1), (1, 1, 1), (0, 1, 1), (1, 2, 1), (0, 2, 1)]) - i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1], - ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), - [(0, 2, 0), (1, 2, 0), (0, 1, 0), (1, 1, 0), (0, 0, 0), (1, 0, 0), - (0, 2, 1), (1, 2, 1), (0, 1, 1), (1, 1, 1), (0, 0, 1), (1, 0, 1)]) - i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, :, ::-1], - ['multi_index'], [['readonly']]) - assert_equal(iter_multi_index(i), - [(0, 0, 1), (1, 0, 1), (0, 1, 1), (1, 1, 1), (0, 2, 1), (1, 2, 1), - (0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 2, 0), (1, 2, 0)]) + i = nditer( + a.reshape(2, 3, 2).copy(order='F')[::-1], + ['multi_index'], + [['readonly']], + ) + assert_equal( + iter_multi_index(i), + [ + (1, 0, 0), + (0, 0, 0), + (1, 1, 0), + 
(0, 1, 0), + (1, 2, 0), + (0, 2, 0), + (1, 0, 1), + (0, 0, 1), + (1, 1, 1), + (0, 1, 1), + (1, 2, 1), + (0, 2, 1), + ], + ) + i = nditer( + a.reshape(2, 3, 2).copy(order="F")[:, ::-1], + ["multi_index"], + [["readonly"]], + ) + assert_equal( + iter_multi_index(i), + [ + (0, 2, 0), + (1, 2, 0), + (0, 1, 0), + (1, 1, 0), + (0, 0, 0), + (1, 0, 0), + (0, 2, 1), + (1, 2, 1), + (0, 1, 1), + (1, 1, 1), + (0, 0, 1), + (1, 0, 1), + ], + ) + i = nditer( + a.reshape(2, 3, 2).copy(order="F")[:, :, ::-1], + ["multi_index"], + [["readonly"]], + ) + assert_equal( + iter_multi_index(i), + [ + (0, 0, 1), + (1, 0, 1), + (0, 1, 1), + (1, 1, 1), + (0, 2, 1), + (1, 2, 1), + (0, 0, 0), + (1, 0, 0), + (0, 1, 0), + (1, 1, 0), + (0, 2, 0), + (1, 2, 0), + ], + ) def test_iter_best_order_c_index_1d(): # The C index should be correct with any reordering @@ -1223,8 +1344,14 @@ def test_iter_copy_if_overlap(): x = arange(10) a = x b = x - i = nditer([a, b], ['copy_if_overlap'], [['readonly', 'overlap_assume_elementwise'], - ['readwrite', 'overlap_assume_elementwise']]) + i = nditer( + [a, b], + ["copy_if_overlap"], + [ + ["readonly", "overlap_assume_elementwise"], + ["readwrite", "overlap_assume_elementwise"], + ], + ) with i: assert_(i.operands[0] is a and i.operands[1] is b) with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['readwrite']]) as i: @@ -2646,7 +2773,10 @@ def test_0d(self): i, j = np.nested_iters(a, [[1, 0, 2], []]) vals = [list(j) for _ in i] - assert_equal(vals, [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11]]) + assert_equal( + vals, + [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11]], + ) i, j, k = np.nested_iters(a, [[2, 0], [], [1]]) vals = [] diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py index 385bae606cb2..fa7622cca482 100644 --- a/numpy/_core/tests/test_umath.py +++ b/numpy/_core/tests/test_umath.py @@ -657,7 +657,10 @@ def test_zero_division_complex(self): def test_floor_division_complex(self): # check 
that floor division, divmod and remainder raises type errors - x = np.array([.9 + 1j, -.1 + 1j, .9 + .5 * 1j, .9 + 2. * 1j], dtype=np.complex128) + x = np.array( + [.9 + 1j, -.1 + 1j, .9 + .5 * 1j, .9 + 2. * 1j], + dtype=np.complex128, + ) with pytest.raises(TypeError): x // 7 with pytest.raises(TypeError): @@ -704,7 +707,11 @@ def test_floor_division_corner_cases(self, dtype): fzer = np.array(0.0, dtype=dtype) finf = np.array(np.inf, dtype=dtype) with warnings.catch_warnings(): - warnings.filterwarnings('ignore', "invalid value encountered in floor_divide", RuntimeWarning) + warnings.filterwarnings( + 'ignore', + "invalid value encountered in floor_divide", + RuntimeWarning, + ) div = np.floor_divide(fnan, fone) assert np.isnan(div), f"div: {div}" div = np.floor_divide(fone, fnan) @@ -860,8 +867,16 @@ def test_float_divmod_corner_cases(self): fzer = np.array(0.0, dtype=dt) finf = np.array(np.inf, dtype=dt) with warnings.catch_warnings(): - warnings.filterwarnings('ignore', "invalid value encountered in divmod", RuntimeWarning) - warnings.filterwarnings('ignore', "divide by zero encountered in divmod", RuntimeWarning) + warnings.filterwarnings( + "ignore", + "invalid value encountered in divmod", + RuntimeWarning, + ) + warnings.filterwarnings( + "ignore", + "divide by zero encountered in divmod", + RuntimeWarning, + ) div, rem = np.divmod(fone, fzer) assert np.isinf(div), f'dt: {dt}, div: {rem}' assert np.isnan(rem), f'dt: {dt}, rem: {rem}' @@ -899,8 +914,16 @@ def test_float_remainder_corner_cases(self): # Check nans, inf with warnings.catch_warnings(): - warnings.filterwarnings('ignore', "invalid value encountered in remainder", RuntimeWarning) - warnings.filterwarnings('ignore', "invalid value encountered in fmod", RuntimeWarning) + warnings.filterwarnings( + "ignore", + "invalid value encountered in remainder", + RuntimeWarning, + ) + warnings.filterwarnings( + "ignore", + "invalid value encountered in fmod", + RuntimeWarning, + ) for dt in 
np.typecodes['Float']: fone = np.array(1.0, dtype=dt) fzer = np.array(0.0, dtype=dt) @@ -1121,15 +1144,36 @@ def test_power_complex(self): assert_almost_equal(x**3, [(1 + 2j)**3, (2 + 3j)**3, (3 + 4j)**3]) assert_almost_equal(x**4, [(1 + 2j)**4, (2 + 3j)**4, (3 + 4j)**4]) assert_almost_equal(x**(-1), [1 / (1 + 2j), 1 / (2 + 3j), 1 / (3 + 4j)]) - assert_almost_equal(x**(-2), [1 / (1 + 2j)**2, 1 / (2 + 3j)**2, 1 / (3 + 4j)**2]) - assert_almost_equal(x**(-3), [(-11 + 2j) / 125, (-46 - 9j) / 2197, - (-117 - 44j) / 15625]) - assert_almost_equal(x**(0.5), [ncu.sqrt(1 + 2j), ncu.sqrt(2 + 3j), - ncu.sqrt(3 + 4j)]) + assert_almost_equal( + x**(-2), + [1 / (1 + 2j)**2, + 1 / (2 + 3j)**2, + 1 / (3 + 4j)**2], + ) + assert_almost_equal( + x**(-3), + [(-11 + 2j) / 125, + (-46 - 9j) / 2197, + (-117 - 44j) / 15625], + ) + assert_almost_equal( + x**(0.5), + [ncu.sqrt(1 + 2j), + ncu.sqrt(2 + 3j), + ncu.sqrt(3 + 4j)], + ) norm = 1. / ((x**14)[0]) - assert_almost_equal(x**14 * norm, - [i * norm for i in [-76443 + 16124j, 23161315 + 58317492j, - 5583548873 + 2465133864j]]) + assert_almost_equal( + x**14 * norm, + [ + i * norm + for i in [ + -76443 + 16124j, + 23161315 + 58317492j, + 5583548873 + 2465133864j, + ] + ], + ) # Ticket #836 def assert_complex_equal(x, y): @@ -1413,8 +1457,16 @@ def test_log_strides(self): y_true = np.log(x_f64) y_special = np.log(x_special) for jj in strides: - assert_array_almost_equal_nulp(np.log(x_f64[::jj]), y_true[::jj], nulp=2) - assert_array_almost_equal_nulp(np.log(x_special[::jj]), y_special[::jj], nulp=2) + assert_array_almost_equal_nulp( + np.log(x_f64[::jj]), + y_true[::jj], + nulp=2, + ) + assert_array_almost_equal_nulp( + np.log(x_special[::jj]), + y_special[::jj], + nulp=2, + ) # Reference values were computed with mpmath, with mp.dps = 200. 
@pytest.mark.parametrize( @@ -1464,7 +1516,11 @@ def test_exp_strides(self): x_f64 = np.float64(np.random.uniform(low=0.01, high=709.1, size=ii)) y_true = np.exp(x_f64) for jj in strides: - assert_array_almost_equal_nulp(np.exp(x_f64[::jj]), y_true[::jj], nulp=2) + assert_array_almost_equal_nulp( + np.exp(x_f64[::jj]), + y_true[::jj], + nulp=2, + ) class TestSpecialFloats: def test_exp_values(self): @@ -1904,15 +1960,53 @@ def test_divide_spurious_fpexception(self, dtype): np.zeros(128 + 1, dtype=dt) / subnorm class TestFPClass: - @pytest.mark.parametrize("stride", [-5, -4, -3, -2, -1, 1, - 2, 4, 5, 6, 7, 8, 9, 10]) + @pytest.mark.parametrize( + "stride", + [-5, -4, -3, -2, -1, 1, 2, 4, 5, 6, 7, 8, 9, 10], + ) def test_fpclass(self, stride): - arr_f64 = np.array([np.nan, -np.nan, np.inf, -np.inf, -1.0, 1.0, -0.0, 0.0, 2.2251e-308, -2.2251e-308], dtype='d') - arr_f32 = np.array([np.nan, -np.nan, np.inf, -np.inf, -1.0, 1.0, -0.0, 0.0, 1.4013e-045, -1.4013e-045], dtype='f') - nan = np.array([True, True, False, False, False, False, False, False, False, False]) # noqa: E221 - inf = np.array([False, False, True, True, False, False, False, False, False, False]) # noqa: E221 - sign = np.array([False, True, False, True, True, False, True, False, False, True]) # noqa: E221 - finite = np.array([False, False, False, False, True, True, True, True, True, True]) # noqa: E221 + arr_f64 = np.array( + [ + np.nan, + -np.nan, + np.inf, + -np.inf, + -1.0, + 1.0, + -0.0, + 0.0, + 2.2251e-308, + -2.2251e-308, + ], + dtype="d", + ) + arr_f32 = np.array( + [ + np.nan, + -np.nan, + np.inf, + -np.inf, + -1.0, + 1.0, + -0.0, + 0.0, + 1.4013e-045, + -1.4013e-045, + ], + dtype="f", + ) + nan = np.array( + [True, True, False, False, False, False, False, False, False, False], + ) + inf = np.array( + [False, False, True, True, False, False, False, False, False, False], + ) + sign = np.array( + [False, True, False, True, True, False, True, False, False, True], + ) + finite = np.array( + [False, 
False, False, False, True, True, True, True, True, True], + ) assert_equal(np.isnan(arr_f32[::stride]), nan[::stride]) assert_equal(np.isnan(arr_f64[::stride]), nan[::stride]) assert_equal(np.isinf(arr_f32[::stride]), inf[::stride]) @@ -2007,18 +2101,29 @@ def test_ldexp(self, dtype, stride): mant = np.array([0.125, 0.25, 0.5, 1., 1., 2., 4., 8.], dtype=dtype) exp = np.array([3, 2, 1, 0, 0, -1, -2, -3], dtype='i') out = np.zeros(8, dtype=dtype) - assert_equal(np.ldexp(mant[::stride], exp[::stride], out=out[::stride]), np.ones(8, dtype=dtype)[::stride]) + assert_equal( + np.ldexp(mant[::stride], exp[::stride], out=out[::stride]), + np.ones(8, dtype=dtype)[::stride], + ) assert_equal(out[::stride], np.ones(8, dtype=dtype)[::stride]) class TestFRExp: @pytest.mark.parametrize("stride", [-4, -2, -1, 1, 2, 4]) @pytest.mark.parametrize("dtype", ['f', 'd']) - @pytest.mark.skipif(not sys.platform.startswith('linux'), - reason="np.frexp gives different answers for NAN/INF on windows and linux") + @pytest.mark.skipif( + not sys.platform.startswith('linux'), + reason="np.frexp gives different answers for NAN/INF on windows and linux", + ) @pytest.mark.xfail(IS_MUSL, reason="gh23049") def test_frexp(self, dtype, stride): - arr = np.array([np.nan, np.nan, np.inf, -np.inf, 0.0, -0.0, 1.0, -1.0], dtype=dtype) - mant_true = np.array([np.nan, np.nan, np.inf, -np.inf, 0.0, -0.0, 0.5, -0.5], dtype=dtype) + arr = np.array( + [np.nan, np.nan, np.inf, -np.inf, 0.0, -0.0, 1.0, -1.0], + dtype=dtype, + ) + mant_true = np.array( + [np.nan, np.nan, np.inf, -np.inf, 0.0, -0.0, 0.5, -0.5], + dtype=dtype, + ) exp_true = np.array([0, 0, 0, 0, 0, 0, 1, 1], dtype='i') out_mant = np.ones(8, dtype=dtype) out_exp = 2 * np.ones(8, dtype='i') @@ -2101,8 +2206,16 @@ def test_sincos_float32(self): assert_array_max_ulp(np.cos(x_f32), np.float32(np.cos(x_f64)), maxulp=2) # test aliasing(issue #17761) tx_f32 = x_f32.copy() - assert_array_max_ulp(np.sin(x_f32, out=x_f32), np.float32(np.sin(x_f64)), maxulp=2) 
- assert_array_max_ulp(np.cos(tx_f32, out=tx_f32), np.float32(np.cos(x_f64)), maxulp=2) + assert_array_max_ulp( + np.sin(x_f32, out=x_f32), + np.float32(np.sin(x_f64)), + maxulp=2, + ) + assert_array_max_ulp( + np.cos(tx_f32, out=tx_f32), + np.float32(np.cos(x_f64)), + maxulp=2, + ) def test_strided_float32(self): np.random.seed(42) @@ -2117,10 +2230,12 @@ def test_strided_float32(self): sin_true = np.sin(x_f32_large) cos_true = np.cos(x_f32_large) for jj in strides: - assert_array_almost_equal_nulp(np.exp(x_f32[::jj]), exp_true[::jj], nulp=2) - assert_array_almost_equal_nulp(np.log(x_f32[::jj]), log_true[::jj], nulp=2) - assert_array_almost_equal_nulp(np.sin(x_f32_large[::jj]), sin_true[::jj], nulp=2) - assert_array_almost_equal_nulp(np.cos(x_f32_large[::jj]), cos_true[::jj], nulp=2) + x_slice = x_f32[::jj] + x_large = x_f32_large[::jj] + assert_array_almost_equal_nulp(np.exp(x_slice), exp_true[::jj], nulp=2) + assert_array_almost_equal_nulp(np.log(x_slice), log_true[::jj], nulp=2) + assert_array_almost_equal_nulp(np.sin(x_large), sin_true[::jj], nulp=2) + assert_array_almost_equal_nulp(np.cos(x_large), cos_true[::jj], nulp=2) class TestLogAddExp(_FilterInvalids): def test_logaddexp_values(self): @@ -2242,24 +2357,38 @@ def test_no_fpe(self): def assert_arctan2_isnan(x, y): - assert_(np.isnan(ncu.arctan2(x, y)), f"arctan({x}, {y}) is {ncu.arctan2(x, y)}, not nan") + assert_( + np.isnan(ncu.arctan2(x, y)), + f"arctan({x}, {y}) is {ncu.arctan2(x, y)}, not nan", + ) def assert_arctan2_ispinf(x, y): - assert_((np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) > 0), f"arctan({x}, {y}) is {ncu.arctan2(x, y)}, not +inf") + assert_( + (np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) > 0), + f"arctan({x}, {y}) is {ncu.arctan2(x, y)}, not +inf", + ) def assert_arctan2_isninf(x, y): - assert_((np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) < 0), f"arctan({x}, {y}) is {ncu.arctan2(x, y)}, not -inf") + assert_( + (np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) < 0), + 
f"arctan({x}, {y}) is {ncu.arctan2(x, y)}, not -inf", + ) def assert_arctan2_ispzero(x, y): - assert_((ncu.arctan2(x, y) == 0 and not np.signbit(ncu.arctan2(x, y))), f"arctan({x}, {y}) is {ncu.arctan2(x, y)}, not +0") + assert_( + (ncu.arctan2(x, y) == 0 and not np.signbit(ncu.arctan2(x, y))), + f"arctan({x}, {y}) is {ncu.arctan2(x, y)}, not +0", + ) def assert_arctan2_isnzero(x, y): - assert_((ncu.arctan2(x, y) == 0 and np.signbit(ncu.arctan2(x, y))), f"arctan({x}, {y}) is {ncu.arctan2(x, y)}, not -0") - + assert_( + (ncu.arctan2(x, y) == 0 and np.signbit(ncu.arctan2(x, y))), + f"arctan({x}, {y}) is {ncu.arctan2(x, y)}, not -0", + ) class TestArctan2SpecialValues: def test_one_one(self): @@ -2423,9 +2552,15 @@ def test_strided_array(self): out_maxtrue = np.array([-2.0, 1.0, 1.0, 10.0, 1.0, 1.0, np.nan, 1.0]) assert_equal(np.maximum(arr1, arr2), maxtrue) assert_equal(np.maximum(arr1[::2], arr2[::2]), maxtrue[::2]) - assert_equal(np.maximum(arr1[:4:], arr2[::2]), np.array([-2.0, np.nan, 10.0, 1.0])) + assert_equal( + np.maximum(arr1[:4:], arr2[::2]), + np.array([-2.0, np.nan, 10.0, 1.0]), + ) assert_equal(np.maximum(arr1[::3], arr2[:3:]), np.array([-2.0, 0.0, np.nan])) - assert_equal(np.maximum(arr1[:6:2], arr2[::3], out=out[::3]), np.array([-2.0, 10., np.nan])) + assert_equal( + np.maximum(arr1[:6:2], arr2[::3], out=out[::3]), + np.array([-2.0, 10.0, np.nan]), + ) assert_equal(out, out_maxtrue) def test_precision(self): @@ -2515,9 +2650,15 @@ def test_strided_array(self): out_mintrue = np.array([-4.0, 1.0, 1.0, 1.0, 1.0, 1.0, np.nan, 1.0]) assert_equal(np.minimum(arr1, arr2), mintrue) assert_equal(np.minimum(arr1[::2], arr2[::2]), mintrue[::2]) - assert_equal(np.minimum(arr1[:4:], arr2[::2]), np.array([-4.0, np.nan, 0.0, 0.0])) + assert_equal( + np.minimum(arr1[:4:], arr2[::2]), + np.array([-4.0, np.nan, 0.0, 0.0]), + ) assert_equal(np.minimum(arr1[::3], arr2[:3:]), np.array([-4.0, -1.0, np.nan])) - assert_equal(np.minimum(arr1[:6:2], arr2[::3], out=out[::3]), 
np.array([-4.0, 1.0, np.nan])) + assert_equal( + np.minimum(arr1[:6:2], arr2[::3], out=out[::3]), + np.array([-4.0, 1.0, np.nan]), + ) assert_equal(out, out_mintrue) def test_precision(self): @@ -4313,7 +4454,15 @@ def test_branch_cuts_complex64(self): _check_branch_cut(np.arccos, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64) _check_branch_cut(np.arctan, [0 - 2j, 2j], [1, 1], -1, 1, True, np.complex64) - _check_branch_cut(np.arcsinh, [0 - 2j, 2j], [1, 1], -1, 1, True, np.complex64) + _check_branch_cut( + np.arcsinh, + [0 - 2j, 2j], + [1, 1], + -1, + 1, + True, + np.complex64, + ) _check_branch_cut(np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True, np.complex64) _check_branch_cut(np.arctanh, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64) @@ -4322,9 +4471,33 @@ def test_branch_cuts_complex64(self): _check_branch_cut(np.arccos, [0 - 2j, 2j], [ 1, 1], 1, 1, False, np.complex64) _check_branch_cut(np.arctan, [ -2, 2], [1j, 1j], 1, 1, False, np.complex64) - _check_branch_cut(np.arcsinh, [ -2, 2, 0], [1j, 1j, 1], 1, 1, False, np.complex64) - _check_branch_cut(np.arccosh, [0 - 2j, 2j, 2], [1, 1, 1j], 1, 1, False, np.complex64) - _check_branch_cut(np.arctanh, [0 - 2j, 2j, 0], [1, 1, 1j], 1, 1, False, np.complex64) + _check_branch_cut( + np.arcsinh, + [-2, 2, 0], + [1j, 1j, 1], + 1, + 1, + False, + np.complex64, + ) + _check_branch_cut( + np.arccosh, + [0 - 2j, 2j, 2], + [1, 1, 1j], + 1, + 1, + False, + np.complex64, + ) + _check_branch_cut( + np.arctanh, + [0 - 2j, 2j, 0], + [1, 1, 1j], + 1, + 1, + False, + np.complex64, + ) def test_against_cmath(self): import cmath @@ -4512,7 +4685,10 @@ def mul(a, b): # with no identity (not reorderable) mul_ufunc = np.frompyfunc(mul, nin=2, nout=1) assert_equal(mul_ufunc.reduce([2, 3, 4]), 24) - assert_raises(ValueError, lambda: mul_ufunc.reduce(np.ones((2, 2)), axis=(0, 1))) + assert_raises( + ValueError, + lambda: mul_ufunc.reduce(np.ones((2, 2)), axis=(0, 1)), + ) assert_raises(ValueError, lambda: mul_ufunc.reduce([])) @@ -4567,15 
+4743,28 @@ def _check_branch_cut(f, x0, dx, re_sign=1, im_sign=-1, sig_zero_ok=False, x = x0[jr] x.real = ncu.NZERO ym = f(x) - assert_(np.all(np.absolute(y0[jr].real - ym.real * re_sign) < atol), (y0[jr], ym)) - assert_(np.all(np.absolute(y0[jr].imag - ym.imag * im_sign) < atol), (y0[jr], ym)) + assert_( + np.all(np.absolute(y0[jr].real - ym.real * re_sign) < atol), + (y0[jr], ym), + ) + assert_( + np.all(np.absolute(y0[jr].imag - ym.imag * im_sign) < atol), + (y0[jr], ym), + ) if np.any(ji): x = x0[ji] x.imag = ncu.NZERO ym = f(x) - assert_(np.all(np.absolute(y0[ji].real - ym.real * re_sign) < atol), (y0[ji], ym)) - assert_(np.all(np.absolute(y0[ji].imag - ym.imag * im_sign) < atol), (y0[ji], ym)) + assert_( + np.all(np.absolute(y0[ji].real - ym.real * re_sign) < atol), + (y0[ji], ym), + ) + assert_( + np.all(np.absolute(y0[ji].imag - ym.imag * im_sign) < atol), + (y0[ji], ym), + ) + def test_copysign(): assert_(np.copysign(1, -1) == -1) diff --git a/numpy/_typing/_array_like.py b/numpy/_typing/_array_like.py index 8bd6f4b2806e..5c249775f810 100644 --- a/numpy/_typing/_array_like.py +++ b/numpy/_typing/_array_like.py @@ -59,10 +59,16 @@ def __array_function__( type _ArrayLikeBool_co = _DualArrayLike[np.dtype[np.bool], bool] type _ArrayLikeUInt_co = _DualArrayLike[np.dtype[np.bool | np.unsignedinteger], bool] type _ArrayLikeInt_co = _DualArrayLike[np.dtype[np.bool | np.integer], int] -type _ArrayLikeFloat_co = _DualArrayLike[np.dtype[np.bool | np.integer | np.floating], float] +type _ArrayLikeFloat_co = _DualArrayLike[ + np.dtype[np.bool | np.integer | np.floating], + float, +] type _ArrayLikeComplex_co = _DualArrayLike[np.dtype[np.bool | np.number], complex] type _ArrayLikeNumber_co = _ArrayLikeComplex_co -type _ArrayLikeTD64_co = _DualArrayLike[np.dtype[np.bool | np.integer | np.timedelta64], int] +type _ArrayLikeTD64_co = _DualArrayLike[ + np.dtype[np.bool | np.integer | np.timedelta64], + int, +] type _ArrayLikeDT64_co = _ArrayLike[np.datetime64] type 
_ArrayLikeObject_co = _ArrayLike[np.object_] @@ -70,10 +76,15 @@ def __array_function__( type _ArrayLikeBytes_co = _DualArrayLike[np.dtype[np.bytes_], bytes] type _ArrayLikeStr_co = _DualArrayLike[np.dtype[np.str_], str] type _ArrayLikeString_co = _DualArrayLike[StringDType, str] -type _ArrayLikeAnyString_co = _DualArrayLike[np.dtype[np.character] | StringDType, bytes | str] +type _ArrayLikeAnyString_co = _DualArrayLike[ + np.dtype[np.character] | StringDType, + bytes | str, +] type __Float64_co = np.floating[_64Bit] | np.float32 | np.float16 | np.integer | np.bool -type __Complex128_co = np.number[_64Bit] | np.number[_32Bit] | np.float16 | np.integer | np.bool +type __Complex128_co = ( + np.number[_64Bit] | np.number[_32Bit] | np.float16 | np.integer | np.bool +) type _ArrayLikeFloat64_co = _DualArrayLike[np.dtype[__Float64_co], float] type _ArrayLikeComplex128_co = _DualArrayLike[np.dtype[__Complex128_co], complex] diff --git a/ruff.toml b/ruff.toml index 08468d3741f1..7b6c340c0fa1 100644 --- a/ruff.toml +++ b/ruff.toml @@ -79,17 +79,11 @@ ignore = [ "bench_*.py" = ["B015", "B018"] "test*.py" = ["B015", "B018", "E201", "E714"] -"numpy/_core/tests/test_cpu_features.py" = ["E501"] -"numpy/_core/tests/test_nditer*py" = ["E501"] -"numpy/_core/tests/test_umath.py" = ["E501"] "numpy/_core/_add_newdocs.py" = ["E501"] "numpy/_core/_add_newdocs_scalars.py" = ["E501"] "numpy/lib/tests/test_format.py" = ["E501"] "numpy/linalg/tests/test_linalg.py" = ["E501"] "numpy/f2py/*py" = ["E501"] -# for typing related files we follow https://typing.python.org/en/latest/guides/writing_stubs.html#maximum-line-length -"numpy/_typing/_array_like.py" = ["E501"] -"numpy/_typing/_dtype_like.py" = ["E501"] "numpy*pyi" = ["E501"] # "useless assignments" aren't so useless when you're testing that they don't make type checkers scream "numpy/typing/tests/data/*" = ["B015", "B018", "E501"] From 07bbf09bb9ab3599f7bd37d2dc7be90993abda70 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" 
<49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 26 Jan 2026 19:48:02 +0000 Subject: [PATCH 1341/1718] MAINT: Bump hypothesis from 6.150.3 to 6.151.2 in /requirements Bumps [hypothesis](https://github.com/HypothesisWorks/hypothesis) from 6.150.3 to 6.151.2. - [Release notes](https://github.com/HypothesisWorks/hypothesis/releases) - [Commits](https://github.com/HypothesisWorks/hypothesis/compare/hypothesis-python-6.150.3...hypothesis-python-6.151.2) --- updated-dependencies: - dependency-name: hypothesis dependency-version: 6.151.2 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- requirements/emscripten_test_requirements.txt | 2 +- requirements/test_requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements/emscripten_test_requirements.txt b/requirements/emscripten_test_requirements.txt index 3d70750e1506..9e53f827fe07 100644 --- a/requirements/emscripten_test_requirements.txt +++ b/requirements/emscripten_test_requirements.txt @@ -1,4 +1,4 @@ -hypothesis==6.150.3 +hypothesis==6.151.2 pytest==9.0.2 tzdata pytest-xdist diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index 2fc6698c0a5f..72e2faafb1b1 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -1,5 +1,5 @@ Cython -hypothesis==6.150.3 +hypothesis==6.151.2 pytest==9.0.2 pytest-cov==7.0.0 meson From 1e5b6c56e2285b8e4b438f6ae07a2832ab821c4f Mon Sep 17 00:00:00 2001 From: riku-sakamoto <46015196+riku-sakamoto@users.noreply.github.com> Date: Tue, 27 Jan 2026 17:48:16 +0900 Subject: [PATCH 1342/1718] BUG: Ensure stable data address in scalar __array_interface__ (#30538) Implement `__array_interface__` for NumPy scalar types to avoid creating a temporary array. Set the read-only flag in the `__array_interface__` dict for NumPy scalars. 
As a consequence of this change, `numpy.ctypeslib.as_ctypes` now raises TypeError when NumPy scalar types are passed directly. --- doc/release/upcoming_changes/30538.change.rst | 18 +++ numpy/_core/src/multiarray/common.c | 58 +++++++++ numpy/_core/src/multiarray/common.h | 4 + numpy/_core/src/multiarray/descriptor.c | 36 ++++++ numpy/_core/src/multiarray/descriptor.h | 2 + numpy/_core/src/multiarray/getset.c | 112 +++++------------- numpy/_core/src/multiarray/scalartypes.c.src | 73 ++++++++++-- numpy/tests/test_ctypeslib.py | 21 ++++ 8 files changed, 233 insertions(+), 91 deletions(-) create mode 100644 doc/release/upcoming_changes/30538.change.rst diff --git a/doc/release/upcoming_changes/30538.change.rst b/doc/release/upcoming_changes/30538.change.rst new file mode 100644 index 000000000000..8372e94b9bd8 --- /dev/null +++ b/doc/release/upcoming_changes/30538.change.rst @@ -0,0 +1,18 @@ +``numpy.ctypeslib.as_ctypes`` now does not support scalar types +---------------------------------------------------------------- +The function ``numpy.ctypeslib.as_ctypes`` has been updated to only accept ``numpy.ndarray``. +Passing a scalar type (e.g., ``numpy.int32(5)``) will now raise a ``TypeError``. +This change was made to avoid the issue `gh-30354 `__ +and to enforce the readonly nature of scalar types in NumPy. +The previous behavior relied on undocumented implicit temporary arrays and was not well-defined. +Users who need to convert scalar types to ctypes should first convert them to an array +(e.g., ``numpy.asarray``) before passing them to ``numpy.ctypeslib.as_ctypes``. + + +``__array_interface__`` changes on scalars +------------------------------------------ +Scalars now export the ``__array_interface__`` directly rather than including +an array copy as a ``__ref`` entry. This means that scalars are now exported +as read-only while they previously exported as writeable. +The path via ``__ref`` was undocumented and not consistently used even +within NumPy itself. 
diff --git a/numpy/_core/src/multiarray/common.c b/numpy/_core/src/multiarray/common.c index 2e9bcbf29e8f..501472807713 100644 --- a/numpy/_core/src/multiarray/common.c +++ b/numpy/_core/src/multiarray/common.c @@ -477,3 +477,61 @@ check_is_convertible_to_scalar(PyArrayObject *v) "only 0-dimensional arrays can be converted to Python scalars"); return -1; } + +NPY_NO_EXPORT PyObject * +build_array_interface(PyObject *dataptr, PyObject *descr, PyObject *strides, + PyObject *typestr, PyObject *shape) +{ + PyObject *inter = NULL; + PyObject *version = NULL; + int ret; + + inter = PyDict_New(); + if (inter == NULL) { + goto fail; + } + + ret = PyDict_SetItemString(inter, "data", dataptr); + if (ret < 0) { + goto fail; + } + + ret = PyDict_SetItemString(inter, "strides", strides); + if (ret < 0) { + goto fail; + } + + ret = PyDict_SetItemString(inter, "descr", descr); + if (ret < 0) { + goto fail; + } + + ret = PyDict_SetItemString(inter, "typestr", typestr); + if (ret < 0) { + goto fail; + } + + ret = PyDict_SetItemString(inter, "shape", shape); + if (ret < 0) { + goto fail; + } + + version = PyLong_FromLong(3); + if (version == NULL) { + goto fail; + } + + ret = PyDict_SetItemString(inter, "version", version); + if (ret < 0) { + goto fail; + } + Py_XDECREF(version); + return inter; + + +fail: + Py_XDECREF(inter); + Py_XDECREF(version); + return NULL; + +} diff --git a/numpy/_core/src/multiarray/common.h b/numpy/_core/src/multiarray/common.h index db7bc64733db..c0b5c043c7a3 100644 --- a/numpy/_core/src/multiarray/common.h +++ b/numpy/_core/src/multiarray/common.h @@ -22,6 +22,10 @@ extern "C" { #define error_converting(x) (((x) == -1) && PyErr_Occurred()) +NPY_NO_EXPORT PyObject * +build_array_interface(PyObject *dataptr, PyObject *descr, PyObject *strides, + PyObject *typestr, PyObject *shape); + NPY_NO_EXPORT PyArray_Descr * PyArray_DTypeFromObjectStringDiscovery( PyObject *obj, PyArray_Descr *last_dtype, int string_type); diff --git 
a/numpy/_core/src/multiarray/descriptor.c b/numpy/_core/src/multiarray/descriptor.c index 0d1bf6b89e1a..0bbc6358f75b 100644 --- a/numpy/_core/src/multiarray/descriptor.c +++ b/numpy/_core/src/multiarray/descriptor.c @@ -3779,6 +3779,42 @@ descr_subscript(PyArray_Descr *self, PyObject *op) } } +static PyObject * +array_typestr_get(PyArray_Descr *self) +{ + return arraydescr_protocol_typestr_get(self, NULL); +} + + +NPY_NO_EXPORT PyObject * +array_protocol_descr_get(PyArray_Descr *self) +{ + PyObject *res; + PyObject *dobj; + + res = arraydescr_protocol_descr_get(self, NULL); + if (res) { + return res; + } + PyErr_Clear(); + + /* get default */ + dobj = PyTuple_New(2); + if (dobj == NULL) { + return NULL; + } + PyTuple_SET_ITEM(dobj, 0, PyUnicode_FromString("")); + PyTuple_SET_ITEM(dobj, 1, array_typestr_get(self)); + res = PyList_New(1); + if (res == NULL) { + Py_DECREF(dobj); + return NULL; + } + PyList_SET_ITEM(res, 0, dobj); + return res; +} + + static PySequenceMethods descr_as_sequence = { (lenfunc) descr_length, /* sq_length */ (binaryfunc) NULL, /* sq_concat */ diff --git a/numpy/_core/src/multiarray/descriptor.h b/numpy/_core/src/multiarray/descriptor.h index 284afabe96fc..233c434fd6b4 100644 --- a/numpy/_core/src/multiarray/descriptor.h +++ b/numpy/_core/src/multiarray/descriptor.h @@ -29,6 +29,8 @@ NPY_NO_EXPORT PyObject *arraydescr_protocol_typestr_get( NPY_NO_EXPORT PyObject *arraydescr_protocol_descr_get( PyArray_Descr *self, void *); +NPY_NO_EXPORT PyObject *array_protocol_descr_get(PyArray_Descr *self); + /* * offset: A starting offset. * alignment: A power-of-two alignment. 
diff --git a/numpy/_core/src/multiarray/getset.c b/numpy/_core/src/multiarray/getset.c index 1df9af5d0031..cec1ae275b71 100644 --- a/numpy/_core/src/multiarray/getset.c +++ b/numpy/_core/src/multiarray/getset.c @@ -215,12 +215,6 @@ array_priority_get(PyArrayObject *NPY_UNUSED(self), void *NPY_UNUSED(ignored)) return PyFloat_FromDouble(NPY_PRIORITY); } -static PyObject * -array_typestr_get(PyArrayObject *self) -{ - return arraydescr_protocol_typestr_get(PyArray_DESCR(self), NULL); -} - static PyObject * array_descr_get(PyArrayObject *self, void *NPY_UNUSED(ignored)) { @@ -228,34 +222,6 @@ array_descr_get(PyArrayObject *self, void *NPY_UNUSED(ignored)) return (PyObject *)PyArray_DESCR(self); } -static PyObject * -array_protocol_descr_get(PyArrayObject *self) -{ - PyObject *res; - PyObject *dobj; - - res = arraydescr_protocol_descr_get(PyArray_DESCR(self), NULL); - if (res) { - return res; - } - PyErr_Clear(); - - /* get default */ - dobj = PyTuple_New(2); - if (dobj == NULL) { - return NULL; - } - PyTuple_SET_ITEM(dobj, 0, PyUnicode_FromString("")); - PyTuple_SET_ITEM(dobj, 1, array_typestr_get(self)); - res = PyList_New(1); - if (res == NULL) { - Py_DECREF(dobj); - return NULL; - } - PyList_SET_ITEM(res, 0, dobj); - return res; -} - static PyObject * array_protocol_strides_get(PyArrayObject *self) { @@ -295,65 +261,49 @@ array_ctypes_get(PyArrayObject *self, void *NPY_UNUSED(ignored)) static PyObject * array_interface_get(PyArrayObject *self, void *NPY_UNUSED(ignored)) { - PyObject *dict; - PyObject *obj; + PyObject *dataptr = NULL; + PyObject *strides = NULL; + PyObject *shape = NULL; + PyObject *descr = NULL; + PyObject *typestr = NULL; + PyObject *dict = NULL; - dict = PyDict_New(); - if (dict == NULL) { - return NULL; + dataptr = array_dataptr_get(self, NULL); + if (dataptr == NULL) { + goto finish; } - int ret; - - /* dataptr */ - obj = array_dataptr_get(self, NULL); - ret = PyDict_SetItemString(dict, "data", obj); - Py_DECREF(obj); - if (ret < 0) { - 
Py_DECREF(dict); - return NULL; + strides = array_protocol_strides_get(self); + if (strides == NULL) { + goto finish; } - obj = array_protocol_strides_get(self); - ret = PyDict_SetItemString(dict, "strides", obj); - Py_DECREF(obj); - if (ret < 0) { - Py_DECREF(dict); - return NULL; + descr = array_protocol_descr_get(PyArray_DESCR(self)); + if (descr == NULL) { + goto finish; } - obj = array_protocol_descr_get(self); - ret = PyDict_SetItemString(dict, "descr", obj); - Py_DECREF(obj); - if (ret < 0) { - Py_DECREF(dict); - return NULL; + typestr = arraydescr_protocol_typestr_get(PyArray_DESCR(self), NULL); + if (typestr == NULL) { + goto finish; } - obj = arraydescr_protocol_typestr_get(PyArray_DESCR(self), NULL); - ret = PyDict_SetItemString(dict, "typestr", obj); - Py_DECREF(obj); - if (ret < 0) { - Py_DECREF(dict); - return NULL; - } - - obj = array_shape_get(self, NULL); - ret = PyDict_SetItemString(dict, "shape", obj); - Py_DECREF(obj); - if (ret < 0) { - Py_DECREF(dict); - return NULL; + shape = array_shape_get(self, NULL); + if (shape == NULL) { + goto finish; } - obj = PyLong_FromLong(3); - ret = PyDict_SetItemString(dict, "version", obj); - Py_DECREF(obj); - if (ret < 0) { - Py_DECREF(dict); - return NULL; - } + dict = build_array_interface( + dataptr, descr, strides, typestr, shape + ); + goto finish; +finish: + Py_XDECREF(dataptr); + Py_XDECREF(strides); + Py_XDECREF(shape); + Py_XDECREF(descr); + Py_XDECREF(typestr); return dict; } diff --git a/numpy/_core/src/multiarray/scalartypes.c.src b/numpy/_core/src/multiarray/scalartypes.c.src index 48ad4ab540a9..e23fcef06574 100644 --- a/numpy/_core/src/multiarray/scalartypes.c.src +++ b/numpy/_core/src/multiarray/scalartypes.c.src @@ -20,6 +20,7 @@ #include "mapping.h" #include "ctors.h" #include "dtypemeta.h" +#include "descriptor.h" #include "usertypes.h" #include "number.h" #include "numpyos.h" @@ -1787,26 +1788,78 @@ gentype_shape_get(PyObject *NPY_UNUSED(self), void *NPY_UNUSED(ignored)) } +static PyObject * 
+gentype_dataptr_get(PyObject *self) +{ + return Py_BuildValue( + "NO", + PyLong_FromVoidPtr(scalar_value(self, NULL)), + Py_True + ); +} + + static PyObject * gentype_interface_get(PyObject *self, void *NPY_UNUSED(ignored)) { - PyArrayObject *arr; - PyObject *inter; + PyObject *dataptr = NULL; + PyObject *strides = NULL; + PyObject *shape = NULL; + PyObject *descr = NULL; + PyObject *typestr = NULL; + PyArray_Descr *array_descr = NULL; + PyObject *inter = NULL; - arr = (PyArrayObject *)PyArray_FromScalar(self, NULL); - if (arr == NULL) { - return NULL; + + array_descr = PyArray_DescrFromScalar(self); + if (array_descr == NULL) { + goto finish; } - inter = PyObject_GetAttrString((PyObject *)arr, "__array_interface__"); - if (inter != NULL) { - PyDict_SetItemString(inter, "__ref", (PyObject *)arr); + + /* dataptr */ + dataptr = gentype_dataptr_get(self); + if (dataptr == NULL) { + goto finish; } - Py_DECREF(arr); + + /* strides */ + strides = gentype_shape_get(self, NULL); + if (strides == NULL) { + goto finish; + } + + /* descr */ + descr = array_protocol_descr_get(array_descr); + if (descr == NULL) { + goto finish; + } + + /* typestr */ + typestr = arraydescr_protocol_typestr_get(array_descr, NULL); + if (typestr == NULL) { + goto finish; + } + + /* shape */ + shape = gentype_shape_get(self, NULL); + if (shape == NULL) { + goto finish; + } + + inter = build_array_interface(dataptr, descr, strides, typestr, shape); + goto finish; + +finish: + Py_XDECREF(descr); + Py_XDECREF(dataptr); + Py_XDECREF(strides); + Py_XDECREF(shape); + Py_XDECREF(typestr); + Py_XDECREF(array_descr); return inter; } - static PyObject * gentype_typedescr_get(PyObject *self, void *NPY_UNUSED(ignored)) { diff --git a/numpy/tests/test_ctypeslib.py b/numpy/tests/test_ctypeslib.py index b88910ce457e..659c3d639e97 100644 --- a/numpy/tests/test_ctypeslib.py +++ b/numpy/tests/test_ctypeslib.py @@ -381,3 +381,24 @@ def test_overlapping(self): 'formats': [np.uint32, np.uint32] }) 
assert_raises(NotImplementedError, np.ctypeslib.as_ctypes_type, dt) + + def test_cannot_convert_to_ctypes(self): + + _type_to_value = { + np.str_: "aa", + np.bool: True, + np.datetime64: "2026-01-01", + } + for _scalar_type in np.sctypeDict.values(): + if _scalar_type == np.object_: + continue + + if _scalar_type in _type_to_value: + numpy_scalar = _scalar_type(_type_to_value[_scalar_type]) + else: + numpy_scalar = _scalar_type(1) + + with pytest.raises( + TypeError, match="readonly arrays unsupported" + ): + np.ctypeslib.as_ctypes(numpy_scalar) From 5a18ce375179e2e55465e055607dec328af5d223 Mon Sep 17 00:00:00 2001 From: Kumar Aditya Date: Tue, 27 Jan 2026 17:52:54 +0530 Subject: [PATCH 1343/1718] BUG: fix thread safety of `array_getbuffer` (#30667) Add critical sections around buffer info creation/mutation. --- numpy/_core/src/multiarray/buffer.c | 7 ++++++- numpy/_core/tests/test_multithreading.py | 26 ++++++++++++++++++++++++ 2 files changed, 32 insertions(+), 1 deletion(-) diff --git a/numpy/_core/src/multiarray/buffer.c b/numpy/_core/src/multiarray/buffer.c index a6c683f26a8b..908553462bfe 100644 --- a/numpy/_core/src/multiarray/buffer.c +++ b/numpy/_core/src/multiarray/buffer.c @@ -793,8 +793,10 @@ array_getbuffer(PyObject *obj, Py_buffer *view, int flags) } /* Fill in information (and add it to _buffer_info if necessary) */ + Py_BEGIN_CRITICAL_SECTION(self); info = _buffer_get_info( &((PyArrayObject_fields *)self)->_buffer_info, obj, flags); + Py_END_CRITICAL_SECTION(); if (info == NULL) { goto fail; } @@ -880,7 +882,10 @@ void_getbuffer(PyObject *self, Py_buffer *view, int flags) * to find the correct format. This format must also be stored, since * at least in theory it can change (in practice it should never change). 
*/ - _buffer_info_t *info = _buffer_get_info(&scalar->_buffer_info, self, flags); + _buffer_info_t *info = NULL; + Py_BEGIN_CRITICAL_SECTION(scalar); + info = _buffer_get_info(&scalar->_buffer_info, self, flags); + Py_END_CRITICAL_SECTION(); if (info == NULL) { Py_DECREF(self); return -1; diff --git a/numpy/_core/tests/test_multithreading.py b/numpy/_core/tests/test_multithreading.py index 83484fb4c131..d0f2ebce24e8 100644 --- a/numpy/_core/tests/test_multithreading.py +++ b/numpy/_core/tests/test_multithreading.py @@ -375,3 +375,29 @@ def replace_list_items(b): finally: if len(tasks) < 5: b.abort() + +def test_array__buffer__thread_safety(): + import inspect + arr = np.arange(1000) + flags = [inspect.BufferFlags.STRIDED, inspect.BufferFlags.READ] + + def func(b): + b.wait() + for i in range(100): + arr.__buffer__(flags[i % 2]) + + run_threaded(func, max_workers=8, pass_barrier=True) + +def test_void_dtype__buffer__thread_safety(): + import inspect + dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) + x = np.array(('ndarray_scalar', (1.2, 3.0)), dtype=dt)[()] + assert isinstance(x, np.void) + flags = [inspect.BufferFlags.STRIDES, inspect.BufferFlags.READ] + + def func(b): + b.wait() + for i in range(100): + x.__buffer__(flags[i % 2]) + + run_threaded(func, max_workers=8, pass_barrier=True) From 91451d506a345cc922deb5966ee65cd852be599e Mon Sep 17 00:00:00 2001 From: Vineet Kumar Date: Tue, 27 Jan 2026 20:22:55 +0530 Subject: [PATCH 1344/1718] DOC: fix typo in array.rst regarding sorting requirements --- doc/source/reference/c-api/array.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst index d28e535f9428..d2bdae695933 100644 --- a/doc/source/reference/c-api/array.rst +++ b/doc/source/reference/c-api/array.rst @@ -2365,7 +2365,7 @@ Item selection and manipulation Return an array with the items of ``self`` sorted along ``axis``. 
The array is sorted using an algorithm whose properties are specified by the value of - ``kind``, an integer/enum specifying the reguirements of the sorting + ``kind``, an integer/enum specifying the requirements of the sorting algorithm used. If ``self* ->descr`` is a data-type with fields defined, then ``self->descr->names`` is used to determine the sort order. A comparison where the first field is equal will use the second field and so on. To @@ -2382,7 +2382,7 @@ Item selection and manipulation Return an array of indices such that selection of these indices along the given ``axis`` would return a sorted version of ``self``. The array is sorted using an algorithm whose properties are specified by ``kind``, an - integer/enum specifying the reguirements of the sorting algorithm used. If + integer/enum specifying the requirements of the sorting algorithm used. If ``self->descr`` is a data-type with fields defined, then ``self->descr->names`` is used to determine the sort order. A comparison where the first field is equal will use the second field and so on. 
To From 2ccea713e1bbb447666c00932c1020aa2572d4c2 Mon Sep 17 00:00:00 2001 From: Marten van Kerkwijk Date: Tue, 27 Jan 2026 20:06:54 -0500 Subject: [PATCH 1345/1718] MAINT: Simplify array setstate by using general deallocation code (#30414) Co-authored-by: Sebastian Berg Co-authored-by: Sebastian Berg --- numpy/_core/src/multiarray/arrayobject.c | 99 +++++++++++++------- numpy/_core/src/multiarray/arrayobject.h | 7 ++ numpy/_core/src/multiarray/methods.c | 69 ++------------ numpy/_core/src/multiarray/npy_static_data.c | 1 + numpy/_core/src/multiarray/npy_static_data.h | 1 + 5 files changed, 82 insertions(+), 95 deletions(-) diff --git a/numpy/_core/src/multiarray/arrayobject.c b/numpy/_core/src/multiarray/arrayobject.c index fe2ec25558c5..a47abc372c98 100644 --- a/numpy/_core/src/multiarray/arrayobject.c +++ b/numpy/_core/src/multiarray/arrayobject.c @@ -358,80 +358,87 @@ PyArray_ResolveWritebackIfCopy(PyArrayObject * self) /*********************** end C-API functions **********************/ -/* dealloc must not raise an error, best effort try to write - to stderr and clear the error -*/ - -static inline void -WARN_IN_DEALLOC(PyObject* warning, const char * msg) { - if (PyErr_WarnEx(warning, msg, 1) < 0) { - PyObject * s; - - s = PyUnicode_FromString("array_dealloc"); - if (s) { - PyErr_WriteUnraisable(s); - Py_DECREF(s); - } - else { - PyErr_WriteUnraisable(Py_None); - } +/* + * During dealloc we cannot propagate errors so if unraisable is set + * we simply print out the error message and convert the error into + * success (returning 0). + */ +static inline int +write_and_clear_error_if_unraisable(int status, npy_bool unraisable) +{ + if (status < 0 && unraisable) { + PyErr_WriteUnraisable(npy_interned_str.array_dealloc); + return 0; } + return status; } /* array object functions */ -static void -array_dealloc(PyArrayObject *self) +/* + * Much of the actual work for dealloc, split off for use in __setstate__ + * via clear_array_attributes function defined below. 
+ * If not unraisable, will return -1 on error, 0 on success. + * If unraisable, always succeeds, though may print errors and warnings. + */ +static int +_clear_array_attributes(PyArrayObject *self, npy_bool unraisable) { PyArrayObject_fields *fa = (PyArrayObject_fields *)self; if (_buffer_info_free(fa->_buffer_info, (PyObject *)self) < 0) { - PyErr_WriteUnraisable(NULL); + if (write_and_clear_error_if_unraisable(-1, unraisable) < 0) { + return -1; + } } + fa->_buffer_info = NULL; - if (fa->weakreflist != NULL) { - PyObject_ClearWeakRefs((PyObject *)self); - } if (fa->base) { - int retval; if (PyArray_FLAGS(self) & NPY_ARRAY_WRITEBACKIFCOPY) { - char const * msg = "WRITEBACKIFCOPY detected in array_dealloc. " + char const * msg = "WRITEBACKIFCOPY detected in clearing of array. " " Required call to PyArray_ResolveWritebackIfCopy or " "PyArray_DiscardWritebackIfCopy is missing."; + int retval = PyErr_WarnEx(PyExc_RuntimeWarning, msg, 1); + if (write_and_clear_error_if_unraisable(retval, unraisable) < 0) { + return -1; + } /* * prevent reaching 0 twice and thus recursing into dealloc. * Increasing sys.gettotalrefcount, but path should not be taken. 
*/ Py_INCREF(self); - WARN_IN_DEALLOC(PyExc_RuntimeWarning, msg); retval = PyArray_ResolveWritebackIfCopy(self); - if (retval < 0) - { - PyErr_Print(); - PyErr_Clear(); + if (write_and_clear_error_if_unraisable(retval, unraisable) < 0) { + return -1; } } /* * If fa->base is non-NULL, it is something * to DECREF -- either a view or a buffer object */ - Py_XDECREF(fa->base); + Py_CLEAR(fa->base); } if ((fa->flags & NPY_ARRAY_OWNDATA) && fa->data) { /* Free any internal references */ if (PyDataType_REFCHK(fa->descr)) { if (PyArray_ClearArray(self) < 0) { - PyErr_WriteUnraisable(NULL); + if (write_and_clear_error_if_unraisable(-1, unraisable) < 0) { + return -1; + } } } + /* mem_handler can be absent if NPY_ARRAY_OWNDATA arbitrarily set */ if (fa->mem_handler == NULL) { if (npy_global_state.warn_if_no_mem_policy) { char const *msg = "Trying to dealloc data, but a memory policy " "is not set. If you take ownership of the data, you must " "set a base owning the data (e.g. a PyCapsule)."; - WARN_IN_DEALLOC(PyExc_RuntimeWarning, msg); + int retval = PyErr_WarnEx(PyExc_RuntimeWarning, msg, 1); + if (write_and_clear_error_if_unraisable(retval, unraisable) < 0) { + return -1; + } } // Guess at malloc/free ??? free(fa->data); @@ -442,16 +449,38 @@ array_dealloc(PyArrayObject *self) nbytes = 1; } PyDataMem_UserFREE(fa->data, nbytes, fa->mem_handler); - Py_DECREF(fa->mem_handler); + Py_CLEAR(fa->mem_handler); } + fa->data = NULL; } /* must match allocation in PyArray_NewFromDescr */ npy_free_cache_dim(fa->dimensions, 2 * fa->nd); - Py_DECREF(fa->descr); + fa->dimensions = NULL; + Py_CLEAR(fa->descr); + return 0; +} + +static void +array_dealloc(PyArrayObject *self) +{ + // NPY_TRUE flags that errors are unraisable. + int ret = _clear_array_attributes(self, NPY_TRUE); + assert(ret == 0); // should always succeed if unraisable. + // Only done on actual deallocation, nothing allocated by numpy. 
+ if (((PyArrayObject_fields *)self)->weakreflist != NULL) { + PyObject_ClearWeakRefs((PyObject *)self); + } Py_TYPE(self)->tp_free((PyObject *)self); } +NPY_NO_EXPORT int +clear_array_attributes(PyArrayObject *self) +{ + // NPY_FALSE flags that errors can be raised. + return _clear_array_attributes(self, NPY_FALSE); +} + /*NUMPY_API * Prints the raw data of the ndarray in a form useful for debugging * low-level C issues. diff --git a/numpy/_core/src/multiarray/arrayobject.h b/numpy/_core/src/multiarray/arrayobject.h index 8d6f84faa6b1..d0f95dc228c7 100644 --- a/numpy/_core/src/multiarray/arrayobject.h +++ b/numpy/_core/src/multiarray/arrayobject.h @@ -19,6 +19,13 @@ array_richcompare(PyArrayObject *self, PyObject *other, int cmp_op); NPY_NO_EXPORT int array_might_be_written(PyArrayObject *obj); +/* + * For use in __setstate__, where pickle gives us an instance on which we + * have to replace all the actual data. Returns 0 on success, -1 on error. + */ +NPY_NO_EXPORT int +clear_array_attributes(PyArrayObject *self); + /* * This flag is used to mark arrays which we would like to, in the future, * turn into views. 
It causes a warning to be issued on the first attempt to diff --git a/numpy/_core/src/multiarray/methods.c b/numpy/_core/src/multiarray/methods.c index 24972fec8975..419a3c1bd2d0 100644 --- a/numpy/_core/src/multiarray/methods.c +++ b/numpy/_core/src/multiarray/methods.c @@ -2073,17 +2073,6 @@ array_setstate(PyArrayObject *self, PyObject *args) return NULL; } - /* - * Reassigning fa->descr messes with the reallocation strategy, - * since fa could be a 0-d or scalar, and then - * PyDataMem_UserFREE will be confused - */ - size_t n_tofree = PyArray_NBYTES(self); - if (n_tofree == 0) { - n_tofree = 1; - } - Py_XDECREF(PyArray_DESCR(self)); - fa->descr = typecode; Py_INCREF(typecode); nd = PyArray_IntpFromSequence(shape, dimensions, NPY_MAXDIMS); if (nd < 0) { @@ -2097,31 +2086,19 @@ array_setstate(PyArrayObject *self, PyObject *args) * copy from the pickled data (may not match allocation currently if 0). * Compare with `PyArray_NewFromDescr`, raise MemoryError for simplicity. */ - npy_bool empty = NPY_FALSE; - nbytes = 1; + nbytes = typecode->elsize; for (int i = 0; i < nd; i++) { if (dimensions[i] < 0) { PyErr_SetString(PyExc_TypeError, "impossible dimension while unpickling array"); return NULL; } - if (dimensions[i] == 0) { - empty = NPY_TRUE; - } overflowed = npy_mul_sizes_with_overflow( &nbytes, nbytes, dimensions[i]); if (overflowed) { return PyErr_NoMemory(); } } - overflowed = npy_mul_sizes_with_overflow( - &nbytes, nbytes, PyArray_ITEMSIZE(self)); - if (overflowed) { - return PyErr_NoMemory(); - } - if (empty) { - nbytes = 0; - } if (PyDataType_FLAGCHK(typecode, NPY_LIST_PICKLE)) { if (!PyList_Check(rawdata)) { @@ -2135,11 +2112,8 @@ array_setstate(PyArrayObject *self, PyObject *args) /* Backward compatibility with Python 2 NumPy pickles */ if (PyUnicode_Check(rawdata)) { - PyObject *tmp; - tmp = PyUnicode_AsLatin1String(rawdata); - Py_DECREF(rawdata); - rawdata = tmp; - if (tmp == NULL) { + Py_SETREF(rawdata, PyUnicode_AsLatin1String(rawdata)); + if (rawdata 
== NULL) { /* More informative error message */ PyErr_SetString(PyExc_ValueError, ("Failed to encode latin1 string when unpickling a Numpy array. " @@ -2167,32 +2141,13 @@ array_setstate(PyArrayObject *self, PyObject *args) return NULL; } } - - if ((PyArray_FLAGS(self) & NPY_ARRAY_OWNDATA)) { - /* - * Allocation will never be 0, see comment in ctors.c - * line 820 - */ - PyObject *handler = PyArray_HANDLER(self); - if (handler == NULL) { - /* This can happen if someone arbitrarily sets NPY_ARRAY_OWNDATA */ - PyErr_SetString(PyExc_RuntimeError, - "no memory handler found but OWNDATA flag set"); - return NULL; - } - PyDataMem_UserFREE(PyArray_DATA(self), n_tofree, handler); - PyArray_CLEARFLAGS(self, NPY_ARRAY_OWNDATA); - } - Py_XDECREF(PyArray_BASE(self)); - fa->base = NULL; - - PyArray_CLEARFLAGS(self, NPY_ARRAY_WRITEBACKIFCOPY); - - if (PyArray_DIMS(self) != NULL) { - npy_free_cache_dim_array(self); - fa->dimensions = NULL; + /* + * Get rid of everything on self, and then populate with pickle data. 
+ */ + if (clear_array_attributes(self) < 0) { + return NULL; } - + fa->descr = typecode; fa->flags = NPY_ARRAY_DEFAULT; fa->nd = nd; @@ -2222,11 +2177,8 @@ array_setstate(PyArrayObject *self, PyObject *args) if (num == 0) { num = 1; } - /* Store the handler in case the default is modified */ - Py_XDECREF(fa->mem_handler); fa->mem_handler = PyDataMem_GetHandler(); if (fa->mem_handler == NULL) { - Py_CLEAR(fa->mem_handler); Py_DECREF(rawdata); return NULL; } @@ -2274,7 +2226,6 @@ array_setstate(PyArrayObject *self, PyObject *args) } else { /* The handlers should never be called in this case */ - Py_XDECREF(fa->mem_handler); fa->mem_handler = NULL; fa->data = datastr; if (PyArray_SetBaseObject(self, rawdata) < 0) { @@ -2288,9 +2239,7 @@ array_setstate(PyArrayObject *self, PyObject *args) if (num == 0) { num = 1; } - /* Store the functions in case the default handler is modified */ - Py_XDECREF(fa->mem_handler); fa->mem_handler = PyDataMem_GetHandler(); if (fa->mem_handler == NULL) { return NULL; diff --git a/numpy/_core/src/multiarray/npy_static_data.c b/numpy/_core/src/multiarray/npy_static_data.c index 997c798c665d..1c31dcd5d810 100644 --- a/numpy/_core/src/multiarray/npy_static_data.c +++ b/numpy/_core/src/multiarray/npy_static_data.c @@ -69,6 +69,7 @@ intern_strings(void) INTERN_STRING(copy, "copy"); INTERN_STRING(dl_device, "dl_device"); INTERN_STRING(max_version, "max_version"); + INTERN_STRING(array_dealloc, "array_dealloc"); return 0; } diff --git a/numpy/_core/src/multiarray/npy_static_data.h b/numpy/_core/src/multiarray/npy_static_data.h index f3d1135ec044..68b3d27c8160 100644 --- a/numpy/_core/src/multiarray/npy_static_data.h +++ b/numpy/_core/src/multiarray/npy_static_data.h @@ -48,6 +48,7 @@ typedef struct npy_interned_str_struct { PyObject *copy; PyObject *dl_device; PyObject *max_version; + PyObject *array_dealloc; } npy_interned_str_struct; /* From 28ebd843aef940b509f7e1dd1092f7e2a6839e2f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" 
<49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 28 Jan 2026 17:57:37 +0000 Subject: [PATCH 1346/1718] MAINT: Bump hypothesis from 6.151.2 to 6.151.3 in /requirements Bumps [hypothesis](https://github.com/HypothesisWorks/hypothesis) from 6.151.2 to 6.151.3. - [Release notes](https://github.com/HypothesisWorks/hypothesis/releases) - [Commits](https://github.com/HypothesisWorks/hypothesis/compare/hypothesis-python-6.151.2...hypothesis-python-6.151.3) --- updated-dependencies: - dependency-name: hypothesis dependency-version: 6.151.3 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- requirements/emscripten_test_requirements.txt | 2 +- requirements/test_requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements/emscripten_test_requirements.txt b/requirements/emscripten_test_requirements.txt index 9e53f827fe07..e661596e1933 100644 --- a/requirements/emscripten_test_requirements.txt +++ b/requirements/emscripten_test_requirements.txt @@ -1,4 +1,4 @@ -hypothesis==6.151.2 +hypothesis==6.151.3 pytest==9.0.2 tzdata pytest-xdist diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index 72e2faafb1b1..bea090fbd0c7 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -1,5 +1,5 @@ Cython -hypothesis==6.151.2 +hypothesis==6.151.3 pytest==9.0.2 pytest-cov==7.0.0 meson From ff64def9e3bae83bdbc4c26a8cdda3c21768c7fc Mon Sep 17 00:00:00 2001 From: clintonsteiner <47841949+clintonsteiner@users.noreply.github.com> Date: Wed, 28 Jan 2026 20:28:35 -0600 Subject: [PATCH 1347/1718] Update dependabot.yml - add cooldown days parameter Add cooldown to guard against supply chain attacks Dependency cooldowns are a free, easy, and incredibly effective way to mitigate the large majority of open source supply chain attacks. 
More individual projects should apply cooldowns (via tools like Dependabot and Renovate) to their dependencies, and packaging ecosystems should invest in first-class support for cooldowns directly in their package managers. https://blog.yossarian.net/2025/11/21/We-should-all-be-using-dependency-cooldowns --- .github/dependabot.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 4a5d738e6613..76e3f31a96e2 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -4,6 +4,8 @@ updates: directory: / schedule: interval: daily + cooldown: + default-days: 7 commit-message: prefix: "MAINT" labels: @@ -15,6 +17,8 @@ updates: directory: /requirements schedule: interval: daily + cooldown: + default-days: 7 commit-message: prefix: "MAINT" labels: From c9e1ca702f6de5cc1b296b4de82dd26c64c10bbc Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Wed, 28 Jan 2026 23:57:01 -0700 Subject: [PATCH 1348/1718] MAINT: correct typo in NumPy 2.5 C API version (#30740) --- numpy/_core/include/numpy/numpyconfig.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/include/numpy/numpyconfig.h b/numpy/_core/include/numpy/numpyconfig.h index 40b5f1454d67..4bfe3ab09dea 100644 --- a/numpy/_core/include/numpy/numpyconfig.h +++ b/numpy/_core/include/numpy/numpyconfig.h @@ -79,7 +79,7 @@ #define NPY_2_2_API_VERSION 0x00000013 #define NPY_2_3_API_VERSION 0x00000014 #define NPY_2_4_API_VERSION 0x00000015 -#define NPY_2_5_API_VERSION 0x00000016 +#define NPY_2_5_API_VERSION 0x00000015 /* From aaa2628ed82e9d962a1eb3a3556880a96863455b Mon Sep 17 00:00:00 2001 From: agnuspaul98 Date: Fri, 30 Jan 2026 11:10:41 +0530 Subject: [PATCH 1349/1718] DOC: add missing numpy import in emath example --- doc/source/reference/routines.emath.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/reference/routines.emath.rst b/doc/source/reference/routines.emath.rst index 7751c922b677..4b4ee208734f 100644 --- 
a/doc/source/reference/routines.emath.rst +++ b/doc/source/reference/routines.emath.rst @@ -15,6 +15,7 @@ domains of the input. For example, for functions like `log` with branch cuts, the versions in this module provide the mathematically valid answers in the complex plane:: + >>> import numpy as np >>> import math >>> np.emath.log(-math.exp(1)) == (1+1j*math.pi) True From b0a3e7435f342dcfd9d59e23d33640f1de1aff8a Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Fri, 30 Jan 2026 18:53:17 +0100 Subject: [PATCH 1350/1718] NEP 57: NumPy platform support [skip ci] --- doc/neps/nep-0057-numpy-platform-support.rst | 246 +++++++++++++++++++ 1 file changed, 246 insertions(+) create mode 100644 doc/neps/nep-0057-numpy-platform-support.rst diff --git a/doc/neps/nep-0057-numpy-platform-support.rst b/doc/neps/nep-0057-numpy-platform-support.rst new file mode 100644 index 000000000000..9e555a74839c --- /dev/null +++ b/doc/neps/nep-0057-numpy-platform-support.rst @@ -0,0 +1,246 @@ +.. _NEP57: + +=============================== +NEP 57 — NumPy platform support +=============================== + +:Author: Ralf Gommers +:Status: Draft +:Type: Process +:Created: 2026-01-30 +:Resolution: - + +..
note:: + + This NEP is drafted as a policy specific to NumPy rather than a SPEC + for several reasons that are all project-specific: + + * it involves committing to a nontrivial amount of maintainer effort, + * personal commitment from a maintainer may make the difference between a + yes and a no of supporting a platform (e.g., NumPy supported PyPy for a + long time because of the efforts of one maintainer) + * support for a platform being possible at all may depend on features of the + code base (e.g., NumPy supports 32-bit Python on Windows while SciPy does + not because there's no suitable compiler toolchain for it) + * the number of wheels depends on whether the Stable ABI can be used (NumPy + is more performance-sensitive for small arrays, so can't use it) + + +Abstract +-------- + +This NEP documents how a platform - i.e., a specific operating system, CPU +architecture and CPython interpreter - becomes supported in NumPy, what +platforms are currently supported, and which were supported in the (recent) past. + + +Motivation and scope +-------------------- + +*This policy is being drafted now because there is a lot of interest in +extending the number of platforms NumPy supports through wheels in particular.* + +The scope of this NEP includes: + +- The definition of tiers of support for platforms by NumPy +- Policies and decision making for moving a platform to a different tier + +Out of scope for this NEP are: + +- Binary distributions of NumPy outside of PyPI +- Partial testing in CI (e.g., testing only SIMD-specific code under QEMU) +- More detailed breakdowns of wheels and support matrices, like compiler flavor + and minimum version, or the BLAS library that is used in the build. + + +Support tiers +------------- + +*This section is inspired by PEP 11 (CPython platform support), although +definitions are not matching, because NumPy is not nearly as large a project as +CPython.* + +Platform support is broken down into tiers. 
Each tier comes with different +requirements which lead to different promises being made about support. + +To be promoted to a tier, steering council support is required and is expected +to be driven by team consensus. Demotion to a lower tier occurs when the +requirements of the current tier are no longer met for a platform for an +extended period of time based on the judgment of the steering council. For +platforms which no longer meet the requirements of any tier by the middle of a +new feature release cycle, an announcement will be made to warn the community +of the pending removal of support for the platform. If the platform is not +brought into line for at least one of the tiers by the first release candidate, +it will be listed as unsupported in this NEP. + + +General principles +~~~~~~~~~~~~~~~~~~ + +1. Maintainer effort is expensive, and we collectively have limited bandwidth - + hence platform support is strongly influenced by the willingness of one or + more maintainers to put in that effort. + + - Maintainers are trusted by the whole team. We generally do not question + *why* a maintainer is motivated to put in the effort. If they are being + paid for their effort or doing it as part of their job, that is fine - + however they should disclose this to the Steering Council, and indicate + whether long-term support is conditional on their employment or contractor + status for the support tiers that include releasing wheels to PyPI. + *Rationale: releasing wheels to PyPI is a long-term commitment by the + project as a whole, see the backwards compatibility section below.* + +2. CI support with native runners for the platform is strongly preferred. Free + is best, however decisions on paid CI are up to the Steering Council. + Emulation for running the test suite (e.g., under QEMU) or self-hosted + buildbots are slower and less reliable, hence not preferred. + +3. 
There should be broad enough demand for support for the platform for the + tiers that include releasing wheels to PyPI. + + - A previously used rule of thumb: >=0.5% of the user base should be on this + platform. There may be reasons to deviate from this rule of thumb. + +4. Adding a non-wheel CI job for a platform to the NumPy CI matrix is much + cheaper, and easily reverted in case of problems. The bar for adding such + jobs is low, and assessed on a case-by-case basis. + +5. For all platforms in any supported tier: the relevant prerequisites in our + dependencies have been met. E.g., build tools have support, and for wheels + there is support in CPython, PyPI, cibuildwheel, manylinux, and + ``scipy-openblas64`` or another easily-integrated BLAS library. + +6. Decision making: + + - Moving a platform to a lower support tier must be discussed on the mailing list. + - Moving a platform to a higher support tier that includes releasing wheels + on PyPI for that platform must be discussed on the mailing list. + - Adding an entry for an unsupported platform or one without + wheels can be done on GitHub, assuming it's clear from the discussion that + the relevant maintainers agree. + + +Tier 1 +~~~~~~ + +- Must have regular CI support on GitHub or (exceptionally) through another + well-integrated CI platform that the release team and steering council deem + acceptable. +- The NumPy team releases wheels on PyPI for this platform +- CI failures (either regular CI or wheel build CI) block releases. +- All maintainers and developers are responsible for keeping the ``main`` branch and + wheel builds working. 
+ +Tier 1 platforms: + +- Windows x86-64 +- Windows arm64 +- Windows x86 (32-bit Python: note this is shipped without BLAS, it's legacy) +- Linux x86-64 (manylinux) +- Linux aarch64 (manylinux) +- macOS arm64 +- macOS x86-64 (expected to move to unsupported by 2027/28 once the platform is dropped by GitHub) + + +Tier 2 +~~~~~~ + +- Must have regular CI support, either as defined for Tier 1 or through a + reliable self-hosted service. +- The NumPy team releases wheels on PyPI for this platform +- CI failures block releases. +- Must have at least one maintainer who commits to take primary and long-term + responsibility for keeping the ``main`` branch and wheel builds working. + +Tier 2 platforms: + +- Linux x86-64 (musllinux) +- Linux aarch64 (musllinux) +- Free-threaded CPython + + +Tier 3 +~~~~~~ + +- Is supported as part of NumPy's regular CI setup for the ``main`` branch. CI + support as defined for Tier 2. +- No wheels are released on PyPI for this platform +- CI failures block releases (skips may be applied a bit more liberally). +- Must have at least one maintainer or a regular contributor trusted by the + NumPy maintainers who commits to take responsibility for CI on the ``main`` + branch working. + +Tier 3 platforms: + +- FreeBSD (note: runs on Cirrus CI) +- Linux ppc64le (note: runs on IBM-provided self-hosted runners) +- Emscripten/Pyodide + + +Unsupported platforms +~~~~~~~~~~~~~~~~~~~~~ + +All platforms not listed in the above tiers are unsupported by the NumPy team. +We do not develop and test on such platforms, and so cannot provide any +promises that NumPy will work on them. + +However, the code base does include unsupported code – that is, code specific +to unsupported platforms. Contributions in this area are welcome as long as +they: + +- pose a minimal maintenance burden to the core team, and +- benefit substantially more people than the contributor. 
+ +Unsupported platforms (previously in a supported tier, may be an incomplete +list): + +- PyPy +- macOS ppc64, universal, universal2 +- Linux i686 +- Linux on IBM Z (s390x) + +Unsupported platforms (known interest in moving to a higher tier): + +- iOS +- Android +- RISC-V +- WASI + + +Backward compatibility +---------------------- + +Moving a platform to a lower tier of support is generally backwards compatible. +The exception is ceasing to release wheels on PyPI for a platform. That causes +significant disruption for existing users on that platform. Their install commands +(e.g., ``pip install numpy``) may stop working because, if a new release no longer +has wheels for the platform, by default ``pip`` will try to build from source rather +than using a wheel from an older version of ``numpy``. Therefore, we should be very +reluctant to drop wheels for any platform. + + +Discussion +---------- + +- `ENH: Provide Windows ARM64 wheels (numpy#22530) `__ +- `Releasing PowerPC (ppc64le) wheels? (numpy#22318) `__ +- `MAINT: drop support for PyPy (numpy#30416) `__ +- `ENH: Build and distribute manylinux wheels for riscv64 `__ +- `BLD: Add support for building iOS wheels (numpy#28759) `__ +- `BLD: Add Android support `__ +- `ENH: WASI Build `__ +- `PEP 11 - CPython platform support `__ +- `Debian's supported architectures `__ +- `Discussion about supported platforms for wheels (scientific-python issue/discussion (Nov 2025) `__ +- `What platforms should wheels be provided for by default? (Packaging Discourse thread, 2026) `__ +- `Expectations that projects provide ever more wheels (pypackaging-native) `__ + + +References and footnotes +------------------------ + + +Copyright +--------- + +This document has been placed in the public domain. 
From a7cc8290d7991355595e7e2d250c1d0364f3bd33 Mon Sep 17 00:00:00 2001 From: Vineet Kumar Date: Sat, 31 Jan 2026 14:42:16 +0530 Subject: [PATCH 1351/1718] DOC: fix typos in RELEASE_WALKTHROUGH and NEP 43 --- doc/RELEASE_WALKTHROUGH.rst | 4 ++-- doc/neps/nep-0043-extensible-ufuncs.rst | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/RELEASE_WALKTHROUGH.rst b/doc/RELEASE_WALKTHROUGH.rst index ffade434a7f1..c8e3da3095b5 100644 --- a/doc/RELEASE_WALKTHROUGH.rst +++ b/doc/RELEASE_WALKTHROUGH.rst @@ -136,7 +136,7 @@ Test the wheel builds After the release PR is merged, go to the ``numpy-release`` repository in your browser and manually trigger the workflow on the ``maintenance/2.4.x`` branch using the ``Run workflow`` button in ``actions``. Make sure that the upload -target is ``none`` in the *evironment* dropdown. The wheels take about 1 hour +target is ``none`` in the *environment* dropdown. The wheels take about 1 hour to build, but sometimes GitHub is very slow. If some wheel builds fail for unrelated reasons, you can re-run them as normal in the GitHub Actions UI with ``re-run failed``. After the wheels are built review the results, checking that @@ -187,7 +187,7 @@ If you need to delete the tag due to error:: Go to the ``numpy-release`` repository in your browser and manually trigger the workflow on the ``maintenance/2.4.x`` branch using the ``Run workflow`` button in ``actions``. Make sure that the upload target is ``pypi`` in the -*evironment* dropdown. The wheels take about 1 hour to build, but sometimes +*environment* dropdown. The wheels take about 1 hour to build, but sometimes GitHub is very slow. If some wheel builds fail for unrelated reasons, you can re-run them as normal in the GitHub Actions UI with ``re-run failed``. 
After the wheels are built review the results, checking that the number of artifacts diff --git a/doc/neps/nep-0043-extensible-ufuncs.rst b/doc/neps/nep-0043-extensible-ufuncs.rst index 4bac8d7a3282..1370e14d6c4e 100644 --- a/doc/neps/nep-0043-extensible-ufuncs.rst +++ b/doc/neps/nep-0043-extensible-ufuncs.rst @@ -241,7 +241,7 @@ to define string equality, will be added to a ufunc. nin = 1 nout = 1 # DTypes are stored on the BoundArrayMethod and not on the internal - # ArrayMethod, to reference cyles. + # ArrayMethod, to reference cycles. DTypes = (String, String, Bool) def resolve_descriptors(self: ArrayMethod, DTypes, given_descrs): From cc02d4db9695503938909a0335f56ee364e16bc0 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Sat, 31 Jan 2026 03:59:58 -0700 Subject: [PATCH 1352/1718] CI: add smoke test CI coverage for Python 3.15 (#30741) --- .github/workflows/linux.yml | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 15d3ef45e327..fd8e7f8e40bc 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -67,7 +67,7 @@ jobs: MESON_ARGS: "-Dallow-noblas=true -Dcpu-baseline=none -Dcpu-dispatch=none" strategy: matrix: - version: ["3.12", "3.13", "3.14", "3.14t"] + version: ["3.12", "3.14t"] steps: - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: @@ -107,6 +107,28 @@ jobs: cd tools pytest --timeout=600 --durations=10 --pyargs numpy -m "not slow" + all_versions: + # like the smoke tests but runs on more Python versions + needs: [smoke_test] + # To enable this job on a fork, comment out: + if: github.repository == 'numpy/numpy' + runs-on: ubuntu-latest + env: + MESON_ARGS: "-Dallow-noblas=true -Dcpu-baseline=none -Dcpu-dispatch=none" + strategy: + matrix: + version: ["3.13", "3.14", "3.15-dev", "3.15t-dev"] + steps: + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + with: + submodules: 
recursive + fetch-tags: true + persist-credentials: false + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 + with: + python-version: ${{ matrix.version }} + - uses: ./.github/meson_actions + full: # Install as editable, then run the full test suite with code coverage needs: [smoke_test] From 4227ede02d4f8c6fdf27179a68a700109da0bdad Mon Sep 17 00:00:00 2001 From: Ali Hamdan Date: Sun, 1 Feb 2026 00:31:58 +0100 Subject: [PATCH 1353/1718] TYP: Fix class annotations using names shadowed by a class member (#30754) --- numpy/__init__.pyi | 318 ++++++++++++++++---------------- numpy/_core/arrayprint.pyi | 6 +- numpy/dtypes.pyi | 4 +- numpy/lib/_nanfunctions_impl.py | 6 +- numpy/ma/core.pyi | 60 +++--- numpy/random/_generator.pyi | 4 +- 6 files changed, 203 insertions(+), 195 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 5e8f600bd951..531b1bb8711c 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -4,7 +4,8 @@ import datetime as dt import inspect import sys from abc import abstractmethod -from builtins import bool as py_bool +# Aliases for builtins shadowed by classes to avoid annotations resolving to class members by ty +from builtins import bool as py_bool, str as py_str, type as py_type from decimal import Decimal from fractions import Fraction from types import EllipsisType, ModuleType, MappingProxyType, GenericAlias @@ -784,7 +785,8 @@ type _BuiltinObjectLike = ( ) # fmt: skip # Introduce an alias for `dtype` to avoid naming conflicts. -type _dtype[ScalarT: generic] = dtype[ScalarT] +# NOTE: This should _not_ be `Final[_]`, `_: TypeAlias`, or `type _` +_dtype = dtype type _ByteOrderChar = L["<", ">", "=", "|"] # can be anything, is case-insensitive, and only the first character matters @@ -1003,7 +1005,7 @@ class _HasRealAndImag[RealT, ImagT](Protocol): @type_check_only class _HasTypeWithRealAndImag[RealT, ImagT](Protocol): @property - def type(self, /) -> type[_HasRealAndImag[RealT, ImagT]]: ... 
+ def type(self, /) -> py_type[_HasRealAndImag[RealT, ImagT]]: ... @type_check_only class _HasDTypeWithRealAndImag[RealT, ImagT](Protocol): @@ -1071,7 +1073,7 @@ test: Final[PytestTester] = ... @type_check_only class _DTypeMeta(type): @property - def type(cls, /) -> type[generic] | None: ... + def type(cls, /) -> py_type[generic] | None: ... @property def _abstract(cls, /) -> bool: ... @property @@ -1083,18 +1085,18 @@ class _DTypeMeta(type): @final class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 - names: tuple[str, ...] | None + names: tuple[py_str, ...] | None def __hash__(self) -> int: ... # `None` results in the default dtype @overload def __new__( cls, - dtype: type[float64 | ct.c_double] | _Float64Codes | None, + dtype: py_type[float64 | ct.c_double] | _Float64Codes | None, align: py_bool = False, copy: py_bool = False, *, - metadata: dict[str, Any] = ... + metadata: dict[py_str, Any] = ... ) -> dtype[float64]: ... # Overload for `dtype` instances, scalar types, and instances that have a @@ -1106,7 +1108,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[ScalarT]: ... # Builtin types @@ -1122,56 +1124,56 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 @overload def __new__( cls, - dtype: type[py_bool | bool_ | ct.c_bool] | _BoolCodes, + dtype: py_type[py_bool | bool_ | ct.c_bool] | _BoolCodes, align: py_bool = False, copy: py_bool = False, *, - metadata: dict[str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[bool_]: ... @overload def __new__( cls, - dtype: type[int], # also accepts `type[py_bool]` + dtype: py_type[int], # also accepts `type[py_bool]` align: py_bool = False, copy: py_bool = False, *, - metadata: dict[str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[int_ | Any]: ... 
@overload def __new__( cls, - dtype: type[float], # also accepts `type[int | bool]` + dtype: py_type[float], # also accepts `type[int | bool]` align: py_bool = False, copy: py_bool = False, *, - metadata: dict[str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[float64 | Any]: ... @overload def __new__( cls, - dtype: type[complex], # also accepts `type[float | int | bool]` + dtype: py_type[complex], # also accepts `type[float | int | bool]` align: py_bool = False, copy: py_bool = False, *, - metadata: dict[str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[complex128 | Any]: ... @overload def __new__( cls, - dtype: type[bytes | ct.c_char] | _BytesCodes, + dtype: py_type[bytes | ct.c_char] | _BytesCodes, align: py_bool = False, copy: py_bool = False, *, - metadata: dict[str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[bytes_]: ... @overload def __new__( cls, - dtype: type[str] | _StrCodes, + dtype: py_type[py_str] | _StrCodes, align: py_bool = False, copy: py_bool = False, *, - metadata: dict[str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[str_]: ... # NOTE: These `memoryview` overloads assume PEP 688, which requires mypy to # be run with the (undocumented) `--disable-memoryview-promotion` flag, @@ -1182,134 +1184,134 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 @overload def __new__( cls, - dtype: type[void | memoryview] | _VoidDTypeLike | _VoidCodes, + dtype: py_type[void | memoryview] | _VoidDTypeLike | _VoidCodes, align: py_bool = False, copy: py_bool = False, *, - metadata: dict[str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[void]: ... # NOTE: `_: type[object]` would also accept e.g. 
`type[object | complex]`, # and is therefore not included here @overload def __new__( cls, - dtype: type[object_ | _BuiltinObjectLike | ct.py_object[Any]] | _ObjectCodes, + dtype: py_type[object_ | _BuiltinObjectLike | ct.py_object[Any]] | _ObjectCodes, align: py_bool = False, copy: py_bool = False, *, - metadata: dict[str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[object_]: ... # `unsignedinteger` string-based representations and ctypes @overload def __new__( cls, - dtype: _UInt8Codes | type[ct.c_uint8], + dtype: _UInt8Codes | py_type[ct.c_uint8], align: py_bool = False, copy: py_bool = False, *, - metadata: dict[str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[uint8]: ... @overload def __new__( cls, - dtype: _UInt16Codes | type[ct.c_uint16 | ct.c_ushort], + dtype: _UInt16Codes | py_type[ct.c_uint16 | ct.c_ushort], align: py_bool = False, copy: py_bool = False, *, - metadata: dict[str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[uint16]: ... @overload def __new__( cls, - dtype: _UInt32Codes | _UIntCCodes | type[ct.c_uint32 | ct.c_uint], + dtype: _UInt32Codes | _UIntCCodes | py_type[ct.c_uint32 | ct.c_uint], align: py_bool = False, copy: py_bool = False, *, - metadata: dict[str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[uint32]: ... @overload def __new__( cls, - dtype: _UInt64Codes | _ULongLongCodes | type[ct.c_uint64 | ct.c_ulonglong], + dtype: _UInt64Codes | _ULongLongCodes | py_type[ct.c_uint64 | ct.c_ulonglong], align: py_bool = False, copy: py_bool = False, *, - metadata: dict[str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[uint64]: ... @overload def __new__( cls, - dtype: _UIntPCodes | type[ct.c_void_p | ct.c_size_t], + dtype: _UIntPCodes | py_type[ct.c_void_p | ct.c_size_t], align: py_bool = False, copy: py_bool = False, *, - metadata: dict[str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[uintp]: ... 
@overload def __new__( cls, - dtype: _ULongCodes | type[ct.c_ulong], + dtype: _ULongCodes | py_type[ct.c_ulong], align: py_bool = False, copy: py_bool = False, *, - metadata: dict[str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[uint32 | uint64]: ... # `signedinteger` string-based representations and ctypes @overload def __new__( cls, - dtype: _Int8Codes | type[ct.c_int8], + dtype: _Int8Codes | py_type[ct.c_int8], align: py_bool = False, copy: py_bool = False, *, - metadata: dict[str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[int8]: ... @overload def __new__( cls, - dtype: _Int16Codes | type[ct.c_int16 | ct.c_short], + dtype: _Int16Codes | py_type[ct.c_int16 | ct.c_short], align: py_bool = False, copy: py_bool = False, *, - metadata: dict[str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[int16]: ... @overload def __new__( cls, - dtype: _Int32Codes | _IntCCodes | type[ct.c_int32 | ct.c_int], + dtype: _Int32Codes | _IntCCodes | py_type[ct.c_int32 | ct.c_int], align: py_bool = False, copy: py_bool = False, *, - metadata: dict[str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[int32]: ... @overload def __new__( cls, - dtype: _Int64Codes | _LongLongCodes | type[ct.c_int64 | ct.c_longlong], + dtype: _Int64Codes | _LongLongCodes | py_type[ct.c_int64 | ct.c_longlong], align: py_bool = False, copy: py_bool = False, *, - metadata: dict[str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[int64]: ... @overload def __new__( cls, - dtype: _IntPCodes | type[intp | ct.c_ssize_t], + dtype: _IntPCodes | py_type[intp | ct.c_ssize_t], align: py_bool = False, copy: py_bool = False, *, - metadata: dict[str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[intp]: ... 
@overload def __new__( cls, - dtype: _LongCodes | type[ct.c_long], + dtype: _LongCodes | py_type[ct.c_long], align: py_bool = False, copy: py_bool = False, *, - metadata: dict[str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[int32 | int64]: ... # `floating` string-based representations and ctypes @@ -1320,26 +1322,26 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[float16]: ... @overload def __new__( cls, - dtype: _Float32Codes | type[ct.c_float], + dtype: _Float32Codes | py_type[ct.c_float], align: py_bool = False, copy: py_bool = False, *, - metadata: dict[str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[float32]: ... # float64 codes are covered by overload 1 @overload def __new__( cls, - dtype: _LongDoubleCodes | type[ct.c_longdouble], + dtype: _LongDoubleCodes | py_type[ct.c_longdouble], align: py_bool = False, copy: py_bool = False, *, - metadata: dict[str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[longdouble]: ... # `complexfloating` string-based representations and ctypes @@ -1351,7 +1353,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[complex64]: ... @overload def __new__( @@ -1360,7 +1362,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[complex128]: ... @overload def __new__( @@ -1369,35 +1371,35 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[clongdouble]: ... 
else: @overload def __new__( cls, - dtype: _Complex64Codes | type[ct.c_float_complex], + dtype: _Complex64Codes | py_type[ct.c_float_complex], align: py_bool = False, copy: py_bool = False, *, - metadata: dict[str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[complex64]: ... @overload def __new__( cls, - dtype: _Complex128Codes | type[ct.c_double_complex], + dtype: _Complex128Codes | py_type[ct.c_double_complex], align: py_bool = False, copy: py_bool = False, *, - metadata: dict[str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[complex128]: ... @overload def __new__( cls, - dtype: _CLongDoubleCodes | type[ct.c_longdouble_complex], + dtype: _CLongDoubleCodes | py_type[ct.c_longdouble_complex], align: py_bool = False, copy: py_bool = False, *, - metadata: dict[str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[clongdouble]: ... # datetime64 @@ -1408,7 +1410,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[datetime64[dt.date]]: ... @overload # datetime64[{h,m,s,ms,us}] def __new__( @@ -1417,7 +1419,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[datetime64[dt.datetime]]: ... @overload # datetime64[{ns,ps,fs,as}] def __new__( @@ -1426,7 +1428,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[datetime64[int]]: ... @overload # datetime64[?] 
def __new__( @@ -1435,7 +1437,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[datetime64]: ... # timedelta64 @@ -1446,7 +1448,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[timedelta64[dt.timedelta]]: ... @overload # timedelta64[{Y,M,ns,ps,fs,as}] def __new__( @@ -1455,7 +1457,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[timedelta64[int]]: ... @overload # timedelta64[?] def __new__( @@ -1464,7 +1466,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[timedelta64]: ... # `StringDType` requires special treatment because it has no scalar type @@ -1475,7 +1477,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtypes.StringDType: ... # Combined char-codes and ctypes, analogous to the scalar-type hierarchy @@ -1486,7 +1488,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[unsignedinteger]: ... 
@overload def __new__( @@ -1495,7 +1497,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[signedinteger]: ... @overload def __new__( @@ -1504,7 +1506,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[integer]: ... @overload def __new__( @@ -1513,7 +1515,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[floating]: ... @overload def __new__( @@ -1522,7 +1524,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[complexfloating]: ... @overload def __new__( @@ -1531,27 +1533,27 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 align: py_bool = False, copy: py_bool = False, *, - metadata: dict[str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[inexact]: ... @overload def __new__( cls, - dtype: _CharacterCodes | type[bytes | str | ct.c_char], + dtype: _CharacterCodes | py_type[bytes | py_str | ct.c_char], align: py_bool = False, copy: py_bool = False, *, - metadata: dict[str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[character]: ... # Handle strings that can't be expressed as literals; i.e. "S1", "S2", ... @overload def __new__( cls, - dtype: str, + dtype: py_str, align: py_bool = False, copy: py_bool = False, *, - metadata: dict[str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype: ... 
# Catch-all overload for object-likes @@ -1563,19 +1565,19 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 @overload def __new__( cls, - dtype: type[object], + dtype: py_type[object], align: py_bool = False, copy: py_bool = False, *, - metadata: dict[str, Any] = ..., + metadata: dict[py_str, Any] = ..., ) -> dtype[object_ | Any]: ... def __class_getitem__(cls, item: Any, /) -> GenericAlias: ... @overload - def __getitem__(self: dtype[void], key: list[str], /) -> dtype[void]: ... + def __getitem__(self: dtype[void], key: list[py_str], /) -> dtype[void]: ... @overload - def __getitem__(self: dtype[void], key: str | SupportsIndex, /) -> dtype: ... + def __getitem__(self: dtype[void], key: py_str | SupportsIndex, /) -> dtype: ... # NOTE: In the future 1-based multiplications will also yield `flexible` dtypes @overload @@ -1631,7 +1633,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 @property def kind(self) -> _DTypeKind: ... @property - def metadata(self) -> MappingProxyType[str, Any] | None: ... + def metadata(self) -> MappingProxyType[py_str, Any] | None: ... @property def name(self) -> LiteralString: ... @property @@ -1646,7 +1648,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 @property def str(self) -> LiteralString: ... @property - def type(self) -> type[_ScalarT_co]: ... + def type(self) -> py_type[_ScalarT_co]: ... @type_check_only class _ArrayOrScalarCommon: @@ -2079,12 +2081,12 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def size(self) -> int: ... @property - def real[ScalarT: generic](self: _HasDTypeWithRealAndImag[ScalarT, object], /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... + def real[ScalarT: generic](self: _HasDTypeWithRealAndImag[ScalarT, object], /) -> ndarray[_ShapeT_co, _dtype[ScalarT]]: ... @real.setter def real(self, value: ArrayLike, /) -> None: ... 
@property - def imag[ScalarT: generic](self: _HasDTypeWithRealAndImag[object, ScalarT], /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... + def imag[ScalarT: generic](self: _HasDTypeWithRealAndImag[object, ScalarT], /) -> ndarray[_ShapeT_co, _dtype[ScalarT]]: ... @imag.setter def imag(self, value: ArrayLike, /) -> None: ... @@ -2105,7 +2107,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __array__(self, dtype: None = None, /, *, copy: py_bool | None = None) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __array__[DTypeT: dtype](self, dtype: DTypeT, /, *, copy: py_bool | None = None) -> ndarray[_ShapeT_co, DTypeT]: ... + def __array__[DTypeT: _dtype](self, dtype: DTypeT, /, *, copy: py_bool | None = None) -> ndarray[_ShapeT_co, DTypeT]: ... def __array_ufunc__( self, @@ -2128,7 +2130,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # grant subclasses a bit more flexibility def __array_finalize__(self, obj: NDArray[Any] | None, /) -> None: ... - def __array_wrap__[ShapeT: _Shape, DTypeT: dtype]( + def __array_wrap__[ShapeT: _Shape, DTypeT: _dtype]( self, array: ndarray[ShapeT, DTypeT], context: tuple[ufunc, tuple[Any, ...], int] | None = ..., @@ -2146,11 +2148,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload # can be of any shape def __getitem__(self: NDArray[void], key: str, /) -> ndarray[_ShapeT_co | _AnyShape]: ... @overload - def __getitem__(self: NDArray[void], key: list[str], /) -> ndarray[_ShapeT_co | _AnyShape, dtype[void]]: ... + def __getitem__(self: NDArray[void], key: list[str], /) -> ndarray[_ShapeT_co | _AnyShape, _dtype[void]]: ... 
@overload # flexible | object_ | bool def __setitem__( - self: ndarray[Any, dtype[flexible | object_ | bool_] | dtypes.StringDType], + self: ndarray[Any, _dtype[flexible | object_ | bool_] | dtypes.StringDType], key: _ToIndices, value: object, /, @@ -2229,15 +2231,15 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # keep in sync with `ma.MaskedArray.tolist` @overload # this first overload prevents mypy from over-eagerly selecting `tuple[()]` in case of `_AnyShape` - def tolist[T](self: ndarray[tuple[Never], dtype[generic[T]]], /) -> Any: ... + def tolist[T](self: ndarray[tuple[Never], _dtype[generic[T]]], /) -> Any: ... @overload - def tolist[T](self: ndarray[tuple[()], dtype[generic[T]]], /) -> T: ... + def tolist[T](self: ndarray[tuple[()], _dtype[generic[T]]], /) -> T: ... @overload - def tolist[T](self: ndarray[tuple[int], dtype[generic[T]]], /) -> list[T]: ... + def tolist[T](self: ndarray[tuple[int], _dtype[generic[T]]], /) -> list[T]: ... @overload - def tolist[T](self: ndarray[tuple[int, int], dtype[generic[T]]], /) -> list[list[T]]: ... + def tolist[T](self: ndarray[tuple[int, int], _dtype[generic[T]]], /) -> list[list[T]]: ... @overload - def tolist[T](self: ndarray[tuple[int, int, int], dtype[generic[T]]], /) -> list[list[list[T]]]: ... + def tolist[T](self: ndarray[tuple[int, int, int], _dtype[generic[T]]], /) -> list[list[list[T]]]: ... @overload def tolist(self, /) -> Any: ... @@ -2366,7 +2368,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): axis: None, kind: _PartitionKind = "introselect", order: None = None, - ) -> ndarray[tuple[int], dtype[intp]]: ... + ) -> ndarray[tuple[int], _dtype[intp]]: ... @overload # axis: index (default) def argpartition( self, @@ -2375,7 +2377,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): axis: SupportsIndex = -1, kind: _PartitionKind = "introselect", order: None = None, - ) -> ndarray[_ShapeT_co, dtype[intp]]: ... 
+ ) -> ndarray[_ShapeT_co, _dtype[intp]]: ... @overload # void, axis: None def argpartition( self: NDArray[void], @@ -2384,7 +2386,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): axis: None, kind: _PartitionKind = "introselect", order: str | Sequence[str] | None = None, - ) -> ndarray[tuple[int], dtype[intp]]: ... + ) -> ndarray[tuple[int], _dtype[intp]]: ... @overload # void, axis: index (default) def argpartition( self: NDArray[void], @@ -2393,7 +2395,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): axis: SupportsIndex = -1, kind: _PartitionKind = "introselect", order: str | Sequence[str] | None = None, - ) -> ndarray[_ShapeT_co, dtype[intp]]: ... + ) -> ndarray[_ShapeT_co, _dtype[intp]]: ... # keep in sync with `ma.MaskedArray.diagonal` def diagonal( @@ -2413,7 +2415,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def dot[ArrayT: ndarray](self, b: ArrayLike, /, out: ArrayT) -> ArrayT: ... # `nonzero()` raises for 0d arrays/generics - def nonzero(self) -> tuple[ndarray[tuple[int], dtype[intp]], ...]: ... + def nonzero(self) -> tuple[ndarray[tuple[int], _dtype[intp]], ...]: ... @overload def searchsorted( @@ -2626,7 +2628,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): casting: _CastingKind = ..., subok: py_bool = ..., copy: py_bool | _CopyMode = ..., - ) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... + ) -> ndarray[_ShapeT_co, _dtype[ScalarT]]: ... @overload def astype( self, @@ -2635,21 +2637,21 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): casting: _CastingKind = ..., subok: py_bool = ..., copy: py_bool | _CopyMode = ..., - ) -> ndarray[_ShapeT_co, dtype]: ... + ) -> ndarray[_ShapeT_co, _dtype]: ... # @overload # () def view(self, /) -> Self: ... @overload # (dtype: T) - def view[DTypeT: dtype](self, /, dtype: DTypeT | _HasDType[DTypeT]) -> ndarray[_ShapeT_co, DTypeT]: ... 
+ def view[DTypeT: _dtype](self, /, dtype: DTypeT | _HasDType[DTypeT]) -> ndarray[_ShapeT_co, DTypeT]: ... @overload # (dtype: dtype[T]) - def view[ScalarT: generic](self, /, dtype: _DTypeLike[ScalarT]) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... + def view[ScalarT: generic](self, /, dtype: _DTypeLike[ScalarT]) -> ndarray[_ShapeT_co, _dtype[ScalarT]]: ... @overload # (type: T) def view[ArrayT: ndarray](self, /, *, type: type[ArrayT]) -> ArrayT: ... @overload # (_: T) def view[ArrayT: ndarray](self, /, dtype: type[ArrayT]) -> ArrayT: ... @overload # (dtype: ?) - def view(self, /, dtype: DTypeLike) -> ndarray[_ShapeT_co, dtype]: ... + def view(self, /, dtype: DTypeLike) -> ndarray[_ShapeT_co, _dtype]: ... @overload # (dtype: ?, type: T) def view[ArrayT: ndarray](self, /, dtype: DTypeLike, type: type[ArrayT]) -> ArrayT: ... @@ -2673,11 +2675,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): self: ndarray[tuple[Never, Never, Never, Never, Never, Never, Never, Never, Never], Any], / ) -> Iterator[Any]: ... @overload # == 1-d & dtype[T \ object_] - def __iter__[ScalarT: _ScalarNotObject](self: ndarray[tuple[int], dtype[ScalarT]], /) -> Iterator[ScalarT]: ... + def __iter__[ScalarT: _ScalarNotObject](self: ndarray[tuple[int], _dtype[ScalarT]], /) -> Iterator[ScalarT]: ... @overload # == 1-d & StringDType def __iter__(self: ndarray[tuple[int], dtypes.StringDType], /) -> Iterator[str]: ... @overload # >= 2-d - def __iter__[DTypeT: dtype]( + def __iter__[DTypeT: _dtype]( self: ndarray[tuple[int, int, *tuple[int, ...]], DTypeT], / ) -> Iterator[ndarray[_AnyShape, DTypeT]]: ... @overload # ?-d @@ -2760,8 +2762,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # def __abs__[ShapeT: _Shape](self: ndarray[ShapeT, dtype[complex128]], /) -> ndarray[ShapeT, dtype[float64]]: ... 
@overload def __abs__[ShapeT: _Shape, NBitT: NBitBase]( - self: ndarray[ShapeT, dtype[complexfloating[NBitT]]], / - ) -> ndarray[ShapeT, dtype[floating[NBitT]]]: ... + self: ndarray[ShapeT, _dtype[complexfloating[NBitT]]], / + ) -> ndarray[ShapeT, _dtype[floating[NBitT]]]: ... @overload def __abs__[ArrayT: NDArray[bool_ | integer | floating | timedelta64 | object_]](self: ArrayT, /) -> ArrayT: ... @@ -2833,7 +2835,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __mod__[ScalarT: floating | integer]( self: NDArray[ScalarT], other: int | bool_, / - ) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... + ) -> ndarray[_ShapeT_co, _dtype[ScalarT]]: ... @overload def __mod__[ScalarT: floating | integer](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload @@ -2860,7 +2862,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload # signature equivalent to __mod__ def __rmod__[ScalarT: floating | integer]( self: NDArray[ScalarT], other: int | bool_, / - ) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... + ) -> ndarray[_ShapeT_co, _dtype[ScalarT]]: ... @overload def __rmod__[ScalarT: floating | integer](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload @@ -2887,7 +2889,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __divmod__[ScalarT: floating | integer]( self: NDArray[ScalarT], rhs: int | bool_, / - ) -> _2Tuple[ndarray[_ShapeT_co, dtype[ScalarT]]]: ... + ) -> _2Tuple[ndarray[_ShapeT_co, _dtype[ScalarT]]]: ... @overload def __divmod__[ScalarT: floating | integer]( self: NDArray[ScalarT], rhs: _ArrayLikeBool_co, / @@ -2914,7 +2916,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload # signature equivalent to __divmod__ def __rdivmod__[ScalarT: floating | integer]( self: NDArray[ScalarT], lhs: int | bool_, / - ) -> _2Tuple[ndarray[_ShapeT_co, dtype[ScalarT]]]: ... 
+ ) -> _2Tuple[ndarray[_ShapeT_co, _dtype[ScalarT]]]: ... @overload def __rdivmod__[ScalarT: floating | integer]( self: NDArray[ScalarT], lhs: _ArrayLikeBool_co, / @@ -2940,7 +2942,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__add__` @overload - def __add__[ScalarT: number](self: NDArray[ScalarT], other: int | bool_, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... + def __add__[ScalarT: number](self: NDArray[ScalarT], other: int | bool_, /) -> ndarray[_ShapeT_co, _dtype[ScalarT]]: ... @overload def __add__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload @@ -2988,7 +2990,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__radd__` @overload # signature equivalent to __add__ - def __radd__[ScalarT: number](self: NDArray[ScalarT], other: int | bool_, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... + def __radd__[ScalarT: number](self: NDArray[ScalarT], other: int | bool_, /) -> ndarray[_ShapeT_co, _dtype[ScalarT]]: ... @overload def __radd__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload @@ -3036,7 +3038,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__sub__` @overload - def __sub__[ScalarT: number](self: NDArray[ScalarT], other: int | bool_, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... + def __sub__[ScalarT: number](self: NDArray[ScalarT], other: int | bool_, /) -> ndarray[_ShapeT_co, _dtype[ScalarT]]: ... @overload def __sub__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... 
@overload @@ -3074,7 +3076,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__rsub__` @overload - def __rsub__[ScalarT: number](self: NDArray[ScalarT], other: int | bool_, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... + def __rsub__[ScalarT: number](self: NDArray[ScalarT], other: int | bool_, /) -> ndarray[_ShapeT_co, _dtype[ScalarT]]: ... @overload def __rsub__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload @@ -3112,7 +3114,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__mul__` @overload - def __mul__[ScalarT: number](self: NDArray[ScalarT], other: int | bool_, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... + def __mul__[ScalarT: number](self: NDArray[ScalarT], other: int | bool_, /) -> ndarray[_ShapeT_co, _dtype[ScalarT]]: ... @overload def __mul__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload @@ -3143,7 +3145,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __mul__(self: _ArrayFloat_co, other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ... @overload def __mul__( - self: ndarray[Any, dtype[character] | dtypes.StringDType], + self: ndarray[Any, _dtype[character] | dtypes.StringDType], other: _ArrayLikeInt, /, ) -> ndarray[tuple[Any, ...], _DTypeT_co]: ... @@ -3154,7 +3156,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__rmul__` @overload # signature equivalent to __mul__ - def __rmul__[ScalarT: number](self: NDArray[ScalarT], other: int | bool_, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... + def __rmul__[ScalarT: number](self: NDArray[ScalarT], other: int | bool_, /) -> ndarray[_ShapeT_co, _dtype[ScalarT]]: ... @overload def __rmul__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... 
@overload @@ -3185,7 +3187,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __rmul__(self: _ArrayFloat_co, other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ... @overload def __rmul__( - self: ndarray[Any, dtype[character] | dtypes.StringDType], + self: ndarray[Any, _dtype[character] | dtypes.StringDType], other: _ArrayLikeInt, /, ) -> ndarray[tuple[Any, ...], _DTypeT_co]: ... @@ -3260,7 +3262,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __floordiv__[ScalarT: integer | floating]( self: NDArray[ScalarT], other: int | bool_, / - ) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... + ) -> ndarray[_ShapeT_co, _dtype[ScalarT]]: ... @overload def __floordiv__[ScalarT: integer | floating](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload @@ -3292,7 +3294,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __rfloordiv__[ScalarT: integer | floating]( self: NDArray[ScalarT], other: int | bool_, / - ) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... + ) -> ndarray[_ShapeT_co, _dtype[ScalarT]]: ... @overload def __rfloordiv__[ScalarT: integer | floating](self: NDArray[ScalarT], other: _ArrayLikeBool_co, /) -> NDArray[ScalarT]: ... @overload @@ -3320,7 +3322,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__pow__` @overload - def __pow__[ScalarT: number](self: NDArray[ScalarT], other: int | bool_, mod: None = None, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... + def __pow__[ScalarT: number](self: NDArray[ScalarT], other: int | bool_, mod: None = None, /) -> ndarray[_ShapeT_co, _dtype[ScalarT]]: ... @overload def __pow__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[ScalarT]: ... 
@overload @@ -3354,7 +3356,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # Keep in sync with `MaskedArray.__rpow__` @overload - def __rpow__[ScalarT: number](self: NDArray[ScalarT], other: int | bool_, mod: None = None, /) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... + def __rpow__[ScalarT: number](self: NDArray[ScalarT], other: int | bool_, mod: None = None, /) -> ndarray[_ShapeT_co, _dtype[ScalarT]]: ... @overload def __rpow__[ScalarT: number](self: NDArray[ScalarT], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[ScalarT]: ... @overload @@ -3675,9 +3677,9 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): def __buffer__(self, flags: int, /) -> memoryview: ... @overload - def __array__(self, dtype: None = None, /) -> ndarray[tuple[()], dtype[Self]]: ... + def __array__(self, dtype: None = None, /) -> ndarray[tuple[()], _dtype[Self]]: ... @overload - def __array__[DTypeT: dtype](self, dtype: DTypeT, /) -> ndarray[tuple[()], DTypeT]: ... + def __array__[DTypeT: _dtype](self, dtype: DTypeT, /) -> ndarray[tuple[()], DTypeT]: ... # @overload @@ -3685,25 +3687,25 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): @overload def __getitem__( self, key: EllipsisType | tuple[EllipsisType], / - ) -> ndarray[tuple[()], dtype[Self]]: ... + ) -> ndarray[tuple[()], _dtype[Self]]: ... @overload def __getitem__( self, key: None | tuple[None], / - ) -> ndarray[tuple[int], dtype[Self]]: ... + ) -> ndarray[tuple[int], _dtype[Self]]: ... @overload def __getitem__( self, key: tuple[None, None], / - ) -> ndarray[tuple[int, int], dtype[Self]]: ... + ) -> ndarray[tuple[int, int], _dtype[Self]]: ... @overload def __getitem__( self, key: tuple[None, None, None], / - ) -> ndarray[tuple[int, int, int], dtype[Self]]: ... + ) -> ndarray[tuple[int, int, int], _dtype[Self]]: ... @overload # Limited support for (None,) * N > 3 def __getitem__(self, key: tuple[None, ...], /) -> NDArray[Self]: ... 
# @overload - def __array_wrap__[ShapeT: _Shape, DTypeT: dtype]( + def __array_wrap__[ShapeT: _Shape, DTypeT: _dtype]( self, array: ndarray[ShapeT, DTypeT], context: tuple[ufunc, tuple[object, ...], int] | None, @@ -3713,13 +3715,13 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): @overload def __array_wrap__[ScalarT: generic]( self, - array: ndarray[tuple[()], dtype[ScalarT]], + array: ndarray[tuple[()], _dtype[ScalarT]], context: tuple[ufunc, tuple[object, ...], int] | None = None, return_scalar: L[True] = True, /, ) -> ScalarT: ... @overload - def __array_wrap__[ShapeT: tuple[int, *tuple[int, ...]], DTypeT: dtype]( + def __array_wrap__[ShapeT: tuple[int, *tuple[int, ...]], DTypeT: _dtype]( self, array: ndarray[ShapeT, DTypeT], context: tuple[ufunc, tuple[object, ...], int] | None = None, @@ -3729,11 +3731,11 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): @overload def __array_wrap__[ShapeT: _Shape, ScalarT: generic]( self, - array: ndarray[ShapeT, dtype[ScalarT]], + array: ndarray[ShapeT, _dtype[ScalarT]], context: tuple[ufunc, tuple[object, ...], int] | None = None, return_scalar: L[True] = True, /, - ) -> ScalarT | ndarray[ShapeT, dtype[ScalarT]]: ... + ) -> ScalarT | ndarray[ShapeT, _dtype[ScalarT]]: ... @property def base(self) -> None: ... @@ -3746,7 +3748,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): @property def strides(self) -> tuple[()]: ... @property - def flat(self) -> flatiter[ndarray[tuple[int], dtype[Self]]]: ... + def flat(self) -> flatiter[ndarray[tuple[int], _dtype[Self]]]: ... @overload def item(self, /) -> _ItemT_co: ... @@ -3857,9 +3859,9 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): mode: _ModeKind = "raise", ) -> ArrayT: ... - def repeat(self, repeats: _ArrayLikeInt_co, /, axis: SupportsIndex | None = None) -> ndarray[tuple[int], dtype[Self]]: ... - def flatten(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], dtype[Self]]: ... 
- def ravel(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], dtype[Self]]: ... + def repeat(self, repeats: _ArrayLikeInt_co, /, axis: SupportsIndex | None = None) -> ndarray[tuple[int], _dtype[Self]]: ... + def flatten(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], _dtype[Self]]: ... + def ravel(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], _dtype[Self]]: ... @overload # (()) def reshape( @@ -3878,7 +3880,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): *, order: _OrderACF = "C", copy: py_bool | None = None, - ) -> ndarray[ShapeT, dtype[Self]]: ... + ) -> ndarray[ShapeT, _dtype[Self]]: ... @overload # (Sequence[index, ...]) # not recommended def reshape( self, @@ -3896,7 +3898,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): *, order: _OrderACF = "C", copy: py_bool | None = None, - ) -> ndarray[tuple[int], dtype[Self]]: ... + ) -> ndarray[tuple[int], _dtype[Self]]: ... @overload # _(index, index) def reshape( self, @@ -3906,7 +3908,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): *, order: _OrderACF = "C", copy: py_bool | None = None, - ) -> ndarray[tuple[int, int], dtype[Self]]: ... + ) -> ndarray[tuple[int, int], _dtype[Self]]: ... @overload # _(index, index, index) def reshape( self, @@ -3917,7 +3919,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): *, order: _OrderACF = "C", copy: py_bool | None = None, - ) -> ndarray[tuple[int, int, int], dtype[Self]]: ... + ) -> ndarray[tuple[int, int, int], _dtype[Self]]: ... @overload # _(index, index, index, index) def reshape( self, @@ -3929,7 +3931,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): *, order: _OrderACF = "C", copy: py_bool | None = None, - ) -> ndarray[tuple[int, int, int, int], dtype[Self]]: ... + ) -> ndarray[tuple[int, int, int, int], _dtype[Self]]: ... 
@overload # _(index, index, index, index, index, *index) # ndim >= 5 def reshape( self, @@ -3942,7 +3944,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): *sizes6_: SupportsIndex, order: _OrderACF = "C", copy: py_bool | None = None, - ) -> ndarray[tuple[int, int, int, int, int, *tuple[int, ...]], dtype[Self]]: ... + ) -> ndarray[tuple[int, int, int, int, int, *tuple[int, ...]], _dtype[Self]]: ... def squeeze(self, axis: L[0] | tuple[()] | None = ...) -> Self: ... def transpose(self, axes: tuple[()] | None = ..., /) -> Self: ... @@ -3955,17 +3957,17 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): out: None = None, keepdims: SupportsIndex = False, *, - where: py_bool | bool_ | ndarray[tuple[()], dtype[bool_]] = True + where: py_bool | bool_ | ndarray[tuple[()], _dtype[bool_]] = True ) -> bool_: ... @overload def all[ScalarT: generic]( self, /, axis: L[0, -1] | tuple[()] | None, - out: ndarray[tuple[()], dtype[ScalarT]], + out: ndarray[tuple[()], _dtype[ScalarT]], keepdims: SupportsIndex = False, *, - where: py_bool | bool_ | ndarray[tuple[()], dtype[bool_]] = True, + where: py_bool | bool_ | ndarray[tuple[()], _dtype[bool_]] = True, ) -> ScalarT: ... @overload def all[ScalarT: generic]( @@ -3973,9 +3975,9 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): /, axis: L[0, -1] | tuple[()] | None = None, *, - out: ndarray[tuple[()], dtype[ScalarT]], + out: ndarray[tuple[()], _dtype[ScalarT]], keepdims: SupportsIndex = False, - where: py_bool | bool_ | ndarray[tuple[()], dtype[bool_]] = True, + where: py_bool | bool_ | ndarray[tuple[()], _dtype[bool_]] = True, ) -> ScalarT: ... @overload @@ -3986,17 +3988,17 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): out: None = None, keepdims: SupportsIndex = False, *, - where: py_bool | bool_ | ndarray[tuple[()], dtype[bool_]] = True + where: py_bool | bool_ | ndarray[tuple[()], _dtype[bool_]] = True ) -> bool_: ... 
@overload def any[ScalarT: generic]( self, /, axis: L[0, -1] | tuple[()] | None, - out: ndarray[tuple[()], dtype[ScalarT]], + out: ndarray[tuple[()], _dtype[ScalarT]], keepdims: SupportsIndex = False, *, - where: py_bool | bool_ | ndarray[tuple[()], dtype[bool_]] = True, + where: py_bool | bool_ | ndarray[tuple[()], _dtype[bool_]] = True, ) -> ScalarT: ... @overload def any[ScalarT: generic]( @@ -4004,9 +4006,9 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): /, axis: L[0, -1] | tuple[()] | None = None, *, - out: ndarray[tuple[()], dtype[ScalarT]], + out: ndarray[tuple[()], _dtype[ScalarT]], keepdims: SupportsIndex = False, - where: py_bool | bool_ | ndarray[tuple[()], dtype[bool_]] = True, + where: py_bool | bool_ | ndarray[tuple[()], _dtype[bool_]] = True, ) -> ScalarT: ... # Keep `dtype` at the bottom to avoid name conflicts with `np.dtype` diff --git a/numpy/_core/arrayprint.pyi b/numpy/_core/arrayprint.pyi index 167cc3f3a097..d06c38539306 100644 --- a/numpy/_core/arrayprint.pyi +++ b/numpy/_core/arrayprint.pyi @@ -1,3 +1,5 @@ +# Aliases for builtins shadowed by classes to avoid annotations resolving to class members by ty +from builtins import object as py_object from collections.abc import Callable # Using a private class is by no means ideal, but it is simply a consequence @@ -40,8 +42,8 @@ class _FormatDict(TypedDict, total=False): longcomplexfloat: Callable[[np.clongdouble], str] void: Callable[[np.void], str] numpystr: Callable[[_CharLike_co], str] - object: Callable[[object], str] - all: Callable[[object], str] + object: Callable[[py_object], str] + all: Callable[[py_object], str] int_kind: Callable[[np.integer], str] float_kind: Callable[[np.floating], str] complex_kind: Callable[[np.complexfloating], str] diff --git a/numpy/dtypes.pyi b/numpy/dtypes.pyi index 920e23c85f1f..20a503ad2557 100644 --- a/numpy/dtypes.pyi +++ b/numpy/dtypes.pyi @@ -1,4 +1,6 @@ # ruff: noqa: ANN401 +# Aliases for builtins shadowed by classes to avoid annotations 
resolving to class members by ty +from builtins import str as py_str, type as py_type from typing import ( Any, Generic, @@ -610,7 +612,7 @@ class StringDType( # type: ignore[misc] @property def subdtype(self) -> None: ... @property - def type(self) -> type[str]: ... + def type(self) -> py_type[py_str]: ... @property def str(self) -> L["|T8", "|T16"]: ... diff --git a/numpy/lib/_nanfunctions_impl.py b/numpy/lib/_nanfunctions_impl.py index d5a01a35f372..86e3e9933784 100644 --- a/numpy/lib/_nanfunctions_impl.py +++ b/numpy/lib/_nanfunctions_impl.py @@ -1603,9 +1603,9 @@ def _nanquantile_unchecked( def _nanquantile_ureduce_func( - a: np.array, - q: np.array, - weights: np.array, + a: np.ndarray, + q: np.ndarray, + weights: np.ndarray, axis: int | None = None, out=None, overwrite_input: bool = False, diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 4dd3bbfbb098..fb54cc7b8238 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -1127,7 +1127,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): dtype: _DTypeLike[ScalarT], type: None = None, fill_value: _ScalarLike_co | None = None, - ) -> MaskedArray[_ShapeT_co, dtype[ScalarT]]: ... + ) -> MaskedArray[_ShapeT_co, np.dtype[ScalarT]]: ... @overload # ([dtype: _, ]*, type: ArrayT) def view[ArrayT: np.ndarray]( self, @@ -1174,7 +1174,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def __getitem__(self: _MaskedArray[np.void], indx: str, /) -> MaskedArray[_ShapeT_co]: ... @overload - def __getitem__(self: _MaskedArray[np.void], indx: list[str], /) -> MaskedArray[_ShapeT_co, dtype[np.void]]: ... + def __getitem__(self: _MaskedArray[np.void], indx: list[str], /) -> MaskedArray[_ShapeT_co, np.dtype[np.void]]: ... @property def shape(self) -> _ShapeT_co: ... @@ -1183,11 +1183,11 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def __setmask__(self, mask: _ArrayLikeBool_co, copy: bool = False) -> None: ... @property - def mask(self) -> np.ndarray[_ShapeT_co, dtype[MaskType]] | MaskType: ... 
+ def mask(self) -> np.ndarray[_ShapeT_co, np.dtype[MaskType]] | MaskType: ... @mask.setter def mask(self, value: _ArrayLikeBool_co, /) -> None: ... @property - def recordmask(self) -> np.ndarray[_ShapeT_co, dtype[MaskType]] | MaskType: ... + def recordmask(self) -> np.ndarray[_ShapeT_co, np.dtype[MaskType]] | MaskType: ... @recordmask.setter def recordmask(self, mask: Never, /) -> NoReturn: ... def harden_mask(self) -> Self: ... @@ -1270,7 +1270,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): self: _MaskedArray[ScalarT], other: int | np.bool, /, - ) -> MaskedArray[_ShapeT_co, dtype[ScalarT]]: ... + ) -> MaskedArray[_ShapeT_co, np.dtype[ScalarT]]: ... @overload def __add__[ScalarT: np.number](self: _MaskedArray[ScalarT], other: _ArrayLikeBool_co, /) -> _MaskedArray[ScalarT]: ... @overload @@ -1322,7 +1322,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): self: _MaskedArray[ScalarT], other: int | np.bool, /, - ) -> MaskedArray[_ShapeT_co, dtype[ScalarT]]: ... + ) -> MaskedArray[_ShapeT_co, np.dtype[ScalarT]]: ... @overload def __radd__[ScalarT: np.number](self: _MaskedArray[ScalarT], other: _ArrayLikeBool_co, /) -> _MaskedArray[ScalarT]: ... @overload @@ -1374,7 +1374,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): self: _MaskedArray[ScalarT], other: int | np.bool, /, - ) -> MaskedArray[_ShapeT_co, dtype[ScalarT]]: ... + ) -> MaskedArray[_ShapeT_co, np.dtype[ScalarT]]: ... @overload def __sub__[ScalarT: np.number](self: _MaskedArray[ScalarT], other: _ArrayLikeBool_co, /) -> _MaskedArray[ScalarT]: ... @overload @@ -1416,7 +1416,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): self: _MaskedArray[ScalarT], other: int | np.bool, /, - ) -> MaskedArray[_ShapeT_co, dtype[ScalarT]]: ... + ) -> MaskedArray[_ShapeT_co, np.dtype[ScalarT]]: ... @overload def __rsub__[ScalarT: np.number](self: _MaskedArray[ScalarT], other: _ArrayLikeBool_co, /) -> _MaskedArray[ScalarT]: ... 
@overload @@ -1458,7 +1458,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): self: _MaskedArray[ScalarT], other: int | np.bool, /, - ) -> MaskedArray[_ShapeT_co, dtype[ScalarT]]: ... + ) -> MaskedArray[_ShapeT_co, np.dtype[ScalarT]]: ... @overload def __mul__[ScalarT: np.number](self: _MaskedArray[ScalarT], other: _ArrayLikeBool_co, /) -> _MaskedArray[ScalarT]: ... @overload @@ -1489,7 +1489,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def __mul__(self: _MaskedArrayFloat_co, other: _ArrayLike[timedelta64], /) -> _MaskedArray[timedelta64]: ... @overload def __mul__( - self: MaskedArray[Any, dtype[np.character] | np.dtypes.StringDType], + self: MaskedArray[Any, np.dtype[np.character] | np.dtypes.StringDType], other: _ArrayLikeInt, /, ) -> MaskedArray[tuple[Any, ...], _DTypeT_co]: ... @@ -1504,7 +1504,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): self: _MaskedArray[ScalarT], other: int | np.bool, /, - ) -> MaskedArray[_ShapeT_co, dtype[ScalarT]]: ... + ) -> MaskedArray[_ShapeT_co, np.dtype[ScalarT]]: ... @overload def __rmul__[ScalarT: np.number](self: _MaskedArray[ScalarT], other: _ArrayLikeBool_co, /) -> _MaskedArray[ScalarT]: ... @overload @@ -1535,7 +1535,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def __rmul__(self: _MaskedArrayFloat_co, other: _ArrayLike[timedelta64], /) -> _MaskedArray[timedelta64]: ... @overload def __rmul__( - self: MaskedArray[Any, dtype[np.character] | np.dtypes.StringDType], + self: MaskedArray[Any, np.dtype[np.character] | np.dtypes.StringDType], other: _ArrayLikeInt, /, ) -> MaskedArray[tuple[Any, ...], _DTypeT_co]: ... @@ -1612,7 +1612,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): self: _MaskedArray[ScalarT], other: int | np.bool, /, - ) -> MaskedArray[_ShapeT_co, dtype[ScalarT]]: ... + ) -> MaskedArray[_ShapeT_co, np.dtype[ScalarT]]: ... @overload def __floordiv__[ScalarT: _RealNumber](self: _MaskedArray[ScalarT], other: _ArrayLikeBool_co, /) -> _MaskedArray[ScalarT]: ... 
@overload @@ -1650,7 +1650,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): self: _MaskedArray[ScalarT], other: int | np.bool, /, - ) -> MaskedArray[_ShapeT_co, dtype[ScalarT]]: ... + ) -> MaskedArray[_ShapeT_co, np.dtype[ScalarT]]: ... @overload def __rfloordiv__[ScalarT: _RealNumber]( self: _MaskedArray[ScalarT], @@ -1690,7 +1690,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): self: _MaskedArray[ScalarT], other: int | np.bool, /, - ) -> MaskedArray[_ShapeT_co, dtype[ScalarT]]: ... + ) -> MaskedArray[_ShapeT_co, np.dtype[ScalarT]]: ... @overload def __pow__[ScalarT: np.number](self: _MaskedArray[ScalarT], other: _ArrayLikeBool_co, /) -> _MaskedArray[ScalarT]: ... @overload @@ -1726,7 +1726,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): self: _MaskedArray[ScalarT], other: int | np.bool, /, - ) -> MaskedArray[_ShapeT_co, dtype[ScalarT]]: ... + ) -> MaskedArray[_ShapeT_co, np.dtype[ScalarT]]: ... @overload def __rpow__[ScalarT: np.number](self: _MaskedArray[ScalarT], other: _ArrayLikeBool_co, /) -> _MaskedArray[ScalarT]: ... @overload @@ -1761,22 +1761,22 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def imag[ScalarT: np.generic]( # type: ignore[override] self: _HasDTypeWithRealAndImag[object, ScalarT], /, - ) -> MaskedArray[_ShapeT_co, dtype[ScalarT]]: ... + ) -> MaskedArray[_ShapeT_co, np.dtype[ScalarT]]: ... def get_imag[ScalarT: np.generic]( self: _HasDTypeWithRealAndImag[object, ScalarT], /, - ) -> MaskedArray[_ShapeT_co, dtype[ScalarT]]: ... + ) -> MaskedArray[_ShapeT_co, np.dtype[ScalarT]]: ... # @property # type: ignore[misc] def real[ScalarT: np.generic]( # type: ignore[override] self: _HasDTypeWithRealAndImag[ScalarT, object], /, - ) -> MaskedArray[_ShapeT_co, dtype[ScalarT]]: ... + ) -> MaskedArray[_ShapeT_co, np.dtype[ScalarT]]: ... def get_real[ScalarT: np.generic]( self: _HasDTypeWithRealAndImag[ScalarT, object], /, - ) -> MaskedArray[_ShapeT_co, dtype[ScalarT]]: ... 
+ ) -> MaskedArray[_ShapeT_co, np.dtype[ScalarT]]: ... # keep in sync with `np.ma.count` @overload @@ -2129,9 +2129,9 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def anom(self, axis: SupportsIndex | None = None, dtype: None = None) -> Self: ... @overload - def anom(self, axis: SupportsIndex | None = None, *, dtype: DTypeLike) -> MaskedArray[_ShapeT_co, dtype]: ... + def anom(self, axis: SupportsIndex | None = None, *, dtype: DTypeLike) -> MaskedArray[_ShapeT_co, np.dtype]: ... @overload - def anom(self, axis: SupportsIndex | None, dtype: DTypeLike) -> MaskedArray[_ShapeT_co, dtype]: ... + def anom(self, axis: SupportsIndex | None, dtype: DTypeLike) -> MaskedArray[_ShapeT_co, np.dtype]: ... # keep in sync with `std` and `ma.core.var` @overload # type: ignore[override] @@ -2441,7 +2441,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): axis: None, kind: _PartitionKind = "introselect", order: None = None, - ) -> MaskedArray[tuple[int], dtype[intp]]: ... + ) -> MaskedArray[tuple[int], np.dtype[intp]]: ... @overload # axis: index (default) def argpartition( self, @@ -2450,7 +2450,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): axis: SupportsIndex = -1, kind: _PartitionKind = "introselect", order: None = None, - ) -> MaskedArray[_ShapeT_co, dtype[intp]]: ... + ) -> MaskedArray[_ShapeT_co, np.dtype[intp]]: ... @overload # void, axis: None def argpartition( self: _MaskedArray[np.void], @@ -2459,7 +2459,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): axis: None, kind: _PartitionKind = "introselect", order: str | Sequence[str] | None = None, - ) -> MaskedArray[tuple[int], dtype[intp]]: ... + ) -> MaskedArray[tuple[int], np.dtype[intp]]: ... 
@overload # void, axis: index (default) def argpartition( self: _MaskedArray[np.void], @@ -2468,7 +2468,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): axis: SupportsIndex = -1, kind: _PartitionKind = "introselect", order: str | Sequence[str] | None = None, - ) -> MaskedArray[_ShapeT_co, dtype[intp]]: ... + ) -> MaskedArray[_ShapeT_co, np.dtype[intp]]: ... # Keep in-sync with np.ma.take @overload # type: ignore[override] @@ -2557,20 +2557,20 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # keep in sync with `ndarray.tolist` @override @overload - def tolist[T](self: MaskedArray[tuple[Never], dtype[generic[T]]], /, fill_value: _ScalarLike_co | None = None) -> Any: ... + def tolist[T](self: MaskedArray[tuple[Never], np.dtype[generic[T]]], /, fill_value: _ScalarLike_co | None = None) -> Any: ... @overload - def tolist[T](self: MaskedArray[tuple[()], dtype[generic[T]]], /, fill_value: _ScalarLike_co | None = None) -> T: ... + def tolist[T](self: MaskedArray[tuple[()], np.dtype[generic[T]]], /, fill_value: _ScalarLike_co | None = None) -> T: ... @overload def tolist[T](self: _Masked1D[np.generic[T]], /, fill_value: _ScalarLike_co | None = None) -> list[T]: ... @overload def tolist[T]( - self: MaskedArray[tuple[int, int], dtype[generic[T]]], + self: MaskedArray[tuple[int, int], np.dtype[generic[T]]], /, fill_value: _ScalarLike_co | None = None, ) -> list[list[T]]: ... @overload def tolist[T]( - self: MaskedArray[tuple[int, int, int], dtype[generic[T]]], + self: MaskedArray[tuple[int, int, int], np.dtype[generic[T]]], /, fill_value: _ScalarLike_co | None = None, ) -> list[list[list[T]]]: ... 
diff --git a/numpy/random/_generator.pyi b/numpy/random/_generator.pyi index f3fb9bb7baf5..634aaf68912c 100644 --- a/numpy/random/_generator.pyi +++ b/numpy/random/_generator.pyi @@ -1,3 +1,5 @@ +# Aliases for builtins shadowed by classes to avoid annotations resolving to class members by ty +from builtins import bytes as py_bytes from collections.abc import Callable, MutableSequence from typing import Any, Literal, Self, overload @@ -47,7 +49,7 @@ class Generator: @property def bit_generator(self) -> BitGenerator: ... def spawn(self, n_children: int) -> list[Self]: ... - def bytes(self, length: int) -> bytes: ... + def bytes(self, length: int) -> py_bytes: ... # continuous distributions From a639fbf5d2a4fb789967124d16f503f57bd5958b Mon Sep 17 00:00:00 2001 From: Matthieu Darbois Date: Sun, 1 Feb 2026 15:17:52 +0100 Subject: [PATCH 1354/1718] BUG: Fix some leaks found via LeakSanitizer (#30756) --- numpy/_core/src/umath/dispatching.cpp | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/numpy/_core/src/umath/dispatching.cpp b/numpy/_core/src/umath/dispatching.cpp index cf2ce657b426..2998ad0465de 100644 --- a/numpy/_core/src/umath/dispatching.cpp +++ b/numpy/_core/src/umath/dispatching.cpp @@ -179,6 +179,7 @@ PyUFunc_AddLoopFromSpec_int(PyObject *ufunc, PyArrayMethod_Spec *spec, int priv) PyObject *dtypes = PyArray_TupleFromItems( nargs, (PyObject **)bmeth->dtypes, 1); if (dtypes == NULL) { + Py_DECREF(bmeth); return -1; } PyObject *info = PyTuple_Pack(2, dtypes, bmeth->method); @@ -187,7 +188,9 @@ PyUFunc_AddLoopFromSpec_int(PyObject *ufunc, PyArrayMethod_Spec *spec, int priv) if (info == NULL) { return -1; } - return PyUFunc_AddLoop((PyUFuncObject *)ufunc, info, 0); + int res = PyUFunc_AddLoop((PyUFuncObject *)ufunc, info, 0); + Py_DECREF(info); + return res; } @@ -1319,8 +1322,9 @@ install_logical_ufunc_promoter(PyObject *ufunc) if (info == NULL) { return -1; } - - return PyUFunc_AddLoop((PyUFuncObject *)ufunc, info, 0); + int res = 
PyUFunc_AddLoop((PyUFuncObject *)ufunc, info, 0); + Py_DECREF(info); + return res; } /* @@ -1394,5 +1398,7 @@ PyUFunc_AddPromoter( if (info == NULL) { return -1; } - return PyUFunc_AddLoop((PyUFuncObject *)ufunc, info, 0); + int res = PyUFunc_AddLoop((PyUFuncObject *)ufunc, info, 0); + Py_DECREF(info); + return res; } From 096c19badc19cf59b6a53f7c9e202adf5502a5f7 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sun, 1 Feb 2026 09:37:33 -0700 Subject: [PATCH 1355/1718] MAINT: Update main after 2.4.2 release. - Forward port 2.4.2-changelog.rst - Forward port 2.4.2-notes.rst - Update release.rst [skip azp] [skip cirrus] [skip actions] --- doc/changelog/2.4.2-changelog.rst | 35 +++++++++++++++++++++ doc/source/release.rst | 1 + doc/source/release/2.4.2-notes.rst | 50 ++++++++++++++++++++++++++++++ 3 files changed, 86 insertions(+) create mode 100644 doc/changelog/2.4.2-changelog.rst create mode 100644 doc/source/release/2.4.2-notes.rst diff --git a/doc/changelog/2.4.2-changelog.rst b/doc/changelog/2.4.2-changelog.rst new file mode 100644 index 000000000000..06d50fa5e8f0 --- /dev/null +++ b/doc/changelog/2.4.2-changelog.rst @@ -0,0 +1,35 @@ + +Contributors +============ + +A total of 9 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Charles Harris +* Daniel Tang + +* Joren Hammudoglu +* Kumar Aditya +* Matti Picus +* Nathan Goldbaum +* Ralf Gommers +* Sebastian Berg +* Vikram Kumar + + +Pull requests merged +==================== + +A total of 12 pull requests were merged for this release. + +* `#30629 `__: MAINT: Prepare 2.4.x for further development +* `#30636 `__: TYP: ``arange``\ : accept datetime strings +* `#30657 `__: MAINT: avoid possible race condition by not touching ``os.environ``... 
+* `#30700 `__: BUG: validate contraction axes in tensordot (#30521) +* `#30701 `__: DOC: __array_namespace__info__: set_module not __module__ (#30679) +* `#30702 `__: BUG: fix free-threaded PyObject layout in replace_scalar_type_names... +* `#30703 `__: TST: fix limited API example in tests for latest Cython +* `#30709 `__: BUG: Fix some bugs found via valgrind (#30680) +* `#30712 `__: MAINT: replace ob_type access with Py_TYPE in PyArray_CheckExact +* `#30713 `__: BUG: Fixup the quantile promotion fixup +* `#30736 `__: BUG: fix thread safety of ``array_getbuffer`` (#30667) +* `#30737 `__: backport scipy-openblas version change + diff --git a/doc/source/release.rst b/doc/source/release.rst index 36e01cac6e1b..c0207894ac02 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -6,6 +6,7 @@ Release notes :maxdepth: 2 2.5.0 + 2.4.2 2.4.1 2.4.0 2.3.5 diff --git a/doc/source/release/2.4.2-notes.rst b/doc/source/release/2.4.2-notes.rst new file mode 100644 index 000000000000..3cb4fc3b1955 --- /dev/null +++ b/doc/source/release/2.4.2-notes.rst @@ -0,0 +1,50 @@ +.. currentmodule:: numpy + +========================= +NumPy 2.4.2 Release Notes +========================= + +The NumPy 2.4.2 is a patch release that fixes bugs discovered after the +2.4.1 release. Highlights are: + +- Fixes memory leaks +- Updates OpenBLAS to fix hangs + +This release supports Python versions 3.11-3.14 + + +Contributors +============ + +A total of 9 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Charles Harris +* Daniel Tang + +* Joren Hammudoglu +* Kumar Aditya +* Matti Picus +* Nathan Goldbaum +* Ralf Gommers +* Sebastian Berg +* Vikram Kumar + + + +Pull requests merged +==================== + +A total of 12 pull requests were merged for this release. 
+ +* `#30629 `__: MAINT: Prepare 2.4.x for further development +* `#30636 `__: TYP: ``arange``\ : accept datetime strings +* `#30657 `__: MAINT: avoid possible race condition by not touching ``os.environ``... +* `#30700 `__: BUG: validate contraction axes in tensordot (#30521) +* `#30701 `__: DOC: __array_namespace__info__: set_module not __module__ (#30679) +* `#30702 `__: BUG: fix free-threaded PyObject layout in replace_scalar_type_names... +* `#30703 `__: TST: fix limited API example in tests for latest Cython +* `#30709 `__: BUG: Fix some bugs found via valgrind (#30680) +* `#30712 `__: MAINT: replace ob_type access with Py_TYPE in PyArray_CheckExact +* `#30713 `__: BUG: Fixup the quantile promotion fixup +* `#30736 `__: BUG: fix thread safety of ``array_getbuffer`` (#30667) +* `#30737 `__: backport scipy-openblas version change + From 9f0b0bea55eb09f0c45e2544d9913d5b21a726a0 Mon Sep 17 00:00:00 2001 From: mayeut Date: Sun, 1 Feb 2026 10:57:42 +0100 Subject: [PATCH 1356/1718] BUG: Fix some leaks found in f2py via LeakSanitizer --- numpy/f2py/f90mod_rules.py | 2 +- numpy/f2py/src/fortranobject.c | 17 ++++++++++++++--- 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/numpy/f2py/f90mod_rules.py b/numpy/f2py/f90mod_rules.py index d13a42a9d71f..e01ac04e9d64 100644 --- a/numpy/f2py/f90mod_rules.py +++ b/numpy/f2py/f90mod_rules.py @@ -237,7 +237,7 @@ def iadd(line, s=ihooks): % (F_FUNC, m['name'], m['name'].upper(), m['name'])) iadd('}\n') ret['f90modhooks'] = ret['f90modhooks'] + chooks + ihooks - ret['initf90modhooks'] = ['\tPyDict_SetItemString(d, "%s", PyFortranObject_New(f2py_%s_def,f2py_init_%s));' % ( + ret['initf90modhooks'] = ['\t{ PyObject *tmp = PyFortranObject_New(f2py_%s_def,f2py_init_%s); PyDict_SetItemString(d, "%s", tmp); Py_XDECREF(tmp); }' % ( m['name'], m['name'], m['name'])] + ret['initf90modhooks'] fadd('') fadd(f"subroutine f2pyinit{m['name']}(f2pysetupfunc)") diff --git a/numpy/f2py/src/fortranobject.c 
b/numpy/f2py/src/fortranobject.c index d6664d6bdfb7..1727f7ed0972 100644 --- a/numpy/f2py/src/fortranobject.c +++ b/numpy/f2py/src/fortranobject.c @@ -192,13 +192,24 @@ PyFortranObject_NewAsAttr(FortranDataDef *defs) } fp->len = 1; fp->defs = defs; + PyObject *name; if (defs->rank == -1) { - PyDict_SetItemString(fp->dict, "__name__", PyUnicode_FromFormat("function %s", defs->name)); + name = PyUnicode_FromFormat("function %s", defs->name); } else if (defs->rank == 0) { - PyDict_SetItemString(fp->dict, "__name__", PyUnicode_FromFormat("scalar %s", defs->name)); + name = PyUnicode_FromFormat("scalar %s", defs->name); } else { - PyDict_SetItemString(fp->dict, "__name__", PyUnicode_FromFormat("array %s", defs->name)); + name = PyUnicode_FromFormat("array %s", defs->name); } + if (name == NULL) { + Py_DECREF(fp); + return NULL; + } + if (PyDict_SetItemString(fp->dict, "__name__", name) < 0) { + Py_DECREF(name); + Py_DECREF(fp); + return NULL; + } + Py_DECREF(name); return (PyObject *)fp; } From 947e496df4e7fc552c6f73f88ffa780bd5a797a0 Mon Sep 17 00:00:00 2001 From: Noxaster <208382900+Noxaster@users.noreply.github.com> Date: Sun, 1 Feb 2026 21:08:10 +0100 Subject: [PATCH 1357/1718] DOC: Cleanup type documentation (#30758) Co-authored-by: Joren Hammudoglu --- doc/source/reference/arrays.dtypes.rst | 38 ++++++++++++++++---------- 1 file changed, 23 insertions(+), 15 deletions(-) diff --git a/doc/source/reference/arrays.dtypes.rst b/doc/source/reference/arrays.dtypes.rst index 262c22655c76..2a5b8ce50fc5 100644 --- a/doc/source/reference/arrays.dtypes.rst +++ b/doc/source/reference/arrays.dtypes.rst @@ -258,21 +258,29 @@ Array-protocol type strings (see :ref:`arrays.interface`) which represents boolean. The item size must correspond to an existing type, or an error will be raised. 
The supported kinds are - ================= ======================== - ``'?'``, ``'b1'`` boolean - ``'b'`` (signed) byte - ``'B'`` unsigned byte - ``'i'`` (signed) integer - ``'u'`` unsigned integer - ``'f'`` floating-point - ``'c'`` complex-floating point - ``'m'`` timedelta - ``'M'`` datetime - ``'O'`` (Python) objects - ``'S'``, ``'a'`` zero-terminated bytes (not recommended) - ``'U'`` Unicode string - ``'V'`` raw data (:class:`void`) - ================= ======================== + ================== ======================== + ``'?'`` boolean + ``'b'`` (signed) byte + ``'B'`` unsigned byte + ``'h'`` (signed) short + ``'H'`` unsigned short + ``'i'`` (signed) integer + ``'I'`` unsigned integer + ``'l'`` (signed) long integer + ``'L'`` unsigned long integer + ``'q'`` (signed) long long integer + ``'Q'`` unsigned long long integer + ``'f'`` single precision + ``'F'`` complex single precision + ``'d'`` double precision + ``'D'`` complex double precision + ``'g'`` long precision + ``'G'`` complex long double precision + ``'O'`` (Python) objects + ``'S'`` zero-terminated bytes (not recommended) + ``'U'`` Unicode string + ``'V'`` raw data (:class:`void`) + ================== ======================== .. 
admonition:: Example From 67b74f78faecb9f7da700b588c83808cb899e151 Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Mon, 2 Feb 2026 08:40:41 +1100 Subject: [PATCH 1358/1718] MAINT: update scipy-openblas to one with risc-V --- requirements/ci32_requirements.txt | 2 +- requirements/ci_requirements.txt | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/requirements/ci32_requirements.txt b/requirements/ci32_requirements.txt index 6f396a0d6a06..da8c8141917f 100644 --- a/requirements/ci32_requirements.txt +++ b/requirements/ci32_requirements.txt @@ -1,3 +1,3 @@ spin # Keep this in sync with ci_requirements.txt -scipy-openblas32==0.3.31.22.0 +scipy-openblas32==0.3.31.22.1 diff --git a/requirements/ci_requirements.txt b/requirements/ci_requirements.txt index 0b57a13dff06..6a63af65d96c 100644 --- a/requirements/ci_requirements.txt +++ b/requirements/ci_requirements.txt @@ -1,4 +1,4 @@ spin # Keep this in sync with ci32_requirements.txt -scipy-openblas32==0.3.31.22.0 -scipy-openblas64==0.3.31.22.0 +scipy-openblas32==0.3.31.22.1 +scipy-openblas64==0.3.31.22.1 From 9bb5f7fa3da72a48e1759eb3ad368aad5c216fdc Mon Sep 17 00:00:00 2001 From: mayeut Date: Mon, 2 Feb 2026 11:13:06 +0100 Subject: [PATCH 1359/1718] better formatting --- numpy/f2py/f90mod_rules.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/numpy/f2py/f90mod_rules.py b/numpy/f2py/f90mod_rules.py index e01ac04e9d64..5cd7637a95c2 100644 --- a/numpy/f2py/f90mod_rules.py +++ b/numpy/f2py/f90mod_rules.py @@ -237,8 +237,14 @@ def iadd(line, s=ihooks): % (F_FUNC, m['name'], m['name'].upper(), m['name'])) iadd('}\n') ret['f90modhooks'] = ret['f90modhooks'] + chooks + ihooks - ret['initf90modhooks'] = ['\t{ PyObject *tmp = PyFortranObject_New(f2py_%s_def,f2py_init_%s); PyDict_SetItemString(d, "%s", tmp); Py_XDECREF(tmp); }' % ( - m['name'], m['name'], m['name'])] + ret['initf90modhooks'] + ret['initf90modhooks'] = [ + '\t{', + '\t\tPyObject *tmp = 
PyFortranObject_New(f2py_%s_def,f2py_init_%s);' + % (m['name'], m['name']), + '\t\tPyDict_SetItemString(d, "%s", tmp);' % (m['name'],), + '\t\tPy_XDECREF(tmp);', + '\t}', + ] + ret["initf90modhooks"] fadd('') fadd(f"subroutine f2pyinit{m['name']}(f2pysetupfunc)") if mfargs: From 78c8d77a93e81628766e050e03ec11f3133f7837 Mon Sep 17 00:00:00 2001 From: Sabaa Siddique <103187876+sabasiddique1@users.noreply.github.com> Date: Mon, 2 Feb 2026 20:53:24 +0500 Subject: [PATCH 1360/1718] DOC: Fix `f2py` Project layout code block formatting (#30769) --- doc/source/f2py/usage.rst | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/doc/source/f2py/usage.rst b/doc/source/f2py/usage.rst index ec936bb72e1c..a1fd38d57b9d 100644 --- a/doc/source/f2py/usage.rst +++ b/doc/source/f2py/usage.rst @@ -279,13 +279,13 @@ This example shows how to build the ``add`` extension from the ``add.f`` and ``a files described in the :ref:`f2py-examples` (note that you do not always need a ``.pyf`` file: in many cases ``f2py`` can figure out the annotations by itself). -Project layout: +Project layout:: - f2py_examples/ - meson.build - add.f - add.pyf (optional) - __init__.py (can be empty) + f2py_examples/ + meson.build + add.f + add.pyf (optional) + __init__.py (can be empty) Example ``meson.build``: From 691fabf157335274cbc318ee51de2db182f76543 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 2 Feb 2026 13:42:19 -0700 Subject: [PATCH 1361/1718] MAINT: silence unused variable warning --- numpy/_core/src/multiarray/arrayobject.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/numpy/_core/src/multiarray/arrayobject.c b/numpy/_core/src/multiarray/arrayobject.c index a47abc372c98..9a536227c3be 100644 --- a/numpy/_core/src/multiarray/arrayobject.c +++ b/numpy/_core/src/multiarray/arrayobject.c @@ -466,6 +466,8 @@ array_dealloc(PyArrayObject *self) { // NPY_TRUE flags that errors are unraisable. 
int ret = _clear_array_attributes(self, NPY_TRUE); + // silence unused variable warning in release builds + (void)ret; assert(ret == 0); // should always succeed if unraisable. // Only done on actual deallocation, nothing allocated by numpy. if (((PyArrayObject_fields *)self)->weakreflist != NULL) { From 59f95a335c53a75cd02779bbc710333d3555b5d0 Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Tue, 3 Feb 2026 02:27:40 +0200 Subject: [PATCH 1362/1718] MAINT: drop pypy (#30764) --- .github/workflows/linux.yml | 2 +- .spin/cmds.py | 7 +- numpy/_core/_add_newdocs.py | 3 - numpy/_core/_internal.py | 10 +- numpy/_core/_internal.pyi | 2 - numpy/_core/arrayprint.py | 2 +- numpy/_core/include/numpy/ndarrayobject.h | 9 -- numpy/_core/src/common/gil_utils.c | 4 - numpy/_core/src/common/ufunc_override.c | 2 +- .../src/multiarray/_multiarray_tests.c.src | 4 +- numpy/_core/src/multiarray/alloc.cpp | 3 - numpy/_core/src/multiarray/array_coercion.c | 2 +- numpy/_core/src/multiarray/dtypemeta.c | 2 +- numpy/_core/src/multiarray/methods.c | 2 +- numpy/_core/src/multiarray/multiarraymodule.c | 2 - numpy/_core/src/multiarray/sequence.c | 1 - numpy/_core/src/multiarray/shape.c | 7 - numpy/_core/src/multiarray/temp_elide.c | 6 +- numpy/_core/src/umath/override.c | 2 +- numpy/_core/tests/test_dlpack.py | 4 +- numpy/_core/tests/test_dtype.py | 2 - numpy/_core/tests/test_function_base.py | 2 - numpy/_core/tests/test_indexing.py | 2 - numpy/_core/tests/test_limited_api.py | 3 +- numpy/_core/tests/test_memmap.py | 11 +- numpy/_core/tests/test_multiarray.py | 127 +++++++----------- numpy/_core/tests/test_nditer.py | 4 - numpy/_core/tests/test_numeric.py | 3 - numpy/_core/tests/test_numerictypes.py | 12 +- numpy/_core/tests/test_regression.py | 23 +--- numpy/_core/tests/test_scalar_methods.py | 3 +- numpy/_core/tests/test_scalarmath.py | 27 ++-- numpy/_core/tests/test_shape_base.py | 5 - numpy/_core/tests/test_stringdtype.py | 7 +- numpy/_core/tests/test_ufunc.py | 3 - 
numpy/_core/tests/test_umath.py | 7 +- numpy/_pytesttester.py | 7 +- numpy/conftest.py | 2 - numpy/f2py/cb_rules.py | 22 +-- numpy/f2py/cfuncs.py | 2 +- numpy/f2py/tests/test_block_docstring.py | 4 - numpy/f2py/tests/test_callback.py | 3 - numpy/f2py/tests/test_mixed.py | 4 - numpy/f2py/tests/test_modules.py | 3 - numpy/lib/tests/test_format.py | 2 - numpy/lib/tests/test_io.py | 22 +-- numpy/lib/tests/test_loadtxt.py | 6 +- numpy/random/tests/test_regression.py | 3 +- numpy/typing/tests/data/reveal/testing.pyi | 1 - pyproject.toml | 2 +- 50 files changed, 90 insertions(+), 310 deletions(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index fd8e7f8e40bc..0428389922c4 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -1,7 +1,7 @@ name: Linux tests # This file is meant for testing across supported Python versions, build types -# and interpreters (PyPy, python-dbg, a pre-release Python in summer time), +# and interpreters (python-dbg, a pre-release Python in summer time), # build-via-sdist, run benchmarks, measure code coverage, and other build # options. 
diff --git a/.spin/cmds.py b/.spin/cmds.py index 1f5b0d78611d..609b32485e19 100644 --- a/.spin/cmds.py +++ b/.spin/cmds.py @@ -9,8 +9,6 @@ import spin from spin.cmds import meson -IS_PYPY = (sys.implementation.name == 'pypy') - # Check that the meson git submodule is present curdir = pathlib.Path(__file__).parent meson_import_dir = curdir.parent / 'vendored-meson' / 'meson' / 'mesonbuild' @@ -129,10 +127,7 @@ def docs(*, parent_callback, **kwargs): jobs_param = next(p for p in docs.params if p.name == 'jobs') jobs_param.default = 1 -if IS_PYPY: - default = "not slow and not slow_pypy" -else: - default = "not slow" +default = "not slow" @click.option( "-m", diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index 9a0534cdf2fc..30ed3c11ac73 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -4174,9 +4174,6 @@ def _array_method_doc(name: str, params: str, doc: str) -> None: ValueError If `a` does not own its own data or references or views to it exist, and the data memory must be changed. - PyPy only: will always raise if the data memory must be changed, since - there is no reliable way to determine if references or views to it - exist. SystemError If the `order` keyword argument is specified. This behaviour is a diff --git a/numpy/_core/_internal.py b/numpy/_core/_internal.py index 7c64daf30dbd..ce796d5ee6a7 100644 --- a/numpy/_core/_internal.py +++ b/numpy/_core/_internal.py @@ -20,8 +20,6 @@ except ImportError: ctypes = None -IS_PYPY = sys.implementation.name == 'pypy' - if sys.byteorder == 'little': _nbo = '<' else: @@ -949,12 +947,8 @@ def npy_ctypes_check(cls): try: # ctypes class are new-style, so have an __mro__. This probably fails # for ctypes classes with multiple inheritance. 
- if IS_PYPY: - # (..., _ctypes.basics._CData, Bufferable, object) - ctype_base = cls.__mro__[-3] - else: - # # (..., _ctypes._CData, object) - ctype_base = cls.__mro__[-2] + # # (..., _ctypes._CData, object) + ctype_base = cls.__mro__[-2] # right now, they're part of the _ctypes module return '_ctypes' in ctype_base.__module__ except Exception: diff --git a/numpy/_core/_internal.pyi b/numpy/_core/_internal.pyi index 179e077629b6..777bcd5561b2 100644 --- a/numpy/_core/_internal.pyi +++ b/numpy/_core/_internal.pyi @@ -12,8 +12,6 @@ _PT_co = TypeVar("_PT_co", bound=int | None, default=None, covariant=True) ### -IS_PYPY: Final[bool] = ... - format_re: Final[re.Pattern[str]] = ... sep_re: Final[re.Pattern[str]] = ... space_re: Final[re.Pattern[str]] = ... diff --git a/numpy/_core/arrayprint.py b/numpy/_core/arrayprint.py index 96c17285bb3d..4fc9c95ce5e8 100644 --- a/numpy/_core/arrayprint.py +++ b/numpy/_core/arrayprint.py @@ -964,7 +964,7 @@ def recurser(index, hanging_indent, curr_width): finally: # recursive closures have a cyclic reference to themselves, which # requires gc to collect (gh-10620). To avoid this problem, for - # performance and PyPy friendliness, we break the cycle: + # performance, we break the cycle: recurser = None def _none_or_positive_arg(x, name): diff --git a/numpy/_core/include/numpy/ndarrayobject.h b/numpy/_core/include/numpy/ndarrayobject.h index 6bfc40fa6e2f..82a1589ff075 100644 --- a/numpy/_core/include/numpy/ndarrayobject.h +++ b/numpy/_core/include/numpy/ndarrayobject.h @@ -220,15 +220,6 @@ NPY_TITLE_KEY_check(PyObject *key, PyObject *value) if (key == title) { return 1; } -#ifdef PYPY_VERSION - /* - * On PyPy, dictionary keys do not always preserve object identity. - * Fall back to comparison by value. - */ - if (PyUnicode_Check(title) && PyUnicode_Check(key)) { - return PyUnicode_Compare(title, key) == 0 ? 
1 : 0; - } -#endif return 0; } diff --git a/numpy/_core/src/common/gil_utils.c b/numpy/_core/src/common/gil_utils.c index 95af26a2bf8e..c87cbe2d64ae 100644 --- a/numpy/_core/src/common/gil_utils.c +++ b/numpy/_core/src/common/gil_utils.c @@ -16,9 +16,6 @@ npy_gil_error(PyObject *type, const char *format, ...) NPY_ALLOW_C_API_DEF; NPY_ALLOW_C_API; if (!PyErr_Occurred()) { -#if !defined(PYPY_VERSION) - PyErr_FormatV(type, format, va); -#else PyObject *exc_str = PyUnicode_FromFormatV(format, va); if (exc_str == NULL) { // no reason to have special handling for this error case, since @@ -29,7 +26,6 @@ npy_gil_error(PyObject *type, const char *format, ...) } PyErr_SetObject(type, exc_str); Py_DECREF(exc_str); -#endif } NPY_DISABLE_C_API; va_end(va); diff --git a/numpy/_core/src/common/ufunc_override.c b/numpy/_core/src/common/ufunc_override.c index 0bcbea5baa30..1ed7165a4e83 100644 --- a/numpy/_core/src/common/ufunc_override.c +++ b/numpy/_core/src/common/ufunc_override.c @@ -105,7 +105,7 @@ PyUFuncOverride_GetOutObjects(PyObject *kwds, PyObject **out_kwd_obj, PyObject * if (PyTuple_CheckExact(*out_kwd_obj)) { /* * The C-API recommends calling PySequence_Fast before any of the other - * PySequence_Fast* functions. This is required for PyPy + * PySequence_Fast* functions. 
*/ PyObject *seq; seq = PySequence_Fast(*out_kwd_obj, // noqa: borrowed-ref OK diff --git a/numpy/_core/src/multiarray/_multiarray_tests.c.src b/numpy/_core/src/multiarray/_multiarray_tests.c.src index 9aaf0013958a..f79ff9486fe4 100644 --- a/numpy/_core/src/multiarray/_multiarray_tests.c.src +++ b/numpy/_core/src/multiarray/_multiarray_tests.c.src @@ -676,7 +676,7 @@ npy_create_writebackifcopy(PyObject* NPY_UNUSED(self), PyObject* args) return array; } -/* used to test WRITEBACKIFCOPY without resolution emits runtime warning */ +/* used to test WRITEBACKIFCOPY without resolution, emits runtime warning */ static PyObject* npy_abuse_writebackifcopy(PyObject* NPY_UNUSED(self), PyObject* args) { @@ -690,7 +690,7 @@ npy_abuse_writebackifcopy(PyObject* NPY_UNUSED(self), PyObject* args) array = PyArray_FromArray((PyArrayObject*)args, NULL, flags); if (array == NULL) return NULL; - Py_DECREF(array); /* calls array_dealloc even on PyPy */ + Py_DECREF(array); /* calls array_dealloc */ Py_RETURN_NONE; } diff --git a/numpy/_core/src/multiarray/alloc.cpp b/numpy/_core/src/multiarray/alloc.cpp index e2b632616a6f..958d9a309db7 100644 --- a/numpy/_core/src/multiarray/alloc.cpp +++ b/numpy/_core/src/multiarray/alloc.cpp @@ -176,9 +176,6 @@ _npy_alloc_cache(npy_uintp nelem, npy_uintp esz, npy_uint msz, #endif p = alloc(nelem * esz); if (p) { -#ifdef _PyPyGC_AddMemoryPressure - _PyPyPyGC_AddMemoryPressure(nelem * esz); -#endif indicate_hugepages(p, nelem * esz); } return p; diff --git a/numpy/_core/src/multiarray/array_coercion.c b/numpy/_core/src/multiarray/array_coercion.c index 3c5373e7f28e..24982a4fdc1e 100644 --- a/numpy/_core/src/multiarray/array_coercion.c +++ b/numpy/_core/src/multiarray/array_coercion.c @@ -1138,7 +1138,7 @@ PyArray_DiscoverDTypeAndShape_Recursive( force_sequence_due_to_char_dtype: - /* Ensure we have a sequence (required for PyPy) */ + /* Ensure we have a sequence */ seq = PySequence_Fast(obj, "Could not convert object to sequence"); // noqa: borrowed-ref - 
manual fix needed if (seq == NULL) { /* diff --git a/numpy/_core/src/multiarray/dtypemeta.c b/numpy/_core/src/multiarray/dtypemeta.c index bada1addd9cc..c8fb6a1c8490 100644 --- a/numpy/_core/src/multiarray/dtypemeta.c +++ b/numpy/_core/src/multiarray/dtypemeta.c @@ -1142,7 +1142,7 @@ dtypemeta_wrap_legacy_descriptor( * a prototype instances for everything except our own fields which * vary between the DTypes. * In particular any Object initialization must be strictly copied from - * the untouched prototype to avoid complexities (e.g. with PyPy). + * the untouched prototype to avoid complexities. * Any Type slots need to be fixed before PyType_Ready, although most * will be inherited automatically there. */ diff --git a/numpy/_core/src/multiarray/methods.c b/numpy/_core/src/multiarray/methods.c index 419a3c1bd2d0..5333ea7b7538 100644 --- a/numpy/_core/src/multiarray/methods.c +++ b/numpy/_core/src/multiarray/methods.c @@ -1020,7 +1020,7 @@ any_array_ufunc_overrides(PyObject *args, PyObject *kwds) return -1; } for (i = 0; i < nin; ++i) { -#if defined(PYPY_VERSION) || defined(Py_LIMITED_API) +#if defined(Py_LIMITED_API) PyObject *obj = PyTuple_GetItem(args, i); if (obj == NULL) { return -1; diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index 668da32add18..2f381a1c7aa6 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -4470,7 +4470,6 @@ _blas_supports_fpe(PyObject *NPY_UNUSED(self), PyObject *arg) { static PyObject * _reload_guard(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args)) { -#if !defined(PYPY_VERSION) if (PyThreadState_Get()->interp != PyInterpreterState_Main()) { if (PyErr_WarnEx(PyExc_UserWarning, "NumPy was imported from a Python sub-interpreter but " @@ -4488,7 +4487,6 @@ _reload_guard(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args)) { npy_global_state.reload_guard_initialized = 1; Py_RETURN_NONE; } -#endif if 
(npy_global_state.reload_guard_initialized) { if (PyErr_WarnEx(PyExc_UserWarning, "The NumPy module was reloaded (imported a second time). " diff --git a/numpy/_core/src/multiarray/sequence.c b/numpy/_core/src/multiarray/sequence.c index 4c94bb798072..ce2e2059e218 100644 --- a/numpy/_core/src/multiarray/sequence.c +++ b/numpy/_core/src/multiarray/sequence.c @@ -56,7 +56,6 @@ array_concat(PyObject *self, PyObject *other) { /* * Throw a type error, when trying to concat NDArrays - * NOTE: This error is not Thrown when running with PyPy */ PyErr_SetString(PyExc_TypeError, "Concatenation operation is not implemented for NumPy arrays, " diff --git a/numpy/_core/src/multiarray/shape.c b/numpy/_core/src/multiarray/shape.c index fce61ef36e63..a34af9f9f12b 100644 --- a/numpy/_core/src/multiarray/shape.c +++ b/numpy/_core/src/multiarray/shape.c @@ -86,12 +86,6 @@ PyArray_Resize_int(PyArrayObject *self, PyArray_Dims *newshape, int refcheck) return -1; } if (refcheck) { -#ifdef PYPY_VERSION - PyErr_SetString(PyExc_ValueError, - "cannot resize an array with refcheck=True on PyPy.\n" - "Use the np.resize function or refcheck=False"); - return -1; -#else #if PY_VERSION_HEX >= 0x030E00B0 if (!PyUnstable_Object_IsUniquelyReferenced((PyObject *)self)) { #else @@ -105,7 +99,6 @@ PyArray_Resize_int(PyArrayObject *self, PyArray_Dims *newshape, int refcheck) "Use the np.resize function or refcheck=False"); return -1; } -#endif /* PYPY_VERSION */ } /* Reallocate space if needed - allocating 0 is forbidden */ PyObject *handler = PyArray_HANDLER(self); diff --git a/numpy/_core/src/multiarray/temp_elide.c b/numpy/_core/src/multiarray/temp_elide.c index ea6cac08f78b..6a26ee1f7485 100644 --- a/numpy/_core/src/multiarray/temp_elide.c +++ b/numpy/_core/src/multiarray/temp_elide.c @@ -59,7 +59,7 @@ * supported too by using the appropriate Windows APIs. */ -#if defined HAVE_BACKTRACE && defined HAVE_DLFCN_H && ! 
defined PYPY_VERSION +#if defined HAVE_BACKTRACE && defined HAVE_DLFCN_H #include @@ -113,9 +113,9 @@ find_addr(void * addresses[], npy_intp naddr, void * addr) static int check_unique_temporary(PyObject *lhs) { -#if PY_VERSION_HEX == 0x030E00A7 && !defined(PYPY_VERSION) +#if PY_VERSION_HEX == 0x030E00A7 #error "NumPy is broken on CPython 3.14.0a7, please update to a newer version" -#elif PY_VERSION_HEX >= 0x030E00B1 && !defined(PYPY_VERSION) +#elif PY_VERSION_HEX >= 0x030E00B1 // Python 3.14 changed the semantics for reference counting temporaries // see https://github.com/python/cpython/issues/133164 return PyUnstable_Object_IsUniqueReferencedTemporary(lhs); diff --git a/numpy/_core/src/umath/override.c b/numpy/_core/src/umath/override.c index 139d9c7bdbbd..421359eb6203 100644 --- a/numpy/_core/src/umath/override.c +++ b/numpy/_core/src/umath/override.c @@ -342,7 +342,7 @@ PyUFunc_CheckOverride(PyUFuncObject *ufunc, char *method, } /* * Set override arguments for each call since the tuple must - * not be mutated after use in PyPy + * not be mutated after use * We increase all references since SET_ITEM steals * them and they will be DECREF'd when the tuple is deleted. 
*/ diff --git a/numpy/_core/tests/test_dlpack.py b/numpy/_core/tests/test_dlpack.py index e8198ac1823e..239f34559cef 100644 --- a/numpy/_core/tests/test_dlpack.py +++ b/numpy/_core/tests/test_dlpack.py @@ -3,7 +3,7 @@ import pytest import numpy as np -from numpy.testing import IS_PYPY, assert_array_equal +from numpy.testing import assert_array_equal def new_and_old_dlpack(): @@ -18,7 +18,6 @@ def __dlpack__(self, stream=None): class TestDLPack: - @pytest.mark.skipif(IS_PYPY, reason="PyPy can't get refcounts.") @pytest.mark.parametrize("max_version", [(0, 0), None, (1, 0), (100, 3)]) def test_dunder_dlpack_refcount(self, max_version): x = np.arange(5) @@ -54,7 +53,6 @@ def test_strides_not_multiple_of_itemsize(self): with pytest.raises(BufferError): np.from_dlpack(z) - @pytest.mark.skipif(IS_PYPY, reason="PyPy can't get refcounts.") @pytest.mark.parametrize("arr", new_and_old_dlpack()) def test_from_dlpack_refcount(self, arr): arr = arr.copy() diff --git a/numpy/_core/tests/test_dtype.py b/numpy/_core/tests/test_dtype.py index f1503592cb8e..0aa7a95d8835 100644 --- a/numpy/_core/tests/test_dtype.py +++ b/numpy/_core/tests/test_dtype.py @@ -19,7 +19,6 @@ from numpy.testing import ( HAS_REFCOUNT, IS_64BIT, - IS_PYPY, assert_, assert_array_equal, assert_equal, @@ -1980,7 +1979,6 @@ def test_creating_dtype_with_dtype_class_errors(): @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") -@pytest.mark.skipif(IS_PYPY, reason="PyPy does not modify tp_doc") class TestDTypeSignatures: def test_signature_dtype(self): sig = inspect.signature(np.dtype) diff --git a/numpy/_core/tests/test_function_base.py b/numpy/_core/tests/test_function_base.py index c6e10397b3ff..b78c79a6f032 100644 --- a/numpy/_core/tests/test_function_base.py +++ b/numpy/_core/tests/test_function_base.py @@ -21,7 +21,6 @@ from numpy._core import sctypes from numpy._core.function_base import add_newdoc from numpy.testing import ( - IS_PYPY, assert_, assert_allclose, assert_array_equal, @@ 
-487,7 +486,6 @@ def test_any_step_zero_and_not_mult_inplace(self): class TestAdd_newdoc: @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") - @pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") def test_add_doc(self): # test that np.add_newdoc did attach a docstring successfully: tgt = "Current flat index into the array." diff --git a/numpy/_core/tests/test_indexing.py b/numpy/_core/tests/test_indexing.py index 65d42d6c9370..7a8cd42c59aa 100644 --- a/numpy/_core/tests/test_indexing.py +++ b/numpy/_core/tests/test_indexing.py @@ -12,7 +12,6 @@ from numpy.exceptions import ComplexWarning, VisibleDeprecationWarning from numpy.testing import ( HAS_REFCOUNT, - IS_PYPY, assert_, assert_array_equal, assert_equal, @@ -1677,7 +1676,6 @@ def test_nonempty_string_flat_index_on_flatiter(self): @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") -@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") @pytest.mark.parametrize("methodname", ["__array__", "copy"]) def test_flatiter_method_signatures(methodname: str): method = getattr(np.flatiter, methodname) diff --git a/numpy/_core/tests/test_limited_api.py b/numpy/_core/tests/test_limited_api.py index 984210e53af7..30ed3023cc92 100644 --- a/numpy/_core/tests/test_limited_api.py +++ b/numpy/_core/tests/test_limited_api.py @@ -5,7 +5,7 @@ import pytest -from numpy.testing import IS_EDITABLE, IS_PYPY, IS_WASM, NOGIL_BUILD +from numpy.testing import IS_EDITABLE, IS_WASM, NOGIL_BUILD # This import is copied from random.tests.test_extending try: @@ -91,7 +91,6 @@ def install_temp(tmpdir_factory): NOGIL_BUILD, reason="Py_GIL_DISABLED builds do not currently support the limited API", ) -@pytest.mark.skipif(IS_PYPY, reason="no support for limited API in PyPy") def test_limited_api(install_temp): """Test building a third-party C extension with the limited API and building a cython extension with the limited API diff --git a/numpy/_core/tests/test_memmap.py 
b/numpy/_core/tests/test_memmap.py index 40222b62020a..8df78da067eb 100644 --- a/numpy/_core/tests/test_memmap.py +++ b/numpy/_core/tests/test_memmap.py @@ -21,13 +21,7 @@ subtract, sum, ) -from numpy.testing import ( - IS_PYPY, - assert_, - assert_array_equal, - assert_equal, - break_cycles, -) +from numpy.testing import assert_, assert_array_equal, assert_equal @pytest.mark.thread_unsafe(reason="setup & memmap is thread-unsafe (gh-29126)") @@ -41,9 +35,6 @@ def setup_method(self): def teardown_method(self): self.tmpfp.close() self.data = None - if IS_PYPY: - break_cycles() - break_cycles() def test_roundtrip(self): # Write data to file diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index b393a21cd839..1b76c7100b7f 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -37,7 +37,6 @@ BLAS_SUPPORTS_FPE, HAS_REFCOUNT, IS_64BIT, - IS_PYPY, IS_WASM, assert_, assert_allclose, @@ -180,7 +179,6 @@ def test_writeable_from_buffer(self): vals.setflags(write=True) assert_(vals.flags.writeable) - @pytest.mark.skipif(IS_PYPY, reason="PyPy always copies") def test_writeable_pickle(self): import pickle # Small arrays will be copied without setting base. 
@@ -599,7 +597,6 @@ def test_array_as_keyword(self, func): func(a=3) @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") - @pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") @pytest.mark.parametrize("func", [np.array, np.asarray, @@ -4023,7 +4020,6 @@ def test_inplace(self): # - defer if other has __array_ufunc__ and it is None # or other is not a subclass and has higher array priority # - else, call ufunc - @pytest.mark.xfail(IS_PYPY, reason="Bug in pypy, #24862") def test_ufunc_binop_interaction(self): # Python method name (without underscores) # -> (numpy ufunc, has_in_place_version, preferred_dtype) @@ -4507,8 +4503,6 @@ def test_intp_sequence_converters(self, converter): @pytest.mark.parametrize("converter", [_multiarray_tests.run_scalar_intp_converter, _multiarray_tests.run_scalar_intp_from_sequence]) - @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), - reason="PyPy bug in error formatting") def test_intp_sequence_converters_errors(self, converter): with pytest.raises(TypeError, match="expected a sequence of integers or a single integer, "): @@ -6253,9 +6247,6 @@ def test_array_base(self, obj): def test_empty(self): assert_array_equal(np.frombuffer(b''), np.array([])) - @pytest.mark.skipif(IS_PYPY, - reason="PyPy's memoryview currently does not track exports. See: " - "https://foss.heptapod.net/pypy/pypy/-/issues/3724") def test_mmap_close(self): # The old buffer protocol was not safe for some things that the new # one is. But `frombuffer` always used the old one for a long time. 
@@ -6361,10 +6352,7 @@ class TestResize: @_no_tracing def test_basic(self): x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) - if IS_PYPY: - x.resize((5, 5), refcheck=False) - else: - x.resize((5, 5)) + x.resize((5, 5)) assert_array_equal(x.flat[:9], np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).flat) assert_array_equal(x[9:].flat, 0) @@ -6384,10 +6372,7 @@ def test_check_reference_2(self): @_no_tracing def test_int_shape(self): x = np.eye(3) - if IS_PYPY: - x.resize(3, refcheck=False) - else: - x.resize(3) + x.resize(3) assert_array_equal(x, np.eye(3)[0, :]) def test_none_shape(self): @@ -6418,19 +6403,13 @@ def test_invalid_arguments(self): @_no_tracing def test_freeform_shape(self): x = np.eye(3) - if IS_PYPY: - x.resize(3, 2, 1, refcheck=False) - else: - x.resize(3, 2, 1) + x.resize(3, 2, 1) assert_(x.shape == (3, 2, 1)) @_no_tracing def test_zeros_appended(self): x = np.eye(3) - if IS_PYPY: - x.resize(2, 3, 3, refcheck=False) - else: - x.resize(2, 3, 3) + x.resize(2, 3, 3) assert_array_equal(x[0], np.eye(3)) assert_array_equal(x[1], np.zeros((3, 3))) @@ -6438,10 +6417,7 @@ def test_zeros_appended(self): def test_obj_obj(self): # check memory is initialized on resize, gh-4857 a = np.ones(10, dtype=[('k', object, 2)]) - if IS_PYPY: - a.resize(15, refcheck=False) - else: - a.resize(15,) + a.resize(15,) assert_equal(a.shape, (15,)) assert_array_equal(a['k'][-5:], 0) assert_array_equal(a['k'][:-5], 1) @@ -9148,8 +9124,7 @@ def test_order_mismatch(self, arr, order1, order2): for copy in self.if_needed_vals + self.false_vals: res = np.array(view, copy=copy, order=order2) # res.base.obj refers to the memoryview - if not IS_PYPY: - assert res is arr or res.base.obj is arr + assert res is arr or res.base.obj is arr else: for copy in self.if_needed_vals: res = np.array(arr, copy=copy, order=order2) @@ -9681,59 +9656,57 @@ def test_kwargs(self): np.where(a, x=a, y=a) -if not IS_PYPY: - # sys.getsizeof() is not valid on PyPy - class TestSizeOf: +class TestSizeOf: - def 
test_empty_array(self): - x = np.array([]) - assert_(sys.getsizeof(x) > 0) - - def check_array(self, dtype): - elem_size = dtype(0).itemsize + def test_empty_array(self): + x = np.array([]) + assert_(sys.getsizeof(x) > 0) - for length in [10, 50, 100, 500]: - x = np.arange(length, dtype=dtype) - assert_(sys.getsizeof(x) > length * elem_size) + def check_array(self, dtype): + elem_size = dtype(0).itemsize - def test_array_int32(self): - self.check_array(np.int32) + for length in [10, 50, 100, 500]: + x = np.arange(length, dtype=dtype) + assert_(sys.getsizeof(x) > length * elem_size) - def test_array_int64(self): - self.check_array(np.int64) + def test_array_int32(self): + self.check_array(np.int32) - def test_array_float32(self): - self.check_array(np.float32) + def test_array_int64(self): + self.check_array(np.int64) - def test_array_float64(self): - self.check_array(np.float64) + def test_array_float32(self): + self.check_array(np.float32) - def test_view(self): - d = np.ones(100) - assert_(sys.getsizeof(d[...]) < sys.getsizeof(d)) + def test_array_float64(self): + self.check_array(np.float64) - def test_reshape(self): - d = np.ones(100) - assert_(sys.getsizeof(d) < sys.getsizeof(d.reshape(100, 1, 1).copy())) + def test_view(self): + d = np.ones(100) + assert_(sys.getsizeof(d[...]) < sys.getsizeof(d)) - @_no_tracing - def test_resize(self): - d = np.ones(100) - old = sys.getsizeof(d) - d.resize(50) - assert_(old > sys.getsizeof(d)) - d.resize(150) - assert_(old < sys.getsizeof(d)) + def test_reshape(self): + d = np.ones(100) + assert_(sys.getsizeof(d) < sys.getsizeof(d.reshape(100, 1, 1).copy())) - @pytest.mark.parametrize("dtype", ["u4,f4", "u4,O"]) - def test_resize_structured(self, dtype): - a = np.array([(0, 0.0) for i in range(5)], dtype=dtype) - a.resize(1000) - assert_array_equal(a, np.zeros(1000, dtype=dtype)) + @_no_tracing + def test_resize(self): + d = np.ones(100) + old = sys.getsizeof(d) + d.resize(50) + assert_(old > sys.getsizeof(d)) + d.resize(150) 
+ assert_(old < sys.getsizeof(d)) + + @pytest.mark.parametrize("dtype", ["u4,f4", "u4,O"]) + def test_resize_structured(self, dtype): + a = np.array([(0, 0.0) for i in range(5)], dtype=dtype) + a.resize(1000) + assert_array_equal(a, np.zeros(1000, dtype=dtype)) - def test_error(self): - d = np.ones(100) - assert_raises(TypeError, d.__sizeof__, "a") + def test_error(self): + d = np.ones(100) + assert_raises(TypeError, d.__sizeof__, "a") class TestHashing: @@ -9981,11 +9954,6 @@ def test_ctypes_data_as_holds_reference(self, arr): # but when the `ctypes_ptr` object dies, so should `arr` del ctypes_ptr - if IS_PYPY: - # Pypy does not recycle arr objects immediately. Trigger gc to - # release arr. Cpython uses refcounts. An explicit call to gc - # should not be needed here. - break_cycles() assert_( arr_ref() is None, "unknowable whether ctypes pointer holds a reference", @@ -10006,8 +9974,6 @@ def test_ctypes_as_parameter_holds_reference(self): # but when the `ctypes_ptr` object dies, so should `arr` del ctypes_ptr - if IS_PYPY: - break_cycles() assert_( arr_ref() is None, "unknowable whether ctypes pointer holds a reference", @@ -11024,7 +10990,6 @@ def test_array_dunder_array_preserves_dtype_on_none(dtype): @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") -@pytest.mark.skipif(IS_PYPY, reason="PyPy does not modify tp_doc") class TestTextSignatures: @pytest.mark.parametrize( "methodname", diff --git a/numpy/_core/tests/test_nditer.py b/numpy/_core/tests/test_nditer.py index 775086353501..520e638b2edb 100644 --- a/numpy/_core/tests/test_nditer.py +++ b/numpy/_core/tests/test_nditer.py @@ -13,7 +13,6 @@ from numpy.testing import ( HAS_REFCOUNT, IS_64BIT, - IS_PYPY, IS_WASM, assert_, assert_array_equal, @@ -2206,7 +2205,6 @@ def test_buffered_cast_error_paths(): buf[...] = "a" # cannot be converted to int. 
@pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess") -@pytest.mark.skipif(not HAS_REFCOUNT, reason="PyPy seems to not hit this.") def test_buffered_cast_error_paths_unraisable(): # The following gives an unraisable error. Pytest sometimes captures that # (depending python and/or pytest version). So with Python>=3.8 this can @@ -3640,7 +3638,6 @@ def test_debug_print(capfd): @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") -@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") def test_signature_constructor(): sig = inspect.signature(np.nditer) @@ -3651,7 +3648,6 @@ def test_signature_constructor(): @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") -@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") @pytest.mark.parametrize( "method", [fn for name, fn in vars(np.nditer).items() if callable(fn) and name[0] != "_"], diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py index 45e238b27901..d32052bda176 100644 --- a/numpy/_core/tests/test_numeric.py +++ b/numpy/_core/tests/test_numeric.py @@ -19,7 +19,6 @@ from numpy.random import rand, randint, randn from numpy.testing import ( HAS_REFCOUNT, - IS_PYPY, IS_WASM, assert_, assert_almost_equal, @@ -3385,7 +3384,6 @@ def test_for_reference_leak(self): assert_(sys.getrefcount(dim) == beg) @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") - @pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") @pytest.mark.parametrize("func", [np.empty, np.zeros, np.ones, np.full]) def test_signatures(self, func): sig = inspect.signature(func) @@ -4182,7 +4180,6 @@ def test_shape_mismatch_error_message(self): np.broadcast([[1, 2, 3]], [[4], [5]], [6, 7]) @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") - @pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") def test_signatures(self): sig_new = inspect.signature(np.broadcast) assert len(sig_new.parameters) == 
1 diff --git a/numpy/_core/tests/test_numerictypes.py b/numpy/_core/tests/test_numerictypes.py index c47d86c39135..3fd26c32a500 100644 --- a/numpy/_core/tests/test_numerictypes.py +++ b/numpy/_core/tests/test_numerictypes.py @@ -6,13 +6,7 @@ import numpy as np import numpy._core.numerictypes as nt from numpy._core.numerictypes import issctype, sctype2char, sctypes -from numpy.testing import ( - IS_PYPY, - assert_, - assert_equal, - assert_raises, - assert_raises_regex, -) +from numpy.testing import assert_, assert_equal, assert_raises, assert_raises_regex # This is the structure of the table used for plain objects: # @@ -547,10 +541,6 @@ def test_issctype(rep, expected): sys.flags.optimize > 1, reason="no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1", ) -@pytest.mark.xfail( - IS_PYPY, - reason="PyPy cannot modify tp_doc after PyType_Ready", -) class TestDocStrings: def test_platform_dependent_aliases(self): if np.int64 is np.int_: diff --git a/numpy/_core/tests/test_regression.py b/numpy/_core/tests/test_regression.py index 32d61f8de935..24ce9330005d 100644 --- a/numpy/_core/tests/test_regression.py +++ b/numpy/_core/tests/test_regression.py @@ -17,7 +17,6 @@ from numpy.testing import ( HAS_REFCOUNT, IS_64BIT, - IS_PYPY, IS_WASM, _assert_valid_refcount, assert_, @@ -1294,15 +1293,9 @@ def test_blasdot_uninitialized_memory(self): for k in range(3): # Try to ensure that x->data contains non-zero floats x = np.array([123456789e199], dtype=np.float64) - if IS_PYPY: - x.resize((m, 0), refcheck=False) - else: - x.resize((m, 0)) + x.resize((m, 0)) y = np.array([123456789e199], dtype=np.float64) - if IS_PYPY: - y.resize((0, n), refcheck=False) - else: - y.resize((0, n)) + y.resize((0, n)) # `dot` should just return zero (m, n) matrix z = np.dot(x, y) @@ -2008,7 +2001,6 @@ def test_assign_obj_listoflists(self): a[...] 
= [[1, 2]] assert_equal(a, [[1, 2], [1, 2]]) - @pytest.mark.slow_pypy def test_memoryleak(self): # Ticket #1917 - ensure that array data doesn't leak for i in range(1000): @@ -2354,16 +2346,6 @@ def test_void_getitem(self): assert_(np.array([b'abc'], 'V3').astype('O') == b'abc') assert_(np.array([b'abcd'], 'V4').astype('O') == b'abcd') - def test_structarray_title(self): - # The following used to segfault on pypy, due to NPY_TITLE_KEY - # not working properly and resulting to double-decref of the - # structured array field items: - # See: https://bitbucket.org/pypy/pypy/issues/2789 - for j in range(5): - structure = np.array([1], dtype=[(('x', 'X'), np.object_)]) - structure[0]['x'] = np.array([2]) - gc.collect() - def test_dtype_scalar_squeeze(self): # gh-11384 values = { @@ -2550,7 +2532,6 @@ def test_nonbool_logical(self): expected = np.ones(size, dtype=np.bool) assert_array_equal(np.logical_and(a, b), expected) - @pytest.mark.skipif(IS_PYPY, reason="PyPy issue 2742") def test_gh_23737(self): with pytest.raises(TypeError, match="not an acceptable base type"): class Y(np.flexible): diff --git a/numpy/_core/tests/test_scalar_methods.py b/numpy/_core/tests/test_scalar_methods.py index b993a8f3df29..3cb00dc6ab64 100644 --- a/numpy/_core/tests/test_scalar_methods.py +++ b/numpy/_core/tests/test_scalar_methods.py @@ -12,7 +12,7 @@ import numpy as np from numpy._core import sctypes -from numpy.testing import IS_PYPY, assert_equal, assert_raises +from numpy.testing import assert_equal, assert_raises class TestAsIntegerRatio: @@ -257,7 +257,6 @@ def test_array_wrap(scalar): @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") -@pytest.mark.skipif(IS_PYPY, reason="PyPy does not modify tp_doc") class TestSignature: # test that scalar types have a valid __text_signature__ or __signature__ set @pytest.mark.parametrize( diff --git a/numpy/_core/tests/test_scalarmath.py b/numpy/_core/tests/test_scalarmath.py index c14d0b22af9d..04c78b065f25 100644 --- 
a/numpy/_core/tests/test_scalarmath.py +++ b/numpy/_core/tests/test_scalarmath.py @@ -15,7 +15,6 @@ from numpy._utils import _pep440 from numpy.exceptions import ComplexWarning from numpy.testing import ( - IS_PYPY, _gen_alignment_data, assert_, assert_almost_equal, @@ -520,14 +519,6 @@ def test_int_from_infinite_longdouble(self): x = np.clongdouble(np.inf) assert_raises(OverflowError, int, x) - @pytest.mark.skipif(not IS_PYPY, reason="Test is PyPy only (gh-9972)") - def test_int_from_infinite_longdouble___int__(self): - x = np.longdouble(np.inf) - assert_raises(OverflowError, x.__int__) - with pytest.warns(ComplexWarning): - x = np.clongdouble(np.inf) - assert_raises(OverflowError, x.__int__) - @pytest.mark.skipif(np.finfo(np.double) == np.finfo(np.longdouble), reason="long double is same as double") @pytest.mark.skipif(platform.machine().startswith("ppc"), @@ -652,18 +643,16 @@ def test_float_repr(self): self._test_type_repr(t) -if not IS_PYPY: - # sys.getsizeof() is not valid on PyPy - class TestSizeOf: +class TestSizeOf: - def test_equal_nbytes(self): - for type in types: - x = type(0) - assert_(sys.getsizeof(x) > x.nbytes) + def test_equal_nbytes(self): + for type in types: + x = type(0) + assert_(sys.getsizeof(x) > x.nbytes) - def test_error(self): - d = np.float32() - assert_raises(TypeError, d.__sizeof__, "a") + def test_error(self): + d = np.float32() + assert_raises(TypeError, d.__sizeof__, "a") class TestMultiply: diff --git a/numpy/_core/tests/test_shape_base.py b/numpy/_core/tests/test_shape_base.py index 5be3d05bbf11..e8a842ba5589 100644 --- a/numpy/_core/tests/test_shape_base.py +++ b/numpy/_core/tests/test_shape_base.py @@ -24,7 +24,6 @@ ) from numpy.exceptions import AxisError from numpy.testing import ( - IS_PYPY, assert_, assert_array_equal, assert_equal, @@ -389,10 +388,6 @@ def test_concatenate_same_value(self): with pytest.raises(ValueError, match="^casting must be one of"): concatenate([r4, r4], casting="same_value") - @pytest.mark.skipif( - 
IS_PYPY, - reason="PYPY handles sq_concat, nb_add differently than cpython" - ) def test_operator_concat(self): import operator a = array([1, 2]) diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index e8bc7c886d4e..281aaf41893e 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -11,7 +11,7 @@ import numpy as np from numpy._core.tests._natype import pd_NA from numpy.dtypes import StringDType -from numpy.testing import IS_PYPY, assert_array_equal +from numpy.testing import assert_array_equal def random_unicode_string_list(): @@ -596,10 +596,7 @@ def test_concatenate(string_list): def test_resize_method(string_list): sarr = np.array(string_list, dtype="T") - if IS_PYPY: - sarr.resize(len(string_list) + 3, refcheck=False) - else: - sarr.resize(len(string_list) + 3) + sarr.resize(len(string_list) + 3) assert_array_equal(sarr, np.array(string_list + [''] * 3, dtype="T")) diff --git a/numpy/_core/tests/test_ufunc.py b/numpy/_core/tests/test_ufunc.py index c93873e96610..d8ca3f2364f4 100644 --- a/numpy/_core/tests/test_ufunc.py +++ b/numpy/_core/tests/test_ufunc.py @@ -17,7 +17,6 @@ from numpy.exceptions import AxisError from numpy.testing import ( HAS_REFCOUNT, - IS_PYPY, IS_WASM, assert_, assert_allclose, @@ -215,7 +214,6 @@ def test_pickle_withstring(self): b"(S'numpy._core.umath'\np1\nS'cos'\np2\ntp3\nRp4\n.") assert_(pickle.loads(astring) is np.cos) - @pytest.mark.skipif(IS_PYPY, reason="'is' check does not work on PyPy") def test_pickle_name_is_qualname(self): # This tests that a simplification of our ufunc pickle code will # lead to allowing qualnames as names. 
Future ufuncs should @@ -2978,7 +2976,6 @@ def test_ufunc_input_floatingpoint_error(bad_offset): @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") -@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") @pytest.mark.parametrize( "methodname", ["__call__", "accumulate", "at", "outer", "reduce", "reduceat", "resolve_dtypes"], diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py index fa7622cca482..8c5af69af9a7 100644 --- a/numpy/_core/tests/test_umath.py +++ b/numpy/_core/tests/test_umath.py @@ -17,7 +17,6 @@ from numpy.testing import ( HAS_REFCOUNT, IS_MUSL, - IS_PYPY, IS_WASM, _gen_alignment_data, assert_, @@ -4189,13 +4188,10 @@ def test_array_ufunc_direct_call(self): assert_array_equal(res, a + a) @pytest.mark.thread_unsafe(reason="modifies global module") - @pytest.mark.skipif(IS_PYPY, reason="__signature__ descriptor dance fails") def test_ufunc_docstring(self): original_doc = np.add.__doc__ new_doc = "new docs" - expected_dict = ( - {} if IS_PYPY else {"__module__": "numpy", "__qualname__": "add"} - ) + expected_dict = {"__module__": "numpy", "__qualname__": "add"} expected_dict["__signature__"] = inspect.signature(np.add) np.add.__doc__ = new_doc @@ -5104,7 +5100,6 @@ def test_bad_legacy_gufunc_silent_errors(x1): class TestAddDocstring: @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") - @pytest.mark.skipif(IS_PYPY, reason="PyPy does not modify tp_doc") def test_add_same_docstring(self): # test for attributes (which are C-level defined) ncu.add_docstring(np.ndarray.flat, np.ndarray.flat.__doc__) diff --git a/numpy/_pytesttester.py b/numpy/_pytesttester.py index 25f5300a74ac..55583561a19d 100644 --- a/numpy/_pytesttester.py +++ b/numpy/_pytesttester.py @@ -162,12 +162,7 @@ def __call__(self, label='fast', verbose=1, extra_argv=None, pytest_args += ["--cov=" + module_path] if label == "fast": - # not importing at the top level to avoid circular import of module - from 
numpy.testing import IS_PYPY - if IS_PYPY: - pytest_args += ["-m", "not slow and not slow_pypy"] - else: - pytest_args += ["-m", "not slow"] + pytest_args += ["-m", "not slow"] elif label != "full": pytest_args += ["-m", label] diff --git a/numpy/conftest.py b/numpy/conftest.py index 7880e0cf1ac3..30625d90c69c 100644 --- a/numpy/conftest.py +++ b/numpy/conftest.py @@ -66,8 +66,6 @@ def pytest_configure(config): "leaks_references: Tests that are known to leak references.") config.addinivalue_line("markers", "slow: Tests that are very slow.") - config.addinivalue_line("markers", - "slow_pypy: Tests that are very slow on pypy.") if not PARALLEL_RUN_AVALIABLE: config.addinivalue_line("markers", "parallel_threads(n): run the given test function in parallel " diff --git a/numpy/f2py/cb_rules.py b/numpy/f2py/cb_rules.py index 238d473113e0..dcc75ec6f969 100644 --- a/numpy/f2py/cb_rules.py +++ b/numpy/f2py/cb_rules.py @@ -143,31 +143,15 @@ goto capi_fail; } #setdims# -#ifdef PYPY_VERSION -#define CAPI_ARGLIST_SETITEM(idx, value) PyList_SetItem((PyObject *)capi_arglist_list, idx, value) - capi_arglist_list = PySequence_List((PyObject *)capi_arglist); - if (capi_arglist_list == NULL) goto capi_fail; -#else #define CAPI_ARGLIST_SETITEM(idx, value) PyTuple_SetItem((PyObject *)capi_arglist, idx, value) -#endif #pyobjfrom# #undef CAPI_ARGLIST_SETITEM -#ifdef PYPY_VERSION - CFUNCSMESSPY(\"cb:capi_arglist=\",capi_arglist_list); -#else - CFUNCSMESSPY(\"cb:capi_arglist=\",capi_arglist); -#endif - CFUNCSMESS(\"cb:Call-back calling Python function #argname#.\\n\"); +CFUNCSMESSPY(\"cb:capi_arglist=\",capi_arglist); +CFUNCSMESS(\"cb:Call-back calling Python function #argname#.\\n\"); #ifdef F2PY_REPORT_ATEXIT f2py_cb_start_call_clock(); #endif -#ifdef PYPY_VERSION - capi_return = PyObject_CallObject(cb->capi,(PyObject *)capi_arglist_list); - Py_DECREF(capi_arglist_list); - capi_arglist_list = NULL; -#else - capi_return = PyObject_CallObject(cb->capi,(PyObject *)capi_arglist); -#endif 
+capi_return = PyObject_CallObject(cb->capi,(PyObject *)capi_arglist); #ifdef F2PY_REPORT_ATEXIT f2py_cb_stop_call_clock(); #endif diff --git a/numpy/f2py/cfuncs.py b/numpy/f2py/cfuncs.py index b2b1cad3d867..f48617f8e878 100644 --- a/numpy/f2py/cfuncs.py +++ b/numpy/f2py/cfuncs.py @@ -1346,7 +1346,7 @@ def errmess(s: str) -> None: Py_INCREF(tmp_fun); tot = maxnofargs; if (PyCFunction_Check(fun)) { - /* In case the function has a co_argcount (like on PyPy) */ + /* In case the function has a co_argcount */ di = 0; } if (xa != NULL) diff --git a/numpy/f2py/tests/test_block_docstring.py b/numpy/f2py/tests/test_block_docstring.py index ba255a1b473c..0a35f2e34a7e 100644 --- a/numpy/f2py/tests/test_block_docstring.py +++ b/numpy/f2py/tests/test_block_docstring.py @@ -2,8 +2,6 @@ import pytest -from numpy.testing import IS_PYPY - from . import util @@ -13,8 +11,6 @@ class TestBlockDocString(util.F2PyTest): @pytest.mark.skipif(sys.platform == "win32", reason="Fails with MinGW64 Gfortran (Issue #9673)") - @pytest.mark.xfail(IS_PYPY, - reason="PyPy cannot modify tp_doc after PyType_Ready") def test_block_docstring(self): expected = "bar : 'i'-array(2,3)\n" assert self.module.block.__doc__ == expected diff --git a/numpy/f2py/tests/test_callback.py b/numpy/f2py/tests/test_callback.py index 6614efb16db8..1560c73d01fc 100644 --- a/numpy/f2py/tests/test_callback.py +++ b/numpy/f2py/tests/test_callback.py @@ -9,7 +9,6 @@ import pytest import numpy as np -from numpy.testing import IS_PYPY from . 
import util @@ -22,8 +21,6 @@ class TestF77Callback(util.F2PyTest): def test_all(self, name): self.check_function(name) - @pytest.mark.xfail(IS_PYPY, - reason="PyPy cannot modify tp_doc after PyType_Ready") def test_docstring(self): expected = textwrap.dedent("""\ a = t(fun,[fun_extra_args]) diff --git a/numpy/f2py/tests/test_mixed.py b/numpy/f2py/tests/test_mixed.py index 07f43e2bcfaa..bb3a5e541859 100644 --- a/numpy/f2py/tests/test_mixed.py +++ b/numpy/f2py/tests/test_mixed.py @@ -2,8 +2,6 @@ import pytest -from numpy.testing import IS_PYPY - from . import util @@ -20,8 +18,6 @@ def test_all(self): assert self.module.foo_fixed.bar12() == 12 assert self.module.foo_free.bar13() == 13 - @pytest.mark.xfail(IS_PYPY, - reason="PyPy cannot modify tp_doc after PyType_Ready") def test_docstring(self): expected = textwrap.dedent("""\ a = bar11() diff --git a/numpy/f2py/tests/test_modules.py b/numpy/f2py/tests/test_modules.py index 96d5ffc66093..36b6060bcfc7 100644 --- a/numpy/f2py/tests/test_modules.py +++ b/numpy/f2py/tests/test_modules.py @@ -2,8 +2,6 @@ import pytest -from numpy.testing import IS_PYPY - from . 
import util @@ -42,7 +40,6 @@ def test_gh26920(self): class TestModuleDocString(util.F2PyTest): sources = [util.getpath("tests", "src", "modules", "module_data_docstring.f90")] - @pytest.mark.xfail(IS_PYPY, reason="PyPy cannot modify tp_doc after PyType_Ready") def test_module_docstring(self): assert self.module.mod.__doc__ == textwrap.dedent( """\ diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py index 7bbded153725..b616d2fa8afb 100644 --- a/numpy/lib/tests/test_format.py +++ b/numpy/lib/tests/test_format.py @@ -285,7 +285,6 @@ from numpy.lib import format from numpy.testing import ( IS_64BIT, - IS_PYPY, IS_WASM, assert_, assert_array_equal, @@ -943,7 +942,6 @@ def test_large_file_support(tmpdir): assert_array_equal(r, d) -@pytest.mark.skipif(IS_PYPY, reason="flaky on PyPy") @pytest.mark.skipif(not IS_64BIT, reason="test requires 64-bit system") @pytest.mark.slow @requires_memory(free_bytes=2 * 2**30) diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py index b54265338e06..839500945464 100644 --- a/numpy/lib/tests/test_io.py +++ b/numpy/lib/tests/test_io.py @@ -1,4 +1,3 @@ -import gc import gzip import locale import os @@ -26,7 +25,6 @@ from numpy.ma.testutils import assert_equal from numpy.testing import ( HAS_REFCOUNT, - IS_PYPY, IS_WASM, assert_, assert_allclose, @@ -35,7 +33,6 @@ assert_no_warnings, assert_raises, assert_raises_regex, - break_cycles, tempdir, temppath, ) @@ -231,7 +228,6 @@ def test_load_non_npy(self): assert len(npz["test2"]) == 10 assert npz["metadata"] == b"Name: Test" - @pytest.mark.skipif(IS_PYPY, reason="Hangs on PyPy") @pytest.mark.skipif(not IS_64BIT, reason="Needs 64bit platform") @pytest.mark.slow @pytest.mark.thread_unsafe(reason="crashes with low memory") @@ -320,7 +316,6 @@ def test_not_closing_opened_fid(self): fp.seek(0) assert_(not fp.closed) - @pytest.mark.slow_pypy def test_closing_fid(self): # Test that issue #1517 (too many opened files) remains closed # It might be a "weak" test 
since failed to get triggered on @@ -338,14 +333,7 @@ def test_closing_fid(self): # TODO: specify exact message warnings.simplefilter('ignore', ResourceWarning) for i in range(1, 1025): - try: - np.load(tmp)["data"] - except Exception as e: - msg = f"Failed to load data from a file: {e}" - raise AssertionError(msg) - finally: - if IS_PYPY: - gc.collect() + np.load(tmp)["data"] def test_closing_zipfile_after_load(self): # Check that zipfile owns file and can close it. This needs to @@ -646,7 +634,7 @@ def check_large_zip(memoryerror_raised): except MemoryError: memoryerror_raised.value = True raise - # run in a subprocess to ensure memory is released on PyPy, see gh-15775 + # run in a subprocess to ensure memory is released # Use an object in shared memory to re-raise the MemoryError exception # in our process if needed, see gh-16889 memoryerror_raised = Value(c_bool) @@ -2542,9 +2530,6 @@ def test_save_load_memmap(self): assert_array_equal(data, a) # close the mem-mapped file del data - if IS_PYPY: - break_cycles() - break_cycles() @pytest.mark.xfail(IS_WASM, reason="memmap doesn't work correctly") @pytest.mark.parametrize("filename_type", [Path, str]) @@ -2557,9 +2542,6 @@ def test_save_load_memmap_readwrite(self, filename_type): a[0][0] = 5 b[0][0] = 5 del b # closes the file - if IS_PYPY: - break_cycles() - break_cycles() data = np.load(path) assert_array_equal(data, a) diff --git a/numpy/lib/tests/test_loadtxt.py b/numpy/lib/tests/test_loadtxt.py index a164bf38f189..b50478209520 100644 --- a/numpy/lib/tests/test_loadtxt.py +++ b/numpy/lib/tests/test_loadtxt.py @@ -672,9 +672,7 @@ def test_warn_on_skipped_data(skiprows): ("float16", 3.07e-05), ("float32", 9.2557e-41), ("complex64", 9.2557e-41 + 2.8622554e-29j), ("float64", -1.758571353180402e-24), - # Here and below, the repr side-steps a small loss of precision in - # complex `str` in PyPy (which is probably fine, as repr works): - ("complex128", repr(5.406409232372729e-29 - 1.758571353180402e-24j)), + 
("complex128", 5.406409232372729e-29 - 1.758571353180402e-24j), # Use integer values that fit into double. Everything else leads to # problems due to longdoubles going via double and decimal strings # causing rounding errors. @@ -685,7 +683,7 @@ def test_warn_on_skipped_data(skiprows): def test_byteswapping_and_unaligned(dtype, value, swap): # Try to create "interesting" values within the valid unicode range: dtype = np.dtype(dtype) - data = [f"x,{value}\n"] # repr as PyPy `str` truncates some + data = [f"x,{value}\n"] if swap: dtype = dtype.newbyteorder() full_dt = np.dtype([("a", "S1"), ("b", dtype)], align=False) diff --git a/numpy/random/tests/test_regression.py b/numpy/random/tests/test_regression.py index eeaf6d2b4bd3..f63c16650df8 100644 --- a/numpy/random/tests/test_regression.py +++ b/numpy/random/tests/test_regression.py @@ -5,7 +5,7 @@ import numpy as np from numpy import random -from numpy.testing import IS_PYPY, assert_, assert_array_equal, assert_raises +from numpy.testing import assert_, assert_array_equal, assert_raises class TestRegression: @@ -151,7 +151,6 @@ def __array__(self, dtype=None, copy=None): assert_array_equal(m.__array__(), np.arange(5)) @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") - @pytest.mark.skipif(IS_PYPY, reason="PyPy does not modify tp_doc") @pytest.mark.parametrize( "cls", [ diff --git a/numpy/typing/tests/data/reveal/testing.pyi b/numpy/typing/tests/data/reveal/testing.pyi index 583ca60f90a7..0361f635a848 100644 --- a/numpy/typing/tests/data/reveal/testing.pyi +++ b/numpy/typing/tests/data/reveal/testing.pyi @@ -65,7 +65,6 @@ with suppress_obj as c3: assert_type(c3, np.testing.suppress_warnings) # type: ignore[deprecated] # pyright: ignore[reportDeprecated] assert_type(np.testing.verbose, int) -assert_type(np.testing.IS_PYPY, bool) assert_type(np.testing.HAS_REFCOUNT, bool) assert_type(np.testing.HAS_LAPACK64, bool) diff --git a/pyproject.toml b/pyproject.toml index 07189ddbe592..962aed0571cd 
100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -192,7 +192,7 @@ skip = ["*_i686", "*_ppc64le", "*_s390x", "*_universal2"] before-build = "bash {project}/tools/wheels/cibw_before_build.sh {project}" before-test = "pip install -r {project}/requirements/test_requirements.txt" test-command = "bash {project}/tools/wheels/cibw_test_command.sh {project}" -enable = ["cpython-freethreading", "pypy", "cpython-prerelease"] +enable = ["cpython-freethreading", "cpython-prerelease"] # The build will use openblas64 everywhere, except on arm64 macOS >=14.0 (uses Accelerate) [tool.cibuildwheel.config-settings] From ef627e18ae0789b092ef2cad51692dab3e83a5d1 Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Tue, 3 Feb 2026 08:28:53 +0530 Subject: [PATCH 1363/1718] DOC: Use `spin` for lint checks --- doc/source/dev/development_environment.rst | 88 +++++++++++++++++++--- 1 file changed, 76 insertions(+), 12 deletions(-) diff --git a/doc/source/dev/development_environment.rst b/doc/source/dev/development_environment.rst index c2085a0013ef..5d77509b43dc 100644 --- a/doc/source/dev/development_environment.rst +++ b/doc/source/dev/development_environment.rst @@ -206,18 +206,82 @@ Install all dependent packages using pip:: To run lint checks before committing new code, run:: - $ python tools/linter.py - -To check all changes in newly added Python code of current branch with target branch, run:: - - $ python tools/linter.py - -If there are no errors, the script exits with no message. In case of errors, -check the error message for details:: - - $ python tools/linter.py - ./numpy/_core/tests/test_scalarmath.py:34:5: E303 too many blank lines (3) - 1 E303 too many blank lines (3) + $ spin lint + +If there are no errors, the output will look like:: + + $ spin lint + Running Ruff Check... + All checks passed! + + Running C API borrow-reference linter... + Scanning 548 C/C++ source files... + + All checks passed! C API borrow-ref linter found no issues. + + + Running cython-lint... 
+ +In case of errors, check the error message for details:: + + $ spin lint + Running Ruff Check... + I001 [*] Import block is un-sorted or un-formatted + --> numpy/matlib.py:12:1 + | + 10 | PendingDeprecationWarning, stacklevel=2) + 11 | + 12 | / import numpy as np + 13 | | + 14 | | # Matlib.py contains all functions in the numpy namespace with a few + 15 | | # replacements. See doc/source/reference/routines.matlib.rst for details. + 16 | | # Need * as we're copying the numpy namespace. + 17 | | from numpy import * # noqa: F403 + 18 | | from numpy.matrixlib.defmatrix import matrix, asmatrix + | |______________________________________________________^ + 19 | + 20 | __version__ = np.__version__ + | + help: Organize imports + 15 | # replacements. See doc/source/reference/routines.matlib.rst for details. + 16 | # Need * as we're copying the numpy namespace. + 17 | from numpy import * # noqa: F403 + - from numpy.matrixlib.defmatrix import matrix, asmatrix + 18 + from numpy.matrixlib.defmatrix import asmatrix, matrix + 19 | + 20 | __version__ = np.__version__ + 21 | + + E501 Line too long (127 > 88) + --> numpy/matlib.py:214:89 + | + 212 | ------- + 213 | I : matrix + 214 | A `n` x `M` matrix where all elements are equal to zero, except for the `k`-th diagonal, whose values are equal to one. + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + 215 | + 216 | See Also + | + + Found 2 errors. + [*] 1 fixable with the `--fix` option. + +To automatically fix issues that can be fixed, run:: + + $ spin lint --fix + Running Ruff Check... + E501 Line too long (127 > 88) + --> numpy/matlib.py:214:89 + | + 212 | ------- + 213 | I : matrix + 214 | A `n` x `M` matrix where all elements are equal to zero, except for the `k`-th diagonal, whose values are equal to one. + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + 215 | + 216 | See Also + | + + Found 2 errors (1 fixed, 1 remaining). 
It is advisable to run lint checks before pushing commits to a remote branch since the linter runs as part of the CI pipeline. From 0574e12549573a6b489d87de4b54bcd5949b58d1 Mon Sep 17 00:00:00 2001 From: "Kai (Kazuya Ito)" Date: Wed, 4 Feb 2026 00:08:59 +0900 Subject: [PATCH 1364/1718] DOC: Correct description of Euler's constant (#30773) --- doc/source/reference/constants.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/reference/constants.rst b/doc/source/reference/constants.rst index 79d758bddada..00a2d607b356 100644 --- a/doc/source/reference/constants.rst +++ b/doc/source/reference/constants.rst @@ -8,7 +8,7 @@ NumPy includes several constants: .. data:: e - Euler's constant, base of natural logarithms, Napier's constant. + Euler's number, base of natural logarithms, Napier's constant. ``e = 2.71828182845904523536028747135266249775724709369995...`` From 938edf0d23e8a46da21f77c3ba0128014c491e98 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Tue, 3 Feb 2026 16:26:23 +0100 Subject: [PATCH 1365/1718] DOC: update NEP 57 draft for reviewer feedback [ci skip] --- doc/neps/nep-0057-numpy-platform-support.rst | 43 ++++++++++++++++---- 1 file changed, 36 insertions(+), 7 deletions(-) diff --git a/doc/neps/nep-0057-numpy-platform-support.rst b/doc/neps/nep-0057-numpy-platform-support.rst index 9e555a74839c..8cbcd7bcdd6e 100644 --- a/doc/neps/nep-0057-numpy-platform-support.rst +++ b/doc/neps/nep-0057-numpy-platform-support.rst @@ -87,11 +87,12 @@ General principles however they should disclose this to the Steering Council, and indicate whether long-term support is conditional on their employment or contractor status for the support tiers that include releasing wheels to PyPI. + *Rationale: releasing wheels to PyPI is a long-term commitment by the project as a whole, see the backwards compatibility section below.* -2. CI support with native runners for the platform is strongly preferred. 
Free - is best, however decisions on paid CI are up to the Steering Council. +2. CI support for the platform is required, preferably with native runners. + Free is best, however decisions on paid CI are up to the Steering Council. Emulation for running the test suite (e.g., under QEMU) or self-hosted buildbots are slower and less reliable, hence not preferred. @@ -101,6 +102,13 @@ General principles - A previously used rule of thumb: >=0.5% of the user base should be on this platform. There may be reasons to deviate from this rule of thumb. + *Note: finding clean data sources isn't always easy. If wheels are already + being shipped, for NumPy or for a comparable project, then download data + from PyPI may be obtained through BigQuery. For new platforms, sources + like the* + `Steam Hardware & Software Survey `__ + *may have to be used.* + 4. Adding a non-wheel CI job for a platform to the NumPy CI matrix is much cheaper, and easily reverted in case of problems. The bar for adding such jobs is low, and assessed on a case-by-case basis. @@ -113,6 +121,8 @@ General principles 6. Decision making: - Moving a platform to a lower support tier must be discussed on the mailing list. + The circumstances for each platform are unique so the community will + evaluate each proposal to demote a platform on a case-by-case basis. - Moving a platform to a higher support tier this includes releasing wheels on PyPI for that platform must be discussed on the mailing list. - Adding an entry for a platform for an unsupported platform or one without @@ -120,16 +130,35 @@ General principles the relevant maintainers agree. +Releasing wheels to PyPI +'''''''''''''''''''''''' + +The wheels that the NumPy team releases on PyPI for the ``numpy`` package get +hundreds of millions of downloads a month. We therefore highly value both +reliability and supply chain security of those release artifacts. Compromising +on those aspects is unlikely to be acceptable for the NumPy team. 
+ +The details of how wheels are produced, tested and distributed can be found in +the `numpy/numpy-release `__ +repository. Some key requirements of the current setup, which aren't likely to +change soon, are: + +1. Must be buildable on publicly-visible CI infrastructure (i.e., GitHub). +2. Must be tested well enough (meaning native runners are preferred; QEMU is quite slow). +3. Must be publishable to PyPI automatically, through PyPI's trusted publishing + mechanism. + + Tier 1 ~~~~~~ - Must have regular CI support on GitHub or (exceptionally) through another well-integrated CI platform that the release team and steering council deem acceptable. -- The NumPy team releases wheels on PyPI for this platform +- The NumPy team releases wheels on PyPI for this platform. - CI failures (either regular CI or wheel build CI) block releases. -- All maintainers developers are responsible to keep the ``main`` branch and - wheel builds working. +- All maintainers are responsible to keep the ``main`` branch and wheel builds + working. Tier 1 platforms: @@ -147,7 +176,7 @@ Tier 2 - Must have regular CI support, either as defined for Tier 1 or through a reliable self-hosted service. -- The NumPy team releases wheels on PyPI for this platform +- The NumPy team releases wheels on PyPI for this platform. - CI failures block releases. - Must have at least one maintainer who commits to take primary and long-term responsibility for keeping the ``main`` branch and wheel builds working. @@ -164,7 +193,7 @@ Tier 3 - Is supported as part of NumPy's regular CI setup for the ``main`` branch. CI support as defined for Tier 2. -- No wheels are released on PyPI for this platform +- No wheels are released on PyPI for this platform. - CI failures block releases (skips may be applied a bit more liberally). 
- Must have at least one maintainer or a regular contributor trusted by the NumPy maintainers who commits to take responsibility for CI on the ``main`` From 64789ddd786006fc34a47b7952ef9b2ede4ee919 Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Wed, 4 Feb 2026 04:08:40 +0530 Subject: [PATCH 1366/1718] MAINT: Use `spin lint` in CI (#25103) --- .github/workflows/linux.yml | 2 +- requirements/linter_requirements.txt | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 0428389922c4..ef76656eb69c 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -53,7 +53,7 @@ jobs: env: BASE_REF: ${{ github.base_ref }} run: - python tools/linter.py + spin lint - name: Check Python.h is first file included run: | python tools/check_python_h_first.py diff --git a/requirements/linter_requirements.txt b/requirements/linter_requirements.txt index 8dfd5d05e304..81088887a0ee 100644 --- a/requirements/linter_requirements.txt +++ b/requirements/linter_requirements.txt @@ -2,3 +2,4 @@ cython-lint ruff==0.14.14 GitPython>=3.1.30 +spin From 715280f06e6121f1702d8addb3ad6ef364498a25 Mon Sep 17 00:00:00 2001 From: Raghuveer Devulapalli Date: Wed, 4 Feb 2026 05:01:24 +0530 Subject: [PATCH 1367/1718] MAINT: Update x86-simd-sort to latest (#30747) --- numpy/_core/src/npysort/x86-simd-sort | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/npysort/x86-simd-sort b/numpy/_core/src/npysort/x86-simd-sort index 6a7a01da4b0d..5adb33411f3c 160000 --- a/numpy/_core/src/npysort/x86-simd-sort +++ b/numpy/_core/src/npysort/x86-simd-sort @@ -1 +1 @@ -Subproject commit 6a7a01da4b0dfde108aa626a2364c954e2c50fe1 +Subproject commit 5adb33411f3cea8bdbafa9d91bd75bc4bf19c7dd From 5f280d4bb46748a7ddfda20b58f1cb4a91e3ca37 Mon Sep 17 00:00:00 2001 From: Noxaster <208382900+Noxaster@users.noreply.github.com> Date: Mon, 2 Feb 2026 01:17:12 +0100 Subject: [PATCH 1368/1718] MAINT: Replace 
%-formatting with f-strings in numpy/lib (UP031) --- numpy/lib/_format_impl.py | 8 +++++--- numpy/lib/_function_base_impl.py | 22 ++++++++++++---------- numpy/lib/_iotools.py | 2 +- numpy/lib/_npyio_impl.py | 14 ++++++++------ numpy/lib/_polynomial_impl.py | 4 ++-- numpy/lib/_utils_impl.py | 2 +- numpy/lib/recfunctions.py | 6 +++--- numpy/lib/tests/test_format.py | 4 ++-- numpy/lib/tests/test_io.py | 10 +++++----- 9 files changed, 39 insertions(+), 33 deletions(-) diff --git a/numpy/lib/_format_impl.py b/numpy/lib/_format_impl.py index 2bb557709c8b..51b16ce0de48 100644 --- a/numpy/lib/_format_impl.py +++ b/numpy/lib/_format_impl.py @@ -838,9 +838,11 @@ def read_array(fp, allow_pickle=False, pickle_kwargs=None, *, array = pickle.load(fp, **pickle_kwargs) except UnicodeError as err: # Friendlier error message - raise UnicodeError("Unpickling a python object failed: %r\n" - "You may need to pass the encoding= option " - "to numpy.load" % (err,)) from err + raise UnicodeError( + f"Unpickling a python object failed: {err!r}\n" + "You may need to pass the encoding= option " + "to numpy.load" + ) from err else: if isfileobj(fp): # We can use the fast fromfile() function. 
diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 5893a116bf18..8698df708b89 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -2190,17 +2190,17 @@ def _update_dim_sizes(dim_sizes, arg, core_dims): num_core_dims = len(core_dims) if arg.ndim < num_core_dims: raise ValueError( - '%d-dimensional argument does not have enough ' - 'dimensions for all core dimensions %r' - % (arg.ndim, core_dims)) + f'{arg.ndim}-dimensional argument does not have enough ' + f'dimensions for all core dimensions {core_dims!r}') core_shape = arg.shape[-num_core_dims:] for dim, size in zip(core_dims, core_shape): if dim in dim_sizes: if size != dim_sizes[dim]: raise ValueError( - 'inconsistent size for core dimension %r: %r vs %r' - % (dim, size, dim_sizes[dim])) + f'inconsistent size for core dimension {dim!r}: {size!r} vs ' + f'{dim_sizes[dim]!r}' + ) else: dim_sizes[dim] = size @@ -2610,9 +2610,10 @@ def _vectorize_call_with_signature(self, func, args): input_core_dims, output_core_dims = self._in_and_out_core_dims if len(args) != len(input_core_dims): - raise TypeError('wrong number of positional arguments: ' - 'expected %r, got %r' - % (len(input_core_dims), len(args))) + raise TypeError( + 'wrong number of positional arguments: ' + f'expected {len(input_core_dims)!r}, got {len(args)!r}' + ) args = tuple(asanyarray(arg) for arg in args) broadcast_shape, dim_sizes = _parse_input_dimensions( @@ -2633,8 +2634,9 @@ def _vectorize_call_with_signature(self, func, args): if nout != n_results: raise ValueError( - 'wrong number of outputs from pyfunc: expected %r, got %r' - % (nout, n_results)) + f'wrong number of outputs from pyfunc: expected {nout!r}, ' + f'got {n_results!r}' + ) if nout == 1: results = (results,) diff --git a/numpy/lib/_iotools.py b/numpy/lib/_iotools.py index 3586b41de86c..c823842b36b6 100644 --- a/numpy/lib/_iotools.py +++ b/numpy/lib/_iotools.py @@ -373,7 +373,7 @@ def validate(self, names, 
defaultfmt="f%i", nbfields=None): item += '_' cnt = seen.get(item, 0) if cnt > 0: - validatednames.append(item + '_%d' % cnt) + validatednames.append(f"{item}_{cnt}") else: validatednames.append(item) seen[item] = cnt + 1 diff --git a/numpy/lib/_npyio_impl.py b/numpy/lib/_npyio_impl.py index 3644dfb9f128..34e5985ea2a2 100644 --- a/numpy/lib/_npyio_impl.py +++ b/numpy/lib/_npyio_impl.py @@ -765,7 +765,7 @@ def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None): namedict = kwds for i, val in enumerate(args): - key = 'arr_%d' % i + key = f'arr_{i}' if key in namedict.keys(): raise ValueError( f"Cannot use un-named variables and keyword {key}") @@ -1561,7 +1561,7 @@ def first_write(self, v): # Handle 1-dimensional arrays if X.ndim == 0 or X.ndim > 2: raise ValueError( - "Expected 1D or 2D array, got %dD array instead" % X.ndim) + f"Expected 1D or 2D array, got {X.ndim}D array instead") elif X.ndim == 1: # Common case -- 1d array of numbers if X.dtype.names is None: @@ -1614,9 +1614,10 @@ def first_write(self, v): try: v = format % tuple(row) + newline except TypeError as e: - raise TypeError("Mismatch between array dtype ('%s') and " - "format specifier ('%s')" - % (str(X.dtype), format)) from e + raise TypeError( + f"Mismatch between array dtype ('{str(X.dtype)}') and " + f"format specifier ('{format}')" + ) from e fh.write(v) if len(footer) > 0: @@ -1965,7 +1966,8 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, if not isinstance(user_converters, dict): raise TypeError( "The input argument 'converter' should be a valid dictionary " - "(got '%s' instead)" % type(user_converters)) + f"(got '{type(user_converters)}' instead)" + ) if encoding == 'bytes': encoding = None diff --git a/numpy/lib/_polynomial_impl.py b/numpy/lib/_polynomial_impl.py index 9b2ff399baa5..e17d6a49ef66 100644 --- a/numpy/lib/_polynomial_impl.py +++ b/numpy/lib/_polynomial_impl.py @@ -1323,9 +1323,9 @@ def fmt_float(q): elif coefstr == '0': newstr = '' elif 
coefstr == 'b': - newstr = '%s**%d' % (var, power,) + newstr = f'{var}**{power}' else: - newstr = '%s %s**%d' % (coefstr, var, power) + newstr = f'{coefstr} {var}**{power}' if k > 0: if newstr != '': diff --git a/numpy/lib/_utils_impl.py b/numpy/lib/_utils_impl.py index 6531d9631145..6aa1065047ef 100644 --- a/numpy/lib/_utils_impl.py +++ b/numpy/lib/_utils_impl.py @@ -348,7 +348,7 @@ def info(object=None, maxwidth=76, output=None, toplevel='numpy'): print(f"Help for {object} not found.", file=output) else: print("\n " - "*** Total of %d references found. ***" % numfound, + f"*** Total of {numfound} references found. ***", file=output ) diff --git a/numpy/lib/recfunctions.py b/numpy/lib/recfunctions.py index d466d5560a4c..8603f7b81a46 100644 --- a/numpy/lib/recfunctions.py +++ b/numpy/lib/recfunctions.py @@ -1532,9 +1532,9 @@ def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2', # Check jointype if jointype not in ('inner', 'outer', 'leftouter'): raise ValueError( - "The 'jointype' argument should be in 'inner', " - "'outer' or 'leftouter' (got '%s' instead)" % jointype - ) + "The 'jointype' argument should be in 'inner', " + f"'outer' or 'leftouter' (got '{jointype}' instead)" + ) # If we have a single key, put it in a tuple if isinstance(key, str): key = (key,) diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py index 7bbded153725..c569df526aba 100644 --- a/numpy/lib/tests/test_format.py +++ b/numpy/lib/tests/test_format.py @@ -675,7 +675,7 @@ def test_descr_to_dtype(dt): def test_version_2_0(): f = BytesIO() # requires more than 2 byte for header - dt = [(("%d" % i) * 100, float) for i in range(500)] + dt = [(f"{i}" * 100, float) for i in range(500)] d = np.ones(1000, dtype=dt) format.write_array(f, d, version=(2, 0)) @@ -700,7 +700,7 @@ def test_version_2_0(): @pytest.mark.skipif(IS_WASM, reason="memmap doesn't work correctly") def test_version_2_0_memmap(tmpdir): # requires more than 2 byte for header - dt = [(("%d" % 
i) * 100, float) for i in range(500)] + dt = [(f"{i}" * 100, float) for i in range(500)] d = np.ones(1000, dtype=dt) tf1 = os.path.join(tmpdir, 'version2_01.npy') tf2 = os.path.join(tmpdir, 'version2_02.npy') diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py index b54265338e06..2739da004624 100644 --- a/numpy/lib/tests/test_io.py +++ b/numpy/lib/tests/test_io.py @@ -185,7 +185,7 @@ def test_record(self): @pytest.mark.slow def test_format_2_0(self): - dt = [(("%d" % i) * 100, float) for i in range(500)] + dt = [(f"{i}" * 100, float) for i in range(500)] a = np.ones(1000, dtype=dt) with warnings.catch_warnings(record=True): warnings.filterwarnings('always', '', UserWarning) @@ -205,7 +205,7 @@ def roundtrip(self, *args, **kwargs): arr, arr_reloaded = RoundtripTest.roundtrip(self, np.savez, *args, **kwargs) try: for n, a in enumerate(arr): - reloaded = arr_reloaded['arr_%d' % n] + reloaded = arr_reloaded[f'arr_{n}'] assert_equal(a, reloaded) assert_equal(a.dtype, reloaded.dtype) assert_equal(a.flags.fnc, reloaded.flags.fnc) @@ -623,7 +623,7 @@ def test_unicode_and_bytes_fmt(self, iotype): np.savetxt(s, a, fmt="%f") s.seek(0) if iotype is StringIO: - assert_equal(s.read(), "%f\n" % 1.) + assert_equal(s.read(), f"{1.:f}\n") else: assert_equal(s.read(), b"%f\n" % 1.) @@ -1179,7 +1179,7 @@ def test_ndmin_keyword(self): def test_generator_source(self): def count(): for i in range(10): - yield "%d" % i + yield f"{i}" res = np.loadtxt(count()) assert_array_equal(res, np.arange(10)) @@ -2419,7 +2419,7 @@ def test_gft_using_generator(self): # gft doesn't work with unicode. 
def count(): for i in range(10): - yield asbytes("%d" % i) + yield asbytes(f"{i}") res = np.genfromtxt(count()) assert_array_equal(res, np.arange(10)) From b0abd85a5a978e3b74edaa0da5fbf19cd1f120ca Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 5 Feb 2026 17:56:59 +0000 Subject: [PATCH 1369/1718] MAINT: Bump hypothesis from 6.151.3 to 6.151.4 in /requirements Bumps [hypothesis](https://github.com/HypothesisWorks/hypothesis) from 6.151.3 to 6.151.4. - [Release notes](https://github.com/HypothesisWorks/hypothesis/releases) - [Commits](https://github.com/HypothesisWorks/hypothesis/compare/hypothesis-python-6.151.3...hypothesis-python-6.151.4) --- updated-dependencies: - dependency-name: hypothesis dependency-version: 6.151.4 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- requirements/emscripten_test_requirements.txt | 2 +- requirements/test_requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements/emscripten_test_requirements.txt b/requirements/emscripten_test_requirements.txt index e661596e1933..87a48ab32b9d 100644 --- a/requirements/emscripten_test_requirements.txt +++ b/requirements/emscripten_test_requirements.txt @@ -1,4 +1,4 @@ -hypothesis==6.151.3 +hypothesis==6.151.4 pytest==9.0.2 tzdata pytest-xdist diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index bea090fbd0c7..18e207e8fc47 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -1,5 +1,5 @@ Cython -hypothesis==6.151.3 +hypothesis==6.151.4 pytest==9.0.2 pytest-cov==7.0.0 meson From 7c44b65525867a8ea92154dc9a5476f153839ed8 Mon Sep 17 00:00:00 2001 From: Noxaster <208382900+Noxaster@users.noreply.github.com> Date: Fri, 6 Feb 2026 02:17:24 +0100 Subject: [PATCH 1370/1718] MAINT: Replace %-formatting with f-strings in numpy/_core (UP031) --- numpy/_core/_dtype.py | 
18 +++++----- numpy/_core/_internal.py | 6 ++-- numpy/_core/arrayprint.py | 8 ++--- numpy/_core/code_generators/genapi.py | 20 ++++------- numpy/_core/code_generators/generate_umath.py | 23 +++++-------- numpy/_core/einsumfunc.py | 24 ++++++------- numpy/_core/getlimits.py | 6 ++-- numpy/_core/overrides.py | 4 +-- numpy/_core/records.py | 5 ++- numpy/_core/tests/test_cpu_features.py | 11 +++--- numpy/_core/tests/test_deprecations.py | 4 +-- numpy/_core/tests/test_half.py | 12 +++---- numpy/_core/tests/test_multiarray.py | 34 +++++++++---------- numpy/_core/tests/test_nditer.py | 4 +-- numpy/_core/tests/test_print.py | 4 +-- numpy/_core/tests/test_regression.py | 6 ++-- numpy/_core/tests/test_scalarmath.py | 4 +-- numpy/_core/tests/test_umath.py | 2 +- numpy/_core/tests/test_umath_complex.py | 4 +-- 19 files changed, 88 insertions(+), 111 deletions(-) diff --git a/numpy/_core/_dtype.py b/numpy/_core/_dtype.py index 58cfbbf8724a..9ae361fe651c 100644 --- a/numpy/_core/_dtype.py +++ b/numpy/_core/_dtype.py @@ -116,13 +116,13 @@ def _scalar_str(dtype, short): if _isunsized(dtype): return "'S'" else: - return "'S%d'" % dtype.itemsize + return f"'S{dtype.itemsize}'" elif dtype.type == np.str_: if _isunsized(dtype): return f"'{byteorder}U'" else: - return "'%sU%d'" % (byteorder, dtype.itemsize / 4) + return f"'{byteorder}U{dtype.itemsize // 4}'" elif dtype.type is str: return "'T'" @@ -136,7 +136,7 @@ def _scalar_str(dtype, short): if _isunsized(dtype): return "'V'" else: - return "'V%d'" % dtype.itemsize + return f"'V{dtype.itemsize}'" elif dtype.type == np.datetime64: return f"'{byteorder}M8{_datetime_metadata_str(dtype)}'" @@ -150,11 +150,11 @@ def _scalar_str(dtype, short): elif np.issubdtype(dtype, np.number): # Short repr with endianness, like 'type_resolver = &%s;" % uf.typereso) + mlist.append(rf"((PyUFuncObject *)f)->type_resolver = &{uf.typereso};") for c in uf.indexed: # Handle indexed loops by getting the underlying ArrayMethodObject # from the list in f._loops 
and setting its field appropriately @@ -1582,7 +1575,7 @@ def make_ufuncs(funcdict): funcname=f"{english_upper(chartoname[c])}_{name}_indexed", )) - mlist.append(r"""PyDict_SetItemString(dictionary, "%s", f);""" % name) + mlist.append(rf"""PyDict_SetItemString(dictionary, "{name}", f);""") mlist.append(r"""Py_DECREF(f);""") code3list.append('\n'.join(mlist)) return '\n'.join(code3list) diff --git a/numpy/_core/einsumfunc.py b/numpy/_core/einsumfunc.py index 3a04b02b9c93..3f06c6490f87 100644 --- a/numpy/_core/einsumfunc.py +++ b/numpy/_core/einsumfunc.py @@ -608,8 +608,8 @@ def _parse_einsum_input(operands): # Make sure output subscripts are in the input for char in output_subscript: if output_subscript.count(char) != 1: - raise ValueError("Output character %s appeared more than once in " - "the output." % char) + raise ValueError(f"Output character {char} appeared more than once in " + "the output.") if char not in input_subscripts: raise ValueError(f"Output character {char} did not appear in the input") @@ -790,9 +790,9 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False): for tnum, term in enumerate(input_list): sh = operands[tnum].shape if len(sh) != len(term): - raise ValueError("Einstein sum subscript %s does not contain the " - "correct number of indices for operand %d." - % (input_subscripts[tnum], tnum)) + raise ValueError(f"Einstein sum subscript {input_subscripts[tnum]} " + "does not contain the " + f"correct number of indices for operand {tnum}.") for cnum, char in enumerate(term): dim = sh[cnum] @@ -801,9 +801,9 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False): if dimension_dict[char] == 1: dimension_dict[char] = dim elif dim not in (1, dimension_dict[char]): - raise ValueError("Size of label '%s' for operand %d (%d) " - "does not match previous terms (%d)." 
- % (char, tnum, dimension_dict[char], dim)) + raise ValueError(f"Size of label '{char}' for " + f"operand {tnum} ({dimension_dict[char]}) " + f"does not match previous terms ({dim}).") else: dimension_dict[char] = dim @@ -886,7 +886,6 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False): # Return the path along with a nice string representation overall_contraction = input_subscripts + "->" + output_subscript - header = ("scaling", "current", "remaining") # Compute naive cost # This isn't quite right, need to look into exactly how einsum does this @@ -903,20 +902,19 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False): path_print = f" Complete contraction: {overall_contraction}\n" path_print += f" Naive scaling: {num_indices}\n" - path_print += " Optimized scaling: %d\n" % max(scale_list) + path_print += f" Optimized scaling: {max(scale_list)}\n" path_print += f" Naive FLOP count: {naive_cost:.3e}\n" path_print += f" Optimized FLOP count: {opt_cost:.3e}\n" path_print += f" Theoretical speedup: {speedup:3.3f}\n" path_print += f" Largest intermediate: {max_i:.3e} elements\n" path_print += "-" * 74 + "\n" - path_print += "%6s %24s %40s\n" % header + path_print += f"{'scaling':>6} {'current':>24} {'remaining':>40}\n" path_print += "-" * 74 for n, contraction in enumerate(contraction_list): _, einsum_str, remaining = contraction remaining_str = ",".join(remaining) + "->" + output_subscript - path_run = (scale_list[n], einsum_str, remaining_str) - path_print += "\n%4d %24s %40s" % path_run + path_print += f"\n{scale_list[n]:4d} {einsum_str:>24} {remaining_str:>40}" path = ['einsum_path'] + path return (path, path_print) diff --git a/numpy/_core/getlimits.py b/numpy/_core/getlimits.py index 7d2b9966fcd7..fe08bbda43cb 100644 --- a/numpy/_core/getlimits.py +++ b/numpy/_core/getlimits.py @@ -407,7 +407,7 @@ def __init__(self, int_type): self.dtype = numeric.dtype(type(int_type)) self.kind = self.dtype.kind self.bits = self.dtype.itemsize * 8 - 
self.key = "%s%d" % (self.kind, self.bits) + self.key = f"{self.kind}{self.bits}" if self.kind not in 'iu': raise ValueError(f"Invalid integer data type {self.kind!r}.") @@ -449,5 +449,5 @@ def __str__(self): return fmt % {'dtype': self.dtype, 'min': self.min, 'max': self.max} def __repr__(self): - return "%s(min=%s, max=%s, dtype=%s)" % (self.__class__.__name__, - self.min, self.max, self.dtype) + return (f"{self.__class__.__name__}" + f"(min={self.min}, max={self.max}, dtype={self.dtype})") diff --git a/numpy/_core/overrides.py b/numpy/_core/overrides.py index 6d5e7750b09b..1bd2373dbaa4 100644 --- a/numpy/_core/overrides.py +++ b/numpy/_core/overrides.py @@ -96,8 +96,8 @@ def verify_matching_signatures(implementation, dispatcher): (implementation_spec.defaults is not None and len(implementation_spec.defaults) != len(dispatcher_spec.defaults))): - raise RuntimeError('implementation and dispatcher for %s have ' - 'different function signatures' % implementation) + raise RuntimeError(f'implementation and dispatcher for {implementation} have ' + 'different function signatures') if implementation_spec.defaults is not None: if dispatcher_spec.defaults != (None,) * len(dispatcher_spec.defaults): diff --git a/numpy/_core/records.py b/numpy/_core/records.py index 8ad0f1fb07a3..d77015a4ccce 100644 --- a/numpy/_core/records.py +++ b/numpy/_core/records.py @@ -163,7 +163,7 @@ def _setfieldnames(self, names, titles): # "f0, f1, f2,..." # if not enough names are specified, they will be assigned as "f[n], # f[n+1],..." etc. where n is the number of specified names..." 
- self._names += ['f%d' % i for i in range(len(self._names), + self._names += [f'f{i}' for i in range(len(self._names), self._nfields)] # check for redundant names _dup = find_duplicate(self._names) @@ -263,8 +263,7 @@ def pprint(self): # pretty-print all fields names = self.dtype.names maxlen = max(len(name) for name in names) - fmt = '%% %ds: %%s' % maxlen - rows = [fmt % (name, getattr(self, name)) for name in names] + rows = [f"{name:>{maxlen}}: {getattr(self, name)}" for name in names] return "\n".join(rows) # The recarray is almost identical to a standard array (which supports diff --git a/numpy/_core/tests/test_cpu_features.py b/numpy/_core/tests/test_cpu_features.py index c95886752949..631a1a9e7380 100644 --- a/numpy/_core/tests/test_cpu_features.py +++ b/numpy/_core/tests/test_cpu_features.py @@ -53,13 +53,12 @@ def assert_features_equal(actual, desired, fname): {auxv} """, prefix='\r') - raise AssertionError(( + raise AssertionError( "Failure Detection\n" - " NAME: '%s'\n" - " ACTUAL: %s\n" - " DESIRED: %s\n" - "%s" - ) % (fname, actual, desired, error_report)) + f" NAME: '{fname}'\n" + f" ACTUAL: {actual}\n" + f" DESIRED: {desired}\n" + f"{error_report}") def _text_to_list(txt): out = txt.strip("][\n").replace("'", "").split(', ') diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index 6c52aea84e5d..f6aab148b840 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -87,8 +87,8 @@ def assert_deprecated(self, function, num=1, ignore_others=False, num_found += 1 elif not ignore_others: raise AssertionError( - "expected %s but got: %s" % - (self.warning_cls.__name__, warning.category)) + f"expected {self.warning_cls.__name__} but got: " + f"{warning.category}") if num is not None and num_found != num: msg = f"{len(w_context)} warnings found but {num} expected." 
lst = [str(w) for w in w_context] diff --git a/numpy/_core/tests/test_half.py b/numpy/_core/tests/test_half.py index 3ced5b466a44..85ef0f22b3f1 100644 --- a/numpy/_core/tests/test_half.py +++ b/numpy/_core/tests/test_half.py @@ -298,19 +298,15 @@ def test_half_correctness(self): if len(a32_fail) != 0: bad_index = a32_fail[0] assert_equal(finite_f32, a_manual, - "First non-equal is half value 0x%x -> %g != %g" % - (a_bits[bad_index], - finite_f32[bad_index], - a_manual[bad_index])) + f"First non-equal is half value 0x{a_bits[bad_index]:x} -> " + f"{finite_f32[bad_index]:g} != {a_manual[bad_index]:g}") a64_fail = np.nonzero(finite_f64 != a_manual)[0] if len(a64_fail) != 0: bad_index = a64_fail[0] assert_equal(finite_f64, a_manual, - "First non-equal is half value 0x%x -> %g != %g" % - (a_bits[bad_index], - finite_f64[bad_index], - a_manual[bad_index])) + f"First non-equal is half value 0x{a_bits[bad_index]:x} -> " + f"{finite_f64[bad_index]:g} != {a_manual[bad_index]:g}") def test_half_ordering(self): """Make sure comparisons are working right""" diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 1b76c7100b7f..35224b4e4aa3 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -299,17 +299,17 @@ def test_int(self): (np.int64, np.uint64, 64)]: for i in range(1, s): assert_equal(hash(st(-2**i)), hash(-2**i), - err_msg="%r: -2**%d" % (st, i)) + err_msg=f"{st!r}: -2**{i}") assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)), - err_msg="%r: 2**%d" % (st, i - 1)) + err_msg=f"{st!r}: 2**{i - 1}") assert_equal(hash(st(2**i - 1)), hash(2**i - 1), - err_msg="%r: 2**%d - 1" % (st, i)) + err_msg=f"{st!r}: 2**{i} - 1") i = max(i - 1, 1) assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)), - err_msg="%r: 2**%d" % (ut, i - 1)) + err_msg=f"{ut!r}: 2**{i - 1}") assert_equal(hash(ut(2**i - 1)), hash(2**i - 1), - err_msg="%r: 2**%d - 1" % (ut, i)) + err_msg=f"{ut!r}: 2**{i} - 1") class TestAttributes: 
@@ -3205,9 +3205,9 @@ def test_partition(self): aae(p[:, i], np.array([i] * d1.shape[0], dtype=dt)) # array_less does not seem to work right at((p[:, :i].T <= p[:, i]).all(), - msg="%d: %r <= %r" % (i, p[:, i], p[:, :i].T)) + msg=f"{i}: {p[:, i]!r} <= {p[:, :i].T!r}") at((p[:, i + 1:].T > p[:, i]).all(), - msg="%d: %r < %r" % (i, p[:, i], p[:, i + 1:].T)) + msg=f"{i}: {p[:, i]!r} < {p[:, i + 1:].T!r}") for row in range(p.shape[0]): self.assert_partitioned(p[row], [i]) self.assert_partitioned(parg[row], [i]) @@ -3218,9 +3218,9 @@ def test_partition(self): aae(p[i, :], np.array([i] * d1.shape[0], dtype=dt)) # array_less does not seem to work right at((p[:i, :] <= p[i, :]).all(), - msg="%d: %r <= %r" % (i, p[i, :], p[:i, :])) + msg=f"{i}: {p[i, :]!r} <= {p[:i, :]!r}") at((p[i + 1:, :] > p[i, :]).all(), - msg="%d: %r < %r" % (i, p[i, :], p[:, i + 1:])) + msg=f"{i}: {p[i, :]!r} < {p[:, i + 1:]!r}") for col in range(p.shape[1]): self.assert_partitioned(p[:, col], [i]) self.assert_partitioned(parg[:, col], [i]) @@ -3240,9 +3240,9 @@ def assert_partitioned(self, d, kth): prev = 0 for k in np.sort(kth): assert_array_compare(operator.__le__, d[prev:k], d[k], - err_msg='kth %d' % k) + err_msg=f'kth {k}') assert_((d[k:] >= d[k]).all(), - msg="kth %d, %r not greater equal %r" % (k, d[k:], d[k])) + msg=f"kth {k}, {d[k:]!r} not greater equal {d[k]!r}") prev = k + 1 def test_partition_iterative(self): @@ -4094,8 +4094,8 @@ def make_obj(base, array_priority=False, array_ufunc=False, def check(obj, binop_override_expected, ufunc_override_expected, inplace_override_expected, check_scalar=True): for op, (ufunc, has_inplace, dtype) in ops.items(): - err_msg = ('op: %s, ufunc: %s, has_inplace: %s, dtype: %s' - % (op, ufunc, has_inplace, dtype)) + err_msg = (f'op: {op}, ufunc: {ufunc}, ' + f'has_inplace: {has_inplace}, dtype: {dtype}') check_objs = [np.arange(3, 7, dtype=dtype).reshape(2, 2)] if check_scalar: check_objs.append(check_objs[0][0]) @@ -4439,7 +4439,7 @@ def 
test_temporary_with_cast(self): for dt in (np.complex64, np.complex128, np.clongdouble): c = np.ones(100000, dtype=dt) r = abs(c * 2.0) - assert_equal(r.dtype, np.dtype('f%d' % (c.itemsize // 2))) + assert_equal(r.dtype, np.dtype(f'f{c.itemsize // 2}')) def test_elide_broadcast(self): # test no elision on broadcast to higher dimension @@ -5923,7 +5923,7 @@ def test_file_position_after_fromfile(self, tmp_path, param_filename): f.write(b'\0') for mode in ['rb', 'r+b']: - err_msg = "%d %s" % (size, mode) + err_msg = f"{size} {mode}" with open(tmp_filename, mode) as f: f.read(2) @@ -5939,7 +5939,7 @@ def test_file_position_after_tofile(self, tmp_path, param_filename): tmp_filename = normalize_filename(tmp_path, param_filename) for size in sizes: - err_msg = "%d" % (size,) + err_msg = f"{size}" with open(tmp_filename, 'wb') as f: f.seek(size - 1) @@ -8329,7 +8329,7 @@ def test_native_padding(self): if j == 0: s = 'bi' else: - s = 'b%dxi' % j + s = f'b{j}xi' self._check('@' + s, {'f0': ('i1', 0), 'f1': ('i', align * (1 + j // align))}) self._check('=' + s, {'f0': ('i1', 0), diff --git a/numpy/_core/tests/test_nditer.py b/numpy/_core/tests/test_nditer.py index 520e638b2edb..aa4acbf9f338 100644 --- a/numpy/_core/tests/test_nditer.py +++ b/numpy/_core/tests/test_nditer.py @@ -820,8 +820,8 @@ def test_iter_broadcasting_errors(): assert_(msg.find('(2,3)->(2,3)') >= 0, f'Message "{msg}" doesn\'t contain operand shape (2,3)->(2,3)') assert_(msg.find('(2,)->(2,newaxis)') >= 0, - ('Message "%s" doesn\'t contain remapped operand shape' - '(2,)->(2,newaxis)') % msg) + f'Message "{msg}" doesn\'t contain remapped operand shape' + '(2,)->(2,newaxis)') # The message should contain the itershape parameter assert_(msg.find('(4,3)') >= 0, f'Message "{msg}" doesn\'t contain itershape parameter (4,3)') diff --git a/numpy/_core/tests/test_print.py b/numpy/_core/tests/test_print.py index 95a177b57a7d..1a9a0faddd18 100644 --- a/numpy/_core/tests/test_print.py +++ 
b/numpy/_core/tests/test_print.py @@ -180,8 +180,8 @@ def test_scalar_format(): f"failed with val {val}, type {valtype}") except ValueError as e: assert_(False, - "format raised exception (fmt='%s', val=%s, type=%s, exc='%s')" % - (fmat, repr(val), repr(valtype), str(e))) + f"format raised exception (fmt='{fmat}', val={repr(val)}, " + f"type={repr(valtype)}, exc='{str(e)}')") # diff --git a/numpy/_core/tests/test_regression.py b/numpy/_core/tests/test_regression.py index 24ce9330005d..02732e3bbc6f 100644 --- a/numpy/_core/tests/test_regression.py +++ b/numpy/_core/tests/test_regression.py @@ -1199,8 +1199,8 @@ def test_char_array_creation(self): def test_unaligned_unicode_access(self): # Ticket #825 for i in range(1, 9): - msg = 'unicode offset: %d chars' % i - t = np.dtype([('a', 'S%d' % i), ('b', 'U2')]) + msg = f'unicode offset: {i} chars' + t = np.dtype([('a', f'S{i}'), ('b', 'U2')]) x = np.array([(b'a', 'b')], dtype=t) assert_equal(str(x), "[(b'a', 'b')]", err_msg=msg) @@ -1845,7 +1845,7 @@ def test_ticket_1756(self): s = b'0123456789abcdef' a = np.array([s] * 5) for i in range(1, 17): - a1 = np.array(a, "|S%d" % i) + a1 = np.array(a, f"|S{i}") a2 = np.array([s[:i]] * 5) assert_equal(a1, a2) diff --git a/numpy/_core/tests/test_scalarmath.py b/numpy/_core/tests/test_scalarmath.py index 04c78b065f25..f9ddb9b026fd 100644 --- a/numpy/_core/tests/test_scalarmath.py +++ b/numpy/_core/tests/test_scalarmath.py @@ -69,8 +69,8 @@ def test_type_add(self): # skipped ahead based on the first argument, but that # does not produce properly symmetric results... 
assert_equal(c_scalar.dtype, c_array.dtype, - "error with types (%d/'%c' + %d/'%c')" % - (k, np.dtype(atype).char, l, np.dtype(btype).char)) + f"error with types ({k}/'{np.dtype(atype).char}' + " + f"{l}/'{np.dtype(btype).char}')") def test_type_create(self): for atype in types: diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py index 8c5af69af9a7..a2f6d40055c7 100644 --- a/numpy/_core/tests/test_umath.py +++ b/numpy/_core/tests/test_umath.py @@ -1340,7 +1340,7 @@ def test_log2_ints(self, i): # a good log2 implementation should provide this, # might fail on OS with bad libm v = np.log2(2.**i) - assert_equal(v, float(i), err_msg='at exponent %d' % i) + assert_equal(v, float(i), err_msg=f'at exponent {i}') @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") def test_log2_special(self): diff --git a/numpy/_core/tests/test_umath_complex.py b/numpy/_core/tests/test_umath_complex.py index 7012e7e357fe..9991aac129c9 100644 --- a/numpy/_core/tests/test_umath_complex.py +++ b/numpy/_core/tests/test_umath_complex.py @@ -389,7 +389,7 @@ def test_scalar(self): n_r = [x[i] ** y[i] for i in lx] for i in lx: - assert_almost_equal(n_r[i], p_r[i], err_msg='Loop %d\n' % i) + assert_almost_equal(n_r[i], p_r[i], err_msg=f'Loop {i}\n') def test_array(self): x = np.array([1, 1j, 2, 2.5 + .37j, np.inf, np.nan]) @@ -409,7 +409,7 @@ def test_array(self): n_r = x ** y for i in lx: - assert_almost_equal(n_r[i], p_r[i], err_msg='Loop %d\n' % i) + assert_almost_equal(n_r[i], p_r[i], err_msg=f'Loop {i}\n') class TestCabs: def setup_method(self): From fbe3a3053018030dcb4980d805b0a093ce8e22a9 Mon Sep 17 00:00:00 2001 From: Noxaster <208382900+Noxaster@users.noreply.github.com> Date: Fri, 6 Feb 2026 03:42:02 +0100 Subject: [PATCH 1371/1718] MAINT: Apply reviewer suggestion to use !r Co-authored-by: Robert Kern --- numpy/_core/tests/test_print.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/_core/tests/test_print.py 
b/numpy/_core/tests/test_print.py index 1a9a0faddd18..cd5b076a29e8 100644 --- a/numpy/_core/tests/test_print.py +++ b/numpy/_core/tests/test_print.py @@ -180,8 +180,8 @@ def test_scalar_format(): f"failed with val {val}, type {valtype}") except ValueError as e: assert_(False, - f"format raised exception (fmt='{fmat}', val={repr(val)}, " - f"type={repr(valtype)}, exc='{str(e)}')") + f"format raised exception (fmt='{fmat}', val={val!r}, " + f"type={valtype!r}, exc='{e}')") # From 6f494276f0418b06cc24b8628023ae4588797d09 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Fri, 6 Feb 2026 15:19:37 +0100 Subject: [PATCH 1372/1718] DOC: update for reviewer feedback [skip actions] [skipp cirrus] [skip azp] --- doc/neps/nep-0057-numpy-platform-support.rst | 73 ++++++++++---------- 1 file changed, 38 insertions(+), 35 deletions(-) diff --git a/doc/neps/nep-0057-numpy-platform-support.rst b/doc/neps/nep-0057-numpy-platform-support.rst index 8cbcd7bcdd6e..309df1916178 100644 --- a/doc/neps/nep-0057-numpy-platform-support.rst +++ b/doc/neps/nep-0057-numpy-platform-support.rst @@ -1,4 +1,4 @@ -.. _NEP47: +.. _NEP57: =============================== NEP 57 — NumPy platform support @@ -10,26 +10,11 @@ NEP 57 — NumPy platform support :Created: 2026-01-30 :Resolution: - -.. 
note:: - - This NEP is drafted as a policy specific to NumPy rather than a SPEC - for several reasons that are all project-specific: - - * it involves committing to a nontrivial amount of maintainer effort, - * personal commitment from a maintainer may make the difference between a - yes and a no of supporting a platform (e.g., NumPy supported PyPy for a - long time because of the efforts of one maintainer) - * support for a platform being possible at all may depend on features of the - code base (e.g., NumPy supports 32-bit Python on Windows while SciPy does - not because there's no suitable compiler toolchain for it) - * the number of wheels depends on whether the Stable ABI can be used (NumPy - is more performance-sensitive for small arrays, so can't use it) - Abstract -------- -This PEP documents how a platform - i.e., a specific operating system, CPU +This NEP documents how a platform - i.e., a specific operating system, CPU architecture and CPython interpreter - becomes supported in NumPy, what platforms are currently supported, and were supported in the (recent) past. @@ -37,8 +22,21 @@ platforms are currently supported, and were supported in the (recent) past. Motivation and scope -------------------- -*This policy is being drafted now because there is a lot of interest in -extending the number of platforms NumPy supports through wheels in particular.* +This policy is drafted now (early 2026) because there is a lot of interest in +extending the number of platforms NumPy supports through wheels in particular. 
+It is a policy specific to NumPy - even though other projects may possibly want +to refer to it - for several reasons: + +* It involves committing to a nontrivial amount of maintainer effort, +* Personal commitment from a maintainer may make the difference between a + yes and a no of supporting a platform (e.g., NumPy supported PyPy for a + long time because of the efforts of one maintainer) +* Support for a platform being possible at all may depend on features of the + code base (e.g., NumPy supports 32-bit Python on Windows while SciPy does + not because there's no suitable compiler toolchain for it). +* The number of wheels depends on whether the Stable ABI can be used (NumPy + is more performance-sensitive for small arrays, so can't use it) + The scope of this NEP includes: @@ -109,9 +107,10 @@ General principles `Steam Hardware & Software Survey `__ *may have to be used.* -4. Adding a non-wheel CI job for a platform to the NumPy CI matrix is much - cheaper, and easily reverted in case of problems. The bar for adding such - jobs is low, and assessed on a case-by-case basis. +4. Adding a regular CI job (i.e., not aimed at uploading wheels to PyPI) for a + platform to the NumPy CI matrix is much cheaper, and easily reverted in case + of problems. The bar for adding such jobs is low, and assessed on a + case-by-case basis. 5. For all platforms in any supported tier: the relevant prerequisites in our dependencies have been met. E.g., build tools have support, and for wheels @@ -123,11 +122,14 @@ General principles - Moving a platform to a lower support tier must be discussed on the mailing list. The circumstances for each platform are unique so the community will evaluate each proposal to demote a platform on a case-by-case basis. - - Moving a platform to a higher support tier this includes releasing wheels - on PyPI for that platform must be discussed on the mailing list. 
- - Adding an entry for a platform for an unsupported platform or one without - wheels can be done on GitHub, assuming it's clear from the discussion that - the relevant maintainers agree. + - Moving a platform to a higher support tier, if that higher tier includes + releasing wheels on PyPI for that platform, must be discussed on the + mailing list. + - Adding an entry to a support tier in this NEP for (a) an unsupported + platform or (b) a tier which does not include uploading wheels to PyPI can + be done on GitHub through a regular pull request (assuming it's clear from + the discussion that the relevant maintainers agree it doesn't need to hit + the mailing list). Releasing wheels to PyPI @@ -183,9 +185,9 @@ Tier 2 Tier 2 platforms: -- Linux x86-64 (musllinux) -- Linux aarch64 (musllinux) -- Free-threaded CPython +- Linux x86-64 (musllinux), *Ralf Gommers* +- Linux aarch64 (musllinux), *Ralf Gommers* +- Free-threaded CPython, *Nathan Goldbaum, Ralf Gommers* Tier 3 @@ -194,16 +196,17 @@ Tier 3 - Is supported as part of NumPy's regular CI setup for the ``main`` branch. CI support as defined for Tier 2. - No wheels are released on PyPI for this platform. -- CI failures block releases (skips may be applied a bit more liberally). +- CI failures block releases (skips may be applied when the failure is clearly + platform-specific and does not indicate a regression in core functionality). - Must have at least one maintainer or a regular contributor trusted by the NumPy maintainers who commits to take responsibility for CI on the ``main`` branch working. 
Tier 3 platforms: -- FreeBSD (note: runs on Cirrus CI) -- Linux ppc64le (note: runs on IBM-provided self-hosted runners) -- Emscripten/Pyodide +- FreeBSD (note: runs on Cirrus CI), *Ralf Gommers* +- Linux ppc64le (note: runs on IBM-provided self-hosted runners), *Sandeep Gupta* +- Emscripten/Pyodide, *Agriya Khetarpal, Gyeongjae Choi* Unsupported platforms @@ -241,7 +244,7 @@ Backward compatibility Moving a platform to a lower tier of support is generally backwards compatible. The exception is stopping to release wheels on PyPI for a platform. That causes -signifcant disruption for existing users on that platform. Their install commands +significant disruption for existing users on that platform. Their install commands (e.g., ``pip install numpy``) may stop working because if a new release no longer has wheels for the platform, by default ``pip`` will try to build from source rather than using a wheel from an older version of ``numpy``. Therefore, we should be very From 02a09aff0b77d2a8b819cf9309038a72b4f8fb72 Mon Sep 17 00:00:00 2001 From: Marten Henric van Kerkwijk Date: Sat, 11 Oct 2025 08:24:43 -0400 Subject: [PATCH 1373/1718] MAINT: Use WRITEBACKIFCOPY for in-place arrays. 
--- numpy/f2py/rules.py | 12 +++++++ numpy/f2py/src/fortranobject.c | 53 ++++++------------------------ numpy/f2py/tests/src/inplace/foo.f | 31 +++++++++++++++++ numpy/f2py/tests/test_inplace.py | 43 ++++++++++++++++++++++++ 4 files changed, 96 insertions(+), 43 deletions(-) create mode 100644 numpy/f2py/tests/src/inplace/foo.f create mode 100644 numpy/f2py/tests/test_inplace.py diff --git a/numpy/f2py/rules.py b/numpy/f2py/rules.py index 77b703017537..6ad941f98287 100644 --- a/numpy/f2py/rules.py +++ b/numpy/f2py/rules.py @@ -96,6 +96,7 @@ isintent_copy, isintent_hide, isintent_inout, + isintent_inplace, isintent_nothide, isintent_out, isintent_overwrite, @@ -1200,6 +1201,17 @@ } if (f2py_success) {"""]}, ], + 'pyobjfrom': [ + {l_and(isintent_inplace, l_not(isintent_out)): """\ + f2py_success = (PyArray_ResolveWritebackIfCopy(capi_#varname#_as_array) >= 0); + if (f2py_success) { /* inplace array #varname# has been written back to */"""}, + {l_and(isintent_inplace, isintent_out): """\ + f2py_success = (PyArray_ResolveWritebackIfCopy(capi_#varname#_as_array) >= 0); + if (f2py_success) { /* return written-back-to inplace array #varname# */ + Py_INCREF(#varname#_capi); + Py_SETREF(capi_#varname#_as_array, (PyArrayObject*)#varname#_capi);"""}, + ], + 'closepyobjfrom': {isintent_inplace: ' } /*if (f2py_success) of #varname# pyobjfrom*/'}, 'cleanupfrompyobj': [ # note that this list will be reversed (' } ' '/* if (capi_#varname#_as_array == NULL) ... 
else of #varname# */'), diff --git a/numpy/f2py/src/fortranobject.c b/numpy/f2py/src/fortranobject.c index 1727f7ed0972..e7a9b42989d3 100644 --- a/numpy/f2py/src/fortranobject.c +++ b/numpy/f2py/src/fortranobject.c @@ -785,30 +785,6 @@ dump_attrs(const PyArrayObject *obj) } #endif -#define SWAPTYPE(a, b, t) \ - { \ - t c; \ - c = (a); \ - (a) = (b); \ - (b) = c; \ - } - -static int -swap_arrays(PyArrayObject *obj1, PyArrayObject *obj2) -{ - PyArrayObject_fields *arr1 = (PyArrayObject_fields *)obj1, - *arr2 = (PyArrayObject_fields *)obj2; - SWAPTYPE(arr1->data, arr2->data, char *); - SWAPTYPE(arr1->nd, arr2->nd, int); - SWAPTYPE(arr1->dimensions, arr2->dimensions, npy_intp *); - SWAPTYPE(arr1->strides, arr2->strides, npy_intp *); - SWAPTYPE(arr1->base, arr2->base, PyObject *); - SWAPTYPE(arr1->descr, arr2->descr, PyArray_Descr *); - SWAPTYPE(arr1->flags, arr2->flags, int); - /* SWAPTYPE(arr1->weakreflist,arr2->weakreflist,PyObject*); */ - return 0; -} - #define ARRAY_ISCOMPATIBLE(arr,type_num) \ ((PyArray_ISINTEGER(arr) && PyTypeNum_ISINTEGER(type_num)) || \ (PyArray_ISFLOAT(arr) && PyTypeNum_ISFLOAT(type_num)) || \ @@ -1053,29 +1029,20 @@ ndarray_from_pyobj(const int type_num, /* here we have always intent(in) or intent(inplace) */ { - PyArrayObject * retarr = (PyArrayObject *) \ - PyArray_NewFromDescr(&PyArray_Type, descr, PyArray_NDIM(arr), PyArray_DIMS(arr), - NULL, NULL, !(intent & F2PY_INTENT_C), NULL); + int flags = NPY_ARRAY_FORCECAST | NPY_ARRAY_ENSURECOPY + | ((intent & F2PY_INTENT_C) ? 
NPY_ARRAY_IN_ARRAY + : NPY_ARRAY_IN_FARRAY); + if (intent & F2PY_INTENT_INPLACE) { + flags |= NPY_ARRAY_WRITEBACKIFCOPY; + } + /* Steals reference to descr */ + PyArrayObject *retarr = (PyArrayObject *)PyArray_FromArray( + arr, descr, flags); if (retarr==NULL) { - Py_DECREF(descr); return NULL; } + arr = retarr; F2PY_REPORT_ON_ARRAY_COPY_FROMARR; - if (PyArray_CopyInto(retarr, arr)) { - Py_DECREF(retarr); - return NULL; - } - if (intent & F2PY_INTENT_INPLACE) { - if (swap_arrays(arr,retarr)) { - Py_DECREF(retarr); - return NULL; /* XXX: set exception */ - } - Py_XDECREF(retarr); - if (intent & F2PY_INTENT_OUT) - Py_INCREF(arr); - } else { - arr = retarr; - } } return arr; } diff --git a/numpy/f2py/tests/src/inplace/foo.f b/numpy/f2py/tests/src/inplace/foo.f new file mode 100644 index 000000000000..ac85112beda8 --- /dev/null +++ b/numpy/f2py/tests/src/inplace/foo.f @@ -0,0 +1,31 @@ +c Test inplace calculations in array c, by squaring all its values. +c As a sanity check on the input, stores the original content in copy. 
+ subroutine inplace(c, m1, m2, copy) + integer*4 m1, m2, i, j + real*4 c(m1, m2), copy(m1, m2) +cf2py intent(inplace) c +cf2py intent(out) copy +cf2py integer, depend(c), intent(hide) :: m1 = len(c) +cf2py integer, depend(c), intent(hide) :: m2 = shape(c, 1) + do i=1,m1 + do j=1,m2 + copy(i, j) = c(i, j) + c(i, j) = c(i, j) ** 2 + end do + end do + end + + subroutine inplace_out(c, m1, m2, copy) + integer*4 m1, m2, i, j + real*4 c(m1, m2), copy(m1, m2) +cf2py intent(inplace, out) c +cf2py intent(out) copy +cf2py integer, depend(c), intent(hide) :: m1 = len(c) +cf2py integer, depend(c), intent(hide) :: m2 = shape(c, 1) + do i=1,m1 + do j=1,m2 + copy(i, j) = c(i, j) + c(i, j) = c(i, j) ** 2 + end do + end do + end diff --git a/numpy/f2py/tests/test_inplace.py b/numpy/f2py/tests/test_inplace.py new file mode 100644 index 000000000000..296af84f94ef --- /dev/null +++ b/numpy/f2py/tests/test_inplace.py @@ -0,0 +1,43 @@ + +import pytest + +import numpy as np +from numpy.f2py.tests import util +from numpy.testing import assert_array_equal + + +@pytest.mark.slow +class TestInplace(util.F2PyTest): + sources = [util.getpath("tests", "src", "inplace", "foo.f")] + + @pytest.mark.parametrize("func", ["inplace", "inplace_out"]) + @pytest.mark.parametrize("writeable", ["writeable", "readonly"]) + @pytest.mark.parametrize("view", [ + None, (), (slice(None, 2, None), slice(None, None, 2))]) + @pytest.mark.parametrize("dtype", ["f4", "f8"]) + def test_inplace(self, dtype, view, writeable, func): + # Test inplace modifications of an input array. 
+ a = np.arange(12.0, dtype=dtype).reshape((3, 4)).copy() + a.flags.writeable = writeable == "writeable" + k = a if view is None else a[view] + + ffunc = getattr(self.module, func) + if not a.flags.writeable: + with pytest.raises(ValueError, match="WRITEBACKIFCOPY base is read-only"): + ffunc(k) + return + + ref_k = k + exp_copy = k.copy() + exp_k = k ** 2 + exp_a = a.copy() + exp_a[view or ()] = exp_k + if func == "inplace_out": + kout, copy = ffunc(k) + assert kout is k + else: + copy = ffunc(k) + assert_array_equal(copy, exp_copy) + assert k is ref_k + assert np.allclose(k, exp_k) + assert np.allclose(a, exp_a) From b062080a335af4a576590342713d8039ffbd57bd Mon Sep 17 00:00:00 2001 From: Marten Henric van Kerkwijk Date: Sat, 11 Oct 2025 15:35:25 -0400 Subject: [PATCH 1374/1718] MAINT,TST,BUG: Adjust tests for using WRITEBACKIFCOPY --- .../tests/src/array_from_pyobj/wrapmodule.c | 27 ++++++++ numpy/f2py/tests/test_array_from_pyobj.py | 66 +++++++++++++++---- 2 files changed, 81 insertions(+), 12 deletions(-) diff --git a/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c b/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c index 25866f1a40ec..99bfca3322c9 100644 --- a/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c +++ b/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c @@ -120,10 +120,37 @@ static PyObject *f2py_rout_wrap_attrs(PyObject *capi_self, PyArray_ITEMSIZE(arr)); } +static char doc_f2py_rout_wrap_resolve_write_back_if_copy[] = "\ +Function signature:\n\ + resolvewritebackifcopy(arr)\n\ + Calls PyArray_ResolveWriteBackIfCopy\n\ +Required arguments:\n" +" arr : input array object\n" +"Return objects:\n" +" return_code : int\n" +; +static PyObject *f2py_rout_wrap_resolve_write_back_if_copy(PyObject *capi_self, + PyObject *capi_args) { + PyObject *arr_capi = Py_None; + PyArrayObject *arr = NULL; + if (!PyArg_ParseTuple(capi_args,"O!|:wrap.resolve_write_back_if_copy", + &PyArray_Type,&arr_capi)) { + return NULL; + } + arr = (PyArrayObject *)arr_capi; + 
int res = PyArray_ResolveWritebackIfCopy(arr); + if (res < 0) { + return NULL; + } + return Py_BuildValue("i",res); +} + static PyMethodDef f2py_module_methods[] = { {"call",f2py_rout_wrap_call,METH_VARARGS,doc_f2py_rout_wrap_call}, {"array_attrs",f2py_rout_wrap_attrs,METH_VARARGS,doc_f2py_rout_wrap_attrs}, + {"resolve_write_back_if_copy",f2py_rout_wrap_resolve_write_back_if_copy, + METH_VARARGS,doc_f2py_rout_wrap_resolve_write_back_if_copy}, {NULL,NULL} }; diff --git a/numpy/f2py/tests/test_array_from_pyobj.py b/numpy/f2py/tests/test_array_from_pyobj.py index 15383e9431cc..7649954c8d53 100644 --- a/numpy/f2py/tests/test_array_from_pyobj.py +++ b/numpy/f2py/tests/test_array_from_pyobj.py @@ -321,7 +321,17 @@ def __init__(self, typ, dims, intent, obj): assert self.arr_attr[5][-2:] == self.pyarr_attr[5][-2:], repr(( self.arr_attr[5], self.pyarr_attr[5] )) # descr - assert self.arr_attr[6] == self.pyarr_attr[6], repr(( + arr_flags = self.arr_attr[6] + if intent.is_intent("inplace") and not ( + obj.dtype == typ and obj.flags["F_CONTIGUOUS"] + ): + assert flags2names(8192) == ["WRITEBACKIFCOPY"] + assert (arr_flags & 8192), f"{flags2names(8192)} not set." + arr_flags -= 8192 # Not easy to set on pyarr. + else: + assert not (arr_flags & 8192) + + assert arr_flags == self.pyarr_attr[6], repr(( self.arr_attr[6], self.pyarr_attr[6], flags2names(0 * self.arr_attr[6] - self.pyarr_attr[6]), @@ -651,14 +661,34 @@ def test_inplace(self): assert not obj.flags["FORTRAN"] and obj.flags["CONTIGUOUS"] shape = obj.shape a = self.array(shape, intent.inplace, obj) + # Spot check that they contain the same information initially. assert obj[1][2] == a.arr[1][2], repr((obj, a.arr)) - a.arr[1][2] = 54 - assert obj[1][2] == a.arr[1][2] == np.array(54, dtype=self.type.dtype) + # If we change a.arr, that will not immediatetly be reflected in obj. 
+ change_item = 54 if self.type.dtype != bool else False + a.arr[1][2] = change_item + assert a.arr[1][2] == np.array(change_item, dtype=self.type.dtype) + assert obj[1][2] != np.array(change_item, dtype=self.type.dtype) + # This is because our implementation uses writebackifcopy. + assert a.arr.flags["WRITEBACKIFCOPY"] + assert a.arr.base is obj + # It has a different organization from obj. + assert a.arr.flags["FORTRAN"] and not a.arr.flags["CONTIGUOUS"] + # If we resolve the write-back, obj will be propertly filled. + code = wrap.resolve_write_back_if_copy(a.arr) + assert code == 1, "no write-back resolution was done!" + assert obj[1][2] == np.array(change_item, dtype=self.type.dtype) + # Check that the original's attributes are not messed up. + assert not obj.flags["FORTRAN"] and obj.flags["CONTIGUOUS"] + + def test_inplace_f_order(self): + # If the input array is suitable, it will just be used. + obj = np.array(self.num23seq, dtype=self.type.dtype, order="F") + assert obj.flags["FORTRAN"] and not obj.flags["CONTIGUOUS"] + a = self.array(obj.shape, intent.inplace, obj) assert a.arr is obj - assert obj.flags["FORTRAN"] # obj attributes are changed inplace! - assert not obj.flags["CONTIGUOUS"] def test_inplace_from_casttype(self): + # Similar to above, but including casting. for t in self.type.cast_types(): if t is self.type: continue @@ -669,10 +699,22 @@ def test_inplace_from_casttype(self): shape = obj.shape a = self.array(shape, intent.inplace, obj) assert obj[1][2] == a.arr[1][2], repr((obj, a.arr)) - a.arr[1][2] = 54 - assert obj[1][2] == a.arr[1][2] == np.array(54, - dtype=self.type.dtype) - assert a.arr is obj - assert obj.flags["FORTRAN"] # obj attributes changed inplace! - assert not obj.flags["CONTIGUOUS"] - assert obj.dtype.type is self.type.type # obj changed inplace! + change_item = 54 if self.type.dtype != bool else False + a.arr[1][2] = change_item + assert a.arr[1][2] == np.array(change_item, dtype=self.type.dtype) + # Not yet propagated. 
+ assert obj[1][2] != np.array(change_item, dtype=self.type.dtype) + assert a.arr.flags["WRITEBACKIFCOPY"] + assert a.arr.base is obj + # Propagate back to obj, ignoring warnings about loosing .imag. + if (np.issubdtype(a.arr.dtype, np.complexfloating) + and not np.issubdtype(t.dtype, np.complexfloating)): + with pytest.warns(np.exceptions.ComplexWarning): + code = wrap.resolve_write_back_if_copy(a.arr) + else: + code = wrap.resolve_write_back_if_copy(a.arr) + assert code == 1, "no write-back resolution was done!" + assert obj[1][2] == np.array(change_item, dtype=self.type.dtype) + # Should not affect attributes. + assert not obj.flags["FORTRAN"] and obj.flags["CONTIGUOUS"] + assert obj.dtype.type is not self.type.type From 863e307509fda925dd101ee4abbcd09841df0df9 Mon Sep 17 00:00:00 2001 From: Marten Henric van Kerkwijk Date: Sat, 11 Oct 2025 16:03:16 -0400 Subject: [PATCH 1375/1718] DOC: update description of the effects of intent(inplace) --- doc/source/f2py/f2py.getting-started.rst | 5 ++--- doc/source/f2py/python-usage.rst | 5 +++-- doc/source/f2py/signature-file.rst | 12 ++++-------- 3 files changed, 9 insertions(+), 13 deletions(-) diff --git a/doc/source/f2py/f2py.getting-started.rst b/doc/source/f2py/f2py.getting-started.rst index b6951b11da8d..e5df85b93a75 100644 --- a/doc/source/f2py/f2py.getting-started.rst +++ b/doc/source/f2py/f2py.getting-started.rst @@ -154,9 +154,8 @@ Fortran subroutine ``FIB`` is accessible via ``fib1.fib``:: Clearly, this is unexpected, as Fortran typically passes by reference. That the above example worked with ``dtype=float`` is considered accidental. - F2PY provides an ``intent(inplace)`` attribute that modifies the attributes - of an input array so that any changes made by the Fortran routine will be - reflected in the input argument. For example, if one specifies the + F2PY provides an ``intent(inplace)`` attribute that ensures that changes + are copied back to the input argument. 
For example, if one specifies the ``intent(inplace) a`` directive (see :ref:`f2py-attributes` for details), then the example above would read:: diff --git a/doc/source/f2py/python-usage.rst b/doc/source/f2py/python-usage.rst index 8c68b6e03e2e..fa9af7b97e57 100644 --- a/doc/source/f2py/python-usage.rst +++ b/doc/source/f2py/python-usage.rst @@ -115,8 +115,9 @@ two notable exceptions: * ``intent(inout)`` array arguments must always be :term:`proper-contiguous ` and have a compatible ``dtype``, otherwise an exception is raised. -* ``intent(inplace)`` array arguments will be changed *in situ* if the argument - has a different type than expected (see the ``intent(inplace)`` +* ``intent(inplace)`` array arguments must be arrays. If these have + incompatible order or type, a converted copy is passed in, which is + copied back into the original array on exit (see the ``intent(inplace)`` :ref:`attribute ` for more information). In general, if a NumPy array is :term:`proper-contiguous ` and has diff --git a/doc/source/f2py/signature-file.rst b/doc/source/f2py/signature-file.rst index ba370d73582b..ebb05b48e0d5 100644 --- a/doc/source/f2py/signature-file.rst +++ b/doc/source/f2py/signature-file.rst @@ -393,17 +393,13 @@ The following attributes can be used by F2PY. The corresponding argument is considered to be an input/output or *in situ* output argument. ``intent(inplace)`` arguments must be NumPy arrays of a proper size. If the type of an array is not "proper" or the array is - non-contiguous then the array will be modified in-place to fix the type and - make it contiguous. + non-contiguous then the routine will be passed a fixed copy of array, + which has the :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` flag set, so that the + result will be copied back to the original array on exit. .. note:: - Using ``intent(inplace)`` is generally not recommended either. 
- - For example, when slices have been taken from an ``intent(inplace)`` argument - then after in-place changes, the data pointers for the slices may point to - an unallocated memory area. - + ``intent(inplace)`` is recommended over ``inout``, but not over ``in,out``. * ``out`` The corresponding argument is considered to be a return variable. It is appended to the From 04ec8dd189b627c8206b3ab7580eab7cf80664e6 Mon Sep 17 00:00:00 2001 From: Marten Henric van Kerkwijk Date: Sat, 11 Oct 2025 16:15:40 -0400 Subject: [PATCH 1376/1718] DOC: what's new entry --- doc/release/upcoming_changes/29929.improvement.rst | 9 +++++++++ 1 file changed, 9 insertions(+) create mode 100644 doc/release/upcoming_changes/29929.improvement.rst diff --git a/doc/release/upcoming_changes/29929.improvement.rst b/doc/release/upcoming_changes/29929.improvement.rst new file mode 100644 index 000000000000..20c09ec5acd9 --- /dev/null +++ b/doc/release/upcoming_changes/29929.improvement.rst @@ -0,0 +1,9 @@ +For ``f2py``, the behaviour of ``intent(inplace)`` has improved. +Previously, if an input array did not have the right dtype or order, +the input array was modified in-place, changing its dtype and +replacing its data by a corrected copy. Now, instead, the corrected +copy is kept a separate array, which, after being passed and +presumably modified by the fortran routine, is copied back to the +input routine. This means one no longer has the risk that pre-existing +views or slices of the input array start pointing to unallocated +memory. 
From 6b1d095a56ee617d3d3802e924df09e39a085dc3 Mon Sep 17 00:00:00 2001 From: Noxaster <208382900+Noxaster@users.noreply.github.com> Date: Fri, 6 Feb 2026 22:30:01 +0100 Subject: [PATCH 1377/1718] MAINT: Use intermediate variable to be more readable --- numpy/_core/arrayprint.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/numpy/_core/arrayprint.py b/numpy/_core/arrayprint.py index e90334db35e0..957687488829 100644 --- a/numpy/_core/arrayprint.py +++ b/numpy/_core/arrayprint.py @@ -1415,10 +1415,11 @@ def __call__(self, x): return super().__call__(x) def _format_non_nat(self, x): - return f"'{datetime_as_string(x, - unit=self.unit, - timezone=self.timezone, - casting=self.casting)}'" + datetime_str = datetime_as_string(x, + unit=self.unit, + timezone=self.timezone, + casting=self.casting) + return f"'{datetime_str}'" class TimedeltaFormat(_TimelikeFormat): From b454fedab2d8fc30ea59a36702ff8c1a91810d73 Mon Sep 17 00:00:00 2001 From: Marten Henric van Kerkwijk Date: Wed, 10 Dec 2025 09:38:02 -0500 Subject: [PATCH 1378/1718] MAINT: Enforce same kind array for intent(inplace) --- .../upcoming_changes/29929.improvement.rst | 16 +++++++++--- doc/source/f2py/python-usage.rst | 2 +- doc/source/f2py/signature-file.rst | 5 ++-- numpy/f2py/src/fortranobject.c | 17 ++++++++++++- numpy/f2py/tests/test_array_from_pyobj.py | 25 +++++++++++++------ numpy/f2py/tests/test_inplace.py | 6 +++++ 6 files changed, 56 insertions(+), 15 deletions(-) diff --git a/doc/release/upcoming_changes/29929.improvement.rst b/doc/release/upcoming_changes/29929.improvement.rst index 20c09ec5acd9..633d8768a03f 100644 --- a/doc/release/upcoming_changes/29929.improvement.rst +++ b/doc/release/upcoming_changes/29929.improvement.rst @@ -4,6 +4,16 @@ the input array was modified in-place, changing its dtype and replacing its data by a corrected copy. 
Now, instead, the corrected copy is kept a separate array, which, after being passed and presumably modified by the fortran routine, is copied back to the -input routine. This means one no longer has the risk that pre-existing -views or slices of the input array start pointing to unallocated -memory. +input routine. The above means one no longer has the risk that +pre-existing views or slices of the input array start pointing to +unallocated memory (at the price of increased overhead for the +write-back copy at the end of the call). + +A potential problem would be that one might get very different results +if one, e.g., previously passed in an integer array where a double +array was expected: the writeback to integer would likely give wrong +results. To avoid such situations, ``intent(inplace)`` will now only +allow arrays that have equivalent type to that used in the fortran +routine, i.e., ``dtype.kind`` is the same. For instance, a routine +expecting double would be able to receive float, but would raise on +integer input. diff --git a/doc/source/f2py/python-usage.rst b/doc/source/f2py/python-usage.rst index fa9af7b97e57..ecffd695e05a 100644 --- a/doc/source/f2py/python-usage.rst +++ b/doc/source/f2py/python-usage.rst @@ -116,7 +116,7 @@ two notable exceptions: :term:`proper-contiguous ` and have a compatible ``dtype``, otherwise an exception is raised. * ``intent(inplace)`` array arguments must be arrays. If these have - incompatible order or type, a converted copy is passed in, which is + incompatible order or size, a converted copy is passed in, which is copied back into the original array on exit (see the ``intent(inplace)`` :ref:`attribute ` for more information). diff --git a/doc/source/f2py/signature-file.rst b/doc/source/f2py/signature-file.rst index ebb05b48e0d5..3ac47b113745 100644 --- a/doc/source/f2py/signature-file.rst +++ b/doc/source/f2py/signature-file.rst @@ -392,14 +392,15 @@ The following attributes can be used by F2PY. 
* ``inplace`` The corresponding argument is considered to be an input/output or *in situ* output argument. ``intent(inplace)`` arguments must be NumPy arrays of a proper - size. If the type of an array is not "proper" or the array is + size. If the size of an array is not "proper" or the array is non-contiguous then the routine will be passed a fixed copy of array, which has the :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` flag set, so that the result will be copied back to the original array on exit. .. note:: - ``intent(inplace)`` is recommended over ``inout``, but not over ``in,out``. + Since copies may be made, ``intent(inplace)`` can be slower than expected. + It is recommended over ``inout``, but not over ``in,out``. * ``out`` The corresponding argument is considered to be a return variable. It is appended to the diff --git a/numpy/f2py/src/fortranobject.c b/numpy/f2py/src/fortranobject.c index e7a9b42989d3..f368b18292d7 100644 --- a/numpy/f2py/src/fortranobject.c +++ b/numpy/f2py/src/fortranobject.c @@ -1026,13 +1026,28 @@ ndarray_from_pyobj(const int type_num, return NULL; } - /* here we have always intent(in) or intent(inplace) */ + /* + * Here, we have always intent(in) or intent(inplace) + * and require a copy for input. We allow arbitrary casting for + * input, but for inplace we check that the types are equivalent. + */ { int flags = NPY_ARRAY_FORCECAST | NPY_ARRAY_ENSURECOPY | ((intent & F2PY_INTENT_C) ? 
NPY_ARRAY_IN_ARRAY : NPY_ARRAY_IN_FARRAY); if (intent & F2PY_INTENT_INPLACE) { + if (!(ARRAY_ISCOMPATIBLE(arr, type_num)) || + (PyArray_ISSIGNED(arr) && PyTypeNum_ISUNSIGNED(type_num)) || + (PyArray_ISUNSIGNED(arr) && PyTypeNum_ISSIGNED(type_num)) + ) { + sprintf(mess, "failed to initialize intent(inplace) array" + " -- input '%c' not compatible to '%c'", + PyArray_DESCR(arr)->type, descr->type); + PyErr_SetString(PyExc_ValueError, mess); + Py_DECREF(descr); + return NULL; + } flags |= NPY_ARRAY_WRITEBACKIFCOPY; } /* Steals reference to descr */ diff --git a/numpy/f2py/tests/test_array_from_pyobj.py b/numpy/f2py/tests/test_array_from_pyobj.py index 7649954c8d53..9046f2df4fa4 100644 --- a/numpy/f2py/tests/test_array_from_pyobj.py +++ b/numpy/f2py/tests/test_array_from_pyobj.py @@ -697,7 +697,21 @@ def test_inplace_from_casttype(self): assert obj.dtype.type is not self.type.type assert not obj.flags["FORTRAN"] and obj.flags["CONTIGUOUS"] shape = obj.shape - a = self.array(shape, intent.inplace, obj) + same_kind = obj.dtype.kind == self.type.dtype.kind + # We avoid pytest.raises here since if the error is not raised, + # we need to do the callback to avoid a runtime warning. + try: + a = self.array(shape, intent.inplace, obj) + except ValueError as exc: + assert not same_kind, "Array not created while having same kind" + assert "not compatible" in str(exc) + return + + if not same_kind: + # Shouldn't happen! Resolve write-back to get right error. + wrap.resolve_write_back_if_copy(a.arr) + assert same_kind, "Array created despite not having same kind" + assert obj[1][2] == a.arr[1][2], repr((obj, a.arr)) change_item = 54 if self.type.dtype != bool else False a.arr[1][2] = change_item @@ -706,13 +720,8 @@ def test_inplace_from_casttype(self): assert obj[1][2] != np.array(change_item, dtype=self.type.dtype) assert a.arr.flags["WRITEBACKIFCOPY"] assert a.arr.base is obj - # Propagate back to obj, ignoring warnings about loosing .imag. 
- if (np.issubdtype(a.arr.dtype, np.complexfloating) - and not np.issubdtype(t.dtype, np.complexfloating)): - with pytest.warns(np.exceptions.ComplexWarning): - code = wrap.resolve_write_back_if_copy(a.arr) - else: - code = wrap.resolve_write_back_if_copy(a.arr) + # Propagate back to obj. + code = wrap.resolve_write_back_if_copy(a.arr) assert code == 1, "no write-back resolution was done!" assert obj[1][2] == np.array(change_item, dtype=self.type.dtype) # Should not affect attributes. diff --git a/numpy/f2py/tests/test_inplace.py b/numpy/f2py/tests/test_inplace.py index 296af84f94ef..35af2a42db6a 100644 --- a/numpy/f2py/tests/test_inplace.py +++ b/numpy/f2py/tests/test_inplace.py @@ -41,3 +41,9 @@ def test_inplace(self, dtype, view, writeable, func): assert k is ref_k assert np.allclose(k, exp_k) assert np.allclose(a, exp_a) + + @pytest.mark.parametrize("func", ["inplace", "inplace_out"]) + def test_inplace_error(self, func): + ffunc = getattr(self.module, func) + with pytest.raises(ValueError, match="input.*not compatible"): + ffunc(np.array([1 + 1j])) From 79f2d32e43723ef6c3af5d29799d0828c6280216 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 8 Feb 2026 17:40:29 +0100 Subject: [PATCH 1379/1718] DEP: deprecate ``np.char.[as]array`` --- doc/release/upcoming_changes/30802.deprecation.rst | 1 + numpy/_core/defchararray.py | 8 ++++++++ numpy/_core/defchararray.pyi | 14 ++++++++++++++ numpy/_core/tests/test_defchararray.py | 3 ++- numpy/char/__init__.py | 8 +++----- 5 files changed, 28 insertions(+), 6 deletions(-) create mode 100644 doc/release/upcoming_changes/30802.deprecation.rst diff --git a/doc/release/upcoming_changes/30802.deprecation.rst b/doc/release/upcoming_changes/30802.deprecation.rst new file mode 100644 index 000000000000..82fe6672b885 --- /dev/null +++ b/doc/release/upcoming_changes/30802.deprecation.rst @@ -0,0 +1 @@ +* The ``numpy.char.[as]array`` functions are deprecated. Use an ``numpy.[as]array`` with a string or bytes dtype instead. 
diff --git a/numpy/_core/defchararray.py b/numpy/_core/defchararray.py index 61274eb1a4a0..5883bb6be5f5 100644 --- a/numpy/_core/defchararray.py +++ b/numpy/_core/defchararray.py @@ -1219,6 +1219,10 @@ def array(obj, itemsize=None, copy=True, unicode=None, order=None): """ Create a `~numpy.char.chararray`. + .. deprecated:: 2.5 + ``chararray`` is deprecated. Use an ``ndarray`` with a string or + bytes dtype instead. + .. note:: This class is provided for numarray backward-compatibility. New code (not concerned with numarray compatibility) should use @@ -1363,6 +1367,10 @@ def asarray(obj, itemsize=None, unicode=None, order=None): Convert the input to a `~numpy.char.chararray`, copying the data only if necessary. + .. deprecated:: 2.5 + ``chararray`` is deprecated. Use an ``ndarray`` with a string or + bytes dtype instead. + Versus a NumPy array of dtype `bytes_` or `str_`, this class adds the following functionality: diff --git a/numpy/_core/defchararray.pyi b/numpy/_core/defchararray.pyi index b339b50c37aa..1fb086a3d451 100644 --- a/numpy/_core/defchararray.pyi +++ b/numpy/_core/defchararray.pyi @@ -661,6 +661,7 @@ def str_len(A: UST_co) -> NDArray[int_]: ... # overload 5 and 6: arbitrary object with unicode=True (-> str_) # overload 7: arbitrary object with unicode=None (default) (-> str_ | bytes_) @overload +@deprecated("numpy.char.array is deprecated and will be removed in a future release.") def array( obj: U_co, itemsize: int | None = None, @@ -669,6 +670,7 @@ def array( order: _OrderKACF = None, ) -> _CharArray[str_]: ... @overload +@deprecated("numpy.char.array is deprecated and will be removed in a future release.") def array( obj: S_co, itemsize: int | None = None, @@ -677,6 +679,7 @@ def array( order: _OrderKACF = None, ) -> _CharArray[bytes_]: ... 
@overload +@deprecated("numpy.char.array is deprecated and will be removed in a future release.") def array( obj: object, itemsize: int | None, @@ -685,6 +688,7 @@ def array( order: _OrderKACF = None, ) -> _CharArray[bytes_]: ... @overload +@deprecated("numpy.char.array is deprecated and will be removed in a future release.") def array( obj: object, itemsize: int | None = None, @@ -694,6 +698,7 @@ def array( order: _OrderKACF = None, ) -> _CharArray[bytes_]: ... @overload +@deprecated("numpy.char.array is deprecated and will be removed in a future release.") def array( obj: object, itemsize: int | None, @@ -702,6 +707,7 @@ def array( order: _OrderKACF = None, ) -> _CharArray[str_]: ... @overload +@deprecated("numpy.char.array is deprecated and will be removed in a future release.") def array( obj: object, itemsize: int | None = None, @@ -711,6 +717,7 @@ def array( order: _OrderKACF = None, ) -> _CharArray[str_]: ... @overload +@deprecated("numpy.char.array is deprecated and will be removed in a future release.") def array( obj: object, itemsize: int | None = None, @@ -720,6 +727,7 @@ def array( ) -> _CharArray[str_] | _CharArray[bytes_]: ... @overload +@deprecated("numpy.char.asarray is deprecated and will be removed in a future release.") def asarray( obj: U_co, itemsize: int | None = None, @@ -727,6 +735,7 @@ def asarray( order: _OrderKACF = None, ) -> _CharArray[str_]: ... @overload +@deprecated("numpy.char.asarray is deprecated and will be removed in a future release.") def asarray( obj: S_co, itemsize: int | None = None, @@ -734,6 +743,7 @@ def asarray( order: _OrderKACF = None, ) -> _CharArray[bytes_]: ... @overload +@deprecated("numpy.char.asarray is deprecated and will be removed in a future release.") def asarray( obj: object, itemsize: int | None, @@ -741,6 +751,7 @@ def asarray( order: _OrderKACF = None, ) -> _CharArray[bytes_]: ... 
@overload +@deprecated("numpy.char.asarray is deprecated and will be removed in a future release.") def asarray( obj: object, itemsize: int | None = None, @@ -749,6 +760,7 @@ def asarray( order: _OrderKACF = None, ) -> _CharArray[bytes_]: ... @overload +@deprecated("numpy.char.asarray is deprecated and will be removed in a future release.") def asarray( obj: object, itemsize: int | None, @@ -756,6 +768,7 @@ def asarray( order: _OrderKACF = None, ) -> _CharArray[str_]: ... @overload +@deprecated("numpy.char.asarray is deprecated and will be removed in a future release.") def asarray( obj: object, itemsize: int | None = None, @@ -764,6 +777,7 @@ def asarray( order: _OrderKACF = None, ) -> _CharArray[str_]: ... @overload +@deprecated("numpy.char.asarray is deprecated and will be removed in a future release.") def asarray( obj: object, itemsize: int | None = None, diff --git a/numpy/_core/tests/test_defchararray.py b/numpy/_core/tests/test_defchararray.py index 643293580708..33db8747e0e6 100644 --- a/numpy/_core/tests/test_defchararray.py +++ b/numpy/_core/tests/test_defchararray.py @@ -14,10 +14,11 @@ kw_unicode_false = {'unicode': False} ignore_charray_deprecation = pytest.mark.filterwarnings( - r"ignore:\w+ chararray \w+:DeprecationWarning" + r"ignore:\w+ (chararray|array|asarray) \w+:DeprecationWarning" ) +@ignore_charray_deprecation class TestBasic: def test_from_object_array(self): A = np.array([['abc', 2], diff --git a/numpy/char/__init__.py b/numpy/char/__init__.py index dd6c36c84451..a757fcee58ac 100644 --- a/numpy/char/__init__.py +++ b/numpy/char/__init__.py @@ -1,8 +1,10 @@ from numpy._core.defchararray import __all__, __doc__ +__DEPRECATED = frozenset({"chararray", "array", "asarray"}) + def __getattr__(name: str): - if name == "chararray": + if name in __DEPRECATED: # Deprecated in NumPy 2.5, 2026-01-07 import warnings @@ -15,10 +17,6 @@ def __getattr__(name: str): stacklevel=2, ) - from numpy._core.defchararray import chararray - - return chararray - 
import numpy._core.defchararray as char if (export := getattr(char, name, None)) is not None: From e247dcbd44d8537277addb3e8815cea667d662f7 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Mon, 9 Feb 2026 12:09:16 +0100 Subject: [PATCH 1380/1718] NEP: add Kumar Adity to the free-threaded CPython platform maintainers --- doc/neps/nep-0057-numpy-platform-support.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/neps/nep-0057-numpy-platform-support.rst b/doc/neps/nep-0057-numpy-platform-support.rst index 309df1916178..1e76b49fd4b7 100644 --- a/doc/neps/nep-0057-numpy-platform-support.rst +++ b/doc/neps/nep-0057-numpy-platform-support.rst @@ -187,7 +187,7 @@ Tier 2 platforms: - Linux x86-64 (musllinux), *Ralf Gommers* - Linux aarch64 (musllinux), *Ralf Gommers* -- Free-threaded CPython, *Nathan Goldbaum, Ralf Gommers* +- Free-threaded CPython, *Nathan Goldbaum, Kumar Aditya, Ralf Gommers* Tier 3 From 8167d51b451d4d773c5404492aa8f0ca44583fc3 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Mon, 9 Feb 2026 12:22:35 +0100 Subject: [PATCH 1381/1718] NEP: change lists of platforms to reST tables [skip actions] [skip cirrus] [skip azp] --- doc/neps/nep-0057-numpy-platform-support.rst | 90 +++++++++++++++----- 1 file changed, 69 insertions(+), 21 deletions(-) diff --git a/doc/neps/nep-0057-numpy-platform-support.rst b/doc/neps/nep-0057-numpy-platform-support.rst index 1e76b49fd4b7..81be3ea5baaf 100644 --- a/doc/neps/nep-0057-numpy-platform-support.rst +++ b/doc/neps/nep-0057-numpy-platform-support.rst @@ -164,13 +164,24 @@ Tier 1 Tier 1 platforms: -- Windows x86-64 -- Windows arm64 -- Windows x86 (32-bit Python: note this is shipped without BLAS, it's legacy) -- Linux x86-64 (manylinux) -- Linux aarch64 (manylinux) -- macOS arm64 -- macOS x86-64 (expected to move to unsupported by 2027/28 once the platform is dropped by GitHub) ++---------------------------+--------------------------------------------------------------------------+ +| Platform | 
Notes | ++===========================+==========================================================================+ +| Windows x86-64 | | ++---------------------------+--------------------------------------------------------------------------+ +| Windows arm64 | | ++---------------------------+--------------------------------------------------------------------------+ +| Windows x86 | 32-bit Python: note this is shipped without BLAS, it's legacy | ++---------------------------+--------------------------------------------------------------------------+ +| Linux x86-64 (manylinux) | | ++---------------------------+--------------------------------------------------------------------------+ +| Linux aarch64 (manylinux) | | ++---------------------------+--------------------------------------------------------------------------+ +| macOS arm64 | | ++---------------------------+--------------------------------------------------------------------------+ +| macOS x86-64 | Expected to move to unsupported by 2027/28 once the platform is dropped | +| | by GitHub | ++---------------------------+--------------------------------------------------------------------------+ Tier 2 @@ -185,9 +196,16 @@ Tier 2 Tier 2 platforms: -- Linux x86-64 (musllinux), *Ralf Gommers* -- Linux aarch64 (musllinux), *Ralf Gommers* -- Free-threaded CPython, *Nathan Goldbaum, Kumar Aditya, Ralf Gommers* ++---------------------------+-------+------------------------------------------+ +| Platform | Notes | Contacts | ++===========================+=======+==========================================+ +| Linux x86-64 (musllinux) | | Ralf Gommers | ++---------------------------+-------+------------------------------------------+ +| Linux aarch64 (musllinux) | | Ralf Gommers | ++---------------------------+-------+------------------------------------------+ +| Free-threaded CPython | | Nathan Goldbaum, Kumar Aditya, | +| | | Ralf Gommers | 
++---------------------------+-------+------------------------------------------+ Tier 3 @@ -204,9 +222,17 @@ Tier 3 Tier 3 platforms: -- FreeBSD (note: runs on Cirrus CI), *Ralf Gommers* -- Linux ppc64le (note: runs on IBM-provided self-hosted runners), *Sandeep Gupta* -- Emscripten/Pyodide, *Agriya Khetarpal, Gyeongjae Choi* ++--------------------+----------------------------------------+----------------------------------+ +| Platform | Notes | Contacts | ++====================+========================================+==================================+ +| FreeBSD | Runs on Cirrus CI | Ralf Gommers | ++--------------------+----------------------------------------+----------------------------------+ +| Linux ppc64le | Runs on IBM-provided self-hosted | Sandeep Gupta | +| | runners, see gh-22318_ | | ++--------------------+----------------------------------------+----------------------------------+ +| Emscripten/Pyodide | We provide nightly wheels, used for | Agriya Khetarpal, Gyeongjae Choi | +| | interactive docs | | ++--------------------+----------------------------------------+----------------------------------+ Unsupported platforms @@ -226,17 +252,31 @@ they: Unsupported platforms (previously in a supported tier, may be an incomplete list): -- PyPy -- macOS ppc64, universal, universal2 -- Linux i686 -- Linux on IBM Z (s390x) ++------------------------------------+--------------------------------+ +| Platform | Notes | ++====================================+================================+ +| PyPy | See gh-30416_ | ++------------------------------------+--------------------------------+ +| macOS ppc64, universal, universal2 | | ++------------------------------------+--------------------------------+ +| Linux i686 | Dropped in 1.22.0, low demand | ++------------------------------------+--------------------------------+ +| Linux on IBM Z (s390x) | CI jobs used to run on TravisCI| ++------------------------------------+--------------------------------+ 
Unsupported platforms (known interest in moving to a higher tier): -- iOS -- Android -- RISC-V -- WASI ++----------+------------------+ +| Platform | Notes | ++==========+==================+ +| iOS | See gh-28759_ | ++----------+------------------+ +| Android | See gh-30412_ | ++----------+------------------+ +| RISC-V | See gh-30216_ | ++----------+------------------+ +| WASI | See gh-25859_ | ++----------+------------------+ Backward compatibility @@ -271,6 +311,14 @@ Discussion References and footnotes ------------------------ +.. _gh-22318: https://github.com/numpy/numpy/issues/22318 +.. _gh-22530: https://github.com/numpy/numpy/issues/22530 +.. _gh-25859: https://github.com/numpy/numpy/issues/25859 +.. _gh-28759: https://github.com/numpy/numpy/pull/28759 +.. _gh-30216: https://github.com/numpy/numpy/issues/30216 +.. _gh-30412: https://github.com/numpy/numpy/pull/30412 +.. _gh-30416: https://github.com/numpy/numpy/issues/30416 + Copyright --------- From 938450b001d6dce6b19a7d7ac854c5e69348f2fb Mon Sep 17 00:00:00 2001 From: mdrdope <127673676+mdrdope@users.noreply.github.com> Date: Mon, 9 Feb 2026 18:40:34 +0000 Subject: [PATCH 1382/1718] PERF: speed up array_equal for equal_nan=True --- numpy/_core/numeric.py | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/numpy/_core/numeric.py b/numpy/_core/numeric.py index fd9737a3fe72..07a0966096bf 100644 --- a/numpy/_core/numeric.py +++ b/numpy/_core/numeric.py @@ -2471,7 +2471,6 @@ def _array_equal_dispatcher(a1, a2, equal_nan=None): def _dtype_cannot_hold_nan(dtype): return type(dtype) in _no_nan_types - @array_function_dispatch(_array_equal_dispatcher) def array_equal(a1, a2, equal_nan=False): """ @@ -2549,14 +2548,10 @@ def array_equal(a1, a2, equal_nan=False): if cannot_have_nan: return builtins.bool(asarray(a1 == a2).all()) - # Handling NaN values if equal_nan is True - a1nan, a2nan = isnan(a1), isnan(a2) - # NaN's occur at different locations - if not (a1nan == a2nan).all(): - return 
False - # Shapes of a1, a2 and masks are guaranteed to be consistent by this point - return builtins.bool((a1[~a1nan] == a2[~a1nan]).all()) - + # Optimized NaN handling: single vectorized predicate + reduction + # replaces the original boolean-mask + fancy-index approach + equal_or_both_nan = (a1 == a2) | (isnan(a1) & isnan(a2)) + return builtins.bool(equal_or_both_nan.all()) def _array_equiv_dispatcher(a1, a2): return (a1, a2) From 0eeffd47ff227ba4ff5a5fdcde92208cdaee6794 Mon Sep 17 00:00:00 2001 From: mdrdope <127673676+mdrdope@users.noreply.github.com> Date: Tue, 10 Feb 2026 13:27:15 +0000 Subject: [PATCH 1383/1718] Improve NaN comparison logic in array_equal Optimize NaN handling in array_equal function. --- numpy/_core/numeric.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/numpy/_core/numeric.py b/numpy/_core/numeric.py index 07a0966096bf..ead7a56a9f0f 100644 --- a/numpy/_core/numeric.py +++ b/numpy/_core/numeric.py @@ -2471,6 +2471,7 @@ def _array_equal_dispatcher(a1, a2, equal_nan=None): def _dtype_cannot_hold_nan(dtype): return type(dtype) in _no_nan_types + @array_function_dispatch(_array_equal_dispatcher) def array_equal(a1, a2, equal_nan=False): """ @@ -2548,11 +2549,15 @@ def array_equal(a1, a2, equal_nan=False): if cannot_have_nan: return builtins.bool(asarray(a1 == a2).all()) - # Optimized NaN handling: single vectorized predicate + reduction - # replaces the original boolean-mask + fancy-index approach - equal_or_both_nan = (a1 == a2) | (isnan(a1) & isnan(a2)) + # fast path for a1 and a2 being all NaN arrays + a1nan = isnan(a1) + if a1nan.all(): + return builtins.bool(isnan(a2).all()) + + equal_or_both_nan = (a1 == a2) | (a1nan & isnan(a2)) return builtins.bool(equal_or_both_nan.all()) + def _array_equiv_dispatcher(a1, a2): return (a1, a2) From 4656d66f2c976a517ebfb5b7bbd97d0261850bc9 Mon Sep 17 00:00:00 2001 From: mdrdope <127673676+mdrdope@users.noreply.github.com> Date: Tue, 10 Feb 2026 13:28:12 +0000 Subject: 
[PATCH 1384/1718] Fix comment capitalization for clarity --- numpy/_core/numeric.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/numeric.py b/numpy/_core/numeric.py index ead7a56a9f0f..f52d8cdbad8f 100644 --- a/numpy/_core/numeric.py +++ b/numpy/_core/numeric.py @@ -2549,7 +2549,7 @@ def array_equal(a1, a2, equal_nan=False): if cannot_have_nan: return builtins.bool(asarray(a1 == a2).all()) - # fast path for a1 and a2 being all NaN arrays + # Fast path for a1 and a2 being all NaN arrays a1nan = isnan(a1) if a1nan.all(): return builtins.bool(isnan(a2).all()) From e54b05ba0b241bd2d34f3ccf19d2e7cb4a4fe794 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Tue, 10 Feb 2026 14:42:25 +0100 Subject: [PATCH 1385/1718] address review comments from Matti [skip actions] [skip cirrus] [skip azp] --- doc/neps/nep-0057-numpy-platform-support.rst | 51 +++++++++++--------- 1 file changed, 27 insertions(+), 24 deletions(-) diff --git a/doc/neps/nep-0057-numpy-platform-support.rst b/doc/neps/nep-0057-numpy-platform-support.rst index 81be3ea5baaf..1a652410c2bb 100644 --- a/doc/neps/nep-0057-numpy-platform-support.rst +++ b/doc/neps/nep-0057-numpy-platform-support.rst @@ -61,15 +61,17 @@ CPython.* Platform support is broken down into tiers. Each tier comes with different requirements which lead to different promises being made about support. -To be promoted to a tier, steering council support is required and is expected -to be driven by team consensus. Demotion to a lower tier occurs when the -requirements of the current tier are no longer met for a platform for an -extended period of time based on the judgment of the steering council. For -platforms which no longer meet the requirements of any tier by the middle of a -new feature release cycle, an announcement will be made to warn the community -of the pending removal of support for the platform. 
If the platform is not -brought into line for at least one of the tiers by the first release candidate, -it will be listed as unsupported in this NEP. +To be promoted to a tier, +`Steering council +`__ +support is required and is expected to be driven by team consensus. Demotion to +a lower tier occurs when the requirements of the current tier are no longer met +for a platform for an extended period of time based on the judgment of the +Steering Council. For platforms which no longer meet the requirements of any +tier by the middle of a new feature release cycle, an announcement will be made +to warn the community of the pending removal of support for the platform. If +the platform is not brought into line for at least one of the tiers by the +first release candidate, it will be listed as unsupported in this NEP. General principles @@ -113,7 +115,7 @@ General principles case-by-case basis. 5. For all platforms in any supported tier: the relevant prerequisites in our - dependencies have been met. E.g., build tools have support, and for wheels + dependencies must be met. E.g., build tools have support, and for wheels there is support in CPython, PyPI, cibuildwheel, manylinux, and ``scipy-openblas64`` or another easily-integrated BLAS library. @@ -155,7 +157,7 @@ Tier 1 ~~~~~~ - Must have regular CI support on GitHub or (exceptionally) through another - well-integrated CI platform that the release team and steering council deem + well-integrated CI platform that the release team and Steering Council deem acceptable. - The NumPy team releases wheels on PyPI for this platform. - CI failures (either regular CI or wheel build CI) block releases. 
@@ -230,8 +232,8 @@ Tier 3 platforms: | Linux ppc64le | Runs on IBM-provided self-hosted | Sandeep Gupta | | | runners, see gh-22318_ | | +--------------------+----------------------------------------+----------------------------------+ -| Emscripten/Pyodide | We provide nightly wheels, used for | Agriya Khetarpal, Gyeongjae Choi | -| | interactive docs | | +| Emscripten/Pyodide | We currently provide nightly wheels, | Agriya Khetarpal, Gyeongjae Choi | +| | used for interactive docs | | +--------------------+----------------------------------------+----------------------------------+ @@ -252,17 +254,18 @@ they: Unsupported platforms (previously in a supported tier, may be an incomplete list): -+------------------------------------+--------------------------------+ -| Platform | Notes | -+====================================+================================+ -| PyPy | See gh-30416_ | -+------------------------------------+--------------------------------+ -| macOS ppc64, universal, universal2 | | -+------------------------------------+--------------------------------+ -| Linux i686 | Dropped in 1.22.0, low demand | -+------------------------------------+--------------------------------+ -| Linux on IBM Z (s390x) | CI jobs used to run on TravisCI| -+------------------------------------+--------------------------------+ ++------------------------------------+--------------------------------------------------+ +| Platform | Notes | ++====================================+==================================================+ +| PyPy | Was Tier 2 until the 2.4.x releases, see | +| | gh-30416_ | ++------------------------------------+--------------------------------------------------+ +| macOS ppc64, universal, universal2 | | ++------------------------------------+--------------------------------------------------+ +| Linux i686 | Dropped in 1.22.0, low demand | ++------------------------------------+--------------------------------------------------+ +| Linux on IBM Z 
(s390x) | CI jobs used to run on TravisCI | ++------------------------------------+--------------------------------------------------+ Unsupported platforms (known interest in moving to a higher tier): From ff80af12ace3878763e738db9d9c51390dd79f69 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Feb 2026 17:57:21 +0000 Subject: [PATCH 1386/1718] MAINT: Bump ruff from 0.14.14 to 0.15.0 in /requirements Bumps [ruff](https://github.com/astral-sh/ruff) from 0.14.14 to 0.15.0. - [Release notes](https://github.com/astral-sh/ruff/releases) - [Changelog](https://github.com/astral-sh/ruff/blob/main/CHANGELOG.md) - [Commits](https://github.com/astral-sh/ruff/compare/0.14.14...0.15.0) --- updated-dependencies: - dependency-name: ruff dependency-version: 0.15.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- requirements/linter_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/linter_requirements.txt b/requirements/linter_requirements.txt index 81088887a0ee..fcc9af70a93e 100644 --- a/requirements/linter_requirements.txt +++ b/requirements/linter_requirements.txt @@ -1,5 +1,5 @@ # keep in sync with `environment.yml` cython-lint -ruff==0.14.14 +ruff==0.15.0 GitPython>=3.1.30 spin From 169c9959a3eb42cccfe85c28cfd5a1665024d064 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Tue, 10 Feb 2026 11:11:14 -0700 Subject: [PATCH 1387/1718] MAINT: disable Cython bindings for Cython < 3.0.0. 
(#30770) --- .../upcoming_changes/30770.compatibility.rst | 28 + doc/source/user/c-info.python-as-glue.rst | 110 +- numpy/__init__.pxd | 1155 +---------------- 3 files changed, 123 insertions(+), 1170 deletions(-) create mode 100644 doc/release/upcoming_changes/30770.compatibility.rst diff --git a/doc/release/upcoming_changes/30770.compatibility.rst b/doc/release/upcoming_changes/30770.compatibility.rst new file mode 100644 index 000000000000..a1987f77e4c5 --- /dev/null +++ b/doc/release/upcoming_changes/30770.compatibility.rst @@ -0,0 +1,28 @@ +Cython support +-------------- + +NumPy's Cython headers (accessed via ``cimport numpy``) now require +Cython 3.0 or newer to build. If you try to compile a project that depends on +NumPy's Cython headers using Cython 0.29 or older, you will see a message like +this: + +:: + + Error compiling Cython file: + ------------------------------------------------------------ + ... + # versions. + # + # See __init__.cython-30.pxd for the real Cython header + # + + DEF err = int('Build aborted: the NumPy Cython headers require Cython 3.0.0 or newer.') + ------------------------------------------------------------ + + /path/to/site-packages/numpy/__init__.pxd:11:13: Error in compile-time expression: ValueError: invalid literal for int() with base 10: 'Build aborted: the NumPy Cython headers require Cython 3.0.0 or newer.' + + +Note that the invalid integer is not a bug in NumPy - we are intentionally +generating this error to avoid triggering a more obscure error later in the +build when an older Cython version tries to use a Cython feature that was not +available in the old Cython version. diff --git a/doc/source/user/c-info.python-as-glue.rst b/doc/source/user/c-info.python-as-glue.rst index 20d3f1bb5937..75fca544d9e5 100644 --- a/doc/source/user/c-info.python-as-glue.rst +++ b/doc/source/user/c-info.python-as-glue.rst @@ -159,34 +159,102 @@ work with multidimensional arrays. Notice that Cython is an extension-module generator only. 
Unlike f2py, it includes no automatic facility for compiling and linking -the extension module (which must be done in the usual fashion). It -does provide a modified distutils class called ``build_ext`` which lets -you build an extension module from a ``.pyx`` source. Thus, you could -write in a ``setup.py`` file: +the extension module. However, many Python build tools have support for Cython. + +Here is an example of how to set up a Python project that contains a Cython +extension. The example uses the `meson-python Python build backend +`_ and `the meson build system +`_. This is the same build system NumPy itself uses. +. + +First, create a file named ``my_extension.pyx``. + +.. code-block:: cython + + cimport numpy as np + + def say_hello(): + print("Hello!") + +This file lives next to a ``__init__.py`` file with the following content: .. code-block:: python - from Cython.Distutils import build_ext - from distutils.extension import Extension - from distutils.core import setup - import numpy + from .my_extension import say_hello + +Now you need to create two more files to set up the build system. First, a +``meson.build`` file: + +.. code-block:: meson + + project( + 'module_with_extension', + 'c', 'cython', + version: '0.0.1', + license: 'MIT', + ) + + cython = find_program('cython') + py = import('python').find_installation(pure: false) + + numpy_nodepr_api = ['-DNPY_NO_DEPRECATED_API=NPY_2_0_API_VERSION'] + + np_dep = declare_dependency(dependencies: dependency('numpy'), + compile_args: numpy_nodepr_api) + + py.extension_module( + 'my_extension', + 'my_extension.pyx', + dependencies: [np_dep], + install: true, + subdir: 'my_module_with_extension', + ) + + py.install_sources( + '__init__.py', + subdir: 'my_module_with_extension', + ) + +And a ``pyproject.toml`` file with the following content: + +.. 
code-block:: toml + + [build-system] + build-backend = "mesonpy" + requires = [ + "meson-python", + "Cython>=3.0.0", + "numpy", + ] + + [project] + name = "my_module_with_extension" + version = "0.0.1" + license = "MIT" + dependencies = ["numpy"] + +You should then be able to do the following command to build, install, and call +the function defined in the extension from Python: + +.. code-block:: bash + + $ pip install . + $ python -c "from my_module_with_extension import say_hello; say_hello()" + "Hello!" - setup(name='mine', description='Nothing', - ext_modules=[Extension('filter', ['filter.pyx'], - include_dirs=[numpy.get_include()])], - cmdclass = {'build_ext':build_ext}) +Adding a NumPy dependency to your Meson configuration is only necessary +if you are using the NumPy C API in the extension module via ``cimport +numpy`` (which is what we assume you are using Cython for). If you just +use Cython to compile a standard Python module, then you will get a C +extension module that typically runs a bit faster than the equivalent +Python module. Further speed increases can be gained by using the +``cdef`` keyword to statically define C variables. -Adding the NumPy include directory is, of course, only necessary if -you are using NumPy arrays in the extension module (which is what we -assume you are using Cython for). -If you just use Cython to compile a standard Python module, then you -will get a C extension module that typically runs a bit faster than the -equivalent Python module. Further speed increases can be gained by using -the ``cdef`` keyword to statically define C variables. +See the meson and meson-python documentation for more details on how to +build more complicated extensions. Let's look at two examples we've seen before to see how they might be -implemented using Cython. These examples were compiled into extension -modules using Cython 0.21.1. +implemented using Cython. 
Complex addition in Cython diff --git a/numpy/__init__.pxd b/numpy/__init__.pxd index 40a24b6c7cc1..ddb904c1fd68 100644 --- a/numpy/__init__.pxd +++ b/numpy/__init__.pxd @@ -1,1155 +1,12 @@ # NumPy static imports for Cython < 3.0 # -# If any of the PyArray_* functions are called, import_array must be -# called first. +# DO NOT USE OR REFER TO THIS HEADER # -# Author: Dag Sverre Seljebotn +# This is provided only to generate an error message on older Cython +# versions. # - -DEF _buffer_format_string_len = 255 - -cimport cpython.buffer as pybuf -from cpython.ref cimport Py_INCREF -from cpython.mem cimport PyObject_Malloc, PyObject_Free -from cpython.object cimport PyObject, PyTypeObject -from cpython.buffer cimport PyObject_GetBuffer -from cpython.type cimport type -cimport libc.stdio as stdio - - -cdef extern from *: - # Leave a marker that the NumPy declarations came from NumPy itself and not from Cython. - # See https://github.com/cython/cython/issues/3573 - """ - /* Using NumPy API declarations from "numpy/__init__.pxd" */ - """ - - -cdef extern from "Python.h": - ctypedef int Py_intptr_t - bint PyObject_TypeCheck(object obj, PyTypeObject* type) - -cdef extern from "numpy/arrayobject.h": - # It would be nice to use size_t and ssize_t, but ssize_t has special - # implicit conversion rules, so just use "long". - # Note: The actual type only matters for Cython promotion, so long - # is closer than int, but could lead to incorrect promotion. - # (Not to worrying, and always the status-quo.) 
- ctypedef signed long npy_intp - ctypedef unsigned long npy_uintp - - ctypedef unsigned char npy_bool - - ctypedef signed char npy_byte - ctypedef signed short npy_short - ctypedef signed int npy_int - ctypedef signed long npy_long - ctypedef signed long long npy_longlong - - ctypedef unsigned char npy_ubyte - ctypedef unsigned short npy_ushort - ctypedef unsigned int npy_uint - ctypedef unsigned long npy_ulong - ctypedef unsigned long long npy_ulonglong - - ctypedef float npy_float - ctypedef double npy_double - ctypedef long double npy_longdouble - - ctypedef signed char npy_int8 - ctypedef signed short npy_int16 - ctypedef signed int npy_int32 - ctypedef signed long long npy_int64 - - ctypedef unsigned char npy_uint8 - ctypedef unsigned short npy_uint16 - ctypedef unsigned int npy_uint32 - ctypedef unsigned long long npy_uint64 - - ctypedef float npy_float32 - ctypedef double npy_float64 - ctypedef long double npy_float80 - ctypedef long double npy_float96 - ctypedef long double npy_float128 - - ctypedef struct npy_cfloat: - pass - - ctypedef struct npy_cdouble: - pass - - ctypedef struct npy_clongdouble: - pass - - ctypedef struct npy_complex64: - pass - - ctypedef struct npy_complex128: - pass - - ctypedef struct npy_complex160: - pass - - ctypedef struct npy_complex192: - pass - - ctypedef struct npy_complex256: - pass - - ctypedef struct PyArray_Dims: - npy_intp *ptr - int len - - - cdef enum NPY_TYPES: - NPY_BOOL - NPY_BYTE - NPY_UBYTE - NPY_SHORT - NPY_USHORT - NPY_INT - NPY_UINT - NPY_LONG - NPY_ULONG - NPY_LONGLONG - NPY_ULONGLONG - NPY_FLOAT - NPY_DOUBLE - NPY_LONGDOUBLE - NPY_CFLOAT - NPY_CDOUBLE - NPY_CLONGDOUBLE - NPY_OBJECT - NPY_STRING - NPY_UNICODE - NPY_VSTRING - NPY_VOID - NPY_DATETIME - NPY_TIMEDELTA - NPY_NTYPES_LEGACY - NPY_NOTYPE - - NPY_INT8 - NPY_INT16 - NPY_INT32 - NPY_INT64 - NPY_UINT8 - NPY_UINT16 - NPY_UINT32 - NPY_UINT64 - NPY_FLOAT16 - NPY_FLOAT32 - NPY_FLOAT64 - NPY_FLOAT80 - NPY_FLOAT96 - NPY_FLOAT128 - NPY_COMPLEX64 - 
NPY_COMPLEX128 - NPY_COMPLEX160 - NPY_COMPLEX192 - NPY_COMPLEX256 - - NPY_INTP - NPY_UINTP - NPY_DEFAULT_INT # Not a compile time constant (normally)! - - ctypedef enum NPY_ORDER: - NPY_ANYORDER - NPY_CORDER - NPY_FORTRANORDER - NPY_KEEPORDER - - ctypedef enum NPY_CASTING: - NPY_NO_CASTING - NPY_EQUIV_CASTING - NPY_SAFE_CASTING - NPY_SAME_KIND_CASTING - NPY_UNSAFE_CASTING - NPY_SAME_VALUE_CASTING - - ctypedef enum NPY_CLIPMODE: - NPY_CLIP - NPY_WRAP - NPY_RAISE - - ctypedef enum NPY_SCALARKIND: - NPY_NOSCALAR, - NPY_BOOL_SCALAR, - NPY_INTPOS_SCALAR, - NPY_INTNEG_SCALAR, - NPY_FLOAT_SCALAR, - NPY_COMPLEX_SCALAR, - NPY_OBJECT_SCALAR - - ctypedef enum NPY_SORTKIND: - NPY_QUICKSORT - NPY_HEAPSORT - NPY_MERGESORT - - ctypedef enum NPY_SEARCHSIDE: - NPY_SEARCHLEFT - NPY_SEARCHRIGHT - - enum: - NPY_ARRAY_C_CONTIGUOUS - NPY_ARRAY_F_CONTIGUOUS - NPY_ARRAY_OWNDATA - NPY_ARRAY_FORCECAST - NPY_ARRAY_ENSURECOPY - NPY_ARRAY_ENSUREARRAY - NPY_ARRAY_ELEMENTSTRIDES - NPY_ARRAY_ALIGNED - NPY_ARRAY_NOTSWAPPED - NPY_ARRAY_WRITEABLE - NPY_ARRAY_WRITEBACKIFCOPY - - NPY_ARRAY_BEHAVED - NPY_ARRAY_BEHAVED_NS - NPY_ARRAY_CARRAY - NPY_ARRAY_CARRAY_RO - NPY_ARRAY_FARRAY - NPY_ARRAY_FARRAY_RO - NPY_ARRAY_DEFAULT - - NPY_ARRAY_IN_ARRAY - NPY_ARRAY_OUT_ARRAY - NPY_ARRAY_INOUT_ARRAY - NPY_ARRAY_IN_FARRAY - NPY_ARRAY_OUT_FARRAY - NPY_ARRAY_INOUT_FARRAY - - NPY_ARRAY_UPDATE_ALL - - cdef enum: - NPY_MAXDIMS # 64 on NumPy 2.x and 32 on NumPy 1.x - NPY_RAVEL_AXIS # Used for functions like PyArray_Mean - - ctypedef void (*PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, void *) - - ctypedef struct PyArray_ArrayDescr: - # shape is a tuple, but Cython doesn't support "tuple shape" - # inside a non-PyObject declaration, so we have to declare it - # as just a PyObject*. 
- PyObject* shape - - ctypedef struct PyArray_Descr: - pass - - ctypedef class numpy.dtype [object PyArray_Descr, check_size ignore]: - # Use PyDataType_* macros when possible, however there are no macros - # for accessing some of the fields, so some are defined. - cdef PyTypeObject* typeobj - cdef char kind - cdef char type - # Numpy sometimes mutates this without warning (e.g. it'll - # sometimes change "|" to "<" in shared dtype objects on - # little-endian machines). If this matters to you, use - # PyArray_IsNativeByteOrder(dtype.byteorder) instead of - # directly accessing this field. - cdef char byteorder - # Flags are not directly accessible on Cython <3. Use PyDataType_FLAGS. - # cdef char flags - cdef int type_num - # itemsize/elsize, alignment, fields, names, and subarray must - # use the `PyDataType_*` accessor macros. With Cython 3 you can - # still use getter attributes `dtype.itemsize` - - ctypedef class numpy.flatiter [object PyArrayIterObject, check_size ignore]: - # Use through macros - pass - - ctypedef class numpy.broadcast [object PyArrayMultiIterObject, check_size ignore]: - cdef int numiter - cdef npy_intp size, index - cdef int nd - cdef npy_intp *dimensions - cdef void **iters - - ctypedef struct PyArrayObject: - # For use in situations where ndarray can't replace PyArrayObject*, - # like PyArrayObject**. - pass - - ctypedef class numpy.ndarray [object PyArrayObject, check_size ignore]: - cdef __cythonbufferdefaults__ = {"mode": "strided"} - - cdef: - # Only taking a few of the most commonly used and stable fields. - # One should use PyArray_* macros instead to access the C fields. - char *data - int ndim "nd" - npy_intp *shape "dimensions" - npy_intp *strides - dtype descr # deprecated since NumPy 1.7 ! - PyObject* base # NOT PUBLIC, DO NOT USE ! - - - int _import_array() except -1 - # A second definition so _import_array isn't marked as used when we use it here. - # Do not use - subject to change any time. 
- int __pyx_import_array "_import_array"() except -1 - - # - # Macros from ndarrayobject.h - # - bint PyArray_CHKFLAGS(ndarray m, int flags) nogil - bint PyArray_IS_C_CONTIGUOUS(ndarray arr) nogil - bint PyArray_IS_F_CONTIGUOUS(ndarray arr) nogil - bint PyArray_ISCONTIGUOUS(ndarray m) nogil - bint PyArray_ISWRITEABLE(ndarray m) nogil - bint PyArray_ISALIGNED(ndarray m) nogil - - int PyArray_NDIM(ndarray) nogil - bint PyArray_ISONESEGMENT(ndarray) nogil - bint PyArray_ISFORTRAN(ndarray) nogil - int PyArray_FORTRANIF(ndarray) nogil - - void* PyArray_DATA(ndarray) nogil - char* PyArray_BYTES(ndarray) nogil - - npy_intp* PyArray_DIMS(ndarray) nogil - npy_intp* PyArray_STRIDES(ndarray) nogil - npy_intp PyArray_DIM(ndarray, size_t) nogil - npy_intp PyArray_STRIDE(ndarray, size_t) nogil - - PyObject *PyArray_BASE(ndarray) nogil # returns borrowed reference! - PyArray_Descr *PyArray_DESCR(ndarray) nogil # returns borrowed reference to dtype! - PyArray_Descr *PyArray_DTYPE(ndarray) nogil # returns borrowed reference to dtype! NP 1.7+ alias for descr. 
- int PyArray_FLAGS(ndarray) nogil - void PyArray_CLEARFLAGS(ndarray, int flags) nogil # Added in NumPy 1.7 - void PyArray_ENABLEFLAGS(ndarray, int flags) nogil # Added in NumPy 1.7 - npy_intp PyArray_ITEMSIZE(ndarray) nogil - int PyArray_TYPE(ndarray arr) nogil - - object PyArray_GETITEM(ndarray arr, void *itemptr) - int PyArray_SETITEM(ndarray arr, void *itemptr, object obj) except -1 - - bint PyTypeNum_ISBOOL(int) nogil - bint PyTypeNum_ISUNSIGNED(int) nogil - bint PyTypeNum_ISSIGNED(int) nogil - bint PyTypeNum_ISINTEGER(int) nogil - bint PyTypeNum_ISFLOAT(int) nogil - bint PyTypeNum_ISNUMBER(int) nogil - bint PyTypeNum_ISSTRING(int) nogil - bint PyTypeNum_ISCOMPLEX(int) nogil - bint PyTypeNum_ISFLEXIBLE(int) nogil - bint PyTypeNum_ISUSERDEF(int) nogil - bint PyTypeNum_ISEXTENDED(int) nogil - bint PyTypeNum_ISOBJECT(int) nogil - - npy_intp PyDataType_ELSIZE(dtype) nogil - npy_intp PyDataType_ALIGNMENT(dtype) nogil - PyObject* PyDataType_METADATA(dtype) nogil - PyArray_ArrayDescr* PyDataType_SUBARRAY(dtype) nogil - PyObject* PyDataType_NAMES(dtype) nogil - PyObject* PyDataType_FIELDS(dtype) nogil - - bint PyDataType_ISBOOL(dtype) nogil - bint PyDataType_ISUNSIGNED(dtype) nogil - bint PyDataType_ISSIGNED(dtype) nogil - bint PyDataType_ISINTEGER(dtype) nogil - bint PyDataType_ISFLOAT(dtype) nogil - bint PyDataType_ISNUMBER(dtype) nogil - bint PyDataType_ISSTRING(dtype) nogil - bint PyDataType_ISCOMPLEX(dtype) nogil - bint PyDataType_ISFLEXIBLE(dtype) nogil - bint PyDataType_ISUSERDEF(dtype) nogil - bint PyDataType_ISEXTENDED(dtype) nogil - bint PyDataType_ISOBJECT(dtype) nogil - bint PyDataType_HASFIELDS(dtype) nogil - bint PyDataType_HASSUBARRAY(dtype) nogil - npy_uint64 PyDataType_FLAGS(dtype) nogil - - bint PyArray_ISBOOL(ndarray) nogil - bint PyArray_ISUNSIGNED(ndarray) nogil - bint PyArray_ISSIGNED(ndarray) nogil - bint PyArray_ISINTEGER(ndarray) nogil - bint PyArray_ISFLOAT(ndarray) nogil - bint PyArray_ISNUMBER(ndarray) nogil - bint PyArray_ISSTRING(ndarray) 
nogil - bint PyArray_ISCOMPLEX(ndarray) nogil - bint PyArray_ISFLEXIBLE(ndarray) nogil - bint PyArray_ISUSERDEF(ndarray) nogil - bint PyArray_ISEXTENDED(ndarray) nogil - bint PyArray_ISOBJECT(ndarray) nogil - bint PyArray_HASFIELDS(ndarray) nogil - - bint PyArray_ISVARIABLE(ndarray) nogil - - bint PyArray_SAFEALIGNEDCOPY(ndarray) nogil - bint PyArray_ISNBO(char) nogil # works on ndarray.byteorder - bint PyArray_IsNativeByteOrder(char) nogil # works on ndarray.byteorder - bint PyArray_ISNOTSWAPPED(ndarray) nogil - bint PyArray_ISBYTESWAPPED(ndarray) nogil - - bint PyArray_FLAGSWAP(ndarray, int) nogil - - bint PyArray_ISCARRAY(ndarray) nogil - bint PyArray_ISCARRAY_RO(ndarray) nogil - bint PyArray_ISFARRAY(ndarray) nogil - bint PyArray_ISFARRAY_RO(ndarray) nogil - bint PyArray_ISBEHAVED(ndarray) nogil - bint PyArray_ISBEHAVED_RO(ndarray) nogil - - - bint PyDataType_ISNOTSWAPPED(dtype) nogil - bint PyDataType_ISBYTESWAPPED(dtype) nogil - - bint PyArray_DescrCheck(object) - - bint PyArray_Check(object) - bint PyArray_CheckExact(object) - - # Cannot be supported due to out arg: - # bint PyArray_HasArrayInterfaceType(object, dtype, object, object&) - # bint PyArray_HasArrayInterface(op, out) - - - bint PyArray_IsZeroDim(object) - # Cannot be supported due to ## ## in macro: - # bint PyArray_IsScalar(object, verbatim work) - bint PyArray_CheckScalar(object) - bint PyArray_IsPythonNumber(object) - bint PyArray_IsPythonScalar(object) - bint PyArray_IsAnyScalar(object) - bint PyArray_CheckAnyScalar(object) - - ndarray PyArray_GETCONTIGUOUS(ndarray) - bint PyArray_SAMESHAPE(ndarray, ndarray) nogil - npy_intp PyArray_SIZE(ndarray) nogil - npy_intp PyArray_NBYTES(ndarray) nogil - - object PyArray_FROM_O(object) - object PyArray_FROM_OF(object m, int flags) - object PyArray_FROM_OT(object m, int type) - object PyArray_FROM_OTF(object m, int type, int flags) - object PyArray_FROMANY(object m, int type, int min, int max, int flags) - object PyArray_ZEROS(int nd, npy_intp* dims, 
int type, int fortran) - object PyArray_EMPTY(int nd, npy_intp* dims, int type, int fortran) - void PyArray_FILLWBYTE(ndarray, int val) - object PyArray_ContiguousFromAny(op, int, int min_depth, int max_depth) - unsigned char PyArray_EquivArrTypes(ndarray a1, ndarray a2) - bint PyArray_EquivByteorders(int b1, int b2) nogil - object PyArray_SimpleNew(int nd, npy_intp* dims, int typenum) - object PyArray_SimpleNewFromData(int nd, npy_intp* dims, int typenum, void* data) - #object PyArray_SimpleNewFromDescr(int nd, npy_intp* dims, dtype descr) - object PyArray_ToScalar(void* data, ndarray arr) - - void* PyArray_GETPTR1(ndarray m, npy_intp i) nogil - void* PyArray_GETPTR2(ndarray m, npy_intp i, npy_intp j) nogil - void* PyArray_GETPTR3(ndarray m, npy_intp i, npy_intp j, npy_intp k) nogil - void* PyArray_GETPTR4(ndarray m, npy_intp i, npy_intp j, npy_intp k, npy_intp l) nogil - - # Cannot be supported due to out arg - # void PyArray_DESCR_REPLACE(descr) - - - object PyArray_Copy(ndarray) - object PyArray_FromObject(object op, int type, int min_depth, int max_depth) - object PyArray_ContiguousFromObject(object op, int type, int min_depth, int max_depth) - object PyArray_CopyFromObject(object op, int type, int min_depth, int max_depth) - - object PyArray_Cast(ndarray mp, int type_num) - object PyArray_Take(ndarray ap, object items, int axis) - object PyArray_Put(ndarray ap, object items, object values) - - void PyArray_ITER_RESET(flatiter it) nogil - void PyArray_ITER_NEXT(flatiter it) nogil - void PyArray_ITER_GOTO(flatiter it, npy_intp* destination) nogil - void PyArray_ITER_GOTO1D(flatiter it, npy_intp ind) nogil - void* PyArray_ITER_DATA(flatiter it) nogil - bint PyArray_ITER_NOTDONE(flatiter it) nogil - - void PyArray_MultiIter_RESET(broadcast multi) nogil - void PyArray_MultiIter_NEXT(broadcast multi) nogil - void PyArray_MultiIter_GOTO(broadcast multi, npy_intp dest) nogil - void PyArray_MultiIter_GOTO1D(broadcast multi, npy_intp ind) nogil - void* 
PyArray_MultiIter_DATA(broadcast multi, npy_intp i) nogil - void PyArray_MultiIter_NEXTi(broadcast multi, npy_intp i) nogil - bint PyArray_MultiIter_NOTDONE(broadcast multi) nogil - npy_intp PyArray_MultiIter_SIZE(broadcast multi) nogil - int PyArray_MultiIter_NDIM(broadcast multi) nogil - npy_intp PyArray_MultiIter_INDEX(broadcast multi) nogil - int PyArray_MultiIter_NUMITER(broadcast multi) nogil - npy_intp* PyArray_MultiIter_DIMS(broadcast multi) nogil - void** PyArray_MultiIter_ITERS(broadcast multi) nogil - - # Functions from __multiarray_api.h - - # Functions taking dtype and returning object/ndarray are disabled - # for now as they steal dtype references. I'm conservative and disable - # more than is probably needed until it can be checked further. - int PyArray_INCREF (ndarray) except * # uses PyArray_Item_INCREF... - int PyArray_XDECREF (ndarray) except * # uses PyArray_Item_DECREF... - dtype PyArray_DescrFromType (int) - object PyArray_TypeObjectFromType (int) - char * PyArray_Zero (ndarray) - char * PyArray_One (ndarray) - #object PyArray_CastToType (ndarray, dtype, int) - int PyArray_CanCastSafely (int, int) # writes errors - npy_bool PyArray_CanCastTo (dtype, dtype) # writes errors - int PyArray_ObjectType (object, int) except 0 - dtype PyArray_DescrFromObject (object, dtype) - #ndarray* PyArray_ConvertToCommonType (object, int *) - dtype PyArray_DescrFromScalar (object) - dtype PyArray_DescrFromTypeObject (object) - npy_intp PyArray_Size (object) - #object PyArray_Scalar (void *, dtype, object) - #object PyArray_FromScalar (object, dtype) - void PyArray_ScalarAsCtype (object, void *) - #int PyArray_CastScalarToCtype (object, void *, dtype) - #int PyArray_CastScalarDirect (object, dtype, void *, int) - #PyArray_VectorUnaryFunc * PyArray_GetCastFunc (dtype, int) - #object PyArray_FromAny (object, dtype, int, int, int, object) - object PyArray_EnsureArray (object) - object PyArray_EnsureAnyArray (object) - #object PyArray_FromFile (stdio.FILE *, dtype, 
npy_intp, char *) - #object PyArray_FromString (char *, npy_intp, dtype, npy_intp, char *) - #object PyArray_FromBuffer (object, dtype, npy_intp, npy_intp) - #object PyArray_FromIter (object, dtype, npy_intp) - object PyArray_Return (ndarray) - #object PyArray_GetField (ndarray, dtype, int) - #int PyArray_SetField (ndarray, dtype, int, object) except -1 - object PyArray_Byteswap (ndarray, npy_bool) - object PyArray_Resize (ndarray, PyArray_Dims *, int, NPY_ORDER) - int PyArray_CopyInto (ndarray, ndarray) except -1 - int PyArray_CopyAnyInto (ndarray, ndarray) except -1 - int PyArray_CopyObject (ndarray, object) except -1 - object PyArray_NewCopy (ndarray, NPY_ORDER) - object PyArray_ToList (ndarray) - object PyArray_ToString (ndarray, NPY_ORDER) - int PyArray_ToFile (ndarray, stdio.FILE *, char *, char *) except -1 - int PyArray_Dump (object, object, int) except -1 - object PyArray_Dumps (object, int) - int PyArray_ValidType (int) # Cannot error - void PyArray_UpdateFlags (ndarray, int) - object PyArray_New (type, int, npy_intp *, int, npy_intp *, void *, int, int, object) - #object PyArray_NewFromDescr (type, dtype, int, npy_intp *, npy_intp *, void *, int, object) - #dtype PyArray_DescrNew (dtype) - dtype PyArray_DescrNewFromType (int) - double PyArray_GetPriority (object, double) # clears errors as of 1.25 - object PyArray_IterNew (object) - object PyArray_MultiIterNew (int, ...) - - int PyArray_PyIntAsInt (object) except? 
-1 - npy_intp PyArray_PyIntAsIntp (object) - int PyArray_Broadcast (broadcast) except -1 - int PyArray_FillWithScalar (ndarray, object) except -1 - npy_bool PyArray_CheckStrides (int, int, npy_intp, npy_intp, npy_intp *, npy_intp *) - dtype PyArray_DescrNewByteorder (dtype, char) - object PyArray_IterAllButAxis (object, int *) - #object PyArray_CheckFromAny (object, dtype, int, int, int, object) - #object PyArray_FromArray (ndarray, dtype, int) - object PyArray_FromInterface (object) - object PyArray_FromStructInterface (object) - #object PyArray_FromArrayAttr (object, dtype, object) - #NPY_SCALARKIND PyArray_ScalarKind (int, ndarray*) - int PyArray_CanCoerceScalar (int, int, NPY_SCALARKIND) - npy_bool PyArray_CanCastScalar (type, type) - int PyArray_RemoveSmallest (broadcast) except -1 - int PyArray_ElementStrides (object) - void PyArray_Item_INCREF (char *, dtype) except * - void PyArray_Item_XDECREF (char *, dtype) except * - object PyArray_Transpose (ndarray, PyArray_Dims *) - object PyArray_TakeFrom (ndarray, object, int, ndarray, NPY_CLIPMODE) - object PyArray_PutTo (ndarray, object, object, NPY_CLIPMODE) - object PyArray_PutMask (ndarray, object, object) - object PyArray_Repeat (ndarray, object, int) - object PyArray_Choose (ndarray, object, ndarray, NPY_CLIPMODE) - int PyArray_Sort (ndarray, int, NPY_SORTKIND) except -1 - object PyArray_ArgSort (ndarray, int, NPY_SORTKIND) - object PyArray_SearchSorted (ndarray, object, NPY_SEARCHSIDE, PyObject *) - object PyArray_ArgMax (ndarray, int, ndarray) - object PyArray_ArgMin (ndarray, int, ndarray) - object PyArray_Reshape (ndarray, object) - object PyArray_Newshape (ndarray, PyArray_Dims *, NPY_ORDER) - object PyArray_Squeeze (ndarray) - #object PyArray_View (ndarray, dtype, type) - object PyArray_SwapAxes (ndarray, int, int) - object PyArray_Max (ndarray, int, ndarray) - object PyArray_Min (ndarray, int, ndarray) - object PyArray_Ptp (ndarray, int, ndarray) - object PyArray_Mean (ndarray, int, int, ndarray) - 
object PyArray_Trace (ndarray, int, int, int, int, ndarray) - object PyArray_Diagonal (ndarray, int, int, int) - object PyArray_Clip (ndarray, object, object, ndarray) - object PyArray_Conjugate (ndarray, ndarray) - object PyArray_Nonzero (ndarray) - object PyArray_Std (ndarray, int, int, ndarray, int) - object PyArray_Sum (ndarray, int, int, ndarray) - object PyArray_CumSum (ndarray, int, int, ndarray) - object PyArray_Prod (ndarray, int, int, ndarray) - object PyArray_CumProd (ndarray, int, int, ndarray) - object PyArray_All (ndarray, int, ndarray) - object PyArray_Any (ndarray, int, ndarray) - object PyArray_Compress (ndarray, object, int, ndarray) - object PyArray_Flatten (ndarray, NPY_ORDER) - object PyArray_Ravel (ndarray, NPY_ORDER) - npy_intp PyArray_MultiplyList (npy_intp *, int) - int PyArray_MultiplyIntList (int *, int) - void * PyArray_GetPtr (ndarray, npy_intp*) - int PyArray_CompareLists (npy_intp *, npy_intp *, int) - #int PyArray_AsCArray (object*, void *, npy_intp *, int, dtype) - int PyArray_Free (object, void *) - #int PyArray_Converter (object, object*) - int PyArray_IntpFromSequence (object, npy_intp *, int) except -1 - object PyArray_Concatenate (object, int) - object PyArray_InnerProduct (object, object) - object PyArray_MatrixProduct (object, object) - object PyArray_Correlate (object, object, int) - #int PyArray_DescrConverter (object, dtype*) except 0 - #int PyArray_DescrConverter2 (object, dtype*) except 0 - int PyArray_IntpConverter (object, PyArray_Dims *) except 0 - #int PyArray_BufferConverter (object, chunk) except 0 - int PyArray_AxisConverter (object, int *) except 0 - int PyArray_BoolConverter (object, npy_bool *) except 0 - int PyArray_ByteorderConverter (object, char *) except 0 - int PyArray_OrderConverter (object, NPY_ORDER *) except 0 - unsigned char PyArray_EquivTypes (dtype, dtype) # clears errors - #object PyArray_Zeros (int, npy_intp *, dtype, int) - #object PyArray_Empty (int, npy_intp *, dtype, int) - object 
PyArray_Where (object, object, object) - object PyArray_Arange (double, double, double, int) - #object PyArray_ArangeObj (object, object, object, dtype) - int PyArray_SortkindConverter (object, NPY_SORTKIND *) except 0 - object PyArray_LexSort (object, int) - object PyArray_Round (ndarray, int, ndarray) - unsigned char PyArray_EquivTypenums (int, int) - int PyArray_RegisterDataType (dtype) except -1 - int PyArray_RegisterCastFunc (dtype, int, PyArray_VectorUnaryFunc *) except -1 - int PyArray_RegisterCanCast (dtype, int, NPY_SCALARKIND) except -1 - #void PyArray_InitArrFuncs (PyArray_ArrFuncs *) - object PyArray_IntTupleFromIntp (int, npy_intp *) - int PyArray_ClipmodeConverter (object, NPY_CLIPMODE *) except 0 - #int PyArray_OutputConverter (object, ndarray*) except 0 - object PyArray_BroadcastToShape (object, npy_intp *, int) - #int PyArray_DescrAlignConverter (object, dtype*) except 0 - #int PyArray_DescrAlignConverter2 (object, dtype*) except 0 - int PyArray_SearchsideConverter (object, void *) except 0 - object PyArray_CheckAxis (ndarray, int *, int) - npy_intp PyArray_OverflowMultiplyList (npy_intp *, int) - int PyArray_SetBaseObject(ndarray, base) except -1 # NOTE: steals a reference to base! Use "set_array_base()" instead. - - # The memory handler functions require the NumPy 1.22 API - # and may require defining NPY_TARGET_VERSION - ctypedef struct PyDataMemAllocator: - void *ctx - void* (*malloc) (void *ctx, size_t size) - void* (*calloc) (void *ctx, size_t nelem, size_t elsize) - void* (*realloc) (void *ctx, void *ptr, size_t new_size) - void (*free) (void *ctx, void *ptr, size_t size) - - ctypedef struct PyDataMem_Handler: - char* name - npy_uint8 version - PyDataMemAllocator allocator - - object PyDataMem_SetHandler(object handler) - object PyDataMem_GetHandler() - - # additional datetime related functions are defined below - - -# Typedefs that matches the runtime dtype objects in -# the numpy module. 
- -# The ones that are commented out needs an IFDEF function -# in Cython to enable them only on the right systems. - -ctypedef npy_int8 int8_t -ctypedef npy_int16 int16_t -ctypedef npy_int32 int32_t -ctypedef npy_int64 int64_t - -ctypedef npy_uint8 uint8_t -ctypedef npy_uint16 uint16_t -ctypedef npy_uint32 uint32_t -ctypedef npy_uint64 uint64_t - -ctypedef npy_float32 float32_t -ctypedef npy_float64 float64_t -#ctypedef npy_float80 float80_t -#ctypedef npy_float128 float128_t - -ctypedef float complex complex64_t -ctypedef double complex complex128_t - -ctypedef npy_longlong longlong_t -ctypedef npy_ulonglong ulonglong_t - -ctypedef npy_intp intp_t -ctypedef npy_uintp uintp_t - -ctypedef npy_double float_t -ctypedef npy_double double_t -ctypedef npy_longdouble longdouble_t - -ctypedef float complex cfloat_t -ctypedef double complex cdouble_t -ctypedef double complex complex_t -ctypedef long double complex clongdouble_t - -cdef inline object PyArray_MultiIterNew1(a): - return PyArray_MultiIterNew(1, a) - -cdef inline object PyArray_MultiIterNew2(a, b): - return PyArray_MultiIterNew(2, a, b) - -cdef inline object PyArray_MultiIterNew3(a, b, c): - return PyArray_MultiIterNew(3, a, b, c) - -cdef inline object PyArray_MultiIterNew4(a, b, c, d): - return PyArray_MultiIterNew(4, a, b, c, d) - -cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): - return PyArray_MultiIterNew(5, a, b, c, d, e) - -cdef inline tuple PyDataType_SHAPE(dtype d): - if PyDataType_HASSUBARRAY(d): - return d.subarray.shape - else: - return () - - -cdef extern from "numpy/ndarrayobject.h": - PyTypeObject PyTimedeltaArrType_Type - PyTypeObject PyDatetimeArrType_Type - ctypedef int64_t npy_timedelta - ctypedef int64_t npy_datetime - -cdef extern from "numpy/ndarraytypes.h": - ctypedef struct PyArray_DatetimeMetaData: - NPY_DATETIMEUNIT base - int64_t num - - ctypedef struct npy_datetimestruct: - int64_t year - int32_t month, day, hour, min, sec, us, ps, as - - # Iterator API added in v1.6 - # - # 
These don't match the definition in the C API because Cython can't wrap - # function pointers that return functions. - # https://github.com/cython/cython/issues/6720 - ctypedef int (*NpyIter_IterNextFunc "NpyIter_IterNextFunc *")(NpyIter* it) noexcept nogil - ctypedef void (*NpyIter_GetMultiIndexFunc "NpyIter_GetMultiIndexFunc *")(NpyIter* it, npy_intp* outcoords) noexcept nogil - -cdef extern from "numpy/arrayscalars.h": - - # abstract types - ctypedef class numpy.generic [object PyObject]: - pass - ctypedef class numpy.number [object PyObject]: - pass - ctypedef class numpy.integer [object PyObject]: - pass - ctypedef class numpy.signedinteger [object PyObject]: - pass - ctypedef class numpy.unsignedinteger [object PyObject]: - pass - ctypedef class numpy.inexact [object PyObject]: - pass - ctypedef class numpy.floating [object PyObject]: - pass - ctypedef class numpy.complexfloating [object PyObject]: - pass - ctypedef class numpy.flexible [object PyObject]: - pass - ctypedef class numpy.character [object PyObject]: - pass - - ctypedef struct PyDatetimeScalarObject: - # PyObject_HEAD - npy_datetime obval - PyArray_DatetimeMetaData obmeta - - ctypedef struct PyTimedeltaScalarObject: - # PyObject_HEAD - npy_timedelta obval - PyArray_DatetimeMetaData obmeta - - ctypedef enum NPY_DATETIMEUNIT: - NPY_FR_Y - NPY_FR_M - NPY_FR_W - NPY_FR_D - NPY_FR_B - NPY_FR_h - NPY_FR_m - NPY_FR_s - NPY_FR_ms - NPY_FR_us - NPY_FR_ns - NPY_FR_ps - NPY_FR_fs - NPY_FR_as - NPY_FR_GENERIC - - -cdef extern from "numpy/arrayobject.h": - # These are part of the C-API defined in `__multiarray_api.h` - - # NumPy internal definitions in datetime_strings.c: - int get_datetime_iso_8601_strlen "NpyDatetime_GetDatetimeISO8601StrLen" ( - int local, NPY_DATETIMEUNIT base) - int make_iso_8601_datetime "NpyDatetime_MakeISO8601Datetime" ( - npy_datetimestruct *dts, char *outstr, npy_intp outlen, - int local, int utc, NPY_DATETIMEUNIT base, int tzoffset, - NPY_CASTING casting) except -1 - - # NumPy 
internal definition in datetime.c: - # May return 1 to indicate that object does not appear to be a datetime - # (returns 0 on success). - int convert_pydatetime_to_datetimestruct "NpyDatetime_ConvertPyDateTimeToDatetimeStruct" ( - PyObject *obj, npy_datetimestruct *out, - NPY_DATETIMEUNIT *out_bestunit, int apply_tzinfo) except -1 - int convert_datetime64_to_datetimestruct "NpyDatetime_ConvertDatetime64ToDatetimeStruct" ( - PyArray_DatetimeMetaData *meta, npy_datetime dt, - npy_datetimestruct *out) except -1 - int convert_datetimestruct_to_datetime64 "NpyDatetime_ConvertDatetimeStructToDatetime64"( - PyArray_DatetimeMetaData *meta, const npy_datetimestruct *dts, - npy_datetime *out) except -1 - - -# -# ufunc API +# See __init__.cython-30.pxd for the real Cython header # -cdef extern from "numpy/ufuncobject.h": - - ctypedef void (*PyUFuncGenericFunction) (char **, npy_intp *, npy_intp *, void *) - - ctypedef class numpy.ufunc [object PyUFuncObject, check_size ignore]: - cdef: - int nin, nout, nargs - int identity - PyUFuncGenericFunction *functions - void **data - int ntypes - int check_return - char *name - char *types - char *doc - void *ptr - PyObject *obj - PyObject *userloops - - cdef enum: - PyUFunc_Zero - PyUFunc_One - PyUFunc_None - # deprecated - UFUNC_FPE_DIVIDEBYZERO - UFUNC_FPE_OVERFLOW - UFUNC_FPE_UNDERFLOW - UFUNC_FPE_INVALID - # use these instead - NPY_FPE_DIVIDEBYZERO - NPY_FPE_OVERFLOW - NPY_FPE_UNDERFLOW - NPY_FPE_INVALID - - object PyUFunc_FromFuncAndData(PyUFuncGenericFunction *, - void **, char *, int, int, int, int, char *, char *, int) - int PyUFunc_RegisterLoopForType(ufunc, int, - PyUFuncGenericFunction, int *, void *) except -1 - void PyUFunc_f_f_As_d_d \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_d_d \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_f_f \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_g_g \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_F_F_As_D_D \ - (char **, npy_intp 
*, npy_intp *, void *) - void PyUFunc_F_F \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_D_D \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_G_G \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_O_O \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_ff_f_As_dd_d \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_ff_f \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_dd_d \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_gg_g \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_FF_F_As_DD_D \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_DD_D \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_FF_F \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_GG_G \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_OO_O \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_O_O_method \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_OO_O_method \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_On_Om \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_clearfperr() - int PyUFunc_getfperr() - int PyUFunc_ReplaceLoopBySignature \ - (ufunc, PyUFuncGenericFunction, int *, PyUFuncGenericFunction *) - object PyUFunc_FromFuncAndDataAndSignature \ - (PyUFuncGenericFunction *, void **, char *, int, int, int, - int, char *, char *, int, char *) - - int _import_umath() except -1 - -cdef inline void set_array_base(ndarray arr, object base): - Py_INCREF(base) # important to do this before stealing the reference below! - PyArray_SetBaseObject(arr, base) - -cdef inline object get_array_base(ndarray arr): - base = PyArray_BASE(arr) - if base is NULL: - return None - return base - -# Versions of the import_* functions which are more suitable for -# Cython code. 
-cdef inline int import_array() except -1: - try: - __pyx_import_array() - except Exception: - raise ImportError("numpy._core.multiarray failed to import") - -cdef inline int import_umath() except -1: - try: - _import_umath() - except Exception: - raise ImportError("numpy._core.umath failed to import") - -cdef inline int import_ufunc() except -1: - try: - _import_umath() - except Exception: - raise ImportError("numpy._core.umath failed to import") - - -cdef inline bint is_timedelta64_object(object obj): - """ - Cython equivalent of `isinstance(obj, np.timedelta64)` - - Parameters - ---------- - obj : object - - Returns - ------- - bool - """ - return PyObject_TypeCheck(obj, &PyTimedeltaArrType_Type) - - -cdef inline bint is_datetime64_object(object obj): - """ - Cython equivalent of `isinstance(obj, np.datetime64)` - - Parameters - ---------- - obj : object - - Returns - ------- - bool - """ - return PyObject_TypeCheck(obj, &PyDatetimeArrType_Type) - - -cdef inline npy_datetime get_datetime64_value(object obj) nogil: - """ - returns the int64 value underlying scalar numpy datetime64 object - - Note that to interpret this as a datetime, the corresponding unit is - also needed. That can be found using `get_datetime64_unit`. - """ - return (obj).obval - - -cdef inline npy_timedelta get_timedelta64_value(object obj) nogil: - """ - returns the int64 value underlying scalar numpy timedelta64 object - """ - return (obj).obval - - -cdef inline NPY_DATETIMEUNIT get_datetime64_unit(object obj) nogil: - """ - returns the unit part of the dtype for a numpy datetime64 object. 
- """ - return (obj).obmeta.base - - -cdef extern from "numpy/arrayobject.h": - - ctypedef struct NpyIter: - pass - - cdef enum: - NPY_FAIL - NPY_SUCCEED - - cdef enum: - # Track an index representing C order - NPY_ITER_C_INDEX - # Track an index representing Fortran order - NPY_ITER_F_INDEX - # Track a multi-index - NPY_ITER_MULTI_INDEX - # User code external to the iterator does the 1-dimensional innermost loop - NPY_ITER_EXTERNAL_LOOP - # Convert all the operands to a common data type - NPY_ITER_COMMON_DTYPE - # Operands may hold references, requiring API access during iteration - NPY_ITER_REFS_OK - # Zero-sized operands should be permitted, iteration checks IterSize for 0 - NPY_ITER_ZEROSIZE_OK - # Permits reductions (size-0 stride with dimension size > 1) - NPY_ITER_REDUCE_OK - # Enables sub-range iteration - NPY_ITER_RANGED - # Enables buffering - NPY_ITER_BUFFERED - # When buffering is enabled, grows the inner loop if possible - NPY_ITER_GROWINNER - # Delay allocation of buffers until first Reset* call - NPY_ITER_DELAY_BUFALLOC - # When NPY_KEEPORDER is specified, disable reversing negative-stride axes - NPY_ITER_DONT_NEGATE_STRIDES - NPY_ITER_COPY_IF_OVERLAP - # The operand will be read from and written to - NPY_ITER_READWRITE - # The operand will only be read from - NPY_ITER_READONLY - # The operand will only be written to - NPY_ITER_WRITEONLY - # The operand's data must be in native byte order - NPY_ITER_NBO - # The operand's data must be aligned - NPY_ITER_ALIGNED - # The operand's data must be contiguous (within the inner loop) - NPY_ITER_CONTIG - # The operand may be copied to satisfy requirements - NPY_ITER_COPY - # The operand may be copied with WRITEBACKIFCOPY to satisfy requirements - NPY_ITER_UPDATEIFCOPY - # Allocate the operand if it is NULL - NPY_ITER_ALLOCATE - # If an operand is allocated, don't use any subtype - NPY_ITER_NO_SUBTYPE - # This is a virtual array slot, operand is NULL but temporary data is there - NPY_ITER_VIRTUAL - # Require 
that the dimension match the iterator dimensions exactly - NPY_ITER_NO_BROADCAST - # A mask is being used on this array, affects buffer -> array copy - NPY_ITER_WRITEMASKED - # This array is the mask for all WRITEMASKED operands - NPY_ITER_ARRAYMASK - # Assume iterator order data access for COPY_IF_OVERLAP - NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE - - # construction and destruction functions - NpyIter* NpyIter_New(ndarray arr, npy_uint32 flags, NPY_ORDER order, - NPY_CASTING casting, dtype datatype) except NULL - NpyIter* NpyIter_MultiNew(npy_intp nop, PyArrayObject** op, npy_uint32 flags, - NPY_ORDER order, NPY_CASTING casting, npy_uint32* - op_flags, PyArray_Descr** op_dtypes) except NULL - NpyIter* NpyIter_AdvancedNew(npy_intp nop, PyArrayObject** op, - npy_uint32 flags, NPY_ORDER order, - NPY_CASTING casting, npy_uint32* op_flags, - PyArray_Descr** op_dtypes, int oa_ndim, - int** op_axes, const npy_intp* itershape, - npy_intp buffersize) except NULL - NpyIter* NpyIter_Copy(NpyIter* it) except NULL - int NpyIter_RemoveAxis(NpyIter* it, int axis) except NPY_FAIL - int NpyIter_RemoveMultiIndex(NpyIter* it) except NPY_FAIL - int NpyIter_EnableExternalLoop(NpyIter* it) except NPY_FAIL - int NpyIter_Deallocate(NpyIter* it) except NPY_FAIL - int NpyIter_Reset(NpyIter* it, char** errmsg) except NPY_FAIL - int NpyIter_ResetToIterIndexRange(NpyIter* it, npy_intp istart, - npy_intp iend, char** errmsg) except NPY_FAIL - int NpyIter_ResetBasePointers(NpyIter* it, char** baseptrs, char** errmsg) except NPY_FAIL - int NpyIter_GotoMultiIndex(NpyIter* it, const npy_intp* multi_index) except NPY_FAIL - int NpyIter_GotoIndex(NpyIter* it, npy_intp index) except NPY_FAIL - npy_intp NpyIter_GetIterSize(NpyIter* it) nogil - npy_intp NpyIter_GetIterIndex(NpyIter* it) nogil - void NpyIter_GetIterIndexRange(NpyIter* it, npy_intp* istart, - npy_intp* iend) nogil - int NpyIter_GotoIterIndex(NpyIter* it, npy_intp iterindex) except NPY_FAIL - npy_bool NpyIter_HasDelayedBufAlloc(NpyIter* it) 
nogil - npy_bool NpyIter_HasExternalLoop(NpyIter* it) nogil - npy_bool NpyIter_HasMultiIndex(NpyIter* it) nogil - npy_bool NpyIter_HasIndex(NpyIter* it) nogil - npy_bool NpyIter_RequiresBuffering(NpyIter* it) nogil - npy_bool NpyIter_IsBuffered(NpyIter* it) nogil - npy_bool NpyIter_IsGrowInner(NpyIter* it) nogil - npy_intp NpyIter_GetBufferSize(NpyIter* it) nogil - int NpyIter_GetNDim(NpyIter* it) nogil - int NpyIter_GetNOp(NpyIter* it) nogil - npy_intp* NpyIter_GetAxisStrideArray(NpyIter* it, int axis) except NULL - int NpyIter_GetShape(NpyIter* it, npy_intp* outshape) nogil - PyArray_Descr** NpyIter_GetDescrArray(NpyIter* it) - PyArrayObject** NpyIter_GetOperandArray(NpyIter* it) - ndarray NpyIter_GetIterView(NpyIter* it, npy_intp i) - void NpyIter_GetReadFlags(NpyIter* it, char* outreadflags) - void NpyIter_GetWriteFlags(NpyIter* it, char* outwriteflags) - int NpyIter_CreateCompatibleStrides(NpyIter* it, npy_intp itemsize, - npy_intp* outstrides) except NPY_FAIL - npy_bool NpyIter_IsFirstVisit(NpyIter* it, int iop) nogil - # functions for iterating an NpyIter object - # - # These don't match the definition in the C API because Cython can't wrap - # function pointers that return functions. 
- NpyIter_IterNextFunc* NpyIter_GetIterNext(NpyIter* it, char** errmsg) except NULL - NpyIter_GetMultiIndexFunc* NpyIter_GetGetMultiIndex(NpyIter* it, - char** errmsg) except NULL - char** NpyIter_GetDataPtrArray(NpyIter* it) nogil - char** NpyIter_GetInitialDataPtrArray(NpyIter* it) nogil - npy_intp* NpyIter_GetIndexPtr(NpyIter* it) - npy_intp* NpyIter_GetInnerStrideArray(NpyIter* it) nogil - npy_intp* NpyIter_GetInnerLoopSizePtr(NpyIter* it) nogil - void NpyIter_GetInnerFixedStrideArray(NpyIter* it, npy_intp* outstrides) nogil - npy_bool NpyIter_IterationNeedsAPI(NpyIter* it) nogil - void NpyIter_DebugPrint(NpyIter* it) - -# NpyString API -cdef extern from "numpy/ndarraytypes.h": - ctypedef struct npy_string_allocator: - pass - - ctypedef struct npy_packed_static_string: - pass - - ctypedef struct npy_static_string: - size_t size - const char *buf - - ctypedef struct PyArray_StringDTypeObject: - PyArray_Descr base - PyObject *na_object - char coerce - char has_nan_na - char has_string_na - char array_owned - npy_static_string default_string - npy_static_string na_name - npy_string_allocator *allocator - -cdef extern from "numpy/arrayobject.h": - npy_string_allocator *NpyString_acquire_allocator(const PyArray_StringDTypeObject *descr) - void NpyString_acquire_allocators(size_t n_descriptors, PyArray_Descr *const descrs[], npy_string_allocator *allocators[]) - void NpyString_release_allocator(npy_string_allocator *allocator) - void NpyString_release_allocators(size_t length, npy_string_allocator *allocators[]) - int NpyString_load(npy_string_allocator *allocator, const npy_packed_static_string *packed_string, npy_static_string *unpacked_string) - int NpyString_pack_null(npy_string_allocator *allocator, npy_packed_static_string *packed_string) - int NpyString_pack(npy_string_allocator *allocator, npy_packed_static_string *packed_string, const char *buf, size_t size) +# intentionally created compiler error that only triggers on Cython < 3.0.0 +DEF err = int('Build 
aborted: the NumPy Cython headers require Cython 3.0.0 or newer.') From 0f46be8b9c7f0b41779906e0ef318ab135e6384f Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Tue, 10 Feb 2026 21:38:43 +0100 Subject: [PATCH 1388/1718] another tweak to note on PyPy [skip actions] [skip cirrus] [skip azp] --- doc/neps/nep-0057-numpy-platform-support.rst | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/doc/neps/nep-0057-numpy-platform-support.rst b/doc/neps/nep-0057-numpy-platform-support.rst index 1a652410c2bb..570287d4d0d4 100644 --- a/doc/neps/nep-0057-numpy-platform-support.rst +++ b/doc/neps/nep-0057-numpy-platform-support.rst @@ -257,8 +257,7 @@ list): +------------------------------------+--------------------------------------------------+ | Platform | Notes | +====================================+==================================================+ -| PyPy | Was Tier 2 until the 2.4.x releases, see | -| | gh-30416_ | +| PyPy | Was Tier 2 for releases <=2.4.x, see gh-30416_ | +------------------------------------+--------------------------------------------------+ | macOS ppc64, universal, universal2 | | +------------------------------------+--------------------------------------------------+ From 9a3507763a734f67b5af5fa8f0049d3561f53ee1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Feb 2026 17:57:37 +0000 Subject: [PATCH 1389/1718] MAINT: Bump hypothesis from 6.151.4 to 6.151.5 in /requirements Bumps [hypothesis](https://github.com/HypothesisWorks/hypothesis) from 6.151.4 to 6.151.5. - [Release notes](https://github.com/HypothesisWorks/hypothesis/releases) - [Commits](https://github.com/HypothesisWorks/hypothesis/compare/hypothesis-python-6.151.4...hypothesis-python-6.151.5) --- updated-dependencies: - dependency-name: hypothesis dependency-version: 6.151.5 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- requirements/emscripten_test_requirements.txt | 2 +- requirements/test_requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements/emscripten_test_requirements.txt b/requirements/emscripten_test_requirements.txt index 87a48ab32b9d..e8ab96996cf8 100644 --- a/requirements/emscripten_test_requirements.txt +++ b/requirements/emscripten_test_requirements.txt @@ -1,4 +1,4 @@ -hypothesis==6.151.4 +hypothesis==6.151.5 pytest==9.0.2 tzdata pytest-xdist diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index 18e207e8fc47..8693bb4d58d9 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -1,5 +1,5 @@ Cython -hypothesis==6.151.4 +hypothesis==6.151.5 pytest==9.0.2 pytest-cov==7.0.0 meson From 7a205df72ca94563351442f2f2eeb16a0b8833e8 Mon Sep 17 00:00:00 2001 From: Antareep Sarkar Date: Thu, 12 Feb 2026 03:33:03 +0530 Subject: [PATCH 1390/1718] DEP: deprecate numpy.typename (#30774) --- .../upcoming_changes/30774.deprecation.rst | 4 ++++ numpy/_core/tests/test_deprecations.py | 8 +++++++ numpy/conftest.py | 1 + numpy/lib/_type_check_impl.py | 10 ++++++++ numpy/lib/_type_check_impl.pyi | 23 +++++++++++++++++++ numpy/typing/tests/data/reveal/type_check.pyi | 8 +++---- 6 files changed, 50 insertions(+), 4 deletions(-) create mode 100644 doc/release/upcoming_changes/30774.deprecation.rst diff --git a/doc/release/upcoming_changes/30774.deprecation.rst b/doc/release/upcoming_changes/30774.deprecation.rst new file mode 100644 index 000000000000..00b941ef3a8c --- /dev/null +++ b/doc/release/upcoming_changes/30774.deprecation.rst @@ -0,0 +1,4 @@ +``typename`` is deprecated +-------------------------- +``numpy.typename`` is deprecated because the names returned by it were outdated and inconsistent. +``numpy.dtype.name`` can be used as a replacement. 
diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index 6c52aea84e5d..8507d369fe29 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -347,3 +347,11 @@ class TestTooManyArgsExtremum(_DeprecationTestCase): @pytest.mark.parametrize("ufunc", [np.minimum, np.maximum]) def test_extremem_3_args(self, ufunc): self.assert_deprecated(ufunc, args=(np.ones(1), np.zeros(1), np.empty(1))) + + +class TestTypenameDeprecation(_DeprecationTestCase): + # Deprecation in Numpy 2.5, 2026-02 + + def test_typename_emits_deprecation_warning(self): + self.assert_deprecated(lambda: np.typename("S1")) + self.assert_deprecated(lambda: np.typename("h")) diff --git a/numpy/conftest.py b/numpy/conftest.py index 30625d90c69c..d260225568da 100644 --- a/numpy/conftest.py +++ b/numpy/conftest.py @@ -185,6 +185,7 @@ def warnings_errors_and_rng(test=None): "NumPy warning suppression and assertion utilities are deprecated.", "numpy.fix is deprecated", # fix -> trunc "The chararray class is deprecated", # char.chararray + "numpy.typename is deprecated", # typename -> dtype.name ] msg = "|".join(msgs) diff --git a/numpy/lib/_type_check_impl.py b/numpy/lib/_type_check_impl.py index 37192043513f..e3c942be0d99 100644 --- a/numpy/lib/_type_check_impl.py +++ b/numpy/lib/_type_check_impl.py @@ -2,6 +2,7 @@ """ import functools +import warnings __all__ = ['iscomplexobj', 'isrealobj', 'imag', 'iscomplex', 'isreal', 'nan_to_num', 'real', 'real_if_close', @@ -587,6 +588,9 @@ def typename(char): """ Return a description for the given data type code. + .. deprecated:: 2.5 + `numpy.typename` is deprecated. Use `numpy.dtype.name` instead. + Parameters ---------- char : str @@ -633,6 +637,12 @@ def typename(char): q : long long integer """ + # Deprecated in NumPy 2.5, 2026-02-03 + warnings.warn( + "numpy.typename is deprecated. 
Use numpy.dtype.name instead.", + DeprecationWarning, + stacklevel=2 + ) return _namefromtype[char] #----------------------------------------------------------------------------- diff --git a/numpy/lib/_type_check_impl.pyi b/numpy/lib/_type_check_impl.pyi index dbef0ca87280..d23123f80c8e 100644 --- a/numpy/lib/_type_check_impl.pyi +++ b/numpy/lib/_type_check_impl.pyi @@ -1,6 +1,7 @@ from _typeshed import Incomplete from collections.abc import Container, Iterable from typing import Any, Literal as L, Protocol, overload, type_check_only +from typing_extensions import deprecated import numpy as np from numpy._typing import ( @@ -148,48 +149,70 @@ def real_if_close(a: ArrayLike, tol: float = 100) -> NDArray[Any]: ... # @overload +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") def typename(char: L["S1"]) -> L["character"]: ... @overload +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") def typename(char: L["?"]) -> L["bool"]: ... @overload +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") def typename(char: L["b"]) -> L["signed char"]: ... @overload +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") def typename(char: L["B"]) -> L["unsigned char"]: ... @overload +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") def typename(char: L["h"]) -> L["short"]: ... @overload +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") def typename(char: L["H"]) -> L["unsigned short"]: ... @overload +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") def typename(char: L["i"]) -> L["integer"]: ... @overload +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") def typename(char: L["I"]) -> L["unsigned integer"]: ... @overload +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") def typename(char: L["l"]) -> L["long integer"]: ... 
@overload +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") def typename(char: L["L"]) -> L["unsigned long integer"]: ... @overload +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") def typename(char: L["q"]) -> L["long long integer"]: ... @overload +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") def typename(char: L["Q"]) -> L["unsigned long long integer"]: ... @overload +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") def typename(char: L["f"]) -> L["single precision"]: ... @overload +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") def typename(char: L["d"]) -> L["double precision"]: ... @overload +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") def typename(char: L["g"]) -> L["long precision"]: ... @overload +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") def typename(char: L["F"]) -> L["complex single precision"]: ... @overload +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") def typename(char: L["D"]) -> L["complex double precision"]: ... @overload +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") def typename(char: L["G"]) -> L["complex long double precision"]: ... @overload +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") def typename(char: L["S"]) -> L["string"]: ... @overload +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") def typename(char: L["U"]) -> L["unicode"]: ... @overload +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") def typename(char: L["V"]) -> L["void"]: ... @overload +@deprecated("numpy.typename is deprecated. Use numpy.dtype.name instead.") def typename(char: L["O"]) -> L["object"]: ... 
# NOTE: The [overload-overlap] mypy errors are false positives diff --git a/numpy/typing/tests/data/reveal/type_check.pyi b/numpy/typing/tests/data/reveal/type_check.pyi index df95da78ffb7..22eed7493689 100644 --- a/numpy/typing/tests/data/reveal/type_check.pyi +++ b/numpy/typing/tests/data/reveal/type_check.pyi @@ -54,10 +54,10 @@ assert_type(np.real_if_close(AR_c16), npt.NDArray[np.float64 | np.complex128]) assert_type(np.real_if_close(AR_c8), npt.NDArray[np.float32 | np.complex64]) assert_type(np.real_if_close(AR_LIKE_f), npt.NDArray[Any]) -assert_type(np.typename("h"), Literal["short"]) -assert_type(np.typename("B"), Literal["unsigned char"]) -assert_type(np.typename("V"), Literal["void"]) -assert_type(np.typename("S1"), Literal["character"]) +assert_type(np.typename("h"), Literal["short"]) # type: ignore[deprecated] +assert_type(np.typename("B"), Literal["unsigned char"]) # type: ignore[deprecated] +assert_type(np.typename("V"), Literal["void"]) # type: ignore[deprecated] +assert_type(np.typename("S1"), Literal["character"]) # type: ignore[deprecated] assert_type(np.common_type(AR_i4), type[np.float64]) assert_type(np.common_type(AR_f2), type[np.float16]) From 0abf67d1785abfb4a2ad72fcbe7e00ab09f469b8 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Thu, 12 Feb 2026 15:03:45 +0100 Subject: [PATCH 1391/1718] CI: replace pkgconfig-lite from Chocolatey with `pip install pkgconf` (#26574) Co-authored-by: mattip --- .github/workflows/windows.yml | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index cebb42fbdf08..a1a76d363be3 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -43,7 +43,7 @@ jobs: - name: Install pkg-config run: | - choco install -y --stoponfirstfailure --checksum 6004DF17818F5A6DBF19CB335CC92702 pkgconfiglite + python -m pip install pkgconf echo "PKG_CONFIG_PATH=${{ github.workspace }}/.openblas" >> $env:GITHUB_ENV - name: Install 
NumPy (Clang-cl) @@ -153,18 +153,19 @@ jobs: - name: pkg-config run: | - choco install -y --stoponfirstfailure --checksum 6004DF17818F5A6DBF19CB335CC92702 pkgconfiglite + python -m pip install pkgconf - name: Dependencies run: | python -m pip install -r requirements/test_requirements.txt + python -m pip install -r requirements/build_requirements.txt - name: Build and install run: | pip install -r requirements/ci_requirements.txt spin config-openblas --with-scipy-openblas=${{ matrix.BLAS }} $env:PKG_CONFIG_PATH="$pwd/.openblas" - python -m pip install . -v -Csetup-args="--vsenv" -Csetup-args="-Dallow-noblas=false" + python -m pip install --no-build-isolation . -v -Csetup-args="--vsenv" -Csetup-args="-Dallow-noblas=false" - name: Run test suite ${{ matrix.TEST_MODE }} run: | From e8f77e1075d0616ac7849ac948b9d7e091ff96aa Mon Sep 17 00:00:00 2001 From: Kumar Aditya Date: Thu, 12 Feb 2026 22:01:54 +0530 Subject: [PATCH 1392/1718] MAINT: update pythoncapi-compat for PyUnstable_SetImmortal() (#30825) --- numpy/_core/src/common/pythoncapi-compat | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/common/pythoncapi-compat b/numpy/_core/src/common/pythoncapi-compat index 90c06a4cae55..8636bccf29ad 160000 --- a/numpy/_core/src/common/pythoncapi-compat +++ b/numpy/_core/src/common/pythoncapi-compat @@ -1 +1 @@ -Subproject commit 90c06a4cae557bdbfa4f231a781d2b5c1a8f6d1c +Subproject commit 8636bccf29adfa23463f810b3c2830f7cff1e933 From 36324ee63056b29bc9eaced9a51d3f5cc4ba5c66 Mon Sep 17 00:00:00 2001 From: Noxaster <208382900+Noxaster@users.noreply.github.com> Date: Fri, 13 Feb 2026 01:28:38 +0100 Subject: [PATCH 1393/1718] MAINT: Replace %-formatting with f-strings in numpy/_build_utils (UP031) (#30799) Co-authored-by: Joren Hammudoglu --- numpy/_build_utils/conv_template.py | 14 ++-- numpy/_build_utils/tempita/_looper.py | 6 +- numpy/_build_utils/tempita/_tempita.py | 92 ++++++++++++-------------- 3 files changed, 51 insertions(+), 61 deletions(-) 
diff --git a/numpy/_build_utils/conv_template.py b/numpy/_build_utils/conv_template.py index fb57abdf1587..3f6347371ae0 100644 --- a/numpy/_build_utils/conv_template.py +++ b/numpy/_build_utils/conv_template.py @@ -110,8 +110,8 @@ def parse_structure(astr, level): loopbeg = "/**begin repeat" loopend = "/**end repeat**/" else: - loopbeg = "/**begin repeat%d" % level - loopend = "/**end repeat%d**/" % level + loopbeg = f"/**begin repeat{level}" + loopend = f"/**end repeat{level}**/" ind = 0 line = 0 @@ -214,7 +214,7 @@ def parse_loop_header(loophead): def parse_string(astr, env, level, line): - lineno = "#line %d\n" % line + lineno = f"#line {line}\n" # local function for string replacement, uses env def replace(match): @@ -242,7 +242,7 @@ def replace(match): try: envlist = parse_loop_header(head) except ValueError as e: - msg = "line %d: %s" % (newline, e) + msg = f"line {newline}: {e}" raise ValueError(msg) for newenv in envlist: newenv.update(env) @@ -289,8 +289,8 @@ def process_file(source): try: code = process_str(''.join(lines)) except ValueError as e: - raise ValueError('In "%s" loop at %s' % (sourcefile, e)) from None - return '#line 1 "%s"\n%s' % (sourcefile, code) + raise ValueError(f'In "{sourcefile}" loop at {e}') from None + return f'#line 1 "{sourcefile}"\n{code}' def unique_key(adict): @@ -326,7 +326,7 @@ def main(): try: writestr = process_str(allstr) except ValueError as e: - raise ValueError("In %s loop at %s" % (file, e)) from None + raise ValueError(f"In {file} loop at {e}") from None outfile.write(writestr) diff --git a/numpy/_build_utils/tempita/_looper.py b/numpy/_build_utils/tempita/_looper.py index e7d6b2649fb5..0d3de22ac80c 100644 --- a/numpy/_build_utils/tempita/_looper.py +++ b/numpy/_build_utils/tempita/_looper.py @@ -41,8 +41,7 @@ def __iter__(self): return looper_iter(self.seq) def __repr__(self): - return '<%s for %r>' % ( - self.__class__.__name__, self.seq) + return f'<{self.__class__.__name__} for {self.seq!r}>' class looper_iter: 
@@ -69,8 +68,7 @@ def __init__(self, seq, pos): self.pos = pos def __repr__(self): - return '' % ( - self.seq[self.pos], self.pos) + return f'' def index(self): return self.pos diff --git a/numpy/_build_utils/tempita/_tempita.py b/numpy/_build_utils/tempita/_tempita.py index 88ead791574b..3d5113085183 100644 --- a/numpy/_build_utils/tempita/_tempita.py +++ b/numpy/_build_utils/tempita/_tempita.py @@ -64,9 +64,9 @@ def __init__(self, message, position, name=None): def __str__(self): msg = " ".join(self.args) if self.position: - msg = "%s at line %s column %s" % (msg, self.position[0], self.position[1]) + msg = f"{msg} at line {self.position[0]} column {self.position[1]}" if self.name: - msg += " in %s" % self.name + msg += f" in {self.name}" return msg @@ -140,7 +140,7 @@ def __init__( else: name = "" if lineno: - name += ":%s" % lineno + name += f":{lineno}" self.name = name self._parsed = parse( content, name=name, line_offset=line_offset, delimiters=self.delimiters @@ -185,8 +185,8 @@ def substitute(self, *args, **kw): if not hasattr(args[0], "items"): raise TypeError( "If you pass in a single argument, you must pass in a " - "dictionary-like object (with a .items() method); you gave %r" - % (args[0],) + "dictionary-like object (with a .items() method); " + f"you gave {args[0]!r}" ) kw = args[0] ns = kw @@ -278,7 +278,7 @@ def _interpret_code(self, code, ns, out, defs): elif name == "comment": return else: - assert 0, "Unknown code: %r" % name + assert 0, f"Unknown code: {name!r}" def _interpret_for(self, vars, expr, content, ns, out, defs): __traceback_hide__ = True @@ -288,8 +288,7 @@ def _interpret_for(self, vars, expr, content, ns, out, defs): else: if len(vars) != len(item): raise ValueError( - "Need %i items to unpack (got %i items)" - % (len(vars), len(item)) + f"Need {len(vars)} items to unpack (got {len(item)} items)" ) for name, value in zip(vars, item): ns[name] = value @@ -320,7 +319,7 @@ def _eval(self, code, ns, pos): try: value = eval(code, 
self.default_namespace, ns) except SyntaxError as e: - raise SyntaxError("invalid syntax in expression: %s" % code) + raise SyntaxError(f"invalid syntax in expression: {code}") return value except Exception as e: if getattr(e, "args", None): @@ -363,8 +362,8 @@ def _repr(self, value, pos): if self._unicode and isinstance(value, bytes): if not self.default_encoding: raise UnicodeDecodeError( - "Cannot decode bytes value %r into unicode " - "(no default_encoding provided)" % value + f"Cannot decode bytes value {value!r} into unicode " + "(no default_encoding provided)" ) try: value = value.decode(self.default_encoding) @@ -374,21 +373,21 @@ def _repr(self, value, pos): e.object, e.start, e.end, - e.reason + " in string %r" % value, + e.reason + f" in string {value!r}", ) elif not self._unicode and isinstance(value, str): if not self.default_encoding: raise UnicodeEncodeError( - "Cannot encode unicode value %r into bytes " - "(no default_encoding provided)" % value + f"Cannot encode unicode value {value!r} into bytes " + "(no default_encoding provided)" ) value = value.encode(self.default_encoding) return value def _add_line_info(self, msg, pos): - msg = "%s at line %s column %s" % (msg, pos[0], pos[1]) + msg = f"{msg} at line {pos[0]} column {pos[1]}" if self.name: - msg += " in file %s" % self.name + msg += f" in file {self.name}" return msg @@ -427,10 +426,8 @@ def __getitem__(self, key): return dict.__getitem__(self, key) def __repr__(self): - return "<%s %s>" % ( - self.__class__.__name__, - " ".join(["%s=%r" % (k, v) for k, v in sorted(self.items())]), - ) + items_str = " ".join([f"{k}={v!r}" for k, v in sorted(self.items())]) + return f"<{self.__class__.__name__} {items_str}>" class TemplateDef: @@ -446,12 +443,8 @@ def __init__( self._bound_self = bound_self def __repr__(self): - return "" % ( - self._func_name, - self._func_signature, - self._template.name, - self._pos, - ) + return (f"") def __str__(self): return self() @@ -486,7 +479,7 @@ def 
_parse_signature(self, args, kw): extra_kw = {} for name, value in kw.items(): if not var_kw and name not in sig_args: - raise TypeError("Unexpected argument %s" % name) + raise TypeError(f"Unexpected argument {name}") if name in sig_args: values[sig_args] = value else: @@ -503,15 +496,14 @@ def _parse_signature(self, args, kw): values[var_args] = tuple(args) break else: - raise TypeError( - "Extra position arguments: %s" % ", ".join([repr(v) for v in args]) - ) + args_str = ", ".join([repr(v) for v in args]) + raise TypeError(f"Extra position arguments: {args_str}") for name, value_expr in defaults.items(): if name not in values: values[name] = self._template._eval(value_expr, self._ns, self._pos) for name in sig_args: if name not in values: - raise TypeError("Missing argument: %s" % name) + raise TypeError(f"Missing argument: {name}") if var_kw: values[var_kw] = extra_kw return values @@ -523,7 +515,7 @@ def __init__(self, name): self.get = TemplateObjectGetter(self) def __repr__(self): - return "<%s %s>" % (self.__class__.__name__, self.__name) + return f"<{self.__class__.__name__} {self.__name}>" class TemplateObjectGetter: @@ -534,7 +526,7 @@ def __getattr__(self, attr): return getattr(self.__template_obj, attr, Empty) def __repr__(self): - return "<%s around %r>" % (self.__class__.__name__, self.__template_obj) + return f"<{self.__class__.__name__} around {self.__template_obj!r}>" class _Empty: @@ -598,18 +590,18 @@ def lex(s, name=None, trim_whitespace=True, line_offset=0, delimiters=None): last_pos = (line_offset + 1, 1) token_re = re.compile( - r"%s|%s" % (re.escape(delimiters[0]), re.escape(delimiters[1])) + rf"{re.escape(delimiters[0])}|{re.escape(delimiters[1])}" ) for match in token_re.finditer(s): expr = match.group(0) pos = find_position(s, match.end(), last, last_pos) if expr == delimiters[0] and in_expr: raise TemplateError( - "%s inside expression" % delimiters[0], position=pos, name=name + f"{delimiters[0]} inside expression", position=pos, 
name=name ) elif expr == delimiters[1] and not in_expr: raise TemplateError( - "%s outside expression" % delimiters[1], position=pos, name=name + f"{delimiters[1]} outside expression", position=pos, name=name ) if expr == delimiters[0]: part = s[last:match.start()] @@ -623,7 +615,7 @@ def lex(s, name=None, trim_whitespace=True, line_offset=0, delimiters=None): last_pos = pos if in_expr: raise TemplateError( - "No %s to finish last expression" % delimiters[1], + f"No {delimiters[1]} to finish last expression", name=name, position=last_pos, ) @@ -800,12 +792,12 @@ def parse_expr(tokens, name, context=()): return parse_cond(tokens, name, context) elif expr.startswith("elif ") or expr == "else": raise TemplateError( - "%s outside of an if block" % expr.split()[0], position=pos, name=name + f"{expr.split()[0]} outside of an if block", position=pos, name=name ) elif expr in ("if", "elif", "for"): - raise TemplateError("%s with no expression" % expr, position=pos, name=name) + raise TemplateError(f"{expr} with no expression", position=pos, name=name) elif expr in ("endif", "endfor", "enddef"): - raise TemplateError("Unexpected %s" % expr, position=pos, name=name) + raise TemplateError(f"Unexpected {expr}", position=pos, name=name) elif expr.startswith("for "): return parse_for(tokens, name, context) elif expr.startswith("default "): @@ -843,7 +835,7 @@ def parse_one_cond(tokens, name, context): elif first == "else": part = ("else", pos, None, content) else: - assert 0, "Unexpected token %r at %s" % (first, pos) + assert 0, f"Unexpected token {first!r} at {pos}" while 1: if not tokens: raise TemplateError("No {{endif}}", position=pos, name=name) @@ -867,11 +859,11 @@ def parse_for(tokens, name, context): first = first[3:].strip() match = in_re.search(first) if not match: - raise TemplateError('Bad for (no "in") in %r' % first, position=pos, name=name) + raise TemplateError(f'Bad for (no "in") in {first!r}', position=pos, name=name) vars = first[: match.start()] if "(" in 
vars: raise TemplateError( - "You cannot have () in the variable section of a for loop (%r)" % vars, + f"You cannot have () in the variable section of a for loop ({vars!r})", position=pos, name=name, ) @@ -893,7 +885,7 @@ def parse_default(tokens, name, context): parts = first.split("=", 1) if len(parts) == 1: raise TemplateError( - "Expression must be {{default var=value}}; no = found in %r" % first, + f"Expression must be {{{{default var=value}}}}; no = found in {first!r}", position=pos, name=name, ) @@ -904,7 +896,7 @@ def parse_default(tokens, name, context): ) if not var_re.search(var): raise TemplateError( - "Not a valid variable name for {{default}}: %r" % var, + f"Not a valid variable name for {{{{default}}}}: {var!r}", position=pos, name=name, ) @@ -930,7 +922,7 @@ def parse_def(tokens, name, context): sig = ((), None, None, {}) elif not first.endswith(")"): raise TemplateError( - "Function definition doesn't end with ): %s" % first, + f"Function definition doesn't end with ): {first}", position=start, name=name, ) @@ -976,7 +968,7 @@ def get_token(pos=False): tok_type, tok_string = get_token() if tok_type != tokenize.NAME: raise TemplateError( - "Invalid signature: (%s)" % sig_text, position=pos, name=name + f"Invalid signature: ({sig_text})", position=pos, name=name ) var_name = tok_string tok_type, tok_string = get_token() @@ -994,7 +986,7 @@ def get_token(pos=False): continue if var_arg_type is not None: raise TemplateError( - "Invalid signature: (%s)" % sig_text, position=pos, name=name + f"Invalid signature: ({sig_text})", position=pos, name=name ) if tok_type == tokenize.OP and tok_string == "=": nest_type = None @@ -1009,7 +1001,7 @@ def get_token(pos=False): end_pos = e if tok_type == tokenize.ENDMARKER and nest_count: raise TemplateError( - "Invalid signature: (%s)" % sig_text, position=pos, name=name + f"Invalid signature: ({sig_text})", position=pos, name=name ) if not nest_count and ( tok_type == tokenize.ENDMARKER @@ -1098,7 +1090,7 @@ def 
fill_command(args=None): vars.update(os.environ) for value in args: if "=" not in value: - print("Bad argument: %r" % value) + print(f"Bad argument: {value!r}") sys.exit(2) name, value = value.split("=", 1) if name.startswith("py:"): From a9741485505e17e528768fcd8b66957e02f36901 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 13 Feb 2026 15:07:16 +0100 Subject: [PATCH 1394/1718] TYP: ``matlib``: missing extended precision imports --- numpy/matlib.pyi | 2 ++ 1 file changed, 2 insertions(+) diff --git a/numpy/matlib.pyi b/numpy/matlib.pyi index 3eee0f441e8d..0904c2744015 100644 --- a/numpy/matlib.pyi +++ b/numpy/matlib.pyi @@ -99,6 +99,7 @@ from numpy import ( # type: ignore[deprecated] # noqa: F401 common_type, complex64, complex128, + complex192, complex256, complexfloating, compress, @@ -175,6 +176,7 @@ from numpy import ( # type: ignore[deprecated] # noqa: F401 float16, float32, float64, + float96, float128, float_power, floating, From c455b03891a5141c87cfd998786111e57cb49fee Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Mon, 8 Dec 2025 16:04:35 +0000 Subject: [PATCH 1395/1718] ENH: linalg: return complex arrays from eigvals/eigvecs --- numpy/linalg/_linalg.py | 21 ++++----------------- numpy/linalg/tests/test_linalg.py | 12 ++++++------ 2 files changed, 10 insertions(+), 23 deletions(-) diff --git a/numpy/linalg/_linalg.py b/numpy/linalg/_linalg.py index d11699bd1c5e..0e16a281025a 100644 --- a/numpy/linalg/_linalg.py +++ b/numpy/linalg/_linalg.py @@ -1247,14 +1247,7 @@ def eigvals(a): under='ignore'): w = _umath_linalg.eigvals(a, signature=signature) - if not isComplexType(t): - if all(w.imag == 0): - w = w.real - result_t = _realType(result_t) - else: - result_t = _complexType(result_t) - - return w.astype(result_t, copy=False) + return w.astype(_complexType(result_t), copy=False) def _eigvalsh_dispatcher(a, UPLO=None): @@ -1500,15 +1493,9 @@ def eig(a): under='ignore'): w, vt = _umath_linalg.eig(a, signature=signature) - if not isComplexType(t) and 
all(w.imag == 0.0): - w = w.real - vt = vt.real - result_t = _realType(result_t) - else: - result_t = _complexType(result_t) - - vt = vt.astype(result_t, copy=False) - return EigResult(w.astype(result_t, copy=False), wrap(vt)) + w = w.astype(_complexType(result_t), copy=False) + vt = vt.astype(_complexType(result_t), copy=False) + return EigResult(w, wrap(vt)) @array_function_dispatch(_eigvalsh_dispatcher) diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py index 2c4cc2891ab5..cd93acaf79c0 100644 --- a/numpy/linalg/tests/test_linalg.py +++ b/numpy/linalg/tests/test_linalg.py @@ -604,7 +604,7 @@ class TestEigvals(EigvalsCases): @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) def test_types(self, dtype): x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) - assert_equal(linalg.eigvals(x).dtype, dtype) + assert_equal(linalg.eigvals(x).dtype, get_complex_dtype(dtype)) x = np.array([[1, 0.5], [-1, 1]], dtype=dtype) assert_equal(linalg.eigvals(x).dtype, get_complex_dtype(dtype)) @@ -614,7 +614,7 @@ class ArraySubclass(np.ndarray): pass a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass) res = linalg.eigvals(a) - assert_(res.dtype.type is np.float64) + assert_(res.dtype.type is np.complex128) assert_equal((0, 1), res.shape) # This is just for documentation, it might make sense to change: assert_(isinstance(res, np.ndarray)) @@ -643,8 +643,8 @@ class TestEig(EigCases): def test_types(self, dtype): x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) w, v = np.linalg.eig(x) - assert_equal(w.dtype, dtype) - assert_equal(v.dtype, dtype) + assert_equal(w.dtype, get_complex_dtype(dtype)) + assert_equal(v.dtype, get_complex_dtype(dtype)) x = np.array([[1, 0.5], [-1, 1]], dtype=dtype) w, v = np.linalg.eig(x) @@ -657,8 +657,8 @@ class ArraySubclass(np.ndarray): pass a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass) res, res_v = linalg.eig(a) - assert_(res_v.dtype.type is np.float64) - assert_(res.dtype.type is np.float64) 
+ assert_(res_v.dtype.type is np.complex128) + assert_(res.dtype.type is np.complex128) assert_equal(a.shape, res_v.shape) assert_equal((0, 1), res.shape) # This is just for documentation, it might make sense to change: From 0465b2e206341b6c317b2f8e3c4b0c6379e7c37e Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Wed, 10 Dec 2025 11:35:38 +0100 Subject: [PATCH 1396/1718] DOC: linalg: add a release note snippet for eig/eigvals --- .../upcoming_changes/30411.compatibility.rst | 20 +++++++++++++++++++ 1 file changed, 20 insertions(+) create mode 100644 doc/release/upcoming_changes/30411.compatibility.rst diff --git a/doc/release/upcoming_changes/30411.compatibility.rst b/doc/release/upcoming_changes/30411.compatibility.rst new file mode 100644 index 000000000000..c013160272dd --- /dev/null +++ b/doc/release/upcoming_changes/30411.compatibility.rst @@ -0,0 +1,20 @@ +* `linalg.eig` and `linalg.eigvals` always return complex arrays. Previously, the return + values were depening on whether the eigenvalues happen to lie on the real line (which, for + a general, non-symmetric matrix, is not guaranteed). + + The user change depends on your usage: + + - to retain the previous behavior, do + + ``` + w = eigvals(a) + if w.imag == 0: # this is what NumPy used to do + w = w.real + ``` + + - if your matrix is symmetrix/hermitian, use `eigh` and `eigvalsh` instead of `eig` and `eigvals`: + these are guaranteed to return real values. 
(A common case is covariance matrices, which are + symmetric and positive definite by construction) + + + From be82af142f0a180ca6dcde20bbda85c5bfc3f98a Mon Sep 17 00:00:00 2001 From: antareepsarkar Date: Sat, 14 Feb 2026 07:56:23 +0530 Subject: [PATCH 1397/1718] DEP: deprecate numpy.ma.round_ --- .../upcoming_changes/30738.deprecation.rst | 4 ++ numpy/_core/tests/test_deprecations.py | 10 +++ numpy/conftest.py | 1 + numpy/ma/core.py | 62 +++++++++++++++++-- numpy/ma/core.pyi | 19 +++++- 5 files changed, 90 insertions(+), 6 deletions(-) create mode 100644 doc/release/upcoming_changes/30738.deprecation.rst diff --git a/doc/release/upcoming_changes/30738.deprecation.rst b/doc/release/upcoming_changes/30738.deprecation.rst new file mode 100644 index 000000000000..381117ec84cc --- /dev/null +++ b/doc/release/upcoming_changes/30738.deprecation.rst @@ -0,0 +1,4 @@ +``numpy.ma.round_`` is deprecated +--------------------------------- +``numpy.ma.round_`` is deprecated. +``numpy.ma.round`` can be used as a replacement. 
diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index 8507d369fe29..b7a2444fbbc0 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -355,3 +355,13 @@ class TestTypenameDeprecation(_DeprecationTestCase): def test_typename_emits_deprecation_warning(self): self.assert_deprecated(lambda: np.typename("S1")) self.assert_deprecated(lambda: np.typename("h")) + +class TestRoundDeprecation(_DeprecationTestCase): + # Deprecation in NumPy 2.5, 2026-02 + + def test_round_emits_deprecation_warning_array(self): + a = np.array([1.5, 2.7, -1.5, -2.7]) + self.assert_deprecated(lambda: np.ma.round_(a)) + + def test_round_emits_deprecation_warning_scalar(self): + self.assert_deprecated(lambda: np.ma.round_(3.14)) diff --git a/numpy/conftest.py b/numpy/conftest.py index d260225568da..c8f810aeda64 100644 --- a/numpy/conftest.py +++ b/numpy/conftest.py @@ -186,6 +186,7 @@ def warnings_errors_and_rng(test=None): "numpy.fix is deprecated", # fix -> trunc "The chararray class is deprecated", # char.chararray "numpy.typename is deprecated", # typename -> dtype.name + "numpy.ma.round_ is deprecated", # ma.round_ -> ma.round ] msg = "|".join(msgs) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 196462720cd8..4ab7a18fabfb 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -8089,7 +8089,7 @@ def nmask(x): return d -def round_(a, decimals=0, out=None): +def round(a, decimals=0, out=None): """ Return a copy of a, rounded to 'decimals' places. 
@@ -8123,7 +8123,7 @@ def round_(a, decimals=0, out=None): masked_array(data=[11.2, -3.973, 0.801, --], mask=[False, False, False, True], fill_value=1e+20) - >>> ma.round_(masked_x) + >>> ma.round(masked_x) masked_array(data=[11.0, -4.0, 1.0, --], mask=[False, False, False, True], fill_value=1e+20) @@ -8131,7 +8131,7 @@ def round_(a, decimals=0, out=None): masked_array(data=[11.2, -4.0, 0.8, --], mask=[False, False, False, True], fill_value=1e+20) - >>> ma.round_(masked_x, decimals=-1) + >>> ma.round(masked_x, decimals=-1) masked_array(data=[10.0, -0.0, 0.0, --], mask=[False, False, False, True], fill_value=1e+20) @@ -8145,8 +8145,62 @@ def round_(a, decimals=0, out=None): return out -round = round_ +def round_(a, decimals=0, out=None): + """ + Return a copy of a, rounded to 'decimals' places. + + .. deprecated:: 2.5 + `numpy.ma.round_` is deprecated. Use `numpy.ma.round` instead. + + When 'decimals' is negative, it specifies the number of positions + to the left of the decimal point. The real and imaginary parts of + complex numbers are rounded separately. Nothing is done if the + array is not of float type and 'decimals' is greater than or equal + to 0. + + Parameters + ---------- + decimals : int + Number of decimals to round to. May be negative. + out : array_like + Existing array to use for output. + If not given, returns a default copy of a. + + Notes + ----- + If out is given and does not have a mask attribute, the mask of a + is lost! 
+ Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> x = [11.2, -3.973, 0.801, -1.41] + >>> mask = [0, 0, 0, 1] + >>> masked_x = ma.masked_array(x, mask) + >>> masked_x + masked_array(data=[11.2, -3.973, 0.801, --], + mask=[False, False, False, True], + fill_value=1e+20) + >>> ma.round_(masked_x) + masked_array(data=[11.0, -4.0, 1.0, --], + mask=[False, False, False, True], + fill_value=1e+20) + >>> ma.round(masked_x, decimals=1) + masked_array(data=[11.2, -4.0, 0.8, --], + mask=[False, False, False, True], + fill_value=1e+20) + >>> ma.round_(masked_x, decimals=-1) + masked_array(data=[10.0, -0.0, 0.0, --], + mask=[False, False, False, True], + fill_value=1e+20) + """ + warnings.warn( + "numpy.ma.round_ is deprecated. Use numpy.ma.round instead.", + DeprecationWarning, + stacklevel=2, + ) + return round(a, decimals, out) def _mask_propagate(a, axis): """ diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index fb54cc7b8238..b9e207182026 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -3774,18 +3774,33 @@ def choose[ArrayT: np.ndarray]( # @overload # a: masked_array, out: None (default) +def round[MArray: MaskedArray](a: MArray, decimals: int = 0, out: None = None) -> MArray: ... +@overload # a: known array-like, out: None (default) +def round[ScalarT: np.number](a: _ArrayLike[ScalarT], decimals: int = 0, out: None = None) -> _MaskedArray[ScalarT]: ... +@overload # a: unknown array-like, out: None (default) +def round(a: _ArrayLikeNumber_co, decimals: int = 0, out: None = None) -> _MaskedArray[Incomplete]: ... +@overload # out: ndarray (positional) +def round[ArrayT: np.ndarray](a: ArrayLike, decimals: int, out: ArrayT) -> ArrayT: ... +@overload # out: ndarray (keyword) +def round[ArrayT: np.ndarray](a: ArrayLike, decimals: int = 0, *, out: ArrayT) -> ArrayT: ... + +# +@overload # a: masked_array, out: None (default) +@deprecated("numpy.ma.round_ is deprecated. 
Use numpy.ma.round instead.") def round_[MArray: MaskedArray](a: MArray, decimals: int = 0, out: None = None) -> MArray: ... @overload # a: known array-like, out: None (default) +@deprecated("numpy.ma.round_ is deprecated. Use numpy.ma.round instead.") def round_[ScalarT: np.number](a: _ArrayLike[ScalarT], decimals: int = 0, out: None = None) -> _MaskedArray[ScalarT]: ... @overload # a: unknown array-like, out: None (default) +@deprecated("numpy.ma.round_ is deprecated. Use numpy.ma.round instead.") def round_(a: _ArrayLikeNumber_co, decimals: int = 0, out: None = None) -> _MaskedArray[Incomplete]: ... @overload # out: ndarray (positional) +@deprecated("numpy.ma.round_ is deprecated. Use numpy.ma.round instead.") def round_[ArrayT: np.ndarray](a: ArrayLike, decimals: int, out: ArrayT) -> ArrayT: ... @overload # out: ndarray (keyword) +@deprecated("numpy.ma.round_ is deprecated. Use numpy.ma.round instead.") def round_[ArrayT: np.ndarray](a: ArrayLike, decimals: int = 0, *, out: ArrayT) -> ArrayT: ... -round = round_ - # keep in sync with `_core.multiarray.inner` def inner(a: ArrayLike, b: ArrayLike) -> Incomplete: ... 
From 7be2b881031b751e51c79fdff5313d502ca723ea Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Fri, 13 Feb 2026 15:44:19 +0100 Subject: [PATCH 1398/1718] MAINT: ensure strict backwards compat of polynomial roots --- numpy/lib/_polynomial_impl.py | 4 ++++ numpy/lib/tests/test_polynomial.py | 8 ++++++++ numpy/linalg/_linalg.py | 25 +++++++++++++++++++------ numpy/polynomial/hermite.py | 4 ++++ numpy/polynomial/laguerre.py | 4 ++++ numpy/polynomial/polynomial.py | 4 ++++ 6 files changed, 43 insertions(+), 6 deletions(-) diff --git a/numpy/lib/_polynomial_impl.py b/numpy/lib/_polynomial_impl.py index e17d6a49ef66..81f2a0e5d7cd 100644 --- a/numpy/lib/_polynomial_impl.py +++ b/numpy/lib/_polynomial_impl.py @@ -250,6 +250,10 @@ def roots(p): A = diag(NX.ones((N - 2,), p.dtype), -1) A[0, :] = -p[1:] / p[0] roots = eigvals(A) + + # backwards compat: return real values if possible + from numpy.linalg._linalg import _to_real_if_imag_zero + roots = _to_real_if_imag_zero(roots, A) else: roots = NX.array([]) diff --git a/numpy/lib/tests/test_polynomial.py b/numpy/lib/tests/test_polynomial.py index 32547f8e6c18..a388ab7bace5 100644 --- a/numpy/lib/tests/test_polynomial.py +++ b/numpy/lib/tests/test_polynomial.py @@ -143,6 +143,14 @@ def test_roots(self): # to take into account numerical calculation error assert_almost_equal(res, tgt, 14 - int(np.log10(i))) + @pytest.mark.parametrize("dtyp", [int, np.float32, np.float64]) + def test_roots_dtype(self, dtyp): + coef = np.asarray([1, 0, -1], dtype=dtyp) # x**2 - 1 + r = np.roots(coef) + r.sort() + assert_allclose(r, np.asarray([-1, 1])) + assert r.dtype == {int: np.float64}.get(dtyp, dtyp) + def test_str_leading_zeros(self): p = np.poly1d([4, 3, 2, 1]) p[3] = 0 diff --git a/numpy/linalg/_linalg.py b/numpy/linalg/_linalg.py index 0e16a281025a..c0a42f302055 100644 --- a/numpy/linalg/_linalg.py +++ b/numpy/linalg/_linalg.py @@ -186,6 +186,19 @@ def _realType(t, default=double): def _complexType(t, default=cdouble): return 
_complex_types_map.get(t, default) + +def _to_real_if_imag_zero(w, t): + """Backwards compat helper: force w to be real if t.dtype is real and w.imag == 0 + """ + result_t = t.dtype.type + if not isComplexType(result_t) and all(w.imag == 0.0): + w = w.real + result_t = _realType(result_t) + else: + result_t = _complexType(result_t) + return w.astype(result_t, copy=False) + + def _commonType(*arrays): # in lite version, use higher precision (always double or cdouble) result_type = single @@ -1229,11 +1242,11 @@ def eigvals(a): >>> D = np.diag((-1,1)) >>> LA.eigvals(D) - array([-1., 1.]) + array([-1. + 0.j, 1. + 0.j]) >>> A = np.dot(Q, D) >>> A = np.dot(A, Q.T) >>> LA.eigvals(A) - array([ 1., -1.]) # random + array([ 1., -1.]) # random """ a, wrap = _makearray(a) @@ -1443,8 +1456,8 @@ def eig(a): >>> eigenvalues, eigenvectors = LA.eig(np.diag((1, 2, 3))) >>> eigenvalues - array([1., 2., 3.]) - >>> eigenvectors + array([1. + 0j, 2. + 0j, 3. + 0j]) + >>> eigenvectors.real array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]) @@ -1476,8 +1489,8 @@ def eig(a): >>> # Theor. 
eigenvalues are 1 +/- 1e-9 >>> eigenvalues, eigenvectors = LA.eig(a) >>> eigenvalues - array([1., 1.]) - >>> eigenvectors + array([1.+0j, 1.+0j]) + >>> eigenvectors.real array([[1., 0.], [0., 1.]]) diff --git a/numpy/polynomial/hermite.py b/numpy/polynomial/hermite.py index c6007d19df7f..819906e92b47 100644 --- a/numpy/polynomial/hermite.py +++ b/numpy/polynomial/hermite.py @@ -1543,6 +1543,10 @@ def hermroots(c): m = hermcompanion(c)[::-1, ::-1] r = np.linalg.eigvals(m) r.sort() + + # backwards compat: return real values if possible + from numpy.linalg._linalg import _to_real_if_imag_zero + r = _to_real_if_imag_zero(r, m) return r diff --git a/numpy/polynomial/laguerre.py b/numpy/polynomial/laguerre.py index b1d87bf6d035..eb34cbef17ca 100644 --- a/numpy/polynomial/laguerre.py +++ b/numpy/polynomial/laguerre.py @@ -1525,6 +1525,10 @@ def lagroots(c): m = lagcompanion(c)[::-1, ::-1] r = np.linalg.eigvals(m) r.sort() + + # backwards compat: return real values if possible + from numpy.linalg._linalg import _to_real_if_imag_zero + r = _to_real_if_imag_zero(r, m) return r diff --git a/numpy/polynomial/polynomial.py b/numpy/polynomial/polynomial.py index e3823c89cd98..75ae7aee7419 100644 --- a/numpy/polynomial/polynomial.py +++ b/numpy/polynomial/polynomial.py @@ -1547,6 +1547,10 @@ def polyroots(c): m = polycompanion(c) r = np.linalg.eigvals(m) r.sort() + + # backwards compat: return real values if possible + from numpy.linalg._linalg import _to_real_if_imag_zero + r = _to_real_if_imag_zero(r, m) return r From 155617fc66056a0acc9df26b616ab70d3d07247d Mon Sep 17 00:00:00 2001 From: Saba Siddique Date: Sun, 15 Feb 2026 18:31:32 +0500 Subject: [PATCH 1399/1718] DOC: Use asterisks for ctypes conclusion list bullets --- doc/source/user/c-info.python-as-glue.rst | 20 +++++++------------- 1 file changed, 7 insertions(+), 13 deletions(-) diff --git a/doc/source/user/c-info.python-as-glue.rst b/doc/source/user/c-info.python-as-glue.rst index 75fca544d9e5..19763f7c2a51 100644 --- 
a/doc/source/user/c-info.python-as-glue.rst +++ b/doc/source/user/c-info.python-as-glue.rst @@ -825,19 +825,13 @@ Conclusion Using ctypes is a powerful way to connect Python with arbitrary C-code. Its advantages for extending Python include -- clean separation of C code from Python code - - - no need to learn a new syntax except Python and C - - - allows reuse of C code - - - functionality in shared libraries written for other purposes can be - obtained with a simple Python wrapper and search for the library. - - -- easy integration with NumPy through the ctypes attribute - -- full argument checking with the ndpointer class factory +* clean separation of C code from Python code +* no need to learn a new syntax except Python and C +* allows reuse of C code +* functionality in shared libraries written for other purposes can be + obtained with a simple Python wrapper and search for the library. +* easy integration with NumPy through the ctypes attribute +* full argument checking with the ndpointer class factory Its disadvantages include From ef871217490188f52e6d99f0197d6f11a8303f7c Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Mon, 16 Feb 2026 16:08:54 +0100 Subject: [PATCH 1400/1718] TYP: linalg: fix eig(...) overloads --- numpy/linalg/_linalg.pyi | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index 6c78cb10ba2b..c982bf524c50 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -334,11 +334,11 @@ def eig(a: NDArray[np.inexact[Never]]) -> EigResult: ... @overload # ~complex128 def eig(a: _AsArrayC128) -> EigResult[np.complex128]: ... @overload # +float64 -def eig(a: _ToArrayF64) -> EigResult[np.complex128] | EigResult[np.float64]: ... +def eig(a: _ToArrayF64) -> EigResult[np.complex128]: ... @overload # ~complex64 def eig(a: _ArrayLike[np.complex64]) -> EigResult[np.complex64]: ... 
@overload # ~float32 -def eig(a: _ArrayLike[np.float32]) -> EigResult[np.complex64] | EigResult[np.float32]: ... +def eig(a: _ArrayLike[np.float32]) -> EigResult[np.complex64]: ... @overload # fallback def eig(a: _ArrayLikeComplex_co) -> EigResult: ... From 357b804b6af9f225bee416c7948c92c41b888486 Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Mon, 16 Feb 2026 16:31:12 +0100 Subject: [PATCH 1401/1718] TYP/TST: linalg: try fixing mypy errors --- numpy/typing/tests/data/reveal/linalg.pyi | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/typing/tests/data/reveal/linalg.pyi b/numpy/typing/tests/data/reveal/linalg.pyi index f6de644bd7da..a1155e2bb5ed 100644 --- a/numpy/typing/tests/data/reveal/linalg.pyi +++ b/numpy/typing/tests/data/reveal/linalg.pyi @@ -137,9 +137,9 @@ assert_type(np.linalg.eigvalsh(AR_i8), npt.NDArray[np.float64]) assert_type(np.linalg.eigvalsh(AR_f8), npt.NDArray[np.float64]) assert_type(np.linalg.eigvalsh(AR_c16), npt.NDArray[np.float64]) -assert_type(np.linalg.eig(AR_i8), EigResult[np.float64] | EigResult[np.complex128]) -assert_type(np.linalg.eig(AR_f4), EigResult[np.float32] | EigResult[np.complex64]) -assert_type(np.linalg.eig(AR_f8), EigResult[np.float64] | EigResult[np.complex128]) +assert_type(np.linalg.eig(AR_i8), EigResult[np.complex128]) +assert_type(np.linalg.eig(AR_f4), EigResult[np.complex64]) +assert_type(np.linalg.eig(AR_f8), EigResult[np.complex128]) assert_type(np.linalg.eig(AR_c8), EigResult[np.complex64]) assert_type(np.linalg.eig(AR_c16), EigResult[np.complex128]) # Mypy bug: `Expression is of type "EigResult[Any]", not "EigResult[Any]"` From 552c46f7b114466c760bb63fba4ae1e4842f5f53 Mon Sep 17 00:00:00 2001 From: Peter Hawkins Date: Mon, 16 Feb 2026 19:05:28 +0100 Subject: [PATCH 1402/1718] BUG: Fix weak hash function in np.isin(). 
(#30840) --- numpy/_core/src/multiarray/unique.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/multiarray/unique.cpp b/numpy/_core/src/multiarray/unique.cpp index 675e0226d14f..a8b897446182 100644 --- a/numpy/_core/src/multiarray/unique.cpp +++ b/numpy/_core/src/multiarray/unique.cpp @@ -63,7 +63,7 @@ empty_array_like(PyArrayObject *arr, npy_intp length) template size_t hash_integer(const T *value, npy_bool equal_nan) { - return std::hash{}(*value); + return npy_fnv1a(reinterpret_cast(value), sizeof(T)); } template From 457920672fbe6b8c15d6b556e7d47aa106e4d14a Mon Sep 17 00:00:00 2001 From: star1327p Date: Mon, 16 Feb 2026 15:38:02 -0800 Subject: [PATCH 1403/1718] DOC: Improve numpy.blackman documentation [skip azp][skip cirrus][skip actions] --- numpy/lib/_function_base_impl.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 8698df708b89..2e76633136ee 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -3073,15 +3073,14 @@ def blackman(M): "removing the foot", i.e. smoothing discontinuities at the beginning and end of the sampled signal) or tapering function. It is known as a "near optimal" tapering function, almost as good (by some measures) - as the kaiser window. + as the Kaiser window. References ---------- - Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra, - Dover Publications, New York. - - Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing. - Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471. + .. [1] Blackman, R.B. and Tukey, J.W., (1958) + The measurement of power spectra, Dover Publications, New York. + .. [2] Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing. + Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471. 
Examples -------- From 9d833928eb93c7134d1375a631bd5d9cf4138457 Mon Sep 17 00:00:00 2001 From: star1327p Date: Mon, 16 Feb 2026 15:46:27 -0800 Subject: [PATCH 1404/1718] DOC: Correct math typos in polynomials [skip azp][skip cirrus][skip actions] --- numpy/polynomial/chebyshev.py | 2 +- numpy/polynomial/hermite.py | 2 +- numpy/polynomial/hermite_e.py | 2 +- numpy/polynomial/polynomial.py | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/numpy/polynomial/chebyshev.py b/numpy/polynomial/chebyshev.py index 55b48b905848..4dd2a85e15d7 100644 --- a/numpy/polynomial/chebyshev.py +++ b/numpy/polynomial/chebyshev.py @@ -1420,7 +1420,7 @@ def chebvander2d(x, y, deg): correspond to the elements of a 2-D coefficient array `c` of shape (xdeg + 1, ydeg + 1) in the order - .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ... + .. math:: c_{00}, c_{01}, c_{02}, ... , c_{10}, c_{11}, c_{12}, ... and ``np.dot(V, c.flat)`` and ``chebval2d(x, y, c)`` will be the same up to roundoff. This equivalence is useful both for least squares diff --git a/numpy/polynomial/hermite.py b/numpy/polynomial/hermite.py index c6007d19df7f..1bf99461b8e8 100644 --- a/numpy/polynomial/hermite.py +++ b/numpy/polynomial/hermite.py @@ -1198,7 +1198,7 @@ def hermvander2d(x, y, deg): correspond to the elements of a 2-D coefficient array `c` of shape (xdeg + 1, ydeg + 1) in the order - .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ... + .. math:: c_{00}, c_{01}, c_{02}, ... , c_{10}, c_{11}, c_{12}, ... and ``np.dot(V, c.flat)`` and ``hermval2d(x, y, c)`` will be the same up to roundoff. 
This equivalence is useful both for least squares diff --git a/numpy/polynomial/hermite_e.py b/numpy/polynomial/hermite_e.py index f5d82aa543b9..730b60804e9a 100644 --- a/numpy/polynomial/hermite_e.py +++ b/numpy/polynomial/hermite_e.py @@ -1147,7 +1147,7 @@ def hermevander2d(x, y, deg): correspond to the elements of a 2-D coefficient array `c` of shape (xdeg + 1, ydeg + 1) in the order - .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ... + .. math:: c_{00}, c_{01}, c_{02}, ... , c_{10}, c_{11}, c_{12}, ... and ``np.dot(V, c.flat)`` and ``hermeval2d(x, y, c)`` will be the same up to roundoff. This equivalence is useful both for least squares diff --git a/numpy/polynomial/polynomial.py b/numpy/polynomial/polynomial.py index e3823c89cd98..a2ea62c9e666 100644 --- a/numpy/polynomial/polynomial.py +++ b/numpy/polynomial/polynomial.py @@ -1154,7 +1154,7 @@ def polyvander2d(x, y, deg): correspond to the elements of a 2-D coefficient array `c` of shape (xdeg + 1, ydeg + 1) in the order - .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ... + .. math:: c_{00}, c_{01}, c_{02}, ... , c_{10}, c_{11}, c_{12}, ... and ``np.dot(V, c.flat)`` and ``polyval2d(x, y, c)`` will be the same up to roundoff. 
This equivalence is useful both for least squares From 90d774919117b12d8a44e9c57cc648b3fab79dc6 Mon Sep 17 00:00:00 2001 From: star1327p Date: Mon, 16 Feb 2026 15:53:56 -0800 Subject: [PATCH 1405/1718] DOC: Correct typos in code comments [skip azp][skip cirrus][skip actions] --- doc/Makefile | 2 +- numpy/_core/code_generators/ufunc_docstrings.py | 2 +- numpy/_core/fromnumeric.py | 6 +++--- numpy/_core/numeric.py | 2 +- numpy/lib/_datasource.py | 16 ++++++++-------- numpy/lib/tests/test__datasource.py | 2 +- numpy/ma/core.pyi | 2 +- numpy/polynomial/legendre.py | 2 +- 8 files changed, 17 insertions(+), 17 deletions(-) diff --git a/doc/Makefile b/doc/Makefile index 545b10de3384..e6e0689481ca 100644 --- a/doc/Makefile +++ b/doc/Makefile @@ -39,7 +39,7 @@ help: @echo " clean to remove generated doc files and start fresh" @echo " docenv make a virtual environment in which to build docs" @echo " html to make standalone HTML files" - @echo " htmlhelp to make HTML files and a HTML help project" + @echo " htmlhelp to make HTML files and an HTML help project" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " changes to make an overview over all changed/added/deprecated items" @echo " linkcheck to check all external links for integrity" diff --git a/numpy/_core/code_generators/ufunc_docstrings.py b/numpy/_core/code_generators/ufunc_docstrings.py index 1630a9d6f136..40842b1cea43 100644 --- a/numpy/_core/code_generators/ufunc_docstrings.py +++ b/numpy/_core/code_generators/ufunc_docstrings.py @@ -1619,7 +1619,7 @@ def add_newdoc(place, name, doc): returned. In a two's-complement system, this operation effectively flips all the bits, resulting in a representation that corresponds to the negative of the input plus one. This is the most common method of - representing signed integers on computers [1]_. A N-bit two's-complement + representing signed integers on computers [1]_. 
An N-bit two's-complement system can represent every integer in the range :math:`-2^{N-1}` to :math:`+2^{N-1}-1`. diff --git a/numpy/_core/fromnumeric.py b/numpy/_core/fromnumeric.py index dd94b4d0bed9..e5f4ec0e77f5 100644 --- a/numpy/_core/fromnumeric.py +++ b/numpy/_core/fromnumeric.py @@ -1188,7 +1188,7 @@ def argsort(a, axis=-1, kind=None, order=None, *, stable=None): array([[0, 3], [2, 2]]) - Indices of the sorted elements of a N-dimensional array: + Indices of the sorted elements of an N-dimensional array: >>> ind = np.unravel_index(np.argsort(x, axis=None), x.shape) >>> ind @@ -1275,7 +1275,7 @@ def argmax(a, axis=None, out=None, *, keepdims=np._NoValue): >>> np.argmax(a, axis=1) array([2, 2]) - Indexes of the maximal elements of a N-dimensional array: + Indexes of the maximal elements of an N-dimensional array: >>> a.flat[np.argmax(a)] 15 @@ -1375,7 +1375,7 @@ def argmin(a, axis=None, out=None, *, keepdims=np._NoValue): >>> np.argmin(a, axis=1) array([0, 0]) - Indices of the minimum elements of a N-dimensional array: + Indices of the minimum elements of an N-dimensional array: >>> a.flat[np.argmin(a)] 10 diff --git a/numpy/_core/numeric.py b/numpy/_core/numeric.py index f52d8cdbad8f..f2b79002cfc1 100644 --- a/numpy/_core/numeric.py +++ b/numpy/_core/numeric.py @@ -2010,7 +2010,7 @@ def binary_repr(num, width=None): In a two's-complement system negative numbers are represented by the two's complement of the absolute value. This is the most common method of - representing signed integers on computers [1]_. A N-bit two's-complement + representing signed integers on computers [1]_. An N-bit two's-complement system can represent every integer in the range :math:`-2^{N-1}` to :math:`+2^{N-1}-1`. 
diff --git a/numpy/lib/_datasource.py b/numpy/lib/_datasource.py index 72398c5479f8..1c9331fe553a 100644 --- a/numpy/lib/_datasource.py +++ b/numpy/lib/_datasource.py @@ -155,7 +155,7 @@ def open(path, mode='r', destpath=os.curdir, encoding=None, newline=None): """ Open `path` with `mode` and return the file object. - If ``path`` is an URL, it will be downloaded, stored in the + If ``path`` is a URL, it will be downloaded, stored in the `DataSource` `destpath` directory and opened from there. Parameters @@ -340,7 +340,7 @@ def _cache(self, path): def _findfile(self, path): """Searches for ``path`` and returns full path if found. - If path is an URL, _findfile will cache a local copy and return the + If path is a URL, _findfile will cache a local copy and return the path to the cached file. If path is a local file, _findfile will return a path to that local file. @@ -372,7 +372,7 @@ def abspath(self, path): """ Return absolute path of file in the DataSource directory. - If `path` is an URL, then `abspath` will return either the location + If `path` is a URL, then `abspath` will return either the location the file exists locally or the location it would exist when opened using the `open` method. @@ -448,7 +448,7 @@ def exists(self, path): Notes ----- - When `path` is an URL, `exists` will return True if it's either + When `path` is a URL, `exists` will return True if it's either stored locally in the `DataSource` directory, or is a valid remote URL. `DataSource` does not discriminate between the two, the file is accessible if it exists in either location. @@ -484,7 +484,7 @@ def open(self, path, mode='r', encoding=None, newline=None): """ Open and return file-like object. - If `path` is an URL, it will be downloaded, stored in the + If `path` is a URL, it will be downloaded, stored in the `DataSource` directory and opened from there. Parameters @@ -594,7 +594,7 @@ def abspath(self, path): """ Return absolute path of file in the Repository directory. 
- If `path` is an URL, then `abspath` will return either the location + If `path` is a URL, then `abspath` will return either the location the file exists locally or the location it would exist when opened using the `open` method. @@ -639,7 +639,7 @@ def exists(self, path): Notes ----- - When `path` is an URL, `exists` will return True if it's either + When `path` is a URL, `exists` will return True if it's either stored locally in the `DataSource` directory, or is a valid remote URL. `DataSource` does not discriminate between the two, the file is accessible if it exists in either location. @@ -651,7 +651,7 @@ def open(self, path, mode='r', encoding=None, newline=None): """ Open and return file-like object prepending Repository base URL. - If `path` is an URL, it will be downloaded, stored in the + If `path` is a URL, it will be downloaded, stored in the DataSource directory and opened from there. Parameters diff --git a/numpy/lib/tests/test__datasource.py b/numpy/lib/tests/test__datasource.py index 2dd19410bbf0..dece6823f09f 100644 --- a/numpy/lib/tests/test__datasource.py +++ b/numpy/lib/tests/test__datasource.py @@ -289,7 +289,7 @@ def test_RemoveHTTPFile(self, tmp_path): def test_CachedHTTPFile(self, tmp_path): localfile = valid_httpurl() - # Create a locally cached temp file with an URL based + # Create a locally cached temp file with a URL based # directory structure. This is similar to what Repository.open # would do. repos = datasource.Repository(valid_baseurl(), tmp_path) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index fb54cc7b8238..2173e0e76f04 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -618,7 +618,7 @@ def getmaskarray(arr: _ScalarLike_co) -> _MaskArray[tuple[()]]: ... def getmaskarray[ShapeT: _Shape](arr: np.ndarray[ShapeT, Any]) -> _MaskArray[ShapeT]: ... # It's sufficient for `m` to have dtype with type: `type[np.bool_]`, -# which isn't necessarily a ndarray. Please open an issue if this causes issues. 
+# which isn't necessarily an ndarray. Please open an issue if this causes issues. def is_mask(m: object) -> TypeIs[NDArray[bool_]]: ... # diff --git a/numpy/polynomial/legendre.py b/numpy/polynomial/legendre.py index 237e340cbf45..b611aed844e7 100644 --- a/numpy/polynomial/legendre.py +++ b/numpy/polynomial/legendre.py @@ -1372,7 +1372,7 @@ def legcompanion(c): """Return the scaled companion matrix of c. The basis polynomials are scaled so that the companion matrix is - symmetric when `c` is an Legendre basis polynomial. This provides + symmetric when `c` is a Legendre basis polynomial. This provides better eigenvalue estimates than the unscaled case and for basis polynomials the eigenvalues are guaranteed to be real if `numpy.linalg.eigvalsh` is used to obtain them. From b7d78e785ed02b693653d6e79ba79f0e35ec48ca Mon Sep 17 00:00:00 2001 From: Kumar Aditya Date: Tue, 17 Feb 2026 20:41:23 +0530 Subject: [PATCH 1406/1718] ENH: make PyDataMem_DefaultHandler and default extobj capsule immortal in free-threading (#30826) --- numpy/_core/src/multiarray/multiarraymodule.c | 8 +++++++- numpy/_core/src/umath/extobj.c | 8 ++++++++ 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index 2f381a1c7aa6..8afad4ed93cd 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -5170,7 +5170,13 @@ _multiarray_umath_exec(PyObject *m) { if (PyDataMem_DefaultHandler == NULL) { return -1; } - +#ifdef Py_GIL_DISABLED + if (PyUnstable_SetImmortal(PyDataMem_DefaultHandler) == 0) { + PyErr_SetString(PyExc_RuntimeError, + "Could not mark memory handler capsule as immortal"); + return -1; + } +#endif /* * Initialize the context-local current handler * with the default PyDataMem_Handler capsule. 
diff --git a/numpy/_core/src/umath/extobj.c b/numpy/_core/src/umath/extobj.c index 91b0b4c62d30..77a76873d20f 100644 --- a/numpy/_core/src/umath/extobj.c +++ b/numpy/_core/src/umath/extobj.c @@ -15,6 +15,7 @@ #include "numpy/ufuncobject.h" #include "common.h" +#include "npy_pycompat.h" #define UFUNC_ERR_IGNORE 0 @@ -145,6 +146,13 @@ init_extobj(void) if (npy_static_pydata.default_extobj_capsule == NULL) { return -1; } +#ifdef Py_GIL_DISABLED + if (PyUnstable_SetImmortal(npy_static_pydata.default_extobj_capsule) == 0) { + PyErr_SetString(PyExc_RuntimeError, "Could not mark extobj capsule as immortal"); + Py_CLEAR(npy_static_pydata.default_extobj_capsule); + return -1; + } +#endif npy_static_pydata.npy_extobj_contextvar = PyContextVar_New( "numpy.ufunc.extobj", npy_static_pydata.default_extobj_capsule); if (npy_static_pydata.npy_extobj_contextvar == NULL) { From b739b57127dd97dedb3adf1a2ee7c6a0a5d8a99f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 18 Feb 2026 17:57:44 +0000 Subject: [PATCH 1407/1718] MAINT: Bump hypothesis from 6.151.5 to 6.151.6 in /requirements Bumps [hypothesis](https://github.com/HypothesisWorks/hypothesis) from 6.151.5 to 6.151.6. - [Release notes](https://github.com/HypothesisWorks/hypothesis/releases) - [Commits](https://github.com/HypothesisWorks/hypothesis/compare/hypothesis-python-6.151.5...hypothesis-python-6.151.6) --- updated-dependencies: - dependency-name: hypothesis dependency-version: 6.151.6 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- requirements/emscripten_test_requirements.txt | 2 +- requirements/test_requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements/emscripten_test_requirements.txt b/requirements/emscripten_test_requirements.txt index e8ab96996cf8..3c5e86e39a84 100644 --- a/requirements/emscripten_test_requirements.txt +++ b/requirements/emscripten_test_requirements.txt @@ -1,4 +1,4 @@ -hypothesis==6.151.5 +hypothesis==6.151.6 pytest==9.0.2 tzdata pytest-xdist diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index 8693bb4d58d9..8a14ddf0478d 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -1,5 +1,5 @@ Cython -hypothesis==6.151.5 +hypothesis==6.151.6 pytest==9.0.2 pytest-cov==7.0.0 meson From 5c2288f36096bb9ae5d5aed277df1a3cda481ef6 Mon Sep 17 00:00:00 2001 From: mattip Date: Thu, 19 Feb 2026 12:35:59 +0200 Subject: [PATCH 1408/1718] DOC: improve release note --- .../upcoming_changes/30411.compatibility.rst | 26 +++++++++---------- 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/doc/release/upcoming_changes/30411.compatibility.rst b/doc/release/upcoming_changes/30411.compatibility.rst index c013160272dd..54ba1b1fd32d 100644 --- a/doc/release/upcoming_changes/30411.compatibility.rst +++ b/doc/release/upcoming_changes/30411.compatibility.rst @@ -1,20 +1,18 @@ -* `linalg.eig` and `linalg.eigvals` always return complex arrays. Previously, the return - values were depening on whether the eigenvalues happen to lie on the real line (which, for - a general, non-symmetric matrix, is not guaranteed). 
+``linalg.eig`` and ``linalg.eigvals`` now always return complex arrays
+----------------------------------------------------------------------

-  The user change depends on your usage:
+Previously, the return values depended on whether the eigenvalues happen to lie
+on the real line (which, for a general, non-symmetric matrix, is not guaranteed).

-  - to retain the previous behavior, do
+The change makes consistent what was a value-dependent result. To retain the
+previous behavior, do::

-    ```
     w = eigvals(a)
-    if w.imag == 0:  # this is what NumPy used to do
-    w = w.real
-    ```
-
-  - if your matrix is symmetrix/hermitian, use `eigh` and `eigvalsh` instead of `eig` and `eigvals`:
-    these are guaranteed to return real values. (A common case is covariance matrices, which are
-    symmetric and positive definite by construction)
-
+    if np.all(w.imag == 0):  # this is what NumPy used to do
+        w = w.real

+If your matrix is symmetric/hermitian, use ``eigh`` and ``eigvalsh`` instead of
+``eig`` and ``eigvals``. These are guaranteed to return real values. A common
+case is covariance matrices, which are symmetric and positive definite by
+construction.

From aaa07e0d71d9507beb89f22dbf72014cef57300b Mon Sep 17 00:00:00 2001
From: ChristinaDec <103141476+ChristinaDec@users.noreply.github.com>
Date: Thu, 19 Feb 2026 18:03:39 -0800
Subject: [PATCH 1409/1718] Update Contributor Guide with copyright and license
 reminders

Relates to https://github.com/numpy/numpy/issues/23315. Adds two sentences to
the Contributor Guide "guidelines" section which discourage copyright notices
in the source code and remind contributors that they agree to the license.
--- doc/source/dev/index.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/source/dev/index.rst b/doc/source/dev/index.rst index f1ac71e3ba5c..68c94911a1f6 100644 --- a/doc/source/dev/index.rst +++ b/doc/source/dev/index.rst @@ -183,6 +183,8 @@ Guidelines * No changes are ever committed without review and approval by a core team member. Please ask politely on the PR or on the `mailing list`_ if you get no response to your pull request within a week. +* Do not include copyright notices in source code. Any code you contribute to + the project is in agreement with the project `license `_. .. _stylistic-guidelines: From 036d07c5d9f32ea236e4f34a8da81bc93429d385 Mon Sep 17 00:00:00 2001 From: ChristinaDec <103141476+ChristinaDec@users.noreply.github.com> Date: Fri, 20 Feb 2026 00:58:25 -0800 Subject: [PATCH 1410/1718] Update doc/source/dev/index.rst Co-authored-by: Matti Picus --- doc/source/dev/index.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/dev/index.rst b/doc/source/dev/index.rst index 68c94911a1f6..625e69712030 100644 --- a/doc/source/dev/index.rst +++ b/doc/source/dev/index.rst @@ -183,8 +183,8 @@ Guidelines * No changes are ever committed without review and approval by a core team member. Please ask politely on the PR or on the `mailing list`_ if you get no response to your pull request within a week. -* Do not include copyright notices in source code. Any code you contribute to - the project is in agreement with the project `license `_. +* Do not include copyright notices in source code without explicitly discussing the need first. + In general, any code you contribute to the project is under the project `license `_. .. 
_stylistic-guidelines: From 0eb137ee69c251c406b81eb8f7fcaa4586ec989b Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Fri, 20 Feb 2026 19:28:00 +0100 Subject: [PATCH 1411/1718] TYP: Type-checking with Pyrefly (#30729) Add Pyrefly type checking and suppress warnings in the specific way as needed. --- .github/workflows/{mypy.yml => typecheck.yml} | 16 +++++++++- .spin/cmds.py | 7 ++++ numpy/_array_api_info.pyi | 2 +- numpy/_core/_exceptions.pyi | 2 +- numpy/_core/defchararray.pyi | 14 ++++---- numpy/_core/memmap.pyi | 2 +- numpy/_core/records.pyi | 6 ++-- numpy/_typing/_ufunc.pyi | 32 ++++++++++++------- numpy/char/__init__.pyi | 2 +- numpy/ctypeslib/_ctypeslib.pyi | 2 +- numpy/dtypes.pyi | 4 +-- numpy/lib/_arrayterator_impl.pyi | 4 +-- numpy/lib/_function_base_impl.pyi | 2 +- numpy/lib/_npyio_impl.pyi | 2 +- numpy/lib/_polynomial_impl.pyi | 2 +- numpy/ma/core.pyi | 4 +-- numpy/ma/extras.pyi | 2 +- numpy/ma/mrecords.pyi | 2 +- numpy/random/_mt19937.pyi | 2 +- numpy/random/_pcg64.pyi | 11 ++----- numpy/random/_philox.pyi | 2 +- numpy/random/_sfc64.pyi | 2 +- numpy/testing/__init__.pyi | 2 +- pyproject.toml | 27 ++++++++++++++++ requirements/test_requirements.txt | 1 + 25 files changed, 104 insertions(+), 50 deletions(-) rename .github/workflows/{mypy.yml => typecheck.yml} (85%) diff --git a/.github/workflows/mypy.yml b/.github/workflows/typecheck.yml similarity index 85% rename from .github/workflows/mypy.yml rename to .github/workflows/typecheck.yml index 1e37b0e777ab..661b9a8c1693 100644 --- a/.github/workflows/mypy.yml +++ b/.github/workflows/typecheck.yml @@ -1,4 +1,4 @@ -name: Run MyPy +name: Type-checking # Mypy is too slow to run as part of regular CI. 
The purpose of the jobs in # this file is to cover running Mypy across: @@ -89,3 +89,17 @@ jobs: --verifytypes numpy --ignoreexternal --exclude-like '*.tests.*' '*.conftest.*' + + pyrefly: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: astral-sh/setup-uv@61cb8a9741eeb8a550a1b8544337180c0fc8476b # v7.2.0 + with: + activate-environment: true + - name: Install dependencies + run: >- + uv pip install + -r requirements/test_requirements.txt + - name: Run pyrefly + run: pyrefly check diff --git a/.spin/cmds.py b/.spin/cmds.py index 609b32485e19..ef20d068fc5d 100644 --- a/.spin/cmds.py +++ b/.spin/cmds.py @@ -547,6 +547,13 @@ def mypy(ctx): ctx.forward(test) +@click.command() +def pyrefly() -> None: + """🪲 Type-check the stubs with Pyrefly + """ + spin.util.run(['pyrefly', 'check']) + + @click.command() @click.option( '--concise', diff --git a/numpy/_array_api_info.pyi b/numpy/_array_api_info.pyi index 0bad9c65b137..f6fc86d38dbb 100644 --- a/numpy/_array_api_info.pyi +++ b/numpy/_array_api_info.pyi @@ -97,7 +97,7 @@ type _EmptyDict = dict[Never, Never] @final class __array_namespace_info__: - __module__: Literal["numpy"] = "numpy" + __module__: Literal["numpy"] = "numpy" # pyrefly: ignore[bad-override] def capabilities(self) -> _Capabilities: ... def default_device(self) -> _Device: ... diff --git a/numpy/_core/_exceptions.pyi b/numpy/_core/_exceptions.pyi index 00c1cdbaa575..dd559be44fee 100644 --- a/numpy/_core/_exceptions.pyi +++ b/numpy/_core/_exceptions.pyi @@ -15,7 +15,7 @@ class _UFuncNoLoopError(UFuncTypeError): def __init__(self, /, ufunc: np.ufunc, dtypes: Iterable[np.dtype]) -> None: ... class _UFuncBinaryResolutionError(_UFuncNoLoopError): - dtypes: tuple[np.dtype, np.dtype] + dtypes: tuple[np.dtype, np.dtype] # pyrefly: ignore[bad-override] def __init__(self, /, ufunc: np.ufunc, dtypes: Iterable[np.dtype]) -> None: ... 
class _UFuncCastingError(UFuncTypeError): diff --git a/numpy/_core/defchararray.pyi b/numpy/_core/defchararray.pyi index 1fb086a3d451..5685a988227e 100644 --- a/numpy/_core/defchararray.pyi +++ b/numpy/_core/defchararray.pyi @@ -143,37 +143,37 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): # @overload # type: ignore[override] - def __ge__(self: _CharArray[str_], other: U_co, /) -> NDArray[np.bool]: ... + def __ge__(self: _CharArray[str_], other: U_co, /) -> NDArray[np.bool]: ... # pyrefly: ignore[bad-override] @overload def __ge__(self: _CharArray[bytes_], other: S_co, /) -> NDArray[np.bool]: ... # pyright: ignore[reportIncompatibleMethodOverride] # @overload # type: ignore[override] - def __le__(self: _CharArray[str_], other: U_co, /) -> NDArray[np.bool]: ... + def __le__(self: _CharArray[str_], other: U_co, /) -> NDArray[np.bool]: ... # pyrefly: ignore[bad-override] @overload def __le__(self: _CharArray[bytes_], other: S_co, /) -> NDArray[np.bool]: ... # pyright: ignore[reportIncompatibleMethodOverride] # @overload # type: ignore[override] - def __gt__(self: _CharArray[str_], other: U_co, /) -> NDArray[np.bool]: ... + def __gt__(self: _CharArray[str_], other: U_co, /) -> NDArray[np.bool]: ... # pyrefly: ignore[bad-override] @overload def __gt__(self: _CharArray[bytes_], other: S_co, /) -> NDArray[np.bool]: ... # pyright: ignore[reportIncompatibleMethodOverride] # @overload # type: ignore[override] - def __lt__(self: _CharArray[str_], other: U_co, /) -> NDArray[np.bool]: ... + def __lt__(self: _CharArray[str_], other: U_co, /) -> NDArray[np.bool]: ... # pyrefly: ignore[bad-override] @overload def __lt__(self: _CharArray[bytes_], other: S_co, /) -> NDArray[np.bool]: ... # pyright: ignore[reportIncompatibleMethodOverride] # @overload # type: ignore[override] - def __add__(self: _CharArray[str_], other: U_co, /) -> _CharArray[str_]: ... + def __add__(self: _CharArray[str_], other: U_co, /) -> _CharArray[str_]: ... 
# pyrefly: ignore[bad-override] @overload def __add__(self: _CharArray[bytes_], other: S_co, /) -> _CharArray[bytes_]: ... # pyright: ignore[reportIncompatibleMethodOverride] # @overload # type: ignore[override] - def __radd__(self: _CharArray[str_], other: U_co, /) -> _CharArray[str_]: ... + def __radd__(self: _CharArray[str_], other: U_co, /) -> _CharArray[str_]: ... # pyrefly: ignore[bad-override] @overload def __radd__(self: _CharArray[bytes_], other: S_co, /) -> _CharArray[bytes_]: ... # pyright: ignore[reportIncompatibleMethodOverride] @@ -241,7 +241,7 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): # @overload # type: ignore[override] - def partition(self: _CharArray[str_], sep: U_co) -> _CharArray[str_]: ... + def partition(self: _CharArray[str_], sep: U_co) -> _CharArray[str_]: ... # pyrefly: ignore[bad-override] @overload def partition(self: _CharArray[bytes_], sep: S_co) -> _CharArray[bytes_]: ... # pyright: ignore[reportIncompatibleMethodOverride] diff --git a/numpy/_core/memmap.pyi b/numpy/_core/memmap.pyi index ebc750f849a8..766c4581aa2a 100644 --- a/numpy/_core/memmap.pyi +++ b/numpy/_core/memmap.pyi @@ -30,7 +30,7 @@ class _SupportsFileMethodsRW(SupportsWrite[bytes], _SupportsFileMethods, Protoco ### class memmap(np.ndarray[_ShapeT_co, _DTypeT_co]): - __module__: Literal["numpy"] = "numpy" + __module__: Literal["numpy"] = "numpy" # pyrefly: ignore[bad-override] __array_priority__: ClassVar[float] = 100.0 # pyright: ignore[reportIncompatibleMethodOverride] filename: Final[str | None] diff --git a/numpy/_core/records.pyi b/numpy/_core/records.pyi index 87a2e7616d00..9c8963f5f7af 100644 --- a/numpy/_core/records.pyi +++ b/numpy/_core/records.pyi @@ -55,7 +55,7 @@ class _SupportsReadInto(Protocol): # exported in `numpy.rec` class record(np.void): # type: ignore[misc] __name__: ClassVar[Literal["record"]] = "record" - __module__: Literal["numpy"] = "numpy" + __module__: Literal["numpy"] = "numpy" # pyrefly: ignore[bad-override] def pprint(self) 
-> str: ... @@ -64,14 +64,14 @@ class record(np.void): # type: ignore[misc] # @overload # type: ignore[override] - def __getitem__(self, key: str | SupportsIndex, /) -> Incomplete: ... + def __getitem__(self, key: str | SupportsIndex, /) -> Incomplete: ... # pyrefly: ignore[bad-override] @overload def __getitem__(self, key: list[str], /) -> record: ... # exported in `numpy.rec` class recarray(np.ndarray[_ShapeT_co, _DTypeT_co]): __name__: ClassVar[Literal["recarray"]] = "recarray" - __module__: Literal["numpy.rec"] = "numpy.rec" + __module__: Literal["numpy.rec"] = "numpy.rec" # pyrefly: ignore[bad-override] @overload def __new__( diff --git a/numpy/_typing/_ufunc.pyi b/numpy/_typing/_ufunc.pyi index b9dc5fd5b975..14ec3e913eb1 100644 --- a/numpy/_typing/_ufunc.pyi +++ b/numpy/_typing/_ufunc.pyi @@ -19,6 +19,7 @@ from typing import ( TypedDict, Unpack, overload, + override, type_check_only, ) @@ -226,8 +227,9 @@ class _UFunc_Nin2_Nout1[NameT: LiteralString, NTypesT: int, IdentT](ufunc): # t out: np.ndarray | EllipsisType | None = None, ) -> NDArray[Incomplete]: ... - @overload # type: ignore[override] - def reduce( # out=None (default), keepdims=False (default) + @override # type: ignore[override] + @overload # out=None (default), keepdims=False (default) + def reduce( # pyrefly: ignore[bad-override] self, array: ArrayLike, /, @@ -263,6 +265,7 @@ class _UFunc_Nin2_Nout1[NameT: LiteralString, NTypesT: int, IdentT](ufunc): # t **kwargs: Unpack[_ReduceKwargs], ) -> NDArray[Incomplete]: ... + @override def reduceat( self, array: ArrayLike, @@ -273,7 +276,8 @@ class _UFunc_Nin2_Nout1[NameT: LiteralString, NTypesT: int, IdentT](ufunc): # t out: np.ndarray | EllipsisType | None = None, ) -> NDArray[Incomplete]: ... 
- @overload # type: ignore[override] + @override # type: ignore[override] + @overload def outer( # (scalar, scalar) -> scalar self, A: _ScalarLike_co, @@ -329,6 +333,7 @@ class _UFunc_Nin2_Nout1[NameT: LiteralString, NTypesT: int, IdentT](ufunc): # t **kwds: Unpack[_UFunc3Kwargs], ) -> NDArray[Incomplete] | Incomplete: ... + @override def at( # type: ignore[override] self, a: np.ndarray | _SupportsArrayUFunc, @@ -680,8 +685,9 @@ class _PyFunc_Nin2_Nout1[ReturnT, IdentT](ufunc): # type: ignore[misc] **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], ) -> Incomplete: ... - @overload # type: ignore[override] - def accumulate( + @override # type: ignore[override] + @overload + def accumulate( # pyrefly: ignore[bad-override] self, array: ArrayLike, /, @@ -700,8 +706,9 @@ class _PyFunc_Nin2_Nout1[ReturnT, IdentT](ufunc): # type: ignore[misc] out: OutT, ) -> OutT: ... - @overload # type: ignore[override] - def reduce[OutT: np.ndarray]( # out=array + @override # type: ignore[override] + @overload # out=array + def reduce[OutT: np.ndarray]( # pyrefly: ignore[bad-override] self, array: ArrayLike, /, @@ -748,8 +755,9 @@ class _PyFunc_Nin2_Nout1[ReturnT, IdentT](ufunc): # type: ignore[misc] **kwargs: Unpack[_ReduceKwargs], ) -> ReturnT | NDArray[np.object_]: ... - @overload # type: ignore[override] - def reduceat[OutT: np.ndarray]( + @override # type: ignore[override] + @overload + def reduceat[OutT: np.ndarray]( # pyrefly: ignore[bad-override] self, array: ArrayLike, /, @@ -780,8 +788,9 @@ class _PyFunc_Nin2_Nout1[ReturnT, IdentT](ufunc): # type: ignore[misc] out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, ) -> Incomplete: ... - @overload # type: ignore[override] - def outer( + @override # type: ignore[override] + @overload + def outer( # pyrefly: ignore[bad-override] self, A: _ScalarLike_co, B: _ScalarLike_co, @@ -831,6 +840,7 @@ class _PyFunc_Nin2_Nout1[ReturnT, IdentT](ufunc): # type: ignore[misc] **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], ) -> Incomplete: ... 
+ @override def at( # type: ignore[override] self, a: np.ndarray | _SupportsArrayUFunc, diff --git a/numpy/char/__init__.pyi b/numpy/char/__init__.pyi index 11dd885e50f0..f53ff7e483b0 100644 --- a/numpy/char/__init__.pyi +++ b/numpy/char/__init__.pyi @@ -4,7 +4,7 @@ from numpy._core.defchararray import ( # type: ignore[deprecated] asarray, capitalize, center, - chararray, + chararray, # pyrefly: ignore[deprecated] compare_chararrays, count, decode, diff --git a/numpy/ctypeslib/_ctypeslib.pyi b/numpy/ctypeslib/_ctypeslib.pyi index 2e88d7d9464f..3ab72549f472 100644 --- a/numpy/ctypeslib/_ctypeslib.pyi +++ b/numpy/ctypeslib/_ctypeslib.pyi @@ -58,7 +58,7 @@ class _ndptr[OptionalDTypeT: np.dtype | None](ct.c_void_p): @overload # type: ignore[override] @classmethod - def from_param(cls: type[_ndptr[None]], obj: np.ndarray) -> _ctypes[Any]: ... + def from_param(cls: type[_ndptr[None]], obj: np.ndarray) -> _ctypes[Any]: ... # pyrefly: ignore[bad-override] @overload @classmethod def from_param[DTypeT: np.dtype](cls: type[_ndptr[DTypeT]], obj: np.ndarray[Any, DTypeT]) -> _ctypes[Any]: ... # pyright: ignore[reportIncompatibleMethodOverride] diff --git a/numpy/dtypes.pyi b/numpy/dtypes.pyi index 20a503ad2557..d2087becc99a 100644 --- a/numpy/dtypes.pyi +++ b/numpy/dtypes.pyi @@ -62,7 +62,7 @@ _NaObjectT_co = TypeVar("_NaObjectT_co", default=Never, covariant=True) @type_check_only class _SimpleDType[ScalarT: np.generic](np.dtype[ScalarT]): # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues] - names: None # pyright: ignore[reportIncompatibleVariableOverride] + names: None # pyright: ignore[reportIncompatibleVariableOverride] # pyrefly: ignore[bad-override] def __new__(cls, /) -> Self: ... def __getitem__(self, key: Any, /) -> NoReturn: ... 
@property @@ -456,7 +456,7 @@ class VoidDType( # type: ignore[misc] _TypeCodes[L["V"], L["V"], L[20]], _NoOrder, _NBit[L[1], _ItemSizeT_co], - np.dtype[np.void], # pyright: ignore[reportGeneralTypeIssues] + np.dtype[np.void], # pyright: ignore[reportGeneralTypeIssues] # pyrefly: ignore[invalid-inheritance] Generic[_ItemSizeT_co], ): # NOTE: `VoidDType(...)` raises a `TypeError` at the moment diff --git a/numpy/lib/_arrayterator_impl.pyi b/numpy/lib/_arrayterator_impl.pyi index a1a4428885fd..2d221f9007e9 100644 --- a/numpy/lib/_arrayterator_impl.pyi +++ b/numpy/lib/_arrayterator_impl.pyi @@ -28,14 +28,14 @@ class Arrayterator(np.ndarray[_ShapeT_co, _DTypeT_co]): step: Final[list[int]] @property # type: ignore[misc] - def shape(self) -> _ShapeT_co: ... + def shape(self) -> _ShapeT_co: ... # pyrefly: ignore[bad-override] @property def flat[ScalarT: np.generic](self: Arrayterator[Any, np.dtype[ScalarT]]) -> Generator[ScalarT]: ... # type: ignore[override] # def __init__(self, /, var: np.ndarray[_ShapeT_co, _DTypeT_co], buf_size: int | None = None) -> None: ... def __getitem__(self, index: _AnyIndex, /) -> Arrayterator[_AnyShape, _DTypeT_co]: ... # type: ignore[override] - def __iter__(self) -> Generator[np.ndarray[_AnyShape, _DTypeT_co]]: ... + def __iter__(self) -> Generator[np.ndarray[_AnyShape, _DTypeT_co]]: ... 
# pyrefly: ignore[bad-override] # @overload diff --git a/numpy/lib/_function_base_impl.pyi b/numpy/lib/_function_base_impl.pyi index 5f9e65d1a4a2..5887d7d496ce 100644 --- a/numpy/lib/_function_base_impl.pyi +++ b/numpy/lib/_function_base_impl.pyi @@ -171,7 +171,7 @@ class _SizedIterable[T](Protocol): class vectorize: __doc__: str | None - __module__: L["numpy"] = "numpy" + __module__: L["numpy"] = "numpy" # pyrefly: ignore[bad-override] pyfunc: Callable[..., Incomplete] cache: bool signature: str | None diff --git a/numpy/lib/_npyio_impl.pyi b/numpy/lib/_npyio_impl.pyi index 10be679f9c74..efe7c0886719 100644 --- a/numpy/lib/_npyio_impl.pyi +++ b/numpy/lib/_npyio_impl.pyi @@ -91,7 +91,7 @@ class NpzFile(Mapping[str, NDArray[_ScalarT_co]]): # @override @overload - def get(self, key: str, default: None = None, /) -> NDArray[_ScalarT_co] | None: ... + def get(self, key: str, default: None = None, /) -> NDArray[_ScalarT_co] | None: ... # pyrefly: ignore[bad-override] @overload def get[T](self, key: str, default: NDArray[_ScalarT_co] | T, /) -> NDArray[_ScalarT_co] | T: ... 
# pyright: ignore[reportIncompatibleMethodOverride] diff --git a/numpy/lib/_polynomial_impl.pyi b/numpy/lib/_polynomial_impl.pyi index a88581f2228f..82ec616d6458 100644 --- a/numpy/lib/_polynomial_impl.pyi +++ b/numpy/lib/_polynomial_impl.pyi @@ -57,7 +57,7 @@ __all__ = [ ] class poly1d: - __module__: L["numpy"] = "numpy" + __module__: L["numpy"] = "numpy" # pyrefly: ignore[bad-override] __hash__: ClassVar[None] # type: ignore[assignment] # pyright: ignore[reportIncompatibleMethodOverride] diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 26f1dc591d51..64cf6e36f972 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -2220,7 +2220,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # Keep in-sync with np.ma.argmin @overload # type: ignore[override] - def argmin( + def argmin( # pyrefly: ignore[bad-param-name-override] self, axis: None = None, fill_value: _ScalarLike_co | None = None, @@ -2258,7 +2258,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # Keep in-sync with np.ma.argmax @overload # type: ignore[override] - def argmax( + def argmax( # pyrefly: ignore[bad-param-name-override] self, axis: None = None, fill_value: _ScalarLike_co | None = None, diff --git a/numpy/ma/extras.pyi b/numpy/ma/extras.pyi index f564fc13e9a0..2d57a4f94bf4 100644 --- a/numpy/ma/extras.pyi +++ b/numpy/ma/extras.pyi @@ -625,7 +625,7 @@ class MAxisConcatenator(AxisConcatenator): @override # type: ignore[override] @overload @staticmethod - def concatenate[ScalarT: np.generic](arrays: _ArrayLike[ScalarT], axis: SupportsIndex | None = 0) -> _MArray[ScalarT]: ... + def concatenate[ScalarT: np.generic](arrays: _ArrayLike[ScalarT], axis: SupportsIndex | None = 0) -> _MArray[ScalarT]: ... # pyrefly: ignore[bad-override] @overload @staticmethod def concatenate(arrays: SupportsLenAndGetItem[ArrayLike], axis: SupportsIndex | None = 0) -> _MArray[Incomplete]: ... 
# pyright: ignore[reportIncompatibleMethodOverride] diff --git a/numpy/ma/mrecords.pyi b/numpy/ma/mrecords.pyi index 597551d1bf7d..f6b5d6424044 100644 --- a/numpy/ma/mrecords.pyi +++ b/numpy/ma/mrecords.pyi @@ -81,7 +81,7 @@ class MaskedRecords(MaskedArray[_ShapeT_co, _DTypeT_co], Generic[_ShapeT_co, _DT # keep in sync with `MaskedArray.view`, but without the `fill_value` @override # type: ignore[override] @overload # () - def view(self, /, dtype: None = None, type: None = None) -> Self: ... + def view(self, /, dtype: None = None, type: None = None) -> Self: ... # pyrefly: ignore[bad-override] @overload # (dtype: DTypeT) def view[DTypeT: np.dtype]( self, /, dtype: DTypeT | _HasDType[DTypeT], type: None = None diff --git a/numpy/random/_mt19937.pyi b/numpy/random/_mt19937.pyi index 03373a6dd6ea..074a8eec5447 100644 --- a/numpy/random/_mt19937.pyi +++ b/numpy/random/_mt19937.pyi @@ -22,6 +22,6 @@ class MT19937(BitGenerator): def _legacy_seeding(self, seed: _ArrayLikeInt_co) -> None: ... def jumped(self, jumps: int = 1) -> MT19937: ... @property # type: ignore[override] - def state(self) -> _MT19937State: ... + def state(self) -> _MT19937State: ... # pyrefly: ignore[bad-override] @state.setter def state(self, value: _MT19937State) -> None: ... diff --git a/numpy/random/_pcg64.pyi b/numpy/random/_pcg64.pyi index a9e81f7f181b..aede210d4026 100644 --- a/numpy/random/_pcg64.pyi +++ b/numpy/random/_pcg64.pyi @@ -21,21 +21,16 @@ class PCG64(BitGenerator): def __init__(self, seed: _ArrayLikeInt_co | SeedSequence | None = ...) -> None: ... def jumped(self, jumps: int = 1) -> PCG64: ... @property # type: ignore[override] - def state( - self, - ) -> _PCG64State: ... + def state(self) -> _PCG64State: ... # pyrefly: ignore[bad-override] @state.setter - def state( - self, - value: _PCG64State, - ) -> None: ... + def state(self, value: _PCG64State) -> None: ... def advance(self, delta: int) -> PCG64: ... 
class PCG64DXSM(BitGenerator): def __init__(self, seed: _ArrayLikeInt_co | SeedSequence | None = ...) -> None: ... def jumped(self, jumps: int = 1) -> PCG64DXSM: ... @property # type: ignore[override] - def state(self) -> _PCG64State: ... + def state(self) -> _PCG64State: ... # pyrefly: ignore[bad-override] @state.setter def state(self, value: _PCG64State) -> None: ... def advance(self, delta: int) -> PCG64DXSM: ... diff --git a/numpy/random/_philox.pyi b/numpy/random/_philox.pyi index 3089f11ea629..ea9880ea10e2 100644 --- a/numpy/random/_philox.pyi +++ b/numpy/random/_philox.pyi @@ -29,7 +29,7 @@ class Philox(BitGenerator): key: _ArrayLikeInt_co | None = ..., ) -> None: ... @property # type: ignore[override] - def state(self) -> _PhiloxState: ... + def state(self) -> _PhiloxState: ... # pyrefly: ignore[bad-override] @state.setter def state(self, value: _PhiloxState) -> None: ... def jumped(self, jumps: int = 1) -> Philox: ... diff --git a/numpy/random/_sfc64.pyi b/numpy/random/_sfc64.pyi index f5f3fed9c251..5bddaf2b7676 100644 --- a/numpy/random/_sfc64.pyi +++ b/numpy/random/_sfc64.pyi @@ -20,6 +20,6 @@ class _SFC64State(TypedDict): class SFC64(BitGenerator): def __init__(self, seed: _ArrayLikeInt_co | SeedSequence | None = ...) -> None: ... @property # type: ignore[override] - def state(self) -> _SFC64State: ... + def state(self) -> _SFC64State: ... # pyrefly: ignore[bad-override] @state.setter def state(self, value: _SFC64State) -> None: ... 
diff --git a/numpy/testing/__init__.pyi b/numpy/testing/__init__.pyi index 95db8728ef70..3d6c53b1e638 100644 --- a/numpy/testing/__init__.pyi +++ b/numpy/testing/__init__.pyi @@ -47,7 +47,7 @@ from ._private.utils import ( # type: ignore[deprecated] run_threaded, rundocs, runstring, - suppress_warnings, + suppress_warnings, # pyrefly: ignore[deprecated] tempdir, temppath, verbose, diff --git a/pyproject.toml b/pyproject.toml index 962aed0571cd..732ab2741993 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -251,6 +251,7 @@ cli = 'vendored-meson/meson/meson.py' ".spin/cmds.py:build", ".spin/cmds.py:test", ".spin/cmds.py:mypy", + ".spin/cmds.py:pyrefly", ".spin/cmds.py:stubtest", ".spin/cmds.py:config_openblas", ".spin/cmds.py:lint", @@ -270,3 +271,29 @@ cli = 'vendored-meson/meson/meson.py' ".spin/cmds.py:check_tutorials", ] "Metrics" = [".spin/cmds.py:bench"] + + +[tool.pyrefly] +project-includes = ["numpy/**/*.pyi"] +project-excludes = ["numpy/typing/tests/**"] + +[tool.pyrefly.errors] +implicit-any = "error" +unannotated-parameter = "error" +unannotated-return = "error" + +[[tool.pyrefly.sub-config]] +matches = "numpy/__init__.pyi" +errors = { bad-override = "ignore" } + +[[tool.pyrefly.sub-config]] +matches = "numpy/_typing/_nbit_base.pyi" +errors = { invalid-inheritance = "ignore" } + +[[tool.pyrefly.sub-config]] +matches = "numpy/ma/core.pyi" +errors = { bad-override = "ignore" } + +[[tool.pyrefly.sub-config]] +matches = "numpy/matrixlib/defmatrix.pyi" +errors = { bad-override = "ignore" } diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index 8a14ddf0478d..2e21b90385ea 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -8,6 +8,7 @@ pytest-xdist pytest-timeout # For testing types mypy==1.19.1 +pyrefly==0.49.0 # for optional f2py encoding detection charset-normalizer tzdata From 2d0e96b90dff9d80545f6cf154c1cdb067d7b1ae Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 20 Feb 2026 
20:02:37 +0100 Subject: [PATCH 1412/1718] MAINT: bump ``ruff`` to ``0.15.2`` --- environment.yml | 2 +- requirements/linter_requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/environment.yml b/environment.yml index 774d6c0209ac..e74ba4aba356 100644 --- a/environment.yml +++ b/environment.yml @@ -45,7 +45,7 @@ dependencies: - breathe>4.33.0 # For linting - cython-lint - - ruff=0.14.7 + - ruff=0.15.2 - gitpython # Used in some tests - cffi diff --git a/requirements/linter_requirements.txt b/requirements/linter_requirements.txt index fcc9af70a93e..cffefb4b0183 100644 --- a/requirements/linter_requirements.txt +++ b/requirements/linter_requirements.txt @@ -1,5 +1,5 @@ # keep in sync with `environment.yml` cython-lint -ruff==0.15.0 +ruff==0.15.2 GitPython>=3.1.30 spin From f99ae9019c9e213e13d6f1a84b0ccd2dcac54ba6 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 20 Feb 2026 20:13:23 +0100 Subject: [PATCH 1413/1718] TYP/STY: fix new ``UP047`` ruff errors --- numpy/_core/numeric.pyi | 12 +++++++----- numpy/lib/_arraysetops_impl.pyi | 18 ++++++++++++------ numpy/linalg/_linalg.pyi | 14 ++++++++------ numpy/ma/core.pyi | 8 +++++--- numpy/ma/extras.pyi | 10 ++++++---- numpy/testing/_private/utils.pyi | 17 ++++++++--------- 6 files changed, 46 insertions(+), 33 deletions(-) diff --git a/numpy/_core/numeric.pyi b/numpy/_core/numeric.pyi index 2a69e2922253..1dd34d6a4fcd 100644 --- a/numpy/_core/numeric.pyi +++ b/numpy/_core/numeric.pyi @@ -1046,9 +1046,11 @@ def isfortran(a: ndarray | generic) -> py_bool: ... def argwhere(a: ArrayLike) -> _Array2D[np.intp]: ... def flatnonzero(a: ArrayLike) -> _Array1D[np.intp]: ... 
+# NOTE: we ignore UP047 because inlining `_AnyScalarT` would result in a lot of code duplication + # keep in sync with `convolve` and `ma.core.correlate` @overload -def correlate( +def correlate( # noqa: UP047 a: _ArrayLike1D[_AnyNumericScalarT], v: _ArrayLike1D[_AnyNumericScalarT], mode: _CorrelateMode = "valid" ) -> _Array1D[_AnyNumericScalarT]: ... @overload @@ -1068,7 +1070,7 @@ def correlate( # keep in sync with `correlate` @overload -def convolve( +def convolve( # noqa: UP047 a: _ArrayLike1D[_AnyNumericScalarT], v: _ArrayLike1D[_AnyNumericScalarT], mode: _CorrelateMode = "valid" ) -> _Array1D[_AnyNumericScalarT]: ... @overload @@ -1089,7 +1091,7 @@ def convolve( # keep roughly in sync with `convolve` and `correlate`, but for 2-D output and an additional `out` overload, # and also keep in sync with `ma.core.outer` (minus `out`) @overload -def outer( +def outer( # noqa: UP047 a: _ArrayLike[_AnyNumericScalarT], b: _ArrayLike[_AnyNumericScalarT], out: None = None ) -> _Array2D[_AnyNumericScalarT]: ... @overload @@ -1107,7 +1109,7 @@ def outer[ArrayT: np.ndarray](a: _ArrayLikeNumber_co | _ArrayLikeTD64_co, b: _Ar # keep in sync with numpy.linalg._linalg.tensordot (ignoring `/, *`) @overload -def tensordot( +def tensordot( # noqa: UP047 a: _ArrayLike[_AnyNumericScalarT], b: _ArrayLike[_AnyNumericScalarT], axes: int | tuple[_ShapeLike, _ShapeLike] = 2 ) -> NDArray[_AnyNumericScalarT]: ... @overload @@ -1127,7 +1129,7 @@ def tensordot( # @overload -def cross( +def cross( # noqa: UP047 a: _ArrayLike[_AnyNumericScalarT], b: _ArrayLike[_AnyNumericScalarT], axisa: int = -1, diff --git a/numpy/lib/_arraysetops_impl.pyi b/numpy/lib/_arraysetops_impl.pyi index 4290a79f1dcd..77db8c4f3620 100644 --- a/numpy/lib/_arraysetops_impl.pyi +++ b/numpy/lib/_arraysetops_impl.pyi @@ -379,23 +379,25 @@ def unique_values[ScalarT: np.generic](x: _ArrayLike[ScalarT]) -> _Array1D[Scala @overload def unique_values(x: ArrayLike) -> _Array1D[Incomplete]: ... 
+# NOTE: we ignore UP047 because inlining `_AnyScalarT` would result in a lot of code duplication + # @overload # known scalar-type, return_indices=False (default) -def intersect1d( +def intersect1d( # noqa: UP047 ar1: _ArrayLike[_AnyScalarT], ar2: _ArrayLike[_AnyScalarT], assume_unique: bool = False, return_indices: L[False] = False, ) -> _Array1D[_AnyScalarT]: ... @overload # known scalar-type, return_indices=True (positional) -def intersect1d( +def intersect1d( # noqa: UP047 ar1: _ArrayLike[_AnyScalarT], ar2: _ArrayLike[_AnyScalarT], assume_unique: bool, return_indices: L[True], ) -> _IntersectResult[_AnyScalarT]: ... @overload # known scalar-type, return_indices=True (keyword) -def intersect1d( +def intersect1d( # noqa: UP047 ar1: _ArrayLike[_AnyScalarT], ar2: _ArrayLike[_AnyScalarT], assume_unique: bool = False, @@ -427,19 +429,23 @@ def intersect1d( # @overload -def setxor1d(ar1: _ArrayLike[_AnyScalarT], ar2: _ArrayLike[_AnyScalarT], assume_unique: bool = False) -> _Array1D[_AnyScalarT]: ... +def setxor1d( # noqa: UP047 + ar1: _ArrayLike[_AnyScalarT], ar2: _ArrayLike[_AnyScalarT], assume_unique: bool = False +) -> _Array1D[_AnyScalarT]: ... @overload def setxor1d(ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = False) -> _Array1D[Incomplete]: ... # @overload -def union1d(ar1: _ArrayLike[_AnyScalarT], ar2: _ArrayLike[_AnyScalarT]) -> _Array1D[_AnyScalarT]: ... +def union1d(ar1: _ArrayLike[_AnyScalarT], ar2: _ArrayLike[_AnyScalarT]) -> _Array1D[_AnyScalarT]: ... # noqa: UP047 @overload def union1d(ar1: ArrayLike, ar2: ArrayLike) -> _Array1D[Incomplete]: ... # @overload -def setdiff1d(ar1: _ArrayLike[_AnyScalarT], ar2: _ArrayLike[_AnyScalarT], assume_unique: bool = False) -> _Array1D[_AnyScalarT]: ... +def setdiff1d( # noqa: UP047 + ar1: _ArrayLike[_AnyScalarT], ar2: _ArrayLike[_AnyScalarT], assume_unique: bool = False +) -> _Array1D[_AnyScalarT]: ... 
@overload def setdiff1d(ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = False) -> _Array1D[Incomplete]: ... diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index c982bf524c50..8eb3e57cf1e2 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -1036,9 +1036,11 @@ _AnyScalarT = TypeVar( np.float16, np.float32, np.longdouble, np.complex64, np.clongdouble, ) # fmt: skip +# NOTE: we ignore UP047 because inlining `_AnyScalarT` would result in a lot of code duplication + # @overload # ~T, ~T (we use constraints instead of a `: np.number` bound to prevent joins/unions) -def cross( +def cross( # noqa: UP047 x1: _ArrayLike1D2D[_AnyScalarT], x2: _ArrayLike1D2D[_AnyScalarT], /, @@ -1123,11 +1125,11 @@ def cross[ScalarT: np.number]( # - 9 overloads for the scalar cases (both args 1d) # - 18 overloads for the non-scalar cases (at least one arg >1d) @overload # ?d ~T, 1d ~T -def matmul( +def matmul( # noqa: UP047 x1: _SupportsArray[_JustAnyShape, np.dtype[_AnyScalarT]], x2: _ArrayLike1D[_AnyScalarT], / ) -> NDArray[_AnyScalarT] | Any: ... @overload # 1d ~T, ?d ~T -def matmul( +def matmul( # noqa: UP047 x1: _ArrayLike1D[_AnyScalarT], x2: _SupportsArray[_JustAnyShape, np.dtype[_AnyScalarT]], / ) -> NDArray[_AnyScalarT] | Any: ... @overload # ?d bool, 1d bool @@ -1167,7 +1169,7 @@ def matmul( x1: _AsArrayC128_1d, x2: _SupportsArray[_JustAnyShape, np.dtype[_to_complex128_co]], / ) -> NDArray[np.complex128] | Any: ... # end workaround @overload # 1d ~T, 1d ~T -def matmul(x1: _ArrayLike1D[_AnyScalarT], x2: _ArrayLike1D[_AnyScalarT], /) -> _AnyScalarT: ... +def matmul(x1: _ArrayLike1D[_AnyScalarT], x2: _ArrayLike1D[_AnyScalarT], /) -> _AnyScalarT: ... # noqa: UP047 @overload # 1d +bool, 1d +bool def matmul(x1: _ToArrayBool_1d, x2: _ToArrayBool_1d, /) -> np.bool: ... @overload # 1d ~int, 1d +int @@ -1185,9 +1187,9 @@ def matmul(x1: _ToArrayComplex_1d, x2: _AsArrayC128_1d, /) -> np.complex128: ... 
@overload # 1d fallback, 1d fallback def matmul(x1: _ToArrayComplex_1d, x2: _ToArrayComplex_1d, /) -> Any: ... # end 1d x 1d @overload # >=1d ~T, >=2d ~T -def matmul(x1: _ArrayLike1ND[_AnyScalarT], x2: _ArrayLike2ND[_AnyScalarT], /) -> NDArray[_AnyScalarT]: ... +def matmul(x1: _ArrayLike1ND[_AnyScalarT], x2: _ArrayLike2ND[_AnyScalarT], /) -> NDArray[_AnyScalarT]: ... # noqa: UP047 @overload # >=2d ~T, >=1d ~T -def matmul(x1: _ArrayLike2ND[_AnyScalarT], x2: _ArrayLike1ND[_AnyScalarT], /) -> NDArray[_AnyScalarT]: ... +def matmul(x1: _ArrayLike2ND[_AnyScalarT], x2: _ArrayLike1ND[_AnyScalarT], /) -> NDArray[_AnyScalarT]: ... # noqa: UP047 @overload # >=1d +bool, >=2d +bool def matmul(x1: _ToArrayBool_1nd, x2: _ToArrayBool_2nd, /) -> NDArray[np.bool]: ... @overload # >=2d +bool, >=1d +bool diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 64cf6e36f972..d7b92e12065b 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -3806,9 +3806,11 @@ def inner(a: ArrayLike, b: ArrayLike) -> Incomplete: ... innerproduct = inner +# NOTE: we ignore UP047 because inlining `_AnyScalarT` would result in a lot of code duplication + # keep in sync with `_core.numeric.outer` @overload -def outer(a: _ArrayLike[_AnyNumericScalarT], b: _ArrayLike[_AnyNumericScalarT]) -> _Masked2D[_AnyNumericScalarT]: ... +def outer(a: _ArrayLike[_AnyNumericScalarT], b: _ArrayLike[_AnyNumericScalarT]) -> _Masked2D[_AnyNumericScalarT]: ... # noqa: UP047 @overload def outer(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co) -> _Masked2D[np.bool]: ... 
@overload @@ -3824,7 +3826,7 @@ outerproduct = outer # keep in sync with `convolve` and `_core.numeric.correlate` @overload -def correlate( +def correlate( # noqa: UP047 a: _ArrayLike[_AnyNumericScalarT], v: _ArrayLike[_AnyNumericScalarT], mode: _CorrelateMode = "valid", @@ -3868,7 +3870,7 @@ def correlate( # keep in sync with `correlate` and `_core.numeric.convolve` @overload -def convolve( +def convolve( # noqa: UP047 a: _ArrayLike[_AnyNumericScalarT], v: _ArrayLike[_AnyNumericScalarT], mode: _CorrelateMode = "full", diff --git a/numpy/ma/extras.pyi b/numpy/ma/extras.pyi index 2d57a4f94bf4..232d040360ea 100644 --- a/numpy/ma/extras.pyi +++ b/numpy/ma/extras.pyi @@ -567,9 +567,11 @@ def unique( return_inverse: L[True], ) -> tuple[_MArray[Incomplete], _IntArray, _IntArray]: ... +# NOTE: we ignore UP047 because inlining `_AnyScalarT` would result in a lot of code duplication + # keep in sync with `lib._arraysetops_impl.intersect1d` @overload # known scalar-type, return_indices=False (default) -def intersect1d( +def intersect1d( # noqa: UP047 ar1: _ArrayLike[_AnyScalarT], ar2: _ArrayLike[_AnyScalarT], assume_unique: bool = False ) -> _MArray1D[_AnyScalarT]: ... @overload # unknown scalar-type, return_indices=False (default) @@ -577,7 +579,7 @@ def intersect1d(ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = False) -> # keep in sync with `lib._arraysetops_impl.setxor1d` @overload -def setxor1d( +def setxor1d( # noqa: UP047 ar1: _ArrayLike[_AnyScalarT], ar2: _ArrayLike[_AnyScalarT], assume_unique: bool = False ) -> _MArray1D[_AnyScalarT]: ... @overload @@ -585,13 +587,13 @@ def setxor1d(ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = False) -> _MA # keep in sync with `lib._arraysetops_impl.union1d` @overload -def union1d(ar1: _ArrayLike[_AnyScalarT], ar2: _ArrayLike[_AnyScalarT]) -> _MArray1D[_AnyScalarT]: ... +def union1d(ar1: _ArrayLike[_AnyScalarT], ar2: _ArrayLike[_AnyScalarT]) -> _MArray1D[_AnyScalarT]: ... 
# noqa: UP047 @overload def union1d(ar1: ArrayLike, ar2: ArrayLike) -> _MArray1D[Incomplete]: ... # keep in sync with `lib._arraysetops_impl.setdiff1d` @overload -def setdiff1d( +def setdiff1d( # noqa: UP047 ar1: _ArrayLike[_AnyScalarT], ar2: _ArrayLike[_AnyScalarT], assume_unique: bool = False ) -> _MArray1D[_AnyScalarT]: ... @overload diff --git a/numpy/testing/_private/utils.pyi b/numpy/testing/_private/utils.pyi index c408a51200a1..8bad88cecfcd 100644 --- a/numpy/testing/_private/utils.pyi +++ b/numpy/testing/_private/utils.pyi @@ -10,7 +10,6 @@ from pathlib import Path from re import Pattern from typing import ( Any, - AnyStr, ClassVar, Final, Generic, @@ -389,21 +388,21 @@ def tempdir( dir: None = None, ) -> _GeneratorContextManager[str]: ... @overload -def tempdir( +def tempdir[AnyStr: (bytes, str)]( suffix: AnyStr | None = None, prefix: AnyStr | None = None, *, dir: GenericPath[AnyStr], ) -> _GeneratorContextManager[AnyStr]: ... @overload -def tempdir( +def tempdir[AnyStr: (bytes, str)]( suffix: AnyStr | None = None, *, prefix: AnyStr, dir: GenericPath[AnyStr] | None = None, ) -> _GeneratorContextManager[AnyStr]: ... @overload -def tempdir( +def tempdir[AnyStr: (bytes, str)]( suffix: AnyStr, prefix: AnyStr | None = None, dir: GenericPath[AnyStr] | None = None, @@ -418,14 +417,14 @@ def temppath( text: bool = False, ) -> _GeneratorContextManager[str]: ... @overload -def temppath( +def temppath[AnyStr: (bytes, str)]( suffix: AnyStr | None, prefix: AnyStr | None, dir: GenericPath[AnyStr], text: bool = False, ) -> _GeneratorContextManager[AnyStr]: ... @overload -def temppath( +def temppath[AnyStr: (bytes, str)]( suffix: AnyStr | None = None, prefix: AnyStr | None = None, *, @@ -433,14 +432,14 @@ def temppath( text: bool = False, ) -> _GeneratorContextManager[AnyStr]: ... 
@overload -def temppath( +def temppath[AnyStr: (bytes, str)]( suffix: AnyStr | None, prefix: AnyStr, dir: GenericPath[AnyStr] | None = None, text: bool = False, ) -> _GeneratorContextManager[AnyStr]: ... @overload -def temppath( +def temppath[AnyStr: (bytes, str)]( suffix: AnyStr | None = None, *, prefix: AnyStr, @@ -448,7 +447,7 @@ def temppath( text: bool = False, ) -> _GeneratorContextManager[AnyStr]: ... @overload -def temppath( +def temppath[AnyStr: (bytes, str)]( suffix: AnyStr, prefix: AnyStr | None = None, dir: GenericPath[AnyStr] | None = None, From 81815550c1dae99d1f647e4fa7b339f34f283330 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 20 Feb 2026 20:25:20 +0100 Subject: [PATCH 1414/1718] STY: avoid using the ``ruff>=0.15.2`` default ruleset --- ruff.toml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ruff.toml b/ruff.toml index 7b6c340c0fa1..a70fac4bc07a 100644 --- a/ruff.toml +++ b/ruff.toml @@ -19,7 +19,7 @@ line-ending = "lf" [lint] preview = true -extend-select = [ +select = [ "B", # flake8-bugbear "C4", # flake8-comprehensions "ISC", # flake8-implicit-str-concat @@ -34,6 +34,7 @@ extend-select = [ "PERF", # perflint "E", # pycodestyle/error "W", # pycodestyle/warning + "F", # pyflakes "PGH", # pygrep-hooks "PLE", # pylint/error "UP", # pyupgrade From 26d5a20850e6a0306029b3cf659331bc28ac20e9 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 22 Feb 2026 21:30:59 +0100 Subject: [PATCH 1415/1718] STY: enable ``RUF100`` --- ruff.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/ruff.toml b/ruff.toml index a70fac4bc07a..7266a07cf7a5 100644 --- a/ruff.toml +++ b/ruff.toml @@ -38,6 +38,7 @@ select = [ "PGH", # pygrep-hooks "PLE", # pylint/error "UP", # pyupgrade + "RUF100", # ruff: unused-noqa ] ignore = [ # flake8-bugbear From 53e1ca93eddf88d46ecd3d46bfcf285739f4a899 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 22 Feb 2026 21:32:04 +0100 Subject: [PATCH 1416/1718] STY: remove unused ``# noqa`` --- .spin/cmds.py | 8 
++++---- numpy/__init__.pyi | 10 +++++----- numpy/_core/arrayprint.py | 2 +- numpy/_core/fromnumeric.pyi | 1 - numpy/_core/multiarray.py | 4 ++-- numpy/_core/records.pyi | 1 - numpy/_core/tests/test_array_coercion.py | 2 +- numpy/_core/tests/test_casting_unittests.py | 2 +- numpy/_core/tests/test_nep50_promotions.py | 4 ++-- numpy/_core/tests/test_scalarmath.py | 2 +- numpy/_core/tests/test_umath_complex.py | 18 +++++++++--------- numpy/_typing/_ufunc.pyi | 2 +- numpy/dtypes.pyi | 3 +-- numpy/f2py/crackfortran.py | 2 +- numpy/f2py/symbolic.pyi | 2 +- numpy/lib/_function_base_impl.py | 4 ++-- numpy/lib/_index_tricks_impl.pyi | 4 ++-- numpy/lib/_user_array_impl.pyi | 6 +++--- numpy/ma/core.py | 2 +- numpy/matlib.py | 2 +- numpy/polynomial/tests/test_printing.py | 2 +- numpy/random/__init__.pyi | 4 ++-- numpy/random/bit_generator.pyi | 2 +- numpy/testing/_private/utils.py | 2 +- numpy/testing/_private/utils.pyi | 4 ++-- numpy/typing/tests/test_typing.py | 2 +- tools/refguide_check.py | 2 +- 27 files changed, 48 insertions(+), 51 deletions(-) diff --git a/.spin/cmds.py b/.spin/cmds.py index ef20d068fc5d..4dcafb3ff1f1 100644 --- a/.spin/cmds.py +++ b/.spin/cmds.py @@ -154,7 +154,7 @@ def test(*, parent_callback, pytest_args, tests, markexpr, parallel_threads, **k When pytest-run-parallel is avaliable, use `spin test -p auto` or `spin test -p ` to run tests sequentional in parallel threads. - """ # noqa: E501 + """ if (not pytest_args) and (not tests): pytest_args = ('--pyargs', 'numpy') @@ -201,10 +201,10 @@ def check_docs(*, parent_callback, pytest_args, **kwargs): - This command only doctests public objects: those which are accessible from the top-level `__init__.py` file. 
- """ # noqa: E501 + """ try: # prevent obscure error later - import scipy_doctest # noqa: F401 + import scipy_doctest except ModuleNotFoundError as e: raise ModuleNotFoundError("scipy-doctest not installed") from e if scipy_doctest.__version__ < '1.8.0': @@ -247,7 +247,7 @@ def check_tutorials(*, parent_callback, pytest_args, **kwargs): - This command only doctests public objects: those which are accessible from the top-level `__init__.py` file. - """ # noqa: E501 + """ # handle all of # - `spin check-tutorials` (pytest_args == ()) # - `spin check-tutorials path/to/rst`, and diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 531b1bb8711c..113ea5e010fa 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -594,7 +594,7 @@ from numpy.matrixlib import ( matrix, ) -__all__ = [ # noqa: RUF022 +__all__ = [ # __numpy_submodules__ "char", "core", "ctypeslib", "dtypes", "exceptions", "f2py", "fft", "lib", "linalg", "ma", "polynomial", "random", "rec", "strings", "test", "testing", "typing", @@ -876,7 +876,7 @@ type _CastingKind = L["no", "equiv", "safe", "same_kind", "same_value", "unsafe" type _OrderKACF = L["K", "A", "C", "F"] | None type _OrderACF = L["A", "C", "F"] | None -type _OrderCF = L["C", "F"] | None # noqa: PYI047 +type _OrderCF = L["C", "F"] | None type _ModeKind = L["raise", "wrap", "clip"] type _PartitionKind = L["introselect"] @@ -2767,9 +2767,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __abs__[ArrayT: NDArray[bool_ | integer | floating | timedelta64 | object_]](self: ArrayT, /) -> ArrayT: ... - def __invert__[ArrayT: NDArray[bool_ | integer | object_]](self: ArrayT, /) -> ArrayT: ... # noqa: PYI019 - def __neg__[ArrayT: _ArrayNumeric](self: ArrayT, /) -> ArrayT: ... # noqa: PYI019 - def __pos__[ArrayT: _ArrayNumeric](self: ArrayT, /) -> ArrayT: ... # noqa: PYI019 + def __invert__[ArrayT: NDArray[bool_ | integer | object_]](self: ArrayT, /) -> ArrayT: ... 
+ def __neg__[ArrayT: _ArrayNumeric](self: ArrayT, /) -> ArrayT: ... + def __pos__[ArrayT: _ArrayNumeric](self: ArrayT, /) -> ArrayT: ... # Binary ops diff --git a/numpy/_core/arrayprint.py b/numpy/_core/arrayprint.py index 4fc9c95ce5e8..da4c6a95c47c 100644 --- a/numpy/_core/arrayprint.py +++ b/numpy/_core/arrayprint.py @@ -78,7 +78,7 @@ def _make_options_dict(precision=None, threshold=None, edgeitems=None, if legacy is False: options['legacy'] = sys.maxsize - elif legacy == False: # noqa: E712 + elif legacy == False: warnings.warn( f"Passing `legacy={legacy!r}` is deprecated.", FutureWarning, stacklevel=3 diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index 08cce57adde7..3e210e757b4e 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -1,4 +1,3 @@ -# ruff: noqa: ANN401 from _typeshed import Incomplete from collections.abc import Sequence from typing import ( diff --git a/numpy/_core/multiarray.py b/numpy/_core/multiarray.py index 54d240c89e3e..1757270deb62 100644 --- a/numpy/_core/multiarray.py +++ b/numpy/_core/multiarray.py @@ -9,7 +9,7 @@ import functools from . import _multiarray_umath, overrides -from ._multiarray_umath import * # noqa: F403 +from ._multiarray_umath import * # These imports are needed for backward compatibility, # do not change them. 
issue gh-15518 @@ -899,7 +899,7 @@ def vdot(a, b, /): >>> 1*4 + 4*1 + 5*2 + 6*2 30 - """ # noqa: E501 + """ return (a, b) diff --git a/numpy/_core/records.pyi b/numpy/_core/records.pyi index 9c8963f5f7af..7b9c36057c35 100644 --- a/numpy/_core/records.pyi +++ b/numpy/_core/records.pyi @@ -1,4 +1,3 @@ -# ruff: noqa: ANN401 # pyright: reportSelfClsParameterName=false from _typeshed import Incomplete, StrOrBytesPath from collections.abc import Buffer, Iterable, Sequence diff --git a/numpy/_core/tests/test_array_coercion.py b/numpy/_core/tests/test_array_coercion.py index 9c8c4a09cfc9..96bbb679d6c9 100644 --- a/numpy/_core/tests/test_array_coercion.py +++ b/numpy/_core/tests/test_array_coercion.py @@ -840,7 +840,7 @@ class TestSpecialAttributeLookupFailure: class WeirdArrayLike: @property - def __array__(self, dtype=None, copy=None): # noqa: PLR0206 + def __array__(self, dtype=None, copy=None): raise RuntimeError("oops!") class WeirdArrayInterface: diff --git a/numpy/_core/tests/test_casting_unittests.py b/numpy/_core/tests/test_casting_unittests.py index c45f41028b7b..f090524d6e78 100644 --- a/numpy/_core/tests/test_casting_unittests.py +++ b/numpy/_core/tests/test_casting_unittests.py @@ -470,7 +470,7 @@ def test_time_to_time(self, from_dt, to_dt, orig_arr = values.view(from_dt) orig_out = np.empty_like(expected_out) - if casting == Casting.unsafe and (to_dt == "m8" or to_dt == "M8"): # noqa: PLR1714 + if casting == Casting.unsafe and (to_dt == "m8" or to_dt == "M8"): # Casting from non-generic to generic units is an error and should # probably be reported as an invalid cast earlier. 
with pytest.raises(ValueError): diff --git a/numpy/_core/tests/test_nep50_promotions.py b/numpy/_core/tests/test_nep50_promotions.py index abd99a92f24b..72f854c7b001 100644 --- a/numpy/_core/tests/test_nep50_promotions.py +++ b/numpy/_core/tests/test_nep50_promotions.py @@ -112,7 +112,7 @@ def test_weak_promotion_scalar_path(op): # Integer path: res = op(np.uint8(3), 5) assert res == op(3, 5) - assert res.dtype == np.uint8 or res.dtype == bool # noqa: PLR1714 + assert res.dtype == np.uint8 or res.dtype == bool with pytest.raises(OverflowError): op(np.uint8(3), 1000) @@ -120,7 +120,7 @@ def test_weak_promotion_scalar_path(op): # Float path: res = op(np.float32(3), 5.) assert res == op(3., 5.) - assert res.dtype == np.float32 or res.dtype == bool # noqa: PLR1714 + assert res.dtype == np.float32 or res.dtype == bool def test_nep50_complex_promotion(): diff --git a/numpy/_core/tests/test_scalarmath.py b/numpy/_core/tests/test_scalarmath.py index 04c78b065f25..2f01a1dea16c 100644 --- a/numpy/_core/tests/test_scalarmath.py +++ b/numpy/_core/tests/test_scalarmath.py @@ -275,7 +275,7 @@ def test_modular_power(self): a = 5 b = 4 c = 10 - expected = pow(a, b, c) # noqa: F841 + expected = pow(a, b, c) for t in (np.int32, np.float32, np.complex64): # note that 3-operand power only dispatches on the first argument assert_raises(TypeError, operator.pow, t(a), b, c) diff --git a/numpy/_core/tests/test_umath_complex.py b/numpy/_core/tests/test_umath_complex.py index 7012e7e357fe..24f74bd34569 100644 --- a/numpy/_core/tests/test_umath_complex.py +++ b/numpy/_core/tests/test_umath_complex.py @@ -610,15 +610,15 @@ def test_array(self, stride, astype, func): ('cont_chisq', ' tuple[Expr | _Pair, ...]: ... @overload - def process(self, /, s: _NestedString, context: str = "expr") -> Any: ... # noqa: ANN401 + def process(self, /, s: _NestedString, context: str = "expr") -> Any: ... 
diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 2e76633136ee..69d1ed8a7c87 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -93,7 +93,7 @@ # --- HYNDMAN and FAN METHODS # Discrete methods 'inverted_cdf': { - 'get_virtual_index': lambda n, quantiles: _inverted_cdf(n, quantiles), # noqa: PLW0108 + 'get_virtual_index': lambda n, quantiles: _inverted_cdf(n, quantiles), 'fix_gamma': None, # should never be called }, 'averaged_inverted_cdf': { @@ -105,7 +105,7 @@ where=gamma == 0), }, 'closest_observation': { - 'get_virtual_index': lambda n, quantiles: _closest_observation(n, quantiles), # noqa: PLW0108 + 'get_virtual_index': lambda n, quantiles: _closest_observation(n, quantiles), 'fix_gamma': None, # should never be called }, # Continuous methods diff --git a/numpy/lib/_index_tricks_impl.pyi b/numpy/lib/_index_tricks_impl.pyi index 2b21e754d5bc..ad3b5cb6e236 100644 --- a/numpy/lib/_index_tricks_impl.pyi +++ b/numpy/lib/_index_tricks_impl.pyi @@ -28,7 +28,7 @@ from numpy._typing import ( _SupportsArray, ) -__all__ = [ # noqa: RUF022 +__all__ = [ "ravel_multi_index", "unravel_index", "mgrid", @@ -56,7 +56,7 @@ _Trans1DT_co = TypeVar("_Trans1DT_co", bound=int, default=L[-1], covariant=True) ### -class ndenumerate(Generic[_ScalarT_co]): # noqa: UP046 +class ndenumerate(Generic[_ScalarT_co]): @overload def __init__[ScalarT: np.generic]( self: ndenumerate[ScalarT], diff --git a/numpy/lib/_user_array_impl.pyi b/numpy/lib/_user_array_impl.pyi index 4a6dfffbea92..0910e10dbde2 100644 --- a/numpy/lib/_user_array_impl.pyi +++ b/numpy/lib/_user_array_impl.pyi @@ -126,9 +126,9 @@ class container(Generic[_ShapeT_co, _DTypeT_co]): def __abs__[ContainerT: _RealContainer](self: ContainerT, /) -> ContainerT: ... # - def __neg__[ContainerT: _NumericContainer](self: ContainerT, /) -> ContainerT: ... # noqa: PYI019 - def __pos__[ContainerT: _NumericContainer](self: ContainerT, /) -> ContainerT: ... 
# noqa: PYI019 - def __invert__[ContainerT: _IntegralContainer](self: ContainerT, /) -> ContainerT: ... # noqa: PYI019 + def __neg__[ContainerT: _NumericContainer](self: ContainerT, /) -> ContainerT: ... + def __pos__[ContainerT: _NumericContainer](self: ContainerT, /) -> ContainerT: ... + def __invert__[ContainerT: _IntegralContainer](self: ContainerT, /) -> ContainerT: ... # TODO(jorenham): complete these binary ops diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 4ab7a18fabfb..32c2f07c162b 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -36,7 +36,7 @@ amax, amin, angle, - array as narray, # noqa: F401 + array as narray, bool_, expand_dims, finfo, # noqa: F401 diff --git a/numpy/matlib.py b/numpy/matlib.py index 151cb6b369b4..a19a4ed57e21 100644 --- a/numpy/matlib.py +++ b/numpy/matlib.py @@ -14,7 +14,7 @@ # Matlib.py contains all functions in the numpy namespace with a few # replacements. See doc/source/reference/routines.matlib.rst for details. # Need * as we're copying the numpy namespace. 
-from numpy import * # noqa: F403 +from numpy import * from numpy.matrixlib.defmatrix import asmatrix, matrix __version__ = np.__version__ diff --git a/numpy/polynomial/tests/test_printing.py b/numpy/polynomial/tests/test_printing.py index b41e73a28a01..fe56f9e16bef 100644 --- a/numpy/polynomial/tests/test_printing.py +++ b/numpy/polynomial/tests/test_printing.py @@ -551,7 +551,7 @@ def test_switch_to_exp(self): def test_non_finite(self): p = poly.Polynomial([nan, inf]) assert str(p) == 'nan + inf x' - assert p._repr_latex_() == r'$x \mapsto \text{nan} + \text{inf}\,x$' # noqa: RUF027 + assert p._repr_latex_() == r'$x \mapsto \text{nan} + \text{inf}\,x$' with printoptions(nanstr='NAN', infstr='INF'): assert str(p) == 'NAN + INF x' assert p._repr_latex_() == \ diff --git a/numpy/random/__init__.pyi b/numpy/random/__init__.pyi index e9b9fb50ab8c..f949c5aef113 100644 --- a/numpy/random/__init__.pyi +++ b/numpy/random/__init__.pyi @@ -16,7 +16,7 @@ from .mtrand import ( f, gamma, geometric, - get_bit_generator, # noqa: F401 + get_bit_generator, get_state, gumbel, hypergeometric, @@ -44,7 +44,7 @@ from .mtrand import ( rayleigh, sample, seed, - set_bit_generator, # noqa: F401 + set_bit_generator, set_state, shuffle, standard_cauchy, diff --git a/numpy/random/bit_generator.pyi b/numpy/random/bit_generator.pyi index 3c2069aba408..51ee8188e65f 100644 --- a/numpy/random/bit_generator.pyi +++ b/numpy/random/bit_generator.pyi @@ -49,7 +49,7 @@ class _Interface(NamedTuple): @type_check_only class _CythonMixin: def __setstate_cython__(self, pyx_state: object, /) -> None: ... - def __reduce_cython__(self) -> Any: ... # noqa: ANN401 + def __reduce_cython__(self) -> Any: ... 
@type_check_only class _GenerateStateMixin(_CythonMixin): diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index c7966360d3f5..54d040a6ed3f 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -66,7 +66,7 @@ class KnownFailureException(Exception): try: if sys.version_info < (3, 13): # Backport importlib.metadata.Distribution.origin - import json # noqa: E401 + import json import types origin = json.loads( np_dist.read_text('direct_url.json') or '{}', diff --git a/numpy/testing/_private/utils.pyi b/numpy/testing/_private/utils.pyi index 8bad88cecfcd..f2953298f02f 100644 --- a/numpy/testing/_private/utils.pyi +++ b/numpy/testing/_private/utils.pyi @@ -34,7 +34,7 @@ from numpy._typing import ( _ArrayLikeTD64_co, ) -__all__ = [ # noqa: RUF022 +__all__ = [ "IS_EDITABLE", "IS_MUSL", "IS_PYPY", @@ -495,7 +495,7 @@ def run_threaded[*Ts]( ) -> None: ... # -def runstring(astr: _StrLike | types.CodeType, dict: dict[str, Any] | None) -> Any: ... # noqa: ANN401 +def runstring(astr: _StrLike | types.CodeType, dict: dict[str, Any] | None) -> Any: ... def rundocs(filename: StrPath | None = None, raise_on_error: bool = True) -> None: ... def measure(code_str: _StrLike | ast.AST, times: int = 1, label: str | None = None) -> float: ... def break_cycles() -> None: ... 
diff --git a/numpy/typing/tests/test_typing.py b/numpy/typing/tests/test_typing.py index cc99b634d6e0..dbe16a37ada4 100644 --- a/numpy/typing/tests/test_typing.py +++ b/numpy/typing/tests/test_typing.py @@ -82,7 +82,7 @@ def run_mypy() -> None: """ if ( os.path.isdir(CACHE_DIR) - and bool(os.environ.get("NUMPY_TYPING_TEST_CLEAR_CACHE", True)) # noqa: PLW1508 + and bool(os.environ.get("NUMPY_TYPING_TEST_CLEAR_CACHE", True)) ): shutil.rmtree(CACHE_DIR) diff --git a/tools/refguide_check.py b/tools/refguide_check.py index da881574215f..486789414373 100644 --- a/tools/refguide_check.py +++ b/tools/refguide_check.py @@ -520,7 +520,7 @@ def check_rest(module, names, dots=True): traceback.format_exc())) continue - m = re.search("([\x00-\x09\x0b-\x1f])", text) # noqa: RUF039 + m = re.search("([\x00-\x09\x0b-\x1f])", text) if m: msg = ("Docstring contains a non-printable character %r! " "Maybe forgot r\"\"\"?" % (m.group(1),)) From 244ef6a5fc33c17270b055cca49d43494df09343 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 22 Feb 2026 22:27:57 +0100 Subject: [PATCH 1417/1718] STY: enable ``RUF012`` (``mutable-class-default``) --- ruff.toml | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/ruff.toml b/ruff.toml index 7266a07cf7a5..f204c77545c0 100644 --- a/ruff.toml +++ b/ruff.toml @@ -38,6 +38,7 @@ select = [ "PGH", # pygrep-hooks "PLE", # pylint/error "UP", # pyupgrade + "RUF012", # ruff: mutable-class-default "RUF100", # ruff: unused-noqa ] ignore = [ @@ -77,9 +78,9 @@ ignore = [ ] [lint.per-file-ignores] -"_tempita.py" = ["B909"] -"bench_*.py" = ["B015", "B018"] -"test*.py" = ["B015", "B018", "E201", "E714"] +"_tempita.py" = ["B909", "RUF012"] +"bench_*.py" = ["B015", "B018", "RUF012"] +"test*.py" = ["B015", "B018", "E201", "E714", "RUF012"] "numpy/_core/_add_newdocs.py" = ["E501"] "numpy/_core/_add_newdocs_scalars.py" = ["E501"] From ceba92f9af2eba218e5ca8ac0041c877dc3da7fb Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 22 Feb 2026 22:29:10 
+0100 Subject: [PATCH 1418/1718] STY: ignore harmless ``RUF012`` errors --- doc/source/conf.py | 2 +- numpy/_core/getlimits.py | 6 +++--- numpy/f2py/tests/util.py | 6 +++--- numpy/lib/_iotools.py | 2 +- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/doc/source/conf.py b/doc/source/conf.py index 77fde4920f9f..3ae97041a5ea 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -605,7 +605,7 @@ def linkcode_resolve(domain, info): class NumPyLexer(CLexer): name = 'NUMPYLEXER' - tokens = { + tokens = { # noqa: RUF012 'statements': [ (r'@[a-zA-Z_]*@', Comment.Preproc, 'macro'), inherit, diff --git a/numpy/_core/getlimits.py b/numpy/_core/getlimits.py index 7d2b9966fcd7..f0e2a7c05d86 100644 --- a/numpy/_core/getlimits.py +++ b/numpy/_core/getlimits.py @@ -168,7 +168,7 @@ class finfo: """ - _finfo_cache = {} + _finfo_cache = {} # noqa: RUF012 __class_getitem__ = classmethod(types.GenericAlias) @@ -395,8 +395,8 @@ class iinfo: """ - _min_vals = {} - _max_vals = {} + _min_vals = {} # noqa: RUF012 + _max_vals = {} # noqa: RUF012 __class_getitem__ = classmethod(types.GenericAlias) diff --git a/numpy/f2py/tests/util.py b/numpy/f2py/tests/util.py index 35e5d3bd8ac0..944b5ae6e084 100644 --- a/numpy/f2py/tests/util.py +++ b/numpy/f2py/tests/util.py @@ -358,9 +358,9 @@ def build_meson(source_files, module_name=None, **kwargs): class F2PyTest: code = None sources = None - options = [] - skip = [] - only = [] + options = [] # noqa: RUF012 + skip = [] # noqa: RUF012 + only = [] # noqa: RUF012 suffix = ".f" module = None _has_c_compiler = None diff --git a/numpy/lib/_iotools.py b/numpy/lib/_iotools.py index c823842b36b6..ad1ee8785328 100644 --- a/numpy/lib/_iotools.py +++ b/numpy/lib/_iotools.py @@ -497,7 +497,7 @@ class StringConverter: upgrade or not. Default is False. 
""" - _mapper = [(nx.bool, str2bool, False), + _mapper = [(nx.bool, str2bool, False), # noqa: RUF012 (nx.int_, int, -1),] # On 32-bit systems, we need to make sure that we explicitly include From 316f598b06fde24b69318e70f894ec88d6b03ce9 Mon Sep 17 00:00:00 2001 From: Kumar Aditya Date: Mon, 23 Feb 2026 19:13:16 +0530 Subject: [PATCH 1419/1718] ENH: use raw memory allocation APIs under free-threading (#30846) Co-authored-by: Sebastian Berg --- .../upcoming_changes/30846.compatibility.rst | 4 +++ .../upcoming_changes/30846.performance.rst | 18 ++++++++++ numpy/_core/src/multiarray/alloc.cpp | 36 ++++++++----------- 3 files changed, 37 insertions(+), 21 deletions(-) create mode 100644 doc/release/upcoming_changes/30846.compatibility.rst create mode 100644 doc/release/upcoming_changes/30846.performance.rst diff --git a/doc/release/upcoming_changes/30846.compatibility.rst b/doc/release/upcoming_changes/30846.compatibility.rst new file mode 100644 index 000000000000..68a6685f6673 --- /dev/null +++ b/doc/release/upcoming_changes/30846.compatibility.rst @@ -0,0 +1,4 @@ +Default memory allocator change +------------------------------- +NumPy now uses ``PyMem_RawMalloc`` and ``PyMem_RawFree`` as the default memory allocator, +instead of system's ``malloc`` and ``free`` directly. diff --git a/doc/release/upcoming_changes/30846.performance.rst b/doc/release/upcoming_changes/30846.performance.rst new file mode 100644 index 000000000000..39d2d68cbda3 --- /dev/null +++ b/doc/release/upcoming_changes/30846.performance.rst @@ -0,0 +1,18 @@ +Improved scaling of ufuncs on free-threading +-------------------------------------------- + +NumPy's ufuncs now scale significantly better on free-threading builds +of CPython due to the following optimizations: + +* **Lock-free dispatch table:** The ufuncs dispatch table is now + implemented as a lock-free concurrent hash map, allowing multiple threads + to call ufuncs without contention. 
+ +* **Immortal shared objects:** Certain shared objects, such as global memory + handlers, have been made immortal. This effectively reduces reference + counting contention across threads. + +* **Optimized memory allocation:** NumPy now utilizes ``PyMem_RawMalloc`` and + ``PyMem_RawFree`` for memory allocation. On Python 3.15 and newer, this + leverages ``mimalloc`` and significantly reduces memory allocation overhead + in multi-threaded workloads. \ No newline at end of file diff --git a/numpy/_core/src/multiarray/alloc.cpp b/numpy/_core/src/multiarray/alloc.cpp index 958d9a309db7..ca2027403b96 100644 --- a/numpy/_core/src/multiarray/alloc.cpp +++ b/numpy/_core/src/multiarray/alloc.cpp @@ -75,12 +75,12 @@ typedef struct cache_destructor { ~cache_destructor() { for (npy_uint i = 0; i < NBUCKETS; ++i) { while (datacache[i].available > 0) { - free(datacache[i].ptrs[--datacache[i].available]); + PyMem_RawFree(datacache[i].ptrs[--datacache[i].available]); } } for (npy_uint i = 0; i < NBUCKETS_DIM; ++i) { while (dimcache[i].available > 0) { - PyArray_free(dimcache[i].ptrs[--dimcache[i].available]); + PyMem_RawFree(dimcache[i].ptrs[--dimcache[i].available]); } } } @@ -217,7 +217,6 @@ npy_alloc_cache_zero(size_t nmemb, size_t size) { void * p; size_t sz = nmemb * size; - NPY_BEGIN_THREADS_DEF; if (sz < NBUCKETS) { p = _npy_alloc_cache(sz, 1, NBUCKETS, datacache, &PyDataMem_NEW); if (p) { @@ -225,9 +224,7 @@ npy_alloc_cache_zero(size_t nmemb, size_t size) } return p; } - NPY_BEGIN_THREADS; p = PyDataMem_NEW_ZEROED(nmemb, size); - NPY_END_THREADS; if (p) { indicate_hugepages(p, sz); } @@ -255,7 +252,7 @@ npy_alloc_cache_dim(npy_uintp sz) sz = 2; } return _npy_alloc_cache(sz, sizeof(npy_intp), NBUCKETS_DIM, dimcache, - &PyArray_malloc); + &PyMem_RawMalloc); } NPY_NO_EXPORT void @@ -296,10 +293,10 @@ PyDataMem_NEW(size_t size) void *result; assert(size != 0); - result = malloc(size); + result = PyMem_RawMalloc(size); int ret = PyTraceMalloc_Track(NPY_TRACE_DOMAIN, 
(npy_uintp)result, size); if (ret == -1) { - free(result); + PyMem_RawFree(result); return NULL; } return result; @@ -313,10 +310,10 @@ PyDataMem_NEW_ZEROED(size_t nmemb, size_t size) { void *result; - result = calloc(nmemb, size); + result = PyMem_RawCalloc(nmemb, size); int ret = PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, nmemb * size); if (ret == -1) { - free(result); + PyMem_RawFree(result); return NULL; } return result; @@ -329,7 +326,7 @@ NPY_NO_EXPORT void PyDataMem_FREE(void *ptr) { PyTraceMalloc_Untrack(NPY_TRACE_DOMAIN, (npy_uintp)ptr); - free(ptr); + PyMem_RawFree(ptr); } /*NUMPY_API @@ -342,10 +339,10 @@ PyDataMem_RENEW(void *ptr, size_t size) assert(size != 0); PyTraceMalloc_Untrack(NPY_TRACE_DOMAIN, (npy_uintp)ptr); - result = realloc(ptr, size); + result = PyMem_RawRealloc(ptr, size); int ret = PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, size); if (ret == -1) { - free(result); + PyMem_RawFree(result); return NULL; } return result; @@ -357,7 +354,7 @@ PyDataMem_RENEW(void *ptr, size_t size) static inline void * default_malloc(void *NPY_UNUSED(ctx), size_t size) { - return _npy_alloc_cache(size, 1, NBUCKETS, datacache, &malloc); + return _npy_alloc_cache(size, 1, NBUCKETS, datacache, &PyMem_RawMalloc); } // The default data mem allocator calloc routine does not make use of a ctx. 
@@ -368,20 +365,17 @@ default_calloc(void *NPY_UNUSED(ctx), size_t nelem, size_t elsize) { void * p; size_t sz = nelem * elsize; - NPY_BEGIN_THREADS_DEF; if (sz < NBUCKETS) { - p = _npy_alloc_cache(sz, 1, NBUCKETS, datacache, &malloc); + p = _npy_alloc_cache(sz, 1, NBUCKETS, datacache, &PyMem_RawMalloc); if (p) { memset(p, 0, sz); } return p; } - NPY_BEGIN_THREADS; - p = calloc(nelem, elsize); + p = PyMem_RawCalloc(nelem, elsize); if (p) { indicate_hugepages(p, sz); } - NPY_END_THREADS; return p; } @@ -391,7 +385,7 @@ default_calloc(void *NPY_UNUSED(ctx), size_t nelem, size_t elsize) static inline void * default_realloc(void *NPY_UNUSED(ctx), void *ptr, size_t new_size) { - return realloc(ptr, new_size); + return PyMem_RawRealloc(ptr, new_size); } // The default data mem allocator free routine does not make use of a ctx. @@ -400,7 +394,7 @@ default_realloc(void *NPY_UNUSED(ctx), void *ptr, size_t new_size) static inline void default_free(void *NPY_UNUSED(ctx), void *ptr, size_t size) { - _npy_free_cache(ptr, size, NBUCKETS, datacache, &free); + _npy_free_cache(ptr, size, NBUCKETS, datacache, &PyMem_RawFree); } /* Memory handler global default */ From 8e4f739e23b13d6c3bc839b78d9691d1b8101c44 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 23 Feb 2026 19:49:29 +0000 Subject: [PATCH 1420/1718] MAINT: Bump hypothesis from 6.151.6 to 6.151.8 in /requirements Bumps [hypothesis](https://github.com/HypothesisWorks/hypothesis) from 6.151.6 to 6.151.8. - [Release notes](https://github.com/HypothesisWorks/hypothesis/releases) - [Commits](https://github.com/HypothesisWorks/hypothesis/compare/hypothesis-python-6.151.6...hypothesis-python-6.151.8) --- updated-dependencies: - dependency-name: hypothesis dependency-version: 6.151.8 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- requirements/emscripten_test_requirements.txt | 2 +- requirements/test_requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements/emscripten_test_requirements.txt b/requirements/emscripten_test_requirements.txt index 3c5e86e39a84..97d585a8ba1b 100644 --- a/requirements/emscripten_test_requirements.txt +++ b/requirements/emscripten_test_requirements.txt @@ -1,4 +1,4 @@ -hypothesis==6.151.6 +hypothesis==6.151.8 pytest==9.0.2 tzdata pytest-xdist diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index 2e21b90385ea..64d8b6b54fba 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -1,5 +1,5 @@ Cython -hypothesis==6.151.6 +hypothesis==6.151.8 pytest==9.0.2 pytest-cov==7.0.0 meson From 2c5b1ebb9c30652099e5510edd604ff481135796 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Tue, 24 Feb 2026 11:05:47 +0100 Subject: [PATCH 1421/1718] MAINT: remove download-wheels.py and update release-requirements.txt We're releasing from `numpy/numpy-release` with trusted publishing, hence no more manual shuffling of wheels. The main motivation of this change is that the script pulled in extra dependencies like `urllib3`, which are triggering some (non-applicable) security scanner alerts. [ci skip] --- doc/RELEASE_WALKTHROUGH.rst | 2 - requirements/release_requirements.txt | 7 -- tools/download-wheels.py | 132 -------------------------- 3 files changed, 141 deletions(-) delete mode 100644 tools/download-wheels.py diff --git a/doc/RELEASE_WALKTHROUGH.rst b/doc/RELEASE_WALKTHROUGH.rst index c8e3da3095b5..2abc89fcd5aa 100644 --- a/doc/RELEASE_WALKTHROUGH.rst +++ b/doc/RELEASE_WALKTHROUGH.rst @@ -17,8 +17,6 @@ documentation. There are a few ways to streamline things: - Git can be set up to use a keyring to store your GitHub personal access token. Search online for the details. 
-- You can use the ``keyring`` app to store the PyPI password for twine. See the - online twine documentation for details. Prior to release diff --git a/requirements/release_requirements.txt b/requirements/release_requirements.txt index eaa092560d2d..55079d795ed9 100644 --- a/requirements/release_requirements.txt +++ b/requirements/release_requirements.txt @@ -1,16 +1,9 @@ # These packages are needed for a release in addition to those needed # for building, testing, and the creation of documentation. -# download-wheels.py -urllib3 -beautifulsoup4 - # changelog.py pygithub gitpython>=3.1.30 -# uploading wheels -twine - # uploading release documentation packaging diff --git a/tools/download-wheels.py b/tools/download-wheels.py deleted file mode 100644 index 38a8360f0437..000000000000 --- a/tools/download-wheels.py +++ /dev/null @@ -1,132 +0,0 @@ -#!/usr/bin/env python3 -""" -Script to download NumPy wheels from the Anaconda staging area. - -Usage:: - - $ ./tools/download-wheels.py -w - -The default wheelhouse is ``release/installers``. - -Dependencies ------------- - -- beautifulsoup4 -- urllib3 - -Examples --------- - -While in the repository root:: - - $ python tools/download-wheels.py 1.19.0 - $ python tools/download-wheels.py 1.19.0 -w ~/wheelhouse - -""" -import argparse -import os -import re -import shutil - -import urllib3 -from bs4 import BeautifulSoup - -__version__ = "0.2" - -# Edit these for other projects. - -# The first URL is used to get the file names as it avoids the need for paging -# when the number of files exceeds the page length. Note that files/page is not -# stable and can change when the page layout changes. The second URL is used to -# retrieve the files themselves. This workaround is copied from SciPy. -NAMES_URL = "https://pypi.anaconda.org/multibuild-wheels-staging/simple/numpy/" -FILES_URL = "https://anaconda.org/multibuild-wheels-staging/numpy" - -# Name prefix of the files to download. 
-PREFIX = "numpy" - -# Name endings of the files to download. -WHL = r"-.*\.whl$" -ZIP = r"\.zip$" -GZIP = r"\.tar\.gz$" -SUFFIX = rf"({WHL}|{GZIP}|{ZIP})" - - -def get_wheel_names(version): - """ Get wheel names from Anaconda HTML directory. - - This looks in the Anaconda multibuild-wheels-staging page and - parses the HTML to get all the wheel names for a release version. - - Parameters - ---------- - version : str - The release version. For instance, "1.18.3". - - """ - http = urllib3.PoolManager(cert_reqs="CERT_REQUIRED") - tmpl = re.compile(rf"^.*{PREFIX}-{version}{SUFFIX}") - index_url = f"{NAMES_URL}" - index_html = http.request('GET', index_url) - soup = BeautifulSoup(index_html.data, 'html.parser') - return sorted(soup.find_all(string=tmpl)) - - -def download_wheels(version, wheelhouse, test=False): - """Download release wheels. - - The release wheels for the given NumPy version are downloaded - into the given directory. - - Parameters - ---------- - version : str - The release version. For instance, "1.18.3". - wheelhouse : str - Directory in which to download the wheels. 
- - """ - http = urllib3.PoolManager(cert_reqs="CERT_REQUIRED") - wheel_names = get_wheel_names(version) - - for i, wheel_name in enumerate(wheel_names): - wheel_url = f"{FILES_URL}/{version}/download/{wheel_name}" - wheel_path = os.path.join(wheelhouse, wheel_name) - with open(wheel_path, "wb") as f: - with http.request("GET", wheel_url, preload_content=False,) as r: - info = r.info() - length = int(info.get('Content-Length', '0')) - if length == 0: - length = 'unknown size' - else: - length = f"{(length / 1024 / 1024):.2f}MB" - print(f"{i + 1:<4}{wheel_name} {length}") - if not test: - shutil.copyfileobj(r, f) - print(f"\nTotal files downloaded: {len(wheel_names)}") - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument( - "version", - help="NumPy version to download.") - parser.add_argument( - "-w", "--wheelhouse", - default=os.path.join(os.getcwd(), "release", "installers"), - help="Directory in which to store downloaded wheels\n" - "[defaults to /release/installers]") - parser.add_argument( - "-t", "--test", - action='store_true', - help="only list available wheels, do not download") - - args = parser.parse_args() - - wheelhouse = os.path.expanduser(args.wheelhouse) - if not os.path.isdir(wheelhouse): - raise RuntimeError( - f"{wheelhouse} wheelhouse directory is not present." 
- " Perhaps you need to use the '-w' flag to specify one.") - - download_wheels(args.version, wheelhouse, test=args.test) From 7d6d467266012ab31f911d331418c5342e705615 Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 24 Feb 2026 11:33:57 +0100 Subject: [PATCH 1422/1718] DEV: separate ``typing_requirements.txt`` --- .github/workflows/mypy_primer.yml | 2 +- .github/workflows/stubtest.yml | 6 +++--- .github/workflows/typecheck.yml | 6 +++--- requirements/all_requirements.txt | 1 + requirements/test_requirements.txt | 3 --- requirements/typing_requirements.txt | 4 ++++ 6 files changed, 12 insertions(+), 10 deletions(-) create mode 100644 requirements/typing_requirements.txt diff --git a/.github/workflows/mypy_primer.yml b/.github/workflows/mypy_primer.yml index b95cab9d71d4..852f36a966ea 100644 --- a/.github/workflows/mypy_primer.yml +++ b/.github/workflows/mypy_primer.yml @@ -41,7 +41,7 @@ jobs: shell: bash run: | cd numpy_to_test - MYPY_VERSION=$(grep mypy== requirements/test_requirements.txt | sed -n 's/mypy==\([^;]*\).*/\1/p') + MYPY_VERSION=$(grep mypy== requirements/typing_requirements.txt | sed -n 's/mypy==\([^;]*\).*/\1/p') echo "new commit" git checkout $GITHUB_SHA diff --git a/.github/workflows/stubtest.yml b/.github/workflows/stubtest.yml index f3673b11b7bd..a25e6c54cfb8 100644 --- a/.github/workflows/stubtest.yml +++ b/.github/workflows/stubtest.yml @@ -14,7 +14,7 @@ on: - ".github/workflows/stubtest.yml" - "numpy/**" - "!numpy/**/tests/**" - - "requirements/test_requirements.txt" + - "requirements/typing_requirements.txt" - "tools/stubtest/**" workflow_dispatch: @@ -49,13 +49,13 @@ jobs: activate-environment: true cache-dependency-glob: | requirements/build_requirements.txt - requirements/test_requirements.txt + requirements/typing_requirements.txt - name: uv pip install run: >- uv pip install -r requirements/build_requirements.txt - -r requirements/test_requirements.txt + -r requirements/typing_requirements.txt - name: spin build run: spin build -j2 -- 
-Dallow-noblas=true -Ddisable-optimization=true --vsenv diff --git a/.github/workflows/typecheck.yml b/.github/workflows/typecheck.yml index 661b9a8c1693..321530993b82 100644 --- a/.github/workflows/typecheck.yml +++ b/.github/workflows/typecheck.yml @@ -67,14 +67,14 @@ jobs: activate-environment: true cache-dependency-glob: | requirements/build_requirements.txt - requirements/test_requirements.txt + requirements/typing_requirements.txt - name: Install dependencies # orjson makes mypy faster but the default requirements.txt # can't install it because orjson doesn't support 32 bit Linux run: >- uv pip install -r requirements/build_requirements.txt - -r requirements/test_requirements.txt + -r requirements/typing_requirements.txt orjson basedpyright - name: Build @@ -100,6 +100,6 @@ jobs: - name: Install dependencies run: >- uv pip install - -r requirements/test_requirements.txt + -r requirements/typing_requirements.txt - name: Run pyrefly run: pyrefly check diff --git a/requirements/all_requirements.txt b/requirements/all_requirements.txt index 2e457cb0bdbe..ad15045c15e2 100644 --- a/requirements/all_requirements.txt +++ b/requirements/all_requirements.txt @@ -3,4 +3,5 @@ -r linter_requirements.txt -r release_requirements.txt -r test_requirements.txt +-r typing_requirements.txt -r ci_requirements.txt diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index 64d8b6b54fba..cbc157eb6671 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -6,9 +6,6 @@ meson ninja; sys_platform != "emscripten" pytest-xdist pytest-timeout -# For testing types -mypy==1.19.1 -pyrefly==0.49.0 # for optional f2py encoding detection charset-normalizer tzdata diff --git a/requirements/typing_requirements.txt b/requirements/typing_requirements.txt new file mode 100644 index 000000000000..328638bd29b7 --- /dev/null +++ b/requirements/typing_requirements.txt @@ -0,0 +1,4 @@ +-r test_requirements.txt + +mypy==1.19.1 
+pyrefly==0.49.0 From 2d87c60b0b7d3ab695eab3713d6b4a02feb8150f Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 24 Feb 2026 16:01:45 +0100 Subject: [PATCH 1423/1718] TYP: add comment to ``typing_requirements.txt`` --- requirements/typing_requirements.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/requirements/typing_requirements.txt b/requirements/typing_requirements.txt index 328638bd29b7..57a0e41e1d4a 100644 --- a/requirements/typing_requirements.txt +++ b/requirements/typing_requirements.txt @@ -1,3 +1,5 @@ +# static typing requirements that are not needed for runtime tests + -r test_requirements.txt mypy==1.19.1 From b03feba4becad2ebe37bb5918106c9f65317212a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 24 Feb 2026 17:58:12 +0000 Subject: [PATCH 1424/1718] MAINT: Bump hypothesis from 6.151.8 to 6.151.9 in /requirements Bumps [hypothesis](https://github.com/HypothesisWorks/hypothesis) from 6.151.8 to 6.151.9. - [Release notes](https://github.com/HypothesisWorks/hypothesis/releases) - [Commits](https://github.com/HypothesisWorks/hypothesis/compare/hypothesis-python-6.151.8...hypothesis-python-6.151.9) --- updated-dependencies: - dependency-name: hypothesis dependency-version: 6.151.9 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- requirements/emscripten_test_requirements.txt | 2 +- requirements/test_requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements/emscripten_test_requirements.txt b/requirements/emscripten_test_requirements.txt index 97d585a8ba1b..71e736ceed90 100644 --- a/requirements/emscripten_test_requirements.txt +++ b/requirements/emscripten_test_requirements.txt @@ -1,4 +1,4 @@ -hypothesis==6.151.8 +hypothesis==6.151.9 pytest==9.0.2 tzdata pytest-xdist diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index cbc157eb6671..f704fab81ede 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -1,5 +1,5 @@ Cython -hypothesis==6.151.8 +hypothesis==6.151.9 pytest==9.0.2 pytest-cov==7.0.0 meson From 7473213c31da68b431696c23b2efab60ee975b06 Mon Sep 17 00:00:00 2001 From: M Bussonnier Date: Wed, 25 Feb 2026 14:35:27 +0100 Subject: [PATCH 1425/1718] ENH: add a variant of as_strided with bound checks. (#30771) Co-authored-by: Sebastian Berg --- doc/source/glossary.rst | 3 +- numpy/lib/_stride_tricks_impl.py | 79 +++++++-- numpy/lib/_stride_tricks_impl.pyi | 4 + numpy/lib/tests/test_stride_tricks.py | 228 ++++++++++++++++++++++++++ 4 files changed, 303 insertions(+), 11 deletions(-) diff --git a/doc/source/glossary.rst b/doc/source/glossary.rst index ae2ab6ea4247..838963eb9ccc 100644 --- a/doc/source/glossary.rst +++ b/doc/source/glossary.rst @@ -472,7 +472,8 @@ Glossary Strides are computed automatically from an array's dtype and shape, but can be directly specified using - :doc:`as_strided. ` + :doc:`as_strided `. + Bounds validation can be enabled with the ``check_bounds`` parameter. For details, see :doc:`numpy.ndarray.strides `. 
diff --git a/numpy/lib/_stride_tricks_impl.py b/numpy/lib/_stride_tricks_impl.py index 98a79b325f66..9e8324cec259 100644 --- a/numpy/lib/_stride_tricks_impl.py +++ b/numpy/lib/_stride_tricks_impl.py @@ -7,8 +7,9 @@ import numpy as np from numpy._core.numeric import normalize_axis_tuple from numpy._core.overrides import array_function_dispatch, set_module +from numpy.lib._array_utils_impl import byte_bounds -__all__ = ['broadcast_to', 'broadcast_arrays', 'broadcast_shapes'] +__all__ = ["broadcast_to", "broadcast_arrays", "broadcast_shapes"] class DummyArray: @@ -35,7 +36,9 @@ def _maybe_view_as_subclass(original_array, new_array): @set_module("numpy.lib.stride_tricks") -def as_strided(x, shape=None, strides=None, subok=False, writeable=True): +def as_strided( + x, shape=None, strides=None, subok=False, writeable=True, *, check_bounds=None +): """ Create a view into the array with the given shape and strides. @@ -55,11 +58,20 @@ def as_strided(x, shape=None, strides=None, subok=False, writeable=True): If set to False, the returned array will always be readonly. Otherwise it will be writable if the original array was. It is advisable to set this to False if possible (see Notes). + check_bounds : bool or None + Check the new strides and shape for potential out-of-bounds memory + access. Returns ------- view : ndarray + Raises + ------ + ValueError + If `check_bounds` is True and the given shape and strides could result in + out-of-bounds memory access. + See also -------- broadcast_to : broadcast an array to a given shape. @@ -69,7 +81,7 @@ def as_strided(x, shape=None, strides=None, subok=False, writeable=True): Notes ----- - ``as_strided`` creates a view into the array given the exact strides + `as_strided` creates a view into the array given the exact strides and shape. This means it manipulates the internal data structure of ndarray and, if done incorrectly, the array elements can point to invalid memory and can corrupt results or crash your program. 
@@ -87,27 +99,73 @@ def as_strided(x, shape=None, strides=None, subok=False, writeable=True): care, you may want to use ``writeable=False`` to avoid accidental write operations. - For these reasons it is advisable to avoid ``as_strided`` when + For these reasons it is advisable to avoid `as_strided` when possible. + + Examples + -------- + + >>> import numpy as np + ... from numpy.lib.stride_tricks import as_strided + ... x = np.arange(10) + ... y = as_strided(x, shape=(5,), strides=(8,), check_bounds=True) + ... y + array([0, 1, 2, 3, 4]) + + When attempting to create an out-of-bounds view with ``check_bounds=True``, + as_strided will raise an error: + + >>> as_strided(x, shape=(20,), strides=(8,), check_bounds=True) + Traceback (most recent call last): + ... + ValueError: Given shape and strides would access memory out of bounds... + + When working with views, bounds are checked against the base array: + + >>> a = np.arange(1000) + ... b = a[:2] + ... c = as_strided(b, shape=(2,), strides=(400,), check_bounds=True) + ... c[0], c[1] + (0, 50) """ + # first convert input to array, possibly keeping subclass - x = np.array(x, copy=None, subok=subok) - interface = dict(x.__array_interface__) + base = np.array(x, copy=None, subok=subok) + interface = dict(base.__array_interface__) if shape is not None: interface['shape'] = tuple(shape) if strides is not None: interface['strides'] = tuple(strides) - array = np.asarray(DummyArray(interface, base=x)) + array = np.asarray(DummyArray(interface, base=base)) # The route via `__interface__` does not preserve structured # dtypes. Since dtype should remain unchanged, we set it explicitly. 
- array.dtype = x.dtype + array.dtype = base.dtype - view = _maybe_view_as_subclass(x, array) + view = _maybe_view_as_subclass(base, array) if view.flags.writeable and not writeable: view.flags.writeable = False + if check_bounds: + while isinstance(base.base, np.ndarray): + base = base.base + + base_low, base_high = byte_bounds(base) + view_low, view_high = byte_bounds(view) + + if view_low < base_low: + raise ValueError( + f"Given shape and strides would access memory out of bounds. " + f"View starts {base_low - view_low} bytes before lowest address" + ) + + if view_high > base_high: + raise ValueError( + f"Given shape and strides would access memory out of bounds. " + f"View ends {view_high - base_high} bytes after highest address" + ) + return view @@ -168,7 +226,8 @@ def sliding_window_view(x, window_shape, axis=None, *, See Also -------- lib.stride_tricks.as_strided: A lower-level and less safe routine for - creating arbitrary views from custom shape and strides. + creating arbitrary views from custom shape and strides. Use the + ``check_bounds`` parameter for bounds validation. broadcast_to: broadcast an array to a given shape. Notes diff --git a/numpy/lib/_stride_tricks_impl.pyi b/numpy/lib/_stride_tricks_impl.pyi index 77b9d60b9d7f..faba9ab80cd4 100644 --- a/numpy/lib/_stride_tricks_impl.pyi +++ b/numpy/lib/_stride_tricks_impl.pyi @@ -22,6 +22,8 @@ def as_strided[ScalarT: np.generic]( strides: Iterable[int] | None = None, subok: bool = False, writeable: bool = True, + *, + check_bounds: bool | None = None ) -> NDArray[ScalarT]: ... @overload def as_strided( @@ -30,6 +32,8 @@ def as_strided( strides: Iterable[int] | None = None, subok: bool = False, writeable: bool = True, + *, + check_bounds: bool | None = None ) -> NDArray[Any]: ... 
@overload diff --git a/numpy/lib/tests/test_stride_tricks.py b/numpy/lib/tests/test_stride_tricks.py index fb654b4cfb85..c9a475b392c3 100644 --- a/numpy/lib/tests/test_stride_tricks.py +++ b/numpy/lib/tests/test_stride_tricks.py @@ -653,3 +653,231 @@ def test_reference_types(): actual, _ = broadcast_arrays(input_array, np.ones(3)) assert_array_equal(expected, actual) + + +@pytest.mark.parametrize( + "dtype", + [ + np.int8, + np.int16, + np.int32, + np.int64, + np.uint8, + np.uint16, + np.uint32, + np.uint64, + np.float32, + np.float64, + np.complex64, + np.complex128, + ], +) +def test_as_strided_checked_different_dtypes(dtype): + """Test as_strided with check_bounds=True with different dtypes.""" + x = np.arange(10, dtype=dtype) + y = as_strided(x, shape=(5,), strides=(x.itemsize * 2,), check_bounds=True) + assert y.shape == (5,) + assert y.dtype == dtype + + +@pytest.mark.parametrize( + "size,view_size,stride_mult", + [ + (10, 5, 1), # Contiguous view + (10, 5, 2), # Every other element + (20, 10, 2), # Every other element + (100, 10, 10), # Every 10th element + ], +) +def test_as_strided_checked_1d_positive_strides(size, view_size, stride_mult): + """Test 1D arrays with positive strides.""" + x = np.arange(size, dtype=np.int64) + itemsize = x.itemsize + y = as_strided( + x, shape=(view_size,), strides=(itemsize * stride_mult,), check_bounds=True + ) + assert y.shape == (view_size,) + # Verify data correctness + expected = x[::stride_mult][:view_size] + assert_array_equal(y, expected) + + +@pytest.mark.parametrize( + "shape,window_shape", + [ + ((10,), (3,)), + ((20,), (5,)), + ((100,), (10,)), + ], +) +def test_as_strided_checked_sliding_window_1d(shape, window_shape): + """Test sliding window views in 1D.""" + x = np.arange(shape[0], dtype=np.int64) + itemsize = x.itemsize + n_windows = shape[0] - window_shape[0] + 1 + view_shape = (n_windows, window_shape[0]) + view_strides = (itemsize, itemsize) + + y = as_strided(x, shape=view_shape, strides=view_strides, 
check_bounds=True) + assert y.shape == view_shape + # Check first and last windows + assert_array_equal(y[0], x[: window_shape[0]]) + assert_array_equal(y[-1], x[-window_shape[0] :]) + + +@pytest.mark.parametrize( + "shape", + [ + (3, 4), + (5, 6), + (10, 10), + ], +) +def test_as_strided_checked_2d_default_strides(shape): + """Test 2D arrays with default strides.""" + x = np.arange(np.prod(shape), dtype=np.int64).reshape(shape) + y = as_strided(x, check_bounds=True) # Should use default shape and strides + assert_array_equal(y, x) + + +@pytest.mark.parametrize("size", [0, 1, 2, 10, 100]) +def test_as_strided_checked_zero_stride_broadcasting(size): + """Test zero strides (broadcasting a single value).""" + x = np.array([42], dtype=np.int64) + y = as_strided(x, shape=(size,), strides=(0,), check_bounds=True) + assert y.shape == (size,) + if size > 0: + assert_(np.all(y == 42)) + + +@pytest.mark.parametrize( + "size,shape,strides", + [ + # Strides too large + (10, (5,), (32,)), + (10, (10,), (16,)), + (20, (15,), (16,)), + # Shape too large for strides + (10, (20,), (8,)), + (10, (100,), (8,)), + # 2D out of bounds cases + (20, (5, 5), (80, 8)), + (20, (3, 10), (64, 8)), + # Negative strides that go before array start + (10, (5,), (-8,)), + (10, (10,), (-8,)), + (20, (5,), (-16,)), + # ND negative strides + (10, (2, 3, 4), (96, 32, -8)), + (20, (3, 4), (64, -8)), + (30, (2, 3, 4), (-96, 32, 8)), + ], +) +def test_as_strided_checked_out_of_bounds_positive_strides(size, shape, strides): + """Test that out-of-bounds positive strides raise ValueError.""" + x = np.arange(size, dtype=np.int64) + with pytest.raises(ValueError, match="out of bounds"): + as_strided(x, shape=shape, strides=strides, check_bounds=True) + + +def test_as_strided_checked_view_of_larger_array(): + """Test as_strided + + - with check_bounds=True + - considers the base array bounds, not just the view. 
+ + """ + a = np.arange(1000, dtype=np.int64) + + b = a[:2] + + # This should succeed because the underlying array has enough memory + y = as_strided(b, shape=(2,), strides=(400,), check_bounds=True) + assert_equal(y.shape, (2,)) + assert_equal(y[0], 0) + assert_equal(y[1], 50) + + +def test_as_strided_checked_view_with_offset(): + """Test as_strided + + - with check_bounds=True + - on a view that doesn't start at the beginning. + """ + a = np.arange(1000, dtype=np.int64) + + b = a[100:102] + + y = as_strided(b, shape=(2,), strides=(80,), check_bounds=True) + assert_equal(y.shape, (2,)) + assert_equal(y[0], 100) + assert_equal(y[1], 110) + + +def test_as_strided_checked_view_out_of_bounds_negative(): + """Test that negative strides on a view correctly detect out of bounds.""" + a = np.arange(1000, dtype=np.int64) + + b = a[5:7] + + with pytest.raises(ValueError, match="out of bounds"): + as_strided(b, shape=(2,), strides=(-48,), check_bounds=True) + + +def test_as_strided_checked_view_out_of_bounds_positive(): + """Test that positive strides on a view correctly detect out of bounds.""" + a = np.arange(100, dtype=np.int64) + + b = a[95:97] + + with pytest.raises(ValueError, match="out of bounds"): + as_strided(b, shape=(2,), strides=(200,), check_bounds=True) + + +def test_as_strided_checked_nested_views(): + """Test as_strided with check_bounds=True on a view of a view.""" + a = np.arange(1000, dtype=np.int64) + b = a[10:100] + c = b[5:10] + + y = as_strided(c, shape=(2,), strides=(160,), check_bounds=True) + assert_equal(y.shape, (2,)) + assert_equal(y[0], 15) + assert_equal(y[1], 35) + + +def test_as_strided_checked_sliced_array(): + """Test various slicing scenarios.""" + a = np.arange(200, dtype=np.int64) + + b = a[10:20] + y = as_strided(b, shape=(5,), strides=(16,), check_bounds=True) + assert_equal(y.shape, (5,)) + + c = a[::2] + y = as_strided(c, shape=(10,), strides=(16,), check_bounds=True) + assert_equal(y.shape, (10,)) + + +@pytest.mark.parametrize( + 
"start,stop,stride_bytes,should_pass", + [ + (0, 10, 552, True), + (0, 10, 552 + 1, True), + (90, 95, 72, True), + (90, 95, 72 + 1, False), + (5, 7, -40, True), + (5, 7, -40 - 1, False), + ], +) +def test_as_strided_checked_view_parametrized(start, stop, stride_bytes, should_pass): + """Parametrized test for various view and stride combinations.""" + a = np.arange(100, dtype=np.int64) + b = a[start:stop] + + if should_pass: + y = as_strided(b, shape=(2,), strides=(stride_bytes,), check_bounds=True) + assert_equal(y.shape, (2,)) + else: + with pytest.raises(ValueError, match="out of bounds"): + as_strided(b, shape=(2,), strides=(stride_bytes,), check_bounds=True) From 41f3673b434158cffe85f727ff435988d35ab6f6 Mon Sep 17 00:00:00 2001 From: Antareep Sarkar Date: Wed, 25 Feb 2026 20:29:42 +0530 Subject: [PATCH 1426/1718] BUG: fix infinite recursion in np.ma.flatten_structured_array (#30855) --- numpy/ma/core.py | 2 +- numpy/ma/tests/test_core.py | 7 +++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 32c2f07c162b..6e3b3a54c7c8 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -2590,7 +2590,7 @@ def flatten_sequence(iterable): """ for elm in iter(iterable): - if hasattr(elm, '__iter__'): + if hasattr(elm, "__iter__") and not isinstance(elm, (str, bytes)): yield from flatten_sequence(elm) else: yield elm diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index 067fce2f0777..2dc7fe5a9b17 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -998,6 +998,13 @@ def test_flatten_structured_array(self): control = np.array([[[1., 1.], ], [[2., 2.], ]], dtype=float) assert_equal(test, control) assert_equal(test.dtype, control.dtype) + # for strings + ndtype = [('a', 'U5'), ('b', [('c', 'U5')])] + arr = np.array([('NumPy', ('array',)), ('array', ('numpy',))], dtype=ndtype) + test = flatten_structured_array(arr) + control = np.array([['NumPy', 'array'], ['array', 'numpy']], 
dtype='U5') + assert_equal(test, control) + assert_equal(test.dtype, control.dtype) def test_void0d(self): # Test creating a mvoid object From 47c2188142407ecd95b3cbd3260b086c7dcfc9db Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 25 Feb 2026 19:08:11 +0100 Subject: [PATCH 1427/1718] MAINT: bump ``pyrefly`` to ``0.53.0`` --- requirements/typing_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/typing_requirements.txt b/requirements/typing_requirements.txt index 57a0e41e1d4a..5b5d3c5b4cab 100644 --- a/requirements/typing_requirements.txt +++ b/requirements/typing_requirements.txt @@ -3,4 +3,4 @@ -r test_requirements.txt mypy==1.19.1 -pyrefly==0.49.0 +pyrefly==0.53.0 From d834baccc6f8816c4c997b080380c1fe39170e8c Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 25 Feb 2026 19:08:28 +0100 Subject: [PATCH 1428/1718] TYP: ignore new inevitable pyrefly errors --- numpy/_core/memmap.pyi | 2 +- numpy/polynomial/chebyshev.pyi | 2 +- numpy/polynomial/hermite.pyi | 2 +- numpy/polynomial/hermite_e.pyi | 2 +- numpy/polynomial/laguerre.pyi | 2 +- numpy/polynomial/legendre.pyi | 2 +- numpy/polynomial/polynomial.pyi | 2 +- 7 files changed, 7 insertions(+), 7 deletions(-) diff --git a/numpy/_core/memmap.pyi b/numpy/_core/memmap.pyi index 766c4581aa2a..bdb3dc721de2 100644 --- a/numpy/_core/memmap.pyi +++ b/numpy/_core/memmap.pyi @@ -31,7 +31,7 @@ class _SupportsFileMethodsRW(SupportsWrite[bytes], _SupportsFileMethods, Protoco class memmap(np.ndarray[_ShapeT_co, _DTypeT_co]): __module__: Literal["numpy"] = "numpy" # pyrefly: ignore[bad-override] - __array_priority__: ClassVar[float] = 100.0 # pyright: ignore[reportIncompatibleMethodOverride] + __array_priority__: ClassVar[float] = 100.0 # pyright: ignore[reportIncompatibleMethodOverride] # pyrefly: ignore[bad-override] filename: Final[str | None] offset: Final[int] diff --git a/numpy/polynomial/chebyshev.pyi b/numpy/polynomial/chebyshev.pyi index 157b0e5d0f46..857ce8f6f377 100644 --- 
a/numpy/polynomial/chebyshev.pyi +++ b/numpy/polynomial/chebyshev.pyi @@ -136,7 +136,7 @@ def chebinterpolate[CoefScalarT: np.number | np.bool | np.object_]( ) -> npt.NDArray[CoefScalarT]: ... class Chebyshev(ABCPolyBase[L["T"]]): - basis_name: ClassVar[L["T"]] = "T" # pyright: ignore[reportIncompatibleMethodOverride] + basis_name: ClassVar[L["T"]] = "T" # pyright: ignore[reportIncompatibleMethodOverride] # pyrefly: ignore[bad-override] domain: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] window: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] diff --git a/numpy/polynomial/hermite.pyi b/numpy/polynomial/hermite.pyi index 60f4af5a1fd7..17375c9210c4 100644 --- a/numpy/polynomial/hermite.pyi +++ b/numpy/polynomial/hermite.pyi @@ -102,6 +102,6 @@ hermgauss: Final[_FuncGauss] = ... hermweight: Final[_FuncWeight] = ... class Hermite(ABCPolyBase[L["H"]]): - basis_name: ClassVar[L["H"]] = "H" # pyright: ignore[reportIncompatibleMethodOverride] + basis_name: ClassVar[L["H"]] = "H" # pyright: ignore[reportIncompatibleMethodOverride] # pyrefly: ignore[bad-override] domain: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] window: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] diff --git a/numpy/polynomial/hermite_e.pyi b/numpy/polynomial/hermite_e.pyi index 6997c8a381ef..f1ebf9066a4f 100644 --- a/numpy/polynomial/hermite_e.pyi +++ b/numpy/polynomial/hermite_e.pyi @@ -102,6 +102,6 @@ hermegauss: Final[_FuncGauss] = ... hermeweight: Final[_FuncWeight] = ... class HermiteE(ABCPolyBase[L["He"]]): - basis_name: ClassVar[L["He"]] = "He" # pyright: ignore[reportIncompatibleMethodOverride] + basis_name: ClassVar[L["He"]] = "He" # pyright: ignore[reportIncompatibleMethodOverride] # pyrefly: ignore[bad-override] domain: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] window: _Array2[np.float64 | Any] = ... 
# pyright: ignore[reportIncompatibleMethodOverride] diff --git a/numpy/polynomial/laguerre.pyi b/numpy/polynomial/laguerre.pyi index 8b70b899ed59..48fecfd07efe 100644 --- a/numpy/polynomial/laguerre.pyi +++ b/numpy/polynomial/laguerre.pyi @@ -95,6 +95,6 @@ laggauss: Final[_FuncGauss] = ... lagweight: Final[_FuncWeight] = ... class Laguerre(ABCPolyBase[L["L"]]): - basis_name: ClassVar[L["L"]] = "L" # pyright: ignore[reportIncompatibleMethodOverride] + basis_name: ClassVar[L["L"]] = "L" # pyright: ignore[reportIncompatibleMethodOverride] # pyrefly: ignore[bad-override] domain: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] window: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] diff --git a/numpy/polynomial/legendre.pyi b/numpy/polynomial/legendre.pyi index 53f8f7c210fa..75fa47b44d3e 100644 --- a/numpy/polynomial/legendre.pyi +++ b/numpy/polynomial/legendre.pyi @@ -95,6 +95,6 @@ leggauss: Final[_FuncGauss] = ... legweight: Final[_FuncWeight] = ... class Legendre(ABCPolyBase[L["P"]]): - basis_name: ClassVar[L["P"]] = "P" # pyright: ignore[reportIncompatibleMethodOverride] + basis_name: ClassVar[L["P"]] = "P" # pyright: ignore[reportIncompatibleMethodOverride] # pyrefly: ignore[bad-override] domain: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] window: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] diff --git a/numpy/polynomial/polynomial.pyi b/numpy/polynomial/polynomial.pyi index 86f288468a15..c394d56affed 100644 --- a/numpy/polynomial/polynomial.pyi +++ b/numpy/polynomial/polynomial.pyi @@ -104,6 +104,6 @@ polycompanion: Final[_FuncCompanion] = ... polyroots: Final[_FuncRoots] = ... 
class Polynomial(ABCPolyBase[None]): - basis_name: ClassVar[None] = None # pyright: ignore[reportIncompatibleMethodOverride] + basis_name: ClassVar[None] = None # pyright: ignore[reportIncompatibleMethodOverride] # pyrefly: ignore[bad-override] domain: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] window: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] From 805ef21ac208524dc77ba7abf010e09be3dfaae7 Mon Sep 17 00:00:00 2001 From: Ganesh Kathiresan Date: Thu, 26 Feb 2026 16:32:35 +0530 Subject: [PATCH 1429/1718] CI: Use `spin` for debug builds (#30868) --- .github/workflows/linux.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index ef76656eb69c..7522638e01ad 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -94,18 +94,18 @@ jobs: with: python-version: '3.14' debug: true - - name: Build and install NumPy + - name: Install dependencies run: | python --version pip install -U pip - pip install . 
-v -Csetup-args=-Dbuildtype=debug -Csetup-args=-Dallow-noblas=true - - name: Install test dependencies - run: | + pip install -r requirements/build_requirements.txt pip install -r requirements/test_requirements.txt + - name: Build NumPy debug + run: | + spin build -- -Dbuildtype=debug -Dallow-noblas=true - name: Run test suite run: | - cd tools - pytest --timeout=600 --durations=10 --pyargs numpy -m "not slow" + spin test -- --timeout=600 --durations=10 all_versions: # like the smoke tests but runs on more Python versions From 089ceb74a8d7c7e96c34dc01d930693d90e6b0f5 Mon Sep 17 00:00:00 2001 From: stratakis Date: Thu, 26 Feb 2026 15:17:50 +0100 Subject: [PATCH 1430/1718] BUG: Fix buffer overrun in CPU baseline validation (#30877) Co-authored-by: Sebastian Berg --- numpy/_core/src/common/npy_cpu_features.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/numpy/_core/src/common/npy_cpu_features.c b/numpy/_core/src/common/npy_cpu_features.c index 91dafa96de0a..faffb7fc0781 100644 --- a/numpy/_core/src/common/npy_cpu_features.c +++ b/numpy/_core/src/common/npy_cpu_features.c @@ -235,14 +235,13 @@ npy__cpu_validate_baseline(void) #define NPY__CPU_VALIDATE_CB(FEATURE, DUMMY) \ if (!npy__cpu_have[NPY_CAT(NPY_CPU_FEATURE_, FEATURE)]) { \ - const int size = sizeof(NPY_TOSTRING(FEATURE)); \ + const int size = sizeof(NPY_TOSTRING(FEATURE)) - 1; \ memcpy(fptr, NPY_TOSTRING(FEATURE), size); \ fptr[size] = ' '; fptr += size + 1; \ } NPY_WITH_CPU_BASELINE_CALL(NPY__CPU_VALIDATE_CB, DUMMY) // extra arg for msvc - *fptr = '\0'; - if (baseline_failure[0] != '\0') { + if (fptr > baseline_failure) { *(fptr-1) = '\0'; // trim the last space PyErr_Format(PyExc_RuntimeError, "NumPy was built with baseline optimizations: \n" @@ -448,7 +447,7 @@ npy__cpu_cpuid_count(int reg[4], int func_id, int count) static void npy__cpu_cpuid(int reg[4], int func_id) { - return npy__cpu_cpuid_count(reg, func_id, 0); + npy__cpu_cpuid_count(reg, func_id, 0); } static void From 
980c1088b63765e915065c34b9584a2483bbb2a5 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 26 Feb 2026 14:43:41 -0700 Subject: [PATCH 1431/1718] CI: unpin CPython in pixi CI (#30889) Co-authored-by: Lucas Colley --- numpy/_core/src/common/pythoncapi-compat | 2 +- numpy/_core/src/multiarray/multiarraymodule.c | 8 +------- numpy/_core/src/umath/extobj.c | 8 -------- pixi-packages/asan/pixi.toml | 1 - 4 files changed, 2 insertions(+), 17 deletions(-) diff --git a/numpy/_core/src/common/pythoncapi-compat b/numpy/_core/src/common/pythoncapi-compat index 8636bccf29ad..90c06a4cae55 160000 --- a/numpy/_core/src/common/pythoncapi-compat +++ b/numpy/_core/src/common/pythoncapi-compat @@ -1 +1 @@ -Subproject commit 8636bccf29adfa23463f810b3c2830f7cff1e933 +Subproject commit 90c06a4cae557bdbfa4f231a781d2b5c1a8f6d1c diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index 8afad4ed93cd..2f381a1c7aa6 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -5170,13 +5170,7 @@ _multiarray_umath_exec(PyObject *m) { if (PyDataMem_DefaultHandler == NULL) { return -1; } -#ifdef Py_GIL_DISABLED - if (PyUnstable_SetImmortal(PyDataMem_DefaultHandler) == 0) { - PyErr_SetString(PyExc_RuntimeError, - "Could not mark memory handler capsule as immortal"); - return -1; - } -#endif + /* * Initialize the context-local current handler * with the default PyDataMem_Handler capsule. 
diff --git a/numpy/_core/src/umath/extobj.c b/numpy/_core/src/umath/extobj.c index 77a76873d20f..91b0b4c62d30 100644 --- a/numpy/_core/src/umath/extobj.c +++ b/numpy/_core/src/umath/extobj.c @@ -15,7 +15,6 @@ #include "numpy/ufuncobject.h" #include "common.h" -#include "npy_pycompat.h" #define UFUNC_ERR_IGNORE 0 @@ -146,13 +145,6 @@ init_extobj(void) if (npy_static_pydata.default_extobj_capsule == NULL) { return -1; } -#ifdef Py_GIL_DISABLED - if (PyUnstable_SetImmortal(npy_static_pydata.default_extobj_capsule) == 0) { - PyErr_SetString(PyExc_RuntimeError, "Could not mark extobj capsule as immortal"); - Py_CLEAR(npy_static_pydata.default_extobj_capsule); - return -1; - } -#endif npy_static_pydata.npy_extobj_contextvar = PyContextVar_New( "numpy.ufunc.extobj", npy_static_pydata.default_extobj_capsule); if (npy_static_pydata.npy_extobj_contextvar == NULL) { diff --git a/pixi-packages/asan/pixi.toml b/pixi-packages/asan/pixi.toml index 19a6a6553806..b15b7691cd9e 100644 --- a/pixi-packages/asan/pixi.toml +++ b/pixi-packages/asan/pixi.toml @@ -19,7 +19,6 @@ extra-args = ["-Csetup-args=-Db_sanitize=address", "-Csetup-args=-Dbuildtype=deb [package.host-dependencies] python.git = "https://github.com/python/cpython" python.subdirectory = "Tools/pixi-packages/asan" -python.rev = "8bb5b6e8ce61da41b5affd5eb12d9cc46b5af448" meson-python = "*" cython = "*" From 3b22285f299e98f025e998e20fcb10ee25aec5b9 Mon Sep 17 00:00:00 2001 From: Hugo van Kemenade <1324225+hugovk@users.noreply.github.com> Date: Fri, 27 Feb 2026 17:51:36 +0200 Subject: [PATCH 1432/1718] Fix os.path.commonprefix deprecation --- tools/c_coverage/c_coverage_report.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/c_coverage/c_coverage_report.py b/tools/c_coverage/c_coverage_report.py index 917b977dc195..d188b3280e95 100755 --- a/tools/c_coverage/c_coverage_report.py +++ b/tools/c_coverage/c_coverage_report.py @@ -84,7 +84,7 @@ def get_file(self, path): if self.prefix is None: self.prefix = 
path else: - self.prefix = os.path.commonprefix([self.prefix, path]) + self.prefix = os.path.commonpath([self.prefix, path]) return self.files[path] def clean_path(self, path): From de9a2be1857defed4e8213089589647bf84f0fa8 Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Fri, 27 Feb 2026 14:23:27 -0500 Subject: [PATCH 1433/1718] BUG: Fix busdaycalendar's handling of a bool array weekmask. (#30885) --- numpy/_core/src/multiarray/datetime_busdaycal.c | 6 +++--- numpy/_core/tests/test_datetime.py | 5 +++++ 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/numpy/_core/src/multiarray/datetime_busdaycal.c b/numpy/_core/src/multiarray/datetime_busdaycal.c index 3a7e3a383dca..4c6986544f6c 100644 --- a/numpy/_core/src/multiarray/datetime_busdaycal.c +++ b/numpy/_core/src/multiarray/datetime_busdaycal.c @@ -159,15 +159,15 @@ PyArray_WeekMaskConverter(PyObject *weekmask_in, npy_bool *weekmask) int i; for (i = 0; i < 7; ++i) { - long val; + int val; PyObject *f = PySequence_GetItem(obj, i); if (f == NULL) { Py_DECREF(obj); return 0; } - val = PyLong_AsLong(f); - if (error_converting(val)) { + val = PyObject_IsTrue(f); + if (val == -1) { Py_DECREF(f); Py_DECREF(obj); return 0; diff --git a/numpy/_core/tests/test_datetime.py b/numpy/_core/tests/test_datetime.py index 7515aacc6d1b..da964923d2c6 100644 --- a/numpy/_core/tests/test_datetime.py +++ b/numpy/_core/tests/test_datetime.py @@ -2220,6 +2220,11 @@ def test_datetime_busdaycalendar(self): bdd = np.busdaycalendar(weekmask="0011001") assert_equal(bdd.weekmask, np.array([0, 0, 1, 1, 0, 0, 1], dtype='?')) + # Check length 7 bool array. + mask = np.array([False, True, True, True, True, False, False]) + bdd = np.busdaycalendar(weekmask=mask) + assert_equal(bdd.weekmask, mask, strict=True) + # Check length 7 string weekmask. 
bdd = np.busdaycalendar(weekmask="Mon Tue") assert_equal(bdd.weekmask, np.array([1, 1, 0, 0, 0, 0, 0], dtype='?')) From fb3b1788ac87e8b6c4322281604f28933bd54d96 Mon Sep 17 00:00:00 2001 From: Matthieu Darbois Date: Fri, 27 Feb 2026 20:30:47 +0100 Subject: [PATCH 1434/1718] CI: add Linux x86_64 clang ASAN+LSAN job (#30800) --- .github/workflows/compiler_sanitizers.yml | 84 +++++++++++++++++++++++ numpy/_core/tests/test_dtype.py | 4 ++ numpy/_core/tests/test_regression.py | 2 + numpy/f2py/tests/test_f2py2e.py | 4 +- tools/ci/lsan_suppressions.txt | 33 +++++++++ 5 files changed, 126 insertions(+), 1 deletion(-) create mode 100644 tools/ci/lsan_suppressions.txt diff --git a/.github/workflows/compiler_sanitizers.yml b/.github/workflows/compiler_sanitizers.yml index 146fb5d15b34..aeb1600b13a5 100644 --- a/.github/workflows/compiler_sanitizers.yml +++ b/.github/workflows/compiler_sanitizers.yml @@ -108,3 +108,87 @@ jobs: python -m spin test \ `find numpy -name "test*.py" | xargs grep -E -l "import threading|ThreadPoolExecutor" | tr '\n' ' '` \ -- -v -s --timeout=600 --durations=10 + + clang_ASAN: + # To enable this workflow on a fork, comment out: + if: github.repository == 'numpy/numpy' + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + with: + submodules: recursive + fetch-tags: true + persist-credentials: false + + - name: Set up pyenv + run: | + git clone https://github.com/pyenv/pyenv.git "$HOME/.pyenv" + PYENV_ROOT="$HOME/.pyenv" + PYENV_BIN="$PYENV_ROOT/bin" + PYENV_SHIMS="$PYENV_ROOT/shims" + echo "$PYENV_BIN" >> $GITHUB_PATH + echo "$PYENV_SHIMS" >> $GITHUB_PATH + echo "PYENV_ROOT=$PYENV_ROOT" >> $GITHUB_ENV + + - name: Check pyenv is working + run: pyenv --version + + - name: Install python & numpy build dependencies + run: | + sudo apt-get update + sudo apt-get -yq install \ + libb2-dev \ + libbz2-dev \ + libffi-dev \ + libgdbm-dev \ + libgdbm-compat-dev \ + liblzma-dev \ + libncurses5-dev \ + 
libopenblas-dev \ + libreadline6-dev \ + libsqlite3-dev \ + libssl-dev \ + libzstd-dev \ + lzma-dev \ + ninja-build \ + tk-dev \ + uuid-dev \ + zlib1g-dev + + # TODO install as a regular ubuntu package in the previous step once + # ubuntu 26.04 is available in GHA + - name: Install clang 21 + run: | + curl -fsSLo /tmp/llvm.sh https://apt.llvm.org/llvm.sh + chmod +x /tmp/llvm.sh + sudo /tmp/llvm.sh 21 + CC=/usr/bin/clang-21 + CXX=/usr/bin/clang++-21 + echo "CC=$CC" >> $GITHUB_ENV + echo "CXX=$CXX" >> $GITHUB_ENV + sudo update-alternatives --install /usr/bin/cc cc $CC 100 + sudo update-alternatives --install /usr/bin/c++ c++ $CXX 100 + + - name: Build Python with address sanitizer + run: | + CONFIGURE_OPTS="--with-address-sanitizer" pyenv install 3.14 + pyenv global 3.14 + + - name: Install dependencies + run: | + pip install -r requirements/build_requirements.txt + pip install -r requirements/ci_requirements.txt + pip install -r requirements/test_requirements.txt + # xdist captures stdout/stderr, but we want the ASAN output + pip uninstall -y pytest-xdist + + - name: Build + run: python -m spin build -j4 -- -Db_sanitize=address,leak + + - name: Test + run: | + # pass -s to pytest to see ASAN errors and warnings, otherwise pytest captures them + export ASAN_OPTIONS="detect_leaks=1:symbolize=1:strict_init_order=true:allocator_may_return_null=1:use_sigaltstack=0" + export LSAN_OPTIONS="suppressions=$GITHUB_WORKSPACE/tools/ci/lsan_suppressions.txt" + python -m spin test -- -v -s --timeout=600 --durations=10 diff --git a/numpy/_core/tests/test_dtype.py b/numpy/_core/tests/test_dtype.py index 0aa7a95d8835..7b5966d0a56b 100644 --- a/numpy/_core/tests/test_dtype.py +++ b/numpy/_core/tests/test_dtype.py @@ -2,6 +2,7 @@ import ctypes import inspect import operator +import os import pickle import sys import types @@ -1518,6 +1519,7 @@ class dt: with pytest.raises(ValueError): np.dtype(dt_instance) + @pytest.mark.xfail("LSAN_OPTIONS" in os.environ, reason="known leak", 
run=False) def test_void_subtype(self): class dt(np.void): # This code path is fully untested before, so it is unclear @@ -1897,6 +1899,7 @@ class TestUserDType: @pytest.mark.thread_unsafe( reason="crashes when GIL disabled, dtype setup is thread-unsafe", ) + @pytest.mark.xfail("LSAN_OPTIONS" in os.environ, reason="known leak", run=False) def test_custom_structured_dtype(self): class mytype: pass @@ -1920,6 +1923,7 @@ class mytype: @pytest.mark.thread_unsafe( reason="crashes when GIL disabled, dtype setup is thread-unsafe", ) + @pytest.mark.xfail("LSAN_OPTIONS" in os.environ, reason="known leak", run=False) def test_custom_structured_dtype_errors(self): class mytype: pass diff --git a/numpy/_core/tests/test_regression.py b/numpy/_core/tests/test_regression.py index 24ce9330005d..27a97faad19f 100644 --- a/numpy/_core/tests/test_regression.py +++ b/numpy/_core/tests/test_regression.py @@ -1,5 +1,6 @@ import copy import gc +import os import pickle import sys import tempfile @@ -1908,6 +1909,7 @@ def test_pickle_bytes_overwrite(self): @pytest.mark.filterwarnings( "ignore:.*align should be passed:numpy.exceptions.VisibleDeprecationWarning", ) + @pytest.mark.xfail("LSAN_OPTIONS" in os.environ, reason="known leak", run=False) def test_pickle_py2_array_latin1_hack(self): # Check that unpickling hacks in Py3 that support # encoding='latin1' work correctly. 
diff --git a/numpy/f2py/tests/test_f2py2e.py b/numpy/f2py/tests/test_f2py2e.py index bd7064fd348a..90063d474a33 100644 --- a/numpy/f2py/tests/test_f2py2e.py +++ b/numpy/f2py/tests/test_f2py2e.py @@ -1,3 +1,4 @@ +import os import platform import re import shlex @@ -831,7 +832,8 @@ def test_freethreading_compatible(hello_world_f90, monkeypatch): rout = subprocess.run(cmd_run, capture_output=True, encoding='UTF-8') eout = ' Hello World\n' assert rout.stdout == eout - assert rout.stderr == "" + if "LSAN_OPTIONS" not in os.environ: + assert rout.stderr == "" assert rout.returncode == 0 diff --git a/tools/ci/lsan_suppressions.txt b/tools/ci/lsan_suppressions.txt new file mode 100644 index 000000000000..74e2b335f575 --- /dev/null +++ b/tools/ci/lsan_suppressions.txt @@ -0,0 +1,33 @@ +# This file contains suppressions for the LSAN tool +# +# Reference: https://github.com/google/sanitizers/wiki/AddressSanitizerLeakSanitizer#suppressions + +# 2 leaks when importing "numpy.exceptions" in initialize_static_globals +# (check the duplicate frame number for the second leak) +#0 0xffffb64476d0 in malloc ../../../../src/libsanitizer/asan/asan_malloc_linux.cpp:69 +#1 0xffffb598e8d0 in PyFloat_FromDouble Objects/floatobject.c:128 +#2 0xffffb5dac0fc in fill_time Modules/posixmodule.c:2622 +#3 0xffffb5dc137c in _pystat_fromstructstat Modules/posixmodule.c:2740 +#3 0xffffb5dc13b4 in _pystat_fromstructstat Modules/posixmodule.c:2743 +#4 0xffffb5dc2d2c in posix_do_stat Modules/posixmodule.c:2868 +#5 0xffffb5dc331c in os_stat_impl Modules/posixmodule.c:3235 +#6 0xffffb5dc331c in os_stat Modules/clinic/posixmodule.c.h:105 +#7 0xffffb58484d8 in _PyEval_EvalFrameDefault Python/generated_cases.c.h:2383 +#8 0xffffb5c18174 in _PyEval_EvalFrame Include/internal/pycore_ceval.h:121 +#9 0xffffb5c18174 in _PyEval_Vector Python/ceval.c:2083 +#10 0xffffb593ccf4 in _PyObject_VectorcallTstate Include/internal/pycore_call.h:169 +#11 0xffffb593ccf4 in object_vacall Objects/call.c:819 +#12 0xffffb593d168 in 
PyObject_CallMethodObjArgs Objects/call.c:880 +#13 0xffffb5cbc6f0 in import_find_and_load Python/import.c:3737 +#14 0xffffb5cbc6f0 in PyImport_ImportModuleLevelObject Python/import.c:3819 +#15 0xffffb5bffca0 in builtin___import___impl Python/bltinmodule.c:285 +#16 0xffffb5bffca0 in builtin___import__ Python/clinic/bltinmodule.c.h:110 +#17 0xffffb593db9c in _PyObject_VectorcallTstate Include/internal/pycore_call.h:169 +#18 0xffffb593db9c in _PyObject_CallFunctionVa Objects/call.c:552 +#19 0xffffb593df38 in PyObject_CallFunction Objects/call.c:574 +#20 0xffffb5cbdb5c in PyImport_Import Python/import.c:4011 +#21 0xffffb5cbe17c in PyImport_ImportModule Python/import.c:3434 +#22 0xffffb1c61b44 in npy_import ../numpy/_core/src/common/npy_import.h:71 +#23 0xffffb1c61b44 in initialize_static_globals ../numpy/_core/src/multiarray/npy_static_data.c:124 + +leak:initialize_static_globals From e87a3c5fef53f5e1256f5d6f50561592da82992a Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 27 Feb 2026 13:16:32 -0700 Subject: [PATCH 1435/1718] CI: pin pixi asan build to 3.15.0a6 and re-introduce SetImmortal bindings (#30901) --- numpy/_core/src/common/pythoncapi-compat | 2 +- numpy/_core/src/multiarray/multiarraymodule.c | 8 +++++++- numpy/_core/src/umath/extobj.c | 8 ++++++++ pixi-packages/asan/pixi.toml | 2 ++ 4 files changed, 18 insertions(+), 2 deletions(-) diff --git a/numpy/_core/src/common/pythoncapi-compat b/numpy/_core/src/common/pythoncapi-compat index 90c06a4cae55..8636bccf29ad 160000 --- a/numpy/_core/src/common/pythoncapi-compat +++ b/numpy/_core/src/common/pythoncapi-compat @@ -1 +1 @@ -Subproject commit 90c06a4cae557bdbfa4f231a781d2b5c1a8f6d1c +Subproject commit 8636bccf29adfa23463f810b3c2830f7cff1e933 diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index 2f381a1c7aa6..8afad4ed93cd 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -5170,7 
+5170,13 @@ _multiarray_umath_exec(PyObject *m) { if (PyDataMem_DefaultHandler == NULL) { return -1; } - +#ifdef Py_GIL_DISABLED + if (PyUnstable_SetImmortal(PyDataMem_DefaultHandler) == 0) { + PyErr_SetString(PyExc_RuntimeError, + "Could not mark memory handler capsule as immortal"); + return -1; + } +#endif /* * Initialize the context-local current handler * with the default PyDataMem_Handler capsule. diff --git a/numpy/_core/src/umath/extobj.c b/numpy/_core/src/umath/extobj.c index 91b0b4c62d30..77a76873d20f 100644 --- a/numpy/_core/src/umath/extobj.c +++ b/numpy/_core/src/umath/extobj.c @@ -15,6 +15,7 @@ #include "numpy/ufuncobject.h" #include "common.h" +#include "npy_pycompat.h" #define UFUNC_ERR_IGNORE 0 @@ -145,6 +146,13 @@ init_extobj(void) if (npy_static_pydata.default_extobj_capsule == NULL) { return -1; } +#ifdef Py_GIL_DISABLED + if (PyUnstable_SetImmortal(npy_static_pydata.default_extobj_capsule) == 0) { + PyErr_SetString(PyExc_RuntimeError, "Could not mark extobj capsule as immortal"); + Py_CLEAR(npy_static_pydata.default_extobj_capsule); + return -1; + } +#endif npy_static_pydata.npy_extobj_contextvar = PyContextVar_New( "numpy.ufunc.extobj", npy_static_pydata.default_extobj_capsule); if (npy_static_pydata.npy_extobj_contextvar == NULL) { diff --git a/pixi-packages/asan/pixi.toml b/pixi-packages/asan/pixi.toml index b15b7691cd9e..ce25939a0fcb 100644 --- a/pixi-packages/asan/pixi.toml +++ b/pixi-packages/asan/pixi.toml @@ -19,6 +19,8 @@ extra-args = ["-Csetup-args=-Db_sanitize=address", "-Csetup-args=-Dbuildtype=deb [package.host-dependencies] python.git = "https://github.com/python/cpython" python.subdirectory = "Tools/pixi-packages/asan" +# v3.15.0a6 +python.rev = "15b216f30d0445469ec31bc7509fcc55a216ef7c" meson-python = "*" cython = "*" From 2a1bfb5d5b7a49aaf3e33ee29be35965d524266e Mon Sep 17 00:00:00 2001 From: Chanda Decker Date: Fri, 27 Feb 2026 21:10:29 -0800 Subject: [PATCH 1436/1718] Removing reference to Python 2 from count_nonzero() 
docstring --- numpy/_core/numeric.py | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/numpy/_core/numeric.py b/numpy/_core/numeric.py index f2b79002cfc1..cb0ccda32dbb 100644 --- a/numpy/_core/numeric.py +++ b/numpy/_core/numeric.py @@ -484,16 +484,11 @@ def _count_nonzero_dispatcher(a, axis=None, *, keepdims=None): def count_nonzero(a, axis=None, *, keepdims=False): """ Counts the number of non-zero values in the array ``a``. - - The word "non-zero" is in reference to the Python 2.x - built-in method ``__nonzero__()`` (renamed ``__bool__()`` - in Python 3.x) of Python objects that tests an object's - "truthfulness". For example, any number is considered - truthful if it is nonzero, whereas any string is considered - truthful if it is not the empty string. Thus, this function - (recursively) counts how many elements in ``a`` (and in - sub-arrays thereof) have their ``__nonzero__()`` or ``__bool__()`` - method evaluated to ``True``. + + A non-zero value is one that evaluates to truthful in a boolean + context, including any non-zero number and any string that + is not empty. This function recursively counts how many elements + in ``a`` (and its sub-arrays) are non-zero values. Parameters ---------- From 2f98eb395e05b7081f954efe2b0e73322bfcf5ae Mon Sep 17 00:00:00 2001 From: Chanda Decker Date: Fri, 27 Feb 2026 22:38:41 -0800 Subject: [PATCH 1437/1718] Fixed linting issues found by CI. --- numpy/_core/numeric.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/_core/numeric.py b/numpy/_core/numeric.py index cb0ccda32dbb..6bd03ae75c5d 100644 --- a/numpy/_core/numeric.py +++ b/numpy/_core/numeric.py @@ -484,10 +484,10 @@ def _count_nonzero_dispatcher(a, axis=None, *, keepdims=None): def count_nonzero(a, axis=None, *, keepdims=False): """ Counts the number of non-zero values in the array ``a``. 
- + A non-zero value is one that evaluates to truthful in a boolean - context, including any non-zero number and any string that - is not empty. This function recursively counts how many elements + context, including any non-zero number and any string that + is not empty. This function recursively counts how many elements in ``a`` (and its sub-arrays) are non-zero values. Parameters From 945d5a628566422856a856d5e567832c2a53bbe8 Mon Sep 17 00:00:00 2001 From: mayeut Date: Sun, 1 Feb 2026 18:21:14 +0100 Subject: [PATCH 1438/1718] CI: use nascheme/cpython_sanity for clang ASAN nascheme/cpython_sanity now provides an ASAN GIL-enabled python image. This image can be used similarly as is done for the clang_TSAN job and thus speed-up the clang_ASAN job as building python is no longer necessary. --- .github/workflows/compiler_sanitizers.yml | 97 ++++++----------------- 1 file changed, 23 insertions(+), 74 deletions(-) diff --git a/.github/workflows/compiler_sanitizers.yml b/.github/workflows/compiler_sanitizers.yml index aeb1600b13a5..013b1a3b7831 100644 --- a/.github/workflows/compiler_sanitizers.yml +++ b/.github/workflows/compiler_sanitizers.yml @@ -113,82 +113,31 @@ jobs: # To enable this workflow on a fork, comment out: if: github.repository == 'numpy/numpy' runs-on: ubuntu-latest + container: + image: ghcr.io/nascheme/cpython-asan:3.14 + options: --shm-size=2g # increase memory for large matrix ops steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - with: - submodules: recursive - fetch-tags: true - persist-credentials: false - - - name: Set up pyenv - run: | - git clone https://github.com/pyenv/pyenv.git "$HOME/.pyenv" - PYENV_ROOT="$HOME/.pyenv" - PYENV_BIN="$PYENV_ROOT/bin" - PYENV_SHIMS="$PYENV_ROOT/shims" - echo "$PYENV_BIN" >> $GITHUB_PATH - echo "$PYENV_SHIMS" >> $GITHUB_PATH - echo "PYENV_ROOT=$PYENV_ROOT" >> $GITHUB_ENV - - - name: Check pyenv is working - run: pyenv --version - - - name: Install python & numpy build 
dependencies - run: | - sudo apt-get update - sudo apt-get -yq install \ - libb2-dev \ - libbz2-dev \ - libffi-dev \ - libgdbm-dev \ - libgdbm-compat-dev \ - liblzma-dev \ - libncurses5-dev \ - libopenblas-dev \ - libreadline6-dev \ - libsqlite3-dev \ - libssl-dev \ - libzstd-dev \ - lzma-dev \ - ninja-build \ - tk-dev \ - uuid-dev \ - zlib1g-dev - - # TODO install as a regular ubuntu package in the previous step once - # ubuntu 26.04 is available in GHA - - name: Install clang 21 - run: | - curl -fsSLo /tmp/llvm.sh https://apt.llvm.org/llvm.sh - chmod +x /tmp/llvm.sh - sudo /tmp/llvm.sh 21 - CC=/usr/bin/clang-21 - CXX=/usr/bin/clang++-21 - echo "CC=$CC" >> $GITHUB_ENV - echo "CXX=$CXX" >> $GITHUB_ENV - sudo update-alternatives --install /usr/bin/cc cc $CC 100 - sudo update-alternatives --install /usr/bin/c++ c++ $CXX 100 - - - name: Build Python with address sanitizer - run: | - CONFIGURE_OPTS="--with-address-sanitizer" pyenv install 3.14 - pyenv global 3.14 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - name: Trust working directory and initialize submodules + run: | + git config --global --add safe.directory /__w/numpy/numpy + git submodule update --init --recursive - - name: Install dependencies - run: | - pip install -r requirements/build_requirements.txt - pip install -r requirements/ci_requirements.txt - pip install -r requirements/test_requirements.txt - # xdist captures stdout/stderr, but we want the ASAN output - pip uninstall -y pytest-xdist + - name: Install dependencies + run: | + pip install -r requirements/build_requirements.txt + pip install -r requirements/ci_requirements.txt + pip install -r requirements/test_requirements.txt + # xdist captures stdout/stderr, but we want the ASAN output + pip uninstall -y pytest-xdist - - name: Build - run: python -m spin build -j4 -- -Db_sanitize=address,leak + - name: Build NumPy with AddressSanitizer & LeakSanitizer + run: python -m spin build -j4 -- -Db_sanitize=address,leak - 
- name: Test - run: | - # pass -s to pytest to see ASAN errors and warnings, otherwise pytest captures them - export ASAN_OPTIONS="detect_leaks=1:symbolize=1:strict_init_order=true:allocator_may_return_null=1:use_sigaltstack=0" - export LSAN_OPTIONS="suppressions=$GITHUB_WORKSPACE/tools/ci/lsan_suppressions.txt" - python -m spin test -- -v -s --timeout=600 --durations=10 + - name: Test + run: | + # pass -s to pytest to see ASAN errors and warnings, otherwise pytest captures them + export ASAN_OPTIONS="detect_leaks=1:symbolize=1:strict_init_order=true:allocator_may_return_null=1:use_sigaltstack=0" + export LSAN_OPTIONS="suppressions=$GITHUB_WORKSPACE/tools/ci/lsan_suppressions.txt" + python -m spin test -- -v -s --timeout=600 --durations=10 From 845f93ce5152c3cfb2cbabeba41a828f5b2e3524 Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Sun, 1 Mar 2026 22:04:20 -0500 Subject: [PATCH 1439/1718] BUG: fft: Fix handling of out parameter for hfft, ifft2 and irfft2. Closes gh-30909. --- numpy/fft/_pocketfft.py | 6 +++--- numpy/fft/tests/test_pocketfft.py | 20 ++++++++++++++++++++ 2 files changed, 23 insertions(+), 3 deletions(-) diff --git a/numpy/fft/_pocketfft.py b/numpy/fft/_pocketfft.py index 93f96c9a10b6..90de21607ad2 100644 --- a/numpy/fft/_pocketfft.py +++ b/numpy/fft/_pocketfft.py @@ -625,7 +625,7 @@ def hfft(a, n=None, axis=-1, norm=None, out=None): if n is None: n = (a.shape[axis] - 1) * 2 new_norm = _swap_direction(norm) - output = irfft(conjugate(a), n, axis, norm=new_norm, out=None) + output = irfft(conjugate(a), n, axis, norm=new_norm, out=out) return output @@ -1260,7 +1260,7 @@ def ifft2(a, s=None, axes=(-2, -1), norm=None, out=None): [0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]]) """ - return _raw_fftnd(a, s, axes, ifft, norm, out=None) + return _raw_fftnd(a, s, axes, ifft, norm, out=out) @array_function_dispatch(_fftn_dispatcher) @@ -1690,4 +1690,4 @@ def irfft2(a, s=None, axes=(-2, -1), norm=None, out=None): [3., 3., 3., 3., 3.], [4., 4., 4., 4., 4.]]) """ - 
return irfftn(a, s, axes, norm, out=None) + return irfftn(a, s, axes, norm, out=out) diff --git a/numpy/fft/tests/test_pocketfft.py b/numpy/fft/tests/test_pocketfft.py index 6f26ab6c6d65..f294a26da58e 100644 --- a/numpy/fft/tests/test_pocketfft.py +++ b/numpy/fft/tests/test_pocketfft.py @@ -216,6 +216,12 @@ def test_ifft2(self): assert_allclose(np.fft.ifft2(x) * (30. * 20.), np.fft.ifft2(x, norm="forward"), atol=1e-6) + def test_ifft2_out(self): + z = np.array([[1 + 2j, 3 - 4j], [0.5 - 2j, 4 + 1j]]) + out = np.zeros_like(z) + result = np.fft.ifft2(z, out=out) + assert result is out + def test_fftn(self): x = random((30, 20, 10)) + 1j * random((30, 20, 10)) assert_allclose( @@ -298,6 +304,13 @@ def test_irfft2(self): assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x, norm="forward"), norm="forward"), atol=1e-6) + def test_irfft2_out(self): + z = np.array([[7, 1 + 4j, -5], [2 - 1j, -2 - 1j, -8 + 1j], + [-3, 1 + 2j, 5], [2 + 1j, 4 - 1j, -8 - 1j]]) + out = np.zeros((4, 4), dtype=np.float64) + result = np.fft.irfft2(z, out=out) + assert result is out + def test_rfftn(self): x = random((30, 20, 10)) assert_allclose(np.fft.fftn(x)[:, :, :6], np.fft.rfftn(x), atol=1e-6) @@ -338,6 +351,13 @@ def test_hfft(self): assert_allclose(np.fft.hfft(x_herm) / 30., np.fft.hfft(x_herm, norm="forward"), atol=1e-6) + def test_hfft_out(self): + a = np.array([1, 2, 3, 4, 3, 2], dtype=complex) + n = (len(a) - 1) * 2 + out = np.zeros(n, dtype=np.float64) + result = np.fft.hfft(a, n=n, out=out) + assert result is out + def test_ihfft(self): x = random(14) + 1j * random(14) x_herm = np.concatenate((random(1), x, random(1))) From dd102ade8afbe0bf16870cca75fa391fe17cc634 Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Mon, 2 Mar 2026 08:45:16 +0100 Subject: [PATCH 1440/1718] BUG: Fix reference leaks and NULL pointer dereferences (#30908) The PR addresses some reference leaks and NULL pointer dereferences in uncommon paths (mostly failures due to out-of-memory). 
The issues addressed have been found using Claude. --- numpy/_core/src/multiarray/arrayobject.c | 3 +++ numpy/_core/src/multiarray/common.c | 2 +- numpy/_core/src/multiarray/multiarraymodule.c | 12 ++++++++++-- numpy/_core/src/multiarray/number.c | 13 +++++++++++-- 4 files changed, 25 insertions(+), 5 deletions(-) diff --git a/numpy/_core/src/multiarray/arrayobject.c b/numpy/_core/src/multiarray/arrayobject.c index 9a536227c3be..460b007d7a7d 100644 --- a/numpy/_core/src/multiarray/arrayobject.c +++ b/numpy/_core/src/multiarray/arrayobject.c @@ -961,6 +961,9 @@ array_richcompare(PyArrayObject *self, PyObject *other, int cmp_op) PyErr_Clear(); PyArrayObject *array_other = (PyArrayObject *)PyArray_FROM_O(other); + if (array_other == NULL) { + return NULL; + } if (PyArray_TYPE(array_other) == NPY_VOID) { /* * Void arrays are currently not handled by ufuncs, so if the other diff --git a/numpy/_core/src/multiarray/common.c b/numpy/_core/src/multiarray/common.c index 501472807713..fd4f24151331 100644 --- a/numpy/_core/src/multiarray/common.c +++ b/numpy/_core/src/multiarray/common.c @@ -418,8 +418,8 @@ new_array_for_sum(PyArrayObject *ap1, PyArrayObject *ap2, PyArrayObject* out, /* set copy-back */ Py_INCREF(out); if (PyArray_SetWritebackIfCopyBase(out_buf, out) < 0) { - Py_DECREF(out); Py_DECREF(out_buf); + // PyArray_SetWritebackIfCopyBase steals reference to second argument return NULL; } } diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index 8afad4ed93cd..9587ea5753c7 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -123,6 +123,7 @@ get_legacy_print_mode(void) { PyObject *legacy_print_mode = NULL; if (PyDict_GetItemRef(format_options, npy_interned_str.legacy, &legacy_print_mode) == -1) { + Py_DECREF(format_options); return -1; } Py_DECREF(format_options); @@ -303,6 +304,7 @@ PyArray_AsCArray(PyObject **op, void *ptr, npy_intp *dims, int nd, n = 
PyArray_DIMS(ap)[0]; ptr2 = (char **)PyArray_malloc(n * sizeof(char *)); if (!ptr2) { + Py_DECREF(ap); PyErr_NoMemory(); return -1; } @@ -316,6 +318,7 @@ PyArray_AsCArray(PyObject **op, void *ptr, npy_intp *dims, int nd, m = PyArray_DIMS(ap)[1]; ptr3 = (char ***)PyArray_malloc(n*(m+1) * sizeof(char *)); if (!ptr3) { + Py_DECREF(ap); PyErr_NoMemory(); return -1; } @@ -2305,7 +2308,9 @@ array_count_nonzero(PyObject *NPY_UNUSED(self), PyObject *const *args, Py_ssize_ if (descr == NULL) { return NULL; } - return PyArray_Scalar(&count, descr, NULL); + PyObject *result = PyArray_Scalar(&count, descr, NULL); + Py_DECREF(descr); + return result; } static PyObject * @@ -3228,6 +3233,7 @@ PyArray_Where(PyObject *condition, PyObject *x, PyObject *y) PyArrayObject *arr = NULL, *ax = NULL, *ay = NULL; PyObject *ret = NULL; PyArray_Descr *common_dt = NULL; + NpyIter *iter = NULL; arr = (PyArrayObject *)PyArray_FROM_O(condition); if (arr == NULL) { @@ -3297,7 +3303,6 @@ PyArray_Where(PyObject *condition, PyObject *x, PyObject *y) /* `PyArray_DescrFromType` cannot fail for simple builtin types: */ PyArray_Descr * op_dt[4] = {common_dt, PyArray_DescrFromType(NPY_BOOL), x_dt, y_dt}; - NpyIter * iter; NPY_BEGIN_THREADS_DEF; iter = NpyIter_MultiNew( @@ -3431,6 +3436,9 @@ PyArray_Where(PyObject *condition, PyObject *x, PyObject *y) Py_XDECREF(common_dt); NPY_cast_info_xfree(&x_cast_info); NPY_cast_info_xfree(&y_cast_info); + if (iter != NULL) { + NpyIter_Deallocate(iter); + } return NULL; } diff --git a/numpy/_core/src/multiarray/number.c b/numpy/_core/src/multiarray/number.c index de4012641684..e27079a569ef 100644 --- a/numpy/_core/src/multiarray/number.c +++ b/numpy/_core/src/multiarray/number.c @@ -146,6 +146,9 @@ _get_keywords(int rtype, PyArrayObject *out) PyObject *kwds = NULL; if (rtype != NPY_NOTYPE || out != NULL) { kwds = PyDict_New(); + if (kwds == NULL) { + return NULL; + } if (rtype != NPY_NOTYPE) { PyArray_Descr *descr; descr = PyArray_DescrFromType(rtype); @@ -169,13 
+172,16 @@ PyArray_GenericReduceFunction(PyArrayObject *m1, PyObject *op, int axis, PyObject *kwds; args = Py_BuildValue("(Oi)", m1, axis); + if (args == NULL) { + return NULL; + } kwds = _get_keywords(rtype, out); meth = PyObject_GetAttrString(op, "reduce"); if (meth && PyCallable_Check(meth)) { ret = PyObject_Call(meth, args, kwds); } Py_DECREF(args); - Py_DECREF(meth); + Py_XDECREF(meth); Py_XDECREF(kwds); return ret; } @@ -189,13 +195,16 @@ PyArray_GenericAccumulateFunction(PyArrayObject *m1, PyObject *op, int axis, PyObject *kwds; args = Py_BuildValue("(Oi)", m1, axis); + if (args == NULL) { + return NULL; + } kwds = _get_keywords(rtype, out); meth = PyObject_GetAttrString(op, "accumulate"); if (meth && PyCallable_Check(meth)) { ret = PyObject_Call(meth, args, kwds); } Py_DECREF(args); - Py_DECREF(meth); + Py_XDECREF(meth); Py_XDECREF(kwds); return ret; } From 5d04361228454e2bbacdd746d0240a809413a960 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 2 Mar 2026 09:15:06 -0700 Subject: [PATCH 1441/1718] MAINT: fix two minor issues noticed when touching the C API setup --- numpy/_core/code_generators/verify_c_api_version.py | 2 +- numpy/_core/src/multiarray/usertypes.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/_core/code_generators/verify_c_api_version.py b/numpy/_core/code_generators/verify_c_api_version.py index 955ec595327e..9c39b044e955 100644 --- a/numpy/_core/code_generators/verify_c_api_version.py +++ b/numpy/_core/code_generators/verify_c_api_version.py @@ -45,7 +45,7 @@ def check_api_version(apiversion): f"{apiversion}, with checksum {curapi_hash}, but recorded " f"checksum in _core/codegen_dir/cversions.txt is {api_hash}. " "If functions were added in the C API, you have to update " - f"C_API_VERSION in {__file__}." + f"C_API_VERSION in numpy/core/meson.build." 
) raise MismatchCAPIError(msg) diff --git a/numpy/_core/src/multiarray/usertypes.c b/numpy/_core/src/multiarray/usertypes.c index 71c95a8ae39c..9989835f6700 100644 --- a/numpy/_core/src/multiarray/usertypes.c +++ b/numpy/_core/src/multiarray/usertypes.c @@ -180,7 +180,7 @@ PyArray_RegisterDataType(PyArray_DescrProto *descr_proto) } descr_proto->type_num = -1; if (PyDataType_ISUNSIZED(descr_proto)) { - PyErr_SetString(PyExc_ValueError, "cannot register a" \ + PyErr_SetString(PyExc_ValueError, "cannot register a " \ "flexible data-type"); return -1; } From a643c9f3398215c295c57123f04eaf3da13c1eeb Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Tue, 3 Mar 2026 12:40:32 -0700 Subject: [PATCH 1442/1718] BUG: fix type issues in uses if PyDataType macros --- numpy/_core/src/multiarray/arraytypes.c.src | 2 +- numpy/_core/src/multiarray/descriptor.c | 2 +- numpy/_core/src/multiarray/usertypes.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/_core/src/multiarray/arraytypes.c.src b/numpy/_core/src/multiarray/arraytypes.c.src index d67bdd046c6d..b00aad2333bd 100644 --- a/numpy/_core/src/multiarray/arraytypes.c.src +++ b/numpy/_core/src/multiarray/arraytypes.c.src @@ -4716,7 +4716,7 @@ set_typeinfo(PyObject *dict) * #name = STRING, UNICODE, VOID# */ - PyDataType_MAKEUNSIZED(&@name@_Descr); + PyDataType_MAKEUNSIZED((PyArray_Descr *)&@name@_Descr); /**end repeat**/ diff --git a/numpy/_core/src/multiarray/descriptor.c b/numpy/_core/src/multiarray/descriptor.c index 0bbc6358f75b..e712ff5372bf 100644 --- a/numpy/_core/src/multiarray/descriptor.c +++ b/numpy/_core/src/multiarray/descriptor.c @@ -2089,7 +2089,7 @@ arraydescr_subdescr_get(PyArray_Descr *self, void *NPY_UNUSED(ignored)) NPY_NO_EXPORT PyObject * arraydescr_protocol_typestr_get(PyArray_Descr *self, void *NPY_UNUSED(ignored)) { - if (!PyDataType_ISLEGACY(NPY_DTYPE(self))) { + if (!PyDataType_ISLEGACY(self)) { return (PyObject *) Py_TYPE(self)->tp_str((PyObject *)self); } diff --git 
a/numpy/_core/src/multiarray/usertypes.c b/numpy/_core/src/multiarray/usertypes.c index 9989835f6700..78559fe9c80e 100644 --- a/numpy/_core/src/multiarray/usertypes.c +++ b/numpy/_core/src/multiarray/usertypes.c @@ -179,7 +179,7 @@ PyArray_RegisterDataType(PyArray_DescrProto *descr_proto) return -1; } descr_proto->type_num = -1; - if (PyDataType_ISUNSIZED(descr_proto)) { + if (descr_proto->elsize == 0) { PyErr_SetString(PyExc_ValueError, "cannot register a " \ "flexible data-type"); return -1; From 694cbb6e864ebbebec29ced71ea2bbad95c2d263 Mon Sep 17 00:00:00 2001 From: Noxaster <208382900+Noxaster@users.noreply.github.com> Date: Tue, 3 Mar 2026 23:06:01 +0100 Subject: [PATCH 1443/1718] Merge pull request #30854 from Noxaster/fix-numpy/f2py-UP031 --- numpy/f2py/auxfuncs.py | 17 ++-- numpy/f2py/capi_maps.py | 102 +++++++++++----------- numpy/f2py/common_rules.py | 36 ++++---- numpy/f2py/crackfortran.py | 172 ++++++++++++++++++------------------- numpy/f2py/diagnose.py | 4 +- numpy/f2py/f2py2e.py | 8 +- numpy/f2py/f90mod_rules.py | 84 +++++++++--------- numpy/f2py/func2subr.py | 8 +- numpy/f2py/rules.py | 12 +-- numpy/f2py/tests/util.py | 2 +- numpy/f2py/use_rules.py | 13 +-- 11 files changed, 234 insertions(+), 224 deletions(-) diff --git a/numpy/f2py/auxfuncs.py b/numpy/f2py/auxfuncs.py index a5af31d976ec..cc3889c192c9 100644 --- a/numpy/f2py/auxfuncs.py +++ b/numpy/f2py/auxfuncs.py @@ -627,16 +627,16 @@ def __call__(self, var): def l_and(*f): l1, l2 = 'lambda v', [] for i in range(len(f)): - l1 = '%s,f%d=f[%d]' % (l1, i, i) - l2.append('f%d(v)' % (i)) + l1 = f'{l1},f{i}=f[{i}]' + l2.append(f'f{i}(v)') return eval(f"{l1}:{' and '.join(l2)}") def l_or(*f): l1, l2 = 'lambda v', [] for i in range(len(f)): - l1 = '%s,f%d=f[%d]' % (l1, i, i) - l2.append('f%d(v)' % (i)) + l1 = f'{l1},f{i}=f[{i}]' + l2.append(f'f{i}(v)') return eval(f"{l1}:{' or '.join(l2)}") @@ -988,17 +988,18 @@ def process_f2cmap_dict(f2cmap_all, new_map, c2py_map, verbose=False): if v1 in c2py_map: if 
k1 in f2cmap_all[k]: outmess( - "\tWarning: redefinition of {'%s':{'%s':'%s'->'%s'}}\n" - % (k, k1, f2cmap_all[k][k1], v1) + "\tWarning: redefinition of " + f"{{'{k}':{{'{k1}':'{f2cmap_all[k][k1]}'->'{v1}'}}}}\n" ) f2cmap_all[k][k1] = v1 if verbose: outmess(f'\tMapping "{k}(kind={k1})" to "{v1}\"\n') f2cmap_mapped.append(v1) elif verbose: + c2py_map_keys = list(c2py_map.keys()) errmess( - "\tIgnoring map {'%s':{'%s':'%s'}}: '%s' must be in %s\n" - % (k, k1, v1, v1, list(c2py_map.keys())) + f"\tIgnoring map {{'{k}':{{'{k1}':'{v1}'}}}}: '{v1}' " + f"must be in {c2py_map_keys}\n" ) return f2cmap_all, f2cmap_mapped diff --git a/numpy/f2py/capi_maps.py b/numpy/f2py/capi_maps.py index 290ac2f467ad..567bf3a00b90 100644 --- a/numpy/f2py/capi_maps.py +++ b/numpy/f2py/capi_maps.py @@ -215,8 +215,9 @@ def getctype(var): try: ctype = f2cmap[var['kindselector']['*']] except KeyError: - errmess('getctype: "%s %s %s" not supported.\n' % - (var['typespec'], '*', var['kindselector']['*'])) + raw_typespec = var['typespec'] + star = var['kindselector']['*'] + errmess(f'getctype: "{raw_typespec} * {star}" not supported.\n') elif 'kind' in var['kindselector']: if typespec + 'kind' in f2cmap_all: f2cmap = f2cmap_all[typespec + 'kind'] @@ -228,9 +229,11 @@ def getctype(var): try: ctype = f2cmap[str(var['kindselector']['kind'])] except KeyError: - errmess('getctype: "%s(kind=%s)" is mapped to C "%s" (to override define dict(%s = dict(%s="")) in %s/.f2py_f2cmap file).\n' - % (typespec, var['kindselector']['kind'], ctype, - typespec, var['kindselector']['kind'], os.getcwd())) + kind = var['kindselector']['kind'] + errmess(f'getctype: "{typespec}({kind=})" is mapped to C ' + f'"{ctype}" (to override define {{{typespec!r}: ' + f'{{{kind!r}: ""}}}} ' + f'in {os.getcwd()}/.f2py_f2cmap file).\n') elif not isexternal(var): errmess(f'getctype: No C-type found in "{var}", assuming void.\n') return ctype @@ -273,8 +276,8 @@ def getstrlength(var): len = f2cexpr(a['len']) if 
re.match(r'\(\s*(\*|:)\s*\)', len) or re.match(r'(\*|:)', len): if isintent_hide(var): - errmess('getstrlength:intent(hide): expected a string with defined length but got: %s\n' % ( - repr(var))) + errmess(f'getstrlength:intent(hide): expected a string with defined length ' + f'but got: {var!r}\n') len = '-1' return len @@ -305,7 +308,7 @@ def getarrdims(a, var, verbose=0): v = [dim[i]] else: for va in depargs: - if re.match(r'.*?\b%s\b.*' % va, dim[i]): + if re.match(rf'.*?\b{va}\b.*', dim[i]): v.append(va) for va in v: if depargs.index(va) > depargs.index(a): @@ -315,21 +318,18 @@ def getarrdims(a, var, verbose=0): for d in dim: i = i + 1 if d not in ['*', ':', '(*)', '(:)']: - ret['setdims'] = '%s#varname#_Dims[%d]=%s,' % ( - ret['setdims'], i, d) + ret['setdims'] = f"{ret['setdims']}#varname#_Dims[{i}]={d}," if ret['setdims']: ret['setdims'] = ret['setdims'][:-1] ret['cbsetdims'], i = '', -1 for d in var['dimension']: i = i + 1 if d not in ['*', ':', '(*)', '(:)']: - ret['cbsetdims'] = '%s#varname#_Dims[%d]=%s,' % ( - ret['cbsetdims'], i, d) + ret['cbsetdims'] = f"{ret['cbsetdims']}#varname#_Dims[{i}]={d}," elif isintent_in(var): - outmess('getarrdims:warning: assumed shape array, using 0 instead of %r\n' - % (d)) - ret['cbsetdims'] = '%s#varname#_Dims[%d]=%s,' % ( - ret['cbsetdims'], i, 0) + outmess('getarrdims:warning: assumed shape array, using 0 ' + f'instead of {d!r}\n') + ret['cbsetdims'] = f"{ret['cbsetdims']}#varname#_Dims[{i}]={0}," elif verbose: errmess( f'getarrdims: If in call-back function: array argument {repr(a)} must have bounded dimensions: got {repr(d)}\n') @@ -372,31 +372,30 @@ def getpydocsign(a, var): init = f', optional\\n Default: {showinit}' if isscalar(var): if isintent_inout(var): - sig = '%s : %s rank-0 array(%s,\'%s\')%s' % (a, opt, c2py_map[ctype], - c2pycode_map[ctype], init) + sig = (f"{a} : {opt} rank-0 array({c2py_map[ctype]}," + f"'{c2pycode_map[ctype]}'){init}") else: sig = f'{a} : {opt} {c2py_map[ctype]}{init}' sigout = 
f'{out_a} : {c2py_map[ctype]}' elif isstring(var): if isintent_inout(var): - sig = '%s : %s rank-0 array(string(len=%s),\'c\')%s' % ( - a, opt, getstrlength(var), init) + sig = (f"{a} : {opt} rank-0 array(string(len={getstrlength(var)})," + f"'c'){init}") else: sig = f'{a} : {opt} string(len={getstrlength(var)}){init}' sigout = f'{out_a} : string(len={getstrlength(var)})' elif isarray(var): dim = var['dimension'] rank = repr(len(dim)) - sig = '%s : %s rank-%s array(\'%s\') with bounds (%s)%s' % (a, opt, rank, - c2pycode_map[ - ctype], - ','.join(dim), init) + dim_str = ','.join(dim) + sig = (f"{a} : {opt} rank-{rank} array('{c2pycode_map[ctype]}') with " + f"bounds ({dim_str}){init}") if a == out_a: - sigout = '%s : rank-%s array(\'%s\') with bounds (%s)'\ - % (a, rank, c2pycode_map[ctype], ','.join(dim)) + sigout = (f"{a} : rank-{rank} array('{c2pycode_map[ctype]}') with " + f"bounds ({dim_str})") else: - sigout = '%s : rank-%s array(\'%s\') with bounds (%s) and %s storage'\ - % (out_a, rank, c2pycode_map[ctype], ','.join(dim), a) + sigout = (f"{out_a} : rank-{rank} array('{c2pycode_map[ctype]}') with " + f"bounds ({dim_str}) and {a} storage") elif isexternal(var): ua = '' if a in lcb_map and lcb_map[a] in lcb2_map and 'argname' in lcb2_map[lcb_map[a]]: @@ -422,10 +421,9 @@ def getarrdocsign(a, var): elif isarray(var): dim = var['dimension'] rank = repr(len(dim)) - sig = '%s : rank-%s array(\'%s\') with bounds (%s)' % (a, rank, - c2pycode_map[ - ctype], - ','.join(dim)) + dim_str = ','.join(dim) + sig = (f"{a} : rank-{rank} array('{c2pycode_map[ctype]}') with " + f"bounds ({dim_str})") return sig @@ -457,7 +455,8 @@ def getinit(a, var): if not init: init, showinit = '""', "''" if init[0] == "'": - init = '"%s"' % (init[1:-1].replace('"', '\\"')) + escaped_init = init[1:-1].replace('"', '\\"') + init = f'"{escaped_init}"' if init[0] == '"': showinit = f"'{init[1:-1]}'" return init, showinit @@ -522,8 +521,9 @@ def sign2map(a, var): ret['cblatexdocstr'] = 
lcb2_map[lcb_map[a]]['latexdocstr'] else: ret['cbname'] = a - errmess('sign2map: Confused: external %s is not in lcb_map%s.\n' % ( - a, list(lcb_map.keys()))) + lcb_map_keys = list(lcb_map.keys()) + errmess(f'sign2map: Confused: external {a} is not in ' + f'lcb_map{lcb_map_keys}.\n') if isstring(var): ret['length'] = getstrlength(var) if isarray(var): @@ -557,24 +557,24 @@ def sign2map(a, var): ddim = ','.join( map(lambda x, y: f'{x}|{y}', var['dimension'], dim)) rl.append(f'dims({ddim})') + rl_str = ','.join(rl) if isexternal(var): - ret['vardebuginfo'] = f"debug-capi:{a}=>{ret['cbname']}:{','.join(rl)}" + ret['vardebuginfo'] = f"debug-capi:{a}=>{ret['cbname']}:{rl_str}" else: - ret['vardebuginfo'] = 'debug-capi:%s %s=%s:%s' % ( - ret['ctype'], a, ret['showinit'], ','.join(rl)) + ret['vardebuginfo'] = (f"debug-capi:{ret['ctype']} " + f"{a}={ret['showinit']}:{rl_str}") if isscalar(var): if ret['ctype'] in cformat_map: ret['vardebugshowvalue'] = f"debug-capi:{a}={cformat_map[ret['ctype']]}" if isstring(var): - ret['vardebugshowvalue'] = 'debug-capi:slen(%s)=%%d %s=\\"%%s\\"' % ( - a, a) + ret['vardebugshowvalue'] = f'debug-capi:slen({a})=%d {a}=\\"%s\\"' if isexternal(var): ret['vardebugshowvalue'] = f'debug-capi:{a}=%p' if ret['ctype'] in cformat_map: ret['varshowvalue'] = f"#name#:{a}={cformat_map[ret['ctype']]}" ret['showvalueformat'] = f"{cformat_map[ret['ctype']]}" if isstring(var): - ret['varshowvalue'] = '#name#:slen(%s)=%%d %s=\\"%%s\\"' % (a, a) + ret['varshowvalue'] = f'#name#:slen({a})=%d {a}=\\"%s\\"' ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, var) if hasnote(var): ret['note'] = var['note'] @@ -623,8 +623,9 @@ def routsign2map(rout): break lcb_map[ln] = un[1] elif rout.get('externals'): - errmess('routsign2map: Confused: function %s has externals %s but no "use" statement.\n' % ( - ret['name'], repr(rout['externals']))) + externals = rout['externals'] + errmess(f"routsign2map: Confused: function {ret['name']} has externals " + 
f'{externals!r} but no "use" statement.\n') ret['callprotoargument'] = getcallprotoargument(rout, lcb_map) or '' if isfunction(rout): if 'result' in rout: @@ -641,20 +642,19 @@ def routsign2map(rout): ret['rformat'] = c2buildvalue_map[ret['ctype']] else: ret['rformat'] = 'O' - errmess('routsign2map: no c2buildvalue key for type %s\n' % - (repr(ret['ctype']))) + errmess(f"routsign2map: no c2buildvalue key for type {ret['ctype']!r}\n") if debugcapi(rout): if ret['ctype'] in cformat_map: - ret['routdebugshowvalue'] = 'debug-capi:%s=%s' % ( - a, cformat_map[ret['ctype']]) + ret['routdebugshowvalue'] = ("debug-capi:" + f"{a}={cformat_map[ret['ctype']]}") if isstringfunction(rout): - ret['routdebugshowvalue'] = 'debug-capi:slen(%s)=%%d %s=\\"%%s\\"' % ( - a, a) + ret['routdebugshowvalue'] = f'debug-capi:slen({a})=%d {a}=\\"%s\\"' if isstringfunction(rout): ret['rlength'] = getstrlength(rout['vars'][a]) if ret['rlength'] == '-1': - errmess('routsign2map: expected explicit specification of the length of the string returned by the fortran function %s; taking 10.\n' % ( - repr(rout['name']))) + errmess("routsign2map: expected explicit specification of the length " + "of the string returned by the fortran function " + f"{rout['name']!r}; taking 10.\n") ret['rlength'] = '10' if hasnote(rout): ret['note'] = rout['note'] diff --git a/numpy/f2py/common_rules.py b/numpy/f2py/common_rules.py index cef757b6c5a3..356f2a4f6355 100644 --- a/numpy/f2py/common_rules.py +++ b/numpy/f2py/common_rules.py @@ -64,12 +64,14 @@ def dadd(line, s=doc): hnames.append(n) else: inames.append(n) + hnames_str = ','.join(hnames) + inames_str = ','.join(inames) if hnames: - outmess('\t\tConstructing COMMON block support for "%s"...\n\t\t %s\n\t\t Hidden: %s\n' % ( - name, ','.join(inames), ','.join(hnames))) + outmess(f'\t\tConstructing COMMON block support for "{name}"...\n\t\t ' + f'{inames_str}\n\t\t Hidden: {hnames_str}\n') else: - outmess('\t\tConstructing COMMON block support for "%s"...\n\t\t 
%s\n' % ( - name, ','.join(inames))) + outmess(f'\t\tConstructing COMMON block support for "{name}"...\n\t\t ' + f'{inames_str}\n') fadd(f'subroutine f2pyinit{name}(setupfunc)') for usename in getuseblocks(m): fadd(f'use {usename}') @@ -82,7 +84,7 @@ def dadd(line, s=doc): fadd(f"common /{name}/ {','.join(vnames)}") fadd(f"call setupfunc({','.join(inames)})") fadd('end\n') - cadd('static FortranDataDef f2py_%s_def[] = {' % (name)) + cadd(f'static FortranDataDef f2py_{name}_def[] = {{') idims = [] for n in inames: ct = capi_maps.getctype(vars[n]) @@ -96,12 +98,12 @@ def dadd(line, s=doc): dms = dm['dims'].strip() if not dms: dms = '-1' - cadd('\t{\"%s\",%s,{{%s}},%s, %s},' - % (n, dm['rank'], dms, at, elsize)) + rank = dm['rank'] + cadd(f'\t{{\"{n}\",{rank},{{{{{dms}}}}},{at}, {elsize}}},') cadd('\t{NULL}\n};') inames1 = rmbadname(inames) inames1_tps = ','.join(['char *' + s for s in inames1]) - cadd('static void f2py_setup_%s(%s) {' % (name, inames1_tps)) + cadd(f'static void f2py_setup_{name}({inames1_tps}) {{') cadd('\tint i_f2py=0;') for n in inames1: cadd(f'\tf2py_{name}_def[i_f2py++].data = {n};') @@ -110,23 +112,23 @@ def dadd(line, s=doc): F_FUNC = 'F_FUNC_US' else: F_FUNC = 'F_FUNC' - cadd('extern void %s(f2pyinit%s,F2PYINIT%s)(void(*)(%s));' - % (F_FUNC, lower_name, name.upper(), - ','.join(['char*'] * len(inames1)))) - cadd('static void f2py_init_%s(void) {' % name) - cadd('\t%s(f2pyinit%s,F2PYINIT%s)(f2py_setup_%s);' - % (F_FUNC, lower_name, name.upper(), name)) + arg_types_str = ','.join(['char*'] * len(inames1)) + cadd(f"extern void {F_FUNC}(f2pyinit{lower_name},F2PYINIT{name.upper()})" + f"(void(*)({arg_types_str}));") + cadd(f'static void f2py_init_{name}(void) {{') + cadd(f'\t{F_FUNC}(f2pyinit{lower_name},F2PYINIT{name.upper()})' + f'(f2py_setup_{name});') cadd('}\n') iadd(f'\ttmp = PyFortranObject_New(f2py_{name}_def,f2py_init_{name});') iadd('\tif (tmp == NULL) return NULL;') iadd(f'\tif (F2PyDict_SetItemString(d, "{name}", tmp) == -1) return 
NULL;') iadd('\tPy_DECREF(tmp);') tname = name.replace('_', '\\_') - dadd('\\subsection{Common block \\texttt{%s}}\n' % (tname)) + dadd(f'\\subsection{{Common block \\texttt{{{tname}}}}}\n') dadd('\\begin{description}') for n in inames: - dadd('\\item[]{{}\\verb@%s@{}}' % - (capi_maps.getarrdocsign(n, vars[n]))) + docsign = capi_maps.getarrdocsign(n, vars[n]) + dadd(f'\\item[]{{{{}}\\verb@{docsign}@{{}}}}') if hasnote(vars[n]): note = vars[n]['note'] if isinstance(note, list): diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py index d75fe4df6cd6..5a43a5d27f1c 100644 --- a/numpy/f2py/crackfortran.py +++ b/numpy/f2py/crackfortran.py @@ -413,9 +413,8 @@ def readfortrancode(ffile, dowithline=show, istop=1): beginpattern = beginpattern77 else: beginpattern = beginpattern90 - outmess('\tReading file %s (format:%s%s)\n' - % (repr(currentfilename), sourcecodeform, - (strictf77 and ',strict') or '')) + outmess(f"\tReading file {currentfilename!r} " + f"(format:{sourcecodeform}{',strict' if strictf77 else ''})\n") l = l.expandtabs().replace('\xa0', ' ') # Get rid of newline characters @@ -461,7 +460,7 @@ def readfortrancode(ffile, dowithline=show, istop=1): if l[0] not in spacedigits: raise Exception('readfortrancode: Found non-(space,digit) char ' 'in the first column.\n\tAre you sure that ' - 'this code is in fix form?\n\tline=%s' % repr(l)) + f'this code is in fix form?\n\tline={l!r}') if (not cont or strictf77) and (len(l) > 5 and not l[5] == ' '): # Continuation of a previous line @@ -520,8 +519,8 @@ def readfortrancode(ffile, dowithline=show, istop=1): else: raise ValueError( f"Flag sourcecodeform must be either 'fix' or 'free': {repr(sourcecodeform)}") - filepositiontext = 'Line #%d in %s:"%s"\n\t' % ( - fin.filelineno() - 1, currentfilename, l1) + filepositiontext = (f'Line #{fin.filelineno() - 1} ' + f'in {currentfilename}:"{l1}"\n\t') m = includeline.match(origfinalline) if m: fn = m.group('name') @@ -538,8 +537,8 @@ def readfortrancode(ffile, 
dowithline=show, istop=1): readfortrancode(fn1, dowithline=dowithline, istop=0) break if not foundfile: - outmess('readfortrancode: could not find include file %s in %s. Ignoring.\n' % ( - repr(fn), os.pathsep.join(include_dirs))) + outmess(f'readfortrancode: could not find include file {fn!r} ' + f'in {os.pathsep.join(include_dirs)}. Ignoring.\n') else: dowithline(finalline) l1 = ll @@ -549,8 +548,7 @@ def readfortrancode(ffile, dowithline=show, istop=1): else: finalline = ll origfinalline = ll - filepositiontext = 'Line #%d in %s:"%s"\n\t' % ( - fin.filelineno() - 1, currentfilename, l1) + filepositiontext = f'Line #{fin.filelineno() - 1} in {currentfilename}:"{l1}"\n\t' m = includeline.match(origfinalline) if m: fn = m.group('name') @@ -566,8 +564,8 @@ def readfortrancode(ffile, dowithline=show, istop=1): readfortrancode(fn1, dowithline=dowithline, istop=0) break if not foundfile: - outmess('readfortrancode: could not find include file %s in %s. Ignoring.\n' % ( - repr(fn), os.pathsep.join(include_dirs))) + outmess(f'readfortrancode: could not find include file {fn!r} ' + f'in {os.pathsep.join(include_dirs)}. Ignoring.\n') else: dowithline(finalline) filepositiontext = '' @@ -735,8 +733,7 @@ def crackline(line, reset=0): if f77modulename and neededmodule == groupcounter: fl = 2 while groupcounter > fl: - outmess('crackline: groupcounter=%s groupname=%s\n' % - (repr(groupcounter), repr(groupname))) + outmess(f'crackline: groupcounter={groupcounter!r} groupname={groupname!r}\n') outmess( 'crackline: Mismatch of blocks encountered. 
Trying to fix it by assuming "end" statement.\n') grouplist[groupcounter - 1].append(groupcache[groupcounter]) @@ -786,7 +783,8 @@ def crackline(line, reset=0): if 'interfaced' in groupcache[groupcounter] and name in groupcache[groupcounter]['interfaced']: continue m1 = re.match( - r'(?P[^"]*)\b%s\b\s*@\(@(?P[^@]*)@\)@.*\Z' % name, markouterparen(line), re.I) + rf'(?P[^"]*)\b{name}\b\s*@\(@(?P[^@]*)@\)@.*\Z', + markouterparen(line), re.I) if m1: m2 = re_1.match(m1.group('before')) a = _simplifyargs(m1.group('args')) @@ -803,7 +801,7 @@ def crackline(line, reset=0): return if verbose > 1 or (verbose == 1 and currentfilename.lower().endswith('.pyf')): previous_context = None - outmess('crackline:%d: No pattern for line\n' % (groupcounter)) + outmess(f'crackline:{groupcounter}: No pattern for line\n') return elif pat[1] == 'end': if 0 <= skipblocksuntil < groupcounter: @@ -811,16 +809,13 @@ def crackline(line, reset=0): if skipblocksuntil <= groupcounter: return if groupcounter <= 0: - raise Exception('crackline: groupcounter(=%s) is nonpositive. ' - 'Check the blocks.' - % (groupcounter)) + raise Exception(f'crackline: groupcounter(={groupcounter}) is nonpositive. 
' + 'Check the blocks.') m1 = beginpattern[0].match(line) if (m1) and (not m1.group('this') == groupname[groupcounter]): - raise Exception('crackline: End group %s does not match with ' - 'previous Begin group %s\n\t%s' % - (repr(m1.group('this')), repr(groupname[groupcounter]), - filepositiontext) - ) + raise Exception(f'crackline: End group {m1.group("this")!r} ' + 'does not match with previous Begin group ' + f'{groupname[groupcounter]!r}\n\t{filepositiontext}') if skipblocksuntil == groupcounter: skipblocksuntil = -1 grouplist[groupcounter - 1].append(groupcache[groupcounter]) @@ -1086,8 +1081,7 @@ def analyzeline(m, case, line): grouplist[groupcounter] = [] if needmodule: if verbose > 1: - outmess('analyzeline: Creating module block %s\n' % - repr(f77modulename), 0) + outmess(f'analyzeline: Creating module block {f77modulename!r}\n', 0) groupname[groupcounter] = 'module' groupcache[groupcounter]['block'] = 'python module' groupcache[groupcounter]['name'] = f77modulename @@ -1101,13 +1095,13 @@ def analyzeline(m, case, line): grouplist[groupcounter] = [] if needinterface: if verbose > 1: - outmess('analyzeline: Creating additional interface block (groupcounter=%s).\n' % ( - groupcounter), 0) + outmess('analyzeline: Creating additional interface block ' + f'({groupcounter=}).\n', 0) groupname[groupcounter] = 'interface' groupcache[groupcounter]['block'] = 'interface' groupcache[groupcounter]['name'] = 'unknown_interface' - groupcache[groupcounter]['from'] = '%s:%s' % ( - groupcache[groupcounter - 1]['from'], groupcache[groupcounter - 1]['name']) + prev_group = groupcache[groupcounter - 1] + groupcache[groupcounter]['from'] = f"{prev_group['from']}:{prev_group['name']}" groupcache[groupcounter]['body'] = [] groupcache[groupcounter]['externals'] = [] groupcache[groupcounter]['interfaced'] = [] @@ -1125,11 +1119,10 @@ def analyzeline(m, case, line): if groupcounter == 1: groupcache[groupcounter]['from'] = currentfilename elif f77modulename and groupcounter == 3: - 
groupcache[groupcounter]['from'] = '%s:%s' % ( - groupcache[groupcounter - 1]['from'], currentfilename) + groupcache[groupcounter]['from'] = f"{groupcache[groupcounter - 1]['from']}:{currentfilename}" else: - groupcache[groupcounter]['from'] = '%s:%s' % ( - groupcache[groupcounter - 1]['from'], groupcache[groupcounter - 1]['name']) + prev_group = groupcache[groupcounter - 1] + groupcache[groupcounter]['from'] = f"{prev_group['from']}:{prev_group['name']}" for k in list(groupcache[groupcounter].keys()): if not groupcache[groupcounter][k]: del groupcache[groupcounter][k] @@ -1221,8 +1214,8 @@ def analyzeline(m, case, line): ll = ll[:i + 1] + '::' + ll[i + 1:] i = ll.find('::') if ll[i:] == '::' and 'args' in groupcache[groupcounter]: - outmess('All arguments will have attribute %s%s\n' % - (m.group('this'), ll[:i])) + outmess('All arguments will have attribute ' + f'{m.group("this")}{ll[:i]}\n') ll = ll + ','.join(groupcache[groupcounter]['args']) if i < 0: i = 0 @@ -1233,8 +1226,8 @@ def analyzeline(m, case, line): ch = markoutercomma(pl).split('@,@') if len(ch) > 1: pl = ch[0] - outmess('analyzeline: cannot handle multiple attributes without type specification. Ignoring %r.\n' % ( - ','.join(ch[1:]))) + outmess("analyzeline: cannot handle multiple attributes without " + f"type specification. Ignoring {','.join(ch[1:])!r}.\n") last_name = None for e in [x.strip() for x in markoutercomma(ll).split('@,@')]: @@ -1244,8 +1237,8 @@ def analyzeline(m, case, line): k = '' else: print(m.groupdict()) - outmess('analyzeline: no name pattern found in %s statement for %s. Skipping.\n' % ( - case, repr(e))) + outmess(f'analyzeline: no name pattern found in {case} statement ' + f'for {e!r}. 
Skipping.\n') continue else: k = rmbadname1(m1.group('name')) @@ -1265,15 +1258,16 @@ def analyzeline(m, case, line): 'analyzeline: missing __user__ module (could be nothing)\n') # fixes ticket 1693 if k != groupcache[groupcounter]['name']: - outmess('analyzeline: appending intent(callback) %s' - ' to %s arguments\n' % (k, groupcache[groupcounter]['name'])) + outmess(f"analyzeline: appending intent(callback) {k}" + f" to {groupcache[groupcounter]['name']} " + "arguments\n") groupcache[groupcounter]['args'].append(k) else: errmess( f'analyzeline: intent(callback) {k} is ignored\n') else: - errmess('analyzeline: intent(callback) %s is already' - ' in argument list\n' % (k)) + errmess(f'analyzeline: intent(callback) {k} is already' + ' in argument list\n') if case in ['optional', 'required', 'public', 'external', 'private', 'intrinsic']: ap = case if 'attrspec' in edecl[k]: @@ -1312,8 +1306,8 @@ def analyzeline(m, case, line): if k not in edecl: edecl[k] = {} if '=' in edecl[k] and (not edecl[k]['='] == initexpr): - outmess('analyzeline: Overwriting the value of parameter "%s" ("%s") with "%s".\n' % ( - k, edecl[k]['='], initexpr)) + outmess(f'analyzeline: Overwriting the value of parameter "{k}" ' + f'("{edecl[k]["="]}") with "{initexpr}".\n') t = determineexprtype(initexpr, params) if t: if t.get('typespec') == 'real': @@ -1328,8 +1322,8 @@ def analyzeline(m, case, line): try: v = eval(initexpr, {}, params) except (SyntaxError, NameError, TypeError) as msg: - errmess('analyzeline: Failed to evaluate %r. Ignoring: %s\n' - % (initexpr, msg)) + errmess(f'analyzeline: Failed to evaluate {initexpr!r}. ' + f'Ignoring: {msg}\n') continue edecl[k]['='] = repr(v) if 'attrspec' in edecl[k]: @@ -1705,35 +1699,39 @@ def updatevars(typespec, selector, attrspec, entitydecl): if not_has_typespec: edecl['typespec'] = typespec elif typespec and (not typespec == edecl['typespec']): - outmess('updatevars: attempt to change the type of "%s" ("%s") to "%s". 
Ignoring.\n' % ( - ename, edecl['typespec'], typespec)) + current_typespec = edecl['typespec'] + outmess(f'updatevars: attempt to change the type of "{ename}" ' + f'("{current_typespec}") to "{typespec}". Ignoring.\n') if 'kindselector' not in edecl: edecl['kindselector'] = copy.copy(kindselect) elif kindselect: for k in list(kindselect.keys()): if k in edecl['kindselector'] and (not kindselect[k] == edecl['kindselector'][k]): - outmess('updatevars: attempt to change the kindselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % ( - k, ename, edecl['kindselector'][k], kindselect[k])) + current_kind = edecl['kindselector'][k] + outmess('updatevars: attempt to change the kindselector ' + f'"{k}" of "{ename}" ("{current_kind}") to ' + f'"{kindselect[k]}". Ignoring.\n') else: edecl['kindselector'][k] = copy.copy(kindselect[k]) if 'charselector' not in edecl and charselect: if not_has_typespec: edecl['charselector'] = charselect else: - errmess('updatevars:%s: attempt to change empty charselector to %r. Ignoring.\n' - % (ename, charselect)) + errmess(f'updatevars:{ename}: attempt to change empty charselector ' + f'to {charselect!r}. Ignoring.\n') elif charselect: for k in list(charselect.keys()): if k in edecl['charselector'] and (not charselect[k] == edecl['charselector'][k]): - outmess('updatevars: attempt to change the charselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % ( - k, ename, edecl['charselector'][k], charselect[k])) + outmess(f'updatevars: attempt to change the charselector "{k}" ' + f'of "{ename}" ("{edecl["charselector"][k]}") to ' + f'"{charselect[k]}". Ignoring.\n') else: edecl['charselector'][k] = copy.copy(charselect[k]) if 'typename' not in edecl: edecl['typename'] = typename elif typename and (not edecl['typename'] == typename): - outmess('updatevars: attempt to change the typename of "%s" ("%s") to "%s". 
Ignoring.\n' % ( - ename, edecl['typename'], typename)) + outmess(f'updatevars: attempt to change the typename of "{ename}" ' + f'("{edecl["typename"]}") to "{typename}". Ignoring.\n') if 'attrspec' not in edecl: edecl['attrspec'] = copy.copy(attrspec) elif attrspec: @@ -1778,8 +1776,9 @@ def updatevars(typespec, selector, attrspec, entitydecl): else: d1['array'] = d1['array'] + ',' + d1['len'] del d1['len'] - errmess('updatevars: "%s %s" is mapped to "%s %s(%s)"\n' % ( - typespec, e, typespec, ename, d1['array'])) + array_spec = d1['array'] + errmess(f'updatevars: "{typespec} {e}" is mapped to ' + f'"{typespec} {ename}({array_spec})"\n') if 'len' in d1: if typespec in ['complex', 'integer', 'logical', 'real']: @@ -1797,8 +1796,11 @@ def updatevars(typespec, selector, attrspec, entitydecl): if 'init' in d1: if '=' in edecl and (not edecl['='] == d1['init']): - outmess('updatevars: attempt to change the init expression of "%s" ("%s") to "%s". Ignoring.\n' % ( - ename, edecl['='], d1['init'])) + current_init = edecl['='] + new_init = d1['init'] + outmess('updatevars: attempt to change the init expression of ' + f'"{ename}" ("{current_init}") to "{new_init}". ' + f'Ignoring.\n') else: edecl['='] = d1['init'] @@ -1811,8 +1813,8 @@ def updatevars(typespec, selector, attrspec, entitydecl): for dm1 in edecl['attrspec']: if dm1[:9] == 'dimension' and dm1 != dm: del edecl['attrspec'][-1] - errmess('updatevars:%s: attempt to change %r to %r. Ignoring.\n' - % (ename, dm1, dm)) + errmess(f'updatevars:{ename}: attempt to change ' + f'{dm1!r} to {dm!r}. 
Ignoring.\n') break else: @@ -1879,8 +1881,7 @@ def cracktypespec(typespec, selector): if typename: typename = typename.group('name') else: - outmess('cracktypespec: no typename found in %s\n' % - (repr(typespec + selector))) + outmess(f'cracktypespec: no typename found in {typespec + selector}\n') else: outmess(f'cracktypespec: no selector used for {repr(selector)}\n') return kindselect, charselect, typename @@ -1980,8 +1981,8 @@ def get_useparameters(block, param_map=None): for usename, mapping in list(usedict.items()): usename = usename.lower() if usename not in f90modulevars: - outmess('get_useparameters: no module %s info used by %s\n' % - (usename, block.get('name'))) + outmess(f'get_useparameters: no module {usename} info used by ' + f'{block.get("name")}\n') continue mvars = f90modulevars[usename] params = get_parameters(mvars) @@ -1992,8 +1993,8 @@ def get_useparameters(block, param_map=None): errmess(f'get_useparameters: mapping for {mapping} not impl.\n') for k, v in list(params.items()): if k in param_map: - outmess('get_useparameters: overriding parameter %s with' - ' value from module %s\n' % (repr(k), repr(usename))) + outmess(f'get_useparameters: overriding parameter {k!r} with' + f' value from module {usename!r}\n') param_map[k] = v return param_map @@ -2561,9 +2562,8 @@ def _eval_scalar(value, params): except (NameError, SyntaxError, TypeError): return value except Exception as msg: - errmess('"%s" in evaluating %r ' - '(available names: %s)\n' - % (msg, value, list(params.keys()))) + errmess(f'"{msg}" in evaluating {value!r} ' + f'(available names: {list(params)})\n') return value @@ -2611,7 +2611,7 @@ def analyzevars(block): try: dep_matches[n] except KeyError: - dep_matches[n] = re.compile(r'.*\b%s\b' % (v), re.I).match + dep_matches[n] = re.compile(rf'.*\b{v}\b', re.I).match for n in svars: if n[0] in list(attrrules.keys()): vars[n] = setattrspec(vars[n], attrrules[n[0]]) @@ -2628,8 +2628,8 @@ def analyzevars(block): for l in 
implicitrules[ln0][k]: vars[n] = setattrspec(vars[n], l) elif n in block['args']: - outmess('analyzevars: typespec of variable %s is not defined in routine %s.\n' % ( - repr(n), block['name'])) + outmess(f"analyzevars: typespec of variable {n!r} is not defined " + f"in routine {block['name']}.\n") if 'charselector' in vars[n]: if 'len' in vars[n]['charselector']: l = vars[n]['charselector']['len'] @@ -3263,15 +3263,14 @@ def crack2fortrangen(block, tab='\n', as_interface=False): f2pyenhancements = '' if 'f2pyenhancements' in block: for k in list(block['f2pyenhancements'].keys()): - f2pyenhancements = '%s%s%s %s' % ( - f2pyenhancements, tab + tabchar, k, block['f2pyenhancements'][k]) + f2pyenhancements = (f"{f2pyenhancements}{tab + tabchar}{k} " + f"{block['f2pyenhancements'][k]}") intent_lst = block.get('intent', [])[:] if blocktype == 'function' and 'callback' in intent_lst: intent_lst.remove('callback') if intent_lst: - f2pyenhancements = '%s%sintent(%s) %s' %\ - (f2pyenhancements, tab + tabchar, - ','.join(intent_lst), name) + f2pyenhancements = (f"{f2pyenhancements}{tab + tabchar}" + f"intent({','.join(intent_lst)}) {name}") use = '' if 'use' in block: use = use2fortran(block['use'], tab + tabchar) @@ -3298,8 +3297,9 @@ def crack2fortrangen(block, tab='\n', as_interface=False): body = body + entry_stmts if blocktype == 'block data' and name == '_BLOCK_DATA_': name = '' - ret = '%s%s%s %s%s%s %s%s%s%s%s%s%send %s %s' % ( - tab, prefix, blocktype, name, args, result, mess, f2pyenhancements, use, vars, common, body, tab, blocktype, name) + ret = (f'{tab}{prefix}{blocktype} {name}{args}{result} ' + f'{mess}{f2pyenhancements}{use}{vars}{common}{body}{tab}end ' + f'{blocktype} {name}') return ret @@ -3506,11 +3506,11 @@ def crack2fortran(block): header = """! -*- f90 -*- ! Note: the context of this file is case sensitive. """ - footer = """ -! This file was auto-generated with f2py (version:%s). + footer = f""" +! 
This file was auto-generated with f2py (version:{f2py_version}). ! See: ! https://web.archive.org/web/20140822061353/http://cens.ioc.ee/projects/f2py2e -""" % (f2py_version) +""" return header + pyf + footer diff --git a/numpy/f2py/diagnose.py b/numpy/f2py/diagnose.py index 3e2c53b0ec1d..f6ddbf09ad9a 100644 --- a/numpy/f2py/diagnose.py +++ b/numpy/f2py/diagnose.py @@ -44,8 +44,8 @@ def run(): if has_f2py2e: try: - print('Found f2py2e version %r in %s' % - (f2py2e.__version__.version, f2py2e.__file__)) + print(f'Found f2py2e version {f2py2e.__version__.version!r} in ' + f'{f2py2e.__file__}') except Exception as msg: print('error:', msg) print('------') diff --git a/numpy/f2py/f2py2e.py b/numpy/f2py/f2py2e.py index eb5a39e088ff..98c2b7c65805 100644 --- a/numpy/f2py/f2py2e.py +++ b/numpy/f2py/f2py2e.py @@ -396,8 +396,8 @@ def buildmodules(lst): ret = {} for module, name in zip(modules, mnames): if name in isusedby: - outmess('\tSkipping module "%s" which is used by %s.\n' % ( - name, ','.join('"%s"' % s for s in isusedby[name]))) + using_modules = ','.join(f'"{s}"' for s in isusedby[name]) + outmess(f'\tSkipping module "{name}" which is used by {using_modules}.\n') else: um = [] if 'use' in module: @@ -498,8 +498,8 @@ def run_main(comline_list): if 'python module' not in options: errmess( 'Tip: If your original code is Fortran source then you must use -m option.\n') - raise TypeError('All blocks must be python module blocks but got %s' % ( - repr(plist['block']))) + raise TypeError('All blocks must be python module blocks but got ' + f'{plist["block"]!r}') auxfuncs.debugoptions = options['debug'] f90mod_rules.options = options auxfuncs.wrapfuncs = options['wrapfuncs'] diff --git a/numpy/f2py/f90mod_rules.py b/numpy/f2py/f90mod_rules.py index 5cd7637a95c2..0ecce40aa856 100644 --- a/numpy/f2py/f90mod_rules.py +++ b/numpy/f2py/f90mod_rules.py @@ -39,11 +39,11 @@ def findf90modules(m): return ret -fgetdims1 = """\ +fgetdims1 = f"""\ external f2pysetdata logical ns integer 
r,i - integer(%d) s(*) + integer({np.intp().itemsize}) s(*) ns = .FALSE. if (allocated(d)) then do i=1,r @@ -55,7 +55,7 @@ def findf90modules(m): deallocate(d) end if end if - if ((.not.allocated(d)).and.(s(1).ge.1)) then""" % np.intp().itemsize + if ((.not.allocated(d)).and.(s(1).ge.1)) then""" fgetdims2 = """\ end if @@ -136,8 +136,8 @@ def iadd(line, s=ihooks): s[0] = f'{s[0]}\n{line}' vrd = capi_maps.modsign2map(m) - cadd('static FortranDataDef f2py_%s_def[] = {' % (m['name'])) - dadd('\\subsection{Fortran 90/95 module \\texttt{%s}}\n' % (m['name'])) + cadd(f"static FortranDataDef f2py_{m['name']}_def[] = {{") + dadd(f"\\subsection{{Fortran 90/95 module \\texttt{{{m['name']}}}}}\n") if hasnote(m): note = m['note'] if isinstance(note, list): @@ -156,11 +156,12 @@ def iadd(line, s=ihooks): if not dms: dms = '-1' use_fgetdims2 = fgetdims2 - cadd('\t{"%s",%s,{{%s}},%s, %s},' % - (undo_rmbadname1(n), dm['rank'], dms, at, - capi_maps.get_elsize(var))) - dadd('\\item[]{{}\\verb@%s@{}}' % - (capi_maps.getarrdocsign(n, var))) + rank = dm['rank'] + elsize = capi_maps.get_elsize(var) + cadd(f'\t{{"{undo_rmbadname1(n)}",{rank},{{{{{dms}}}}},{at}, ' + f'{elsize}}},') + docsign = capi_maps.getarrdocsign(n, var) + dadd(f'\\item[]{{{{}}\\verb@{docsign}@{{}}}}') if hasnote(var): note = var['note'] if isinstance(note, list): @@ -178,8 +179,8 @@ def iadd(line, s=ihooks): fadd('integer flag\n') fhooks[0] = fhooks[0] + fgetdims1 dms = range(1, int(dm['rank']) + 1) - fadd(' allocate(d(%s))\n' % - (','.join(['s(%s)' % i for i in dms]))) + alloc_args = ','.join(f's({i})' for i in dms) + fadd(f' allocate(d({alloc_args}))\n') fhooks[0] = fhooks[0] + use_fgetdims2 fadd(f'end subroutine {fargs[-1]}') else: @@ -190,66 +191,70 @@ def iadd(line, s=ihooks): if onlyvars: dadd('\\end{description}') if hasbody(m): + m_name = m['name'] for b in m['body']: + b_name = b['name'] if not isroutine(b): outmess("f90mod_rules.buildhooks:" - f" skipping {b['block']} {b['name']}\n") + f" skipping 
{b['block']} {b_name}\n") continue - modobjs.append(f"{b['name']}()") - b['modulename'] = m['name'] + modobjs.append(f"{b_name}()") + b['modulename'] = m_name api, wrap = rules.buildapi(b) if isfunction(b): fhooks[0] = fhooks[0] + wrap - fargs.append(f"f2pywrap_{m['name']}_{b['name']}") + fargs.append(f"f2pywrap_{m_name}_{b_name}") ifargs.append(func2subr.createfuncwrapper(b, signature=1)) elif wrap: fhooks[0] = fhooks[0] + wrap - fargs.append(f"f2pywrap_{m['name']}_{b['name']}") + fargs.append(f"f2pywrap_{m_name}_{b_name}") ifargs.append( func2subr.createsubrwrapper(b, signature=1)) else: - fargs.append(b['name']) + fargs.append(b_name) mfargs.append(fargs[-1]) api['externroutines'] = [] ar = applyrules(api, vrd) ar['docs'] = [] ar['docshort'] = [] ret = dictappend(ret, ar) - cadd(('\t{"%s",-1,{{-1}},0,0,NULL,(void *)' - 'f2py_rout_#modulename#_%s_%s,' - 'doc_f2py_rout_#modulename#_%s_%s},') - % (b['name'], m['name'], b['name'], m['name'], b['name'])) - sargs.append(f"char *{b['name']}") + cadd(f'\t{{"{b_name}",-1,{{{{-1}}}},0,0,NULL,(void *)' + f'f2py_rout_#modulename#_{m_name}_{b_name},' + f'doc_f2py_rout_#modulename#_{m_name}_{b_name}}},') + sargs.append(f"char *{b_name}") sargsp.append('char *') - iadd(f"\tf2py_{m['name']}_def[i_f2py++].data = {b['name']};") + iadd(f"\tf2py_{m_name}_def[i_f2py++].data = {b_name};") cadd('\t{NULL}\n};\n') iadd('}') - ihooks[0] = 'static void f2py_setup_%s(%s) {\n\tint i_f2py=0;%s' % ( - m['name'], ','.join(sargs), ihooks[0]) - if '_' in m['name']: + m_name = m['name'] + sargs_str = ','.join(sargs) + ihooks[0] = (f'static void f2py_setup_{m_name}({sargs_str}) ' + f'{{\n\tint i_f2py=0;{ihooks[0]}') + if '_' in m_name: F_FUNC = 'F_FUNC_US' else: F_FUNC = 'F_FUNC' - iadd('extern void %s(f2pyinit%s,F2PYINIT%s)(void (*)(%s));' - % (F_FUNC, m['name'], m['name'].upper(), ','.join(sargsp))) - iadd('static void f2py_init_%s(void) {' % (m['name'])) - iadd('\t%s(f2pyinit%s,F2PYINIT%s)(f2py_setup_%s);' - % (F_FUNC, m['name'], 
m['name'].upper(), m['name'])) + sargsp_str = ','.join(sargsp) + iadd(f'extern void {F_FUNC}(f2pyinit{m_name},' + f'F2PYINIT{m_name.upper()})(void (*)({sargsp_str}));') + iadd(f'static void f2py_init_{m_name}(void) {{') + iadd(f'\t{F_FUNC}(f2pyinit{m_name},' + f'F2PYINIT{m_name.upper()})(f2py_setup_{m_name});') iadd('}\n') ret['f90modhooks'] = ret['f90modhooks'] + chooks + ihooks ret['initf90modhooks'] = [ '\t{', - '\t\tPyObject *tmp = PyFortranObject_New(f2py_%s_def,f2py_init_%s);' - % (m['name'], m['name']), - '\t\tPyDict_SetItemString(d, "%s", tmp);' % (m['name'],), + ('\t\tPyObject *tmp = ' + f'PyFortranObject_New(f2py_{m_name}_def,f2py_init_{m_name});'), + f'\t\tPyDict_SetItemString(d, "{m_name}", tmp);', '\t\tPy_XDECREF(tmp);', '\t}', ] + ret["initf90modhooks"] fadd('') - fadd(f"subroutine f2pyinit{m['name']}(f2pysetupfunc)") + fadd(f"subroutine f2pyinit{m_name}(f2pysetupfunc)") if mfargs: for a in undo_rmbadname(mfargs): - fadd(f"use {m['name']}, only : {a}") + fadd(f"use {m_name}, only : {a}") if ifargs: fadd(' '.join(['interface'] + ifargs)) fadd('end interface') @@ -258,13 +263,14 @@ def iadd(line, s=ihooks): for a in undo_rmbadname(efargs): fadd(f'external {a}') fadd(f"call f2pysetupfunc({','.join(undo_rmbadname(fargs))})") - fadd(f"end subroutine f2pyinit{m['name']}\n") + fadd(f"end subroutine f2pyinit{m_name}\n") dadd('\n'.join(ret['latexdoc']).replace( r'\subsection{', r'\subsubsection{')) ret['latexdoc'] = [] - ret['docs'].append(f"\"\t{m['name']} --- {','.join(undo_rmbadname(modobjs))}\"") + modobjs_str = ','.join(undo_rmbadname(modobjs)) + ret['docs'].append(f"\"\t{m_name} --- {modobjs_str}\"") ret['routine_defs'] = '' ret['doc'] = [] diff --git a/numpy/f2py/func2subr.py b/numpy/f2py/func2subr.py index 09b67f7c3085..3484a2e78457 100644 --- a/numpy/f2py/func2subr.py +++ b/numpy/f2py/func2subr.py @@ -297,8 +297,8 @@ def assubr(rout): if isfunction_wrap(rout): fortranname = getfortranname(rout) name = rout['name'] - outmess('\t\tCreating wrapper for 
Fortran function "%s"("%s")...\n' % ( - name, fortranname)) + outmess('\t\tCreating wrapper for Fortran function ' + f'"{name}"("{fortranname}")...\n') rout = copy.copy(rout) fname = name rname = fname @@ -322,8 +322,8 @@ def assubr(rout): if issubroutine_wrap(rout): fortranname = getfortranname(rout) name = rout['name'] - outmess('\t\tCreating wrapper for Fortran subroutine "%s"("%s")...\n' - % (name, fortranname)) + outmess('\t\tCreating wrapper for Fortran subroutine ' + f'"{name}"("{fortranname}")...\n') rout = copy.copy(rout) return rout, createsubrwrapper(rout) return rout, '' diff --git a/numpy/f2py/rules.py b/numpy/f2py/rules.py index 6ad941f98287..62d8c8af61f9 100644 --- a/numpy/f2py/rules.py +++ b/numpy/f2py/rules.py @@ -1443,8 +1443,7 @@ def buildmodule(m, um): with open(fn, 'w') as f: f.write('.. -*- rest -*-\n') f.write('\n'.join(ar['restdoc'])) - outmess(' ReST Documentation is saved to file "%s/%smodule.rest"\n' % - (options['buildpath'], vrd['modulename'])) + outmess(f' ReST Documentation is saved to file "{fn}"\n') if options['dolatexdoc']: fn = os.path.join( options['buildpath'], vrd['modulename'] + 'module.tex') @@ -1458,8 +1457,7 @@ def buildmodule(m, um): f.write('\n'.join(ar['latexdoc'])) if 'shortlatex' not in options: f.write('\\end{document}') - outmess(' Documentation is saved to file "%s/%smodule.tex"\n' % - (options['buildpath'], vrd['modulename'])) + outmess(f' Documentation is saved to file "{fn}"\n') if funcwrappers: wn = os.path.join(options['buildpath'], vrd['f2py_wrapper_output']) ret['fsrc'] = wn @@ -1527,8 +1525,10 @@ def buildapi(rout): var = rout['vars'] if ismoduleroutine(rout): - outmess(' Constructing wrapper function "%s.%s"...\n' % - (rout['modulename'], rout['name'])) + module_name = rout['modulename'] + name = rout['name'] + outmess(' Constructing wrapper function ' + f'"{module_name}.{name}"...\n') else: outmess(f" Constructing wrapper function \"{rout['name']}\"...\n") # Routine diff --git a/numpy/f2py/tests/util.py 
b/numpy/f2py/tests/util.py index 944b5ae6e084..220e092ebbad 100644 --- a/numpy/f2py/tests/util.py +++ b/numpy/f2py/tests/util.py @@ -161,7 +161,7 @@ def get_temp_module_name(): # Assume single-threaded, and the module dir usable only by this thread global _module_num get_module_dir() - name = "_test_ext_module_%d" % _module_num + name = f"_test_ext_module_{_module_num}" _module_num += 1 if name in sys.modules: # this should not be possible, but check anyway diff --git a/numpy/f2py/use_rules.py b/numpy/f2py/use_rules.py index 1e06f6c01a39..1cc7df18fa71 100644 --- a/numpy/f2py/use_rules.py +++ b/numpy/f2py/use_rules.py @@ -47,11 +47,12 @@ def buildusevars(m, r): revmap = {} if 'map' in r: for k in r['map'].keys(): - if r['map'][k] in revmap: - outmess('\t\t\tVariable "%s<=%s" is already mapped by "%s". Skipping.\n' % ( - r['map'][k], k, revmap[r['map'][k]])) + mapped_name = r['map'][k] + if mapped_name in revmap: + outmess(f'\t\t\tVariable "{mapped_name}<={k}" is already mapped by ' + f'"{revmap[mapped_name]}". 
Skipping.\n') else: - revmap[r['map'][k]] = k + revmap[mapped_name] = k if r.get('only'): for v in r['map'].keys(): if r['map'][v] in m['vars']: @@ -72,8 +73,8 @@ def buildusevars(m, r): def buildusevar(name, realname, vars, usemodulename): - outmess('\t\t\tConstructing wrapper function for variable "%s=>%s"...\n' % ( - name, realname)) + outmess('\t\t\tConstructing wrapper function for variable ' + f'"{name}=>{realname}"...\n') ret = {} vrd = {'name': name, 'realname': realname, From 309700f69c17d28a993910f3fe19cdbdf4ba7124 Mon Sep 17 00:00:00 2001 From: Noxaster <208382900+Noxaster@users.noreply.github.com> Date: Wed, 4 Mar 2026 10:47:46 +0100 Subject: [PATCH 1444/1718] STY: Simplify f-string expressions --- numpy/_core/getlimits.py | 4 ++-- numpy/_core/tests/test_deprecations.py | 5 ++--- numpy/_core/tests/test_print.py | 2 +- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/numpy/_core/getlimits.py b/numpy/_core/getlimits.py index fe08bbda43cb..ccdc8c2b1b89 100644 --- a/numpy/_core/getlimits.py +++ b/numpy/_core/getlimits.py @@ -449,5 +449,5 @@ def __str__(self): return fmt % {'dtype': self.dtype, 'min': self.min, 'max': self.max} def __repr__(self): - return (f"{self.__class__.__name__}" - f"(min={self.min}, max={self.max}, dtype={self.dtype})") + name = self.__class__.__name__ + return f'{name}(min={self.min}, max={self.max}, dtype={self.dtype})' diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index f6aab148b840..e4172b2cc1c3 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -86,9 +86,8 @@ def assert_deprecated(self, function, num=1, ignore_others=False, if warning.category is self.warning_cls: num_found += 1 elif not ignore_others: - raise AssertionError( - f"expected {self.warning_cls.__name__} but got: " - f"{warning.category}") + name = self.warning_cls.__name__ + raise AssertionError(f"expected {name} but got: {warning.category}") if num is not 
None and num_found != num: msg = f"{len(w_context)} warnings found but {num} expected." lst = [str(w) for w in w_context] diff --git a/numpy/_core/tests/test_print.py b/numpy/_core/tests/test_print.py index cd5b076a29e8..c6203ec27559 100644 --- a/numpy/_core/tests/test_print.py +++ b/numpy/_core/tests/test_print.py @@ -180,7 +180,7 @@ def test_scalar_format(): f"failed with val {val}, type {valtype}") except ValueError as e: assert_(False, - f"format raised exception (fmt='{fmat}', val={val!r}, " + f"format raised exception (fmt='{fmat}', {val=}, " f"type={valtype!r}, exc='{e}')") From d9ae7c9c2116b13a362cce10c49e0f47703125d8 Mon Sep 17 00:00:00 2001 From: Noxaster <208382900+Noxaster@users.noreply.github.com> Date: Wed, 4 Mar 2026 11:17:26 +0100 Subject: [PATCH 1445/1718] STY: Apply suggestions from code review Co-authored-by: Joren Hammudoglu --- numpy/_core/arrayprint.py | 2 +- numpy/_core/einsumfunc.py | 2 +- numpy/_core/tests/test_multiarray.py | 2 +- numpy/_core/tests/test_scalarmath.py | 4 ++-- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/numpy/_core/arrayprint.py b/numpy/_core/arrayprint.py index 957687488829..76e03a10f0ef 100644 --- a/numpy/_core/arrayprint.py +++ b/numpy/_core/arrayprint.py @@ -1419,7 +1419,7 @@ def _format_non_nat(self, x): unit=self.unit, timezone=self.timezone, casting=self.casting) - return f"'{datetime_str}'" + return repr(datetime_str) class TimedeltaFormat(_TimelikeFormat): diff --git a/numpy/_core/einsumfunc.py b/numpy/_core/einsumfunc.py index 3f06c6490f87..b44f0e56b951 100644 --- a/numpy/_core/einsumfunc.py +++ b/numpy/_core/einsumfunc.py @@ -801,7 +801,7 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False): if dimension_dict[char] == 1: dimension_dict[char] = dim elif dim not in (1, dimension_dict[char]): - raise ValueError(f"Size of label '{char}' for " + raise ValueError(f"Size of label {char!r} for " f"operand {tnum} ({dimension_dict[char]}) " f"does not match previous terms ({dim}).") else: 
diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 35224b4e4aa3..b861b9de574b 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -5939,7 +5939,7 @@ def test_file_position_after_tofile(self, tmp_path, param_filename): tmp_filename = normalize_filename(tmp_path, param_filename) for size in sizes: - err_msg = f"{size}" + err_msg = str(size) with open(tmp_filename, 'wb') as f: f.seek(size - 1) diff --git a/numpy/_core/tests/test_scalarmath.py b/numpy/_core/tests/test_scalarmath.py index f9ddb9b026fd..921d1d851273 100644 --- a/numpy/_core/tests/test_scalarmath.py +++ b/numpy/_core/tests/test_scalarmath.py @@ -69,8 +69,8 @@ def test_type_add(self): # skipped ahead based on the first argument, but that # does not produce properly symmetric results... assert_equal(c_scalar.dtype, c_array.dtype, - f"error with types ({k}/'{np.dtype(atype).char}' + " - f"{l}/'{np.dtype(btype).char}')") + f"error with types ({k}/{np.dtype(atype).char!r} + " + f"{l}/{np.dtype(btype).char!r})") def test_type_create(self): for atype in types: From 4d3b7b3c3cb4afa1887611fea2eb42229e0efdba Mon Sep 17 00:00:00 2001 From: Noxaster <208382900+Noxaster@users.noreply.github.com> Date: Wed, 4 Mar 2026 11:39:53 +0100 Subject: [PATCH 1446/1718] STY: Revert repr() usage --- numpy/_core/arrayprint.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/arrayprint.py b/numpy/_core/arrayprint.py index 76e03a10f0ef..957687488829 100644 --- a/numpy/_core/arrayprint.py +++ b/numpy/_core/arrayprint.py @@ -1419,7 +1419,7 @@ def _format_non_nat(self, x): unit=self.unit, timezone=self.timezone, casting=self.casting) - return repr(datetime_str) + return f"'{datetime_str}'" class TimedeltaFormat(_TimelikeFormat): From fcbb4c0c6e1da10d8317290b23534a60c624398e Mon Sep 17 00:00:00 2001 From: Antareep Sarkar Date: Wed, 4 Mar 2026 16:28:40 +0530 Subject: [PATCH 1447/1718] BUG: fix uint input for triu_indices 
and deprecate non-int (#30869)

resolves #29488
Does not raise an error when any or all of n, m and k are unsigned
integers by converting them to integers. If inputs do not convert via
`operator.index()` give a DeprecationWarning.
---
 doc/release/upcoming_changes/30869.change.rst |  3 ++
 .../upcoming_changes/30869.deprecation.rst    |  7 +++
 numpy/_core/tests/test_deprecations.py        | 27 ++++++++++++
 numpy/lib/_twodim_base_impl.py                | 43 ++++++++++++++++++-
 numpy/lib/tests/test_twodim_base.py           |  5 +++
 numpy/tests/test_warnings.py                  |  6 ++-
 6 files changed, 88 insertions(+), 3 deletions(-)
 create mode 100644 doc/release/upcoming_changes/30869.change.rst
 create mode 100644 doc/release/upcoming_changes/30869.deprecation.rst

diff --git a/doc/release/upcoming_changes/30869.change.rst b/doc/release/upcoming_changes/30869.change.rst
new file mode 100644
index 000000000000..b7ba697aaa7f
--- /dev/null
+++ b/doc/release/upcoming_changes/30869.change.rst
@@ -0,0 +1,3 @@
+``numpy.triu_indices`` now accepts ``unsigned integers``
+--------------------------------------------------------
+``numpy.triu_indices`` previously raised an error in some cases when ``unsigned integers`` were given as arguments. Now, it accepts them in all cases.
diff --git a/doc/release/upcoming_changes/30869.deprecation.rst b/doc/release/upcoming_changes/30869.deprecation.rst
new file mode 100644
index 000000000000..f193353cb737
--- /dev/null
+++ b/doc/release/upcoming_changes/30869.deprecation.rst
@@ -0,0 +1,7 @@
+Inputs other than ``integers`` are deprecated
+---------------------------------------------
+Inputs other than integers are deprecated for ``numpy.triu_indices`` and ``numpy.tril_indices``.
+
+The ``M``, ``k`` and ``N`` parameters of ``numpy.tri`` also deprecate non-integer arguments.
+
+The ``k`` parameter of both ``numpy.tril_indices_from`` and ``numpy.triu_indices_from`` deprecates non-integer arguments.
diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index b7a2444fbbc0..fc1ae9d56fc0 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -365,3 +365,30 @@ def test_round_emits_deprecation_warning_array(self): def test_round_emits_deprecation_warning_scalar(self): self.assert_deprecated(lambda: np.ma.round_(3.14)) + +class TestTriDeprecationWithNonInteger(_DeprecationTestCase): + # Deprecation in NumPy 2.5, 2026-03 + + def test_tri(self): + self.assert_deprecated(lambda: np.tri(M=2.3, k=3.14, N=np.object_(8))) + + def test_triu_indices(self): + self.assert_deprecated(lambda: np.triu_indices(n=np.float64(7.14), k=3.2)) + self.assert_deprecated(lambda: np.triu_indices(n=4, k=np.bool(0))) + + def test_tril_indices(self): + self.assert_deprecated(lambda: np.tril_indices(n=np.array(3.14))) + + def test_triu_indices_from(self): + a = np.array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11], + [12, 13, 14, 15]]) + self.assert_deprecated(lambda: np.triu_indices_from(a, k=np.object_(9.8))) + + def test_tril_indices_from(self): + a = np.array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11], + [12, 13, 14, 15]]) + self.assert_deprecated(lambda: np.tril_indices_from(a, k=9.8)) diff --git a/numpy/lib/_twodim_base_impl.py b/numpy/lib/_twodim_base_impl.py index f92bfe9ce104..2426134c4083 100644 --- a/numpy/lib/_twodim_base_impl.py +++ b/numpy/lib/_twodim_base_impl.py @@ -3,6 +3,8 @@ """ import functools import operator +import os +import warnings from numpy._core import iinfo, overrides from numpy._core._multiarray_umath import _array_converter @@ -385,7 +387,6 @@ def diagflat(v, k=0): return conv.wrap(res) - @finalize_array_function_like @set_module('numpy') def tri(N, M=None, k=0, dtype=float, *, like=None): @@ -429,11 +430,28 @@ def tri(N, M=None, k=0, dtype=float, *, like=None): [1., 1., 0., 0., 0.]]) """ + + warning_for_type = None + try: + N = operator.index(N) + except TypeError: 
+ warning_for_type = warning_for_type or type(N) + if like is not None: return _tri_with_like(like, N, M=M, k=k, dtype=dtype) if M is None: M = N + else: + try: + M = operator.index(M) + except TypeError: + warning_for_type = warning_for_type or type(M) + + try: + k = operator.index(k) + except TypeError: + warning_for_type = warning_for_type or type(k) m = greater_equal.outer(arange(N, dtype=_min_int(0, N)), arange(-k, M - k, dtype=_min_int(-k, M - k))) @@ -441,6 +459,15 @@ def tri(N, M=None, k=0, dtype=float, *, like=None): # Avoid making a copy if the requested type is already bool m = m.astype(dtype, copy=False) + # Deprecation in NumPy 2.5, 2026-03 + if warning_for_type: + warnings.warn( + (f"Cannot convert {(warning_for_type).__name__} safely to an integer." + "This will raise an error in future versions (Deprecated NumPy 2.5)"), + DeprecationWarning, + skip_file_prefixes=(os.path.dirname(__file__),), + ) + return m @@ -1132,6 +1159,20 @@ def triu_indices(n, k=0, m=None): [ 12, 13, 14, -1]]) """ + + try: + k = operator.index(k) + except TypeError: + # If same instance,then warning will be given in tri + if not isinstance(k, type(k - 1)): + # Deprecated in NumPy 2.5, 2026-03 + warnings.warn( + (f"Cannot convert {type(k).__name__} safely to an integer." 
+ "This will raise an error in future versions (Deprecated NumPy 2.5)"), + DeprecationWarning, + skip_file_prefixes=(os.path.dirname(__file__),), + ) + tri_ = ~tri(n, m, k=k - 1, dtype=bool) return tuple(broadcast_to(inds, tri_.shape)[tri_] diff --git a/numpy/lib/tests/test_twodim_base.py b/numpy/lib/tests/test_twodim_base.py index eb6aa69a443c..43ef45ad5d48 100644 --- a/numpy/lib/tests/test_twodim_base.py +++ b/numpy/lib/tests/test_twodim_base.py @@ -467,6 +467,7 @@ def test_triu_indices(self): iu2 = triu_indices(4, k=2) iu3 = triu_indices(4, m=5) iu4 = triu_indices(4, k=2, m=5) + iu5 = triu_indices(np.uint64(4), m=np.uint8(4)) a = np.array([[1, 2, 3, 4], [5, 6, 7, 8], @@ -510,6 +511,10 @@ def test_triu_indices(self): [11, 12, -1, -1, -10], [16, 17, 18, -1, -1]])) + # For unsigned integer + assert_array_equal(iu5, + (array([0, 0, 0, 0, 1, 1, 1, 2, 2, 3]), + array([0, 1, 2, 3, 1, 2, 3, 2, 3, 3]))) class TestTrilIndicesFrom: def test_exceptions(self): diff --git a/numpy/tests/test_warnings.py b/numpy/tests/test_warnings.py index 7efa2a1d1896..150327e85c61 100644 --- a/numpy/tests/test_warnings.py +++ b/numpy/tests/test_warnings.py @@ -47,14 +47,16 @@ def visit_Call(self, node): # This file return - # See if stacklevel exists: + # See if stacklevel or skip_file_prefixes exists: if len(node.args) == 3: return args = {kw.arg for kw in node.keywords} if "stacklevel" in args: return + if "skip_file_prefixes" in args: + return raise AssertionError( - "warnings should have an appropriate stacklevel; " + "warnings should have an appropriate stacklevel or skip_file_prefixes; " f"found in {self.__filename} on line {node.lineno}") From 6cd2cb2e4abb6105307d88c1099f8116344e8052 Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Wed, 4 Mar 2026 12:06:02 +0100 Subject: [PATCH 1448/1718] MAINT: Refactor PyUFunc_GenericReduction (#30929) Extract the code to prepare the axis into a helper method. This is in preparation of PRs to improve performance numpy ufuncs. 
--- numpy/_core/src/umath/ufunc_object.c | 131 +++++++++++++++------------ 1 file changed, 71 insertions(+), 60 deletions(-) diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index c4c5907e4cda..6c575752a6bc 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -3427,6 +3427,72 @@ _set_full_args_out(int nout, PyObject *out_obj, ufunc_full_args *full_args) return 0; } +static inline int +/* Convert the 'axis' parameter into a list of axes */ +_parse_axis(PyObject *axes_obj, int ndim, int *axes) +{ + int naxes = 0; + if (axes_obj == NULL) { + /* apply defaults */ + if (ndim == 0) { + naxes = 0; + } + else { + naxes = 1; + axes[0] = 0; + } + } + else if (axes_obj == Py_None) { + /* Convert 'None' into all the axes */ + naxes = ndim; + for (int i = 0; i < naxes; ++i) { + axes[i] = i; + } + } + else if (PyTuple_Check(axes_obj)) { + naxes = PyTuple_Size(axes_obj); + if (naxes < 0 || naxes > NPY_MAXDIMS) { + PyErr_SetString(PyExc_ValueError, + "too many values for 'axis'"); + return -1; + } + for (int i = 0; i < naxes; ++i) { + PyObject *tmp = PyTuple_GET_ITEM(axes_obj, i); + int axis = PyArray_PyIntAsInt(tmp); + if (error_converting(axis)) { + return -1; + } + if (check_and_adjust_axis(&axis, ndim) < 0) { + return -1; + } + axes[i] = (int)axis; + } + } + else { + /* Try to interpret axis as an integer */ + int axis = PyArray_PyIntAsInt(axes_obj); + /* TODO: PyNumber_Index would be good to use here */ + if (error_converting(axis)) { + return -1; + } + /* + * As a special case for backwards compatibility in 'sum', + * 'prod', et al, also allow a reduction for scalars even + * though this is technically incorrect. 
+ */ + if (ndim == 0 && (axis == 0 || axis == -1)) { + naxes = 0; + } + else if (check_and_adjust_axis(&axis, ndim) < 0) { + return -1; + } + else { + axes[0] = (int)axis; + naxes = 1; + } + } + return naxes; +} /* forward declaration */ static PyArray_DTypeMeta * _get_dtype(PyObject *dtype_obj); @@ -3440,7 +3506,7 @@ static PyObject * PyUFunc_GenericReduction(PyUFuncObject *ufunc, PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames, int operation) { - int i, naxes=0, ndim; + int ndim; int axes[NPY_MAXDIMS]; ufunc_full_args full_args = {NULL, NULL}; @@ -3625,65 +3691,10 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, ndim = PyArray_NDIM(mp); - /* Convert the 'axis' parameter into a list of axes */ - if (axes_obj == NULL) { - /* apply defaults */ - if (ndim == 0) { - naxes = 0; - } - else { - naxes = 1; - axes[0] = 0; - } - } - else if (axes_obj == Py_None) { - /* Convert 'None' into all the axes */ - naxes = ndim; - for (i = 0; i < naxes; ++i) { - axes[i] = i; - } - } - else if (PyTuple_Check(axes_obj)) { - naxes = PyTuple_Size(axes_obj); - if (naxes < 0 || naxes > NPY_MAXDIMS) { - PyErr_SetString(PyExc_ValueError, - "too many values for 'axis'"); - goto fail; - } - for (i = 0; i < naxes; ++i) { - PyObject *tmp = PyTuple_GET_ITEM(axes_obj, i); - int axis = PyArray_PyIntAsInt(tmp); - if (error_converting(axis)) { - goto fail; - } - if (check_and_adjust_axis(&axis, ndim) < 0) { - goto fail; - } - axes[i] = (int)axis; - } - } - else { - /* Try to interpret axis as an integer */ - int axis = PyArray_PyIntAsInt(axes_obj); - /* TODO: PyNumber_Index would be good to use here */ - if (error_converting(axis)) { - goto fail; - } - /* - * As a special case for backwards compatibility in 'sum', - * 'prod', et al, also allow a reduction for scalars even - * though this is technically incorrect. 
- */ - if (ndim == 0 && (axis == 0 || axis == -1)) { - naxes = 0; - } - else if (check_and_adjust_axis(&axis, ndim) < 0) { - goto fail; - } - else { - axes[0] = (int)axis; - naxes = 1; - } + /* Extract the axis argument */ + int naxes = _parse_axis(axes_obj, ndim, axes); + if (naxes < 0) { + goto fail; } switch(operation) { From 45f7d0df718477f201579bd793e41853501b165d Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Wed, 4 Mar 2026 12:14:12 +0100 Subject: [PATCH 1449/1718] ENH: Replace PyTyple_Pack with PyTuple_FromArray (#30928) --- numpy/_core/src/umath/ufunc_object.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index 6c575752a6bc..9c0dce55d1bf 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -1384,7 +1384,7 @@ _parse_axes_arg(PyUFuncObject *ufunc, int op_core_num_dims[], PyObject *axes, Py_INCREF(op_axes_tuple); } else if (op_ncore == 1) { - op_axes_tuple = PyTuple_Pack(1, op_axes_tuple); + op_axes_tuple = PyTuple_FromArray(&op_axes_tuple, 1); if (op_axes_tuple == NULL) { return -1; } @@ -3412,7 +3412,7 @@ _set_full_args_out(int nout, PyObject *out_obj, ufunc_full_args *full_args) return 0; } /* Can be an array if it only has one output */ - full_args->out = PyTuple_Pack(1, out_obj); + full_args->out = PyTuple_FromArray(&out_obj, 1); if (full_args->out == NULL) { return -1; } @@ -3568,7 +3568,8 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, goto fail; } /* Prepare inputs for PyUfunc_CheckOverride */ - full_args.in = PyTuple_Pack(2, op, indices_obj); + PyObject *reduce_in[] = {op, indices_obj}; + full_args.in = PyTuple_FromArray(reduce_in, 2); if (full_args.in == NULL) { goto fail; } @@ -3586,7 +3587,7 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, goto fail; } /* Prepare input for PyUfunc_CheckOverride */ - full_args.in = PyTuple_Pack(1, op); + full_args.in = PyTuple_FromArray(&op, 1); if 
(full_args.in == NULL) { goto fail; } @@ -3607,7 +3608,7 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, goto fail; } /* Prepare input for PyUfunc_CheckOverride */ - full_args.in = PyTuple_Pack(1, op); + full_args.in = PyTuple_FromArray(&op, 1); if (full_args.in == NULL) { goto fail; } @@ -3623,7 +3624,7 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, goto fail; } if (out_obj != Py_None) { - full_args.out = PyTuple_Pack(1, out_obj); + full_args.out = PyTuple_FromArray(&out_obj, 1); if (full_args.out == NULL) { goto fail; } @@ -6433,7 +6434,8 @@ py_resolve_dtypes_generic(PyUFuncObject *ufunc, npy_bool return_context, ((PyArray_Descr **)context->descriptors)[i] = operation_descrs[i]; } - result = PyTuple_Pack(2, result_dtype_tuple, capsule); + PyObject *result_items[] = {result_dtype_tuple, capsule}; + result = PyTuple_FromArray(result_items, 2); /* cleanup and return */ Py_DECREF(capsule); From 70e82ab5165c94e2c74469bf2f213fbf6e8572de Mon Sep 17 00:00:00 2001 From: Brett Graham Date: Wed, 4 Mar 2026 13:04:03 -0500 Subject: [PATCH 1450/1718] BUG: Fix tracemalloc tracking of allocations (#30902) Co-authored-by: Nathan Goldbaum --- numpy/_core/src/multiarray/alloc.cpp | 98 ++++++++++++++++++++++++---- numpy/_core/tests/test_multiarray.py | 17 ++++- 2 files changed, 100 insertions(+), 15 deletions(-) diff --git a/numpy/_core/src/multiarray/alloc.cpp b/numpy/_core/src/multiarray/alloc.cpp index ca2027403b96..b3e73211f6a2 100644 --- a/numpy/_core/src/multiarray/alloc.cpp +++ b/numpy/_core/src/multiarray/alloc.cpp @@ -294,7 +294,15 @@ PyDataMem_NEW(size_t size) assert(size != 0); result = PyMem_RawMalloc(size); - int ret = PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, size); + if (result == NULL) { + return NULL; + } + int ret = PyTraceMalloc_Untrack(PYMEM_DOMAIN_RAW, (npy_uintp)result); + if (ret == -2) { + // tracemalloc is disabled + return result; + } + ret = PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, size); if (ret == -1) { 
PyMem_RawFree(result); return NULL; @@ -311,7 +319,15 @@ PyDataMem_NEW_ZEROED(size_t nmemb, size_t size) void *result; result = PyMem_RawCalloc(nmemb, size); - int ret = PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, nmemb * size); + if (result == NULL) { + return NULL; + } + int ret = PyTraceMalloc_Untrack(PYMEM_DOMAIN_RAW, (npy_uintp)result); + if (ret == -2) { + // tracemalloc is disabled + return result; + } + ret = PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, nmemb * size); if (ret == -1) { PyMem_RawFree(result); return NULL; @@ -338,9 +354,19 @@ PyDataMem_RENEW(void *ptr, size_t size) void *result; assert(size != 0); - PyTraceMalloc_Untrack(NPY_TRACE_DOMAIN, (npy_uintp)ptr); + result = PyMem_RawRealloc(ptr, size); - int ret = PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, size); + if (result == NULL) { + // ptr is still valid here + return NULL; + } + int ret = PyTraceMalloc_Untrack(NPY_TRACE_DOMAIN, (npy_uintp)ptr); + if (ret == -2) { + // tracemalloc is disabled + return result; + } + ret = PyTraceMalloc_Untrack(PYMEM_DOMAIN_RAW, (npy_uintp)result); + ret = PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, size); if (ret == -1) { PyMem_RawFree(result); return NULL; @@ -354,7 +380,16 @@ PyDataMem_RENEW(void *ptr, size_t size) static inline void * default_malloc(void *NPY_UNUSED(ctx), size_t size) { - return _npy_alloc_cache(size, 1, NBUCKETS, datacache, &PyMem_RawMalloc); + void *result; + result = _npy_alloc_cache(size, 1, NBUCKETS, datacache, &PyMem_RawMalloc); + if (result == NULL) { + // alloc failed, nothing more to do + return NULL; + } + // untrack the allocation from tracemalloc + // ignore return value, since we'd early return either way + PyTraceMalloc_Untrack(PYMEM_DOMAIN_RAW, (npy_uintp)result); + return result; } // The default data mem allocator calloc routine does not make use of a ctx. 
@@ -367,15 +402,23 @@ default_calloc(void *NPY_UNUSED(ctx), size_t nelem, size_t elsize) size_t sz = nelem * elsize; if (sz < NBUCKETS) { p = _npy_alloc_cache(sz, 1, NBUCKETS, datacache, &PyMem_RawMalloc); - if (p) { - memset(p, 0, sz); + if (p == NULL) { + return NULL; + } + memset(p, 0, sz); + } else { + p = PyMem_RawCalloc(nelem, elsize); + if (p == NULL) { + return NULL; } - return p; - } - p = PyMem_RawCalloc(nelem, elsize); - if (p) { indicate_hugepages(p, sz); } + // untrack the allocation from tracemalloc + int ret = PyTraceMalloc_Untrack(PYMEM_DOMAIN_RAW, (npy_uintp)p); + if (ret == -2) { + // tracemalloc is disabled + return p; + } return p; } @@ -385,7 +428,19 @@ default_calloc(void *NPY_UNUSED(ctx), size_t nelem, size_t elsize) static inline void * default_realloc(void *NPY_UNUSED(ctx), void *ptr, size_t new_size) { - return PyMem_RawRealloc(ptr, new_size); + void *result; + result = PyMem_RawRealloc(ptr, new_size); + if (result == NULL) { + // realloc failed, nothing more to do + return NULL; + } + // untrack the reallocation from tracemalloc + int ret = PyTraceMalloc_Untrack(PYMEM_DOMAIN_RAW, (npy_uintp)result); + if (ret == -2) { + // tracemalloc is disabled + return result; + } + return result; } // The default data mem allocator free routine does not make use of a ctx. 
@@ -428,6 +483,9 @@ PyDataMem_UserNEW(size_t size, PyObject *mem_handler) } assert(size != 0); result = handler->allocator.malloc(handler->allocator.ctx, size); + if (result == NULL) { + return NULL; + } int ret = PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, size); if (ret == -1) { handler->allocator.free(handler->allocator.ctx, result, size); @@ -446,6 +504,9 @@ PyDataMem_UserNEW_ZEROED(size_t nmemb, size_t size, PyObject *mem_handler) return NULL; } result = handler->allocator.calloc(handler->allocator.ctx, nmemb, size); + if (result == NULL) { + return NULL; + } int ret = PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, nmemb * size); if (ret == -1) { handler->allocator.free(handler->allocator.ctx, result, size); @@ -465,6 +526,7 @@ PyDataMem_UserFREE(void *ptr, size_t size, PyObject *mem_handler) "Could not get pointer to 'mem_handler' from PyCapsule"); return; } + // ignore -2 return when tracemalloc is disabled PyTraceMalloc_Untrack(NPY_TRACE_DOMAIN, (npy_uintp)ptr); handler->allocator.free(handler->allocator.ctx, ptr, size); } @@ -480,9 +542,17 @@ PyDataMem_UserRENEW(void *ptr, size_t size, PyObject *mem_handler) } assert(size != 0); - PyTraceMalloc_Untrack(NPY_TRACE_DOMAIN, (npy_uintp)ptr); result = handler->allocator.realloc(handler->allocator.ctx, ptr, size); - int ret = PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, size); + if (result == NULL) { + // ptr is still valid here + return NULL; + } + int ret = PyTraceMalloc_Untrack(NPY_TRACE_DOMAIN, (npy_uintp)ptr); + if (ret == -2) { + // tracemalloc is disabled + return result; + } + ret = PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, size); if (ret == -1) { handler->allocator.free(handler->allocator.ctx, result, size); return NULL; diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 1b76c7100b7f..e485f4578910 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -15,9 +15,10 @@ 
import re import sys import tempfile +import tracemalloc import warnings import weakref -from contextlib import contextmanager +from contextlib import ExitStack, contextmanager # Need to test an object that does not fully implement math interface from datetime import datetime, timedelta @@ -1040,6 +1041,20 @@ def test_malloc_fails(self): with assert_raises(np._core._exceptions._ArrayMemoryError): np.empty(np.iinfo(np.intp).max, dtype=np.uint8) + @pytest.mark.thread_unsafe(reason="tracemalloc is not thread-safe") + def test_tracemalloc(self): + with ExitStack() as ctx: + if not tracemalloc.is_tracing(): + tracemalloc.start() + ctx.callback(tracemalloc.stop) + pre_snapshot = tracemalloc.take_snapshot() + arr = np.zeros(1000000, dtype="uint8") + post_snapshot = tracemalloc.take_snapshot() + diff = post_snapshot.compare_to(pre_snapshot, "filename") + allocated_bytes = sum(d.size_diff for d in diff) + # Allow for some non-data allocations + assert_allclose(allocated_bytes, arr.nbytes, 1000) + def test_zeros(self): types = np.typecodes['AllInteger'] + np.typecodes['AllFloat'] for dt in types: From ca46bafdef4f6b993f560b64ec7cd2da33198c05 Mon Sep 17 00:00:00 2001 From: mattip Date: Wed, 4 Mar 2026 20:45:11 +0200 Subject: [PATCH 1451/1718] DOC: add AI policy [skip actions][skip cirrus] --- doc/source/dev/ai_policy.rst | 79 ++++++++++++++++++++++++++++++++++++ doc/source/dev/index.rst | 1 + 2 files changed, 80 insertions(+) create mode 100644 doc/source/dev/ai_policy.rst diff --git a/doc/source/dev/ai_policy.rst b/doc/source/dev/ai_policy.rst new file mode 100644 index 000000000000..7be4724ec535 --- /dev/null +++ b/doc/source/dev/ai_policy.rst @@ -0,0 +1,79 @@ +.. _ai_policy: + +AI Policy +========= + +"AI" herein refers to generative AI tools like large language models (LLMs) +that can generate, edit, and review software code, create and manipulate +images, or generate human-like communication. 
+ +Responsibility +-------------- + +You are responsible for any code you submit to NumPy's repositories, regardless +of whether it was manually written or generated by AI. You must understand and be able +to explain the code you submit as well as the existing related code. It is not +acceptable to submit a patch that you cannot understand and explain yourself. +In explaining your contribution, do not use AI to automatically generate +descriptions. + +Disclosure +---------- + +You must disclose whether AI has been used to assist in the development of +your pull request. +If so, you must document which tool(s) have been used, how they were used, +and specify what code or text is AI generated. We will reject any pull request +that does not include the disclosure. + +Code Quality +------------ + +Contributors are expected to submit code that meets NumPy's standards. We will +reject pull requests that we deem being "`AI slop`_". Do not waste developers' +time by submitting code that is fully or mostly generated by AI, and doesn't +meet our standards. + +.. _AI slop: https://en.wikipedia.org/wiki/AI_slop + +Copyright +--------- + +All code in NumPy is released under the BSD 3-clause copyright license. +Contributors to NumPy license their code under the same license when it is +included into NumPy's version control repository. That means contributors must +own the copyright of any code submitted to NumPy or must include the BSD +3-clause compatible open source license(s) associated with the submitted code +in the patch. Code generated by AI may infringe on copyright and it is the +submitter's responsibility to not infringe. We reserve the right to reject any pull +requests, AI generated or not, where the copyright is in question. + +Communication +------------- + +When interacting with developers (forum, discussions, +issues, pull requests, etc.) do not use AI to speak for you, except for +translation or grammar editing. 
If the developers want to chat with a chatbot, +they can do so themselves. Human-to-human communication is essential for an +open source community to thrive. + +AI Agents +--------- +The use of an AI agent that writes code and then submits a pull request autonomously is +not permitted. A human must check any generated code and submit a pull request according +to the 'Responsibility' section above. + +Other Resources +--------------- +While these do not formally form part of NumPy's AI policy, the following resources +may be helpful in understanding some pitfalls associated with using AI to contribute to +NumPy: + +- https://llvm.org/docs/AIToolPolicy.html +- https://github.com/melissawm/open-source-ai-contribution-policies +- https://blog.scientific-python.org/scientific-python/community-considerations-around-ai/ + +Acknowledgements +---------------- +We thank the SciPy developers for their AI policy, upon which this document is largely +based. diff --git a/doc/source/dev/index.rst b/doc/source/dev/index.rst index 625e69712030..bee0bf746ab4 100644 --- a/doc/source/dev/index.rst +++ b/doc/source/dev/index.rst @@ -254,6 +254,7 @@ The rest of the story .. 
toctree:: :maxdepth: 2 + ai_policy development_environment spin howto_build_docs From eab8434ab17139e1085a404439bc9817a78b773c Mon Sep 17 00:00:00 2001 From: Scott Shambaugh Date: Wed, 4 Mar 2026 19:26:37 -0700 Subject: [PATCH 1452/1718] CI: Checksum Intel downloads --- .github/workflows/linux_simd.yml | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/.github/workflows/linux_simd.yml b/.github/workflows/linux_simd.yml index 46feb4abb3e4..959ee6da041a 100644 --- a/.github/workflows/linux_simd.yml +++ b/.github/workflows/linux_simd.yml @@ -197,7 +197,10 @@ jobs: - name: Install Intel SDE run: | - curl -o /tmp/sde.tar.xz https://downloadmirror.intel.com/859732/sde-external-9.58.0-2025-06-16-lin.tar.xz + SDE_URL="https://downloadmirror.intel.com/859732/sde-external-9.58.0-2025-06-16-lin.tar.xz" + SDE_SHA256="f849acecad4c9b108259c643b2688fd65c35723cd23368abe5dd64b917cc18c0" + curl -o /tmp/sde.tar.xz "$SDE_URL" + echo "$SDE_SHA256 /tmp/sde.tar.xz" | sha256sum -c - mkdir /tmp/sde && tar -xvf /tmp/sde.tar.xz -C /tmp/sde/ sudo mv /tmp/sde/* /opt/sde && sudo ln -s /opt/sde/sde64 /usr/bin/sde @@ -247,7 +250,10 @@ jobs: - name: Install Intel SDE run: | - curl -o /tmp/sde.tar.xz https://downloadmirror.intel.com/859732/sde-external-9.58.0-2025-06-16-lin.tar.xz + SDE_URL="https://downloadmirror.intel.com/859732/sde-external-9.58.0-2025-06-16-lin.tar.xz" + SDE_SHA256="f849acecad4c9b108259c643b2688fd65c35723cd23368abe5dd64b917cc18c0" + curl -o /tmp/sde.tar.xz "$SDE_URL" + echo "$SDE_SHA256 /tmp/sde.tar.xz" | sha256sum -c - mkdir /tmp/sde && tar -xvf /tmp/sde.tar.xz -C /tmp/sde/ sudo mv /tmp/sde/* /opt/sde && sudo ln -s /opt/sde/sde64 /usr/bin/sde From c635bc8ac885da029ef293559e93d1b628732b89 Mon Sep 17 00:00:00 2001 From: Scott Shambaugh Date: Wed, 4 Mar 2026 19:33:53 -0700 Subject: [PATCH 1453/1718] CI: Pin versions for unpinned CI downloads --- .github/workflows/compiler_sanitizers.yml | 2 +- .github/workflows/linux_qemu.yml | 4 +++- 
.github/workflows/mypy_primer.yml | 2 +- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/.github/workflows/compiler_sanitizers.yml b/.github/workflows/compiler_sanitizers.yml index 013b1a3b7831..138834d0ceb2 100644 --- a/.github/workflows/compiler_sanitizers.yml +++ b/.github/workflows/compiler_sanitizers.yml @@ -38,7 +38,7 @@ jobs: persist-credentials: false - name: Set up pyenv run: | - git clone https://github.com/pyenv/pyenv.git "$HOME/.pyenv" + git clone --branch v2.6.25 --depth 1 https://github.com/pyenv/pyenv.git "$HOME/.pyenv" PYENV_ROOT="$HOME/.pyenv" PYENV_BIN="$PYENV_ROOT/bin" PYENV_SHIMS="$PYENV_ROOT/shims" diff --git a/.github/workflows/linux_qemu.yml b/.github/workflows/linux_qemu.yml index 318f5591c2ac..7dff21dcb782 100644 --- a/.github/workflows/linux_qemu.yml +++ b/.github/workflows/linux_qemu.yml @@ -110,7 +110,9 @@ jobs: -v /:/host -v $(pwd):/numpy ${DOCKER_CONTAINER} /bin/bash -c " apt update && apt install -y cmake git curl ca-certificates && - curl -LsSf https://astral.sh/uv/install.sh | sh && + curl -LsSf https://astral.sh/uv/0.10.8/install.sh -o /tmp/uv-install.sh && + echo 'eae5e1dae89cd0b74d357f549ccd6faa94b2ad6c1d89d78972a625655a4556ae /tmp/uv-install.sh' | sha256sum -c - && + sh /tmp/uv-install.sh && export PATH="/root/.local/bin:$PATH" && mkdir -p /lib64 && ln -s /host/lib64/ld-* /lib64/ && ln -s /host/lib/x86_64-linux-gnu /lib/x86_64-linux-gnu && diff --git a/.github/workflows/mypy_primer.yml b/.github/workflows/mypy_primer.yml index 852f36a966ea..0bf64fe1e9ef 100644 --- a/.github/workflows/mypy_primer.yml +++ b/.github/workflows/mypy_primer.yml @@ -36,7 +36,7 @@ jobs: with: python-version: "3.12" - name: Install dependencies - run: pip install git+https://github.com/hauntsaninja/mypy_primer.git + run: pip install git+https://github.com/hauntsaninja/mypy_primer.git@05f73ec3d85bb4f55676f3c57f2c3e5136228977 # HEAD of master on 2026-03-04, no tags/releases available - name: Run mypy_primer shell: bash run: | From 
bd80fa4ed4f4e4c6b554bd1a9c28d8e3188c8021 Mon Sep 17 00:00:00 2001 From: Scott Shambaugh Date: Wed, 4 Mar 2026 19:46:20 -0700 Subject: [PATCH 1454/1718] CI: Checksum versioned CI containers --- .github/workflows/compiler_sanitizers.yml | 4 ++-- .github/workflows/linux_qemu.yml | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/compiler_sanitizers.yml b/.github/workflows/compiler_sanitizers.yml index 138834d0ceb2..5a0d9d36879b 100644 --- a/.github/workflows/compiler_sanitizers.yml +++ b/.github/workflows/compiler_sanitizers.yml @@ -83,7 +83,7 @@ jobs: if: github.repository == 'numpy/numpy' runs-on: ubuntu-latest container: - image: ghcr.io/nascheme/numpy-tsan:3.14t + image: ghcr.io/nascheme/numpy-tsan:3.14t@sha256:1ec427e2e480cc373d0fecbf21b8ac590fb94119fb81d18489945cd0afd04dd3 options: --shm-size=2g # increase memory for large matrix ops steps: @@ -114,7 +114,7 @@ jobs: if: github.repository == 'numpy/numpy' runs-on: ubuntu-latest container: - image: ghcr.io/nascheme/cpython-asan:3.14 + image: ghcr.io/nascheme/cpython-asan:3.14@sha256:b5bfbcdca07e86d22afaf66e3b57959e1c44756452dcabda1efde7711fd0bdde options: --shm-size=2g # increase memory for large matrix ops steps: diff --git a/.github/workflows/linux_qemu.yml b/.github/workflows/linux_qemu.yml index 7dff21dcb782..7d0ec754620d 100644 --- a/.github/workflows/linux_qemu.yml +++ b/.github/workflows/linux_qemu.yml @@ -89,7 +89,7 @@ jobs: - name: Initialize binfmt_misc for qemu-user-static run: | # see https://hub.docker.com/r/tonistiigi/binfmt for available versions - docker run --rm --privileged tonistiigi/binfmt:qemu-v9.2.2-52 --install all + docker run --rm --privileged tonistiigi/binfmt:qemu-v9.2.2-52@sha256:1b804311fe87047a4c96d38b4b3ef6f62fca8cd125265917a9e3dc3c996c39e6 --install all - name: Install GCC cross-compilers run: | @@ -177,7 +177,7 @@ jobs: - [ "loongarch64", "loongarch64-linux-gnu", - "cnclarechen/numpy-loong64-debian:v1", + 
"cnclarechen/numpy-loong64-debian:v1@sha256:1f35e614955fa9cba890172a73a068120f25f1cfcd59ad5521995e37cb7e2c3f", "-Dallow-noblas=true", "test_kind or test_multiarray or test_simd or test_umath or test_ufunc", "loong64" @@ -199,7 +199,7 @@ jobs: - name: Initialize binfmt_misc for qemu-user-static run: | - docker run --rm --privileged tonistiigi/binfmt:qemu-v10.0.4-56 --install all + docker run --rm --privileged tonistiigi/binfmt:qemu-v10.0.4-56@sha256:30cc9a4d03765acac9be2ed0afc23af1ad018aed2c28ea4be8c2eb9afe03fbd1 --install all - name: Install GCC cross-compilers run: | From d92083a4590ace4ac838f16e035fa209e8295c8d Mon Sep 17 00:00:00 2001 From: Scott Shambaugh Date: Wed, 4 Mar 2026 19:57:18 -0700 Subject: [PATCH 1455/1718] CI: Inline PR labeling logic to avoid 3rd party GITHUB_TOKEN access --- .github/workflows/labeler.yml | 29 +++++++++++++++++++++++++---- 1 file changed, 25 insertions(+), 4 deletions(-) diff --git a/.github/workflows/labeler.yml b/.github/workflows/labeler.yml index 7d2edc869893..be3e2a925a72 100644 --- a/.github/workflows/labeler.yml +++ b/.github/workflows/labeler.yml @@ -7,13 +7,34 @@ permissions: {} jobs: pr-labeler: + if: github.repository == 'numpy/numpy' runs-on: ubuntu-latest permissions: pull-requests: write # to add labels steps: - name: Label the PR - uses: gerrymanoim/pr-prefix-labeler@c8062327f6de59a9ae1c19f7f07cacd0b976b6fa # v3 continue-on-error: true - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - if: github.repository == 'numpy/numpy' + uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 + with: + script: | + const yaml = require('js-yaml'); + const {data} = await github.rest.repos.getContent({ + owner: context.repo.owner, + repo: context.repo.repo, + path: '.github/pr-prefix-labeler.yml', + }); + const prefixToLabel = yaml.load( + Buffer.from(data.content, data.encoding).toString() + ); + const title = context.payload.pull_request.title; + for (const [prefix, label] of Object.entries(prefixToLabel)) 
{ + if (title.startsWith(prefix)) { + await github.rest.issues.addLabels({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: context.payload.pull_request.number, + labels: [label], + }); + break; + } + } From b0f7520fe8b2b7abfd3c74cb689a30bafac848ef Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 5 Mar 2026 12:07:05 +0100 Subject: [PATCH 1456/1718] MAINT: Don't use vulture 2.15, it has false positives --- .github/workflows/linux.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 7522638e01ad..a2b5a82e59bc 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -357,7 +357,7 @@ jobs: run: | pip install -r requirements/build_requirements.txt pip install -r requirements/test_requirements.txt - pip install vulture + pip install "vulture!=2.15" - name: Build and install NumPy run: | # Install using the fastest way to build (no BLAS, no SIMD) From c3b0e2a409497640a37d62b9a7b7c9334e74355f Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Thu, 5 Mar 2026 13:43:52 +0200 Subject: [PATCH 1457/1718] DOC: grammar [skip actions][skip cirrus] Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- doc/source/dev/ai_policy.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/source/dev/ai_policy.rst b/doc/source/dev/ai_policy.rst index 7be4724ec535..7675af5bfab0 100644 --- a/doc/source/dev/ai_policy.rst +++ b/doc/source/dev/ai_policy.rst @@ -30,7 +30,7 @@ Code Quality ------------ Contributors are expected to submit code that meets NumPy's standards. We will -reject pull requests that we deem being "`AI slop`_". Do not waste developers' +reject pull requests that we deem to be "`AI slop`_". Do not waste developers' time by submitting code that is fully or mostly generated by AI, and doesn't meet our standards. 
@@ -41,17 +41,17 @@ Copyright All code in NumPy is released under the BSD 3-clause copyright license. Contributors to NumPy license their code under the same license when it is -included into NumPy's version control repository. That means contributors must +included in NumPy's version control repository. That means contributors must own the copyright of any code submitted to NumPy or must include the BSD 3-clause compatible open source license(s) associated with the submitted code in the patch. Code generated by AI may infringe on copyright and it is the submitter's responsibility to not infringe. We reserve the right to reject any pull -requests, AI generated or not, where the copyright is in question. +requests, AI-generated or not, where the copyright is in question. Communication ------------- -When interacting with developers (forum, discussions, +When interacting with developers (forums, discussions, issues, pull requests, etc.) do not use AI to speak for you, except for translation or grammar editing. If the developers want to chat with a chatbot, they can do so themselves. 
Human-to-human communication is essential for an From ba839c36099a80db167bb9210a8e31f74a7b2706 Mon Sep 17 00:00:00 2001 From: Scott Shambaugh Date: Thu, 5 Mar 2026 09:09:59 -0700 Subject: [PATCH 1458/1718] CI: Add instructions for updating links and checksums for downloads --- .github/workflows/compiler_sanitizers.yml | 4 ++++ .github/workflows/linux_qemu.yml | 6 +++++- .github/workflows/linux_simd.yml | 4 ++++ .github/workflows/mypy_primer.yml | 3 ++- 4 files changed, 15 insertions(+), 2 deletions(-) diff --git a/.github/workflows/compiler_sanitizers.yml b/.github/workflows/compiler_sanitizers.yml index 5a0d9d36879b..c032f145a9bc 100644 --- a/.github/workflows/compiler_sanitizers.yml +++ b/.github/workflows/compiler_sanitizers.yml @@ -1,3 +1,7 @@ +# To update pinned container digests and pyenv version (not handled by Dependabot): +# Containers: change tag and get new digest with +# docker pull : && docker inspect --format='{{index .RepoDigests 0}}' : +# pyenv: see https://github.com/pyenv/pyenv/releases name: Test with compiler sanitizers on: diff --git a/.github/workflows/linux_qemu.yml b/.github/workflows/linux_qemu.yml index 7d0ec754620d..f22d71200e4e 100644 --- a/.github/workflows/linux_qemu.yml +++ b/.github/workflows/linux_qemu.yml @@ -1,3 +1,8 @@ +# To update pinned container digests and uv version: not handled by Dependabot. +# Containers: change tag and get new digest with +# docker pull : && docker inspect --format='{{index .RepoDigests 0}}' : +# uv: change version in URL and update checksum (curl -sL | sha256sum) +# # Meson's Python module doesn't support crosscompiling, # and python dependencies may be another potential hurdle. # There might also be a need to run runtime tests during configure time. 
@@ -88,7 +93,6 @@ jobs: - name: Initialize binfmt_misc for qemu-user-static run: | - # see https://hub.docker.com/r/tonistiigi/binfmt for available versions docker run --rm --privileged tonistiigi/binfmt:qemu-v9.2.2-52@sha256:1b804311fe87047a4c96d38b4b3ef6f62fca8cd125265917a9e3dc3c996c39e6 --install all - name: Install GCC cross-compilers diff --git a/.github/workflows/linux_simd.yml b/.github/workflows/linux_simd.yml index 959ee6da041a..b89a867187e0 100644 --- a/.github/workflows/linux_simd.yml +++ b/.github/workflows/linux_simd.yml @@ -1,5 +1,9 @@ name: Linux SIMD tests +# To update Intel SDE (not handled by Dependabot): download new version from +# https://www.intel.com/content/www/us/en/developer/articles/tool/software-development-emulator.html +# and update SDE_URL and SDE_SHA256 (curl -sL | sha256sum) +# # This file is meant for testing different SIMD-related build options and # optimization levels. See `meson_options.txt` for the available build options. # diff --git a/.github/workflows/mypy_primer.yml b/.github/workflows/mypy_primer.yml index 0bf64fe1e9ef..6e46dea579c3 100644 --- a/.github/workflows/mypy_primer.yml +++ b/.github/workflows/mypy_primer.yml @@ -36,7 +36,8 @@ jobs: with: python-version: "3.12" - name: Install dependencies - run: pip install git+https://github.com/hauntsaninja/mypy_primer.git@05f73ec3d85bb4f55676f3c57f2c3e5136228977 # HEAD of master on 2026-03-04, no tags/releases available + # To update: replace commit hash (no tags/releases available, use HEAD of master) + run: pip install git+https://github.com/hauntsaninja/mypy_primer.git@05f73ec3d85bb4f55676f3c57f2c3e5136228977 # HEAD of master on 2026-03-04 - name: Run mypy_primer shell: bash run: | From 55c271465dea6e54e454c4e458fe7b2ed63a4b7f Mon Sep 17 00:00:00 2001 From: Scott Shambaugh Date: Thu, 5 Mar 2026 09:10:43 -0700 Subject: [PATCH 1459/1718] CI: Fix dependabot for github actions --- .github/dependabot.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/dependabot.yml 
b/.github/dependabot.yml index 76e3f31a96e2..ceebf268351d 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -12,7 +12,6 @@ updates: - "03 - Maintenance" ignore: - dependency-name: "bus1/cabuild" -updates: - package-ecosystem: pip directory: /requirements schedule: From 22ac8118198ad9f222b7ba9b976ab6fc05006a0c Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 5 Mar 2026 19:12:17 +0100 Subject: [PATCH 1460/1718] MAINT: bump ``ruff`` to ``0.15.4`` --- requirements/linter_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/linter_requirements.txt b/requirements/linter_requirements.txt index cffefb4b0183..9c17eea462f0 100644 --- a/requirements/linter_requirements.txt +++ b/requirements/linter_requirements.txt @@ -1,5 +1,5 @@ # keep in sync with `environment.yml` cython-lint -ruff==0.15.2 +ruff==0.15.4 GitPython>=3.1.30 spin From ea86506c1d600f6d10f73b75c1761dca80dd6d14 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 5 Mar 2026 19:12:44 +0100 Subject: [PATCH 1461/1718] STY: remove unused ``# noqa`` comment --- numpy/__init__.pyi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 113ea5e010fa..b1bca4b8b229 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1084,7 +1084,7 @@ class _DTypeMeta(type): def _legacy(cls, /) -> bool: ... @final -class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): # noqa: UP046 +class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): names: tuple[py_str, ...] | None def __hash__(self) -> int: ... 
From 74d952694d933d62d32a4191ce23fe9335676136 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 5 Mar 2026 19:22:21 +0100 Subject: [PATCH 1462/1718] MAINT: bump ``pyrefly`` to ``0.55.0`` --- requirements/typing_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/typing_requirements.txt b/requirements/typing_requirements.txt index 5b5d3c5b4cab..5986dcc2b859 100644 --- a/requirements/typing_requirements.txt +++ b/requirements/typing_requirements.txt @@ -3,4 +3,4 @@ -r test_requirements.txt mypy==1.19.1 -pyrefly==0.53.0 +pyrefly==0.55.0 From f7535a582ad11712029c4815385eacb3fc6f4f45 Mon Sep 17 00:00:00 2001 From: david-cortes Date: Thu, 5 Mar 2026 20:28:02 +0100 Subject: [PATCH 1463/1718] DOC: Fix incorrect variable name in example (#30946) --- doc/source/user/basics.interoperability.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/user/basics.interoperability.rst b/doc/source/user/basics.interoperability.rst index ba72c2cb6a52..6fff1e5ea037 100644 --- a/doc/source/user/basics.interoperability.rst +++ b/doc/source/user/basics.interoperability.rst @@ -472,7 +472,7 @@ Convert a PyTorch CPU tensor to NumPy array: The imported arrays are read-only so writing or operating in-place will fail: - >>> x.flags.writeable + >>> x_np.flags.writeable False >>> x_np[1] = 1 Traceback (most recent call last): From a2689ab97eb182283c6fb17e74576c961aae1aa3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 5 Mar 2026 22:24:54 +0000 Subject: [PATCH 1464/1718] MAINT: Bump actions/upload-artifact from 6.0.0 to 7.0.0 Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 6.0.0 to 7.0.0. 
- [Release notes](https://github.com/actions/upload-artifact/releases) - [Commits](https://github.com/actions/upload-artifact/compare/b7c566a772e6b6bfb58ed0dc250532a479d7789f...bbbca2ddaa5d8feaa63e36b76fdaad77386f024f) --- updated-dependencies: - dependency-name: actions/upload-artifact dependency-version: 7.0.0 dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- .github/workflows/cygwin.yml | 2 +- .github/workflows/mypy_primer.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- .github/workflows/wheels.yml | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/cygwin.yml b/.github/workflows/cygwin.yml index eebb6de405ab..7ac71a9802e7 100644 --- a/.github/workflows/cygwin.yml +++ b/.github/workflows/cygwin.yml @@ -68,7 +68,7 @@ jobs: cd tools /usr/bin/python3.9 -m pytest --pyargs numpy -n2 -m "not slow" - name: Upload wheel if tests fail - uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 + uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0 if: failure() with: name: numpy-cygwin-wheel diff --git a/.github/workflows/mypy_primer.yml b/.github/workflows/mypy_primer.yml index 6e46dea579c3..d649bca93a50 100644 --- a/.github/workflows/mypy_primer.yml +++ b/.github/workflows/mypy_primer.yml @@ -76,7 +76,7 @@ jobs: run: | echo ${{ github.event.pull_request.number }} | tee pr_number.txt - name: Upload mypy_primer diff + PR number - uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 + uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0 if: ${{ matrix.shard-index == 0 }} with: name: mypy_primer_diffs-${{ matrix.shard-index }} @@ -84,7 +84,7 @@ jobs: diff_${{ matrix.shard-index }}.txt pr_number.txt - name: Upload mypy_primer diff - uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 + uses: 
actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0 if: ${{ matrix.shard-index != 0 }} with: name: mypy_primer_diffs-${{ matrix.shard-index }} @@ -98,7 +98,7 @@ jobs: contents: read steps: - name: Merge artifacts - uses: actions/upload-artifact/merge@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 + uses: actions/upload-artifact/merge@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0 with: name: mypy_primer_diffs pattern: mypy_primer_diffs-* diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 02a4614c2177..c25ab0ab98a5 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -42,7 +42,7 @@ jobs: # Upload the results as artifacts (optional). Commenting out will disable # uploads of run results in SARIF format to the repository Actions tab. - name: "Upload artifact" - uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 + uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0 with: name: SARIF file path: results.sarif diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index bb15b79da4e8..a276b57db8fe 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -103,7 +103,7 @@ jobs: env: CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} - - uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 + - uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0 with: name: ${{ matrix.python }}-${{ matrix.buildplat[1] }}-${{ matrix.buildplat[2] }} path: ./wheelhouse/*.whl From 10d43067bdb0a66061c2501e72381d2cf63e242a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 5 Mar 2026 22:25:15 +0000 Subject: [PATCH 1465/1718] MAINT: Bump prefix-dev/setup-pixi from 0.9.3 to 0.9.4 Bumps [prefix-dev/setup-pixi](https://github.com/prefix-dev/setup-pixi) from 0.9.3 to 0.9.4. 
- [Release notes](https://github.com/prefix-dev/setup-pixi/releases) - [Commits](https://github.com/prefix-dev/setup-pixi/compare/82d477f15f3a381dbcc8adc1206ce643fe110fb7...a0af7a228712d6121d37aba47adf55c1332c9c2e) --- updated-dependencies: - dependency-name: prefix-dev/setup-pixi dependency-version: 0.9.4 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/pixi-packages.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pixi-packages.yml b/.github/workflows/pixi-packages.yml index 400af28084e9..806083b73459 100644 --- a/.github/workflows/pixi-packages.yml +++ b/.github/workflows/pixi-packages.yml @@ -30,7 +30,7 @@ jobs: fetch-tags: true persist-credentials: false - - uses: prefix-dev/setup-pixi@82d477f15f3a381dbcc8adc1206ce643fe110fb7 # v0.9.3 + - uses: prefix-dev/setup-pixi@a0af7a228712d6121d37aba47adf55c1332c9c2e # v0.9.4 with: pixi-version: v0.60.0 run-install: false From b36e8a55ad48bfa72fc75e2acbb40f8b67fa06d4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 5 Mar 2026 22:25:34 +0000 Subject: [PATCH 1466/1718] MAINT: Bump actions/setup-python from 6.1.0 to 6.2.0 Bumps [actions/setup-python](https://github.com/actions/setup-python) from 6.1.0 to 6.2.0. - [Release notes](https://github.com/actions/setup-python/releases) - [Commits](https://github.com/actions/setup-python/compare/83679a892e2d95755f2dac6acb0bfd1e9ac5d548...a309ff8b426b58ec0e2a45f0f869d46889d02405) --- updated-dependencies: - dependency-name: actions/setup-python dependency-version: 6.2.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- .github/workflows/linux.yml | 16 ++++++++-------- .github/workflows/linux_blas.yml | 12 ++++++------ .github/workflows/linux_simd.yml | 12 ++++++------ .github/workflows/macos.yml | 2 +- .github/workflows/mypy_primer.yml | 2 +- .github/workflows/windows.yml | 6 +++--- 6 files changed, 25 insertions(+), 25 deletions(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index a2b5a82e59bc..2b198e63b4ea 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -43,7 +43,7 @@ jobs: submodules: recursive fetch-depth: 0 persist-credentials: false - - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 + - uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 with: python-version: '3.12' - name: Install linter requirements @@ -74,7 +74,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 + - uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 with: python-version: ${{ matrix.version }} - uses: ./.github/meson_actions @@ -124,7 +124,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 + - uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 with: python-version: ${{ matrix.version }} - uses: ./.github/meson_actions @@ -139,7 +139,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 + - uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 with: python-version: '3.12' - name: Install build and test dependencies from PyPI @@ -224,7 +224,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: 
actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 + - uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 with: python-version: '3.12' - name: Install build and benchmarking dependencies @@ -263,7 +263,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 + - uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 with: python-version: '3.12' - name: Install gfortran and setup OpenBLAS (sdist build) @@ -313,7 +313,7 @@ jobs: path: 'array-api-tests' persist-credentials: false - name: Set up Python - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 + uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 with: python-version: '3.12' - name: Install build and test dependencies from PyPI @@ -342,7 +342,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 + - uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 with: python-version: '3.12' - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 diff --git a/.github/workflows/linux_blas.yml b/.github/workflows/linux_blas.yml index 26d98fe813d7..994f1c334873 100644 --- a/.github/workflows/linux_blas.yml +++ b/.github/workflows/linux_blas.yml @@ -75,7 +75,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 + - uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 with: python-version: '3.12' @@ -198,7 +198,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 + - uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # 
v6.2.0 with: python-version: '3.12' @@ -226,7 +226,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 + - uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 with: python-version: '3.12' @@ -289,7 +289,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 + - uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 with: python-version: '3.12' @@ -353,7 +353,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 + - uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 with: python-version: '3.12' @@ -390,7 +390,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 + - uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 with: python-version: '3.12' diff --git a/.github/workflows/linux_simd.yml b/.github/workflows/linux_simd.yml index b89a867187e0..65f05ff5224f 100644 --- a/.github/workflows/linux_simd.yml +++ b/.github/workflows/linux_simd.yml @@ -68,7 +68,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 + - uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 with: python-version: '3.12' - uses: ./.github/meson_actions @@ -86,7 +86,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 + - uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 with: python-version: '3.12' @@ -132,7 +132,7 @@ jobs: with: 
submodules: recursive fetch-tags: true - - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 + - uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 with: python-version: '3.12' - name: Install dependencies @@ -180,7 +180,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 + - uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 with: python-version: "${{ matrix.BUILD_PROP[2] }}" - uses: ./.github/meson_actions @@ -195,7 +195,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 + - uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 with: python-version: '3.12' @@ -248,7 +248,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 + - uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 with: python-version: '3.12' diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 689e775b6aa3..37df6e2d0e04 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -128,7 +128,7 @@ jobs: fetch-tags: true persist-credentials: false - - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 + - uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 with: python-version: ${{ matrix.version }} diff --git a/.github/workflows/mypy_primer.yml b/.github/workflows/mypy_primer.yml index 6e46dea579c3..f08dc43ec7d8 100644 --- a/.github/workflows/mypy_primer.yml +++ b/.github/workflows/mypy_primer.yml @@ -32,7 +32,7 @@ jobs: with: path: numpy_to_test fetch-depth: 0 - - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 + - uses: 
actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 with: python-version: "3.12" - name: Install dependencies diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index a1a76d363be3..7f110888ecca 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -33,7 +33,7 @@ jobs: persist-credentials: false - name: Setup Python - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 + uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 with: python-version: "3.14t" @@ -91,7 +91,7 @@ jobs: persist-credentials: false - name: Setup Python - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 + uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 with: python-version: '3.12' architecture: ${{ matrix.architecture }} @@ -147,7 +147,7 @@ jobs: persist-credentials: false - name: Setup Python - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 + uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 with: python-version: ${{ matrix.pyver }} From 4b9819a1b0b39dfcb5f29853996b207771b61eec Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 5 Mar 2026 22:27:41 +0000 Subject: [PATCH 1467/1718] MAINT: Bump actions/cache from 5.0.2 to 5.0.3 Bumps [actions/cache](https://github.com/actions/cache) from 5.0.2 to 5.0.3. - [Release notes](https://github.com/actions/cache/releases) - [Changelog](https://github.com/actions/cache/blob/main/RELEASES.md) - [Commits](https://github.com/actions/cache/compare/8b402f58fbc84540c8b491a91e594a4576fec3d7...cdf6c1fa76f9f475f3d7449005a359c84ca0f306) --- updated-dependencies: - dependency-name: actions/cache dependency-version: 5.0.3 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- .github/workflows/linux_qemu.yml | 4 ++-- .github/workflows/macos.yml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/linux_qemu.yml b/.github/workflows/linux_qemu.yml index f22d71200e4e..1c6aedb93e1b 100644 --- a/.github/workflows/linux_qemu.yml +++ b/.github/workflows/linux_qemu.yml @@ -101,7 +101,7 @@ jobs: sudo apt install -y ninja-build gcc-${TOOLCHAIN_NAME} g++-${TOOLCHAIN_NAME} gfortran-${TOOLCHAIN_NAME} - name: Cache docker container - uses: actions/cache@8b402f58fbc84540c8b491a91e594a4576fec3d7 # v5.0.2 + uses: actions/cache@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 id: container-cache with: path: ~/docker_${{ matrix.BUILD_PROP[1] }} @@ -211,7 +211,7 @@ jobs: sudo apt install -y ninja-build gcc-14-${TOOLCHAIN_NAME} g++-14-${TOOLCHAIN_NAME} gfortran-14-${TOOLCHAIN_NAME} - name: Cache docker container - uses: actions/cache@8b402f58fbc84540c8b491a91e594a4576fec3d7 # v5.0.2 + uses: actions/cache@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 id: container-cache with: path: ~/docker_${{ matrix.BUILD_PROP[1] }} diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 689e775b6aa3..526f5e7e8b4e 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -50,7 +50,7 @@ jobs: echo "today=$(/bin/date -u '+%Y%m%d')" >> $GITHUB_OUTPUT - name: Setup compiler cache - uses: actions/cache@8b402f58fbc84540c8b491a91e594a4576fec3d7 # v5.0.2 + uses: actions/cache@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 id: cache-ccache with: path: ${{ steps.prep-ccache.outputs.dir }} @@ -74,7 +74,7 @@ jobs: # ensure we re-solve once a day (since we don't lock versions). Could be # replaced by a conda-lock based approach in the future. 
- name: Cache conda environment - uses: actions/cache@8b402f58fbc84540c8b491a91e594a4576fec3d7 # v5.0.2 + uses: actions/cache@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 env: # Increase this value to reset cache if environment.yml has not changed CACHE_NUMBER: 1 From 0f2b851d3c1d4d87877437971d0ff9ba47a8c045 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 6 Mar 2026 01:09:19 +0000 Subject: [PATCH 1468/1718] MAINT: Bump actions/checkout from 6.0.1 to 6.0.2 Bumps [actions/checkout](https://github.com/actions/checkout) from 6.0.1 to 6.0.2. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/8e8c483db84b4bee98b60c0593521ed34d9990e8...de0fac2e4500dabe0009e67214ff5f5447ce83dd) --- updated-dependencies: - dependency-name: actions/checkout dependency-version: 6.0.2 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 2 +- .github/workflows/compiler_sanitizers.yml | 6 +++--- .github/workflows/cygwin.yml | 2 +- .github/workflows/dependency-review.yml | 2 +- .github/workflows/emscripten.yml | 2 +- .github/workflows/linux-ppc64le.yml | 2 +- .github/workflows/linux.yml | 24 +++++++++++------------ .github/workflows/linux_blas.yml | 18 ++++++++--------- .github/workflows/linux_qemu.yml | 4 ++-- .github/workflows/linux_simd.yml | 12 ++++++------ .github/workflows/macos.yml | 4 ++-- .github/workflows/mypy_primer.yml | 2 +- .github/workflows/pixi-packages.yml | 2 +- .github/workflows/scorecards.yml | 2 +- .github/workflows/stubtest.yml | 2 +- .github/workflows/typecheck.yml | 4 ++-- .github/workflows/wheels.yml | 2 +- .github/workflows/windows.yml | 6 +++--- 18 files changed, 49 insertions(+), 49 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 99c7afcabec7..54aaffafe187 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -41,7 +41,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: persist-credentials: false diff --git a/.github/workflows/compiler_sanitizers.yml b/.github/workflows/compiler_sanitizers.yml index c032f145a9bc..3dc30ebf76f0 100644 --- a/.github/workflows/compiler_sanitizers.yml +++ b/.github/workflows/compiler_sanitizers.yml @@ -35,7 +35,7 @@ jobs: if: github.repository == 'numpy/numpy' runs-on: macos-latest steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true @@ -91,7 +91,7 @@ jobs: options: --shm-size=2g # increase memory for large matrix ops steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: 
actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - name: Trust working directory and initialize submodules run: | git config --global --add safe.directory /__w/numpy/numpy @@ -122,7 +122,7 @@ jobs: options: --shm-size=2g # increase memory for large matrix ops steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - name: Trust working directory and initialize submodules run: | git config --global --add safe.directory /__w/numpy/numpy diff --git a/.github/workflows/cygwin.yml b/.github/workflows/cygwin.yml index 7ac71a9802e7..594af15a88a1 100644 --- a/.github/workflows/cygwin.yml +++ b/.github/workflows/cygwin.yml @@ -23,7 +23,7 @@ jobs: # To enable this workflow on a fork, comment out: if: github.repository == 'numpy/numpy' steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml index a4bec8af6d82..f49263ba127f 100644 --- a/.github/workflows/dependency-review.yml +++ b/.github/workflows/dependency-review.yml @@ -15,7 +15,7 @@ jobs: runs-on: ubuntu-latest steps: - name: 'Checkout Repository' - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: persist-credentials: false - name: 'Dependency Review' diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index d1c730f5a732..4f0127431f03 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -30,7 +30,7 @@ jobs: if: github.repository == 'numpy/numpy' steps: - name: Checkout NumPy - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + uses: 
actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/linux-ppc64le.yml b/.github/workflows/linux-ppc64le.yml index c12165287a65..52c23e593f02 100644 --- a/.github/workflows/linux-ppc64le.yml +++ b/.github/workflows/linux-ppc64le.yml @@ -38,7 +38,7 @@ jobs: name: "${{ matrix.config.name }}" steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 2b198e63b4ea..f0976f34828a 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -38,7 +38,7 @@ jobs: runs-on: ubuntu-latest continue-on-error: true steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-depth: 0 @@ -69,7 +69,7 @@ jobs: matrix: version: ["3.12", "3.14t"] steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true @@ -84,7 +84,7 @@ jobs: runs-on: ubuntu-24.04 if: github.event_name != 'push' steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true @@ -119,7 +119,7 @@ jobs: matrix: version: ["3.13", "3.14", "3.15-dev", "3.15t-dev"] steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true @@ -134,7 +134,7 @@ jobs: needs: [smoke_test] runs-on: ubuntu-22.04 steps: - - uses: 
actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true @@ -174,7 +174,7 @@ jobs: if: github.repository == 'numpy/numpy' runs-on: ubuntu-24.04-arm steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true @@ -219,7 +219,7 @@ jobs: runs-on: ubuntu-latest if: github.event_name != 'push' steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true @@ -258,7 +258,7 @@ jobs: runs-on: ubuntu-latest if: github.event_name != 'push' steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true @@ -299,13 +299,13 @@ jobs: if: github.event_name != 'push' steps: - name: Checkout NumPy - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true persist-credentials: false - name: Checkout array-api-tests - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: repository: data-apis/array-api-tests ref: '3c273cd34d51c64ed893737306d36adab23a94a1' # v2025.05.23 @@ -337,7 +337,7 @@ jobs: runs-on: ubuntu-latest if: github.event_name != 'push' steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true @@ -345,7 +345,7 @@ jobs: - uses: 
actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 with: python-version: '3.12' - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: repository: numpy/numpy-release path: numpy-release diff --git a/.github/workflows/linux_blas.yml b/.github/workflows/linux_blas.yml index 994f1c334873..aaed39e21e84 100644 --- a/.github/workflows/linux_blas.yml +++ b/.github/workflows/linux_blas.yml @@ -70,7 +70,7 @@ jobs: USE_NIGHTLY_OPENBLAS: ${{ matrix.USE_NIGHTLY_OPENBLAS }} name: "Test Linux (${{ matrix.USE_NIGHTLY_OPENBLAS && 'nightly' || 'stable' }} OpenBLAS)" steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true @@ -128,7 +128,7 @@ jobs: run: | dnf install git gcc-gfortran g++ python3-devel openblas-devel -y - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true @@ -163,7 +163,7 @@ jobs: run: | dnf install git gcc-gfortran g++ python3-devel flexiblas-devel -y - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true @@ -193,7 +193,7 @@ jobs: runs-on: ubuntu-latest name: "OpenBLAS with CMake" steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true @@ -221,7 +221,7 @@ jobs: runs-on: ubuntu-latest name: "Debian libblas/liblapack" steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd 
# v6.0.2 with: submodules: recursive fetch-tags: true @@ -257,7 +257,7 @@ jobs: # If it is needed in the future, use install name `pkgconf-pkg-config` zypper install -y git gcc-c++ python3-pip python3-devel blas cblas lapack - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true @@ -284,7 +284,7 @@ jobs: runs-on: ubuntu-latest name: "MKL (LP64, ILP64, SDL)" steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true @@ -348,7 +348,7 @@ jobs: runs-on: ubuntu-latest name: "BLIS" steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true @@ -385,7 +385,7 @@ jobs: runs-on: ubuntu-latest name: "ATLAS" steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/linux_qemu.yml b/.github/workflows/linux_qemu.yml index 1c6aedb93e1b..e954f346ef76 100644 --- a/.github/workflows/linux_qemu.yml +++ b/.github/workflows/linux_qemu.yml @@ -85,7 +85,7 @@ jobs: name: "${{ matrix.BUILD_PROP[0] }}" steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true @@ -196,7 +196,7 @@ jobs: name: "${{ matrix.BUILD_PROP[0] }}" steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true diff --git 
a/.github/workflows/linux_simd.yml b/.github/workflows/linux_simd.yml index 65f05ff5224f..0293d95b9aa2 100644 --- a/.github/workflows/linux_simd.yml +++ b/.github/workflows/linux_simd.yml @@ -63,7 +63,7 @@ jobs: env: MESON_ARGS: "-Dallow-noblas=true -Dcpu-dispatch=none" steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true @@ -81,7 +81,7 @@ jobs: env: MESON_ARGS: "-Dallow-noblas=true" steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true @@ -128,7 +128,7 @@ jobs: args: "-Dallow-noblas=true -Dcpu-baseline=native -Dcpu-dispatch=none" name: "ARM64 SIMD - ${{ matrix.config.name }}" steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true @@ -175,7 +175,7 @@ jobs: name: "${{ matrix.BUILD_PROP[0] }}" steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true @@ -190,7 +190,7 @@ jobs: needs: [baseline_only] runs-on: ubuntu-24.04 steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true @@ -243,7 +243,7 @@ jobs: needs: [baseline_only] runs-on: ubuntu-24.04 steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 
a1455602ba9f..16207cd50b2d 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -33,7 +33,7 @@ jobs: python-version: ["3.12"] steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true @@ -122,7 +122,7 @@ jobs: version: ["3.12", "3.14t"] steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/mypy_primer.yml b/.github/workflows/mypy_primer.yml index 28375f7c91be..21ba1beb7a1c 100644 --- a/.github/workflows/mypy_primer.yml +++ b/.github/workflows/mypy_primer.yml @@ -28,7 +28,7 @@ jobs: shard-index: [0] # e.g. change this to [0, 1, 2] and --num-shards below to 3 fail-fast: false steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: path: numpy_to_test fetch-depth: 0 diff --git a/.github/workflows/pixi-packages.yml b/.github/workflows/pixi-packages.yml index 806083b73459..2f873d43a160 100644 --- a/.github/workflows/pixi-packages.yml +++ b/.github/workflows/pixi-packages.yml @@ -24,7 +24,7 @@ jobs: package_variant: [asan, default] if: github.repository == 'numpy/numpy' steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index c25ab0ab98a5..87552305f65d 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -25,7 +25,7 @@ jobs: steps: - name: "Checkout code" - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v3.1.0 + uses: 
actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v3.1.0 with: persist-credentials: false diff --git a/.github/workflows/stubtest.yml b/.github/workflows/stubtest.yml index a25e6c54cfb8..1d2447d6023c 100644 --- a/.github/workflows/stubtest.yml +++ b/.github/workflows/stubtest.yml @@ -37,7 +37,7 @@ jobs: py: ["3.12", "3.14"] steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true diff --git a/.github/workflows/typecheck.yml b/.github/workflows/typecheck.yml index 321530993b82..43fdc70073f8 100644 --- a/.github/workflows/typecheck.yml +++ b/.github/workflows/typecheck.yml @@ -56,7 +56,7 @@ jobs: - [ubuntu-latest, '3.13'] - [windows-latest, '3.12'] steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true @@ -93,7 +93,7 @@ jobs: pyrefly: runs-on: ubuntu-latest steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - uses: astral-sh/setup-uv@61cb8a9741eeb8a550a1b8544337180c0fc8476b # v7.2.0 with: activate-environment: true diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index a276b57db8fe..9e84232bc457 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -49,7 +49,7 @@ jobs: IS_32_BIT: ${{ matrix.buildplat[1] == 'win32' }} # used in cibw_test_command.sh steps: - name: Checkout numpy - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: true persist-credentials: false diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 7f110888ecca..032beb8ff815 100644 --- 
a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -26,7 +26,7 @@ jobs: if: github.repository == 'numpy/numpy' steps: - name: Checkout - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true @@ -84,7 +84,7 @@ jobs: if: github.repository == 'numpy/numpy' steps: - name: Checkout - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true @@ -140,7 +140,7 @@ jobs: if: github.repository == 'numpy/numpy' steps: - name: Checkout - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: submodules: recursive fetch-tags: true From 5da4d4d33d3a42806cc9a6466365f36bd4f225a8 Mon Sep 17 00:00:00 2001 From: ChristinaDec <103141476+ChristinaDec@users.noreply.github.com> Date: Thu, 5 Mar 2026 21:13:11 -0800 Subject: [PATCH 1469/1718] DOC: Remove Python 2 array-protocol strings type note (#30920) --- doc/source/reference/arrays.dtypes.rst | 9 --------- doc/source/reference/arrays.scalars.rst | 2 -- 2 files changed, 11 deletions(-) diff --git a/doc/source/reference/arrays.dtypes.rst b/doc/source/reference/arrays.dtypes.rst index 2a5b8ce50fc5..b1887fc09d23 100644 --- a/doc/source/reference/arrays.dtypes.rst +++ b/doc/source/reference/arrays.dtypes.rst @@ -294,15 +294,6 @@ Array-protocol type strings (see :ref:`arrays.interface`) >>> dt = np.dtype('S25') # 25-length zero-terminated bytes >>> dt = np.dtype('U25') # 25-character string - .. _string-dtype-note: - - .. admonition:: Note on string types - - For backward compatibility with existing code originally written to support - Python 2, ``S`` and ``a`` typestrings are zero-terminated bytes. - For unicode strings, use ``U``, `numpy.str_`. 
For signed bytes that do not - need zero-termination ``b`` or ``i1`` can be used. - String with comma-separated fields A short-hand notation for specifying the format of a structured data type is a comma-separated string of basic formats. diff --git a/doc/source/reference/arrays.scalars.rst b/doc/source/reference/arrays.scalars.rst index f859db4620d4..10223456bb6c 100644 --- a/doc/source/reference/arrays.scalars.rst +++ b/doc/source/reference/arrays.scalars.rst @@ -327,8 +327,6 @@ elements the data type consists of.) .. warning:: - See :ref:`Note on string types`. - Numeric Compatibility: If you used old typecode characters in your Numeric code (which was never recommended), you will need to change some of them to the new characters. In particular, the needed From 2430050aaa6adcf57181caac8d6456bd09c16585 Mon Sep 17 00:00:00 2001 From: Roman Donchenko Date: Fri, 6 Mar 2026 12:24:45 +0200 Subject: [PATCH 1470/1718] ENH: ``numpy.where``: don't silently truncate Python scalars (#30803) Since NumPy 2.0, code like the following will cause a scalar value to be truncated: >>> np.where([False], np.array([1], dtype=np.uint8), 1000) array([232], dtype=uint8) (in NumPy 1.x, it would instead output a uint16 array) This is dangerous, and inconsistent with other NumPy functions, e.g.: >>> np.multiply(np.array([1], dtype=np.uint8), 1000) Traceback (most recent call last): File "", line 1, in np.multiply(np.array([1], dtype=np.uint8), 1000) ~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ OverflowError: Python integer 1000 out of bounds for uint8 Use `npy_update_operand_for_scalar` to bring the behavior of `where` in line with other functions. 
--- .../upcoming_changes/30803.compatibility.rst | 8 ++++++++ numpy/_core/src/multiarray/multiarraymodule.c | 15 +++++++++++++++ numpy/_core/tests/test_multiarray.py | 7 +++++++ 3 files changed, 30 insertions(+) create mode 100644 doc/release/upcoming_changes/30803.compatibility.rst diff --git a/doc/release/upcoming_changes/30803.compatibility.rst b/doc/release/upcoming_changes/30803.compatibility.rst new file mode 100644 index 000000000000..3fc75cae53ab --- /dev/null +++ b/doc/release/upcoming_changes/30803.compatibility.rst @@ -0,0 +1,8 @@ +``numpy.where`` no longer truncates Python integers +--------------------------------------------------- + +Previously, if the ``x`` or ``y`` argument of ``numpy.where`` was a Python integer +that was out of range of the output type, it would be silently truncated. +Now, an `OverflowError` will be raised instead. + +This change also applies to the underlying C API function ``PyArray_Where``. diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index 9587ea5753c7..49c70a94f48e 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -3281,6 +3281,21 @@ PyArray_Where(PyObject *condition, PyObject *x, PyObject *y) if (common_dt == NULL) { goto fail; } + + if (PyArray_FLAGS(ax) & NPY_ARRAY_WAS_PYTHON_LITERAL) { + if (npy_update_operand_for_scalar(&ax, x, common_dt, NPY_SAFE_CASTING) < 0) { + goto fail; + } + op_in[2] = ax; + } + + if (PyArray_FLAGS(ay) & NPY_ARRAY_WAS_PYTHON_LITERAL) { + if (npy_update_operand_for_scalar(&ay, y, common_dt, NPY_SAFE_CASTING) < 0) { + goto fail; + } + op_in[3] = ay; + } + npy_intp itemsize = common_dt->elsize; // If x and y don't have references, we ask the iterator to create buffers diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index e485f4578910..9c52b7b51c6f 100644 --- a/numpy/_core/tests/test_multiarray.py +++ 
b/numpy/_core/tests/test_multiarray.py @@ -9633,6 +9633,13 @@ def test_error(self): assert_raises(ValueError, np.where, c, a, a) assert_raises(ValueError, np.where, c[0], a, b) + def test_scalar_overflow(self): + c = [True] + a = np.array([1], dtype=np.uint8) + b = 1000 + assert_raises(OverflowError, np.where, c, a, b) + assert_raises(OverflowError, np.where, c, b, a) + def test_string(self): # gh-4778 check strings are properly filled with nulls a = np.array("abc") From 198d499169015a85649d17973411ba58781771bd Mon Sep 17 00:00:00 2001 From: samrat-rm <93928124+samrat-rm@users.noreply.github.com> Date: Fri, 6 Mar 2026 16:18:36 +0530 Subject: [PATCH 1471/1718] DOC: add versionadded directive for numpy.concat (#30833) Co-authored-by: Sebastian Berg --- numpy/_core/multiarray.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/numpy/_core/multiarray.py b/numpy/_core/multiarray.py index 1757270deb62..c7f21b60c488 100644 --- a/numpy/_core/multiarray.py +++ b/numpy/_core/multiarray.py @@ -210,6 +210,9 @@ def concatenate(arrays, axis=0, out=None, *, dtype=None, casting="same_kind"): Join a sequence of arrays along an existing axis. + .. versionadded:: 2.0 + ``numpy.concat`` added as a shorthand for ``numpy.concatenate``. + Parameters ---------- a1, a2, ... : sequence of array_like From f15fbab1a29f51374e6c76902e0e5e22f7a252dc Mon Sep 17 00:00:00 2001 From: Maanas Arora Date: Fri, 6 Mar 2026 08:22:27 -0500 Subject: [PATCH 1472/1718] BUG: Fix `np.dot` to allow user dtypes (#30931) Refactor things (slightly) to use a pattern of PyArray_DTypeFromObject and NPY_DT_CALL_ensure_canonical to avoid relying on type numbers (except for final BLAS dispatching). This is because type numbers are not reliable representation for user dtypes (and in general for parametric ones). A future iteration may want to use normal promotion (but this very subtly changes things for empty list inputs). 
Co-authored-by: Sebastian Berg --- numpy/_core/src/common/cblasfuncs.c | 5 +- numpy/_core/src/common/cblasfuncs.h | 2 +- numpy/_core/src/multiarray/common.c | 13 +- numpy/_core/src/multiarray/common.h | 2 +- numpy/_core/src/multiarray/multiarraymodule.c | 116 ++++++++++-------- numpy/_core/tests/test_multiarray.py | 7 ++ 6 files changed, 85 insertions(+), 60 deletions(-) diff --git a/numpy/_core/src/common/cblasfuncs.c b/numpy/_core/src/common/cblasfuncs.c index 66a215dfeb64..04ca81086bcf 100644 --- a/numpy/_core/src/common/cblasfuncs.c +++ b/numpy/_core/src/common/cblasfuncs.c @@ -225,10 +225,11 @@ _bad_strides(PyArrayObject *ap) * __array_ufunc__ nonsense is also assumed to have been taken care of. */ NPY_NO_EXPORT PyObject * -cblas_matrixproduct(int typenum, PyArrayObject *ap1, PyArrayObject *ap2, +cblas_matrixproduct(PyArray_Descr *typec, PyArrayObject *ap1, PyArrayObject *ap2, PyArrayObject *out) { PyArrayObject *result = NULL, *out_buf = NULL; + int typenum = typec->type_num; npy_intp j, lda, ldb; npy_intp l; int nd; @@ -364,7 +365,7 @@ cblas_matrixproduct(int typenum, PyArrayObject *ap1, PyArrayObject *ap2, } } - out_buf = new_array_for_sum(ap1, ap2, out, nd, dimensions, typenum, &result); + out_buf = new_array_for_sum(ap1, ap2, out, nd, dimensions, typec, &result); if (out_buf == NULL) { goto fail; } diff --git a/numpy/_core/src/common/cblasfuncs.h b/numpy/_core/src/common/cblasfuncs.h index 71c533f369a4..fb9c325dd4d9 100644 --- a/numpy/_core/src/common/cblasfuncs.h +++ b/numpy/_core/src/common/cblasfuncs.h @@ -2,6 +2,6 @@ #define NUMPY_CORE_SRC_COMMON_CBLASFUNCS_H_ NPY_NO_EXPORT PyObject * -cblas_matrixproduct(int, PyArrayObject *, PyArrayObject *, PyArrayObject *); +cblas_matrixproduct(PyArray_Descr *, PyArrayObject *, PyArrayObject *, PyArrayObject *); #endif /* NUMPY_CORE_SRC_COMMON_CBLASFUNCS_H_ */ diff --git a/numpy/_core/src/multiarray/common.c b/numpy/_core/src/multiarray/common.c index fd4f24151331..954179c66cb3 100644 --- 
a/numpy/_core/src/multiarray/common.c +++ b/numpy/_core/src/multiarray/common.c @@ -381,7 +381,7 @@ _may_have_objects(PyArray_Descr *dtype) */ NPY_NO_EXPORT PyArrayObject * new_array_for_sum(PyArrayObject *ap1, PyArrayObject *ap2, PyArrayObject* out, - int nd, npy_intp dimensions[], int typenum, PyArrayObject **result) + int nd, npy_intp dimensions[], PyArray_Descr *descr, PyArrayObject **result) { PyArrayObject *out_buf; @@ -390,7 +390,7 @@ new_array_for_sum(PyArrayObject *ap1, PyArrayObject *ap2, PyArrayObject* out, /* verify that out is usable */ if (PyArray_NDIM(out) != nd || - PyArray_TYPE(out) != typenum || + !PyArray_EquivTypes(PyArray_DESCR(out), descr) || !PyArray_ISCARRAY(out)) { PyErr_SetString(PyExc_ValueError, "output array is not acceptable (must have the right datatype, " @@ -452,10 +452,11 @@ new_array_for_sum(PyArrayObject *ap1, PyArrayObject *ap2, PyArrayObject* out, subtype = Py_TYPE(ap1); } - out_buf = (PyArrayObject *)PyArray_New(subtype, nd, dimensions, - typenum, NULL, NULL, 0, 0, - (PyObject *) - (prior2 > prior1 ? ap2 : ap1)); + Py_INCREF(descr); + out_buf = (PyArrayObject *)PyArray_NewFromDescr(subtype, descr, nd, dimensions, + NULL, NULL, 0, + (PyObject *) + (prior2 > prior1 ? 
ap2 : ap1)); if (out_buf != NULL && result) { Py_INCREF(out_buf); diff --git a/numpy/_core/src/multiarray/common.h b/numpy/_core/src/multiarray/common.h index c0b5c043c7a3..f4d0e595aaa5 100644 --- a/numpy/_core/src/multiarray/common.h +++ b/numpy/_core/src/multiarray/common.h @@ -339,7 +339,7 @@ check_is_convertible_to_scalar(PyArrayObject *v); */ NPY_NO_EXPORT PyArrayObject * new_array_for_sum(PyArrayObject *ap1, PyArrayObject *ap2, PyArrayObject* out, - int nd, npy_intp dimensions[], int typenum, PyArrayObject **result); + int nd, npy_intp dimensions[], PyArray_Descr *descr, PyArrayObject **result); /* diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index 49c70a94f48e..a451e0099675 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -881,7 +881,6 @@ PyArray_InnerProduct(PyObject *op1, PyObject *op2) { PyArrayObject *ap1 = NULL; PyArrayObject *ap2 = NULL; - int typenum; PyArray_Descr *typec = NULL; PyObject* ap2t = NULL; npy_intp dims[NPY_MAXDIMS]; @@ -889,23 +888,18 @@ PyArray_InnerProduct(PyObject *op1, PyObject *op2) int i; PyObject* ret = NULL; - typenum = PyArray_ObjectType(op1, NPY_NOTYPE); - if (typenum == NPY_NOTYPE) { + if (PyArray_DTypeFromObject(op1, NPY_MAXDIMS, &typec) < 0) { return NULL; } - typenum = PyArray_ObjectType(op2, typenum); - if (typenum == NPY_NOTYPE) { + if (PyArray_DTypeFromObject(op2, NPY_MAXDIMS, &typec) < 0) { + Py_XDECREF(typec); return NULL; } - typec = PyArray_DescrFromType(typenum); if (typec == NULL) { - if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_TypeError, - "Cannot find a common data type."); - } - goto fail; + typec = PyArray_DescrFromType(NPY_DEFAULT_TYPE); } + Py_SETREF(typec, NPY_DT_CALL_ensure_canonical(typec)); Py_INCREF(typec); ap1 = (PyArrayObject *)PyArray_FromAny(op1, typec, 0, 0, @@ -914,6 +908,8 @@ PyArray_InnerProduct(PyObject *op1, PyObject *op2) Py_DECREF(typec); goto fail; } + + 
Py_INCREF(typec); ap2 = (PyArrayObject *)PyArray_FromAny(op2, typec, 0, 0, NPY_ARRAY_ALIGNED, NULL); if (ap2 == NULL) { @@ -947,6 +943,7 @@ PyArray_InnerProduct(PyObject *op1, PyObject *op2) Py_DECREF(ap1); Py_DECREF(ap2); Py_DECREF(ap2t); + Py_DECREF(typec); return ret; fail: @@ -954,6 +951,7 @@ PyArray_InnerProduct(PyObject *op1, PyObject *op2) Py_XDECREF(ap2); Py_XDECREF(ap2t); Py_XDECREF(ret); + Py_XDECREF(typec); return NULL; } @@ -985,23 +983,19 @@ PyArray_MatrixProduct2(PyObject *op1, PyObject *op2, PyArrayObject* out) PyArray_Descr *typec = NULL; NPY_BEGIN_THREADS_DEF; - typenum = PyArray_ObjectType(op1, NPY_NOTYPE); - if (typenum == NPY_NOTYPE) { + if (PyArray_DTypeFromObject(op1, NPY_MAXDIMS, &typec) < 0) { return NULL; } - typenum = PyArray_ObjectType(op2, typenum); - if (typenum == NPY_NOTYPE) { + if (PyArray_DTypeFromObject(op2, NPY_MAXDIMS, &typec) < 0) { + Py_XDECREF(typec); return NULL; } - typec = PyArray_DescrFromType(typenum); if (typec == NULL) { - if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_TypeError, - "Cannot find a common data type."); - } - return NULL; + typec = PyArray_DescrFromType(NPY_DEFAULT_TYPE); } + Py_SETREF(typec, NPY_DT_CALL_ensure_canonical(typec)); + typenum = typec->type_num; Py_INCREF(typec); ap1 = (PyArrayObject *)PyArray_FromAny(op1, typec, 0, 0, @@ -1010,6 +1004,8 @@ PyArray_MatrixProduct2(PyObject *op1, PyObject *op2, PyArrayObject* out) Py_DECREF(typec); return NULL; } + + Py_INCREF(typec); ap2 = (PyArrayObject *)PyArray_FromAny(op2, typec, 0, 0, NPY_ARRAY_ALIGNED, NULL); if (ap2 == NULL) { @@ -1021,7 +1017,7 @@ PyArray_MatrixProduct2(PyObject *op1, PyObject *op2, PyArrayObject* out) if (PyArray_NDIM(ap1) <= 2 && PyArray_NDIM(ap2) <= 2 && (NPY_DOUBLE == typenum || NPY_CDOUBLE == typenum || NPY_FLOAT == typenum || NPY_CFLOAT == typenum)) { - return cblas_matrixproduct(typenum, ap1, ap2, out); + return cblas_matrixproduct(typec, ap1, ap2, out); } #endif @@ -1062,7 +1058,7 @@ PyArray_MatrixProduct2(PyObject *op1, 
PyObject *op2, PyArrayObject* out) is1 = PyArray_STRIDES(ap1)[PyArray_NDIM(ap1)-1]; is2 = PyArray_STRIDES(ap2)[matchDim]; /* Choose which subtype to return */ - out_buf = new_array_for_sum(ap1, ap2, out, nd, dimensions, typenum, &result); + out_buf = new_array_for_sum(ap1, ap2, out, nd, dimensions, typec, &result); if (out_buf == NULL) { goto fail; } @@ -1124,6 +1120,7 @@ PyArray_MatrixProduct2(PyObject *op1, PyObject *op2, PyArrayObject* out) /* Trigger possible copy-back into `result` */ PyArray_ResolveWritebackIfCopy(out_buf); Py_DECREF(out_buf); + Py_DECREF(typec); return (PyObject *)result; @@ -1132,6 +1129,7 @@ PyArray_MatrixProduct2(PyObject *op1, PyObject *op2, PyArrayObject* out) Py_XDECREF(ap2); Py_XDECREF(out_buf); Py_XDECREF(result); + Py_XDECREF(typec); return NULL; } @@ -1143,7 +1141,7 @@ PyArray_MatrixProduct2(PyObject *op1, PyObject *op2, PyArrayObject* out) * inverted is set to 1 if computed correlate(ap2, ap1), 0 otherwise */ static PyArrayObject* -_pyarray_correlate(PyArrayObject *ap1, PyArrayObject *ap2, int typenum, +_pyarray_correlate(PyArrayObject *ap1, PyArrayObject *ap2, PyArray_Descr *typec, int mode, int *inverted) { PyArrayObject *ret; @@ -1203,7 +1201,7 @@ _pyarray_correlate(PyArrayObject *ap1, PyArrayObject *ap2, int typenum, * Need to choose an output array that can hold a sum * -- use priority to determine which subtype. 
*/ - ret = new_array_for_sum(ap1, ap2, NULL, 1, &length, typenum, NULL); + ret = new_array_for_sum(ap1, ap2, NULL, 1, &length, typec, NULL); if (ret == NULL) { return NULL; } @@ -1323,21 +1321,23 @@ NPY_NO_EXPORT PyObject * PyArray_Correlate2(PyObject *op1, PyObject *op2, int mode) { PyArrayObject *ap1, *ap2, *ret = NULL; - int typenum; - PyArray_Descr *typec; + PyArray_Descr *typec = NULL; int inverted; int st; - typenum = PyArray_ObjectType(op1, NPY_NOTYPE); - if (typenum == NPY_NOTYPE) { + if (PyArray_DTypeFromObject(op1, NPY_MAXDIMS, &typec) < 0) { return NULL; } - typenum = PyArray_ObjectType(op2, typenum); - if (typenum == NPY_NOTYPE) { + if (PyArray_DTypeFromObject(op2, NPY_MAXDIMS, &typec) < 0) { + Py_XDECREF(typec); return NULL; } - typec = PyArray_DescrFromType(typenum); + if (typec == NULL) { + typec = PyArray_DescrFromType(NPY_DEFAULT_TYPE); + } + Py_SETREF(typec, NPY_DT_CALL_ensure_canonical(typec)); + Py_INCREF(typec); ap1 = (PyArrayObject *)PyArray_FromAny(op1, typec, 1, 1, NPY_ARRAY_DEFAULT, NULL); @@ -1345,6 +1345,8 @@ PyArray_Correlate2(PyObject *op1, PyObject *op2, int mode) Py_DECREF(typec); return NULL; } + + Py_INCREF(typec); ap2 = (PyArrayObject *)PyArray_FromAny(op2, typec, 1, 1, NPY_ARRAY_DEFAULT, NULL); if (ap2 == NULL) { @@ -1361,7 +1363,7 @@ PyArray_Correlate2(PyObject *op1, PyObject *op2, int mode) ap2 = cap2; } - ret = _pyarray_correlate(ap1, ap2, typenum, mode, &inverted); + ret = _pyarray_correlate(ap1, ap2, typec, mode, &inverted); if (ret == NULL) { goto clean_ap2; } @@ -1379,6 +1381,7 @@ PyArray_Correlate2(PyObject *op1, PyObject *op2, int mode) Py_DECREF(ap1); Py_DECREF(ap2); + Py_DECREF(typec); return (PyObject *)ret; clean_ret: @@ -1387,6 +1390,8 @@ PyArray_Correlate2(PyObject *op1, PyObject *op2, int mode) Py_DECREF(ap2); clean_ap1: Py_DECREF(ap1); + + Py_DECREF(typec); return NULL; } @@ -1397,20 +1402,22 @@ NPY_NO_EXPORT PyObject * PyArray_Correlate(PyObject *op1, PyObject *op2, int mode) { PyArrayObject *ap1, *ap2, *ret = 
NULL; - int typenum; int unused; - PyArray_Descr *typec; + PyArray_Descr *typec = NULL; - typenum = PyArray_ObjectType(op1, NPY_NOTYPE); - if (typenum == NPY_NOTYPE) { + if (PyArray_DTypeFromObject(op1, NPY_MAXDIMS, &typec) < 0) { return NULL; } - typenum = PyArray_ObjectType(op2, typenum); - if (typenum == NPY_NOTYPE) { + if (PyArray_DTypeFromObject(op2, NPY_MAXDIMS, &typec) < 0) { + Py_XDECREF(typec); return NULL; } - typec = PyArray_DescrFromType(typenum); + if (typec == NULL) { + typec = PyArray_DescrFromType(NPY_DEFAULT_TYPE); + } + Py_SETREF(typec, NPY_DT_CALL_ensure_canonical(typec)); + Py_INCREF(typec); ap1 = (PyArrayObject *)PyArray_FromAny(op1, typec, 1, 1, NPY_ARRAY_DEFAULT, NULL); @@ -1418,24 +1425,28 @@ PyArray_Correlate(PyObject *op1, PyObject *op2, int mode) Py_DECREF(typec); return NULL; } + + Py_INCREF(typec); ap2 = (PyArrayObject *)PyArray_FromAny(op2, typec, 1, 1, NPY_ARRAY_DEFAULT, NULL); if (ap2 == NULL) { goto fail; } - ret = _pyarray_correlate(ap1, ap2, typenum, mode, &unused); + ret = _pyarray_correlate(ap1, ap2, typec, mode, &unused); if (ret == NULL) { goto fail; } Py_DECREF(ap1); Py_DECREF(ap2); + Py_DECREF(typec); return (PyObject *)ret; fail: Py_XDECREF(ap1); Py_XDECREF(ap2); Py_XDECREF(ret); + Py_XDECREF(typec); return NULL; } @@ -2587,14 +2598,13 @@ array_matrixproduct(PyObject *NPY_UNUSED(dummy), static PyObject * array_vdot(PyObject *NPY_UNUSED(dummy), PyObject *const *args, Py_ssize_t len_args) { - int typenum; char *ip1, *ip2, *op; npy_intp n, stride1, stride2; PyObject *op1, *op2; npy_intp newdimptr[1] = {-1}; PyArray_Dims newdims = {newdimptr, 1}; PyArrayObject *ap1 = NULL, *ap2 = NULL, *ret = NULL; - PyArray_Descr *type; + PyArray_Descr *type = NULL; PyArray_DotFunc *vdot; NPY_BEGIN_THREADS_DEF; @@ -2610,16 +2620,19 @@ array_vdot(PyObject *NPY_UNUSED(dummy), PyObject *const *args, Py_ssize_t len_ar * Conjugating dot product using the BLAS for vectors. * Flattens both op1 and op2 before dotting. 
*/ - typenum = PyArray_ObjectType(op1, NPY_NOTYPE); - if (typenum == NPY_NOTYPE) { + if (PyArray_DTypeFromObject(op1, NPY_MAXDIMS, &type) < 0) { return NULL; } - typenum = PyArray_ObjectType(op2, typenum); - if (typenum == NPY_NOTYPE) { + if (PyArray_DTypeFromObject(op2, NPY_MAXDIMS, &type) < 0) { + Py_XDECREF(type); return NULL; } - type = PyArray_DescrFromType(typenum); + if (type == NULL) { + type = PyArray_DescrFromType(NPY_DEFAULT_TYPE); + } + Py_SETREF(type, NPY_DT_CALL_ensure_canonical(type)); + Py_INCREF(type); ap1 = (PyArrayObject *)PyArray_FromAny(op1, type, 0, 0, 0, NULL); if (ap1 == NULL) { @@ -2635,6 +2648,7 @@ array_vdot(PyObject *NPY_UNUSED(dummy), PyObject *const *args, Py_ssize_t len_ar Py_DECREF(ap1); ap1 = (PyArrayObject *)op1; + Py_INCREF(type); ap2 = (PyArrayObject *)PyArray_FromAny(op2, type, 0, 0, 0, NULL); if (ap2 == NULL) { goto fail; @@ -2653,7 +2667,7 @@ array_vdot(PyObject *NPY_UNUSED(dummy), PyObject *const *args, Py_ssize_t len_ar } /* array scalar output */ - ret = new_array_for_sum(ap1, ap2, NULL, 0, (npy_intp *)NULL, typenum, NULL); + ret = new_array_for_sum(ap1, ap2, NULL, 0, (npy_intp *)NULL, type, NULL); if (ret == NULL) { goto fail; } @@ -2665,7 +2679,7 @@ array_vdot(PyObject *NPY_UNUSED(dummy), PyObject *const *args, Py_ssize_t len_ar ip2 = PyArray_DATA(ap2); op = PyArray_DATA(ret); - switch (typenum) { + switch (type->type_num) { case NPY_CFLOAT: vdot = (PyArray_DotFunc *)CFLOAT_vdot; break; @@ -2698,11 +2712,13 @@ array_vdot(PyObject *NPY_UNUSED(dummy), PyObject *const *args, Py_ssize_t len_ar Py_XDECREF(ap1); Py_XDECREF(ap2); + Py_XDECREF(type); return PyArray_Return(ret); fail: Py_XDECREF(ap1); Py_XDECREF(ap2); Py_XDECREF(ret); + Py_XDECREF(type); return NULL; } diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 9c52b7b51c6f..7d01fb5ed441 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -7274,6 +7274,13 @@ def 
test_dot_array_order(self): assert_equal(np.dot(b, a), res) assert_equal(np.dot(b, b), res) + def test_dot_has_native_byteorder(self): + # gh-30931 + a = np.array([1, 2, 3], ">f8") + dot = a.dot([[], [], []]) + + assert_equal(dot.dtype, np.dtype("=f8")) + def test_accelerate_framework_sgemv_fix(self): def aligned_array(shape, align, dtype, order='C'): From a6ce1e76a9cd6ecca0e20a62590d26bdb1ca56c3 Mon Sep 17 00:00:00 2001 From: Peter Hawkins Date: Fri, 6 Mar 2026 09:16:53 -0500 Subject: [PATCH 1473/1718] ENH: Test .kind not .char in np.testing.assert_equal (#30879) Co-authored-by: Sebastian Berg --- numpy/testing/_private/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index 54d040a6ed3f..81e990bbca3b 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -744,7 +744,7 @@ def assert_array_compare(comparison, x, y, err_msg='', verbose=True, header='', ox, oy = x, y def isnumber(x): - return x.dtype.char in '?bhilqpBHILQPefdgFDG' + return type(x.dtype)._is_numeric def istime(x): return x.dtype.char in "Mm" From f8a86fae07ca415a8062b47aed27777a5047cca7 Mon Sep 17 00:00:00 2001 From: Roman Donchenko Date: Fri, 6 Mar 2026 22:32:57 +0200 Subject: [PATCH 1474/1718] DOC: remove a redundant build command Both `environment.yml` and `build_requirements.txt` already include spin, so there is no need to install it again. 
--- building_with_meson.md | 3 --- 1 file changed, 3 deletions(-) diff --git a/building_with_meson.md b/building_with_meson.md index 6498d3659bb0..602e80756fe5 100644 --- a/building_with_meson.md +++ b/building_with_meson.md @@ -14,9 +14,6 @@ have rough edges, please open an issue if you run into a problem._ *Note: also make sure you have `pkg-config` and the usual system dependencies for NumPy* -Then install spin: -- `python -m pip install spin` - **Compile and install:** `spin build` This builds in the `build/` directory, and installs into the `build-install` directory. From 42e36f6c97ea1454b09033f7e6ac879639b85dd7 Mon Sep 17 00:00:00 2001 From: antareepsarkar Date: Sat, 7 Mar 2026 07:44:31 +0530 Subject: [PATCH 1475/1718] MAINT: change ruff version to 0.15.4 --- environment.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/environment.yml b/environment.yml index e74ba4aba356..8d8f1a871c46 100644 --- a/environment.yml +++ b/environment.yml @@ -45,7 +45,7 @@ dependencies: - breathe>4.33.0 # For linting - cython-lint - - ruff=0.15.2 + - ruff=0.15.4 - gitpython # Used in some tests - cffi From 15497f5a4bd7ca4ac728e4cc8437ed4843a70619 Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Sat, 7 Mar 2026 20:14:48 +0200 Subject: [PATCH 1476/1718] DOC: tweak [skip actions][skip cirrus] Co-authored-by: Nathan Goldbaum --- doc/source/dev/ai_policy.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/doc/source/dev/ai_policy.rst b/doc/source/dev/ai_policy.rst index 7675af5bfab0..3a793d45b849 100644 --- a/doc/source/dev/ai_policy.rst +++ b/doc/source/dev/ai_policy.rst @@ -15,7 +15,8 @@ of whether it was manually written or generated by AI. You must understand and b to explain the code you submit as well as the existing related code. It is not acceptable to submit a patch that you cannot understand and explain yourself. In explaining your contribution, do not use AI to automatically generate -descriptions. 
+comments, pull request descriptions, or issue descriptions. See below for our +policy on AI translation systems. Disclosure ---------- From ee46cfb3f669e418dfcfb54fd8e05e02a8c33feb Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Sun, 8 Mar 2026 21:41:12 +0200 Subject: [PATCH 1477/1718] MAINT: update openblas (#30961) --- requirements/ci32_requirements.txt | 2 +- requirements/ci_requirements.txt | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/requirements/ci32_requirements.txt b/requirements/ci32_requirements.txt index da8c8141917f..a19f6b3b9e7b 100644 --- a/requirements/ci32_requirements.txt +++ b/requirements/ci32_requirements.txt @@ -1,3 +1,3 @@ spin # Keep this in sync with ci_requirements.txt -scipy-openblas32==0.3.31.22.1 +scipy-openblas32==0.3.31.126.1 diff --git a/requirements/ci_requirements.txt b/requirements/ci_requirements.txt index 6a63af65d96c..f200891aa560 100644 --- a/requirements/ci_requirements.txt +++ b/requirements/ci_requirements.txt @@ -1,4 +1,4 @@ spin # Keep this in sync with ci32_requirements.txt -scipy-openblas32==0.3.31.22.1 -scipy-openblas64==0.3.31.22.1 +scipy-openblas32==0.3.31.126.1 +scipy-openblas64==0.3.31.126.1 From 8b6b26524b6d562451c3bae55cd7b732a0d2c32b Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Mon, 9 Mar 2026 07:18:47 +0000 Subject: [PATCH 1478/1718] MAINT: Use `npy_cfloat/npy_cdouble/npy_clongdouble` in `f2py` (#30966) Replaces custom complex struct definitions (struct {T r,i;}) with the npy_cfloat, npy_cdouble, npy_clongdouble types. Updates accessor patterns from .r/.i to npy_creal/npy_cimag/npy_csetreal/npy_csetimag. Replaces {0.0,0.0} initializers with npy_cpack(). The TRYCOMPLEXPYARRAYTEMPLATE macro now uses helper functions to extract real/imag parts as doubles, since the npy accessor functions are type-specific while the macro is type-generic. 
--- numpy/f2py/cfuncs.py | 139 ++++++++++++++++++++++++------------------- 1 file changed, 77 insertions(+), 62 deletions(-) diff --git a/numpy/f2py/cfuncs.py b/numpy/f2py/cfuncs.py index f48617f8e878..63a7ff5740cb 100644 --- a/numpy/f2py/cfuncs.py +++ b/numpy/f2py/cfuncs.py @@ -89,10 +89,9 @@ def errmess(s: str) -> None: typedef long double long_double; #endif """ -typedefs[ - 'complex_long_double'] = 'typedef struct {long double r,i;} complex_long_double;' -typedefs['complex_float'] = 'typedef struct {float r,i;} complex_float;' -typedefs['complex_double'] = 'typedef struct {double r,i;} complex_double;' +typedefs['complex_long_double'] = 'typedef npy_clongdouble complex_long_double;' +typedefs['complex_float'] = 'typedef npy_cfloat complex_float;' +typedefs['complex_double'] = 'typedef npy_cdouble complex_double;' typedefs['string'] = """typedef char * string;""" typedefs['character'] = """typedef char character;""" @@ -288,15 +287,15 @@ def errmess(s: str) -> None: #define pyobj_from_double1(v) (PyFloat_FromDouble(v))""" cppmacros['pyobj_from_float1'] = """ #define pyobj_from_float1(v) (PyFloat_FromDouble(v))""" -needs['pyobj_from_complex_long_double1'] = ['complex_long_double'] +needs['pyobj_from_complex_long_double1'] = ['complex_long_double', 'npy_math.h'] cppmacros['pyobj_from_complex_long_double1'] = """ -#define pyobj_from_complex_long_double1(v) (PyComplex_FromDoubles(v.r,v.i))""" -needs['pyobj_from_complex_double1'] = ['complex_double'] +#define pyobj_from_complex_long_double1(v) (PyComplex_FromDoubles((double)npy_creall(v),(double)npy_cimagl(v)))""" +needs['pyobj_from_complex_double1'] = ['complex_double', 'npy_math.h'] cppmacros['pyobj_from_complex_double1'] = """ -#define pyobj_from_complex_double1(v) (PyComplex_FromDoubles(v.r,v.i))""" -needs['pyobj_from_complex_float1'] = ['complex_float'] +#define pyobj_from_complex_double1(v) (PyComplex_FromDoubles(npy_creal(v),npy_cimag(v)))""" +needs['pyobj_from_complex_float1'] = ['complex_float', 
'npy_math.h'] cppmacros['pyobj_from_complex_float1'] = """ -#define pyobj_from_complex_float1(v) (PyComplex_FromDoubles(v.r,v.i))""" +#define pyobj_from_complex_float1(v) (PyComplex_FromDoubles((double)npy_crealf(v),(double)npy_cimagf(v)))""" needs['pyobj_from_string1'] = ['string'] cppmacros['pyobj_from_string1'] = """ #define pyobj_from_string1(v) (PyUnicode_FromString((char *)v))""" @@ -340,42 +339,56 @@ def errmess(s: str) -> None: return 1 """ -needs['TRYCOMPLEXPYARRAYTEMPLATE'] = ['PRINTPYOBJERR'] +needs['TRYCOMPLEXPYARRAYTEMPLATE'] = ['PRINTPYOBJERR', 'npy_math.h'] cppmacros['TRYCOMPLEXPYARRAYTEMPLATE'] = """ #define TRYCOMPLEXPYARRAYTEMPLATEOBJECT case NPY_OBJECT: PyArray_SETITEM(arr, PyArray_DATA(arr), pyobj_from_complex_ ## ctype ## 1((*v))); break; -#define TRYCOMPLEXPYARRAYTEMPLATE(ctype,typecode)\\ +/* Helper to extract real/imag from complex value via cast to npy_cdouble */ +static inline double _f2py_creal_as_double(const void *p, int typenum) { + if (typenum == NPY_CFLOAT) return (double)npy_crealf(*(const npy_cfloat *)p); + if (typenum == NPY_CLONGDOUBLE) return (double)npy_creall(*(const npy_clongdouble *)p); + return npy_creal(*(const npy_cdouble *)p); +} +static inline double _f2py_cimag_as_double(const void *p, int typenum) { + if (typenum == NPY_CFLOAT) return (double)npy_cimagf(*(const npy_cfloat *)p); + if (typenum == NPY_CLONGDOUBLE) return (double)npy_cimagl(*(const npy_clongdouble *)p); + return npy_cimag(*(const npy_cdouble *)p); +} +#define TRYCOMPLEXPYARRAYTEMPLATE(ctype,typenum)\\ PyArrayObject *arr = NULL;\\ + double _re, _im;\\ if (!obj) return -2;\\ if (!PyArray_Check(obj)) return -1;\\ if (!(arr=(PyArrayObject *)obj)) {fprintf(stderr,\"TRYCOMPLEXPYARRAYTEMPLATE:\");PRINTPYOBJERR(obj);return 0;}\\ - if (PyArray_DESCR(arr)->type==typecode) {\\ - *(ctype *)(PyArray_DATA(arr))=(*v).r;\\ - *(ctype *)(PyArray_DATA(arr)+sizeof(ctype))=(*v).i;\\ + _re = _f2py_creal_as_double(v, typenum);\\ + _im = _f2py_cimag_as_double(v, typenum);\\ + if 
(PyArray_TYPE(arr)==typenum) {\\ + *(ctype *)(PyArray_DATA(arr))=(ctype)_re;\\ + *(ctype *)(PyArray_DATA(arr)+sizeof(ctype))=(ctype)_im;\\ return 1;\\ }\\ switch (PyArray_TYPE(arr)) {\\ - case NPY_CDOUBLE: *(npy_double *)(PyArray_DATA(arr))=(*v).r;\\ - *(npy_double *)(PyArray_DATA(arr)+sizeof(npy_double))=(*v).i;\\ + case NPY_CDOUBLE: npy_csetreal((npy_cdouble *)PyArray_DATA(arr), _re);\\ + npy_csetimag((npy_cdouble *)PyArray_DATA(arr), _im);\\ break;\\ - case NPY_CFLOAT: *(npy_float *)(PyArray_DATA(arr))=(*v).r;\\ - *(npy_float *)(PyArray_DATA(arr)+sizeof(npy_float))=(*v).i;\\ + case NPY_CFLOAT: npy_csetrealf((npy_cfloat *)PyArray_DATA(arr), (float)_re);\\ + npy_csetimagf((npy_cfloat *)PyArray_DATA(arr), (float)_im);\\ break;\\ - case NPY_DOUBLE: *(npy_double *)(PyArray_DATA(arr))=(*v).r; break;\\ - case NPY_LONG: *(npy_long *)(PyArray_DATA(arr))=(*v).r; break;\\ - case NPY_FLOAT: *(npy_float *)(PyArray_DATA(arr))=(*v).r; break;\\ - case NPY_INT: *(npy_int *)(PyArray_DATA(arr))=(*v).r; break;\\ - case NPY_SHORT: *(npy_short *)(PyArray_DATA(arr))=(*v).r; break;\\ - case NPY_UBYTE: *(npy_ubyte *)(PyArray_DATA(arr))=(*v).r; break;\\ - case NPY_BYTE: *(npy_byte *)(PyArray_DATA(arr))=(*v).r; break;\\ - case NPY_BOOL: *(npy_bool *)(PyArray_DATA(arr))=((*v).r!=0 && (*v).i!=0); break;\\ - case NPY_USHORT: *(npy_ushort *)(PyArray_DATA(arr))=(*v).r; break;\\ - case NPY_UINT: *(npy_uint *)(PyArray_DATA(arr))=(*v).r; break;\\ - case NPY_ULONG: *(npy_ulong *)(PyArray_DATA(arr))=(*v).r; break;\\ - case NPY_LONGLONG: *(npy_longlong *)(PyArray_DATA(arr))=(*v).r; break;\\ - case NPY_ULONGLONG: *(npy_ulonglong *)(PyArray_DATA(arr))=(*v).r; break;\\ - case NPY_LONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=(*v).r; break;\\ - case NPY_CLONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=(*v).r;\\ - *(npy_longdouble *)(PyArray_DATA(arr)+sizeof(npy_longdouble))=(*v).i;\\ + case NPY_DOUBLE: *(npy_double *)(PyArray_DATA(arr))=_re; break;\\ + case NPY_LONG: *(npy_long 
*)(PyArray_DATA(arr))=_re; break;\\ + case NPY_FLOAT: *(npy_float *)(PyArray_DATA(arr))=_re; break;\\ + case NPY_INT: *(npy_int *)(PyArray_DATA(arr))=_re; break;\\ + case NPY_SHORT: *(npy_short *)(PyArray_DATA(arr))=_re; break;\\ + case NPY_UBYTE: *(npy_ubyte *)(PyArray_DATA(arr))=_re; break;\\ + case NPY_BYTE: *(npy_byte *)(PyArray_DATA(arr))=_re; break;\\ + case NPY_BOOL: *(npy_bool *)(PyArray_DATA(arr))=(_re!=0 && _im!=0); break;\\ + case NPY_USHORT: *(npy_ushort *)(PyArray_DATA(arr))=_re; break;\\ + case NPY_UINT: *(npy_uint *)(PyArray_DATA(arr))=_re; break;\\ + case NPY_ULONG: *(npy_ulong *)(PyArray_DATA(arr))=_re; break;\\ + case NPY_LONGLONG: *(npy_longlong *)(PyArray_DATA(arr))=_re; break;\\ + case NPY_ULONGLONG: *(npy_ulonglong *)(PyArray_DATA(arr))=_re; break;\\ + case NPY_LONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=_re; break;\\ + case NPY_CLONGDOUBLE: npy_csetreall((npy_clongdouble *)PyArray_DATA(arr), (npy_longdouble)_re);\\ + npy_csetimagl((npy_clongdouble *)PyArray_DATA(arr), (npy_longdouble)_im);\\ break;\\ case NPY_OBJECT: PyArray_SETITEM(arr, PyArray_DATA(arr), pyobj_from_complex_ ## ctype ## 1((*v))); break;\\ default: return -2;\\ @@ -1133,7 +1146,7 @@ def errmess(s: str) -> None: static int complex_long_double_from_pyobj(complex_long_double* v, PyObject *obj, const char *errmess) { - complex_double cd = {0.0,0.0}; + complex_double cd = npy_cpack(0.0, 0.0); if (PyArray_CheckScalar(obj)){ if PyArray_IsScalar(obj, CLongDouble) { PyArray_ScalarAsCtype(obj, v); @@ -1142,15 +1155,16 @@ def errmess(s: str) -> None: else if (PyArray_Check(obj)) { PyArrayObject *arr = (PyArrayObject *)obj; if (PyArray_TYPE(arr)==NPY_CLONGDOUBLE) { - (*v).r = npy_creall(*(((npy_clongdouble *)PyArray_DATA(arr)))); - (*v).i = npy_cimagl(*(((npy_clongdouble *)PyArray_DATA(arr)))); + npy_clongdouble tmp = *(npy_clongdouble *)PyArray_DATA(arr); + npy_csetreall(v, npy_creall(tmp)); + npy_csetimagl(v, npy_cimagl(tmp)); return 1; } } } if 
(complex_double_from_pyobj(&cd,obj,errmess)) { - (*v).r = (long_double)cd.r; - (*v).i = (long_double)cd.i; + npy_csetreall(v, (long_double)npy_creal(cd)); + npy_csetimagl(v, (long_double)npy_cimag(cd)); return 1; } return 0; @@ -1165,22 +1179,22 @@ def errmess(s: str) -> None: Py_complex c; if (PyComplex_Check(obj)) { c = PyComplex_AsCComplex(obj); - (*v).r = c.real; - (*v).i = c.imag; + npy_csetreal(v, c.real); + npy_csetimag(v, c.imag); return 1; } if (PyArray_IsScalar(obj, ComplexFloating)) { if (PyArray_IsScalar(obj, CFloat)) { - npy_cfloat new; - PyArray_ScalarAsCtype(obj, &new); - (*v).r = (double)npy_crealf(new); - (*v).i = (double)npy_cimagf(new); + npy_cfloat tmp; + PyArray_ScalarAsCtype(obj, &tmp); + npy_csetreal(v, (double)npy_crealf(tmp)); + npy_csetimag(v, (double)npy_cimagf(tmp)); } else if (PyArray_IsScalar(obj, CLongDouble)) { - npy_clongdouble new; - PyArray_ScalarAsCtype(obj, &new); - (*v).r = (double)npy_creall(new); - (*v).i = (double)npy_cimagl(new); + npy_clongdouble tmp; + PyArray_ScalarAsCtype(obj, &tmp); + npy_csetreal(v, (double)npy_creall(tmp)); + npy_csetimag(v, (double)npy_cimagl(tmp)); } else { /* if (PyArray_IsScalar(obj, CDouble)) */ PyArray_ScalarAsCtype(obj, v); @@ -1198,20 +1212,21 @@ def errmess(s: str) -> None: if (arr == NULL) { return 0; } - (*v).r = npy_creal(*(((npy_cdouble *)PyArray_DATA(arr)))); - (*v).i = npy_cimag(*(((npy_cdouble *)PyArray_DATA(arr)))); + npy_cdouble tmp = *(npy_cdouble *)PyArray_DATA(arr); + npy_csetreal(v, npy_creal(tmp)); + npy_csetimag(v, npy_cimag(tmp)); Py_DECREF(arr); return 1; } /* Python does not provide PyNumber_Complex function :-( */ - (*v).i = 0.0; + npy_csetimag(v, 0.0); if (PyFloat_Check(obj)) { - (*v).r = PyFloat_AsDouble(obj); - return !((*v).r == -1.0 && PyErr_Occurred()); + npy_csetreal(v, PyFloat_AsDouble(obj)); + return !(npy_creal(*v) == -1.0 && PyErr_Occurred()); } if (PyLong_Check(obj)) { - (*v).r = PyLong_AsDouble(obj); - return !((*v).r == -1.0 && PyErr_Occurred()); + 
npy_csetreal(v, PyLong_AsDouble(obj)); + return !(npy_creal(*v) == -1.0 && PyErr_Occurred()); } if (PySequence_Check(obj) && !(PyBytes_Check(obj) || PyUnicode_Check(obj))) { PyObject *tmp = PySequence_GetItem(obj,0); @@ -1240,10 +1255,10 @@ def errmess(s: str) -> None: static int complex_float_from_pyobj(complex_float* v,PyObject *obj,const char *errmess) { - complex_double cd={0.0,0.0}; + complex_double cd = npy_cpack(0.0, 0.0); if (complex_double_from_pyobj(&cd,obj,errmess)) { - (*v).r = (float)cd.r; - (*v).i = (float)cd.i; + npy_csetrealf(v, (float)npy_creal(cd)); + npy_csetimagf(v, (float)npy_cimag(cd)); return 1; } return 0; @@ -1307,11 +1322,11 @@ def errmess(s: str) -> None: needs['try_pyarr_from_complex_float'] = [ 'pyobj_from_complex_float1', 'TRYCOMPLEXPYARRAYTEMPLATE', 'complex_float'] cfuncs[ - 'try_pyarr_from_complex_float'] = 'static int try_pyarr_from_complex_float(PyObject* obj,complex_float* v) {\n TRYCOMPLEXPYARRAYTEMPLATE(float,\'F\');\n}\n' + 'try_pyarr_from_complex_float'] = 'static int try_pyarr_from_complex_float(PyObject* obj,complex_float* v) {\n TRYCOMPLEXPYARRAYTEMPLATE(float,NPY_CFLOAT);\n}\n' needs['try_pyarr_from_complex_double'] = [ 'pyobj_from_complex_double1', 'TRYCOMPLEXPYARRAYTEMPLATE', 'complex_double'] cfuncs[ - 'try_pyarr_from_complex_double'] = 'static int try_pyarr_from_complex_double(PyObject* obj,complex_double* v) {\n TRYCOMPLEXPYARRAYTEMPLATE(double,\'D\');\n}\n' + 'try_pyarr_from_complex_double'] = 'static int try_pyarr_from_complex_double(PyObject* obj,complex_double* v) {\n TRYCOMPLEXPYARRAYTEMPLATE(double,NPY_CDOUBLE);\n}\n' needs['create_cb_arglist'] = ['CFUNCSMESS', 'PRINTPYOBJERR', 'MINMAX'] From 278d2c92d25b6f8eecbf4ba1904f2a2b3f0a90f9 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Mon, 9 Mar 2026 10:05:41 +0100 Subject: [PATCH 1479/1718] MAINT: Avoid warning on no-cblas build Oddly enough, I never made to have to have cblas in a local build and this warning started showing up. 
--- numpy/_core/src/multiarray/multiarraymodule.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index a451e0099675..047185a32900 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -975,7 +975,7 @@ PyArray_MatrixProduct2(PyObject *op1, PyObject *op2, PyArrayObject* out) PyArrayObject *ap1, *ap2, *out_buf = NULL, *result = NULL; PyArrayIterObject *it1, *it2; npy_intp i, j, l; - int typenum, nd, axis, matchDim; + int nd, axis, matchDim; npy_intp is1, is2, os; char *op; npy_intp dimensions[NPY_MAXDIMS]; @@ -995,7 +995,6 @@ PyArray_MatrixProduct2(PyObject *op1, PyObject *op2, PyArrayObject* out) typec = PyArray_DescrFromType(NPY_DEFAULT_TYPE); } Py_SETREF(typec, NPY_DT_CALL_ensure_canonical(typec)); - typenum = typec->type_num; Py_INCREF(typec); ap1 = (PyArrayObject *)PyArray_FromAny(op1, typec, 0, 0, @@ -1015,8 +1014,8 @@ PyArray_MatrixProduct2(PyObject *op1, PyObject *op2, PyArrayObject* out) #if defined(HAVE_CBLAS) if (PyArray_NDIM(ap1) <= 2 && PyArray_NDIM(ap2) <= 2 && - (NPY_DOUBLE == typenum || NPY_CDOUBLE == typenum || - NPY_FLOAT == typenum || NPY_CFLOAT == typenum)) { + (NPY_DOUBLE == typec->type_num || NPY_CDOUBLE == typec->type_num || + NPY_FLOAT == typec->type_num || NPY_CFLOAT == typec->type_num)) { return cblas_matrixproduct(typec, ap1, ap2, out); } #endif From fb8157f2d60d57ce460f5cf5d23f792f60affd3d Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Mon, 9 Mar 2026 10:32:21 +0100 Subject: [PATCH 1480/1718] ENH: Improve performance of npy_parse_arguments (#30930) Co-authored-by: Nathan Goldbaum --- numpy/_core/src/common/npy_argparse.c | 98 +++---- numpy/_core/src/common/npy_argparse.h | 58 +++-- .../src/multiarray/_multiarray_tests.c.src | 14 +- numpy/_core/src/multiarray/array_coercion.c | 5 +- numpy/_core/src/multiarray/array_converter.c | 19 +- 
numpy/_core/src/multiarray/compiled_base.c | 34 ++- numpy/_core/src/multiarray/dlpack.c | 16 +- numpy/_core/src/multiarray/methods.c | 117 ++++----- numpy/_core/src/multiarray/multiarraymodule.c | 242 ++++++++---------- .../src/multiarray/textreading/readtext.c | 29 +-- numpy/_core/src/multiarray/unique.cpp | 7 +- numpy/_core/src/umath/extobj.c | 15 +- numpy/_core/src/umath/ufunc_object.c | 87 +++---- 13 files changed, 328 insertions(+), 413 deletions(-) diff --git a/numpy/_core/src/common/npy_argparse.c b/numpy/_core/src/common/npy_argparse.c index ea15ec68026b..8961bdd61e49 100644 --- a/numpy/_core/src/common/npy_argparse.c +++ b/numpy/_core/src/common/npy_argparse.c @@ -76,8 +76,6 @@ PyArray_PythonPyIntFromInt(PyObject *obj, int *value) } -typedef int convert(PyObject *, void *); - /** * Internal function to initialize keyword argument parsing. * @@ -92,55 +90,41 @@ typedef int convert(PyObject *, void *); * * @param funcname Name of the function, mainly used for errors. * @param cache A cache object stored statically in the parsing function - * @param va_orig Argument list to npy_parse_arguments + * @param specs Array of argument specifications + * @param nspecs Number of argument specifications * @return 0 on success, -1 on failure */ static int initialize_keywords(const char *funcname, - _NpyArgParserCache *cache, va_list va_orig) { - va_list va; - int nargs = 0; + _NpyArgParserCache *cache, npy_arg_spec *specs, int nspecs) { int nkwargs = 0; int npositional_only = 0; int nrequired = 0; int npositional = 0; char state = '\0'; - va_copy(va, va_orig); - while (1) { - /* Count length first: */ - char *name = va_arg(va, char *); - convert *converter = va_arg(va, convert *); - void *data = va_arg(va, void *); - - /* Check if this is the sentinel, only converter may be NULL */ - if ((name == NULL) && (converter == NULL) && (data == NULL)) { - break; - } + for (int i = 0; i < nspecs; i++) { + const char *name = specs[i].name; if (name == NULL) { 
PyErr_Format(PyExc_SystemError, "NumPy internal error: name is NULL in %s() at " - "argument %d.", funcname, nargs); - va_end(va); + "argument %d.", funcname, i); return -1; } - if (data == NULL) { + if (specs[i].output == NULL) { PyErr_Format(PyExc_SystemError, "NumPy internal error: data is NULL in %s() at " - "argument %d.", funcname, nargs); - va_end(va); + "argument %d.", funcname, i); return -1; } - nargs += 1; if (*name == '|') { if (state == '$') { PyErr_Format(PyExc_SystemError, "NumPy internal error: positional argument `|` " "after keyword only `$` one to %s() at argument %d.", - funcname, nargs); - va_end(va); + funcname, i + 1); return -1; } state = '|'; @@ -156,8 +140,7 @@ initialize_keywords(const char *funcname, PyErr_Format(PyExc_SystemError, "NumPy internal error: non-required argument after " "required | or $ one to %s() at argument %d.", - funcname, nargs); - va_end(va); + funcname, i + 1); return -1; } @@ -172,8 +155,7 @@ initialize_keywords(const char *funcname, PyErr_Format(PyExc_SystemError, "NumPy internal error: non-kwarg marked with $ " "to %s() at argument %d or positional only following " - "kwarg.", funcname, nargs); - va_end(va); + "kwarg.", funcname, i + 1); return -1; } } @@ -181,18 +163,17 @@ initialize_keywords(const char *funcname, nkwargs += 1; } } - va_end(va); if (npositional == -1) { - npositional = nargs; + npositional = nspecs; } - if (nargs > _NPY_MAX_KWARGS) { + if (nspecs > _NPY_MAX_KWARGS) { PyErr_Format(PyExc_SystemError, "NumPy internal error: function %s() has %d arguments, but " "the maximum is currently limited to %d for easier parsing; " "it can be increased by modifying `_NPY_MAX_KWARGS`.", - funcname, nargs, _NPY_MAX_KWARGS); + funcname, nspecs, _NPY_MAX_KWARGS); return -1; } @@ -200,7 +181,7 @@ initialize_keywords(const char *funcname, * Do any necessary string allocation and interning, * creating a caching object. 
*/ - cache->nargs = nargs; + cache->nargs = nspecs; cache->npositional_only = npositional_only; cache->npositional = npositional; cache->nrequired = nrequired; @@ -208,12 +189,8 @@ initialize_keywords(const char *funcname, /* NULL kw_strings for easier cleanup (and NULL termination) */ memset(cache->kw_strings, 0, sizeof(PyObject *) * (nkwargs + 1)); - va_copy(va, va_orig); - for (int i = 0; i < nargs; i++) { - /* Advance through non-kwargs, which do not require setup. */ - char *name = va_arg(va, char *); - va_arg(va, convert *); - va_arg(va, void *); + for (int i = 0; i < nspecs; i++) { + const char *name = specs[i].name; if (*name == '|' || *name == '$') { name++; /* ignore | and $ */ @@ -222,13 +199,11 @@ initialize_keywords(const char *funcname, int i_kwarg = i - npositional_only; cache->kw_strings[i_kwarg] = PyUnicode_InternFromString(name); if (cache->kw_strings[i_kwarg] == NULL) { - va_end(va); goto error; } } } - va_end(va); return 0; error: @@ -288,25 +263,21 @@ raise_missing_argument(const char *funcname, * @param args Python passed args (METH_FASTCALL) * @param len_args Number of arguments (not flagged) * @param kwnames Tuple as passed by METH_FASTCALL or NULL. - * @param ... List of arguments (see macro version). + * @param specs Array of argument specifications + * @param nspecs Number of argument specifications * * @return Returns 0 on success and -1 on failure. */ NPY_NO_EXPORT int _npy_parse_arguments(const char *funcname, - /* cache_ptr is a NULL initialized persistent storage for data */ _NpyArgParserCache *cache, PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames, - /* ... is NULL, NULL, NULL terminated: name, converter, value */ - ...) 
+ npy_arg_spec *specs, int nspecs) { if (!atomic_load_explicit((_Atomic(uint8_t) *)&cache->initialized, memory_order_acquire)) { LOCK_ARGPARSE_MUTEX; if (!atomic_load_explicit((_Atomic(uint8_t) *)&cache->initialized, memory_order_acquire)) { - va_list va; - va_start(va, kwnames); - int res = initialize_keywords(funcname, cache, va); - va_end(va); + int res = initialize_keywords(funcname, cache, specs, nspecs); if (res < 0) { UNLOCK_ARGPARSE_MUTEX; return -1; @@ -394,38 +365,33 @@ _npy_parse_arguments(const char *funcname, assert(len_args + len_kwargs <= cache->nargs); /* At this time `all_arguments` holds either NULLs or the objects */ - va_list va; - va_start(va, kwnames); - for (int i = 0; i < max_nargs; i++) { - va_arg(va, char *); - convert *converter = va_arg(va, convert *); - void *data = va_arg(va, void *); - if (all_arguments[i] == NULL) { continue; } - int res; + npy_arg_converter converter = (npy_arg_converter)specs[i].converter; + void *data = specs[i].output; + if (converter == NULL) { *((PyObject **) data) = all_arguments[i]; continue; } - res = converter(all_arguments[i], data); + int res = converter(all_arguments[i], data); if (NPY_UNLIKELY(res == NPY_SUCCEED)) { continue; } else if (NPY_UNLIKELY(res == NPY_FAIL)) { /* It is usually the users responsibility to clean up. 
*/ - goto converting_failed; + return -1; } else if (NPY_UNLIKELY(res == Py_CLEANUP_SUPPORTED)) { /* TODO: Implementing cleanup if/when needed should not be hard */ PyErr_Format(PyExc_SystemError, "converter cleanup of parameter %d to %s() not supported.", i, funcname); - goto converting_failed; + return -1; } assert(0); } @@ -435,21 +401,15 @@ _npy_parse_arguments(const char *funcname, /* (PyArg_* also does this after the actual parsing is finished) */ if (NPY_UNLIKELY(max_nargs < cache->nrequired)) { raise_missing_argument(funcname, cache, max_nargs); - goto converting_failed; + return -1; } for (int i = 0; i < cache->nrequired; i++) { if (NPY_UNLIKELY(all_arguments[i] == NULL)) { raise_missing_argument(funcname, cache, i); - goto converting_failed; + return -1; } } } - va_end(va); return 0; - -converting_failed: - va_end(va); - return -1; - } diff --git a/numpy/_core/src/common/npy_argparse.h b/numpy/_core/src/common/npy_argparse.h index e1eef918cb33..f48ba90791fe 100644 --- a/numpy/_core/src/common/npy_argparse.h +++ b/numpy/_core/src/common/npy_argparse.h @@ -20,7 +20,15 @@ NPY_NO_EXPORT int PyArray_PythonPyIntFromInt(PyObject *obj, int *value); -#define _NPY_MAX_KWARGS 15 +#define _NPY_MAX_KWARGS 14 + +typedef int (*npy_arg_converter)(PyObject *, void *); + +typedef struct { + const char *name; + void *converter; + void *output; +} npy_arg_spec; typedef struct { int npositional; @@ -54,11 +62,10 @@ NPY_NO_EXPORT int init_argparse_mutex(void); * * PyObject *argument1, *argument3; * int argument2 = -1; - * if (npy_parse_arguments("method", args, len_args, kwnames), - * "argument1", NULL, &argument1, - * "|argument2", &PyArray_PythonPyIntFromInt, &argument2, - * "$argument3", NULL, &argument3, - * NULL, NULL, NULL) < 0) { + * if (npy_parse_arguments("method", args, len_args, kwnames, + * {"argument1", NULL, &argument1}, + * {"|argument2", &PyArray_PythonPyIntFromInt, &argument2}, + * {"$argument3", NULL, &argument3}) < 0) { * return NULL; * } * } @@ -66,32 
+73,43 @@ NPY_NO_EXPORT int init_argparse_mutex(void); * * The `NPY_PREPARE_ARGPARSER` macro sets up a static cache variable necessary * to hold data for speeding up the parsing. `npy_parse_arguments` must be - * used in cunjunction with the macro defined in the same scope. + * used in conjunction with the macro defined in the same scope. * (No two `npy_parse_arguments` may share a single `NPY_PREPARE_ARGPARSER`.) * * @param funcname Function name * @param args Python passed args (METH_FASTCALL) * @param len_args Number of arguments (not flagged) * @param kwnames Tuple as passed by METH_FASTCALL or NULL. - * @param ... List of arguments must be param1_name, param1_converter, - * *param1_outvalue, param2_name, ..., NULL, NULL, NULL. - * Where name is ``char *``, ``converter`` a python converter - * function or NULL and ``outvalue`` is the ``void *`` passed to - * the converter (holding the converted data or a borrowed - * reference if converter is NULL). + * @param ... List of argument specs as {name, converter, outvalue} structs. + * Where name is ``const char *``, ``converter`` a python converter + * function pointer or NULL and ``outvalue`` is the ``void *`` + * passed to the converter (holding the converted data or a + * borrowed reference if converter is NULL). * * @return Returns 0 on success and -1 on failure. */ NPY_NO_EXPORT int _npy_parse_arguments(const char *funcname, - /* cache_ptr is a NULL initialized persistent storage for data */ - _NpyArgParserCache *cache_ptr, + _NpyArgParserCache *cache, PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames, - /* va_list is NULL, NULL, NULL terminated: name, converter, value */ - ...) NPY_GCC_NONNULL(1); + npy_arg_spec *specs, int nspecs) NPY_GCC_NONNULL(1); -#define npy_parse_arguments(funcname, args, len_args, kwnames, ...) 
\ - _npy_parse_arguments(funcname, &__argparse_cache, \ - args, len_args, kwnames, __VA_ARGS__) +#ifdef __cplusplus +#define npy_parse_arguments(funcname, args, len_args, kwnames, ...) \ + [&]() -> int { \ + npy_arg_spec _npy_specs_[] = {__VA_ARGS__}; \ + return _npy_parse_arguments(funcname, &__argparse_cache, \ + args, len_args, kwnames, \ + _npy_specs_, \ + (int)(sizeof(_npy_specs_) / sizeof(npy_arg_spec))); \ + }() +#else +#define npy_parse_arguments(funcname, args, len_args, kwnames, ...) \ + _npy_parse_arguments(funcname, &__argparse_cache, \ + args, len_args, kwnames, \ + (npy_arg_spec[]){__VA_ARGS__}, \ + (int)(sizeof((npy_arg_spec[]){__VA_ARGS__}) \ + / sizeof(npy_arg_spec))) +#endif #endif /* NUMPY_CORE_SRC_COMMON_NPY_ARGPARSE_H */ diff --git a/numpy/_core/src/multiarray/_multiarray_tests.c.src b/numpy/_core/src/multiarray/_multiarray_tests.c.src index f79ff9486fe4..b84868933ad6 100644 --- a/numpy/_core/src/multiarray/_multiarray_tests.c.src +++ b/numpy/_core/src/multiarray/_multiarray_tests.c.src @@ -33,11 +33,10 @@ argparse_example_function(PyObject *NPY_UNUSED(mod), int arg1; PyObject *arg2, *arg3, *arg4; if (npy_parse_arguments("func", args, len_args, kwnames, - "", &PyArray_PythonPyIntFromInt, &arg1, - "arg2", NULL, &arg2, - "|arg3", NULL, &arg3, - "$arg3", NULL, &arg4, - NULL, NULL, NULL) < 0) { + {"", &PyArray_PythonPyIntFromInt, &arg1}, + {"arg2", NULL, &arg2}, + {"|arg3", NULL, &arg3}, + {"$arg3", NULL, &arg4}) < 0) { return NULL; } Py_RETURN_NONE; @@ -57,9 +56,8 @@ threaded_argparse_example_function(PyObject *NPY_UNUSED(mod), int arg1; PyObject *arg2; if (npy_parse_arguments("thread_func", args, len_args, kwnames, - "$arg1", &PyArray_PythonPyIntFromInt, &arg1, - "$arg2", NULL, &arg2, - NULL, NULL, NULL) < 0) { + {"$arg1", &PyArray_PythonPyIntFromInt, &arg1}, + {"$arg2", NULL, &arg2}) < 0) { return NULL; } Py_RETURN_NONE; diff --git a/numpy/_core/src/multiarray/array_coercion.c b/numpy/_core/src/multiarray/array_coercion.c index 
24982a4fdc1e..e890b73bda2f 100644 --- a/numpy/_core/src/multiarray/array_coercion.c +++ b/numpy/_core/src/multiarray/array_coercion.c @@ -1420,9 +1420,8 @@ _discover_array_parameters(PyObject *NPY_UNUSED(self), NPY_PREPARE_ARGPARSER; if (npy_parse_arguments( "_discover_array_parameters", args, len_args, kwnames, - "", NULL, &obj, - "|dtype", &PyArray_DTypeOrDescrConverterOptional, &dt_info, - NULL, NULL, NULL) < 0) { + {"", NULL, &obj}, + {"|dtype", &PyArray_DTypeOrDescrConverterOptional, &dt_info}) < 0) { /* fixed is last to parse, so never necessary to clean up */ return NULL; } diff --git a/numpy/_core/src/multiarray/array_converter.c b/numpy/_core/src/multiarray/array_converter.c index 578e7b1554f4..674676dfc0c1 100644 --- a/numpy/_core/src/multiarray/array_converter.c +++ b/numpy/_core/src/multiarray/array_converter.c @@ -221,11 +221,10 @@ array_converter_as_arrays(PyArrayArrayConverterObject *self, scalar_policy policy = CONVERT_IF_NO_ARRAY; NPY_PREPARE_ARGPARSER; + /* pyscalars: how to handle scalars (ignored if dtype is given). */ if (npy_parse_arguments("as_arrays", args, len_args, kwnames, - "$subok", &PyArray_BoolConverter, &subok, - /* how to handle scalars (ignored if dtype is given). */ - "$pyscalars", &pyscalar_mode_conv, &policy, - NULL, NULL, NULL) < 0) { + {"$subok", &PyArray_BoolConverter, &subok}, + {"$pyscalars", &pyscalar_mode_conv, &policy}) < 0) { return NULL; } if (policy == CONVERT_IF_NO_ARRAY) { @@ -286,11 +285,10 @@ array_converter_wrap(PyArrayArrayConverterObject *self, } NPY_PREPARE_ARGPARSER; + /* to_scalar is three-way "bool", if `None` inspect input to decide. */ if (npy_parse_arguments("wrap", args, len_args, kwnames, - "", NULL, &obj, - /* Three-way "bool", if `None` inspect input to decide. 
*/ - "$to_scalar", NULL, &to_scalar, - NULL, NULL, NULL) < 0) { + {"", NULL, &obj}, + {"$to_scalar", NULL, &to_scalar}) < 0) { return NULL; } if (to_scalar == Py_None) { @@ -327,9 +325,8 @@ array_converter_result_type(PyArrayArrayConverterObject *self, NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("result_type", args, len_args, kwnames, - "|extra_dtype", &PyArray_DTypeOrDescrConverterOptional, &dt_info, - "|ensure_inexact", &PyArray_BoolConverter, &ensure_inexact, - NULL, NULL, NULL) < 0) { + {"|extra_dtype", &PyArray_DTypeOrDescrConverterOptional, &dt_info}, + {"|ensure_inexact", &PyArray_BoolConverter, &ensure_inexact}) < 0) { goto finish; } diff --git a/numpy/_core/src/multiarray/compiled_base.c b/numpy/_core/src/multiarray/compiled_base.c index 23e922d470d0..19ac45ad170a 100644 --- a/numpy/_core/src/multiarray/compiled_base.c +++ b/numpy/_core/src/multiarray/compiled_base.c @@ -123,10 +123,9 @@ arr_bincount(PyObject *NPY_UNUSED(self), PyObject *const *args, NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("bincount", args, len_args, kwnames, - "list", NULL, &list, - "|weights", NULL, &weight, - "|minlength", NULL, &mlength, - NULL, NULL, NULL) < 0) { + {"list", NULL, &list}, + {"|weights", NULL, &weight}, + {"|minlength", NULL, &mlength}) < 0) { return NULL; } @@ -553,12 +552,11 @@ arr_interp(PyObject *NPY_UNUSED(self), PyObject *const *args, Py_ssize_t len_arg NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("interp", args, len_args, kwnames, - "x", NULL, &x, - "xp", NULL, &xp, - "fp", NULL, &fp, - "|left", NULL, &left, - "|right", NULL, &right, - NULL, NULL, NULL) < 0) { + {"x", NULL, &x}, + {"xp", NULL, &xp}, + {"fp", NULL, &fp}, + {"|left", NULL, &left}, + {"|right", NULL, &right}) < 0) { return NULL; } @@ -725,12 +723,11 @@ arr_interp_complex(PyObject *NPY_UNUSED(self), PyObject *const *args, Py_ssize_t NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("interp_complex", args, len_args, kwnames, - "x", NULL, &x, - "xp", NULL, &xp, - "fp", NULL, &fp, - "|left", 
NULL, &left, - "|right", NULL, &right, - NULL, NULL, NULL) < 0) { + {"x", NULL, &x}, + {"xp", NULL, &xp}, + {"fp", NULL, &fp}, + {"|left", NULL, &left}, + {"|right", NULL, &right}) < 0) { return NULL; } @@ -1465,9 +1462,8 @@ arr_add_docstring(PyObject *NPY_UNUSED(dummy), PyObject *const *args, Py_ssize_t NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("add_docstring", args, len_args, NULL, - "", NULL, &obj, - "", NULL, &str, - NULL, NULL, NULL) < 0) { + {"", NULL, &obj}, + {"", NULL, &str}) < 0) { return NULL; } if (!PyUnicode_Check(str)) { diff --git a/numpy/_core/src/multiarray/dlpack.c b/numpy/_core/src/multiarray/dlpack.c index 29e5aecec5d5..1c901ff6bc06 100644 --- a/numpy/_core/src/multiarray/dlpack.c +++ b/numpy/_core/src/multiarray/dlpack.c @@ -414,11 +414,10 @@ array_dlpack(PyArrayObject *self, NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("__dlpack__", args, len_args, kwnames, - "$stream", NULL, &stream, - "$max_version", NULL, &max_version, - "$dl_device", &device_converter, &result_device, - "$copy", &PyArray_CopyConverter, ©_mode, - NULL, NULL, NULL)) { + {"$stream", NULL, &stream}, + {"$max_version", NULL, &max_version}, + {"$dl_device", &device_converter, &result_device}, + {"$copy", &PyArray_CopyConverter, ©_mode})) { return NULL; } @@ -493,10 +492,9 @@ from_dlpack(PyObject *NPY_UNUSED(self), PyObject *obj, *copy = Py_None, *device = Py_None; NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("from_dlpack", args, len_args, kwnames, - "obj", NULL, &obj, - "$copy", NULL, ©, - "$device", NULL, &device, - NULL, NULL, NULL) < 0) { + {"obj", NULL, &obj}, + {"$copy", NULL, ©}, + {"$device", NULL, &device}) < 0) { return NULL; } diff --git a/numpy/_core/src/multiarray/methods.c b/numpy/_core/src/multiarray/methods.c index 5333ea7b7538..6728020ad984 100644 --- a/numpy/_core/src/multiarray/methods.c +++ b/numpy/_core/src/multiarray/methods.c @@ -126,11 +126,10 @@ array_take(PyArrayObject *self, NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("take", args, len_args, 
kwnames, - "indices", NULL, &indices, - "|axis", &PyArray_AxisConverter, &dimension, - "|out", &PyArray_OutputConverter, &out, - "|mode", &PyArray_ClipmodeConverter, &mode, - NULL, NULL, NULL) < 0) { + {"indices", NULL, &indices}, + {"|axis", &PyArray_AxisConverter, &dimension}, + {"|out", &PyArray_OutputConverter, &out}, + {"|mode", &PyArray_ClipmodeConverter, &mode}) < 0) { return NULL; } @@ -225,8 +224,7 @@ array_squeeze(PyArrayObject *self, NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("squeeze", args, len_args, kwnames, - "|axis", NULL, &axis_in, - NULL, NULL, NULL) < 0) { + {"|axis", NULL, &axis_in}) < 0) { return NULL; } @@ -253,9 +251,8 @@ array_view(PyArrayObject *self, NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("view", args, len_args, kwnames, - "|dtype", NULL, &out_dtype, - "|type", NULL, &out_type, - NULL, NULL, NULL) < 0) { + {"|dtype", NULL, &out_dtype}, + {"|type", NULL, &out_type}) < 0) { return NULL; } @@ -302,10 +299,9 @@ array_argmax(PyArrayObject *self, NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("argmax", args, len_args, kwnames, - "|axis", &PyArray_AxisConverter, &axis, - "|out", &PyArray_OutputConverter, &out, - "$keepdims", &PyArray_BoolConverter, &keepdims, - NULL, NULL, NULL) < 0) { + {"|axis", &PyArray_AxisConverter, &axis}, + {"|out", &PyArray_OutputConverter, &out}, + {"$keepdims", &PyArray_BoolConverter, &keepdims}) < 0) { return NULL; } @@ -329,10 +325,9 @@ array_argmin(PyArrayObject *self, npy_bool keepdims = NPY_FALSE; NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("argmin", args, len_args, kwnames, - "|axis", &PyArray_AxisConverter, &axis, - "|out", &PyArray_OutputConverter, &out, - "$keepdims", &PyArray_BoolConverter, &keepdims, - NULL, NULL, NULL) < 0) { + {"|axis", &PyArray_AxisConverter, &axis}, + {"|out", &PyArray_OutputConverter, &out}, + {"$keepdims", &PyArray_BoolConverter, &keepdims}) < 0) { return NULL; } @@ -773,12 +768,11 @@ array_astype(PyArrayObject *self, NPY_PREPARE_ARGPARSER; if 
(npy_parse_arguments("astype", args, len_args, kwnames, - "dtype", &PyArray_DTypeOrDescrConverterRequired, &dt_info, - "|order", &PyArray_OrderConverter, &order, - "|casting", &PyArray_CastingConverterSameValue, &casting, - "|subok", &PyArray_PythonPyIntFromInt, &subok, - "|copy", &PyArray_AsTypeCopyConverter, &forcecopy, - NULL, NULL, NULL) < 0) { + {"dtype", &PyArray_DTypeOrDescrConverterRequired, &dt_info}, + {"|order", &PyArray_OrderConverter, &order}, + {"|casting", &PyArray_CastingConverterSameValue, &casting}, + {"|subok", &PyArray_PythonPyIntFromInt, &subok}, + {"|copy", &PyArray_AsTypeCopyConverter, &forcecopy}) < 0) { Py_XDECREF(dt_info.descr); Py_XDECREF(dt_info.dtype); return NULL; @@ -1151,8 +1145,7 @@ array_copy(PyArrayObject *self, NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("copy", args, len_args, kwnames, - "|order", PyArray_OrderConverter, &order, - NULL, NULL, NULL) < 0) { + {"|order", PyArray_OrderConverter, &order}) < 0) { return NULL; } @@ -1271,12 +1264,12 @@ array_sort(PyArrayObject *self, NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("sort", args, len_args, kwnames, - "|axis", &PyArray_PythonPyIntFromInt, &axis, - "|kind", &PyArray_SortkindConverter, &sortkind, - "|order", NULL, &order, - "$stable", &PyArray_OptionalBoolConverter, &stable, -// "$descending", &PyArray_OptionalBoolConverter, &descending, - NULL, NULL, NULL) < 0) { + {"|axis", &PyArray_PythonPyIntFromInt, &axis}, + {"|kind", &PyArray_SortkindConverter, &sortkind}, + {"|order", NULL, &order}, + {"$stable", &PyArray_OptionalBoolConverter, &stable} + // {"$descending", &PyArray_OptionalBoolConverter, &descending} + ) < 0) { return NULL; } @@ -1359,11 +1352,10 @@ array_partition(PyArrayObject *self, NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("partition", args, len_args, kwnames, - "kth", NULL, &kthobj, - "|axis", &PyArray_PythonPyIntFromInt, &axis, - "|kind", &PyArray_SelectkindConverter, &sortkind, - "|order", NULL, &order, - NULL, NULL, NULL) < 0) { + {"kth", NULL, 
&kthobj}, + {"|axis", &PyArray_PythonPyIntFromInt, &axis}, + {"|kind", &PyArray_SelectkindConverter, &sortkind}, + {"|order", NULL, &order}) < 0) { return NULL; } @@ -1433,12 +1425,13 @@ array_argsort(PyArrayObject *self, NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("argsort", args, len_args, kwnames, - "|axis", &PyArray_AxisConverter, &axis, - "|kind", &PyArray_SortkindConverter, &sortkind, - "|order", NULL, &order, - "$stable", &PyArray_OptionalBoolConverter, &stable, -// "$descending", &PyArray_OptionalBoolConverter, &descending, - NULL, NULL, NULL) < 0) { + {"|axis", &PyArray_AxisConverter, &axis}, + {"|kind", &PyArray_SortkindConverter, &sortkind}, + {"|order", NULL, &order}, + {"$stable", &PyArray_OptionalBoolConverter, &stable} + // TODO: add descending sorts, gh-14728 + // {"$descending", &PyArray_OptionalBoolConverter, &descending} + ) < 0) { return NULL; } @@ -1515,11 +1508,10 @@ array_argpartition(PyArrayObject *self, NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("argpartition", args, len_args, kwnames, - "kth", NULL, &kthobj, - "|axis", &PyArray_AxisConverter, &axis, - "|kind", &PyArray_SelectkindConverter, &sortkind, - "|order", NULL, &order, - NULL, NULL, NULL) < 0) { + {"kth", NULL, &kthobj}, + {"|axis", &PyArray_AxisConverter, &axis}, + {"|kind", &PyArray_SelectkindConverter, &sortkind}, + {"|order", NULL, &order}) < 0) { return NULL; } if (order == Py_None) { @@ -1580,10 +1572,9 @@ array_searchsorted(PyArrayObject *self, sorter = NULL; if (npy_parse_arguments("searchsorted", args, len_args, kwnames, - "v", NULL, &keys, - "|side", &PyArray_SearchsideConverter, &side, - "|sorter", NULL, &sorter, - NULL, NULL, NULL) < 0) { + {"v", NULL, &keys}, + {"|side", &PyArray_SearchsideConverter, &side}, + {"|sorter", NULL, &sorter}) < 0) { return NULL; } if (sorter == Py_None) { @@ -2444,9 +2435,8 @@ array_dot(PyArrayObject *self, NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("dot", args, len_args, kwnames, - "b", NULL, &b, - "|out", NULL, &o, - NULL, NULL, 
NULL) < 0) { + {"b", NULL, &b}, + {"|out", NULL, &o}) < 0) { return NULL; } @@ -2542,12 +2532,11 @@ array_trace(PyArrayObject *self, NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("trace", args, len_args, kwnames, - "|offset", &PyArray_PythonPyIntFromInt, &offset, - "|axis1", &PyArray_PythonPyIntFromInt, &axis1, - "|axis2", &PyArray_PythonPyIntFromInt, &axis2, - "|dtype", &PyArray_DescrConverter2, &dtype, - "|out", &PyArray_OutputConverter, &out, - NULL, NULL, NULL) < 0) { + {"|offset", &PyArray_PythonPyIntFromInt, &offset}, + {"|axis1", &PyArray_PythonPyIntFromInt, &axis1}, + {"|axis2", &PyArray_PythonPyIntFromInt, &axis2}, + {"|dtype", &PyArray_DescrConverter2, &dtype}, + {"|out", &PyArray_OutputConverter, &out}) < 0) { Py_XDECREF(dtype); return NULL; } @@ -2616,8 +2605,7 @@ array_flatten(PyArrayObject *self, NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("flatten", args, len_args, kwnames, - "|order", PyArray_OrderConverter, &order, - NULL, NULL, NULL) < 0) { + {"|order", PyArray_OrderConverter, &order}) < 0) { return NULL; } return PyArray_Flatten(self, order); @@ -2632,8 +2620,7 @@ array_ravel(PyArrayObject *self, NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("ravel", args, len_args, kwnames, - "|order", PyArray_OrderConverter, &order, - NULL, NULL, NULL) < 0) { + {"|order", PyArray_OrderConverter, &order}) < 0) { return NULL; } return PyArray_Ravel(self, order); diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index a451e0099675..f05cdc38418d 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -1459,10 +1459,9 @@ array_putmask(PyObject *NPY_UNUSED(module), PyObject *const *args, NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("putmask", args, len_args, kwnames, - "", NULL, &array, - "mask", NULL, &mask, - "values", NULL, &values, - NULL, NULL, NULL) < 0) { + {"", NULL, &array}, + {"mask", NULL, &mask}, + {"values", NULL, &values}) < 0) { 
return NULL; } if (!PyArray_Check(array)) { @@ -1734,15 +1733,14 @@ array_array(PyObject *NPY_UNUSED(ignored), if (len_args != 1 || (kwnames != NULL)) { if (npy_parse_arguments("array", args, len_args, kwnames, - "object", NULL, &op, - "|dtype", &PyArray_DTypeOrDescrConverterOptional, &dt_info, - "$copy", &PyArray_CopyConverter, ©, - "$order", &PyArray_OrderConverter, &order, - "$subok", &PyArray_BoolConverter, &subok, - "$ndmin", &PyArray_PythonPyIntFromInt, &ndmin, - "$ndmax", &PyArray_PythonPyIntFromInt, &ndmax, - "$like", NULL, &like, - NULL, NULL, NULL) < 0) { + {"object", NULL, &op}, + {"|dtype", &PyArray_DTypeOrDescrConverterOptional, &dt_info}, + {"$copy", &PyArray_CopyConverter, ©}, + {"$order", &PyArray_OrderConverter, &order}, + {"$subok", &PyArray_BoolConverter, &subok}, + {"$ndmin", &PyArray_PythonPyIntFromInt, &ndmin}, + {"$ndmax", &PyArray_PythonPyIntFromInt, &ndmax}, + {"$like", NULL, &like}) < 0) { Py_XDECREF(dt_info.descr); Py_XDECREF(dt_info.dtype); return NULL; @@ -1790,13 +1788,12 @@ array_asarray(PyObject *NPY_UNUSED(ignored), if (len_args != 1 || (kwnames != NULL)) { if (npy_parse_arguments("asarray", args, len_args, kwnames, - "a", NULL, &op, - "|dtype", &PyArray_DTypeOrDescrConverterOptional, &dt_info, - "|order", &PyArray_OrderConverter, &order, - "$device", &PyArray_DeviceConverterOptional, &device, - "$copy", &PyArray_CopyConverter, ©, - "$like", NULL, &like, - NULL, NULL, NULL) < 0) { + {"a", NULL, &op}, + {"|dtype", &PyArray_DTypeOrDescrConverterOptional, &dt_info}, + {"|order", &PyArray_OrderConverter, &order}, + {"$device", &PyArray_DeviceConverterOptional, &device}, + {"$copy", &PyArray_CopyConverter, ©}, + {"$like", NULL, &like}) < 0) { Py_XDECREF(dt_info.descr); Py_XDECREF(dt_info.dtype); return NULL; @@ -1836,13 +1833,12 @@ array_asanyarray(PyObject *NPY_UNUSED(ignored), if (len_args != 1 || (kwnames != NULL)) { if (npy_parse_arguments("asanyarray", args, len_args, kwnames, - "a", NULL, &op, - "|dtype", 
&PyArray_DTypeOrDescrConverterOptional, &dt_info, - "|order", &PyArray_OrderConverter, &order, - "$device", &PyArray_DeviceConverterOptional, &device, - "$copy", &PyArray_CopyConverter, ©, - "$like", NULL, &like, - NULL, NULL, NULL) < 0) { + {"a", NULL, &op}, + {"|dtype", &PyArray_DTypeOrDescrConverterOptional, &dt_info}, + {"|order", &PyArray_OrderConverter, &order}, + {"$device", &PyArray_DeviceConverterOptional, &device}, + {"$copy", &PyArray_CopyConverter, ©}, + {"$like", NULL, &like}) < 0) { Py_XDECREF(dt_info.descr); Py_XDECREF(dt_info.dtype); return NULL; @@ -1880,10 +1876,9 @@ array_ascontiguousarray(PyObject *NPY_UNUSED(ignored), if (len_args != 1 || (kwnames != NULL)) { if (npy_parse_arguments("ascontiguousarray", args, len_args, kwnames, - "a", NULL, &op, - "|dtype", &PyArray_DTypeOrDescrConverterOptional, &dt_info, - "$like", NULL, &like, - NULL, NULL, NULL) < 0) { + {"a", NULL, &op}, + {"|dtype", &PyArray_DTypeOrDescrConverterOptional, &dt_info}, + {"$like", NULL, &like}) < 0) { Py_XDECREF(dt_info.descr); Py_XDECREF(dt_info.dtype); return NULL; @@ -1922,10 +1917,9 @@ array_asfortranarray(PyObject *NPY_UNUSED(ignored), if (len_args != 1 || (kwnames != NULL)) { if (npy_parse_arguments("asfortranarray", args, len_args, kwnames, - "a", NULL, &op, - "|dtype", &PyArray_DTypeOrDescrConverterOptional, &dt_info, - "$like", NULL, &like, - NULL, NULL, NULL) < 0) { + {"a", NULL, &op}, + {"|dtype", &PyArray_DTypeOrDescrConverterOptional, &dt_info}, + {"$like", NULL, &like}) < 0) { Py_XDECREF(dt_info.descr); Py_XDECREF(dt_info.dtype); return NULL; @@ -1963,11 +1957,10 @@ array_copyto(PyObject *NPY_UNUSED(ignored), NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("copyto", args, len_args, kwnames, - "dst", NULL, &dst_obj, - "src", NULL, &src_obj, - "|casting", &PyArray_CastingConverter, &casting, - "|where", NULL, &wheremask_in, - NULL, NULL, NULL) < 0) { + {"dst", NULL, &dst_obj}, + {"src", NULL, &src_obj}, + {"|casting", &PyArray_CastingConverter, &casting}, + 
{"|where", NULL, &wheremask_in}) < 0) { goto fail; } @@ -2046,12 +2039,11 @@ array_empty(PyObject *NPY_UNUSED(ignored), NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("empty", args, len_args, kwnames, - "shape", &PyArray_IntpConverter, &shape, - "|dtype", &PyArray_DTypeOrDescrConverterOptional, &dt_info, - "|order", &PyArray_OrderConverter, &order, - "$device", &PyArray_DeviceConverterOptional, &device, - "$like", NULL, &like, - NULL, NULL, NULL) < 0) { + {"shape", &PyArray_IntpConverter, &shape}, + {"|dtype", &PyArray_DTypeOrDescrConverterOptional, &dt_info}, + {"|order", &PyArray_OrderConverter, &order}, + {"$device", &PyArray_DeviceConverterOptional, &device}, + {"$like", NULL, &like}) < 0) { goto fail; } @@ -2105,13 +2097,12 @@ array_empty_like(PyObject *NPY_UNUSED(ignored), NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("empty_like", args, len_args, kwnames, - "prototype", &PyArray_Converter, &prototype, - "|dtype", &PyArray_DTypeOrDescrConverterOptional, &dt_info, - "|order", &PyArray_OrderConverter, &order, - "|subok", &PyArray_PythonPyIntFromInt, &subok, - "|shape", &PyArray_OptionalIntpConverter, &shape, - "$device", &PyArray_DeviceConverterOptional, &device, - NULL, NULL, NULL) < 0) { + {"prototype", &PyArray_Converter, &prototype}, + {"|dtype", &PyArray_DTypeOrDescrConverterOptional, &dt_info}, + {"|order", &PyArray_OrderConverter, &order}, + {"|subok", &PyArray_PythonPyIntFromInt, &subok}, + {"|shape", &PyArray_OptionalIntpConverter, &shape}, + {"$device", &PyArray_DeviceConverterOptional, &device}) < 0) { goto fail; } /* steals the reference to dt_info.descr if it's not NULL */ @@ -2251,12 +2242,11 @@ array_zeros(PyObject *NPY_UNUSED(ignored), NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("zeros", args, len_args, kwnames, - "shape", &PyArray_IntpConverter, &shape, - "|dtype", &PyArray_DTypeOrDescrConverterOptional, &dt_info, - "|order", &PyArray_OrderConverter, &order, - "$device", &PyArray_DeviceConverterOptional, &device, - "$like", NULL, &like, - 
NULL, NULL, NULL) < 0) { + {"shape", &PyArray_IntpConverter, &shape}, + {"|dtype", &PyArray_DTypeOrDescrConverterOptional, &dt_info}, + {"|order", &PyArray_OrderConverter, &order}, + {"$device", &PyArray_DeviceConverterOptional, &device}, + {"$like", NULL, &like}) < 0) { goto finish; } @@ -2303,8 +2293,7 @@ array_count_nonzero(PyObject *NPY_UNUSED(self), PyObject *const *args, Py_ssize_ NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("count_nonzero", args, len_args, NULL, - "", PyArray_Converter, &array, - NULL, NULL, NULL) < 0) { + {"", PyArray_Converter, &array}) < 0) { return NULL; } @@ -2526,12 +2515,11 @@ array_concatenate(PyObject *NPY_UNUSED(dummy), NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("concatenate", args, len_args, kwnames, - "seq", NULL, &a0, - "|axis", &PyArray_AxisConverter, &axis, - "|out", NULL, &out, - "$dtype", &PyArray_DescrConverter2, &dtype, - "$casting", &PyArray_CastingConverter, &casting, - NULL, NULL, NULL) < 0) { + {"seq", NULL, &a0}, + {"|axis", &PyArray_AxisConverter, &axis}, + {"|out", NULL, &out}, + {"$dtype", &PyArray_DescrConverter2, &dtype}, + {"$casting", &PyArray_CastingConverter, &casting}) < 0) { return NULL; } if (out != NULL) { @@ -2557,9 +2545,8 @@ array_innerproduct(PyObject *NPY_UNUSED(dummy), PyObject *const *args, Py_ssize_ NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("innerproduct", args, len_args, NULL, - "", NULL, &a0, - "", NULL, &b0, - NULL, NULL, NULL) < 0) { + {"", NULL, &a0}, + {"", NULL, &b0}) < 0) { return NULL; } @@ -2575,10 +2562,9 @@ array_matrixproduct(PyObject *NPY_UNUSED(dummy), NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("dot", args, len_args, kwnames, - "a", NULL, &a, - "b", NULL, &v, - "|out", NULL, &o, - NULL, NULL, NULL) < 0) { + {"a", NULL, &a}, + {"b", NULL, &v}, + {"|out", NULL, &o}) < 0) { return NULL; } if (o != NULL) { @@ -2610,9 +2596,8 @@ array_vdot(PyObject *NPY_UNUSED(dummy), PyObject *const *args, Py_ssize_t len_ar NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("vdot", args, 
len_args, NULL, - "", NULL, &op1, - "", NULL, &op2, - NULL, NULL, NULL) < 0) { + {"", NULL, &op1}, + {"", NULL, &op2}) < 0) { return NULL; } @@ -3002,11 +2987,10 @@ array_einsum(PyObject *NPY_UNUSED(dummy), /* Get the keyword arguments */ if (kwnames != NULL) { if (npy_parse_arguments("einsum", args+nargs, 0, kwnames, - "$out", NULL, &out_obj, - "$order", &PyArray_OrderConverter, &order, - "$casting", &PyArray_CastingConverter, &casting, - "$dtype", &PyArray_DescrConverter2, &dtype, - NULL, NULL, NULL) < 0) { + {"$out", NULL, &out_obj}, + {"$order", &PyArray_OrderConverter, &order}, + {"$casting", &PyArray_CastingConverter, &casting}, + {"$dtype", &PyArray_DescrConverter2, &dtype}) < 0) { goto finish; } if (out_obj != NULL && !PyArray_Check(out_obj)) { @@ -3048,10 +3032,9 @@ array_correlate(PyObject *NPY_UNUSED(dummy), NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("correlate", args, len_args, kwnames, - "a", NULL, &a0, - "v", NULL, &shape, - "|mode", &PyArray_CorrelatemodeConverter, &mode, - NULL, NULL, NULL) < 0) { + {"a", NULL, &a0}, + {"v", NULL, &shape}, + {"|mode", &PyArray_CorrelatemodeConverter, &mode}) < 0) { return NULL; } return PyArray_Correlate(a0, shape, mode); @@ -3066,10 +3049,9 @@ array_correlate2(PyObject *NPY_UNUSED(dummy), NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("correlate2", args, len_args, kwnames, - "a", NULL, &a0, - "v", NULL, &shape, - "|mode", &PyArray_CorrelatemodeConverter, &mode, - NULL, NULL, NULL) < 0) { + {"a", NULL, &a0}, + {"v", NULL, &shape}, + {"|mode", &PyArray_CorrelatemodeConverter, &mode}) < 0) { return NULL; } return PyArray_Correlate2(a0, shape, mode); @@ -3086,13 +3068,12 @@ array_arange(PyObject *NPY_UNUSED(ignored), NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("arange", args, len_args, kwnames, - "|start", NULL, &o_start, - "|stop", NULL, &o_stop, - "|step", NULL, &o_step, - "|dtype", &PyArray_DescrConverter2, &typecode, - "$device", &PyArray_DeviceConverterOptional, &device, - "$like", NULL, &like, - NULL, 
NULL, NULL) < 0) { + {"|start", NULL, &o_start}, + {"|stop", NULL, &o_stop}, + {"|step", NULL, &o_step}, + {"|dtype", &PyArray_DescrConverter2, &typecode}, + {"$device", &PyArray_DeviceConverterOptional, &device}, + {"$like", NULL, &like}) < 0) { Py_XDECREF(typecode); return NULL; } @@ -3482,10 +3463,9 @@ array_where(PyObject *NPY_UNUSED(ignored), PyObject *const *args, Py_ssize_t len NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("where", args, len_args, NULL, - "", NULL, &obj, - "|x", NULL, &x, - "|y", NULL, &y, - NULL, NULL, NULL) < 0) { + {"", NULL, &obj}, + {"|x", NULL, &x}, + {"|y", NULL, &y}) < 0) { return NULL; } @@ -3501,9 +3481,8 @@ array_lexsort(PyObject *NPY_UNUSED(ignored), PyObject *const *args, Py_ssize_t l NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("lexsort", args, len_args, kwnames, - "keys", NULL, &obj, - "|axis", PyArray_PythonPyIntFromInt, &axis, - NULL, NULL, NULL) < 0) { + {"keys", NULL, &obj}, + {"|axis", PyArray_PythonPyIntFromInt, &axis}) < 0) { return NULL; } return PyArray_Return((PyArrayObject *)PyArray_LexSort(obj, axis)); @@ -3522,10 +3501,9 @@ array_can_cast_safely(PyObject *NPY_UNUSED(self), NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("can_cast", args, len_args, kwnames, - "from_", NULL, &from_obj, - "to", &PyArray_DescrConverter2, &d2, - "|casting", &PyArray_CastingConverter, &casting, - NULL, NULL, NULL) < 0) { + {"from_", NULL, &from_obj}, + {"to", &PyArray_DescrConverter2, &d2}, + {"|casting", &PyArray_CastingConverter, &casting}) < 0) { goto finish; } if (d2 == NULL) { @@ -3592,9 +3570,8 @@ array_promote_types(PyObject *NPY_UNUSED(dummy), PyObject *const *args, Py_ssize NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("promote_types", args, len_args, NULL, - "", PyArray_DescrConverter2, &d1, - "", PyArray_DescrConverter2, &d2, - NULL, NULL, NULL) < 0) { + {"", PyArray_DescrConverter2, &d1}, + {"", PyArray_DescrConverter2, &d2}) < 0) { goto finish; } @@ -3774,15 +3751,14 @@ dragon4_scientific(PyObject *NPY_UNUSED(dummy), 
NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("dragon4_scientific", args, len_args, kwnames, - "x", NULL , &obj, - "|precision", &PyArray_PythonPyIntFromInt, &precision, - "|unique", &PyArray_PythonPyIntFromInt, &unique, - "|sign", &PyArray_PythonPyIntFromInt, &sign, - "|trim", &trimmode_converter, &trim, - "|pad_left", &PyArray_PythonPyIntFromInt, &pad_left, - "|exp_digits", &PyArray_PythonPyIntFromInt, &exp_digits, - "|min_digits", &PyArray_PythonPyIntFromInt, &min_digits, - NULL, NULL, NULL) < 0) { + {"x", NULL, &obj}, + {"|precision", &PyArray_PythonPyIntFromInt, &precision}, + {"|unique", &PyArray_PythonPyIntFromInt, &unique}, + {"|sign", &PyArray_PythonPyIntFromInt, &sign}, + {"|trim", &trimmode_converter, &trim}, + {"|pad_left", &PyArray_PythonPyIntFromInt, &pad_left}, + {"|exp_digits", &PyArray_PythonPyIntFromInt, &exp_digits}, + {"|min_digits", &PyArray_PythonPyIntFromInt, &min_digits}) < 0) { return NULL; } @@ -3817,16 +3793,15 @@ dragon4_positional(PyObject *NPY_UNUSED(dummy), NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("dragon4_positional", args, len_args, kwnames, - "x", NULL , &obj, - "|precision", &PyArray_PythonPyIntFromInt, &precision, - "|unique", &PyArray_PythonPyIntFromInt, &unique, - "|fractional", &PyArray_PythonPyIntFromInt, &fractional, - "|sign", &PyArray_PythonPyIntFromInt, &sign, - "|trim", &trimmode_converter, &trim, - "|pad_left", &PyArray_PythonPyIntFromInt, &pad_left, - "|pad_right", &PyArray_PythonPyIntFromInt, &pad_right, - "|min_digits", &PyArray_PythonPyIntFromInt, &min_digits, - NULL, NULL, NULL) < 0) { + {"x", NULL, &obj}, + {"|precision", &PyArray_PythonPyIntFromInt, &precision}, + {"|unique", &PyArray_PythonPyIntFromInt, &unique}, + {"|fractional", &PyArray_PythonPyIntFromInt, &fractional}, + {"|sign", &PyArray_PythonPyIntFromInt, &sign}, + {"|trim", &trimmode_converter, &trim}, + {"|pad_left", &PyArray_PythonPyIntFromInt, &pad_left}, + {"|pad_right", &PyArray_PythonPyIntFromInt, &pad_right}, + {"|min_digits", 
&PyArray_PythonPyIntFromInt, &min_digits}) < 0) { return NULL; } @@ -4354,10 +4329,9 @@ normalize_axis_index(PyObject *NPY_UNUSED(self), NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("normalize_axis_index", args, len_args, kwnames, - "axis", &PyArray_PythonPyIntFromInt, &axis, - "ndim", &PyArray_PythonPyIntFromInt, &ndim, - "|msg_prefix", NULL, &msg_prefix, - NULL, NULL, NULL) < 0) { + {"axis", &PyArray_PythonPyIntFromInt, &axis}, + {"ndim", &PyArray_PythonPyIntFromInt, &ndim}, + {"|msg_prefix", NULL, &msg_prefix}) < 0) { return NULL; } if (check_and_adjust_axis_msg(&axis, ndim, msg_prefix) < 0) { diff --git a/numpy/_core/src/multiarray/textreading/readtext.c b/numpy/_core/src/multiarray/textreading/readtext.c index 4df2446302d6..614b5dfc6608 100644 --- a/numpy/_core/src/multiarray/textreading/readtext.c +++ b/numpy/_core/src/multiarray/textreading/readtext.c @@ -208,21 +208,20 @@ _load_from_filelike(PyObject *NPY_UNUSED(mod), NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("_load_from_filelike", args, len_args, kwnames, - "file", NULL, &file, - "|delimiter", &parse_control_character, &pc.delimiter, - "|comment", &parse_control_character, &pc.comment, - "|quote", &parse_control_character, &pc.quote, - "|imaginary_unit", &parse_control_character, &pc.imaginary_unit, - "|usecols", NULL, &usecols_obj, - "|skiplines", &PyArray_IntpFromPyIntConverter, &skiplines, - "|max_rows", &PyArray_IntpFromPyIntConverter, &max_rows, - "|converters", NULL, &converters, - "|dtype", NULL, &dtype, - "|encoding", NULL, &encoding_obj, - "|filelike", &PyArray_BoolConverter, &filelike, - "|byte_converters", &PyArray_BoolConverter, &pc.python_byte_converters, - "|c_byte_converters", PyArray_BoolConverter, &pc.c_byte_converters, - NULL, NULL, NULL) < 0) { + {"file", NULL, &file}, + {"|delimiter", &parse_control_character, &pc.delimiter}, + {"|comment", &parse_control_character, &pc.comment}, + {"|quote", &parse_control_character, &pc.quote}, + {"|imaginary_unit", &parse_control_character, 
&pc.imaginary_unit}, + {"|usecols", NULL, &usecols_obj}, + {"|skiplines", &PyArray_IntpFromPyIntConverter, &skiplines}, + {"|max_rows", &PyArray_IntpFromPyIntConverter, &max_rows}, + {"|converters", NULL, &converters}, + {"|dtype", NULL, &dtype}, + {"|encoding", NULL, &encoding_obj}, + {"|filelike", &PyArray_BoolConverter, &filelike}, + {"|byte_converters", &PyArray_BoolConverter, &pc.python_byte_converters}, + {"|c_byte_converters", PyArray_BoolConverter, &pc.c_byte_converters}) < 0) { return NULL; } diff --git a/numpy/_core/src/multiarray/unique.cpp b/numpy/_core/src/multiarray/unique.cpp index a8b897446182..c3028d183555 100644 --- a/numpy/_core/src/multiarray/unique.cpp +++ b/numpy/_core/src/multiarray/unique.cpp @@ -494,11 +494,8 @@ array__unique_hash(PyObject *NPY_UNUSED(module), NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("_unique_hash", args, len_args, kwnames, - "arr", &PyArray_Converter, &arr, - "|equal_nan", &PyArray_BoolConverter, &equal_nan, - NULL, NULL, NULL - ) < 0 - ) { + {"arr", (void *)&PyArray_Converter, &arr}, + {"|equal_nan", (void *)&PyArray_BoolConverter, &equal_nan}) < 0) { Py_XDECREF(arr); return NULL; } diff --git a/numpy/_core/src/umath/extobj.c b/numpy/_core/src/umath/extobj.c index 77a76873d20f..cf3f517b4c6a 100644 --- a/numpy/_core/src/umath/extobj.c +++ b/numpy/_core/src/umath/extobj.c @@ -213,14 +213,13 @@ extobj_make_extobj(PyObject *NPY_UNUSED(mod), NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("_seterrobj", args, len_args, kwnames, - "$all", &errmodeconverter, &all_mode, - "$divide", &errmodeconverter, ÷_mode, - "$over", &errmodeconverter, &over_mode, - "$under", &errmodeconverter, &under_mode, - "$invalid", &errmodeconverter, &invalid_mode, - "$bufsize", &PyArray_IntpFromPyIntConverter, &bufsize, - "$call", NULL, &pyfunc, - NULL, NULL, NULL) < 0) { + {"$all", &errmodeconverter, &all_mode}, + {"$divide", &errmodeconverter, ÷_mode}, + {"$over", &errmodeconverter, &over_mode}, + {"$under", &errmodeconverter, &under_mode}, + 
{"$invalid", &errmodeconverter, &invalid_mode}, + {"$bufsize", &PyArray_IntpFromPyIntConverter, &bufsize}, + {"$call", NULL, &pyfunc}) < 0) { return NULL; } diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index 9c0dce55d1bf..b587fcc7009b 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -3559,12 +3559,11 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("reduceat", args, len_args, kwnames, - "array", NULL, &op, - "indices", NULL, &indices_obj, - "|axis", NULL, &axes_obj, - "|dtype", NULL, &otype_obj, - "|out", NULL, &out_obj, - NULL, NULL, NULL) < 0) { + {"array", NULL, &op}, + {"indices", NULL, &indices_obj}, + {"|axis", NULL, &axes_obj}, + {"|dtype", NULL, &otype_obj}, + {"|out", NULL, &out_obj}) < 0) { goto fail; } /* Prepare inputs for PyUfunc_CheckOverride */ @@ -3579,11 +3578,10 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("accumulate", args, len_args, kwnames, - "array", NULL, &op, - "|axis", NULL, &axes_obj, - "|dtype", NULL, &otype_obj, - "|out", NULL, &out_obj, - NULL, NULL, NULL) < 0) { + {"array", NULL, &op}, + {"|axis", NULL, &axes_obj}, + {"|dtype", NULL, &otype_obj}, + {"|out", NULL, &out_obj}) < 0) { goto fail; } /* Prepare input for PyUfunc_CheckOverride */ @@ -3597,14 +3595,13 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, NPY_PREPARE_ARGPARSER; if (npy_parse_arguments("reduce", args, len_args, kwnames, - "array", NULL, &op, - "|axis", NULL, &axes_obj, - "|dtype", NULL, &otype_obj, - "|out", NULL, &out_obj, - "|keepdims", NULL, &keepdims_obj, - "|initial", &_not_NoValue, &initial, - "|where", NULL, &wheremask_obj, - NULL, NULL, NULL) < 0) { + {"array", NULL, &op}, + {"|axis", NULL, &axes_obj}, + {"|dtype", NULL, &otype_obj}, + {"|out", NULL, &out_obj}, + {"|keepdims", NULL, &keepdims_obj}, + {"|initial", &_not_NoValue, &initial}, + {"|where", NULL, 
&wheremask_obj}) < 0) { goto fail; } /* Prepare input for PyUfunc_CheckOverride */ @@ -4558,15 +4555,14 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, NPY_PREPARE_ARGPARSER; if (npy_parse_arguments(ufunc->name, args + len_args, 0, kwnames, - "$out", NULL, &out_obj, - "$where", NULL, &where_obj, - "$casting", NULL, &casting_obj, - "$order", NULL, &order_obj, - "$subok", NULL, &subok_obj, - "$dtype", NULL, &dtype_obj, - "$signature", NULL, &signature_obj, - "$sig", NULL, &sig_obj, - NULL, NULL, NULL) < 0) { + {"$out", NULL, &out_obj}, + {"$where", NULL, &where_obj}, + {"$casting", NULL, &casting_obj}, + {"$order", NULL, &order_obj}, + {"$subok", NULL, &subok_obj}, + {"$dtype", NULL, &dtype_obj}, + {"$signature", NULL, &signature_obj}, + {"$sig", NULL, &sig_obj}) < 0) { goto fail; } } @@ -4574,17 +4570,16 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, NPY_PREPARE_ARGPARSER; if (npy_parse_arguments(ufunc->name, args + len_args, 0, kwnames, - "$out", NULL, &out_obj, - "$axes", NULL, &axes_obj, - "$axis", NULL, &axis_obj, - "$keepdims", NULL, &keepdims_obj, - "$casting", NULL, &casting_obj, - "$order", NULL, &order_obj, - "$subok", NULL, &subok_obj, - "$dtype", NULL, &dtype_obj, - "$signature", NULL, &signature_obj, - "$sig", NULL, &sig_obj, - NULL, NULL, NULL) < 0) { + {"$out", NULL, &out_obj}, + {"$axes", NULL, &axes_obj}, + {"$axis", NULL, &axis_obj}, + {"$keepdims", NULL, &keepdims_obj}, + {"$casting", NULL, &casting_obj}, + {"$order", NULL, &order_obj}, + {"$subok", NULL, &subok_obj}, + {"$dtype", NULL, &dtype_obj}, + {"$signature", NULL, &signature_obj}, + {"$sig", NULL, &sig_obj}) < 0) { goto fail; } if (NPY_UNLIKELY((axes_obj != NULL) && (axis_obj != NULL))) { @@ -6223,11 +6218,10 @@ py_resolve_dtypes_generic(PyUFuncObject *ufunc, npy_bool return_context, npy_bool reduction = NPY_FALSE; if (npy_parse_arguments("resolve_dtypes", args, len_args, kwnames, - "", NULL, &descrs_tuple, - "$signature", NULL, &signature_obj, - "$casting", &PyArray_CastingConverter, 
&casting, - "$reduction", &PyArray_BoolConverter, &reduction, - NULL, NULL, NULL) < 0) { + {"", NULL, &descrs_tuple}, + {"$signature", NULL, &signature_obj}, + {"$casting", &PyArray_CastingConverter, &casting}, + {"$reduction", &PyArray_BoolConverter, &reduction}) < 0) { return NULL; } @@ -6479,9 +6473,8 @@ py_get_strided_loop(PyUFuncObject *ufunc, npy_intp fixed_strides[NPY_MAXARGS]; if (npy_parse_arguments("_get_strided_loop", args, len_args, kwnames, - "", NULL, &call_info_obj, - "$fixed_strides", NULL, &fixed_strides_obj, - NULL, NULL, NULL) < 0) { + {"", NULL, &call_info_obj}, + {"$fixed_strides", NULL, &fixed_strides_obj}) < 0) { return NULL; } From 3ececb376ce9bdeb18b234d5f1a5fe1b3e7b7507 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Mon, 9 Mar 2026 11:46:23 -0600 Subject: [PATCH 1481/1718] MAINT: Update main after 2.4.3 release. - Forward port 2.4.3-changelog.rst - Forward port 2.4.3-notes.rst - Update release.rst [skip azp] [skip actions] [skip cirrus] --- doc/changelog/2.4.3-changelog.rst | 39 ++++++++++++++++++++++ doc/source/release.rst | 1 + doc/source/release/2.4.3-notes.rst | 52 ++++++++++++++++++++++++++++++ 3 files changed, 92 insertions(+) create mode 100644 doc/changelog/2.4.3-changelog.rst create mode 100644 doc/source/release/2.4.3-notes.rst diff --git a/doc/changelog/2.4.3-changelog.rst b/doc/changelog/2.4.3-changelog.rst new file mode 100644 index 000000000000..c927575ce807 --- /dev/null +++ b/doc/changelog/2.4.3-changelog.rst @@ -0,0 +1,39 @@ + +Contributors +============ + +A total of 11 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Antareep Sarkar + +* Charles Harris +* Joren Hammudoglu +* Matthieu Darbois +* Matti Picus +* Nathan Goldbaum +* Peter Hawkins +* Pieter Eendebak +* Sebastian Berg +* Warren Weckesser +* stratakis + + +Pull requests merged +==================== + +A total of 14 pull requests were merged for this release. 
+ +* `#30759 `__: MAINT: Prepare 2.4.x for further development +* `#30827 `__: BUG: Fix some leaks found via LeakSanitizer (#30756) +* `#30841 `__: MAINT: Synchronize 2.4.x submodules with main +* `#30849 `__: TYP: ``matlib``\ : missing extended precision imports +* `#30850 `__: BUG: Fix weak hash function in np.isin(). (#30840) +* `#30921 `__: BUG: fix infinite recursion in np.ma.flatten_structured_array... +* `#30922 `__: BUG: Fix buffer overrun in CPU baseline validation (#30877) +* `#30923 `__: BUG: Fix busdaycalendar's handling of a bool array weekmask.... +* `#30924 `__: BUG: Fix reference leaks and NULL pointer dereferences (#30908) +* `#30925 `__: MAINT: fix two minor issues noticed when touching the C API setup +* `#30955 `__: ENH: Test .kind not .char in np.testing.assert_equal (#30879) +* `#30957 `__: BUG: fix type issues in uses if PyDataType macros +* `#30958 `__: MAINT: Don't use vulture 2.15, it has false positives +* `#30973 `__: MAINT: update openblas (#30961) + diff --git a/doc/source/release.rst b/doc/source/release.rst index c0207894ac02..7ce0933d73e9 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -6,6 +6,7 @@ Release notes :maxdepth: 2 2.5.0 + 2.4.3 2.4.2 2.4.1 2.4.0 diff --git a/doc/source/release/2.4.3-notes.rst b/doc/source/release/2.4.3-notes.rst new file mode 100644 index 000000000000..09986140c2a9 --- /dev/null +++ b/doc/source/release/2.4.3-notes.rst @@ -0,0 +1,52 @@ +.. currentmodule:: numpy + +========================= +NumPy 2.4.3 Release Notes +========================= + +The NumPy 2.4.3 is a patch release that fixes bugs discovered after the +2.4.2 release. The most user visible fix may be a threading fix for +OpenBLAS on ARM, closing issue #30816. + +This release supports Python versions 3.11-3.14 + + +Contributors +============ + +A total of 11 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. 
+ +* Antareep Sarkar + +* Charles Harris +* Joren Hammudoglu +* Matthieu Darbois +* Matti Picus +* Nathan Goldbaum +* Peter Hawkins +* Pieter Eendebak +* Sebastian Berg +* Warren Weckesser +* stratakis + + + +Pull requests merged +==================== + +A total of 14 pull requests were merged for this release. + +* `#30759 `__: MAINT: Prepare 2.4.x for further development +* `#30827 `__: BUG: Fix some leaks found via LeakSanitizer (#30756) +* `#30841 `__: MAINT: Synchronize 2.4.x submodules with main +* `#30849 `__: TYP: ``matlib``\ : missing extended precision imports +* `#30850 `__: BUG: Fix weak hash function in np.isin(). (#30840) +* `#30921 `__: BUG: fix infinite recursion in np.ma.flatten_structured_array... +* `#30922 `__: BUG: Fix buffer overrun in CPU baseline validation (#30877) +* `#30923 `__: BUG: Fix busdaycalendar's handling of a bool array weekmask.... +* `#30924 `__: BUG: Fix reference leaks and NULL pointer dereferences (#30908) +* `#30925 `__: MAINT: fix two minor issues noticed when touching the C API setup +* `#30955 `__: ENH: Test .kind not .char in np.testing.assert_equal (#30879) +* `#30957 `__: BUG: fix type issues in uses if PyDataType macros +* `#30958 `__: MAINT: Don't use vulture 2.15, it has false positives +* `#30973 `__: MAINT: update openblas (#30961) + From c0942583ec3fa203fee7291607721fbff3402d35 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Mar 2026 19:27:47 +0000 Subject: [PATCH 1482/1718] MAINT: Bump astral-sh/setup-uv from 7.2.0 to 7.3.1 Bumps [astral-sh/setup-uv](https://github.com/astral-sh/setup-uv) from 7.2.0 to 7.3.1. 
- [Release notes](https://github.com/astral-sh/setup-uv/releases) - [Commits](https://github.com/astral-sh/setup-uv/compare/61cb8a9741eeb8a550a1b8544337180c0fc8476b...5a095e7a2014a4212f075830d4f7277575a9d098) --- updated-dependencies: - dependency-name: astral-sh/setup-uv dependency-version: 7.3.1 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/stubtest.yml | 2 +- .github/workflows/typecheck.yml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/stubtest.yml b/.github/workflows/stubtest.yml index 1d2447d6023c..93caa3b1c526 100644 --- a/.github/workflows/stubtest.yml +++ b/.github/workflows/stubtest.yml @@ -43,7 +43,7 @@ jobs: fetch-tags: true persist-credentials: false - - uses: astral-sh/setup-uv@61cb8a9741eeb8a550a1b8544337180c0fc8476b # v7.2.0 + - uses: astral-sh/setup-uv@5a095e7a2014a4212f075830d4f7277575a9d098 # v7.3.1 with: python-version: ${{ matrix.py }} activate-environment: true diff --git a/.github/workflows/typecheck.yml b/.github/workflows/typecheck.yml index 43fdc70073f8..f9e909a7b8ce 100644 --- a/.github/workflows/typecheck.yml +++ b/.github/workflows/typecheck.yml @@ -61,7 +61,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: astral-sh/setup-uv@61cb8a9741eeb8a550a1b8544337180c0fc8476b # v7.2.0 + - uses: astral-sh/setup-uv@5a095e7a2014a4212f075830d4f7277575a9d098 # v7.3.1 with: python-version: ${{ matrix.os_python[1] }} activate-environment: true @@ -94,7 +94,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - - uses: astral-sh/setup-uv@61cb8a9741eeb8a550a1b8544337180c0fc8476b # v7.2.0 + - uses: astral-sh/setup-uv@5a095e7a2014a4212f075830d4f7277575a9d098 # v7.3.1 with: activate-environment: true - name: Install dependencies From 3d3484d89299a91ecb48f8ce0e983f4e17f1e569 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" 
<49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Mar 2026 19:29:30 +0000 Subject: [PATCH 1483/1718] MAINT: Bump egor-tensin/cleanup-path from 4.0.2 to 4.0.3 Bumps [egor-tensin/cleanup-path](https://github.com/egor-tensin/cleanup-path) from 4.0.2 to 4.0.3. - [Release notes](https://github.com/egor-tensin/cleanup-path/releases) - [Commits](https://github.com/egor-tensin/cleanup-path/compare/64ef0b5036b30ce7845058a1d7a8d0830db39b94...cf0901d753db0bf4d15baf625a6fa537978b03a9) --- updated-dependencies: - dependency-name: egor-tensin/cleanup-path dependency-version: 4.0.3 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/cygwin.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/cygwin.yml b/.github/workflows/cygwin.yml index 594af15a88a1..641bfce85936 100644 --- a/.github/workflows/cygwin.yml +++ b/.github/workflows/cygwin.yml @@ -38,7 +38,7 @@ jobs: python-setuptools-wheel liblapack-devel liblapack0 gcc-fortran gcc-g++ git dash cmake ninja - name: Set Windows PATH - uses: egor-tensin/cleanup-path@64ef0b5036b30ce7845058a1d7a8d0830db39b94 # v4.0.2 + uses: egor-tensin/cleanup-path@cf0901d753db0bf4d15baf625a6fa537978b03a9 # v4.0.3 with: dirs: 'C:\tools\cygwin\bin;C:\tools\cygwin\lib\lapack' - name: Verify that bash is Cygwin bash From 1308858eafc2520da8a552145514539df091383d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Mar 2026 19:29:36 +0000 Subject: [PATCH 1484/1718] MAINT: Bump int128/hide-comment-action from 1.50.0 to 1.53.0 Bumps [int128/hide-comment-action](https://github.com/int128/hide-comment-action) from 1.50.0 to 1.53.0. 
- [Release notes](https://github.com/int128/hide-comment-action/releases) - [Commits](https://github.com/int128/hide-comment-action/compare/a218e276fb47d0d526ee989fe02e935a5095417b...c0b5ed56339ed2285f922a5da4444b55270c43f3) --- updated-dependencies: - dependency-name: int128/hide-comment-action dependency-version: 1.53.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/mypy_primer_comment.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/mypy_primer_comment.yml b/.github/workflows/mypy_primer_comment.yml index 7a83cdb53d88..8541265e4a21 100644 --- a/.github/workflows/mypy_primer_comment.yml +++ b/.github/workflows/mypy_primer_comment.yml @@ -49,7 +49,7 @@ jobs: return parseInt(fs.readFileSync("pr_number.txt", { encoding: "utf8" })) - name: Hide old comments - uses: int128/hide-comment-action@a218e276fb47d0d526ee989fe02e935a5095417b # v1.50.0 + uses: int128/hide-comment-action@c0b5ed56339ed2285f922a5da4444b55270c43f3 # v1.53.0 with: token: ${{ secrets.GITHUB_TOKEN }} issue-number: ${{ steps.get-pr-number.outputs.result }} From 8f8bbcb57a69fc488a91612c362cc08dea959f7f Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Tue, 10 Mar 2026 07:01:55 +0000 Subject: [PATCH 1485/1718] BUG: f2py: restore .r/.i field access on complex types via union typedef (#30983) --- numpy/f2py/cfuncs.py | 54 +++++++++---------- .../src/regression/complex_struct_compat.f90 | 8 +++ .../src/regression/complex_struct_compat.pyf | 12 +++++ numpy/f2py/tests/test_regression.py | 17 ++++++ 4 files changed, 64 insertions(+), 27 deletions(-) create mode 100644 numpy/f2py/tests/src/regression/complex_struct_compat.f90 create mode 100644 numpy/f2py/tests/src/regression/complex_struct_compat.pyf diff --git a/numpy/f2py/cfuncs.py b/numpy/f2py/cfuncs.py index 63a7ff5740cb..7a540c6ccd2a 100644 --- a/numpy/f2py/cfuncs.py +++ b/numpy/f2py/cfuncs.py @@ -89,9 +89,9 @@ def errmess(s: str) 
-> None: typedef long double long_double; #endif """ -typedefs['complex_long_double'] = 'typedef npy_clongdouble complex_long_double;' -typedefs['complex_float'] = 'typedef npy_cfloat complex_float;' -typedefs['complex_double'] = 'typedef npy_cdouble complex_double;' +typedefs['complex_long_double'] = 'typedef union { struct {long double r,i;}; npy_clongdouble _npy; } complex_long_double;' +typedefs['complex_float'] = 'typedef union { struct {float r,i;}; npy_cfloat _npy; } complex_float;' +typedefs['complex_double'] = 'typedef union { struct {double r,i;}; npy_cdouble _npy; } complex_double;' typedefs['string'] = """typedef char * string;""" typedefs['character'] = """typedef char character;""" @@ -289,13 +289,13 @@ def errmess(s: str) -> None: #define pyobj_from_float1(v) (PyFloat_FromDouble(v))""" needs['pyobj_from_complex_long_double1'] = ['complex_long_double', 'npy_math.h'] cppmacros['pyobj_from_complex_long_double1'] = """ -#define pyobj_from_complex_long_double1(v) (PyComplex_FromDoubles((double)npy_creall(v),(double)npy_cimagl(v)))""" +#define pyobj_from_complex_long_double1(v) (PyComplex_FromDoubles((double)npy_creall(v._npy),(double)npy_cimagl(v._npy)))""" needs['pyobj_from_complex_double1'] = ['complex_double', 'npy_math.h'] cppmacros['pyobj_from_complex_double1'] = """ -#define pyobj_from_complex_double1(v) (PyComplex_FromDoubles(npy_creal(v),npy_cimag(v)))""" +#define pyobj_from_complex_double1(v) (PyComplex_FromDoubles(npy_creal(v._npy),npy_cimag(v._npy)))""" needs['pyobj_from_complex_float1'] = ['complex_float', 'npy_math.h'] cppmacros['pyobj_from_complex_float1'] = """ -#define pyobj_from_complex_float1(v) (PyComplex_FromDoubles((double)npy_crealf(v),(double)npy_cimagf(v)))""" +#define pyobj_from_complex_float1(v) (PyComplex_FromDoubles((double)npy_crealf(v._npy),(double)npy_cimagf(v._npy)))""" needs['pyobj_from_string1'] = ['string'] cppmacros['pyobj_from_string1'] = """ #define pyobj_from_string1(v) (PyUnicode_FromString((char *)v))""" @@ -1146,7 
+1146,7 @@ def errmess(s: str) -> None: static int complex_long_double_from_pyobj(complex_long_double* v, PyObject *obj, const char *errmess) { - complex_double cd = npy_cpack(0.0, 0.0); + complex_double cd = {.r=0, .i=0}; if (PyArray_CheckScalar(obj)){ if PyArray_IsScalar(obj, CLongDouble) { PyArray_ScalarAsCtype(obj, v); @@ -1156,15 +1156,15 @@ def errmess(s: str) -> None: PyArrayObject *arr = (PyArrayObject *)obj; if (PyArray_TYPE(arr)==NPY_CLONGDOUBLE) { npy_clongdouble tmp = *(npy_clongdouble *)PyArray_DATA(arr); - npy_csetreall(v, npy_creall(tmp)); - npy_csetimagl(v, npy_cimagl(tmp)); + npy_csetreall(&v->_npy, npy_creall(tmp)); + npy_csetimagl(&v->_npy, npy_cimagl(tmp)); return 1; } } } if (complex_double_from_pyobj(&cd,obj,errmess)) { - npy_csetreall(v, (long_double)npy_creal(cd)); - npy_csetimagl(v, (long_double)npy_cimag(cd)); + npy_csetreall(&v->_npy, (long_double)npy_creal(cd._npy)); + npy_csetimagl(&v->_npy, (long_double)npy_cimag(cd._npy)); return 1; } return 0; @@ -1179,22 +1179,22 @@ def errmess(s: str) -> None: Py_complex c; if (PyComplex_Check(obj)) { c = PyComplex_AsCComplex(obj); - npy_csetreal(v, c.real); - npy_csetimag(v, c.imag); + npy_csetreal(&v->_npy, c.real); + npy_csetimag(&v->_npy, c.imag); return 1; } if (PyArray_IsScalar(obj, ComplexFloating)) { if (PyArray_IsScalar(obj, CFloat)) { npy_cfloat tmp; PyArray_ScalarAsCtype(obj, &tmp); - npy_csetreal(v, (double)npy_crealf(tmp)); - npy_csetimag(v, (double)npy_cimagf(tmp)); + npy_csetreal(&v->_npy, (double)npy_crealf(tmp)); + npy_csetimag(&v->_npy, (double)npy_cimagf(tmp)); } else if (PyArray_IsScalar(obj, CLongDouble)) { npy_clongdouble tmp; PyArray_ScalarAsCtype(obj, &tmp); - npy_csetreal(v, (double)npy_creall(tmp)); - npy_csetimag(v, (double)npy_cimagl(tmp)); + npy_csetreal(&v->_npy, (double)npy_creall(tmp)); + npy_csetimag(&v->_npy, (double)npy_cimagl(tmp)); } else { /* if (PyArray_IsScalar(obj, CDouble)) */ PyArray_ScalarAsCtype(obj, v); @@ -1213,20 +1213,20 @@ def errmess(s: str) -> 
None: return 0; } npy_cdouble tmp = *(npy_cdouble *)PyArray_DATA(arr); - npy_csetreal(v, npy_creal(tmp)); - npy_csetimag(v, npy_cimag(tmp)); + npy_csetreal(&v->_npy, npy_creal(tmp)); + npy_csetimag(&v->_npy, npy_cimag(tmp)); Py_DECREF(arr); return 1; } /* Python does not provide PyNumber_Complex function :-( */ - npy_csetimag(v, 0.0); + npy_csetimag(&v->_npy, 0.0); if (PyFloat_Check(obj)) { - npy_csetreal(v, PyFloat_AsDouble(obj)); - return !(npy_creal(*v) == -1.0 && PyErr_Occurred()); + npy_csetreal(&v->_npy, PyFloat_AsDouble(obj)); + return !(npy_creal(v->_npy) == -1.0 && PyErr_Occurred()); } if (PyLong_Check(obj)) { - npy_csetreal(v, PyLong_AsDouble(obj)); - return !(npy_creal(*v) == -1.0 && PyErr_Occurred()); + npy_csetreal(&v->_npy, PyLong_AsDouble(obj)); + return !(npy_creal(v->_npy) == -1.0 && PyErr_Occurred()); } if (PySequence_Check(obj) && !(PyBytes_Check(obj) || PyUnicode_Check(obj))) { PyObject *tmp = PySequence_GetItem(obj,0); @@ -1255,10 +1255,10 @@ def errmess(s: str) -> None: static int complex_float_from_pyobj(complex_float* v,PyObject *obj,const char *errmess) { - complex_double cd = npy_cpack(0.0, 0.0); + complex_double cd = {.r=0, .i=0}; if (complex_double_from_pyobj(&cd,obj,errmess)) { - npy_csetrealf(v, (float)npy_creal(cd)); - npy_csetimagf(v, (float)npy_cimag(cd)); + npy_csetrealf(&v->_npy, (float)npy_creal(cd._npy)); + npy_csetimagf(&v->_npy, (float)npy_cimag(cd._npy)); return 1; } return 0; diff --git a/numpy/f2py/tests/src/regression/complex_struct_compat.f90 b/numpy/f2py/tests/src/regression/complex_struct_compat.f90 new file mode 100644 index 000000000000..3e673ee3a4d7 --- /dev/null +++ b/numpy/f2py/tests/src/regression/complex_struct_compat.f90 @@ -0,0 +1,8 @@ + subroutine zero_imag(c, n) + complex*16, intent(inout) :: c(n) + integer, intent(in) :: n + integer :: k + do k = 1, n + c(k) = cmplx(dble(c(k)), 0.0d0, kind=8) + end do + end subroutine diff --git a/numpy/f2py/tests/src/regression/complex_struct_compat.pyf 
b/numpy/f2py/tests/src/regression/complex_struct_compat.pyf new file mode 100644 index 000000000000..fd0f4d73b23c --- /dev/null +++ b/numpy/f2py/tests/src/regression/complex_struct_compat.pyf @@ -0,0 +1,12 @@ +python module _complex_struct_compat_test + interface + subroutine zero_imag(c, n) + callstatement { int k; for(k=0;k<*n;k++) (c+k)->i = 0.0; } + callprotoargument complex_double*, int* + + complex*16 intent(inout), dimension(n) :: c + integer intent(hide), depend(c) :: n = shape(c,0) + + end subroutine zero_imag + end interface +end python module _complex_struct_compat_test diff --git a/numpy/f2py/tests/test_regression.py b/numpy/f2py/tests/test_regression.py index c4636a764914..d6738cdc4552 100644 --- a/numpy/f2py/tests/test_regression.py +++ b/numpy/f2py/tests/test_regression.py @@ -175,6 +175,23 @@ def test_gh25784(): assert "unknown_subroutine_" in str(rerr) +@pytest.mark.slow +class TestComplexStructCompat(util.F2PyTest): + # Check that .r/.i field access works on complex_double pointers in + # callstatements (scipy compatibility, gh-30966 follow-up) + sources = [ + util.getpath("tests", "src", "regression", "complex_struct_compat.pyf"), + util.getpath("tests", "src", "regression", "complex_struct_compat.f90"), + ] + module_name = "_complex_struct_compat_test" + + def test_complex_struct_field_access(self): + c = np.array([1 + 2j, 3 + 4j, 5 + 6j], dtype=np.complex128) + self.module.zero_imag(c) + npt.assert_array_equal(c.imag, [0.0, 0.0, 0.0]) + npt.assert_array_equal(c.real, [1.0, 3.0, 5.0]) + + @pytest.mark.slow class TestAssignmentOnlyModules(util.F2PyTest): # Ensure that variables are exposed without functions or subroutines in a module From b3ec84b019a393f7963da01b7106e259c0de3372 Mon Sep 17 00:00:00 2001 From: Jeff Smith Date: Tue, 10 Mar 2026 14:27:07 +0000 Subject: [PATCH 1486/1718] DOC: note that PR titles should use the commit message prefix convention The commit message prefix table (BUG, ENH, DOC, etc.) 
is well documented, but there is no mention that PR titles should follow the same convention. In practice this is required by the automated label mapping in .github/pr-prefix-labeler.yml. Add a short note in the "Asking for your changes to be merged" section so contributors know up front. --- doc/source/dev/development_workflow.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/doc/source/dev/development_workflow.rst b/doc/source/dev/development_workflow.rst index 10b07cc1f437..16e383e125dc 100644 --- a/doc/source/dev/development_workflow.rst +++ b/doc/source/dev/development_workflow.rst @@ -269,6 +269,10 @@ function, add a release note to the ``doc/release/upcoming_changes/`` directory, following the instructions and format in the ``doc/release/upcoming_changes/README.rst`` file. +Use the same prefix convention for your pull request title as for commit +messages (e.g., ``BUG:``, ``ENH:``, ``DOC:``). This enables automated labeling +of your PR. + .. _workflow_PR_timeline: From 09f1a4316c7fdd3b2066a1838f96759c00d9dc09 Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Sun, 8 Mar 2026 16:35:12 +0100 Subject: [PATCH 1487/1718] BUG: Show allocatable arrays in dir() for F2PY modules Add a __dir__ method to PyFortranObject that includes variable names from the defs array (which contains allocatable arrays) alongside the regular dict keys. Previously dir() only showed dict contents, hiding allocatable variables that are handled dynamically via getattr. 
Closes #27696 --- .../upcoming_changes/30965.improvement.rst | 4 ++ numpy/f2py/src/fortranobject.c | 42 +++++++++++++++++++ numpy/f2py/tests/test_modules.py | 7 ++++ 3 files changed, 53 insertions(+) create mode 100644 doc/release/upcoming_changes/30965.improvement.rst diff --git a/doc/release/upcoming_changes/30965.improvement.rst b/doc/release/upcoming_changes/30965.improvement.rst new file mode 100644 index 000000000000..aae0f8beaa6c --- /dev/null +++ b/doc/release/upcoming_changes/30965.improvement.rst @@ -0,0 +1,4 @@ +``f2py`` modules now show allocatable arrays in ``dir()`` +--------------------------------------------------------- +Allocatable module variables wrapped by ``f2py`` now appear in ``dir()`` +output, matching their accessibility by name. diff --git a/numpy/f2py/src/fortranobject.c b/numpy/f2py/src/fortranobject.c index f368b18292d7..d8151db0c4e4 100644 --- a/numpy/f2py/src/fortranobject.c +++ b/numpy/f2py/src/fortranobject.c @@ -576,6 +576,47 @@ fortran_repr(PyFortranObject *fp) return repr; } +static PyObject * +fortran_dir(PyFortranObject *fp, PyObject *Py_UNUSED(args)) +{ + int i; + PyObject *dir_list = PyDict_Keys(fp->dict); + if (dir_list == NULL) { + return NULL; + } + for (i = 0; i < fp->len; i++) { + PyObject *name = PyUnicode_FromString(fp->defs[i].name); + if (name == NULL) { + Py_DECREF(dir_list); + return NULL; + } + int contains = PySequence_Contains(dir_list, name); + if (contains == -1) { + Py_DECREF(name); + Py_DECREF(dir_list); + return NULL; + } + if (!contains) { + if (PyList_Append(dir_list, name) < 0) { + Py_DECREF(name); + Py_DECREF(dir_list); + return NULL; + } + } + Py_DECREF(name); + } + if (PyList_Sort(dir_list) < 0) { + Py_DECREF(dir_list); + return NULL; + } + return dir_list; +} + +static PyMethodDef fortran_methods[] = { + {"__dir__", (PyCFunction)fortran_dir, METH_NOARGS, NULL}, + {NULL, NULL, 0, NULL} +}; + PyTypeObject PyFortran_Type = { PyVarObject_HEAD_INIT(NULL, 0).tp_name = "fortran", .tp_basicsize = 
sizeof(PyFortranObject), @@ -583,6 +624,7 @@ PyTypeObject PyFortran_Type = { .tp_getattr = (getattrfunc)fortran_getattr, .tp_setattr = (setattrfunc)fortran_setattr, .tp_repr = (reprfunc)fortran_repr, + .tp_methods = fortran_methods, .tp_call = (ternaryfunc)fortran_call, }; diff --git a/numpy/f2py/tests/test_modules.py b/numpy/f2py/tests/test_modules.py index 36b6060bcfc7..25d9a6778950 100644 --- a/numpy/f2py/tests/test_modules.py +++ b/numpy/f2py/tests/test_modules.py @@ -58,12 +58,19 @@ class TestModuleAndSubroutine(util.F2PyTest): sources = [ util.getpath("tests", "src", "modules", "gh25337", "data.f90"), util.getpath("tests", "src", "modules", "gh25337", "use_data.f90"), + util.getpath("tests", "src", "regression", "datonly.f90"), ] def test_gh25337(self): self.module.data.set_shift(3) assert "data" in dir(self.module) + def test_allocatable_in_dir(self): + # gh-27696: allocatable arrays should appear in dir() + names = dir(self.module.datonly) + assert "data_array" in names + assert "max_value" in names + @pytest.mark.slow class TestUsedModule(util.F2PyTest): From 3c4972878395e109f697ca1ad0b2b2b9061afc32 Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Sun, 8 Mar 2026 16:36:06 +0100 Subject: [PATCH 1488/1718] DOC: Consolidate Fortran compiler instructions into F2PY docs Replace detailed Fortran compiler setup tabs (MSVC/Intel/MinGW-w64) in the building docs with a cross-reference to the F2PY Windows documentation. Keeps MSVC-only setup for users who only need C/C++ compilers. Closes #28187 --- doc/source/building/index.rst | 126 +++++++++------------------------- 1 file changed, 31 insertions(+), 95 deletions(-) diff --git a/doc/source/building/index.rst b/doc/source/building/index.rst index 2a89fefde4f9..5493c0fd2dd6 100644 --- a/doc/source/building/index.rst +++ b/doc/source/building/index.rst @@ -140,66 +140,26 @@ your system. .. 
tab-item:: Windows :sync: windows - On Windows, the use of a Fortran compiler is more tricky than on other - platforms, because MSVC does not support Fortran, and gfortran and MSVC - can't be used together. If you don't need to run the ``f2py`` tests, simply - using MSVC is easiest. Otherwise, you will need one of these sets of - compilers: - - 1. MSVC + Intel Fortran (``ifort``) - 2. Intel compilers (``icc``, ``ifort``) - 3. Mingw-w64 compilers (``gcc``, ``g++``, ``gfortran``) - Compared to macOS and Linux, building NumPy on Windows is a little more - difficult, due to the need to set up these compilers. It is not possible to - just call a one-liner on the command prompt as you would on other - platforms. - - First, install Microsoft Visual Studio - the 2019 Community Edition or any - newer version will work (see the + difficult, due to the need to set up compilers. First, install Microsoft + Visual Studio - the 2019 Community Edition or any newer version will work + (see the `Visual Studio download site `__). - This is needed even if you use the MinGW-w64 or Intel compilers, in order - to ensure you have the Windows Universal C Runtime (the other components of - Visual Studio are not needed when using Mingw-w64, and can be deselected if - desired, to save disk space). The recommended version of the UCRT is - >= 10.0.22621.0. - - .. tab-set:: - - .. tab-item:: MSVC - - The MSVC installer does not put the compilers on the system path, and - the install location may change. To query the install location, MSVC - comes with a ``vswhere.exe`` command-line utility. And to make the - C/C++ compilers available inside the shell you are using, you need to - run a ``.bat`` file for the correct bitness and architecture (e.g., for - 64-bit Intel CPUs, use ``vcvars64.bat``). 
- - If using a Conda environment while a version of Visual Studio 2019+ is - installed that includes the MSVC v142 package (VS 2019 C++ x86/x64 - build tools), activating the conda environment should cause Visual - Studio to be found and the appropriate .bat file executed to set - these variables. - - For detailed guidance, see `Use the Microsoft C++ toolset from the command line - `__. + This is needed to ensure you have the Windows Universal C Runtime. The + recommended version of the UCRT is >= 10.0.22621.0. - .. tab-item:: Intel + The MSVC installer does not put the compilers on the system path, and + the install location may change. To query the install location, MSVC + comes with a ``vswhere.exe`` command-line utility. And to make the + C/C++ compilers available inside the shell you are using, you need to + run a ``.bat`` file for the correct bitness and architecture (e.g., for + 64-bit Intel CPUs, use ``vcvars64.bat``). - Similar to MSVC, the Intel compilers are designed to be used with an - activation script (``Intel\oneAPI\setvars.bat``) that you run in the - shell you are using. This makes the compilers available on the path. - For detailed guidance, see - `Get Started with the Intel® oneAPI HPC Toolkit for Windows - `__. + For detailed guidance, see `Use the Microsoft C++ toolset from the command line + `__. - .. tab-item:: MinGW-w64 - - There are several sources of binaries for MinGW-w64. We recommend the - RTools versions, which can be installed with Chocolatey (see - Chocolatey install instructions `here `_):: - - choco install rtools -y --no-progress --force --version=4.0.0.20220206 + If you don't need ``f2py``, MSVC alone is sufficient. For Fortran compiler + setup (needed for ``f2py``), see :ref:`F2PY and Windows `. .. note:: @@ -208,9 +168,7 @@ your system. can be found) in order to be found, with the exception of MSVC which will be found automatically if and only if there are no other compilers on the ``PATH``. 
You can use any shell (e.g., Powershell, ``cmd`` or - Git Bash) to invoke a build. To check that this is the case, try - invoking a Fortran compiler in the shell you use (e.g., ``gfortran - --version`` or ``ifort --version``). + Git Bash) to invoke a build. .. warning:: @@ -218,52 +176,32 @@ your system. creation will not work due to an outdated Fortran compiler. If that happens, remove the ``compilers`` entry from ``environment.yml`` and try again. The Fortran compiler should be installed as described in - this section. + the :ref:`F2PY Windows documentation `. .. tab-item:: Windows on ARM64 :sync: Windows on ARM64 - In Windows on ARM64, the set of a compiler options that are available for - building NumPy are limited. Compilers such as GCC and GFortran are not yet - supported for Windows on ARM64. Currently, the NumPy build for Windows on ARM64 - is supported with MSVC and LLVM toolchains. The use of a Fortran compiler is - more tricky than on other platforms, because MSVC does not support Fortran, and - gfortran and MSVC can't be used together. If you don't need to run the ``f2py`` - tests, simply using MSVC is easiest. Otherwise, you will need the following - set of compilers: - - 1. MSVC + flang (``cl``, ``flang``) - 2. LLVM + flang (``clang-cl``, ``flang``) + In Windows on ARM64, the compiler options available for building NumPy are + limited. GCC and gfortran are not yet supported. Currently, the NumPy build + for Windows on ARM64 is supported with MSVC and LLVM toolchains. First, install Microsoft Visual Studio - the 2022 Community Edition will work (see the `Visual Studio download site `__). Ensure that you have installed necessary Visual Studio components for building NumPy on WoA from `here `__. - To use the flang compiler for Windows on ARM64, install Latest LLVM - toolchain for WoA from `here `__. - - .. tab-set:: - - .. tab-item:: MSVC - - The MSVC installer does not put the compilers on the system path, and - the install location may change. 
To query the install location, MSVC - comes with a ``vswhere.exe`` command-line utility. And to make the - C/C++ compilers available inside the shell you are using, you need to - run a ``.bat`` file for the correct bitness and architecture (e.g., for - ARM64-based CPUs, use ``vcvarsarm64.bat``). - - For detailed guidance, see `Use the Microsoft C++ toolset from the command line - `__. + The MSVC installer does not put the compilers on the system path, and + the install location may change. To query the install location, MSVC + comes with a ``vswhere.exe`` command-line utility. And to make the + C/C++ compilers available inside the shell you are using, you need to + run a ``.bat`` file for the correct bitness and architecture (e.g., for + ARM64-based CPUs, use ``vcvarsarm64.bat``). - .. tab-item:: LLVM + For detailed guidance, see `Use the Microsoft C++ toolset from the command line + `__. - Similar to MSVC, LLVM does not put the compilers on the system path. - To set system path for LLVM compilers, users may need to use ``set`` - command to put compilers on the system path. To check compiler's path - for LLVM's clang-cl, try invoking LLVM's clang-cl compiler in the shell you use - (``clang-cl --version``). + If you don't need ``f2py``, MSVC alone is sufficient. For Fortran compiler + setup (needed for ``f2py``), see :ref:`F2PY and Windows `. .. note:: @@ -272,9 +210,7 @@ your system. can be found) in order to be found, with the exception of MSVC which will be found automatically if and only if there are no other compilers on the ``PATH``. You can use any shell (e.g., Powershell, ``cmd`` or - Git Bash) to invoke a build. To check that this is the case, try - invoking a Fortran compiler in the shell you use (e.g., ``flang - --version``). + Git Bash) to invoke a build. .. 
warning:: From d7f24502895410f30db5f5393db6ea1b434c4500 Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Wed, 11 Mar 2026 13:47:43 +0200 Subject: [PATCH 1489/1718] MAINT: update the PR template (#30932) We want first time contributors to introduce themselves to start to build a circle of trust with them. Co-authored-by: Sebastian Berg Co-authored-by: Marten van Kerkwijk --- .github/PULL_REQUEST_TEMPLATE.md | 33 +++++++++++++++++++++----------- 1 file changed, 22 insertions(+), 11 deletions(-) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 3cb690fa494c..3805118aa665 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,16 +1,27 @@ - + +### First time committer introduction + -* HIT ALL THE GUIDELINES: - https://numpy.org/devdocs/dev/index.html#guidelines +#### AI Disclosure + From f603c46a5c0bb588bf3bed6864db8435b3fa6fc2 Mon Sep 17 00:00:00 2001 From: majianhan <59634664+crawfordxx@users.noreply.github.com> Date: Wed, 11 Mar 2026 20:31:11 +0800 Subject: [PATCH 1490/1718] DOC: clarify DLPack GPU tensor example with device='cpu' workaround Update the DLPack example in the interoperability docs to show that since DLPack v1, cross-device copy is supported via the device parameter. The example now shows that np.from_dlpack(x_torch, device='cpu') works for GPU tensors. Closes #30936 --- doc/source/user/basics.interoperability.rst | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/doc/source/user/basics.interoperability.rst b/doc/source/user/basics.interoperability.rst index 6fff1e5ea037..f9f52bfeab8e 100644 --- a/doc/source/user/basics.interoperability.rst +++ b/doc/source/user/basics.interoperability.rst @@ -487,16 +487,19 @@ will mean duplicating the memory. Do not do this for very large arrays: .. 
note:: - Note that GPU tensors can't be converted to NumPy arrays since NumPy doesn't - support GPU devices: + GPU tensors cannot be directly zero-copy converted to NumPy arrays since + NumPy does not support GPU devices. However, since DLPack v1, cross-device + copy is supported via the ``device`` parameter: >>> x_torch = torch.arange(5, device='cuda') - >>> np.from_dlpack(x_torch) + >>> np.from_dlpack(x_torch) # fails: implicit device=None means same device Traceback (most recent call last): File "<stdin>", line 1, in <module> RuntimeError: Unsupported device in DLTensor. + >>> np.from_dlpack(x_torch, device='cpu') # works: explicit copy to CPU + array([0, 1, 2, 3, 4]) - But, if both libraries support the device the data buffer is on, it is + If both libraries support the device the data buffer is on, it is possible to use the ``__dlpack__`` protocol (e.g. PyTorch_ and CuPy_): >>> x_torch = torch.arange(5, device='cuda') From 10f9880fa130a9f0317df163bd7178bf497d2d1a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Mar 2026 17:53:55 +0000 Subject: [PATCH 1491/1718] MAINT: Bump actions/dependency-review-action from 4.8.2 to 4.9.0 Bumps [actions/dependency-review-action](https://github.com/actions/dependency-review-action) from 4.8.2 to 4.9.0. - [Release notes](https://github.com/actions/dependency-review-action/releases) - [Commits](https://github.com/actions/dependency-review-action/compare/3c4e3dcb1aa7874d2c16be7d79418e9b7efd6261...2031cfc080254a8a887f58cffee85186f0e49e48) --- updated-dependencies: - dependency-name: actions/dependency-review-action dependency-version: 4.9.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- .github/workflows/dependency-review.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml index f49263ba127f..c658792f7c02 100644 --- a/.github/workflows/dependency-review.yml +++ b/.github/workflows/dependency-review.yml @@ -19,6 +19,6 @@ jobs: with: persist-credentials: false - name: 'Dependency Review' - uses: actions/dependency-review-action@3c4e3dcb1aa7874d2c16be7d79418e9b7efd6261 # v4.8.2 + uses: actions/dependency-review-action@2031cfc080254a8a887f58cffee85186f0e49e48 # v4.9.0 with: allow-ghsas: GHSA-cx63-2mw6-8hw5 From f234b975603430308af89b792eae6a5033e602b4 Mon Sep 17 00:00:00 2001 From: Joe Fox-Rabinovitz Date: Wed, 11 Mar 2026 20:29:42 -0500 Subject: [PATCH 1492/1718] DOC: Added examples to `np.lib.stride_tricks.sliding_window_view` (#30992) Co-authored-by: MadPhysicist --- numpy/lib/_stride_tricks_impl.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/numpy/lib/_stride_tricks_impl.py b/numpy/lib/_stride_tricks_impl.py index 9e8324cec259..919763ad3918 100644 --- a/numpy/lib/_stride_tricks_impl.py +++ b/numpy/lib/_stride_tricks_impl.py @@ -364,6 +364,21 @@ def sliding_window_view(x, window_shape, axis=None, *, >>> moving_average array([1., 2., 3., 4.]) + To adjust the step size of the sliding window, index the output view along + the desired dimension(s). Using the array shown above: + + >>> v[::2] + array([[0, 1, 2], + [2, 3, 4]]) + + You can slide in the reverse direction using the same technique: + + >>> v[::-1] + array([[3, 4, 5], + [2, 3, 4], + [1, 2, 3], + [0, 1, 2]]) + The two examples below demonstrate the effect of ``writeable=True``. 
Creating a view with the default ``writeable=False`` and then writing to From 543866c2ec9702daac380eb2711b72311aa8b1a9 Mon Sep 17 00:00:00 2001 From: Lucas Colley Date: Thu, 12 Mar 2026 15:32:49 +0000 Subject: [PATCH 1493/1718] MAINT: f2py: replace deprecated `sprintf` with `snprintf` --- numpy/f2py/cfuncs.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/numpy/f2py/cfuncs.py b/numpy/f2py/cfuncs.py index 7a540c6ccd2a..c2f115c87661 100644 --- a/numpy/f2py/cfuncs.py +++ b/numpy/f2py/cfuncs.py @@ -535,7 +535,7 @@ def errmess(s: str) -> None: #define CHECKSTRING(check,tcheck,name,show,var)\\ if (!(check)) {\\ char errstring[256];\\ - sprintf(errstring, \"%s: \"show, \"(\"tcheck\") failed for \"name, slen(var), var);\\ + snprintf(errstring, sizeof(errstring), \"%s: \"show, \"(\"tcheck\") failed for \"name, slen(var), var);\\ PyErr_SetString(#modulename#_error, errstring);\\ /*goto capi_fail;*/\\ } else """ @@ -543,7 +543,7 @@ def errmess(s: str) -> None: #define CHECKSCALAR(check,tcheck,name,show,var)\\ if (!(check)) {\\ char errstring[256];\\ - sprintf(errstring, \"%s: \"show, \"(\"tcheck\") failed for \"name, var);\\ + snprintf(errstring, sizeof(errstring), \"%s: \"show, \"(\"tcheck\") failed for \"name, var);\\ PyErr_SetString(#modulename#_error,errstring);\\ /*goto capi_fail;*/\\ } else """ @@ -852,7 +852,8 @@ def errmess(s: str) -> None: Py_INCREF(err); PyErr_Clear(); } - sprintf(mess + strlen(mess), + size_t len = strlen(mess); + snprintf(mess + len, F2PY_MESSAGE_BUFFER_SIZE - len), " -- expected str|bytes|sequence-of-str-or-bytes, got "); f2py_describe(obj, mess + strlen(mess)); PyErr_SetString(err, mess); From 671d164effd038cb14a318e2f60440600eac85a0 Mon Sep 17 00:00:00 2001 From: Lucas Colley Date: Thu, 12 Mar 2026 15:54:54 +0000 Subject: [PATCH 1494/1718] fixes Co-authored-by: Lucas Colley --- numpy/f2py/cfuncs.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/f2py/cfuncs.py b/numpy/f2py/cfuncs.py index 
c2f115c87661..8024b7de7bf1 100644 --- a/numpy/f2py/cfuncs.py +++ b/numpy/f2py/cfuncs.py @@ -853,9 +853,9 @@ def errmess(s: str) -> None: PyErr_Clear(); } size_t len = strlen(mess); - snprintf(mess + len, F2PY_MESSAGE_BUFFER_SIZE - len), + snprintf(mess + len, F2PY_MESSAGE_BUFFER_SIZE - len, " -- expected str|bytes|sequence-of-str-or-bytes, got "); - f2py_describe(obj, mess + strlen(mess)); + f2py_describe(obj, mess + len); PyErr_SetString(err, mess); Py_DECREF(err); } From 23e25260815c81d4ff9a2c1fed413f86ddff81d9 Mon Sep 17 00:00:00 2001 From: Kumar Aditya Date: Thu, 12 Mar 2026 21:47:36 +0530 Subject: [PATCH 1495/1718] ENH: replace direct access of struct fields with macros and static inline functions (#30996) --- numpy/_core/include/numpy/ndarrayobject.h | 9 ++++----- numpy/_core/src/multiarray/dtypemeta.h | 2 +- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/numpy/_core/include/numpy/ndarrayobject.h b/numpy/_core/include/numpy/ndarrayobject.h index 82a1589ff075..9cc1a4c1d000 100644 --- a/numpy/_core/include/numpy/ndarrayobject.h +++ b/numpy/_core/include/numpy/ndarrayobject.h @@ -239,12 +239,12 @@ NPY_TITLE_KEY_check(PyObject *key, PyObject *value) static inline npy_intp PyArray_ITEMSIZE(const PyArrayObject *arr) { - return PyDataType_ELSIZE(((PyArrayObject_fields *)arr)->descr); + return PyDataType_ELSIZE(PyArray_DESCR(arr)); } #define PyDataType_HASFIELDS(obj) (PyDataType_ISLEGACY((PyArray_Descr*)(obj)) && PyDataType_NAMES((PyArray_Descr*)(obj)) != NULL) #define PyDataType_HASSUBARRAY(dtype) (PyDataType_ISLEGACY(dtype) && PyDataType_SUBARRAY(dtype) != NULL) -#define PyDataType_ISUNSIZED(dtype) ((dtype)->elsize == 0 && \ +#define PyDataType_ISUNSIZED(dtype) (PyDataType_ELSIZE((PyArray_Descr*)(dtype)) == 0 && \ !PyDataType_HASFIELDS(dtype)) #define PyDataType_FLAGCHK(dtype, flag) \ @@ -270,8 +270,7 @@ PyArray_ITEMSIZE(const PyArrayObject *arr) static inline PyObject * PyArray_GETITEM(const PyArrayObject *arr, const char *itemptr) { - return 
PyDataType_GetArrFuncs(((PyArrayObject_fields *)arr)->descr)->getitem( - (void *)itemptr, (PyArrayObject *)arr); + return PyDataType_GetArrFuncs(PyArray_DESCR(arr))->getitem((void *)itemptr, (PyArrayObject *)arr); } /* @@ -282,7 +281,7 @@ PyArray_GETITEM(const PyArrayObject *arr, const char *itemptr) static inline int PyArray_SETITEM(PyArrayObject *arr, char *itemptr, PyObject *v) { - return PyDataType_GetArrFuncs(((PyArrayObject_fields *)arr)->descr)->setitem(v, itemptr, arr); + return PyDataType_GetArrFuncs(PyArray_DESCR(arr))->setitem(v, itemptr, arr); } #endif /* not internal */ diff --git a/numpy/_core/src/multiarray/dtypemeta.h b/numpy/_core/src/multiarray/dtypemeta.h index bf0acb48b899..d95463f486df 100644 --- a/numpy/_core/src/multiarray/dtypemeta.h +++ b/numpy/_core/src/multiarray/dtypemeta.h @@ -287,7 +287,7 @@ PyDataType_GetArrFuncs(const PyArray_Descr *descr) static inline PyObject * PyArray_GETITEM(const PyArrayObject *arr, const char *itemptr) { - return PyDataType_GetArrFuncs(((PyArrayObject_fields *)arr)->descr)->getitem( + return PyDataType_GetArrFuncs(PyArray_DESCR(arr))->getitem( (void *)itemptr, (PyArrayObject *)arr); } From ddde8627f6889114b867a653af6dfd19e01a97eb Mon Sep 17 00:00:00 2001 From: Lucas Colley Date: Thu, 12 Mar 2026 16:46:08 +0000 Subject: [PATCH 1496/1718] review suggestion --- numpy/f2py/cfuncs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/f2py/cfuncs.py b/numpy/f2py/cfuncs.py index 8024b7de7bf1..a70b2b59f5a8 100644 --- a/numpy/f2py/cfuncs.py +++ b/numpy/f2py/cfuncs.py @@ -855,7 +855,7 @@ def errmess(s: str) -> None: size_t len = strlen(mess); snprintf(mess + len, F2PY_MESSAGE_BUFFER_SIZE - len, " -- expected str|bytes|sequence-of-str-or-bytes, got "); - f2py_describe(obj, mess + len); + f2py_describe(obj, mess + strlen(mess)); PyErr_SetString(err, mess); Py_DECREF(err); } From 8c5b0f9fcca40b6905184eed48f9d3b4d1560d76 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" 
<49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 12 Mar 2026 17:55:20 +0000 Subject: [PATCH 1497/1718] MAINT: Bump pypa/cibuildwheel from 3.3.1 to 3.4.0 Bumps [pypa/cibuildwheel](https://github.com/pypa/cibuildwheel) from 3.3.1 to 3.4.0. - [Release notes](https://github.com/pypa/cibuildwheel/releases) - [Changelog](https://github.com/pypa/cibuildwheel/blob/main/docs/changelog.md) - [Commits](https://github.com/pypa/cibuildwheel/compare/298ed2fb2c105540f5ed055e8a6ad78d82dd3a7e...ee02a1537ce3071a004a6b08c41e72f0fdc42d9a) --- updated-dependencies: - dependency-name: pypa/cibuildwheel dependency-version: 3.4.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/emscripten.yml | 2 +- .github/workflows/wheels.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index 4f0127431f03..5ba4b07d44f9 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -36,7 +36,7 @@ jobs: fetch-tags: true persist-credentials: false - - uses: pypa/cibuildwheel@298ed2fb2c105540f5ed055e8a6ad78d82dd3a7e # v3.3.1 + - uses: pypa/cibuildwheel@ee02a1537ce3071a004a6b08c41e72f0fdc42d9a # v3.4.0 env: CIBW_PLATFORM: pyodide CIBW_BUILD: cp312-* diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 9e84232bc457..8fb5173f7716 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -99,7 +99,7 @@ jobs: fi - name: Build wheels - uses: pypa/cibuildwheel@298ed2fb2c105540f5ed055e8a6ad78d82dd3a7e # v3.3.1 + uses: pypa/cibuildwheel@ee02a1537ce3071a004a6b08c41e72f0fdc42d9a # v3.4.0 env: CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} From 42141c0951c08ffaa55a9e48b4b94f1dd8b456bd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 13 Mar 2026 17:55:36 +0000 Subject: [PATCH 1498/1718] 
MAINT: Bump github/codeql-action from 4.31.10 to 4.32.6 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 4.31.10 to 4.32.6. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/cdefb33c0f6224e58673d9004f47f7cb3e328b89...0d579ffd059c29b07949a3cce3983f0780820c98) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 4.32.6 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 54aaffafe187..18334c5d76b9 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@cdefb33c0f6224e58673d9004f47f7cb3e328b89 # v4.31.10 + uses: github/codeql-action/init@0d579ffd059c29b07949a3cce3983f0780820c98 # v4.32.6 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@cdefb33c0f6224e58673d9004f47f7cb3e328b89 # v4.31.10 + uses: github/codeql-action/autobuild@0d579ffd059c29b07949a3cce3983f0780820c98 # v4.32.6 # ℹ️ Command-line programs to run using the OS shell. 
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@cdefb33c0f6224e58673d9004f47f7cb3e328b89 # v4.31.10 + uses: github/codeql-action/analyze@0d579ffd059c29b07949a3cce3983f0780820c98 # v4.32.6 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 87552305f65d..7081eb640485 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@cdefb33c0f6224e58673d9004f47f7cb3e328b89 # v2.1.27 + uses: github/codeql-action/upload-sarif@0d579ffd059c29b07949a3cce3983f0780820c98 # v2.1.27 with: sarif_file: results.sarif From 7bc7f54730720cf4b78a25d51ce095cccc09894c Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 13 Mar 2026 19:34:27 +0100 Subject: [PATCH 1499/1718] MAINT: bump ``ruff`` to ``0.15.6`` --- environment.yml | 2 +- requirements/linter_requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/environment.yml b/environment.yml index 8d8f1a871c46..704f87bca75d 100644 --- a/environment.yml +++ b/environment.yml @@ -45,7 +45,7 @@ dependencies: - breathe>4.33.0 # For linting - cython-lint - - ruff=0.15.4 + - ruff=0.15.6 - gitpython # Used in some tests - cffi diff --git a/requirements/linter_requirements.txt b/requirements/linter_requirements.txt index 9c17eea462f0..32ccaa194a3e 100644 --- a/requirements/linter_requirements.txt +++ b/requirements/linter_requirements.txt @@ -1,5 +1,5 @@ # keep in sync with `environment.yml` cython-lint -ruff==0.15.4 +ruff==0.15.6 GitPython>=3.1.30 spin From 037e34d360ce01b8fa7b8cbe96368d127aa26f40 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 13 Mar 
2026 19:34:56 +0100 Subject: [PATCH 1500/1718] STY: fix ``PERF102`` ruff error --- numpy/_core/einsumfunc.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/numpy/_core/einsumfunc.py b/numpy/_core/einsumfunc.py index 3a04b02b9c93..1d191cedfc2f 100644 --- a/numpy/_core/einsumfunc.py +++ b/numpy/_core/einsumfunc.py @@ -1612,8 +1612,7 @@ def einsum(*operands, out=None, optimize=False, **kwargs): # Check the kwargs to avoid a more cryptic error later, without having to # repeat default values here valid_einsum_kwargs = ['dtype', 'order', 'casting'] - unknown_kwargs = [k for (k, v) in kwargs.items() if - k not in valid_einsum_kwargs] + unknown_kwargs = [k for k in kwargs if k not in valid_einsum_kwargs] if len(unknown_kwargs): raise TypeError(f"Did not understand the following kwargs: {unknown_kwargs}") From d3f7e374737c0c51b55d3a843c0a537aceaafc95 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 14 Mar 2026 19:33:46 +0100 Subject: [PATCH 1501/1718] TYP: fix ``numpy.strings`` ufunc stubs --- numpy/_core/defchararray.pyi | 11 +---- numpy/_core/strings.pyi | 52 +--------------------- numpy/typing/tests/data/fail/strings.pyi | 8 ---- numpy/typing/tests/data/reveal/strings.pyi | 42 ++++++++--------- 4 files changed, 23 insertions(+), 90 deletions(-) diff --git a/numpy/_core/defchararray.pyi b/numpy/_core/defchararray.pyi index 5685a988227e..d4fd68f0a81b 100644 --- a/numpy/_core/defchararray.pyi +++ b/numpy/_core/defchararray.pyi @@ -3,7 +3,7 @@ from typing import Any, Literal as L, Self, SupportsIndex, SupportsInt, overload from typing_extensions import TypeVar, deprecated import numpy as np -from numpy import _OrderKACF, bytes_, dtype, int_, ndarray, object_, str_ +from numpy import _OrderKACF, add, bytes_, dtype, int_, ndarray, object_, str_ from numpy._core.multiarray import compare_chararrays from numpy._typing import ( NDArray, @@ -376,15 +376,6 @@ def less(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... 
@overload def less(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... -@overload -def add(x1: U_co, x2: U_co) -> NDArray[np.str_]: ... -@overload -def add(x1: S_co, x2: S_co) -> NDArray[np.bytes_]: ... -@overload -def add(x1: _StringDTypeSupportsArray, x2: _StringDTypeSupportsArray) -> _StringDTypeArray: ... -@overload -def add(x1: T_co, x2: T_co) -> _StringDTypeOrUnicodeArray: ... - @overload def multiply(a: U_co, i: i_co) -> NDArray[np.str_]: ... @overload diff --git a/numpy/_core/strings.pyi b/numpy/_core/strings.pyi index 475da159f783..11b1a893b408 100644 --- a/numpy/_core/strings.pyi +++ b/numpy/_core/strings.pyi @@ -1,6 +1,7 @@ from typing import overload import numpy as np +from numpy import add, equal, greater, greater_equal, less, less_equal, not_equal from numpy._globals import _NoValueType from numpy._typing import ( NDArray, @@ -66,57 +67,6 @@ type _StringDTypeArray = np.ndarray[_AnyShape, np.dtypes.StringDType] type _StringDTypeSupportsArray = _SupportsArray[np.dtypes.StringDType] type _StringDTypeOrUnicodeArray = NDArray[np.str_] | _StringDTypeArray -@overload -def equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... -@overload -def equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... -@overload -def equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... - -@overload -def not_equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... -@overload -def not_equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... -@overload -def not_equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... - -@overload -def greater_equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... -@overload -def greater_equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... -@overload -def greater_equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... - -@overload -def less_equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... -@overload -def less_equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... -@overload -def less_equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... 
- -@overload -def greater(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... -@overload -def greater(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... -@overload -def greater(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... - -@overload -def less(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... -@overload -def less(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... -@overload -def less(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... - -@overload -def add(x1: U_co, x2: U_co) -> NDArray[np.str_]: ... -@overload -def add(x1: S_co, x2: S_co) -> NDArray[np.bytes_]: ... -@overload -def add(x1: _StringDTypeSupportsArray, x2: _StringDTypeSupportsArray) -> _StringDTypeArray: ... -@overload -def add(x1: T_co, x2: T_co) -> _StringDTypeOrUnicodeArray: ... - @overload def multiply(a: U_co, i: i_co) -> NDArray[np.str_]: ... @overload diff --git a/numpy/typing/tests/data/fail/strings.pyi b/numpy/typing/tests/data/fail/strings.pyi index 328a521ae679..0633db94c817 100644 --- a/numpy/typing/tests/data/fail/strings.pyi +++ b/numpy/typing/tests/data/fail/strings.pyi @@ -4,14 +4,6 @@ import numpy.typing as npt AR_U: npt.NDArray[np.str_] AR_S: npt.NDArray[np.bytes_] -np.strings.equal(AR_U, AR_S) # type: ignore[arg-type] -np.strings.not_equal(AR_U, AR_S) # type: ignore[arg-type] - -np.strings.greater_equal(AR_U, AR_S) # type: ignore[arg-type] -np.strings.less_equal(AR_U, AR_S) # type: ignore[arg-type] -np.strings.greater(AR_U, AR_S) # type: ignore[arg-type] -np.strings.less(AR_U, AR_S) # type: ignore[arg-type] - np.strings.encode(AR_S) # type: ignore[arg-type] np.strings.decode(AR_U) # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/reveal/strings.pyi b/numpy/typing/tests/data/reveal/strings.pyi index 166481d80922..37b34916378e 100644 --- a/numpy/typing/tests/data/reveal/strings.pyi +++ b/numpy/typing/tests/data/reveal/strings.pyi @@ -11,33 +11,33 @@ AR_U: npt.NDArray[np.str_] AR_S: npt.NDArray[np.bytes_] AR_T: AR_T_alias -assert_type(np.strings.equal(AR_U, AR_U), npt.NDArray[np.bool]) 
-assert_type(np.strings.equal(AR_S, AR_S), npt.NDArray[np.bool]) -assert_type(np.strings.equal(AR_T, AR_T), npt.NDArray[np.bool]) +assert_type(np.strings.equal(AR_U, AR_U), np.ndarray) +assert_type(np.strings.equal(AR_S, AR_S), np.ndarray) +assert_type(np.strings.equal(AR_T, AR_T), np.ndarray) -assert_type(np.strings.not_equal(AR_U, AR_U), npt.NDArray[np.bool]) -assert_type(np.strings.not_equal(AR_S, AR_S), npt.NDArray[np.bool]) -assert_type(np.strings.not_equal(AR_T, AR_T), npt.NDArray[np.bool]) +assert_type(np.strings.not_equal(AR_U, AR_U), np.ndarray) +assert_type(np.strings.not_equal(AR_S, AR_S), np.ndarray) +assert_type(np.strings.not_equal(AR_T, AR_T), np.ndarray) -assert_type(np.strings.greater_equal(AR_U, AR_U), npt.NDArray[np.bool]) -assert_type(np.strings.greater_equal(AR_S, AR_S), npt.NDArray[np.bool]) -assert_type(np.strings.greater_equal(AR_T, AR_T), npt.NDArray[np.bool]) +assert_type(np.strings.greater_equal(AR_U, AR_U), np.ndarray) +assert_type(np.strings.greater_equal(AR_S, AR_S), np.ndarray) +assert_type(np.strings.greater_equal(AR_T, AR_T), np.ndarray) -assert_type(np.strings.less_equal(AR_U, AR_U), npt.NDArray[np.bool]) -assert_type(np.strings.less_equal(AR_S, AR_S), npt.NDArray[np.bool]) -assert_type(np.strings.less_equal(AR_T, AR_T), npt.NDArray[np.bool]) +assert_type(np.strings.less_equal(AR_U, AR_U), np.ndarray) +assert_type(np.strings.less_equal(AR_S, AR_S), np.ndarray) +assert_type(np.strings.less_equal(AR_T, AR_T), np.ndarray) -assert_type(np.strings.greater(AR_U, AR_U), npt.NDArray[np.bool]) -assert_type(np.strings.greater(AR_S, AR_S), npt.NDArray[np.bool]) -assert_type(np.strings.greater(AR_T, AR_T), npt.NDArray[np.bool]) +assert_type(np.strings.greater(AR_U, AR_U), np.ndarray) +assert_type(np.strings.greater(AR_S, AR_S), np.ndarray) +assert_type(np.strings.greater(AR_T, AR_T), np.ndarray) -assert_type(np.strings.less(AR_U, AR_U), npt.NDArray[np.bool]) -assert_type(np.strings.less(AR_S, AR_S), npt.NDArray[np.bool]) 
-assert_type(np.strings.less(AR_T, AR_T), npt.NDArray[np.bool]) +assert_type(np.strings.less(AR_U, AR_U), np.ndarray) +assert_type(np.strings.less(AR_S, AR_S), np.ndarray) +assert_type(np.strings.less(AR_T, AR_T), np.ndarray) -assert_type(np.strings.add(AR_U, AR_U), npt.NDArray[np.str_]) -assert_type(np.strings.add(AR_S, AR_S), npt.NDArray[np.bytes_]) -assert_type(np.strings.add(AR_T, AR_T), AR_T_alias) +assert_type(np.strings.add(AR_U, AR_U), np.ndarray) +assert_type(np.strings.add(AR_S, AR_S), np.ndarray) +assert_type(np.strings.add(AR_T, AR_T), np.ndarray) assert_type(np.strings.multiply(AR_U, 5), npt.NDArray[np.str_]) assert_type(np.strings.multiply(AR_S, [5, 4, 3]), npt.NDArray[np.bytes_]) From 97c2ec95414e66c70a076243e0865ccb83b45e51 Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Sun, 15 Mar 2026 21:14:08 +0100 Subject: [PATCH 1502/1718] DOC: Add f2py distribution guide for meson-python Add a complete tutorial for packaging F2PY extensions as installable Python distributions using meson-python as the PEP 517 build backend. Covers project layout, pyproject.toml configuration, meson.build with dependency('numpy'), wheel building via pypa/build, editable installs, Fortran compiler selection, BLAS/LAPACK dependencies, and comparison with scikit-build-core. Also fixes the pandas intersphinx URL (moved to pandas.pydata.org/docs). 
Closes #25199 Co-authored-by: Ralf Gommers Co-authored-by: Matti Picus --- doc/source/conf.py | 2 +- doc/source/f2py/buildtools/index.rst | 1 + doc/source/f2py/buildtools/meson-python.rst | 208 ++++++++++++++++++++ doc/source/f2py/code/fib_mesonpy.f90 | 15 ++ doc/source/f2py/code/meson_mesonpy.build | 43 ++++ doc/source/f2py/code/pyproj_mesonpy.toml | 10 + 6 files changed, 278 insertions(+), 1 deletion(-) create mode 100644 doc/source/f2py/buildtools/meson-python.rst create mode 100644 doc/source/f2py/code/fib_mesonpy.f90 create mode 100644 doc/source/f2py/code/meson_mesonpy.build create mode 100644 doc/source/f2py/code/pyproj_mesonpy.toml diff --git a/doc/source/conf.py b/doc/source/conf.py index 3ae97041a5ea..416d7d735ca1 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -423,7 +423,7 @@ def setup(app): 'matplotlib': ('https://matplotlib.org/stable', None), 'imageio': ('https://imageio.readthedocs.io/en/stable', None), 'skimage': ('https://scikit-image.org/docs/stable', None), - 'pandas': ('https://pandas.pydata.org/pandas-docs/stable', None), + 'pandas': ('https://pandas.pydata.org/docs', None), 'scipy-lecture-notes': ('https://scipy-lectures.org', None), 'pytest': ('https://docs.pytest.org/en/stable', None), 'numpy-tutorials': ('https://numpy.org/numpy-tutorials', None), diff --git a/doc/source/f2py/buildtools/index.rst b/doc/source/f2py/buildtools/index.rst index 671fd5b6d2cf..b12ca01559c2 100644 --- a/doc/source/f2py/buildtools/index.rst +++ b/doc/source/f2py/buildtools/index.rst @@ -108,6 +108,7 @@ Build systems :maxdepth: 2 meson + meson-python cmake skbuild distutils-to-meson diff --git a/doc/source/f2py/buildtools/meson-python.rst b/doc/source/f2py/buildtools/meson-python.rst new file mode 100644 index 000000000000..3f5dcc350827 --- /dev/null +++ b/doc/source/f2py/buildtools/meson-python.rst @@ -0,0 +1,208 @@ +.. 
_f2py-meson-python: + +===================================================== +Distributing F2PY extensions with ``meson-python`` +===================================================== + +The :ref:`f2py-meson` page covers building F2PY extensions using raw ``meson`` +commands. This page shows how to package those extensions into installable +Python distributions (sdists and wheels) using `meson-python +`_ as the PEP 517 build backend. + +This is the recommended approach for distributing F2PY-wrapped Fortran code as a +Python package on PyPI or for local ``pip install`` workflows. + +.. note:: + + ``meson-python`` replaced ``setuptools`` / ``numpy.distutils`` as the + standard way to build and distribute compiled extensions in the NumPy and + SciPy ecosystem. See :ref:`distutils-status-migration` for background. + +Prerequisites +============= + +You need: + +* A C compiler +* A Fortran compiler (``gfortran``, ``ifort``, ``ifx``, ``flang-new``, etc.), + if you use any Fortran code in your package +* Python >= 3.10 +* ``meson``, ``meson-python``, and ``numpy`` (installed automatically during the + build when listed in ``build-system.requires``) + +Minimal example +=============== + +The project below wraps a Fortran ``fib`` subroutine into an importable Python +package called ``fib_wrapper``. + +Project layout:: + + fib_wrapper/ # project root + ├── fib.f90 # Fortran source + ├── fib_wrapper/ # Python package directory + │ └── __init__.py + ├── meson.build + └── pyproject.toml + +Fortran source +-------------- + +Save the following as ``fib.f90``: + +.. literalinclude:: ../code/fib_mesonpy.f90 + :language: fortran + +``pyproject.toml`` +------------------ + +.. literalinclude:: ../code/pyproj_mesonpy.toml + :language: toml + +Two entries matter here: + +* ``build-backend = "mesonpy"`` tells build frontends to use ``meson-python``. +* ``requires`` lists build-time dependencies. 
``numpy >= 2.0`` is required so + that ``f2py``, the NumPy headers, and ``dependency('numpy')`` support in Meson + are available during compilation. + +``meson.build`` +--------------- + +.. literalinclude:: ../code/meson_mesonpy.build + +.. note:: + + The file is stored as ``meson_mesonpy.build`` in the documentation source + tree to avoid collisions with other examples. In your project, name it + ``meson.build``. + +The ``meson.build`` file does four things: + +1. Uses ``dependency('numpy')`` to locate NumPy headers, and a + ``declare_dependency`` to add the F2PY include directory (for + ``fortranobject.h``). +2. Runs ``f2py`` via ``custom_target`` to generate the C wrapper sources. +3. Compiles the generated C code together with the Fortran source into a Python + extension module using ``py.extension_module``. +4. Installs ``__init__.py`` into the package directory so the result is a proper + Python package. + +The ``subdir: 'fib_wrapper'`` argument on the extension module is required so +that the compiled ``fib`` shared library is installed inside the ``fib_wrapper/`` +package directory, next to ``__init__.py``. Without it the extension would +be installed at the top level and ``import fib_wrapper`` would not find the +``fib`` extension. The resulting installed layout is:: + + site-packages/ + └── fib_wrapper/ + ├── __init__.py # from .fib import fib + └── fib.cpython-*.so # compiled extension module + +``__init__.py`` +--------------- + +A minimal ``__init__.py`` re-exports the wrapped function: + +.. code-block:: python + + from .fib import fib + +Building and installing +======================= + +Editable install (development) +------------------------------ + +.. code-block:: bash + + pip install --no-build-isolation --editable . + +``--no-build-isolation`` reuses the current environment, which is useful when +iterating. This requires ``meson-python``, ``meson``, ``ninja``, and ``numpy`` +to already be installed. + +Building a wheel +---------------- + +.. 
code-block:: bash + + # If you don't yet have `pypa/build` installed: `pip install build` + python -m build --wheel + +The resulting ``.whl`` file in ``dist/`` can be uploaded to PyPI, or installed +elsewhere with ``pip install dist/fib_wrapper-0.1.0-*.whl``. + +Verifying the install +--------------------- + +.. code-block:: python + + >>> from fib_wrapper import fib + >>> fib(10) + array([ 0, 1, 1, 2, 3, 5, 8, 13, 21, 34], dtype=int32) + +Customizing the Fortran compiler +================================ + +``meson-python`` delegates compiler selection to ``meson``. By default, +``meson`` will choose the first Fortran compiler it finds on the PATH. +If you want more control over Fortran compiler selection, set the ``FC`` +environment variable before building: + +.. code-block:: bash + + FC=ifx python -m build --wheel + +For more control, use a `Meson native file +`_: + +.. code-block:: ini + + ; native.ini + [binaries] + fortran = 'ifx' + c = 'icx' + +.. code-block:: bash + + python -m build --wheel -Csetup-args="--native-file=native.ini" + +Adding dependencies (BLAS, LAPACK, etc.) +======================================== + +Use ``dependency()`` in ``meson.build`` to link against system libraries: + +.. code-block:: none + + lapack_dep = dependency('lapack') + + py.extension_module('mymod', + [sources, generated, incdir_f2py / 'fortranobject.c'], + dependencies : [np_dep, f2py_dep, lapack_dep], + install : true, + ) + +``meson`` resolves dependencies through ``pkg-config``, CMake, or its own +detection logic. See the `Meson dependency documentation +`_ for details. + +Differences from the ``scikit-build-core`` workflow +==================================================== + +The ``scikit-build-core`` approach documented in :ref:`f2py-skbuild` uses CMake +under the hood. ``meson-python`` provides: + +* Native Fortran compiler support in ``meson`` (no CMake layer). +* Direct integration with ``pip`` / ``build`` via PEP 517. 
+* The same build system used by NumPy and SciPy themselves. + +Further reading +=============== + +* `meson-python documentation `_ +* `Meson build system `_ +* `SciPy's meson build configuration `_ (real-world F2PY usage) +* :ref:`f2py-meson` (raw meson build without ``meson-python``) +* :ref:`f2py-skbuild` (alternative using ``scikit-build-core`` / CMake) +* :ref:`f2py-meson-distutils` (migration from ``distutils``) diff --git a/doc/source/f2py/code/fib_mesonpy.f90 b/doc/source/f2py/code/fib_mesonpy.f90 new file mode 100644 index 000000000000..f9f1f88e237f --- /dev/null +++ b/doc/source/f2py/code/fib_mesonpy.f90 @@ -0,0 +1,15 @@ +subroutine fib(a, n) + use iso_c_binding + integer(c_int), intent(in) :: n + integer(c_int), intent(out) :: a(n) + integer :: i + do i = 1, n + if (i == 1) then + a(i) = 0 + else if (i == 2) then + a(i) = 1 + else + a(i) = a(i - 1) + a(i - 2) + end if + end do +end subroutine fib diff --git a/doc/source/f2py/code/meson_mesonpy.build b/doc/source/f2py/code/meson_mesonpy.build new file mode 100644 index 000000000000..5537f3c0b6e9 --- /dev/null +++ b/doc/source/f2py/code/meson_mesonpy.build @@ -0,0 +1,43 @@ +project('fib_wrapper', 'c', + version : '0.1.0', + meson_version: '>=1.1.0', + default_options : ['warning_level=2'], +) + +add_languages('fortran', native: false) + +py = import('python').find_installation(pure: false) + +# NumPy >=2.0 provides include dirs via dependency() +np_dep = dependency('numpy') + +incdir_f2py = run_command(py, + ['-c', 'import numpy.f2py; print(numpy.f2py.get_include())'], + check : true +).stdout().strip() + +# f2py include dir (for fortranobject.h) is not in dependency('numpy'), +# so add it separately +f2py_dep = declare_dependency( + include_directories : incdir_f2py, +) + +# Generate the f2py wrappers +fib_source = custom_target('fibmodule.c', + input : ['fib.f90'], + output : ['fibmodule.c', 'fib-f2pywrappers.f'], + command : [py, '-m', 'numpy.f2py', '@INPUT@', '-m', 'fib', '--lower'] +) + 
+py.extension_module('fib', + ['fib.f90', fib_source, incdir_f2py / 'fortranobject.c'], + dependencies : [np_dep, f2py_dep], + subdir: 'fib_wrapper', + install : true, +) + +# Install the Python package files +py.install_sources( + 'fib_wrapper/__init__.py', + subdir: 'fib_wrapper', +) diff --git a/doc/source/f2py/code/pyproj_mesonpy.toml b/doc/source/f2py/code/pyproj_mesonpy.toml new file mode 100644 index 000000000000..751882772c8f --- /dev/null +++ b/doc/source/f2py/code/pyproj_mesonpy.toml @@ -0,0 +1,10 @@ +[build-system] +# numpy>=2.0 is required for dependency('numpy') support in meson.build +requires = ["meson-python>=0.15.0", "numpy>=2.0"] +build-backend = "mesonpy" + +[project] +name = "fib_wrapper" +version = "0.1.0" +requires-python = ">=3.10" +dependencies = ["numpy"] From 80bcb8b03ed0b2699901e20aafaa149c9ec5a527 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Mon, 16 Mar 2026 04:02:01 -0600 Subject: [PATCH 1503/1718] MAINT: Remove COLS_IN_ALPHA_INDEX from Doxyfile. (#31008) --- doc/source/doxyfile | 1 - 1 file changed, 1 deletion(-) diff --git a/doc/source/doxyfile b/doc/source/doxyfile index 60ab1058dbba..551f66dda721 100644 --- a/doc/source/doxyfile +++ b/doc/source/doxyfile @@ -148,7 +148,6 @@ VERBATIM_HEADERS = YES # Configuration options related to the alphabetical class index #--------------------------------------------------------------------------- ALPHABETICAL_INDEX = YES -COLS_IN_ALPHA_INDEX = 5 #--------------------------------------------------------------------------- # Configuration options related to the HTML output #--------------------------------------------------------------------------- From 86eb95bd5cafd952f78f58dd35e9dc7a96d3cd40 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 16 Mar 2026 11:44:12 -0600 Subject: [PATCH 1504/1718] DOC: document caveats of ndarray.resize on 3.14 and newer --- doc/source/reference/c-api/array.rst | 19 +++++++--- numpy/_core/_add_newdocs.py | 28 +++++++++----- 
numpy/_core/src/multiarray/shape.c | 40 +++++++++++++++----- numpy/_core/tests/examples/cython/checks.pyx | 7 ++++ numpy/_core/tests/test_cython.py | 9 +++++ numpy/_core/tests/test_multiarray.py | 18 +++++++++ 6 files changed, 95 insertions(+), 26 deletions(-) diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst index d2bdae695933..047627eb5479 100644 --- a/doc/source/reference/c-api/array.rst +++ b/doc/source/reference/c-api/array.rst @@ -2254,12 +2254,19 @@ Shape Manipulation a different total number of elements then the old shape. If reallocation is necessary, then *self* must own its data, have *self* - ``>base==NULL``, have *self* - ``>weakrefs==NULL``, and (unless refcheck is 0) not be - referenced by any other array. The fortran argument can be - :c:data:`NPY_ANYORDER`, :c:data:`NPY_CORDER`, or - :c:data:`NPY_FORTRANORDER`. It currently has no effect. Eventually it - could be used to determine how the resize operation should view the data - when constructing a differently-dimensioned array. Returns None on success - and NULL on error. + referenced by any other array. + + On Python 3.13 and older, the check allows objects with exactly one + reference to be resized, because it is impossible to differentiate between + an array with one reference created via an extension and a uniquely + referenced array defined in a Python function. On Python 3.14 and newer, + the array must be uniquely referenced. + + Resizing arrays in-place can often lead to memory fragmentation and should + be avoided if the goal is to reclaim memory. Create a new array and copy the + relevant subset of data over instead. + + Returns None on success and NULL on error. .. 
c:function:: PyObject* PyArray_Transpose( \
        PyArrayObject* self, PyArray_Dims* permute)

diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py
index 30ed3c11ac73..a5951a9f4670 100644
--- a/numpy/_core/_add_newdocs.py
+++ b/numpy/_core/_add_newdocs.py
@@ -4172,12 +4172,7 @@ def _array_method_doc(name: str, params: str, doc: str) -> None:
     Raises
     ------
     ValueError
-        If `a` does not own its own data or references or views to it exist,
-        and the data memory must be changed.
-
-    SystemError
-        If the `order` keyword argument is specified. This behaviour is a
-        bug in NumPy.
+        If `a` does not own its own data, or references or views to it may exist.
 
     See Also
     --------
@@ -4190,12 +4185,25 @@ def _array_method_doc(name: str, params: str, doc: str) -> None:
     Only contiguous arrays (data elements consecutive in memory) can be
     resized.
 
+    Resizing arrays in-place can increase memory fragmentation. For that reason,
+    it is often preferable to allocate new memory for the result by calling
+    ``np.resize`` instead. This can reduce overall memory usage, even in
+    situations where one might expect to avoid wasting memory by resizing
+    in-place.
+
     The purpose of the reference count check is to make sure you do
     not use this array as a buffer for another Python object and then
-    reallocate the memory. However, reference counts can increase in
-    other ways so if you are sure that you have not shared the memory
-    for this array with another Python object, then you may safely set
-    `refcheck` to False.
+    reallocate the memory.
+
+    Note that CPython 3.14 changed reference counting for function locals, so
+    that NumPy cannot tell the difference between situations where an array is
+    referenced by exactly one object or an array is referenced by the
+    interpreter or by a C, C++, or Cython function. In these cases, NumPy may
+    raise a ValueError in a situation where an array has no references and it is
+    safe to resize in-place.
+ + If you are sure that you have not shared the memory for this array with + another Python object, then you may safely set `refcheck` to False. Examples -------- diff --git a/numpy/_core/src/multiarray/shape.c b/numpy/_core/src/multiarray/shape.c index a34af9f9f12b..ce3c00672cbd 100644 --- a/numpy/_core/src/multiarray/shape.c +++ b/numpy/_core/src/multiarray/shape.c @@ -77,26 +77,40 @@ PyArray_Resize_int(PyArrayObject *self, PyArray_Dims *newshape, int refcheck) return -1; } + static const char *msg = + "cannot resize an array that references or is referenced\n" + "by another object in this way.\n" + "Use the np.resize function to get a new resized copy or\n " + "set refcheck=False to disable this check"; if (PyArray_BASE(self) != NULL || (((PyArrayObject_fields *)self)->weakreflist != NULL)) { - PyErr_SetString(PyExc_ValueError, - "cannot resize an array that " - "references or is referenced\n" - "by another array in this way. Use the np.resize function."); + PyErr_SetString(PyExc_ValueError, msg); return -1; } if (refcheck) { #if PY_VERSION_HEX >= 0x030E00B0 + // Python 3.14 changed reference counting semantics for function- + // local variables. 
There is no way to tell if the calling function + // has been optimized (because it might be implemented in C or Cython) + // + // Instead, warn if the refcount is exactly 2 that this might be a + // false positive if (!PyUnstable_Object_IsUniquelyReferenced((PyObject *)self)) { + if (Py_REFCNT(self) == 2) { + PyErr_SetString( + PyExc_ValueError, + "cannot resize an array that may be referenced " + "by another object in this way.\n" + "It is possible that the array is not referenced by " + "another object and this is a false positive.\n" + "If you are sure that the array is uniquely referenced, " + "set refcheck=False to disable this check, otherwise use np.resize."); + return -1; + } #else if (Py_REFCNT(self) > 2) { #endif - PyErr_SetString( - PyExc_ValueError, - "cannot resize an array that " - "references or is referenced\n" - "by another array in this way.\n" - "Use the np.resize function or refcheck=False"); + PyErr_SetString(PyExc_ValueError, msg); return -1; } } @@ -175,6 +189,12 @@ PyArray_Resize_int(PyArrayObject *self, PyArray_Dims *newshape, int refcheck) * array and it is contiguous. If refcheck is 0, then the reference count is * not checked and assumed to be 1. You still must own this data and have no * weak-references and no base object. + * + * On Python 3.13 and older, the check allows objects with exactly one + * reference to be resized, because it is impossible to differentiate between + * an array with one reference created via an extension and a uniquely + * referenced array defined in a Python function. 
+ * */ NPY_NO_EXPORT PyObject * PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, diff --git a/numpy/_core/tests/examples/cython/checks.pyx b/numpy/_core/tests/examples/cython/checks.pyx index 6dcce1c2606d..bbd8e9090b79 100644 --- a/numpy/_core/tests/examples/cython/checks.pyx +++ b/numpy/_core/tests/examples/cython/checks.pyx @@ -4,6 +4,7 @@ Functions in this module give python-space wrappers for cython functions exposed in numpy/__init__.pxd, so they can be tested in test_cython.py """ +import numpy as np cimport numpy as cnp cnp.import_array() @@ -372,3 +373,9 @@ def check_npy_uintp_type_enum(): # Regression test for gh-27890: cnp.NPY_UINTP was not defined. # Cython would fail to compile this before gh-27890 was fixed. return cnp.NPY_UINTP > 0 + + +def resize_refcheck_test(): + # should run without error, see gh-30991 + a = np.array([[0, 1], [2, 3]], order='C') + a.resize((2, 1)) diff --git a/numpy/_core/tests/test_cython.py b/numpy/_core/tests/test_cython.py index c405a59e535e..eed6b186a64d 100644 --- a/numpy/_core/tests/test_cython.py +++ b/numpy/_core/tests/test_cython.py @@ -350,3 +350,12 @@ def test_npystring_allocators_other_dtype(install_temp): def test_npy_uintp_type_enum(install_temp): import checks assert checks.check_npy_uintp_type_enum() + + +@pytest.mark.skipif(sysconfig.get_platform() == 'win-arm64', + reason='no checks module on win-arm64') +def test_resize_refcheck(install_temp): + import checks + if sys.version_info >= (3, 14): + with pytest.raises(ValueError): + checks.resize_refcheck_test() diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 7d01fb5ed441..01769dd4ef02 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -13,8 +13,10 @@ import pathlib import pickle import re +import subprocess import sys import tempfile +import textwrap import tracemalloc import warnings import weakref @@ -6377,6 +6379,22 @@ def 
test_check_reference(self): y = x assert_raises(ValueError, x.resize, (5, 1)) + def test_check_reference_module_scope(self): + code = textwrap.dedent(""" + import numpy as np + + # See gh-30991 + a = np.array([[0, 1], [2, 3]], order='C') + a.resize((2, 1)) + """) + try: + subprocess.check_output([sys.executable, "-c", code], + stderr=subprocess.STDOUT, text=True) + except subprocess.CalledProcessError as e: + assert sys.version_info >= (3, 14) + assert "ValueError" in e.stdout + assert "It is possible that the array is not referenced" in e.stdout + def test_check_reference_2(self): # see gh-30265 x = np.zeros((2, 2)) From f2c50360bf9f7bb67a91c81e99bec99880dc2472 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 16 Mar 2026 13:10:29 -0600 Subject: [PATCH 1505/1718] skip test using subprocesses on wasm --- numpy/_core/tests/test_multiarray.py | 1 + 1 file changed, 1 insertion(+) diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 01769dd4ef02..19fbe9ffdb30 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -6379,6 +6379,7 @@ def test_check_reference(self): y = x assert_raises(ValueError, x.resize, (5, 1)) + @pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess") def test_check_reference_module_scope(self): code = textwrap.dedent(""" import numpy as np From 5ca954a3b874b6cfe9c66aeaedfeb14a8bfdcba1 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 18 Mar 2026 17:03:20 +0100 Subject: [PATCH 1506/1718] MAINT: bump ``pyrefly`` to ``0.57.0`` --- requirements/typing_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/typing_requirements.txt b/requirements/typing_requirements.txt index 5986dcc2b859..1a44fa2e303e 100644 --- a/requirements/typing_requirements.txt +++ b/requirements/typing_requirements.txt @@ -3,4 +3,4 @@ -r test_requirements.txt mypy==1.19.1 -pyrefly==0.55.0 +pyrefly==0.57.0 From 
5033aa92db18221f78fcf9213bc64f409c5c82e4 Mon Sep 17 00:00:00 2001 From: Aadya Chinubhai <77720426+aadya940@users.noreply.github.com> Date: Thu, 19 Mar 2026 03:40:52 -0700 Subject: [PATCH 1507/1718] BUG: Fix `np.vecdot` on empty object vectors returning None (#31033) Make vector dot loop default to integer zero if there are no entries. Fixes #31019 --- numpy/_core/src/umath/matmul.c.src | 9 +++++++++ numpy/_core/tests/test_ufunc.py | 7 +++++++ 2 files changed, 16 insertions(+) diff --git a/numpy/_core/src/umath/matmul.c.src b/numpy/_core/src/umath/matmul.c.src index 95d23995e630..fb82a50a13de 100644 --- a/numpy/_core/src/umath/matmul.c.src +++ b/numpy/_core/src/umath/matmul.c.src @@ -714,6 +714,15 @@ OBJECT_dotc(char *ip1, npy_intp is1, char *ip2, npy_intp is2, char *op, npy_intp npy_intp i; PyObject *result = NULL; + if (n == 0) { + PyObject *zero = PyLong_FromLong(0); + if (zero == NULL) { + return; + } + Py_XSETREF(*((PyObject **)op), zero); + return; + } + for (i = 0; i < n; i++, ip1 += is1, ip2 += is2) { PyObject *obj1 = *(PyObject**)ip1, *obj2 = *(PyObject**)ip2; if (obj1 == NULL) { diff --git a/numpy/_core/tests/test_ufunc.py b/numpy/_core/tests/test_ufunc.py index d8ca3f2364f4..567b63deb873 100644 --- a/numpy/_core/tests/test_ufunc.py +++ b/numpy/_core/tests/test_ufunc.py @@ -909,6 +909,13 @@ def test_vecdot_object_no_conjugate(self): with pytest.raises(AttributeError, match="conjugate"): np.vecdot(arr, arr) + def test_vecdot_object_empty_is_zero(self): + x = np.empty((0,), dtype=object) + assert np.vecdot(x, x) == 0 + + x2 = np.empty((1, 0), dtype=object) + assert_array_equal(np.vecdot(x2, x2), np.array([0], dtype=object)) + def test_vecdot_object_breaks_outer_loop_on_error(self): arr1 = np.ones((3, 3)).astype(object) arr2 = arr1.copy() From 635c1a94650c80f9e1a4d59a85e6f8ba29a4a88d Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 20 Mar 2026 19:55:26 +0100 Subject: [PATCH 1508/1718] MAINT: bump ``ruff`` to ``0.57.0`` --- environment.yml | 2 +- 
requirements/linter_requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/environment.yml b/environment.yml index 704f87bca75d..a3bdfb0c6641 100644 --- a/environment.yml +++ b/environment.yml @@ -45,7 +45,7 @@ dependencies: - breathe>4.33.0 # For linting - cython-lint - - ruff=0.15.6 + - ruff=0.15.7 - gitpython # Used in some tests - cffi diff --git a/requirements/linter_requirements.txt b/requirements/linter_requirements.txt index 32ccaa194a3e..2dfcf6006ce6 100644 --- a/requirements/linter_requirements.txt +++ b/requirements/linter_requirements.txt @@ -1,5 +1,5 @@ # keep in sync with `environment.yml` cython-lint -ruff==0.15.6 +ruff==0.15.7 GitPython>=3.1.30 spin From 939c6044373170dad5a4fa17f012020a09ff26f8 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 20 Mar 2026 13:23:46 -0600 Subject: [PATCH 1509/1718] Update doc/source/reference/c-api/array.rst Co-authored-by: Matti Picus --- doc/source/reference/c-api/array.rst | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst index 047627eb5479..0f7af3ab88a1 100644 --- a/doc/source/reference/c-api/array.rst +++ b/doc/source/reference/c-api/array.rst @@ -2263,8 +2263,7 @@ Shape Manipulation the array must be uniquely referenced. Resizing arrays in-place can often lead to memory fragmentation and should - be avoided if the goal is to reclaim memory. Create a new array and copy the - relevant subset of data over instead. + be avoided. If the goal is to reclaim over-allocated memory, alternatives are to create a view or a copy of just the desired data, or using two passes to build the array: one to cheaply determine the shape and another to allocate and fill. Benchmark your use case to determine what is optimum. You may be surprised to find ``resize`` actually slows down or bloats your application. Returns None on success and NULL on error. 
From 29b0bd246549ae37d7d2deaf5ff4937f3bbe4e1e Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 20 Mar 2026 13:24:39 -0600 Subject: [PATCH 1510/1718] Update numpy/_core/src/multiarray/shape.c Co-authored-by: Matti Picus --- numpy/_core/src/multiarray/shape.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/numpy/_core/src/multiarray/shape.c b/numpy/_core/src/multiarray/shape.c index ce3c00672cbd..08966ac99f77 100644 --- a/numpy/_core/src/multiarray/shape.c +++ b/numpy/_core/src/multiarray/shape.c @@ -100,11 +100,10 @@ PyArray_Resize_int(PyArrayObject *self, PyArray_Dims *newshape, int refcheck) PyErr_SetString( PyExc_ValueError, "cannot resize an array that may be referenced " - "by another object in this way.\n" - "It is possible that the array is not referenced by " - "another object and this is a false positive.\n" + "by another object.\n" + "It is possible that this is a false positive.\n" "If you are sure that the array is uniquely referenced, " - "set refcheck=False to disable this check, otherwise use np.resize."); + "set refcheck=False."); return -1; } #else From 7003a9f5fedb589de189428b5caf9fc8489ece3d Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 20 Mar 2026 13:55:23 -0600 Subject: [PATCH 1511/1718] TST: fix issues with tests --- numpy/_core/tests/examples/cython/checks.pyx | 2 +- numpy/_core/tests/test_cython.py | 16 +++++++++++----- numpy/_core/tests/test_multiarray.py | 2 +- 3 files changed, 13 insertions(+), 7 deletions(-) diff --git a/numpy/_core/tests/examples/cython/checks.pyx b/numpy/_core/tests/examples/cython/checks.pyx index bbd8e9090b79..9e4cdd3c10e1 100644 --- a/numpy/_core/tests/examples/cython/checks.pyx +++ b/numpy/_core/tests/examples/cython/checks.pyx @@ -376,6 +376,6 @@ def check_npy_uintp_type_enum(): def resize_refcheck_test(): - # should run without error, see gh-30991 + # see gh-30991 a = np.array([[0, 1], [2, 3]], order='C') a.resize((2, 1)) diff --git a/numpy/_core/tests/test_cython.py 
b/numpy/_core/tests/test_cython.py index eed6b186a64d..7e2680578b3e 100644 --- a/numpy/_core/tests/test_cython.py +++ b/numpy/_core/tests/test_cython.py @@ -352,10 +352,16 @@ def test_npy_uintp_type_enum(install_temp): assert checks.check_npy_uintp_type_enum() -@pytest.mark.skipif(sysconfig.get_platform() == 'win-arm64', - reason='no checks module on win-arm64') +@pytest.mark.skipif( + sys.version_info < (3, 14), + reason="Tests behavior that happens on Python 3.14 and newer" +) +@pytest.mark.skipif( + sysconfig.get_platform() == 'win-arm64', + reason='no checks module on win-arm64' +) def test_resize_refcheck(install_temp): import checks - if sys.version_info >= (3, 14): - with pytest.raises(ValueError): - checks.resize_refcheck_test() + msg = "It is possible that this is a false positive." + with pytest.raises(ValueError, match=msg): + checks.resize_refcheck_test() diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 19fbe9ffdb30..90217d4e4ffa 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -6394,7 +6394,7 @@ def test_check_reference_module_scope(self): except subprocess.CalledProcessError as e: assert sys.version_info >= (3, 14) assert "ValueError" in e.stdout - assert "It is possible that the array is not referenced" in e.stdout + assert "It is possible that this is a false positive." 
in e.stdout def test_check_reference_2(self): # see gh-30265 From 185b41aee4b085d11ba839f4b5e50c36bfa1a118 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 20 Mar 2026 13:55:45 -0600 Subject: [PATCH 1512/1718] Respond to Matti's comments --- doc/source/reference/c-api/array.rst | 29 ++++++++++++++++------------ numpy/_core/_add_newdocs.py | 27 +++++++++++++++----------- numpy/_core/src/multiarray/shape.c | 5 +++++ 3 files changed, 38 insertions(+), 23 deletions(-) diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst index 0f7af3ab88a1..6d648d5dd2f6 100644 --- a/doc/source/reference/c-api/array.rst +++ b/doc/source/reference/c-api/array.rst @@ -2257,13 +2257,18 @@ Shape Manipulation referenced by any other array. On Python 3.13 and older, the check allows objects with exactly one - reference to be resized, because it is impossible to differentiate between - an array with one reference created via an extension and a uniquely - referenced array defined in a Python function. On Python 3.14 and newer, - the array must be uniquely referenced. - - Resizing arrays in-place can often lead to memory fragmentation and should - be avoided. If the goal is to reclaim over-allocated memory, alternatives are to create a view or a copy of just the desired data, or using two passes to build the array: one to cheaply determine the shape and another to allocate and fill. Benchmark your use case to determine what is optimum. You may be surprised to find ``resize`` actually slows down or bloats your application. + reference to be reallocated in-place. On Python 3.14 and newer, the array + must be uniquely referenced. See the Python 3.14 `What's New entry + `_ on + this topic for more information on why there is a behavior difference. + + Reallocating arrays in-place can often lead to memory fragmentation and + should be avoided. 
If the goal is to reclaim over-allocated memory, + alternatives are to create a view or a copy of just the desired data, or + using two passes to build the array: one to cheaply determine the shape and + another to allocate and fill. Benchmark your use case to determine what is + optimum. You may be surprised to find ``resize`` actually slows down or + bloats your application. Returns None on success and NULL on error. @@ -4267,9 +4272,9 @@ Memory management .. c:function:: int PyArray_ResolveWritebackIfCopy(PyArrayObject* obj) If ``obj->flags`` has :c:data:`NPY_ARRAY_WRITEBACKIFCOPY`, this function - clears the flags, `DECREF` s - `obj->base` and makes it writeable, and sets ``obj->base`` to NULL. It then - copies ``obj->data`` to `obj->base->data`, and returns the error state of + clears the flags, ``DECREF`` s + ``obj->base`` and makes it writeable, and sets ``obj->base`` to NULL. It then + copies ``obj->data`` to ``obj->base->data``, and returns the error state of the copy operation. This is the opposite of :c:func:`PyArray_SetWritebackIfCopyBase`. Usually this is called once you are finished with ``obj``, just before ``Py_DECREF(obj)``. It may be called @@ -4505,9 +4510,9 @@ Miscellaneous Macros If ``obj->flags`` has :c:data:`NPY_ARRAY_WRITEBACKIFCOPY`, this function clears the flags, `DECREF` s - `obj->base` and makes it writeable, and sets ``obj->base`` to NULL. In + ``obj->base`` and makes it writeable, and sets ``obj->base`` to NULL. In contrast to :c:func:`PyArray_ResolveWritebackIfCopy` it makes no attempt - to copy the data from `obj->base`. This undoes + to copy the data from ``obj->base``. This undoes :c:func:`PyArray_SetWritebackIfCopyBase`. Usually this is called after an error when you are finished with ``obj``, just before ``Py_DECREF(obj)``. It may be called multiple times, or with ``NULL`` input. 
diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index a5951a9f4670..fd9998381105 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -4164,6 +4164,7 @@ def _array_method_doc(name: str, params: str, doc: str) -> None: Shape of resized array. refcheck : bool, optional If False, reference count will not be checked. Default is True. + See Notes below for more explanation. Returns ------- @@ -4185,26 +4186,30 @@ def _array_method_doc(name: str, params: str, doc: str) -> None: Only contiguous arrays (data elements consecutive in memory) can be resized. - Resizing arrays in-place can increase memory fragmentation. For that reason, - it is often preferable to allocate new memory for the result by calling - ``np.resize`` instead. This can reduce overall memory usage, even in - situations where one might expect to avoid wasting memory by resizing - in-place. + Reallocating arrays in-place can often lead to memory fragmentation and + should be avoided. If the goal is to reclaim over-allocated memory, + alternatives are to create a view or a copy of just the desired data, or + using two passes to build the array: one to cheaply determine the shape and + another to allocate and fill. Benchmark your use case to determine what is + optimum. You may be surprised to find ``resize`` actually slows down or + bloats your application. The purpose of the reference count check is to make sure you do not use this array as a buffer for another Python object and then reallocate the memory. - Note that CPython 3.14 changed reference counting for function locals, so - that NumPy cannot tell the difference between situations where an array is - referenced by exactly one object or an array is referenced by the - interpreter or by a C, C++, or Cython function. In these cases, NumPy may - raise a ValueError in a situation where an array has no references and it is - safe to resize in-place. 
+ On Python 3.13 and older, the check allows objects with exactly one + reference to be reallocated in-place. On Python 3.14 and newer, the array + must be uniquely referenced. See [1]_ for more details. If you are sure that you have not shared the memory for this array with another Python object, then you may safely set `refcheck` to False. + + References + ---------- + .. [1] Python 3.14 What's New, https://docs.python.org/3/whatsnew/3.14.html#whatsnew314-refcount + Examples -------- Shrinking an array: array is flattened (in the order that the data are diff --git a/numpy/_core/src/multiarray/shape.c b/numpy/_core/src/multiarray/shape.c index 08966ac99f77..f5ab89b0bfed 100644 --- a/numpy/_core/src/multiarray/shape.c +++ b/numpy/_core/src/multiarray/shape.c @@ -190,6 +190,11 @@ PyArray_Resize_int(PyArrayObject *self, PyArray_Dims *newshape, int refcheck) * weak-references and no base object. * * On Python 3.13 and older, the check allows objects with exactly one + * reference to be reallocated in-place. On Python 3.14 and newer, the array + * must be uniquely referenced. In some cases this can lead to spurious + * ValueErrors on Python 3.14. + * + * On Python 3.13 and older, the check allows objects with exactly one * reference to be resized, because it is impossible to differentiate between * an array with one reference created via an extension and a uniquely * referenced array defined in a Python function. 
From 86a4cc9e3732f491c18a326704708d8a9a87b0f7 Mon Sep 17 00:00:00 2001 From: Koki Watanabe Date: Sat, 21 Mar 2026 08:06:29 +0900 Subject: [PATCH 1513/1718] BUG: fix FNV-1a 64-bit selection by using NPY_SIZEOF_UINTP (#31035) --- numpy/_core/src/multiarray/fnv.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/numpy/_core/src/multiarray/fnv.c b/numpy/_core/src/multiarray/fnv.c index 2b7848519e61..cef99f93d455 100644 --- a/numpy/_core/src/multiarray/fnv.c +++ b/numpy/_core/src/multiarray/fnv.c @@ -72,14 +72,15 @@ npy_fnv1a_64(const void *buf, size_t len, npy_uint64 hval) /* * Compute a size_t FNV-1a hash of the given data - * This will use 32-bit or 64-bit hash depending on the size of size_t + * This will use 32-bit or 64-bit hash depending on the size of npy_uintp. + * npy_uintp has the same size as size_t. */ size_t npy_fnv1a(const void *buf, size_t len) { -#if NPY_SIZEOF_SIZE_T == 8 +#if NPY_SIZEOF_UINTP == 8 return (size_t)npy_fnv1a_64(buf, len, FNV1A_64_INIT); -#else /* NPY_SIZEOF_SIZE_T == 4 */ +#else /* NPY_SIZEOF_UINTP == 4 */ return (size_t)npy_fnv1a_32(buf, len, FNV1A_32_INIT); #endif } From 126fb1a894ce7d618676f1c76f6efbd7713878a6 Mon Sep 17 00:00:00 2001 From: star1327p Date: Fri, 20 Mar 2026 21:47:58 -0700 Subject: [PATCH 1514/1718] DOC: Correct some a/an usages [skip azp][skip cirrus][skip actions] --- numpy/_core/src/_simd/_simd_inc.h.src | 16 ++++++++-------- numpy/_core/src/multiarray/buffer.c | 2 +- numpy/_core/src/multiarray/item_selection.c | 2 +- .../src/multiarray/stringdtype/static_string.h | 2 +- numpy/_core/src/umath/reduction.c | 2 +- numpy/_core/src/umath/scalarmath.c.src | 2 +- numpy/_core/tests/test_regression.py | 2 +- numpy/ma/core.py | 2 +- numpy/random/_pcg64.pyx | 4 ++-- numpy/random/_philox.pyx | 2 +- 10 files changed, 18 insertions(+), 18 deletions(-) diff --git a/numpy/_core/src/_simd/_simd_inc.h.src b/numpy/_core/src/_simd/_simd_inc.h.src index a023848831ed..192000f7165a 100644 --- 
a/numpy/_core/src/_simd/_simd_inc.h.src +++ b/numpy/_core/src/_simd/_simd_inc.h.src @@ -104,21 +104,21 @@ typedef struct { // type name compatible with python style const char *pyname; - // returns '1' if the type represent a unsigned integer + // returns '1' if the type represents an unsigned integer unsigned int is_unsigned:1; - // returns '1' if the type represent a signed integer + // returns '1' if the type represents a signed integer unsigned int is_signed:1; - // returns '1' if the type represent a single or double precision + // returns '1' if the type represents a single or double precision unsigned int is_float:1; - // returns '1' if the type represent a boolean + // returns '1' if the type represents a boolean unsigned int is_bool:1; - // returns '1' if the type represent a sequence + // returns '1' if the type represents a sequence unsigned int is_sequence:1; - // returns '1' if the type represent a scalar + // returns '1' if the type represents a scalar unsigned int is_scalar:1; - // returns '1' if the type represent a vector + // returns '1' if the type represents a vector unsigned int is_vector:1; - // returns the len of multi-vector if the type represent x2 or x3 vector + // returns the len of multi-vector if the type represents x2 or x3 vector // otherwise returns 0, e.g. returns 2 if data type is simd_data_vu8x2 int is_vectorx; // returns the equivalent scalar data type e.g. simd_data_vu8 -> simd_data_u8 diff --git a/numpy/_core/src/multiarray/buffer.c b/numpy/_core/src/multiarray/buffer.c index 908553462bfe..f0f4b6d7056c 100644 --- a/numpy/_core/src/multiarray/buffer.c +++ b/numpy/_core/src/multiarray/buffer.c @@ -30,7 +30,7 @@ * * - Don't add new members to ndarray or descr structs, to preserve binary * compatibility. (Also, adding the items is actually not very useful, - * since mutability issues prevent an 1 to 1 relationship between arrays + * since mutability issues prevent a one-to-one relationship between arrays * and buffer views.) 
* * - Don't use bf_releasebuffer, because it prevents PyArg_ParseTuple("s#", ... diff --git a/numpy/_core/src/multiarray/item_selection.c b/numpy/_core/src/multiarray/item_selection.c index 4751db9b4705..8946b93fbee3 100644 --- a/numpy/_core/src/multiarray/item_selection.c +++ b/numpy/_core/src/multiarray/item_selection.c @@ -2407,7 +2407,7 @@ count_nonzero_bytes_384(const npy_uint64 * w) */ if (NPY_UNLIKELY( ((w1 | w2 | w3 | w4 | w5 | w6) & 0xFEFEFEFEFEFEFEFEULL) != 0)) { - /* reload from pointer to avoid a unnecessary stack spill with gcc */ + /* reload from pointer to avoid an unnecessary stack spill with gcc */ const char * c = (const char *)w; npy_uintp i, count = 0; for (i = 0; i < 48; i++) { diff --git a/numpy/_core/src/multiarray/stringdtype/static_string.h b/numpy/_core/src/multiarray/stringdtype/static_string.h index 385d4dc47cce..ac7bd04b3ebb 100644 --- a/numpy/_core/src/multiarray/stringdtype/static_string.h +++ b/numpy/_core/src/multiarray/stringdtype/static_string.h @@ -37,7 +37,7 @@ NpyString_free_allocator(npy_string_allocator *allocator); // NPY_EMPTY_STRING into *to_init* is sufficient to initialize it. Does not // check if *to_init* is NULL or if the internal buffer is non-NULL, undefined // behavior or memory leaks are possible if this function is passed a pointer -// to a an uninitialized struct, a NULL pointer, or an existing heap-allocated +// to an uninitialized struct, a NULL pointer, or an existing heap-allocated // string. Returns -1 if allocating the string would exceed the maximum // allowed string size or exhaust available memory. Returns 0 on success. NPY_NO_EXPORT int diff --git a/numpy/_core/src/umath/reduction.c b/numpy/_core/src/umath/reduction.c index 384ac052b226..321a1f841853 100644 --- a/numpy/_core/src/umath/reduction.c +++ b/numpy/_core/src/umath/reduction.c @@ -165,7 +165,7 @@ PyArray_CopyInitialReduceValues( * funcname : The name of the reduction function, for error messages. 
* errormask : forwarded from _get_bufsize_errmask * - * TODO FIXME: if you squint, this is essentially an second independent + * TODO FIXME: if you squint, this is essentially a second independent * implementation of generalized ufuncs with signature (i)->(), plus a few * extra bells and whistles. (Indeed, as far as I can tell, it was originally * split out to support a fancy version of count_nonzero... which is not diff --git a/numpy/_core/src/umath/scalarmath.c.src b/numpy/_core/src/umath/scalarmath.c.src index e2d7c22f5deb..c3c1416d5618 100644 --- a/numpy/_core/src/umath/scalarmath.c.src +++ b/numpy/_core/src/umath/scalarmath.c.src @@ -700,7 +700,7 @@ static inline int * - The reverse should work, so we return NotImplemented to defer. * (If self is a subclass, this will end up in the "unknown" path.) * - Neither works (e.g. `uint8 + int8`): We currently use the array path. - * - The other object is a unknown. It could be either a scalar, an array, + * - The other object is an unknown. It could be either a scalar, an array, * or an array-like (including a list!). Because NumPy scalars pretend to be * arrays we fall into the array fallback path here _normally_ (through * the generic scalar path). 
diff --git a/numpy/_core/tests/test_regression.py b/numpy/_core/tests/test_regression.py index 27a97faad19f..f53e1b1c3dd3 100644 --- a/numpy/_core/tests/test_regression.py +++ b/numpy/_core/tests/test_regression.py @@ -1073,7 +1073,7 @@ def test_dot_alignment_sse2(self): x = np.zeros((30, 40)) for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): y = pickle.loads(pickle.dumps(x, protocol=proto)) - # y is now typically not aligned on a 8-byte boundary + # y is now typically not aligned on an 8-byte boundary z = np.ones((1, y.shape[0])) # This shouldn't cause a segmentation fault: np.dot(z, y) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 6e3b3a54c7c8..28200da59675 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -7941,7 +7941,7 @@ def where(condition, x=_NoValue, y=_NoValue): Returns ------- out : MaskedArray - An masked array with `masked` elements where the condition is masked, + A masked array with `masked` elements where the condition is masked, elements from `x` where `condition` is True, and elements from `y` elsewhere. diff --git a/numpy/random/_pcg64.pyx b/numpy/random/_pcg64.pyx index 30a00a11aa1d..597ef5979ec6 100644 --- a/numpy/random/_pcg64.pyx +++ b/numpy/random/_pcg64.pyx @@ -256,7 +256,7 @@ cdef class PCG64(BitGenerator): Notes ----- - Advancing a RNG updates the underlying RNG state as-if a given + Advancing an RNG updates the underlying RNG state as-if a given number of calls to the underlying RNG have been made. In general there is not a one-to-one relationship between the number output random values from a particular distribution and the number of @@ -490,7 +490,7 @@ cdef class PCG64DXSM(BitGenerator): Notes ----- - Advancing a RNG updates the underlying RNG state as-if a given + Advancing an RNG updates the underlying RNG state as-if a given number of calls to the underlying RNG have been made. 
In general there is not a one-to-one relationship between the number output random values from a particular distribution and the number of diff --git a/numpy/random/_philox.pyx b/numpy/random/_philox.pyx index da47ad21e2de..422810b9a12e 100644 --- a/numpy/random/_philox.pyx +++ b/numpy/random/_philox.pyx @@ -307,7 +307,7 @@ cdef class Philox(BitGenerator): Notes ----- - Advancing a RNG updates the underlying RNG state as-if a given + Advancing an RNG updates the underlying RNG state as-if a given number of calls to the underlying RNG have been made. In general there is not a one-to-one relationship between the number output random values from a particular distribution and the number of From c145f7a28dc64416e1828ff7f2f349348839f77b Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Sat, 21 Mar 2026 13:33:23 +0000 Subject: [PATCH 1515/1718] ENH: bump array API version to 2025.12 --- numpy/__init__.py | 2 +- numpy/__init__.pyi | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/__init__.py b/numpy/__init__.py index 3f2652a306fd..ce0452ffe8d0 100644 --- a/numpy/__init__.py +++ b/numpy/__init__.py @@ -667,7 +667,7 @@ # import with `from numpy import *`. __future_scalars__ = {"str", "bytes", "object"} - __array_api_version__ = "2024.12" + __array_api_version__ = "2025.12" from ._array_api_info import __array_namespace_info__ diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index b1bca4b8b229..64956883597b 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -870,7 +870,7 @@ type _DTypeNum = L[ ] type _DTypeBuiltinKind = L[0, 1, 2] -type _ArrayAPIVersion = L["2021.12", "2022.12", "2023.12", "2024.12"] +type _ArrayAPIVersion = L["2021.12", "2022.12", "2023.12", "2024.12", "2025.12"] type _CastingKind = L["no", "equiv", "safe", "same_kind", "same_value", "unsafe"] @@ -1067,7 +1067,7 @@ __NUMPY_SETUP__: Final[L[False]] = False __numpy_submodules__: Final[set[LiteralString]] = ... __former_attrs__: Final[_FormerAttrsDict] = ... 
__future_scalars__: Final[set[L["bytes", "str", "object"]]] = ... -__array_api_version__: Final[L["2024.12"]] = "2024.12" +__array_api_version__: Final[L["2025.12"]] = "2025.12" test: Final[PytestTester] = ... @type_check_only From 2ec716d8b050bac4e971e18b7913775d069e550d Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Sat, 21 Mar 2026 13:37:15 +0000 Subject: [PATCH 1516/1718] CI: bump the array-api-tests version to v2026.02.26 --- .github/workflows/linux.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index f0976f34828a..7d6084c4b4c5 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -308,7 +308,7 @@ jobs: uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: repository: data-apis/array-api-tests - ref: '3c273cd34d51c64ed893737306d36adab23a94a1' # v2025.05.23 + ref: '41379d15d26d67a1e66c840e775d41a8a7fb1516' # v2026.02.26 submodules: 'true' path: 'array-api-tests' persist-credentials: false From 634f2a67761e720375effdda33001c24a8304ba8 Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Sat, 21 Mar 2026 13:40:52 +0000 Subject: [PATCH 1517/1718] MAINT: __array_namespace_info__().devices() returns a tuple from 2025.12 on --- numpy/_array_api_info.py | 6 +++--- numpy/_core/tests/test_array_api_info.py | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/numpy/_array_api_info.py b/numpy/_array_api_info.py index 41adb835433d..9e652b58b938 100644 --- a/numpy/_array_api_info.py +++ b/numpy/_array_api_info.py @@ -322,7 +322,7 @@ def devices(self): """ The devices supported by NumPy. - For NumPy, this always returns ``['cpu']``. + For NumPy, this always returns ``('cpu',)``. 
Returns ------- @@ -340,7 +340,7 @@ def devices(self): -------- >>> info = np.__array_namespace_info__() >>> info.devices() - ['cpu'] + ('cpu',) """ - return ["cpu"] + return ("cpu",) diff --git a/numpy/_core/tests/test_array_api_info.py b/numpy/_core/tests/test_array_api_info.py index 4842dbfa9486..6872e8000ce5 100644 --- a/numpy/_core/tests/test_array_api_info.py +++ b/numpy/_core/tests/test_array_api_info.py @@ -110,4 +110,4 @@ def test_dtypes_invalid_device(): def test_devices(): - assert info.devices() == ["cpu"] + assert info.devices() == ("cpu",) From 2adb31fd137d119da6e342e1230fb995299c7fd7 Mon Sep 17 00:00:00 2001 From: Evgeni Burovski Date: Sat, 21 Mar 2026 14:10:13 +0000 Subject: [PATCH 1518/1718] TST: add xfails for the Array API test failures The failures are now new and are due to the test suite updates. --- tools/ci/array-api-xfails.txt | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/tools/ci/array-api-xfails.txt b/tools/ci/array-api-xfails.txt index 8370099015c5..207c6c67e4e9 100644 --- a/tools/ci/array-api-xfails.txt +++ b/tools/ci/array-api-xfails.txt @@ -41,6 +41,9 @@ array_api_tests/test_operators_and_elementwise_functions.py::test_clip array_api_tests/test_signatures.py::test_extension_func_signature[fft.fftfreq] array_api_tests/test_signatures.py::test_extension_func_signature[fft.rfftfreq] +array_api_tests/test_fft.py::test_fftfreq +array_api_tests/test_fft.py::test_rfftfreq + # fails on np.repeat(np.array([]), np.array([])) test case array_api_tests/test_manipulation_functions.py::test_repeat @@ -63,3 +66,13 @@ array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(x1_i is -infinity array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(x1_i is -infinity and isfinite(x2_i) and x2_i < 0) -> +infinity] array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(isfinite(x1_i) and x1_i > 0 and x2_i is -infinity) -> -0] array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(isfinite(x1_i) and x1_i < 0 and 
x2_i is +infinity) -> -0] + +# complex plane special cases +array_api_tests/test_special_cases.py::test_unary[expm1((real(x_i) is +0 or real(x_i) == -0) and imag(x_i) is +0) -> 0 + 0j] +array_api_tests/test_special_cases.py::test_unary[expm1(real(x_i) is +infinity and imag(x_i) is +0) -> +infinity + 0j] +array_api_tests/test_special_cases.py::test_unary[expm1(real(x_i) is -infinity and imag(x_i) is +infinity) -> -1 + 0j] +array_api_tests/test_special_cases.py::test_unary[expm1(real(x_i) is +infinity and imag(x_i) is +infinity) -> infinity + NaN j] +array_api_tests/test_special_cases.py::test_unary[expm1(real(x_i) is -infinity and imag(x_i) is NaN) -> -1 + 0j] +array_api_tests/test_special_cases.py::test_unary[expm1(real(x_i) is +infinity and imag(x_i) is NaN) -> infinity + NaN j] +array_api_tests/test_special_cases.py::test_unary[expm1(real(x_i) is NaN and imag(x_i) is +0) -> NaN + 0j] +array_api_tests/test_special_cases.py::test_unary[tanh(real(x_i) is +infinity and isfinite(imag(x_i)) and imag(x_i) > 0) -> 1 + 0j] From 54d9fd17ae9fc42fc180cc90bcb6ddf3dfd6240c Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Sat, 21 Mar 2026 09:19:43 -0600 Subject: [PATCH 1519/1718] DOC: Apply Marten's comments and rephrase slightly --- doc/source/reference/c-api/array.rst | 9 +++++---- numpy/_core/src/multiarray/shape.c | 5 ----- numpy/_core/tests/test_multiarray.py | 3 +++ 3 files changed, 8 insertions(+), 9 deletions(-) diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst index 6d648d5dd2f6..88126f7c8fae 100644 --- a/doc/source/reference/c-api/array.rst +++ b/doc/source/reference/c-api/array.rst @@ -2254,11 +2254,12 @@ Shape Manipulation a different total number of elements then the old shape. If reallocation is necessary, then *self* must own its data, have *self* - ``>base==NULL``, have *self* - ``>weakrefs==NULL``, and (unless refcheck is 0) not be - referenced by any other array. + referenced by any other array. 
The *fortran* argument has no effect. - On Python 3.13 and older, the check allows objects with exactly one - reference to be reallocated in-place. On Python 3.14 and newer, the array - must be uniquely referenced. See the Python 3.14 `What's New entry + On Python 3.13 and older, the check allows uniquely referenced objects and + objects with exactly one reference to be reallocated in-place. On Python + 3.14 and newer, the array must be uniquely referenced. See the Python 3.14 + `What's New entry `_ on this topic for more information on why there is a behavior difference. diff --git a/numpy/_core/src/multiarray/shape.c b/numpy/_core/src/multiarray/shape.c index f5ab89b0bfed..2ada9b3e0bc5 100644 --- a/numpy/_core/src/multiarray/shape.c +++ b/numpy/_core/src/multiarray/shape.c @@ -194,11 +194,6 @@ PyArray_Resize_int(PyArrayObject *self, PyArray_Dims *newshape, int refcheck) * must be uniquely referenced. In some cases this can lead to spurious * ValueErrors on Python 3.14. * - * On Python 3.13 and older, the check allows objects with exactly one - * reference to be resized, because it is impossible to differentiate between - * an array with one reference created via an extension and a uniquely - * referenced array defined in a Python function. - * */ NPY_NO_EXPORT PyObject * PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 90217d4e4ffa..caa95cb5ea6b 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -6395,6 +6395,9 @@ def test_check_reference_module_scope(self): assert sys.version_info >= (3, 14) assert "ValueError" in e.stdout assert "It is possible that this is a false positive." 
in e.stdout + else: + if sys.version_info >= (3, 14): + raise AsseritonError("Unexpected success of resize refcheck") def test_check_reference_2(self): # see gh-30265 From 41afc3b0c4f6b8142849247a9594b0da5b6ff5a0 Mon Sep 17 00:00:00 2001 From: Marten Henric van Kerkwijk Date: Sun, 22 Mar 2026 08:47:24 +0100 Subject: [PATCH 1520/1718] BUG: avoid warning on ufunc with where=True and no output Alternative fix for gh-31030. This does not change that `out=None` is removed from what is passed to the ufunc, but ensures that if from the ufunc one calls the original function with the original keyword arguments, there will only be a warning about a missing `out` argument if `where!=True`. --- numpy/_core/src/umath/ufunc_object.c | 3 ++- numpy/_core/tests/test_ufunc.py | 4 ++++ numpy/_core/tests/test_umath.py | 13 +++++++++++++ 3 files changed, 19 insertions(+), 1 deletion(-) diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index b587fcc7009b..2ffdb55cf184 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -4637,7 +4637,8 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, } /* Warn if "where" is used without "out", issue 29561 */ - if ((where_obj != NULL) && (full_args.out == NULL) && (out_obj == NULL)) { + if ((where_obj != NULL && where_obj != Py_True) + && (full_args.out == NULL) && (out_obj == NULL)) { if (PyErr_WarnEx(PyExc_UserWarning, "'where' used without 'out', expect unitialized memory in output. 
" "If this is intentional, use out=None.", 1) < 0) { diff --git a/numpy/_core/tests/test_ufunc.py b/numpy/_core/tests/test_ufunc.py index 567b63deb873..ca9789a5a74f 100644 --- a/numpy/_core/tests/test_ufunc.py +++ b/numpy/_core/tests/test_ufunc.py @@ -1756,6 +1756,10 @@ def test_where_warns(self): # Sanity check assert np.all(result1[::2] == [0, 4, 8, 12]) assert np.all(result2[::2] == [0, 4, 8, 12]) + # Also no warning for where=True + result3 = np.add(a, a, where=True) + # Sanity check + assert_array_equal(result3, a + a) @staticmethod def identityless_reduce_arrs(): diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py index 8c5af69af9a7..eeac8ccd280b 100644 --- a/numpy/_core/tests/test_umath.py +++ b/numpy/_core/tests/test_umath.py @@ -3653,6 +3653,19 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): 'axis': 'axis0', 'initial': 'init0', 'where': 'where0'}) + # reduce, kwargs, out=None is removed + res = np.multiply.reduce(a, axis='axis0', dtype='dtype0', out=None, + keepdims='keep0', initial='init0', + where='where0') + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], 'reduce') + assert_equal(res[3], (a,)) + assert_equal(res[4], {'dtype': 'dtype0', + 'keepdims': 'keep0', + 'axis': 'axis0', + 'initial': 'init0', + 'where': 'where0'}) # reduce, output equal to None removed, but not other explicit ones, # even if they are at their default value. 
From 964ea6fe0d50e89b1cd95dc31f41bb976a099f20 Mon Sep 17 00:00:00 2001 From: aaronzuo Date: Sun, 22 Mar 2026 23:20:59 +0800 Subject: [PATCH 1521/1718] fix: f2py map complex_long_double to NPY_CLONGDOUBLE --- numpy/f2py/capi_maps.py | 2 +- numpy/f2py/tests_pure/test_capi_maps.py | 10 ++++++++++ 2 files changed, 11 insertions(+), 1 deletion(-) create mode 100644 numpy/f2py/tests_pure/test_capi_maps.py diff --git a/numpy/f2py/capi_maps.py b/numpy/f2py/capi_maps.py index 567bf3a00b90..552488a14313 100644 --- a/numpy/f2py/capi_maps.py +++ b/numpy/f2py/capi_maps.py @@ -70,7 +70,7 @@ 'unsigned_long_long': 'NPY_ULONGLONG', 'complex_float': 'NPY_CFLOAT', 'complex_double': 'NPY_CDOUBLE', - 'complex_long_double': 'NPY_CDOUBLE', + 'complex_long_double': 'NPY_CLONGDOUBLE', 'string': 'NPY_STRING', 'character': 'NPY_STRING'} diff --git a/numpy/f2py/tests_pure/test_capi_maps.py b/numpy/f2py/tests_pure/test_capi_maps.py new file mode 100644 index 000000000000..739b453c0f5e --- /dev/null +++ b/numpy/f2py/tests_pure/test_capi_maps.py @@ -0,0 +1,10 @@ +from numpy.f2py import capi_maps + + +def test_complex_long_double_capi_map(): + assert capi_maps.c2capi_map["complex_long_double"] == "NPY_CLONGDOUBLE" + + +def test_complex_long_double_is_distinct(): + assert capi_maps.c2pycode_map["complex_long_double"] != capi_maps.c2pycode_map["complex_double"] + assert capi_maps.c2capi_map["complex_long_double"] != capi_maps.c2capi_map["complex_double"] From 0f16a041348a51251f6bca3ba34b661e1dcc857c Mon Sep 17 00:00:00 2001 From: Daniel Haag <121057143+denialhaag@users.noreply.github.com> Date: Mon, 23 Mar 2026 10:13:08 +0100 Subject: [PATCH 1522/1718] BUG: Add test to reproduce problem described in #30816 (#30818) Co-authored-by: mattip --- numpy/linalg/tests/test_regression.py | 8 ++++++++ requirements/ci32_requirements.txt | 2 +- requirements/ci_requirements.txt | 4 ++-- 3 files changed, 11 insertions(+), 3 deletions(-) diff --git a/numpy/linalg/tests/test_regression.py 
b/numpy/linalg/tests/test_regression.py index 053e7130da63..12d10bc7eecc 100644 --- a/numpy/linalg/tests/test_regression.py +++ b/numpy/linalg/tests/test_regression.py @@ -7,6 +7,7 @@ from numpy import arange, array, dot, float64, linalg, transpose from numpy.testing import ( assert_, + assert_almost_equal, assert_array_almost_equal, assert_array_equal, assert_array_less, @@ -180,3 +181,10 @@ def test_openblas_threading(self): if mismatches != 0: assert False, ("unexpected result from matmul, " "probably due to OpenBLAS threading issues") + + def test_norm_linux_arm(self): + # gh-30816 + a = np.arange(20000) / 50000 + b = a + 1j * np.roll(np.flip(a), 12345) + norm = np.linalg.norm(b) + assert_almost_equal(norm, 46.18628948075393) diff --git a/requirements/ci32_requirements.txt b/requirements/ci32_requirements.txt index a19f6b3b9e7b..02ff529de09c 100644 --- a/requirements/ci32_requirements.txt +++ b/requirements/ci32_requirements.txt @@ -1,3 +1,3 @@ spin # Keep this in sync with ci_requirements.txt -scipy-openblas32==0.3.31.126.1 +scipy-openblas32==0.3.31.188.0 diff --git a/requirements/ci_requirements.txt b/requirements/ci_requirements.txt index f200891aa560..397fa703e28d 100644 --- a/requirements/ci_requirements.txt +++ b/requirements/ci_requirements.txt @@ -1,4 +1,4 @@ spin # Keep this in sync with ci32_requirements.txt -scipy-openblas32==0.3.31.126.1 -scipy-openblas64==0.3.31.126.1 +scipy-openblas32==0.3.31.188.0 +scipy-openblas64==0.3.31.188.0 From 8a1ff58b6577e16b991376f68c47fa446fce632a Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 23 Mar 2026 11:10:06 -0600 Subject: [PATCH 1523/1718] Update numpy/_core/src/multiarray/shape.c Co-authored-by: Matti Picus --- numpy/_core/src/multiarray/shape.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/multiarray/shape.c b/numpy/_core/src/multiarray/shape.c index 2ada9b3e0bc5..bbdc20781dcb 100644 --- a/numpy/_core/src/multiarray/shape.c +++ b/numpy/_core/src/multiarray/shape.c 
@@ -192,7 +192,7 @@ PyArray_Resize_int(PyArrayObject *self, PyArray_Dims *newshape, int refcheck) * On Python 3.13 and older, the check allows objects with exactly one * reference to be reallocated in-place. On Python 3.14 and newer, the array * must be uniquely referenced. In some cases this can lead to spurious - * ValueErrors on Python 3.14. + * ValueErrors. * */ NPY_NO_EXPORT PyObject * From 7266a8a1dc98a01050e956cf0a3124606032281a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 23 Mar 2026 18:13:36 +0000 Subject: [PATCH 1524/1718] MAINT: Bump astral-sh/setup-uv from 7.3.1 to 7.6.0 Bumps [astral-sh/setup-uv](https://github.com/astral-sh/setup-uv) from 7.3.1 to 7.6.0. - [Release notes](https://github.com/astral-sh/setup-uv/releases) - [Commits](https://github.com/astral-sh/setup-uv/compare/5a095e7a2014a4212f075830d4f7277575a9d098...37802adc94f370d6bfd71619e3f0bf239e1f3b78) --- updated-dependencies: - dependency-name: astral-sh/setup-uv dependency-version: 7.6.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- .github/workflows/stubtest.yml | 2 +- .github/workflows/typecheck.yml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/stubtest.yml b/.github/workflows/stubtest.yml index 93caa3b1c526..9cab4cf63430 100644 --- a/.github/workflows/stubtest.yml +++ b/.github/workflows/stubtest.yml @@ -43,7 +43,7 @@ jobs: fetch-tags: true persist-credentials: false - - uses: astral-sh/setup-uv@5a095e7a2014a4212f075830d4f7277575a9d098 # v7.3.1 + - uses: astral-sh/setup-uv@37802adc94f370d6bfd71619e3f0bf239e1f3b78 # v7.6.0 with: python-version: ${{ matrix.py }} activate-environment: true diff --git a/.github/workflows/typecheck.yml b/.github/workflows/typecheck.yml index f9e909a7b8ce..826e1ffe0f31 100644 --- a/.github/workflows/typecheck.yml +++ b/.github/workflows/typecheck.yml @@ -61,7 +61,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: astral-sh/setup-uv@5a095e7a2014a4212f075830d4f7277575a9d098 # v7.3.1 + - uses: astral-sh/setup-uv@37802adc94f370d6bfd71619e3f0bf239e1f3b78 # v7.6.0 with: python-version: ${{ matrix.os_python[1] }} activate-environment: true @@ -94,7 +94,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - - uses: astral-sh/setup-uv@5a095e7a2014a4212f075830d4f7277575a9d098 # v7.3.1 + - uses: astral-sh/setup-uv@37802adc94f370d6bfd71619e3f0bf239e1f3b78 # v7.6.0 with: activate-environment: true - name: Install dependencies From a2123c2a823d8e6c68079d5ad49b468e209b5294 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 23 Mar 2026 18:13:42 +0000 Subject: [PATCH 1525/1718] MAINT: Bump egor-tensin/cleanup-path from 4.0.3 to 5.0.1 Bumps [egor-tensin/cleanup-path](https://github.com/egor-tensin/cleanup-path) from 4.0.3 to 5.0.1. 
- [Release notes](https://github.com/egor-tensin/cleanup-path/releases) - [Commits](https://github.com/egor-tensin/cleanup-path/compare/cf0901d753db0bf4d15baf625a6fa537978b03a9...8cbbf6af9f8cf1d347258e3f6b850622e480d16d) --- updated-dependencies: - dependency-name: egor-tensin/cleanup-path dependency-version: 5.0.1 dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- .github/workflows/cygwin.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/cygwin.yml b/.github/workflows/cygwin.yml index 641bfce85936..b530bdee0f71 100644 --- a/.github/workflows/cygwin.yml +++ b/.github/workflows/cygwin.yml @@ -38,7 +38,7 @@ jobs: python-setuptools-wheel liblapack-devel liblapack0 gcc-fortran gcc-g++ git dash cmake ninja - name: Set Windows PATH - uses: egor-tensin/cleanup-path@cf0901d753db0bf4d15baf625a6fa537978b03a9 # v4.0.3 + uses: egor-tensin/cleanup-path@8cbbf6af9f8cf1d347258e3f6b850622e480d16d # v5.0.1 with: dirs: 'C:\tools\cygwin\bin;C:\tools\cygwin\lib\lapack' - name: Verify that bash is Cygwin bash From 7c8dd3c40c86ca5da62948f0aeb9dfe9c58c52d1 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Mon, 23 Mar 2026 19:58:18 -0600 Subject: [PATCH 1526/1718] MAINT: Fix typo in test_multiarray.py AsseritonError -> AssertionError. --- numpy/_core/tests/test_multiarray.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index caa95cb5ea6b..5f9fe44a7011 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -6397,7 +6397,7 @@ def test_check_reference_module_scope(self): assert "It is possible that this is a false positive." 
in e.stdout else: if sys.version_info >= (3, 14): - raise AsseritonError("Unexpected success of resize refcheck") + raise AssertionError("Unexpected success of resize refcheck") def test_check_reference_2(self): # see gh-30265 From e582cddd1cb41d5ea984e26e65f9ede81d8969bc Mon Sep 17 00:00:00 2001 From: Antareep Sarkar <206247917+antareepsarkar@users.noreply.github.com> Date: Tue, 24 Mar 2026 16:05:09 +0530 Subject: [PATCH 1527/1718] DOC: replace name in doc for numpy.place --- numpy/lib/_function_base_impl.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 69d1ed8a7c87..4548bb19ac0a 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -2114,11 +2114,11 @@ def place(arr, mask, vals): arr : ndarray Array to put data into. mask : array_like - Boolean mask array. Must have the same size as `a`. + Boolean mask array. Must have the same size as `arr`. vals : 1-D sequence - Values to put into `a`. Only the first N elements are used, where + Values to put into `arr`. Only the first N elements are used, where N is the number of True values in `mask`. If `vals` is smaller - than N, it will be repeated, and if elements of `a` are to be masked, + than N, it will be repeated, and if elements of `arr` are to be masked, this sequence must be non-empty. See Also From b996b34012a8625cc922e0ac3e1e440343e6691a Mon Sep 17 00:00:00 2001 From: Kumar Aditya Date: Tue, 24 Mar 2026 19:11:06 +0530 Subject: [PATCH 1528/1718] ENH: add getters for accessing struct fields in cython (#30994) This PR adds getters for accessing struct fields in cython. In future these would call an ABI function to get the actual fields struct on opaque PyObject builds. 
Co-authored-by: Nathan Goldbaum --- doc/release/upcoming_changes/30994.c_api.rst | 4 + doc/source/reference/c-api/array.rst | 29 +++++ numpy/__init__.cython-30.pxd | 106 +++++++++++++++---- numpy/_core/include/numpy/ndarraytypes.h | 46 ++++---- numpy/_core/include/numpy/npy_2_compat.h | 5 + numpy/_core/include/numpy/ufuncobject.h | 10 +- numpy/f2py/src/fortranobject.h | 6 +- 7 files changed, 160 insertions(+), 46 deletions(-) create mode 100644 doc/release/upcoming_changes/30994.c_api.rst diff --git a/doc/release/upcoming_changes/30994.c_api.rst b/doc/release/upcoming_changes/30994.c_api.rst new file mode 100644 index 000000000000..5a704405ec98 --- /dev/null +++ b/doc/release/upcoming_changes/30994.c_api.rst @@ -0,0 +1,4 @@ +* Added ``PyDataType_TYPE``, ``PyDataType_KIND``, ``PyDataType_BYTEORDER`` and + ``PyDataType_TYPEOBJ`` accessor macros to the C API. Together with the other + accessor macros added for the NumPy 2.0 transition, these allow accessing the + fields of ``PyArray_Descr`` structs without any direct field accesses. diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst index 88126f7c8fae..39ff96ae901c 100644 --- a/doc/source/reference/c-api/array.rst +++ b/doc/source/reference/c-api/array.rst @@ -841,6 +841,35 @@ cannot not be accessed directly. The shape (always C-style contiguous) of the sub-array as a Python tuple. +.. c:function:: char PyDataType_TYPE(PyArray_Descr *descr) + + .. versionadded:: 2.5 + + Data type character code. See `numpy.dtype.char`. Only set for built-in and + legacy user DTypes. Null character (``b'\x00'``) otherwise. + +.. c:function:: char PyDataType_KIND(PyArray_Descr *descr) + + .. versionadded:: 2.5 + + Data type kind character code. See `numpy.dtype.kind`. Only set for built-in + and legacy user DTypes. Null character (``b'\x00``) otherwise. + +.. c:function:: char PyDataType_BYTEORDER(PyArray_Descr *descr) + + .. versionadded:: 2.5 + + Data type bytorder character code. 
One of ``'='`` (native), ``'<'`` + (little-endian), ``'>'`` (big-endian), or ``'|'`` (not applicable). See + `numpy.dtype.byteorder`. + +.. c:function:: PyTypeObject *PyDataType_TYPEOBJ(PyArray_Descr *descr) + + .. versionadded:: 2.5 + + The type object for the scalar type. See the ``typeobj`` member of the + ``PyArray_Descr`` struct. See :c:data:`PyArray_Descr` for a full description + of the ``PyArray_Descr`` struct layout. Data-type checking ~~~~~~~~~~~~~~~~~~ diff --git a/numpy/__init__.cython-30.pxd b/numpy/__init__.cython-30.pxd index c71898626070..e05c20c57761 100644 --- a/numpy/__init__.cython-30.pxd +++ b/numpy/__init__.cython-30.pxd @@ -227,18 +227,30 @@ cdef extern from "numpy/arrayobject.h": pass ctypedef class numpy.dtype [object PyArray_Descr, check_size ignore]: - # Use PyDataType_* macros when possible, however there are no macros - # for accessing some of the fields, so some are defined. - cdef PyTypeObject* typeobj - cdef char kind - cdef char type + @property + cdef inline PyTypeObject* typeobj(self) noexcept nogil: + return PyDataType_TYPEOBJ(self) + + @property + cdef inline char kind(self) noexcept nogil: + return PyDataType_KIND(self) + + @property + cdef inline char type(self) noexcept nogil: + return PyDataType_TYPE(self) + # Numpy sometimes mutates this without warning (e.g. it'll # sometimes change "|" to "<" in shared dtype objects on # little-endian machines). If this matters to you, use # PyArray_IsNativeByteOrder(dtype.byteorder) instead of # directly accessing this field. 
- cdef char byteorder - cdef int type_num + @property + cdef inline char byteorder(self) noexcept nogil: + return PyDataType_BYTEORDER(self) + + @property + cdef inline int type_num(self) noexcept nogil: + return PyDataType_TYPENUM(self) @property cdef inline npy_intp itemsize(self) noexcept nogil: @@ -428,6 +440,11 @@ cdef extern from "numpy/arrayobject.h": PyArray_ArrayDescr* PyDataType_SUBARRAY(dtype) nogil PyObject* PyDataType_NAMES(dtype) nogil PyObject* PyDataType_FIELDS(dtype) nogil + char PyDataType_TYPE(dtype) nogil + char PyDataType_KIND(dtype) nogil + int PyDataType_TYPENUM(dtype) nogil + char PyDataType_BYTEORDER(dtype) nogil + PyTypeObject* PyDataType_TYPEOBJ(dtype) nogil bint PyDataType_ISBOOL(dtype) nogil bint PyDataType_ISUNSIGNED(dtype) nogil @@ -905,20 +922,69 @@ cdef extern from "numpy/ufuncobject.h": ctypedef void (*PyUFuncGenericFunction) (char **, npy_intp *, npy_intp *, void *) + ctypedef struct PyUFuncObject_fields: + int nin, nout, nargs + int identity + PyUFuncGenericFunction *functions + void **data + int ntypes + int check_return + char *name + char *types + char *doc + void *ptr + PyObject *obj + PyObject *userloops + + ctypedef struct PyUFuncObject: + pass + ctypedef class numpy.ufunc [object PyUFuncObject, check_size ignore]: - cdef: - int nin, nout, nargs - int identity - PyUFuncGenericFunction *functions - void **data - int ntypes - int check_return - char *name - char *types - char *doc - void *ptr - PyObject *obj - PyObject *userloops + @property + cdef inline int nin(self) noexcept nogil: + return _PyUFuncObject_GET_ITEM_DATA(self).nin + + @property + cdef inline int nout(self) noexcept nogil: + return _PyUFuncObject_GET_ITEM_DATA(self).nout + + @property + cdef inline int nargs(self) noexcept nogil: + return _PyUFuncObject_GET_ITEM_DATA(self).nargs + + @property + cdef inline PyUFuncGenericFunction* functions(self) noexcept nogil: + return _PyUFuncObject_GET_ITEM_DATA(self).functions + + @property + cdef inline void ** 
data(self) noexcept nogil: + return _PyUFuncObject_GET_ITEM_DATA(self).data + + @property + cdef inline int ntypes(self) noexcept nogil: + return _PyUFuncObject_GET_ITEM_DATA(self).ntypes + + @property + cdef inline const char* name(self) noexcept nogil: + return _PyUFuncObject_GET_ITEM_DATA(self).name + + @property + cdef inline const char* doc(self) noexcept nogil: + return _PyUFuncObject_GET_ITEM_DATA(self).doc + + @property + cdef inline void* ptr(self) noexcept nogil: + return _PyUFuncObject_GET_ITEM_DATA(self).ptr + + @property + cdef inline PyObject* obj(self) noexcept nogil: + return _PyUFuncObject_GET_ITEM_DATA(self).obj + + @property + cdef inline PyObject* userloops(self) noexcept nogil: + return _PyUFuncObject_GET_ITEM_DATA(self).userloops + + PyUFuncObject_fields *_PyUFuncObject_GET_ITEM_DATA(ufunc) nogil cdef enum: PyUFunc_Zero diff --git a/numpy/_core/include/numpy/ndarraytypes.h b/numpy/_core/include/numpy/ndarraytypes.h index 6afdcf821a6b..cc3b6a7da569 100644 --- a/numpy/_core/include/numpy/ndarraytypes.h +++ b/numpy/_core/include/numpy/ndarraytypes.h @@ -173,7 +173,7 @@ enum NPY_TYPECHAR { * should be downstream compatible, but the actual algorithms used may be * different than before. The new approach should be more flexible and easier * to update. - * + * * Names with a leading underscore are private, and should only be used * internally by NumPy. * @@ -187,7 +187,7 @@ typedef enum { NPY_HEAPSORT = 1, NPY_MERGESORT = 2, NPY_STABLESORT = 2, - // new style names + // new style names _NPY_SORT_HEAPSORT = 1, NPY_SORT_DEFAULT = 0, NPY_SORT_STABLE = 2, @@ -735,6 +735,8 @@ typedef struct _arr_descr { PyObject *shape; /* a tuple */ } PyArray_ArrayDescr; +#define PyDataType_TYPENUM(descr) (((PyArray_Descr *)(descr))->type_num) + /* * Memory handler structure for array data. 
*/ @@ -1597,7 +1599,7 @@ PyArray_FLAGS(const PyArrayObject *arr) static inline int PyArray_TYPE(const PyArrayObject *arr) { - return ((PyArrayObject_fields *)arr)->descr->type_num; + return PyDataType_TYPENUM(PyArray_DESCR(arr)); } static inline int @@ -1609,7 +1611,7 @@ PyArray_CHKFLAGS(const PyArrayObject *arr, int flags) static inline PyArray_Descr * PyArray_DTYPE(const PyArrayObject *arr) { - return ((PyArrayObject_fields *)arr)->descr; + return PyArray_DESCR(arr); } static inline npy_intp * @@ -1692,21 +1694,21 @@ PyArray_CLEARFLAGS(PyArrayObject *arr, int flags) #define PyTypeNum_ISOBJECT(type) ((type) == NPY_OBJECT) -#define PyDataType_ISLEGACY(dtype) ((dtype)->type_num < NPY_VSTRING && ((dtype)->type_num >= 0)) -#define PyDataType_ISBOOL(obj) PyTypeNum_ISBOOL(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISUNSIGNED(obj) PyTypeNum_ISUNSIGNED(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISSIGNED(obj) PyTypeNum_ISSIGNED(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISINTEGER(obj) PyTypeNum_ISINTEGER(((PyArray_Descr*)(obj))->type_num ) -#define PyDataType_ISFLOAT(obj) PyTypeNum_ISFLOAT(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISNUMBER(obj) PyTypeNum_ISNUMBER(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISSTRING(obj) PyTypeNum_ISSTRING(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISCOMPLEX(obj) PyTypeNum_ISCOMPLEX(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISFLEXIBLE(obj) PyTypeNum_ISFLEXIBLE(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISDATETIME(obj) PyTypeNum_ISDATETIME(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISUSERDEF(obj) PyTypeNum_ISUSERDEF(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISEXTENDED(obj) PyTypeNum_ISEXTENDED(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISOBJECT(obj) PyTypeNum_ISOBJECT(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_MAKEUNSIZED(dtype) ((dtype)->elsize = 0) +#define PyDataType_ISLEGACY(dtype) 
(PyDataType_TYPENUM(dtype) < NPY_VSTRING && (PyDataType_TYPENUM(dtype) >= 0)) +#define PyDataType_ISBOOL(obj) PyTypeNum_ISBOOL(PyDataType_TYPENUM(obj)) +#define PyDataType_ISUNSIGNED(obj) PyTypeNum_ISUNSIGNED(PyDataType_TYPENUM(obj)) +#define PyDataType_ISSIGNED(obj) PyTypeNum_ISSIGNED(PyDataType_TYPENUM(obj)) +#define PyDataType_ISINTEGER(obj) PyTypeNum_ISINTEGER(PyDataType_TYPENUM(obj)) +#define PyDataType_ISFLOAT(obj) PyTypeNum_ISFLOAT(PyDataType_TYPENUM(obj)) +#define PyDataType_ISNUMBER(obj) PyTypeNum_ISNUMBER(PyDataType_TYPENUM(obj)) +#define PyDataType_ISSTRING(obj) PyTypeNum_ISSTRING(PyDataType_TYPENUM(obj)) +#define PyDataType_ISCOMPLEX(obj) PyTypeNum_ISCOMPLEX(PyDataType_TYPENUM(obj)) +#define PyDataType_ISFLEXIBLE(obj) PyTypeNum_ISFLEXIBLE(PyDataType_TYPENUM(obj)) +#define PyDataType_ISDATETIME(obj) PyTypeNum_ISDATETIME(PyDataType_TYPENUM(obj)) +#define PyDataType_ISUSERDEF(obj) PyTypeNum_ISUSERDEF(PyDataType_TYPENUM(obj)) +#define PyDataType_ISEXTENDED(obj) PyTypeNum_ISEXTENDED(PyDataType_TYPENUM(obj)) +#define PyDataType_ISOBJECT(obj) PyTypeNum_ISOBJECT(PyDataType_TYPENUM(obj)) +#define PyDataType_MAKEUNSIZED(dtype) PyDataType_SET_ELSIZE(dtype, 0) /* * PyDataType_* FLAGS, FLACHK, REFCHK, HASFIELDS, HASSUBARRAY, UNSIZED, * SUBARRAY, NAMES, FIELDS, C_METADATA, and METADATA require version specific @@ -1755,7 +1757,7 @@ PyArray_CLEARFLAGS(PyArrayObject *arr, int flags) #define PyArray_ISNBO(arg) ((arg) != NPY_OPPBYTE) #define PyArray_IsNativeByteOrder PyArray_ISNBO -#define PyArray_ISNOTSWAPPED(m) PyArray_ISNBO(PyArray_DESCR(m)->byteorder) +#define PyArray_ISNOTSWAPPED(m) PyArray_ISNBO(PyDataType_BYTEORDER(PyArray_DESCR(m))) #define PyArray_ISBYTESWAPPED(m) (!PyArray_ISNOTSWAPPED(m)) #define PyArray_FLAGSWAP(m, flags) (PyArray_CHKFLAGS(m, flags) && \ @@ -1769,7 +1771,7 @@ PyArray_CLEARFLAGS(PyArrayObject *arr, int flags) #define PyArray_ISBEHAVED_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_ALIGNED) -#define PyDataType_ISNOTSWAPPED(d) PyArray_ISNBO(((PyArray_Descr 
*)(d))->byteorder) +#define PyDataType_ISNOTSWAPPED(d) PyArray_ISNBO(PyDataType_BYTEORDER(d)) #define PyDataType_ISBYTESWAPPED(d) (!PyDataType_ISNOTSWAPPED(d)) /************************************************************ @@ -1957,7 +1959,7 @@ typedef struct { * #endif * #ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_1_7_DEPRECATED_API_H_ * #define NUMPY_CORE_INCLUDE_NUMPY_NPY_1_7_DEPRECATED_API_H_ - * + * * #ifndef NPY_NO_DEPRECATED_API * #if defined(_WIN32) * #define _WARN___STR2__(x) #x diff --git a/numpy/_core/include/numpy/npy_2_compat.h b/numpy/_core/include/numpy/npy_2_compat.h index e39e65aedea7..9b14bc2de86a 100644 --- a/numpy/_core/include/numpy/npy_2_compat.h +++ b/numpy/_core/include/numpy/npy_2_compat.h @@ -213,6 +213,11 @@ DESCR_ACCESSOR(SUBARRAY, subarray, PyArray_ArrayDescr *, 1) DESCR_ACCESSOR(NAMES, names, PyObject *, 1) DESCR_ACCESSOR(FIELDS, fields, PyObject *, 1) DESCR_ACCESSOR(C_METADATA, c_metadata, NpyAuxData *, 1) +/* ABI compatible in 1.x and 2.x, but defined together with others */ +DESCR_ACCESSOR(TYPE, type, char, 0) +DESCR_ACCESSOR(KIND, kind, char, 0) +DESCR_ACCESSOR(BYTEORDER, byteorder, char, 0) +DESCR_ACCESSOR(TYPEOBJ, typeobj, PyTypeObject *, 0) #undef DESCR_ACCESSOR diff --git a/numpy/_core/include/numpy/ufuncobject.h b/numpy/_core/include/numpy/ufuncobject.h index f5f82b57c91f..cf7064753d0e 100644 --- a/numpy/_core/include/numpy/ufuncobject.h +++ b/numpy/_core/include/numpy/ufuncobject.h @@ -232,7 +232,9 @@ typedef struct _tagPyUFuncObject { */ PyUFunc_ProcessCoreDimsFunc *process_core_dims_func; #endif -} PyUFuncObject; +} PyUFuncObject_fields; + +typedef PyUFuncObject_fields PyUFuncObject; #include "arrayobject.h" /* Generalized ufunc; 0x0001 reserved for possible use as CORE_ENABLED */ @@ -336,6 +338,12 @@ typedef struct _loop1d_info { #include "__ufunc_api.h" +// In future, when adding support for opaque PyObject, this would become +// a ABI function call to get the ufunc struct fields from the PyObject. 
+static inline PyUFuncObject_fields *_PyUFuncObject_GET_ITEM_DATA(PyUFuncObject *ufunc) { + return (PyUFuncObject_fields *)ufunc; +} + #ifdef __cplusplus } #endif diff --git a/numpy/f2py/src/fortranobject.h b/numpy/f2py/src/fortranobject.h index 4aed2f60891b..21ff12d9b622 100644 --- a/numpy/f2py/src/fortranobject.h +++ b/numpy/f2py/src/fortranobject.h @@ -130,9 +130,9 @@ F2PyGetThreadLocalCallbackPtr(char *key); : (F2PY_ALIGN8(intent) ? 8 : (F2PY_ALIGN16(intent) ? 16 : 1))) #define F2PY_CHECK_ALIGNMENT(arr, intent) \ ARRAY_ISALIGNED(arr, F2PY_GET_ALIGNMENT(intent)) -#define F2PY_ARRAY_IS_CHARACTER_COMPATIBLE(arr) ((PyArray_DESCR(arr)->type_num == NPY_STRING && PyArray_ITEMSIZE(arr) >= 1) \ - || PyArray_DESCR(arr)->type_num == NPY_UINT8) -#define F2PY_IS_UNICODE_ARRAY(arr) (PyArray_DESCR(arr)->type_num == NPY_UNICODE) +#define F2PY_ARRAY_IS_CHARACTER_COMPATIBLE(arr) ((PyDataType_TYPENUM(PyArray_DESCR(arr)) == NPY_STRING && PyArray_ITEMSIZE(arr) >= 1) \ + || PyDataType_TYPENUM(PyArray_DESCR(arr)) == NPY_UINT8) +#define F2PY_IS_UNICODE_ARRAY(arr) (PyDataType_TYPENUM(PyArray_DESCR(arr)) == NPY_UNICODE) extern PyArrayObject * ndarray_from_pyobj(const int type_num, const int elsize_, npy_intp *dims, From e87d0e0132d8559e16653a069dd82306bf8dc934 Mon Sep 17 00:00:00 2001 From: Yoshimura <94695237+himanow@users.noreply.github.com> Date: Wed, 25 Mar 2026 01:25:24 +0900 Subject: [PATCH 1529/1718] BUG: np.take out dtype (#30615) This PR addresses the issue in np.take where an error was raised when input and output dtypes were different during casting. Key changes: Allows 'same-kind' casting to proceed in np.take. Emits a DeprecationWarning for other casting types to maintain backward compatibility while signaling future changes. This is my first contribution to NumPy, so I'm opening this as a draft to ensure the implementation and formatting align with the project's standards. I'd appreciate any feedback on the code or the CI results. 
Co-authored-by: Sebastian Berg --- .../upcoming_changes/30615.deprecation.rst | 7 +++++++ numpy/_core/src/multiarray/item_selection.c | 19 ++++++++++++++++--- numpy/_core/tests/test_deprecations.py | 13 +++++++++++++ numpy/_core/tests/test_item_selection.py | 11 +++++++++++ numpy/_core/tests/test_regression.py | 16 +++++++++++----- 5 files changed, 58 insertions(+), 8 deletions(-) create mode 100644 doc/release/upcoming_changes/30615.deprecation.rst diff --git a/doc/release/upcoming_changes/30615.deprecation.rst b/doc/release/upcoming_changes/30615.deprecation.rst new file mode 100644 index 000000000000..4f95a3bed811 --- /dev/null +++ b/doc/release/upcoming_changes/30615.deprecation.rst @@ -0,0 +1,7 @@ +* `numpy.take` now correctly checks if the result can be cast to + the provided ``out=out`` under the same-kind rule. + A ``DeprecationWarning`` is given now when this check fails. + Previously, ``take`` incorrectly checked if ``out`` could be cast to + the result (the wrong direction). + This deprecation also affects ``compress`` and possibly other functions. + (Future versions of NumPy may tighten the casting check further.) 
diff --git a/numpy/_core/src/multiarray/item_selection.c b/numpy/_core/src/multiarray/item_selection.c index 8946b93fbee3..454a434304bb 100644 --- a/numpy/_core/src/multiarray/item_selection.c +++ b/numpy/_core/src/multiarray/item_selection.c @@ -232,7 +232,7 @@ NPY_NO_EXPORT PyObject * PyArray_TakeFrom(PyArrayObject *self0, PyObject *indices0, int axis, PyArrayObject *out, NPY_CLIPMODE clipmode) { - PyArray_Descr *dtype; + PyArray_Descr *dtype, *out_dtype; PyArrayObject *obj = NULL, *self, *indices; npy_intp nd, i, n, m, max_item, chunk, itemsize, nelem; npy_intp shape[NPY_MAXDIMS]; @@ -310,6 +310,19 @@ PyArray_TakeFrom(PyArrayObject *self0, PyObject *indices0, int axis, flags |= NPY_ARRAY_ENSURECOPY; } dtype = PyArray_DESCR(self); + out_dtype = PyArray_DESCR(out); + if (dtype != out_dtype) { + /* Deprecated NumPy 2.5, 2026-01 */ + if (!PyArray_CanCastTypeTo(dtype, out_dtype, NPY_SAME_KIND_CASTING)) { + if (DEPRECATE( + "Implicit casting of output to a different kind is deprecated. " + "In a future version, this will result in an error. 
(Deprecated NumPy 2.5)") < + 0) { + goto fail; + } + } + flags |= NPY_ARRAY_FORCECAST; + } Py_INCREF(dtype); obj = (PyArrayObject *)PyArray_FromArray(out, dtype, flags); if (obj == NULL) { @@ -3170,7 +3183,7 @@ PyArray_Sort(PyArrayObject *op, int axis, NPY_SORTKIND flags) PyArray_Descr *given_descrs[2] = {descr, descr}; // Sort cannot be a view, so view_offset is unused npy_intp view_offset = 0; - + if (sort_method->resolve_descriptors( sort_method, dtypes, given_descrs, loop_descrs, &view_offset) < 0) { PyErr_SetString(PyExc_RuntimeError, @@ -3280,7 +3293,7 @@ PyArray_ArgSort(PyArrayObject *op, int axis, NPY_SORTKIND flags) PyArray_Descr *given_descrs[2] = {descr, odescr}; // we can ignore the view_offset for sorting npy_intp view_offset = 0; - + int resolve_ret = argsort_method->resolve_descriptors( argsort_method, dtypes, given_descrs, loop_descrs, &view_offset); Py_DECREF(odescr); diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index fc1ae9d56fc0..c17f8afa3d3c 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -366,6 +366,7 @@ def test_round_emits_deprecation_warning_array(self): def test_round_emits_deprecation_warning_scalar(self): self.assert_deprecated(lambda: np.ma.round_(3.14)) + class TestTriDeprecationWithNonInteger(_DeprecationTestCase): # Deprecation in NumPy 2.5, 2026-03 @@ -392,3 +393,15 @@ def test_tril_indices_from(self): [ 8, 9, 10, 11], [12, 13, 14, 15]]) self.assert_deprecated(lambda: np.tril_indices_from(a, k=9.8)) + + +class TestTakeOutDtype(_DeprecationTestCase): + # Deprecated in Numpy 2.5, 2026-01 + message = "Implicit casting of output to a different kind." 
+ + def test_out_dtype_deprecated(self): + a = np.arange(3).astype(np.int32) + indices = np.arange(2) + different_dtype_out = np.zeros_like(indices, dtype=np.uint32) + + self.assert_deprecated(lambda: np.take(a, indices, out=different_dtype_out)) diff --git a/numpy/_core/tests/test_item_selection.py b/numpy/_core/tests/test_item_selection.py index 0e08b7cfd8e0..2d9caaf8061c 100644 --- a/numpy/_core/tests/test_item_selection.py +++ b/numpy/_core/tests/test_item_selection.py @@ -81,6 +81,17 @@ def test_empty_partition(self): assert_array_equal(a, a_original) + @pytest.mark.parametrize("dtype", + [np.int8, np.int16, np.int32, np.int64, + np.float16, np.float32, np.float64, np.longdouble]) + def test_out_dtype(self, dtype): + # In reference to github issue #25588 + a = np.arange(3).astype(np.int32) + indices = np.arange(2) + out = np.zeros_like(indices, dtype=dtype) + np.take(a, indices, out=out) + assert_array_equal(a[indices], out) + def test_empty_argpartition(self): # In reference to github issue #6530 a = np.array([0, 2, 4, 6, 8, 10]) diff --git a/numpy/_core/tests/test_regression.py b/numpy/_core/tests/test_regression.py index f53e1b1c3dd3..5da357f32f7a 100644 --- a/numpy/_core/tests/test_regression.py +++ b/numpy/_core/tests/test_regression.py @@ -1093,18 +1093,24 @@ def test_astype_copy(self): assert_(xp.__array_interface__['data'][0] != xpd.__array_interface__['data'][0]) + @pytest.mark.filterwarnings( + "error:Implicit casting of output.*:DeprecationWarning", + ) def test_compress_small_type(self): # Ticket #789, changeset 5217. 
# compress with out argument segfaulted if cannot cast safely import numpy as np a = np.array([[1, 2], [3, 4]]) b = np.zeros((2, 1), dtype=np.single) + a.compress([True, False], axis=1, out=b) + assert_equal(b, np.array([[1.0], [3.0]])) try: - a.compress([True, False], axis=1, out=b) - raise AssertionError("compress with an out which cannot be " - "safely casted should not return " - "successfully") - except TypeError: + # Previously the above already failed (and that is OK) but take + # currently allows same-kind casting for the output. + a.compress([True, False], axis=1, out=np.empty((2, 1), dtype=bool)) + raise AssertionError("Expected TypeError due to unsafe out cast") + except DeprecationWarning: + # After deprecation remove TypeError the warnings filter. pass def test_attributes(self): From 7baf0e7c71dfc2de9fa5022cf51cb4873efd4285 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Tue, 24 Mar 2026 14:12:25 -0600 Subject: [PATCH 1530/1718] ENH: TSan pixi package definitions (#30510) Co-authored-by: Guido Imperiale --- .github/workflows/pixi-packages.yml | 14 ++- pixi-packages/README.md | 104 ++++++++++++++++--- pixi-packages/asan/LICENSE.txt | 30 ++++++ pixi-packages/asan/pixi.toml | 9 +- pixi-packages/default/LICENSE.txt | 30 ++++++ pixi-packages/default/pixi.toml | 19 +++- pixi-packages/freethreading/LICENSE.txt | 30 ++++++ pixi-packages/freethreading/pixi.toml | 38 +++++++ pixi-packages/tsan-freethreading/LICENSE.txt | 30 ++++++ pixi-packages/tsan-freethreading/pixi.toml | 32 ++++++ 10 files changed, 313 insertions(+), 23 deletions(-) create mode 100644 pixi-packages/asan/LICENSE.txt create mode 100644 pixi-packages/default/LICENSE.txt create mode 100644 pixi-packages/freethreading/LICENSE.txt create mode 100644 pixi-packages/freethreading/pixi.toml create mode 100644 pixi-packages/tsan-freethreading/LICENSE.txt create mode 100644 pixi-packages/tsan-freethreading/pixi.toml diff --git a/.github/workflows/pixi-packages.yml 
b/.github/workflows/pixi-packages.yml index 2f873d43a160..d864340eadc2 100644 --- a/.github/workflows/pixi-packages.yml +++ b/.github/workflows/pixi-packages.yml @@ -20,8 +20,16 @@ jobs: strategy: fail-fast: false matrix: - runs-on: [ubuntu-latest, macos-15] - package_variant: [asan, default] + runs-on: [ubuntu-latest, ubuntu-24.04-arm, macos-latest] + package_variant: + - default + - freethreading + - asan + # TSan CI runs for longer than 30 minutes due to slow + # stdlib bytecode compilation step. Disable or parallelize + # byte compilation once rattler-build has support. + # See https://github.com/prefix-dev/pixi/pull/5737 + # - tsan-freethreading if: github.repository == 'numpy/numpy' steps: - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 @@ -32,7 +40,7 @@ jobs: - uses: prefix-dev/setup-pixi@a0af7a228712d6121d37aba47adf55c1332c9c2e # v0.9.4 with: - pixi-version: v0.60.0 + pixi-version: v0.64.0 run-install: false - name: Build diff --git a/pixi-packages/README.md b/pixi-packages/README.md index 9f1fed5fdb2c..bd47c91bcc92 100644 --- a/pixi-packages/README.md +++ b/pixi-packages/README.md @@ -1,37 +1,109 @@ # NumPy Pixi packages -This directory contains definitions for [Pixi packages](https://pixi.sh/latest/reference/pixi_manifest/#the-package-section) -which can be built from the NumPy source code. +This directory contains definitions for [Pixi +packages](https://pixi.sh/latest/reference/pixi_manifest/#the-package-section) which can +be built from the NumPy source code. -Downstream developers can make use of these packages by adding them as Git dependencies in a -[Pixi workspace](https://pixi.sh/latest/first_workspace/), like: - -```toml -[dependencies] -numpy = { git = "https://github.com/numpy/numpy", subdirectory = "pixi-packages/asan" } -``` +Downstream developers can make use of these packages by adding them as Git dependencies +in a [Pixi workspace](https://pixi.sh/latest/first_workspace/). 
This is particularly useful when developers need to build NumPy from source (for example, for an ASan-instrumented build), as it does not require any manual clone or build steps. Instead, Pixi will automatically handle both the build and installation of the package. -See https://github.com/scipy/scipy/pull/24066 for a full example of downstream use. +See [scipy#24066](https://github.com/scipy/scipy/pull/24066) for a full example of +downstream use. +## Variants Each package definition is contained in a subdirectory. -Currently defined package variants: +All package variants include debug symbols. + +Currently defined variants: + +### `default` +GIL-enabled build. + +Usage: +```toml +[dependencies] +python = "*" +numpy.git = "https://github.com/numpy/numpy" +numpy.subdirectory = "pixi-packages/default" +``` +See `default/pixi.toml` if you wish to use python git tip instead. + +*Tip:* you may change fork and add `numpy.rev = ""` to test unmerged +PRs. + +### `freethreading` +noGIL build. + +Usage: +```toml +[dependencies] +python-freethreading = "*" +numpy.git = "https://github.com/numpy/numpy" +numpy.subdirectory = "pixi-packages/freethreading" +``` +See `freethreading/pixi.toml` if you wish to use python git tip instead. -- `default` -- `asan`: ASan-instrumented build with `-Db_sanitize=address` +### `asan` +ASan-instrumented build with `-Db_sanitize=address`. + +Usage: +```toml +[dependencies] +python.git = "https://github.com/python/cpython" +python.subdirectory = "Tools/pixi-packages/asan" +numpy.git = "https://github.com/numpy/numpy" +numpy.subdirectory = "pixi-packages/asan" +``` + +### `tsan-freethreading` +Freethreading TSan-instrumented build with `-Db_sanitize=thread`. 
+ +Usage: +```toml +[dependencies] +python.git = "https://github.com/python/cpython" +python.subdirectory = "Tools/pixi-packages/tsan-freethreading" +numpy.git = "https://github.com/numpy/numpy" +numpy.subdirectory = "pixi-packages/tsan-freethreading" +``` ## Maintenance - Keep host dependency requirements up to date -- For dependencies on upstream CPython Pixi packages, keep the git revision at a compatible version + +## Troubleshooting + +TSan builds may crash on Linux with +``` +FATAL: ThreadSanitizer: unexpected memory mapping 0x7977bd072000-0x7977bd500000 +``` +To fix it, try reducing `mmap_rnd_bits`: + +```bash +$ sudo sysctl vm.mmap_rnd_bits +vm.mmap_rnd_bits = 32 # too high for TSan +$ sudo sysctl vm.mmap_rnd_bits=28 # reduce it +vm.mmap_rnd_bits = 28 +``` ## Opportunities for future improvement -- More package variants (such as TSan, UBSan) +- More package variants (such as UBSan) - Support for Windows -- Using a single `pixi.toml` for all package variants is blocked on https://github.com/prefix-dev/pixi/issues/2813 +- Using a single `pixi.toml` for all package variants is blocked on + [pixi#2813](https://github.com/prefix-dev/pixi/issues/2813) - Consider pinning dependency versions to guard against upstream breakages over time + +## Known issues +- [numpy#30561](https://github.com/numpy/numpy/issues/30561): `default` and + `freethreading` recipes must be manually tweaked to compile against cpython git tip; + see `default/pixi.toml` and `freethreading/pixi.toml` for details. 
+- [pixi#5226](https://github.com/prefix-dev/pixi/issues/5226): lock file is invalidated + on all `pixi` invocations +- [rattler-build#2094](https://github.com/prefix-dev/rattler-build/issues/2094): pixi + 0.63.0 introduces a regression regarding the license file; please skip it diff --git a/pixi-packages/asan/LICENSE.txt b/pixi-packages/asan/LICENSE.txt new file mode 100644 index 000000000000..f37a12cc4ccc --- /dev/null +++ b/pixi-packages/asan/LICENSE.txt @@ -0,0 +1,30 @@ +Copyright (c) 2005-2025, NumPy Developers. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * Neither the name of the NumPy Developers nor the names of any + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/pixi-packages/asan/pixi.toml b/pixi-packages/asan/pixi.toml index ce25939a0fcb..afe9edc50cba 100644 --- a/pixi-packages/asan/pixi.toml +++ b/pixi-packages/asan/pixi.toml @@ -1,8 +1,12 @@ [workspace] channels = ["https://prefix.dev/conda-forge"] -platforms = ["osx-arm64", "linux-64"] +platforms = ["linux-64", "linux-aarch64", "osx-arm64"] preview = ["pixi-build"] +[package] +# Work-around to https://github.com/prefix-dev/pixi/issues/5557 +license-file = "LICENSE.txt" + [package.build] source.path = "../.." @@ -19,8 +23,7 @@ extra-args = ["-Csetup-args=-Db_sanitize=address", "-Csetup-args=-Dbuildtype=deb [package.host-dependencies] python.git = "https://github.com/python/cpython" python.subdirectory = "Tools/pixi-packages/asan" -# v3.15.0a6 -python.rev = "15b216f30d0445469ec31bc7509fcc55a216ef7c" +python.rev = "15b216f30d0445469ec31bc7509fcc55a216ef7c" # v3.15.0a6 meson-python = "*" cython = "*" diff --git a/pixi-packages/default/LICENSE.txt b/pixi-packages/default/LICENSE.txt new file mode 100644 index 000000000000..f37a12cc4ccc --- /dev/null +++ b/pixi-packages/default/LICENSE.txt @@ -0,0 +1,30 @@ +Copyright (c) 2005-2025, NumPy Developers. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+ + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * Neither the name of the NumPy Developers nor the names of any + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/pixi-packages/default/pixi.toml b/pixi-packages/default/pixi.toml index 0b5d53e41ef2..e3e201bece3e 100644 --- a/pixi-packages/default/pixi.toml +++ b/pixi-packages/default/pixi.toml @@ -1,8 +1,12 @@ [workspace] channels = ["https://prefix.dev/conda-forge"] -platforms = ["osx-arm64", "linux-64"] +platforms = ["linux-64", "linux-aarch64", "osx-arm64"] preview = ["pixi-build"] +[package] +# Work-around to https://github.com/prefix-dev/pixi/issues/5557 +license-file = "LICENSE.txt" + [package.build] source.path = "../.." 
@@ -13,9 +17,22 @@ version = "*" [package.build.config] extra-input-globs = ["**/*.c.src"] compilers = ["c", "cxx"] +extra-args = ["-Csetup-args=-Dbuildtype=debug"] [package.host-dependencies] +# FIXME https://github.com/numpy/numpy/issues/30478 +# python = "*" prevents downstream from building cpython from sources. +# Workaround: fork numpy, then uncomment one of the following and +# comment out the other. + +# Use latest cpython release from conda-forge python = "*" + +# Use cpython git tip +# python.git = "https://github.com/python/cpython" +# python.subdirectory = "Tools/pixi-packages/default" +# python.rev = "15b216f30d0445469ec31bc7509fcc55a216ef7c" # v3.15.0a6 + meson-python = "*" cython = "*" uv = "*" # used to invoke the wheel build diff --git a/pixi-packages/freethreading/LICENSE.txt b/pixi-packages/freethreading/LICENSE.txt new file mode 100644 index 000000000000..f37a12cc4ccc --- /dev/null +++ b/pixi-packages/freethreading/LICENSE.txt @@ -0,0 +1,30 @@ +Copyright (c) 2005-2025, NumPy Developers. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * Neither the name of the NumPy Developers nor the names of any + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/pixi-packages/freethreading/pixi.toml b/pixi-packages/freethreading/pixi.toml new file mode 100644 index 000000000000..372fcbaa2c48 --- /dev/null +++ b/pixi-packages/freethreading/pixi.toml @@ -0,0 +1,38 @@ +[workspace] +channels = ["https://prefix.dev/conda-forge"] +platforms = ["linux-64", "linux-aarch64", "osx-arm64"] +preview = ["pixi-build"] + +[package] +# Work-around to https://github.com/prefix-dev/pixi/issues/5557 +license-file = "LICENSE.txt" + +[package.build] +source.path = "../.." + +[package.build.backend] +name = "pixi-build-python" +version = "*" + +[package.build.config] +extra-input-globs = ["**/*.c.src"] +compilers = ["c", "cxx"] +extra-args = ["-Csetup-args=-Dbuildtype=debug"] + +[package.host-dependencies] +# FIXME https://github.com/numpy/numpy/issues/30478 +# python = "*" prevents downstream from building cpython from sources. +# Workaround: fork numpy, then uncomment one of the following and +# comment out the other. 
+ +# Use latest cpython release from conda-forge +python-freethreading = "*" + +# Use cpython git tip +# python.git = "https://github.com/python/cpython" +# python.subdirectory = "Tools/pixi-packages/freethreading" +# python.rev = "15b216f30d0445469ec31bc7509fcc55a216ef7c" # v3.15.0a6 + +meson-python = "*" +cython = "*" +uv = "*" # used to invoke the wheel build diff --git a/pixi-packages/tsan-freethreading/LICENSE.txt b/pixi-packages/tsan-freethreading/LICENSE.txt new file mode 100644 index 000000000000..f37a12cc4ccc --- /dev/null +++ b/pixi-packages/tsan-freethreading/LICENSE.txt @@ -0,0 +1,30 @@ +Copyright (c) 2005-2025, NumPy Developers. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * Neither the name of the NumPy Developers nor the names of any + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/pixi-packages/tsan-freethreading/pixi.toml b/pixi-packages/tsan-freethreading/pixi.toml new file mode 100644 index 000000000000..8dab473f605d --- /dev/null +++ b/pixi-packages/tsan-freethreading/pixi.toml @@ -0,0 +1,32 @@ +[workspace] +channels = ["https://prefix.dev/conda-forge"] +platforms = ["linux-64", "linux-aarch64", "osx-arm64"] +preview = ["pixi-build"] + +[package] +# Work-around to https://github.com/prefix-dev/pixi/issues/5557 +license-file = "LICENSE.txt" + +[package.build] +source.path = "../.." 
+ +[package.build.backend] +name = "pixi-build-python" +version = "*" + +[package.build.config] +extra-input-globs = ["**/*.c.src"] +compilers = ["c", "cxx"] +env.TSAN_OPTIONS = "halt_on_error=0:allocator_may_return_null=1" +extra-args = ["-Csetup-args=-Db_sanitize=thread", "-Csetup-args=-Dbuildtype=debug"] +# TODO: skip slow bytecode compilation step, see https://github.com/prefix-dev/pixi/pull/5737 +# skip-pyc-compilation = ["**/*.py"] + +[package.host-dependencies] +python.git = "https://github.com/python/cpython" +python.subdirectory = "Tools/pixi-packages/tsan-freethreading" +python.rev = "15b216f30d0445469ec31bc7509fcc55a216ef7c" # v3.15.0a6 + +meson-python = "*" +cython = "*" +uv = "*" # used to invoke the wheel build From f105cf2d7c20c9829b431c2ae0cdb1f07efaccf2 Mon Sep 17 00:00:00 2001 From: Joshua Swanson <22283299+joshuaswanson@users.noreply.github.com> Date: Wed, 25 Mar 2026 13:39:53 +0100 Subject: [PATCH 1531/1718] BUG: Raise BufferError instead of RuntimeError in from_dlpack (#31000) Co-authored-by: joshuaswanson --- doc/release/upcoming_changes/30937.compatibility.rst | 8 ++++++++ numpy/_core/src/multiarray/dlpack.c | 8 ++++---- 2 files changed, 12 insertions(+), 4 deletions(-) create mode 100644 doc/release/upcoming_changes/30937.compatibility.rst diff --git a/doc/release/upcoming_changes/30937.compatibility.rst b/doc/release/upcoming_changes/30937.compatibility.rst new file mode 100644 index 000000000000..7d624132f0c2 --- /dev/null +++ b/doc/release/upcoming_changes/30937.compatibility.rst @@ -0,0 +1,8 @@ +``from_dlpack`` raises ``BufferError`` instead of ``RuntimeError`` +------------------------------------------------------------------ + +``np.from_dlpack`` now raises ``BufferError`` instead of ``RuntimeError`` +when the incoming DLPack tensor has an unsupported device, dtype, or +exceeds the maximum number of dimensions. This aligns with the DLPack +and Array API specifications, which recommend ``BufferError`` for data +that cannot be imported. 
diff --git a/numpy/_core/src/multiarray/dlpack.c b/numpy/_core/src/multiarray/dlpack.c index 1c901ff6bc06..fc870b90ddfb 100644 --- a/numpy/_core/src/multiarray/dlpack.c +++ b/numpy/_core/src/multiarray/dlpack.c @@ -577,7 +577,7 @@ from_dlpack(PyObject *NPY_UNUSED(self), const int ndim = dl_tensor.ndim; if (ndim > NPY_MAXDIMS) { - PyErr_SetString(PyExc_RuntimeError, + PyErr_SetString(PyExc_BufferError, "maxdims of DLPack tensor is higher than the supported " "maxdims."); Py_DECREF(capsule); @@ -589,14 +589,14 @@ from_dlpack(PyObject *NPY_UNUSED(self), device_type != kDLCUDAHost && device_type != kDLROCMHost && device_type != kDLCUDAManaged) { - PyErr_SetString(PyExc_RuntimeError, + PyErr_SetString(PyExc_BufferError, "Unsupported device in DLTensor."); Py_DECREF(capsule); return NULL; } if (dl_tensor.dtype.lanes != 1) { - PyErr_SetString(PyExc_RuntimeError, + PyErr_SetString(PyExc_BufferError, "Unsupported lanes in DLTensor dtype."); Py_DECREF(capsule); return NULL; @@ -647,7 +647,7 @@ from_dlpack(PyObject *NPY_UNUSED(self), } if (typenum == -1) { - PyErr_SetString(PyExc_RuntimeError, + PyErr_SetString(PyExc_BufferError, "Unsupported dtype in DLTensor."); Py_DECREF(capsule); return NULL; From f765c6397dcae9abaa7471751ea13792cb4d21be Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 25 Mar 2026 17:55:22 +0000 Subject: [PATCH 1532/1718] MAINT: Bump actions/cache from 5.0.3 to 5.0.4 Bumps [actions/cache](https://github.com/actions/cache) from 5.0.3 to 5.0.4. - [Release notes](https://github.com/actions/cache/releases) - [Changelog](https://github.com/actions/cache/blob/main/RELEASES.md) - [Commits](https://github.com/actions/cache/compare/cdf6c1fa76f9f475f3d7449005a359c84ca0f306...668228422ae6a00e4ad889ee87cd7109ec5666a7) --- updated-dependencies: - dependency-name: actions/cache dependency-version: 5.0.4 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- .github/workflows/linux_qemu.yml | 4 ++-- .github/workflows/macos.yml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/linux_qemu.yml b/.github/workflows/linux_qemu.yml index e954f346ef76..bba244cd12e3 100644 --- a/.github/workflows/linux_qemu.yml +++ b/.github/workflows/linux_qemu.yml @@ -101,7 +101,7 @@ jobs: sudo apt install -y ninja-build gcc-${TOOLCHAIN_NAME} g++-${TOOLCHAIN_NAME} gfortran-${TOOLCHAIN_NAME} - name: Cache docker container - uses: actions/cache@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 + uses: actions/cache@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5.0.4 id: container-cache with: path: ~/docker_${{ matrix.BUILD_PROP[1] }} @@ -211,7 +211,7 @@ jobs: sudo apt install -y ninja-build gcc-14-${TOOLCHAIN_NAME} g++-14-${TOOLCHAIN_NAME} gfortran-14-${TOOLCHAIN_NAME} - name: Cache docker container - uses: actions/cache@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 + uses: actions/cache@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5.0.4 id: container-cache with: path: ~/docker_${{ matrix.BUILD_PROP[1] }} diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 16207cd50b2d..9f59bb9f7002 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -50,7 +50,7 @@ jobs: echo "today=$(/bin/date -u '+%Y%m%d')" >> $GITHUB_OUTPUT - name: Setup compiler cache - uses: actions/cache@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 + uses: actions/cache@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5.0.4 id: cache-ccache with: path: ${{ steps.prep-ccache.outputs.dir }} @@ -74,7 +74,7 @@ jobs: # ensure we re-solve once a day (since we don't lock versions). Could be # replaced by a conda-lock based approach in the future. 
- name: Cache conda environment - uses: actions/cache@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 + uses: actions/cache@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5.0.4 env: # Increase this value to reset cache if environment.yml has not changed CACHE_NUMBER: 1 From a575a3bb2717c4feb189d7aa5a398bebbb2474fb Mon Sep 17 00:00:00 2001 From: Maarten Baert Date: Thu, 26 Mar 2026 10:08:31 +0100 Subject: [PATCH 1533/1718] MAINT,BUG: refactor ``PyArray_DescrFromScalar`` for parametric support (#31067) Co-authored-by: Maarten Baert --- doc/release/upcoming_changes/31067.c_api.rst | 8 ++ numpy/_core/src/multiarray/dtypemeta.c | 13 +--- numpy/_core/src/multiarray/scalarapi.c | 82 +++++++------------- 3 files changed, 38 insertions(+), 65 deletions(-) create mode 100644 doc/release/upcoming_changes/31067.c_api.rst diff --git a/doc/release/upcoming_changes/31067.c_api.rst b/doc/release/upcoming_changes/31067.c_api.rst new file mode 100644 index 000000000000..d9ab80c1470a --- /dev/null +++ b/doc/release/upcoming_changes/31067.c_api.rst @@ -0,0 +1,8 @@ +``PyArray_DescrFromScalar`` now preserves parametric dtype information +---------------------------------------------------------------------- +``PyArray_DescrFromScalar`` now correctly returns the full dtype descriptor for +scalars of user-defined parametric data types, including any dtype parameters. +Previously, parameters were silently discarded, which could cause incorrect +results in operations like ``astype`` on scalar objects. Internally, the +function now delegates to ``discover_descr_from_pyobject``, which handles +parametric dtypes correctly. 
diff --git a/numpy/_core/src/multiarray/dtypemeta.c b/numpy/_core/src/multiarray/dtypemeta.c index c8fb6a1c8490..d1f5a3f93cab 100644 --- a/numpy/_core/src/multiarray/dtypemeta.c +++ b/numpy/_core/src/multiarray/dtypemeta.c @@ -573,15 +573,10 @@ discover_datetime_and_timedelta_from_pyobject( PyArray_DTypeMeta *cls, PyObject *obj) { if (PyArray_IsScalar(obj, Datetime) || PyArray_IsScalar(obj, Timedelta)) { - PyArray_DatetimeMetaData *meta; - PyArray_Descr *descr = PyArray_DescrFromScalar(obj); - meta = get_datetime_metadata_from_dtype(descr); - if (meta == NULL) { - return NULL; - } - PyArray_Descr *new_descr = create_datetime_dtype(cls->type_num, meta); - Py_DECREF(descr); - return new_descr; + /* Extract metadata directly from the scalar object. */ + PyArray_DatetimeMetaData *meta = + &((PyDatetimeScalarObject *)obj)->obmeta; + return create_datetime_dtype(cls->type_num, meta); } else { return find_object_datetime_type(obj, cls->type_num); diff --git a/numpy/_core/src/multiarray/scalarapi.c b/numpy/_core/src/multiarray/scalarapi.c index a602e312727b..d2a08031cbd9 100644 --- a/numpy/_core/src/multiarray/scalarapi.c +++ b/numpy/_core/src/multiarray/scalarapi.c @@ -402,67 +402,37 @@ PyArray_DescrFromTypeObject(PyObject *type) NPY_NO_EXPORT PyArray_Descr * PyArray_DescrFromScalar(PyObject *sc) { - int type_num; - PyArray_Descr *descr; - - if (PyArray_IsScalar(sc, Void)) { - descr = (PyArray_Descr *)((PyVoidScalarObject *)sc)->descr; - Py_INCREF(descr); - return descr; - } - - if (PyArray_IsScalar(sc, Datetime) || PyArray_IsScalar(sc, Timedelta)) { - PyArray_DatetimeMetaData *dt_data; - - if (PyArray_IsScalar(sc, Datetime)) { - descr = PyArray_DescrNewFromType(NPY_DATETIME); - } - else { - /* Timedelta */ - descr = PyArray_DescrNewFromType(NPY_TIMEDELTA); - } - if (descr == NULL) { - return NULL; - } - dt_data = &(((PyArray_DatetimeDTypeMetaData *)((_PyArray_LegacyDescr *)descr)->c_metadata)->meta); - memcpy(dt_data, &((PyDatetimeScalarObject *)sc)->obmeta, - 
sizeof(PyArray_DatetimeMetaData)); - - return descr; + /* + * Look up the DType directly from the scalar's type. This avoids calling + * the NPY_DT_default_descr slot (via PyArray_GetDefaultDescr), which for + * parametric dtypes may raise an error or return an incorrect stub. + * Once we have the DType class, discover_descr_from_pyobject extracts the + * correct instance-specific descriptor (handling void, datetime, string, + * and new-style user-defined parametric dtypes correctly). + */ + PyArray_DTypeMeta *DType = + (PyArray_DTypeMeta *)PyArray_DiscoverDTypeFromScalarType(Py_TYPE(sc)); + if (DType != NULL) { + PyArray_Descr *result = NPY_DT_CALL_discover_descr_from_pyobject(DType, sc); + Py_DECREF(DType); + return result; } - descr = PyArray_DescrFromTypeObject((PyObject *)Py_TYPE(sc)); + /* + * Fallback for scalar subclasses that are not directly in the scalar-type + * registry (e.g. a Python subclass of np.float64). These are always + * legacy non-parametric dtypes, so PyArray_DescrFromTypeObject is safe: + * it walks the MRO to find the registered base type and calls + * PyArray_GetDefaultDescr, which works correctly for non-parametric dtypes. 
+ */ + PyArray_Descr *descr = PyArray_DescrFromTypeObject((PyObject *)Py_TYPE(sc)); if (descr == NULL) { return NULL; } - if (PyDataType_ISLEGACY(descr) && PyDataType_ISUNSIZED(descr)) { - PyArray_DESCR_REPLACE(descr); - if (descr == NULL) { - return NULL; - } - type_num = descr->type_num; - if (type_num == NPY_STRING) { - descr->elsize = PyBytes_GET_SIZE(sc); - } - else if (type_num == NPY_UNICODE) { - descr->elsize = PyUnicode_GET_LENGTH(sc) * 4; - } - else { - _PyArray_LegacyDescr *ldescr = (_PyArray_LegacyDescr *)descr; - PyArray_Descr *dtype; - dtype = (PyArray_Descr *)PyObject_GetAttrString(sc, "dtype"); - if (dtype != NULL) { - descr->elsize = dtype->elsize; - ldescr->fields = PyDataType_FIELDS(dtype); - Py_XINCREF(ldescr->fields); - ldescr->names = PyDataType_NAMES(dtype); - Py_XINCREF(ldescr->names); - Py_DECREF(dtype); - } - PyErr_Clear(); - } - } - return descr; + DType = NPY_DTYPE(descr); + PyArray_Descr *result = NPY_DT_CALL_discover_descr_from_pyobject(DType, sc); + Py_DECREF(descr); + return result; } /*NUMPY_API From 0419105da9cd0a15a4e02bc22019c2b65272c68a Mon Sep 17 00:00:00 2001 From: Neofelis <118119547+neofelis2X@users.noreply.github.com> Date: Thu, 26 Mar 2026 10:14:20 +0100 Subject: [PATCH 1534/1718] DOC: Small darkmode theme fixes (#31028) --- .../_static/favicon/apple-touch-icon.png | Bin 23752 -> 3401 bytes doc/source/_static/index-images/api.svg | 32 +----------------- .../_static/index-images/contributor.svg | 2 +- .../_static/index-images/getting_started.svg | 32 +----------------- .../_static/index-images/user_guide.svg | 2 +- doc/source/_static/numpy.css | 8 +++-- 6 files changed, 10 insertions(+), 66 deletions(-) diff --git a/doc/source/_static/favicon/apple-touch-icon.png b/doc/source/_static/favicon/apple-touch-icon.png index e6cd574260aab3259b7f050552cf1d7789ae86f7..4af8e0e96e311f9242f0f699fc003208eeed7f58 100644 GIT binary patch literal 3401 zcmX9>dpr~B|6gvoUpCDpVRK)`!77ZfO&fCyrHCT3PVN>`JMJ+$Y#ptL7?yN#7r7i_ 
zPOFnkhzUorm|MBb+`@0Zzdt_D^ZIsi2dHz+H?60HlBia%f=Uk6!?Q^o|3@&NHrHsj%nfxHHpGSj+zF@N*iC)^OiasC1-3xvv7?gzf0|wcK2Do zLY#eIk?awqB#db5B0pM=?v~AzJeFvN;m^6%q!gq2G!t2;kHPqco?NpG&05#=OcEHz z69Hx{4$=c$QE8`>q@g>+XKXz>O2h8|p=Uk0Uh?_}&|MgecS!Vq(EVgdXX^iu9i3^F zwt+?2X<2PJ`k3$jp|~=2D#;QIaj@yN5}7sUQgYhvQHXZ?1c$t8nIS>d|l~>TJl)(NtrFOvUD*sX>*1qtGniW+U0|8M`F4 zlCeolwmL>lcz}KK^Jpp=j^cV}x#qBP=l4#PU8I_bDev@k!J4%>cDT-u^oez^CX8Q@dM0FW%?Va-OC?E*W-f(h;yNjtw{#Vn0SAT*_=P|X zbjhsBIj)dB-$Y#Tc3fpn$GI80Q@8xqtuqSELIRW!vy%Hp6>8mAjPl}jOmQ??` zn`ZbLlEmtgXqtSq1x}c1JI_);I)rj?^IZ=2nLluDdK(|k_6Dx4eGCY`CHMa39Yi_% zm}#%}2rOn;X}%QI_;^o}%O5ir#v8_9RjDIR$U|OPZ0PyB|ADrVl_S+h33&lN14K1% zv!D9G6#;rj-b}1(WKYa0(nq@X?}+{2kHv*oBz3A`rtD)sK#w2vJo#s=M|gc5@>K^mf{z39K)doqH-<_Wb|}m z_zE>nCm))fProl^9Y8jWF%%>{@&^R3I9iUY2{w$r2HFk+nAf!AjgYewjdr8;QHy|u zb!?i;%D0{q%WV_9$uZ}4mx%WF^(}i(C)RBu|7`clOOH0X3R&v=cIuwK;Mxd_-Hp9O z;#dxo+_mfbPJKN&Tsf_Hw?2-!ba(i!4tlrLHT9Y~7PNgiDap7TPK0)v?ZZg9{dXCKuEOb7DpkoK|kQDr+QuZZcJT?CFu!$fEXZU5+rmXHe6W zGn`O%@5nt1+?c!rD%*kwd1!IoM)JbdC+H<;)?>8BY97zPI*c{m*s@2CXZA)CxUc%& z25F6`#dDrd@LpUxrJ>q1KCRkh<#w&BAb6Z@%pd6?b)MiJ=?uL4OG4(y$mh`BLyGxq z`;f9S-B^_HILB!Y?dK3e<~VYo6iq}^E;vQ+^?PK#k|zg5f%49^IMlgVrtRLmDvEIM zr0Qwfb0bxpwey%Ru+{2c=dIjR#WoF17;+BUF;P^x7=9+U6ZVevXka7n-HH*qkuerC zV&fKuOgteJ!$)a35F-8!+IVv~5|+d~PZfq<|8QuTKEXTn=CH{F6%Pe;b47RDOv`-A z#Q~t|rT9nVDdy=CD+b^J$tfokj)#X6v}i~${F>xPx-U{8+0FLS;z9_UTXqXCa~MGW!T z8nEw-h_Yuw03GqW64+3@BSF6&U~3`Or6N%91pOv~BGGkkDqY|+s1iVb^fwXxKlDxL zb{@^T<1?PcX45*l2x(Eko5S>vCBjAAJp>Es-AuFu3P$E6WNIY=G$-s` z48H;*m|>|WC@t`n<-SRoui9|>bKL;Nt>~5o8$n_|0f9#KM%1>+%acNe zGg5#LlM?H1<_%w{?Sz8h46REZhKc7e_z;NY*3h$E#fm&BiL_e zIF^~~clhsXaH2N5#N`3&G!Upd#ULte=Iace^@r{Rx)S0X=8Tqu8UP)i&PGCaZ~}fC zw8`G-(J3ds69SH6F(_f_A`(~Vatv7%AVzRko6Z&Z@czLvJYY6W6-X-N10*hcZHk&m z$b52d<_iJPB~^p8#QIb*=Q$6PGS|SB49A0A!jp`BJPsw=fwr8rbrBuMeniVb40Z_> z0)sD82hp*tpbpxsWq<+7K<(A)>`PN?TcQ;bGGRs}&~p5A{B@<}d=nFGTwwUIEzozX zA}@vGyg$a+F@r}t9d}i>Y<@$>Q?p!W6lzd%Ox#?;;Z{GKG;B2lBV}F21Nqy4iJ)a~ 
z$R=n+w&R~xhb3rE1~~qT(;O^8d}rYwX6v3iMqS~}he#YV`O9-^)HeKtjy8gM{kEG9 z9v`T&A|aDMP+mQR;Tz}(II?|?_JnoXUK>Gh+#x{QT})8u`Wb{h9jZ}YX7CPoFWGyA zJ2ZvJbR=X9^KA|DS%U&H`yVoRMZTlnt%DdU9LQ&_#TnE0@daT!kY_N)32Ijag1#{} zBhh4grKkzlbN#*`{b{k~0(PvmPM&OJDegcv07ACfcUv_kr znUeF|a|^|-KN2Ra+BXs&waHqy7Ua=y>}lPaNjAwJ)ZbF>Te2#Iu0Y-n(+}4eM?^WR zViIr1vTpmIdunDuB_)Twls*pKFt5q!y$LOjo{P$Zui|nGJM9*cK%;g0-(5Pqiq}E)~22s_=>%?S9ge_U6sFmX}-?nXUfz1z!I|6eO~><@+$aof6w*YtqbGN z)5wq&2t!?i^q0U5xog!;VQ#vgWnO^di|1z$qE{RYS8>!?`XpNF_d5T`&>o{Gmh`$V zf-shnd%;wv*7dvczj(bDDu&bxH+lya?^J(ht z-i=Hp)H(`nE!yy5#}LI$Y8e-Ho2pYjh=nn6egzC}{52e;{w@Tpg=i%j%j}MRzh5Kn zgMETuTIjEelzJI)j84n6XZ89Kzz_PDYv&{1oBs91{UUV7(1ebyuxSvyP)0PW_=Ole zgS=SRlzKX_J@ju#UH3(O!v}h`eNfOg1@bC<%z0nW=#|$ktfI)jDBx)D3MxnXyT8)X z)AsUlVP{%erd2K1;N1;r{B4<_B|~rU$%Z&7XRep>^LNJS5C6e(T+oJ6u7MiPTrFwi zKV?>9l;QcTsuGD^*)IGvsb#MH+dzC_q&e7`Q~Io5&$K}F&Bvjj&n3{JR2K`~sQ#c6 z_GXS*E^uAC1WBw5!3n$2l5HYWLx2{gNm{3f56Wj{T1gc~iD-5pm?a)k3^Bojf^+p{ dW{KMLe$5$M&=}~qHo@Nr;9&29sYG2#{y)3uT-g8s literal 23752 zcmV)bK&iipP)%bMD)P1Q`* zX0~eoF#P9cX1u#ec@n8AV^1D4v!FBC*pU^>mI=p_Xvu;e6bT9>2m&OC_q);P$G!LW zZJpboL{S9M=msCM*i|Yz!R_vQfA{Ni&+nY?e4pS}yaWJm#TVV)@3`J&{>Fd!$Cj$9 z>g^8U`M~&(2fsgwo*sL9&-7RK+w~sYeQnhW^!7TBq9Yd15JjU9{ zz_VhfK$#3^vRUh9v$chMO@F%3*YkL=Rj2Tg{Tu;=z+FIZAp#!z^A~GsQ;Ajt&Nj!j zd+0bFV$lM_MhIP@%n>qY1CE6@92Jk_D}H?X#os-=u!W~l#z$TKZ253MPHzjKKio@p z6<&IdyPb_4PKPt*p{m}ks*_d(vJVUo8sI^Q+z+q`kZOPqMA9IZNS*=kmW2$8gy)z2 z?3n8{_{huz*n^<2yE}wvy!jP<-6R0$$KDn|mwlUV(n6GTK3zShVmtTen&8+**Fn1& zuZ@-+0CotZgTh?`bcmQLf^mQblsp7kfa4%9L7Yo~y=+5XhH)oCo15UO%%`h^*%MEG zGF#4hebnD`3jlqSQ26f0-lWgJ78#`A*fDctVV`NuPn(r(?X}g`?l+J|n$LCh*JQJ(tMR1Xao9w}TLk`ILUtNqCq(uE(hS<9h$Kq6 z9ukEpVrv=#4vWY+T3&)S1nqpb5H9$MaOuRt+i5)YlwZo{ew2^73+N5+K4s5$ z`r`Mx)w>Zhdkjpc5#9&LZh*~zGytrDND4$TA_MM*AFGv2G9)x&!xzX3i4}kg5SfF; zWg;A~@CJR~AIa9_Mqc`@$5z&|#9!U~x|D^x!7+CMeVxVW{?744JQc4Bv$1MArrI;M z(YR)>q3j27KpY$*q4{zZfTRe_2Qrj0Hz*{tB3LD5n&24`8MGmWY0PkFF>ScIlB2hh zPGhzI>qoNJ-#k{5hE?+33emU)Z@&xZYZviouk-oe>1;EO=`dlq*EptAi2V?>kZct} 
z14QbDthOSq5RM4Vy8CO5rJGn~LZdYTk~u&Y2rL+Ih9c9j90f5H7&BaO?4_U1{&F1u z;wk^5(6lNQ>zi2WQa|!TcHCMsq<~ng$@St0a;ccac83-R9O1nL9uVN3sPraC0Z~0! zDDS~kYx)&k^;MwfT2EDU2Pq^d7J>|*>4;n)c}idh0^(d4m|@@Z)9}1pdO9rBOx$hK zRCXgOx{G(ln?Lo}KX0g#WSis4c7fR?bKw{TRFV?@3O@`;| ze2`W}An+klfVGPRri_?LT22z2u;3*LaS4`}(=jvo^MUWreUvLAA2mSB(O#OiTg2*928bMnNF>$~;@0Hyk6-VPu{*E4akr zipaxa`G6Q{B~TBtW-X=gsHHaxZH9cHdT}$(Ytx@e*eJg)=Gz583PP+xut0JiVouvI ze3LeOC+`PCKfUn$MCED7HK5!qMmKx>rWd97hK~xM1--rIOMYjEcz6)NLq_CdBo0H+ z2rwy-Sea{hN4!m|BLH$BwU&JzV$OuLCrxNxw+_!uFW5}~Q;%pn{a3n%eQN-Hi+-K! zBCF@Pd!&6O*4(snuM_Z5G59qR+)v_O5k$lxfjD;rGRr)PX!8h!+N1%gtiEXwC#~3j zgm2zN!JqtL;f=Xp^={*>0d$!+Y0KW$bKHHjEACbM$qvuuVIl5=Bm0Ed1F(^l8WCz) zt_l#%oht)(<;9JXF1=XdV+cqHP;=_E0BH-EGX^s@#96}j7ldaR1k*2^&CcOhoO}Jz z0JNr7y}jmh*jMdYG$W7}#G4aPNRfs{r2h5p|1@S`wU#XcizxN)E7d z1croi-qMabt~(rrg-I)BY9Sjej3>j@u_qtZX3eiu%5m+(|DyuvYal{-?9X4Sj=QEd znTQ>vm50PNhXgnPVz&rVmW+$Q1I3H}y#rorX@#c2HmXbOhLBu>$UMj~5^sX?wq-Ch zyqX^x{QXbe=IX|+U$i?P&^IanRz7oU3u0CEg{HvRZ81+;X-&J9?L=g!fV(N&E<~${ zG~fDL^TBUaG804s5Ubi$0pzlXoCi4^hV~*Hne;8EY5n;Z4!ZMrTsnwFJ%1B_v&`e9qAeBp^BA$p|zq0^oogVSB|=+TZ?)sm@A#Mx-Q_oAf4180$(^pD*oBv{64}98mb@kmi>xp<(lT+oy zO}hhUr!mfU(smdqabF`h+ab`AswRO}M{eFr(+h%YRSV3C$P~#5iZ)WrI6{oU+Ofdc zu|n1z>-*ZnIw(-l>xsYkQ3_r|BdyoudbZJsQy*GCXKZZFw{|`TXZG214=hy_Kq@-J zjo*A{1GZ`)SuWC1E_-s<03Ldgn` z0(6XtcEI;}Cbo);0jCLbmOz;9I_U&Vxna5nUaZzJ(h8dWM(6W{hoEINXwDi@B;7+|aX>`&3%M6SE5RCp)Ijuy z$Eq83AXy|Btx}MYsAa9}IgSbBJPgiRgeO-WXK>p0Rx8^dP#*o(iG=Ce2fP)a;3!qKcnn2>(wI-Fq%2k0Z=~EGc(_-a(NPjpqm~gCJOnXjxb#`GD zXjtuk>h@GvZd-&t{1o_~T2WYEP z9y*UFRNV{R94)|d62@Gl#Yiajd?Cvr%k^4UBTb*!G2?KDu$od=+3(N~K4)7w0^CIS~WjM`Hrfkg21_ft! 
z!j*-Af$s$y?+w;ZOuLUCcfHBFR(hr_1{^TNL6UloEdsXcTqJ=;BS^By`~JXMM>)Md z8H)_RWdQR4W+^fSiz&hmA;kHD!Dx^*6P0aJ%TKaz8K93m{j>*c_mb81@wj*~HGg^>pF|rGrqOMmS)m8xty+ZFudds^gLNX1*-Jeb4vhy3MO6#RC;sY{nwrq9kEPI#P<9(}I$*eqmODjc2Y_ut zG*RFQ#EV35u6~uUe_aE#HhK!hMh~V1GNoljSUUxn%OFO5h8F{ypUiqPeJjLsZWf>g zJ;$A{t`<`_w(Qh2vj$c1y{p*_9W_p@$`p6E$YMjgG!py$)3voY&*Y}@5I#E@fAmB<{2zbhkOKX@^ybEOyXHG`9z!nLykW zbIe;pq$jZq!8C~}LulSQW(_Wbb|MHwCqE|q0%pe2)!Csx=v3l-8Q_XmTfX?ib20zY zQo_`r-352TahfAFB{41-jODE}>>6)&Ff+Y6^0mjcicr?;(Qmz+jBksjJUR`Y%XWYo zeYeAr4xPeTdphWhWW4}2H-(q77SY#0t`S>PszpLBfw-u0XCh|I26iH#Od11{_cy=keDd&bH&z9Hn@8E>7p|d?P(ZoAgRhwy2i_qBm;4wC{h^?h~WHOAsBsS=o2gW zhQ6ol;PJjYUNMl4JpF^Js;auWxR+>R71LozBGLb6;vvgMGa2AVwm82rqJFm@dSWn`6drb*$}x<7hguTeNZ^W)HRDqgA4H8a*YX$eUlu_1+PUi$SRhSpt)m+AB~( z6-KRuBca7aXk#<0-pV4z#4mAjpUf;&vO!;a+Lx9pYU#~QJ(!HqO;yDu6& z2cEpY%v$zlVT_2NRTFdT1x}+On+?P5j&biNcos{G8 zYph+~sFtl|RY-WsT60!JF4-VIRfxxyWJSB8PPTs{UD(W&&88xB_jiuhCsTFXl>~2$ z?1e?A7_A+%ji5f}8ltwT=53&REA!(`!(^Q|tVskYF@7mpa7K|tn>6PI94-X@g#yjl zzW?r#*{%A{8-7mDarX-c8}4yEd4Sg5V}RX=a%2_1K@)T)lGLbNlH%ztIp>YPBqUT$ zSY?H=D8jj@w5og&*fFsn(9->#3-Nq7QPofz+acDu zS4CMwa0s;b0j348np(uAnAyLtVo^zy*mzyG{)n})rl@>IozsJ4K+IF{mS`CY@^)qw zZ~XN49=g2s-~Pc*)Ea(kog6X3Pr;av3FKZB$HfvT){(UYcgqEED!KM|<<5#zWZG}C z%Eg4-S9Z}KHt=qAQ>cU-a)Bb_u;_>7>!D)@Gb^iuKmQ;9$;?LYFYE8uZWeob z+|S+nl>@G0?suRaK0H|LLP}IM(k4_{-*X4OX$|0-hA4|tpN_h=)1(Yq@(mO6w0O?s z#6mE=(LQEFK&!NDRZZRfBF=-f_F+Tb4`3ffQgVf7!N@S=tue8QqN=SZ%)u z7^uz#MHWCzLhLB4ydH#nD>N8g4*bin{`bGGIpEEI_4Js>c7AnRbt2axZt{?0@SuSl zBJBVE>eP|1qB;6x6p>Ys=RkQW0A6)yFU*_#$VT19 zjRAf9xEq^k-02|PWgMpymInZ;1VB{8r=U&<75vum{|!Iy_AJ@P-)ae87g2x=6~wfc zwdo=jY3L6QSXbr5wpoWe zl_L(w4jAsB6`i7K6Ns`BYs%$jZ0d|EI{1UZtC)90Af8iA5kMxR0S>`=SbK)Xp7X=P z^yR_v*@2!L6U#~gVAS=({9L?{sA`UfK}Tf2AUI?Qjn+}gND1z1f-U-)?^?#K5Axd= zmEX(ZR8LDMouV8rE0BoyLE%XN{e%n!Hkd#xHf;kxpBXC@;txOS_dQ-Yp=nc?P_hw@ zxgGoN@g(8K8D>)A#M%Yn4u|HDIJgf)r>dnw2YLxF(yXTFwnlp$cT|r3E-gW~DVjek zEulDtG#pVAXitYWJOi4Mf}fvrCA~P8Z7TFX^@#d8R+d9wz3|#ZX{_B*@4~dXCYp-Y 
z)I%qvJ4jJ6@Fo(C0;T{-YB%tQ5s~lA;D#ejEUtCo$#GPCOeT0 zWZ|Vs=xJT1Y1?+Tw%PI<;wtR+6YeLrd+4 z@7a2Q-#F4=TL&!)(?*h68IqYuL>w5fivQ@@B4ihe^$UZ%APuruHIfROmB~OBaOdaKnU!=Qn;Dov z4k`z{iMDttDaZQf-b2&9`^{Rf1}&~NI}JizNFAF}|B>w~vjs{G!D=IjF`7ELwHk8o z%4N|6BFvGxixwp*&48G-#p#PP7U58k#l>LPk@3FArF^SmF3sbnT!ZQYpG zid!5?$_ji3EQFLL8oLk#etKd-Z(R2(dZOQbX&tT)-VxdJoeS}UiJ2BSPOGPS*ABV} z*=&%GCww#jj!}i z9LZHO&Q%$pQEbQ7bnZ&jIyMkr-A+emLV- z()QMikO38$D1IpvAE)CB&#l2{8~bIG=Yjqb=yp9vcA5W_6Xkvc)GORk)TVMeo$9JJsdtqccln2s%H z)5~nlEnEun1K-%ViLeeD1wB1-wCfu(;lEv5C51L4W{2@`pAcPRqaoN@v?yo}89yw{K!7`QLkED!vHDKQ1T z$4vJ?i^!M%@RuLIvM?6b<`^vG=Q#b+e<)KIjg-TGd9td-HPvDYjV?_ayrA8+Ub}_e zVMK{E*tHfZ>Xf6a1ploF6>H_6P+g=V5J${8Ij@7K6uCspDHC!kXM!nsn0w*mb4%#y zDL=h9PPgNY6W%>-UVF@Sb@FBxM0v>JPKYU%)3ik`QAw>fiXagQKAN7Mp~^0ZObVcr z-eZKlC1<7)};){#30E=Y}qg=eK4oi_+C|+S}c@ zk}yqGj(qtK-}qgM)jfl2tcZ>&*+SvMN-{oO6|gXzuL_3V9t+X;xH6_n%<+o9m@dFc zF6?vb6JatDcUwb8w!1WkDB)qybV^k2gQ|eWsU0t>PBR?cyp*zee78-lRm`I1n?~gv zMY2Mwj)aOA_N#bdE*1{?i@}nuF+pKpEa?9~hwY}HLW)SOT72zcw{2o7mPoo)pp%Mw zv4aNo9zxlM(5I2Q$|fTz0Hd8e7JXXYTfk66n>UgtBqvleAQ=?PGjMn|3~>gYJ7z<7 zHMWq=ou5bq11M!s_Bvsq$C~a>|IAbur`UB;!OR?ddT` z+Q00?8VasSH@ej|p=Z+z3Guvm%%Lk^`oq`%0H8j4T1~OklNKXm3}rktcEZ|lsvw*l zJJql-^xW0WgiXZMVs)g()YjF-LTooQJ0Z41BC!@K8GT#yzb%#AR4o#9ze<^6Q9g4R zO-MwWJS7r^lN2+dk2w`*kxeuR8P#y)QSyRJD7QZI+DY2C2?j zqJlUPsb2RFEoAW#ej`GCB)(YGc2<3n$s&(ICTMWc(2i4_acemh1o`Rt6?eLSBAuyd zL=3=@AG}soGiz(zlxgv-(aCg;(ke>1-5TBZppA%1uu6K;dvV>?`dWIIvB0d>?*ZzX zPpSiMXn0xYB#nfZ^4V~FH5sm+7Sz2SwG`nyOS2VrX#+LA!C>Mq2BKVlqM- z$ba*P{r`vZmNmc9W{Hjt2)Rg+%Yc2G)(lt|=lpE`;;3y}tjjOi{soP0>rGm$1C-ug zr*pE}ZATPD*nZG*U#>5x9CP*UlOiAKSEynjtDFYn8RN z2-tHJ85A)Cp_Q|NZ!hI+xZ=bU>1m)a@Z^13cP{Y>bJaCm5ii7yNgmnfHmB`g$8irE z=z1B=llOs3E>6B9Mg{|G z2Zh1la#)xNy*;`3z>wdEXa;ffB9&_qPR}t}osB0=D(NC@vyLbw^`KaLpNQNOX>?GB zm!UEeQL}7qRib8y(CP+vhLH0@oVS8g00vM9Cj%$GxN5nQtu5qh`qewJ!Z}TR+8p_E zji4W1Q-jy$!ghl8pb@!WAiGJ<5Ox%BmgTGe`x`wWb)$xdmH0woQYczxnWa^)4jG1+ 
z3qUv~p-cv_^Q$&oFgBC!yF8OF>#+@>^N-KPcQ@zi9g}NuVfKh)I$@yqVos9@Wv_AMUXq=J*50)|4PgW)001BWNklqVebUn6 z2w-$3PYpLmwDALx*^p+=53-Br&t{iKP@%4!4r%QEty+@|n_}cnG0r~6HG62SQdE^M z(xemO(L7{P&NokTNoZy%oDgD!mX~OtUDXSj)xx58$!81p)*NuD|(HnYtOlRZodQFXU@no#Js@nH!Y}_;=3_HZ=5UuF~d5=J}o~=$* zNfFU`z#O9Uzz`(T+!*rVUvMR{P(WeHL187I$*=Z(ty`&|?~BMAozhiUjM_n;{g1CU z#!~TJ5WC;_=rRtpanw$zW;7c}q$H9iQ^R^~GjGYfo~BW{D+nUZ3w5_rwIXc|3onAv z)cO!D&t+2_!fbk_FEvxq4>!_G+5Q!;2IEzU6~CGiuY+N`A$AH(CmbF!lqQt~qhOIC zRTSrR0A6-PW^IVY9K)5oTgaRk$orciR@GU*#wJX)D`p$S?$et}+AcBXAQ;_4G9-*keLx=4KvKNjhPvxMH zIuWJXd+$oaQF+wVbVvG)V0uG9L(5D=SQJOpR&i9*RYue-gwwF*f{kQt{SoUsWB#e7 z>At=)8w9<*P8V8|?LnCG+?IN=K?7{qVqCYC+TjvptPL}cxU-?-%(*tdxRP;}F}k=? zyot(n-+Y>^KR8uC>e$(~gC^{7X!aBKporPdNG=Pl3S=6o6HBT>q&>>F7O?*a;-#VX`(Z zVWU_RhmpWVp-?Dbc`obBXD5cU1E&#|n|5Bke7ad}$#N&e>F~lRo!d__9gwJA-b}DT z)K9V~Ao;E|+@=AoV;fh#Sf9hi#Z_?2sz@aJIz-A_Dg&Sug`D+ALr0e9#mfexgZ{v= zfuIa=#HcyYjZd4P)vbxQ)-`yRo)nO=vCQghGM1}&v(ZbZb8^Vtvu!C6gG~lD-t36& zG(hLt5X0f(1~rl<(}-Lmi*RdEeyyNOG)YrzCl^H2Fo+`%oCWa?M9$bC7z&-(Viw_w zgIIQYV4yJYjiVb*daQfaMY#k~mxJh-`;M~0Hhr|OKl1czuAlT`?S)Jn4Y4?VHpRv5 z3?%nrJ1FZfBA*7+sW#Mvb}vGnX7IyRUr#w3CAD6Kv>CU58B=9`1Sgl+V(n+ z{vPdQ(qNBRM9q+5o)(p_RP$L)gRW!Bc^m1kMMHa?%L^qErxY!nP19)EOeQVEF)_|4 zgW&RN7+i+mHrto_Wkq@RR{l)8dV6C#(%YNes_Hhv-|33c>XvRmw?m>Tm*&YDnUx2$ zRyDdl*M3U?9r>thL9H0RBg4l?Py{Lo3$;$@2h2-0u!AAO%Z2&OOiQNQKX$Ba@$n*O zubi^mTK!TRJx3Py87;_bo7zcndt4V0;SquJDFbMWx|SM|qmp6N3@H}gN~?))ZTD9KnnT&I)jvl+z((KngOln9HnU8QDTA7W9ApNU#|O+omq;dO3_% zQa=4@lgxHF;q0ng2=g_`q_YQ*gGRJmYYqe0D}uVBzTsMvEdp5)$TyHu-hv2yohwvT zqO}HxD!z{jIRue&mS)gM;d~+B#PFHf>7nPobk(HCrk3i42j8HAA&39st*XW_Un6le zRXJ|Es?!%+)YOQuS0J5HREY9piQPNG)=DSv_hPAqqG1m3mVx`l!c3; zb2G_=QH$zLRExGGLa(z5R%=bg&X;!XqM5K!os?$CHv;kBTFCSCD}Qo}X7R3^WKWOz z?7@H9Vi>f0F83SYAu;HpN@amsf~%IOd7v61QApUT^4sRXQt{EEVw91_Z8V!FqHX9L z4Ohgj+NQ9+uR+9HXg01 zAZcB<({r3dfZR*%NP^0{Xc9{OO%X5L+eRZKfVFRmgE#5(bjAtK^*#CV z$*uol!I7tbSyf*X)OfB_4>*mER*9l0ta&o_!C?Q|X77qqKt)sQw+zs0)e)WG&4`GS 
zRlaY-@CVZ?GORW2{%3BoWx>&2=c~WDy^3z4!N%ngIObCVT+y`9-E-{2nqF)Ih|`w3 z_V&C$erb*WMwsKp%!EJt(!VZ~O?jg)@98nWvH#0$H7<4;*U{3O(p5E(v{2Y6kx!1K z)RloEj#=}4tYmwA%K&}#b(1nl@*l+VS%Ez@T?oc+dg{?j=dtulRkiV&I>f^VjKw3- zaBt-2jcrDxhEzWk6DO@c@@~xp$0pFYnM1YlLcywu8emW-t+c3~2xXWIFN8XklMl|? ztTX=;9f8_H50oo{_R0x?ad(@DbBzOHc(2eLAXvNR%5a@((PpmlgU6N$=t_P=QSn+i zvabRB1<6-)f%C>s|F4^&JaqN0g@UJ_al3|60Xa&Cr~D!H(t;i9Z{Smi$zxSu-pFJ_ZE zE#(Ex+XZMX)y+lLzK|CJhX1uNn|`bByE8se4jZnV`VoMqU-Ldyovn_+)Fw=P8;#d$ z9NUhN9WZ8x!0rajjy1laCMqLRo zDIQXc#!)Fw8(8XNqSGOoBcIonH?4_8B=ZuS4?Jf%JsoEHCyv}wqxmoX)h{|E9{+tq zehtMDyXaNQ)KSR`apk}daLTu$^}X4%AgtAHBA~{Nzkszrv~XTo&1Wz7ON{XpkT&;`8tD0S%JTb2Fn>AYR5E`AR(I$)%-KzmstBX~+-&M~0DB7^%5>+P4 zs?HuJNjo7lV^pUB%Q5=ySi(=wuFdIfp>FkhPr~>A{}s@V=lxF*`I=k>=yI-iv+uc0 z5jyHy7YqKd0}%5vEMK*dlOri_U`yzSn_Y?bj*lF>_Uwkc!?t^-W;IdkI&BPs4p^s6 zr}D1gBM0brpB9V;L&vl&PC-A(zPWM#V;d>J-or?T=f|3Iqn{5Ux_)1WU@Y1TZOpPIia^5f)2;p12E=3 zo$Quq7?w>UD#K8*X3v$Jc_Gq97PUjeX$l7!VANW3aW!0?uU?2{o{PKLt%}IqJ;z;h zAhsjnCU>uS8LI4p*f4nOqO|+F;p~R5I~dUTz{JW%7qgkauXPUtDxcco3?Se4 zZTM`##4nGXy0~z&h)bI}$Lk&&O|9=~NyKfdG0t8X=a3QZ1I>;|QW&T+Kn-mO{O}XmvZlP>cNUZ{_P)`~fbEW} zPQGTpfSn?dZL^x#D?v!zz{X+Z`Ne|s!xw+|us$)iBtmb#K)KO3N5+UZhd#RTSHJ%V zb#n!HN(^3Oz*n-q|MCm}&#zBy_H#-)zWccQXiMCSr}CW6wAg4s;MkM;qbhG+3&5W5 z98c^?)NhM4=alh)7PoTOb^#<)~^> zGq7(4^u{mdGLwUqNca{PMR2s&?76SS+tbzJI&LP;AWX!N*yUQ4K{hIib69};^$D}K zA6NxA4#*jZ{o%9|eE-$o`GgKgmjURT0pAav#vKG`F0Dzbo+HeUvzgUrfBNmmMmJi# z&Huf}eDYs>zPTzHZ__-W5Mn7X>18Y;*Z1AWwc%49sE_tKM~?2RuFDpx<0-F|p;4ip zNU%klGa_m_8u9YHkHnYQ}u2vJ-Jd?*^DOg z2i8Od(z{N(twF9f;j#(vT3pNB5bT1(?Q1d-I$qYQ%2g^f6g5ML^wt4-^LMH^!?C&d z-F`r?oivMn&?>DuZh;>F_OCOqE}!e$l;l_m7vFu{ed0(>rxEv%&^pF&%2>{XCb2j+ zc{%MRn+iqYoN_vrs(C{kJ$B3tFYGg|-fF$Xs~dj_K+J;S48zfM9C8E#1_gYhN zx-p>FIm6on=wf8tI6~{F_$I@CId`HsG89d?D0p8c@VvG5WH#@f?Q1$bUik*7cvjv2 z!-Yh$ZQ*gp;;S@rPf-mN2~wxksb-p|Ey9!ez#qv3CjH9f#muH<&fZ-BN_S76GqM|Q zQz3}g)HHZ8*Q<%yuoiF}-vrCdxyv@;=QAi2;w_K*ecd{bS%yTJw&HufDV>cU_cS8c z#ga`iV-LYb+79dW5 
zs+`H(WFbt<&Cbf=$=qPB(#@kzt?%w!N%*lA8JV=XiUlKkd!0R*U-PnDva*zRc>n>z}wb_17dl$P%wilQ@Mo`$HoffF#@k#GzHy#4!ZUKuFx!~r^oEs zy*qX&wZDd_?sU}8#z57a4n_)7MU6t$p{|PLd|{(GNu841h$!@71+?BfZc;gEi=eyC z#}rkWvQ!6A%Ij9}pTc~2e0CK}{m=Y;uF{tHIuTkQ#qWCwIB%g(3Jh9{a|&wEOy>Oj zB3-*=GO?9~;^^3h)?!<@U=h#cv7KJhtEo>Vkgap!)ma<1M&g!gGXZo`Tk~R6mERM> zyIPFp%DMci0(#3*jxs7N>)mH+TfQY zUSFEe4U}z-v37!`13Gf2R3!%OWU4<%0g+s+jxym4LkwElL56lA44t&^F0B@8*&kFn z-PGD#^PuSb<8!g5M#Pi;LRCEFRJ%p1h)#jrW5pa2;vm)fqNv8|38)%-@0H%l9owr)GS&Ni4fzy`!(W-%?)DBG;;SE+!0H{&g8`K>AI+m)gyWn=+2NywyF zt<$RckTaGK^J6n}v8j`n=JOSAVx!Goi+3>5+!AwqwZ+>(L2-fxw!P?bptE{V;X>83 z&=IjMn+mxL=&~WUChB#~`=dxLP_5|&6(AJIf63*7@Bd_C_;Q&|kLVl91a#ym9Lbz9 zGU@bf(a}kL_AW+(E$WFv83=R1`Ox>$nL;`{HL+Y6s$L07K3`IfJoAhT9P#Q~Qb`Fi zsTj>3bq5gR+(YsZg!|XL9+RT;jtb(dOf)yjjZ3{vcL81MN|kur6((M!k*6|N&1M08z0WEB*hJ-om25u`5p}K7|1O%y++N0mz@IdZGhWEe zPrdN%&uz$m(C_K~_VMaiOI3>jwn^yi5My?V;dT-3gjL-=yB%V50~=rSQNL*tzIOp# zB2Y^`?1}&+)o+BHCFJ`Sc#iqtOgf$)f9a3P6Gqu8Ko>pWqo}P_j)Jn%2xQi1pMzIa zU%kyJIO6u2hptrBIoKs55h1n4Vbl|_p@ zsN|gbyYw3|x0XJ-;T?WAk;S5geMv{P{2+LvV3Ke2{hRyClCG*8(AVG5Mk4SLxYf9b zs)cdMigrBBkDmLZ2Y*q@wSM+{zuf1A=93QPlabz@$OA%Fsrsfs8G5&%WxIBwA38v* zl(R&3Dbuign^5~c-kE?_rFsBops-{$ z=5o9jk&6<}Ma_5hYv(1xXc6H}1i|Zp<;ET>rQDdUK8B2}b!SDOOuV04i@#Z(%bd)) ziQHy8h&Kz+S34WLcm4cn2>jV|fAq2Mm9i$E?|t>57~HQKW1gU;PMfRGVUat1E5mb< zLwhF!TAzfvDPN7W5~Il;&OnO?#telvJX`RCvo76*TRO2)%1zc|)(k0gQ0FJhmqRh% zv%zYACKt}V_@^%{ZKiv8vjBbF&7rC2}g{8i?&gSvVQwsifUw1m7wPQC+ zX!4IYCBP3dxuu`>{rksN{IR6bsk?yQ=x12``|Ze~*ZjiOXG>`h$_tOI11T~J%L_E- zG%Zir&_Cz9!D6Oh(sPYX`Jq?G!u34Q?`S}yXkV6ra9Pb2X>p9J!Lw(b*@dA$`J#G! 
zZwTtUfWC79y>@@IT7FZuBZ)CeR016ZF%}w(BIk_dg88XrTSxY}FCEY{8ZRk!kHN9cD;Z(4YhNOV$z zmFo7XOv03U%3HEOusr3^UJk)o>zL)tSfOy@#8{!!L5nf~UD_{xtB)da0mKgj+82Z} zIGSC$IPmW`R`uB|pey;gzMDh;+eeO-k`R5AsvK*8)(ES*9~tHBsKl~`oiuPRTiQ`I z4$T|qd_KRl@Zw5xr5u-m4+Ws3XAG%|Lj43@4{UgXEBRkO|7VX)Z}h|YAfQFX&0DVJ zLvQpsrTqQwrm(0!dOd&YC?cy29Ari(-wAt`blw5&s1c0&;Ec{&P6C;e!}Iy|a^#Kf zLm7asB!+EtI_v+P;4&%7bQ=ck^L~*3QT~nmNME0Ne6OENZ#F`2c!oFPcHgaXTn*@= zCU{Y^MNyFy`{On5;XKSyf@mAx|LF3rJT^Z$50I=nbxX@ zCH@W!|7$F822Tx6WN)O2x8;DoQA&Cj(C-1d$hDXwF=K?oA!JB^QNp>*(4V4l7yQ61 zhOT7#T4r*1{Bb3HRs1R&(CY=Nn-#x{Y`mzGzY5Fm)ADa@fp2Bg&RpMjA68e?>+XI( z6o3}<1o7eR6n>`n+iwWhATR=6zPX(brQm``F*PdQPcel=x zmS=a}L(|##TqNI6T~%~1bLT^5k|D1F^i7u2t@!sB5e{;&=4A+f9$M3%7k}WVslUIl zp4j(?0?;4w)_3=JUfq_ejvWR0+Yw99k&Cocr`pSE&s@mcDL-bWOg31sb~RIpifZ@q z=pmLT`l4_YCf>GSaA)!vu_O#wv;8w%9#9C$wiR zRG!vTQBi24pZ4Gmadl$A$;} zfi8qNdepvORHr2KCqWP8NPE8yH+VHQsEUVD@42?a!Z{4XgJO*G)0b%B-&89@Xcb7Jq{Uw6ptWX;4~P%JOE%zNg*Nx>rLo$T zfn)b;U;llJw^e{HB_iHwWxG<&`N}d^5}~zKa^&y(ld)_v;rR2+)+X~9hu?o)-*IE97paI1`oqA2Lx)Gs(RKcb0+HBa~8sr24?oA0y>(zqnr{z%QXju zxK~7FxVGj+u%99SmZAK}_c1yI($#a! zYuLFxo~nw~OTb3YO*C5*Y7pEZz#c~8>^f_yE@hFXwYhb=9GV(H-m#DuK#Yj7pA^bJ zQMLFjx&ESu-yjjbNXmNnrf;NL9!;6jMWKkGhw$mVh)6`_lU@TA$CTVdXZ}C2Vb`k(5n9|YF7VUYs~Ag z_A?IgB?t}xqGs_`6$3iz9%4pB&eHM+l8-$*k;_f=KXXLCJIbX$6o6i@4!xNqxGYq? z_oiaInp2EAv2qH6pJZ0><5En6HvQE-J?7D_-;dX?T&PAe*+CIaCH5LIdj!~7w9R7! 
zMItjMK&k*0XTRNmws_H!f0_%Om*Q^tDI@ZE5jhNEF95Yet7xn_5=|E=P-So}zb6I$ z&3q<*VQG16<%*8ShXT;Ip$@J3+){L@)v8gkMICW0~CBy)EfgxW2?5UYtF!JiRi)J@l3+Nk%%n#G1kU&1F-9qL; zDi2wCGHUQS8Za0Q`;;0d0010!Nkl{ zxz)t6OJH_^>@ddcAlWWt8$_BZXjD01L2*#Hl-F$m^wctcR99o_69{(0@g6teSJ&K^ zR82l=wv=-6*B*0a@>+>YfV>W(pWrWYt9b3F-~L!+K(w`hzVTV!seq2$5+rg9z`Ol!t4y-FLA4U z3Zq3_OF33&m%rcl-R{DXulFaK>*RP~QrLXIfz4*4HA!ETFfN!S@vH@V zfA~iqd+jcu*Wg@~2c8mO0u}=n_HA*T0oq`EHNb?Qt=(q$3Z3IH(m1HEn`WT?Ld-Bk%Avy$}i;>Ui_awwODp*dhM1S zdHOZ4F5$MuQ)m?{ZAQXYTIqnuK8e&*xdX;*6SAiKyL|n3-UdM1KV`nI2xw3~`@g)} z=*HsPjBAfLg0G0kZ=&d%SNSkX59A<(r$I9u(##{sE{BduFBZ5O=BBg#6V(OuRL(#VVuo1rNddpZ;jwdjakstLPT5CmE6lx@Vs?Cg&S2NuHF>BAJE0~ z{LH_6c}L8v`h*dF%771oI1I=&&6bV6<^9*Z#>C5t?&t+?g`xeMLV?f42 zqYJnLB6RWeKKHNB*LnW@KF4*tRLma06F}6rFZ)-$W^19`YgO7GB;_A0!k>wldj;~C z{QvD;Yiw25nO$q|bD#G06NAC#{Rofn2!`-VTBjuBMO)2uOlK-pOk5l9`09AL-3xex;0Q*{sTR98x zWd}KIVHP;bYQim=F@O$83pEKs7m%wfU6zL3AScDiJ5Kl~aq_zO(w**)51_kyKR>zYfL*(8t<6-O<;mWJAJ#>=0%-Gj5~|R3+pcK?Xia{m(}>tlS>0txs1OAGIdpC45TzImFH$-*>-~!`$?m zDhqw9&(;5*Mt}~c!(HEerL;^+cNk(1C>trVia^~&*~7r@gmg$mTfYP zgpJ=iWQSKIk~MxjB~gDFDC=QtC4@&VUXH&SrZ{O9&{uu+R($nmvE`B4)z*{EBp;yg zVF2q2?L8FHLz7%qV3TIV!DT2x%Z`SVRG25^S0jv_$c@Rjmcv@}F)d&Zq5}e*^9AHm z)bERP=U~j~^l0|fS7QevwZ1{r9gwcWXtjSwLTnk){K7_kuNZ647)Wg z`Cxp0eJoiKkJmDSHGtf2z-5}%Lr&PUL^VVl`!4 z;aU4Ae76&Z_mNmqXj*)nk~dI=g`!wHAM!6*z1ikY*5!EUg`<@vHoM3g&UN6h@zW!t zE_!M7^2NT^^zgvR-(Jq*$NTb*B{%GUJ`&&d(Rc~_Dg1VZAdJORSqLEh)kv)J2EdGhoCvg00ve$RMYehkWM&eXHMuEB`-@0S)Z> z!iiKx*qFC`Frh-%8X}6{dg25Wiex_ zXk`r?Hql}gMOJ{EParCyh5?qKy;&Xdy$$HGwA6=xzw-jLZf?Vo$m0!|p6j));_%# zUo-65e zkvV=Q<(0=DqUEPS*-ElXed!77JjQPizZT)mK**8BaXEwy`&+!!#(|h$mP$njjH!2`alY|sPR6~M&|SRafDXCf#c?^pA*K)?I`=xwX2)}T zVihdeNY8(a;1&X!Y*E&wg3{vQmuqq(ZwcrS;fsdnB~n@0l5|6u6Z2#hBW9TiNO=PG z&01<53Q!>oD6*8mupsy1t^@SmzyISu04Nutfxu?X9x2mDzKM)0hVlxH!9*P^p`g-u z(0lxWvL)7HGYvNb*hvI`YnOOliIz~Go9Ccg0(zq7Hj7z{V_s^-<#?$_mr0Sbgng|U9p%{jjh4TLL509>2W1-sb4b`i4_}2_ z=$w^)_^ThQE{!DW4ZRJpcv@LZq4m}1DH7SKBmoJ&-rrz)F~1vbL< 
zglq<7Sx^gDL%3Qf>zD)bT#i@16*8eb@10SA77PmU2F3jWVmjHd|41^PEw#zy zP|AyCDhy*wVPrLcH3T<++Lo@I8Des^EHGjG>*4`A*cI;i#v3(O{03wA03q8!Y$eTH zRrCr5(<;jjbFe-P?vY&_@VB3=e+rr zTn#V@_v$oQauC|BZ`%wD+vGv;?RtLoB5}KWY1jRytNr9p*O)AxPO*LH60X_C^jn5p6msFQM1*hKaBNu?4i`hjV( z2L5_L-%blX+1cI%Oa)r#V$5Vr#3-w+X#hq*To!=>woAxXI-GHkcYJa0j2f@=^k84l zna@8ma6^;Yc((M=E^d%9Vr88r1k;y-}?=xHUpkPr6zNQ&Vc8$JT#s(}C#H zg-f=;i#$kTD*){jUwzZ;;bcv1p5tdNGz0PaJeTj<^N9M$dfXGF}qzWA*$ zrWILqkBquLSh?7n?HNA$**(J`mCQZNrT+J~xTEQdv5MO6QhHu3Jh`868fH>gM7ina zARw=nSWp}z!B57k&bjaAomiP@YFg(}U%xcrMVA?R+X&eW$hKTnS#WACx}^@YLcw8b zl2MMLxE#eWhr#5pP()mFDEx~p9k-&bDYU0>%)QX&1|P~RE=+@>D#X14(+fz~WT76AcE zdnKSn`8-^7KtuS@mtL+&mXuX^^tT)0lLQ|TLTLbEoInjAEFcRVcSitS=%gpO|8_?? z#sHlGf%Rm$7KRzh{UTgQinheR0cZZzPq+W<=l}dvx_E0ZMAM7r<4f<`>X7}kp?yL` zrI%TrPt3en^O^;8IG1BQMr}#+>@icmA^jXwJU@ zbRu{X*!7h^E=a{>y)~wR#4Z)RQdXqk%UkS3ONg(?ETFFk^wr_FASt44_{4|+-2~bl zcml+mzLR(1q!pu=yDr3%L!GUe%)F8}vn|i=|3E}F#1>3Nk6yER=u5lH)yH*q)F191 z>3ijS52=J$zGNvke2+)>A1OB}%(vFAHo%hsKOQhoL?TKi^P#g&nDr=V%q*Zk1nAt3 zI{}7~`_r#c3liNDFsY(Ve0fite>!bUs~fgGhu;6Mel+KA{EA_ruLHq&*0HlMBdssK zGzv7Uk5u$aKL=|OFDWfsNZL;k^5-H-P9sT@q18x>HL+PhPh_D3Nx2>fyFob(k>if$ zUFZCE_~^)v%0+=cc(KY3r&6aTbN`?mPmQ@-=NFAjLU zEzfQ#RH^oZ70@-mD*(Og3oob2s-v3?y)6K?6I7pCJ6mq+HLWak__OSFYm_&j$S zN-=K^J82t^jC8%WZ>RdvQ+nOfLQg9|-*73!G=BZ{vZ$A-jG9;@=$?jPD`--Llut#S z$3#=PR{_242fr(;j3u^uF5BQ}G-ci8M7kVYXF8kh8W87msQ_)B|3+O8=pdI?jhcU;qUKLGt#KmmSp{~P&=;-j# z+?>DWOCA8as{oDWX7kjgz3ap?TP>+g02>Kxx?Nr3_o{_fd7}+KIGU{TMiWR6WCt>- z!D1fvCWGs%xg3fgD4xtriZhmv{mmce+c*|mVv86&W`&OyswEzFYWFOlb%Dlc%5eO+ z6#JkTcKvl+4$w2hQ4aN98u3Wf#4BUIT@NEq81M-Jt4LA$*a`VO3J3^XGpBnR(4lr` z>IdCjE=Ta?JYbi6Zfj)qoZeN|Xsedd##-fmt1Et;$F&=a_E8k-2?-XE@Dr^a)nxNK8PYFrQOayeU<5aW~GDBiegad>-1H+$O&bS{R`plLK z;q7YSz%BF%cP*gzeD;kJuSV*GZn-hoP2g#jVz_BjZTOv=aO6w`G$6hJ7oj4bTDnP| z65pMG7ir0sO3O=+Z&cok8FSsWfL1<_6;D1GSzK3D5vzziMRK2zTEtW?5aon3IaY?^ zb+}A-K<6so#3$%Gk*py{M1szc^8&O{cp?k;rX$|Y!k+(4&BjhV!YQf4x%wMYs8zoD z5O@~@Iyicp#->Bb_~P>QB(_*`Js|q;l>#P;-QhAL04*2*(GNmk)J!3!0j)>(!eK 
z4hy%qllio3$KL&b)~f&KUQKw}cxlqQh1QyNG}P{vMKq%QkF_G0Z%9p%7j#WGK#$cS ztLAyX1gt_l54twz+<7?D>foP~tZB`bWcz;I{dzy1eYR+oZv`!60XN?hfDTxr8V}iB z%OZ)&Xla>?XKLU@*3w`ti2EV3kzidwRjWEKdFmqGNkEVF=>@&^!k|x2!|Rt|cwQXZ zXymB){xL!HJ%~G>PN#cDvZI;K!T!w2+9$Kv$GinT3&3B|Z@otVJ-%+jwQG+>=04b+ zjL#`k2B^ixa1{k>VXy?kxk3%?p+x!o#=Bm^uKFDYv`Rk@3orz-7myyc$a>g=Lz_>Y zqCa{j3)|V-?=DWJPcO{--$^&)!w1YT^PL7`inV0p)_Q&-o*GTY;xbpV*?LP^F2-&m zunuraDXa{jN~|9Gx}$(rFq2&bRBZl)6Ze*Y?E3?dyZrk>Zx}^vbDE-EDB5H1bqjsfLgvV8o~(Vqb6)>K8YpeA zMnpCfxF0}DfRrKPP7t3+Jzy#TJw}F{C4*+uG@PVi*e`+(knI9FAx4fN=q+?B8os zeFL#XZF#~MSM9~QfiN3LSuKWZD9|upB&S0oK^k4091^(i{`A{hUJQeQiMqZ6iX2Ud zXd(p63ZR^CT@ZGN$h%_srV}}l^`+gKbZ4|LI&!Kqo(-(>z9v^(UpdTM0OB=FNcTP5MNPGJkdo zUAr-8%L>n7R9Z*U>5hvSx%m3~|633#OCC_1_P5LeI?NU|9k4s<>!WtWk4L>kl^3z| zJWsU)vD2t>mFjD&l9=!{C+=T<_U+C85$3Uvy>NI_)bl=0Qnk!$RjXToOF>3Y@VRLxd+L3ja{6tB*_59hziIptX*7^GBC9?+4TmZk&s0yhbK7+>O@bGAlXY9a5W(RwEtJ5dH z_JUHX6_@Zb%p_;nm{~vYvXzmj+-K+w`|06#e(_H`&V+ev - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + \ No newline at end of file diff --git a/doc/source/_static/index-images/contributor.svg b/doc/source/_static/index-images/contributor.svg index 3a689e0e4cb2..6fbc4c18ac4d 100644 --- a/doc/source/_static/index-images/contributor.svg +++ b/doc/source/_static/index-images/contributor.svg @@ -1 +1 @@ - \ No newline at end of file + \ No newline at end of file diff --git a/doc/source/_static/index-images/getting_started.svg b/doc/source/_static/index-images/getting_started.svg index 04db7e615671..c451a0ee8f56 100644 --- a/doc/source/_static/index-images/getting_started.svg +++ b/doc/source/_static/index-images/getting_started.svg @@ -1,31 +1 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + \ No newline at end of file diff --git a/doc/source/_static/index-images/user_guide.svg b/doc/source/_static/index-images/user_guide.svg index d61b0937da75..9f502effd85d 100644 --- a/doc/source/_static/index-images/user_guide.svg +++ 
b/doc/source/_static/index-images/user_guide.svg @@ -1 +1 @@ - \ No newline at end of file + \ No newline at end of file diff --git a/doc/source/_static/numpy.css b/doc/source/_static/numpy.css index 1555dafb5539..a08902c9a7d8 100644 --- a/doc/source/_static/numpy.css +++ b/doc/source/_static/numpy.css @@ -28,12 +28,15 @@ body { button.btn.version-switcher__button, button.btn.version-switcher__button:hover { - color: black; font-size: small; } /* Main index page overview cards */ +.sd-card { + background-color: var(--pst-color-on-background); +} + .sd-card .sd-card-img-top { height: 60px; width: 60px; @@ -45,7 +48,8 @@ button.btn.version-switcher__button:hover { /* Main index page overview images */ html[data-theme=dark] .sd-card img[src*='.svg'] { - filter: invert(0.82) brightness(0.8) contrast(1.2); + filter: invert(1.0) saturate(0.0); + background: none; } /* Legacy admonition */ From 5bf1efdaf559c4722fad8d0fd4b8b1c9b75bdd5f Mon Sep 17 00:00:00 2001 From: Kumar Aditya Date: Thu, 26 Mar 2026 21:13:10 +0530 Subject: [PATCH 1535/1718] ENH: make bound array method immortal to reduce reference counting contention (#31004) Co-authored-by: Nathan Goldbaum --- numpy/_core/src/multiarray/array_method.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/numpy/_core/src/multiarray/array_method.c b/numpy/_core/src/multiarray/array_method.c index c7280435d3c3..c6504ac03d50 100644 --- a/numpy/_core/src/multiarray/array_method.c +++ b/numpy/_core/src/multiarray/array_method.c @@ -129,9 +129,9 @@ is_contiguous( * param move_references UNUSED -- listed below but doxygen doesn't see as a parameter * @param strides Array of step sizes for each dimension of the arrays involved * @param out_loop Output pointer to the function that will perform the strided loop. - * @param out_transferdata Output pointer to auxiliary data (if any) + * @param out_transferdata Output pointer to auxiliary data (if any) * needed by the out_loop function. 
- * @param flags Output pointer to additional flags (if any) + * @param flags Output pointer to additional flags (if any) * needed by the out_loop function * @returns 0 on success -1 on failure. */ @@ -485,7 +485,12 @@ PyArrayMethod_FromSpec_int(PyArrayMethod_Spec *spec, int private) return NULL; } strcpy(res->method->name, spec->name); - +#ifdef Py_GIL_DISABLED + // Mark immortal to reduce reference count contention in PyArray_GetCastingImpl + // If we ever allow replacing ArrayMethod objects or cleanup it DTypes or ufuncs, this may need to be reconsidered. + // An alternative that might help is to store cast methods in a PyArrayIdentityHash instead of a dict. + PyUnstable_SetImmortal((PyObject *)res->method); +#endif return res; } From d69bddc2c40a457f3e9a5072181b67d3af7e285e Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Thu, 26 Mar 2026 17:15:54 +0200 Subject: [PATCH 1536/1718] MAINT: Fix typos --- doc/neps/index.rst | 2 +- doc/source/conf.py | 2 +- doc/source/reference/c-api/generalized-ufuncs.rst | 2 +- meson_cpu/meson.build | 2 +- numpy/__init__.pyi | 2 +- numpy/_core/einsumfunc.py | 4 ++-- numpy/_core/include/numpy/dtype_api.h | 2 +- numpy/_core/src/common/npy_cpu_dispatch.h | 4 ++-- numpy/_core/src/common/npy_cpu_features.c | 2 +- numpy/_core/src/common/simd/lsx/memory.h | 2 +- numpy/_core/src/common/simd/simd.hpp | 4 ++-- numpy/_core/src/multiarray/array_method.c | 4 ++-- numpy/_core/src/multiarray/arraytypes.c.src | 4 ++-- numpy/_core/src/multiarray/calculation.c | 2 +- numpy/_core/src/multiarray/ctors.c | 2 +- numpy/_core/src/multiarray/mapping.c | 2 +- numpy/_core/src/multiarray/scalarapi.c | 2 +- numpy/_core/src/multiarray/scalartypes.c.src | 4 ++-- numpy/_core/src/npysort/binsearch.cpp | 2 +- numpy/_core/src/umath/legacy_array_method.c | 6 +++--- .../_core/src/umath/special_integer_comparisons.cpp | 2 +- numpy/_core/src/umath/ufunc_object.c | 4 ++-- 
numpy/_core/src/umath/ufunc_type_resolution.c | 2 +- numpy/_core/tests/test_api.py | 2 +- numpy/_core/tests/test_array_coercion.py | 2 +- numpy/_core/tests/test_deprecations.py | 2 +- numpy/_core/tests/test_dtype.py | 2 +- numpy/_core/tests/test_einsum.py | 2 +- numpy/_core/tests/test_multiarray.py | 12 ++++++------ numpy/_core/tests/test_numerictypes.py | 6 +++--- numpy/_core/tests/test_scalarbuffer.py | 2 +- numpy/_core/tests/test_scalarmath.py | 4 ++-- numpy/_core/tests/test_strings.py | 2 +- numpy/_globals.py | 2 +- numpy/_typing/_nested_sequence.py | 2 +- numpy/_utils/__init__.py | 2 +- numpy/_utils/__init__.pyi | 2 +- numpy/_utils/{_convertions.py => _conversions.py} | 0 numpy/_utils/{_convertions.pyi => _conversions.pyi} | 0 numpy/conftest.py | 6 +++--- numpy/f2py/tests/test_array_from_pyobj.py | 4 ++-- numpy/f2py/tests/test_crackfortran.py | 2 +- numpy/f2py/tests/test_regression.py | 2 +- numpy/lib/_function_base_impl.py | 4 ++-- numpy/lib/tests/test_function_base.py | 4 ++-- numpy/lib/tests/test_loadtxt.py | 2 +- numpy/lib/tests/test_nanfunctions.py | 2 +- numpy/ma/tests/test_extras.py | 2 +- numpy/matrixlib/tests/test_defmatrix.py | 2 +- numpy/random/tests/test_direct.py | 4 ++-- numpy/random/tests/test_generator_mt19937.py | 2 +- .../tests/test_generator_mt19937_regressions.py | 2 +- numpy/random/tests/test_randomstate_regression.py | 2 +- numpy/typing/tests/data/fail/ndarray_misc.pyi | 2 +- 54 files changed, 75 insertions(+), 75 deletions(-) rename numpy/_utils/{_convertions.py => _conversions.py} (100%) rename numpy/_utils/{_convertions.pyi => _conversions.pyi} (100%) diff --git a/doc/neps/index.rst b/doc/neps/index.rst index 1891641cbafd..5202c9dd6e91 100644 --- a/doc/neps/index.rst +++ b/doc/neps/index.rst @@ -6,7 +6,7 @@ This page provides an overview of development priorities for NumPy. 
Specifically, it contains a roadmap with a higher-level overview, as well as NumPy Enhancement Proposals (NEPs)—suggested changes to the library—in various stages of discussion or completion. -See :doc:`nep-0000` for more informations about NEPs. +See :doc:`nep-0000` for more information about NEPs. Roadmap ------- diff --git a/doc/source/conf.py b/doc/source/conf.py index 416d7d735ca1..a36cb6bdecb9 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -70,7 +70,7 @@ class PyTypeObject(ctypes.Structure): if sys.implementation.name == 'cpython': c_typ.tp_name = _name_cache[typ] = b"numpy." + name.encode('utf8') else: - # It is not guarenteed that the c_typ has this model on other + # It is not guaranteed that the c_typ has this model on other # implementations _name_cache[typ] = b"numpy." + name.encode('utf8') diff --git a/doc/source/reference/c-api/generalized-ufuncs.rst b/doc/source/reference/c-api/generalized-ufuncs.rst index b8a37e98b81e..7a436b102600 100644 --- a/doc/source/reference/c-api/generalized-ufuncs.rst +++ b/doc/source/reference/c-api/generalized-ufuncs.rst @@ -63,7 +63,7 @@ distances among them. The output dimension ``p`` must therefore be equal to in an output array of the right size. If the size of a core dimension of an output cannot be determined from a passed in input or output array, an error will be raised. This can be changed by defining a ``PyUFunc_ProcessCoreDimsFunc`` function -and assigning it to the ``proces_core_dims_func`` field of the ``PyUFuncObject`` +and assigning it to the ``process_core_dims_func`` field of the ``PyUFuncObject`` structure. See below for more details. 
Note: Prior to NumPy 1.10.0, less strict checks were in place: missing core diff --git a/meson_cpu/meson.build b/meson_cpu/meson.build index 02bbe5f7618e..3cd5ffd7e16c 100644 --- a/meson_cpu/meson.build +++ b/meson_cpu/meson.build @@ -203,7 +203,7 @@ foreach opt_name, conf : parse_options warning('CPU Feature "@0@" is no longer explicitly supported, redirecting to "@1@".'.format(tok, ntok)) endif warning('Please check the latest documentation for build options.') - if ntok == '' or not append # redirected features not safe to be execluded + if ntok == '' or not append # redirected features not safe to be excluded continue endif tok = ntok diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 64956883597b..34507adc48da 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -3774,7 +3774,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): def setfield(self: Never, val: Never, /, dtype: Never, offset: L[0] = 0) -> None: ... # type: ignore[misc] def searchsorted(self: Never, v: Never, /, side: L["left"] = "left", sorter: None = None) -> Never: ... # type: ignore[misc] - # NOTE: this wont't raise, but won't do anything either + # NOTE: this won't raise, but won't do anything either @overload @deprecated("Resizing a NumPy generic inplace has been deprecated in NumPy 2.5") def resize(self, /, *, refcheck: py_bool = True) -> None: ... 
diff --git a/numpy/_core/einsumfunc.py b/numpy/_core/einsumfunc.py index 1d191cedfc2f..0b86161bd2a5 100644 --- a/numpy/_core/einsumfunc.py +++ b/numpy/_core/einsumfunc.py @@ -1188,14 +1188,14 @@ def bmm_einsum(eq, a, b, out=None, **kwargs): # prepare left if eq_a is not None: - # diagonals, sums, and tranpose + # diagonals, sums, and transpose a = c_einsum(eq_a, a) if new_shape_a is not None: a = reshape(a, new_shape_a) # prepare right if eq_b is not None: - # diagonals, sums, and tranpose + # diagonals, sums, and transpose b = c_einsum(eq_b, b) if new_shape_b is not None: b = reshape(b, new_shape_b) diff --git a/numpy/_core/include/numpy/dtype_api.h b/numpy/_core/include/numpy/dtype_api.h index 5ac964782ec0..64509ba3a955 100644 --- a/numpy/_core/include/numpy/dtype_api.h +++ b/numpy/_core/include/numpy/dtype_api.h @@ -495,7 +495,7 @@ typedef PyArray_Descr *(PyArrayDTypeMeta_EnsureCanonical)(PyArray_Descr *dtype); typedef PyArray_Descr *(PyArrayDTypeMeta_FinalizeDescriptor)(PyArray_Descr *dtype); /* - * Constants that can be queried and used e.g. by reduce identies defaults. + * Constants that can be queried and used e.g. by reducing identities defaults. * These are also used to expose .finfo and .iinfo for example. */ /* Numerical constants */ diff --git a/numpy/_core/src/common/npy_cpu_dispatch.h b/numpy/_core/src/common/npy_cpu_dispatch.h index 49d29b8aa655..13f34159b235 100644 --- a/numpy/_core/src/common/npy_cpu_dispatch.h +++ b/numpy/_core/src/common/npy_cpu_dispatch.h @@ -8,7 +8,7 @@ */ #include "npy_cpu_features.h" // NPY_CPU_HAVE /** - * This header genereated by the build system and contains: + * This header was generated by the build system and contains: * * - Headers for platform-specific instruction sets. * - Helper macros that encapsulate enabled features through user-defined build options @@ -79,7 +79,7 @@ npy_cpu_dispatch_trace(const char *func_name, const char *signature, * Extract the enabled CPU targets from the generated configuration file. 
 * * This macro is used to extract the enabled CPU targets from the generated configuration file, - * which is derived from 'meson.multi_targets()' or from 'disutils.CCompilerOpt' in the case of using distutils. + * which is derived from 'meson.multi_targets()' or from 'distutils.CCompilerOpt' in the case of using distutils. + * It then calls 'npy_cpu_dispatch_trace()' to insert a new item into the '__cpu_targets_info__' dictionary, * based on the provided FUNC_NAME and SIGNATURE. * diff --git a/numpy/_core/src/common/npy_cpu_features.c b/numpy/_core/src/common/npy_cpu_features.c index faffb7fc0781..ba35c962bdd7 100644 --- a/numpy/_core/src/common/npy_cpu_features.c +++ b/numpy/_core/src/common/npy_cpu_features.c @@ -502,7 +502,7 @@ npy__cpu_init_features(void) // long mode only npy__cpu_have[NPY_CPU_FEATURE_LAHF] = (reg[2] & (1 << 0)) != 0; #else - // alawys available + // always available npy__cpu_have[NPY_CPU_FEATURE_LAHF] = 1; #endif npy__cpu_have[NPY_CPU_FEATURE_LZCNT] = (reg[2] & (1 << 5)) != 0; diff --git a/numpy/_core/src/common/simd/lsx/memory.h b/numpy/_core/src/common/simd/lsx/memory.h index 9c3e6442c6d6..aaf32e5ce58f 100644 --- a/numpy/_core/src/common/simd/lsx/memory.h +++ b/numpy/_core/src/common/simd/lsx/memory.h @@ -528,7 +528,7 @@ NPYV_IMPL_LSX_REST_PARTIAL_TYPES_PAIR(u64, s64) NPYV_IMPL_LSX_REST_PARTIAL_TYPES_PAIR(f64, s64) /************************************************************ - * de-interlave load / interleave contiguous store + * de-interleave load / interleave contiguous store ************************************************************/ // two channels #define NPYV_IMPL_LSX_MEM_INTERLEAVE(SFX, ZSFX) \ diff --git a/numpy/_core/src/common/simd/simd.hpp b/numpy/_core/src/common/simd/simd.hpp index 40556a68c59d..cef7d0ed191f 100644 --- a/numpy/_core/src/common/simd/simd.hpp +++ b/numpy/_core/src/common/simd/simd.hpp @@ -39,7 +39,7 @@ // Indicates if the SIMD operations are available for float16.
#define NPY_HWY_F16 (NPY_HWY && HWY_HAVE_FLOAT16) -// Note: Highway requires SIMD extentions with native float32 support, so we don't need +// Note: Highway requires SIMD extensions with native float32 support, so we don't need // to check for it. // Indicates if the SIMD operations are available for float64. @@ -64,7 +64,7 @@ namespace simd { /// We can not import all the symbols from the HWY_NAMESPACE because it will /// conflict with the existing symbols in the numpy namespace. namespace hn = hwy::HWY_NAMESPACE; -// internaly used by the template header +// internally used by the template header template using _Tag = hn::ScalableTag; #endif diff --git a/numpy/_core/src/multiarray/array_method.c b/numpy/_core/src/multiarray/array_method.c index c7280435d3c3..a1262e7d7410 100644 --- a/numpy/_core/src/multiarray/array_method.c +++ b/numpy/_core/src/multiarray/array_method.c @@ -567,7 +567,7 @@ boundarraymethod_dealloc(PyObject *self) * changes and especially testing if they were to be made public. 
*/ static PyObject * -boundarraymethod__resolve_descripors( +boundarraymethod__resolve_descriptors( PyBoundArrayMethodObject *self, PyObject *descr_tuple) { int nin = self->method->nin; @@ -957,7 +957,7 @@ PyArrayMethod_GetMaskedStridedLoop( PyMethodDef boundarraymethod_methods[] = { - {"_resolve_descriptors", (PyCFunction)boundarraymethod__resolve_descripors, + {"_resolve_descriptors", (PyCFunction)boundarraymethod__resolve_descriptors, METH_O, "Resolve the given dtypes."}, {"_simple_strided_call", (PyCFunction)boundarraymethod__simple_strided_call, METH_O, "call on 1-d inputs and pre-allocated outputs (single call)."}, diff --git a/numpy/_core/src/multiarray/arraytypes.c.src b/numpy/_core/src/multiarray/arraytypes.c.src index b00aad2333bd..54cd65e5ebcc 100644 --- a/numpy/_core/src/multiarray/arraytypes.c.src +++ b/numpy/_core/src/multiarray/arraytypes.c.src @@ -4380,7 +4380,7 @@ static int /* Keeping Half macros consistent with standard C -Refernce: https://en.cppreference.com/w/c/types/limits.html +Reference: https://en.cppreference.com/w/c/types/limits.html */ #define HALF_MAX 31743 /* Bit pattern for 65504.0 */ #define HALF_MIN 1024 /* Bit pattern for smallest positive normal: 2^-14 */ @@ -4476,7 +4476,7 @@ static int /* Definition: Minimum negative integer such that FLT_RADIX raised by power one less than that integer is a normalized float, double and long double respectively - refernce: https://en.cppreference.com/w/c/types/limits.html + reference: https://en.cppreference.com/w/c/types/limits.html */ *(npy_intp *)ptr = @ABB@_MIN_EXP - 1; return 1; diff --git a/numpy/_core/src/multiarray/calculation.c b/numpy/_core/src/multiarray/calculation.c index b95b37987f8e..2171e967c4dc 100644 --- a/numpy/_core/src/multiarray/calculation.c +++ b/numpy/_core/src/multiarray/calculation.c @@ -654,7 +654,7 @@ PyArray_Round(PyArrayObject *a, int decimals, PyArrayObject *out) op2 = n_ops.multiply; if (decimals == INT_MIN) { // not technically correct but it doesn't matter 
because no one in - // this millenium is using floating point numbers with enough + // this millennium is using floating point numbers with enough // accuracy for this to matter decimals = INT_MAX; } diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index 91a3db2d6e5f..b5c6776970f0 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -1060,7 +1060,7 @@ PyArray_NewFromDescrAndBase( * NPY_KEEPORDER - Keeps the axis ordering of prototype. * descr - If not NULL, overrides the data type of the result. * dtype - If not NULL and if descr is NULL, overrides the data type - of the result, so long as dtype is non-parameteric + of the result, so long as dtype is non-parametric * ndim - If not -1, overrides the shape of the result. * dims - If ndim is not -1, overrides the shape of the result. * subok - If 1, use the prototype's array subtype, otherwise diff --git a/numpy/_core/src/multiarray/mapping.c b/numpy/_core/src/multiarray/mapping.c index 5a8ec64664ac..07f08765f875 100644 --- a/numpy/_core/src/multiarray/mapping.c +++ b/numpy/_core/src/multiarray/mapping.c @@ -2755,7 +2755,7 @@ PyArray_MapIterCheckIndices(PyArrayMapIterObject *mit) * * @param Index information filled by prepare_index. * @param Number of indices (gotten through prepare_index). - * @param Kind of index (gotten through preprare_index). + * @param Kind of index (gotten through prepare_index). * @param NpyIter flags for an extra array. If 0 assume that there is no * extra operand. NPY_ITER_ALLOCATE can make sense here.
* @param Array being indexed diff --git a/numpy/_core/src/multiarray/scalarapi.c b/numpy/_core/src/multiarray/scalarapi.c index d2a08031cbd9..08923508d601 100644 --- a/numpy/_core/src/multiarray/scalarapi.c +++ b/numpy/_core/src/multiarray/scalarapi.c @@ -346,7 +346,7 @@ PyArray_DescrFromTypeObject(PyObject *type) _PyArray_LegacyDescr *conv = NULL; int res = PyObject_GetOptionalAttr(type, npy_interned_str.dtype, &attr); if (res < 0) { - return NULL; // Should be a rather criticial error, so just fail. + return NULL; // Should be a rather critical error, so just fail. } if (res == 1) { if (!PyArray_DescrCheck(attr)) { diff --git a/numpy/_core/src/multiarray/scalartypes.c.src b/numpy/_core/src/multiarray/scalartypes.c.src index e23fcef06574..4c9098d7d9ca 100644 --- a/numpy/_core/src/multiarray/scalartypes.c.src +++ b/numpy/_core/src/multiarray/scalartypes.c.src @@ -2128,7 +2128,7 @@ static PyObject * gentype___copy__(PyObject *self) { // scalars are immutable, so we can return a new reference - // the only expections are scalars with void dtype + // the only exceptions are scalars with void dtype if (PyObject_IsInstance(self, (PyObject *)&PyVoidArrType_Type)) { // path via array return gentype_generic_method(self, NULL, NULL, "__copy__"); @@ -2142,7 +2142,7 @@ gentype___deepcopy__(PyObject *self, PyObject *args) // note: maybe the signature needs to be updated as __deepcopy__ can accept the keyword memo // scalars are immutable, so we can return a new reference - // the only expections are scalars with void dtype + // the only exceptions are scalars with void dtype // if the number of arguments is not 1, we let gentype_generic_method do the // error handling if (PyObject_IsInstance(self, (PyObject *)&PyVoidArrType_Type) || (PyTuple_Size(args)!=1)) { diff --git a/numpy/_core/src/npysort/binsearch.cpp b/numpy/_core/src/npysort/binsearch.cpp index 3ec4fecef0c6..6094bebe9f1a 100644 --- a/numpy/_core/src/npysort/binsearch.cpp +++ b/numpy/_core/src/npysort/binsearch.cpp @@ 
-123,7 +123,7 @@ binsearch(const char *arr, const char *key, char *ret, npy_intp arr_len, Optimization: we unroll the first iteration for the following reasons: 1. ret is not initialized with the bases, so we save |keys| writes - by not having to intialize it with 0s. + by not having to initialize it with 0s. 2. By assuming the initial base for every key is 0, we also save |keys| reads. 3. In the first iteration, all elements are compared against the diff --git a/numpy/_core/src/umath/legacy_array_method.c b/numpy/_core/src/umath/legacy_array_method.c index 7a85937fcc8f..418053ea9e1d 100644 --- a/numpy/_core/src/umath/legacy_array_method.c +++ b/numpy/_core/src/umath/legacy_array_method.c @@ -367,7 +367,7 @@ PyArray_NewLegacyWrappingArrayMethod(PyUFuncObject *ufunc, flags = _NPY_METH_FORCE_CAST_INPUTS; } - PyArrayMethod_GetReductionInitial *get_reduction_intial = NULL; + PyArrayMethod_GetReductionInitial *get_reduction_initial = NULL; if (ufunc->nin == 2 && ufunc->nout == 1) { npy_bool reorderable = NPY_FALSE; PyObject *identity_obj = PyUFunc_GetDefaultIdentity( @@ -385,7 +385,7 @@ PyArray_NewLegacyWrappingArrayMethod(PyUFuncObject *ufunc, flags |= NPY_METH_IS_REORDERABLE; } if (identity_obj != Py_None) { - get_reduction_intial = &get_initial_from_ufunc; + get_reduction_initial = &get_initial_from_ufunc; } } for (int i = 0; i < ufunc->nin+ufunc->nout; i++) { @@ -401,7 +401,7 @@ PyArray_NewLegacyWrappingArrayMethod(PyUFuncObject *ufunc, PyType_Slot slots[4] = { {NPY_METH_get_loop, &get_wrapped_legacy_ufunc_loop}, {NPY_METH_resolve_descriptors, &simple_legacy_resolve_descriptors}, - {NPY_METH_get_reduction_initial, get_reduction_intial}, + {NPY_METH_get_reduction_initial, get_reduction_initial}, {0, NULL}, }; if (any_output_flexible) { diff --git a/numpy/_core/src/umath/special_integer_comparisons.cpp b/numpy/_core/src/umath/special_integer_comparisons.cpp index 06babeeda0a8..201a95b14dc0 100644 --- a/numpy/_core/src/umath/special_integer_comparisons.cpp +++ 
b/numpy/_core/src/umath/special_integer_comparisons.cpp @@ -141,7 +141,7 @@ get_value_range(PyObject *value, int type_num, int *range) } else { /* - * If we are checking for unisgned long long, the value may be larger + * If we are checking for unsigned long long, the value may be larger * then long long, but within range of unsigned long long. Check this * by doing the normal Python integer comparison. */ diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index 2ffdb55cf184..4b928296a8d3 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -4546,7 +4546,7 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, PyObject *keepdims_obj = NULL, *casting_obj = NULL, *order_obj = NULL; PyObject *subok_obj = NULL, *signature_obj = NULL, *sig_obj = NULL; PyObject *dtype_obj = NULL; - /* Typically, NumPy defaults to returnin scalars for 0-D results */ + /* Typically, NumPy defaults to returning scalars for 0-D results */ npy_bool return_scalar = NPY_TRUE; /* Skip parsing if there are no keyword arguments, nothing left to do */ @@ -4640,7 +4640,7 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, if ((where_obj != NULL && where_obj != Py_True) && (full_args.out == NULL) && (out_obj == NULL)) { if (PyErr_WarnEx(PyExc_UserWarning, - "'where' used without 'out', expect unitialized memory in output. " + "'where' used without 'out', expect uninitialized memory in output. 
" "If this is intentional, use out=None.", 1) < 0) { goto fail; } diff --git a/numpy/_core/src/umath/ufunc_type_resolution.c b/numpy/_core/src/umath/ufunc_type_resolution.c index eaea560e9b98..4a9d249c0dbc 100644 --- a/numpy/_core/src/umath/ufunc_type_resolution.c +++ b/numpy/_core/src/umath/ufunc_type_resolution.c @@ -1158,7 +1158,7 @@ PyUFunc_MultiplicationTypeResolver(PyUFuncObject *ufunc, return -1; } - // This is wrong agaian cause of elsize, but only the DType matters + // This is wrong again because of elsize, but only the DType matters // here (String or Unicode). out_dtypes[2] = out_dtypes[1]; Py_INCREF(out_dtypes[1]); diff --git a/numpy/_core/tests/test_api.py b/numpy/_core/tests/test_api.py index 216a2c75afb8..f3c10196eb13 100644 --- a/numpy/_core/tests/test_api.py +++ b/numpy/_core/tests/test_api.py @@ -475,7 +475,7 @@ def test_copyto_cast_safety(): np.copyto(np.arange(3, dtype=np.float32), 2e300, casting="safe") -def test_copyto_permut(): +def test_copyto_permute(): # test explicit overflow case pad = 500 l = [True] * pad + [True, True, True, True] diff --git a/numpy/_core/tests/test_array_coercion.py b/numpy/_core/tests/test_array_coercion.py index 96bbb679d6c9..c38d022c10e3 100644 --- a/numpy/_core/tests/test_array_coercion.py +++ b/numpy/_core/tests/test_array_coercion.py @@ -910,7 +910,7 @@ def test_empty_string(): @pytest.mark.parametrize("res_dt,hug_val", [("float16", "1e30"), ("float32", "1e200")]) def test_string_to_float_coercion_errors(dtype, res_dt, hug_val): - # This test primarly tests setitem + # This test primarily tests setitem val = np.array(["3M"], dtype=dtype)[0] # use the scalar with pytest.raises(ValueError): diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index c17f8afa3d3c..525060062e01 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -345,7 +345,7 @@ class TestTooManyArgsExtremum(_DeprecationTestCase): message = "Passing more than 2 
positional arguments to np.maximum and np.minimum " @pytest.mark.parametrize("ufunc", [np.minimum, np.maximum]) - def test_extremem_3_args(self, ufunc): + def test_extremum_3_args(self, ufunc): self.assert_deprecated(ufunc, args=(np.ones(1), np.zeros(1), np.empty(1))) diff --git a/numpy/_core/tests/test_dtype.py b/numpy/_core/tests/test_dtype.py index 7b5966d0a56b..953b7c552e10 100644 --- a/numpy/_core/tests/test_dtype.py +++ b/numpy/_core/tests/test_dtype.py @@ -1199,7 +1199,7 @@ def test_simple(self): def test_object_flag_not_inherited(self): # The following dtype still indicates "object", because its included - # in the unaccessible space (maybe this could change at some point): + # in the inaccessible space (maybe this could change at some point): arr = np.ones(3, "i,O,i")[["f0", "f2"]] assert arr.dtype.hasobject canonical_dt = np.result_type(arr.dtype) diff --git a/numpy/_core/tests/test_einsum.py b/numpy/_core/tests/test_einsum.py index dd6ce7d7aea1..6f4d38b8a97a 100644 --- a/numpy/_core/tests/test_einsum.py +++ b/numpy/_core/tests/test_einsum.py @@ -1184,7 +1184,7 @@ def assert_path_equal(self, comp, benchmark): ret &= (comp[pos + 1] == benchmark[pos + 1]) assert_(ret) - def test_memory_contraints(self): + def test_memory_constraints(self): # Ensure memory constraints are satisfied outer_test = self.build_operands('a,b,c->abc') diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 5f9fe44a7011..86e455e5f365 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -1400,7 +1400,7 @@ def test_ndmax_is_negative(self): with pytest.raises(ValueError, match="ndmax must be in the range"): np.array(data, ndmax=-1) - def test_ndmax_greather_than_NPY_MAXDIMS(self): + def test_ndmax_greater_than_NPY_MAXDIMS(self): data = [1, 2, 3] # current NPY_MAXDIMS is 64 with pytest.raises(ValueError, match="ndmax must be in the range"): @@ -1808,7 +1808,7 @@ def 
test_structured_cast_promotion_fieldorder(self): @pytest.mark.parametrize("align", [True, False]) def test_structured_promotion_packs(self, dtype_dict, align): # Structured dtypes are packed when promoted (we consider the packed - # form to be "canonical"), so tere is no extra padding. + # form to be "canonical"), so there is no extra padding. dtype = np.dtype(dtype_dict, align=align) # Remove non "canonical" dtype options: dtype_dict.pop("itemsize", None) @@ -8331,22 +8331,22 @@ def test_complex_warning(self): class TestMinScalarType: - def test_usigned_shortshort(self): + def test_unsigned_shortshort(self): dt = np.min_scalar_type(2**8 - 1) wanted = np.dtype('uint8') assert_equal(wanted, dt) - def test_usigned_short(self): + def test_unsigned_short(self): dt = np.min_scalar_type(2**16 - 1) wanted = np.dtype('uint16') assert_equal(wanted, dt) - def test_usigned_int(self): + def test_unsigned_int(self): dt = np.min_scalar_type(2**32 - 1) wanted = np.dtype('uint32') assert_equal(wanted, dt) - def test_usigned_longlong(self): + def test_unsigned_longlong(self): dt = np.min_scalar_type(2**63 - 1) wanted = np.dtype('uint64') assert_equal(wanted, dt) diff --git a/numpy/_core/tests/test_numerictypes.py b/numpy/_core/tests/test_numerictypes.py index 3fd26c32a500..1cb204a69c2c 100644 --- a/numpy/_core/tests/test_numerictypes.py +++ b/numpy/_core/tests/test_numerictypes.py @@ -257,7 +257,7 @@ def test_access_top_fields(self): assert_equal(h['z'], np.array([self._buffer[0][5], self._buffer[1][5]], dtype='u1')) - def test_nested1_acessors(self): + def test_nested1_accessors(self): """Check reading the nested fields of a nested array (1st level)""" h = np.array(self._buffer, dtype=self._descr) if not self.multiple_rows: @@ -287,7 +287,7 @@ def test_nested1_acessors(self): self._buffer[1][3][1]], dtype='c16')) - def test_nested2_acessors(self): + def test_nested2_accessors(self): """Check reading the nested fields of a nested array (2nd level)""" h = np.array(self._buffer, 
dtype=self._descr) if not self.multiple_rows: @@ -573,7 +573,7 @@ def test_names_reflect_attributes(self, t): assert getattr(np, t.__name__) is t @pytest.mark.parametrize('t', numeric_types) - def test_names_are_undersood_by_dtype(self, t): + def test_names_are_understood_by_dtype(self, t): """ Test the dtype constructor maps names back to the type """ assert np.dtype(t.__name__).type is t diff --git a/numpy/_core/tests/test_scalarbuffer.py b/numpy/_core/tests/test_scalarbuffer.py index 4d2744b85e53..f467c060d6fc 100644 --- a/numpy/_core/tests/test_scalarbuffer.py +++ b/numpy/_core/tests/test_scalarbuffer.py @@ -134,7 +134,7 @@ def test_str_ucs4(self, s): v = memoryview(s) assert self._as_dict(v) == expected - # integers of the paltform-appropriate endianness + # integers of the platform-appropriate endianness code_points = np.frombuffer(v, dtype='i4') assert_equal(code_points, [ord(c) for c in s]) diff --git a/numpy/_core/tests/test_scalarmath.py b/numpy/_core/tests/test_scalarmath.py index 2f01a1dea16c..4b5bceff2316 100644 --- a/numpy/_core/tests/test_scalarmath.py +++ b/numpy/_core/tests/test_scalarmath.py @@ -397,7 +397,7 @@ def test_inplace_floordiv_handling(self): a //= b class TestComparison: - def test_comparision_different_types(self): + def test_comparison_different_types(self): x = np.array(1) y = np.array('s') eq = x == y @@ -1066,7 +1066,7 @@ def test_longdouble_complex(): def test_pyscalar_subclasses(subtype, __op__, __rop__, op, cmp): # This tests that python scalar subclasses behave like a float64 (if they # don't override it). - # In an earlier version of NEP 50, they behaved like the Python buildins. + # In an earlier version of NEP 50, they behaved like the Python builtins. 
def op_func(self, other): return __op__ diff --git a/numpy/_core/tests/test_strings.py b/numpy/_core/tests/test_strings.py index 5a4b0a6a7f32..a4eff7a0f7b1 100644 --- a/numpy/_core/tests/test_strings.py +++ b/numpy/_core/tests/test_strings.py @@ -193,7 +193,7 @@ def test_large_string_cast(): @pytest.mark.parametrize("dt", ["S1", "U1"]) -def test_in_place_mutiply_no_overflow(dt): +def test_in_place_multiply_no_overflow(dt): # see gh-30495 a = np.array("a", dtype=dt) a *= 20 diff --git a/numpy/_globals.py b/numpy/_globals.py index ada8d5c41af0..76f380b3e23e 100644 --- a/numpy/_globals.py +++ b/numpy/_globals.py @@ -97,7 +97,7 @@ def __bool__(self): class _SignatureDescriptor: - # A descriptor to store on the ufunc __dict__ that avoids definig a + # A descriptor to store on the ufunc __dict__ that avoids defining a # signature for the ufunc class/type but allows the instance to have one. # This is needed because inspect.signature() chokes on normal properties # (as of 3.14 at least). diff --git a/numpy/_typing/_nested_sequence.py b/numpy/_typing/_nested_sequence.py index 13711be397e9..6755c2ec0ec9 100644 --- a/numpy/_typing/_nested_sequence.py +++ b/numpy/_typing/_nested_sequence.py @@ -15,7 +15,7 @@ class _NestedSequence[T](Protocol): Warning ------- `_NestedSequence` currently does not work in combination with typevars, - *e.g.* ``def func(a: _NestedSequnce[T]) -> T: ...``. + *e.g.* ``def func(a: _NestedSequence[T]) -> T: ...``. 
See Also -------- diff --git a/numpy/_utils/__init__.py b/numpy/_utils/__init__.py index 84ee99db1be8..a64482539b9a 100644 --- a/numpy/_utils/__init__.py +++ b/numpy/_utils/__init__.py @@ -11,7 +11,7 @@ import functools import warnings -from ._convertions import asbytes, asunicode +from ._conversions import asbytes, asunicode def set_module(module): diff --git a/numpy/_utils/__init__.pyi b/numpy/_utils/__init__.pyi index 7a78cabe60f3..f4c27cf2917f 100644 --- a/numpy/_utils/__init__.pyi +++ b/numpy/_utils/__init__.pyi @@ -2,7 +2,7 @@ from _typeshed import IdentityFunction from collections.abc import Callable, Iterable from typing import Protocol, overload, type_check_only -from ._convertions import asbytes as asbytes, asunicode as asunicode +from ._conversions import asbytes as asbytes, asunicode as asunicode ### diff --git a/numpy/_utils/_convertions.py b/numpy/_utils/_conversions.py similarity index 100% rename from numpy/_utils/_convertions.py rename to numpy/_utils/_conversions.py diff --git a/numpy/_utils/_convertions.pyi b/numpy/_utils/_conversions.pyi similarity index 100% rename from numpy/_utils/_convertions.pyi rename to numpy/_utils/_conversions.pyi diff --git a/numpy/conftest.py b/numpy/conftest.py index c8f810aeda64..d116d6e65727 100644 --- a/numpy/conftest.py +++ b/numpy/conftest.py @@ -23,9 +23,9 @@ try: import pytest_run_parallel # noqa: F401 - PARALLEL_RUN_AVALIABLE = True + PARALLEL_RUN_AVAILABLE = True except ModuleNotFoundError: - PARALLEL_RUN_AVALIABLE = False + PARALLEL_RUN_AVAILABLE = False _old_fpu_mode = None _collect_results = {} @@ -66,7 +66,7 @@ def pytest_configure(config): "leaks_references: Tests that are known to leak references.") config.addinivalue_line("markers", "slow: Tests that are very slow.") - if not PARALLEL_RUN_AVALIABLE: + if not PARALLEL_RUN_AVAILABLE: config.addinivalue_line("markers", "parallel_threads(n): run the given test function in parallel " "using `n` threads.", diff --git 
a/numpy/f2py/tests/test_array_from_pyobj.py b/numpy/f2py/tests/test_array_from_pyobj.py index 9046f2df4fa4..ee57d5d65b2f 100644 --- a/numpy/f2py/tests/test_array_from_pyobj.py +++ b/numpy/f2py/tests/test_array_from_pyobj.py @@ -663,7 +663,7 @@ def test_inplace(self): a = self.array(shape, intent.inplace, obj) # Spot check that they contain the same information initially. assert obj[1][2] == a.arr[1][2], repr((obj, a.arr)) - # If we change a.arr, that will not immediatetly be reflected in obj. + # If we change a.arr, that will not immediately be reflected in obj. change_item = 54 if self.type.dtype != bool else False a.arr[1][2] = change_item assert a.arr[1][2] == np.array(change_item, dtype=self.type.dtype) @@ -673,7 +673,7 @@ def test_inplace(self): assert a.arr.base is obj # It has a different organization from obj. assert a.arr.flags["FORTRAN"] and not a.arr.flags["CONTIGUOUS"] - # If we resolve the write-back, obj will be propertly filled. + # If we resolve the write-back, obj will be properly filled. code = wrap.resolve_write_back_if_copy(a.arr) assert code == 1, "no write-back resolution was done!" 
assert obj[1][2] == np.array(change_item, dtype=self.type.dtype) diff --git a/numpy/f2py/tests/test_crackfortran.py b/numpy/f2py/tests/test_crackfortran.py index c3967cfb967b..aeef49d1f4b0 100644 --- a/numpy/f2py/tests/test_crackfortran.py +++ b/numpy/f2py/tests/test_crackfortran.py @@ -60,7 +60,7 @@ def test_access_type(self, tmp_path): assert set(tt['b_']['attrspec']) == {'public', 'bind(c)'} assert set(tt['c']['attrspec']) == {'public'} - def test_nowrap_private_proceedures(self, tmp_path): + def test_nowrap_private_procedures(self, tmp_path): fpath = util.getpath("tests", "src", "crackfortran", "gh23879.f90") mod = crackfortran.crackfortran([str(fpath)]) assert len(mod) == 1 diff --git a/numpy/f2py/tests/test_regression.py b/numpy/f2py/tests/test_regression.py index d6738cdc4552..96d92aaaa292 100644 --- a/numpy/f2py/tests/test_regression.py +++ b/numpy/f2py/tests/test_regression.py @@ -123,7 +123,7 @@ def test_gh26466(self): res = self.module.testsub2() npt.assert_allclose(expected, res) -class TestF90Contiuation(util.F2PyTest): +class TestF90Continuation(util.F2PyTest): # Check that comments are stripped from F90 continuation lines sources = [util.getpath("tests", "src", "regression", "f90continuation.f90")] diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 4548bb19ac0a..0d1b9a0331cf 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -1956,7 +1956,7 @@ def trim_zeros(filt, trim='fb', axis=None): returned that still contains all values which are not zero. If an axis is specified, `filt` will be sliced in that dimension only on the sides specified by `trim`. The remaining area will be the - smallest that still contains all values wich are not zero. + smallest that still contains all values which are not zero. .. versionadded:: 2.2.0 @@ -5070,7 +5070,7 @@ def meshgrid(*xi, copy=True, sparse=False, indexing='xy'): ``(1, ..., 1, Ni, 1, ..., 1)``. 
These sparse coordinate grids are intended to be used with :ref:`basics.broadcasting`. When all coordinates are used in an expression, broadcasting still leads to a - fully-dimensonal result array. + fully-dimensional result array. Default is False. diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 515ea92eccfa..ac07c2388c3d 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -507,9 +507,9 @@ class subclass(np.ndarray): assert_equal(type(rw), subclass) def test_upcasting(self): - typs = [('i4', 'i4', 'f8'), ('i4', 'f4', 'f8'), ('f4', 'i4', 'f8'), + types = [('i4', 'i4', 'f8'), ('i4', 'f4', 'f8'), ('f4', 'i4', 'f8'), ('f4', 'f4', 'f4'), ('f4', 'f8', 'f8')] - for at, wt, rt in typs: + for at, wt, rt in types: a = np.array([[1, 2], [3, 4]], dtype=at) w = np.array([[1, 2], [3, 4]], dtype=wt) assert_equal(np.average(a, weights=w).dtype, np.dtype(rt)) diff --git a/numpy/lib/tests/test_loadtxt.py b/numpy/lib/tests/test_loadtxt.py index b50478209520..dcac6f1a866d 100644 --- a/numpy/lib/tests/test_loadtxt.py +++ b/numpy/lib/tests/test_loadtxt.py @@ -529,7 +529,7 @@ def test_quoted_field(q): @pytest.mark.parametrize("q", ('"', "'", "`")) -def test_quoted_field_with_whitepace_delimiter(q): +def test_quoted_field_with_whitespace_delimiter(q): txt = StringIO( f"{q}alpha, x{q} 2.5\n{q}beta, y{q} 4.5\n{q}gamma, z{q} 5.0\n" ) diff --git a/numpy/lib/tests/test_nanfunctions.py b/numpy/lib/tests/test_nanfunctions.py index 6ef86bf84ee0..f67adcacdb9f 100644 --- a/numpy/lib/tests/test_nanfunctions.py +++ b/numpy/lib/tests/test_nanfunctions.py @@ -835,7 +835,7 @@ def test_nanstd_with_mean_keyword(self): "Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", "ps", "fs", "as" ) -# All `inexact` + `timdelta64` type codes +# All `inexact` + `timedelta64` type codes _TYPE_CODES = list(np.typecodes["AllFloat"]) _TYPE_CODES += [f"m8[{unit}]" for unit in _TIME_UNITS] diff --git 
a/numpy/ma/tests/test_extras.py b/numpy/ma/tests/test_extras.py index 1993ffe3e90d..a210e72de46c 100644 --- a/numpy/ma/tests/test_extras.py +++ b/numpy/ma/tests/test_extras.py @@ -1231,7 +1231,7 @@ def test_nan_behavior(self): b[2] = np.nan assert_equal(np.ma.median(a, (0, 2)), b) - def test_ambigous_fill(self): + def test_ambiguous_fill(self): # 255 is max value, used as filler for sort a = np.array([[3, 3, 255], [3, 3, 255]], dtype=np.uint8) a = np.ma.masked_array(a, mask=a == 3) diff --git a/numpy/matrixlib/tests/test_defmatrix.py b/numpy/matrixlib/tests/test_defmatrix.py index 93154c3c2207..397767037b91 100644 --- a/numpy/matrixlib/tests/test_defmatrix.py +++ b/numpy/matrixlib/tests/test_defmatrix.py @@ -323,7 +323,7 @@ def test_basic(self): class TestNewScalarIndexing: a = matrix([[1, 2], [3, 4]]) - def test_dimesions(self): + def test_dimensions(self): a = self.a x = a[0] assert_equal(x.ndim, 2) diff --git a/numpy/random/tests/test_direct.py b/numpy/random/tests/test_direct.py index bdd2ee7d633e..d85fed78617f 100644 --- a/numpy/random/tests/test_direct.py +++ b/numpy/random/tests/test_direct.py @@ -458,7 +458,7 @@ def test_advance_symmetry(self): assert val_neg == val_pos assert val_big == val_pos - def test_advange_large(self): + def test_advance_large(self): rs = Generator(self.bit_generator(38219308213743)) pcg = rs.bit_generator state = pcg.state["state"] @@ -497,7 +497,7 @@ def test_advance_symmetry(self): assert val_neg == val_pos assert val_big == val_pos - def test_advange_large(self): + def test_advance_large(self): rs = Generator(self.bit_generator(38219308213743)) pcg = rs.bit_generator state = pcg.state diff --git a/numpy/random/tests/test_generator_mt19937.py b/numpy/random/tests/test_generator_mt19937.py index 7d13c49149b3..2f066fb725b9 100644 --- a/numpy/random/tests/test_generator_mt19937.py +++ b/numpy/random/tests/test_generator_mt19937.py @@ -1685,7 +1685,7 @@ def test_standard_exponential(self): [1.093830802293668, 1.256977002164613]]) 
assert_array_almost_equal(actual, desired, decimal=15) - def test_standard_expoential_type_error(self): + def test_standard_exponential_type_error(self): assert_raises(TypeError, random.standard_exponential, dtype=np.int32) def test_standard_gamma(self): diff --git a/numpy/random/tests/test_generator_mt19937_regressions.py b/numpy/random/tests/test_generator_mt19937_regressions.py index 21093ef73eb6..3b82ab414917 100644 --- a/numpy/random/tests/test_generator_mt19937_regressions.py +++ b/numpy/random/tests/test_generator_mt19937_regressions.py @@ -112,7 +112,7 @@ def test_beta_expected_zero_frequency(self): # # CDF of the beta distribution at x: # p = mp.betainc(a, b, x1=0, x2=x, regularized=True) # n = 1000000 - # exprected_freq = float(n*p) + # expected_freq = float(n*p) # expected_freq = 77616.90831318991 assert 0.95 * expected_freq < nzeros < 1.05 * expected_freq diff --git a/numpy/random/tests/test_randomstate_regression.py b/numpy/random/tests/test_randomstate_regression.py index 1c8882d1b672..e5d68c2f7ab7 100644 --- a/numpy/random/tests/test_randomstate_regression.py +++ b/numpy/random/tests/test_randomstate_regression.py @@ -161,7 +161,7 @@ def test_named_argument_initialization(self): rs2 = np.random.RandomState(seed=123456789) assert rs1.randint(0, 100) == rs2.randint(0, 100) - def test_choice_retun_dtype(self): + def test_choice_return_dtype(self): # GH 9867, now long since the NumPy default changed. 
c = np.random.choice(10, p=[.1] * 10, size=2) assert c.dtype == np.dtype(np.long) diff --git a/numpy/typing/tests/data/fail/ndarray_misc.pyi b/numpy/typing/tests/data/fail/ndarray_misc.pyi index 29418930061c..47d797291cfd 100644 --- a/numpy/typing/tests/data/fail/ndarray_misc.pyi +++ b/numpy/typing/tests/data/fail/ndarray_misc.pyi @@ -21,7 +21,7 @@ f8.argpartition(0) # type: ignore[attr-defined] f8.partition(0) # type: ignore[attr-defined] f8.dot(1) # type: ignore[attr-defined] -# NOTE: The following functions retur `Never`, causing mypy to stop analysis at that +# NOTE: The following functions return `Never`, causing mypy to stop analysis at that # point, which we circumvent by wrapping them in a function. def f8_diagonal(x: np.float64) -> Never: From 9b6f36ba4a539aa413adbf04b402e1d6aaed9d8a Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Thu, 26 Mar 2026 17:15:54 +0200 Subject: [PATCH 1537/1718] MAINT: Fix typos in NEPs --- doc/neps/nep-0025-missing-data-3.rst | 2 +- ...27-zero-rank-arrarys.rst => nep-0027-zero-rank-arrays.rst} | 0 doc/neps/nep-0040-legacy-datatype-impl.rst | 2 +- doc/neps/nep-0043-extensible-ufuncs.rst | 4 ++-- doc/neps/nep-0053-c-abi-evolution.rst | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) rename doc/neps/{nep-0027-zero-rank-arrarys.rst => nep-0027-zero-rank-arrays.rst} (100%) diff --git a/doc/neps/nep-0025-missing-data-3.rst b/doc/neps/nep-0025-missing-data-3.rst index 1756ce491188..1cea24fc8ee8 100644 --- a/doc/neps/nep-0025-missing-data-3.rst +++ b/doc/neps/nep-0025-missing-data-3.rst @@ -432,7 +432,7 @@ follows: * strings: the first byte (or, in the case of unicode strings, first 4 bytes) is used as a flag to indicate NA, and the rest of the data gives the actual string. 
(no R compatibility possible) -* objects: Two options (FIXME): either we don't include an NA-ful version, or +* objects: Two options (FIXME): either we don't include an NA-full version, or we use np.NA as the NA bit pattern. * boolean: we do whatever R does (FIXME: look this up -- 0 == FALSE, 1 == TRUE, 2 == NA?) diff --git a/doc/neps/nep-0027-zero-rank-arrarys.rst b/doc/neps/nep-0027-zero-rank-arrays.rst similarity index 100% rename from doc/neps/nep-0027-zero-rank-arrarys.rst rename to doc/neps/nep-0027-zero-rank-arrays.rst diff --git a/doc/neps/nep-0040-legacy-datatype-impl.rst b/doc/neps/nep-0040-legacy-datatype-impl.rst index 6fa652eb07ee..3aacc5e9d75d 100644 --- a/doc/neps/nep-0040-legacy-datatype-impl.rst +++ b/doc/neps/nep-0040-legacy-datatype-impl.rst @@ -523,7 +523,7 @@ in the array and: If ``dtype=...`` is given, this dtype is used unmodified, unless it is an unspecific *parametric dtype instance* which means "S0", "V0", "U0", -"datetime64", and "timdelta64". +"datetime64", and "timedelta64". These are thus flexible datatypes without length 0 – considered to be unsized – and datetimes or timedelta without a unit attached ("generic unit"). diff --git a/doc/neps/nep-0043-extensible-ufuncs.rst b/doc/neps/nep-0043-extensible-ufuncs.rst index 1370e14d6c4e..b10e21b1e9d8 100644 --- a/doc/neps/nep-0043-extensible-ufuncs.rst +++ b/doc/neps/nep-0043-extensible-ufuncs.rst @@ -316,7 +316,7 @@ the following: .. 
code-block:: python def promote_timedelta_integer(ufunc, dtypes): - new_dtypes = (Timdelta64, Int64, dtypes[-1]) + new_dtypes = (Timedelta64, Int64, dtypes[-1]) # Resolve again, using Int64: return ufunc.resolve_impl(new_dtypes) @@ -609,7 +609,7 @@ definitions (see also :ref:`NEP 42 ` ``CastingImpl``): int nin, nout; PyArray_DTypeMeta **dtypes; - /* Operand descriptors, filled in by resolve_desciptors */ + /* Operand descriptors, filled in by resolve_descriptors */ PyArray_Descr **descriptors; void *reserved; // For Potential in threading (Interpreter state) diff --git a/doc/neps/nep-0053-c-abi-evolution.rst b/doc/neps/nep-0053-c-abi-evolution.rst index 16744dc0fde3..c0193af6732c 100644 --- a/doc/neps/nep-0053-c-abi-evolution.rst +++ b/doc/neps/nep-0053-c-abi-evolution.rst @@ -47,7 +47,7 @@ The implementation of this NEP consists would consist of two steps: Motivation and scope ==================== -The NumPy API conists of more than 300 functions and numerous macros. +The NumPy API consists of more than 300 functions and numerous macros. Many of these are outdated: some were only ever used within NumPy, exist only for compatibility with NumPy's predecessors, or have no or only a single known downstream user (i.e. SciPy). 
From d6b70c4c338929ab9f5870e4dedfe02791e150ba Mon Sep 17 00:00:00 2001 From: Harshith J Date: Thu, 26 Mar 2026 22:59:55 +0530 Subject: [PATCH 1538/1718] TST: fix POWER VSX feature mapping (#30801) --- numpy/_core/tests/test_cpu_features.py | 34 +++++++++++++++++++++++++- 1 file changed, 33 insertions(+), 1 deletion(-) diff --git a/numpy/_core/tests/test_cpu_features.py b/numpy/_core/tests/test_cpu_features.py index c95886752949..1f814c99db4b 100644 --- a/numpy/_core/tests/test_cpu_features.py +++ b/numpy/_core/tests/test_cpu_features.py @@ -387,10 +387,42 @@ def load_flags(self): @pytest.mark.skipif(not is_linux or not is_power, reason="Only for Linux and Power") class Test_POWER_Features(AbstractTest): features = ["VSX", "VSX2", "VSX3", "VSX4"] - features_map = {"VSX2": "ARCH_2_07", "VSX3": "ARCH_3_00", "VSX4": "ARCH_3_1"} + features_map = { + "VSX": "ARCH_2_06", + "VSX2": "ARCH_2_07", + "VSX3": "ARCH_3_00", + "VSX4": "ARCH_3_1B" + } def load_flags(self): self.load_flags_auxv() + platform = self._get_platform() + + if platform: + power_match = re.search(r'power(\d+)', platform, re.IGNORECASE) + if power_match: + power_gen = int(power_match.group(1)) + if power_gen >= 7: + self.features_flags.add("ARCH_2_06") + if power_gen >= 8: + self.features_flags.add("ARCH_2_07") + if power_gen >= 9: + self.features_flags.add("ARCH_3_00") + if power_gen >= 10: + self.features_flags.add("ARCH_3_1B") + + def _get_platform(self): + """Get the AT_PLATFORM value from AUXV""" + try: + auxv = subprocess.check_output(['/bin/true'], env={"LD_SHOW_AUXV": "1"}) + for line in auxv.split(b'\n'): + if line.startswith(b'AT_PLATFORM'): + parts = line.split(b':', 1) + if len(parts) == 2: + return parts[1].strip().decode().lower() + except Exception: + pass + return None is_zarch = re.match(r"^(s390x)", machine, re.IGNORECASE) From 82a86f1aa4667fd1942d57b024d3a6df4a2f7e94 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 26 Mar 2026 
13:26:26 -0600 Subject: [PATCH 1539/1718] MAINT: Bump pyrefly from 0.57.0 to 0.57.1 in /requirements (#31078) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements/typing_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/typing_requirements.txt b/requirements/typing_requirements.txt index 1a44fa2e303e..713bebb044db 100644 --- a/requirements/typing_requirements.txt +++ b/requirements/typing_requirements.txt @@ -3,4 +3,4 @@ -r test_requirements.txt mypy==1.19.1 -pyrefly==0.57.0 +pyrefly==0.57.1 From f368632dfc77d50660f5d2dd8dba24c18c3b3b08 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Fri, 27 Mar 2026 12:04:35 +0200 Subject: [PATCH 1540/1718] Remove reference to distutils Co-authored-by: Joren Hammudoglu --- numpy/_core/src/common/npy_cpu_dispatch.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/common/npy_cpu_dispatch.h b/numpy/_core/src/common/npy_cpu_dispatch.h index 13f34159b235..1bebc3b01be3 100644 --- a/numpy/_core/src/common/npy_cpu_dispatch.h +++ b/numpy/_core/src/common/npy_cpu_dispatch.h @@ -79,7 +79,7 @@ npy_cpu_dispatch_trace(const char *func_name, const char *signature, * Extract the enabled CPU targets from the generated configuration file. * * This macro is used to extract the enabled CPU targets from the generated configuration file, - * which is derived from 'meson.multi_targets()' or from 'disuttils.CCompilerOpt' in the case of using distutils. + * which is derived from 'meson.multi_targets()'. * It then calls 'npy_cpu_dispatch_trace()' to insert a new item into the '__cpu_targets_info__' dictionary, * based on the provided FUNC_NAME and SIGNATURE. 
* From 7d00fa00c37e9b3b0c441858fa691382eb7149c1 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Fri, 27 Mar 2026 12:06:24 +0200 Subject: [PATCH 1541/1718] Ouch... I had replaced a typo with a different typo! --- numpy/_core/src/multiarray/calculation.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/multiarray/calculation.c b/numpy/_core/src/multiarray/calculation.c index 2171e967c4dc..a9d2f219760d 100644 --- a/numpy/_core/src/multiarray/calculation.c +++ b/numpy/_core/src/multiarray/calculation.c @@ -654,7 +654,7 @@ PyArray_Round(PyArrayObject *a, int decimals, PyArrayObject *out) op2 = n_ops.multiply; if (decimals == INT_MIN) { // not technically correct but it doesn't matter because no one in - // this milllenium is using floating point numbers with enough + // this millennium is using floating point numbers with enough // accuracy for this to matter decimals = INT_MAX; } From e13e931465415da97890ea6d4c95b14d4d2e68c3 Mon Sep 17 00:00:00 2001 From: Denis Prokopenko <22414094+denproc@users.noreply.github.com> Date: Sat, 28 Mar 2026 17:04:08 +0000 Subject: [PATCH 1542/1718] MAINT: numpy.i: Replace deprecated `sprintf` with `snprintf` (#31056) --- tools/swig/numpy.i | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/tools/swig/numpy.i b/tools/swig/numpy.i index 747446648c8b..a7392abb20d9 100644 --- a/tools/swig/numpy.i +++ b/tools/swig/numpy.i @@ -40,6 +40,7 @@ #define NO_IMPORT_ARRAY #endif #include "stdio.h" +#include #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION #include %} @@ -457,11 +458,11 @@ void free_cap(PyObject * cap) { for (i = 0; i < n-1; i++) { - sprintf(s, "%d, ", exact_dimensions[i]); - strcat(dims_str,s); + snprintf(s, sizeof(s), "%d, ", exact_dimensions[i]); + strncat(dims_str, s, sizeof(dims_str) - strlen(dims_str) - 1); } - sprintf(s, " or %d", exact_dimensions[n-1]); - strcat(dims_str,s); + snprintf(s, 
sizeof(s), " or %d", exact_dimensions[n-1]); + strncat(dims_str, s, sizeof(dims_str) - strlen(dims_str) - 1); PyErr_Format(PyExc_TypeError, "Array must have %s dimensions. Given array has %d dimensions", dims_str, @@ -497,20 +498,20 @@ void free_cap(PyObject * cap) { if (size[i] == -1) { - sprintf(s, "*,"); + snprintf(s, sizeof(s), "*,"); } else { - sprintf(s, "%ld,", (long int)size[i]); + snprintf(s, sizeof(s), "%ld,", (long int)size[i]); } - strcat(desired_dims,s); + strncat(desired_dims, s, sizeof(desired_dims) - strlen(desired_dims) - 1); } len = strlen(desired_dims); desired_dims[len-1] = ']'; for (i = 0; i < n; i++) { - sprintf(s, "%ld,", (long int)array_size(ary,i)); - strcat(actual_dims,s); + snprintf(s, sizeof(s), "%ld,", (long int)array_size(ary,i)); + strncat(actual_dims, s, sizeof(actual_dims) - strlen(actual_dims) - 1); } len = strlen(actual_dims); actual_dims[len-1] = ']'; From 30dcf3cf9e844c6e36f780cb19fb6153ff74a59d Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Sun, 29 Mar 2026 11:48:00 +0200 Subject: [PATCH 1543/1718] ENH: Add internal real/imag ufuncs, use for attributes and refine object dtype (#30984) Restructure real/imag attributes to use an `ArrayMethod` that can be registered similar to other ufuncs and sorting functions. Object dtype is changed here to use `getattr(element, "real", element)` and `getattr(element, "imag", 0)` as a step in a more reliable direction. The returned array is marked as read-only to prevent trying to modify `.real/.imag` results directly. (If the object changes are problematic, they can be modified again. The main reason for this is also to allow user DTypes to define this correctly.) 
--- doc/release/upcoming_changes/30984.capi.rst | 5 + doc/release/upcoming_changes/30984.change.rst | 15 + doc/source/reference/c-api/array.rst | 23 +- numpy/_core/_add_newdocs.py | 8 + numpy/_core/code_generators/generate_umath.py | 12 + .../_core/code_generators/ufunc_docstrings.py | 56 ++++ numpy/_core/include/numpy/dtype_api.h | 7 +- numpy/_core/meson.build | 1 + .../_core/src/common/lowlevel_strided_loops.h | 11 +- numpy/_core/src/common/npy_import.h | 2 - numpy/_core/src/multiarray/array_method.c | 2 +- numpy/_core/src/multiarray/convert_datatype.c | 3 +- numpy/_core/src/multiarray/dtypemeta.h | 3 + numpy/_core/src/multiarray/getset.c | 200 ++++++------ numpy/_core/src/multiarray/npy_static_data.c | 4 + numpy/_core/src/multiarray/npy_static_data.h | 4 + numpy/_core/src/multiarray/number.c | 2 + numpy/_core/src/multiarray/number.h | 2 + numpy/_core/src/umath/_scaled_float_dtype.c | 5 +- numpy/_core/src/umath/dispatching.cpp | 141 +++++---- numpy/_core/src/umath/real_imag_ufuncs.cpp | 297 ++++++++++++++++++ numpy/_core/src/umath/real_imag_ufuncs.h | 15 + .../src/umath/special_integer_comparisons.cpp | 4 +- numpy/_core/src/umath/umathmodule.c | 5 + numpy/_core/tests/test_arrayobject.py | 102 ++++++ numpy/lib/_type_check_impl.py | 16 +- 26 files changed, 754 insertions(+), 191 deletions(-) create mode 100644 doc/release/upcoming_changes/30984.capi.rst create mode 100644 doc/release/upcoming_changes/30984.change.rst create mode 100644 numpy/_core/src/umath/real_imag_ufuncs.cpp create mode 100644 numpy/_core/src/umath/real_imag_ufuncs.h diff --git a/doc/release/upcoming_changes/30984.capi.rst b/doc/release/upcoming_changes/30984.capi.rst new file mode 100644 index 000000000000..e2a41c10424f --- /dev/null +++ b/doc/release/upcoming_changes/30984.capi.rst @@ -0,0 +1,5 @@ +* It is now possible to register ``"real"`` and ``"imag"`` + ArrayMethods via ``PyUFunc_AddLoopsFromSpecs``. 
These will + be used for ``imag`` and ``real`` and should normally + set ``*view_offset`` in their ``resolve_descriptors`` function + to allow the array attributes to return views. diff --git a/doc/release/upcoming_changes/30984.change.rst b/doc/release/upcoming_changes/30984.change.rst new file mode 100644 index 000000000000..16bb631b30e9 --- /dev/null +++ b/doc/release/upcoming_changes/30984.change.rst @@ -0,0 +1,15 @@ +``object`` dtype in ``.real`` and ``.imag`` and related functions +----------------------------------------------------------------- +The array attributes ``.real`` and ``.imag`` now behave differently +for object arrays and return ``getattr(element, "real", element)`` +or ``getattr(element, "imag", 0)`` elementwise. +Additionally, the return for both is now read-only to avoid possible +in-place changes having no effect. + +This change also affects ``np.isreal()`` which uses ``arr.imag``. + +Previously, ``.imag`` always returned ``0`` while ``.real`` returned +the original array unmodified. +The new behavior now returnes the correct values for complex Python +objects but may also lead to surprises for example if ``element.real()`` +is a method and not a property. diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst index 39ff96ae901c..92b57daf04c9 100644 --- a/doc/source/reference/c-api/array.rst +++ b/doc/source/reference/c-api/array.rst @@ -1928,6 +1928,9 @@ with the rest of the ArrayMethod API. entry points, ``(module ':')? (object '.')* name``, with ``numpy`` the default module. Examples: ``sin``, ``strings.str_len``, ``numpy.strings:str_len``. + Note that some names are supported but do not directly correspond + to ufuncs: ``"sort"``, ``"argsort"``, ``"real"``, ``"imag"``. + (These do use ufunc-likes or even ufuncs internally.) .. c:member:: PyArrayMethod_Spec *spec @@ -1940,7 +1943,9 @@ with the rest of the ArrayMethod API. Add multiple loops to ufuncs from ArrayMethod specs. 
This also handles the registration of methods for the ufunc-like functions - ``sort`` and ``argsort``. See :ref:`array-methods-sorting` for details. + ``sort`` and ``argsort`` (see :ref:`array-methods-sorting` for details), + as well as for the array attributes ``.real`` and ``.imag`` needed + for user defined complex DTypes (with ``"real"`` and ``"imag"`` as names). The ``slots`` argument must be a NULL-terminated array of `PyUFunc_LoopSlot` (see above), which give the name of the @@ -1971,6 +1976,22 @@ with the rest of the ArrayMethod API. attempt a search for a new loop or promoter that can accomplish the operation by casting the inputs to the "promoted" DTypes. + A promoter should honor ``signature[]`` (if set). A promoter must return ``-1`` + on failure. A Python error may be set but is not required (a general error is + set in either paths, although the original error is chained). + A promoter must return ``0`` or ``1`` on success. NumPy normally checks that + ``new_op_dtypes`` are different from ``op_dtypes`` to prevent recursion. + This check is skipped if the promoter returns ``1``, which allows the promoter + to add a new loop (when adding a new loop, ``new_op_dtypes`` should be identical + to ``op_dtypes``). + + .. versionchanged:: 2.5 + After 2.5 a return of ``1`` indicates that the promoter was successful + skipping a recursion protection step. + This mainly allows the promoter to add new loop to the ufunc that must + now match instead of the promoter itself. + (Normally, a promoter must modify the DTypes help find the right loop.) + .. c:function:: int PyUFunc_GiveFloatingpointErrors( \ const char *name, int fpe_errors) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index fd9998381105..63ede7cc2c0b 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -2665,6 +2665,11 @@ """ The imaginary part of the array. + Returns a view into the original array for complex arrays. 
+ For non-complex arrays, returns a zero array of the same dtype. + For ``object`` arrays returns elementwise ``.imag`` or ``0`` + if ``.imag`` is undefined. + Examples -------- >>> import numpy as np @@ -2856,6 +2861,9 @@ """ The real part of the array. + Usually returns a view into the original array, but returns + elementwise ``.real`` for arrays of objects. + Examples -------- >>> import numpy as np diff --git a/numpy/_core/code_generators/generate_umath.py b/numpy/_core/code_generators/generate_umath.py index 43cfe6f9d5e3..b0bcbe6d4ec7 100644 --- a/numpy/_core/code_generators/generate_umath.py +++ b/numpy/_core/code_generators/generate_umath.py @@ -1172,6 +1172,18 @@ def english_upper(s): TD(O), signature='(n),(n,m)->(m)', ), +# Real and imag ufunc helpers (loops added later): +'real': + Ufunc(1, 1, None, + docstrings.get('numpy._core.umath.real'), + None, + ), +'imag': + Ufunc(1, 1, None, + docstrings.get('numpy._core.umath.imag'), + None, + ), +# String ufuncs (loops added later): 'str_len': Ufunc(1, 1, Zero, docstrings.get('numpy._core.umath.str_len'), diff --git a/numpy/_core/code_generators/ufunc_docstrings.py b/numpy/_core/code_generators/ufunc_docstrings.py index 40842b1cea43..e3e0f9bb134b 100644 --- a/numpy/_core/code_generators/ufunc_docstrings.py +++ b/numpy/_core/code_generators/ufunc_docstrings.py @@ -4522,6 +4522,62 @@ def add_newdoc(place, name, doc): """) +add_newdoc('numpy._core.umath', 'real', + """ + Returns the real part of the elements in the array. + + Parameters + ---------- + x : array_like + $PARAMS + + Returns + ------- + y : ndarray + Real part of input array. + $OUT_SCALAR_1 + + See Also + -------- + ndarray.real + ndarray.imag + + Notes + ----- + This ufunc is used internally to implement the `ndarray.real` + attribute and the `np.real` function. It should not be used directly. + + """) + +add_newdoc('numpy._core.umath', 'imag', + """ + Returns the imaginary part of the elements in the array. 
+ + Unlike typical ufuncs, the return is typically a view into the original array. + + Parameters + ---------- + x : array_like + $PARAMS + + Returns + ------- + y : ndarray + Complex part of input array or zeros. + $OUT_SCALAR_1 + + See Also + -------- + ndarray.imag + ndarray.real + + Notes + ----- + This ufunc is used internally to implement the `ndarray.imag` + attribute and the `np.imag` function. It should not be used directly. + + """) + add_newdoc('numpy._core.umath', 'str_len', """ Returns the length of each element. For byte strings, diff --git a/numpy/_core/include/numpy/dtype_api.h b/numpy/_core/include/numpy/dtype_api.h index 5ac964782ec0..ca837725fc75 100644 --- a/numpy/_core/include/numpy/dtype_api.h +++ b/numpy/_core/include/numpy/dtype_api.h @@ -80,6 +80,7 @@ typedef enum { * assume that if set, it also applies to normal operations though! */ NPY_METH_IS_REORDERABLE = 1 << 3, + _NPY_METH_IS_CAST = 1 << 4, /* automatically set for casts */ /* * Private flag for now for *logic* functions. The logical functions * `logical_or` and `logical_and` can always cast the inputs to booleans @@ -114,18 +115,18 @@ typedef struct PyArrayMethod_Context_tag { PyArray_Descr *const *descriptors; #if NPY_FEATURE_VERSION > NPY_2_3_API_VERSION void * _reserved; - /* + /* * Optional flag to pass information into the inner loop * NPY_ARRAYMETHOD_CONTEXT_FLAGS */ uint64_t flags; - + /* * Optional run-time parameters to pass to the loop (currently used in sorting). * Fixed parameters are expected to be passed via auxdata. 
*/ void *parameters; - + /* Structure may grow (this is harmless for DType authors) */ #endif } PyArrayMethod_Context; diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index 5b78cc307be0..d47af9345201 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -1207,6 +1207,7 @@ src_umath = umath_gen_headers + [ src_file.process('src/umath/scalarmath.c.src'), 'src/umath/ufunc_object.c', 'src/umath/umathmodule.c', + 'src/umath/real_imag_ufuncs.cpp', 'src/umath/special_integer_comparisons.cpp', 'src/umath/string_ufuncs.cpp', 'src/umath/stringdtype_ufuncs.cpp', diff --git a/numpy/_core/src/common/lowlevel_strided_loops.h b/numpy/_core/src/common/lowlevel_strided_loops.h index 9bcfcf2d3f37..fedd38a7b212 100644 --- a/numpy/_core/src/common/lowlevel_strided_loops.h +++ b/numpy/_core/src/common/lowlevel_strided_loops.h @@ -10,10 +10,14 @@ /* For PyArray_ macros used below */ #include "numpy/ndarrayobject.h" +#ifdef __cplusplus +extern "C" { +#endif + /* * NOTE: This API should remain private for the time being, to allow * for further refinement. I think the 'aligned' mechanism - * needs changing, for example. + * needs changing, for example. * * Note: Updated in 2018 to distinguish "true" from "uint" alignment. 
*/ @@ -787,4 +791,9 @@ PyArray_EQUIVALENTLY_ITERABLE_OVERLAP_OK(PyArrayObject *arr1, PyArrayObject *arr stride2 = PyArray_TRIVIAL_PAIR_ITERATION_STRIDE(size2, arr2); \ } + +#ifdef __cplusplus +} +#endif + #endif /* NUMPY_CORE_SRC_COMMON_LOWLEVEL_STRIDED_LOOPS_H_ */ diff --git a/numpy/_core/src/common/npy_import.h b/numpy/_core/src/common/npy_import.h index 9eab510726aa..7bf1bcd88831 100644 --- a/numpy/_core/src/common/npy_import.h +++ b/numpy/_core/src/common/npy_import.h @@ -49,8 +49,6 @@ typedef struct npy_runtime_imports_struct { PyObject *_var; PyObject *_view_is_safe; PyObject *_void_scalar_to_string; - PyObject *sort; - PyObject *argsort; } npy_runtime_imports_struct; NPY_VISIBILITY_HIDDEN extern npy_runtime_imports_struct npy_runtime_imports; diff --git a/numpy/_core/src/multiarray/array_method.c b/numpy/_core/src/multiarray/array_method.c index c6504ac03d50..529d294e7199 100644 --- a/numpy/_core/src/multiarray/array_method.c +++ b/numpy/_core/src/multiarray/array_method.c @@ -86,7 +86,7 @@ default_resolve_descriptors( * abstract ones or unspecified outputs). We can use the common-dtype * operation to provide a default here. */ - if (method->casting == NPY_NO_CASTING) { + if (method->casting == NPY_NO_CASTING && (method->flags & _NPY_METH_IS_CAST)) { /* * By (current) definition no-casting should imply viewable. This * is currently indicated for example for object to object cast. diff --git a/numpy/_core/src/multiarray/convert_datatype.c b/numpy/_core/src/multiarray/convert_datatype.c index 0e976009a767..b2a179b78cca 100644 --- a/numpy/_core/src/multiarray/convert_datatype.c +++ b/numpy/_core/src/multiarray/convert_datatype.c @@ -930,7 +930,7 @@ PyArray_CastDescrToDType(PyArray_Descr *descr, PyArray_DTypeMeta *given_DType) Py_INCREF(descr); return descr; } - if (!NPY_DT_is_parametric(given_DType)) { + if (!NPY_DT_is_parametric(given_DType) && !NPY_DT_is_abstract(given_DType)) { /* * Don't actually do anything, the default is always the result * of any cast. 
@@ -2090,6 +2090,7 @@ PyArray_AddCastingImplementation_FromSpec(PyArrayMethod_Spec *spec, int private) if (meth == NULL) { return -1; } + meth->method->flags |= _NPY_METH_IS_CAST; int res = PyArray_AddCastingImplementation(meth); Py_DECREF(meth); if (res < 0) { diff --git a/numpy/_core/src/multiarray/dtypemeta.h b/numpy/_core/src/multiarray/dtypemeta.h index d95463f486df..c59772203a85 100644 --- a/numpy/_core/src/multiarray/dtypemeta.h +++ b/numpy/_core/src/multiarray/dtypemeta.h @@ -96,6 +96,9 @@ typedef struct { */ PyArrayMethodObject *sort_meth; PyArrayMethodObject *argsort_meth; + /* Definition for real and imaginary parts, and the (internal) ufuncs */ + PyBoundArrayMethodObject *real_meth; + PyBoundArrayMethodObject *imag_meth; } NPY_DType_Slots; // This must be updated if new slots before within_dtype_castingimpl diff --git a/numpy/_core/src/multiarray/getset.c b/numpy/_core/src/multiarray/getset.c index cec1ae275b71..6275b5f55fb6 100644 --- a/numpy/_core/src/multiarray/getset.c +++ b/numpy/_core/src/multiarray/getset.c @@ -22,6 +22,7 @@ #include "getset.h" #include "arrayobject.h" #include "mem_overlap.h" +#include "number.h" #include "alloc.h" #include "npy_buffer.h" #include "shape.h" @@ -573,132 +574,109 @@ array_base_get(PyArrayObject *self, void *NPY_UNUSED(ignored)) } } + /* - * Create a view of a complex array with an equivalent data-type - * except it is real instead of complex. + * Fetches the real or imaginary part of an array. If `need_view` is set the return + * cannot be a copy (must be a view). + * If `need_view` is zero it will return the `ufunc`s result (a new array) and set it + * to read-only. 
*/ -static PyArrayObject * -_get_part(PyArrayObject *self, int imag) +static PyObject * +_get_part(PyArrayObject *self, PyObject *ufunc, PyBoundArrayMethodObject *meth, int need_view) { - int float_type_num; - PyArray_Descr *type; - PyArrayObject *ret; - int offset; - - switch (PyArray_DESCR(self)->type_num) { - case NPY_CFLOAT: - float_type_num = NPY_FLOAT; - break; - case NPY_CDOUBLE: - float_type_num = NPY_DOUBLE; - break; - case NPY_CLONGDOUBLE: - float_type_num = NPY_LONGDOUBLE; - break; - default: - PyErr_Format(PyExc_ValueError, - "Cannot convert complex type number %d to float", - PyArray_DESCR(self)->type_num); - return NULL; - - } - type = PyArray_DescrFromType(float_type_num); - if (type == NULL) { + PyObject *ret = NULL; + PyArray_Descr *descrs[2] = {PyArray_DESCR(self), NULL}; + PyArray_Descr *loop_descrs[2] = {NULL, NULL}; + npy_intp view_offset = NPY_MIN_INTP; + int res = meth->method->resolve_descriptors( + meth->method, meth->dtypes, descrs, loop_descrs, &view_offset); + if (res < 0) { return NULL; } - - offset = (imag ? type->elsize : 0); - - if (!PyArray_ISNBO(PyArray_DESCR(self)->byteorder)) { - Py_SETREF(type, PyArray_DescrNew(type)); - if (type == NULL) { - return NULL; + if (view_offset != NPY_MIN_INTP) { + Py_INCREF(loop_descrs[1]); + ret = PyArray_NewFromDescr_int( + Py_TYPE(self), loop_descrs[1], + PyArray_NDIM(self), PyArray_DIMS(self), + PyArray_STRIDES(self), PyArray_BYTES(self) + view_offset, + PyArray_FLAGS(self), (PyObject *)self, (PyObject *)self, + _NPY_ARRAY_ENSURE_DTYPE_IDENTITY); + } + else if (!need_view) { + // resolve_descriptors was successful, but view_offset is not set so we call + // the ufunc to let it deal with the (potential) complexity. + ret = PyArray_GenericUnaryFunction(self, ufunc); + if (ret != NULL && PyArray_Check(ret)) { + // Make result read-only, since otherwise `arr.imag[...] = val` + // would for example work. 
+ PyArray_CLEARFLAGS((PyArrayObject *)ret, NPY_ARRAY_WRITEABLE); } - type->byteorder = PyArray_DESCR(self)->byteorder; - } - ret = (PyArrayObject *)PyArray_NewFromDescrAndBase( - Py_TYPE(self), - type, - PyArray_NDIM(self), - PyArray_DIMS(self), - PyArray_STRIDES(self), - PyArray_BYTES(self) + offset, - PyArray_FLAGS(self), (PyObject *)self, (PyObject *)self); - if (ret == NULL) { - return NULL; } + + Py_DECREF(loop_descrs[0]); + Py_DECREF(loop_descrs[1]); return ret; } -/* For Object arrays, we need to get and set the - real part of each element. - */ static PyObject * array_real_get(PyArrayObject *self, void *NPY_UNUSED(ignored)) { - PyArrayObject *ret; + PyBoundArrayMethodObject *meth = NPY_DT_SLOTS(NPY_DTYPE(PyArray_DTYPE(self)))->real_meth; - if (PyArray_ISCOMPLEX(self)) { - ret = _get_part(self, 0); - return (PyObject *)ret; + if (meth == NULL) { + // If the method is not set, we just assume this can be seen as "real" + // it really may be nice to change that one day. + return Py_NewRef((PyObject *)self); } - else { - Py_INCREF(self); - return (PyObject *)self; - } -} + return _get_part(self, n_ops.real, meth, /* need_view */ 0); +} static int array_real_set(PyArrayObject *self, PyObject *val, void *NPY_UNUSED(ignored)) { - PyArrayObject *ret; - PyArrayObject *new; - int retcode; - if (val == NULL) { PyErr_SetString(PyExc_AttributeError, "Cannot delete array real part"); return -1; } - if (PyArray_ISCOMPLEX(self)) { - ret = _get_part(self, 0); - if (ret == NULL) { - return -1; - } - } - else { + + PyArrayObject *part; + PyBoundArrayMethodObject *meth = NPY_DT_SLOTS(NPY_DTYPE(PyArray_DTYPE(self)))->real_meth; + + if (meth == NULL) { + // See above, we may want to not guess this always... 
Py_INCREF(self); - ret = self; + part = self; } - new = (PyArrayObject *)PyArray_FROM_O(val); - if (new == NULL) { - Py_DECREF(ret); - return -1; + else { + part = (PyArrayObject *)_get_part( + self, n_ops.real, meth, /* need_view */ 1); + if (part == NULL) { + if (!PyErr_Occurred()) { + PyErr_SetString(PyExc_TypeError, + "Cannot set real part when `.real` isn't a view."); + } + return -1; + } } - retcode = PyArray_CopyInto(ret, new); - Py_DECREF(ret); - Py_DECREF(new); - return retcode; + + int ret = PyArray_CopyObject(part, val); + Py_DECREF(part); + return ret; } -/* For Object arrays we need to get - and set the imaginary part of - each element -*/ static PyObject * array_imag_get(PyArrayObject *self, void *NPY_UNUSED(ignored)) { - PyArrayObject *ret; + PyBoundArrayMethodObject *meth = NPY_DT_SLOTS(NPY_DTYPE(PyArray_DTYPE(self)))->imag_meth; - if (PyArray_ISCOMPLEX(self)) { - ret = _get_part(self, 1); - } - else { + if (meth == NULL) { + // We assume this is a real type, so return a zeroed array. 
Py_INCREF(PyArray_DESCR(self)); - ret = (PyArrayObject *)PyArray_NewFromDescr_int( + PyObject *ret = PyArray_NewFromDescr_int( Py_TYPE(self), PyArray_DESCR(self), PyArray_NDIM(self), @@ -709,9 +687,11 @@ array_imag_get(PyArrayObject *self, void *NPY_UNUSED(ignored)) if (ret == NULL) { return NULL; } - PyArray_CLEARFLAGS(ret, NPY_ARRAY_WRITEABLE); + PyArray_CLEARFLAGS((PyArrayObject *)ret, NPY_ARRAY_WRITEABLE); + return ret; } - return (PyObject *) ret; + + return _get_part(self, n_ops.imag, meth, /* need_view */ 0); } static int @@ -722,30 +702,28 @@ array_imag_set(PyArrayObject *self, PyObject *val, void *NPY_UNUSED(ignored)) "Cannot delete array imaginary part"); return -1; } - if (PyArray_ISCOMPLEX(self)) { - PyArrayObject *ret; - PyArrayObject *new; - int retcode; + PyArrayObject *part; + PyBoundArrayMethodObject *meth = NPY_DT_SLOTS(NPY_DTYPE(PyArray_DTYPE(self)))->imag_meth; - ret = _get_part(self, 1); - if (ret == NULL) { - return -1; - } - new = (PyArrayObject *)PyArray_FROM_O(val); - if (new == NULL) { - Py_DECREF(ret); - return -1; - } - retcode = PyArray_CopyInto(ret, new); - Py_DECREF(ret); - Py_DECREF(new); - return retcode; - } - else { + if (meth == NULL) { PyErr_SetString(PyExc_TypeError, - "array does not have imaginary part to set"); + "Cannot set imaginary part of non-complex array."); return -1; } + + part = (PyArrayObject *)_get_part( + self, n_ops.imag, meth, /* need_view */ 1); + if (part == NULL) { + if (!PyErr_Occurred()) { + PyErr_SetString(PyExc_TypeError, + "Cannot set imaginary part when `.imag` isn't a view."); + } + return -1; + } + + int ret = PyArray_CopyObject(part, val); + Py_DECREF(part); + return ret; } static PyObject * diff --git a/numpy/_core/src/multiarray/npy_static_data.c b/numpy/_core/src/multiarray/npy_static_data.c index 1c31dcd5d810..bb5c23a59d3c 100644 --- a/numpy/_core/src/multiarray/npy_static_data.c +++ b/numpy/_core/src/multiarray/npy_static_data.c @@ -70,6 +70,10 @@ intern_strings(void) INTERN_STRING(dl_device, 
"dl_device"); INTERN_STRING(max_version, "max_version"); INTERN_STRING(array_dealloc, "array_dealloc"); + INTERN_STRING(real, "real"); + INTERN_STRING(imag, "imag"); + INTERN_STRING(sort, "sort"); + INTERN_STRING(argsort, "argsort"); return 0; } diff --git a/numpy/_core/src/multiarray/npy_static_data.h b/numpy/_core/src/multiarray/npy_static_data.h index 68b3d27c8160..0411f969f678 100644 --- a/numpy/_core/src/multiarray/npy_static_data.h +++ b/numpy/_core/src/multiarray/npy_static_data.h @@ -49,6 +49,10 @@ typedef struct npy_interned_str_struct { PyObject *dl_device; PyObject *max_version; PyObject *array_dealloc; + PyObject *real; + PyObject *imag; + PyObject *sort; + PyObject *argsort; } npy_interned_str_struct; /* diff --git a/numpy/_core/src/multiarray/number.c b/numpy/_core/src/multiarray/number.c index e27079a569ef..83c9b9ffad1a 100644 --- a/numpy/_core/src/multiarray/number.c +++ b/numpy/_core/src/multiarray/number.c @@ -122,6 +122,8 @@ _PyArray_SetNumericOps(PyObject *dict) SET(conjugate); SET(matmul); SET(clip); + SET(real); + SET(imag); // initialize static globals needed for matmul npy_static_pydata.axes_1d_obj_kwargs = Py_BuildValue( diff --git a/numpy/_core/src/multiarray/number.h b/numpy/_core/src/multiarray/number.h index 8cbcf5f2da25..497793abd7a8 100644 --- a/numpy/_core/src/multiarray/number.h +++ b/numpy/_core/src/multiarray/number.h @@ -41,6 +41,8 @@ typedef struct { PyObject *conjugate; PyObject *matmul; PyObject *clip; + PyObject *real; + PyObject *imag; } NumericOps; extern NPY_NO_EXPORT NumericOps n_ops; diff --git a/numpy/_core/src/umath/_scaled_float_dtype.c b/numpy/_core/src/umath/_scaled_float_dtype.c index 9bf318d97b10..d26cd9b9b7ca 100644 --- a/numpy/_core/src/umath/_scaled_float_dtype.c +++ b/numpy/_core/src/umath/_scaled_float_dtype.c @@ -1026,8 +1026,9 @@ sfloat_init_ufuncs(void) { PyUFunc_LoopSlot loops[] = { {"multiply", &multiply_spec}, {"_core._multiarray_umath.add", &add_spec}, - {"numpy:sort", &sort_spec}, - 
{"numpy._core.fromnumeric:argsort", &argsort_spec}, + // These names must match exactly right now (not ufuncs) + {"sort", &sort_spec}, + {"argsort", &argsort_spec}, {NULL, NULL} }; if (PyUFunc_AddLoopsFromSpecs(loops) < 0) { diff --git a/numpy/_core/src/umath/dispatching.cpp b/numpy/_core/src/umath/dispatching.cpp index 2998ad0465de..aa5020a5b0fa 100644 --- a/numpy/_core/src/umath/dispatching.cpp +++ b/numpy/_core/src/umath/dispatching.cpp @@ -194,6 +194,33 @@ PyUFunc_AddLoopFromSpec_int(PyObject *ufunc, PyArrayMethod_Spec *spec, int priv) } + +/* + * There are a few ArrayMethods we store on the DType since they are not + * (only) used via ufuncs. Some don't need to be bound (DTypes attached) + * and are currently not. + */ +template +static int +set_static_method(PyArrayMethod_Spec *spec) { + PyArray_DTypeMeta *dtype = spec->dtypes[0]; + PyBoundArrayMethodObject *meth = PyArrayMethod_FromSpec_int(spec, 0); + if (meth == NULL) { + return -1; + } + + if constexpr (!bound) { + Py_INCREF(meth->method); + NPY_DT_SLOTS(dtype)->*slot = meth->method; + Py_DECREF(meth); + } + else { + NPY_DT_SLOTS(dtype)->*slot = meth; + } + return 0; +} + + /*UFUNC_API * Add multiple loops to ufuncs from ArrayMethod specs. 
This also * handles the registration of sort and argsort methods for dtypes @@ -202,64 +229,67 @@ PyUFunc_AddLoopFromSpec_int(PyObject *ufunc, PyArrayMethod_Spec *spec, int priv) NPY_NO_EXPORT int PyUFunc_AddLoopsFromSpecs(PyUFunc_LoopSlot *slots) { - if (npy_cache_import_runtime( - "numpy", "sort", &npy_runtime_imports.sort) < 0) { - return -1; - } - if (npy_cache_import_runtime( - "numpy", "argsort", &npy_runtime_imports.argsort) < 0) { - return -1; - } + int ret = -1; + PyObject *ufunc = NULL; PyUFunc_LoopSlot *slot; for (slot = slots; slot->name != NULL; slot++) { - PyObject *ufunc = npy_import_entry_point(slot->name); - if (ufunc == NULL) { - return -1; + // Hardcode slot names for attributes and non-ufuncs stored on the DType + // (Also avoids circular imports a bit.) + if (strcmp(slot->name, "real") == 0) { + Py_XSETREF(ufunc, Py_NewRef(npy_interned_str.real)); } - - if (ufunc == npy_runtime_imports.sort) { - Py_DECREF(ufunc); - - PyArray_DTypeMeta *dtype = slot->spec->dtypes[0]; - PyBoundArrayMethodObject *sort_meth = PyArrayMethod_FromSpec_int(slot->spec, 0); - if (sort_meth == NULL) { - return -1; + else if (strcmp(slot->name, "imag") == 0) { + Py_XSETREF(ufunc, Py_NewRef(npy_interned_str.imag)); + } + else if (strcmp(slot->name, "sort") == 0) { + Py_XSETREF(ufunc, Py_NewRef(npy_interned_str.sort)); + } + else if (strcmp(slot->name, "argsort") == 0) { + Py_XSETREF(ufunc, Py_NewRef(npy_interned_str.argsort)); + } + else { + Py_XSETREF(ufunc, npy_import_entry_point(slot->name)); + if (ufunc == NULL) { + goto finish; } - - NPY_DT_SLOTS(dtype)->sort_meth = sort_meth->method; - Py_INCREF(sort_meth->method); - Py_DECREF(sort_meth); } - else if (ufunc == npy_runtime_imports.argsort) { - Py_DECREF(ufunc); - PyArray_DTypeMeta *dtype = slot->spec->dtypes[0]; - PyBoundArrayMethodObject *argsort_meth = PyArrayMethod_FromSpec_int(slot->spec, 0); - if (argsort_meth == NULL) { - return -1; + if (ufunc == npy_interned_str.real) { + if 
(set_static_method<&NPY_DType_Slots::real_meth, true>(slot->spec) < 0) { + goto finish; + } + } + else if (ufunc == npy_interned_str.imag) { + if (set_static_method<&NPY_DType_Slots::imag_meth, true>(slot->spec) < 0) { + goto finish; + } + } + else if (ufunc == npy_interned_str.sort) { + if (set_static_method<&NPY_DType_Slots::sort_meth, false>(slot->spec) < 0) { + goto finish; + } + } + else if (ufunc == npy_interned_str.argsort) { + if (set_static_method<&NPY_DType_Slots::argsort_meth, false>(slot->spec) < 0) { + goto finish; } - - NPY_DT_SLOTS(dtype)->argsort_meth = argsort_meth->method; - Py_INCREF(argsort_meth->method); - Py_DECREF(argsort_meth); } else { if (!PyObject_TypeCheck(ufunc, &PyUFunc_Type)) { PyErr_Format(PyExc_TypeError, "%s was not a ufunc!", slot->name); - Py_DECREF(ufunc); - return -1; + goto finish; } - - int ret = PyUFunc_AddLoopFromSpec_int(ufunc, slot->spec, 0); - Py_DECREF(ufunc); - if (ret < 0) { - return -1; + if (PyUFunc_AddLoopFromSpec_int(ufunc, slot->spec, 0) < 0) { + goto finish; } } } - return 0; + ret = 0; + finish: + Py_XDECREF(ufunc); + return ret; } @@ -595,22 +625,21 @@ call_promoter_and_recurse(PyUFuncObject *ufunc, PyObject *info, return NULL; } /* - * If none of the dtypes changes, we would recurse infinitely, abort. - * (Of course it is nevertheless possible to recurse infinitely.) - * - * TODO: We could allow users to signal this directly and also move - * the call to be (almost immediate). That would call it - * unnecessarily sometimes, but may allow additional flexibility. - */ - int dtypes_changed = 0; - for (int i = 0; i < nargs; i++) { - if (new_op_dtypes[i] != op_dtypes[i]) { - dtypes_changed = 1; - break; + * If none of the dtypes changes, we would recurse infinitely, abort. + * (Of course it is nevertheless possible to recurse infinitely.) + * If the user indicates a `1` result, we trust the user. 
+ */ + if (promoter_result != 1) { + int dtypes_changed = 0; + for (int i = 0; i < nargs; i++) { + if (new_op_dtypes[i] != op_dtypes[i]) { + dtypes_changed = 1; + break; + } + } + if (!dtypes_changed) { + goto finish; } - } - if (!dtypes_changed) { - goto finish; } } else { diff --git a/numpy/_core/src/umath/real_imag_ufuncs.cpp b/numpy/_core/src/umath/real_imag_ufuncs.cpp new file mode 100644 index 000000000000..f30c16460709 --- /dev/null +++ b/numpy/_core/src/umath/real_imag_ufuncs.cpp @@ -0,0 +1,297 @@ +/* + * This file implements the real and imag ufuncs which are in turn used + * for the `imag` and `real` attributes of arrays. + * The ArrayMethods are primarily stored on the DType for `real` and `imag` + * while the ufunc uses a promoter to access these dynamically. + */ + +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE +#define _UMATHMODULE + +#include +#include "npy_pycompat.h" // PyObject_GetOptionalAttr +#include "numpy/ndarraytypes.h" +#include "numpy/ufuncobject.h" +#include "dispatching.h" + +#include "numpyos.h" +#include "dtypemeta.h" +#include "dtype_transfer.h" +#include "lowlevel_strided_loops.h" +#include "array_method.h" + +#include "real_imag_ufuncs.h" + + +template +static NPY_CASTING +complex_to_real_resolve_descriptors( + PyArrayMethodObject *NPY_UNUSED(self), + PyArray_DTypeMeta *const dtypes[2], + PyArray_Descr *const given_descrs[2], + PyArray_Descr *loop_descrs[2], + npy_intp *view_offset) +{ + Py_INCREF(given_descrs[0]); + loop_descrs[0] = given_descrs[0]; + Py_INCREF(dtypes[1]->singleton); + loop_descrs[1] = dtypes[1]->singleton; + + if (PyDataType_ISBYTESWAPPED(loop_descrs[0])) { + Py_SETREF( + loop_descrs[1], PyArray_DescrNewByteorder(loop_descrs[1], NPY_SWAP)); + if (loop_descrs[1] == NULL) { + Py_DECREF(loop_descrs[0]); + return _NPY_ERROR_OCCURRED_IN_CAST; + } + } + if constexpr (real_part) { + *view_offset = 0; + } + else { + *view_offset = loop_descrs[1]->elsize; + } + return NPY_NO_CASTING; +} + + +/* 
We shouldn't normally use it, but define a simple loop anyway. */ +template +static int extract_complex_part_loop( + PyArrayMethod_Context *context, char *const data[], + npy_intp const dimensions[], npy_intp const strides[], + NpyAuxData *NPY_UNUSED(auxdata)) +{ + npy_intp N = dimensions[0]; + char *in = data[0]; + char *out = data[1]; + npy_intp istride = strides[0]; + npy_intp ostride = strides[1]; + + if constexpr (!real_part) { + in += sizeof(real_type); + } + + while (N--) { + real_type value = *reinterpret_cast(in); + *reinterpret_cast(out) = value; + in += istride; + out += ostride; + } + return 0; +} + + +template +static int +object_get_comp_strided_loop( + PyArrayMethod_Context *context, char *const data[], + npy_intp const dimensions[], npy_intp const strides[], + NpyAuxData *auxdata) +{ + npy_intp N = dimensions[0]; + char *in = data[0]; + char *out = data[1]; + npy_intp istride = strides[0]; + npy_intp ostride = strides[1]; + + while (N--) { + PyObject *obj = *reinterpret_cast(in); + PyObject *attr; + if (PyObject_GetOptionalAttr(obj, npy_interned_str.*component, &attr) < 0) { + return -1; + } + if (attr == NULL) { + if constexpr (component == &npy_interned_str_struct::real) { + attr = Py_NewRef(obj); // just use the old object... 
+ } + else { + // Use long zero as a best bet (also historical value) + attr = PyLong_FromLong(0); + if (attr == NULL) { + return -1; + } + } + } + + Py_XSETREF((*reinterpret_cast(out)), attr); + in += istride; + out += ostride; + } + return 0; +} + + +template +static int +register_one_for_type( + const char *name, PyArray_DTypeMeta *complex_dtype, PyArray_DTypeMeta *real_dtype) +{ + PyArray_DTypeMeta *dtypes[2] = {complex_dtype, real_dtype}; + PyType_Slot meth_slots[] = { + {NPY_METH_resolve_descriptors, (void *)&complex_to_real_resolve_descriptors}, + {NPY_METH_strided_loop, (void *)&extract_complex_part_loop}, + {0, NULL} + }; + PyArrayMethod_Spec meth_spec; + meth_spec.name = "generic_real_imag_loop"; + meth_spec.flags = NPY_METH_NO_FLOATINGPOINT_ERRORS; + meth_spec.nin = 1; + meth_spec.nout = 1; + meth_spec.dtypes = dtypes; + meth_spec.slots = meth_slots; + meth_spec.casting = NPY_NO_CASTING; + + PyUFunc_LoopSlot slots[] = { + {name, &meth_spec}, + {0, nullptr} + }; + return PyUFunc_AddLoopsFromSpecs(slots); +} + + +template +static int +register_both_for_type(PyArray_DTypeMeta *complex_dtype, PyArray_DTypeMeta *real_dtype) { + if (register_one_for_type("real", complex_dtype, real_dtype) < 0) { + return -1; + } + if (register_one_for_type("imag", complex_dtype, real_dtype) < 0) { + return -1; + } + return 0; +} + + +template +static int +register_one_object_loop(const char *name) +{ + PyArray_DTypeMeta *dtypes[2] = {&PyArray_ObjectDType, &PyArray_ObjectDType}; + PyType_Slot meth_slots[] = { + {NPY_METH_strided_loop, (void *)&object_get_comp_strided_loop}, + {0, nullptr} + }; + PyArrayMethod_Spec meth_spec; + meth_spec.name = "object_real_imag_loop"; + meth_spec.flags = (NPY_ARRAYMETHOD_FLAGS)( + NPY_METH_NO_FLOATINGPOINT_ERRORS|NPY_METH_REQUIRES_PYAPI); + meth_spec.nin = 1; + meth_spec.nout = 1; + meth_spec.dtypes = dtypes; + meth_spec.slots = meth_slots; + meth_spec.casting = NPY_NO_CASTING; + PyUFunc_LoopSlot slots[] = { + {name, &meth_spec}, + {0, nullptr} 
+ }; + return PyUFunc_AddLoopsFromSpecs(slots); +} + + +template +static int +real_imag_promoter(PyObject *ufunc, + PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[], + PyArray_DTypeMeta *new_op_dtypes[]) +{ + PyBoundArrayMethodObject *meth = NPY_DT_SLOTS(op_dtypes[0])->*slot; + if (meth == NULL) { + return -1; // nothing to do. + } + if (signature[1] != NULL && signature[1] != meth->dtypes[1]) { + // out signature requested, but not compatible (may be unreachable). + return -1; + } + + /* + * Dynamically add the loop to the ufunc, since it seem it was missing. + */ + PyObject *DType_tuple = PyTuple_FromArray((PyObject **)meth->dtypes, 2); + if (DType_tuple == NULL) { + return -1; + } + PyObject *info = PyTuple_Pack(2, DType_tuple, meth->method); + Py_DECREF(DType_tuple); + if (info == NULL) { + return -1; + } + int res = PyUFunc_AddLoop((PyUFuncObject *)ufunc, info, 1); + Py_DECREF(info); + if (res < 0) { + return -1; + } + new_op_dtypes[0] = NPY_DT_NewRef(meth->dtypes[0]); + new_op_dtypes[1] = NPY_DT_NewRef(meth->dtypes[1]); + return 1; +} + + + +template +static int +add_promoter_for_slot(PyObject *ufunc) +{ + PyObject *promoter = PyCapsule_New( + (void *)real_imag_promoter, "numpy._ufunc_promoter", NULL); + if (promoter == NULL) { + return -1; + } + PyObject *dtypes[2] = {(PyObject *)&PyArrayDescr_Type, (PyObject *)&PyArrayDescr_Type}; + PyObject *info = PyTuple_FromArray(dtypes, 2); + if (info == NULL) { + Py_DECREF(promoter); + return -1; + } + int res = PyUFunc_AddPromoter(ufunc, info, promoter); + Py_DECREF(info); + Py_DECREF(promoter); + return res; +} + + +NPY_NO_EXPORT int +init_real_imag_ufuncs(PyObject *umath) +{ + int res = -1; + PyObject *real_ufunc = PyObject_GetAttr(umath, npy_interned_str.real); + PyObject *imag_ufunc = PyObject_GetAttr(umath, npy_interned_str.imag); + if (real_ufunc == NULL || imag_ufunc == NULL) { + goto finish; + } + + if (register_both_for_type(&PyArray_CFloatDType, &PyArray_FloatDType) < 0) { + goto finish; + } + 
if (register_both_for_type(&PyArray_CDoubleDType, &PyArray_DoubleDType) < 0) { + goto finish; + } + if (register_both_for_type(&PyArray_CLongDoubleDType, &PyArray_LongDoubleDType) < 0) { + goto finish; + } + if (register_one_object_loop<&npy_interned_str_struct::real>("real") < 0) { + goto finish; + } + if (register_one_object_loop<&npy_interned_str_struct::imag>("imag") < 0) { + goto finish; + } + + /* + * The above actually only adds the method to the DType itself. We deal with + * the ufunc by adding a general fall-back method that dynamically registers + * loops based on the above DType method slots. + */ + if (add_promoter_for_slot<&NPY_DType_Slots::real_meth>(real_ufunc) < 0) { + goto finish; + } + if (add_promoter_for_slot<&NPY_DType_Slots::imag_meth>(imag_ufunc) < 0) { + goto finish; + } + res = 0; + finish: + Py_XDECREF(real_ufunc); + Py_XDECREF(imag_ufunc); + + return res; +} diff --git a/numpy/_core/src/umath/real_imag_ufuncs.h b/numpy/_core/src/umath/real_imag_ufuncs.h new file mode 100644 index 000000000000..4e6d569a92cc --- /dev/null +++ b/numpy/_core/src/umath/real_imag_ufuncs.h @@ -0,0 +1,15 @@ +#ifndef _NPY_CORE_SRC_UMATH_REAL_IMAG_UFUNCS_H_ +#define _NPY_CORE_SRC_UMATH_REAL_IMAG_UFUNCS_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +NPY_NO_EXPORT int +init_real_imag_ufuncs(PyObject *umath); + +#ifdef __cplusplus +} +#endif + +#endif /* _NPY_CORE_SRC_UMATH_REAL_IMAG_UFUNCS_H_ */ diff --git a/numpy/_core/src/umath/special_integer_comparisons.cpp b/numpy/_core/src/umath/special_integer_comparisons.cpp index 06babeeda0a8..5e80a84c37de 100644 --- a/numpy/_core/src/umath/special_integer_comparisons.cpp +++ b/numpy/_core/src/umath/special_integer_comparisons.cpp @@ -106,7 +106,7 @@ get_min_max(int typenum, long long *min, unsigned long long *max) /* * Determine if a Python long is within the typenums range, smaller, or larger. - * + * * Function returns -1 for errors. 
*/ static inline int @@ -344,7 +344,7 @@ add_dtype_loops(PyObject *umath, PyArrayMethod_Spec *spec, PyObject *info) goto fail; } - /* + /* * NOTE: Iterates all type numbers, it would be nice to reduce this. * (that would be easier if we consolidate int DTypes in general.) */ diff --git a/numpy/_core/src/umath/umathmodule.c b/numpy/_core/src/umath/umathmodule.c index eac1283b95ff..439a2ac308d9 100644 --- a/numpy/_core/src/umath/umathmodule.c +++ b/numpy/_core/src/umath/umathmodule.c @@ -31,6 +31,7 @@ #include "string_ufuncs.h" #include "stringdtype_ufuncs.h" #include "special_integer_comparisons.h" +#include "real_imag_ufuncs.h" #include "extobj.h" /* for _extobject_contextvar exposure */ #include "ufunc_type_resolution.h" @@ -274,6 +275,10 @@ int initumath(PyObject *m) return -1; } + if (init_real_imag_ufuncs(m) < 0) { + return -1; + } + if (init_stringdtype_ufuncs(m) < 0) { return -1; } diff --git a/numpy/_core/tests/test_arrayobject.py b/numpy/_core/tests/test_arrayobject.py index f4e268b377b3..0cfff63158dc 100644 --- a/numpy/_core/tests/test_arrayobject.py +++ b/numpy/_core/tests/test_arrayobject.py @@ -3,6 +3,7 @@ import pytest import numpy as np +from numpy._core._rational_tests import rational from numpy.testing import HAS_REFCOUNT, assert_array_equal @@ -93,3 +94,104 @@ def test_cleanup_with_refs_non_contig(): actual_ref_obj = sys.getrefcount(obj) assert actual_ref_dtype == expected_ref_dtype assert actual_ref_obj == actual_ref_dtype + + +@pytest.mark.parametrize("dtype", + list("?bhilqnpBHILQNPefdgSUV") + ["M8[ns]", "m8[ns]", rational]) +def test_real_imag_attributes_non_complex(dtype): + dtype = np.dtype(dtype) + + a = np.array([[1, 2, 3], [4, 5, 6]]).astype(dtype) + assert a.real is a + # One could imagine broadcasting, but doesn't right now: + imag = a.imag + assert imag.strides == a.strides + assert imag.dtype == a.dtype + # This part is rather unclear: + assert (imag == np.zeros((), dtype=a.dtype)).all() + assert imag.flags.writeable is False + + class 
myarr(np.ndarray): + def __array_finalize__(self, obj): + self.finalized_with = obj + + ma = a.view(myarr) + assert ma.real is ma + assert type(ma.imag) is myarr + assert ma.imag.finalized_with is ma + + +@pytest.mark.parametrize("dtype,real_dt", + [(">c8", ">f4"), ("c16", "f8"), ("clongdouble", "longdouble")]) +@pytest.mark.parametrize("variation", ["transpose", "set_writeable"]) +def test_real_imag_attributes_complex(dtype, real_dt, variation): + a = np.array([[1, 2j, 3], [4, 5j, 6]]).astype(dtype) + real = np.array([[1, 0, 3], [4, 0, 6]], dtype=real_dt) + imag = np.array([[0, 2, 0], [0, 5, 0]], dtype=real_dt) + + if variation == "transpose": + a = a.T + real = real.T + imag = imag.T + elif variation == "set_writeable": + a.flags.writeable = False + + assert_array_equal(a.real, real) + assert_array_equal(a.imag, imag) + assert a.real.dtype == real_dt + assert a.imag.dtype == real_dt + assert np.may_share_memory(a.real, a) + assert np.may_share_memory(a.imag, a) + assert a.real.flags.writeable == a.flags.writeable + assert a.imag.flags.writeable == a.flags.writeable + + class myarr(np.ndarray): + def __array_finalize__(self, obj): + self.finalized_with = obj + + ma = a.view(myarr) + assert ma.real.finalized_with is ma + assert ma.imag.finalized_with is ma + + +def test_real_imag_attributes_object(): + a = np.array([[1, 0.5 + 2j, 3, int], [4, 5j, "string", {}]], dtype=object) + + # NOTE(seberg): doing something for non-numbers is guesswork... 
+ real = np.array([[1, 0.5, 3, int.real], [4, 0, "string", {}]], dtype=object) + imag = np.array([[0, 2, 0, int.imag], [0, 5, 0, 0]], dtype=object) + + assert_array_equal(a.real, real) + assert_array_equal(a.imag, imag) + assert a.real.dtype == object + assert a.imag.dtype == object + assert not np.may_share_memory(a.real, a) + assert not np.may_share_memory(a.imag, a) + assert not a.real.flags.writeable + assert not a.imag.flags.writeable + + # Object returns new arrays via ufuncs, so call wrap + class myarr(np.ndarray): + def __array_wrap__(self, *args, **kwargs): + ret = super().__array_wrap__(*args, **kwargs) + ret.wrap_called = True + return ret + + ma = a.view(myarr) + assert ma.real.wrap_called + assert ma.imag.wrap_called + + +@pytest.mark.parametrize("ufunc,attr", [ + (np._core.umath.real, "real"), (np._core.umath.imag, "imag")]) +def test_real_imag_ufunc_minimal(ufunc, attr): + with pytest.raises(TypeError): + ufunc(np.array([1, 2, 3])) # non-complex or object raises + + arr = np.array([1 + 2j, 3 + 4j]) + res = ufunc(arr) + assert_array_equal(res, getattr(arr, attr), strict=True) + + arr = np.array([1 + 2j, 3 + 4j], dtype=object) + res = ufunc(arr) + assert_array_equal(res, getattr(arr, attr), strict=True) diff --git a/numpy/lib/_type_check_impl.py b/numpy/lib/_type_check_impl.py index e3c942be0d99..61b5b7229eb7 100644 --- a/numpy/lib/_type_check_impl.py +++ b/numpy/lib/_type_check_impl.py @@ -248,21 +248,15 @@ def isreal(x): The function does not work on string arrays. >>> a = np.array([2j, "a"], dtype=np.str_) - >>> np.isreal(a) # Warns about non-elementwise comparison - False + >>> np.isreal(a) # returns the result of `"" == 0` currently. + array([False, False]) - Returns True for all elements in input array of ``dtype=np.object_`` even if - any of the elements is complex. 
+ Returns True for all elements that either have no ``.imag`` attribute + or for which that attribute is zero: >>> a = np.array([1, "2", 3+4j], dtype=np.object_) >>> np.isreal(a) - array([ True, True, True]) - - isreal should not be used with object arrays - - >>> a = np.array([1+2j, 2+1j], dtype=np.object_) - >>> np.isreal(a) - array([ True, True]) + array([ True, True, False]) """ return imag(x) == 0 From 5de81a539d5affd7e5fecb2b1f53f88f23c7eab2 Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Sun, 29 Mar 2026 11:49:18 +0200 Subject: [PATCH 1544/1718] ENH: Improve performance of ufuncs using NPY_METH_NO_FLOATINGPOINT_ERRORS (#30987) --- .../reference/c-api/types-and-structures.rst | 2 +- numpy/_core/code_generators/generate_umath.py | 40 ++++++++++++++++++- numpy/_core/include/numpy/ufuncobject.h | 13 +++++- numpy/_core/src/umath/legacy_array_method.c | 12 ++++++ numpy/_core/src/umath/ufunc_object.c | 2 +- 5 files changed, 64 insertions(+), 5 deletions(-) diff --git a/doc/source/reference/c-api/types-and-structures.rst b/doc/source/reference/c-api/types-and-structures.rst index a039af130860..2cd423fdd30a 100644 --- a/doc/source/reference/c-api/types-and-structures.rst +++ b/doc/source/reference/c-api/types-and-structures.rst @@ -985,7 +985,7 @@ PyUFunc_Type and PyUFuncObject PyUFuncGenericFunction *functions; void **data; int ntypes; - int reserved1; + int _ufunc_flags; const char *name; char *types; const char *doc; diff --git a/numpy/_core/code_generators/generate_umath.py b/numpy/_core/code_generators/generate_umath.py index b0bcbe6d4ec7..b9d8cde3dcf8 100644 --- a/numpy/_core/code_generators/generate_umath.py +++ b/numpy/_core/code_generators/generate_umath.py @@ -203,9 +203,11 @@ class Ufunc: type_descriptions : TypeDescription objects signature: a generalized ufunc signature (like for matmul) indexed: add indexed loops (ufunc.at) for these type characters + no_float_errors: if True, the ufunc never raises floating point errors """ def __init__(self, nin, 
nout, identity, docstring, typereso, - *type_descriptions, signature=None, indexed=''): + *type_descriptions, signature=None, indexed='', + no_float_errors=False): self.nin = nin self.nout = nout if identity is None: @@ -216,6 +218,7 @@ def __init__(self, nin, nout, identity, docstring, typereso, self.type_descriptions = [] self.signature = signature self.indexed = indexed + self.no_float_errors = no_float_errors for td in type_descriptions: self.type_descriptions.extend(td) for td in self.type_descriptions: @@ -434,6 +437,7 @@ def english_upper(s): ('loops_autovec', ints), ]), TD(P, f='conjugate'), + no_float_errors=True, ), 'fmod': Ufunc(2, 1, None, @@ -501,6 +505,7 @@ def english_upper(s): TD(cmplx, dispatch=[('loops_unary_complex', 'FD')], out=('f', 'd', 'g')), TD(O, f='PyNumber_Absolute'), + no_float_errors=True, ), '_arg': Ufunc(1, 1, None, @@ -515,6 +520,7 @@ def english_upper(s): TD(ints + flts + timedeltaonly, dispatch=[('loops_unary', ints + 'fdg')]), TD(cmplx, f='neg'), TD(O, f='PyNumber_Negative'), + no_float_errors=True, ), 'positive': Ufunc(1, 1, None, @@ -523,6 +529,7 @@ def english_upper(s): TD(ints + flts + timedeltaonly), TD(cmplx, f='pos'), TD(O, f='PyNumber_Positive'), + no_float_errors=True, ), 'sign': Ufunc(1, 1, None, @@ -531,6 +538,7 @@ def english_upper(s): TD(ints + flts, dispatch=[('loops_autovec', ints)]), TD(timedeltaonly, out='d'), TD(cmplx + O), + no_float_errors=True, ), 'greater': Ufunc(2, 1, None, @@ -542,6 +550,7 @@ def english_upper(s): TD(inexact + times, out='?', dispatch=[('loops_comparison', bints + 'fd')]), TD('O', out='?'), [TypeDescription('O', FullTypeDescr, 'OO', 'O')], + no_float_errors=True, ), 'greater_equal': Ufunc(2, 1, None, @@ -553,6 +562,7 @@ def english_upper(s): TD(inexact + times, out='?', dispatch=[('loops_comparison', bints + 'fd')]), TD('O', out='?'), [TypeDescription('O', FullTypeDescr, 'OO', 'O')], + no_float_errors=True, ), 'less': Ufunc(2, 1, None, @@ -564,6 +574,7 @@ def english_upper(s): TD(inexact + 
times, out='?', dispatch=[('loops_comparison', bints + 'fd')]), TD('O', out='?'), [TypeDescription('O', FullTypeDescr, 'OO', 'O')], + no_float_errors=True, ), 'less_equal': Ufunc(2, 1, None, @@ -575,6 +586,7 @@ def english_upper(s): TD(inexact + times, out='?', dispatch=[('loops_comparison', bints + 'fd')]), TD('O', out='?'), [TypeDescription('O', FullTypeDescr, 'OO', 'O')], + no_float_errors=True, ), 'equal': Ufunc(2, 1, None, @@ -586,6 +598,7 @@ def english_upper(s): TD(inexact + times, out='?', dispatch=[('loops_comparison', bints + 'fd')]), TD('O', out='?'), [TypeDescription('O', FullTypeDescr, 'OO', 'O')], + no_float_errors=True, ), 'not_equal': Ufunc(2, 1, None, @@ -597,6 +610,7 @@ def english_upper(s): TD(inexact + times, out='?', dispatch=[('loops_comparison', bints + 'fd')]), TD('O', out='?'), [TypeDescription('O', FullTypeDescr, 'OO', 'O')], + no_float_errors=True, ), 'logical_and': Ufunc(2, 1, True_, @@ -607,6 +621,7 @@ def english_upper(s): ('loops_autovec', ints), ]), TD(O, f='npy_ObjectLogicalAnd'), + no_float_errors=True, ), 'logical_not': Ufunc(1, 1, None, @@ -617,6 +632,7 @@ def english_upper(s): ('loops_autovec', ints), ]), TD(O, f='npy_ObjectLogicalNot'), + no_float_errors=True, ), 'logical_or': Ufunc(2, 1, False_, @@ -627,6 +643,7 @@ def english_upper(s): ('loops_autovec', ints), ]), TD(O, f='npy_ObjectLogicalOr'), + no_float_errors=True, ), 'logical_xor': Ufunc(2, 1, False_, @@ -639,6 +656,7 @@ def english_upper(s): ]), # TODO: using obj.logical_xor() seems pretty much useless: TD(P, f='logical_xor'), + no_float_errors=True, ), 'maximum': Ufunc(2, 1, ReorderableNone, @@ -648,6 +666,7 @@ def english_upper(s): TD(no_obj_bool, dispatch=[('loops_minmax', ints + 'fdg')]), TD(O, f='npy_ObjectMax'), indexed=flts + ints, + no_float_errors=True, ), 'minimum': Ufunc(2, 1, ReorderableNone, @@ -658,6 +677,7 @@ def english_upper(s): TD(no_obj_bool, dispatch=[('loops_minmax', ints + 'fdg')]), TD(O, f='npy_ObjectMin'), indexed=flts + ints, + 
no_float_errors=True, ), 'clip': Ufunc(3, 1, ReorderableNone, @@ -674,6 +694,7 @@ def english_upper(s): TD(no_obj_bool, dispatch=[('loops_minmax', 'fdg')]), TD(O, f='npy_ObjectMax'), indexed=flts + ints, + no_float_errors=True, ), 'fmin': Ufunc(2, 1, ReorderableNone, @@ -684,6 +705,7 @@ def english_upper(s): TD(no_obj_bool, dispatch=[('loops_minmax', 'fdg')]), TD(O, f='npy_ObjectMin'), indexed=flts + ints, + no_float_errors=True, ), 'logaddexp': Ufunc(2, 1, MinusInfinity, @@ -705,6 +727,7 @@ def english_upper(s): dispatch=[('loops_logical', '?')]), TD(ints, dispatch=[('loops_autovec', ints)]), TD(O, f='PyNumber_And'), + no_float_errors=True, ), 'bitwise_or': Ufunc(2, 1, Zero, @@ -713,6 +736,7 @@ def english_upper(s): TD('?', cfunc_alias='logical_or', dispatch=[('loops_logical', '?')]), TD(ints, dispatch=[('loops_autovec', ints)]), TD(O, f='PyNumber_Or'), + no_float_errors=True, ), 'bitwise_xor': Ufunc(2, 1, Zero, @@ -722,6 +746,7 @@ def english_upper(s): dispatch=[('loops_comparison', '?')]), TD(ints, dispatch=[('loops_autovec', ints)]), TD(O, f='PyNumber_Xor'), + no_float_errors=True, ), 'invert': Ufunc(1, 1, None, @@ -731,6 +756,7 @@ def english_upper(s): dispatch=[('loops_logical', '?')]), TD(ints, dispatch=[('loops_autovec', ints)]), TD(O, f='PyNumber_Invert'), + no_float_errors=True, ), 'left_shift': Ufunc(2, 1, None, @@ -738,6 +764,7 @@ def english_upper(s): None, TD(ints, dispatch=[('loops_autovec', ints)]), TD(O, f='PyNumber_Lshift'), + no_float_errors=True, ), 'right_shift': Ufunc(2, 1, None, @@ -745,6 +772,7 @@ def english_upper(s): None, TD(ints, dispatch=[('loops_autovec', ints)]), TD(O, f='PyNumber_Rshift'), + no_float_errors=True, ), 'heaviside': Ufunc(2, 1, None, @@ -978,6 +1006,7 @@ def english_upper(s): None, TD(flts, f='fabs', astype={'e': 'f'}), TD(P, f='fabs'), + no_float_errors=True, ), 'floor': Ufunc(1, 1, None, @@ -1040,6 +1069,7 @@ def english_upper(s): ('loops_unary_fp_le', inexactvec), ('loops_autovec', bints), ]), + no_float_errors=True, 
), 'isnat': Ufunc(1, 1, None, @@ -1055,6 +1085,7 @@ def english_upper(s): ('loops_unary_fp_le', inexactvec), ('loops_autovec', bints + 'mM'), ]), + no_float_errors=True, ), 'isfinite': Ufunc(1, 1, None, @@ -1064,18 +1095,21 @@ def english_upper(s): ('loops_unary_fp_le', inexactvec), ('loops_autovec', bints), ]), + no_float_errors=True, ), 'signbit': Ufunc(1, 1, None, docstrings.get('numpy._core.umath.signbit'), None, TD(flts, out='?', dispatch=[('loops_unary_fp_le', inexactvec)]), + no_float_errors=True, ), 'copysign': Ufunc(2, 1, None, docstrings.get('numpy._core.umath.copysign'), None, TD(flts), + no_float_errors=True, ), 'nextafter': Ufunc(2, 1, None, @@ -1594,6 +1628,10 @@ def make_ufuncs(funcdict): funcname=f"{english_upper(chartoname[c])}_{name}_indexed", )) + if uf.no_float_errors: + mlist.append( + r"((PyUFuncObject *)f)->_ufunc_flags |=" + r" UFUNC_NO_FLOATINGPOINT_ERRORS;") mlist.append(r"""PyDict_SetItemString(dictionary, "%s", f);""" % name) mlist.append(r"""Py_DECREF(f);""") code3list.append('\n'.join(mlist)) diff --git a/numpy/_core/include/numpy/ufuncobject.h b/numpy/_core/include/numpy/ufuncobject.h index cf7064753d0e..f821c6c29411 100644 --- a/numpy/_core/include/numpy/ufuncobject.h +++ b/numpy/_core/include/numpy/ufuncobject.h @@ -122,8 +122,8 @@ typedef struct _tagPyUFuncObject { /* The number of elements in 'functions' and 'data' */ int ntypes; - /* Used to be unused field 'check_return' */ - int reserved1; + /* Flags for the ufunc (e.g. UFUNC_NO_FLOATINGPOINT_ERRORS) */ + int _ufunc_flags; /* The name of the ufunc */ const char *name; @@ -249,6 +249,15 @@ typedef PyUFuncObject_fields PyUFuncObject; #define UFUNC_OBJ_ISOBJECT 1 #define UFUNC_OBJ_NEEDS_API 2 +#if defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD +/* + * Flag stored in PyUFuncObject._ufunc_flags to indicate that non-object loops + * of this ufunc never raise floating point errors. Used to skip the + * expensive npy_clear_floatstatus/npy_get_floatstatus calls. 
+ */ +#define UFUNC_NO_FLOATINGPOINT_ERRORS 0x1 +#endif /* NPY_INTERNAL_BUILD */ + #if NPY_ALLOW_THREADS #define NPY_LOOP_BEGIN_THREADS do {if (!(loop->obj & UFUNC_OBJ_NEEDS_API)) _save = PyEval_SaveThread();} while (0); diff --git a/numpy/_core/src/umath/legacy_array_method.c b/numpy/_core/src/umath/legacy_array_method.c index 7a85937fcc8f..ed02d31995e7 100644 --- a/numpy/_core/src/umath/legacy_array_method.c +++ b/numpy/_core/src/umath/legacy_array_method.c @@ -398,6 +398,18 @@ PyArray_NewLegacyWrappingArrayMethod(PyUFuncObject *ufunc, } } + /* + * Set NPY_METH_NO_FLOATINGPOINT_ERRORS for non-object loops of ufuncs + * that are known to never raise floating point errors (e.g. comparisons, + * logical operations, abs, neg, copysign, etc.). + * The flag is set on the ufunc via _ufunc_flags during initialization in + * __umath_generated.c (driven by no_float_errors=True in generate_umath.py). + */ + if ((ufunc->_ufunc_flags & UFUNC_NO_FLOATINGPOINT_ERRORS) && + !(flags & NPY_METH_REQUIRES_PYAPI)) { + flags |= NPY_METH_NO_FLOATINGPOINT_ERRORS; + } + PyType_Slot slots[4] = { {NPY_METH_get_loop, &get_wrapped_legacy_ufunc_loop}, {NPY_METH_resolve_descriptors, &simple_legacy_resolve_descriptors}, diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index 2ffdb55cf184..002c800d413f 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -4891,7 +4891,7 @@ PyUFunc_FromFuncAndDataAndSignatureAndIdentity(PyUFuncGenericFunction *func, voi ufunc->userloops = NULL; ufunc->ptr = NULL; ufunc->vectorcall = &ufunc_generic_vectorcall; - ufunc->reserved1 = 0; + ufunc->_ufunc_flags = 0; ufunc->iter_flags = 0; /* Type resolution and inner loop selection functions */ From feeaad6bfc632602bfdac1061cc606f963650187 Mon Sep 17 00:00:00 2001 From: riku-sakamoto <46015196+riku-sakamoto@users.noreply.github.com> Date: Sun, 29 Mar 2026 19:37:03 +0900 Subject: [PATCH 1545/1718] DEP: Deprecate 'generic' unit in 
np.timedelta64 (#29619) This PR deprecates the generic unit in np.timedelta64. Using this unit can lead to unexpected behavior in some cases. Using generic unit now raises a `DeprecationWarning`. This also affects some integer/float and timedelta mixing code paths (although that could be changed). Co-authored-by: Sebastian Berg --- .../upcoming_changes/29619.deprecation.rst | 17 + numpy/_core/src/multiarray/datetime.c | 19 + numpy/_core/src/multiarray/multiarraymodule.c | 13 +- numpy/_core/tests/test_array_coercion.py | 26 +- numpy/_core/tests/test_arrayprint.py | 2 +- numpy/_core/tests/test_casting_unittests.py | 1 - numpy/_core/tests/test_cython.py | 14 +- numpy/_core/tests/test_datetime.py | 366 +++++++++++------- numpy/_core/tests/test_deprecations.py | 31 ++ numpy/_core/tests/test_dtype.py | 10 +- numpy/_core/tests/test_multiarray.py | 4 + numpy/_core/tests/test_nditer.py | 5 +- numpy/_core/tests/test_numeric.py | 54 +-- numpy/_core/tests/test_regression.py | 3 + numpy/_core/tests/test_stringdtype.py | 4 +- numpy/_core/tests/test_ufunc.py | 21 +- numpy/lib/tests/test_arraysetops.py | 2 +- numpy/lib/tests/test_nanfunctions.py | 24 +- numpy/polynomial/tests/test_classes.py | 14 +- numpy/testing/_private/utils.py | 2 +- numpy/testing/tests/test_utils.py | 14 +- numpy/tests/test_ctypeslib.py | 9 +- numpy/typing/tests/test_typing.py | 1 + 23 files changed, 453 insertions(+), 203 deletions(-) create mode 100644 doc/release/upcoming_changes/29619.deprecation.rst diff --git a/doc/release/upcoming_changes/29619.deprecation.rst b/doc/release/upcoming_changes/29619.deprecation.rst new file mode 100644 index 000000000000..0c2e651f14b3 --- /dev/null +++ b/doc/release/upcoming_changes/29619.deprecation.rst @@ -0,0 +1,17 @@ +Deprecation of the ``generic`` unit in `numpy.timedelta64` +---------------------------------------------------------- + +Using the ``generic`` unit in `numpy.timedelta64` is now deprecated +since this can lead to unexpected behavior such as 
non-transitive comparisons. +(see `gh-28287 `__ for details). + +As an alternative, please specify an explicit unit such as ``'s'`` (seconds) +or ``'D'`` (days) when constructing `numpy.timedelta64`. + +Due to this change, operations that implicitly rely on the ``generic`` unit are also deprecated. +For example:: + + arr = np.array([1, 2, 3], dtype="m8[s]") + + # `1` is implicitly converted to generic timedelta64 + arr + 1 diff --git a/numpy/_core/src/multiarray/datetime.c b/numpy/_core/src/multiarray/datetime.c index 9489e2b92c6a..04a21c31b9b9 100644 --- a/numpy/_core/src/multiarray/datetime.c +++ b/numpy/_core/src/multiarray/datetime.c @@ -2559,6 +2559,15 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj, meta->base = NPY_FR_GENERIC; meta->num = 1; } + /* If output is NaT, skip this warning. */ + if(meta->base == NPY_FR_GENERIC) { + if (DEPRECATE( + "Using 'generic' unit for NumPy timedelta is deprecated, " + "and will raise an error in the future. Please use a " + "specific units instead.") < 0) { + return -1; + } + } return 0; } @@ -2575,6 +2584,16 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj, if (error_converting(*out)) { return -1; } + + if (meta->base == NPY_FR_GENERIC) { + if (DEPRECATE( + "Using 'generic' unit for NumPy timedelta is deprecated, " + "and will raise an error in the future. 
" + "Please use a specific units instead.") < 0) { + return -1; + } + } + return 0; } /* Timedelta scalar */ diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index bf8104c5ac42..af24684b84b3 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -1979,9 +1979,16 @@ array_copyto(PyObject *NPY_UNUSED(ignored), Py_INCREF(DType); if (npy_mark_tmp_array_if_pyscalar(src_obj, src, &DType)) { /* The user passed a Python scalar */ - PyArray_Descr *descr = npy_find_descr_for_scalar( - src_obj, PyArray_DESCR(src), DType, - NPY_DTYPE(PyArray_DESCR(dst))); + PyArray_Descr *descr; + PyArray_DTypeMeta *dst_DType = NPY_DTYPE(PyArray_DESCR(dst)); + bool is_npy_nan = PyFloat_Check(src_obj) && npy_isnan(PyFloat_AsDouble(src_obj)); + if (!is_npy_nan && dst_DType->type_num == NPY_TIMEDELTA) { + descr = PyArray_DESCR(dst); + Py_INCREF(descr); + } else { + descr = npy_find_descr_for_scalar(src_obj, PyArray_DESCR(src), DType, + dst_DType); + } Py_DECREF(DType); if (descr == NULL) { goto fail; diff --git a/numpy/_core/tests/test_array_coercion.py b/numpy/_core/tests/test_array_coercion.py index 96bbb679d6c9..ec8069094d34 100644 --- a/numpy/_core/tests/test_array_coercion.py +++ b/numpy/_core/tests/test_array_coercion.py @@ -121,7 +121,7 @@ def scalar_instances(times=True, extended_precision=True, user_dtype=True): if times: # Datetimes and timedelta - yield param(np.timedelta64(2), id="timedelta64[generic]") + yield param(np.timedelta64(2, "ns"), id="timedelta64[ns]") yield param(np.timedelta64(23, "s"), id="timedelta64[s]") yield param(np.timedelta64("NaT", "s"), id="timedelta64[s](NaT)") @@ -421,12 +421,11 @@ def test_coercion_basic(self, dtype, scalar): assert_array_equal(ass, cast) @pytest.mark.parametrize("dtype", [np.int64, np.float32]) - @pytest.mark.parametrize("scalar", - [param(np.timedelta64(123, "ns"), id="timedelta64[ns]"), - param(np.timedelta64(12, "generic"), 
id="timedelta64[generic]")]) - def test_coercion_timedelta_convert_to_number(self, dtype, scalar): + @pytest.mark.parametrize("value, unit", [param(123, "ns", id="timedelta64[ns]")]) + def test_coercion_timedelta_convert_to_number(self, dtype, value, unit): # Only "ns" and "generic" timedeltas can be converted to numbers # so these are slightly special. + scalar = np.timedelta64(value, unit) arr = np.array(scalar, dtype=dtype) cast = np.array(scalar).astype(dtype) ass = np.ones((), dtype=dtype) @@ -435,6 +434,23 @@ def test_coercion_timedelta_convert_to_number(self, dtype, scalar): assert_array_equal(arr, cast) assert_array_equal(cast, cast) + @pytest.mark.parametrize("dtype", [np.int64, np.float32]) + @pytest.mark.parametrize("value, unit", + [param(12, "generic", id="timedelta64[generic]")]) + def test_coercion_generic_timedelta_convert_to_number(self, dtype, value, unit): + with pytest.warns( + DeprecationWarning, + match="Using 'generic' unit for NumPy timedelta is deprecated", + ): + scalar = np.timedelta64(value, unit) + arr = np.array(scalar, dtype=dtype) + cast = np.array(scalar).astype(dtype) + ass = np.ones((), dtype=dtype) + ass[()] = scalar # raises, as would np.array([scalar], dtype=dtype) + + assert_array_equal(arr, cast) + assert_array_equal(cast, cast) + @pytest.mark.parametrize("dtype", ["S6", "U6"]) @pytest.mark.parametrize(["val", "unit"], [param(123, "s", id="[s]"), param(123, "D", id="[D]")]) diff --git a/numpy/_core/tests/test_arrayprint.py b/numpy/_core/tests/test_arrayprint.py index e6cbb6f72229..3d31347a806b 100644 --- a/numpy/_core/tests/test_arrayprint.py +++ b/numpy/_core/tests/test_arrayprint.py @@ -1244,7 +1244,7 @@ def test_scalar_repr_numbers(dtype, value): (np.str_('a'), "'a'", "np.str_('a')"), (np.datetime64("2012"), "numpy.datetime64('2012')", "np.datetime64('2012')"), - (np.timedelta64(1), "numpy.timedelta64(1)", "np.timedelta64(1)"), + (np.timedelta64(1, 's'), "numpy.timedelta64(1,'s')", "np.timedelta64(1,'s')"), (np.void((True, 
2), dtype="?, None: def test_result_type_integers_and_unitless_timedelta64(): # Regression test for gh-20077. The following call of `result_type` # would cause a seg. fault. - td = np.timedelta64(4) - result = np.result_type(0, td) - assert_dtype_equal(result, td.dtype) + with pytest.warns( + DeprecationWarning, + match="Using 'generic' unit for NumPy timedelta is deprecated", + ): + td = np.timedelta64(4) + result = np.result_type(0, td) + assert_dtype_equal(result, td.dtype) def test_creating_dtype_with_dtype_class_errors(): diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 5f9fe44a7011..ee6863308724 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -2833,6 +2833,8 @@ def test_searchsorted_type_specific(self): for dt in types: if dt == 'M': dt = 'M8[D]' + if dt == 'm': + dt = 'm8[s]' if dt == '?': a = np.arange(2, dtype=dt) out = np.arange(2) @@ -2933,6 +2935,8 @@ def test_searchsorted_with_sorter(self): for dt in types: if dt == 'M': dt = 'M8[D]' + if dt == 'm': + dt = 'm8[s]' if dt == '?': a = np.array([1, 0], dtype=dt) # We want the sorter array to be of a type that is different diff --git a/numpy/_core/tests/test_nditer.py b/numpy/_core/tests/test_nditer.py index 520e638b2edb..90bcd4deb011 100644 --- a/numpy/_core/tests/test_nditer.py +++ b/numpy/_core/tests/test_nditer.py @@ -1529,7 +1529,10 @@ def test_iter_copy(): @pytest.mark.parametrize("dtype", np.typecodes["All"]) @pytest.mark.parametrize("loop_dtype", np.typecodes["All"]) -@pytest.mark.filterwarnings("ignore::numpy.exceptions.ComplexWarning") +@pytest.mark.filterwarnings( + "ignore::numpy.exceptions.ComplexWarning", + "ignore::DeprecationWarning", +) def test_iter_copy_casts(dtype, loop_dtype): # Ensure the dtype is never flexible: if loop_dtype.lower() == "m": diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py index d32052bda176..8e3465dbb87d 100644 --- 
a/numpy/_core/tests/test_numeric.py +++ b/numpy/_core/tests/test_numeric.py @@ -1211,10 +1211,10 @@ def test_promote_types_endian(self): assert_equal(np.promote_types('U8', '>U5'), np.dtype('U8')) - assert_equal(np.promote_types('M8', '>M8'), np.dtype('M8')) - assert_equal(np.promote_types('m8', '>m8'), np.dtype('m8')) + assert_equal(np.promote_types('M8[D]', '>M8[D]'), np.dtype('M8[D]')) + assert_equal(np.promote_types('m8[s]', '>m8[s]'), np.dtype('m8[s]')) def test_can_cast_and_promote_usertypes(self): # The rational type defines safe casting for signed integers, @@ -1772,14 +1772,8 @@ def assert_equal_w_dt(a, b, err_msg): err_msg = msg % (np.dtype(dt).name,) if dt != 'V': - if dt != 'M': - m = np.zeros((3, 3), dtype=dt) - n = np.ones(1, dtype=dt) - - m[0, 0] = n[0] - m[1, 0] = n[0] - - else: # np.zeros doesn't work for np.datetime64 + if dt == 'M': + # np.zeros doesn't work for np.datetime64 m = np.array(['1970-01-01'] * 9) m = m.reshape((3, 3)) @@ -1787,6 +1781,24 @@ def assert_equal_w_dt(a, b, err_msg): m[1, 0] = '1970-01-12' m = m.astype(dt) + elif dt == 'm': + with pytest.warns( + DeprecationWarning, + match="Using 'generic' unit for NumPy timedelta is deprecated", + ): + m = np.zeros((3, 3), dtype=dt) + n = np.ones(1, dtype=dt) + + m[0, 0] = n[0] + m[1, 0] = n[0] + + else: + m = np.zeros((3, 3), dtype=dt) + n = np.ones(1, dtype=dt) + + m[0, 0] = n[0] + m[1, 0] = n[0] + expected = np.array([2, 0, 0], dtype=np.intp) assert_equal_w_dt(np.count_nonzero(m, axis=0), expected, err_msg=err_msg) @@ -2160,8 +2172,8 @@ def _test_array_equal_parametrizations(): yield (b4, b4.copy(), False, False) yield (b4, b4.copy(), True, True) - t1 = b1.astype("timedelta64") - t2 = b2.astype("timedelta64") + t1 = b1.astype("timedelta64[D]") + t2 = b2.astype("timedelta64[D]") # Timedeltas are particular yield (t1, t1, None, False) @@ -2815,10 +2827,10 @@ def test_clip_value_min_max_flip(self, amin, amax): np.full(10, -2**64 + 1, dtype=object)), # for bugs in NPY_TIMEDELTA_MAX, based 
on a case # produced by hypothesis - (np.zeros(10, dtype='m8') - 1, - 0, - 0, - np.zeros(10, dtype='m8')), + (np.zeros(10, dtype='m8[s]') - np.timedelta64(1, 's'), + np.timedelta64(0, 's'), + np.timedelta64(0, 's'), + np.zeros(10, dtype='m8[s]')), ]) def test_clip_problem_cases(self, arr, amin, amax, exp): actual = np.clip(arr, amin, amax) @@ -2838,8 +2850,8 @@ def test_clip_scalar_nan_propagation(self, arr, amin, amax): assert_equal(actual, expected) @pytest.mark.parametrize("arr, amin, amax", [ - (np.array([1] * 10, dtype='m8'), - np.timedelta64('NaT'), + (np.array([1] * 10, dtype='m8[s]'), + np.timedelta64('NaT', 's'), np.zeros(10, dtype=np.int32)), ]) def test_NaT_propagation(self, arr, amin, amax): @@ -3207,9 +3219,7 @@ def test_timedelta(self): # Allclose currently works for timedelta64 as long as `atol` is # an integer or also a timedelta64 a = np.array([[1, 2, 3, "NaT"]], dtype="m8[ns]") - assert np.isclose(a, a, atol=0, equal_nan=True).all() assert np.isclose(a, a, atol=np.timedelta64(1, "ns"), equal_nan=True).all() - assert np.allclose(a, a, atol=0, equal_nan=True) assert np.allclose(a, a, atol=np.timedelta64(1, "ns"), equal_nan=True) def test_tol_warnings(self): diff --git a/numpy/_core/tests/test_regression.py b/numpy/_core/tests/test_regression.py index 5da357f32f7a..d04735f88234 100644 --- a/numpy/_core/tests/test_regression.py +++ b/numpy/_core/tests/test_regression.py @@ -1,4 +1,5 @@ import copy +import datetime import gc import os import pickle @@ -2334,6 +2335,7 @@ def test_scalar_copy(self): np.bytes_: b"a", np.str_: "a", np.datetime64: "2017-08-25", + np.timedelta64: datetime.timedelta(days=1) } for sctype in scalar_types: item = sctype(values.get(sctype, 1)) @@ -2359,6 +2361,7 @@ def test_dtype_scalar_squeeze(self): values = { 'S': b"a", 'M': "2018-06-20", + 'm': datetime.timedelta(days=3), } for ch in np.typecodes['All']: if ch in 'O': diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index 
281aaf41893e..8f2b654844bc 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -1152,7 +1152,7 @@ def test_center_promoter(): np.timedelta64(12358, "s"), np.timedelta64(23, "s"), np.timedelta64(74, "s"), - np.timedelta64("NaT"), + np.timedelta64("NaT", "s"), np.timedelta64(23, "s"), np.timedelta64(73, "s"), np.timedelta64(7, "s"), @@ -1204,7 +1204,7 @@ def test_nat_casts(): all_nats = itertools.product(*zip(s.upper(), s.lower())) all_nats = list(map(''.join, all_nats)) NaT_dt = np.datetime64('NaT') - NaT_td = np.timedelta64('NaT') + NaT_td = np.timedelta64('NaT', 's') for na_object in [np._NoValue, None, np.nan, 'nat', '']: # numpy treats empty string and all case combinations of 'nat' as NaT dtype = StringDType(na_object=na_object) diff --git a/numpy/_core/tests/test_ufunc.py b/numpy/_core/tests/test_ufunc.py index ca9789a5a74f..ef2f19162372 100644 --- a/numpy/_core/tests/test_ufunc.py +++ b/numpy/_core/tests/test_ufunc.py @@ -2771,21 +2771,27 @@ def test_invalid_args(self): # minimally check the exception text assert exc.match('loop of ufunc does not support') - @pytest.mark.parametrize('nat', [np.datetime64('nat'), np.timedelta64('nat')]) + @pytest.mark.parametrize( + "nat", [np.datetime64("nat", "s"), np.timedelta64("nat", "ns")] + ) def test_nat_is_not_finite(self, nat): try: assert not np.isfinite(nat) except TypeError: pass # ok, just not implemented - @pytest.mark.parametrize('nat', [np.datetime64('nat'), np.timedelta64('nat')]) + @pytest.mark.parametrize( + "nat", [np.datetime64("nat", "s"), np.timedelta64("nat", "ns")] + ) def test_nat_is_nan(self, nat): try: assert np.isnan(nat) except TypeError: pass # ok, just not implemented - @pytest.mark.parametrize('nat', [np.datetime64('nat'), np.timedelta64('nat')]) + @pytest.mark.parametrize( + "nat", [np.datetime64("nat", "s"), np.timedelta64("nat", "ns")] + ) def test_nat_is_not_inf(self, nat): try: assert not np.isinf(nat) @@ -2851,7 +2857,14 @@ def 
test_ufunc_types(ufunc): if 'O' in typ or '?' in typ: continue inp, out = typ.split('->') - args = [np.ones((3, 3), t) for t in inp] + if 'm' in inp: + with pytest.warns( + DeprecationWarning, + match="Using 'generic' unit for NumPy timedelta is deprecated", + ): + args = [np.ones((3, 3), t) for t in inp] + else: + args = [np.ones((3, 3), t) for t in inp] with warnings.catch_warnings(record=True): warnings.filterwarnings("always") res = ufunc(*args) diff --git a/numpy/lib/tests/test_arraysetops.py b/numpy/lib/tests/test_arraysetops.py index 9faf670be96d..ef541cf4892b 100644 --- a/numpy/lib/tests/test_arraysetops.py +++ b/numpy/lib/tests/test_arraysetops.py @@ -797,7 +797,7 @@ def test_unique_1d(self): assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt)) # test for ticket 2111 - timedelta - nat = np.timedelta64('nat') + nat = np.timedelta64('nat', 's') a = [np.timedelta64(1, 'D'), nat, np.timedelta64(1, 'h'), nat] ua = [np.timedelta64(1, 'h'), np.timedelta64(1, 'D'), nat] ua_idx = [2, 0, 1] diff --git a/numpy/lib/tests/test_nanfunctions.py b/numpy/lib/tests/test_nanfunctions.py index 6ef86bf84ee0..9ac1b22a6ab8 100644 --- a/numpy/lib/tests/test_nanfunctions.py +++ b/numpy/lib/tests/test_nanfunctions.py @@ -945,27 +945,37 @@ def test_result_values(self): @pytest.mark.parametrize("axis", [None, 0, 1]) @pytest.mark.parametrize("dtype", _TYPE_CODES) def test_allnans(self, dtype, axis): - mat = np.full((3, 3), np.nan).astype(dtype) - with pytest.warns(RuntimeWarning) as r: + mat = np.full((3, 3), np.nan, dtype=dtype) + with pytest.warns((RuntimeWarning, DeprecationWarning)) as r: output = np.nanmedian(mat, axis=axis) assert output.dtype == mat.dtype assert np.isnan(output).all() + _filtered_record = [ + item.message + for item in r + if "All-NaN slice encountered" in str(item.message) + ] if axis is None: - assert_(len(r) == 1) + assert_(len(_filtered_record) == 1) else: - assert_(len(r) == 3) + assert_(len(_filtered_record) == 3) # Check scalar - scalar = 
np.array(np.nan).astype(dtype)[()] + scalar = np.full((1, 1), np.nan, dtype=dtype)[0, 0] output_scalar = np.nanmedian(scalar) assert output_scalar.dtype == scalar.dtype assert np.isnan(output_scalar) + _filtered_record = [ + item.message + for item in r + if "All-NaN slice encountered" in str(item.message) + ] if axis is None: - assert_(len(r) == 2) + assert_(len(_filtered_record) == 2) else: - assert_(len(r) == 4) + assert_(len(_filtered_record) == 4) def test_empty(self): mat = np.zeros((0, 3)) diff --git a/numpy/polynomial/tests/test_classes.py b/numpy/polynomial/tests/test_classes.py index 156dccf6ea88..43dd788fdc4e 100644 --- a/numpy/polynomial/tests/test_classes.py +++ b/numpy/polynomial/tests/test_classes.py @@ -315,7 +315,11 @@ def test_truediv(Poly): p2 = p1 * 5 for stype in np.ScalarType: - if not issubclass(stype, Number) or issubclass(stype, bool): + if ( + not issubclass(stype, Number) + or issubclass(stype, bool) + or issubclass(stype, np.timedelta64) + ): continue s = stype(5) assert_poly_almost_equal(op.truediv(p2, s), p1) @@ -328,6 +332,14 @@ def test_truediv(Poly): s = stype(5, 0) assert_poly_almost_equal(op.truediv(p2, s), p1) assert_raises(TypeError, op.truediv, s, p2) + for stype in [np.timedelta64]: + s = stype(5, 'D') + with pytest.warns( + DeprecationWarning, + match="Using 'generic' unit for NumPy timedelta is deprecated", + ): + assert_poly_almost_equal(op.truediv(p2, s), p1) + assert_raises(TypeError, op.truediv, s, p2) for s in [(), [], {}, False, np.array([1])]: assert_raises(TypeError, op.truediv, p2, s) assert_raises(TypeError, op.truediv, s, p2) diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index 81e990bbca3b..1833f5b57cb4 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -955,7 +955,7 @@ def assert_same_inf_values(x, y, infs_mask): # note: this definition of relative error matches that one # used by assert_allclose (found in np.isclose) # Filter values where the 
divisor would be zero - nonzero = np.bool(y != 0) + nonzero = np.bool(y != np.zeros_like(y)) nonzero_and_invalid = np.logical_and(invalids, nonzero) if all(~nonzero_and_invalid): diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py index 6d43343ef98a..906f6ea3b0d8 100644 --- a/numpy/testing/tests/test_utils.py +++ b/numpy/testing/tests/test_utils.py @@ -432,7 +432,11 @@ def test_nat_items(self): nadt_s = np.datetime64("NaT", "s") nadt_d = np.datetime64("NaT", "ns") # not a timedelta - natd_no_unit = np.timedelta64("NaT") + with pytest.warns( + DeprecationWarning, + match="Using 'generic' unit for NumPy timedelta is deprecated", + ): + natd_no_unit = np.timedelta64("NaT") natd_s = np.timedelta64("NaT", "s") natd_d = np.timedelta64("NaT", "ns") @@ -1337,8 +1341,12 @@ def test_report_max_relative_error(self): def test_timedelta(self): # see gh-18286 - a = np.array([[1, 2, 3, "NaT"]], dtype="m8[ns]") - assert_allclose(a, a) + with pytest.warns( + DeprecationWarning, + match="Using 'generic' unit for NumPy timedelta is deprecated", + ): + a = np.array([[1, 2, 3, "NaT"]], dtype="m8[ns]") + assert_allclose(a, a) def test_error_message_unsigned(self): """Check the message is formatted correctly when overflow can occur diff --git a/numpy/tests/test_ctypeslib.py b/numpy/tests/test_ctypeslib.py index 659c3d639e97..31e2268d0957 100644 --- a/numpy/tests/test_ctypeslib.py +++ b/numpy/tests/test_ctypeslib.py @@ -385,16 +385,17 @@ def test_overlapping(self): def test_cannot_convert_to_ctypes(self): _type_to_value = { - np.str_: "aa", - np.bool: True, - np.datetime64: "2026-01-01", + np.str_: ("aa",), + np.bool: (True,), + np.datetime64: ("2026-01-01",), + np.timedelta64: (1, "s") } for _scalar_type in np.sctypeDict.values(): if _scalar_type == np.object_: continue if _scalar_type in _type_to_value: - numpy_scalar = _scalar_type(_type_to_value[_scalar_type]) + numpy_scalar = _scalar_type(*_type_to_value[_scalar_type]) else: numpy_scalar = 
_scalar_type(1) diff --git a/numpy/typing/tests/test_typing.py b/numpy/typing/tests/test_typing.py index dbe16a37ada4..1c37bb8a1401 100644 --- a/numpy/typing/tests/test_typing.py +++ b/numpy/typing/tests/test_typing.py @@ -187,6 +187,7 @@ def test_reveal(path: str) -> None: pytest.fail(reasons, pytrace=False) +@pytest.mark.filterwarnings("ignore::DeprecationWarning") @pytest.mark.slow @pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed") @pytest.mark.filterwarnings("ignore:numpy.fix is deprecated:DeprecationWarning") From 3af177c5779ecf58b0d24137db68725d9ab35bcc Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sun, 29 Mar 2026 09:01:01 -0600 Subject: [PATCH 1546/1718] MAINT: Update main after 2.4.4 release. - Forward port 2.4.4-notes.rst - Forward port 2.4.4-changelog.rst - Update release.rst [skip azp] [skip cirrus] [skip actions] --- doc/changelog/2.4.4-changelog.rst | 29 +++++++++++++++++++++ doc/source/release.rst | 1 + doc/source/release/2.4.4-notes.rst | 41 ++++++++++++++++++++++++++++++ 3 files changed, 71 insertions(+) create mode 100644 doc/changelog/2.4.4-changelog.rst create mode 100644 doc/source/release/2.4.4-notes.rst diff --git a/doc/changelog/2.4.4-changelog.rst b/doc/changelog/2.4.4-changelog.rst new file mode 100644 index 000000000000..110c9e445f8a --- /dev/null +++ b/doc/changelog/2.4.4-changelog.rst @@ -0,0 +1,29 @@ + +Contributors +============ + +A total of 8 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Charles Harris +* Daniel Haag + +* Denis Prokopenko + +* Harshith J + +* Koki Watanabe +* Marten van Kerkwijk +* Matti Picus +* Nathan Goldbaum + +Pull requests merged +==================== + +A total of 7 pull requests were merged for this release. 
+ +* `#30978 `__: MAINT: Prepare 2.4.x for further development +* `#31049 `__: BUG: Add test to reproduce problem described in #30816 (#30818) +* `#31052 `__: BUG: fix FNV-1a 64-bit selection by using NPY_SIZEOF_UINTP (#31035) +* `#31053 `__: BUG: avoid warning on ufunc with where=True and no output +* `#31058 `__: DOC: document caveats of ndarray.resize on 3.14 and newer +* `#31079 `__: TST: fix POWER VSX feature mapping (#30801) +* `#31084 `__: MAINT: numpy.i: Replace deprecated ``sprintf`` with ``snprintf``... + diff --git a/doc/source/release.rst b/doc/source/release.rst index 7ce0933d73e9..5736b4bd7ffc 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -6,6 +6,7 @@ Release notes :maxdepth: 2 2.5.0 + 2.4.4 2.4.3 2.4.2 2.4.1 diff --git a/doc/source/release/2.4.4-notes.rst b/doc/source/release/2.4.4-notes.rst new file mode 100644 index 000000000000..568d2a526127 --- /dev/null +++ b/doc/source/release/2.4.4-notes.rst @@ -0,0 +1,41 @@ +.. currentmodule:: numpy + +========================= +NumPy 2.4.4 Release Notes +========================= + +The NumPy 2.4.4 is a patch release that fixes bugs discovered after the 2.4.3 +release. It should finally close issue #30816, the OpenBLAS threading problem +on ARM. + +This release supports Python versions 3.11-3.14 + + +Contributors +============ + +A total of 8 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Charles Harris +* Daniel Haag + +* Denis Prokopenko + +* Harshith J + +* Koki Watanabe +* Marten van Kerkwijk +* Matti Picus +* Nathan Goldbaum + + +Pull requests merged +==================== + +A total of 7 pull requests were merged for this release. 
+ +* `#30978 `__: MAINT: Prepare 2.4.x for further development +* `#31049 `__: BUG: Add test to reproduce problem described in #30816 (#30818) +* `#31052 `__: BUG: fix FNV-1a 64-bit selection by using NPY_SIZEOF_UINTP (#31035) +* `#31053 `__: BUG: avoid warning on ufunc with where=True and no output +* `#31058 `__: DOC: document caveats of ndarray.resize on 3.14 and newer +* `#31079 `__: TST: fix POWER VSX feature mapping (#30801) +* `#31084 `__: MAINT: numpy.i: Replace deprecated ``sprintf`` with ``snprintf``... From d56649bb2bef8c5035d69de45d32277a08d3b81b Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 29 Mar 2026 18:51:55 +0200 Subject: [PATCH 1547/1718] TYP: deprecate unitless ``timedelta64`` construction --- numpy/__init__.pyi | 52 +++++++++++++++++---- numpy/typing/tests/data/fail/type_check.pyi | 3 +- numpy/typing/tests/data/pass/comparisons.py | 4 +- numpy/typing/tests/data/pass/scalars.py | 16 +++---- 4 files changed, 55 insertions(+), 20 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 64956883597b..89c848139c11 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -5395,26 +5395,60 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] def nbytes(self) -> L[8]: ... @overload - def __new__(cls, value: _TD64ItemT_co | timedelta64[_TD64ItemT_co], /) -> Self: ... - @overload + def __new__(cls, value: timedelta64[_TD64ItemT_co], /) -> Self: ... + @overload + @deprecated( + "Using 'generic' unit for NumPy timedelta is deprecated, and will raise an error in the future. " + "Please use a specific units instead." + ) + def __new__(cls, value: _TD64ItemT_co, /) -> Self: ... + @overload + @deprecated( + "Using 'generic' unit for NumPy timedelta is deprecated, and will raise an error in the future. " + "Please use a specific units instead." + ) def __new__(cls, /) -> timedelta64[L[0]]: ... @overload def __new__(cls, value: _NaTValue | None, format: _TimeUnitSpec[_TD64Unit], /) -> timedelta64[None]: ... 
@overload - def __new__(cls, value: L[0], format: _TimeUnitSpec[_IntTD64Unit] = ..., /) -> timedelta64[L[0]]: ... + @deprecated( + "Using 'generic' unit for NumPy timedelta is deprecated, and will raise an error in the future. " + "Please use a specific units instead." + ) + def __new__(cls, value: L[0], /) -> timedelta64[L[0]]: ... + @overload + def __new__(cls, value: L[0], format: _TimeUnitSpec[_IntTD64Unit], /) -> timedelta64[L[0]]: ... + @overload + @deprecated( + "Using 'generic' unit for NumPy timedelta is deprecated, and will raise an error in the future. " + "Please use a specific units instead." + ) + def __new__(cls, value: _IntLike_co, /) -> timedelta64[int]: ... @overload - def __new__(cls, value: _IntLike_co, format: _TimeUnitSpec[_IntTD64Unit] = ..., /) -> timedelta64[int]: ... + def __new__(cls, value: _IntLike_co, format: _TimeUnitSpec[_IntTD64Unit], /) -> timedelta64[int]: ... @overload def __new__(cls, value: dt.timedelta, format: _TimeUnitSpec[_IntTimeUnit], /) -> timedelta64[int]: ... @overload + @deprecated( + "Using 'generic' unit for NumPy timedelta is deprecated, and will raise an error in the future. " + "Please use a specific units instead." + ) + def __new__(cls, value: dt.timedelta | _IntLike_co, /) -> timedelta64[dt.timedelta]: ... + @overload def __new__( cls, value: dt.timedelta | _IntLike_co, - format: _TimeUnitSpec[_NativeTD64Unit] = ..., + format: _TimeUnitSpec[_NativeTD64Unit], /, ) -> timedelta64[dt.timedelta]: ... @overload - def __new__(cls, value: _ConvertibleToTD64, format: _TimeUnitSpec[_TD64Unit] = ..., /) -> timedelta64: ... + @deprecated( + "Using 'generic' unit for NumPy timedelta is deprecated, and will raise an error in the future. " + "Please use a specific units instead." + ) + def __new__(cls, value: _ConvertibleToTD64, /) -> timedelta64: ... + @overload + def __new__(cls, value: _ConvertibleToTD64, format: _TimeUnitSpec[_TD64Unit], /) -> timedelta64: ... 
# inherited at runtime from `signedinteger` def __class_getitem__(cls, type_arg: type | object, /) -> GenericAlias: ... @@ -5613,21 +5647,21 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] @overload def __le__(self, other: _ArrayLikeTD64_co | _NestedSequence[_SupportsGE], /) -> NDArray[bool_]: ... @overload - def __le__(self, other: _SupportsGT, /) -> bool_: ... + def __le__(self, other: _SupportsGE, /) -> bool_: ... @overload def __gt__(self, other: _TD64Like_co, /) -> bool_: ... @overload def __gt__(self, other: _ArrayLikeTD64_co | _NestedSequence[_SupportsLT], /) -> NDArray[bool_]: ... @overload - def __gt__(self, other: _SupportsGT, /) -> bool_: ... + def __gt__(self, other: _SupportsLT, /) -> bool_: ... @overload def __ge__(self, other: _TD64Like_co, /) -> bool_: ... @overload def __ge__(self, other: _ArrayLikeTD64_co | _NestedSequence[_SupportsLE], /) -> NDArray[bool_]: ... @overload - def __ge__(self, other: _SupportsGT, /) -> bool_: ... + def __ge__(self, other: _SupportsLE, /) -> bool_: ... 
class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]): @property diff --git a/numpy/typing/tests/data/fail/type_check.pyi b/numpy/typing/tests/data/fail/type_check.pyi index 8b68e996304c..d3cb8d2c0655 100644 --- a/numpy/typing/tests/data/fail/type_check.pyi +++ b/numpy/typing/tests/data/fail/type_check.pyi @@ -9,4 +9,5 @@ np.isrealobj(DTYPE_i8) # type: ignore[arg-type] np.typename(DTYPE_i8) # type: ignore[call-overload] np.typename("invalid") # type: ignore[call-overload] -np.common_type(np.timedelta64()) # type: ignore[arg-type] +_td64: np.timedelta64 +np.common_type(_td64) # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/pass/comparisons.py b/numpy/typing/tests/data/pass/comparisons.py index b2e52762c7a8..0782eb392f4d 100644 --- a/numpy/typing/tests/data/pass/comparisons.py +++ b/numpy/typing/tests/data/pass/comparisons.py @@ -34,8 +34,8 @@ AR_S: np.ndarray[Any, np.dtype[np.bytes_]] = np.array([b"a"], "S") AR_T = cast(np.ndarray[Any, np.dtypes.StringDType], np.array(["a"], "T")) AR_U: np.ndarray[Any, np.dtype[np.str_]] = np.array(["a"], "U") -AR_m: np.ndarray[Any, np.dtype[np.timedelta64]] = np.array([np.timedelta64("1")]) -AR_M: np.ndarray[Any, np.dtype[np.datetime64]] = np.array([np.datetime64("1")]) +AR_m: np.ndarray[Any, np.dtype[np.timedelta64]] = np.array([np.timedelta64("1", "s")]) +AR_M: np.ndarray[Any, np.dtype[np.datetime64]] = np.array([np.datetime64("1", "s")]) AR_O: np.ndarray[Any, np.dtype[np.object_]] = np.array([1], dtype=object) # Arrays diff --git a/numpy/typing/tests/data/pass/scalars.py b/numpy/typing/tests/data/pass/scalars.py index 133c5627e6e5..dff5bc005974 100644 --- a/numpy/typing/tests/data/pass/scalars.py +++ b/numpy/typing/tests/data/pass/scalars.py @@ -105,16 +105,16 @@ def __float__(self) -> float: np.datetime64(None) np.datetime64(None, "D") -np.timedelta64() -np.timedelta64(0) +np.timedelta64() # type: ignore[deprecated] +np.timedelta64(0) # type: ignore[deprecated] np.timedelta64(0, "D") 
np.timedelta64(0, ('ms', 3)) np.timedelta64(0, b"D") -np.timedelta64("3") -np.timedelta64(b"5") -np.timedelta64(np.timedelta64(2)) -np.timedelta64(dt.timedelta(2)) -np.timedelta64(None) +np.timedelta64("3") # type: ignore[deprecated] +np.timedelta64(b"5") # type: ignore[deprecated] +np.timedelta64(np.timedelta64(2)) # type: ignore[deprecated] +np.timedelta64(dt.timedelta(2)) # type: ignore[deprecated] +np.timedelta64(None) # type: ignore[deprecated] np.timedelta64(None, "D") np.void(1) @@ -132,7 +132,7 @@ def __float__(self) -> float: f8 = np.float64() c16 = np.complex128() b = np.bool() -td = np.timedelta64() +td = np.timedelta64(0, "ns") U = np.str_("1") S = np.bytes_("1") AR = np.array(1, dtype=np.float64) From 86378b1a926e28498e77b7ff1711d14eae6755b4 Mon Sep 17 00:00:00 2001 From: aaronzuo Date: Mon, 30 Mar 2026 21:56:50 +0800 Subject: [PATCH 1548/1718] fix dir structure Signed-off-by: aaronzuo --- numpy/f2py/{tests_pure => tests}/test_capi_maps.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename numpy/f2py/{tests_pure => tests}/test_capi_maps.py (100%) diff --git a/numpy/f2py/tests_pure/test_capi_maps.py b/numpy/f2py/tests/test_capi_maps.py similarity index 100% rename from numpy/f2py/tests_pure/test_capi_maps.py rename to numpy/f2py/tests/test_capi_maps.py From 57a247674104858e3322cafe277bc5e5ffe84d94 Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Mon, 30 Mar 2026 12:54:15 -0400 Subject: [PATCH 1549/1718] MAINT: f2py: Stop setting re._MAXCACHE to 50. The value has been set to 50 for more than 20 years. Computers generally have much more memory now than they did 20 years ago, so we should be able to let `re` use the default. Closes gh-31104. 
--- numpy/f2py/crackfortran.py | 1 - 1 file changed, 1 deletion(-) diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py index 5a43a5d27f1c..1f89aba8a841 100644 --- a/numpy/f2py/crackfortran.py +++ b/numpy/f2py/crackfortran.py @@ -243,7 +243,6 @@ def outmess(line, flag=1): sys.stdout.write(line) -re._MAXCACHE = 50 defaultimplicitrules = {} for c in "abcdefghopqrstuvwxyz$_": defaultimplicitrules[c] = {'typespec': 'real'} From 38fb93db6e83689033b0770c067e5480b315f830 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 30 Mar 2026 19:22:07 +0000 Subject: [PATCH 1550/1718] MAINT: Bump pytest-cov from 7.0.0 to 7.1.0 in /requirements Bumps [pytest-cov](https://github.com/pytest-dev/pytest-cov) from 7.0.0 to 7.1.0. - [Changelog](https://github.com/pytest-dev/pytest-cov/blob/master/CHANGELOG.rst) - [Commits](https://github.com/pytest-dev/pytest-cov/compare/v7.0.0...v7.1.0) --- updated-dependencies: - dependency-name: pytest-cov dependency-version: 7.1.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- requirements/test_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index f704fab81ede..8ffbe62780a5 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -1,7 +1,7 @@ Cython hypothesis==6.151.9 pytest==9.0.2 -pytest-cov==7.0.0 +pytest-cov==7.1.0 meson ninja; sys_platform != "emscripten" pytest-xdist From 24dc6539dcad490540b7ee858506fdd8e44af1b1 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Tue, 31 Mar 2026 15:23:08 -0600 Subject: [PATCH 1551/1718] BUG: fix heap buffer overflow in timedelta to string casts --- numpy/_core/src/multiarray/datetime.c | 3 +++ numpy/_core/tests/test_datetime.py | 4 ++++ 2 files changed, 7 insertions(+) diff --git a/numpy/_core/src/multiarray/datetime.c b/numpy/_core/src/multiarray/datetime.c index 04a21c31b9b9..715fdfbdd756 100644 --- a/numpy/_core/src/multiarray/datetime.c +++ b/numpy/_core/src/multiarray/datetime.c @@ -4090,6 +4090,9 @@ time_to_string_resolve_descriptors( if (loop_descrs[1] == NULL) { return -1; } + if (given_descrs[1] != NULL) { + size = (size < given_descrs[1]->elsize) ? 
size : given_descrs[1]->elsize; + } loop_descrs[1]->elsize = size; } diff --git a/numpy/_core/tests/test_datetime.py b/numpy/_core/tests/test_datetime.py index ff2c964b8d7c..469cf1823a86 100644 --- a/numpy/_core/tests/test_datetime.py +++ b/numpy/_core/tests/test_datetime.py @@ -2696,6 +2696,10 @@ def test_limit_str_roundtrip(self, time_unit, sign): limit_via_str = np.datetime64(str(limit), time_unit) assert limit_via_str == limit + def test_cast_to_truncated_string_doesnt_overflow(self): + a = np.array([1, -2, 1], dtype='timedelta64[D]') + assert_array_equal(a.astype('U1'), ['1', '-', '1']) + def test_datetime_hash_nat(self): nat1 = np.datetime64() nat2 = np.datetime64() From a2850ae6c3da64643103523df310d57ee73448e9 Mon Sep 17 00:00:00 2001 From: nakul-krishnakumar Date: Thu, 2 Apr 2026 01:58:27 +0530 Subject: [PATCH 1552/1718] MAINT: Enable linting with ruff E501 --- numpy/lib/tests/test_format.py | 21 +++++++++-- numpy/linalg/tests/test_linalg.py | 61 ++++++++++++++++++++++++------- ruff.toml | 2 - 3 files changed, 65 insertions(+), 19 deletions(-) diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py index d9e70c118792..98d1d440b66a 100644 --- a/numpy/lib/tests/test_format.py +++ b/numpy/lib/tests/test_format.py @@ -72,8 +72,20 @@ >>> >>> >>> NbufferT = [ - ... ([3,2], (6j, 6., ('nn', [6j,4j], [6.,4.], [1,2]), 'NN', True), 'cc', ('NN', 6j), [[6.,4.],[6.,4.]], 8), - ... ([4,3], (7j, 7., ('oo', [7j,5j], [7.,5.], [2,1]), 'OO', False), 'dd', ('OO', 7j), [[7.,5.],[7.,5.]], 9), + ... ([3,2], ( + ... 6j, + ... 6., + ... ('nn', [6j,4j], [6.,4.], [1,2]), + ... 'NN', + ... True + ... ), 'cc', ('NN', 6j), [[6.,4.],[6.,4.]], 8), + ... ([4,3], ( + ... 7j, + ... 7., + ... ('oo', [7j,5j], [7.,5.], [2,1]), + ... 'OO', + ... False + ... ), 'dd', ('OO', 7j), [[7.,5.],[7.,5.]], 9), ... ] >>> >>> @@ -110,7 +122,8 @@ >>> for arr in basic_arrays + record_arrays: ... f = BytesIO() - ... 
format.write_array_header_1_0(f, arr) # XXX: arr is not a dict, items gets called on it + ... # XXX: arr is not a dict, items gets called on it + ... format.write_array_header_1_0(f, arr) ... print(repr(f.getvalue())) ... "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (0,)} \n" @@ -273,7 +286,7 @@ "\x16\x02{'descr': [('x', 'i4', (2,)), ('y', '>f8', (2, 2)), ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n" "\x16\x02{'descr': [('x', '>i4', (2,)),\n ('Info',\n [('value', '>c16'),\n ('y2', '>f8'),\n ('Info2',\n [('name', '|S2'),\n ('value', '>c16', (2,)),\n ('y3', '>f8', (2,)),\n ('z3', '>u4', (2,))]),\n ('name', '|S2'),\n ('z2', '|b1')]),\n ('color', '|S2'),\n ('info', [('Name', '>U8'), ('Value', '>c16')]),\n ('y', '>f8', (2, 2)),\n ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n" -''' +''' # noqa: E501 import os import sys import warnings diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py index cd93acaf79c0..a5d173a6dd03 100644 --- a/numpy/linalg/tests/test_linalg.py +++ b/numpy/linalg/tests/test_linalg.py @@ -157,7 +157,10 @@ def apply_tag(tag, cases): array([2. + 1j, 1. + 2j], dtype=cdouble)), LinalgCase("cdouble_2", array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=cdouble), - array([[2. + 1j, 1. + 2j, 1 + 3j], [1 - 2j, 1 - 3j, 1 - 6j]], dtype=cdouble)), + array( + [[2. + 1j, 1. + 2j, 1 + 3j], [1 - 2j, 1 - 3j, 1 - 6j]], + dtype=cdouble + )), LinalgCase("0x0", np.empty((0, 0), dtype=double), np.empty((0,), dtype=double), @@ -189,28 +192,42 @@ def apply_tag(tag, cases): array([2., 1., 3.], dtype=double)), LinalgCase("csingle_nsq_1", array( - [[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=csingle), + [[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], + dtype=csingle + ), array([2. + 1j, 1. + 2j], dtype=csingle)), LinalgCase("csingle_nsq_2", array( - [[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=csingle), + [[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. 
- 4j, 6. + 8j]], + dtype=csingle + ), array([2. + 1j, 1. + 2j, 3. - 3j], dtype=csingle)), LinalgCase("cdouble_nsq_1", array( - [[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=cdouble), + [[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], + dtype=cdouble + ), array([2. + 1j, 1. + 2j], dtype=cdouble)), LinalgCase("cdouble_nsq_2", array( - [[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=cdouble), + [[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], + dtype=cdouble + ), array([2. + 1j, 1. + 2j, 3. - 3j], dtype=cdouble)), LinalgCase("cdouble_nsq_1_2", array( - [[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=cdouble), + [[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], + dtype=cdouble + ), array([[2. + 1j, 1. + 2j], [1 - 1j, 2 - 2j]], dtype=cdouble)), LinalgCase("cdouble_nsq_2_2", array( - [[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=cdouble), - array([[2. + 1j, 1. + 2j], [1 - 1j, 2 - 2j], [1 - 1j, 2 - 2j]], dtype=cdouble)), + [[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], + dtype=cdouble + ), + array([[2. + 1j, 1. 
+ 2j], [1 - 1j, 2 - 2j], [1 - 1j, 2 - 2j]], + dtype=cdouble + )), LinalgCase("8x11", np.random.rand(8, 11), np.random.rand(8)), @@ -633,7 +650,9 @@ def do(self, a, b, tags): res = linalg.eig(a) eigenvalues, eigenvectors = res.eigenvalues, res.eigenvectors assert_allclose(matmul(a, eigenvectors), - np.asarray(eigenvectors) * np.asarray(eigenvalues)[..., None, :], + np.asarray(eigenvectors) * np.asarray(eigenvalues)[ + ..., None, : + ], rtol=get_rtol(eigenvalues.dtype)) assert_(consistent_subclass(eigenvectors, a)) @@ -735,8 +754,14 @@ def hermitian(mat): axes[-1], axes[-2] = axes[-2], axes[-1] return np.conj(np.transpose(mat, axes=axes)) - assert_almost_equal(np.matmul(u, hermitian(u)), np.broadcast_to(np.eye(u.shape[-1]), u.shape)) - assert_almost_equal(np.matmul(vt, hermitian(vt)), np.broadcast_to(np.eye(vt.shape[-1]), vt.shape)) + assert_almost_equal( + np.matmul(u, hermitian(u)), + np.broadcast_to(np.eye(u.shape[-1]), u.shape) + ) + assert_almost_equal( + np.matmul(vt, hermitian(vt)), + np.broadcast_to(np.eye(vt.shape[-1]), vt.shape) + ) assert_equal(np.sort(s)[..., ::-1], s) assert_(consistent_subclass(u, a)) assert_(consistent_subclass(vt, a)) @@ -881,7 +906,12 @@ def do(self, a, b, tags): a_ginv = linalg.pinv(a) # `a @ a_ginv == I` does not hold if a is singular dot = matmul - assert_almost_equal(dot(dot(a, a_ginv), a), a, single_decimal=5, double_decimal=11) + assert_almost_equal( + dot(dot(a, a_ginv), a), + a, + single_decimal=5, + double_decimal=11 + ) assert_(consistent_subclass(a_ginv, a)) @@ -895,7 +925,12 @@ def do(self, a, b, tags): a_ginv = linalg.pinv(a, hermitian=True) # `a @ a_ginv == I` does not hold if a is singular dot = matmul - assert_almost_equal(dot(dot(a, a_ginv), a), a, single_decimal=5, double_decimal=11) + assert_almost_equal( + dot(dot(a, a_ginv), a), + a, + single_decimal=5, + double_decimal=11 + ) assert_(consistent_subclass(a_ginv, a)) diff --git a/ruff.toml b/ruff.toml index f204c77545c0..6803ca5dd4a5 100644 --- a/ruff.toml +++ 
b/ruff.toml @@ -84,8 +84,6 @@ ignore = [ "numpy/_core/_add_newdocs.py" = ["E501"] "numpy/_core/_add_newdocs_scalars.py" = ["E501"] -"numpy/lib/tests/test_format.py" = ["E501"] -"numpy/linalg/tests/test_linalg.py" = ["E501"] "numpy/f2py/*py" = ["E501"] "numpy*pyi" = ["E501"] # "useless assignments" aren't so useless when you're testing that they don't make type checkers scream From 901a3dc333167bb4e874787676204288eb97afc9 Mon Sep 17 00:00:00 2001 From: nakul-krishnakumar Date: Thu, 2 Apr 2026 02:17:25 +0530 Subject: [PATCH 1553/1718] MAINT: Enable linting with ruff E501 --- numpy/linalg/tests/test_linalg.py | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py index a5d173a6dd03..3a2ee2b85ad5 100644 --- a/numpy/linalg/tests/test_linalg.py +++ b/numpy/linalg/tests/test_linalg.py @@ -160,7 +160,7 @@ def apply_tag(tag, cases): array( [[2. + 1j, 1. + 2j, 1 + 3j], [1 - 2j, 1 - 3j, 1 - 6j]], dtype=cdouble - )), + )), LinalgCase("0x0", np.empty((0, 0), dtype=double), np.empty((0,), dtype=double), @@ -194,25 +194,25 @@ def apply_tag(tag, cases): array( [[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=csingle - ), + ), array([2. + 1j, 1. + 2j], dtype=csingle)), LinalgCase("csingle_nsq_2", array( [[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=csingle - ), + ), array([2. + 1j, 1. + 2j, 3. - 3j], dtype=csingle)), LinalgCase("cdouble_nsq_1", array( [[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=cdouble - ), + ), array([2. + 1j, 1. + 2j], dtype=cdouble)), LinalgCase("cdouble_nsq_2", array( [[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=cdouble - ), + ), array([2. + 1j, 1. + 2j, 3. - 3j], dtype=cdouble)), LinalgCase("cdouble_nsq_1_2", array( @@ -224,10 +224,11 @@ def apply_tag(tag, cases): array( [[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=cdouble - ), - array([[2. + 1j, 1. 
+ 2j], [1 - 1j, 2 - 2j], [1 - 1j, 2 - 2j]], - dtype=cdouble - )), + ), + array( + [[2. + 1j, 1. + 2j], [1 - 1j, 2 - 2j], [1 - 1j, 2 - 2j]], + dtype=cdouble + )), LinalgCase("8x11", np.random.rand(8, 11), np.random.rand(8)), From c34626dc9403ed5594bf7e98411d696ab025106a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 1 Apr 2026 22:47:46 +0000 Subject: [PATCH 1554/1718] MAINT: Bump pyrefly from 0.57.1 to 0.58.0 in /requirements Bumps [pyrefly](https://github.com/facebook/pyrefly) from 0.57.1 to 0.58.0. - [Release notes](https://github.com/facebook/pyrefly/releases) - [Commits](https://github.com/facebook/pyrefly/compare/0.57.1...0.58.0) --- updated-dependencies: - dependency-name: pyrefly dependency-version: 0.58.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- requirements/typing_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/typing_requirements.txt b/requirements/typing_requirements.txt index 713bebb044db..c82664af8668 100644 --- a/requirements/typing_requirements.txt +++ b/requirements/typing_requirements.txt @@ -3,4 +3,4 @@ -r test_requirements.txt mypy==1.19.1 -pyrefly==0.57.1 +pyrefly==0.58.0 From bb16d6c195dcaaacf6cc943aa6212cd42ba885e9 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sun, 29 Mar 2026 15:04:49 -0600 Subject: [PATCH 1555/1718] MAINT: Gitignore personal AI config files. This only covers cursor and claude, copilot has other options. Grok (xAI) generated the list of files to ignore. 
[skip azp] [skip cirrus] [skip actions] --- .gitignore | 67 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) diff --git a/.gitignore b/.gitignore index b54de4091bf3..de556eb17de3 100644 --- a/.gitignore +++ b/.gitignore @@ -109,6 +109,73 @@ doc/source/**/generated/ doc/source/release/notes-towncrier.rst doc/source/.jupyterlite.doit.db +# AI coding agent configuration files # +####################################### +# Personal/local config — not shared with the project. +# Remove entries below if the project decides to check in shared +# project-wide instructions (e.g. a team CLAUDE.md or AGENTS.md). + +# Claude Code (Anthropic) +CLAUDE.md +CLAUDE.local.md +.claude/ +.claudeignore + +# AGENTS.md (cross-tool standard: Codex, Cursor, Cline, Windsurf, etc.) +AGENTS.md +AGENTS.override.md + +# OpenAI Codex +.codex/ + +# Cursor +.cursorrules +.cursorignore +.cursor/ + +# Windsurf / Codeium +.windsurfrules +.windsurf/ +.codeiumignore + +# Warp +WARP.md + +# Google Gemini CLI +GEMINI.md +.gemini/ +.geminiignore + +# Google Jules +JULES.md + +# JetBrains Junie +.junie/ + +# Cline +.clinerules +.clinerules/ + +# Roo Code +.roorules +.roo/ + +# KiloCode +.kilocoderules +.kilocode/ + +# Aider +.aider* +.aider.conf.yml + +# Continue.dev +.continue/ + +# Generic / cross-tool AI ignore files +.aiignore +.aiexclude +.uignore + # Things specific to this project # ################################### benchmarks/results From e08c138ca7eda0aa5a85eab0a1e115e27c8ae15d Mon Sep 17 00:00:00 2001 From: nakul-krishnakumar Date: Thu, 2 Apr 2026 11:23:42 +0530 Subject: [PATCH 1556/1718] Revert "MAINT: Enable linting with ruff E501" This reverts commit 901a3dc333167bb4e874787676204288eb97afc9. 
--- numpy/linalg/tests/test_linalg.py | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py index 3a2ee2b85ad5..a5d173a6dd03 100644 --- a/numpy/linalg/tests/test_linalg.py +++ b/numpy/linalg/tests/test_linalg.py @@ -160,7 +160,7 @@ def apply_tag(tag, cases): array( [[2. + 1j, 1. + 2j, 1 + 3j], [1 - 2j, 1 - 3j, 1 - 6j]], dtype=cdouble - )), + )), LinalgCase("0x0", np.empty((0, 0), dtype=double), np.empty((0,), dtype=double), @@ -194,25 +194,25 @@ def apply_tag(tag, cases): array( [[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=csingle - ), + ), array([2. + 1j, 1. + 2j], dtype=csingle)), LinalgCase("csingle_nsq_2", array( [[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=csingle - ), + ), array([2. + 1j, 1. + 2j, 3. - 3j], dtype=csingle)), LinalgCase("cdouble_nsq_1", array( [[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=cdouble - ), + ), array([2. + 1j, 1. + 2j], dtype=cdouble)), LinalgCase("cdouble_nsq_2", array( [[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=cdouble - ), + ), array([2. + 1j, 1. + 2j, 3. - 3j], dtype=cdouble)), LinalgCase("cdouble_nsq_1_2", array( @@ -224,11 +224,10 @@ def apply_tag(tag, cases): array( [[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=cdouble - ), - array( - [[2. + 1j, 1. + 2j], [1 - 1j, 2 - 2j], [1 - 1j, 2 - 2j]], - dtype=cdouble - )), + ), + array([[2. + 1j, 1. 
+ 2j], [1 - 1j, 2 - 2j], [1 - 1j, 2 - 2j]], + dtype=cdouble + )), LinalgCase("8x11", np.random.rand(8, 11), np.random.rand(8)), From f4643677a29deadec963906f00982432d92663bf Mon Sep 17 00:00:00 2001 From: nakul-krishnakumar Date: Thu, 2 Apr 2026 11:35:46 +0530 Subject: [PATCH 1557/1718] revert: MAINT: Enable ruff linting in test_linalg.py --- numpy/linalg/tests/test_linalg.py | 61 +++++++------------------------ ruff.toml | 1 + 2 files changed, 14 insertions(+), 48 deletions(-) diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py index a5d173a6dd03..cd93acaf79c0 100644 --- a/numpy/linalg/tests/test_linalg.py +++ b/numpy/linalg/tests/test_linalg.py @@ -157,10 +157,7 @@ def apply_tag(tag, cases): array([2. + 1j, 1. + 2j], dtype=cdouble)), LinalgCase("cdouble_2", array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=cdouble), - array( - [[2. + 1j, 1. + 2j, 1 + 3j], [1 - 2j, 1 - 3j, 1 - 6j]], - dtype=cdouble - )), + array([[2. + 1j, 1. + 2j, 1 + 3j], [1 - 2j, 1 - 3j, 1 - 6j]], dtype=cdouble)), LinalgCase("0x0", np.empty((0, 0), dtype=double), np.empty((0,), dtype=double), @@ -192,42 +189,28 @@ def apply_tag(tag, cases): array([2., 1., 3.], dtype=double)), LinalgCase("csingle_nsq_1", array( - [[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], - dtype=csingle - ), + [[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=csingle), array([2. + 1j, 1. + 2j], dtype=csingle)), LinalgCase("csingle_nsq_2", array( - [[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], - dtype=csingle - ), + [[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=csingle), array([2. + 1j, 1. + 2j, 3. - 3j], dtype=csingle)), LinalgCase("cdouble_nsq_1", array( - [[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], - dtype=cdouble - ), + [[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=cdouble), array([2. + 1j, 1. + 2j], dtype=cdouble)), LinalgCase("cdouble_nsq_2", array( - [[1. + 1j, 2. + 2j], [3. - 3j, 4. 
- 9j], [5. - 4j, 6. + 8j]], - dtype=cdouble - ), + [[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=cdouble), array([2. + 1j, 1. + 2j, 3. - 3j], dtype=cdouble)), LinalgCase("cdouble_nsq_1_2", array( - [[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], - dtype=cdouble - ), + [[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=cdouble), array([[2. + 1j, 1. + 2j], [1 - 1j, 2 - 2j]], dtype=cdouble)), LinalgCase("cdouble_nsq_2_2", array( - [[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], - dtype=cdouble - ), - array([[2. + 1j, 1. + 2j], [1 - 1j, 2 - 2j], [1 - 1j, 2 - 2j]], - dtype=cdouble - )), + [[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=cdouble), + array([[2. + 1j, 1. + 2j], [1 - 1j, 2 - 2j], [1 - 1j, 2 - 2j]], dtype=cdouble)), LinalgCase("8x11", np.random.rand(8, 11), np.random.rand(8)), @@ -650,9 +633,7 @@ def do(self, a, b, tags): res = linalg.eig(a) eigenvalues, eigenvectors = res.eigenvalues, res.eigenvectors assert_allclose(matmul(a, eigenvectors), - np.asarray(eigenvectors) * np.asarray(eigenvalues)[ - ..., None, : - ], + np.asarray(eigenvectors) * np.asarray(eigenvalues)[..., None, :], rtol=get_rtol(eigenvalues.dtype)) assert_(consistent_subclass(eigenvectors, a)) @@ -754,14 +735,8 @@ def hermitian(mat): axes[-1], axes[-2] = axes[-2], axes[-1] return np.conj(np.transpose(mat, axes=axes)) - assert_almost_equal( - np.matmul(u, hermitian(u)), - np.broadcast_to(np.eye(u.shape[-1]), u.shape) - ) - assert_almost_equal( - np.matmul(vt, hermitian(vt)), - np.broadcast_to(np.eye(vt.shape[-1]), vt.shape) - ) + assert_almost_equal(np.matmul(u, hermitian(u)), np.broadcast_to(np.eye(u.shape[-1]), u.shape)) + assert_almost_equal(np.matmul(vt, hermitian(vt)), np.broadcast_to(np.eye(vt.shape[-1]), vt.shape)) assert_equal(np.sort(s)[..., ::-1], s) assert_(consistent_subclass(u, a)) assert_(consistent_subclass(vt, a)) @@ -906,12 +881,7 @@ def do(self, a, b, tags): a_ginv = linalg.pinv(a) # `a @ 
a_ginv == I` does not hold if a is singular dot = matmul - assert_almost_equal( - dot(dot(a, a_ginv), a), - a, - single_decimal=5, - double_decimal=11 - ) + assert_almost_equal(dot(dot(a, a_ginv), a), a, single_decimal=5, double_decimal=11) assert_(consistent_subclass(a_ginv, a)) @@ -925,12 +895,7 @@ def do(self, a, b, tags): a_ginv = linalg.pinv(a, hermitian=True) # `a @ a_ginv == I` does not hold if a is singular dot = matmul - assert_almost_equal( - dot(dot(a, a_ginv), a), - a, - single_decimal=5, - double_decimal=11 - ) + assert_almost_equal(dot(dot(a, a_ginv), a), a, single_decimal=5, double_decimal=11) assert_(consistent_subclass(a_ginv, a)) diff --git a/ruff.toml b/ruff.toml index 6803ca5dd4a5..d28e982f9189 100644 --- a/ruff.toml +++ b/ruff.toml @@ -84,6 +84,7 @@ ignore = [ "numpy/_core/_add_newdocs.py" = ["E501"] "numpy/_core/_add_newdocs_scalars.py" = ["E501"] +"numpy/linalg/tests/test_linalg.py" = ["E501"] "numpy/f2py/*py" = ["E501"] "numpy*pyi" = ["E501"] # "useless assignments" aren't so useless when you're testing that they don't make type checkers scream From f5f877122ea019b872327677f43f6a883793123d Mon Sep 17 00:00:00 2001 From: Rishabh Dewangan <107680241+Rishabh-git10@users.noreply.github.com> Date: Tue, 24 Feb 2026 02:24:41 +0530 Subject: [PATCH 1558/1718] ENH: Add polyvalnd to base polynomial module --- numpy/polynomial/_polytypes.pyi | 45 ++++++++++++++++++++ numpy/polynomial/polynomial.py | 50 ++++++++++++++++++++++- numpy/polynomial/polynomial.pyi | 3 ++ numpy/polynomial/tests/test_polynomial.py | 22 ++++++++++ 4 files changed, 119 insertions(+), 1 deletion(-) diff --git a/numpy/polynomial/_polytypes.pyi b/numpy/polynomial/_polytypes.pyi index 46d17ac6353c..ce37bcf59c50 100644 --- a/numpy/polynomial/_polytypes.pyi +++ b/numpy/polynomial/_polytypes.pyi @@ -293,6 +293,51 @@ class _FuncVal3D(Protocol): c: _SeriesLikeCoef_co, ) -> _SupportsCoefOps[Any]: ... 
+@type_check_only +class _FuncValND(Protocol): + @overload + def __call__( + self, + /, + pts: Sequence[_FloatLike_co], + c: _SeriesLikeFloat_co, + ) -> np.floating: ... + @overload + def __call__( + self, + /, + pts: Sequence[_NumberLike_co], + c: _SeriesLikeComplex_co, + ) -> np.complexfloating: ... + @overload + def __call__( + self, + /, + pts: Sequence[_ArrayLikeFloat_co], + c: _ArrayLikeFloat_co, + ) -> _FloatArray: ... + @overload + def __call__( + self, + /, + pts: Sequence[_ArrayLikeComplex_co], + c: _ArrayLikeComplex_co, + ) -> _ComplexArray: ... + @overload + def __call__( + self, + /, + pts: Sequence[_ArrayLikeCoef_co], + c: _ArrayLikeCoef_co, + ) -> _ObjectArray: ... + @overload + def __call__( + self, + /, + pts: Sequence[_CoefLike_co], + c: _SeriesLikeCoef_co, + ) -> _SupportsCoefOps[Any]: ... + @type_check_only class _FuncVander(Protocol): @overload diff --git a/numpy/polynomial/polynomial.py b/numpy/polynomial/polynomial.py index 45abd5009c70..d44a7fe1aaac 100644 --- a/numpy/polynomial/polynomial.py +++ b/numpy/polynomial/polynomial.py @@ -76,7 +76,7 @@ 'polyzero', 'polyone', 'polyx', 'polydomain', 'polyline', 'polyadd', 'polysub', 'polymulx', 'polymul', 'polydiv', 'polypow', 'polyval', 'polyvalfromroots', 'polyder', 'polyint', 'polyfromroots', 'polyvander', - 'polyfit', 'polytrim', 'polyroots', 'Polynomial', 'polyval2d', 'polyval3d', + 'polyfit', 'polytrim', 'polyroots', 'Polynomial', 'polyval2d', 'polyval3d', 'polyvalnd', 'polygrid2d', 'polygrid3d', 'polyvander2d', 'polyvander3d', 'polycompanion'] @@ -1011,6 +1011,54 @@ def polyval3d(x, y, z, c): """ return pu._valnd(polyval, c, x, y, z) +def _polyvalnd_dispatcher(pts, c): + return (*pts, c) + +@_array_function_dispatch(_polyvalnd_dispatcher) +def polyvalnd(pts, c): + """ + Evaluate an N-D polynomial at points. + + This function returns the values: + + .. 
math:: p(x_1, x_2, \\dots, x_n) = \\sum_{i_1, i_2, \\dots, i_n} c_{i_1, i_2, \\dots, i_n} * x_1^{i_1} * x_2^{i_2} \\dots x_n^{i_n} + + The parameters in `pts` are converted to arrays only if they are + tuples or lists, otherwise they are treated as scalars and + they must have the same shape after conversion. In either case, either + the elements of `pts` or their elements must support multiplication and + addition both with themselves and with the elements of `c`. + + If `c` has fewer than N dimensions, ones are implicitly appended to its + shape to make it N-D. The shape of the result will be c.shape[N:] + + pts[0].shape. + + Parameters + ---------- + pts : tuple or list of array_like, compatible objects + The N-dimensional series is evaluated at the points + ``(x_1, x_2, ..., x_n)`` provided in the `pts` iterable, where + all elements must have the same shape. If any element is a list + or tuple, it is first converted to an ndarray, otherwise it is + left unchanged and if it isn't an ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j,k,... is contained in ``c[i,j,k,...]``. If `c` has + dimension greater than N, the remaining indices enumerate multiple + sets of coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the multidimensional polynomial on points formed with + N-tuples of corresponding values from `pts`. 
+ + See Also + -------- + polyval, polyval2d, polyval3d + + """ + return pu._valnd(polyval, c, *pts) def polygrid3d(x, y, z, c): """ diff --git a/numpy/polynomial/polynomial.pyi b/numpy/polynomial/polynomial.pyi index c394d56affed..5c1d420c59ae 100644 --- a/numpy/polynomial/polynomial.pyi +++ b/numpy/polynomial/polynomial.pyi @@ -27,6 +27,7 @@ from ._polytypes import ( _FuncVal, _FuncVal2D, _FuncVal3D, + _FuncValND, _FuncVander, _FuncVander2D, _FuncVander3D, @@ -57,6 +58,7 @@ __all__ = [ "Polynomial", "polyval2d", "polyval3d", + "polyvalnd", "polygrid2d", "polygrid3d", "polyvander2d", @@ -82,6 +84,7 @@ polyint: Final[_FuncInteg] = ... polyval: Final[_FuncVal] = ... polyval2d: Final[_FuncVal2D] = ... polyval3d: Final[_FuncVal3D] = ... +polyvalnd: Final[_FuncValND] = ... @overload def polyvalfromroots(x: _FloatLike_co, r: _FloatLike_co, tensor: bool = True) -> np.float64 | Any: ... diff --git a/numpy/polynomial/tests/test_polynomial.py b/numpy/polynomial/tests/test_polynomial.py index 4c924a758b06..42a440395d0a 100644 --- a/numpy/polynomial/tests/test_polynomial.py +++ b/numpy/polynomial/tests/test_polynomial.py @@ -297,6 +297,28 @@ def test_polyval3d(self): res = poly.polyval3d(z, z, z, self.c3d) assert_(res.shape == (2, 3)) + def test_polyvalnd(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + pts = (x1, x2, x3) + + # test exceptions + assert_raises_regex(ValueError, 'incompatible', + poly.polyvalnd, (x1, x2, x3[:2]), self.c3d) + + # test values + tgt = y1 * y2 * y3 + res = poly.polyvalnd(pts, self.c3d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = poly.polyvalnd((z, z, z), self.c3d) + assert_(res.shape == (2, 3)) + + # test 1D fallback + assert_almost_equal(poly.polyvalnd((x1,), self.c1d), y1) + def test_polygrid2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y From 8c8bd16daccd2e0b869aa82d26a023b41adb7136 Mon Sep 17 00:00:00 2001 From: Rishabh Dewangan <107680241+Rishabh-git10@users.noreply.github.com> Date: Tue, 24 Feb 2026 
13:34:47 +0530 Subject: [PATCH 1559/1718] ENH: Add N-D evaluation (valnd) to polynomial sibling classes --- numpy/polynomial/chebyshev.py | 49 +++++++++++++++++++++++- numpy/polynomial/chebyshev.pyi | 3 ++ numpy/polynomial/hermite.py | 49 +++++++++++++++++++++++- numpy/polynomial/hermite.pyi | 3 ++ numpy/polynomial/hermite_e.py | 49 +++++++++++++++++++++++- numpy/polynomial/hermite_e.pyi | 3 ++ numpy/polynomial/laguerre.py | 49 +++++++++++++++++++++++- numpy/polynomial/laguerre.pyi | 3 ++ numpy/polynomial/legendre.py | 49 +++++++++++++++++++++++- numpy/polynomial/legendre.pyi | 3 ++ numpy/polynomial/polynomial.py | 1 + numpy/polynomial/tests/test_chebyshev.py | 21 ++++++++++ numpy/polynomial/tests/test_hermite.py | 21 ++++++++++ numpy/polynomial/tests/test_hermite_e.py | 21 ++++++++++ numpy/polynomial/tests/test_laguerre.py | 21 ++++++++++ numpy/polynomial/tests/test_legendre.py | 21 ++++++++++ 16 files changed, 361 insertions(+), 5 deletions(-) diff --git a/numpy/polynomial/chebyshev.py b/numpy/polynomial/chebyshev.py index 4dd2a85e15d7..3fdae7cee990 100644 --- a/numpy/polynomial/chebyshev.py +++ b/numpy/polynomial/chebyshev.py @@ -44,6 +44,7 @@ chebval chebval2d chebval3d + chebvalnd chebgrid2d chebgrid3d @@ -117,7 +118,7 @@ 'chebsub', 'chebmulx', 'chebmul', 'chebdiv', 'chebpow', 'chebval', 'chebder', 'chebint', 'cheb2poly', 'poly2cheb', 'chebfromroots', 'chebvander', 'chebfit', 'chebtrim', 'chebroots', 'chebpts1', - 'chebpts2', 'Chebyshev', 'chebval2d', 'chebval3d', 'chebgrid2d', + 'chebpts2', 'Chebyshev', 'chebval2d', 'chebval3d', 'chebvalnd', 'chebgrid2d', 'chebgrid3d', 'chebvander2d', 'chebvander3d', 'chebcompanion', 'chebgauss', 'chebweight', 'chebinterpolate'] @@ -1301,6 +1302,52 @@ def chebval3d(x, y, z, c): return pu._valnd(chebval, c, x, y, z) +def chebvalnd(pts, c): + """ + Evaluate an N-D Chebyshev series at points. + + This function returns the values: + + .. 
math:: p(x_1, x_2, \\dots, x_n) = \\sum_{i_1, i_2, \\dots, i_n} c_{i_1, i_2, \\dots, i_n} * T_{i_1}(x_1) * T_{i_2}(x_2) \\dots T_{i_n}(x_n) + + The parameters in `pts` are converted to arrays only if they are + tuples or lists, otherwise they are treated as scalars and + they must have the same shape after conversion. In either case, either + the elements of `pts` or their elements must support multiplication and + addition both with themselves and with the elements of `c`. + + If `c` has fewer than N dimensions, ones are implicitly appended to its + shape to make it N-D. The shape of the result will be c.shape[N:] + + pts[0].shape. + + Parameters + ---------- + pts : tuple or list of array_like, compatible objects + The N-dimensional series is evaluated at the points + ``(x_1, x_2, ..., x_n)`` provided in the `pts` iterable, where + all elements must have the same shape. If any element is a list + or tuple, it is first converted to an ndarray, otherwise it is + left unchanged and if it isn't an ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j,k,... is contained in ``c[i,j,k,...]``. If `c` has + dimension greater than N, the remaining indices enumerate multiple + sets of coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the multidimensional Chebyshev series on points formed + with N-tuples of corresponding values from `pts`. + + See Also + -------- + chebval, chebval2d, chebval3d + + """ + return pu._valnd(chebval, c, *pts) + + def chebgrid3d(x, y, z, c): """ Evaluate a 3-D Chebyshev series on the Cartesian product of x, y, and z. 
diff --git a/numpy/polynomial/chebyshev.pyi b/numpy/polynomial/chebyshev.pyi index 857ce8f6f377..319dcdc6dc4a 100644 --- a/numpy/polynomial/chebyshev.pyi +++ b/numpy/polynomial/chebyshev.pyi @@ -26,6 +26,7 @@ from ._polytypes import ( _FuncVal, _FuncVal2D, _FuncVal3D, + _FuncValND, _FuncVander, _FuncVander2D, _FuncVander3D, @@ -62,6 +63,7 @@ __all__ = [ "Chebyshev", "chebval2d", "chebval3d", + "chebvalnd", "chebgrid2d", "chebgrid3d", "chebvander2d", @@ -102,6 +104,7 @@ chebint: Final[_FuncInteg] = ... chebval: Final[_FuncVal] = ... chebval2d: Final[_FuncVal2D] = ... chebval3d: Final[_FuncVal3D] = ... +chebvalnd: Final[_FuncValND] = ... chebgrid2d: Final[_FuncVal2D] = ... chebgrid3d: Final[_FuncVal3D] = ... chebvander: Final[_FuncVander] = ... diff --git a/numpy/polynomial/hermite.py b/numpy/polynomial/hermite.py index b2970c914957..cfd602b88911 100644 --- a/numpy/polynomial/hermite.py +++ b/numpy/polynomial/hermite.py @@ -40,6 +40,7 @@ hermval hermval2d hermval3d + hermvalnd hermgrid2d hermgrid3d @@ -85,7 +86,7 @@ 'hermsub', 'hermmulx', 'hermmul', 'hermdiv', 'hermpow', 'hermval', 'hermder', 'hermint', 'herm2poly', 'poly2herm', 'hermfromroots', 'hermvander', 'hermfit', 'hermtrim', 'hermroots', 'Hermite', - 'hermval2d', 'hermval3d', 'hermgrid2d', 'hermgrid3d', 'hermvander2d', + 'hermval2d', 'hermval3d', 'hermvalnd', 'hermgrid2d', 'hermgrid3d', 'hermvander2d', 'hermvander3d', 'hermcompanion', 'hermgauss', 'hermweight'] hermtrim = pu.trimcoef @@ -1056,6 +1057,52 @@ def hermval3d(x, y, z, c): return pu._valnd(hermval, c, x, y, z) +def hermvalnd(pts, c): + """ + Evaluate an N-D Hermite series at points. + + This function returns the values: + + .. math:: p(x_1, x_2, \\dots, x_n) = \\sum_{i_1, i_2, \\dots, i_n} c_{i_1, i_2, \\dots, i_n} * H_{i_1}(x_1) * H_{i_2}(x_2) \\dots H_{i_n}(x_n) + + The parameters in `pts` are converted to arrays only if they are + tuples or lists, otherwise they are treated as scalars and + they must have the same shape after conversion. 
In either case, either + the elements of `pts` or their elements must support multiplication and + addition both with themselves and with the elements of `c`. + + If `c` has fewer than N dimensions, ones are implicitly appended to its + shape to make it N-D. The shape of the result will be c.shape[N:] + + pts[0].shape. + + Parameters + ---------- + pts : tuple or list of array_like, compatible objects + The N-dimensional series is evaluated at the points + ``(x_1, x_2, ..., x_n)`` provided in the `pts` iterable, where + all elements must have the same shape. If any element is a list + or tuple, it is first converted to an ndarray, otherwise it is + left unchanged and if it isn't an ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j,k,... is contained in ``c[i,j,k,...]``. If `c` has + dimension greater than N, the remaining indices enumerate multiple + sets of coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the multidimensional polynomial on points formed with + N-tuples of corresponding values from `pts`. + + See Also + -------- + hermval, hermval2d, hermval3d + + """ + return pu._valnd(hermval, c, *pts) + + def hermgrid3d(x, y, z, c): """ Evaluate a 3-D Hermite series on the Cartesian product of x, y, and z. diff --git a/numpy/polynomial/hermite.pyi b/numpy/polynomial/hermite.pyi index 17375c9210c4..a1ddc06bf377 100644 --- a/numpy/polynomial/hermite.pyi +++ b/numpy/polynomial/hermite.pyi @@ -22,6 +22,7 @@ from ._polytypes import ( _FuncVal, _FuncVal2D, _FuncVal3D, + _FuncValND, _FuncVander, _FuncVander2D, _FuncVander3D, @@ -54,6 +55,7 @@ __all__ = [ "Hermite", "hermval2d", "hermval3d", + "hermvalnd", "hermgrid2d", "hermgrid3d", "hermvander2d", @@ -84,6 +86,7 @@ hermint: Final[_FuncInteg] = ... hermval: Final[_FuncVal] = ... hermval2d: Final[_FuncVal2D] = ... hermval3d: Final[_FuncVal3D] = ... +hermvalnd: Final[_FuncValND] = ... 
hermgrid2d: Final[_FuncVal2D] = ... hermgrid3d: Final[_FuncVal3D] = ... hermvander: Final[_FuncVander] = ... diff --git a/numpy/polynomial/hermite_e.py b/numpy/polynomial/hermite_e.py index 730b60804e9a..82e1eb37a192 100644 --- a/numpy/polynomial/hermite_e.py +++ b/numpy/polynomial/hermite_e.py @@ -40,6 +40,7 @@ hermeval hermeval2d hermeval3d + hermevalnd hermegrid2d hermegrid3d @@ -85,7 +86,7 @@ 'hermeadd', 'hermesub', 'hermemulx', 'hermemul', 'hermediv', 'hermepow', 'hermeval', 'hermeder', 'hermeint', 'herme2poly', 'poly2herme', 'hermefromroots', 'hermevander', 'hermefit', 'hermetrim', - 'hermeroots', 'HermiteE', 'hermeval2d', 'hermeval3d', 'hermegrid2d', + 'hermeroots', 'HermiteE', 'hermeval2d', 'hermeval3d', 'hermevalnd', 'hermegrid2d', 'hermegrid3d', 'hermevander2d', 'hermevander3d', 'hermecompanion', 'hermegauss', 'hermeweight'] @@ -1020,6 +1021,52 @@ def hermeval3d(x, y, z, c): return pu._valnd(hermeval, c, x, y, z) +def hermevalnd(pts, c): + """ + Evaluate an N-D Hermite_e series at points. + + This function returns the values: + + .. math:: p(x_1, x_2, \\dots, x_n) = \\sum_{i_1, i_2, \\dots, i_n} c_{i_1, i_2, \\dots, i_n} * He_{i_1}(x_1) * He_{i_2}(x_2) \\dots He_{i_n}(x_n) + + The parameters in `pts` are converted to arrays only if they are + tuples or lists, otherwise they are treated as scalars and + they must have the same shape after conversion. In either case, either + the elements of `pts` or their elements must support multiplication and + addition both with themselves and with the elements of `c`. + + If `c` has fewer than N dimensions, ones are implicitly appended to its + shape to make it N-D. The shape of the result will be c.shape[N:] + + pts[0].shape. + + Parameters + ---------- + pts : tuple or list of array_like, compatible objects + The N-dimensional series is evaluated at the points + ``(x_1, x_2, ..., x_n)`` provided in the `pts` iterable, where + all elements must have the same shape. 
If any element is a list + or tuple, it is first converted to an ndarray, otherwise it is + left unchanged and if it isn't an ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j,k,... is contained in ``c[i,j,k,...]``. If `c` has + dimension greater than N, the remaining indices enumerate multiple + sets of coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the multidimensional polynomial on points formed with + N-tuples of corresponding values from `pts`. + + See Also + -------- + hermeval, hermeval2d, hermeval3d + + """ + return pu._valnd(hermeval, c, *pts) + + def hermegrid3d(x, y, z, c): """ Evaluate a 3-D HermiteE series on the Cartesian product of x, y, and z. diff --git a/numpy/polynomial/hermite_e.pyi b/numpy/polynomial/hermite_e.pyi index f1ebf9066a4f..ce201c6c1a0a 100644 --- a/numpy/polynomial/hermite_e.pyi +++ b/numpy/polynomial/hermite_e.pyi @@ -22,6 +22,7 @@ from ._polytypes import ( _FuncVal, _FuncVal2D, _FuncVal3D, + _FuncValND, _FuncVander, _FuncVander2D, _FuncVander3D, @@ -54,6 +55,7 @@ __all__ = [ "HermiteE", "hermeval2d", "hermeval3d", + "hermevalnd", "hermegrid2d", "hermegrid3d", "hermevander2d", @@ -84,6 +86,7 @@ hermeint: Final[_FuncInteg] = ... hermeval: Final[_FuncVal] = ... hermeval2d: Final[_FuncVal2D] = ... hermeval3d: Final[_FuncVal3D] = ... +hermevalnd: Final[_FuncValND] = ... hermegrid2d: Final[_FuncVal2D] = ... hermegrid3d: Final[_FuncVal3D] = ... hermevander: Final[_FuncVander] = ... 
diff --git a/numpy/polynomial/laguerre.py b/numpy/polynomial/laguerre.py index eb34cbef17ca..226c851cb9f8 100644 --- a/numpy/polynomial/laguerre.py +++ b/numpy/polynomial/laguerre.py @@ -40,6 +40,7 @@ lagval lagval2d lagval3d + lagvalnd laggrid2d laggrid3d @@ -84,7 +85,7 @@ 'lagzero', 'lagone', 'lagx', 'lagdomain', 'lagline', 'lagadd', 'lagsub', 'lagmulx', 'lagmul', 'lagdiv', 'lagpow', 'lagval', 'lagder', 'lagint', 'lag2poly', 'poly2lag', 'lagfromroots', 'lagvander', - 'lagfit', 'lagtrim', 'lagroots', 'Laguerre', 'lagval2d', 'lagval3d', + 'lagfit', 'lagtrim', 'lagroots', 'Laguerre', 'lagval2d', 'lagval3d', 'lagvalnd', 'laggrid2d', 'laggrid3d', 'lagvander2d', 'lagvander3d', 'lagcompanion', 'laggauss', 'lagweight'] @@ -1045,6 +1046,52 @@ def lagval3d(x, y, z, c): return pu._valnd(lagval, c, x, y, z) +def lagvalnd(pts, c): + """ + Evaluate an N-D Laguerre series at points. + + This function returns the values: + + .. math:: p(x_1, x_2, \\dots, x_n) = \\sum_{i_1, i_2, \\dots, i_n} c_{i_1, i_2, \\dots, i_n} * L_{i_1}(x_1) * L_{i_2}(x_2) \\dots L_{i_n}(x_n) + + The parameters in `pts` are converted to arrays only if they are + tuples or lists, otherwise they are treated as scalars and + they must have the same shape after conversion. In either case, either + the elements of `pts` or their elements must support multiplication and + addition both with themselves and with the elements of `c`. + + If `c` has fewer than N dimensions, ones are implicitly appended to its + shape to make it N-D. The shape of the result will be c.shape[N:] + + pts[0].shape. + + Parameters + ---------- + pts : tuple or list of array_like, compatible objects + The N-dimensional series is evaluated at the points + ``(x_1, x_2, ..., x_n)`` provided in the `pts` iterable, where + all elements must have the same shape. If any element is a list + or tuple, it is first converted to an ndarray, otherwise it is + left unchanged and if it isn't an ndarray it is treated as a scalar. 
+ c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j,k,... is contained in ``c[i,j,k,...]``. If `c` has + dimension greater than N, the remaining indices enumerate multiple + sets of coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the multidimensional polynomial on points formed with + N-tuples of corresponding values from `pts`. + + See Also + -------- + lagval, lagval2d, lagval3d + + """ + return pu._valnd(lagval, c, *pts) + + def laggrid3d(x, y, z, c): """ Evaluate a 3-D Laguerre series on the Cartesian product of x, y, and z. diff --git a/numpy/polynomial/laguerre.pyi b/numpy/polynomial/laguerre.pyi index 48fecfd07efe..d6fd5fcd73fb 100644 --- a/numpy/polynomial/laguerre.pyi +++ b/numpy/polynomial/laguerre.pyi @@ -21,6 +21,7 @@ from ._polytypes import ( _FuncVal, _FuncVal2D, _FuncVal3D, + _FuncValND, _FuncVander, _FuncVander2D, _FuncVander3D, @@ -53,6 +54,7 @@ __all__ = [ "Laguerre", "lagval2d", "lagval3d", + "lagvalnd", "laggrid2d", "laggrid3d", "lagvander2d", @@ -83,6 +85,7 @@ lagint: Final[_FuncInteg] = ... lagval: Final[_FuncVal] = ... lagval2d: Final[_FuncVal2D] = ... lagval3d: Final[_FuncVal3D] = ... +lagvalnd: Final[_FuncValND] = ... laggrid2d: Final[_FuncVal2D] = ... laggrid3d: Final[_FuncVal3D] = ... lagvander: Final[_FuncVander] = ... 
diff --git a/numpy/polynomial/legendre.py b/numpy/polynomial/legendre.py index b611aed844e7..62f1574c104a 100644 --- a/numpy/polynomial/legendre.py +++ b/numpy/polynomial/legendre.py @@ -42,6 +42,7 @@ legval legval2d legval3d + legvalnd leggrid2d leggrid3d @@ -88,7 +89,7 @@ 'legzero', 'legone', 'legx', 'legdomain', 'legline', 'legadd', 'legsub', 'legmulx', 'legmul', 'legdiv', 'legpow', 'legval', 'legder', 'legint', 'leg2poly', 'poly2leg', 'legfromroots', 'legvander', - 'legfit', 'legtrim', 'legroots', 'Legendre', 'legval2d', 'legval3d', + 'legfit', 'legtrim', 'legroots', 'Legendre', 'legval2d', 'legval3d', 'legvalnd', 'leggrid2d', 'leggrid3d', 'legvander2d', 'legvander3d', 'legcompanion', 'leggauss', 'legweight'] @@ -1043,6 +1044,52 @@ def legval3d(x, y, z, c): return pu._valnd(legval, c, x, y, z) +def legvalnd(pts, c): + """ + Evaluate an N-D Legendre series at points. + + This function returns the values: + + .. math:: p(x_1, x_2, \\dots, x_n) = \\sum_{i_1, i_2, \\dots, i_n} c_{i_1, i_2, \\dots, i_n} * P_{i_1}(x_1) * P_{i_2}(x_2) \\dots P_{i_n}(x_n) + + The parameters in `pts` are converted to arrays only if they are + tuples or lists, otherwise they are treated as scalars and + they must have the same shape after conversion. In either case, either + the elements of `pts` or their elements must support multiplication and + addition both with themselves and with the elements of `c`. + + If `c` has fewer than N dimensions, ones are implicitly appended to its + shape to make it N-D. The shape of the result will be c.shape[N:] + + pts[0].shape. + + Parameters + ---------- + pts : tuple or list of array_like, compatible objects + The N-dimensional series is evaluated at the points + ``(x_1, x_2, ..., x_n)`` provided in the `pts` iterable, where + all elements must have the same shape. If any element is a list + or tuple, it is first converted to an ndarray, otherwise it is + left unchanged and if it isn't an ndarray it is treated as a scalar. 
+ c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j,k,... is contained in ``c[i,j,k,...]``. If `c` has + dimension greater than N, the remaining indices enumerate multiple + sets of coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the multidimensional Legendre series on points formed + with N-tuples of corresponding values from `pts`. + + See Also + -------- + legval, legval2d, legval3d + + """ + return pu._valnd(legval, c, *pts) + + def leggrid3d(x, y, z, c): """ Evaluate a 3-D Legendre series on the Cartesian product of x, y, and z. diff --git a/numpy/polynomial/legendre.pyi b/numpy/polynomial/legendre.pyi index 75fa47b44d3e..aa0b9918d21c 100644 --- a/numpy/polynomial/legendre.pyi +++ b/numpy/polynomial/legendre.pyi @@ -21,6 +21,7 @@ from ._polytypes import ( _FuncVal, _FuncVal2D, _FuncVal3D, + _FuncValND, _FuncVander, _FuncVander2D, _FuncVander3D, @@ -53,6 +54,7 @@ __all__ = [ "Legendre", "legval2d", "legval3d", + "legvalnd", "leggrid2d", "leggrid3d", "legvander2d", @@ -83,6 +85,7 @@ legint: Final[_FuncInteg] = ... legval: Final[_FuncVal] = ... legval2d: Final[_FuncVal2D] = ... legval3d: Final[_FuncVal3D] = ... +legvalnd: Final[_FuncValND] = ... leggrid2d: Final[_FuncVal2D] = ... leggrid3d: Final[_FuncVal3D] = ... legvander: Final[_FuncVander] = ... 
diff --git a/numpy/polynomial/polynomial.py b/numpy/polynomial/polynomial.py index d44a7fe1aaac..77c1e1e7c291 100644 --- a/numpy/polynomial/polynomial.py +++ b/numpy/polynomial/polynomial.py @@ -40,6 +40,7 @@ polyval polyval2d polyval3d + polyvalnd polygrid2d polygrid3d diff --git a/numpy/polynomial/tests/test_chebyshev.py b/numpy/polynomial/tests/test_chebyshev.py index 14777ac60375..d654bbbba16a 100644 --- a/numpy/polynomial/tests/test_chebyshev.py +++ b/numpy/polynomial/tests/test_chebyshev.py @@ -185,6 +185,27 @@ def test_chebval3d(self): res = cheb.chebval3d(z, z, z, self.c3d) assert_(res.shape == (2, 3)) + def test_chebvalnd(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + pts = (x1, x2, x3) + + # test exceptions + assert_raises(ValueError, cheb.chebvalnd, (x1, x2, x3[:2]), self.c3d) + + # test values + tgt = y1 * y2 * y3 + res = cheb.chebvalnd(pts, self.c3d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = cheb.chebvalnd((z, z, z), self.c3d) + assert_(res.shape == (2, 3)) + + # test 1D fallback + assert_almost_equal(cheb.chebvalnd((x1,), self.c1d), y1) + def test_chebgrid2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y diff --git a/numpy/polynomial/tests/test_hermite.py b/numpy/polynomial/tests/test_hermite.py index a289ba0b50cc..aeb6d3649080 100644 --- a/numpy/polynomial/tests/test_hermite.py +++ b/numpy/polynomial/tests/test_hermite.py @@ -172,6 +172,27 @@ def test_hermval3d(self): res = herm.hermval3d(z, z, z, self.c3d) assert_(res.shape == (2, 3)) + def test_hermvalnd(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + pts = (x1, x2, x3) + + # test exceptions + assert_raises(ValueError, herm.hermvalnd, (x1, x2, x3[:2]), self.c3d) + + # test values + tgt = y1 * y2 * y3 + res = herm.hermvalnd(pts, self.c3d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = herm.hermvalnd((z, z, z), self.c3d) + assert_(res.shape == (2, 3)) + + # test 1D fallback + assert_almost_equal(herm.hermvalnd((x1,), 
self.c1d), y1) + def test_hermgrid2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y diff --git a/numpy/polynomial/tests/test_hermite_e.py b/numpy/polynomial/tests/test_hermite_e.py index 233dfb28254a..6ff1d16e489f 100644 --- a/numpy/polynomial/tests/test_hermite_e.py +++ b/numpy/polynomial/tests/test_hermite_e.py @@ -172,6 +172,27 @@ def test_hermeval3d(self): res = herme.hermeval3d(z, z, z, self.c3d) assert_(res.shape == (2, 3)) + def test_hermevalnd(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + pts = (x1, x2, x3) + + # test exceptions + assert_raises(ValueError, herme.hermevalnd, (x1, x2, x3[:2]), self.c3d) + + # test values + tgt = y1 * y2 * y3 + res = herme.hermevalnd(pts, self.c3d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = herme.hermevalnd((z, z, z), self.c3d) + assert_(res.shape == (2, 3)) + + # test 1D fallback + assert_almost_equal(herme.hermevalnd((x1,), self.c1d), y1) + def test_hermegrid2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y diff --git a/numpy/polynomial/tests/test_laguerre.py b/numpy/polynomial/tests/test_laguerre.py index 884f15a9fe8f..1c49b6696b8d 100644 --- a/numpy/polynomial/tests/test_laguerre.py +++ b/numpy/polynomial/tests/test_laguerre.py @@ -169,6 +169,27 @@ def test_lagval3d(self): res = lag.lagval3d(z, z, z, self.c3d) assert_(res.shape == (2, 3)) + def test_lagvalnd(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + pts = (x1, x2, x3) + + # test exceptions + assert_raises(ValueError, lag.lagvalnd, (x1, x2, x3[:2]), self.c3d) + + # test values + tgt = y1 * y2 * y3 + res = lag.lagvalnd(pts, self.c3d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = lag.lagvalnd((z, z, z), self.c3d) + assert_(res.shape == (2, 3)) + + # test 1D fallback + assert_almost_equal(lag.lagvalnd((x1,), self.c1d), y1) + def test_laggrid2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y diff --git a/numpy/polynomial/tests/test_legendre.py b/numpy/polynomial/tests/test_legendre.py index 
6c87f44ee707..654a6abdd536 100644 --- a/numpy/polynomial/tests/test_legendre.py +++ b/numpy/polynomial/tests/test_legendre.py @@ -173,6 +173,27 @@ def test_legval3d(self): res = leg.legval3d(z, z, z, self.c3d) assert_(res.shape == (2, 3)) + def test_legvalnd(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + pts = (x1, x2, x3) + + # test exceptions + assert_raises(ValueError, leg.legvalnd, (x1, x2, x3[:2]), self.c3d) + + # test values + tgt = y1 * y2 * y3 + res = leg.legvalnd(pts, self.c3d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = leg.legvalnd((z, z, z), self.c3d) + assert_(res.shape == (2, 3)) + + # test 1D fallback + assert_almost_equal(leg.legvalnd((x1,), self.c1d), y1) + def test_leggrid2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y From 9363f0ab63e8f08bf52b9ffbec65145c5ca82256 Mon Sep 17 00:00:00 2001 From: Rishabh Dewangan <107680241+Rishabh-git10@users.noreply.github.com> Date: Tue, 24 Feb 2026 13:37:22 +0530 Subject: [PATCH 1560/1718] DOC: Add release note for polyvalnd feature --- doc/release/upcoming_changes/30857.new_feature.rst | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 doc/release/upcoming_changes/30857.new_feature.rst diff --git a/doc/release/upcoming_changes/30857.new_feature.rst b/doc/release/upcoming_changes/30857.new_feature.rst new file mode 100644 index 000000000000..a6af45dcbbcc --- /dev/null +++ b/doc/release/upcoming_changes/30857.new_feature.rst @@ -0,0 +1,5 @@ +Added N-D evaluation functions to the polynomial package +-------------------------------------------------------- +New functions ``polyvalnd``, ``chebvalnd``, ``legvalnd``, ``hermvalnd``, +``hermevalnd``, and ``lagvalnd`` have been added to evaluate polynomials +in arbitrary dimensions, analogous to the existing 2D and 3D evaluators. 
\ No newline at end of file From 5b37d23bbb236f92c35833785948d32f9845a402 Mon Sep 17 00:00:00 2001 From: Rishabh Dewangan <107680241+Rishabh-git10@users.noreply.github.com> Date: Wed, 25 Feb 2026 00:43:16 +0530 Subject: [PATCH 1561/1718] MAINT: Fix ruff linting errors for line length and escape sequences --- numpy/polynomial/chebyshev.py | 8 ++++-- numpy/polynomial/hermite.py | 8 ++++-- numpy/polynomial/hermite_e.py | 8 ++++-- numpy/polynomial/laguerre.py | 8 ++++-- numpy/polynomial/legendre.py | 8 ++++-- numpy/polynomial/polynomial.py | 12 ++++++--- numpy/polynomial/tests/test_chebyshev.py | 30 +++++++++++------------ numpy/polynomial/tests/test_polynomial.py | 2 +- 8 files changed, 54 insertions(+), 30 deletions(-) diff --git a/numpy/polynomial/chebyshev.py b/numpy/polynomial/chebyshev.py index 3fdae7cee990..2a69cf96127a 100644 --- a/numpy/polynomial/chebyshev.py +++ b/numpy/polynomial/chebyshev.py @@ -1303,12 +1303,16 @@ def chebval3d(x, y, z, c): def chebvalnd(pts, c): - """ + r""" Evaluate an N-D Chebyshev series at points. This function returns the values: - .. math:: p(x_1, x_2, \\dots, x_n) = \\sum_{i_1, i_2, \\dots, i_n} c_{i_1, i_2, \\dots, i_n} * T_{i_1}(x_1) * T_{i_2}(x_2) \\dots T_{i_n}(x_n) + .. math:: + + p(x_1, x_2, \dots, x_n) = + \sum_{i_1, i_2, \dots, i_n} c_{i_1, i_2, \dots, i_n} + * T_{i_1}(x_1) * T_{i_2}(x_2) \dots T_{i_n}(x_n) The parameters in `pts` are converted to arrays only if they are tuples or lists, otherwise they are treated as scalars and diff --git a/numpy/polynomial/hermite.py b/numpy/polynomial/hermite.py index cfd602b88911..e28460d0df54 100644 --- a/numpy/polynomial/hermite.py +++ b/numpy/polynomial/hermite.py @@ -1058,12 +1058,16 @@ def hermval3d(x, y, z, c): def hermvalnd(pts, c): - """ + r""" Evaluate an N-D Hermite series at points. This function returns the values: - .. math:: p(x_1, x_2, \\dots, x_n) = \\sum_{i_1, i_2, \\dots, i_n} c_{i_1, i_2, \\dots, i_n} * H_{i_1}(x_1) * H_{i_2}(x_2) \\dots H_{i_n}(x_n) + .. 
math:: + + p(x_1, x_2, \dots, x_n) = + \sum_{i_1, i_2, \dots, i_n} c_{i_1, i_2, \dots, i_n} + * H_{i_1}(x_1) * H_{i_2}(x_2) \dots H_{i_n}(x_n) The parameters in `pts` are converted to arrays only if they are tuples or lists, otherwise they are treated as scalars and diff --git a/numpy/polynomial/hermite_e.py b/numpy/polynomial/hermite_e.py index 82e1eb37a192..e9a54dbc396a 100644 --- a/numpy/polynomial/hermite_e.py +++ b/numpy/polynomial/hermite_e.py @@ -1022,12 +1022,16 @@ def hermeval3d(x, y, z, c): def hermevalnd(pts, c): - """ + r""" Evaluate an N-D Hermite_e series at points. This function returns the values: - .. math:: p(x_1, x_2, \\dots, x_n) = \\sum_{i_1, i_2, \\dots, i_n} c_{i_1, i_2, \\dots, i_n} * He_{i_1}(x_1) * He_{i_2}(x_2) \\dots He_{i_n}(x_n) + .. math:: + + p(x_1, x_2, \dots, x_n) = + \sum_{i_1, i_2, \dots, i_n} c_{i_1, i_2, \dots, i_n} + * He_{i_1}(x_1) * He_{i_2}(x_2) \dots He_{i_n}(x_n) The parameters in `pts` are converted to arrays only if they are tuples or lists, otherwise they are treated as scalars and diff --git a/numpy/polynomial/laguerre.py b/numpy/polynomial/laguerre.py index 226c851cb9f8..0a3dbcc2228a 100644 --- a/numpy/polynomial/laguerre.py +++ b/numpy/polynomial/laguerre.py @@ -1047,12 +1047,16 @@ def lagval3d(x, y, z, c): def lagvalnd(pts, c): - """ + r""" Evaluate an N-D Laguerre series at points. This function returns the values: - .. math:: p(x_1, x_2, \\dots, x_n) = \\sum_{i_1, i_2, \\dots, i_n} c_{i_1, i_2, \\dots, i_n} * L_{i_1}(x_1) * L_{i_2}(x_2) \\dots L_{i_n}(x_n) + .. 
math:: + + p(x_1, x_2, \dots, x_n) = + \sum_{i_1, i_2, \dots, i_n} c_{i_1, i_2, \dots, i_n} + * L_{i_1}(x_1) * L_{i_2}(x_2) \dots L_{i_n}(x_n) The parameters in `pts` are converted to arrays only if they are tuples or lists, otherwise they are treated as scalars and diff --git a/numpy/polynomial/legendre.py b/numpy/polynomial/legendre.py index 62f1574c104a..2ebbc97dd27f 100644 --- a/numpy/polynomial/legendre.py +++ b/numpy/polynomial/legendre.py @@ -1045,12 +1045,16 @@ def legval3d(x, y, z, c): def legvalnd(pts, c): - """ + r""" Evaluate an N-D Legendre series at points. This function returns the values: - .. math:: p(x_1, x_2, \\dots, x_n) = \\sum_{i_1, i_2, \\dots, i_n} c_{i_1, i_2, \\dots, i_n} * P_{i_1}(x_1) * P_{i_2}(x_2) \\dots P_{i_n}(x_n) + .. math:: + + p(x_1, x_2, \dots, x_n) = + \sum_{i_1, i_2, \dots, i_n} c_{i_1, i_2, \dots, i_n} + * P_{i_1}(x_1) * P_{i_2}(x_2) \dots P_{i_n}(x_n) The parameters in `pts` are converted to arrays only if they are tuples or lists, otherwise they are treated as scalars and diff --git a/numpy/polynomial/polynomial.py b/numpy/polynomial/polynomial.py index 77c1e1e7c291..ed1a5a92f08f 100644 --- a/numpy/polynomial/polynomial.py +++ b/numpy/polynomial/polynomial.py @@ -77,8 +77,8 @@ 'polyzero', 'polyone', 'polyx', 'polydomain', 'polyline', 'polyadd', 'polysub', 'polymulx', 'polymul', 'polydiv', 'polypow', 'polyval', 'polyvalfromroots', 'polyder', 'polyint', 'polyfromroots', 'polyvander', - 'polyfit', 'polytrim', 'polyroots', 'Polynomial', 'polyval2d', 'polyval3d', 'polyvalnd', - 'polygrid2d', 'polygrid3d', 'polyvander2d', 'polyvander3d', + 'polyfit', 'polytrim', 'polyroots', 'Polynomial', 'polyval2d', 'polyval3d', + 'polyvalnd', 'polygrid2d', 'polygrid3d', 'polyvander2d', 'polyvander3d', 'polycompanion'] import numpy as np @@ -1017,12 +1017,16 @@ def _polyvalnd_dispatcher(pts, c): @_array_function_dispatch(_polyvalnd_dispatcher) def polyvalnd(pts, c): - """ + r""" Evaluate an N-D polynomial at points. 
This function returns the values: - .. math:: p(x_1, x_2, \\dots, x_n) = \\sum_{i_1, i_2, \\dots, i_n} c_{i_1, i_2, \\dots, i_n} * x_1^{i_1} * x_2^{i_2} \\dots x_n^{i_n} + .. math:: + + p(x_1, x_2, \dots, x_n) = + \sum_{i_1, i_2, \dots, i_n} c_{i_1, i_2, \dots, i_n} + * x_1^{i_1} * x_2^{i_2} \dots x_n^{i_n} The parameters in `pts` are converted to arrays only if they are tuples or lists, otherwise they are treated as scalars and diff --git a/numpy/polynomial/tests/test_chebyshev.py b/numpy/polynomial/tests/test_chebyshev.py index d654bbbba16a..ff2e5846852e 100644 --- a/numpy/polynomial/tests/test_chebyshev.py +++ b/numpy/polynomial/tests/test_chebyshev.py @@ -186,25 +186,25 @@ def test_chebval3d(self): assert_(res.shape == (2, 3)) def test_chebvalnd(self): - x1, x2, x3 = self.x - y1, y2, y3 = self.y - pts = (x1, x2, x3) + x1, x2, x3 = self.x + y1, y2, y3 = self.y + pts = (x1, x2, x3) - # test exceptions - assert_raises(ValueError, cheb.chebvalnd, (x1, x2, x3[:2]), self.c3d) + # test exceptions + assert_raises(ValueError, cheb.chebvalnd, (x1, x2, x3[:2]), self.c3d) - # test values - tgt = y1 * y2 * y3 - res = cheb.chebvalnd(pts, self.c3d) - assert_almost_equal(res, tgt) + # test values + tgt = y1 * y2 * y3 + res = cheb.chebvalnd(pts, self.c3d) + assert_almost_equal(res, tgt) - # test shape - z = np.ones((2, 3)) - res = cheb.chebvalnd((z, z, z), self.c3d) - assert_(res.shape == (2, 3)) + # test shape + z = np.ones((2, 3)) + res = cheb.chebvalnd((z, z, z), self.c3d) + assert_(res.shape == (2, 3)) - # test 1D fallback - assert_almost_equal(cheb.chebvalnd((x1,), self.c1d), y1) + # test 1D fallback + assert_almost_equal(cheb.chebvalnd((x1,), self.c1d), y1) def test_chebgrid2d(self): x1, x2, x3 = self.x diff --git a/numpy/polynomial/tests/test_polynomial.py b/numpy/polynomial/tests/test_polynomial.py index 42a440395d0a..413803a4e099 100644 --- a/numpy/polynomial/tests/test_polynomial.py +++ b/numpy/polynomial/tests/test_polynomial.py @@ -318,7 +318,7 @@ def 
test_polyvalnd(self): # test 1D fallback assert_almost_equal(poly.polyvalnd((x1,), self.c1d), y1) - + def test_polygrid2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y From 89f607150fc92b6284d03bb9c31ee4f9fc2a0a89 Mon Sep 17 00:00:00 2001 From: Rishabh Dewangan <107680241+Rishabh-git10@users.noreply.github.com> Date: Thu, 2 Apr 2026 14:23:53 +0530 Subject: [PATCH 1562/1718] DOC: Update valnd docstrings and math formatting based on review --- numpy/polynomial/chebyshev.py | 7 ++++--- numpy/polynomial/hermite.py | 7 ++++--- numpy/polynomial/hermite_e.py | 7 ++++--- numpy/polynomial/laguerre.py | 7 ++++--- numpy/polynomial/legendre.py | 7 ++++--- numpy/polynomial/polynomial.py | 7 ++++--- 6 files changed, 24 insertions(+), 18 deletions(-) diff --git a/numpy/polynomial/chebyshev.py b/numpy/polynomial/chebyshev.py index 2a69cf96127a..653451f2b459 100644 --- a/numpy/polynomial/chebyshev.py +++ b/numpy/polynomial/chebyshev.py @@ -1309,10 +1309,11 @@ def chebvalnd(pts, c): This function returns the values: .. math:: + p(pts, c) = \sum_{i_1, i_2, \dots, i_n} + c_{i_1, i_2, \dots, i_n} * T_{i_1}(x_1) * T_{i_2}(x_2) \dots T_{i_n}(x_n) - p(x_1, x_2, \dots, x_n) = - \sum_{i_1, i_2, \dots, i_n} c_{i_1, i_2, \dots, i_n} - * T_{i_1}(x_1) * T_{i_2}(x_2) \dots T_{i_n}(x_n) + where :math:`x_1, x_2, \dots, x_n = pts`. + Note that `pts` may also be an `(n, m)` array. The parameters in `pts` are converted to arrays only if they are tuples or lists, otherwise they are treated as scalars and diff --git a/numpy/polynomial/hermite.py b/numpy/polynomial/hermite.py index e28460d0df54..cb03851ca384 100644 --- a/numpy/polynomial/hermite.py +++ b/numpy/polynomial/hermite.py @@ -1064,10 +1064,11 @@ def hermvalnd(pts, c): This function returns the values: .. 
math:: + p(pts, c) = \sum_{i_1, i_2, \dots, i_n} + c_{i_1, i_2, \dots, i_n} * H_{i_1}(x_1) * H_{i_2}(x_2) \dots H_{i_n}(x_n) - p(x_1, x_2, \dots, x_n) = - \sum_{i_1, i_2, \dots, i_n} c_{i_1, i_2, \dots, i_n} - * H_{i_1}(x_1) * H_{i_2}(x_2) \dots H_{i_n}(x_n) + where :math:`x_1, x_2, \dots, x_n = pts`. + Note that `pts` may also be an `(n, m)` array. The parameters in `pts` are converted to arrays only if they are tuples or lists, otherwise they are treated as scalars and diff --git a/numpy/polynomial/hermite_e.py b/numpy/polynomial/hermite_e.py index e9a54dbc396a..a579f2d90407 100644 --- a/numpy/polynomial/hermite_e.py +++ b/numpy/polynomial/hermite_e.py @@ -1028,10 +1028,11 @@ def hermevalnd(pts, c): This function returns the values: .. math:: + p(pts, c) = \sum_{i_1, i_2, \dots, i_n} + c_{i_1, i_2, \dots, i_n} * He_{i_1}(x_1) * He_{i_2}(x_2) \dots He_{i_n}(x_n) - p(x_1, x_2, \dots, x_n) = - \sum_{i_1, i_2, \dots, i_n} c_{i_1, i_2, \dots, i_n} - * He_{i_1}(x_1) * He_{i_2}(x_2) \dots He_{i_n}(x_n) + where :math:`x_1, x_2, \dots, x_n = pts`. + Note that `pts` may also be an `(n, m)` array. The parameters in `pts` are converted to arrays only if they are tuples or lists, otherwise they are treated as scalars and diff --git a/numpy/polynomial/laguerre.py b/numpy/polynomial/laguerre.py index 0a3dbcc2228a..d83d3e53e72a 100644 --- a/numpy/polynomial/laguerre.py +++ b/numpy/polynomial/laguerre.py @@ -1053,10 +1053,11 @@ def lagvalnd(pts, c): This function returns the values: .. math:: + p(pts, c) = \sum_{i_1, i_2, \dots, i_n} + c_{i_1, i_2, \dots, i_n} * L_{i_1}(x_1) * L_{i_2}(x_2) \dots L_{i_n}(x_n) - p(x_1, x_2, \dots, x_n) = - \sum_{i_1, i_2, \dots, i_n} c_{i_1, i_2, \dots, i_n} - * L_{i_1}(x_1) * L_{i_2}(x_2) \dots L_{i_n}(x_n) + where :math:`x_1, x_2, \dots, x_n = pts`. + Note that `pts` may also be an `(n, m)` array. 
The parameters in `pts` are converted to arrays only if they are tuples or lists, otherwise they are treated as scalars and diff --git a/numpy/polynomial/legendre.py b/numpy/polynomial/legendre.py index 2ebbc97dd27f..feedae981b3b 100644 --- a/numpy/polynomial/legendre.py +++ b/numpy/polynomial/legendre.py @@ -1051,10 +1051,11 @@ def legvalnd(pts, c): This function returns the values: .. math:: + p(pts, c) = \sum_{i_1, i_2, \dots, i_n} + c_{i_1, i_2, \dots, i_n} * P_{i_1}(x_1) * P_{i_2}(x_2) \dots P_{i_n}(x_n) - p(x_1, x_2, \dots, x_n) = - \sum_{i_1, i_2, \dots, i_n} c_{i_1, i_2, \dots, i_n} - * P_{i_1}(x_1) * P_{i_2}(x_2) \dots P_{i_n}(x_n) + where :math:`x_1, x_2, \dots, x_n = pts`. + Note that `pts` may also be an `(n, m)` array. The parameters in `pts` are converted to arrays only if they are tuples or lists, otherwise they are treated as scalars and diff --git a/numpy/polynomial/polynomial.py b/numpy/polynomial/polynomial.py index ed1a5a92f08f..19cb12b9d02c 100644 --- a/numpy/polynomial/polynomial.py +++ b/numpy/polynomial/polynomial.py @@ -1023,10 +1023,11 @@ def polyvalnd(pts, c): This function returns the values: .. math:: + p(pts, c) = \sum_{i_1, i_2, \dots, i_n} + c_{i_1, i_2, \dots, i_n} * x_1^{i_1} * x_2^{i_2} \dots x_n^{i_n} - p(x_1, x_2, \dots, x_n) = - \sum_{i_1, i_2, \dots, i_n} c_{i_1, i_2, \dots, i_n} - * x_1^{i_1} * x_2^{i_2} \dots x_n^{i_n} + where :math:`x_1, x_2, \dots, x_n = pts`. + Note that `pts` may also be an `(n, m)` array. 
The parameters in `pts` are converted to arrays only if they are tuples or lists, otherwise they are treated as scalars and From 4a0422470278dd78012f387bf99d2f32e9bc1350 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 2 Apr 2026 10:55:31 +0200 Subject: [PATCH 1563/1718] TYP: ``cum{sum,prod}`` and ``cumulative_{sum,prod}`` shape-typing --- numpy/_core/fromnumeric.pyi | 241 +++++++++++++----- numpy/typing/tests/data/fail/fromnumeric.pyi | 2 +- .../typing/tests/data/reveal/fromnumeric.pyi | 68 ++--- 3 files changed, 209 insertions(+), 102 deletions(-) diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index 3e210e757b4e..132c6ae73468 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -918,40 +918,75 @@ def any[ArrayT: np.ndarray]( where: _ArrayLikeBool_co | _NoValueType = ..., ) -> ArrayT: ... -# +# keep in sync with `cumprod` below @overload def cumsum[ScalarT: np.generic]( a: _ArrayLike[ScalarT], - axis: SupportsIndex | None = None, + axis: None = None, + dtype: None = None, + out: None = None, +) -> _Array1D[ScalarT]: ... +@overload +def cumsum[ArrayT: np.ndarray]( + a: ArrayT, + axis: SupportsIndex, + dtype: None = None, + out: None = None, +) -> ArrayT: ... +@overload +def cumsum[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + axis: SupportsIndex, dtype: None = None, out: None = None, ) -> NDArray[ScalarT]: ... @overload def cumsum( a: ArrayLike, - axis: SupportsIndex | None = None, + axis: None = None, + dtype: None = None, + out: None = None, +) -> _Array1D[Any]: ... +@overload +def cumsum( + a: ArrayLike, + axis: SupportsIndex, dtype: None = None, out: None = None, ) -> NDArray[Any]: ... @overload def cumsum[ScalarT: np.generic]( a: ArrayLike, - axis: SupportsIndex | None, + axis: None, dtype: _DTypeLike[ScalarT], out: None = None, -) -> NDArray[ScalarT]: ... +) -> _Array1D[ScalarT]: ... 
@overload def cumsum[ScalarT: np.generic]( a: ArrayLike, - axis: SupportsIndex | None = None, + axis: None = None, *, dtype: _DTypeLike[ScalarT], out: None = None, +) -> _Array1D[ScalarT]: ... +@overload +def cumsum[ScalarT: np.generic]( + a: ArrayLike, + axis: SupportsIndex, + dtype: _DTypeLike[ScalarT], + out: None = None, ) -> NDArray[ScalarT]: ... @overload def cumsum( a: ArrayLike, - axis: SupportsIndex | None = None, + axis: None = None, + dtype: DTypeLike | None = None, + out: None = None, +) -> _Array1D[Any]: ... +@overload +def cumsum( + a: ArrayLike, + axis: SupportsIndex, dtype: DTypeLike | None = None, out: None = None, ) -> NDArray[Any]: ... @@ -971,12 +1006,33 @@ def cumsum[ArrayT: np.ndarray]( out: ArrayT, ) -> ArrayT: ... +# keep in sync with `cumulative_prod` below @overload def cumulative_sum[ScalarT: np.generic]( x: _ArrayLike[ScalarT], /, *, - axis: SupportsIndex | None = None, + axis: None = None, + dtype: None = None, + out: None = None, + include_initial: bool = False, +) -> _Array1D[ScalarT]: ... +@overload +def cumulative_sum[ArrayT: np.ndarray]( + x: ArrayT, + /, + *, + axis: SupportsIndex, + dtype: None = None, + out: None = None, + include_initial: bool = False, +) -> ArrayT: ... +@overload +def cumulative_sum[ScalarT: np.generic]( + x: _ArrayLike[ScalarT], + /, + *, + axis: SupportsIndex, dtype: None = None, out: None = None, include_initial: bool = False, @@ -986,7 +1042,17 @@ def cumulative_sum( x: ArrayLike, /, *, - axis: SupportsIndex | None = None, + axis: None = None, + dtype: None = None, + out: None = None, + include_initial: bool = False, +) -> _Array1D[Any]: ... 
+@overload +def cumulative_sum( + x: ArrayLike, + /, + *, + axis: SupportsIndex, dtype: None = None, out: None = None, include_initial: bool = False, @@ -996,7 +1062,17 @@ def cumulative_sum[ScalarT: np.generic]( x: ArrayLike, /, *, - axis: SupportsIndex | None = None, + axis: None = None, + dtype: _DTypeLike[ScalarT], + out: None = None, + include_initial: bool = False, +) -> _Array1D[ScalarT]: ... +@overload +def cumulative_sum[ScalarT: np.generic]( + x: ArrayLike, + /, + *, + axis: SupportsIndex, dtype: _DTypeLike[ScalarT], out: None = None, include_initial: bool = False, @@ -1006,7 +1082,17 @@ def cumulative_sum( x: ArrayLike, /, *, - axis: SupportsIndex | None = None, + axis: None = None, + dtype: DTypeLike | None = None, + out: None = None, + include_initial: bool = False, +) -> _Array1D[Any]: ... +@overload +def cumulative_sum( + x: ArrayLike, + /, + *, + axis: SupportsIndex, dtype: DTypeLike | None = None, out: None = None, include_initial: bool = False, @@ -1248,68 +1334,75 @@ def prod[ArrayT: np.ndarray]( where: _ArrayLikeBool_co | _NoValueType = ..., ) -> ArrayT: ... -# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 +# keep in sync with `cumsum` above @overload -def cumprod( - a: _ArrayLikeBool_co, - axis: SupportsIndex | None = None, +def cumprod[ScalarT: np.number | np.bool | np.object_]( + a: _ArrayLike[ScalarT], + axis: None = None, dtype: None = None, out: None = None, -) -> NDArray[int_]: ... +) -> _Array1D[ScalarT]: ... @overload -def cumprod( - a: _ArrayLikeUInt_co, - axis: SupportsIndex | None = None, +def cumprod[ArrayT: NDArray[np.number | np.bool | np.object_]]( + a: ArrayT, + axis: SupportsIndex, dtype: None = None, out: None = None, -) -> NDArray[uint64]: ... +) -> ArrayT: ... 
@overload -def cumprod( - a: _ArrayLikeInt_co, - axis: SupportsIndex | None = None, +def cumprod[ScalarT: np.number | np.bool | np.object_]( + a: _ArrayLike[ScalarT], + axis: SupportsIndex, dtype: None = None, out: None = None, -) -> NDArray[int64]: ... +) -> NDArray[ScalarT]: ... @overload def cumprod( - a: _ArrayLikeFloat_co, - axis: SupportsIndex | None = None, + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None = None, dtype: None = None, out: None = None, -) -> NDArray[floating]: ... +) -> _Array1D[Any]: ... @overload def cumprod( - a: _ArrayLikeComplex_co, - axis: SupportsIndex | None = None, + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: SupportsIndex, dtype: None = None, out: None = None, -) -> NDArray[complexfloating]: ... +) -> NDArray[Any]: ... @overload -def cumprod( - a: _ArrayLikeObject_co, - axis: SupportsIndex | None = None, - dtype: None = None, +def cumprod[ScalarT: np.number | np.bool | np.object_]( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None, + dtype: _DTypeLike[ScalarT], out: None = None, -) -> NDArray[object_]: ... +) -> _Array1D[ScalarT]: ... @overload -def cumprod[ScalarT: np.generic]( +def cumprod[ScalarT: np.number | np.bool | np.object_]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: SupportsIndex | None, + axis: None = None, + *, dtype: _DTypeLike[ScalarT], out: None = None, -) -> NDArray[ScalarT]: ... +) -> _Array1D[ScalarT]: ... @overload -def cumprod[ScalarT: np.generic]( +def cumprod[ScalarT: np.number | np.bool | np.object_]( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: SupportsIndex | None = None, - *, + axis: SupportsIndex, dtype: _DTypeLike[ScalarT], out: None = None, ) -> NDArray[ScalarT]: ... @overload def cumprod( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: SupportsIndex | None = None, + axis: None = None, + dtype: DTypeLike | None = None, + out: None = None, +) -> _Array1D[Any]: ... 
+@overload +def cumprod( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: SupportsIndex, dtype: DTypeLike | None = None, out: None = None, ) -> NDArray[Any]: ... @@ -1329,73 +1422,73 @@ def cumprod[ArrayT: np.ndarray]( out: ArrayT, ) -> ArrayT: ... -# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 +# keep in sync with `cumulative_sum` above @overload -def cumulative_prod( - x: _ArrayLikeBool_co, +def cumulative_prod[ScalarT: np.number | np.bool | np.object_]( + x: _ArrayLike[ScalarT], /, *, - axis: SupportsIndex | None = None, + axis: None = None, dtype: None = None, out: None = None, include_initial: bool = False, -) -> NDArray[int_]: ... +) -> _Array1D[ScalarT]: ... @overload -def cumulative_prod( - x: _ArrayLikeUInt_co, +def cumulative_prod[ArrayT: NDArray[np.number | np.bool | np.object_]]( + x: ArrayT, /, *, - axis: SupportsIndex | None = None, + axis: SupportsIndex, dtype: None = None, out: None = None, include_initial: bool = False, -) -> NDArray[uint64]: ... +) -> ArrayT: ... @overload -def cumulative_prod( - x: _ArrayLikeInt_co, +def cumulative_prod[ScalarT: np.generic]( + x: _ArrayLike[ScalarT], /, *, - axis: SupportsIndex | None = None, + axis: SupportsIndex, dtype: None = None, out: None = None, include_initial: bool = False, -) -> NDArray[int64]: ... +) -> NDArray[ScalarT]: ... @overload def cumulative_prod( - x: _ArrayLikeFloat_co, + x: _ArrayLikeComplex_co | _ArrayLikeObject_co, /, *, - axis: SupportsIndex | None = None, + axis: None = None, dtype: None = None, out: None = None, include_initial: bool = False, -) -> NDArray[floating]: ... +) -> _Array1D[Any]: ... @overload def cumulative_prod( - x: _ArrayLikeComplex_co, + x: _ArrayLikeComplex_co | _ArrayLikeObject_co, /, *, - axis: SupportsIndex | None = None, + axis: SupportsIndex, dtype: None = None, out: None = None, include_initial: bool = False, -) -> NDArray[complexfloating]: ... +) -> NDArray[Any]: ... 
@overload -def cumulative_prod( - x: _ArrayLikeObject_co, +def cumulative_prod[ScalarT: np.number | np.bool | np.object_]( + x: _ArrayLikeComplex_co | _ArrayLikeObject_co, /, *, - axis: SupportsIndex | None = None, - dtype: None = None, + axis: None = None, + dtype: _DTypeLike[ScalarT], out: None = None, include_initial: bool = False, -) -> NDArray[object_]: ... +) -> _Array1D[ScalarT]: ... @overload -def cumulative_prod[ScalarT: np.generic]( +def cumulative_prod[ScalarT: np.number | np.bool | np.object_]( x: _ArrayLikeComplex_co | _ArrayLikeObject_co, /, *, - axis: SupportsIndex | None = None, + axis: SupportsIndex, dtype: _DTypeLike[ScalarT], out: None = None, include_initial: bool = False, @@ -1405,7 +1498,17 @@ def cumulative_prod( x: _ArrayLikeComplex_co | _ArrayLikeObject_co, /, *, - axis: SupportsIndex | None = None, + axis: None = None, + dtype: DTypeLike | None = None, + out: None = None, + include_initial: bool = False, +) -> _Array1D[Any]: ... +@overload +def cumulative_prod( + x: _ArrayLikeComplex_co | _ArrayLikeObject_co, + /, + *, + axis: SupportsIndex, dtype: DTypeLike | None = None, out: None = None, include_initial: bool = False, diff --git a/numpy/typing/tests/data/fail/fromnumeric.pyi b/numpy/typing/tests/data/fail/fromnumeric.pyi index c3f060679089..261f58570a6c 100644 --- a/numpy/typing/tests/data/fail/fromnumeric.pyi +++ b/numpy/typing/tests/data/fail/fromnumeric.pyi @@ -122,7 +122,7 @@ np.prod(AR_U) # type: ignore[arg-type] np.cumprod(a, axis=1.0) # type: ignore[call-overload] np.cumprod(a, out=False) # type: ignore[call-overload] -np.cumprod(AR_U) # type: ignore[arg-type] +np.cumprod(AR_U) # type: ignore[type-var] np.size(a, axis=1.0) # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/reveal/fromnumeric.pyi b/numpy/typing/tests/data/reveal/fromnumeric.pyi index 4474f375b716..5afaf6cc84a6 100644 --- a/numpy/typing/tests/data/reveal/fromnumeric.pyi +++ b/numpy/typing/tests/data/reveal/fromnumeric.pyi @@ -204,22 +204,26 @@ 
assert_type(np.any(AR_b, keepdims=True), Any) assert_type(np.any(AR_f4, keepdims=True), Any) assert_type(np.any(AR_f4, out=AR_subclass), NDArraySubclass) -assert_type(np.cumsum(b), npt.NDArray[np.bool]) -assert_type(np.cumsum(f4), npt.NDArray[np.float32]) -assert_type(np.cumsum(f), npt.NDArray[Any]) -assert_type(np.cumsum(AR_b), npt.NDArray[np.bool]) -assert_type(np.cumsum(AR_f4), npt.NDArray[np.float32]) -assert_type(np.cumsum(f, dtype=float), npt.NDArray[Any]) -assert_type(np.cumsum(f, dtype=np.float64), npt.NDArray[np.float64]) +assert_type(np.cumsum(b), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.cumsum(f4), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.cumsum(f), np.ndarray[tuple[int]]) +assert_type(np.cumsum(AR_b), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.cumsum(AR_b, axis=0), npt.NDArray[np.bool]) +assert_type(np.cumsum(AR_f4), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.cumsum(AR_f4, axis=0), npt.NDArray[np.float32]) +assert_type(np.cumsum(f, dtype=float), np.ndarray[tuple[int]]) +assert_type(np.cumsum(f, dtype=np.float64), np.ndarray[tuple[int], np.dtype[np.float64]]) assert_type(np.cumsum(AR_f4, out=AR_subclass), NDArraySubclass) -assert_type(np.cumulative_sum(b), npt.NDArray[np.bool]) -assert_type(np.cumulative_sum(f4), npt.NDArray[np.float32]) -assert_type(np.cumulative_sum(f), npt.NDArray[Any]) -assert_type(np.cumulative_sum(AR_b), npt.NDArray[np.bool]) -assert_type(np.cumulative_sum(AR_f4), npt.NDArray[np.float32]) -assert_type(np.cumulative_sum(f, dtype=float), npt.NDArray[Any]) -assert_type(np.cumulative_sum(f, dtype=np.float64), npt.NDArray[np.float64]) +assert_type(np.cumulative_sum(b), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.cumulative_sum(f4), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.cumulative_sum(f), np.ndarray[tuple[int]]) +assert_type(np.cumulative_sum(AR_b), np.ndarray[tuple[int], np.dtype[np.bool]]) 
+assert_type(np.cumulative_sum(AR_b, axis=0), npt.NDArray[np.bool]) +assert_type(np.cumulative_sum(AR_f4), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.cumulative_sum(AR_f4, axis=0), npt.NDArray[np.float32]) +assert_type(np.cumulative_sum(f, dtype=float), np.ndarray[tuple[int]]) +assert_type(np.cumulative_sum(f, dtype=np.float64), np.ndarray[tuple[int], np.dtype[np.float64]]) assert_type(np.cumulative_sum(AR_f4, out=AR_subclass), NDArraySubclass) assert_type(np.ptp(b), np.bool) @@ -267,26 +271,26 @@ assert_type(np.prod(AR_f4, dtype=np.float64), np.float64) assert_type(np.prod(AR_f4, dtype=float), Any) assert_type(np.prod(AR_f4, out=AR_subclass), NDArraySubclass) -assert_type(np.cumprod(AR_b), npt.NDArray[np.int_]) -assert_type(np.cumprod(AR_u8), npt.NDArray[np.uint64]) -assert_type(np.cumprod(AR_i8), npt.NDArray[np.int64]) -assert_type(np.cumprod(AR_f4), npt.NDArray[np.floating]) -assert_type(np.cumprod(AR_c16), npt.NDArray[np.complexfloating]) -assert_type(np.cumprod(AR_O), npt.NDArray[np.object_]) -assert_type(np.cumprod(AR_f4, axis=0), npt.NDArray[np.floating]) -assert_type(np.cumprod(AR_f4, dtype=np.float64), npt.NDArray[np.float64]) -assert_type(np.cumprod(AR_f4, dtype=float), npt.NDArray[Any]) +assert_type(np.cumprod(b), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.cumprod(f4), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.cumprod(f), np.ndarray[tuple[int]]) +assert_type(np.cumprod(AR_b), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.cumprod(AR_b, axis=0), npt.NDArray[np.bool]) +assert_type(np.cumprod(AR_f4), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.cumprod(AR_f4, axis=0), npt.NDArray[np.float32]) +assert_type(np.cumprod(f, dtype=float), np.ndarray[tuple[int]]) +assert_type(np.cumprod(f, dtype=np.float64), np.ndarray[tuple[int], np.dtype[np.float64]]) assert_type(np.cumprod(AR_f4, out=AR_subclass), NDArraySubclass) -assert_type(np.cumulative_prod(AR_b), npt.NDArray[np.int_]) 
-assert_type(np.cumulative_prod(AR_u8), npt.NDArray[np.uint64]) -assert_type(np.cumulative_prod(AR_i8), npt.NDArray[np.int64]) -assert_type(np.cumulative_prod(AR_f4), npt.NDArray[np.floating]) -assert_type(np.cumulative_prod(AR_c16), npt.NDArray[np.complexfloating]) -assert_type(np.cumulative_prod(AR_O), npt.NDArray[np.object_]) -assert_type(np.cumulative_prod(AR_f4, axis=0), npt.NDArray[np.floating]) -assert_type(np.cumulative_prod(AR_f4, dtype=np.float64), npt.NDArray[np.float64]) -assert_type(np.cumulative_prod(AR_f4, dtype=float), npt.NDArray[Any]) +assert_type(np.cumulative_prod(b), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.cumulative_prod(f4), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.cumulative_prod(f), np.ndarray[tuple[int]]) +assert_type(np.cumulative_prod(AR_b), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.cumulative_prod(AR_b, axis=0), npt.NDArray[np.bool]) +assert_type(np.cumulative_prod(AR_f4), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.cumulative_prod(AR_f4, axis=0), npt.NDArray[np.float32]) +assert_type(np.cumulative_prod(f, dtype=float), np.ndarray[tuple[int]]) +assert_type(np.cumulative_prod(f, dtype=np.float64), np.ndarray[tuple[int], np.dtype[np.float64]]) assert_type(np.cumulative_prod(AR_f4, out=AR_subclass), NDArraySubclass) assert_type(np.ndim(b), int) From 8a0557f40b6085751b3ba321c6cfe699b28c67d7 Mon Sep 17 00:00:00 2001 From: Aleksei Nikiforov <103434461+AlekseiNikiforovIBM@users.noreply.github.com> Date: Thu, 2 Apr 2026 15:40:22 +0200 Subject: [PATCH 1564/1718] CI/BUG: add native jobs for s390x, fix bug in `pack_inner` (#30819) --- .github/workflows/linux-ppc64le.yml | 42 +++++++++++++++----- .github/workflows/linux_qemu.yml | 19 --------- numpy/_core/src/common/simd/vec/arithmetic.h | 2 +- numpy/_core/src/multiarray/compiled_base.c | 25 ++++++++++++ numpy/_core/tests/test_cpu_features.py | 2 +- numpy/f2py/_backends/meson.build.template | 5 +++ 6 files changed, 63 
insertions(+), 32 deletions(-) diff --git a/.github/workflows/linux-ppc64le.yml b/.github/workflows/linux-ppc64le.yml index 52c23e593f02..83d0baeadfd9 100644 --- a/.github/workflows/linux-ppc64le.yml +++ b/.github/workflows/linux-ppc64le.yml @@ -1,4 +1,4 @@ -name: Native ppc64le Linux Test +name: Linux IBM tests on: pull_request: @@ -20,21 +20,41 @@ concurrency: cancel-in-progress: true jobs: - native_ppc64le: - # This job runs only in the main NumPy repository. - # It requires a native ppc64le GHA runner, which is not available on forks. + native_ibm: + # These jobs runs only in the main NumPy repository. + # It requires a native ppc64le and s390x GHA runners, which are not available on forks. # For more details, see: https://github.com/numpy/numpy/issues/29125 if: github.repository == 'numpy/numpy' - runs-on: ubuntu-24.04-ppc64le-p10 + runs-on: ${{ matrix.config.runner }} strategy: fail-fast: false matrix: config: - - name: "GCC" + - name: "ppc64le/gcc - baseline(default)" args: "-Dallow-noblas=false" - - name: "clang" + runner: ubuntu-24.04-ppc64le-p10 + compiler: "gcc" + - name: "ppc64le/clang - baseline(default)" args: "-Dallow-noblas=false" + runner: ubuntu-24.04-ppc64le-p10 + compiler: "clang" + - name: "s390x/gcc - baseline(default)" + args: "-Dallow-noblas=false" + runner: ubuntu-24.04-s390x + compiler: "gcc" + - name: "s390x/clang - baseline(default)" + args: "-Dallow-noblas=false" + runner: ubuntu-24.04-s390x + compiler: "clang" + - name: "s390x/gcc - baseline(Z15/VXE2)" + args: "-Dallow-noblas=false -Dcpu-baseline=vxe2" + runner: ubuntu-24.04-s390x + compiler: "gcc" + - name: "s390x/clang - baseline(Z15/VXE2)" + args: "-Dallow-noblas=false -Dcpu-baseline=vxe2" + runner: ubuntu-24.04-s390x + compiler: "clang" name: "${{ matrix.config.name }}" steps: @@ -53,11 +73,11 @@ jobs: echo "/home/runner/.local/bin" >> $GITHUB_PATH - name: Install clang - if: matrix.config.name == 'clang' + if: matrix.config.compiler == 'clang' run: | - sudo apt install -y clang - 
export CC=clang - export CXX=clang++ + sudo apt install -y clang-20 + echo CC=clang-20 >> $GITHUB_ENV + echo CXX=clang++-20 >> $GITHUB_ENV - name: Meson Build run: | diff --git a/.github/workflows/linux_qemu.yml b/.github/workflows/linux_qemu.yml index bba244cd12e3..f71eb304a9bb 100644 --- a/.github/workflows/linux_qemu.yml +++ b/.github/workflows/linux_qemu.yml @@ -48,25 +48,6 @@ jobs: fail-fast: false matrix: BUILD_PROP: - - [ - "s390x", - "s390x-linux-gnu", - "s390x/ubuntu:22.04", - "-Dallow-noblas=true", - # Skipping TestRationalFunctions.test_gcd_overflow test - # because of a possible qemu bug that appears to be related to int64 overflow in absolute operation. - # TODO(@seiko2plus): Confirm the bug and provide a minimal reproducer, then report it to upstream. - "(test_kind or test_multiarray or test_simd or test_umath or test_ufunc) and not test_gcd_overflow", - "s390x" - ] - - [ - "s390x - baseline(Z13)", - "s390x-linux-gnu", - "s390x/ubuntu:22.04", - "-Dallow-noblas=true -Dcpu-baseline=vx", - "(test_kind or test_multiarray or test_simd or test_umath or test_ufunc) and not test_gcd_overflow", - "s390x" - ] - [ "riscv64", "riscv64-linux-gnu", diff --git a/numpy/_core/src/common/simd/vec/arithmetic.h b/numpy/_core/src/common/simd/vec/arithmetic.h index 85f4d6b26d68..8eccd491297a 100644 --- a/numpy/_core/src/common/simd/vec/arithmetic.h +++ b/numpy/_core/src/common/simd/vec/arithmetic.h @@ -286,7 +286,7 @@ NPY_FINLINE npyv_u64 npyv_divc_u64(npyv_u64 a, const npyv_u64x3 divisor) // divide each signed 64-bit element by a precomputed divisor (round towards zero) NPY_FINLINE npyv_s64 npyv_divc_s64(npyv_s64 a, const npyv_s64x3 divisor) { - npyv_b64 overflow = npyv_and_b64(vec_cmpeq(a, npyv_setall_s64(-1LL << 63)), (npyv_b64)divisor.val[1]); + npyv_b64 overflow = npyv_and_b64(vec_cmpeq(a, npyv_setall_s64(0x8000000000000000LL)), (npyv_b64)divisor.val[1]); npyv_s64 d = vec_sel(divisor.val[0], npyv_setall_s64(1), overflow); return vec_div(a, d); } diff --git 
a/numpy/_core/src/multiarray/compiled_base.c b/numpy/_core/src/multiarray/compiled_base.c index 19ac45ad170a..ba68304a8082 100644 --- a/numpy/_core/src/multiarray/compiled_base.c +++ b/numpy/_core/src/multiarray/compiled_base.c @@ -1611,9 +1611,29 @@ pack_inner(const char *inptr, #else npy_uint64 arr[4] = {bb[0], bb[1], bb[2], bb[3]}; #endif + + #if NPY_BYTE_ORDER == NPY_BIG_ENDIAN + #if NPY_SIMD_WIDTH == 16 + arr[0] = npy_bswap8(arr[0]); + #elif NPY_SIMD_WIDTH == 32 + arr[0] = npy_bswap8(arr[0]); + arr[1] = npy_bswap8(arr[1]); + #else + arr[0] = npy_bswap8(arr[0]); + arr[1] = npy_bswap8(arr[1]); + arr[2] = npy_bswap8(arr[2]); + arr[3] = npy_bswap8(arr[3]); + #endif + #endif memcpy(outptr, arr, sizeof(arr)); outptr += vstepx4; } else { + #if NPY_BYTE_ORDER == NPY_BIG_ENDIAN + bb[0] = npy_bswap8(bb[0]); + bb[1] = npy_bswap8(bb[1]); + bb[2] = npy_bswap8(bb[2]); + bb[3] = npy_bswap8(bb[3]); + #endif for(int i = 0; i < 4; i++) { for (int j = 0; j < vstep; j++) { memcpy(outptr, (char*)&bb[i] + j, 1); @@ -1628,6 +1648,11 @@ pack_inner(const char *inptr, va = npyv_rev64_u8(va); } npy_uint64 bb = npyv_tobits_b8(npyv_cmpneq_u8(va, v_zero)); + + #if NPY_BYTE_ORDER == NPY_BIG_ENDIAN + bb = npy_bswap8(bb); + #endif + for (int i = 0; i < vstep; ++i) { memcpy(outptr, (char*)&bb + i, 1); outptr += out_stride; diff --git a/numpy/_core/tests/test_cpu_features.py b/numpy/_core/tests/test_cpu_features.py index 9cdd3f1d61d1..c349484d5237 100644 --- a/numpy/_core/tests/test_cpu_features.py +++ b/numpy/_core/tests/test_cpu_features.py @@ -431,7 +431,7 @@ class Test_ZARCH_Features(AbstractTest): features = ["VX", "VXE", "VXE2"] def load_flags(self): - self.load_flags_auxv() + self.load_flags_cpuinfo("features") is_arm = re.match(r"^(arm|aarch64)", machine, re.IGNORECASE) diff --git a/numpy/f2py/_backends/meson.build.template b/numpy/f2py/_backends/meson.build.template index 58c6758cc503..e01598f185e7 100644 --- a/numpy/f2py/_backends/meson.build.template +++ 
b/numpy/f2py/_backends/meson.build.template @@ -7,6 +7,11 @@ project('${modulename}', 'buildtype=${buildtype}' ]) fc = meson.get_compiler('fortran') +cc = meson.get_compiler('c') + +add_project_arguments( + cc.get_supported_arguments( '-fno-strict-aliasing'), language : 'c' +) py = import('python').find_installation('''${python}''', pure: false) py_dep = py.dependency() From a0338f2f999d4b61fdcb6bf4646e5c77ed159991 Mon Sep 17 00:00:00 2001 From: Shreya Ghorui Date: Thu, 2 Apr 2026 20:23:02 +0530 Subject: [PATCH 1565/1718] BUG: avoid precision loss when printing user-defined floating dtypes (#30798) Uses `str()` as a default for formatting user dtypes for whch the `dtype.type`'s module isn't `numpy`. The NumPy variable-width string dtype is specifically checked to avoid it (as it's `__module__` is also not `numpy` exactly.) Co-authored-by: Sebastian Berg --- numpy/_core/arrayprint.py | 8 ++++++++ numpy/_core/tests/test_arrayprint.py | 30 ++++++++++++++++++++++++++++ requirements/test_requirements.txt | 2 ++ 3 files changed, 40 insertions(+) diff --git a/numpy/_core/arrayprint.py b/numpy/_core/arrayprint.py index 6b666cf407ff..6cab73beaa4f 100644 --- a/numpy/_core/arrayprint.py +++ b/numpy/_core/arrayprint.py @@ -530,8 +530,16 @@ def _get_format_function(data, **options): dtype_ = data.dtype dtypeobj = dtype_.type formatdict = _get_formatdict(data, **options) + if dtypeobj is None: return formatdict["numpystr"]() + elif (getattr(dtypeobj, "__module__", None) != "numpy" + and not issubclass(dtypeobj, str)): + # Use `str()` as a default format for non-NumPy dtypes. This should be + # improved. We use `str` assuming that `repr` is likely to duplicate + # information that is contained in the dtype. + # (Do this early, because e.g. quaddtype subclasses floating.) 
+ return formatdict['void']() elif issubclass(dtypeobj, _nt.bool): return formatdict['bool']() elif issubclass(dtypeobj, _nt.integer): diff --git a/numpy/_core/tests/test_arrayprint.py b/numpy/_core/tests/test_arrayprint.py index 3d31347a806b..685156f51502 100644 --- a/numpy/_core/tests/test_arrayprint.py +++ b/numpy/_core/tests/test_arrayprint.py @@ -8,10 +8,12 @@ import numpy as np from numpy._core.arrayprint import _typelessdata +from numpy._utils import _pep440 from numpy.testing import ( HAS_REFCOUNT, IS_WASM, assert_, + assert_array_equal, assert_equal, assert_raises, assert_raises_regex, @@ -1321,3 +1323,31 @@ def test_multithreaded_array_printing(): # reasons this test makes sure it is set up in a thread-safe manner run_threaded(TestPrintOptions().test_floatmode, 500) + + +def test_user_defined_floating_dtype_printing_does_not_corrupt_precision(): + """ + Ensure that array printing does not use NumPy Dragon4 formatting + for user-defined floating dtypes, which would silently truncate + precision to float64. + """ + # Quaddtype (<=1.0.0) may have a bug that leads to test failures elsewhere + # (this may also be an interplay of numpy/quaddtype but let's hope new + # quaddtype versions will fix it.) + from importlib.metadata import version + + try: + quaddtype_version = version("numpy_quaddtype") + except Exception: + pytest.skip("numpy_quaddtype not installed") + else: + if _pep440.Version(quaddtype_version) <= _pep440.Version("1.0.0"): + pytest.skip("critical bug in quaddtype during import") + + numpy_quaddtype = pytest.importorskip("numpy_quaddtype") + + pi_str = "3.14159265358979323846264338327950288" + arr = np.array([pi_str], dtype=QuadPrecDType()) + res = np.array(str(arr).strip("[] "), dtype=QuadPrecDType()) + # Check that the string representation round-trips correctly. 
+ assert_array_equal(res, arr) diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index 8ffbe62780a5..6ade6d771052 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -9,3 +9,5 @@ pytest-timeout # for optional f2py encoding detection charset-normalizer tzdata +# Ensure we install quaddtype in CI for some wheel setups (mac): +numpy_quaddtype ; python_version<"3.15" and sys_platform=="darwin" From 14ace212068de09548930b9dd48970d704683a36 Mon Sep 17 00:00:00 2001 From: Shirong Wang Date: Thu, 2 Apr 2026 23:45:32 +0800 Subject: [PATCH 1566/1718] Merge pull request #30952 from jeanwsr/f2py-doc-win DOC: f2py: update info about conda compilers --- doc/source/f2py/windows/conda.rst | 6 ------ doc/source/f2py/windows/index.rst | 5 ++--- 2 files changed, 2 insertions(+), 9 deletions(-) diff --git a/doc/source/f2py/windows/conda.rst b/doc/source/f2py/windows/conda.rst index 08a79b29dacd..75ee627f8e6d 100644 --- a/doc/source/f2py/windows/conda.rst +++ b/doc/source/f2py/windows/conda.rst @@ -25,10 +25,4 @@ Now we will setup a ``conda`` environment. ``conda`` pulls packages from ``msys2``, however, the UX is sufficiently different enough to warrant a separate discussion. -.. warning:: - As of 30-01-2022, the `MSYS2 binaries`_ shipped with ``conda`` are **outdated** and this approach is **not preferred**. - - - -.. _MSYS2 binaries: https://github.com/conda-forge/conda-forge.github.io/issues/1044 diff --git a/doc/source/f2py/windows/index.rst b/doc/source/f2py/windows/index.rst index aa7851da5dd2..af505ed3d2ba 100644 --- a/doc/source/f2py/windows/index.rst +++ b/doc/source/f2py/windows/index.rst @@ -58,8 +58,7 @@ Windows Subsystem for Linux Windows applications, but is significantly more complicated. Conda - Windows support for compilers in ``conda`` is facilitated by pulling MSYS2 - binaries, however these `are outdated`_, and therefore not recommended (as of 30-01-2022). 
+ Windows support for GNU compilers in ``conda`` is provided by `conda-forge`_ channel. PGI Compilers (commercial) Unmaintained but sufficient if an existing license is present. Works @@ -208,7 +207,7 @@ path using a hash. This needs to be added to the ``PATH`` variable. .. _JeanHeyd Meneide: https://thephd.dev/binary-banshees-digital-demons-abi-c-c++-help-me-god-please .. _legacy version of Flang: https://github.com/flang-compiler/flang .. _native Windows support: https://developer.nvidia.com/nvidia-hpc-sdk-downloads#collapseFour -.. _are outdated: https://github.com/conda-forge/conda-forge.github.io/issues/1044 +.. _conda-forge: https://conda-forge.org/docs/maintainer/infrastructure/#compilers-supplied-by-conda-forge .. _now deprecated: https://github.com/numpy/numpy/pull/20875 .. _LLVM Flang: https://releases.llvm.org/11.0.0/tools/flang/docs/ReleaseNotes.html .. _SciPy's documentation: https://scipy.github.io/devdocs/building/index.html#system-level-dependencies From fc3f5f5d4e9fc4d36ba103abb3f1f8ba87ae474d Mon Sep 17 00:00:00 2001 From: nakul-krishnakumar Date: Fri, 3 Apr 2026 00:42:09 +0530 Subject: [PATCH 1567/1718] update according to code review --- numpy/lib/tests/test_format.py | 18 ++++-------------- 1 file changed, 4 insertions(+), 14 deletions(-) diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py index 98d1d440b66a..13429e988daf 100644 --- a/numpy/lib/tests/test_format.py +++ b/numpy/lib/tests/test_format.py @@ -72,20 +72,10 @@ >>> >>> >>> NbufferT = [ - ... ([3,2], ( - ... 6j, - ... 6., - ... ('nn', [6j,4j], [6.,4.], [1,2]), - ... 'NN', - ... True - ... ), 'cc', ('NN', 6j), [[6.,4.],[6.,4.]], 8), - ... ([4,3], ( - ... 7j, - ... 7., - ... ('oo', [7j,5j], [7.,5.], [2,1]), - ... 'OO', - ... False - ... ), 'dd', ('OO', 7j), [[7.,5.],[7.,5.]], 9), + ... ([3,2], (6j, 6., ('nn', [6j,4j], [6.,4.], [1,2]), 'NN', True), 'cc', + ... ('NN', 6j), [[6.,4.],[6.,4.]], 8), + ... 
([4,3], (7j, 7., ('oo', [7j,5j], [7.,5.], [2,1]), 'OO', False), 'dd', + ... ('OO', 7j), [[7.,5.],[7.,5.]], 9), ... ] >>> >>> From 6376466ebcf039a6328f579f9bfaba68db06222d Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 2 Apr 2026 23:59:58 +0200 Subject: [PATCH 1568/1718] TYP: ``transpose`` and ``matrix_transpose`` shape-typing --- numpy/_core/fromnumeric.pyi | 17 ++++++++--------- numpy/ma/core.pyi | 6 ++++-- numpy/typing/tests/data/reveal/fromnumeric.pyi | 4 ++++ 3 files changed, 16 insertions(+), 11 deletions(-) diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index 132c6ae73468..27f48f7fe5c3 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -311,18 +311,17 @@ def swapaxes[ScalarT: np.generic](a: _ArrayLike[ScalarT], axis1: SupportsIndex, @overload def swapaxes(a: ArrayLike, axis1: SupportsIndex, axis2: SupportsIndex) -> NDArray[Any]: ... -# keep in sync with `ma.core.transpose` +# @overload -def transpose[ScalarT: np.generic]( - a: _ArrayLike[ScalarT], - axes: _ShapeLike | None = None, -) -> NDArray[ScalarT]: ... +def transpose[ArrayT: np.ndarray](a: ArrayT, axes: _ShapeLike | None = None) -> ArrayT: ... @overload -def transpose( - a: ArrayLike, - axes: _ShapeLike | None = None, -) -> NDArray[Any]: ... +def transpose[ScalarT: np.generic](a: _ArrayLike[ScalarT], axes: _ShapeLike | None = None) -> NDArray[ScalarT]: ... +@overload +def transpose(a: ArrayLike, axes: _ShapeLike | None = None) -> NDArray[Any]: ... +# +@overload +def matrix_transpose[ArrayT: np.ndarray](x: ArrayT, /) -> ArrayT: ... @overload def matrix_transpose[ScalarT: np.generic](x: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... 
@overload diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index d7b92e12065b..13df91ac4c36 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -3571,9 +3571,11 @@ def putmask(a: np.ndarray, mask: _ArrayLikeBool_co, values: ArrayLike) -> None: # keep in sync with `_core.fromnumeric.transpose` @overload -def transpose[ScalarT: np.generic](a: _ArrayLike[ScalarT], axes: _ShapeLike | None = None) -> _MaskedArray[ScalarT]: ... +def transpose[ArrayT: np.ndarray](a: ArrayT, axes: _ShapeLike | None = None) -> ArrayT: ... @overload -def transpose(a: ArrayLike, axes: _ShapeLike | None = None) -> _MaskedArray[Incomplete]: ... +def transpose[ScalarT: np.generic](a: _ArrayLike[ScalarT], axes: _ShapeLike | None = None) -> _MaskedArray[ScalarT]: ... +@overload # `_MaskedArray | np.ndarray` is equivalent to `np.ndarray` +def transpose(a: ArrayLike, axes: _ShapeLike | None = None) -> np.ndarray: ... # keep in sync with `_core.fromnumeric.reshape` @overload # shape: index diff --git a/numpy/typing/tests/data/reveal/fromnumeric.pyi b/numpy/typing/tests/data/reveal/fromnumeric.pyi index 5afaf6cc84a6..89060b456349 100644 --- a/numpy/typing/tests/data/reveal/fromnumeric.pyi +++ b/numpy/typing/tests/data/reveal/fromnumeric.pyi @@ -9,6 +9,8 @@ class NDArraySubclass(np.ndarray[tuple[Any, ...], np.dtype[np.complex128]]): ... 
AR_b: npt.NDArray[np.bool] AR_f4: npt.NDArray[np.float32] +AR_f4_1d: np.ndarray[tuple[int], np.dtype[np.float32]] +AR_f4_2d: np.ndarray[tuple[int, int], np.dtype[np.float32]] AR_c16: npt.NDArray[np.complex128] AR_u8: npt.NDArray[np.uint64] AR_i8: npt.NDArray[np.int64] @@ -68,6 +70,8 @@ assert_type(np.transpose(f4), npt.NDArray[np.float32]) assert_type(np.transpose(f), npt.NDArray[Any]) assert_type(np.transpose(AR_b), npt.NDArray[np.bool]) assert_type(np.transpose(AR_f4), npt.NDArray[np.float32]) +assert_type(np.transpose(AR_f4_1d), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.transpose(AR_f4_2d), np.ndarray[tuple[int, int], np.dtype[np.float32]]) assert_type(np.partition(b, 0, axis=None), npt.NDArray[np.bool]) assert_type(np.partition(f4, 0, axis=None), npt.NDArray[np.float32]) From aa1d556eadd2f565d11ed851a834bf4e3a43c5f2 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 3 Apr 2026 00:20:49 +0200 Subject: [PATCH 1569/1718] TYP: ``sort`` and ``argsort`` shape-typing --- numpy/_core/fromnumeric.pyi | 56 +++++++++++++++++-- numpy/typing/tests/data/fail/fromnumeric.pyi | 4 +- .../typing/tests/data/reveal/fromnumeric.pyi | 29 ++++++++-- 3 files changed, 77 insertions(+), 12 deletions(-) diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index 132c6ae73468..5b9f0ff329c6 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -414,33 +414,81 @@ def argpartition[ShapeT: _Shape]( # @overload +def sort[ArrayT: np.ndarray]( + a: ArrayT, + axis: SupportsIndex = -1, + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, + *, + stable: bool | None = None, +) -> ArrayT: ... +@overload def sort[ScalarT: np.generic]( a: _ArrayLike[ScalarT], - axis: SupportsIndex | None = -1, + axis: SupportsIndex = -1, kind: _SortKind | None = None, order: str | Sequence[str] | None = None, *, stable: bool | None = None, ) -> NDArray[ScalarT]: ... 
@overload +def sort[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + axis: None, + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, + *, + stable: bool | None = None, +) -> _Array1D[ScalarT]: ... +@overload def sort( a: ArrayLike, - axis: SupportsIndex | None = -1, + axis: SupportsIndex = -1, kind: _SortKind | None = None, order: str | Sequence[str] | None = None, *, stable: bool | None = None, ) -> NDArray[Any]: ... +@overload +def sort( + a: ArrayLike, + axis: None, + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, + *, + stable: bool | None = None, +) -> _Array1D[Any]: ... +# +@overload +def argsort[ShapeT: _Shape]( + a: np.ndarray[ShapeT], + axis: SupportsIndex = -1, + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, + *, + stable: bool | None = None, +) -> np.ndarray[ShapeT, np.dtype[np.intp]]: ... +@overload def argsort( a: ArrayLike, - axis: SupportsIndex | None = -1, + axis: SupportsIndex = -1, kind: _SortKind | None = None, order: str | Sequence[str] | None = None, *, stable: bool | None = None, -) -> NDArray[intp]: ... +) -> NDArray[np.intp]: ... +@overload +def argsort( + a: ArrayLike, + axis: None, + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, + *, + stable: bool | None = None, +) -> _Array1D[np.intp]: ... 
+# @overload def argmax( a: ArrayLike, diff --git a/numpy/typing/tests/data/fail/fromnumeric.pyi b/numpy/typing/tests/data/fail/fromnumeric.pyi index 261f58570a6c..acf5002fab42 100644 --- a/numpy/typing/tests/data/fail/fromnumeric.pyi +++ b/numpy/typing/tests/data/fail/fromnumeric.pyi @@ -46,8 +46,8 @@ np.sort(A, axis="bob") # type: ignore[call-overload] np.sort(A, kind="bob") # type: ignore[call-overload] np.sort(A, order=range(5)) # type: ignore[arg-type] -np.argsort(A, axis="bob") # type: ignore[arg-type] -np.argsort(A, kind="bob") # type: ignore[arg-type] +np.argsort(A, axis="bob") # type: ignore[call-overload] +np.argsort(A, kind="bob") # type: ignore[call-overload] np.argsort(A, order=range(5)) # type: ignore[arg-type] np.argmax(A, axis="bob") # type: ignore[call-overload] diff --git a/numpy/typing/tests/data/reveal/fromnumeric.pyi b/numpy/typing/tests/data/reveal/fromnumeric.pyi index 5afaf6cc84a6..5e34890cd6da 100644 --- a/numpy/typing/tests/data/reveal/fromnumeric.pyi +++ b/numpy/typing/tests/data/reveal/fromnumeric.pyi @@ -9,6 +9,8 @@ class NDArraySubclass(np.ndarray[tuple[Any, ...], np.dtype[np.complex128]]): ... 
AR_b: npt.NDArray[np.bool] AR_f4: npt.NDArray[np.float32] +AR_f4_1d: np.ndarray[tuple[int], np.dtype[np.float32]] +AR_f4_2d: np.ndarray[tuple[int, int], np.dtype[np.float32]] AR_c16: npt.NDArray[np.complex128] AR_u8: npt.NDArray[np.uint64] AR_i8: npt.NDArray[np.int64] @@ -86,12 +88,27 @@ assert_type(np.argpartition(f, 0, axis=None), np.ndarray[tuple[int], np.dtype[np assert_type(np.argpartition(AR_b, 0, axis=None), np.ndarray[tuple[int], np.dtype[np.intp]]) assert_type(np.argpartition(AR_f4, 0, axis=None), np.ndarray[tuple[int], np.dtype[np.intp]]) -assert_type(np.sort([2, 1], 0), npt.NDArray[Any]) -assert_type(np.sort(AR_b, 0), npt.NDArray[np.bool]) -assert_type(np.sort(AR_f4, 0), npt.NDArray[np.float32]) - -assert_type(np.argsort(AR_b, 0), npt.NDArray[np.intp]) -assert_type(np.argsort(AR_f4, 0), npt.NDArray[np.intp]) +assert_type(np.sort([2, 1]), npt.NDArray[Any]) +assert_type(np.sort(AR_b), npt.NDArray[np.bool]) +assert_type(np.sort(AR_f4), npt.NDArray[np.float32]) +assert_type(np.sort(AR_f4_1d), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.sort(AR_f4_2d), np.ndarray[tuple[int, int], np.dtype[np.float32]]) +assert_type(np.sort([2, 1], axis=None), np.ndarray[tuple[int]]) +assert_type(np.sort(AR_b, axis=None), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.sort(AR_f4, axis=None), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.sort(AR_f4_1d, axis=None), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.sort(AR_f4_2d, axis=None), np.ndarray[tuple[int], np.dtype[np.float32]]) + +assert_type(np.argsort([2, 1]), npt.NDArray[np.intp]) +assert_type(np.argsort(AR_b), npt.NDArray[np.intp]) +assert_type(np.argsort(AR_f4), npt.NDArray[np.intp]) +assert_type(np.argsort(AR_f4_1d), np.ndarray[tuple[int], np.dtype[np.intp]]) +assert_type(np.argsort(AR_f4_2d), np.ndarray[tuple[int, int], np.dtype[np.intp]]) +assert_type(np.argsort([2, 1], axis=None), np.ndarray[tuple[int], np.dtype[np.intp]]) +assert_type(np.argsort(AR_b, 
axis=None), np.ndarray[tuple[int], np.dtype[np.intp]]) +assert_type(np.argsort(AR_f4, axis=None), np.ndarray[tuple[int], np.dtype[np.intp]]) +assert_type(np.argsort(AR_f4_1d, axis=None), np.ndarray[tuple[int], np.dtype[np.intp]]) +assert_type(np.argsort(AR_f4_2d, axis=None), np.ndarray[tuple[int], np.dtype[np.intp]]) assert_type(np.argmax(AR_b), np.intp) assert_type(np.argmax(AR_f4), np.intp) From 09d26055d17f53d1844dd06811ae9514c47da530 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 2 Apr 2026 23:27:25 +0200 Subject: [PATCH 1570/1718] TYP: ``clip`` and ``[a]round`` shape-typing --- numpy/_core/fromnumeric.pyi | 12 ++++++------ numpy/typing/tests/data/fail/fromnumeric.pyi | 2 +- numpy/typing/tests/data/reveal/fromnumeric.pyi | 2 ++ 3 files changed, 9 insertions(+), 7 deletions(-) diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index 47bffa826fc9..6a42651fa87d 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -711,8 +711,8 @@ def compress[ArrayT: np.ndarray]( # TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload -def clip[ScalarT: np.generic]( - a: ScalarT, +def clip[ScalarOrArrayT: np.generic | np.ndarray]( + a: ScalarOrArrayT, a_min: ArrayLike | _NoValueType | None = ..., a_max: ArrayLike | _NoValueType | None = ..., out: None = None, @@ -721,7 +721,7 @@ def clip[ScalarT: np.generic]( max: ArrayLike | _NoValueType | None = ..., dtype: None = None, **kwargs: Unpack[_UFuncKwargs], -) -> ScalarT: ... +) -> ScalarOrArrayT: ... @overload def clip( a: _ScalarLike_co, @@ -1583,11 +1583,11 @@ def around( out: None = None, ) -> float16: ... @overload -def around[NumberOrObjectT: np.number | np.object_]( - a: NumberOrObjectT, +def around[ScalarOrArrayT: np.number | np.object_ | NDArray[np.number | np.object_]]( + a: ScalarOrArrayT, decimals: SupportsIndex = 0, out: None = None, -) -> NumberOrObjectT: ... +) -> ScalarOrArrayT: ... 
@overload def around( a: _ComplexLike_co | object_, diff --git a/numpy/typing/tests/data/fail/fromnumeric.pyi b/numpy/typing/tests/data/fail/fromnumeric.pyi index acf5002fab42..7f98ab4602c2 100644 --- a/numpy/typing/tests/data/fail/fromnumeric.pyi +++ b/numpy/typing/tests/data/fail/fromnumeric.pyi @@ -128,7 +128,7 @@ np.size(a, axis=1.0) # type: ignore[arg-type] np.around(a, decimals=1.0) # type: ignore[call-overload] np.around(a, out=type) # type: ignore[call-overload] -np.around(AR_U) # type: ignore[arg-type] +np.around(AR_U) # type: ignore[type-var] np.mean(a, axis=1.0) # type: ignore[call-overload] np.mean(a, out=False) # type: ignore[call-overload] diff --git a/numpy/typing/tests/data/reveal/fromnumeric.pyi b/numpy/typing/tests/data/reveal/fromnumeric.pyi index e26bba7c8401..042690fb4367 100644 --- a/numpy/typing/tests/data/reveal/fromnumeric.pyi +++ b/numpy/typing/tests/data/reveal/fromnumeric.pyi @@ -185,6 +185,7 @@ assert_type(np.clip(AR_b, 0, 1), npt.NDArray[np.bool]) assert_type(np.clip(AR_f4, 0, 1), npt.NDArray[np.float32]) assert_type(np.clip([0], 0, 1), npt.NDArray[Any]) assert_type(np.clip(AR_b, 0, 1, out=AR_subclass), NDArraySubclass) +assert_type(np.clip(AR_f4_1d, 0, 1), np.ndarray[tuple[int], np.dtype[np.float32]]) assert_type(np.sum(b), np.bool) assert_type(np.sum(f4), np.float32) @@ -333,6 +334,7 @@ assert_type(np.around(AR_i8), npt.NDArray[np.int64]) assert_type(np.around(AR_f4), npt.NDArray[np.float32]) assert_type(np.around([1.5]), npt.NDArray[Any]) assert_type(np.around(AR_f4, out=AR_subclass), NDArraySubclass) +assert_type(np.around(AR_f4_1d), np.ndarray[tuple[int], np.dtype[np.float32]]) assert_type(np.mean(AR_b), np.floating) assert_type(np.mean(AR_i8), np.floating) From b8232c5b6de09dfbd75ddfd09002d14bc4effdd8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 3 Apr 2026 17:55:06 +0000 Subject: [PATCH 1571/1718] MAINT: Bump github/codeql-action from 4.32.6 to 4.35.1 Bumps 
[github/codeql-action](https://github.com/github/codeql-action) from 4.32.6 to 4.35.1. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/0d579ffd059c29b07949a3cce3983f0780820c98...c10b8064de6f491fea524254123dbe5e09572f13) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 4.35.1 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 18334c5d76b9..7ac309dfff1a 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@0d579ffd059c29b07949a3cce3983f0780820c98 # v4.32.6 + uses: github/codeql-action/init@c10b8064de6f491fea524254123dbe5e09572f13 # v4.35.1 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@0d579ffd059c29b07949a3cce3983f0780820c98 # v4.32.6 + uses: github/codeql-action/autobuild@c10b8064de6f491fea524254123dbe5e09572f13 # v4.35.1 # ℹ️ Command-line programs to run using the OS shell. 
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@0d579ffd059c29b07949a3cce3983f0780820c98 # v4.32.6 + uses: github/codeql-action/analyze@c10b8064de6f491fea524254123dbe5e09572f13 # v4.35.1 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 7081eb640485..04c95da1cb9e 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@0d579ffd059c29b07949a3cce3983f0780820c98 # v2.1.27 + uses: github/codeql-action/upload-sarif@c10b8064de6f491fea524254123dbe5e09572f13 # v2.1.27 with: sarif_file: results.sarif From c9137919564daa91c132e0700a68453482bed3a5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 3 Apr 2026 17:55:31 +0000 Subject: [PATCH 1572/1718] MAINT: Bump ruff from 0.15.7 to 0.15.8 in /requirements Bumps [ruff](https://github.com/astral-sh/ruff) from 0.15.7 to 0.15.8. - [Release notes](https://github.com/astral-sh/ruff/releases) - [Changelog](https://github.com/astral-sh/ruff/blob/main/CHANGELOG.md) - [Commits](https://github.com/astral-sh/ruff/compare/0.15.7...0.15.8) --- updated-dependencies: - dependency-name: ruff dependency-version: 0.15.8 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- requirements/linter_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/linter_requirements.txt b/requirements/linter_requirements.txt index 2dfcf6006ce6..99bc7635cd67 100644 --- a/requirements/linter_requirements.txt +++ b/requirements/linter_requirements.txt @@ -1,5 +1,5 @@ # keep in sync with `environment.yml` cython-lint -ruff==0.15.7 +ruff==0.15.8 GitPython>=3.1.30 spin From 206b77ec052475ca212a7d79b0fa9d3a12bafb8c Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 3 Apr 2026 21:42:15 +0200 Subject: [PATCH 1573/1718] TYP: ``inner`` shape-typing and transparent dtypes --- numpy/_core/multiarray.pyi | 41 ++++++++++++- numpy/typing/tests/data/reveal/multiarray.pyi | 57 ++++++++++++++++++- 2 files changed, 95 insertions(+), 3 deletions(-) diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index 35111cf42225..33c901113f6c 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -8,6 +8,7 @@ from typing import ( Final, Generic, Literal as L, + Never, Protocol, Self, SupportsIndex, @@ -183,6 +184,12 @@ _ArrayT_co = TypeVar("_ArrayT_co", bound=np.ndarray, default=np.ndarray, covaria type _Array[ShapeT: _Shape, ScalarT: np.generic] = ndarray[ShapeT, dtype[ScalarT]] type _Array1D[ScalarT: np.generic] = ndarray[tuple[int], dtype[ScalarT]] +type _Array2D[ScalarT: np.generic] = ndarray[tuple[int, int], dtype[ScalarT]] +# workaround for mypy's and pyright's typing spec non-compliance regarding overloads +type _ArrayJustND[ScalarT: np.generic] = ndarray[tuple[Never, Never, Never], dtype[ScalarT]] + +type _ToArray1D[ScalarT: np.generic] = _Array1D[ScalarT] | Sequence[ScalarT] +type _ToArray2D[ScalarT: np.generic] = _Array2D[ScalarT] | Sequence[Sequence[ScalarT]] # Valid time units type _UnitKind = L[ @@ -210,6 +217,7 @@ type _RollKind = L[ # `raise` is deliberately excluded ] type _ArangeScalar = np.integer | np.floating | np.datetime64 | np.timedelta64 +type 
_InnerScalar = np.number | np.bool | np.timedelta64 # The datetime functions perform unsafe casts to `datetime64[D]`, # so a lot of different argument types are allowed here @@ -671,7 +679,38 @@ def concatenate[OutT: np.ndarray]( ) -> OutT: ... # keep in sync with `ma.core.inner` -def inner(a: ArrayLike, b: ArrayLike, /) -> Incomplete: ... +@overload # (?d T, Nd T) -> 0d|Nd T (workaround) +def inner[ScalarT: _InnerScalar | np.object_](a: _ArrayJustND[ScalarT], b: _ArrayLike[ScalarT], /) -> NDArray[ScalarT] | Any: ... +@overload # (Nd T, ?d T) -> 0d|Nd T (workaround) +def inner[ScalarT: _InnerScalar | np.object_](a: _ArrayLike[ScalarT], b: _ArrayJustND[ScalarT], /) -> NDArray[ScalarT] | Any: ... +@overload # (1d T, 1d T) -> 0d T +def inner[ScalarT: _InnerScalar](a: _ToArray1D[ScalarT], b: _ToArray1D[ScalarT], /) -> ScalarT: ... +@overload # (1d object_, 1d _) -> 0d object +def inner(a: _Array1D[np.object_], b: _Array1D[np.object_] | _ToArray1D[_InnerScalar], /) -> Any: ... +@overload # (1d _, 1d object_) -> 0d object +def inner(a: _ToArray1D[_InnerScalar], b: _Array1D[np.object_], /) -> Any: ... +@overload # (1d bool, 1d bool) -> bool_ +def inner(a: Sequence[bool], b: Sequence[bool], /) -> np.bool: ... +@overload # (1d ~int, 1d +int) -> int_ +def inner(a: list[int], b: Sequence[int], /) -> np.int_: ... +@overload # (1d +int, 1d ~int) -> int_ +def inner(a: Sequence[int], b: list[int], /) -> np.int_: ... +@overload # (1d ~float, 1d +float) -> float64 +def inner(a: list[float], b: Sequence[float], /) -> np.float64: ... +@overload # (1d +float, 1d ~float) -> float64 +def inner(a: Sequence[float], b: list[float], /) -> np.float64: ... +@overload # (1d ~complex, 1d +complex) -> complex128 +def inner(a: list[complex], b: Sequence[complex], /) -> np.complex128: ... +@overload # (1d +complex, 1d ~complex) -> complex128 +def inner(a: Sequence[complex], b: list[complex], /) -> np.complex128: ... 
+@overload # (1d T, 2d T) -> 1d T +def inner[ScalarT: _InnerScalar | np.object_](a: _ToArray1D[ScalarT], b: _Array2D[ScalarT], /) -> _Array1D[ScalarT]: ... +@overload # (2d T, 1d T) -> 1d T +def inner[ScalarT: _InnerScalar | np.object_](a: _ToArray2D[ScalarT], b: _Array1D[ScalarT], /) -> _Array1D[ScalarT]: ... +@overload # (2d T, 2d T) -> 2d T +def inner[ScalarT: _InnerScalar | np.object_](a: _ToArray2D[ScalarT], b: _Array2D[ScalarT], /) -> _Array2D[ScalarT]: ... +@overload # fallback +def inner(a: ArrayLike, b: ArrayLike, /) -> Any: ... # keep in sync with `ma.core.where` @overload diff --git a/numpy/typing/tests/data/reveal/multiarray.pyi b/numpy/typing/tests/data/reveal/multiarray.pyi index fefb6e2fbb5d..1dd3a662314a 100644 --- a/numpy/typing/tests/data/reveal/multiarray.pyi +++ b/numpy/typing/tests/data/reveal/multiarray.pyi @@ -8,14 +8,23 @@ class SubClass[ScalarT: np.generic](np.ndarray[tuple[Any, ...], np.dtype[ScalarT subclass: SubClass[np.float64] +AR_f4_nd: npt.NDArray[np.float32] +AR_f4_1d: np.ndarray[tuple[int], np.dtype[np.float32]] +AR_f4_2d: np.ndarray[tuple[int, int], np.dtype[np.float32]] AR_f8: npt.NDArray[np.float64] +AR_c16: npt.NDArray[np.complex128] AR_i8: npt.NDArray[np.int64] AR_u1: npt.NDArray[np.uint8] AR_m: npt.NDArray[np.timedelta64] AR_M: npt.NDArray[np.datetime64] +AR_O_nd: npt.NDArray[np.object_] +AR_O_1d: np.ndarray[tuple[int], np.dtype[np.object_]] +AR_O_2d: np.ndarray[tuple[int, int], np.dtype[np.object_]] -AR_LIKE_f: list[float] +AR_LIKE_b: list[bool] AR_LIKE_i: list[int] +AR_LIKE_f: list[float] +AR_LIKE_c: list[complex] m: np.timedelta64 M: np.datetime64 @@ -59,7 +68,51 @@ assert_type(b_i8_f8_f8.numiter, int) assert_type(b_i8_f8_f8.shape, tuple[Any, ...]) assert_type(b_i8_f8_f8.size, int) -assert_type(np.inner(AR_f8, AR_i8), Any) +# + +assert_type(np.inner(AR_LIKE_b, AR_LIKE_b), np.bool) +assert_type(np.inner(AR_LIKE_b, AR_LIKE_i), np.int_) +assert_type(np.inner(AR_LIKE_b, AR_LIKE_f), np.float64) +assert_type(np.inner(AR_LIKE_b, 
AR_LIKE_c), np.complex128) +assert_type(np.inner(AR_LIKE_i, AR_LIKE_b), np.int_) +assert_type(np.inner(AR_LIKE_i, AR_LIKE_i), np.int_) +assert_type(np.inner(AR_LIKE_i, AR_LIKE_f), np.float64) +assert_type(np.inner(AR_LIKE_i, AR_LIKE_c), np.complex128) +assert_type(np.inner(AR_LIKE_f, AR_LIKE_b), np.float64) +assert_type(np.inner(AR_LIKE_f, AR_LIKE_i), np.float64) +assert_type(np.inner(AR_LIKE_f, AR_LIKE_f), np.float64) +assert_type(np.inner(AR_LIKE_f, AR_LIKE_c), np.complex128) +assert_type(np.inner(AR_LIKE_c, AR_LIKE_b), np.complex128) +assert_type(np.inner(AR_LIKE_c, AR_LIKE_i), np.complex128) +assert_type(np.inner(AR_LIKE_c, AR_LIKE_f), np.complex128) +assert_type(np.inner(AR_LIKE_c, AR_LIKE_c), np.complex128) + +assert_type(np.inner(AR_f4_1d, AR_f4_1d), np.float32) +assert_type(np.inner(AR_f4_1d, AR_f4_2d), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.inner(AR_f4_1d, AR_f4_nd), npt.NDArray[np.float32] | Any) +assert_type(np.inner(AR_f4_2d, AR_f4_1d), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.inner(AR_f4_2d, AR_f4_2d), np.ndarray[tuple[int, int], np.dtype[np.float32]]) +assert_type(np.inner(AR_f4_2d, AR_f4_nd), npt.NDArray[np.float32] | Any) +assert_type(np.inner(AR_f4_nd, AR_f4_1d), npt.NDArray[np.float32] | Any) +assert_type(np.inner(AR_f4_nd, AR_f4_2d), npt.NDArray[np.float32] | Any) +assert_type(np.inner(AR_f4_nd, AR_f4_nd), npt.NDArray[np.float32] | Any) + +assert_type(np.inner(AR_O_1d, AR_O_1d), Any) +assert_type(np.inner(AR_O_1d, AR_O_2d), np.ndarray[tuple[int], np.dtype[np.object_]]) +assert_type(np.inner(AR_O_1d, AR_O_nd), npt.NDArray[np.object_] | Any) +assert_type(np.inner(AR_O_2d, AR_O_1d), np.ndarray[tuple[int], np.dtype[np.object_]]) +assert_type(np.inner(AR_O_2d, AR_O_2d), np.ndarray[tuple[int, int], np.dtype[np.object_]]) +assert_type(np.inner(AR_O_2d, AR_O_nd), npt.NDArray[np.object_] | Any) +assert_type(np.inner(AR_O_nd, AR_O_1d), npt.NDArray[np.object_] | Any) +assert_type(np.inner(AR_O_nd, AR_O_2d), 
npt.NDArray[np.object_] | Any) +assert_type(np.inner(AR_O_nd, AR_O_nd), npt.NDArray[np.object_] | Any) + +assert_type(np.inner(AR_u1, AR_u1), npt.NDArray[np.uint8] | Any) +assert_type(np.inner(AR_i8, AR_i8), npt.NDArray[np.int64] | Any) +assert_type(np.inner(AR_f8, AR_f8), npt.NDArray[np.float64] | Any) +assert_type(np.inner(AR_c16, AR_c16), npt.NDArray[np.complex128] | Any) + +# assert_type(np.where([True, True, False]), tuple[npt.NDArray[np.intp], ...]) assert_type(np.where([True, True, False], 1, 0), npt.NDArray[Any]) From 07f410177d56af92c153c44c6e602acfe75a6b46 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 3 Apr 2026 21:47:35 +0200 Subject: [PATCH 1574/1718] TYP: sync ``inner`` with ``ma.inner`` --- numpy/ma/core.pyi | 37 ++++++++++++++++++++++++++++++++++++- 1 file changed, 36 insertions(+), 1 deletion(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 13df91ac4c36..2fd55419f8cf 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -302,6 +302,7 @@ _AnyNumericScalarT = TypeVar( ) # fmt: skip type _RealNumber = np.floating | np.integer +type _InnerScalar = np.number | np.bool | np.timedelta64 type _Ignored = object @@ -326,6 +327,9 @@ type _Array2D[ScalarT: np.generic] = np.ndarray[tuple[int, int], np.dtype[Scalar # Workaround for https://github.com/microsoft/pyright/issues/10232 type _ArrayNoD[ScalarT: np.generic] = np.ndarray[tuple[Never] | tuple[Never, Never], np.dtype[ScalarT]] +type _ToArray1D[ScalarT: np.generic] = _Array1D[ScalarT] | Sequence[ScalarT] +type _ToArray2D[ScalarT: np.generic] = _Array2D[ScalarT] | Sequence[Sequence[ScalarT]] + type _ConvertibleToInt = SupportsInt | SupportsIndex | _CharLike_co type _ConvertibleToFloat = SupportsFloat | SupportsIndex | _CharLike_co type _ConvertibleToComplex = SupportsComplex | SupportsFloat | SupportsIndex | _CharLike_co @@ -3804,7 +3808,38 @@ def round_[ArrayT: np.ndarray](a: ArrayLike, decimals: int, out: ArrayT) -> Arra def round_[ArrayT: np.ndarray](a: ArrayLike, decimals: int = 0, *, 
out: ArrayT) -> ArrayT: ... # keep in sync with `_core.multiarray.inner` -def inner(a: ArrayLike, b: ArrayLike) -> Incomplete: ... +@overload # (?d T, Nd T) -> 0d|Nd T (workaround) +def inner[ScalarT: _InnerScalar | np.object_](a: _ArrayNoD[ScalarT], b: _ArrayLike[ScalarT]) -> _MaskedArray[ScalarT] | Any: ... +@overload # (Nd T, ?d T) -> 0d|Nd T (workaround) +def inner[ScalarT: _InnerScalar | np.object_](a: _ArrayLike[ScalarT], b: _ArrayNoD[ScalarT]) -> _MaskedArray[ScalarT] | Any: ... +@overload # (1d T, 1d T) -> 0d T +def inner[ScalarT: _InnerScalar](a: _ToArray1D[ScalarT], b: _ToArray1D[ScalarT]) -> ScalarT: ... +@overload # (1d object_, 1d _) -> 0d object +def inner(a: _Array1D[np.object_], b: _Array1D[np.object_] | _ToArray1D[_InnerScalar]) -> Any: ... +@overload # (1d _, 1d object_) -> 0d object +def inner(a: _ToArray1D[_InnerScalar], b: _Array1D[np.object_]) -> Any: ... +@overload # (1d bool, 1d bool) -> bool_ +def inner(a: Sequence[bool], b: Sequence[bool]) -> np.bool: ... +@overload # (1d ~int, 1d +int) -> int_ +def inner(a: list[int], b: Sequence[int]) -> np.int_: ... +@overload # (1d +int, 1d ~int) -> int_ +def inner(a: Sequence[int], b: list[int]) -> np.int_: ... +@overload # (1d ~float, 1d +float) -> float64 +def inner(a: list[float], b: Sequence[float]) -> np.float64: ... +@overload # (1d +float, 1d ~float) -> float64 +def inner(a: Sequence[float], b: list[float]) -> np.float64: ... +@overload # (1d ~complex, 1d +complex) -> complex128 +def inner(a: list[complex], b: Sequence[complex]) -> np.complex128: ... +@overload # (1d +complex, 1d ~complex) -> complex128 +def inner(a: Sequence[complex], b: list[complex]) -> np.complex128: ... +@overload # (1d T, 2d T) -> 1d T +def inner[ScalarT: _InnerScalar | np.object_](a: _ToArray1D[ScalarT], b: _Array2D[ScalarT]) -> _Masked1D[ScalarT]: ... +@overload # (2d T, 1d T) -> 1d T +def inner[ScalarT: _InnerScalar | np.object_](a: _ToArray2D[ScalarT], b: _Array1D[ScalarT]) -> _Masked1D[ScalarT]: ... 
+@overload # (2d T, 2d T) -> 2d _Masked1D +def inner[ScalarT: _InnerScalar | np.object_](a: _ToArray2D[ScalarT], b: _Array2D[ScalarT]) -> _Masked2D[ScalarT]: ... +@overload # fallback +def inner(a: ArrayLike, b: ArrayLike) -> Any: ... innerproduct = inner From 1dab0a7f0c81a923fa34574f471b136a4c2b050c Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 3 Apr 2026 22:20:05 +0200 Subject: [PATCH 1575/1718] TYP: ``fromiter`` shape-typing and more dtype overloads --- numpy/_core/multiarray.pyi | 64 +++++++++++++++---- .../tests/data/reveal/array_constructors.pyi | 10 ++- 2 files changed, 61 insertions(+), 13 deletions(-) diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index 35111cf42225..0024877a6f40 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -994,23 +994,65 @@ def fromfile( like: _SupportsArrayFunc | None = ..., ) -> _Array1D[Any]: ... -@overload +# +@overload # dtype= def fromiter[ScalarT: np.generic]( - iter: Iterable[Any], + iter: Iterable[_ScalarLike_co], dtype: _DTypeLike[ScalarT], - count: SupportsIndex = ..., + count: SupportsIndex = -1, *, - like: _SupportsArrayFunc | None = ..., -) -> NDArray[ScalarT]: ... -@overload + like: _SupportsArrayFunc | None = None, +) -> _Array1D[ScalarT]: ... +@overload # dtype=None def fromiter( - iter: Iterable[Any], - dtype: DTypeLike | None, - count: SupportsIndex = ..., + iter: Iterable[_ScalarLike_co], + dtype: None, + count: SupportsIndex = -1, *, - like: _SupportsArrayFunc | None = ..., -) -> NDArray[Any]: ... + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.float64]: ... +@overload # dtype=bool +def fromiter( + iter: Iterable[_ScalarLike_co], + dtype: type[bool], + count: SupportsIndex = -1, + *, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.bool]: ... 
+@overload # dtype=int +def fromiter( + iter: Iterable[_ScalarLike_co], + dtype: type[int], + count: SupportsIndex = -1, + *, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.int_ | Any]: ... +@overload # dtype=float +def fromiter( + iter: Iterable[_ScalarLike_co], + dtype: type[float], + count: SupportsIndex = -1, + *, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.float64 | Any]: ... +@overload # dtype=complex +def fromiter( + iter: Iterable[_ScalarLike_co], + dtype: type[complex], + count: SupportsIndex = -1, + *, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.complex128 | Any]: ... +@overload # dtype= +def fromiter( + iter: Iterable[_ScalarLike_co], + dtype: DTypeLike, + count: SupportsIndex = -1, + *, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[Any]: ... +# @overload def frombuffer( buffer: Buffer, diff --git a/numpy/typing/tests/data/reveal/array_constructors.pyi b/numpy/typing/tests/data/reveal/array_constructors.pyi index ffe0834e3309..4f6ba485cb58 100644 --- a/numpy/typing/tests/data/reveal/array_constructors.pyi +++ b/numpy/typing/tests/data/reveal/array_constructors.pyi @@ -107,8 +107,14 @@ with open("test.txt") as f: assert_type(np.fromfile(b"test.txt", sep=" "), _Array1D[np.float64]) assert_type(np.fromfile(Path("test.txt"), sep=" "), _Array1D[np.float64]) -assert_type(np.fromiter("12345", np.float64), npt.NDArray[np.float64]) -assert_type(np.fromiter("12345", float), npt.NDArray[Any]) +assert_type(np.fromiter("12345", np.float32), _Array1D[np.float32]) +assert_type(np.fromiter("12345", np.float64), _Array1D[np.float64]) +assert_type(np.fromiter("12345", bool), _Array1D[np.bool]) +assert_type(np.fromiter("12345", int), _Array1D[np.int_ | Any]) +assert_type(np.fromiter("12345", float), _Array1D[np.float64 | Any]) +assert_type(np.fromiter("12345", complex), _Array1D[np.complex128 | Any]) +assert_type(np.fromiter("12345", None), _Array1D[np.float64]) +assert_type(np.fromiter("12345", object), 
_Array1D[Any]) assert_type(np.frombuffer(A), _Array1D[np.float64]) assert_type(np.frombuffer(A, dtype=np.int64), _Array1D[np.int64]) From c08e4a2aaa75a7b34291c0ba40b8702a39582108 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 3 Apr 2026 22:53:47 +0200 Subject: [PATCH 1576/1718] TYP: ``asarray`` shape-typing --- numpy/_core/multiarray.pyi | 187 ++++++++++++++++-- .../tests/data/reveal/array_constructors.pyi | 28 ++- 2 files changed, 196 insertions(+), 19 deletions(-) diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index b41780b77354..4f11354e2eae 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -781,35 +781,186 @@ def unpackbits( def shares_memory(a: object, b: object, /, max_work: _MaxWork = -1) -> bool: ... def may_share_memory(a: object, b: object, /, max_work: _MaxWork = 0) -> bool: ... -@overload +# +@overload # ndarray +def asarray[ShapeT: _Shape, DTypeT: np.dtype]( + a: np.ndarray[ShapeT, DTypeT], + dtype: None = None, + order: _OrderKACF = None, + *, + device: L["cpu"] | None = None, + copy: bool | None = None, + like: _SupportsArrayFunc | None = None, +) -> np.ndarray[ShapeT, DTypeT]: ... +@overload # ndarray, dtype: +def asarray[ShapeT: _Shape, ScalarT: np.generic]( + a: np.ndarray[ShapeT], + dtype: _DTypeLike[ScalarT], + order: _OrderKACF = None, + *, + device: L["cpu"] | None = None, + copy: bool | None = None, + like: _SupportsArrayFunc | None = None, +) -> np.ndarray[ShapeT, np.dtype[ScalarT]]: ... +@overload # ndarray, dtype: +def asarray[ShapeT: _Shape]( + a: np.ndarray[ShapeT], + dtype: DTypeLike, + order: _OrderKACF = None, + *, + device: L["cpu"] | None = None, + copy: bool | None = None, + like: _SupportsArrayFunc | None = None, +) -> np.ndarray[ShapeT]: ... 
+@overload # 1d bool +def asarray( + a: Sequence[bool], + dtype: None = None, + order: _OrderKACF = None, + *, + device: L["cpu"] | None = None, + copy: bool | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.bool]: ... +@overload # 1d ~int +def asarray( + a: list[int], + dtype: None = None, + order: _OrderKACF = None, + *, + device: L["cpu"] | None = None, + copy: bool | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.int_]: ... +@overload # 1d ~float +def asarray( + a: list[float], + dtype: None = None, + order: _OrderKACF = None, + *, + device: L["cpu"] | None = None, + copy: bool | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.float64]: ... +@overload # 1d ~complex +def asarray( + a: list[complex], + dtype: None = None, + order: _OrderKACF = None, + *, + device: L["cpu"] | None = None, + copy: bool | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.complex128]: ... +@overload # 1d _, dtype: +def asarray[ScalarT: np.generic]( + a: Sequence[complex | np.generic], + dtype: _DTypeLike[ScalarT], + order: _OrderKACF = None, + *, + device: L["cpu"] | None = None, + copy: bool | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[ScalarT]: ... +@overload # 1d _, dtype: +def asarray( + a: Sequence[complex | np.generic], + dtype: DTypeLike, + order: _OrderKACF = None, + *, + device: L["cpu"] | None = None, + copy: bool | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[Any]: ... +@overload # 2d bool +def asarray( + a: Sequence[Sequence[bool]], + dtype: None = None, + order: _OrderKACF = None, + *, + device: L["cpu"] | None = None, + copy: bool | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array2D[np.bool]: ... 
+@overload # 2d ~int +def asarray( + a: Sequence[list[int]], + dtype: None = None, + order: _OrderKACF = None, + *, + device: L["cpu"] | None = None, + copy: bool | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array2D[np.int_]: ... +@overload # 2d ~float +def asarray( + a: Sequence[list[float]], + dtype: None = None, + order: _OrderKACF = None, + *, + device: L["cpu"] | None = None, + copy: bool | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array2D[np.float64]: ... +@overload # 2d ~complex +def asarray( + a: Sequence[list[complex]], + dtype: None = None, + order: _OrderKACF = None, + *, + device: L["cpu"] | None = None, + copy: bool | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array2D[np.complex128]: ... +@overload # 2d _, dtype: +def asarray[ScalarT: np.generic]( + a: Sequence[Sequence[complex | np.generic]], + dtype: _DTypeLike[ScalarT], + order: _OrderKACF = None, + *, + device: L["cpu"] | None = None, + copy: bool | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array2D[ScalarT]: ... +@overload # 2d _, dtype: +def asarray( + a: Sequence[Sequence[complex | np.generic]], + dtype: DTypeLike, + order: _OrderKACF = None, + *, + device: L["cpu"] | None = None, + copy: bool | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array2D[Any]: ... +@overload # known array-like def asarray[ScalarT: np.generic]( a: _ArrayLike[ScalarT], dtype: None = None, - order: _OrderKACF = ..., + order: _OrderKACF = None, *, - device: L["cpu"] | None = ..., - copy: bool | None = ..., - like: _SupportsArrayFunc | None = ..., + device: L["cpu"] | None = None, + copy: bool | None = None, + like: _SupportsArrayFunc | None = None, ) -> NDArray[ScalarT]: ... 
-@overload +@overload # array-like, dtype: def asarray[ScalarT: np.generic]( - a: Any, + a: ArrayLike, dtype: _DTypeLike[ScalarT], - order: _OrderKACF = ..., + order: _OrderKACF = None, *, - device: L["cpu"] | None = ..., - copy: bool | None = ..., - like: _SupportsArrayFunc | None = ..., + device: L["cpu"] | None = None, + copy: bool | None = None, + like: _SupportsArrayFunc | None = None, ) -> NDArray[ScalarT]: ... -@overload +@overload # fallback def asarray( - a: Any, - dtype: DTypeLike | None = ..., - order: _OrderKACF = ..., + a: ArrayLike, + dtype: DTypeLike | None = None, + order: _OrderKACF = None, *, - device: L["cpu"] | None = ..., - copy: bool | None = ..., - like: _SupportsArrayFunc | None = ..., + device: L["cpu"] | None = None, + copy: bool | None = None, + like: _SupportsArrayFunc | None = None, ) -> NDArray[Any]: ... @overload diff --git a/numpy/typing/tests/data/reveal/array_constructors.pyi b/numpy/typing/tests/data/reveal/array_constructors.pyi index 4f6ba485cb58..04dfe94792b8 100644 --- a/numpy/typing/tests/data/reveal/array_constructors.pyi +++ b/numpy/typing/tests/data/reveal/array_constructors.pyi @@ -7,6 +7,7 @@ import numpy.typing as npt from numpy._typing import _AnyShape type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] +type _Array2D[ScalarT: np.generic] = np.ndarray[tuple[int, int], np.dtype[ScalarT]] class SubClass[ScalarT: np.generic](np.ndarray[_AnyShape, np.dtype[ScalarT]]): ... @@ -21,6 +22,16 @@ C: list[int] D: SubClass[np.float64 | np.int64] E: IntoSubClass[np.float64 | np.int64] +_f32_1d: _Array1D[np.float32] +_py_b_1d: list[bool] +_py_b_2d: list[list[bool]] +_py_i_1d: list[int] +_py_i_2d: list[list[int]] +_py_f_1d: list[float] +_py_f_2d: list[list[float]] +_py_c_1d: list[complex] +_py_c_2d: list[list[complex]] + mixed_shape: tuple[int, np.int64] def func(i: int, j: int, **kwargs: Any) -> SubClass[np.float64]: ... 
@@ -70,9 +81,24 @@ assert_type(np.concatenate([1, 1.0], out=A), npt.NDArray[np.float64]) assert_type(np.asarray(A), npt.NDArray[np.float64]) assert_type(np.asarray(B), npt.NDArray[np.float64]) -assert_type(np.asarray([1, 1.0]), npt.NDArray[Any]) +assert_type(np.asarray(C), _Array1D[np.int_]) assert_type(np.asarray(A, dtype=np.int64), npt.NDArray[np.int64]) assert_type(np.asarray(A, dtype="c16"), npt.NDArray[Any]) +assert_type(np.asarray(_f32_1d), _Array1D[np.float32]) +assert_type(np.asarray(_f32_1d, dtype=np.float64), _Array1D[np.float64]) +assert_type(np.asarray(_f32_1d, dtype="f8"), _Array1D[Any]) +assert_type(np.asarray(_py_b_1d), _Array1D[np.bool_]) +assert_type(np.asarray(_py_b_2d), _Array2D[np.bool_]) +assert_type(np.asarray(_py_i_1d), _Array1D[np.int_]) +assert_type(np.asarray(_py_i_2d), _Array2D[np.int_]) +assert_type(np.asarray(_py_f_1d), _Array1D[np.float64]) +assert_type(np.asarray(_py_f_2d), _Array2D[np.float64]) +assert_type(np.asarray(_py_c_1d), _Array1D[np.complex128]) +assert_type(np.asarray(_py_c_2d), _Array2D[np.complex128]) +assert_type(np.asarray(_py_i_1d, dtype=np.float32), _Array1D[np.float32]) +assert_type(np.asarray(_py_i_1d, dtype="f4"), _Array1D[Any]) +assert_type(np.asarray(_py_i_2d, dtype=np.float32), _Array2D[np.float32]) +assert_type(np.asarray(_py_i_2d, dtype="f4"), _Array2D[Any]) assert_type(np.asanyarray(A), npt.NDArray[np.float64]) assert_type(np.asanyarray(B), SubClass[np.float64]) From f6909f110da8aea0377b2674e75a5951fd5ffb3e Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 3 Apr 2026 22:59:50 +0200 Subject: [PATCH 1577/1718] TYP: ``ascontiguousarray`` and ``asfortranarray`` preliminary shape-typing --- numpy/_core/multiarray.pyi | 46 +++++++++++++------ .../tests/data/reveal/array_constructors.pyi | 2 + 2 files changed, 34 insertions(+), 14 deletions(-) diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index 4f11354e2eae..718a78e0cc14 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi 
@@ -944,7 +944,7 @@ def asarray[ScalarT: np.generic]( ) -> NDArray[ScalarT]: ... @overload # array-like, dtype: def asarray[ScalarT: np.generic]( - a: ArrayLike, + a: object, dtype: _DTypeLike[ScalarT], order: _OrderKACF = None, *, @@ -954,7 +954,7 @@ def asarray[ScalarT: np.generic]( ) -> NDArray[ScalarT]: ... @overload # fallback def asarray( - a: ArrayLike, + a: object, dtype: DTypeLike | None = None, order: _OrderKACF = None, *, @@ -963,6 +963,7 @@ def asarray( like: _SupportsArrayFunc | None = None, ) -> NDArray[Any]: ... +# @overload def asanyarray[ArrayT: np.ndarray]( a: ArrayT, # Preserve subclass-information @@ -1004,50 +1005,67 @@ def asanyarray( like: _SupportsArrayFunc | None = ..., ) -> NDArray[Any]: ... +# +@overload +def ascontiguousarray[ShapeT: _Shape, DTypeT: np.dtype]( + a: np.ndarray[ShapeT, DTypeT], + dtype: None = None, + *, + like: _SupportsArrayFunc | None = None, +) -> np.ndarray[ShapeT, DTypeT]: ... @overload def ascontiguousarray[ScalarT: np.generic]( a: _ArrayLike[ScalarT], dtype: None = None, *, - like: _SupportsArrayFunc | None = ..., + like: _SupportsArrayFunc | None = None, ) -> NDArray[ScalarT]: ... @overload def ascontiguousarray[ScalarT: np.generic]( - a: Any, + a: object, dtype: _DTypeLike[ScalarT], *, - like: _SupportsArrayFunc | None = ..., + like: _SupportsArrayFunc | None = None, ) -> NDArray[ScalarT]: ... @overload def ascontiguousarray( - a: Any, - dtype: DTypeLike | None = ..., + a: object, + dtype: DTypeLike | None = None, *, - like: _SupportsArrayFunc | None = ..., + like: _SupportsArrayFunc | None = None, ) -> NDArray[Any]: ... +# +@overload +def asfortranarray[ShapeT: _Shape, DTypeT: np.dtype]( + a: np.ndarray[ShapeT, DTypeT], + dtype: None = None, + *, + like: _SupportsArrayFunc | None = None, +) -> np.ndarray[ShapeT, DTypeT]: ... 
@overload def asfortranarray[ScalarT: np.generic]( a: _ArrayLike[ScalarT], dtype: None = None, *, - like: _SupportsArrayFunc | None = ..., + like: _SupportsArrayFunc | None = None, ) -> NDArray[ScalarT]: ... @overload def asfortranarray[ScalarT: np.generic]( - a: Any, + a: object, dtype: _DTypeLike[ScalarT], *, - like: _SupportsArrayFunc | None = ..., + like: _SupportsArrayFunc | None = None, ) -> NDArray[ScalarT]: ... @overload def asfortranarray( - a: Any, - dtype: DTypeLike | None = ..., + a: object, + dtype: DTypeLike | None = None, *, - like: _SupportsArrayFunc | None = ..., + like: _SupportsArrayFunc | None = None, ) -> NDArray[Any]: ... +# def promote_types(__type1: DTypeLike, __type2: DTypeLike) -> dtype: ... # `sep` is a de facto mandatory argument, as its default value is deprecated diff --git a/numpy/typing/tests/data/reveal/array_constructors.pyi b/numpy/typing/tests/data/reveal/array_constructors.pyi index 04dfe94792b8..68954a60b7f2 100644 --- a/numpy/typing/tests/data/reveal/array_constructors.pyi +++ b/numpy/typing/tests/data/reveal/array_constructors.pyi @@ -111,12 +111,14 @@ assert_type(np.ascontiguousarray(B), npt.NDArray[np.float64]) assert_type(np.ascontiguousarray([1, 1.0]), npt.NDArray[Any]) assert_type(np.ascontiguousarray(A, dtype=np.int64), npt.NDArray[np.int64]) assert_type(np.ascontiguousarray(A, dtype="c16"), npt.NDArray[Any]) +assert_type(np.ascontiguousarray(_f32_1d), _Array1D[np.float32]) assert_type(np.asfortranarray(A), npt.NDArray[np.float64]) assert_type(np.asfortranarray(B), npt.NDArray[np.float64]) assert_type(np.asfortranarray([1, 1.0]), npt.NDArray[Any]) assert_type(np.asfortranarray(A, dtype=np.int64), npt.NDArray[np.int64]) assert_type(np.asfortranarray(A, dtype="c16"), npt.NDArray[Any]) +assert_type(np.asfortranarray(_f32_1d), _Array1D[np.float32]) assert_type(np.fromstring("1 1 1", sep=" "), _Array1D[np.float64]) assert_type(np.fromstring(b"1 1 1", sep=" "), _Array1D[np.float64]) From 
19484f0ae79581b1dbb00f8817ff2dd317505e64 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 3 Apr 2026 23:10:43 +0200 Subject: [PATCH 1578/1718] TYP: revert ``fromiter`` narrowed input type --- numpy/_core/multiarray.pyi | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index b41780b77354..33bb826b5372 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -1036,7 +1036,7 @@ def fromfile( # @overload # dtype= def fromiter[ScalarT: np.generic]( - iter: Iterable[_ScalarLike_co], + iter: Iterable[object], dtype: _DTypeLike[ScalarT], count: SupportsIndex = -1, *, @@ -1044,7 +1044,7 @@ def fromiter[ScalarT: np.generic]( ) -> _Array1D[ScalarT]: ... @overload # dtype=None def fromiter( - iter: Iterable[_ScalarLike_co], + iter: Iterable[object], dtype: None, count: SupportsIndex = -1, *, @@ -1052,7 +1052,7 @@ def fromiter( ) -> _Array1D[np.float64]: ... @overload # dtype=bool def fromiter( - iter: Iterable[_ScalarLike_co], + iter: Iterable[object], dtype: type[bool], count: SupportsIndex = -1, *, @@ -1060,7 +1060,7 @@ def fromiter( ) -> _Array1D[np.bool]: ... @overload # dtype=int def fromiter( - iter: Iterable[_ScalarLike_co], + iter: Iterable[object], dtype: type[int], count: SupportsIndex = -1, *, @@ -1068,7 +1068,7 @@ def fromiter( ) -> _Array1D[np.int_ | Any]: ... @overload # dtype=float def fromiter( - iter: Iterable[_ScalarLike_co], + iter: Iterable[object], dtype: type[float], count: SupportsIndex = -1, *, @@ -1076,7 +1076,7 @@ def fromiter( ) -> _Array1D[np.float64 | Any]: ... @overload # dtype=complex def fromiter( - iter: Iterable[_ScalarLike_co], + iter: Iterable[object], dtype: type[complex], count: SupportsIndex = -1, *, @@ -1084,7 +1084,7 @@ def fromiter( ) -> _Array1D[np.complex128 | Any]: ... 
@overload # dtype= def fromiter( - iter: Iterable[_ScalarLike_co], + iter: Iterable[object], dtype: DTypeLike, count: SupportsIndex = -1, *, From 106ecff89a80130a587c3ac07ea5d4fcbc68428e Mon Sep 17 00:00:00 2001 From: Noxaster <208382900+Noxaster@users.noreply.github.com> Date: Sat, 4 Apr 2026 05:10:01 +0200 Subject: [PATCH 1579/1718] MAINT: Replace %-formatting with f-strings in remaining modules (UP031) --- benchmarks/benchmarks/bench_linalg.py | 3 +- doc/source/conf.py | 6 ++-- numpy/ctypeslib/_ctypeslib.py | 4 +-- numpy/lib/tests/test_io.py | 6 ++-- numpy/linalg/_linalg.py | 12 +++---- numpy/linalg/lapack_lite/fortran.py | 4 +-- numpy/linalg/tests/test_linalg.py | 3 +- numpy/ma/core.py | 2 +- numpy/random/_examples/cffi/extending.py | 2 +- numpy/random/tests/test_generator_mt19937.py | 4 +-- numpy/random/tests/test_random.py | 3 +- numpy/random/tests/test_randomstate.py | 3 +- numpy/testing/_private/extbuild.py | 36 +++++++++----------- numpy/testing/_private/utils.py | 23 ++++++------- numpy/tests/test_ctypeslib.py | 2 +- ruff.toml | 1 - tools/c_coverage/c_coverage_report.py | 12 +++---- tools/check_installed_files.py | 4 +-- tools/refguide_check.py | 6 ++-- 19 files changed, 66 insertions(+), 70 deletions(-) diff --git a/benchmarks/benchmarks/bench_linalg.py b/benchmarks/benchmarks/bench_linalg.py index 49a7ae84fde6..6c8f4b15e102 100644 --- a/benchmarks/benchmarks/bench_linalg.py +++ b/benchmarks/benchmarks/bench_linalg.py @@ -252,7 +252,8 @@ class MatmulStrided(Benchmark): def __init__(self): self.args_map = { - 'matmul_m%03d_p%03d_n%03d_bs%02d' % arg: arg for arg in self.args + f'matmul_m{arg[0]:03}_p{arg[1]:03}_n{arg[2]:03}_bs{arg[3]:02}': arg + for arg in self.args } self.params = [list(self.args_map.keys())] diff --git a/doc/source/conf.py b/doc/source/conf.py index a36cb6bdecb9..e4909c11bd63 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -583,7 +583,7 @@ def linkcode_resolve(domain, info): fn = relpath(fn, start=dirname(numpy.__file__)) if 
lineno: - linespec = "#L%d-L%d" % (lineno, lineno + len(source) - 1) + linespec = f"#L{lineno}-L{lineno + len(source) - 1}" else: linespec = "" @@ -593,8 +593,8 @@ def linkcode_resolve(domain, info): if 'dev' in numpy.__version__: return f"https://github.com/numpy/numpy/blob/main/numpy/{fn}{linespec}" else: - return "https://github.com/numpy/numpy/blob/v%s/numpy/%s%s" % ( - numpy.__version__, fn, linespec) + return (f"https://github.com/numpy/numpy/blob/v{numpy.__version__}/" + f"numpy/{fn}{linespec}") from pygments.lexer import inherit diff --git a/numpy/ctypeslib/_ctypeslib.py b/numpy/ctypeslib/_ctypeslib.py index a18e11810418..2ac905f166fc 100644 --- a/numpy/ctypeslib/_ctypeslib.py +++ b/numpy/ctypeslib/_ctypeslib.py @@ -193,7 +193,7 @@ def from_param(cls, obj): raise TypeError(f"array must have data type {cls._dtype_}") if cls._ndim_ is not None \ and obj.ndim != cls._ndim_: - raise TypeError("array must have %d dimension(s)" % cls._ndim_) + raise TypeError(f"array must have {cls._ndim_} dimension(s)") if cls._shape_ is not None \ and obj.shape != cls._shape_: raise TypeError(f"array must have shape {str(cls._shape_)}") @@ -333,7 +333,7 @@ def ndpointer(dtype=None, ndim=None, shape=None, flags=None): else: name = dtype.str if ndim is not None: - name += "_%dd" % ndim + name += f"_{ndim}d" if shape is not None: name += "_" + "x".join(str(x) for x in shape) if flags is not None: diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py index 0725da3041a7..3112b6aeff32 100644 --- a/numpy/lib/tests/test_io.py +++ b/numpy/lib/tests/test_io.py @@ -1019,7 +1019,7 @@ def test_dtype_with_object(self): def test_uint64_type(self): tgt = (9223372043271415339, 9223372043271415853) c = TextIO() - c.write("%s %s" % tgt) + c.write(f'{tgt[0]} {tgt[1]}') c.seek(0) res = np.loadtxt(c, dtype=np.uint64) assert_equal(res, tgt) @@ -1027,7 +1027,7 @@ def test_uint64_type(self): def test_int64_type(self): tgt = (-9223372036854775807, 9223372036854775807) c = TextIO() - 
c.write("%s %s" % tgt) + c.write(f'{tgt[0]} {tgt[1]}') c.seek(0) res = np.loadtxt(c, dtype=np.int64) assert_equal(res, tgt) @@ -1069,7 +1069,7 @@ def test_default_float_converter_exception(self): def test_from_complex(self): tgt = (complex(1, 1), complex(1, -1)) c = TextIO() - c.write("%s %s" % tgt) + c.write(f'{tgt[0]} {tgt[1]}') c.seek(0) res = np.loadtxt(c, dtype=complex) assert_equal(res, tgt) diff --git a/numpy/linalg/_linalg.py b/numpy/linalg/_linalg.py index c0a42f302055..f73e658dd82a 100644 --- a/numpy/linalg/_linalg.py +++ b/numpy/linalg/_linalg.py @@ -239,22 +239,22 @@ def _to_native_byte_order(*arrays): def _assert_2d(*arrays): for a in arrays: if a.ndim != 2: - raise LinAlgError('%d-dimensional array given. Array must be ' - 'two-dimensional' % a.ndim) + raise LinAlgError(f'{a.ndim}-dimensional array given. Array must be ' + 'two-dimensional') def _assert_stacked_2d(*arrays): for a in arrays: if a.ndim < 2: - raise LinAlgError('%d-dimensional array given. Array must be ' - 'at least two-dimensional' % a.ndim) + raise LinAlgError(f'{a.ndim}-dimensional array given. Array must be ' + 'at least two-dimensional') def _assert_stacked_square(*arrays): for a in arrays: try: m, n = a.shape[-2:] except ValueError: - raise LinAlgError('%d-dimensional array given. Array must be ' - 'at least two-dimensional' % a.ndim) + raise LinAlgError(f'{a.ndim}-dimensional array given. 
Array must be ' + 'at least two-dimensional') if m != n: raise LinAlgError('Last 2 dimensions of the array must be square') diff --git a/numpy/linalg/lapack_lite/fortran.py b/numpy/linalg/lapack_lite/fortran.py index 22eb666ef26f..6cf26f2adf2d 100644 --- a/numpy/linalg/lapack_lite/fortran.py +++ b/numpy/linalg/lapack_lite/fortran.py @@ -104,8 +104,8 @@ def fortranSourceLines(fo): break yield numberingiter.lineno, ''.join(lines) else: - raise ValueError("jammed: continuation line not expected: %s:%d" % - (fo.name, numberingiter.lineno)) + raise ValueError("jammed: continuation line not expected: " + f"{fo.name}:{numberingiter.lineno}") def getDependencies(filename): """For a Fortran source file, return a list of routines declared as EXTERNAL diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py index cd93acaf79c0..f576b9ef0df4 100644 --- a/numpy/linalg/tests/test_linalg.py +++ b/numpy/linalg/tests/test_linalg.py @@ -329,7 +329,8 @@ def _stride_comb_iter(x): xi[...] 
= x xi = xi.view(x.__class__) assert_(np.all(xi == x)) - yield xi, "stride_" + "_".join(["%+d" % j for j in repeats]) + parts = [f"{j:+}" for j in repeats] + yield xi, "stride_" + "_".join(parts) # generate also zero strides if possible if x.ndim >= 1 and x.shape[-1] == 1: diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 28200da59675..f384770149b2 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -1989,7 +1989,7 @@ def masked_where(condition, a, copy=True): (cshape, ashape) = (cond.shape, a.shape) if cshape and cshape != ashape: raise IndexError("Inconsistent shape between the condition and the input" - " (got %s and %s)" % (cshape, ashape)) + f" (got {cshape} and {ashape})") if hasattr(a, '_mask'): cond = mask_or(cond, a._mask) cls = type(a) diff --git a/numpy/random/_examples/cffi/extending.py b/numpy/random/_examples/cffi/extending.py index ad4c9acbdceb..08cf50491cc8 100644 --- a/numpy/random/_examples/cffi/extending.py +++ b/numpy/random/_examples/cffi/extending.py @@ -32,7 +32,7 @@ interface = rng.bit_generator.cffi n = 100 -vals_cffi = ffi.new('double[%d]' % n) +vals_cffi = ffi.new(f'double[{n}]') lib.random_standard_normal_fill(interface.bit_generator, n, vals_cffi) # reset the state diff --git a/numpy/random/tests/test_generator_mt19937.py b/numpy/random/tests/test_generator_mt19937.py index 2f066fb725b9..13fa26d443b9 100644 --- a/numpy/random/tests/test_generator_mt19937.py +++ b/numpy/random/tests/test_generator_mt19937.py @@ -462,7 +462,7 @@ def test_full_range(self, endpoint): except Exception as e: raise AssertionError("No error should have been raised, " "but one was with the following " - "message:\n\n%s" % str(e)) + f"message:\n\n{e}") def test_full_range_array(self, endpoint): # Test for ticket #1690 @@ -477,7 +477,7 @@ def test_full_range_array(self, endpoint): except Exception as e: raise AssertionError("No error should have been raised, " "but one was with the following " - "message:\n\n%s" % str(e)) + f"message:\n\n{e}") def 
test_in_bounds_fuzz(self, endpoint): # Don't use fixed seed diff --git a/numpy/random/tests/test_random.py b/numpy/random/tests/test_random.py index f110aa892b31..f9ea843e12b5 100644 --- a/numpy/random/tests/test_random.py +++ b/numpy/random/tests/test_random.py @@ -210,8 +210,7 @@ def test_full_range(self): rng.randint(lbnd, ubnd, dtype=dt) except Exception as e: raise AssertionError("No error should have been raised, " - "but one was with the following " - "message:\n\n%s" % str(e)) + f"but one was with the following message:\n\n{e}") def test_in_bounds_fuzz(self): # Don't use fixed seed diff --git a/numpy/random/tests/test_randomstate.py b/numpy/random/tests/test_randomstate.py index 63ffb5a86389..1ff148b53a75 100644 --- a/numpy/random/tests/test_randomstate.py +++ b/numpy/random/tests/test_randomstate.py @@ -333,8 +333,7 @@ def test_full_range(self): rng.randint(lbnd, ubnd, dtype=dt) except Exception as e: raise AssertionError("No error should have been raised, " - "but one was with the following " - "message:\n\n%s" % str(e)) + f"but one was with the following message:\n\n{e}") def test_in_bounds_fuzz(self): # Don't use fixed seed diff --git a/numpy/testing/_private/extbuild.py b/numpy/testing/_private/extbuild.py index 2a724b73cfc3..25b691cf07cb 100644 --- a/numpy/testing/_private/extbuild.py +++ b/numpy/testing/_private/extbuild.py @@ -139,8 +139,7 @@ def _make_methods(functions, modname): signature = '(PyObject *self, PyObject *args, PyObject *kwargs)' else: signature = '(PyObject *self, PyObject *args)' - methods_table.append( - "{\"%s\", (PyCFunction)%s, %s}," % (funcname, cfuncname, flags)) + methods_table.append(f'{{"{funcname}", (PyCFunction){cfuncname}, {flags}}},') func_code = f""" static PyObject* {cfuncname}{signature} {{ @@ -149,37 +148,36 @@ def _make_methods(functions, modname): """ codes.append(func_code) - body = "\n".join(codes) + """ - static PyMethodDef methods[] = { - %(methods)s - { NULL } - }; - static struct PyModuleDef moduledef = { + 
methods_str = '\n'.join(methods_table) + body = "\n".join(codes) + f""" + static PyMethodDef methods[] = {{ + {methods_str} + {{ NULL }} + }}; + static struct PyModuleDef moduledef = {{ PyModuleDef_HEAD_INIT, - "%(modname)s", /* m_name */ + "{modname}", /* m_name */ NULL, /* m_doc */ -1, /* m_size */ methods, /* m_methods */ - }; - """ % {'methods': '\n'.join(methods_table), 'modname': modname} + }}; + """ return body def _make_source(name, init, body): """ Combines the code fragments into source code ready to be compiled """ - code = """ + code = f""" #include - %(body)s + {body} PyMODINIT_FUNC - PyInit_%(name)s(void) { - %(init)s - } - """ % { - 'name': name, 'init': init, 'body': body, - } + PyInit_{name}(void) {{ + {init} + }} + """ return code diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index 1833f5b57cb4..6b9bf6bca5eb 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -589,7 +589,7 @@ def assert_almost_equal(actual, desired, decimal=7, err_msg='', verbose=True): usecomplex = False def _build_err_msg(): - header = ('Arrays are not almost equal to %d decimals' % decimal) + header = (f'Arrays are not almost equal to {decimal} decimals') return build_err_msg([actual, desired], err_msg, verbose=verbose, header=header) @@ -712,7 +712,7 @@ def assert_approx_equal(actual, desired, significant=7, err_msg='', sc_actual = 0.0 msg = build_err_msg( [actual, desired], err_msg, - header='Items are not equal to %d significant digits:' % significant, + header=f'Items are not equal to {significant} significant digits:', verbose=verbose) try: # If one of desired/actual is not finite, handle it specially here: @@ -784,8 +784,8 @@ def func_assert_same_pos(x, y, func=isnan, hasval='nan'): if robust_any_difference(x_id, y_id): msg = build_err_msg( [x, y], - err_msg + '\n%s location mismatch:' - % (hasval), verbose=verbose, header=header, + err_msg + f'\n{hasval} location mismatch:', + verbose=verbose, 
header=header, names=names, precision=precision) raise AssertionError(msg) @@ -1222,9 +1222,8 @@ def compare(x, y): return z < 1.5 * 10.0**(-decimal) - assert_array_compare(compare, actual, desired, err_msg=err_msg, - verbose=verbose, - header=('Arrays are not almost equal to %d decimals' % decimal), + assert_array_compare(compare, actual, desired, err_msg=err_msg, verbose=verbose, + header=(f'Arrays are not almost equal to {decimal} decimals'), precision=decimal) @@ -1465,7 +1464,8 @@ def rundocs(filename=None, raise_on_error=True): runner.run(test, out=out) if runner.failures > 0 and raise_on_error: - raise AssertionError("Some doctests failed:\n%s" % "\n".join(msg)) + err_msg = '\n'.join(msg) + raise AssertionError(f"Some doctests failed:\n{err_msg}") def check_support_sve(__cache=[]): @@ -1570,7 +1570,7 @@ def decorate_methods(cls, decorator, testmatch=None): """ if testmatch is None: - testmatch = re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep) + testmatch = re.compile(rf'(?:^|[\\b_\\.{os.sep}-])[Tt]est') else: testmatch = re.compile(testmatch) cls_attr = cls.__dict__ @@ -1887,9 +1887,8 @@ def assert_array_max_ulp(a, b, maxulp=1, dtype=None): import numpy as np ret = nulp_diff(a, b, dtype) if not np.all(ret <= maxulp): - raise AssertionError("Arrays are not almost equal up to %g " - "ULP (max difference is %g ULP)" % - (maxulp, np.max(ret))) + raise AssertionError(f"Arrays are not almost equal up to {maxulp:g} " + f"ULP (max difference is {np.max(ret):g} ULP)") return ret diff --git a/numpy/tests/test_ctypeslib.py b/numpy/tests/test_ctypeslib.py index 31e2268d0957..2458c393db04 100644 --- a/numpy/tests/test_ctypeslib.py +++ b/numpy/tests/test_ctypeslib.py @@ -65,7 +65,7 @@ def test_basic2(self): np._core._multiarray_umath.__file__) except ImportError as e: msg = ("ctypes is not available on this python: skipping the test" - " (import error was: %s)" % str(e)) + f" (import error was: {e})") print(msg) diff --git a/ruff.toml b/ruff.toml index 
f204c77545c0..f0f34c2e77dd 100644 --- a/ruff.toml +++ b/ruff.toml @@ -74,7 +74,6 @@ ignore = [ "F841", # Local variable is assigned to but never used # pyupgrade "UP015" , # Unnecessary mode argument - "UP031", # TODO: Use format specifiers instead of percent format ] [lint.per-file-ignores] diff --git a/tools/c_coverage/c_coverage_report.py b/tools/c_coverage/c_coverage_report.py index d188b3280e95..76846c22f564 100755 --- a/tools/c_coverage/c_coverage_report.py +++ b/tools/c_coverage/c_coverage_report.py @@ -32,9 +32,9 @@ def wrap(self, source, outfile): for i, (c, t) in enumerate(HtmlFormatter.wrap(self, source, outfile)): as_functions = self.lines.get(i - 1, None) if as_functions is not None: - yield 0, ('
[%2d]' % - (quoteattr('as ' + ', '.join(as_functions)), - len(as_functions))) + title = quoteattr('as ' + ', '.join(as_functions)) + count = len(as_functions) + yield 0, f'
[{count:2}]' else: yield 0, ' ' yield c, t @@ -107,9 +107,9 @@ def write_html(self, root): fd.write("") paths = sorted(self.files.keys()) for path in paths: - fd.write('

%s

' % - (self.clean_path(path), - escape(path[len(self.prefix):]))) + href = self.clean_path(path) + label = escape(path[len(self.prefix):]) + fd.write(f'

{label}

') fd.write("") diff --git a/tools/check_installed_files.py b/tools/check_installed_files.py index fd66b68a43fc..9f2fc0fc0ad3 100644 --- a/tools/check_installed_files.py +++ b/tools/check_installed_files.py @@ -46,8 +46,8 @@ def main(install_dir, tests_check): if tests_check == "--no-tests": if len(installed_test_files) > 0: - raise Exception("Test files aren't expected to be installed in %s" - ", found %s" % (INSTALLED_DIR, installed_test_files)) + raise Exception("Test files aren't expected to be installed in " + f"{INSTALLED_DIR}, found {installed_test_files}") print("----------- No test files were installed --------------") else: # Check test files detected in repo are installed diff --git a/tools/refguide_check.py b/tools/refguide_check.py index 486789414373..6b79ba037e2c 100644 --- a/tools/refguide_check.py +++ b/tools/refguide_check.py @@ -460,7 +460,7 @@ def resolve(name, is_label=False): if not success: output += " " + "-" * 72 + "\n" for lineno, line in enumerate(text.splitlines()): - output += " %-4d %s\n" % (lineno + 1, line) + output += f" {lineno + 1:<4} {line}\n" output += " " + "-" * 72 + "\n\n" if dots: @@ -522,8 +522,8 @@ def check_rest(module, names, dots=True): m = re.search("([\x00-\x09\x0b-\x1f])", text) if m: - msg = ("Docstring contains a non-printable character %r! " - "Maybe forgot r\"\"\"?" % (m.group(1),)) + msg = (f"Docstring contains a non-printable character {m.group(1)!r}! 
" + "Maybe forgot r\"\"\"?") results.append((full_name, False, msg)) continue From 88052204859eb0e533d5d65802c0287d866f80bf Mon Sep 17 00:00:00 2001 From: Andrew Nelson Date: Sat, 4 Apr 2026 16:17:36 +1100 Subject: [PATCH 1580/1718] CI: pin pkgconf --- .github/workflows/windows.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 032beb8ff815..028cc221ecd8 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -43,7 +43,7 @@ jobs: - name: Install pkg-config run: | - python -m pip install pkgconf + python -m pip install pkgconf==2.5.1.post1 echo "PKG_CONFIG_PATH=${{ github.workspace }}/.openblas" >> $env:GITHUB_ENV - name: Install NumPy (Clang-cl) @@ -153,7 +153,7 @@ jobs: - name: pkg-config run: | - python -m pip install pkgconf + python -m pip install pkgconf==2.5.1.post1 - name: Dependencies run: | From 873f5025bd9e06a52178ebb058216f8a872fe62a Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 4 Apr 2026 12:38:50 +0200 Subject: [PATCH 1581/1718] TYP: ``dot`` shape-typing --- numpy/_core/multiarray.pyi | 59 ++++++++++++++++--- numpy/typing/tests/data/reveal/multiarray.pyi | 44 ++++++++++++-- 2 files changed, 91 insertions(+), 12 deletions(-) diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index cfb48d38b6a4..d0844a245590 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -19,7 +19,7 @@ from typing import ( from typing_extensions import CapsuleType, TypeVar import numpy as np -from numpy import ( # type: ignore[attr-defined] # Python >=3.12 +from numpy import ( _CastingKind, _CopyMode, _ModeKind, @@ -218,6 +218,7 @@ type _RollKind = L[ # `raise` is deliberately excluded type _ArangeScalar = np.integer | np.floating | np.datetime64 | np.timedelta64 type _InnerScalar = np.number | np.bool | np.timedelta64 +type _DotScalar = np.number | np.bool # The datetime functions perform unsafe casts to `datetime64[D]`, 
# so a lot of different argument types are allowed here @@ -712,6 +713,56 @@ def inner[ScalarT: _InnerScalar | np.object_](a: _ToArray2D[ScalarT], b: _Array2 @overload # fallback def inner(a: ArrayLike, b: ArrayLike, /) -> Any: ... +# keep in sync with `ma.core.dot` +@overload # (?d _, Nd _) -> 0d|Nd _ (workaround) +def dot(a: _ArrayJustND[_DotScalar | np.object_], b: _ArrayLike[_DotScalar | np.object_], out: None = None) -> Any: ... +@overload # (Nd _, ?d _) -> 0d|Nd _ (workaround) +def dot(a: _ArrayLike[_DotScalar | np.object_], b: _ArrayJustND[_DotScalar | np.object_], out: None = None) -> Any: ... +@overload # (1d T, 1d T) -> 0d T +def dot[ScalarT: _DotScalar](a: _ToArray1D[ScalarT], b: _ToArray1D[ScalarT], out: None = None) -> ScalarT: ... +@overload # (1d object_, 1d _) -> 0d object +def dot(a: _Array1D[np.object_], b: _Array1D[np.object_] | _ToArray1D[_DotScalar], out: None = None) -> Any: ... +@overload # (1d _, 1d object_) -> 0d object +def dot(a: _ToArray1D[_DotScalar], b: _Array1D[np.object_], out: None = None) -> Any: ... +@overload # (1d bool, 1d bool) -> bool_ +def dot(a: Sequence[bool], b: Sequence[bool], out: None = None) -> np.bool: ... +@overload # (1d ~int, 1d +int) -> int_ +def dot(a: list[int], b: Sequence[int], out: None = None) -> np.int_: ... +@overload # (1d +int, 1d ~int) -> int_ +def dot(a: Sequence[int], b: list[int], out: None = None) -> np.int_: ... +@overload # (1d ~float, 1d +float) -> float64 +def dot(a: list[float], b: Sequence[float], out: None = None) -> np.float64: ... +@overload # (1d +float, 1d ~float) -> float64 +def dot(a: Sequence[float], b: list[float], out: None = None) -> np.float64: ... +@overload # (1d ~complex, 1d +complex) -> complex128 +def dot(a: list[complex], b: Sequence[complex], out: None = None) -> np.complex128: ... +@overload # (1d +complex, 1d ~complex) -> complex128 +def dot(a: Sequence[complex], b: list[complex], out: None = None) -> np.complex128: ... 
+@overload # (1d T, 2d T) -> 1d T +def dot[ScalarT: _DotScalar | np.object_]( + a: _ToArray1D[ScalarT], b: _ToArray2D[ScalarT], out: None = None +) -> _Array1D[ScalarT]: ... +@overload # (2d T, 1d T) -> 1d T +def dot[ScalarT: _DotScalar | np.object_]( + a: _ToArray2D[ScalarT], b: _ToArray1D[ScalarT], out: None = None +) -> _Array1D[ScalarT]: ... +@overload # (2d T, 2d T) -> 2d T +def dot[ScalarT: _DotScalar | np.object_]( + a: _ToArray2D[ScalarT], b: _ToArray2D[ScalarT], out: None = None +) -> _Array2D[ScalarT]: ... +@overload # (2d T, ?d T) -> >=1d T +def dot[ScalarT: _DotScalar | np.object_]( + a: _ToArray2D[ScalarT], b: _ArrayLike[ScalarT], out: None = None +) -> NDArray[ScalarT]: ... +@overload # (?d T, 2d T) -> >=1d T +def dot[ScalarT: _DotScalar | np.object_]( + a: _ArrayLike[ScalarT], b: _ToArray2D[ScalarT], out: None = None +) -> NDArray[ScalarT]: ... +@overload +def dot(a: ArrayLike, b: ArrayLike, out: None = None) -> Incomplete: ... +@overload +def dot[OutT: np.ndarray](a: ArrayLike, b: ArrayLike, out: OutT) -> OutT: ... + # keep in sync with `ma.core.where` @overload def where(condition: ArrayLike, x: None = None, y: None = None, /) -> tuple[NDArray[intp], ...]: ... @@ -725,12 +776,6 @@ def can_cast(from_: ArrayLike | DTypeLike, to: DTypeLike, casting: _CastingKind def min_scalar_type(a: ArrayLike, /) -> dtype: ... def result_type(*arrays_and_dtypes: ArrayLike | DTypeLike | None) -> dtype: ... -# keep in sync with `ma.core.dot` -@overload -def dot(a: ArrayLike, b: ArrayLike, out: None = None) -> Incomplete: ... -@overload -def dot[OutT: np.ndarray](a: ArrayLike, b: ArrayLike, out: OutT) -> OutT: ... - @overload def vdot(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co, /) -> np.bool: ... 
@overload diff --git a/numpy/typing/tests/data/reveal/multiarray.pyi b/numpy/typing/tests/data/reveal/multiarray.pyi index 1dd3a662314a..f3d7c0749fc7 100644 --- a/numpy/typing/tests/data/reveal/multiarray.pyi +++ b/numpy/typing/tests/data/reveal/multiarray.pyi @@ -114,6 +114,45 @@ assert_type(np.inner(AR_c16, AR_c16), npt.NDArray[np.complex128] | Any) # +assert_type(np.dot(AR_LIKE_b, AR_LIKE_b), np.bool) +assert_type(np.dot(AR_LIKE_b, AR_LIKE_i), np.int_) +assert_type(np.dot(AR_LIKE_b, AR_LIKE_f), np.float64) +assert_type(np.dot(AR_LIKE_b, AR_LIKE_c), np.complex128) +assert_type(np.dot(AR_LIKE_i, AR_LIKE_b), np.int_) +assert_type(np.dot(AR_LIKE_i, AR_LIKE_i), np.int_) +assert_type(np.dot(AR_LIKE_i, AR_LIKE_f), np.float64) +assert_type(np.dot(AR_LIKE_i, AR_LIKE_c), np.complex128) +assert_type(np.dot(AR_LIKE_f, AR_LIKE_b), np.float64) +assert_type(np.dot(AR_LIKE_f, AR_LIKE_i), np.float64) +assert_type(np.dot(AR_LIKE_f, AR_LIKE_f), np.float64) +assert_type(np.dot(AR_LIKE_f, AR_LIKE_c), np.complex128) +assert_type(np.dot(AR_LIKE_c, AR_LIKE_b), np.complex128) +assert_type(np.dot(AR_LIKE_c, AR_LIKE_i), np.complex128) +assert_type(np.dot(AR_LIKE_c, AR_LIKE_f), np.complex128) +assert_type(np.dot(AR_LIKE_c, AR_LIKE_c), np.complex128) + +assert_type(np.dot(AR_f4_1d, AR_f4_1d), np.float32) +assert_type(np.dot(AR_f4_1d, AR_f4_2d), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.dot(AR_f4_1d, AR_f4_nd), Any) +assert_type(np.dot(AR_f4_2d, AR_f4_1d), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.dot(AR_f4_2d, AR_f4_2d), np.ndarray[tuple[int, int], np.dtype[np.float32]]) +assert_type(np.dot(AR_f4_2d, AR_f4_nd), Any) +assert_type(np.dot(AR_f4_nd, AR_f4_1d), Any) +assert_type(np.dot(AR_f4_nd, AR_f4_2d), Any) +assert_type(np.dot(AR_f4_nd, AR_f4_nd), Any) + +assert_type(np.dot(AR_O_1d, AR_O_1d), Any) +assert_type(np.dot(AR_O_1d, AR_O_2d), np.ndarray[tuple[int], np.dtype[np.object_]]) +assert_type(np.dot(AR_O_1d, AR_O_nd), Any) +assert_type(np.dot(AR_O_2d, 
AR_O_1d), np.ndarray[tuple[int], np.dtype[np.object_]]) +assert_type(np.dot(AR_O_2d, AR_O_2d), np.ndarray[tuple[int, int], np.dtype[np.object_]]) +assert_type(np.dot(AR_O_2d, AR_O_nd), Any) +assert_type(np.dot(AR_O_nd, AR_O_1d), Any) +assert_type(np.dot(AR_O_nd, AR_O_2d), Any) +assert_type(np.dot(AR_O_nd, AR_O_nd), Any) + +# + assert_type(np.where([True, True, False]), tuple[npt.NDArray[np.intp], ...]) assert_type(np.where([True, True, False], 1, 0), npt.NDArray[Any]) @@ -130,11 +169,6 @@ assert_type(np.result_type(int, [1]), np.dtype) assert_type(np.result_type(AR_f8, AR_u1), np.dtype) assert_type(np.result_type(AR_f8, np.complex128), np.dtype) -assert_type(np.dot(AR_LIKE_f, AR_i8), Any) -assert_type(np.dot(AR_u1, 1), Any) -assert_type(np.dot(1.5j, 1), Any) -assert_type(np.dot(AR_u1, 1, out=AR_f8), npt.NDArray[np.float64]) - assert_type(np.vdot(AR_LIKE_f, AR_i8), np.floating) assert_type(np.vdot(AR_u1, 1), np.signedinteger) assert_type(np.vdot(1.5j, 1), np.complexfloating) From 025cf8dab69b3749f1146e1be44c3bc96039295b Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 4 Apr 2026 16:54:23 +0200 Subject: [PATCH 1582/1718] TYP: deprecate ``timedelta64`` unitless arithmetic --- numpy/__init__.pyi | 78 ++++++++++++++----- numpy/typing/tests/data/pass/arithmetic.py | 12 +-- numpy/typing/tests/data/pass/comparisons.py | 8 +- numpy/typing/tests/data/reveal/arithmetic.pyi | 20 ++--- .../typing/tests/data/reveal/comparisons.pyi | 12 +-- 5 files changed, 86 insertions(+), 44 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 075b2f4a0c98..fa6a89c51b51 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -5464,9 +5464,11 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] # @overload - def __add__(self: timedelta64[Never], x: timedelta64[int | dt.timedelta] | _IntLike_co, /) -> timedelta64: ... + def __add__(self: timedelta64[Never], x: timedelta64[int | dt.timedelta], /) -> timedelta64: ... 
+ @overload + @overload + def __add__(self: timedelta64[None], x: timedelta64, /) -> timedelta64[None]: ... @overload - def __add__(self: timedelta64[None], x: _TD64Like_co, /) -> timedelta64[None]: ... @overload def __add__(self: timedelta64[int | dt.timedelta], x: timedelta64[Never], /) -> timedelta64: ... @overload @@ -5481,15 +5483,20 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] ) -> AnyDateOrTimeT: ... @overload def __add__[AnyItemT: (dt.timedelta, int, None, _TD64Item)]( - self: timedelta64[AnyItemT], x: timedelta64[AnyItemT] | _IntLike_co, / + self: timedelta64[AnyItemT], x: timedelta64[AnyItemT], / + ) -> timedelta64[AnyItemT]: ... + @overload + @deprecated("Adding bare integers to NumPy timedelta is deprecated, and will raise an error in the future.") + def __add__[AnyItemT: (dt.timedelta, int, None, _TD64Item)]( + self: timedelta64[AnyItemT], x: _IntLike_co, / ) -> timedelta64[AnyItemT]: ... __radd__ = __add__ # @overload - def __sub__(self: timedelta64[Never], b: timedelta64[int | dt.timedelta] | _IntLike_co, /) -> timedelta64: ... + def __sub__(self: timedelta64[Never], b: timedelta64[int | dt.timedelta], /) -> timedelta64: ... @overload - def __sub__(self: timedelta64[None], b: _TD64Like_co, /) -> timedelta64[None]: ... + def __sub__(self: timedelta64[None], b: timedelta64, /) -> timedelta64[None]: ... @overload def __sub__(self: timedelta64[int | dt.timedelta], b: timedelta64[Never], /) -> timedelta64: ... @overload @@ -5502,15 +5509,20 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] def __sub__(self: timedelta64[dt.timedelta], b: dt.timedelta, /) -> dt.timedelta: ... @overload def __sub__[AnyItemT: (dt.timedelta, int, None, _TD64Item)]( - self: timedelta64[AnyItemT], b: timedelta64[AnyItemT] | _IntLike_co, / + self: timedelta64[AnyItemT], b: timedelta64[AnyItemT], / + ) -> timedelta64[AnyItemT]: ... 
+ @overload + @deprecated("Subtracting bare integers from NumPy timedelta is deprecated, and will raise an error in the future.") + def __sub__[AnyItemT: (dt.timedelta, int, None, _TD64Item)]( + self: timedelta64[AnyItemT], b: _IntLike_co, / ) -> timedelta64[AnyItemT]: ... # NOTE: subtraction is not commutative, so __rsub__ differs from __sub__. # This confuses mypy, so we ignore the [misc] errors it reports. @overload - def __rsub__(self: timedelta64[Never], a: timedelta64[int | dt.timedelta] | _IntLike_co, /) -> timedelta64: ... + def __rsub__(self: timedelta64[Never], a: timedelta64[int | dt.timedelta], /) -> timedelta64: ... @overload - def __rsub__(self: timedelta64[None], a: _TD64Like_co, /) -> timedelta64[None]: ... + def __rsub__(self: timedelta64[None], a: timedelta64, /) -> timedelta64[None]: ... @overload def __rsub__[AnyDateT: (dt.datetime, dt.date)](self: timedelta64[dt.timedelta], a: AnyDateT, /) -> AnyDateT: ... @overload @@ -5518,8 +5530,13 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] self: timedelta64[dt.timedelta], a: timedelta64[AnyItemT], / ) -> timedelta64[AnyItemT]: ... @overload - def __rsub__[AnyItemT: (dt.timedelta, int, None, _TD64Item)]( # type: ignore[misc] - self: timedelta64[AnyItemT], a: timedelta64[AnyItemT] | _IntLike_co, / + def __rsub__[AnyItemT: (dt.timedelta, int, None, _TD64Item)]( + self: timedelta64[AnyItemT], a: timedelta64[AnyItemT], / + ) -> timedelta64[AnyItemT]: ... + @overload + @deprecated("Subtracting NumPy timedelta from bare integers is deprecated, and will raise an error in the future.") + def __rsub__[AnyItemT: (dt.timedelta, int, None, _TD64Item)]( + self: timedelta64[AnyItemT], a: _IntLike_co, / ) -> timedelta64[AnyItemT]: ... @overload def __rsub__(self, a: timedelta64[None], /) -> timedelta64[None]: ... 
# type: ignore[overload-cannot-match] @@ -5635,31 +5652,56 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] @overload def __rfloordiv__(self: timedelta64[dt.timedelta], a: dt.timedelta, /) -> int: ... + # these mypy `has_type` errors appear to be false positives + @overload + def __lt__(self, other: timedelta64, /) -> bool_: ... # type: ignore[has-type] + @overload + @deprecated("Comparing NumPy timedelta with bare integers is deprecated, and will raise an error in the future.") + def __lt__(self, other: _IntLike_co, /) -> bool_: ... @overload - def __lt__(self, other: _TD64Like_co, /) -> bool_: ... + @deprecated("Comparing NumPy timedelta with bare integers is deprecated, and will raise an error in the future.") + def __lt__(self, other: _ArrayLikeInt_co, /) -> NDArray[bool_]: ... @overload - def __lt__(self, other: _ArrayLikeTD64_co | _NestedSequence[_SupportsGT], /) -> NDArray[bool_]: ... + def __lt__(self, other: _ArrayLike[timedelta64] | _NestedSequence[_SupportsGT], /) -> NDArray[bool_]: ... @overload def __lt__(self, other: _SupportsGT, /) -> bool_: ... @overload - def __le__(self, other: _TD64Like_co, /) -> bool_: ... + def __le__(self, other: timedelta64, /) -> bool_: ... # type: ignore[has-type] + @overload + @deprecated("Comparing NumPy timedelta with bare integers is deprecated, and will raise an error in the future.") + def __le__(self, other: _IntLike_co, /) -> bool_: ... + @overload + @deprecated("Comparing NumPy timedelta with bare integers is deprecated, and will raise an error in the future.") + def __le__(self, other: _ArrayLikeInt_co, /) -> NDArray[bool_]: ... @overload - def __le__(self, other: _ArrayLikeTD64_co | _NestedSequence[_SupportsGE], /) -> NDArray[bool_]: ... + def __le__(self, other: _ArrayLike[timedelta64] | _NestedSequence[_SupportsGE], /) -> NDArray[bool_]: ... @overload def __le__(self, other: _SupportsGE, /) -> bool_: ... @overload - def __gt__(self, other: _TD64Like_co, /) -> bool_: ... 
+ def __gt__(self, other: timedelta64, /) -> bool_: ... @overload - def __gt__(self, other: _ArrayLikeTD64_co | _NestedSequence[_SupportsLT], /) -> NDArray[bool_]: ... + @deprecated("Comparing NumPy timedelta with bare integers is deprecated, and will raise an error in the future.") + def __gt__(self, other: _IntLike_co, /) -> bool_: ... + @overload + @deprecated("Comparing NumPy timedelta with bare integers is deprecated, and will raise an error in the future.") + def __gt__(self, other: _ArrayLikeInt_co, /) -> NDArray[bool_]: ... + @overload + def __gt__(self, other: _ArrayLike[timedelta64] | _NestedSequence[_SupportsLT], /) -> NDArray[bool_]: ... @overload def __gt__(self, other: _SupportsLT, /) -> bool_: ... @overload - def __ge__(self, other: _TD64Like_co, /) -> bool_: ... + def __ge__(self, other: timedelta64, /) -> bool_: ... + @overload + @deprecated("Comparing NumPy timedelta with bare integers is deprecated, and will raise an error in the future.") + def __ge__(self, other: _IntLike_co, /) -> bool_: ... + @overload + @deprecated("Comparing NumPy timedelta with bare integers is deprecated, and will raise an error in the future.") + def __ge__(self, other: _ArrayLikeInt_co, /) -> NDArray[bool_]: ... @overload - def __ge__(self, other: _ArrayLikeTD64_co | _NestedSequence[_SupportsLE], /) -> NDArray[bool_]: ... + def __ge__(self, other: _ArrayLike[timedelta64] | _NestedSequence[_SupportsLE], /) -> NDArray[bool_]: ... @overload def __ge__(self, other: _SupportsLE, /) -> bool_: ... 
diff --git a/numpy/typing/tests/data/pass/arithmetic.py b/numpy/typing/tests/data/pass/arithmetic.py index e347ec096e21..4d5a7ab57933 100644 --- a/numpy/typing/tests/data/pass/arithmetic.py +++ b/numpy/typing/tests/data/pass/arithmetic.py @@ -392,13 +392,13 @@ def __rpow__(self, value: Any) -> Object: dt - i8 td + td -td + i -td + i4 -td + i8 +td + i # type: ignore[deprecated] +td + i4 # type: ignore[deprecated] +td + i8 # type: ignore[deprecated] td - td -td - i -td - i4 -td - i8 +td - i # type: ignore[deprecated] +td - i4 # type: ignore[deprecated] +td - i8 # type: ignore[deprecated] td / f td / f4 td / f8 diff --git a/numpy/typing/tests/data/pass/comparisons.py b/numpy/typing/tests/data/pass/comparisons.py index 0782eb392f4d..47507079b43d 100644 --- a/numpy/typing/tests/data/pass/comparisons.py +++ b/numpy/typing/tests/data/pass/comparisons.py @@ -99,11 +99,11 @@ dt > dt td > td -td > i -td > i4 -td > i8 +td > i # type: ignore[deprecated] +td > i4 # type: ignore[deprecated] +td > i8 # type: ignore[deprecated] td > AR_i -td > SEQ +td > SEQ # type: ignore[deprecated] # boolean diff --git a/numpy/typing/tests/data/reveal/arithmetic.pyi b/numpy/typing/tests/data/reveal/arithmetic.pyi index 68fa5b5230a6..7170325d182c 100644 --- a/numpy/typing/tests/data/reveal/arithmetic.pyi +++ b/numpy/typing/tests/data/reveal/arithmetic.pyi @@ -419,11 +419,11 @@ assert_type(M8_none - M8, np.timedelta64[None]) # type: ignore[assert-type] assert_type(M8_none - m8, np.datetime64[None]) # type: ignore[assert-type] assert_type(m8 + m8, np.timedelta64) -assert_type(m8 + i, np.timedelta64) -assert_type(m8 + i8, np.timedelta64) +assert_type(m8 + i, np.timedelta64) # type: ignore[deprecated] +assert_type(m8 + i8, np.timedelta64) # type: ignore[deprecated] assert_type(m8 - m8, np.timedelta64) -assert_type(m8 - i, np.timedelta64) -assert_type(m8 - i8, np.timedelta64) +assert_type(m8 - i, np.timedelta64) # type: ignore[deprecated] +assert_type(m8 - i8, np.timedelta64) # type: 
ignore[deprecated] assert_type(m8 * f, np.timedelta64) assert_type(m8 * f4, np.timedelta64) assert_type(m8 * np.True_, np.timedelta64) @@ -436,15 +436,15 @@ assert_type(m8 % m8, np.timedelta64) assert_type(divmod(m8, m8), tuple[np.int64, np.timedelta64]) # type: ignore[assert-type] assert_type(m8_none + m8, np.timedelta64[None]) -assert_type(m8_none + i, np.timedelta64[None]) -assert_type(m8_none + i8, np.timedelta64[None]) -assert_type(m8_none - i, np.timedelta64[None]) -assert_type(m8_none - i8, np.timedelta64[None]) +assert_type(m8_none + i, np.timedelta64[None]) # type: ignore[deprecated] +assert_type(m8_none + i8, np.timedelta64[None]) # type: ignore[deprecated] +assert_type(m8_none - i, np.timedelta64[None]) # type: ignore[deprecated] +assert_type(m8_none - i8, np.timedelta64[None]) # type: ignore[deprecated] -assert_type(m8_int + i, np.timedelta64[int]) +assert_type(m8_int + i, np.timedelta64[int]) # type: ignore[deprecated] assert_type(m8_int + m8_delta, np.timedelta64[int]) assert_type(m8_int + m8, np.timedelta64) -assert_type(m8_int - i, np.timedelta64[int]) +assert_type(m8_int - i, np.timedelta64[int]) # type: ignore[deprecated] assert_type(m8_int - m8_delta, np.timedelta64[int]) assert_type(m8_int - m8_int, np.timedelta64[int]) assert_type(m8_int - m8_none, np.timedelta64[None]) diff --git a/numpy/typing/tests/data/reveal/comparisons.pyi b/numpy/typing/tests/data/reveal/comparisons.pyi index 6df5a3d94314..4d359929fd00 100644 --- a/numpy/typing/tests/data/reveal/comparisons.pyi +++ b/numpy/typing/tests/data/reveal/comparisons.pyi @@ -42,15 +42,15 @@ assert_type(i8 > [decimal.Decimal("1.5")], npt.NDArray[np.bool]) assert_type(dt > dt, np.bool) assert_type(td > td, np.bool) -assert_type(td > i, np.bool) -assert_type(td > i4, np.bool) -assert_type(td > i8, np.bool) +assert_type(td > i, np.bool) # type: ignore[deprecated] +assert_type(td > i4, np.bool) # type: ignore[deprecated] +assert_type(td > i8, np.bool) # type: ignore[deprecated] -assert_type(td > AR, 
npt.NDArray[np.bool]) -assert_type(td > SEQ, npt.NDArray[np.bool]) +assert_type(td > AR, npt.NDArray[np.bool]) # type: ignore[deprecated] +assert_type(td > SEQ, npt.NDArray[np.bool]) # type: ignore[deprecated] assert_type(AR > SEQ, npt.NDArray[np.bool]) assert_type(AR > td, npt.NDArray[np.bool]) -assert_type(SEQ > td, npt.NDArray[np.bool]) +assert_type(SEQ > td, npt.NDArray[np.bool]) # type: ignore[deprecated] assert_type(SEQ > AR, npt.NDArray[np.bool]) # boolean From ca2823345a1535e6c0161417c08dcdd4a13baa8d Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 4 Apr 2026 18:08:55 +0200 Subject: [PATCH 1583/1718] TYP: ``atleast_{1,2,3}d`` shape-typing --- numpy/_core/shape_base.pyi | 23 +++++++++++++++++++ .../tests/data/reveal/array_constructors.pyi | 17 ++++++++++++++ 2 files changed, 40 insertions(+) diff --git a/numpy/_core/shape_base.pyi b/numpy/_core/shape_base.pyi index b41602ae8d47..9e4ac855c557 100644 --- a/numpy/_core/shape_base.pyi +++ b/numpy/_core/shape_base.pyi @@ -16,8 +16,19 @@ __all__ = [ "vstack", ] +type _Array0D[ScalarT: np.generic] = np.ndarray[tuple[()], np.dtype[ScalarT]] +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] +type _Array2D[ScalarT: np.generic] = np.ndarray[tuple[int, int], np.dtype[ScalarT]] +type _Array3D[ScalarT: np.generic] = np.ndarray[tuple[int, int, int], np.dtype[ScalarT]] + # keep in sync with `numpy.ma.extras.atleast_1d` @overload +def atleast_1d[ArrayT: _Array1D[Any] | _Array2D[Any] | _Array3D[Any]](a0: ArrayT, /) -> ArrayT: ... +@overload +def atleast_1d[ScalarT: np.generic](a0: _Array0D[ScalarT], /) -> _Array1D[ScalarT]: ... +@overload +def atleast_1d[ScalarT: np.generic](a0: ScalarT, /) -> _Array1D[ScalarT]: ... +@overload def atleast_1d[ScalarT: np.generic](a0: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... 
@overload def atleast_1d[ScalarT1: np.generic, ScalarT2: np.generic]( @@ -36,6 +47,12 @@ def atleast_1d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[NDArray # keep in sync with `numpy.ma.extras.atleast_2d` @overload +def atleast_2d[ArrayT: _Array2D[Any] | _Array3D[Any]](a0: ArrayT, /) -> ArrayT: ... +@overload +def atleast_2d[ScalarT: np.generic](a0: _Array0D[ScalarT] | _Array1D[ScalarT], /) -> _Array2D[ScalarT]: ... +@overload +def atleast_2d[ScalarT: np.generic](a0: ScalarT, /) -> _Array2D[ScalarT]: ... +@overload def atleast_2d[ScalarT: np.generic](a0: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def atleast_2d[ScalarT1: np.generic, ScalarT2: np.generic]( @@ -54,6 +71,12 @@ def atleast_2d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[NDArray # keep in sync with `numpy.ma.extras.atleast_3d` @overload +def atleast_3d[ArrayT: _Array3D[Any]](a0: ArrayT, /) -> ArrayT: ... +@overload +def atleast_3d[ScalarT: np.generic](a0: _Array0D[ScalarT] | _Array1D[ScalarT] | _Array2D[ScalarT], /) -> _Array3D[ScalarT]: ... +@overload +def atleast_3d[ScalarT: np.generic](a0: ScalarT, /) -> _Array3D[ScalarT]: ... +@overload def atleast_3d[ScalarT: np.generic](a0: _ArrayLike[ScalarT], /) -> NDArray[ScalarT]: ... @overload def atleast_3d[ScalarT1: np.generic, ScalarT2: np.generic]( diff --git a/numpy/typing/tests/data/reveal/array_constructors.pyi b/numpy/typing/tests/data/reveal/array_constructors.pyi index 68954a60b7f2..910755ab0877 100644 --- a/numpy/typing/tests/data/reveal/array_constructors.pyi +++ b/numpy/typing/tests/data/reveal/array_constructors.pyi @@ -8,6 +8,7 @@ from numpy._typing import _AnyShape type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] type _Array2D[ScalarT: np.generic] = np.ndarray[tuple[int, int], np.dtype[ScalarT]] +type _Array3D[ScalarT: np.generic] = np.ndarray[tuple[int, int, int], np.dtype[ScalarT]] class SubClass[ScalarT: np.generic](np.ndarray[_AnyShape, np.dtype[ScalarT]]): ... 
@@ -22,7 +23,11 @@ C: list[int] D: SubClass[np.float64 | np.int64] E: IntoSubClass[np.float64 | np.int64] +_f32_0d: np.float32 _f32_1d: _Array1D[np.float32] +_f32_2d: _Array2D[np.float32] +_f32_3d: _Array3D[np.float32] + _py_b_1d: list[bool] _py_b_2d: list[list[bool]] _py_i_1d: list[int] @@ -270,6 +275,10 @@ assert_type(np.identity(3, dtype="complex"), np.ndarray[tuple[int, int], np.dtyp assert_type(np.identity(3, dtype="c16"), np.ndarray[tuple[int, int], np.dtype[np.complex128 | Any]]) assert_type(np.identity(3, dtype="D"), np.ndarray[tuple[int, int], np.dtype[np.complex128 | Any]]) +assert_type(np.atleast_1d(_f32_0d), _Array1D[np.float32]) +assert_type(np.atleast_1d(_f32_1d), _Array1D[np.float32]) +assert_type(np.atleast_1d(_f32_2d), _Array2D[np.float32]) +assert_type(np.atleast_1d(_f32_3d), _Array3D[np.float32]) assert_type(np.atleast_1d(A), npt.NDArray[np.float64]) assert_type(np.atleast_1d(C), npt.NDArray[Any]) assert_type(np.atleast_1d(A, A), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) @@ -278,10 +287,18 @@ assert_type(np.atleast_1d(C, C), tuple[npt.NDArray[Any], npt.NDArray[Any]]) assert_type(np.atleast_1d(A, A, A), tuple[npt.NDArray[np.float64], ...]) assert_type(np.atleast_1d(C, C, C), tuple[npt.NDArray[Any], ...]) +assert_type(np.atleast_2d(_f32_0d), _Array2D[np.float32]) +assert_type(np.atleast_2d(_f32_1d), _Array2D[np.float32]) +assert_type(np.atleast_2d(_f32_2d), _Array2D[np.float32]) +assert_type(np.atleast_2d(_f32_3d), _Array3D[np.float32]) assert_type(np.atleast_2d(A), npt.NDArray[np.float64]) assert_type(np.atleast_2d(A, A), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) assert_type(np.atleast_2d(A, A, A), tuple[npt.NDArray[np.float64], ...]) +assert_type(np.atleast_3d(_f32_0d), _Array3D[np.float32]) +assert_type(np.atleast_3d(_f32_1d), _Array3D[np.float32]) +assert_type(np.atleast_3d(_f32_2d), _Array3D[np.float32]) +assert_type(np.atleast_3d(_f32_3d), _Array3D[np.float32]) assert_type(np.atleast_3d(A), 
npt.NDArray[np.float64]) assert_type(np.atleast_3d(A, A), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) assert_type(np.atleast_3d(A, A, A), tuple[npt.NDArray[np.float64], ...]) From b52dc9616e8c26b759d1f6d2b52fbffbf73b2b8b Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 4 Apr 2026 18:54:43 +0200 Subject: [PATCH 1584/1718] TYP: ``fromregex`` shape-typing --- numpy/lib/_npyio_impl.pyi | 6 ++++-- numpy/typing/tests/data/reveal/npyio.pyi | 12 +++++++----- 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/numpy/lib/_npyio_impl.pyi b/numpy/lib/_npyio_impl.pyi index efe7c0886719..07da3cd357c1 100644 --- a/numpy/lib/_npyio_impl.pyi +++ b/numpy/lib/_npyio_impl.pyi @@ -48,6 +48,8 @@ type _FNameRead = StrPath | SupportsRead[str] | SupportsRead[bytes] type _FNameWriteBytes = StrPath | SupportsWrite[bytes] type _FNameWrite = _FNameWriteBytes | SupportsWrite[str] +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] + @type_check_only class _SupportsReadSeek[T](SupportsRead[T], Protocol): def seek(self, offset: int, whence: int, /) -> object: ... @@ -186,14 +188,14 @@ def fromregex[ScalarT: np.generic]( regexp: str | bytes | Pattern[Any], dtype: _DTypeLike[ScalarT], encoding: str | None = None, -) -> NDArray[ScalarT]: ... +) -> _Array1D[ScalarT]: ... @overload def fromregex( file: _FNameRead, regexp: str | bytes | Pattern[Any], dtype: DTypeLike | None, encoding: str | None = None, -) -> NDArray[Any]: ... +) -> _Array1D[Any]: ... @overload def genfromtxt( diff --git a/numpy/typing/tests/data/reveal/npyio.pyi b/numpy/typing/tests/data/reveal/npyio.pyi index e3eaa45a5fa1..d230f5a3d640 100644 --- a/numpy/typing/tests/data/reveal/npyio.pyi +++ b/numpy/typing/tests/data/reveal/npyio.pyi @@ -18,6 +18,8 @@ npz_file: np.lib.npyio.NpzFile AR_i8: npt.NDArray[np.int64] AR_LIKE_f8: list[float] +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] + class BytesWriter: def write(self, data: bytes) -> None: ... 
@@ -68,11 +70,11 @@ assert_type(np.loadtxt(str_path, delimiter="\n"), npt.NDArray[np.float64]) assert_type(np.loadtxt(str_path, ndmin=2), npt.NDArray[np.float64]) assert_type(np.loadtxt(["1", "2", "3"]), npt.NDArray[np.float64]) -assert_type(np.fromregex(bytes_file, "test", np.float64), npt.NDArray[np.float64]) -assert_type(np.fromregex(str_file, b"test", dtype=float), npt.NDArray[Any]) -assert_type(np.fromregex(str_path, re.compile("test"), dtype=np.str_, encoding="utf8"), npt.NDArray[np.str_]) -assert_type(np.fromregex(pathlib_path, "test", np.float64), npt.NDArray[np.float64]) -assert_type(np.fromregex(bytes_reader, "test", np.float64), npt.NDArray[np.float64]) +assert_type(np.fromregex(bytes_file, "test", np.float64), _Array1D[np.float64]) +assert_type(np.fromregex(str_file, b"test", dtype=float), _Array1D[Any]) +assert_type(np.fromregex(str_path, re.compile("test"), dtype=np.str_, encoding="utf8"), _Array1D[np.str_]) +assert_type(np.fromregex(pathlib_path, "test", np.float64), _Array1D[np.float64]) +assert_type(np.fromregex(bytes_reader, "test", np.float64), _Array1D[np.float64]) assert_type(np.genfromtxt(bytes_file), npt.NDArray[Any]) assert_type(np.genfromtxt(pathlib_path, dtype=np.str_), npt.NDArray[np.str_]) From f05310478b07064ac9ecc7d715294215222877b2 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Sat, 4 Apr 2026 19:33:44 +0200 Subject: [PATCH 1585/1718] TYP: ``nonzero`` and single-arg ``where`` shape-typing (#31143) --- numpy/__init__.pyi | 16 ++++++- numpy/_core/fromnumeric.pyi | 20 +++++++- numpy/_core/multiarray.pyi | 21 ++++++-- numpy/typing/tests/data/fail/fromnumeric.pyi | 2 +- .../typing/tests/data/reveal/fromnumeric.pyi | 11 +++-- numpy/typing/tests/data/reveal/ma.pyi | 3 +- numpy/typing/tests/data/reveal/multiarray.pyi | 48 ++++++++++++++++++- .../typing/tests/data/reveal/ndarray_misc.pyi | 7 ++- 8 files changed, 112 insertions(+), 16 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index fa6a89c51b51..2705793a5115 
100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -739,7 +739,10 @@ type _Truthy = L[True, 1] | bool_[L[True]] type _1D = tuple[int] type _2D = tuple[int, int] +type _3D = tuple[int, int, int] + type _2Tuple[T] = tuple[T, T] +type _3Tuple[T] = tuple[T, T, T] type _ArrayUInt_co = NDArray[unsignedinteger | bool_] type _ArrayInt_co = NDArray[integer | bool_] @@ -2414,8 +2417,17 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def dot[ArrayT: ndarray](self, b: ArrayLike, /, out: ArrayT) -> ArrayT: ... - # `nonzero()` raises for 0d arrays/generics - def nonzero(self) -> tuple[ndarray[tuple[int], _dtype[intp]], ...]: ... + # keep in sync with `_core.fromnumeric.nonzero` + @overload # ?d (workaround) + def nonzero(self: ndarray[tuple[Never, Never, Never, Never]]) -> tuple[ndarray[_1D, _dtype[intp]], ...]: ... + @overload # 1d + def nonzero(self: ndarray[_1D]) -> tuple[ndarray[_1D, _dtype[intp]]]: ... + @overload # 2d + def nonzero(self: ndarray[_2D]) -> _2Tuple[ndarray[_1D, _dtype[intp]]]: ... + @overload # 3d + def nonzero(self: ndarray[_3D]) -> _3Tuple[ndarray[_1D, _dtype[intp]]]: ... + @overload # 3d + def nonzero(self) -> tuple[ndarray[_1D, _dtype[intp]], ...]: ... 
@overload def searchsorted( diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index 6a42651fa87d..23d61b97500e 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -129,6 +129,14 @@ type _3D = tuple[int, int, int] type _4D = tuple[int, int, int, int] type _Array1D[ScalarT: np.generic] = np.ndarray[_1D, np.dtype[ScalarT]] +type _Array2D[ScalarT: np.generic] = np.ndarray[_2D, np.dtype[ScalarT]] +type _Array3D[ScalarT: np.generic] = np.ndarray[_3D, np.dtype[ScalarT]] +# workaround for mypy's and pyright's typing spec non-compliance regarding overloads +type _ArrayJustND[ScalarT: np.generic] = np.ndarray[tuple[Never, Never, Never, Never], np.dtype[ScalarT]] + +type _ToArray1D[ScalarT: np.generic] = _Array1D[ScalarT] | Sequence[ScalarT] +type _ToArray2D[ScalarT: np.generic] = _Array2D[ScalarT] | Sequence[Sequence[ScalarT]] +type _ToArray3D[ScalarT: np.generic] = _Array3D[ScalarT] | Sequence[Sequence[Sequence[ScalarT]]] ### @@ -658,7 +666,17 @@ def ravel(a: complex | _NestedSequence[complex], order: _OrderKACF = "C") -> _Ar @overload def ravel(a: ArrayLike, order: _OrderKACF = "C") -> np.ndarray[_1D]: ... -def nonzero(a: _ArrayLike[Any]) -> tuple[_Array1D[np.intp], ...]: ... +# keep in sync with the 1-arg overloads of `_core.multiarray.where` +@overload # ?d (workaround) +def nonzero(a: _ArrayJustND[Any]) -> tuple[_Array1D[np.intp], ...]: ... +@overload # 1d +def nonzero(a: _ToArray1D[Any]) -> tuple[_Array1D[np.intp]]: ... +@overload # 2d +def nonzero(a: _ToArray2D[Any]) -> tuple[_Array1D[np.intp], _Array1D[np.intp]]: ... +@overload # 3d +def nonzero(a: _ToArray3D[Any]) -> tuple[_Array1D[np.intp], _Array1D[np.intp], _Array1D[np.intp]]: ... +@overload # Nd (fallback) +def nonzero(a: _ArrayLike[Any]) -> tuple[_Array1D[np.intp], ...]: ... 
# this prevents `Any` from being returned with Pyright @overload diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index d0844a245590..226859eef5fe 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -185,11 +185,13 @@ _ArrayT_co = TypeVar("_ArrayT_co", bound=np.ndarray, default=np.ndarray, covaria type _Array[ShapeT: _Shape, ScalarT: np.generic] = ndarray[ShapeT, dtype[ScalarT]] type _Array1D[ScalarT: np.generic] = ndarray[tuple[int], dtype[ScalarT]] type _Array2D[ScalarT: np.generic] = ndarray[tuple[int, int], dtype[ScalarT]] +type _Array3D[ScalarT: np.generic] = ndarray[tuple[int, int, int], dtype[ScalarT]] # workaround for mypy's and pyright's typing spec non-compliance regarding overloads -type _ArrayJustND[ScalarT: np.generic] = ndarray[tuple[Never, Never, Never], dtype[ScalarT]] +type _ArrayJustND[ScalarT: np.generic] = ndarray[tuple[Never, Never, Never, Never], dtype[ScalarT]] type _ToArray1D[ScalarT: np.generic] = _Array1D[ScalarT] | Sequence[ScalarT] type _ToArray2D[ScalarT: np.generic] = _Array2D[ScalarT] | Sequence[Sequence[ScalarT]] +type _ToArray3D[ScalarT: np.generic] = _Array3D[ScalarT] | Sequence[Sequence[Sequence[ScalarT]]] # Valid time units type _UnitKind = L[ @@ -763,12 +765,23 @@ def dot(a: ArrayLike, b: ArrayLike, out: None = None) -> Incomplete: ... @overload def dot[OutT: np.ndarray](a: ArrayLike, b: ArrayLike, out: OutT) -> OutT: ... -# keep in sync with `ma.core.where` -@overload -def where(condition: ArrayLike, x: None = None, y: None = None, /) -> tuple[NDArray[intp], ...]: ... +# keep in sync with `ma.core.where` and the 1-arg overloads with `_core.fromnumeric.nonzerp` +@overload # (?d) (workaround) +def where(condition: _ArrayJustND[Any], x: None = None, y: None = None, /) -> tuple[_Array1D[np.intp], ...]: ... +@overload # (1d) +def where(condition: _ToArray1D[Any], x: None = None, y: None = None, /) -> tuple[_Array1D[np.intp]]: ... 
+@overload # (2d) +def where(condition: _ToArray2D[Any], x: None = None, y: None = None, /) -> tuple[_Array1D[np.intp], _Array1D[np.intp]]: ... +@overload # (3d) +def where( + condition: _ToArray3D[Any], x: None = None, y: None = None, / +) -> tuple[_Array1D[np.intp], _Array1D[np.intp], _Array1D[np.intp]]: ... +@overload # (Nd) (fallback) +def where(condition: _ArrayLike[Any], x: None = None, y: None = None, /) -> tuple[_Array1D[np.intp], ...]: ... @overload def where(condition: ArrayLike, x: ArrayLike, y: ArrayLike, /) -> NDArray[Incomplete]: ... +# def lexsort(keys: ArrayLike, axis: SupportsIndex = -1) -> NDArray[intp]: ... def can_cast(from_: ArrayLike | DTypeLike, to: DTypeLike, casting: _CastingKind = "safe") -> bool: ... diff --git a/numpy/typing/tests/data/fail/fromnumeric.pyi b/numpy/typing/tests/data/fail/fromnumeric.pyi index 7f98ab4602c2..5655ede23e6e 100644 --- a/numpy/typing/tests/data/fail/fromnumeric.pyi +++ b/numpy/typing/tests/data/fail/fromnumeric.pyi @@ -75,7 +75,7 @@ np.trace(A, axis2=[]) # type: ignore[call-overload] np.ravel(a, order="bob") # type: ignore[call-overload] -np.nonzero(0) # type: ignore[arg-type] +np.nonzero(0) # type: ignore[call-overload] np.compress([True], A, axis=1.0) # type: ignore[call-overload] diff --git a/numpy/typing/tests/data/reveal/fromnumeric.pyi b/numpy/typing/tests/data/reveal/fromnumeric.pyi index 042690fb4367..2048ace97d46 100644 --- a/numpy/typing/tests/data/reveal/fromnumeric.pyi +++ b/numpy/typing/tests/data/reveal/fromnumeric.pyi @@ -11,6 +11,7 @@ AR_b: npt.NDArray[np.bool] AR_f4: npt.NDArray[np.float32] AR_f4_1d: np.ndarray[tuple[int], np.dtype[np.float32]] AR_f4_2d: np.ndarray[tuple[int, int], np.dtype[np.float32]] +AR_f4_3d: np.ndarray[tuple[int, int, int], np.dtype[np.float32]] AR_c16: npt.NDArray[np.complex128] AR_u8: npt.NDArray[np.uint64] AR_i8: npt.NDArray[np.int64] @@ -155,10 +156,12 @@ assert_type(np.ravel(f), np.ndarray[tuple[int], np.dtype[np.float64 | Any]]) assert_type(np.ravel(AR_b), 
np.ndarray[tuple[int], np.dtype[np.bool]]) assert_type(np.ravel(AR_f4), np.ndarray[tuple[int], np.dtype[np.float32]]) -assert_type(np.nonzero(AR_b), tuple[np.ndarray[tuple[int], np.dtype[np.intp]], ...]) -assert_type(np.nonzero(AR_f4), tuple[np.ndarray[tuple[int], np.dtype[np.intp]], ...]) -assert_type(np.nonzero(AR_1d), tuple[np.ndarray[tuple[int], np.dtype[np.intp]], ...]) -assert_type(np.nonzero(AR_nd), tuple[np.ndarray[tuple[int], np.dtype[np.intp]], ...]) +type _Int1D = np.ndarray[tuple[int], np.dtype[np.intp]] + +assert_type(np.nonzero(AR_f4), tuple[_Int1D, ...]) +assert_type(np.nonzero(AR_f4_1d), tuple[_Int1D]) +assert_type(np.nonzero(AR_f4_2d), tuple[_Int1D, _Int1D]) +assert_type(np.nonzero(AR_f4_3d), tuple[_Int1D, _Int1D, _Int1D]) assert_type(np.shape(b), tuple[()]) assert_type(np.shape(f), tuple[()]) diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index 944983b97fa1..18bd3acd916d 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -424,8 +424,7 @@ assert_type(MAR_2d_f4.dot(1), MaskedArray[Any]) assert_type(MAR_2d_f4.dot([1]), MaskedArray[Any]) assert_type(MAR_2d_f4.dot(1, out=MAR_subclass), MaskedArraySubclassC) -assert_type(MAR_2d_f4.nonzero(), tuple[_Array1D[np.intp], ...]) -assert_type(MAR_2d_f4.nonzero()[0], _Array1D[np.intp]) +assert_type(MAR_2d_f4.nonzero(), tuple[_Array1D[np.intp], _Array1D[np.intp]]) assert_type(MAR_f8.trace(), Any) assert_type(MAR_f8.trace(out=MAR_subclass), MaskedArraySubclassC) diff --git a/numpy/typing/tests/data/reveal/multiarray.pyi b/numpy/typing/tests/data/reveal/multiarray.pyi index f3d7c0749fc7..2c8bab482ff1 100644 --- a/numpy/typing/tests/data/reveal/multiarray.pyi +++ b/numpy/typing/tests/data/reveal/multiarray.pyi @@ -11,6 +11,7 @@ subclass: SubClass[np.float64] AR_f4_nd: npt.NDArray[np.float32] AR_f4_1d: np.ndarray[tuple[int], np.dtype[np.float32]] AR_f4_2d: np.ndarray[tuple[int, int], np.dtype[np.float32]] +AR_f4_3d: 
np.ndarray[tuple[int, int, int], np.dtype[np.float32]] AR_f8: npt.NDArray[np.float64] AR_c16: npt.NDArray[np.complex128] AR_i8: npt.NDArray[np.int64] @@ -153,7 +154,52 @@ assert_type(np.dot(AR_O_nd, AR_O_nd), Any) # -assert_type(np.where([True, True, False]), tuple[npt.NDArray[np.intp], ...]) +assert_type(np.dot(AR_LIKE_b, AR_LIKE_b), np.bool) +assert_type(np.dot(AR_LIKE_b, AR_LIKE_i), np.int_) +assert_type(np.dot(AR_LIKE_b, AR_LIKE_f), np.float64) +assert_type(np.dot(AR_LIKE_b, AR_LIKE_c), np.complex128) +assert_type(np.dot(AR_LIKE_i, AR_LIKE_b), np.int_) +assert_type(np.dot(AR_LIKE_i, AR_LIKE_i), np.int_) +assert_type(np.dot(AR_LIKE_i, AR_LIKE_f), np.float64) +assert_type(np.dot(AR_LIKE_i, AR_LIKE_c), np.complex128) +assert_type(np.dot(AR_LIKE_f, AR_LIKE_b), np.float64) +assert_type(np.dot(AR_LIKE_f, AR_LIKE_i), np.float64) +assert_type(np.dot(AR_LIKE_f, AR_LIKE_f), np.float64) +assert_type(np.dot(AR_LIKE_f, AR_LIKE_c), np.complex128) +assert_type(np.dot(AR_LIKE_c, AR_LIKE_b), np.complex128) +assert_type(np.dot(AR_LIKE_c, AR_LIKE_i), np.complex128) +assert_type(np.dot(AR_LIKE_c, AR_LIKE_f), np.complex128) +assert_type(np.dot(AR_LIKE_c, AR_LIKE_c), np.complex128) + +assert_type(np.dot(AR_f4_1d, AR_f4_1d), np.float32) +assert_type(np.dot(AR_f4_1d, AR_f4_2d), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.dot(AR_f4_1d, AR_f4_nd), Any) +assert_type(np.dot(AR_f4_2d, AR_f4_1d), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.dot(AR_f4_2d, AR_f4_2d), np.ndarray[tuple[int, int], np.dtype[np.float32]]) +assert_type(np.dot(AR_f4_2d, AR_f4_nd), Any) +assert_type(np.dot(AR_f4_nd, AR_f4_1d), Any) +assert_type(np.dot(AR_f4_nd, AR_f4_2d), Any) +assert_type(np.dot(AR_f4_nd, AR_f4_nd), Any) + +assert_type(np.dot(AR_O_1d, AR_O_1d), Any) +assert_type(np.dot(AR_O_1d, AR_O_2d), np.ndarray[tuple[int], np.dtype[np.object_]]) +assert_type(np.dot(AR_O_1d, AR_O_nd), Any) +assert_type(np.dot(AR_O_2d, AR_O_1d), np.ndarray[tuple[int], np.dtype[np.object_]]) 
+assert_type(np.dot(AR_O_2d, AR_O_2d), np.ndarray[tuple[int, int], np.dtype[np.object_]]) +assert_type(np.dot(AR_O_2d, AR_O_nd), Any) +assert_type(np.dot(AR_O_nd, AR_O_1d), Any) +assert_type(np.dot(AR_O_nd, AR_O_2d), Any) +assert_type(np.dot(AR_O_nd, AR_O_nd), Any) + +# + +type _Int1D = np.ndarray[tuple[int], np.dtype[np.intp]] + +assert_type(np.where([True, True, False]), tuple[_Int1D,]) +assert_type(np.where(AR_f4_1d), tuple[_Int1D]) +assert_type(np.where(AR_f4_2d), tuple[_Int1D, _Int1D]) +assert_type(np.where(AR_f4_3d), tuple[_Int1D, _Int1D, _Int1D]) +assert_type(np.where(AR_f4_nd), tuple[_Int1D, ...]) assert_type(np.where([True, True, False], 1, 0), npt.NDArray[Any]) assert_type(np.lexsort([0, 1, 2]), npt.NDArray[np.intp]) diff --git a/numpy/typing/tests/data/reveal/ndarray_misc.pyi b/numpy/typing/tests/data/reveal/ndarray_misc.pyi index fa2c6020919f..b58472f08d49 100644 --- a/numpy/typing/tests/data/reveal/ndarray_misc.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_misc.pyi @@ -171,7 +171,12 @@ assert_type(AR_f8.dot(1), npt.NDArray[Any]) assert_type(AR_f8.dot([1]), Any) assert_type(AR_f8.dot(1, out=B), SubClass) -assert_type(AR_f8.nonzero(), tuple[np.ndarray[tuple[int], np.dtype[np.intp]], ...]) +type _Int1D = np.ndarray[tuple[int], np.dtype[np.intp]] + +assert_type(AR_f8.nonzero(), tuple[_Int1D, ...]) +assert_type(AR_f8_1d.nonzero(), tuple[_Int1D]) +assert_type(AR_f8_2d.nonzero(), tuple[_Int1D, _Int1D]) +assert_type(AR_f8_3d.nonzero(), tuple[_Int1D, _Int1D, _Int1D]) assert_type(AR_f8.searchsorted(1), np.intp) assert_type(AR_f8.searchsorted([1]), npt.NDArray[np.intp]) From 3da6343c331a58419b3dbe9cc34b034f894688eb Mon Sep 17 00:00:00 2001 From: Andrew Nelson Date: Sun, 5 Apr 2026 07:05:58 +1000 Subject: [PATCH 1586/1718] CI: refactor windows_arm64_steps --- requirements/delvewheel_requirements.txt | 1 + requirements/pkgconf_requirements.txt | 1 + tools/wheels/cibw_before_build.sh | 6 ++++-- 3 files changed, 6 insertions(+), 2 deletions(-) create mode 100644 
requirements/delvewheel_requirements.txt create mode 100644 requirements/pkgconf_requirements.txt diff --git a/requirements/delvewheel_requirements.txt b/requirements/delvewheel_requirements.txt new file mode 100644 index 000000000000..7058ed7384af --- /dev/null +++ b/requirements/delvewheel_requirements.txt @@ -0,0 +1 @@ +delvewheel==1.11.2 ; sys_platform == 'win32' diff --git a/requirements/pkgconf_requirements.txt b/requirements/pkgconf_requirements.txt new file mode 100644 index 000000000000..6d366c39a7f2 --- /dev/null +++ b/requirements/pkgconf_requirements.txt @@ -0,0 +1 @@ +pkgconf==2.5.1.post1 \ No newline at end of file diff --git a/tools/wheels/cibw_before_build.sh b/tools/wheels/cibw_before_build.sh index 381c329a5372..2133e09375e7 100644 --- a/tools/wheels/cibw_before_build.sh +++ b/tools/wheels/cibw_before_build.sh @@ -48,6 +48,8 @@ EOF fi if [[ $RUNNER_OS == "Windows" ]]; then - # delvewheel is the equivalent of delocate/auditwheel for windows. - python -m pip install delvewheel wheel + python -m pip install -r $PROJECT_DIR/requirements/delvewheel_requirements.txt + # pkgconf - carries out the role of pkg-config. 
+ # Alternative is pkgconfiglite that you have to install with choco + python -m pip install -r $PROJECT_DIR/requirements/pkgconf.txt fi From e9827645c55b13d2f1cd98be751939fbc95bd4a3 Mon Sep 17 00:00:00 2001 From: Andrew Nelson Date: Sun, 5 Apr 2026 07:06:26 +1000 Subject: [PATCH 1587/1718] CI: refactor windows_arm64_steps --- .github/windows_arm64_steps/action.yml | 22 ---------------------- .github/workflows/wheels.yml | 11 +++++++---- 2 files changed, 7 insertions(+), 26 deletions(-) delete mode 100644 .github/windows_arm64_steps/action.yml diff --git a/.github/windows_arm64_steps/action.yml b/.github/windows_arm64_steps/action.yml deleted file mode 100644 index 8ecb3b8a0cdd..000000000000 --- a/.github/windows_arm64_steps/action.yml +++ /dev/null @@ -1,22 +0,0 @@ -name: Build Dependencies(Win-ARM64) -description: "Setup LLVM for Win-ARM64 builds" - -runs: - using: "composite" - steps: - - name: Install LLVM with checksum verification - shell: pwsh - run: | - Invoke-WebRequest https://github.com/llvm/llvm-project/releases/download/llvmorg-20.1.6/LLVM-20.1.6-woa64.exe -UseBasicParsing -OutFile LLVM-woa64.exe - $expectedHash = "92f69a1134e32e54b07d51c6e24d9594852f6476f32c3d70471ae00fffc2d462" - $fileHash = (Get-FileHash -Path "LLVM-woa64.exe" -Algorithm SHA256).Hash - if ($fileHash -ne $expectedHash) { - Write-Error "Checksum verification failed. The downloaded file may be corrupted or tampered with." 
- exit 1 - } - Start-Process -FilePath ".\LLVM-woa64.exe" -ArgumentList "/S" -Wait - echo "C:\Program Files\LLVM\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append - echo "CC=clang-cl" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append - echo "CXX=clang-cl" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append - echo "FC=flang-new" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append - diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 8fb5173f7716..95b454f7027b 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -60,14 +60,17 @@ jobs: with: architecture: 'x86' - - name: Setup LLVM for Windows ARM64 - if: ${{ matrix.buildplat[1] == 'win_arm64' }} - uses: ./.github/windows_arm64_steps + - name: win_arm64 - set environment variables + if: matrix.buildplat[1] == 'win' && matrix.buildplat[2] == 'ARM64' + run: | + echo "C:\Program Files\LLVM\bin" >> $env:GITHUB_PATH + echo "CC=clang-cl" >> $env:GITHUB_ENV + echo "CXX=clang-cl" >> $env:GITHUB_ENV + echo "FC=flang" >> $env:GITHUB_ENV - name: pkg-config-for-win if: runner.os == 'windows' run: | - choco install -y --no-progress --stoponfirstfailure --checksum 6004DF17818F5A6DBF19CB335CC92702 pkgconfiglite $CIBW = "${{ github.workspace }}/.openblas" # pkgconfig needs a complete path, and not just "./openblas since the # build is run in a tmp dir (?) 
From 7c6283a1fb0bb745e17af925e97b1daa4cca59c8 Mon Sep 17 00:00:00 2001 From: Andrew Nelson Date: Sun, 5 Apr 2026 07:18:03 +1000 Subject: [PATCH 1588/1718] CI: change the platform detection --- .github/workflows/wheels.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 95b454f7027b..6c899e82b41c 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -61,7 +61,7 @@ jobs: architecture: 'x86' - name: win_arm64 - set environment variables - if: matrix.buildplat[1] == 'win' && matrix.buildplat[2] == 'ARM64' + if: ${{ matrix.buildplat[1] == 'win_arm64' }} run: | echo "C:\Program Files\LLVM\bin" >> $env:GITHUB_PATH echo "CC=clang-cl" >> $env:GITHUB_ENV echo "CXX=clang-cl" >> $env:GITHUB_ENV echo "FC=flang" >> $env:GITHUB_ENV From e495f00151207fab28b548926125ad07a3bdb497 Mon Sep 17 00:00:00 2001 From: Andrew Nelson Date: Sun, 5 Apr 2026 07:25:54 +1000 Subject: [PATCH 1589/1718] CI: correct the pkgconf_requirements name --- tools/wheels/cibw_before_build.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/wheels/cibw_before_build.sh b/tools/wheels/cibw_before_build.sh index 2133e09375e7..5a8e69f04cdf 100644 --- a/tools/wheels/cibw_before_build.sh +++ b/tools/wheels/cibw_before_build.sh @@ -51,5 +51,5 @@ if [[ $RUNNER_OS == "Windows" ]]; then python -m pip install -r $PROJECT_DIR/requirements/delvewheel_requirements.txt # pkgconf - carries out the role of pkg-config. 
# Alternative is pkgconfiglite that you have to install with choco - python -m pip install -r $PROJECT_DIR/requirements/pkgconf.txt + python -m pip install -r $PROJECT_DIR/requirements/pkgconf_requirements.txt fi From 939e08482329d4adbb55be6cb540e67f3d9aa095 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Sun, 5 Apr 2026 00:38:40 +0200 Subject: [PATCH 1590/1718] TYP: ``mean`` and ``ndarray.mean`` shape-typing and improved dtypes (#31151) --- numpy/__init__.pyi | 143 +++++++++++++++ numpy/_core/fromnumeric.pyi | 170 ++++++++++++------ .../typing/tests/data/reveal/fromnumeric.pyi | 43 +++-- .../typing/tests/data/reveal/ndarray_misc.pyi | 19 +- 4 files changed, 300 insertions(+), 75 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 2705793a5115..c1ea3af3cad2 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2341,6 +2341,149 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): where: _ArrayLikeBool_co = True, ) -> ArrayT: ... + # + @override # type: ignore[override] + @overload # +integer | ~object_ + def mean( + self: NDArray[integer | bool_ | object_], + axis: None = None, + dtype: None = None, + out: None = None, + *, + keepdims: L[False] | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> float64: ... + @overload # +integer, axis: + def mean( + self: NDArray[integer | bool_], + axis: int | tuple[int, ...], + dtype: None = None, + out: None = None, + *, + keepdims: L[False] | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> NDArray[float64]: ... + @overload # +integer, keepdims=True + def mean( + self: NDArray[integer | bool_], + axis: int | tuple[int, ...] | None = None, + dtype: None = None, + out: None = None, + *, + keepdims: L[True], + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ndarray[_ShapeT_co, dtype[float64]]: ... 
+ @overload # ~inexact | timedelta64 + def mean[ScalarT: inexact | timedelta64]( + self: NDArray[ScalarT], + axis: None = None, + dtype: None = None, + out: None = None, + *, + keepdims: L[False] | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ScalarT: ... + @overload # ~inexact | timedelta64, axis: + def mean[ScalarT: inexact | timedelta64 | object_]( + self: NDArray[ScalarT], + axis: int | tuple[int, ...], + dtype: None = None, + out: None = None, + *, + keepdims: L[False] | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> NDArray[ScalarT]: ... + @overload # ~inexact | timedelta64 | object_, keepdims=True + def mean[ArrayT: NDArray[inexact | timedelta64 | object_]]( + self: ArrayT, + axis: int | tuple[int, ...] | None = None, + dtype: None = None, + out: None = None, + *, + keepdims: L[True], + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ArrayT: ... + @overload # dtype: ScalarT + def mean[ScalarT: generic]( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: None = None, + *, + dtype: _DTypeLike[ScalarT], + out: None = None, + keepdims: L[False] | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ScalarT: ... + @overload # dtype: ScalarT (keyword), keepdims=True + def mean[ScalarT: generic]( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: int | tuple[int, ...] | None = None, + *, + dtype: _DTypeLike[ScalarT], + out: None = None, + keepdims: L[True], + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... + @overload # dtype: ScalarT (positional), keepdims=True + def mean[ScalarT: generic]( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: int | tuple[int, ...] | None, + dtype: _DTypeLike[ScalarT], + out: None = None, + *, + keepdims: L[True], + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... 
+ @overload # axis: , dtype: ScalarT + def mean[ScalarT: generic]( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: int | tuple[int, ...], + dtype: _DTypeLike[ScalarT], + out: None = None, + *, + keepdims: L[False] | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> NDArray[ScalarT]: ... + @overload # out: ArrayT + def mean[ArrayT: ndarray]( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: int | tuple[int, ...] | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + keepdims: py_bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ArrayT: ... + @overload # fallback + def mean( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: None = None, + dtype: DTypeLike | None = None, + out: None = None, + *, + keepdims: L[False] | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> Any: ... + @overload # fallback, axis: + def mean( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: int | tuple[int, ...], + dtype: DTypeLike | None = None, + out: None = None, + *, + keepdims: L[False] | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ndarray: ... + @overload # fallback, keepdims=True + def mean( # pyright: ignore[reportIncompatibleMethodOverride] + self: NDArray[number | bool_ | timedelta64 | object_], + axis: int | tuple[int, ...] | None = None, + dtype: DTypeLike | None = None, + out: None = None, + *, + keepdims: L[True], + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ndarray[_ShapeT_co]: ... 
+ # @overload def partition( diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index 23d61b97500e..85532a6cbc8b 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -55,6 +55,7 @@ from numpy._typing import ( _ShapeLike, _SupportsArray, ) +from numpy._typing._array_like import _DualArrayLike __all__ = [ "all", @@ -138,6 +139,8 @@ type _ToArray1D[ScalarT: np.generic] = _Array1D[ScalarT] | Sequence[ScalarT] type _ToArray2D[ScalarT: np.generic] = _Array2D[ScalarT] | Sequence[Sequence[ScalarT]] type _ToArray3D[ScalarT: np.generic] = _Array3D[ScalarT] | Sequence[Sequence[Sequence[ScalarT]]] +type _ArrayLikeNumeric_co = _DualArrayLike[np.dtype[np.number | np.bool | np.object_ | np.timedelta64], complex] + ### # TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @@ -1644,70 +1647,80 @@ def around[ArrayT: np.ndarray]( out: ArrayT, ) -> ArrayT: ... -# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 -@overload +# +@overload # +integer | ~object_ | +builtins.float def mean( - a: _ArrayLikeFloat_co, + a: _DualArrayLike[np.dtype[np.integer | np.bool | np.object_], float], axis: None = None, dtype: None = None, out: None = None, keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., -) -> floating: ... -@overload +) -> np.float64: ... +@overload # +integer | +builtins.float, axis: def mean( - a: _ArrayLikeComplex_co, - axis: None = None, + a: _DualArrayLike[np.dtype[np.integer | np.bool], float], + axis: int | tuple[int, ...], dtype: None = None, out: None = None, keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., -) -> complexfloating: ... -@overload +) -> NDArray[np.float64]: ... +@overload # +integer, keepdims=True +def mean[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.integer | np.bool]], + axis: int | tuple[int, ...] 
| None = None, + dtype: None = None, + out: None = None, + *, + keepdims: Literal[True], + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> np.ndarray[ShapeT, np.dtype[np.float64]]: ... +@overload # ~complex (`list` ensures invariance to avoid overlap with the previous overload) def mean( - a: _ArrayLike[np.timedelta64], + a: _NestedSequence[list[complex]] | list[complex], axis: None = None, dtype: None = None, out: None = None, keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., -) -> np.timedelta64: ... -@overload -def mean[ArrayT: np.ndarray]( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None, - dtype: DTypeLike | None, - out: ArrayT, - keepdims: bool | _NoValueType = ..., +) -> np.complex128: ... +@overload # ~inexact | timedelta64 +def mean[ScalarT: np.inexact | np.timedelta64]( + a: _ArrayLike[ScalarT], + axis: None = None, + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., -) -> ArrayT: ... -@overload -def mean[ArrayT: np.ndarray]( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = None, - dtype: DTypeLike | None = None, +) -> ScalarT: ... +@overload # ~inexact | timedelta64 | object_, axis: +def mean[ScalarT: np.inexact | np.timedelta64 | np.object_]( + a: _ArrayLike[ScalarT], + axis: int | tuple[int, ...], + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., *, - out: ArrayT, - keepdims: bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> ArrayT: ... -@overload -def mean[ScalarT: np.generic]( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None, - dtype: _DTypeLike[ScalarT], +) -> NDArray[ScalarT]: ... +@overload # ~inexact | timedelta64 | object_, keepdims=True +def mean[ArrayT: NDArray[np.inexact | np.timedelta64 | np.object_]]( + a: ArrayT, + axis: int | tuple[int, ...] 
| None = None, + dtype: None = None, out: None = None, - keepdims: Literal[False] | _NoValueType = ..., *, + keepdims: Literal[True], where: _ArrayLikeBool_co | _NoValueType = ..., -) -> ScalarT: ... -@overload +) -> ArrayT: ... +@overload # dtype: ScalarT def mean[ScalarT: np.generic]( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + a: _ArrayLikeNumeric_co, axis: None = None, *, dtype: _DTypeLike[ScalarT], @@ -1715,47 +1728,88 @@ def mean[ScalarT: np.generic]( keepdims: Literal[False] | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., ) -> ScalarT: ... -@overload -def mean[ScalarT: np.generic]( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None, +@overload # dtype: ScalarT (keyword), keepdims=True +def mean[ShapeT: _Shape, ScalarT: np.generic]( + a: np.ndarray[ShapeT, np.dtype[np.number | np.bool | np.timedelta64 | np.object_]], + axis: int | tuple[int, ...] | None = None, + *, dtype: _DTypeLike[ScalarT], - out: None, - keepdims: Literal[True, 1], + out: None = None, + keepdims: Literal[True], + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> np.ndarray[ShapeT, np.dtype[ScalarT]]: ... +@overload # dtype: ScalarT (positional), keepdims=True +def mean[ShapeT: _Shape, ScalarT: np.generic]( + a: np.ndarray[ShapeT, np.dtype[np.number | np.bool | np.timedelta64 | np.object_]], + axis: int | tuple[int, ...] | None, + dtype: _DTypeLike[ScalarT], + out: None = None, *, + keepdims: Literal[True], + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> np.ndarray[ShapeT, np.dtype[ScalarT]]: ... +@overload # dtype: ScalarT (keyword), keepdims=True +def mean[ScalarT: np.generic]( + a: _ArrayLikeNumeric_co, + axis: int | tuple[int, ...] | None = None, + *, + dtype: _DTypeLike[ScalarT], + out: None = None, + keepdims: Literal[True], where: _ArrayLikeBool_co | _NoValueType = ..., ) -> NDArray[ScalarT]: ... 
-@overload +@overload # axis: , dtype: ScalarT def mean[ScalarT: np.generic]( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None, + a: _ArrayLikeNumeric_co, + axis: int | tuple[int, ...], dtype: _DTypeLike[ScalarT], out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[ScalarT]: ... +@overload # out: ArrayT +def mean[ArrayT: np.ndarray]( + a: _ArrayLikeNumeric_co, + axis: int | tuple[int, ...] | None = None, + dtype: DTypeLike | None = None, *, + out: ArrayT, keepdims: bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> ScalarT | NDArray[ScalarT]: ... -@overload -def mean[ScalarT: np.generic]( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = None, +) -> ArrayT: ... +@overload # fallback +def mean( + a: _ArrayLikeNumeric_co, + axis: None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., *, - dtype: _DTypeLike[ScalarT], + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> Any: ... +@overload # fallback, axis: +def mean( + a: _ArrayLikeNumeric_co, + axis: int | tuple[int, ...], + dtype: DTypeLike | None = None, out: None = None, - keepdims: bool | _NoValueType = ..., + keepdims: Literal[False] | _NoValueType = ..., + *, where: _ArrayLikeBool_co | _NoValueType = ..., -) -> ScalarT | NDArray[ScalarT]: ... -@overload +) -> NDArray[Any]: ... +@overload # fallback, keepdims=True def mean( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = None, + a: _ArrayLikeNumeric_co, + axis: int | tuple[int, ...] | None = None, dtype: DTypeLike | None = None, out: None = None, - keepdims: bool | _NoValueType = ..., *, + keepdims: Literal[True], where: _ArrayLikeBool_co | _NoValueType = ..., -) -> Incomplete: ... +) -> NDArray[Any]: ... 
+# @overload def std( a: _ArrayLikeComplex_co, diff --git a/numpy/typing/tests/data/reveal/fromnumeric.pyi b/numpy/typing/tests/data/reveal/fromnumeric.pyi index 2048ace97d46..525826a9dc53 100644 --- a/numpy/typing/tests/data/reveal/fromnumeric.pyi +++ b/numpy/typing/tests/data/reveal/fromnumeric.pyi @@ -1,5 +1,6 @@ """Tests for :mod:`_core.fromnumeric`.""" +import datetime as dt from typing import Any, assert_type import numpy as np @@ -17,7 +18,9 @@ AR_u8: npt.NDArray[np.uint64] AR_i8: npt.NDArray[np.int64] AR_O: npt.NDArray[np.object_] AR_subclass: NDArraySubclass -AR_m: npt.NDArray[np.timedelta64] +AR_m_ns: npt.NDArray[np.timedelta64[int]] +AR_m_s: npt.NDArray[np.timedelta64[dt.timedelta]] +AR_m_nat: npt.NDArray[np.timedelta64[None]] AR_0d: np.ndarray[tuple[()]] AR_1d: np.ndarray[tuple[int]] AR_nd: np.ndarray @@ -339,23 +342,35 @@ assert_type(np.around([1.5]), npt.NDArray[Any]) assert_type(np.around(AR_f4, out=AR_subclass), NDArraySubclass) assert_type(np.around(AR_f4_1d), np.ndarray[tuple[int], np.dtype[np.float32]]) -assert_type(np.mean(AR_b), np.floating) -assert_type(np.mean(AR_i8), np.floating) -assert_type(np.mean(AR_f4), np.floating) -assert_type(np.mean(AR_m), np.timedelta64) -assert_type(np.mean(AR_c16), np.complexfloating) -assert_type(np.mean(AR_O), Any) -assert_type(np.mean(AR_f4, axis=0), Any) -assert_type(np.mean(AR_f4, keepdims=True), Any) +assert_type(np.mean(AR_b), np.float64) +assert_type(np.mean(AR_i8), np.float64) +assert_type(np.mean(AR_i8, axis=0), npt.NDArray[np.float64]) +assert_type(np.mean(AR_i8, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.mean(AR_i8, axis=0, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.mean(AR_f4), np.float32) +assert_type(np.mean(AR_c16), np.complex128) +assert_type(np.mean(AR_O), np.float64) +assert_type(np.mean(AR_O, axis=0), npt.NDArray[np.object_]) +assert_type(np.mean(AR_O, keepdims=True), npt.NDArray[np.object_]) +assert_type(np.mean(AR_O, axis=0, keepdims=True), 
npt.NDArray[np.object_]) +assert_type(np.mean(AR_m_ns), np.timedelta64[int]) +assert_type(np.mean(AR_m_s), np.timedelta64[dt.timedelta]) +assert_type(np.mean(AR_m_nat), np.timedelta64[None]) +assert_type(np.mean(AR_f4, axis=0), npt.NDArray[np.float32]) +assert_type(np.mean(AR_f4, keepdims=True), npt.NDArray[np.float32]) +assert_type(np.mean(AR_f4_1d, keepdims=True), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.mean(AR_f4_2d, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float32]]) assert_type(np.mean(AR_f4, dtype=float), Any) +assert_type(np.mean(AR_f4, dtype=float, axis=1), np.ndarray) +assert_type(np.mean(AR_f4, dtype=float, keepdims=True), np.ndarray) +assert_type(np.mean(AR_f4, dtype=float, axis=1, keepdims=True), np.ndarray) assert_type(np.mean(AR_f4, dtype=np.float64), np.float64) -assert_type(np.mean(AR_f4, out=AR_subclass), NDArraySubclass) assert_type(np.mean(AR_f4, dtype=np.float64), np.float64) -assert_type(np.mean(AR_f4, None, np.float64), np.float64) assert_type(np.mean(AR_f4, dtype=np.float64, keepdims=False), np.float64) -assert_type(np.mean(AR_f4, None, np.float64, keepdims=False), np.float64) -assert_type(np.mean(AR_f4, dtype=np.float64, keepdims=True), np.float64 | npt.NDArray[np.float64]) -assert_type(np.mean(AR_f4, None, np.float64, keepdims=True), np.float64 | npt.NDArray[np.float64]) +assert_type(np.mean(AR_f4, dtype=np.float64, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.mean(AR_f4_1d, dtype=np.float64, keepdims=True), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.mean(AR_f4_2d, dtype=np.float64, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.mean(AR_f4, out=AR_subclass), NDArraySubclass) assert_type(np.std(AR_b), np.floating) assert_type(np.std(AR_i8), np.floating) diff --git a/numpy/typing/tests/data/reveal/ndarray_misc.pyi b/numpy/typing/tests/data/reveal/ndarray_misc.pyi index b58472f08d49..89560d5d74db 100644 --- 
a/numpy/typing/tests/data/reveal/ndarray_misc.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_misc.pyi @@ -109,10 +109,23 @@ assert_type(AR_f8.max(keepdims=True), Any) assert_type(AR_f8.max(out=B), SubClass) assert_type(f8.mean(), Any) -assert_type(AR_f8.mean(), Any) -assert_type(AR_f8.mean(axis=0), Any) -assert_type(AR_f8.mean(keepdims=True), Any) +assert_type(AR_f8.mean(), np.float64) +assert_type(AR_f8.mean(keepdims=True), npt.NDArray[np.float64]) +assert_type(AR_f8.mean(axis=0), npt.NDArray[np.float64]) +assert_type(AR_f8.mean(axis=0, keepdims=True), npt.NDArray[np.float64]) +assert_type(AR_f8.mean(dtype=np.float32), np.float32) +assert_type(AR_f8.mean(dtype=np.float32, axis=0), npt.NDArray[np.float32]) +assert_type(AR_f8.mean(dtype=np.float32, keepdims=True), npt.NDArray[np.float32]) +assert_type(AR_f8.mean(dtype=np.float32, axis=0, keepdims=True), npt.NDArray[np.float32]) assert_type(AR_f8.mean(out=B), SubClass) +assert_type(AR_f8_2d.mean(), np.float64) +assert_type(AR_f8_2d.mean(keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(AR_f8_2d.mean(axis=0), npt.NDArray[np.float64]) +assert_type(AR_f8_2d.mean(axis=0, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(AR_f8_2d.mean(dtype=np.float32), np.float32) +assert_type(AR_f8_2d.mean(dtype=np.float32, axis=0), npt.NDArray[np.float32]) +assert_type(AR_f8_2d.mean(dtype=np.float32, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float32]]) +assert_type(AR_f8_2d.mean(dtype=np.float32, axis=0, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float32]]) assert_type(f8.min(), Any) assert_type(AR_f8.min(), Any) From adb35e4938b09b6686052819b34b47e72ab078ee Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Sun, 5 Apr 2026 15:36:15 +0200 Subject: [PATCH 1591/1718] MAINT: bump ``mypy`` to ``1.20.0`` (#31153) --- environment.yml | 2 +- numpy/__init__.pyi | 1 + numpy/_core/defchararray.pyi | 33 ++-- numpy/_core/multiarray.pyi | 10 +- 
numpy/_core/strings.pyi | 35 ++--- numpy/matlib.pyi | 2 +- numpy/typing/tests/data/fail/char.pyi | 3 - numpy/typing/tests/data/fail/strings.pyi | 3 - numpy/typing/tests/data/reveal/arithmetic.pyi | 10 +- numpy/typing/tests/data/reveal/char.pyi | 56 +++---- numpy/typing/tests/data/reveal/strings.pyi | 56 +++---- requirements/typing_requirements.txt | 2 +- tools/stubtest/allowlist.txt | 142 ------------------ 13 files changed, 101 insertions(+), 254 deletions(-) diff --git a/environment.yml b/environment.yml index a3bdfb0c6641..0985b755ff06 100644 --- a/environment.yml +++ b/environment.yml @@ -24,7 +24,7 @@ dependencies: - pytest-xdist - hypothesis # For type annotations - - mypy=1.19.1 + - mypy=1.20.0 - orjson # makes mypy faster # For building docs - sphinx>=4.5.0 diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index c1ea3af3cad2..e25a81449647 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2116,6 +2116,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): self, ufunc: ufunc, method: L["__call__", "reduce", "reduceat", "accumulate", "outer", "at"], + /, *inputs: Any, **kwargs: Any, ) -> Any: ... diff --git a/numpy/_core/defchararray.pyi b/numpy/_core/defchararray.pyi index d4fd68f0a81b..fa21bf218f6c 100644 --- a/numpy/_core/defchararray.pyi +++ b/numpy/_core/defchararray.pyi @@ -1,5 +1,5 @@ from collections.abc import Buffer -from typing import Any, Literal as L, Self, SupportsIndex, SupportsInt, overload +from typing import Any, Final, Literal as L, Self, SupportsIndex, SupportsInt, overload from typing_extensions import TypeVar, deprecated import numpy as np @@ -17,6 +17,7 @@ from numpy._typing import ( _Shape, _ShapeLike, _SupportsArray, + _UFunc_Nin1_Nout1, ) __all__ = [ @@ -386,13 +387,13 @@ def multiply(a: _StringDTypeSupportsArray, i: i_co) -> _StringDTypeArray: ... def multiply(a: T_co, i: i_co) -> _StringDTypeOrUnicodeArray: ... @overload -def mod(a: U_co, value: Any) -> NDArray[np.str_]: ... 
+def mod(a: U_co, values: Any) -> NDArray[np.str_]: ... @overload -def mod(a: S_co, value: Any) -> NDArray[np.bytes_]: ... +def mod(a: S_co, values: Any) -> NDArray[np.bytes_]: ... @overload -def mod(a: _StringDTypeSupportsArray, value: Any) -> _StringDTypeArray: ... +def mod(a: _StringDTypeSupportsArray, values: Any) -> _StringDTypeArray: ... @overload -def mod(a: T_co, value: Any) -> _StringDTypeOrUnicodeArray: ... +def mod(a: T_co, values: Any) -> _StringDTypeOrUnicodeArray: ... @overload def capitalize(a: U_co) -> NDArray[str_]: ... @@ -614,15 +615,17 @@ def index(a: S_co, sub: S_co, start: i_co = 0, end: i_co | None = None) -> NDArr @overload def index(a: T_co, sub: T_co, start: i_co = 0, end: i_co | None = None) -> NDArray[np.int_]: ... -def isalpha(a: UST_co) -> NDArray[np.bool]: ... -def isalnum(a: UST_co) -> NDArray[np.bool]: ... -def isdecimal(a: U_co | T_co) -> NDArray[np.bool]: ... -def isdigit(a: UST_co) -> NDArray[np.bool]: ... -def islower(a: UST_co) -> NDArray[np.bool]: ... -def isnumeric(a: U_co | T_co) -> NDArray[np.bool]: ... -def isspace(a: UST_co) -> NDArray[np.bool]: ... -def istitle(a: UST_co) -> NDArray[np.bool]: ... -def isupper(a: UST_co) -> NDArray[np.bool]: ... +isalpha: Final[_UFunc_Nin1_Nout1[L["isalpha"], L[0], L[False]]] = ... +isalnum: Final[_UFunc_Nin1_Nout1[L["isalnum"], L[0], L[False]]] = ... +isdecimal: Final[_UFunc_Nin1_Nout1[L["isdecimal"], L[0], L[False]]] = ... +isdigit: Final[_UFunc_Nin1_Nout1[L["isdigit"], L[0], L[False]]] = ... +islower: Final[_UFunc_Nin1_Nout1[L["islower"], L[0], L[False]]] = ... +isnumeric: Final[_UFunc_Nin1_Nout1[L["isnumeric"], L[0], L[False]]] = ... +isspace: Final[_UFunc_Nin1_Nout1[L["isspace"], L[0], L[False]]] = ... +istitle: Final[_UFunc_Nin1_Nout1[L["istitle"], L[0], L[False]]] = ... +isupper: Final[_UFunc_Nin1_Nout1[L["isupper"], L[0], L[False]]] = ... + +str_len: Final[_UFunc_Nin1_Nout1[L["str_len"], L[0], L[0]]] = ... 
@overload def rfind(a: U_co, sub: U_co, start: i_co = 0, end: i_co | None = None) -> NDArray[int_]: ... @@ -645,8 +648,6 @@ def startswith(a: S_co, prefix: S_co, start: i_co = 0, end: i_co | None = None) @overload def startswith(a: T_co, prefix: T_co, start: i_co = 0, end: i_co | None = None) -> NDArray[np.bool]: ... -def str_len(A: UST_co) -> NDArray[int_]: ... - # Overload 1 and 2: str- or bytes-based array-likes # overload 3 and 4: arbitrary object with unicode=False (-> bytes_) # overload 5 and 6: arbitrary object with unicode=True (-> str_) diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index 226859eef5fe..e35e2375db1c 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -1708,8 +1708,8 @@ class flagsobj: def num(self) -> int: ... @property def owndata(self) -> bool: ... - def __getitem__(self, key: _GetItemKeys) -> bool: ... - def __setitem__(self, key: _SetItemKeys, value: bool) -> None: ... + def __getitem__(self, key: _GetItemKeys, /) -> bool: ... + def __setitem__(self, key: _SetItemKeys, value: bool, /) -> None: ... @final class flatiter(Generic[_ArrayT_co]): @@ -1814,10 +1814,10 @@ class nditer: # @overload - def __getitem__(self, index: SupportsIndex) -> NDArray[Incomplete]: ... + def __getitem__(self, index: SupportsIndex, /) -> NDArray[Incomplete]: ... @overload - def __getitem__(self, index: slice) -> tuple[NDArray[Incomplete], ...]: ... - def __setitem__(self, index: slice | SupportsIndex, value: ArrayLike) -> None: ... + def __getitem__(self, index: slice, /) -> tuple[NDArray[Incomplete], ...]: ... + def __setitem__(self, index: slice | SupportsIndex, value: ArrayLike, /) -> None: ... # def __copy__(self) -> Self: ... 
diff --git a/numpy/_core/strings.pyi b/numpy/_core/strings.pyi index 11b1a893b408..f5f3f13122dc 100644 --- a/numpy/_core/strings.pyi +++ b/numpy/_core/strings.pyi @@ -14,6 +14,20 @@ from numpy._typing import ( _SupportsArray, ) +from .defchararray import ( + isalnum, + isalpha, + isdecimal, + isdigit, + islower, + isnumeric, + isspace, + istitle, + isupper, + mod, + str_len, +) + __all__ = [ "add", "capitalize", @@ -76,27 +90,6 @@ def multiply(a: _StringDTypeSupportsArray, i: i_co) -> _StringDTypeArray: ... @overload def multiply(a: T_co, i: i_co) -> _StringDTypeOrUnicodeArray: ... -@overload -def mod(a: U_co, value: object) -> NDArray[np.str_]: ... -@overload -def mod(a: S_co, value: object) -> NDArray[np.bytes_]: ... -@overload -def mod(a: _StringDTypeSupportsArray, value: object) -> _StringDTypeArray: ... -@overload -def mod(a: T_co, value: object) -> _StringDTypeOrUnicodeArray: ... - -def isalpha(x: UST_co) -> NDArray[np.bool]: ... -def isalnum(a: UST_co) -> NDArray[np.bool]: ... -def isdigit(x: UST_co) -> NDArray[np.bool]: ... -def isspace(x: UST_co) -> NDArray[np.bool]: ... -def isdecimal(x: U_co | T_co) -> NDArray[np.bool]: ... -def isnumeric(x: U_co | T_co) -> NDArray[np.bool]: ... -def islower(a: UST_co) -> NDArray[np.bool]: ... -def istitle(a: UST_co) -> NDArray[np.bool]: ... -def isupper(a: UST_co) -> NDArray[np.bool]: ... - -def str_len(x: UST_co) -> NDArray[np.int_]: ... 
- @overload def find( a: U_co, diff --git a/numpy/matlib.pyi b/numpy/matlib.pyi index 0904c2744015..06a6806e373a 100644 --- a/numpy/matlib.pyi +++ b/numpy/matlib.pyi @@ -2,7 +2,7 @@ from typing import Any, Literal, overload import numpy as np import numpy.typing as npt -from numpy import ( # type: ignore[deprecated] # noqa: F401 +from numpy import ( # noqa: F401 False_, ScalarType, True_, diff --git a/numpy/typing/tests/data/fail/char.pyi b/numpy/typing/tests/data/fail/char.pyi index 3dbe5eda296e..91909d118884 100644 --- a/numpy/typing/tests/data/fail/char.pyi +++ b/numpy/typing/tests/data/fail/char.pyi @@ -58,6 +58,3 @@ np.char.index(AR_U, b"a", start=[1, 2, 3]) # type: ignore[arg-type] np.char.index(AR_S, "a", end=9) # type: ignore[arg-type] np.char.rindex(AR_U, b"a", start=[1, 2, 3]) # type: ignore[arg-type] np.char.rindex(AR_S, "a", end=9) # type: ignore[arg-type] - -np.char.isdecimal(AR_S) # type: ignore[arg-type] -np.char.isnumeric(AR_S) # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/fail/strings.pyi b/numpy/typing/tests/data/fail/strings.pyi index 0633db94c817..11c525a332a0 100644 --- a/numpy/typing/tests/data/fail/strings.pyi +++ b/numpy/typing/tests/data/fail/strings.pyi @@ -37,8 +37,5 @@ np.strings.index(AR_S, "a", end=9) # type: ignore[arg-type] np.strings.rindex(AR_U, b"a", start=[1, 2, 3]) # type: ignore[arg-type] np.strings.rindex(AR_S, "a", end=9) # type: ignore[arg-type] -np.strings.isdecimal(AR_S) # type: ignore[arg-type] -np.strings.isnumeric(AR_S) # type: ignore[arg-type] - np.strings.replace(AR_U, b"_", b"-", 10) # type: ignore[arg-type] np.strings.replace(AR_S, "_", "-", 1) # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/reveal/arithmetic.pyi b/numpy/typing/tests/data/reveal/arithmetic.pyi index 7170325d182c..8edf2d7fed9e 100644 --- a/numpy/typing/tests/data/reveal/arithmetic.pyi +++ b/numpy/typing/tests/data/reveal/arithmetic.pyi @@ -510,7 +510,7 @@ assert_type(f16 + c16, np.complexfloating) assert_type(c16 + c16, 
np.complex128) assert_type(f8 + c16, np.complex128) assert_type(i8 + c16, np.complex128) -assert_type(c8 + c16, np.complex128 | np.complex64) +assert_type(c8 + c16, np.complex128) assert_type(f4 + c16, np.complexfloating) assert_type(i4 + c16, np.complex128) assert_type(b_ + c16, np.complex128) @@ -520,7 +520,7 @@ assert_type(f + c16, np.complex128) assert_type(AR_f + c16, npt.NDArray[np.complex128]) assert_type(c8 + f16, np.complex64 | np.complexfloating[_128Bit, _128Bit]) -assert_type(c8 + c16, np.complex64 | np.complex128) +assert_type(c8 + c16, np.complex128) assert_type(c8 + f8, np.complex64 | np.complex128) assert_type(c8 + i8, np.complex64 | np.complexfloating[_64Bit, _64Bit]) assert_type(c8 + c8, np.complex64) @@ -561,16 +561,16 @@ assert_type(f8 + AR_f, npt.NDArray[np.float64]) assert_type(f16 + f8, np.floating) assert_type(f8 + f8, np.float64) assert_type(i8 + f8, np.float64) -assert_type(f4 + f8, np.floating) +assert_type(f4 + f8, np.float64) assert_type(i4 + f8, np.float64) assert_type(b_ + f8, np.float64) assert_type(b + f8, np.float64) -assert_type(c + f8, np.complex128 | np.float64) +assert_type(c + f8, complex) assert_type(f + f8, np.float64) assert_type(AR_f + f8, npt.NDArray[np.float64]) assert_type(f4 + f16, np.floating) -assert_type(f4 + f8, np.floating) +assert_type(f4 + f8, np.float64) assert_type(f4 + i8, np.floating) assert_type(f4 + f4, np.float32) assert_type(f4 + i4, np.floating) diff --git a/numpy/typing/tests/data/reveal/char.pyi b/numpy/typing/tests/data/reveal/char.pyi index b6866a6d9f96..725807b9e8e3 100644 --- a/numpy/typing/tests/data/reveal/char.pyi +++ b/numpy/typing/tests/data/reveal/char.pyi @@ -162,43 +162,43 @@ assert_type(np.char.rindex(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) assert_type(np.char.rindex(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) assert_type(np.char.rindex(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) -assert_type(np.char.isalpha(AR_U), npt.NDArray[np.bool]) 
-assert_type(np.char.isalpha(AR_S), npt.NDArray[np.bool]) -assert_type(np.char.isalpha(AR_T), npt.NDArray[np.bool]) +assert_type(np.char.isalpha(AR_U), np.ndarray) +assert_type(np.char.isalpha(AR_S), np.ndarray) +assert_type(np.char.isalpha(AR_T), np.ndarray) -assert_type(np.char.isalnum(AR_U), npt.NDArray[np.bool]) -assert_type(np.char.isalnum(AR_S), npt.NDArray[np.bool]) -assert_type(np.char.isalnum(AR_T), npt.NDArray[np.bool]) +assert_type(np.char.isalnum(AR_U), np.ndarray) +assert_type(np.char.isalnum(AR_S), np.ndarray) +assert_type(np.char.isalnum(AR_T), np.ndarray) -assert_type(np.char.isdecimal(AR_U), npt.NDArray[np.bool]) -assert_type(np.char.isdecimal(AR_T), npt.NDArray[np.bool]) +assert_type(np.char.isdecimal(AR_U), np.ndarray) +assert_type(np.char.isdecimal(AR_T), np.ndarray) -assert_type(np.char.isdigit(AR_U), npt.NDArray[np.bool]) -assert_type(np.char.isdigit(AR_S), npt.NDArray[np.bool]) -assert_type(np.char.isdigit(AR_T), npt.NDArray[np.bool]) +assert_type(np.char.isdigit(AR_U), np.ndarray) +assert_type(np.char.isdigit(AR_S), np.ndarray) +assert_type(np.char.isdigit(AR_T), np.ndarray) -assert_type(np.char.islower(AR_U), npt.NDArray[np.bool]) -assert_type(np.char.islower(AR_S), npt.NDArray[np.bool]) -assert_type(np.char.islower(AR_T), npt.NDArray[np.bool]) +assert_type(np.char.islower(AR_U), np.ndarray) +assert_type(np.char.islower(AR_S), np.ndarray) +assert_type(np.char.islower(AR_T), np.ndarray) -assert_type(np.char.isnumeric(AR_U), npt.NDArray[np.bool]) -assert_type(np.char.isnumeric(AR_T), npt.NDArray[np.bool]) +assert_type(np.char.isnumeric(AR_U), np.ndarray) +assert_type(np.char.isnumeric(AR_T), np.ndarray) -assert_type(np.char.isspace(AR_U), npt.NDArray[np.bool]) -assert_type(np.char.isspace(AR_S), npt.NDArray[np.bool]) -assert_type(np.char.isspace(AR_T), npt.NDArray[np.bool]) +assert_type(np.char.isspace(AR_U), np.ndarray) +assert_type(np.char.isspace(AR_S), np.ndarray) +assert_type(np.char.isspace(AR_T), np.ndarray) 
-assert_type(np.char.istitle(AR_U), npt.NDArray[np.bool]) -assert_type(np.char.istitle(AR_S), npt.NDArray[np.bool]) -assert_type(np.char.istitle(AR_T), npt.NDArray[np.bool]) +assert_type(np.char.istitle(AR_U), np.ndarray) +assert_type(np.char.istitle(AR_S), np.ndarray) +assert_type(np.char.istitle(AR_T), np.ndarray) -assert_type(np.char.isupper(AR_U), npt.NDArray[np.bool]) -assert_type(np.char.isupper(AR_S), npt.NDArray[np.bool]) -assert_type(np.char.isupper(AR_T), npt.NDArray[np.bool]) +assert_type(np.char.isupper(AR_U), np.ndarray) +assert_type(np.char.isupper(AR_S), np.ndarray) +assert_type(np.char.isupper(AR_T), np.ndarray) -assert_type(np.char.str_len(AR_U), npt.NDArray[np.int_]) -assert_type(np.char.str_len(AR_S), npt.NDArray[np.int_]) -assert_type(np.char.str_len(AR_T), npt.NDArray[np.int_]) +assert_type(np.char.str_len(AR_U), np.ndarray) +assert_type(np.char.str_len(AR_S), np.ndarray) +assert_type(np.char.str_len(AR_T), np.ndarray) assert_type(np.char.translate(AR_U, ""), npt.NDArray[np.str_]) assert_type(np.char.translate(AR_S, ""), npt.NDArray[np.bytes_]) diff --git a/numpy/typing/tests/data/reveal/strings.pyi b/numpy/typing/tests/data/reveal/strings.pyi index 37b34916378e..eb441a6f61ab 100644 --- a/numpy/typing/tests/data/reveal/strings.pyi +++ b/numpy/typing/tests/data/reveal/strings.pyi @@ -149,43 +149,43 @@ assert_type(np.strings.rindex(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) assert_type(np.strings.rindex(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) assert_type(np.strings.rindex(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) -assert_type(np.strings.isalpha(AR_U), npt.NDArray[np.bool]) -assert_type(np.strings.isalpha(AR_S), npt.NDArray[np.bool]) -assert_type(np.strings.isalpha(AR_T), npt.NDArray[np.bool]) +assert_type(np.strings.isalpha(AR_U), np.ndarray) +assert_type(np.strings.isalpha(AR_S), np.ndarray) +assert_type(np.strings.isalpha(AR_T), np.ndarray) -assert_type(np.strings.isalnum(AR_U), npt.NDArray[np.bool]) 
-assert_type(np.strings.isalnum(AR_S), npt.NDArray[np.bool]) -assert_type(np.strings.isalnum(AR_T), npt.NDArray[np.bool]) +assert_type(np.strings.isalnum(AR_U), np.ndarray) +assert_type(np.strings.isalnum(AR_S), np.ndarray) +assert_type(np.strings.isalnum(AR_T), np.ndarray) -assert_type(np.strings.isdecimal(AR_U), npt.NDArray[np.bool]) -assert_type(np.strings.isdecimal(AR_T), npt.NDArray[np.bool]) +assert_type(np.strings.isdecimal(AR_U), np.ndarray) +assert_type(np.strings.isdecimal(AR_T), np.ndarray) -assert_type(np.strings.isdigit(AR_U), npt.NDArray[np.bool]) -assert_type(np.strings.isdigit(AR_S), npt.NDArray[np.bool]) -assert_type(np.strings.isdigit(AR_T), npt.NDArray[np.bool]) +assert_type(np.strings.isdigit(AR_U), np.ndarray) +assert_type(np.strings.isdigit(AR_S), np.ndarray) +assert_type(np.strings.isdigit(AR_T), np.ndarray) -assert_type(np.strings.islower(AR_U), npt.NDArray[np.bool]) -assert_type(np.strings.islower(AR_S), npt.NDArray[np.bool]) -assert_type(np.strings.islower(AR_T), npt.NDArray[np.bool]) +assert_type(np.strings.islower(AR_U), np.ndarray) +assert_type(np.strings.islower(AR_S), np.ndarray) +assert_type(np.strings.islower(AR_T), np.ndarray) -assert_type(np.strings.isnumeric(AR_U), npt.NDArray[np.bool]) -assert_type(np.strings.isnumeric(AR_T), npt.NDArray[np.bool]) +assert_type(np.strings.isnumeric(AR_U), np.ndarray) +assert_type(np.strings.isnumeric(AR_T), np.ndarray) -assert_type(np.strings.isspace(AR_U), npt.NDArray[np.bool]) -assert_type(np.strings.isspace(AR_S), npt.NDArray[np.bool]) -assert_type(np.strings.isspace(AR_T), npt.NDArray[np.bool]) +assert_type(np.strings.isspace(AR_U), np.ndarray) +assert_type(np.strings.isspace(AR_S), np.ndarray) +assert_type(np.strings.isspace(AR_T), np.ndarray) -assert_type(np.strings.istitle(AR_U), npt.NDArray[np.bool]) -assert_type(np.strings.istitle(AR_S), npt.NDArray[np.bool]) -assert_type(np.strings.istitle(AR_T), npt.NDArray[np.bool]) +assert_type(np.strings.istitle(AR_U), np.ndarray) 
+assert_type(np.strings.istitle(AR_S), np.ndarray) +assert_type(np.strings.istitle(AR_T), np.ndarray) -assert_type(np.strings.isupper(AR_U), npt.NDArray[np.bool]) -assert_type(np.strings.isupper(AR_S), npt.NDArray[np.bool]) -assert_type(np.strings.isupper(AR_T), npt.NDArray[np.bool]) +assert_type(np.strings.isupper(AR_U), np.ndarray) +assert_type(np.strings.isupper(AR_S), np.ndarray) +assert_type(np.strings.isupper(AR_T), np.ndarray) -assert_type(np.strings.str_len(AR_U), npt.NDArray[np.int_]) -assert_type(np.strings.str_len(AR_S), npt.NDArray[np.int_]) -assert_type(np.strings.str_len(AR_T), npt.NDArray[np.int_]) +assert_type(np.strings.str_len(AR_U), np.ndarray) +assert_type(np.strings.str_len(AR_S), np.ndarray) +assert_type(np.strings.str_len(AR_T), np.ndarray) assert_type(np.strings.translate(AR_U, ""), npt.NDArray[np.str_]) assert_type(np.strings.translate(AR_S, ""), npt.NDArray[np.bytes_]) diff --git a/requirements/typing_requirements.txt b/requirements/typing_requirements.txt index c82664af8668..3fc1cb905b31 100644 --- a/requirements/typing_requirements.txt +++ b/requirements/typing_requirements.txt @@ -2,5 +2,5 @@ -r test_requirements.txt -mypy==1.19.1 +mypy==1.20.0 pyrefly==0.58.0 diff --git a/tools/stubtest/allowlist.txt b/tools/stubtest/allowlist.txt index bafd2c795f88..e113dac7dc47 100644 --- a/tools/stubtest/allowlist.txt +++ b/tools/stubtest/allowlist.txt @@ -7,10 +7,6 @@ numpy\.conftest.* numpy\.random\._generator\.__test__ numpy(\.\w+)?\.tests.* -# system-dependent extended precision types -numpy(\..+)?\.float(96|128) -numpy(\..+)?\.complex(192|256) - # system-dependent SIMD constants numpy\._core\._simd\.\w+ @@ -44,141 +40,3 @@ numpy\.(\w+\.)*generic\.__hash__ # intentionally missing deprecated module stubs numpy\.typing\.mypy_plugin - -# false positive "... 
is not a Union" errors -numpy\.typing\.ArrayLike -numpy\.typing\.DTypeLike -numpy\.typing\.NDArray - -# ufuncs, see https://github.com/python/mypy/issues/20223 -numpy\.(\w+\.)*abs -numpy\.(\w+\.)*absolute -numpy\.(\w+\.)*acos -numpy\.(\w+\.)*acosh -numpy\.(\w+\.)*add -numpy\.(\w+\.)*arccos -numpy\.(\w+\.)*arccosh -numpy\.(\w+\.)*arcsin -numpy\.(\w+\.)*arcsinh -numpy\.(\w+\.)*arctan -numpy\.(\w+\.)*arctan2 -numpy\.(\w+\.)*arctanh -numpy\.(\w+\.)*asin -numpy\.(\w+\.)*asinh -numpy\.(\w+\.)*atan -numpy\.(\w+\.)*atan2 -numpy\.(\w+\.)*atanh -numpy\.(\w+\.)*bitwise_and -numpy\.(\w+\.)*bitwise_count -numpy\.(\w+\.)*bitwise_invert -numpy\.(\w+\.)*bitwise_left_shift -numpy\.(\w+\.)*bitwise_not -numpy\.(\w+\.)*bitwise_or -numpy\.(\w+\.)*bitwise_right_shift -numpy\.(\w+\.)*bitwise_xor -numpy\.(\w+\.)*cbrt -numpy\.(\w+\.)*ceil -numpy\.(\w+\.)*conj -numpy\.(\w+\.)*conjugate -numpy\.(\w+\.)*copysign -numpy\.(\w+\.)*cos -numpy\.(\w+\.)*cosh -numpy\.(\w+\.)*deg2rad -numpy\.(\w+\.)*degrees -numpy\.(\w+\.)*divide -numpy\.(\w+\.)*divmod -numpy\.(\w+\.)*equal -numpy\.(\w+\.)*exp -numpy\.(\w+\.)*exp2 -numpy\.(\w+\.)*expm1 -numpy\.(\w+\.)*fabs -numpy\.(\w+\.)*float_power -numpy\.(\w+\.)*floor -numpy\.(\w+\.)*floor_divide -numpy\.(\w+\.)*fmax -numpy\.(\w+\.)*fmin -numpy\.(\w+\.)*fmod -numpy\.(\w+\.)*frexp -numpy\.(\w+\.)*gcd -numpy\.(\w+\.)*greater -numpy\.(\w+\.)*greater_equal -numpy\.(\w+\.)*heaviside -numpy\.(\w+\.)*hypot -numpy\.(\w+\.)*invert -numpy\.(\w+\.)*isfinite -numpy\.(\w+\.)*isinf -numpy\.(\w+\.)*isnan -numpy\.(\w+\.)*isnat -numpy\.(\w+\.)*lcm -numpy\.(\w+\.)*ldexp -numpy\.(\w+\.)*left_shift -numpy\.(\w+\.)*less -numpy\.(\w+\.)*less_equal -numpy\.(\w+\.)*log -numpy\.(\w+\.)*log10 -numpy\.(\w+\.)*log1p -numpy\.(\w+\.)*log2 -numpy\.(\w+\.)*logaddexp -numpy\.(\w+\.)*logaddexp2 -numpy\.(\w+\.)*logical_and -numpy\.(\w+\.)*logical_not -numpy\.(\w+\.)*logical_or -numpy\.(\w+\.)*logical_xor -numpy\.(\w+\.)*matmul -numpy\.(\w+\.)*matvec -numpy\.(\w+\.)*maximum -numpy\.(\w+\.)*minimum 
-numpy\.(\w+\.)*mod -numpy\.(\w+\.)*modf -numpy\.(\w+\.)*multiply -numpy\.(\w+\.)*negative -numpy\.(\w+\.)*nextafter -numpy\.(\w+\.)*not_equal -numpy\.(\w+\.)*positive -numpy\.(\w+\.)*pow -numpy\.(\w+\.)*power -numpy\.(\w+\.)*rad2deg -numpy\.(\w+\.)*radians -numpy\.(\w+\.)*reciprocal -numpy\.(\w+\.)*remainder -numpy\.(\w+\.)*right_shift -numpy\.(\w+\.)*rint -numpy\.(\w+\.)*sign -numpy\.(\w+\.)*signbit -numpy\.(\w+\.)*sin -numpy\.(\w+\.)*sinh -numpy\.(\w+\.)*spacing -numpy\.(\w+\.)*sqrt -numpy\.(\w+\.)*square -numpy\.(\w+\.)*subtract -numpy\.(\w+\.)*tan -numpy\.(\w+\.)*tanh -numpy\.(\w+\.)*true_divide -numpy\.(\w+\.)*trunc -numpy\.(\w+\.)*vecdot -numpy\.(\w+\.)*vecmat -numpy\.(\w+\.)*isalnum -numpy\.(\w+\.)*isalpha -numpy\.(\w+\.)*isdecimal -numpy\.(\w+\.)*isdigit -numpy\.(\w+\.)*islower -numpy\.(\w+\.)*isnumeric -numpy\.(\w+\.)*isspace -numpy\.(\w+\.)*istitle -numpy\.(\w+\.)*isupper -numpy\.(\w+\.)*str_len -numpy\._core\._methods\.umr_bitwise_count -numpy\._core\._umath_tests\.always_error -numpy\._core\._umath_tests\.always_error_gufunc -numpy\._core\._umath_tests\.always_error_unary -numpy\._core\._umath_tests\.conv1d_full -numpy\._core\._umath_tests\.cross1d -numpy\._core\._umath_tests\.euclidean_pdist -numpy\._core\._umath_tests\.indexed_negative -numpy\._core\._umath_tests\.inner1d -numpy\._core\._umath_tests\.inner1d_no_doc -numpy\._core\._umath_tests\.matrix_multiply -numpy\.linalg\._umath_linalg\.qr_complete -numpy\.linalg\._umath_linalg\.qr_reduced -numpy\.linalg\._umath_linalg\.solve -numpy\.linalg\._umath_linalg\.solve1 From 5af314d0a5c5455302a9c2c34b61c9c36cf2f74e Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Sun, 5 Apr 2026 15:38:42 +0200 Subject: [PATCH 1592/1718] TYP: ``std`` and ``var`` shape-typing and improved dtypes (#31154) --- numpy/__init__.pyi | 370 +++++++++++++++++ numpy/_core/fromnumeric.pyi | 393 ++++++++++++++---- .../typing/tests/data/reveal/fromnumeric.pyi | 68 ++- .../typing/tests/data/reveal/ndarray_misc.pyi | 53 ++- 4 files 
changed, 778 insertions(+), 106 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index e25a81449647..9700a5a0b08c 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2485,6 +2485,376 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): where: _ArrayLikeBool_co | _NoValueType = ..., ) -> ndarray[_ShapeT_co]: ... + # keep in sync with `ndarray.mean` above + @override # type: ignore[override] + @overload # +integer | ~object_ + def std( + self: NDArray[integer | bool_ | object_], + axis: None = None, + dtype: None = None, + out: None = None, + ddof: float = 0, + *, + keepdims: L[False] | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> float64: ... + @overload # +integer, axis: + def std( + self: NDArray[integer | bool_], + axis: int | tuple[int, ...], + dtype: None = None, + out: None = None, + ddof: float = 0, + *, + keepdims: L[False] | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> NDArray[float64]: ... + @overload # +integer, keepdims=True + def std( + self: NDArray[integer | bool_], + axis: int | tuple[int, ...] | None = None, + dtype: None = None, + out: None = None, + ddof: float = 0, + *, + keepdims: L[True], + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> ndarray[_ShapeT_co, dtype[float64]]: ... 
+ @overload # ~inexact | timedelta64 + def std[ScalarT: inexact | timedelta64]( + self: NDArray[ScalarT], + axis: None = None, + dtype: None = None, + out: None = None, + ddof: float = 0, + *, + keepdims: L[False] | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> ScalarT: ... + @overload # ~inexact | timedelta64, axis: + def std[ScalarT: inexact | timedelta64 | object_]( + self: NDArray[ScalarT], + axis: int | tuple[int, ...], + dtype: None = None, + out: None = None, + ddof: float = 0, + *, + keepdims: L[False] | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> NDArray[ScalarT]: ... + @overload # ~inexact | timedelta64 | object_, keepdims=True + def std[ArrayT: NDArray[inexact | timedelta64 | object_]]( + self: ArrayT, + axis: int | tuple[int, ...] | None = None, + dtype: None = None, + out: None = None, + ddof: float = 0, + *, + keepdims: L[True], + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> ArrayT: ... + @overload # dtype: ScalarT + def std[ScalarT: generic]( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: None = None, + *, + dtype: _DTypeLike[ScalarT], + out: None = None, + ddof: float = 0, + keepdims: L[False] | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> ScalarT: ... + @overload # dtype: ScalarT (keyword), keepdims=True + def std[ScalarT: generic]( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: int | tuple[int, ...] 
| None = None, + *, + dtype: _DTypeLike[ScalarT], + out: None = None, + ddof: float = 0, + keepdims: L[True], + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... + @overload # dtype: ScalarT (positional), keepdims=True + def std[ScalarT: generic]( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: int | tuple[int, ...] | None, + dtype: _DTypeLike[ScalarT], + out: None = None, + ddof: float = 0, + *, + keepdims: L[True], + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... + @overload # axis: , dtype: ScalarT + def std[ScalarT: generic]( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: int | tuple[int, ...], + dtype: _DTypeLike[ScalarT], + out: None = None, + ddof: float = 0, + *, + keepdims: L[False] | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> NDArray[ScalarT]: ... + @overload # out: ArrayT + def std[ArrayT: ndarray]( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: int | tuple[int, ...] | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + ddof: float = 0, + keepdims: py_bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> ArrayT: ... 
+ @overload # fallback + def std( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: None = None, + dtype: DTypeLike | None = None, + out: None = None, + ddof: float = 0, + *, + keepdims: L[False] | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> Any: ... + @overload # fallback, axis: + def std( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: int | tuple[int, ...], + dtype: DTypeLike | None = None, + out: None = None, + ddof: float = 0, + *, + keepdims: L[False] | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> ndarray: ... + @overload # fallback, keepdims=True + def std( # pyright: ignore[reportIncompatibleMethodOverride] + self: NDArray[number | bool_ | timedelta64 | object_], + axis: int | tuple[int, ...] | None = None, + dtype: DTypeLike | None = None, + out: None = None, + ddof: float = 0, + *, + keepdims: L[True], + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> ndarray[_ShapeT_co]: ... + + # keep in sync with `ndarray.std` above + @override # type: ignore[override] + @overload # +integer | ~object_ + def var( + self: NDArray[integer | bool_ | object_], + axis: None = None, + dtype: None = None, + out: None = None, + ddof: float = 0, + *, + keepdims: L[False] | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> float64: ... 
+ @overload # +integer, axis: + def var( + self: NDArray[integer | bool_], + axis: int | tuple[int, ...], + dtype: None = None, + out: None = None, + ddof: float = 0, + *, + keepdims: L[False] | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> NDArray[float64]: ... + @overload # +integer, keepdims=True + def var( + self: NDArray[integer | bool_], + axis: int | tuple[int, ...] | None = None, + dtype: None = None, + out: None = None, + ddof: float = 0, + *, + keepdims: L[True], + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> ndarray[_ShapeT_co, dtype[float64]]: ... + @overload # ~inexact | timedelta64 + def var[ScalarT: inexact | timedelta64]( + self: NDArray[ScalarT], + axis: None = None, + dtype: None = None, + out: None = None, + ddof: float = 0, + *, + keepdims: L[False] | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> ScalarT: ... + @overload # ~inexact | timedelta64, axis: + def var[ScalarT: inexact | timedelta64 | object_]( + self: NDArray[ScalarT], + axis: int | tuple[int, ...], + dtype: None = None, + out: None = None, + ddof: float = 0, + *, + keepdims: L[False] | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> NDArray[ScalarT]: ... + @overload # ~inexact | timedelta64 | object_, keepdims=True + def var[ArrayT: NDArray[inexact | timedelta64 | object_]]( + self: ArrayT, + axis: int | tuple[int, ...] 
| None = None, + dtype: None = None, + out: None = None, + ddof: float = 0, + *, + keepdims: L[True], + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> ArrayT: ... + @overload # dtype: ScalarT + def var[ScalarT: generic]( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: None = None, + *, + dtype: _DTypeLike[ScalarT], + out: None = None, + ddof: float = 0, + keepdims: L[False] | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> ScalarT: ... + @overload # dtype: ScalarT (keyword), keepdims=True + def var[ScalarT: generic]( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: int | tuple[int, ...] | None = None, + *, + dtype: _DTypeLike[ScalarT], + out: None = None, + ddof: float = 0, + keepdims: L[True], + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... + @overload # dtype: ScalarT (positional), keepdims=True + def var[ScalarT: generic]( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: int | tuple[int, ...] | None, + dtype: _DTypeLike[ScalarT], + out: None = None, + ddof: float = 0, + *, + keepdims: L[True], + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... 
+ @overload # axis: , dtype: ScalarT + def var[ScalarT: generic]( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: int | tuple[int, ...], + dtype: _DTypeLike[ScalarT], + out: None = None, + ddof: float = 0, + *, + keepdims: L[False] | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> NDArray[ScalarT]: ... + @overload # out: ArrayT + def var[ArrayT: ndarray]( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: int | tuple[int, ...] | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + ddof: float = 0, + keepdims: py_bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> ArrayT: ... + @overload # fallback + def var( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: None = None, + dtype: DTypeLike | None = None, + out: None = None, + ddof: float = 0, + *, + keepdims: L[False] | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> Any: ... + @overload # fallback, axis: + def var( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: int | tuple[int, ...], + dtype: DTypeLike | None = None, + out: None = None, + ddof: float = 0, + *, + keepdims: L[False] | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> ndarray: ... + @overload # fallback, keepdims=True + def var( # pyright: ignore[reportIncompatibleMethodOverride] + self: NDArray[number | bool_ | timedelta64 | object_], + axis: int | tuple[int, ...] 
| None = None, + dtype: DTypeLike | None = None, + out: None = None, + ddof: float = 0, + *, + keepdims: L[True], + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> ndarray[_ShapeT_co]: ... + # @overload def partition( diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index 85532a6cbc8b..5457cd571185 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -1809,10 +1809,10 @@ def mean( where: _ArrayLikeBool_co | _NoValueType = ..., ) -> NDArray[Any]: ... -# -@overload +# keep in sync with `mean` above +@overload # +integer | ~object_ | +builtins.float def std( - a: _ArrayLikeComplex_co, + a: _DualArrayLike[np.dtype[np.integer | np.bool | np.object_], float], axis: None = None, dtype: None = None, out: None = None, @@ -1820,38 +1820,90 @@ def std( keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeFloat_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> np.float64: ... +@overload # +integer | +builtins.float, axis: +def std( + a: _DualArrayLike[np.dtype[np.integer | np.bool], float], + axis: int | tuple[int, ...], + dtype: None = None, + out: None = None, + ddof: float = 0, + keepdims: Literal[False] | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeFloat_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> NDArray[np.float64]: ... +@overload # +integer, keepdims=True +def std[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.integer | np.bool]], + axis: int | tuple[int, ...] | None = None, + dtype: None = None, + out: None = None, + ddof: float = 0, + *, + keepdims: Literal[True], + where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _NoValueType = ..., correction: float | _NoValueType = ..., -) -> floating: ... 
-@overload +) -> np.ndarray[ShapeT, np.dtype[np.float64]]: ... +@overload # ~complex (`list` ensures invariance to avoid overlap with the previous overload) def std( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = None, + a: _NestedSequence[list[complex]] | list[complex], + axis: None = None, dtype: None = None, out: None = None, ddof: float = 0, - keepdims: bool | _NoValueType = ..., + keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., - mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _NoValueType = ..., correction: float | _NoValueType = ..., -) -> Any: ... -@overload -def std[ScalarT: np.generic]( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None, - dtype: _DTypeLike[ScalarT], +) -> np.complex128: ... +@overload # ~inexact | timedelta64 +def std[ScalarT: np.inexact | np.timedelta64]( + a: _ArrayLike[ScalarT], + axis: None = None, + dtype: None = None, out: None = None, ddof: float = 0, keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., - mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _NoValueType = ..., correction: float | _NoValueType = ..., ) -> ScalarT: ... -@overload +@overload # ~inexact | timedelta64 | object_, axis: +def std[ScalarT: np.inexact | np.timedelta64 | np.object_]( + a: _ArrayLike[ScalarT], + axis: int | tuple[int, ...], + dtype: None = None, + out: None = None, + ddof: float = 0, + keepdims: Literal[False] | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> NDArray[ScalarT]: ... +@overload # ~inexact | timedelta64 | object_, keepdims=True +def std[ArrayT: NDArray[np.inexact | np.timedelta64 | np.object_]]( + a: ArrayT, + axis: int | tuple[int, ...] 
| None = None, + dtype: None = None, + out: None = None, + *, + ddof: float = 0, + keepdims: Literal[True], + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> ArrayT: ... +@overload # dtype: ScalarT def std[ScalarT: np.generic]( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + a: _ArrayLikeNumeric_co, axis: None = None, *, dtype: _DTypeLike[ScalarT], @@ -1859,52 +1911,118 @@ def std[ScalarT: np.generic]( ddof: float = 0, keepdims: Literal[False] | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., - mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _NoValueType = ..., correction: float | _NoValueType = ..., ) -> ScalarT: ... -@overload -def std( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = None, - dtype: DTypeLike | None = None, +@overload # dtype: ScalarT (keyword), keepdims=True +def std[ShapeT: _Shape, ScalarT: np.generic]( + a: np.ndarray[ShapeT, np.dtype[np.number | np.bool | np.timedelta64 | np.object_]], + axis: int | tuple[int, ...] | None = None, + *, + dtype: _DTypeLike[ScalarT], + out: None = None, + ddof: float = 0, + keepdims: Literal[True], + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> np.ndarray[ShapeT, np.dtype[ScalarT]]: ... +@overload # dtype: ScalarT (positional), keepdims=True +def std[ShapeT: _Shape, ScalarT: np.generic]( + a: np.ndarray[ShapeT, np.dtype[np.number | np.bool | np.timedelta64 | np.object_]], + axis: int | tuple[int, ...] 
| None, + dtype: _DTypeLike[ScalarT], out: None = None, ddof: float = 0, - keepdims: bool | _NoValueType = ..., *, + keepdims: Literal[True], where: _ArrayLikeBool_co | _NoValueType = ..., - mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _NoValueType = ..., correction: float | _NoValueType = ..., -) -> Any: ... -@overload -def std[ArrayT: np.ndarray]( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None, - dtype: DTypeLike | None, - out: ArrayT, +) -> np.ndarray[ShapeT, np.dtype[ScalarT]]: ... +@overload # dtype: ScalarT (keyword), keepdims=True +def std[ScalarT: np.generic]( + a: _ArrayLikeNumeric_co, + axis: int | tuple[int, ...] | None = None, + *, + dtype: _DTypeLike[ScalarT], + out: None = None, ddof: float = 0, - keepdims: bool | _NoValueType = ..., + keepdims: Literal[True], + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> NDArray[ScalarT]: ... +@overload # axis: , dtype: ScalarT +def std[ScalarT: np.generic]( + a: _ArrayLikeNumeric_co, + axis: int | tuple[int, ...], + dtype: _DTypeLike[ScalarT], + out: None = None, + ddof: float = 0, + keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., - mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _NoValueType = ..., correction: float | _NoValueType = ..., -) -> ArrayT: ... -@overload +) -> NDArray[ScalarT]: ... +@overload # out: ArrayT def std[ArrayT: np.ndarray]( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = None, + a: _ArrayLikeNumeric_co, + axis: int | tuple[int, ...] 
| None = None, dtype: DTypeLike | None = None, *, out: ArrayT, ddof: float = 0, keepdims: bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., - mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _NoValueType = ..., correction: float | _NoValueType = ..., ) -> ArrayT: ... +@overload # fallback +def std( + a: _ArrayLikeNumeric_co, + axis: None = None, + dtype: DTypeLike | None = None, + out: None = None, + ddof: float = 0, + keepdims: Literal[False] | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> Any: ... +@overload # fallback, axis: +def std( + a: _ArrayLikeNumeric_co, + axis: int | tuple[int, ...], + dtype: DTypeLike | None = None, + out: None = None, + ddof: float = 0, + keepdims: Literal[False] | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> NDArray[Any]: ... +@overload # fallback, keepdims=True +def std( + a: _ArrayLikeNumeric_co, + axis: int | tuple[int, ...] | None = None, + dtype: DTypeLike | None = None, + out: None = None, + ddof: float = 0, + *, + keepdims: Literal[True], + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> NDArray[Any]: ... -@overload +# keep in sync with `std` above +@overload # +integer | ~object_ | +builtins.float def var( - a: _ArrayLikeComplex_co, + a: _DualArrayLike[np.dtype[np.integer | np.bool | np.object_], float], axis: None = None, dtype: None = None, out: None = None, @@ -1912,38 +2030,90 @@ def var( keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeFloat_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> np.float64: ... 
+@overload # +integer | +builtins.float, axis: +def var( + a: _DualArrayLike[np.dtype[np.integer | np.bool], float], + axis: int | tuple[int, ...], + dtype: None = None, + out: None = None, + ddof: float = 0, + keepdims: Literal[False] | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeFloat_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> NDArray[np.float64]: ... +@overload # +integer, keepdims=True +def var[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.integer | np.bool]], + axis: int | tuple[int, ...] | None = None, + dtype: None = None, + out: None = None, + ddof: float = 0, + *, + keepdims: Literal[True], + where: _ArrayLikeBool_co | _NoValueType = ..., mean: _ArrayLikeComplex_co | _NoValueType = ..., correction: float | _NoValueType = ..., -) -> floating: ... -@overload +) -> np.ndarray[ShapeT, np.dtype[np.float64]]: ... +@overload # ~complex (`list` ensures invariance to avoid overlap with the previous overload) def var( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = None, + a: _NestedSequence[list[complex]] | list[complex], + axis: None = None, dtype: None = None, out: None = None, ddof: float = 0, - keepdims: bool | _NoValueType = ..., + keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., - mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _NoValueType = ..., correction: float | _NoValueType = ..., -) -> Any: ... -@overload -def var[ScalarT: np.generic]( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None, - dtype: _DTypeLike[ScalarT], +) -> np.complex128: ... 
+@overload # ~inexact | timedelta64 +def var[ScalarT: np.inexact | np.timedelta64]( + a: _ArrayLike[ScalarT], + axis: None = None, + dtype: None = None, out: None = None, ddof: float = 0, keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., - mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _NoValueType = ..., correction: float | _NoValueType = ..., ) -> ScalarT: ... -@overload +@overload # ~inexact | timedelta64 | object_, axis: +def var[ScalarT: np.inexact | np.timedelta64 | np.object_]( + a: _ArrayLike[ScalarT], + axis: int | tuple[int, ...], + dtype: None = None, + out: None = None, + ddof: float = 0, + keepdims: Literal[False] | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> NDArray[ScalarT]: ... +@overload # ~inexact | timedelta64 | object_, keepdims=True +def var[ArrayT: NDArray[np.inexact | np.timedelta64 | np.object_]]( + a: ArrayT, + axis: int | tuple[int, ...] | None = None, + dtype: None = None, + out: None = None, + *, + ddof: float = 0, + keepdims: Literal[True], + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> ArrayT: ... +@overload # dtype: ScalarT def var[ScalarT: np.generic]( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + a: _ArrayLikeNumeric_co, axis: None = None, *, dtype: _DTypeLike[ScalarT], @@ -1951,48 +2121,113 @@ def var[ScalarT: np.generic]( ddof: float = 0, keepdims: Literal[False] | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., - mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _NoValueType = ..., correction: float | _NoValueType = ..., ) -> ScalarT: ... 
-@overload -def var( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = None, - dtype: DTypeLike | None = None, +@overload # dtype: ScalarT (keyword), keepdims=True +def var[ShapeT: _Shape, ScalarT: np.generic]( + a: np.ndarray[ShapeT, np.dtype[np.number | np.bool | np.timedelta64 | np.object_]], + axis: int | tuple[int, ...] | None = None, + *, + dtype: _DTypeLike[ScalarT], + out: None = None, + ddof: float = 0, + keepdims: Literal[True], + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> np.ndarray[ShapeT, np.dtype[ScalarT]]: ... +@overload # dtype: ScalarT (positional), keepdims=True +def var[ShapeT: _Shape, ScalarT: np.generic]( + a: np.ndarray[ShapeT, np.dtype[np.number | np.bool | np.timedelta64 | np.object_]], + axis: int | tuple[int, ...] | None, + dtype: _DTypeLike[ScalarT], out: None = None, ddof: float = 0, - keepdims: bool | _NoValueType = ..., *, + keepdims: Literal[True], where: _ArrayLikeBool_co | _NoValueType = ..., - mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _NoValueType = ..., correction: float | _NoValueType = ..., -) -> Any: ... -@overload -def var[ArrayT: np.ndarray]( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None, - dtype: DTypeLike | None, - out: ArrayT, +) -> np.ndarray[ShapeT, np.dtype[ScalarT]]: ... +@overload # dtype: ScalarT (keyword), keepdims=True +def var[ScalarT: np.generic]( + a: _ArrayLikeNumeric_co, + axis: int | tuple[int, ...] | None = None, + *, + dtype: _DTypeLike[ScalarT], + out: None = None, ddof: float = 0, - keepdims: bool | _NoValueType = ..., + keepdims: Literal[True], + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> NDArray[ScalarT]: ... 
+@overload # axis: , dtype: ScalarT +def var[ScalarT: np.generic]( + a: _ArrayLikeNumeric_co, + axis: int | tuple[int, ...], + dtype: _DTypeLike[ScalarT], + out: None = None, + ddof: float = 0, + keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., - mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _NoValueType = ..., correction: float | _NoValueType = ..., -) -> ArrayT: ... -@overload +) -> NDArray[ScalarT]: ... +@overload # out: ArrayT def var[ArrayT: np.ndarray]( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = None, + a: _ArrayLikeNumeric_co, + axis: int | tuple[int, ...] | None = None, dtype: DTypeLike | None = None, *, out: ArrayT, ddof: float = 0, keepdims: bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., - mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _NoValueType = ..., correction: float | _NoValueType = ..., ) -> ArrayT: ... +@overload # fallback +def var( + a: _ArrayLikeNumeric_co, + axis: None = None, + dtype: DTypeLike | None = None, + out: None = None, + ddof: float = 0, + keepdims: Literal[False] | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> Any: ... +@overload # fallback, axis: +def var( + a: _ArrayLikeNumeric_co, + axis: int | tuple[int, ...], + dtype: DTypeLike | None = None, + out: None = None, + ddof: float = 0, + keepdims: Literal[False] | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> NDArray[Any]: ... +@overload # fallback, keepdims=True +def var( + a: _ArrayLikeNumeric_co, + axis: int | tuple[int, ...] 
| None = None, + dtype: DTypeLike | None = None, + out: None = None, + ddof: float = 0, + *, + keepdims: Literal[True], + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> NDArray[Any]: ... max = amax min = amin diff --git a/numpy/typing/tests/data/reveal/fromnumeric.pyi b/numpy/typing/tests/data/reveal/fromnumeric.pyi index 525826a9dc53..a995bce2d6f2 100644 --- a/numpy/typing/tests/data/reveal/fromnumeric.pyi +++ b/numpy/typing/tests/data/reveal/fromnumeric.pyi @@ -372,24 +372,64 @@ assert_type(np.mean(AR_f4_1d, dtype=np.float64, keepdims=True), np.ndarray[tuple assert_type(np.mean(AR_f4_2d, dtype=np.float64, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float64]]) assert_type(np.mean(AR_f4, out=AR_subclass), NDArraySubclass) -assert_type(np.std(AR_b), np.floating) -assert_type(np.std(AR_i8), np.floating) -assert_type(np.std(AR_f4), np.floating) -assert_type(np.std(AR_c16), np.floating) -assert_type(np.std(AR_O), Any) -assert_type(np.std(AR_f4, axis=0), Any) -assert_type(np.std(AR_f4, keepdims=True), Any) +# same as above +assert_type(np.std(AR_b), np.float64) +assert_type(np.std(AR_i8), np.float64) +assert_type(np.std(AR_i8, axis=0), npt.NDArray[np.float64]) +assert_type(np.std(AR_i8, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.std(AR_i8, axis=0, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.std(AR_f4), np.float32) +assert_type(np.std(AR_c16), np.complex128) +assert_type(np.std(AR_O), np.float64) +assert_type(np.std(AR_O, axis=0), npt.NDArray[np.object_]) +assert_type(np.std(AR_O, keepdims=True), npt.NDArray[np.object_]) +assert_type(np.std(AR_O, axis=0, keepdims=True), npt.NDArray[np.object_]) +assert_type(np.std(AR_m_ns), np.timedelta64[int]) +assert_type(np.std(AR_m_s), np.timedelta64[dt.timedelta]) +assert_type(np.std(AR_m_nat), np.timedelta64[None]) +assert_type(np.std(AR_f4, axis=0), npt.NDArray[np.float32]) 
+assert_type(np.std(AR_f4, keepdims=True), npt.NDArray[np.float32]) +assert_type(np.std(AR_f4_1d, keepdims=True), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.std(AR_f4_2d, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float32]]) assert_type(np.std(AR_f4, dtype=float), Any) +assert_type(np.std(AR_f4, dtype=float, axis=1), np.ndarray) +assert_type(np.std(AR_f4, dtype=float, keepdims=True), np.ndarray) +assert_type(np.std(AR_f4, dtype=float, axis=1, keepdims=True), np.ndarray) assert_type(np.std(AR_f4, dtype=np.float64), np.float64) +assert_type(np.std(AR_f4, dtype=np.float64), np.float64) +assert_type(np.std(AR_f4, dtype=np.float64, keepdims=False), np.float64) +assert_type(np.std(AR_f4, dtype=np.float64, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.std(AR_f4_1d, dtype=np.float64, keepdims=True), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.std(AR_f4_2d, dtype=np.float64, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float64]]) assert_type(np.std(AR_f4, out=AR_subclass), NDArraySubclass) -assert_type(np.var(AR_b), np.floating) -assert_type(np.var(AR_i8), np.floating) -assert_type(np.var(AR_f4), np.floating) -assert_type(np.var(AR_c16), np.floating) -assert_type(np.var(AR_O), Any) -assert_type(np.var(AR_f4, axis=0), Any) -assert_type(np.var(AR_f4, keepdims=True), Any) +# same as above +assert_type(np.var(AR_b), np.float64) +assert_type(np.var(AR_i8), np.float64) +assert_type(np.var(AR_i8, axis=0), npt.NDArray[np.float64]) +assert_type(np.var(AR_i8, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.var(AR_i8, axis=0, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.var(AR_f4), np.float32) +assert_type(np.var(AR_c16), np.complex128) +assert_type(np.var(AR_O), np.float64) +assert_type(np.var(AR_O, axis=0), npt.NDArray[np.object_]) +assert_type(np.var(AR_O, keepdims=True), npt.NDArray[np.object_]) +assert_type(np.var(AR_O, axis=0, keepdims=True), npt.NDArray[np.object_]) 
+assert_type(np.var(AR_m_ns), np.timedelta64[int]) +assert_type(np.var(AR_m_s), np.timedelta64[dt.timedelta]) +assert_type(np.var(AR_m_nat), np.timedelta64[None]) +assert_type(np.var(AR_f4, axis=0), npt.NDArray[np.float32]) +assert_type(np.var(AR_f4, keepdims=True), npt.NDArray[np.float32]) +assert_type(np.var(AR_f4_1d, keepdims=True), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.var(AR_f4_2d, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float32]]) assert_type(np.var(AR_f4, dtype=float), Any) +assert_type(np.var(AR_f4, dtype=float, axis=1), np.ndarray) +assert_type(np.var(AR_f4, dtype=float, keepdims=True), np.ndarray) +assert_type(np.var(AR_f4, dtype=float, axis=1, keepdims=True), np.ndarray) +assert_type(np.var(AR_f4, dtype=np.float64), np.float64) assert_type(np.var(AR_f4, dtype=np.float64), np.float64) +assert_type(np.var(AR_f4, dtype=np.float64, keepdims=False), np.float64) +assert_type(np.var(AR_f4, dtype=np.float64, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.var(AR_f4_1d, dtype=np.float64, keepdims=True), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.var(AR_f4_2d, dtype=np.float64, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float64]]) assert_type(np.var(AR_f4, out=AR_subclass), NDArraySubclass) diff --git a/numpy/typing/tests/data/reveal/ndarray_misc.pyi b/numpy/typing/tests/data/reveal/ndarray_misc.pyi index 89560d5d74db..6d0563f0c3cb 100644 --- a/numpy/typing/tests/data/reveal/ndarray_misc.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_misc.pyi @@ -127,6 +127,46 @@ assert_type(AR_f8_2d.mean(dtype=np.float32, axis=0), npt.NDArray[np.float32]) assert_type(AR_f8_2d.mean(dtype=np.float32, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float32]]) assert_type(AR_f8_2d.mean(dtype=np.float32, axis=0, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float32]]) +# same as above +assert_type(f8.std(), Any) +assert_type(AR_f8.std(), np.float64) 
+assert_type(AR_f8.std(keepdims=True), npt.NDArray[np.float64]) +assert_type(AR_f8.std(axis=0), npt.NDArray[np.float64]) +assert_type(AR_f8.std(axis=0, keepdims=True), npt.NDArray[np.float64]) +assert_type(AR_f8.std(dtype=np.float32), np.float32) +assert_type(AR_f8.std(dtype=np.float32, axis=0), npt.NDArray[np.float32]) +assert_type(AR_f8.std(dtype=np.float32, keepdims=True), npt.NDArray[np.float32]) +assert_type(AR_f8.std(dtype=np.float32, axis=0, keepdims=True), npt.NDArray[np.float32]) +assert_type(AR_f8.std(out=B), SubClass) +assert_type(AR_f8_2d.std(), np.float64) +assert_type(AR_f8_2d.std(keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(AR_f8_2d.std(axis=0), npt.NDArray[np.float64]) +assert_type(AR_f8_2d.std(axis=0, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(AR_f8_2d.std(dtype=np.float32), np.float32) +assert_type(AR_f8_2d.std(dtype=np.float32, axis=0), npt.NDArray[np.float32]) +assert_type(AR_f8_2d.std(dtype=np.float32, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float32]]) +assert_type(AR_f8_2d.std(dtype=np.float32, axis=0, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float32]]) + +# same as above +assert_type(f8.var(), Any) +assert_type(AR_f8.var(), np.float64) +assert_type(AR_f8.var(keepdims=True), npt.NDArray[np.float64]) +assert_type(AR_f8.var(axis=0), npt.NDArray[np.float64]) +assert_type(AR_f8.var(axis=0, keepdims=True), npt.NDArray[np.float64]) +assert_type(AR_f8.var(dtype=np.float32), np.float32) +assert_type(AR_f8.var(dtype=np.float32, axis=0), npt.NDArray[np.float32]) +assert_type(AR_f8.var(dtype=np.float32, keepdims=True), npt.NDArray[np.float32]) +assert_type(AR_f8.var(dtype=np.float32, axis=0, keepdims=True), npt.NDArray[np.float32]) +assert_type(AR_f8.var(out=B), SubClass) +assert_type(AR_f8_2d.var(), np.float64) +assert_type(AR_f8_2d.var(keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(AR_f8_2d.var(axis=0), 
npt.NDArray[np.float64]) +assert_type(AR_f8_2d.var(axis=0, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(AR_f8_2d.var(dtype=np.float32), np.float32) +assert_type(AR_f8_2d.var(dtype=np.float32, axis=0), npt.NDArray[np.float32]) +assert_type(AR_f8_2d.var(dtype=np.float32, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float32]]) +assert_type(AR_f8_2d.var(dtype=np.float32, axis=0, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float32]]) + assert_type(f8.min(), Any) assert_type(AR_f8.min(), Any) assert_type(AR_f8.min(axis=0), Any) @@ -149,13 +189,6 @@ assert_type(AR_f8.repeat(1), np.ndarray[tuple[int], np.dtype[np.float64]]) assert_type(AR_f8.repeat(1, axis=0), npt.NDArray[np.float64]) assert_type(B.repeat(1), np.ndarray[tuple[int], np.dtype[np.object_]]) assert_type(B.repeat(1, axis=0), npt.NDArray[np.object_]) - -assert_type(f8.std(), Any) -assert_type(AR_f8.std(), Any) -assert_type(AR_f8.std(axis=0), Any) -assert_type(AR_f8.std(keepdims=True), Any) -assert_type(AR_f8.std(out=B), SubClass) - assert_type(f8.sum(), Any) assert_type(AR_f8.sum(), Any) assert_type(AR_f8.sum(axis=0), Any) @@ -168,12 +201,6 @@ assert_type(AR_f8.take([0]), npt.NDArray[np.float64]) assert_type(AR_f8.take(0, out=B), SubClass) assert_type(AR_f8.take([0], out=B), SubClass) -assert_type(f8.var(), Any) -assert_type(AR_f8.var(), Any) -assert_type(AR_f8.var(axis=0), Any) -assert_type(AR_f8.var(keepdims=True), Any) -assert_type(AR_f8.var(out=B), SubClass) - assert_type(AR_f8.argpartition(0), npt.NDArray[np.intp]) assert_type(AR_f8.argpartition([0]), npt.NDArray[np.intp]) assert_type(AR_f8.argpartition(0, axis=None), np.ndarray[tuple[int], np.dtype[np.intp]]) From d6dc07e5887bd16b9ab2f53911b1d1e2c493b0ff Mon Sep 17 00:00:00 2001 From: Amir Sarabadani Date: Sun, 5 Apr 2026 16:42:02 +0200 Subject: [PATCH 1593/1718] DOC: Fix typos This is done via a script I wrote to report typos and after I checked them multiple times manually, I applied the correct 
ones. --- numpy/_core/einsumfunc.py | 2 +- numpy/_core/tests/test_simd.py | 4 ++-- numpy/f2py/tests/util.py | 6 +++--- numpy/lib/_npyio_impl.py | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/numpy/_core/einsumfunc.py b/numpy/_core/einsumfunc.py index ea9f5630cbbe..7b19808a94f2 100644 --- a/numpy/_core/einsumfunc.py +++ b/numpy/_core/einsumfunc.py @@ -968,7 +968,7 @@ def _parse_eq_to_pure_multiplication(a_term, shape_a, b_term, shape_b, out): @functools.lru_cache(2**12) def _parse_eq_to_batch_matmul(eq, shape_a, shape_b): """Cached parsing of a two term einsum equation into the necessary - sequence of arguments for contracttion via batched matrix multiplication. + sequence of arguments for contraction via batched matrix multiplication. The steps we need to specify are: 1. Remove repeated and trivial indices from the left and right terms, diff --git a/numpy/_core/tests/test_simd.py b/numpy/_core/tests/test_simd.py index 335abc98c84e..e0979925384d 100644 --- a/numpy/_core/tests/test_simd.py +++ b/numpy/_core/tests/test_simd.py @@ -1227,8 +1227,8 @@ def trunc_div(a, d): continue dividend = self.load(self._data(dividend)) data_divc = [trunc_div(a, divisor) for a in dividend] - divisor_parms = self.divisor(divisor) - divc = self.divc(dividend, divisor_parms) + divisor_params = self.divisor(divisor) + divc = self.divc(dividend, divisor_params) assert divc == data_divc def test_arithmetic_reduce_sum(self): diff --git a/numpy/f2py/tests/util.py b/numpy/f2py/tests/util.py index 220e092ebbad..6f91cc318dbc 100644 --- a/numpy/f2py/tests/util.py +++ b/numpy/f2py/tests/util.py @@ -169,7 +169,7 @@ def get_temp_module_name(): return name -def _memoize(func): +def _memorize(func): memo = {} def wrapper(*a, **kw): @@ -194,7 +194,7 @@ def wrapper(*a, **kw): # -@_memoize +@_memorize def build_module(source_files, options=[], skip=[], only=[], module_name=None): """ Compile and import a f2py module, built from the given files. 
@@ -273,7 +273,7 @@ def build_module(source_files, options=[], skip=[], only=[], module_name=None): return import_module(module_name) -@_memoize +@_memorize def build_code(source_code, options=[], skip=[], diff --git a/numpy/lib/_npyio_impl.py b/numpy/lib/_npyio_impl.py index 34e5985ea2a2..0ea5da929895 100644 --- a/numpy/lib/_npyio_impl.py +++ b/numpy/lib/_npyio_impl.py @@ -841,7 +841,7 @@ def _preprocess_comments(iterable, comments, encoding): Generator that consumes a line iterated iterable and strips out the multiple (or multi-character) comments from lines. This is a pre-processing step to achieve feature parity with loadtxt - (we assume that this feature is a nieche feature). + (we assume that this feature is a niche feature). """ for line in iterable: if isinstance(line, bytes): From b2b8d7cdd364ce92500eec1ad6c4d9c6ce97ea85 Mon Sep 17 00:00:00 2001 From: Amir Sarabadani Date: Sun, 5 Apr 2026 17:31:09 +0200 Subject: [PATCH 1594/1718] Revert memoize change It is correct: https://en.wikipedia.org/wiki/Memoization --- numpy/f2py/tests/util.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/f2py/tests/util.py b/numpy/f2py/tests/util.py index 6f91cc318dbc..220e092ebbad 100644 --- a/numpy/f2py/tests/util.py +++ b/numpy/f2py/tests/util.py @@ -169,7 +169,7 @@ def get_temp_module_name(): return name -def _memorize(func): +def _memoize(func): memo = {} def wrapper(*a, **kw): @@ -194,7 +194,7 @@ def wrapper(*a, **kw): # -@_memorize +@_memoize def build_module(source_files, options=[], skip=[], only=[], module_name=None): """ Compile and import a f2py module, built from the given files. 
@@ -273,7 +273,7 @@ def build_module(source_files, options=[], skip=[], only=[], module_name=None): return import_module(module_name) -@_memorize +@_memoize def build_code(source_code, options=[], skip=[], From 33ea714bab8165cf028a3aadfec2a7c78026dc0a Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Mon, 6 Apr 2026 03:49:41 +0200 Subject: [PATCH 1595/1718] TYP: ``sum`` and ``prod`` shape-typing and improved dtypes (#31160) --- numpy/__init__.pyi | 593 +++++++++++++++ numpy/_core/fromnumeric.pyi | 706 ++++++++++++------ numpy/typing/tests/data/fail/fromnumeric.pyi | 2 +- .../typing/tests/data/reveal/fromnumeric.pyi | 93 ++- .../typing/tests/data/reveal/ndarray_misc.pyi | 78 +- 5 files changed, 1213 insertions(+), 259 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 9700a5a0b08c..bffaee123f10 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2342,6 +2342,364 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): where: _ArrayLikeBool_co = True, ) -> ArrayT: ... + # keep in sync with `sum` below (but without `timedelta64`) + @override # type: ignore[override] + @overload # ~number + def prod[ScalarT: number]( + self: NDArray[ScalarT], + axis: None = None, + dtype: None = None, + out: None = None, + *, + keepdims: L[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ScalarT: ... + @overload # ~number, axis: + def prod[ScalarT: number | object_]( + self: NDArray[ScalarT], + axis: int | tuple[int, ...], + dtype: None = None, + out: None = None, + *, + keepdims: L[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> NDArray[ScalarT]: ... + @overload # ~number | object_, keepdims=True + def prod[ArrayT: NDArray[number | object_]]( + self: ArrayT, + axis: int | tuple[int, ...] 
| None = None, + dtype: None = None, + out: None = None, + *, + keepdims: L[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ArrayT: ... + @overload # bool_ + def prod( + self: NDArray[bool_], + axis: None = None, + dtype: None = None, + out: None = None, + *, + keepdims: L[False] | _NoValueType = ..., + initial: _IntLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> int_: ... + @overload # bool_, axis: + def prod( + self: NDArray[bool_], + axis: int | tuple[int, ...], + dtype: None = None, + out: None = None, + *, + keepdims: L[False] | _NoValueType = ..., + initial: _IntLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> NDArray[int_]: ... + @overload # bool_, keepdims=True + def prod( + self: NDArray[bool_], + axis: int | tuple[int, ...] | None = None, + dtype: None = None, + out: None = None, + *, + keepdims: L[True], + initial: _IntLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ndarray[_ShapeT_co, dtype[int_]]: ... + @overload # object_ + def prod( + self: NDArray[object_], + axis: None = None, + dtype: None = None, + out: None = None, + *, + keepdims: L[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> Any: ... + @overload # dtype: ScalarT + def prod[ScalarT: generic]( + self: NDArray[number | bool_ | object_], + axis: None = None, + *, + dtype: _DTypeLike[ScalarT], + out: None = None, + keepdims: L[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ScalarT: ... + @overload # dtype: ScalarT (keyword), keepdims=True + def prod[ScalarT: generic]( + self: NDArray[number | bool_ | object_], + axis: int | tuple[int, ...] 
| None = None, + *, + dtype: _DTypeLike[ScalarT], + out: None = None, + keepdims: L[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... + @overload # dtype: ScalarT (positional), keepdims=True + def prod[ScalarT: generic]( + self: NDArray[number | bool_ | object_], + axis: int | tuple[int, ...] | None, + dtype: _DTypeLike[ScalarT], + out: None = None, + *, + keepdims: L[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... + @overload # axis: , dtype: ScalarT + def prod[ScalarT: generic]( + self: NDArray[number | bool_ | object_], + axis: int | tuple[int, ...], + dtype: _DTypeLike[ScalarT], + out: None = None, + *, + keepdims: L[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> NDArray[ScalarT]: ... + @overload # out: ArrayT (keyword) + def prod[ArrayT: ndarray]( + self: NDArray[number | bool_ | object_], + axis: int | tuple[int, ...] | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + keepdims: py_bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ArrayT: ... + @overload # out: ArrayT (positional) + def prod[ArrayT: ndarray]( + self: NDArray[number | bool_ | object_], + axis: int | tuple[int, ...] | None, + dtype: DTypeLike | None, + out: ArrayT, + *, + keepdims: py_bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ArrayT: ... 
+ @overload # fallback + def prod( + self: NDArray[number | bool_ | object_], + axis: None = None, + dtype: DTypeLike | None = None, + out: None = None, + *, + keepdims: L[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> Any: ... + @overload # fallback, axis: + def prod( + self: NDArray[number | bool_ | object_], + axis: int | tuple[int, ...], + dtype: DTypeLike | None = None, + out: None = None, + *, + keepdims: L[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ndarray: ... + @overload # fallback, keepdims=True + def prod( # pyright: ignore[reportIncompatibleMethodOverride] + self: NDArray[number | bool_ | object_], + axis: int | tuple[int, ...] | None = None, + dtype: DTypeLike | None = None, + out: None = None, + *, + keepdims: L[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ndarray[_ShapeT_co]: ... + + # keep in sync with `prod` above (but also accept `timedelta64`) + @override # type: ignore[override] + @overload # ~number | timedelta64 + def sum[ScalarT: number | timedelta64]( + self: NDArray[ScalarT], + axis: None = None, + dtype: None = None, + out: None = None, + *, + keepdims: L[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ScalarT: ... + @overload # ~number | timedelta64, axis: + def sum[ScalarT: number | timedelta64 | object_]( + self: NDArray[ScalarT], + axis: int | tuple[int, ...], + dtype: None = None, + out: None = None, + *, + keepdims: L[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> NDArray[ScalarT]: ... 
+ @overload # ~number | timedelta64 | object_, keepdims=True + def sum[ArrayT: NDArray[number | timedelta64 | object_]]( + self: ArrayT, + axis: int | tuple[int, ...] | None = None, + dtype: None = None, + out: None = None, + *, + keepdims: L[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ArrayT: ... + @overload # bool_ + def sum( + self: NDArray[bool_], + axis: None = None, + dtype: None = None, + out: None = None, + *, + keepdims: L[False] | _NoValueType = ..., + initial: _IntLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> int_: ... + @overload # bool_, axis: + def sum( + self: NDArray[bool_], + axis: int | tuple[int, ...], + dtype: None = None, + out: None = None, + *, + keepdims: L[False] | _NoValueType = ..., + initial: _IntLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> NDArray[int_]: ... + @overload # bool_, keepdims=True + def sum( + self: NDArray[bool_], + axis: int | tuple[int, ...] | None = None, + dtype: None = None, + out: None = None, + *, + keepdims: L[True], + initial: _IntLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ndarray[_ShapeT_co, dtype[int_]]: ... + @overload # object_ + def sum( + self: NDArray[object_], + axis: None = None, + dtype: None = None, + out: None = None, + *, + keepdims: L[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> Any: ... + @overload # dtype: ScalarT + def sum[ScalarT: generic]( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: None = None, + *, + dtype: _DTypeLike[ScalarT], + out: None = None, + keepdims: L[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ScalarT: ... 
+ @overload # dtype: ScalarT (keyword), keepdims=True + def sum[ScalarT: generic]( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: int | tuple[int, ...] | None = None, + *, + dtype: _DTypeLike[ScalarT], + out: None = None, + keepdims: L[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... + @overload # dtype: ScalarT (positional), keepdims=True + def sum[ScalarT: generic]( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: int | tuple[int, ...] | None, + dtype: _DTypeLike[ScalarT], + out: None = None, + *, + keepdims: L[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ndarray[_ShapeT_co, dtype[ScalarT]]: ... + @overload # axis: , dtype: ScalarT + def sum[ScalarT: generic]( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: int | tuple[int, ...], + dtype: _DTypeLike[ScalarT], + out: None = None, + *, + keepdims: L[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> NDArray[ScalarT]: ... + @overload # out: ArrayT (keyword) + def sum[ArrayT: ndarray]( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: int | tuple[int, ...] | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + keepdims: py_bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ArrayT: ... + @overload # out: ArrayT (positional) + def sum[ArrayT: ndarray]( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: int | tuple[int, ...] | None, + dtype: DTypeLike | None, + out: ArrayT, + *, + keepdims: py_bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ArrayT: ... 
+ @overload # fallback + def sum( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: None = None, + dtype: DTypeLike | None = None, + out: None = None, + *, + keepdims: L[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> Any: ... + @overload # fallback, axis: + def sum( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: int | tuple[int, ...], + dtype: DTypeLike | None = None, + out: None = None, + *, + keepdims: L[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ndarray: ... + @overload # fallback, keepdims=True + def sum( # pyright: ignore[reportIncompatibleMethodOverride] + self: NDArray[number | bool_ | timedelta64 | object_], + axis: int | tuple[int, ...] | None = None, + dtype: DTypeLike | None = None, + out: None = None, + *, + keepdims: L[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ndarray[_ShapeT_co]: ... + # @override # type: ignore[override] @overload # +integer | ~object_ @@ -4589,6 +4947,100 @@ class number(generic[_NumberItemT_co], Generic[_NBitT, _NumberItemT_co]): @overload def __ge__(self, other: _SupportsLE, /) -> bool_: ... + # keep in sync with `number.sum` + @override # type: ignore[override] + @overload # out: None (default) + def prod( + self, + axis: _ShapeLike | None = None, + dtype: None = None, + out: None = None, + *, + keepdims: py_bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> Self: ... + @overload # dtype: (keyword) + def prod( + self, + axis: _ShapeLike | None = None, + *, + dtype: DTypeLike, + out: None = None, + keepdims: py_bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> Any: ... 
+ @overload # dtype: (positional) + def prod( + self, + axis: _ShapeLike | None, + dtype: DTypeLike, + out: None = None, + *, + keepdims: py_bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> Any: ... + @overload # out: + def prod[ArrayT: ndarray]( # pyright: ignore[reportIncompatibleMethodOverride] + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + keepdims: py_bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ArrayT: ... + + # keep in sync with `number.prod` + @override # type: ignore[override] + @overload # out: None (default) + def sum( + self, + axis: _ShapeLike | None = None, + dtype: None = None, + out: None = None, + *, + keepdims: py_bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> Self: ... + @overload # dtype: (keyword) + def sum( + self, + axis: _ShapeLike | None = None, + *, + dtype: DTypeLike, + out: None = None, + keepdims: py_bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> Any: ... + @overload # dtype: (positional) + def sum( + self, + axis: _ShapeLike | None, + dtype: DTypeLike, + out: None = None, + *, + keepdims: py_bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> Any: ... + @overload # out: + def sum[ArrayT: ndarray]( # pyright: ignore[reportIncompatibleMethodOverride] + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + keepdims: py_bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ArrayT: ... 
+ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @property def itemsize(self) -> L[1]: ... @@ -4884,6 +5336,100 @@ class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): @overload def __ge__(self, other: _SupportsLE, /) -> bool_: ... + # keep in sync with `bool.sum` + @override # type: ignore[override] + @overload # out: None (default) + def prod( + self, + axis: _ShapeLike | None = None, + dtype: None = None, + out: None = None, + *, + keepdims: py_bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> int_: ... + @overload # dtype: (keyword) + def prod( + self, + axis: _ShapeLike | None = None, + *, + dtype: DTypeLike, + out: None = None, + keepdims: py_bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> Any: ... + @overload # dtype: (positional) + def prod( + self, + axis: _ShapeLike | None, + dtype: DTypeLike, + out: None = None, + *, + keepdims: py_bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> Any: ... + @overload # out: + def prod[ArrayT: ndarray]( # pyright: ignore[reportIncompatibleMethodOverride] + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + keepdims: py_bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ArrayT: ... + + # keep in sync with `bool.prod` + @override # type: ignore[override] + @overload # out: None (default) + def sum( + self, + axis: _ShapeLike | None = None, + dtype: None = None, + out: None = None, + *, + keepdims: py_bool | _NoValueType = ..., + initial: _IntLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> int_: ... 
+ @overload # dtype: (keyword) + def sum( + self, + axis: _ShapeLike | None = None, + *, + dtype: DTypeLike, + out: None = None, + keepdims: py_bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> Any: ... + @overload # dtype: (positional) + def sum( + self, + axis: _ShapeLike | None, + dtype: DTypeLike, + out: None = None, + *, + keepdims: py_bool | _NoValueType = ..., + initial: _IntLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> Any: ... + @overload # out: + def sum[ArrayT: ndarray]( # pyright: ignore[reportIncompatibleMethodOverride] + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + keepdims: py_bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ArrayT: ... + # NOTE: This should _not_ be `Final[_]`, `_: TypeAlias`, or `type _` bool_ = bool @@ -6231,6 +6777,53 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] @overload def __ge__(self, other: _SupportsLE, /) -> bool_: ... + # keep in sync with `number.sum` + @override # type: ignore[override] + @overload # out: None (default) + def sum( + self, + axis: _ShapeLike | None = None, + dtype: None = None, + out: None = None, + *, + keepdims: py_bool | _NoValueType = ..., + initial: _IntLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> Self: ... + @overload # dtype: (keyword) + def sum( + self, + axis: _ShapeLike | None = None, + *, + dtype: DTypeLike, + out: None = None, + keepdims: py_bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> Any: ... 
+ @overload # dtype: (positional) + def sum( + self, + axis: _ShapeLike | None, + dtype: DTypeLike, + out: None = None, + *, + keepdims: py_bool | _NoValueType = ..., + initial: _IntLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> Any: ... + @overload # out: + def sum[ArrayT: ndarray]( # pyright: ignore[reportIncompatibleMethodOverride] + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + keepdims: py_bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ArrayT: ... + class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]): @property def itemsize(self) -> L[8]: ... diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index 5457cd571185..2979e7fbaa74 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -21,14 +21,9 @@ from numpy import ( _PartitionKind, _SortKind, _SortSide, - complexfloating, float16, - floating, - int64, - int_, intp, object_, - uint64, ) from numpy._globals import _NoValueType from numpy._typing import ( @@ -43,10 +38,10 @@ from numpy._typing import ( _ArrayLikeInt, _ArrayLikeInt_co, _ArrayLikeObject_co, - _ArrayLikeUInt_co, _BoolLike_co, _ComplexLike_co, _DTypeLike, + _FloatLike_co, _IntLike_co, _NestedSequence, _NumberLike_co, @@ -139,6 +134,7 @@ type _ToArray1D[ScalarT: np.generic] = _Array1D[ScalarT] | Sequence[ScalarT] type _ToArray2D[ScalarT: np.generic] = _Array2D[ScalarT] | Sequence[Sequence[ScalarT]] type _ToArray3D[ScalarT: np.generic] = _Array3D[ScalarT] | Sequence[Sequence[Sequence[ScalarT]]] +type _ArrayLikeMultiplicative_co = _DualArrayLike[np.dtype[np.number | np.bool | np.object_], complex] type _ArrayLikeNumeric_co = _DualArrayLike[np.dtype[np.number | np.bool | np.object_ | np.timedelta64], complex] ### @@ -816,100 +812,6 @@ def clip( **kwargs: Unpack[_UFuncKwargs], ) -> Any: ... 
-@overload -def sum[ScalarT: np.generic]( - a: _ArrayLike[ScalarT], - axis: None = None, - dtype: None = None, - out: None = None, - keepdims: Literal[False] | _NoValueType = ..., - initial: _NumberLike_co | _NoValueType = ..., - where: _ArrayLikeBool_co | _NoValueType = ..., -) -> ScalarT: ... -@overload -def sum[ScalarT: np.generic]( - a: _ArrayLike[ScalarT], - axis: None = None, - dtype: None = None, - out: None = None, - keepdims: bool | _NoValueType = ..., - initial: _NumberLike_co | _NoValueType = ..., - where: _ArrayLikeBool_co | _NoValueType = ..., -) -> ScalarT | NDArray[ScalarT]: ... -@overload -def sum[ScalarT: np.generic]( - a: ArrayLike, - axis: None, - dtype: _DTypeLike[ScalarT], - out: None = None, - keepdims: Literal[False] | _NoValueType = ..., - initial: _NumberLike_co | _NoValueType = ..., - where: _ArrayLikeBool_co | _NoValueType = ..., -) -> ScalarT: ... -@overload -def sum[ScalarT: np.generic]( - a: ArrayLike, - axis: None = None, - *, - dtype: _DTypeLike[ScalarT], - out: None = None, - keepdims: Literal[False] | _NoValueType = ..., - initial: _NumberLike_co | _NoValueType = ..., - where: _ArrayLikeBool_co | _NoValueType = ..., -) -> ScalarT: ... -@overload -def sum[ScalarT: np.generic]( - a: ArrayLike, - axis: _ShapeLike | None, - dtype: _DTypeLike[ScalarT], - out: None = None, - keepdims: bool | _NoValueType = ..., - initial: _NumberLike_co | _NoValueType = ..., - where: _ArrayLikeBool_co | _NoValueType = ..., -) -> ScalarT | NDArray[ScalarT]: ... -@overload -def sum[ScalarT: np.generic]( - a: ArrayLike, - axis: _ShapeLike | None = None, - *, - dtype: _DTypeLike[ScalarT], - out: None = None, - keepdims: bool | _NoValueType = ..., - initial: _NumberLike_co | _NoValueType = ..., - where: _ArrayLikeBool_co | _NoValueType = ..., -) -> ScalarT | NDArray[ScalarT]: ... 
-@overload -def sum( - a: ArrayLike, - axis: _ShapeLike | None = None, - dtype: DTypeLike | None = None, - out: None = None, - keepdims: bool | _NoValueType = ..., - initial: _NumberLike_co | _NoValueType = ..., - where: _ArrayLikeBool_co | _NoValueType = ..., -) -> Any: ... -@overload -def sum[ArrayT: np.ndarray]( - a: ArrayLike, - axis: _ShapeLike | None, - dtype: DTypeLike | None, - out: ArrayT, - keepdims: bool | _NoValueType = ..., - initial: _NumberLike_co | _NoValueType = ..., - where: _ArrayLikeBool_co | _NoValueType = ..., -) -> ArrayT: ... -@overload -def sum[ArrayT: np.ndarray]( - a: ArrayLike, - axis: _ShapeLike | None = None, - dtype: DTypeLike | None = None, - *, - out: ArrayT, - keepdims: bool | _NoValueType = ..., - initial: _NumberLike_co | _NoValueType = ..., - where: _ArrayLikeBool_co | _NoValueType = ..., -) -> ArrayT: ... - # keep in sync with `any` @overload def all( @@ -1282,126 +1184,6 @@ def amin[ArrayT: np.ndarray]( where: _ArrayLikeBool_co | _NoValueType = ..., ) -> ArrayT: ... -# TODO: `np.prod()``: For object arrays `initial` does not necessarily -# have to be a numerical scalar. -# The only requirement is that it is compatible -# with the `.__mul__()` method(s) of the passed array's elements. -# Note that the same situation holds for all wrappers around -# `np.ufunc.reduce`, e.g. `np.sum()` (`.__add__()`). -# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 -@overload -def prod( - a: _ArrayLikeBool_co, - axis: None = None, - dtype: None = None, - out: None = None, - keepdims: Literal[False] | _NoValueType = ..., - initial: _NumberLike_co | _NoValueType = ..., - where: _ArrayLikeBool_co | _NoValueType = ..., -) -> int_: ... -@overload -def prod( - a: _ArrayLikeUInt_co, - axis: None = None, - dtype: None = None, - out: None = None, - keepdims: Literal[False] | _NoValueType = ..., - initial: _NumberLike_co | _NoValueType = ..., - where: _ArrayLikeBool_co | _NoValueType = ..., -) -> uint64: ... 
-@overload -def prod( - a: _ArrayLikeInt_co, - axis: None = None, - dtype: None = None, - out: None = None, - keepdims: Literal[False] | _NoValueType = ..., - initial: _NumberLike_co | _NoValueType = ..., - where: _ArrayLikeBool_co | _NoValueType = ..., -) -> int64: ... -@overload -def prod( - a: _ArrayLikeFloat_co, - axis: None = None, - dtype: None = None, - out: None = None, - keepdims: Literal[False] | _NoValueType = ..., - initial: _NumberLike_co | _NoValueType = ..., - where: _ArrayLikeBool_co | _NoValueType = ..., -) -> floating: ... -@overload -def prod( - a: _ArrayLikeComplex_co, - axis: None = None, - dtype: None = None, - out: None = None, - keepdims: Literal[False] | _NoValueType = ..., - initial: _NumberLike_co | _NoValueType = ..., - where: _ArrayLikeBool_co | _NoValueType = ..., -) -> complexfloating: ... -@overload -def prod( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = None, - dtype: None = None, - out: None = None, - keepdims: bool | _NoValueType = ..., - initial: _NumberLike_co | _NoValueType = ..., - where: _ArrayLikeBool_co | _NoValueType = ..., -) -> Any: ... -@overload -def prod[ScalarT: np.generic]( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None, - dtype: _DTypeLike[ScalarT], - out: None = None, - keepdims: Literal[False] | _NoValueType = ..., - initial: _NumberLike_co | _NoValueType = ..., - where: _ArrayLikeBool_co | _NoValueType = ..., -) -> ScalarT: ... -@overload -def prod[ScalarT: np.generic]( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None = None, - *, - dtype: _DTypeLike[ScalarT], - out: None = None, - keepdims: Literal[False] | _NoValueType = ..., - initial: _NumberLike_co | _NoValueType = ..., - where: _ArrayLikeBool_co | _NoValueType = ..., -) -> ScalarT: ... 
-@overload -def prod( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = None, - dtype: DTypeLike | None = None, - out: None = None, - keepdims: bool | _NoValueType = ..., - initial: _NumberLike_co | _NoValueType = ..., - where: _ArrayLikeBool_co | _NoValueType = ..., -) -> Any: ... -@overload -def prod[ArrayT: np.ndarray]( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None, - dtype: DTypeLike | None, - out: ArrayT, - keepdims: bool | _NoValueType = ..., - initial: _NumberLike_co | _NoValueType = ..., - where: _ArrayLikeBool_co | _NoValueType = ..., -) -> ArrayT: ... -@overload -def prod[ArrayT: np.ndarray]( - a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = None, - dtype: DTypeLike | None = None, - *, - out: ArrayT, - keepdims: bool | _NoValueType = ..., - initial: _NumberLike_co | _NoValueType = ..., - where: _ArrayLikeBool_co | _NoValueType = ..., -) -> ArrayT: ... - # keep in sync with `cumsum` above @overload def cumprod[ScalarT: np.number | np.bool | np.object_]( @@ -1647,6 +1429,490 @@ def around[ArrayT: np.ndarray]( out: ArrayT, ) -> ArrayT: ... +# keep in sync with `sum` below (but without `timedelta64`) +@overload # ~builtins.float +def prod( + a: _NestedSequence[list[float]] | list[float], + axis: None = None, + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _FloatLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> np.float64: ... +@overload # ~builtins.float, axis: +def prod( + a: _NestedSequence[list[float]] | list[float], + axis: int | tuple[int, ...], + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _FloatLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[np.float64]: ... 
+@overload # ~builtins.float, keepdims=True +def prod( + a: _NestedSequence[list[float]] | list[float], + axis: int | tuple[int, ...] | None = None, + dtype: None = None, + out: None = None, + *, + keepdims: Literal[True], + initial: _FloatLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[np.float64]: ... +@overload # ~builtins.complex +def prod( + a: _NestedSequence[list[complex]] | list[complex], + axis: None = None, + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> np.complex128: ... +@overload # ~builtins.complex, axis: +def prod( + a: _NestedSequence[list[complex]] | list[complex], + axis: int | tuple[int, ...], + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[np.complex128]: ... +@overload # ~builtins.complex, keepdims=True +def prod( + a: _NestedSequence[list[complex]] | list[complex], + axis: int | tuple[int, ...] | None = None, + dtype: None = None, + out: None = None, + *, + keepdims: Literal[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[np.complex128]: ... +@overload # ~number +def prod[ScalarT: np.number]( + a: _ArrayLike[ScalarT], + axis: None = None, + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ScalarT: ... 
+@overload # ~number | timedelta64 | object_, axis: +def prod[ScalarT: np.number | np.object_]( + a: _ArrayLike[ScalarT], + axis: int | tuple[int, ...], + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[ScalarT]: ... +@overload # ~number | object_, keepdims=True +def prod[ArrayT: NDArray[np.number | np.object_]]( + a: ArrayT, + axis: int | tuple[int, ...] | None = None, + dtype: None = None, + out: None = None, + *, + keepdims: Literal[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ArrayT: ... +@overload # bool_ | +builtins.int +def prod( + a: _DualArrayLike[np.dtype[np.bool], int], + axis: None = None, + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _IntLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> np.int_: ... +@overload # bool_ | +builtins.int, axis: +def prod( + a: _DualArrayLike[np.dtype[np.bool], int], + axis: int | tuple[int, ...], + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _IntLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[np.int_]: ... +@overload # bool_, keepdims=True +def prod[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.bool]], + axis: int | tuple[int, ...] | None = None, + dtype: None = None, + out: None = None, + *, + keepdims: Literal[True], + initial: _IntLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> np.ndarray[ShapeT, np.dtype[np.int_]]: ... 
+@overload # object_ +def prod( + a: _SupportsArray[np.dtype[np.object_]], + axis: None = None, + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> Any: ... +@overload # dtype: ScalarT +def prod[ScalarT: np.generic]( + a: _ArrayLikeNumeric_co, + axis: None = None, + *, + dtype: _DTypeLike[ScalarT], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ScalarT: ... +@overload # dtype: ScalarT (keyword), keepdims=True +def prod[ShapeT: _Shape, ScalarT: np.generic]( + a: np.ndarray[ShapeT, np.dtype[np.number | np.bool | np.object_]], + axis: int | tuple[int, ...] | None = None, + *, + dtype: _DTypeLike[ScalarT], + out: None = None, + keepdims: Literal[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> np.ndarray[ShapeT, np.dtype[ScalarT]]: ... +@overload # dtype: ScalarT (positional), keepdims=True +def prod[ShapeT: _Shape, ScalarT: np.generic]( + a: np.ndarray[ShapeT, np.dtype[np.number | np.bool | np.object_]], + axis: int | tuple[int, ...] | None, + dtype: _DTypeLike[ScalarT], + out: None = None, + *, + keepdims: Literal[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> np.ndarray[ShapeT, np.dtype[ScalarT]]: ... +@overload # dtype: ScalarT (keyword), keepdims=True +def prod[ScalarT: np.generic]( + a: _ArrayLikeMultiplicative_co, + axis: int | tuple[int, ...] | None = None, + *, + dtype: _DTypeLike[ScalarT], + out: None = None, + keepdims: Literal[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[ScalarT]: ... 
+@overload # axis: , dtype: ScalarT +def prod[ScalarT: np.generic]( + a: _ArrayLikeMultiplicative_co, + axis: int | tuple[int, ...], + dtype: _DTypeLike[ScalarT], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[ScalarT]: ... +@overload # out: ArrayT (keyword) +def prod[ArrayT: np.ndarray]( + a: _ArrayLikeMultiplicative_co, + axis: int | tuple[int, ...] | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ArrayT: ... +@overload # out: ArrayT (positional) +def prod[ArrayT: np.ndarray]( + a: _ArrayLikeMultiplicative_co, + axis: int | tuple[int, ...] | None, + dtype: DTypeLike | None, + out: ArrayT, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ArrayT: ... +@overload # fallback +def prod( + a: _ArrayLikeMultiplicative_co, + axis: None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> Any: ... +@overload # fallback, axis: +def prod( + a: _ArrayLikeMultiplicative_co, + axis: int | tuple[int, ...], + dtype: DTypeLike | None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[Any]: ... +@overload # fallback, keepdims=True +def prod( + a: _ArrayLikeMultiplicative_co, + axis: int | tuple[int, ...] 
| None = None, + dtype: DTypeLike | None = None, + out: None = None, + *, + keepdims: Literal[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[Any]: ... + +# keep in sync with `prod` above (but also accept `timedelta64`) +@overload # ~builtins.float +def sum( + a: _NestedSequence[list[float]] | list[float], + axis: None = None, + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _FloatLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> np.float64: ... +@overload # ~builtins.float, axis: +def sum( + a: _NestedSequence[list[float]] | list[float], + axis: int | tuple[int, ...], + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _FloatLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[np.float64]: ... +@overload # ~builtins.float, keepdims=True +def sum( + a: _NestedSequence[list[float]] | list[float], + axis: int | tuple[int, ...] | None = None, + dtype: None = None, + out: None = None, + *, + keepdims: Literal[True], + initial: _FloatLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[np.float64]: ... +@overload # ~builtins.complex +def sum( + a: _NestedSequence[list[complex]] | list[complex], + axis: None = None, + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> np.complex128: ... +@overload # ~builtins.complex, axis: +def sum( + a: _NestedSequence[list[complex]] | list[complex], + axis: int | tuple[int, ...], + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[np.complex128]: ... 
+@overload # ~builtins.complex, keepdims=True +def sum( + a: _NestedSequence[list[complex]] | list[complex], + axis: int | tuple[int, ...] | None = None, + dtype: None = None, + out: None = None, + *, + keepdims: Literal[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[np.complex128]: ... +@overload # ~number | timedelta64 +def sum[ScalarT: np.number | np.timedelta64]( + a: _ArrayLike[ScalarT], + axis: None = None, + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ScalarT: ... +@overload # ~number | timedelta64 | object_, axis: +def sum[ScalarT: np.number | np.timedelta64 | np.object_]( + a: _ArrayLike[ScalarT], + axis: int | tuple[int, ...], + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[ScalarT]: ... +@overload # ~number | timedelta64 | object_, keepdims=True +def sum[ArrayT: NDArray[np.number | np.timedelta64 | np.object_]]( + a: ArrayT, + axis: int | tuple[int, ...] | None = None, + dtype: None = None, + out: None = None, + *, + keepdims: Literal[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ArrayT: ... +@overload # bool_ | +builtins.int +def sum( + a: _DualArrayLike[np.dtype[np.bool], int], + axis: None = None, + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _IntLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> np.int_: ... 
+@overload # bool_ | +builtins.int, axis: +def sum( + a: _DualArrayLike[np.dtype[np.bool], int], + axis: int | tuple[int, ...], + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _IntLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[np.int_]: ... +@overload # bool_, keepdims=True +def sum[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.bool]], + axis: int | tuple[int, ...] | None = None, + dtype: None = None, + out: None = None, + *, + keepdims: Literal[True], + initial: _IntLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> np.ndarray[ShapeT, np.dtype[np.int_]]: ... +@overload # object_ +def sum( + a: _SupportsArray[np.dtype[np.object_]], + axis: None = None, + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> Any: ... +@overload # dtype: ScalarT +def sum[ScalarT: np.generic]( + a: _ArrayLikeNumeric_co, + axis: None = None, + *, + dtype: _DTypeLike[ScalarT], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ScalarT: ... +@overload # dtype: ScalarT (keyword), keepdims=True +def sum[ShapeT: _Shape, ScalarT: np.generic]( + a: np.ndarray[ShapeT, np.dtype[np.number | np.bool | np.timedelta64 | np.object_]], + axis: int | tuple[int, ...] | None = None, + *, + dtype: _DTypeLike[ScalarT], + out: None = None, + keepdims: Literal[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> np.ndarray[ShapeT, np.dtype[ScalarT]]: ... 
+@overload # dtype: ScalarT (positional), keepdims=True +def sum[ShapeT: _Shape, ScalarT: np.generic]( + a: np.ndarray[ShapeT, np.dtype[np.number | np.bool | np.timedelta64 | np.object_]], + axis: int | tuple[int, ...] | None, + dtype: _DTypeLike[ScalarT], + out: None = None, + *, + keepdims: Literal[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> np.ndarray[ShapeT, np.dtype[ScalarT]]: ... +@overload # dtype: ScalarT (keyword), keepdims=True +def sum[ScalarT: np.generic]( + a: _ArrayLikeNumeric_co, + axis: int | tuple[int, ...] | None = None, + *, + dtype: _DTypeLike[ScalarT], + out: None = None, + keepdims: Literal[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[ScalarT]: ... +@overload # axis: , dtype: ScalarT +def sum[ScalarT: np.generic]( + a: _ArrayLikeNumeric_co, + axis: int | tuple[int, ...], + dtype: _DTypeLike[ScalarT], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[ScalarT]: ... +@overload # out: ArrayT (keyword) +def sum[ArrayT: np.ndarray]( + a: _ArrayLikeNumeric_co, + axis: int | tuple[int, ...] | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ArrayT: ... +@overload # out: ArrayT (positional) +def sum[ArrayT: np.ndarray]( + a: _ArrayLikeNumeric_co, + axis: int | tuple[int, ...] | None, + dtype: DTypeLike | None, + out: ArrayT, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ArrayT: ... 
+@overload # fallback +def sum( + a: _ArrayLikeNumeric_co, + axis: None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> Any: ... +@overload # fallback, axis: +def sum( + a: _ArrayLikeNumeric_co, + axis: int | tuple[int, ...], + dtype: DTypeLike | None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[Any]: ... +@overload # fallback, keepdims=True +def sum( + a: _ArrayLikeNumeric_co, + axis: int | tuple[int, ...] | None = None, + dtype: DTypeLike | None = None, + out: None = None, + *, + keepdims: Literal[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[Any]: ... + # @overload # +integer | ~object_ | +builtins.float def mean( diff --git a/numpy/typing/tests/data/fail/fromnumeric.pyi b/numpy/typing/tests/data/fail/fromnumeric.pyi index 5655ede23e6e..f38bc2eeabbd 100644 --- a/numpy/typing/tests/data/fail/fromnumeric.pyi +++ b/numpy/typing/tests/data/fail/fromnumeric.pyi @@ -118,7 +118,7 @@ np.prod(a, out=False) # type: ignore[call-overload] np.prod(a, keepdims=1.0) # type: ignore[call-overload] np.prod(a, initial=int) # type: ignore[call-overload] np.prod(a, where=1.0) # type: ignore[call-overload] -np.prod(AR_U) # type: ignore[arg-type] +np.prod(AR_U) # type: ignore[type-var] np.cumprod(a, axis=1.0) # type: ignore[call-overload] np.cumprod(a, out=False) # type: ignore[call-overload] diff --git a/numpy/typing/tests/data/reveal/fromnumeric.pyi b/numpy/typing/tests/data/reveal/fromnumeric.pyi index a995bce2d6f2..70b63ce02554 100644 --- a/numpy/typing/tests/data/reveal/fromnumeric.pyi +++ b/numpy/typing/tests/data/reveal/fromnumeric.pyi @@ -193,21 +193,6 @@ assert_type(np.clip([0], 0, 1), 
npt.NDArray[Any]) assert_type(np.clip(AR_b, 0, 1, out=AR_subclass), NDArraySubclass) assert_type(np.clip(AR_f4_1d, 0, 1), np.ndarray[tuple[int], np.dtype[np.float32]]) -assert_type(np.sum(b), np.bool) -assert_type(np.sum(f4), np.float32) -assert_type(np.sum(f), Any) -assert_type(np.sum(AR_b), np.bool) -assert_type(np.sum(AR_f4), np.float32) -assert_type(np.sum(AR_b, axis=0), Any) -assert_type(np.sum(AR_f4, axis=0), Any) -assert_type(np.sum(AR_f4, out=AR_subclass), NDArraySubclass) -assert_type(np.sum(AR_f4, dtype=np.float64), np.float64) -assert_type(np.sum(AR_f4, None, np.float64), np.float64) -assert_type(np.sum(AR_f4, dtype=np.float64, keepdims=False), np.float64) -assert_type(np.sum(AR_f4, None, np.float64, keepdims=False), np.float64) -assert_type(np.sum(AR_f4, dtype=np.float64, keepdims=True), np.float64 | npt.NDArray[np.float64]) -assert_type(np.sum(AR_f4, None, np.float64, keepdims=True), np.float64 | npt.NDArray[np.float64]) - assert_type(np.all(b), np.bool) assert_type(np.all(f4), np.bool) assert_type(np.all(f), np.bool) @@ -285,18 +270,6 @@ assert_type(np.amin(AR_b, keepdims=True), Any) assert_type(np.amin(AR_f4, keepdims=True), Any) assert_type(np.amin(AR_f4, out=AR_subclass), NDArraySubclass) -assert_type(np.prod(AR_b), np.int_) -assert_type(np.prod(AR_u8), np.uint64) -assert_type(np.prod(AR_i8), np.int64) -assert_type(np.prod(AR_f4), np.floating) -assert_type(np.prod(AR_c16), np.complexfloating) -assert_type(np.prod(AR_O), Any) -assert_type(np.prod(AR_f4, axis=0), Any) -assert_type(np.prod(AR_f4, keepdims=True), Any) -assert_type(np.prod(AR_f4, dtype=np.float64), np.float64) -assert_type(np.prod(AR_f4, dtype=float), Any) -assert_type(np.prod(AR_f4, out=AR_subclass), NDArraySubclass) - assert_type(np.cumprod(b), np.ndarray[tuple[int], np.dtype[np.bool]]) assert_type(np.cumprod(f4), np.ndarray[tuple[int], np.dtype[np.float32]]) assert_type(np.cumprod(f), np.ndarray[tuple[int]]) @@ -342,6 +315,67 @@ assert_type(np.around([1.5]), npt.NDArray[Any]) 
assert_type(np.around(AR_f4, out=AR_subclass), NDArraySubclass) assert_type(np.around(AR_f4_1d), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.prod(AR_nd), Any) +assert_type(np.prod(AR_b), np.int_) +assert_type(np.prod(AR_i8), np.int64) +assert_type(np.prod(AR_i8, axis=0), npt.NDArray[np.int64]) +assert_type(np.prod(AR_i8, keepdims=True), npt.NDArray[np.int64]) +assert_type(np.prod(AR_i8, axis=0, keepdims=True), npt.NDArray[np.int64]) +assert_type(np.prod(AR_f4), np.float32) +assert_type(np.prod(AR_c16), np.complex128) +assert_type(np.prod(AR_O), Any) +assert_type(np.prod(AR_O, axis=0), npt.NDArray[np.object_]) +assert_type(np.prod(AR_O, keepdims=True), npt.NDArray[np.object_]) +assert_type(np.prod(AR_O, axis=0, keepdims=True), npt.NDArray[np.object_]) +assert_type(np.prod(AR_f4, axis=0), npt.NDArray[np.float32]) +assert_type(np.prod(AR_f4, keepdims=True), npt.NDArray[np.float32]) +assert_type(np.prod(AR_f4_1d, keepdims=True), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.prod(AR_f4_2d, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float32]]) +assert_type(np.prod(AR_f4, dtype=float), Any) +assert_type(np.prod(AR_f4, dtype=float, axis=1), np.ndarray) +assert_type(np.prod(AR_f4, dtype=float, keepdims=True), np.ndarray) +assert_type(np.prod(AR_f4, dtype=float, axis=1, keepdims=True), np.ndarray) +assert_type(np.prod(AR_f4, dtype=np.float64), np.float64) +assert_type(np.prod(AR_f4, dtype=np.float64), np.float64) +assert_type(np.prod(AR_f4, dtype=np.float64, keepdims=False), np.float64) +assert_type(np.prod(AR_f4, dtype=np.float64, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.prod(AR_f4_1d, dtype=np.float64, keepdims=True), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.prod(AR_f4_2d, dtype=np.float64, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.prod(AR_f4, out=AR_subclass), NDArraySubclass) + +# same as above (but with `timedelta64`) +assert_type(np.sum(AR_nd), 
Any) +assert_type(np.sum(AR_b), np.int_) +assert_type(np.sum(AR_i8), np.int64) +assert_type(np.sum(AR_i8, axis=0), npt.NDArray[np.int64]) +assert_type(np.sum(AR_i8, keepdims=True), npt.NDArray[np.int64]) +assert_type(np.sum(AR_i8, axis=0, keepdims=True), npt.NDArray[np.int64]) +assert_type(np.sum(AR_f4), np.float32) +assert_type(np.sum(AR_c16), np.complex128) +assert_type(np.sum(AR_O), Any) +assert_type(np.sum(AR_O, axis=0), npt.NDArray[np.object_]) +assert_type(np.sum(AR_O, keepdims=True), npt.NDArray[np.object_]) +assert_type(np.sum(AR_O, axis=0, keepdims=True), npt.NDArray[np.object_]) +assert_type(np.sum(AR_m_ns), np.timedelta64[int]) +assert_type(np.sum(AR_m_s), np.timedelta64[dt.timedelta]) +assert_type(np.sum(AR_m_nat), np.timedelta64[None]) +assert_type(np.sum(AR_f4, axis=0), npt.NDArray[np.float32]) +assert_type(np.sum(AR_f4, keepdims=True), npt.NDArray[np.float32]) +assert_type(np.sum(AR_f4_1d, keepdims=True), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.sum(AR_f4_2d, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float32]]) +assert_type(np.sum(AR_f4, dtype=float), Any) +assert_type(np.sum(AR_f4, dtype=float, axis=1), np.ndarray) +assert_type(np.sum(AR_f4, dtype=float, keepdims=True), np.ndarray) +assert_type(np.sum(AR_f4, dtype=float, axis=1, keepdims=True), np.ndarray) +assert_type(np.sum(AR_f4, dtype=np.float64), np.float64) +assert_type(np.sum(AR_f4, dtype=np.float64), np.float64) +assert_type(np.sum(AR_f4, dtype=np.float64, keepdims=False), np.float64) +assert_type(np.sum(AR_f4, dtype=np.float64, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.sum(AR_f4_1d, dtype=np.float64, keepdims=True), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.sum(AR_f4_2d, dtype=np.float64, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.sum(AR_f4, out=AR_subclass), NDArraySubclass) + +# assert_type(np.mean(AR_b), np.float64) assert_type(np.mean(AR_i8), np.float64) 
assert_type(np.mean(AR_i8, axis=0), npt.NDArray[np.float64]) @@ -433,3 +467,8 @@ assert_type(np.var(AR_f4, dtype=np.float64, keepdims=True), npt.NDArray[np.float assert_type(np.var(AR_f4_1d, dtype=np.float64, keepdims=True), np.ndarray[tuple[int], np.dtype[np.float64]]) assert_type(np.var(AR_f4_2d, dtype=np.float64, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float64]]) assert_type(np.var(AR_f4, out=AR_subclass), NDArraySubclass) + +### + +data: npt.NDArray[np.void] +polygons = data["vectors"].sum(axis=1) diff --git a/numpy/typing/tests/data/reveal/ndarray_misc.pyi b/numpy/typing/tests/data/reveal/ndarray_misc.pyi index 6d0563f0c3cb..7c9b55720811 100644 --- a/numpy/typing/tests/data/reveal/ndarray_misc.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_misc.pyi @@ -7,6 +7,7 @@ function-based counterpart in `../from_numeric.py`. """ import ctypes as ct +import datetime as dt import operator from collections.abc import Iterator from types import ModuleType @@ -20,7 +21,13 @@ class SubClass(np.ndarray[tuple[Any, ...], np.dtype[np.object_]]): ... 
f8: np.float64 i8: np.int64 +b1: np.bool +m8_ns: np.timedelta64[int] +m8_ms: np.timedelta64[dt.timedelta] +m8_na: np.timedelta64[None] + B: SubClass + AR_f8: npt.NDArray[np.float64] AR_i8: npt.NDArray[np.int64] AR_u1: npt.NDArray[np.uint8] @@ -33,6 +40,8 @@ AR_f8_1d: np.ndarray[tuple[int], np.dtype[np.float64]] AR_f8_2d: np.ndarray[tuple[int, int], np.dtype[np.float64]] AR_f8_3d: np.ndarray[tuple[int, int, int], np.dtype[np.float64]] +AR_any: np.ndarray + ctypes_obj = AR_f8.ctypes assert_type(AR_f8.__dlpack__(), CapsuleType) @@ -108,6 +117,64 @@ assert_type(AR_f8.max(axis=0), Any) assert_type(AR_f8.max(keepdims=True), Any) assert_type(AR_f8.max(out=B), SubClass) +# same as below (but without `timedelta64`) +assert_type(b1.prod(), np.int_) +assert_type(i8.prod(), np.int64) +assert_type(f8.prod(), np.float64) +assert_type(AR_i8.prod(), np.int64) +assert_type(AR_i8.prod(keepdims=True), npt.NDArray[np.int64]) +assert_type(AR_i8.prod(axis=0), npt.NDArray[np.int64]) +assert_type(AR_i8.prod(axis=0, keepdims=True), npt.NDArray[np.int64]) +assert_type(AR_f8.prod(), np.float64) +assert_type(AR_f8.prod(keepdims=True), npt.NDArray[np.float64]) +assert_type(AR_f8.prod(axis=0), npt.NDArray[np.float64]) +assert_type(AR_f8.prod(axis=0, keepdims=True), npt.NDArray[np.float64]) +assert_type(AR_f8.prod(dtype=np.float32), np.float32) +assert_type(AR_f8.prod(dtype=np.float32, axis=0), npt.NDArray[np.float32]) +assert_type(AR_f8.prod(dtype=np.float32, keepdims=True), npt.NDArray[np.float32]) +assert_type(AR_f8.prod(dtype=np.float32, axis=0, keepdims=True), npt.NDArray[np.float32]) +assert_type(AR_f8.prod(out=B), SubClass) +assert_type(AR_f8_2d.prod(), np.float64) +assert_type(AR_f8_2d.prod(keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(AR_f8_2d.prod(axis=0), npt.NDArray[np.float64]) +assert_type(AR_f8_2d.prod(axis=0, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(AR_f8_2d.prod(dtype=np.float32), np.float32) 
+assert_type(AR_f8_2d.prod(dtype=np.float32, axis=0), npt.NDArray[np.float32]) +assert_type(AR_f8_2d.prod(dtype=np.float32, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float32]]) +assert_type(AR_f8_2d.prod(dtype=np.float32, axis=0, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float32]]) +assert_type(AR_any.prod(), Any) + +# same as above (but also accept `timedelta64`) +assert_type(b1.sum(), np.int_) +assert_type(i8.sum(), np.int64) +assert_type(f8.sum(), np.float64) +assert_type(m8_ns.sum(), np.timedelta64[int]) +assert_type(m8_ms.sum(), np.timedelta64[dt.timedelta]) +assert_type(m8_na.sum(), np.timedelta64[None]) +assert_type(AR_i8.sum(), np.int64) +assert_type(AR_i8.sum(keepdims=True), npt.NDArray[np.int64]) +assert_type(AR_i8.sum(axis=0), npt.NDArray[np.int64]) +assert_type(AR_i8.sum(axis=0, keepdims=True), npt.NDArray[np.int64]) +assert_type(AR_f8.sum(), np.float64) +assert_type(AR_f8.sum(keepdims=True), npt.NDArray[np.float64]) +assert_type(AR_f8.sum(axis=0), npt.NDArray[np.float64]) +assert_type(AR_f8.sum(axis=0, keepdims=True), npt.NDArray[np.float64]) +assert_type(AR_f8.sum(dtype=np.float32), np.float32) +assert_type(AR_f8.sum(dtype=np.float32, axis=0), npt.NDArray[np.float32]) +assert_type(AR_f8.sum(dtype=np.float32, keepdims=True), npt.NDArray[np.float32]) +assert_type(AR_f8.sum(dtype=np.float32, axis=0, keepdims=True), npt.NDArray[np.float32]) +assert_type(AR_f8.sum(out=B), SubClass) +assert_type(AR_f8_2d.sum(), np.float64) +assert_type(AR_f8_2d.sum(keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(AR_f8_2d.sum(axis=0), npt.NDArray[np.float64]) +assert_type(AR_f8_2d.sum(axis=0, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(AR_f8_2d.sum(dtype=np.float32), np.float32) +assert_type(AR_f8_2d.sum(dtype=np.float32, axis=0), npt.NDArray[np.float32]) +assert_type(AR_f8_2d.sum(dtype=np.float32, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float32]]) 
+assert_type(AR_f8_2d.sum(dtype=np.float32, axis=0, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float32]]) +assert_type(AR_any.sum(), Any) + +# same as below assert_type(f8.mean(), Any) assert_type(AR_f8.mean(), np.float64) assert_type(AR_f8.mean(keepdims=True), npt.NDArray[np.float64]) @@ -173,12 +240,6 @@ assert_type(AR_f8.min(axis=0), Any) assert_type(AR_f8.min(keepdims=True), Any) assert_type(AR_f8.min(out=B), SubClass) -assert_type(f8.prod(), Any) -assert_type(AR_f8.prod(), Any) -assert_type(AR_f8.prod(axis=0), Any) -assert_type(AR_f8.prod(keepdims=True), Any) -assert_type(AR_f8.prod(out=B), SubClass) - assert_type(f8.round(), np.float64) assert_type(AR_f8.round(), npt.NDArray[np.float64]) assert_type(AR_f8.round(out=B), SubClass) @@ -189,11 +250,6 @@ assert_type(AR_f8.repeat(1), np.ndarray[tuple[int], np.dtype[np.float64]]) assert_type(AR_f8.repeat(1, axis=0), npt.NDArray[np.float64]) assert_type(B.repeat(1), np.ndarray[tuple[int], np.dtype[np.object_]]) assert_type(B.repeat(1, axis=0), npt.NDArray[np.object_]) -assert_type(f8.sum(), Any) -assert_type(AR_f8.sum(), Any) -assert_type(AR_f8.sum(axis=0), Any) -assert_type(AR_f8.sum(keepdims=True), Any) -assert_type(AR_f8.sum(out=B), SubClass) assert_type(f8.take(0), np.float64) assert_type(AR_f8.take(0), np.float64) From 8203da6e4cdad722664fa740c7a9514104483f07 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Mon, 6 Apr 2026 03:50:49 +0200 Subject: [PATCH 1596/1718] TYP: ``tile``: accept numpy scalars and arrays as second argument (#31161) --- numpy/lib/_shape_base_impl.pyi | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/numpy/lib/_shape_base_impl.pyi b/numpy/lib/_shape_base_impl.pyi index 25cf94be3927..793f2c0374b3 100644 --- a/numpy/lib/_shape_base_impl.pyi +++ b/numpy/lib/_shape_base_impl.pyi @@ -18,6 +18,7 @@ from numpy._typing import ( _ArrayLikeBool_co, _ArrayLikeComplex_co, _ArrayLikeFloat_co, + _ArrayLikeInt, _ArrayLikeInt_co, _ArrayLikeObject_co, _ArrayLikeUInt_co, 
@@ -202,6 +203,6 @@ def kron(a: object, b: _ArrayLikeObject_co) -> NDArray[np.object_]: ... # @overload -def tile[ScalarT: np.generic](A: _ArrayLike[ScalarT], reps: int | Sequence[int]) -> NDArray[ScalarT]: ... +def tile[ScalarT: np.generic](A: _ArrayLike[ScalarT], reps: _ArrayLikeInt) -> NDArray[ScalarT]: ... @overload -def tile(A: ArrayLike, reps: int | Sequence[int]) -> NDArray[Incomplete]: ... +def tile(A: ArrayLike, reps: _ArrayLikeInt) -> NDArray[Incomplete]: ... From e58fd1f5cea606890441d83d12c274955e3a0ac6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 6 Apr 2026 17:55:16 +0000 Subject: [PATCH 1597/1718] MAINT: Bump astral-sh/setup-uv from 7.6.0 to 8.0.0 Bumps [astral-sh/setup-uv](https://github.com/astral-sh/setup-uv) from 7.6.0 to 8.0.0. - [Release notes](https://github.com/astral-sh/setup-uv/releases) - [Commits](https://github.com/astral-sh/setup-uv/compare/37802adc94f370d6bfd71619e3f0bf239e1f3b78...cec208311dfd045dd5311c1add060b2062131d57) --- updated-dependencies: - dependency-name: astral-sh/setup-uv dependency-version: 8.0.0 dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] --- .github/workflows/stubtest.yml | 2 +- .github/workflows/typecheck.yml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/stubtest.yml b/.github/workflows/stubtest.yml index 9cab4cf63430..295ca31f3e13 100644 --- a/.github/workflows/stubtest.yml +++ b/.github/workflows/stubtest.yml @@ -43,7 +43,7 @@ jobs: fetch-tags: true persist-credentials: false - - uses: astral-sh/setup-uv@37802adc94f370d6bfd71619e3f0bf239e1f3b78 # v7.6.0 + - uses: astral-sh/setup-uv@cec208311dfd045dd5311c1add060b2062131d57 # v8.0.0 with: python-version: ${{ matrix.py }} activate-environment: true diff --git a/.github/workflows/typecheck.yml b/.github/workflows/typecheck.yml index 826e1ffe0f31..6f6ce50b9440 100644 --- a/.github/workflows/typecheck.yml +++ b/.github/workflows/typecheck.yml @@ -61,7 +61,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: astral-sh/setup-uv@37802adc94f370d6bfd71619e3f0bf239e1f3b78 # v7.6.0 + - uses: astral-sh/setup-uv@cec208311dfd045dd5311c1add060b2062131d57 # v8.0.0 with: python-version: ${{ matrix.os_python[1] }} activate-environment: true @@ -94,7 +94,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - - uses: astral-sh/setup-uv@37802adc94f370d6bfd71619e3f0bf239e1f3b78 # v7.6.0 + - uses: astral-sh/setup-uv@cec208311dfd045dd5311c1add060b2062131d57 # v8.0.0 with: activate-environment: true - name: Install dependencies From bf699d071887cc7a0937b3f24375f027bddb7433 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 6 Apr 2026 17:55:53 +0000 Subject: [PATCH 1598/1718] MAINT: Bump int128/hide-comment-action from 1.53.0 to 1.54.0 Bumps [int128/hide-comment-action](https://github.com/int128/hide-comment-action) from 1.53.0 to 1.54.0. 
- [Release notes](https://github.com/int128/hide-comment-action/releases) - [Commits](https://github.com/int128/hide-comment-action/compare/c0b5ed56339ed2285f922a5da4444b55270c43f3...7e00bd46796b28fdf74b2f4b0e4f0568bd024b00) --- updated-dependencies: - dependency-name: int128/hide-comment-action dependency-version: 1.54.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/mypy_primer_comment.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/mypy_primer_comment.yml b/.github/workflows/mypy_primer_comment.yml index 8541265e4a21..e05c0dc5c697 100644 --- a/.github/workflows/mypy_primer_comment.yml +++ b/.github/workflows/mypy_primer_comment.yml @@ -49,7 +49,7 @@ jobs: return parseInt(fs.readFileSync("pr_number.txt", { encoding: "utf8" })) - name: Hide old comments - uses: int128/hide-comment-action@c0b5ed56339ed2285f922a5da4444b55270c43f3 # v1.53.0 + uses: int128/hide-comment-action@7e00bd46796b28fdf74b2f4b0e4f0568bd024b00 # v1.54.0 with: token: ${{ secrets.GITHUB_TOKEN }} issue-number: ${{ steps.get-pr-number.outputs.result }} From cb0b58356554680beae240f3d42ad23a0e1cce36 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Tue, 7 Apr 2026 03:46:59 +0200 Subject: [PATCH 1599/1718] TYP: ``[a]min``, ``[a]max``, ``ptp``: shape-typing and improved dtypes (#31163) --- numpy/__init__.pyi | 106 ++++ numpy/_core/fromnumeric.pyi | 553 ++++++++++++++++-- .../typing/tests/data/reveal/fromnumeric.pyi | 93 ++- .../typing/tests/data/reveal/ndarray_misc.pyi | 38 +- 4 files changed, 708 insertions(+), 82 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index bffaee123f10..f883812b1e0e 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -3213,6 +3213,112 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): correction: float | _NoValueType = ..., ) -> ndarray[_ShapeT_co]: ... 
+ # keep in sync with `ndarray.amin` below + @override # type: ignore[override] + @overload # +number | timedelta64 | datetime64 + def max[ScalarT: number | bool_ | timedelta64 | datetime64]( + self: NDArray[ScalarT], + axis: None = None, + out: None = None, + *, + keepdims: L[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ScalarT: ... + @overload # +number | timedelta64 | datetime64 | object_, axis: + def max[ScalarT: number | bool_ | timedelta64 | datetime64 | object_]( + self: NDArray[ScalarT], + axis: int | tuple[int, ...], + out: None = None, + *, + keepdims: L[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> NDArray[ScalarT]: ... + @overload # +number | timedelta64 | datetime64 | object_, keepdims=True + def max[ArrayT: NDArray[number | bool_ | timedelta64 | datetime64 | object_]]( + self: ArrayT, + axis: int | tuple[int, ...] | None = None, + out: None = None, + *, + keepdims: L[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ArrayT: ... + @overload # object_ + def max( + self: NDArray[object_], + axis: None = None, + out: None = None, + *, + keepdims: L[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> Any: ... + @overload # out: ArrayT + def max[ArrayT: ndarray]( # pyright: ignore[reportIncompatibleMethodOverride] + self: NDArray[number | bool_ | timedelta64 | datetime64 | object_], + axis: int | tuple[int, ...] | None = None, + *, + out: ArrayT, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ArrayT: ... 
+ + # keep in sync with `ndarray.amax` above + @override # type: ignore[override] + @overload # +number | timedelta64 | datetime64 + def min[ScalarT: number | bool_ | timedelta64 | datetime64]( + self: NDArray[ScalarT], + axis: None = None, + out: None = None, + *, + keepdims: L[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ScalarT: ... + @overload # +number | timedelta64 | datetime64 | object_, axis: + def min[ScalarT: number | bool_ | timedelta64 | datetime64 | object_]( + self: NDArray[ScalarT], + axis: int | tuple[int, ...], + out: None = None, + *, + keepdims: L[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> NDArray[ScalarT]: ... + @overload # +number | timedelta64 | datetime64 | object_, keepdims=True + def min[ArrayT: NDArray[number | bool_ | timedelta64 | datetime64 | object_]]( + self: ArrayT, + axis: int | tuple[int, ...] | None = None, + out: None = None, + *, + keepdims: L[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ArrayT: ... + @overload # object_ + def min( + self: NDArray[object_], + axis: None = None, + out: None = None, + *, + keepdims: L[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> Any: ... + @overload # out: ArrayT + def min[ArrayT: ndarray]( # pyright: ignore[reportIncompatibleMethodOverride] + self: NDArray[number | bool_ | timedelta64 | datetime64 | object_], + axis: int | tuple[int, ...] | None = None, + *, + out: ArrayT, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> ArrayT: ... 
+ # @overload def partition( diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index 2979e7fbaa74..54cf087185ee 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -1,4 +1,4 @@ -from _typeshed import Incomplete +from _typeshed import Incomplete, SupportsBool from collections.abc import Sequence from typing import ( Any, @@ -137,6 +137,16 @@ type _ToArray3D[ScalarT: np.generic] = _Array3D[ScalarT] | Sequence[Sequence[Seq type _ArrayLikeMultiplicative_co = _DualArrayLike[np.dtype[np.number | np.bool | np.object_], complex] type _ArrayLikeNumeric_co = _DualArrayLike[np.dtype[np.number | np.bool | np.object_ | np.timedelta64], complex] +@type_check_only +class _CanLE(Protocol): + def __le__(self, other: Any, /) -> SupportsBool: ... + +@type_check_only +class _CanGE(Protocol): + def __ge__(self, other: Any, /) -> SupportsBool: ... + +type _Orderable = _CanLE | _CanGE + ### # TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @@ -1078,38 +1088,279 @@ def cumulative_sum[ArrayT: np.ndarray]( include_initial: bool = False, ) -> ArrayT: ... -@overload -def ptp[ScalarT: np.generic]( +# +@overload # ~builtins.int +def ptp( + a: _NestedSequence[list[int]] | list[int], + axis: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., +) -> np.int_: ... +@overload # ~builtins.int, axis: +def ptp( + a: _NestedSequence[list[int]] | list[int], + axis: int | tuple[int, ...], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., +) -> NDArray[np.int_]: ... +@overload # ~builtins.int, keepdims=True +def ptp( + a: _NestedSequence[list[int]] | list[int], + axis: int | tuple[int, ...] | None = None, + out: None = None, + *, + keepdims: Literal[True], +) -> NDArray[np.int_]: ... +@overload # ~builtins.float +def ptp( + a: _NestedSequence[list[float]] | list[float], + axis: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., +) -> np.float64: ... 
+@overload # ~builtins.float, axis: +def ptp( + a: _NestedSequence[list[float]] | list[float], + axis: int | tuple[int, ...], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., +) -> NDArray[np.float64]: ... +@overload # ~builtins.float, keepdims=True +def ptp( + a: _NestedSequence[list[float]] | list[float], + axis: int | tuple[int, ...] | None = None, + out: None = None, + *, + keepdims: Literal[True], +) -> NDArray[np.float64]: ... +@overload # ~builtins.complex +def ptp( + a: _NestedSequence[list[complex]] | list[complex], + axis: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., +) -> np.complex128: ... +@overload # ~builtins.complex, axis: +def ptp( + a: _NestedSequence[list[complex]] | list[complex], + axis: int | tuple[int, ...], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., +) -> NDArray[np.complex128]: ... +@overload # ~builtins.complex, keepdims=True +def ptp( + a: _NestedSequence[list[complex]] | list[complex], + axis: int | tuple[int, ...] | None = None, + out: None = None, + *, + keepdims: Literal[True], +) -> NDArray[np.complex128]: ... +@overload # ~number | timedelta64 +def ptp[ScalarT: np.number | np.timedelta64]( a: _ArrayLike[ScalarT], axis: None = None, out: None = None, keepdims: Literal[False] | _NoValueType = ..., ) -> ScalarT: ... -@overload +@overload # ~number | timedelta64 | object_, axis: +def ptp[ScalarT: np.number | np.timedelta64 | np.object_]( + a: _ArrayLike[ScalarT], + axis: int | tuple[int, ...], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., +) -> NDArray[ScalarT]: ... +@overload # ~number | timedelta64 | datetime64 | object_, keepdims=True +def ptp[ArrayT: NDArray[np.number | np.timedelta64 | np.object_]]( + a: ArrayT, + axis: int | tuple[int, ...] | None = None, + out: None = None, + *, + keepdims: Literal[True], +) -> ArrayT: ... 
+@overload # datetime64 def ptp( - a: ArrayLike, - axis: _ShapeLike | None = None, + a: _ArrayLike[np.datetime64], + axis: None = None, out: None = None, - keepdims: bool | _NoValueType = ..., + keepdims: Literal[False] | _NoValueType = ..., +) -> np.timedelta64[Any]: ... +@overload # datetime64, axis: +def ptp( + a: _ArrayLike[np.datetime64], + axis: int | tuple[int, ...], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., +) -> NDArray[np.timedelta64]: ... +@overload # datetime64, keepdims=True +def ptp[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.datetime64]], + axis: int | tuple[int, ...] | None = None, + out: None = None, + *, + keepdims: Literal[True], +) -> np.ndarray[ShapeT, np.dtype[np.timedelta64]]: ... +@overload # object_ +def ptp( + a: _ArrayLike[np.object_], + axis: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., ) -> Any: ... -@overload -def ptp[ArrayT: np.ndarray]( - a: ArrayLike, - axis: _ShapeLike | None, - out: ArrayT, - keepdims: bool | _NoValueType = ..., -) -> ArrayT: ... -@overload +@overload # out: ArrayT def ptp[ArrayT: np.ndarray]( - a: ArrayLike, - axis: _ShapeLike | None = None, + a: _ArrayLikeNumeric_co, + axis: int | tuple[int, ...] | None = None, *, out: ArrayT, keepdims: bool | _NoValueType = ..., ) -> ArrayT: ... +@overload # fallback +def ptp( + a: _ArrayLikeNumeric_co, + axis: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., +) -> Any: ... +@overload # fallback, axis: +def ptp( + a: _ArrayLikeNumeric_co, + axis: int | tuple[int, ...], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., +) -> NDArray[Any]: ... +@overload # fallback, keepdims=True +def ptp( + a: _ArrayLikeNumeric_co, + axis: int | tuple[int, ...] | None = None, + out: None = None, + *, + keepdims: Literal[True], +) -> NDArray[Any]: ... 
-@overload -def amax[ScalarT: np.generic]( +# keep in sync with `amin` below +@overload # sequence of just `Any` (workaround) +def amax( + a: _NestedSequence[Never], + axis: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> Any: ... +@overload # builtins.bool +def amax( + a: _NestedSequence[bool], + axis: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> np.bool: ... +@overload # builtins.bool, axis: +def amax( + a: _NestedSequence[bool], + axis: int | tuple[int, ...], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[np.bool]: ... +@overload # builtins.bool, keepdims=True +def amax( + a: _NestedSequence[bool], + axis: int | tuple[int, ...] | None = None, + out: None = None, + *, + keepdims: Literal[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[np.bool]: ... +@overload # ~builtins.int +def amax( + a: _NestedSequence[list[int]] | list[int], + axis: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> np.int_: ... +@overload # ~builtins.int, axis: +def amax( + a: _NestedSequence[list[int]] | list[int], + axis: int | tuple[int, ...], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[np.int_]: ... +@overload # ~builtins.int, keepdims=True +def amax( + a: _NestedSequence[list[int]] | list[int], + axis: int | tuple[int, ...] 
| None = None, + out: None = None, + *, + keepdims: Literal[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[np.int_]: ... +@overload # ~builtins.float +def amax( + a: _NestedSequence[list[float]] | list[float], + axis: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> np.float64: ... +@overload # ~builtins.float, axis: +def amax( + a: _NestedSequence[list[float]] | list[float], + axis: int | tuple[int, ...], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[np.float64]: ... +@overload # ~builtins.float, keepdims=True +def amax( + a: _NestedSequence[list[float]] | list[float], + axis: int | tuple[int, ...] | None = None, + out: None = None, + *, + keepdims: Literal[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[np.float64]: ... +@overload # ~builtins.complex +def amax( + a: _NestedSequence[list[complex]] | list[complex], + axis: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> np.complex128: ... +@overload # ~builtins.complex, axis: +def amax( + a: _NestedSequence[list[complex]] | list[complex], + axis: int | tuple[int, ...], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[np.complex128]: ... +@overload # ~builtins.complex, keepdims=True +def amax( + a: _NestedSequence[list[complex]] | list[complex], + axis: int | tuple[int, ...] 
| None = None, + out: None = None, + *, + keepdims: Literal[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[np.complex128]: ... +@overload # +number | timedelta64 | datetime64 +def amax[ScalarT: np.number | np.bool | np.timedelta64 | np.datetime64]( a: _ArrayLike[ScalarT], axis: None = None, out: None = None, @@ -1117,37 +1368,199 @@ def amax[ScalarT: np.generic]( initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., ) -> ScalarT: ... -@overload +@overload # +number | timedelta64 | datetime64 | object_, axis: +def amax[ScalarT: np.number | np.bool | np.timedelta64 | np.datetime64 | np.object_]( + a: _ArrayLike[ScalarT], + axis: int | tuple[int, ...], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[ScalarT]: ... +@overload # +number | timedelta64 | datetime64 | object_, keepdims=True +def amax[ArrayT: NDArray[np.number | np.bool | np.timedelta64 | np.datetime64 | np.object_]]( + a: ArrayT, + axis: int | tuple[int, ...] | None = None, + out: None = None, + *, + keepdims: Literal[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ArrayT: ... +@overload # object_ def amax( - a: ArrayLike, - axis: _ShapeLike | None = None, + a: _ArrayLike[np.object_], + axis: None = None, out: None = None, - keepdims: bool | _NoValueType = ..., + keepdims: Literal[False] | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... -@overload +@overload # out: ArrayT def amax[ArrayT: np.ndarray]( - a: ArrayLike, - axis: _ShapeLike | None, + a: _ArrayLikeNumeric_co | _NestedSequence[_Orderable], + axis: int | tuple[int, ...] 
| None = None, + *, out: ArrayT, keepdims: bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., ) -> ArrayT: ... -@overload -def amax[ArrayT: np.ndarray]( - a: ArrayLike, - axis: _ShapeLike | None = None, +@overload # fallback +def amax( + a: _ArrayLikeNumeric_co | _NestedSequence[_Orderable], + axis: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> Any: ... +@overload # fallback, axis: +def amax( + a: _ArrayLikeNumeric_co | _NestedSequence[_Orderable], + axis: int | tuple[int, ...], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[Any]: ... +@overload # fallback, keepdims=True +def amax( + a: _ArrayLikeNumeric_co | _NestedSequence[_Orderable], + axis: int | tuple[int, ...] | None = None, + out: None = None, *, - out: ArrayT, - keepdims: bool | _NoValueType = ..., + keepdims: Literal[True], initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> ArrayT: ... +) -> NDArray[Any]: ... -@overload -def amin[ScalarT: np.generic]( +max = amax + +# keep in sync with `amax` above +@overload # sequence of just `Any` (workaround) +def amin( + a: _NestedSequence[Never], + axis: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> Any: ... +@overload # builtins.bool +def amin( + a: _NestedSequence[bool], + axis: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> np.bool: ... 
+@overload # builtins.bool, axis: +def amin( + a: _NestedSequence[bool], + axis: int | tuple[int, ...], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[np.bool]: ... +@overload # builtins.bool, keepdims=True +def amin( + a: _NestedSequence[bool], + axis: int | tuple[int, ...] | None = None, + out: None = None, + *, + keepdims: Literal[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[np.bool]: ... +@overload # ~builtins.int +def amin( + a: _NestedSequence[list[int]] | list[int], + axis: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> np.int_: ... +@overload # ~builtins.int, axis: +def amin( + a: _NestedSequence[list[int]] | list[int], + axis: int | tuple[int, ...], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[np.int_]: ... +@overload # ~builtins.int, keepdims=True +def amin( + a: _NestedSequence[list[int]] | list[int], + axis: int | tuple[int, ...] | None = None, + out: None = None, + *, + keepdims: Literal[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[np.int_]: ... +@overload # ~builtins.float +def amin( + a: _NestedSequence[list[float]] | list[float], + axis: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> np.float64: ... 
+@overload # ~builtins.float, axis: +def amin( + a: _NestedSequence[list[float]] | list[float], + axis: int | tuple[int, ...], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[np.float64]: ... +@overload # ~builtins.float, keepdims=True +def amin( + a: _NestedSequence[list[float]] | list[float], + axis: int | tuple[int, ...] | None = None, + out: None = None, + *, + keepdims: Literal[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[np.float64]: ... +@overload # ~builtins.complex +def amin( + a: _NestedSequence[list[complex]] | list[complex], + axis: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> np.complex128: ... +@overload # ~builtins.complex, axis: +def amin( + a: _NestedSequence[list[complex]] | list[complex], + axis: int | tuple[int, ...], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[np.complex128]: ... +@overload # ~builtins.complex, keepdims=True +def amin( + a: _NestedSequence[list[complex]] | list[complex], + axis: int | tuple[int, ...] | None = None, + out: None = None, + *, + keepdims: Literal[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[np.complex128]: ... +@overload # +number | timedelta64 | datetime64 +def amin[ScalarT: np.number | np.bool | np.timedelta64 | np.datetime64]( a: _ArrayLike[ScalarT], axis: None = None, out: None = None, @@ -1155,34 +1568,74 @@ def amin[ScalarT: np.generic]( initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., ) -> ScalarT: ... 
-@overload +@overload # +number | timedelta64 | datetime64 | object_, axis: +def amin[ScalarT: np.number | np.bool | np.timedelta64 | np.datetime64 | np.object_]( + a: _ArrayLike[ScalarT], + axis: int | tuple[int, ...], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[ScalarT]: ... +@overload # +number | timedelta64 | datetime64 | object_, keepdims=True +def amin[ArrayT: NDArray[np.number | np.bool | np.timedelta64 | np.datetime64 | np.object_]]( + a: ArrayT, + axis: int | tuple[int, ...] | None = None, + out: None = None, + *, + keepdims: Literal[True], + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> ArrayT: ... +@overload # object_ def amin( - a: ArrayLike, - axis: _ShapeLike | None = None, + a: _ArrayLike[np.object_], + axis: None = None, out: None = None, - keepdims: bool | _NoValueType = ..., + keepdims: Literal[False] | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... -@overload +@overload # out: ArrayT def amin[ArrayT: np.ndarray]( - a: ArrayLike, - axis: _ShapeLike | None, + a: _ArrayLikeNumeric_co | _NestedSequence[_Orderable], + axis: int | tuple[int, ...] | None = None, + *, out: ArrayT, keepdims: bool | _NoValueType = ..., initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., ) -> ArrayT: ... -@overload -def amin[ArrayT: np.ndarray]( - a: ArrayLike, - axis: _ShapeLike | None = None, +@overload # fallback +def amin( + a: _ArrayLikeNumeric_co | _NestedSequence[_Orderable], + axis: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> Any: ... 
+@overload # fallback, axis: +def amin( + a: _ArrayLikeNumeric_co | _NestedSequence[_Orderable], + axis: int | tuple[int, ...], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[Any]: ... +@overload # fallback, keepdims=True +def amin( + a: _ArrayLikeNumeric_co | _NestedSequence[_Orderable], + axis: int | tuple[int, ...] | None = None, + out: None = None, *, - out: ArrayT, - keepdims: bool | _NoValueType = ..., + keepdims: Literal[True], initial: _NumberLike_co | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> ArrayT: ... +) -> NDArray[Any]: ... + +min = amin # keep in sync with `cumsum` above @overload @@ -2495,6 +2948,4 @@ def var( correction: float | _NoValueType = ..., ) -> NDArray[Any]: ... -max = amax -min = amin round = around diff --git a/numpy/typing/tests/data/reveal/fromnumeric.pyi b/numpy/typing/tests/data/reveal/fromnumeric.pyi index 70b63ce02554..cc1f1b1b9ee8 100644 --- a/numpy/typing/tests/data/reveal/fromnumeric.pyi +++ b/numpy/typing/tests/data/reveal/fromnumeric.pyi @@ -21,6 +21,7 @@ AR_subclass: NDArraySubclass AR_m_ns: npt.NDArray[np.timedelta64[int]] AR_m_s: npt.NDArray[np.timedelta64[dt.timedelta]] AR_m_nat: npt.NDArray[np.timedelta64[None]] +AR_M_ns: npt.NDArray[np.datetime64[int]] AR_0d: np.ndarray[tuple[()]] AR_1d: np.ndarray[tuple[int]] AR_nd: np.ndarray @@ -30,6 +31,9 @@ f4: np.float32 i8: np.int64 f: float +_dtype_list: list[np.dtype] +_any_list: list[Any] + # integer‑dtype subclass for argmin/argmax class NDArrayIntSubclass(np.ndarray[tuple[Any, ...], np.dtype[np.intp]]): ... 
AR_sub_i: NDArrayIntSubclass @@ -237,38 +241,83 @@ assert_type(np.cumulative_sum(f, dtype=float), np.ndarray[tuple[int]]) assert_type(np.cumulative_sum(f, dtype=np.float64), np.ndarray[tuple[int], np.dtype[np.float64]]) assert_type(np.cumulative_sum(AR_f4, out=AR_subclass), NDArraySubclass) -assert_type(np.ptp(b), np.bool) -assert_type(np.ptp(f4), np.float32) -assert_type(np.ptp(f), Any) -assert_type(np.ptp(AR_b), np.bool) +assert_type(np.ptp(AR_i8), np.int64) +assert_type(np.ptp(AR_i8, axis=0), npt.NDArray[np.int64]) +assert_type(np.ptp(AR_i8, keepdims=True), npt.NDArray[np.int64]) +assert_type(np.ptp(AR_i8, axis=0, keepdims=True), npt.NDArray[np.int64]) assert_type(np.ptp(AR_f4), np.float32) -assert_type(np.ptp(AR_b, axis=0), Any) -assert_type(np.ptp(AR_f4, axis=0), Any) -assert_type(np.ptp(AR_b, keepdims=True), Any) -assert_type(np.ptp(AR_f4, keepdims=True), Any) +assert_type(np.ptp(AR_c16), np.complex128) +assert_type(np.ptp(AR_O), Any) +assert_type(np.ptp(AR_O, axis=0), npt.NDArray[np.object_]) +assert_type(np.ptp(AR_O, keepdims=True), npt.NDArray[np.object_]) +assert_type(np.ptp(AR_O, axis=0, keepdims=True), npt.NDArray[np.object_]) +assert_type(np.ptp(AR_m_ns), np.timedelta64[int]) +assert_type(np.ptp(AR_m_s), np.timedelta64[dt.timedelta]) +assert_type(np.ptp(AR_m_nat), np.timedelta64[None]) +assert_type(np.ptp(AR_M_ns), np.timedelta64) +assert_type(np.ptp(AR_f4, axis=0), npt.NDArray[np.float32]) +assert_type(np.ptp(AR_f4, keepdims=True), npt.NDArray[np.float32]) +assert_type(np.ptp(AR_f4_1d, keepdims=True), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.ptp(AR_f4_2d, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float32]]) assert_type(np.ptp(AR_f4, out=AR_subclass), NDArraySubclass) +assert_type(np.ptp(AR_nd), Any) -assert_type(np.amax(b), np.bool) -assert_type(np.amax(f4), np.float32) -assert_type(np.amax(f), Any) +# same as below +assert_type(np.amax(AR_i8), np.int64) +assert_type(np.amax(AR_i8, axis=0), npt.NDArray[np.int64]) 
+assert_type(np.amax(AR_i8, keepdims=True), npt.NDArray[np.int64]) +assert_type(np.amax(AR_i8, axis=0, keepdims=True), npt.NDArray[np.int64]) assert_type(np.amax(AR_b), np.bool) assert_type(np.amax(AR_f4), np.float32) -assert_type(np.amax(AR_b, axis=0), Any) -assert_type(np.amax(AR_f4, axis=0), Any) -assert_type(np.amax(AR_b, keepdims=True), Any) -assert_type(np.amax(AR_f4, keepdims=True), Any) +assert_type(np.amax(AR_c16), np.complex128) +assert_type(np.amax(AR_O), Any) +assert_type(np.amax(AR_O, axis=0), npt.NDArray[np.object_]) +assert_type(np.amax(AR_O, keepdims=True), npt.NDArray[np.object_]) +assert_type(np.amax(AR_O, axis=0, keepdims=True), npt.NDArray[np.object_]) +assert_type(np.amax(AR_m_ns), np.timedelta64[int]) +assert_type(np.amax(AR_m_s), np.timedelta64[dt.timedelta]) +assert_type(np.amax(AR_m_nat), np.timedelta64[None]) +assert_type(np.amax(AR_M_ns), np.datetime64[int]) +assert_type(np.amax(AR_f4, axis=0), npt.NDArray[np.float32]) +assert_type(np.amax(AR_f4, keepdims=True), npt.NDArray[np.float32]) +assert_type(np.amax(AR_f4_1d, keepdims=True), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.amax(AR_f4_2d, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float32]]) assert_type(np.amax(AR_f4, out=AR_subclass), NDArraySubclass) +assert_type(np.amax(AR_nd), Any) +assert_type(np.amax(AR_nd, axis=1), np.ndarray) +assert_type(np.amax(AR_nd, keepdims=True), np.ndarray) +assert_type(np.amax(_dtype_list), Any) +assert_type(np.amax(_dtype_list, axis=1), npt.NDArray[Any]) +assert_type(np.amax(_dtype_list, keepdims=True), npt.NDArray[Any]) +assert_type(np.amax(_any_list), Any) -assert_type(np.amin(b), np.bool) -assert_type(np.amin(f4), np.float32) -assert_type(np.amin(f), Any) +# same as above +assert_type(np.amin(AR_i8), np.int64) +assert_type(np.amin(AR_i8, axis=0), npt.NDArray[np.int64]) +assert_type(np.amin(AR_i8, keepdims=True), npt.NDArray[np.int64]) +assert_type(np.amin(AR_i8, axis=0, keepdims=True), npt.NDArray[np.int64]) 
assert_type(np.amin(AR_b), np.bool) assert_type(np.amin(AR_f4), np.float32) -assert_type(np.amin(AR_b, axis=0), Any) -assert_type(np.amin(AR_f4, axis=0), Any) -assert_type(np.amin(AR_b, keepdims=True), Any) -assert_type(np.amin(AR_f4, keepdims=True), Any) +assert_type(np.amin(AR_c16), np.complex128) +assert_type(np.amin(AR_O), Any) +assert_type(np.amin(AR_O, axis=0), npt.NDArray[np.object_]) +assert_type(np.amin(AR_O, keepdims=True), npt.NDArray[np.object_]) +assert_type(np.amin(AR_O, axis=0, keepdims=True), npt.NDArray[np.object_]) +assert_type(np.amin(AR_m_ns), np.timedelta64[int]) +assert_type(np.amin(AR_m_s), np.timedelta64[dt.timedelta]) +assert_type(np.amin(AR_m_nat), np.timedelta64[None]) +assert_type(np.amin(AR_M_ns), np.datetime64[int]) +assert_type(np.amin(AR_f4, axis=0), npt.NDArray[np.float32]) +assert_type(np.amin(AR_f4, keepdims=True), npt.NDArray[np.float32]) +assert_type(np.amin(AR_f4_1d, keepdims=True), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.amin(AR_f4_2d, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float32]]) assert_type(np.amin(AR_f4, out=AR_subclass), NDArraySubclass) +assert_type(np.amin(AR_nd), Any) +assert_type(np.amin(AR_nd, axis=1), np.ndarray) +assert_type(np.amin(AR_nd, keepdims=True), np.ndarray) +assert_type(np.amin(_dtype_list), Any) +assert_type(np.amin(_dtype_list, axis=1), npt.NDArray[Any]) +assert_type(np.amin(_dtype_list, keepdims=True), npt.NDArray[Any]) assert_type(np.cumprod(b), np.ndarray[tuple[int], np.dtype[np.bool]]) assert_type(np.cumprod(f4), np.ndarray[tuple[int], np.dtype[np.float32]]) diff --git a/numpy/typing/tests/data/reveal/ndarray_misc.pyi b/numpy/typing/tests/data/reveal/ndarray_misc.pyi index 7c9b55720811..c68f69cc300c 100644 --- a/numpy/typing/tests/data/reveal/ndarray_misc.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_misc.pyi @@ -111,12 +111,6 @@ assert_type(f8.cumsum(), npt.NDArray[Any]) assert_type(AR_f8.cumsum(), npt.NDArray[Any]) assert_type(AR_f8.cumsum(out=B), 
SubClass) -assert_type(f8.max(), Any) -assert_type(AR_f8.max(), Any) -assert_type(AR_f8.max(axis=0), Any) -assert_type(AR_f8.max(keepdims=True), Any) -assert_type(AR_f8.max(out=B), SubClass) - # same as below (but without `timedelta64`) assert_type(b1.prod(), np.int_) assert_type(i8.prod(), np.int64) @@ -234,11 +228,37 @@ assert_type(AR_f8_2d.var(dtype=np.float32, axis=0), npt.NDArray[np.float32]) assert_type(AR_f8_2d.var(dtype=np.float32, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float32]]) assert_type(AR_f8_2d.var(dtype=np.float32, axis=0, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float32]]) +# same as below +assert_type(f8.max(), Any) +assert_type(AR_i8.max(), np.int64) +assert_type(AR_i8.max(keepdims=True), npt.NDArray[np.int64]) +assert_type(AR_i8.max(axis=0), npt.NDArray[np.int64]) +assert_type(AR_i8.max(axis=0, keepdims=True), npt.NDArray[np.int64]) +assert_type(AR_f8.max(), np.float64) +assert_type(AR_f8.max(keepdims=True), npt.NDArray[np.float64]) +assert_type(AR_f8.max(axis=0), npt.NDArray[np.float64]) +assert_type(AR_f8.max(axis=0, keepdims=True), npt.NDArray[np.float64]) +assert_type(AR_f8.max(out=B), SubClass) +assert_type(AR_f8_2d.max(), np.float64) +assert_type(AR_f8_2d.max(keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(AR_f8_2d.max(axis=0), npt.NDArray[np.float64]) +assert_type(AR_f8_2d.max(axis=0, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float64]]) + +# same as above assert_type(f8.min(), Any) -assert_type(AR_f8.min(), Any) -assert_type(AR_f8.min(axis=0), Any) -assert_type(AR_f8.min(keepdims=True), Any) +assert_type(AR_i8.min(), np.int64) +assert_type(AR_i8.min(keepdims=True), npt.NDArray[np.int64]) +assert_type(AR_i8.min(axis=0), npt.NDArray[np.int64]) +assert_type(AR_i8.min(axis=0, keepdims=True), npt.NDArray[np.int64]) +assert_type(AR_f8.min(), np.float64) +assert_type(AR_f8.min(keepdims=True), npt.NDArray[np.float64]) +assert_type(AR_f8.min(axis=0), 
npt.NDArray[np.float64]) +assert_type(AR_f8.min(axis=0, keepdims=True), npt.NDArray[np.float64]) assert_type(AR_f8.min(out=B), SubClass) +assert_type(AR_f8_2d.min(), np.float64) +assert_type(AR_f8_2d.min(keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(AR_f8_2d.min(axis=0), npt.NDArray[np.float64]) +assert_type(AR_f8_2d.min(axis=0, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float64]]) assert_type(f8.round(), np.float64) assert_type(AR_f8.round(), npt.NDArray[np.float64]) From 8c55907dc14287f09a723a4437871899b2201171 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Tue, 7 Apr 2026 04:30:19 +0200 Subject: [PATCH 1600/1718] TYP: ``ndarray.cum{prod,sum}`` shape-typing and improved dtypes (#31164) --- numpy/__init__.pyi | 184 ++++++++++++++++- numpy/ma/core.pyi | 194 ++++++++++++++++-- numpy/typing/tests/data/reveal/ma.pyi | 4 +- .../typing/tests/data/reveal/ndarray_misc.pyi | 30 ++- 4 files changed, 381 insertions(+), 31 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index f883812b1e0e..0a9b47444313 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1799,7 +1799,7 @@ class _ArrayOrScalarCommon: self, /, condition: _ArrayLikeInt_co, axis: SupportsIndex | None = None, *, out: ArrayT ) -> ArrayT: ... - # Keep in sync with `MaskedArray.cumprod` + # @overload # out: None (default) def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> NDArray[Any]: ... @overload # out: ndarray @@ -1809,7 +1809,7 @@ class _ArrayOrScalarCommon: self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: ArrayT ) -> ArrayT: ... - # Keep in sync with `MaskedArray.cumsum` + # @overload # out: None (default) def cumsum(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> NDArray[Any]: ... 
@overload # out: ndarray @@ -2700,6 +2700,186 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): where: _ArrayLikeBool_co | _NoValueType = ..., ) -> ndarray[_ShapeT_co]: ... + # keep in sync with `MaskedArray.cumprod` + @override # type: ignore[override] + @overload # number | object_ + def cumprod[DTypeT: dtype[number | object_]]( + self: ndarray[Any, DTypeT], + axis: None = None, + dtype: None = None, + out: None = None, + ) -> ndarray[_1D, DTypeT]: ... + @overload # bool_ + def cumprod( + self: NDArray[bool_], + axis: None = None, + dtype: None = None, + out: None = None, + ) -> ndarray[_1D, dtype[int_]]: ... + @overload # dtype: (keyword) + def cumprod[ScalarT: generic]( + self: NDArray[number | bool_ | object_], + axis: None = None, + *, + dtype: _DTypeLike[ScalarT], + out: None = None, + ) -> ndarray[_1D, dtype[ScalarT]]: ... + @overload # dtype: (keyword) + def cumprod( + self: NDArray[number | bool_ | object_], + axis: None = None, + *, + dtype: DTypeLike, + out: None = None, + ) -> ndarray[_1D]: ... + @overload # dtype: (positional) + def cumprod[ScalarT: generic]( + self: NDArray[number | bool_ | object_], + axis: None, + dtype: _DTypeLike[ScalarT], + out: None = None, + ) -> ndarray[_1D, dtype[ScalarT]]: ... + @overload # dtype: (positional) + def cumprod( + self: NDArray[number | bool_ | object_], + axis: None, + dtype: DTypeLike, + out: None = None, + ) -> ndarray[_1D]: ... + @overload # axis: + def cumprod[ArrayT: NDArray[number | object_]]( + self: ArrayT, + axis: SupportsIndex, + dtype: None = None, + out: None = None, + ) -> ArrayT: ... + @overload # bool_, axis: + def cumprod[ShapeT: _Shape]( + self: ndarray[ShapeT, dtype[bool_]], + axis: SupportsIndex, + dtype: None = None, + out: None = None, + ) -> ndarray[ShapeT, dtype[int_]]: ... 
+ @overload # axis: , dtype: + def cumprod[ShapeT: _Shape, ScalarT: generic]( + self: ndarray[ShapeT, dtype[number | bool_ | object_]], + axis: SupportsIndex, + dtype: _DTypeLike[ScalarT], + out: None = None, + ) -> ndarray[ShapeT, dtype[ScalarT]]: ... + @overload # axis: , dtype: + def cumprod[ShapeT: _Shape]( + self: ndarray[ShapeT, dtype[number | bool_ | object_]], + axis: SupportsIndex, + dtype: DTypeLike, + out: None = None, + ) -> ndarray[ShapeT]: ... + @overload # out: ndarray + def cumprod[ArrayT: ndarray]( + self: NDArray[number | bool_ | object_], + axis: SupportsIndex | None, + dtype: DTypeLike | None, + out: ArrayT, + ) -> ArrayT: ... + @overload + def cumprod[ArrayT: ndarray]( # pyright: ignore[reportIncompatibleMethodOverride] + self: NDArray[number | bool_ | object_], + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + ) -> ArrayT: ... + + # keep in sync with `MaskedArray.cumsum` + @override # type: ignore[override] + @overload # number | timedelta64 | object_ + def cumsum[DTypeT: dtype[number | timedelta64 | object_]]( + self: ndarray[Any, DTypeT], + axis: None = None, + dtype: None = None, + out: None = None, + ) -> ndarray[_1D, DTypeT]: ... + @overload # bool_ + def cumsum( + self: NDArray[bool_], + axis: None = None, + dtype: None = None, + out: None = None, + ) -> ndarray[_1D, dtype[int_]]: ... + @overload # dtype: (keyword) + def cumsum[ScalarT: generic]( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: None = None, + *, + dtype: _DTypeLike[ScalarT], + out: None = None, + ) -> ndarray[_1D, dtype[ScalarT]]: ... + @overload # dtype: (keyword) + def cumsum( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: None = None, + *, + dtype: DTypeLike, + out: None = None, + ) -> ndarray[_1D]: ... 
+ @overload # dtype: (positional) + def cumsum[ScalarT: generic]( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: None, + dtype: _DTypeLike[ScalarT], + out: None = None, + ) -> ndarray[_1D, dtype[ScalarT]]: ... + @overload # dtype: (positional) + def cumsum( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: None, + dtype: DTypeLike, + out: None = None, + ) -> ndarray[_1D]: ... + @overload # axis: + def cumsum[ArrayT: NDArray[number | timedelta64 | object_]]( + self: ArrayT, + axis: SupportsIndex, + dtype: None = None, + out: None = None, + ) -> ArrayT: ... + @overload # bool_, axis: + def cumsum[ShapeT: _Shape]( + self: ndarray[ShapeT, dtype[bool_]], + axis: SupportsIndex, + dtype: None = None, + out: None = None, + ) -> ndarray[ShapeT, dtype[int_]]: ... + @overload # axis: , dtype: + def cumsum[ShapeT: _Shape, ScalarT: generic]( + self: ndarray[ShapeT, dtype[number | bool_ | timedelta64 | object_]], + axis: SupportsIndex, + dtype: _DTypeLike[ScalarT], + out: None = None, + ) -> ndarray[ShapeT, dtype[ScalarT]]: ... + @overload # axis: , dtype: + def cumsum[ShapeT: _Shape]( + self: ndarray[ShapeT, dtype[number | bool_ | timedelta64 | object_]], + axis: SupportsIndex, + dtype: DTypeLike, + out: None = None, + ) -> ndarray[ShapeT]: ... + @overload # out: ndarray + def cumsum[ArrayT: ndarray]( + self: NDArray[number | bool_ | timedelta64 | object_], + axis: SupportsIndex | None, + dtype: DTypeLike | None, + out: ArrayT, + ) -> ArrayT: ... + @overload + def cumsum[ArrayT: ndarray]( # pyright: ignore[reportIncompatibleMethodOverride] + self: NDArray[number | bool_ | timedelta64 | object_], + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + ) -> ArrayT: ... 
+ # @override # type: ignore[override] @overload # +integer | ~object_ diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 2fd55419f8cf..0b0c77784e79 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -2038,21 +2038,6 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): keepdims: bool | _NoValueType = ..., ) -> ArrayT: ... - # Keep in sync with `ndarray.cumsum` and `ma.core.cumsum` - @overload # out: None (default) - def cumsum(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> MaskedArray: ... - @overload # out: ndarray - def cumsum[ArrayT: np.ndarray](self, /, axis: SupportsIndex | None, dtype: DTypeLike | None, out: ArrayT) -> ArrayT: ... - @overload - def cumsum[ArrayT: np.ndarray]( - self, - /, - axis: SupportsIndex | None = None, - dtype: DTypeLike | None = None, - *, - out: ArrayT, - ) -> ArrayT: ... - # Keep in sync with `ma.core.prod` @overload # type: ignore[override] def prod( @@ -2085,15 +2070,180 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): product = prod - # Keep in sync with `ndarray.cumprod` and `ma.core.cumprod` - @overload # out: None (default) - def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> MaskedArray: ... + # Keep in sync with `ndarray.cumprod` + @override # type: ignore[override] + @overload + def cumprod[DTypeT: dtype[number | object_]]( + self: MaskedArray[Any, DTypeT], + axis: None = None, + dtype: None = None, + out: None = None, + ) -> MaskedArray[tuple[int], DTypeT]: ... + @overload # bool_ + def cumprod( + self: _MaskedArray[np.bool], + axis: None = None, + dtype: None = None, + out: None = None, + ) -> _Masked1D[np.int_]: ... + @overload # dtype: (keyword) + def cumprod[ScalarT: np.generic]( + self: _MaskedArray[number | bool_ | object_], + axis: None = None, + *, + dtype: _DTypeLike[ScalarT], + out: None = None, + ) -> _Masked1D[ScalarT]: ... 
+ @overload # dtype: (keyword) + def cumprod( + self: _MaskedArray[number | bool_ | object_], + axis: None = None, + *, + dtype: DTypeLike, + out: None = None, + ) -> _Masked1D[Any]: ... + @overload # dtype: (positional) + def cumprod[ScalarT: np.generic]( + self: _MaskedArray[number | bool_ | object_], + axis: None, + dtype: _DTypeLike[ScalarT], + out: None = None, + ) -> _Masked1D[ScalarT]: ... + @overload # dtype: (positional) + def cumprod( + self: _MaskedArray[number | bool_ | object_], + axis: None, + dtype: DTypeLike, + out: None = None, + ) -> _Masked1D[Any]: ... + @overload # axis: + def cumprod[ArrayT: _MaskedArray[number | object_]]( + self: ArrayT, + axis: SupportsIndex, + dtype: None = None, + out: None = None, + ) -> ArrayT: ... + @overload # bool_, axis: + def cumprod[ShapeT: _Shape]( + self: MaskedArray[ShapeT, np.dtype[np.bool]], + axis: SupportsIndex, + dtype: None = None, + out: None = None, + ) -> MaskedArray[ShapeT, np.dtype[np.int_]]: ... + @overload # axis: , dtype: + def cumprod[ShapeT: _Shape, ScalarT: np.generic]( + self: MaskedArray[ShapeT, dtype[number | bool_ | object_]], + axis: SupportsIndex, + dtype: _DTypeLike[ScalarT], + out: None = None, + ) -> MaskedArray[ShapeT, dtype[ScalarT]]: ... + @overload # axis: , dtype: + def cumprod[ShapeT: _Shape]( + self: MaskedArray[ShapeT, dtype[number | bool_ | object_]], + axis: SupportsIndex, + dtype: DTypeLike, + out: None = None, + ) -> MaskedArray[ShapeT]: ... + @overload # out: ndarray + def cumprod[ArrayT: ndarray]( + self: _MaskedArray[number | bool_ | object_], + axis: SupportsIndex | None, + dtype: DTypeLike | None, + out: ArrayT, + ) -> ArrayT: ... + @overload + def cumprod[ArrayT: ndarray]( # pyright: ignore[reportIncompatibleMethodOverride] + self: _MaskedArray[number | bool_ | object_], + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, + *, + out: ArrayT, + ) -> ArrayT: ... 
+ + # Keep in sync with `ndarray.cumsum` + @override # type: ignore[override] + @overload + def cumsum[DTypeT: dtype[number | timedelta64 | object_]]( + self: MaskedArray[Any, DTypeT], + axis: None = None, + dtype: None = None, + out: None = None, + ) -> MaskedArray[tuple[int], DTypeT]: ... + @overload # bool_ + def cumsum( + self: _MaskedArray[np.bool], + axis: None = None, + dtype: None = None, + out: None = None, + ) -> _Masked1D[np.int_]: ... + @overload # dtype: (keyword) + def cumsum[ScalarT: np.generic]( + self: _MaskedArray[number | bool_ | timedelta64 | object_], + axis: None = None, + *, + dtype: _DTypeLike[ScalarT], + out: None = None, + ) -> _Masked1D[ScalarT]: ... + @overload # dtype: (keyword) + def cumsum( + self: _MaskedArray[number | bool_ | timedelta64 | object_], + axis: None = None, + *, + dtype: DTypeLike, + out: None = None, + ) -> _Masked1D[Any]: ... + @overload # dtype: (positional) + def cumsum[ScalarT: np.generic]( + self: _MaskedArray[number | bool_ | timedelta64 | object_], + axis: None, + dtype: _DTypeLike[ScalarT], + out: None = None, + ) -> _Masked1D[ScalarT]: ... + @overload # dtype: (positional) + def cumsum( + self: _MaskedArray[number | bool_ | timedelta64 | object_], + axis: None, + dtype: DTypeLike, + out: None = None, + ) -> _Masked1D[Any]: ... + @overload # axis: + def cumsum[ArrayT: _MaskedArray[number | timedelta64 | object_]]( + self: ArrayT, + axis: SupportsIndex, + dtype: None = None, + out: None = None, + ) -> ArrayT: ... + @overload # bool_, axis: + def cumsum[ShapeT: _Shape]( + self: MaskedArray[ShapeT, np.dtype[np.bool]], + axis: SupportsIndex, + dtype: None = None, + out: None = None, + ) -> MaskedArray[ShapeT, np.dtype[np.int_]]: ... + @overload # axis: , dtype: + def cumsum[ShapeT: _Shape, ScalarT: np.generic]( + self: MaskedArray[ShapeT, dtype[number | bool_ | timedelta64 | object_]], + axis: SupportsIndex, + dtype: _DTypeLike[ScalarT], + out: None = None, + ) -> MaskedArray[ShapeT, dtype[ScalarT]]: ... 
+ @overload # axis: , dtype: + def cumsum[ShapeT: _Shape]( + self: MaskedArray[ShapeT, dtype[number | bool_ | timedelta64 | object_]], + axis: SupportsIndex, + dtype: DTypeLike, + out: None = None, + ) -> MaskedArray[ShapeT]: ... @overload # out: ndarray - def cumprod[ArrayT: np.ndarray](self, /, axis: SupportsIndex | None, dtype: DTypeLike | None, out: ArrayT) -> ArrayT: ... + def cumsum[ArrayT: ndarray]( + self: _MaskedArray[number | bool_ | timedelta64 | object_], + axis: SupportsIndex | None, + dtype: DTypeLike | None, + out: ArrayT, + ) -> ArrayT: ... @overload - def cumprod[ArrayT: np.ndarray]( - self, - /, + def cumsum[ArrayT: ndarray]( # pyright: ignore[reportIncompatibleMethodOverride] + self: _MaskedArray[number | bool_ | timedelta64 | object_], axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index 18bd3acd916d..159ffa32cf3c 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -440,10 +440,10 @@ assert_type(MAR_td64.reshape(()), np.ma.MaskedArray[tuple[()], np.dtype[np.timed assert_type(MAR_s.reshape([]), np.ma.MaskedArray[tuple[()], np.dtype[np.str_]]) assert_type(MAR_V.reshape((480, 720, 4)), np.ma.MaskedArray[tuple[int, int, int], np.dtype[np.void]]) -assert_type(MAR_f8.cumprod(), MaskedArray[Any]) +assert_type(MAR_f8.cumprod(), np.ma.MaskedArray[tuple[int], np.dtype[np.float64]]) assert_type(MAR_f8.cumprod(out=MAR_subclass), MaskedArraySubclassC) -assert_type(MAR_f8.cumsum(), MaskedArray[Any]) +assert_type(MAR_f8.cumsum(), np.ma.MaskedArray[tuple[int], np.dtype[np.float64]]) assert_type(MAR_f8.cumsum(out=MAR_subclass), MaskedArraySubclassC) assert_type(MAR_f8.view(), MaskedArray[np.float64]) diff --git a/numpy/typing/tests/data/reveal/ndarray_misc.pyi b/numpy/typing/tests/data/reveal/ndarray_misc.pyi index c68f69cc300c..b3c2a4d62372 100644 --- a/numpy/typing/tests/data/reveal/ndarray_misc.pyi +++ 
b/numpy/typing/tests/data/reveal/ndarray_misc.pyi @@ -103,13 +103,33 @@ assert_type(f8.conjugate(), np.float64) assert_type(AR_f8.conjugate(), npt.NDArray[np.float64]) assert_type(B.conjugate(), SubClass) -assert_type(f8.cumprod(), npt.NDArray[Any]) -assert_type(AR_f8.cumprod(), npt.NDArray[Any]) +assert_type(AR_i8.cumprod(), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(AR_i8.cumprod(axis=0), npt.NDArray[np.int64]) +assert_type(AR_f8.cumprod(), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(AR_f8.cumprod(axis=0), npt.NDArray[np.float64]) +assert_type(AR_f8.cumprod(dtype=np.float32), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(AR_f8.cumprod(dtype=np.float32, axis=0), npt.NDArray[np.float32]) assert_type(AR_f8.cumprod(out=B), SubClass) - -assert_type(f8.cumsum(), npt.NDArray[Any]) -assert_type(AR_f8.cumsum(), npt.NDArray[Any]) +assert_type(AR_f8_2d.cumprod(), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(AR_f8_2d.cumprod(axis=0), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(AR_f8_2d.cumprod(dtype=np.float32), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(AR_f8_2d.cumprod(dtype=np.float32, axis=0), np.ndarray[tuple[int, int], np.dtype[np.float32]]) +assert_type(AR_any.cumprod(), np.ndarray[tuple[int]]) +assert_type(AR_any.cumprod(axis=0), np.ndarray) + +assert_type(AR_i8.cumsum(), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(AR_i8.cumsum(axis=0), npt.NDArray[np.int64]) +assert_type(AR_f8.cumsum(), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(AR_f8.cumsum(axis=0), npt.NDArray[np.float64]) +assert_type(AR_f8.cumsum(dtype=np.float32), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(AR_f8.cumsum(dtype=np.float32, axis=0), npt.NDArray[np.float32]) assert_type(AR_f8.cumsum(out=B), SubClass) +assert_type(AR_f8_2d.cumsum(), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(AR_f8_2d.cumsum(axis=0), np.ndarray[tuple[int, int], np.dtype[np.float64]]) 
+assert_type(AR_f8_2d.cumsum(dtype=np.float32), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(AR_f8_2d.cumsum(dtype=np.float32, axis=0), np.ndarray[tuple[int, int], np.dtype[np.float32]]) +assert_type(AR_any.cumsum(), np.ndarray[tuple[int]]) +assert_type(AR_any.cumsum(axis=0), np.ndarray) # same as below (but without `timedelta64`) assert_type(b1.prod(), np.int_) From cc3b09b7a98b0c281e71691d4bf48bb1a793ea3c Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Tue, 7 Apr 2026 04:31:40 +0200 Subject: [PATCH 1601/1718] TYP: ``diagonal`` and ``ndarray.diagonal`` shape-typing (#31165) --- numpy/__init__.pyi | 22 ++++++++++++++ numpy/_core/fromnumeric.pyi | 29 ++++++++++++++++--- numpy/ma/core.pyi | 23 ++++++++++++++- .../typing/tests/data/reveal/fromnumeric.pyi | 2 ++ .../typing/tests/data/reveal/ndarray_misc.pyi | 2 ++ 5 files changed, 73 insertions(+), 5 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 0a9b47444313..8b9dd26bef0d 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -3559,6 +3559,28 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): ) -> ndarray[_ShapeT_co, _dtype[intp]]: ... # keep in sync with `ma.MaskedArray.diagonal` + @overload # ?d (workaround) + def diagonal[DTypeT: dtype]( + self: ndarray[tuple[Never, Never, Never, Never], DTypeT], + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + ) -> ndarray[_AnyShape, DTypeT]: ... + @overload # 2d + def diagonal[DTypeT: dtype]( + self: ndarray[tuple[int, int], DTypeT], + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + ) -> ndarray[tuple[int], DTypeT]: ... + @overload # 3d + def diagonal[DTypeT: dtype]( + self: ndarray[tuple[int, int, int], DTypeT], + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + ) -> ndarray[tuple[int, int], DTypeT]: ... 
+ @overload # Nd (fallback) def diagonal( self, offset: SupportsIndex = 0, diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index 54cf087185ee..797e4bf010c5 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -613,19 +613,40 @@ def squeeze[ScalarT: np.generic](a: _ArrayLike[ScalarT], axis: _ShapeLike | None def squeeze(a: ArrayLike, axis: _ShapeLike | None = None) -> NDArray[Any]: ... # keep in sync with `ma.core.diagonal` -@overload +@overload # ?d (workaround) +def diagonal[ScalarT: np.generic]( + a: _ArrayJustND[ScalarT], + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, +) -> NDArray[ScalarT]: ... +@overload # 2d +def diagonal[ScalarT: np.generic]( + a: _ToArray2D[ScalarT], + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, +) -> _Array1D[ScalarT]: ... +@overload # 3d +def diagonal[ScalarT: np.generic]( + a: _ToArray3D[ScalarT], + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, +) -> _Array2D[ScalarT]: ... +@overload # Nd def diagonal[ScalarT: np.generic]( a: _ArrayLike[ScalarT], offset: SupportsIndex = 0, axis1: SupportsIndex = 0, - axis2: SupportsIndex = 1, # >= 2D array + axis2: SupportsIndex = 1, ) -> NDArray[ScalarT]: ... -@overload +@overload # fallback def diagonal( a: ArrayLike, offset: SupportsIndex = 0, axis1: SupportsIndex = 0, - axis2: SupportsIndex = 1, # >= 2D array + axis2: SupportsIndex = 1, ) -> NDArray[Any]: ... 
# keep in sync with `ma.core.trace` diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 0b0c77784e79..1bcedd4360a1 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -2661,9 +2661,30 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): # keep in sync with `ndarray.diagonal` @override + @overload # ?d (workaround) + def diagonal[DTypeT: dtype]( + self: MaskedArray[tuple[Never, Never, Never, Never], DTypeT], + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + ) -> MaskedArray[_AnyShape, DTypeT]: ... + @overload # 2d + def diagonal[DTypeT: dtype]( + self: MaskedArray[tuple[int, int], DTypeT], + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + ) -> MaskedArray[tuple[int], DTypeT]: ... + @overload # 3d + def diagonal[DTypeT: dtype]( + self: MaskedArray[tuple[int, int, int], DTypeT], + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + ) -> MaskedArray[tuple[int, int], DTypeT]: ... 
+ @overload # Nd (fallback) def diagonal( self, - /, offset: SupportsIndex = 0, axis1: SupportsIndex = 0, axis2: SupportsIndex = 1, diff --git a/numpy/typing/tests/data/reveal/fromnumeric.pyi b/numpy/typing/tests/data/reveal/fromnumeric.pyi index cc1f1b1b9ee8..812dd91c5517 100644 --- a/numpy/typing/tests/data/reveal/fromnumeric.pyi +++ b/numpy/typing/tests/data/reveal/fromnumeric.pyi @@ -151,6 +151,8 @@ assert_type(np.squeeze(AR_f4), npt.NDArray[np.float32]) assert_type(np.diagonal(AR_b), npt.NDArray[np.bool]) assert_type(np.diagonal(AR_f4), npt.NDArray[np.float32]) +assert_type(np.diagonal(AR_f4_2d), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.diagonal(AR_f4_3d), np.ndarray[tuple[int, int], np.dtype[np.float32]]) assert_type(np.trace(AR_b), Any) assert_type(np.trace(AR_f4), Any) diff --git a/numpy/typing/tests/data/reveal/ndarray_misc.pyi b/numpy/typing/tests/data/reveal/ndarray_misc.pyi index b3c2a4d62372..f25014ce0bd6 100644 --- a/numpy/typing/tests/data/reveal/ndarray_misc.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_misc.pyi @@ -302,6 +302,8 @@ assert_type(AR_f8.argpartition([0]), npt.NDArray[np.intp]) assert_type(AR_f8.argpartition(0, axis=None), np.ndarray[tuple[int], np.dtype[np.intp]]) assert_type(AR_f8.diagonal(), npt.NDArray[np.float64]) +assert_type(AR_f8_2d.diagonal(), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(AR_f8_3d.diagonal(), np.ndarray[tuple[int, int], np.dtype[np.float64]]) assert_type(AR_f8.dot(1), npt.NDArray[Any]) assert_type(AR_f8.dot([1]), Any) From 282d065c947161d34d28f418d008fce4587c1c37 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Tue, 7 Apr 2026 04:32:32 +0200 Subject: [PATCH 1602/1718] TYP: ``arg{min,max}`` shape-typing (#31166) --- numpy/__init__.pyi | 87 +++++++++++++++++ numpy/_core/fromnumeric.pyi | 95 +++++++++++-------- numpy/ma/core.pyi | 64 ++++++++----- .../typing/tests/data/reveal/fromnumeric.pyi | 18 ++-- numpy/typing/tests/data/reveal/ma.pyi | 24 ++--- 
.../typing/tests/data/reveal/ndarray_misc.pyi | 14 ++- 6 files changed, 222 insertions(+), 80 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 8b9dd26bef0d..7266cad8c4af 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2268,6 +2268,93 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def transpose(self, /, *axes: SupportsIndex) -> Self: ... + # keep in sync with `ndarray.argmin` (below) and `ma.MaskedArray.argmax` + @override # type: ignore[override] + @overload + def argmax( + self, + axis: None = None, + out: None = None, + *, + keepdims: L[False] = False, + ) -> intp: ... + @overload # axis: + def argmax( + self, + axis: SupportsIndex, + out: None = None, + *, + keepdims: L[False] = False, + ) -> NDArray[intp]: ... + @overload # keepdims: True + def argmax( + self, + axis: SupportsIndex | None = None, + out: None = None, + *, + keepdims: L[True], + ) -> ndarray[_ShapeT_co, dtype[intp]]: ... + @overload # out: (keyword) + def argmax[ArrayT: NDArray[intp]]( + self, + axis: SupportsIndex | None = None, + *, + out: ArrayT, + keepdims: py_bool = False, + ) -> ArrayT: ... + @overload # out: (positional) + def argmax[ArrayT: NDArray[intp]]( # pyright: ignore[reportIncompatibleMethodOverride] + self, + axis: SupportsIndex | None, + out: ArrayT, + *, + keepdims: py_bool = False, + ) -> ArrayT: ... + + # keep in sync with `ndarray.argmax` (above) and `ma.MaskedArray.argmin` + @override # type: ignore[override] + @overload + def argmin( + self, + axis: None = None, + out: None = None, + *, + keepdims: L[False] = False, + ) -> intp: ... + @overload # axis: + def argmin( + self, + axis: SupportsIndex, + out: None = None, + *, + keepdims: L[False] = False, + ) -> NDArray[intp]: ... + @overload # keepdims: True + def argmin( + self, + axis: SupportsIndex | None = None, + out: None = None, + *, + keepdims: L[True], + ) -> ndarray[_ShapeT_co, dtype[intp]]: ... 
+ @overload # out: (keyword) + def argmin[ArrayT: NDArray[intp]]( + self, + axis: SupportsIndex | None = None, + *, + out: ArrayT, + keepdims: py_bool = False, + ) -> ArrayT: ... + @overload # out: (positional) + def argmin[ArrayT: NDArray[intp]]( # pyright: ignore[reportIncompatibleMethodOverride] + self, + axis: SupportsIndex | None, + out: ArrayT, + *, + keepdims: py_bool = False, + ) -> ArrayT: ... + + # @overload def all( self, diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index 797e4bf010c5..f954cc6b10f0 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -504,72 +504,89 @@ def argsort( stable: bool | None = None, ) -> _Array1D[np.intp]: ... -# -@overload +# keep in sync with `argmin` below +@overload # ?d def argmax( - a: ArrayLike, + a: ArrayLike | _NestedSequence[_Orderable], axis: None = None, out: None = None, *, keepdims: Literal[False] | _NoValueType = ..., -) -> intp: ... -@overload +) -> np.intp: ... +@overload # ?d, axis: def argmax( - a: ArrayLike, + a: ArrayLike | _NestedSequence[_Orderable], + axis: SupportsIndex, + out: None = None, + *, + keepdims: Literal[False] | _NoValueType = ..., +) -> NDArray[np.intp]: ... +@overload # Nd, keepdims=True +def argmax[ShapeT: _Shape]( + a: np.ndarray[ShapeT], axis: SupportsIndex | None = None, out: None = None, *, - keepdims: bool | _NoValueType = ..., -) -> Any: ... -@overload -def argmax[BoolOrIntArrayT: NDArray[np.integer | np.bool]]( - a: ArrayLike, - axis: SupportsIndex | None, - out: BoolOrIntArrayT, + keepdims: Literal[True], +) -> np.ndarray[ShapeT, np.dtype[np.intp]]: ... +@overload # ?d, keepdims=True +def argmax( + a: ArrayLike | _NestedSequence[_Orderable], + axis: SupportsIndex | None = None, + out: None = None, *, - keepdims: bool | _NoValueType = ..., -) -> BoolOrIntArrayT: ... -@overload -def argmax[BoolOrIntArrayT: NDArray[np.integer | np.bool]]( - a: ArrayLike, + keepdims: Literal[True], +) -> NDArray[np.intp]: ... 
+@overload # ?d, out: ArrayT +def argmax[ArrayT: NDArray[np.intp]]( + a: ArrayLike | _NestedSequence[_Orderable], axis: SupportsIndex | None = None, *, - out: BoolOrIntArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., -) -> BoolOrIntArrayT: ... +) -> ArrayT: ... -@overload +# keep in sync with `argmax` above +@overload # ?d def argmin( - a: ArrayLike, + a: ArrayLike | _NestedSequence[_Orderable], axis: None = None, out: None = None, *, keepdims: Literal[False] | _NoValueType = ..., -) -> intp: ... -@overload +) -> np.intp: ... +@overload # ?d, axis: def argmin( - a: ArrayLike, + a: ArrayLike | _NestedSequence[_Orderable], + axis: SupportsIndex, + out: None = None, + *, + keepdims: Literal[False] | _NoValueType = ..., +) -> NDArray[np.intp]: ... +@overload # Nd, keepdims=True +def argmin[ShapeT: _Shape]( + a: np.ndarray[ShapeT], axis: SupportsIndex | None = None, out: None = None, *, - keepdims: bool | _NoValueType = ..., -) -> Any: ... -@overload -def argmin[BoolOrIntArrayT: NDArray[np.integer | np.bool]]( - a: ArrayLike, - axis: SupportsIndex | None, - out: BoolOrIntArrayT, + keepdims: Literal[True], +) -> np.ndarray[ShapeT, np.dtype[np.intp]]: ... +@overload # ?d, keepdims=True +def argmin( + a: ArrayLike | _NestedSequence[_Orderable], + axis: SupportsIndex | None = None, + out: None = None, *, - keepdims: bool | _NoValueType = ..., -) -> BoolOrIntArrayT: ... -@overload -def argmin[BoolOrIntArrayT: NDArray[np.integer | np.bool]]( - a: ArrayLike, + keepdims: Literal[True], +) -> NDArray[np.intp]: ... +@overload # ?d, out: ArrayT +def argmin[ArrayT: NDArray[np.intp]]( + a: ArrayLike | _NestedSequence[_Orderable], axis: SupportsIndex | None = None, *, - out: BoolOrIntArrayT, + out: ArrayT, keepdims: bool | _NoValueType = ..., -) -> BoolOrIntArrayT: ... +) -> ArrayT: ... 
# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 @overload diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 1bcedd4360a1..9f7d97c6374e 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -2372,9 +2372,10 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): stable: bool = False, ) -> _MaskedArray[intp]: ... - # Keep in-sync with np.ma.argmin - @overload # type: ignore[override] - def argmin( # pyrefly: ignore[bad-param-name-override] + # keep in sync with `MaskedArray.argmin` (below) and `ndarray.argmax` + @override # type: ignore[override] + @overload + def argmax( self, axis: None = None, fill_value: _ScalarLike_co | None = None, @@ -2382,17 +2383,26 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): *, keepdims: Literal[False] | _NoValueType = ..., ) -> intp: ... - @overload - def argmin( + @overload # axis: + def argmax( + self, + axis: SupportsIndex, + fill_value: _ScalarLike_co | None = None, + out: None = None, + *, + keepdims: Literal[False] | _NoValueType = ..., + ) -> _MaskedArray[intp]: ... + @overload # keepdims: True + def argmax( self, axis: SupportsIndex | None = None, fill_value: _ScalarLike_co | None = None, out: None = None, *, - keepdims: bool | _NoValueType = ..., - ) -> Any: ... - @overload - def argmin[ArrayT: np.ndarray]( + keepdims: Literal[True], + ) -> MaskedArray[_ShapeT_co, dtype[intp]]: ... + @overload # out: (keyword) + def argmax[ArrayT: NDArray[intp]]( self, axis: SupportsIndex | None = None, fill_value: _ScalarLike_co | None = None, @@ -2400,8 +2410,8 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): out: ArrayT, keepdims: bool | _NoValueType = ..., ) -> ArrayT: ... 
- @overload - def argmin[ArrayT: np.ndarray]( + @overload # out: (positional) + def argmax[ArrayT: NDArray[intp]]( # pyright: ignore[reportIncompatibleMethodOverride] self, axis: SupportsIndex | None, fill_value: _ScalarLike_co | None, @@ -2410,9 +2420,10 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): keepdims: bool | _NoValueType = ..., ) -> ArrayT: ... - # Keep in-sync with np.ma.argmax - @overload # type: ignore[override] - def argmax( # pyrefly: ignore[bad-param-name-override] + # keep in sync with `MaskedArray.argmax` (above) and `ndarray.argmin` + @override # type: ignore[override] + @overload + def argmin( self, axis: None = None, fill_value: _ScalarLike_co | None = None, @@ -2420,17 +2431,26 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): *, keepdims: Literal[False] | _NoValueType = ..., ) -> intp: ... - @overload - def argmax( + @overload # axis: + def argmin( + self, + axis: SupportsIndex, + fill_value: _ScalarLike_co | None = None, + out: None = None, + *, + keepdims: Literal[False] | _NoValueType = ..., + ) -> _MaskedArray[intp]: ... + @overload # keepdims: True + def argmin( self, axis: SupportsIndex | None = None, fill_value: _ScalarLike_co | None = None, out: None = None, *, - keepdims: bool | _NoValueType = ..., - ) -> Any: ... - @overload - def argmax[ArrayT: np.ndarray]( + keepdims: Literal[True], + ) -> MaskedArray[_ShapeT_co, dtype[intp]]: ... + @overload # out: (keyword) + def argmin[ArrayT: NDArray[intp]]( self, axis: SupportsIndex | None = None, fill_value: _ScalarLike_co | None = None, @@ -2438,8 +2458,8 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): out: ArrayT, keepdims: bool | _NoValueType = ..., ) -> ArrayT: ... 
- @overload - def argmax[ArrayT: np.ndarray]( + @overload # out: (positional) + def argmin[ArrayT: NDArray[intp]]( # pyright: ignore[reportIncompatibleMethodOverride] self, axis: SupportsIndex | None, fill_value: _ScalarLike_co | None, diff --git a/numpy/typing/tests/data/reveal/fromnumeric.pyi b/numpy/typing/tests/data/reveal/fromnumeric.pyi index 812dd91c5517..c3d20ed28642 100644 --- a/numpy/typing/tests/data/reveal/fromnumeric.pyi +++ b/numpy/typing/tests/data/reveal/fromnumeric.pyi @@ -120,16 +120,22 @@ assert_type(np.argsort(AR_f4, axis=None), np.ndarray[tuple[int], np.dtype[np.int assert_type(np.argsort(AR_f4_1d, axis=None), np.ndarray[tuple[int], np.dtype[np.intp]]) assert_type(np.argsort(AR_f4_2d, axis=None), np.ndarray[tuple[int], np.dtype[np.intp]]) -assert_type(np.argmax(AR_b), np.intp) +# same as below assert_type(np.argmax(AR_f4), np.intp) -assert_type(np.argmax(AR_b, axis=0), Any) -assert_type(np.argmax(AR_f4, axis=0), Any) +assert_type(np.argmax(AR_f4, axis=0), npt.NDArray[np.intp]) +assert_type(np.argmax(AR_f4, keepdims=True), npt.NDArray[np.intp]) +assert_type(np.argmax(AR_f4_1d, keepdims=True), np.ndarray[tuple[int], np.dtype[np.intp]]) +assert_type(np.argmax(AR_f4_2d, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.intp]]) +assert_type(np.argmax(AR_f4_3d, keepdims=True), np.ndarray[tuple[int, int, int], np.dtype[np.intp]]) assert_type(np.argmax(AR_f4, out=AR_sub_i), NDArrayIntSubclass) -assert_type(np.argmin(AR_b), np.intp) +# same as above assert_type(np.argmin(AR_f4), np.intp) -assert_type(np.argmin(AR_b, axis=0), Any) -assert_type(np.argmin(AR_f4, axis=0), Any) +assert_type(np.argmin(AR_f4, axis=0), npt.NDArray[np.intp]) +assert_type(np.argmin(AR_f4, keepdims=True), npt.NDArray[np.intp]) +assert_type(np.argmin(AR_f4_1d, keepdims=True), np.ndarray[tuple[int], np.dtype[np.intp]]) +assert_type(np.argmin(AR_f4_2d, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.intp]]) +assert_type(np.argmin(AR_f4_3d, keepdims=True), 
np.ndarray[tuple[int, int, int], np.dtype[np.intp]]) assert_type(np.argmin(AR_f4, out=AR_sub_i), NDArrayIntSubclass) assert_type(np.searchsorted(AR_b[0], 0), np.intp) diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index 159ffa32cf3c..c2d2c4c23f3d 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -15,6 +15,7 @@ class IntoMaskedArraySubClass[ScalarT: np.generic]: def __array__(self) -> MaskedArraySubclass[ScalarT]: ... type MaskedArraySubclassC = MaskedArraySubclass[np.complex128] +type MaskedArraySubclassI = MaskedArraySubclass[np.intp] AR_b: NDArray[np.bool] AR_f4: NDArray[np.float32] @@ -51,6 +52,7 @@ MAR_floating: MaskedArray[np.floating] MAR_number: MaskedArray[np.number] MAR_subclass: MaskedArraySubclassC +MAR_subclass_i: MaskedArraySubclassI MAR_into_subclass: IntoMaskedArraySubClass[np.float32] MAR_1d: np.ma.MaskedArray[tuple[int], np.dtype] @@ -131,12 +133,12 @@ assert_type(MAR_f4.ptp(None, MAR_subclass), MaskedArraySubclassC) assert_type(MAR_b.argmin(), np.intp) assert_type(MAR_f4.argmin(), np.intp) -assert_type(MAR_f4.argmax(fill_value=6.28318, keepdims=False), np.intp) -assert_type(MAR_b.argmin(axis=0), Any) -assert_type(MAR_f4.argmin(axis=0), Any) -assert_type(MAR_b.argmin(keepdims=True), Any) -assert_type(MAR_f4.argmin(out=MAR_subclass), MaskedArraySubclassC) -assert_type(MAR_f4.argmin(None, None, out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.argmin(fill_value=6.28318, keepdims=False), np.intp) +assert_type(MAR_b.argmin(axis=0), MaskedArray[np.intp]) +assert_type(MAR_f4.argmin(axis=0), MaskedArray[np.intp]) +assert_type(MAR_b.argmin(keepdims=True), MaskedArray[np.intp]) +assert_type(MAR_f4.argmin(out=MAR_subclass_i), MaskedArraySubclassI) +assert_type(MAR_f4.argmin(None, None, out=MAR_subclass_i), MaskedArraySubclassI) assert_type(np.ma.argmin(MAR_b), np.intp) assert_type(np.ma.argmin(MAR_f4), np.intp) @@ -150,11 +152,11 @@ 
assert_type(np.ma.argmin(MAR_f4, None, None, out=MAR_subclass), MaskedArraySubcl assert_type(MAR_b.argmax(), np.intp) assert_type(MAR_f4.argmax(), np.intp) assert_type(MAR_f4.argmax(fill_value=6.28318, keepdims=False), np.intp) -assert_type(MAR_b.argmax(axis=0), Any) -assert_type(MAR_f4.argmax(axis=0), Any) -assert_type(MAR_b.argmax(keepdims=True), Any) -assert_type(MAR_f4.argmax(out=MAR_subclass), MaskedArraySubclassC) -assert_type(MAR_f4.argmax(None, None, out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_b.argmax(axis=0), MaskedArray[np.intp]) +assert_type(MAR_f4.argmax(axis=0), MaskedArray[np.intp]) +assert_type(MAR_b.argmax(keepdims=True), MaskedArray[np.intp]) +assert_type(MAR_f4.argmax(out=MAR_subclass_i), MaskedArraySubclassI) +assert_type(MAR_f4.argmax(None, None, out=MAR_subclass_i), MaskedArraySubclassI) assert_type(np.ma.argmax(MAR_b), np.intp) assert_type(np.ma.argmax(MAR_f4), np.intp) diff --git a/numpy/typing/tests/data/reveal/ndarray_misc.pyi b/numpy/typing/tests/data/reveal/ndarray_misc.pyi index f25014ce0bd6..85da6342a02c 100644 --- a/numpy/typing/tests/data/reveal/ndarray_misc.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_misc.pyi @@ -68,15 +68,25 @@ assert_type(AR_f8.any(axis=0), np.bool | npt.NDArray[np.bool]) assert_type(AR_f8.any(keepdims=True), np.bool | npt.NDArray[np.bool]) assert_type(AR_f8.any(out=B), SubClass) +# same as below assert_type(f8.argmax(), np.intp) assert_type(AR_f8.argmax(), np.intp) -assert_type(AR_f8.argmax(axis=0), Any) +assert_type(AR_f8.argmax(axis=0), npt.NDArray[np.intp]) assert_type(AR_f8.argmax(out=AR_i8), npt.NDArray[np.int64]) +assert_type(AR_f8.argmax(keepdims=True), npt.NDArray[np.intp]) +assert_type(AR_f8_1d.argmax(keepdims=True), np.ndarray[tuple[int], np.dtype[np.intp]]) +assert_type(AR_f8_2d.argmax(keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.intp]]) +assert_type(AR_f8_3d.argmax(keepdims=True), np.ndarray[tuple[int, int, int], np.dtype[np.intp]]) +# same as above 
assert_type(f8.argmin(), np.intp) assert_type(AR_f8.argmin(), np.intp) -assert_type(AR_f8.argmin(axis=0), Any) +assert_type(AR_f8.argmin(axis=0), npt.NDArray[np.intp]) assert_type(AR_f8.argmin(out=AR_i8), npt.NDArray[np.int64]) +assert_type(AR_f8.argmin(keepdims=True), npt.NDArray[np.intp]) +assert_type(AR_f8_1d.argmin(keepdims=True), np.ndarray[tuple[int], np.dtype[np.intp]]) +assert_type(AR_f8_2d.argmin(keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.intp]]) +assert_type(AR_f8_3d.argmin(keepdims=True), np.ndarray[tuple[int, int, int], np.dtype[np.intp]]) assert_type(f8.argsort(), npt.NDArray[np.intp]) assert_type(AR_f8.argsort(), npt.NDArray[np.intp]) From f2cffb4ee77b39f3ad62369dba3a90a9687a7bfd Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Tue, 7 Apr 2026 04:34:02 +0200 Subject: [PATCH 1603/1718] TYP: ``partition`` shape-typing (#31169) --- numpy/_core/fromnumeric.pyi | 36 +++++++++++++------ .../typing/tests/data/reveal/fromnumeric.pyi | 8 +++-- 2 files changed, 31 insertions(+), 13 deletions(-) diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index f954cc6b10f0..3fbaa3767e16 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -345,30 +345,46 @@ def matrix_transpose[ScalarT: np.generic](x: _ArrayLike[ScalarT], /) -> NDArray[ def matrix_transpose(x: ArrayLike, /) -> NDArray[Any]: ... # -@overload +@overload # Nd +def partition[ArrayT: np.ndarray]( + a: ArrayT, + kth: _ArrayLikeInt, + axis: SupportsIndex = -1, + kind: _PartitionKind = "introselect", + order: str | Sequence[str] | None = None, +) -> ArrayT: ... +@overload # ?d def partition[ScalarT: np.generic]( a: _ArrayLike[ScalarT], kth: _ArrayLikeInt, - axis: SupportsIndex | None = -1, + axis: SupportsIndex = -1, kind: _PartitionKind = "introselect", - order: None = None, + order: str | Sequence[str] | None = None, ) -> NDArray[ScalarT]: ... 
-@overload -def partition( - a: _ArrayLike[np.void], +@overload # axis: None +def partition[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], kth: _ArrayLikeInt, - axis: SupportsIndex | None = -1, + axis: None, kind: _PartitionKind = "introselect", order: str | Sequence[str] | None = None, -) -> NDArray[np.void]: ... -@overload +) -> _Array1D[ScalarT]: ... +@overload # fallback def partition( a: ArrayLike, kth: _ArrayLikeInt, - axis: SupportsIndex | None = -1, + axis: SupportsIndex = -1, kind: _PartitionKind = "introselect", order: str | Sequence[str] | None = None, ) -> NDArray[Any]: ... +@overload # fallback, axis: None +def partition( + a: ArrayLike, + kth: _ArrayLikeInt, + axis: None, + kind: _PartitionKind = "introselect", + order: str | Sequence[str] | None = None, +) -> _Array1D[Any]: ... # keep roughly in sync with `ndarray.argpartition` @overload # axis: None diff --git a/numpy/typing/tests/data/reveal/fromnumeric.pyi b/numpy/typing/tests/data/reveal/fromnumeric.pyi index c3d20ed28642..6132c2a4d2a6 100644 --- a/numpy/typing/tests/data/reveal/fromnumeric.pyi +++ b/numpy/typing/tests/data/reveal/fromnumeric.pyi @@ -81,11 +81,13 @@ assert_type(np.transpose(AR_f4), npt.NDArray[np.float32]) assert_type(np.transpose(AR_f4_1d), np.ndarray[tuple[int], np.dtype[np.float32]]) assert_type(np.transpose(AR_f4_2d), np.ndarray[tuple[int, int], np.dtype[np.float32]]) -assert_type(np.partition(b, 0, axis=None), npt.NDArray[np.bool]) -assert_type(np.partition(f4, 0, axis=None), npt.NDArray[np.float32]) -assert_type(np.partition(f, 0, axis=None), npt.NDArray[Any]) assert_type(np.partition(AR_b, 0), npt.NDArray[np.bool]) +assert_type(np.partition(AR_b, 0, axis=None), np.ndarray[tuple[int], np.dtype[np.bool]]) assert_type(np.partition(AR_f4, 0), npt.NDArray[np.float32]) +assert_type(np.partition(AR_f4, 0, axis=None), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.partition(AR_f4_1d, 0), np.ndarray[tuple[int], np.dtype[np.float32]]) 
+assert_type(np.partition(AR_f4_2d, 0), np.ndarray[tuple[int, int], np.dtype[np.float32]]) +assert_type(np.partition(AR_f4_3d, 0), np.ndarray[tuple[int, int, int], np.dtype[np.float32]]) assert_type(np.argpartition(b, 0), npt.NDArray[np.intp]) assert_type(np.argpartition(f4, 0), npt.NDArray[np.intp]) From af6be5de9040a77a841bde78acc9bf8f135834da Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Tue, 7 Apr 2026 04:38:59 +0200 Subject: [PATCH 1604/1718] DOC: release notes for the latest shape-typing improvements (#31172) --- doc/release/upcoming_changes/31172.typing.rst | 22 +++++++++++++++++++ 1 file changed, 22 insertions(+) create mode 100644 doc/release/upcoming_changes/31172.typing.rst diff --git a/doc/release/upcoming_changes/31172.typing.rst b/doc/release/upcoming_changes/31172.typing.rst new file mode 100644 index 000000000000..739583f79eaf --- /dev/null +++ b/doc/release/upcoming_changes/31172.typing.rst @@ -0,0 +1,22 @@ +Shape-typing support for many functions and methods +--------------------------------------------------- +Many functions and methods now have shape-aware return type annotations. +Type-checkers can now infer the number of dimensions of the returned array +through common operations. For example, ``np.linspace(0, 1)`` is now typed +as a 1-d ``float64`` array, and ``np.sum(x, keepdims=True)`` has the same +number of dimensions as ``x``. 
+ +This covers ``numpy.linalg`` functions, array creation functions (like +``asarray``, ``from{buffer,string,file,iter,regex}``), range functions (``linspace``, +``logspace``, ``geomspace``), aggregation functions and methods (``sum``, +``mean``, ``std``, ``var``, ``min``, ``max``, ``all``, ``any``, etc.), sorting +(``sort``, ``argsort``, ``argpartition``), cumulative operations (``cumsum``, +``cumprod``, etc.), set operations (``unique_values``, ``intersect1d``, +``union1d``, etc.), and various other functions including ``nonzero``, +``transpose``, ``diagonal``, ``atleast_{1,2,3}d``, ``clip``, ``round``, +``inner``, ``bincount``, and ``fft.fftfreq``. Several of these also gained +more precise return dtype annotations as part of this work. + +Shape-typing is still a work-in-progress, so coverage is not yet complete. +Because of limitations in Python's type system and current type-checkers, +shape-typing is often only implemented for the most common lower-rank cases. From 3b024185c3dc2a89cd266174d78a29431131be0e Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Tue, 7 Apr 2026 10:16:45 +0200 Subject: [PATCH 1605/1718] DEP: Deprecate setting the dtype of an numpy.ndarray (#29575) Co-authored-by: Sebastian Berg --- .../upcoming_changes/29244.deprecation.rst | 6 +++ numpy/_core/records.py | 6 ++- numpy/_core/src/multiarray/getset.c | 48 +++++++++++++++---- numpy/_core/src/multiarray/getset.h | 1 + numpy/_core/src/multiarray/methods.c | 14 +++++- numpy/_core/tests/test_deprecations.py | 7 ++- numpy/_core/tests/test_dtype.py | 6 ++- numpy/_core/tests/test_numerictypes.py | 2 +- numpy/lib/_stride_tricks_impl.py | 2 +- numpy/ma/core.py | 2 +- numpy/tests/test_warnings.py | 2 +- numpy/typing/__init__.py | 9 +--- 12 files changed, 81 insertions(+), 24 deletions(-) create mode 100644 doc/release/upcoming_changes/29244.deprecation.rst diff --git a/doc/release/upcoming_changes/29244.deprecation.rst b/doc/release/upcoming_changes/29244.deprecation.rst new file mode 100644 
index 000000000000..cec845302901 --- /dev/null +++ b/doc/release/upcoming_changes/29244.deprecation.rst @@ -0,0 +1,6 @@ +Setting the ``dtype`` attribute is deprecated +--------------------------------------------- +Setting the dtype attribute is now deprecated since mutating +an array is unsafe if an array is shared, especially by multiple +threads. As an alternative, you can create a view with a new dtype +via `array.view(dtype=new_dtype)`. diff --git a/numpy/_core/records.py b/numpy/_core/records.py index d77015a4ccce..dbf84efcdd56 100644 --- a/numpy/_core/records.py +++ b/numpy/_core/records.py @@ -455,7 +455,11 @@ def __setattr__(self, attr, val): newattr = attr not in self.__dict__ try: - ret = object.__setattr__(self, attr, val) + if attr == 'dtype': + # gh-29244 + ret = self._set_dtype(val) + else: + ret = object.__setattr__(self, attr, val) except Exception: fielddict = ndarray.__getattribute__(self, 'dtype').fields or {} if attr not in fielddict: diff --git a/numpy/_core/src/multiarray/getset.c b/numpy/_core/src/multiarray/getset.c index 6275b5f55fb6..04ecd832b9ea 100644 --- a/numpy/_core/src/multiarray/getset.c +++ b/numpy/_core/src/multiarray/getset.c @@ -341,16 +341,11 @@ array_nbytes_get(PyArrayObject *self, void *NPY_UNUSED(ignored)) * (contiguous or fortran) with compatible dimensions The shape and strides * will be adjusted in that case as well. 
*/ -static int -array_descr_set(PyArrayObject *self, PyObject *arg, void *NPY_UNUSED(ignored)) +NPY_NO_EXPORT int +array_descr_set_internal(PyArrayObject *self, PyObject *arg) { PyArray_Descr *newtype = NULL; - - if (arg == NULL) { - PyErr_SetString(PyExc_AttributeError, - "Cannot delete array dtype"); - return -1; - } + assert(arg); if (!(PyArray_DescrConverter(arg, &newtype)) || newtype == NULL) { @@ -496,6 +491,43 @@ array_descr_set(PyArrayObject *self, PyObject *arg, void *NPY_UNUSED(ignored)) return -1; } +static int +non_unique_reference(PyObject *lhs) +{ + // Return 1 if we have a guaranteed non-unique reference + // When 0 is returned, the object can be unique or non-unique +#if defined(PYPY_VERSION) + // on pypy we cannot use reference counting + return 0; +#endif + return Py_REFCNT(lhs) > 1; +} + +static int +array_descr_set(PyArrayObject *self, PyObject *arg) +{ + if (arg == NULL) { + PyErr_SetString(PyExc_AttributeError, + "Cannot delete array dtype"); + return -1; + } + + if (non_unique_reference((PyObject *)self)) { + // this will not emit deprecation warnings for all cases, but for most it will + // we skip unique references, so that we will not get a deprecation warning + // when array.view(new_dtype) is called + /* DEPRECATED 2026-02-06, NumPy 2.5 */ + int ret = PyErr_WarnEx(PyExc_DeprecationWarning, + "Setting the dtype on a NumPy array has been deprecated in NumPy 2.4.\n" + "Instead of changing the dtype on an array x, create a new array with x.view(new_dtype)", + 1); + if (ret) { + return -1; + } + } + return array_descr_set_internal(self, arg); +} + static PyObject * array_struct_get(PyArrayObject *self, void *NPY_UNUSED(ignored)) { diff --git a/numpy/_core/src/multiarray/getset.h b/numpy/_core/src/multiarray/getset.h index 5436efaa325b..a9ae39481837 100644 --- a/numpy/_core/src/multiarray/getset.h +++ b/numpy/_core/src/multiarray/getset.h @@ -3,6 +3,7 @@ extern NPY_NO_EXPORT PyGetSetDef array_getsetlist[]; +NPY_NO_EXPORT int 
array_descr_set_internal(PyArrayObject *self, PyObject *arg); NPY_NO_EXPORT int array_shape_set_internal(PyArrayObject *self, PyObject *val); #endif /* NUMPY_CORE_SRC_MULTIARRAY_GETSET_H_ */ diff --git a/numpy/_core/src/multiarray/methods.c b/numpy/_core/src/multiarray/methods.c index 6728020ad984..ce21261648c5 100644 --- a/numpy/_core/src/multiarray/methods.c +++ b/numpy/_core/src/multiarray/methods.c @@ -2556,6 +2556,15 @@ array_trace(PyArrayObject *self, #undef _CHKTYPENUM +static PyObject* array__set_dtype(PyObject *self, PyObject *args) +{ + int r = array_descr_set_internal((PyArrayObject *)self, args); + + if (r < 0) { + return NULL; + } + Py_RETURN_NONE; +} static PyObject * array_clip(PyArrayObject *self, @@ -3057,6 +3066,9 @@ NPY_NO_EXPORT PyMethodDef array_methods[] = { {"to_device", (PyCFunction)array_to_device, METH_VARARGS | METH_KEYWORDS, NULL}, - + // For deprecation of ndarray setters + {"_set_dtype", + (PyCFunction)array__set_dtype, + METH_O, NULL}, {NULL, NULL, 0, NULL} /* sentinel */ }; diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index 886d1879ec1a..089af5a381f1 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -11,7 +11,7 @@ import numpy as np from numpy._core._multiarray_tests import fromstring_null_term_c_api # noqa: F401 -from numpy.testing import assert_raises +from numpy.testing import IS_PYPY, assert_raises class _DeprecationTestCase: @@ -250,6 +250,11 @@ def test_deprecated_strides_set(self): x = np.eye(2) self.assert_deprecated(setattr, args=(x, 'strides', x.strides)) + @pytest.mark.skipif(IS_PYPY, reason="PyPy handles refcounts differently") + def test_deprecated_dtype_set(self): + x = np.eye(2) + self.assert_deprecated(setattr, args=(x, "dtype", int)) + def test_deprecated_shape_set(self): x = np.eye(2) self.assert_deprecated(setattr, args=(x, "shape", (4, 1))) diff --git a/numpy/_core/tests/test_dtype.py b/numpy/_core/tests/test_dtype.py 
index e8002941ab73..03827c0764e9 100644 --- a/numpy/_core/tests/test_dtype.py +++ b/numpy/_core/tests/test_dtype.py @@ -6,6 +6,7 @@ import pickle import sys import types +import warnings from itertools import permutations from typing import Any @@ -1116,7 +1117,10 @@ def test_zero_stride(self): arr = np.broadcast_to(arr, 10) assert arr.strides == (0,) with pytest.raises(ValueError): - arr.dtype = "i1" + with warnings.catch_warnings(): # gh-28901 + warnings.filterwarnings(action="ignore", + category=DeprecationWarning) + arr.dtype = "i1" class TestDTypeMakeCanonical: def check_canonical(self, dtype, canonical): diff --git a/numpy/_core/tests/test_numerictypes.py b/numpy/_core/tests/test_numerictypes.py index 1cb204a69c2c..193b253d4f92 100644 --- a/numpy/_core/tests/test_numerictypes.py +++ b/numpy/_core/tests/test_numerictypes.py @@ -335,7 +335,7 @@ class TestReadValuesNestedMultiple(ReadValuesNested): class TestEmptyField: def test_assign(self): a = np.arange(10, dtype=np.float32) - a.dtype = [("int", "<0i4"), ("float", "<2f4")] + a = a.view(dtype=[("int", "<0i4"), ("float", "<2f4")]) assert_(a['int'].shape == (5, 0)) assert_(a['float'].shape == (5, 2)) diff --git a/numpy/lib/_stride_tricks_impl.py b/numpy/lib/_stride_tricks_impl.py index 919763ad3918..cee16fb9e7fc 100644 --- a/numpy/lib/_stride_tricks_impl.py +++ b/numpy/lib/_stride_tricks_impl.py @@ -140,7 +140,7 @@ def as_strided( array = np.asarray(DummyArray(interface, base=base)) # The route via `__interface__` does not preserve structured # dtypes. Since dtype should remain unchanged, we set it explicitly. 
- array.dtype = base.dtype + array._set_dtype(base.dtype) view = _maybe_view_as_subclass(base, array) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 28200da59675..9cf6606375a7 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -3495,7 +3495,7 @@ def dtype(self): @dtype.setter def dtype(self, dtype): - super(MaskedArray, type(self)).dtype.__set__(self, dtype) + self._set_dtype(dtype) if self._mask is not nomask: self._mask = self._mask.view(make_mask_descr(dtype), ndarray) # Try to reset the shape of the mask (if we don't have a void). diff --git a/numpy/tests/test_warnings.py b/numpy/tests/test_warnings.py index 150327e85c61..84f2f79d1f86 100644 --- a/numpy/tests/test_warnings.py +++ b/numpy/tests/test_warnings.py @@ -34,7 +34,7 @@ def visit_Call(self, node): ast.NodeVisitor.generic_visit(self, node) if p.ls[-1] == 'simplefilter' or p.ls[-1] == 'filterwarnings': - if getattr(node.args[0], "value", None) == "ignore": + if node.args and getattr(node.args[0], "value", None) == "ignore": if not self.__filename.name.startswith("test_"): raise AssertionError( "ignore filters should only be used in tests; " diff --git a/numpy/typing/__init__.py b/numpy/typing/__init__.py index 1a90f9e0c212..c46a381533ab 100644 --- a/numpy/typing/__init__.py +++ b/numpy/typing/__init__.py @@ -64,14 +64,7 @@ ndarray ~~~~~~~ -It's possible to mutate the dtype of an array at runtime. For example, -the following code is valid: - -.. code-block:: python - - >>> x = np.array([1, 2]) - >>> x.dtype = np.bool - +It's possible (but deprecated) to mutate the dtype of an array at runtime. This sort of mutation is not allowed by the types. Users who want to write statically typed code should instead use the `numpy.ndarray.view` method to create a view of the array with a different dtype. 
From 6cb0da6350fe1871b20322063de335da02e4e4b3 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Tue, 7 Apr 2026 14:55:31 +0200 Subject: [PATCH 1606/1718] DEV: avoid byte-compiling every .py file on a `spin build` call. (#31173) --- .spin/cmds.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/.spin/cmds.py b/.spin/cmds.py index 4dcafb3ff1f1..12832fb3b24d 100644 --- a/.spin/cmds.py +++ b/.spin/cmds.py @@ -77,10 +77,14 @@ def changelog(token, revision_range): help="Build with pre-installed scipy-openblas32 or scipy-openblas64 wheel" ) @spin.util.extend_command(spin.cmds.meson.build) -def build(*, parent_callback, with_scipy_openblas, **kwargs): +def build(*, parent_callback, meson_args, with_scipy_openblas, **kwargs): if with_scipy_openblas: _config_openblas(with_scipy_openblas) - parent_callback(**kwargs) + + # Avoid byte-compiling on every rebuild/reinstall, that's very expensive + meson_args += ("-Dpython.bytecompile=-1",) + + parent_callback(**{'meson_args': meson_args, **kwargs}) @spin.util.extend_command(spin.cmds.meson.docs) From 86662bfb4f8947a7c814fbf5a3cfdd6f18c279f3 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Tue, 7 Apr 2026 14:49:44 +0200 Subject: [PATCH 1607/1718] DEP: Undo deprecation for ``np.dtype()`` signature used by old pickles Some (very) old pickles pass align=0, copy=1 as integers rather than bools. This just assumes no user will really do that, and allows it. It may be possible to also use `copyreg` to work around it, but since it is a rather specific signature this seems easier/just as well. 
--- numpy/_core/src/multiarray/descriptor.c | 14 ++++++++++--- numpy/_core/tests/test_datetime.py | 26 ++++++++++++------------- numpy/_core/tests/test_multiarray.py | 21 -------------------- 3 files changed, 23 insertions(+), 38 deletions(-) diff --git a/numpy/_core/src/multiarray/descriptor.c b/numpy/_core/src/multiarray/descriptor.c index e712ff5372bf..ce4d2b85a256 100644 --- a/numpy/_core/src/multiarray/descriptor.c +++ b/numpy/_core/src/multiarray/descriptor.c @@ -2529,6 +2529,7 @@ arraydescr_new(PyTypeObject *subtype, PyObject *odescr; PyObject *oalign = NULL; + PyObject *ocopy = NULL; PyObject *metadata = NULL; PyArray_Descr *conv; npy_bool align = NPY_FALSE; @@ -2537,20 +2538,27 @@ arraydescr_new(PyTypeObject *subtype, static char *kwlist[] = {"dtype", "align", "copy", "metadata", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|OO&O!:dtype", kwlist, + if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|OOO!:dtype", kwlist, &odescr, &oalign, - PyArray_BoolConverter, ©, + &ocopy, &PyDict_Type, &metadata)) { return NULL; } + if (ocopy != NULL && !PyArray_BoolConverter(ocopy, ©)) { + return NULL; + } if (oalign != NULL) { /* * In the future, reject non Python (or NumPy) boolean, including integers to avoid any * possibility of thinking that an integer alignment makes sense here. */ - if (!PyBool_Check(oalign) && !PyArray_IsScalar(oalign, Bool)) { + if (!PyBool_Check(oalign) && !PyArray_IsScalar(oalign, Bool) && !( + // Some old pickles use 0, 1 exactly, assume no user passes it + // (It may also be possible to use `copyreg` instead.) + PyLong_CheckExact(oalign) && PyLong_AsLong(oalign) == 0 && + ocopy != NULL && PyLong_CheckExact(ocopy) && PyLong_AsLong(ocopy) == 1)) { /* Deprecated 2025-07-01: NumPy 2.4 */ if (PyErr_WarnFormat(npy_static_pydata.VisibleDeprecationWarning, 1, "dtype(): align should be passed as Python or NumPy boolean but got `align=%.100R`. 
" diff --git a/numpy/_core/tests/test_datetime.py b/numpy/_core/tests/test_datetime.py index 469cf1823a86..cf80e09d21ac 100644 --- a/numpy/_core/tests/test_datetime.py +++ b/numpy/_core/tests/test_datetime.py @@ -906,20 +906,18 @@ def test_pickle(self): delta) # Check that loading pickles from 1.6 works - with pytest.warns(np.exceptions.VisibleDeprecationWarning, - match=r".*align should be passed"): - pkl = b"cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n"\ - b"(I4\nS'<'\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'D'\np6\n"\ - b"I7\nI1\nI1\ntp7\ntp8\ntp9\nb." - assert_equal(pickle.loads(pkl), np.dtype(''\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'us'\np6\n"\ - b"I1\nI1\nI1\ntp7\ntp8\ntp9\nb." - assert_equal(pickle.loads(pkl), np.dtype('>M8[us]')) + pkl = b"cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n"\ + b"(I4\nS'<'\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'D'\np6\n"\ + b"I7\nI1\nI1\ntp7\ntp8\ntp9\nb." + assert_equal(pickle.loads(pkl), np.dtype(''\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'us'\np6\n"\ + b"I1\nI1\nI1\ntp7\ntp8\ntp9\nb." 
+ assert_equal(pickle.loads(pkl), np.dtype('>M8[us]')) def test_gh_29555(self): # check that dtype metadata round-trips when none diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 49572a43a99c..f8a2b08997c0 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -4729,9 +4729,6 @@ def _loads(self, obj): # version 0 pickles, using protocol=2 to pickle # version 0 doesn't have a version field - @pytest.mark.filterwarnings( - "ignore:.*align should be passed:numpy.exceptions.VisibleDeprecationWarning", - ) def test_version0_int8(self): s = ( b"\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\n" @@ -4743,9 +4740,6 @@ def test_version0_int8(self): p = self._loads(s) assert_equal(a, p) - @pytest.mark.filterwarnings( - "ignore:.*align should be passed:numpy.exceptions.VisibleDeprecationWarning", - ) def test_version0_float32(self): s = ( b"\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\n" @@ -4758,9 +4752,6 @@ def test_version0_float32(self): p = self._loads(s) assert_equal(a, p) - @pytest.mark.filterwarnings( - "ignore:.*align should be passed:numpy.exceptions.VisibleDeprecationWarning", - ) def test_version0_object(self): s = ( b"\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\n" @@ -4774,9 +4765,6 @@ def test_version0_object(self): assert_equal(a, p) # version 1 pickles, using protocol=2 to pickle - @pytest.mark.filterwarnings( - "ignore:.*align should be passed:numpy.exceptions.VisibleDeprecationWarning", - ) def test_version1_int8(self): s = ( b"\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\n" @@ -4788,9 +4776,6 @@ def test_version1_int8(self): p = self._loads(s) assert_equal(a, p) - @pytest.mark.filterwarnings( - "ignore:.*align should be passed:numpy.exceptions.VisibleDeprecationWarning", - ) def test_version1_float32(self): s = ( b"\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\n" @@ -4803,9 +4788,6 @@ def 
test_version1_float32(self): p = self._loads(s) assert_equal(a, p) - @pytest.mark.filterwarnings( - "ignore:.*align should be passed:numpy.exceptions.VisibleDeprecationWarning", - ) def test_version1_object(self): s = ( b"\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\n" @@ -4818,9 +4800,6 @@ def test_version1_object(self): p = self._loads(s) assert_equal(a, p) - @pytest.mark.filterwarnings( - "ignore:.*align should be passed:numpy.exceptions.VisibleDeprecationWarning", - ) def test_subarray_int_shape(self): s = ( b"cnumpy.core.multiarray\n_reconstruct\np0\n" From 1d6144c8bebb97a23d011ffdb48b4bfc3664d575 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Tue, 7 Apr 2026 17:22:29 +0200 Subject: [PATCH 1608/1718] TYP: ``all`` and ``any`` shape-typing (#31170) --- numpy/__init__.pyi | 68 +++++++---- numpy/_core/fromnumeric.pyi | 112 +++++++++++++----- .../typing/tests/data/reveal/fromnumeric.pyi | 22 ++-- .../typing/tests/data/reveal/ndarray_misc.pyi | 14 ++- 4 files changed, 151 insertions(+), 65 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 7266cad8c4af..4875e64dccc1 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2355,77 +2355,97 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): ) -> ArrayT: ... # + # keep in sync with `ndarray.any` (below) @overload def all( self, axis: None = None, out: None = None, - keepdims: L[False, 0] = False, + keepdims: L[False] = False, *, where: _ArrayLikeBool_co = True ) -> bool_: ... - @overload + @overload # axis: + def all( + self, + axis: int | tuple[int, ...], + out: None = None, + keepdims: L[False] = False, + *, + where: _ArrayLikeBool_co = True, + ) -> NDArray[bool_]: ... + @overload # keepdims: True def all( self, axis: int | tuple[int, ...] | None = None, out: None = None, - keepdims: SupportsIndex = False, *, + keepdims: L[True], where: _ArrayLikeBool_co = True, - ) -> bool_ | NDArray[bool_]: ... 
- @overload + ) -> ndarray[_ShapeT_co, dtype[bool_]]: ... + @overload # out: (keyword) def all[ArrayT: ndarray]( self, - axis: int | tuple[int, ...] | None, - out: ArrayT, - keepdims: SupportsIndex = False, + axis: int | tuple[int, ...] | None = None, *, + out: ArrayT, + keepdims: py_bool = False, where: _ArrayLikeBool_co = True, ) -> ArrayT: ... - @overload + @overload # out: (positional) def all[ArrayT: ndarray]( self, - axis: int | tuple[int, ...] | None = None, - *, + axis: int | tuple[int, ...] | None, out: ArrayT, - keepdims: SupportsIndex = False, + keepdims: py_bool = False, + *, where: _ArrayLikeBool_co = True, ) -> ArrayT: ... + # keep in sync with `ndarray.all` (above) @overload def any( self, axis: None = None, out: None = None, - keepdims: L[False, 0] = False, + keepdims: L[False] = False, *, where: _ArrayLikeBool_co = True ) -> bool_: ... - @overload + @overload # axis: + def any( + self, + axis: int | tuple[int, ...], + out: None = None, + keepdims: L[False] = False, + *, + where: _ArrayLikeBool_co = True, + ) -> NDArray[bool_]: ... + @overload # keepdims: True def any( self, axis: int | tuple[int, ...] | None = None, out: None = None, - keepdims: SupportsIndex = False, *, + keepdims: L[True], where: _ArrayLikeBool_co = True, - ) -> bool_ | NDArray[bool_]: ... - @overload + ) -> ndarray[_ShapeT_co, dtype[bool_]]: ... + @overload # out: (keyword) def any[ArrayT: ndarray]( self, - axis: int | tuple[int, ...] | None, - out: ArrayT, - keepdims: SupportsIndex = False, + axis: int | tuple[int, ...] | None = None, *, + out: ArrayT, + keepdims: py_bool = False, where: _ArrayLikeBool_co = True, ) -> ArrayT: ... - @overload + @overload # out: (positional) def any[ArrayT: ndarray]( self, - axis: int | tuple[int, ...] | None = None, - *, + axis: int | tuple[int, ...] | None, out: ArrayT, - keepdims: SupportsIndex = False, + keepdims: py_bool = False, + *, where: _ArrayLikeBool_co = True, ) -> ArrayT: ... 
diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index 3fbaa3767e16..55bca3dbbbe2 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -1,4 +1,4 @@ -from _typeshed import Incomplete, SupportsBool +from _typeshed import SupportsBool from collections.abc import Sequence from typing import ( Any, @@ -882,35 +882,62 @@ def all( a: ArrayLike | None, axis: None = None, out: None = None, - keepdims: Literal[False, 0] | _NoValueType = ..., + keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., ) -> np.bool: ... -@overload -def all( - a: ArrayLike | None, +@overload # axis: int +def all[ShapeT: _Shape]( + a: ArrayLike, + axis: int, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[np.bool]: ... +@overload # axis: (int, ...) +def all[ShapeT: _Shape]( + a: ArrayLike, + axis: tuple[int, ...], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[np.bool] | Any: ... +@overload # Nd, keepdims: True +def all[ShapeT: _Shape]( + a: np.ndarray[ShapeT], axis: int | tuple[int, ...] | None = None, out: None = None, - keepdims: _BoolLike_co | _NoValueType = ..., *, + keepdims: Literal[True], where: _ArrayLikeBool_co | _NoValueType = ..., -) -> Incomplete: ... -@overload +) -> np.ndarray[ShapeT, np.dtype[np.bool]]: ... +@overload # ?d, keepdims: True +def all[ShapeT: _Shape]( + a: ArrayLike, + axis: int | tuple[int, ...] | None = None, + out: None = None, + *, + keepdims: Literal[True], + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[np.bool]: ... +@overload # out: (keyword) def all[ArrayT: np.ndarray]( a: ArrayLike | None, - axis: int | tuple[int, ...] | None, - out: ArrayT, - keepdims: _BoolLike_co | _NoValueType = ..., + axis: int | tuple[int, ...] 
| None = None, *, + out: ArrayT, + keepdims: bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., ) -> ArrayT: ... -@overload +@overload # out: (positional) def all[ArrayT: np.ndarray]( a: ArrayLike | None, - axis: int | tuple[int, ...] | None = None, - *, + axis: int | tuple[int, ...] | None, out: ArrayT, - keepdims: _BoolLike_co | _NoValueType = ..., + keepdims: bool | _NoValueType = ..., + *, where: _ArrayLikeBool_co | _NoValueType = ..., ) -> ArrayT: ... @@ -920,35 +947,62 @@ def any( a: ArrayLike | None, axis: None = None, out: None = None, - keepdims: Literal[False, 0] | _NoValueType = ..., + keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., ) -> np.bool: ... -@overload -def any( - a: ArrayLike | None, +@overload # axis: int +def any[ShapeT: _Shape]( + a: ArrayLike, + axis: int, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[np.bool]: ... +@overload # axis: (int, ...) +def any[ShapeT: _Shape]( + a: ArrayLike, + axis: tuple[int, ...], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[np.bool] | Any: ... +@overload # Nd, keepdims: True +def any[ShapeT: _Shape]( + a: np.ndarray[ShapeT], axis: int | tuple[int, ...] | None = None, out: None = None, - keepdims: _BoolLike_co | _NoValueType = ..., *, + keepdims: Literal[True], where: _ArrayLikeBool_co | _NoValueType = ..., -) -> Incomplete: ... -@overload +) -> np.ndarray[ShapeT, np.dtype[np.bool]]: ... +@overload # ?d, keepdims: True +def any[ShapeT: _Shape]( + a: ArrayLike, + axis: int | tuple[int, ...] | None = None, + out: None = None, + *, + keepdims: Literal[True], + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[np.bool]: ... +@overload # out: (keyword) def any[ArrayT: np.ndarray]( a: ArrayLike | None, - axis: int | tuple[int, ...] 
| None, - out: ArrayT, - keepdims: _BoolLike_co | _NoValueType = ..., + axis: int | tuple[int, ...] | None = None, *, + out: ArrayT, + keepdims: bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., ) -> ArrayT: ... -@overload +@overload # out: (positional) def any[ArrayT: np.ndarray]( a: ArrayLike | None, - axis: int | tuple[int, ...] | None = None, - *, + axis: int | tuple[int, ...] | None, out: ArrayT, - keepdims: _BoolLike_co | _NoValueType = ..., + keepdims: bool | _NoValueType = ..., + *, where: _ArrayLikeBool_co | _NoValueType = ..., ) -> ArrayT: ... diff --git a/numpy/typing/tests/data/reveal/fromnumeric.pyi b/numpy/typing/tests/data/reveal/fromnumeric.pyi index 6132c2a4d2a6..5807a5c80a93 100644 --- a/numpy/typing/tests/data/reveal/fromnumeric.pyi +++ b/numpy/typing/tests/data/reveal/fromnumeric.pyi @@ -212,10 +212,13 @@ assert_type(np.all(f4), np.bool) assert_type(np.all(f), np.bool) assert_type(np.all(AR_b), np.bool) assert_type(np.all(AR_f4), np.bool) -assert_type(np.all(AR_b, axis=0), Any) -assert_type(np.all(AR_f4, axis=0), Any) -assert_type(np.all(AR_b, keepdims=True), Any) -assert_type(np.all(AR_f4, keepdims=True), Any) +assert_type(np.all(AR_b, axis=0), npt.NDArray[np.bool]) +assert_type(np.all(AR_f4, axis=0), npt.NDArray[np.bool]) +assert_type(np.all(AR_b, keepdims=True), npt.NDArray[np.bool]) +assert_type(np.all(AR_f4, keepdims=True), npt.NDArray[np.bool]) +assert_type(np.all(AR_f4_1d, keepdims=True), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.all(AR_f4_2d, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(np.all(AR_f4_3d, keepdims=True), np.ndarray[tuple[int, int, int], np.dtype[np.bool]]) assert_type(np.all(AR_f4, out=AR_subclass), NDArraySubclass) assert_type(np.any(b), np.bool) @@ -223,10 +226,13 @@ assert_type(np.any(f4), np.bool) assert_type(np.any(f), np.bool) assert_type(np.any(AR_b), np.bool) assert_type(np.any(AR_f4), np.bool) -assert_type(np.any(AR_b, axis=0), Any) 
-assert_type(np.any(AR_f4, axis=0), Any) -assert_type(np.any(AR_b, keepdims=True), Any) -assert_type(np.any(AR_f4, keepdims=True), Any) +assert_type(np.any(AR_b, axis=0), npt.NDArray[np.bool]) +assert_type(np.any(AR_f4, axis=0), npt.NDArray[np.bool]) +assert_type(np.any(AR_b, keepdims=True), npt.NDArray[np.bool]) +assert_type(np.any(AR_f4, keepdims=True), npt.NDArray[np.bool]) +assert_type(np.any(AR_f4_1d, keepdims=True), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.any(AR_f4_2d, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(np.any(AR_f4_3d, keepdims=True), np.ndarray[tuple[int, int, int], np.dtype[np.bool]]) assert_type(np.any(AR_f4, out=AR_subclass), NDArraySubclass) assert_type(np.cumsum(b), np.ndarray[tuple[int], np.dtype[np.bool]]) diff --git a/numpy/typing/tests/data/reveal/ndarray_misc.pyi b/numpy/typing/tests/data/reveal/ndarray_misc.pyi index 85da6342a02c..ba718421af88 100644 --- a/numpy/typing/tests/data/reveal/ndarray_misc.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_misc.pyi @@ -58,14 +58,20 @@ assert_type(ctypes_obj.strides_as(ct.c_ubyte), ct.Array[ct.c_ubyte]) assert_type(f8.all(), np.bool) assert_type(AR_f8.all(), np.bool) -assert_type(AR_f8.all(axis=0), np.bool | npt.NDArray[np.bool]) -assert_type(AR_f8.all(keepdims=True), np.bool | npt.NDArray[np.bool]) +assert_type(AR_f8.all(axis=0), npt.NDArray[np.bool]) +assert_type(AR_f8.all(keepdims=True), npt.NDArray[np.bool]) +assert_type(AR_f8_1d.all(keepdims=True), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(AR_f8_2d.all(keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(AR_f8_3d.all(keepdims=True), np.ndarray[tuple[int, int, int], np.dtype[np.bool]]) assert_type(AR_f8.all(out=B), SubClass) assert_type(f8.any(), np.bool) assert_type(AR_f8.any(), np.bool) -assert_type(AR_f8.any(axis=0), np.bool | npt.NDArray[np.bool]) -assert_type(AR_f8.any(keepdims=True), np.bool | npt.NDArray[np.bool]) +assert_type(AR_f8.any(axis=0), 
npt.NDArray[np.bool]) +assert_type(AR_f8.any(keepdims=True), npt.NDArray[np.bool]) +assert_type(AR_f8_1d.any(keepdims=True), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(AR_f8_2d.any(keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(AR_f8_3d.any(keepdims=True), np.ndarray[tuple[int, int, int], np.dtype[np.bool]]) assert_type(AR_f8.any(out=B), SubClass) # same as below From 628813c5b6b6eb1be6ae293cb854fc3f83336296 Mon Sep 17 00:00:00 2001 From: Aleksei Nikiforov <103434461+AlekseiNikiforovIBM@users.noreply.github.com> Date: Tue, 7 Apr 2026 17:28:14 +0200 Subject: [PATCH 1609/1718] MAINT: Rename ppc64le and s390x workflow (#31121) --- .github/workflows/{linux-ppc64le.yml => linux-ibm.yml} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename .github/workflows/{linux-ppc64le.yml => linux-ibm.yml} (100%) diff --git a/.github/workflows/linux-ppc64le.yml b/.github/workflows/linux-ibm.yml similarity index 100% rename from .github/workflows/linux-ppc64le.yml rename to .github/workflows/linux-ibm.yml From 8868f644abcd323df00cf3d75a7774973f6f2ecd Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Tue, 7 Apr 2026 17:57:36 +0200 Subject: [PATCH 1610/1718] TYP: fix ``np.shape`` assignability issue for python lists (#31171) --- numpy/_core/fromnumeric.pyi | 17 +++++++++-------- numpy/typing/tests/data/reveal/fromnumeric.pyi | 9 ++++++++- 2 files changed, 17 insertions(+), 9 deletions(-) diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index 55bca3dbbbe2..dbff4fcc8283 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -741,25 +741,26 @@ def nonzero(a: _ToArray3D[Any]) -> tuple[_Array1D[np.intp], _Array1D[np.intp], _ @overload # Nd (fallback) def nonzero(a: _ArrayLike[Any]) -> tuple[_Array1D[np.intp], ...]: ... 
-# this prevents `Any` from being returned with Pyright -@overload +# `collections.abc.Sequence` can't be used here because `bytes` and `str` are +# subtypes of it, which would make the return types incompatible. +@overload # this prevents `Any` from being returned with Pyright def shape(a: _SupportsShape[Never]) -> _AnyShape: ... @overload def shape[ShapeT: _Shape](a: _SupportsShape[ShapeT]) -> ShapeT: ... @overload def shape(a: _PyScalar) -> tuple[()]: ... -# `collections.abc.Sequence` can't be used hesre, since `bytes` and `str` are -# subtypes of it, which would make the return types incompatible. +@overload # an unbound type variable is used because `list` is invariant +def shape[ScalarT: _PyScalar](a: _PyArray[ScalarT]) -> _1D: ... @overload -def shape(a: _PyArray[_PyScalar]) -> _1D: ... -@overload -def shape(a: _PyArray[_PyArray[_PyScalar]]) -> _2D: ... -# this overload will be skipped by typecheckers that don't support PEP 688 +def shape[ScalarT: _PyScalar](a: Sequence[_PyArray[ScalarT]]) -> _2D: ... @overload +def shape[ScalarT: _PyScalar](a: Sequence[Sequence[_PyArray[ScalarT]]]) -> _3D: ... +@overload # this will be skipped by typecheckers that don't support PEP 688 def shape(a: memoryview | bytearray) -> _1D: ... @overload def shape(a: ArrayLike) -> _AnyShape: ... 
+# @overload def compress[ScalarT: np.generic]( condition: _ArrayLikeBool_co, # 1D bool array diff --git a/numpy/typing/tests/data/reveal/fromnumeric.pyi b/numpy/typing/tests/data/reveal/fromnumeric.pyi index 5807a5c80a93..af473fd41305 100644 --- a/numpy/typing/tests/data/reveal/fromnumeric.pyi +++ b/numpy/typing/tests/data/reveal/fromnumeric.pyi @@ -31,6 +31,10 @@ f4: np.float32 i8: np.int64 f: float +_py_list_1d: list[int] +_py_list_2d: list[list[int]] +_py_list_3d: list[list[list[int]]] + _dtype_list: list[np.dtype] _any_list: list[Any] @@ -184,7 +188,10 @@ assert_type(np.shape(b), tuple[()]) assert_type(np.shape(f), tuple[()]) assert_type(np.shape([1]), tuple[int]) assert_type(np.shape([[2]]), tuple[int, int]) -assert_type(np.shape([[[3]]]), tuple[Any, ...]) +assert_type(np.shape([[[3]]]), tuple[int, int, int]) +assert_type(np.shape(_py_list_1d), tuple[int]) +assert_type(np.shape(_py_list_2d), tuple[int, int]) +assert_type(np.shape(_py_list_3d), tuple[int, int, int]) assert_type(np.shape(AR_b), tuple[Any, ...]) assert_type(np.shape(AR_nd), tuple[Any, ...]) # these fail on mypy, but it works as expected with pyright/pylance From bee41a195099419d569e5d7e7b841759a2161cab Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Tue, 7 Apr 2026 18:00:42 +0200 Subject: [PATCH 1611/1718] REV: Manual revert of float16 svml use (#31178) --- .../_core/src/umath/loops_half.dispatch.c.src | 6 +-- numpy/_core/tests/test_umath_accuracy.py | 40 ------------------- 2 files changed, 1 insertion(+), 45 deletions(-) diff --git a/numpy/_core/src/umath/loops_half.dispatch.c.src b/numpy/_core/src/umath/loops_half.dispatch.c.src index a81a64ed0294..2b17fd622c4b 100644 --- a/numpy/_core/src/umath/loops_half.dispatch.c.src +++ b/numpy/_core/src/umath/loops_half.dispatch.c.src @@ -12,7 +12,7 @@ #define NPY__SVML_IS_ENABLED 0 #endif -#if NPY__SVML_IS_ENABLED && !defined(NPY_HAVE_AVX512_SPR) +#if NPY__SVML_IS_ENABLED typedef __m256i npyvh_f16; #define npyv_cvt_f16_f32 _mm512_cvtph_ps @@ -80,11 
+80,7 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(HALF_@func@) if (!is_mem_overlap(src, steps[0], dst, steps[1], len) && (steps[0] == sizeof(npy_half)) && (steps[1] == sizeof(npy_half))) { - #ifdef NPY_HAVE_AVX512_SPR - __svml_@intrin@s32(src, dst, len); - #else avx512_@intrin@_f16(src, dst, len); - #endif return; } #endif // NPY__SVML_IS_ENABLED diff --git a/numpy/_core/tests/test_umath_accuracy.py b/numpy/_core/tests/test_umath_accuracy.py index 3ca2f508672e..cfed5a40931e 100644 --- a/numpy/_core/tests/test_umath_accuracy.py +++ b/numpy/_core/tests/test_umath_accuracy.py @@ -21,8 +21,6 @@ IS_AVX = __cpu_features__.get('AVX512F', False) or \ (__cpu_features__.get('FMA3', False) and __cpu_features__.get('AVX2', False)) -IS_AVX512FP16 = __cpu_features__.get('AVX512FP16', False) - # only run on linux with AVX, also avoid old glibc (numpy/numpy#20448). runtest = (sys.platform.startswith('linux') and IS_AVX and not _glibc_older_than("2.17")) @@ -84,8 +82,6 @@ def test_validate_transcendentals(self): maxulperr = data_subset['ulperr'].max() assert_array_max_ulp(npfunc(inval), outval, maxulperr) - @pytest.mark.skipif(IS_AVX512FP16, - reason="SVML FP16 have slightly higher ULP errors") @pytest.mark.parametrize("ufunc", UNARY_OBJECT_UFUNCS) def test_validate_fp16_transcendentals(self, ufunc): with np.errstate(all='ignore'): @@ -94,39 +90,3 @@ def test_validate_fp16_transcendentals(self, ufunc): datafp32 = datafp16.astype(np.float32) assert_array_max_ulp(ufunc(datafp16), ufunc(datafp32), maxulp=1, dtype=np.float16) - - @pytest.mark.skipif(not IS_AVX512FP16, - reason="lower ULP only apply for SVML FP16") - def test_validate_svml_fp16(self): - max_ulp_err = { - "arccos": 2.54, - "arccosh": 2.09, - "arcsin": 3.06, - "arcsinh": 1.51, - "arctan": 2.61, - "arctanh": 1.88, - "cbrt": 1.57, - "cos": 1.43, - "cosh": 1.33, - "exp2": 1.33, - "exp": 1.27, - "expm1": 0.53, - "log": 1.80, - "log10": 1.27, - "log1p": 1.88, - "log2": 1.80, - "sin": 1.88, - "sinh": 2.05, - "tan": 2.26, - 
"tanh": 3.00, - } - - with np.errstate(all='ignore'): - arr = np.arange(65536, dtype=np.int16) - datafp16 = np.frombuffer(arr.tobytes(), dtype=np.float16) - datafp32 = datafp16.astype(np.float32) - for func in max_ulp_err: - ufunc = getattr(np, func) - ulp = np.ceil(max_ulp_err[func]) - assert_array_max_ulp(ufunc(datafp16), ufunc(datafp32), - maxulp=ulp, dtype=np.float16) From cc3332d46573608a4fc86337130a523931735291 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 7 Apr 2026 17:55:15 +0000 Subject: [PATCH 1612/1718] MAINT: Bump prefix-dev/setup-pixi from 0.9.4 to 0.9.5 Bumps [prefix-dev/setup-pixi](https://github.com/prefix-dev/setup-pixi) from 0.9.4 to 0.9.5. - [Release notes](https://github.com/prefix-dev/setup-pixi/releases) - [Commits](https://github.com/prefix-dev/setup-pixi/compare/a0af7a228712d6121d37aba47adf55c1332c9c2e...1b2de7f3351f171c8b4dfeb558c639cb58ed4ec0) --- updated-dependencies: - dependency-name: prefix-dev/setup-pixi dependency-version: 0.9.5 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- .github/workflows/pixi-packages.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pixi-packages.yml b/.github/workflows/pixi-packages.yml index d864340eadc2..8408ae032bd3 100644 --- a/.github/workflows/pixi-packages.yml +++ b/.github/workflows/pixi-packages.yml @@ -38,7 +38,7 @@ jobs: fetch-tags: true persist-credentials: false - - uses: prefix-dev/setup-pixi@a0af7a228712d6121d37aba47adf55c1332c9c2e # v0.9.4 + - uses: prefix-dev/setup-pixi@1b2de7f3351f171c8b4dfeb558c639cb58ed4ec0 # v0.9.5 with: pixi-version: v0.64.0 run-install: false From d12adf8d19932a3c9d10d23dd6aad7591bc3a638 Mon Sep 17 00:00:00 2001 From: Ijtihed Kilani Date: Tue, 7 Apr 2026 23:07:28 +0300 Subject: [PATCH 1613/1718] BUG: Fix matvec/vecmat in-place aliasing (out=input produces zeros) (#31150) `np.matvec(A, b, out=b)` returns all zeros because matvec and vecmat are missing the ~NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE output flag that matmul already has. Without it the iterator ends up skipping the overlap check when out is the same array as an input so no temporary copy is made and BLAS gemv clobbers the input buffer. This PR adds the same fix to matvec and vecmat and adds regression testing for both. 
--- numpy/_core/src/umath/ufunc_object.c | 32 +++++++++++++++++----------- numpy/_core/tests/test_ufunc.py | 13 +++++++++++ 2 files changed, 32 insertions(+), 13 deletions(-) diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index 514fa5c9af9c..b17f24637277 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -142,21 +142,16 @@ PyUFunc_clearfperr() NPY_ITER_NO_SUBTYPE | \ NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE -/* Called at module initialization to set the matmul ufunc output flags */ +/* Called at module initialization to set the matmul family gufunc output flags */ NPY_NO_EXPORT int set_matmul_flags(PyObject *d) { - PyObject *matmul = NULL; - int result = PyDict_GetItemStringRef(d, "matmul", &matmul); - if (result <= 0) { - // caller sets an error if one isn't already set - return -1; - } /* * The default output flag NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE allows * perfectly overlapping input and output (in-place operations). While * correct for the common mathematical operations, this assumption is - * incorrect in the general case and specifically in the case of matmul. + * incorrect in the general case and specifically in the case of matmul, + * matvec, and vecmat. * * NPY_ITER_UPDATEIFCOPY is added by default in * PyUFunc_GeneralizedFunction, which is the variant called for gufuncs @@ -164,11 +159,22 @@ set_matmul_flags(PyObject *d) * * Enabling NPY_ITER_WRITEONLY can prevent a copy in some cases. 
*/ - ((PyUFuncObject *)matmul)->op_flags[2] = (NPY_ITER_WRITEONLY | - NPY_ITER_UPDATEIFCOPY | - NPY_UFUNC_DEFAULT_OUTPUT_FLAGS) & - ~NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE; - Py_DECREF(matmul); + npy_uint32 flags = (NPY_ITER_WRITEONLY | + NPY_ITER_UPDATEIFCOPY | + NPY_UFUNC_DEFAULT_OUTPUT_FLAGS) & + ~NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE; + + const char *names[] = {"matmul", "matvec", "vecmat"}; + for (int i = 0; i < 3; i++) { + PyObject *ufunc = NULL; + int result = PyDict_GetItemStringRef(d, names[i], &ufunc); + if (result <= 0) { + // caller sets an error if one isn't already set + return -1; + } + ((PyUFuncObject *)ufunc)->op_flags[2] = flags; + Py_DECREF(ufunc); + } return 0; } diff --git a/numpy/_core/tests/test_ufunc.py b/numpy/_core/tests/test_ufunc.py index ef2f19162372..3473242f8089 100644 --- a/numpy/_core/tests/test_ufunc.py +++ b/numpy/_core/tests/test_ufunc.py @@ -895,6 +895,19 @@ def test_vecdot_matvec_vecmat_complex(self, ufunc, shape1, shape2, conj): expected3 = expected1.astype(object) assert_array_equal(actual3, expected3) + @pytest.mark.parametrize("func", [ + lambda A, x, **kw: np.matvec(A, x, **kw), + lambda A, x, **kw: np.vecmat(x, A, **kw), + ]) + def test_matvec_vecmat_out(self, func): + # overlapping memory: out=input should not produce zeros + a = np.arange(18, dtype=float).reshape(2, 3, 3) + b = np.arange(6, dtype=float).reshape(2, 3) + expected = func(a, b) + c = func(a, b, out=b) + assert c is b + assert_allclose(c, expected) + def test_vecdot_subclass(self): class MySubclass(np.ndarray): pass From e8fd3579ea3c76159a9bb7e995a6d129ff2fc14a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Wed, 8 Apr 2026 10:22:24 +0200 Subject: [PATCH 1614/1718] DEPR: adjust dtype setter deprecation warning (incorrect numpy version) --- numpy/_core/src/multiarray/getset.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/multiarray/getset.c b/numpy/_core/src/multiarray/getset.c index 
04ecd832b9ea..dfe0e962b06e 100644 --- a/numpy/_core/src/multiarray/getset.c +++ b/numpy/_core/src/multiarray/getset.c @@ -518,7 +518,7 @@ array_descr_set(PyArrayObject *self, PyObject *arg) // when array.view(new_dtype) is called /* DEPRECATED 2026-02-06, NumPy 2.5 */ int ret = PyErr_WarnEx(PyExc_DeprecationWarning, - "Setting the dtype on a NumPy array has been deprecated in NumPy 2.4.\n" + "Setting the dtype on a NumPy array has been deprecated in NumPy 2.5.\n" "Instead of changing the dtype on an array x, create a new array with x.view(new_dtype)", 1); if (ret) { From 97f255f747f103447de958cd3b681397740de024 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 8 Apr 2026 17:56:30 +0000 Subject: [PATCH 1615/1718] MAINT: Bump pyrefly from 0.58.0 to 0.59.0 in /requirements Bumps [pyrefly](https://github.com/facebook/pyrefly) from 0.58.0 to 0.59.0. - [Release notes](https://github.com/facebook/pyrefly/releases) - [Commits](https://github.com/facebook/pyrefly/compare/0.58.0...0.59.0) --- updated-dependencies: - dependency-name: pyrefly dependency-version: 0.59.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- requirements/typing_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/typing_requirements.txt b/requirements/typing_requirements.txt index 3fc1cb905b31..54c439343a13 100644 --- a/requirements/typing_requirements.txt +++ b/requirements/typing_requirements.txt @@ -3,4 +3,4 @@ -r test_requirements.txt mypy==1.20.0 -pyrefly==0.58.0 +pyrefly==0.59.0 From bd9fc5da7d3ee987a0e474f1fab2ad2c81704c41 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 8 Apr 2026 17:56:38 +0000 Subject: [PATCH 1616/1718] MAINT: Bump delvewheel from 1.11.2 to 1.12.0 in /requirements Bumps [delvewheel](https://github.com/adang1345/delvewheel) from 1.11.2 to 1.12.0. 
- [Changelog](https://github.com/adang1345/delvewheel/blob/master/CHANGELOG.md) - [Commits](https://github.com/adang1345/delvewheel/commits) --- updated-dependencies: - dependency-name: delvewheel dependency-version: 1.12.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- requirements/delvewheel_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/delvewheel_requirements.txt b/requirements/delvewheel_requirements.txt index 7058ed7384af..67a197c417a5 100644 --- a/requirements/delvewheel_requirements.txt +++ b/requirements/delvewheel_requirements.txt @@ -1 +1 @@ -delvewheel==1.11.2 ; sys_platform == 'win32' +delvewheel==1.12.0 ; sys_platform == 'win32' From 8b2219a514b86386ca685b1e18531607105f3132 Mon Sep 17 00:00:00 2001 From: Noxaster <208382900+Noxaster@users.noreply.github.com> Date: Wed, 8 Apr 2026 22:59:57 +0200 Subject: [PATCH 1617/1718] Apply suggestions from code review Co-authored-by: Joren Hammudoglu --- numpy/linalg/tests/test_linalg.py | 3 +-- numpy/testing/_private/utils.py | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py index f576b9ef0df4..88096edb2ed7 100644 --- a/numpy/linalg/tests/test_linalg.py +++ b/numpy/linalg/tests/test_linalg.py @@ -329,8 +329,7 @@ def _stride_comb_iter(x): xi[...] 
= x xi = xi.view(x.__class__) assert_(np.all(xi == x)) - parts = [f"{j:+}" for j in repeats] - yield xi, "stride_" + "_".join(parts) + yield xi, "stride_" + "_".join(f"{j:+}" for j in repeats) # generate also zero strides if possible if x.ndim >= 1 and x.shape[-1] == 1: diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index 6b9bf6bca5eb..4cd56cfc4fc4 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -1223,7 +1223,7 @@ def compare(x, y): return z < 1.5 * 10.0**(-decimal) assert_array_compare(compare, actual, desired, err_msg=err_msg, verbose=verbose, - header=(f'Arrays are not almost equal to {decimal} decimals'), + header=f'Arrays are not almost equal to {decimal} decimals', precision=decimal) From 6dfe60ef3e459e71e9721e3ec5517b1dd6d13d3d Mon Sep 17 00:00:00 2001 From: agp19d <35179438+agp19d@users.noreply.github.com> Date: Thu, 9 Apr 2026 11:57:10 -0500 Subject: [PATCH 1618/1718] DOC: clarify wording in quickstart ix_() section (#31149) --- doc/source/user/quickstart.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/user/quickstart.rst b/doc/source/user/quickstart.rst index 1208bd1a6347..b611b5000eef 100644 --- a/doc/source/user/quickstart.rst +++ b/doc/source/user/quickstart.rst @@ -1380,8 +1380,8 @@ and then use it as:: The advantage of this version of reduce compared to the normal ufunc.reduce is that it makes use of the :ref:`broadcasting rules ` -in order to avoid creating an argument array the size of the output -times the number of vectors. +in order to avoid creating a temporary array that needs as much memory +as the output array multiplied by the number of vectors. 
Indexing with strings --------------------- From 6a8599103202609be3dc6516c602c018a4ae2cef Mon Sep 17 00:00:00 2001 From: riku-sakamoto <46015196+riku-sakamoto@users.noreply.github.com> Date: Fri, 10 Apr 2026 02:00:34 +0900 Subject: [PATCH 1619/1718] ENH: tweak timedelta deprecation message (#31107) Co-authored-by: Davis Bennett --- doc/source/reference/arrays.datetime.rst | 51 ++++++++++++++++++++++++ numpy/_core/src/multiarray/datetime.c | 14 ++++--- numpy/_core/tests/test_array_coercion.py | 2 +- numpy/_core/tests/test_cython.py | 4 +- numpy/_core/tests/test_datetime.py | 34 +++++++++------- numpy/_core/tests/test_deprecations.py | 2 +- numpy/_core/tests/test_dtype.py | 2 +- numpy/_core/tests/test_numeric.py | 2 +- numpy/_core/tests/test_ufunc.py | 2 +- numpy/polynomial/tests/test_classes.py | 2 +- numpy/testing/tests/test_utils.py | 4 +- 11 files changed, 88 insertions(+), 31 deletions(-) diff --git a/doc/source/reference/arrays.datetime.rst b/doc/source/reference/arrays.datetime.rst index 9cb7f59db78b..1d8201bbce1b 100644 --- a/doc/source/reference/arrays.datetime.rst +++ b/doc/source/reference/arrays.datetime.rst @@ -284,6 +284,8 @@ Datetime units The Datetime and Timedelta data types support a large number of time units, as well as generic units which can be coerced into any of the other units based on input data. +The generic units are deprecated since NumPy 2.5 +and will raise an error in the future. Migration guidance is provided in the `migration guide for deprecation of generic units`_ section below. Datetimes are always stored with an epoch of 1970-01-01T00:00. This means the supported dates are @@ -345,6 +347,12 @@ The protocol is described in the following table: Generic units `datetime.date` ``int`` ================================ ================================= ================================== + +.. deprecated:: 2.5 + The generic units of `timedelta64` are deprecated since NumPy 2.5 and + will raise an error in the future. + + .. 
admonition:: Example .. try_examples:: @@ -635,3 +643,46 @@ given below. A 472, by Stephenson et.al. `_. A sensible estimate is `50491112870 ± 90` seconds, with a difference of 10330 seconds. + + +.. _migration_guide_generic_units: + +Migration guide for deprecation of generic units +================================================ + +The generic units of `timedelta64` are deprecated since NumPy 2.5 +and will raise an error in the future. +This section provides guidance on how to update code +that uses generic units of `timedelta64` to avoid future errors. + +The straight forward way is to replace the generic unit with a specific time unit such as 'D' (day), 'h' (hour), 'm' (minute), 's' (second), etc. The choice of the specific time unit will depend on the context of your code and the level of precision you require. + + +.. admonition:: Example + + .. try_examples:: + + >>> import numpy as np + + >>> # Old code using generic units of timedelta64 + >>> np.timedelta64(5, "s") + 1 + DeprecationWarning: The 'generic' unit for NumPy timedelta is deprecated, and will raise an error in the future. This includes implicit conversion of bare integers (e.g. `+ 1`).Please use a specific unit instead. + + >>> # Updated code using a specific time unit + >>> np.timedelta64(5, "s") + np.timedelta64(1, "s") + np.timedelta64(6,'s') + + + When comparing `timedelta64` objects, make sure to use the same specific time unit for both operands even if they are representing ``0``. + + >>> np.timedelta64(0, "s") == 0 + DeprecationWarning: The 'generic' unit for NumPy timedelta is deprecated, and will raise an error in the future. This includes implicit conversion of bare integers (e.g. `== 0`).Please use a specific unit instead. + np.True_ + + >>> np.timedelta64(0, "s") == np.timedelta64(0, "s") + np.True_ + + When using ``numpy.testing.assert_allclose`` to compare `timedelta64` objects, ensure to set a specific time unit to ``atol`` parameter as well. 
+ + >>> arr = np.ones(5, dtype='m8[s]') + >>> np.testing.assert_allclose(arr, np.timedelta64(1, "s"), atol=np.timedelta64(0, "s")) diff --git a/numpy/_core/src/multiarray/datetime.c b/numpy/_core/src/multiarray/datetime.c index 715fdfbdd756..d907fa8d9c3c 100644 --- a/numpy/_core/src/multiarray/datetime.c +++ b/numpy/_core/src/multiarray/datetime.c @@ -2562,9 +2562,10 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj, /* If output is NaT, skip this warning. */ if(meta->base == NPY_FR_GENERIC) { if (DEPRECATE( - "Using 'generic' unit for NumPy timedelta is deprecated, " - "and will raise an error in the future. Please use a " - "specific units instead.") < 0) { + "The 'generic' unit for NumPy timedelta is deprecated, " + "and will raise an error in the future. " + "This includes implicit conversion of bare integers (e.g. `+ 1`)." + "Please use a specific unit instead.") < 0) { return -1; } } @@ -2587,9 +2588,10 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj, if (meta->base == NPY_FR_GENERIC) { if (DEPRECATE( - "Using 'generic' unit for NumPy timedelta is deprecated, " - "and will raise an error in the future. " - "Please use a specific units instead.") < 0) { + "The 'generic' unit for NumPy timedelta is deprecated, " + "and will raise an error in the future. " + "This includes implicit conversion of bare integers (e.g. `+ 1`)." 
+ "Please use a specific unit instead.") < 0) { return -1; } } diff --git a/numpy/_core/tests/test_array_coercion.py b/numpy/_core/tests/test_array_coercion.py index 522223bd3681..476f6d3a6f7d 100644 --- a/numpy/_core/tests/test_array_coercion.py +++ b/numpy/_core/tests/test_array_coercion.py @@ -440,7 +440,7 @@ def test_coercion_timedelta_convert_to_number(self, dtype, value, unit): def test_coercion_generic_timedelta_convert_to_number(self, dtype, value, unit): with pytest.warns( DeprecationWarning, - match="Using 'generic' unit for NumPy timedelta is deprecated", + match="The 'generic' unit for NumPy timedelta is deprecated", ): scalar = np.timedelta64(value, unit) arr = np.array(scalar, dtype=dtype) diff --git a/numpy/_core/tests/test_cython.py b/numpy/_core/tests/test_cython.py index a6cfedb3770f..0c81543008dd 100644 --- a/numpy/_core/tests/test_cython.py +++ b/numpy/_core/tests/test_cython.py @@ -88,7 +88,7 @@ def test_is_timedelta64_object(install_temp): with pytest.warns( DeprecationWarning, - match="Using 'generic' unit for NumPy timedelta is deprecated", + match="The 'generic' unit for NumPy timedelta is deprecated", ): assert checks.is_td64(np.timedelta64(1234)) @@ -113,7 +113,7 @@ def test_is_datetime64_object(install_temp): with pytest.warns( DeprecationWarning, - match="Using 'generic' unit for NumPy timedelta is deprecated", + match="The 'generic' unit for NumPy timedelta is deprecated", ): assert not checks.is_dt64(np.timedelta64(1234)) diff --git a/numpy/_core/tests/test_datetime.py b/numpy/_core/tests/test_datetime.py index 469cf1823a86..c0922413ea6b 100644 --- a/numpy/_core/tests/test_datetime.py +++ b/numpy/_core/tests/test_datetime.py @@ -1,6 +1,7 @@ import datetime import pickle import warnings +from typing import Final from zoneinfo import ZoneInfo, ZoneInfoNotFoundError import pytest @@ -34,6 +35,9 @@ def _assert_equal_hash(v1, v2): class TestDateTime: + generic_unit_deprecation_message: Final[str] = ( + "The 'generic' unit for NumPy 
timedelta is deprecated" + ) def test_string(self): msg = "no explicit representation of timezones available for " \ @@ -204,7 +208,7 @@ def test_compare_generic_nat(self): # regression tests for gh-6452 with pytest.warns( DeprecationWarning, - match="Using 'generic' unit for NumPy timedelta is deprecated", + match=self.generic_unit_deprecation_message, ): assert_( np.datetime64("NaT") != np.datetime64("2000") + np.timedelta64("NaT") @@ -396,7 +400,7 @@ def test_timedelta_np_int_construction(self, unit): else: with pytest.warns( DeprecationWarning, - match="Using 'generic' unit for NumPy timedelta is deprecated", + match=self.generic_unit_deprecation_message ): assert_equal(np.timedelta64(np.int64(123)), np.timedelta64(123)) @@ -409,7 +413,7 @@ def test_timedelta_scalar_construction(self): with pytest.warns( DeprecationWarning, - match="Using 'generic' unit for NumPy timedelta is deprecated", + match=self.generic_unit_deprecation_message, ): # Default construction means 0 assert_equal(np.timedelta64(), np.timedelta64(0)) @@ -421,7 +425,7 @@ def test_timedelta_scalar_construction(self): assert_equal(str(np.timedelta64('NaT', 'ns')), 'NaT') with pytest.warns( DeprecationWarning, - match="Using 'generic' unit for NumPy timedelta is deprecated", + match=self.generic_unit_deprecation_message, ): assert_equal(repr(np.timedelta64("NaT")), "np.timedelta64('NaT')") assert_equal(str(np.timedelta64(3, 's')), '3 seconds') @@ -430,7 +434,7 @@ def test_timedelta_scalar_construction(self): with pytest.warns( DeprecationWarning, - match="Using 'generic' unit for NumPy timedelta is deprecated", + match=self.generic_unit_deprecation_message, ): assert_equal(repr(np.timedelta64(12)), "np.timedelta64(12)") @@ -521,7 +525,7 @@ def test_timedelta_nat_format(self): # gh-17552 with pytest.warns( DeprecationWarning, - match="Using 'generic' unit for NumPy timedelta is deprecated", + match=self.generic_unit_deprecation_message, ): assert_equal('NaT', f'{np.timedelta64("nat")}') @@ -872,7 
+876,7 @@ def test_timedelta_array_with_nats(self): # Regression test for gh-29497. with pytest.warns( DeprecationWarning, - match="Using 'generic' unit for NumPy timedelta is deprecated", + match=self.generic_unit_deprecation_message, ): x = np.array( [ @@ -1185,7 +1189,7 @@ def test_datetime_add(self): with pytest.warns( DeprecationWarning, - match="Using 'generic' unit for NumPy timedelta is deprecated", + match=self.generic_unit_deprecation_message, ): # m8 + bool assert_equal(tdb + True, tdb + 1) @@ -1265,7 +1269,7 @@ def test_datetime_subtract(self): assert_equal((tdb - tda).dtype, np.dtype("m8[h]")) with pytest.warns( DeprecationWarning, - match="Using 'generic' unit for NumPy timedelta is deprecated", + match=self.generic_unit_deprecation_message, ): # m8 - bool assert_equal(tdc - True, tdc - 1) @@ -1404,7 +1408,7 @@ def test_timedelta_floor_divide(self, op1, op2, exp): def test_generic_timedelta_floor_divide(self): with pytest.warns( DeprecationWarning, - match="Using 'generic' unit for NumPy timedelta is deprecated", + match=self.generic_unit_deprecation_message, ): assert_equal(np.timedelta64(1890) // np.timedelta64(31), 60) @@ -1439,7 +1443,7 @@ def test_timedelta_floor_div_warnings(self, op1, op2): def test_timedelta_floor_div_precision(self, val1, val2): with pytest.warns( DeprecationWarning, - match="Using 'generic' unit for NumPy timedelta is deprecated", + match=self.generic_unit_deprecation_message, ): op1 = np.timedelta64(val1) op2 = np.timedelta64(val2) @@ -1491,7 +1495,7 @@ def test_timedelta_divmod(self, op1, op2): def test_generic_timedelta_divmod(self, op1, op2): with pytest.warns( DeprecationWarning, - match="Using 'generic' unit for NumPy timedelta is deprecated", + match=self.generic_unit_deprecation_message, ): op1 = np.timedelta64(op1) op2 = np.timedelta64(op2) @@ -1580,7 +1584,7 @@ def test_datetime_divide(self): 'ignore', r".*encountered in divide", RuntimeWarning) warnings.filterwarnings( "ignore", - "Using 'generic' unit for NumPy 
timedelta is deprecated", + self.generic_unit_deprecation_message, DeprecationWarning, ) nat = np.timedelta64('NaT', 's') @@ -2048,7 +2052,7 @@ def test_datetime_arange(self): with pytest.warns( DeprecationWarning, - match="Using 'generic' unit for NumPy timedelta is deprecated", + match=self.generic_unit_deprecation_message, ): # Unit should be detected as months here a = np.arange("1969-05", "1970-05", 2, dtype="M8") @@ -2756,7 +2760,7 @@ def test_datetime_hash_big_positive(self, wk, unit): def test_timedelta_hash_generic(self): with pytest.warns( DeprecationWarning, - match="Using 'generic' unit for NumPy timedelta is deprecated", + match=self.generic_unit_deprecation_message, ): assert_raises(ValueError, hash, np.timedelta64(123)) # generic diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index 089af5a381f1..02eff25e328d 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -375,7 +375,7 @@ def test_round_emits_deprecation_warning_scalar(self): class TestDeprecatedGenericTimedelta(_DeprecationTestCase): # Deprecated in Numpy 2.5, 2025-11 # See gh-29619 - message = "Using 'generic' unit for NumPy timedelta is deprecated" + message = "The 'generic' unit for NumPy timedelta is deprecated" @pytest.mark.parametrize('value', [ 3, 10, "NaT" diff --git a/numpy/_core/tests/test_dtype.py b/numpy/_core/tests/test_dtype.py index 03827c0764e9..6464ccd61f9d 100644 --- a/numpy/_core/tests/test_dtype.py +++ b/numpy/_core/tests/test_dtype.py @@ -1977,7 +1977,7 @@ def test_result_type_integers_and_unitless_timedelta64(): # would cause a seg. fault. 
with pytest.warns( DeprecationWarning, - match="Using 'generic' unit for NumPy timedelta is deprecated", + match="The 'generic' unit for NumPy timedelta is deprecated", ): td = np.timedelta64(4) result = np.result_type(0, td) diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py index 8e3465dbb87d..5909c9eb8564 100644 --- a/numpy/_core/tests/test_numeric.py +++ b/numpy/_core/tests/test_numeric.py @@ -1784,7 +1784,7 @@ def assert_equal_w_dt(a, b, err_msg): elif dt == 'm': with pytest.warns( DeprecationWarning, - match="Using 'generic' unit for NumPy timedelta is deprecated", + match="The 'generic' unit for NumPy timedelta is deprecated", ): m = np.zeros((3, 3), dtype=dt) n = np.ones(1, dtype=dt) diff --git a/numpy/_core/tests/test_ufunc.py b/numpy/_core/tests/test_ufunc.py index 3473242f8089..443f739f1b1e 100644 --- a/numpy/_core/tests/test_ufunc.py +++ b/numpy/_core/tests/test_ufunc.py @@ -2873,7 +2873,7 @@ def test_ufunc_types(ufunc): if 'm' in inp: with pytest.warns( DeprecationWarning, - match="Using 'generic' unit for NumPy timedelta is deprecated", + match="The 'generic' unit for NumPy timedelta is deprecated", ): args = [np.ones((3, 3), t) for t in inp] else: diff --git a/numpy/polynomial/tests/test_classes.py b/numpy/polynomial/tests/test_classes.py index 43dd788fdc4e..93952745cf82 100644 --- a/numpy/polynomial/tests/test_classes.py +++ b/numpy/polynomial/tests/test_classes.py @@ -336,7 +336,7 @@ def test_truediv(Poly): s = stype(5, 'D') with pytest.warns( DeprecationWarning, - match="Using 'generic' unit for NumPy timedelta is deprecated", + match="The 'generic' unit for NumPy timedelta is deprecated", ): assert_poly_almost_equal(op.truediv(p2, s), p1) assert_raises(TypeError, op.truediv, s, p2) diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py index 906f6ea3b0d8..30613f451b60 100644 --- a/numpy/testing/tests/test_utils.py +++ b/numpy/testing/tests/test_utils.py @@ -434,7 +434,7 @@ def 
test_nat_items(self): # not a timedelta with pytest.warns( DeprecationWarning, - match="Using 'generic' unit for NumPy timedelta is deprecated", + match="The 'generic' unit for NumPy timedelta is deprecated", ): natd_no_unit = np.timedelta64("NaT") natd_s = np.timedelta64("NaT", "s") @@ -1343,7 +1343,7 @@ def test_timedelta(self): # see gh-18286 with pytest.warns( DeprecationWarning, - match="Using 'generic' unit for NumPy timedelta is deprecated", + match="The 'generic' unit for NumPy timedelta is deprecated", ): a = np.array([[1, 2, 3, "NaT"]], dtype="m8[ns]") assert_allclose(a, a) From 849a584e06b2ab98f02b1600bc643b01e9aa8047 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Thu, 9 Apr 2026 10:33:46 -0700 Subject: [PATCH 1620/1718] BUG: raise OverflowError on datetime64 unit conversion overflow (#31085) Co-authored-by: Claude Opus 4.6 (1M context) --- numpy/_core/src/multiarray/_datetime.h | 41 +++++++++ numpy/_core/src/multiarray/datetime.c | 22 +++-- numpy/_core/src/multiarray/dtype_transfer.c | 20 +---- numpy/_core/tests/test_casting_unittests.py | 7 +- numpy/_core/tests/test_datetime.py | 92 +++++++++++++++++++++ 5 files changed, 157 insertions(+), 25 deletions(-) diff --git a/numpy/_core/src/multiarray/_datetime.h b/numpy/_core/src/multiarray/_datetime.h index 5261e8232a08..c6fb57ee2837 100644 --- a/numpy/_core/src/multiarray/_datetime.h +++ b/numpy/_core/src/multiarray/_datetime.h @@ -335,4 +335,45 @@ datetime_hash(PyArray_DatetimeMetaData *meta, npy_datetime dt); NPY_NO_EXPORT npy_hash_t timedelta_hash(PyArray_DatetimeMetaData *meta, npy_timedelta td); +/* + * Scale a datetime or timedelta value by num/denom, checking for overflow. + * + * Positive values compute *dt * num / denom. + * Negative values compute (*dt * num - (denom - 1)) / denom to round + * toward negative infinity. + * + * NPY_DATETIME_NAT is NPY_MIN_INT64 (i.e. -NPY_MAX_INT64 - 1). 
+ * The asymmetric neg_limit formula ensures that a valid *dt * num never + * produces NPY_MIN_INT64, which would be misinterpreted as NaT. + * + * NaT values pass through unchanged. + * + * Returns 0 on success, -1 on overflow (with PyExc_OverflowError set). + */ +static inline int +_datetime_scale_with_overflow_check( + npy_int64 *dt, npy_int64 num, npy_int64 denom, + const char *type_name) +{ + if (*dt == NPY_DATETIME_NAT) { + return 0; + } + npy_int64 pos_limit = NPY_MAX_INT64 / num; + npy_int64 neg_limit = (NPY_MAX_INT64 - denom + 1) / num; + + if (*dt > pos_limit || *dt < -neg_limit) { + PyErr_Format(PyExc_OverflowError, + "Overflow when converting between " + "%s units", type_name); + return -1; + } + if (*dt < 0) { + *dt = (*dt * num - (denom - 1)) / denom; + } + else { + *dt = *dt * num / denom; + } + return 0; +} + #endif /* NUMPY_CORE_SRC_MULTIARRAY__DATETIME_H_ */ diff --git a/numpy/_core/src/multiarray/datetime.c b/numpy/_core/src/multiarray/datetime.c index d907fa8d9c3c..edaceedb3b01 100644 --- a/numpy/_core/src/multiarray/datetime.c +++ b/numpy/_core/src/multiarray/datetime.c @@ -444,8 +444,16 @@ NpyDatetime_ConvertDatetime64ToDatetimeStruct( return -1; } - /* TODO: Change to a mechanism that avoids the potential overflow */ - dt *= meta->num; + /* Check for overflow and apply meta->num scaling */ + if (meta->num > 1) { + if (_datetime_scale_with_overflow_check( + &dt, (npy_int64)meta->num, 1, "datetime64") < 0) { + return -1; + } + } + else { + dt *= meta->num; + } /* * Note that care must be taken with the / and % operators @@ -3178,13 +3186,11 @@ cast_timedelta_to_timedelta(PyArray_DatetimeMetaData *src_meta, return -1; } - /* Apply the scaling */ - if (src_dt < 0) { - *dst_dt = (src_dt * num - (denom - 1)) / denom; - } - else { - *dst_dt = src_dt * num / denom; + /* Apply the scaling, checking for overflow */ + if (_datetime_scale_with_overflow_check(&src_dt, num, denom, "timedelta64") < 0) { + return -1; } + *dst_dt = src_dt; return 0; } diff 
--git a/numpy/_core/src/multiarray/dtype_transfer.c b/numpy/_core/src/multiarray/dtype_transfer.c index dbad10842aff..1b98c9add0a9 100644 --- a/numpy/_core/src/multiarray/dtype_transfer.c +++ b/numpy/_core/src/multiarray/dtype_transfer.c @@ -821,14 +821,8 @@ _strided_to_strided_datetime_cast( while (N > 0) { memcpy(&dt, src, sizeof(dt)); - if (dt != NPY_DATETIME_NAT) { - /* Apply the scaling */ - if (dt < 0) { - dt = (dt * num - (denom - 1)) / denom; - } - else { - dt = dt * num / denom; - } + if (_datetime_scale_with_overflow_check(&dt, num, denom, "datetime64") < 0) { + return -1; } memcpy(dst, &dt, sizeof(dt)); @@ -857,14 +851,8 @@ _aligned_strided_to_strided_datetime_cast( while (N > 0) { dt = *(npy_int64 *)src; - if (dt != NPY_DATETIME_NAT) { - /* Apply the scaling */ - if (dt < 0) { - dt = (dt * num - (denom - 1)) / denom; - } - else { - dt = dt * num / denom; - } + if (_datetime_scale_with_overflow_check(&dt, num, denom, "datetime64") < 0) { + return -1; } *(npy_int64 *)dst = dt; diff --git a/numpy/_core/tests/test_casting_unittests.py b/numpy/_core/tests/test_casting_unittests.py index 34509f31f47d..1f9b8e07104f 100644 --- a/numpy/_core/tests/test_casting_unittests.py +++ b/numpy/_core/tests/test_casting_unittests.py @@ -481,7 +481,12 @@ def test_time_to_time(self, from_dt, to_dt, arr, out = self.get_data_variation( orig_arr, orig_out, aligned, contig) out[...] = 0 - cast._simple_strided_call((arr, out)) + try: + cast._simple_strided_call((arr, out)) + except OverflowError: + # Extreme values (e.g. INT64_MAX) can overflow when + # scaled by the unit conversion factor. 
gh-16352 + break assert_array_equal(out.view("int64"), expected_out.view("int64")) def string_with_modified_length(self, dtype, change_length): diff --git a/numpy/_core/tests/test_datetime.py b/numpy/_core/tests/test_datetime.py index c0922413ea6b..fe30eb20f5ce 100644 --- a/numpy/_core/tests/test_datetime.py +++ b/numpy/_core/tests/test_datetime.py @@ -993,6 +993,98 @@ def cast2(): numpy.datetime64("2014").astype(" nanoseconds. + # INT64_MAX / 1e9 ≈ 9.2e9 seconds ≈ 292 years from epoch, + # so dates beyond ~2262 overflow when cast to ns. + + # gh-16352: upconversion to finer units overflows + arr = np.array(["2367-12-31 12:00:00"], dtype="datetime64[h]") + with pytest.raises(OverflowError, match="Overflow"): + arr.astype("datetime64[ns]") + + # gh-16352: scalar case + val = np.datetime64("3000-01-01", "s") + with pytest.raises(OverflowError, match="Overflow"): + val.astype("datetime64[ns]") + + # gh-22346: downconversion to coarser units overflows near INT64_MIN + dt = np.datetime64(np.iinfo(np.int64).min + 1, "s") + with pytest.raises(OverflowError, match="Overflow"): + dt.astype("M8[m]") + + # negative overflow (far in the past) + val_neg = np.datetime64("0001-01-01", "s") + with pytest.raises(OverflowError, match="Overflow"): + val_neg.astype("datetime64[ns]") + + # timedelta overflow (strided cast path in dtype_transfer.c) + td = np.timedelta64(2**62, "s") + with pytest.raises(OverflowError, match="Overflow"): + td.astype("timedelta64[ns]") + + # timedelta overflow (scalar cast path in datetime.c via + # cast_timedelta_to_timedelta) + td_big = np.timedelta64(2**62, "s") + with pytest.raises(OverflowError, match="Overflow"): + np.array(td_big, dtype="timedelta64[ns]") + + # timedelta exact boundary: INT64_MAX // 1e9 = 9223372036 + td_ok = np.timedelta64(9223372036, "s") + result_td = td_ok.astype("timedelta64[ns]") + assert result_td == np.timedelta64(9223372036000000000, "ns") + + td_bad = np.timedelta64(9223372037, "s") + with pytest.raises(OverflowError, 
match="Overflow"): + td_bad.astype("timedelta64[ns]") + + # negative timedelta overflow + td_neg = np.timedelta64(-9223372037, "s") + with pytest.raises(OverflowError, match="Overflow"): + td_neg.astype("timedelta64[ns]") + + # timedelta NaT passthrough + td_nat = np.timedelta64("NaT", "s") + result_td_nat = td_nat.astype("timedelta64[ns]") + assert np.isnat(result_td_nat) + + # valid conversions near the boundary should still work + val_ok = np.datetime64("2020-01-01", "s") + result = val_ok.astype("datetime64[ns]") + assert result == np.datetime64("2020-01-01", "ns") + + arr_ok = np.array(["2000-01-01", "2020-06-15"], dtype="datetime64[s]") + result_arr = arr_ok.astype("datetime64[ns]") + expected = np.array(["2000-01-01", "2020-06-15"], dtype="datetime64[ns]") + assert_equal(result_arr, expected) + + # NaT should pass through without raising + arr_nat = np.array(["NaT", "2020-01-01"], dtype="datetime64[s]") + result_nat = arr_nat.astype("datetime64[ns]") + assert np.isnat(result_nat[0]) + assert result_nat[1] == np.datetime64("2020-01-01", "ns") + + # Exact boundary: INT64_MAX // 1e9 = 9223372036 seconds is OK, + # 9223372037 seconds overflows when cast to ns. + ok_boundary = np.datetime64(9223372036, "s") + result_boundary = ok_boundary.astype("datetime64[ns]") + assert result_boundary == np.datetime64(9223372036, "s") + + bad_boundary = np.datetime64(9223372037, "s") + with pytest.raises(OverflowError, match="Overflow"): + bad_boundary.astype("datetime64[ns]") + + # Exercise the num != 1 code path (e.g. 
"2s" metadata) + arr_2s = np.array([3], dtype="datetime64[2s]") + result_2s = arr_2s.astype("datetime64[s]") + assert result_2s[0] == np.datetime64(6, "s") + + # Overflow with num != 1 + arr_2s_big = np.array([np.iinfo(np.int64).max // 2], dtype="datetime64[2s]") + with pytest.raises(OverflowError, match="Overflow"): + arr_2s_big.astype("datetime64[ns]") + def test_pyobject_roundtrip(self): # All datetime types should be able to roundtrip through object a = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, From 17d6e99560f9ed9e8d2e6e08cf30921412f04dd8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 9 Apr 2026 17:55:21 +0000 Subject: [PATCH 1621/1718] MAINT: Bump pypa/cibuildwheel from 3.4.0 to 3.4.1 Bumps [pypa/cibuildwheel](https://github.com/pypa/cibuildwheel) from 3.4.0 to 3.4.1. - [Release notes](https://github.com/pypa/cibuildwheel/releases) - [Changelog](https://github.com/pypa/cibuildwheel/blob/main/docs/changelog.md) - [Commits](https://github.com/pypa/cibuildwheel/compare/ee02a1537ce3071a004a6b08c41e72f0fdc42d9a...8d2b08b68458a16aeb24b64e68a09ab1c8e82084) --- updated-dependencies: - dependency-name: pypa/cibuildwheel dependency-version: 3.4.1 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- .github/workflows/emscripten.yml | 2 +- .github/workflows/wheels.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index 5ba4b07d44f9..48f0a3104b50 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -36,7 +36,7 @@ jobs: fetch-tags: true persist-credentials: false - - uses: pypa/cibuildwheel@ee02a1537ce3071a004a6b08c41e72f0fdc42d9a # v3.4.0 + - uses: pypa/cibuildwheel@8d2b08b68458a16aeb24b64e68a09ab1c8e82084 # v3.4.1 env: CIBW_PLATFORM: pyodide CIBW_BUILD: cp312-* diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 6c899e82b41c..610b3de3fdb0 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -102,7 +102,7 @@ jobs: fi - name: Build wheels - uses: pypa/cibuildwheel@ee02a1537ce3071a004a6b08c41e72f0fdc42d9a # v3.4.0 + uses: pypa/cibuildwheel@8d2b08b68458a16aeb24b64e68a09ab1c8e82084 # v3.4.1 env: CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} From f0bd9d47ea4e880d14bd5d9aaa1eb2affbdcd036 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 9 Apr 2026 17:56:21 +0000 Subject: [PATCH 1622/1718] MAINT: Bump pyrefly from 0.59.0 to 0.59.1 in /requirements Bumps [pyrefly](https://github.com/facebook/pyrefly) from 0.59.0 to 0.59.1. - [Release notes](https://github.com/facebook/pyrefly/releases) - [Commits](https://github.com/facebook/pyrefly/compare/0.59.0...0.59.1) --- updated-dependencies: - dependency-name: pyrefly dependency-version: 0.59.1 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- requirements/typing_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/typing_requirements.txt b/requirements/typing_requirements.txt index 54c439343a13..f8e9c6f73b8b 100644 --- a/requirements/typing_requirements.txt +++ b/requirements/typing_requirements.txt @@ -3,4 +3,4 @@ -r test_requirements.txt mypy==1.20.0 -pyrefly==0.59.0 +pyrefly==0.59.1 From 1edf7b6d52d19926d9917f3c60aed667c7425f56 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 9 Apr 2026 21:12:07 +0200 Subject: [PATCH 1623/1718] Refine int checks (a bit annoyingly). --- numpy/_core/src/multiarray/descriptor.c | 12 ++++++++++-- numpy/_core/tests/test_deprecations.py | 7 +++++++ 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/numpy/_core/src/multiarray/descriptor.c b/numpy/_core/src/multiarray/descriptor.c index ce4d2b85a256..bd26cf139fb3 100644 --- a/numpy/_core/src/multiarray/descriptor.c +++ b/numpy/_core/src/multiarray/descriptor.c @@ -2553,13 +2553,21 @@ arraydescr_new(PyTypeObject *subtype, /* * In the future, reject non Python (or NumPy) boolean, including integers to avoid any * possibility of thinking that an integer alignment makes sense here. + * We omit the case of `oalign == 0` and `ocopy == 1` if there are exact ints. + * This can fail, in which case res is -1 and we enter the deprecation path. */ + int res = 0; + int overflow; if (!PyBool_Check(oalign) && !PyArray_IsScalar(oalign, Bool) && !( // Some old pickles use 0, 1 exactly, assume no user passes it // (It may also be possible to use `copyreg` instead.) 
- PyLong_CheckExact(oalign) && PyLong_AsLong(oalign) == 0 && - ocopy != NULL && PyLong_CheckExact(ocopy) && PyLong_AsLong(ocopy) == 1)) { + PyLong_CheckExact(oalign) && (res = PyLong_IsZero(oalign)) == 1 && + ocopy != NULL && PyLong_CheckExact(ocopy) && + (res = PyLong_AsLongAndOverflow(ocopy, &overflow)) == 1)) { /* Deprecated 2025-07-01: NumPy 2.4 */ + if (res == -1 && PyErr_Occurred()) { + return NULL; // Should actually be impossible (as inputs are `long`) + } if (PyErr_WarnFormat(npy_static_pydata.VisibleDeprecationWarning, 1, "dtype(): align should be passed as Python or NumPy boolean but got `align=%.100R`. " "Did you mean to pass a tuple to create a subarray type? (Deprecated NumPy 2.4)", diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index 089af5a381f1..a5d708b1f748 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -280,11 +280,18 @@ def test_deprecated(self): # alignment, or pass them accidentally as a subarray shape (meaning to pass # a tuple). self.assert_deprecated(lambda: np.dtype("f8", align=3)) + self.assert_deprecated(lambda: np.dtype("f8", align=0, copy=10**100)) + self.assert_deprecated(lambda: np.dtype("f8", align=10**100, copy=0)) + # Subclasses of ints don't hit the below pickle code path: + self.assert_deprecated( + lambda: np.dtype("f8", align=np.long(0), copy=np.long(1))) @pytest.mark.parametrize("align", [True, False, np.True_, np.False_]) def test_not_deprecated(self, align): # if the user passes a bool, it is accepted. 
self.assert_not_deprecated(lambda: np.dtype("f8", align=align)) + # The following specific case is used by old pickles: + self.assert_not_deprecated(lambda: np.dtype("f8", align=0, copy=1)) class TestFlatiterIndexing0dBoolIndex(_DeprecationTestCase): From 563c6049923e52f37a31198019b53ec564a50c7a Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 9 Apr 2026 23:28:49 +0200 Subject: [PATCH 1624/1718] BUG: Use dict to store ufunc loops (fixes thread-safety) (#31184) --- numpy/_core/code_generators/generate_umath.py | 2 +- numpy/_core/include/numpy/ufuncobject.h | 2 +- numpy/_core/src/umath/dispatching.cpp | 110 ++++++++---------- numpy/_core/src/umath/ufunc_object.c | 30 +++-- numpy/_core/src/umath/wrapping_array_method.c | 27 ++--- 5 files changed, 72 insertions(+), 99 deletions(-) diff --git a/numpy/_core/code_generators/generate_umath.py b/numpy/_core/code_generators/generate_umath.py index 5dc623cf1b67..ecb455767a24 100644 --- a/numpy/_core/code_generators/generate_umath.py +++ b/numpy/_core/code_generators/generate_umath.py @@ -1589,7 +1589,7 @@ def make_ufuncs(funcdict): mlist.append(rf"((PyUFuncObject *)f)->type_resolver = &{uf.typereso};") for c in uf.indexed: # Handle indexed loops by getting the underlying ArrayMethodObject - # from the list in f._loops and setting its field appropriately + # from the dict in f._loops and setting its field appropriately fmt = textwrap.dedent(""" {{ PyArray_DTypeMeta *dtype = PyArray_DTypeFromTypeNum({typenum}); diff --git a/numpy/_core/include/numpy/ufuncobject.h b/numpy/_core/include/numpy/ufuncobject.h index f821c6c29411..3d0d2deac6bd 100644 --- a/numpy/_core/include/numpy/ufuncobject.h +++ b/numpy/_core/include/numpy/ufuncobject.h @@ -223,7 +223,7 @@ typedef struct _tagPyUFuncObject { #if NPY_FEATURE_VERSION >= NPY_1_22_API_VERSION /* New private fields related to dispatching */ void *_dispatch_cache; - /* A PyListObject of `(tuple of DTypes, ArrayMethod/Promoter)` */ + /* Ordered dict `tuple of DTypes -> (tuple of 
DTypes, ArrayMethod/Promoter)` */ PyObject *_loops; #endif #if NPY_FEATURE_VERSION >= NPY_2_1_API_VERSION diff --git a/numpy/_core/src/umath/dispatching.cpp b/numpy/_core/src/umath/dispatching.cpp index aa5020a5b0fa..340c0bd7f8e2 100644 --- a/numpy/_core/src/umath/dispatching.cpp +++ b/numpy/_core/src/umath/dispatching.cpp @@ -6,8 +6,8 @@ * * - operand_DTypes: The datatypes as passed in by the user. * - signature: The DTypes fixed by the user with `dtype=` or `signature=`. - * - ufunc._loops: A list of all ArrayMethods and promoters, it contains - * tuples `(dtypes, ArrayMethod)` or `(dtypes, promoter)`. + * - ufunc._loops: Ordered dict of all ArrayMethods and promoters, mapping + * `dtypes` to tuples `(dtypes, ArrayMethod)` or `(dtypes, promoter)`. * - ufunc._dispatch_cache: A cache to store previous promotion and/or * dispatching results. * - The actual arrays are used to support the old code paths where necessary. @@ -70,8 +70,8 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc, /** - * Function to add a new loop to the ufunc. This mainly appends it to the - * list (as it currently is just a list). + * Function to add a new loop to the ufunc. This adds it to the + * _loops dict keyed by the DType tuple. * * @param ufunc The universal function to add the loop to. * @param info The tuple (dtype_tuple, ArrayMethod/promoter). 
@@ -114,38 +114,16 @@ PyUFunc_AddLoop(PyUFuncObject *ufunc, PyObject *info, int ignore_duplicate) return -1; } - if (ufunc->_loops == NULL) { - ufunc->_loops = PyList_New(0); - if (ufunc->_loops == NULL) { - return -1; - } + int found = PyDict_SetDefaultRef(ufunc->_loops, DType_tuple, info, NULL); + if (found < 0) { + return -1; } - - PyObject *loops = ufunc->_loops; - Py_ssize_t length = PyList_Size(loops); - for (Py_ssize_t i = 0; i < length; i++) { - PyObject *item = PyList_GetItemRef(loops, i); - PyObject *cur_DType_tuple = PyTuple_GetItem(item, 0); - Py_DECREF(item); - int cmp = PyObject_RichCompareBool(cur_DType_tuple, DType_tuple, Py_EQ); - if (cmp < 0) { - return -1; - } - if (cmp == 0) { - continue; - } - if (ignore_duplicate) { - return 0; - } + if (found && !ignore_duplicate) { PyErr_Format(PyExc_TypeError, "A loop/promoter has already been registered with '%s' for %R", ufunc_get_name_cstr(ufunc), DType_tuple); return -1; } - - if (PyList_Append(loops, info) < 0) { - return -1; - } return 0; } @@ -332,7 +310,13 @@ resolve_implementation_info(PyUFuncObject *ufunc, PyObject **out_info) { int nin = ufunc->nin, nargs = ufunc->nargs; - Py_ssize_t size = PySequence_Length(ufunc->_loops); + int ret = -1; + /* PyDict_Values returns a snapshot, safe against concurrent additions. 
*/ + PyObject *loops = PyDict_Values(ufunc->_loops); + if (loops == NULL) { + return -1; + } + Py_ssize_t size = PySequence_Length(loops); PyObject *best_dtypes = NULL; PyObject *best_resolver_info = NULL; @@ -349,8 +333,7 @@ resolve_implementation_info(PyUFuncObject *ufunc, for (Py_ssize_t res_idx = 0; res_idx < size; res_idx++) { /* Test all resolvers */ - PyObject *resolver_info = PySequence_Fast_GET_ITEM( - ufunc->_loops, res_idx); + PyObject *resolver_info = PySequence_Fast_GET_ITEM(loops, res_idx); if (only_promoters && PyObject_TypeCheck( PyTuple_GET_ITEM(resolver_info, 1), &PyArrayMethod_Type)) { @@ -411,7 +394,7 @@ resolve_implementation_info(PyUFuncObject *ufunc, int subclass = PyObject_IsSubclass( (PyObject *)given_dtype, (PyObject *)resolver_dtype); if (subclass < 0) { - return -1; + goto finish; } if (!subclass) { matches = NPY_FALSE; @@ -509,7 +492,7 @@ resolve_implementation_info(PyUFuncObject *ufunc, "a better match is not yet implemented. This " "will pick the better (or bail) in the future."); *out_info = NULL; - return -1; + goto finish; } if (best == -1) { @@ -541,8 +524,9 @@ resolve_implementation_info(PyUFuncObject *ufunc, * We just redo it anyway for simplicity.) */ if (!only_promoters) { - return resolve_implementation_info(ufunc, - op_dtypes, NPY_TRUE, out_info); + ret = resolve_implementation_info( + ufunc, op_dtypes, NPY_TRUE, out_info); + goto finish; } /* * If this is already the retry, we are out of luck. Promoters @@ -564,7 +548,8 @@ resolve_implementation_info(PyUFuncObject *ufunc, Py_DECREF(given); } *out_info = NULL; - return 0; + ret = 0; + goto finish; } else if (current_best == 0) { /* The new match is not better, continue looking. 
*/ @@ -578,11 +563,15 @@ resolve_implementation_info(PyUFuncObject *ufunc, if (best_dtypes == NULL) { /* The non-legacy lookup failed */ *out_info = NULL; - return 0; } + else { + *out_info = best_resolver_info; + } + ret = 0; - *out_info = best_resolver_info; - return 0; +finish: + Py_DECREF(loops); + return ret; } @@ -816,7 +805,7 @@ legacy_promote_using_legacy_type_resolver(PyUFuncObject *ufunc, /* - * Note, this function returns a BORROWED references to info since it adds + * Note, this function returns a BORROWED reference to info since it adds * it to the loops. */ NPY_NO_EXPORT PyObject * @@ -845,8 +834,11 @@ add_and_return_legacy_wrapping_ufunc_loop(PyUFuncObject *ufunc, Py_DECREF(info); return NULL; } - Py_DECREF(info); /* now borrowed from the ufunc's list of loops */ - return info; + /* Loop currently borrowed from the _loops (use original if not replaced) */ + PyObject *result = PyDict_GetItemWithError( // noqa: borrowed-ref OK + ufunc->_loops, PyTuple_GET_ITEM(info, 0)); + Py_DECREF(info); + return result; } @@ -1369,28 +1361,20 @@ get_info_no_cast(PyUFuncObject *ufunc, PyArray_DTypeMeta *op_dtype, return NULL; } for (int i=0; i < ndtypes; i++) { - PyTuple_SetItem(t_dtypes, i, (PyObject *)op_dtype); - } - PyObject *loops = ufunc->_loops; - Py_ssize_t length = PyList_Size(loops); - for (Py_ssize_t i = 0; i < length; i++) { - PyObject *item = PyList_GetItemRef(loops, i); - PyObject *cur_DType_tuple = PyTuple_GetItem(item, 0); - Py_DECREF(item); - int cmp = PyObject_RichCompareBool(cur_DType_tuple, - t_dtypes, Py_EQ); - if (cmp < 0) { - Py_DECREF(t_dtypes); - return NULL; - } - if (cmp == 0) { - continue; - } - /* Got the match */ + Py_INCREF(op_dtype); + PyTuple_SET_ITEM(t_dtypes, i, (PyObject *)op_dtype); + } + PyObject *info; + if (PyDict_GetItemRef(ufunc->_loops, t_dtypes, &info) < 0) { Py_DECREF(t_dtypes); - return PyTuple_GetItem(item, 1); + return NULL; } Py_DECREF(t_dtypes); + if (info != NULL) { + PyObject *result = PyTuple_GET_ITEM(info, 1); + 
Py_DECREF(info); + return result; + } Py_RETURN_NONE; } diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index b17f24637277..1f9da112df0a 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -4921,7 +4921,7 @@ PyUFunc_FromFuncAndDataAndSignatureAndIdentity(PyUFuncGenericFunction *func, voi */ ufunc->_dispatch_cache = NULL; } - ufunc->_loops = PyList_New(0); + ufunc->_loops = PyDict_New(); if (ufunc->_loops == NULL) { Py_DECREF(ufunc); return NULL; @@ -5249,21 +5249,18 @@ PyUFunc_RegisterLoopForType(PyUFuncObject *ufunc, * A new-style loop should not be replaced by an old-style one. */ int add_new_loop = 1; - for (Py_ssize_t j = 0; j < PyList_GET_SIZE(ufunc->_loops); j++) { - PyObject *item = PyList_GET_ITEM(ufunc->_loops, j); // noqa: borrowed-ref OK - PyObject *existing_tuple = PyTuple_GET_ITEM(item, 0); - - int cmp = PyObject_RichCompareBool(existing_tuple, signature_tuple, Py_EQ); - if (cmp < 0) { - goto fail; - } - if (!cmp) { - continue; - } - PyObject *registered = PyTuple_GET_ITEM(item, 1); - if (!PyObject_TypeCheck(registered, &PyArrayMethod_Type) || ( - (PyArrayMethodObject *)registered)->get_strided_loop != - &get_wrapped_legacy_ufunc_loop) { + PyObject *existing_item; + if (PyDict_GetItemRef(ufunc->_loops, signature_tuple, &existing_item) < 0) { + goto fail; + } + if (existing_item != NULL) { + PyObject *registered = PyTuple_GET_ITEM(existing_item, 1); + int not_compatible = ( + !PyObject_TypeCheck(registered, &PyArrayMethod_Type) || + ((PyArrayMethodObject *)registered)->get_strided_loop != + &get_wrapped_legacy_ufunc_loop); + Py_DECREF(existing_item); + if (not_compatible) { PyErr_Format(PyExc_TypeError, "A non-compatible loop was already registered for " "ufunc %s and DTypes %S.", @@ -5272,7 +5269,6 @@ PyUFunc_RegisterLoopForType(PyUFuncObject *ufunc, } /* The loop was already added */ add_new_loop = 0; - break; } if (add_new_loop) { PyObject *info = 
add_and_return_legacy_wrapping_ufunc_loop( diff --git a/numpy/_core/src/umath/wrapping_array_method.c b/numpy/_core/src/umath/wrapping_array_method.c index 924bac9524e9..bf30f12bfec4 100644 --- a/numpy/_core/src/umath/wrapping_array_method.c +++ b/numpy/_core/src/umath/wrapping_array_method.c @@ -235,6 +235,7 @@ PyUFunc_AddWrappingLoop(PyObject *ufunc_obj, PyObject *wrapped_dt_tuple = NULL; PyObject *new_dt_tuple = NULL; PyArrayMethodObject *meth = NULL; + PyObject *existing_info = NULL; if (!PyObject_TypeCheck(ufunc_obj, &PyUFunc_Type)) { PyErr_SetString(PyExc_TypeError, @@ -249,28 +250,19 @@ PyUFunc_AddWrappingLoop(PyObject *ufunc_obj, } PyArrayMethodObject *wrapped_meth = NULL; - PyObject *loops = ufunc->_loops; - Py_ssize_t length = PyList_Size(loops); - for (Py_ssize_t i = 0; i < length; i++) { - PyObject *item = PyList_GetItemRef(loops, i); - PyObject *cur_DType_tuple = PyTuple_GetItem(item, 0); - Py_DECREF(item); - int cmp = PyObject_RichCompareBool(cur_DType_tuple, wrapped_dt_tuple, Py_EQ); - if (cmp < 0) { - goto finish; - } - if (cmp == 0) { - continue; - } - wrapped_meth = (PyArrayMethodObject *)PyTuple_GET_ITEM(item, 1); - if (!PyObject_TypeCheck(wrapped_meth, &PyArrayMethod_Type)) { + if (PyDict_GetItemRef(ufunc->_loops, wrapped_dt_tuple, &existing_info) < 0) { + goto finish; + } + if (existing_info != NULL) { + PyObject *existing_meth = PyTuple_GET_ITEM(existing_info, 1); + if (!PyObject_TypeCheck(existing_meth, &PyArrayMethod_Type)) { PyErr_SetString(PyExc_TypeError, "Matching loop was not an ArrayMethod."); goto finish; } - break; + wrapped_meth = (PyArrayMethodObject *)existing_meth; } - if (wrapped_meth == NULL) { + else { PyErr_Format(PyExc_TypeError, "Did not find the to-be-wrapped loop in the ufunc with given " "DTypes. 
Received wrapping types: %S", wrapped_dt_tuple); @@ -336,5 +328,6 @@ PyUFunc_AddWrappingLoop(PyObject *ufunc_obj, Py_XDECREF(wrapped_dt_tuple); Py_XDECREF(new_dt_tuple); Py_XDECREF(meth); + Py_XDECREF(existing_info); return res; } From 8df456689d93d62a4843796a4a1f52f02ad58d92 Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Fri, 10 Apr 2026 09:46:34 +0200 Subject: [PATCH 1625/1718] TST: Add tests for PyUFunc_ReplaceLoopBySignature (#31097) Co-authored-by: Claude Opus 4.6 (1M context) Co-authored-by: Sebastian Berg --- numpy/_core/_umath_tests.pyi | 4 ++ numpy/_core/src/umath/_umath_tests.c.src | 72 ++++++++++++++++++++++++ numpy/_core/tests/test_umath.py | 20 +++++++ 3 files changed, 96 insertions(+) diff --git a/numpy/_core/_umath_tests.pyi b/numpy/_core/_umath_tests.pyi index 696cec3b755e..cce84afdd081 100644 --- a/numpy/_core/_umath_tests.pyi +++ b/numpy/_core/_umath_tests.pyi @@ -30,6 +30,10 @@ def test_signature( # undocumented def test_dispatch() -> _TestDispatchResult: ... +# undocumented test helpers for PyUFunc_ReplaceLoopBySignature +def replace_loop(ufunc: np.ufunc, /) -> object: ... +def restore_loop(ufunc: np.ufunc, capsule: object, /) -> None: ... + # undocumented ufuncs and gufuncs always_error: Final[_UFunc_Nin2_Nout1[L["always_error"], L[1], None]] = ... always_error_unary: Final[_UFunc_Nin1_Nout1[L["always_error_unary"], L[1], None]] = ... diff --git a/numpy/_core/src/umath/_umath_tests.c.src b/numpy/_core/src/umath/_umath_tests.c.src index a1b64ecc0444..a37da4cd98bb 100644 --- a/numpy/_core/src/umath/_umath_tests.c.src +++ b/numpy/_core/src/umath/_umath_tests.c.src @@ -857,6 +857,72 @@ static void *const conv1d_full_data[] = {NULL}; static const char conv1d_full_typecodes[] = {NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE}; +/* + * Test helpers for PyUFunc_ReplaceLoopBySignature. + * + * _constant42_loop: unary float64 loop that always writes 42.0. 
+ */ +static void +_constant42_loop(char **args, npy_intp const *dimensions, + npy_intp const *steps, void *NPY_UNUSED(data)) +{ + npy_intp n = dimensions[0]; + char *out = args[1]; + npy_intp out_step = steps[1]; + for (npy_intp i = 0; i < n; i++) { + *(double *)out = 42.0; + out += out_step; + } +} + +/* + * replace_loop(ufunc): Replace the dd->d loop with _constant42_loop. + * Only works for unary ufuncs. Returns a capsule holding the old loop. + */ +static PyObject * +UMath_Tests_replace_loop(PyObject *NPY_UNUSED(dummy), PyObject *args) +{ + PyUFuncObject *ufunc; + if (!PyArg_ParseTuple(args, "O!", &PyUFunc_Type, &ufunc)) { + return NULL; + } + if (ufunc->nin != 1 || ufunc->nout != 1) { + PyErr_SetString(PyExc_ValueError, + "replace_loop only supports unary ufuncs"); + return NULL; + } + int signature[2] = {NPY_DOUBLE, NPY_DOUBLE}; + PyUFuncGenericFunction oldfunc = NULL; + if (PyUFunc_ReplaceLoopBySignature( + ufunc, _constant42_loop, signature, &oldfunc) < 0) { + PyErr_SetString(PyExc_RuntimeError, + "failed to find a float64 loop"); + return NULL; + } + return PyCapsule_New((void *)oldfunc, "oldfunc", NULL); +} + +/* + * restore_loop(ufunc, capsule): Restore the loop saved by replace_loop. + */ +static PyObject * +UMath_Tests_restore_loop(PyObject *NPY_UNUSED(dummy), PyObject *args) +{ + PyUFuncObject *ufunc; + PyObject *capsule; + if (!PyArg_ParseTuple(args, "O!O", &PyUFunc_Type, &ufunc, &capsule)) { + return NULL; + } + PyUFuncGenericFunction oldfunc = (PyUFuncGenericFunction) + PyCapsule_GetPointer(capsule, "oldfunc"); + if (oldfunc == NULL) { + return NULL; + } + int signature[2] = {NPY_DOUBLE, NPY_DOUBLE}; + PyUFunc_ReplaceLoopBySignature(ufunc, oldfunc, signature, NULL); + Py_RETURN_NONE; +} + static PyMethodDef UMath_TestsMethods[] = { {"test_signature", UMath_Tests_test_signature, METH_VARARGS, "Test signature parsing of ufunc. \n" @@ -865,6 +931,12 @@ static PyMethodDef UMath_TestsMethods[] = { "internals. 
\n", }, {"test_dispatch", UMath_Tests_test_dispatch, METH_NOARGS, NULL}, + {"replace_loop", UMath_Tests_replace_loop, METH_VARARGS, + "Replace the float64 loop of a ufunc with one that outputs 42.0.\n" + "Returns a capsule holding the old loop for restore_loop().\n"}, + {"restore_loop", UMath_Tests_restore_loop, METH_VARARGS, + "Restore a ufunc loop previously replaced by replace_loop().\n" + "Arguments: ufunc, capsule\n"}, {NULL, NULL, 0, NULL} /* Sentinel */ }; diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py index 8cc288e9ca92..ff61e7f3bafc 100644 --- a/numpy/_core/tests/test_umath.py +++ b/numpy/_core/tests/test_umath.py @@ -5111,6 +5111,26 @@ def test_bad_legacy_gufunc_silent_errors(x1): ncu_tests.always_error_gufunc(x1, 0.0) +class TestReplaceLoopBySignature: + """Tests for PyUFunc_ReplaceLoopBySignature C API.""" + + @pytest.mark.thread_unsafe(reason="modifies ufunc within test") + def test_replace_loop(self): + # Call the ufunc first to populate any internal dispatch caches, + # then replace the float64 loop with one that outputs 42.0, + # verify the replacement is used, and restore the original. + a = np.array([1.0, 2.0, 3.0]) + assert_array_equal(np.negative(a), [-1.0, -2.0, -3.0]) + + saved = ncu_tests.replace_loop(np.negative) + try: + assert_array_equal(np.negative(a), [42.0, 42.0, 42.0]) + finally: + ncu_tests.restore_loop(np.negative, saved) + + assert_array_equal(np.negative(a), [-1.0, -2.0, -3.0]) + + class TestAddDocstring: @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") def test_add_same_docstring(self): From 121f1ccb595e7dec8e50eac75bdb057d26fceb94 Mon Sep 17 00:00:00 2001 From: Joseph Mehdiyev <157145635+JosephMehdiyev@users.noreply.github.com> Date: Fri, 10 Apr 2026 18:42:58 +0100 Subject: [PATCH 1626/1718] DEP: remove warnings from `read_rows` (#31113) Removed a warning from `read_rows` and its related tests, that was given about a change in empty line handling in NumPy 1.23. 
--- numpy/_core/src/multiarray/textreading/rows.c | 26 +------------------ numpy/lib/tests/test_io.py | 18 +++++-------- 2 files changed, 8 insertions(+), 36 deletions(-) diff --git a/numpy/_core/src/multiarray/textreading/rows.c b/numpy/_core/src/multiarray/textreading/rows.c index 7f3797b58928..401b8775a4f0 100644 --- a/numpy/_core/src/multiarray/textreading/rows.c +++ b/numpy/_core/src/multiarray/textreading/rows.c @@ -186,9 +186,6 @@ read_rows(stream *s, size_t rows_per_block = 1; /* will be increased depending on row size */ npy_intp data_allocated_rows = 0; - /* We give a warning if max_rows is used and an empty line is encountered */ - bool give_empty_row_warning = max_rows >= 0; - int ts_result = 0; tokenizer_state ts; if (npy_tokenizer_init(&ts, pconfig) < 0) { @@ -226,29 +223,8 @@ read_rows(stream *s, } current_num_fields = ts.num_fields; field_info *fields = ts.fields; + if (NPY_UNLIKELY(ts.num_fields == 0)) { - /* - * Deprecated NumPy 1.23, 2021-01-13 (not really a deprecation, - * but similar policy should apply to removing the warning again) - */ - /* Tokenizer may give a final "empty line" even if there is none */ - if (give_empty_row_warning && ts_result == 0) { - give_empty_row_warning = false; - if (PyErr_WarnFormat(PyExc_UserWarning, 3, - "Input line %zd contained no data and will not be " - "counted towards `max_rows=%zd`. This differs from " - "the behaviour in NumPy <=1.22 which counted lines " - "rather than rows. If desired, the previous behaviour " - "can be achieved by using `itertools.islice`.\n" - "Please see the 1.23 release notes for an example on " - "how to do this. If you wish to ignore this warning, " - "use `warnings.filterwarnings`. 
This warning is " - "expected to be removed in the future and is given " - "only once per `loadtxt` call.", - row_count + skiplines + 1, max_rows) < 0) { - goto error; - } - } continue; /* Ignore empty line */ } diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py index 3112b6aeff32..e3ae359c7d16 100644 --- a/numpy/lib/tests/test_io.py +++ b/numpy/lib/tests/test_io.py @@ -1269,20 +1269,16 @@ def test_max_rows_empty_lines(self, skip, data): if callable(data): data = data() - with pytest.warns(UserWarning, - match=f"Input line 3.*max_rows={3 - skip}"): - res = np.loadtxt(data, dtype=int, skiprows=skip, delimiter=",", - max_rows=3 - skip) - assert_array_equal(res, [[-1, 0], [1, 2], [3, 4]][skip:]) + res = np.loadtxt(data, dtype=int, skiprows=skip, delimiter=",", + max_rows=3 - skip) + assert_array_equal(res, [[-1, 0], [1, 2], [3, 4]][skip:]) if isinstance(data, StringIO): data.seek(0) - - with warnings.catch_warnings(): - warnings.simplefilter("error", UserWarning) - with pytest.raises(UserWarning): - np.loadtxt(data, dtype=int, skiprows=skip, delimiter=",", - max_rows=3 - skip) + # gh-31113 old test checked the warning twice on `StringIO` inputs + x = np.loadtxt(data, dtype=int, skiprows=skip, delimiter=",", + max_rows=3 - skip) + assert_array_equal(x, [[-1, 0], [1, 2], [3, 4]][skip:]) class Testfromregex: def test_record(self): From 3be3f69cb57d1d89018dc3666286aaaf376f2892 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 10 Apr 2026 20:01:05 +0200 Subject: [PATCH 1627/1718] MAINT: bump ``ruff`` to ``0.15.10`` --- environment.yml | 2 +- requirements/linter_requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/environment.yml b/environment.yml index 0985b755ff06..38801afcb52c 100644 --- a/environment.yml +++ b/environment.yml @@ -45,7 +45,7 @@ dependencies: - breathe>4.33.0 # For linting - cython-lint - - ruff=0.15.7 + - ruff=0.15.10 - gitpython # Used in some tests - cffi diff --git 
a/requirements/linter_requirements.txt b/requirements/linter_requirements.txt index 99bc7635cd67..5563a31fdcc9 100644 --- a/requirements/linter_requirements.txt +++ b/requirements/linter_requirements.txt @@ -1,5 +1,5 @@ # keep in sync with `environment.yml` cython-lint -ruff==0.15.8 +ruff==0.15.10 GitPython>=3.1.30 spin From 124111ebb8f5183589bddac4a0b702af23c77d7f Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 10 Apr 2026 20:02:29 +0200 Subject: [PATCH 1628/1718] STY: fix two new ruff errors (``UP018``) --- numpy/typing/tests/data/pass/comparisons.py | 2 +- numpy/typing/tests/data/reveal/comparisons.pyi | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/typing/tests/data/pass/comparisons.py b/numpy/typing/tests/data/pass/comparisons.py index 47507079b43d..cfb15cddc36e 100644 --- a/numpy/typing/tests/data/pass/comparisons.py +++ b/numpy/typing/tests/data/pass/comparisons.py @@ -20,7 +20,7 @@ b_ = np.bool() b = False -c = complex() +c = 0j f = 0.0 i = 0 diff --git a/numpy/typing/tests/data/reveal/comparisons.pyi b/numpy/typing/tests/data/reveal/comparisons.pyi index 4d359929fd00..f9e2a7ee0519 100644 --- a/numpy/typing/tests/data/reveal/comparisons.pyi +++ b/numpy/typing/tests/data/reveal/comparisons.pyi @@ -21,7 +21,7 @@ td = np.timedelta64(0, "D") b_ = np.bool() b = False -c = complex() +c = 0j f = 0.0 i = 0 From eb47e0ae7e9507ed139e86cb99164698ceed2784 Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Sat, 11 Apr 2026 10:57:38 +0200 Subject: [PATCH 1629/1718] ENH: Improve performance of PyArray_FromAny and cleanup PyArray_ExtractDTypeAndDescriptor (#31099) Co-authored-by: Claude Opus 4.6 (1M context) Co-authored-by: Sebastian Berg --- numpy/_core/src/multiarray/array_coercion.c | 7 +- numpy/_core/src/multiarray/array_converter.c | 2 +- numpy/_core/src/multiarray/convert_datatype.c | 6 +- numpy/_core/src/multiarray/ctors.c | 72 +++++++++---------- numpy/_core/src/multiarray/ctors.h | 6 +- numpy/_core/src/multiarray/descriptor.c | 17 
++--- numpy/_core/src/multiarray/descriptor.h | 2 +- numpy/_core/src/multiarray/multiarraymodule.c | 2 +- numpy/_core/src/multiarray/scalartypes.c.src | 2 +- 9 files changed, 51 insertions(+), 65 deletions(-) diff --git a/numpy/_core/src/multiarray/array_coercion.c b/numpy/_core/src/multiarray/array_coercion.c index e890b73bda2f..fabb5695134e 100644 --- a/numpy/_core/src/multiarray/array_coercion.c +++ b/numpy/_core/src/multiarray/array_coercion.c @@ -920,10 +920,7 @@ PyArray_AdaptDescriptorToArray( return descr; } if (dtype == NULL) { - res = PyArray_ExtractDTypeAndDescriptor(descr, &new_descr, &dtype); - if (res < 0) { - return NULL; - } + PyArray_ExtractDTypeAndDescriptor(descr, &new_descr, &dtype); if (new_descr != NULL) { Py_DECREF(dtype); return new_descr; @@ -1034,7 +1031,7 @@ PyArray_DiscoverDTypeAndShape_Recursive( } int was_copied_by__array__ = 0; arr = (PyArrayObject *)_array_from_array_like(obj, - requested_descr, 0, NULL, copy, &was_copied_by__array__); + requested_descr, 0, copy, &was_copied_by__array__); if (arr == NULL) { return -1; } diff --git a/numpy/_core/src/multiarray/array_converter.c b/numpy/_core/src/multiarray/array_converter.c index 674676dfc0c1..27850491646b 100644 --- a/numpy/_core/src/multiarray/array_converter.c +++ b/numpy/_core/src/multiarray/array_converter.c @@ -83,7 +83,7 @@ array_converter_new( } else { item->array = (PyArrayObject *)PyArray_FromAny_int( - item->object, NULL, NULL, 0, NPY_MAXDIMS, 0, NULL, + item->object, NULL, NULL, 0, NPY_MAXDIMS, 0, &item->scalar_input); if (item->array == NULL) { goto fail; diff --git a/numpy/_core/src/multiarray/convert_datatype.c b/numpy/_core/src/multiarray/convert_datatype.c index b2a179b78cca..caea50760dc3 100644 --- a/numpy/_core/src/multiarray/convert_datatype.c +++ b/numpy/_core/src/multiarray/convert_datatype.c @@ -993,10 +993,8 @@ PyArray_FindConcatenationDescriptor( PyArray_DTypeMeta *common_dtype; PyArray_Descr *result = NULL; - if (PyArray_ExtractDTypeAndDescriptor( - 
requested_dtype, &result, &common_dtype) < 0) { - return NULL; - } + PyArray_ExtractDTypeAndDescriptor( + requested_dtype, &result, &common_dtype); if (result != NULL) { if (PyDataType_SUBARRAY(result) != NULL) { PyErr_Format(PyExc_TypeError, diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index b5c6776970f0..34964b3fabfa 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -1429,7 +1429,6 @@ _array_from_buffer_3118(PyObject *memoryview) * @param requested_dtype a requested dtype instance, may be NULL; The result * DType may be used, but is not enforced. * @param writeable whether the result must be writeable. - * @param context Unused parameter, must be NULL (should be removed later). * @param copy Specifies the copy behavior. * @param was_copied_by__array__ Set to 1 if it can be assumed that a copy * was made by implementor. @@ -1440,7 +1439,7 @@ _array_from_buffer_3118(PyObject *memoryview) */ NPY_NO_EXPORT PyObject * _array_from_array_like(PyObject *op, - PyArray_Descr *requested_dtype, npy_bool writeable, PyObject *context, + PyArray_Descr *requested_dtype, npy_bool writeable, int copy, int *was_copied_by__array__) { PyObject* tmp; @@ -1520,19 +1519,19 @@ NPY_NO_EXPORT PyObject * PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth, int max_depth, int flags, PyObject *context) { + if (context != NULL) { + PyErr_SetString(PyExc_RuntimeError, "'context' must be NULL"); + Py_XDECREF(newtype); + return NULL; + } + npy_dtype_info dt_info = {NULL, NULL}; - int res = PyArray_ExtractDTypeAndDescriptor( + PyArray_ExtractDTypeAndDescriptor( newtype, &dt_info.descr, &dt_info.dtype); Py_XDECREF(newtype); - if (res < 0) { - Py_XDECREF(dt_info.descr); - Py_XDECREF(dt_info.dtype); - return NULL; - } - /* * The internal implementation treats 0 as actually wanting a zero-dimensional * array, but the API for this function has typically treated it as @@ -1546,7 +1545,7 @@ 
PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth, int was_scalar; PyObject* ret = PyArray_FromAny_int( op, dt_info.descr, dt_info.dtype, - min_depth, max_depth, flags, context, &was_scalar); + min_depth, max_depth, flags, &was_scalar); Py_XDECREF(dt_info.descr); Py_XDECREF(dt_info.dtype); @@ -1568,23 +1567,29 @@ PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth, NPY_NO_EXPORT PyObject * PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, PyArray_DTypeMeta *in_DType, int min_depth, int max_depth, - int flags, PyObject *context, int *was_scalar) + int flags, int *was_scalar) { /* * This is the main code to make a NumPy array from a Python * Object. It is called from many different places. */ + + /* + * Fast path: op is already an ndarray with no dtype, flags, or depth + * constraints. Avoids DiscoverDTypeAndShape + PyArray_CanCastArrayTo. + */ + if (in_descr == NULL && in_DType == NULL && flags == 0 + && min_depth == 0 && PyArray_Check(op)) { + *was_scalar = 0; + return Py_NewRef(op); + } + PyArrayObject *arr = NULL, *ret = NULL; PyArray_Descr *dtype = NULL; coercion_cache_obj *cache = NULL; int ndim = 0; npy_intp dims[NPY_MAXDIMS]; - if (context != NULL) { - PyErr_SetString(PyExc_RuntimeError, "'context' must be NULL"); - return NULL; - } - // Default is copy = None int copy = -1; int was_copied_by__array__ = 0; @@ -1817,27 +1822,26 @@ NPY_NO_EXPORT PyObject * PyArray_CheckFromAny(PyObject *op, PyArray_Descr *descr, int min_depth, int max_depth, int requirements, PyObject *context) { + if (context != NULL) { + PyErr_SetString(PyExc_RuntimeError, "'context' must be NULL"); + Py_XDECREF(descr); + return NULL; + } + npy_dtype_info dt_info = {NULL, NULL}; - int res = PyArray_ExtractDTypeAndDescriptor( + PyArray_ExtractDTypeAndDescriptor( descr, &dt_info.descr, &dt_info.dtype); Py_XDECREF(descr); - if (res < 0) { - Py_XDECREF(dt_info.descr); - Py_XDECREF(dt_info.dtype); - return NULL; - } - /* See comment in PyArray_FromAny for 
rationale */ if (max_depth == 0 || max_depth > NPY_MAXDIMS) { max_depth = NPY_MAXDIMS; } PyObject* ret = PyArray_CheckFromAny_int( - op, dt_info.descr, dt_info.dtype, min_depth, max_depth, requirements, - context); + op, dt_info.descr, dt_info.dtype, min_depth, max_depth, requirements); Py_XDECREF(dt_info.descr); Py_XDECREF(dt_info.dtype); @@ -1851,7 +1855,7 @@ PyArray_CheckFromAny(PyObject *op, PyArray_Descr *descr, int min_depth, NPY_NO_EXPORT PyObject * PyArray_CheckFromAny_int(PyObject *op, PyArray_Descr *in_descr, PyArray_DTypeMeta *in_DType, int min_depth, - int max_depth, int requirements, PyObject *context) + int max_depth, int requirements) { PyObject *obj; Py_XINCREF(in_descr); /* take ownership as we may replace it */ @@ -1870,7 +1874,7 @@ PyArray_CheckFromAny_int(PyObject *op, PyArray_Descr *in_descr, int was_scalar; obj = PyArray_FromAny_int(op, in_descr, in_DType, min_depth, - max_depth, requirements, context, &was_scalar); + max_depth, requirements, &was_scalar); Py_XDECREF(in_descr); if (obj == NULL) { return NULL; @@ -3001,18 +3005,12 @@ PyArray_Zeros(int nd, npy_intp const *dims, PyArray_Descr *type, int is_f_order) { npy_dtype_info dt_info = {NULL, NULL}; - int res = PyArray_ExtractDTypeAndDescriptor( + PyArray_ExtractDTypeAndDescriptor( type, &dt_info.descr, &dt_info.dtype); // steal reference Py_XDECREF(type); - if (res < 0) { - Py_XDECREF(dt_info.descr); - Py_XDECREF(dt_info.dtype); - return NULL; - } - PyObject *ret = PyArray_Zeros_int(nd, dims, dt_info.descr, dt_info.dtype, is_f_order); @@ -3066,16 +3064,12 @@ PyArray_Empty(int nd, npy_intp const *dims, PyArray_Descr *type, int is_f_order) { npy_dtype_info dt_info = {NULL, NULL}; - int res = PyArray_ExtractDTypeAndDescriptor( + PyArray_ExtractDTypeAndDescriptor( type, &dt_info.descr, &dt_info.dtype); // steal reference Py_XDECREF(type); - if (res < 0) { - return NULL; - } - PyObject *ret = PyArray_Empty_int( nd, dims, dt_info.descr, dt_info.dtype, is_f_order); diff --git 
a/numpy/_core/src/multiarray/ctors.h b/numpy/_core/src/multiarray/ctors.h index b7a60e0065e0..7e3e9f6587bb 100644 --- a/numpy/_core/src/multiarray/ctors.h +++ b/numpy/_core/src/multiarray/ctors.h @@ -53,13 +53,13 @@ PyArray_New( NPY_NO_EXPORT PyObject * _array_from_array_like(PyObject *op, - PyArray_Descr *requested_dtype, npy_bool writeable, PyObject *context, + PyArray_Descr *requested_dtype, npy_bool writeable, int copy, int *was_copied_by__array__); NPY_NO_EXPORT PyObject * PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, PyArray_DTypeMeta *in_DType, int min_depth, int max_depth, - int flags, PyObject *context, int *was_scalar); + int flags, int *was_scalar); NPY_NO_EXPORT PyObject * PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth, @@ -68,7 +68,7 @@ PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth, NPY_NO_EXPORT PyObject * PyArray_CheckFromAny_int(PyObject *op, PyArray_Descr *in_descr, PyArray_DTypeMeta *in_DType, int min_depth, - int max_depth, int requirements, PyObject *context); + int max_depth, int requirements); NPY_NO_EXPORT PyObject * PyArray_CheckFromAny(PyObject *op, PyArray_Descr *descr, int min_depth, diff --git a/numpy/_core/src/multiarray/descriptor.c b/numpy/_core/src/multiarray/descriptor.c index bd26cf139fb3..e80ec01dfe83 100644 --- a/numpy/_core/src/multiarray/descriptor.c +++ b/numpy/_core/src/multiarray/descriptor.c @@ -1412,8 +1412,9 @@ descr_is_legacy_parametric_instance(PyArray_Descr *descr, } /* Flexible descr with generic time unit (which can be adapted) */ if (PyDataType_ISDATETIME(descr)) { - PyArray_DatetimeMetaData *meta; - meta = get_datetime_metadata_from_dtype(descr); + _PyArray_LegacyDescr *ldescr = (_PyArray_LegacyDescr *)descr; + PyArray_DatetimeMetaData *meta = + &(((PyArray_DatetimeDTypeMetaData *)ldescr->c_metadata)->meta); if (meta->base == NPY_FR_GENERIC) { return 1; } @@ -1428,12 +1429,13 @@ descr_is_legacy_parametric_instance(PyArray_Descr *descr, * both results can be 
NULL (if the input is). But it always sets the DType * when a descriptor is set. * + * This function cannot fail. + * * @param dtype Input descriptor to be converted * @param out_descr Output descriptor * @param out_DType DType of the output descriptor - * @return 0 on success -1 on failure */ -NPY_NO_EXPORT int +NPY_NO_EXPORT void PyArray_ExtractDTypeAndDescriptor(PyArray_Descr *dtype, PyArray_Descr **out_descr, PyArray_DTypeMeta **out_DType) { @@ -1449,7 +1451,6 @@ PyArray_ExtractDTypeAndDescriptor(PyArray_Descr *dtype, Py_INCREF(*out_descr); } } - return 0; } @@ -1493,12 +1494,8 @@ PyArray_DTypeOrDescrConverterRequired(PyObject *obj, npy_dtype_info *dt_info) * be considered an instance with actual 0 length. * TODO: It would be nice to fix that eventually. */ - int res = PyArray_ExtractDTypeAndDescriptor( - descr, &dt_info->descr, &dt_info->dtype); + PyArray_ExtractDTypeAndDescriptor(descr, &dt_info->descr, &dt_info->dtype); Py_DECREF(descr); - if (res < 0) { - return NPY_FAIL; - } return NPY_SUCCEED; } diff --git a/numpy/_core/src/multiarray/descriptor.h b/numpy/_core/src/multiarray/descriptor.h index 233c434fd6b4..408d9ea0da56 100644 --- a/numpy/_core/src/multiarray/descriptor.h +++ b/numpy/_core/src/multiarray/descriptor.h @@ -20,7 +20,7 @@ PyArray_DTypeOrDescrConverterOptional(PyObject *, npy_dtype_info *dt_info); NPY_NO_EXPORT int PyArray_DTypeOrDescrConverterRequired(PyObject *, npy_dtype_info *dt_info); -NPY_NO_EXPORT int +NPY_NO_EXPORT void PyArray_ExtractDTypeAndDescriptor(PyArray_Descr *dtype, PyArray_Descr **out_descr, PyArray_DTypeMeta **out_DType); diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index af24684b84b3..359793bec0d8 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -1693,7 +1693,7 @@ _array_fromobject_generic( flags |= NPY_ARRAY_FORCECAST; ret = (PyArrayObject *)PyArray_CheckFromAny_int( - op, dtype, in_DType, 0, 
ndmax, flags, NULL); + op, dtype, in_DType, 0, ndmax, flags); finish: Py_XDECREF(dtype); diff --git a/numpy/_core/src/multiarray/scalartypes.c.src b/numpy/_core/src/multiarray/scalartypes.c.src index 4c9098d7d9ca..8e4613db10f8 100644 --- a/numpy/_core/src/multiarray/scalartypes.c.src +++ b/numpy/_core/src/multiarray/scalartypes.c.src @@ -227,7 +227,7 @@ find_binary_operation_path( */ int was_scalar; PyArrayObject *arr = (PyArrayObject *)PyArray_FromAny_int( - other, NULL, NULL, 0, NPY_MAXDIMS, 0, NULL, &was_scalar); + other, NULL, NULL, 0, NPY_MAXDIMS, 0, &was_scalar); if (arr == NULL) { return -1; } From a17c7fd32cde532aafde858b2b031a847f35d9c3 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 11 Apr 2026 13:06:55 +0200 Subject: [PATCH 1630/1718] TYP: ``pad`` shape-typing --- numpy/lib/_arraypad_impl.pyi | 20 +++++++++++++++++++- numpy/typing/tests/data/reveal/arraypad.pyi | 5 +++++ 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/numpy/lib/_arraypad_impl.pyi b/numpy/lib/_arraypad_impl.pyi index da7c89859d86..1d41ee58fa47 100644 --- a/numpy/lib/_arraypad_impl.pyi +++ b/numpy/lib/_arraypad_impl.pyi @@ -1,7 +1,7 @@ from typing import Any, Literal as L, Protocol, overload, type_check_only import numpy as np -from numpy._typing import ArrayLike, NDArray, _ArrayLike, _ArrayLikeInt +from numpy._typing import ArrayLike, NDArray, _ArrayLike, _ArrayLikeInt, _Shape __all__ = ["pad"] @@ -44,6 +44,17 @@ type _PadWidth = ( # Expand `**kwargs` into explicit keyword-only arguments @overload +def pad[ShapeT: _Shape, DTypeT: np.dtype]( + array: np.ndarray[ShapeT, DTypeT], + pad_width: _PadWidth, + mode: _ModeKind = "constant", + *, + stat_length: _ArrayLikeInt | None = None, + constant_values: ArrayLike = 0, + end_values: ArrayLike = 0, + reflect_type: L["odd", "even"] = "even", +) -> np.ndarray[ShapeT, DTypeT]: ... 
+@overload def pad[ScalarT: np.generic]( array: _ArrayLike[ScalarT], pad_width: _PadWidth, @@ -66,6 +77,13 @@ def pad( reflect_type: L["odd", "even"] = "even", ) -> NDArray[Any]: ... @overload +def pad[ShapeT: _Shape, DTypeT: np.dtype]( + array: np.ndarray[ShapeT, DTypeT], + pad_width: _PadWidth, + mode: _ModeFunc, + **kwargs: Any, +) -> np.ndarray[ShapeT, DTypeT]: ... +@overload def pad[ScalarT: np.generic]( array: _ArrayLike[ScalarT], pad_width: _PadWidth, diff --git a/numpy/typing/tests/data/reveal/arraypad.pyi b/numpy/typing/tests/data/reveal/arraypad.pyi index 3d53d913a770..bbf843da0042 100644 --- a/numpy/typing/tests/data/reveal/arraypad.pyi +++ b/numpy/typing/tests/data/reveal/arraypad.pyi @@ -13,6 +13,8 @@ def mode_func( AR_i8: npt.NDArray[np.int64] AR_f8: npt.NDArray[np.float64] +AR_f8_1d: np.ndarray[tuple[int], np.dtype[np.float64]] +AR_f8_2d: np.ndarray[tuple[int, int], np.dtype[np.float64]] AR_LIKE: list[int] assert_type(np.pad(AR_i8, (2, 3), "constant"), npt.NDArray[np.int64]) @@ -25,3 +27,6 @@ assert_type(np.pad(AR_i8, {-1: (2, 3)}), npt.NDArray[np.int64]) assert_type(np.pad(AR_i8, {-2: 4}), npt.NDArray[np.int64]) pad_width: dict[int, int | tuple[int, int]] = {-1: (2, 3), -2: 4} assert_type(np.pad(AR_i8, pad_width), npt.NDArray[np.int64]) + +assert_type(np.pad(AR_f8_1d, (2, 3)), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.pad(AR_f8_2d, (2, 3)), np.ndarray[tuple[int, int], np.dtype[np.float64]]) From b8e0988b21a9326ea8fc45161b40f40ffa0a3811 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 11 Apr 2026 14:12:36 +0200 Subject: [PATCH 1631/1718] TYP: ``ix_`` shape-typing and fix for boolean input --- numpy/lib/_index_tricks_impl.pyi | 91 +++++++++++++++---- .../typing/tests/data/reveal/index_tricks.pyi | 70 +++++++++++++- 2 files changed, 141 insertions(+), 20 deletions(-) diff --git a/numpy/lib/_index_tricks_impl.pyi b/numpy/lib/_index_tricks_impl.pyi index ad3b5cb6e236..3d1a5406e15e 100644 --- a/numpy/lib/_index_tricks_impl.pyi +++ 
b/numpy/lib/_index_tricks_impl.pyi @@ -23,8 +23,8 @@ from numpy._typing import ( _AnyShape, _ArrayLike, _DTypeLike, - _HasDType, _NestedSequence, + _ScalarLike_co, _SupportsArray, ) @@ -54,6 +54,12 @@ _MatrixT_co = TypeVar("_MatrixT_co", bound=bool, default=L[False], covariant=Tru _NDMinT_co = TypeVar("_NDMinT_co", bound=int, default=L[1], covariant=True) _Trans1DT_co = TypeVar("_Trans1DT_co", bound=int, default=L[-1], covariant=True) +type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] +type _Array2D[ScalarT: np.generic] = np.ndarray[tuple[int, int], np.dtype[ScalarT]] +type _Array3D[ScalarT: np.generic] = np.ndarray[tuple[int, int, int], np.dtype[ScalarT]] + +type _ToArray1D[ScalarT: np.generic] = _Array1D[ScalarT] | Sequence[ScalarT] + ### class ndenumerate(Generic[_ScalarT_co]): @@ -230,22 +236,73 @@ class IndexExpression(Generic[_BoolT_co]): @overload def __getitem__[T](self: IndexExpression[L[False]], item: T) -> T: ... -@overload -def ix_[DTypeT: np.dtype]( - *args: _NestedSequence[_HasDType[DTypeT]] | _HasDType[DTypeT] -) -> tuple[np.ndarray[_AnyShape, DTypeT], ...]: ... -@overload -def ix_(*args: str | _NestedSequence[str]) -> tuple[NDArray[np.str_], ...]: ... -@overload -def ix_(*args: bytes | _NestedSequence[bytes]) -> tuple[NDArray[np.bytes_], ...]: ... -@overload -def ix_(*args: bool | _NestedSequence[bool]) -> tuple[NDArray[np.bool], ...]: ... -@overload -def ix_(*args: int | _NestedSequence[int]) -> tuple[NDArray[np.intp], ...]: ... -@overload -def ix_(*args: float | _NestedSequence[float]) -> tuple[NDArray[np.float64], ...]: ... -@overload -def ix_(*args: complex | _NestedSequence[complex]) -> tuple[NDArray[np.complex128], ...]: ... +# only the `int` sequences have special-cased shape-type overloads, because this is the +# most common use case and the others would require too many overloads to be worth it. +@overload # 0 +def ix_() -> tuple[()]: ... 
+@overload # 1 +int +def ix_(arg0: Sequence[int], /) -> tuple[_Array1D[np.int_]]: ... +@overload # 1 ScalarT +def ix_[ScalarT: np.generic]( + arg0: _ToArray1D[ScalarT], + /, +) -> tuple[_Array1D[ScalarT]]: ... +@overload # 2 +int +def ix_( + arg0: Sequence[int], + arg1: Sequence[int], + /, +) -> tuple[_Array2D[np.int_], _Array2D[np.int_]]: ... +@overload # 2 ScalarT +def ix_[ScalarT: np.generic]( + arg0: _ToArray1D[ScalarT], + arg1: _ToArray1D[ScalarT], + /, +) -> tuple[_Array2D[ScalarT], _Array2D[ScalarT]]: ... +@overload # 3 +int +def ix_( + arg0: Sequence[int], + arg1: Sequence[int], + arg2: Sequence[int], + /, +) -> tuple[_Array3D[np.int_], _Array3D[np.int_], _Array3D[np.int_]]: ... +@overload # 3 ScalarT +def ix_[ScalarT: np.generic]( + arg0: _ToArray1D[ScalarT], + arg1: _ToArray1D[ScalarT], + arg2: _ToArray1D[ScalarT], + /, +) -> tuple[_Array3D[ScalarT], _Array3D[ScalarT], _Array3D[ScalarT]]: ... +@overload # N +int +def ix_( + arg0: Sequence[int], + arg1: Sequence[int], + arg2: Sequence[int], + /, + *args: Sequence[int], +) -> tuple[NDArray[np.int_], ...]: ... +@overload # N ScalarT +def ix_[ScalarT: np.generic]( + arg0: _ToArray1D[ScalarT], + arg1: _ToArray1D[ScalarT], + arg2: _ToArray1D[ScalarT], + /, + *args: _ToArray1D[ScalarT], +) -> tuple[NDArray[ScalarT], ...]: ... +@overload # N float +def ix_(arg0: list[float], /, *args: Sequence[float]) -> tuple[NDArray[np.float64], ...]: ... +@overload # N complex +def ix_(arg0: list[complex], /, *args: Sequence[complex]) -> tuple[NDArray[np.complex128], ...]: ... +@overload # N bytes +def ix_(arg0: Sequence[bytes], /, *args: Sequence[bytes]) -> tuple[NDArray[np.bytes_], ...]: ... +@overload # N str +def ix_(arg0: Sequence[str], /, *args: Sequence[str]) -> tuple[NDArray[np.str_], ...]: ... +@overload # fallback +def ix_( + arg0: Sequence[_ScalarLike_co] | _Array1D[Any], + /, + *args: Sequence[_ScalarLike_co] | _Array1D[Any], +) -> tuple[NDArray[Any], ...]: ... 
# def fill_diagonal(a: NDArray[Any], val: object, wrap: bool = False) -> None: ... diff --git a/numpy/typing/tests/data/reveal/index_tricks.pyi b/numpy/typing/tests/data/reveal/index_tricks.pyi index f6067c3bed6b..c70e4eb69e12 100644 --- a/numpy/typing/tests/data/reveal/index_tricks.pyi +++ b/numpy/typing/tests/data/reveal/index_tricks.pyi @@ -7,10 +7,13 @@ import numpy.typing as npt AR_LIKE_b: list[bool] AR_LIKE_i: list[int] AR_LIKE_f: list[float] +AR_LIKE_c: list[complex] +AR_LIKE_S: list[bytes] AR_LIKE_U: list[str] AR_LIKE_O: list[object] AR_i8: npt.NDArray[np.int64] +AR_f4: npt.NDArray[np.float32] AR_O: npt.NDArray[np.object_] assert_type(np.ndenumerate(AR_i8), np.ndenumerate[np.int64]) @@ -58,9 +61,70 @@ assert_type(np.s_[0:1], slice[int, int, None]) assert_type(np.s_[0:1, None:3], tuple[slice[int, int, None], slice[None, int, None]]) assert_type(np.s_[0, 0:1, ..., [0, 1, 3]], tuple[Literal[0], slice[int, int, None], EllipsisType, list[int]]) -assert_type(np.ix_(AR_LIKE_b), tuple[npt.NDArray[np.bool], ...]) -assert_type(np.ix_(AR_LIKE_i, AR_LIKE_f), tuple[npt.NDArray[np.float64], ...]) -assert_type(np.ix_(AR_i8), tuple[npt.NDArray[np.int64], ...]) +assert_type(np.ix_(AR_LIKE_b), tuple[np.ndarray[tuple[int], np.dtype[np.int_]]]) +assert_type(np.ix_(AR_LIKE_i), tuple[np.ndarray[tuple[int], np.dtype[np.int_]]]) +assert_type(np.ix_(AR_f4), tuple[np.ndarray[tuple[int], np.dtype[np.float32]]]) +assert_type( + np.ix_(AR_LIKE_b, AR_LIKE_b), + tuple[ + np.ndarray[tuple[int, int], np.dtype[np.int_]], + np.ndarray[tuple[int, int], np.dtype[np.int_]], + ], +) +assert_type( + np.ix_(AR_LIKE_i, AR_LIKE_i), + tuple[ + np.ndarray[tuple[int, int], np.dtype[np.int_]], + np.ndarray[tuple[int, int], np.dtype[np.int_]], + ], +) +assert_type( + np.ix_(AR_f4, AR_f4), + tuple[ + np.ndarray[tuple[int, int], np.dtype[np.float32]], + np.ndarray[tuple[int, int], np.dtype[np.float32]], + ], +) +assert_type( + np.ix_(AR_LIKE_b, AR_LIKE_b, AR_LIKE_b), + tuple[ + np.ndarray[tuple[int, int, 
int], np.dtype[np.int_]], + np.ndarray[tuple[int, int, int], np.dtype[np.int_]], + np.ndarray[tuple[int, int, int], np.dtype[np.int_]], + ], +) +assert_type( + np.ix_(AR_LIKE_i, AR_LIKE_i, AR_LIKE_i), + tuple[ + np.ndarray[tuple[int, int, int], np.dtype[np.int_]], + np.ndarray[tuple[int, int, int], np.dtype[np.int_]], + np.ndarray[tuple[int, int, int], np.dtype[np.int_]], + ], +) +assert_type( + np.ix_(AR_f4, AR_f4, AR_f4), + tuple[ + np.ndarray[tuple[int, int, int], np.dtype[np.float32]], + np.ndarray[tuple[int, int, int], np.dtype[np.float32]], + np.ndarray[tuple[int, int, int], np.dtype[np.float32]], + ], +) +assert_type( + np.ix_(AR_LIKE_b, AR_LIKE_b, AR_LIKE_b, AR_LIKE_b), + tuple[npt.NDArray[np.int_], ...], +) +assert_type( + np.ix_(AR_LIKE_i, AR_LIKE_i, AR_LIKE_i, AR_LIKE_b), + tuple[npt.NDArray[np.int_], ...], +) +assert_type( + np.ix_(AR_f4, AR_f4, AR_f4, AR_f4), + tuple[npt.NDArray[np.float32], ...], +) +assert_type(np.ix_(AR_LIKE_f), tuple[npt.NDArray[np.float64], ...]) +assert_type(np.ix_(AR_LIKE_c), tuple[npt.NDArray[np.complex128], ...]) +assert_type(np.ix_(AR_LIKE_S), tuple[npt.NDArray[np.bytes_], ...]) +assert_type(np.ix_(AR_LIKE_U), tuple[npt.NDArray[np.str_], ...]) assert_type(np.fill_diagonal(AR_i8, 5), None) From 76ddd632dfe29d06386d98689859aa29836a64fa Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 11 Apr 2026 15:15:57 +0200 Subject: [PATCH 1632/1718] TYP: ``expand_dims`` shape-typing --- numpy/lib/_shape_base_impl.pyi | 56 +++++++++++++++++-- numpy/typing/tests/data/reveal/shape_base.pyi | 22 +++++++- 2 files changed, 72 insertions(+), 6 deletions(-) diff --git a/numpy/lib/_shape_base_impl.pyi b/numpy/lib/_shape_base_impl.pyi index 793f2c0374b3..565f48f862d9 100644 --- a/numpy/lib/_shape_base_impl.pyi +++ b/numpy/lib/_shape_base_impl.pyi @@ -3,6 +3,7 @@ from collections.abc import Callable, Sequence from typing import ( Any, Concatenate, + Never, Protocol, Self, SupportsIndex, @@ -14,6 +15,7 @@ import numpy as np from numpy._typing import 
( ArrayLike, NDArray, + _AnyShape, _ArrayLike, _ArrayLikeBool_co, _ArrayLikeComplex_co, @@ -22,6 +24,7 @@ from numpy._typing import ( _ArrayLikeInt_co, _ArrayLikeObject_co, _ArrayLikeUInt_co, + _Shape, _ShapeLike, ) @@ -70,6 +73,8 @@ class _SupportsSplitOps(Protocol): def swapaxes(self, axis1: int, axis2: int, /) -> Self: ... def __getitem__(self, key: Any, /) -> Self: ... +type _JustAnyShape = tuple[Never, Never, Never] # workaround for microsoft/pyright#10232 + ### def take_along_axis[ScalarT: np.generic]( @@ -112,10 +117,53 @@ def apply_over_axes[ScalarT: np.generic]( ) -> NDArray[ScalarT]: ... # -@overload -def expand_dims[ScalarT: np.generic](a: _ArrayLike[ScalarT], axis: _ShapeLike) -> NDArray[ScalarT]: ... -@overload -def expand_dims(a: ArrayLike, axis: _ShapeLike) -> NDArray[Incomplete]: ... +@overload # Nd -> Nd +def expand_dims[ShapeT: _Shape, DTypeT: np.dtype]( + a: np.ndarray[ShapeT, DTypeT], + axis: tuple[()], +) -> np.ndarray[ShapeT, DTypeT]: ... +@overload # ?d -> ?d (workaround) +def expand_dims[DTypeT: np.dtype]( + a: np.ndarray[_JustAnyShape, DTypeT], + axis: int | tuple[int, ...], +) -> np.ndarray[_AnyShape, DTypeT]: ... +@overload # 0d -> 1d +def expand_dims[ScalarT: np.generic]( + a: ScalarT | np.ndarray[tuple[()], np.dtype[ScalarT]], + axis: int | tuple[int], +) -> np.ndarray[tuple[int], np.dtype[ScalarT]]: ... +@overload # 0d -> 2d +def expand_dims[ScalarT: np.generic]( + a: ScalarT | np.ndarray[tuple[()], np.dtype[ScalarT]], + axis: tuple[int, int], +) -> np.ndarray[tuple[int, int], np.dtype[ScalarT]]: ... +@overload # 1d -> 2d +def expand_dims[DTypeT: np.dtype]( + a: np.ndarray[tuple[int], DTypeT], + axis: int | tuple[int], +) -> np.ndarray[tuple[int, int], DTypeT]: ... +@overload # 1d -> 3d +def expand_dims[DTypeT: np.dtype]( + a: np.ndarray[tuple[int], DTypeT], + axis: tuple[int, int], +) -> np.ndarray[tuple[int, int, int], DTypeT]: ... 
+@overload # 2d -> 3d +def expand_dims[DTypeT: np.dtype]( + a: np.ndarray[tuple[int, int], DTypeT], + axis: int | tuple[int], +) -> np.ndarray[tuple[int, int, int], DTypeT]: ... +@overload # 2d -> 4d +def expand_dims[DTypeT: np.dtype]( + a: np.ndarray[tuple[int, int], DTypeT], + axis: tuple[int, int], +) -> np.ndarray[tuple[int, int, int, int], DTypeT]: ... +@overload # Nd -> ?d +def expand_dims[ScalarT: np.generic]( + a: _ArrayLike[ScalarT], + axis: int | tuple[int, ...], +) -> NDArray[ScalarT]: ... +@overload # fallback +def expand_dims(a: ArrayLike, axis: int | tuple[int, ...]) -> NDArray[Any]: ... # keep in sync with `numpy.ma.extras.column_stack` @overload diff --git a/numpy/typing/tests/data/reveal/shape_base.pyi b/numpy/typing/tests/data/reveal/shape_base.pyi index ce033e97d070..09c3d732c2cc 100644 --- a/numpy/typing/tests/data/reveal/shape_base.pyi +++ b/numpy/typing/tests/data/reveal/shape_base.pyi @@ -8,6 +8,9 @@ f8: np.float64 AR_b: npt.NDArray[np.bool] AR_i8: npt.NDArray[np.int64] +AR_i8_0d: np.ndarray[tuple[()], np.dtype[np.int64]] +AR_i8_1d: np.ndarray[tuple[int], np.dtype[np.int64]] +AR_i8_2d: np.ndarray[tuple[int, int], np.dtype[np.int64]] AR_f8: npt.NDArray[np.float64] AR_LIKE_f8: list[float] @@ -26,8 +29,23 @@ assert_type(np.take_along_axis(f8, AR_i8, axis=None), npt.NDArray[np.float64]) assert_type(np.put_along_axis(AR_f8, AR_i8, "1.0", axis=1), None) -assert_type(np.expand_dims(AR_i8, 2), npt.NDArray[np.int64]) -assert_type(np.expand_dims(AR_LIKE_f8, 2), npt.NDArray[Any]) +assert_type(np.expand_dims(AR_LIKE_f8, 0), np.ndarray) +assert_type(np.expand_dims(AR_i8, ()), npt.NDArray[np.int64]) +assert_type(np.expand_dims(AR_i8, 0), npt.NDArray[np.int64]) +assert_type(np.expand_dims(AR_i8, (0,)), npt.NDArray[np.int64]) +assert_type(np.expand_dims(AR_i8, (0, 1)), npt.NDArray[np.int64]) +assert_type(np.expand_dims(AR_i8_0d, ()), np.ndarray[tuple[()], np.dtype[np.int64]]) +assert_type(np.expand_dims(AR_i8_0d, 0), np.ndarray[tuple[int], 
np.dtype[np.int64]]) +assert_type(np.expand_dims(AR_i8_0d, (0,)), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(np.expand_dims(AR_i8_0d, (0, 1)), np.ndarray[tuple[int, int], np.dtype[np.int64]]) +assert_type(np.expand_dims(AR_i8_1d, ()), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(np.expand_dims(AR_i8_1d, 0), np.ndarray[tuple[int, int], np.dtype[np.int64]]) +assert_type(np.expand_dims(AR_i8_1d, (0,)), np.ndarray[tuple[int, int], np.dtype[np.int64]]) +assert_type(np.expand_dims(AR_i8_1d, (0, 1)), np.ndarray[tuple[int, int, int], np.dtype[np.int64]]) +assert_type(np.expand_dims(AR_i8_2d, ()), np.ndarray[tuple[int, int], np.dtype[np.int64]]) +assert_type(np.expand_dims(AR_i8_2d, 0), np.ndarray[tuple[int, int, int], np.dtype[np.int64]]) +assert_type(np.expand_dims(AR_i8_2d, (0,)), np.ndarray[tuple[int, int, int], np.dtype[np.int64]]) +assert_type(np.expand_dims(AR_i8_2d, (0, 1)), np.ndarray[tuple[int, int, int, int], np.dtype[np.int64]]) assert_type(np.column_stack([AR_i8]), npt.NDArray[np.int64]) assert_type(np.column_stack([AR_LIKE_f8]), npt.NDArray[Any]) From ff41520a2aa6142dd08251ea9cd136745be446bc Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Sun, 12 Apr 2026 17:45:32 +0200 Subject: [PATCH 1633/1718] TYP: ``diag_indices[_from]`` shape-typing (#31219) --- numpy/lib/_index_tricks_impl.pyi | 30 +++++++++++++++++-- numpy/typing/tests/data/fail/index_tricks.pyi | 2 +- .../typing/tests/data/reveal/index_tricks.pyi | 22 +++++++++++--- 3 files changed, 47 insertions(+), 7 deletions(-) diff --git a/numpy/lib/_index_tricks_impl.pyi b/numpy/lib/_index_tricks_impl.pyi index 3d1a5406e15e..ae24f84189f0 100644 --- a/numpy/lib/_index_tricks_impl.pyi +++ b/numpy/lib/_index_tricks_impl.pyi @@ -6,6 +6,7 @@ from typing import ( Final, Generic, Literal as L, + Never, Self, SupportsIndex, final, @@ -23,6 +24,7 @@ from numpy._typing import ( _AnyShape, _ArrayLike, _DTypeLike, + _IntLike_co, _NestedSequence, _ScalarLike_co, _SupportsArray, @@ -58,8 +60,12 
@@ type _Array1D[ScalarT: np.generic] = np.ndarray[tuple[int], np.dtype[ScalarT]] type _Array2D[ScalarT: np.generic] = np.ndarray[tuple[int, int], np.dtype[ScalarT]] type _Array3D[ScalarT: np.generic] = np.ndarray[tuple[int, int, int], np.dtype[ScalarT]] +type _Int1D = _Array1D[np.intp] + type _ToArray1D[ScalarT: np.generic] = _Array1D[ScalarT] | Sequence[ScalarT] +type _JustAnyShape = tuple[Never, Never, Never, Never, Never] # workaround for microsoft/pyright#10232 + ### class ndenumerate(Generic[_ScalarT_co]): @@ -308,8 +314,28 @@ def ix_( def fill_diagonal(a: NDArray[Any], val: object, wrap: bool = False) -> None: ... # -def diag_indices(n: int, ndim: int = 2) -> tuple[NDArray[np.intp], ...]: ... -def diag_indices_from(arr: ArrayLike) -> tuple[NDArray[np.intp], ...]: ... +@overload +def diag_indices(n: _IntLike_co, ndim: L[0]) -> tuple[()]: ... +@overload +def diag_indices(n: _IntLike_co, ndim: L[1]) -> tuple[_Int1D]: ... +@overload +def diag_indices(n: _IntLike_co, ndim: L[2] = 2) -> tuple[_Int1D, _Int1D]: ... +@overload +def diag_indices(n: _IntLike_co, ndim: L[3]) -> tuple[_Int1D, _Int1D, _Int1D]: ... +@overload +def diag_indices(n: _IntLike_co, ndim: int) -> tuple[_Int1D, ...]: ... + +# +@overload # ?d (workaround) +def diag_indices_from(arr: np.ndarray[_JustAnyShape]) -> tuple[_Int1D, _Int1D, *tuple[_Int1D, ...]]: ... +@overload # 2d +def diag_indices_from(arr: np.ndarray[tuple[int, int]]) -> tuple[_Int1D, _Int1D]: ... +@overload # 3d +def diag_indices_from(arr: np.ndarray[tuple[int, int, int]]) -> tuple[_Int1D, _Int1D, _Int1D]: ... +@overload # 4d +def diag_indices_from(arr: np.ndarray[tuple[int, int, int, int]]) -> tuple[_Int1D, _Int1D, _Int1D, _Int1D]: ... +@overload # >=2d (fallback) +def diag_indices_from(arr: np.ndarray[tuple[int, int, *tuple[int, ...]]]) -> tuple[_Int1D, _Int1D, *tuple[_Int1D, ...]]: ... # mgrid: Final[MGridClass] = ... 
diff --git a/numpy/typing/tests/data/fail/index_tricks.pyi b/numpy/typing/tests/data/fail/index_tricks.pyi index 8b7b1ae2b5bf..f1f78bd5d805 100644 --- a/numpy/typing/tests/data/fail/index_tricks.pyi +++ b/numpy/typing/tests/data/fail/index_tricks.pyi @@ -11,4 +11,4 @@ np.mgrid[...] # type: ignore[index] np.ogrid[1] # type: ignore[index] np.ogrid[...] # type: ignore[index] np.fill_diagonal(AR_LIKE_f, 2) # type: ignore[arg-type] -np.diag_indices(1.0) # type: ignore[arg-type] +np.diag_indices(1.0) # type: ignore[call-overload] diff --git a/numpy/typing/tests/data/reveal/index_tricks.pyi b/numpy/typing/tests/data/reveal/index_tricks.pyi index c70e4eb69e12..6567deb9c3f4 100644 --- a/numpy/typing/tests/data/reveal/index_tricks.pyi +++ b/numpy/typing/tests/data/reveal/index_tricks.pyi @@ -13,9 +13,15 @@ AR_LIKE_U: list[str] AR_LIKE_O: list[object] AR_i8: npt.NDArray[np.int64] +AR_i8_2d: np.ndarray[tuple[int, int], np.dtype[np.int64]] +AR_i8_3d: np.ndarray[tuple[int, int, int], np.dtype[np.int64]] +AR_i8_4d: np.ndarray[tuple[int, int, int, int], np.dtype[np.int64]] +AR_i8_5d: np.ndarray[tuple[int, int, int, int, int], np.dtype[np.int64]] AR_f4: npt.NDArray[np.float32] AR_O: npt.NDArray[np.object_] +type _Int1D = np.ndarray[tuple[int], np.dtype[np.intp]] + assert_type(np.ndenumerate(AR_i8), np.ndenumerate[np.int64]) assert_type(np.ndenumerate(AR_LIKE_f), np.ndenumerate[np.float64]) assert_type(np.ndenumerate(AR_LIKE_U), np.ndenumerate[np.str_]) @@ -128,7 +134,15 @@ assert_type(np.ix_(AR_LIKE_U), tuple[npt.NDArray[np.str_], ...]) assert_type(np.fill_diagonal(AR_i8, 5), None) -assert_type(np.diag_indices(4), tuple[npt.NDArray[np.int_], ...]) -assert_type(np.diag_indices(2, 3), tuple[npt.NDArray[np.int_], ...]) - -assert_type(np.diag_indices_from(AR_i8), tuple[npt.NDArray[np.int_], ...]) +assert_type(np.diag_indices(4), tuple[_Int1D, _Int1D]) +assert_type(np.diag_indices(4, 0), tuple[()]) +assert_type(np.diag_indices(4, 1), tuple[_Int1D]) +assert_type(np.diag_indices(4, 2), 
tuple[_Int1D, _Int1D]) +assert_type(np.diag_indices(4, 3), tuple[_Int1D, _Int1D, _Int1D]) +assert_type(np.diag_indices(4, 4), tuple[_Int1D, ...]) + +assert_type(np.diag_indices_from(AR_i8), tuple[_Int1D, _Int1D, *tuple[_Int1D, ...]]) +assert_type(np.diag_indices_from(AR_i8_2d), tuple[_Int1D, _Int1D]) +assert_type(np.diag_indices_from(AR_i8_3d), tuple[_Int1D, _Int1D, _Int1D]) +assert_type(np.diag_indices_from(AR_i8_4d), tuple[_Int1D, _Int1D, _Int1D, _Int1D]) +assert_type(np.diag_indices_from(AR_i8_5d), tuple[_Int1D, _Int1D, *tuple[_Int1D, ...]]) From 2f6071f464093561d3c498de18a68b5c15d00832 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Sun, 12 Apr 2026 17:46:48 +0200 Subject: [PATCH 1634/1718] TYP: ``real_if_close`` and ``nan_to_num`` shape-typing (#31223) --- numpy/lib/_type_check_impl.pyi | 121 +++++++++++++++--- numpy/typing/tests/data/reveal/type_check.pyi | 41 +++++- 2 files changed, 137 insertions(+), 25 deletions(-) diff --git a/numpy/lib/_type_check_impl.pyi b/numpy/lib/_type_check_impl.pyi index d23123f80c8e..29c78c116117 100644 --- a/numpy/lib/_type_check_impl.pyi +++ b/numpy/lib/_type_check_impl.pyi @@ -1,4 +1,3 @@ -from _typeshed import Incomplete from collections.abc import Container, Iterable from typing import Any, Literal as L, Protocol, overload, type_check_only from typing_extensions import deprecated @@ -10,9 +9,11 @@ from numpy._typing import ( _16Bit, _32Bit, _64Bit, + _AnyShape, _ArrayLike, _NestedSequence, _ScalarLike_co, + _Shape, _SupportsArray, ) @@ -94,48 +95,134 @@ def iscomplexobj(x: _HasDType[Any] | ArrayLike) -> bool: ... def isrealobj(x: _HasDType[Any] | ArrayLike) -> bool: ... # -@overload -def nan_to_num[ScalarT: np.generic]( - x: ScalarT, +@overload # np.generic | np.ndarray (`ndarray` subclasses pass through) +def nan_to_num[ScalarOrArrayT: np.generic | np.ndarray]( + x: ScalarOrArrayT, copy: bool = True, nan: float = 0.0, posinf: float | None = None, neginf: float | None = None, -) -> ScalarT: ... 
-@overload +) -> ScalarOrArrayT: ... +@overload # >0-d def nan_to_num[ScalarT: np.generic]( - x: NDArray[ScalarT] | _NestedSequence[_ArrayLike[ScalarT]], + x: _NestedSequence[_ArrayLike[ScalarT]], copy: bool = True, nan: float = 0.0, posinf: float | None = None, neginf: float | None = None, ) -> NDArray[ScalarT]: ... -@overload -def nan_to_num[ScalarT: np.generic]( - x: _SupportsArray[np.dtype[ScalarT]], +@overload # ?-d +def nan_to_num[DTypeT: np.dtype]( + x: _SupportsArray[DTypeT], copy: bool = True, nan: float = 0.0, posinf: float | None = None, neginf: float | None = None, -) -> ScalarT | NDArray[ScalarT]: ... -@overload +) -> np.ndarray[_AnyShape, DTypeT] | Any: ... +@overload # 0-d ~bool +def nan_to_num( + x: bool, + copy: bool = True, + nan: float = 0.0, + posinf: float | None = None, + neginf: float | None = None, +) -> np.bool: ... +@overload # 0-d +int +def nan_to_num( + x: int, + copy: bool = True, + nan: float = 0.0, + posinf: float | None = None, + neginf: float | None = None, +) -> np.int_ | Any: ... +@overload # 0-d +float +def nan_to_num( + x: float, + copy: bool = True, + nan: float = 0.0, + posinf: float | None = None, + neginf: float | None = None, +) -> np.float64 | Any: ... +@overload # 0-d +complex +def nan_to_num( + x: complex, + copy: bool = True, + nan: float = 0.0, + posinf: float | None = None, + neginf: float | None = None, +) -> np.complex128 | Any: ... +@overload # >0-d ~bool +def nan_to_num( + x: _NestedSequence[bool], + copy: bool = True, + nan: float = 0.0, + posinf: float | None = None, + neginf: float | None = None, +) -> NDArray[np.bool]: ... +@overload # >0-d ~int +def nan_to_num( + x: _NestedSequence[list[int]] | list[int], + copy: bool = True, + nan: float = 0.0, + posinf: float | None = None, + neginf: float | None = None, +) -> NDArray[np.int_]: ... 
+@overload # >0-d ~float +def nan_to_num( + x: _NestedSequence[list[float]] | list[float], + copy: bool = True, + nan: float = 0.0, + posinf: float | None = None, + neginf: float | None = None, +) -> NDArray[np.float64]: ... +@overload # >0-d ~complex +def nan_to_num( + x: _NestedSequence[list[complex]] | list[complex], + copy: bool = True, + nan: float = 0.0, + posinf: float | None = None, + neginf: float | None = None, +) -> NDArray[np.complex128]: ... +@overload # >0-d def nan_to_num( x: _NestedSequence[ArrayLike], copy: bool = True, nan: float = 0.0, posinf: float | None = None, neginf: float | None = None, -) -> NDArray[Incomplete]: ... -@overload +) -> np.ndarray: ... +@overload # ?-d def nan_to_num( x: ArrayLike, copy: bool = True, nan: float = 0.0, posinf: float | None = None, neginf: float | None = None, -) -> Incomplete: ... +) -> np.ndarray | Any: ... -# NOTE: The [overload-overlap] mypy error is a false positive +# +@overload +def real_if_close[ShapeT: _Shape, DTypeT: np.dtype[_ToReal]]( + a: np.ndarray[ShapeT, DTypeT], + tol: float = 100, +) -> np.ndarray[ShapeT, DTypeT]: ... +@overload +def real_if_close[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.complex64]], + tol: float = 100, +) -> np.ndarray[ShapeT, np.dtype[np.float32 | np.complex64]]: ... +@overload +def real_if_close[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.complex128]], + tol: float = 100, +) -> np.ndarray[ShapeT, np.dtype[np.float64 | np.complex128]]: ... +@overload +def real_if_close[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.clongdouble]], + tol: float = 100, +) -> np.ndarray[ShapeT, np.dtype[np.longdouble | np.clongdouble]]: ... +@overload +def real_if_close[RealT: _ToReal](a: _ArrayLike[RealT], tol: float = 100) -> NDArray[RealT]: ... @overload def real_if_close(a: _ArrayLike[np.complex64], tol: float = 100) -> NDArray[np.float32 | np.complex64]: ... 
@overload @@ -143,8 +230,6 @@ def real_if_close(a: _ArrayLike[np.complex128], tol: float = 100) -> NDArray[np. @overload def real_if_close(a: _ArrayLike[np.clongdouble], tol: float = 100) -> NDArray[np.longdouble | np.clongdouble]: ... @overload -def real_if_close[RealT: _ToReal](a: _ArrayLike[RealT], tol: float = 100) -> NDArray[RealT]: ... -@overload def real_if_close(a: ArrayLike, tol: float = 100) -> NDArray[Any]: ... # diff --git a/numpy/typing/tests/data/reveal/type_check.pyi b/numpy/typing/tests/data/reveal/type_check.pyi index 22eed7493689..a1cc267b7494 100644 --- a/numpy/typing/tests/data/reveal/type_check.pyi +++ b/numpy/typing/tests/data/reveal/type_check.pyi @@ -3,10 +3,11 @@ from typing import Any, Literal, assert_type import numpy as np import numpy.typing as npt +i4: np.int32 f8: np.float64 -f: float +m8_ns: np.timedelta64[int] +M8_ns: np.datetime64[int] -# NOTE: Avoid importing the platform specific `np.float128` type AR_i8: npt.NDArray[np.int64] AR_i4: npt.NDArray[np.int32] AR_f2: npt.NDArray[np.float16] @@ -15,7 +16,15 @@ AR_f16: npt.NDArray[np.longdouble] AR_c8: npt.NDArray[np.complex64] AR_c16: npt.NDArray[np.complex128] +AR_f8_1d: np.ndarray[tuple[int], np.dtype[np.float64]] +AR_f8_2d: np.ndarray[tuple[int, int], np.dtype[np.float64]] +AR_c16_1d: np.ndarray[tuple[int], np.dtype[np.complex128]] +AR_c16_2d: np.ndarray[tuple[int, int], np.dtype[np.complex128]] + +AR_LIKE_b: list[bool] +AR_LIKE_i: list[int] AR_LIKE_f: list[float] +AR_LIKE_c: list[complex] class ComplexObj: real: slice @@ -44,15 +53,33 @@ assert_type(np.isreal(AR_LIKE_f), npt.NDArray[np.bool]) assert_type(np.iscomplexobj(f8), bool) assert_type(np.isrealobj(f8), bool) +assert_type(np.nan_to_num(True), np.bool) +assert_type(np.nan_to_num(0), np.int_ | Any) +assert_type(np.nan_to_num(0.0), np.float64 | Any) +assert_type(np.nan_to_num(0j), np.complex128 | Any) +assert_type(np.nan_to_num(i4), np.int32) assert_type(np.nan_to_num(f8), np.float64) -assert_type(np.nan_to_num(f, copy=True), 
Any) -assert_type(np.nan_to_num(AR_f8, nan=1.5), npt.NDArray[np.float64]) -assert_type(np.nan_to_num(AR_LIKE_f, posinf=9999), npt.NDArray[Any]) +assert_type(np.nan_to_num(m8_ns), np.timedelta64[int]) +assert_type(np.nan_to_num(M8_ns), np.datetime64[int]) +assert_type(np.nan_to_num(AR_LIKE_b), npt.NDArray[np.bool]) +assert_type(np.nan_to_num(AR_LIKE_i), npt.NDArray[np.int_]) +assert_type(np.nan_to_num(AR_LIKE_f), npt.NDArray[np.float64]) +assert_type(np.nan_to_num(AR_LIKE_c), npt.NDArray[np.complex128]) +assert_type(np.nan_to_num(AR_f8), npt.NDArray[np.float64]) +assert_type(np.nan_to_num(AR_c16), npt.NDArray[np.complex128]) +assert_type(np.nan_to_num(AR_f8_1d), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.nan_to_num(AR_f8_2d), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.nan_to_num(AR_c16_1d), np.ndarray[tuple[int], np.dtype[np.complex128]]) +assert_type(np.nan_to_num(AR_c16_2d), np.ndarray[tuple[int, int], np.dtype[np.complex128]]) +assert_type(np.real_if_close(AR_LIKE_f), npt.NDArray[Any]) assert_type(np.real_if_close(AR_f8), npt.NDArray[np.float64]) -assert_type(np.real_if_close(AR_c16), npt.NDArray[np.float64 | np.complex128]) assert_type(np.real_if_close(AR_c8), npt.NDArray[np.float32 | np.complex64]) -assert_type(np.real_if_close(AR_LIKE_f), npt.NDArray[Any]) +assert_type(np.real_if_close(AR_c16), npt.NDArray[np.float64 | np.complex128]) +assert_type(np.real_if_close(AR_f8_1d), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.real_if_close(AR_f8_2d), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.real_if_close(AR_c16_1d), np.ndarray[tuple[int], np.dtype[np.float64 | np.complex128]]) +assert_type(np.real_if_close(AR_c16_2d), np.ndarray[tuple[int, int], np.dtype[np.float64 | np.complex128]]) assert_type(np.typename("h"), Literal["short"]) # type: ignore[deprecated] assert_type(np.typename("B"), Literal["unsigned char"]) # type: ignore[deprecated] From 
64d60eed82b1c578631e51109ce76b9da1c4c3b5 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Sun, 12 Apr 2026 17:48:33 +0200 Subject: [PATCH 1635/1718] TYP: ``lib.NumpyVersion`` final instance attributes and pos-only dunder params (#31224) --- numpy/lib/_version.pyi | 33 +++++++++++++++++++-------------- 1 file changed, 19 insertions(+), 14 deletions(-) diff --git a/numpy/lib/_version.pyi b/numpy/lib/_version.pyi index c53ef795f926..7d5b03f0cc25 100644 --- a/numpy/lib/_version.pyi +++ b/numpy/lib/_version.pyi @@ -1,17 +1,22 @@ +from typing import Final + __all__ = ["NumpyVersion"] class NumpyVersion: - vstring: str - version: str - major: int - minor: int - bugfix: int - pre_release: str - is_devversion: bool - def __init__(self, vstring: str) -> None: ... - def __lt__(self, other: str | NumpyVersion) -> bool: ... - def __le__(self, other: str | NumpyVersion) -> bool: ... - def __eq__(self, other: str | NumpyVersion) -> bool: ... # type: ignore[override] - def __ne__(self, other: str | NumpyVersion) -> bool: ... # type: ignore[override] - def __gt__(self, other: str | NumpyVersion) -> bool: ... - def __ge__(self, other: str | NumpyVersion) -> bool: ... + __module__ = "numpy.lib" + + vstring: Final[str] + version: Final[str] + major: Final[int] + minor: Final[int] + bugfix: Final[int] + pre_release: Final[str] + is_devversion: Final[bool] + + def __init__(self, /, vstring: str) -> None: ... + def __lt__(self, other: str | NumpyVersion, /) -> bool: ... + def __le__(self, other: str | NumpyVersion, /) -> bool: ... + def __eq__(self, other: str | NumpyVersion, /) -> bool: ... # type: ignore[override] + def __ne__(self, other: str | NumpyVersion, /) -> bool: ... # type: ignore[override] + def __gt__(self, other: str | NumpyVersion, /) -> bool: ... + def __ge__(self, other: str | NumpyVersion, /) -> bool: ... 
From 3cd33b12bd6682b04a6443951031cfe795d8accf Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Sun, 12 Apr 2026 17:50:20 +0200 Subject: [PATCH 1636/1718] TYP: ``fft.[i]fftshift`` shape-typing (#31225) --- numpy/fft/_helper.pyi | 13 ++++++++++++- numpy/typing/tests/data/reveal/fft.pyi | 8 +++++--- 2 files changed, 17 insertions(+), 4 deletions(-) diff --git a/numpy/fft/_helper.pyi b/numpy/fft/_helper.pyi index 05603a2046c3..f67737244805 100644 --- a/numpy/fft/_helper.pyi +++ b/numpy/fft/_helper.pyi @@ -22,12 +22,23 @@ type _1D = tuple[int] integer_types: Final[tuple[type[int], type[np.integer]]] = ... +# keep in sync with `ifftshift` below +@overload +def fftshift[ShapeT: _Shape, DTypeT: np.dtype]( + x: np.ndarray[ShapeT, DTypeT], + axes: _ShapeLike | None = None, +) -> np.ndarray[ShapeT, DTypeT]: ... @overload def fftshift[ScalarT: np.generic](x: _ArrayLike[ScalarT], axes: _ShapeLike | None = None) -> NDArray[ScalarT]: ... @overload def fftshift(x: ArrayLike, axes: _ShapeLike | None = None) -> NDArray[Any]: ... -# +# keep in sync with `fftshift` above +@overload +def ifftshift[ShapeT: _Shape, DTypeT: np.dtype]( + x: np.ndarray[ShapeT, DTypeT], + axes: _ShapeLike | None = None, +) -> np.ndarray[ShapeT, DTypeT]: ... @overload def ifftshift[ScalarT: np.generic](x: _ArrayLike[ScalarT], axes: _ShapeLike | None = None) -> NDArray[ScalarT]: ... 
@overload diff --git a/numpy/typing/tests/data/reveal/fft.pyi b/numpy/typing/tests/data/reveal/fft.pyi index 38a3d2f8e692..c7aa2ded556c 100644 --- a/numpy/typing/tests/data/reveal/fft.pyi +++ b/numpy/typing/tests/data/reveal/fft.pyi @@ -19,13 +19,13 @@ _c64: np.complex64 _c160: np.clongdouble _i64_2d: _Array2D[np.int64] -_f32_2d: _Array2D[np.float16] +_f32_2d: _Array2D[np.float32] _f80_2d: _Array2D[np.longdouble] _c64_2d: _Array2D[np.complex64] _c160_2d: _Array2D[np.clongdouble] _i64_nd: npt.NDArray[np.int64] -_f32_nd: npt.NDArray[np.float16] +_f32_nd: npt.NDArray[np.float32] _f80_nd: npt.NDArray[np.longdouble] _c64_nd: npt.NDArray[np.complex64] _c160_nd: npt.NDArray[np.clongdouble] @@ -34,12 +34,14 @@ _c160_nd: npt.NDArray[np.clongdouble] # fftshift -assert_type(np.fft.fftshift(_f64_nd), npt.NDArray[np.float64]) assert_type(np.fft.fftshift(_py_float_1d, axes=0), npt.NDArray[Any]) +assert_type(np.fft.fftshift(_f32_2d), _Array2D[np.float32]) +assert_type(np.fft.fftshift(_f64_nd), npt.NDArray[np.float64]) # ifftshift assert_type(np.fft.ifftshift(_f64_nd), npt.NDArray[np.float64]) +assert_type(np.fft.ifftshift(_f32_2d), _Array2D[np.float32]) assert_type(np.fft.ifftshift(_py_float_1d, axes=0), npt.NDArray[Any]) # fftfreq From 7a0dfadc88c7a21746d827767cad5296df765e6e Mon Sep 17 00:00:00 2001 From: Maarten Baert Date: Mon, 13 Apr 2026 13:23:30 +0200 Subject: [PATCH 1637/1718] BUG: incorrect temp elision for new-style (NEP 43) user-defined dtypes (#31193) Co-authored-by: Maarten Baert Co-authored-by: Sebastian Berg --- numpy/_core/include/numpy/ndarraytypes.h | 5 +++-- numpy/_core/src/multiarray/calculation.c | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/numpy/_core/include/numpy/ndarraytypes.h b/numpy/_core/include/numpy/ndarraytypes.h index cc3b6a7da569..7aa23dd3426b 100644 --- a/numpy/_core/include/numpy/ndarraytypes.h +++ b/numpy/_core/include/numpy/ndarraytypes.h @@ -1669,8 +1669,9 @@ PyArray_CLEARFLAGS(PyArrayObject *arr, int flags) ((type) <= 
NPY_LONGDOUBLE)) || \ ((type) == NPY_HALF)) -#define PyTypeNum_ISNUMBER(type) (((type) <= NPY_CLONGDOUBLE) || \ - ((type) == NPY_HALF)) +#define PyTypeNum_ISNUMBER(type) (((type) >= 0) && \ + (((type) <= NPY_CLONGDOUBLE) || \ + ((type) == NPY_HALF))) #define PyTypeNum_ISSTRING(type) (((type) == NPY_STRING) || \ ((type) == NPY_UNICODE)) diff --git a/numpy/_core/src/multiarray/calculation.c b/numpy/_core/src/multiarray/calculation.c index a9d2f219760d..a34bb89b286f 100644 --- a/numpy/_core/src/multiarray/calculation.c +++ b/numpy/_core/src/multiarray/calculation.c @@ -829,7 +829,7 @@ NPY_NO_EXPORT PyObject * PyArray_Conjugate(PyArrayObject *self, PyArrayObject *out) { if (PyArray_ISCOMPLEX(self) || PyArray_ISOBJECT(self) || - PyArray_ISUSERDEF(self)) { + PyArray_ISUSERDEF(self) || !NPY_DT_is_legacy(PyArray_DESCR(self))) { if (out == NULL) { return PyArray_GenericUnaryFunction(self, n_ops.conjugate); From 90637009f716d847d8fa0383d55cedfd8eade64b Mon Sep 17 00:00:00 2001 From: RoomWithOutRoof Date: Tue, 14 Apr 2026 09:25:12 +0800 Subject: [PATCH 1638/1718] TYP: fix sliding_window_view axis parameter typing The axis parameter now accepts int | tuple[int, ...] instead of just SupportsIndex, matching the documentation and implementation. Fixes #31233 --- numpy/lib/_stride_tricks_impl.pyi | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/lib/_stride_tricks_impl.pyi b/numpy/lib/_stride_tricks_impl.pyi index faba9ab80cd4..7891d8f69bc7 100644 --- a/numpy/lib/_stride_tricks_impl.pyi +++ b/numpy/lib/_stride_tricks_impl.pyi @@ -40,7 +40,7 @@ def as_strided( def sliding_window_view[ScalarT: np.generic]( x: _ArrayLike[ScalarT], window_shape: int | Iterable[int], - axis: SupportsIndex | None = None, + axis: int | tuple[int, ...] 
| None = None, *, subok: bool = False, writeable: bool = False, @@ -49,7 +49,7 @@ def sliding_window_view[ScalarT: np.generic]( def sliding_window_view( x: ArrayLike, window_shape: int | Iterable[int], - axis: SupportsIndex | None = None, + axis: int | tuple[int, ...] | None = None, *, subok: bool = False, writeable: bool = False, From 4a126b31f5d1c03d4cea052abc49d31b8667cff3 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Tue, 14 Apr 2026 08:08:41 -0600 Subject: [PATCH 1639/1718] BUG: Remove thread-local allocator cache implementation (#31237) --- benchmarks/benchmarks/bench_alloc_cache.py | 45 ++++++++++++++ numpy/_core/code_generators/genapi.py | 2 +- numpy/_core/meson.build | 2 +- .../src/multiarray/{alloc.cpp => alloc.c} | 60 ++++--------------- numpy/_core/src/multiarray/alloc.h | 7 --- numpy/_core/src/multiarray/multiarraymodule.h | 7 --- 6 files changed, 60 insertions(+), 63 deletions(-) create mode 100644 benchmarks/benchmarks/bench_alloc_cache.py rename numpy/_core/src/multiarray/{alloc.cpp => alloc.c} (91%) diff --git a/benchmarks/benchmarks/bench_alloc_cache.py b/benchmarks/benchmarks/bench_alloc_cache.py new file mode 100644 index 000000000000..48793caeea7f --- /dev/null +++ b/benchmarks/benchmarks/bench_alloc_cache.py @@ -0,0 +1,45 @@ +"""Benchmarks for the NumPy small-allocation cache. + +NumPy caches data allocations smaller than 1024 bytes (up to 7 per size +bucket) to avoid repeated malloc/free calls. For float64 arrays this +means arrays with fewer than 128 elements hit the cache. + +These benchmarks measure tight create-and-discard loops so that the +allocator cache is exercised on every iteration after the first. +""" + +import numpy as np + +from .common import Benchmark + + +class SmallArrayCreation(Benchmark): + # Sizes chosen so that data bytes = size * 8 (float64). 
+ # Cached: 1..127 → 8..1016 bytes (< 1024, hits the cache) + # Uncached: 128+ → 1024+ bytes (bypasses the cache) + params = [[1, 4, 16, 64, 127, 128, 256, 512]] + param_names = ['size'] + timeout = 60 + + def setup(self, size): + self.dtype = np.float64 + + def time_empty_loop(self, size): + dt = self.dtype + for _ in range(10_000): + np.empty(size, dtype=dt) + + def time_full_loop(self, size): + dt = self.dtype + for _ in range(10_000): + np.full(size, 1.0, dtype=dt) + + def time_ones_loop(self, size): + dt = self.dtype + for _ in range(10_000): + np.ones(size, dtype=dt) + + def time_zeros_loop(self, size): + dt = self.dtype + for _ in range(10_000): + np.zeros(size, dtype=dt) diff --git a/numpy/_core/code_generators/genapi.py b/numpy/_core/code_generators/genapi.py index 00155319c5b0..e5c041d1af8b 100644 --- a/numpy/_core/code_generators/genapi.py +++ b/numpy/_core/code_generators/genapi.py @@ -37,7 +37,7 @@ def get_processor(): __docformat__ = 'restructuredtext' # The files under src/ that are scanned for API functions -API_FILES = [join('multiarray', 'alloc.cpp'), +API_FILES = [join('multiarray', 'alloc.c'), join('multiarray', 'abstractdtypes.c'), join('multiarray', 'arrayfunction_override.c'), join('multiarray', 'array_api_standard.c'), diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index d47af9345201..dc3985a0f5a3 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -1106,7 +1106,7 @@ endif src_multiarray = multiarray_gen_headers + [ 'src/multiarray/abstractdtypes.c', - 'src/multiarray/alloc.cpp', + 'src/multiarray/alloc.c', 'src/multiarray/arrayobject.c', 'src/multiarray/array_coercion.c', 'src/multiarray/array_converter.c', diff --git a/numpy/_core/src/multiarray/alloc.cpp b/numpy/_core/src/multiarray/alloc.c similarity index 91% rename from numpy/_core/src/multiarray/alloc.cpp rename to numpy/_core/src/multiarray/alloc.c index b3e73211f6a2..386fcb086863 100644 --- a/numpy/_core/src/multiarray/alloc.cpp +++ 
b/numpy/_core/src/multiarray/alloc.c @@ -1,6 +1,5 @@ #define NPY_NO_DEPRECATED_API NPY_API_VERSION #define _MULTIARRAYMODULE -extern "C" { #define PY_SSIZE_T_CLEAN #include @@ -29,10 +28,17 @@ extern "C" { #endif -/* Do not enable the alloc cache if ASAN or MSAN instrumentation is enabled. - * The cache makes ASAN use-after-free or MSAN - * use-of-uninitialized-memory warnings less useful. */ -#if defined(__has_feature) +/* + * CPython uses mimalloc on the free-threaded build, which we trust to cache + * allocations better than we can. + */ +#ifdef Py_GIL_DISABLED +# define USE_ALLOC_CACHE 0 +/* + * The cache makes ASAN use-after-free or MSAN use-of-uninitialized-memory + * warnings less useful. + */ +#elif defined(__has_feature) # if __has_feature(address_sanitizer) || __has_feature(memory_sanitizer) # define USE_ALLOC_CACHE 0 # endif @@ -50,46 +56,8 @@ typedef struct { npy_uintp available; /* number of cached pointers */ void * ptrs[NCACHE]; } cache_bucket; - -static NPY_TLS cache_bucket _datacache[NBUCKETS]; -static NPY_TLS cache_bucket _dimcache[NBUCKETS_DIM]; - -// See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=61991 -// gcc has a bug where if the thread local variable -// is unused then in some cases it's destructor may not get -// called at thread exit. So to workaround this, we access the -// datacache and dimcache through this struct so that -// cache_destructor gets initialized and used, ensuring that -// the destructor gets called properly at thread exit. -// The datacache and dimcache are not embedded in this struct -// because that would make this struct very large and certain -// platforms like armhf can crash while allocating that large -// TLS block. 
-typedef struct cache_destructor { - cache_bucket *dimcache; - cache_bucket *datacache; - cache_destructor() { - dimcache = &_dimcache[0]; - datacache = &_datacache[0]; - } - ~cache_destructor() { - for (npy_uint i = 0; i < NBUCKETS; ++i) { - while (datacache[i].available > 0) { - PyMem_RawFree(datacache[i].ptrs[--datacache[i].available]); - } - } - for (npy_uint i = 0; i < NBUCKETS_DIM; ++i) { - while (dimcache[i].available > 0) { - PyMem_RawFree(dimcache[i].ptrs[--dimcache[i].available]); - } - } - } -} cache_destructor; - -static NPY_TLS cache_destructor tls_cache_destructor; - -#define datacache tls_cache_destructor.datacache -#define dimcache tls_cache_destructor.dimcache +static cache_bucket datacache[NBUCKETS]; +static cache_bucket dimcache[NBUCKETS_DIM]; /* * This function tells whether NumPy attempts to call `madvise` with @@ -694,5 +662,3 @@ _Npy_MallocWithOverflowCheck(npy_intp size, npy_intp elsize) } return PyMem_MALLOC(total_size); } - -} /* extern "C" */ diff --git a/numpy/_core/src/multiarray/alloc.h b/numpy/_core/src/multiarray/alloc.h index c7c0f6d2154e..bef6407a28a3 100644 --- a/numpy/_core/src/multiarray/alloc.h +++ b/numpy/_core/src/multiarray/alloc.h @@ -1,9 +1,6 @@ #ifndef NUMPY_CORE_SRC_MULTIARRAY_ALLOC_H_ #define NUMPY_CORE_SRC_MULTIARRAY_ALLOC_H_ -#ifdef __cplusplus -extern "C" { -#endif #define NPY_NO_DEPRECATED_API NPY_API_VERSION #define _MULTIARRAYMODULE #include "numpy/ndarraytypes.h" @@ -119,8 +116,4 @@ _npy_free_workspace(void *buf, void *static_buf) #define npy_free_workspace(NAME) \ _npy_free_workspace(NAME, NAME##_static) -#ifdef __cplusplus -} /* extern "C" */ -#endif - #endif /* NUMPY_CORE_SRC_MULTIARRAY_ALLOC_H_ */ diff --git a/numpy/_core/src/multiarray/multiarraymodule.h b/numpy/_core/src/multiarray/multiarraymodule.h index 4ce211f4339b..7b1da2ea9765 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.h +++ b/numpy/_core/src/multiarray/multiarraymodule.h @@ -1,10 +1,6 @@ #ifndef 
NUMPY_CORE_SRC_MULTIARRAY_MULTIARRAYMODULE_H_ #define NUMPY_CORE_SRC_MULTIARRAY_MULTIARRAYMODULE_H_ -#ifdef __cplusplus -extern "C" { -#endif - /* * A struct storing global state for the _multiarray_umath * module. The state is initialized when the module is imported @@ -41,8 +37,5 @@ NPY_VISIBILITY_HIDDEN extern npy_global_state_struct npy_global_state; NPY_NO_EXPORT int get_legacy_print_mode(void); -#ifdef __cplusplus -} -#endif #endif /* NUMPY_CORE_SRC_MULTIARRAY_MULTIARRAYMODULE_H_ */ From b065cce2ca8b390c7f289896bcdc8c0e5decc95a Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Tue, 14 Apr 2026 22:43:56 +0200 Subject: [PATCH 1640/1718] MAINT: fix ruff error (#31246) --- numpy/lib/_stride_tricks_impl.pyi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/lib/_stride_tricks_impl.pyi b/numpy/lib/_stride_tricks_impl.pyi index 7891d8f69bc7..efa3c7bb4e63 100644 --- a/numpy/lib/_stride_tricks_impl.pyi +++ b/numpy/lib/_stride_tricks_impl.pyi @@ -1,5 +1,5 @@ from collections.abc import Iterable -from typing import Any, SupportsIndex, overload +from typing import Any, overload import numpy as np from numpy._typing import ArrayLike, NDArray, _AnyShape, _ArrayLike, _ShapeLike From 4f74f0adcfd3637a48da0ba9bf3b1a7e088db843 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 15 Apr 2026 01:18:17 +0200 Subject: [PATCH 1641/1718] TYP: ``fft.*fft``, ``fft.*fftn`` and ``fft.*fft2`` shape-typing and improved dtype support (#31226) --- doc/release/upcoming_changes/31226.typing.rst | 6 + numpy/fft/_pocketfft.pyi | 1084 ++++++++++++++++- numpy/typing/tests/data/reveal/fft.pyi | 88 +- 3 files changed, 1125 insertions(+), 53 deletions(-) create mode 100644 doc/release/upcoming_changes/31226.typing.rst diff --git a/doc/release/upcoming_changes/31226.typing.rst b/doc/release/upcoming_changes/31226.typing.rst new file mode 100644 index 000000000000..d4ea37779cf7 --- /dev/null +++ b/doc/release/upcoming_changes/31226.typing.rst @@ -0,0 +1,6 @@ 
+``numpy.fft`` typing improvements and preliminary shape-typing support +---------------------------------------------------------------------- +The ``numpy.fft`` functions now support non-``float64``/``complex128`` dtypes and gain +preliminary shape-typing support. For example, the return type of ``numpy.fft.fft`` now +depends on the shape-type of its inputs, falling back to the backward-compatible return +type when the shape-types are unknown at type-checking time. diff --git a/numpy/fft/_pocketfft.pyi b/numpy/fft/_pocketfft.pyi index d34404edb149..cb7c2a2db8f9 100644 --- a/numpy/fft/_pocketfft.pyi +++ b/numpy/fft/_pocketfft.pyi @@ -1,8 +1,15 @@ from collections.abc import Sequence -from typing import Literal as L +from typing import Literal as L, overload -from numpy import complex128, float64 -from numpy._typing import ArrayLike, NDArray, _ArrayLikeNumber_co +import numpy as np +from numpy._typing import ( + NDArray, + _ArrayLike, + _ArrayLikeFloat_co, + _ArrayLikeNumber_co, + _Shape, +) +from numpy._typing._array_like import _DualArrayLike __all__ = [ "fft", @@ -23,115 +30,1102 @@ __all__ = [ type _NormKind = L["backward", "ortho", "forward"] | None +### + +# keep in sync with `ifft` +@overload # Nd complexfloating +def fft[ShapeT: _Shape, DTypeT: np.dtype[np.complexfloating]]( + a: np.ndarray[ShapeT, DTypeT], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, DTypeT]: ... +@overload # Nd float64 | +integer +def fft[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.float64 | np.integer | np.bool]], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.complex128]]: ... +@overload # Nd float32 | float16 +def fft[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.float32 | np.float16]], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.complex64]]: ... 
+@overload # Nd longdouble +def fft[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.longdouble]], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.clongdouble]]: ... +@overload # 1d +complex +def fft( + a: Sequence[complex], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[tuple[int], np.dtype[np.complex128]]: ... +@overload # 2d +complex +def fft( + a: Sequence[Sequence[complex]], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[tuple[int, int], np.dtype[np.complex128]]: ... +@overload # ?d complexfloating +def fft[ScalarT: np.complexfloating]( + a: _ArrayLike[ScalarT], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> NDArray[ScalarT]: ... +@overload # ?d +complex +def fft( + a: _DualArrayLike[np.dtype[np.float64 | np.integer | np.bool], complex], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> NDArray[np.complex128]: ... +@overload # fallback def fft( - a: ArrayLike, + a: _ArrayLikeNumber_co, + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> NDArray[np.complexfloating]: ... +@overload # out: +def fft[ArrayT: NDArray[np.complexfloating]]( + a: _ArrayLikeNumber_co, n: int | None = None, axis: int = -1, norm: _NormKind = None, - out: NDArray[complex128] | None = None, -) -> NDArray[complex128]: ... + *, + out: ArrayT, +) -> ArrayT: ... +# keep in sync with `fft` +@overload # Nd complexfloating +def ifft[ShapeT: _Shape, DTypeT: np.dtype[np.complexfloating]]( + a: np.ndarray[ShapeT, DTypeT], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, DTypeT]: ... 
+@overload # Nd float64 | +integer +def ifft[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.float64 | np.integer | np.bool]], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.complex128]]: ... +@overload # Nd float32 | float16 +def ifft[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.float32 | np.float16]], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.complex64]]: ... +@overload # Nd longdouble +def ifft[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.longdouble]], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.clongdouble]]: ... +@overload # 1d +complex +def ifft( + a: Sequence[complex], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[tuple[int], np.dtype[np.complex128]]: ... +@overload # 2d +complex +def ifft( + a: Sequence[Sequence[complex]], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[tuple[int, int], np.dtype[np.complex128]]: ... +@overload # ?d complexfloating +def ifft[ScalarT: np.complexfloating]( + a: _ArrayLike[ScalarT], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> NDArray[ScalarT]: ... +@overload # ?d +complex def ifft( - a: ArrayLike, + a: _DualArrayLike[np.dtype[np.float64 | np.integer | np.bool], complex], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> NDArray[np.complex128]: ... +@overload # fallback +def ifft( + a: _ArrayLikeNumber_co, + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> NDArray[np.complexfloating]: ... 
+@overload # out: +def ifft[ArrayT: NDArray[np.complexfloating]]( + a: _ArrayLikeNumber_co, n: int | None = None, axis: int = -1, norm: _NormKind = None, - out: NDArray[complex128] | None = None, -) -> NDArray[complex128]: ... + *, + out: ArrayT, +) -> ArrayT: ... +# keep in sync with `ihfft` +@overload # Nd float64 | +integer +def rfft[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.float64 | np.integer | np.bool]], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.complex128]]: ... +@overload # Nd float32 | float16 +def rfft[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.float32 | np.float16]], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.complex64]]: ... +@overload # Nd longdouble +def rfft[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.longdouble]], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.clongdouble]]: ... +@overload # 1d +float +def rfft( + a: Sequence[float], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[tuple[int], np.dtype[np.complex128]]: ... +@overload # 2d +float def rfft( - a: ArrayLike, + a: Sequence[Sequence[float]], n: int | None = None, axis: int = -1, norm: _NormKind = None, - out: NDArray[complex128] | None = None, -) -> NDArray[complex128]: ... + out: None = None, +) -> np.ndarray[tuple[int, int], np.dtype[np.complex128]]: ... +@overload # ?d +float +def rfft( + a: _DualArrayLike[np.dtype[np.float64 | np.integer | np.bool], float], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> NDArray[np.complex128]: ... +@overload # fallback +def rfft( + a: _ArrayLikeFloat_co, + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> NDArray[np.complexfloating]: ... 
+@overload # out: +def rfft[ArrayT: NDArray[np.complexfloating]]( + a: _ArrayLikeFloat_co, + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + *, + out: ArrayT, +) -> ArrayT: ... +# keep in sync with `hfft` +@overload # Nd floating +def irfft[ShapeT: _Shape, DTypeT: np.dtype[np.floating]]( + a: np.ndarray[ShapeT, DTypeT], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, DTypeT]: ... +@overload # Nd complex128 | +integer +def irfft[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.complex128 | np.integer | np.bool]], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.float64]]: ... +@overload # Nd complex64 +def irfft[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.complex64]], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.float32]]: ... +@overload # Nd clongdouble +def irfft[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.clongdouble]], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.longdouble]]: ... +@overload # 1d +complex +def irfft( + a: Sequence[complex], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[tuple[int], np.dtype[np.float64]]: ... +@overload # 2d +complex def irfft( - a: ArrayLike, + a: Sequence[Sequence[complex]], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[tuple[int, int], np.dtype[np.float64]]: ... +@overload # ?d floating +def irfft[ScalarT: np.floating]( + a: _ArrayLike[ScalarT], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> NDArray[ScalarT]: ... 
+@overload # ?d +complex | complex128 | +integer +def irfft( + a: _DualArrayLike[np.dtype[np.complex128 | np.integer | np.bool], complex], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> NDArray[np.float64]: ... +@overload # fallback +def irfft( + a: _ArrayLikeNumber_co, n: int | None = None, axis: int = -1, norm: _NormKind = None, - out: NDArray[float64] | None = None, -) -> NDArray[float64]: ... + out: None = None, +) -> NDArray[np.floating]: ... +@overload # out: +def irfft[ArrayT: NDArray[np.floating]]( + a: _ArrayLikeNumber_co, + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + *, + out: ArrayT, +) -> ArrayT: ... -# Input array must be compatible with `np.conjugate` +# keep in sync with `irfft` above +@overload # Nd floating +def hfft[ShapeT: _Shape, DTypeT: np.dtype[np.floating]]( + a: np.ndarray[ShapeT, DTypeT], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, DTypeT]: ... +@overload # Nd complex128 | +integer +def hfft[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.complex128 | np.integer | np.bool]], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.float64]]: ... +@overload # Nd complex64 +def hfft[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.complex64]], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.float32]]: ... +@overload # Nd clongdouble +def hfft[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.clongdouble]], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.longdouble]]: ... +@overload # 1d +complex +def hfft( + a: Sequence[complex], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[tuple[int], np.dtype[np.float64]]: ... 
+@overload # 2d +complex +def hfft( + a: Sequence[Sequence[complex]], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[tuple[int, int], np.dtype[np.float64]]: ... +@overload # ?d floating +def hfft[ScalarT: np.floating]( + a: _ArrayLike[ScalarT], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> NDArray[ScalarT]: ... +@overload # ?d +complex | complex128 | +integer +def hfft( + a: _DualArrayLike[np.dtype[np.complex128 | np.integer | np.bool], complex], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> NDArray[np.float64]: ... +@overload # fallback def hfft( a: _ArrayLikeNumber_co, n: int | None = None, axis: int = -1, norm: _NormKind = None, - out: NDArray[float64] | None = None, -) -> NDArray[float64]: ... + out: None = None, +) -> NDArray[np.floating]: ... +@overload # out: +def hfft[ArrayT: NDArray[np.floating]]( + a: _ArrayLikeNumber_co, + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + *, + out: ArrayT, +) -> ArrayT: ... +# keep in sync with `rfft` +@overload # Nd float64 | +integer +def ihfft[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.float64 | np.integer | np.bool]], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.complex128]]: ... +@overload # Nd float32 | float16 +def ihfft[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.float32 | np.float16]], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.complex64]]: ... +@overload # Nd longdouble +def ihfft[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.longdouble]], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.clongdouble]]: ... 
+@overload # 1d +float +def ihfft( + a: Sequence[float], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[tuple[int], np.dtype[np.complex128]]: ... +@overload # 2d +float +def ihfft( + a: Sequence[Sequence[float]], + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[tuple[int, int], np.dtype[np.complex128]]: ... +@overload # ?d +float def ihfft( - a: ArrayLike, + a: _DualArrayLike[np.dtype[np.float64 | np.integer | np.bool], float], n: int | None = None, axis: int = -1, norm: _NormKind = None, - out: NDArray[complex128] | None = None, -) -> NDArray[complex128]: ... + out: None = None, +) -> NDArray[np.complex128]: ... +@overload # fallback +def ihfft( + a: _ArrayLikeFloat_co, + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: None = None, +) -> NDArray[np.complexfloating]: ... +@overload # out: +def ihfft[ArrayT: NDArray[np.complexfloating]]( + a: _ArrayLikeFloat_co, + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + *, + out: ArrayT, +) -> ArrayT: ... +# keep in sync with `ifftn` +@overload # Nd complexfloating +def fftn[ShapeT: _Shape, DTypeT: np.dtype[np.complexfloating]]( + a: np.ndarray[ShapeT, DTypeT], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, DTypeT]: ... +@overload # Nd float64 | +integer +def fftn[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.float64 | np.integer | np.bool]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.complex128]]: ... 
+@overload # Nd float32 | float16 +def fftn[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.float32 | np.float16]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.complex64]]: ... +@overload # Nd longdouble +def fftn[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.longdouble]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.clongdouble]]: ... +@overload # 1d +complex +def fftn( + a: Sequence[complex], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[tuple[int], np.dtype[np.complex128]]: ... +@overload # 2d +complex +def fftn( + a: Sequence[Sequence[complex]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[tuple[int, int], np.dtype[np.complex128]]: ... +@overload # ?d complexfloating +def fftn[ScalarT: np.complexfloating]( + a: _ArrayLike[ScalarT], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> NDArray[ScalarT]: ... +@overload # ?d +complex def fftn( - a: ArrayLike, + a: _DualArrayLike[np.dtype[np.float64 | np.integer | np.bool], complex], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> NDArray[np.complex128]: ... +@overload # fallback +def fftn( + a: _ArrayLikeNumber_co, + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> NDArray[np.complexfloating]: ... 
+@overload # out: +def fftn[ArrayT: NDArray[np.complexfloating]]( + a: _ArrayLikeNumber_co, s: Sequence[int] | None = None, axes: Sequence[int] | None = None, norm: _NormKind = None, - out: NDArray[complex128] | None = None, -) -> NDArray[complex128]: ... + *, + out: ArrayT, +) -> ArrayT: ... +# keep in sync with `fftn` +@overload # Nd complexfloating +def ifftn[ShapeT: _Shape, DTypeT: np.dtype[np.complexfloating]]( + a: np.ndarray[ShapeT, DTypeT], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, DTypeT]: ... +@overload # Nd float64 | +integer +def ifftn[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.float64 | np.integer | np.bool]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.complex128]]: ... +@overload # Nd float32 | float16 +def ifftn[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.float32 | np.float16]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.complex64]]: ... +@overload # Nd longdouble +def ifftn[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.longdouble]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.clongdouble]]: ... +@overload # 1d +complex +def ifftn( + a: Sequence[complex], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[tuple[int], np.dtype[np.complex128]]: ... +@overload # 2d +complex +def ifftn( + a: Sequence[Sequence[complex]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[tuple[int, int], np.dtype[np.complex128]]: ... 
+@overload # ?d complexfloating +def ifftn[ScalarT: np.complexfloating]( + a: _ArrayLike[ScalarT], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> NDArray[ScalarT]: ... +@overload # ?d +complex +def ifftn( + a: _DualArrayLike[np.dtype[np.float64 | np.integer | np.bool], complex], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> NDArray[np.complex128]: ... +@overload # fallback def ifftn( - a: ArrayLike, + a: _ArrayLikeNumber_co, s: Sequence[int] | None = None, axes: Sequence[int] | None = None, norm: _NormKind = None, - out: NDArray[complex128] | None = None, -) -> NDArray[complex128]: ... + out: None = None, +) -> NDArray[np.complexfloating]: ... +@overload # out: +def ifftn[ArrayT: NDArray[np.complexfloating]]( + a: _ArrayLikeNumber_co, + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + *, + out: ArrayT, +) -> ArrayT: ... +# +@overload # Nd float64 | +integer +def rfftn[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.float64 | np.integer | np.bool]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.complex128]]: ... +@overload # Nd float32 | float16 +def rfftn[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.float32 | np.float16]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.complex64]]: ... +@overload # Nd longdouble +def rfftn[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.longdouble]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.clongdouble]]: ... 
+@overload # 1d +float +def rfftn( + a: Sequence[float], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[tuple[int], np.dtype[np.complex128]]: ... +@overload # 2d +float +def rfftn( + a: Sequence[Sequence[float]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[tuple[int, int], np.dtype[np.complex128]]: ... +@overload # ?d +float +def rfftn( + a: _DualArrayLike[np.dtype[np.float64 | np.integer | np.bool], float], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> NDArray[np.complex128]: ... +@overload # fallback def rfftn( - a: ArrayLike, + a: _ArrayLikeFloat_co, + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> NDArray[np.complexfloating]: ... +@overload # out: +def rfftn[ArrayT: NDArray[np.complexfloating]]( + a: _ArrayLikeFloat_co, s: Sequence[int] | None = None, axes: Sequence[int] | None = None, norm: _NormKind = None, - out: NDArray[complex128] | None = None, -) -> NDArray[complex128]: ... + *, + out: ArrayT, +) -> ArrayT: ... +# +@overload # Nd floating +def irfftn[ShapeT: _Shape, DTypeT: np.dtype[np.floating]]( + a: np.ndarray[ShapeT, DTypeT], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, DTypeT]: ... +@overload # Nd complex128 | +integer +def irfftn[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.complex128 | np.integer | np.bool]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.float64]]: ... 
+@overload # Nd complex64 +def irfftn[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.complex64]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.float32]]: ... +@overload # Nd clongdouble +def irfftn[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.clongdouble]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.longdouble]]: ... +@overload # 1d +complex +def irfftn( + a: Sequence[complex], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[tuple[int], np.dtype[np.float64]]: ... +@overload # 2d +complex +def irfftn( + a: Sequence[Sequence[complex]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[tuple[int, int], np.dtype[np.float64]]: ... +@overload # ?d floating +def irfftn[ScalarT: np.floating]( + a: _ArrayLike[ScalarT], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> NDArray[ScalarT]: ... +@overload # ?d +complex | complex128 | +integer def irfftn( - a: ArrayLike, + a: _DualArrayLike[np.dtype[np.complex128 | np.integer | np.bool], complex], s: Sequence[int] | None = None, axes: Sequence[int] | None = None, norm: _NormKind = None, - out: NDArray[float64] | None = None, -) -> NDArray[float64]: ... + out: None = None, +) -> NDArray[np.float64]: ... +@overload # fallback +def irfftn( + a: _ArrayLikeNumber_co, + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: None = None, +) -> NDArray[np.floating]: ... 
+@overload # out: +def irfftn[ArrayT: NDArray[np.floating]]( + a: _ArrayLikeNumber_co, + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + *, + out: ArrayT, +) -> ArrayT: ... +# keep in sync with `ifft2` +@overload # Nd complexfloating +def fft2[ShapeT: _Shape, DTypeT: np.dtype[np.complexfloating]]( + a: np.ndarray[ShapeT, DTypeT], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, DTypeT]: ... +@overload # Nd float64 | +integer +def fft2[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.float64 | np.integer | np.bool]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.complex128]]: ... +@overload # Nd float32 | float16 +def fft2[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.float32 | np.float16]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.complex64]]: ... +@overload # Nd longdouble +def fft2[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.longdouble]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.clongdouble]]: ... +@overload # 1d +complex +def fft2( + a: Sequence[complex], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[tuple[int], np.dtype[np.complex128]]: ... +@overload # 2d +complex def fft2( - a: ArrayLike, + a: Sequence[Sequence[complex]], s: Sequence[int] | None = None, axes: Sequence[int] | None = (-2, -1), norm: _NormKind = None, - out: NDArray[complex128] | None = None, -) -> NDArray[complex128]: ... 
+ out: None = None, +) -> np.ndarray[tuple[int, int], np.dtype[np.complex128]]: ... +@overload # ?d complexfloating +def fft2[ScalarT: np.complexfloating]( + a: _ArrayLike[ScalarT], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> NDArray[ScalarT]: ... +@overload # ?d +complex +def fft2( + a: _DualArrayLike[np.dtype[np.float64 | np.integer | np.bool], complex], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> NDArray[np.complex128]: ... +@overload # fallback +def fft2( + a: _ArrayLikeNumber_co, + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> NDArray[np.complexfloating]: ... +@overload # out: +def fft2[ArrayT: NDArray[np.complexfloating]]( + a: _ArrayLikeNumber_co, + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + *, + out: ArrayT, +) -> ArrayT: ... +# keep in sync with `fft2` +@overload # Nd complexfloating +def ifft2[ShapeT: _Shape, DTypeT: np.dtype[np.complexfloating]]( + a: np.ndarray[ShapeT, DTypeT], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, DTypeT]: ... +@overload # Nd float64 | +integer +def ifft2[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.float64 | np.integer | np.bool]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.complex128]]: ... +@overload # Nd float32 | float16 +def ifft2[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.float32 | np.float16]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.complex64]]: ... 
+@overload # Nd longdouble +def ifft2[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.longdouble]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.clongdouble]]: ... +@overload # 1d +complex +def ifft2( + a: Sequence[complex], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[tuple[int], np.dtype[np.complex128]]: ... +@overload # 2d +complex +def ifft2( + a: Sequence[Sequence[complex]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[tuple[int, int], np.dtype[np.complex128]]: ... +@overload # ?d complexfloating +def ifft2[ScalarT: np.complexfloating]( + a: _ArrayLike[ScalarT], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> NDArray[ScalarT]: ... +@overload # ?d +complex def ifft2( - a: ArrayLike, + a: _DualArrayLike[np.dtype[np.float64 | np.integer | np.bool], complex], s: Sequence[int] | None = None, axes: Sequence[int] | None = (-2, -1), norm: _NormKind = None, - out: NDArray[complex128] | None = None, -) -> NDArray[complex128]: ... + out: None = None, +) -> NDArray[np.complex128]: ... +@overload # fallback +def ifft2( + a: _ArrayLikeNumber_co, + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> NDArray[np.complexfloating]: ... +@overload # out: +def ifft2[ArrayT: NDArray[np.complexfloating]]( + a: _ArrayLikeNumber_co, + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + *, + out: ArrayT, +) -> ArrayT: ... 
+# +@overload # Nd float64 | +integer +def rfft2[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.float64 | np.integer | np.bool]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.complex128]]: ... +@overload # Nd float32 | float16 +def rfft2[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.float32 | np.float16]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.complex64]]: ... +@overload # Nd longdouble +def rfft2[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.longdouble]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.clongdouble]]: ... +@overload # 1d +float +def rfft2( + a: Sequence[float], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[tuple[int], np.dtype[np.complex128]]: ... +@overload # 2d +float +def rfft2( + a: Sequence[Sequence[float]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[tuple[int, int], np.dtype[np.complex128]]: ... +@overload # ?d +float def rfft2( - a: ArrayLike, + a: _DualArrayLike[np.dtype[np.float64 | np.integer | np.bool], float], s: Sequence[int] | None = None, axes: Sequence[int] | None = (-2, -1), norm: _NormKind = None, - out: NDArray[complex128] | None = None, -) -> NDArray[complex128]: ... + out: None = None, +) -> NDArray[np.complex128]: ... +@overload # fallback +def rfft2( + a: _ArrayLikeFloat_co, + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> NDArray[np.complexfloating]: ... 
+@overload # out: +def rfft2[ArrayT: NDArray[np.complexfloating]]( + a: _ArrayLikeFloat_co, + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + *, + out: ArrayT, +) -> ArrayT: ... +# +@overload # Nd floating +def irfft2[ShapeT: _Shape, DTypeT: np.dtype[np.floating]]( + a: np.ndarray[ShapeT, DTypeT], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, DTypeT]: ... +@overload # Nd complex128 | +integer +def irfft2[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.complex128 | np.integer | np.bool]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.float64]]: ... +@overload # Nd complex64 +def irfft2[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.complex64]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.float32]]: ... +@overload # Nd clongdouble +def irfft2[ShapeT: _Shape]( + a: np.ndarray[ShapeT, np.dtype[np.clongdouble]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[ShapeT, np.dtype[np.longdouble]]: ... +@overload # 1d +complex +def irfft2( + a: Sequence[complex], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[tuple[int], np.dtype[np.float64]]: ... +@overload # 2d +complex +def irfft2( + a: Sequence[Sequence[complex]], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> np.ndarray[tuple[int, int], np.dtype[np.float64]]: ... 
+@overload # ?d floating +def irfft2[ScalarT: np.floating]( + a: _ArrayLike[ScalarT], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> NDArray[ScalarT]: ... +@overload # ?d +complex | complex128 | +integer def irfft2( - a: ArrayLike, + a: _DualArrayLike[np.dtype[np.complex128 | np.integer | np.bool], complex], + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> NDArray[np.float64]: ... +@overload # fallback +def irfft2( + a: _ArrayLikeNumber_co, + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: None = None, +) -> NDArray[np.floating]: ... +@overload # out: +def irfft2[ArrayT: NDArray[np.floating]]( + a: _ArrayLikeNumber_co, s: Sequence[int] | None = None, axes: Sequence[int] | None = (-2, -1), norm: _NormKind = None, - out: NDArray[float64] | None = None, -) -> NDArray[float64]: ... + *, + out: ArrayT, +) -> ArrayT: ... 
diff --git a/numpy/typing/tests/data/reveal/fft.pyi b/numpy/typing/tests/data/reveal/fft.pyi index c7aa2ded556c..c5875209603f 100644 --- a/numpy/typing/tests/data/reveal/fft.pyi +++ b/numpy/typing/tests/data/reveal/fft.pyi @@ -11,6 +11,7 @@ type _Array2D[ScalarT: np.generic] = np.ndarray[tuple[int, int], np.dtype[Scalar _f64_nd: npt.NDArray[np.float64] _c128_nd: npt.NDArray[np.complex128] _py_float_1d: list[float] +_py_complex_1d: list[complex] _i64: np.int64 _f32: np.float16 @@ -19,6 +20,7 @@ _c64: np.complex64 _c160: np.clongdouble _i64_2d: _Array2D[np.int64] +_f16_2d: _Array2D[np.float16] _f32_2d: _Array2D[np.float32] _f80_2d: _Array2D[np.longdouble] _c64_2d: _Array2D[np.complex64] @@ -95,23 +97,93 @@ assert_type(np.fft.rfftfreq(5, _f32_nd), npt.NDArray[np.float64]) assert_type(np.fft.rfftfreq(5, _f80_nd), npt.NDArray[np.longdouble]) assert_type(np.fft.rfftfreq(5, _c64_nd), npt.NDArray[np.complex128]) assert_type(np.fft.rfftfreq(5, _c160_nd), npt.NDArray[np.clongdouble]) -... -# the other fft functions +# *fft assert_type(np.fft.fft(_f64_nd), npt.NDArray[np.complex128]) -assert_type(np.fft.ifft(_f64_nd, axis=1), npt.NDArray[np.complex128]) -assert_type(np.fft.rfft(_f64_nd, n=None), npt.NDArray[np.complex128]) -assert_type(np.fft.irfft(_f64_nd, norm="ortho"), npt.NDArray[np.float64]) -assert_type(np.fft.hfft(_f64_nd, n=2), npt.NDArray[np.float64]) +assert_type(np.fft.fft(_i64_2d), _Array2D[np.complex128]) +assert_type(np.fft.fft(_f32_2d), _Array2D[np.complex64]) +assert_type(np.fft.fft(_c64_2d), _Array2D[np.complex64]) +assert_type(np.fft.fft(_py_float_1d), _Array1D[np.complex128]) + +assert_type(np.fft.ifft(_f64_nd), npt.NDArray[np.complex128]) +assert_type(np.fft.ifft(_i64_2d), _Array2D[np.complex128]) +assert_type(np.fft.ifft(_f32_2d), _Array2D[np.complex64]) +assert_type(np.fft.ifft(_c64_2d), _Array2D[np.complex64]) +assert_type(np.fft.ifft(_py_float_1d), _Array1D[np.complex128]) + +assert_type(np.fft.rfft(_f64_nd), npt.NDArray[np.complex128]) 
+assert_type(np.fft.rfft(_i64_2d), _Array2D[np.complex128]) +assert_type(np.fft.rfft(_f32_2d), _Array2D[np.complex64]) +assert_type(np.fft.rfft(_py_float_1d), _Array1D[np.complex128]) + +assert_type(np.fft.irfft(_f64_nd), npt.NDArray[np.float64]) +assert_type(np.fft.irfft(_i64_2d), _Array2D[np.float64]) +assert_type(np.fft.irfft(_f16_2d), _Array2D[np.float16]) +assert_type(np.fft.irfft(_f32_2d), _Array2D[np.float32]) +assert_type(np.fft.irfft(_c64_2d), _Array2D[np.float32]) +assert_type(np.fft.irfft(_py_complex_1d), _Array1D[np.float64]) + +assert_type(np.fft.hfft(_f64_nd), npt.NDArray[np.float64]) +assert_type(np.fft.hfft(_i64_2d), _Array2D[np.float64]) +assert_type(np.fft.hfft(_f16_2d), _Array2D[np.float16]) +assert_type(np.fft.hfft(_f32_2d), _Array2D[np.float32]) +assert_type(np.fft.hfft(_c64_2d), _Array2D[np.float32]) +assert_type(np.fft.hfft(_py_complex_1d), _Array1D[np.float64]) + assert_type(np.fft.ihfft(_f64_nd), npt.NDArray[np.complex128]) +assert_type(np.fft.ihfft(_i64_2d), _Array2D[np.complex128]) +assert_type(np.fft.ihfft(_f32_2d), _Array2D[np.complex64]) +assert_type(np.fft.ihfft(_py_float_1d), _Array1D[np.complex128]) + +# *fftn assert_type(np.fft.fftn(_f64_nd), npt.NDArray[np.complex128]) +assert_type(np.fft.fftn(_i64_2d), _Array2D[np.complex128]) +assert_type(np.fft.fftn(_f32_2d), _Array2D[np.complex64]) +assert_type(np.fft.fftn(_c64_2d), _Array2D[np.complex64]) +assert_type(np.fft.fftn(_py_float_1d), _Array1D[np.complex128]) + assert_type(np.fft.ifftn(_f64_nd), npt.NDArray[np.complex128]) +assert_type(np.fft.ifftn(_i64_2d), _Array2D[np.complex128]) +assert_type(np.fft.ifftn(_f32_2d), _Array2D[np.complex64]) +assert_type(np.fft.ifftn(_c64_2d), _Array2D[np.complex64]) +assert_type(np.fft.ifftn(_py_float_1d), _Array1D[np.complex128]) + assert_type(np.fft.rfftn(_f64_nd), npt.NDArray[np.complex128]) +assert_type(np.fft.rfftn(_i64_2d), _Array2D[np.complex128]) +assert_type(np.fft.rfftn(_f32_2d), _Array2D[np.complex64]) 
+assert_type(np.fft.rfftn(_py_float_1d), _Array1D[np.complex128]) + assert_type(np.fft.irfftn(_f64_nd), npt.NDArray[np.float64]) +assert_type(np.fft.irfftn(_i64_2d), _Array2D[np.float64]) +assert_type(np.fft.irfftn(_f16_2d), _Array2D[np.float16]) +assert_type(np.fft.irfftn(_f32_2d), _Array2D[np.float32]) +assert_type(np.fft.irfftn(_c64_2d), _Array2D[np.float32]) +assert_type(np.fft.irfftn(_py_complex_1d), _Array1D[np.float64]) + +# *fft2 -assert_type(np.fft.rfft2(_f64_nd), npt.NDArray[np.complex128]) -assert_type(np.fft.ifft2(_f64_nd), npt.NDArray[np.complex128]) assert_type(np.fft.fft2(_f64_nd), npt.NDArray[np.complex128]) +assert_type(np.fft.fft2(_i64_2d), _Array2D[np.complex128]) +assert_type(np.fft.fft2(_f32_2d), _Array2D[np.complex64]) +assert_type(np.fft.fft2(_c64_2d), _Array2D[np.complex64]) +assert_type(np.fft.fft2(_py_float_1d), _Array1D[np.complex128]) + +assert_type(np.fft.ifft2(_f64_nd), npt.NDArray[np.complex128]) +assert_type(np.fft.ifft2(_i64_2d), _Array2D[np.complex128]) +assert_type(np.fft.ifft2(_f32_2d), _Array2D[np.complex64]) +assert_type(np.fft.ifft2(_c64_2d), _Array2D[np.complex64]) +assert_type(np.fft.ifft2(_py_float_1d), _Array1D[np.complex128]) + +assert_type(np.fft.rfft2(_f64_nd), npt.NDArray[np.complex128]) +assert_type(np.fft.rfft2(_i64_2d), _Array2D[np.complex128]) +assert_type(np.fft.rfft2(_f32_2d), _Array2D[np.complex64]) +assert_type(np.fft.rfft2(_py_float_1d), _Array1D[np.complex128]) + assert_type(np.fft.irfft2(_f64_nd), npt.NDArray[np.float64]) +assert_type(np.fft.irfft2(_i64_2d), _Array2D[np.float64]) +assert_type(np.fft.irfft2(_f16_2d), _Array2D[np.float16]) +assert_type(np.fft.irfft2(_f32_2d), _Array2D[np.float32]) +assert_type(np.fft.irfft2(_c64_2d), _Array2D[np.float32]) +assert_type(np.fft.irfft2(_py_complex_1d), _Array1D[np.float64]) From df889e7b545f0ba5b9552d797662eacb058c26b4 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 16 Apr 2026 14:18:56 +0200 Subject: [PATCH 1642/1718] BUG: Make a bug sweep for missing 
error handling, etc. (#31251) --- numpy/_core/src/multiarray/convert_datatype.c | 12 +++++- numpy/_core/src/multiarray/ctors.c | 5 +++ numpy/_core/src/multiarray/descriptor.c | 1 + numpy/_core/src/multiarray/dtype_transfer.c | 5 +++ numpy/_core/src/multiarray/dtypemeta.c | 3 ++ numpy/_core/src/multiarray/iterators.c | 4 +- numpy/_core/src/multiarray/nditer_api.c | 9 +++- numpy/_core/src/multiarray/nditer_constr.c | 4 ++ .../_core/src/multiarray/stringdtype/dtype.c | 1 + .../multiarray/stringdtype/static_string.c | 3 ++ .../src/multiarray/textreading/field_types.c | 2 + numpy/_core/src/umath/stringdtype_ufuncs.cpp | 42 ++++++++++++++++--- numpy/_core/src/umath/ufunc_object.c | 34 +++++++++++---- numpy/_core/src/umath/wrapping_array_method.c | 2 +- 14 files changed, 107 insertions(+), 20 deletions(-) diff --git a/numpy/_core/src/multiarray/convert_datatype.c b/numpy/_core/src/multiarray/convert_datatype.c index caea50760dc3..f404faab022a 100644 --- a/numpy/_core/src/multiarray/convert_datatype.c +++ b/numpy/_core/src/multiarray/convert_datatype.c @@ -232,6 +232,7 @@ PyArray_GetBoundCastingImpl(PyArray_DTypeMeta *from, PyArray_DTypeMeta *to) res->dtypes = PyMem_Malloc(2 * sizeof(PyArray_DTypeMeta *)); if (res->dtypes == NULL) { Py_DECREF(res); + PyErr_NoMemory(); return NULL; } Py_INCREF(from); @@ -1935,14 +1936,21 @@ PyArray_ConvertToCommonType(PyObject *op, int *retn) PyArray_Descr *common_descr = NULL; PyArrayObject **mps = NULL; - *retn = n = PySequence_Length(op); - if (n == 0) { + Py_ssize_t length = PySequence_Length(op); + if (length == 0) { PyErr_SetString(PyExc_ValueError, "0-length sequence."); } if (PyErr_Occurred()) { *retn = 0; return NULL; } + if (length > INT_MAX) { + PyErr_SetString(PyExc_ValueError, + "sequence too large to convert in common type."); + *retn = 0; + return NULL; + } + *retn = n = (int)length; mps = (PyArrayObject **)PyDataMem_NEW(n*sizeof(PyArrayObject *)); if (mps == NULL) { *retn = 0; diff --git a/numpy/_core/src/multiarray/ctors.c 
b/numpy/_core/src/multiarray/ctors.c index 34964b3fabfa..49b83d7c9e79 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -1885,6 +1885,7 @@ PyArray_CheckFromAny_int(PyObject *op, PyArray_Descr *in_descr, PyObject *ret; if (requirements & NPY_ARRAY_ENSURENOCOPY) { PyErr_SetString(PyExc_ValueError, npy_no_copy_err_msg); + Py_DECREF(obj); return NULL; } ret = PyArray_NewCopy((PyArrayObject *)obj, NPY_ANYORDER); @@ -3620,6 +3621,10 @@ array_from_text(PyArray_Descr *dtype, npy_intp num, char const *sep, size_t *nre thisbuf += 1; dptr += dtype->elsize; if (num < 0 && thisbuf == size) { + if (totalbytes > NPY_MAX_INTP - bytes) { + err = 1; + break; + } totalbytes += bytes; /* The handler is always valid */ tmp = PyDataMem_UserRENEW(PyArray_DATA(r), totalbytes, diff --git a/numpy/_core/src/multiarray/descriptor.c b/numpy/_core/src/multiarray/descriptor.c index e80ec01dfe83..a347ff4cca52 100644 --- a/numpy/_core/src/multiarray/descriptor.c +++ b/numpy/_core/src/multiarray/descriptor.c @@ -413,6 +413,7 @@ _convert_from_array_descr(PyObject *obj, int align) int totalsize = 0; PyObject *fields = PyDict_New(); if (!fields) { + Py_DECREF(nameslist); return NULL; } for (int i = 0; i < n; i++) { diff --git a/numpy/_core/src/multiarray/dtype_transfer.c b/numpy/_core/src/multiarray/dtype_transfer.c index 1b98c9add0a9..daea361cefeb 100644 --- a/numpy/_core/src/multiarray/dtype_transfer.c +++ b/numpy/_core/src/multiarray/dtype_transfer.c @@ -169,6 +169,9 @@ _any_to_object_auxdata_clone(NpyAuxData *auxdata) _any_to_object_auxdata *data = (_any_to_object_auxdata *)auxdata; _any_to_object_auxdata *res = PyMem_Malloc(sizeof(_any_to_object_auxdata)); + if (res == NULL) { + return NULL; + } res->base = data->base; res->getitem = data->getitem; @@ -348,6 +351,7 @@ object_to_any_get_loop( /* NOTE: auxdata is only really necessary to flag `move_references` */ _object_to_any_auxdata *data = PyMem_Malloc(sizeof(*data)); if (data == NULL) { + 
PyErr_NoMemory(); return -1; } data->base.free = &_object_to_any_auxdata_free; @@ -1576,6 +1580,7 @@ static NpyAuxData *_n_to_n_data_clone(NpyAuxData *data) if (NPY_cast_info_copy(&newdata->wrapped, &d->wrapped) < 0) { _n_to_n_data_free((NpyAuxData *)newdata); + return NULL; } return (NpyAuxData *)newdata; diff --git a/numpy/_core/src/multiarray/dtypemeta.c b/numpy/_core/src/multiarray/dtypemeta.c index d1f5a3f93cab..7320c5c9fa9a 100644 --- a/numpy/_core/src/multiarray/dtypemeta.c +++ b/numpy/_core/src/multiarray/dtypemeta.c @@ -201,6 +201,7 @@ dtypemeta_initialize_struct_from_spec( DType->flags = spec->flags; DType->dt_slots = PyMem_Calloc(1, sizeof(NPY_DType_Slots)); if (DType->dt_slots == NULL) { + PyErr_NoMemory(); return -1; } @@ -1121,6 +1122,7 @@ dtypemeta_wrap_legacy_descriptor( NPY_DType_Slots *dt_slots = PyMem_Malloc(sizeof(NPY_DType_Slots)); if (dt_slots == NULL) { + PyErr_NoMemory(); return NULL; } memset(dt_slots, '\0', sizeof(NPY_DType_Slots)); @@ -1129,6 +1131,7 @@ dtypemeta_wrap_legacy_descriptor( PyArray_DTypeMeta *dtype_class = PyMem_Malloc(sizeof(PyArray_DTypeMeta)); if (dtype_class == NULL) { PyMem_Free(dt_slots); + PyErr_NoMemory(); return NULL; } diff --git a/numpy/_core/src/multiarray/iterators.c b/numpy/_core/src/multiarray/iterators.c index ae4797f59a86..589961f51615 100644 --- a/numpy/_core/src/multiarray/iterators.c +++ b/numpy/_core/src/multiarray/iterators.c @@ -129,11 +129,11 @@ PyArray_IterNew(PyObject *obj) } it = (PyArrayIterObject *)PyArray_malloc(sizeof(PyArrayIterObject)); - PyObject_Init((PyObject *)it, &PyArrayIter_Type); - /* it = PyObject_New(PyArrayIterObject, &PyArrayIter_Type);*/ if (it == NULL) { + PyErr_NoMemory(); return NULL; } + PyObject_Init((PyObject *)it, &PyArrayIter_Type); Py_INCREF(ao); /* PyArray_RawIterBaseInit steals a reference */ PyArray_RawIterBaseInit(it, ao); diff --git a/numpy/_core/src/multiarray/nditer_api.c b/numpy/_core/src/multiarray/nditer_api.c index da58489c6b9d..e0baeeeadb53 100644 --- 
a/numpy/_core/src/multiarray/nditer_api.c +++ b/numpy/_core/src/multiarray/nditer_api.c @@ -1738,7 +1738,12 @@ npyiter_allocate_buffers(NpyIter *iter, char **errmsg) */ if (!(flags&NPY_OP_ITFLAG_BUFNEVER)) { npy_intp itemsize = op_dtype[iop]->elsize; - buffer = PyArray_malloc(itemsize*buffersize); + npy_intp alloc_size; + buffer = NULL; + if (!npy_mul_sizes_with_overflow( + &alloc_size, itemsize, buffersize)) { + buffer = PyArray_malloc(alloc_size); + } if (buffer == NULL) { if (errmsg == NULL) { PyErr_NoMemory(); @@ -1749,7 +1754,7 @@ npyiter_allocate_buffers(NpyIter *iter, char **errmsg) goto fail; } if (PyDataType_FLAGCHK(op_dtype[iop], NPY_NEEDS_INIT)) { - memset(buffer, '\0', itemsize*buffersize); + memset(buffer, '\0', alloc_size); } buffers[iop] = buffer; } diff --git a/numpy/_core/src/multiarray/nditer_constr.c b/numpy/_core/src/multiarray/nditer_constr.c index ffe37e80c9be..4e6324c2ad64 100644 --- a/numpy/_core/src/multiarray/nditer_constr.c +++ b/numpy/_core/src/multiarray/nditer_constr.c @@ -538,6 +538,10 @@ NpyIter_Copy(NpyIter *iter) /* Allocate memory for the new iterator */ size = NIT_SIZEOF_ITERATOR(itflags, ndim, nop); newiter = (NpyIter*)PyObject_Malloc(size); + if (newiter == NULL) { + PyErr_NoMemory(); + return NULL; + } /* Copy the raw values to the new iterator */ memcpy(newiter, iter, size); diff --git a/numpy/_core/src/multiarray/stringdtype/dtype.c b/numpy/_core/src/multiarray/stringdtype/dtype.c index 7abf4b9303af..60f3db200d32 100644 --- a/numpy/_core/src/multiarray/stringdtype/dtype.c +++ b/numpy/_core/src/multiarray/stringdtype/dtype.c @@ -101,6 +101,7 @@ new_stringdtype_instance(PyObject *na_object, int coerce) na_name.buf = PyMem_RawMalloc(size); if (na_name.buf == NULL) { Py_DECREF(na_pystr); + PyErr_NoMemory(); goto fail; } memcpy((char *)na_name.buf, utf8_ptr, size); diff --git a/numpy/_core/src/multiarray/stringdtype/static_string.c b/numpy/_core/src/multiarray/stringdtype/static_string.c index c437fab2d336..84ad9646838e 100644 
--- a/numpy/_core/src/multiarray/stringdtype/static_string.c +++ b/numpy/_core/src/multiarray/stringdtype/static_string.c @@ -697,6 +697,9 @@ NpyString_dup(const npy_packed_static_string *in, int used_malloc = 0; if (in_allocator == out_allocator && !is_short_string(in)) { in_buf = in_allocator->malloc(size); + if (in_buf == NULL) { + return -1; + } memcpy(in_buf, vstring_buffer(arena, in_u), size); used_malloc = 1; } diff --git a/numpy/_core/src/multiarray/textreading/field_types.c b/numpy/_core/src/multiarray/textreading/field_types.c index 87b86a64940f..0a7ad3b8b815 100644 --- a/numpy/_core/src/multiarray/textreading/field_types.c +++ b/numpy/_core/src/multiarray/textreading/field_types.c @@ -158,6 +158,7 @@ field_type_grow_recursive(PyArray_Descr *descr, field_type *new_ft = PyMem_Realloc(*ft, alloc_size); if (new_ft == NULL) { field_types_xclear(num_field_types, *ft); + PyErr_NoMemory(); return -1; } *ft = new_ft; @@ -195,6 +196,7 @@ field_types_create(PyArray_Descr *descr, field_type **ft) npy_intp ft_size = 4; *ft = PyMem_Malloc(ft_size * sizeof(field_type)); if (*ft == NULL) { + PyErr_NoMemory(); return -1; } return field_type_grow_recursive(descr, 0, ft, &ft_size, 0); diff --git a/numpy/_core/src/umath/stringdtype_ufuncs.cpp b/numpy/_core/src/umath/stringdtype_ufuncs.cpp index 8e9c3ddbe40c..169c789a5651 100644 --- a/numpy/_core/src/umath/stringdtype_ufuncs.cpp +++ b/numpy/_core/src/umath/stringdtype_ufuncs.cpp @@ -1119,6 +1119,11 @@ string_lrstrip_chars_strided_loop( } { char *new_buf = (char *)PyMem_RawCalloc(s1.size, 1); + if (new_buf == NULL) { + npy_gil_error(PyExc_MemoryError, + "Failed to allocate string in %s", ufunc_name); + goto fail; + } Buffer buf1((char *)s1.buf, s1.size); Buffer buf2((char *)s2.buf, s2.size); Buffer outbuf(new_buf, s1.size); @@ -1241,6 +1246,11 @@ string_lrstrip_whitespace_strided_loop( } { char *new_buf = (char *)PyMem_RawCalloc(s.size, 1); + if (new_buf == NULL) { + npy_gil_error(PyExc_MemoryError, + "Failed to allocate 
string in %s", ufunc_name); + goto fail; + } Buffer buf((char *)s.buf, s.size); Buffer outbuf(new_buf, s.size); size_t new_buf_size = string_lrstrip_whitespace( @@ -1249,6 +1259,7 @@ string_lrstrip_whitespace_strided_loop( if (NpyString_pack(oallocator, ops, new_buf, new_buf_size) < 0) { npy_gil_error(PyExc_MemoryError, "Failed to pack string in %s", ufunc_name); + PyMem_RawFree(new_buf); goto fail; } @@ -1444,18 +1455,30 @@ string_replace_strided_loop( Buffer buf3((char *)i3s.buf, i3s.size); // conservatively overallocate - // TODO check overflow - size_t max_size; + size_t num_repl, growth; if (i2s.size == 0) { // interleaving - max_size = i1s.size + (i1s.size + 1)*(i3s.size); + num_repl = (size_t)i1s.size + 1; + growth = i3s.size; } else { // replace i2 with i3 - size_t change = i2s.size >= i3s.size ? 0 : i3s.size - i2s.size; - max_size = i1s.size + count * change; + num_repl = (size_t)count; + growth = i2s.size >= i3s.size ? 0 : i3s.size - i2s.size; + } + char *new_buf = NULL; + size_t max_size; + if (!npy_mul_with_overflow_size_t(&max_size, num_repl, growth)) { + max_size += i1s.size; + if (max_size >= i1s.size) { + new_buf = (char *)PyMem_RawCalloc(max_size, 1); + } + } + if (new_buf == NULL) { + npy_gil_error(PyExc_MemoryError, + "Failed to allocate string in replace"); + goto fail; } - char *new_buf = (char *)PyMem_RawCalloc(max_size, 1); Buffer outbuf(new_buf, max_size); size_t new_buf_size = string_replace( @@ -1463,6 +1486,7 @@ string_replace_strided_loop( if (NpyString_pack(oallocator, ops, new_buf, new_buf_size) < 0) { npy_gil_error(PyExc_MemoryError, "Failed to pack string in replace"); + PyMem_RawFree(new_buf); goto fail; } @@ -1575,6 +1599,11 @@ string_expandtabs_strided_loop(PyArrayMethod_Context *context, } char *new_buf = (char *)PyMem_RawCalloc(new_buf_size, 1); + if (new_buf == NULL) { + npy_gil_error(PyExc_MemoryError, + "Failed to allocate string in expandtabs"); + goto fail; + } Buffer outbuf(new_buf, new_buf_size); string_expandtabs(buf, 
tabsize, outbuf); @@ -1582,6 +1611,7 @@ string_expandtabs_strided_loop(PyArrayMethod_Context *context, if (NpyString_pack(oallocator, ops, new_buf, new_buf_size) < 0) { npy_gil_error( PyExc_MemoryError, "Failed to pack string in expandtabs"); + PyMem_RawFree(new_buf); goto fail; } diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index 1f9da112df0a..7bc524bd6040 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -327,9 +327,11 @@ _parse_signature(PyUFuncObject *ufunc, const char *signature) } len = strlen(signature); ufunc->core_signature = PyArray_malloc(sizeof(char) * (len+1)); - if (ufunc->core_signature) { - strcpy(ufunc->core_signature, signature); + if (ufunc->core_signature == NULL) { + PyErr_NoMemory(); + return -1; } + strcpy(ufunc->core_signature, signature); /* Allocate sufficient memory to store pointers to all dimension names */ var_names = PyArray_malloc(sizeof(char const*) * len); if (var_names == NULL) { @@ -479,14 +481,28 @@ _parse_signature(PyUFuncObject *ufunc, const char *signature) parse_error = "incomplete signature: not all arguments found"; goto fail; } - ufunc->core_dim_ixs = PyArray_realloc(ufunc->core_dim_ixs, + void *tmp; + tmp = PyArray_realloc(ufunc->core_dim_ixs, sizeof(int) * cur_core_dim); - ufunc->core_dim_sizes = PyArray_realloc( - ufunc->core_dim_sizes, + if (tmp == NULL) { + PyErr_NoMemory(); + goto fail; + } + ufunc->core_dim_ixs = tmp; + tmp = PyArray_realloc(ufunc->core_dim_sizes, sizeof(npy_intp) * ufunc->core_num_dim_ix); - ufunc->core_dim_flags = PyArray_realloc( - ufunc->core_dim_flags, + if (tmp == NULL) { + PyErr_NoMemory(); + goto fail; + } + ufunc->core_dim_sizes = tmp; + tmp = PyArray_realloc(ufunc->core_dim_flags, sizeof(npy_uint32) * ufunc->core_num_dim_ix); + if (tmp == NULL) { + PyErr_NoMemory(); + goto fail; + } + ufunc->core_dim_flags = tmp; /* check for trivial core-signature, e.g. 
"(),()->()" */ if (cur_core_dim == 0) { @@ -6688,6 +6704,10 @@ ufunc_get_types(PyUFuncObject *ufunc, void *NPY_UNUSED(ignored)) return NULL; } t = PyArray_malloc(no+ni+2); + if (t == NULL) { + Py_DECREF(list); + return PyErr_NoMemory(); + } n = 0; for (k = 0; k < nt; k++) { for (j = 0; jmethod->wrapped_meth->get_reduction_initial( &orig_context, reduction_is_empty, item); for (int i = 0; i < nin + nout; i++) { - Py_DECREF(orig_descrs); + Py_DECREF(orig_descrs[i]); } return res; } From d333067878bf77c809283cdf178917630f9f0b8d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 16 Apr 2026 06:20:39 -0600 Subject: [PATCH 1643/1718] MAINT: Bump pyrefly from 0.59.1 to 0.60.0 in /requirements (#31245) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements/typing_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/typing_requirements.txt b/requirements/typing_requirements.txt index f8e9c6f73b8b..4d799640ee83 100644 --- a/requirements/typing_requirements.txt +++ b/requirements/typing_requirements.txt @@ -3,4 +3,4 @@ -r test_requirements.txt mypy==1.20.0 -pyrefly==0.59.1 +pyrefly==0.60.0 From 67a64d172eb95fa51a1c719bb227fa0033fcedef Mon Sep 17 00:00:00 2001 From: Kumar Aditya Date: Fri, 17 Apr 2026 01:42:21 +0530 Subject: [PATCH 1644/1718] ENH: mark ``_import_array`` function as inline (#31258) --- numpy/_core/code_generators/generate_numpy_api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/code_generators/generate_numpy_api.py b/numpy/_core/code_generators/generate_numpy_api.py index 23d678872ca4..b77cc842c927 100644 --- a/numpy/_core/code_generators/generate_numpy_api.py +++ b/numpy/_core/code_generators/generate_numpy_api.py @@ -58,7 +58,7 @@ #include "numpy/_public_dtype_api_table.h" #if !defined(NO_IMPORT_ARRAY) && !defined(NO_IMPORT) -static int +static 
inline int _import_array(void) { int st; From 927c7bff607a2c8123a53ee71da774652ebad21c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 16 Apr 2026 17:56:28 -0600 Subject: [PATCH 1645/1718] MAINT: Bump actions/github-script from 8.0.0 to 9.0.0 (#31260) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/labeler.yml | 2 +- .github/workflows/mypy_primer_comment.yml | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/labeler.yml b/.github/workflows/labeler.yml index be3e2a925a72..a75319a7fd18 100644 --- a/.github/workflows/labeler.yml +++ b/.github/workflows/labeler.yml @@ -14,7 +14,7 @@ jobs: steps: - name: Label the PR continue-on-error: true - uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 + uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0 with: script: | const yaml = require('js-yaml'); diff --git a/.github/workflows/mypy_primer_comment.yml b/.github/workflows/mypy_primer_comment.yml index e05c0dc5c697..d9a8ab776049 100644 --- a/.github/workflows/mypy_primer_comment.yml +++ b/.github/workflows/mypy_primer_comment.yml @@ -18,7 +18,7 @@ jobs: if: ${{ github.event.workflow_run.conclusion == 'success' }} steps: - name: Download diffs - uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 + uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0 with: script: | const fs = require('fs'); @@ -42,7 +42,7 @@ jobs: - name: Get PR number id: get-pr-number - uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 + uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0 with: script: | const fs = require('fs'); @@ -58,7 +58,7 @@ jobs: - name: Post comment id: post-comment - uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # 
v8.0.0 + uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0 with: github-token: ${{ secrets.GITHUB_TOKEN }} script: | From 1c3b43524b0fa6d354fba010c4e67945ed58ef58 Mon Sep 17 00:00:00 2001 From: da-woods Date: Fri, 17 Apr 2026 17:53:00 +0100 Subject: [PATCH 1646/1718] MAINT: Expose ufunc vectorcall function on Limited API >= 3.12 Minor improvement to the Limited API - `vectorcallfunc` is part of the Limited API from Python 3.12 onwards so the guard can be adjusted to account for that. I doubt that anyone particularly wants to modify it but there's no reason to specifically hide it. --- numpy/_core/include/numpy/ufuncobject.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/include/numpy/ufuncobject.h b/numpy/_core/include/numpy/ufuncobject.h index 3d0d2deac6bd..b5a8b6852468 100644 --- a/numpy/_core/include/numpy/ufuncobject.h +++ b/numpy/_core/include/numpy/ufuncobject.h @@ -179,7 +179,7 @@ typedef struct _tagPyUFuncObject { * but this was never implemented. (This is also why the above * selector is called the "legacy" selector.) */ - #ifndef Py_LIMITED_API + #if !defined(Py_LIMITED_API) || Py_LIMITED_API >= 0x030C0000 vectorcallfunc vectorcall; #else void *vectorcall; From 3d9a4515be0cc4d501095474ed63810523d0b3a9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 17 Apr 2026 17:53:36 +0000 Subject: [PATCH 1647/1718] MAINT: Bump int128/hide-comment-action from 1.54.0 to 1.55.0 Bumps [int128/hide-comment-action](https://github.com/int128/hide-comment-action) from 1.54.0 to 1.55.0. 
- [Release notes](https://github.com/int128/hide-comment-action/releases) - [Commits](https://github.com/int128/hide-comment-action/compare/7e00bd46796b28fdf74b2f4b0e4f0568bd024b00...42badf94b3efd95bf2138bd9c74da19203e83f40) --- updated-dependencies: - dependency-name: int128/hide-comment-action dependency-version: 1.55.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/mypy_primer_comment.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/mypy_primer_comment.yml b/.github/workflows/mypy_primer_comment.yml index d9a8ab776049..68a0978e7a12 100644 --- a/.github/workflows/mypy_primer_comment.yml +++ b/.github/workflows/mypy_primer_comment.yml @@ -49,7 +49,7 @@ jobs: return parseInt(fs.readFileSync("pr_number.txt", { encoding: "utf8" })) - name: Hide old comments - uses: int128/hide-comment-action@7e00bd46796b28fdf74b2f4b0e4f0568bd024b00 # v1.54.0 + uses: int128/hide-comment-action@42badf94b3efd95bf2138bd9c74da19203e83f40 # v1.55.0 with: token: ${{ secrets.GITHUB_TOKEN }} issue-number: ${{ steps.get-pr-number.outputs.result }} From 641c0096b068bffb1755e31d681f1b51589a6501 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 17 Apr 2026 17:53:47 +0000 Subject: [PATCH 1648/1718] MAINT: Bump actions/upload-artifact from 7.0.0 to 7.0.1 Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 7.0.0 to 7.0.1. - [Release notes](https://github.com/actions/upload-artifact/releases) - [Commits](https://github.com/actions/upload-artifact/compare/bbbca2ddaa5d8feaa63e36b76fdaad77386f024f...043fb46d1a93c77aae656e7c1c64a875d1fc6a0a) --- updated-dependencies: - dependency-name: actions/upload-artifact dependency-version: 7.0.1 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- .github/workflows/cygwin.yml | 2 +- .github/workflows/mypy_primer.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- .github/workflows/wheels.yml | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/cygwin.yml b/.github/workflows/cygwin.yml index b530bdee0f71..4c2b5db17488 100644 --- a/.github/workflows/cygwin.yml +++ b/.github/workflows/cygwin.yml @@ -68,7 +68,7 @@ jobs: cd tools /usr/bin/python3.9 -m pytest --pyargs numpy -n2 -m "not slow" - name: Upload wheel if tests fail - uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0 + uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1 if: failure() with: name: numpy-cygwin-wheel diff --git a/.github/workflows/mypy_primer.yml b/.github/workflows/mypy_primer.yml index 21ba1beb7a1c..4572d84aa884 100644 --- a/.github/workflows/mypy_primer.yml +++ b/.github/workflows/mypy_primer.yml @@ -76,7 +76,7 @@ jobs: run: | echo ${{ github.event.pull_request.number }} | tee pr_number.txt - name: Upload mypy_primer diff + PR number - uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0 + uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1 if: ${{ matrix.shard-index == 0 }} with: name: mypy_primer_diffs-${{ matrix.shard-index }} @@ -84,7 +84,7 @@ jobs: diff_${{ matrix.shard-index }}.txt pr_number.txt - name: Upload mypy_primer diff - uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0 + uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1 if: ${{ matrix.shard-index != 0 }} with: name: mypy_primer_diffs-${{ matrix.shard-index }} @@ -98,7 +98,7 @@ jobs: contents: read steps: - name: Merge artifacts - uses: actions/upload-artifact/merge@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0 + uses: actions/upload-artifact/merge@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1 with: name: mypy_primer_diffs 
pattern: mypy_primer_diffs-* diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 04c95da1cb9e..6812aceca602 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -42,7 +42,7 @@ jobs: # Upload the results as artifacts (optional). Commenting out will disable # uploads of run results in SARIF format to the repository Actions tab. - name: "Upload artifact" - uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0 + uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1 with: name: SARIF file path: results.sarif diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 610b3de3fdb0..9ef380bba68c 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -106,7 +106,7 @@ jobs: env: CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} - - uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0 + - uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1 with: name: ${{ matrix.python }}-${{ matrix.buildplat[1] }}-${{ matrix.buildplat[2] }} path: ./wheelhouse/*.whl From 5ebb902144d3c09a9cc1655c49a54a87cd85cb0e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 17 Apr 2026 17:58:35 +0000 Subject: [PATCH 1649/1718] MAINT: Bump pyrefly from 0.60.0 to 0.60.1 in /requirements Bumps [pyrefly](https://github.com/facebook/pyrefly) from 0.60.0 to 0.60.1. - [Release notes](https://github.com/facebook/pyrefly/releases) - [Commits](https://github.com/facebook/pyrefly/compare/0.60.0...0.60.1) --- updated-dependencies: - dependency-name: pyrefly dependency-version: 0.60.1 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- requirements/typing_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/typing_requirements.txt b/requirements/typing_requirements.txt index 4d799640ee83..27ef828fbb94 100644 --- a/requirements/typing_requirements.txt +++ b/requirements/typing_requirements.txt @@ -3,4 +3,4 @@ -r test_requirements.txt mypy==1.20.0 -pyrefly==0.60.0 +pyrefly==0.60.1 From 12e16ab322390059fffee12a153f10c92881f29d Mon Sep 17 00:00:00 2001 From: Kate Hudson Date: Fri, 17 Apr 2026 15:58:08 -0400 Subject: [PATCH 1650/1718] BUG: Fix Stirling correction sign error in the BTPE algorithm (#31238) --- .../upcoming_changes/31238.compatibility.rst | 21 +++ .../random/src/distributions/distributions.c | 18 +- .../random/src/legacy/legacy-distributions.c | 175 +++++++++++++++++- .../test_generator_mt19937_regressions.py | 21 ++- .../tests/test_randomstate_regression.py | 34 ++++ 5 files changed, 259 insertions(+), 10 deletions(-) create mode 100644 doc/release/upcoming_changes/31238.compatibility.rst diff --git a/doc/release/upcoming_changes/31238.compatibility.rst b/doc/release/upcoming_changes/31238.compatibility.rst new file mode 100644 index 000000000000..46eccb403b17 --- /dev/null +++ b/doc/release/upcoming_changes/31238.compatibility.rst @@ -0,0 +1,21 @@ +Corrections to the BTPE binomial sampler +---------------------------------------- + +Two independent errors in the Stirling series of the acceptance/rejection +step of the BTPE algorithm used by `numpy.random.Generator.binomial` +have been corrected: + +* The third and fourth error terms were added rather than + subtracted. This sign error was inherited from section 5.3 of the + original 1988 paper by Kachitvichyanukul & Schmeiser, which incorrectly + adds all four terms. + +* The leading coefficient had a digit-swap typo (``13680`` instead of ``13860``) + that was introduced in the initial implementation. 
+ +As a result, ``Generator.binomial`` and ``Generator.multinomial`` (which uses +binomial internally) may now return different samples for the same seed. + +The legacy `numpy.random.RandomState.binomial` and +`numpy.random.RandomState.multinomial` are not affected: they preserve the +original (incorrect) behavior, so existing streams remain reproducible. diff --git a/numpy/random/src/distributions/distributions.c b/numpy/random/src/distributions/distributions.c index 79cacb2df4a4..39c2b1916c6c 100644 --- a/numpy/random/src/distributions/distributions.c +++ b/numpy/random/src/distributions/distributions.c @@ -741,15 +741,21 @@ RAND_INT_TYPE random_binomial_btpe(bitgen_t *bitgen_state, RAND_INT_TYPE n, f2 = f1 * f1; z2 = z * z; w2 = w * w; + /* + * Note that the third and fourth error terms are subtracted. + * This is a correction from the original 1988 paper + * (Kachitvichyanukul & Schmeiser) which erroneously adds + * all four terms + */ if (A > (xm * log(f1 / x1) + (n - m + 0.5) * log(z / w) + (y - m) * log(w * r / (x1 * q)) + - (13680. - (462. - (132. - (99. - 140. / f2) / f2) / f2) / f2) / f1 / - 166320. + - (13680. - (462. - (132. - (99. - 140. / z2) / z2) / z2) / z2) / z / - 166320. + - (13680. - (462. - (132. - (99. - 140. / x2) / x2) / x2) / x2) / x1 / + (13860. - (462. - (132. - (99. - 140. / f2) / f2) / f2) / f2) / f1 / 166320. + - (13680. - (462. - (132. - (99. - 140. / w2) / w2) / w2) / w2) / w / + (13860. - (462. - (132. - (99. - 140. / z2) / z2) / z2) / z2) / z / + 166320. - + (13860. - (462. - (132. - (99. - 140. / x2) / x2) / x2) / x2) / x1 / + 166320. - + (13860. - (462. - (132. - (99. - 140. 
/ w2) / w2) / w2) / w2) / w / 166320.)) { goto Step10; } diff --git a/numpy/random/src/legacy/legacy-distributions.c b/numpy/random/src/legacy/legacy-distributions.c index e84bd19fdaee..f6fdf53bf6f2 100644 --- a/numpy/random/src/legacy/legacy-distributions.c +++ b/numpy/random/src/legacy/legacy-distributions.c @@ -267,6 +267,156 @@ static RAND_INT_TYPE legacy_random_binomial_inversion( return X; } +/* + * BTPE implementation preserved for compatibility. The last two error terms of + * the Stirling approximation are incorrectly added + */ +static RAND_INT_TYPE legacy_random_binomial_btpe(bitgen_t *bitgen_state, + RAND_INT_TYPE n, + double p, + binomial_t *binomial) { + double r, q, fm, p1, xm, xl, xr, c, laml, lamr, p2, p3, p4; + double a, u, v, s, F, rho, t, A, nrq, x1, x2, f1, f2, z, z2, w, w2, x; + RAND_INT_TYPE m, y, k, i; + + if (!(binomial->has_binomial) || (binomial->nsave != n) || + (binomial->psave != p)) { + /* initialize */ + binomial->nsave = n; + binomial->psave = p; + binomial->has_binomial = 1; + binomial->r = r = MIN(p, 1.0 - p); + binomial->q = q = 1.0 - r; + binomial->fm = fm = n * r + r; + binomial->m = m = (RAND_INT_TYPE)floor(binomial->fm); + binomial->p1 = p1 = floor(2.195 * sqrt(n * r * q) - 4.6 * q) + 0.5; + binomial->xm = xm = m + 0.5; + binomial->xl = xl = xm - p1; + binomial->xr = xr = xm + p1; + binomial->c = c = 0.134 + 20.5 / (15.3 + m); + a = (fm - xl) / (fm - xl * r); + binomial->laml = laml = a * (1.0 + a / 2.0); + a = (xr - fm) / (xr * q); + binomial->lamr = lamr = a * (1.0 + a / 2.0); + binomial->p2 = p2 = p1 * (1.0 + 2.0 * c); + binomial->p3 = p3 = p2 + c / laml; + binomial->p4 = p4 = p3 + c / lamr; + } else { + r = binomial->r; + q = binomial->q; + fm = binomial->fm; + m = binomial->m; + p1 = binomial->p1; + xm = binomial->xm; + xl = binomial->xl; + xr = binomial->xr; + c = binomial->c; + laml = binomial->laml; + lamr = binomial->lamr; + p2 = binomial->p2; + p3 = binomial->p3; + p4 = binomial->p4; + } + +/* sigh ... 
*/ +Step10: + nrq = n * r * q; + u = next_double(bitgen_state) * p4; + v = next_double(bitgen_state); + if (u > p1) + goto Step20; + y = (RAND_INT_TYPE)floor(xm - p1 * v + u); + goto Step60; + +Step20: + if (u > p2) + goto Step30; + x = xl + (u - p1) / c; + v = v * c + 1.0 - fabs(m - x + 0.5) / p1; + if (v > 1.0) + goto Step10; + y = (RAND_INT_TYPE)floor(x); + goto Step50; + +Step30: + if (u > p3) + goto Step40; + y = (RAND_INT_TYPE)floor(xl + log(v) / laml); + /* Reject if v==0.0 since previous cast is undefined */ + if ((y < 0) || (v == 0.0)) + goto Step10; + v = v * (u - p2) * laml; + goto Step50; + +Step40: + y = (RAND_INT_TYPE)floor(xr - log(v) / lamr); + /* Reject if v==0.0 since previous cast is undefined */ + if ((y > n) || (v == 0.0)) + goto Step10; + v = v * (u - p3) * lamr; + +Step50: + k = llabs(y - m); + if ((k > 20) && (k < ((nrq) / 2.0 - 1))) + goto Step52; + + s = r / q; + a = s * (n + 1); + F = 1.0; + if (m < y) { + for (i = m + 1; i <= y; i++) { + F *= (a / i - s); + } + } else if (m > y) { + for (i = y + 1; i <= m; i++) { + F /= (a / i - s); + } + } + if (v > F) + goto Step10; + goto Step60; + +Step52: + rho = + (k / (nrq)) * ((k * (k / 3.0 + 0.625) + 0.16666666666666666) / nrq + 0.5); + t = -k * k / (2 * nrq); + /* log(0.0) ok here */ + A = log(v); + if (A < (t - rho)) + goto Step60; + if (A > (t + rho)) + goto Step10; + + x1 = (double)y + 1; + f1 = (double)m + 1; + z = (double)n + 1 - (double)m; + w = (double)n - (double)y + 1; + x2 = x1 * x1; + f2 = f1 * f1; + z2 = z * z; + w2 = w * w; + /* The last two terms are subtracted in the corrected version */ + if (A > (xm * log(f1 / x1) + (n - m + 0.5) * log(z / w) + + (y - m) * log(w * r / (x1 * q)) + + (13680. - (462. - (132. - (99. - 140. / f2) / f2) / f2) / f2) / f1 / + 166320. + + (13680. - (462. - (132. - (99. - 140. / z2) / z2) / z2) / z2) / z / + 166320. + + (13680. - (462. - (132. - (99. - 140. / x2) / x2) / x2) / x2) / x1 / + 166320. + + (13680. - (462. - (132. - (99. - 140. 
/ w2) / w2) / w2) / w2) / w / + 166320.)) { + goto Step10; + } + +Step60: + if (p > 0.5) { + y = n - y; + } + + return y; +} + static RAND_INT_TYPE legacy_random_binomial_original(bitgen_t *bitgen_state, double p, RAND_INT_TYPE n, @@ -277,14 +427,14 @@ static RAND_INT_TYPE legacy_random_binomial_original(bitgen_t *bitgen_state, if (p * n <= 30.0) { return legacy_random_binomial_inversion(bitgen_state, n, p, binomial); } else { - return random_binomial_btpe(bitgen_state, n, p, binomial); + return legacy_random_binomial_btpe(bitgen_state, n, p, binomial); } } else { q = 1.0 - p; if (q * n <= 30.0) { return n - legacy_random_binomial_inversion(bitgen_state, n, q, binomial); } else { - return n - random_binomial_btpe(bitgen_state, n, q, binomial); + return n - legacy_random_binomial_btpe(bitgen_state, n, q, binomial); } } } @@ -469,7 +619,26 @@ int64_t legacy_random_geometric(bitgen_t *bitgen_state, double p) { void legacy_random_multinomial(bitgen_t *bitgen_state, RAND_INT_TYPE n, RAND_INT_TYPE *mnix, double *pix, npy_intp d, binomial_t *binomial) { - random_multinomial(bitgen_state, n, mnix, pix, d, binomial); + /* + * Mirrors random_multinomial but dispatches to legacy_random_binomial, + * since bug fixes to random_binomial would otherwise change the + * RandomState stream. 
+ */ + double remaining_p = 1.0; + npy_intp j; + RAND_INT_TYPE dn = n; + for (j = 0; j < (d - 1); j++) { + mnix[j] = (RAND_INT_TYPE)legacy_random_binomial( + bitgen_state, pix[j] / remaining_p, dn, binomial); + dn = dn - mnix[j]; + if (dn <= 0) { + break; + } + remaining_p -= pix[j]; + } + if (dn > 0) { + mnix[d - 1] = dn; + } } double legacy_vonmises(bitgen_t *bitgen_state, double mu, double kappa) { diff --git a/numpy/random/tests/test_generator_mt19937_regressions.py b/numpy/random/tests/test_generator_mt19937_regressions.py index 3b82ab414917..1c345ac49a8a 100644 --- a/numpy/random/tests/test_generator_mt19937_regressions.py +++ b/numpy/random/tests/test_generator_mt19937_regressions.py @@ -1,7 +1,7 @@ import pytest import numpy as np -from numpy.random import MT19937, Generator +from numpy.random import MT19937, PCG64, Generator from numpy.testing import assert_, assert_array_equal @@ -219,3 +219,22 @@ def test_zipf_a_near_1(self): # discrete distribution truncated to signed 64 bit integers, more # than half should be less than 2**62. assert np.count_nonzero(sample < 2**62) > n / 2 + + def test_binomial_btpe_sign_fix(self): + # Regression test guarding the BTPE sign correction fix in + # distributions.c. The PCG64 state below exercises the rejection + # window where the corrected error terms flip the accept/reject + # outcome. 
+ state = { + 'bit_generator': 'PCG64', + 'state': { + 'state': 339225526786748945562563845880185242573, + 'inc': 114135179160287400024908587472913682319, + }, + 'has_uint32': 0, + 'uinteger': 0, + } + bg = PCG64() + bg.state = state + rng = Generator(bg) + assert rng.binomial(500, 0.5) == 238 diff --git a/numpy/random/tests/test_randomstate_regression.py b/numpy/random/tests/test_randomstate_regression.py index e5d68c2f7ab7..d15c2ec258e8 100644 --- a/numpy/random/tests/test_randomstate_regression.py +++ b/numpy/random/tests/test_randomstate_regression.py @@ -190,6 +190,40 @@ def test_p_zero_stream(self): assert_array_equal(rng.binomial(1, [0, 0.25, 0.5, 0.75, 1]), [0, 0, 0, 1, 1]) + def test_binomial_btpe_legacy_stream(self): + # Regression test for the BTPE sign correction fix: RandomState + # must preserve the pre-fix stream for compatibility. + state = { + 'bit_generator': 'PCG64', + 'state': { + 'state': 339225526786748945562563845880185242573, + 'inc': 114135179160287400024908587472913682319, + }, + 'has_uint32': 0, + 'uinteger': 0, + } + bg = random.PCG64() + bg.state = state + rs = random.RandomState(bg) + assert rs.binomial(500, 0.5) == 227 + + def test_multinomial_btpe_legacy_stream(self): + # See also test_binomial_btpe_legacy_stream. + # RandomState.multinomial relies on binomial internally. + state = { + 'bit_generator': 'PCG64', + 'state': { + 'state': 339225526786748945562563845880185242573, + 'inc': 114135179160287400024908587472913682319, + }, + 'has_uint32': 0, + 'uinteger': 0, + } + bg = random.PCG64() + bg.state = state + rs = random.RandomState(bg) + assert_array_equal(rs.multinomial(500, [0.5, 0.5]), [227, 273]) + def test_n_zero_stream(self): # Regression test for gh-14522. Ensure that future versions # generate the same variates as version 1.16. 
From b6bc683cce907d21575536c12b2ad993939fd556 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Mon, 20 Apr 2026 09:26:13 +0200 Subject: [PATCH 1651/1718] ENH: Allow user-dtypes return views on `.conjugate()` (#31264) This uses the fact that if something doesn't define an imag, then conjugate() should be defined as a simple view (similar to .real). This is a follow-up of a previous PR, where I didn't implement this isn't really backportable. With this, QuadDType for example does return a view (and a complex new-style dtype can opt out this). This doesn't yet support for .conjugate() to return a view for complex numbers (which would make a lot of sense and could be added but take a bit more effort). Co-authored-by: Swayam --- numpy/_core/src/multiarray/calculation.c | 12 +++++++----- numpy/_core/tests/test_custom_dtypes.py | 6 ++++++ 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/numpy/_core/src/multiarray/calculation.c b/numpy/_core/src/multiarray/calculation.c index a34bb89b286f..6585982efec3 100644 --- a/numpy/_core/src/multiarray/calculation.c +++ b/numpy/_core/src/multiarray/calculation.c @@ -828,8 +828,8 @@ PyArray_Clip(PyArrayObject *self, PyObject *min, PyObject *max, PyArrayObject *o NPY_NO_EXPORT PyObject * PyArray_Conjugate(PyArrayObject *self, PyArrayObject *out) { - if (PyArray_ISCOMPLEX(self) || PyArray_ISOBJECT(self) || - PyArray_ISUSERDEF(self) || !NPY_DT_is_legacy(PyArray_DESCR(self))) { + if (NPY_DT_SLOTS(NPY_DTYPE(PyArray_DTYPE(self)))->imag_meth != NULL) { + /* The dtype has `arr.imag` so `conjugate` must exist (or error) */ if (out == NULL) { return PyArray_GenericUnaryFunction(self, n_ops.conjugate); @@ -841,12 +841,14 @@ PyArray_Conjugate(PyArrayObject *self, PyArrayObject *out) } } else { - PyArrayObject *ret; - if (!PyArray_ISNUMBER(self)) { + if (!NPY_DT_is_numeric(NPY_DTYPE(PyArray_DTYPE(self)))) { PyErr_SetString(PyExc_TypeError, - "cannot conjugate non-numeric dtype"); + "cannot conjugate non-numeric dtype"); return 
NULL; } + + /* Numeric but no `.imag`: real-valued (or `.imag` should error) */ + PyArrayObject *ret; if (out) { if (PyArray_AssignArray(out, self, NULL, NPY_DEFAULT_ASSIGN_CASTING) < 0) { diff --git a/numpy/_core/tests/test_custom_dtypes.py b/numpy/_core/tests/test_custom_dtypes.py index 2acb4adf4c7c..13be3dfb7325 100644 --- a/numpy/_core/tests/test_custom_dtypes.py +++ b/numpy/_core/tests/test_custom_dtypes.py @@ -373,6 +373,12 @@ def test_flatiter_index(self, index): np.testing.assert_array_equal( arr.view(np.float64), arr2.view(np.float64)) + def test_conjugate(self): + # Also user dtype can just return self if conjugate should be no-op. + arr = np.array([1.0, 2.0, 3.0], dtype=SF(1.0)) + assert arr.conjugate() is arr + + @pytest.mark.thread_unsafe( reason="_ScaledFloatTestDType setup is thread-unsafe (gh-29850)" ) From 49d0be449e43a871a08ef1f991f0cf6cf419e4b6 Mon Sep 17 00:00:00 2001 From: riku-sakamoto <46015196+riku-sakamoto@users.noreply.github.com> Date: Mon, 20 Apr 2026 16:32:10 +0900 Subject: [PATCH 1652/1718] BUG: raise warning when `np.timedelta64()` is called (#31213) Also raise a DeprecationWarning when following codes are called without specific unit: np.timedelta() np.datetime() np.datetime("NaT") Co-authored-by: Sebastian Berg --- .../_core/code_generators/ufunc_docstrings.py | 2 +- numpy/_core/src/multiarray/datetime.c | 21 +++++++ numpy/_core/src/multiarray/datetime_busday.c | 3 +- .../_core/src/multiarray/datetime_busdaycal.c | 3 +- numpy/_core/src/multiarray/scalartypes.c.src | 11 ++++ numpy/_core/tests/test_array_coercion.py | 4 +- numpy/_core/tests/test_arrayprint.py | 4 +- numpy/_core/tests/test_casting_unittests.py | 13 +++- numpy/_core/tests/test_datetime.py | 62 ++++++++++++------- numpy/_core/tests/test_deprecations.py | 8 +++ numpy/_core/tests/test_multiarray.py | 16 ++--- numpy/_core/tests/test_regression.py | 7 ++- numpy/_core/tests/test_scalarmath.py | 18 ++++-- numpy/_core/tests/test_stringdtype.py | 4 +- 
numpy/lib/tests/test_arraysetops.py | 2 +- numpy/testing/tests/test_utils.py | 3 +- 16 files changed, 129 insertions(+), 52 deletions(-) diff --git a/numpy/_core/code_generators/ufunc_docstrings.py b/numpy/_core/code_generators/ufunc_docstrings.py index e3e0f9bb134b..6973ba34322e 100644 --- a/numpy/_core/code_generators/ufunc_docstrings.py +++ b/numpy/_core/code_generators/ufunc_docstrings.py @@ -1870,7 +1870,7 @@ def add_newdoc(place, name, doc): Examples -------- >>> import numpy as np - >>> np.isnat(np.datetime64("NaT")) + >>> np.isnat(np.datetime64("NaT", "D")) True >>> np.isnat(np.datetime64("2016-01-01")) False diff --git a/numpy/_core/src/multiarray/datetime.c b/numpy/_core/src/multiarray/datetime.c index edaceedb3b01..26e78f6301d2 100644 --- a/numpy/_core/src/multiarray/datetime.c +++ b/numpy/_core/src/multiarray/datetime.c @@ -2367,6 +2367,16 @@ convert_pyobject_to_datetime(PyArray_DatetimeMetaData *meta, PyObject *obj, return -1; } + if(meta->base == NPY_FR_GENERIC) { + if (DEPRECATE( + "The 'generic' unit for NumPy timedelta is deprecated, " + "and will raise an error in the future. " + "This includes implicit conversion of bare integers (e.g. `+ 1`)." + "Please use a specific unit instead.") < 0) { + return -1; + } + } + Py_DECREF(utf8); return 0; } @@ -2492,6 +2502,17 @@ convert_pyobject_to_datetime(PyArray_DatetimeMetaData *meta, PyObject *obj, meta->num = 1; } *out = NPY_DATETIME_NAT; + + if(meta->base == NPY_FR_GENERIC) { + if (DEPRECATE( + "The 'generic' unit for NumPy timedelta is deprecated, " + "and will raise an error in the future. " + "This includes implicit conversion of bare integers (e.g. `+ 1`)." 
+ "Please use a specific unit instead.") < 0) { + return -1; + } + } + return 0; } else { diff --git a/numpy/_core/src/multiarray/datetime_busday.c b/numpy/_core/src/multiarray/datetime_busday.c index 73c88811a0a9..ad4c66be0e1b 100644 --- a/numpy/_core/src/multiarray/datetime_busday.c +++ b/numpy/_core/src/multiarray/datetime_busday.c @@ -1281,8 +1281,7 @@ array_is_busday(PyObject *NPY_UNUSED(self), else { PyArray_Descr *datetime_dtype; - /* Use the datetime dtype with generic units so it fills it in */ - datetime_dtype = PyArray_DescrFromType(NPY_DATETIME); + datetime_dtype = create_datetime_dtype_with_unit(NPY_DATETIME, NPY_FR_D); if (datetime_dtype == NULL) { goto fail; } diff --git a/numpy/_core/src/multiarray/datetime_busdaycal.c b/numpy/_core/src/multiarray/datetime_busdaycal.c index 4c6986544f6c..9d1e4e90c202 100644 --- a/numpy/_core/src/multiarray/datetime_busdaycal.c +++ b/numpy/_core/src/multiarray/datetime_busdaycal.c @@ -283,8 +283,7 @@ PyArray_HolidaysConverter(PyObject *dates_in, npy_holidayslist *holidays) else { PyArray_Descr *datetime_dtype; - /* Use the datetime dtype with generic units so it fills it in */ - datetime_dtype = PyArray_DescrFromType(NPY_DATETIME); + datetime_dtype = create_datetime_dtype_with_unit(NPY_DATETIME, NPY_FR_D); if (datetime_dtype == NULL) { goto fail; } diff --git a/numpy/_core/src/multiarray/scalartypes.c.src b/numpy/_core/src/multiarray/scalartypes.c.src index 8e4613db10f8..684fff1bea1d 100644 --- a/numpy/_core/src/multiarray/scalartypes.c.src +++ b/numpy/_core/src/multiarray/scalartypes.c.src @@ -3684,6 +3684,17 @@ static PyObject * #else ret->obval = 0; #endif + + if (ret->obmeta.base == NPY_FR_GENERIC) { + if (DEPRECATE( + "The 'generic' unit for NumPy timedelta is deprecated, " + "and will raise an error in the future. 
" + "Please use a specific unit instead.") < 0) { + Py_DECREF(ret); + return NULL; + } + } + } else if (convert_pyobject_to_@name@(&ret->obmeta, obj, NPY_SAME_KIND_CASTING, &ret->obval) < 0) { diff --git a/numpy/_core/tests/test_array_coercion.py b/numpy/_core/tests/test_array_coercion.py index 476f6d3a6f7d..03397b6fd9e9 100644 --- a/numpy/_core/tests/test_array_coercion.py +++ b/numpy/_core/tests/test_array_coercion.py @@ -125,7 +125,7 @@ def scalar_instances(times=True, extended_precision=True, user_dtype=True): yield param(np.timedelta64(23, "s"), id="timedelta64[s]") yield param(np.timedelta64("NaT", "s"), id="timedelta64[s](NaT)") - yield param(np.datetime64("NaT"), id="datetime64[generic](NaT)") + yield param(np.datetime64("NaT", "D"), id="datetime64[D](NaT)") yield param(np.datetime64("2020-06-07 12:43", "ms"), id="datetime64[ms]") # Strings and unstructured void: @@ -401,7 +401,7 @@ class TestTimeScalars: @pytest.mark.parametrize("scalar", [param(np.timedelta64("NaT", "s"), id="timedelta64[s](NaT)"), param(np.timedelta64(123, "s"), id="timedelta64[s]"), - param(np.datetime64("NaT", "generic"), id="datetime64[generic](NaT)"), + param(np.datetime64("NaT", "D"), id="datetime64[D](NaT)"), param(np.datetime64(1, "D"), id="datetime64[D]")],) def test_coercion_basic(self, dtype, scalar): # Note the `[scalar]` is there because np.array(scalar) uses stricter diff --git a/numpy/_core/tests/test_arrayprint.py b/numpy/_core/tests/test_arrayprint.py index 685156f51502..297462544c6d 100644 --- a/numpy/_core/tests/test_arrayprint.py +++ b/numpy/_core/tests/test_arrayprint.py @@ -278,7 +278,7 @@ def test_structure_format_mixed(self): try: # for issue #5692 A = np.zeros(shape=10, dtype=[("A", "M8[s]")]) - A[5:].fill(np.datetime64('NaT')) + A[5:].fill(np.datetime64('NaT', 'D')) date_string = '1970-01-01T00:00:00' assert_equal( np.array2string(A), @@ -303,7 +303,7 @@ def test_structure_format_mixed(self): # and again, with timedeltas A = np.full(10, 123456, dtype=[("A", 
"m8[s]")]) - A[5:].fill(np.datetime64('NaT')) + A[5:].fill(np.datetime64('NaT', 'D')) assert_equal( np.array2string(A), textwrap.dedent("""\ diff --git a/numpy/_core/tests/test_casting_unittests.py b/numpy/_core/tests/test_casting_unittests.py index 1f9b8e07104f..d6d1ccb484b9 100644 --- a/numpy/_core/tests/test_casting_unittests.py +++ b/numpy/_core/tests/test_casting_unittests.py @@ -385,7 +385,7 @@ def test_numeric_to_times(self, from_Dt): int64_dt = np.dtype(np.int64) arr1, arr2, values = self.get_data(from_dt, int64_dt) arr2 = arr2.view(time_dt) - arr2[...] = np.datetime64("NaT") + arr2[...] = np.datetime64("NaT", "D") if time_dt == np.dtype("M8"): # This is a bit of a strange path, and could probably be removed @@ -460,7 +460,14 @@ def test_time_to_time(self, from_dt, to_dt, if nom is not None: expected_out = (values * nom // denom).view(to_res) - expected_out[0] = "NaT" + if to_dt == np.dtype("M8"): + with pytest.warns( + DeprecationWarning, + match="The 'generic' unit for NumPy timedelta is deprecated", + ): + expected_out[0] = "NaT" + else: + expected_out[0] = "NaT" else: expected_out = np.empty_like(values) expected_out[...] 
= denom @@ -814,6 +821,8 @@ def test_object_casts_NULL_None_equivalence(self, dtype): assert arr_NULLs.tobytes() == b"\x00" * arr_NULLs.nbytes try: + if dtype == "M": + dtype = "M[D]" expected = arr_normal.astype(dtype) except TypeError: with pytest.raises(TypeError): diff --git a/numpy/_core/tests/test_datetime.py b/numpy/_core/tests/test_datetime.py index df61e61d4656..4825e6926179 100644 --- a/numpy/_core/tests/test_datetime.py +++ b/numpy/_core/tests/test_datetime.py @@ -222,7 +222,7 @@ def test_datetime_nat_argsort_stability(self, size): # NaT < NaT should be False internally for # sort stability expected = np.arange(size) - arr = np.tile(np.datetime64('NaT'), size) + arr = np.tile(np.datetime64('NaT', 'D'), size) assert_equal(np.argsort(arr, kind='mergesort'), expected) @pytest.mark.parametrize("size", [ @@ -267,25 +267,35 @@ def test_datetime_scalar_construction(self): assert_equal(np.datetime64('1950-03-12T13', 's'), np.datetime64('1950-03-12T13', 'm')) - # Default construction means NaT - assert_equal(np.datetime64(), np.datetime64('NaT')) + with pytest.warns( + DeprecationWarning, + match=self.generic_unit_deprecation_message + ): + # Default construction means NaT + assert_equal(np.datetime64(), np.datetime64('NaT')) + + # Some basic strings and repr + assert_equal(str(np.datetime64('NaT')), 'NaT') + assert_equal(repr(np.datetime64('NaT')), + "np.datetime64('NaT','generic')") + + # None gets constructed as NaT + assert_equal(np.datetime64(None), np.datetime64('NaT')) - # Some basic strings and repr - assert_equal(str(np.datetime64('NaT')), 'NaT') - assert_equal(repr(np.datetime64('NaT')), - "np.datetime64('NaT','generic')") assert_equal(str(np.datetime64('2011-02')), '2011-02') assert_equal(repr(np.datetime64('2011-02')), "np.datetime64('2011-02')") - assert_equal(repr(np.datetime64('NaT').astype(np.dtype("datetime64[ns]"))), + assert_equal(repr(np.datetime64('NaT', 'D').astype(np.dtype("datetime64[ns]"))), "np.datetime64('NaT','ns')") - # None gets 
constructed as NaT - assert_equal(np.datetime64(None), np.datetime64('NaT')) + with pytest.warns( + DeprecationWarning, + match=self.generic_unit_deprecation_message + ): + # Default construction of NaT is in generic units + assert_equal(np.datetime64().dtype, np.dtype('M8')) - # Default construction of NaT is in generic units - assert_equal(np.datetime64().dtype, np.dtype('M8')) - assert_equal(np.datetime64('NaT').dtype, np.dtype('M8')) + assert_equal(np.datetime64('NaT').dtype, np.dtype('M8')) # Construction from integers requires a specified unit assert_raises(ValueError, np.datetime64, 17) @@ -2366,12 +2376,12 @@ def test_datetime_busday_offset(self): np.datetime64('2007-02-25')) # NaT values when roll is not raise - assert_equal(np.busday_offset(np.datetime64('NaT'), 1, roll='nat'), - np.datetime64('NaT')) - assert_equal(np.busday_offset(np.datetime64('NaT'), 1, roll='following'), - np.datetime64('NaT')) - assert_equal(np.busday_offset(np.datetime64('NaT'), 1, roll='preceding'), - np.datetime64('NaT')) + assert_equal(np.busday_offset(np.datetime64('NaT', 'D'), 1, roll='nat'), + np.datetime64('NaT', 'D')) + assert_equal(np.busday_offset(np.datetime64('NaT', 'D'), 1, roll='following'), + np.datetime64('NaT', 'D')) + assert_equal(np.busday_offset(np.datetime64('NaT', 'D'), 1, roll='preceding'), + np.datetime64('NaT', 'D')) def test_datetime_busdaycalendar(self): # Check that it removes NaT, duplicates, and weekends @@ -2795,11 +2805,15 @@ def test_cast_to_truncated_string_doesnt_overflow(self): assert_array_equal(a.astype('U1'), ['1', '-', '1']) def test_datetime_hash_nat(self): - nat1 = np.datetime64() - nat2 = np.datetime64() - assert nat1 is not nat2 - assert nat1 != nat2 - assert hash(nat1) != hash(nat2) + with pytest.warns( + DeprecationWarning, + match=self.generic_unit_deprecation_message + ): + nat1 = np.datetime64() + nat2 = np.datetime64() + assert nat1 is not nat2 + assert nat1 != nat2 + assert hash(nat1) != hash(nat2) @pytest.mark.parametrize('unit', 
('Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us')) def test_datetime_hash_weeks(self, unit): diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index a5c7f7a78399..110c29d1611a 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -408,6 +408,14 @@ def test_raise_warning_for_operation_with_generic_unit( ): self.assert_deprecated(op, args=(value, generic_value)) + def test_raise_warning_for_default_constructor(self): + self.assert_deprecated(lambda: np.timedelta64()) + self.assert_deprecated(lambda: np.datetime64()) + + def test_raise_warning_for_NAT_construction(self): + self.assert_deprecated(lambda: np.datetime64('NaT')) + self.assert_deprecated(lambda: np.datetime64(None)) + class TestTriDeprecationWithNonInteger(_DeprecationTestCase): # Deprecation in NumPy 2.5, 2026-03 diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index f8a2b08997c0..fb2bc126e70a 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -5154,15 +5154,15 @@ class TestArgmax: np.datetime64('1932-09-23T10:10:13'), np.datetime64('2014-10-10T03:50:30')], 3), # Assorted tests with NaTs - ([np.datetime64('NaT'), - np.datetime64('NaT'), + ([np.datetime64('NaT', 'D'), + np.datetime64('NaT', 'D'), np.datetime64('2010-01-03T05:14:12'), - np.datetime64('NaT'), + np.datetime64('NaT', 'D'), np.datetime64('2015-09-23T10:10:13'), np.datetime64('1932-10-10T03:50:30')], 0), ([np.datetime64('2059-03-14T12:43:12'), np.datetime64('1996-09-21T14:43:15'), - np.datetime64('NaT'), + np.datetime64('NaT', 'D'), np.datetime64('2022-12-25T16:02:16'), np.datetime64('1963-10-04T03:14:12'), np.datetime64('2013-05-08T18:15:23')], 2), @@ -5298,15 +5298,15 @@ class TestArgmin: np.datetime64('2015-09-23T10:10:13'), np.datetime64('1932-10-10T03:50:30')], 5), # Assorted tests with NaTs - ([np.datetime64('NaT'), - np.datetime64('NaT'), + ([np.datetime64('NaT', 'D'), 
+ np.datetime64('NaT', 'D'), np.datetime64('2010-01-03T05:14:12'), - np.datetime64('NaT'), + np.datetime64('NaT', 'D'), np.datetime64('2015-09-23T10:10:13'), np.datetime64('1932-10-10T03:50:30')], 0), ([np.datetime64('2059-03-14T12:43:12'), np.datetime64('1996-09-21T14:43:15'), - np.datetime64('NaT'), + np.datetime64('NaT', 'D'), np.datetime64('2022-12-25T16:02:16'), np.datetime64('1963-10-04T03:14:12'), np.datetime64('2013-05-08T18:15:23')], 2), diff --git a/numpy/_core/tests/test_regression.py b/numpy/_core/tests/test_regression.py index 24c321348fd6..ef04a241dcc5 100644 --- a/numpy/_core/tests/test_regression.py +++ b/numpy/_core/tests/test_regression.py @@ -2316,7 +2316,12 @@ def test_correct_hash_dict(self): # gh-8887 - __hash__ would be None despite tp_hash being set all_types = set(np._core.sctypeDict.values()) - {np.void} for t in all_types: - val = t() + if t is np.timedelta64: + val = t(0, 's') + elif t is np.datetime64: + val = t('NAT', 'D') + else: + val = t() try: hash(val) diff --git a/numpy/_core/tests/test_scalarmath.py b/numpy/_core/tests/test_scalarmath.py index 3e69d6cffd8f..877ea8b8ffba 100644 --- a/numpy/_core/tests/test_scalarmath.py +++ b/numpy/_core/tests/test_scalarmath.py @@ -587,18 +587,18 @@ def test_scalar_comparison_to_none(self): assert_(not np.float32(1) == None) # noqa: E711 assert_(not np.str_('test') == None) # noqa: E711 # This is dubious (see below): - assert_(not np.datetime64('NaT') == None) # noqa: E711 + assert_(not np.datetime64('NaT', 'D') == None) # noqa: E711 assert_(np.float32(1) != None) # noqa: E711 assert_(np.str_('test') != None) # noqa: E711 # This is dubious (see below): - assert_(np.datetime64('NaT') != None) # noqa: E711 + assert_(np.datetime64('NaT', 'D') != None) # noqa: E711 assert_(len(w) == 0) # For documentation purposes, this is why the datetime is dubious. # At the time of deprecation this was no behaviour change, but # it has to be considered when the deprecations are done. 
- assert_(np.equal(np.datetime64('NaT'), None)) + assert_(np.equal(np.datetime64('NaT', 'D'), None)) #class TestRepr: @@ -663,8 +663,13 @@ def test_seq_repeat(self): # change. accepted_types = set(np.typecodes["AllInteger"]) deprecated_types = {'?'} + datetime_types = set(np.typecodes['Datetime']) forbidden_types = ( - set(np.typecodes["All"]) - accepted_types - deprecated_types) + set(np.typecodes["All"]) + - accepted_types + - deprecated_types + - datetime_types + ) forbidden_types -= {'V'} # can't default-construct void scalars for seq_type in (list, tuple): @@ -684,6 +689,11 @@ def test_seq_repeat(self): assert_raises(TypeError, operator.mul, seq, i) assert_raises(TypeError, operator.mul, i, seq) + for numpy_type in datetime_types: + i = np.dtype(numpy_type).type(1, "D") + assert_raises(TypeError, operator.mul, seq, i) + assert_raises(TypeError, operator.mul, i, seq) + def test_no_seq_repeat_basic_array_like(self): # Test that an array-like which does not know how to be multiplied # does not attempt sequence repeat (raise TypeError). 
diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index 8f2b654844bc..b3edddab0ae5 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -1141,7 +1141,7 @@ def test_center_promoter(): np.datetime64("1923-04-14T12:43:12"), np.datetime64("1994-06-21T14:43:15"), np.datetime64("2001-10-15T04:10:32"), - np.datetime64("NaT"), + np.datetime64("NaT", "D"), np.datetime64("1995-11-25T16:02:16"), np.datetime64("2005-01-04T03:14:12"), np.datetime64("2041-12-03T14:05:03"), @@ -1203,7 +1203,7 @@ def test_nat_casts(): s = 'nat' all_nats = itertools.product(*zip(s.upper(), s.lower())) all_nats = list(map(''.join, all_nats)) - NaT_dt = np.datetime64('NaT') + NaT_dt = np.datetime64('NaT', 'D') NaT_td = np.timedelta64('NaT', 's') for na_object in [np._NoValue, None, np.nan, 'nat', '']: # numpy treats empty string and all case combinations of 'nat' as NaT diff --git a/numpy/lib/tests/test_arraysetops.py b/numpy/lib/tests/test_arraysetops.py index ef541cf4892b..4ecbeef953c4 100644 --- a/numpy/lib/tests/test_arraysetops.py +++ b/numpy/lib/tests/test_arraysetops.py @@ -785,7 +785,7 @@ def test_unique_1d(self): assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt)) # test for ticket 2111 - datetime64 - nat = np.datetime64('nat') + nat = np.datetime64('nat', 'D') a = [np.datetime64('2020-12-26'), nat, np.datetime64('2020-12-24'), nat] ua = [np.datetime64('2020-12-24'), np.datetime64('2020-12-26'), nat] ua_idx = [2, 0, 1] diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py index 30613f451b60..3dd0ca6857e5 100644 --- a/numpy/testing/tests/test_utils.py +++ b/numpy/testing/tests/test_utils.py @@ -428,14 +428,15 @@ def test_datetime(self): def test_nat_items(self): # not a datetime - nadt_no_unit = np.datetime64("NaT") nadt_s = np.datetime64("NaT", "s") nadt_d = np.datetime64("NaT", "ns") + # not a timedelta with pytest.warns( DeprecationWarning, match="The 'generic' unit 
for NumPy timedelta is deprecated",
         ):
+            nadt_no_unit = np.datetime64("NaT")
             natd_no_unit = np.timedelta64("NaT")
             natd_s = np.timedelta64("NaT", "s")
             natd_d = np.timedelta64("NaT", "ns")

From d3a73173a9ef321d014e10255b62ace2104af6f2 Mon Sep 17 00:00:00 2001
From: Sebastian Berg 
Date: Mon, 20 Apr 2026 09:38:29 +0200
Subject: [PATCH 1653/1718] BUG,DEP: Change logic for ``arr.view()`` to use
 ``_set_dtype`` (#31234)

This modifies the logic for `arr.view()` to:

1. Use `arr._set_dtype(new_dtype)` for subclasses to have a simple way to
   migrate away from 2.
2. Keep using `arr.dtype = new_dtype` if dtype is a setter property.
3. If neither is defined, we now call `__array_finalize__` with a new array
   that has the new dtype+shape.
   (Previously `__array_finalize__` was skipped, a subclass that wanted to
   know this happened had to implement a `dtype` attribute setter).

The first two should be completely safe, with the only downside that we
have to support `_set_dtype()` indefinitely :(.  (Maybe a NumPy 3 can
switch over to just using step 3 always, but that would be a big
transition).

OTOH, calling `__array_finalize__` with mismatching dtypes _could_ fail,
admittedly.  I.e. `MaskedArrays` would fail for non-trivial cases (same
itemsize, no structured dtypes) and even the trivial ones may give
"invalid value" warnings due to the `_fill_value`.

However, this is also arguably a fix.  If a class dealt with it before
correctly, it implemented `.dtype` attribute setting.  And if not and it
stumbles on it, it was likely subtly broken around `dtype` handling with
views.
--- numpy/_core/src/multiarray/convert.c | 146 +++++++++++++++--- numpy/_core/src/multiarray/getset.c | 30 +--- numpy/_core/src/multiarray/multiarraymodule.c | 12 +- numpy/_core/src/multiarray/npy_static_data.c | 1 + numpy/_core/src/multiarray/npy_static_data.h | 8 + numpy/_core/tests/test_deprecations.py | 24 ++- numpy/_core/tests/test_multiarray.py | 16 ++ numpy/ma/core.py | 23 ++- numpy/ma/tests/test_core.py | 10 +- numpy/ma/tests/test_deprecations.py | 8 + 10 files changed, 220 insertions(+), 58 deletions(-) diff --git a/numpy/_core/src/multiarray/convert.c b/numpy/_core/src/multiarray/convert.c index ccd883f2b0f4..841a873ce839 100644 --- a/numpy/_core/src/multiarray/convert.c +++ b/numpy/_core/src/multiarray/convert.c @@ -6,6 +6,7 @@ #include #include "npy_config.h" +#include "npy_pycompat.h" // PyObject_GetOptionalAttr #include "numpy/arrayobject.h" #include "numpy/arrayscalars.h" @@ -24,6 +25,8 @@ #include "convert.h" #include "array_coercion.h" #include "refcount.h" +#include "getset.h" +#include "npy_static_data.h" #if defined(HAVE_FALLOCATE) && defined(__linux__) #include @@ -352,17 +355,17 @@ PyArray_ToString(PyArrayObject *self, NPY_ORDER order) || (PyArray_IS_F_CONTIGUOUS(self) && (order == NPY_FORTRANORDER))) { return PyBytes_FromStringAndSize(PyArray_DATA(self), (Py_ssize_t) numbytes); } - + /* Avoid Ravel where possible for fewer copies. 
*/ - if (!PyDataType_REFCHK(PyArray_DESCR(self)) && + if (!PyDataType_REFCHK(PyArray_DESCR(self)) && ((PyArray_DESCR(self)->flags & NPY_NEEDS_INIT) == 0)) { - + /* Allocate final Bytes Object */ ret = PyBytes_FromStringAndSize(NULL, (Py_ssize_t) numbytes); if (ret == NULL) { return NULL; } - + /* Writable Buffer */ char* dest = PyBytes_AS_STRING(ret); @@ -388,14 +391,14 @@ PyArray_ToString(PyArrayObject *self, NPY_ORDER order) Py_DECREF(ret); return NULL; } - + /* Copy directly from source to destination with proper ordering */ if (PyArray_CopyInto(dest_array, self) < 0) { Py_DECREF(dest_array); Py_DECREF(ret); return NULL; } - + Py_DECREF(dest_array); return ret; @@ -406,7 +409,7 @@ PyArray_ToString(PyArrayObject *self, NPY_ORDER order) if (contig == NULL) { return NULL; } - + ret = PyBytes_FromStringAndSize(PyArray_DATA(contig), numbytes); Py_DECREF(contig); return ret; @@ -545,26 +548,129 @@ PyArray_View(PyArrayObject *self, PyArray_Descr *type, PyTypeObject *pytype) dtype = PyArray_DESCR(self); flags = PyArray_FLAGS(self); + if (type == NULL) { + /* No dtype change: just create the view */ + Py_INCREF(dtype); + ret = (PyArrayObject *)PyArray_NewFromDescr_int( + subtype, dtype, + PyArray_NDIM(self), PyArray_DIMS(self), + PyArray_STRIDES(self), PyArray_DATA(self), + flags, (PyObject *)self, (PyObject *)self, + _NPY_ARRAY_ENSURE_DTYPE_IDENTITY); + return (PyObject *)ret; + } + + /* + * Changing dtype on a subclass. We support three paths: + * + * 1. subclass overrides _set_dtype: create subclass view first, + * then call _set_dtype (subclass handles dtype change). + * 2. subclass overrides the dtype descriptor (e.g. property with + * setter): create subclass view first, use the setter, but + * emit a deprecation asking to implement _set_dtype instead. + * 3. Otherwise (including plain ndarray): create an ndarray base + * view, set dtype internally, then create the subclass view + * if needed. __array_finalize__ sees the final dtype+shape. 
+ */ + int use_set_dtype = 0; + int use_dtype_prop = 0; + + if (subtype != &PyArray_Type) { + PyObject *sub_set_dtype; + if (PyObject_GetOptionalAttr( + (PyObject *)subtype, + npy_interned_str._set_dtype, &sub_set_dtype) < 0) { + goto finish; + } + use_set_dtype = (sub_set_dtype != NULL && + sub_set_dtype != npy_static_pydata.ndarray_set_dtype); + Py_XDECREF(sub_set_dtype); + + if (!use_set_dtype) { + PyObject *sub_dtype_descr; + if (PyObject_GetOptionalAttr( + (PyObject *)subtype, + npy_interned_str.dtype, &sub_dtype_descr) < 0) { + goto finish; + } + use_dtype_prop = (sub_dtype_descr != NULL && + sub_dtype_descr != npy_static_pydata.ndarray_dtype_descr && + Py_TYPE(sub_dtype_descr)->tp_descr_set != NULL); + Py_XDECREF(sub_dtype_descr); + } + } + + if (use_set_dtype || use_dtype_prop) { + /* + * Paths 1 & 2: create subclass view with original dtype, + * then let the subclass handle the dtype change. + */ + Py_INCREF(dtype); + ret = (PyArrayObject *)PyArray_NewFromDescr_int( + subtype, dtype, + PyArray_NDIM(self), PyArray_DIMS(self), + PyArray_STRIDES(self), PyArray_DATA(self), + flags, (PyObject *)self, (PyObject *)self, + _NPY_ARRAY_ENSURE_DTYPE_IDENTITY); + if (ret == NULL) { + goto finish; + } + if (use_set_dtype) { + PyObject *res = PyObject_CallMethodOneArg( + (PyObject *)ret, + npy_interned_str._set_dtype, (PyObject *)type); + if (res == NULL) { + Py_CLEAR(ret); + goto finish; + } + Py_DECREF(res); + } + else { + if (PyObject_GenericSetAttr( + (PyObject *)ret, npy_interned_str.dtype, + (PyObject *)type) < 0) { + Py_CLEAR(ret); + goto finish; + } + /* DEPRECATED 2026-04-13, NumPy 2.5 */ + if (DEPRECATE( + "numpy.ndarray.view() used a custom `dtype` setter " + "to change the dtype of the view. 
Subclasses should " + "implement `_set_dtype` instead.") < 0) { + Py_CLEAR(ret); + goto finish; + } + } + goto finish; + } + + /* Path 3: create ndarray base view and set dtype internally */ Py_INCREF(dtype); ret = (PyArrayObject *)PyArray_NewFromDescr_int( - subtype, dtype, - PyArray_NDIM(self), PyArray_DIMS(self), PyArray_STRIDES(self), - PyArray_DATA(self), + &PyArray_Type, dtype, + PyArray_NDIM(self), PyArray_DIMS(self), + PyArray_STRIDES(self), PyArray_DATA(self), flags, (PyObject *)self, (PyObject *)self, _NPY_ARRAY_ENSURE_DTYPE_IDENTITY); if (ret == NULL) { - Py_XDECREF(type); - return NULL; + goto finish; + } + if (array_descr_set_internal(ret, (PyObject *)type) < 0) { + Py_CLEAR(ret); + goto finish; } - if (type != NULL) { - if (PyObject_SetAttrString((PyObject *)ret, "dtype", - (PyObject *)type) < 0) { - Py_DECREF(ret); - Py_DECREF(type); - return NULL; - } - Py_DECREF(type); + if (subtype != &PyArray_Type) { + Py_INCREF(PyArray_DESCR(ret)); + Py_SETREF(ret, (PyArrayObject *)PyArray_NewFromDescr_int( + subtype, PyArray_DESCR(ret), + PyArray_NDIM(ret), PyArray_DIMS(ret), + PyArray_STRIDES(ret), PyArray_DATA(ret), + PyArray_FLAGS(ret), (PyObject *)self, (PyObject *)self, + _NPY_ARRAY_ENSURE_DTYPE_IDENTITY)); } + +finish: + Py_DECREF(type); return (PyObject *)ret; } diff --git a/numpy/_core/src/multiarray/getset.c b/numpy/_core/src/multiarray/getset.c index dfe0e962b06e..721d7a73738c 100644 --- a/numpy/_core/src/multiarray/getset.c +++ b/numpy/_core/src/multiarray/getset.c @@ -491,18 +491,6 @@ array_descr_set_internal(PyArrayObject *self, PyObject *arg) return -1; } -static int -non_unique_reference(PyObject *lhs) -{ - // Return 1 if we have a guaranteed non-unique reference - // When 0 is returned, the object can be unique or non-unique -#if defined(PYPY_VERSION) - // on pypy we cannot use reference counting - return 0; -#endif - return Py_REFCNT(lhs) > 1; -} - static int array_descr_set(PyArrayObject *self, PyObject *arg) { @@ -512,18 +500,12 @@ 
array_descr_set(PyArrayObject *self, PyObject *arg) return -1; } - if (non_unique_reference((PyObject *)self)) { - // this will not emit deprecation warnings for all cases, but for most it will - // we skip unique references, so that we will not get a deprecation warning - // when array.view(new_dtype) is called - /* DEPRECATED 2026-02-06, NumPy 2.5 */ - int ret = PyErr_WarnEx(PyExc_DeprecationWarning, - "Setting the dtype on a NumPy array has been deprecated in NumPy 2.5.\n" - "Instead of changing the dtype on an array x, create a new array with x.view(new_dtype)", - 1); - if (ret) { - return -1; - } + /* DEPRECATED 2026-02-06, NumPy 2.5 */ + if (DEPRECATE( + "Setting the dtype on a NumPy array has been deprecated in NumPy 2.5.\n" + "Instead of changing the dtype on an array x, create a new array " + "with x.view(new_dtype)") < 0) { + return -1; } return array_descr_set_internal(self, arg); } diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index 359793bec0d8..eb97b0ff267d 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -1983,7 +1983,7 @@ array_copyto(PyObject *NPY_UNUSED(ignored), PyArray_DTypeMeta *dst_DType = NPY_DTYPE(PyArray_DESCR(dst)); bool is_npy_nan = PyFloat_Check(src_obj) && npy_isnan(PyFloat_AsDouble(src_obj)); if (!is_npy_nan && dst_DType->type_num == NPY_TIMEDELTA) { - descr = PyArray_DESCR(dst); + descr = PyArray_DESCR(dst); Py_INCREF(descr); } else { descr = npy_find_descr_for_scalar(src_obj, PyArray_DESCR(src), DType, @@ -5229,6 +5229,16 @@ _multiarray_umath_exec(PyObject *m) { if (npy_static_pydata.ndarray_array_function == NULL) { return -1; } + npy_static_pydata.ndarray_set_dtype = PyObject_GetAttrString( + (PyObject *)&PyArray_Type, "_set_dtype"); + if (npy_static_pydata.ndarray_set_dtype == NULL) { + return -1; + } + npy_static_pydata.ndarray_dtype_descr = PyObject_GetAttrString( + (PyObject *)&PyArray_Type, "dtype"); + 
if (npy_static_pydata.ndarray_dtype_descr == NULL) { + return -1; + } /* * Initialize np.dtypes.StringDType diff --git a/numpy/_core/src/multiarray/npy_static_data.c b/numpy/_core/src/multiarray/npy_static_data.c index bb5c23a59d3c..40dfc2dddaf7 100644 --- a/numpy/_core/src/multiarray/npy_static_data.c +++ b/numpy/_core/src/multiarray/npy_static_data.c @@ -74,6 +74,7 @@ intern_strings(void) INTERN_STRING(imag, "imag"); INTERN_STRING(sort, "sort"); INTERN_STRING(argsort, "argsort"); + INTERN_STRING(_set_dtype, "_set_dtype"); return 0; } diff --git a/numpy/_core/src/multiarray/npy_static_data.h b/numpy/_core/src/multiarray/npy_static_data.h index 0411f969f678..abf98b5f0c09 100644 --- a/numpy/_core/src/multiarray/npy_static_data.h +++ b/numpy/_core/src/multiarray/npy_static_data.h @@ -53,6 +53,7 @@ typedef struct npy_interned_str_struct { PyObject *imag; PyObject *sort; PyObject *argsort; + PyObject *_set_dtype; } npy_interned_str_struct; /* @@ -90,6 +91,13 @@ typedef struct npy_static_pydata_struct { PyObject *ndarray_array_finalize; PyObject *ndarray_array_function; + /* + * References to ndarray._set_dtype and ndarray.dtype descriptor, + * used in PyArray_View to detect subclass overrides. 
+ */ + PyObject *ndarray_set_dtype; + PyObject *ndarray_dtype_descr; + /* * References to the '1' and '0' PyLong objects */ diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index 110c29d1611a..0a579957e214 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -11,7 +11,7 @@ import numpy as np from numpy._core._multiarray_tests import fromstring_null_term_c_api # noqa: F401 -from numpy.testing import IS_PYPY, assert_raises +from numpy.testing import assert_raises class _DeprecationTestCase: @@ -243,6 +243,7 @@ def __array_wrap__(self, arr): self.assert_deprecated(lambda: np.negative(test2)) assert test2.called + class TestDeprecatedArrayAttributeSetting(_DeprecationTestCase): message = "Setting the .*on a NumPy array has been deprecated.*" @@ -250,7 +251,6 @@ def test_deprecated_strides_set(self): x = np.eye(2) self.assert_deprecated(setattr, args=(x, 'strides', x.strides)) - @pytest.mark.skipif(IS_PYPY, reason="PyPy handles refcounts differently") def test_deprecated_dtype_set(self): x = np.eye(2) self.assert_deprecated(setattr, args=(x, "dtype", int)) @@ -259,6 +259,26 @@ def test_deprecated_shape_set(self): x = np.eye(2) self.assert_deprecated(setattr, args=(x, "shape", (4, 1))) + +class TestDeprecatedViewDtypePropertySetter(_DeprecationTestCase): + # view() with dtype change on a subclass that overrides the + # dtype property should warn to implement _set_dtype instead. 
+ message = r"numpy.ndarray.view\(\) used a custom `dtype` setter.*" + + def test_view_dtype_property_setter(self): + class MyArray(np.ndarray): + @property + def dtype(self): + return super().dtype + + @dtype.setter + def dtype(self, dtype): + super(MyArray, type(self))._set_dtype(self, dtype) + + arr = np.arange(6).view(MyArray) + self.assert_deprecated(arr.view, args=(np.float64,)) + + class TestDeprecatedDTypeParenthesizedRepeatCount(_DeprecationTestCase): message = "Passing in a parenthesized single number" diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index fb2bc126e70a..dcb6ea192b3e 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -6605,6 +6605,22 @@ def test_basic(self): assert_array_equal(y, z) assert_array_equal(y, [67305985, 134678021]) + def test_view_dtype_change_subclass_finalize(self): + # gh-31192: view() with dtype change on a subclass must call + # __array_finalize__ and return the correct subclass type. + + class MyArray(np.ndarray): + def __array_finalize__(self, obj): + self.finalized_from = obj + self._dtype_at_finalize = self.dtype + + arr = np.arange(6).view(MyArray) + result = arr.view("i1") + assert isinstance(result, MyArray) + assert result.dtype == np.dtype("i1") + assert result._dtype_at_finalize == np.dtype("i1") + assert result.finalized_from is arr + def _mean(a, **args): return a.mean(**args) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 3bcf21885b83..560c7dcefdcf 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -3488,6 +3488,15 @@ def __setitem__(self, indx, value): _mask[indx] = mindx return + def _set_dtype(self, dtype): + super()._set_dtype(dtype) + if self._mask is not nomask: + self._mask = self._mask.view(make_mask_descr(dtype), ndarray) + try: + self._mask = self._mask.reshape(self.shape) + except (AttributeError, TypeError): + pass + # Define so that we can overwrite the setter. 
@property def dtype(self): @@ -3495,15 +3504,13 @@ def dtype(self): @dtype.setter def dtype(self, dtype): + # DEPRECATED 2026-02-06, NumPy 2.5 + warnings.warn( + "Setting the dtype on a MaskedArray has been deprecated in " + "NumPy 2.5.\nInstead of changing the dtype on an array x, " + "create a new array with x.view(new_dtype)", + DeprecationWarning, stacklevel=2) self._set_dtype(dtype) - if self._mask is not nomask: - self._mask = self._mask.view(make_mask_descr(dtype), ndarray) - # Try to reset the shape of the mask (if we don't have a void). - # This raises a ValueError if the dtype change won't work. - try: - self._mask = self._mask.reshape(self.shape) - except (AttributeError, TypeError): - pass @property def shape(self): diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index 2dc7fe5a9b17..df26ce13c5fa 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -2242,7 +2242,8 @@ def test_assign_dtype(self): a = np.zeros(4, dtype='f4,i4') m = np.ma.array(a) - m.dtype = np.dtype('f4') + with pytest.warns(DeprecationWarning, match="Setting the dtype.*MaskedArray"): + m.dtype = np.dtype('f4') repr(m) # raises? assert_equal(m.dtype, np.dtype('f4')) @@ -2250,7 +2251,9 @@ def test_assign_dtype(self): # are not allowed def assign(): m = np.ma.array(a) - m.dtype = np.dtype('f8') + with pytest.warns(DeprecationWarning, + match="Setting the dtype.*MaskedArray"): + m.dtype = np.dtype('f8') assert_raises(ValueError, assign) b = a.view(dtype='f4', type=np.ma.MaskedArray) # raises? 
@@ -2259,7 +2262,8 @@ def assign(): # check that nomask is preserved a = np.zeros(4, dtype='f4') m = np.ma.array(a) - m.dtype = np.dtype('f4,i4') + with pytest.warns(DeprecationWarning, match="Setting the dtype.*MaskedArray"): + m.dtype = np.dtype('f4,i4') assert_equal(m.dtype, np.dtype('f4,i4')) assert_equal(m._mask, np.ma.nomask) diff --git a/numpy/ma/tests/test_deprecations.py b/numpy/ma/tests/test_deprecations.py index 07120b198bea..88b87486f59e 100644 --- a/numpy/ma/tests/test_deprecations.py +++ b/numpy/ma/tests/test_deprecations.py @@ -63,3 +63,11 @@ def test_axis_default(self): result = ma_max(data1d) assert_equal(result, ma_max(data1d, axis=None)) assert_equal(result, ma_max(data1d, axis=0)) + + +class TestDtypeSet: + def test_deprecated_dtype_set(self): + # gh-31192: setting dtype on a MaskedArray should emit DeprecationWarning + x = np.ma.array([1, 2, 3], mask=[0, 1, 0], dtype=np.float64) + with pytest.warns(DeprecationWarning, match="Setting the dtype"): + x.dtype = np.int64 From c7c1d23b13282f77403686a83902cefe2c26333a Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Mon, 20 Apr 2026 16:57:20 +0200 Subject: [PATCH 1654/1718] DOC/TYP: Improved typing docs for ``ndarray`` (#31277) --- numpy/typing/__init__.py | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/numpy/typing/__init__.py b/numpy/typing/__init__.py index c46a381533ab..04b26d0faf64 100644 --- a/numpy/typing/__init__.py +++ b/numpy/typing/__init__.py @@ -64,10 +64,24 @@ ndarray ~~~~~~~ -It's possible (but deprecated) to mutate the dtype of an array at runtime. -This sort of mutation is not allowed by the types. Users who want to -write statically typed code should instead use the `numpy.ndarray.view` -method to create a view of the array with a different dtype. +The `numpy.ndarray` class is a `generic type`_ that accepts two type arguments: + +1. The type of `numpy.ndarray.shape`, which must be a `tuple` of `int`, e.g. 
+ ``tuple[int, int]`` (2-D shape) or ``tuple[()]`` (0-D shape). + The default shape is ``tuple[Any, ...]``, which represents an unknown shape with + *any* number of dimensions. + Currently, ``Literal`` ints or other more specific types are not supported. +2. The type of `numpy.ndarray.dtype`, which must be a subtype of `numpy.dtype` such as + ``numpy.dtype[numpy.float64]``. If omitted, it will default to ``numpy.dtype[Any]``. + +.. code-block:: python + + >>> import numpy as np + + >>> type ImageRGB = np.ndarray[tuple[int, int, int], np.dtype[np.uint8]] + >>> type Vector[S: np.generic] = np.ndarray[tuple[int], np.dtype[S]] + +.. _generic type: https://typing.python.org/en/latest/spec/generics.html DTypeLike ~~~~~~~~~ From d0054199c2e45e1dd4432fdb5f9fdf4cc7abfdc2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 Apr 2026 20:13:54 +0000 Subject: [PATCH 1655/1718] MAINT: Bump actions/cache from 5.0.4 to 5.0.5 Bumps [actions/cache](https://github.com/actions/cache) from 5.0.4 to 5.0.5. - [Release notes](https://github.com/actions/cache/releases) - [Changelog](https://github.com/actions/cache/blob/main/RELEASES.md) - [Commits](https://github.com/actions/cache/compare/668228422ae6a00e4ad889ee87cd7109ec5666a7...27d5ce7f107fe9357f9df03efb73ab90386fccae) --- updated-dependencies: - dependency-name: actions/cache dependency-version: 5.0.5 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- .github/workflows/linux_qemu.yml | 4 ++-- .github/workflows/macos.yml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/linux_qemu.yml b/.github/workflows/linux_qemu.yml index f71eb304a9bb..91893520c2d6 100644 --- a/.github/workflows/linux_qemu.yml +++ b/.github/workflows/linux_qemu.yml @@ -82,7 +82,7 @@ jobs: sudo apt install -y ninja-build gcc-${TOOLCHAIN_NAME} g++-${TOOLCHAIN_NAME} gfortran-${TOOLCHAIN_NAME} - name: Cache docker container - uses: actions/cache@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5.0.4 + uses: actions/cache@27d5ce7f107fe9357f9df03efb73ab90386fccae # v5.0.5 id: container-cache with: path: ~/docker_${{ matrix.BUILD_PROP[1] }} @@ -192,7 +192,7 @@ jobs: sudo apt install -y ninja-build gcc-14-${TOOLCHAIN_NAME} g++-14-${TOOLCHAIN_NAME} gfortran-14-${TOOLCHAIN_NAME} - name: Cache docker container - uses: actions/cache@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5.0.4 + uses: actions/cache@27d5ce7f107fe9357f9df03efb73ab90386fccae # v5.0.5 id: container-cache with: path: ~/docker_${{ matrix.BUILD_PROP[1] }} diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 9f59bb9f7002..8bc040a73fe5 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -50,7 +50,7 @@ jobs: echo "today=$(/bin/date -u '+%Y%m%d')" >> $GITHUB_OUTPUT - name: Setup compiler cache - uses: actions/cache@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5.0.4 + uses: actions/cache@27d5ce7f107fe9357f9df03efb73ab90386fccae # v5.0.5 id: cache-ccache with: path: ${{ steps.prep-ccache.outputs.dir }} @@ -74,7 +74,7 @@ jobs: # ensure we re-solve once a day (since we don't lock versions). Could be # replaced by a conda-lock based approach in the future. 
- name: Cache conda environment - uses: actions/cache@668228422ae6a00e4ad889ee87cd7109ec5666a7 # v5.0.4 + uses: actions/cache@27d5ce7f107fe9357f9df03efb73ab90386fccae # v5.0.5 env: # Increase this value to reset cache if environment.yml has not changed CACHE_NUMBER: 1 From 91c5a4f198d77bd546f423eabd15581b967422f2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 Apr 2026 18:15:53 -0600 Subject: [PATCH 1656/1718] MAINT: Bump pyrefly from 0.60.1 to 0.61.0 in /requirements (#31289) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements/typing_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/typing_requirements.txt b/requirements/typing_requirements.txt index 27ef828fbb94..7e02a17db1ea 100644 --- a/requirements/typing_requirements.txt +++ b/requirements/typing_requirements.txt @@ -3,4 +3,4 @@ -r test_requirements.txt mypy==1.20.0 -pyrefly==0.60.1 +pyrefly==0.61.0 From de10b50273e0f5e98834422b48ac897942464758 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Tue, 21 Apr 2026 02:17:05 +0200 Subject: [PATCH 1657/1718] DOC/TYP: remove outdated subsection on 0D arrays (#31288) --- numpy/typing/__init__.py | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/numpy/typing/__init__.py b/numpy/typing/__init__.py index 04b26d0faf64..11d57f814551 100644 --- a/numpy/typing/__init__.py +++ b/numpy/typing/__init__.py @@ -157,20 +157,6 @@ def phase(x: np.complexfloating) -> np.floating: `~numpy.signedinteger`, the former only inheriting from `~numpy.generic` while static type checking. -0D arrays -~~~~~~~~~ - -During runtime numpy aggressively casts any passed 0D arrays into their -corresponding `~numpy.generic` instance. Until the introduction of shape -typing (see :pep:`646`) it is unfortunately not possible to make the -necessary distinction between 0D and >0D arrays. 
While thus not strictly -correct, all operations that can potentially perform a 0D-array -> scalar -cast are currently annotated as exclusively returning an `~numpy.ndarray`. - -If it is known in advance that an operation *will* perform a -0D-array -> scalar cast, then one can consider manually remedying the -situation with either `typing.cast` or a ``# type: ignore`` comment. - Record array dtypes ~~~~~~~~~~~~~~~~~~~ From 388bd75829842d8230147f57eba5f86bb3ad6c3c Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Tue, 21 Apr 2026 16:57:21 +0200 Subject: [PATCH 1658/1718] DEP: Correct dtype in `__array_finalize__` if _set_dtype=None (#31293) This avoids using the correct dtype in array_finalize unless _set_dtype = None is set on the class. We also use this new path to make record arrays follow the main class deprecation correctly. This means that recarray does live in the future. --- .../upcoming_changes/31293.deprecation.rst | 20 ++++++++++ numpy/_core/records.py | 14 +++---- numpy/_core/src/multiarray/convert.c | 37 ++++++++++++++----- numpy/_core/tests/test_deprecations.py | 4 ++ numpy/_core/tests/test_multiarray.py | 2 + numpy/_core/tests/test_records.py | 3 +- 6 files changed, 62 insertions(+), 18 deletions(-) create mode 100644 doc/release/upcoming_changes/31293.deprecation.rst diff --git a/doc/release/upcoming_changes/31293.deprecation.rst b/doc/release/upcoming_changes/31293.deprecation.rst new file mode 100644 index 000000000000..f3dbf5e4b433 --- /dev/null +++ b/doc/release/upcoming_changes/31293.deprecation.rst @@ -0,0 +1,20 @@ +Custom ``dtype`` property and ``__array_finalize__`` +---------------------------------------------------- +Previously ``arr.view(dtype=new_dtype)`` called ``arr.dtype = new_dtype`` +also for subclasses (i.e. the attribute setting). +This path is now deprecated and refined, meaning that even subclasses +that do not see this ``DeprecationWarning`` may wish to update their code. 
+ +A subclass that does any ``dtype`` specific logic (i.e. verifying the dtype +in ``__array_finalize__`` or has a ``dtype`` property) should now: + +* Set ``_set_dtype = None`` in which case ``arr.view(dtype=new_dtype)`` + will call ``__array_finalize__`` with the new dtype, ensuring that + any validation ``__array_finalize__`` will run is done. +* Or, for a quick fix, define ``_set_dtype`` as a function (calling + ``ndarray._set_dtype()`` to avoid ``DeprecationWarnings``. + (Future versions might migrate towards the ``_set_dtype = None`` path.) + +Ideally, follow NumPy's deprecation to prevent ``dtype`` mutation by users. +The use of ``ndarray._set_dtype()`` may be necessary for some subclass +finalization patterns, but should otherwise be avoided. diff --git a/numpy/_core/records.py b/numpy/_core/records.py index dbf84efcdd56..b531e2a886f4 100644 --- a/numpy/_core/records.py +++ b/numpy/_core/records.py @@ -402,11 +402,15 @@ def __new__(cls, shape, dtype=None, buf=None, offset=0, strides=None, ) return self + _set_dtype = None # __array_finalize__ can deal with dtype changes + def __array_finalize__(self, obj): - if self.dtype.type is not record and self.dtype.names is not None: + if (self.dtype.type is not record and + issubclass(self.dtype.type, nt.void) and + self.dtype.names is not None): # if self.dtype is not np.record, invoke __setattr__ which will # convert it to a record if it is a void dtype. - self.dtype = self.dtype + ndarray._set_dtype(self, sb.dtype((record, self.dtype))) def __getattribute__(self, attr): # See if ndarray has this attr, and return it if so. 
(note that this @@ -455,11 +459,7 @@ def __setattr__(self, attr, val): newattr = attr not in self.__dict__ try: - if attr == 'dtype': - # gh-29244 - ret = self._set_dtype(val) - else: - ret = object.__setattr__(self, attr, val) + ret = object.__setattr__(self, attr, val) except Exception: fielddict = ndarray.__getattribute__(self, 'dtype').fields or {} if attr not in fielddict: diff --git a/numpy/_core/src/multiarray/convert.c b/numpy/_core/src/multiarray/convert.c index 841a873ce839..88056930f11a 100644 --- a/numpy/_core/src/multiarray/convert.c +++ b/numpy/_core/src/multiarray/convert.c @@ -561,17 +561,24 @@ PyArray_View(PyArrayObject *self, PyArray_Descr *type, PyTypeObject *pytype) } /* - * Changing dtype on a subclass. We support three paths: + * Changing dtype on a subclass. We support 4 paths: * * 1. subclass overrides _set_dtype: create subclass view first, * then call _set_dtype (subclass handles dtype change). + * If _set_dtype is set to None, we use path 3 below. * 2. subclass overrides the dtype descriptor (e.g. property with * setter): create subclass view first, use the setter, but * emit a deprecation asking to implement _set_dtype instead. - * 3. Otherwise (including plain ndarray): create an ndarray base - * view, set dtype internally, then create the subclass view - * if needed. __array_finalize__ sees the final dtype+shape. + * 3. If _set_dtype is None: create an ndarray base view, set dtype + * internally, then create the subclass view if needed: + * __array_finalize__ sees the final dtype+shape. + * 4. If _set_dtype (and dtype) are not set, call `__array_finalize__` + * with the old dtype and forcibly update the dtype (a subclass will be + * unaware of the change) which is the unfortunate historic behavior. + * + * (Base class ndarray has no __array_finalize__ so effectively uses path 4.) 
*/ + int use_dtype_in_finalize = 0; int use_set_dtype = 0; int use_dtype_prop = 0; @@ -582,11 +589,16 @@ PyArray_View(PyArrayObject *self, PyArray_Descr *type, PyTypeObject *pytype) npy_interned_str._set_dtype, &sub_set_dtype) < 0) { goto finish; } - use_set_dtype = (sub_set_dtype != NULL && - sub_set_dtype != npy_static_pydata.ndarray_set_dtype); + if (sub_set_dtype == Py_None) { + use_dtype_in_finalize = 1; + } + else { + use_set_dtype = (sub_set_dtype != NULL && + sub_set_dtype != npy_static_pydata.ndarray_set_dtype); + } Py_XDECREF(sub_set_dtype); - if (!use_set_dtype) { + if (!use_set_dtype && !use_dtype_in_finalize) { PyObject *sub_dtype_descr; if (PyObject_GetOptionalAttr( (PyObject *)subtype, @@ -644,10 +656,10 @@ PyArray_View(PyArrayObject *self, PyArray_Descr *type, PyTypeObject *pytype) goto finish; } - /* Path 3: create ndarray base view and set dtype internally */ + /* Path 3+4: create view and set dtype internally */ Py_INCREF(dtype); ret = (PyArrayObject *)PyArray_NewFromDescr_int( - &PyArray_Type, dtype, + use_dtype_in_finalize ? &PyArray_Type : subtype, dtype, PyArray_NDIM(self), PyArray_DIMS(self), PyArray_STRIDES(self), PyArray_DATA(self), flags, (PyObject *)self, (PyObject *)self, @@ -660,7 +672,12 @@ PyArray_View(PyArrayObject *self, PyArray_Descr *type, PyTypeObject *pytype) goto finish; } - if (subtype != &PyArray_Type) { + /* + * Path 3: `_set_dtype is None` and `ret` is a base-class array + * with correct dtype+shape, this will call `__array_finalize__` + * with the final dtype+shape. 
+ */ + if (use_dtype_in_finalize) { Py_INCREF(PyArray_DESCR(ret)); Py_SETREF(ret, (PyArrayObject *)PyArray_NewFromDescr_int( subtype, PyArray_DESCR(ret), diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index 0a579957e214..27a5b4c4c9ba 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -255,6 +255,10 @@ def test_deprecated_dtype_set(self): x = np.eye(2) self.assert_deprecated(setattr, args=(x, "dtype", int)) + def test_deprecated_dtype_set_record(self): + x = np.zeros(2, dtype="i4,i4").view(np.recarray) + self.assert_deprecated(setattr, args=(x, "dtype", np.dtype("f4,f4"))) + def test_deprecated_shape_set(self): x = np.eye(2) self.assert_deprecated(setattr, args=(x, "shape", (4, 1))) diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index dcb6ea192b3e..9dce25e36298 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -6610,6 +6610,8 @@ def test_view_dtype_change_subclass_finalize(self): # __array_finalize__ and return the correct subclass type. 
class MyArray(np.ndarray): + _set_dtype = None + def __array_finalize__(self, obj): self.finalized_from = obj self._dtype_at_finalize = self.dtype diff --git a/numpy/_core/tests/test_records.py b/numpy/_core/tests/test_records.py index 80f76a865eda..9387e8aa9a83 100644 --- a/numpy/_core/tests/test_records.py +++ b/numpy/_core/tests/test_records.py @@ -497,7 +497,8 @@ def test_assign_dtype_attribute(self, nfields): assert dt.type != np.record # ensure that the dtype remains a record even when assigned - data.dtype = dt + with pytest.warns(DeprecationWarning, match="Setting the dtype"): + data.dtype = dt assert data.dtype.type == np.record @pytest.mark.parametrize('nfields', [0, 1, 2]) From 95dfe3da93d0a74d93fca067011d1864a391ffcc Mon Sep 17 00:00:00 2001 From: M Bussonnier Date: Wed, 22 Apr 2026 13:12:01 +0200 Subject: [PATCH 1659/1718] DOC: wrong underline length and closing backtick (#31300) --- numpy/f2py/crackfortran.py | 7 +++++-- numpy/random/_mt19937.pyx | 2 +- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py index 1f89aba8a841..6d2cf4f34cc7 100644 --- a/numpy/f2py/crackfortran.py +++ b/numpy/f2py/crackfortran.py @@ -97,8 +97,11 @@ character(len=C['len'],kind=C['kind'], f2py_len=C['f2py_len']) (see also fortran type declaration statement formats below) -Fortran 90 type declaration statement format (F77 is subset of F90) -==================================================================== +Fortran 90 type declaration statement format +============================================ + +F77 is subset of F90 + (Main source: IBM XL Fortran 5.1 Language Reference Manual) type declaration = [[]::] = byte | diff --git a/numpy/random/_mt19937.pyx b/numpy/random/_mt19937.pyx index c74498356dda..422b9d0b4fa5 100644 --- a/numpy/random/_mt19937.pyx +++ b/numpy/random/_mt19937.pyx @@ -155,7 +155,7 @@ cdef class MT19937(BitGenerator): seed : {None, int, array_like} Random seed initializing the pseudo-random 
number generator. Can be an integer in [0, 2**32-1], array of integers in - [0, 2**32-1], a `SeedSequence, or ``None``. If `seed` + [0, 2**32-1], a `SeedSequence`, or ``None``. If `seed` is ``None``, then fresh, unpredictable entropy will be pulled from the OS. From c1f6b30d39ef2648789db51a8493d06d1363695b Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Tue, 21 Apr 2026 21:18:24 -0600 Subject: [PATCH 1660/1718] MAINT: Update dependabot configuration Dependabot is not updating pytest even though pytest 9.0.3 was released April 7. Grok suggests this is a known dependabot problem with multiple files pinning the same dependency and suggests using groups to help it along. The same problem doesn't appear for actions, just the pip ecosystem. [skip azp] [skip actions] [skip cirrus] --- .github/dependabot.yml | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index ceebf268351d..4d1e5f9161df 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -5,7 +5,7 @@ updates: schedule: interval: daily cooldown: - default-days: 7 + default-days: 7 # optional commit-message: prefix: "MAINT" labels: @@ -17,7 +17,12 @@ updates: schedule: interval: daily cooldown: - default-days: 7 + default-days: 7 # optional + groups: + python-deps: + patterns: + - "*" # catches EVERY dependency in ALL requirements files + applies-to: "version-updates" commit-message: prefix: "MAINT" labels: From fa558407446c9bd2f37e0e25fac200e0156a9c80 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 22 Apr 2026 09:44:00 -0600 Subject: [PATCH 1661/1718] MAINT: Bump github/codeql-action from 4.35.1 to 4.35.2 (#31305) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff 
--git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 7ac309dfff1a..d8fe3a1f47b7 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@c10b8064de6f491fea524254123dbe5e09572f13 # v4.35.1 + uses: github/codeql-action/init@95e58e9a2cdfd71adc6e0353d5c52f41a045d225 # v4.35.2 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@c10b8064de6f491fea524254123dbe5e09572f13 # v4.35.1 + uses: github/codeql-action/autobuild@95e58e9a2cdfd71adc6e0353d5c52f41a045d225 # v4.35.2 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@c10b8064de6f491fea524254123dbe5e09572f13 # v4.35.1 + uses: github/codeql-action/analyze@95e58e9a2cdfd71adc6e0353d5c52f41a045d225 # v4.35.2 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 6812aceca602..bff4c54758a2 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
- name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@c10b8064de6f491fea524254123dbe5e09572f13 # v2.1.27 + uses: github/codeql-action/upload-sarif@95e58e9a2cdfd71adc6e0353d5c52f41a045d225 # v2.1.27 with: sarif_file: results.sarif From e514ba849d8a6c27864e6eb44a296ffdfd43ede1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 22 Apr 2026 09:56:44 -0600 Subject: [PATCH 1662/1718] MAINT: Bump the python-deps group in /requirements with 7 updates (#31306) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements/build_requirements.txt | 4 ++-- requirements/doc_requirements.txt | 8 ++++---- requirements/linter_requirements.txt | 2 +- requirements/release_requirements.txt | 2 +- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/requirements/build_requirements.txt b/requirements/build_requirements.txt index 18db99508a09..0c1aeec29cfb 100644 --- a/requirements/build_requirements.txt +++ b/requirements/build_requirements.txt @@ -1,5 +1,5 @@ -meson-python>=0.13.1 -Cython>=3.0.6 +meson-python>=0.19.0 +Cython>=3.2.4 ninja spin build diff --git a/requirements/doc_requirements.txt b/requirements/doc_requirements.txt index ea75103117a3..b08b64b78a6d 100644 --- a/requirements/doc_requirements.txt +++ b/requirements/doc_requirements.txt @@ -1,13 +1,13 @@ # doxygen required, use apt-get or dnf sphinx==7.2.6 numpydoc==1.10.0 -pydata-sphinx-theme>=0.15.2 +pydata-sphinx-theme>=0.16.1 sphinx-copybutton sphinx-design scipy matplotlib!=3.10.6 pandas -breathe>4.33.0 +breathe>=4.36.0 ipython!=8.1.0 # Needed for ipython>=8.17 # https://github.com/ipython/ipython/issues/14237 @@ -17,10 +17,10 @@ pickleshare towncrier toml -scipy-doctest>=1.8.0 +scipy-doctest>=2.2.0 # interactive documentation utilities # see https://github.com/jupyterlite/pyodide-kernel#compatibility -jupyterlite-sphinx>=0.18.0 +jupyterlite-sphinx>=0.22.1 # 
Works with Pyodide 0.27.1 jupyterlite-pyodide-kernel==0.5.2 diff --git a/requirements/linter_requirements.txt b/requirements/linter_requirements.txt index 5563a31fdcc9..e60446249ea6 100644 --- a/requirements/linter_requirements.txt +++ b/requirements/linter_requirements.txt @@ -1,5 +1,5 @@ # keep in sync with `environment.yml` cython-lint ruff==0.15.10 -GitPython>=3.1.30 +GitPython>=3.1.46 spin diff --git a/requirements/release_requirements.txt b/requirements/release_requirements.txt index 55079d795ed9..8462e8dedf14 100644 --- a/requirements/release_requirements.txt +++ b/requirements/release_requirements.txt @@ -3,7 +3,7 @@ # changelog.py pygithub -gitpython>=3.1.30 +gitpython>=3.1.46 # uploading release documentation packaging From 1794bb75675108eda13ac61c029a0a8580882fde Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Wed, 22 Apr 2026 09:15:24 -0600 Subject: [PATCH 1663/1718] MAINT: Use Python 3.12 for dependabot pip updates. Seems pytest has dropped support for older python version. Make dependabot use a newer version. This is only needed for the pip ecosystem. 
[skip azp] [skip cirrus] [skip actions] --- .github/dependabot.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 4d1e5f9161df..df242f33f5e8 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -16,6 +16,7 @@ updates: directory: /requirements schedule: interval: daily + python-version: "3.12" # Dependencies are requiring newer python cooldown: default-days: 7 # optional groups: From e02facfc8fd2f70d7ea77e6a3405268a8fbd7d9d Mon Sep 17 00:00:00 2001 From: M Bussonnier Date: Wed, 22 Apr 2026 20:22:22 +0200 Subject: [PATCH 1664/1718] DOC: Extra open parens in :math: polyvander (#31310) --- numpy/polynomial/polynomial.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/polynomial/polynomial.py b/numpy/polynomial/polynomial.py index 19cb12b9d02c..886cac65dfc3 100644 --- a/numpy/polynomial/polynomial.py +++ b/numpy/polynomial/polynomial.py @@ -1229,7 +1229,7 @@ def polyvander2d(x, y, deg): ------- vander2d : ndarray The shape of the returned matrix is ``x.shape + (order,)``, where - :math:`order = (deg[0]+1)*(deg([1]+1)`. The dtype will be the same + :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same as the converted `x` and `y`. See Also @@ -1309,7 +1309,7 @@ def polyvander3d(x, y, z, deg): ------- vander3d : ndarray The shape of the returned matrix is ``x.shape + (order,)``, where - :math:`order = (deg[0]+1)*(deg([1]+1)*(deg[2]+1)`. The dtype will + :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will be the same as the converted `x`, `y`, and `z`. See Also From 9546c7fe4d81a0a02ae224aa60a3c493db2ba92a Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Wed, 22 Apr 2026 13:02:54 -0600 Subject: [PATCH 1665/1718] REV: "MAINT: Use Python 3.12 for dependabot pip updates." 
(#31311) --- .github/dependabot.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index df242f33f5e8..4d1e5f9161df 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -16,7 +16,6 @@ updates: directory: /requirements schedule: interval: daily - python-version: "3.12" # Dependencies are requiring newer python cooldown: default-days: 7 # optional groups: From b7282420a6bea2c4c15b05f7300890a47460c796 Mon Sep 17 00:00:00 2001 From: riku-sakamoto <46015196+riku-sakamoto@users.noreply.github.com> Date: Thu, 23 Apr 2026 09:38:32 +0900 Subject: [PATCH 1666/1718] DOC: update datetime documentation to explain that "NaT" also requires specific unit (#31282) --- doc/source/reference/arrays.datetime.rst | 50 +++++++++++++++++------- 1 file changed, 36 insertions(+), 14 deletions(-) diff --git a/doc/source/reference/arrays.datetime.rst b/doc/source/reference/arrays.datetime.rst index 1d8201bbce1b..9adbe82e2728 100644 --- a/doc/source/reference/arrays.datetime.rst +++ b/doc/source/reference/arrays.datetime.rst @@ -95,8 +95,8 @@ as ``now``. NAT (not a time): - >>> np.datetime64('nat') - np.datetime64('NaT') + >>> np.datetime64('nat', 'D') + np.datetime64('NaT', 'D') The current time (UTC, default second precision): @@ -118,6 +118,12 @@ When creating an array of datetimes from a string, it is still possible to automatically select the unit from the inputs, by using the datetime type with generic units. +.. deprecated:: 2.5 + The generic units of `timedelta64` were deprecated in NumPy 2.5 and + will raise an error in the future. With this change, + ``NaT`` in `datetime64` is now required to have an explicit time unit. + + .. admonition:: Example .. 
try_examples:: @@ -218,8 +224,8 @@ data type also accepts the string "NAT" in place of the number for a "Not A Time >>> np.timedelta64(4, 'h') np.timedelta64(4,'h') - >>> np.timedelta64('nAt') - np.timedelta64('NaT') + >>> np.timedelta64('nAt', 'D') + np.timedelta64('NaT', 'D') Datetimes and Timedeltas work together to provide ways for simple datetime calculations. @@ -245,11 +251,11 @@ simple datetime calculations. >>> np.timedelta64(1,'W') % np.timedelta64(10,'D') np.timedelta64(7,'D') - >>> np.datetime64('nat') - np.datetime64('2009-01-01') + >>> np.datetime64('nat', 'D') - np.datetime64('2009-01-01') np.timedelta64('NaT','D') - >>> np.datetime64('2009-01-01') + np.timedelta64('nat') - np.datetime64('NaT') + >>> np.datetime64('2009-01-01') + np.timedelta64('nat', 'D') + np.datetime64('NaT', 'D') There are two Timedelta units ('Y', years and 'M', months) which are treated specially, because how much time they represent changes depending @@ -285,7 +291,8 @@ The Datetime and Timedelta data types support a large number of time units, as well as generic units which can be coerced into any of the other units based on input data. The generic units are deprecated since NumPy 2.5 -and will raise an error in the future. Migration guidance is provided in the `migration guide for deprecation of generic units`_ section below. +and will raise an error in the future. Migration guidance is provided +in the `migration guide for deprecation of generic units`_ section below. Datetimes are always stored with an epoch of 1970-01-01T00:00. This means the supported dates are @@ -349,7 +356,7 @@ The protocol is described in the following table: .. deprecated:: 2.5 - The generic units of `timedelta64` are deprecated since NumPy 2.5 and + The generic units of `timedelta64` are deprecated in NumPy 2.5 and will raise an error in the future. 
@@ -359,10 +366,10 @@ The protocol is described in the following table: >>> import numpy as np - >>> type(np.datetime64('NaT').item()) + >>> type(np.datetime64('NaT', 'D').item()) - >>> type(np.timedelta64('NaT').item()) + >>> type(np.timedelta64('NaT', 'D').item()) >>> type(np.timedelta64(123, 'ns').item()) @@ -375,7 +382,9 @@ The protocol is described in the following table: -In the case where conversion of `datetime64` and/or `timedelta64` is done against Python types like ``int``, ``float``, and ``str`` the corresponding return types will be ``np.str_``, ``np.int64`` and ``np.float64``. +In the case where conversion of `datetime64` and/or `timedelta64` is done +against Python types like ``int``, ``float``, and ``str`` the corresponding return types +will be ``np.str_``, ``np.int64`` and ``np.float64``. .. admonition:: Example @@ -653,9 +662,12 @@ Migration guide for deprecation of generic units The generic units of `timedelta64` are deprecated since NumPy 2.5 and will raise an error in the future. This section provides guidance on how to update code -that uses generic units of `timedelta64` to avoid future errors. +that uses generic units of `timedelta64` and `datetime64` to avoid future errors. -The straight forward way is to replace the generic unit with a specific time unit such as 'D' (day), 'h' (hour), 'm' (minute), 's' (second), etc. The choice of the specific time unit will depend on the context of your code and the level of precision you require. +The straight forward way is to replace the generic unit with a specific time unit +such as 'D' (day), 'h' (hour), 'm' (minute), 's' (second), etc. +The choice of the specific time unit will depend on the context of your code and +the level of precision you require. .. 
admonition:: Example @@ -686,3 +698,13 @@ The straight forward way is to replace the generic unit with a specific time uni >>> arr = np.ones(5, dtype='m8[s]') >>> np.testing.assert_allclose(arr, np.timedelta64(1, "s"), atol=np.timedelta64(0, "s")) + + Previously, `datetime64` allowed `NaT` without specifying a time unit. + With the deprecation of generic units, `NaT` is now required to have an explicit time unit + for consistency with other datetime and timedelta operations. + + >>> np.datetime64("NAT") + DeprecationWarning: The 'generic' unit for NumPy timedelta is deprecated, and will raise an error in the future. This includes implicit conversion of bare integers (e.g. `+ 1`).Please use a specific unit instead. + + >>> np.datetime64("NAT", "D") + np.datetime64('NaT', 'D') From 065874ccab620c3d467fe99854dad707f2000faa Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Thu, 23 Apr 2026 02:40:42 +0200 Subject: [PATCH 1667/1718] DOC/TYP: Move ``ndarray`` typing docs to a more appropriate section (#31313) --- numpy/typing/__init__.py | 43 ++++++++++++++++++++-------------------- 1 file changed, 21 insertions(+), 22 deletions(-) diff --git a/numpy/typing/__init__.py b/numpy/typing/__init__.py index 11d57f814551..59285c37bcbc 100644 --- a/numpy/typing/__init__.py +++ b/numpy/typing/__init__.py @@ -61,28 +61,6 @@ >>> np.array(array_like) array( at ...>, dtype=object) -ndarray -~~~~~~~ - -The `numpy.ndarray` class is a `generic type`_ that accepts two type arguments: - -1. The type of `numpy.ndarray.shape`, which must be a `tuple` of `int`, e.g. - ``tuple[int, int]`` (2-D shape) or ``tuple[()]`` (0-D shape). - The default shape is ``tuple[Any, ...]``, which represents an unknown shape with - *any* number of dimensions. - Currently, ``Literal`` ints or other more specific types are not supported. -2. The type of `numpy.ndarray.dtype`, which must be a subtype of `numpy.dtype` such as - ``numpy.dtype[numpy.float64]``. If omitted, it will default to ``numpy.dtype[Any]``. 
- -.. code-block:: python - - >>> import numpy as np - - >>> type ImageRGB = np.ndarray[tuple[int, int, int], np.dtype[np.uint8]] - >>> type Vector[S: np.generic] = np.ndarray[tuple[int], np.dtype[S]] - -.. _generic type: https://typing.python.org/en/latest/spec/generics.html - DTypeLike ~~~~~~~~~ @@ -176,6 +154,27 @@ def phase(x: np.complexfloating) -> np.floating: API --- +.. rubric:: ndarray + +The `numpy.ndarray` class is a `generic type`_ that accepts two type arguments: + +1. The type of `numpy.ndarray.shape`, which must be a `tuple` of `int`, e.g. + ``tuple[int, int]`` (2-D shape) or ``tuple[()]`` (0-D shape). + The default shape is ``tuple[Any, ...]``, which represents an unknown shape with + *any* number of dimensions. + Currently, ``Literal`` ints or other more specific types are not supported. +2. The type of `numpy.ndarray.dtype`, which must be a subtype of `numpy.dtype` such as + ``numpy.dtype[numpy.float64]``. If omitted, it will default to ``numpy.dtype[Any]``. + +.. code-block:: python + + >>> import numpy as np + + >>> type ImageRGB = np.ndarray[tuple[int, int, int], np.dtype[np.uint8]] + >>> type Vector[S: np.generic] = np.ndarray[tuple[int], np.dtype[S]] + +.. 
_generic type: https://typing.python.org/en/latest/spec/generics.html + """ # NOTE: The API section will be appended with additional entries # further down in this file From 3883a82b99418152aabcb0116c18ccaa36515e81 Mon Sep 17 00:00:00 2001 From: Guido Imperiale Date: Thu, 23 Apr 2026 01:46:20 +0100 Subject: [PATCH 1668/1718] Remove LICENSE workaround from pixi recipes (#31304) --- .github/workflows/pixi-packages.yml | 2 +- pixi-packages/asan/LICENSE.txt | 30 -------------------- pixi-packages/asan/pixi.toml | 7 ++--- pixi-packages/default/LICENSE.txt | 30 -------------------- pixi-packages/default/pixi.toml | 7 ++--- pixi-packages/freethreading/LICENSE.txt | 30 -------------------- pixi-packages/freethreading/pixi.toml | 7 ++--- pixi-packages/tsan-freethreading/LICENSE.txt | 30 -------------------- pixi-packages/tsan-freethreading/pixi.toml | 7 ++--- 9 files changed, 9 insertions(+), 141 deletions(-) delete mode 100644 pixi-packages/asan/LICENSE.txt delete mode 100644 pixi-packages/default/LICENSE.txt delete mode 100644 pixi-packages/freethreading/LICENSE.txt delete mode 100644 pixi-packages/tsan-freethreading/LICENSE.txt diff --git a/.github/workflows/pixi-packages.yml b/.github/workflows/pixi-packages.yml index 8408ae032bd3..dea9280b0f8f 100644 --- a/.github/workflows/pixi-packages.yml +++ b/.github/workflows/pixi-packages.yml @@ -40,7 +40,7 @@ jobs: - uses: prefix-dev/setup-pixi@1b2de7f3351f171c8b4dfeb558c639cb58ed4ec0 # v0.9.5 with: - pixi-version: v0.64.0 + pixi-version: v0.66.0 run-install: false - name: Build diff --git a/pixi-packages/asan/LICENSE.txt b/pixi-packages/asan/LICENSE.txt deleted file mode 100644 index f37a12cc4ccc..000000000000 --- a/pixi-packages/asan/LICENSE.txt +++ /dev/null @@ -1,30 +0,0 @@ -Copyright (c) 2005-2025, NumPy Developers. -All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials provided - with the distribution. - - * Neither the name of the NumPy Developers nor the names of any - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/pixi-packages/asan/pixi.toml b/pixi-packages/asan/pixi.toml index afe9edc50cba..a71b4b740c48 100644 --- a/pixi-packages/asan/pixi.toml +++ b/pixi-packages/asan/pixi.toml @@ -2,10 +2,7 @@ channels = ["https://prefix.dev/conda-forge"] platforms = ["linux-64", "linux-aarch64", "osx-arm64"] preview = ["pixi-build"] - -[package] -# Work-around to https://github.com/prefix-dev/pixi/issues/5557 -license-file = "LICENSE.txt" +requires-pixi = ">=0.66.0" [package.build] source.path = "../.." 
@@ -23,7 +20,7 @@ extra-args = ["-Csetup-args=-Db_sanitize=address", "-Csetup-args=-Dbuildtype=deb [package.host-dependencies] python.git = "https://github.com/python/cpython" python.subdirectory = "Tools/pixi-packages/asan" -python.rev = "15b216f30d0445469ec31bc7509fcc55a216ef7c" # v3.15.0a6 +python.rev = "55ea59e7dc35e1363b203ae4dd9cfc3a0ac0a844" # v3.15.0a8 meson-python = "*" cython = "*" diff --git a/pixi-packages/default/LICENSE.txt b/pixi-packages/default/LICENSE.txt deleted file mode 100644 index f37a12cc4ccc..000000000000 --- a/pixi-packages/default/LICENSE.txt +++ /dev/null @@ -1,30 +0,0 @@ -Copyright (c) 2005-2025, NumPy Developers. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials provided - with the distribution. - - * Neither the name of the NumPy Developers nor the names of any - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/pixi-packages/default/pixi.toml b/pixi-packages/default/pixi.toml index e3e201bece3e..0c17b5ba059f 100644 --- a/pixi-packages/default/pixi.toml +++ b/pixi-packages/default/pixi.toml @@ -2,10 +2,7 @@ channels = ["https://prefix.dev/conda-forge"] platforms = ["linux-64", "linux-aarch64", "osx-arm64"] preview = ["pixi-build"] - -[package] -# Work-around to https://github.com/prefix-dev/pixi/issues/5557 -license-file = "LICENSE.txt" +requires-pixi = ">=0.66.0" [package.build] source.path = "../.." @@ -31,7 +28,7 @@ python = "*" # Use cpython git tip # python.git = "https://github.com/python/cpython" # python.subdirectory = "Tools/pixi-packages/default" -# python.rev = "15b216f30d0445469ec31bc7509fcc55a216ef7c" # v3.15.0a6 +# python.rev = "55ea59e7dc35e1363b203ae4dd9cfc3a0ac0a844" # v3.15.0a8 meson-python = "*" cython = "*" diff --git a/pixi-packages/freethreading/LICENSE.txt b/pixi-packages/freethreading/LICENSE.txt deleted file mode 100644 index f37a12cc4ccc..000000000000 --- a/pixi-packages/freethreading/LICENSE.txt +++ /dev/null @@ -1,30 +0,0 @@ -Copyright (c) 2005-2025, NumPy Developers. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. 
- - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials provided - with the distribution. - - * Neither the name of the NumPy Developers nor the names of any - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/pixi-packages/freethreading/pixi.toml b/pixi-packages/freethreading/pixi.toml index 372fcbaa2c48..1f3888f9a967 100644 --- a/pixi-packages/freethreading/pixi.toml +++ b/pixi-packages/freethreading/pixi.toml @@ -2,10 +2,7 @@ channels = ["https://prefix.dev/conda-forge"] platforms = ["linux-64", "linux-aarch64", "osx-arm64"] preview = ["pixi-build"] - -[package] -# Work-around to https://github.com/prefix-dev/pixi/issues/5557 -license-file = "LICENSE.txt" +requires-pixi = ">=0.66.0" [package.build] source.path = "../.." 
@@ -31,7 +28,7 @@ python-freethreading = "*" # Use cpython git tip # python.git = "https://github.com/python/cpython" # python.subdirectory = "Tools/pixi-packages/freethreading" -# python.rev = "15b216f30d0445469ec31bc7509fcc55a216ef7c" # v3.15.0a6 +# python.rev = "55ea59e7dc35e1363b203ae4dd9cfc3a0ac0a844" # v3.15.0a8 meson-python = "*" cython = "*" diff --git a/pixi-packages/tsan-freethreading/LICENSE.txt b/pixi-packages/tsan-freethreading/LICENSE.txt deleted file mode 100644 index f37a12cc4ccc..000000000000 --- a/pixi-packages/tsan-freethreading/LICENSE.txt +++ /dev/null @@ -1,30 +0,0 @@ -Copyright (c) 2005-2025, NumPy Developers. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials provided - with the distribution. - - * Neither the name of the NumPy Developers nor the names of any - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/pixi-packages/tsan-freethreading/pixi.toml b/pixi-packages/tsan-freethreading/pixi.toml index 8dab473f605d..8beef908b021 100644 --- a/pixi-packages/tsan-freethreading/pixi.toml +++ b/pixi-packages/tsan-freethreading/pixi.toml @@ -2,10 +2,7 @@ channels = ["https://prefix.dev/conda-forge"] platforms = ["linux-64", "linux-aarch64", "osx-arm64"] preview = ["pixi-build"] - -[package] -# Work-around to https://github.com/prefix-dev/pixi/issues/5557 -license-file = "LICENSE.txt" +requires-pixi = ">=0.66.0" [package.build] source.path = "../.." @@ -25,7 +22,7 @@ extra-args = ["-Csetup-args=-Db_sanitize=thread", "-Csetup-args=-Dbuildtype=debu [package.host-dependencies] python.git = "https://github.com/python/cpython" python.subdirectory = "Tools/pixi-packages/tsan-freethreading" -python.rev = "15b216f30d0445469ec31bc7509fcc55a216ef7c" # v3.15.0a6 +python.rev = "55ea59e7dc35e1363b203ae4dd9cfc3a0ac0a844" # v3.15.0a8 meson-python = "*" cython = "*" From 1bcc77c7f02b0cd119f089c46daa416349f04bb8 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Wed, 22 Apr 2026 19:12:09 -0600 Subject: [PATCH 1669/1718] MAINT: Update some requirements. 
(#31312) --- environment.yml | 2 +- requirements/doc_requirements.txt | 4 ++-- requirements/emscripten_test_requirements.txt | 4 ++-- requirements/pkgconf_requirements.txt | 2 +- requirements/test_requirements.txt | 4 ++-- requirements/typing_requirements.txt | 2 +- 6 files changed, 9 insertions(+), 9 deletions(-) diff --git a/environment.yml b/environment.yml index 38801afcb52c..4f361f43de22 100644 --- a/environment.yml +++ b/environment.yml @@ -24,7 +24,7 @@ dependencies: - pytest-xdist - hypothesis # For type annotations - - mypy=1.20.0 + - mypy==1.20.2 - orjson # makes mypy faster # For building docs - sphinx>=4.5.0 diff --git a/requirements/doc_requirements.txt b/requirements/doc_requirements.txt index b08b64b78a6d..d1dd30ac46e8 100644 --- a/requirements/doc_requirements.txt +++ b/requirements/doc_requirements.txt @@ -5,10 +5,10 @@ pydata-sphinx-theme>=0.16.1 sphinx-copybutton sphinx-design scipy -matplotlib!=3.10.6 +matplotlib>=3.10.8 pandas breathe>=4.36.0 -ipython!=8.1.0 +ipython>=9.12.0 # Needed for ipython>=8.17 # https://github.com/ipython/ipython/issues/14237 pickleshare diff --git a/requirements/emscripten_test_requirements.txt b/requirements/emscripten_test_requirements.txt index 71e736ceed90..4c9385422f3f 100644 --- a/requirements/emscripten_test_requirements.txt +++ b/requirements/emscripten_test_requirements.txt @@ -1,4 +1,4 @@ -hypothesis==6.151.9 -pytest==9.0.2 +hypothesis==6.152.1 +pytest==9.0.3 tzdata pytest-xdist diff --git a/requirements/pkgconf_requirements.txt b/requirements/pkgconf_requirements.txt index 6d366c39a7f2..444f63b7f213 100644 --- a/requirements/pkgconf_requirements.txt +++ b/requirements/pkgconf_requirements.txt @@ -1 +1 @@ -pkgconf==2.5.1.post1 \ No newline at end of file +pkgconf==2.5.1.post1 # post2 has problems. 
diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index 6ade6d771052..3223305e07de 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -1,6 +1,6 @@ Cython -hypothesis==6.151.9 -pytest==9.0.2 +hypothesis==6.152.1 +pytest==9.0.3 pytest-cov==7.1.0 meson ninja; sys_platform != "emscripten" diff --git a/requirements/typing_requirements.txt b/requirements/typing_requirements.txt index 7e02a17db1ea..c631ced8ac3f 100644 --- a/requirements/typing_requirements.txt +++ b/requirements/typing_requirements.txt @@ -2,5 +2,5 @@ -r test_requirements.txt -mypy==1.20.0 +mypy==1.20.2 pyrefly==0.61.0 From a253537e7a06e8e41a96d17b3f0ece62d93071ad Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 23 Apr 2026 10:39:17 +0200 Subject: [PATCH 1670/1718] BUG: Prevent deadlock due to downstream importing NumPy in dlopen (#31303) Downstream should probably never import numpy (i.e. activate the C-API) during dlopen (rather than the Python module init function). But if it does so, this check introduced a deadlock regression because dlopen has a recursive lock, but this breaks if the matmul is called and causes threads to spawn that also need to dlopen. So, as a hot-fix, just only run the check on MacOS, since we are not currently aware of reports on other system (possible or not), although this did happen with certain OpenBLAS versions (and not just accelerate). (I cannot fully rule out that there won't be regressions on some linux setups, but they were not yet reported. -- The "limit to 1 thread" solution is an alternative as well.) --- numpy/__init__.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/numpy/__init__.py b/numpy/__init__.py index ce0452ffe8d0..be46f6b09c75 100644 --- a/numpy/__init__.py +++ b/numpy/__init__.py @@ -848,7 +848,14 @@ def _mac_os_check(): del _mac_os_check def blas_fpe_check(): - # Check if BLAS adds spurious FPEs, mostly seen on M4 arms with Accelerate. 
+ if sys.platform != "darwin": + # We currently assume this is limited to MacOS as downstream NumPy + # import during dlopen caused a deadlock regression: gh-31284 + return + + # Check if BLAS adds spurious FPEs, seen on M4 arms with Accelerate. + # In this case we disable FPE reporting since the use of SME poisons + # it and Accelerate doesn't sanitize them. with errstate(all='raise'): x = ones((20, 20)) try: From e751d2889df4321ec72d378cd4dfbe65e53235e1 Mon Sep 17 00:00:00 2001 From: Marten Henric van Kerkwijk Date: Sat, 18 Apr 2026 18:22:12 +0200 Subject: [PATCH 1671/1718] MAINT: use new dtype in view creation rather than going through two views. This includes moving the new dtype checking stuff to common.c and use that in dtype setting as well. --- numpy/_core/src/multiarray/common.c | 97 +++++++++++++++ numpy/_core/src/multiarray/common.h | 11 ++ numpy/_core/src/multiarray/convert.c | 177 ++++++++++++++------------- numpy/_core/src/multiarray/getset.c | 105 ++-------------- 4 files changed, 212 insertions(+), 178 deletions(-) diff --git a/numpy/_core/src/multiarray/common.c b/numpy/_core/src/multiarray/common.c index 954179c66cb3..80423b5a8174 100644 --- a/numpy/_core/src/multiarray/common.c +++ b/numpy/_core/src/multiarray/common.c @@ -370,6 +370,103 @@ _may_have_objects(PyArray_Descr *dtype) PyDataType_FLAGCHK(base, NPY_ITEM_HASOBJECT) ); } +/* + * Check whether self can be viewed with the given dtype. + * If so, return a new reference to the dtype (possibly changed). + * If needed, also determine new dimensions and strides for the last axis. + * If no change is needed, newlastdim is set to -1. + */ +NPY_NO_EXPORT PyArray_Descr* +_check_compatibility_with_new_dtype( + PyArrayObject *self, PyArray_Descr *type, + npy_intp *newlastdim, npy_intp *newlaststride) +{ + PyArray_Descr *dtype = PyArray_DESCR(self); + *newlastdim = -1; /* By default, no change needed. */ + + /* Check that we are not reinterpreting memory containing Objects. 
*/ + if (_may_have_objects(dtype) || _may_have_objects(type)) { + if (npy_cache_import_runtime( + "numpy._core._internal", "_view_is_safe", + &npy_runtime_imports._view_is_safe) == -1) { + return NULL; + } + PyObject *safe = PyObject_CallFunctionObjArgs( + npy_runtime_imports._view_is_safe, dtype, type, NULL); + if (safe == NULL) { + return NULL; + } + Py_DECREF(safe); + } + + if (type->elsize != dtype->elsize) { + /* + * Viewing as an unsized void implies a void dtype matching + * the size of the current dtype. + */ + if (type->type_num == NPY_VOID && PyDataType_ISUNSIZED(type)) { + PyArray_Descr *newtype = PyArray_DescrNew(type); + if (newtype == NULL) { + return NULL; + } + newtype->elsize = dtype->elsize; + return newtype; + } + /* + * Otherwise, changing the size of the dtype results in a shape change, + * which we signal by setting newlastdim and newlaststride. + */ + /* Check forbidden cases. */ + if (PyDataType_HASSUBARRAY(type)) { + PyErr_SetString(PyExc_ValueError, + "Changing the dtype to a subarray type is only supported " + "if the total itemsize is unchanged"); + return NULL; + } + int nd = PyArray_NDIM(self); + if (nd == 0) { + PyErr_SetString(PyExc_ValueError, + "Changing the dtype of a 0d array is only supported " + "if the itemsize is unchanged"); + return NULL; + } + /* Resize on last axis only (we have nd>0 here). */ + npy_intp lastdim = PyArray_DIMS(self)[nd-1]; + if (lastdim != 1 && PyArray_SIZE(self) != 0 && + PyArray_STRIDES(self)[nd-1] != dtype->elsize) { + PyErr_SetString(PyExc_ValueError, + "To change to a dtype of a different size, the last axis " + "must be contiguous"); + return NULL; + } + if (type->elsize < dtype->elsize) { + /* If it is compatible, increase the size of the last axis. 
*/ + if (type->elsize == 0 || dtype->elsize % type->elsize != 0) { + PyErr_SetString(PyExc_ValueError, + "When changing to a smaller dtype, its size must be a " + "divisor of the size of original dtype"); + return NULL; + } + *newlastdim = (dtype->elsize / type->elsize) * lastdim; + } + else /* type->elsize > dtype->elsize */ { + /* If it is compatible, decrease the size of the relevant axis. */ + npy_intp lastsize = lastdim * dtype->elsize; + if (lastsize % type->elsize != 0) { + PyErr_SetString(PyExc_ValueError, + "When changing to a larger dtype, its size must be a " + "divisor of the total size in bytes of the last axis " + "of the array."); + return NULL; + } + *newlastdim = lastsize / type->elsize; + } + *newlaststride = type->elsize; + } + Py_INCREF(type); + return type; +} + /* * Make a new empty array, of the passed size, of a type that takes the * priority of ap1 and ap2 into account. diff --git a/numpy/_core/src/multiarray/common.h b/numpy/_core/src/multiarray/common.h index f4d0e595aaa5..b0218c48a39d 100644 --- a/numpy/_core/src/multiarray/common.h +++ b/numpy/_core/src/multiarray/common.h @@ -95,6 +95,17 @@ _unpack_field_index( NPY_NO_EXPORT int _may_have_objects(PyArray_Descr *dtype); +/* + * Check whether self can be viewed with the given dtype. + * If so, return a new reference to the dtype (possibly changed). + * If needed, also determine new dimensions and strides for the last axis. + * If no change is needed, newlastdim is set to -1. 
+ */ +NPY_NO_EXPORT PyArray_Descr* +_check_compatibility_with_new_dtype( + PyArrayObject *self, PyArray_Descr *type, + npy_intp *newlastdim, npy_intp *newlaststride); + /* * Returns -1 and sets an exception if *index is an invalid index for * an array of size max_item, otherwise adjusts it in place to be diff --git a/numpy/_core/src/multiarray/convert.c b/numpy/_core/src/multiarray/convert.c index 88056930f11a..c5197b76169c 100644 --- a/numpy/_core/src/multiarray/convert.c +++ b/numpy/_core/src/multiarray/convert.c @@ -533,10 +533,13 @@ PyArray_NewCopy(PyArrayObject *obj, NPY_ORDER order) NPY_NO_EXPORT PyObject * PyArray_View(PyArrayObject *self, PyArray_Descr *type, PyTypeObject *pytype) { - PyArrayObject *ret = NULL; - PyArray_Descr *dtype; + PyObject *ret = NULL; + int nd = PyArray_NDIM(self); + npy_intp *dims = PyArray_DIMS(self); + npy_intp *strides = PyArray_STRIDES(self); + PyArray_Descr *dtype = PyArray_DESCR(self); + int flags = PyArray_FLAGS(self); PyTypeObject *subtype; - int flags; if (pytype) { subtype = pytype; @@ -545,42 +548,35 @@ PyArray_View(PyArrayObject *self, PyArray_Descr *type, PyTypeObject *pytype) subtype = Py_TYPE(self); } - dtype = PyArray_DESCR(self); - flags = PyArray_FLAGS(self); - if (type == NULL) { /* No dtype change: just create the view */ Py_INCREF(dtype); - ret = (PyArrayObject *)PyArray_NewFromDescr_int( - subtype, dtype, - PyArray_NDIM(self), PyArray_DIMS(self), - PyArray_STRIDES(self), PyArray_DATA(self), + return PyArray_NewFromDescr_int( + subtype, dtype, nd, dims, strides, PyArray_DATA(self), flags, (PyObject *)self, (PyObject *)self, _NPY_ARRAY_ENSURE_DTYPE_IDENTITY); - return (PyObject *)ret; } /* * Changing dtype on a subclass. We support 4 paths: * - * 1. subclass overrides _set_dtype: create subclass view first, + * 1. If _set_dtype is None: create a new view with the new dtype. + * This is the future: __array_finalize__ sees final dtype and shape. + * 2. 
subclass overrides _set_dtype: create subclass view first, * then call _set_dtype (subclass handles dtype change). - * If _set_dtype is set to None, we use path 3 below. - * 2. subclass overrides the dtype descriptor (e.g. property with + * 3. subclass overrides the dtype descriptor (e.g. property with * setter): create subclass view first, use the setter, but * emit a deprecation asking to implement _set_dtype instead. - * 3. If _set_dtype is None: create an ndarray base view, set dtype - * internally, then create the subclass view if needed: - * __array_finalize__ sees the final dtype+shape. - * 4. If _set_dtype (and dtype) are not set, call `__array_finalize__` + * 4. If _set_dtype and dtype are not set, call `__array_finalize__` * with the old dtype and forcibly update the dtype (a subclass will be - * unaware of the change) which is the unfortunate historic behavior. + * unaware of the change). This is the unfortunate historic behavior. * - * (Base class ndarray has no __array_finalize__ so effectively uses path 4.) + * (Base class ndarray uses path 1, but has no __array_finalize__, + * so it is the same as paths 2 and 4.) 
*/ - int use_dtype_in_finalize = 0; - int use_set_dtype = 0; - int use_dtype_prop = 0; + npy_bool use_dtype_in_finalize = NPY_TRUE; + npy_bool use_set_dtype = NPY_FALSE; + npy_bool use_dtype_prop = NPY_FALSE; if (subtype != &PyArray_Type) { PyObject *sub_set_dtype; @@ -589,10 +585,8 @@ PyArray_View(PyArrayObject *self, PyArray_Descr *type, PyTypeObject *pytype) npy_interned_str._set_dtype, &sub_set_dtype) < 0) { goto finish; } - if (sub_set_dtype == Py_None) { - use_dtype_in_finalize = 1; - } - else { + if (sub_set_dtype != Py_None) { + use_dtype_in_finalize = NPY_FALSE; use_set_dtype = (sub_set_dtype != NULL && sub_set_dtype != npy_static_pydata.ndarray_set_dtype); } @@ -612,82 +606,95 @@ PyArray_View(PyArrayObject *self, PyArray_Descr *type, PyTypeObject *pytype) } } - if (use_set_dtype || use_dtype_prop) { + if (use_dtype_in_finalize) { /* - * Paths 1 & 2: create subclass view with original dtype, - * then let the subclass handle the dtype change. + * Path 1: subclass lives in the future and its __array_finalize__ + * can handle getting the correct dtype+shape. */ - Py_INCREF(dtype); - ret = (PyArrayObject *)PyArray_NewFromDescr_int( - subtype, dtype, - PyArray_NDIM(self), PyArray_DIMS(self), - PyArray_STRIDES(self), PyArray_DATA(self), - flags, (PyObject *)self, (PyObject *)self, - _NPY_ARRAY_ENSURE_DTYPE_IDENTITY); - if (ret == NULL) { - goto finish; + npy_intp newlastdim, newlaststride; + /* Check whether the type is compatible. 
*/ + Py_SETREF(type, _check_compatibility_with_new_dtype( + self, type, &newlastdim, &newlaststride)); + if (type == NULL) { + return NULL; } - if (use_set_dtype) { - PyObject *res = PyObject_CallMethodOneArg( - (PyObject *)ret, - npy_interned_str._set_dtype, (PyObject *)type); - if (res == NULL) { - Py_CLEAR(ret); - goto finish; - } - Py_DECREF(res); + /* Take view with old or adjusted dims (steals reference to type) */ + if (newlastdim < 0) { + return PyArray_NewFromDescr_int(subtype, type, + nd, dims, strides, PyArray_DATA(self), + flags, (PyObject *)self, (PyObject *)self, 0); } else { - if (PyObject_GenericSetAttr( - (PyObject *)ret, npy_interned_str.dtype, - (PyObject *)type) < 0) { - Py_CLEAR(ret); - goto finish; - } - /* DEPRECATED 2026-04-13, NumPy 2.5 */ - if (DEPRECATE( - "numpy.ndarray.view() used a custom `dtype` setter " - "to change the dtype of the view. Subclasses should " - "implement `_set_dtype` instead.") < 0) { - Py_CLEAR(ret); + NPY_ALLOC_WORKSPACE(newdims, npy_intp, 2 * 4, 2 * nd); + if (newdims == NULL) { goto finish; } + npy_intp *newstrides = newdims + nd; + memcpy(newdims, dims, (nd-1)*sizeof(npy_intp)); + memcpy(newstrides, strides, (nd-1)*sizeof(npy_intp)); + newdims[nd-1] = newlastdim; + newstrides[nd-1] = newlaststride; + ret = PyArray_NewFromDescr_int(subtype, type, + nd, newdims, newstrides, PyArray_DATA(self), + flags, (PyObject *)self, (PyObject *)self, 0); + npy_free_workspace(newdims); + return ret; } - goto finish; } - - /* Path 3+4: create view and set dtype internally */ + /* + * Other paths: first create a view with the old dtype. + */ Py_INCREF(dtype); - ret = (PyArrayObject *)PyArray_NewFromDescr_int( - use_dtype_in_finalize ? 
&PyArray_Type : subtype, dtype, - PyArray_NDIM(self), PyArray_DIMS(self), - PyArray_STRIDES(self), PyArray_DATA(self), + ret = PyArray_NewFromDescr_int( + subtype, dtype, nd, dims, strides, PyArray_DATA(self), flags, (PyObject *)self, (PyObject *)self, _NPY_ARRAY_ENSURE_DTYPE_IDENTITY); if (ret == NULL) { goto finish; } - if (array_descr_set_internal(ret, (PyObject *)type) < 0) { - Py_CLEAR(ret); - goto finish; - } - /* - * Path 3: `_set_dtype is None` and `ret` is a base-class array - * with correct dtype+shape, this will call `__array_finalize__` - * with the final dtype+shape. - */ - if (use_dtype_in_finalize) { - Py_INCREF(PyArray_DESCR(ret)); - Py_SETREF(ret, (PyArrayObject *)PyArray_NewFromDescr_int( - subtype, PyArray_DESCR(ret), - PyArray_NDIM(ret), PyArray_DIMS(ret), - PyArray_STRIDES(ret), PyArray_DATA(ret), - PyArray_FLAGS(ret), (PyObject *)self, (PyObject *)self, - _NPY_ARRAY_ENSURE_DTYPE_IDENTITY)); + if (use_set_dtype) { + /* + * Path 2: subclass lives in future but needs to set dtype itself. + */ + PyObject *res = PyObject_CallMethodOneArg( + ret, npy_interned_str._set_dtype, (PyObject *)type); + if (res == NULL) { + Py_CLEAR(ret); + goto finish; + } + Py_DECREF(res); + } + else if (use_dtype_prop) { + /* + * Path 3: subclass overrides dtype property. + */ + if (PyObject_GenericSetAttr( + ret, npy_interned_str.dtype, (PyObject *)type) < 0) { + Py_CLEAR(ret); + goto finish; + } + /* DEPRECATED 2026-04-13, NumPy 2.5 */ + if (DEPRECATE( + "numpy.ndarray.view() used a custom `dtype` setter " + "to change the dtype of the view. Subclasses should " + "implement `_set_dtype` instead.") < 0) { + Py_CLEAR(ret); + goto finish; + } + } + else { + /* + * Path 4: set dtype internally. 
+ */ + if (array_descr_set_internal( + (PyArrayObject*)ret, (PyObject *)type) < 0) { + Py_CLEAR(ret); + goto finish; + } } finish: Py_DECREF(type); - return (PyObject *)ret; + return ret; } diff --git a/numpy/_core/src/multiarray/getset.c b/numpy/_core/src/multiarray/getset.c index 721d7a73738c..880e6b86de88 100644 --- a/numpy/_core/src/multiarray/getset.c +++ b/numpy/_core/src/multiarray/getset.c @@ -353,98 +353,17 @@ array_descr_set_internal(PyArrayObject *self, PyObject *arg) "invalid data-type for array"); return -1; } - - /* check that we are not reinterpreting memory containing Objects. */ - if (_may_have_objects(PyArray_DESCR(self)) || _may_have_objects(newtype)) { - PyObject *safe; - - if (npy_cache_import_runtime( - "numpy._core._internal", "_view_is_safe", - &npy_runtime_imports._view_is_safe) == -1) { - goto fail; - } - - safe = PyObject_CallFunction(npy_runtime_imports._view_is_safe, - "OO", PyArray_DESCR(self), newtype); - if (safe == NULL) { - goto fail; - } - Py_DECREF(safe); - } - - /* - * Viewing as an unsized void implies a void dtype matching the size of the - * current dtype. 
- */ - if (newtype->type_num == NPY_VOID && - PyDataType_ISUNSIZED(newtype) && - newtype->elsize != PyArray_ITEMSIZE(self)) { - PyArray_DESCR_REPLACE(newtype); - if (newtype == NULL) { - return -1; - } - newtype->elsize = PyArray_ITEMSIZE(self); - } - - /* Changing the size of the dtype results in a shape change */ - if (newtype->elsize != PyArray_ITEMSIZE(self)) { - /* forbidden cases */ - if (PyArray_NDIM(self) == 0) { - PyErr_SetString(PyExc_ValueError, - "Changing the dtype of a 0d array is only supported " - "if the itemsize is unchanged"); - goto fail; - } - else if (PyDataType_HASSUBARRAY(newtype)) { - PyErr_SetString(PyExc_ValueError, - "Changing the dtype to a subarray type is only supported " - "if the total itemsize is unchanged"); - goto fail; - } - - /* resize on last axis only */ - int axis = PyArray_NDIM(self) - 1; - if (PyArray_DIMS(self)[axis] != 1 && - PyArray_SIZE(self) != 0 && - PyArray_STRIDES(self)[axis] != PyArray_ITEMSIZE(self)) { - PyErr_SetString(PyExc_ValueError, - "To change to a dtype of a different size, the last axis " - "must be contiguous"); - goto fail; - } - - npy_intp newdim; - - if (newtype->elsize < PyArray_ITEMSIZE(self)) { - /* if it is compatible, increase the size of the last axis */ - if (newtype->elsize == 0 || - PyArray_ITEMSIZE(self) % newtype->elsize != 0) { - PyErr_SetString(PyExc_ValueError, - "When changing to a smaller dtype, its size must be a " - "divisor of the size of original dtype"); - goto fail; - } - newdim = PyArray_ITEMSIZE(self) / newtype->elsize; - PyArray_DIMS(self)[axis] *= newdim; - PyArray_STRIDES(self)[axis] = newtype->elsize; - } - else /* newtype->elsize > PyArray_ITEMSIZE(self) */ { - /* if it is compatible, decrease the size of the relevant axis */ - newdim = PyArray_DIMS(self)[axis] * PyArray_ITEMSIZE(self); - if ((newdim % newtype->elsize) != 0) { - PyErr_SetString(PyExc_ValueError, - "When changing to a larger dtype, its size must be a " - "divisor of the total size in bytes of the last axis 
" - "of the array."); - goto fail; - } - PyArray_DIMS(self)[axis] = newdim / newtype->elsize; - PyArray_STRIDES(self)[axis] = newtype->elsize; - } + /* Check dtype and possibly give new dim & stride for last axis */ + npy_intp newlastdim, newlaststride; + Py_SETREF(newtype, _check_compatibility_with_new_dtype( + self, newtype, &newlastdim, &newlaststride)); + if (newtype == NULL) { + return -1; } /* Viewing as a subarray increases the number of dimensions */ if (PyDataType_HASSUBARRAY(newtype)) { + assert(newlastdim < 0); /* not allowed for subarrays */ /* * create new array object from data and update * dimensions, strides and descr from it @@ -480,15 +399,15 @@ array_descr_set_internal(PyArrayObject *self, PyObject *arg) Py_INCREF(newtype); Py_DECREF(temp); } - + else if (newlastdim >= 0) { + int lastaxis = PyArray_NDIM(self) - 1; + PyArray_DIMS(self)[lastaxis] = newlastdim; + PyArray_STRIDES(self)[lastaxis] = newlaststride; + } Py_DECREF(PyArray_DESCR(self)); ((PyArrayObject_fields *)self)->descr = newtype; PyArray_UpdateFlags(self, NPY_ARRAY_UPDATE_ALL); return 0; - - fail: - Py_DECREF(newtype); - return -1; } static int From 137c0797a132c3a48820de695cbc479bd62eda0b Mon Sep 17 00:00:00 2001 From: Marten Henric van Kerkwijk Date: Sun, 19 Apr 2026 11:38:11 +0200 Subject: [PATCH 1672/1718] MAINT: simplify view implementations in MaskedArray and MaskedRecords. Possible after the changes in ndarray.view(). --- numpy/ma/core.py | 26 +++++------------------- numpy/ma/mrecords.py | 47 +++++++++++--------------------------------- 2 files changed, 17 insertions(+), 56 deletions(-) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 560c7dcefdcf..7b39da8d9461 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -3256,28 +3256,12 @@ def view(self, dtype=None, type=None, fill_value=None): results. 
""" - if dtype is None: - if type is None: - output = ndarray.view(self) - else: - output = ndarray.view(self, type) - elif type is None: - try: - if issubclass(dtype, ndarray): - output = ndarray.view(self, dtype) - dtype = None - else: - output = ndarray.view(self, dtype) - except TypeError: - output = ndarray.view(self, dtype) - else: - output = ndarray.view(self, dtype, type) + if type is None and (isinstance(dtype, builtins.type) + and issubclass(dtype, ndarray)): + type = dtype + dtype = None - # also make the mask be a view (so attr changes to the view's - # mask do no affect original object's mask) - # (especially important to avoid affecting np.masked singleton) - if getmask(output) is not nomask: - output._mask = output._mask.view() + output = super().view(*[a for a in (dtype, type) if a is not None]) # Make sure to reset the _fill_value if needed if getattr(output, '_fill_value', None) is not None: diff --git a/numpy/ma/mrecords.py b/numpy/ma/mrecords.py index d35bb9b79925..a7248617386d 100644 --- a/numpy/ma/mrecords.py +++ b/numpy/ma/mrecords.py @@ -12,7 +12,7 @@ # or whatever restricted keywords. An idea would be to no bother in the # first place, and then rename the invalid fields with a trailing # underscore. Maybe we could just overload the parser function ? - +import builtins import warnings import numpy as np @@ -353,40 +353,17 @@ def view(self, dtype=None, type=None): Returns a view of the mrecarray. """ - # OK, basic copy-paste from MaskedArray.view. - if dtype is None: - if type is None: - output = np.ndarray.view(self) - else: - output = np.ndarray.view(self, type) - # Here again. 
- elif type is None: - try: - if issubclass(dtype, np.ndarray): - output = np.ndarray.view(self, dtype) - else: - output = np.ndarray.view(self, dtype) - # OK, there's the change - except TypeError: - dtype = np.dtype(dtype) - # we need to revert to MaskedArray, but keeping the possibility - # of subclasses (eg, TimeSeriesRecords), so we'll force a type - # set to the first parent - if dtype.fields is None: - basetype = self.__class__.__bases__[0] - output = self.__array__().view(dtype, basetype) - output._update_from(self) - else: - output = np.ndarray.view(self, dtype) - output._fill_value = None - else: - output = np.ndarray.view(self, dtype, type) - # Update the mask, just like in MaskedArray.view - if (getattr(output, '_mask', ma.nomask) is not ma.nomask): - mdtype = ma.make_mask_descr(output.dtype) - output._mask = self._mask.view(mdtype, np.ndarray) - output._mask = output._mask.reshape(output.shape) - return output + # If the new dtype has no fields, we need to revert to MaskedArray, + # but keep the possibility of subclasses (eg, TimeSeriesRecords). + # So we'll force a type set to the first parent. 
+ if (type is None + and dtype is not None + and not (isinstance(dtype, builtins.type) + and issubclass(dtype, np.ndarray)) + and (dtype := np.dtype(dtype)).fields is None): + type = self.__class__.__bases__[0] + + return super().view(*[a for a in (dtype, type) if a is not None]) def harden_mask(self): """ From ca4aa145deac9886f1f2f77e275d18d52419c323 Mon Sep 17 00:00:00 2001 From: Ed Bennett Date: Thu, 23 Apr 2026 17:26:21 +0100 Subject: [PATCH 1673/1718] DOC: use base min and max rather than aliases amin and amax in see-also (#31315) --- numpy/_core/fromnumeric.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/_core/fromnumeric.py b/numpy/_core/fromnumeric.py index e5f4ec0e77f5..2d99bdcad011 100644 --- a/numpy/_core/fromnumeric.py +++ b/numpy/_core/fromnumeric.py @@ -3058,7 +3058,7 @@ def max(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, See Also -------- - amin : + min : The minimum value of an array along a given axis, propagating any NaNs. nanmax : The maximum value of an array along a given axis, ignoring any NaNs. @@ -3196,7 +3196,7 @@ def min(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, See Also -------- - amax : + max : The maximum value of an array along a given axis, propagating any NaNs. nanmin : The minimum value of an array along a given axis, ignoring any NaNs. 
From a9c745972cd7c76c6135005403a763e2ed6da60c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 23 Apr 2026 14:23:30 -0600 Subject: [PATCH 1674/1718] MAINT: Bump astral-sh/setup-uv from 8.0.0 to 8.1.0 (#31318) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/stubtest.yml | 2 +- .github/workflows/typecheck.yml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/stubtest.yml b/.github/workflows/stubtest.yml index 295ca31f3e13..a8298e0370f0 100644 --- a/.github/workflows/stubtest.yml +++ b/.github/workflows/stubtest.yml @@ -43,7 +43,7 @@ jobs: fetch-tags: true persist-credentials: false - - uses: astral-sh/setup-uv@cec208311dfd045dd5311c1add060b2062131d57 # v8.0.0 + - uses: astral-sh/setup-uv@08807647e7069bb48b6ef5acd8ec9567f424441b # v8.1.0 with: python-version: ${{ matrix.py }} activate-environment: true diff --git a/.github/workflows/typecheck.yml b/.github/workflows/typecheck.yml index 6f6ce50b9440..2ef3c21c60f4 100644 --- a/.github/workflows/typecheck.yml +++ b/.github/workflows/typecheck.yml @@ -61,7 +61,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - - uses: astral-sh/setup-uv@cec208311dfd045dd5311c1add060b2062131d57 # v8.0.0 + - uses: astral-sh/setup-uv@08807647e7069bb48b6ef5acd8ec9567f424441b # v8.1.0 with: python-version: ${{ matrix.os_python[1] }} activate-environment: true @@ -94,7 +94,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - - uses: astral-sh/setup-uv@cec208311dfd045dd5311c1add060b2062131d57 # v8.0.0 + - uses: astral-sh/setup-uv@08807647e7069bb48b6ef5acd8ec9567f424441b # v8.1.0 with: activate-environment: true - name: Install dependencies From fa6d67432de1e7d5868c285b4076495d3614d227 Mon Sep 17 00:00:00 2001 From: Breno Favaretto Date: Thu, 23 Apr 2026 19:44:01 
-0300 Subject: [PATCH 1675/1718] BUG: Fix segfault in nditer.multi_index when __getitem__ raises (#31314) --- numpy/_core/src/multiarray/nditer_pywrap.c | 3 +++ numpy/_core/tests/test_nditer.py | 16 ++++++++++++++++ 2 files changed, 19 insertions(+) diff --git a/numpy/_core/src/multiarray/nditer_pywrap.c b/numpy/_core/src/multiarray/nditer_pywrap.c index 992bc013af3a..2770dfa650d3 100644 --- a/numpy/_core/src/multiarray/nditer_pywrap.c +++ b/numpy/_core/src/multiarray/nditer_pywrap.c @@ -1676,6 +1676,9 @@ npyiter_multi_index_set( } for (idim = 0; idim < ndim; ++idim) { PyObject *v = PySequence_GetItem(value, idim); + if (v == NULL) { + return -1; + } multi_index[idim] = PyLong_AsLong(v); Py_DECREF(v); if (error_converting(multi_index[idim])) { diff --git a/numpy/_core/tests/test_nditer.py b/numpy/_core/tests/test_nditer.py index fa158728285b..8ee74a136c13 100644 --- a/numpy/_core/tests/test_nditer.py +++ b/numpy/_core/tests/test_nditer.py @@ -3660,3 +3660,19 @@ def test_signature_methods(method): assert "self" in sig.parameters assert sig.parameters["self"].kind is inspect.Parameter.POSITIONAL_ONLY + + +def test_nditer_multi_index_no_segfault(): + class BadSequence: + def __len__(self): + return 2 + + def __getitem__(self, i): + if i == 1: + raise RuntimeError("intentional error") + return 0 + + arr = np.zeros((3, 4)) + it = np.nditer(arr, flags=["multi_index"]) + with pytest.raises(RuntimeError, match="intentional error"): + it.multi_index = BadSequence() From 863dedfc3f2738bea50bb011e060f4661fa01d26 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 24 Apr 2026 17:56:36 +0000 Subject: [PATCH 1676/1718] MAINT: Bump ruff in /requirements in the python-deps group Bumps the python-deps group in /requirements with 1 update: [ruff](https://github.com/astral-sh/ruff). 
Updates `ruff` from 0.15.10 to 0.15.11 - [Release notes](https://github.com/astral-sh/ruff/releases) - [Changelog](https://github.com/astral-sh/ruff/blob/main/CHANGELOG.md) - [Commits](https://github.com/astral-sh/ruff/compare/0.15.10...0.15.11) --- updated-dependencies: - dependency-name: ruff dependency-version: 0.15.11 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: python-deps ... Signed-off-by: dependabot[bot] --- requirements/linter_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/linter_requirements.txt b/requirements/linter_requirements.txt index e60446249ea6..c5fd608f0b9d 100644 --- a/requirements/linter_requirements.txt +++ b/requirements/linter_requirements.txt @@ -1,5 +1,5 @@ # keep in sync with `environment.yml` cython-lint -ruff==0.15.10 +ruff==0.15.11 GitPython>=3.1.46 spin From 36a770324c192ae23370fc271a83f54d11a61c38 Mon Sep 17 00:00:00 2001 From: Marten Henric van Kerkwijk Date: Fri, 17 Apr 2026 18:08:01 +0200 Subject: [PATCH 1677/1718] MAINT: export _update_descr_and_dimensions for use in descr setting. Put it in common.c under a new name, which also does the copying of dims & strides. --- numpy/_core/src/multiarray/common.c | 51 ++++++++++++++++++ numpy/_core/src/multiarray/common.h | 7 +++ numpy/_core/src/multiarray/ctors.c | 82 +++-------------------------- numpy/_core/src/multiarray/getset.c | 39 ++++++-------- 4 files changed, 81 insertions(+), 98 deletions(-) diff --git a/numpy/_core/src/multiarray/common.c b/numpy/_core/src/multiarray/common.c index 80423b5a8174..f753d6276bd9 100644 --- a/numpy/_core/src/multiarray/common.c +++ b/numpy/_core/src/multiarray/common.c @@ -370,6 +370,57 @@ _may_have_objects(PyArray_Descr *dtype) PyDataType_FLAGCHK(base, NPY_ITEM_HASOBJECT) ); } +/* + * Get a sub-array descriptor base, storing the subarray + * dimensions and strides, and updating the number of dimensions + * of the array. 
+ * + * Strides are only stored if needed. + */ +NPY_NO_EXPORT PyArray_Descr* +_get_subarray_base_and_dimensions( + const PyArray_Descr *descr, + const int nd, const npy_intp *dims, const npy_intp *strides, + int *new_nd, npy_intp *new_dims, npy_intp *new_strides) +{ + PyObject *shape = PyDataType_SUBARRAY(descr)->shape; + PyArray_Descr *base = PyDataType_SUBARRAY(descr)->base; + assert(shape && base); + npy_bool tuple = PyTuple_Check(shape); + int sub_nd = tuple ? PyTuple_GET_SIZE(shape) : 1; + + if (nd + sub_nd > NPY_MAXDIMS) { + PyErr_Format(PyExc_ValueError, + "number of dimensions must be within [0, %d]", NPY_MAXDIMS); + return NULL; + } + + *new_nd = nd + sub_nd; + memcpy(new_dims, dims, nd * sizeof(npy_intp)); + if (tuple) { + for (int i = 0; i < sub_nd; i++) { + new_dims[i+nd] = (npy_intp)PyLong_AsLong(PyTuple_GET_ITEM(shape, i)); + } + } + else { + new_dims[nd] = (npy_intp)PyLong_AsLong(shape); + } + + if (strides) { + memcpy(new_strides, strides, nd * sizeof(npy_intp)); + npy_intp tempsize; + /* Make new strides -- always C-contiguous */ + tempsize = base->elsize; + for (int i = nd + sub_nd - 1; i >= nd; i--) { + new_strides[i] = tempsize; + tempsize *= new_dims[i] ? new_dims[i] : 1; + } + } + + Py_INCREF(base); + return base; +} + /* * Check whether self can be viewed with the given dtype. * If so, return a new reference to the dtype (possibly changed). 
diff --git a/numpy/_core/src/multiarray/common.h b/numpy/_core/src/multiarray/common.h index b0218c48a39d..173243f1d672 100644 --- a/numpy/_core/src/multiarray/common.h +++ b/numpy/_core/src/multiarray/common.h @@ -95,6 +95,13 @@ _unpack_field_index( NPY_NO_EXPORT int _may_have_objects(PyArray_Descr *dtype); +/* For use in viewing as a new descriptor */ +NPY_NO_EXPORT PyArray_Descr* +_get_subarray_base_and_dimensions( + const PyArray_Descr *descr, + const int nd, const npy_intp *dims, const npy_intp *strides, + int *new_nd, npy_intp *new_dims, npy_intp *new_strides); + /* * Check whether self can be viewed with the given dtype. * If so, return a new reference to the dtype (possibly changed). diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index 49b83d7c9e79..d5ba8228f987 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -276,72 +276,6 @@ fromfile_skip_separator(FILE **fp, const char *sep, void *NPY_UNUSED(stream_data return result; } -/* - * Change a sub-array field to the base descriptor - * and update the dimensions and strides - * appropriately. Dimensions and strides are added - * to the end. - * - * Strides are only added if given (because data is given). 
- */ -static int -_update_descr_and_dimensions(PyArray_Descr **des, npy_intp *newdims, - npy_intp *newstrides, int oldnd) -{ - _PyArray_LegacyDescr *old; - int newnd; - int numnew; - npy_intp *mydim; - int i; - int tuple; - - old = (_PyArray_LegacyDescr *)*des; /* guaranteed as it has subarray */ - *des = old->subarray->base; - - - mydim = newdims + oldnd; - tuple = PyTuple_Check(old->subarray->shape); - if (tuple) { - numnew = PyTuple_GET_SIZE(old->subarray->shape); - } - else { - numnew = 1; - } - - - newnd = oldnd + numnew; - if (newnd > NPY_MAXDIMS) { - goto finish; - } - if (tuple) { - for (i = 0; i < numnew; i++) { - mydim[i] = (npy_intp) PyLong_AsLong( - PyTuple_GET_ITEM(old->subarray->shape, i)); - } - } - else { - mydim[0] = (npy_intp) PyLong_AsLong(old->subarray->shape); - } - - if (newstrides) { - npy_intp tempsize; - npy_intp *mystrides; - - mystrides = newstrides + oldnd; - /* Make new strides -- always C-contiguous */ - tempsize = (*des)->elsize; - for (i = numnew - 1; i >= 0; i--) { - mystrides[i] = tempsize; - tempsize *= mydim[i] ? mydim[i] : 1; - } - } - - finish: - Py_INCREF(*des); - Py_DECREF(old); - return newnd; -} - NPY_NO_EXPORT void _unaligned_strided_byte_copy(char *dst, npy_intp outstrides, char *src, npy_intp instrides, npy_intp N, int elsize) @@ -703,18 +637,18 @@ PyArray_NewFromDescr_int( if (!(cflags & _NPY_ARRAY_ENSURE_DTYPE_IDENTITY)) { if (PyDataType_SUBARRAY(descr)) { PyObject *ret; + int newnd; npy_intp newdims[2*NPY_MAXDIMS]; - npy_intp *newstrides = NULL; - memcpy(newdims, dims, nd*sizeof(npy_intp)); - if (strides) { - newstrides = newdims + NPY_MAXDIMS; - memcpy(newstrides, strides, nd*sizeof(npy_intp)); + npy_intp *newstrides = strides ? 
newdims + NPY_MAXDIMS : NULL; + Py_SETREF(descr, _get_subarray_base_and_dimensions( + descr, nd, dims, strides, + &newnd, newdims, newstrides)); + if (descr == NULL) { + return NULL; } - nd =_update_descr_and_dimensions(&descr, newdims, - newstrides, nd); ret = PyArray_NewFromDescr_int( subtype, descr, - nd, newdims, newstrides, data, + newnd, newdims, newstrides, data, flags, obj, base, cflags); return ret; } diff --git a/numpy/_core/src/multiarray/getset.c b/numpy/_core/src/multiarray/getset.c index 880e6b86de88..c3fc0cb4025c 100644 --- a/numpy/_core/src/multiarray/getset.c +++ b/numpy/_core/src/multiarray/getset.c @@ -364,40 +364,31 @@ array_descr_set_internal(PyArrayObject *self, PyObject *arg) /* Viewing as a subarray increases the number of dimensions */ if (PyDataType_HASSUBARRAY(newtype)) { assert(newlastdim < 0); /* not allowed for subarrays */ - /* - * create new array object from data and update - * dimensions, strides and descr from it - */ - PyArrayObject *temp; - /* - * We would decref newtype here. - * temp will steal a reference to it - */ - temp = (PyArrayObject *) - PyArray_NewFromDescr(&PyArray_Type, newtype, PyArray_NDIM(self), - PyArray_DIMS(self), PyArray_STRIDES(self), - PyArray_DATA(self), PyArray_FLAGS(self), NULL); - if (temp == NULL) { - return -1; - } + PyObject *shape = PyDataType_SUBARRAY(newtype)->shape; + /* Just assert(PyTuple_Check(shape))?? */ + int nd = PyArray_NDIM(self); + int new_nd = nd + (PyTuple_Check(shape) ? 
PyTuple_GET_SIZE(shape) : 1); /* create new dimensions cache and fill it */ - npy_intp new_nd = PyArray_NDIM(temp); npy_intp *new_dims = npy_alloc_cache_dim(2 * new_nd); if (new_dims == NULL) { - Py_DECREF(temp); PyErr_NoMemory(); + Py_DECREF(newtype); + return -1; + } + npy_intp *new_strides = new_dims + new_nd; + int chk_nd; + Py_SETREF(newtype, _get_subarray_base_and_dimensions( + newtype, nd, PyArray_DIMS(self), PyArray_STRIDES(self), + &chk_nd, new_dims, new_strides)); + if (newtype == NULL) { return -1; } - memcpy(new_dims, PyArray_DIMS(temp), new_nd * sizeof(npy_intp)); - memcpy(new_dims + new_nd, PyArray_STRIDES(temp), new_nd * sizeof(npy_intp)); + assert(chk_nd == new_nd); /* Update self with new cache */ npy_free_cache_dim_array(self); ((PyArrayObject_fields *)self)->nd = new_nd; ((PyArrayObject_fields *)self)->dimensions = new_dims; - ((PyArrayObject_fields *)self)->strides = new_dims + new_nd; - newtype = PyArray_DESCR(temp); - Py_INCREF(newtype); - Py_DECREF(temp); + ((PyArrayObject_fields *)self)->strides = new_strides; } else if (newlastdim >= 0) { int lastaxis = PyArray_NDIM(self) - 1; From a0a039d9ae3fd75cbf5385eeec12e770b819f1c5 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Mon, 20 Apr 2026 13:32:10 +0200 Subject: [PATCH 1678/1718] BLD: use batched C99 function detection to speed up configure stage of build This reintroduces the `feature_detection_math.h` and `feature_detection_cmath.h` that we had in the distutils build (files reworked but adapted the idea from the 1.25.x branch - improved to avoid undefs and pragmas). It speeds up the configure stage significantly, especially on Windows where these checks are quite slow. On macOS the relative speedup is ~30%. The other benefit is to significantly reduce the length of the build log printed to stdout, so it's easier to find things in it. Note that the `_math.h` header in 1.25.x had `float` and `long double` signatures, but those were never used, so they're left out here. 
The most significant change from 1.25.x is to use: static const funcptr funcs[] = { (funcptr)sin, ... }; to take a static address of each function, which forces the linker to resolve each function in the batch check with `cc.links`. --- numpy/_core/feature_detection_cmath.h | 95 ++++++++++++++++++++ numpy/_core/feature_detection_math.h | 46 ++++++++++ numpy/_core/meson.build | 124 +++++++++++++++++++------- 3 files changed, 234 insertions(+), 31 deletions(-) create mode 100644 numpy/_core/feature_detection_cmath.h create mode 100644 numpy/_core/feature_detection_math.h diff --git a/numpy/_core/feature_detection_cmath.h b/numpy/_core/feature_detection_cmath.h new file mode 100644 index 000000000000..be3ac365a80a --- /dev/null +++ b/numpy/_core/feature_detection_cmath.h @@ -0,0 +1,95 @@ +/* + * Prototypes for C99 complex-math functions probed as a batch by meson.build. + * + * For MSVC we must include to pick up the non-standard + * _Fcomplex/_Dcomplex/_Lcomplex typedefs, since MSVC doesn't support the C99 `_Complex` + * keyword directly. For every other compiler we avoid including so that + * macro-based declarations (notably musl's `#define crealf(x) ((float)(x))`) don't + * conflict with the plain prototypes below. 
+ */ + +#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) +#include +typedef _Fcomplex cfloat; +typedef _Dcomplex cdouble; +typedef _Lcomplex cldouble; +#else +typedef float _Complex cfloat; +typedef double _Complex cdouble; +typedef long double _Complex cldouble; +#endif + +/* Mandatory complex math (double) */ +cdouble csin(cdouble); +cdouble csinh(cdouble); +cdouble ccos(cdouble); +cdouble ccosh(cdouble); +cdouble ctan(cdouble); +cdouble ctanh(cdouble); +double creal(cdouble); +double cimag(cdouble); +cdouble conj(cdouble); + +/* float variants */ +cfloat csinf(cfloat); +cfloat csinhf(cfloat); +cfloat ccosf(cfloat); +cfloat ccoshf(cfloat); +cfloat ctanf(cfloat); +cfloat ctanhf(cfloat); +float crealf(cfloat); +float cimagf(cfloat); +cfloat conjf(cfloat); + +/* long double variants */ +cldouble csinl(cldouble); +cldouble csinhl(cldouble); +cldouble ccosl(cldouble); +cldouble ccoshl(cldouble); +cldouble ctanl(cldouble); +cldouble ctanhl(cldouble); +long double creall(cldouble); +long double cimagl(cldouble); +cldouble conjl(cldouble); + +/* C99 complex (double) */ +double cabs(cdouble); +cdouble cacos(cdouble); +cdouble cacosh(cdouble); +double carg(cdouble); +cdouble casin(cdouble); +cdouble casinh(cdouble); +cdouble catan(cdouble); +cdouble catanh(cdouble); +cdouble cexp(cdouble); +cdouble clog(cdouble); +cdouble cpow(cdouble, cdouble); +cdouble csqrt(cdouble); + +/* C99 complex (float) */ +float cabsf(cfloat); +cfloat cacosf(cfloat); +cfloat cacoshf(cfloat); +float cargf(cfloat); +cfloat casinf(cfloat); +cfloat casinhf(cfloat); +cfloat catanf(cfloat); +cfloat catanhf(cfloat); +cfloat cexpf(cfloat); +cfloat clogf(cfloat); +cfloat cpowf(cfloat, cfloat); +cfloat csqrtf(cfloat); + +/* C99 complex (long double) */ +long double cabsl(cldouble); +cldouble cacosl(cldouble); +cldouble cacoshl(cldouble); +long double cargl(cldouble); +cldouble casinl(cldouble); +cldouble casinhl(cldouble); +cldouble catanl(cldouble); +cldouble catanhl(cldouble); +cldouble 
cexpl(cldouble); +cldouble clogl(cldouble); +cldouble cpowl(cldouble, cldouble); +cldouble csqrtl(cldouble); diff --git a/numpy/_core/feature_detection_math.h b/numpy/_core/feature_detection_math.h new file mode 100644 index 000000000000..5f80a8103367 --- /dev/null +++ b/numpy/_core/feature_detection_math.h @@ -0,0 +1,46 @@ +/* + * Prototypes for math functions probed as a batch by meson.build. + * + * Intentionally omits and to avoid conflicting with + * calling-convention-modified declarations (e.g. MSVC's __cdecl) or + * macro-based declarations in system headers. + */ + +double sin(double); +double cos(double); +double tan(double); +double sinh(double); +double cosh(double); +double tanh(double); +double fabs(double); +double floor(double); +double ceil(double); +double sqrt(double); +double log10(double); +double log(double); +double exp(double); +double asin(double); +double acos(double); +double atan(double); +double fmod(double, double); +double modf(double, double*); +double frexp(double, int*); +double ldexp(double, int); +double expm1(double); +double log1p(double); +double acosh(double); +double asinh(double); +double atanh(double); +double rint(double); +double trunc(double); +double exp2(double); +double copysign(double, double); +double nextafter(double, double); +double cbrt(double); +double log2(double); +double pow(double, double); +double hypot(double, double); +double atan2(double, double); + +long long strtoll(const char*, char**, int); +unsigned long long strtoull(const char*, char**, int); diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index dc3985a0f5a3..331afe21e0b5 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -200,9 +200,43 @@ foreach symbol_type: complex_types_to_check cdata.set(symbol_type[0], cc.sizeof(symbol_type[1], prefix: '#include ')) endforeach -# Mandatory functions: if not found, fail the build -# Some of these can still be blocklisted if the C99 implementation -# is buggy, see 
numpy/_core/src/common/npy_config.h +# Mandatory and optional math functions are detected via batched cc.links() +# probes that take each function's address through a static array, forcing the +# linker to resolve the symbol. On failure we fall back to per-function probes +# so a missing mandatory symbol is reported by name, and so optional functions +# that are partially available on an unusual libm are still detected +# individually. +# +# Platform-specific libm bugs are handled via `#undef HAVE_*` in +# numpy/_core/src/common/npy_config.h. + +inc_curdir = include_directories('.') + +# Shared C-source boilerplate. '@INCLUDES@' and '@REFS@' are substituted +# per-batch via .replace() below. +_batch_probe_template = ''' +#include +#include +@INCLUDES@ + +typedef void (*funcptr)(void); + +static const funcptr funcs[] = { +@REFS@}; + +int main(int argc, char **argv) { + volatile uintptr_t s = 0; + unsigned i; + (void)argc; (void)argv; + for (i = 0; i < sizeof(funcs)/sizeof(funcs[0]); i++) { + s ^= (uintptr_t)funcs[i]; + } + return (int)(s & 1); +} +''' + +# Mandatory functions: if the batched link fails, fall back to per-function +# checks to identify which symbol is actually missing. 
mandatory_math_funcs = [ 'sin', 'cos', 'tan', 'sinh', 'cosh', 'tanh', 'fabs', 'floor', 'ceil', 'sqrt', 'log10', 'log', 'exp', 'asin', @@ -211,26 +245,41 @@ mandatory_math_funcs = [ 'rint', 'trunc', 'exp2', 'copysign', 'nextafter', 'cbrt', 'log2', 'pow', 'hypot', 'atan2', ] -foreach func: mandatory_math_funcs - if not cc.has_function(func, prefix: '#include ', dependencies: m_dep) - error(f'Function `@func@` not found') - endif -endforeach - mandatory_complex_math_funcs = [ 'csin', 'csinh', 'ccos', 'ccosh', 'ctan', 'ctanh', 'creal', 'cimag', 'conj' ] -foreach func: mandatory_complex_math_funcs - if not cc.has_function(func, prefix: '#include ', dependencies: m_dep) - error(f'Function `@func@` not found') - endif -endforeach +mandatory_strto_funcs = ['strtoll', 'strtoull'] -foreach func: ['strtoll', 'strtoull'] - if not cc.has_function(func, prefix: '#include ') - error(f'Function `@func@` not found') - endif +_mandatory_refs = '' +foreach _f : mandatory_math_funcs + mandatory_complex_math_funcs + mandatory_strto_funcs + _mandatory_refs += ' (funcptr)' + _f + ',\n' endforeach +_mandatory_src = _batch_probe_template.replace( + '@INCLUDES@', + '#include "feature_detection_math.h"\n#include "feature_detection_cmath.h"' +).replace('@REFS@', _mandatory_refs) + +if not cc.links(_mandatory_src, + include_directories: inc_curdir, + dependencies: m_dep, + name: 'mandatory libm functions (batched)') + # Re-run per-function to name the missing symbol for the user. 
+ foreach func: mandatory_math_funcs + if not cc.has_function(func, prefix: '#include ', dependencies: m_dep) + error(f'Function `@func@` not found') + endif + endforeach + foreach func: mandatory_complex_math_funcs + if not cc.has_function(func, prefix: '#include ', dependencies: m_dep) + error(f'Function `@func@` not found') + endif + endforeach + foreach func: mandatory_strto_funcs + if not cc.has_function(func, prefix: '#include ') + error(f'Function `@func@` not found') + endif + endforeach +endif c99_complex_funcs = [ 'cabs', 'cacos', 'cacosh', 'carg', 'casin', 'casinh', 'catan', @@ -239,20 +288,34 @@ c99_complex_funcs = [ # but are missing in FreeBSD. Issue gh-22850 'csin', 'csinh', 'ccos', 'ccosh', 'ctan', 'ctanh', ] -foreach func: c99_complex_funcs - func_single = func + 'f' - func_longdouble = func + 'l' - if cc.has_function(func, prefix: '#include ', dependencies: m_dep) - cdata.set10('HAVE_' + func.to_upper(), true) - endif - if cc.has_function(func_single, prefix: '#include ', dependencies: m_dep) - cdata.set10('HAVE_' + func_single.to_upper(), true) - endif - if cc.has_function(func_longdouble, prefix: '#include ', dependencies: m_dep) - cdata.set10('HAVE_' + func_longdouble.to_upper(), true) - endif +c99_complex_funcs_all = [] +foreach _f : c99_complex_funcs + c99_complex_funcs_all += [_f, _f + 'f', _f + 'l'] endforeach +_c99cplx_refs = '' +foreach _f : c99_complex_funcs_all + _c99cplx_refs += ' (funcptr)' + _f + ',\n' +endforeach +_c99cplx_src = _batch_probe_template.replace( + '@INCLUDES@', '#include "feature_detection_cmath.h"' +).replace('@REFS@', _c99cplx_refs) + +if cc.links(_c99cplx_src, + include_directories: inc_curdir, + dependencies: m_dep, + name: 'C99 complex funcs (batched)') + foreach func: c99_complex_funcs_all + cdata.set10('HAVE_' + func.to_upper(), true) + endforeach +else + foreach func: c99_complex_funcs_all + if cc.has_function(func, prefix: '#include ', dependencies: m_dep) + cdata.set10('HAVE_' + func.to_upper(), true) + endif 
+ endforeach +endif + # We require C99 so these should always be found at build time. But for # libnpymath as a C99 compat layer, these may still be relevant. c99_macros = ['isfinite', 'isinf', 'isnan', 'signbit'] @@ -293,7 +356,6 @@ foreach optional_attr: optional_variable_attributes endif endforeach -inc_curdir = include_directories('.') optional_file_funcs = ['fallocate', 'ftello', 'fseeko'] foreach filefunc_maybe: optional_file_funcs config_value = 'HAVE_' + filefunc_maybe.to_upper() From bab58ec3127f95d7e63e305711bbd19f57cd0907 Mon Sep 17 00:00:00 2001 From: Maarten Baert Date: Mon, 27 Apr 2026 16:45:44 +0200 Subject: [PATCH 1679/1718] Merge pull request #31320 from MaartenBaert/main BUG: fix memory leak in np.zeros when fill-zero loop raises --- numpy/_core/src/multiarray/arrayobject.c | 2 +- numpy/_core/src/multiarray/ctors.c | 12 ++++++++---- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/numpy/_core/src/multiarray/arrayobject.c b/numpy/_core/src/multiarray/arrayobject.c index 460b007d7a7d..9440bec477bf 100644 --- a/numpy/_core/src/multiarray/arrayobject.c +++ b/numpy/_core/src/multiarray/arrayobject.c @@ -449,10 +449,10 @@ _clear_array_attributes(PyArrayObject *self, npy_bool unraisable) nbytes = 1; } PyDataMem_UserFREE(fa->data, nbytes, fa->mem_handler); - Py_CLEAR(fa->mem_handler); } fa->data = NULL; } + Py_CLEAR(fa->mem_handler); /* must match allocation in PyArray_NewFromDescr */ npy_free_cache_dim(fa->dimensions, 2 * fa->nd); diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index 49b83d7c9e79..c657e025a915 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -906,6 +906,13 @@ PyArray_NewFromDescr_int( raise_memory_error(fa->nd, fa->dimensions, descr); goto fail; } + /* + * Set fa->data and NPY_ARRAY_OWNDATA immediately after allocation so + * that the fail path (Py_DECREF(fa) -> dealloc) frees the buffer if + * the fill-zero loop below raises an error. 
+ */ + fa->data = data; + fa->flags |= NPY_ARRAY_OWNDATA; /* * If the array needs special dtype-specific zero-filling logic, do that @@ -919,8 +926,6 @@ PyArray_NewFromDescr_int( goto fail; } } - - fa->flags |= NPY_ARRAY_OWNDATA; } else { /* The handlers should never be called in this case */ @@ -929,8 +934,8 @@ PyArray_NewFromDescr_int( * If data is passed in, this object won't own it. */ fa->flags &= ~NPY_ARRAY_OWNDATA; + fa->data = data; } - fa->data = data; /* * Always update the aligned flag. Not owned data or input strides may @@ -998,7 +1003,6 @@ PyArray_NewFromDescr_int( fail: NPY_traverse_info_xfree(&fill_zero_info); - Py_XDECREF(fa->mem_handler); Py_DECREF(fa); return NULL; } From a6df9950bb0501e0572ae05aefef93fc26cb25d2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 27 Apr 2026 21:50:16 +0000 Subject: [PATCH 1680/1718] MAINT: Bump int128/hide-comment-action from 1.55.0 to 1.56.0 Bumps [int128/hide-comment-action](https://github.com/int128/hide-comment-action) from 1.55.0 to 1.56.0. - [Release notes](https://github.com/int128/hide-comment-action/releases) - [Commits](https://github.com/int128/hide-comment-action/compare/42badf94b3efd95bf2138bd9c74da19203e83f40...1f2fb354c37723970677f254382dd0324a77327a) --- updated-dependencies: - dependency-name: int128/hide-comment-action dependency-version: 1.56.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- .github/workflows/mypy_primer_comment.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/mypy_primer_comment.yml b/.github/workflows/mypy_primer_comment.yml index 68a0978e7a12..dc753f55252c 100644 --- a/.github/workflows/mypy_primer_comment.yml +++ b/.github/workflows/mypy_primer_comment.yml @@ -49,7 +49,7 @@ jobs: return parseInt(fs.readFileSync("pr_number.txt", { encoding: "utf8" })) - name: Hide old comments - uses: int128/hide-comment-action@42badf94b3efd95bf2138bd9c74da19203e83f40 # v1.55.0 + uses: int128/hide-comment-action@1f2fb354c37723970677f254382dd0324a77327a # v1.56.0 with: token: ${{ secrets.GITHUB_TOKEN }} issue-number: ${{ steps.get-pr-number.outputs.result }} From cbda3c53eb8d45f6cff16708c00e64831335b89e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 27 Apr 2026 21:54:04 +0000 Subject: [PATCH 1681/1718] MAINT: Bump pyrefly in /requirements in the python-deps group Bumps the python-deps group in /requirements with 1 update: [pyrefly](https://github.com/facebook/pyrefly). Updates `pyrefly` from 0.61.0 to 0.62.0 - [Release notes](https://github.com/facebook/pyrefly/releases) - [Commits](https://github.com/facebook/pyrefly/compare/0.61.0...0.62.0) --- updated-dependencies: - dependency-name: pyrefly dependency-version: 0.62.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: python-deps ... 
Signed-off-by: dependabot[bot] --- requirements/typing_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/typing_requirements.txt b/requirements/typing_requirements.txt index c631ced8ac3f..dfeb1fe276f2 100644 --- a/requirements/typing_requirements.txt +++ b/requirements/typing_requirements.txt @@ -3,4 +3,4 @@ -r test_requirements.txt mypy==1.20.2 -pyrefly==0.61.0 +pyrefly==0.62.0 From b33f4d2b694baf9876864503a5d41fe11b9a34dd Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Mon, 27 Apr 2026 19:42:10 -0400 Subject: [PATCH 1682/1718] BUG/DEPR: Disallow noninteger types for the k parameter in rot90(). Closes gh-31342. --- numpy/lib/_function_base_impl.py | 11 +++++++++++ numpy/lib/tests/test_function_base.py | 8 ++++++++ 2 files changed, 19 insertions(+) diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 0d1b9a0331cf..ef237b8c239f 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -1,6 +1,7 @@ import builtins import collections.abc import functools +import operator import re import warnings @@ -247,6 +248,16 @@ def rot90(m, k=1, axes=(0, 1)): or axes[1] >= m.ndim or axes[1] < -m.ndim): raise ValueError(f"Axes={axes} out of range for array of ndim={m.ndim}.") + try: + k = operator.index(k) + except TypeError: + # DEPRECATED 2026-04-27, NumPy 2.5 + msg = (f"Passing a value for k ({k!r}) that is not an integer type has been " + "deprecated in NumPy 2.5. The value will be cast to an integer. 
In " + "the future this will be an error.") + warnings.warn(msg, DeprecationWarning, stacklevel=2) + k = int(k) + k %= 4 if k == 0: diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index ac07c2388c3d..fd748ac73f44 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -149,6 +149,14 @@ def test_rotation_axes(self): assert_equal(rot90(a, k=k, axes=(2, 0)), rot90(a_rot90_20, k=k - 1, axes=(2, 0))) + @pytest.mark.parametrize('k', [2.0, 2.25]) + def test_noninteger_k_deprecation_warning(self, k): + a = np.array([[1, 2], [3, 4]]) + with pytest.warns(DeprecationWarning, + match="not an integer type has been deprecated"): + b = rot90(a, k=k) + assert_array_equal(b, np.rot90(a, k=2), strict=True) + class TestFlip: From 570b65858e85bca3b88c4429ab97026f3dbb3784 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Tue, 28 Apr 2026 11:36:22 +0200 Subject: [PATCH 1683/1718] DEP: Narrow dtype deprecation subclass handling even further (#31316) This goes to a bit extreme lengths, but avoids regressions for weird subclasses. Is it pretty? No, but the worst of it is just a transition and most subclasses shouldn't implement dtype attributes hopefully... Allows astropy's usage to work with only new deprecation warnings. (Of course fixing those is still slightly more involved). 
--- numpy/_core/src/multiarray/convert.c | 173 ++++++++++++++++++++----- numpy/_core/tests/test_deprecations.py | 23 ++++ 2 files changed, 162 insertions(+), 34 deletions(-) diff --git a/numpy/_core/src/multiarray/convert.c b/numpy/_core/src/multiarray/convert.c index c5197b76169c..483bdf26a070 100644 --- a/numpy/_core/src/multiarray/convert.c +++ b/numpy/_core/src/multiarray/convert.c @@ -526,6 +526,127 @@ PyArray_NewCopy(PyArrayObject *obj, NPY_ORDER order) return (PyObject *)ret; } + +static int +get_optional_set_dtype_and_dtype( + PyTypeObject *subtype, + PyObject **_set_dtype, PyObject **sub_dtype) +{ + if (PyObject_GetOptionalAttr( + (PyObject *)subtype, npy_interned_str._set_dtype, + _set_dtype) < 0) { + return -1; + } + if (PyObject_GetOptionalAttr( + (PyObject *)subtype, npy_interned_str.dtype, sub_dtype) < 0) { + Py_XDECREF(*_set_dtype); + return -1; + } + return 0; +} + + +/* Pick how a view() dtype change is propagated for `subtype`. + * + * Sets (at most) one of the three output flags; all three zero means + * the legacy in-place path (subclass can't observe). + * + * use_dtype_in_finalize -- `_set_dtype = None`: class wants + * __array_finalize__ to see the final dtype. + * use_set_dtype -- subclass `_set_dtype` method: call it + * as an adjust hook after viewing. + * use_dtype_prop -- subclass `dtype` descriptor wins over + * `_set_dtype`: call the setter (deprecated). + * + * Walk the MRO and see whether `_set_dtype` or `dtype` was overridden + * more specifically: whichever's resolved value diverges from what + * `subtype` sees at a lower MRO level wins. Ties and "only + * `_set_dtype` was overridden" both go to `_set_dtype`, matching the + * direction numpy's deprecation points users toward. Blind to + * `__setattr__` / odd MI. */ +static int +decide_view_dtype_path( + PyTypeObject *subtype, + int *use_dtype_in_finalize, + int *use_set_dtype, + int *use_dtype_prop) +{ + int ret = -1; + *use_dtype_in_finalize = 1; /* Future defaults. 
*/ + *use_set_dtype = 0; + *use_dtype_prop = 0; + + if (subtype == &PyArray_Type) { + return 0; + } + + PyObject *sub_set_dtype = NULL, *sub_dtype = NULL, *mro = NULL; + if (get_optional_set_dtype_and_dtype( + subtype, &sub_set_dtype, &sub_dtype) < 0) { + goto finish; + } + + int set_overridden = + (sub_set_dtype != npy_static_pydata.ndarray_set_dtype); + int dtype_overridden = + (sub_dtype != npy_static_pydata.ndarray_dtype_descr); + + /* Default: `_set_dtype` wins (either it was overridden, or nothing + * was); flipped only if the walk below finds `dtype` diverging + * first, or `dtype` was the sole override. */ + int set_wins = set_overridden || !dtype_overridden; + + if (set_overridden && dtype_overridden) { + /* Both overridden -- walk the MRO to see which was overridden + * more specifically. Pin `tp_mro`; under free-threading + * `cls.__bases__ = ...` can replace it concurrently. The tuple + * owns refs to its entries, so the base types stay alive too. */ + mro = Py_XNewRef(subtype->tp_mro); + Py_ssize_t n = mro != NULL ? PyTuple_GET_SIZE(mro) : 0; + for (Py_ssize_t i = 1; i < n; i++) { + PyTypeObject *base = (PyTypeObject *)PyTuple_GET_ITEM(mro, i); + PyObject *v_set, *v_dtype; + if (get_optional_set_dtype_and_dtype(base, &v_set, &v_dtype) < 0) { + goto finish; + } + /* NULL means this base doesn't know the name at all (an MI + * sibling branch above ndarray). That's "no information", + * not divergence -- ndarray sits deeper in the MRO and will + * provide the real baseline. */ + int set_div = v_set != NULL && v_set != sub_set_dtype; + int dtype_div = v_dtype != NULL && v_dtype != sub_dtype; + Py_XDECREF(v_set); + Py_XDECREF(v_dtype); + + if (set_div || dtype_div) { + /* First to diverge wins; tie (both) -> `_set_dtype`. 
*/ + set_wins = set_div; + break; + } + } + } + + if (set_wins) { + if (sub_set_dtype != Py_None) { + *use_dtype_in_finalize = 0; + *use_set_dtype = set_overridden; + } + } + else { + *use_dtype_in_finalize = 0; + *use_dtype_prop = (sub_dtype != NULL + && Py_TYPE(sub_dtype)->tp_descr_set != NULL); + } + + ret = 0; + finish: + Py_XDECREF(sub_set_dtype); + Py_XDECREF(sub_dtype); + Py_XDECREF(mro); + return ret; +} + + /*NUMPY_API * View * steals a reference to type -- accepts NULL @@ -549,7 +670,7 @@ PyArray_View(PyArrayObject *self, PyArray_Descr *type, PyTypeObject *pytype) } if (type == NULL) { - /* No dtype change: just create the view */ + /* No dtype change. */ Py_INCREF(dtype); return PyArray_NewFromDescr_int( subtype, dtype, nd, dims, strides, PyArray_DATA(self), @@ -558,7 +679,9 @@ PyArray_View(PyArrayObject *self, PyArray_Descr *type, PyTypeObject *pytype) } /* - * Changing dtype on a subclass. We support 4 paths: + * Changing dtype on a subclass. We support 4 paths, based on whether + * a subclass overrides _set_dtype or the dtype setter (where whichever + * is overridden most recently wins): * * 1. If _set_dtype is None: create a new view with the new dtype. * This is the future: __array_finalize__ sees final dtype and shape. @@ -574,36 +697,11 @@ PyArray_View(PyArrayObject *self, PyArray_Descr *type, PyTypeObject *pytype) * (Base class ndarray uses path 1, but has no __array_finalize__, * so it is the same as paths 2 and 4.) 
*/ - npy_bool use_dtype_in_finalize = NPY_TRUE; - npy_bool use_set_dtype = NPY_FALSE; - npy_bool use_dtype_prop = NPY_FALSE; - - if (subtype != &PyArray_Type) { - PyObject *sub_set_dtype; - if (PyObject_GetOptionalAttr( - (PyObject *)subtype, - npy_interned_str._set_dtype, &sub_set_dtype) < 0) { - goto finish; - } - if (sub_set_dtype != Py_None) { - use_dtype_in_finalize = NPY_FALSE; - use_set_dtype = (sub_set_dtype != NULL && - sub_set_dtype != npy_static_pydata.ndarray_set_dtype); - } - Py_XDECREF(sub_set_dtype); - - if (!use_set_dtype && !use_dtype_in_finalize) { - PyObject *sub_dtype_descr; - if (PyObject_GetOptionalAttr( - (PyObject *)subtype, - npy_interned_str.dtype, &sub_dtype_descr) < 0) { - goto finish; - } - use_dtype_prop = (sub_dtype_descr != NULL && - sub_dtype_descr != npy_static_pydata.ndarray_dtype_descr && - Py_TYPE(sub_dtype_descr)->tp_descr_set != NULL); - Py_XDECREF(sub_dtype_descr); - } + int use_dtype_in_finalize, use_set_dtype, use_dtype_prop; + if (decide_view_dtype_path( + subtype, &use_dtype_in_finalize, + &use_set_dtype, &use_dtype_prop) < 0) { + goto finish; } if (use_dtype_in_finalize) { @@ -674,11 +772,18 @@ PyArray_View(PyArrayObject *self, PyArray_Descr *type, PyTypeObject *pytype) Py_CLEAR(ret); goto finish; } - /* DEPRECATED 2026-04-13, NumPy 2.5 */ + /* + * Path 3: subclass overrides dtype property. + * DEPRECATED 2026-04-13, NumPy 2.5. + * After the deprecation, the decide_view_dtype_path helper isn't + * needed. `_set_dtype` is used unless it is the base-class + * definition or None, which are the only 3 options left without + * need for MRO walking. + */ if (DEPRECATE( "numpy.ndarray.view() used a custom `dtype` setter " "to change the dtype of the view. Subclasses should " - "implement `_set_dtype` instead.") < 0) { + "implement `_set_dtype` instead. 
(Deprecated NumPy 2.5)") < 0) { Py_CLEAR(ret); goto finish; } diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index 27a5b4c4c9ba..581958edb749 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -282,6 +282,29 @@ def dtype(self, dtype): arr = np.arange(6).view(MyArray) self.assert_deprecated(arr.view, args=(np.float64,)) + def test_view_dtype_property_setter_recarray_subclass(self): + # Recarray subclasses without a `_set_dtype` should still work + # but warn until they implement it. As recarray sets it to None + # such a subclass may have to do the same fix here (can't use super()). + class SideBranch: + pass + + class MyRecArray(SideBranch, np.recarray): + @property + def dtype(self): + return super().dtype + + @dtype.setter + def dtype(self, dtype): + # would need to call `np.ndarray._set_dtype` to avoid the warning + # (but that side-steps the recarray dtype handling.) + with pytest.warns(DeprecationWarning, match="Setting the dtype"): + np.recarray.dtype.__set__(self, dtype) + + arr = np.rec.fromarrays( + [np.arange(3, dtype=np.int32)], names='x').view(MyRecArray) + self.assert_deprecated(arr.view, args=([('y', 'i4')],)) + class TestDeprecatedDTypeParenthesizedRepeatCount(_DeprecationTestCase): message = "Passing in a parenthesized single number" From 0f79a3a0921873c3d7a85c7b3d7cf1411b7d2143 Mon Sep 17 00:00:00 2001 From: bonpyt Date: Tue, 28 Apr 2026 15:44:34 +0100 Subject: [PATCH 1684/1718] BUG: Fix copy performance regression for structured arrays (#29270) This adds a new dtype flag used by structured dtypes to indicate that they _cannot_ trivially copy (`NPY_NOT_TRIVIALLY_COPYABLE`). This way, dtypes that do not set this (and _also_ don't have the references flag set) can be assumed to be trivially copyable, which should allow certain default implementations/fast-path improvements elsewhere as well. 
The current small caveat is that this flag cannot be unpickled yet, so it isn't pickled (safe but slower on round-trip) and it may also make sense to allow setting it explicitly. Co-authored-by: bonpyt --- benchmarks/benchmarks/bench_io.py | 26 +++++++++++++++ doc/release/upcoming_changes/29270.change.rst | 13 ++++++++ numpy/_core/include/numpy/ndarrayobject.h | 3 ++ numpy/_core/include/numpy/ndarraytypes.h | 5 ++- numpy/_core/src/multiarray/convert_datatype.c | 17 +++++++++- numpy/_core/src/multiarray/descriptor.c | 32 ++++++++++++++++--- 6 files changed, 90 insertions(+), 6 deletions(-) create mode 100644 doc/release/upcoming_changes/29270.change.rst diff --git a/benchmarks/benchmarks/bench_io.py b/benchmarks/benchmarks/bench_io.py index eea4a4ed4309..c672b0ac20dc 100644 --- a/benchmarks/benchmarks/bench_io.py +++ b/benchmarks/benchmarks/bench_io.py @@ -60,6 +60,32 @@ def time_copyto_8_dense(self): np.copyto(self.d, self.e, where=self.im8) +class CopyStructured(Benchmark): + params = [ + [10, 100, 1000], + [ + # Contiguous layout (trivially copyable, uses memcpy) + [('a', 'f8'), ('b', 'f8'), ('c', 'f8')], + # Many small fields (trivially copyable, uses memcpy) + [('x', 'i4'), ('y', 'i4'), ('z', 'i4'), + ('w', 'i4'), ('v', 'i4')], + ], + ] + param_names = ['n_kilo_elements', 'dtype'] + + def setup(self, n_kilo, dtype): + n = n_kilo * 1000 + self.dt = np.dtype(dtype) + self.src = np.ones(n, dtype=self.dt) + self.dst = np.empty(n, dtype=self.dt) + + def time_copy(self, n_kilo, dtype): + self.src.copy() + + def time_assign(self, n_kilo, dtype): + self.dst[...] 
= self.src + + class Savez(Benchmark): def setup(self): self.squares = get_squares() diff --git a/doc/release/upcoming_changes/29270.change.rst b/doc/release/upcoming_changes/29270.change.rst new file mode 100644 index 000000000000..1506384aa1e6 --- /dev/null +++ b/doc/release/upcoming_changes/29270.change.rst @@ -0,0 +1,13 @@ +Structured array copies now use ``memcpy`` for contiguous dtypes +---------------------------------------------------------------- +Copying structured arrays with identical dtypes now uses ``memcpy`` instead of +field-by-field transfer when the dtype has a contiguous layout (no gaps between +fields). A new ``NPY_NOT_TRIVIALLY_COPYABLE`` dtype flag is set on structured +dtypes that have gaps in their memory layout, such as those created with +explicit ``offsets`` or via multi-field indexing. Only these dtypes continue to +use the slower field-by-field copy. + +This means that padding bytes in contiguous structured dtypes (e.g. those +created without explicit ``offsets``) may now be copied as part of the +``memcpy``, whereas previously they were left untouched. Code that relies on +padding bytes being preserved during structured array copies may be affected. 
diff --git a/numpy/_core/include/numpy/ndarrayobject.h b/numpy/_core/include/numpy/ndarrayobject.h index 9cc1a4c1d000..e2974ad00e04 100644 --- a/numpy/_core/include/numpy/ndarrayobject.h +++ b/numpy/_core/include/numpy/ndarrayobject.h @@ -253,6 +253,9 @@ PyArray_ITEMSIZE(const PyArrayObject *arr) #define PyDataType_REFCHK(dtype) \ PyDataType_FLAGCHK(dtype, NPY_ITEM_REFCOUNT) +#define PyDataType_ISTRIVIALLYCOPYABLE(dtype) \ + (!(PyDataType_FLAGS(dtype) & (NPY_NOT_TRIVIALLY_COPYABLE | NPY_ITEM_REFCOUNT))) + #define NPY_BEGIN_THREADS_DESCR(dtype) \ do {if (!(PyDataType_FLAGCHK((dtype), NPY_NEEDS_PYAPI))) \ NPY_BEGIN_THREADS;} while (0); diff --git a/numpy/_core/include/numpy/ndarraytypes.h b/numpy/_core/include/numpy/ndarraytypes.h index 7aa23dd3426b..7af26ce3a07e 100644 --- a/numpy/_core/include/numpy/ndarraytypes.h +++ b/numpy/_core/include/numpy/ndarraytypes.h @@ -596,13 +596,16 @@ typedef struct { #define NPY_USE_SETITEM 0x40 /* A sticky flag specifically for structured arrays */ #define NPY_ALIGNED_STRUCT 0x80 +/* Structured dtype has non-contiguous field layout */ +#define NPY_NOT_TRIVIALLY_COPYABLE 0x100 /* *These are inherited for global data-type if any data-types in the * field have them */ #define NPY_FROM_FIELDS (NPY_NEEDS_INIT | NPY_LIST_PICKLE | \ - NPY_ITEM_REFCOUNT | NPY_NEEDS_PYAPI) + NPY_ITEM_REFCOUNT | NPY_NEEDS_PYAPI | \ + NPY_NOT_TRIVIALLY_COPYABLE) #define NPY_OBJECT_DTYPE_FLAGS (NPY_LIST_PICKLE | NPY_USE_GETITEM | \ NPY_ITEM_IS_POINTER | NPY_ITEM_REFCOUNT | \ diff --git a/numpy/_core/src/multiarray/convert_datatype.c b/numpy/_core/src/multiarray/convert_datatype.c index f404faab022a..90222642f599 100644 --- a/numpy/_core/src/multiarray/convert_datatype.c +++ b/numpy/_core/src/multiarray/convert_datatype.c @@ -3281,7 +3281,22 @@ void_to_void_get_loop( { if (PyDataType_NAMES(context->descriptors[0]) != NULL || PyDataType_NAMES(context->descriptors[1]) != NULL) { - if (get_fields_transfer_function( + /* + * Fast path: if dtypes are equivalent and 
the destination is + * trivially copyable, use memcpy instead of field-by-field transfer. + */ + if ((context->descriptors[0] == context->descriptors[1] || + PyArray_EquivTypes(context->descriptors[0], context->descriptors[1])) && + PyDataType_ISTRIVIALLYCOPYABLE(context->descriptors[1])) { + if (PyArray_GetStridedZeroPadCopyFn( + 0, 0, strides[0], strides[1], + context->descriptors[0]->elsize, context->descriptors[1]->elsize, + out_loop, out_transferdata) == NPY_FAIL) { + return -1; + } + *flags = PyArrayMethod_MINIMAL_FLAGS; + } + else if (get_fields_transfer_function( aligned, strides[0], strides[1], context->descriptors[0], context->descriptors[1], move_references, out_loop, out_transferdata, diff --git a/numpy/_core/src/multiarray/descriptor.c b/numpy/_core/src/multiarray/descriptor.c index a347ff4cca52..a45206e3b5ef 100644 --- a/numpy/_core/src/multiarray/descriptor.c +++ b/numpy/_core/src/multiarray/descriptor.c @@ -408,7 +408,7 @@ _convert_from_array_descr(PyObject *obj, int align) } /* Types with fields need the Python C API for field access */ - char dtypeflags = NPY_NEEDS_PYAPI; + npy_uint64 dtypeflags = NPY_NEEDS_PYAPI; int maxalign = 1; int totalsize = 0; PyObject *fields = PyDict_New(); @@ -631,7 +631,7 @@ _convert_from_list(PyObject *obj, int align) } /* Types with fields need the Python C API for field access */ - char dtypeflags = NPY_NEEDS_PYAPI; + npy_uint64 dtypeflags = NPY_NEEDS_PYAPI; int maxalign = 1; int totalsize = 0; for (int i = 0; i < n; i++) { @@ -1100,7 +1100,7 @@ _convert_from_dict(PyObject *obj, int align) } /* Types with fields need the Python C API for field access */ - char dtypeflags = NPY_NEEDS_PYAPI; + npy_uint64 dtypeflags = NPY_NEEDS_PYAPI; int totalsize = 0; int maxalign = 1; int has_out_of_order_fields = 0; @@ -1319,6 +1319,16 @@ _convert_from_dict(PyObject *obj, int align) new->elsize = itemsize; } + /* + * Mark as not trivially copyable only if explicit offsets were provided + * and the layout has holes. 
Alignment padding (without explicit offsets) + * is safe to overwrite with memcpy. + */ + if (offsets != NULL && + !is_dtype_struct_simple_unaligned_layout((PyArray_Descr *)new)) { + new->flags |= NPY_NOT_TRIVIALLY_COPYABLE; + } + /* Add the metadata if provided */ PyObject *metadata = PyMapping_GetItemString(obj, "metadata"); @@ -2817,7 +2827,8 @@ arraydescr_reduce(PyArray_Descr *self, PyObject *NPY_UNUSED(args)) } PyTuple_SET_ITEM(state, 5, PyLong_FromLong(elsize)); PyTuple_SET_ITEM(state, 6, PyLong_FromLong(alignment)); - PyTuple_SET_ITEM(state, 7, PyLong_FromUnsignedLongLong(self->flags)); + PyTuple_SET_ITEM(state, 7, PyLong_FromUnsignedLongLong( + self->flags & ~NPY_NOT_TRIVIALLY_COPYABLE)); PyTuple_SET_ITEM(ret, 2, state); return ret; @@ -3187,6 +3198,15 @@ arraydescr_setstate(_PyArray_LegacyDescr *self, PyObject *args) self->flags = _descr_find_object((PyArray_Descr *)self); } + /* + * Mark as not trivially copyable if layout is not simple (has padding). + * This flag is always recomputed on unpickle (not stored in pickle). 
+ */ + if (PyDataType_HASFIELDS((PyArray_Descr *)self) && + !is_dtype_struct_simple_unaligned_layout((PyArray_Descr *)self)) { + self->flags |= NPY_NOT_TRIVIALLY_COPYABLE; + } + PyObject *old_metadata, *new_metadata; if (PyDataType_ISDATETIME(self)) { PyArray_DatetimeMetaData temp_dt_data; @@ -3755,6 +3775,10 @@ arraydescr_field_subset_view(_PyArray_LegacyDescr *self, PyObject *ind) view_dtype->names = names; view_dtype->fields = fields; view_dtype->flags = self->flags; + /* Mark as not trivially copyable if layout is not simple (has padding) */ + if (!is_dtype_struct_simple_unaligned_layout((PyArray_Descr *)view_dtype)) { + view_dtype->flags |= NPY_NOT_TRIVIALLY_COPYABLE; + } return (PyArray_Descr *)view_dtype; fail: From 425f631926c3ae6c817774b4e079ffdf207e3907 Mon Sep 17 00:00:00 2001 From: Igor Krivenko Date: Tue, 28 Apr 2026 17:47:17 +0200 Subject: [PATCH 1685/1718] BUG: np.einsum() fails with a 0-dimensional out argument and optimize='optimal' (#31354) It replaces the expression out[:] found in bmm_einsum() with the more universal out[...], which is also valid for a 0-dimensional out. TestEinsum.test_einsum_misc() in numpy/_core/tests/test_einsum.py has been extended. Co-authored-by: Sebastian Berg --- numpy/_core/einsumfunc.py | 2 +- numpy/_core/tests/test_einsum.py | 8 ++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/numpy/_core/einsumfunc.py b/numpy/_core/einsumfunc.py index 7b19808a94f2..45d6a6e40166 100644 --- a/numpy/_core/einsumfunc.py +++ b/numpy/_core/einsumfunc.py @@ -1223,7 +1223,7 @@ def bmm_einsum(eq, a, b, out=None, **kwargs): if (out is not None) and (not matmul_out_compatible): # handle case where out is specified, but we also needed # to reshape / transpose ``ab`` after the matmul - out[:] = ab + out[...] 
= ab ab = out elif output_order is not None: ab = asanyarray(ab, order=output_order) diff --git a/numpy/_core/tests/test_einsum.py b/numpy/_core/tests/test_einsum.py index 6f4d38b8a97a..d7893f7a7821 100644 --- a/numpy/_core/tests/test_einsum.py +++ b/numpy/_core/tests/test_einsum.py @@ -762,6 +762,14 @@ def test_einsum_misc(self): # see issue gh-15776 and issue gh-15256 assert_equal(np.einsum('i,j', [1], [2], out=None), [[2]]) + def test_einsum_0d_out(self): + # Issue gh-31350, a zero-dimensional out must not cause an error + # with optimize='optimal' + a = np.ones(7) + out = np.array(0) + np.einsum('i,i->', a, a, out=out, optimize='optimal') + assert_equal(out, 7) + def test_object_loop(self): class Mult: From 6ec400a4683e5be68569e9b6a2203397df6893ad Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Tue, 28 Apr 2026 22:40:15 +0200 Subject: [PATCH 1686/1718] MAINT: Simplify C++ sorting tags (#31352) --- numpy/_core/src/common/numpy_tag.h | 431 +++++++++++------------ numpy/_core/src/npysort/npysort_common.h | 372 +------------------ 2 files changed, 209 insertions(+), 594 deletions(-) diff --git a/numpy/_core/src/common/numpy_tag.h b/numpy/_core/src/common/numpy_tag.h index ee0c36cacd73..8ff4b1069ed5 100644 --- a/numpy/_core/src/common/numpy_tag.h +++ b/numpy/_core/src/common/numpy_tag.h @@ -1,259 +1,224 @@ -#ifndef _NPY_COMMON_TAG_H_ -#define _NPY_COMMON_TAG_H_ +#ifndef NUMPY_CORE_SRC_COMMON_NUMPY_TAG_H_ +#define NUMPY_CORE_SRC_COMMON_NUMPY_TAG_H_ -#include "../npysort/npysort_common.h" +#include "numpy/ndarraytypes.h" +#include "numpy/npy_common.h" +#include "numpy/npy_math.h" + +#include +#include + +/* + * Per-dtype tags shared by the sort/partition/binsearch and clip + * implementations. 
+ * + * Each tag exposes: + * + * - ``type`` -- the underlying C scalar type + * - ``type_value`` -- the corresponding ``NPY_TYPES`` enumerator + * - ``less`` / ``less_equal`` -- the sort-friendly comparisons that + * propagate NaN / NaT to the high end + * + * For the four numeric categories that need different NaN/NaT handling, + * comparisons are implemented once at the ``*_type`` + * template level (with ``if constexpr`` for the only per-scalar variation + * -- the three real/imag accessors used by the complex types). Each + * such template inherits from a tiny empty marker (``integral_tag``, + * ``floating_point_tag``, ``complex_tag``, ``date_tag``) so that + * ``clip.cpp`` can dispatch via ordinary overload resolution. + * + * Concrete tags (``bool_tag``, ``float_tag``, ...) are then plain aliases + * for an ``*_type`` instantiation. + * + * Distinct tag types matter because several of NumPy's scalar types alias + * one another at the C level -- most notably ``npy_half`` is a typedef for + * ``npy_uint16`` -- so the extra ``NPY_TYPES`` template argument is what + * keeps ``half_tag`` and ``ushort_tag`` apart. + */ namespace npy { -template -struct taglist { - static constexpr unsigned size = sizeof...(tags); -}; +// Category markers used by clip's overload-set dispatch. ``half_tag`` is +// its own marker (and the only half tag) so it does not need one here. 
+struct integral_tag {}; +struct floating_point_tag {}; +struct complex_tag {}; +struct date_tag {}; -struct integral_tag { -}; -struct floating_point_tag { -}; -struct complex_tag { -}; -struct date_tag { +template +struct integral_type : integral_tag { + using type = T; + static constexpr NPY_TYPES type_value = TypeNum; + static int less(T a, T b) { return a < b; } + static int less_equal(T a, T b) { return !(b < a); } }; -struct bool_tag : integral_tag { - using type = npy_bool; - static constexpr NPY_TYPES type_value = NPY_BOOL; - static int less(type const& a, type const& b) { - return BOOL_LT(a, b); - } - static int less_equal(type const& a, type const& b) { - return !less(b, a); - } -}; -struct byte_tag : integral_tag { - using type = npy_byte; - static constexpr NPY_TYPES type_value = NPY_BYTE; - static int less(type const& a, type const& b) { - return BYTE_LT(a, b); - } - static int less_equal(type const& a, type const& b) { - return !less(b, a); - } -}; -struct ubyte_tag : integral_tag { - using type = npy_ubyte; - static constexpr NPY_TYPES type_value = NPY_UBYTE; - static int less(type const& a, type const& b) { - return UBYTE_LT(a, b); - } - static int less_equal(type const& a, type const& b) { - return !less(b, a); - } -}; -struct short_tag : integral_tag { - using type = npy_short; - static constexpr NPY_TYPES type_value = NPY_SHORT; - static int less(type const& a, type const& b) { - return SHORT_LT(a, b); - } - static int less_equal(type const& a, type const& b) { - return !less(b, a); - } -}; -struct ushort_tag : integral_tag { - using type = npy_ushort; - static constexpr NPY_TYPES type_value = NPY_USHORT; - static int less(type const& a, type const& b) { - return USHORT_LT(a, b); - } - static int less_equal(type const& a, type const& b) { - return !less(b, a); - } -}; -struct int_tag : integral_tag { - using type = npy_int; - static constexpr NPY_TYPES type_value = NPY_INT; - static int less(type const& a, type const& b) { - return INT_LT(a, b); 
- } - static int less_equal(type const& a, type const& b) { - return !less(b, a); - } -}; -struct uint_tag : integral_tag { - using type = npy_uint; - static constexpr NPY_TYPES type_value = NPY_UINT; - static int less(type const& a, type const& b) { - return UINT_LT(a, b); - } - static int less_equal(type const& a, type const& b) { - return !less(b, a); - } -}; -struct long_tag : integral_tag { - using type = npy_long; - static constexpr NPY_TYPES type_value = NPY_LONG; - static int less(type const& a, type const& b) { - return LONG_LT(a, b); - } - static int less_equal(type const& a, type const& b) { - return !less(b, a); - } -}; -struct ulong_tag : integral_tag { - using type = npy_ulong; - static constexpr NPY_TYPES type_value = NPY_ULONG; - static int less(type const& a, type const& b) { - return ULONG_LT(a, b); - } - static int less_equal(type const& a, type const& b) { - return !less(b, a); - } -}; -struct longlong_tag : integral_tag { - using type = npy_longlong; - static constexpr NPY_TYPES type_value = NPY_LONGLONG; - static int less(type const& a, type const& b) { - return LONGLONG_LT(a, b); - } - static int less_equal(type const& a, type const& b) { - return !less(b, a); - } -}; -struct ulonglong_tag : integral_tag { - using type = npy_ulonglong; - static constexpr NPY_TYPES type_value = NPY_ULONGLONG; - static int less(type const& a, type const& b) { - return ULONGLONG_LT(a, b); - } - static int less_equal(type const& a, type const& b) { - return !less(b, a); - } +template +struct floating_point_type : floating_point_tag { + using type = T; + static constexpr NPY_TYPES type_value = TypeNum; + // NaN sorts to the end: a is "less than" b if a is non-NaN and + // either a < b or b is NaN. ``x != x`` is the IEEE NaN test. + static int less(T a, T b) { return a < b || (b != b && a == a); } + static int less_equal(T a, T b) { return !less(b, a); } }; + +// Half is its own per-type tag; no template since there is only one half +// scalar. 
It also serves directly as its category marker for clip. struct half_tag { using type = npy_half; static constexpr NPY_TYPES type_value = NPY_HALF; - static int less(type const& a, type const& b) { - return HALF_LT(a, b); - } - static int less_equal(type const& a, type const& b) { - return !less(b, a); - } -}; -struct float_tag : floating_point_tag { - using type = npy_float; - static constexpr NPY_TYPES type_value = NPY_FLOAT; - static int less(type const& a, type const& b) { - return FLOAT_LT(a, b); - } - static int less_equal(type const& a, type const& b) { - return !less(b, a); - } -}; -struct double_tag : floating_point_tag { - using type = npy_double; - static constexpr NPY_TYPES type_value = NPY_DOUBLE; - static int less(type const& a, type const& b) { - return DOUBLE_LT(a, b); - } - static int less_equal(type const& a, type const& b) { - return !less(b, a); - } -}; -struct longdouble_tag : floating_point_tag { - using type = npy_longdouble; - static constexpr NPY_TYPES type_value = NPY_LONGDOUBLE; - static int less(type const& a, type const& b) { - return LONGDOUBLE_LT(a, b); - } - static int less_equal(type const& a, type const& b) { - return !less(b, a); - } -}; -struct cfloat_tag : complex_tag { - using type = npy_cfloat; - static constexpr NPY_TYPES type_value = NPY_CFLOAT; - static int less(type const& a, type const& b) { - return CFLOAT_LT(a, b); - } - static int less_equal(type const& a, type const& b) { - return !less(b, a); - } -}; -struct cdouble_tag : complex_tag { - using type = npy_cdouble; - static constexpr NPY_TYPES type_value = NPY_CDOUBLE; - static int less(type const& a, type const& b) { - return CDOUBLE_LT(a, b); - } - static int less_equal(type const& a, type const& b) { - return !less(b, a); + + static int isnan(npy_half h) + { + return ((h & 0x7c00u) == 0x7c00u) && ((h & 0x03ffu) != 0x0000u); } -}; -struct clongdouble_tag : complex_tag { - using type = npy_clongdouble; - static constexpr NPY_TYPES type_value = NPY_CLONGDOUBLE; - 
static int less(type const& a, type const& b) { - return CLONGDOUBLE_LT(a, b); + + // Bit-level less-than that assumes neither operand is NaN. + static int lt_nonan(npy_half a, npy_half b) + { + if (a & 0x8000u) { + if (b & 0x8000u) { + return (a & 0x7fffu) > (b & 0x7fffu); + } + // Signed zeros compare equal. + return (a != 0x8000u) || (b != 0x0000u); + } + if (b & 0x8000u) { + return 0; + } + return (a & 0x7fffu) < (b & 0x7fffu); } - static int less_equal(type const& a, type const& b) { - return !less(b, a); + + static int less(npy_half a, npy_half b) + { + if (isnan(b)) { + return !isnan(a); + } + return !isnan(a) && lt_nonan(a, b); } + static int less_equal(npy_half a, npy_half b) { return !less(b, a); } }; -struct datetime_tag : date_tag { - using type = npy_datetime; - static constexpr NPY_TYPES type_value = NPY_DATETIME; - static int less(type const& a, type const& b) { - return DATETIME_LT(a, b); - } - static int less_equal(type const& a, type const& b) { - return !less(b, a); + +template +struct complex_type : complex_tag { + using type = T; + static constexpr NPY_TYPES type_value = TypeNum; + + // Real / imag accessors picked at compile time so ``less`` can be + // written generically across the three complex scalar types. 
+ static auto creal(T z) + { + if constexpr (std::is_same_v) return npy_crealf(z); + else if constexpr (std::is_same_v) return npy_creal(z); + else return npy_creall(z); + } + static auto cimag(T z) + { + if constexpr (std::is_same_v) return npy_cimagf(z); + else if constexpr (std::is_same_v) return npy_cimag(z); + else return npy_cimagl(z); } + + static int less(T a, T b) + { + const auto ra = creal(a), rb = creal(b); + const auto ia = cimag(a), ib = cimag(b); + if (ra < rb) { + return ia == ia || ib != ib; + } + if (ra > rb) { + return ib != ib && ia == ia; + } + if (ra == rb || (ra != ra && rb != rb)) { + return ia < ib || (ib != ib && ia == ia); + } + return rb != rb; + } + static int less_equal(T a, T b) { return !less(b, a); } }; -struct timedelta_tag : date_tag { - using type = npy_timedelta; - static constexpr NPY_TYPES type_value = NPY_TIMEDELTA; - static int less(type const& a, type const& b) { - return TIMEDELTA_LT(a, b); - } - static int less_equal(type const& a, type const& b) { - return !less(b, a); - } + +template +struct datetime_type : date_tag { + using type = T; + static constexpr NPY_TYPES type_value = TypeNum; + static int less(T a, T b) + { + if (a == NPY_DATETIME_NAT) return 0; + if (b == NPY_DATETIME_NAT) return 1; + return a < b; + } + static int less_equal(T a, T b) { return !less(b, a); } }; -struct string_tag { - using type = npy_char; - static constexpr NPY_TYPES type_value = NPY_STRING; - static int less(type const* a, type const* b, size_t len) { - return STRING_LT(a, b, len); - } - static int less_equal(type const* a, type const* b, size_t len) { - return !less(b, a, len); - } - static void swap(type* a, type* b, size_t len) { - STRING_SWAP(a, b, len); - } - static void copy(type * a, type const* b, size_t len) { - STRING_COPY(a, b, len); +// String / unicode tags work on runtime-length blocks. Comparison is +// unsigned (matches strcmp/wcscmp ordering) regardless of whether ``T`` is +// a signed ``char`` on the platform. 
+template +struct string_like_type { + using type = T; + static constexpr NPY_TYPES type_value = TypeNum; + + static int less(T const *a, T const *b, size_t n) + { + using U = std::make_unsigned_t; + const auto *ua = reinterpret_cast(a); + const auto *ub = reinterpret_cast(b); + for (size_t i = 0; i < n; ++i) { + if (ua[i] != ub[i]) { + return ua[i] < ub[i]; + } + } + return 0; + } + static int less_equal(T const *a, T const *b, size_t n) + { + return !less(b, a, n); + } + static void swap(T *a, T *b, size_t n) + { + for (size_t i = 0; i < n; ++i) { + T t = a[i]; + a[i] = b[i]; + b[i] = t; + } + } + static void copy(T *a, T const *b, size_t n) + { + std::memcpy(a, b, n * sizeof(T)); } }; -struct unicode_tag { - using type = npy_ucs4; - static constexpr NPY_TYPES type_value = NPY_UNICODE; - static int less(type const* a, type const* b, size_t len) { - return UNICODE_LT(a, b, len); - } - static int less_equal(type const* a, type const* b, size_t len) { - return !less(b, a, len); - } - static void swap(type* a, type* b, size_t len) { - UNICODE_SWAP(a, b, len); - } - static void copy(type * a, type const* b, size_t len) { - UNICODE_COPY(a, b, len); - } +// Concrete tags consumed by callers. 
+using bool_tag = integral_type; +using byte_tag = integral_type; +using ubyte_tag = integral_type; +using short_tag = integral_type; +using ushort_tag = integral_type; +using int_tag = integral_type; +using uint_tag = integral_type; +using long_tag = integral_type; +using ulong_tag = integral_type; +using longlong_tag = integral_type; +using ulonglong_tag = integral_type; +using float_tag = floating_point_type; +using double_tag = floating_point_type; +using longdouble_tag = floating_point_type; +using cfloat_tag = complex_type; +using cdouble_tag = complex_type; +using clongdouble_tag = complex_type; +using datetime_tag = datetime_type; +using timedelta_tag = datetime_type; +using string_tag = string_like_type; +using unicode_tag = string_like_type; + +// Type-list helper used by selection.cpp / binsearch.cpp to instantiate one +// function per supported tag. +template +struct taglist { + static constexpr unsigned size = sizeof...(Tags); }; } // namespace npy -#endif +#endif // NUMPY_CORE_SRC_COMMON_NUMPY_TAG_H_ diff --git a/numpy/_core/src/npysort/npysort_common.h b/numpy/_core/src/npysort/npysort_common.h index f2b99e3b7f66..f49ca4af8799 100644 --- a/numpy/_core/src/npysort/npysort_common.h +++ b/numpy/_core/src/npysort/npysort_common.h @@ -1,9 +1,7 @@ -#ifndef __NPY_SORT_COMMON_H__ -#define __NPY_SORT_COMMON_H__ +#ifndef NUMPY_CORE_SRC_NPYSORT_NPYSORT_COMMON_H_ +#define NUMPY_CORE_SRC_NPYSORT_NPYSORT_COMMON_H_ #include -#include -#include #include "dtypemeta.h" #ifdef __cplusplus @@ -11,40 +9,14 @@ extern "C" { #endif /* - ***************************************************************************** - ** SWAP MACROS ** - ***************************************************************************** + * Shared helpers used by the per-dtype sort implementations. 
Per-dtype + * less-than comparisons live on the tags in ``numpy_tag.h``; this header + * only carries the small handful of helpers used by the generic (cmp- + * function-driven) sort code and by the argsort variants. */ -#define BOOL_SWAP(a,b) {npy_bool tmp = (b); (b)=(a); (a) = tmp;} -#define BYTE_SWAP(a,b) {npy_byte tmp = (b); (b)=(a); (a) = tmp;} -#define UBYTE_SWAP(a,b) {npy_ubyte tmp = (b); (b)=(a); (a) = tmp;} -#define SHORT_SWAP(a,b) {npy_short tmp = (b); (b)=(a); (a) = tmp;} -#define USHORT_SWAP(a,b) {npy_ushort tmp = (b); (b)=(a); (a) = tmp;} -#define INT_SWAP(a,b) {npy_int tmp = (b); (b)=(a); (a) = tmp;} -#define UINT_SWAP(a,b) {npy_uint tmp = (b); (b)=(a); (a) = tmp;} -#define LONG_SWAP(a,b) {npy_long tmp = (b); (b)=(a); (a) = tmp;} -#define ULONG_SWAP(a,b) {npy_ulong tmp = (b); (b)=(a); (a) = tmp;} -#define LONGLONG_SWAP(a,b) {npy_longlong tmp = (b); (b)=(a); (a) = tmp;} -#define ULONGLONG_SWAP(a,b) {npy_ulonglong tmp = (b); (b)=(a); (a) = tmp;} -#define HALF_SWAP(a,b) {npy_half tmp = (b); (b)=(a); (a) = tmp;} -#define FLOAT_SWAP(a,b) {npy_float tmp = (b); (b)=(a); (a) = tmp;} -#define DOUBLE_SWAP(a,b) {npy_double tmp = (b); (b)=(a); (a) = tmp;} -#define LONGDOUBLE_SWAP(a,b) {npy_longdouble tmp = (b); (b)=(a); (a) = tmp;} -#define CFLOAT_SWAP(a,b) {npy_cfloat tmp = (b); (b)=(a); (a) = tmp;} -#define CDOUBLE_SWAP(a,b) {npy_cdouble tmp = (b); (b)=(a); (a) = tmp;} -#define CLONGDOUBLE_SWAP(a,b) {npy_clongdouble tmp = (b); (b)=(a); (a) = tmp;} -#define DATETIME_SWAP(a,b) {npy_datetime tmp = (b); (b)=(a); (a) = tmp;} -#define TIMEDELTA_SWAP(a,b) {npy_timedelta tmp = (b); (b)=(a); (a) = tmp;} - -/* Need this for the argsort functions */ -#define INTP_SWAP(a,b) {npy_intp tmp = (b); (b)=(a); (a) = tmp;} - -/* - ****************************************************************************** - ** SORTING WRAPPERS ** - ****************************************************************************** - */ +/* Argsort works on indices, swap macro for npy_intp. 
*/ +#define INTP_SWAP(a, b) {npy_intp tmp = (b); (b) = (a); (a) = tmp;} static inline void get_sort_data_from_array(void *varr, npy_intp *elsize, PyArray_CompareFunc **cmp) @@ -54,339 +26,17 @@ get_sort_data_from_array(void *varr, npy_intp *elsize, PyArray_CompareFunc **cmp *cmp = PyDataType_GetArrFuncs(PyArray_DESCR(arr))->compare; } -/* - ***************************************************************************** - ** COMPARISON FUNCTIONS ** - ***************************************************************************** - */ - -static inline int -BOOL_LT(npy_bool a, npy_bool b) -{ - return a < b; -} - - -static inline int -BYTE_LT(npy_byte a, npy_byte b) -{ - return a < b; -} - - -static inline int -UBYTE_LT(npy_ubyte a, npy_ubyte b) -{ - return a < b; -} - - -static inline int -SHORT_LT(npy_short a, npy_short b) -{ - return a < b; -} - - -static inline int -USHORT_LT(npy_ushort a, npy_ushort b) -{ - return a < b; -} - - -static inline int -INT_LT(npy_int a, npy_int b) -{ - return a < b; -} - - -static inline int -UINT_LT(npy_uint a, npy_uint b) -{ - return a < b; -} - - -static inline int -LONG_LT(npy_long a, npy_long b) -{ - return a < b; -} - - -static inline int -ULONG_LT(npy_ulong a, npy_ulong b) -{ - return a < b; -} - - -static inline int -LONGLONG_LT(npy_longlong a, npy_longlong b) -{ - return a < b; -} - - -static inline int -ULONGLONG_LT(npy_ulonglong a, npy_ulonglong b) -{ - return a < b; -} - - -static inline int -FLOAT_LT(npy_float a, npy_float b) -{ - return a < b || (b != b && a == a); -} - - -static inline int -DOUBLE_LT(npy_double a, npy_double b) -{ - return a < b || (b != b && a == a); -} - - -static inline int -LONGDOUBLE_LT(npy_longdouble a, npy_longdouble b) -{ - return a < b || (b != b && a == a); -} - - -static inline int -_npy_half_isnan(npy_half h) -{ - return ((h&0x7c00u) == 0x7c00u) && ((h&0x03ffu) != 0x0000u); -} - - -static inline int -_npy_half_lt_nonan(npy_half h1, npy_half h2) -{ - if (h1&0x8000u) { - if (h2&0x8000u) { - return 
(h1&0x7fffu) > (h2&0x7fffu); - } - else { - /* Signed zeros are equal, have to check for it */ - return (h1 != 0x8000u) || (h2 != 0x0000u); - } - } - else { - if (h2&0x8000u) { - return 0; - } - else { - return (h1&0x7fffu) < (h2&0x7fffu); - } - } -} - - -static inline int -HALF_LT(npy_half a, npy_half b) -{ - int ret; - - if (_npy_half_isnan(b)) { - ret = !_npy_half_isnan(a); - } - else { - ret = !_npy_half_isnan(a) && _npy_half_lt_nonan(a, b); - } - - return ret; -} - -/* - * For inline functions SUN recommends not using a return in the then part - * of an if statement. It's a SUN compiler thing, so assign the return value - * to a variable instead. - */ -static inline int -CFLOAT_LT(npy_cfloat a, npy_cfloat b) -{ - int ret; - - if (npy_crealf(a) < npy_crealf(b)) { - ret = npy_cimagf(a) == npy_cimagf(a) || npy_cimagf(b) != npy_cimagf(b); - } - else if (npy_crealf(a) > npy_crealf(b)) { - ret = npy_cimagf(b) != npy_cimagf(b) && npy_cimagf(a) == npy_cimagf(a); - } - else if (npy_crealf(a) == npy_crealf(b) || (npy_crealf(a) != npy_crealf(a) && npy_crealf(b) != npy_crealf(b))) { - ret = npy_cimagf(a) < npy_cimagf(b) || (npy_cimagf(b) != npy_cimagf(b) && npy_cimagf(a) == npy_cimagf(a)); - } - else { - ret = npy_crealf(b) != npy_crealf(b); - } - - return ret; -} - - -static inline int -CDOUBLE_LT(npy_cdouble a, npy_cdouble b) -{ - int ret; - - if (npy_creal(a) < npy_creal(b)) { - ret = npy_cimag(a) == npy_cimag(a) || npy_cimag(b) != npy_cimag(b); - } - else if (npy_creal(a) > npy_creal(b)) { - ret = npy_cimag(b) != npy_cimag(b) && npy_cimag(a) == npy_cimag(a); - } - else if (npy_creal(a) == npy_creal(b) || (npy_creal(a) != npy_creal(a) && npy_creal(b) != npy_creal(b))) { - ret = npy_cimag(a) < npy_cimag(b) || (npy_cimag(b) != npy_cimag(b) && npy_cimag(a) == npy_cimag(a)); - } - else { - ret = npy_creal(b) != npy_creal(b); - } - - return ret; -} - - -static inline int -CLONGDOUBLE_LT(npy_clongdouble a, npy_clongdouble b) -{ - int ret; - - if (npy_creall(a) < 
npy_creall(b)) { - ret = npy_cimagl(a) == npy_cimagl(a) || npy_cimagl(b) != npy_cimagl(b); - } - else if (npy_creall(a) > npy_creall(b)) { - ret = npy_cimagl(b) != npy_cimagl(b) && npy_cimagl(a) == npy_cimagl(a); - } - else if (npy_creall(a) == npy_creall(b) || (npy_creall(a) != npy_creall(a) && npy_creall(b) != npy_creall(b))) { - ret = npy_cimagl(a) < npy_cimagl(b) || (npy_cimagl(b) != npy_cimagl(b) && npy_cimagl(a) == npy_cimagl(a)); - } - else { - ret = npy_creall(b) != npy_creall(b); - } - - return ret; -} - - -static inline void -STRING_COPY(char *s1, char const*s2, size_t len) -{ - memcpy(s1, s2, len); -} - - -static inline void -STRING_SWAP(char *s1, char *s2, size_t len) -{ - while(len--) { - const char t = *s1; - *s1++ = *s2; - *s2++ = t; - } -} - - -static inline int -STRING_LT(const char *s1, const char *s2, size_t len) -{ - const unsigned char *c1 = (const unsigned char *)s1; - const unsigned char *c2 = (const unsigned char *)s2; - size_t i; - int ret = 0; - - for (i = 0; i < len; ++i) { - if (c1[i] != c2[i]) { - ret = c1[i] < c2[i]; - break; - } - } - return ret; -} - - -static inline void -UNICODE_COPY(npy_ucs4 *s1, npy_ucs4 const *s2, size_t len) -{ - while(len--) { - *s1++ = *s2++; - } -} - - -static inline void -UNICODE_SWAP(npy_ucs4 *s1, npy_ucs4 *s2, size_t len) -{ - while(len--) { - const npy_ucs4 t = *s1; - *s1++ = *s2; - *s2++ = t; - } -} - - -static inline int -UNICODE_LT(const npy_ucs4 *s1, const npy_ucs4 *s2, size_t len) -{ - size_t i; - int ret = 0; - - for (i = 0; i < len; ++i) { - if (s1[i] != s2[i]) { - ret = s1[i] < s2[i]; - break; - } - } - return ret; -} - - -static inline int -DATETIME_LT(npy_datetime a, npy_datetime b) -{ - if (a == NPY_DATETIME_NAT) { - return 0; - } - - if (b == NPY_DATETIME_NAT) { - return 1; - } - - return a < b; -} - - -static inline int -TIMEDELTA_LT(npy_timedelta a, npy_timedelta b) -{ - if (a == NPY_DATETIME_NAT) { - return 0; - } - - if (b == NPY_DATETIME_NAT) { - return 1; - } - - return a < b; -} - - 
+/* Element copy / swap for the generic, comparison-function-driven sort. */ static inline void GENERIC_COPY(char *a, char *b, size_t len) { memcpy(a, b, len); } - static inline void GENERIC_SWAP(char *a, char *b, size_t len) { - while(len--) { + while (len--) { const char t = *a; *a++ = *b; *b++ = t; @@ -397,4 +47,4 @@ GENERIC_SWAP(char *a, char *b, size_t len) } #endif -#endif +#endif /* NUMPY_CORE_SRC_NPYSORT_NPYSORT_COMMON_H_ */ From a2d43cb44e858ce51d92cffe3b3f7886932ef6fd Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Wed, 29 Apr 2026 08:41:23 +0200 Subject: [PATCH 1687/1718] ENH: fast path for full contiguous reductions (#31274) Adds a fast path in PyUFunc_Reduce for the common case of a full reduction (axis=None) over a trivially-iterable, aligned, non-object/reference input where the matching dtype equals the input dtype. Co-authored-by: Nathan Goldbaum Co-authored-by: Sebastian Berg --- benchmarks/benchmarks/bench_reduce.py | 24 ++- .../upcoming_changes/31274.performance.rst | 8 + numpy/_core/src/umath/ufunc_object.c | 143 +++++++++++++++--- 3 files changed, 155 insertions(+), 20 deletions(-) create mode 100644 doc/release/upcoming_changes/31274.performance.rst diff --git a/benchmarks/benchmarks/bench_reduce.py b/benchmarks/benchmarks/bench_reduce.py index 1d78e1bba03a..0bd0ef669aa3 100644 --- a/benchmarks/benchmarks/bench_reduce.py +++ b/benchmarks/benchmarks/bench_reduce.py @@ -112,8 +112,26 @@ def time_argmin(self, dtype): class SmallReduction(Benchmark): - def setup(self): - self.d = np.ones(100, dtype=np.float32) + params = [[4, 100]] + param_names = ['size'] + + def setup(self, size): + self.d = np.ones(size, dtype=np.float32) + self.b = np.ones(size, dtype=bool) - def time_small(self): + def time_sum(self, size): np.sum(self.d) + + def time_any(self, size): + np.any(self.b) + + def time_max(self, size): + np.max(self.d) + + +class SmallReduction2D(Benchmark): + def setup(self): + self.d = np.ones((4, 4), dtype=np.float32) + + def 
time_sum_axis_1(self): + np.sum(self.d, axis=1) diff --git a/doc/release/upcoming_changes/31274.performance.rst b/doc/release/upcoming_changes/31274.performance.rst new file mode 100644 index 000000000000..9fc1009e78c0 --- /dev/null +++ b/doc/release/upcoming_changes/31274.performance.rst @@ -0,0 +1,8 @@ +Faster reductions on small/medium contiguous arrays +--------------------------------------------------- +`numpy.sum`, `numpy.prod`, `numpy.any`, `numpy.all`, and other reductions +with an identity value now use a fast path when the input is a contiguous, +aligned, non-object array and the reduction covers all axes +(``axis=None``) with no special arguments. Typical speedup is ~1.3x on small +arrays; `numpy.any` / `numpy.all` on contiguous boolean arrays can see speedup +up to 1.9x. diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index 7bc524bd6040..c5f9b5a6a7fb 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -2519,22 +2519,123 @@ reduce_loop(PyArrayMethod_Context *context, } /* - * The implementation of the reduction operators with the new iterator - * turned into a bit of a long function here, but I think the design - * of this part needs to be changed to be more like einsum, so it may - * not be worth refactoring it too much. Consider this timing: + * Try a fast path that bypasses NpyIter / PyUFunc_ReduceWrapper for full + * reductions (axis=None) over a trivially iterable, aligned input where no + * casting is required. The strided reduce loop is called directly on the + * input buffer and writes into a freshly allocated 0-d result. * - * >>> a = arange(10000) - * - * >>> timeit sum(a) - * 10000 loops, best of 3: 17 us per loop - * - * >>> timeit einsum("i->",a) - * 100000 loops, best of 3: 13.5 us per loop - * - * The axes must already be bounds-checked by the calling function, - * this function does not validate them. 
+ * Returns: + * 1 on success; ``*out_result`` holds the new 0-d result. + * 0 if any precondition is unmet (caller should run the slow path); + * ``*out_result`` is NULL and no error is set. + * -1 on hard error during the fast path; ``*out_result`` is NULL and + * a Python error is set. */ +static inline int +try_reduce_contiguous( + PyArrayMethod_Context *context, PyArrayObject *arr, + PyArray_Descr *const *descrs, + PyArrayObject *out, PyArrayObject *wheremask, PyObject *initial, + int ndim, int naxes, int keepdims, + int errormask, + PyArrayObject **out_result) +{ + NPY_BEGIN_THREADS_DEF; + *out_result = NULL; + + PyArrayMethodObject *ufuncimpl = context->method; + if (!(out == NULL && wheremask == NULL && initial == NULL && keepdims == 0 + && naxes == ndim + && PyArray_TRIVIALLY_ITERABLE(arr) + && PyArray_ISALIGNED(arr) + && PyArray_DESCR(arr) == descrs[1] + && descrs[0] == descrs[1] + && !PyDataType_REFCHK(descrs[0]) + && (ndim <= 1 + || (ufuncimpl->flags & NPY_METH_IS_REORDERABLE)))) { + return 0; + } + npy_intp count = PyArray_SIZE(arr); + if (count == 0) { + /* Let the slow path handle empty (it knows the proper semantics). */ + return 0; + } + + /* Allocate the 0-d result first so the loop can write into it. */ + Py_INCREF(descrs[0]); + PyArrayObject *result = (PyArrayObject *)PyArray_NewFromDescr( + &PyArray_Type, descrs[0], 0, NULL, NULL, NULL, 0, NULL); + if (result == NULL) { + return -1; + } + char *accum = PyArray_BYTES(result); + int has_initial = 0; + if (ufuncimpl->get_reduction_initial != NULL) { + has_initial = ufuncimpl->get_reduction_initial( + context, /*reduction_is_empty=*/0, accum); + if (has_initial < 0) { + Py_DECREF(result); + return -1; + } + } + + /* + * For C/F-contiguous N-D arrays the stride is always elsize; for 1-D + * arrays we need the actual stride to handle non-contiguous slices. 
+ */ + npy_intp arr_stride = PyArray_TRIVIAL_PAIR_ITERATION_STRIDE(count, arr); + char *src = PyArray_BYTES(arr); + if (!has_initial) { + /* + * No identity available -- seed the accumulator with arr[0] and + * reduce over arr[1:]. + */ + memcpy(accum, src, descrs[1]->elsize); + src += arr_stride; + count -= 1; + } + if (count == 0) { + /* Single-element input with no identity -- accum already holds arr[0]. */ + *out_result = result; + return 1; + } + + npy_intp strides[3] = {0, arr_stride, 0}; + PyArrayMethod_StridedLoop *strided_loop; + NpyAuxData *auxdata = NULL; + NPY_ARRAYMETHOD_FLAGS flags = 0; + if (ufuncimpl->get_strided_loop(context, /*aligned=*/1, + /*move_references=*/0, strides, + &strided_loop, &auxdata, &flags) < 0) { + Py_DECREF(result); + return -1; + } + int needs_fperr = !(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS); + if (needs_fperr) { + npy_clear_floatstatus_barrier((char *)context); + } + if (!(flags & NPY_METH_REQUIRES_PYAPI)) { + NPY_BEGIN_THREADS_THRESHOLDED(count); + } + char *data[3] = {accum, src, accum}; + int res = strided_loop(context, data, &count, strides, auxdata); + NPY_END_THREADS; + NPY_AUXDATA_FREE(auxdata); + if (res == 0 && PyErr_Occurred()) { + res = -1; + } + if (res == 0 && needs_fperr) { + res = _check_ufunc_fperr(errormask, "reduce"); + } + if (res < 0) { + Py_DECREF(result); + return -1; + } + *out_result = result; + return 1; +} + + static PyArrayObject * PyUFunc_Reduce(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, @@ -2580,10 +2681,18 @@ PyUFunc_Reduce(PyUFuncObject *ufunc, context.caller = (PyObject *)ufunc; context.method = ufuncimpl; - PyArrayObject *result = PyUFunc_ReduceWrapper(&context, - arr, out, wheremask, axis_flags, keepdims, - initial, reduce_loop, buffersize, ufunc_name, errormask); + PyArrayObject *result = NULL; + int fast_status = try_reduce_contiguous( + &context, arr, descrs, out, wheremask, initial, + ndim, naxes, keepdims, errormask, &result); + if (fast_status == 0) { + /* Fast path 
did not apply; run the full reduction. */ + result = PyUFunc_ReduceWrapper(&context, + arr, out, wheremask, axis_flags, keepdims, + initial, reduce_loop, buffersize, ufunc_name, errormask); + } + /* Fall through to shared cleanup of `descrs`. */ for (int i = 0; i < 3; i++) { Py_DECREF(descrs[i]); } From aafacc6f4fa9e00522a45a81a167533df5d58a55 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Wed, 29 Apr 2026 00:38:55 -0700 Subject: [PATCH 1688/1718] PERF: branchless calendar algorithms for datetime conversions (#31126) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replace `get_datetimestruct_days` (ymd→days) with Hinnant's loop-free public-domain algorithm and `set_datetimestruct_days` (days→ymd) with Neri-Schneider's branchless Euclidean Affine Function algorithm. Simplify `days_to_month_number` to delegate to the new `set_datetimestruct_days` instead of duplicating the days→year→month logic. Mirrors pandas PR #64662. Co-authored-by: Claude Opus 4.6 (1M context) --- numpy/_core/src/multiarray/datetime.c | 152 ++++++++++++++------------ numpy/_core/tests/test_datetime.py | 28 +++++ 2 files changed, 108 insertions(+), 72 deletions(-) diff --git a/numpy/_core/src/multiarray/datetime.c b/numpy/_core/src/multiarray/datetime.c index 26e78f6301d2..a15ad5703658 100644 --- a/numpy/_core/src/multiarray/datetime.c +++ b/numpy/_core/src/multiarray/datetime.c @@ -116,64 +116,31 @@ is_leapyear(npy_int64 year) /* * Calculates the days offset from the 1970 epoch. + * + * Adapted from Hinnant's days_from_civil algorithm (public domain). + * See: https://howardhinnant.github.io/date_algorithms.html#days_from_civil + * + * This is the same algorithm used by C++20 std::chrono + * (year_month_day to sys_days). See libstdc++: + * gcc/libstdc++-v3/include/std/chrono (operator sys_days) + * + * The March-1 epoch trick places the leap day at the end of the year, + * eliminating special cases for February. 
*/ NPY_NO_EXPORT npy_int64 get_datetimestruct_days(const npy_datetimestruct *dts) { - int i, month; - npy_int64 year, days = 0; - int *month_lengths; - - year = dts->year - 1970; - days = year * 365; - - /* Adjust for leap years */ - if (days >= 0) { - /* - * 1968 is the closest leap year before 1970. - * Exclude the current year, so add 1. - */ - year += 1; - /* Add one day for each 4 years */ - days += year / 4; - /* 1900 is the closest previous year divisible by 100 */ - year += 68; - /* Subtract one day for each 100 years */ - days -= year / 100; - /* 1600 is the closest previous year divisible by 400 */ - year += 300; - /* Add one day for each 400 years */ - days += year / 400; - } - else { - /* - * 1972 is the closest later year after 1970. - * Include the current year, so subtract 2. - */ - year -= 2; - /* Subtract one day for each 4 years */ - days += year / 4; - /* 2000 is the closest later year divisible by 100 */ - year -= 28; - /* Add one day for each 100 years */ - days -= year / 100; - /* 2000 is also the closest later year divisible by 400 */ - /* Subtract one day for each 400 years */ - days += year / 400; - } - - month_lengths = _days_per_month_table[is_leapyear(dts->year)]; - month = dts->month - 1; - - /* Add the months */ - for (i = 0; i < month; ++i) { - days += month_lengths[i]; - } - - /* Add the days */ - days += dts->day - 1; - - return days; + npy_int64 y = dts->year - (dts->month <= 2); + npy_int64 era = (y >= 0 ? y : y - 399) / 400; + npy_uint32 yoe = (npy_uint32)(y - era * 400); /* [0, 399] */ + npy_uint32 doy = + (153 * (npy_uint32)(dts->month > 2 ? 
dts->month - 3 + : dts->month + 9) + 2) / + 5 + + (npy_uint32)dts->day - 1; /* [0, 365] */ + npy_uint32 doe = yoe * 365 + yoe / 4 - yoe / 100 + doy; /* [0, 146096] */ + + return era * (npy_int64)146097 + (npy_int64)doe - (npy_int64)719468; } /* @@ -189,6 +156,9 @@ get_datetimestruct_minutes(const npy_datetimestruct *dts) return days; } +static void +set_datetimestruct_days(npy_int64 days, npy_datetimestruct *dts); + /* * Modifies '*days_' to be the day offset within the year, * and returns the year. @@ -226,33 +196,71 @@ days_to_yearsdays(npy_int64 *days_) NPY_NO_EXPORT int days_to_month_number(npy_datetime days) { - npy_int64 year; - int *month_lengths, i; - - year = days_to_yearsdays(&days); - month_lengths = _days_per_month_table[is_leapyear(year)]; - - for (i = 0; i < 12; ++i) { - if (days < month_lengths[i]) { - return i + 1; - } - else { - days -= month_lengths[i]; - } - } - - /* Should never get here */ - return 1; + npy_datetimestruct dts; + set_datetimestruct_days(days, &dts); + return dts.month; } /* * Fills in the year, month, day in 'dts' based on the days * offset from 1970. + * + * Adapted from neri_schneider.hpp::to_date (MIT license) in: + * https://github.com/cassioneri/eaf + * SPDX-FileCopyrightText: 2022 Cassio Neri + * SPDX-FileCopyrightText: 2022 Lorenz Schneider + * + * Algorithm: Neri C, Schneider L. "Euclidean Affine Functions and their + * Application to Calendar Algorithms." Software: Practice and Experience. + * 2023;53(4):937-970. doi:10.1002/spe.3172 + * + * This is the same algorithm used by C++20 std::chrono + * (sys_days to year_month_day). See libstdc++: + * gcc/libstdc++-v3/include/std/chrono (year_month_day::_M_days_since_epoch) + * + * Falls back to the classical algorithm for dates beyond ~32K years + * before epoch or ~2.9M years after (effectively never reached in practice). 
*/ static void set_datetimestruct_days(npy_int64 days, npy_datetimestruct *dts) { - int *month_lengths, i; + /* Neri-Schneider valid range: [-12699422, 1061042401] (~year -32800..2906945) + * s = 82, K = 719468 + 146097 * s, L = 400 * s. */ + if (days >= -12699422LL && days <= 1061042401LL) { + const npy_uint32 K = 12699422u; + const npy_uint32 L = 32800u; + + /* Rata die shift. */ + npy_uint32 N = (npy_uint32)(npy_int32)days + K; + + /* Century. */ + npy_uint32 N_1 = 4 * N + 3; + npy_uint32 C = N_1 / 146097; + npy_uint32 N_C = N_1 % 146097 / 4; + + /* Year. */ + npy_uint32 N_2 = 4 * N_C + 3; + npy_uint64 P_2 = (npy_uint64)2939745 * N_2; + npy_uint32 Z = (npy_uint32)(P_2 / 4294967296ULL); + npy_uint32 N_Y = (npy_uint32)(P_2 % 4294967296ULL) / 2939745 / 4; + + /* Month and day. */ + npy_uint32 N_3 = 2141 * N_Y + 197913; + npy_uint32 M = N_3 / 65536; + npy_uint32 D = N_3 % 65536 / 2141; + + /* Map from March-based to January-based calendar. */ + npy_uint32 J = N_Y >= 306; + npy_uint32 Y = 100 * C + Z; + dts->year = (npy_int64)((npy_int32)(Y - L) + (npy_int32)J); + dts->month = (npy_int32)(J ? 
M - 12 : M); + dts->day = (npy_int32)(D + 1); + return; + } + + /* Fallback for extreme dates outside Neri-Schneider range */ + const int *month_lengths; + int i; dts->year = days_to_yearsdays(&days); month_lengths = _days_per_month_table[is_leapyear(dts->year)]; diff --git a/numpy/_core/tests/test_datetime.py b/numpy/_core/tests/test_datetime.py index 4825e6926179..4b57f79476fe 100644 --- a/numpy/_core/tests/test_datetime.py +++ b/numpy/_core/tests/test_datetime.py @@ -2800,6 +2800,34 @@ def test_limit_str_roundtrip(self, time_unit, sign): limit_via_str = np.datetime64(str(limit), time_unit) assert limit_via_str == limit + @pytest.mark.parametrize("days,expected", [ + # Fast-path lower boundary (Neri-Schneider range starts at -12699422) + (-12699422, "-32800-03-01"), + (-12699421, "-32800-03-02"), + # Fast-path upper boundary (Neri-Schneider range ends at 1061042401) + (1061042400, "2907005-06-04"), + (1061042401, "2907005-06-05"), + # Just outside fast-path (fallback) + (-12699423, "-32800-02-29"), + (1061042402, "2907005-06-06"), + # Typical dates near epoch for sanity + (0, "1970-01-01"), + (-1, "1969-12-31"), + (1, "1970-01-02"), + # Leap year boundaries + (10957, "2000-01-01"), + (11016, "2000-02-29"), # 2000 is a leap year + (11017, "2000-03-01"), + (-25567, "1900-01-01"), # 1900 is not a leap year + ]) + def test_days_to_date_roundtrip(self, days, expected): + """Test the calendar conversion at Neri-Schneider algorithm boundaries + and typical dates, verifying both the fast-path and fallback.""" + dt = np.datetime64(days, "D") + assert str(dt) == expected + # roundtrip back + assert dt == np.datetime64(expected, "D") + def test_cast_to_truncated_string_doesnt_overflow(self): a = np.array([1, -2, 1], dtype='timedelta64[D]') assert_array_equal(a.astype('U1'), ['1', '-', '1']) From 4d213572cfb96b95c83693bbada3ebb65d7c9627 Mon Sep 17 00:00:00 2001 From: Rupesh <206439536+Rupeshhsharma@users.noreply.github.com> Date: Wed, 29 Apr 2026 13:31:42 +0530 Subject: 
[PATCH 1689/1718] BUG: Fix build failure with --latex-doc by filtering flag from backend args (#30331) --- numpy/f2py/f2py2e.py | 2 +- numpy/f2py/tests/test_f2py2e.py | 17 +++++++++++++++++ 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/numpy/f2py/f2py2e.py b/numpy/f2py/f2py2e.py index 98c2b7c65805..ba1c12e1a4ec 100644 --- a/numpy/f2py/f2py2e.py +++ b/numpy/f2py/f2py2e.py @@ -627,7 +627,7 @@ def run_compile(): sysinfo_flags = [f[7:] for f in sysinfo_flags] _reg2 = re.compile( - r'--((no-|)(wrap-functions|lower|freethreading-compatible)|debug-capi|quiet|skip-empty-wrappers)|-include') + r'--((no-|)(wrap-functions|lower|freethreading-compatible|latex-doc)|debug-capi|quiet|skip-empty-wrappers)|-include') f2py_flags = [_m for _m in sys.argv[1:] if _reg2.match(_m)] sys.argv = [_m for _m in sys.argv if _m not in f2py_flags] f2py_flags2 = [] diff --git a/numpy/f2py/tests/test_f2py2e.py b/numpy/f2py/tests/test_f2py2e.py index 90063d474a33..c77e5578614a 100644 --- a/numpy/f2py/tests/test_f2py2e.py +++ b/numpy/f2py/tests/test_f2py2e.py @@ -507,6 +507,23 @@ def test_nolatexdoc(capfd, hello_world_f90, monkeypatch): out, _ = capfd.readouterr() assert "Documentation is saved to file" not in out +def test_latex_doc_gh30268(tmp_path): + + if not util.has_fortran_compiler(): + pytest.skip("No Fortran compiler found") + + fsource = textwrap.dedent(""" + subroutine foo + end + """) + + fpath = tmp_path / "test_latex.f90" + with open(fpath, "w") as f: + f.write(fsource) + + cmd = [sys.executable, "-m", "numpy.f2py", "-c", str(fpath), "-m", "test_latex", "--latex-doc"] + subprocess.check_call(cmd, cwd=tmp_path) + def test_shortlatex(capfd, hello_world_f90, monkeypatch): """Ensures that truncated documentation is written out From d4e5fafa28f4198c535e64ec18584f9ffd180d4d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Daniel=20Koronth=C3=A1ly?= <60303774+daniel-koronthaly@users.noreply.github.com> Date: Wed, 29 Apr 2026 01:29:54 -0700 Subject: [PATCH 1690/1718] MAINT: Enable 
linting with ruff E501 #28947 (test_linalg.py) (#30195) Co-authored-by: Joren Hammudoglu Co-authored-by: Sebastian Berg --- numpy/linalg/tests/test_linalg.py | 50 ++++++++++++++++++++++--------- ruff.toml | 1 - 2 files changed, 36 insertions(+), 15 deletions(-) diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py index 88096edb2ed7..e7de3c53c79e 100644 --- a/numpy/linalg/tests/test_linalg.py +++ b/numpy/linalg/tests/test_linalg.py @@ -157,7 +157,10 @@ def apply_tag(tag, cases): array([2. + 1j, 1. + 2j], dtype=cdouble)), LinalgCase("cdouble_2", array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=cdouble), - array([[2. + 1j, 1. + 2j, 1 + 3j], [1 - 2j, 1 - 3j, 1 - 6j]], dtype=cdouble)), + array( + [[2. + 1j, 1. + 2j, 1 + 3j], [1 - 2j, 1 - 3j, 1 - 6j]], + dtype=cdouble, + )), LinalgCase("0x0", np.empty((0, 0), dtype=double), np.empty((0,), dtype=double), @@ -189,28 +192,43 @@ def apply_tag(tag, cases): array([2., 1., 3.], dtype=double)), LinalgCase("csingle_nsq_1", array( - [[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=csingle), + [[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], + dtype=csingle, + ), array([2. + 1j, 1. + 2j], dtype=csingle)), LinalgCase("csingle_nsq_2", array( - [[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=csingle), + [[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], + dtype=csingle, + ), array([2. + 1j, 1. + 2j, 3. - 3j], dtype=csingle)), LinalgCase("cdouble_nsq_1", array( - [[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=cdouble), + [[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], + dtype=cdouble, + ), array([2. + 1j, 1. + 2j], dtype=cdouble)), LinalgCase("cdouble_nsq_2", array( - [[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=cdouble), + [[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], + dtype=cdouble, + ), array([2. + 1j, 1. + 2j, 3. 
- 3j], dtype=cdouble)), LinalgCase("cdouble_nsq_1_2", array( - [[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=cdouble), + [[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], + dtype=cdouble, + ), array([[2. + 1j, 1. + 2j], [1 - 1j, 2 - 2j]], dtype=cdouble)), LinalgCase("cdouble_nsq_2_2", array( - [[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=cdouble), - array([[2. + 1j, 1. + 2j], [1 - 1j, 2 - 2j], [1 - 1j, 2 - 2j]], dtype=cdouble)), + [[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], + dtype=cdouble, + ), + array( + [[2. + 1j, 1. + 2j], [1 - 1j, 2 - 2j], [1 - 1j, 2 - 2j]], + dtype=cdouble, + )), LinalgCase("8x11", np.random.rand(8, 11), np.random.rand(8)), @@ -633,8 +651,8 @@ def do(self, a, b, tags): res = linalg.eig(a) eigenvalues, eigenvectors = res.eigenvalues, res.eigenvectors assert_allclose(matmul(a, eigenvectors), - np.asarray(eigenvectors) * np.asarray(eigenvalues)[..., None, :], - rtol=get_rtol(eigenvalues.dtype)) + np.asarray(eigenvectors) * np.asarray(eigenvalues)[..., None, :], + rtol=get_rtol(eigenvalues.dtype)) assert_(consistent_subclass(eigenvectors, a)) @@ -735,8 +753,10 @@ def hermitian(mat): axes[-1], axes[-2] = axes[-2], axes[-1] return np.conj(np.transpose(mat, axes=axes)) - assert_almost_equal(np.matmul(u, hermitian(u)), np.broadcast_to(np.eye(u.shape[-1]), u.shape)) - assert_almost_equal(np.matmul(vt, hermitian(vt)), np.broadcast_to(np.eye(vt.shape[-1]), vt.shape)) + expected = np.broadcast_to(np.eye(u.shape[-1]), u.shape) + assert_almost_equal(np.matmul(u, hermitian(u)), expected) + expected = np.broadcast_to(np.eye(vt.shape[-1]), vt.shape) + assert_almost_equal(np.matmul(vt, hermitian(vt)), expected) assert_equal(np.sort(s)[..., ::-1], s) assert_(consistent_subclass(u, a)) assert_(consistent_subclass(vt, a)) @@ -881,7 +901,8 @@ def do(self, a, b, tags): a_ginv = linalg.pinv(a) # `a @ a_ginv == I` does not hold if a is singular dot = matmul - assert_almost_equal(dot(dot(a, 
a_ginv), a), a, single_decimal=5, double_decimal=11) + result = dot(dot(a, a_ginv), a) + assert_almost_equal(result, a, single_decimal=5, double_decimal=11) assert_(consistent_subclass(a_ginv, a)) @@ -895,7 +916,8 @@ def do(self, a, b, tags): a_ginv = linalg.pinv(a, hermitian=True) # `a @ a_ginv == I` does not hold if a is singular dot = matmul - assert_almost_equal(dot(dot(a, a_ginv), a), a, single_decimal=5, double_decimal=11) + result = dot(dot(a, a_ginv), a) + assert_almost_equal(result, a, single_decimal=5, double_decimal=11) assert_(consistent_subclass(a_ginv, a)) diff --git a/ruff.toml b/ruff.toml index 5a9f9ea3a3c6..084cab506e0e 100644 --- a/ruff.toml +++ b/ruff.toml @@ -83,7 +83,6 @@ ignore = [ "numpy/_core/_add_newdocs.py" = ["E501"] "numpy/_core/_add_newdocs_scalars.py" = ["E501"] -"numpy/linalg/tests/test_linalg.py" = ["E501"] "numpy/f2py/*py" = ["E501"] "numpy*pyi" = ["E501"] # "useless assignments" aren't so useless when you're testing that they don't make type checkers scream From fc3fff253fcece69d135ef77fcc11b9eb6c5e2b3 Mon Sep 17 00:00:00 2001 From: Marten Henric van Kerkwijk Date: Fri, 24 Apr 2026 18:49:56 +0200 Subject: [PATCH 1691/1718] MAINT: Combine the subarray stuff with the other dtype setting stuff. 
--- numpy/_core/src/multiarray/common.c | 147 +++++++++++++++++++-------- numpy/_core/src/multiarray/common.h | 26 ++++- numpy/_core/src/multiarray/convert.c | 34 ++----- numpy/_core/src/multiarray/ctors.c | 13 +-- numpy/_core/src/multiarray/getset.c | 50 +++------ 5 files changed, 157 insertions(+), 113 deletions(-) diff --git a/numpy/_core/src/multiarray/common.c b/numpy/_core/src/multiarray/common.c index f753d6276bd9..6512332413a3 100644 --- a/numpy/_core/src/multiarray/common.c +++ b/numpy/_core/src/multiarray/common.c @@ -18,6 +18,7 @@ #include "get_attr_string.h" #include "mem_overlap.h" #include "array_coercion.h" +#include "alloc.h" /* for npy_alloc_cache_dim */ /* * The casting to use for implicit assignment operations resulting from @@ -371,49 +372,68 @@ _may_have_objects(PyArray_Descr *dtype) } /* - * Get a sub-array descriptor base, storing the subarray - * dimensions and strides, and updating the number of dimensions - * of the array. + * Get the number of dimensions of a subarray. If added to nd, + * it exceeds NPY_MAXDIMS, return -1 with an exception set. + */ +static inline int +_get_subarray_ndim(const PyArray_Descr *descr, const int nd) +{ + PyObject *shape = PyDataType_SUBARRAY(descr)->shape; + assert(shape); + int subnd = PyTuple_Check(shape) ? PyTuple_GET_SIZE(shape) : 1; + if (nd + subnd > NPY_MAXDIMS) { + PyErr_Format(PyExc_ValueError, + "number of dimensions must be within [0, %d]", NPY_MAXDIMS); + return -1; + } + return subnd; +} + +/* + * For a sub-array descriptor, return the base descriptor, + * set newnd to nd plus the number of subarray dimensions, + * and fill newdims by copying nd items from dims and appending + * newnd-nd items from the subarray. + * If newstrides != NULL, newstrides are similarly filled. * - * Strides are only stored if needed. + * Note: caller has to ensure that descr is a subarray, and that + * newdims and newstrides are big enough (i.e., NPY_MAXDIMS if + * the new size is not yet known). 
*/ NPY_NO_EXPORT PyArray_Descr* _get_subarray_base_and_dimensions( const PyArray_Descr *descr, const int nd, const npy_intp *dims, const npy_intp *strides, - int *new_nd, npy_intp *new_dims, npy_intp *new_strides) + int *newnd, npy_intp *newdims, npy_intp *newstrides) { - PyObject *shape = PyDataType_SUBARRAY(descr)->shape; PyArray_Descr *base = PyDataType_SUBARRAY(descr)->base; - assert(shape && base); - npy_bool tuple = PyTuple_Check(shape); - int sub_nd = tuple ? PyTuple_GET_SIZE(shape) : 1; + PyObject *shape = PyDataType_SUBARRAY(descr)->shape; + assert(base && shape); - if (nd + sub_nd > NPY_MAXDIMS) { - PyErr_Format(PyExc_ValueError, - "number of dimensions must be within [0, %d]", NPY_MAXDIMS); + int subnd = _get_subarray_ndim(descr, nd); + if (subnd < 0) { return NULL; } - *new_nd = nd + sub_nd; - memcpy(new_dims, dims, nd * sizeof(npy_intp)); - if (tuple) { - for (int i = 0; i < sub_nd; i++) { - new_dims[i+nd] = (npy_intp)PyLong_AsLong(PyTuple_GET_ITEM(shape, i)); + *newnd = nd + subnd; + memcpy(newdims, dims, nd * sizeof(npy_intp)); + if (PyTuple_Check(shape)) { + for (int i = 0; i < subnd; i++) { + newdims[nd+i] = (npy_intp)PyLong_AsLong(PyTuple_GET_ITEM(shape, i)); } } else { - new_dims[nd] = (npy_intp)PyLong_AsLong(shape); + newdims[nd] = (npy_intp)PyLong_AsLong(shape); } - - if (strides) { - memcpy(new_strides, strides, nd * sizeof(npy_intp)); + if (newstrides) { + assert(strides || nd==0); + memcpy(newstrides, strides, nd * sizeof(npy_intp)); npy_intp tempsize; /* Make new strides -- always C-contiguous */ tempsize = base->elsize; - for (int i = nd + sub_nd - 1; i >= nd; i--) { - new_strides[i] = tempsize; - tempsize *= new_dims[i] ? new_dims[i] : 1; + for (int i = nd + subnd - 1; i >= nd; i--) { + newstrides[i] = tempsize; + tempsize *= newdims[i] ? newdims[i] : 1; } } @@ -424,16 +444,22 @@ _get_subarray_base_and_dimensions( /* * Check whether self can be viewed with the given dtype. * If so, return a new reference to the dtype (possibly changed). 
- * If needed, also determine new dimensions and strides for the last axis. - * If no change is needed, newlastdim is set to -1. + * If needed, also determine new dimensions and strides: + * - For views, *newdims and *newstrides hold storage. If a change is + * required, copy old dims and strides and make the change. + * If no change is needed, set *newdims and *newstrides to self's versions. + * - For _set_dtype, *newdims and *newstrides are NULL. Allocate a new + * array if the number of dimensions increases (because type is a + * subarray), and otherwise use self's dims and strides, possibly + * changing the last element in-place. */ NPY_NO_EXPORT PyArray_Descr* _check_compatibility_with_new_dtype( PyArrayObject *self, PyArray_Descr *type, - npy_intp *newlastdim, npy_intp *newlaststride) + int *newnd, npy_intp **newdims, npy_intp **newstrides) { PyArray_Descr *dtype = PyArray_DESCR(self); - *newlastdim = -1; /* By default, no change needed. */ + int nd = PyArray_NDIM(self); /* Check that we are not reinterpreting memory containing Objects. */ if (_may_have_objects(dtype) || _may_have_objects(type)) { @@ -450,6 +476,31 @@ _check_compatibility_with_new_dtype( Py_DECREF(safe); } + if (PyDataType_HASSUBARRAY(type)) { + if (type->elsize != dtype->elsize) { + PyErr_SetString(PyExc_ValueError, + "Changing the dtype to a subarray type is only supported " + "if the total itemsize is unchanged"); + return NULL; + } + int subnd = _get_subarray_ndim(type, nd); + if (subnd < 0) { + return NULL; + } + if (*newdims == NULL) { + *newdims = npy_alloc_cache_dim(2 * (nd + subnd)); + if (*newdims == NULL) { + return NULL; + } + *newstrides = *newdims + nd + subnd; + } + return _get_subarray_base_and_dimensions( + type, nd, PyArray_DIMS(self), PyArray_STRIDES(self), + newnd, *newdims, *newstrides); /* cannot fail given check above. */ + } + + /* Number of dimensions can no longer change. 
*/ + *newnd = nd; if (type->elsize != dtype->elsize) { /* * Viewing as an unsized void implies a void dtype matching @@ -461,20 +512,13 @@ _check_compatibility_with_new_dtype( return NULL; } newtype->elsize = dtype->elsize; + *newdims = PyArray_DIMS(self); + *newstrides = PyArray_STRIDES(self); return newtype; } /* - * Otherwise, changing the size of the dtype results in a shape change, - * which we signal by setting newlastdim and newlaststride. + * Otherwise, changing the size of the dtype results in a shape change. */ - /* Check forbidden cases. */ - if (PyDataType_HASSUBARRAY(type)) { - PyErr_SetString(PyExc_ValueError, - "Changing the dtype to a subarray type is only supported " - "if the total itemsize is unchanged"); - return NULL; - } - int nd = PyArray_NDIM(self); if (nd == 0) { PyErr_SetString(PyExc_ValueError, "Changing the dtype of a 0d array is only supported " @@ -482,9 +526,10 @@ _check_compatibility_with_new_dtype( return NULL; } /* Resize on last axis only (we have nd>0 here). */ - npy_intp lastdim = PyArray_DIMS(self)[nd-1]; + int lastaxis = nd-1; + npy_intp lastdim = PyArray_DIMS(self)[lastaxis]; if (lastdim != 1 && PyArray_SIZE(self) != 0 && - PyArray_STRIDES(self)[nd-1] != dtype->elsize) { + PyArray_STRIDES(self)[lastaxis] != dtype->elsize) { PyErr_SetString(PyExc_ValueError, "To change to a dtype of a different size, the last axis " "must be contiguous"); @@ -498,7 +543,7 @@ _check_compatibility_with_new_dtype( "divisor of the size of original dtype"); return NULL; } - *newlastdim = (dtype->elsize / type->elsize) * lastdim; + lastdim *= (dtype->elsize / type->elsize); } else /* type->elsize > dtype->elsize */ { /* If it is compatible, decrease the size of the relevant axis. 
*/ @@ -510,9 +555,25 @@ _check_compatibility_with_new_dtype( "of the array."); return NULL; } - *newlastdim = lastsize / type->elsize; + lastdim = lastsize / type->elsize; + } + if (*newdims != NULL) { + /* Have storage for dims & strides (view); copy unchanged ones */ + memcpy(*newdims, PyArray_DIMS(self), lastaxis * sizeof(npy_intp)); + memcpy(*newstrides, PyArray_STRIDES(self), lastaxis * sizeof(npy_intp)); } - *newlaststride = type->elsize; + else { + /* Don't have storage, so update in place (_set_dtype) */ + *newdims = PyArray_DIMS(self); + *newstrides = PyArray_STRIDES(self); + } + (*newdims)[lastaxis] = lastdim; + (*newstrides)[lastaxis] = type->elsize; + } + else { + /* Itemsizes equal, dims and strides unchanged. */ + *newdims = PyArray_DIMS(self); + *newstrides = PyArray_STRIDES(self); } Py_INCREF(type); return type; diff --git a/numpy/_core/src/multiarray/common.h b/numpy/_core/src/multiarray/common.h index 173243f1d672..a395704ff03f 100644 --- a/numpy/_core/src/multiarray/common.h +++ b/numpy/_core/src/multiarray/common.h @@ -95,23 +95,39 @@ _unpack_field_index( NPY_NO_EXPORT int _may_have_objects(PyArray_Descr *dtype); -/* For use in viewing as a new descriptor */ +/* + * For a sub-array descriptor, return the base descriptor, + * set newnd to nd plus the number of subarray dimensions, + * and fill newdims by copying nd items from dims and appending + * newnd-nd items from the subarray. + * If newstrides != NULL, they are similarly filled. + * + * Note: caller has to ensure that descr is a subarray, and that + * newdims and newstrides are big enough (i.e., NPY_MAXDIMS if + * the new size is not yet known). + */ NPY_NO_EXPORT PyArray_Descr* _get_subarray_base_and_dimensions( const PyArray_Descr *descr, const int nd, const npy_intp *dims, const npy_intp *strides, - int *new_nd, npy_intp *new_dims, npy_intp *new_strides); + int *newnd, npy_intp *newdims, npy_intp *newstrides); /* * Check whether self can be viewed with the given dtype. 
* If so, return a new reference to the dtype (possibly changed). - * If needed, also determine new dimensions and strides for the last axis. - * If no change is needed, newlastdim is set to -1. + * If needed, also determine new dimensions and strides: + * - For views, *newdims and *newstrides hold storage. If a change is + * required, copy old dims and strides and make the change. + * If no change is needed, set *dims and *strides to self's versions. + * - For _set_dtype, *newdims and *newstrides are NULL. Allocate a new + * array if the number of dimensions increases (because type is a + * subarray), and otherwise use self's dims and strides, possibly + * changing the last element in-place. */ NPY_NO_EXPORT PyArray_Descr* _check_compatibility_with_new_dtype( PyArrayObject *self, PyArray_Descr *type, - npy_intp *newlastdim, npy_intp *newlaststride); + int *newnd, npy_intp **newdims, npy_intp **newstrides); /* * Returns -1 and sets an exception if *index is an invalid index for diff --git a/numpy/_core/src/multiarray/convert.c b/numpy/_core/src/multiarray/convert.c index c5197b76169c..87fdb4b5efd1 100644 --- a/numpy/_core/src/multiarray/convert.c +++ b/numpy/_core/src/multiarray/convert.c @@ -611,35 +611,21 @@ PyArray_View(PyArrayObject *self, PyArray_Descr *type, PyTypeObject *pytype) * Path 1: subclass lives in the future and its __array_finalize__ * can handle getting the correct dtype+shape. */ - npy_intp newlastdim, newlaststride; - /* Check whether the type is compatible. */ + npy_intp storage[2 * NPY_MAXDIMS]; + int newnd; + npy_intp *newdims = storage; + npy_intp *newstrides = storage + NPY_MAXDIMS; + /* Check whether the type is compatible. dims and strides pointers + will be set to input ones if no change needed, otherwise filled. 
*/ Py_SETREF(type, _check_compatibility_with_new_dtype( - self, type, &newlastdim, &newlaststride)); + self, type, &newnd, &newdims, &newstrides)); if (type == NULL) { return NULL; } /* Take view with old or adjusted dims (steals reference to type) */ - if (newlastdim < 0) { - return PyArray_NewFromDescr_int(subtype, type, - nd, dims, strides, PyArray_DATA(self), - flags, (PyObject *)self, (PyObject *)self, 0); - } - else { - NPY_ALLOC_WORKSPACE(newdims, npy_intp, 2 * 4, 2 * nd); - if (newdims == NULL) { - goto finish; - } - npy_intp *newstrides = newdims + nd; - memcpy(newdims, dims, (nd-1)*sizeof(npy_intp)); - memcpy(newstrides, strides, (nd-1)*sizeof(npy_intp)); - newdims[nd-1] = newlastdim; - newstrides[nd-1] = newlaststride; - ret = PyArray_NewFromDescr_int(subtype, type, - nd, newdims, newstrides, PyArray_DATA(self), - flags, (PyObject *)self, (PyObject *)self, 0); - npy_free_workspace(newdims); - return ret; - } + return PyArray_NewFromDescr_int( + subtype, type, newnd, newdims, newstrides, PyArray_DATA(self), + flags, (PyObject *)self, (PyObject *)self, 0); } /* * Other paths: first create a view with the old dtype. diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index d5ba8228f987..3edbda23f283 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -636,21 +636,22 @@ PyArray_NewFromDescr_int( */ if (!(cflags & _NPY_ARRAY_ENSURE_DTYPE_IDENTITY)) { if (PyDataType_SUBARRAY(descr)) { - PyObject *ret; + /* For a subarray, get the base dtype and use that in a retry + with the subarray dimensions and strides appended to the + input ones (for strides, if the input strides are known). */ int newnd; npy_intp newdims[2*NPY_MAXDIMS]; - npy_intp *newstrides = strides ? newdims + NPY_MAXDIMS : NULL; + npy_intp *newstrides = ( + (strides != NULL || nd == 0) ? 
newdims + NPY_MAXDIMS : NULL); Py_SETREF(descr, _get_subarray_base_and_dimensions( descr, nd, dims, strides, &newnd, newdims, newstrides)); if (descr == NULL) { return NULL; } - ret = PyArray_NewFromDescr_int( - subtype, descr, - newnd, newdims, newstrides, data, + return PyArray_NewFromDescr_int( + subtype, descr, newnd, newdims, newstrides, data, flags, obj, base, cflags); - return ret; } /* Check datatype element size */ diff --git a/numpy/_core/src/multiarray/getset.c b/numpy/_core/src/multiarray/getset.c index c3fc0cb4025c..2bf9988d3184 100644 --- a/numpy/_core/src/multiarray/getset.c +++ b/numpy/_core/src/multiarray/getset.c @@ -354,46 +354,26 @@ array_descr_set_internal(PyArrayObject *self, PyObject *arg) return -1; } /* Check dtype and possibly give new dim & stride for last axis */ - npy_intp newlastdim, newlaststride; + int newnd; + npy_intp *newdims = NULL; + npy_intp *newstrides = NULL; + /* Check whether the type is compatible, get pointers to dimensions and + strides (can be from input or to newly allocated dim_array). */ Py_SETREF(newtype, _check_compatibility_with_new_dtype( - self, newtype, &newlastdim, &newlaststride)); + self, newtype, &newnd, &newdims, &newstrides)); if (newtype == NULL) { return -1; } - - /* Viewing as a subarray increases the number of dimensions */ - if (PyDataType_HASSUBARRAY(newtype)) { - assert(newlastdim < 0); /* not allowed for subarrays */ - PyObject *shape = PyDataType_SUBARRAY(newtype)->shape; - /* Just assert(PyTuple_Check(shape))?? */ - int nd = PyArray_NDIM(self); - int new_nd = nd + (PyTuple_Check(shape) ? 
PyTuple_GET_SIZE(shape) : 1); - /* create new dimensions cache and fill it */ - npy_intp *new_dims = npy_alloc_cache_dim(2 * new_nd); - if (new_dims == NULL) { - PyErr_NoMemory(); - Py_DECREF(newtype); - return -1; - } - npy_intp *new_strides = new_dims + new_nd; - int chk_nd; - Py_SETREF(newtype, _get_subarray_base_and_dimensions( - newtype, nd, PyArray_DIMS(self), PyArray_STRIDES(self), - &chk_nd, new_dims, new_strides)); - if (newtype == NULL) { - return -1; - } - assert(chk_nd == new_nd); - /* Update self with new cache */ + if (newnd != PyArray_NDIM(self)) { + /* Update self with new dim array created above (subarray dtype). */ + assert(newdims != PyArray_DIMS(self)); npy_free_cache_dim_array(self); - ((PyArrayObject_fields *)self)->nd = new_nd; - ((PyArrayObject_fields *)self)->dimensions = new_dims; - ((PyArrayObject_fields *)self)->strides = new_strides; - } - else if (newlastdim >= 0) { - int lastaxis = PyArray_NDIM(self) - 1; - PyArray_DIMS(self)[lastaxis] = newlastdim; - PyArray_STRIDES(self)[lastaxis] = newlaststride; + ((PyArrayObject_fields *)self)->nd = newnd; + ((PyArrayObject_fields *)self)->dimensions = newdims; + ((PyArrayObject_fields *)self)->strides = newstrides; + } + else { /* We keep our old dims (possibly changed inplace) */ + assert(newdims == PyArray_DIMS(self)); } Py_DECREF(PyArray_DESCR(self)); ((PyArrayObject_fields *)self)->descr = newtype; From a3b68e8dfa5f592dcd36a8ac3457a63484714a24 Mon Sep 17 00:00:00 2001 From: Marten Henric van Kerkwijk Date: Wed, 29 Apr 2026 14:54:57 +0200 Subject: [PATCH 1692/1718] MAINT: ensure order F is respected also for scalar subarray. 
--- numpy/_core/src/multiarray/ctors.c | 3 +-- numpy/_core/tests/test_multiarray.py | 11 +++++++++++ 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index 3edbda23f283..61de211083d4 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -641,8 +641,7 @@ PyArray_NewFromDescr_int( input ones (for strides, if the input strides are known). */ int newnd; npy_intp newdims[2*NPY_MAXDIMS]; - npy_intp *newstrides = ( - (strides != NULL || nd == 0) ? newdims + NPY_MAXDIMS : NULL); + npy_intp *newstrides = strides ? newdims + NPY_MAXDIMS : NULL; Py_SETREF(descr, _get_subarray_base_and_dimensions( descr, nd, dims, strides, &newnd, newdims, newstrides)); diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 9dce25e36298..26c200971daf 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -475,6 +475,17 @@ def test_fill_subarrays(self): assert_equal(arr, np.array(data, dtype=dtype)) + def test_subarray_order_f(self): + # Subarrays are always C contiguous, but if we request Fortran, + # we should get it if the subarray is resolved away. + data = ([[1, 2, 3], [4, 5, 6]]) + arr = np.full((), data, dtype=("f8", (2, 3)), order="F") + assert not arr.flags.c_contiguous + assert arr.flags.f_contiguous + arr2 = np.full((4, 5), data, dtype=("f8", (2, 3)), order="F") + assert not arr2.flags.c_contiguous + assert arr2.flags.f_contiguous + class TestArrayConstruction: def test_array(self): From 001a3861b538e042acca4acbfa05c965e3390957 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 29 Apr 2026 17:57:14 +0000 Subject: [PATCH 1693/1718] MAINT: Update gitpython requirement Updates the requirements on [gitpython](https://github.com/gitpython-developers/GitPython) to permit the latest version. 
Updates `gitpython` to 3.1.47 - [Release notes](https://github.com/gitpython-developers/GitPython/releases) - [Changelog](https://github.com/gitpython-developers/GitPython/blob/main/CHANGES) - [Commits](https://github.com/gitpython-developers/GitPython/compare/3.1.46...3.1.47) --- updated-dependencies: - dependency-name: gitpython dependency-version: 3.1.47 dependency-type: direct:production dependency-group: python-deps ... Signed-off-by: dependabot[bot] --- requirements/linter_requirements.txt | 2 +- requirements/release_requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements/linter_requirements.txt b/requirements/linter_requirements.txt index c5fd608f0b9d..328071b48f58 100644 --- a/requirements/linter_requirements.txt +++ b/requirements/linter_requirements.txt @@ -1,5 +1,5 @@ # keep in sync with `environment.yml` cython-lint ruff==0.15.11 -GitPython>=3.1.46 +GitPython>=3.1.47 spin diff --git a/requirements/release_requirements.txt b/requirements/release_requirements.txt index 8462e8dedf14..a4b381b327e8 100644 --- a/requirements/release_requirements.txt +++ b/requirements/release_requirements.txt @@ -3,7 +3,7 @@ # changelog.py pygithub -gitpython>=3.1.46 +gitpython>=3.1.47 # uploading release documentation packaging From 451309f539a0ffa1b6bd8c8cff407c8b3d694410 Mon Sep 17 00:00:00 2001 From: AnkitAhlawat Date: Thu, 30 Apr 2026 19:18:10 +0530 Subject: [PATCH 1694/1718] BUG: Fix signed overflow issue in npy_gcd for INT_MIN on s390x (#31360) This PR fixes signed overflow issue in npy_gcd when one of the inputs is the minimum value of a signed integer type (INT_MIN) on s390x architecture. issue was the expression a < 0 ? -a : a in npy_math_internal.h.src causes undefined behavior when a = INT_MIN because negating INT_MIN overflows a signed integer on s390x. 
i see it is working fine with other arch so fix only provided to s390x --- numpy/_core/src/npymath/npy_math_internal.h.src | 11 ++++++++++- numpy/_core/tests/test_umath.py | 12 ++++++++---- 2 files changed, 18 insertions(+), 5 deletions(-) diff --git a/numpy/_core/src/npymath/npy_math_internal.h.src b/numpy/_core/src/npymath/npy_math_internal.h.src index 2f3849744688..787f7e6d87c0 100644 --- a/numpy/_core/src/npymath/npy_math_internal.h.src +++ b/numpy/_core/src/npymath/npy_math_internal.h.src @@ -650,12 +650,21 @@ npy_lcm@c@(@type@ a, @type@ b) * * #type = (npy_int, npy_long, npy_longlong)*2# * #c = (,l,ll)*2# + * #utype = (npy_uint, npy_ulong, npy_ulonglong)*2# * #func=gcd*3,lcm*3# */ NPY_INPLACE @type@ npy_@func@@c@(@type@ a, @type@ b) { - return npy_@func@u@c@(a < 0 ? -a : a, b < 0 ? -b : b); + /* + * Cast to unsigned to avoid overflow when negating minimum signed value. + * For negative values, cast to unsigned first, then apply negation in + * unsigned arithmetic to avoid undefined behavior. + * This ensures correct behavior across all architectures. #31359 + */ + @utype@ a_abs = a < 0 ? (@utype@)(0) - (@utype@)a : (@utype@)a; + @utype@ b_abs = b < 0 ? 
(@utype@)(0) - (@utype@)b : (@utype@)b; + return (@type@)npy_@func@u@c@(a_abs, b_abs); } /**end repeat**/ diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py index ff61e7f3bafc..b5f621c7927d 100644 --- a/numpy/_core/tests/test_umath.py +++ b/numpy/_core/tests/test_umath.py @@ -4297,13 +4297,17 @@ def test_lcm_overflow(self): assert_equal(np.lcm(a, b), 10 * big) def test_gcd_overflow(self): - for dtype in (np.int32, np.int64): - # verify that we don't overflow when taking abs(x) - # not relevant for lcm, where the result is unrepresentable anyway - a = dtype(np.iinfo(dtype).min) # negative power of two + # verify that we don't overflow when taking abs(x) for INT_MIN + # this was undefined behavior that manifested on s390x with GCC 11.5 + for dtype in (np.int8, np.int16, np.int32, np.int64): + a = dtype(np.iinfo(dtype).min) # INT_MIN q = -(a // 4) + # Test with INT_MIN as first argument assert_equal(np.gcd(a, q * 3), q) assert_equal(np.gcd(a, -q * 3), q) + # Test with INT_MIN as second argument + assert_equal(np.gcd(q * 3, a), q) + assert_equal(np.gcd(-q * 3, a), q) def test_decimal(self): from decimal import Decimal From 5cba3b4aa6c4847c6595d7e0a8e9f2f077296c79 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 30 Apr 2026 15:18:26 -0600 Subject: [PATCH 1695/1718] MAINT: Bump conda-incubator/setup-miniconda from 3.3.0 to 4.0.0 (#31368) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/macos.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 8bc040a73fe5..ff5264f6c466 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -59,7 +59,7 @@ jobs: ${{ github.workflow }}-${{ matrix.python-version }}-ccache-macos- - name: Setup Miniforge - uses: 
conda-incubator/setup-miniconda@fc2d68f6413eb2d87b895e92f8584b5b94a10167 # v3.3.0 + uses: conda-incubator/setup-miniconda@bce0bd83659520ccaf4925e604d458d512f7df37 # v4.0.0 with: python-version: ${{ matrix.python-version }} channels: conda-forge From 2d004114eb05d021c896b41b2bf497776d717a0d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 1 May 2026 16:38:49 -0600 Subject: [PATCH 1696/1718] MAINT: Bump int128/hide-comment-action from 1.56.0 to 1.57.0 (#31372) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/mypy_primer_comment.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/mypy_primer_comment.yml b/.github/workflows/mypy_primer_comment.yml index dc753f55252c..29b5b4d4e213 100644 --- a/.github/workflows/mypy_primer_comment.yml +++ b/.github/workflows/mypy_primer_comment.yml @@ -49,7 +49,7 @@ jobs: return parseInt(fs.readFileSync("pr_number.txt", { encoding: "utf8" })) - name: Hide old comments - uses: int128/hide-comment-action@1f2fb354c37723970677f254382dd0324a77327a # v1.56.0 + uses: int128/hide-comment-action@d38cf5901fbb26e4d5790890955713b0ec539087 # v1.57.0 with: token: ${{ secrets.GITHUB_TOKEN }} issue-number: ${{ steps.get-pr-number.outputs.result }} From 78515a7ebe285a8edb046b954acb7c825ff48576 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 1 May 2026 17:07:51 -0600 Subject: [PATCH 1697/1718] MAINT: Bump ruff from 0.15.11 to 0.15.12 in /requirements in the python-deps group (#31374) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements/linter_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/linter_requirements.txt b/requirements/linter_requirements.txt index 328071b48f58..8aa8aea20871 
100644 --- a/requirements/linter_requirements.txt +++ b/requirements/linter_requirements.txt @@ -1,5 +1,5 @@ # keep in sync with `environment.yml` cython-lint -ruff==0.15.11 +ruff==0.15.12 GitPython>=3.1.47 spin From a572cd9d4bceefbda6a5542f201e9e77670a4322 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 1 May 2026 17:08:34 -0600 Subject: [PATCH 1698/1718] MAINT: Bump conda-incubator/setup-miniconda from 4.0.0 to 4.0.1 (#31373) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/macos.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index ff5264f6c466..f080c4802e36 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -59,7 +59,7 @@ jobs: ${{ github.workflow }}-${{ matrix.python-version }}-ccache-macos- - name: Setup Miniforge - uses: conda-incubator/setup-miniconda@bce0bd83659520ccaf4925e604d458d512f7df37 # v4.0.0 + uses: conda-incubator/setup-miniconda@8ee1f361103df19b6f8c8655fd3967a8ecb162d5 # v4.0.1 with: python-version: ${{ matrix.python-version }} channels: conda-forge From 8d78a99c0bb441ebea2487aa5ac4f6a19cb9a34b Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Sat, 2 May 2026 04:23:20 +0200 Subject: [PATCH 1699/1718] BUG: Avoid optimization causing astropy regression (#31371) --- numpy/_core/src/multiarray/descriptor.c | 7 +++++++ numpy/_core/tests/test_dtype.py | 15 +++++++++++++++ 2 files changed, 22 insertions(+) diff --git a/numpy/_core/src/multiarray/descriptor.c b/numpy/_core/src/multiarray/descriptor.c index a45206e3b5ef..1edfdd6c7b3b 100644 --- a/numpy/_core/src/multiarray/descriptor.c +++ b/numpy/_core/src/multiarray/descriptor.c @@ -1315,6 +1315,13 @@ _convert_from_dict(PyObject *obj, int align) Py_DECREF(new); goto fail; } + if (itemsize != new->elsize) { + /* + * astropy relied on "holes" at the 
end not being used until this + * is fixed we cannot optimize the path as a full dtype copy. + */ + new->flags |= NPY_NOT_TRIVIALLY_COPYABLE; + } /* Set the itemsize */ new->elsize = itemsize; } diff --git a/numpy/_core/tests/test_dtype.py b/numpy/_core/tests/test_dtype.py index 6464ccd61f9d..89f9f1919bcd 100644 --- a/numpy/_core/tests/test_dtype.py +++ b/numpy/_core/tests/test_dtype.py @@ -628,6 +628,21 @@ def test_nonstructured_with_object(self): assert arr.dtype.hasobject # but claims to contain objects del arr # the deletion failed previously. + def test_structured_out_of_range(self): + # This test should be able to fail as an optimization, but astropy + # relied on the non-optimized behavior. + # A future version could force astropy to either fix the code or + # introduce a kwarg to disable the optimization default. But for now + # we avoid the astropy regression. + # The following dtype cannot be assumed to just include padding + dtype = np.dtype(dict(names=["a"], formats=["i4"], itemsize=8)) + raw_arr = np.zeros(10, dtype="i8") + arr1 = raw_arr.view(dtype) + arr2 = np.full(10, -1, dtype="i8").view(dtype) + # This should only fill in the i4 field of `dtype`: + arr1[...] 
= arr2 + assert (raw_arr.view("i4")[1::2] == 0).all() + class TestSubarray: def test_single_subarray(self): From 32ba7b65473abbb852c82deedd0a655ec9059752 Mon Sep 17 00:00:00 2001 From: Marten Henric van Kerkwijk Date: Sun, 3 May 2026 10:21:18 +0200 Subject: [PATCH 1700/1718] MAINT: clarify why a structured dtype might not be trivially copyable --- numpy/_core/src/multiarray/descriptor.c | 19 +++++++------------ numpy/_core/tests/test_dtype.py | 9 +++------ 2 files changed, 10 insertions(+), 18 deletions(-) diff --git a/numpy/_core/src/multiarray/descriptor.c b/numpy/_core/src/multiarray/descriptor.c index 1edfdd6c7b3b..fe6f6259411a 100644 --- a/numpy/_core/src/multiarray/descriptor.c +++ b/numpy/_core/src/multiarray/descriptor.c @@ -1315,24 +1315,19 @@ _convert_from_dict(PyObject *obj, int align) Py_DECREF(new); goto fail; } - if (itemsize != new->elsize) { - /* - * astropy relied on "holes" at the end not being used until this - * is fixed we cannot optimize the path as a full dtype copy. - */ - new->flags |= NPY_NOT_TRIVIALLY_COPYABLE; - } /* Set the itemsize */ new->elsize = itemsize; } /* - * Mark as not trivially copyable only if explicit offsets were provided - * and the layout has holes. Alignment padding (without explicit offsets) - * is safe to overwrite with memcpy. + * Check if anything prevents using memcpy for whole items for this dtype, + * i.e., whether there are any holes unrelated to alignment padding + * (since those holes might be used to avoid accessing/overwriting stuff). + * Such holes can be introduced due to choices of itemsize or offsets. 
*/ - if (offsets != NULL && - !is_dtype_struct_simple_unaligned_layout((PyArray_Descr *)new)) { + if (new->elsize != totalsize + || (offsets != NULL && !is_dtype_struct_simple_unaligned_layout( + (PyArray_Descr *)new))) { new->flags |= NPY_NOT_TRIVIALLY_COPYABLE; } diff --git a/numpy/_core/tests/test_dtype.py b/numpy/_core/tests/test_dtype.py index 89f9f1919bcd..1e69c9ed6443 100644 --- a/numpy/_core/tests/test_dtype.py +++ b/numpy/_core/tests/test_dtype.py @@ -629,12 +629,9 @@ def test_nonstructured_with_object(self): del arr # the deletion failed previously. def test_structured_out_of_range(self): - # This test should be able to fail as an optimization, but astropy - # relied on the non-optimized behavior. - # A future version could force astropy to either fix the code or - # introduce a kwarg to disable the optimization default. But for now - # we avoid the astropy regression. - # The following dtype cannot be assumed to just include padding + # Regression test for gh-29270 against over-eager optimization of + # copying of structured dtype: do not copy when dtype has holes + # because the itemsize was explicitly given. 
dtype = np.dtype(dict(names=["a"], formats=["i4"], itemsize=8)) raw_arr = np.zeros(10, dtype="i8") arr1 = raw_arr.view(dtype) From 27b26e9e3f95e8af3611891ccd0ff53f3169cde2 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Mon, 4 May 2026 16:03:12 +0200 Subject: [PATCH 1701/1718] CI: remove Cirrus CI FreeBSD job (#31380) --- .cirrus.star | 36 ----------- doc/neps/nep-0057-numpy-platform-support.rst | 4 +- tools/ci/cirrus_arm.yml | 68 -------------------- 3 files changed, 2 insertions(+), 106 deletions(-) delete mode 100644 .cirrus.star delete mode 100644 tools/ci/cirrus_arm.yml diff --git a/.cirrus.star b/.cirrus.star deleted file mode 100644 index 3de5ce97b0e8..000000000000 --- a/.cirrus.star +++ /dev/null @@ -1,36 +0,0 @@ -# The guide to programming cirrus-ci tasks using starlark is found at -# https://cirrus-ci.org/guide/programming-tasks/ -# -# In this simple starlark script we simply check conditions for whether -# a CI run should go ahead. If the conditions are met, then we just -# return the yaml containing the tasks to be run. - -load("cirrus", "env", "fs", "http") - -def main(ctx): - ###################################################################### - # Only test on the numpy/numpy repository - ###################################################################### - - if env.get("CIRRUS_REPO_FULL_NAME") != "numpy/numpy": - return [] - - # Obtain commit message for the event. Unfortunately CIRRUS_CHANGE_MESSAGE - # only contains the actual commit message on a non-PR trigger event. - # For a PR event it contains the PR title and description. 
- SHA = env.get("CIRRUS_CHANGE_IN_REPO") - url = "https://api.github.com/repos/numpy/numpy/git/commits/" + SHA - dct = http.get(url).json() - - commit_msg = dct["message"] - if "[skip cirrus]" in commit_msg or "[skip ci]" in commit_msg: - return [] - - labels = env.get("CIRRUS_PR_LABELS", "") - pr_number = env.get("CIRRUS_PR", "-1") - tag = env.get("CIRRUS_TAG", "") - - if int(pr_number) < 0: - return [] - - return fs.read("tools/ci/cirrus_arm.yml") diff --git a/doc/neps/nep-0057-numpy-platform-support.rst b/doc/neps/nep-0057-numpy-platform-support.rst index 570287d4d0d4..1fdaa50eb412 100644 --- a/doc/neps/nep-0057-numpy-platform-support.rst +++ b/doc/neps/nep-0057-numpy-platform-support.rst @@ -227,8 +227,6 @@ Tier 3 platforms: +--------------------+----------------------------------------+----------------------------------+ | Platform | Notes | Contacts | +====================+========================================+==================================+ -| FreeBSD | Runs on Cirrus CI | Ralf Gommers | -+--------------------+----------------------------------------+----------------------------------+ | Linux ppc64le | Runs on IBM-provided self-hosted | Sandeep Gupta | | | runners, see gh-22318_ | | +--------------------+----------------------------------------+----------------------------------+ @@ -257,6 +255,8 @@ list): +------------------------------------+--------------------------------------------------+ | Platform | Notes | +====================================+==================================================+ +| FreeBSD | Was Tier 3, ran on Cirrus CI | ++--------------------+----------------------------------------+-------------------------+ | PyPy | Was Tier 2 for releases <=2.4.x, see gh-30416_ | +------------------------------------+--------------------------------------------------+ | macOS ppc64, universal, universal2 | | diff --git a/tools/ci/cirrus_arm.yml b/tools/ci/cirrus_arm.yml deleted file mode 100644 index 977921d8236d..000000000000 --- 
a/tools/ci/cirrus_arm.yml +++ /dev/null @@ -1,68 +0,0 @@ -modified_clone: &MODIFIED_CLONE - # makes sure that for a PR the CI runs against a merged main - clone_script: | - if [ -z "$CIRRUS_PR" ]; then - # if you're not in a PR then clone against the branch name that was pushed to. - git clone --recursive --branch=$CIRRUS_BRANCH https://x-access-token:${CIRRUS_REPO_CLONE_TOKEN}@github.com/${CIRRUS_REPO_FULL_NAME}.git $CIRRUS_WORKING_DIR - git reset --hard $CIRRUS_CHANGE_IN_REPO - else - # it's a PR so clone the main branch then merge the changes from the PR - git clone https://x-access-token:${CIRRUS_REPO_CLONE_TOKEN}@github.com/${CIRRUS_REPO_FULL_NAME}.git $CIRRUS_WORKING_DIR - git fetch origin pull/$CIRRUS_PR/head:pull/$CIRRUS_PR - - # CIRRUS_BASE_BRANCH will probably be `main` for the majority of the time - # However, if you do a PR against a maintenance branch we will want to - # merge the PR into the maintenance branch, not main - git checkout $CIRRUS_BASE_BRANCH - - # alpine git package needs default user.name and user.email to be set before a merge - git -c user.email="you@example.com" merge --no-commit pull/$CIRRUS_PR - git submodule update --init --recursive - fi - - -freebsd_test_task: - use_compute_credits: $CIRRUS_USER_COLLABORATOR == 'true' - compute_engine_instance: - image_project: freebsd-org-cloud-dev - image: family/freebsd-14-3 - platform: freebsd - cpu: 1 - memory: 4G - - install_devtools_script: | - pkg install -y git bash ninja ccache blas cblas lapack pkgconf - pkg install -y python312 - - <<: *MODIFIED_CLONE - - ccache_cache: - folder: .ccache - populate_script: - - mkdir -p .ccache - fingerprint_key: ccache-freebsd - - prepare_env_script: | - # Create a venv (the `source` command needs bash, not the default sh shell) - chsh -s /usr/local/bin/bash - python3.12 -m venv .venv - source .venv/bin/activate - # Minimal build and test requirements - python3.12 -m pip install -U pip - python3.12 -m pip install meson-python Cython pytest hypothesis - 
- build_script: | - chsh -s /usr/local/bin/bash - source .venv/bin/activate - python3.12 -m pip install . --no-build-isolation -v -Csetup-args="-Dallow-noblas=false" - - test_script: | - chsh -s /usr/local/bin/bash - source .venv/bin/activate - cd tools - python3.12 -m pytest --pyargs numpy -m "not slow" - ccache -s - - on_failure: - debug_script: | - cat build/meson-logs/meson-log.txt From 626596a6facfa364346983183d7a791b453dc236 Mon Sep 17 00:00:00 2001 From: mattip Date: Mon, 4 May 2026 23:41:41 +0300 Subject: [PATCH 1702/1718] update openblas to 0.3.33 --- requirements/ci32_requirements.txt | 2 +- requirements/ci_requirements.txt | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/requirements/ci32_requirements.txt b/requirements/ci32_requirements.txt index 02ff529de09c..b484bfaa683f 100644 --- a/requirements/ci32_requirements.txt +++ b/requirements/ci32_requirements.txt @@ -1,3 +1,3 @@ spin # Keep this in sync with ci_requirements.txt -scipy-openblas32==0.3.31.188.0 +scipy-openblas32==0.3.33.0.0 diff --git a/requirements/ci_requirements.txt b/requirements/ci_requirements.txt index 397fa703e28d..e70209e05dd3 100644 --- a/requirements/ci_requirements.txt +++ b/requirements/ci_requirements.txt @@ -1,4 +1,4 @@ spin # Keep this in sync with ci32_requirements.txt -scipy-openblas32==0.3.31.188.0 -scipy-openblas64==0.3.31.188.0 +scipy-openblas32==0.3.33.0.0 +scipy-openblas64==0.3.33.0.0 From ebb9a67884df64995799eeecb4eac435a0c0fd46 Mon Sep 17 00:00:00 2001 From: mattip Date: Tue, 5 May 2026 11:45:55 +0300 Subject: [PATCH 1703/1718] BUILD: newer MKL uses so.3 --- .github/workflows/linux_blas.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/linux_blas.yml b/.github/workflows/linux_blas.yml index aaed39e21e84..2fe6eed7d4d9 100644 --- a/.github/workflows/linux_blas.yml +++ b/.github/workflows/linux_blas.yml @@ -313,6 +313,7 @@ jobs: # add the expected .so -> .so.2 symlinks to fix linking cd .. 
for i in $( ls libmkl*.so.2 ); do ln -s $i ${i%.*}; done + for i in $( ls libmkl*.so.3 ); do ln -s $i ${i%.*}; done - name: Build with defaults (LP64) run: | From b8f46acaf157be60ed714015d4728d86ce451a19 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Tue, 5 May 2026 11:01:14 +0200 Subject: [PATCH 1704/1718] BLD/MAINT: improve support for Intel LLVM compilers With these changes, downstream users wanting to use the Intel compilers can do so when linking against NumPy headers. Also include a build change for NumPy itself that is clearly correct: Intel compilers do not use `_Fcomplex` & co for complex types, but the regular C99 complex.h types. Taken over from PR 25044 Closes gh-25044 Closes gh-31337 (reports the same `_Fcomplex` issue) Co-authored-by: Matti Picus --- numpy/_core/include/numpy/npy_common.h | 4 ++-- numpy/_core/meson.build | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/_core/include/numpy/npy_common.h b/numpy/_core/include/numpy/npy_common.h index 5eaa29035428..bf97ba689a73 100644 --- a/numpy/_core/include/numpy/npy_common.h +++ b/numpy/_core/include/numpy/npy_common.h @@ -392,11 +392,11 @@ typedef struct #include -#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) +#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) && !defined(__INTEL_LLVM_COMPILER) typedef _Dcomplex npy_cdouble; typedef _Fcomplex npy_cfloat; typedef _Lcomplex npy_clongdouble; -#else /* !defined(_MSC_VER) || defined(__INTEL_COMPILER) */ +#else /* !defined(_MSC_VER) || defined(__INTEL_COMPILER) && !defined(__INTEL_LLVM_COMPILER) */ typedef double _Complex npy_cdouble; typedef float _Complex npy_cfloat; typedef longdouble_t _Complex npy_clongdouble; diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index 331afe21e0b5..2a2ee53ba745 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -179,7 +179,7 @@ if not cc.has_header('complex.h') error('"complex.h" header not found') endif -if cc.get_argument_syntax() == 'msvc' +if 
cc.get_id() == 'msvc' or cc.get_id() == 'clang-cl' complex_types_to_check = [ ['NPY_SIZEOF_COMPLEX_FLOAT', '_Fcomplex'], ['NPY_SIZEOF_COMPLEX_DOUBLE', '_Dcomplex'], From 3d88bcbb1b75b9764676ab73a7fd6426c9be6b90 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 5 May 2026 17:59:42 +0000 Subject: [PATCH 1705/1718] MAINT: Bump the python-deps group in /requirements with 2 updates Updates the requirements on [gitpython](https://github.com/gitpython-developers/GitPython) and [pyrefly](https://github.com/facebook/pyrefly) to permit the latest version. Updates `gitpython` to 3.1.48 - [Release notes](https://github.com/gitpython-developers/GitPython/releases) - [Changelog](https://github.com/gitpython-developers/GitPython/blob/main/CHANGES) - [Commits](https://github.com/gitpython-developers/GitPython/compare/3.1.47...3.1.48) Updates `pyrefly` from 0.62.0 to 0.63.0 - [Release notes](https://github.com/facebook/pyrefly/releases) - [Commits](https://github.com/facebook/pyrefly/compare/0.62.0...0.63.0) --- updated-dependencies: - dependency-name: gitpython dependency-version: 3.1.48 dependency-type: direct:production dependency-group: python-deps - dependency-name: pyrefly dependency-version: 0.63.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: python-deps ... 
Signed-off-by: dependabot[bot] --- requirements/linter_requirements.txt | 2 +- requirements/release_requirements.txt | 2 +- requirements/typing_requirements.txt | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/requirements/linter_requirements.txt b/requirements/linter_requirements.txt index 8aa8aea20871..16687746c0fa 100644 --- a/requirements/linter_requirements.txt +++ b/requirements/linter_requirements.txt @@ -1,5 +1,5 @@ # keep in sync with `environment.yml` cython-lint ruff==0.15.12 -GitPython>=3.1.47 +GitPython>=3.1.48 spin diff --git a/requirements/release_requirements.txt b/requirements/release_requirements.txt index a4b381b327e8..4bf00589c9fa 100644 --- a/requirements/release_requirements.txt +++ b/requirements/release_requirements.txt @@ -3,7 +3,7 @@ # changelog.py pygithub -gitpython>=3.1.47 +gitpython>=3.1.48 # uploading release documentation packaging diff --git a/requirements/typing_requirements.txt b/requirements/typing_requirements.txt index dfeb1fe276f2..73dd5604e556 100644 --- a/requirements/typing_requirements.txt +++ b/requirements/typing_requirements.txt @@ -3,4 +3,4 @@ -r test_requirements.txt mypy==1.20.2 -pyrefly==0.62.0 +pyrefly==0.63.0 From bff2957bce128a047e9ef7768f071a2b49085a06 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 6 May 2026 17:58:04 +0000 Subject: [PATCH 1706/1718] MAINT: Bump the python-deps group in /requirements with 2 updates Updates the requirements on [gitpython](https://github.com/gitpython-developers/GitPython) and [pyrefly](https://github.com/facebook/pyrefly) to permit the latest version. 
Updates `gitpython` to 3.1.49 - [Release notes](https://github.com/gitpython-developers/GitPython/releases) - [Changelog](https://github.com/gitpython-developers/GitPython/blob/main/CHANGES) - [Commits](https://github.com/gitpython-developers/GitPython/compare/3.1.48...3.1.49) Updates `pyrefly` from 0.63.0 to 0.63.1 - [Release notes](https://github.com/facebook/pyrefly/releases) - [Commits](https://github.com/facebook/pyrefly/compare/0.63.0...0.63.1) --- updated-dependencies: - dependency-name: gitpython dependency-version: 3.1.49 dependency-type: direct:production dependency-group: python-deps - dependency-name: pyrefly dependency-version: 0.63.1 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: python-deps ... Signed-off-by: dependabot[bot] --- requirements/linter_requirements.txt | 2 +- requirements/release_requirements.txt | 2 +- requirements/typing_requirements.txt | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/requirements/linter_requirements.txt b/requirements/linter_requirements.txt index 16687746c0fa..3e4661a73733 100644 --- a/requirements/linter_requirements.txt +++ b/requirements/linter_requirements.txt @@ -1,5 +1,5 @@ # keep in sync with `environment.yml` cython-lint ruff==0.15.12 -GitPython>=3.1.48 +GitPython>=3.1.49 spin diff --git a/requirements/release_requirements.txt b/requirements/release_requirements.txt index 4bf00589c9fa..bf24d14c4238 100644 --- a/requirements/release_requirements.txt +++ b/requirements/release_requirements.txt @@ -3,7 +3,7 @@ # changelog.py pygithub -gitpython>=3.1.48 +gitpython>=3.1.49 # uploading release documentation packaging diff --git a/requirements/typing_requirements.txt b/requirements/typing_requirements.txt index 73dd5604e556..24ef1e426475 100644 --- a/requirements/typing_requirements.txt +++ b/requirements/typing_requirements.txt @@ -3,4 +3,4 @@ -r test_requirements.txt mypy==1.20.2 -pyrefly==0.63.0 +pyrefly==0.63.1 From 
15a344dcce52a0e17d3d8f7af37f858f98a9f058 Mon Sep 17 00:00:00 2001 From: Kumar Aditya Date: Thu, 7 May 2026 01:28:12 +0530 Subject: [PATCH 1707/1718] ENH: Implement opaque PyObject ABI support (#31091) --- doc/release/upcoming_changes/31091.c_api.rst | 4 + numpy/__init__.cython-30.pxd | 10 +- numpy/_core/code_generators/cversions.txt | 3 +- numpy/_core/code_generators/numpy_api.py | 11 + .../numpy/_neighborhood_iterator_imp.h | 16 +- numpy/_core/include/numpy/arrayscalars.h | 32 +- numpy/_core/include/numpy/ndarrayobject.h | 11 +- numpy/_core/include/numpy/ndarraytypes.h | 852 ++++++++++-------- numpy/_core/include/numpy/npy_2_compat.h | 6 +- numpy/_core/include/numpy/numpyconfig.h | 6 +- numpy/_core/include/numpy/ufuncobject.h | 18 +- numpy/_core/meson.build | 4 +- numpy/_core/src/multiarray/abstractdtypes.h | 6 +- numpy/_core/src/multiarray/arrayobject.c | 75 ++ numpy/_core/src/umath/ufunc_object.c | 7 + .../examples/limited_api/limited_api_opaque.c | 363 ++++++++ .../tests/examples/limited_api/meson.build | 75 +- numpy/_core/tests/test_limited_api.py | 50 + 18 files changed, 1090 insertions(+), 459 deletions(-) create mode 100644 doc/release/upcoming_changes/31091.c_api.rst create mode 100644 numpy/_core/tests/examples/limited_api/limited_api_opaque.c diff --git a/doc/release/upcoming_changes/31091.c_api.rst b/doc/release/upcoming_changes/31091.c_api.rst new file mode 100644 index 000000000000..dd7e3caf6b89 --- /dev/null +++ b/doc/release/upcoming_changes/31091.c_api.rst @@ -0,0 +1,4 @@ +Support for free-threaded stable ABI +------------------------------------ +NumPy now supports the stable ABI for free-threaded Python as described in +:pep:`803`. 
diff --git a/numpy/__init__.cython-30.pxd b/numpy/__init__.cython-30.pxd index e05c20c57761..5b59d4df8f7c 100644 --- a/numpy/__init__.cython-30.pxd +++ b/numpy/__init__.cython-30.pxd @@ -836,6 +836,10 @@ cdef extern from "numpy/ndarraytypes.h": ctypedef int (*NpyIter_IterNextFunc "NpyIter_IterNextFunc *")(NpyIter* it) noexcept nogil ctypedef void (*NpyIter_GetMultiIndexFunc "NpyIter_GetMultiIndexFunc *")(NpyIter* it, npy_intp* outcoords) noexcept nogil + PyArray_DatetimeMetaData _PyDatetimeScalarObject_GetMetadata(object) noexcept nogil + PyArray_DatetimeMetaData _PyTimedeltaScalarObject_GetMetadata(object) noexcept nogil + npy_datetime _PyDatetimeScalarObject_GetValue(object) noexcept nogil + npy_timedelta _PyTimedeltaScalarObject_GetValue(object) noexcept nogil cdef extern from "numpy/arrayscalars.h": @@ -1126,21 +1130,21 @@ cdef inline npy_datetime get_datetime64_value(object obj) noexcept nogil: Note that to interpret this as a datetime, the corresponding unit is also needed. That can be found using `get_datetime64_unit`. """ - return (obj).obval + return _PyDatetimeScalarObject_GetValue(obj) cdef inline npy_timedelta get_timedelta64_value(object obj) noexcept nogil: """ returns the int64 value underlying scalar numpy timedelta64 object """ - return (obj).obval + return _PyTimedeltaScalarObject_GetValue(obj) cdef inline NPY_DATETIMEUNIT get_datetime64_unit(object obj) noexcept nogil: """ returns the unit part of the dtype for a numpy datetime64 object. """ - return (obj).obmeta.base + return _PyDatetimeScalarObject_GetMetadata(obj).base cdef extern from "numpy/arrayobject.h": diff --git a/numpy/_core/code_generators/cversions.txt b/numpy/_core/code_generators/cversions.txt index b058875d0455..a3982b656285 100644 --- a/numpy/_core/code_generators/cversions.txt +++ b/numpy/_core/code_generators/cversions.txt @@ -83,5 +83,6 @@ # Version 21 (NumPy 2.4.0) # Add 'same_value' casting, header additions. 
# General loop registration for ufuncs, sort, and argsort -# Version 21 (NumPy 2.5.0) No change 0x00000015 = fbd24fc5b2ba4f7cd3606ec6128de7a5 +# Version 22 (NumPy 2.5.0) Opaque PyObject ABI support +0x00000016 = cdfba10e4e01454262d2293c394cbed5 diff --git a/numpy/_core/code_generators/numpy_api.py b/numpy/_core/code_generators/numpy_api.py index c2b471c71757..7f78d467d10b 100644 --- a/numpy/_core/code_generators/numpy_api.py +++ b/numpy/_core/code_generators/numpy_api.py @@ -409,6 +409,16 @@ def get_annotations(): # End 2.0 API # NpyIterGetTransferFlags (slot 223) added. # End 2.3 API + '_PyArray_GET_ITEM_DATA': (369, MinVersion("2.5")), + '_PyArrayIter_GET_ITEM_DATA': (370, MinVersion("2.5")), + '_PyArray_LegacyDescr_GET_ITEM_DATA': (371, MinVersion("2.5")), + '_PyDataType_GET_ITEM_DATA': (372, MinVersion("2.5")), + '_PyArrayMultiIter_GET_ITEM_DATA': (373, MinVersion("2.5")), + '_PyArrayNeighborhoodIter_GET_ITEM_DATA': (374, MinVersion("2.5")), + '_PyDatetimeScalarObject_GetMetadata': (375,), + '_PyTimedeltaScalarObject_GetMetadata': (376,), + '_PyDatetimeScalarObject_GetValue': (377,), + '_PyTimedeltaScalarObject_GetValue': (378,), } ufunc_types_api = { @@ -470,6 +480,7 @@ def get_annotations(): 'PyUFunc_GiveFloatingpointErrors': (46, MinVersion("2.0")), # End 2.0 API 'PyUFunc_AddLoopsFromSpecs': (47, MinVersion("2.4")), + '_PyUFuncObject_GET_ITEM_DATA': (48, MinVersion("2.5")), } # List of all the dicts which define the C API diff --git a/numpy/_core/include/numpy/_neighborhood_iterator_imp.h b/numpy/_core/include/numpy/_neighborhood_iterator_imp.h index b365cb50854f..503ecfe8c2d7 100644 --- a/numpy/_core/include/numpy/_neighborhood_iterator_imp.h +++ b/numpy/_core/include/numpy/_neighborhood_iterator_imp.h @@ -25,13 +25,13 @@ _PyArrayNeighborhoodIter_IncrCoord(PyArrayNeighborhoodIterObject* iter); * .... 
*/ #define _UPDATE_COORD_ITER(c) \ - wb = iter->coordinates[c] < iter->bounds[c][1]; \ + wb = _PyArrayNeighborhoodIter_GET_ITEM_DATA(iter)->coordinates[c] < _PyArrayNeighborhoodIter_GET_ITEM_DATA(iter)->bounds[c][1]; \ if (wb) { \ - iter->coordinates[c] += 1; \ + _PyArrayNeighborhoodIter_GET_ITEM_DATA(iter)->coordinates[c] += 1; \ return 0; \ } \ else { \ - iter->coordinates[c] = iter->bounds[c][0]; \ + _PyArrayNeighborhoodIter_GET_ITEM_DATA(iter)->coordinates[c] = _PyArrayNeighborhoodIter_GET_ITEM_DATA(iter)->bounds[c][0]; \ } static inline int @@ -39,7 +39,7 @@ _PyArrayNeighborhoodIter_IncrCoord(PyArrayNeighborhoodIterObject* iter) { npy_intp i, wb; - for (i = iter->nd - 1; i >= 0; --i) { + for (i = _PyArrayNeighborhoodIter_GET_ITEM_DATA(iter)->nd - 1; i >= 0; --i) { _UPDATE_COORD_ITER(i) } @@ -68,7 +68,7 @@ static inline int PyArrayNeighborhoodIter_Next(PyArrayNeighborhoodIterObject* iter) { _PyArrayNeighborhoodIter_IncrCoord (iter); - iter->dataptr = iter->translate((PyArrayIterObject*)iter, iter->coordinates); + _PyArrayNeighborhoodIter_GET_ITEM_DATA(iter)->dataptr = _PyArrayNeighborhoodIter_GET_ITEM_DATA(iter)->translate((PyArrayIterObject*)iter, _PyArrayNeighborhoodIter_GET_ITEM_DATA(iter)->coordinates); return 0; } @@ -81,10 +81,10 @@ PyArrayNeighborhoodIter_Reset(PyArrayNeighborhoodIterObject* iter) { npy_intp i; - for (i = 0; i < iter->nd; ++i) { - iter->coordinates[i] = iter->bounds[i][0]; + for (i = 0; i < _PyArrayNeighborhoodIter_GET_ITEM_DATA(iter)->nd; ++i) { + _PyArrayNeighborhoodIter_GET_ITEM_DATA(iter)->coordinates[i] = _PyArrayNeighborhoodIter_GET_ITEM_DATA(iter)->bounds[i][0]; } - iter->dataptr = iter->translate((PyArrayIterObject*)iter, iter->coordinates); + _PyArrayNeighborhoodIter_GET_ITEM_DATA(iter)->dataptr = _PyArrayNeighborhoodIter_GET_ITEM_DATA(iter)->translate((PyArrayIterObject*)iter, _PyArrayNeighborhoodIter_GET_ITEM_DATA(iter)->coordinates); return 0; } diff --git a/numpy/_core/include/numpy/arrayscalars.h 
b/numpy/_core/include/numpy/arrayscalars.h index 46bc58cc2a35..5c46e5ed1ffa 100644 --- a/numpy/_core/include/numpy/arrayscalars.h +++ b/numpy/_core/include/numpy/arrayscalars.h @@ -1,6 +1,7 @@ #ifndef NUMPY_CORE_INCLUDE_NUMPY_ARRAYSCALARS_H_ #define NUMPY_CORE_INCLUDE_NUMPY_ARRAYSCALARS_H_ +#ifndef Py_TARGET_ABI3T #ifndef _MULTIARRAYMODULE typedef struct { PyObject_HEAD @@ -134,7 +135,7 @@ typedef struct { char obval; } PyScalarObject; -#define PyStringScalarObject PyBytesObject + #ifndef Py_LIMITED_API typedef struct { /* note that the PyObject_HEAD macro lives right here */ @@ -162,6 +163,34 @@ typedef struct { void *_buffer_info; /* private buffer info, tagged to allow warning */ #endif } PyVoidScalarObject; +#else +typedef struct PyBoolScalarObject PyBoolScalarObject; +typedef struct PyByteScalarObject PyByteScalarObject; +typedef struct PyShortScalarObject PyShortScalarObject; +typedef struct PyIntScalarObject PyIntScalarObject; +typedef struct PyLongScalarObject PyLongScalarObject; +typedef struct PyLongLongScalarObject PyLongLongScalarObject; +typedef struct PyUByteScalarObject PyUByteScalarObject; +typedef struct PyUShortScalarObject PyUShortScalarObject; +typedef struct PyUIntScalarObject PyUIntScalarObject; +typedef struct PyULongScalarObject PyULongScalarObject; +typedef struct PyULongLongScalarObject PyULongLongScalarObject; +typedef struct PyHalfScalarObject PyHalfScalarObject; +typedef struct PyFloatScalarObject PyFloatScalarObject; +typedef struct PyDoubleScalarObject PyDoubleScalarObject; +typedef struct PyLongDoubleScalarObject PyLongDoubleScalarObject; +typedef struct PyCFloatScalarObject PyCFloatScalarObject; +typedef struct PyCDoubleScalarObject PyCDoubleScalarObject; +typedef struct PyCLongDoubleScalarObject PyCLongDoubleScalarObject; +typedef struct PyObjectScalarObject PyObjectScalarObject; +typedef struct PyDatetimeScalarObject PyDatetimeScalarObject; +typedef struct PyTimedeltaScalarObject PyTimedeltaScalarObject; +typedef struct 
PyScalarObject PyScalarObject; +typedef struct PyVoidScalarObject PyVoidScalarObject; +#endif + + + /* Macros PyScalarObject @@ -193,6 +222,7 @@ typedef struct { ((Py##cls##ScalarObject *)obj)->obval #define PyArrayScalar_ASSIGN(obj, cls, val) \ PyArrayScalar_VAL(obj, cls) = val +#define PyStringScalarObject PyBytesObject #endif #endif /* NUMPY_CORE_INCLUDE_NUMPY_ARRAYSCALARS_H_ */ diff --git a/numpy/_core/include/numpy/ndarrayobject.h b/numpy/_core/include/numpy/ndarrayobject.h index e2974ad00e04..405b9d881db7 100644 --- a/numpy/_core/include/numpy/ndarrayobject.h +++ b/numpy/_core/include/numpy/ndarrayobject.h @@ -10,16 +10,7 @@ extern "C" { #include #include "ndarraytypes.h" -#include "dtype_api.h" -/* Includes the "function" C-API -- these are all stored in a - list of pointers --- one for each file - The two lists are concatenated into one in multiarray. - - They are available as import_array() -*/ - -#include "__multiarray_api.h" /* * Include any definitions which are defined differently for 1.x and 2.x @@ -156,7 +147,7 @@ extern "C" { static inline void PyArray_DiscardWritebackIfCopy(PyArrayObject *arr) { - PyArrayObject_fields *fa = (PyArrayObject_fields *)arr; + PyArrayObject_fields *fa = _PyArray_GET_ITEM_DATA(arr); if (fa && fa->base) { if (fa->flags & NPY_ARRAY_WRITEBACKIFCOPY) { PyArray_ENABLEFLAGS((PyArrayObject*)fa->base, NPY_ARRAY_WRITEABLE); diff --git a/numpy/_core/include/numpy/ndarraytypes.h b/numpy/_core/include/numpy/ndarraytypes.h index 7af26ce3a07e..91781d467945 100644 --- a/numpy/_core/include/numpy/ndarraytypes.h +++ b/numpy/_core/include/numpy/ndarraytypes.h @@ -615,8 +615,12 @@ typedef struct { /* * Public version of the Descriptor struct as of 2.x */ +#if !defined(Py_TARGET_ABI3T) typedef struct _PyArray_Descr { PyObject_HEAD +#else +typedef struct _PyArray_Descr_fields { +#endif /* * the type object representing an * instance of this type -- should not @@ -649,7 +653,7 @@ typedef struct _PyArray_Descr { npy_hash_t hash; /* Unused slot 
(must be initialized to NULL) for future use */ void *reserved_null[2]; -} PyArray_Descr; +} PyArray_Descr_fields; #else /* 1.x and 2.x compatible version (only shared fields): */ @@ -661,7 +665,7 @@ typedef struct _PyArray_Descr { char byteorder; char _former_flags; int type_num; -} PyArray_Descr; +} PyArray_Descr_fields; /* To access modified fields, define the full 2.0 struct: */ typedef struct { @@ -682,13 +686,17 @@ typedef struct { #endif /* 1.x and 2.x compatible version */ +typedef struct _PyArray_Descr PyArray_Descr; + /* * Semi-private struct with additional field of legacy descriptors (must * check NPY_DT_is_legacy before casting/accessing). The struct is also not * valid when running on 1.x (i.e. in public API use). */ typedef struct { +#ifndef Py_TARGET_ABI3T PyObject_HEAD +#endif PyTypeObject *typeobj; char kind; char type; @@ -705,9 +713,16 @@ typedef struct { PyObject *fields; PyObject *names; NpyAuxData *c_metadata; -} _PyArray_LegacyDescr; +} _PyArray_LegacyDescr_fields; +#ifdef Py_TARGET_ABI3T +typedef struct _PyArray_LegacyDescrTag _PyArray_LegacyDescr; +#else +typedef _PyArray_LegacyDescr_fields _PyArray_LegacyDescr; +#endif + +#if !defined(Py_TARGET_ABI3T) /* * Umodified PyArray_Descr struct identical to NumPy 1.x. This struct is * used as a prototype for registering a new legacy DType. @@ -731,15 +746,13 @@ typedef struct { NpyAuxData *c_metadata; npy_hash_t hash; } PyArray_DescrProto; - +#endif typedef struct _arr_descr { PyArray_Descr *base; PyObject *shape; /* a tuple */ } PyArray_ArrayDescr; -#define PyDataType_TYPENUM(descr) (((PyArray_Descr *)(descr))->type_num) - /* * Memory handler structure for array data. 
*/ @@ -775,7 +788,9 @@ typedef struct { */ /* This struct will be moved to a private header in a future release */ typedef struct tagPyArrayObject_fields { +#ifndef Py_TARGET_ABI3T PyObject_HEAD +#endif /* Pointer to the raw data buffer */ char *data; /* The number of dimensions, also called 'ndim' */ @@ -827,6 +842,7 @@ typedef struct tagPyArrayObject_fields { * To hide the implementation details, we only expose * the Python struct HEAD. */ +#ifndef Py_TARGET_ABI3T #if !defined(NPY_NO_DEPRECATED_API) || \ (NPY_NO_DEPRECATED_API < NPY_1_7_API_VERSION) /* @@ -839,6 +855,10 @@ typedef struct tagPyArrayObject { PyObject_HEAD } PyArrayObject; #endif +#else +typedef struct tagPyArrayObjectOpaque PyArrayObject; +#endif + /* * Removed 2020-Nov-25, NumPy 1.20 @@ -853,7 +873,7 @@ typedef struct tagPyArrayObject { */ /* Mirrors buffer object to ptr */ - +#ifndef Py_TARGET_ABI3T typedef struct { PyObject_HEAD PyObject *base; @@ -861,6 +881,7 @@ typedef struct { npy_intp len; int flags; } PyArray_Chunk; +#endif typedef struct { NPY_DATETIMEUNIT base; @@ -1176,13 +1197,16 @@ typedef void (NpyIter_GetMultiIndexFunc)(NpyIter *iter, #define NPY_ITER_GLOBAL_FLAGS 0x0000ffff #define NPY_ITER_PER_OP_FLAGS 0xffff0000 - /***************************** * Basic iterator object *****************************/ /* FWD declaration */ +#ifndef Py_TARGET_ABI3T +typedef struct PyArrayIterObject_fields PyArrayIterObject; +#else typedef struct PyArrayIterObject_tag PyArrayIterObject; +#endif /* * type of the function which translates a set of coordinates to a @@ -1191,8 +1215,10 @@ typedef struct PyArrayIterObject_tag PyArrayIterObject; typedef char* (*npy_iter_get_dataptr_t)( PyArrayIterObject* iter, const npy_intp*); -struct PyArrayIterObject_tag { +typedef struct PyArrayIterObject_fields { +#ifndef Py_TARGET_ABI3T PyObject_HEAD +#endif int nd_m1; /* number of dimensions - 1 */ npy_intp index, size; npy_intp coordinates[NPY_MAXDIMS_LEGACY_ITERS];/* N-dimensional loop */ @@ -1208,124 +1234,17 @@ 
struct PyArrayIterObject_tag { npy_intp limits[NPY_MAXDIMS_LEGACY_ITERS][2]; npy_intp limits_sizes[NPY_MAXDIMS_LEGACY_ITERS]; npy_iter_get_dataptr_t translate; -} ; - - -/* Iterator API */ -#define PyArrayIter_Check(op) PyObject_TypeCheck((op), &PyArrayIter_Type) - -#define _PyAIT(it) ((PyArrayIterObject *)(it)) -#define PyArray_ITER_RESET(it) do { \ - _PyAIT(it)->index = 0; \ - _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ - memset(_PyAIT(it)->coordinates, 0, \ - (_PyAIT(it)->nd_m1+1)*sizeof(npy_intp)); \ -} while (0) - -#define _PyArray_ITER_NEXT1(it) do { \ - (it)->dataptr += _PyAIT(it)->strides[0]; \ - (it)->coordinates[0]++; \ -} while (0) - -#define _PyArray_ITER_NEXT2(it) do { \ - if ((it)->coordinates[1] < (it)->dims_m1[1]) { \ - (it)->coordinates[1]++; \ - (it)->dataptr += (it)->strides[1]; \ - } \ - else { \ - (it)->coordinates[1] = 0; \ - (it)->coordinates[0]++; \ - (it)->dataptr += (it)->strides[0] - \ - (it)->backstrides[1]; \ - } \ -} while (0) - -#define PyArray_ITER_NEXT(it) do { \ - _PyAIT(it)->index++; \ - if (_PyAIT(it)->nd_m1 == 0) { \ - _PyArray_ITER_NEXT1(_PyAIT(it)); \ - } \ - else if (_PyAIT(it)->contiguous) \ - _PyAIT(it)->dataptr += PyArray_ITEMSIZE(_PyAIT(it)->ao); \ - else if (_PyAIT(it)->nd_m1 == 1) { \ - _PyArray_ITER_NEXT2(_PyAIT(it)); \ - } \ - else { \ - int __npy_i; \ - for (__npy_i=_PyAIT(it)->nd_m1; __npy_i >= 0; __npy_i--) { \ - if (_PyAIT(it)->coordinates[__npy_i] < \ - _PyAIT(it)->dims_m1[__npy_i]) { \ - _PyAIT(it)->coordinates[__npy_i]++; \ - _PyAIT(it)->dataptr += \ - _PyAIT(it)->strides[__npy_i]; \ - break; \ - } \ - else { \ - _PyAIT(it)->coordinates[__npy_i] = 0; \ - _PyAIT(it)->dataptr -= \ - _PyAIT(it)->backstrides[__npy_i]; \ - } \ - } \ - } \ -} while (0) - -#define PyArray_ITER_GOTO(it, destination) do { \ - int __npy_i; \ - _PyAIT(it)->index = 0; \ - _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ - for (__npy_i = _PyAIT(it)->nd_m1; __npy_i>=0; __npy_i--) { \ - if (destination[__npy_i] < 0) { \ - 
destination[__npy_i] += \ - _PyAIT(it)->dims_m1[__npy_i]+1; \ - } \ - _PyAIT(it)->dataptr += destination[__npy_i] * \ - _PyAIT(it)->strides[__npy_i]; \ - _PyAIT(it)->coordinates[__npy_i] = \ - destination[__npy_i]; \ - _PyAIT(it)->index += destination[__npy_i] * \ - ( __npy_i==_PyAIT(it)->nd_m1 ? 1 : \ - _PyAIT(it)->dims_m1[__npy_i+1]+1) ; \ - } \ -} while (0) - -#define PyArray_ITER_GOTO1D(it, ind) do { \ - int __npy_i; \ - npy_intp __npy_ind = (npy_intp)(ind); \ - if (__npy_ind < 0) __npy_ind += _PyAIT(it)->size; \ - _PyAIT(it)->index = __npy_ind; \ - if (_PyAIT(it)->nd_m1 == 0) { \ - _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao) + \ - __npy_ind * _PyAIT(it)->strides[0]; \ - } \ - else if (_PyAIT(it)->contiguous) \ - _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao) + \ - __npy_ind * PyArray_ITEMSIZE(_PyAIT(it)->ao); \ - else { \ - _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ - for (__npy_i = 0; __npy_i<=_PyAIT(it)->nd_m1; \ - __npy_i++) { \ - _PyAIT(it)->coordinates[__npy_i] = \ - (__npy_ind / _PyAIT(it)->factors[__npy_i]); \ - _PyAIT(it)->dataptr += \ - (__npy_ind / _PyAIT(it)->factors[__npy_i]) \ - * _PyAIT(it)->strides[__npy_i]; \ - __npy_ind %= _PyAIT(it)->factors[__npy_i]; \ - } \ - } \ -} while (0) - -#define PyArray_ITER_DATA(it) ((void *)(_PyAIT(it)->dataptr)) - -#define PyArray_ITER_NOTDONE(it) (_PyAIT(it)->index < _PyAIT(it)->size) +} PyArrayIterObject_fields; /* * Any object passed to PyArray_Broadcast must be binary compatible * with this structure. 
*/ - typedef struct { +#ifndef Py_TARGET_ABI3T PyObject_HEAD +#endif int numiter; /* number of iters */ npy_intp size; /* broadcasted size */ npy_intp index; /* current index */ @@ -1350,92 +1269,13 @@ typedef struct { #else PyArrayIterObject *iters[]; #endif -} PyArrayMultiIterObject; - -#define _PyMIT(m) ((PyArrayMultiIterObject *)(m)) -#define PyArray_MultiIter_RESET(multi) do { \ - int __npy_mi; \ - _PyMIT(multi)->index = 0; \ - for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ - PyArray_ITER_RESET(_PyMIT(multi)->iters[__npy_mi]); \ - } \ -} while (0) - -#define PyArray_MultiIter_NEXT(multi) do { \ - int __npy_mi; \ - _PyMIT(multi)->index++; \ - for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ - PyArray_ITER_NEXT(_PyMIT(multi)->iters[__npy_mi]); \ - } \ -} while (0) - -#define PyArray_MultiIter_GOTO(multi, dest) do { \ - int __npy_mi; \ - for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ - PyArray_ITER_GOTO(_PyMIT(multi)->iters[__npy_mi], dest); \ - } \ - _PyMIT(multi)->index = _PyMIT(multi)->iters[0]->index; \ -} while (0) - -#define PyArray_MultiIter_GOTO1D(multi, ind) do { \ - int __npy_mi; \ - for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ - PyArray_ITER_GOTO1D(_PyMIT(multi)->iters[__npy_mi], ind); \ - } \ - _PyMIT(multi)->index = _PyMIT(multi)->iters[0]->index; \ -} while (0) - -#define PyArray_MultiIter_DATA(multi, i) \ - ((void *)(_PyMIT(multi)->iters[i]->dataptr)) - -#define PyArray_MultiIter_NEXTi(multi, i) \ - PyArray_ITER_NEXT(_PyMIT(multi)->iters[i]) - -#define PyArray_MultiIter_NOTDONE(multi) \ - (_PyMIT(multi)->index < _PyMIT(multi)->size) - - -static NPY_INLINE int -PyArray_MultiIter_NUMITER(PyArrayMultiIterObject *multi) -{ - return multi->numiter; -} - - -static NPY_INLINE npy_intp -PyArray_MultiIter_SIZE(PyArrayMultiIterObject *multi) -{ - return multi->size; -} - - -static NPY_INLINE npy_intp -PyArray_MultiIter_INDEX(PyArrayMultiIterObject *multi) -{ - return 
multi->index; -} - - -static NPY_INLINE int -PyArray_MultiIter_NDIM(PyArrayMultiIterObject *multi) -{ - return multi->nd; -} - - -static NPY_INLINE npy_intp * -PyArray_MultiIter_DIMS(PyArrayMultiIterObject *multi) -{ - return multi->dimensions; -} - - -static NPY_INLINE void ** -PyArray_MultiIter_ITERS(PyArrayMultiIterObject *multi) -{ - return (void**)multi->iters; -} +} PyArrayMultiIterObject_fields; +#ifndef Py_TARGET_ABI3T +typedef PyArrayMultiIterObject_fields PyArrayMultiIterObject; +#else +typedef struct PyArrayMultiIterObject_tag PyArrayMultiIterObject; +#endif enum { NPY_NEIGHBORHOOD_ITER_ZERO_PADDING, @@ -1446,8 +1286,9 @@ enum { }; typedef struct { +#ifndef Py_TARGET_ABI3T PyObject_HEAD - +#endif /* * PyArrayIterObject part: keep this in this exact order */ @@ -1487,7 +1328,13 @@ typedef struct { char* constant; int mode; -} PyArrayNeighborhoodIterObject; +} PyArrayNeighborhoodIterObject_fields; + +#ifndef Py_TARGET_ABI3T +typedef PyArrayNeighborhoodIterObject_fields PyArrayNeighborhoodIterObject; +#else +typedef struct PyArrayNeighborhoodIterObject_tag PyArrayNeighborhoodIterObject; +#endif /* * Neighborhood iterator API @@ -1507,9 +1354,6 @@ PyArrayNeighborhoodIter_Next2D(PyArrayNeighborhoodIterObject* iter); * Include inline implementations - functions defined there are not * considered public API */ -#define NUMPY_CORE_INCLUDE_NUMPY__NEIGHBORHOOD_IMP_H_ -#include "_neighborhood_iterator_imp.h" -#undef NUMPY_CORE_INCLUDE_NUMPY__NEIGHBORHOOD_IMP_H_ @@ -1517,88 +1361,278 @@ PyArrayNeighborhoodIter_Next2D(PyArrayNeighborhoodIterObject* iter); #define NPY_DEFAULT_TYPE NPY_DOUBLE /* default integer type defined in npy_2_compat header */ -/* - * All sorts of useful ways to look into a PyArrayObject. It is recommended - * to use PyArrayObject * objects instead of always casting from PyObject *, - * for improved type checking. 
- * - * In many cases here the macro versions of the accessors are deprecated, - * but can't be immediately changed to inline functions because the - * preexisting macros accept PyObject * and do automatic casts. Inline - * functions accepting PyArrayObject * provides for some compile-time - * checking of correctness when working with these objects in C. - */ - -#define PyArray_ISONESEGMENT(m) (PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) || \ - PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS)) +/************************************************************ + * A struct used by PyArray_CreateSortedStridePerm, new in 1.7. + ************************************************************/ -#define PyArray_ISFORTRAN(m) (PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) && \ - (!PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS))) +typedef struct { + npy_intp perm, stride; +} npy_stride_sort_item; -#define PyArray_FORTRAN_IF(m) ((PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) ? \ - NPY_ARRAY_F_CONTIGUOUS : 0)) +/************************************************************ + * This is the form of the struct that's stored in the + * PyCapsule returned by an array's __array_struct__ attribute. See + * https://docs.scipy.org/doc/numpy/reference/arrays.interface.html for the full + * documentation. 
+ ************************************************************/ +typedef struct { + int two; /* + * contains the integer 2 as a sanity + * check + */ -static inline int -PyArray_NDIM(const PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->nd; -} + int nd; /* number of dimensions */ -static inline void * -PyArray_DATA(const PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->data; -} + char typekind; /* + * kind in array --- character code of + * typestr + */ -static inline char * -PyArray_BYTES(const PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->data; -} + int itemsize; /* size of each element */ -static inline npy_intp * + int flags; /* + * how should be data interpreted. Valid + * flags are CONTIGUOUS (1), F_CONTIGUOUS (2), + * ALIGNED (0x100), NOTSWAPPED (0x200), and + * WRITEABLE (0x400). ARR_HAS_DESCR (0x800) + * states that arrdescr field is present in + * structure + */ + + npy_intp *shape; /* + * A length-nd array of shape + * information + */ + + npy_intp *strides; /* A length-nd array of stride information */ + + void *data; /* A pointer to the first element of the array */ + + PyObject *descr; /* + * A list of fields or NULL (ignored if flags + * does not have ARR_HAS_DESCR flag set) + */ +} PyArrayInterface; + + +/**************************************** + * NpyString + * + * Types used by the NpyString API. + ****************************************/ + +/* + * A "packed" encoded string. The string data must be accessed by first unpacking the string. + */ +typedef struct npy_packed_static_string npy_packed_static_string; + +/* + * An unpacked read-only view onto the data in a packed string + */ +typedef struct npy_unpacked_static_string { + size_t size; + const char *buf; +} npy_static_string; + +/* + * Handles heap allocations for static strings. 
+ */ +typedef struct npy_string_allocator npy_string_allocator; + +typedef struct { + PyArray_Descr_fields base; + // The object representing a null value + PyObject *na_object; + // Flag indicating whether or not to coerce arbitrary objects to strings + char coerce; + // Flag indicating the na object is NaN-like + char has_nan_na; + // Flag indicating the na object is a string + char has_string_na; + // If nonzero, indicates that this instance is owned by an array already + char array_owned; + // The string data to use when a default string is needed + npy_static_string default_string; + // The name of the missing data object, if any + npy_static_string na_name; + // the allocator should only be directly accessed after + // acquiring the allocator_lock and the lock should + // be released immediately after the allocator is + // no longer needed + npy_string_allocator *allocator; +} PyArray_StringDTypeObject; + +/* + * PyArray_DTypeMeta related definitions. + * + * As of now, this API is preliminary and will be extended as necessary. + */ +#if defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD + /* + * The Structures defined in this block are currently considered + * private API and may change without warning! + * Part of this (at least the size) is expected to be public API without + * further modifications. + */ + /* TODO: Make this definition public in the API, as soon as it's settled */ + NPY_NO_EXPORT extern PyTypeObject PyArrayDTypeMeta_Type; + + /* + * While NumPy DTypes would not need to be heap types the plan is to + * make DTypes available in Python at which point they will be heap types. + * Since we also wish to add fields to the DType class, this looks like + * a typical instance definition, but with PyHeapTypeObject instead of + * only the PyObject_HEAD. + * This must only be exposed after very careful consideration, since + * it is a fairly complex construct which may be better to allow + * refactoring of. 
+ */ + typedef struct { + PyHeapTypeObject super; + + /* + * Most DTypes will have a singleton default instance, for the + * parametric legacy DTypes (bytes, string, void, datetime) this + * may be a pointer to the *prototype* instance? + */ + PyArray_Descr *singleton; + /* Copy of the legacy DTypes type number, usually invalid. */ + int type_num; + + /* The type object of the scalar instances (may be NULL?) */ + PyTypeObject *scalar_type; + /* + * DType flags to signal legacy, parametric, or + * abstract. But plenty of space for additional information/flags. + */ + npy_uint64 flags; + + /* + * Use indirection in order to allow a fixed size for this struct. + * A stable ABI size makes creating a static DType less painful + * while also ensuring flexibility for all opaque API (with one + * indirection due to the pointer lookup). + */ + void *dt_slots; + void *reserved[3]; + } PyArray_DTypeMeta; + +#endif /* NPY_INTERNAL_BUILD */ + +/* Includes the "function" C-API -- these are all stored in a + list of pointers --- one for each file + The two lists are concatenated into one in multiarray. 
+ + They are available as import_array() +*/ + +#include "dtype_api.h" +#include "__multiarray_api.h" + +#ifndef Py_TARGET_ABI3T +#undef _PyArray_GET_ITEM_DATA +#define _PyArray_GET_ITEM_DATA(arr) ((PyArrayObject_fields *)(arr)) +#undef _PyArrayIter_GET_ITEM_DATA +#define _PyArrayIter_GET_ITEM_DATA(iter) ((PyArrayIterObject_fields *)(iter)) +#undef _PyArrayMultiIter_GET_ITEM_DATA +#define _PyArrayMultiIter_GET_ITEM_DATA(multi) ((PyArrayMultiIterObject_fields *)(multi)) +#undef _PyArrayNeighborhoodIter_GET_ITEM_DATA +#define _PyArrayNeighborhoodIter_GET_ITEM_DATA(iter) ((PyArrayNeighborhoodIterObject_fields *)(iter)) +#undef _PyDataType_GET_ITEM_DATA +#define _PyDataType_GET_ITEM_DATA(descr) ((PyArray_Descr_fields *)(descr)) +#undef _PyArray_LegacyDescr_GET_ITEM_DATA +#define _PyArray_LegacyDescr_GET_ITEM_DATA(descr) ((_PyArray_LegacyDescr_fields *)(descr)) +#endif + +/* + * Include inline implementations - functions defined there are not + * considered public API + */ +#define NUMPY_CORE_INCLUDE_NUMPY__NEIGHBORHOOD_IMP_H_ +#include "_neighborhood_iterator_imp.h" +#undef NUMPY_CORE_INCLUDE_NUMPY__NEIGHBORHOOD_IMP_H_ + +/* + * All sorts of useful ways to look into a PyArrayObject. It is recommended + * to use PyArrayObject * objects instead of always casting from PyObject *, + * for improved type checking. + * + * In many cases here the macro versions of the accessors are deprecated, + * but can't be immediately changed to inline functions because the + * preexisting macros accept PyObject * and do automatic casts. Inline + * functions accepting PyArrayObject * provides for some compile-time + * checking of correctness when working with these objects in C. 
+ */ +#define PyDataType_TYPENUM(descr) (_PyDataType_GET_ITEM_DATA((PyArray_Descr *)(descr))->type_num) + +#define PyArray_ISONESEGMENT(m) (PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) || \ + PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS)) + +#define PyArray_ISFORTRAN(m) (PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) && \ + (!PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS))) + +#define PyArray_FORTRAN_IF(m) ((PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) ? \ + NPY_ARRAY_F_CONTIGUOUS : 0)) +static inline int +PyArray_NDIM(const PyArrayObject *arr) +{ + return _PyArray_GET_ITEM_DATA(arr)->nd; +} + +static inline void * +PyArray_DATA(const PyArrayObject *arr) +{ + return _PyArray_GET_ITEM_DATA(arr)->data; +} + +static inline char * +PyArray_BYTES(const PyArrayObject *arr) +{ + return _PyArray_GET_ITEM_DATA(arr)->data; +} + +static inline npy_intp * PyArray_DIMS(const PyArrayObject *arr) { - return ((PyArrayObject_fields *)arr)->dimensions; + return _PyArray_GET_ITEM_DATA(arr)->dimensions; } static inline npy_intp * PyArray_STRIDES(const PyArrayObject *arr) { - return ((PyArrayObject_fields *)arr)->strides; + return _PyArray_GET_ITEM_DATA(arr)->strides; } static inline npy_intp PyArray_DIM(const PyArrayObject *arr, int idim) { - return ((PyArrayObject_fields *)arr)->dimensions[idim]; + return _PyArray_GET_ITEM_DATA(arr)->dimensions[idim]; } static inline npy_intp PyArray_STRIDE(const PyArrayObject *arr, int istride) { - return ((PyArrayObject_fields *)arr)->strides[istride]; + return _PyArray_GET_ITEM_DATA(arr)->strides[istride]; } static inline NPY_RETURNS_BORROWED_REF PyObject * PyArray_BASE(const PyArrayObject *arr) { - return ((PyArrayObject_fields *)arr)->base; + return _PyArray_GET_ITEM_DATA(arr)->base; } static inline NPY_RETURNS_BORROWED_REF PyArray_Descr * PyArray_DESCR(const PyArrayObject *arr) { - return ((PyArrayObject_fields *)arr)->descr; + return _PyArray_GET_ITEM_DATA(arr)->descr; } static inline int PyArray_FLAGS(const PyArrayObject *arr) { - return 
((PyArrayObject_fields *)arr)->flags; + return _PyArray_GET_ITEM_DATA(arr)->flags; } - static inline int PyArray_TYPE(const PyArrayObject *arr) { @@ -1620,7 +1654,7 @@ PyArray_DTYPE(const PyArrayObject *arr) static inline npy_intp * PyArray_SHAPE(const PyArrayObject *arr) { - return ((PyArrayObject_fields *)arr)->dimensions; + return _PyArray_GET_ITEM_DATA(arr)->dimensions; } /* @@ -1630,7 +1664,7 @@ PyArray_SHAPE(const PyArrayObject *arr) static inline void PyArray_ENABLEFLAGS(PyArrayObject *arr, int flags) { - ((PyArrayObject_fields *)arr)->flags |= flags; + _PyArray_GET_ITEM_DATA(arr)->flags |= flags; } /* @@ -1640,14 +1674,14 @@ PyArray_ENABLEFLAGS(PyArrayObject *arr, int flags) static inline void PyArray_CLEARFLAGS(PyArrayObject *arr, int flags) { - ((PyArrayObject_fields *)arr)->flags &= ~flags; + _PyArray_GET_ITEM_DATA(arr)->flags &= ~flags; } #if NPY_FEATURE_VERSION >= NPY_1_22_API_VERSION static inline NPY_RETURNS_BORROWED_REF PyObject * PyArray_HANDLER(PyArrayObject *arr) { - return ((PyArrayObject_fields *)arr)->mem_handler; + return _PyArray_GET_ITEM_DATA(arr)->mem_handler; } #endif @@ -1775,166 +1809,202 @@ PyArray_CLEARFLAGS(PyArrayObject *arr, int flags) #define PyArray_ISBEHAVED_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_ALIGNED) -#define PyDataType_ISNOTSWAPPED(d) PyArray_ISNBO(PyDataType_BYTEORDER(d)) +#define PyDataType_ISNOTSWAPPED(d) PyArray_ISNBO(PyDataType_BYTEORDER((PyArray_Descr *)(d))) #define PyDataType_ISBYTESWAPPED(d) (!PyDataType_ISNOTSWAPPED(d)) -/************************************************************ - * A struct used by PyArray_CreateSortedStridePerm, new in 1.7. - ************************************************************/ -typedef struct { - npy_intp perm, stride; -} npy_stride_sort_item; -/************************************************************ - * This is the form of the struct that's stored in the - * PyCapsule returned by an array's __array_struct__ attribute. 
See - * https://docs.scipy.org/doc/numpy/reference/arrays.interface.html for the full - * documentation. - ************************************************************/ -typedef struct { - int two; /* - * contains the integer 2 as a sanity - * check - */ +/* Iterator API */ +#define PyArrayIter_Check(op) PyObject_TypeCheck((op), &PyArrayIter_Type) - int nd; /* number of dimensions */ +#define _PyAIT(it) _PyArrayIter_GET_ITEM_DATA((PyArrayIterObject *)(it)) +#define PyArray_ITER_RESET(it) do { \ + _PyAIT(it)->index = 0; \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ + memset(_PyAIT(it)->coordinates, 0, \ + (_PyAIT(it)->nd_m1+1)*sizeof(npy_intp)); \ +} while (0) - char typekind; /* - * kind in array --- character code of - * typestr - */ +#define _PyArray_ITER_NEXT1(it) do { \ + _PyAIT(it)->dataptr += _PyAIT(it)->strides[0]; \ + _PyAIT(it)->coordinates[0]++; \ +} while (0) - int itemsize; /* size of each element */ +#define _PyArray_ITER_NEXT2(it) do { \ + if (_PyAIT(it)->coordinates[1] < _PyAIT(it)->dims_m1[1]) { \ + _PyAIT(it)->coordinates[1]++; \ + _PyAIT(it)->dataptr += _PyAIT(it)->strides[1]; \ + } \ + else { \ + _PyAIT(it)->coordinates[1] = 0; \ + _PyAIT(it)->coordinates[0]++; \ + _PyAIT(it)->dataptr += _PyAIT(it)->strides[0] - \ + _PyAIT(it)->backstrides[1]; \ + } \ +} while (0) - int flags; /* - * how should be data interpreted. Valid - * flags are CONTIGUOUS (1), F_CONTIGUOUS (2), - * ALIGNED (0x100), NOTSWAPPED (0x200), and - * WRITEABLE (0x400). 
ARR_HAS_DESCR (0x800) - * states that arrdescr field is present in - * structure - */ +#define PyArray_ITER_NEXT(it) do { \ + _PyAIT(it)->index++; \ + if (_PyAIT(it)->nd_m1 == 0) { \ + _PyArray_ITER_NEXT1(it); \ + } \ + else if (_PyAIT(it)->contiguous) \ + _PyAIT(it)->dataptr += PyArray_ITEMSIZE(_PyAIT(it)->ao); \ + else if (_PyAIT(it)->nd_m1 == 1) { \ + _PyArray_ITER_NEXT2(it); \ + } \ + else { \ + int __npy_i; \ + for (__npy_i=_PyAIT(it)->nd_m1; __npy_i >= 0; __npy_i--) { \ + if (_PyAIT(it)->coordinates[__npy_i] < \ + _PyAIT(it)->dims_m1[__npy_i]) { \ + _PyAIT(it)->coordinates[__npy_i]++; \ + _PyAIT(it)->dataptr += \ + _PyAIT(it)->strides[__npy_i]; \ + break; \ + } \ + else { \ + _PyAIT(it)->coordinates[__npy_i] = 0; \ + _PyAIT(it)->dataptr -= \ + _PyAIT(it)->backstrides[__npy_i]; \ + } \ + } \ + } \ +} while (0) - npy_intp *shape; /* - * A length-nd array of shape - * information - */ +#define PyArray_ITER_GOTO(it, destination) do { \ + int __npy_i; \ + _PyAIT(it)->index = 0; \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ + for (__npy_i = _PyAIT(it)->nd_m1; __npy_i>=0; __npy_i--) { \ + if (destination[__npy_i] < 0) { \ + destination[__npy_i] += \ + _PyAIT(it)->dims_m1[__npy_i]+1; \ + } \ + _PyAIT(it)->dataptr += destination[__npy_i] * \ + _PyAIT(it)->strides[__npy_i]; \ + _PyAIT(it)->coordinates[__npy_i] = \ + destination[__npy_i]; \ + _PyAIT(it)->index += destination[__npy_i] * \ + ( __npy_i==_PyAIT(it)->nd_m1 ? 
1 : \ + _PyAIT(it)->dims_m1[__npy_i+1]+1) ; \ + } \ +} while (0) - npy_intp *strides; /* A length-nd array of stride information */ +#define PyArray_ITER_GOTO1D(it, ind) do { \ + int __npy_i; \ + npy_intp __npy_ind = (npy_intp)(ind); \ + if (__npy_ind < 0) __npy_ind += _PyAIT(it)->size; \ + _PyAIT(it)->index = __npy_ind; \ + if (_PyAIT(it)->nd_m1 == 0) { \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao) + \ + __npy_ind * _PyAIT(it)->strides[0]; \ + } \ + else if (_PyAIT(it)->contiguous) \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao) + \ + __npy_ind * PyArray_ITEMSIZE(_PyAIT(it)->ao); \ + else { \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ + for (__npy_i = 0; __npy_i<=_PyAIT(it)->nd_m1; \ + __npy_i++) { \ + _PyAIT(it)->coordinates[__npy_i] = \ + (__npy_ind / _PyAIT(it)->factors[__npy_i]); \ + _PyAIT(it)->dataptr += \ + (__npy_ind / _PyAIT(it)->factors[__npy_i]) \ + * _PyAIT(it)->strides[__npy_i]; \ + __npy_ind %= _PyAIT(it)->factors[__npy_i]; \ + } \ + } \ +} while (0) - void *data; /* A pointer to the first element of the array */ +#define PyArray_ITER_DATA(it) ((void *)(_PyAIT(it)->dataptr)) - PyObject *descr; /* - * A list of fields or NULL (ignored if flags - * does not have ARR_HAS_DESCR flag set) - */ -} PyArrayInterface; +#define PyArray_ITER_NOTDONE(it) (_PyAIT(it)->index < _PyAIT(it)->size) -/**************************************** - * NpyString - * - * Types used by the NpyString API. - ****************************************/ +#define _PyMIT(m) (_PyArrayMultiIter_GET_ITEM_DATA((PyArrayMultiIterObject *)m)) +#define PyArray_MultiIter_RESET(multi) do { \ + int __npy_mi; \ + _PyMIT(multi)->index = 0; \ + for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ + PyArray_ITER_RESET(_PyMIT(multi)->iters[__npy_mi]); \ + } \ +} while (0) -/* - * A "packed" encoded string. The string data must be accessed by first unpacking the string. 
- */ -typedef struct npy_packed_static_string npy_packed_static_string; +#define PyArray_MultiIter_NEXT(multi) do { \ + int __npy_mi; \ + _PyMIT(multi)->index++; \ + for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ + PyArray_ITER_NEXT(_PyMIT(multi)->iters[__npy_mi]); \ + } \ +} while (0) -/* - * An unpacked read-only view onto the data in a packed string - */ -typedef struct npy_unpacked_static_string { - size_t size; - const char *buf; -} npy_static_string; +#define PyArray_MultiIter_GOTO(multi, dest) do { \ + int __npy_mi; \ + for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ + PyArray_ITER_GOTO(_PyMIT(multi)->iters[__npy_mi], dest); \ + } \ + _PyMIT(multi)->index = _PyAIT(_PyMIT(multi)->iters[0])->index; \ +} while (0) -/* - * Handles heap allocations for static strings. - */ -typedef struct npy_string_allocator npy_string_allocator; +#define PyArray_MultiIter_GOTO1D(multi, ind) do { \ + int __npy_mi; \ + for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ + PyArray_ITER_GOTO1D(_PyMIT(multi)->iters[__npy_mi], ind); \ + } \ + _PyMIT(multi)->index = _PyAIT(_PyMIT(multi)->iters[0])->index; \ +} while (0) -typedef struct { - PyArray_Descr base; - // The object representing a null value - PyObject *na_object; - // Flag indicating whether or not to coerce arbitrary objects to strings - char coerce; - // Flag indicating the na object is NaN-like - char has_nan_na; - // Flag indicating the na object is a string - char has_string_na; - // If nonzero, indicates that this instance is owned by an array already - char array_owned; - // The string data to use when a default string is needed - npy_static_string default_string; - // The name of the missing data object, if any - npy_static_string na_name; - // the allocator should only be directly accessed after - // acquiring the allocator_lock and the lock should - // be released immediately after the allocator is - // no longer needed - npy_string_allocator *allocator; -} 
PyArray_StringDTypeObject; +#define PyArray_MultiIter_DATA(multi, i) \ + ((void *)(_PyAIT(_PyMIT(multi)->iters[i])->dataptr)) -/* - * PyArray_DTypeMeta related definitions. - * - * As of now, this API is preliminary and will be extended as necessary. - */ -#if defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD - /* - * The Structures defined in this block are currently considered - * private API and may change without warning! - * Part of this (at least the size) is expected to be public API without - * further modifications. - */ - /* TODO: Make this definition public in the API, as soon as its settled */ - NPY_NO_EXPORT extern PyTypeObject PyArrayDTypeMeta_Type; +#define PyArray_MultiIter_NEXTi(multi, i) \ + PyArray_ITER_NEXT(_PyMIT(multi)->iters[i]) - /* - * While NumPy DTypes would not need to be heap types the plan is to - * make DTypes available in Python at which point they will be heap types. - * Since we also wish to add fields to the DType class, this looks like - * a typical instance definition, but with PyHeapTypeObject instead of - * only the PyObject_HEAD. - * This must only be exposed very extremely careful consideration, since - * it is a fairly complex construct which may be better to allow - * refactoring of. - */ - typedef struct { - PyHeapTypeObject super; +#define PyArray_MultiIter_NOTDONE(multi) \ + (_PyMIT(multi)->index < _PyMIT(multi)->size) - /* - * Most DTypes will have a singleton default instance, for the - * parametric legacy DTypes (bytes, string, void, datetime) this - * may be a pointer to the *prototype* instance? - */ - PyArray_Descr *singleton; - /* Copy of the legacy DTypes type number, usually invalid. */ - int type_num; - /* The type object of the scalar instances (may be NULL?) */ - PyTypeObject *scalar_type; - /* - * DType flags to signal legacy, parametric, or - * abstract. But plenty of space for additional information/flags. 
- */ - npy_uint64 flags; +static NPY_INLINE int +PyArray_MultiIter_NUMITER(PyArrayMultiIterObject *multi) +{ + return _PyMIT(multi)->numiter; +} - /* - * Use indirection in order to allow a fixed size for this struct. - * A stable ABI size makes creating a static DType less painful - * while also ensuring flexibility for all opaque API (with one - * indirection due the pointer lookup). - */ - void *dt_slots; - void *reserved[3]; - } PyArray_DTypeMeta; -#endif /* NPY_INTERNAL_BUILD */ +static NPY_INLINE npy_intp +PyArray_MultiIter_SIZE(PyArrayMultiIterObject *multi) +{ + return _PyMIT(multi)->size; +} + + +static NPY_INLINE npy_intp +PyArray_MultiIter_INDEX(PyArrayMultiIterObject *multi) +{ + return _PyMIT(multi)->index; +} + + +static NPY_INLINE int +PyArray_MultiIter_NDIM(PyArrayMultiIterObject *multi) +{ + return _PyMIT(multi)->nd; +} + + +static NPY_INLINE npy_intp * +PyArray_MultiIter_DIMS(PyArrayMultiIterObject *multi) +{ + return _PyMIT(multi)->dimensions; +} + + +static NPY_INLINE void ** +PyArray_MultiIter_ITERS(PyArrayMultiIterObject *multi) +{ + return (void**)_PyMIT(multi)->iters; +} /* diff --git a/numpy/_core/include/numpy/npy_2_compat.h b/numpy/_core/include/numpy/npy_2_compat.h index 9b14bc2de86a..85be5d4b8869 100644 --- a/numpy/_core/include/numpy/npy_2_compat.h +++ b/numpy/_core/include/numpy/npy_2_compat.h @@ -145,14 +145,14 @@ PyArray_ImportNumPyAPI(void) static inline void PyDataType_SET_ELSIZE(PyArray_Descr *dtype, npy_intp size) { - dtype->elsize = size; + _PyDataType_GET_ITEM_DATA(dtype)->elsize = size; } static inline npy_uint64 PyDataType_FLAGS(const PyArray_Descr *dtype) { #if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION - return dtype->flags; + return _PyDataType_GET_ITEM_DATA(dtype)->flags; #else return (unsigned char)dtype->flags; /* Need unsigned cast on 1.x */ #endif @@ -164,7 +164,7 @@ PyArray_ImportNumPyAPI(void) if (legacy_only && !PyDataType_ISLEGACY(dtype)) { \ return (type)0; \ } \ - return ((_PyArray_LegacyDescr *)dtype)->field; 
\ + return _PyArray_LegacyDescr_GET_ITEM_DATA((const _PyArray_LegacyDescr *)dtype)->field; \ } #else /* compiling for both 1.x and 2.x */ diff --git a/numpy/_core/include/numpy/numpyconfig.h b/numpy/_core/include/numpy/numpyconfig.h index 4bfe3ab09dea..b50e42d30c5b 100644 --- a/numpy/_core/include/numpy/numpyconfig.h +++ b/numpy/_core/include/numpy/numpyconfig.h @@ -79,7 +79,7 @@ #define NPY_2_2_API_VERSION 0x00000013 #define NPY_2_3_API_VERSION 0x00000014 #define NPY_2_4_API_VERSION 0x00000015 -#define NPY_2_5_API_VERSION 0x00000015 +#define NPY_2_5_API_VERSION 0x00000016 /* @@ -124,6 +124,10 @@ #define NPY_FEATURE_VERSION NPY_1_25_API_VERSION #endif +#if defined(Py_TARGET_ABI3T) && NPY_TARGET_VERSION < NPY_2_5_API_VERSION + #error "NumPy 2.5 or later is required when compiling Py_TARGET_ABI3T" +#endif + /* Sanity check the (requested) feature version */ #if NPY_FEATURE_VERSION > NPY_API_VERSION #error "NPY_TARGET_VERSION higher than NumPy headers!" diff --git a/numpy/_core/include/numpy/ufuncobject.h b/numpy/_core/include/numpy/ufuncobject.h index b5a8b6852468..5247f7353133 100644 --- a/numpy/_core/include/numpy/ufuncobject.h +++ b/numpy/_core/include/numpy/ufuncobject.h @@ -100,7 +100,9 @@ typedef int (PyUFunc_ProcessCoreDimsFunc)( npy_intp *core_dim_sizes); typedef struct _tagPyUFuncObject { +#ifndef Py_TARGET_ABI3T PyObject_HEAD +#endif /* * nin: Number of inputs * nout: Number of outputs @@ -234,7 +236,6 @@ typedef struct _tagPyUFuncObject { #endif } PyUFuncObject_fields; -typedef PyUFuncObject_fields PyUFuncObject; #include "arrayobject.h" /* Generalized ufunc; 0x0001 reserved for possible use as CORE_ENABLED */ @@ -345,13 +346,18 @@ typedef struct _loop1d_info { #endif #endif +#ifndef Py_TARGET_ABI3T +typedef struct _tagPyUFuncObject PyUFuncObject; +#else +typedef struct tagPyUFuncObject PyUFuncObject; +#endif + #include "__ufunc_api.h" -// In future, when adding support for opaque PyObject, this would become -// a ABI function call to get the ufunc struct 
fields from the PyObject. -static inline PyUFuncObject_fields *_PyUFuncObject_GET_ITEM_DATA(PyUFuncObject *ufunc) { - return (PyUFuncObject_fields *)ufunc; -} +#ifndef Py_TARGET_ABI3T +#undef _PyUFuncObject_GET_ITEM_DATA +#define _PyUFuncObject_GET_ITEM_DATA(ufunc) ((PyUFuncObject_fields *)(ufunc)) +#endif #ifdef __cplusplus } diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index 2a2ee53ba745..c90346ab4d01 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -51,8 +51,8 @@ C_ABI_VERSION = '0x02000000' # 0x00000013 - 2.2.x # 0x00000014 - 2.3.x # 0x00000015 - 2.4.x -# 0x00000015 - 2.5.x -C_API_VERSION = '0x00000015' +# 0x00000016 - 2.5.x +C_API_VERSION = '0x00000016' # Check whether we have a mismatch between the set C API VERSION and the # actual C API VERSION. Will raise a MismatchCAPIError if so. diff --git a/numpy/_core/src/multiarray/abstractdtypes.h b/numpy/_core/src/multiarray/abstractdtypes.h index 63efea9580db..a74b8f86e394 100644 --- a/numpy/_core/src/multiarray/abstractdtypes.h +++ b/numpy/_core/src/multiarray/abstractdtypes.h @@ -43,7 +43,7 @@ npy_mark_tmp_array_if_pyscalar( PyObject *obj, PyArrayObject *arr, PyArray_DTypeMeta **dtype) { if (PyLong_CheckExact(obj)) { - ((PyArrayObject_fields *)arr)->flags |= NPY_ARRAY_WAS_PYTHON_INT; + _PyArray_GET_ITEM_DATA(arr)->flags |= NPY_ARRAY_WAS_PYTHON_INT; if (dtype != NULL) { Py_INCREF(&PyArray_PyLongDType); Py_SETREF(*dtype, &PyArray_PyLongDType); @@ -51,7 +51,7 @@ npy_mark_tmp_array_if_pyscalar( return 1; } else if (PyFloat_CheckExact(obj)) { - ((PyArrayObject_fields *)arr)->flags |= NPY_ARRAY_WAS_PYTHON_FLOAT; + _PyArray_GET_ITEM_DATA(arr)->flags |= NPY_ARRAY_WAS_PYTHON_FLOAT; if (dtype != NULL) { Py_INCREF(&PyArray_PyFloatDType); Py_SETREF(*dtype, &PyArray_PyFloatDType); @@ -59,7 +59,7 @@ npy_mark_tmp_array_if_pyscalar( return 1; } else if (PyComplex_CheckExact(obj)) { - ((PyArrayObject_fields *)arr)->flags |= NPY_ARRAY_WAS_PYTHON_COMPLEX; + _PyArray_GET_ITEM_DATA(arr)->flags 
|= NPY_ARRAY_WAS_PYTHON_COMPLEX; if (dtype != NULL) { Py_INCREF(&PyArray_PyComplexDType); Py_SETREF(*dtype, &PyArray_PyComplexDType); diff --git a/numpy/_core/src/multiarray/arrayobject.c b/numpy/_core/src/multiarray/arrayobject.c index 9440bec477bf..23bba6a43d51 100644 --- a/numpy/_core/src/multiarray/arrayobject.c +++ b/numpy/_core/src/multiarray/arrayobject.c @@ -1275,3 +1275,78 @@ NPY_NO_EXPORT PyTypeObject PyArray_Type = { .tp_getset = array_getsetlist, .tp_new = (newfunc)array_new, }; + +/* + The following *_GET_ITEM_DATA functions are used to get the pointer to the fields of the + corresponding struct from the given object. It is technically undefined behaviour + to access the fields of the struct through a pointer that is not of the same type, + but in our case it is not a problem in practice because this is used only in stable ABI + extensions where the original object layout is opaque. +*/ +#undef _PyDataType_GET_ITEM_DATA +/*NUMPY_API*/ +NPY_NO_EXPORT PyArray_Descr_fields * +_PyDataType_GET_ITEM_DATA(const PyArray_Descr *dtype) +{ + return (PyArray_Descr_fields *)(((char *)dtype) + sizeof(PyObject)); +} +#undef _PyArray_LegacyDescr_GET_ITEM_DATA +/*NUMPY_API*/ +NPY_NO_EXPORT _PyArray_LegacyDescr_fields * +_PyArray_LegacyDescr_GET_ITEM_DATA(const _PyArray_LegacyDescr *dtype) +{ + return (_PyArray_LegacyDescr_fields *)(((char *)dtype) + sizeof(PyObject)); +} +#undef _PyArray_GET_ITEM_DATA +/*NUMPY_API*/ +NPY_NO_EXPORT PyArrayObject_fields * +_PyArray_GET_ITEM_DATA(const PyArrayObject *arr) +{ + return (PyArrayObject_fields *)(((char *)arr) + sizeof(PyObject)); +} +#undef _PyArrayMultiIter_GET_ITEM_DATA +/*NUMPY_API*/ +NPY_NO_EXPORT PyArrayMultiIterObject_fields * +_PyArrayMultiIter_GET_ITEM_DATA(const PyArrayMultiIterObject *multi) +{ + return (PyArrayMultiIterObject_fields *)(((char *)multi) + sizeof(PyObject)); +} +#undef _PyArrayIter_GET_ITEM_DATA +/*NUMPY_API*/ +NPY_NO_EXPORT PyArrayIterObject_fields * +_PyArrayIter_GET_ITEM_DATA(const 
PyArrayIterObject *iter) +{ + return (PyArrayIterObject_fields *)(((char *)iter) + sizeof(PyObject)); +} +#undef _PyArrayNeighborhoodIter_GET_ITEM_DATA +/*NUMPY_API*/ +NPY_NO_EXPORT PyArrayNeighborhoodIterObject_fields * +_PyArrayNeighborhoodIter_GET_ITEM_DATA(const PyArrayNeighborhoodIterObject *iter) +{ + return (PyArrayNeighborhoodIterObject_fields *)(((char *)iter) + sizeof(PyObject)); +} +/*NUMPY_API*/ +NPY_NO_EXPORT PyArray_DatetimeMetaData +_PyDatetimeScalarObject_GetMetadata(PyObject *self) +{ + return ((PyDatetimeScalarObject *)self)->obmeta; +} +/*NUMPY_API*/ +NPY_NO_EXPORT PyArray_DatetimeMetaData +_PyTimedeltaScalarObject_GetMetadata(PyObject *self) +{ + return ((PyTimedeltaScalarObject *)self)->obmeta; +} +/*NUMPY_API*/ +NPY_NO_EXPORT npy_datetime +_PyDatetimeScalarObject_GetValue(PyObject *self) +{ + return ((PyDatetimeScalarObject *)self)->obval; +} +/*NUMPY_API*/ +NPY_NO_EXPORT npy_timedelta +_PyTimedeltaScalarObject_GetValue(PyObject *self) +{ + return ((PyTimedeltaScalarObject *)self)->obval; +} + diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index c5f9b5a6a7fb..a834de0acb97 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -5486,6 +5486,13 @@ PyUFunc_RegisterLoopForType(PyUFuncObject *ufunc, #undef _SETCPTR +#undef _PyUFuncObject_GET_ITEM_DATA +/*UFUNC_API*/ +NPY_NO_EXPORT PyUFuncObject_fields * +_PyUFuncObject_GET_ITEM_DATA(const PyUFuncObject *obj) +{ + return (PyUFuncObject_fields *)((char *)obj + sizeof(PyObject)); +} static void ufunc_dealloc(PyUFuncObject *ufunc) diff --git a/numpy/_core/tests/examples/limited_api/limited_api_opaque.c b/numpy/_core/tests/examples/limited_api/limited_api_opaque.c new file mode 100644 index 000000000000..5c2af1403199 --- /dev/null +++ b/numpy/_core/tests/examples/limited_api/limited_api_opaque.c @@ -0,0 +1,363 @@ +#ifndef Py_TARGET_ABI3T +#error "This file must be compiled with -DPy_TARGET_ABI3T" +#endif + +#include 
+#include +#include +#include + +static PyObject *limited_api_opaque_nonzero(PyObject *mod, PyArrayObject *self) +{ + PyArray_NonzeroFunc* nonzero = PyDataType_GetArrFuncs(PyArray_DESCR(self))->nonzero; + + NpyIter* iter; + NpyIter_IterNextFunc *iternext; + char** dataptr; + npy_intp nonzero_count; + npy_intp* strideptr,* innersizeptr; + + /* Handle zero-sized arrays specially */ + if (PyArray_SIZE(self) == 0) { + return PyLong_FromLong(0); + } + + /* + * Create and use an iterator to count the nonzeros. + * flag NPY_ITER_READONLY + * - The array is never written to. + * flag NPY_ITER_EXTERNAL_LOOP + * - Inner loop is done outside the iterator for efficiency. + * flag NPY_ITER_NPY_ITER_REFS_OK + * - Reference types are acceptable. + * order NPY_KEEPORDER + * - Visit elements in memory order, regardless of strides. + * This is good for performance when the specific order + * elements are visited is unimportant. + * casting NPY_NO_CASTING + * - No casting is required for this operation. + */ + iter = NpyIter_New(self, NPY_ITER_READONLY| + NPY_ITER_EXTERNAL_LOOP| + NPY_ITER_REFS_OK, + NPY_KEEPORDER, NPY_NO_CASTING, + NULL); + if (iter == NULL) { + return NULL; + } + + /* + * The iternext function gets stored in a local variable + * so it can be called repeatedly in an efficient manner. 
+ */ + iternext = NpyIter_GetIterNext(iter, NULL); + if (iternext == NULL) { + NpyIter_Deallocate(iter); + return NULL; + } + /* The location of the data pointer which the iterator may update */ + dataptr = NpyIter_GetDataPtrArray(iter); + /* The location of the stride which the iterator may update */ + strideptr = NpyIter_GetInnerStrideArray(iter); + /* The location of the inner loop size which the iterator may update */ + innersizeptr = NpyIter_GetInnerLoopSizePtr(iter); + + nonzero_count = 0; + do { + /* Get the inner loop data/stride/count values */ + char* data = *dataptr; + npy_intp stride = *strideptr; + npy_intp count = *innersizeptr; + + /* This is a typical inner loop for NPY_ITER_EXTERNAL_LOOP */ + while (count--) { + if (nonzero(data, self)) { + ++nonzero_count; + } + data += stride; + } + + /* Increment the iterator to the next inner loop */ + } while(iternext(iter)); + + NpyIter_Deallocate(iter); + + return PyLong_FromLong(nonzero_count); +} + +/* + * Test PyArray_ITER_NEXT, PyArray_ITER_RESET, PyArray_ITER_DATA, + * and PyArray_ITER_NOTDONE by summing all elements using the + * legacy iterator macros. + */ +static PyObject * +limited_api_opaque_iter_next(PyObject *mod, PyArrayObject *self) +{ + PyObject *iter_obj = PyArray_IterNew((PyObject *)self); + if (iter_obj == NULL) { + return NULL; + } + double sum = 0.0; + while (PyArray_ITER_NOTDONE(iter_obj)) { + sum += *(double *)PyArray_ITER_DATA(iter_obj); + PyArray_ITER_NEXT(iter_obj); + } + Py_DECREF(iter_obj); + return PyFloat_FromDouble(sum); +} + +/* + * Test PyArray_ITER_GOTO1D by accessing a specific flat index. 
+ */ +static PyObject * +limited_api_opaque_iter_goto1d(PyObject *mod, PyObject *args) +{ + PyArrayObject *arr; + npy_intp index; + if (!PyArg_ParseTuple(args, "O!n", &PyArray_Type, &arr, &index)) { + return NULL; + } + PyObject *iter_obj = PyArray_IterNew((PyObject *)arr); + if (iter_obj == NULL) { + return NULL; + } + PyArray_ITER_GOTO1D(iter_obj, index); + double val = *(double *)PyArray_ITER_DATA(iter_obj); + Py_DECREF(iter_obj); + return PyFloat_FromDouble(val); +} + +/* + * Test PyArray_ITER_RESET by iterating, resetting, and iterating again. + * Returns the sum from the second pass (should equal the first). + */ +static PyObject * +limited_api_opaque_iter_reset(PyObject *mod, PyArrayObject *self) +{ + PyObject *iter_obj = PyArray_IterNew((PyObject *)self); + if (iter_obj == NULL) { + return NULL; + } + /* First pass: skip through */ + while (PyArray_ITER_NOTDONE(iter_obj)) { + PyArray_ITER_NEXT(iter_obj); + } + /* Reset and sum */ + PyArray_ITER_RESET(iter_obj); + double sum = 0.0; + while (PyArray_ITER_NOTDONE(iter_obj)) { + sum += *(double *)PyArray_ITER_DATA(iter_obj); + PyArray_ITER_NEXT(iter_obj); + } + Py_DECREF(iter_obj); + return PyFloat_FromDouble(sum); +} + +/* + * Test PyArray_MultiIter_NEXT, PyArray_MultiIter_RESET, + * PyArray_MultiIter_DATA, and PyArray_MultiIter_NOTDONE + * by computing the element-wise sum of two broadcastable arrays. + * Returns the total sum of (a + b) for all broadcast elements. 
+ */ +static PyObject * +limited_api_opaque_multi_iter_next(PyObject *mod, PyObject *args) +{ + PyArrayObject *a, *b; + if (!PyArg_ParseTuple(args, "O!O!", &PyArray_Type, &a, + &PyArray_Type, &b)) { + return NULL; + } + PyObject *multi = PyArray_MultiIterNew(2, a, b); + if (multi == NULL) { + return NULL; + } + double sum = 0.0; + while (PyArray_MultiIter_NOTDONE(multi)) { + double va = *(double *)PyArray_MultiIter_DATA(multi, 0); + double vb = *(double *)PyArray_MultiIter_DATA(multi, 1); + sum += va + vb; + PyArray_MultiIter_NEXT(multi); + } + /* Test reset: iterate again and verify same sum */ + PyArray_MultiIter_RESET(multi); + double sum2 = 0.0; + while (PyArray_MultiIter_NOTDONE(multi)) { + double va = *(double *)PyArray_MultiIter_DATA(multi, 0); + double vb = *(double *)PyArray_MultiIter_DATA(multi, 1); + sum2 += va + vb; + PyArray_MultiIter_NEXT(multi); + } + Py_DECREF(multi); + if (sum != sum2) { + PyErr_SetString(PyExc_RuntimeError, + "MultiIter reset produced different sum"); + return NULL; + } + return PyFloat_FromDouble(sum); +} + +/* + * Test PyArray_ITER_GOTO by jumping to a coordinate and reading the value. 
+ */ +static PyObject * +limited_api_opaque_iter_goto(PyObject *mod, PyObject *args) +{ + PyArrayObject *arr; + PyObject *coord_tuple; + if (!PyArg_ParseTuple(args, "O!O!", &PyArray_Type, &arr, + &PyTuple_Type, &coord_tuple)) { + return NULL; + } + int nd = PyArray_NDIM(arr); + if (PyTuple_Size(coord_tuple) != nd) { + PyErr_SetString(PyExc_ValueError, "coordinate length mismatch"); + return NULL; + } + npy_intp destination[NPY_MAXDIMS_LEGACY_ITERS]; + for (int i = 0; i < nd; i++) { + destination[i] = PyLong_AsLong(PyTuple_GetItem(coord_tuple, i)); + if (destination[i] == -1 && PyErr_Occurred()) { + return NULL; + } + } + PyObject *iter_obj = PyArray_IterNew((PyObject *)arr); + if (iter_obj == NULL) { + return NULL; + } + PyArray_ITER_GOTO(iter_obj, destination); + double val = *(double *)PyArray_ITER_DATA(iter_obj); + Py_DECREF(iter_obj); + return PyFloat_FromDouble(val); +} + +/* + * Test PyArray_MultiIter_GOTO by jumping to a coordinate + * and returning (a_val, b_val) at that position. 
+ */ +static PyObject * +limited_api_opaque_multi_iter_goto(PyObject *mod, PyObject *args) +{ + PyArrayObject *a, *b; + PyObject *coord_tuple; + if (!PyArg_ParseTuple(args, "O!O!O!", &PyArray_Type, &a, + &PyArray_Type, &b, + &PyTuple_Type, &coord_tuple)) { + return NULL; + } + PyObject *multi = PyArray_MultiIterNew(2, a, b); + if (multi == NULL) { + return NULL; + } + int nd = _PyMIT(multi)->nd; + if (PyTuple_Size(coord_tuple) != nd) { + Py_DECREF(multi); + PyErr_SetString(PyExc_ValueError, "coordinate length mismatch"); + return NULL; + } + npy_intp destination[NPY_MAXDIMS_LEGACY_ITERS]; + for (int i = 0; i < nd; i++) { + destination[i] = PyLong_AsLong(PyTuple_GetItem(coord_tuple, i)); + if (destination[i] == -1 && PyErr_Occurred()) { + Py_DECREF(multi); + return NULL; + } + } + PyArray_MultiIter_GOTO(multi, destination); + double va = *(double *)PyArray_MultiIter_DATA(multi, 0); + double vb = *(double *)PyArray_MultiIter_DATA(multi, 1); + Py_DECREF(multi); + return Py_BuildValue("dd", va, vb); +} + +/* + * Test PyArray_MultiIter_GOTO1D by jumping to a flat index + * and returning (a_val, b_val) at that position. + */ +static PyObject * +limited_api_opaque_multi_iter_goto1d(PyObject *mod, PyObject *args) +{ + PyArrayObject *a, *b; + npy_intp index; + if (!PyArg_ParseTuple(args, "O!O!n", &PyArray_Type, &a, + &PyArray_Type, &b, &index)) { + return NULL; + } + PyObject *multi = PyArray_MultiIterNew(2, a, b); + if (multi == NULL) { + return NULL; + } + PyArray_MultiIter_GOTO1D(multi, index); + double va = *(double *)PyArray_MultiIter_DATA(multi, 0); + double vb = *(double *)PyArray_MultiIter_DATA(multi, 1); + Py_DECREF(multi); + return Py_BuildValue("dd", va, vb); +} + +/* + * Test PyArray_MultiIter_NEXTi by advancing only the first iterator + * and returning its data pointer value after N steps. 
+ */ +static PyObject * +limited_api_opaque_multi_iter_nexti(PyObject *mod, PyObject *args) +{ + PyArrayObject *a, *b; + int steps; + if (!PyArg_ParseTuple(args, "O!O!i", &PyArray_Type, &a, + &PyArray_Type, &b, &steps)) { + return NULL; + } + PyObject *multi = PyArray_MultiIterNew(2, a, b); + if (multi == NULL) { + return NULL; + } + for (int i = 0; i < steps; i++) { + PyArray_MultiIter_NEXTi(multi, 0); + } + double val = *(double *)PyArray_MultiIter_DATA(multi, 0); + Py_DECREF(multi); + return PyFloat_FromDouble(val); +} + +static PyMethodDef limited_api_opaque_methods[] = { + {"nonzero", (PyCFunction)limited_api_opaque_nonzero, METH_O, + "Count the number of non-zero elements in the array."}, + {"iter_next", (PyCFunction)limited_api_opaque_iter_next, METH_O, + "Sum array elements using PyArray_ITER_NEXT."}, + {"iter_goto1d", (PyCFunction)limited_api_opaque_iter_goto1d, METH_VARARGS, + "Get element at flat index using PyArray_ITER_GOTO1D."}, + {"iter_reset", (PyCFunction)limited_api_opaque_iter_reset, METH_O, + "Sum array elements after reset using PyArray_ITER_RESET."}, + {"multi_iter_next", (PyCFunction)limited_api_opaque_multi_iter_next, + METH_VARARGS, + "Sum broadcast (a+b) using PyArray_MultiIter_NEXT."}, + {"iter_goto", (PyCFunction)limited_api_opaque_iter_goto, METH_VARARGS, + "Get element at coordinate using PyArray_ITER_GOTO."}, + {"multi_iter_goto", (PyCFunction)limited_api_opaque_multi_iter_goto, + METH_VARARGS, + "Get (a, b) at coordinate using PyArray_MultiIter_GOTO."}, + {"multi_iter_goto1d", (PyCFunction)limited_api_opaque_multi_iter_goto1d, + METH_VARARGS, + "Get (a, b) at flat index using PyArray_MultiIter_GOTO1D."}, + {"multi_iter_nexti", (PyCFunction)limited_api_opaque_multi_iter_nexti, + METH_VARARGS, + "Advance only iter 0 N steps using PyArray_MultiIter_NEXTi."}, + {NULL, NULL, 0, NULL} /* Sentinel */ +}; + +PyABIInfo_VAR(abi_info); + +static PyModuleDef_Slot limited_api_opaque_slots[] = { + {Py_mod_abi, &abi_info}, + {Py_mod_name, 
"limited_api_opaque"}, + {Py_mod_methods, limited_api_opaque_methods}, + {Py_mod_gil, Py_MOD_GIL_NOT_USED}, + {0, NULL}, +}; + +PyMODEXPORT_FUNC +PyModExport_limited_api_opaque(void) +{ + import_array(); + import_umath(); + return limited_api_opaque_slots; +} diff --git a/numpy/_core/tests/examples/limited_api/meson.build b/numpy/_core/tests/examples/limited_api/meson.build index 2348b0856d0f..2ab8312e855c 100644 --- a/numpy/_core/tests/examples/limited_api/meson.build +++ b/numpy/_core/tests/examples/limited_api/meson.build @@ -28,36 +28,51 @@ npy_path = run_command(py, [ # where cython may not find the right __init__.pyd file. add_project_arguments('-I', npy_path, language : 'cython') -py.extension_module( - 'limited_api1', - 'limited_api1.c', - c_args: [ - '-DNPY_NO_DEPRECATED_API=NPY_1_21_API_VERSION', - ], - include_directories: [npy_include_path], - limited_api: '3.9', -) +if py.get_variable('Py_GIL_DISABLED', 0) == 0 + py.extension_module( + 'limited_api1', + 'limited_api1.c', + c_args: [ + '-DNPY_NO_DEPRECATED_API=NPY_1_21_API_VERSION', + ], + include_directories: [npy_include_path], + limited_api: '3.9', + ) -py.extension_module( - 'limited_api_latest', - 'limited_api_latest.c', - c_args: [ - '-DNPY_NO_DEPRECATED_API=NPY_1_21_API_VERSION', - ], - include_directories: [npy_include_path], - limited_api: py.language_version(), -) + py.extension_module( + 'limited_api_latest', + 'limited_api_latest.c', + c_args: [ + '-DNPY_NO_DEPRECATED_API=NPY_1_21_API_VERSION', + ], + include_directories: [npy_include_path], + limited_api: py.language_version(), + ) -py.extension_module( - 'limited_api2', - 'limited_api2.pyx', - install: false, - c_args: [ - '-DNPY_NO_DEPRECATED_API=0', - # Require 1.25+ to test datetime additions - '-DNPY_TARGET_VERSION=NPY_2_0_API_VERSION', - '-DCYTHON_LIMITED_API=1', - ], - include_directories: [npy_include_path], - limited_api: '3.9', + py.extension_module( + 'limited_api2', + 'limited_api2.pyx', + install: false, + c_args: [ + 
'-DNPY_NO_DEPRECATED_API=0', + # Require 1.25+ to test datetime additions + '-DNPY_TARGET_VERSION=NPY_2_0_API_VERSION', + '-DCYTHON_LIMITED_API=1', + ], + include_directories: [npy_include_path], + limited_api: '3.9', ) +endif + +if py.language_version().version_compare('>=3.15') + py.extension_module( + 'limited_api_opaque', + 'limited_api_opaque.c', + c_args: [ + '-DNPY_TARGET_VERSION=NPY_2_5_API_VERSION', + '-DPy_TARGET_ABI3T=0x030F0000', + ], + include_directories: [npy_include_path], + limited_api: '3.15', + ) +endif diff --git a/numpy/_core/tests/test_limited_api.py b/numpy/_core/tests/test_limited_api.py index 30ed3023cc92..2449d636f8b2 100644 --- a/numpy/_core/tests/test_limited_api.py +++ b/numpy/_core/tests/test_limited_api.py @@ -99,3 +99,53 @@ def test_limited_api(install_temp): import limited_api1 # Earliest (3.6) # noqa: F401 import limited_api2 # cython # noqa: F401 import limited_api_latest # Latest version (current Python) # noqa: F401 + +@pytest.mark.skipif( + sys.version_info < (3, 15), reason="opaque PyObject requires Python 3.15+" +) +def test_limited_opaque(install_temp): + import limited_api_opaque + + import numpy as np + arr = np.ones((200, 200)) + assert limited_api_opaque.nonzero(arr) == 200 * 200 + + # Test PyArray_ITER_NEXT / PyArray_ITER_DATA / PyArray_ITER_NOTDONE + arr = np.arange(12.0).reshape(3, 4) + assert limited_api_opaque.iter_next(arr) == 66.0 + + # Test PyArray_ITER_GOTO1D + assert limited_api_opaque.iter_goto1d(arr, 5) == 5.0 + assert limited_api_opaque.iter_goto1d(arr, -1) == 11.0 + + # Test PyArray_ITER_RESET + assert limited_api_opaque.iter_reset(arr) == 66.0 + + # Test PyArray_MultiIter_NEXT / RESET / DATA with broadcasting + a = np.arange(3.0).reshape(3, 1) # shape (3, 1) + b = np.arange(4.0).reshape(1, 4) # shape (1, 4) + # Each broadcast element is a[i] + b[j], total sum: + expected = float(np.sum(a + b)) + assert limited_api_opaque.multi_iter_next(a, b) == expected + + # Test PyArray_ITER_GOTO + arr = 
np.arange(12.0).reshape(3, 4) + assert limited_api_opaque.iter_goto(arr, (1, 2)) == 6.0 + assert limited_api_opaque.iter_goto(arr, (2, 3)) == 11.0 + + # Test PyArray_MultiIter_GOTO + a = np.arange(3.0).reshape(3, 1) + b = np.arange(4.0).reshape(1, 4) + va, vb = limited_api_opaque.multi_iter_goto(a, b, (1, 2)) + assert va == 1.0 and vb == 2.0 + + # Test PyArray_MultiIter_GOTO1D + # flat index 6 in (3,4) broadcast → row 1, col 2 + va, vb = limited_api_opaque.multi_iter_goto1d(a, b, 6) + assert va == 1.0 and vb == 2.0 + + # Test PyArray_MultiIter_NEXTi + a = np.arange(6.0).reshape(2, 3) + b = np.zeros((2, 3)) + # Advance iter 0 by 3 steps → flat index 3 → value 3.0 + assert limited_api_opaque.multi_iter_nexti(a, b, 3) == 3.0 From 11b1baa054d4df8984c3480d31ecdd52c761d720 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 7 May 2026 06:32:05 -0600 Subject: [PATCH 1708/1718] BUG: Avoid UB in safe_[add,sub,mul] helpers (#31396) --- numpy/_core/config.h.in | 2 + numpy/_core/meson.build | 2 + numpy/_core/src/common/npy_extint128.h | 51 ++++++++++++++++++++++++-- 3 files changed, 52 insertions(+), 3 deletions(-) diff --git a/numpy/_core/config.h.in b/numpy/_core/config.h.in index 7625615270a2..7768939bbd04 100644 --- a/numpy/_core/config.h.in +++ b/numpy/_core/config.h.in @@ -32,6 +32,8 @@ #mesondefine HAVE___BUILTIN_BSWAP64 #mesondefine HAVE___BUILTIN_EXPECT #mesondefine HAVE___BUILTIN_MUL_OVERFLOW +#mesondefine HAVE___BUILTIN_ADD_OVERFLOW +#mesondefine HAVE___BUILTIN_SUB_OVERFLOW #mesondefine HAVE___BUILTIN_PREFETCH #mesondefine HAVE_ATTRIBUTE_OPTIMIZE_UNROLL_LOOPS diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index c90346ab4d01..ace58c9c4c13 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -464,6 +464,8 @@ optional_intrinsics = [ ['__builtin_expect', '5, 0', [], []], # Test `long long` for arm+clang 13 (gh-22811, but we use all versions): ['__builtin_mul_overflow', '(long long)5, 5, (int*)5', [], []], + 
['__builtin_add_overflow', '(long long)5, 5, (int*)5', [], []], + ['__builtin_sub_overflow', '(long long)5, 5, (int*)5', [], []], ['__builtin_prefetch', '(float*)0, 0, 3', [], []], ] foreach intrin: optional_intrinsics diff --git a/numpy/_core/src/common/npy_extint128.h b/numpy/_core/src/common/npy_extint128.h index 776d71c772e8..b9b3e0f97205 100644 --- a/numpy/_core/src/common/npy_extint128.h +++ b/numpy/_core/src/common/npy_extint128.h @@ -8,17 +8,41 @@ typedef struct { } npy_extint128_t; +/* + * Integer add/sub/mul with overflow checking. + * + * On overflow, *overflow_flag is set to 1 and the return value is + * unspecified (callers must not use it). The arithmetic itself is + * never performed on values that would overflow signed `npy_int64`, + * so these helpers are free of the signed-overflow undefined behavior + * that an unguarded `a + b` / `a - b` / `a * b` would have. + * + * `__builtin_{add,sub,mul}_overflow` are probed independently by the + * meson build (see `numpy/_core/meson.build`); fall back to a + * branch-and-skip implementation otherwise. 
+ */ + /* Integer addition with overflow checking */ static inline npy_int64 safe_add(npy_int64 a, npy_int64 b, char *overflow_flag) { +#ifdef HAVE___BUILTIN_ADD_OVERFLOW + npy_int64 result; + if (__builtin_add_overflow(a, b, &result)) { + *overflow_flag = 1; + } + return result; +#else if (a > 0 && b > NPY_MAX_INT64 - a) { *overflow_flag = 1; + return 0; } - else if (a < 0 && b < NPY_MIN_INT64 - a) { + if (a < 0 && b < NPY_MIN_INT64 - a) { *overflow_flag = 1; + return 0; } return a + b; +#endif } @@ -26,13 +50,23 @@ safe_add(npy_int64 a, npy_int64 b, char *overflow_flag) static inline npy_int64 safe_sub(npy_int64 a, npy_int64 b, char *overflow_flag) { +#ifdef HAVE___BUILTIN_SUB_OVERFLOW + npy_int64 result; + if (__builtin_sub_overflow(a, b, &result)) { + *overflow_flag = 1; + } + return result; +#else if (a >= 0 && b < a - NPY_MAX_INT64) { *overflow_flag = 1; + return 0; } - else if (a < 0 && b > a - NPY_MIN_INT64) { + if (a < 0 && b > a - NPY_MIN_INT64) { *overflow_flag = 1; + return 0; } return a - b; +#endif } @@ -40,20 +74,31 @@ safe_sub(npy_int64 a, npy_int64 b, char *overflow_flag) static inline npy_int64 safe_mul(npy_int64 a, npy_int64 b, char *overflow_flag) { +#ifdef HAVE___BUILTIN_MUL_OVERFLOW + npy_int64 result; + if (__builtin_mul_overflow(a, b, &result)) { + *overflow_flag = 1; + } + return result; +#else if (a > 0) { if (b > NPY_MAX_INT64 / a || b < NPY_MIN_INT64 / a) { *overflow_flag = 1; + return 0; } } else if (a < 0) { if (b > 0 && a < NPY_MIN_INT64 / b) { *overflow_flag = 1; + return 0; } - else if (b < 0 && a < NPY_MAX_INT64 / b) { + if (b < 0 && a < NPY_MAX_INT64 / b) { *overflow_flag = 1; + return 0; } } return a * b; +#endif } From 5b4f1dedd53f509c1a5e69c98370adcdd4b93adc Mon Sep 17 00:00:00 2001 From: Praneeth Kodumagulla <64239307+praneethhere@users.noreply.github.com> Date: Thu, 7 May 2026 07:36:08 -0500 Subject: [PATCH 1709/1718] BUG: exclude __pycache__ directories from wheels (#31397) --- numpy/meson.build | 21 ++++++++++++++++++--- 1 
file changed, 18 insertions(+), 3 deletions(-) diff --git a/numpy/meson.build b/numpy/meson.build index 45d5a2b52eb8..36694ef4efd8 100644 --- a/numpy/meson.build +++ b/numpy/meson.build @@ -363,13 +363,28 @@ else endif foreach subdir: pure_subdirs - install_subdir(subdir, install_dir: np_dir, install_tag: 'python-runtime', exclude_directories: ['tests']) + install_subdir( + subdir, + install_dir: np_dir, + install_tag: 'python-runtime', + exclude_directories: ['tests', '__pycache__'] + ) if fs.is_dir(subdir/'tests') - install_subdir(subdir/'tests', install_dir: np_dir/subdir, install_tag: 'tests') + install_subdir( + subdir/'tests', + install_dir: np_dir/subdir, + install_tag: 'tests', + exclude_directories: ['__pycache__'] + ) endif endforeach -install_subdir('tests', install_dir: np_dir, install_tag: 'tests') +install_subdir( + 'tests', + install_dir: np_dir, + install_tag: 'tests', + exclude_directories: ['__pycache__'] +) compilers = { 'C': cc, From 4d711763f8c5b978abd88cea710147c14d802675 Mon Sep 17 00:00:00 2001 From: Rishabh Dewangan <107680241+Rishabh-git10@users.noreply.github.com> Date: Thu, 7 May 2026 21:21:06 +0530 Subject: [PATCH 1710/1718] TST: Validate warning message content in _DeprecationTestCase (#31370) --- numpy/_core/tests/test_deprecations.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index 581958edb749..a8155a9304f8 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -4,6 +4,7 @@ """ import contextlib +import re import warnings from collections.abc import Callable @@ -86,6 +87,12 @@ def assert_deprecated(self, function, num=1, ignore_others=False, for warning in w_context: if warning.category is self.warning_cls: num_found += 1 + if self.message: + if not re.match(self.message, str(warning.message)): + raise AssertionError( + f"Warning message '{warning.message}' did not match " + f"expected pattern 
'{self.message}'" + ) elif not ignore_others: name = self.warning_cls.__name__ raise AssertionError(f"expected {name} but got: {warning.category}") From 7acf88c9ff8386969fe66b6f30692f1148a39779 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Fri, 8 May 2026 17:29:33 +0200 Subject: [PATCH 1711/1718] TYP: `_NestedSequence` type parameter default to work around a mypy issue (#31399) --- numpy/_typing/_nested_sequence.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/numpy/_typing/_nested_sequence.py b/numpy/_typing/_nested_sequence.py index 6755c2ec0ec9..0aa3ce6fcff6 100644 --- a/numpy/_typing/_nested_sequence.py +++ b/numpy/_typing/_nested_sequence.py @@ -4,12 +4,20 @@ if TYPE_CHECKING: from collections.abc import Iterator + from typing_extensions import TypeVar + + _T_co = TypeVar("_T_co", covariant=True, default=Any) +else: + from typing import TypeVar + + _T_co = TypeVar("_T_co", covariant=True) + __all__ = ["_NestedSequence"] @runtime_checkable -class _NestedSequence[T](Protocol): +class _NestedSequence(Protocol[_T_co]): """A protocol for representing nested sequences. 
Warning @@ -52,7 +60,7 @@ def __len__(self, /) -> int: """Implement ``len(self)``.""" raise NotImplementedError - def __getitem__(self, index: int, /) -> "T | _NestedSequence[T]": + def __getitem__(self, index: int, /) -> "_T_co | _NestedSequence[_T_co]": """Implement ``self[x]``.""" raise NotImplementedError @@ -60,11 +68,11 @@ def __contains__(self, x: object, /) -> bool: """Implement ``x in self``.""" raise NotImplementedError - def __iter__(self, /) -> "Iterator[T | _NestedSequence[T]]": + def __iter__(self, /) -> "Iterator[_T_co | _NestedSequence[_T_co]]": """Implement ``iter(self)``.""" raise NotImplementedError - def __reversed__(self, /) -> "Iterator[T | _NestedSequence[T]]": + def __reversed__(self, /) -> "Iterator[_T_co | _NestedSequence[_T_co]]": """Implement ``reversed(self)``.""" raise NotImplementedError From b530f83a4011f2d6beb6c67338176ac2fb7fd01a Mon Sep 17 00:00:00 2001 From: Kumar Aditya Date: Fri, 8 May 2026 21:51:08 +0530 Subject: [PATCH 1712/1718] TST: fix test_limited_opaque for 3.15 PySlot (#31403) --- .../tests/examples/limited_api/limited_api_opaque.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/numpy/_core/tests/examples/limited_api/limited_api_opaque.c b/numpy/_core/tests/examples/limited_api/limited_api_opaque.c index 5c2af1403199..86a934fd1622 100644 --- a/numpy/_core/tests/examples/limited_api/limited_api_opaque.c +++ b/numpy/_core/tests/examples/limited_api/limited_api_opaque.c @@ -346,12 +346,12 @@ static PyMethodDef limited_api_opaque_methods[] = { PyABIInfo_VAR(abi_info); -static PyModuleDef_Slot limited_api_opaque_slots[] = { - {Py_mod_abi, &abi_info}, - {Py_mod_name, "limited_api_opaque"}, - {Py_mod_methods, limited_api_opaque_methods}, - {Py_mod_gil, Py_MOD_GIL_NOT_USED}, - {0, NULL}, +static PySlot limited_api_opaque_slots[] = { + PySlot_STATIC_DATA(Py_mod_abi, &abi_info), + PySlot_STATIC_DATA(Py_mod_name, "limited_api_opaque"), + PySlot_STATIC_DATA(Py_mod_methods, limited_api_opaque_methods), 
+ PySlot_STATIC_DATA(Py_mod_gil, Py_MOD_GIL_NOT_USED), + PySlot_END, }; PyMODEXPORT_FUNC From 1f294ec3359d156f52c218f9ef7f297e10e7c1a6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 8 May 2026 17:55:26 +0000 Subject: [PATCH 1713/1718] MAINT: Bump github/codeql-action from 4.35.2 to 4.35.3 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 4.35.2 to 4.35.3. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/95e58e9a2cdfd71adc6e0353d5c52f41a045d225...e46ed2cbd01164d986452f91f178727624ae40d7) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 4.35.3 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index d8fe3a1f47b7..090ed3650035 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@95e58e9a2cdfd71adc6e0353d5c52f41a045d225 # v4.35.2 + uses: github/codeql-action/init@e46ed2cbd01164d986452f91f178727624ae40d7 # v4.35.3 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 
# If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@95e58e9a2cdfd71adc6e0353d5c52f41a045d225 # v4.35.2 + uses: github/codeql-action/autobuild@e46ed2cbd01164d986452f91f178727624ae40d7 # v4.35.3 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@95e58e9a2cdfd71adc6e0353d5c52f41a045d225 # v4.35.2 + uses: github/codeql-action/analyze@e46ed2cbd01164d986452f91f178727624ae40d7 # v4.35.3 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index bff4c54758a2..1f86f8322aa1 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@95e58e9a2cdfd71adc6e0353d5c52f41a045d225 # v2.1.27 + uses: github/codeql-action/upload-sarif@e46ed2cbd01164d986452f91f178727624ae40d7 # v2.1.27 with: sarif_file: results.sarif From 83bac4c23243f8f223f7e76bfdc0c250a9b2a51a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 8 May 2026 17:55:42 +0000 Subject: [PATCH 1714/1718] MAINT: Bump int128/hide-comment-action from 1.57.0 to 1.58.0 Bumps [int128/hide-comment-action](https://github.com/int128/hide-comment-action) from 1.57.0 to 1.58.0. 
- [Release notes](https://github.com/int128/hide-comment-action/releases) - [Commits](https://github.com/int128/hide-comment-action/compare/d38cf5901fbb26e4d5790890955713b0ec539087...392bc214093dafaebdf99c7d6298245262832d15) --- updated-dependencies: - dependency-name: int128/hide-comment-action dependency-version: 1.58.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/mypy_primer_comment.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/mypy_primer_comment.yml b/.github/workflows/mypy_primer_comment.yml index 29b5b4d4e213..da7c677fcd2e 100644 --- a/.github/workflows/mypy_primer_comment.yml +++ b/.github/workflows/mypy_primer_comment.yml @@ -49,7 +49,7 @@ jobs: return parseInt(fs.readFileSync("pr_number.txt", { encoding: "utf8" })) - name: Hide old comments - uses: int128/hide-comment-action@d38cf5901fbb26e4d5790890955713b0ec539087 # v1.57.0 + uses: int128/hide-comment-action@392bc214093dafaebdf99c7d6298245262832d15 # v1.58.0 with: token: ${{ secrets.GITHUB_TOKEN }} issue-number: ${{ steps.get-pr-number.outputs.result }} From 0a1ed72ff52640579390ce2fb9242f48127dd8f8 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Mon, 11 May 2026 13:47:00 +0200 Subject: [PATCH 1715/1718] ENH: Allow downstream to register for DLPack support (#31256) This allows downstream packages (mostly ml_dtypes) to register their dtype with something like: np.dtypes.register_dlpack_dtype((4, 16), np.dtype("bfloat16")) which will map it for dlpack use (in both directions). The function is idempotent if the dtypes are identical. It may raise an error if another dtype is already mappend _from_ a DLPack code, in which case the path for exporting (to the code) is still registered as it still makes sense (the other reason is that this just simplifies thread-safety a lot). 
Co-authored-by: Tyler Reddy --- .../upcoming_changes/31256.new_feature.rst | 3 + numpy/_core/src/common/npy_dlpack.h | 8 + numpy/_core/src/multiarray/dlpack.c | 226 ++++++++++++++++-- numpy/_core/src/multiarray/multiarraymodule.c | 4 + numpy/_core/src/multiarray/npy_static_data.c | 10 + numpy/_core/src/multiarray/npy_static_data.h | 3 + numpy/_core/tests/test_dlpack.py | 77 +++++- numpy/_core/tests/test_dtype.py | 7 +- numpy/dtypes.py | 47 +++- numpy/dtypes.pyi | 3 + 10 files changed, 367 insertions(+), 21 deletions(-) create mode 100644 doc/release/upcoming_changes/31256.new_feature.rst diff --git a/doc/release/upcoming_changes/31256.new_feature.rst b/doc/release/upcoming_changes/31256.new_feature.rst new file mode 100644 index 000000000000..b5177206ff13 --- /dev/null +++ b/doc/release/upcoming_changes/31256.new_feature.rst @@ -0,0 +1,3 @@ +* It is now possible to register user-dtypes for dlpack export and import + via `numpy.dtypes.register_dlpack_dtype`. This functionality is meant to + be used with care by user-dtype authors. 
diff --git a/numpy/_core/src/common/npy_dlpack.h b/numpy/_core/src/common/npy_dlpack.h index 1dd3ae7f88e5..bfbb251cf080 100644 --- a/numpy/_core/src/common/npy_dlpack.h +++ b/numpy/_core/src/common/npy_dlpack.h @@ -29,4 +29,12 @@ NPY_NO_EXPORT PyObject * from_dlpack(PyObject *NPY_UNUSED(self), PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames); + +NPY_NO_EXPORT PyObject * +_register_dlpack_dtype(PyObject *NPY_UNUSED(self), PyObject *args); + + +NPY_NO_EXPORT PyObject * +_dlpack_registry_replace(PyObject *NPY_UNUSED(self), PyObject *args); + #endif diff --git a/numpy/_core/src/multiarray/dlpack.c b/numpy/_core/src/multiarray/dlpack.c index fc870b90ddfb..a321b12a7eb8 100644 --- a/numpy/_core/src/multiarray/dlpack.c +++ b/numpy/_core/src/multiarray/dlpack.c @@ -6,10 +6,85 @@ #include "dlpack/dlpack.h" #include "numpy/arrayobject.h" +#include "npy_pycompat.h" #include "npy_argparse.h" #include "npy_dlpack.h" #include "npy_static_data.h" +#include "common.h" #include "conversion_utils.h" +#include "descriptor.h" + + +/* + * Find user-registered DLPack dtype mapping (1 if found, -1 on error). + */ +static int +dlpack_export_registry_lookup(PyArray_Descr *dtype, + uint8_t *out_code, uint8_t *out_bits) +{ + PyObject *val = NULL; + int gres = PyDict_GetItemRef( + npy_static_pydata.dlpack_export_registry, + (PyObject *)dtype, + &val); + if (gres <= 0) { + return gres; + } + if (!PyTuple_Check(val) || PyTuple_GET_SIZE(val) != 2) { + PyErr_SetString(PyExc_RuntimeError, + "internal: dlpack export_registry values must be length-2 tuples"); + Py_DECREF(val); + return -1; + } + long c = PyLong_AsLong(PyTuple_GET_ITEM(val, 0)); + if (error_converting(c)) { + Py_DECREF(val); + return -1; + } + long b = PyLong_AsLong(PyTuple_GET_ITEM(val, 1)); + if (error_converting(b)) { + Py_DECREF(val); + return -1; + } + *out_code = (uint8_t)c; + *out_bits = (uint8_t)b; + Py_DECREF(val); + return 1; +} + + +/* + * Return a registered dtype or raise an error if none is found. 
+ */ +static PyArray_Descr * +dlpack_dtype_registry_lookup(uint8_t code, uint8_t bits) +{ + npy_intp key_vals[2] = {code, bits}; + PyObject *key = PyArray_IntTupleFromIntp(2, key_vals); + if (key == NULL) { + return NULL; + } + PyObject *reg_val = NULL; + int gres = PyDict_GetItemRef( + npy_static_pydata.dlpack_dtype_registry, key, ®_val); + Py_DECREF(key); + if (gres < 0) { + return NULL; + } + if (gres == 0) { + PyErr_SetString(PyExc_BufferError, + "Unsupported dtype in DLTensor."); + return NULL; + } + if (!PyArray_DescrCheck(reg_val)) { + Py_DECREF(reg_val); + PyErr_SetString(PyExc_TypeError, + "from_dlpack(): DLPack dtype registry must only contain " + "numpy.dtype instances; see numpy.dtypes.register_dlpack_dtype."); + return NULL; + } + return (PyArray_Descr *)reg_val; +} /* @@ -254,10 +329,18 @@ fill_dl_tensor_information( managed_dtype.code = kDLComplex; } else { - PyErr_SetString(PyExc_BufferError, - "DLPack only supports signed/unsigned integers, float " - "and complex dtypes."); - return -1; + int reg_res = dlpack_export_registry_lookup(dtype, + &managed_dtype.code, &managed_dtype.bits); + if (reg_res < 0) { + return -1; + } + if (!reg_res) { + PyErr_SetString(PyExc_BufferError, + "DLPack only supports signed/unsigned integers, float " + "and complex dtypes (or dtypes registered by third-party " + "packages)."); + return -1; + } } /* @@ -434,7 +517,7 @@ array_dlpack(PyArrayObject *self, } if (stream != Py_None) { - PyErr_SetString(PyExc_RuntimeError, + PyErr_SetString(PyExc_ValueError, "NumPy only supports stream=None."); return NULL; } @@ -498,7 +581,7 @@ from_dlpack(PyObject *NPY_UNUSED(self), return NULL; } - /* + /* * Prepare arguments for the full call. We always forward copy and pass * our max_version. `device` is always passed as `None`, but if the user * provided a device, we will replace it with the "cpu": (1, 0). 
@@ -604,7 +687,6 @@ from_dlpack(PyObject *NPY_UNUSED(self), int typenum = -1; const uint8_t bits = dl_tensor.dtype.bits; - const npy_intp itemsize = bits / 8; switch (dl_tensor.dtype.code) { case kDLBool: if (bits == 8) { @@ -646,13 +728,25 @@ from_dlpack(PyObject *NPY_UNUSED(self), break; } - if (typenum == -1) { - PyErr_SetString(PyExc_BufferError, - "Unsupported dtype in DLTensor."); - Py_DECREF(capsule); - return NULL; + PyArray_Descr *descr = NULL; + if (typenum != -1) { + descr = PyArray_DescrFromType(typenum); + if (descr == NULL) { + Py_DECREF(capsule); + return NULL; + } + } + else { + descr = dlpack_dtype_registry_lookup( + (uint8_t)dl_tensor.dtype.code, bits); + if (descr == NULL) { + Py_DECREF(capsule); + return NULL; + } } + npy_intp itemsize = descr->elsize; + npy_intp shape[NPY_MAXDIMS]; npy_intp strides[NPY_MAXDIMS]; @@ -666,12 +760,6 @@ from_dlpack(PyObject *NPY_UNUSED(self), char *data = (char *)dl_tensor.data + dl_tensor.byte_offset; - PyArray_Descr *descr = PyArray_DescrFromType(typenum); - if (descr == NULL) { - Py_DECREF(capsule); - return NULL; - } - PyObject *ret = PyArray_NewFromDescr(&PyArray_Type, descr, ndim, shape, dl_tensor.strides != NULL ? strides : NULL, data, readonly ? 0 : NPY_ARRAY_WRITEABLE, NULL); @@ -719,3 +807,105 @@ from_dlpack(PyObject *NPY_UNUSED(self), } +NPY_NO_EXPORT PyObject * +_register_dlpack_dtype(PyObject *NPY_UNUSED(self), PyObject *args) +{ + PyObject *ret = NULL; + PyArray_Descr *descr = NULL; + PyObject *dlpack_tuple = NULL; + PyObject *original_tuple = NULL; // Existing dlpack tuple for export + PyObject *original_descr = NULL; // Existing dtype for import + + long code = 0; + long bits_l = 0; + if (!PyArg_ParseTuple(args, "(ll)O!:register_dlpack_dtype", &code, &bits_l, + &PyArrayDescr_Type, &descr)) { + goto finish; + } + + /* Sanity check code and bits, if DLPack relaxes this we can do this also. 
*/ + if (code < 0 || code > 255) { + PyErr_SetString(PyExc_ValueError, + "register_dlpack_dtype: DLPack code must be in 0..255."); + goto finish; + } + // Check bits fit into 255 bytes via elsize to avoid elsize * 8 overflow. + if (descr->elsize > 255/8 || descr->elsize * 8 != bits_l) { + PyErr_SetString(PyExc_ValueError, + "register_dlpack_dtype: number of bits must match the " + "dtype's elsize and be <=255."); + goto finish; + } + + dlpack_tuple = Py_BuildValue("(ll)", code, bits_l); + if (dlpack_tuple == NULL) { + goto finish; + } + + int set_res = PyDict_SetDefaultRef( + npy_static_pydata.dlpack_export_registry, (PyObject *)descr, dlpack_tuple, + &original_tuple); + if (set_res < 0) { + goto finish; + } + else if (set_res == 1) { + /* Key was present, allow if the value is equal: */ + int exp_same = PyObject_RichCompareBool(original_tuple, dlpack_tuple, Py_EQ); + if (exp_same < 0) { + goto finish; + } + if (exp_same == 0) { + PyErr_Format(PyExc_ValueError, + "register_dlpack_dtype: this NumPy dtype is already exported " + "with a different DLPack (code, bits)."); + goto finish; + } + } + + if (PyDict_SetDefaultRef( + npy_static_pydata.dlpack_dtype_registry, dlpack_tuple, (PyObject *)descr, + &original_descr) < 0) { + goto finish; + } + if ((PyObject *)descr != original_descr) { + PyErr_Format(PyExc_ValueError, + "register_dlpack_dtype: the same (code, bits) already maps to a " + "%R which is not identical (it may be equal). " + "The dtype->(code, bits) was, however, established.", + original_descr); + goto finish; + } + + ret = Py_NewRef(Py_None); + +finish: + Py_XDECREF(dlpack_tuple); + Py_XDECREF(original_tuple); + Py_XDECREF(original_descr); + return ret; +} + + +/* Swap out the registry dicts for testing purposes. 
*/ +NPY_NO_EXPORT PyObject * +_dlpack_registry_replace(PyObject *NPY_UNUSED(self), PyObject *args) +{ + PyObject *imp, *exp; + if (!PyArg_ParseTuple(args, "O!O!: _dlpack_registry_replace", + &PyDict_Type, &imp, &PyDict_Type, &exp)) { + return NULL; + } + + PyObject *ret = PyTuple_Pack(2, + npy_static_pydata.dlpack_dtype_registry, + npy_static_pydata.dlpack_export_registry); + if (ret == NULL) { + return ret; + } + + /* Replace the currently used dicts in place. */ + Py_SETREF(npy_static_pydata.dlpack_dtype_registry, Py_NewRef(imp)); + Py_SETREF(npy_static_pydata.dlpack_export_registry, Py_NewRef(exp)); + return ret; +} + diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index eb97b0ff267d..3c63bbaecaa3 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -4730,6 +4730,10 @@ static struct PyMethodDef array_module_methods[] = { "Give a warning on reload and big warning in sub-interpreters."}, {"from_dlpack", (PyCFunction)from_dlpack, METH_FASTCALL | METH_KEYWORDS, NULL}, + {"_register_dlpack_dtype", (PyCFunction)_register_dlpack_dtype, + METH_VARARGS, NULL}, + {"_dlpack_registry_replace", (PyCFunction)_dlpack_registry_replace, + METH_VARARGS, "unsafe testing helper to swap out dlpack registry"}, {"_unique_hash", (PyCFunction)array__unique_hash, METH_FASTCALL | METH_KEYWORDS, "Collect unique values via a hash map."}, {NULL, NULL, 0, NULL} /* sentinel */ diff --git a/numpy/_core/src/multiarray/npy_static_data.c b/numpy/_core/src/multiarray/npy_static_data.c index 40dfc2dddaf7..9d5d840ccfed 100644 --- a/numpy/_core/src/multiarray/npy_static_data.c +++ b/numpy/_core/src/multiarray/npy_static_data.c @@ -214,6 +214,16 @@ initialize_static_globals(void) return -1; } + npy_static_pydata.dlpack_dtype_registry = PyDict_New(); + if (npy_static_pydata.dlpack_dtype_registry == NULL) { + return -1; + } + + npy_static_pydata.dlpack_export_registry = PyDict_New(); + 
if (npy_static_pydata.dlpack_export_registry == NULL) { + return -1; + } + /* * Initialize contents of npy_static_cdata struct * diff --git a/numpy/_core/src/multiarray/npy_static_data.h b/numpy/_core/src/multiarray/npy_static_data.h index abf98b5f0c09..3536dac8f1ea 100644 --- a/numpy/_core/src/multiarray/npy_static_data.h +++ b/numpy/_core/src/multiarray/npy_static_data.h @@ -163,6 +163,9 @@ typedef struct npy_static_pydata_struct { PyObject *dl_call_kwnames; PyObject *dl_cpu_device_tuple; PyObject *dl_max_version; + /* dicts for implementing `register_dlpack_dtype` */ + PyObject *dlpack_dtype_registry; + PyObject *dlpack_export_registry; } npy_static_pydata_struct; diff --git a/numpy/_core/tests/test_dlpack.py b/numpy/_core/tests/test_dlpack.py index 239f34559cef..83272f72f74a 100644 --- a/numpy/_core/tests/test_dlpack.py +++ b/numpy/_core/tests/test_dlpack.py @@ -30,7 +30,8 @@ def test_dunder_dlpack_stream(self): x = np.arange(5) x.__dlpack__(stream=None) - with pytest.raises(RuntimeError): + with pytest.raises( + ValueError, match="NumPy only supports stream=None."): x.__dlpack__(stream=1) def test_dunder_dlpack_copy(self): @@ -186,3 +187,77 @@ def test_device(self): x.__dlpack__(dl_device=(10, 0)) with pytest.raises(ValueError): np.from_dlpack(x, device="gpu") + + +class TestRegisterDlpackDtype: + @pytest.fixture(scope="class", autouse=True) + @staticmethod + def dlpack_registry_clear(): + prev = np._core._multiarray_umath._dlpack_registry_replace({}, {}) + yield + np._core._multiarray_umath._dlpack_registry_replace(*prev) + + @pytest.mark.parametrize("key,dtype", [ + ((2,), np.dtype("f4")), + ((2, 2, 2), np.dtype(np.float16)), + (None, np.dtype(np.float16)), + ((4, 16), "S2"), # not a dtype instance + ]) + def test_register_bad_dlpack_tuple(self, key, dtype): + with pytest.raises(TypeError): + np.dtypes.register_dlpack_dtype(key, dtype) + + @pytest.mark.parametrize("key,dtype", [ + ((-1, 16), np.dtype(np.float16)), + ((256, 16), np.dtype(np.float16)), + ((4, 
15), np.dtype(np.float16)), + ((4, 256), np.dtype("V256")), + ((4, 15), np.dtype(np.float32)), + ]) + def test_register_bad_code_or_bits(self, key, dtype): + with pytest.raises(ValueError, match="(0..255|must match the dtype)"): + np.dtypes.register_dlpack_dtype(key, dtype) + + def test_register_idempotent(self): + dt = np.dtype(np.float16) + np.dtypes.register_dlpack_dtype((4, 16), dt) + np.dtypes.register_dlpack_dtype((4, 16), dt) + + def test_roundtrip(self, dtype=np.dtype("S1")): # noqa: B008 + # Register "S1" as kDLFloat8_e3m4 == 7 + # (use of kwarg ensure singleton in free-threading) + np.dtypes.register_dlpack_dtype((7, 8), dtype) + x = np.array([1.0, 2.0], dtype="S1") + y = np.from_dlpack(x) + assert y.dtype == "S1" + assert_array_equal(x, y) + + def test_register_conflict(self): + np.dtypes.register_dlpack_dtype((4, 16), np.dtype(np.float16)) + with pytest.raises(ValueError, match="already exported"): + np.dtypes.register_dlpack_dtype((5, 16), np.dtype(np.float16)) + + a = np.array(["12", "23"]) + with pytest.raises(BufferError): + np.from_dlpack(a) # dtype not yet registered + + with pytest.raises(ValueError, match="already maps"): + np.dtypes.register_dlpack_dtype((4, 16), np.dtype("S2")) + + # But... accept that this now does get exported (but won't roundtrip) + arr = np.from_dlpack(np.array(["12", "23"], dtype="S2")) + assert arr.dtype == np.float16 + + @pytest.mark.thread_unsafe(reason="dlpack registry is thread-unsafe") + def test_buffererror_bad_dtype(self, dtype=np.dtype("S3")): # noqa: B008 + # Register S3 as a nonsensical dtype + np.dtypes.register_dlpack_dtype((123, 24), dtype) + # Delete from import but not from export. 
+ imp, exp = np._core._multiarray_umath._dlpack_registry_replace({}, {}) + imp.pop((123, 24)) + np._core._multiarray_umath._dlpack_registry_replace(imp, exp) + + arr = np.array(["1", "2"], dtype=dtype) + arr.__dlpack__() # passes + with pytest.raises(BufferError): + np.from_dlpack(arr) # doesn't round-trip diff --git a/numpy/_core/tests/test_dtype.py b/numpy/_core/tests/test_dtype.py index 1e69c9ed6443..6ecb55ee1233 100644 --- a/numpy/_core/tests/test_dtype.py +++ b/numpy/_core/tests/test_dtype.py @@ -1670,6 +1670,9 @@ def test_float_alias_names_not_present(self, name): def test_scalar_helper_all_dtypes(self): for dtype in np.dtypes.__all__: + if dtype == "register_dlpack_dtype": + continue + dt_class = getattr(np.dtypes, dtype) dt = np.dtype(dt_class) if dt.char not in 'OTVM': @@ -2039,7 +2042,9 @@ def test_signature_dtype_newbyteorder(self): assert sig.parameters["new_order"].kind is inspect.Parameter.POSITIONAL_ONLY assert sig.parameters["new_order"].default == "S" - @pytest.mark.parametrize("typename", np.dtypes.__all__) + @pytest.mark.parametrize("typename", [ + n for n in np.dtypes.__all__ if n != "register_dlpack_dtype" + ]) def test_signature_dtypes_classes(self, typename: str): dtype_type = getattr(np.dtypes, typename) sig = inspect.signature(dtype_type) diff --git a/numpy/dtypes.py b/numpy/dtypes.py index 550a29e18f29..341642f8c6c5 100644 --- a/numpy/dtypes.py +++ b/numpy/dtypes.py @@ -23,8 +23,53 @@ """ # See doc/source/reference/routines.dtypes.rst for module-level docs +__all__ = ["register_dlpack_dtype"] -__all__ = [] + +def register_dlpack_dtype(dlpack_key, dtype, /): + """ + Register a NumPy dtype for a DLPack ``(code, bits)`` pair so that + `numpy.from_dlpack` can import it and ``ndarray.__dlpack__`` can export + it. Built-in dtype mappings take priority on import. 
+ + If you think a conflict is possible but unproblematic you may wrap this + into a try/except block as NumPy will raise an error if another DType + is already registered for the same (code, bits) pair. + While an error is raised, the export is registered even on error. + + Registering an identical (not equal) dtype multiple times is allowed but + normally registration should happen at import time. + + .. warning:: + It is the responsibility of the registering user to ensure that the + mapping is valid. + + .. note:: + This function was added primarily for ``ml_dtypes`` and may be + replaced with a different mechanism in the future. It is intended + to be used by authors of user-defined dtypes and not end-users. + + Parameters + ---------- + dlpack_key : tuple of int + ``(dl_dtype_code, dl_bits)`` matching the DLPack ``DLDataType``, + lanes is assumed to be always 1, currently. + dtype : dtype + A NumPy dtype instance. + + Raises + ------ + ValueError : If a conflicting registration was already done. + + See Also + -------- + numpy.from_dlpack + + """ + # Deferred to avoid circular import. + from numpy._core._multiarray_umath import _register_dlpack_dtype + + return _register_dlpack_dtype(dlpack_key, dtype) def _add_dtype_helper(DType, alias): diff --git a/numpy/dtypes.pyi b/numpy/dtypes.pyi index f1f1261d3d32..72ecb24ec8f4 100644 --- a/numpy/dtypes.pyi +++ b/numpy/dtypes.pyi @@ -17,6 +17,7 @@ from typing_extensions import TypeVar import numpy as np __all__ = [ + "register_dlpack_dtype", "BoolDType", "Int8DType", "ByteDType", @@ -622,3 +623,5 @@ class StringDType( # type: ignore[misc] def isalignedstruct(self) -> L[False]: ... @property def isnative(self) -> L[True]: ... + +def register_dlpack_dtype(dlpack_key: tuple[int, int], dtype: np.dtype, /) -> None: ... 
From c36307a4241a3cee9a806044787e6ebd3e7b2c03 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Mon, 11 May 2026 12:44:20 -0700 Subject: [PATCH 1716/1718] ENH: raise OverflowError on datetime64/timedelta64 arithmetic overflow (#31378) Co-authored-by: Claude Opus 4.7 (1M context) --- .../upcoming_changes/31378.compatibility.rst | 9 + numpy/_core/src/umath/loops.c.src | 82 ++++++++- numpy/_core/tests/test_datetime.py | 172 ++++++++++++++++++ 3 files changed, 255 insertions(+), 8 deletions(-) create mode 100644 doc/release/upcoming_changes/31378.compatibility.rst diff --git a/doc/release/upcoming_changes/31378.compatibility.rst b/doc/release/upcoming_changes/31378.compatibility.rst new file mode 100644 index 000000000000..76134b211bf2 --- /dev/null +++ b/doc/release/upcoming_changes/31378.compatibility.rst @@ -0,0 +1,9 @@ +``datetime64``/``timedelta64`` arithmetic raises on overflow +------------------------------------------------------------ + +Addition, subtraction, and integer multiplication of ``datetime64`` and +``timedelta64`` values now raise ``OverflowError`` when the result would +overflow ``int64`` or land on the ``NaT`` sentinel value. Previously these +operations silently wrapped, often producing a value that was +indistinguishable from ``NaT``. This matches the overflow checking already +performed by unit-conversion casts. diff --git a/numpy/_core/src/umath/loops.c.src b/numpy/_core/src/umath/loops.c.src index 1ad9cab4666e..bf3142096163 100644 --- a/numpy/_core/src/umath/loops.c.src +++ b/numpy/_core/src/umath/loops.c.src @@ -15,6 +15,7 @@ #include "lowlevel_strided_loops.h" #include "loops_utils.h" #include "gil_utils.h" +#include "npy_extint128.h" #include "npy_pycompat.h" @@ -807,6 +808,15 @@ NPY_NO_EXPORT void /**end repeat**/ +/* + * Overflow-checked arithmetic for datetime64/timedelta64. 
+ * + * The datetime64/timedelta64 add, subtract, and integer-multiply loops + * defined below raise OverflowError on signed int64 overflow rather + * than silently wrapping. NPY_DATETIME_NAT == NPY_MIN_INT64, so a + * valid arithmetic result that happens to equal NPY_MIN_INT64 would be + * silently misinterpreted as NaT; we treat that as overflow as well. + */ NPY_NO_EXPORT void DATETIME_Mm_M_add(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data)) { @@ -817,7 +827,14 @@ DATETIME_Mm_M_add(char **args, npy_intp const *dimensions, npy_intp const *steps *((npy_datetime *)op1) = NPY_DATETIME_NAT; } else { - *((npy_datetime *)op1) = in1 + in2; + char overflow = 0; + const npy_int64 result = safe_add(in1, in2, &overflow); + if (overflow || result == NPY_DATETIME_NAT) { + npy_gil_error(PyExc_OverflowError, + "Overflow in datetime64 + timedelta64 addition"); + return; + } + *((npy_datetime *)op1) = result; } } } @@ -832,7 +849,14 @@ DATETIME_mM_M_add(char **args, npy_intp const *dimensions, npy_intp const *steps *((npy_datetime *)op1) = NPY_DATETIME_NAT; } else { - *((npy_datetime *)op1) = in1 + in2; + char overflow = 0; + const npy_int64 result = safe_add(in1, in2, &overflow); + if (overflow || result == NPY_DATETIME_NAT) { + npy_gil_error(PyExc_OverflowError, + "Overflow in timedelta64 + datetime64 addition"); + return; + } + *((npy_datetime *)op1) = result; } } } @@ -847,7 +871,14 @@ TIMEDELTA_mm_m_add(char **args, npy_intp const *dimensions, npy_intp const *step *((npy_timedelta *)op1) = NPY_DATETIME_NAT; } else { - *((npy_timedelta *)op1) = in1 + in2; + char overflow = 0; + const npy_int64 result = safe_add(in1, in2, &overflow); + if (overflow || result == NPY_DATETIME_NAT) { + npy_gil_error(PyExc_OverflowError, + "Overflow in timedelta64 + timedelta64 addition"); + return; + } + *((npy_timedelta *)op1) = result; } } } @@ -862,7 +893,14 @@ DATETIME_Mm_M_subtract(char **args, npy_intp const *dimensions, npy_intp const * 
*((npy_datetime *)op1) = NPY_DATETIME_NAT; } else { - *((npy_datetime *)op1) = in1 - in2; + char overflow = 0; + const npy_int64 result = safe_sub(in1, in2, &overflow); + if (overflow || result == NPY_DATETIME_NAT) { + npy_gil_error(PyExc_OverflowError, + "Overflow in datetime64 - timedelta64 subtraction"); + return; + } + *((npy_datetime *)op1) = result; } } } @@ -877,7 +915,14 @@ DATETIME_MM_m_subtract(char **args, npy_intp const *dimensions, npy_intp const * *((npy_timedelta *)op1) = NPY_DATETIME_NAT; } else { - *((npy_timedelta *)op1) = in1 - in2; + char overflow = 0; + const npy_int64 result = safe_sub(in1, in2, &overflow); + if (overflow || result == NPY_DATETIME_NAT) { + npy_gil_error(PyExc_OverflowError, + "Overflow in datetime64 - datetime64 subtraction"); + return; + } + *((npy_timedelta *)op1) = result; } } } @@ -892,7 +937,14 @@ TIMEDELTA_mm_m_subtract(char **args, npy_intp const *dimensions, npy_intp const *((npy_timedelta *)op1) = NPY_DATETIME_NAT; } else { - *((npy_timedelta *)op1) = in1 - in2; + char overflow = 0; + const npy_int64 result = safe_sub(in1, in2, &overflow); + if (overflow || result == NPY_DATETIME_NAT) { + npy_gil_error(PyExc_OverflowError, + "Overflow in timedelta64 - timedelta64 subtraction"); + return; + } + *((npy_timedelta *)op1) = result; } } } @@ -908,7 +960,14 @@ TIMEDELTA_mq_m_multiply(char **args, npy_intp const *dimensions, npy_intp const *((npy_timedelta *)op1) = NPY_DATETIME_NAT; } else { - *((npy_timedelta *)op1) = in1 * in2; + char overflow = 0; + const npy_int64 result = safe_mul(in1, in2, &overflow); + if (overflow || result == NPY_DATETIME_NAT) { + npy_gil_error(PyExc_OverflowError, + "Overflow in timedelta64 * int64 multiplication"); + return; + } + *((npy_timedelta *)op1) = result; } } } @@ -924,7 +983,14 @@ TIMEDELTA_qm_m_multiply(char **args, npy_intp const *dimensions, npy_intp const *((npy_timedelta *)op1) = NPY_DATETIME_NAT; } else { - *((npy_timedelta *)op1) = in1 * in2; + char overflow = 0; + const npy_int64 
result = safe_mul(in1, in2, &overflow); + if (overflow || result == NPY_DATETIME_NAT) { + npy_gil_error(PyExc_OverflowError, + "Overflow in int64 * timedelta64 multiplication"); + return; + } + *((npy_timedelta *)op1) = result; } } } diff --git a/numpy/_core/tests/test_datetime.py b/numpy/_core/tests/test_datetime.py index 4b57f79476fe..02e87e284774 100644 --- a/numpy/_core/tests/test_datetime.py +++ b/numpy/_core/tests/test_datetime.py @@ -1093,6 +1093,178 @@ def test_cast_overflow_safe_unit_conversion(self): with pytest.raises(OverflowError, match="Overflow"): arr_2s_big.astype("datetime64[ns]") + def test_arithmetic_overflow_raises_add_sub(self): + # Add/sub on datetime64/timedelta64 must raise OverflowError instead + # of silently wrapping past INT64 range. Covers all six loops: + # Mm_M_add, mM_M_add, mm_m_add, Mm_M_subtract, MM_m_subtract, + # mm_m_subtract. Each loop is exercised both via the scalar fast + # path and via the strided ufunc loop. + big = np.iinfo(np.int64).max + dt_pos = np.datetime64(big - 1, "s") + dt_neg = np.datetime64(-big + 1, "s") + td_pos = np.timedelta64(2, "s") + td_big = np.timedelta64(big - 1, "s") + td_neg = np.timedelta64(-big + 1, "s") + + # datetime64 + timedelta64 (both operand orders) + with pytest.raises(OverflowError, match="Overflow"): + dt_pos + td_pos + with pytest.raises(OverflowError, match="Overflow"): + td_pos + dt_pos + + # datetime64 - timedelta64 + with pytest.raises(OverflowError, match="Overflow"): + dt_neg - td_pos + + # datetime64 - datetime64 (result is timedelta64) + with pytest.raises(OverflowError, match="Overflow"): + np.datetime64(big, "s") - dt_neg + + # timedelta64 + timedelta64 + with pytest.raises(OverflowError, match="Overflow"): + td_big + td_pos + + # timedelta64 - timedelta64 + with pytest.raises(OverflowError, match="Overflow"): + td_neg - td_pos + + # Overflow that does *not* wrap onto NPY_DATETIME_NAT -- isolates + # the safe_add bounds check from the result == NaT short-circuit. 
+ # big + 2 wraps to INT64_MIN + 1, a valid timedelta value. + with pytest.raises(OverflowError, match="Overflow"): + np.timedelta64(big, "s") + np.timedelta64(2, "s") + # Negative-side branch of safe_add: a < 0 && b < INT64_MIN - a. + # -big + -2 wraps to INT64_MAX - 1, also non-NaT. + with pytest.raises(OverflowError, match="Overflow"): + np.timedelta64(-big, "s") + np.timedelta64(-2, "s") + + # Array path -- one case per loop so the strided ufunc kernel is + # exercised for every signature, not only mm_m_add/subtract. + arr_td = np.array([0, big - 1, -big + 1], dtype="timedelta64[s]") + # TIMEDELTA_mm_m_add, TIMEDELTA_mm_m_subtract + with pytest.raises(OverflowError, match="Overflow"): + arr_td + td_pos + with pytest.raises(OverflowError, match="Overflow"): + arr_td - td_pos + # DATETIME_Mm_M_add (datetime + timedelta) + arr_dt = np.array([0, big - 1], dtype="datetime64[s]") + arr_td_add = np.array([0, 2], dtype="timedelta64[s]") + with pytest.raises(OverflowError, match="Overflow"): + arr_dt + arr_td_add + # DATETIME_mM_M_add (timedelta + datetime, swapped operand order) + with pytest.raises(OverflowError, match="Overflow"): + arr_td_add + arr_dt + # DATETIME_Mm_M_subtract (datetime - timedelta) + arr_dt_neg = np.array([0, -big + 1], dtype="datetime64[s]") + with pytest.raises(OverflowError, match="Overflow"): + arr_dt_neg - arr_td_add + # DATETIME_MM_m_subtract (datetime - datetime, result timedelta) + arr_dt_big = np.array([0, big], dtype="datetime64[s]") + with pytest.raises(OverflowError, match="Overflow"): + arr_dt_big - arr_dt_neg + + def test_arithmetic_overflow_raises_multiply(self): + # Integer multiplication of timedelta64 must raise OverflowError on + # signed-integer overflow. Covers TIMEDELTA_mq_m_multiply and + # TIMEDELTA_qm_m_multiply. 
+ big = np.iinfo(np.int64).max + int64_min = np.iinfo(np.int64).min + td = np.timedelta64(big // 2 + 1, "s") + + with pytest.raises(OverflowError, match="Overflow"): + td * np.int64(2) + with pytest.raises(OverflowError, match="Overflow"): + np.int64(2) * td + + # Overflow that does *not* wrap onto NPY_DATETIME_NAT -- isolates + # the safe_mul bounds check from the result == NaT short-circuit. + # big * 2 wraps to -2, a valid (non-NaT) timedelta. + with pytest.raises(OverflowError, match="Overflow"): + np.timedelta64(big, "s") * np.int64(2) + # Negative multiplier branch of safe_mul. + with pytest.raises(OverflowError, match="Overflow"): + np.timedelta64(big, "s") * np.int64(-2) + # INT64_MIN multiplier exercises the b < 0 sub-branch and is + # itself the NaT sentinel; the bounds check must catch it before + # the multiply produces a UB-tainted value. + with pytest.raises(OverflowError, match="Overflow"): + np.timedelta64(2, "s") * np.int64(int64_min) + + # Array path -- one case per loop signature. + arr = np.array([1, big // 2 + 1], dtype="timedelta64[s]") + # TIMEDELTA_mq_m_multiply (timedelta * int64) + with pytest.raises(OverflowError, match="Overflow"): + arr * np.int64(2) + # TIMEDELTA_qm_m_multiply (int64 * timedelta), reversed operand + # order to select the qm signature. + with pytest.raises(OverflowError, match="Overflow"): + np.int64(2) * arr + + def test_arithmetic_result_equals_nat_raises(self): + # NPY_DATETIME_NAT == INT64_MIN. An arithmetic result that lands + # exactly on INT64_MIN would be silently misinterpreted as NaT, so + # it must raise instead. 
+ big = np.iinfo(np.int64).max + + # add: (-big) + (-1) == INT64_MIN + with pytest.raises(OverflowError, match="Overflow"): + np.timedelta64(-big, "s") + np.timedelta64(-1, "s") + # sub: (-big) - 1 == INT64_MIN + with pytest.raises(OverflowError, match="Overflow"): + np.timedelta64(-big, "s") - np.timedelta64(1, "s") + # mul: (-2**62) * 2 == INT64_MIN + with pytest.raises(OverflowError, match="Overflow"): + np.timedelta64(-(1 << 62), "s") * np.int64(2) + + # Same check via the strided ufunc loop -- the second element + # (-big + -1) == INT64_MIN. + arr = np.array([0, -big], dtype="timedelta64[s]") + with pytest.raises(OverflowError, match="Overflow"): + arr + np.timedelta64(-1, "s") + + def test_arithmetic_nat_propagation(self): + # NaT inputs must pass through every datetime/timedelta arithmetic + # ufunc without raising, even now that overflow checking is on. + dt = np.datetime64(0, "s") + td = np.timedelta64(2, "s") + nat_dt = np.datetime64("NaT", "s") + nat_td = np.timedelta64("NaT", "s") + big = np.iinfo(np.int64).max + + # add/sub + assert np.isnat(nat_dt + td) + assert np.isnat(td + nat_dt) + assert np.isnat(dt + nat_td) + assert np.isnat(nat_dt - td) + assert np.isnat(nat_dt - dt) + assert np.isnat(nat_td + td) + assert np.isnat(nat_td - td) + + # multiply + assert np.isnat(nat_td * np.int64(5)) + assert np.isnat(np.int64(5) * nat_td) + + # Regression guard: the NaT short-circuit must run before the + # overflow check, so a NaT operand combined with a value that + # would otherwise overflow still yields NaT instead of raising. + assert np.isnat(nat_dt + np.timedelta64(big, "s")) + assert np.isnat(np.timedelta64(big, "s") + nat_td) + assert np.isnat(nat_td * np.int64(big)) + + def test_arithmetic_valid_boundary(self): + # Regression guard: overflow checks must not be too aggressive -- + # values that just barely fit must continue to work. 
+ big = np.iinfo(np.int64).max + + ok_dt = np.datetime64(big - 1, "s") + np.timedelta64(1, "s") + assert ok_dt == np.datetime64(big, "s") + ok_td = np.timedelta64(big - 1, "s") + np.timedelta64(1, "s") + assert ok_td == np.timedelta64(big, "s") + + small = np.timedelta64(3, "s") + assert small * np.int64(7) == np.timedelta64(21, "s") + assert np.int64(7) * small == np.timedelta64(21, "s") + def test_pyobject_roundtrip(self): # All datetime types should be able to roundtrip through object a = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, From 0adb7ef2609525f0c3ab91fe2914cc700d4311cc Mon Sep 17 00:00:00 2001 From: Michael Bommarito Date: Tue, 12 May 2026 05:13:11 -0400 Subject: [PATCH 1717/1718] BUG: handle non-ASCII flags and tofile format strings (#31257) As described in #31254, non-ASCII strings are not handled safely by arrayflags_setitem and PyArray_ToFile. What to do with Unicode strings elsewhere upstream / downstream is a complicated question that has design consequences, so Warren Weckesser suggested reasonable error handling in the bug report comments to provide the user with useful, non-segfaulting exceptions. This PR simply implements those recommendations and adds coverage to ensure KeyError / ValueError are properly raised when encountered. --- numpy/_core/src/multiarray/convert.c | 31 +++++++++++++++++------- numpy/_core/src/multiarray/flagsobject.c | 3 +++ numpy/_core/tests/test_multiarray.py | 29 ++++++++++++++++++++++ 3 files changed, 54 insertions(+), 9 deletions(-) diff --git a/numpy/_core/src/multiarray/convert.c b/numpy/_core/src/multiarray/convert.c index 27e9292c6160..d9f97e9ac968 100644 --- a/numpy/_core/src/multiarray/convert.c +++ b/numpy/_core/src/multiarray/convert.c @@ -144,7 +144,7 @@ PyArray_ToFile(PyArrayObject *self, FILE *fp, char *sep, char *format) npy_intp n, n2; size_t n3, n4; PyArrayIterObject *it; - PyObject *obj, *strobj, *tupobj, *byteobj, *formatobj = NULL; n3 = (sep ? 
strlen((const char *)sep) : 0); if (n3 == 0) { @@ -254,6 +254,13 @@ PyArray_ToFile(PyArrayObject *self, FILE *fp, char *sep, char *format) it = (PyArrayIterObject *) PyArray_IterNew((PyObject *)self); n4 = (format ? strlen((const char *)format) : 0); + if (n4 != 0) { + formatobj = PyUnicode_FromString((const char *)format); + if (formatobj == NULL) { + Py_DECREF(it); + return -1; + } + } while (it->index < it->size) { /* * This is as documented. If we have a low precision float value @@ -263,6 +270,7 @@ PyArray_ToFile(PyArrayObject *self, FILE *fp, char *sep, char *format) */ obj = PyArray_GETITEM(self, it->dataptr); if (obj == NULL) { + Py_XDECREF(formatobj); Py_DECREF(it); return -1; } @@ -273,6 +281,7 @@ PyArray_ToFile(PyArrayObject *self, FILE *fp, char *sep, char *format) strobj = PyObject_Str(obj); Py_DECREF(obj); if (strobj == NULL) { + Py_XDECREF(formatobj); Py_DECREF(it); return -1; } @@ -283,25 +292,26 @@ PyArray_ToFile(PyArrayObject *self, FILE *fp, char *sep, char *format) */ tupobj = PyTuple_New(1); if (tupobj == NULL) { + Py_XDECREF(formatobj); Py_DECREF(it); return -1; } PyTuple_SET_ITEM(tupobj,0,obj); - obj = PyUnicode_FromString((const char *)format); - if (obj == NULL) { - Py_DECREF(tupobj); - Py_DECREF(it); - return -1; - } - strobj = PyUnicode_Format(obj, tupobj); - Py_DECREF(obj); + strobj = PyUnicode_Format(formatobj, tupobj); Py_DECREF(tupobj); if (strobj == NULL) { + Py_XDECREF(formatobj); Py_DECREF(it); return -1; } } byteobj = PyUnicode_AsASCIIString(strobj); + if (byteobj == NULL) { + Py_DECREF(strobj); + Py_XDECREF(formatobj); + Py_DECREF(it); + return -1; + } NPY_BEGIN_ALLOW_THREADS; n2 = PyBytes_GET_SIZE(byteobj); n = fwrite(PyBytes_AS_STRING(byteobj), 1, n2, fp); @@ -312,6 +322,7 @@ PyArray_ToFile(PyArrayObject *self, FILE *fp, char *sep, char *format) "problem writing element %" NPY_INTP_FMT " to file", it->index); Py_DECREF(strobj); + Py_XDECREF(formatobj); Py_DECREF(it); return -1; } @@ -321,6 +332,7 @@ PyArray_ToFile(PyArrayObject 
*self, FILE *fp, char *sep, char *format) PyErr_Format(PyExc_OSError, "problem writing separator to file"); Py_DECREF(strobj); + Py_XDECREF(formatobj); Py_DECREF(it); return -1; } @@ -328,6 +340,7 @@ PyArray_ToFile(PyArrayObject *self, FILE *fp, char *sep, char *format) Py_DECREF(strobj); PyArray_ITER_NEXT(it); } + Py_XDECREF(formatobj); Py_DECREF(it); } return 0; diff --git a/numpy/_core/src/multiarray/flagsobject.c b/numpy/_core/src/multiarray/flagsobject.c index 2570d3ec5d16..fe5af1b96af3 100644 --- a/numpy/_core/src/multiarray/flagsobject.c +++ b/numpy/_core/src/multiarray/flagsobject.c @@ -586,6 +586,9 @@ arrayflags_setitem(PyArrayFlagsObject *self, PyObject *ind, PyObject *item) if (PyUnicode_Check(ind)) { PyObject *tmp_str; tmp_str = PyUnicode_AsASCIIString(ind); + if (tmp_str == NULL) { + goto fail; + } key = PyBytes_AS_STRING(tmp_str); n = PyBytes_GET_SIZE(tmp_str); if (n > 16) n = 16; diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 26c200971daf..09ed8e1edf84 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -270,6 +270,11 @@ def test_otherflags(self): assert_equal(arr.flags['X'], False) assert_equal(arr.flags['WRITEBACKIFCOPY'], False) + def test_non_ascii_flag_setitem_raises_keyerror(self): + arr = np.arange(10) + with pytest.raises(KeyError, match="Unknown flag"): + arr.flags["\N{MICRO SIGN}"] = True + def test_string_align(self): a = np.zeros(4, dtype=np.dtype('|S4')) assert_(a.flags.aligned) @@ -6184,6 +6189,30 @@ def test_tofile_format(self, tmp_path, param_filename, decimal_sep_localization) s = f.read() assert_equal(s, '1.51,2.00,3.51,4.00') + def test_tofile_non_ascii_format_raises_unicodeencodeerror( + self, tmp_path, param_filename): + tmp_filename = normalize_filename(tmp_path, param_filename) + x = np.array([1.51, 2, 3.51, 4], dtype=float) + with open(tmp_filename, 'w') as f: + with pytest.raises(UnicodeEncodeError): + x.tofile(f, sep=',', 
format='\N{MICRO SIGN}%.2f') + + def test_tofile_non_ascii_element_without_format_raises_unicodeencodeerror( + self, tmp_path, param_filename): + tmp_filename = normalize_filename(tmp_path, param_filename) + x = np.array(['\N{MICRO SIGN}']) + with open(tmp_filename, 'w') as f: + with pytest.raises(UnicodeEncodeError): + x.tofile(f, sep=',') + + def test_tofile_non_ascii_element_with_ascii_format_raises_unicodeencodeerror( + self, tmp_path, param_filename): + tmp_filename = normalize_filename(tmp_path, param_filename) + x = np.array(['\N{MICRO SIGN}123']) + with open(tmp_filename, 'w') as f: + with pytest.raises(UnicodeEncodeError): + x.tofile(f, sep=',', format='%s') + def test_tofile_cleanup(self, tmp_path, param_filename): tmp_filename = normalize_filename(tmp_path, param_filename) x = np.zeros((10), dtype=object) From aa48f6090c8cf0d2f4a46852733cb141d0d92cbe Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Tue, 12 May 2026 12:26:16 +0200 Subject: [PATCH 1718/1718] DOC: Add forgotten ``register_dlpack_dtype`` to docs [skip azp] [skip cirrus] [skip actions] --- doc/source/reference/routines.dtypes.rst | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/doc/source/reference/routines.dtypes.rst b/doc/source/reference/routines.dtypes.rst index bdb065d34b8a..a88307d9f165 100644 --- a/doc/source/reference/routines.dtypes.rst +++ b/doc/source/reference/routines.dtypes.rst @@ -71,3 +71,11 @@ Others .. attribute:: ObjectDType VoidDType + +Routines for DType authors +-------------------------- + +.. autosummary:: + :toctree: generated/ + + register_dlpack_dtype